Diffstat:
-rw-r--r-- .gitignore | 1
-rw-r--r-- Documentation/.gitignore | 1
-rw-r--r-- Documentation/ABI/obsolete/sysfs-kernel-fadump_enabled | 9
-rw-r--r-- Documentation/ABI/obsolete/sysfs-kernel-fadump_registered | 10
-rw-r--r-- Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem | 10
-rw-r--r-- Documentation/ABI/removed/sysfs-kernel-fadump_release_opalcore | 9
-rw-r--r-- Documentation/ABI/testing/debugfs-driver-habanalabs | 14
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-coresight-devices-cti | 241
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc | 8
-rw-r--r-- Documentation/ABI/testing/sysfs-driver-jz4780-efuse | 16
-rw-r--r-- Documentation/ABI/testing/sysfs-driver-uacce | 39
-rw-r--r-- Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups | 21
-rw-r--r-- Documentation/ABI/testing/sysfs-fs-f2fs | 5
-rw-r--r-- Documentation/ABI/testing/sysfs-kernel-fadump | 40
-rw-r--r-- Documentation/PCI/boot-interrupts.rst | 155
-rw-r--r-- Documentation/PCI/index.rst | 1
-rw-r--r-- Documentation/PCI/pcieaer-howto.rst | 23
-rw-r--r-- Documentation/admin-guide/binfmt-misc.rst | 4
-rw-r--r-- Documentation/admin-guide/cgroup-v1/cpusets.rst | 11
-rw-r--r-- Documentation/admin-guide/cgroup-v1/hugetlb.rst | 103
-rw-r--r-- Documentation/admin-guide/cgroup-v2.rst | 11
-rw-r--r-- Documentation/admin-guide/dynamic-debug-howto.rst | 3
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 29
-rw-r--r-- Documentation/admin-guide/mm/transhuge.rst | 14
-rw-r--r-- Documentation/admin-guide/mm/userfaultfd.rst | 51
-rw-r--r-- Documentation/admin-guide/pm/suspend-flows.rst | 270
-rw-r--r-- Documentation/admin-guide/pm/system-wide.rst | 1
-rw-r--r-- Documentation/admin-guide/sysctl/vm.rst | 3
-rw-r--r-- Documentation/core-api/mm-api.rst | 3
-rw-r--r-- Documentation/core-api/pin_user_pages.rst | 86
-rw-r--r-- Documentation/dev-tools/kunit/index.rst | 40
-rw-r--r-- Documentation/dev-tools/kunit/kunit-tool.rst | 7
-rw-r--r-- Documentation/dev-tools/kunit/start.rst | 80
-rw-r--r-- Documentation/dev-tools/kunit/usage.rst | 14
-rw-r--r-- Documentation/devicetree/bindings/.gitignore | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/arm,integrator.yaml | 86
-rw-r--r-- Documentation/devicetree/bindings/arm/arm,realview.yaml | 123
-rw-r--r-- Documentation/devicetree/bindings/arm/arm,versatile.yaml | 71
-rw-r--r-- Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml | 223
-rw-r--r-- Documentation/devicetree/bindings/arm/arm-boards | 237
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml | 21
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml | 21
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml | 21
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml | 88
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.txt | 31
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml | 29
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,hr2.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml | 28
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,ns2.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml | 23
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,nsp-cpu-method.txt | 39
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,nsp.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml | 36
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,stingray.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml | 24
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml | 22
-rw-r--r-- Documentation/devicetree/bindings/arm/coresight-cti.yaml | 336
-rw-r--r-- Documentation/devicetree/bindings/arm/coresight.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/arm/cpus.yaml | 46
-rw-r--r-- Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/fsl.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/arm/l2c2x0.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/pmu.yaml | 41
-rw-r--r-- Documentation/devicetree/bindings/arm/psci.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/arm/qcom.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/arm/renesas,prr.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/renesas.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/rockchip.yaml | 12
-rw-r--r-- Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/samsung/pmu.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/socionext/cache-uniphier.txt | 60
-rw-r--r-- Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml | 102
-rw-r--r-- Documentation/devicetree/bindings/arm/socionext/uniphier.txt | 47
-rw-r--r-- Documentation/devicetree/bindings/arm/socionext/uniphier.yaml | 61
-rw-r--r-- Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/arm/sunxi.yaml | 26
-rw-r--r-- Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt | 300
-rw-r--r-- Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml | 354
-rw-r--r-- Documentation/devicetree/bindings/arm/vexpress.txt | 229
-rw-r--r-- Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml | 71
-rw-r--r-- Documentation/devicetree/bindings/ata/sata_rcar.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml | 96
-rw-r--r-- Documentation/devicetree/bindings/bus/ti-sysc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/bus/uniphier-system-bus.txt | 66
-rw-r--r-- Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml | 54
-rw-r--r-- Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml | 103
-rw-r--r-- Documentation/devicetree/bindings/clock/arm-integrator.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/clock/arm-syscon-icst.txt | 70
-rw-r--r-- Documentation/devicetree/bindings/clock/clock-bindings.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/fsl,plldig.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mm-clock.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mm-clock.yaml | 68
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mn-clock.yaml | 50
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mp-clock.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mq-clock.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mq-clock.yaml | 72
-rw-r--r-- Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml | 64
-rw-r--r-- Documentation/devicetree/bindings/clock/marvell,mmp2.txt | 21
-rw-r--r-- Documentation/devicetree/bindings/clock/milbeaut-clock.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml | 72
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,mmcc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,rpmcc.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml | 62
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt | 100
-rw-r--r-- Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml | 119
-rw-r--r-- Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt | 17
-rw-r--r-- Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml | 94
-rw-r--r-- Documentation/devicetree/bindings/clock/sprd,sc9860-clk.txt (renamed from Documentation/devicetree/bindings/clock/sprd.txt) | 2
-rw-r--r-- Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml | 105
-rw-r--r-- Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml | 35
-rw-r--r-- Documentation/devicetree/bindings/clock/uniphier-clock.txt | 132
-rw-r--r-- Documentation/devicetree/bindings/connector/samsung,usb-connector-11pin.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/connector/usb-connector.txt | 135
-rw-r--r-- Documentation/devicetree/bindings/connector/usb-connector.yaml | 206
-rw-r--r-- Documentation/devicetree/bindings/crypto/fsl-dcp.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/crypto/fsl-sec4.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.yaml | 37
-rw-r--r-- Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/ps8640.yaml | 112
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml | 159
-rw-r--r-- Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/display/ilitek,ili9486.yaml | 73
-rw-r--r-- Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/display/msm/gmu.txt | 116
-rw-r--r-- Documentation/devicetree/bindings/display/msm/gmu.yaml | 123
-rw-r--r-- Documentation/devicetree/bindings/display/msm/gpu.txt | 55
-rw-r--r-- Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml | 122
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,g133han01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,g185han01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml | 80
-rw-r--r-- Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/display/panel/display-timing.txt | 124
-rw-r--r-- Documentation/devicetree/bindings/display/panel/display-timings.yaml | 77
-rw-r--r-- Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/edt,et-series.txt | 55
-rw-r--r-- Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml | 49
-rw-r--r-- Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml | 55
-rw-r--r-- Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt | 32
-rw-r--r-- Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt | 42
-rw-r--r-- Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml | 56
-rw-r--r-- Documentation/devicetree/bindings/display/panel/nvd,9128.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml | 53
-rw-r--r-- Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-common.yaml | 15
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-dpi.txt | 50
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-dpi.yaml | 81
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml | 67
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 209
-rw-r--r-- Documentation/devicetree/bindings/display/panel/panel-timing.yaml | 227
-rw-r--r-- Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml | 56
-rw-r--r-- Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml | 50
-rw-r--r-- Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt | 16
-rw-r--r-- Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt | 48
-rw-r--r-- Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml | 40
-rw-r--r-- Documentation/devicetree/bindings/display/sitronix,st7735r.txt | 35
-rw-r--r-- Documentation/devicetree/bindings/display/sitronix,st7735r.yaml | 78
-rw-r--r-- Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml | 152
-rw-r--r-- Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml | 208
-rw-r--r-- Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml | 106
-rw-r--r-- Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/dma/socionext,uniphier-mio-dmac.yaml | 63
-rw-r--r-- Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml | 63
-rw-r--r-- Documentation/devicetree/bindings/dma/ti-edma.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/dma/ti/k3-udma.yaml | 19
-rw-r--r-- Documentation/devicetree/bindings/dma/uniphier-mio-dmac.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/dsp/fsl,dsp.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/eeprom/at24.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/example-schema.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml | 56
-rw-r--r-- Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/firmware/qcom,scm.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/gnss/gnss.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/gpio/gpio-mvebu.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/gpio/gpio-uniphier.txt | 51
-rw-r--r-- Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml | 94
-rw-r--r-- Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/gpu/samsung-rotator.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/gpu/vivante,gc.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml | 97
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-at91.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-rk3x.txt | 68
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml | 136
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-uniphier-f.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c-uniphier.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/i2c/socionext,uniphier-fi2c.yaml | 50
-rw-r--r-- Documentation/devicetree/bindings/i2c/socionext,uniphier-i2c.yaml | 50
-rw-r--r-- Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml | 10
-rw-r--r-- Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/adux1020.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/bh1750.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/isl29018.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/noa1305.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/stk33xx.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/tsl2583.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/tsl2772.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/light/veml6030.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/pressure/bmp085.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml | 40
-rw-r--r-- Documentation/devicetree/bindings/input/gpio-vibrator.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/input/max77650-onkey.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,bcm-voter.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml | 62
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml | 85
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml | 74
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/msi.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt | 32
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml | 61
-rw-r--r-- Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/leds/common.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-max77650.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml | 80
-rw-r--r-- Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/mailbox/fsl,mu.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mailbox/mtk-gce.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml | 11
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,ceu.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,vin.txt | 217
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,vin.yaml | 402
-rw-r--r-- Documentation/devicetree/bindings/mfd/max77650.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/mfd/st,stpmic1.txt | 61
-rw-r--r-- Documentation/devicetree/bindings/mfd/st,stpmic1.yaml | 339
-rw-r--r-- Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml | 143
-rw-r--r-- Documentation/devicetree/bindings/mmc/sdhci-cadence.txt | 80
-rw-r--r-- Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml | 99
-rw-r--r-- Documentation/devicetree/bindings/mmc/uniphier-sd.txt | 55
-rw-r--r-- Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/mtd/denali,nand.yaml | 148
-rw-r--r-- Documentation/devicetree/bindings/mtd/denali-nand.txt | 61
-rw-r--r-- Documentation/devicetree/bindings/mtd/nand-macronix.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/net/broadcom-bluetooth.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/can/bosch,m_can.yaml | 144
-rw-r--r-- Documentation/devicetree/bindings/net/can/can-transceiver.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/net/can/can-transceiver.yaml | 18
-rw-r--r-- Documentation/devicetree/bindings/net/can/m_can.txt | 75
-rw-r--r-- Documentation/devicetree/bindings/net/can/tcan4x5x.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/fsl-fec.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/mediatek-bluetooth.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/qca,qca7000.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml | 25
-rw-r--r-- Documentation/devicetree/bindings/net/ti-bluetooth.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml | 49
-rw-r--r-- Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt | 66
-rw-r--r-- Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml | 76
-rw-r--r-- Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml | 27
-rw-r--r-- Documentation/devicetree/bindings/pci/cdns-pcie.yaml | 31
-rw-r--r-- Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt | 52
-rw-r--r-- Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt | 127
-rw-r--r-- Documentation/devicetree/bindings/pci/pci-ep.yaml | 41
-rw-r--r-- Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml | 35
-rw-r--r-- Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml | 52
-rw-r--r-- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml | 42
-rw-r--r-- Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/phy/phy-mmp3-usb.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt | 81
-rw-r--r-- Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml | 155
-rw-r--r-- Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml | 82
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.txt | 39
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml | 82
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml | 25
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml | 82
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml | 153
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml | 42
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml | 40
-rw-r--r-- Documentation/devicetree/bindings/power/power-domain.yaml | 28
-rw-r--r-- Documentation/devicetree/bindings/power/renesas,apmu.txt | 35
-rw-r--r-- Documentation/devicetree/bindings/power/renesas,apmu.yaml | 55
-rw-r--r-- Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt | 62
-rw-r--r-- Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml | 73
-rw-r--r-- Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/power/supply/max77650-charger.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/pwm/pwm-mediatek.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/regulator/max77650-regulator.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt | 64
-rw-r--r-- Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml | 324
-rw-r--r-- Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/reset/renesas,rst.txt | 48
-rw-r--r-- Documentation/devicetree/bindings/reset/renesas,rst.yaml | 63
-rw-r--r-- Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/rng/brcm,bcm2835.txt | 40
-rw-r--r-- Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml | 61
-rw-r--r-- Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt | 37
-rw-r--r-- Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml | 83
-rw-r--r-- Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/rtc/rtc-mt2712.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/serial/fsl-imx-uart.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,hscif.yaml | 135
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,sci-serial.txt | 150
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,sci.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,scif.yaml | 172
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,scifa.yaml | 107
-rw-r--r-- Documentation/devicetree/bindings/serial/renesas,scifb.yaml | 98
-rw-r--r-- Documentation/devicetree/bindings/serial/serial.txt | 56
-rw-r--r-- Documentation/devicetree/bindings/serial/serial.yaml | 131
-rw-r--r-- Documentation/devicetree/bindings/serial/slave-device.txt | 45
-rw-r--r-- Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml | 46
-rw-r--r-- Documentation/devicetree/bindings/serial/uniphier-uart.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/soc/imx/fsl,aips-bus.yaml | 47
-rw-r--r-- Documentation/devicetree/bindings/soc/mediatek/pwrap.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt | 50
-rw-r--r-- Documentation/devicetree/bindings/sound/adi,adau7118.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/amlogic,aiu.yaml | 113
-rw-r--r-- Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml | 51
-rw-r--r-- Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml | 113
-rw-r--r-- Documentation/devicetree/bindings/sound/amlogic,t9015.yaml | 58
-rw-r--r-- Documentation/devicetree/bindings/sound/brcm,bcm63xx-audio.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml | 69
-rw-r--r-- Documentation/devicetree/bindings/sound/cs42l51.txt | 33
-rw-r--r-- Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt | 44
-rw-r--r-- Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml | 67
-rw-r--r-- Documentation/devicetree/bindings/sound/ingenic,aic.yaml | 92
-rw-r--r-- Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/renesas,fsi.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/sound/rockchip-i2s.txt | 49
-rw-r--r-- Documentation/devicetree/bindings/sound/rockchip-i2s.yaml | 111
-rw-r--r-- Documentation/devicetree/bindings/sound/rt5682.txt | 18
-rw-r--r-- Documentation/devicetree/bindings/sound/samsung,odroid.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/samsung-i2s.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/st,stm32-i2s.txt | 62
-rw-r--r-- Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml | 87
-rw-r--r-- Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt | 56
-rw-r--r-- Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml | 80
-rw-r--r-- Documentation/devicetree/bindings/sound/tas2562.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/tlv320adcx140.yaml | 82
-rw-r--r-- Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml | 11
-rw-r--r-- Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt | 36
-rw-r--r-- Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml | 88
-rw-r--r-- Documentation/devicetree/bindings/sram/qcom,ocmem.yaml | 14
-rw-r--r-- Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/thermal/armada-thermal.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/thermal/qcom-tsens.yaml | 37
-rw-r--r-- Documentation/devicetree/bindings/thermal/rcar-thermal.txt | 78
-rw-r--r-- Documentation/devicetree/bindings/thermal/rcar-thermal.yaml | 139
-rw-r--r-- Documentation/devicetree/bindings/timer/arm,arch_timer.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/timer/arm,global_timer.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/trivial-devices.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/usb/dwc2.yaml | 19
-rw-r--r-- Documentation/devicetree/bindings/usb/exynos-usb.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/usb/fcs,fusb302.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/generic.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/mediatek,mtu3.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/mediatek,musb.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml | 190
-rw-r--r-- Documentation/devicetree/bindings/usb/richtek,rt1711h.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/typec-tcpci.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/usb-conn-gpio.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.yaml | 37
-rw-r--r-- Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt | 11
-rw-r--r-- Documentation/driver-api/gpio/driver.rst | 4
-rw-r--r-- Documentation/driver-api/libata.rst | 2
-rw-r--r-- Documentation/driver-api/soundwire/stream.rst | 61
-rw-r--r-- Documentation/filesystems/f2fs.rst | 4
-rw-r--r-- Documentation/filesystems/fiemap.txt | 6
-rw-r--r-- Documentation/filesystems/path-lookup.rst | 7
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 18
-rw-r--r-- Documentation/gpu/i915.rst | 4
-rw-r--r-- Documentation/gpu/todo.rst | 53
-rw-r--r-- Documentation/i2c/smbus-protocol.rst | 2
-rw-r--r-- Documentation/index.rst | 2
-rw-r--r-- Documentation/mhi/index.rst | 18
-rw-r--r-- Documentation/mhi/mhi.rst | 218
-rw-r--r-- Documentation/mhi/topology.rst | 60
-rw-r--r-- Documentation/misc-devices/uacce.rst | 176
-rw-r--r-- Documentation/networking/devlink/devlink-trap.rst | 2
-rw-r--r-- Documentation/openrisc/openrisc_port.rst | 4
-rw-r--r-- Documentation/powerpc/firmware-assisted-dump.rst | 32
-rw-r--r-- Documentation/process/embargoed-hardware-issues.rst | 3
-rw-r--r-- Documentation/remoteproc.txt | 2
-rw-r--r-- Documentation/scsi/53c700.rst (renamed from Documentation/scsi/53c700.txt) | 61
-rw-r--r-- Documentation/scsi/BusLogic.rst (renamed from Documentation/scsi/BusLogic.txt) | 89
-rw-r--r-- Documentation/scsi/FlashPoint.rst | 176
-rw-r--r-- Documentation/scsi/FlashPoint.txt | 163
-rw-r--r-- Documentation/scsi/NinjaSCSI.rst | 164
-rw-r--r-- Documentation/scsi/NinjaSCSI.txt | 128
-rw-r--r-- Documentation/scsi/aacraid.rst (renamed from Documentation/scsi/aacraid.txt) | 59
-rw-r--r-- Documentation/scsi/advansys.rst (renamed from Documentation/scsi/advansys.txt) | 129
-rw-r--r-- Documentation/scsi/aha152x.rst (renamed from Documentation/scsi/aha152x.txt) | 73
-rw-r--r-- Documentation/scsi/aic79xx.rst | 593
-rw-r--r-- Documentation/scsi/aic79xx.txt | 497
-rw-r--r-- Documentation/scsi/aic7xxx.rst | 458
-rw-r--r-- Documentation/scsi/aic7xxx.txt | 394
-rw-r--r-- Documentation/scsi/arcmsr_spec.rst | 907
-rw-r--r-- Documentation/scsi/arcmsr_spec.txt | 574
-rw-r--r-- Documentation/scsi/bfa.rst (renamed from Documentation/scsi/bfa.txt) | 28
-rw-r--r-- Documentation/scsi/bnx2fc.rst (renamed from Documentation/scsi/bnx2fc.txt) | 18
-rw-r--r-- Documentation/scsi/cxgb3i.rst (renamed from Documentation/scsi/cxgb3i.txt) | 22
-rw-r--r-- Documentation/scsi/dc395x.rst (renamed from Documentation/scsi/dc395x.txt) | 79
-rw-r--r-- Documentation/scsi/dpti.rst | 92
-rw-r--r-- Documentation/scsi/dpti.txt | 83
-rw-r--r-- Documentation/scsi/g_NCR5380.rst | 93
-rw-r--r-- Documentation/scsi/g_NCR5380.txt | 68
-rw-r--r-- Documentation/scsi/hpsa.rst (renamed from Documentation/scsi/hpsa.txt) | 79
-rw-r--r-- Documentation/scsi/hptiop.rst (renamed from Documentation/scsi/hptiop.txt) | 45
-rw-r--r-- Documentation/scsi/index.rst | 51
-rw-r--r-- Documentation/scsi/libsas.rst (renamed from Documentation/scsi/libsas.txt) | 352
-rw-r--r-- Documentation/scsi/link_power_management_policy.rst (renamed from Documentation/scsi/link_power_management_policy.txt) | 12
-rw-r--r-- Documentation/scsi/lpfc.rst (renamed from Documentation/scsi/lpfc.txt) | 16
-rw-r--r-- Documentation/scsi/megaraid.rst (renamed from Documentation/scsi/megaraid.txt) | 47
-rw-r--r-- Documentation/scsi/ncr53c8xx.rst (renamed from Documentation/scsi/ncr53c8xx.txt) | 1871
-rw-r--r-- Documentation/scsi/ppa.rst | 18
-rw-r--r-- Documentation/scsi/ppa.txt | 14
-rw-r--r-- Documentation/scsi/qlogicfas.rst (renamed from Documentation/scsi/qlogicfas.txt) | 17
-rw-r--r-- Documentation/scsi/scsi-changer.rst (renamed from Documentation/scsi/scsi-changer.txt) | 36
-rw-r--r-- Documentation/scsi/scsi-generic.rst (renamed from Documentation/scsi/scsi-generic.txt) | 75
-rw-r--r-- Documentation/scsi/scsi-parameters.rst (renamed from Documentation/scsi/scsi-parameters.txt) | 28
-rw-r--r-- Documentation/scsi/scsi.rst (renamed from Documentation/scsi/scsi.txt) | 31
-rw-r--r-- Documentation/scsi/scsi_eh.rst (renamed from Documentation/scsi/scsi_eh.txt) | 213
-rw-r--r-- Documentation/scsi/scsi_fc_transport.rst (renamed from Documentation/scsi/scsi_fc_transport.txt) | 242
-rw-r--r-- Documentation/scsi/scsi_mid_low_api.rst | 1313
-rw-r--r-- Documentation/scsi/scsi_mid_low_api.txt | 1259
-rw-r--r-- Documentation/scsi/scsi_transport_srp/Makefile | 7
-rw-r--r-- Documentation/scsi/scsi_transport_srp/figures.rst | 6
-rw-r--r-- Documentation/scsi/sd-parameters.rst | 27
-rw-r--r-- Documentation/scsi/sd-parameters.txt | 22
-rw-r--r-- Documentation/scsi/smartpqi.rst (renamed from Documentation/scsi/smartpqi.txt) | 52
-rw-r--r-- Documentation/scsi/st.rst (renamed from Documentation/scsi/st.txt) | 301
-rw-r--r-- Documentation/scsi/sym53c500_cs.rst (renamed from Documentation/scsi/sym53c500_cs.txt) | 8
-rw-r--r-- Documentation/scsi/sym53c8xx_2.rst (renamed from Documentation/scsi/sym53c8xx_2.txt) | 1131
-rw-r--r-- Documentation/scsi/tcm_qla2xxx.rst (renamed from Documentation/scsi/tcm_qla2xxx.txt) | 26
-rw-r--r-- Documentation/scsi/ufs.rst (renamed from Documentation/scsi/ufs.txt) | 84
-rw-r--r-- Documentation/scsi/wd719x.rst | 24
-rw-r--r-- Documentation/scsi/wd719x.txt | 21
-rw-r--r-- Documentation/sound/alsa-configuration.rst | 13
-rw-r--r-- Documentation/sound/hd-audio/index.rst | 1
-rw-r--r-- Documentation/sound/hd-audio/models.rst | 2
-rw-r--r-- Documentation/sound/hd-audio/realtek-pc-beep.rst | 129
-rw-r--r-- Documentation/sound/soc/codec-to-codec.rst | 9
-rw-r--r-- Documentation/trace/coresight/coresight-ect.rst | 222
-rw-r--r-- Documentation/trace/coresight/coresight.rst | 13
-rw-r--r-- Documentation/trace/ftrace.rst | 82
-rw-r--r-- Documentation/virt/kvm/api.rst | 128
-rw-r--r-- Documentation/virt/kvm/arm/hyp-abi.rst | 5
-rw-r--r-- Documentation/virt/kvm/devices/s390_flic.rst | 11
-rw-r--r-- Documentation/virt/kvm/index.rst | 2
-rw-r--r-- Documentation/virt/kvm/locking.rst | 11
-rw-r--r-- Documentation/virt/kvm/s390-pv-boot.rst | 84
-rw-r--r-- Documentation/virt/kvm/s390-pv.rst | 116
-rw-r--r-- Documentation/vm/.gitignore | 1
-rw-r--r-- Documentation/vm/free_page_reporting.rst | 40
-rw-r--r-- Documentation/vm/hmm.rst | 12
-rw-r--r-- Documentation/vm/zswap.rst | 20
-rw-r--r-- MAINTAINERS | 310
-rw-r--r-- arch/.gitignore | 1
-rw-r--r-- arch/Kconfig | 15
-rw-r--r-- arch/alpha/configs/defconfig | 4
-rw-r--r-- arch/alpha/include/asm/Kbuild | 11
-rw-r--r-- arch/alpha/include/asm/mmzone.h | 2
-rw-r--r-- arch/alpha/kernel/.gitignore | 1
-rw-r--r-- arch/alpha/kernel/sys_nautilus.c | 52
-rw-r--r-- arch/alpha/kernel/syscalls/syscallhdr.sh | 2
-rw-r--r-- arch/alpha/mm/fault.c | 6
-rw-r--r-- arch/arc/Kconfig | 50
-rw-r--r-- arch/arc/boot/.gitignore | 1
-rw-r--r-- arch/arc/boot/dts/axs10x_mb.dtsi | 1
-rw-r--r-- arch/arc/include/asm/Kbuild | 21
-rw-r--r-- arch/arc/include/asm/arcregs.h | 26
-rw-r--r-- arch/arc/include/asm/asserts.h | 34
-rw-r--r-- arch/arc/include/asm/dsp-impl.h | 150
-rw-r--r-- arch/arc/include/asm/dsp.h | 29
-rw-r--r-- arch/arc/include/asm/entry-arcv2.h | 6
-rw-r--r-- arch/arc/include/asm/processor.h | 4
-rw-r--r-- arch/arc/include/asm/ptrace.h | 3
-rw-r--r-- arch/arc/include/asm/switch_to.h | 2
-rw-r--r-- arch/arc/kernel/.gitignore | 1
-rw-r--r-- arch/arc/kernel/asm-offsets.c | 4
-rw-r--r-- arch/arc/kernel/head.S | 4
-rw-r--r-- arch/arc/kernel/setup.c | 34
-rw-r--r-- arch/arc/mm/fault.c | 35
-rw-r--r-- arch/arm/Kconfig | 2
-rw-r--r-- arch/arm/Kconfig.debug | 42
-rw-r--r-- arch/arm/Makefile | 2
-rw-r--r-- arch/arm/boot/.gitignore | 1
-rw-r--r-- arch/arm/boot/compressed/.gitignore | 1
-rw-r--r-- arch/arm/boot/compressed/head.S | 136
-rw-r--r-- arch/arm/boot/compressed/vmlinux.lds.S | 1
-rw-r--r-- arch/arm/boot/dts/Makefile | 20
-rw-r--r-- arch/arm/boot/dts/am33xx-l4.dtsi | 21
-rw-r--r-- arch/arm/boot/dts/am33xx.dtsi | 133
-rw-r--r-- arch/arm/boot/dts/am4372.dtsi | 191
-rw-r--r-- arch/arm/boot/dts/am437x-l4.dtsi | 77
-rw-r--r-- arch/arm/boot/dts/am57-pruss.dtsi | 50
-rw-r--r-- arch/arm/boot/dts/am5718.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/am5728.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/am5748.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/arm-realview-pbx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/at91-sam9x60ek.dts | 5
-rw-r--r-- arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts | 12
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts | 33
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_xplained.dts | 33
-rw-r--r-- arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts | 2
-rw-r--r-- arch/arm/boot/dts/at91sam9n12ek.dts | 2
-rw-r--r-- arch/arm/boot/dts/at91sam9x5dm.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/bcm2711-rpi-4-b.dts | 74
-rw-r--r-- arch/arm/boot/dts/bcm2711.dtsi | 25
-rw-r--r-- arch/arm/boot/dts/dm814x-clocks.dtsi | 14
-rw-r--r-- arch/arm/boot/dts/dm814x.dtsi | 260
-rw-r--r-- arch/arm/boot/dts/dm816x.dtsi | 148
-rw-r--r-- arch/arm/boot/dts/dra62x.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 219
-rw-r--r-- arch/arm/boot/dts/dra72x.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/dra74x.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/ecx-2000.dts | 6
-rw-r--r-- arch/arm/boot/dts/ecx-common.dtsi | 17
-rw-r--r-- arch/arm/boot/dts/exynos3250-artik5.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/exynos4210-universal_c210.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos4412-odroid-common.dtsi | 9
-rw-r--r-- arch/arm/boot/dts/exynos5250-arndale.dts | 10
-rw-r--r-- arch/arm/boot/dts/exynos5420-arndale-octa.dts | 2
-rw-r--r-- arch/arm/boot/dts/exynos5422-cpus.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/exynos5422-odroid-core.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/exynos5422-odroidhc1.dts | 30
-rw-r--r-- arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi | 59
-rw-r--r-- arch/arm/boot/dts/gemini-dlink-dir-685.dts | 50
-rw-r--r-- arch/arm/boot/dts/gemini-dlink-dns-313.dts | 2
-rw-r--r-- arch/arm/boot/dts/gemini-nas4220b.dts | 4
-rw-r--r-- arch/arm/boot/dts/gemini-sl93512r.dts | 4
-rw-r--r-- arch/arm/boot/dts/gemini-sq201.dts | 2
-rw-r--r-- arch/arm/boot/dts/gemini.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/highbank.dts | 11
-rw-r--r-- arch/arm/boot/dts/imx23-olinuxino.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx23.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/imx25-pinfunc.h | 8
-rw-r--r-- arch/arm/boot/dts/imx25.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx27.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx28-apx4devkit.dts | 10
-rw-r--r-- arch/arm/boot/dts/imx28.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx31.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx35.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx50.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx51-zii-rdu1.dts | 5
-rw-r--r-- arch/arm/boot/dts/imx51.dtsi | 17
-rw-r--r-- arch/arm/boot/dts/imx53.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts | 40
-rw-r--r-- arch/arm/boot/dts/imx6dl-pico-dwarf.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6dl-pico-hobbit.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6dl-pico-nymph.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6dl-pico-pi.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6dl-riotboard.dts | 16
-rw-r--r-- arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6dl.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/imx6q-apalis-eval.dts | 40
-rw-r--r-- arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts | 40
-rw-r--r-- arch/arm/boot/dts/imx6q-apalis-ixora.dts | 40
-rw-r--r-- arch/arm/boot/dts/imx6q-marsboard.dts | 16
-rw-r--r-- arch/arm/boot/dts/imx6q-novena.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6q-pico-dwarf.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6q-pico-hobbit.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6q-pico-nymph.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6q-pico-pi.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6q.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/imx6qdl-apalis.dtsi | 40
-rw-r--r-- arch/arm/boot/dts/imx6qdl-colibri.dtsi | 40
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw52xx.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw53xx.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw54xx.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw553x.dtsi | 31
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw5910.dtsi | 25
-rw-r--r-- arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/imx6qdl-pico-dwarf.dtsi | 45
-rw-r--r-- arch/arm/boot/dts/imx6qdl-pico-hobbit.dtsi | 37
-rw-r--r-- arch/arm/boot/dts/imx6qdl-pico-nymph.dtsi | 54
-rw-r--r-- arch/arm/boot/dts/imx6qdl-pico-pi.dtsi | 31
-rw-r--r-- arch/arm/boot/dts/imx6qdl-pico.dtsi | 617
-rw-r--r-- arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/imx6qdl.dtsi | 36
-rw-r--r-- arch/arm/boot/dts/imx6qp.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/imx6sl.dtsi | 28
-rw-r--r-- arch/arm/boot/dts/imx6sll.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx6sx-nitrogen6sx.dts | 20
-rw-r--r-- arch/arm/boot/dts/imx6sx-pinfunc.h | 286
-rw-r--r-- arch/arm/boot/dts/imx6sx-sabreauto.dts | 4
-rw-r--r-- arch/arm/boot/dts/imx6sx-sdb.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx6sx-softing-vining-2000.dts | 22
-rw-r--r-- arch/arm/boot/dts/imx6sx-udoo-neo.dtsi | 28
-rw-r--r-- arch/arm/boot/dts/imx6sx.dtsi | 34
-rw-r--r-- arch/arm/boot/dts/imx6ul-ccimx6ulsbcpro.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6ul-pico-dwarf.dts | 52
-rw-r--r-- arch/arm/boot/dts/imx6ul-pico.dtsi | 39
-rw-r--r-- arch/arm/boot/dts/imx6ul.dtsi | 28
-rw-r--r-- arch/arm/boot/dts/imx6ull.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx7-colibri-aster.dtsi | 169
-rw-r--r-- arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi | 46
-rw-r--r-- arch/arm/boot/dts/imx7-colibri.dtsi | 230
-rw-r--r-- arch/arm/boot/dts/imx7d-cl-som-imx7.dts | 4
-rw-r--r-- arch/arm/boot/dts/imx7d-colibri-aster.dts | 20
-rw-r--r-- arch/arm/boot/dts/imx7d-colibri-emmc-aster.dts | 20
-rw-r--r-- arch/arm/boot/dts/imx7d-colibri-emmc.dtsi | 26
-rw-r--r-- arch/arm/boot/dts/imx7d-colibri-eval-v3.dts | 41
-rw-r--r-- arch/arm/boot/dts/imx7d-colibri.dtsi | 41
-rw-r--r-- arch/arm/boot/dts/imx7d-pico-dwarf.dts | 87
-rw-r--r-- arch/arm/boot/dts/imx7d-pico-nymph.dts | 84
-rw-r--r-- arch/arm/boot/dts/imx7s-colibri-aster.dts | 15
-rw-r--r-- arch/arm/boot/dts/imx7s-colibri-eval-v3.dts | 41
-rw-r--r-- arch/arm/boot/dts/imx7s-colibri.dtsi | 41
-rw-r--r-- arch/arm/boot/dts/imx7s.dtsi | 38
-rw-r--r-- arch/arm/boot/dts/imx7ulp-evk.dts | 3
-rw-r--r-- arch/arm/boot/dts/imx7ulp.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/integratorap.dts | 1
-rw-r--r-- arch/arm/boot/dts/mt2701.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/mt7623.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/mt7629.dtsi | 16
-rw-r--r-- arch/arm/boot/dts/omap4-l4.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/omap4.dtsi | 279
-rw-r--r-- arch/arm/boot/dts/omap5-l4-abe.dtsi | 16
-rw-r--r-- arch/arm/boot/dts/omap5.dtsi | 241
-rw-r--r-- arch/arm/boot/dts/qcom-apq8064.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-apq8084.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-ipq4019.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-ipq8064.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-mdm9615.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-msm8660.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-msm8960.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts | 71
-rw-r--r-- arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts | 25
-rw-r--r-- arch/arm/boot/dts/qcom-msm8974.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/r7s72100-gr-peach.dts | 3
-rw-r--r-- arch/arm/boot/dts/r7s72100.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/r8a73a4-ape6evm.dts | 7
-rw-r--r-- arch/arm/boot/dts/r8a7743.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/r8a7744.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/r8a7745-iwg22d-sodimm-dbhd-ca.dts | 6
-rw-r--r-- arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts | 93
-rw-r--r-- arch/arm/boot/dts/r8a7745.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/r8a77470.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/r8a7790-lager.dts | 1
-rw-r--r-- arch/arm/boot/dts/r8a7790-stout.dts | 1
-rw-r--r-- arch/arm/boot/dts/r8a7790.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7791-koelsch.dts | 1
-rw-r--r-- arch/arm/boot/dts/r8a7791-porter.dts | 1
-rw-r--r-- arch/arm/boot/dts/r8a7791.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/r8a7792.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/r8a7793-gose.dts | 1
-rw-r--r-- arch/arm/boot/dts/r8a7793.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/r8a7794-alt.dts | 1
-rw-r--r-- arch/arm/boot/dts/r8a7794-silk.dts | 1
-rw-r--r-- arch/arm/boot/dts/r8a7794.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/rk3036-kylin.dts | 2
-rw-r--r-- arch/arm/boot/dts/rk3036.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/rk3066a.dtsi | 18
-rw-r--r-- arch/arm/boot/dts/rk3188-bqedison2qc.dts | 29
-rw-r--r-- arch/arm/boot/dts/rk3188.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/rk322x.dtsi | 17
-rw-r--r-- arch/arm/boot/dts/rk3288-evb-act8846.dts | 1
-rw-r--r-- arch/arm/boot/dts/rk3288-evb-rk808.dts | 1
-rw-r--r-- arch/arm/boot/dts/rk3288-firefly-reload.dts | 1
-rw-r--r-- arch/arm/boot/dts/rk3288-r89.dts | 1
-rw-r--r-- arch/arm/boot/dts/rk3288-tinker.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/rk3288-vyasa.dts | 28
-rw-r--r-- arch/arm/boot/dts/rk3288.dtsi | 26
-rw-r--r-- arch/arm/boot/dts/rk3xxx.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/rv1108.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/sam9x60.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/sama5d2.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d3.dtsi | 33
-rw-r--r-- arch/arm/boot/dts/sama5d4.dtsi | 35
-rw-r--r-- arch/arm/boot/dts/sh73a0-kzm9g.dts | 9
-rw-r--r-- arch/arm/boot/dts/socfpga_arria10.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/socfpga_arria10_socdk_nand.dts | 4
-rw-r--r-- arch/arm/boot/dts/ste-ab8500.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/ste-ab8505.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/ste-dbx5x0.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/ste-href-stuib.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/ste-ux500-samsung-skomer.dts | 581
-rw-r--r-- arch/arm/boot/dts/stm32746g-eval.dts | 2
-rw-r--r-- arch/arm/boot/dts/stm32mp15-pinctrl.dtsi | 93
-rw-r--r-- arch/arm/boot/dts/stm32mp151.dtsi | 16
-rw-r--r-- arch/arm/boot/dts/stm32mp153.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/stm32mp157a-avenger96.dts | 11
-rw-r--r-- arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts | 265
-rw-r--r-- arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi | 368
-rw-r--r-- arch/arm/boot/dts/stm32mp157c-ed1.dts | 16
-rw-r--r-- arch/arm/boot/dts/stm32mp157c-ev1.dts | 13
-rw-r--r-- arch/arm/boot/dts/stm32mp15xx-dkx.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts | 257
-rw-r--r-- arch/arm/boot/dts/sun5i.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/sun6i-a31.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/sun7i-a20-linutronix-testbox-v2.dts | 47
-rw-r--r-- arch/arm/boot/dts/sun7i-a20.dtsi | 25
-rw-r--r-- arch/arm/boot/dts/sun8i-a23-a33.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/sun8i-a33.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts | 1
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t.dtsi | 73
-rw-r--r-- arch/arm/boot/dts/sun8i-h3.dtsi | 25
-rw-r--r-- arch/arm/boot/dts/sun8i-r40.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/sun8i-v3s.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sunxi-h3-h5.dtsi | 19
-rw-r--r-- arch/arm/boot/dts/tegra114-dalmore.dts | 8
-rw-r--r-- arch/arm/boot/dts/tegra114.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra124-apalis.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra124-jetson-tk1.dts | 14
-rw-r--r-- arch/arm/boot/dts/tegra124-nyan.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra124-venice2.dts | 8
-rw-r--r-- arch/arm/boot/dts/tegra124.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/tegra20.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/tegra30-apalis-v1.1.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra30-apalis.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra30-beaver.dts | 8
-rw-r--r-- arch/arm/boot/dts/tegra30-cardhu.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra30-colibri.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra30.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/uniphier-ld4.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/uniphier-pro4.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/uniphier-pro5.dtsi | 164
-rw-r--r-- arch/arm/boot/dts/uniphier-pxs2.dtsi | 14
-rw-r--r-- arch/arm/boot/dts/uniphier-ref-daughter.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/uniphier-sld8.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/versatile-ab-ib2.dts | 2
-rw-r--r-- arch/arm/boot/dts/vexpress-v2m-rs1.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/vexpress-v2m.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts | 2
-rw-r--r-- arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 2
-rw-r--r-- arch/arm/boot/dts/vexpress-v2p-ca5s.dts | 2
-rw-r--r-- arch/arm/boot/dts/vexpress-v2p-ca9.dts | 2
-rw-r--r-- arch/arm/boot/dts/vf-colibri-eval-v3.dtsi | 40
-rw-r--r-- arch/arm/boot/dts/vf-colibri.dtsi | 39
-rw-r--r-- arch/arm/boot/dts/vf500-colibri-eval-v3.dts | 40
-rw-r--r-- arch/arm/boot/dts/vf500-colibri.dtsi | 40
-rw-r--r-- arch/arm/boot/dts/vf500.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/vf610-colibri-eval-v3.dts | 40
-rw-r--r-- arch/arm/boot/dts/vf610-colibri.dtsi | 40
-rw-r--r-- arch/arm/boot/dts/vf610-zii-cfu1.dts | 8
-rw-r--r-- arch/arm/boot/dts/vf610-zii-dev.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/vf610-zii-spb4.dts | 8
-rw-r--r-- arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts | 8
-rw-r--r-- arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts | 8
-rw-r--r-- arch/arm/boot/dts/vf610m4-colibri.dts | 39
-rw-r--r-- arch/arm/boot/dts/vfxxx.dtsi | 4
-rw-r--r--arch/arm/configs/at91_dt_defconfig1
-rw-r--r--arch/arm/configs/axm55xx_defconfig2
-rw-r--r--arch/arm/configs/bcm2835_defconfig5
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/exynos_defconfig11
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig3
-rw-r--r--arch/arm/configs/integrator_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig9
-rw-r--r--arch/arm/configs/omap2plus_defconfig27
-rw-r--r--arch/arm/configs/qcom_defconfig6
-rw-r--r--arch/arm/configs/rpc_defconfig1
-rw-r--r--arch/arm/configs/s3c2410_defconfig1
-rw-r--r--arch/arm/configs/shmobile_defconfig2
-rw-r--r--arch/arm/configs/sunxi_defconfig2
-rw-r--r--arch/arm/configs/tegra_defconfig1
-rw-r--r--arch/arm/configs/versatile_defconfig2
-rw-r--r--arch/arm/crypto/.gitignore2
-rw-r--r--arch/arm/crypto/aes-neonbs-glue.c1
-rw-r--r--arch/arm/crypto/ghash-ce-core.S5
-rw-r--r--arch/arm/include/asm/Kbuild12
-rw-r--r--arch/arm/include/asm/arch_gicv3.h114
-rw-r--r--arch/arm/include/asm/dma-iommu.h2
-rw-r--r--arch/arm/include/asm/kvm_arm.h239
-rw-r--r--arch/arm/include/asm/kvm_asm.h77
-rw-r--r--arch/arm/include/asm/kvm_coproc.h36
-rw-r--r--arch/arm/include/asm/kvm_emulate.h372
-rw-r--r--arch/arm/include/asm/kvm_host.h456
-rw-r--r--arch/arm/include/asm/kvm_hyp.h127
-rw-r--r--arch/arm/include/asm/kvm_mmu.h435
-rw-r--r--arch/arm/include/asm/kvm_ras.h14
-rw-r--r--arch/arm/include/asm/nwflash.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h20
-rw-r--r--arch/arm/include/asm/pgtable.h9
-rw-r--r--arch/arm/include/asm/sections.h6
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h75
-rw-r--r--arch/arm/include/asm/virt.h17
-rw-r--r--arch/arm/include/debug/stm32.S9
-rw-r--r--arch/arm/include/uapi/asm/kvm.h314
-rw-r--r--arch/arm/kernel/.gitignore1
-rw-r--r--arch/arm/kernel/asm-offsets.c11
-rw-r--r--arch/arm/kernel/hyp-stub.S39
-rw-r--r--arch/arm/kernel/relocate_kernel.S8
-rw-r--r--arch/arm/kernel/unwind.c2
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S8
-rw-r--r--arch/arm/kernel/vmlinux.lds.S8
-rw-r--r--arch/arm/kernel/vmlinux.lds.h10
-rw-r--r--arch/arm/kvm/Kconfig59
-rw-r--r--arch/arm/kvm/Makefile43
-rw-r--r--arch/arm/kvm/coproc.c1455
-rw-r--r--arch/arm/kvm/coproc.h130
-rw-r--r--arch/arm/kvm/coproc_a15.c39
-rw-r--r--arch/arm/kvm/coproc_a7.c42
-rw-r--r--arch/arm/kvm/emulate.c166
-rw-r--r--arch/arm/kvm/guest.c387
-rw-r--r--arch/arm/kvm/handle_exit.c175
-rw-r--r--arch/arm/kvm/hyp/Makefile34
-rw-r--r--arch/arm/kvm/hyp/banked-sr.c70
-rw-r--r--arch/arm/kvm/hyp/cp15-sr.c72
-rw-r--r--arch/arm/kvm/hyp/entry.S121
-rw-r--r--arch/arm/kvm/hyp/hyp-entry.S295
-rw-r--r--arch/arm/kvm/hyp/s2-setup.c22
-rw-r--r--arch/arm/kvm/hyp/switch.c242
-rw-r--r--arch/arm/kvm/hyp/tlb.c68
-rw-r--r--arch/arm/kvm/hyp/vfp.S57
-rw-r--r--arch/arm/kvm/init.S157
-rw-r--r--arch/arm/kvm/interrupts.S36
-rw-r--r--arch/arm/kvm/irq.h16
-rw-r--r--arch/arm/kvm/reset.c86
-rw-r--r--arch/arm/kvm/trace.h86
-rw-r--r--arch/arm/kvm/vgic-v3-coproc.c27
-rw-r--r--arch/arm/mach-at91/.gitignore1
-rw-r--r--arch/arm/mach-at91/Kconfig1
-rw-r--r--arch/arm/mach-at91/pm.c35
-rw-r--r--arch/arm/mach-at91/pm.h2
-rw-r--r--arch/arm/mach-at91/pm_data-offsets.c4
-rw-r--r--arch/arm/mach-at91/pm_suspend.S189
-rw-r--r--arch/arm/mach-bcm/Kconfig8
-rw-r--r--arch/arm/mach-cns3xxx/core.c10
-rw-r--r--arch/arm/mach-dove/common.c2
-rw-r--r--arch/arm/mach-ebsa110/core.c12
-rw-r--r--arch/arm/mach-ep93xx/timer-ep93xx.c14
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c11
-rw-r--r--arch/arm/mach-footbridge/isa-irq.c10
-rw-r--r--arch/arm/mach-footbridge/isa-timer.c11
-rw-r--r--arch/arm/mach-imx/Kconfig10
-rw-r--r--arch/arm/mach-imx/anatop.c7
-rw-r--r--arch/arm/mach-imx/gpc.c4
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c13
-rw-r--r--arch/arm/mach-imx/mach-imx6sl.c1
-rw-r--r--arch/arm/mach-imx/mach-imx6ul.c1
-rw-r--r--arch/arm/mach-imx/platsmp.c1
-rw-r--r--arch/arm/mach-imx/pm-imx6.c2
-rw-r--r--arch/arm/mach-imx/pm-imx7ulp.c1
-rw-r--r--arch/arm/mach-imx/src.c3
-rw-r--r--arch/arm/mach-integrator/impd1.c11
-rw-r--r--arch/arm/mach-iop32x/time.c12
-rw-r--r--arch/arm/mach-mmp/time.c11
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c47
-rw-r--r--arch/arm/mach-omap1/pm.c13
-rw-r--r--arch/arm/mach-omap1/time.c10
-rw-r--r--arch/arm/mach-omap1/timer32k.c10
-rw-r--r--arch/arm/mach-omap2/.gitignore1
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c9
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c26
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c19
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h10
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c40
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c115
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_43xx_data.c114
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c531
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c288
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c251
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c231
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c6
-rw-r--r--arch/arm/mach-omap2/pm33xx-core.c137
-rw-r--r--arch/arm/mach-omap2/pm34xx.c8
-rw-r--r--arch/arm/mach-omap2/timer.c11
-rw-r--r--arch/arm/mach-orion5x/Kconfig2
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c3
-rw-r--r--arch/arm/mach-qcom/Kconfig5
-rw-r--r--arch/arm/mach-rpc/time.c8
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c1
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c2
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c1
-rw-r--r--arch/arm/mach-spear/time.c9
-rw-r--r--arch/arm/mach-sunxi/sunxi.c2
-rw-r--r--arch/arm/mach-tegra/Makefile19
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra114.c89
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c212
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra30.c132
-rw-r--r--arch/arm/mach-tegra/cpuidle.c50
-rw-r--r--arch/arm/mach-tegra/cpuidle.h21
-rw-r--r--arch/arm/mach-tegra/irq.c3
-rw-r--r--arch/arm/mach-tegra/pm.c54
-rw-r--r--arch/arm/mach-tegra/pm.h4
-rw-r--r--arch/arm/mach-tegra/reset-handler.S11
-rw-r--r--arch/arm/mach-tegra/reset.h9
-rw-r--r--arch/arm/mach-tegra/sleep-tegra20.S170
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S6
-rw-r--r--arch/arm/mach-tegra/sleep.h15
-rw-r--r--arch/arm/mach-tegra/tegra.c7
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/arm/mm/dma-mapping.c76
-rw-r--r--arch/arm/mm/fault.c7
-rw-r--r--arch/arm/mm/mmu.c26
-rw-r--r--arch/arm/plat-orion/time.c10
-rw-r--r--arch/arm/vdso/.gitignore1
-rw-r--r--arch/arm64/Kconfig.platforms2
-rw-r--r--arch/arm64/boot/.gitignore1
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts17
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts170
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts11
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts11
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi379
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts460
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi44
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts21
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts11
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts20
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts33
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi37
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts30
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi66
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-a1.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg-s400.dts6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi136
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts25
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi52
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi9
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts87
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi28
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi24
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1.dtsi1
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/fvp-base-revc.dts2
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts2
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile6
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts91
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts72
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts117
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts51
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts187
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts65
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi184
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi37
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts15
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi168
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi74
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi21
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi81
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts270
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h931
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi654
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts148
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts104
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi18
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp.dtsi39
-rw-r--r--arch/arm64/boot/dts/freescale/s32v234.dtsi4
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts6
-rw-r--r--arch/arm64/boot/dts/marvell/Makefile3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi5
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi8
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712e.dtsi8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6797.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi53
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8516-pinfunc.h663
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts20
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8516.dtsi457
-rw-r--r--arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi221
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132.dtsi4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts26
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi1
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi19
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi36
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts81
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi250
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi3
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi45
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts24
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-smaug.dts2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi25
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile2
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts64
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018.dtsi443
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi21
-rw-r--r--arch/arm64/boot/dts/qcom/msm8992.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8994.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi33
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/pm6150.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/pm8998.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-idp.dts64
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi605
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi15
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c.dts283
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts89
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi672
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts114
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-mtp.dts29
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi444
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile6
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1-hihope-rzg2m-ex-idk-1110wr.dts52
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77950.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi11
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi16
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts32
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi89
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi31
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi29
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995.dtsi14
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile1
-rw-r--r--arch/arm64/boot/dts/rockchip/px30.dtsi32
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-a1.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-evb.dts270
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts39
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts43
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts1096
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi11
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts3
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi10
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi20
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts10
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi55
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi12
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi20
-rw-r--r--arch/arm64/configs/defconfig98
-rw-r--r--arch/arm64/crypto/.gitignore2
-rw-r--r--arch/arm64/crypto/aes-neonbs-glue.c1
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c20
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c23
-rw-r--r--arch/arm64/include/asm/Kbuild18
-rw-r--r--arch/arm64/include/asm/archrandom.h14
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h3
-rw-r--r--arch/arm64/include/asm/kvm_host.h1
-rw-r--r--arch/arm64/kernel/.gitignore1
-rw-r--r--arch/arm64/kernel/vdso/.gitignore1
-rw-r--r--arch/arm64/kernel/vdso32/.gitignore1
-rw-r--r--arch/arm64/kvm/fpsimd.c1
-rw-r--r--arch/arm64/kvm/guest.c1
-rw-r--r--arch/arm64/kvm/hyp/switch.c1
-rw-r--r--arch/arm64/kvm/sys_regs.c1
-rw-r--r--arch/arm64/kvm/sys_regs_generic_v8.c1
-rw-r--r--arch/arm64/mm/fault.c26
-rw-r--r--arch/c6x/include/asm/Kbuild37
-rw-r--r--arch/c6x/include/asm/unaligned.h65
-rw-r--r--arch/csky/Kconfig13
-rw-r--r--arch/csky/abiv1/inc/abi/entry.h5
-rw-r--r--arch/csky/abiv2/fpu.c5
-rw-r--r--arch/csky/abiv2/inc/abi/entry.h67
-rw-r--r--arch/csky/abiv2/inc/abi/fpu.h3
-rw-r--r--arch/csky/abiv2/mcount.S48
-rw-r--r--arch/csky/include/asm/Kbuild36
-rw-r--r--arch/csky/include/asm/ftrace.h2
-rw-r--r--arch/csky/include/asm/kprobes.h48
-rw-r--r--arch/csky/include/asm/probes.h24
-rw-r--r--arch/csky/include/asm/processor.h1
-rw-r--r--arch/csky/include/asm/ptrace.h43
-rw-r--r--arch/csky/include/asm/thread_info.h2
-rw-r--r--arch/csky/include/asm/uprobes.h33
-rw-r--r--arch/csky/kernel/Makefile1
-rw-r--r--arch/csky/kernel/asm-offsets.c1
-rw-r--r--arch/csky/kernel/entry.S18
-rw-r--r--arch/csky/kernel/ftrace.c42
-rw-r--r--arch/csky/kernel/head.S5
-rw-r--r--arch/csky/kernel/probes/Makefile7
-rw-r--r--arch/csky/kernel/probes/decode-insn.c49
-rw-r--r--arch/csky/kernel/probes/decode-insn.h20
-rw-r--r--arch/csky/kernel/probes/ftrace.c66
-rw-r--r--arch/csky/kernel/probes/kprobes.c499
-rw-r--r--arch/csky/kernel/probes/kprobes_trampoline.S19
-rw-r--r--arch/csky/kernel/probes/simulate-insn.c398
-rw-r--r--arch/csky/kernel/probes/simulate-insn.h49
-rw-r--r--arch/csky/kernel/probes/uprobes.c150
-rw-r--r--arch/csky/kernel/ptrace.c103
-rw-r--r--arch/csky/kernel/setup.c63
-rw-r--r--arch/csky/kernel/signal.c6
-rw-r--r--arch/csky/kernel/smp.c6
-rw-r--r--arch/csky/kernel/traps.c29
-rw-r--r--arch/csky/mm/cachev2.c45
-rw-r--r--arch/csky/mm/fault.c13
-rw-r--r--arch/h8300/include/asm/Kbuild46
-rw-r--r--arch/hexagon/include/asm/Kbuild33
-rw-r--r--arch/hexagon/mm/vm_fault.c5
-rw-r--r--arch/ia64/configs/bigsur_defconfig3
-rw-r--r--arch/ia64/configs/generic_defconfig3
-rw-r--r--arch/ia64/configs/gensparse_defconfig3
-rw-r--r--arch/ia64/configs/tiger_defconfig3
-rw-r--r--arch/ia64/configs/zx1_defconfig4
-rw-r--r--arch/ia64/include/asm/Kbuild7
-rw-r--r--arch/ia64/kernel/.gitignore1
-rw-r--r--arch/ia64/kernel/syscalls/syscallhdr.sh2
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S2
-rw-r--r--arch/ia64/mm/fault.c5
-rw-r--r--arch/m68k/configs/amiga_defconfig1
-rw-r--r--arch/m68k/configs/apollo_defconfig1
-rw-r--r--arch/m68k/configs/atari_defconfig1
-rw-r--r--arch/m68k/configs/bvme6000_defconfig1
-rw-r--r--arch/m68k/configs/hp300_defconfig1
-rw-r--r--arch/m68k/configs/mac_defconfig1
-rw-r--r--arch/m68k/configs/multi_defconfig1
-rw-r--r--arch/m68k/configs/mvme147_defconfig1
-rw-r--r--arch/m68k/configs/mvme16x_defconfig1
-rw-r--r--arch/m68k/configs/q40_defconfig1
-rw-r--r--arch/m68k/configs/sun3_defconfig1
-rw-r--r--arch/m68k/configs/sun3x_defconfig1
-rw-r--r--arch/m68k/include/asm/Kbuild24
-rw-r--r--arch/m68k/kernel/.gitignore1
-rw-r--r--arch/m68k/mm/fault.c9
-rw-r--r--arch/microblaze/Kconfig4
-rw-r--r--arch/microblaze/boot/.gitignore1
-rw-r--r--arch/microblaze/include/asm/Kbuild29
-rw-r--r--arch/microblaze/include/asm/irq.h3
-rw-r--r--arch/microblaze/kernel/.gitignore1
-rw-r--r--arch/microblaze/kernel/irq.c21
-rw-r--r--arch/microblaze/kernel/syscalls/syscallhdr.sh2
-rw-r--r--arch/microblaze/mm/consistent.c9
-rw-r--r--arch/microblaze/mm/fault.c5
-rw-r--r--arch/microblaze/pci/pci-common.c7
-rw-r--r--arch/mips/Kconfig3
-rw-r--r--arch/mips/boot/.gitignore1
-rw-r--r--arch/mips/boot/compressed/.gitignore1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7425.dtsi4
-rw-r--r--arch/mips/boot/tools/.gitignore1
-rw-r--r--arch/mips/configs/bigsur_defconfig1
-rw-r--r--arch/mips/configs/fuloong2e_defconfig1
-rw-r--r--arch/mips/configs/ip27_defconfig1
-rw-r--r--arch/mips/configs/ip32_defconfig1
-rw-r--r--arch/mips/configs/jazz_defconfig1
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/include/asm/Kbuild13
-rw-r--r--arch/mips/include/asm/kvm_host.h2
-rw-r--r--arch/mips/kernel/.gitignore1
-rw-r--r--arch/mips/kernel/syscalls/syscallhdr.sh3
-rw-r--r--arch/mips/kvm/mips.c75
-rw-r--r--arch/mips/mm/dma-noncoherent.c7
-rw-r--r--arch/mips/mm/fault.c7
-rw-r--r--arch/mips/tools/.gitignore1
-rw-r--r--arch/mips/vdso/.gitignore1
-rw-r--r--arch/nds32/include/asm/Kbuild37
-rw-r--r--arch/nds32/kernel/.gitignore1
-rw-r--r--arch/nds32/kernel/vdso/.gitignore1
-rw-r--r--arch/nds32/kernel/vmlinux.lds.S1
-rw-r--r--arch/nds32/mm/fault.c5
-rw-r--r--arch/nios2/Kconfig3
-rw-r--r--arch/nios2/boot/.gitignore1
-rw-r--r--arch/nios2/include/asm/Kbuild38
-rw-r--r--arch/nios2/kernel/.gitignore1
-rw-r--r--arch/nios2/mm/dma-mapping.c12
-rw-r--r--arch/nios2/mm/fault.c7
-rw-r--r--arch/openrisc/Kconfig3
-rw-r--r--arch/openrisc/configs/or1ksim_defconfig1
-rw-r--r--arch/openrisc/configs/simple_smp_defconfig1
-rw-r--r--arch/openrisc/include/asm/Kbuild36
-rw-r--r--arch/openrisc/include/uapi/asm/unistd.h1
-rw-r--r--arch/openrisc/kernel/.gitignore1
-rw-r--r--arch/openrisc/kernel/dma.c55
-rw-r--r--arch/openrisc/kernel/process.c18
-rw-r--r--arch/openrisc/kernel/smp.c3
-rw-r--r--arch/openrisc/kernel/traps.c7
-rw-r--r--arch/openrisc/mm/fault.c5
-rw-r--r--arch/parisc/boot/.gitignore1
-rw-r--r--arch/parisc/boot/compressed/.gitignore1
-rw-r--r--arch/parisc/include/asm/Kbuild18
-rw-r--r--arch/parisc/include/asm/spinlock.h160
-rw-r--r--arch/parisc/include/asm/spinlock_types.h14
-rw-r--r--arch/parisc/kernel/.gitignore1
-rw-r--r--arch/parisc/kernel/alternative.c37
-rw-r--r--arch/parisc/kernel/irq.c22
-rw-r--r--arch/parisc/kernel/syscall.S2
-rw-r--r--arch/parisc/kernel/syscalls/syscallhdr.sh2
-rw-r--r--arch/parisc/kernel/syscalls/syscalltbl.sh4
-rw-r--r--arch/parisc/mm/fault.c8
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/Makefile12
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/crt0.S3
-rw-r--r--arch/powerpc/configs/85xx-hw.config1
-rw-r--r--arch/powerpc/configs/amigaone_defconfig1
-rw-r--r--arch/powerpc/configs/chrp32_defconfig1
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/pmac32_defconfig1
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/skiroot_defconfig1
-rw-r--r--arch/powerpc/include/asm/Kbuild4
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h15
-rw-r--r--arch/powerpc/include/asm/book3s/32/hash.h8
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h8
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup-radix.h24
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h5
-rw-r--r--arch/powerpc/include/asm/cache.h2
-rw-r--r--arch/powerpc/include/asm/cacheflush.h6
-rw-r--r--arch/powerpc/include/asm/cputime.h33
-rw-r--r--arch/powerpc/include/asm/dma.h3
-rw-r--r--arch/powerpc/include/asm/drmem.h4
-rw-r--r--arch/powerpc/include/asm/eeh.h26
-rw-r--r--arch/powerpc/include/asm/exception-64s.h4
-rw-r--r--arch/powerpc/include/asm/hw_irq.h6
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_uvmem.h6
-rw-r--r--arch/powerpc/include/asm/kvm_host.h4
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h21
-rw-r--r--arch/powerpc/include/asm/mce.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/opal-api.h1
-rw-r--r--arch/powerpc/include/asm/perf_event.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h19
-rw-r--r--arch/powerpc/include/asm/ptrace.h5
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/setjmp.h6
-rw-r--r--arch/powerpc/include/asm/signal.h3
-rw-r--r--arch/powerpc/include/asm/switch_to.h11
-rw-r--r--arch/powerpc/include/asm/time.h4
-rw-r--r--arch/powerpc/include/asm/topology.h10
-rw-r--r--arch/powerpc/include/asm/vdso.h24
-rw-r--r--arch/powerpc/kernel/.gitignore1
-rw-r--r--arch/powerpc/kernel/Makefile11
-rw-r--r--arch/powerpc/kernel/btext.c2
-rw-r--r--arch/powerpc/kernel/cputable.c1
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c1
-rw-r--r--arch/powerpc/kernel/eeh.c145
-rw-r--r--arch/powerpc/kernel/entry_32.S38
-rw-r--r--arch/powerpc/kernel/entry_64.S895
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S287
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2043
-rw-r--r--arch/powerpc/kernel/fadump.c134
-rw-r--r--arch/powerpc/kernel/head_32.S9
-rw-r--r--arch/powerpc/kernel/head_32.h28
-rw-r--r--arch/powerpc/kernel/head_64.S4
-rw-r--r--arch/powerpc/kernel/head_booke.h5
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c16
-rw-r--r--arch/powerpc/kernel/irq.c192
-rw-r--r--arch/powerpc/kernel/kprobes.c84
-rw-r--r--arch/powerpc/kernel/mce.c14
-rw-r--r--arch/powerpc/kernel/mce_power.c8
-rw-r--r--arch/powerpc/kernel/misc.S4
-rw-r--r--arch/powerpc/kernel/of_platform.c12
-rw-r--r--arch/powerpc/kernel/paca.c14
-rw-r--r--arch/powerpc/kernel/pci-common.c8
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c2
-rw-r--r--arch/powerpc/kernel/process.c124
-rw-r--r--arch/powerpc/kernel/prom_init.c4
-rw-r--r--arch/powerpc/kernel/ptrace.c3468
-rw-r--r--arch/powerpc/kernel/ptrace/Makefile20
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-adv.c492
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-altivec.c128
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-decl.h184
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-noadv.c265
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-novsx.c57
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-spe.c68
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-tm.c851
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c904
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-vsx.c151
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace.c481
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace32.c (renamed from arch/powerpc/kernel/ptrace32.c)11
-rw-r--r--arch/powerpc/kernel/setup-common.c3
-rw-r--r--arch/powerpc/kernel/setup.h6
-rw-r--r--arch/powerpc/kernel/setup_32.c1
-rw-r--r--arch/powerpc/kernel/setup_64.c32
-rw-r--r--arch/powerpc/kernel/signal.h2
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/kernel/smp.c31
-rw-r--r--arch/powerpc/kernel/stacktrace.c6
-rw-r--r--arch/powerpc/kernel/syscall_64.c379
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl22
-rw-r--r--arch/powerpc/kernel/syscalls/syscallhdr.sh3
-rw-r--r--arch/powerpc/kernel/sysfs.c381
-rw-r--r--arch/powerpc/kernel/systbl.S9
-rw-r--r--arch/powerpc/kernel/time.c9
-rw-r--r--arch/powerpc/kernel/traps.c25
-rw-r--r--arch/powerpc/kernel/vdso.c5
-rw-r--r--arch/powerpc/kernel/vdso32/.gitignore1
-rw-r--r--arch/powerpc/kernel/vdso64/.gitignore1
-rw-r--r--arch/powerpc/kernel/vector.S2
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S1
-rw-r--r--arch/powerpc/kexec/Makefile3
-rw-r--r--arch/powerpc/kvm/book3s.c25
-rw-r--r--arch/powerpc/kvm/book3s.h1
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c2
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c124
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv.c99
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S11
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm.c28
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm_builtin.c16
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c22
-rw-r--r--arch/powerpc/kvm/book3s_pr.c25
-rw-r--r--arch/powerpc/kvm/book3s_segment.S7
-rw-r--r--arch/powerpc/kvm/booke.c26
-rw-r--r--arch/powerpc/kvm/booke.h2
-rw-r--r--arch/powerpc/kvm/e500.c1
-rw-r--r--arch/powerpc/kvm/e500_mmu.c4
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/e500mc.c1
-rw-r--r--arch/powerpc/kvm/mpic.c1
-rw-r--r--arch/powerpc/kvm/powerpc.c35
-rw-r--r--arch/powerpc/kvm/timing.c17
-rw-r--r--arch/powerpc/kvm/timing.h1
-rw-r--r--arch/powerpc/lib/sstep.c5
-rw-r--r--arch/powerpc/lib/test_emulate_step.c7
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S27
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c2
-rw-r--r--arch/powerpc/mm/book3s32/tlb.c11
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c7
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c39
-rw-r--r--arch/powerpc/mm/book3s64/pkeys.c12
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c1
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c7
-rw-r--r--arch/powerpc/mm/fault.c22
-rw-r--r--arch/powerpc/mm/kasan/kasan_init_32.c10
-rw-r--r--arch/powerpc/mm/mem.c6
-rw-r--r--arch/powerpc/mm/nohash/40x.c4
-rw-r--r--arch/powerpc/mm/nohash/tlb_low.S12
-rw-r--r--arch/powerpc/mm/numa.c97
-rw-r--r--arch/powerpc/mm/pgtable_32.c41
-rw-r--r--arch/powerpc/mm/ptdump/bats.c8
-rw-r--r--arch/powerpc/mm/ptdump/hashpagetable.c7
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c8
-rw-r--r--arch/powerpc/mm/ptdump/segment_regs.c8
-rw-r--r--arch/powerpc/platforms/44x/warp.c3
-rw-r--r--arch/powerpc/platforms/52xx/efika.c1
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c9
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c11
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c9
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c9
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype8
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c1
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/.gitignore1
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c15
-rw-r--r--arch/powerpc/platforms/maple/setup.c34
-rw-r--r--arch/powerpc/platforms/powermac/pic.c29
-rw-r--r--arch/powerpc/platforms/powermac/setup.c1
-rw-r--r--arch/powerpc/platforms/powermac/smp.c20
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c37
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c21
-rw-r--r--arch/powerpc/platforms/powernv/opal-core.c55
-rw-r--r--arch/powerpc/platforms/powernv/opal-imc.c24
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c5
-rw-r--r--arch/powerpc/platforms/powernv/setup.c4
-rw-r--r--arch/powerpc/platforms/powernv/vas-debug.c37
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c87
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c10
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c10
-rw-r--r--arch/powerpc/platforms/pseries/of_helpers.c2
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c5
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c3
-rw-r--r--arch/powerpc/platforms/pseries/vio.c2
-rw-r--r--arch/powerpc/platforms/pseries/vphn.c3
-rw-r--r--arch/powerpc/purgatory/.gitignore1
-rw-r--r--arch/powerpc/sysdev/xive/common.c126
-rw-r--r--arch/powerpc/sysdev/xive/native.c7
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c23
-rw-r--r--arch/powerpc/sysdev/xive/xive-internal.h9
-rw-r--r--arch/powerpc/xmon/Makefile3
-rw-r--r--arch/powerpc/xmon/xmon.c14
-rw-r--r--arch/riscv/boot/.gitignore1
-rw-r--r--arch/riscv/include/asm/Kbuild28
-rw-r--r--arch/riscv/kernel/.gitignore1
-rw-r--r--arch/riscv/kernel/vdso/.gitignore1
-rw-r--r--arch/riscv/mm/fault.c9
-rw-r--r--arch/s390/Kconfig74
-rw-r--r--arch/s390/appldata/appldata_os.c2
-rw-r--r--arch/s390/boot/.gitignore1
-rw-r--r--arch/s390/boot/Makefile2
-rw-r--r--arch/s390/boot/compressed/.gitignore1
-rw-r--r--arch/s390/boot/install.sh17
-rw-r--r--arch/s390/boot/uv.c20
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/defconfig1
-rw-r--r--arch/s390/crypto/aes_s390.c3
-rw-r--r--arch/s390/include/asm/Kbuild15
-rw-r--r--arch/s390/include/asm/gmap.h6
-rw-r--r--arch/s390/include/asm/hw_irq.h1
-rw-r--r--arch/s390/include/asm/ipl.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h117
-rw-r--r--arch/s390/include/asm/lowcore.h4
-rw-r--r--arch/s390/include/asm/mmu.h4
-rw-r--r--arch/s390/include/asm/mmu_context.h45
-rw-r--r--arch/s390/include/asm/numa.h13
-rw-r--r--arch/s390/include/asm/page.h23
-rw-r--r--arch/s390/include/asm/pci.h6
-rw-r--r--arch/s390/include/asm/pgalloc.h39
-rw-r--r--arch/s390/include/asm/pgtable.h35
-rw-r--r--arch/s390/include/asm/processor.h10
-rw-r--r--arch/s390/include/asm/setup.h7
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/topology.h15
-rw-r--r--arch/s390/include/asm/uv.h251
-rw-r--r--arch/s390/kernel/.gitignore1
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/diag.c2
-rw-r--r--arch/s390/kernel/entry.S65
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/ipl.c73
-rw-r--r--arch/s390/kernel/irq.c26
-rw-r--r--arch/s390/kernel/machine_kexec.c31
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c123
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c44
-rw-r--r--arch/s390/kernel/pgm_check.S4
-rw-r--r--arch/s390/kernel/process.c1
-rw-r--r--arch/s390/kernel/processor.c34
-rw-r--r--arch/s390/kernel/setup.c13
-rw-r--r--arch/s390/kernel/signal.c4
-rw-r--r--arch/s390/kernel/smp.c13
-rw-r--r--arch/s390/kernel/suspend.c240
-rw-r--r--arch/s390/kernel/swsusp.S276
-rw-r--r--arch/s390/kernel/topology.c34
-rw-r--r--arch/s390/kernel/traps.c2
-rw-r--r--arch/s390/kernel/uv.c414
-rw-r--r--arch/s390/kernel/vdso64/.gitignore1
-rw-r--r--arch/s390/kvm/Makefile2
-rw-r--r--arch/s390/kvm/diag.c6
-rw-r--r--arch/s390/kvm/gaccess.c23
-rw-r--r--arch/s390/kvm/intercept.c123
-rw-r--r--arch/s390/kvm/interrupt.c401
-rw-r--r--arch/s390/kvm/kvm-s390.c597
-rw-r--r--arch/s390/kvm/kvm-s390.h51
-rw-r--r--arch/s390/kvm/priv.c13
-rw-r--r--arch/s390/kvm/pv.c303
-rw-r--r--arch/s390/mm/cmm.c46
-rw-r--r--arch/s390/mm/fault.c109
-rw-r--r--arch/s390/mm/gmap.c72
-rw-r--r--arch/s390/mm/hugetlbpage.c11
-rw-r--r--arch/s390/mm/mmap.c40
-rw-r--r--arch/s390/mm/pageattr.c16
-rw-r--r--arch/s390/mm/pgalloc.c108
-rw-r--r--arch/s390/mm/vmem.c4
-rw-r--r--arch/s390/numa/Makefile2
-rw-r--r--arch/s390/numa/mode_emu.c577
-rw-r--r--arch/s390/numa/numa.c147
-rw-r--r--arch/s390/numa/numa_mode.h25
-rw-r--r--arch/s390/numa/toptree.c351
-rw-r--r--arch/s390/numa/toptree.h61
-rw-r--r--arch/s390/pci/pci.c83
-rw-r--r--arch/s390/pci/pci_clp.c2
-rw-r--r--arch/s390/purgatory/.gitignore1
-rw-r--r--arch/s390/tools/.gitignore1
-rw-r--r--arch/sh/boot/.gitignore1
-rw-r--r--arch/sh/boot/compressed/.gitignore1
-rw-r--r--arch/sh/configs/sh03_defconfig1
-rw-r--r--arch/sh/include/asm/Kbuild16
-rw-r--r--arch/sh/include/mach-common/mach/highlander.h4
-rw-r--r--arch/sh/kernel/.gitignore1
-rw-r--r--arch/sh/kernel/syscalls/syscallhdr.sh2
-rw-r--r--arch/sh/kernel/vsyscall/.gitignore1
-rw-r--r--arch/sh/mm/fault.c15
-rw-r--r--arch/sparc/boot/.gitignore1
-rw-r--r--arch/sparc/configs/sparc64_defconfig1
-rw-r--r--arch/sparc/include/asm/Kbuild14
-rw-r--r--arch/sparc/include/asm/dma-mapping.h15
-rw-r--r--arch/sparc/kernel/.gitignore1
-rw-r--r--arch/sparc/kernel/ioport.c3
-rw-r--r--arch/sparc/kernel/of_device_common.c1
-rw-r--r--arch/sparc/kernel/syscalls/syscallhdr.sh2
-rw-r--r--arch/sparc/mm/fault_32.c5
-rw-r--r--arch/sparc/mm/fault_64.c5
-rw-r--r--arch/sparc/mm/io-unit.c9
-rw-r--r--arch/sparc/mm/iommu.c15
-rw-r--r--arch/sparc/mm/mm_32.h3
-rw-r--r--arch/sparc/mm/srmmu.c4
-rw-r--r--arch/sparc/vdso/.gitignore1
-rw-r--r--arch/sparc/vdso/vdso32/.gitignore1
-rw-r--r--arch/sparc/vdso/vdso32/vclock_gettime.c4
-rw-r--r--arch/um/.gitignore1
-rw-r--r--arch/um/Kconfig9
-rw-r--r--arch/um/configs/i386_defconfig2
-rw-r--r--arch/um/configs/x86_64_defconfig2
-rw-r--r--arch/um/drivers/Kconfig3
-rw-r--r--arch/um/drivers/mconsole_kern.c28
-rw-r--r--arch/um/drivers/net_kern.c13
-rw-r--r--arch/um/drivers/random.c4
-rw-r--r--arch/um/drivers/ubd_kern.c12
-rw-r--r--arch/um/drivers/vector_kern.c5
-rw-r--r--arch/um/drivers/vector_user.c15
-rw-r--r--arch/um/drivers/vhost_user.h12
-rw-r--r--arch/um/drivers/virtio_uml.c153
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/delay.h30
-rw-r--r--arch/um/include/linux/time-internal.h84
-rw-r--r--arch/um/include/shared/os.h1
-rw-r--r--arch/um/include/shared/timer-internal.h76
-rw-r--r--arch/um/kernel/kmsg_dump.c9
-rw-r--r--arch/um/kernel/process.c39
-rw-r--r--arch/um/kernel/skas/syscall.c5
-rw-r--r--arch/um/kernel/time.c538
-rw-r--r--arch/um/kernel/trap.c3
-rw-r--r--arch/um/kernel/uml.lds.S2
-rw-r--r--arch/um/os-Linux/file.c31
-rw-r--r--arch/um/os-Linux/time.c1
-rw-r--r--arch/um/os-Linux/umid.c5
-rw-r--r--arch/unicore32/.gitignore1
-rw-r--r--arch/unicore32/include/asm/Kbuild34
-rw-r--r--arch/unicore32/mm/fault.c8
-rw-r--r--arch/x86/.gitignore1
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/boot/.gitignore1
-rw-r--r--arch/x86/boot/apm.c2
-rw-r--r--arch/x86/boot/compressed/.gitignore1
-rw-r--r--arch/x86/boot/tools/.gitignore1
-rw-r--r--arch/x86/configs/i386_defconfig2
-rw-r--r--arch/x86/configs/x86_64_defconfig2
-rw-r--r--arch/x86/crypto/.gitignore1
-rw-r--r--arch/x86/crypto/curve25519-x86_64.c3546
-rw-r--r--arch/x86/entry/vdso/.gitignore1
-rw-r--r--arch/x86/entry/vdso/vdso32/.gitignore1
-rw-r--r--arch/x86/entry/vdso/vdso32/vclock_gettime.c4
-rw-r--r--arch/x86/include/asm/Kbuild2
-rw-r--r--arch/x86/include/asm/bitops.h4
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h41
-rw-r--r--arch/x86/include/asm/kvm_host.h105
-rw-r--r--arch/x86/include/asm/kvm_page_track.h3
-rw-r--r--arch/x86/include/asm/mmu_context.h15
-rw-r--r--arch/x86/include/asm/mshyperv.h8
-rw-r--r--arch/x86/include/asm/pgtable.h67
-rw-r--r--arch/x86/include/asm/pgtable_64.h8
-rw-r--r--arch/x86/include/asm/pgtable_types.h12
-rw-r--r--arch/x86/include/asm/uaccess.h64
-rw-r--r--arch/x86/include/asm/vmx.h12
-rw-r--r--arch/x86/kernel/.gitignore1
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/acpi/cstate.c3
-rw-r--r--arch/x86/kernel/cpu/.gitignore1
-rw-r--r--arch/x86/kvm/cpuid.c944
-rw-r--r--arch/x86/kvm/cpuid.h151
-rw-r--r--arch/x86/kvm/emulate.c57
-rw-r--r--arch/x86/kvm/hyperv.c8
-rw-r--r--arch/x86/kvm/i8254.c2
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h10
-rw-r--r--arch/x86/kvm/kvm_emulate.h (renamed from arch/x86/include/asm/kvm_emulate.h)43
-rw-r--r--arch/x86/kvm/lapic.c85
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.h10
-rw-r--r--arch/x86/kvm/mmu/mmu.c209
-rw-r--r--arch/x86/kvm/mmu/page_track.c16
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h4
-rw-r--r--arch/x86/kvm/pmu.c34
-rw-r--r--arch/x86/kvm/pmu.h11
-rw-r--r--arch/x86/kvm/svm.c407
-rw-r--r--arch/x86/kvm/trace.h50
-rw-r--r--arch/x86/kvm/vmx/capabilities.h25
-rw-r--r--arch/x86/kvm/vmx/evmcs.h7
-rw-r--r--arch/x86/kvm/vmx/nested.c183
-rw-r--r--arch/x86/kvm/vmx/nested.h8
-rw-r--r--arch/x86/kvm/vmx/ops.h27
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c8
-rw-r--r--arch/x86/kvm/vmx/vmenter.S72
-rw-r--r--arch/x86/kvm/vmx/vmx.c665
-rw-r--r--arch/x86/kvm/vmx/vmx.h8
-rw-r--r--arch/x86/kvm/x86.c787
-rw-r--r--arch/x86/kvm/x86.h28
-rw-r--r--arch/x86/lib/.gitignore1
-rw-r--r--arch/x86/mm/fault.c32
-rw-r--r--arch/x86/realmode/rm/.gitignore1
-rw-r--r--arch/x86/tools/.gitignore1
-rw-r--r--arch/x86/um/asm/processor.h12
-rw-r--r--arch/x86/um/vdso/.gitignore1
-rw-r--r--arch/x86/xen/smp_pv.c3
-rw-r--r--arch/x86/xen/xen-head.S18
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--arch/xtensa/boot/.gitignore1
-rw-r--r--arch/xtensa/boot/boot-elf/.gitignore1
-rw-r--r--arch/xtensa/boot/lib/.gitignore1
-rw-r--r--arch/xtensa/include/asm/Kbuild26
-rw-r--r--arch/xtensa/kernel/.gitignore1
-rw-r--r--arch/xtensa/kernel/pci-dma.c12
-rw-r--r--arch/xtensa/kernel/syscalls/syscallhdr.sh2
-rw-r--r--arch/xtensa/mm/fault.c5
-rw-r--r--block/blk-mq-virtio.c2
-rw-r--r--block/scsi_ioctl.c4
-rw-r--r--certs/.gitignore4
-rw-r--r--crypto/af_alg.c10
-rw-r--r--crypto/algif_hash.c6
-rw-r--r--crypto/authencesn.c2
-rw-r--r--crypto/ccm.c29
-rw-r--r--crypto/cryptd.c37
-rw-r--r--crypto/ctr.c29
-rw-r--r--crypto/cts.c27
-rw-r--r--crypto/gcm.c66
-rw-r--r--crypto/geniv.c17
-rw-r--r--crypto/lrw.c28
-rw-r--r--crypto/md5.c3
-rw-r--r--crypto/pcrypt.c33
-rw-r--r--crypto/proc.c2
-rw-r--r--crypto/rng.c8
-rw-r--r--crypto/rsa-pkcs1pad.c59
-rw-r--r--crypto/tcrypt.c4
-rw-r--r--crypto/testmgr.c28
-rw-r--r--crypto/xts.c28
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/dbinput.c16
-rw-r--r--drivers/acpi/acpica/dbxface.c1
-rw-r--r--drivers/acpi/acpica/dswexec.c33
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c35
-rw-r--r--drivers/acpi/acpica/nsnames.c6
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c9
-rw-r--r--drivers/acpi/acpica/utprint.c7
-rw-r--r--drivers/acpi/cppc_acpi.c33
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/dptf/dptf_power.c2
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c8
-rw-r--r--drivers/acpi/pci_root.c15
-rw-r--r--drivers/acpi/processor_throttling.c7
-rw-r--r--drivers/acpi/sleep.c4
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/tables.c2
-rw-r--r--drivers/acpi/video_detect.c9
-rw-r--r--drivers/acpi/wakeup.c81
-rw-r--r--drivers/android/binderfs.c200
-rw-r--r--drivers/atm/.gitignore2
-rw-r--r--drivers/auxdisplay/charlcd.c2
-rw-r--r--drivers/auxdisplay/panel.c2
-rw-r--r--drivers/base/memory.c130
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/base/power/main.c7
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/bus/Makefile3
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c33
-rw-r--r--drivers/bus/hisi_lpc.c27
-rw-r--r--drivers/bus/mhi/Kconfig14
-rw-r--r--drivers/bus/mhi/Makefile2
-rw-r--r--drivers/bus/mhi/core/Makefile3
-rw-r--r--drivers/bus/mhi/core/boot.c507
-rw-r--r--drivers/bus/mhi/core/init.c1293
-rw-r--r--drivers/bus/mhi/core/internal.h687
-rw-r--r--drivers/bus/mhi/core/main.c1529
-rw-r--r--drivers/bus/mhi/core/pm.c969
-rw-r--r--drivers/bus/ti-sysc.c604
-rw-r--r--drivers/char/Kconfig167
-rw-r--r--drivers/char/Makefile5
-rw-r--r--drivers/char/applicom.c1
-rw-r--r--drivers/char/efirtc.c366
-rw-r--r--drivers/char/hw_random/Kconfig17
-rw-r--r--drivers/char/hw_random/imx-rngc.c85
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c1
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/nwbutton.h1
-rw-r--r--drivers/char/nwflash.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c4
-rw-r--r--drivers/char/ppdev.c20
-rw-r--r--drivers/char/random.c84
-rw-r--r--drivers/char/rtc.c1311
-rw-r--r--drivers/char/toshiba.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c8
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/clk/at91/Makefile4
-rw-r--r--drivers/clk/at91/at91rm9200.c199
-rw-r--r--drivers/clk/at91/at91sam9g45.c220
-rw-r--r--drivers/clk/at91/at91sam9n12.c238
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c91
-rw-r--r--drivers/clk/at91/clk-usb.c9
-rw-r--r--drivers/clk/at91/sam9x60.c14
-rw-r--r--drivers/clk/at91/sama5d3.c240
-rw-r--r--drivers/clk/clk-si5341.c212
-rw-r--r--drivers/clk/clk.c127
-rw-r--r--drivers/clk/imx/clk-composite-8m.c20
-rw-r--r--drivers/clk/imx/clk-fixup-div.c2
-rw-r--r--drivers/clk/imx/clk-fixup-mux.c2
-rw-r--r--drivers/clk/imx/clk-gate2.c8
-rw-r--r--drivers/clk/imx/clk-imx6sl.c1
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c2
-rw-r--r--drivers/clk/imx/clk-imx8mm.c63
-rw-r--r--drivers/clk/imx/clk-imx8mn.c41
-rw-r--r--drivers/clk/imx/clk-imx8mp.c24
-rw-r--r--drivers/clk/imx/clk-imx8mq.c53
-rw-r--r--drivers/clk/imx/clk-pfdv2.c61
-rw-r--r--drivers/clk/imx/clk-pll14xx.c4
-rw-r--r--drivers/clk/imx/clk-pllv4.c12
-rw-r--r--drivers/clk/imx/clk-sscg-pll.c14
-rw-r--r--drivers/clk/imx/clk.h13
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c4
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c55
-rw-r--r--drivers/clk/ingenic/tcu.c10
-rw-r--r--drivers/clk/keystone/Kconfig8
-rw-r--r--drivers/clk/keystone/Makefile1
-rw-r--r--drivers/clk/keystone/syscon-clk.c172
-rw-r--r--drivers/clk/meson/g12a.c129
-rw-r--r--drivers/clk/meson/g12a.h6
-rw-r--r--drivers/clk/meson/gxbb.c21
-rw-r--r--drivers/clk/meson/gxbb.h2
-rw-r--r--drivers/clk/meson/meson8b.c21
-rw-r--r--drivers/clk/mmp/Makefile2
-rw-r--r--drivers/clk/mmp/clk-mix.c2
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c146
-rw-r--r--drivers/clk/mmp/clk-pll.c139
-rw-r--r--drivers/clk/mmp/clk.c31
-rw-r--r--drivers/clk/mmp/clk.h31
-rw-r--r--drivers/clk/qcom/Kconfig16
-rw-r--r--drivers/clk/qcom/Makefile2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c277
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h12
-rw-r--r--drivers/clk/qcom/clk-rpm.c35
-rw-r--r--drivers/clk/qcom/clk-rpmh.c79
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c50
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c2
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c72
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c52
-rw-r--r--drivers/clk/qcom/gcc-sm8250.c3690
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c37
-rw-r--r--drivers/clk/qcom/mss-sc7180.c143
-rw-r--r--drivers/clk/renesas/Kconfig3
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c10
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c40
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c4
-rw-r--r--drivers/clk/samsung/clk.c4
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c40
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c42
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c17
-rw-r--r--drivers/clk/socfpga/clk-s10.c29
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h25
-rw-r--r--drivers/clk/sprd/Kconfig8
-rw-r--r--drivers/clk/sprd/Makefile1
-rw-r--r--drivers/clk/sprd/common.c10
-rw-r--r--drivers/clk/sprd/composite.h39
-rw-r--r--drivers/clk/sprd/div.h20
-rw-r--r--drivers/clk/sprd/gate.c17
-rw-r--r--drivers/clk/sprd/gate.h120
-rw-r--r--drivers/clk/sprd/mux.h28
-rw-r--r--drivers/clk/sprd/pll.c7
-rw-r--r--drivers/clk/sprd/pll.h55
-rw-r--r--drivers/clk/sprd/sc9863a-clk.c1772
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c115
-rw-r--r--drivers/clk/tegra/Makefile1
-rw-r--r--drivers/clk/tegra/clk-id.h12
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c37
-rw-r--r--drivers/clk/tegra/clk-tegra-pmc.c122
-rw-r--r--drivers/clk/tegra/clk-tegra114.c43
-rw-r--r--drivers/clk/tegra/clk-tegra124.c48
-rw-r--r--drivers/clk/tegra/clk-tegra20.c9
-rw-r--r--drivers/clk/tegra/clk-tegra210.c34
-rw-r--r--drivers/clk/tegra/clk-tegra30.c33
-rw-r--r--drivers/clk/tegra/clk.h1
-rw-r--r--drivers/clk/ti/clk-814x.c7
-rw-r--r--drivers/clk/versatile/clk-icst.c25
-rw-r--r--drivers/clk/versatile/clk-icst.h22
-rw-r--r--drivers/clk/versatile/clk-impd1.c79
-rw-r--r--drivers/clocksource/timer-vf-pit.c2
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpufreq/Kconfig.x862
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c30
-rw-r--r--drivers/cpuidle/Kconfig.arm8
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c392
-rw-r--r--drivers/crypto/Kconfig50
-rw-r--r--drivers/crypto/Makefile3
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c4
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h2
-rw-r--r--drivers/crypto/atmel-i2c.c3
-rw-r--r--drivers/crypto/bcm/util.c40
-rw-r--r--drivers/crypto/caam/Kconfig2
-rw-r--r--drivers/crypto/caam/caamalg.c415
-rw-r--r--drivers/crypto/caam/caamalg_desc.c30
-rw-r--r--drivers/crypto/caam/caamalg_qi.c4
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h6
-rw-r--r--drivers/crypto/caam/caamhash.c340
-rw-r--r--drivers/crypto/caam/caampkc.c185
-rw-r--r--drivers/crypto/caam/caampkc.h10
-rw-r--r--drivers/crypto/caam/caamrng.c405
-rw-r--r--drivers/crypto/caam/ctrl.c88
-rw-r--r--drivers/crypto/caam/desc.h2
-rw-r--r--drivers/crypto/caam/intern.h9
-rw-r--r--drivers/crypto/caam/jr.c49
-rw-r--r--drivers/crypto/caam/key_gen.c2
-rw-r--r--drivers/crypto/caam/qi.c60
-rw-r--r--drivers/crypto/caam/qi.h4
-rw-r--r--drivers/crypto/caam/regs.h7
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c2
-rw-r--r--drivers/crypto/ccp/psp-dev.c3
-rw-r--r--drivers/crypto/ccp/sev-dev.c39
-rw-r--r--drivers/crypto/ccp/sp-dev.h1
-rw-r--r--drivers/crypto/ccp/sp-pci.c9
-rw-r--r--drivers/crypto/ccree/cc_aead.c176
-rw-r--r--drivers/crypto/ccree/cc_aead.h3
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c229
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h5
-rw-r--r--drivers/crypto/ccree/cc_cipher.c78
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c29
-rw-r--r--drivers/crypto/ccree/cc_driver.c127
-rw-r--r--drivers/crypto/ccree/cc_driver.h18
-rw-r--r--drivers/crypto/ccree/cc_hash.c228
-rw-r--r--drivers/crypto/ccree/cc_hash.h31
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h332
-rw-r--r--drivers/crypto/ccree/cc_pm.c60
-rw-r--r--drivers/crypto/ccree/cc_pm.h21
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c48
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.h19
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.c78
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.h45
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c358
-rw-r--r--drivers/crypto/chelsio/chcr_core.c3
-rw-r--r--drivers/crypto/chelsio/chcr_core.h6
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h16
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c10
-rw-r--r--drivers/crypto/hisilicon/Kconfig2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h3
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c20
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c160
-rw-r--r--drivers/crypto/hisilicon/qm.c619
-rw-r--r--drivers/crypto/hisilicon/qm.h72
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h12
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c260
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c294
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h2
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c54
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c324
-rw-r--r--drivers/crypto/img-hash.c2
-rw-r--r--drivers/crypto/marvell/Kconfig37
-rw-r--r--drivers/crypto/marvell/Makefile7
-rw-r--r--drivers/crypto/marvell/cesa/Makefile3
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c (renamed from drivers/crypto/marvell/cesa.c)0
-rw-r--r--drivers/crypto/marvell/cesa/cesa.h (renamed from drivers/crypto/marvell/cesa.h)5
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c (renamed from drivers/crypto/marvell/cipher.c)15
-rw-r--r--drivers/crypto/marvell/cesa/hash.c (renamed from drivers/crypto/marvell/hash.c)38
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c (renamed from drivers/crypto/marvell/tdma.c)10
-rw-r--r--drivers/crypto/marvell/octeontx/Makefile6
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cpt_common.h51
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h824
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf.h34
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_main.c307
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c253
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c1686
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h180
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf.h104
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c1744
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.h188
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_main.c985
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c247
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c612
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h227
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c2
-rw-r--r--drivers/crypto/mxs-dcp.c58
-rw-r--r--drivers/crypto/nx/nx.h2
-rw-r--r--drivers/crypto/omap-sham.c4
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c3
-rw-r--r--drivers/crypto/qce/common.c2
-rw-r--r--drivers/crypto/qce/common.h3
-rw-r--r--drivers/crypto/qce/dma.c11
-rw-r--r--drivers/crypto/qce/dma.h2
-rw-r--r--drivers/crypto/qce/skcipher.c30
-rw-r--r--drivers/crypto/s5p-sss.c2
-rw-r--r--drivers/crypto/vmx/.gitignore1
-rw-r--r--drivers/crypto/xilinx/Makefile2
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c457
-rw-r--r--drivers/dma-buf/Kconfig12
-rw-r--r--drivers/dma-buf/dma-buf.c110
-rw-r--r--drivers/dma/Kconfig15
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/at_hdmac.c121
-rw-r--r--drivers/dma/at_hdmac_regs.h2
-rw-r--r--drivers/dma/at_xdmac.c44
-rw-r--r--drivers/dma/bcm-sba-raid.c2
-rw-r--r--drivers/dma/dmaengine.c102
-rw-r--r--drivers/dma/dmaengine.h16
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c15
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c21
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.h2
-rw-r--r--drivers/dma/idxd/cdev.c4
-rw-r--r--drivers/dma/idxd/device.c4
-rw-r--r--drivers/dma/idxd/sysfs.c19
-rw-r--r--drivers/dma/ioat/dca.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/rcar-dmac.c2
-rw-r--r--drivers/dma/sh/shdma-base.c2
-rw-r--r--drivers/dma/sprd-dma.c26
-rw-r--r--drivers/dma/stm32-dma.c96
-rw-r--r--drivers/dma/stm32-dmamux.c93
-rw-r--r--drivers/dma/stm32-mdma.c78
-rw-r--r--drivers/dma/sun4i-dma.c4
-rw-r--r--drivers/dma/tegra20-apb-dma.c546
-rw-r--r--drivers/dma/tegra210-adma.c2
-rw-r--r--drivers/dma/ti/dma-crossbar.c8
-rw-r--r--drivers/dma/ti/edma.c79
-rw-r--r--drivers/dma/ti/k3-udma-glue.c18
-rw-r--r--drivers/dma/ti/k3-udma.c113
-rw-r--r--drivers/dma/ti/omap-dma.c2
-rw-r--r--drivers/dma/uniphier-mdmac.c2
-rw-r--r--drivers/dma/uniphier-xdmac.c609
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c65
-rw-r--r--drivers/eisa/.gitignore1
-rw-r--r--drivers/extcon/extcon-axp288.c32
-rw-r--r--drivers/extcon/extcon-palmas.c8
-rw-r--r--drivers/extcon/extcon.c1
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/arm_scmi/Makefile3
-rw-r--r--drivers/firmware/arm_scmi/common.h115
-rw-r--r--drivers/firmware/arm_scmi/driver.c293
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c184
-rw-r--r--drivers/firmware/arm_scmi/perf.c2
-rw-r--r--drivers/firmware/arm_scmi/shmem.c83
-rw-r--r--drivers/firmware/arm_scpi.c4
-rw-r--r--drivers/firmware/imx/Kconfig4
-rw-r--r--drivers/firmware/imx/scu-pd.c13
-rw-r--r--drivers/firmware/meson/meson_sm.c2
-rw-r--r--drivers/firmware/stratix10-svc.c1
-rw-r--r--drivers/firmware/tegra/Kconfig2
-rw-r--r--drivers/firmware/xilinx/Kconfig2
-rw-r--r--drivers/firmware/xilinx/zynqmp.c25
-rw-r--r--drivers/gpio/Kconfig11
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-brcmstb.c44
-rw-r--r--drivers/gpio/gpio-davinci.c7
-rw-r--r--drivers/gpio/gpio-eic-sprd.c9
-rw-r--r--drivers/gpio/gpio-mlxbf2.c335
-rw-r--r--drivers/gpio/gpio-mmio.c23
-rw-r--r--drivers/gpio/gpio-mockup.c2
-rw-r--r--drivers/gpio/gpio-mt7621.c4
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpio-mxc.c7
-rw-r--r--drivers/gpio/gpio-omap.c29
-rw-r--r--drivers/gpio/gpio-pl061.c13
-rw-r--r--drivers/gpio/gpio-pxa.c11
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-siox.c28
-rw-r--r--drivers/gpio/gpio-tegra186.c64
-rw-r--r--drivers/gpio/gpio-uniphier.c2
-rw-r--r--drivers/gpio/gpio-wcd934x.c9
-rw-r--r--drivers/gpio/gpio-zx.c10
-rw-r--r--drivers/gpio/gpiolib-devres.c46
-rw-r--r--drivers/gpio/gpiolib-of.c139
-rw-r--r--drivers/gpio/gpiolib-of.h2
-rw-r--r--drivers/gpio/gpiolib.c1300
-rw-r--r--drivers/gpio/gpiolib.h4
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c250
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c476
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h338
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c257
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c152
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c150
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c310
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c91
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c242
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c162
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c298
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c2204
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c168
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c204
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c208
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h72
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h75
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h51
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c69
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c67
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h28
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h63
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c69
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h60
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c12
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c36
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h17
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c183
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h32
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h10
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c20
-rw-r--r--drivers/gpu/drm/amd/display/modules/vmid/vmid.c16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h29
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c199
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_debug.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c59
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c43
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c155
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c88
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c70
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c16
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c2
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h6
-rw-r--r--drivers/gpu/drm/ast/ast_main.c24
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c27
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c24
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c34
-rw-r--r--drivers/gpu/drm/bridge/Kconfig51
-rw-r--r--drivers/gpu/drm/bridge/Makefile6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig13
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h40
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c28
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c20
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c54
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c295
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c300
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c21
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c8
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c8
-rw-r--r--drivers/gpu/drm/bridge/panel.c23
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c349
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c8
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c3
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c342
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c329
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c9
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c1046
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c267
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c235
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c211
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c51
-rw-r--r--drivers/gpu/drm/drm_atomic.c117
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c83
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c102
-rw-r--r--drivers/gpu/drm/drm_auth.c8
-rw-r--r--drivers/gpu/drm/drm_bridge.c751
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c379
-rw-r--r--drivers/gpu/drm/drm_bufs.c40
-rw-r--r--drivers/gpu/drm/drm_client.c2
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c12
-rw-r--r--drivers/gpu/drm/drm_connector.c96
-rw-r--r--drivers/gpu/drm/drm_context.c28
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c3
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c2
-rw-r--r--drivers/gpu/drm/drm_dma.c21
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c141
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c200
-rw-r--r--drivers/gpu/drm/drm_drv.c20
-rw-r--r--drivers/gpu/drm/drm_edid.c211
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c22
-rw-r--r--drivers/gpu/drm/drm_file.c231
-rw-r--r--drivers/gpu/drm/drm_format_helper.c2
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c122
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c61
-rw-r--r--drivers/gpu/drm/drm_hdcp.c158
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_ioctl.c1
-rw-r--r--drivers/gpu/drm/drm_irq.c4
-rw-r--r--drivers/gpu/drm/drm_lock.c11
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c39
-rw-r--r--drivers/gpu/drm/drm_mm.c10
-rw-r--r--drivers/gpu/drm/drm_pci.c82
-rw-r--r--drivers/gpu/drm/drm_scatter.c3
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c46
-rw-r--r--drivers/gpu/drm/drm_syncobj.c87
-rw-r--r--drivers/gpu/drm/drm_sysfs.c4
-rw-r--r--drivers/gpu/drm/drm_vblank.c177
-rw-r--r--drivers/gpu/drm/drm_vm.c26
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c60
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c52
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c42
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c59
-rw-r--r--drivers/gpu/drm/etnaviv/state_blt.xml.h2
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h36
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c6
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h6
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h7
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c79
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c11
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c10
-rw-r--r--drivers/gpu/drm/i915/.gitignore1
-rw-r--r--drivers/gpu/drm/i915/Kconfig7
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile25
-rw-r--r--drivers/gpu/drm/i915/Makefile19
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c406
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c89
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c97
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c255
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c444
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c1106
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h73
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c602
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c163
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c128
-rw-r--r--drivers/gpu/drm/i915/display/intel_csr.c (renamed from drivers/gpu/drm/i915/intel_csr.c)46
-rw-r--r--drivers/gpu/drm/i915/display/intel_csr.h (renamed from drivers/gpu/drm/i915/intel_csr.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c1385
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h72
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c3546
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c2134
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c754
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h119
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c876
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c193
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c108
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c1521
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c168
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c50
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c264
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c66
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c223
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h87
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c111
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c527
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c433
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c203
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c108
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c223
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c69
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c407
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c363
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c114
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c465
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c165
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c445
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c14
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c494
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c96
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c504
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c789
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c136
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c21
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c102
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c178
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c74
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c8
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c402
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.h15
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c27
-rw-r--r--drivers/gpu/drm/i915/gt/hsw_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_param.c63
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_param.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_sseu.c98
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c174
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c106
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c68
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c117
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c65
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c386
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c76
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c109
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c236
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c78
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c244
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds_types.h4
-rw-r--r--drivers/gpu/drm/i915/gt/ivb_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c7
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c1842
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c24
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c296
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c188
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c9
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c445
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c30
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c255
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h19
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c69
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h62
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h18
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c84
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c208
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c45
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c25
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c103
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c63
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c43
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h62
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c211
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c309
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c32
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c127
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c25
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c256
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c18
-rw-r--r--drivers/gpu/drm/i915/i915_active.c139
-rw-r--r--drivers/gpu/drm/i915/i915_active.h11
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c3
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c29
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2428
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.h8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c250
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.h14
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1203
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h235
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c31
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h5
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c7
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.h17
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c287
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h6
-rw-r--r--drivers/gpu/drm/i915/i915_params.c11
-rw-r--r--drivers/gpu/drm/i915/i915_params.h74
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c18
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c103
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h67
-rw-r--r--drivers/gpu/drm/i915/i915_request.c258
-rw-r--r--drivers/gpu/drm/i915/i915_request.h14
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c22
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c17
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h2
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c22
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h66
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c1
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h22
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c72
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h25
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c98
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h11
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c45
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c500
-rw-r--r--drivers/gpu/drm/i915/intel_dram.h14
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c21
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c66
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c765
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h5
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c11
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c54
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c78
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c203
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c1
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c489
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.h18
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c176
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c16
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h1
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c134
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h4
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c63
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c5
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h1
-rw-r--r--drivers/gpu/drm/lima/lima_regs.h1
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c35
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h6
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c46
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c9
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c64
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c180
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c93
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h7
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c10
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h4
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c86
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c115
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c98
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c620
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h71
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c82
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c95
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c16
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h12
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c28
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c17
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig22
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c97
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c183
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c137
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c217
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c349
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c71
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c313
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c59
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c295
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c48
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c34
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c53
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c178
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c269
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c247
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c88
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c83
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/panel/Kconfig44
-rw-r--r--drivers/gpu/drm/panel/Makefile5
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c854
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c352
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c526
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c14
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c1098
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c293
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c332
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c123
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h26
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c30
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c18
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c6
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c73
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c43
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c26
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/.gitignore1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c5
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h27
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c56
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c86
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c11
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c2
-rw-r--r--drivers/gpu/drm/stm/drv.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c4
-rw-r--r--drivers/gpu/drm/stm/ltdc.c103
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c104
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h14
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c129
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h2
-rw-r--r--drivers/gpu/drm/tegra/dc.c20
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c34
-rw-r--r--drivers/gpu/drm/tidss/Kconfig14
-rw-r--r--drivers/gpu/drm/tidss/Makefile12
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c432
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.h48
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c2753
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h137
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h243
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c285
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h39
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c88
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.h17
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c146
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h77
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c299
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.h15
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c217
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h25
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.c202
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h22
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c2
-rw-r--r--drivers/gpu/drm/tiny/Kconfig22
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c286
-rw-r--r--drivers/gpu/drm/tiny/repaper.c21
-rw-r--r--drivers/gpu/drm/tiny/st7586.c9
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c76
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c271
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c173
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c1
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h41
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c13
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h49
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c90
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c109
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c369
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h161
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h787
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h466
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h36
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h58
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h347
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h382
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c213
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c86
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h184
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c429
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c130
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c78
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c66
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c387
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c610
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c166
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c5
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c19
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c4
-rw-r--r--drivers/gpu/trace/Kconfig4
-rw-r--r--drivers/gpu/trace/Makefile3
-rw-r--r--drivers/gpu/trace/trace_gpu_mem.c13
-rw-r--r--drivers/hid/Kconfig19
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-appleir.c12
-rw-r--r--drivers/hid/hid-glorious.c86
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-lg-g15.c6
-rw-r--r--drivers/hid/hid-logitech-dj.c11
-rw-r--r--drivers/hid/hid-mcp2221.c742
-rw-r--r--drivers/hid/hid-quirks.c3
-rw-r--r--drivers/hid/hid-rmi.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h2
-rw-r--r--drivers/hv/hv_balloon.c25
-rw-r--r--drivers/hwspinlock/Kconfig12
-rw-r--r--drivers/hwspinlock/hwspinlock_internal.h2
-rw-r--r--drivers/hwtracing/coresight/Kconfig21
-rw-r--r--drivers/hwtracing/coresight/Makefile3
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c485
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c1206
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.c745
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h235
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h15
-rw-r--r--drivers/hwtracing/coresight/coresight.c86
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h2
-rw-r--r--drivers/hwtracing/intel_th/msu.c49
-rw-r--r--drivers/hwtracing/intel_th/pci.c8
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c6
-rw-r--r--drivers/i2c/busses/i2c-altera.c6
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c27
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c2
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c78
-rw-r--r--drivers/i2c/busses/i2c-at91.h4
-rw-r--r--drivers/i2c/busses/i2c-axxia.c4
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c14
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c8
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c33
-rw-r--r--drivers/i2c/busses/i2c-cadence.c7
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c36
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c61
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c4
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c3
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c12
-rw-r--r--drivers/i2c/busses/i2c-efm32.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c18
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c10
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c4
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c155
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c6
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c21
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c6
-rw-r--r--drivers/i2c/busses/i2c-mxs.c10
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c8
-rw-r--r--drivers/i2c/busses/i2c-omap.c6
-rw-r--r--drivers/i2c/busses/i2c-owl.c9
-rw-r--r--drivers/i2c/busses/i2c-parport.c12
-rw-r--r--drivers/i2c/busses/i2c-powermac.c15
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c58
-rw-r--r--drivers/i2c/busses/i2c-qup.c11
-rw-r--r--drivers/i2c/busses/i2c-rcar.c24
-rw-r--r--drivers/i2c/busses/i2c-riic.c6
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c12
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c9
-rw-r--r--drivers/i2c/busses/i2c-sirf.c3
-rw-r--r--drivers/i2c/busses/i2c-sprd.c9
-rw-r--r--drivers/i2c/busses/i2c-st.c6
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c10
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c245
-rw-r--r--drivers/i2c/busses/i2c-stu300.c6
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c18
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c13
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c6
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c7
-rw-r--r--drivers/i2c/busses/i2c-wmt.c2
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c18
-rw-r--r--drivers/i2c/busses/i2c-xlr.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c2
-rw-r--r--drivers/i2c/i2c-core-base.c38
-rw-r--r--drivers/i2c/i2c-core-smbus.c26
-rw-r--r--drivers/i2c/i2c-dev.c50
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/ide/ide-scan-pci.c8
-rw-r--r--drivers/infiniband/core/cache.c19
-rw-r--r--drivers/infiniband/core/cm.c746
-rw-r--r--drivers/infiniband/core/cma.c114
-rw-r--r--drivers/infiniband/core/cma_configfs.c6
-rw-r--r--drivers/infiniband/core/cma_priv.h6
-rw-r--r--drivers/infiniband/core/mad_priv.h4
-rw-r--r--drivers/infiniband/core/multicast.c2
-rw-r--r--drivers/infiniband/core/rw.c12
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/ucma.c61
-rw-r--r--drivers/infiniband/core/umem.c11
-rw-r--r--drivers/infiniband/core/verbs.c24
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h26
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c926
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c492
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c489
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h95
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c463
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h85
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c470
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h145
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c48
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h8
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h7
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h4
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c158
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c29
-rw-r--r--drivers/infiniband/hw/efa/efa_common_defs.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_regs_defs.h25
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c51
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c4
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c26
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h4
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c26
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c474
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c46
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1851
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c977
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c96
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h26
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c24
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_status.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h12
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c12
-rw-r--r--drivers/infiniband/hw/mlx4/main.c9
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c20
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c21
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c265
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h89
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c608
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qos.c136
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c35
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h2
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c137
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c21
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h6
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c5
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h2
-rw-r--r--drivers/interconnect/qcom/Kconfig25
-rw-r--r--drivers/interconnect/qcom/Makefile8
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c366
-rw-r--r--drivers/interconnect/qcom/bcm-voter.h27
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c150
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h149
-rw-r--r--drivers/interconnect/qcom/osm-l3.c276
-rw-r--r--drivers/interconnect/qcom/sc7180.c641
-rw-r--r--drivers/interconnect/qcom/sc7180.h151
-rw-r--r--drivers/interconnect/qcom/sdm845.c1055
-rw-r--r--drivers/interconnect/qcom/sdm845.h142
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c35
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/macintosh/ans-lcd.h2
-rw-r--r--drivers/macintosh/therm_windtunnel.c4
-rw-r--r--drivers/macintosh/via-pmu.c3
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/armada-37xx-rwtm-mailbox.c8
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c20
-rw-r--r--drivers/mailbox/imx-mailbox.c288
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c128
-rw-r--r--drivers/mailbox/sun6i-msgbox.c326
-rw-r--r--drivers/md/dm-clone-metadata.c15
-rw-r--r--drivers/md/dm-clone-metadata.h2
-rw-r--r--drivers/md/dm-clone-target.c66
-rw-r--r--drivers/md/dm-crypt.c6
-rw-r--r--drivers/md/dm-integrity.c302
-rw-r--r--drivers/md/dm-verity-fec.c1
-rw-r--r--drivers/md/dm-writecache.c138
-rw-r--r--drivers/md/dm-zoned-metadata.c1
-rw-r--r--drivers/md/dm.c5
-rw-r--r--drivers/memory/.gitignore1
-rw-r--r--drivers/memory/tegra/tegra124-emc.c5
-rw-r--r--drivers/memory/tegra/tegra20-emc.c5
-rw-r--r--drivers/memory/tegra/tegra30-emc.c5
-rw-r--r--drivers/message/fusion/mptlan.h5
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/rts5227.c1
-rw-r--r--drivers/misc/eeprom/at24.c1
-rw-r--r--drivers/misc/habanalabs/command_submission.c51
-rw-r--r--drivers/misc/habanalabs/debugfs.c92
-rw-r--r--drivers/misc/habanalabs/device.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya.c204
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c4
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c2
-rw-r--r--drivers/misc/habanalabs/habanalabs.h62
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c11
-rw-r--r--drivers/misc/habanalabs/hwmon.c106
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h20
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_async_events.h4
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_reg_map.h39
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h39
-rw-r--r--drivers/misc/habanalabs/memory.c222
-rw-r--r--drivers/misc/habanalabs/mmu.c110
-rw-r--r--drivers/misc/lkdtm/bugs.c75
-rw-r--r--drivers/misc/lkdtm/core.c3
-rw-r--r--drivers/misc/lkdtm/lkdtm.h3
-rw-r--r--drivers/misc/lkdtm/stackleak.c25
-rw-r--r--drivers/misc/mei/bus-fixup.c4
-rw-r--r--drivers/misc/mei/client.c4
-rw-r--r--drivers/misc/mei/hw-me-regs.h6
-rw-r--r--drivers/misc/mei/hw.h5
-rw-r--r--drivers/misc/mei/mei_dev.h2
-rw-r--r--drivers/misc/mei/pci-me.c17
-rw-r--r--drivers/misc/mei/pci-txe.c5
-rw-r--r--drivers/misc/mic/host/mic_boot.c2
-rw-r--r--drivers/misc/mic/host/mic_x100.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c213
-rw-r--r--drivers/misc/sgi-gru/grulib.h2
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/misc/uacce/Kconfig13
-rw-r--r--drivers/misc/uacce/Makefile2
-rw-r--r--drivers/misc/uacce/uacce.c633
-rw-r--r--drivers/misc/vexpress-syscfg.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c5
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c17
-rw-r--r--drivers/mtd/chips/cfi_util.c12
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/phram.c19
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c12
-rw-r--r--drivers/mtd/hyperbus/hyperbus-core.c15
-rw-r--r--drivers/mtd/inftlmount.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c5
-rw-r--r--drivers/mtd/mtdblock.c5
-rw-r--r--drivers/mtd/mtdchar.c12
-rw-r--r--drivers/mtd/mtdcore.c250
-rw-r--r--drivers/mtd/mtdpart.c695
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c2
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c237
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c293
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c34
-rw-r--r--drivers/mtd/nand/raw/denali.c1
-rw-r--r--drivers/mtd/nand/raw/denali.h2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c21
-rw-r--r--drivers/mtd/nand/raw/ingenic/Kconfig1
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c4
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4725b_bch.c4
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4780_bch.c4
-rw-r--r--drivers/mtd/nand/raw/internals.h1
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c40
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c71
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c2
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c6
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c227
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c58
-rw-r--r--drivers/mtd/nand/raw/nandsim.c4
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c8
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c105
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c44
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c17
-rw-r--r--drivers/mtd/nand/spi/core.c104
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c45
-rw-r--r--drivers/mtd/nand/spi/macronix.c30
-rw-r--r--drivers/mtd/nand/spi/micron.c172
-rw-r--r--drivers/mtd/nand/spi/paragon.c28
-rw-r--r--drivers/mtd/nand/spi/toshiba.c208
-rw-r--r--drivers/mtd/nand/spi/winbond.c34
-rw-r--r--drivers/mtd/spi-nor/Kconfig75
-rw-r--r--drivers/mtd/spi-nor/Makefile25
-rw-r--r--drivers/mtd/spi-nor/atmel.c46
-rw-r--r--drivers/mtd/spi-nor/catalyst.c29
-rw-r--r--drivers/mtd/spi-nor/controllers/Kconfig75
-rw-r--r--drivers/mtd/spi-nor/controllers/Makefile8
-rw-r--r--drivers/mtd/spi-nor/controllers/aspeed-smc.c (renamed from drivers/mtd/spi-nor/aspeed-smc.c)4
-rw-r--r--drivers/mtd/spi-nor/controllers/cadence-quadspi.c (renamed from drivers/mtd/spi-nor/cadence-quadspi.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c (renamed from drivers/mtd/spi-nor/hisi-sfc.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-pci.c (renamed from drivers/mtd/spi-nor/intel-spi-pci.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-platform.c (renamed from drivers/mtd/spi-nor/intel-spi-platform.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi.c (renamed from drivers/mtd/spi-nor/intel-spi.c)0
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi.h (renamed from drivers/mtd/spi-nor/intel-spi.h)0
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c (renamed from drivers/mtd/spi-nor/nxp-spifi.c)0
-rw-r--r--drivers/mtd/spi-nor/core.c3466
-rw-r--r--drivers/mtd/spi-nor/core.h441
-rw-r--r--drivers/mtd/spi-nor/eon.c34
-rw-r--r--drivers/mtd/spi-nor/esmt.c25
-rw-r--r--drivers/mtd/spi-nor/everspin.c27
-rw-r--r--drivers/mtd/spi-nor/fujitsu.c20
-rw-r--r--drivers/mtd/spi-nor/gigadevice.c59
-rw-r--r--drivers/mtd/spi-nor/intel.c32
-rw-r--r--drivers/mtd/spi-nor/issi.c83
-rw-r--r--drivers/mtd/spi-nor/macronix.c98
-rw-r--r--drivers/mtd/spi-nor/micron-st.c157
-rw-r--r--drivers/mtd/spi-nor/sfdp.c1204
-rw-r--r--drivers/mtd/spi-nor/sfdp.h98
-rw-r--r--drivers/mtd/spi-nor/spansion.c95
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c5434
-rw-r--r--drivers/mtd/spi-nor/sst.c151
-rw-r--r--drivers/mtd/spi-nor/winbond.c112
-rw-r--r--drivers/mtd/spi-nor/xilinx.c94
-rw-r--r--drivers/mtd/spi-nor/xmc.c23
-rw-r--r--drivers/mtd/ubi/attach.c2
-rw-r--r--drivers/mtd/ubi/build.c4
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c15
-rw-r--r--drivers/mtd/ubi/ubi-media.h2
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c9
-rw-r--r--drivers/net/dsa/mt7530.c88
-rw-r--r--drivers/net/dsa/mt7530.h10
-rw-r--r--drivers/net/ethernet/freescale/fec.h7
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c149
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c44
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c52
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/macsec.c5
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/micrel.c7
-rw-r--r--drivers/net/tun.c10
-rw-r--r--drivers/net/usb/pegasus.c38
-rw-r--r--drivers/net/wan/.gitignore1
-rw-r--r--drivers/net/wimax/i2400m/driver.c7
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c4
-rw-r--r--drivers/nvme/host/rdma.c8
-rw-r--r--drivers/nvme/target/rdma.c6
-rw-r--r--drivers/nvmem/Kconfig12
-rw-r--r--drivers/nvmem/Makefile5
-rw-r--r--drivers/nvmem/core.c365
-rw-r--r--drivers/nvmem/imx-ocotp.c29
-rw-r--r--drivers/nvmem/jz4780-efuse.c239
-rw-r--r--drivers/nvmem/mxs-ocotp.c30
-rw-r--r--drivers/nvmem/nvmem-sysfs.c263
-rw-r--r--drivers/nvmem/nvmem.h64
-rw-r--r--drivers/nvmem/sprd-efuse.c27
-rw-r--r--drivers/of/address.c273
-rw-r--r--drivers/of/of_private.h2
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/property.c4
-rw-r--r--drivers/of/resolver.c5
-rw-r--r--drivers/of/unittest-data/Makefile8
-rw-r--r--drivers/of/unittest-data/overlay_gpio_01.dts23
-rw-r--r--drivers/of/unittest-data/overlay_gpio_02a.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_02b.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_03.dts23
-rw-r--r--drivers/of/unittest-data/overlay_gpio_04a.dts16
-rw-r--r--drivers/of/unittest-data/overlay_gpio_04b.dts16
-rw-r--r--drivers/of/unittest.c669
-rw-r--r--drivers/parisc/eisa.c8
-rw-r--r--drivers/pci/controller/Kconfig11
-rw-r--r--drivers/pci/controller/Makefile2
-rw-r--r--drivers/pci/controller/dwc/Kconfig29
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c231
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c5
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c116
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c144
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h12
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c712
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig34
-rw-r--r--drivers/pci/controller/mobiveil/Makefile5
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c267
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c (renamed from drivers/pci/controller/pcie-mobiveil.c)564
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c61
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.c231
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h226
-rw-r--r--drivers/pci/controller/pci-hyperv.c260
-rw-r--r--drivers/pci/controller/pci-tegra.c187
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c4
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c402
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c28
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c137
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c10
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c35
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c93
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c5
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c4
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c99
-rw-r--r--drivers/pci/p2pdma.c3
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-sysfs.c33
-rw-r--r--drivers/pci/pci.c25
-rw-r--r--drivers/pci/pci.h32
-rw-r--r--drivers/pci/pcie/Kconfig10
-rw-r--r--drivers/pci/pcie/Makefile1
-rw-r--r--drivers/pci/pcie/aer.c40
-rw-r--r--drivers/pci/pcie/aspm.c6
-rw-r--r--drivers/pci/pcie/dpc.c137
-rw-r--r--drivers/pci/pcie/edr.c239
-rw-r--r--drivers/pci/pcie/err.c66
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_core.c21
-rw-r--r--drivers/pci/probe.c42
-rw-r--r--drivers/pci/quirks.c120
-rw-r--r--drivers/pci/rom.c17
-rw-r--r--drivers/pci/setup-bus.c34
-rw-r--r--drivers/pci/slot.c38
-rw-r--r--drivers/pcmcia/cs_internal.h2
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/sa1100_simpad.c6
-rw-r--r--drivers/pcmcia/soc_common.h2
-rw-r--r--drivers/pcmcia/yenta_socket.c10
-rw-r--r--drivers/phy/amlogic/Kconfig22
-rw-r--r--drivers/phy/amlogic/Makefile12
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c188
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-pcie.c192
-rw-r--r--drivers/pinctrl/Kconfig12
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c510
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c111
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c5
-rw-r--r--drivers/pinctrl/core.c1
-rw-r--r--drivers/pinctrl/devicetree.c1
-rw-r--r--drivers/pinctrl/freescale/Kconfig8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6765.c11
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8183.c7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c264
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h16
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c5
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c363
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.h3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c35
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c27
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c17
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c7
-rw-r--r--drivers/pinctrl/pinconf-generic.c1
-rw-r--r--drivers/pinctrl/pinctrl-amd.c5
-rw-r--r--drivers/pinctrl/pinctrl-at91.c5
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c7
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c300
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c62
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c5
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c5
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c5
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c5
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c7
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c5
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c5
-rw-r--r--drivers/pinctrl/pinctrl-rza2.c6
-rw-r--r--drivers/pinctrl/pinctrl-st.c14
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c17
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c9
-rw-r--r--drivers/pinctrl/qcom/Kconfig10
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c1107
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c46
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c307
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c5
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c2
-rw-r--r--drivers/pinctrl/sprd/Kconfig10
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c25
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c16
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c52
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h5
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra194.c47
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c6
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c10
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/at91-reset.c190
-rw-r--r--drivers/power/reset/sc27xx-poweroff.c21
-rw-r--r--drivers/power/supply/Kconfig2
-rw-r--r--drivers/power/supply/ab8500_charger.c35
-rw-r--r--drivers/power/supply/axp288_charger.c57
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c4
-rw-r--r--drivers/power/supply/bq27xxx_battery.c5
-rw-r--r--drivers/power/supply/ingenic-battery.c3
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c12
-rw-r--r--drivers/power/supply/twl4030_charger.c4
-rw-r--r--drivers/remoteproc/Kconfig14
-rw-r--r--drivers/remoteproc/imx_rproc.c11
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c4
-rw-r--r--drivers/remoteproc/mtk_scp.c2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c1200
-rw-r--r--drivers/remoteproc/omap_remoteproc.h50
-rw-r--r--drivers/remoteproc/qcom_q6v5.c20
-rw-r--r--drivers/remoteproc/qcom_q6v5.h1
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c10
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c133
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c10
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c161
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c16
-rw-r--r--drivers/remoteproc/remoteproc_elf_helpers.h96
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c189
-rw-r--r--drivers/remoteproc/remoteproc_internal.h16
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c8
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/remoteproc/st_slim_rproc.c6
-rw-r--r--drivers/remoteproc/stm32_rproc.c4
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c4
-rw-r--r--drivers/rtc/Kconfig17
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/class.c49
-rw-r--r--drivers/rtc/hctosys.c69
-rw-r--r--drivers/rtc/rtc-88pm860x.c104
-rw-r--r--drivers/rtc/rtc-ab8500.c10
-rw-r--r--drivers/rtc/rtc-au1xxx.c29
-rw-r--r--drivers/rtc/rtc-bd70528.c4
-rw-r--r--drivers/rtc/rtc-cmos.c5
-rw-r--r--drivers/rtc/rtc-cpcap.c13
-rw-r--r--drivers/rtc/rtc-da9052.c18
-rw-r--r--drivers/rtc/rtc-davinci.c58
-rw-r--r--drivers/rtc/rtc-ds1305.c10
-rw-r--r--drivers/rtc/rtc-ds1307.c126
-rw-r--r--drivers/rtc/rtc-ds1374.c27
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c23
-rw-r--r--drivers/rtc/rtc-imx-sc.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c9
-rw-r--r--drivers/rtc/rtc-m48t35.c7
-rw-r--r--drivers/rtc/rtc-mpc5121.c61
-rw-r--r--drivers/rtc/rtc-mt2712.c423
-rw-r--r--drivers/rtc/rtc-mxc.c46
-rw-r--r--drivers/rtc/rtc-omap.c1
-rw-r--r--drivers/rtc/rtc-pcf85063.c157
-rw-r--r--drivers/rtc/rtc-pl030.c27
-rw-r--r--drivers/rtc/rtc-pl031.c53
-rw-r--r--drivers/rtc/rtc-pm8xxx.c40
-rw-r--r--drivers/rtc/rtc-puv3.c14
-rw-r--r--drivers/rtc/rtc-sa1100.c40
-rw-r--r--drivers/rtc/rtc-sh.c3
-rw-r--r--drivers/rtc/rtc-sirfsoc.c44
-rw-r--r--drivers/rtc/rtc-snvs.c28
-rw-r--r--drivers/rtc/rtc-starfire.c10
-rw-r--r--drivers/rtc/rtc-sun6i.c47
-rw-r--r--drivers/rtc/rtc-zynqmp.c27
-rw-r--r--drivers/rtc/sysfs.c2
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c2
-rw-r--r--drivers/s390/char/raw3270.h2
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/sclp_pci.c2
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/s390/char/tape_core.c6
-rw-r--r--drivers/s390/cio/airq.c8
-rw-r--r--drivers/s390/cio/ccwgroup.c69
-rw-r--r--drivers/s390/cio/chsc.c5
-rw-r--r--drivers/s390/cio/chsc.h3
-rw-r--r--drivers/s390/cio/cio.c8
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/idset.c2
-rw-r--r--drivers/s390/cio/qdio.h9
-rw-r--r--drivers/s390/cio/qdio_debug.c59
-rw-r--r--drivers/s390/cio/qdio_debug.h3
-rw-r--r--drivers/s390/cio/qdio_main.c23
-rw-r--r--drivers/s390/cio/qdio_setup.c29
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c212
-rw-r--r--drivers/s390/crypto/ap_bus.h5
-rw-r--r--drivers/s390/crypto/ap_card.c17
-rw-r--r--drivers/s390/crypto/ap_queue.c75
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/s390/crypto/zcrypt_card.c6
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c33
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c76
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c10
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c10
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c4
-rw-r--r--drivers/s390/net/ism_drv.c20
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c44
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h32
-rw-r--r--drivers/s390/scsi/zfcp_def.h6
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h12
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c290
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h23
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c5
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c70
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/sbus/char/flash.c4
-rw-r--r--drivers/sbus/char/uctrl.c2
-rw-r--r--drivers/scsi/.gitignore1
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/Kconfig51
-rw-r--r--drivers/scsi/aacraid/aachba.c83
-rw-r--r--drivers/scsi/aacraid/comminit.c35
-rw-r--r--drivers/scsi/aacraid/commsup.c48
-rw-r--r--drivers/scsi/aacraid/linit.c171
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1740.c1
-rw-r--r--drivers/scsi/aic7xxx/.gitignore1
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic79xx2
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c22
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c20
-rw-r--r--drivers/scsi/ch.c40
-rw-r--r--drivers/scsi/dc395x.c34
-rw-r--r--drivers/scsi/dpt/dpti_ioctl.h2
-rw-r--r--drivers/scsi/dpt_i2o.c27
-rw-r--r--drivers/scsi/dpti.h5
-rw-r--r--drivers/scsi/fnic/fnic_trace.c58
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h2
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/gdth_proc.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c3
-rw-r--r--drivers/scsi/hosts.c65
-rw-r--r--drivers/scsi/hpsa.c80
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c207
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h3
-rw-r--r--drivers/scsi/ipr.c6
-rw-r--r--drivers/scsi/ipr.h6
-rw-r--r--drivers/scsi/isci/sas.h2
-rw-r--r--drivers/scsi/libiscsi.c9
-rw-r--r--drivers/scsi/lpfc/lpfc.h33
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c141
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c519
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h62
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.h2
-rw-r--r--drivers/scsi/mvumi.h4
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/pcmcia/Kconfig2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c51
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h5
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c22
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c80
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h7
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c155
-rw-r--r--drivers/scsi/pmcraid.h2
-rw-r--r--drivers/scsi/qedi/qedi.h3
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h1
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c18
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c104
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c133
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h387
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h173
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h23
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c1699
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c202
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c295
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c388
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c722
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c38
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c25
-rw-r--r--drivers/scsi/scsi.c18
-rw-r--r--drivers/scsi/scsi_error.c1
-rw-r--r--drivers/scsi/scsi_lib.c98
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c18
-rw-r--r--drivers/scsi/scsi_trace.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c135
-rw-r--r--drivers/scsi/smartpqi/Kconfig2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c22
-rw-r--r--drivers/scsi/snic/vnic_devcmd.h2
-rw-r--r--drivers/scsi/sr.c20
-rw-r--r--drivers/scsi/sr.h2
-rw-r--r--drivers/scsi/sr_vendor.c8
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/stex.c2
-rw-r--r--drivers/scsi/ufs/Kconfig2
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c2
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c2
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c141
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h15
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c146
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c28
-rw-r--r--drivers/scsi/ufs/ufs.h3
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c362
-rw-r--r--drivers/scsi/ufs/ufshcd.h205
-rw-r--r--drivers/scsi/ufs/unipro.h7
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/scsi/zorro_esp.c5
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/Kconfig13
-rw-r--r--drivers/soc/amlogic/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-secure-pwrc.c204
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c69
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c767
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h158
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/qe_common.c2
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c2
-rw-r--r--drivers/soc/fsl/qe/ucc.c2
-rw-r--r--drivers/soc/fsl/qe/ucc_slow.c33
-rw-r--r--drivers/soc/imx/Kconfig11
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/gpc.c24
-rw-r--r--drivers/soc/imx/gpcv2.c1
-rw-r--r--drivers/soc/imx/soc-imx8m.c (renamed from drivers/soc/imx/soc-imx8.c)0
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c1
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c128
-rw-r--r--drivers/soc/qcom/Kconfig7
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/apr.c123
-rw-r--r--drivers/soc/qcom/pdr_interface.c757
-rw-r--r--drivers/soc/qcom/pdr_internal.h379
-rw-r--r--drivers/soc/qcom/qcom_aoss.c6
-rw-r--r--drivers/soc/qcom/rpmh-internal.h1
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c2
-rw-r--r--drivers/soc/qcom/rpmh.c22
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/soc/renesas/Kconfig18
-rw-r--r--drivers/soc/renesas/rcar-sysc.h4
-rw-r--r--drivers/soc/renesas/renesas-soc.c2
-rw-r--r--drivers/soc/tegra/pmc.c688
-rw-r--r--drivers/soc/ti/pm33xx.c21
-rw-r--r--drivers/soundwire/bus.c537
-rw-r--r--drivers/soundwire/bus.h9
-rw-r--r--drivers/soundwire/bus_type.c5
-rw-r--r--drivers/soundwire/cadence_master.c282
-rw-r--r--drivers/soundwire/cadence_master.h17
-rw-r--r--drivers/soundwire/intel.c200
-rw-r--r--drivers/soundwire/qcom.c15
-rw-r--r--drivers/soundwire/slave.c4
-rw-r--r--drivers/soundwire/stream.c115
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/staging/comedi/drivers/ni_routing/tools/.gitignore1
-rw-r--r--drivers/staging/greybus/tools/.gitignore1
-rw-r--r--drivers/staging/speakup/devsynth.c10
-rw-r--r--drivers/staging/speakup/speakup_soft.c14
-rw-r--r--drivers/target/iscsi/iscsi_target.c82
-rw-r--r--drivers/target/iscsi/iscsi_target.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c5
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_device.c4
-rw-r--r--drivers/target/target_core_spc.c13
-rw-r--r--drivers/target/target_core_tmr.c6
-rw-r--r--drivers/target/target_core_transport.c3
-rw-r--r--drivers/target/target_core_ua.c8
-rw-r--r--drivers/tee/tee_core.c1
-rw-r--r--drivers/tee/tee_private.h3
-rw-r--r--drivers/tee/tee_shm.c85
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c2
-rw-r--r--drivers/tty/Kconfig173
-rw-r--r--drivers/tty/ehv_bytechan.c21
-rw-r--r--drivers/tty/hvc/Kconfig3
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/uio/uio.c38
-rw-r--r--drivers/uio/uio_pdrv_genirq.c34
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c1
-rw-r--r--drivers/usb/gadget/function/storage_common.h5
-rw-r--r--drivers/vfio/pci/vfio_pci.c390
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h10
-rw-r--r--drivers/vfio/platform/vfio_platform.c2
-rw-r--r--drivers/vfio/vfio.c198
-rw-r--r--drivers/vfio/vfio_iommu_type1.c78
-rw-r--r--drivers/video/backlight/Kconfig8
-rw-r--r--drivers/video/console/Kconfig76
-rw-r--r--drivers/video/fbdev/Kconfig9
-rw-r--r--drivers/video/fbdev/aty/mach64_gx.c3
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c26
-rw-r--r--drivers/video/fbdev/cg14.c3
-rw-r--r--drivers/video/fbdev/core/Makefile1
-rw-r--r--drivers/video/fbdev/core/fbcon.c27
-rw-r--r--drivers/video/fbdev/core/fbmem.c38
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/video/fbdev/kyro/STG4000OverlayDevice.c3
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c15
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.h2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c41
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.c5
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c7
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c16
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/w100fb.c18
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/video/hdmi.c11
-rw-r--r--drivers/video/logo/.gitignore4
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c2
-rw-r--r--drivers/virtio/Kconfig1
-rw-r--r--drivers/virtio/virtio_balloon.c180
-rw-r--r--drivers/xen/xen-pciback/conf_space.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.h8
-rw-r--r--drivers/xen/xenbus/xenbus_client.c126
-rw-r--r--drivers/zorro/.gitignore1
-rw-r--r--fs/9p/Kconfig20
-rw-r--r--fs/9p/vfs_file.c5
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/Kconfig3
-rw-r--r--fs/Makefile1
-rw-r--r--fs/autofs/dev-ioctl.c6
-rw-r--r--fs/binfmt_elf.c48
-rw-r--r--fs/block_dev.c4
-rw-r--r--fs/debugfs/file.c8
-rw-r--r--fs/eventpoll.c64
-rw-r--r--fs/exec.c80
-rw-r--r--fs/exfat/Kconfig21
-rw-r--r--fs/exfat/Makefile8
-rw-r--r--fs/exfat/balloc.c280
-rw-r--r--fs/exfat/cache.c325
-rw-r--r--fs/exfat/dir.c1238
-rw-r--r--fs/exfat/exfat_fs.h519
-rw-r--r--fs/exfat/exfat_raw.h184
-rw-r--r--fs/exfat/fatent.c463
-rw-r--r--fs/exfat/file.c360
-rw-r--r--fs/exfat/inode.c671
-rw-r--r--fs/exfat/misc.c163
-rw-r--r--fs/exfat/namei.c1448
-rw-r--r--fs/exfat/nls.c831
-rw-r--r--fs/exfat/super.c722
-rw-r--r--fs/ext2/xattr.c18
-rw-r--r--fs/ext2/xattr.h2
-rw-r--r--fs/ext4/balloc.c7
-rw-r--r--fs/ext4/block_validity.c18
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h59
-rw-r--r--fs/ext4/ext4_jbd2.c14
-rw-r--r--fs/ext4/ext4_jbd2.h3
-rw-r--r--fs/ext4/extents.c480
-rw-r--r--fs/ext4/file.c1
-rw-r--r--fs/ext4/ialloc.c36
-rw-r--r--fs/ext4/indirect.c2
-rw-r--r--fs/ext4/inline.c54
-rw-r--r--fs/ext4/inode.c57
-rw-r--r--fs/ext4/ioctl.c12
-rw-r--r--fs/ext4/mballoc.c38
-rw-r--r--fs/ext4/mmp.c13
-rw-r--r--fs/ext4/move_extent.c4
-rw-r--r--fs/ext4/namei.c28
-rw-r--r--fs/ext4/super.c212
-rw-r--r--fs/ext4/xattr.c10
-rw-r--r--fs/ext4/xattr.h4
-rw-r--r--fs/f2fs/Kconfig9
-rw-r--r--fs/f2fs/checkpoint.c42
-rw-r--r--fs/f2fs/compress.c317
-rw-r--r--fs/f2fs/data.c141
-rw-r--r--fs/f2fs/debug.c3
-rw-r--r--fs/f2fs/dir.c16
-rw-r--r--fs/f2fs/f2fs.h206
-rw-r--r--fs/f2fs/file.c91
-rw-r--r--fs/f2fs/gc.c51
-rw-r--r--fs/f2fs/inode.c29
-rw-r--r--fs/f2fs/namei.c12
-rw-r--r--fs/f2fs/node.c33
-rw-r--r--fs/f2fs/recovery.c12
-rw-r--r--fs/f2fs/segment.c54
-rw-r--r--fs/f2fs/segment.h2
-rw-r--r--fs/f2fs/shrinker.c2
-rw-r--r--fs/f2fs/super.c89
-rw-r--r--fs/f2fs/sysfs.c50
-rw-r--r--fs/f2fs/xattr.c67
-rw-r--r--fs/f2fs/xattr.h9
-rw-r--r--fs/fs_parser.c2
-rw-r--r--fs/hostfs/hostfs_kern.c12
-rw-r--r--fs/hugetlbfs/inode.c30
-rw-r--r--fs/internal.h1
-rw-r--r--fs/iomap/buffered-io.c7
-rw-r--r--fs/iomap/direct-io.c4
-rw-r--r--fs/iomap/trace.h27
-rw-r--r--fs/jbd2/commit.c7
-rw-r--r--fs/kernfs/inode.c91
-rw-r--r--fs/kernfs/kernfs-internal.h2
-rw-r--r--fs/namei.c1474
-rw-r--r--fs/namespace.c96
-rw-r--r--fs/nfs/blocklayout/blocklayout.c2
-rw-r--r--fs/nfs/callback.h4
-rw-r--r--fs/nfs/callback_proc.c69
-rw-r--r--fs/nfs/delegation.c319
-rw-r--r--fs/nfs/dir.c79
-rw-r--r--fs/nfs/direct.c197
-rw-r--r--fs/nfs/dns_resolve.c11
-rw-r--r--fs/nfs/filelayout/filelayout.c165
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c229
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h2
-rw-r--r--fs/nfs/fs_context.c9
-rw-r--r--fs/nfs/inode.c28
-rw-r--r--fs/nfs/internal.h36
-rw-r--r--fs/nfs/namespace.c67
-rw-r--r--fs/nfs/nfs4_fs.h4
-rw-r--r--fs/nfs/nfs4file.c3
-rw-r--r--fs/nfs/nfs4namespace.c2
-rw-r--r--fs/nfs/nfs4proc.c19
-rw-r--r--fs/nfs/nfs4state.c24
-rw-r--r--fs/nfs/nfs4trace.h8
-rw-r--r--fs/nfs/nfsroot.c2
-rw-r--r--fs/nfs/nfstrace.h1
-rw-r--r--fs/nfs/pagelist.c367
-rw-r--r--fs/nfs/pnfs.c241
-rw-r--r--fs/nfs/pnfs.h143
-rw-r--r--fs/nfs/pnfs_nfs.c514
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/super.c35
-rw-r--r--fs/nfs/unlink.c4
-rw-r--r--fs/nfs/write.c276
-rw-r--r--fs/nfsd/Kconfig2
-rw-r--r--fs/nfsd/export.c45
-rw-r--r--fs/nfsd/filecache.c2
-rw-r--r--fs/nfsd/netns.h2
-rw-r--r--fs/nfsd/nfs4idmap.c14
-rw-r--r--fs/nfsd/nfs4state.c87
-rw-r--r--fs/nfsd/nfs4xdr.c38
-rw-r--r--fs/nfsd/nfsctl.c1
-rw-r--r--fs/nfsd/nfsfh.c13
-rw-r--r--fs/nfsd/nfssvc.c3
-rw-r--r--fs/nfsd/trace.h122
-rw-r--r--fs/notify/fanotify/fanotify.c302
-rw-r--r--fs/notify/fanotify/fanotify.h189
-rw-r--r--fs/notify/fanotify/fanotify_user.c220
-rw-r--r--fs/notify/fsnotify.c22
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c12
-rw-r--r--fs/notify/inotify/inotify_user.c2
-rw-r--r--fs/ocfs2/alloc.c3
-rw-r--r--fs/ocfs2/cluster/heartbeat.c12
-rw-r--r--fs/ocfs2/cluster/netdebug.c4
-rw-r--r--fs/ocfs2/cluster/tcp.c27
-rw-r--r--fs/ocfs2/cluster/tcp.h2
-rw-r--r--fs/ocfs2/dir.c4
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h8
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c100
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/ocfs2/dlm/dlmthread.c3
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/journal.c2
-rw-r--r--fs/ocfs2/namei.c15
-rw-r--r--fs/ocfs2/ocfs2_fs.h18
-rw-r--r--fs/ocfs2/refcounttree.c2
-rw-r--r--fs/ocfs2/reservations.c3
-rw-r--r--fs/ocfs2/stackglue.c2
-rw-r--r--fs/ocfs2/suballoc.c5
-rw-r--r--fs/ocfs2/super.c46
-rw-r--r--fs/open.c4
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/array.c39
-rw-r--r--fs/proc/base.c121
-rw-r--r--fs/proc/cpuinfo.c1
-rw-r--r--fs/proc/generic.c31
-rw-r--r--fs/proc/inode.c261
-rw-r--r--fs/proc/internal.h10
-rw-r--r--fs/proc/kmsg.c1
-rw-r--r--fs/proc/proc_sysctl.c45
-rw-r--r--fs/proc/root.c36
-rw-r--r--fs/proc/stat.c1
-rw-r--r--fs/proc/task_mmu.c95
-rw-r--r--fs/reiserfs/do_balan.c2
-rw-r--r--fs/reiserfs/ioctl.c11
-rw-r--r--fs/reiserfs/namei.c10
-rw-r--r--fs/seq_file.c28
-rw-r--r--fs/sysfs/group.c20
-rw-r--r--fs/ubifs/io.c16
-rw-r--r--fs/ubifs/journal.c1
-rw-r--r--fs/ubifs/orphan.c13
-rw-r--r--fs/udf/udf_sb.h2
-rw-r--r--fs/unicode/.gitignore1
-rw-r--r--fs/userfaultfd.c168
-rw-r--r--fs/xattr.c17
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/libxfs/xfs_ag.c16
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c99
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h9
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c119
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.h7
-rw-r--r--fs/xfs/libxfs/xfs_attr.c351
-rw-r--r--fs/xfs/libxfs/xfs_attr.h114
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c130
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h1
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c88
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h3
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c50
-rw-r--r--fs/xfs/libxfs/xfs_btree.c93
-rw-r--r--fs/xfs/libxfs/xfs_btree.h82
-rw-r--r--fs/xfs/libxfs/xfs_btree_staging.c879
-rw-r--r--fs/xfs/libxfs/xfs_btree_staging.h123
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c17
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h11
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h12
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c33
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c32
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c11
-rw-r--r--fs/xfs/libxfs/xfs_format.h48
-rw-r--r--fs/xfs/libxfs/xfs_fs.h32
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c35
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c104
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.h6
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c43
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h5
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h9
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h10
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c110
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c104
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.h6
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c123
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c99
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h5
-rw-r--r--fs/xfs/libxfs/xfs_sb.c17
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c2
-rw-r--r--fs/xfs/scrub/agheader.c20
-rw-r--r--fs/xfs/scrub/agheader_repair.c78
-rw-r--r--fs/xfs/scrub/alloc.c2
-rw-r--r--fs/xfs/scrub/attr.c20
-rw-r--r--fs/xfs/scrub/bitmap.c87
-rw-r--r--fs/xfs/scrub/bitmap.h23
-rw-r--r--fs/xfs/scrub/bmap.c4
-rw-r--r--fs/xfs/scrub/dabtree.c42
-rw-r--r--fs/xfs/scrub/dir.c13
-rw-r--r--fs/xfs/scrub/ialloc.c8
-rw-r--r--fs/xfs/scrub/refcount.c2
-rw-r--r--fs/xfs/scrub/repair.c28
-rw-r--r--fs/xfs/scrub/repair.h6
-rw-r--r--fs/xfs/scrub/rmap.c2
-rw-r--r--fs/xfs/scrub/scrub.c9
-rw-r--r--fs/xfs/scrub/trace.c4
-rw-r--r--fs/xfs/scrub/trace.h4
-rw-r--r--fs/xfs/xfs_acl.c132
-rw-r--r--fs/xfs/xfs_acl.h6
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_attr_inactive.c6
-rw-r--r--fs/xfs/xfs_attr_list.c169
-rw-r--r--fs/xfs/xfs_bmap_util.c73
-rw-r--r--fs/xfs/xfs_buf.c29
-rw-r--r--fs/xfs/xfs_buf.h2
-rw-r--r--fs/xfs/xfs_buf_item.c2
-rw-r--r--fs/xfs/xfs_dir2_readdir.c12
-rw-r--r--fs/xfs/xfs_discard.c7
-rw-r--r--fs/xfs/xfs_dquot.c4
-rw-r--r--fs/xfs/xfs_dquot_item.c44
-rw-r--r--fs/xfs/xfs_dquot_item.h1
-rw-r--r--fs/xfs/xfs_error.c7
-rw-r--r--fs/xfs/xfs_error.h2
-rw-r--r--fs/xfs/xfs_fsmap.c13
-rw-r--r--fs/xfs/xfs_icache.c4
-rw-r--r--fs/xfs/xfs_inode.c57
-rw-r--r--fs/xfs/xfs_inode_item.c16
-rw-r--r--fs/xfs/xfs_ioctl.c355
-rw-r--r--fs/xfs/xfs_ioctl.h35
-rw-r--r--fs/xfs/xfs_ioctl32.c99
-rw-r--r--fs/xfs/xfs_iops.c25
-rw-r--r--fs/xfs/xfs_itable.c6
-rw-r--r--fs/xfs/xfs_linux.h27
-rw-r--r--fs/xfs/xfs_log.c472
-rw-r--r--fs/xfs/xfs_log.h5
-rw-r--r--fs/xfs/xfs_log_cil.c58
-rw-r--r--fs/xfs/xfs_log_priv.h9
-rw-r--r--fs/xfs/xfs_log_recover.c18
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_qm.c55
-rw-r--r--fs/xfs/xfs_qm_syscalls.c13
-rw-r--r--fs/xfs/xfs_quota.h4
-rw-r--r--fs/xfs/xfs_stats.c10
-rw-r--r--fs/xfs/xfs_symlink.c6
-rw-r--r--fs/xfs/xfs_trace.c2
-rw-r--r--fs/xfs/xfs_trace.h209
-rw-r--r--fs/xfs/xfs_trans.c7
-rw-r--r--fs/xfs/xfs_trans_ail.c5
-rw-r--r--fs/xfs/xfs_xattr.c92
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl2.h21
-rw-r--r--include/acpi/actbl3.h6
-rw-r--r--include/acpi/acuuid.h2
-rw-r--r--include/acpi/processor.h8
-rw-r--r--include/asm-generic/Kbuild52
-rw-r--r--include/asm-generic/gpio.h4
-rw-r--r--include/asm-generic/pgtable.h1
-rw-r--r--include/asm-generic/pgtable_uffd.h66
-rw-r--r--include/asm-generic/tlb.h3
-rw-r--r--include/crypto/aead.h48
-rw-r--r--include/crypto/if_alg.h2
-rw-r--r--include/drm/bridge/dw_hdmi.h1
-rw-r--r--include/drm/bridge/mhl.h4
-rw-r--r--include/drm/drm_atomic.h76
-rw-r--r--include/drm/drm_atomic_helper.h8
-rw-r--r--include/drm/drm_atomic_state_helper.h13
-rw-r--r--include/drm/drm_bridge.h405
-rw-r--r--include/drm/drm_bridge_connector.h18
-rw-r--r--include/drm/drm_client.h7
-rw-r--r--include/drm/drm_connector.h46
-rw-r--r--include/drm/drm_crtc.h80
-rw-r--r--include/drm/drm_device.h2
-rw-r--r--include/drm/drm_dp_helper.h26
-rw-r--r--include/drm/drm_dp_mst_helper.h17
-rw-r--r--include/drm/drm_drv.h194
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_encoder.h3
-rw-r--r--include/drm/drm_fb_helper.h27
-rw-r--r--include/drm/drm_file.h10
-rw-r--r--include/drm/drm_gem_vram_helper.h9
-rw-r--r--include/drm/drm_hdcp.h6
-rw-r--r--include/drm/drm_legacy.h6
-rw-r--r--include/drm/drm_mipi_dbi.h12
-rw-r--r--include/drm/drm_mm.h2
-rw-r--r--include/drm/drm_modes.h11
-rw-r--r--include/drm/drm_modeset_helper_vtables.h63
-rw-r--r--include/drm/drm_panel.h3
-rw-r--r--include/drm/drm_pci.h11
-rw-r--r--include/drm/drm_print.h78
-rw-r--r--include/drm/drm_simple_kms_helper.h11
-rw-r--r--include/drm/drm_vblank.h36
-rw-r--r--include/drm/gpu_scheduler.h13
-rw-r--r--include/drm/i915_mei_hdcp_interface.h1
-rw-r--r--include/drm/ttm/ttm_bo_api.h14
-rw-r--r--include/drm/ttm/ttm_bo_driver.h15
-rw-r--r--include/dt-bindings/arm/coresight-cti-dt.h37
-rw-r--r--include/dt-bindings/bus/ti-sysc.h4
-rw-r--r--include/dt-bindings/clock/dm814.h5
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h2
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h1
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h3
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h11
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h8
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h5
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h9
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h13
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7180.h7
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8150.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8250.h271
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc7180.h3
-rw-r--r--include/dt-bindings/clock/qcom,mss-sc7180.h12
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h4
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h4
-rw-r--r--include/dt-bindings/clock/sprd,sc9863a-clk.h334
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h2
-rw-r--r--include/dt-bindings/clock/tegra114-car.h4
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h4
-rw-r--r--include/dt-bindings/clock/tegra210-car.h4
-rw-r--r--include/dt-bindings/clock/tegra30-car.h4
-rw-r--r--include/dt-bindings/interconnect/qcom,osm-l3.h12
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7180.h161
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm845.h263
-rw-r--r--include/dt-bindings/power/meson-a1-power.h32
-rw-r--r--include/dt-bindings/soc/tegra-pmc.h16
-rw-r--r--include/dt-bindings/sound/meson-aiu.h18
-rw-r--r--include/dt-bindings/sound/meson-g12a-toacodec.h10
-rw-r--r--include/keys/big_key-type.h2
-rw-r--r--include/keys/user-type.h3
-rw-r--r--include/kunit/test.h63
-rw-r--r--include/kvm/arm_vgic.h3
-rw-r--r--include/linux/acpi.h11
-rw-r--r--include/linux/aer.h9
-rw-r--r--include/linux/binfmts.h8
-rw-r--r--include/linux/bitmap.h8
-rw-r--r--include/linux/bitops.h4
-rw-r--r--include/linux/bits.h22
-rw-r--r--include/linux/bootconfig.h3
-rw-r--r--include/linux/cgroup-defs.h10
-rw-r--r--include/linux/cgroup.h23
-rw-r--r--include/linux/clk/at91_pmc.h23
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/compiler_types.h11
-rw-r--r--include/linux/coresight.h27
-rw-r--r--include/linux/debugfs.h1
-rw-r--r--include/linux/dma-buf.h97
-rw-r--r--include/linux/dma-noncoherent.h4
-rw-r--r--include/linux/dmaengine.h68
-rw-r--r--include/linux/err.h3
-rw-r--r--include/linux/extcon-provider.h28
-rw-r--r--include/linux/extcon.h5
-rw-r--r--include/linux/f2fs_fs.h1
-rw-r--r--include/linux/fanotify.h3
-rw-r--r--include/linux/firmware/imx/ipc.h1
-rw-r--r--include/linux/firmware/meson/meson_sm.h2
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h4
-rw-r--r--include/linux/fs.h7
-rw-r--r--include/linux/fsl/mc.h16
-rw-r--r--include/linux/fsnotify.h138
-rw-r--r--include/linux/fsnotify_backend.h70
-rw-r--r--include/linux/gfp.h8
-rw-r--r--include/linux/gpio.h2
-rw-r--r--include/linux/gpio/consumer.h13
-rw-r--r--include/linux/gpio/driver.h139
-rw-r--r--include/linux/hashtable.h4
-rw-r--r--include/linux/hdmi.h2
-rw-r--r--include/linux/hmm.h125
-rw-r--r--include/linux/huge_mm.h53
-rw-r--r--include/linux/hugetlb.h76
-rw-r--r--include/linux/hugetlb_cgroup.h169
-rw-r--r--include/linux/i2c-smbus.h9
-rw-r--r--include/linux/i2c.h8
-rw-r--r--include/linux/ima.h3
-rw-r--r--include/linux/irqdesc.h2
-rw-r--r--include/linux/irqflags.h27
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/kernfs.h11
-rw-r--r--include/linux/key-type.h2
-rw-r--r--include/linux/kfifo.h73
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/kvm_host.h71
-rw-r--r--include/linux/list.h2
-rw-r--r--include/linux/memcontrol.h42
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/memory_hotplug.h13
-rw-r--r--include/linux/mempolicy.h29
-rw-r--r--include/linux/memremap.h6
-rw-r--r--include/linux/mfd/wm8994/pdata.h2
-rw-r--r--include/linux/mhi.h700
-rw-r--r--include/linux/migrate.h8
-rw-r--r--include/linux/miscdevice.h14
-rw-r--r--include/linux/mlx5/device.h6
-rw-r--r--include/linux/mlx5/driver.h17
-rw-r--r--include/linux/mlx5/fs.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h8
-rw-r--r--include/linux/mm.h281
-rw-r--r--include/linux/mm_inline.h15
-rw-r--r--include/linux/mm_types.h11
-rw-r--r--include/linux/mmzone.h53
-rw-r--r--include/linux/mod_devicetable.h13
-rw-r--r--include/linux/mtd/mtd.h125
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/mtd/rawnand.h11
-rw-r--r--include/linux/mtd/spi-nor.h285
-rw-r--r--include/linux/mtd/spinand.h67
-rw-r--r--include/linux/namei.h4
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_page.h5
-rw-r--r--include/linux/nfs_xdr.h32
-rw-r--r--include/linux/nvmem-consumer.h7
-rw-r--r--include/linux/of_address.h13
-rw-r--r--include/linux/of_gpio.h9
-rw-r--r--include/linux/page-flags.h16
-rw-r--r--include/linux/page_ref.h9
-rw-r--r--include/linux/page_reporting.h26
-rw-r--r--include/linux/pagemap.h29
-rw-r--r--include/linux/pci-acpi.h8
-rw-r--r--include/linux/pci-epc.h27
-rw-r--r--include/linux/pci-epf.h29
-rw-r--r--include/linux/pci.h10
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/percpu_counter.h4
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pid.h1
-rw-r--r--include/linux/pid_namespace.h2
-rw-r--r--include/linux/pinctrl/pinctrl.h2
-rw-r--r--include/linux/platform_data/pm33xx.h6
-rw-r--r--include/linux/platform_data/remoteproc-omap.h51
-rw-r--r--include/linux/platform_data/simplefb.h2
-rw-r--r--include/linux/platform_data/ti-sysc.h5
-rw-r--r--include/linux/power/charger-manager.h2
-rw-r--r--include/linux/proc_fs.h21
-rw-r--r--include/linux/proc_ns.h5
-rw-r--r--include/linux/random.h22
-rw-r--r--include/linux/remoteproc.h16
-rw-r--r--include/linux/ring_buffer.h4
-rw-r--r--include/linux/rtc.h12
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/sched/signal.h25
-rw-r--r--include/linux/sched/task.h4
-rw-r--r--include/linux/seq_file.h1
-rw-r--r--include/linux/shmem_fs.h10
-rw-r--r--include/linux/skbuff.h38
-rw-r--r--include/linux/soc/qcom/apr.h1
-rw-r--r--include/linux/soc/qcom/pdr.h29
-rw-r--r--include/linux/soc/qcom/qmi.h1
-rw-r--r--include/linux/soundwire/sdw.h49
-rw-r--r--include/linux/stackdepot.h2
-rw-r--r--include/linux/sunrpc/cache.h9
-rw-r--r--include/linux/sunrpc/rpc_rdma.h3
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sunrpc/svc.h5
-rw-r--r--include/linux/sunrpc/svc_rdma.h24
-rw-r--r--include/linux/sunrpc/svc_xprt.h2
-rw-r--r--include/linux/sunrpc/xdr.h68
-rw-r--r--include/linux/suspend.h34
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/swapops.h5
-rw-r--r--include/linux/sysfs.h15
-rw-r--r--include/linux/tee_drv.h19
-rw-r--r--include/linux/topology.h17
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/uacce.h163
-rw-r--r--include/linux/uio_driver.h33
-rw-r--r--include/linux/unaligned/be_byteshift.h6
-rw-r--r--include/linux/unaligned/generic.h46
-rw-r--r--include/linux/unaligned/le_byteshift.h6
-rw-r--r--include/linux/usb/audio-v2.h12
-rw-r--r--include/linux/userfaultfd_k.h42
-rw-r--r--include/linux/vfio.h17
-rw-r--r--include/linux/vm_event_item.h5
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--include/linux/xarray.h10
-rw-r--r--include/linux/xattr.h3
-rw-r--r--include/net/9p/client.h4
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/sock.h6
-rw-r--r--include/rdma/ib_cache.h1
-rw-r--r--include/rdma/ib_cm.h1
-rw-r--r--include/rdma/ib_fmr_pool.h2
-rw-r--r--include/rdma/ib_verbs.h49
-rw-r--r--include/rdma/opa_vnic.h2
-rw-r--r--include/rdma/rdmavt_mr.h2
-rw-r--r--include/rdma/rdmavt_qp.h2
-rw-r--r--include/rdma/uverbs_ioctl.h2
-rw-r--r--include/scsi/iscsi_if.h11
-rw-r--r--include/scsi/scsi_bsg_iscsi.h2
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/scsi/scsi_device.h8
-rw-r--r--include/scsi/scsi_host.h17
-rw-r--r--include/scsi/scsi_ioctl.h2
-rw-r--r--include/scsi/scsi_transport_iscsi.h10
-rw-r--r--include/scsi/sg.h2
-rw-r--r--include/scsi/srp.h8
-rw-r--r--include/soc/fsl/dpaa2-io.h6
-rw-r--r--include/soc/fsl/qe/ucc_fast.h6
-rw-r--r--include/soc/fsl/qe/ucc_slow.h13
-rw-r--r--include/soc/qcom/rpmh.h5
-rw-r--r--include/soc/tegra/bpmp-abi.h10
-rw-r--r--include/soc/tegra/cpuidle.h2
-rw-r--r--include/soc/tegra/irq.h (renamed from arch/arm/mach-tegra/irq.h)8
-rw-r--r--include/soc/tegra/pm.h31
-rw-r--r--include/soc/tegra/pmc.h3
-rw-r--r--include/sound/compress_driver.h40
-rw-r--r--include/sound/core.h1
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/sound/pcm.h23
-rw-r--r--include/sound/pcm_params.h7
-rw-r--r--include/sound/rt5682.h10
-rw-r--r--include/sound/soc-acpi.h39
-rw-r--r--include/sound/soc-dai.h44
-rw-r--r--include/sound/soc-dapm.h6
-rw-r--r--include/sound/soc-dpcm.h20
-rw-r--r--include/sound/soc.h44
-rw-r--r--include/sound/sof/dai-intel.h18
-rw-r--r--include/sound/sof/header.h11
-rw-r--r--include/sound/sof/info.h22
-rw-r--r--include/sound/sof/topology.h3
-rw-r--r--include/target/iscsi/iscsi_target_core.h2
-rw-r--r--include/target/target_core_backend.h6
-rw-r--r--include/target/target_core_base.h45
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/gpu_mem.h57
-rw-r--r--include/trace/events/huge_memory.h1
-rw-r--r--include/trace/events/mmap.h48
-rw-r--r--include/trace/events/mmflags.h1
-rw-r--r--include/trace/events/qla.h39
-rw-r--r--include/trace/events/rpcgss.h59
-rw-r--r--include/trace/events/rpcrdma.h220
-rw-r--r--include/trace/events/sunrpc.h76
-rw-r--r--include/trace/events/target.h11
-rw-r--r--include/trace/events/vmscan.h2
-rw-r--r--include/uapi/drm/amdgpu_drm.h5
-rw-r--r--include/uapi/drm/drm.h2
-rw-r--r--include/uapi/drm/i915_drm.h21
-rw-r--r--include/uapi/drm/lima_drm.h9
-rw-r--r--include/uapi/drm/vmwgfx_drm.h16
-rw-r--r--include/uapi/linux/coresight-stm.h6
-rw-r--r--include/uapi/linux/fanotify.h13
-rw-r--r--include/uapi/linux/gpio.h38
-rw-r--r--include/uapi/linux/idxd.h21
-rw-r--r--include/uapi/linux/kvm.h47
-rw-r--r--include/uapi/linux/mman.h5
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/netfilter/xt_IDLETIMER.h1
-rw-r--r--include/uapi/linux/pci_regs.h2
-rw-r--r--include/uapi/linux/pcitest.h8
-rw-r--r--include/uapi/linux/perf_event.h16
-rw-r--r--include/uapi/linux/rtc.h11
-rw-r--r--include/uapi/linux/sched.h5
-rw-r--r--include/uapi/linux/um_timetravel.h128
-rw-r--r--include/uapi/linux/userfaultfd.h40
-rw-r--r--include/uapi/linux/vfio.h37
-rw-r--r--include/uapi/linux/virtio_balloon.h1
-rw-r--r--include/uapi/misc/uacce/hisi_qm.h23
-rw-r--r--include/uapi/misc/uacce/uacce.h38
-rw-r--r--include/uapi/rdma/mlx5-abi.h6
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h35
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h10
-rw-r--r--include/uapi/scsi/fc/fc_els.h211
-rw-r--r--include/uapi/scsi/scsi_bsg_fc.h2
-rw-r--r--include/uapi/sound/asoc.h1
-rw-r--r--include/uapi/sound/compress_offload.h2
-rw-r--r--include/uapi/sound/compress_params.h37
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--include/video/mmp_disp.h2
-rw-r--r--include/video/samsung_fimd.h2
-rw-r--r--include/xen/xenbus.h7
-rw-r--r--init/Kconfig11
-rw-r--r--init/init_task.c1
-rw-r--r--init/main.c14
-rw-r--r--ipc/mqueue.c5
-rw-r--r--ipc/shm.c2
-rw-r--r--ipc/util.c1
-rw-r--r--kernel/.gitignore4
-rw-r--r--kernel/audit_fsnotify.c13
-rw-r--r--kernel/audit_watch.c16
-rw-r--r--kernel/cgroup/cgroup-v1.c34
-rw-r--r--kernel/cgroup/cgroup.c378
-rw-r--r--kernel/cgroup/cpuset.c8
-rw-r--r--kernel/cgroup/pids.c15
-rw-r--r--kernel/configs/tiny.config1
-rw-r--r--kernel/cred.c2
-rw-r--r--kernel/debug/kdb/.gitignore1
-rw-r--r--kernel/debug/kdb/kdb_main.c20
-rw-r--r--kernel/dma/coherent.c13
-rw-r--r--kernel/dma/direct.c25
-rw-r--r--kernel/events/core.c148
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/extable.c3
-rw-r--r--kernel/fork.c43
-rw-r--r--kernel/gcov/fs.c2
-rw-r--r--kernel/gcov/gcc_3_4.c6
-rw-r--r--kernel/gcov/gcc_4_7.c2
-rw-r--r--kernel/kallsyms.c2
-rw-r--r--kernel/kcmp.c8
-rw-r--r--kernel/kmod.c2
-rw-r--r--kernel/module.c1
-rw-r--r--kernel/padata.c9
-rw-r--r--kernel/pid.c13
-rw-r--r--kernel/pid_namespace.c38
-rw-r--r--kernel/power/Kconfig3
-rw-r--r--kernel/power/hibernate.c2
-rw-r--r--kernel/power/main.c7
-rw-r--r--kernel/power/snapshot.c18
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/sysctl.c29
-rw-r--r--kernel/time/hrtimer.c5
-rw-r--r--kernel/trace/ftrace.c200
-rw-r--r--kernel/trace/ring_buffer.c239
-rw-r--r--kernel/trace/trace.c110
-rw-r--r--kernel/trace/trace.h39
-rw-r--r--kernel/trace/trace_entries.h4
-rw-r--r--kernel/trace/trace_events.c280
-rw-r--r--kernel/trace/trace_functions_graph.c2
-rw-r--r--kernel/trace/trace_hwlat.c24
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/trace/trace_output.c19
-rw-r--r--kernel/workqueue.c12
-rw-r--r--lib/.gitignore4
-rw-r--r--lib/Kconfig.debug33
-rw-r--r--lib/Kconfig.ubsan49
-rw-r--r--lib/Makefile8
-rw-r--r--lib/bch.c2
-rw-r--r--lib/bootconfig.c35
-rw-r--r--lib/dynamic_debug.c30
-rw-r--r--lib/kunit/Kconfig8
-rw-r--r--lib/kunit/Makefile4
-rw-r--r--lib/kunit/assert.c79
-rw-r--r--lib/kunit/debugfs.c116
-rw-r--r--lib/kunit/debugfs.h30
-rw-r--r--lib/kunit/kunit-test.c44
-rw-r--r--lib/kunit/test.c148
-rw-r--r--lib/list-test.c4
-rw-r--r--lib/percpu-refcount.c7
-rw-r--r--lib/radix-tree.c8
-rw-r--r--lib/raid6/.gitignore1
-rw-r--r--lib/rbtree.c4
-rw-r--r--lib/scatterlist.c2
-rw-r--r--lib/stackdepot.c39
-rw-r--r--lib/test_bitmap.c2
-rw-r--r--lib/test_kasan.c19
-rw-r--r--lib/test_kmod.c2
-rw-r--r--lib/test_lockup.c599
-rw-r--r--lib/test_stackinit.c28
-rw-r--r--lib/test_xarray.c55
-rw-r--r--lib/ts_bm.c2
-rw-r--r--lib/ts_fsm.c2
-rw-r--r--lib/ts_kmp.c2
-rw-r--r--lib/ubsan.c47
-rw-r--r--lib/xarray.c9
-rw-r--r--mm/Kconfig135
-rw-r--r--mm/Makefile2
-rw-r--r--mm/compaction.c32
-rw-r--r--mm/debug.c44
-rw-r--r--mm/dmapool.c4
-rw-r--r--mm/filemap.c85
-rw-r--r--mm/gup.c667
-rw-r--r--mm/gup_benchmark.c71
-rw-r--r--mm/hmm.c470
-rw-r--r--mm/huge_memory.c109
-rw-r--r--mm/hugetlb.c803
-rw-r--r--mm/hugetlb_cgroup.c317
-rw-r--r--mm/internal.h34
-rw-r--r--mm/kasan/common.c49
-rw-r--r--mm/kasan/generic.c9
-rw-r--r--mm/kasan/generic_report.c11
-rw-r--r--mm/kasan/kasan.h2
-rw-r--r--mm/kasan/report.c15
-rw-r--r--mm/kasan/tags.c9
-rw-r--r--mm/kasan/tags_report.c11
-rw-r--r--mm/khugepaged.c43
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/ksm.c5
-rw-r--r--mm/list_lru.c14
-rw-r--r--mm/mapping_dirty_helpers.c42
-rw-r--r--mm/memblock.c2
-rw-r--r--mm/memcontrol.c347
-rw-r--r--mm/memory-failure.c31
-rw-r--r--mm/memory.c69
-rw-r--r--mm/memory_hotplug.c49
-rw-r--r--mm/mempolicy.c84
-rw-r--r--mm/memremap.c4
-rw-r--r--mm/migrate.c152
-rw-r--r--mm/mm_init.c2
-rw-r--r--mm/mmap.c38
-rw-r--r--mm/mprotect.c76
-rw-r--r--mm/mremap.c92
-rw-r--r--mm/page-writeback.c19
-rw-r--r--mm/page_alloc.c246
-rw-r--r--mm/page_counter.c23
-rw-r--r--mm/page_ext.c5
-rw-r--r--mm/page_isolation.c6
-rw-r--r--mm/page_reporting.c364
-rw-r--r--mm/page_reporting.h54
-rw-r--r--mm/percpu-stats.c2
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/process_vm_access.c2
-rw-r--r--mm/rmap.c62
-rw-r--r--mm/shmem.c168
-rw-r--r--mm/shuffle.c14
-rw-r--r--mm/shuffle.h6
-rw-r--r--mm/slab.h22
-rw-r--r--mm/slab_common.c3
-rw-r--r--mm/slub.c30
-rw-r--r--mm/sparse.c163
-rw-r--r--mm/swap.c25
-rw-r--r--mm/swap_slots.c12
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/swapfile.c11
-rw-r--r--mm/userfaultfd.c105
-rw-r--r--mm/vmalloc.c2
-rw-r--r--mm/vmpressure.c8
-rw-r--r--mm/vmscan.c123
-rw-r--r--mm/vmstat.c5
-rw-r--r--mm/zsmalloc.c10
-rw-r--r--mm/zswap.c24
-rw-r--r--net/9p/client.c134
-rw-r--r--net/bpfilter/.gitignore1
-rw-r--r--net/core/dev.c3
-rw-r--r--net/core/neighbour.c10
-rw-r--r--net/core/net-sysfs.c2
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dns_resolver/dns_key.c2
-rw-r--r--net/hsr/hsr_netlink.c10
-rw-r--r--net/ipv4/devinet.c13
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/icmp.c21
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/rpl.c6
-rw-r--r--net/l2tp/l2tp_netlink.c16
-rw-r--r--net/mptcp/options.c2
-rw-r--r--net/mptcp/pm.c2
-rw-r--r--net/mptcp/pm_netlink.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c3
-rw-r--r--net/netfilter/nf_tables_api.c7
-rw-r--r--net/netfilter/nft_lookup.c12
-rw-r--r--net/netfilter/nft_set_bitmap.c1
-rw-r--r--net/netfilter/nft_set_rbtree.c23
-rw-r--r--net/netfilter/xt_IDLETIMER.c3
-rw-r--r--net/openvswitch/flow_table.c10
-rw-r--r--net/qrtr/qrtr.c7
-rw-r--r--net/rds/message.c6
-rw-r--r--net/rds/rdma.c53
-rw-r--r--net/rds/rds.h17
-rw-r--r--net/rxrpc/key.c27
-rw-r--r--net/sched/cls_api.c1
-rw-r--r--net/sched/cls_tcindex.c1
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c96
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c60
-rw-r--r--net/sunrpc/cache.c128
-rw-r--r--net/sunrpc/clnt.c9
-rw-r--r--net/sunrpc/sched.c22
-rw-r--r--net/sunrpc/socklib.c141
-rw-r--r--net/sunrpc/socklib.h15
-rw-r--r--net/sunrpc/sunrpc.h4
-rw-r--r--net/sunrpc/svc.c20
-rw-r--r--net/sunrpc/svc_xprt.c22
-rw-r--r--net/sunrpc/svcauth_unix.c12
-rw-r--r--net/sunrpc/svcsock.c202
-rw-r--r--net/sunrpc/xdr.c55
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c8
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c154
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c68
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c17
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c244
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c57
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c512
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c8
-rw-r--r--net/sunrpc/xprtrdma/transport.c72
-rw-r--r--net/sunrpc/xprtrdma/verbs.c683
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h89
-rw-r--r--net/sunrpc/xprtsock.c190
-rw-r--r--net/tls/tls_main.c4
-rw-r--r--net/wireless/.gitignore1
-rw-r--r--samples/auxdisplay/.gitignore1
-rw-r--r--samples/bpf/.gitignore1
-rw-r--r--samples/connector/.gitignore1
-rw-r--r--samples/hidraw/.gitignore1
-rw-r--r--samples/hw_breakpoint/data_breakpoint.c11
-rw-r--r--samples/mei/.gitignore1
-rw-r--r--samples/mic/mpssd/.gitignore1
-rw-r--r--samples/pidfd/.gitignore1
-rw-r--r--samples/seccomp/.gitignore1
-rw-r--r--samples/timers/.gitignore1
-rw-r--r--samples/vfs/.gitignore1
-rw-r--r--samples/watchdog/.gitignore1
-rw-r--r--scripts/.gitignore4
-rw-r--r--scripts/Makefile.ubsan16
-rw-r--r--scripts/basic/.gitignore1
-rwxr-xr-xscripts/checkpatch.pl155
-rwxr-xr-xscripts/documentation-file-ref-check2
-rw-r--r--scripts/dtc/.gitignore1
-rw-r--r--scripts/dtc/Makefile.dtc23
-rw-r--r--scripts/dtc/checks.c25
-rw-r--r--scripts/dtc/libfdt/Makefile.libfdt18
-rw-r--r--scripts/dtc/libfdt/fdt.c99
-rw-r--r--scripts/dtc/libfdt/fdt_ro.c143
-rw-r--r--scripts/dtc/libfdt/fdt_rw.c42
-rw-r--r--scripts/dtc/libfdt/fdt_sw.c19
-rw-r--r--scripts/dtc/libfdt/libfdt.h9
-rw-r--r--scripts/dtc/libfdt/libfdt_internal.h122
-rwxr-xr-xscripts/dtc/update-dtc-source.sh4
-rw-r--r--scripts/dtc/version_gen.h2
-rw-r--r--scripts/gcc-plugins/.gitignore1
-rw-r--r--scripts/gdb/linux/.gitignore1
-rw-r--r--scripts/genksyms/.gitignore1
-rw-r--r--scripts/kconfig/.gitignore4
-rw-r--r--scripts/mod/.gitignore1
-rw-r--r--scripts/mod/devicetable-offsets.c3
-rw-r--r--scripts/mod/file2alias.c10
-rw-r--r--scripts/selinux/genheaders/.gitignore1
-rw-r--r--scripts/selinux/mdp/.gitignore2
-rw-r--r--scripts/spelling.txt21
-rwxr-xr-xscripts/ver_linux24
-rw-r--r--security/apparmor/.gitignore4
-rw-r--r--security/integrity/digsig.c2
-rw-r--r--security/integrity/digsig_asymmetric.c2
-rw-r--r--security/integrity/evm/evm_crypto.c2
-rw-r--r--security/integrity/evm/evm_main.c2
-rw-r--r--security/integrity/evm/evm_secfs.c2
-rw-r--r--security/integrity/ima/Kconfig7
-rw-r--r--security/integrity/ima/Makefile6
-rw-r--r--security/integrity/ima/ima_asymmetric_keys.c2
-rw-r--r--security/integrity/ima/ima_crypto.c2
-rw-r--r--security/integrity/ima/ima_fs.c2
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_kexec.c1
-rw-r--r--security/integrity/ima/ima_main.c5
-rw-r--r--security/integrity/ima/ima_policy.c2
-rw-r--r--security/integrity/ima/ima_queue.c2
-rw-r--r--security/integrity/ima/ima_queue_keys.c2
-rw-r--r--security/integrity/ima/ima_template.c2
-rw-r--r--security/integrity/ima/ima_template_lib.c2
-rw-r--r--security/integrity/integrity.h6
-rw-r--r--security/keys/big_key.c11
-rw-r--r--security/keys/encrypted-keys/encrypted.c7
-rw-r--r--security/keys/internal.h12
-rw-r--r--security/keys/keyctl.c103
-rw-r--r--security/keys/keyring.c6
-rw-r--r--security/keys/request_key_auth.c7
-rw-r--r--security/keys/trusted-keys/trusted_tpm1.c14
-rw-r--r--security/keys/user_defined.c5
-rw-r--r--security/selinux/.gitignore1
-rw-r--r--security/tomoyo/.gitignore1
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c8
-rw-r--r--sound/core/compress_offload.c42
-rw-r--r--sound/core/device.c21
-rw-r--r--sound/core/info.c2
-rw-r--r--sound/core/oss/pcm_oss.c23
-rw-r--r--sound/core/oss/pcm_plugin.c120
-rw-r--r--sound/core/oss/rate.c2
-rw-r--r--sound/core/pcm.c2
-rw-r--r--sound/core/pcm_dmaengine.c8
-rw-r--r--sound/core/pcm_misc.c35
-rw-r--r--sound/core/pcm_native.c47
-rw-r--r--sound/drivers/aloop.c6
-rw-r--r--sound/drivers/dummy.c6
-rw-r--r--sound/firewire/bebob/bebob.c2
-rw-r--r--sound/firewire/digi00x/digi00x.c2
-rw-r--r--sound/firewire/fireface/ff.c2
-rw-r--r--sound/firewire/fireworks/fireworks.c2
-rw-r--r--sound/firewire/tascam/tascam-hwdep.c2
-rw-r--r--sound/firewire/tascam/tascam.c2
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/isa/sb/emu8000_pcm.c4
-rw-r--r--sound/oss/.gitignore2
-rw-r--r--sound/pci/ali5451/ali5451.c6
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c4
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_controller.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c3
-rw-r--r--sound/pci/hda/patch_hdmi.c313
-rw-r--r--sound/pci/hda/patch_realtek.c231
-rw-r--r--sound/pci/korg1212/korg1212.c2
-rw-r--r--sound/pci/rme9652/hdsp.c3
-rw-r--r--sound/pci/via82xx.c6
-rw-r--r--sound/pci/via82xx_modem.c6
-rw-r--r--sound/ppc/keywest.c9
-rw-r--r--sound/soc/amd/Kconfig10
-rw-r--r--sound/soc/amd/Makefile2
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c2
-rw-r--r--sound/soc/amd/acp-rt5645.c4
-rw-r--r--sound/soc/amd/acp3x-rt5682-max9836.c376
-rw-r--r--sound/soc/amd/raven/acp3x-i2s.c44
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c6
-rw-r--r--sound/soc/amd/raven/pci-acp3x.c7
-rw-r--r--sound/soc/atmel/atmel-pcm-dma.c4
-rw-r--r--sound/soc/atmel/atmel-pcm-pdc.c2
-rw-r--r--sound/soc/atmel/atmel_wm8904.c2
-rw-r--r--sound/soc/atmel/mchp-i2s-mcc.c8
-rw-r--r--sound/soc/atmel/mikroe-proto.c2
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c2
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c2
-rw-r--r--sound/soc/au1x/db1200.c2
-rw-r--r--sound/soc/au1x/dbdma2.c2
-rw-r--r--sound/soc/au1x/dma.c2
-rw-r--r--sound/soc/au1x/psc-ac97.c2
-rw-r--r--sound/soc/bcm/Kconfig9
-rw-r--r--sound/soc/bcm/Makefile4
-rw-r--r--sound/soc/bcm/bcm63xx-i2s-whistler.c317
-rw-r--r--sound/soc/bcm/bcm63xx-i2s.h90
-rw-r--r--sound/soc/bcm/bcm63xx-pcm-whistler.c485
-rw-r--r--sound/soc/bcm/cygnus-pcm.c22
-rw-r--r--sound/soc/cirrus/edb93xx.c4
-rw-r--r--sound/soc/cirrus/snappercl15.c4
-rw-r--r--sound/soc/codecs/Kconfig627
-rw-r--r--sound/soc/codecs/Makefile4
-rw-r--r--sound/soc/codecs/cros_ec_codec.c25
-rw-r--r--sound/soc/codecs/cs4271.c4
-rw-r--r--sound/soc/codecs/cs47l15.c4
-rw-r--r--sound/soc/codecs/cs47l24.c6
-rw-r--r--sound/soc/codecs/cs47l35.c6
-rw-r--r--sound/soc/codecs/cs47l85.c6
-rw-r--r--sound/soc/codecs/cs47l90.c6
-rw-r--r--sound/soc/codecs/cs47l92.c4
-rw-r--r--sound/soc/codecs/hdac_hdmi.c6
-rw-r--r--sound/soc/codecs/max98357a.c37
-rw-r--r--sound/soc/codecs/mt6660.c81
-rw-r--r--sound/soc/codecs/rk3328_codec.c31
-rw-r--r--sound/soc/codecs/rl6231.c1
-rw-r--r--sound/soc/codecs/rl6231.h2
-rw-r--r--sound/soc/codecs/rt1015.c10
-rw-r--r--sound/soc/codecs/rt1308-sdw.c38
-rw-r--r--sound/soc/codecs/rt1308-sdw.h2
-rw-r--r--sound/soc/codecs/rt5659.c2
-rw-r--r--sound/soc/codecs/rt5682-sdw.c333
-rw-r--r--sound/soc/codecs/rt5682-sdw.h20
-rw-r--r--sound/soc/codecs/rt5682.c1298
-rw-r--r--sound/soc/codecs/rt5682.h100
-rw-r--r--sound/soc/codecs/tas2562.c121
-rw-r--r--sound/soc/codecs/tas2562.h12
-rw-r--r--sound/soc/codecs/tlv320adcx140.c920
-rw-r--r--sound/soc/codecs/tlv320adcx140.h131
-rw-r--r--sound/soc/codecs/wcd9335.c18
-rw-r--r--sound/soc/codecs/wcd9335.h7
-rw-r--r--sound/soc/codecs/wcd934x.c37
-rw-r--r--sound/soc/codecs/wm0010.c2
-rw-r--r--sound/soc/codecs/wm5110.c6
-rw-r--r--sound/soc/codecs/wm8974.c8
-rw-r--r--sound/soc/codecs/wm_adsp.c14
-rw-r--r--sound/soc/codecs/wsa881x.c46
-rw-r--r--sound/soc/dwc/dwc-i2s.c8
-rw-r--r--sound/soc/dwc/dwc-pcm.c2
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c4
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c10
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c10
-rw-r--r--sound/soc/fsl/fsl_spdif.c10
-rw-r--r--sound/soc/fsl/fsl_ssi.c8
-rw-r--r--sound/soc/fsl/imx-audmix.c8
-rw-r--r--sound/soc/fsl/imx-mc13783.c4
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c2
-rw-r--r--sound/soc/fsl/mpc5200_dma.c10
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c2
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c4
-rw-r--r--sound/soc/fsl/mx27vis-aic32x4.c4
-rw-r--r--sound/soc/fsl/p1022_ds.c4
-rw-r--r--sound/soc/fsl/p1022_rdk.c4
-rw-r--r--sound/soc/fsl/wm1133-ev1.c6
-rw-r--r--sound/soc/generic/simple-card-utils.c60
-rw-r--r--sound/soc/img/img-i2s-in.c2
-rw-r--r--sound/soc/img/img-i2s-out.c2
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c6
-rw-r--r--sound/soc/intel/atom/sst/sst_pci.c2
-rw-r--r--sound/soc/intel/boards/Kconfig57
-rw-r--r--sound/soc/intel/boards/Makefile12
-rw-r--r--sound/soc/intel/boards/bdw-rt5650.c15
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c15
-rw-r--r--sound/soc/intel/boards/broadwell.c13
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c10
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c10
-rw-r--r--sound/soc/intel/boards/byt-max98090.c2
-rw-r--r--sound/soc/intel/boards/byt-rt5640.c4
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c10
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c10
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c8
-rw-r--r--sound/soc/intel/boards/bytcht_nocodec.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c8
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c8
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c8
-rw-r--r--sound/soc/intel/boards/cht_bsw_nau8824.c6
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c16
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c8
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c13
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c13
-rw-r--r--sound/soc/intel/boards/haswell.c4
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c8
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c14
-rw-r--r--sound/soc/intel/boards/kbl_rt5660.c6
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c12
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c12
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h4
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c27
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c14
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c19
-rw-r--r--sound/soc/intel/boards/skl_rt286.c8
-rw-r--r--sound/soc/intel/boards/sof_da7219_max98373.c83
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.c80
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.h24
-rw-r--r--sound/soc/intel/boards/sof_pcm512x.c448
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c137
-rw-r--r--sound/soc/intel/boards/sof_sdw.c962
-rw-r--r--sound/soc/intel/boards/sof_sdw_common.h114
-rw-r--r--sound/soc/intel/boards/sof_sdw_dmic.c42
-rw-r--r--sound/soc/intel/boards/sof_sdw_hdmi.c97
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt1308.c151
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt5682.c126
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt700.c125
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt711.c156
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt715.c42
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c7
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c111
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c103
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-jsl-match.c34
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c92
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c26
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c3
-rw-r--r--sound/soc/intel/skylake/cnl-sst.c35
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c3
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c20
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h2
-rw-r--r--sound/soc/intel/skylake/skl.c33
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c78
-rw-r--r--sound/soc/kirkwood/armada-370-db.c2
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c2
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.c10
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c2
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c4
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-wm8960.c4
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-max98090.c4
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c4
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c23
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c117
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c2
-rw-r--r--sound/soc/meson/Kconfig41
-rw-r--r--sound/soc/meson/Makefile19
-rw-r--r--sound/soc/meson/aiu-acodec-ctrl.c203
-rw-r--r--sound/soc/meson/aiu-codec-ctrl.c151
-rw-r--r--sound/soc/meson/aiu-encoder-i2s.c365
-rw-r--r--sound/soc/meson/aiu-encoder-spdif.c209
-rw-r--r--sound/soc/meson/aiu-fifo-i2s.c153
-rw-r--r--sound/soc/meson/aiu-fifo-spdif.c186
-rw-r--r--sound/soc/meson/aiu-fifo.c223
-rw-r--r--sound/soc/meson/aiu-fifo.h50
-rw-r--r--sound/soc/meson/aiu.c388
-rw-r--r--sound/soc/meson/aiu.h89
-rw-r--r--sound/soc/meson/axg-card.c414
-rw-r--r--sound/soc/meson/axg-fifo.c2
-rw-r--r--sound/soc/meson/g12a-toacodec.c252
-rw-r--r--sound/soc/meson/g12a-tohdmitx.c219
-rw-r--r--sound/soc/meson/gx-card.c141
-rw-r--r--sound/soc/meson/meson-card-utils.c385
-rw-r--r--sound/soc/meson/meson-card.h55
-rw-r--r--sound/soc/meson/meson-codec-glue.c149
-rw-r--r--sound/soc/meson/meson-codec-glue.h32
-rw-r--r--sound/soc/meson/t9015.c333
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c4
-rw-r--r--sound/soc/pxa/Kconfig22
-rw-r--r--sound/soc/pxa/brownstone.c4
-rw-r--r--sound/soc/pxa/corgi.c4
-rw-r--r--sound/soc/pxa/hx4700.c4
-rw-r--r--sound/soc/pxa/imote2.c4
-rw-r--r--sound/soc/pxa/magician.c14
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c4
-rw-r--r--sound/soc/pxa/mmp-pcm.c2
-rw-r--r--sound/soc/pxa/mmp-sspa.c2
-rw-r--r--sound/soc/pxa/poodle.c4
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c2
-rw-r--r--sound/soc/pxa/spitz.c4
-rw-r--r--sound/soc/pxa/ttc-dkb.c2
-rw-r--r--sound/soc/pxa/z2.c4
-rw-r--r--sound/soc/pxa/zylonite.c6
-rw-r--r--sound/soc/qcom/Kconfig2
-rw-r--r--sound/soc/qcom/apq8016_sbc.c9
-rw-r--r--sound/soc/qcom/apq8096.c6
-rw-r--r--sound/soc/qcom/lpass-platform.c4
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c173
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c243
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.h51
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c21
-rw-r--r--sound/soc/qcom/sdm845.c105
-rw-r--r--sound/soc/qcom/storm.c2
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c4
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c16
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c6
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c6
-rw-r--r--sound/soc/samsung/Kconfig4
-rw-r--r--sound/soc/samsung/arndale.c10
-rw-r--r--sound/soc/samsung/bells.c16
-rw-r--r--sound/soc/samsung/h1940_uda1380.c2
-rw-r--r--sound/soc/samsung/i2s.c2
-rw-r--r--sound/soc/samsung/jive_wm8750.c4
-rw-r--r--sound/soc/samsung/littlemill.c16
-rw-r--r--sound/soc/samsung/lowland.c6
-rw-r--r--sound/soc/samsung/neo1973_wm8753.c10
-rw-r--r--sound/soc/samsung/odroid.c6
-rw-r--r--sound/soc/samsung/pcm.c4
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c2
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c2
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.c4
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c6
-rw-r--r--sound/soc/samsung/smartq_wm8987.c4
-rw-r--r--sound/soc/samsung/smdk_spdif.c2
-rw-r--r--sound/soc/samsung/smdk_wm8580.c2
-rw-r--r--sound/soc/samsung/smdk_wm8994.c4
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c6
-rw-r--r--sound/soc/samsung/snow.c8
-rw-r--r--sound/soc/samsung/spdif.c8
-rw-r--r--sound/soc/samsung/speyside.c10
-rw-r--r--sound/soc/samsung/tm2_wm5110.c19
-rw-r--r--sound/soc/samsung/tobermory.c10
-rw-r--r--sound/soc/sh/dma-sh7760.c16
-rw-r--r--sound/soc/sh/fsi.c5
-rw-r--r--sound/soc/sh/migor.c6
-rw-r--r--sound/soc/sh/rcar/core.c2
-rw-r--r--sound/soc/soc-compress.c5
-rw-r--r--sound/soc/soc-core.c290
-rw-r--r--sound/soc/soc-dai.c18
-rw-r--r--sound/soc/soc-dapm.c220
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c26
-rw-r--r--sound/soc/soc-pcm.c1619
-rw-r--r--sound/soc/soc-topology.c24
-rw-r--r--sound/soc/sof/Kconfig9
-rw-r--r--sound/soc/sof/Makefile1
-rw-r--r--sound/soc/sof/compress.c146
-rw-r--r--sound/soc/sof/compress.h31
-rw-r--r--sound/soc/sof/core.c10
-rw-r--r--sound/soc/sof/debug.c226
-rw-r--r--sound/soc/sof/imx/imx8.c57
-rw-r--r--sound/soc/sof/intel/Kconfig20
-rw-r--r--sound/soc/sof/intel/Makefile1
-rw-r--r--sound/soc/sof/intel/apl.c9
-rw-r--r--sound/soc/sof/intel/cnl.c51
-rw-r--r--sound/soc/sof/intel/hda-codec.c11
-rw-r--r--sound/soc/sof/intel/hda-compress.c114
-rw-r--r--sound/soc/sof/intel/hda-ctrl.c40
-rw-r--r--sound/soc/sof/intel/hda-dai.c130
-rw-r--r--sound/soc/sof/intel/hda-dsp.c331
-rw-r--r--sound/soc/sof/intel/hda-ipc.c24
-rw-r--r--sound/soc/sof/intel/hda-loader.c40
-rw-r--r--sound/soc/sof/intel/hda-pcm.c8
-rw-r--r--sound/soc/sof/intel/hda-stream.c27
-rw-r--r--sound/soc/sof/intel/hda.c433
-rw-r--r--sound/soc/sof/intel/hda.h120
-rw-r--r--sound/soc/sof/ipc.c41
-rw-r--r--sound/soc/sof/loader.c6
-rw-r--r--sound/soc/sof/ops.h59
-rw-r--r--sound/soc/sof/pcm.c19
-rw-r--r--sound/soc/sof/pm.c176
-rw-r--r--sound/soc/sof/probe.c290
-rw-r--r--sound/soc/sof/probe.h85
-rw-r--r--sound/soc/sof/sof-audio.c59
-rw-r--r--sound/soc/sof/sof-audio.h6
-rw-r--r--sound/soc/sof/sof-of-dev.c10
-rw-r--r--sound/soc/sof/sof-priv.h71
-rw-r--r--sound/soc/sof/topology.c25
-rw-r--r--sound/soc/sprd/Kconfig2
-rw-r--r--sound/soc/sprd/sprd-mcdt.h2
-rw-r--r--sound/soc/sprd/sprd-pcm-compress.c4
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.c2
-rw-r--r--sound/soc/stm/stm32_adfsdm.c12
-rw-r--r--sound/soc/stm/stm32_i2s.c75
-rw-r--r--sound/soc/stm/stm32_sai.c26
-rw-r--r--sound/soc/stm/stm32_sai_sub.c13
-rw-r--r--sound/soc/stm/stm32_spdifrx.c89
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c2
-rw-r--r--sound/soc/sunxi/sun8i-codec.c3
-rw-r--r--sound/soc/tegra/tegra_alc5632.c2
-rw-r--r--sound/soc/tegra/tegra_max98090.c2
-rw-r--r--sound/soc/tegra/tegra_rt5640.c2
-rw-r--r--sound/soc/tegra/tegra_rt5677.c2
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c2
-rw-r--r--sound/soc/tegra/tegra_wm8753.c2
-rw-r--r--sound/soc/tegra/tegra_wm8903.c24
-rw-r--r--sound/soc/tegra/trimslice.c2
-rw-r--r--sound/soc/ti/Kconfig8
-rw-r--r--sound/soc/ti/Makefile2
-rw-r--r--sound/soc/ti/ams-delta.c4
-rw-r--r--sound/soc/ti/davinci-evm.c4
-rw-r--r--sound/soc/ti/davinci-mcasp.c13
-rw-r--r--sound/soc/ti/davinci-vcif.c4
-rw-r--r--sound/soc/ti/n810.c2
-rw-r--r--sound/soc/ti/omap-abe-twl6040.c6
-rw-r--r--sound/soc/ti/omap-mcbsp-st.c2
-rw-r--r--sound/soc/ti/omap-mcbsp.c4
-rw-r--r--sound/soc/ti/omap-mcpdm.c2
-rw-r--r--sound/soc/ti/omap3pandora.c4
-rw-r--r--sound/soc/ti/osk5912.c2
-rw-r--r--sound/soc/ti/rx51.c2
-rw-r--r--sound/soc/ti/udma-pcm.c43
-rw-r--r--sound/soc/ti/udma-pcm.h18
-rw-r--r--sound/soc/txx9/txx9aclc.c2
-rw-r--r--sound/soc/uniphier/aio-compress.c22
-rw-r--r--sound/soc/uniphier/aio-dma.c6
-rw-r--r--sound/soc/ux500/mop500_ab8500.c6
-rw-r--r--sound/soc/ux500/ux500_pcm.c8
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c2
-rw-r--r--sound/soc/zte/zx-spdif.c1
-rw-r--r--sound/soc/zte/zx-tdm.c3
-rw-r--r--sound/usb/Makefile1
-rw-r--r--sound/usb/card.c38
-rw-r--r--sound/usb/clock.c59
-rw-r--r--sound/usb/format.c37
-rw-r--r--sound/usb/midi.c31
-rw-r--r--sound/usb/mixer.c33
-rw-r--r--sound/usb/mixer_quirks.c5
-rw-r--r--sound/usb/mixer_s1810c.c595
-rw-r--r--sound/usb/mixer_s1810c.h7
-rw-r--r--sound/usb/pcm.c7
-rw-r--r--sound/usb/proc.c2
-rw-r--r--sound/usb/quirks-table.h2
-rw-r--r--sound/usb/quirks.c88
-rw-r--r--sound/usb/quirks.h2
-rw-r--r--sound/usb/stream.c3
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c9
-rw-r--r--tools/accounting/.gitignore1
-rw-r--r--tools/accounting/getdelays.c2
-rw-r--r--tools/arch/x86/include/asm/unistd_64.h3
-rw-r--r--tools/bootconfig/.gitignore1
-rw-r--r--tools/bootconfig/Makefile27
-rw-r--r--tools/bootconfig/main.c35
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh14
-rw-r--r--tools/bpf/.gitignore1
-rw-r--r--tools/bpf/bpftool/.gitignore1
-rw-r--r--tools/bpf/runqslower/.gitignore1
-rw-r--r--tools/build/.gitignore1
-rw-r--r--tools/build/Makefile.feature3
-rw-r--r--tools/build/feature/.gitignore1
-rw-r--r--tools/build/feature/Makefile5
-rw-r--r--tools/build/feature/test-file-handle.c17
-rw-r--r--tools/cgroup/.gitignore1
-rw-r--r--tools/gpio/.gitignore2
-rw-r--r--tools/gpio/Build1
-rw-r--r--tools/gpio/Makefile13
-rw-r--r--tools/gpio/gpio-hammer.c19
-rw-r--r--tools/gpio/gpio-utils.c6
-rw-r--r--tools/gpio/gpio-watch.c99
-rw-r--r--tools/iio/.gitignore1
-rw-r--r--tools/include/uapi/linux/perf_event.h16
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat256
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt44
-rw-r--r--tools/laptop/dslm/.gitignore1
-rw-r--r--tools/leds/.gitignore1
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/lockdep/.gitignore1
-rw-r--r--tools/lib/perf/include/perf/event.h7
-rw-r--r--tools/lib/rbtree.c4
-rw-r--r--tools/lib/traceevent/.gitignore1
-rw-r--r--tools/memory-model/.gitignore1
-rw-r--r--tools/memory-model/litmus-tests/.gitignore1
-rw-r--r--tools/objtool/.gitignore1
-rw-r--r--tools/pci/pcitest.c37
-rw-r--r--tools/pcmcia/.gitignore1
-rw-r--r--tools/perf/.gitignore1
-rw-r--r--tools/perf/Documentation/perf-config.txt14
-rw-r--r--tools/perf/Documentation/perf-record.txt23
-rw-r--r--tools/perf/Documentation/perf-report.txt6
-rw-r--r--tools/perf/Documentation/perf-script.txt14
-rw-r--r--tools/perf/Documentation/perf-top.txt9
-rw-r--r--tools/perf/Makefile.config15
-rw-r--r--tools/perf/Makefile.perf11
-rw-r--r--tools/perf/arch/arm64/util/Build2
-rw-r--r--tools/perf/arch/arm64/util/machine.c27
-rw-r--r--tools/perf/arch/arm64/util/sym-handling.c19
-rw-r--r--tools/perf/arch/powerpc/util/Build1
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c10
-rw-r--r--tools/perf/builtin-diff.c1
-rw-r--r--tools/perf/builtin-record.c16
-rw-r--r--tools/perf/builtin-report.c17
-rw-r--r--tools/perf/builtin-script.c60
-rw-r--r--tools/perf/builtin-top.c30
-rw-r--r--tools/perf/pmu-events/arch/test/test_cpu/branch.json (renamed from tools/perf/pmu-events/arch/x86/amdfam17h/branch.json)0
-rw-r--r--tools/perf/pmu-events/arch/test/test_cpu/other.json26
-rw-r--r--tools/perf/pmu-events/arch/test/test_cpu/uncore.json21
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/cache.json329
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/other.json65
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/branch.json23
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/cache.json294
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/core.json (renamed from tools/perf/pmu-events/arch/x86/amdfam17h/core.json)15
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json (renamed from tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json)64
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/memory.json (renamed from tools/perf/pmu-events/arch/x86/amdfam17h/memory.json)82
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/other.json56
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/branch.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/cache.json338
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/core.json130
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json140
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/memory.json341
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/other.json115
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv3
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json2
-rw-r--r--tools/perf/pmu-events/jevents.c30
-rw-r--r--tools/perf/tests/.gitignore1
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/builtin-test.c4
-rw-r--r--tools/perf/tests/make10
-rw-r--r--tools/perf/tests/pmu-events.c379
-rw-r--r--tools/perf/tests/sample-parsing.c6
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/ui/browsers/hists.c126
-rw-r--r--tools/perf/ui/hist.c93
-rw-r--r--tools/perf/ui/keysyms.h1
-rw-r--r--tools/perf/util/annotate.h1
-rw-r--r--tools/perf/util/cgroup.c80
-rw-r--r--tools/perf/util/cgroup.h17
-rw-r--r--tools/perf/util/cpumap.c10
-rw-r--r--tools/perf/util/dsos.c22
-rw-r--r--tools/perf/util/env.c2
-rw-r--r--tools/perf/util/env.h6
-rw-r--r--tools/perf/util/event.c39
-rw-r--r--tools/perf/util/event.h6
-rw-r--r--tools/perf/util/evsel.c18
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/hist.c13
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/machine.c19
-rw-r--r--tools/perf/util/machine.h3
-rw-r--r--tools/perf/util/metricgroup.c49
-rw-r--r--tools/perf/util/parse-events.c6
-rw-r--r--tools/perf/util/parse-events.l12
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c2
-rw-r--r--tools/perf/util/pmu.c39
-rw-r--r--tools/perf/util/pmu.h5
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/record.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c3
-rw-r--r--tools/perf/util/session.c4
-rw-r--r--tools/perf/util/setup.py2
-rw-r--r--tools/perf/util/sort.c43
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/stat-display.c6
-rw-r--r--tools/perf/util/symbol-elf.c10
-rw-r--r--tools/perf/util/symbol_conf.h1
-rw-r--r--tools/perf/util/synthetic-events.c130
-rw-r--r--tools/perf/util/synthetic-events.h1
-rw-r--r--tools/perf/util/tool.h2
-rw-r--r--tools/power/acpi/.gitignore1
-rw-r--r--tools/power/cpupower/.gitignore1
-rw-r--r--tools/power/x86/intel-speed-select/.gitignore1
-rw-r--r--tools/power/x86/turbostat/.gitignore1
-rw-r--r--tools/spi/.gitignore1
-rw-r--r--tools/testing/kunit/.gitattributes1
-rw-r--r--tools/testing/kunit/.gitignore1
-rw-r--r--tools/testing/kunit/configs/broken_on_uml.config41
-rwxr-xr-xtools/testing/kunit/kunit.py38
-rw-r--r--tools/testing/kunit/kunit_config.py41
-rw-r--r--tools/testing/kunit/kunit_kernel.py84
-rw-r--r--tools/testing/kunit/kunit_parser.py51
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py108
-rw-r--r--tools/testing/kunit/test_data/test_config_printk_time.log31
-rw-r--r--tools/testing/kunit/test_data/test_interrupted_tap_output.log37
-rw-r--r--tools/testing/kunit/test_data/test_kernel_panic_interrupt.log25
-rw-r--r--tools/testing/kunit/test_data/test_multiple_prefixes.log31
-rw-r--r--tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log33
-rw-r--r--tools/testing/kunit/test_data/test_pound_no_prefix.log33
-rw-r--r--tools/testing/kunit/test_data/test_pound_sign.log33
-rw-r--r--tools/testing/radix-tree/.gitignore1
-rw-r--r--tools/testing/radix-tree/Makefile4
-rw-r--r--tools/testing/radix-tree/iteration_check_2.c87
-rw-r--r--tools/testing/radix-tree/linux.c32
-rw-r--r--tools/testing/radix-tree/linux/slab.h6
-rw-r--r--tools/testing/radix-tree/main.c1
-rw-r--r--tools/testing/radix-tree/test.h1
-rw-r--r--tools/testing/selftests/.gitignore1
-rw-r--r--tools/testing/selftests/Makefile5
-rw-r--r--tools/testing/selftests/android/Makefile2
-rw-r--r--tools/testing/selftests/android/ion/.gitignore1
-rw-r--r--tools/testing/selftests/android/ion/Makefile2
-rw-r--r--tools/testing/selftests/arm64/signal/.gitignore1
-rw-r--r--tools/testing/selftests/arm64/tags/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/map_tests/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/verifier/.gitignore1
-rw-r--r--tools/testing/selftests/breakpoints/.gitignore1
-rw-r--r--tools/testing/selftests/capabilities/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/Makefile6
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c126
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h4
-rw-r--r--tools/testing/selftests/cgroup/test_core.c177
-rw-r--r--tools/testing/selftests/clone3/.gitignore1
-rw-r--r--tools/testing/selftests/clone3/clone3_selftests.h19
-rw-r--r--tools/testing/selftests/drivers/.gitignore1
-rw-r--r--tools/testing/selftests/efivarfs/.gitignore1
-rw-r--r--tools/testing/selftests/exec/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/binderfs/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile4
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c498
-rw-r--r--tools/testing/selftests/filesystems/epoll/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c67
-rw-r--r--tools/testing/selftests/ftrace/.gitignore1
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc125
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc108
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc2
-rw-r--r--tools/testing/selftests/futex/functional/.gitignore1
-rw-r--r--tools/testing/selftests/gpio/.gitignore1
-rw-r--r--tools/testing/selftests/ia64/.gitignore1
-rw-r--r--tools/testing/selftests/intel_pstate/.gitignore1
-rw-r--r--tools/testing/selftests/ipc/.gitignore1
-rw-r--r--tools/testing/selftests/ir/.gitignore1
-rw-r--r--tools/testing/selftests/kcmp/.gitignore1
-rw-r--r--tools/testing/selftests/kselftest_harness.h144
-rw-r--r--tools/testing/selftests/kvm/.gitignore8
-rw-r--r--tools/testing/selftests/kvm/Makefile12
-rw-r--r--tools/testing/selftests/kvm/clear_dirty_log_test.c4
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c661
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c107
-rw-r--r--tools/testing/selftests/kvm/include/evmcs.h2
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h126
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h28
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c8
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c41
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/ucall.c2
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c6
-rw-r--r--tools/testing/selftests/kvm/lib/io.c12
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c170
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h59
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/processor.c78
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c93
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c201
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/svm.c2
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/memop.c2
-rw-r--r--tools/testing/selftests/kvm/s390x/resets.c138
-rw-r--r--tools/testing/selftests/kvm/s390x/sync_regs_test.c13
-rw-r--r--tools/testing/selftests/kvm/steal_time.c352
-rw-r--r--tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c35
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/mmio_warning_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_memory_region_test.c141
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c17
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/xss_msr_test.c2
-rw-r--r--tools/testing/selftests/lib.mk3
-rw-r--r--tools/testing/selftests/media_tests/.gitignore1
-rw-r--r--tools/testing/selftests/membarrier/.gitignore1
-rw-r--r--tools/testing/selftests/memfd/.gitignore1
-rw-r--r--tools/testing/selftests/memfd/Makefile9
-rw-r--r--tools/testing/selftests/mount/.gitignore1
-rw-r--r--tools/testing/selftests/mqueue/.gitignore1
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/forwarding/.gitignore1
-rw-r--r--tools/testing/selftests/net/mptcp/.gitignore1
-rw-r--r--tools/testing/selftests/nsfs/.gitignore1
-rw-r--r--tools/testing/selftests/openat2/.gitignore1
-rw-r--r--tools/testing/selftests/pid_namespace/.gitignore1
-rw-r--r--tools/testing/selftests/pid_namespace/Makefile8
-rw-r--r--tools/testing/selftests/pid_namespace/config2
-rw-r--r--tools/testing/selftests/pid_namespace/regression_enomem.c45
-rw-r--r--tools/testing/selftests/pidfd/.gitignore1
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h2
-rw-r--r--tools/testing/selftests/powerpc/alignment/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/settings1
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/dscr/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/dscr/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/dscr/settings1
-rw-r--r--tools/testing/selftests/powerpc/math/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/pmu/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/primitives/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/security/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/signal/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/signal/settings1
-rw-r--r--tools/testing/selftests/powerpc/signal/sigreturn_vdso.c127
-rw-r--r--tools/testing/selftests/powerpc/stringloops/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/syscalls/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/tm/settings1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c74
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c284
-rw-r--r--tools/testing/selftests/powerpc/vphn/.gitignore1
-rw-r--r--tools/testing/selftests/prctl/.gitignore1
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/pstore/.gitignore1
-rw-r--r--tools/testing/selftests/ptp/.gitignore1
-rw-r--r--tools/testing/selftests/ptrace/.gitignore1
-rw-r--r--tools/testing/selftests/ptrace/Makefile4
-rw-r--r--tools/testing/selftests/ptrace/vmaccess.c86
-rw-r--r--tools/testing/selftests/rcutorture/.gitignore1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore1
-rw-r--r--tools/testing/selftests/resctrl/Makefile17
-rw-r--r--tools/testing/selftests/resctrl/README53
-rw-r--r--tools/testing/selftests/resctrl/cache.c272
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c250
-rw-r--r--tools/testing/selftests/resctrl/cqm_test.c176
-rw-r--r--tools/testing/selftests/resctrl/fill_buf.c213
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c171
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c145
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h107
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c202
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c744
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c722
-rw-r--r--tools/testing/selftests/rseq/.gitignore1
-rw-r--r--tools/testing/selftests/rtc/.gitignore1
-rw-r--r--tools/testing/selftests/safesetid/.gitignore1
-rw-r--r--tools/testing/selftests/seccomp/.gitignore1
-rw-r--r--tools/testing/selftests/seccomp/Makefile17
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c10
-rw-r--r--tools/testing/selftests/sigaltstack/.gitignore1
-rw-r--r--tools/testing/selftests/size/.gitignore1
-rw-r--r--tools/testing/selftests/sparc64/drivers/.gitignore1
-rw-r--r--tools/testing/selftests/splice/.gitignore1
-rw-r--r--tools/testing/selftests/sync/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore1
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py5
-rw-r--r--tools/testing/selftests/timens/.gitignore1
-rw-r--r--tools/testing/selftests/timens/exec.c1
-rw-r--r--tools/testing/selftests/timens/procfs.c1
-rw-r--r--tools/testing/selftests/timens/timens.c1
-rw-r--r--tools/testing/selftests/timens/timer.c1
-rw-r--r--tools/testing/selftests/timers/.gitignore1
-rw-r--r--tools/testing/selftests/tmpfs/.gitignore1
-rw-r--r--tools/testing/selftests/vDSO/.gitignore1
-rw-r--r--tools/testing/selftests/vm/.gitignore2
-rw-r--r--tools/testing/selftests/vm/Makefile2
-rw-r--r--tools/testing/selftests/vm/charge_reserved_hugetlb.sh575
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c15
-rw-r--r--tools/testing/selftests/vm/hugetlb_reparenting_test.sh244
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c14
-rw-r--r--tools/testing/selftests/vm/mlock2-tests.c233
-rw-r--r--tools/testing/selftests/vm/mremap_dontunmap.c313
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests37
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c225
-rw-r--r--tools/testing/selftests/vm/write_hugetlb_memory.sh23
-rw-r--r--tools/testing/selftests/vm/write_to_hugetlbfs.c242
-rw-r--r--tools/testing/selftests/watchdog/.gitignore1
-rw-r--r--tools/testing/selftests/wireguard/qemu/.gitignore1
-rw-r--r--tools/testing/selftests/x86/.gitignore1
-rw-r--r--tools/testing/vsock/.gitignore1
-rw-r--r--tools/thermal/tmon/.gitignore1
-rw-r--r--tools/usb/.gitignore1
-rw-r--r--tools/usb/usbip/.gitignore1
-rw-r--r--tools/virtio/.gitignore1
-rw-r--r--tools/vm/.gitignore1
-rw-r--r--usr/.gitignore1
-rw-r--r--usr/include/.gitignore1
-rw-r--r--virt/kvm/arm/arch_timer.c2
-rw-r--r--virt/kvm/arm/arm.c60
-rw-r--r--virt/kvm/arm/mmu.c20
-rw-r--r--virt/kvm/arm/psci.c1
-rw-r--r--virt/kvm/arm/vgic/vgic-debug.c14
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c81
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c88
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-v4.c107
-rw-r--r--virt/kvm/arm/vgic/vgic.h1
-rw-r--r--virt/kvm/kvm_main.c663
5557 files changed, 250920 insertions, 110472 deletions
diff --git a/.gitignore b/.gitignore
index 72ef86a5570d..2258e906f01c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# NOTE! Don't add files that are generated in specific
# subdirectories here. Add them in the ".gitignore" file
diff --git a/Documentation/.gitignore b/Documentation/.gitignore
index e74fec8693b2..d6dc7c9b8e25 100644
--- a/Documentation/.gitignore
+++ b/Documentation/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
output
*.pyc
diff --git a/Documentation/ABI/obsolete/sysfs-kernel-fadump_enabled b/Documentation/ABI/obsolete/sysfs-kernel-fadump_enabled
new file mode 100644
index 000000000000..e9c2de8b3688
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-kernel-fadump_enabled
@@ -0,0 +1,9 @@
+This ABI is renamed and moved to a new location /sys/kernel/fadump/enabled.
+
+What: /sys/kernel/fadump_enabled
+Date: Feb 2012
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Primarily used to identify whether the FADump is enabled in
+ the kernel or not.
+User: Kdump service
diff --git a/Documentation/ABI/obsolete/sysfs-kernel-fadump_registered b/Documentation/ABI/obsolete/sysfs-kernel-fadump_registered
new file mode 100644
index 000000000000..0360be39c98e
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-kernel-fadump_registered
@@ -0,0 +1,10 @@
+This ABI is renamed and moved to a new location /sys/kernel/fadump/registered.
+
+What: /sys/kernel/fadump_registered
+Date: Feb 2012
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read/write
+ Helps to control the dump collection feature from userspace.
+ Writing 1 to this file enables the system to collect the
+ dump; writing 0 disables it.
+User: Kdump service
diff --git a/Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem b/Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem
new file mode 100644
index 000000000000..6ce0b129ab12
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem
@@ -0,0 +1,10 @@
+This ABI is renamed and moved to a new location /sys/kernel/fadump/release_mem.
+
+What: /sys/kernel/fadump_release_mem
+Date: Feb 2012
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: write only
+ This is a special sysfs file and only available when
+ the system is booted to capture the vmcore using FADump.
+ It is used to release the memory reserved by FADump to
+ save the crash dump.
diff --git a/Documentation/ABI/removed/sysfs-kernel-fadump_release_opalcore b/Documentation/ABI/removed/sysfs-kernel-fadump_release_opalcore
new file mode 100644
index 000000000000..a8d46cd0f4e6
--- /dev/null
+++ b/Documentation/ABI/removed/sysfs-kernel-fadump_release_opalcore
@@ -0,0 +1,9 @@
+This ABI is moved to /sys/firmware/opal/mpipl/release_core.
+
+What: /sys/kernel/fadump_release_opalcore
+Date: Sep 2019
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: write only
+ The sysfs file is available when the system is booted to
+ collect the dump on an OPAL-based machine. It is used to
+ release the memory used to collect the opalcore.
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index f0ac14b70ecb..a73601c5121e 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -43,6 +43,20 @@ Description: Allows the root user to read or write directly through the
If the IOMMU is disabled, it also allows the root user to read
or write from the host a device VA of a host mapped memory
+What: /sys/kernel/debug/habanalabs/hl<n>/data64
+Date: Jan 2020
+KernelVersion: 5.6
+Contact: oded.gabbay@gmail.com
+Description: Allows the root user to read or write 64 bit data directly
+ through the device's PCI bar. Writing to this file generates a
+ write transaction while reading from the file generates a read
+ transaction. This custom interface is needed (instead of using
+ the generic Linux user-space PCI mapping) because the DDR bar
+ is very small compared to the DDR memory and only the driver can
+ move the bar before and after the transaction.
+ If the IOMMU is disabled, it also allows the root user to read
+ or write from the host a device VA of a host mapped memory
+
What: /sys/kernel/debug/habanalabs/hl<n>/device
Date: Jan 2019
KernelVersion: 5.1
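For illustration, a minimal user-space sketch of reading through the data64
file described above. The hl0 instance name, the pairing with a companion
"addr" debugfs file, and the text output format are assumptions, not taken
from this patch; confirm them against the habanalabs driver documentation.
Must run as root:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[32];
        /* "hl0" and the returned-text format are assumptions; adjust
         * for the actual device instance on the target system. */
        int fd = open("/sys/kernel/debug/habanalabs/hl0/data64", O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        ssize_t n = read(fd, buf, sizeof(buf) - 1); /* read transaction */
        if (n < 0) {
            perror("read");
            close(fd);
            return 1;
        }
        buf[n] = '\0';
        printf("data64: %s\n", buf);
        close(fd);
        return 0;
    }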
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
new file mode 100644
index 000000000000..9d11502b4390
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
@@ -0,0 +1,241 @@
+What: /sys/bus/coresight/devices/<cti-name>/enable
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Enable/Disable the CTI hardware.
+
+What: /sys/bus/coresight/devices/<cti-name>/powered
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Indicate if the CTI hardware is powered.
+
+What: /sys/bus/coresight/devices/<cti-name>/ctmid
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Display the associated CTM ID
+
+What: /sys/bus/coresight/devices/<cti-name>/nr_trigger_cons
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Number of devices connected to triggers on this CTI
+
+What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/name
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Name of connected device <N>
+
+What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_signals
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Input trigger signals from connected device <N>
+
+What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_types
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Functional types for the input trigger signals
+ from connected device <N>
+
+What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_signals
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Output trigger signals to connected device <N>
+
+What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_types
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Functional types for the output trigger signals
+ to connected device <N>
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/inout_sel
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Select the index for inen and outen registers.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/inen
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Read or write the CTIINEN register selected by inout_sel.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/outen
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Read or write the CTIOUTEN register selected by inout_sel.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/gate
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Read or write CTIGATE register.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/asicctl
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Read or write ASICCTL register.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/intack
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Write the INTACK register.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/appset
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Set CTIAPPSET register to activate channel. Read back to
+ determine current value of register.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/appclear
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Write APPCLEAR register to deactivate channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/apppulse
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Write APPPULSE to pulse a channel active for one clock
+ cycle.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/chinstatus
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Read current status of channel inputs.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/choutstatus
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Read current status of channel outputs.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/triginstatus
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Read current status of input trigger signals.
+
+What: /sys/bus/coresight/devices/<cti-name>/regs/trigoutstatus
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Read current status of output trigger signals.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_attach
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Attach a CTI input trigger to a CTM channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_detach
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Detach a CTI input trigger from a CTM channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_attach
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Attach a CTI output trigger to a CTM channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_detach
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Detach a CTI output trigger from a CTM channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_enable
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Enable CTIGATE for single channel (W) or list enabled
+ channels through the gate (R).
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_disable
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Disable CTIGATE for single channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_set
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Activate a single channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_clear
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Deactivate a single channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_pulse
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Pulse a single channel - activate for a single clock cycle.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_filtered
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) List of output triggers filtered across all connections.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/trig_filter_enable
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Enable or disable trigger output signal filtering.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_inuse
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Show channels with at least one attached trigger signal.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_free
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Show channels with no attached trigger signals.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_sel
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (RW) Write channel number to select a channel to view, read to
+ see selected channel number.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_in
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Read to see input triggers connected to selected view
+ channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_out
+Date: March 2020
+KernelVersion 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (R) Read to see output triggers connected to selected view
+ channel.
+
+What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_reset
+Date: March 2020
+KernelVersion: 5.7
+Contact: Mike Leach or Mathieu Poirier
+Description: (W) Clear all channel / trigger programming.
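To make the channel/trigger model above concrete, here is a hedged C sketch
that attaches CTM channel 0 to CTI input trigger 1, activates the channel,
and enables the CTI. The "<channel> <trigger>" write format and the cti_sys0
device name are assumptions drawn from typical CoreSight ECT usage, not
confirmed by this patch; check the ECT documentation for the exact syntax:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a short string to a sysfs file; returns 0 on success. */
    static int write_str(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);
        if (fd < 0)
            return -1;
        ssize_t n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
    }

    int main(void)
    {
        const char *base = "/sys/bus/coresight/devices/cti_sys0";
        char path[256];

        snprintf(path, sizeof(path), "%s/channels/trigin_attach", base);
        if (write_str(path, "0 1"))   /* channel 0 <- input trigger 1 */
            perror("trigin_attach");

        snprintf(path, sizeof(path), "%s/channels/chan_set", base);
        if (write_str(path, "0"))     /* activate channel 0 */
            perror("chan_set");

        snprintf(path, sizeof(path), "%s/enable", base);
        if (write_str(path, "1"))     /* enable the CTI hardware */
            perror("enable");
        return 0;
    }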
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
index 456cb62b384c..7fd2601c2831 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
@@ -40,3 +40,11 @@ Description: (RW) Trigger window switch for the MSC's buffer, in
triggering a window switch for the buffer. Returns an error in any
other operating mode or attempts to write something other than "1".
+What: /sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/stop_on_full
+Date: March 2020
+KernelVersion: 5.7
+Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description: (RW) Configure whether trace stops when the last available window
+ becomes full (1/y/Y) or wraps around and continues until the next
+ window becomes available again (0/n/N).
+
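A minimal sketch of flipping the new stop_on_full knob from C. The "0-msc0"
instance name is an assumption and must be replaced with the real
<intel_th_id>-msc<msc-id> pair on the target system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* "1" = stop tracing when the last window fills; "0" = wrap. */
        const char *p = "/sys/bus/intel_th/devices/0-msc0/stop_on_full";
        int fd = open(p, O_WRONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "1", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }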
diff --git a/Documentation/ABI/testing/sysfs-driver-jz4780-efuse b/Documentation/ABI/testing/sysfs-driver-jz4780-efuse
new file mode 100644
index 000000000000..bb6f5d6ceea0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-jz4780-efuse
@@ -0,0 +1,16 @@
+What: /sys/devices/*/<our-device>/nvmem
+Date: December 2017
+Contact: PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+Description: Read-only access to the efuse on the Ingenic JZ4780 SoC.
+ The SoC has a one-time programmable 8K efuse that is
+ split into segments. The driver supports read only.
+ The segments are:
+ 0x000 64 bit Random Number
+ 0x008 128 bit Ingenic Chip ID
+ 0x018 128 bit Customer ID
+ 0x028 3520 bit Reserved
+ 0x1E0 8 bit Protect Segment
+ 0x1E1 2296 bit HDMI Key
+ 0x300 2048 bit Security boot key
+Users: any user space application which wants to read the Chip
+ and Customer ID
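As a usage sketch of the segment map above, the following reads the 128-bit
Ingenic Chip ID at offset 0x008 through the nvmem file. The /sys/bus/nvmem
path and the "jz4780-efuse0" instance name are assumptions; the exact nvmem
node name is device dependent:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint8_t id[16];  /* 128-bit Chip ID lives at offset 0x008 */
        int fd = open("/sys/bus/nvmem/devices/jz4780-efuse0/nvmem",
                      O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (pread(fd, id, sizeof(id), 0x008) != (ssize_t)sizeof(id)) {
            perror("pread");
            close(fd);
            return 1;
        }
        for (size_t i = 0; i < sizeof(id); i++)
            printf("%02x", id[i]);
        putchar('\n');
        close(fd);
        return 0;
    }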
diff --git a/Documentation/ABI/testing/sysfs-driver-uacce b/Documentation/ABI/testing/sysfs-driver-uacce
new file mode 100644
index 000000000000..08f2591138af
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-uacce
@@ -0,0 +1,39 @@
+What: /sys/class/uacce/<dev_name>/api
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: linux-accelerators@lists.ozlabs.org
+Description: API of the device.
+ Can be any string; it is up to userspace to parse it.
+ Applications use the API to match the correct driver.
+
+What: /sys/class/uacce/<dev_name>/flags
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: linux-accelerators@lists.ozlabs.org
+Description: Attributes of the device, see UACCE_DEV_xxx flag defined in uacce.h
+
+What: /sys/class/uacce/<dev_name>/available_instances
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: linux-accelerators@lists.ozlabs.org
+Description: Number of available instances left on the device.
+ Returns -ENODEV if uacce_ops get_available_instances is not provided.
+
+What: /sys/class/uacce/<dev_name>/algorithms
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: linux-accelerators@lists.ozlabs.org
+Description: Algorithms supported by this accelerator, separated by new line.
+ Can be any string and up to userspace to parse.
+
+What: /sys/class/uacce/<dev_name>/region_mmio_size
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: linux-accelerators@lists.ozlabs.org
+Description: Size (bytes) of mmio region queue file
+
+What: /sys/class/uacce/<dev_name>/region_dus_size
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: linux-accelerators@lists.ozlabs.org
+Description: Size (bytes) of dus region queue file
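A small sketch of how an application might consult these attributes before
binding to an accelerator. The "hisi_zip-0" device name is purely
illustrative; sysfs values are assumed to be newline-terminated text:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read one uacce attribute and print it as "<attr>: <value>". */
    static void show(const char *attr)
    {
        char path[128], buf[64];
        snprintf(path, sizeof(path),
                 "/sys/class/uacce/hisi_zip-0/%s", attr);
        int fd = open(path, O_RDONLY);
        if (fd < 0)
            return;
        ssize_t n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("%s: %s", attr, buf);
        }
        close(fd);
    }

    int main(void)
    {
        show("api");
        show("available_instances");
        return 0;
    }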
diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups b/Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups
new file mode 100644
index 000000000000..3a2dfe542e8c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-opal-sensor-groups
@@ -0,0 +1,21 @@
+What: /sys/firmware/opal/sensor_groups
+Date: August 2017
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: Sensor groups directory for POWER9 powernv servers
+
+ Each folder in this directory contains a sensor group,
+ classified based on the type of the sensors, such as
+ power, temperature, frequency, or current. A group can
+ also indicate the set of sensors belonging to different
+ owners such as CSM, Profiler, or Job-Scheduler.
+
+What: /sys/firmware/opal/sensor_groups/<sensor_group_name>/clear
+Date: August 2017
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: Sysfs file to clear the min-max of all the sensors
+ belonging to the group.
+
+ Writing 1 to this file will clear the minimum and
+ maximum values of all the sensors in the group.
+ In POWER9, the min-max of a sensor is the historical minimum
+ and maximum value of the sensor cached by OCC.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 1a6cd5397129..bd8a0d19abe6 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -318,3 +318,8 @@ Date: September 2019
Contact: "Hridya Valsaraju" <hridya@google.com>
Description: Average number of valid blocks.
Available when CONFIG_F2FS_STAT_FS=y.
+
+What: /sys/fs/f2fs/<disk>/mounted_time_sec
+Date: February 2020
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description: Shows how long this partition has been mounted, in seconds.
diff --git a/Documentation/ABI/testing/sysfs-kernel-fadump b/Documentation/ABI/testing/sysfs-kernel-fadump
new file mode 100644
index 000000000000..8f7a64a81783
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-fadump
@@ -0,0 +1,40 @@
+What: /sys/kernel/fadump/*
+Date: Dec 2019
+Contact: linuxppc-dev@lists.ozlabs.org
+Description:
+ The /sys/kernel/fadump/* directory is a collection of FADump
+ sysfs files that provide information about the configuration
+ status of Firmware Assisted Dump (FADump).
+
+What: /sys/kernel/fadump/enabled
+Date: Dec 2019
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Primarily used to identify whether FADump is enabled in
+ the kernel.
+User: Kdump service
+
+What: /sys/kernel/fadump/registered
+Date: Dec 2019
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read/write
+ Controls the dump collection feature from userspace.
+ Writing 1 to this file enables the system to collect the
+ dump; writing 0 disables it.
+User: Kdump service
+
+What: /sys/kernel/fadump/release_mem
+Date: Dec 2019
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: write only
+ This is a special sysfs file, available only when
+ the system is booted to capture the vmcore using FADump.
+ It is used to release the memory reserved by FADump to
+ save the crash dump.
+
+What: /sys/kernel/fadump/mem_reserved
+Date: Dec 2019
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Provides the amount of memory, in bytes, reserved by
+ FADump for saving the crash dump.
diff --git a/Documentation/PCI/boot-interrupts.rst b/Documentation/PCI/boot-interrupts.rst
new file mode 100644
index 000000000000..d078ef3eb192
--- /dev/null
+++ b/Documentation/PCI/boot-interrupts.rst
@@ -0,0 +1,155 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Boot Interrupts
+===============
+
+:Author: - Sean V Kelley <sean.v.kelley@linux.intel.com>
+
+Overview
+========
+
+On PCI Express, interrupts are represented with either MSI or inbound
+interrupt messages (Assert_INTx/Deassert_INTx). The integrated IO-APIC in a
+given Core IO converts the legacy interrupt messages from PCI Express to
+MSI interrupts. If the IO-APIC is disabled (via the mask bits in the
+IO-APIC table entries), the messages are routed to the legacy PCH. This
+in-band interrupt mechanism was traditionally necessary for systems that
+did not support the IO-APIC and for boot. Intel in the past has used the
+term "boot interrupts" to describe this mechanism. Further, the PCI Express
+protocol describes this in-band legacy wire-interrupt INTx mechanism for
+I/O devices to signal PCI-style level interrupts. The subsequent paragraphs
+describe problems with the Core IO handling of INTx message routing to the
+PCH and mitigation within BIOS and the OS.
+
+
+Issue
+=====
+
+When in-band legacy INTx messages are forwarded to the PCH, they in turn
+trigger a new interrupt for which the OS likely lacks a handler. When an
+interrupt goes unhandled over time, it is tracked by the Linux kernel as
+a spurious interrupt. The IRQ will be disabled by the Linux kernel after it
+reaches a specific count with the error "nobody cared". This disabled IRQ
+now prevents valid usage by an existing interrupt which may happen to share
+the IRQ line.
+
+ irq 19: nobody cared (try booting with the "irqpoll" option)
+ CPU: 0 PID: 2988 Comm: irq/34-nipalk Tainted: 4.14.87-rt49-02410-g4a640ec-dirty #1
+ Hardware name: National Instruments NI PXIe-8880/NI PXIe-8880, BIOS 2.1.5f1 01/09/2020
+ Call Trace:
+ <IRQ>
+ ? dump_stack+0x46/0x5e
+ ? __report_bad_irq+0x2e/0xb0
+ ? note_interrupt+0x242/0x290
+ ? nNIKAL100_memoryRead16+0x8/0x10 [nikal]
+ ? handle_irq_event_percpu+0x55/0x70
+ ? handle_irq_event+0x4f/0x80
+ ? handle_fasteoi_irq+0x81/0x180
+ ? handle_irq+0x1c/0x30
+ ? do_IRQ+0x41/0xd0
+ ? common_interrupt+0x84/0x84
+ </IRQ>
+
+ handlers:
+ irq_default_primary_handler threaded usb_hcd_irq
+ Disabling IRQ #19
+
+
+Conditions
+==========
+
+The use of threaded interrupts is the most likely condition to trigger
+this problem today. Threaded interrupts may not be reenabled after the IRQ
+handler wakes. These "one shot" conditions mean that the threaded interrupt
+needs to keep the interrupt line masked until the threaded handler has run.
+Especially when dealing with high data rate interrupts, the thread needs to
+run to completion; otherwise some handlers will end up in stack overflows
+since the interrupt of the issuing device is still active.
+
+Affected Chipsets
+=================
+
+The legacy interrupt forwarding mechanism exists today in a number of
+devices including but not limited to chipsets from AMD/ATI, Broadcom, and
+Intel. Changes made through the mitigations below have been applied to
+drivers/pci/quirks.c.
+
+Starting with ICX there are no longer any IO-APICs in the Core IO's
+devices. IO-APIC is only in the PCH. Devices connected to the Core IO's
+PCIe Root Ports will use native MSI/MSI-X mechanisms.
+
+Mitigations
+===========
+
+The mitigations take the form of PCI quirks. The preference has been to
+first identify and make use of a means to disable the routing to the PCH.
+In such a case a quirk to disable boot interrupt generation can be
+added.[1]
+
+ Intel® 6300ESB I/O Controller Hub
+ Alternate Base Address Register:
+ BIE: Boot Interrupt Enable
+ 0 = Boot interrupt is enabled.
+ 1 = Boot interrupt is disabled.
+
+ Intel® Sandy Bridge through Sky Lake based Xeon servers:
+ Coherent Interface Protocol Interrupt Control
+ dis_intx_route2pch/dis_intx_route2ich/dis_intx_route2dmi2:
+ When this bit is set, local INTx messages received from the
+ Intel® Quick Data DMA/PCI Express ports are not routed to the
+ legacy PCH - they are either converted into MSI via the
+ integrated IO-APIC (if the IO-APIC mask bit is clear in the
+ appropriate entries) or cause no further action (when the
+ mask bit is set).
+
+In the absence of a way to directly disable the routing, another approach
+has been to make use of PCI Interrupt pin to INTx routing tables for
+purposes of redirecting the interrupt handler to the rerouted interrupt
+line by default. Therefore, on chipsets where this INTx routing cannot be
+disabled, the Linux kernel will reroute the valid interrupt to its legacy
+interrupt. This redirection of the handler will prevent the occurrence of
+the spurious interrupt detection which would ordinarily disable the IRQ
+line due to excessive unhandled counts.[2]
+
+The config option X86_REROUTE_FOR_BROKEN_BOOT_IRQS exists to enable (or
+disable) the redirection of the interrupt handler to the PCH interrupt
+line. The option can be overridden by either pci=ioapicreroute or
+pci=noioapicreroute.[3]
+
+
+More Documentation
+==================
+
+There is an overview of the legacy interrupt handling in several datasheets
+(6300ESB and 6700PXH below). While the handling is largely the same across
+chipsets, these datasheets provide insight into how it has evolved.
+
+Example of disabling of the boot interrupt
+------------------------------------------
+
+Intel® 6300ESB I/O Controller Hub (Document # 300641-004US)
+ 5.7.3 Boot Interrupt
+ https://www.intel.com/content/dam/doc/datasheet/6300esb-io-controller-hub-datasheet.pdf
+
+Intel® Xeon® Processor E5-1600/2400/2600/4600 v3 Product Families
+Datasheet - Volume 2: Registers (Document # 330784-003)
+ 6.6.41 cipintrc Coherent Interface Protocol Interrupt Control
+ https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e5-v3-datasheet-vol-2.pdf
+
+Example of handler rerouting
+----------------------------
+
+Intel® 6700PXH 64-bit PCI Hub (Document # 302628)
+ 2.15.2 PCI Express Legacy INTx Support and Boot Interrupt
+ https://www.intel.com/content/dam/doc/datasheet/6700pxh-64-bit-pci-hub-datasheet.pdf
+
+
+If you have any legacy PCI interrupt questions that aren't answered, email me.
+
+Cheers,
+ Sean V Kelley
+ sean.v.kelley@linux.intel.com
+
+[1] https://lore.kernel.org/r/12131949181903-git-send-email-sassmann@suse.de/
+[2] https://lore.kernel.org/r/12131949182094-git-send-email-sassmann@suse.de/
+[3] https://lore.kernel.org/r/487C8EA7.6020205@suse.de/
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst
index 6768305e4c26..8f66feaafd4f 100644
--- a/Documentation/PCI/index.rst
+++ b/Documentation/PCI/index.rst
@@ -16,3 +16,4 @@ Linux PCI Bus Subsystem
pci-error-recovery
pcieaer-howto
endpoint/index
+ boot-interrupts
diff --git a/Documentation/PCI/pcieaer-howto.rst b/Documentation/PCI/pcieaer-howto.rst
index 18bdefaafd1a..0b36b9ebfa4b 100644
--- a/Documentation/PCI/pcieaer-howto.rst
+++ b/Documentation/PCI/pcieaer-howto.rst
@@ -156,12 +156,6 @@ default reset_link function, but different upstream ports might
have different specifications to reset pci express link, so all
upstream ports should provide their own reset_link functions.
-In struct pcie_port_service_driver, a new pointer, reset_link, is
-added.
-::
-
- pci_ers_result_t (*reset_link) (struct pci_dev *dev);
-
Section 3.2.2.2 provides more detailed info on when to call
reset_link.
@@ -212,15 +206,10 @@ error_detected(dev, pci_channel_io_frozen) to all drivers within
a hierarchy in question. Then, performing link reset at upstream is
necessary. As different kinds of devices might use different approaches
to reset link, AER port service driver is required to provide the
-function to reset link. Firstly, kernel looks for if the upstream
-component has an aer driver. If it has, kernel uses the reset_link
-callback of the aer driver. If the upstream component has no aer driver
-and the port is downstream port, we will perform a hot reset as the
-default by setting the Secondary Bus Reset bit of the Bridge Control
-register associated with the downstream port. As for upstream ports,
-they should provide their own aer service drivers with reset_link
-function. If error_detected returns PCI_ERS_RESULT_CAN_RECOVER and
-reset_link returns PCI_ERS_RESULT_RECOVERED, the error handling goes
+function to reset the link via the callback parameter of the
+pcie_do_recovery() function. If reset_link is not NULL, the recovery
+function will use it to reset the link. If error_detected returns
+PCI_ERS_RESULT_CAN_RECOVER and reset_link returns
+PCI_ERS_RESULT_RECOVERED, the error handling goes
to mmio_enabled.
helper functions
@@ -243,9 +232,9 @@ messages to root port when an error is detected.
::
- int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);`
+ int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
-pci_cleanup_aer_uncorrect_error_status cleanups the uncorrectable
+pci_aer_clear_nonfatal_status clears non-fatal errors in the uncorrectable
error status register.
Frequent Asked Questions
diff --git a/Documentation/admin-guide/binfmt-misc.rst b/Documentation/admin-guide/binfmt-misc.rst
index 95c93bbe408a..7a864131e5ea 100644
--- a/Documentation/admin-guide/binfmt-misc.rst
+++ b/Documentation/admin-guide/binfmt-misc.rst
@@ -140,8 +140,8 @@ Hints
-----
If you want to pass special arguments to your interpreter, you can
-write a wrapper script for it. See Documentation/admin-guide/java.rst for an
-example.
+write a wrapper script for it.
+See :doc:`Documentation/admin-guide/java.rst <./java>` for an example.
Your interpreter should NOT look in the PATH for the filename; the kernel
passes it the full filename (or the file descriptor) to use. Using ``$PATH`` can
diff --git a/Documentation/admin-guide/cgroup-v1/cpusets.rst b/Documentation/admin-guide/cgroup-v1/cpusets.rst
index 86a6ae995d54..7ade3abd342a 100644
--- a/Documentation/admin-guide/cgroup-v1/cpusets.rst
+++ b/Documentation/admin-guide/cgroup-v1/cpusets.rst
@@ -223,6 +223,17 @@ cpu_online_mask using a CPU hotplug notifier, and the mems file
automatically tracks the value of node_states[N_MEMORY]--i.e.,
nodes with memory--using the cpuset_track_online_nodes() hook.
+The cpuset.effective_cpus and cpuset.effective_mems files are
+normally read-only copies of cpuset.cpus and cpuset.mems files
+respectively. If the cpuset cgroup filesystem is mounted with the
+special "cpuset_v2_mode" option, the behavior of these files will become
+similar to the corresponding files in cpuset v2. In other words, hotplug
+events will not change cpuset.cpus and cpuset.mems. Those events will
+only affect cpuset.effective_cpus and cpuset.effective_mems which show
+the actual cpus and memory nodes that are currently used by this cpuset.
+See Documentation/admin-guide/cgroup-v2.rst for more information about
+cpuset v2 behavior.
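+
+For example, a minimal sketch of mounting the cpuset hierarchy in this
+mode (the mount point is illustrative)::
+
+  # mount -t cgroup -o cpuset,cpuset_v2_mode cpuset /sys/fs/cgroup/cpuset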
+
1.4 What are exclusive cpusets ?
--------------------------------
diff --git a/Documentation/admin-guide/cgroup-v1/hugetlb.rst b/Documentation/admin-guide/cgroup-v1/hugetlb.rst
index a3902aa253a9..338f2c7d7a1c 100644
--- a/Documentation/admin-guide/cgroup-v1/hugetlb.rst
+++ b/Documentation/admin-guide/cgroup-v1/hugetlb.rst
@@ -2,13 +2,6 @@
HugeTLB Controller
==================
-The HugeTLB controller allows to limit the HugeTLB usage per control group and
-enforces the controller limit during page fault. Since HugeTLB doesn't
-support page reclaim, enforcing the limit at page fault time implies that,
-the application will get SIGBUS signal if it tries to access HugeTLB pages
-beyond its limit. This requires the application to know beforehand how much
-HugeTLB pages it would require for its use.
-
HugeTLB controller can be created by first mounting the cgroup filesystem.
# mount -t cgroup -o hugetlb none /sys/fs/cgroup
@@ -28,10 +21,14 @@ process (bash) into it.
Brief summary of control files::
- hugetlb.<hugepagesize>.limit_in_bytes # set/show limit of "hugepagesize" hugetlb usage
- hugetlb.<hugepagesize>.max_usage_in_bytes # show max "hugepagesize" hugetlb usage recorded
- hugetlb.<hugepagesize>.usage_in_bytes # show current usage for "hugepagesize" hugetlb
- hugetlb.<hugepagesize>.failcnt # show the number of allocation failure due to HugeTLB limit
+ hugetlb.<hugepagesize>.rsvd.limit_in_bytes # set/show limit of "hugepagesize" hugetlb reservations
+ hugetlb.<hugepagesize>.rsvd.max_usage_in_bytes # show max "hugepagesize" hugetlb reservations and no-reserve faults
+ hugetlb.<hugepagesize>.rsvd.usage_in_bytes # show current reservations and no-reserve faults for "hugepagesize" hugetlb
+ hugetlb.<hugepagesize>.rsvd.failcnt # show the number of allocation failure due to HugeTLB reservation limit
+ hugetlb.<hugepagesize>.limit_in_bytes # set/show limit of "hugepagesize" hugetlb faults
+ hugetlb.<hugepagesize>.max_usage_in_bytes # show max "hugepagesize" hugetlb usage recorded
+ hugetlb.<hugepagesize>.usage_in_bytes # show current usage for "hugepagesize" hugetlb
+ hugetlb.<hugepagesize>.failcnt # show the number of allocation failure due to HugeTLB usage limit
For a system supporting three hugepage sizes (64k, 32M and 1G), the control
files include::
@@ -40,11 +37,95 @@ files include::
hugetlb.1GB.max_usage_in_bytes
hugetlb.1GB.usage_in_bytes
hugetlb.1GB.failcnt
+ hugetlb.1GB.rsvd.limit_in_bytes
+ hugetlb.1GB.rsvd.max_usage_in_bytes
+ hugetlb.1GB.rsvd.usage_in_bytes
+ hugetlb.1GB.rsvd.failcnt
hugetlb.64KB.limit_in_bytes
hugetlb.64KB.max_usage_in_bytes
hugetlb.64KB.usage_in_bytes
hugetlb.64KB.failcnt
+ hugetlb.64KB.rsvd.limit_in_bytes
+ hugetlb.64KB.rsvd.max_usage_in_bytes
+ hugetlb.64KB.rsvd.usage_in_bytes
+ hugetlb.64KB.rsvd.failcnt
hugetlb.32MB.limit_in_bytes
hugetlb.32MB.max_usage_in_bytes
hugetlb.32MB.usage_in_bytes
hugetlb.32MB.failcnt
+ hugetlb.32MB.rsvd.limit_in_bytes
+ hugetlb.32MB.rsvd.max_usage_in_bytes
+ hugetlb.32MB.rsvd.usage_in_bytes
+ hugetlb.32MB.rsvd.failcnt
+
+
+1. Page fault accounting
+
+hugetlb.<hugepagesize>.limit_in_bytes
+hugetlb.<hugepagesize>.max_usage_in_bytes
+hugetlb.<hugepagesize>.usage_in_bytes
+hugetlb.<hugepagesize>.failcnt
+
+The HugeTLB controller allows users to limit the HugeTLB usage (page fault) per
+control group and enforces the limit during page fault. Since HugeTLB
+doesn't support page reclaim, enforcing the limit at page fault time implies
+that the application will get a SIGBUS signal if it tries to fault in HugeTLB
+pages beyond its limit. Therefore the application needs to know exactly how many
+HugeTLB pages it uses beforehand, and the sysadmin needs to make sure that
+there are enough available on the machine for all the users to avoid processes
+getting SIGBUS.
+
+
+2. Reservation accounting
+
+hugetlb.<hugepagesize>.rsvd.limit_in_bytes
+hugetlb.<hugepagesize>.rsvd.max_usage_in_bytes
+hugetlb.<hugepagesize>.rsvd.usage_in_bytes
+hugetlb.<hugepagesize>.rsvd.failcnt
+
+The HugeTLB controller allows limiting the HugeTLB reservations per control
+group and enforces the controller limit at reservation time and at the fault of
+HugeTLB memory for which no reservation exists. Since reservation limits are
+enforced at reservation time (on mmap or shmget), reservation limits never cause
+the application to get a SIGBUS signal if the memory was reserved beforehand. For
+MAP_NORESERVE allocations, the reservation limit behaves the same as the fault
+limit, enforcing memory usage at fault time and causing the application to
+receive a SIGBUS if it crosses its limit.
+
+Reservation limits are superior to the page fault limits described above, since
+reservation limits are enforced at reservation time (on mmap or shmget), and
+never cause the application to get a SIGBUS signal if the memory was reserved
+beforehand. This allows for easier fallback to alternatives such as
+non-HugeTLB memory, for example. In the case of page fault accounting, it's very
+hard to avoid processes getting SIGBUS since the sysadmin needs to know precisely
+the HugeTLB usage of all the tasks in the system and make sure there are enough
+pages to satisfy all requests. Avoiding tasks getting SIGBUS on overcommitted
+systems is practically impossible with page fault accounting.
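+
+As an illustrative sketch (assuming the mount point shown earlier and a
+child cgroup named g1; the page size, limit value and paths are examples
+only), a reservation limit can be set and inspected with::
+
+  # echo 10G > /sys/fs/cgroup/g1/hugetlb.1GB.rsvd.limit_in_bytes
+  # cat /sys/fs/cgroup/g1/hugetlb.1GB.rsvd.usage_in_bytes
+  # cat /sys/fs/cgroup/g1/hugetlb.1GB.rsvd.failcnt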
+
+
+3. Caveats with shared memory
+
+For shared HugeTLB memory, both HugeTLB reservation and page faults are charged
+to the first task that causes the memory to be reserved or faulted, and all
+subsequent uses of this reserved or faulted memory are done without charging.
+
+Shared HugeTLB memory is only uncharged when it is unreserved or deallocated.
+This is usually when the HugeTLB file is deleted, and not when the task that
+caused the reservation or fault has exited.
+
+
+4. Caveats with HugeTLB cgroup offline
+
+When a HugeTLB cgroup goes offline with some reservations or faults still
+charged to it, the behavior is as follows:
+
+- The fault charges are charged to the parent HugeTLB cgroup (reparented),
+- the reservation charges remain on the offline HugeTLB cgroup.
+
+This means that if a HugeTLB cgroup gets offlined while there is still HugeTLB
+reservations charged to it, that cgroup persists as a zombie until all HugeTLB
+reservations are uncharged. HugeTLB reservations behave in this manner to match
+the memory controller, whose cgroups also persist as zombies until all charged
+memory is uncharged. Also, the tracking of HugeTLB reservations is a bit more
+complex compared to the tracking of HugeTLB faults, so it is significantly
+harder to reparent reservations at offline time.
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index fbb111616705..bcc80269bb6a 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -188,6 +188,17 @@ cgroup v2 currently supports the following mount options.
modified through remount from the init namespace. The mount
option is ignored on non-init namespace mounts.
+ memory_recursiveprot
+
+ Recursively apply memory.min and memory.low protection to
+ entire subtrees, without requiring explicit downward
+ propagation into leaf cgroups. This allows protecting entire
+ subtrees from one another, while retaining free competition
+ within those subtrees. This should have been the default
+ behavior but is a mount-option to avoid regressing setups
+ relying on the original semantics (e.g. specifying bogusly
+ high 'bypass' protection values at higher tree levels).
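+
+ For example, a minimal sketch of enabling this option at mount time
+ (the device name and mount point are illustrative)::
+
+   # mount -t cgroup2 -o memory_recursiveprot none /sys/fs/cgroup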
+
Organizing Processes and Threads
--------------------------------
diff --git a/Documentation/admin-guide/dynamic-debug-howto.rst b/Documentation/admin-guide/dynamic-debug-howto.rst
index 252e5ef324e5..0dc2eb8e44e5 100644
--- a/Documentation/admin-guide/dynamic-debug-howto.rst
+++ b/Documentation/admin-guide/dynamic-debug-howto.rst
@@ -54,6 +54,9 @@ If you make a mistake with the syntax, the write will fail thus::
<debugfs>/dynamic_debug/control
-bash: echo: write error: Invalid argument
+Note, for systems without 'debugfs' enabled, the control file can be
+found in ``/proc/dynamic_debug/control``.
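+
+For example, any of the query commands shown in this document work the
+same way against the /proc path (the match specification below is
+illustrative)::
+
+  echo 'module usbcore +p' > /proc/dynamic_debug/control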
+
Viewing Dynamic Debug Behaviour
===============================
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ed73df5f1369..86aae1fa099a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -22,11 +22,13 @@
default: 0
acpi_backlight= [HW,ACPI]
- acpi_backlight=vendor
- acpi_backlight=video
- If set to vendor, prefer vendor specific driver
+ { vendor | video | native | none }
+ If set to vendor, prefer vendor-specific driver
(e.g. thinkpad_acpi, sony_acpi, etc.) instead
of the ACPI video.ko driver.
+ If set to video, use the ACPI video.ko driver.
+ If set to native, use the device's native backlight mode.
+ If set to none, disable the ACPI backlight interface.
acpi_force_32bit_fadt_addr
force FADT to use 32 bit addresses rather than the
@@ -2571,13 +2573,22 @@
For details see: Documentation/admin-guide/hw-vuln/mds.rst
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
- Amount of memory to be used when the kernel is not able
- to see the whole system memory or for test.
+ Amount of memory to be used in the following cases:
+
+ 1. for testing;
+ 2. when the kernel is not able to see the whole system
+    memory;
+ 3. when memory that lies after the 'mem=' boundary is
+    excluded from the hypervisor and then assigned to
+    KVM guests.
+
[X86] Work as limiting max address. Use together
with memmap= to avoid physical address space collisions.
Without memmap= PCI devices could be placed at addresses
belonging to unused RAM.
+ Note that this only takes effect at boot time since,
+ in case 3 above, memory may need to be hot-added after
+ boot if the hypervisor's system memory is not sufficient.
+
mem=nopentium [BUGS=X86-32] Disable usage of 4MB pages for kernel
memory.
@@ -3720,6 +3731,9 @@
Override pmtimer IOPort with a hex value.
e.g. pmtmr=0x508
+ pm_debug_messages [SUSPEND,KNL]
+ Enable suspend/resume debug messages during boot up.
+
pnp.debug=1 [PNP]
Enable PNP debug messages (depends on the
CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time
@@ -3821,6 +3835,11 @@
before loading.
See Documentation/admin-guide/blockdev/ramdisk.rst.
+ prot_virt= [S390] enable hosting protected virtual machines
+ isolated from the hypervisor (if the hardware
+ supports it).
+ Format: <bool>
+
psi= [KNL] Enable or disable pressure stall information
tracking.
Format: <bool>
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index bd5714547cee..2f31de8f7c74 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -310,6 +310,11 @@ thp_fault_fallback
is incremented if a page fault fails to allocate
a huge page and instead falls back to using small pages.
+thp_fault_fallback_charge
+ is incremented if a page fault fails to charge a huge page and
+ instead falls back to using small pages even though the
+ allocation was successful.
+
thp_collapse_alloc_failed
is incremented if khugepaged found a range
of pages that should be collapsed into one huge page but failed
@@ -319,6 +324,15 @@ thp_file_alloc
is incremented every time a file huge page is successfully
allocated.
+thp_file_fallback
+ is incremented if an attempt to allocate a file huge page
+ fails and the allocation falls back to using small pages.
+
+thp_file_fallback_charge
+ is incremented if a file huge page cannot be charged and instead
+ falls back to using small pages even though the allocation was
+ successful.
+
thp_file_mapped
is incremented every time a file huge page is mapped into
user address space.
diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
index 5048cf661a8a..c30176e67900 100644
--- a/Documentation/admin-guide/mm/userfaultfd.rst
+++ b/Documentation/admin-guide/mm/userfaultfd.rst
@@ -108,6 +108,57 @@ UFFDIO_COPY. They're atomic as in guaranteeing that nothing can see an
half copied page since it'll keep userfaulting until the copy has
finished.
+Notes:
+
+- If you requested UFFDIO_REGISTER_MODE_MISSING when registering then
+ you must provide some kind of page in your thread after reading from
+ the uffd. You must provide either UFFDIO_COPY or UFFDIO_ZEROPAGE.
+ The normal behavior of the OS automatically providing a zero page on
+ an anonymous mapping is not in place.
+
+- None of the page-delivering ioctls default to the range that you
+ registered with. You must fill in all fields for the appropriate
+ ioctl struct including the range.
+
+- You get the address of the access that triggered the missing page
+ event out of a struct uffd_msg that you read in the thread from the
+ uffd. You can supply as many pages as you want with UFFDIO_COPY or
+ UFFDIO_ZEROPAGE. Keep in mind that unless you used DONTWAKE then
+ the first of any of those IOCTLs wakes up the faulting thread.
+
+- Be sure to test for all errors including (pollfd[0].revents &
+ POLLERR). This can happen, e.g., when the ranges supplied were
+ incorrect.
+
+Write Protect Notifications
+---------------------------
+
+This is equivalent to (but faster than) using mprotect and a SIGSEGV
+signal handler.
+
+Firstly you need to register a range with UFFDIO_REGISTER_MODE_WP.
+Instead of using mprotect(2) you use ioctl(uffd, UFFDIO_WRITEPROTECT,
+struct uffdio_writeprotect *) while mode = UFFDIO_WRITEPROTECT_MODE_WP
+in the struct passed in. The range does not default to and does not
+have to be identical to the range you registered with. You can write
+protect as many ranges as you like (inside the registered range).
+Then, in the thread reading from uffd the struct will have
+msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP set. Now you send
+ioctl(uffd, UFFDIO_WRITEPROTECT, struct uffdio_writeprotect *) again
+while pagefault.mode does not have UFFDIO_WRITEPROTECT_MODE_WP set.
+This wakes up the thread which will continue to run with writes. This
+allows you to do the bookkeeping about the write in the uffd reading
+thread before the ioctl.
+
+If you registered with both UFFDIO_REGISTER_MODE_MISSING and
+UFFDIO_REGISTER_MODE_WP then you need to think about the sequence in
+which you supply a page and undo write protect. Note that there is a
+difference between writes into a WP area and into a !WP area. The
+former will have UFFD_PAGEFAULT_FLAG_WP set, the latter
+UFFD_PAGEFAULT_FLAG_WRITE. The latter did not fail on protection but
+you still need to supply a page when UFFDIO_REGISTER_MODE_MISSING was
+used.
+
QEMU/KVM
========
diff --git a/Documentation/admin-guide/pm/suspend-flows.rst b/Documentation/admin-guide/pm/suspend-flows.rst
new file mode 100644
index 000000000000..c479d7462647
--- /dev/null
+++ b/Documentation/admin-guide/pm/suspend-flows.rst
@@ -0,0 +1,270 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=========================
+System Suspend Code Flows
+=========================
+
+:Copyright: |copy| 2020 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+At least one global system-wide transition needs to be carried out for the
+system to get from the working state into one of the supported
+:doc:`sleep states <sleep-states>`. Hibernation requires more than one
+transition to occur for this purpose, but the other sleep states, commonly
+referred to as *system-wide suspend* (or simply *system suspend*) states, need
+only one.
+
+For those sleep states, the transition from the working state of the system into
+the target sleep state is referred to as *system suspend* too (in the majority
+of cases, whether this means a transition or a sleep state of the system should
+be clear from the context) and the transition back from the sleep state into the
+working state is referred to as *system resume*.
+
+The kernel code flows associated with the suspend and resume transitions for
+different sleep states of the system are quite similar, but there are some
+significant differences between the :ref:`suspend-to-idle <s2idle>` code flows
+and the code flows related to the :ref:`suspend-to-RAM <s2ram>` and
+:ref:`standby <standby>` sleep states.
+
+The :ref:`suspend-to-RAM <s2ram>` and :ref:`standby <standby>` sleep states
+cannot be implemented without platform support and the difference between them
+boils down to the platform-specific actions carried out by the suspend and
+resume hooks that need to be provided by the platform driver to make them
+available. Apart from that, the suspend and resume code flows for these sleep
+states are mostly identical, so they both together will be referred to as
+*platform-dependent suspend* states in what follows.
+
+
+.. _s2idle_suspend:
+
+Suspend-to-idle Suspend Code Flow
+=================================
+
+The following steps are taken in order to transition the system from the working
+state to the :ref:`suspend-to-idle <s2idle>` sleep state:
+
+ 1. Invoking system-wide suspend notifiers.
+
+ Kernel subsystems can register callbacks to be invoked when the suspend
+ transition is about to occur and when the resume transition has finished.
+
+ That allows them to prepare for the change of the system state and to clean
+ up after getting back to the working state.
+
+ 2. Freezing tasks.
+
+ Tasks are frozen primarily in order to avoid unchecked hardware accesses
+ from user space through MMIO regions or I/O registers exposed directly to
+ it and to prevent user space from entering the kernel while the next step
+ of the transition is in progress (which might have been problematic for
+ various reasons).
+
+ All user space tasks are intercepted as though they were sent a signal and
+ put into uninterruptible sleep until the end of the subsequent system resume
+ transition.
+
+ The kernel threads that choose to be frozen during system suspend for
+ specific reasons are frozen subsequently, but they are not intercepted.
+ Instead, they are expected to periodically check whether or not they need
+ to be frozen and to put themselves into uninterruptible sleep if so. [Note,
+ however, that kernel threads can use locking and other concurrency controls
+ available in kernel space to synchronize themselves with system suspend and
+ resume, which can be much more precise than the freezing, so the latter is
+ not a recommended option for kernel threads.]
+
+ 3. Suspending devices and reconfiguring IRQs.
+
+ Devices are suspended in four phases called *prepare*, *suspend*,
+ *late suspend* and *noirq suspend* (see :ref:`driverapi_pm_devices` for more
+ information on what exactly happens in each phase).
+
+ Every device is visited in each phase, but typically it is not physically
+ accessed in more than two of them.
+
+ The runtime PM API is disabled for every device during the *late* suspend
+ phase and high-level ("action") interrupt handlers are prevented from being
+ invoked before the *noirq* suspend phase.
+
+ Interrupts are still handled after that, but they are only acknowledged to
+ interrupt controllers without performing any device-specific actions that
+ would be triggered in the working state of the system (those actions are
+ deferred till the subsequent system resume transition as described
+ `below <s2idle_resume_>`_).
+
+ IRQs associated with system wakeup devices are "armed" so that the resume
+ transition of the system is started when one of them signals an event.
+
+ 4. Freezing the scheduler tick and suspending timekeeping.
+
+ When all devices have been suspended, CPUs enter the idle loop and are put
+ into the deepest available idle state. While doing that, each of them
+ "freezes" its own scheduler tick so that the timer events associated with
+ the tick do not occur until the CPU is woken up by another interrupt source.
+
+ The last CPU to enter the idle state also stops the timekeeping which
+ (among other things) prevents high resolution timers from triggering going
+ forward until the first CPU that is woken up restarts the timekeeping.
+ That allows the CPUs to stay in the deep idle state relatively long in one
+ go.
+
+ From this point on, the CPUs can only be woken up by non-timer hardware
+ interrupts. If that happens, they go back to the idle state unless the
+ interrupt that woke up one of them comes from an IRQ that has been armed for
+ system wakeup, in which case the system resume transition is started.
+
+
+.. _s2idle_resume:
+
+Suspend-to-idle Resume Code Flow
+================================
+
+The following steps are taken in order to transition the system from the
+:ref:`suspend-to-idle <s2idle>` sleep state into the working state:
+
+ 1. Resuming timekeeping and unfreezing the scheduler tick.
+
+ When one of the CPUs is woken up (by a non-timer hardware interrupt), it
+ leaves the idle state entered in the last step of the preceding suspend
+ transition, restarts the timekeeping (unless it has been restarted already
+ by another CPU that woke up earlier) and the scheduler tick on that CPU is
+ unfrozen.
+
+ If the interrupt that has woken up the CPU was armed for system wakeup,
+ the system resume transition begins.
+
+ 2. Resuming devices and restoring the working-state configuration of IRQs.
+
+ Devices are resumed in four phases called *noirq resume*, *early resume*,
+ *resume* and *complete* (see :ref:`driverapi_pm_devices` for more
+ information on what exactly happens in each phase).
+
+ Every device is visited in each phase, but typically it is not physically
+ accessed in more than two of them.
+
+ The working-state configuration of IRQs is restored after the *noirq* resume
+ phase and the runtime PM API is re-enabled for every device whose driver
+ supports it during the *early* resume phase.
+
+ 3. Thawing tasks.
+
+ Tasks frozen in step 2 of the preceding `suspend <s2idle_suspend_>`_
+ transition are "thawed", which means that they are woken up from the
+ uninterruptible sleep that they went into at that time and user space tasks
+ are allowed to exit the kernel.
+
+ 4. Invoking system-wide resume notifiers.
+
+ This is analogous to step 1 of the `suspend <s2idle_suspend_>`_ transition
+ and the same set of callbacks is invoked at this point, but a different
+ "notification type" parameter value is passed to them.
+
+
+Platform-dependent Suspend Code Flow
+====================================
+
+The following steps are taken in order to transition the system from the working
+state to a platform-dependent suspend state:
+
+ 1. Invoking system-wide suspend notifiers.
+
+ This step is the same as step 1 of the suspend-to-idle suspend transition
+ described `above <s2idle_suspend_>`_.
+
+ 2. Freezing tasks.
+
+ This step is the same as step 2 of the suspend-to-idle suspend transition
+ described `above <s2idle_suspend_>`_.
+
+ 3. Suspending devices and reconfiguring IRQs.
+
+ This step is analogous to step 3 of the suspend-to-idle suspend transition
+ described `above <s2idle_suspend_>`_, but the arming of IRQs for system
+ wakeup generally does not have any effect on the platform.
+
+ There are platforms that can go into a very deep low-power state internally
+ when all CPUs in them are in sufficiently deep idle states and all I/O
+ devices have been put into low-power states. On those platforms,
+ suspend-to-idle can reduce system power very effectively.
+
+ On the other platforms, however, low-level components (like interrupt
+ controllers) need to be turned off in a platform-specific way (implemented
+ in the hooks provided by the platform driver) to achieve comparable power
+ reduction.
+
+ That usually prevents in-band hardware interrupts from waking up the system,
+ which must be done in a special platform-dependent way. Then, the
+ configuration of system wakeup sources usually starts when system wakeup
+ devices are suspended and is finalized by the platform suspend hooks later
+ on.
+
+ 4. Disabling non-boot CPUs.
+
+ On some platforms the suspend hooks mentioned above must run in a one-CPU
+ configuration of the system (in particular, the hardware cannot be accessed
+ by any code running in parallel with the platform suspend hooks that may,
+ and often do, trap into the platform firmware in order to finalize the
+ suspend transition).
+
+ For this reason, the CPU offline/online (CPU hotplug) framework is used
+ to take all of the CPUs in the system, except for one (the boot CPU),
+ offline (typically, the CPUs that have been taken offline go into deep idle
+ states).
+
+ This means that all tasks are migrated away from those CPUs and all IRQs are
+ rerouted to the only CPU that remains online.
+
+ 5. Suspending core system components.
+
+ This prepares the core system components for (possibly) losing power going
+ forward and suspends the timekeeping.
+
+ 6. Platform-specific power removal.
+
+ This is expected to remove power from all of the system components except
+ for the memory controller and RAM (in order to preserve the contents of the
+ latter) and some devices designated for system wakeup.
+
+ In many cases control is passed to the platform firmware which is expected
+ to finalize the suspend transition as needed.
+
+
+Platform-dependent Resume Code Flow
+===================================
+
+The following steps are taken in order to transition the system from a
+platform-dependent suspend state into the working state:
+
+ 1. Platform-specific system wakeup.
+
+ The platform is woken up by a signal from one of the designated system
+ wakeup devices (which need not be an in-band hardware interrupt) and
+ control is passed back to the kernel (the working configuration of the
+ platform may need to be restored by the platform firmware before the
+ kernel gets control again).
+
+ 2. Resuming core system components.
+
+ The suspend-time configuration of the core system components is restored and
+ the timekeeping is resumed.
+
+ 3. Re-enabling non-boot CPUs.
+
+ The CPUs disabled in step 4 of the preceding suspend transition are taken
+ back online and their suspend-time configuration is restored.
+
+ 4. Resuming devices and restoring the working-state configuration of IRQs.
+
+ This step is the same as step 2 of the suspend-to-idle resume transition
+ described `above <s2idle_resume_>`_.
+
+ 5. Thawing tasks.
+
+ This step is the same as step 3 of the suspend-to-idle resume transition
+ described `above <s2idle_resume_>`_.
+
+ 6. Invoking system-wide resume notifiers.
+
+ This step is the same as step 4 of the suspend-to-idle resume transition
+ described `above <s2idle_resume_>`_.
diff --git a/Documentation/admin-guide/pm/system-wide.rst b/Documentation/admin-guide/pm/system-wide.rst
index 2b1f987b34f0..1a1924d71006 100644
--- a/Documentation/admin-guide/pm/system-wide.rst
+++ b/Documentation/admin-guide/pm/system-wide.rst
@@ -8,3 +8,4 @@ System-Wide Power Management
:maxdepth: 2
sleep-states
+ suspend-flows
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 64aeee1009ca..0329a4d3fa9e 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -128,6 +128,9 @@ allowed to examine the unevictable lru (mlocked pages) for pages to compact.
This should be used on systems where stalls for minor page faults are an
acceptable trade for large contiguous free memory. Set to 0 to prevent
compaction from moving pages that are unevictable. Default value is 1.
+On CONFIG_PREEMPT_RT the default value is 0, in order to avoid page faults due
+to compaction, which would block the task from becoming active until the fault
+is resolved.
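+
+For example, a sketch of disabling it at run time (requires root)::
+
+	echo 0 > /proc/sys/vm/compact_unevictable_allowed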
dirty_background_bytes
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index be726986ff75..2adffb3f7914 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -73,6 +73,9 @@ File Mapping and Page Cache
.. kernel-doc:: mm/truncate.c
:export:
+.. kernel-doc:: include/linux/pagemap.h
+ :internal:
+
Memory pools
============
diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst
index 1d490155ecd7..2e939ff10b86 100644
--- a/Documentation/core-api/pin_user_pages.rst
+++ b/Documentation/core-api/pin_user_pages.rst
@@ -52,8 +52,22 @@ Which flags are set by each wrapper
For these pin_user_pages*() functions, FOLL_PIN is OR'd in with whatever gup
flags the caller provides. The caller is required to pass in a non-null struct
-pages* array, and the function then pin pages by incrementing each by a special
-value. For now, that value is +1, just like get_user_pages*().::
+pages* array, and the function then pins pages by incrementing each by a special
+value: GUP_PIN_COUNTING_BIAS.
+
+For huge pages (and in fact, any compound page of more than 2 pages), the
+GUP_PIN_COUNTING_BIAS scheme is not used. Instead, an exact form of pin counting
+is achieved, by using the 3rd struct page in the compound page. A new struct
+page field, hpage_pinned_refcount, has been added in order to support this.
+
+This approach for compound pages avoids the counting upper limit problems that
+are discussed below. Those limitations would have been aggravated severely by
+huge pages, because each tail page adds a refcount to the head page. And in
+fact, testing revealed that, without a separate hpage_pinned_refcount field,
+page overflows were seen in some huge page stress tests.
+
+This also means that huge pages and compound pages (of order > 1) do not suffer
+from the false positives problem that is mentioned below.::
Function
--------
@@ -99,27 +113,6 @@ pages:
This also leads to limitations: there are only 31-10==21 bits available for a
counter that increments 10 bits at a time.
-TODO: for 1GB and larger huge pages, this is cutting it close. That's because
-when pin_user_pages() follows such pages, it increments the head page by "1"
-(where "1" used to mean "+1" for get_user_pages(), but now means "+1024" for
-pin_user_pages()) for each tail page. So if you have a 1GB huge page:
-
-* There are 256K (18 bits) worth of 4 KB tail pages.
-* There are 21 bits available to count up via GUP_PIN_COUNTING_BIAS (that is,
- 10 bits at a time)
-* There are 21 - 18 == 3 bits available to count. Except that there aren't,
- because you need to allow for a few normal get_page() calls on the head page,
- as well. Fortunately, the approach of using addition, rather than "hard"
- bitfields, within page->_refcount, allows for sharing these bits gracefully.
- But we're still looking at about 8 references.
-
-This, however, is a missing feature more than anything else, because it's easily
-solved by addressing an obvious inefficiency in the original get_user_pages()
-approach of retrieving pages: stop treating all the pages as if they were
-PAGE_SIZE. Retrieve huge pages as huge pages. The callers need to be aware of
-this, so some work is required. Once that's in place, this limitation mostly
-disappears from view, because there will be ample refcounting range available.
-
* Callers must specifically request "dma-pinned tracking of pages". In other
words, just calling get_user_pages() will not suffice; a new set of functions,
pin_user_page() and related, must be used.
@@ -173,8 +166,8 @@ CASE 4: Pinning for struct page manipulation only
-------------------------------------------------
Here, normal GUP calls are sufficient, so neither flag needs to be set.
-page_dma_pinned(): the whole point of pinning
-=============================================
+page_maybe_dma_pinned(): the whole point of pinning
+===================================================
The whole point of marking pages as "DMA-pinned" or "gup-pinned" is to be able
to query, "is this page DMA-pinned?" That allows code such as page_mkclean()
@@ -186,7 +179,7 @@ and debates (see the References at the end of this document). It's a TODO item
here: fill in the details once that's worked out. Meanwhile, it's safe to say
that having this available: ::
- static inline bool page_dma_pinned(struct page *page)
+ static inline bool page_maybe_dma_pinned(struct page *page)
...is a prerequisite to solving the long-running gup+DMA problem.
@@ -215,12 +208,42 @@ has the following new calls to exercise the new pin*() wrapper functions:
You can monitor how many total dma-pinned pages have been acquired and released
since the system was booted, via two new /proc/vmstat entries: ::
- /proc/vmstat/nr_foll_pin_requested
- /proc/vmstat/nr_foll_pin_requested
+ /proc/vmstat/nr_foll_pin_acquired
+ /proc/vmstat/nr_foll_pin_released
+
+Under normal conditions, these two values will be equal unless there are any
+long-term [R]DMA pins in place, or during pin/unpin transitions.
+
+* nr_foll_pin_acquired: This is the number of logical pins that have been
+ acquired since the system was powered on. For huge pages, the head page is
+ pinned once for each page (head page and each tail page) within the huge page.
+ This follows the same sort of behavior that get_user_pages() uses for huge
+ pages: the head page is refcounted once for each tail or head page in the huge
+ page, when get_user_pages() is applied to a huge page.
+
+* nr_foll_pin_released: The number of logical pins that have been released since
+ the system was powered on. Note that pages are released (unpinned) on a
+ PAGE_SIZE granularity, even if the original pin was applied to a huge page.
+ Because of the pin count behavior described above in "nr_foll_pin_acquired",
+ the accounting balances out, so that after doing this::
+
+ pin_user_pages(huge_page);
+ for (each page in huge_page)
+ unpin_user_page(page);
+
+...the following is expected::
+
+ nr_foll_pin_released == nr_foll_pin_acquired
+
+(...unless it was already out of balance due to a long-term RDMA pin being in
+place.)
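+
+For example, both counters can be checked at once as follows (the
+values shown are illustrative only)::
+
+ $ grep foll_pin /proc/vmstat
+ nr_foll_pin_acquired 99000
+ nr_foll_pin_released 99000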
+
+Other diagnostics
+=================
-Those are both going to show zero, unless CONFIG_DEBUG_VM is set. This is
-because there is a noticeable performance drop in unpin_user_page(), when they
-are activated.
+dump_page() has been enhanced slightly, to handle these new counting fields, and
+to better report on compound pages in general. Specifically, for compound pages
+with order > 1, the exact (hpage_pinned_refcount) pincount is reported.
References
==========
@@ -228,5 +251,6 @@ References
* `Some slow progress on get_user_pages() (Apr 2, 2019) <https://lwn.net/Articles/784574/>`_
* `DMA and get_user_pages() (LPC: Dec 12, 2018) <https://lwn.net/Articles/774411/>`_
* `The trouble with get_user_pages() (Apr 30, 2018) <https://lwn.net/Articles/753027/>`_
+* `LWN kernel index: get_user_pages() <https://lwn.net/Kernel/Index/#Memory_management-get_user_pages>`_
John Hubbard, October, 2019
diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst
index d16a4d2c3a41..e93606ecfb01 100644
--- a/Documentation/dev-tools/kunit/index.rst
+++ b/Documentation/dev-tools/kunit/index.rst
@@ -17,14 +17,23 @@ What is KUnit?
==============
KUnit is a lightweight unit testing and mocking framework for the Linux kernel.
-These tests are able to be run locally on a developer's workstation without a VM
-or special hardware.
KUnit is heavily inspired by JUnit, Python's unittest.mock, and
Googletest/Googlemock for C++. KUnit provides facilities for defining unit test
cases, grouping related test cases into test suites, providing common
infrastructure for running tests, and much more.
+KUnit consists of a kernel component, which provides a set of macros for easily
+writing unit tests. Tests written against KUnit will run on kernel boot if
+built-in, or when loaded if built as a module. These tests write out results to
+the kernel log in `TAP <https://testanything.org/>`_ format.
+
+To make running these tests (and reading the results) easier, KUnit offers
+:doc:`kunit_tool <kunit-tool>`, which builds a `User Mode Linux
+<http://user-mode-linux.sourceforge.net>`_ kernel, runs it, and parses the test
+results. This provides a quick way of running KUnit tests during development,
+without requiring a virtual machine or separate hardware.
+
Get started now: :doc:`start`
Why KUnit?
@@ -36,21 +45,20 @@ allow all possible code paths to be tested in the code under test; this is only
possible if the code under test is very small and does not have any external
dependencies outside of the test's control like hardware.
-Outside of KUnit, there are no testing frameworks currently
-available for the kernel that do not require installing the kernel on a test
-machine or in a VM and all require tests to be written in userspace running on
-the kernel; this is true for Autotest, and kselftest, disqualifying
-any of them from being considered unit testing frameworks.
+KUnit provides a common framework for unit tests within the kernel.
+
+KUnit tests can be run on most architectures, and most tests are architecture
+independent. All built-in KUnit tests run on kernel startup. Alternatively,
+KUnit and KUnit tests can be built as modules and tests will run when the test
+module is loaded.
-KUnit addresses the problem of being able to run tests without needing a virtual
-machine or actual hardware with User Mode Linux. User Mode Linux is a Linux
-architecture, like ARM or x86; however, unlike other architectures it compiles
-to a standalone program that can be run like any other program directly inside
-of a host operating system; to be clear, it does not require any virtualization
-support; it is just a regular program.
+.. note::
-Alternatively, kunit and kunit tests can be built as modules and tests will
-run when the test module is loaded.
+ KUnit can also run tests without needing a virtual machine or actual
+ hardware under User Mode Linux. User Mode Linux is a Linux architecture,
+ like ARM or x86, which compiles the kernel as a Linux executable. KUnit
+ can be used with UML either by building with ``ARCH=um`` (like any other
+ architecture), or by using :doc:`kunit_tool <kunit-tool>`.
KUnit is fast. Excluding build time, from invocation to completion KUnit can run
several dozen tests in only 10 to 20 seconds; this might not sound like a big
@@ -81,3 +89,5 @@ How do I use it?
* :doc:`start` - for new users of KUnit
* :doc:`usage` - for a more detailed explanation of KUnit features
* :doc:`api/index` - for the list of KUnit APIs used for testing
+* :doc:`kunit-tool` - for more information on the kunit_tool helper script
+* :doc:`faq` - for answers to some common questions about KUnit
diff --git a/Documentation/dev-tools/kunit/kunit-tool.rst b/Documentation/dev-tools/kunit/kunit-tool.rst
index 50d46394e97e..949af2da81e5 100644
--- a/Documentation/dev-tools/kunit/kunit-tool.rst
+++ b/Documentation/dev-tools/kunit/kunit-tool.rst
@@ -12,6 +12,13 @@ the Linux kernel as UML (`User Mode Linux
<http://user-mode-linux.sourceforge.net/>`_), running KUnit tests, parsing
the test results and displaying them in a user friendly manner.
+kunit_tool addresses the problem of being able to run tests without needing a
+virtual machine or actual hardware with User Mode Linux. User Mode Linux is a
+Linux architecture, like ARM or x86; however, unlike other architectures it
+compiles the kernel as a standalone Linux executable that can be run like any
+other program directly inside of a host operating system. To be clear, it does
+not require any virtualization support: it is just a regular program.
+
What is a kunitconfig?
======================
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index 4e1d24db6b13..e1c5ce80ce12 100644
--- a/Documentation/dev-tools/kunit/start.rst
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -9,11 +9,10 @@ Installing dependencies
KUnit has the same dependencies as the Linux kernel. As long as you can build
the kernel, you can run KUnit.
-KUnit Wrapper
-=============
-Included with KUnit is a simple Python wrapper that helps format the output to
-easily use and read KUnit output. It handles building and running the kernel, as
-well as formatting the output.
+Running tests with the KUnit Wrapper
+====================================
+Included with KUnit is a simple Python wrapper which runs tests under User Mode
+Linux, and formats the test results.
The wrapper can be run with:
@@ -21,22 +20,42 @@ The wrapper can be run with:
./tools/testing/kunit/kunit.py run --defconfig
-For more information on this wrapper (also called kunit_tool) checkout the
+For more information on this wrapper (also called kunit_tool) check out the
:doc:`kunit-tool` page.
Creating a .kunitconfig
-=======================
-The Python script is a thin wrapper around Kbuild. As such, it needs to be
-configured with a ``.kunitconfig`` file. This file essentially contains the
-regular Kernel config, with the specific test targets as well.
-
+-----------------------
+If you want to run a specific set of tests (rather than those listed in the
+KUnit defconfig), you can provide Kconfig options in the ``.kunitconfig`` file.
+This file essentially contains the regular Kernel config, with the specific
+test targets as well. The ``.kunitconfig`` should also contain any other config
+options required by the tests.
+
+A good starting point for a ``.kunitconfig`` is the KUnit defconfig:
.. code-block:: bash
cd $PATH_TO_LINUX_REPO
cp arch/um/configs/kunit_defconfig .kunitconfig
-Verifying KUnit Works
----------------------
+You can then add any other Kconfig options you wish, e.g.:
+
+.. code-block:: none
+
+ CONFIG_LIST_KUNIT_TEST=y
+
+:doc:`kunit_tool <kunit-tool>` will ensure that all config options set in
+``.kunitconfig`` are set in the kernel ``.config`` before running the tests.
+It'll warn you if you haven't included the dependencies of the options you're
+using.
+
+.. note::
+ Note that removing something from the ``.kunitconfig`` will not trigger a
+ rebuild of the ``.config`` file: the configuration is only updated if the
+ ``.kunitconfig`` is not a subset of ``.config``. This means that you can use
+ other tools (such as make menuconfig) to adjust other config options.
+
+
+Running the tests
+-----------------
To make sure that everything is set up correctly, simply invoke the Python
wrapper from your kernel repo:
@@ -62,6 +81,41 @@ followed by a list of tests that are run. All of them should be passing.
Because it is building a lot of sources for the first time, the
``Building KUnit kernel`` step may take a while.
+Running tests without the KUnit Wrapper
+=======================================
+
+If you'd rather not use the KUnit Wrapper (if, for example, you need to
+integrate with other systems, or use an architecture other than UML), KUnit can
+be included in any kernel, and the results read out and parsed manually.
+
+.. note::
+ KUnit is not designed for use in a production system, and it's possible that
+ tests may reduce the stability or security of the system.
+
+
+
+Configuring the kernel
+----------------------
+
+In order to enable KUnit itself, you simply need to enable the ``CONFIG_KUNIT``
+Kconfig option (it's under Kernel Hacking/Kernel Testing and Coverage in
+menuconfig). From there, you can enable any KUnit tests you want: they usually
+have config options ending in ``_KUNIT_TEST``.
+
+KUnit and KUnit tests can be compiled as modules: in this case the tests in a
+module will be run when the module is loaded.
+
+Running the tests
+-----------------
+
+Build and run your kernel as usual. Test output will be written to the kernel
+log in `TAP <https://testanything.org/>`_ format.
+
+.. note::
+ It's possible that there will be other lines and/or data interspersed in the
+ TAP output.
+
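+A passing run of a single hypothetical suite named "example" might produce
+output along these lines (exact formatting may vary):
+
+.. code-block:: none
+
+   TAP version 14
+   1..1
+       # Subtest: example
+       1..1
+       ok 1 - example_simple_test
+   ok 1 - example
+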
+
Writing your first test
=======================
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 607758a66a99..473a2361ec37 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -591,3 +591,17 @@ able to run one test case per invocation.
.. TODO(brendanhiggins@google.com): Add an actual example of an architecture
dependent KUnit test.
+
+KUnit debugfs representation
+============================
+When KUnit test suites are initialized, they create an associated directory
+in ``/sys/kernel/debug/kunit/<test-suite>``. The directory contains one file:
+
+- results: "cat results" displays results of each test case and the results
+ of the entire suite for the last test run.
+
+The debugfs representation is primarily of use when KUnit test suites are
+run in a native environment, either as modules or built-in. Having a way
+to display results like this is valuable as otherwise results can be
+intermixed with other events in dmesg output. The maximum size of each
+results file is KUNIT_LOG_SIZE bytes (defined in include/kunit/test.h).
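+
+For example, to read the results of a hypothetical suite named "example":
+
+.. code-block:: bash
+
+   cat /sys/kernel/debug/kunit/example/results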
diff --git a/Documentation/devicetree/bindings/.gitignore b/Documentation/devicetree/bindings/.gitignore
index 57afa1533a5f..5c6d8ea1a09c 100644
--- a/Documentation/devicetree/bindings/.gitignore
+++ b/Documentation/devicetree/bindings/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.example.dts
processed-schema*.yaml
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml b/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml
index e4131fa42b26..572381306681 100644
--- a/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-clk-manager.yaml
@@ -21,6 +21,8 @@ properties:
required:
- compatible
+additionalProperties: false
+
examples:
- |
clkmgr@ffd04000 {
diff --git a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
index 853d7d2b56f5..66213bd95e6e 100644
--- a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
@@ -43,6 +43,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
ao-secure@140 {
diff --git a/Documentation/devicetree/bindings/arm/arm,integrator.yaml b/Documentation/devicetree/bindings/arm/arm,integrator.yaml
new file mode 100644
index 000000000000..192ded470e32
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,integrator.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,integrator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Integrator Boards Device Tree Bindings
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |+
+ These were the first ARM platforms officially supported by ARM Ltd.
+ They are ARMv4, ARMv5 and ARMv6-capable using different core tiles,
+ so the system is modular and can host a variety of CPU boards called
+ "core tiles", referred to in the device tree as "core modules".
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: ARM Integrator Application Platform, this board has a PCI
+ host and several PCI slots, as well as a number of slots for logical
+ expansion modules. It is referred to as an "ASIC Development
+ Motherboard", is extended with custom FPGAs, and is intended for
+ rapid prototyping. See ARM DUI 0098B. This board can physically come
+ pre-packaged in a PC Tower form factor called Integrator/PP1 or a
+ special metal fixture called Integrator/PP2, see ARM DUI 0169A.
+ items:
+ - const: arm,integrator-ap
+ - description: ARM Integrator Compact Platform (HBI-0086), this board has
+ a compact form factor and mainly consists of the bare minimum
+ peripherals to make use of the core module. See ARM DUI 0159B.
+ items:
+ - const: arm,integrator-cp
+ - description: ARM Integrator Standard Development Board (SDB) Platform,
+ this board is a PCI-based board conforming to the Microsoft SDB
+ (HARP) specification. See ARM DUI 0099A.
+ items:
+ - const: arm,integrator-sp
+
+ core-module@10000000:
+ type: object
+ description: the root node in the Integrator platforms must contain
+ a core module child node. They are always at physical address
+ 0x10000000 in all the Integrator variants.
+ properties:
+ compatible:
+ items:
+ - const: arm,core-module-integrator
+ - const: syscon
+ - const: simple-mfd
+ reg:
+ maxItems: 1
+
+ required:
+ - compatible
+ - reg
+
+patternProperties:
+ "^syscon@[0-9a-f]+$":
+ description: All Integrator boards must provide a system controller as a
+ node in the root of the device tree.
+ type: object
+ properties:
+ compatible:
+ items:
+ - enum:
+ - arm,integrator-ap-syscon
+ - arm,integrator-cp-syscon
+ - arm,integrator-sp-syscon
+ - const: syscon
+ reg:
+ maxItems: 1
+
+ required:
+ - compatible
+ - reg
+
+
+required:
+ - compatible
+ - core-module@10000000
+
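+# A hedged sketch (adapted from the example in the former arm-boards text
+# binding) of a root node that would satisfy this schema:
+#
+#  / {
+#      model = "ARM Integrator/AP";
+#      compatible = "arm,integrator-ap";
+#
+#      core-module@10000000 {
+#          compatible = "arm,core-module-integrator", "syscon", "simple-mfd";
+#          reg = <0x10000000 0x200>;
+#      };
+#
+#      syscon@11000000 {
+#          compatible = "arm,integrator-ap-syscon", "syscon";
+#          reg = <0x11000000 0x100>;
+#      };
+#  };
+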
+...
diff --git a/Documentation/devicetree/bindings/arm/arm,realview.yaml b/Documentation/devicetree/bindings/arm/arm,realview.yaml
new file mode 100644
index 000000000000..d6e85d198afe
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,realview.yaml
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,realview.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM RealView Boards Device Tree Bindings
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |+
+ The ARM RealView series of reference designs were built to explore the
+ ARM11, Cortex-A8 and Cortex-A9 CPUs. These introduced new features compared
+ to the earlier CPUs, such as TrustZone and multicore (MPCore).
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: ARM RealView Emulation Baseboard (HBI-0140) was created
+ as a generic platform to test different FPGA designs, and has
+ pluggable CPU modules, see ARM DUI 0303E.
+ items:
+ - const: arm,realview-eb
+ - description: ARM RealView Platform Baseboard for ARM1176JZF-S
+ (HBI-0147) was created as a development board to test ARM TrustZone,
+ CoreSight and Intelligent Energy Management (IEM). See ARM DUI 0425F.
+ items:
+ - const: arm,realview-pb1176
+ - description: ARM RealView Platform Baseboard for ARM 11 MPCore
+ (HBI-0159, HBI-0175 and HBI-0176) was created to showcase
+ multiprocessing with the ARM11 MPCore using symmetric
+ multiprocessing (SMP). See ARM DUI 0351E.
+ items:
+ - const: arm,realview-pb11mp
+ - description: ARM RealView Platform Baseboard for Cortex-A8 (HBI-0178,
+ HBI-0176 and HBI-0175) was the first reference platform for the
+ Cortex CPU family, including a Cortex-A8 test chip.
+ items:
+ - const: arm,realview-pba8
+ - description: ARM RealView Platform Baseboard Explore for Cortex-A9
+ (HBI-0182 and HBI-0183) was the reference platform for the Cortex-A9
+ CPU.
+ items:
+ - const: arm,realview-pbx
+
+ soc:
+ description: All RealView boards must provide a soc node in the root of the
+ device tree, representing the System-on-Chip since these test chips are
+ rather complex.
+ type: object
+ properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: arm,realview-eb-soc
+ - const: simple-bus
+ - items:
+ - const: arm,realview-pb1176-soc
+ - const: simple-bus
+ - items:
+ - const: arm,realview-pb11mp-soc
+ - const: simple-bus
+ - items:
+ - const: arm,realview-pba8-soc
+ - const: simple-bus
+ - items:
+ - const: arm,realview-pbx-soc
+ - const: simple-bus
+
+ patternProperties:
+ "^.*syscon@[0-9a-f]+$":
+ type: object
+ description: All RealView boards must provide a syscon system controller
+ node inside the soc node.
+ properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: arm,realview-eb11mp-revb-syscon
+ - const: arm,realview-eb-syscon
+ - const: syscon
+ - const: simple-mfd
+ - items:
+ - const: arm,realview-eb11mp-revc-syscon
+ - const: arm,realview-eb-syscon
+ - const: syscon
+ - const: simple-mfd
+ - items:
+ - const: arm,realview-eb-syscon
+ - const: syscon
+ - const: simple-mfd
+ - items:
+ - const: arm,realview-pb1176-syscon
+ - const: syscon
+ - const: simple-mfd
+ - items:
+ - const: arm,realview-pb11mp-syscon
+ - const: syscon
+ - const: simple-mfd
+ - items:
+ - const: arm,realview-pba8-syscon
+ - const: syscon
+ - const: simple-mfd
+ - items:
+ - const: arm,realview-pbx-syscon
+ - const: syscon
+ - const: simple-mfd
+
+ required:
+ - compatible
+ - reg
+
+ required:
+ - compatible
+
+required:
+ - compatible
+ - soc
+
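+# A hedged sketch (adapted from the example in the former arm-boards text
+# binding) of a root node that would satisfy this schema:
+#
+#  / {
+#      model = "ARM RealView PB1176 with device tree";
+#      compatible = "arm,realview-pb1176";
+#
+#      soc {
+#          compatible = "arm,realview-pb1176-soc", "simple-bus";
+#
+#          syscon@10000000 {
+#              compatible = "arm,realview-pb1176-syscon", "syscon", "simple-mfd";
+#              reg = <0x10000000 0x1000>;
+#          };
+#      };
+#  };
+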
+...
diff --git a/Documentation/devicetree/bindings/arm/arm,versatile.yaml b/Documentation/devicetree/bindings/arm/arm,versatile.yaml
new file mode 100644
index 000000000000..06efd2a075c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,versatile.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,versatile.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Versatile Boards Device Tree Bindings
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |+
+ The ARM Versatile boards are two variants of ARM926EJ-S evaluation boards
+ with various pluggable interface boards; in essence, the Versatile PB version
+ is a superset of the Versatile AB version.
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: The ARM Versatile Application Baseboard (HBI-0118) is an
+ evaluation board specifically for the ARM926EJ-S. It can be connected
+ to an IB1 interface board for a touchscreen-type use case or an IB2
+ for a candybar phone-type use case. See ARM DUI 0225D.
+ items:
+ - const: arm,versatile-ab
+ - description: The ARM Versatile Platform Baseboard (HBI-0117) is an
+ extension of the Versatile Application Baseboard that includes a
+ PCI host controller. Like its sibling board, it is built specifically
+ for the ARM926EJ-S. See ARM DUI 0224B.
+ items:
+ - const: arm,versatile-pb
+
+ core-module@10000000:
+ type: object
+ description: the root node in the Versatile platforms must contain
+ a core module child node. They are always at physical address
+ 0x10000000 in all the Versatile variants.
+ properties:
+ compatible:
+ items:
+ - const: arm,core-module-versatile
+ - const: syscon
+ - const: simple-mfd
+ reg:
+ maxItems: 1
+
+ required:
+ - compatible
+ - reg
+
+patternProperties:
+ "^syscon@[0-9a-f]+$":
+ type: object
+ description: When fitted with the IB2 Interface Board, the Versatile
+ AB will present an optional system controller node which controls the
+ extra peripherals on the interface board.
+ properties:
+ compatible:
+ contains:
+ const: arm,versatile-ib2-syscon
+ required:
+ - compatible
+ - reg
+
+required:
+ - compatible
+ - core-module@10000000
+
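+# A hedged sketch of a minimal root node that would satisfy this schema (the
+# reg size here is an assumption for illustration):
+#
+#  / {
+#      model = "ARM Versatile AB";
+#      compatible = "arm,versatile-ab";
+#
+#      core-module@10000000 {
+#          compatible = "arm,core-module-versatile", "syscon", "simple-mfd";
+#          reg = <0x10000000 0x200>;
+#      };
+#  };
+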
+...
diff --git a/Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml b/Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
new file mode 100644
index 000000000000..8c06a73f716c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
@@ -0,0 +1,223 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,vexpress-juno.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Versatile Express and Juno Boards Device Tree Bindings
+
+maintainers:
+ - Sudeep Holla <sudeep.holla@arm.com>
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |+
+ ARM's Versatile Express platforms were built as reference designs for
+ exploring multicore Cortex-A class systems. The Versatile Express family
+ contains both 32-bit (AArch32) and 64-bit (AArch64) systems.
+
+ The boards consist of a motherboard and one or more daughterboards (tiles). The
+ motherboard provides a set of peripherals. Processor and RAM "live" on the
+ tiles.
+
+ The motherboard and each core tile should be described by a separate Device
+ Tree source file, with the tile's description including the motherboard file
+ using an include directive. As the motherboard can be initialized in one of
+ two different configurations ("memory maps"), care must be taken to include
+ the correct one.
+
+ When a new generation of boards was introduced under the name "Juno", they
+ shared so many characteristics with the Versatile Express that the
+ "arm,vexpress" compatible was retained in the root node, and they are
+ included in this binding schema as well.
+
+ The root node indicates the CPU SoC on the core tile, which is a
+ daughterboard to the main motherboard. The name used in the compatible
+ string shall match the name given in the core tile's technical reference
+ manual, followed by "arm,vexpress" as an additional compatible value. If
+ further subvariants of the core tile are released, more fine-grained
+ compatible strings of up to three values are used.
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: CoreTile Express A9x4 (V2P-CA9) has 4 Cortex A9 CPU cores
+ in MPCore configuration in a test chip on the core tile. See ARM
+ DUI 0448I. This was the first Versatile Express platform.
+ items:
+ - const: arm,vexpress,v2p-ca9
+ - const: arm,vexpress
+ - description: CoreTile Express A5x2 (V2P-CA5s) has 2 Cortex A5 CPU cores
+ in a test chip on the core tile. It is intended to evaluate NEON, FPU
+ and Jazelle support in the Cortex A5 family. See ARM DUI 0541C.
+ items:
+ - const: arm,vexpress,v2p-ca5s
+ - const: arm,vexpress
+ - description: Coretile Express A15x2 (V2P-CA15) has 2 Cortex A15 CPU
+ cores in a MPCore configuration in a test chip on the core tile. See
+ ARM DUI 0604F.
+ items:
+ - const: arm,vexpress,v2p-ca15
+ - const: arm,vexpress
+ - description: CoreTile Express A15x4 (V2P-CA15, HBI-0237A) has 4 Cortex
+ A15 CPU cores in a test chip on the core tile. This is the first test
+ chip called "TC1".
+ items:
+ - const: arm,vexpress,v2p-ca15,tc1
+ - const: arm,vexpress,v2p-ca15
+ - const: arm,vexpress
+ - description: Coretile Express A15x2 A7x3 (V2P-CA15_A7) has 2 Cortex A15
+ CPU cores and 3 Cortex A7 cores in a big.LITTLE MPCore configuration
+ in a test chip on the core tile. See ARM DDI 0503I.
+ items:
+ - const: arm,vexpress,v2p-ca15_a7
+ - const: arm,vexpress
+ - description: LogicTile Express 20MG (V2F-1XV7) has 2 Cortex A53 CPU
+ cores in a test chip on the core tile. See ARM DDI 0498D.
+ items:
+ - const: arm,vexpress,v2f-1xv7,ca53x2
+ - const: arm,vexpress,v2f-1xv7
+ - const: arm,vexpress
+ - description: Arm Versatile Express Juno "r0" (the first Juno board,
+ V2M-Juno) was introduced as a vehicle for evaluating big.LITTLE on
+ AArch64 CPU cores. It has 2 Cortex A57 CPU cores and 4 Cortex A53
+ cores in a big.LITTLE configuration. It also features the MALI T624
+ GPU. See ARM document 100113_0000_07_en.
+ items:
+ - const: arm,juno
+ - const: arm,vexpress
+ - description: Arm Versatile Express Juno r1 Development Platform
+ (V2M-Juno r1) was introduced mainly aimed at development of PCIe
+ based systems. Juno r1 also has support for AXI masters placed on
+ the TLX connectors to join the coherency domain. Otherwise it is the
+ same configuration as Juno r0. See ARM document 100122_0100_06_en.
+ items:
+ - const: arm,juno-r1
+ - const: arm,juno
+ - const: arm,vexpress
+ - description: Arm Versatile Express Juno r2 Development Platform
+ (V2M-Juno r2). It has the same feature set as Juno r0 and r1. See
+ ARM document 100114_0200_04_en.
+ items:
+ - const: arm,juno-r2
+ - const: arm,juno
+ - const: arm,vexpress
+ - description: Arm AEMv8a Versatile Express Real-Time System Model
+ (VE RTSM) is a programmers view of the Versatile Express with Arm
+ v8A hardware. See ARM DUI 0575D.
+ items:
+ - const: arm,rtsm_ve,aemv8a
+ - const: arm,vexpress
+ - description: Arm FVP (Fixed Virtual Platform) base model revision C
+ See ARM Document 100964_1190_00_en.
+ items:
+ - const: arm,fvp-base-revc
+ - const: arm,vexpress
+ - description: Arm Foundation model for Aarch64
+ items:
+ - const: arm,foundation-aarch64
+ - const: arm,vexpress
+
+ arm,hbi:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: This indicates the ARM HBI (Hardware Board ID), this is
+ ARM's unique board model ID, visible on the PCB's silkscreen.
+
+ arm,vexpress,site:
+ description: As Versatile Express can be configured in a number of physically
+ different setups, the device tree should describe the platform topology.
+ For this reason the root node and main motherboard node must define this
+ property, describing the physical location of the children nodes.
+ 0 means motherboard site, while 1 and 2 are daughterboard sites, and
+ 0xf means "sisterboard" which is the site containing the main CPU tile.
+ allOf:
+ - $ref: '/schemas/types.yaml#/definitions/uint32'
+ - minimum: 0
+ maximum: 15
+
+ arm,vexpress,position:
+ description: When daughterboards are stacked on one site, their position
+ in the stack can be described with this attribute.
+ allOf:
+ - $ref: '/schemas/types.yaml#/definitions/uint32'
+ - minimum: 0
+ maximum: 3
+
+ arm,vexpress,dcc:
+ description: When describing tiles consisting of more than one DCC, the
+ DCC number can be specified with this attribute.
+ allOf:
+ - $ref: '/schemas/types.yaml#/definitions/uint32'
+ - minimum: 0
+ maximum: 3
+
+patternProperties:
+ "^bus@[0-9a-f]+$":
+ description: Static Memory Bus (SMB) node, if this exists it describes
+ the connection between the motherboard and any tiles. Sometimes the
+ compatible is placed directly under this node, sometimes it is placed
+ in a subnode named "motherboard". Sometimes the compatible includes
+ "arm,vexpress,v2?-p1" sometimes (on software models) is is just
+ "simple-bus". If the compatible is placed in the "motherboard" node,
+ it is stricter and always has two compatibles.
+ type: object
+ allOf:
+ - $ref: '/schemas/simple-bus.yaml'
+
+ properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - arm,vexpress,v2m-p1
+ - arm,vexpress,v2p-p1
+ - const: simple-bus
+ - const: simple-bus
+ motherboard:
+ type: object
+ description: The motherboard description provides a single "motherboard"
+ node using 2 address cells corresponding to the Static Memory Bus
+ used between the motherboard and the tile. The first cell defines the
+ Chip Select (CS) line number, the second cell the address offset within
+ the CS. All interrupt lines between the motherboard and the tile
+ are active high and are described using a single cell.
+ properties:
+ "#address-cells":
+ const: 2
+ "#size-cells":
+ const: 1
+ compatible:
+ items:
+ - enum:
+ - arm,vexpress,v2m-p1
+ - arm,vexpress,v2p-p1
+ - const: simple-bus
+ arm,v2m-memory-map:
+ description: This describes the memory map type.
+ allOf:
+ - $ref: '/schemas/types.yaml#/definitions/string'
+ - enum:
+ - rs1
+ - rs2
+ required:
+ - compatible
+ required:
+ - compatible
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - arm,vexpress,v2p-ca9
+ - arm,vexpress,v2p-ca5s
+ - arm,vexpress,v2p-ca15
+ - arm,vexpress,v2p-ca15_a7
+ - arm,vexpress,v2f-1xv7,ca53x2
+ then:
+ required:
+ - arm,hbi
+
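+# A hedged sketch (condensed from the Juno example in the former arm-boards
+# text binding) of a root node that would satisfy this schema:
+#
+#  / {
+#      model = "ARM Juno development board (r0)";
+#      compatible = "arm,juno", "arm,vexpress";
+#      #address-cells = <2>;
+#      #size-cells = <2>;
+#
+#      cpus {
+#          #address-cells = <2>;
+#          #size-cells = <0>;
+#
+#          A57_0: cpu@0 {
+#              compatible = "arm,cortex-a57";
+#              reg = <0x0 0x0>;
+#              device_type = "cpu";
+#              enable-method = "psci";
+#          };
+#      };
+#  };
+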
+...
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
deleted file mode 100644
index 96b1dad58253..000000000000
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ /dev/null
@@ -1,237 +0,0 @@
-ARM Integrator/AP (Application Platform) and Integrator/CP (Compact Platform)
------------------------------------------------------------------------------
-ARM's oldest Linux-supported platform with connectors for different core
-tiles of ARMv4, ARMv5 and ARMv6 type.
-
-Required properties (in root node):
- compatible = "arm,integrator-ap"; /* Application Platform */
- compatible = "arm,integrator-cp"; /* Compact Platform */
-
-FPGA type interrupt controllers, see the versatile-fpga-irq binding doc.
-
-Required nodes:
-
-- core-module: the root node to the Integrator platforms must have
- a core-module with regs and the compatible string
- "arm,core-module-integrator"
-- external-bus-interface: the root node to the Integrator platforms
- must have an external bus interface with regs and the
- compatible-string "arm,external-bus-interface"
-
- Required properties for the core module:
- - regs: the location and size of the core module registers, one
- range of 0x200 bytes.
-
-- syscon: the root node of the Integrator platforms must have a
- system controller node pointing to the control registers,
- with the compatible string
- "arm,integrator-ap-syscon"
- "arm,integrator-cp-syscon"
- respectively.
-
- Required properties for the system controller:
- - regs: the location and size of the system controller registers,
- one range of 0x100 bytes.
-
- Required properties for the AP system controller:
- - interrupts: the AP syscon node must include the logical module
- interrupts, stated in order of module instance <module 0>,
- <module 1>, <module 2> ... for the CP system controller this
- is not required not of any use.
-
-/dts-v1/;
-/include/ "integrator.dtsi"
-
-/ {
- model = "ARM Integrator/AP";
- compatible = "arm,integrator-ap";
-
- core-module@10000000 {
- compatible = "arm,core-module-integrator";
- reg = <0x10000000 0x200>;
- };
-
- ebi@12000000 {
- compatible = "arm,external-bus-interface";
- reg = <0x12000000 0x100>;
- };
-
- syscon {
- compatible = "arm,integrator-ap-syscon";
- reg = <0x11000000 0x100>;
- interrupt-parent = <&pic>;
- /* These are the logic module IRQs */
- interrupts = <9>, <10>, <11>, <12>;
- };
-};
-
-
-ARM Versatile Application and Platform Baseboards
--------------------------------------------------
-ARM's development hardware platform with connectors for customizable
-core tiles. The hardware configuration of the Versatile boards is
-highly customizable.
-
-Required properties (in root node):
- compatible = "arm,versatile-ab"; /* Application baseboard */
- compatible = "arm,versatile-pb"; /* Platform baseboard */
-
-Interrupt controllers:
-- VIC required properties:
- compatible = "arm,versatile-vic";
- interrupt-controller;
- #interrupt-cells = <1>;
-
-- SIC required properties:
- compatible = "arm,versatile-sic";
- interrupt-controller;
- #interrupt-cells = <1>;
-
-Required nodes:
-
-- core-module: the root node to the Versatile platforms must have
- a core-module with regs and the compatible strings
- "arm,core-module-versatile", "syscon"
-
-Optional nodes:
-
-- arm,versatile-ib2-syscon : if the Versatile has an IB2 interface
- board mounted, this has a separate system controller that is
- defined in this node.
- Required properties:
- compatible = "arm,versatile-ib2-syscon", "syscon"
-
-ARM RealView Boards
--------------------
-The RealView boards cover tailored evaluation boards that are used to explore
-the ARM11 and Cortex A-8 and Cortex A-9 processors.
-
-Required properties (in root node):
- /* RealView Emulation Baseboard */
- compatible = "arm,realview-eb";
- /* RealView Platform Baseboard for ARM1176JZF-S */
- compatible = "arm,realview-pb1176";
- /* RealView Platform Baseboard for ARM11 MPCore */
- compatible = "arm,realview-pb11mp";
- /* RealView Platform Baseboard for Cortex A-8 */
- compatible = "arm,realview-pba8";
- /* RealView Platform Baseboard Explore for Cortex A-9 */
- compatible = "arm,realview-pbx";
-
-Required nodes:
-
-- soc: some node of the RealView platforms must be the SoC
- node that contain the SoC-specific devices, with the compatible
- string set to one of these tuples:
- "arm,realview-eb-soc", "simple-bus"
- "arm,realview-pb1176-soc", "simple-bus"
- "arm,realview-pb11mp-soc", "simple-bus"
- "arm,realview-pba8-soc", "simple-bus"
- "arm,realview-pbx-soc", "simple-bus"
-
-- syscon: some subnode of the RealView SoC node must be a
- system controller node pointing to the control registers,
- with the compatible string set to one of these:
- "arm,realview-eb11mp-revb-syscon", "arm,realview-eb-syscon", "syscon"
- "arm,realview-eb11mp-revc-syscon", "arm,realview-eb-syscon", "syscon"
- "arm,realview-eb-syscon", "syscon"
- "arm,realview-pb1176-syscon", "syscon"
- "arm,realview-pb11mp-syscon", "syscon"
- "arm,realview-pba8-syscon", "syscon"
- "arm,realview-pbx-syscon", "syscon"
-
- Required properties for the system controller:
- - regs: the location and size of the system controller registers,
- one range of 0x1000 bytes.
-
-Example:
-
-/dts-v1/;
-#include <dt-bindings/interrupt-controller/irq.h>
-
-/ {
- model = "ARM RealView PB1176 with device tree";
- compatible = "arm,realview-pb1176";
- #address-cells = <1>;
- #size-cells = <1>;
-
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "arm,realview-pb1176-soc", "simple-bus";
- ranges;
-
- syscon: syscon@10000000 {
- compatible = "arm,realview-syscon", "syscon";
- reg = <0x10000000 0x1000>;
- };
-
- };
-};
-
-ARM Versatile Express Boards
------------------------------
-For details on the device tree bindings for ARM Versatile Express boards
-please consult the vexpress.txt file in the same directory as this file.
-
-ARM Juno Boards
-----------------
-The Juno boards are targeting development for AArch64 systems. The first
-iteration, Juno r0, is a vehicle for evaluating big.LITTLE on AArch64,
-with the second iteration, Juno r1, mainly aimed at development of PCIe
-based systems. Juno r1 also has support for AXI masters placed on the TLX
-connectors to join the coherency domain.
-
-Juno boards are described in a similar way to ARM Versatile Express boards,
-with the motherboard part of the hardware being described in a separate file
-to highlight the fact that is part of the support infrastructure for the SoC.
-Juno device tree bindings also share the Versatile Express bindings as
-described under the RS1 memory mapping.
-
-Required properties (in root node):
- compatible = "arm,juno"; /* For Juno r0 board */
- compatible = "arm,juno-r1"; /* For Juno r1 board */
- compatible = "arm,juno-r2"; /* For Juno r2 board */
-
-Required nodes:
-The description for the board must include:
- - a "psci" node describing the boot method used for the secondary CPUs.
- A detailed description of the bindings used for "psci" nodes is present
- in the psci.yaml file.
- - a "cpus" node describing the available cores and their associated
- "enable-method"s. For more details see cpus.yaml file.
-
-Example:
-
-/dts-v1/;
-/ {
- model = "ARM Juno development board (r0)";
- compatible = "arm,juno", "arm,vexpress";
- interrupt-parent = <&gic>;
- #address-cells = <2>;
- #size-cells = <2>;
-
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- A57_0: cpu@0 {
- compatible = "arm,cortex-a57";
- reg = <0x0 0x0>;
- device_type = "cpu";
- enable-method = "psci";
- };
-
- .....
-
- A53_0: cpu@100 {
- compatible = "arm,cortex-a53";
- reg = <0x0 0x100>;
- device_type = "cpu";
- enable-method = "psci";
- };
-
- .....
- };
-
-};
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt
deleted file mode 100644
index e3f996920403..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Broadcom Kona Family CPU Enable Method
---------------------------------------
-This binding defines the enable method used for starting secondary
-CPUs in the following Broadcom SoCs:
- BCM11130, BCM11140, BCM11351, BCM28145, BCM28155, BCM21664
-
-The enable method is specified by defining the following required
-properties in the "cpu" device tree node:
- - enable-method = "brcm,bcm11351-cpu-method";
- - secondary-boot-reg = <...>;
-
-The secondary-boot-reg property is a u32 value that specifies the
-physical address of the register used to request the ROM holding pen
-code release a secondary CPU. The value written to the register is
-formed by encoding the target CPU id into the low bits of the
-physical start address it should jump to.
-
-Example:
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <0>;
- };
-
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <1>;
- enable-method = "brcm,bcm11351-cpu-method";
- secondary-boot-reg = <0x3500417c>;
- };
- };
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.txt
deleted file mode 100644
index 0ff6560e6094..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Broadcom BCM11351 device tree bindings
--------------------------------------------
-
-Boards with the bcm281xx SoC family (which includes bcm11130, bcm11140,
-bcm11351, bcm28145, bcm28155 SoCs) shall have the following properties:
-
-Required root node property:
-
-compatible = "brcm,bcm11351";
-DEPRECATED: compatible = "bcm,bcm11351";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml
new file mode 100644
index 000000000000..b5ef2666e6b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,bcm11351.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM11351 device tree bindings
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm28155-ap
+ - const: brcm,bcm11351
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.txt
deleted file mode 100644
index e0774255e1a6..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Broadcom BCM21664 device tree bindings
---------------------------------------
-
-This document describes the device tree bindings for boards with the BCM21664
-SoC.
-
-Required root node property:
- - compatible: brcm,bcm21664
-
-Example:
- / {
- model = "BCM21664 SoC";
- compatible = "brcm,bcm21664";
- [...]
- }
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml
new file mode 100644
index 000000000000..aafbd6a27708
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,bcm21664.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM21664 device tree bindings
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm21664-garnet
+ - const: brcm,bcm21664
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt
deleted file mode 100644
index a3af54c0e404..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Broadcom Kona Family CPU Enable Method
---------------------------------------
-This binding defines the enable method used for starting secondary
-CPUs in the following Broadcom SoCs:
- BCM23550
-
-The enable method is specified by defining the following required
-properties in the "cpu" device tree node:
- - enable-method = "brcm,bcm23550";
- - secondary-boot-reg = <...>;
-
-The secondary-boot-reg property is a u32 value that specifies the
-physical address of the register used to request the ROM holding pen
-code release a secondary CPU. The value written to the register is
-formed by encoding the target CPU id into the low bits of the
-physical start address it should jump to.
-
-Example:
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <0>;
- };
-
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <1>;
- enable-method = "brcm,bcm23550";
- secondary-boot-reg = <0x3500417c>;
- };
- };
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt
deleted file mode 100644
index 080baad923d6..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Broadcom BCM23550 device tree bindings
---------------------------------------
-
-This document describes the device tree bindings for boards with the BCM23550
-SoC.
-
-Required root node property:
- - compatible: brcm,bcm23550
-
-Example:
- / {
- model = "BCM23550 SoC";
- compatible = "brcm,bcm23550";
- [...]
- }
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml
new file mode 100644
index 000000000000..c4b4efd28a55
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,bcm23550.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM23550 device tree bindings
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm23550-sparrow
+ - const: brcm,bcm23550
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.txt
deleted file mode 100644
index 8608a776caa7..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Broadcom BCM4708 device tree bindings
--------------------------------------------
-
-Boards with the BCM4708 SoC shall have the following properties:
-
-Required root node property:
-
-bcm4708
-compatible = "brcm,bcm4708";
-
-bcm4709
-compatible = "brcm,bcm4709";
-
-bcm53012
-compatible = "brcm,bcm53012";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml
new file mode 100644
index 000000000000..d48313c7ae45
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm4708.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,bcm4708.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM4708 device tree bindings
+
+description:
+ Broadcom BCM4708/47081/4709/47094/53012 Wi-Fi/network SoCs based
+ on the iProc architecture (Northstar).
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Hauke Mehrtens <hauke@hauke-m.de>
+ - Rafal Milecki <zajec5@gmail.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: BCM4708 based boards
+ items:
+ - enum:
+ - asus,rt-ac56u
+ - asus,rt-ac68u
+ - buffalo,wzr-1750dhp
+ - linksys,ea6300-v1
+ - linksys,ea6500-v2
+ - luxul,xap-1510v1
+ - luxul,xwc-1000
+ - netgear,r6250v1
+ - netgear,r6300v2
+ - smartrg,sr400ac
+ - brcm,bcm94708
+ - const: brcm,bcm4708
+
+ - description: BCM47081 based boards
+ items:
+ - enum:
+ - asus,rt-n18u
+ - buffalo,wzr-600dhp2
+ - buffalo,wzr-900dhp
+ - luxul,xap-1410v1
+ - luxul,xwr-1200v1
+ - tplink,archer-c5-v2
+ - const: brcm,bcm47081
+ - const: brcm,bcm4708
+
+ - description: BCM4709 based boards
+ items:
+ - enum:
+ - asus,rt-ac87u
+ - buffalo,wxr-1900dhp
+ - linksys,ea9200
+ - netgear,r7000
+ - netgear,r8000
+ - tplink,archer-c9-v1
+ - brcm,bcm94709
+ - const: brcm,bcm4709
+ - const: brcm,bcm4708
+
+ - description: BCM47094 based boards
+ items:
+ - enum:
+ - dlink,dir-885l
+ - linksys,panamera
+ - luxul,abr-4500-v1
+ - luxul,xap-1610-v1
+ - luxul,xbr-4500-v1
+ - luxul,xwc-2000-v1
+ - luxul,xwr-3100v1
+ - luxul,xwr-3150-v1
+ - netgear,r8500
+ - phicomm,k3
+ - const: brcm,bcm47094
+ - const: brcm,bcm4708
+
+ - description: BCM53012 based boards
+ items:
+ - enum:
+ - brcm,bcm953012er
+ - brcm,bcm953012hr
+ - brcm,bcm953012k
+ - const: brcm,bcm53012
+ - const: brcm,bcm4708
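+
+# For example (derived from the enums above), a Netgear R6250 V1 board would
+# declare:
+#
+#   compatible = "netgear,r6250v1", "brcm,bcm4708";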
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.txt
deleted file mode 100644
index 4c77169bb534..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Broadcom Cygnus device tree bindings
-------------------------------------
-
-
-Boards with Cygnus SoCs shall have the following properties:
-
-Required root node property:
-
-BCM11300
-compatible = "brcm,bcm11300", "brcm,cygnus";
-
-BCM11320
-compatible = "brcm,bcm11320", "brcm,cygnus";
-
-BCM11350
-compatible = "brcm,bcm11350", "brcm,cygnus";
-
-BCM11360
-compatible = "brcm,bcm11360", "brcm,cygnus";
-
-BCM58300
-compatible = "brcm,bcm58300", "brcm,cygnus";
-
-BCM58302
-compatible = "brcm,bcm58302", "brcm,cygnus";
-
-BCM58303
-compatible = "brcm,bcm58303", "brcm,cygnus";
-
-BCM58305
-compatible = "brcm,bcm58305", "brcm,cygnus";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml
new file mode 100644
index 000000000000..fe111e72dac3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,cygnus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Cygnus device tree bindings
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm11300
+ - brcm,bcm11320
+ - brcm,bcm11350
+ - brcm,bcm11360
+ - brcm,bcm58300
+ - brcm,bcm58302
+ - brcm,bcm58303
+ - brcm,bcm58305
+ - const: brcm,cygnus
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.txt
deleted file mode 100644
index a124c7fc4dcd..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Broadcom Hurricane 2 device tree bindings
----------------------------------------
-
-Broadcom Hurricane 2 family of SoCs are used for switching control. These SoCs
-are based on Broadcom's iProc SoC architecture and feature a single core Cortex
-A9 ARM CPUs, DDR2/DDR3 memory, PCIe GEN-2, USB 2.0 and USB 3.0, serial and NAND
-flash and a PCIe attached integrated switching engine.
-
-Boards with Hurricane SoCs shall have the following properties:
-
-Required root node property:
-
-BCM53342
-compatible = "brcm,bcm53342", "brcm,hr2";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml
new file mode 100644
index 000000000000..1158f49b0b83
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,hr2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Hurricane 2 device tree bindings
+
+description:
+ The Broadcom Hurricane 2 family of SoCs is used for switching control. These
+ SoCs are based on Broadcom's iProc SoC architecture and feature a single-core
+ Cortex-A9 ARM CPU, DDR2/DDR3 memory, PCIe Gen-2, USB 2.0 and USB 3.0, serial
+ and NAND flash, and a PCIe-attached integrated switching engine.
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - ubnt,unifi-switch8
+ - const: brcm,bcm53342
+ - const: brcm,hr2
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.txt
deleted file mode 100644
index 35f056f4a1c3..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Broadcom North Star 2 (NS2) device tree bindings
-------------------------------------------------
-
-Boards with NS2 shall have the following properties:
-
-Required root node property:
-
-NS2 SVK board
-compatible = "brcm,ns2-svk", "brcm,ns2";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml
new file mode 100644
index 000000000000..2451704f87f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,ns2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom North Star 2 (NS2) device tree bindings
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,ns2-svk
+ - brcm,ns2-xmc
+ - const: brcm,ns2
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,nsp-cpu-method.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,nsp-cpu-method.txt
deleted file mode 100644
index 677ef9d9f445..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,nsp-cpu-method.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Broadcom Northstar Plus SoC CPU Enable Method
----------------------------------------------
-This binding defines the enable method used for starting secondary
-CPU in the following Broadcom SoCs:
- BCM58522, BCM58525, BCM58535, BCM58622, BCM58623, BCM58625, BCM88312
-
-The enable method is specified by defining the following required
-properties in the corresponding secondary "cpu" device tree node:
- - enable-method = "brcm,bcm-nsp-smp";
- - secondary-boot-reg = <...>;
-
-The secondary-boot-reg property is a u32 value that specifies the
-physical address of the register which should hold the common
-entry point for a secondary CPU. This entry is cpu node specific
-and should be added per cpu. E.g., in case of NSP (BCM58625) which
-is a dual core CPU SoC, this entry should be added to cpu1 node.
-
-
-Example:
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- next-level-cache = <&L2>;
- reg = <0>;
- };
-
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- next-level-cache = <&L2>;
- enable-method = "brcm,bcm-nsp-smp";
- secondary-boot-reg = <0xffff042c>;
- reg = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.txt
deleted file mode 100644
index eae53e4556be..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Broadcom Northstar Plus device tree bindings
---------------------------------------------
-
-Broadcom Northstar Plus family of SoCs are used for switching control
-and management applications as well as residential router/gateway
-applications. The SoC features dual core Cortex A9 ARM CPUs, integrating
-several peripheral interfaces including multiple Gigabit Ethernet PHYs,
-DDR3 memory, PCIE Gen-2, USB 2.0 and USB 3.0, serial and NAND flash,
-SATA and several other IO controllers.
-
-Boards with Northstar Plus SoCs shall have the following properties:
-
-Required root node property:
-
-BCM58522
-compatible = "brcm,bcm58522", "brcm,nsp";
-
-BCM58525
-compatible = "brcm,bcm58525", "brcm,nsp";
-
-BCM58535
-compatible = "brcm,bcm58535", "brcm,nsp";
-
-BCM58622
-compatible = "brcm,bcm58622", "brcm,nsp";
-
-BCM58623
-compatible = "brcm,bcm58623", "brcm,nsp";
-
-BCM58625
-compatible = "brcm,bcm58625", "brcm,nsp";
-
-BCM88312
-compatible = "brcm,bcm88312", "brcm,nsp";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml
new file mode 100644
index 000000000000..fe364cebf57f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,nsp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Northstar Plus device tree bindings
+
+description:
+ The Broadcom Northstar Plus family of SoCs is used for switching control
+ and management applications as well as residential router/gateway
+ applications. The SoCs feature dual-core Cortex-A9 ARM CPUs, integrating
+ several peripheral interfaces including multiple Gigabit Ethernet PHYs,
+ DDR3 memory, PCIe Gen-2, USB 2.0 and USB 3.0, serial and NAND flash,
+ SATA and several other IO controllers.
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm58522
+ - brcm,bcm58525
+ - brcm,bcm58535
+ - brcm,bcm58622
+ - brcm,bcm58623
+ - brcm,bcm58625
+ - brcm,bcm88312
+ - const: brcm,nsp
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.txt
deleted file mode 100644
index 23a02178dd44..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Broadcom Stingray device tree bindings
-------------------------------------------------
-
-Boards with Stingray shall have the following properties:
-
-Required root node property:
-
-Stingray Combo SVK board
-compatible = "brcm,bcm958742k", "brcm,stingray";
-
-Stingray SST100 board
-compatible = "brcm,bcm958742t", "brcm,stingray";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml
new file mode 100644
index 000000000000..4ad2b2124ab4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,stingray.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Stingray device tree bindings
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+ - Scott Branden <sbranden@broadcom.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm958742k
+ - brcm,bcm958742t
+ - brcm,bcm958802a802x
+ - const: brcm,stingray
+
+...
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.txt
deleted file mode 100644
index 223ed3471c08..000000000000
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Broadcom Vulcan device tree bindings
-------------------------------------
-
-Boards with Broadcom Vulcan shall have the following root property:
-
-Broadcom Vulcan Evaluation Board:
- compatible = "brcm,vulcan-eval", "brcm,vulcan-soc";
-
-Generic Vulcan board:
- compatible = "brcm,vulcan-soc";
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml b/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml
new file mode 100644
index 000000000000..c5b6f31c20b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/bcm/brcm,vulcan-soc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Vulcan device tree bindings
+
+maintainers:
+ - Robert Richter <rrichter@marvell.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ items:
+ - enum:
+ - brcm,vulcan-eval
+ - cavium,thunderx2-cn9900
+ - const: brcm,vulcan-soc
+
+...
diff --git a/Documentation/devicetree/bindings/arm/coresight-cti.yaml b/Documentation/devicetree/bindings/arm/coresight-cti.yaml
new file mode 100644
index 000000000000..3db3642bd532
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/coresight-cti.yaml
@@ -0,0 +1,336 @@
+# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
+# Copyright 2019 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/coresight-cti.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Coresight Cross Trigger Interface (CTI) device.
+
+description: |
+ The CoreSight Embedded Cross Trigger (ECT) consists of CTI devices connected
+ to one or more CoreSight components and/or a CPU, with CTIs interconnected in
+ a star topology via the Cross Trigger Matrix (CTM), which is not programmable.
+ The ECT components are not part of the trace generation data path and are thus
+ not part of the CoreSight graph described in the general CoreSight bindings
+ file coresight.txt.
+
+ The CTI component properties define the connections between the individual
+ CTI and the components it is directly connected to, consisting of input and
+ output hardware trigger signals. CTIs can have a maximum number of input and
+ output hardware trigger signals (8 each for v1 CTI, 32 each for v2 CTI). The
+ number is defined at design time, the maximum of each defined in the DEVID
+ register.
+
+ CTIs are interconnected in a star topology via the CTM, using a number of
+ programmable channels, usually 4, but again implementation defined and
+ described in the DEVID register. The star topology is not required to be
+ described in the bindings as the actual connections are software
+ programmable.
+
+ In general the connections between CTI and components via the trigger signals
+ are implementation defined, except when the CTI is connected to an ARM v8
+ architecture core and optional ETM.
+
+ In this case the ARM v8 architecture defines the required signal connections
+ between CTI and the CPU core and ETM if present. In the case of a v8
+ architecturally connected CTI an additional compatible string is used to
+ indicate this feature (arm,coresight-cti-v8-arch).
+
+ When CTI trigger connection information is unavailable then a minimal driver
+ binding can be declared with no explicit trigger signals. This will result in
+ the driver detecting the maximum available triggers and channels from the
+ DEVID register and making them all available for use as a single default
+ connection. Any user / client application will require additional information
+ on the connections between the CTI and other components for correct operation.
+ This information might be found by enabling the Integration Test registers in
+ the driver (set CONFIG_CORESIGHT_CTI_INTEGRATION_TEST in Kernel
+ configuration). These registers may be used to explore the trigger connections
+ between CTI and other CoreSight components.
+
+ Certain triggers between CoreSight devices and the CTI have specific types
+ and usages. These can be defined along with the signal indexes with the
+ constants defined in <dt-bindings/arm/coresight-cti-dt.h>
+
+ For example a CTI connected to a core will usually have a DBGREQ signal. This
+ is defined in the binding as type PE_EDBGREQ. These types will appear in an
+ optional array alongside the signal indexes. Omitting types will default all
+ signals to GEN_IO.
+
+ Note that some hardware trigger signals can be connected to non-CoreSight
+ components (e.g. UART etc) depending on hardware implementation.
+
+maintainers:
+ - Mike Leach <mike.leach@linaro.org>
+
+allOf:
+ - $ref: /schemas/arm/primecell.yaml#
+
+# Need a custom select here or 'arm,primecell' will match on lots of nodes
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - arm,coresight-cti
+ required:
+ - compatible
+
+properties:
+ $nodename:
+ pattern: "^cti(@[0-9a-f]+)$"
+ compatible:
+ oneOf:
+ - items:
+ - const: arm,coresight-cti
+ - const: arm,primecell
+ - items:
+ - const: arm,coresight-cti-v8-arch
+ - const: arm,coresight-cti
+ - const: arm,primecell
+
+ reg:
+ maxItems: 1
+
+ cpu:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Handle to cpu this device is associated with. This must appear in the
+ base cti node if compatible string arm,coresight-cti-v8-arch is used,
+ or may appear in a trig-conns child node when appropriate.
+
+ arm,cti-ctm-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Defines the CTM this CTI is connected to, in large systems with multiple
+ separate CTI/CTM nets. Typically multi-socket systems where the CTM is
+ propagated between sockets.
+
+ arm,cs-dev-assoc:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Defines a phandle reference to an associated CoreSight trace device.
+ When the associated trace device is enabled, then the respective CTI
+ will be enabled. Use in a trig-conns node, or in CTI base node when
+ compatible string arm,coresight-cti-v8-arch used. If the associated
+ device has not been registered then the node name will be stored as
+ the connection name for later resolution. If the associated device is
+ not a CoreSight device or not registered then the node name will remain
+ the connection name and automatic enabling will not occur.
+
+ # size cells and address cells required if trig-conns node present.
+ "#size-cells":
+ const: 0
+
+ "#address-cells":
+ const: 1
+
+patternProperties:
+ '^trig-conns@([0-9]+)$':
+ type: object
+ description:
+ A trigger connections child node which describes the trigger signals
+ between this CTI and another hardware device. This device may be a CPU,
+ CoreSight device, any other hardware device or simple external IO lines.
+ The connection may have both input and output triggers, or only one or the
+ other.
+
+ properties:
+ reg:
+ maxItems: 1
+
+ arm,trig-in-sigs:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 32
+ description:
+ List of CTI trigger in signal numbers in use by a trig-conns node.
+
+ arm,trig-in-types:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 32
+ description:
+ List of constants representing the types for the CTI trigger in
+ signals. Types in this array match to the corresponding signal in the
+ arm,trig-in-sigs array. If the -types array is smaller, or omitted
+ completely, then the types will default to GEN_IO.
+
+ arm,trig-out-sigs:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 32
+ description:
+ List of CTI trigger out signal numbers in use by a trig-conns node.
+
+ arm,trig-out-types:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 32
+ description:
+ List of constants representing the types for the CTI trigger out
+ signals. Types in this array match to the corresponding signal
+ in the arm,trig-out-sigs array. If the "-types" array is smaller,
+ or omitted completely, then the types will default to GEN_IO.
+
+ arm,trig-filters:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 32
+ description:
+ List of CTI trigger out signals that will be blocked from becoming
+ active, unless filtering is disabled on the driver.
+
+ arm,trig-conn-name:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string
+ description:
+ Defines a connection name that will be displayed, if the cpu or
+ arm,cs-dev-assoc properties are not being used in this connection.
+ Principal use is for CTIs that are connected to non-CoreSight devices
+ or external IO.
+
+ anyOf:
+ - required:
+ - arm,trig-in-sigs
+ - required:
+ - arm,trig-out-sigs
+ oneOf:
+ - required:
+ - arm,trig-conn-name
+ - required:
+ - cpu
+ - required:
+ - arm,cs-dev-assoc
+ required:
+ - reg
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: arm,coresight-cti-v8-arch
+
+then:
+ required:
+ - cpu
+
+examples:
+ # minimum CTI definition. DEVID register used to set number of triggers.
+ - |
+ cti@20020000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x20020000 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ };
+ # v8 architecturally defined CTI - CPU + ETM connections generated by the
+ # driver according to the v8 architecture specification.
+ - |
+ cti@859000 {
+ compatible = "arm,coresight-cti-v8-arch", "arm,coresight-cti",
+ "arm,primecell";
+ reg = <0x859000 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU1>;
+ arm,cs-dev-assoc = <&etm1>;
+ };
+ # Implementation defined CTI - CPU + ETM connections explicitly defined.
+ # Shows use of type constants from dt-bindings/arm/coresight-cti-dt.h
+ # #size-cells and #address-cells are required if trig-conns@ nodes present.
+ - |
+ #include <dt-bindings/arm/coresight-cti-dt.h>
+
+ cti@858000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x858000 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+
+ arm,cti-ctm-id = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ trig-conns@0 {
+ reg = <0>;
+ arm,trig-in-sigs = <4 5 6 7>;
+ arm,trig-in-types = <ETM_EXTOUT
+ ETM_EXTOUT
+ ETM_EXTOUT
+ ETM_EXTOUT>;
+ arm,trig-out-sigs = <4 5 6 7>;
+ arm,trig-out-types = <ETM_EXTIN
+ ETM_EXTIN
+ ETM_EXTIN
+ ETM_EXTIN>;
+ arm,cs-dev-assoc = <&etm0>;
+ };
+
+ trig-conns@1 {
+ reg = <1>;
+ cpu = <&CPU0>;
+ arm,trig-in-sigs = <0 1>;
+ arm,trig-in-types = <PE_DBGTRIGGER
+ PE_PMUIRQ>;
+ arm,trig-out-sigs = <0 1 2>;
+ arm,trig-out-types = <PE_EDBGREQ
+ PE_DBGRESTART
+ PE_CTIIRQ>;
+
+ arm,trig-filters = <0>;
+ };
+ };
+ # Implementation defined CTI - non CoreSight component connections.
+ - |
+ cti@20110000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0 0x20110000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ trig-conns@0 {
+ reg = <0>;
+ arm,trig-in-sigs = <0>;
+ arm,trig-in-types = <GEN_INTREQ>;
+ arm,trig-out-sigs = <0>;
+ arm,trig-out-types = <GEN_HALTREQ>;
+ arm,trig-conn-name = "sys_profiler";
+ };
+
+ trig-conns@1 {
+ reg = <1>;
+ arm,trig-out-sigs = <2 3>;
+ arm,trig-out-types = <GEN_HALTREQ GEN_RESTARTREQ>;
+ arm,trig-conn-name = "watchdog";
+ };
+
+ trig-conns@2 {
+ reg = <2>;
+ arm,trig-in-sigs = <1 6>;
+ arm,trig-in-types = <GEN_HALTREQ GEN_RESTARTREQ>;
+ arm,trig-conn-name = "g_counter";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d02c42d21f2f..846f6daae71b 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -45,6 +45,10 @@ its hardware characteristcs.
- Coresight Address Translation Unit (CATU)
"arm,coresight-catu", "arm,primecell";
+ - Coresight Cross Trigger Interface (CTI):
+ "arm,coresight-cti", "arm,primecell";
+ See coresight-cti.yaml for full CTI definitions.
+
* reg: physical base address and length of the register
set(s) of the component.
@@ -72,6 +76,9 @@ its hardware characteristcs.
* reg-names: the only acceptable values are "stm-base" and
"stm-stimulus-base", each corresponding to the areas defined in "reg".
+* Required properties for Coresight Cross Trigger Interface (CTI)
+ See coresight-cti.yaml for full CTI definitions.
+
* Required properties for devices that don't show up on the AMBA bus, such as
non-configurable replicators and non-configurable funnels:
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 0d5b61056b10..a01814765ddb 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -123,11 +123,18 @@ properties:
- arm,cortex-a12
- arm,cortex-a15
- arm,cortex-a17
+ - arm,cortex-a32
+ - arm,cortex-a34
+ - arm,cortex-a35
- arm,cortex-a53
- arm,cortex-a55
- arm,cortex-a57
+ - arm,cortex-a65
- arm,cortex-a72
- arm,cortex-a73
+ - arm,cortex-a75
+ - arm,cortex-a76
+ - arm,cortex-a77
- arm,cortex-m0
- arm,cortex-m0+
- arm,cortex-m1
@@ -136,6 +143,8 @@ properties:
- arm,cortex-r4
- arm,cortex-r5
- arm,cortex-r7
+ - arm,neoverse-e1
+ - arm,neoverse-n1
- brcm,brahma-b15
- brcm,brahma-b53
- brcm,vulcan
@@ -155,6 +164,8 @@ properties:
- nvidia,tegra194-carmel
- qcom,krait
- qcom,kryo
+ - qcom,kryo260
+ - qcom,kryo280
- qcom,kryo385
- qcom,kryo485
- qcom,scorpion
@@ -201,6 +212,8 @@ properties:
- rockchip,rk3066-smp
- socionext,milbeaut-m10v-smp
- ste,dbx500-smp
+ - ti,am3352
+ - ti,am4372
cpu-release-addr:
$ref: '/schemas/types.yaml#/definitions/uint64'
@@ -287,6 +300,39 @@ properties:
While optional, it is the preferred way to get access to
the cpu-core power-domains.
+ secondary-boot-reg:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: |
+ Required for systems that have an "enable-method" property value of
+ "brcm,bcm11351-cpu-method", "brcm,bcm23550" or "brcm,bcm-nsp-smp".
+
+ This includes the following SoCs:
+ BCM11130, BCM11140, BCM11351, BCM28145, BCM28155, BCM21664, BCM23550
+ BCM58522, BCM58525, BCM58535, BCM58622, BCM58623, BCM58625, BCM88312
+
+ The secondary-boot-reg property is a u32 value that specifies the
+ physical address of the register used to request that the ROM
+ holding pen code release a secondary CPU. The value written to the
+ register is formed by encoding the target CPU id into the low bits
+ of the physical start address it should jump to.
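+
+ # A minimal sketch of a cpu node using this property; the register
+ # address below is illustrative only, not taken from a real board:
+ #   cpu@1 {
+ #       device_type = "cpu";
+ #       compatible = "arm,cortex-a9";
+ #       reg = <1>;
+ #       enable-method = "brcm,bcm-nsp-smp";
+ #       secondary-boot-reg = <0xffff042c>;
+ #   };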
+
+if:
+ # If the enable-method property contains one of those values
+ properties:
+ enable-method:
+ contains:
+ enum:
+ - brcm,bcm11351-cpu-method
+ - brcm,bcm23550
+ - brcm,bcm-nsp-smp
+ # and if enable-method is present
+ required:
+ - enable-method
+
+then:
+ required:
+ - secondary-boot-reg
+
required:
- device_type
- reg
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
index e07735a8c2c7..8cac6fafaab0 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
@@ -164,7 +164,7 @@ Required properties:
- compatible: should be:
"fsl,imx8qxp-sc-key"
followed by "fsl,imx-sc-key";
-- linux,keycodes: See Documentation/devicetree/bindings/input/keys.txt
+- linux,keycodes: See Documentation/devicetree/bindings/input/input.yaml
Example (imx8qxp):
-------------
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 0e17e1f6fb80..cd3fbe7e3948 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -119,6 +119,10 @@ properties:
- fsl,imx6q-sabreauto
- fsl,imx6q-sabrelite
- fsl,imx6q-sabresd
+ - technexion,imx6q-pico-dwarf # TechNexion i.MX6Q Pico-Dwarf
+ - technexion,imx6q-pico-hobbit # TechNexion i.MX6Q Pico-Hobbit
+ - technexion,imx6q-pico-nymph # TechNexion i.MX6Q Pico-Nymph
+ - technexion,imx6q-pico-pi # TechNexion i.MX6Q Pico-Pi
- technologic,imx6q-ts4900
- technologic,imx6q-ts7970
- toradex,apalis_imx6q # Apalis iMX6 Module
@@ -166,6 +170,10 @@ properties:
- emtrion,emcon-mx6-avari # emCON-MX6S or emCON-MX6DL SoM on Avari Base
- fsl,imx6dl-sabreauto # i.MX6 DualLite/Solo SABRE Automotive Board
- fsl,imx6dl-sabresd # i.MX6 DualLite SABRE Smart Device Board
+ - technexion,imx6dl-pico-dwarf # TechNexion i.MX6DL Pico-Dwarf
+ - technexion,imx6dl-pico-hobbit # TechNexion i.MX6DL Pico-Hobbit
+ - technexion,imx6dl-pico-nymph # TechNexion i.MX6DL Pico-Nymph
+ - technexion,imx6dl-pico-pi # TechNexion i.MX6DL Pico-Pi
- technologic,imx6dl-ts4900
- technologic,imx6dl-ts7970
- toradex,colibri_imx6dl # Colibri iMX6 Module
@@ -225,6 +233,9 @@ properties:
- fsl,imx6ul-14x14-evk # i.MX6 UltraLite 14x14 EVK Board
- kontron,imx6ul-n6310-som # Kontron N6310 SOM
- kontron,imx6ul-n6311-som # Kontron N6311 SOM
+ - technexion,imx6ul-pico-dwarf # TechNexion i.MX6UL Pico-Dwarf
+ - technexion,imx6ul-pico-hobbit # TechNexion i.MX6UL Pico-Hobbit
+ - technexion,imx6ul-pico-pi # TechNexion i.MX6UL Pico-Pi
- const: fsl,imx6ul
- description: Kontron N6310 S Board
@@ -274,6 +285,7 @@ properties:
items:
- enum:
- toradex,colibri-imx7s # Colibri iMX7 Solo Module
+ - toradex,colibri-imx7s-aster # Colibri iMX7 Solo Module on Aster Carrier Board
- toradex,colibri-imx7s-eval-v3 # Colibri iMX7 Solo Module on Colibri Evaluation Board V3
- tq,imx7s-mba7 # i.MX7S TQ MBa7 with TQMa7S SoM
- const: fsl,imx7s
@@ -284,8 +296,14 @@ properties:
- fsl,imx7d-sdb # i.MX7 SabreSD Board
- fsl,imx7d-sdb-reva # i.MX7 SabreSD Rev-A Board
- novtech,imx7d-meerkat96 # i.MX7 Meerkat96 Board
+ - technexion,imx7d-pico-dwarf # TechNexion i.MX7D Pico-Dwarf
+ - technexion,imx7d-pico-hobbit # TechNexion i.MX7D Pico-Hobbit
+ - technexion,imx7d-pico-nymph # TechNexion i.MX7D Pico-Nymph
+ - technexion,imx7d-pico-pi # TechNexion i.MX7D Pico-Pi
- toradex,colibri-imx7d # Colibri iMX7 Dual Module
+ - toradex,colibri-imx7d-aster # Colibri iMX7 Dual Module on Aster Carrier Board
- toradex,colibri-imx7d-emmc # Colibri iMX7 Dual 1GB (eMMC) Module
+ - toradex,colibri-imx7d-emmc-aster # Colibri iMX7 Dual 1GB (eMMC) Module on Aster Carrier Board
- toradex,colibri-imx7d-emmc-eval-v3 # Colibri iMX7 Dual 1GB (eMMC) Module on Colibri Evaluation Board V3
- toradex,colibri-imx7d-eval-v3 # Colibri iMX7 Dual Module on Colibri Evaluation Board V3
- tq,imx7d-mba7 # i.MX7D TQ MBa7 with TQMa7D SoM
@@ -324,6 +342,12 @@ properties:
- fsl,imx8mn-evk # i.MX8MN LPDDR4 EVK Board
- const: fsl,imx8mn
+ - description: i.MX8MP based Boards
+ items:
+ - enum:
+ - fsl,imx8mp-evk # i.MX8MP EVK Board
+ - const: fsl,imx8mp
+
- description: i.MX8MQ based Boards
items:
- enum:
@@ -395,6 +419,51 @@ properties:
- fsl,ls1021a-twr
- const: fsl,ls1021a
+ - description: LS1028A based Boards
+ items:
+ - enum:
+ - fsl,ls1028a-qds
+ - fsl,ls1028a-rdb
+ - const: fsl,ls1028a
+
+ - description: Kontron KBox A-230-LS
+ items:
+ - const: kontron,kbox-a-230-ls
+ - const: kontron,sl28-var4
+ - const: kontron,sl28
+ - const: fsl,ls1028a
+ - description:
+ Kontron SMARC-sAL28 board on the SMARC Eval Carrier 2.0
+ items:
+ - enum:
+ - kontron,sl28-var2-ads2
+ - kontron,sl28-var3-ads2
+ - kontron,sl28-var4-ads2
+ - enum:
+ - kontron,sl28-var2
+ - kontron,sl28-var3
+ - kontron,sl28-var4
+ - const: kontron,sl28
+ - const: fsl,ls1028a
+
+ - description:
+ Kontron SMARC-sAL28 board (on a generic/undefined carrier)
+ items:
+ - enum:
+ - kontron,sl28-var2
+ - kontron,sl28-var3
+ - kontron,sl28-var4
+ - const: kontron,sl28
+ - const: fsl,ls1028a
+
+ - description:
+ Kontron SMARC-sAL28 board (base). This is used in the base device
+ tree which is compatible with the overlays provided by the
+ vendor.
+ items:
+ - const: kontron,sl28
+ - const: fsl,ls1028a
+
- description: LS1043A based Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.yaml b/Documentation/devicetree/bindings/arm/l2c2x0.yaml
index 913a8cd8b2c0..5d1d50eea26e 100644
--- a/Documentation/devicetree/bindings/arm/l2c2x0.yaml
+++ b/Documentation/devicetree/bindings/arm/l2c2x0.yaml
@@ -29,27 +29,30 @@ allOf:
properties:
compatible:
- enum:
- - arm,pl310-cache
- - arm,l220-cache
- - arm,l210-cache
- # DEPRECATED by "brcm,bcm11351-a2-pl310-cache"
- - bcm,bcm11351-a2-pl310-cache
- # For Broadcom bcm11351 chipset where an
- # offset needs to be added to the address before passing down to the L2
- # cache controller
- - brcm,bcm11351-a2-pl310-cache
- # Marvell Controller designed to be
- # compatible with the ARM one, with system cache mode (meaning
- # maintenance operations on L1 are broadcasted to the L2 and L2
- # performs the same operation).
- - marvell,aurora-system-cache
- # Marvell Controller designed to be
- # compatible with the ARM one with outer cache mode.
- - marvell,aurora-outer-cache
- # Marvell Tauros3 cache controller, compatible
- # with arm,pl310-cache controller.
- - marvell,tauros3-cache
+ oneOf:
+ - enum:
+ - arm,pl310-cache
+ - arm,l220-cache
+ - arm,l210-cache
+ # DEPRECATED by "brcm,bcm11351-a2-pl310-cache"
+ - bcm,bcm11351-a2-pl310-cache
+ # For Broadcom bcm11351 chipset where an
+ # offset needs to be added to the address before passing down to the L2
+ # cache controller
+ - brcm,bcm11351-a2-pl310-cache
+ # Marvell Controller designed to be
+ # compatible with the ARM one, with system cache mode (meaning
+ # maintenance operations on L1 are broadcasted to the L2 and L2
+ # performs the same operation).
+ - marvell,aurora-system-cache
+ # Marvell Controller designed to be
+ # compatible with the ARM one with outer cache mode.
+ - marvell,aurora-outer-cache
+ - items:
+ # Marvell Tauros3 cache controller, compatible
+ # with arm,pl310-cache controller.
+ - const: marvell,tauros3-cache
+ - const: arm,pl310-cache
cache-level:
const: 2
diff --git a/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml b/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml
index 818dfe6de512..3235ec9e9bad 100644
--- a/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml
+++ b/Documentation/devicetree/bindings/arm/mrvl/mrvl.yaml
@@ -28,8 +28,11 @@ properties:
items:
- enum:
- mrvl,mmp2-brownstone
+ - olpc,xo-1.75
- const: mrvl,mmp2
- description: MMP3 based boards
items:
- - const: mrvl,mmp3
+ - enum:
+ - dell,wyse-ariel
+ - const: marvell,mmp3
...
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
index 79902f470e4b..c3a8604dfa80 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
@@ -43,6 +43,8 @@ required:
- reg-names
- interrupts
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml
index 52ae094ce330..97df36d301c9 100644
--- a/Documentation/devicetree/bindings/arm/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/pmu.yaml
@@ -20,27 +20,36 @@ properties:
items:
- enum:
- apm,potenza-pmu
- - arm,armv8-pmuv3
- - arm,cortex-a73-pmu
- - arm,cortex-a72-pmu
- - arm,cortex-a57-pmu
- - arm,cortex-a53-pmu
- - arm,cortex-a35-pmu
- - arm,cortex-a17-pmu
- - arm,cortex-a15-pmu
- - arm,cortex-a12-pmu
- - arm,cortex-a9-pmu
- - arm,cortex-a8-pmu
- - arm,cortex-a7-pmu
- - arm,cortex-a5-pmu
- - arm,arm11mpcore-pmu
- - arm,arm1176-pmu
+ - arm,armv8-pmuv3 # Only for s/w models
- arm,arm1136-pmu
+ - arm,arm1176-pmu
+ - arm,arm11mpcore-pmu
+ - arm,cortex-a5-pmu
+ - arm,cortex-a7-pmu
+ - arm,cortex-a8-pmu
+ - arm,cortex-a9-pmu
+ - arm,cortex-a12-pmu
+ - arm,cortex-a15-pmu
+ - arm,cortex-a17-pmu
+ - arm,cortex-a32-pmu
+ - arm,cortex-a34-pmu
+ - arm,cortex-a35-pmu
+ - arm,cortex-a53-pmu
+ - arm,cortex-a55-pmu
+ - arm,cortex-a57-pmu
+ - arm,cortex-a65-pmu
+ - arm,cortex-a72-pmu
+ - arm,cortex-a73-pmu
+ - arm,cortex-a75-pmu
+ - arm,cortex-a76-pmu
+ - arm,cortex-a77-pmu
+ - arm,neoverse-e1-pmu
+ - arm,neoverse-n1-pmu
- brcm,vulcan-pmu
- cavium,thunder-pmu
+ - qcom,krait-pmu
- qcom,scorpion-pmu
- qcom,scorpion-mp-pmu
- - qcom,krait-pmu
interrupts:
# Don't know how many CPUs, so no constraints to specify
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 5e66934455bb..9247b58c26fc 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -32,6 +32,9 @@ description: |+
http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
properties:
+ $nodename:
+ const: psci
+
compatible:
oneOf:
- description:
@@ -141,6 +144,8 @@ allOf:
- cpu_off
- cpu_on
+additionalProperties: false
+
examples:
- |+
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index 5976c0b16b65..64ddae3bd39f 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -28,6 +28,7 @@ description: |
apq8074
apq8084
apq8096
+ ipq6018
ipq8074
mdm9615
msm8916
@@ -41,6 +42,7 @@ description: |
The 'board' element must be one of the following strings:
cdp
+ cp01-c1
dragonboard
hk01
idp
@@ -150,4 +152,10 @@ properties:
- enum:
- qcom,sc7180-idp
- const: qcom,sc7180
+
+ - items:
+ - enum:
+ - qcom,ipq6018-cp01-c1
+ - const: qcom,ipq6018
+
...
diff --git a/Documentation/devicetree/bindings/arm/renesas,prr.yaml b/Documentation/devicetree/bindings/arm/renesas,prr.yaml
index 7f8d17f33983..dd087643a9f8 100644
--- a/Documentation/devicetree/bindings/arm/renesas,prr.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas,prr.yaml
@@ -27,6 +27,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
prr: chipid@ff000044 {
diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml
index 9436124c5809..611094d9186b 100644
--- a/Documentation/devicetree/bindings/arm/renesas.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas.yaml
@@ -208,6 +208,7 @@ properties:
- description: R-Car M3-W+ (R8A77961)
items:
- enum:
+ - renesas,m3ulcb # M3ULCB (R-Car Starter Kit Pro, RTP8J77961ASKB0SK0SA05A (M3 ES3.0))
- renesas,salvator-xs # Salvator-XS (Salvator-X 2nd version, RTP0RC7796SIPB0012SA5A)
- const: renesas,r8a77961
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 874b0eaa2a75..715586dea9bb 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -402,6 +402,11 @@ properties:
- const: phytec,rk3288-phycore-som
- const: rockchip,rk3288
+ - description: Pine64 PinebookPro
+ items:
+ - const: pine64,pinebook-pro
+ - const: rockchip,rk3399
+
- description: Pine64 Rock64
items:
- const: pine64,rock64
@@ -443,7 +448,7 @@ properties:
- description: Rockchip Kylin
items:
- - const: rockchip,kylin-rk3036
+ - const: rockchip,rk3036-kylin
- const: rockchip,rk3036
- description: Rockchip PX3 Evaluation board
@@ -468,6 +473,11 @@ properties:
- const: rockchip,r88
- const: rockchip,rk3368
+ - description: Rockchip RK3036 Evaluation board
+ items:
+ - const: rockchip,rk3036-evb
+ - const: rockchip,rk3036
+
- description: Rockchip RK3228 Evaluation board
items:
- const: rockchip,rk3228-evb
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
index afcd70803c12..0425d333b50d 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
@@ -30,6 +30,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
chipid@10000000 {
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.yaml b/Documentation/devicetree/bindings/arm/samsung/pmu.yaml
index 73b56fc5bf58..c9651892710e 100644
--- a/Documentation/devicetree/bindings/arm/samsung/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/pmu.yaml
@@ -89,6 +89,8 @@ required:
- clock-names
- clocks
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/exynos5250.h>
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml b/Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml
index 51d23b6f8a94..3d9abad3c749 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml
@@ -23,6 +23,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
firmware@203f000 {
diff --git a/Documentation/devicetree/bindings/arm/socionext/cache-uniphier.txt b/Documentation/devicetree/bindings/arm/socionext/cache-uniphier.txt
deleted file mode 100644
index d27a646f48a9..000000000000
--- a/Documentation/devicetree/bindings/arm/socionext/cache-uniphier.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-UniPhier outer cache controller
-
-UniPhier SoCs are integrated with a full-custom outer cache controller system.
-All of them have a level 2 cache controller, and some have a level 3 cache
-controller as well.
-
-Required properties:
-- compatible: should be "socionext,uniphier-system-cache"
-- reg: offsets and lengths of the register sets for the device. It should
- contain 3 regions: control register, revision register, operation register,
- in this order.
-- cache-unified: specifies the cache is a unified cache.
-- cache-size: specifies the size in bytes of the cache
-- cache-sets: specifies the number of associativity sets of the cache
-- cache-line-size: specifies the line size in bytes
-- cache-level: specifies the level in the cache hierarchy. The value should
- be 2 for L2 cache, 3 for L3 cache, etc.
-
-Optional properties:
-- next-level-cache: phandle to the next level cache if present. The next level
- cache should be also compatible with "socionext,uniphier-system-cache".
-
-The L2 cache must exist to use the L3 cache; the cache hierarchy must be
-indicated correctly with "next-level-cache" properties.
-
-Example 1 (system with L2):
- l2: l2-cache@500c0000 {
- compatible = "socionext,uniphier-system-cache";
- reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
- <0x506c0000 0x400>;
- cache-unified;
- cache-size = <0x80000>;
- cache-sets = <256>;
- cache-line-size = <128>;
- cache-level = <2>;
- };
-
-Example 2 (system with L2 and L3):
- l2: l2-cache@500c0000 {
- compatible = "socionext,uniphier-system-cache";
- reg = <0x500c0000 0x2000>, <0x503c0100 0x8>,
- <0x506c0000 0x400>;
- cache-unified;
- cache-size = <0x200000>;
- cache-sets = <512>;
- cache-line-size = <128>;
- cache-level = <2>;
- next-level-cache = <&l3>;
- };
-
- l3: l3-cache@500c8000 {
- compatible = "socionext,uniphier-system-cache";
- reg = <0x500c8000 0x2000>, <0x503c8100 0x8>,
- <0x506c8000 0x400>;
- cache-unified;
- cache-size = <0x400000>;
- cache-sets = <512>;
- cache-line-size = <256>;
- cache-level = <3>;
- };
diff --git a/Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml b/Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml
new file mode 100644
index 000000000000..2e765bb3e6f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/socionext/socionext,uniphier-system-cache.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/socionext/socionext,uniphier-system-cache.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier outer cache controller
+
+description: |
+ UniPhier ARM 32-bit SoCs are integrated with a full-custom outer cache
+ controller system. All of them have a level 2 cache controller, and some
+ have a level 3 cache controller as well.
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ compatible:
+ const: socionext,uniphier-system-cache
+
+ reg:
+ description: |
+ Should contain 3 regions: control register, revision register, and
+ operation register, in this order.
+ minItems: 3
+ maxItems: 3
+
+ interrupts:
+ description: |
+ Interrupts can be used to notify the completion of cache operations.
+ The number of interrupts should match the number of CPU cores.
+ The specified interrupts correspond to CPU0, CPU1, ... in this order.
+ minItems: 1
+ maxItems: 4
+
+ cache-unified: true
+
+ cache-size: true
+
+ cache-sets: true
+
+ cache-line-size: true
+
+ cache-level:
+ minimum: 2
+ maximum: 3
+
+ next-level-cache: true
+
+allOf:
+ - $ref: /schemas/cache-controller.yaml#
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - cache-unified
+ - cache-size
+ - cache-sets
+ - cache-line-size
+ - cache-level
+
+examples:
+ - |
+ // System with L2.
+ cache-controller@500c0000 {
+ compatible = "socionext,uniphier-system-cache";
+ reg = <0x500c0000 0x2000>, <0x503c0100 0x4>, <0x506c0000 0x400>;
+ interrupts = <0 174 4>, <0 175 4>, <0 190 4>, <0 191 4>;
+ cache-unified;
+ cache-size = <0x140000>;
+ cache-sets = <512>;
+ cache-line-size = <128>;
+ cache-level = <2>;
+ };
+ - |
+ // System with L2 and L3.
+ // L2 should specify the next level cache by 'next-level-cache'.
+ l2: cache-controller@500c0000 {
+ compatible = "socionext,uniphier-system-cache";
+ reg = <0x500c0000 0x2000>, <0x503c0100 0x8>, <0x506c0000 0x400>;
+ interrupts = <0 190 4>, <0 191 4>;
+ cache-unified;
+ cache-size = <0x200000>;
+ cache-sets = <512>;
+ cache-line-size = <128>;
+ cache-level = <2>;
+ next-level-cache = <&l3>;
+ };
+
+ l3: cache-controller@500c8000 {
+ compatible = "socionext,uniphier-system-cache";
+ reg = <0x500c8000 0x2000>, <0x503c8100 0x8>, <0x506c8000 0x400>;
+ interrupts = <0 174 4>, <0 175 4>;
+ cache-unified;
+ cache-size = <0x200000>;
+ cache-sets = <512>;
+ cache-line-size = <256>;
+ cache-level = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/socionext/uniphier.txt b/Documentation/devicetree/bindings/arm/socionext/uniphier.txt
deleted file mode 100644
index b3ed1033740e..000000000000
--- a/Documentation/devicetree/bindings/arm/socionext/uniphier.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Socionext UniPhier SoC family
------------------------------
-
-Required properties in the root node:
- - compatible: should contain board and SoC compatible strings
-
-SoC and board compatible strings:
- (sorted chronologically)
-
- - LD4 SoC: "socionext,uniphier-ld4"
- - Reference Board: "socionext,uniphier-ld4-ref"
-
- - Pro4 SoC: "socionext,uniphier-pro4"
- - Reference Board: "socionext,uniphier-pro4-ref"
- - Ace Board: "socionext,uniphier-pro4-ace"
- - Sanji Board: "socionext,uniphier-pro4-sanji"
-
- - sLD8 SoC: "socionext,uniphier-sld8"
- - Reference Board: "socionext,uniphier-sld8-ref"
-
- - PXs2 SoC: "socionext,uniphier-pxs2"
- - Gentil Board: "socionext,uniphier-pxs2-gentil"
- - Vodka Board: "socionext,uniphier-pxs2-vodka"
-
- - LD6b SoC: "socionext,uniphier-ld6b"
- - Reference Board: "socionext,uniphier-ld6b-ref"
-
- - LD11 SoC: "socionext,uniphier-ld11"
- - Reference Board: "socionext,uniphier-ld11-ref"
- - Global Board: "socionext,uniphier-ld11-global"
-
- - LD20 SoC: "socionext,uniphier-ld20"
- - Reference Board: "socionext,uniphier-ld20-ref"
- - Global Board: "socionext,uniphier-ld20-global"
-
- - PXs3 SoC: "socionext,uniphier-pxs3"
- - Reference Board: "socionext,uniphier-pxs3-ref"
-
-Example:
-
-/dts-v1/;
-
-/ {
- compatible = "socionext,uniphier-ld20-ref", "socionext,uniphier-ld20";
-
- ...
-};
diff --git a/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml b/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
new file mode 100644
index 000000000000..65ad6d8a3c99
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/socionext/uniphier.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier platform device tree bindings
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ $nodename:
+ const: /
+ compatible:
+ oneOf:
+ - description: LD4 SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-ld4-ref
+ - const: socionext,uniphier-ld4
+ - description: Pro4 SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-pro4-ace
+ - socionext,uniphier-pro4-ref
+ - socionext,uniphier-pro4-sanji
+ - const: socionext,uniphier-pro4
+ - description: sLD8 SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-sld8-ref
+ - const: socionext,uniphier-sld8
+ - description: PXs2 SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-pxs2-gentil
+ - socionext,uniphier-pxs2-vodka
+ - const: socionext,uniphier-pxs2
+ - description: LD6b SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-ld6b-ref
+ - const: socionext,uniphier-ld6b
+ - description: LD11 SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-ld11-global
+ - socionext,uniphier-ld11-ref
+ - const: socionext,uniphier-ld11
+ - description: LD20 SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-ld20-global
+ - socionext,uniphier-ld20-ref
+ - const: socionext,uniphier-ld20
+ - description: PXs3 SoC boards
+ items:
+ - enum:
+ - socionext,uniphier-pxs3-ref
+ - const: socionext,uniphier-pxs3
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
index 0dedf94c8578..baff80197d5a 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
@@ -29,6 +29,8 @@ required:
- reg
- clocks
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/stm32mp1-clks.h>
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index 327ce6730823..abf2d97fb7ae 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -394,6 +394,12 @@ properties:
- const: linksprite,pcduino3-nano
- const: allwinner,sun7i-a20
+ - description: Linutronix Testbox v2
+ items:
+ - const: linutronix,testbox-v2
+ - const: lamobo,lamobo-r1
+ - const: allwinner,sun7i-a20
+
- description: HAOYU Electronics Marsboard A10
items:
- const: haoyu,a10-marsboard
@@ -636,6 +642,21 @@ properties:
- const: pine64,pinebook
- const: allwinner,sun50i-a64
+ - description: Pine64 PinePhone Developer Batch (1.0)
+ items:
+ - const: pine64,pinephone-1.0
+ - const: allwinner,sun50i-a64
+
+ - description: Pine64 PinePhone Braveheart (1.1)
+ items:
+ - const: pine64,pinephone-1.1
+ - const: allwinner,sun50i-a64
+
+ - description: Pine64 PineTab
+ items:
+ - const: pine64,pinetab
+ - const: allwinner,sun50i-a64
+
- description: Pine64 SoPine Baseboard
items:
- const: pine64,sopine-baseboard
@@ -647,6 +668,11 @@ properties:
- const: pineriver,mini-xplus
- const: allwinner,sun4i-a10
+ - description: PocketBook Touch Lux 3
+ items:
+ - const: pocketbook,touch-lux-3
+ - const: allwinner,sun5i-a13
+
- description: Point of View Protab2-IPS9
items:
- const: pov,protab2-ips9
diff --git a/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml
index 9370e64992dd..aa0738b4d534 100644
--- a/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi/allwinner,sun4i-a10-mbus.yaml
@@ -30,6 +30,7 @@ properties:
enum:
- allwinner,sun5i-a13-mbus
- allwinner,sun8i-h3-mbus
+ - allwinner,sun50i-a64-mbus
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
deleted file mode 100644
index cb12f33a247f..000000000000
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
+++ /dev/null
@@ -1,300 +0,0 @@
-NVIDIA Tegra Power Management Controller (PMC)
-
-== Power Management Controller Node ==
-
-The PMC block interacts with an external Power Management Unit. The PMC
-mostly controls the entry and exit of the system from different sleep
-modes. It provides power-gating controllers for SoC and CPU power-islands.
-
-Required properties:
-- name : Should be pmc
-- compatible : Should contain one of the following:
- For Tegra20 must contain "nvidia,tegra20-pmc".
- For Tegra30 must contain "nvidia,tegra30-pmc".
- For Tegra114 must contain "nvidia,tegra114-pmc"
- For Tegra124 must contain "nvidia,tegra124-pmc"
- For Tegra132 must contain "nvidia,tegra124-pmc"
- For Tegra210 must contain "nvidia,tegra210-pmc"
-- reg : Offset and length of the register set for the device
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- "pclk" (The Tegra clock of that name),
- "clk32k_in" (The 32KHz clock input to Tegra).
-
-Optional properties:
-- nvidia,invert-interrupt : If present, inverts the PMU interrupt signal.
- The PMU is an external Power Management Unit, whose interrupt output
- signal is fed into the PMC. This signal is optionally inverted, and then
- fed into the ARM GIC. The PMC is not involved in the detection or
- handling of this interrupt signal, merely its inversion.
-- nvidia,suspend-mode : The suspend mode that the platform should use.
- Valid values are 0, 1 and 2:
- 0 (LP0): CPU + Core voltage off and DRAM in self-refresh
- 1 (LP1): CPU voltage off and DRAM in self-refresh
- 2 (LP2): CPU voltage off
-- nvidia,core-power-req-active-high : Boolean, core power request active-high
-- nvidia,sys-clock-req-active-high : Boolean, system clock request active-high
-- nvidia,combined-power-req : Boolean, combined power request for CPU & Core
-- nvidia,cpu-pwr-good-en : Boolean, CPU power good signal (from PMIC to PMC)
- is enabled.
-
-Required properties when nvidia,suspend-mode is specified:
-- nvidia,cpu-pwr-good-time : CPU power good time in uS.
-- nvidia,cpu-pwr-off-time : CPU power off time in uS.
-- nvidia,core-pwr-good-time : <Oscillator-stable-time Power-stable-time>
- Core power good time in uS.
-- nvidia,core-pwr-off-time : Core power off time in uS.
-
-Required properties when nvidia,suspend-mode=<0>:
-- nvidia,lp0-vec : <start length> Starting address and length of LP0 vector
- The LP0 vector contains the warm boot code that is executed by AVP when
- resuming from the LP0 state. The AVP (Audio-Video Processor) is an ARM7
- processor and always being the first boot processor when chip is power on
- or resume from deep sleep mode. When the system is resumed from the deep
- sleep mode, the warm boot code will restore some PLLs, clocks and then
- bring up CPU0 for resuming the system.
-
-Hardware-triggered thermal reset:
-On Tegra30, Tegra114 and Tegra124, if the 'i2c-thermtrip' subnode exists,
-hardware-triggered thermal reset will be enabled.
-
-Required properties for hardware-triggered thermal reset (inside 'i2c-thermtrip'):
-- nvidia,i2c-controller-id : ID of I2C controller to send poweroff command to. Valid values are
- described in section 9.2.148 "APBDEV_PMC_SCRATCH53_0" of the
- Tegra K1 Technical Reference Manual.
-- nvidia,bus-addr : Bus address of the PMU on the I2C bus
-- nvidia,reg-addr : I2C register address to write poweroff command to
-- nvidia,reg-data : Poweroff command to write to PMU
-
-Optional properties for hardware-triggered thermal reset (inside 'i2c-thermtrip'):
-- nvidia,pinmux-id : Pinmux used by the hardware when issuing poweroff command.
- Defaults to 0. Valid values are described in section 12.5.2
- "Pinmux Support" of the Tegra4 Technical Reference Manual.
-
-Optional nodes:
-- powergates : This node contains a hierarchy of power domain nodes, which
- should match the powergates on the Tegra SoC. See "Powergate
- Nodes" below.
-
-Example:
-
-/ SoC dts including file
-pmc@7000f400 {
- compatible = "nvidia,tegra20-pmc";
- reg = <0x7000e400 0x400>;
- clocks = <&tegra_car 110>, <&clk32k_in>;
- clock-names = "pclk", "clk32k_in";
- nvidia,invert-interrupt;
- nvidia,suspend-mode = <1>;
- nvidia,cpu-pwr-good-time = <2000>;
- nvidia,cpu-pwr-off-time = <100>;
- nvidia,core-pwr-good-time = <3845 3845>;
- nvidia,core-pwr-off-time = <458>;
- nvidia,core-power-req-active-high;
- nvidia,sys-clock-req-active-high;
- nvidia,lp0-vec = <0xbdffd000 0x2000>;
-};
-
-/ Tegra board dts file
-{
- ...
- pmc@7000f400 {
- i2c-thermtrip {
- nvidia,i2c-controller-id = <4>;
- nvidia,bus-addr = <0x40>;
- nvidia,reg-addr = <0x36>;
- nvidia,reg-data = <0x2>;
- };
- };
- ...
- clocks {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- clk32k_in: clock {
- compatible = "fixed-clock";
- reg=<0>;
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
- };
- ...
-};
-
-
-== Powergate Nodes ==
-
-Each of the powergate nodes represents a power-domain on the Tegra SoC
-that can be power-gated by the Tegra PMC. The name of the powergate node
-should be one of the below. Note that not every powergate is applicable
-to all Tegra devices and the following list shows which powergates are
-applicable to which devices. Please refer to the Tegra TRM for more
-details on the various powergates.
-
- Name Description Devices Applicable
- 3d 3D Graphics Tegra20/114/124/210
- 3d0 3D Graphics 0 Tegra30
- 3d1 3D Graphics 1 Tegra30
- aud Audio Tegra210
- dfd Debug Tegra210
- dis Display A Tegra114/124/210
- disb Display B Tegra114/124/210
- heg 2D Graphics Tegra30/114/124/210
- iram Internal RAM Tegra124/210
- mpe MPEG Encode All
- nvdec NVIDIA Video Decode Engine Tegra210
- nvjpg NVIDIA JPEG Engine Tegra210
- pcie PCIE Tegra20/30/124/210
- sata SATA Tegra30/124/210
- sor Display interfaces Tegra124/210
- ve2 Video Encode Engine 2 Tegra210
- venc Video Encode Engine All
- vdec Video Decode Engine Tegra20/30/114/124
- vic Video Imaging Compositor Tegra124/210
- xusba USB Partition A Tegra114/124/210
- xusbb USB Partition B Tegra114/124/210
- xusbc USB Partition C Tegra114/124/210
-
-Required properties:
- - clocks: Must contain an entry for each clock required by the PMC for
- controlling a power-gate. See ../clocks/clock-bindings.txt for details.
- - resets: Must contain an entry for each reset required by the PMC for
- controlling a power-gate. See ../reset/reset.txt for details.
- - #power-domain-cells: Must be 0.
-
-Example:
-
- pmc: pmc@7000e400 {
- compatible = "nvidia,tegra210-pmc";
- reg = <0x0 0x7000e400 0x0 0x400>;
- clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
- clock-names = "pclk", "clk32k_in";
-
- powergates {
- pd_audio: aud {
- clocks = <&tegra_car TEGRA210_CLK_APE>,
- <&tegra_car TEGRA210_CLK_APB2APE>;
- resets = <&tegra_car 198>;
- #power-domain-cells = <0>;
- };
- };
- };
-
-
-== Powergate Clients ==
-
-Hardware blocks belonging to a power domain should contain a "power-domains"
-property that is a phandle pointing to the corresponding powergate node.
-
-Example:
-
- adma: adma@702e2000 {
- ...
- power-domains = <&pd_audio>;
- ...
- };
-
-== Pad Control ==
-
-On Tegra SoCs a pad is a set of pins which are configured as a group.
-The pin grouping is a fixed attribute of the hardware. The PMC can be
-used to set pad power state and signaling voltage. A pad can be either
-in active or power down mode. The support for power state and signaling
-voltage configuration varies depending on the pad in question. 3.3 V and
-1.8 V signaling voltages are supported on pins where software
-controllable signaling voltage switching is available.
-
-The pad configuration state nodes are placed under the pmc node and they
-are referred to by the pinctrl client properties. For more information
-see Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt.
-The pad name should be used as the value of the pins property in pin
-configuration nodes.
-
-The following pads are present on Tegra124 and Tegra132:
-audio bb cam comp
-csia csb cse dsi
-dsib dsic dsid hdmi
-hsic hv lvds mipi-bias
-nand pex-bias pex-clk1 pex-clk2
-pex-cntrl sdmmc1 sdmmc3 sdmmc4
-sys_ddc uart usb0 usb1
-usb2 usb_bias
-
-The following pads are present on Tegra210:
-audio audio-hv cam csia
-csib csic csid csie
-csif dbg debug-nonao dmic
-dp dsi dsib dsic
-dsid emmc emmc2 gpio
-hdmi hsic lvds mipi-bias
-pex-bias pex-clk1 pex-clk2 pex-cntrl
-sdmmc1 sdmmc3 spi spi-hv
-uart usb0 usb1 usb2
-usb3 usb-bias
-
-Required pin configuration properties:
- - pins: Must contain name of the pad(s) to be configured.
-
-Optional pin configuration properties:
- - low-power-enable: Configure the pad into power down mode
- - low-power-disable: Configure the pad into active mode
- - power-source: Must contain either TEGRA_IO_PAD_VOLTAGE_1V8
- or TEGRA_IO_PAD_VOLTAGE_3V3 to select between signaling voltages.
- The values are defined in
- include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h.
-
-Note: The power state can be configured on all of the Tegra124 and
- Tegra132 pads. None of the Tegra124 or Tegra132 pads support
- signaling voltage switching.
-
-Note: All of the listed Tegra210 pads except pex-cntrl support power
- state configuration. Signaling voltage switching is supported on
- following Tegra210 pads: audio, audio-hv, cam, dbg, dmic, gpio,
- pex-cntrl, sdmmc1, sdmmc3, spi, spi-hv, and uart.
-
-Pad configuration state example:
- pmc: pmc@7000e400 {
- compatible = "nvidia,tegra210-pmc";
- reg = <0x0 0x7000e400 0x0 0x400>;
- clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
- clock-names = "pclk", "clk32k_in";
-
- ...
-
- sdmmc1_3v3: sdmmc1-3v3 {
- pins = "sdmmc1";
- power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
- };
-
- sdmmc1_1v8: sdmmc1-1v8 {
- pins = "sdmmc1";
- power-source = <TEGRA_IO_PAD_VOLTAGE_1V8>;
- };
-
- hdmi_off: hdmi-off {
- pins = "hdmi";
- low-power-enable;
- }
-
- hdmi_on: hdmi-on {
- pins = "hdmi";
- low-power-disable;
- }
- };
-
-Pinctrl client example:
- sdmmc1: sdhci@700b0000 {
- ...
- pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
- pinctrl-0 = <&sdmmc1_3v3>;
- pinctrl-1 = <&sdmmc1_1v8>;
- };
- ...
- sor@54540000 {
- ...
- pinctrl-0 = <&hdmi_off>;
- pinctrl-1 = <&hdmi_on>;
- pinctrl-names = "hdmi-on", "hdmi-off";
- };
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
new file mode 100644
index 000000000000..f17bb353f65e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
@@ -0,0 +1,354 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/tegra/nvidia,tegra20-pmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Tegra Power Management Controller (PMC)
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jonathan Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra20-pmc
+ - nvidia,tegra30-pmc
+ - nvidia,tegra114-pmc
+ - nvidia,tegra124-pmc
+ - nvidia,tegra210-pmc
+
+ reg:
+ maxItems: 1
+ description:
+ Offset and length of the register set for the device.
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: clk32k_in
+ description:
+ Must include the entries pclk and clk32k_in.
+ pclk is the Tegra clock of that name and clk32k_in is the 32KHz
+ clock input to Tegra.
+
+ clocks:
+ maxItems: 2
+ description:
+ Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+
+ '#clock-cells':
+ const: 1
+ description:
+ Tegra PMC has clk_out_1, clk_out_2, and clk_out_3.
+ PMC also has blink control, which allows the 32KHz clock to be
+ output on the Tegra blink pad.
+ A consumer of a PMC clock should specify the desired clock by
+ putting the clock ID in its "clocks" phandle cell with the PMC
+ clock provider.
+ See include/dt-bindings/soc/tegra-pmc.h for the list of Tegra PMC
+ clock IDs.
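+
+ # For instance, a consumer node might reference a PMC clock like
+ # this (clock ID from include/dt-bindings/soc/tegra-pmc.h; the
+ # snippet is illustrative only):
+ #   clocks = <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;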
+
+ '#interrupt-cells':
+ const: 2
+ description:
+ Specifies the number of cells needed to encode an interrupt source.
+ The value must be 2.
+
+ interrupt-controller: true
+
+ nvidia,invert-interrupt:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Inverts the PMU interrupt signal.
+ The PMU is an external Power Management Unit, whose interrupt output
+ signal is fed into the PMC. This signal is optionally inverted, and
+ then fed into the ARM GIC. The PMC is not involved in the detection
+ or handling of this interrupt signal, merely its inversion.
+
+ nvidia,core-power-req-active-high:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Core power request active-high.
+
+ nvidia,sys-clock-req-active-high:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: System clock request active-high.
+
+ nvidia,combined-power-req:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Combined power request for CPU and Core.
+
+ nvidia,cpu-pwr-good-en:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ CPU power good signal from external PMIC to PMC is enabled.
+
+ nvidia,suspend-mode:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2]
+ description:
+ The suspend mode that the platform should use.
+ Mode 0 is for LP0: CPU + Core voltage off and DRAM in self-refresh.
+ Mode 1 is for LP1: CPU voltage off and DRAM in self-refresh.
+ Mode 2 is for LP2: CPU voltage off.
+
+ nvidia,cpu-pwr-good-time:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: CPU power good time in uSec.
+
+ nvidia,cpu-pwr-off-time:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: CPU power off time in uSec.
+
+ nvidia,core-pwr-good-time:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ <Oscillator-stable-time Power-stable-time>
+ Core power good time in uSec.
+
+ nvidia,core-pwr-off-time:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Core power off time in uSec.
+
+ nvidia,lp0-vec:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ <start length> Starting address and length of LP0 vector.
+ The LP0 vector contains the warm boot code that is executed
+ by AVP when resuming from the LP0 state.
+ The AVP (Audio-Video Processor) is an ARM7 processor and is
+ always the first boot processor when the chip is powered on or
+ resumes from deep sleep mode. When the system is resumed from
+ deep sleep mode, the warm boot code restores some PLLs and
+ clocks and then brings up CPU0 to resume the system.
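+
+ # Example, carried over from the previous .txt binding:
+ #   nvidia,lp0-vec = <0xbdffd000 0x2000>;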
+
+ i2c-thermtrip:
+ type: object
+ description:
+ On Tegra30, Tegra114 and Tegra124, if the i2c-thermtrip subnode
+ exists, hardware-triggered thermal reset will be enabled.
+
+ properties:
+ nvidia,i2c-controller-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ ID of the I2C controller used to send the poweroff command to the PMU.
+ Valid values are described in section 9.2.148
+ "APBDEV_PMC_SCRATCH53_0" of the Tegra K1 Technical Reference
+ Manual.
+
+ nvidia,bus-addr:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Bus address of the PMU on the I2C bus.
+
+ nvidia,reg-addr:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: PMU I2C register address to write the poweroff command to.
+
+ nvidia,reg-data:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Poweroff command to write to PMU.
+
+ nvidia,pinmux-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Pinmux used by the hardware when issuing the poweroff command.
+ Defaults to 0. Valid values are described in section 12.5.2
+ "Pinmux Support" of the Tegra4 Technical Reference Manual.
+
+ required:
+ - nvidia,i2c-controller-id
+ - nvidia,bus-addr
+ - nvidia,reg-addr
+ - nvidia,reg-data
+
+ additionalProperties: false
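+
+ # Example i2c-thermtrip subnode, carried over from the previous
+ # .txt binding:
+ #   i2c-thermtrip {
+ #       nvidia,i2c-controller-id = <4>;
+ #       nvidia,bus-addr = <0x40>;
+ #       nvidia,reg-addr = <0x36>;
+ #       nvidia,reg-data = <0x2>;
+ #   };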
+
+ powergates:
+ type: object
+ description: |
+ This node contains a hierarchy of power domain nodes, which should
+ match the powergates on the Tegra SoC. Each powergate node
+ represents a power-domain on the Tegra SoC that can be power-gated
+ by the Tegra PMC.
+ Hardware blocks belonging to a power domain should contain a
+ "power-domains" property that is a phandle pointing to the
+ corresponding powergate node.
+ The name of the powergate node should be one of the below. Note that
+ not every powergate is applicable to all Tegra devices and the following
+ list shows which powergates are applicable to which devices.
+ Please refer to the Tegra TRM for more details on the powergate
+ nodes to use for each power-gate block inside Tegra.
+ Name Description Devices Applicable
+ 3d 3D Graphics Tegra20/114/124/210
+ 3d0 3D Graphics 0 Tegra30
+ 3d1 3D Graphics 1 Tegra30
+ aud Audio Tegra210
+ dfd Debug Tegra210
+ dis Display A Tegra114/124/210
+ disb Display B Tegra114/124/210
+ heg 2D Graphics Tegra30/114/124/210
+ iram Internal RAM Tegra124/210
+ mpe MPEG Encode All
+ nvdec NVIDIA Video Decode Engine Tegra210
+ nvjpg NVIDIA JPEG Engine Tegra210
+ pcie PCIE Tegra20/30/124/210
+ sata SATA Tegra30/124/210
+ sor Display interfaces Tegra124/210
+ ve2 Video Encode Engine 2 Tegra210
+ venc Video Encode Engine All
+ vdec Video Decode Engine Tegra20/30/114/124
+ vic Video Imaging Compositor Tegra124/210
+ xusba USB Partition A Tegra114/124/210
+ xusbb USB Partition B Tegra114/124/210
+ xusbc USB Partition C Tegra114/124/210
+
+ patternProperties:
+ "^[a-z0-9]+$":
+ type: object
+
+ patternProperties:
+ clocks:
+ minItems: 1
+ maxItems: 8
+ description:
+ Must contain an entry for each clock required by the PMC
+ for controlling a power-gate.
+ See ../clocks/clock-bindings.txt document for more details.
+
+ resets:
+ minItems: 1
+ maxItems: 8
+ description:
+ Must contain an entry for each reset required by the PMC
+ for controlling a power-gate.
+ See ../reset/reset.txt for more details.
+
+ '#power-domain-cells':
+ const: 0
+ description: Must be 0.
+
+ required:
+ - clocks
+ - resets
+ - '#power-domain-cells'
+
+ additionalProperties: false
+
+patternProperties:
+ "^[a-f0-9]+-[a-f0-9]+$":
+ type: object
+ description:
+ This is a pad configuration node. On Tegra SoCs a pad is a set of
+ pins which are configured as a group. The pin grouping is a fixed
+ attribute of the hardware. The PMC can be used to set pad power state
+ and signaling voltage. A pad can be either in active or power down mode.
+ The support for power state and signaling voltage configuration varies
+ depending on the pad in question. 3.3V and 1.8V signaling voltages
+ are supported on pins where software controllable signaling voltage
+ switching is available.
+
+ The pad configuration state nodes are placed under the pmc node and they
+ are referred to by the pinctrl client properties. For more information
+ see Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt.
+ The pad name should be used as the value of the pins property in pin
+ configuration nodes.
+
+ The following pads are present on Tegra124 and Tegra132:
+ audio, bb, cam, comp, csia, csb, cse, dsi, dsib, dsic, dsid, hdmi, hsic,
+ hv, lvds, mipi-bias, nand, pex-bias, pex-clk1, pex-clk2, pex-cntrl,
+ sdmmc1, sdmmc3, sdmmc4, sys_ddc, uart, usb0, usb1, usb2, usb_bias.
+
+ The following pads are present on Tegra210:
+ audio, audio-hv, cam, csia, csib, csic, csid, csie, csif, dbg,
+ debug-nonao, dmic, dp, dsi, dsib, dsic, dsid, emmc, emmc2, gpio, hdmi,
+ hsic, lvds, mipi-bias, pex-bias, pex-clk1, pex-clk2, pex-cntrl, sdmmc1,
+ sdmmc3, spi, spi-hv, uart, usb0, usb1, usb2, usb3, usb-bias.
+
+ properties:
+ pins:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Must contain name of the pad(s) to be configured.
+
+ low-power-enable:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Configure the pad into power down mode.
+
+ low-power-disable:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Configure the pad into active mode.
+
+ power-source:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Must contain either TEGRA_IO_PAD_VOLTAGE_1V8 or
+ TEGRA_IO_PAD_VOLTAGE_3V3 to select between signaling voltages.
+ The values are defined in
+ include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h.
+ Power state can be configured on all Tegra124 and Tegra132
+ pads. None of the Tegra124 or Tegra132 pads support signaling
+ voltage switching.
+ All of the listed Tegra210 pads except pex-cntrl support power
+ state configuration. Signaling voltage switching is supported
+ on the following Tegra210 pads:
+ audio, audio-hv, cam, dbg, dmic, gpio, pex-cntrl, sdmmc1,
+ sdmmc3, spi, spi-hv, and uart.
+
+ required:
+ - pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clock-names
+ - clocks
+ - '#clock-cells'
+
+dependencies:
+ "nvidia,suspend-mode": ["nvidia,core-pwr-off-time", "nvidia,cpu-pwr-off-time"]
+ "nvidia,core-pwr-off-time": ["nvidia,core-pwr-good-time"]
+ "nvidia,cpu-pwr-off-time": ["nvidia,cpu-pwr-good-time"]
+
+examples:
+ - |
+
+ #include <dt-bindings/clock/tegra210-car.h>
+ #include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h>
+ #include <dt-bindings/soc/tegra-pmc.h>
+
+ tegra_pmc: pmc@7000e400 {
+ compatible = "nvidia,tegra210-pmc";
+ reg = <0x0 0x7000e400 0x0 0x400>;
+ clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
+ clock-names = "pclk", "clk32k_in";
+ #clock-cells = <1>;
+
+ nvidia,invert-interrupt;
+ nvidia,suspend-mode = <0>;
+ nvidia,cpu-pwr-good-time = <0>;
+ nvidia,cpu-pwr-off-time = <0>;
+ nvidia,core-pwr-good-time = <4587 3876>;
+ nvidia,core-pwr-off-time = <39065>;
+ nvidia,core-power-req-active-high;
+ nvidia,sys-clock-req-active-high;
+
+ powergates {
+ pd_audio: aud {
+ clocks = <&tegra_car TEGRA210_CLK_APE>,
+ <&tegra_car TEGRA210_CLK_APB2APE>;
+ resets = <&tegra_car 198>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_xusbss: xusba {
+ clocks = <&tegra_car TEGRA210_CLK_XUSB_SS>;
+ resets = <&tegra_car TEGRA210_CLK_XUSB_SS>;
+ #power-domain-cells = <0>;
+ };
+ };
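+
+ /* Pad configuration states, carried over from the previous
+ * .txt binding's example. */
+ sdmmc1_3v3: sdmmc1-3v3 {
+ pins = "sdmmc1";
+ power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
+ };
+
+ sdmmc1_1v8: sdmmc1-1v8 {
+ pins = "sdmmc1";
+ power-source = <TEGRA_IO_PAD_VOLTAGE_1V8>;
+ };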
+ };
diff --git a/Documentation/devicetree/bindings/arm/vexpress.txt b/Documentation/devicetree/bindings/arm/vexpress.txt
deleted file mode 100644
index 39844cd0bcce..000000000000
--- a/Documentation/devicetree/bindings/arm/vexpress.txt
+++ /dev/null
@@ -1,229 +0,0 @@
-ARM Versatile Express boards family
------------------------------------
-
-ARM's Versatile Express platform consists of a motherboard and one
-or more daughterboards (tiles). The motherboard provides a set of
-peripherals. Processor and RAM "live" on the tiles.
-
-The motherboard and each core tile should be described by a separate
-Device Tree source file, with the tile's description including
-the motherboard file using a /include/ directive. As the motherboard
-can be initialized in one of two different configurations ("memory
-maps"), care must be taken to include the correct one.
-
-
-Root node
----------
-
-Required properties in the root node:
-- compatible value:
- compatible = "arm,vexpress,<model>", "arm,vexpress";
- where <model> is the full tile model name (as used in the tile's
- Technical Reference Manual), eg.:
- - for Coretile Express A5x2 (V2P-CA5s):
- compatible = "arm,vexpress,v2p-ca5s", "arm,vexpress";
- - for Coretile Express A9x4 (V2P-CA9):
- compatible = "arm,vexpress,v2p-ca9", "arm,vexpress";
- If a tile comes in several variants or can be used in more then one
- configuration, the compatible value should be:
- compatible = "arm,vexpress,<model>,<variant>", \
- "arm,vexpress,<model>", "arm,vexpress";
- eg:
- - Coretile Express A15x2 (V2P-CA15) with Tech Chip 1:
- compatible = "arm,vexpress,v2p-ca15,tc1", \
- "arm,vexpress,v2p-ca15", "arm,vexpress";
- - LogicTile Express 13MG (V2F-2XV6) running Cortex-A7 (3 cores) SMM:
- compatible = "arm,vexpress,v2f-2xv6,ca7x3", \
- "arm,vexpress,v2f-2xv6", "arm,vexpress";
-
-Optional properties in the root node:
-- tile model name (use name from the tile's Technical Reference
- Manual, eg. "V2P-CA5s")
- model = "<model>";
-- tile's HBI number (unique ARM's board model ID, visible on the
- PCB's silkscreen) in hexadecimal transcription:
- arm,hbi = <0xhbi>
- eg:
- - for Coretile Express A5x2 (V2P-CA5s) HBI-0191:
- arm,hbi = <0x191>;
- - Coretile Express A9x4 (V2P-CA9) HBI-0225:
- arm,hbi = <0x225>;
-
-
-CPU nodes
----------
-
-Top-level standard "cpus" node is required. It must contain a node
-with device_type = "cpu" property for every available core, eg.:
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a5";
- reg = <0>;
- };
- };
-
-
-Configuration infrastructure
-----------------------------
-
-The platform has an elaborated configuration system, consisting of
-microcontrollers residing on the mother- and daughterboards known
-as Motherboard/Daughterboard Configuration Controller (MCC and DCC).
-The controllers are responsible for the platform initialization
-(reset generation, flash programming, FPGA bitfiles loading etc.)
-but also control clock generators, voltage regulators, gather
-environmental data like temperature, power consumption etc. Even
-the video output switch (FPGA) is controlled that way.
-
-The controllers are not mapped into normal memory address space
-and must be accessed through bridges - other devices capable
-of generating transactions on the configuration bus.
-
-The nodes describing configuration controllers must define
-the following properties:
-- compatible value:
- compatible = "arm,vexpress,config-bus";
-- bridge phandle:
- arm,vexpress,config-bridge = <phandle>;
-and children describing available functions.
-
-
-Platform topology
------------------
-
-As Versatile Express can be configured in number of physically
-different setups, the device tree should describe platform topology.
-Root node and main motherboard node must define the following
-property, describing physical location of the children nodes:
-- site number:
- arm,vexpress,site = <number>;
- where 0 means motherboard, 1 or 2 are daugtherboard sites,
- 0xf means "master" site (site containing main CPU tile)
-- when daughterboards are stacked on one site, their position
- in the stack be be described with:
- arm,vexpress,position = <number>;
-- when describing tiles consisting more than one DCC, its number
- can be described with:
- arm,vexpress,dcc = <number>;
-
-Any of the numbers above defaults to zero if not defined in
-the node or any of its parent.
-
-
-Motherboard
------------
-
-The motherboard description file provides a single "motherboard" node
-using 2 address cells corresponding to the Static Memory Bus used
-between the motherboard and the tile. The first cell defines the Chip
-Select (CS) line number, the second cell address offset within the CS.
-All interrupt lines between the motherboard and the tile are active
-high and are described using a single cell.
-
-Optional properties of the "motherboard" node:
-- motherboard's memory map variant:
- arm,v2m-memory-map = "<name>";
- where name is one of:
- - "rs1" - for RS1 map (i.a. peripherals on CS3); this map is also
- referred to as "ARM Cortex-A Series memory map":
- arm,v2m-memory-map = "rs1";
- When this property is missing, the motherboard is using the original
- memory map (also known as the "Legacy memory map", primarily used
- with the original CoreTile Express A9x4) with peripherals on CS7.
-
-Motherboard .dtsi files provide a set of labelled peripherals that
-can be used to obtain the required phandles in the tile's "aliases" node:
-- UARTs, note that the numbers correspond to the physical connectors
- on the motherboard's back panel:
- v2m_serial0, v2m_serial1, v2m_serial2 and v2m_serial3
-- I2C controllers:
- v2m_i2c_dvi and v2m_i2c_pcie
-- SP804 timers:
- v2m_timer01 and v2m_timer23
-
-The tile description should define a "smb" node, describing the
-Static Memory Bus between the tile and motherboard. It must define
-the following properties:
-- "simple-bus" compatible value (to ensure creation of the children)
- compatible = "simple-bus";
-- mapping of the SMB CS/offset addresses into main address space:
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <...>;
-- interrupts mapping:
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 63>;
- interrupt-map = <...>;
-
-
-Example of a VE tile description (simplified)
----------------------------------------------
-
-/dts-v1/;
-
-/ {
- model = "V2P-CA5s";
- arm,hbi = <0x191>;
- arm,vexpress,site = <0xf>;
- compatible = "arm,vexpress-v2p-ca5s", "arm,vexpress";
- interrupt-parent = <&gic>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- chosen { };
-
- aliases {
- serial0 = &v2m_serial0;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a5";
- reg = <0>;
- };
- };
-
- gic: interrupt-controller@2c001000 {
- compatible = "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- #address-cells = <0>;
- interrupt-controller;
- reg = <0x2c001000 0x1000>,
- <0x2c000100 0x100>;
- };
-
- dcc {
- compatible = "arm,vexpress,config-bus";
- arm,vexpress,config-bridge = <&v2m_sysreg>;
-
- osc@0 {
- compatible = "arm,vexpress-osc";
- };
- };
-
- smb {
- compatible = "simple-bus";
-
- #address-cells = <2>;
- #size-cells = <1>;
- /* CS0 is visible at 0x08000000 */
- ranges = <0 0 0x08000000 0x04000000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 63>;
- /* Active high IRQ 0 is connected to GIC's SPI0 */
- interrupt-map = <0 0 0 &gic 0 0 4>;
-
- /include/ "vexpress-v2m-rs1.dtsi"
- };
-};
-
diff --git a/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml b/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
new file mode 100644
index 000000000000..7b69831060d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/ata/renesas,rcar-sata.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas R-Car Serial-ATA Interface
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,sata-r8a7779 # R-Car H1
+ - items:
+ - enum:
+ - renesas,sata-r8a7790-es1 # R-Car H2 ES1
+ - renesas,sata-r8a7790 # R-Car H2 other than ES1
+ - renesas,sata-r8a7791 # R-Car M2-W
+ - renesas,sata-r8a7793 # R-Car M2-N
+ - const: renesas,rcar-gen2-sata # generic R-Car Gen2
+ - items:
+ - enum:
+ - renesas,sata-r8a774b1 # RZ/G2N
+ - renesas,sata-r8a7795 # R-Car H3
+ - renesas,sata-r8a77965 # R-Car M3-N
+ - const: renesas,rcar-gen3-sata # generic R-Car Gen3 or RZ/G2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7791-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7791-sysc.h>
+
+ sata@ee300000 {
+ compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata";
+ reg = <0xee300000 0x200000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 815>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 815>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
deleted file mode 100644
index a2fbdc91570d..000000000000
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-* Renesas R-Car SATA
-
-Required properties:
-- compatible : should contain one or more of the following:
- - "renesas,sata-r8a774b1" for RZ/G2N
- - "renesas,sata-r8a7779" for R-Car H1
- - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
- - "renesas,sata-r8a7790" for R-Car H2 other than ES1
- - "renesas,sata-r8a7791" for R-Car M2-W
- - "renesas,sata-r8a7793" for R-Car M2-N
- - "renesas,sata-r8a7795" for R-Car H3
- - "renesas,sata-r8a77965" for R-Car M3-N
- - "renesas,rcar-gen2-sata" for a generic R-Car Gen2
- compatible device
- - "renesas,rcar-gen3-sata" for a generic R-Car Gen3 or
- RZ/G2 compatible device
- - "renesas,rcar-sata" is deprecated
-
- When compatible with the generic version nodes
- must list the SoC-specific version corresponding
- to the platform first followed by the generic
- version.
-
-- reg : address and length of the SATA registers;
-- interrupts : must consist of one interrupt specifier.
-- clocks : must contain a reference to the functional clock.
-
-Example:
-
-sata0: sata@ee300000 {
- compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata";
- reg = <0 0xee300000 0 0x2000>;
- interrupt-parent = <&gic>;
- interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp8_clks R8A7791_CLK_SATA0>;
-};
diff --git a/Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml b/Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml
new file mode 100644
index 000000000000..c4c9119e4a20
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/socionext,uniphier-system-bus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier System Bus
+
+description: |
+ The UniPhier System Bus is an external bus that connects on-board devices to
+ the UniPhier SoC. It is a simple (semi-)parallel bus with address, data, and
+ some control signals. It supports up to 8 banks (chip selects).
+
+ Before any access to the bus, the bus controller must be configured; the bus
+ controller registers provide the control for the translation from the offset
+ within each bank to the CPU-viewed address. The needed setup includes the
+ base address and the size of each bank. Optionally, some timing parameters can
+ be optimized for faster bus access.
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ compatible:
+ const: socionext,uniphier-system-bus
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ description: |
+ The first cell is the bank number (chip select).
+ The second cell is the address offset within the bank.
+ const: 2
+
+ "#size-cells":
+ const: 1
+
+ ranges:
+ description: |
+ Provide address translation from the System Bus to the parent bus.
+
+ Note:
+ The address region(s) that can be assigned for the System Bus is
+ implementation defined. Some SoCs can use 0x00000000-0x0fffffff and
+ 0x40000000-0x4fffffff, while other SoCs can use only 0x40000000-0x4fffffff.
+ There might be additional limitations depending on SoCs and the boot mode.
+ The address translation is arbitrary as long as the banks are assigned in
+ the supported address space with the required alignment and they do not
+ overlap one another.
+
+ For example, it is possible to map:
+ bank 0 to 0x42000000-0x43ffffff, bank 5 to 0x46000000-0x46ffffff
+ It is also possible to map:
+ bank 0 to 0x48000000-0x49ffffff, bank 5 to 0x44000000-0x44ffffff
+ There is no reason to stick to a particular translation mapping, but the
+ "ranges" property should provide a "reasonable" default that is known to
+ work. The software should initialize the bus controller according to it.
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+examples:
+ - |
+ // In this example,
+ // - the Ethernet device is connected at the offset 0x01f00000 of CS1 and
+ // mapped to 0x43f00000 of the parent bus.
+ // - the UART device is connected at the offset 0x00200000 of CS5 and
+ // mapped to 0x46200000 of the parent bus.
+
+ system-bus@58c00000 {
+ compatible = "socionext,uniphier-system-bus";
+ reg = <0x58c00000 0x400>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0x00000000 0x42000000 0x02000000>,
+ <5 0x00000000 0x46000000 0x01000000>;
+
+ ethernet@1,01f00000 {
+ compatible = "smsc,lan9115";
+ reg = <1 0x01f00000 0x1000>;
+ interrupts = <0 48 4>;
+ phy-mode = "mii";
+ };
+
+ uart@5,00200000 {
+ compatible = "ns16550a";
+ reg = <5 0x00200000 0x20>;
+ interrupts = <0 49 4>;
+ clock-frequency = <12288000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/bus/ti-sysc.txt b/Documentation/devicetree/bindings/bus/ti-sysc.txt
index 233eb8294204..c984143d08d2 100644
--- a/Documentation/devicetree/bindings/bus/ti-sysc.txt
+++ b/Documentation/devicetree/bindings/bus/ti-sysc.txt
@@ -38,6 +38,7 @@ Required standard properties:
"ti,sysc-dra7-mcasp"
"ti,sysc-usb-host-fs"
"ti,sysc-dra7-mcan"
+ "ti,sysc-pruss"
- reg shall have register areas implemented for the interconnect
target module in question such as revision, sysc and syss
diff --git a/Documentation/devicetree/bindings/bus/uniphier-system-bus.txt b/Documentation/devicetree/bindings/bus/uniphier-system-bus.txt
deleted file mode 100644
index 68ef80afff16..000000000000
--- a/Documentation/devicetree/bindings/bus/uniphier-system-bus.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-UniPhier System Bus
-
-The UniPhier System Bus is an external bus that connects on-board devices to
-the UniPhier SoC. It is a simple (semi-)parallel bus with address, data, and
-some control signals. It supports up to 8 banks (chip selects).
-
-Before any access to the bus, the bus controller must be configured; the bus
-controller registers provide the control for the translation from the offset
-within each bank to the CPU-viewed address. The needed setup includes the base
-address, the size of each bank. Optionally, some timing parameters can be
-optimized for faster bus access.
-
-Required properties:
-- compatible: should be "socionext,uniphier-system-bus".
-- reg: offset and length of the register set for the bus controller device.
-- #address-cells: should be 2. The first cell is the bank number (chip select).
- The second cell is the address offset within the bank.
-- #size-cells: should be 1.
-- ranges: should provide a proper address translation from the System Bus to
- the parent bus.
-
-Note:
-The address region(s) that can be assigned for the System Bus is implementation
-defined. Some SoCs can use 0x00000000-0x0fffffff and 0x40000000-0x4fffffff,
-while other SoCs can only use 0x40000000-0x4fffffff. There might be additional
-limitations depending on SoCs and the boot mode. The address translation is
-arbitrary as long as the banks are assigned in the supported address space with
-the required alignment and they do not overlap one another.
-For example, it is possible to map:
- bank 0 to 0x42000000-0x43ffffff, bank 5 to 0x46000000-0x46ffffff
-It is also possible to map:
- bank 0 to 0x48000000-0x49ffffff, bank 5 to 0x44000000-0x44ffffff
-There is no reason to stick to a particular translation mapping, but the
-"ranges" property should provide a "reasonable" default that is known to work.
-The software should initialize the bus controller according to it.
-
-Example:
-
- system-bus {
- compatible = "socionext,uniphier-system-bus";
- reg = <0x58c00000 0x400>;
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <1 0x00000000 0x42000000 0x02000000
- 5 0x00000000 0x46000000 0x01000000>;
-
- ethernet@1,01f00000 {
- compatible = "smsc,lan9115";
- reg = <1 0x01f00000 0x1000>;
- interrupts = <0 48 4>
- phy-mode = "mii";
- };
-
- uart@5,00200000 {
- compatible = "ns16550a";
- reg = <5 0x00200000 0x20>;
- interrupts = <0 49 4>
- clock-frequency = <12288000>;
- };
- };
-
-In this example,
- - the Ethernet device is connected at the offset 0x01f00000 of CS1 and
- mapped to 0x43f00000 of the parent bus.
- - the UART device is connected at the offset 0x00200000 of CS5 and
- mapped to 0x46200000 of the parent bus.
diff --git a/Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml b/Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml
new file mode 100644
index 000000000000..6d7396ab8bee
--- /dev/null
+++ b/Documentation/devicetree/bindings/chrome/google,cros-ec-typec.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/chrome/google,cros-ec-typec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Google Chrome OS EC (Embedded Controller) Type C port driver
+
+maintainers:
+ - Benson Leung <bleung@chromium.org>
+ - Prashant Malani <pmalani@chromium.org>
+
+description:
+ Chrome OS devices have an Embedded Controller (EC) which has access to
+ Type C port state. This node is intended to allow the host to read and
+ control the Type C ports. The node for this device should be under a
+ cros-ec node like google,cros-ec-spi.
+
+properties:
+ compatible:
+ const: google,cros-ec-typec
+
+ connector:
+ $ref: /schemas/connector/usb-connector.yaml#
+
+required:
+ - compatible
+
+examples:
+ - |+
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cros_ec: ec@0 {
+ compatible = "google,cros-ec-spi";
+ reg = <0>;
+
+ typec {
+ compatible = "google,cros-ec-typec";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "source";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml b/Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
new file mode 100644
index 000000000000..de9a465096db
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/arm,syscon-icst.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM System Controller ICST Clocks
+
+maintainers:
+ - Linus Walleij <linusw@kernel.org>
+
+description: |
+ The ICS525 and ICS307 oscillators are produced by Integrated Device
+ Technology (IDT). ARM integrated these oscillators deeply into their
+ reference designs by adding special control registers that manage such
+ oscillators to their system controllers.
+
+ The various ARM system controllers contain logic to serialize and initialize
+ an ICST clock request after a write to the 32 bit register at an offset
+ into the system controller. Furthermore, to even be able to alter one of
+ these frequencies, the system controller must first be unlocked by
+ writing a special token to another offset in the system controller.
+
+ Some ARM hardware contains special versions of the serial interface that only
+ connect the low 8 bits of the VDW (missing one bit), hard-wire RDW to
+ different values and sometimes also hard-wire the output divider. They
+ therefore have special compatible strings as per this table (the OD value is
+ the value on the pins, not the resulting output divider).
+
+ In the core modules and logic tiles, the ICST is a configurable clock fed
+ from a 24 MHz clock on the motherboard (usually the main crystal) used for
+ generating e.g. video clocks. It is located on the core module and there is
+ only one of these. This clock node must be a subnode of the core module.
+
+ Hardware variant RDW OD VDW
+
+ Integrator/AP 22 1 Bit 8 0, rest variable
+ integratorap-cm
+
+ Integrator/AP 46 3 Bit 8 0, rest variable
+ integratorap-sys
+
+ Integrator/AP 22 or 1 17 or (33 or 25 MHz)
+ integratorap-pci 14 1 14
+
+ Integrator/CP 22 variable Bit 8 0, rest variable
+ integratorcp-cm-core
+
+ Integrator/CP 22 variable Bit 8 0, rest variable
+ integratorcp-cm-mem
+
+ The ICST oscillator must be provided inside a system controller node.
+
+properties:
+ "#clock-cells":
+ const: 0
+
+ compatible:
+ enum:
+ - arm,syscon-icst525
+ - arm,syscon-icst307
+ - arm,syscon-icst525-integratorap-cm
+ - arm,syscon-icst525-integratorap-sys
+ - arm,syscon-icst525-integratorap-pci
+ - arm,syscon-icst525-integratorcp-cm-core
+ - arm,syscon-icst525-integratorcp-cm-mem
+ - arm,integrator-cm-auxosc
+ - arm,versatile-cm-auxosc
+ - arm,impd1-vco1
+ - arm,impd1-vco2
+
+ clocks:
+ description: Parent clock for the ICST VCO
+ maxItems: 1
+
+ clock-output-names:
+ maxItems: 1
+
+ lock-offset:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: Offset to the unlocking register for the oscillator
+
+ vco-offset:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: Offset to the VCO register for the oscillator
+
+required:
+ - "#clock-cells"
+ - compatible
+ - clocks
+
+examples:
+ - |
+ vco1: clock@00 {
+ compatible = "arm,impd1-vco1";
+ #clock-cells = <0>;
+ lock-offset = <0x08>;
+ vco-offset = <0x00>;
+ clocks = <&sysclk>;
+ clock-output-names = "IM-PD1-VCO1";
+ };
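+
+  # A second, hedged sketch showing the oscillator provided inside a system
+  # controller node, as the description above requires; the Integrator/AP
+  # syscon compatible, register offsets and xtal24mhz label are illustrative:
+  - |
+    syscon@10000000 {
+      compatible = "arm,integrator-ap-syscon", "syscon";
+      reg = <0x10000000 0x1000>;
+
+      oscclk0: clock@c {
+        compatible = "arm,syscon-icst307";
+        #clock-cells = <0>;
+        lock-offset = <0x20>;
+        vco-offset = <0x0c>;
+        clocks = <&xtal24mhz>;
+      };
+    };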
+
+...
diff --git a/Documentation/devicetree/bindings/clock/arm-integrator.txt b/Documentation/devicetree/bindings/clock/arm-integrator.txt
deleted file mode 100644
index 11f5f95f571b..000000000000
--- a/Documentation/devicetree/bindings/clock/arm-integrator.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Clock bindings for ARM Integrator and Versatile Core Module clocks
-
-Auxiliary Oscillator Clock
-
-This is a configurable clock fed from a 24 MHz crystal,
-used for generating e.g. video clocks. It is located on the
-core module and there is only one of these.
-
-This clock node *must* be a subnode of the core module, since
-it obtains the base address for its address range from its
-parent node.
-
-
-Required properties:
-- compatible: must be "arm,integrator-cm-auxosc" or "arm,versatile-cm-auxosc"
-- #clock-cells: must be <0>
-
-Optional properties:
-- clocks: parent clock(s)
-
-Example:
-
-core-module@10000000 {
- xtal24mhz: xtal24mhz@24M {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- };
- auxosc: cm_aux_osc@25M {
- #clock-cells = <0>;
- compatible = "arm,integrator-cm-auxosc";
- clocks = <&xtal24mhz>;
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt b/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
deleted file mode 100644
index 4cd81742038f..000000000000
--- a/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-ARM System Controller ICST clocks
-
-The ICS525 and ICS307 oscillators are produced by Integrated Devices
-Technology (IDT). ARM integrated these oscillators deeply into their
-reference designs by adding special control registers that manage such
-oscillators to their system controllers.
-
-The various ARM system controllers contain logic to serialize and initialize
-an ICST clock request after a write to the 32 bit register at an offset
-into the system controller. Furthermore, to even be able to alter one of
-these frequencies, the system controller must first be unlocked by
-writing a special token to another offset in the system controller.
-
-Some ARM hardware contain special versions of the serial interface that only
-connects the low 8 bits of the VDW (missing one bit), hardwires RDW to
-different values and sometimes also hardwire the output divider. They
-therefore have special compatible strings as per this table (the OD value is
-the value on the pins, not the resulting output divider):
-
-Hardware variant: RDW OD VDW
-
-Integrator/AP 22 1 Bit 8 0, rest variable
-integratorap-cm
-
-Integrator/AP 46 3 Bit 8 0, rest variable
-integratorap-sys
-
-Integrator/AP 22 or 1 17 or (33 or 25 MHz)
-integratorap-pci 14 1 14
-
-Integrator/CP 22 variable Bit 8 0, rest variable
-integratorcp-cm-core
-
-Integrator/CP 22 variable Bit 8 0, rest variable
-integratorcp-cm-mem
-
-The ICST oscillator must be provided inside a system controller node.
-
-Required properties:
-- compatible: must be one of
- "arm,syscon-icst525"
- "arm,syscon-icst307"
- "arm,syscon-icst525-integratorap-cm"
- "arm,syscon-icst525-integratorap-sys"
- "arm,syscon-icst525-integratorap-pci"
- "arm,syscon-icst525-integratorcp-cm-core"
- "arm,syscon-icst525-integratorcp-cm-mem"
-- lock-offset: the offset address into the system controller where the
- unlocking register is located
-- vco-offset: the offset address into the system controller where the
- ICST control register is located (even 32 bit address)
-- #clock-cells: must be <0>
-- clocks: parent clock, since the ICST needs a parent clock to derive its
- frequency from, this attribute is compulsory.
-
-Example:
-
-syscon: syscon@10000000 {
- compatible = "syscon";
- reg = <0x10000000 0x1000>;
-
- oscclk0: osc0@c {
- compatible = "arm,syscon-icst307";
- #clock-cells = <0>;
- lock-offset = <0x20>;
- vco-offset = <0x0c>;
- clocks = <&xtal24mhz>;
- };
- (...)
-};
diff --git a/Documentation/devicetree/bindings/clock/clock-bindings.txt b/Documentation/devicetree/bindings/clock/clock-bindings.txt
index b646bbcf7f92..8a55fdcf96ee 100644
--- a/Documentation/devicetree/bindings/clock/clock-bindings.txt
+++ b/Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -94,7 +94,7 @@ clock is connected to output 0 of the &ref.
/* external oscillator */
osc: oscillator {
compatible = "fixed-clock";
- #clock-cells = <1>;
+ #clock-cells = <0>;
clock-frequency = <32678>;
clock-output-names = "osc";
};
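+
+A hedged consumer sketch (the "device" node is purely illustrative): with
+#clock-cells = <0>, a consumer references this oscillator with a bare
+phandle and no index cell:
+
+	device {
+		clocks = <&osc>;
+	};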
diff --git a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
index c8350030b374..a203d5d498db 100644
--- a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
+++ b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
@@ -21,6 +21,9 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
'#clock-cells':
const: 0
@@ -41,6 +44,8 @@ required:
- clocks
- '#clock-cells'
+additionalProperties: false
+
examples:
# Display PIXEL Clock node:
- |
diff --git a/Documentation/devicetree/bindings/clock/imx8mm-clock.txt b/Documentation/devicetree/bindings/clock/imx8mm-clock.txt
deleted file mode 100644
index 8e4ab9e619a1..000000000000
--- a/Documentation/devicetree/bindings/clock/imx8mm-clock.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* Clock bindings for NXP i.MX8M Mini
-
-Required properties:
-- compatible: Should be "fsl,imx8mm-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-- clocks: list of clock specifiers, must contain an entry for each required
- entry in clock-names
-- clock-names: should include the following entries:
- - "osc_32k"
- - "osc_24m"
- - "clk_ext1"
- - "clk_ext2"
- - "clk_ext3"
- - "clk_ext4"
-
-clk: clock-controller@30380000 {
- compatible = "fsl,imx8mm-ccm";
- reg = <0x0 0x30380000 0x0 0x10000>;
- #clock-cells = <1>;
- clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
- <&clk_ext3>, <&clk_ext4>;
- clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
- "clk_ext3", "clk_ext4";
-};
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mm-clock.h
-for the full list of i.MX8M Mini clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/imx8mm-clock.yaml b/Documentation/devicetree/bindings/clock/imx8mm-clock.yaml
new file mode 100644
index 000000000000..ec830db1367b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8mm-clock.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx8mm-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8M Mini Clock Control Module Binding
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description: |
+ NXP i.MX8M Mini clock control module is an integrated clock controller, which
+ generates and supplies clocks to all modules.
+
+properties:
+ compatible:
+ const: fsl,imx8mm-ccm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: 32k osc
+ - description: 24m osc
+ - description: ext1 clock input
+ - description: ext2 clock input
+ - description: ext3 clock input
+ - description: ext4 clock input
+
+ clock-names:
+ items:
+ - const: osc_32k
+ - const: osc_24m
+ - const: clk_ext1
+ - const: clk_ext2
+ - const: clk_ext3
+ - const: clk_ext4
+
+ '#clock-cells':
+ const: 1
+ description:
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mm-clock.h
+ for the full list of i.MX8M Mini clock IDs.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+
+examples:
+ # Clock Control Module node:
+ - |
+ clk: clock-controller@30380000 {
+ compatible = "fsl,imx8mm-ccm";
+ reg = <0x30380000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
+ <&clk_ext3>, <&clk_ext4>;
+ clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4";
+ };
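+
+  # A hedged consumer sketch: the UART node is illustrative; the clock IDs
+  # come from include/dt-bindings/clock/imx8mm-clock.h.
+  - |
+    #include <dt-bindings/clock/imx8mm-clock.h>
+    serial@30860000 {
+      compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+      reg = <0x30860000 0x10000>;
+      clocks = <&clk IMX8MM_CLK_UART1_ROOT>,
+               <&clk IMX8MM_CLK_UART1_ROOT>;
+      clock-names = "ipg", "per";
+    };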
+
+...
diff --git a/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml b/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml
index cd0b8a341321..bdaa29616ab1 100644
--- a/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx8mn-clock.yaml
@@ -40,7 +40,7 @@ properties:
'#clock-cells':
const: 1
- description: |
+ description:
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mn-clock.h
for the full list of i.MX8M Nano clock IDs.
@@ -52,12 +52,14 @@ required:
- clock-names
- '#clock-cells'
+additionalProperties: false
+
examples:
# Clock Control Module node:
- |
clk: clock-controller@30380000 {
compatible = "fsl,imx8mn-ccm";
- reg = <0x0 0x30380000 0x0 0x10000>;
+ reg = <0x30380000 0x10000>;
#clock-cells = <1>;
clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>,
<&clk_ext2>, <&clk_ext3>, <&clk_ext4>;
@@ -65,48 +67,4 @@ examples:
"clk_ext2", "clk_ext3", "clk_ext4";
};
- # Required external clocks for Clock Control Module node:
- - |
- osc_32k: clock-osc-32k {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "osc_32k";
- };
-
- osc_24m: clock-osc-24m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "osc_24m";
- };
-
- clk_ext1: clock-ext1 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <133000000>;
- clock-output-names = "clk_ext1";
- };
-
- clk_ext2: clock-ext2 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <133000000>;
- clock-output-names = "clk_ext2";
- };
-
- clk_ext3: clock-ext3 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <133000000>;
- clock-output-names = "clk_ext3";
- };
-
- clk_ext4: clock-ext4 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency= <133000000>;
- clock-output-names = "clk_ext4";
- };
-
...
diff --git a/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml b/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml
index 89aee63c9019..4351a1dbb4f7 100644
--- a/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx8mp-clock.yaml
@@ -52,6 +52,8 @@ required:
- clock-names
- '#clock-cells'
+additionalProperties: false
+
examples:
# Clock Control Module node:
- |
diff --git a/Documentation/devicetree/bindings/clock/imx8mq-clock.txt b/Documentation/devicetree/bindings/clock/imx8mq-clock.txt
deleted file mode 100644
index 52de8263e012..000000000000
--- a/Documentation/devicetree/bindings/clock/imx8mq-clock.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-* Clock bindings for NXP i.MX8M Quad
-
-Required properties:
-- compatible: Should be "fsl,imx8mq-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-- clocks: list of clock specifiers, must contain an entry for each required
- entry in clock-names
-- clock-names: should include the following entries:
- - "ckil"
- - "osc_25m"
- - "osc_27m"
- - "clk_ext1"
- - "clk_ext2"
- - "clk_ext3"
- - "clk_ext4"
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mq-clock.h
-for the full list of i.MX8M Quad clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/imx8mq-clock.yaml b/Documentation/devicetree/bindings/clock/imx8mq-clock.yaml
new file mode 100644
index 000000000000..05d7d1471e0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8mq-clock.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx8mq-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8M Quad Clock Control Module Binding
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description: |
+ NXP i.MX8M Quad clock control module is an integrated clock controller, which
+ generates and supplies clocks to all modules.
+
+properties:
+ compatible:
+ const: fsl,imx8mq-ccm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: 32k osc
+ - description: 25m osc
+ - description: 27m osc
+ - description: ext1 clock input
+ - description: ext2 clock input
+ - description: ext3 clock input
+ - description: ext4 clock input
+
+ clock-names:
+ items:
+ - const: ckil
+ - const: osc_25m
+ - const: osc_27m
+ - const: clk_ext1
+ - const: clk_ext2
+ - const: clk_ext3
+ - const: clk_ext4
+
+ '#clock-cells':
+ const: 1
+ description:
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mq-clock.h
+ for the full list of i.MX8M Quad clock IDs.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+
+examples:
+ # Clock Control Module node:
+ - |
+ clk: clock-controller@30380000 {
+ compatible = "fsl,imx8mq-ccm";
+ reg = <0x30380000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&ckil>, <&osc_25m>, <&osc_27m>,
+ <&clk_ext1>, <&clk_ext2>,
+ <&clk_ext3>, <&clk_ext4>;
+ clock-names = "ckil", "osc_25m", "osc_27m",
+ "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml
new file mode 100644
index 000000000000..e2b6ac96bbcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,mmp2-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MMP2 and MMP3 Clock Controller
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+description: |
+ The clock subsystem on MMP2 or MMP3 generates and supplies clocks to various
+ controllers within the SoC.
+
+ Each clock is assigned an identifier and client nodes use this identifier
+ to specify the clock which they consume.
+
+ All these identifiers can be found in <dt-bindings/clock/marvell,mmp2.h>.
+
+properties:
+ compatible:
+ enum:
+ - marvell,mmp2-clock # controller compatible with MMP2 SoC
+ - marvell,mmp3-clock # controller compatible with MMP3 SoC
+
+ reg:
+ items:
+ - description: MPMU register region
+ - description: APMU register region
+ - description: APBC register region
+
+ reg-names:
+ items:
+ - const: mpmu
+ - const: apmu
+ - const: apbc
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - '#clock-cells'
+ - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@d4050000 {
+ compatible = "marvell,mmp2-clock";
+ reg = <0xd4050000 0x1000>,
+ <0xd4282800 0x400>,
+ <0xd4015000 0x1000>;
+ reg-names = "mpmu", "apmu", "apbc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
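+
+  # A hedged consumer sketch: the UART node is illustrative and assumes the
+  # controller above carries a soc_clocks label; the identifier comes from
+  # <dt-bindings/clock/marvell,mmp2.h>.
+  - |
+    #include <dt-bindings/clock/marvell,mmp2.h>
+    uart@d4030000 {
+      compatible = "mrvl,mmp-uart";
+      reg = <0xd4030000 0x1000>;
+      clocks = <&soc_clocks MMP2_CLK_UART0>;
+      resets = <&soc_clocks MMP2_CLK_UART0>;
+    };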
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
deleted file mode 100644
index 23b52dc02266..000000000000
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-* Marvell MMP2 Clock Controller
-
-The MMP2 clock subsystem generates and supplies clock to various
-controllers within the MMP2 SoC.
-
-Required Properties:
-
-- compatible: should be one of the following.
- - "marvell,mmp2-clock" - controller compatible with MMP2 SoC.
-
-- reg: physical base address of the clock subsystem and length of memory mapped
- region. There are 3 places in SOC has clock control logic:
- "mpmu", "apmu", "apbc". So three reg spaces need to be defined.
-
-- #clock-cells: should be 1.
-- #reset-cells: should be 1.
-
-Each clock is assigned an identifier and client nodes use this identifier
-to specify the clock which they consume.
-
-All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
index f0b804a7f096..0e8b07710451 100644
--- a/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
@@ -35,6 +35,8 @@ required:
- clocks
- '#clock-cells'
+additionalProperties: false
+
examples:
# Clock controller node:
- |
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
index 3647007f82ca..eacccc88bbf6 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
@@ -68,6 +68,8 @@ required:
- nvmem-cell-names
- '#thermal-sensor-cells'
+additionalProperties: false
+
examples:
- |
clock-controller@900000 {
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
index 89c6e070e7ac..98572b4a9b60 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-ipq8074.yaml
@@ -40,6 +40,8 @@ required:
- '#clock-cells'
- '#reset-cells'
+additionalProperties: false
+
examples:
- |
clock-controller@1800000 {
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
index 18e4e77b8cfa..5a5b2214f0ca 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml
@@ -56,6 +56,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
clock-controller@300000 {
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
index 1d3cae980471..a0bb713929b0 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8998.yaml
@@ -66,6 +66,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
index 8cdece395eba..ce06f3f8c3e3 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-qcs404.yaml
@@ -40,6 +40,8 @@ required:
- '#clock-cells'
- '#reset-cells'
+additionalProperties: false
+
examples:
- |
clock-controller@1800000 {
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
index ee4f968e2909..a345320e0e49 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
@@ -58,6 +58,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
index 888e9a708390..36f3b3668ced 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
@@ -56,6 +56,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
new file mode 100644
index 000000000000..2c40a8aa9815
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-sm8250.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for SM8250
+
+maintainers:
+ - Stephen Boyd <sboyd@kernel.org>
+ - Taniya Das <tdas@codeaurora.org>
+
+description: |
+ Qualcomm global clock control module which supports the clocks, resets and
+ power domains on SM8250.
+
+ See also:
+ - dt-bindings/clock/qcom,gcc-sm8250.h
+
+properties:
+ compatible:
+ const: qcom,gcc-sm8250
+
+ clocks:
+ items:
+ - description: Board XO source
+ - description: Sleep clock source
+
+ clock-names:
+ items:
+ - const: bi_tcxo
+ - const: sleep_clk
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ protected-clocks:
+ description:
+ Protected clock specifier list as per common clock binding.
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - reg
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sm8250";
+ reg = <0 0x00100000 0 0x1f0000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&sleep_clk>;
+ clock-names = "bi_tcxo", "sleep_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
index d18f8ab9eeee..e533bb0cfd2b 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
@@ -74,6 +74,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
# Example for GCC for MSM8960:
- |
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
index 85518494ce43..f684fe67db84 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
@@ -74,6 +74,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
index 7d853c1a85e5..d747bb58f0a7 100644
--- a/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,msm8998-gpucc.yaml
@@ -50,6 +50,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,gcc-msm8998.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
index 944719bd586f..90a1349bc713 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -14,7 +14,9 @@ Required properties :
"qcom,rpmcc-apq8060", "qcom,rpmcc"
"qcom,rpmcc-msm8916", "qcom,rpmcc"
"qcom,rpmcc-msm8974", "qcom,rpmcc"
+ "qcom,rpmcc-msm8976", "qcom,rpmcc"
"qcom,rpmcc-apq8064", "qcom,rpmcc"
+ "qcom,rpmcc-ipq806x", "qcom,rpmcc"
"qcom,rpmcc-msm8996", "qcom,rpmcc"
"qcom,rpmcc-msm8998", "qcom,rpmcc"
"qcom,rpmcc-qcs404", "qcom,rpmcc"
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
index 2cd158f13bab..a46a3a799a70 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,sc7180-rpmh-clk
- qcom,sdm845-rpmh-clk
- qcom,sm8150-rpmh-clk
+ - qcom,sm8250-rpmh-clk
clocks:
maxItems: 1
@@ -35,6 +36,8 @@ required:
- compatible
- '#clock-cells'
+additionalProperties: false
+
examples:
# Example for GCC for SDM845: The below node should be defined inside
# &apps_rsc node.
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
index 0429062f1585..58cdfd5924d3 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
@@ -58,6 +58,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,gcc-sc7180.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
index 5785192cc4be..8635e35fd3f0 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
@@ -52,6 +52,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,gcc-sc7180.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml
new file mode 100644
index 000000000000..0dd5d25ae7d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sc7180-mss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Modem Clock Controller Binding for SC7180
+
+maintainers:
+ - Taniya Das <tdas@codeaurora.org>
+
+description: |
+ Qualcomm modem clock control module which supports the clocks on SC7180.
+
+ See also:
+ - dt-bindings/clock/qcom,mss-sc7180.h
+
+properties:
+ compatible:
+ const: qcom,sc7180-mss
+
+ clocks:
+ items:
+ - description: gcc_mss_mfab_axi clock from GCC
+ - description: gcc_mss_nav_axi clock from GCC
+ - description: gcc_mss_cfg_ahb clock from GCC
+
+ clock-names:
+ items:
+ - const: gcc_mss_mfab_axis
+ - const: gcc_mss_nav_axi
+ - const: cfg_ahb
+
+ '#clock-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+ clock-controller@41a8000 {
+ compatible = "qcom,sc7180-mss";
+ reg = <0 0x041a8000 0 0x8000>;
+ clocks = <&gcc GCC_MSS_MFAB_AXIS_CLK>,
+ <&gcc GCC_MSS_NAV_AXI_CLK>,
+ <&gcc GCC_MSS_CFG_AHB_CLK>;
+ clock-names = "gcc_mss_mfab_axis",
+ "gcc_mss_nav_axi",
+ "cfg_ahb";
+ #clock-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
index 31df901884ac..0071b9701960 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
@@ -48,6 +48,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
index 89269ddfbdcd..ad47d747a3e4 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
@@ -67,6 +67,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
index bac04f1c5d79..7a052ac5dc00 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
@@ -52,6 +52,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
index 9d216c0f11d4..2a6a81ab0318 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
@@ -48,6 +48,8 @@ required:
- '#reset-cells'
- '#power-domain-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
deleted file mode 100644
index f4d153f24a0f..000000000000
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ /dev/null
@@ -1,100 +0,0 @@
-* Renesas Clock Pulse Generator / Module Standby and Software Reset
-
-On Renesas ARM SoCs (SH/R-Mobile, R-Car, RZ), the CPG (Clock Pulse Generator)
-and MSSR (Module Standby and Software Reset) blocks are intimately connected,
-and share the same register block.
-
-They provide the following functionalities:
- - The CPG block generates various core clocks,
- - The MSSR block provides two functions:
- 1. Module Standby, providing a Clock Domain to control the clock supply
- to individual SoC devices,
- 2. Reset Control, to perform a software reset of individual SoC devices.
-
-Required Properties:
- - compatible: Must be one of:
- - "renesas,r7s9210-cpg-mssr" for the r7s9210 SoC (RZ/A2)
- - "renesas,r8a7743-cpg-mssr" for the r8a7743 SoC (RZ/G1M)
- - "renesas,r8a7744-cpg-mssr" for the r8a7744 SoC (RZ/G1N)
- - "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
- - "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C)
- - "renesas,r8a774a1-cpg-mssr" for the r8a774a1 SoC (RZ/G2M)
- - "renesas,r8a774b1-cpg-mssr" for the r8a774b1 SoC (RZ/G2N)
- - "renesas,r8a774c0-cpg-mssr" for the r8a774c0 SoC (RZ/G2E)
- - "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
- - "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
- - "renesas,r8a7792-cpg-mssr" for the r8a7792 SoC (R-Car V2H)
- - "renesas,r8a7793-cpg-mssr" for the r8a7793 SoC (R-Car M2-N)
- - "renesas,r8a7794-cpg-mssr" for the r8a7794 SoC (R-Car E2)
- - "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3)
- - "renesas,r8a7796-cpg-mssr" for the r8a77960 SoC (R-Car M3-W)
- - "renesas,r8a77961-cpg-mssr" for the r8a77961 SoC (R-Car M3-W+)
- - "renesas,r8a77965-cpg-mssr" for the r8a77965 SoC (R-Car M3-N)
- - "renesas,r8a77970-cpg-mssr" for the r8a77970 SoC (R-Car V3M)
- - "renesas,r8a77980-cpg-mssr" for the r8a77980 SoC (R-Car V3H)
- - "renesas,r8a77990-cpg-mssr" for the r8a77990 SoC (R-Car E3)
- - "renesas,r8a77995-cpg-mssr" for the r8a77995 SoC (R-Car D3)
-
- - reg: Base address and length of the memory resource used by the CPG/MSSR
- block
-
- - clocks: References to external parent clocks, one entry for each entry in
- clock-names
- - clock-names: List of external parent clock names. Valid names are:
- - "extal" (r7s9210, r8a7743, r8a7744, r8a7745, r8a77470, r8a774a1,
- r8a774b1, r8a774c0, r8a7790, r8a7791, r8a7792, r8a7793,
- r8a7794, r8a7795, r8a77960, r8a77961, r8a77965, r8a77970,
- r8a77980, r8a77990, r8a77995)
- - "extalr" (r8a774a1, r8a774b1, r8a7795, r8a77960, r8a77961, r8a77965,
- r8a77970, r8a77980)
- - "usb_extal" (r8a7743, r8a7744, r8a7745, r8a77470, r8a7790, r8a7791,
- r8a7793, r8a7794)
-
- - #clock-cells: Must be 2
- - For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
- and a core clock reference, as defined in
- <dt-bindings/clock/*-cpg-mssr.h>.
- - For module clocks, the two clock specifier cells must be "CPG_MOD" and
- a module number, as defined in the datasheet.
-
- - #power-domain-cells: Must be 0
- - SoC devices that are part of the CPG/MSSR Clock Domain and can be
- power-managed through Module Standby should refer to the CPG device
- node in their "power-domains" property, as documented by the generic PM
- Domain bindings in
- Documentation/devicetree/bindings/power/power-domain.yaml.
-
- - #reset-cells: Must be 1
- - The single reset specifier cell must be the module number, as defined
- in the datasheet.
-
-
-Examples
---------
-
- - CPG device node:
-
- cpg: clock-controller@e6150000 {
- compatible = "renesas,r8a7795-cpg-mssr";
- reg = <0 0xe6150000 0 0x1000>;
- clocks = <&extal_clk>, <&extalr_clk>;
- clock-names = "extal", "extalr";
- #clock-cells = <2>;
- #power-domain-cells = <0>;
- #reset-cells = <1>;
- };
-
-
- - CPG/MSSR Clock Domain member device node:
-
- scif2: serial@e6e88000 {
- compatible = "renesas,scif-r8a7795", "renesas,scif";
- reg = <0 0xe6e88000 0 64>;
- interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 310>;
- clock-names = "fck";
- dmas = <&dmac1 0x13>, <&dmac1 0x12>;
- dma-names = "tx", "rx";
- power-domains = <&cpg>;
- resets = <&cpg 310>;
- };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
new file mode 100644
index 000000000000..9cd102e5fed5
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/clock/renesas,cpg-mssr.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas Clock Pulse Generator / Module Standby and Software Reset
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description: |
+ On Renesas ARM SoCs (SH/R-Mobile, R-Car, RZ), the CPG (Clock Pulse Generator)
+ and MSSR (Module Standby and Software Reset) blocks are intimately connected,
+ and share the same register block.
+
+ They provide the following functionalities:
+ - The CPG block generates various core clocks,
+ - The MSSR block provides two functions:
+ 1. Module Standby, providing a Clock Domain to control the clock supply
+ to individual SoC devices,
+ 2. Reset Control, to perform a software reset of individual SoC devices.
+
+properties:
+ compatible:
+ enum:
+ - renesas,r7s9210-cpg-mssr # RZ/A2
+ - renesas,r8a7743-cpg-mssr # RZ/G1M
+ - renesas,r8a7744-cpg-mssr # RZ/G1N
+ - renesas,r8a7745-cpg-mssr # RZ/G1E
+ - renesas,r8a77470-cpg-mssr # RZ/G1C
+ - renesas,r8a774a1-cpg-mssr # RZ/G2M
+ - renesas,r8a774b1-cpg-mssr # RZ/G2N
+ - renesas,r8a774c0-cpg-mssr # RZ/G2E
+ - renesas,r8a7790-cpg-mssr # R-Car H2
+ - renesas,r8a7791-cpg-mssr # R-Car M2-W
+ - renesas,r8a7792-cpg-mssr # R-Car V2H
+ - renesas,r8a7793-cpg-mssr # R-Car M2-N
+ - renesas,r8a7794-cpg-mssr # R-Car E2
+ - renesas,r8a7795-cpg-mssr # R-Car H3
+ - renesas,r8a7796-cpg-mssr # R-Car M3-W
+ - renesas,r8a77961-cpg-mssr # R-Car M3-W+
+ - renesas,r8a77965-cpg-mssr # R-Car M3-N
+ - renesas,r8a77970-cpg-mssr # R-Car V3M
+ - renesas,r8a77980-cpg-mssr # R-Car V3H
+ - renesas,r8a77990-cpg-mssr # R-Car E3
+ - renesas,r8a77995-cpg-mssr # R-Car D3
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum:
+ - extal # All
+ - extalr # Most R-Car Gen3 and RZ/G2
+ - usb_extal # Most R-Car Gen2 and RZ/G1
+
+ '#clock-cells':
+ description: |
+ - For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
+ and a core clock reference, as defined in
+ <dt-bindings/clock/*-cpg-mssr.h>
+ - For module clocks, the two clock specifier cells must be "CPG_MOD" and
+ a module number, as defined in the datasheet.
+ const: 2
+
+ '#power-domain-cells':
+ description:
+ SoC devices that are part of the CPG/MSSR Clock Domain and can be
+ power-managed through Module Standby should refer to the CPG device node
+ in their "power-domains" property, as documented by the generic PM Domain
+ bindings in Documentation/devicetree/bindings/power/power-domain.yaml.
+ const: 0
+
+ '#reset-cells':
+ description:
+ The single reset specifier cell must be the module number, as defined in
+ the datasheet.
+ const: 1
+
+if:
+ not:
+ properties:
+ compatible:
+ items:
+ enum:
+ - renesas,r7s9210-cpg-mssr
+then:
+ required:
+ - '#reset-cells'
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ cpg: clock-controller@e6150000 {
+ compatible = "renesas,r8a7795-cpg-mssr";
+ reg = <0xe6150000 0x1000>;
+ clocks = <&extal_clk>, <&extalr_clk>;
+ clock-names = "extal", "extalr";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ #reset-cells = <1>;
+ };
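+
+  # A hedged sketch of a CPG/MSSR Clock Domain member, adapted from the
+  # example in the removed .txt binding; the SCIF node and module number
+  # 310 are illustrative:
+  - |
+    #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    serial@e6e88000 {
+      compatible = "renesas,scif-r8a7795", "renesas,scif";
+      reg = <0xe6e88000 64>;
+      interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cpg CPG_MOD 310>;
+      clock-names = "fck";
+      power-domains = <&cpg>;
+      resets = <&cpg 310>;
+    };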
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
index 83f6c6a7c41c..4bf6f53bd95e 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
@@ -38,10 +38,17 @@ Required properties:
- reg: offset and length of the USB 2.0 clock selector register block.
- clocks: A list of phandles and specifier pairs.
- clock-names: Name of the clocks.
- - The functional clock must be "ehci_ohci"
+ - The functional clock of USB 2.0 host side must be "ehci_ohci"
+ - The functional clock of HS-USB side must be "hs-usb-if"
- The USB_EXTAL clock pin must be "usb_extal"
- The USB_XTAL clock pin must be "usb_xtal"
- #clock-cells: Must be 0
+- power-domains: A phandle and symbolic PM domain specifier.
+ See power/renesas,rcar-sysc.yaml.
+- resets: A list of phandles and specifier pairs.
+- reset-names: Name of the resets.
+ - The reset of USB 2.0 host side must be "ehci_ohci"
+ - The reset of HS-USB side must be "hs-usb-if"
Example (R-Car H3):
@@ -49,7 +56,11 @@ Example (R-Car H3):
compatible = "renesas,r8a7795-rcar-usb2-clock-sel",
"renesas,rcar-gen3-usb2-clock-sel";
reg = <0 0xe6590630 0 0x02>;
- clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>;
- clock-names = "ehci_ohci", "usb_extal", "usb_xtal";
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>,
+ <&usb_extal>, <&usb_xtal>;
+ clock-names = "ehci_ohci", "hs-usb-if", "usb_extal", "usb_xtal";
#clock-cells = <0>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ reset-names = "ehci_ohci", "hs-usb-if";
};
diff --git a/Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml b/Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml
new file mode 100644
index 000000000000..c3930edc410f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/socionext,uniphier-clock.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/socionext,uniphier-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier clock controller
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ compatible:
+ oneOf:
+ - description: System clock
+ enum:
+ - socionext,uniphier-ld4-clock
+ - socionext,uniphier-pro4-clock
+ - socionext,uniphier-sld8-clock
+ - socionext,uniphier-pro5-clock
+ - socionext,uniphier-pxs2-clock
+ - socionext,uniphier-ld6b-clock
+ - socionext,uniphier-ld11-clock
+ - socionext,uniphier-ld20-clock
+ - socionext,uniphier-pxs3-clock
+ - description: Media I/O (MIO) clock, SD clock
+ enum:
+ - socionext,uniphier-ld4-mio-clock
+ - socionext,uniphier-pro4-mio-clock
+ - socionext,uniphier-sld8-mio-clock
+ - socionext,uniphier-pro5-sd-clock
+ - socionext,uniphier-pxs2-sd-clock
+ - socionext,uniphier-ld11-mio-clock
+ - socionext,uniphier-ld20-sd-clock
+ - socionext,uniphier-pxs3-sd-clock
+ - description: Peripheral clock
+ enum:
+ - socionext,uniphier-ld4-peri-clock
+ - socionext,uniphier-pro4-peri-clock
+ - socionext,uniphier-sld8-peri-clock
+ - socionext,uniphier-pro5-peri-clock
+ - socionext,uniphier-pxs2-peri-clock
+ - socionext,uniphier-ld11-peri-clock
+ - socionext,uniphier-ld20-peri-clock
+ - socionext,uniphier-pxs3-peri-clock
+
+ "#clock-cells":
+ const: 1
+
+additionalProperties: false
+
+required:
+ - compatible
+ - "#clock-cells"
+
+examples:
+ - |
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-sysctrl", "simple-mfd", "syscon";
+ reg = <0x61840000 0x4000>;
+
+ clock {
+ compatible = "socionext,uniphier-ld11-clock";
+ #clock-cells = <1>;
+ };
+
+ // other nodes ...
+ };
+
+ - |
+ mioctrl@59810000 {
+ compatible = "socionext,uniphier-mioctrl", "simple-mfd", "syscon";
+ reg = <0x59810000 0x800>;
+
+ clock {
+ compatible = "socionext,uniphier-ld11-mio-clock";
+ #clock-cells = <1>;
+ };
+
+ // other nodes ...
+ };
+
+ - |
+ perictrl@59820000 {
+ compatible = "socionext,uniphier-perictrl", "simple-mfd", "syscon";
+ reg = <0x59820000 0x200>;
+
+ clock {
+ compatible = "socionext,uniphier-ld11-peri-clock";
+ #clock-cells = <1>;
+ };
+
+ // other nodes ...
+ };
diff --git a/Documentation/devicetree/bindings/clock/sprd.txt b/Documentation/devicetree/bindings/clock/sprd,sc9860-clk.txt
index e9d179e882d9..aaaf02ca2a6a 100644
--- a/Documentation/devicetree/bindings/clock/sprd.txt
+++ b/Documentation/devicetree/bindings/clock/sprd,sc9860-clk.txt
@@ -1,4 +1,4 @@
-Spreadtrum Clock Binding
+Spreadtrum SC9860 Clock Binding
------------------------
Required properties:
diff --git a/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
new file mode 100644
index 000000000000..bb3a78d8105e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2019 Unisoc Inc.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/clock/sprd,sc9863a-clk.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: SC9863A Clock Control Unit Device Tree Bindings
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ "#clock-cells":
+ const: 1
+
+ compatible:
+ enum:
+ - sprd,sc9863a-ap-clk
+ - sprd,sc9863a-aon-clk
+ - sprd,sc9863a-apahb-gate
+ - sprd,sc9863a-pmu-gate
+ - sprd,sc9863a-aonapb-gate
+ - sprd,sc9863a-pll
+ - sprd,sc9863a-mpll
+ - sprd,sc9863a-rpll
+ - sprd,sc9863a-dpll
+ - sprd,sc9863a-mm-gate
+ - sprd,sc9863a-apapb-gate
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+ description: |
+ The input parent clock(s) phandle(s) for this clock. Only list fixed
+ clocks that are declared in the devicetree.
+
+ clock-names:
+ minItems: 1
+ maxItems: 4
+ items:
+ - const: ext-26m
+ - const: ext-32k
+ - const: ext-4m
+ - const: rco-100m
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - '#clock-cells'
+
+if:
+ properties:
+ compatible:
+ enum:
+ - sprd,sc9863a-ap-clk
+ - sprd,sc9863a-aon-clk
+then:
+ required:
+ - reg
+
+else:
+ description: |
+ Other SC9863a clock nodes should be children of a syscon node whose
+ compatible string should be:
+ "sprd,sc9863a-glbregs", "syscon", "simple-mfd"
+
+ The 'reg' property is also required for a clock node if there is a
+ sub-range of registers for its clocks, as in the second example below.
+
+examples:
+ - |
+ ap_clk: clock-controller@21500000 {
+ compatible = "sprd,sc9863a-ap-clk";
+ reg = <0 0x21500000 0 0x1000>;
+ clocks = <&ext_26m>, <&ext_32k>;
+ clock-names = "ext-26m", "ext-32k";
+ #clock-cells = <1>;
+ };
+
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ap_ahb_regs: syscon@20e00000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon", "simple-mfd";
+ reg = <0 0x20e00000 0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x20e00000 0x4000>;
+
+ apahb_gate: apahb-gate@0 {
+ compatible = "sprd,sc9863a-apahb-gate";
+ reg = <0x0 0x1020>;
+ #clock-cells = <1>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml b/Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml
new file mode 100644
index 000000000000..869b18ac88d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti,am654-ehrpwm-tbclk.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/ti,am654-ehrpwm-tbclk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI EHRPWM Time Base Clock
+
+maintainers:
+ - Vignesh Raghavendra <vigneshr@ti.com>
+
+properties:
+ compatible:
+ items:
+ - const: ti,am654-ehrpwm-tbclk
+ - const: syscon
+
+ "#clock-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - reg
+
+examples:
+ - |
+ ehrpwm_tbclk: syscon@4140 {
+ compatible = "ti,am654-ehrpwm-tbclk", "syscon";
+ reg = <0x4140 0x18>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
deleted file mode 100644
index 7b5f602765fe..000000000000
--- a/Documentation/devicetree/bindings/clock/uniphier-clock.txt
+++ /dev/null
@@ -1,132 +0,0 @@
-UniPhier clock controller
-
-
-System clock
-------------
-
-Required properties:
-- compatible: should be one of the following:
- "socionext,uniphier-ld4-clock" - for LD4 SoC.
- "socionext,uniphier-pro4-clock" - for Pro4 SoC.
- "socionext,uniphier-sld8-clock" - for sLD8 SoC.
- "socionext,uniphier-pro5-clock" - for Pro5 SoC.
- "socionext,uniphier-pxs2-clock" - for PXs2/LD6b SoC.
- "socionext,uniphier-ld11-clock" - for LD11 SoC.
- "socionext,uniphier-ld20-clock" - for LD20 SoC.
- "socionext,uniphier-pxs3-clock" - for PXs3 SoC
-- #clock-cells: should be 1.
-
-Example:
-
- sysctrl@61840000 {
- compatible = "socionext,uniphier-sysctrl",
- "simple-mfd", "syscon";
- reg = <0x61840000 0x4000>;
-
- clock {
- compatible = "socionext,uniphier-ld11-clock";
- #clock-cells = <1>;
- };
-
- other nodes ...
- };
-
-Provided clocks:
-
- 8: ST DMAC
-12: GIO (Giga bit stream I/O)
-14: USB3 ch0 host
-15: USB3 ch1 host
-16: USB3 ch0 PHY0
-17: USB3 ch0 PHY1
-20: USB3 ch1 PHY0
-21: USB3 ch1 PHY1
-
-
-Media I/O (MIO) clock, SD clock
--------------------------------
-
-Required properties:
-- compatible: should be one of the following:
- "socionext,uniphier-ld4-mio-clock" - for LD4 SoC.
- "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
- "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
- "socionext,uniphier-pro5-sd-clock" - for Pro5 SoC.
- "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC.
- "socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
- "socionext,uniphier-ld20-sd-clock" - for LD20 SoC.
- "socionext,uniphier-pxs3-sd-clock" - for PXs3 SoC
-- #clock-cells: should be 1.
-
-Example:
-
- mioctrl@59810000 {
- compatible = "socionext,uniphier-mioctrl",
- "simple-mfd", "syscon";
- reg = <0x59810000 0x800>;
-
- clock {
- compatible = "socionext,uniphier-ld11-mio-clock";
- #clock-cells = <1>;
- };
-
- other nodes ...
- };
-
-Provided clocks:
-
- 0: SD ch0 host
- 1: eMMC host
- 2: SD ch1 host
- 7: MIO DMAC
- 8: USB2 ch0 host
- 9: USB2 ch1 host
-10: USB2 ch2 host
-12: USB2 ch0 PHY
-13: USB2 ch1 PHY
-14: USB2 ch2 PHY
-
-
-Peripheral clock
-----------------
-
-Required properties:
-- compatible: should be one of the following:
- "socionext,uniphier-ld4-peri-clock" - for LD4 SoC.
- "socionext,uniphier-pro4-peri-clock" - for Pro4 SoC.
- "socionext,uniphier-sld8-peri-clock" - for sLD8 SoC.
- "socionext,uniphier-pro5-peri-clock" - for Pro5 SoC.
- "socionext,uniphier-pxs2-peri-clock" - for PXs2/LD6b SoC.
- "socionext,uniphier-ld11-peri-clock" - for LD11 SoC.
- "socionext,uniphier-ld20-peri-clock" - for LD20 SoC.
- "socionext,uniphier-pxs3-peri-clock" - for PXs3 SoC
-- #clock-cells: should be 1.
-
-Example:
-
- perictrl@59820000 {
- compatible = "socionext,uniphier-perictrl",
- "simple-mfd", "syscon";
- reg = <0x59820000 0x200>;
-
- clock {
- compatible = "socionext,uniphier-ld11-peri-clock";
- #clock-cells = <1>;
- };
-
- other nodes ...
- };
-
-Provided clocks:
-
- 0: UART ch0
- 1: UART ch1
- 2: UART ch2
- 3: UART ch3
- 4: I2C ch0
- 5: I2C ch1
- 6: I2C ch2
- 7: I2C ch3
- 8: I2C ch4
- 9: I2C ch5
-10: I2C ch6
diff --git a/Documentation/devicetree/bindings/connector/samsung,usb-connector-11pin.txt b/Documentation/devicetree/bindings/connector/samsung,usb-connector-11pin.txt
index 22256e295a7a..3dd8961154ab 100644
--- a/Documentation/devicetree/bindings/connector/samsung,usb-connector-11pin.txt
+++ b/Documentation/devicetree/bindings/connector/samsung,usb-connector-11pin.txt
@@ -19,7 +19,7 @@ Required nodes:
0: High Speed (HS),
3: Mobile High-Definition Link (MHL), specific to 11-pin Samsung micro-USB.
-[1]: bindings/connector/usb-connector.txt
+[1]: bindings/connector/usb-connector.yaml
Example
-------
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.txt b/Documentation/devicetree/bindings/connector/usb-connector.txt
deleted file mode 100644
index 88578ac1a8a7..000000000000
--- a/Documentation/devicetree/bindings/connector/usb-connector.txt
+++ /dev/null
@@ -1,135 +0,0 @@
-USB Connector
-=============
-
-A USB connector node represents a physical USB connector. It should be
-a child of a USB interface controller.
-
-Required properties:
-- compatible: describes type of the connector, must be one of:
- "usb-a-connector",
- "usb-b-connector",
- "usb-c-connector".
-
-Optional properties:
-- label: symbolic name for the connector,
-- type: size of the connector, should be specified in case of USB-A, USB-B
- non-fullsize connectors: "mini", "micro".
-- self-powered: Set this property if the usb device that has its own power
- source.
-
-Optional properties for usb-b-connector:
-- id-gpios: an input gpio for USB ID pin.
-- vbus-gpios: an input gpio for USB VBUS pin, used to detect presence of
- VBUS 5V.
- see gpio/gpio.txt.
-- vbus-supply: a phandle to the regulator for USB VBUS if needed when host
- mode or dual role mode is supported.
- Particularly, if use an output GPIO to control a VBUS regulator, should
- model it as a regulator.
- see regulator/fixed-regulator.yaml
-- pinctrl-names : a pinctrl state named "default" is optional
-- pinctrl-0 : pin control group
- see pinctrl/pinctrl-bindings.txt
-
-Optional properties for usb-c-connector:
-- power-role: should be one of "source", "sink" or "dual"(DRP) if typec
- connector has power support.
-- try-power-role: preferred power role if "dual"(DRP) can support Try.SNK
- or Try.SRC, should be "sink" for Try.SNK or "source" for Try.SRC.
-- data-role: should be one of "host", "device", "dual"(DRD) if typec
- connector supports USB data.
-
-Required properties for usb-c-connector with power delivery support:
-- source-pdos: An array of u32 with each entry providing supported power
- source data object(PDO), the detailed bit definitions of PDO can be found
- in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.2
- Source_Capabilities Message, the order of each entry(PDO) should follow
- the PD spec chapter 6.4.1. Required for power source and power dual role.
- User can specify the source PDO array via PDO_FIXED/BATT/VAR/PPS_APDO()
- defined in dt-bindings/usb/pd.h.
-- sink-pdos: An array of u32 with each entry providing supported power
- sink data object(PDO), the detailed bit definitions of PDO can be found
- in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.3
- Sink Capabilities Message, the order of each entry(PDO) should follow
- the PD spec chapter 6.4.1. Required for power sink and power dual role.
- User can specify the sink PDO array via PDO_FIXED/BATT/VAR/PPS_APDO() defined
- in dt-bindings/usb/pd.h.
-- op-sink-microwatt: Sink required operating power in microwatt, if source
- can't offer the power, Capability Mismatch is set. Required for power
- sink and power dual role.
-
-Required nodes:
-- any data bus to the connector should be modeled using the OF graph bindings
- specified in bindings/graph.txt, unless the bus is between parent node and
- the connector. Since single connector can have multiple data buses every bus
- has assigned OF graph port number as follows:
- 0: High Speed (HS), present in all connectors,
- 1: Super Speed (SS), present in SS capable connectors,
- 2: Sideband use (SBU), present in USB-C.
-
-Examples
---------
-
-1. Micro-USB connector with HS lines routed via controller (MUIC):
-
-muic-max77843@66 {
- ...
- usb_con: connector {
- compatible = "usb-b-connector";
- label = "micro-USB";
- type = "micro";
- };
-};
-
-2. USB-C connector attached to CC controller (s2mm005), HS lines routed
-to companion PMIC (max77865), SS lines to USB3 PHY and SBU to DisplayPort.
-DisplayPort video lines are routed to the connector via SS mux in USB3 PHY.
-
-ccic: s2mm005@33 {
- ...
- usb_con: connector {
- compatible = "usb-c-connector";
- label = "USB-C";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- usb_con_hs: endpoint {
- remote-endpoint = <&max77865_usbc_hs>;
- };
- };
- port@1 {
- reg = <1>;
- usb_con_ss: endpoint {
- remote-endpoint = <&usbdrd_phy_ss>;
- };
- };
- port@2 {
- reg = <2>;
- usb_con_sbu: endpoint {
- remote-endpoint = <&dp_aux>;
- };
- };
- };
- };
-};
-
-3. USB-C connector attached to a typec port controller(ptn5110), which has
-power delivery support and enables drp.
-
-typec: ptn5110@50 {
- ...
- usb_con: connector {
- compatible = "usb-c-connector";
- label = "USB-C";
- power-role = "dual";
- try-power-role = "sink";
- source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
- sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
- PDO_VAR(5000, 12000, 2000)>;
- op-sink-microwatt = <10000000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
new file mode 100644
index 000000000000..4638d7adb806
--- /dev/null
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -0,0 +1,206 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/connector/usb-connector.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB Connector
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description:
+ A USB connector node represents a physical USB connector. It should be a child
+ of a USB interface controller.
+
+properties:
+ compatible:
+ enum:
+ - usb-a-connector
+ - usb-b-connector
+ - usb-c-connector
+
+ label:
+ description: Symbolic name for the connector.
+
+ type:
+ description: Size of the connector. It should be specified for
+ non-fullsize 'usb-a-connector' or 'usb-b-connector' compatible
+ connectors.
+ allOf:
+ - $ref: /schemas/types.yaml#definitions/string
+ enum:
+ - mini
+ - micro
+
+ self-powered:
+ description: Set this property if the USB device has its own power source.
+ type: boolean
+
+ # The following are optional properties for "usb-b-connector".
+ id-gpios:
+ description: An input GPIO for the USB ID pin.
+ maxItems: 1
+
+ vbus-gpios:
+ description: An input GPIO for the USB VBUS pin, used to detect the
+ presence of VBUS 5V.
+ maxItems: 1
+
+ vbus-supply:
+ description: A phandle to the regulator for USB VBUS if needed when host
+ mode or dual role mode is supported.
+ In particular, if an output GPIO is used to control a VBUS regulator, it
+ should be modeled as a regulator. See bindings/regulator/fixed-regulator.yaml
+
+ # The following are optional properties for "usb-c-connector".
+ power-role:
+ description: Determines the power role that the Type C connector will
+ support. "dual" refers to Dual Role Port (DRP).
+ allOf:
+ - $ref: /schemas/types.yaml#definitions/string
+ enum:
+ - source
+ - sink
+ - dual
+
+ try-power-role:
+ description: Preferred power role.
+ allOf:
+ - $ref: /schemas/types.yaml#definitions/string
+ enum:
+ - source
+ - sink
+ - dual
+
+ data-role:
+ description: Data role if Type C connector supports USB data. "dual"
+ refers to Dual Role Device (DRD).
+ allOf:
+ - $ref: /schemas/types.yaml#definitions/string
+ enum:
+ - host
+ - device
+ - dual
+
+ # The following are optional properties for "usb-c-connector" with power
+ # delivery support.
+ source-pdos:
+ description: An array of u32 entries, each providing a supported power
+ source data object (PDO). The detailed bit definitions of a PDO can be
+ found in "Universal Serial Bus Power Delivery Specification" chapter
+ 6.4.1.2, Source_Capabilities Message; the order of the entries (PDOs)
+ should follow the PD spec chapter 6.4.1. Required for power source and
+ power dual role. User can specify the source PDO array via
+ PDO_FIXED/BATT/VAR/PPS_APDO() defined in dt-bindings/usb/pd.h.
+ minItems: 1
+ maxItems: 7
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+
+ sink-pdos:
+ description: An array of u32 entries, each providing a supported power
+ sink data object (PDO). The detailed bit definitions of a PDO can be
+ found in "Universal Serial Bus Power Delivery Specification" chapter
+ 6.4.1.3, Sink Capabilities Message; the order of the entries (PDOs)
+ should follow the PD spec chapter 6.4.1. Required for power sink and
+ power dual role. User can specify the sink PDO array via
+ PDO_FIXED/BATT/VAR/PPS_APDO() defined in dt-bindings/usb/pd.h.
+ minItems: 1
+ maxItems: 7
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+
+ op-sink-microwatt:
+ description: Sink required operating power in microwatts. If the source
+ cannot offer the power, Capability Mismatch is set. Required for power
+ sink and power dual role.
+
+ ports:
+ description: OF graph bindings (specified in bindings/graph.txt) that model
+ any data bus to the connector unless the bus is between the parent node
+ and the connector. Since a single connector can have multiple data buses,
+ every bus has an assigned OF graph port number as described below.
+ type: object
+ properties:
+ port@0:
+ type: object
+ description: High Speed (HS), present in all connectors.
+
+ port@1:
+ type: object
+ description: Super Speed (SS), present in SS capable connectors.
+
+ port@2:
+ type: object
+ description: Sideband Use (SBU), present in USB-C. This describes the
+ alternate mode connection of which SBU is a part.
+
+ required:
+ - port@0
+
+required:
+ - compatible
+
+examples:
+ # Micro-USB connector with HS lines routed via controller (MUIC).
+ - |+
+ muic-max77843 {
+ usb_con1: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
+ };
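+
+ # A hedged sketch (not from a real board) showing the optional id-gpios
+ # and vbus-gpios detection pins on a micro-USB "usb-b-connector"; the
+ # GPIO controller phandles and pin numbers are placeholders.
+ - |+
+ #include <dt-bindings/gpio/gpio.h>
+ usb-controller {
+ usb_con4: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ vbus-gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
+ };
+ };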
+
+ # USB-C connector attached to CC controller (s2mm005), HS lines routed
+ # to companion PMIC (max77865), SS lines to USB3 PHY and SBU to DisplayPort.
+ # DisplayPort video lines are routed to the connector via SS mux in USB3 PHY.
+ - |+
+ ccic: s2mm005 {
+ usb_con2: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ usb_con_hs: endpoint {
+ remote-endpoint = <&max77865_usbc_hs>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ usb_con_ss: endpoint {
+ remote-endpoint = <&usbdrd_phy_ss>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ usb_con_sbu: endpoint {
+ remote-endpoint = <&dp_aux>;
+ };
+ };
+ };
+ };
+ };
+
+ # USB-C connector attached to a typec port controller(ptn5110), which has
+ # power delivery support and enables drp.
+ - |+
+ #include <dt-bindings/usb/pd.h>
+ typec: ptn5110 {
+ usb_con3: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 12000, 2000)>;
+ op-sink-microwatt = <10000000>;
+ };
+ };
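+
+ # A hedged sketch (illustrative only) of the vbus-supply note above:
+ # VBUS switched through an output GPIO is modeled as a fixed regulator
+ # (see bindings/regulator/fixed-regulator.yaml) and referenced from the
+ # connector via vbus-supply. All labels and GPIO numbers are placeholders.
+ - |+
+ #include <dt-bindings/gpio/gpio.h>
+ reg_usb_vbus: regulator-usb-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb-controller {
+ usb_con5: connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ vbus-supply = <&reg_usb_vbus>;
+ };
+ };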
diff --git a/Documentation/devicetree/bindings/crypto/fsl-dcp.txt b/Documentation/devicetree/bindings/crypto/fsl-dcp.txt
index 4e4d387e38a5..513499fcdb5b 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-dcp.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-dcp.txt
@@ -11,7 +11,7 @@ Required properties:
Example:
-dcp@80028000 {
+dcp: crypto@80028000 {
compatible = "fsl,imx28-dcp", "fsl,imx23-dcp";
reg = <0x80028000 0x2000>;
interrupts = <52 53>;
diff --git a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
index e8a35c71e947..db690b10e582 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
@@ -8,7 +8,7 @@ Required properties:
Example:
-sah@10025000 {
+sah: crypto@10025000 {
compatible = "fsl,imx27-sahara";
reg = < 0x10025000 0x800>;
interrupts = <75>;
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index 2fe245ca816a..8f359f473ada 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -138,7 +138,7 @@ iMX6QDL/SX requires four clocks
iMX6UL does only require three clocks
- crypto: caam@2140000 {
+ crypto: crypto@2140000 {
compatible = "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.yaml b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.yaml
new file mode 100644
index 000000000000..55dd6e3d270d
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/xlnx,zynqmp-aes.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx ZynqMP AES-GCM Hardware Accelerator Device Tree Bindings
+
+maintainers:
+ - Kalyani Akula <kalyani.akula@xilinx.com>
+ - Michal Simek <michal.simek@xilinx.com>
+
+description: |
+ The ZynqMP AES-GCM hardened cryptographic accelerator is used to
+ encrypt or decrypt data with the provided key and initialization vector.
+
+properties:
+ compatible:
+ const: xlnx,zynqmp-aes
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+ xlnx_aes: zynqmp-aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index 5ff9cf26ca38..e5344c4ae226 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -36,6 +36,12 @@ properties:
- items:
- enum:
+ - allwinner,sun7i-a20-tcon0
+ - allwinner,sun7i-a20-tcon1
+ - const: allwinner,sun7i-a20-tcon
+
+ - items:
+ - enum:
- allwinner,sun50i-a64-tcon-lcd
- const: allwinner,sun8i-a83t-tcon-lcd
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index d1205a6697a0..a8d202c9d004 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -71,9 +71,9 @@ properties:
maxItems: 2
reg-names:
- items:
- - const: vpu
- - const: hhi
+ items:
+ - const: vpu
+ - const: hhi
interrupts:
maxItems: 1
@@ -107,6 +107,8 @@ required:
- "#address-cells"
- "#size-cells"
+additionalProperties: false
+
examples:
- |
vpu: vpu@d0100000 {
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 2c887536258c..e8ddec5d9d91 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -1,10 +1,10 @@
-Analog Device ADV7511(W)/13/33 HDMI Encoders
+Analog Devices ADV7511(W)/13/33/35 HDMI Encoders
-----------------------------------------
-The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
-compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
-S/PDIF, CEC and HDCP. ADV7533 supports the DSI interface for input pixels, while
-the others support RGB interface.
+The ADV7511, ADV7511W, ADV7513, ADV7533 and ADV7535 are HDMI audio and video
+transmitters compatible with HDMI 1.4 and DVI 1.0. They support color space
+conversion, S/PDIF, CEC and HDCP. The ADV7533 and ADV7535 support the DSI
+interface for input pixels, while the others support an RGB interface.
Required properties:
@@ -13,6 +13,7 @@ Required properties:
"adi,adv7511w"
"adi,adv7513"
"adi,adv7533"
+ "adi,adv7535"
- reg: I2C slave addresses
The ADV7511 internal registers are split into four pages exposed through
@@ -52,14 +53,14 @@ The following input format properties are required except in "rgb 1x" and
- bgvdd-supply: A 1.8V supply that powers up the BGVDD pin. This is
needed only for ADV7511.
-The following properties are required for ADV7533:
+The following properties are required for ADV7533 and ADV7535:
- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
be one of 1, 2, 3 or 4.
- a2vdd-supply: 1.8V supply that powers up the A2VDD pin on the chip.
- v3p3-supply: A 3.3V supply that powers up the V3P3 pin on the chip.
- v1p2-supply: A supply that powers up the V1P2 pin on the chip. It can be
- either 1.2V or 1.8V.
+ either 1.2V or 1.8V for ADV7533 but only 1.8V for ADV7535.
Optional properties:
@@ -71,9 +72,9 @@ Optional properties:
- adi,embedded-sync: The input uses synchronization signals embedded in the
data stream (similar to BT.656). Defaults to separate H/V synchronization
signals.
-- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
- generator. The chip will rely on the sync signals in the DSI data lanes,
- rather than generate its own timings for HDMI output.
+- adi,disable-timing-generator: Only for ADV7533 and ADV7535. Disables the
+ internal timing generator. The chip will rely on the sync signals in the
+ DSI data lanes, rather than generate its own timings for HDMI output.
- clocks: from common clock binding: reference to the CEC clock.
- clock-names: from common clock binding: must be "cec".
- reg-names : Names of maps with programmable addresses.
@@ -85,7 +86,7 @@ Required nodes:
The ADV7511 has two video ports. Their connections are modelled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
+- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533/5, the
remote endpoint phandle should be a reference to a valid mipi_dsi_host device
node.
- Video port 1 for the HDMI output
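+
+A hedged sketch of the port layout for an ADV7533/5 used as a DSI-to-HDMI
+bridge (the remote endpoint labels are placeholders, not from a real board):
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7533_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7533_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };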
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
new file mode 100644
index 000000000000..5dff93641bea
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ps8640.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MIPI DSI to eDP Video Format Converter Device Tree Bindings
+
+maintainers:
+ - Nicolas Boichat <drinkcat@chromium.org>
+ - Enric Balletbo i Serra <enric.balletbo@collabora.com>
+
+description: |
+ The PS8640 is a low power MIPI-to-eDP video format converter supporting
+ mobile devices with embedded panel resolutions up to 2048 x 1536. The
+ device accepts a single channel of MIPI DSI v1.1, with up to four lanes
+ plus clock, at a transmission rate up to 1.5Gbit/sec per lane. The
+ device outputs eDP v1.4, one or two lanes, at a link rate of up to
+ 3.24Gbit/sec per lane.
+
+properties:
+ compatible:
+ const: parade,ps8640
+
+ reg:
+ maxItems: 1
+ description: Base I2C address of the device.
+
+ powerdown-gpios:
+ maxItems: 1
+ description: GPIO connected to active low powerdown.
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO connected to active low reset.
+
+ vdd12-supply:
+ maxItems: 1
+ description: Regulator for 1.2V digital core power.
+
+ vdd33-supply:
+ maxItems: 1
+ description: Regulator for 3.3V digital core power.
+
+ ports:
+ type: object
+ description:
+ A node containing DSI input & output port nodes with endpoint
+ definitions as documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+ Documentation/devicetree/bindings/graph.txt
+ properties:
+ port@0:
+ type: object
+ description: |
+ Video port for DSI input
+
+ port@1:
+ type: object
+ description: |
+ Video port for eDP output (panel or connector).
+
+ required:
+ - port@0
+
+required:
+ - compatible
+ - reg
+ - powerdown-gpios
+ - reset-gpios
+ - vdd12-supply
+ - vdd33-supply
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ps8640: edp-bridge@18 {
+ compatible = "parade,ps8640";
+ reg = <0x18>;
+ powerdown-gpios = <&pio 116 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&pio 115 GPIO_ACTIVE_LOW>;
+ vdd12-supply = <&ps8640_fixed_1v2>;
+ vdd33-supply = <&mt6397_vgp2_reg>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ps8640_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ ps8640_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
new file mode 100644
index 000000000000..c036a75db8f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/toshiba,tc358768.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toshiba TC358768/TC358778 Parallel RGB to MIPI DSI bridge
+
+maintainers:
+ - Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+description: |
+ The TC358768/TC358778 is a bridge device which converts RGB to DSI.
+
+properties:
+ compatible:
+ enum:
+ - toshiba,tc358768
+ - toshiba,tc358778
+
+ reg:
+ maxItems: 1
+ description: base I2C address of the device
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO connected to active low RESX pin
+
+ vddc-supply:
+ description: Regulator for 1.2V internal core power.
+
+ vddmipi-supply:
+ description: Regulator for 1.2V for the MIPI.
+
+ vddio-supply:
+ description: Regulator for 1.8V - 3.3V IO power.
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: refclk
+
+ ports:
+ type: object
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ additionalProperties: false
+
+ description: |
+ Video port for RGB input
+
+ properties:
+ reg:
+ const: 0
+
+ patternProperties:
+ endpoint:
+ type: object
+ additionalProperties: false
+
+ properties:
+ data-lines:
+ enum: [ 16, 18, 24 ]
+
+ remote-endpoint: true
+
+ required:
+ - reg
+
+ port@1:
+ type: object
+ additionalProperties: false
+
+ description: |
+ Video port for DSI output (panel or connector).
+
+ properties:
+ reg:
+ const: 1
+
+ patternProperties:
+ endpoint:
+ type: object
+ additionalProperties: false
+
+ properties:
+ remote-endpoint: true
+
+ required:
+ - reg
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - vddc-supply
+ - vddmipi-supply
+ - vddio-supply
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsi_bridge: dsi-bridge@e {
+ compatible = "toshiba,tc358768";
+ reg = <0xe>;
+
+ clocks = <&tc358768_refclk>;
+ clock-names = "refclk";
+
+ reset-gpios = <&pcf_display_board 0 GPIO_ACTIVE_LOW>;
+
+ vddc-supply = <&v1_2d>;
+ vddmipi-supply = <&v1_2d>;
+ vddio-supply = <&v3_3d>;
+
+ dsi_bridge_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ rgb_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ data-lines = <24>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi_out: endpoint {
+ remote-endpoint = <&lcd_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
deleted file mode 100644
index 8def11b16a24..000000000000
--- a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Vivante GPU core devices
-========================
-
-Required properties:
-- compatible: Should be "vivante,gc"
- A more specific compatible is not needed, as the cores contain chip
- identification registers at fixed locations, which provide all the
- necessary information to the driver.
-- reg: should be register base and length as documented in the
- datasheet
-- interrupts: Should contain the cores interrupt line
-- clocks: should contain one clock for entry in clock-names
- see Documentation/devicetree/bindings/clock/clock-bindings.txt
-- clock-names:
- - "bus": AXI/master interface clock
- - "reg": AHB/slave interface clock
- (only required if GPU can gate slave interface independently)
- - "core": GPU core clock
- - "shader": Shader clock (only required if GPU has feature PIPE_3D)
-
-Optional properties:
-- power-domains: a power domain consumer specifier according to
- Documentation/devicetree/bindings/power/power_domain.txt
-
-example:
-
-gpu_3d: gpu@130000 {
- compatible = "vivante,gc";
- reg = <0x00130000 0x4000>;
- interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6QDL_CLK_GPU3D_AXI>,
- <&clks IMX6QDL_CLK_GPU3D_CORE>,
- <&clks IMX6QDL_CLK_GPU3D_SHADER>;
- clock-names = "bus", "core", "shader";
- power-domains = <&gpc 1>;
-};
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml b/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
new file mode 100644
index 000000000000..66e93e563653
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/ilitek,ili9486.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI9486 display panels device tree bindings
+
+maintainers:
+ - Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
+
+description:
+ This binding is for display panels using an Ilitek ILI9486 controller in SPI
+ mode.
+
+allOf:
+ - $ref: panel/panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ # Waveshare 3.5" 320x480 Color TFT LCD
+ - waveshare,rpi-lcd-35
+ # Ozzmaker 3.5" 320x480 Color TFT LCD
+ - ozzmaker,piscreen
+ - const: ilitek,ili9486
+
+ spi-max-frequency:
+ maximum: 32000000
+
+ dc-gpios:
+ maxItems: 1
+ description: Display data/command selection (D/CX)
+
+ backlight: true
+ reg: true
+ reset-gpios: true
+ rotation: true
+
+required:
+ - compatible
+ - reg
+ - dc-gpios
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ backlight: backlight {
+ compatible = "gpio-backlight";
+ gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ };
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ display@0 {
+ compatible = "waveshare,rpi-lcd-35", "ilitek,ili9486";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
+ rotation = <180>;
+ backlight = <&backlight>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
index b6a7e7397b8b..58914cf681b8 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
@@ -7,6 +7,7 @@ output bus.
Required properties:
- compatible: "mediatek,<chip>-dpi"
+ the supported chips are mt2701, mt8173 and mt8183.
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks
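+
+A minimal, illustrative example (the unit address, interrupt and clock
+specifiers are placeholders rather than values from a specific MediaTek
+dtsi):
+
+ dpi0: dpi@1401d000 {
+ compatible = "mediatek,mt8173-dpi";
+ reg = <0x1401d000 0x1000>;
+ interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&mmsys CLK_MM_DPI_PIXEL>,
+ <&mmsys CLK_MM_DPI_ENGINE>,
+ <&apmixedsys CLK_APMIXED_TVDPLL>;
+ clock-names = "pixel", "engine", "pll";
+ };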
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt
deleted file mode 100644
index bf9c7a2a495c..000000000000
--- a/Documentation/devicetree/bindings/display/msm/gmu.txt
+++ /dev/null
@@ -1,116 +0,0 @@
-Qualcomm adreno/snapdragon GMU (Graphics management unit)
-
-The GMU is a programmable power controller for the GPU. the CPU controls the
-GMU which in turn handles power controls for the GPU.
-
-Required properties:
-- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu"
- for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu"
- Note that you need to list the less specific "qcom,adreno-gmu"
- for generic matches and the more specific identifier to identify
- the specific device.
-- reg: Physical base address and length of the GMU registers.
-- reg-names: Matching names for the register regions
- * "gmu"
- * "gmu_pdc"
- * "gmu_pdc_seg"
-- interrupts: The interrupt signals from the GMU.
-- interrupt-names: Matching names for the interrupts
- * "hfi"
- * "gmu"
-- clocks: phandles to the device clocks
-- clock-names: Matching names for the clocks
- * "gmu"
- * "cxo"
- * "axi"
- * "mnoc"
-- power-domains: should be:
- <&clock_gpucc GPU_CX_GDSC>
- <&clock_gpucc GPU_GX_GDSC>
-- power-domain-names: Matching names for the power domains
-- iommus: phandle to the adreno iommu
-- operating-points-v2: phandle to the OPP operating points
-
-Optional properties:
-- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon
- SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
-
-Example:
-
-/ {
- ...
-
- gmu: gmu@506a000 {
- compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
-
- reg = <0x506a000 0x30000>,
- <0xb280000 0x10000>,
- <0xb480000 0x10000>;
- reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
-
- interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hfi", "gmu";
-
- clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
- <&gpucc GPU_CC_CXO_CLK>,
- <&gcc GCC_DDRSS_GPU_AXI_CLK>,
- <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
- clock-names = "gmu", "cxo", "axi", "memnoc";
-
- power-domains = <&gpucc GPU_CX_GDSC>,
- <&gpucc GPU_GX_GDSC>;
- power-domain-names = "cx", "gx";
-
- iommus = <&adreno_smmu 5>;
-
- operating-points-v2 = <&gmu_opp_table>;
- };
-};
-
-a3xx example with OCMEM support:
-
-/ {
- ...
-
- gpu: adreno@fdb00000 {
- compatible = "qcom,adreno-330.2",
- "qcom,adreno";
- reg = <0xfdb00000 0x10000>;
- reg-names = "kgsl_3d0_reg_memory";
- interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "kgsl_3d0_irq";
- clock-names = "core",
- "iface",
- "mem_iface";
- clocks = <&mmcc OXILI_GFX3D_CLK>,
- <&mmcc OXILICX_AHB_CLK>,
- <&mmcc OXILICX_AXI_CLK>;
- sram = <&gmu_sram>;
- power-domains = <&mmcc OXILICX_GDSC>;
- operating-points-v2 = <&gpu_opp_table>;
- iommus = <&gpu_iommu 0>;
- };
-
- ocmem@fdd00000 {
- compatible = "qcom,msm8974-ocmem";
-
- reg = <0xfdd00000 0x2000>,
- <0xfec00000 0x180000>;
- reg-names = "ctrl",
- "mem";
-
- clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
- <&mmcc OCMEMCX_OCMEMNOC_CLK>;
- clock-names = "core",
- "iface";
-
- #address-cells = <1>;
- #size-cells = <1>;
-
- gmu_sram: gmu-sram@0 {
- reg = <0x0 0x100000>;
- ranges = <0 0 0xfec00000 0x100000>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml
new file mode 100644
index 000000000000..0b8736a9384e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright 2019-2020, The Linux Foundation, All Rights Reserved
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/msm/gmu.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Devicetree bindings for the GMU attached to certain Adreno GPUs
+
+maintainers:
+ - Rob Clark <robdclark@gmail.com>
+
+description: |
+ These bindings describe the Graphics Management Unit (GMU) that is attached
+ to members of the Adreno A6xx GPU family. The GMU provides on-device power
+ management and support to improve power efficiency and reduce the load on
+ the CPU.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,adreno-gmu-630.2
+ - const: qcom,adreno-gmu
+
+ reg:
+ items:
+ - description: Core GMU registers
+ - description: GMU PDC registers
+ - description: GMU PDC sequence registers
+
+ reg-names:
+ items:
+ - const: gmu
+ - const: gmu_pdc
+ - const: gmu_pdc_seq
+
+ clocks:
+ items:
+ - description: GMU clock
+ - description: GPU CX clock
+ - description: GPU AXI clock
+ - description: GPU MEMNOC clock
+
+ clock-names:
+ items:
+ - const: gmu
+ - const: cxo
+ - const: axi
+ - const: memnoc
+
+ interrupts:
+ items:
+ - description: GMU HFI interrupt
+ - description: GMU interrupt
+
+ interrupt-names:
+ items:
+ - const: hfi
+ - const: gmu
+
+ power-domains:
+ items:
+ - description: CX power domain
+ - description: GX power domain
+
+ power-domain-names:
+ items:
+ - const: cx
+ - const: gx
+
+ iommus:
+ maxItems: 1
+
+ operating-points-v2: true
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - power-domains
+ - power-domain-names
+ - iommus
+ - operating-points-v2
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ gmu: gmu@506a000 {
+ compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
+
+ reg = <0x506a000 0x30000>,
+ <0xb280000 0x10000>,
+ <0xb480000 0x10000>;
+ reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
+
+ clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
+ clock-names = "gmu", "cxo", "axi", "memnoc";
+
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+
+ power-domains = <&gpucc GPU_CX_GDSC>,
+ <&gpucc GPU_GX_GDSC>;
+ power-domain-names = "cx", "gx";
+
+ iommus = <&adreno_smmu 5>;
+ operating-points-v2 = <&gmu_opp_table>;
+ };
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 7edc298a15f2..fd779cd6994d 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -35,25 +35,54 @@ Required properties:
bring the GPU out of secure mode.
- firmware-name: optional property of the 'zap-shader' node, listing the
relative path of the device specific zap firmware.
+- sram: phandle to the On Chip Memory (OCMEM) that's present on some a3xx and
+ a4xx Snapdragon SoCs. See
+ Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
-Example 3xx/4xx/a5xx:
+Example 3xx/4xx:
/ {
...
- gpu: qcom,kgsl-3d0@4300000 {
- compatible = "qcom,adreno-320.2", "qcom,adreno";
- reg = <0x04300000 0x20000>;
+ gpu: adreno@fdb00000 {
+ compatible = "qcom,adreno-330.2",
+ "qcom,adreno";
+ reg = <0xfdb00000 0x10000>;
reg-names = "kgsl_3d0_reg_memory";
- interrupts = <GIC_SPI 80 0>;
- clock-names =
- "core",
- "iface",
- "mem_iface";
- clocks =
- <&mmcc GFX3D_CLK>,
- <&mmcc GFX3D_AHB_CLK>,
- <&mmcc MMSS_IMEM_AHB_CLK>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names = "core",
+ "iface",
+ "mem_iface";
+ clocks = <&mmcc OXILI_GFX3D_CLK>,
+ <&mmcc OXILICX_AHB_CLK>,
+ <&mmcc OXILICX_AXI_CLK>;
+ sram = <&gpu_sram>;
+ power-domains = <&mmcc OXILICX_GDSC>;
+ operating-points-v2 = <&gpu_opp_table>;
+ iommus = <&gpu_iommu 0>;
+ };
+
+ ocmem@fdd00000 {
+ compatible = "qcom,msm8974-ocmem";
+
+ reg = <0xfdd00000 0x2000>,
+ <0xfec00000 0x180000>;
+ reg-names = "ctrl",
+ "mem";
+
+ clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
+ <&mmcc OCMEMCX_OCMEMNOC_CLK>;
+ clock-names = "core",
+ "iface";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ gpu_sram: gpu-sram@0 {
+ reg = <0x0 0x100000>;
+ ranges = <0 0 0xfec00000 0x100000>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
new file mode 100644
index 000000000000..93878c2cd370
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/advantech,idk-1110wr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Advantech IDK-1110WR 10.1" WSVGA LVDS Display Panel
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: lvds.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: advantech,idk-1110wr
+ - {} # panel-lvds, but not listed here to avoid false select
+
+ data-mapping:
+ const: jeida-24
+
+ width-mm:
+ const: 223
+
+ height-mm:
+ const: 125
+
+ panel-timing: true
+ port: true
+
+additionalProperties: false
+
+required:
+ - compatible
+
+examples:
+ - |+
+ panel {
+ compatible = "advantech,idk-1110wr", "panel-lvds";
+
+ width-mm = <223>;
+ height-mm = <125>;
+
+ data-mapping = "jeida-24";
+
+ panel-timing {
+ /* 1024x600 @60Hz */
+ clock-frequency = <51200000>;
+ hactive = <1024>;
+ vactive = <600>;
+ hsync-len = <240>;
+ hfront-porch = <40>;
+ hback-porch = <40>;
+ vsync-len = <10>;
+ vfront-porch = <15>;
+ vback-porch = <10>;
+ };
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds_encoder>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
new file mode 100644
index 000000000000..6b7fddc80c41
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/advantech,idk-2121wr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Advantech IDK-2121WR 21.5" Full-HD dual-LVDS panel
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+ The IDK-2121WR from Advantech is a Full-HD dual-LVDS panel.
+ A dual-LVDS interface is a dual-link connection with even pixels traveling
+ on one link, and with odd pixels traveling on the other link.
+
+ The panel expects odd pixels on the first port, and even pixels on the
+ second port, therefore the ports must be marked accordingly (with either
+ dual-lvds-odd-pixels or dual-lvds-even-pixels).
+
+properties:
+ compatible:
+ items:
+ - const: advantech,idk-2121wr
+ - {} # panel-lvds, but not listed here to avoid false select
+
+ width-mm:
+ const: 476
+
+ height-mm:
+ const: 268
+
+ data-mapping:
+ const: vesa-24
+
+ panel-timing: true
+
+ ports:
+ type: object
+ properties:
+ port@0:
+ type: object
+ description: The sink for odd pixels.
+ properties:
+ reg:
+ const: 0
+
+ dual-lvds-odd-pixels: true
+
+ required:
+ - reg
+ - dual-lvds-odd-pixels
+
+ port@1:
+ type: object
+ description: The sink for even pixels.
+ properties:
+ reg:
+ const: 1
+
+ dual-lvds-even-pixels: true
+
+ required:
+ - reg
+ - dual-lvds-even-pixels
+
+additionalProperties: false
+
+required:
+ - compatible
+ - width-mm
+ - height-mm
+ - data-mapping
+ - panel-timing
+ - ports
+
+examples:
+ - |+
+ panel-lvds {
+ compatible = "advantech,idk-2121wr", "panel-lvds";
+
+ width-mm = <476>;
+ height-mm = <268>;
+
+ data-mapping = "vesa-24";
+
+ panel-timing {
+ clock-frequency = <148500000>;
+ hactive = <1920>;
+ vactive = <1080>;
+ hsync-len = <44>;
+ hfront-porch = <88>;
+ hback-porch = <148>;
+ vfront-porch = <4>;
+ vback-porch = <36>;
+ vsync-len = <5>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dual-lvds-odd-pixels;
+ panel_in0: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dual-lvds-even-pixels;
+ panel_in1: endpoint {
+ remote-endpoint = <&lvds1_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt b/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt
deleted file mode 100644
index bae0e2b51467..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 8.0" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101ean01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt b/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt
deleted file mode 100644
index 72e088a4fb3a..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101aw03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt b/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt
deleted file mode 100644
index 3590b0741619..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101ean01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt
deleted file mode 100644
index 889d511d66c9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101xtn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt b/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt
deleted file mode 100644
index 690d0a568ef3..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
-
-Required properties:
-- compatible: should be "auo,b116xw03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt
deleted file mode 100644
index 302226b5bb55..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
-
-Required properties:
-- compatible: should be "auo,b133htn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt
deleted file mode 100644
index 7443b7c76769..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b133xtn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
deleted file mode 100644
index 49e4105378f6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g070vvn01"
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: single regulator to provide the supply voltage
-
-Required nodes:
-- port: Parallel port mapping to connect this display
-
-This panel needs single power supply voltage. Its backlight is conntrolled
-via PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to iMX6Q based board
-
- lcd_panel: lcd-panel {
- compatible = "auo,g070vvn01";
- backlight = <&backlight_lcd>;
- power-supply = <&reg_display>;
-
- port {
- lcd_panel_in: endpoint {
- remote-endpoint = <&lcd_display_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt b/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt
deleted file mode 100644
index bc6a0c858e23..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g101evn010"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt b/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
deleted file mode 100644
index 85626edf63e5..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g104sn02"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
deleted file mode 100644
index 3afc76747824..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g133han01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
deleted file mode 100644
index ed657c2141d4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g185han01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt b/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt
deleted file mode 100644
index 59bb6cd8aa75..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,p320hvn03"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
deleted file mode 100644
index cbd9da3f03b1..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,t215hvn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt b/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt
deleted file mode 100644
index b6f2f3e8f44e..000000000000
--- a/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "avic,tm070ddh03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
deleted file mode 100644
index 55183d360032..000000000000
--- a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,hv070wsa-100"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-- enable-gpios: GPIO pin to enable and disable panel (active high)
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [1]. This
-node should describe the panel's video bus.
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
- panel: panel {
- compatible = "boe,hv070wsa-100";
- power-supply = <&vcc_3v3_reg>;
- enable-gpios = <&gpd1 3 GPIO_ACTIVE_HIGH>;
- port {
- panel_ep: endpoint {
- remote-endpoint = <&bridge_out_ep>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt b/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
deleted file mode 100644
index b258d6a91ec6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,nv101wxmn51"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
deleted file mode 100644
index 50be5e2438b2..000000000000
--- a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Boe Corporation 8.0" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,tv080wum-nl0"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
new file mode 100644
index 000000000000..740213459134
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,tv101wum-nl6.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BOE TV101WUM-NL6 DSI Display Panel
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Sam Ravnborg <sam@ravnborg.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+      # BOE TV101WUM-NL6 10.1" WUXGA TFT LCD panel
+      - boe,tv101wum-nl6
+      # AUO KD101N80-45NA 10.1" WUXGA TFT LCD panel
+      - auo,kd101n80-45na
+      # BOE TV101WUM-N53 10.1" WUXGA TFT LCD panel
+      - boe,tv101wum-n53
+      # AUO B101UAN08.3 10.1" WUXGA TFT LCD panel
+      - auo,b101uan08.3
+
+  reg:
+    description: the virtual channel number of a DSI peripheral
+
+  enable-gpios:
+    description: a GPIO spec for the enable pin
+
+  pp1800-supply:
+    description: core voltage supply
+
+  avdd-supply:
+    description: phandle of the regulator that provides positive voltage
+
+  avee-supply:
+    description: phandle of the regulator that provides negative voltage
+
+  backlight:
+    description: phandle of the backlight device attached to the panel
+
+  port: true
+
+required:
+  - compatible
+  - reg
+  - enable-gpios
+  - pp1800-supply
+  - avdd-supply
+  - avee-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "boe,tv101wum-nl6";
+        reg = <0>;
+        enable-gpios = <&pio 45 0>;
+        avdd-supply = <&ppvarn_lcd>;
+        avee-supply = <&ppvarp_lcd>;
+        pp1800-supply = <&pp1800_lcd>;
+        backlight = <&backlight_lcd0>;
+        status = "okay";
+        port {
+          panel_in: endpoint {
+            remote-endpoint = <&dsi_out>;
+          };
+        };
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
deleted file mode 100644
index 057f7f3f6dbe..000000000000
--- a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "cdtech,s043wq26h-ct7"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
deleted file mode 100644
index 505615dfa0df..000000000000
--- a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "cdtech,s070wv95-ct16"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
deleted file mode 100644
index dd22685d2adc..000000000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa070wp03xg"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt
deleted file mode 100644
index f24614e4d5ec..000000000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa101wa01a"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt
deleted file mode 100644
index 0ab2c05a4c22..000000000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa101wb03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
deleted file mode 100644
index 897085ee3cd4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
-
-Required properties:
-- compatible: should be "dataimage,scf0700c48ggu18"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index 78222ced1874..7f55ad4a40c4 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -1,123 +1 @@
-display-timing bindings
-=======================
-
-display-timings node
---------------------
-
-required properties:
- - none
-
-optional properties:
- - native-mode: The native mode for the display, in case multiple modes are
-   provided. When omitted, assume the first node is the native mode.
-
-timing subnode
---------------
-
-required properties:
- - hactive, vactive: display resolution
- - hfront-porch, hback-porch, hsync-len: horizontal display timing parameters
- in pixels
- vfront-porch, vback-porch, vsync-len: vertical display timing parameters in
- lines
- - clock-frequency: display clock in Hz
-
-optional properties:
- - hsync-active: hsync pulse is active low/high/ignored
- - vsync-active: vsync pulse is active low/high/ignored
- - de-active: data-enable pulse is active low/high/ignored
- - pixelclk-active: with
- - active high = drive pixel data on rising edge/
- sample data on falling edge
- - active low = drive pixel data on falling edge/
- sample data on rising edge
- - ignored = ignored
- - syncclk-active: with
- - active high = drive sync on rising edge/
- sample sync on falling edge of pixel
- clock
- - active low = drive sync on falling edge/
- sample sync on rising edge of pixel
- clock
- - omitted = same configuration as pixelclk-active
- - interlaced (bool): boolean to enable interlaced mode
- - doublescan (bool): boolean to enable doublescan mode
- - doubleclk (bool): boolean to enable doubleclock mode
-
-All the optional properties that are not bool follow the following logic:
- <1>: high active
- <0>: low active
- omitted: not used on hardware
-
-There are different ways of describing the capabilities of a display. The
-devicetree representation corresponds to the one commonly found in datasheets
-for displays. If a display supports multiple signal timings, the native-mode
-can be specified.
-
-The parameters are defined as:
-
-  +----------+-------------------------------------+----------+-------+
-  |          |        ^                            |          |       |
-  |          |        |vback_porch                 |          |       |
-  |          |        v                            |          |       |
-  +----------#######################################----------+-------+
-  |          #        ^                            #          |       |
-  |          #        |                            #          |       |
-  |  hback   #        |                            #  hfront  | hsync |
-  |   porch  #        | hactive                    #   porch  |  len  |
-  |<-------->#<-------+--------------------------->#<-------->|<----->|
-  |          #        |                            #          |       |
-  |          #        |vactive                     #          |       |
-  |          #        |                            #          |       |
-  |          #        v                            #          |       |
-  +----------#######################################----------+-------+
-  |          |        ^                            |          |       |
-  |          |        |vfront_porch                |          |       |
-  |          |        v                            |          |       |
-  +----------+-------------------------------------+----------+-------+
-  |          |        ^                            |          |       |
-  |          |        |vsync_len                   |          |       |
-  |          |        v                            |          |       |
-  +----------+-------------------------------------+----------+-------+
-
-Note: In addition to being used as subnode(s) of display-timings, the timing
- subnode may also be used on its own. This is appropriate if only one mode
- need be conveyed. In this case, the node should be named 'panel-timing'.
-
-
-Example:
-
- display-timings {
- native-mode = <&timing0>;
- timing0: 1080p24 {
- /* 1920x1080p24 */
- clock-frequency = <52000000>;
- hactive = <1920>;
- vactive = <1080>;
- hfront-porch = <25>;
- hback-porch = <25>;
- hsync-len = <25>;
- vback-porch = <2>;
- vfront-porch = <2>;
- vsync-len = <2>;
- hsync-active = <1>;
- };
- };
-
-Every required property also supports the use of ranges, so the commonly used
-datasheet description with minimum, typical and maximum values can be used.
-
-Example:
-
- timing1: timing {
- /* 1920x1080p24 */
- clock-frequency = <148500000>;
- hactive = <1920>;
- vactive = <1080>;
- hsync-len = <0 44 60>;
- hfront-porch = <80 88 95>;
- hback-porch = <100 148 160>;
- vfront-porch = <0 4 6>;
- vback-porch = <0 36 50>;
- vsync-len = <0 5 6>;
- };
+See display-timings.yaml in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/display-timings.yaml b/Documentation/devicetree/bindings/display/panel/display-timings.yaml
new file mode 100644
index 000000000000..c8c0c9cb0492
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/display-timings.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/display-timings.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: display timing bindings
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+  - Sam Ravnborg <sam@ravnborg.org>
+
+description: |
+  A display panel may be able to handle several display timings
+  with different resolutions.
+  The display-timings node makes it possible to specify several timings
+  and to indicate which timing is native for the display.
+
+properties:
+  $nodename:
+    const: display-timings
+
+  native-mode:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: |
+      The default display timing is the one specified as native-mode.
+      If no native-mode is specified, the first timing node is assumed
+      to be the native mode.
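+    # For instance, native-mode = <&timing1>; as used in the example below.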
+
+patternProperties:
+  "^timing":
+    type: object
+    allOf:
+      - $ref: panel-timing.yaml#
+
+additionalProperties: false
+
+examples:
+  - |+
+
+    /*
+     * Example that specifies panel timing using minimum, typical,
+     * maximum values as commonly used in datasheet description.
+     * timing1 is the native-mode.
+     */
+    display-timings {
+      native-mode = <&timing1>;
+      timing0 {
+        /* 1920x1080p24 */
+        clock-frequency = <148500000>;
+        hactive = <1920>;
+        vactive = <1080>;
+        hsync-len = <0 44 60>;
+        hfront-porch = <80 88 95>;
+        hback-porch = <100 148 160>;
+        vfront-porch = <0 4 6>;
+        vback-porch = <0 36 50>;
+        vsync-len = <0 5 6>;
+      };
+      timing1 {
+        /* 1920x1080p24 */
+        clock-frequency = <52000000>;
+        hactive = <1920>;
+        vactive = <1080>;
+        hfront-porch = <25>;
+        hback-porch = <25>;
+        hsync-len = <0 25 25>;
+        vback-porch = <2>;
+        vfront-porch = <2>;
+        vsync-len = <2>;
+        hsync-active = <1>;
+        pixelclk-active = <1>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
deleted file mode 100644
index fbf5dcd15661..000000000000
--- a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
-
-Required properties:
-- compatible: should be "dlc,dlc1010gig"
-- power-supply: See simple-panel.txt
-
-Optional properties:
-- enable-gpios: See simple-panel.txt
-- backlight: See simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
deleted file mode 100644
index b7ac1c725f97..000000000000
--- a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Emerging Display Technology Corp. Displays
-==========================================
-
-
-Display bindings for EDT Display Technology Corp. Displays which are
-compatible with the simple-panel binding, which is specified in
-simple-panel.txt
-
-3,5" QVGA TFT Panels
---------------------
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatible          | description                         |
-+=================+=====================+=====================================+
-| ET035012DM6     | edt,et035012dm6     | 3.5" QVGA TFT LCD panel             |
-+-----------------+---------------------+-------------------------------------+
-
-4,3" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatible          | description                         |
-+=================+=====================+=====================================+
-| ETM0430G0DH6    | edt,etm0430g0dh6    | 480x272 TFT Display                 |
-+-----------------+---------------------+-------------------------------------+
-
-5,7" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatible          | description                         |
-+=================+=====================+=====================================+
-| ET057090DHU     | edt,et057090dhu     | 5.7" VGA TFT LCD panel              |
-+-----------------+---------------------+-------------------------------------+
-
-
-7,0" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatible          | description                         |
-+=================+=====================+=====================================+
-| ETM0700G0DH6    | edt,etm070080dh6    | WVGA TFT Display with capacitive    |
-|                 | edt,etm0700g0dh6    | touchscreen                         |
-+-----------------+---------------------+-------------------------------------+
-| ETM0700G0BDH6   | edt,etm070080bdh6   | Same as ETM0700G0DH6 but with       |
-|                 |                     | inverted pixel clock.               |
-+-----------------+---------------------+-------------------------------------+
-| ETM0700G0EDH6   | edt,etm070080edh6   | Same display as the ETM0700G0BDH6,  |
-|                 |                     | but with changed hardware for the   |
-|                 |                     | backlight and the touch interface   |
-+-----------------+---------------------+-------------------------------------+
-| ET070080DH6     | edt,etm070080dh6    | Same timings as the ETM0700G0DH6,   |
-|                 |                     | but with resistive touch.           |
-+-----------------+---------------------+-------------------------------------+
-
diff --git a/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
new file mode 100644
index 000000000000..aa761f697b7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/elida,kd35t133.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Elida KD35T133 3.5in 320x480 DSI panel
+
+maintainers:
+  - Heiko Stuebner <heiko.stuebner@theobroma-systems.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: elida,kd35t133
+  reg: true
+  backlight: true
+  reset-gpios: true
+  iovcc-supply:
+    description: regulator that supplies the iovcc voltage
+  vdd-supply:
+    description: regulator that supplies the vdd voltage
+
+required:
+  - compatible
+  - reg
+  - backlight
+  - iovcc-supply
+  - vdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "elida,kd35t133";
+        reg = <0>;
+        backlight = <&backlight>;
+        iovcc-supply = <&vcc_1v8>;
+        vdd-supply = <&vcc3v3_lcd>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt b/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
deleted file mode 100644
index 82d22e191ac3..000000000000
--- a/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
-
-Required properties:
-- compatible: should be "evervision,vgg804821"
-- power-supply: See simple-panel.txt
-
-Optional properties:
-- backlight: See simple-panel.txt
-- enable-gpios: See simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
new file mode 100644
index 000000000000..927f1eea18d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/feixin,k101-im2ba02.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Feixin K101 IM2BA02 10.1" MIPI-DSI LCD panel
+
+maintainers:
+  - Icenowy Zheng <icenowy@aosc.io>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: feixin,k101-im2ba02
+  reg: true
+  backlight: true
+  reset-gpios: true
+  avdd-supply:
+    description: regulator that supplies the AVDD voltage
+  dvdd-supply:
+    description: regulator that supplies the DVDD voltage
+  cvdd-supply:
+    description: regulator that supplies the CVDD voltage
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+  - dvdd-supply
+  - cvdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "feixin,k101-im2ba02";
+        reg = <0>;
+        avdd-supply = <&reg_dc1sw>;
+        dvdd-supply = <&reg_dc1sw>;
+        cvdd-supply = <&reg_ldo_io1>;
+        reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>;
+        backlight = <&backlight>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt b/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt
deleted file mode 100644
index b47f9d87bc19..000000000000
--- a/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Foxlink Group 5" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "foxlink,fl500wvr00-a0t"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt b/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
deleted file mode 100644
index 6c9156fc3478..000000000000
--- a/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-FriendlyELEC HD702E 800x1280 LCD panel
-
-The HD702E is a FriendlyELEC-developed eDP LCD panel with 800x1280
-resolution. It has a built-in Goodix GT9271 capacitive touchscreen,
-with backlight adjustable via PWM.
-
-Required properties:
-- compatible: should be "friendlyarm,hd702e"
-- power-supply: regulator to provide the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LCD panel input.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Example:
-
- panel {
- compatible ="friendlyarm,hd702e", "simple-panel";
- backlight = <&backlight>;
- power-supply = <&vcc3v3_sys>;
-
- port {
- panel_in_edp: endpoint {
- remote-endpoint = <&edp_out_panel>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt b/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt
deleted file mode 100644
index 24b0b624434b..000000000000
--- a/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "giantplus,gpg48273qs5"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt b/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt
deleted file mode 100644
index 7da1d5c038ff..000000000000
--- a/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "hannstar,hsd070pww1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt b/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt
deleted file mode 100644
index 8270319a99de..000000000000
--- a/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
-
-Required properties:
-- compatible: should be "hannstar,hsd100pxn1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt b/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt
deleted file mode 100644
index 04caaae19af6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
-
-Required properties:
-- compatible: should be "hit,tx23d38vm0caa"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt b/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt
deleted file mode 100644
index 4104226b61bc..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux AT043TN24 4.3" WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,at043tn24"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt b/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
deleted file mode 100644
index 3e10cd782491..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux AT070TN92 7.0" WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,at070tn92"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
deleted file mode 100644
index 7c234cf68e11..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g070y2-l01"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
deleted file mode 100644
index 9e7590465227..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
-
-Required properties:
-- compatible: should be "innolux,g101ice-l01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt
deleted file mode 100644
index 2743b07cd2f2..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g121i1-l01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
deleted file mode 100644
index 649744620ae1..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g121x1-l03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt b/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt
deleted file mode 100644
index 081bb939ed31..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,n116bge"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt b/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt
deleted file mode 100644
index 7825844aafdf..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-InnoLux 15.6" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,n156bge-l21"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt b/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt
deleted file mode 100644
index 824f87f1526d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,zj070na-01p"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
deleted file mode 100644
index be7ac666807b..000000000000
--- a/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
-
-Required properties:
-- compatible: should be "koe,tx14d24vm1bpa"
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: single regulator to provide the supply voltage
-
-Required nodes:
-- port: Parallel port mapping to connect this display
-
-This panel needs a single power supply voltage. Its backlight is controlled
-via a PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to an iMX53-based board
-
- lcd_panel: lcd-panel {
- compatible = "koe,tx14d24vm1bpa";
- backlight = <&backlight_lcd>;
- power-supply = <&reg_3v3>;
-
- port {
- lcd_panel_in: endpoint {
- remote-endpoint = <&lcd_display_out>;
- };
- };
- };
-
-Then one needs to extend the dispX node:
-
- lcd_display: disp1 {
-
- port@1 {
- reg = <1>;
-
- lcd_display_out: endpoint {
- remote-endpoint = <&lcd_panel_in>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
deleted file mode 100644
index 6a036ede3e28..000000000000
--- a/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "koe,tx31d200vm0baa"
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LVDS panel input.
-
-Example:
- panel {
- compatible = "koe,tx31d200vm0baa";
- backlight = <&backlight_lvds>;
-
- port {
- panel_in: endpoint {
- remote-endpoint = <&lvds0_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
deleted file mode 100644
index a8e940fe731e..000000000000
--- a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "kyo,tcg121xglp"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
index a614644c9849..fd931b293816 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
@@ -34,7 +34,7 @@ additionalProperties: false
examples:
- |
- dsi@ff450000 {
+ dsi {
#address-cells = <1>;
#size-cells = <0>;
reg = <0xff450000 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt b/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt
deleted file mode 100644
index 74ee7ea6b493..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "lemaker,bl035-rgb-002"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt b/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt
deleted file mode 100644
index a7588e5259cf..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 7" (800x480 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lb070wv8"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt b/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
deleted file mode 100644
index b9877acad012..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp079qx1-sp0v"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
deleted file mode 100644
index 42141516f078..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 9.7" (2048x1536 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp097qx1-spa1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
deleted file mode 100644
index 8c5de692c55c..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 12.0" (1920x1280 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp120up1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt b/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt
deleted file mode 100644
index 9f262e0c5a2e..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 12.9" (2560x1700 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp129qe"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
deleted file mode 100644
index 7d8f6eeef6d9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Mitsubishi AA070MC01 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "mitsubishi,aa070mc01-ca1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt b/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt
deleted file mode 100644
index 71cbc49ecfab..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
-
-Required properties:
-- compatible: should be "nec,nl12880bc20-05"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt b/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
deleted file mode 100644
index 8e1914d1edb8..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-NEC LCD Technologies, Ltd. WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "nec,nl4827hc19-05b"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt b/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
deleted file mode 100644
index c6d06b5eab51..000000000000
--- a/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Netron-DY E231732 7.0" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "netron-dy,e231732"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
deleted file mode 100644
index e78292b1a131..000000000000
--- a/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Newhaven Display International 480 x 272 TFT LCD panel
-
-Required properties:
-- compatible: should be "newhaven,nhd-4.3-480272ef-atxl"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt b/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt
deleted file mode 100644
index 1a639fd8778d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
-
-Required properties:
-- compatible: should be "nlt,nl192108ac18-02d"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
new file mode 100644
index 000000000000..73d2ff3baaff
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/novatek,nt35510.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Novatek NT35510-based display panels
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: hydis,hva40wv1
+      - const: novatek,nt35510
+    description: This indicates the specific panel, which in turn uses
+      the NT35510 panel driver. The compatible string determines how
+      the NT35510 panel driver shall be configured to work with the
+      indicated panel. The novatek,nt35510 compatible shall always be
+      provided as a fallback.
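+    # For example (mirroring the items above):
+    #   compatible = "hydis,hva40wv1", "novatek,nt35510";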
+  reg: true
+  reset-gpios: true
+  vdd-supply:
+    description: regulator that supplies the vdd voltage
+  vddi-supply:
+    description: regulator that supplies the vddi voltage
+  backlight: true
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "hydis,hva40wv1", "novatek,nt35510";
+        reg = <0>;
+        vdd-supply = <&ab8500_ldo_aux4_reg>;
+        vddi-supply = <&ab8500_ldo_aux6_reg>;
+        reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+        backlight = <&gpio_bl>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/nvd,9128.txt b/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
deleted file mode 100644
index 17bcd017c678..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
-
-Required properties:
-- compatible: should be "nvd,9128"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt b/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt
deleted file mode 100644
index ddf8e211d382..000000000000
--- a/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
-
-Required properties:
-- compatible: should be "okaya,rs800480t-7x0gp"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
deleted file mode 100644
index 74540a090669..000000000000
--- a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Olimex 4.3" TFT LCD panel
-
-Required properties:
-- compatible: should be "olimex,lcd-olinuxino-43-ts"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt b/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
deleted file mode 100644
index 3d8a5e029242..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-On Tat Industrial Company 7" DPI TFT panel.
-
-Required properties:
-- compatible: should be "ontat,yx700wv03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
deleted file mode 100644
index 203b03eefb68..000000000000
--- a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
-
-The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
-a MIPI-DSI video interface. Its backlight is managed through the DSI link.
-
-Required properties:
- - compatible: "orisetech,otm8009a"
- - reg: the virtual channel number of a DSI peripheral
-
-Optional properties:
- - reset-gpios: a GPIO spec for the reset pin (active low).
- - power-supply: phandle of the regulator that provides the supply voltage.
-
-Example:
-&dsi {
- ...
- panel@0 {
- compatible = "orisetech,otm8009a";
- reg = <0>;
- reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
- power-supply = <&v1v8>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
new file mode 100644
index 000000000000..4b6dda6dbc0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/orisetech,otm8009a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
+
+maintainers:
+  - Philippe CORNU <philippe.cornu@st.com>
+
+description: |
+  The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
+  a MIPI-DSI video interface. Its backlight is managed through the DSI link.
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+
+  compatible:
+    const: orisetech,otm8009a
+
+  reg:
+    maxItems: 1
+    description: DSI virtual channel
+
+  enable-gpios: true
+  port: true
+  power-supply: true
+
+  reset-gpios:
+    maxItems: 1
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "orisetech,otm8009a";
+        reg = <0>;
+        reset-gpios = <&gpiof 15 0>;
+        power-supply = <&v1v8>;
+      };
+    };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt
deleted file mode 100644
index c16907c02f80..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com37h3m05dtc"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt
deleted file mode 100644
index 06a73c3f46b5..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OrtusTech COM37H3M99DTC Blanview 3.7" VGA portrait TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com37h3m99dtc"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt
deleted file mode 100644
index de19e9398618..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com43h4m85ulc"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
deleted file mode 100644
index e57883ccdf2f..000000000000
--- a/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OSD Displays OSD070T1718-19TS 7" WVGA TFT LCD panel
-
-Required properties:
-- compatible: shall be "osddisplays,osd070t1718-19ts"
-- power-supply: see simple-panel.txt
-
-Optional properties:
-- backlight: see simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory. No simple-panel properties other
-than the ones specified herein are valid.
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
deleted file mode 100644
index 85c0b2cacfda..000000000000
--- a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
-
-Required properties:
-- compatible: should be "osddisplays,osd101t2045-53ts"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt
deleted file mode 100644
index d328b0341bf4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Panasonic Corporation 10.1" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "panasonic,vvx10f004b00"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
deleted file mode 100644
index 37dedf6a6702..000000000000
--- a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Panasonic 10" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "panasonic,vvx10f034n00"
-- reg: DSI virtual channel of the peripheral
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
- mdss_dsi@fd922800 {
- panel@0 {
- compatible = "panasonic,vvx10f034n00";
- reg = <0>;
- power-supply = <&vreg_vsp>;
- backlight = <&lp8566_wled>;
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.yaml b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
index ef8d8cdfcede..ed051ba12084 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
@@ -54,13 +54,28 @@ properties:
   # Display Timings
   panel-timing:
-    type: object
     description:
       Most display panels are restricted to a single resolution and
       require specific display timings. The panel-timing subnode expresses those
-      timings as specified in the timing subnode section of the display timing
-      bindings defined in
-      Documentation/devicetree/bindings/display/panel/display-timing.txt.
+      timings.
+    allOf:
+      - $ref: panel-timing.yaml#
+
+  display-timings:
+    description:
+      Some display panels support several resolutions, each with different
+      timings. The display-timings binding supports specifying several
+      timings and optionally specifying which one is the native mode.
+    allOf:
+      - $ref: display-timings.yaml#
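+  # An illustrative sketch (node and label names assumed, not part of
+  # the schema) of a panel that supports several modes:
+  #
+  #   display-timings {
+  #     native-mode = <&timing0>;
+  #     timing0: timing0 { /* full set of timing properties */ };
+  #     timing1 { /* an alternate mode */ };
+  #   };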
# Connectivity
port:
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
deleted file mode 100644
index 6b203bc4d932..000000000000
--- a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Generic MIPI DPI Panel
-======================
-
-Required properties:
-- compatible: "panel-dpi"
-
-Optional properties:
-- label: a symbolic name for the panel
-- enable-gpios: panel enable gpio
-- reset-gpios: GPIO to control the RESET pin
-- vcc-supply: phandle of regulator that will be used to enable power to the display
-- backlight: phandle of the backlight device
-
-Required nodes:
-- "panel-timing" containing video timings
- (Documentation/devicetree/bindings/display/panel/display-timing.txt)
-- Video port for DPI input
-
-Example
--------
-
-lcd0: display@0 {
- compatible = "samsung,lte430wq-f0c", "panel-dpi";
- label = "lcd";
-
- backlight = <&backlight>;
-
- port {
- lcd_in: endpoint {
- remote-endpoint = <&dpi_out>;
- };
- };
-
- panel-timing {
- clock-frequency = <9200000>;
- hactive = <480>;
- vactive = <272>;
- hfront-porch = <8>;
- hback-porch = <4>;
- hsync-len = <41>;
- vback-porch = <2>;
- vfront-porch = <4>;
- vsync-len = <10>;
-
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml b/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml
new file mode 100644
index 000000000000..f63870384c00
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-dpi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic MIPI DPI Panel
+
+maintainers:
+  - Sam Ravnborg <sam@ravnborg.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    description:
+      Shall contain a panel specific compatible and "panel-dpi"
+      in that order.
+    items:
+      - {}
+      - const: panel-dpi
+
+  data-mapping:
+    enum:
+      - rgb24
+      - rgb565
+      - bgr666
+    description: |
+      Describes the media format: how the display panel is connected
+      to the display interface.
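+    # Illustrative note (an assumption, not taken from the schema):
+    # "rgb24" typically corresponds to a 24-bit parallel bus, "rgb565"
+    # to a 16-bit bus and "bgr666" to an 18-bit bus.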
+
+  backlight: true
+  enable-gpios: true
+  height-mm: true
+  label: true
+  panel-timing: true
+  port: true
+  power-supply: true
+  reset-gpios: true
+  width-mm: true
+
+required:
+  - panel-timing
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    panel {
+      compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+      label = "osddisplay";
+      power-supply = <&vcc_supply>;
+      data-mapping = "rgb565";
+      backlight = <&backlight>;
+
+      port {
+        lcd_in: endpoint {
+          remote-endpoint = <&dpi_out>;
+        };
+      };
+      panel-timing {
+        clock-frequency = <9200000>;
+        hactive = <800>;
+        vactive = <480>;
+        hfront-porch = <8>;
+        hback-porch = <4>;
+        hsync-len = <41>;
+        vback-porch = <2>;
+        vfront-porch = <4>;
+        vsync-len = <10>;
+
+        hsync-active = <0>;
+        vsync-active = <0>;
+        de-active = <1>;
+        pixelclk-active = <1>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
new file mode 100644
index 000000000000..b2e8742fd6af
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-simple-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple DSI panels with a single power-supply
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Sam Ravnborg <sam@ravnborg.org>
+
+description: |
+  This binding file is a collection of the DSI panels that
+  require only a single power-supply.
+  Optionally, a backlight and an enable GPIO can be present.
+ The panel may use an OF graph binding for the association to the display,
+ or it may be a direct child node of the display.
+
+ If the panel is more advanced a dedicated binding file is required.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+
+ compatible:
+ enum:
+      # Entries must be listed in alphabetical order, sorted by compatible string.
+ # The description in the comment is mandatory for each compatible.
+
+ # Panasonic 10" WUXGA TFT LCD panel
+ - panasonic,vvx10f034n00
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ backlight: true
+ enable-gpios: true
+ port: true
+ power-supply: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - power-supply
+ - reg
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "panasonic,vvx10f034n00";
+ reg = <0>;
+ power-supply = <&vcc_lcd_reg>;
+
+ port {
+ panel: endpoint {
+ remote-endpoint = <&ltdc_out>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 8fe60ee2531c..393ffc6acbba 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -33,16 +33,225 @@ properties:
- ampire,am-480272h3tmqw-t01h
# Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
- ampire,am800480r3tmqwa1h
+ # AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+ - auo,b080uan01
+ # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+ - auo,b101aw03
+ # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+ - auo,b101ean01
+ # AU Optronics Corporation 10.1" WXGA TFT LCD panel
+ - auo,b101xtn01
# AUO B116XAK01 eDP TFT LCD panel
- auo,b116xa01
+ # AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
+ - auo,b116xw03
+ # AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
+ - auo,b133htn01
+ # AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
+ - auo,b133xtn01
+        # AU Optronics Corporation 7.0" WVGA (800x480) TFT LCD panel
+ - auo,g070vvn01
+ # AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
+ - auo,g101evn010
+ # AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
+ - auo,g104sn02
+ # AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
+ - auo,g133han01
+ # AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
+ - auo,g185han01
+ # AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
+ - auo,p320hvn03
+ # AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
+ - auo,t215hvn01
+ # Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
+ - avic,tm070ddh03
+ # BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
+ - boe,hv070wsa-100
+ # BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
+ - boe,nv101wxmn51
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
+ # Boe Corporation 8.0" WUXGA TFT LCD panel
+ - boe,tv080wum-nl0
+ # CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
+ - cdtech,s043wq26h-ct7
+ # CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
+ - cdtech,s070wv95-ct16
+ # Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
+ - chunghwa,claa070wp03xg
+ # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+ - chunghwa,claa101wa01a
+ # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+ - chunghwa,claa101wb03
+ # DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
+ - dataimage,scf0700c48ggu18
+ # DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
+ - dlc,dlc1010gig
+ # Emerging Display Technology Corp. 3.5" QVGA TFT LCD panel
+ - edt,et035012dm6
+ # Emerging Display Technology Corp. 480x272 TFT Display with capacitive touch
+ - edt,etm043080dh6gp
+ # Emerging Display Technology Corp. 480x272 TFT Display
+ - edt,etm0430g0dh6
+ # Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
+ - edt,et057090dhu
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ - edt,etm070080dh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ - edt,etm0700g0dh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ # Same as ETM0700G0DH6 but with inverted pixel clock.
+ - edt,etm070080bdh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ # Same display as the ETM0700G0BDH6, but with changed hardware for the
+ # backlight and the touch interface.
+ - edt,etm070080edh6
+        # Emerging Display Technology Corp. WVGA TFT Display with resistive touch
+ # Same timings as the ETM0700G0DH6, but with resistive touch.
+ - edt,etm070080dh6
+ # Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
+ - evervision,vgg804821
+ # Foxlink Group 5" WVGA TFT LCD panel
+ - foxlink,fl500wvr00-a0t
+ # Frida FRD350H54004 3.5" QVGA TFT LCD panel
+ - frida,frd350h54004
+ # FriendlyELEC HD702E 800x1280 LCD panel
+ - friendlyarm,hd702e
+ # GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
+ - giantplus,gpg48273qs5
# GiantPlus GPM940B0 3.0" QVGA TFT LCD panel
- giantplus,gpm940b0
+ # HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
+ - hannstar,hsd070pww1
+ # HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
+ - hannstar,hsd100pxn1
+ # Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
+ - hit,tx23d38vm0caa
+ # Innolux AT043TN24 4.3" WQVGA TFT LCD panel
+ - innolux,at043tn24
+ # Innolux AT070TN92 7.0" WQVGA TFT LCD panel
+ - innolux,at070tn92
+ # Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
+ - innolux,g070y2-l01
+ # Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
+ - innolux,g101ice-l01
+ # Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
+ - innolux,g121i1-l01
+ # Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
+ - innolux,g121x1-l03
+ # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
+ - innolux,n116bge
+ # InnoLux 15.6" WXGA TFT LCD panel
+ - innolux,n156bge-l21
+ # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
+ - innolux,zj070na-01p
+ # Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
+ - koe,tx14d24vm1bpa
+ # Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
+ - koe,tx31d200vm0baa
+ # Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
+ - kyo,tcg121xglp
+ # LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
+ - lemaker,bl035-rgb-002
+ # LG 7" (800x480 pixels) TFT LCD panel
+ - lg,lb070wv8
+ # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
+ - lg,lp079qx1-sp0v
+ # LG 9.7" (2048x1536 pixels) TFT LCD panel
+ - lg,lp097qx1-spa1
+ # LG 12.0" (1920x1280 pixels) TFT LCD panel
+ - lg,lp120up1
+ # LG 12.9" (2560x1700 pixels) TFT LCD panel
+ - lg,lp129qe
+ # Logic Technologies LT161010-2NHC 7" WVGA TFT Cap Touch Module
+ - logictechno,lt161010-2nhc
+ # Logic Technologies LT161010-2NHR 7" WVGA TFT Resistive Touch Module
+ - logictechno,lt161010-2nhr
+ # Logic Technologies LT170410-2WHC 10.1" 1280x800 IPS TFT Cap Touch Mod.
+ - logictechno,lt170410-2whc
+        # Mitsubishi AA070MC01 7.0" WVGA TFT LCD panel
+ - mitsubishi,aa070mc01-ca1
+ # NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
+ - nec,nl12880bc20-05
+ # NEC LCD Technologies,Ltd. WQVGA TFT LCD panel
+ - nec,nl4827hc19-05b
+ # Netron-DY E231732 7.0" WSVGA TFT LCD panel
+ - netron-dy,e231732
+ # NewEast Optoelectronics CO., LTD WJFH116008A eDP TFT LCD panel
+ - neweast,wjfh116008a
+ # Newhaven Display International 480 x 272 TFT LCD panel
+ - newhaven,nhd-4.3-480272ef-atxl
+ # NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
+ - nlt,nl192108ac18-02d
+ # New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
+ - nvd,9128
+ # OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
+ - okaya,rs800480t-7x0gp
+ # Olimex 4.3" TFT LCD panel
+ - olimex,lcd-olinuxino-43-ts
+ # On Tat Industrial Company 7" DPI TFT panel.
+ - ontat,yx700wv03
+ # OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
+ - ortustech,com37h3m05dtc
+ # OrtusTech COM37H3M99DTC Blanview 3.7" VGA portrait TFT-LCD panel
+ - ortustech,com37h3m99dtc
+ # OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
+ - ortustech,com43h4m85ulc
+ # OSD Displays OSD070T1718-19TS 7" WVGA TFT LCD panel
+ - osddisplays,osd070t1718-19ts
+ # One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
+ - osddisplays,osd101t2045-53ts
+        # QiaoDian XianShi Corporation 4.3" TFT LCD panel
+ - qiaodian,qd43003c0-40
+        # Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
+      - rocktech,rk070er9427
+        # Rocktech Displays Ltd. RK101II01D-CT 10.1" TFT 1280x800
+      - rocktech,rk101ii01d-ct
+ # Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+ - samsung,lsn122dl01-c01
+ # Samsung Electronics 10.1" WSVGA TFT LCD panel
+ - samsung,ltn101nt05
+ # Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
+ - samsung,ltn140at29-301
# Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
- satoz,sat050at40h12r2
+ # Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
+ - sharp,lq035q7db03
+ # Sharp LQ070Y3DG3B 7.0" WVGA landscape TFT LCD panel
+ - sharp,lq070y3dg3b
+ # Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
+ - sharp,lq101k1ly04
+ # Sharp 12.3" (2400x1600 pixels) TFT LCD panel
+ - sharp,lq123p1jx31
# Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
- sharp,ls020b1dd01d
+ # Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
+ - shelly,sca07010-bfn-lnn
+ # Starry 12.2" (1920x1200 pixels) TFT LCD panel
+ - starry,kr122ea0sra
+ # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
+ - tianma,tm070jdhg30
+ # Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel
+ - tianma,tm070rvhg71
+ # Toshiba 8.9" WXGA (1280x768) TFT LCD panel
+ - toshiba,lt089ac29000
+ # TPK U.S.A. LLC Fusion 7" 800 x 480 (WVGA) LCD panel with capacitive touch
+ - tpk,f07a-0102
+ # TPK U.S.A. LLC Fusion 10.1" 1024 x 600 (WSVGA) LCD panel with capacitive touch
+ - tpk,f10a-0102
+ # United Radiant Technology UMSH-8596MD-xT 7.0" WVGA TFT LCD panel
+ # Supported are LVDS versions (-11T, -19T) and parallel ones
+ # (-T, -1T, -7T, -20T).
+ - urt,umsh-8596md-t
+ - urt,umsh-8596md-1t
+ - urt,umsh-8596md-7t
+ - urt,umsh-8596md-11t
+ - urt,umsh-8596md-19t
+ - urt,umsh-8596md-20t
+ # VXT 800x480 color TFT LCD panel
+ - vxt,vl050-8048nt-c01
+ # Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
+ - winstar,wf35ltiacd
backlight: true
enable-gpios: true
diff --git a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
new file mode 100644
index 000000000000..bd558ad7891f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
@@ -0,0 +1,227 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-timing.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: panel timing bindings
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Sam Ravnborg <sam@ravnborg.org>
+
+description: |
+ There are different ways of describing the timing data of a panel. The
+ devicetree representation corresponds to the one commonly found in datasheets
+ for panels.
+
+ The parameters are defined as seen in the following illustration.
+
+  +----------+-------------------------------------+----------+-------+
+  |          |        ^                            |          |       |
+  |          |        |vback_porch                 |          |       |
+  |          |        v                            |          |       |
+  +----------#######################################----------+-------+
+  |          #        ^                            #          |       |
+  |          #        |                            #          |       |
+  |  hback   #        |                            #  hfront  | hsync |
+  |  porch   #        |       hactive              #  porch   |  len  |
+  |<-------->#<-------+--------------------------->#<-------->|<----->|
+  |          #        |                            #          |       |
+  |          #        |vactive                     #          |       |
+  |          #        |                            #          |       |
+  |          #        v                            #          |       |
+  +----------#######################################----------+-------+
+  |          |        ^                            |          |       |
+  |          |        |vfront_porch                |          |       |
+  |          |        v                            |          |       |
+  +----------+-------------------------------------+----------+-------+
+  |          |        ^                            |          |       |
+  |          |        |vsync_len                   |          |       |
+  |          |        v                            |          |       |
+  +----------+-------------------------------------+----------+-------+
+
+
+  The following shows the panel timings with time on the x-axis.
+ This matches the timing diagrams often found in data sheets.
+
+           Active                Front            Sync           Back
+           Region                Porch                           Porch
+  <-----------------------><----------------><-------------><-------------->
+    //////////////////////|
+   ////////////////////// |
+  //////////////////////  |..................               ................
+                                              _______________
+
+ Timing can be specified either as a typical value or as a tuple
+ of min, typ, max values.
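+
+  For example, a hypothetical panel could use either form
+  (values illustrative only):
+
+    hfront-porch = <20>;          /* typical value only */
+    hfront-porch = <10 20 30>;    /* min, typ, max */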
+
+properties:
+
+ clock-frequency:
+ description: Panel clock in Hz
+
+ hactive:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Horizontal panel resolution in pixels
+
+ vactive:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Vertical panel resolution in pixels
+
+ hfront-porch:
+ description: Horizontal front porch panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of pixels
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
+
+ hback-porch:
+ description: Horizontal back porch timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of pixels
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
+
+ hsync-len:
+ description: Horizontal sync length panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of pixels
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
+
+ vfront-porch:
+ description: Vertical front porch panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of lines
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
+
+ vback-porch:
+ description: Vertical back porch panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of lines
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
+
+ vsync-len:
+ description: Vertical sync length panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of lines
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
+
+ hsync-active:
+ description: |
+ Horizontal sync pulse.
+ 0 selects active low, 1 selects active high.
+ If omitted then it is not used by the hardware
+ enum: [0, 1]
+
+ vsync-active:
+ description: |
+ Vertical sync pulse.
+ 0 selects active low, 1 selects active high.
+ If omitted then it is not used by the hardware
+ enum: [0, 1]
+
+ de-active:
+ description: |
+ Data enable.
+ 0 selects active low, 1 selects active high.
+ If omitted then it is not used by the hardware
+ enum: [0, 1]
+
+ pixelclk-active:
+ description: |
+ Data driving on rising or falling edge.
+ Use 0 to drive pixel data on falling edge and
+ sample data on rising edge.
+ Use 1 to drive pixel data on rising edge and
+ sample data on falling edge
+ enum: [0, 1]
+
+ syncclk-active:
+ description: |
+ Drive sync on rising or sample sync on falling edge.
+ If not specified then the setup is as specified by pixelclk-active.
+ Use 0 to drive sync on falling edge and
+ sample sync on rising edge of pixel clock.
+ Use 1 to drive sync on rising edge and
+ sample sync on falling edge of pixel clock
+ enum: [0, 1]
+
+ interlaced:
+ type: boolean
+ description: Enable interlaced mode
+
+ doublescan:
+ type: boolean
+ description: Enable double scan mode
+
+ doubleclk:
+ type: boolean
+ description: Enable double clock mode
+
+required:
+ - clock-frequency
+ - hactive
+ - vactive
+ - hfront-porch
+ - hback-porch
+ - hsync-len
+ - vfront-porch
+ - vback-porch
+ - vsync-len
+
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
deleted file mode 100644
index 0fbdab89ac3d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-QiaoDian XianShi Corporation 4"3 TFT LCD panel
-
-Required properties:
-- compatible: should be "qiaodian,qd43003c0-40"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
deleted file mode 100644
index cbb79ef3bfc9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Raydium Semiconductor Corporation RM68200 5.5" 720p MIPI-DSI TFT LCD panel
-
-The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
-panel connected using a MIPI-DSI video interface.
-
-Required properties:
- - compatible: "raydium,rm68200"
- - reg: the virtual channel number of a DSI peripheral
-
-Optional properties:
- - reset-gpios: a GPIO spec for the reset pin (active low).
- - power-supply: phandle of the regulator that provides the supply voltage.
- - backlight: phandle of the backlight device attached to the panel.
-
-Example:
-&dsi {
- ...
- panel@0 {
- compatible = "raydium,rm68200";
- reg = <0>;
- reset-gpios = <&gpiof 15 GPIO_ACTIVE_LOW>;
- power-supply = <&v1v8>;
- backlight = <&pwm_backlight>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
new file mode 100644
index 000000000000..a35ba16fc000
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm68200.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium Semiconductor Corporation RM68200 5.5" 720p MIPI-DSI TFT LCD panel
+
+maintainers:
+ - Philippe CORNU <philippe.cornu@st.com>
+
+description: |
+ The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
+ panel connected using a MIPI-DSI video interface.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+
+ compatible:
+ const: raydium,rm68200
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ backlight: true
+ enable-gpios: true
+ port: true
+ power-supply: true
+
+ reset-gpios:
+ maxItems: 1
+
+additionalProperties: false
+
+required:
+ - compatible
+ - power-supply
+ - reg
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "raydium,rm68200";
+ reg = <0>;
+ reset-gpios = <&gpiof 15 0>;
+ power-supply = <&v1v8>;
+ backlight = <&pwm_backlight>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
deleted file mode 100644
index eb1fb9f8d1f4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "rocktech,rk070er9427"
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LCD panel input.
-
-Example:
- panel {
- compatible = "rocktech,rk070er9427";
- backlight = <&backlight_lcd>;
-
- port {
- lcd_panel_in: endpoint {
- remote-endpoint = <&lcd_display_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt b/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
deleted file mode 100644
index dba298b43b24..000000000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung 12.2" (2560x1600 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,lsn122dl01-c01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt b/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt
deleted file mode 100644
index ef522c6bb85f..000000000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung Electronics 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,ltn101nt05"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt b/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt
deleted file mode 100644
index e7f969d891cc..000000000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,ltn140at29-301"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
new file mode 100644
index 000000000000..7a685d0428b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,s6e88a0-ams452ef01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung AMS452EF01 AMOLED panel with S6E88A0 video mode DSI controller
+
+maintainers:
+ - Michael Srba <Michael.Srba@seznam.cz>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: samsung,s6e88a0-ams452ef01
+ reg: true
+ reset-gpios: true
+ vdd3-supply:
+ description: core voltage supply
+ vci-supply:
+ description: voltage supply for analog circuits
+
+required:
+ - compatible
+ - reg
+ - vdd3-supply
+ - vci-supply
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ reg = <0>;
+
+ compatible = "samsung,s6e88a0-ams452ef01";
+
+ vdd3-supply = <&pm8916_l17>;
+ vci-supply = <&reg_vlcd_vci>;
+ reset-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
deleted file mode 100644
index 0753f6967279..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq035q7db03"
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt
deleted file mode 100644
index 95534b55ee5f..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp LQ070Y3DG3B 7.0" WVGA landscape TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq070y3dg3b"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
deleted file mode 100644
index 4aff25b8dfe6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq101k1ly04"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
deleted file mode 100644
index bcb0e8a29f71..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Sharp 12.3" (2400x1600 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq123p1jx31"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt b/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt
deleted file mode 100644
index fc1ea9e26c94..000000000000
--- a/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "shelly,sca07010-bfn-lnn"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt b/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
deleted file mode 100644
index 1e87fe6078af..000000000000
--- a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Starry 12.2" (1920x1200 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "starry,kr122ea0sra"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
deleted file mode 100644
index eb9501a82e25..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "tianma,tm070jdhg30"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
deleted file mode 100644
index b25261e63a6d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "tianma,tm070rvhg71"
-- power-supply: single regulator to provide the supply voltage
-- backlight: phandle of the backlight device attached to the panel
-
-Required nodes:
-- port: LVDS port mapping to connect this display
-
-This panel needs single power supply voltage. Its backlight is conntrolled
-via PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to iMX6Q based board
-
- panel: panel-lvds0 {
- compatible = "tianma,tm070rvhg71";
- backlight = <&backlight_lvds>;
- power-supply = <&reg_lvds>;
-
- port {
- panel_in_lvds0: endpoint {
- remote-endpoint = <&lvds0_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt b/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
deleted file mode 100644
index 89826116628c..000000000000
--- a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Toshiba 8.9" WXGA (1280x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "toshiba,lt089ac29000"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
deleted file mode 100644
index a2613b9675df..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TPK U.S.A. LLC Fusion 7" integrated projected capacitive touch display with,
-800 x 480 (WVGA) LCD panel.
-
-Required properties:
-- compatible: should be "tpk,f07a-0102"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
deleted file mode 100644
index b9d051196ba9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TPK U.S.A. LLC Fusion 10.1" integrated projected capacitive touch display with,
-1024 x 600 (WSVGA) LCD panel.
-
-Required properties:
-- compatible: should be "tpk,f10a-0102"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt b/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
deleted file mode 100644
index 088a6cea5015..000000000000
--- a/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-United Radiant Technology UMSH-8596MD-xT 7.0" WVGA TFT LCD panel
-
-Supported are LVDS versions (-11T, -19T) and parallel ones
-(-T, -1T, -7T, -20T).
-
-Required properties:
-- compatible: should be one of:
- "urt,umsh-8596md-t",
- "urt,umsh-8596md-1t",
- "urt,umsh-8596md-7t",
- "urt,umsh-8596md-11t",
- "urt,umsh-8596md-19t",
- "urt,umsh-8596md-20t".
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt b/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
deleted file mode 100644
index b42bf06bbd99..000000000000
--- a/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-VXT 800x480 color TFT LCD panel
-
-Required properties:
-- compatible: should be "vxt,vl050-8048nt-c01"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt b/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
deleted file mode 100644
index 2a7e6e3ba64c..000000000000
--- a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
-
-Required properties:
-- compatible: should be "winstar,wf35ltiacd"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Example:
- backlight: backlight {
- compatible = "pwm-backlight";
- pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 31 63 95 127 159 191 223 255>;
- default-brightness-level = <191>;
- power-supply = <&bl_reg>;
- };
-
- bl_reg: backlight_regulator {
- compatible = "regulator-fixed";
- regulator-name = "backlight-power-supply";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
-
- panel: panel {
- compatible = "winstar,wf35ltiacd", "simple-panel";
- backlight = <&backlight>;
- power-supply = <&panel_reg>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel_input: endpoint {
- remote-endpoint = <&hlcdc_panel_output>;
- };
- };
- };
-
- panel_reg: panel_regulator {
- compatible = "regulator-fixed";
- regulator-name = "panel-power-supply";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
index 22c91beb0541..d9fdb58e06b4 100644
--- a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
+++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
@@ -34,7 +34,7 @@ additionalProperties: false
examples:
- |
- dsi@ff450000 {
+ dsi {
#address-cells = <1>;
#size-cells = <0>;
reg = <0xff450000 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt
deleted file mode 100644
index 5707af89319d..000000000000
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Rockchip DRM master device
-================================
-
-The Rockchip DRM master device is a virtual device needed to list all
-vop devices or other display interface nodes that comprise the
-graphics subsystem.
-
-Required properties:
-- compatible: Should be "rockchip,display-subsystem"
-- ports: Should contain a list of phandles pointing to display interface port
- of vop devices. vop definitions as defined in
- Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
-
-example:
-
-display-subsystem {
- compatible = "rockchip,display-subsystem";
- ports = <&vopl_out>, <&vopb_out>;
-};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
new file mode 100644
index 000000000000..ec8ae742d4da
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip-drm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip DRM master device
+
+maintainers:
+ - Sandy Huang <hjc@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+ The Rockchip DRM master device is a virtual device needed to list all
+ vop devices or other display interface nodes that comprise the
+ graphics subsystem.
+
+properties:
+ compatible:
+ const: rockchip,display-subsystem
+
+ ports:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+      Should contain a list of phandles pointing to the display interface
+      ports of vop devices. vop definitions are described in
+ Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+
+required:
+ - compatible
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vopl_out>, <&vopb_out>;
+ };
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
deleted file mode 100644
index cd5c7186890a..000000000000
--- a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Sitronix ST7735R display panels
-
-This binding is for display panels using a Sitronix ST7735R controller in SPI
-mode.
-
-Required properties:
-- compatible: "jianda,jd-t18003-t01", "sitronix,st7735r"
-- dc-gpios: Display data/command selection (D/CX)
-- reset-gpios: Reset signal (RSTX)
-
-The node for this driver must be a child node of a SPI controller, hence
-all mandatory properties described in ../spi/spi-bus.txt must be specified.
-
-Optional properties:
-- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
- backlight: backlight {
- compatible = "gpio-backlight";
- gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
- };
-
- ...
-
- display@0{
- compatible = "jianda,jd-t18003-t01", "sitronix,st7735r";
- reg = <0>;
- spi-max-frequency = <32000000>;
- dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
- rotation = <270>;
- backlight = &backlight;
- };
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
new file mode 100644
index 000000000000..0cebaaefda03
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sitronix,st7735r.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7735R Display Panels Device Tree Bindings
+
+maintainers:
+ - David Lechner <david@lechnology.com>
+
+description:
+ This binding is for display panels using a Sitronix ST7715R or ST7735R
+ controller in SPI mode.
+
+allOf:
+ - $ref: panel/panel-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - description:
+ Adafruit 1.8" 160x128 Color TFT LCD (Product ID 358 or 618)
+ items:
+ - enum:
+ - jianda,jd-t18003-t01
+ - const: sitronix,st7735r
+ - description:
+        Okaya 1.44" 128x128 Color TFT LCD (e.g. Renesas YRSK-LCD-PMOD)
+ items:
+ - enum:
+ - okaya,rh128128t
+ - const: sitronix,st7715r
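+      # A board using the second variant would write, for example:
+      #   compatible = "okaya,rh128128t", "sitronix,st7715r";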
+
+ spi-max-frequency:
+ maximum: 32000000
+
+ dc-gpios:
+ maxItems: 1
+ description: Display data/command selection (D/CX)
+
+ backlight: true
+ reg: true
+ reset-gpios: true
+ rotation: true
+
+required:
+ - compatible
+ - reg
+ - dc-gpios
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ backlight: backlight {
+ compatible = "gpio-backlight";
+ gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
+ };
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+      display@0 {
+ compatible = "jianda,jd-t18003-t01", "sitronix,st7735r";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
+ rotation = <270>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
new file mode 100644
index 000000000000..cac61a998203
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,am65x-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments AM65x Display Subsystem
+
+maintainers:
+ - Jyri Sarha <jsarha@ti.com>
+ - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+description: |
+  The AM65x TI Keystone Display SubSystem has two output ports and
+  two video planes. The first video port supports OLDI and the second
+  supports DPI format. The first plane is a full video plane with all
+  features and the second is a "lite plane" without scaling support.
+
+properties:
+ compatible:
+ const: ti,am65x-dss
+
+ reg:
+ description:
+      Addresses of each DSS memory region described in the SoC's TRM.
+ items:
+ - description: common DSS register area
+ - description: VIDL1 light video plane
+ - description: VID video plane
+ - description: OVR1 overlay manager for vp1
+ - description: OVR2 overlay manager for vp2
+ - description: VP1 video port 1
+ - description: VP2 video port 2
+
+ reg-names:
+ items:
+ - const: common
+ - const: vidl1
+ - const: vid
+ - const: ovr1
+ - const: ovr2
+ - const: vp1
+ - const: vp2
+
+ clocks:
+ items:
+ - description: fck DSS functional clock
+ - description: vp1 Video Port 1 pixel clock
+ - description: vp2 Video Port 2 pixel clock
+
+ clock-names:
+ items:
+ - const: fck
+ - const: vp1
+ - const: vp2
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ ports:
+ type: object
+ description:
+      Ports as described in Documentation/devicetree/bindings/graph.txt
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ description:
+        The DSS OLDI output port node from video port 1
+
+ port@1:
+ type: object
+ description:
+ The DSS DPI output port node from video port 2
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ ti,am65x-oldi-io-ctrl:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ - maxItems: 1
+ description:
+ phandle to syscon device node mapping OLDI IO_CTRL registers.
+      The mapped range should cover the OLDI_DAT0_IO_CTRL register and
+      the following OLDI_DAT1_IO_CTRL, OLDI_DAT2_IO_CTRL,
+      OLDI_DAT3_IO_CTRL, and OLDI_CLK_IO_CTRL registers. This property
+      is required for the OLDI interface to work.
+
+ max-memory-bandwidth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Input memory (from main memory to dispc) bandwidth limit in
+ bytes per second
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ dss: dss@04a00000 {
+ compatible = "ti,am65x-dss";
+ reg = <0x0 0x04a00000 0x0 0x1000>, /* common */
+ <0x0 0x04a02000 0x0 0x1000>, /* vidl1 */
+ <0x0 0x04a06000 0x0 0x1000>, /* vid */
+ <0x0 0x04a07000 0x0 0x1000>, /* ovr1 */
+ <0x0 0x04a08000 0x0 0x1000>, /* ovr2 */
+ <0x0 0x04a0a000 0x0 0x1000>, /* vp1 */
+ <0x0 0x04a0b000 0x0 0x1000>; /* vp2 */
+ reg-names = "common", "vidl1", "vid",
+ "ovr1", "ovr2", "vp1", "vp2";
+ ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+ power-domains = <&k3_pds 67 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 67 1>,
+ <&k3_clks 216 1>,
+ <&k3_clks 67 2>;
+ clock-names = "fck", "vp1", "vp2";
+ interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ oldi_out0: endpoint {
+ remote-endpoint = <&lcd_in0>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
new file mode 100644
index 000000000000..ade9b2f513f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,j721e-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments J721E Display Subsystem
+
+maintainers:
+ - Jyri Sarha <jsarha@ti.com>
+ - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+description: |
+  The J721E TI Keystone Display SubSystem has four output ports and
+  four video planes. There are two full video planes and two "lite
+ planes" without scaling support. The video ports can be connected to
+ the SoC's DPI pins or to integrated display bridges on the SoC.
+
+properties:
+ compatible:
+ const: ti,j721e-dss
+
+ reg:
+ items:
+ - description: common_m DSS Master common
+ - description: common_s0 DSS Shared common 0
+ - description: common_s1 DSS Shared common 1
+ - description: common_s2 DSS Shared common 2
+ - description: VIDL1 light video plane 1
+ - description: VIDL2 light video plane 2
+ - description: VID1 video plane 1
+      - description: VID2 video plane 2
+ - description: OVR1 overlay manager for vp1
+ - description: OVR2 overlay manager for vp2
+ - description: OVR3 overlay manager for vp3
+ - description: OVR4 overlay manager for vp4
+ - description: VP1 video port 1
+ - description: VP2 video port 2
+ - description: VP3 video port 3
+ - description: VP4 video port 4
+ - description: WB Write Back
+
+ reg-names:
+ items:
+ - const: common_m
+ - const: common_s0
+ - const: common_s1
+ - const: common_s2
+ - const: vidl1
+ - const: vidl2
+ - const: vid1
+ - const: vid2
+ - const: ovr1
+ - const: ovr2
+ - const: ovr3
+ - const: ovr4
+ - const: vp1
+ - const: vp2
+ - const: vp3
+ - const: vp4
+ - const: wb
+
+ clocks:
+ items:
+ - description: fck DSS functional clock
+ - description: vp1 Video Port 1 pixel clock
+ - description: vp2 Video Port 2 pixel clock
+ - description: vp3 Video Port 3 pixel clock
+ - description: vp4 Video Port 4 pixel clock
+
+ clock-names:
+ items:
+ - const: fck
+ - const: vp1
+ - const: vp2
+ - const: vp3
+ - const: vp4
+
+ interrupts:
+ items:
+ - description: common_m DSS Master common
+ - description: common_s0 DSS Shared common 0
+ - description: common_s1 DSS Shared common 1
+ - description: common_s2 DSS Shared common 2
+
+ interrupt-names:
+ items:
+ - const: common_m
+ - const: common_s0
+ - const: common_s1
+ - const: common_s2
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ ports:
+ type: object
+ description:
+      Ports as described in Documentation/devicetree/bindings/graph.txt
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ description:
+        The output port node from video port 1
+
+ port@1:
+ type: object
+ description:
+ The output port node from video port 2
+
+ port@2:
+ type: object
+ description:
+ The output port node from video port 3
+
+ port@3:
+ type: object
+ description:
+ The output port node from video port 4
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ max-memory-bandwidth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Input memory (from main memory to dispc) bandwidth limit in
+ bytes per second
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ dss: dss@04a00000 {
+ compatible = "ti,j721e-dss";
+ reg = <0x00 0x04a00000 0x00 0x10000>, /* common_m */
+ <0x00 0x04a10000 0x00 0x10000>, /* common_s0*/
+ <0x00 0x04b00000 0x00 0x10000>, /* common_s1*/
+ <0x00 0x04b10000 0x00 0x10000>, /* common_s2*/
+ <0x00 0x04a20000 0x00 0x10000>, /* vidl1 */
+ <0x00 0x04a30000 0x00 0x10000>, /* vidl2 */
+ <0x00 0x04a50000 0x00 0x10000>, /* vid1 */
+ <0x00 0x04a60000 0x00 0x10000>, /* vid2 */
+ <0x00 0x04a70000 0x00 0x10000>, /* ovr1 */
+ <0x00 0x04a90000 0x00 0x10000>, /* ovr2 */
+ <0x00 0x04ab0000 0x00 0x10000>, /* ovr3 */
+ <0x00 0x04ad0000 0x00 0x10000>, /* ovr4 */
+ <0x00 0x04a80000 0x00 0x10000>, /* vp1 */
+ <0x00 0x04aa0000 0x00 0x10000>, /* vp2 */
+ <0x00 0x04ac0000 0x00 0x10000>, /* vp3 */
+ <0x00 0x04ae0000 0x00 0x10000>, /* vp4 */
+ <0x00 0x04af0000 0x00 0x10000>; /* wb */
+ reg-names = "common_m", "common_s0",
+ "common_s1", "common_s2",
+                "vidl1", "vidl2", "vid1", "vid2",
+ "ovr1", "ovr2", "ovr3", "ovr4",
+ "vp1", "vp2", "vp3", "vp4",
+ "wb";
+ clocks = <&k3_clks 152 0>,
+ <&k3_clks 152 1>,
+ <&k3_clks 152 4>,
+ <&k3_clks 152 9>,
+ <&k3_clks 152 13>;
+ clock-names = "fck", "vp1", "vp2", "vp3", "vp4";
+ power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common_m",
+ "common_s0",
+ "common_s1",
+ "common_s2";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+
+ dpi_out_0: endpoint {
+ remote-endpoint = <&dp_bridge_input>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
new file mode 100644
index 000000000000..385bd060ccf9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,k2g-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments K2G Display Subsystem
+
+maintainers:
+ - Jyri Sarha <jsarha@ti.com>
+ - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+description: |
+ The K2G DSS is an ultra-light version of TI Keystone Display
+  SubSystem. It has only one output port and one video plane. The
+ output is DPI.
+
+properties:
+ compatible:
+ const: ti,k2g-dss
+
+ reg:
+ items:
+ - description: cfg DSS top level
+ - description: common DISPC common
+ - description: VID1 video plane 1
+ - description: OVR1 overlay manager for vp1
+ - description: VP1 video port 1
+
+ reg-names:
+ items:
+ - const: cfg
+ - const: common
+ - const: vid1
+ - const: ovr1
+ - const: vp1
+
+ clocks:
+ items:
+ - description: fck DSS functional clock
+ - description: vp1 Video Port 1 pixel clock
+
+ clock-names:
+ items:
+ - const: fck
+ - const: vp1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ port:
+ type: object
+ description:
+      Port as described in Documentation/devicetree/bindings/graph.txt.
+ The DSS DPI output port node
+
+ max-memory-bandwidth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Input memory (from main memory to dispc) bandwidth limit in
+ bytes per second
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ dss: dss@02540000 {
+ compatible = "ti,k2g-dss";
+ reg = <0x02540000 0x400>,
+ <0x02550000 0x1000>,
+ <0x02557000 0x1000>,
+ <0x0255a800 0x100>,
+ <0x0255ac00 0x100>;
+ reg-names = "cfg", "common", "vid1", "ovr1", "vp1";
+ clocks = <&k2g_clks 0x2 0>,
+ <&k2g_clks 0x2 1>;
+ clock-names = "fck", "vp1";
+ interrupts = <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>;
+
+ power-domains = <&k2g_pds 0x2>;
+
+ max-memory-bandwidth = <230000000>;
+
+ port {
+ dpi_out: endpoint {
+ remote-endpoint = <&sii9022_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index f1f95f678739..e8f6c42e80f2 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -16,6 +16,7 @@ Required Properties:
- "renesas,r8a7794-usb-dmac" (R-Car E2)
- "renesas,r8a7795-usb-dmac" (R-Car H3)
- "renesas,r8a7796-usb-dmac" (R-Car M3-W)
+ - "renesas,r8a77961-usb-dmac" (R-Car M3-W+)
- "renesas,r8a77965-usb-dmac" (R-Car M3-N)
- "renesas,r8a77990-usb-dmac" (R-Car E3)
- "renesas,r8a77995-usb-dmac" (R-Car D3)
diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
index 2ca3ddbe1ff4..e7f2ad7dab5e 100644
--- a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
+++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
@@ -43,6 +43,8 @@ required:
- interrupts
- '#dma-cells'
+additionalProperties: false
+
examples:
- |
dma@3000000 {
diff --git a/Documentation/devicetree/bindings/dma/socionext,uniphier-mio-dmac.yaml b/Documentation/devicetree/bindings/dma/socionext,uniphier-mio-dmac.yaml
new file mode 100644
index 000000000000..e7bf6dd7da29
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/socionext,uniphier-mio-dmac.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/socionext,uniphier-mio-dmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier Media IO DMA controller
+
+description: |
+ This works as an external DMA engine for SD/eMMC controllers etc.
+ found in UniPhier LD4, Pro4, sLD8 SoCs.
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ const: socionext,uniphier-mio-dmac
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: |
+ A list of interrupt specifiers associated with the DMA channels.
+ The number of interrupt lines is SoC-dependent.
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ '#dma-cells':
+ description: The single cell represents the channel index.
+ const: 1
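+    # e.g. a client node would select channel 4 (channel number
+    # illustrative) with:
+    #   dmas = <&dmac 4>;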
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - '#dma-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ // In the example below, "interrupts = <0 68 4>, <0 68 4>, ..." is not a
+ // typo. The first two channels share a single interrupt line.
+
+ dmac: dma-controller@5a000000 {
+ compatible = "socionext,uniphier-mio-dmac";
+ reg = <0x5a000000 0x1000>;
+ interrupts = <0 68 4>, <0 68 4>, <0 69 4>, <0 70 4>,
+ <0 71 4>, <0 72 4>, <0 73 4>, <0 74 4>;
+ clocks = <&mio_clk 7>;
+ resets = <&mio_rst 7>;
+ #dma-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml b/Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml
new file mode 100644
index 000000000000..86cfb599256e
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/socionext,uniphier-xdmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier external DMA controller
+
+description: |
+  This describes the devicetree bindings for an external DMA engine that
+  performs memory-to-memory or peripheral-to-memory data transfers and
+  supports up to 16 channels, implemented in Socionext UniPhier SoCs.
+
+maintainers:
+ - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ const: socionext,uniphier-xdmac
+
+ reg:
+ items:
+ - description: XDMAC base register region (offset and length)
+ - description: XDMAC extension register region (offset and length)
+
+ interrupts:
+ maxItems: 1
+
+ "#dma-cells":
+ const: 2
+ description: |
+ A DMA request from a client consists of 2 cells:
+ 1. Channel index
+ 2. Transfer request factor number; if there is no transfer factor, use 0.
+ The number is SoC-specific and should be specified according to the
+ device that uses the DMA controller.
+
+ dma-channels:
+ minimum: 1
+ maximum: 16
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#dma-cells"
+
+examples:
+ - |
+ xdmac: dma-controller@5fc10000 {
+ compatible = "socionext,uniphier-xdmac";
+ reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
+ interrupts = <0 188 4>;
+ #dma-cells = <2>;
+ dma-channels = <16>;
+ };
+
+...
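For the two-cell scheme, a client passes <channel-index transfer-request-factor>;
a hedged sketch of a hypothetical consumer (node, address, and cell values are
illustrative only):

	// Hypothetical client: channels 4/5, request factors 22/23 (illustrative).
	spi@54006000 {
		compatible = "vendor,example-spi"; // placeholder client
		reg = <0x54006000 0x100>;
		dmas = <&xdmac 4 22>, <&xdmac 5 23>;
		dma-names = "tx", "rx";
	};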
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 0e1398f93aa2..29fcd37082e8 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -180,7 +180,7 @@ edma1_tptc0: tptc@27b0000 {
};
edma1_tptc1: tptc@27b8000 {
- compatible = "ti, k2g-edma3-tptc", "ti,edma3-tptc";
+ compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc";
reg = <0x027b8000 0x400>;
power-domains = <&k2g_pds 0x4f>;
};
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 34780d7535b8..39ea05e6e5ff 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
@@ -45,7 +45,8 @@ allOf:
properties:
"#dma-cells":
- const: 1
+ minimum: 1
+ maximum: 2
description: |
The cell is the PSI-L thread ID of the remote (to UDMAP) end.
Valid ranges for thread ID depends on the data movement direction:
@@ -55,6 +56,8 @@ properties:
Please refer to the device documentation for the PSI-L thread map and also
the PSI-L peripheral chapter for the correct thread ID.
+ When #dma-cells is 2, the second parameter is the channel ATYPE.
+
compatible:
enum:
- ti,am654-navss-main-udmap
@@ -131,6 +134,20 @@ required:
- ti,sci-rm-range-rchan
- ti,sci-rm-range-rflow
+if:
+ properties:
+ "#dma-cells":
+ const: 2
+then:
+ properties:
+ ti,udma-atype:
+ description: ATYPE value which should be used by non-slave channels
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+
+ required:
+ - ti,udma-atype
+
examples:
- |+
cbass_main {
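When #dma-cells is 2, a client supplies the PSI-L thread ID followed by the
ATYPE. A minimal sketch under assumed values; the &main_udmap label, thread
IDs 0xc400/0x4400, and ATYPE 0 are illustrative, not taken from this patch:

	/* Hypothetical client of a UDMAP with #dma-cells = <2>;
	 * all values below are illustrative only. */
	peripheral {
		dmas = <&main_udmap 0xc400 0>, <&main_udmap 0x4400 0>;
		dma-names = "tx", "rx";
	};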
diff --git a/Documentation/devicetree/bindings/dma/uniphier-mio-dmac.txt b/Documentation/devicetree/bindings/dma/uniphier-mio-dmac.txt
deleted file mode 100644
index b12388dc7eac..000000000000
--- a/Documentation/devicetree/bindings/dma/uniphier-mio-dmac.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-UniPhier Media IO DMA controller
-
-This works as an external DMA engine for SD/eMMC controllers etc.
-found in UniPhier LD4, Pro4, sLD8 SoCs.
-
-Required properties:
-- compatible: should be "socionext,uniphier-mio-dmac".
-- reg: offset and length of the register set for the device.
-- interrupts: a list of interrupt specifiers associated with the DMA channels.
-- clocks: a single clock specifier.
-- #dma-cells: should be <1>. The single cell represents the channel index.
-
-Example:
- dmac: dma-controller@5a000000 {
- compatible = "socionext,uniphier-mio-dmac";
- reg = <0x5a000000 0x1000>;
- interrupts = <0 68 4>, <0 68 4>, <0 69 4>, <0 70 4>,
- <0 71 4>, <0 72 4>, <0 73 4>, <0 74 4>;
- clocks = <&mio_clk 7>;
- #dma-cells = <1>;
- };
-
-Note:
-In the example above, "interrupts = <0 68 4>, <0 68 4>, ..." is not a typo.
-The first two channels share a single interrupt line.
diff --git a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
index f04870d84542..a5dc070d0ca7 100644
--- a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
+++ b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
@@ -68,6 +68,8 @@ required:
- mbox-names
- memory-region
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/firmware/imx/rsrc.h>
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index 0f6d8db18d6c..a15787e504f0 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -172,6 +172,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/example-schema.yaml b/Documentation/devicetree/bindings/example-schema.yaml
index 4ddcf709cc3c..62811a1b5058 100644
--- a/Documentation/devicetree/bindings/example-schema.yaml
+++ b/Documentation/devicetree/bindings/example-schema.yaml
@@ -7,9 +7,9 @@
# $id is a unique identifier based on the filename. There may or may not be a
# file present at the URL.
-$id: "http://devicetree.org/schemas/example-schema.yaml#"
+$id: http://devicetree.org/schemas/example-schema.yaml#
# $schema is the meta-schema this schema should be validated with.
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+$schema: http://devicetree.org/meta-schemas/core.yaml#
title: An example schema annotated with jsonschema details
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt
deleted file mode 100644
index 8e8625c00dfa..000000000000
--- a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-ChromeOS EC USB Type-C cable and accessories detection
-
-On ChromeOS systems with USB Type C ports, the ChromeOS Embedded Controller is
-able to detect the state of external accessories such as display adapters
-or USB devices when said accessories are attached or detached.
-
-The node for this device must be under a cros-ec node like google,cros-ec-spi
-or google,cros-ec-i2c.
-
-Required properties:
-- compatible: Should be "google,extcon-usbc-cros-ec".
-- google,usb-port-id: Specifies the USB port ID to use.
-
-Example:
- cros-ec@0 {
- compatible = "google,cros-ec-i2c";
-
- ...
-
- extcon {
- compatible = "google,extcon-usbc-cros-ec";
- google,usb-port-id = <0>;
- };
- }
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
new file mode 100644
index 000000000000..9c5849b341ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/extcon-usbc-cros-ec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ChromeOS EC USB Type-C cable and accessories detection
+
+maintainers:
+ - Benson Leung <bleung@chromium.org>
+ - Enric Balletbo i Serra <enric.balletbo@collabora.com>
+
+description: |
+ On ChromeOS systems with USB Type-C ports, the ChromeOS Embedded Controller is
+ able to detect the state of external accessories such as display adapters
+ or USB devices when said accessories are attached or detached.
+ The node for this device must be under a cros-ec node like google,cros-ec-spi
+ or google,cros-ec-i2c.
+
+properties:
+ compatible:
+ const: google,extcon-usbc-cros-ec
+
+ google,usb-port-id:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: The USB port ID to use.
+ minimum: 0
+ maximum: 255
+
+required:
+ - compatible
+ - google,usb-port-id
+
+additionalProperties: false
+
+examples:
+ - |
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cros-ec@0 {
+ compatible = "google,cros-ec-spi";
+ reg = <0>;
+
+ usbc_extcon0: extcon0 {
+ compatible = "google,extcon-usbc-cros-ec";
+ google,usb-port-id = <0>;
+ };
+
+ usbc_extcon1: extcon1 {
+ compatible = "google,extcon-usbc-cros-ec";
+ google,usb-port-id = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
index 878a2079ebb6..1bd2870c3a9c 100644
--- a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
+++ b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
@@ -34,9 +34,12 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
npe@c8006000 {
compatible = "intel,ixp4xx-network-processing-engine";
reg = <0xc8006000 0x1000>, <0xc8007000 0x1000>, <0xc8008000 0x1000>;
};
+...
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 3f29ea04b5fe..354b448fc0c3 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -10,6 +10,7 @@ Required properties:
* "qcom,scm-apq8064"
* "qcom,scm-apq8084"
* "qcom,scm-ipq4019"
+ * "qcom,scm-ipq806x"
* "qcom,scm-msm8660"
* "qcom,scm-msm8916"
* "qcom,scm-msm8960"
diff --git a/Documentation/devicetree/bindings/gnss/gnss.txt b/Documentation/devicetree/bindings/gnss/gnss.txt
index f547bd4549fe..d6dc9c0d8249 100644
--- a/Documentation/devicetree/bindings/gnss/gnss.txt
+++ b/Documentation/devicetree/bindings/gnss/gnss.txt
@@ -8,7 +8,7 @@ bus (e.g. UART, I2C or SPI).
Please refer to the following documents for generic properties:
- Documentation/devicetree/bindings/serial/slave-device.txt
+ Documentation/devicetree/bindings/serial/serial.yaml
Documentation/devicetree/bindings/spi/spi-bus.txt
Required properties:
diff --git a/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml b/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
index 64e279a4bc10..5f1ed20e43ee 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
@@ -47,6 +47,8 @@ required:
- "#gpio-cells"
- gpio-controller
+additionalProperties: false
+
dependencies:
interrupt-controller: [ interrupts ]
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 2e097b57f170..0fc6700ed800 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,7 +14,7 @@ Required properties:
"marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
SoCs (either from AP or CP), see
- Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+ Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
for specific details about the offset property.
- reg: Address and length of the register set for the device. Only one
diff --git a/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt b/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
deleted file mode 100644
index f281f12dac18..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-UniPhier GPIO controller
-
-Required properties:
-- compatible: Should be "socionext,uniphier-gpio".
-- reg: Specifies offset and length of the register set for the device.
-- gpio-controller: Marks the device node as a GPIO controller.
-- #gpio-cells: Should be 2. The first cell is the pin number and the second
- cell is used to specify optional parameters.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Should be 2. The first cell defines the interrupt number.
- The second cell bits[3:0] is used to specify trigger type as follows:
- 1 = low-to-high edge triggered
- 2 = high-to-low edge triggered
- 4 = active high level-sensitive
- 8 = active low level-sensitive
- Valid combinations are 1, 2, 3, 4, 8.
-- ngpios: Specifies the number of GPIO lines.
-- gpio-ranges: Mapping to pin controller pins (as described in gpio.txt)
-- socionext,interrupt-ranges: Specifies an interrupt number mapping between
- this GPIO controller and its interrupt parent, in the form of arbitrary
- number of <child-interrupt-base parent-interrupt-base length> triplets.
-
-Optional properties:
-- gpio-ranges-group-names: Used for named gpio ranges (as described in gpio.txt)
-
-Example:
- gpio: gpio@55000000 {
- compatible = "socionext,uniphier-gpio";
- reg = <0x55000000 0x200>;
- interrupt-parent = <&aidet>;
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl 0 0 0>;
- gpio-ranges-group-names = "gpio_range";
- ngpios = <248>;
- socionext,interrupt-ranges = <0 48 16>, <16 154 5>, <21 217 3>;
- };
-
-Consumer Example:
-
- sdhci0_pwrseq {
- compatible = "mmc-pwrseq-emmc";
- reset-gpios = <&gpio UNIPHIER_GPIO_PORT(29, 4) GPIO_ACTIVE_LOW>;
- };
-
-Please note UNIPHIER_GPIO_PORT(29, 4) represents PORT294 in the SoC document.
-Unfortunately, only the one's place is octal in the port numbering. (That is,
-PORT 8, 9, 18, 19, 28, 29, ... are missing.) UNIPHIER_GPIO_PORT() is a helper
-macro to calculate 29 * 8 + 4.
diff --git a/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml b/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
new file mode 100644
index 000000000000..c58ff9a94f45
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/socionext,uniphier-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier GPIO controller
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ $nodename:
+ pattern: "^gpio@[0-9a-f]+$"
+
+ compatible:
+ const: socionext,uniphier-gpio
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ description: |
+ The first cell defines the interrupt number.
+ Bits [3:0] of the second cell specify the trigger type as follows:
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered
+ 4 = active high level-sensitive
+ 8 = active low level-sensitive
+ Valid combinations are 1, 2, 3, 4, 8.
+ const: 2
+
+ ngpios:
+ minimum: 0
+ maximum: 512
+
+ gpio-ranges: true
+
+ gpio-ranges-group-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+
+ socionext,interrupt-ranges:
+ description: |
+ Specifies an interrupt number mapping between this GPIO controller and
+ its interrupt parent, in the form of an arbitrary number of
+ <child-interrupt-base parent-interrupt-base length> triplets.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - interrupt-controller
+ - "#interrupt-cells"
+ - ngpios
+ - gpio-ranges
+ - socionext,interrupt-ranges
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/gpio/uniphier-gpio.h>
+
+ gpio: gpio@55000000 {
+ compatible = "socionext,uniphier-gpio";
+ reg = <0x55000000 0x200>;
+ interrupt-parent = <&aidet>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 0>;
+ gpio-ranges-group-names = "gpio_range";
+ ngpios = <248>;
+ socionext,interrupt-ranges = <0 48 16>, <16 154 5>, <21 217 3>;
+ };
+
+ // Consumer:
+ // Please note UNIPHIER_GPIO_PORT(29, 4) represents PORT294 in the SoC
+ // document. Unfortunately, only the one's place is octal in the port
+ // numbering. (That is, PORT 8, 9, 18, 19, 28, 29, ... do not exist.)
+ // UNIPHIER_GPIO_PORT() is a helper macro to calculate 29 * 8 + 4.
+ sdhci0_pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio UNIPHIER_GPIO_PORT(29, 4) GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml b/Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml
index d102888c1be7..a36aec27069c 100644
--- a/Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/xylon,logicvc-gpio.yaml
@@ -49,6 +49,8 @@ required:
- "#gpio-cells"
- gpio-controller
+additionalProperties: false
+
examples:
- |
logicvc: logicvc@43c00000 {
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index e8b99adcb1bd..0b229a7d4a98 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -43,6 +43,9 @@ properties:
operating-points-v2: true
+ resets:
+ maxItems: 2
+
required:
- compatible
- reg
@@ -50,6 +53,8 @@ required:
- interrupt-names
- clocks
+additionalProperties: false
+
allOf:
- if:
properties:
@@ -57,9 +62,6 @@ allOf:
contains:
const: amlogic,meson-g12a-mali
then:
- properties:
- resets:
- minItems: 2
required:
- resets
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
index 8d966f3ff3db..0407e45eb8c4 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
@@ -75,6 +75,9 @@ properties:
mali-supply: true
+ power-domains:
+ maxItems: 1
+
resets:
minItems: 1
maxItems: 2
@@ -91,6 +94,8 @@ required:
- interrupt-names
- clocks
+additionalProperties: false
+
allOf:
- if:
properties:
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index afde81be3c29..f5401cc8de4a 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -115,6 +115,8 @@ required:
- clocks
- clock-names
+additionalProperties: false
+
allOf:
- if:
properties:
diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.yaml b/Documentation/devicetree/bindings/gpu/samsung-rotator.yaml
index f4dfa6fc724c..665c6e3b31d3 100644
--- a/Documentation/devicetree/bindings/gpu/samsung-rotator.yaml
+++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.yaml
@@ -36,6 +36,8 @@ required:
- clocks
- clock-names
+additionalProperties: false
+
examples:
- |
rotator@12810000 {
diff --git a/Documentation/devicetree/bindings/gpu/vivante,gc.yaml b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
new file mode 100644
index 000000000000..0bc4b38d5cbb
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/vivante,gc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Vivante GPU Bindings
+
+description: Vivante GPU core devices
+
+maintainers:
+ - Lucas Stach <l.stach@pengutronix.de>
+
+properties:
+ compatible:
+ const: vivante,gc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: AXI/master interface clock
+ - description: GPU core clock
+ - description: Shader clock (only required if GPU has feature PIPE_3D)
+ - description: AHB/slave interface clock (only required if GPU can gate slave interface independently)
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ items:
+ enum: [ bus, core, shader, reg ]
+ minItems: 1
+ maxItems: 4
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ gpu@130000 {
+ compatible = "vivante,gc";
+ reg = <0x00130000 0x4000>;
+ interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_GPU3D_AXI>,
+ <&clks IMX6QDL_CLK_GPU3D_CORE>,
+ <&clks IMX6QDL_CLK_GPU3D_SHADER>;
+ clock-names = "bus", "core", "shader";
+ power-domains = <&gpc 1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml b/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
index 2a9822075b36..154bee851139 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
@@ -47,6 +47,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
index 6a742a51e2f9..44a63fffb4be 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
@@ -87,6 +87,8 @@ required:
- reg
+additionalProperties: false
+
examples:
- |
spi {
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
index 5d42e1304202..e8feee38c76c 100644
--- a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
@@ -32,6 +32,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
index 168235ad5d81..3f043e943668 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
@@ -76,6 +76,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml b/Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
new file mode 100644
index 000000000000..edbca2476128
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/brcm,brcmstb-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom STB BSC IIC Master Controller
+
+maintainers:
+ - Kamal Dasu <kdasu.kdev@gmail.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm2711-hdmi-i2c
+ - brcm,brcmstb-i2c
+ - brcm,brcmper-i2c
+
+ reg:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: BSC register range
+ - description: Auto-I2C register range
+
+ reg-names:
+ items:
+ - const: bsc
+ - const: auto-i2c
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ maxItems: 1
+
+ clock-frequency:
+ enum:
+ - 46875
+ - 50000
+ - 93750
+ - 97500
+ - 187500
+ - 200000
+ - 375000
+ - 390000
+
+required:
+ - compatible
+ - reg
+ - clock-frequency
+
+unevaluatedProperties: false
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm2711-hdmi-i2c
+
+then:
+ properties:
+ reg:
+ minItems: 2
+
+ required:
+ - reg-names
+
+else:
+ properties:
+ reg:
+ maxItems: 1
+
+examples:
+ - |
+ bsca: i2c@f0406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&irq0_intc>;
+ reg = <0xf0406200 0x58>;
+ interrupts = <0x18>;
+ interrupt-names = "upg_bsca";
+ };
+
+ - |
+ ddc0: i2c@7ef04500 {
+ compatible = "brcm,bcm2711-hdmi-i2c";
+ reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>;
+ reg-names = "bsc", "auto-i2c";
+ clock-frequency = <390000>;
+ };
+
+...
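Because the schema references i2c-controller.yaml, child devices sit directly
under the bus node. A hedged sketch with a hypothetical slave device; the
EEPROM compatible and address are assumptions for illustration:

	bsca: i2c@f0406200 {
		compatible = "brcm,brcmstb-i2c";
		reg = <0xf0406200 0x58>;
		clock-frequency = <390000>;
		#address-cells = <1>;
		#size-cells = <0>;

		eeprom@50 {
			compatible = "atmel,24c02"; // assumed child device
			reg = <0x50>;
		};
	};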
diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
index d4bad86107b8..96c914e048f5 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
@@ -28,8 +28,13 @@ Optional properties:
"atmel,sama5d4-i2c",
"atmel,sama5d2-i2c",
"microchip,sam9x60-i2c".
+- scl-gpios: specify the GPIO used for the SCL pin
+- sda-gpios: specify the GPIO used for the SDA pin
+- pinctrl: add an extra pinctrl state, named "gpio", that configures the i2c
+ pins as GPIOs for i2c bus recovery
- Child nodes conforming to i2c bus binding
+
Examples :
i2c0: i2c@fff84000 {
@@ -64,6 +69,11 @@ i2c0: i2c@f8034600 {
clocks = <&flx0>;
atmel,fifo-size = <16>;
i2c-sda-hold-time-ns = <336>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ sda-gpios = <&pioA 30 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 31 GPIO_ACTIVE_HIGH>;
wm8731: wm8731@1a {
compatible = "wm8731";
diff --git a/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt b/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
deleted file mode 100644
index 0380609b177a..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Broadcom stb bsc iic master controller
-
-Required properties:
-
-- compatible: should be "brcm,brcmstb-i2c" or "brcm,brcmper-i2c"
-- clock-frequency: 32-bit decimal value of iic master clock freqency in Hz
- valid values are 375000, 390000, 187500, 200000
- 93750, 97500, 46875 and 50000
-- reg: specifies the base physical address and size of the registers
-
-Optional properties :
-
-- interrupts: specifies the interrupt number, the irq line to be used
-- interrupt-names: Interrupt name string
-
-Example:
-
-bsca: i2c@f0406200 {
- clock-frequency = <390000>;
- compatible = "brcm,brcmstb-i2c";
- interrupt-parent = <&irq0_intc>;
- reg = <0xf0406200 0x58>;
- interrupts = <0x18>;
- interrupt-names = "upg_bsca";
-};
-
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
deleted file mode 100644
index 22f2eeb2c4c9..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-* Rockchip RK3xxx I2C controller
-
-This driver interfaces with the native I2C controller present in Rockchip
-RK3xxx SoCs.
-
-Required properties :
-
- - reg : Offset and length of the register set for the device
- - compatible: should be one of the following:
- - "rockchip,rv1108-i2c": for rv1108
- - "rockchip,rk3066-i2c": for rk3066
- - "rockchip,rk3188-i2c": for rk3188
- - "rockchip,rk3228-i2c": for rk3228
- - "rockchip,rk3288-i2c": for rk3288
- - "rockchip,rk3328-i2c", "rockchip,rk3399-i2c": for rk3328
- - "rockchip,rk3399-i2c": for rk3399
- - interrupts : interrupt number
- - clocks: See ../clock/clock-bindings.txt
- - For older hardware (rk3066, rk3188, rk3228, rk3288):
- - There is one clock that's used both to derive the functional clock
- for the device and as the bus clock.
- - For newer hardware (rk3399): specified by name
- - "i2c": This is used to derive the functional clock.
- - "pclk": This is the bus clock.
-
-Required on RK3066, RK3188 :
-
- - rockchip,grf : the phandle of the syscon node for the general register
- file (GRF)
- - on those SoCs an alias with the correct I2C bus ID (bit offset in the GRF)
- is also required.
-
-Optional properties :
-
- - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
- - i2c-scl-rising-time-ns : Number of nanoseconds the SCL signal takes to rise
- (t(r) in I2C specification). If not specified this is assumed to be
- the maximum the specification allows(1000 ns for Standard-mode,
- 300 ns for Fast-mode) which might cause slightly slower communication.
- - i2c-scl-falling-time-ns : Number of nanoseconds the SCL signal takes to fall
- (t(f) in the I2C specification). If not specified this is assumed to
- be the maximum the specification allows (300 ns) which might cause
- slightly slower communication.
- - i2c-sda-falling-time-ns : Number of nanoseconds the SDA signal takes to fall
- (t(f) in the I2C specification). If not specified we'll use the SCL
- value since they are the same in nearly all cases.
-
-Example:
-
-aliases {
- i2c0 = &i2c0;
-}
-
-i2c0: i2c@2002d000 {
- compatible = "rockchip,rk3188-i2c";
- reg = <0x2002d000 0x1000>;
- interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- rockchip,grf = <&grf>;
-
- clock-names = "i2c";
- clocks = <&cru PCLK_I2C0>;
-
- i2c-scl-rising-time-ns = <800>;
- i2c-scl-falling-time-ns = <100>;
-};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
new file mode 100644
index 000000000000..61eac76c84c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-rk3x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3xxx I2C controller
+
+description:
+ This driver interfaces with the native I2C controller present in Rockchip
+ RK3xxx SoCs.
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+# Everything else is described in the common file
+properties:
+ compatible:
+ oneOf:
+ - const: rockchip,rv1108-i2c
+ - const: rockchip,rk3066-i2c
+ - const: rockchip,rk3188-i2c
+ - const: rockchip,rk3228-i2c
+ - const: rockchip,rk3288-i2c
+ - const: rockchip,rk3399-i2c
+ - items:
+ - enum:
+ - rockchip,rk3036-i2c
+ - rockchip,rk3368-i2c
+ - const: rockchip,rk3288-i2c
+ - items:
+ - enum:
+ - rockchip,px30-i2c
+ - rockchip,rk3308-i2c
+ - rockchip,rk3328-i2c
+ - const: rockchip,rk3399-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description:
+ For older hardware (rk3066, rk3188, rk3228, rk3288)
+ there is one clock that is used both to derive the functional clock
+ for the device and as the bus clock.
+ For newer hardware (rk3399) this clock is used to derive
+ the functional clock.
+ - description:
+ For newer hardware (rk3399) this is the bus clock.
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: i2c
+ - const: pclk
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Required on RK3066 and RK3188; the phandle of the syscon node for
+ the general register file (GRF).
+ On those SoCs an alias with the correct I2C bus ID
+ (bit offset in the GRF) is also required.
+
+ clock-frequency:
+ default: 100000
+ description:
+ SCL frequency to use (in Hz). If omitted, 100 kHz is used.
+
+ i2c-scl-rising-time-ns:
+ default: 1000
+ description:
+ Number of nanoseconds the SCL signal takes to rise
+ (t(r) in I2C specification). If not specified this is assumed to be
+ the maximum the specification allows (1000 ns for Standard-mode,
+ 300 ns for Fast-mode) which might cause slightly slower communication.
+
+ i2c-scl-falling-time-ns:
+ default: 300
+ description:
+ Number of nanoseconds the SCL signal takes to fall
+ (t(f) in the I2C specification). If not specified this is assumed to
+ be the maximum the specification allows (300 ns) which might cause
+ slightly slower communication.
+
+ i2c-sda-falling-time-ns:
+ default: 300
+ description:
+ Number of nanoseconds the SDA signal takes to fall
+ (t(f) in the I2C specification). If not specified we will use the SCL
+ value since they are the same in nearly all cases.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3066-i2c
+ - rockchip,rk3188-i2c
+
+then:
+ required:
+ - rockchip,grf
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3188-cru-common.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c0: i2c@2002d000 {
+ compatible = "rockchip,rk3188-i2c";
+ reg = <0x2002d000 0x1000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_I2C0>;
+ clock-names = "i2c";
+ rockchip,grf = <&grf>;
+ i2c-scl-falling-time-ns = <100>;
+ i2c-scl-rising-time-ns = <800>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-uniphier-f.txt b/Documentation/devicetree/bindings/i2c/i2c-uniphier-f.txt
deleted file mode 100644
index 27fc6f8c798b..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-uniphier-f.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-UniPhier I2C controller (FIFO-builtin)
-
-Required properties:
-- compatible: should be "socionext,uniphier-fi2c".
-- #address-cells: should be 1.
-- #size-cells: should be 0.
-- reg: offset and length of the register set for the device.
-- interrupts: a single interrupt specifier.
-- clocks: phandle to the input clock.
-
-Optional properties:
-- clock-frequency: desired I2C bus frequency in Hz. The maximum supported
- value is 400000. Defaults to 100000 if not specified.
-
-Examples:
-
- i2c0: i2c@58780000 {
- compatible = "socionext,uniphier-fi2c";
- reg = <0x58780000 0x80>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <0 41 4>;
- clocks = <&i2c_clk>;
- clock-frequency = <100000>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-uniphier.txt b/Documentation/devicetree/bindings/i2c/i2c-uniphier.txt
deleted file mode 100644
index 26f9d95b3436..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-uniphier.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-UniPhier I2C controller (FIFO-less)
-
-Required properties:
-- compatible: should be "socionext,uniphier-i2c".
-- #address-cells: should be 1.
-- #size-cells: should be 0.
-- reg: offset and length of the register set for the device.
-- interrupts: a single interrupt specifier.
-- clocks: phandle to the input clock.
-
-Optional properties:
-- clock-frequency: desired I2C bus frequency in Hz. The maximum supported
- value is 400000. Defaults to 100000 if not specified.
-
-Examples:
-
- i2c0: i2c@58400000 {
- compatible = "socionext,uniphier-i2c";
- reg = <0x58400000 0x40>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <0 41 1>;
- clocks = <&i2c_clk>;
- clock-frequency = <100000>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/socionext,uniphier-fi2c.yaml b/Documentation/devicetree/bindings/i2c/socionext,uniphier-fi2c.yaml
new file mode 100644
index 000000000000..15abc022968e
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/socionext,uniphier-fi2c.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/socionext,uniphier-fi2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier I2C controller (FIFO-builtin)
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: socionext,uniphier-fi2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency:
+ minimum: 100000
+ maximum: 400000
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ i2c0: i2c@58780000 {
+ compatible = "socionext,uniphier-fi2c";
+ reg = <0x58780000 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 41 4>;
+ clocks = <&i2c_clk>;
+ clock-frequency = <100000>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/socionext,uniphier-i2c.yaml b/Documentation/devicetree/bindings/i2c/socionext,uniphier-i2c.yaml
new file mode 100644
index 000000000000..ef998def554e
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/socionext,uniphier-i2c.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/socionext,uniphier-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier I2C controller (FIFO-less)
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: socionext,uniphier-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency:
+ minimum: 100000
+ maximum: 400000
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ i2c0: i2c@58400000 {
+ compatible = "socionext,uniphier-i2c";
+ reg = <0x58400000 0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 41 1>;
+ clocks = <&i2c_clk>;
+ clock-frequency = <100000>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
index c602b6fe1c0c..d124eba1ce54 100644
--- a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
@@ -17,9 +17,13 @@ description: |
properties:
compatible:
- enum:
- - adi,adxl345
- - adi,adxl375
+ oneOf:
+ - items:
+ - const: adi,adxl346
+ - const: adi,adxl345
+ - enum:
+ - adi,adxl345
+ - adi,adxl375
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml b/Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
index c1c6d6f223cf..8723a336229e 100644
--- a/Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
@@ -36,6 +36,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index 84d25bd39488..d0913034b1d8 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -106,7 +106,6 @@ examples:
spi-cpha;
clocks = <&ad7192_mclk>;
clock-names = "mclk";
- #interrupt-cells = <2>;
interrupts = <25 0x2>;
interrupt-parent = <&gpio>;
dvdd-supply = <&dvdd>;
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
index 9acde6d2e2d9..a67ba67dab51 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
@@ -67,6 +67,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml b/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
index 91ab9c842273..77605f17901c 100644
--- a/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
@@ -53,6 +53,8 @@ required:
- dout-gpios
- avdd-supply
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
index 59009997dca0..118809a03279 100644
--- a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
@@ -32,6 +32,8 @@ required:
- vref-supply
- reg
+additionalProperties: false
+
examples:
- |
spi {
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
index 0ce290473fb0..8ffeceb6abae 100644
--- a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
@@ -52,6 +52,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
spi {
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index acf36eef728b..b1627441a0b2 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -69,6 +69,8 @@ required:
- "#address-cells"
- "#size-cells"
+additionalProperties: false
+
patternProperties:
"^filter@[0-9]+$":
type: object
diff --git a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
index 19e53930ebf6..1fe561574019 100644
--- a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
+++ b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
@@ -38,6 +38,8 @@ required:
- compatible
- vcc-supply
+additionalProperties: false
+
examples:
- |
serial {
diff --git a/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml
index 50a50a0d7070..a93d1972a5c2 100644
--- a/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml
+++ b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.yaml
@@ -24,6 +24,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml b/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
index a285eaba7125..e51a585bd5a3 100644
--- a/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
@@ -34,6 +34,8 @@ required:
- reg
- vref-supply
+additionalProperties: false
+
examples:
- |
spi {
diff --git a/Documentation/devicetree/bindings/iio/light/adux1020.yaml b/Documentation/devicetree/bindings/iio/light/adux1020.yaml
index 69bd5c06319d..d7d14f2f1c20 100644
--- a/Documentation/devicetree/bindings/iio/light/adux1020.yaml
+++ b/Documentation/devicetree/bindings/iio/light/adux1020.yaml
@@ -28,6 +28,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/iio/light/bh1750.yaml b/Documentation/devicetree/bindings/iio/light/bh1750.yaml
index 1cc60d7ecfa0..1a88b3c253d5 100644
--- a/Documentation/devicetree/bindings/iio/light/bh1750.yaml
+++ b/Documentation/devicetree/bindings/iio/light/bh1750.yaml
@@ -28,6 +28,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/iio/light/isl29018.yaml b/Documentation/devicetree/bindings/iio/light/isl29018.yaml
index cbb00be8f359..0ea278b07d1c 100644
--- a/Documentation/devicetree/bindings/iio/light/isl29018.yaml
+++ b/Documentation/devicetree/bindings/iio/light/isl29018.yaml
@@ -38,6 +38,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/iio/light/noa1305.yaml b/Documentation/devicetree/bindings/iio/light/noa1305.yaml
index 17e7f140b69b..fe7bfe1adbda 100644
--- a/Documentation/devicetree/bindings/iio/light/noa1305.yaml
+++ b/Documentation/devicetree/bindings/iio/light/noa1305.yaml
@@ -29,6 +29,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
index aae8a6d627c9..f92bf7b2b7f0 100644
--- a/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
+++ b/Documentation/devicetree/bindings/iio/light/stk33xx.yaml
@@ -30,6 +30,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2583.yaml b/Documentation/devicetree/bindings/iio/light/tsl2583.yaml
index e86ef64ecf03..7b92ba8cbb9f 100644
--- a/Documentation/devicetree/bindings/iio/light/tsl2583.yaml
+++ b/Documentation/devicetree/bindings/iio/light/tsl2583.yaml
@@ -32,6 +32,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2772.yaml b/Documentation/devicetree/bindings/iio/light/tsl2772.yaml
index ed2c3d5eadf5..e8f7d1ada57b 100644
--- a/Documentation/devicetree/bindings/iio/light/tsl2772.yaml
+++ b/Documentation/devicetree/bindings/iio/light/tsl2772.yaml
@@ -62,6 +62,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/iio/light/veml6030.yaml b/Documentation/devicetree/bindings/iio/light/veml6030.yaml
index 0ff9b11f9d18..fb19a2d7a849 100644
--- a/Documentation/devicetree/bindings/iio/light/veml6030.yaml
+++ b/Documentation/devicetree/bindings/iio/light/veml6030.yaml
@@ -45,6 +45,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml b/Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml
index 9f5ca9c42025..64c18f1693f0 100644
--- a/Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml
+++ b/Documentation/devicetree/bindings/iio/pressure/asc,dlhl60d.yaml
@@ -33,6 +33,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
index 519137e5c170..49257f9251e8 100644
--- a/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
+++ b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
@@ -25,6 +25,9 @@ properties:
- bosch,bmp280
- bosch,bme280
+ reg:
+ maxItems: 1
+
vddd-supply:
description:
digital voltage regulator (see regulator/regulator.txt)
@@ -49,6 +52,8 @@ required:
- vddd-supply
- vdda-supply
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml b/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml
index 8afbac24c34e..f86f8b23ef18 100644
--- a/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml
+++ b/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml
@@ -74,6 +74,8 @@ required:
- trig-gpios
- echo-gpios
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml b/Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml
index a079c9921af6..ada55f186f3c 100644
--- a/Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml
+++ b/Documentation/devicetree/bindings/iio/proximity/parallax-ping.yaml
@@ -42,6 +42,8 @@ required:
- compatible
- ping-gpios
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
index d4922f9f0376..8fb46de6641d 100644
--- a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
+++ b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
@@ -123,12 +123,11 @@ patternProperties:
sign.
allOf:
- $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ minItems: 3
+ maxItems: 64
items:
- minItems: 3
- maxItems: 64
- items:
- minItems: 2
- maxItems: 2
+ minItems: 2
+ maxItems: 2
"^diode@":
type: object
@@ -328,12 +327,11 @@ patternProperties:
78 and 79.
allOf:
- $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ minItems: 3
+ maxItems: 64
items:
- minItems: 3
- maxItems: 64
- items:
- minItems: 2
- maxItems: 2
+ minItems: 2
+ maxItems: 2
adi,custom-steinhart:
description:
@@ -398,6 +396,8 @@ required:
- reg
- interrupts
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
@@ -463,16 +463,16 @@ examples:
adi,sensor-type = <9>; //custom thermocouple
adi,single-ended;
adi,custom-thermocouple = /bits/ 64
- <(-50220000) 0
- (-30200000) 99100000
- (-5300000) 135400000
- 0 273150000
- 40200000 361200000
- 55300000 522100000
- 88300000 720300000
- 132200000 811200000
- 188700000 922500000
- 460400000 1000000000>; //10 pairs
+ <(-50220000) 0>,
+ <(-30200000) 99100000>,
+ <(-5300000) 135400000>,
+ <0 273150000>,
+ <40200000 361200000>,
+ <55300000 522100000>,
+ <88300000 720300000>,
+ <132200000 811200000>,
+ <188700000 922500000>,
+ <460400000 1000000000>; //10 pairs
};
};
diff --git a/Documentation/devicetree/bindings/input/gpio-vibrator.yaml b/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
index b98bf9363c8f..2384465eaa19 100644
--- a/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
+++ b/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
@@ -26,6 +26,8 @@ required:
- compatible
- enable-gpios
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/input/max77650-onkey.yaml b/Documentation/devicetree/bindings/input/max77650-onkey.yaml
index 2f2e0b6ebbbd..3a2ad6ec64db 100644
--- a/Documentation/devicetree/bindings/input/max77650-onkey.yaml
+++ b/Documentation/devicetree/bindings/input/max77650-onkey.yaml
@@ -33,3 +33,6 @@ properties:
required:
- compatible
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt b/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt
deleted file mode 100644
index eb8e83736c02..000000000000
--- a/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-STMicroelectronics STPMIC1 Onkey
-
-Required properties:
-
-- compatible = "st,stpmic1-onkey";
-- interrupts: interrupt line to use
-- interrupt-names = "onkey-falling", "onkey-rising"
- onkey-falling: happens when onkey is pressed; IT_PONKEY_F of pmic
- onkey-rising: happens when onkey is released; IT_PONKEY_R of pmic
-
-Optional properties:
-
-- st,onkey-clear-cc-flag: onkey is able power on after an
- over-current shutdown event.
-- st,onkey-pu-inactive: onkey pull up is not active
-- power-off-time-sec: Duration in seconds which the key should be kept
- pressed for device to power off automatically (from 1 to 16 seconds).
- see See Documentation/devicetree/bindings/input/input.yaml
-
-Example:
-
-onkey {
- compatible = "st,stpmic1-onkey";
- interrupt-parent = <&pmic>;
- interrupts = <IT_PONKEY_F 0>,<IT_PONKEY_R 1>;
- interrupt-names = "onkey-falling", "onkey-rising";
- power-off-time-sec = <10>;
-};
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,bcm-voter.yaml b/Documentation/devicetree/bindings/interconnect/qcom,bcm-voter.yaml
new file mode 100644
index 000000000000..5971fc1df08d
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,bcm-voter.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,bcm-voter.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm BCM-Voter Interconnect
+
+maintainers:
+ - Georgi Djakov <georgi.djakov@linaro.org>
+
+description: |
+ The Bus Clock Manager (BCM) is a dedicated hardware accelerator that manages
+ shared system resources by aggregating requests from multiple Resource State
+ Coordinators (RSC). Interconnect providers are able to vote for aggregated
+ threshold values from consumers by communicating through their respective
+ RSCs.
+
+properties:
+ compatible:
+ enum:
+ - qcom,bcm-voter
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ # Example 1: apps bcm_voter on SDM845 SoC should be defined inside &apps_rsc node
+ # as defined in Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt
+ - |
+
+ apps_bcm_voter: bcm_voter {
+ compatible = "qcom,bcm-voter";
+ };
+
+ # Example 2: disp bcm_voter on SDM845 should be defined inside &disp_rsc node
+ # as defined in Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt
+ - |
+
+ disp_bcm_voter: bcm_voter {
+ compatible = "qcom,bcm-voter";
+ };
+...
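These voter nodes are then referenced by interconnect providers via the
qcom,bcm-voters property; a sketch grounded in the qcom,sdm845.yaml binding
added later in this series:

	mem_noc: interconnect@1380000 {
		compatible = "qcom,sdm845-mem-noc";
		reg = <0 0x01380000 0 0x27200>;
		#interconnect-cells = <1>;
		qcom,bcm-voters = <&apps_bcm_voter>;
	};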
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
new file mode 100644
index 000000000000..91f70c9067d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,osm-l3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Operating State Manager (OSM) L3 Interconnect Provider
+
+maintainers:
+ - Sibi Sankar <sibis@codeaurora.org>
+
+description:
+ L3 cache bandwidth requirements on Qualcomm SoCs are serviced by the OSM.
+ The OSM L3 interconnect provider aggregates the L3 bandwidth requests
+ from CPU/GPU and relays them to the OSM.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sc7180-osm-l3
+ - qcom,sdm845-osm-l3
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: xo clock
+ - description: alternate clock
+
+ clock-names:
+ items:
+ - const: xo
+ - const: alternate
+
+ '#interconnect-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#interconnect-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+
+ #define GPLL0 165
+ #define RPMH_CXO_CLK 0
+
+ osm_l3: interconnect@17d41000 {
+ compatible = "qcom,sdm845-osm-l3";
+ reg = <0x17d41000 0x1400>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #interconnect-cells = <1>;
+ };
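Consumers reference the provider with one cell per endpoint. A hedged sketch,
assuming the MASTER_OSM_L3_APPS/SLAVE_OSM_L3 defines from the matching
dt-bindings header; the CPU node details are illustrative:

	#include <dt-bindings/interconnect/qcom,osm-l3.h>

	cpu@0 {
		device_type = "cpu";
		compatible = "qcom,kryo385"; // assumed CPU compatible
		reg = <0x0 0x0>;
		interconnects = <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>;
	};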
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml
new file mode 100644
index 000000000000..50f78f87f3fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,sc7180.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SC7180 Network-On-Chip Interconnect
+
+maintainers:
+ - Odelu Kukatla <okukatla@codeaurora.org>
+
+description: |
+ SC7180 interconnect providers support system bandwidth requirements through
+ RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
+ able to communicate with the BCM through the Resource State Coordinator (RSC)
+ associated with each execution environment. Provider nodes must point to at
+ least one RPMh device child node pertaining to their RSC and each provider
+ can map to multiple RPMh resources.
+
+properties:
+ reg:
+ maxItems: 1
+
+ compatible:
+ enum:
+ - qcom,sc7180-aggre1-noc
+ - qcom,sc7180-aggre2-noc
+ - qcom,sc7180-camnoc-virt
+ - qcom,sc7180-compute-noc
+ - qcom,sc7180-config-noc
+ - qcom,sc7180-dc-noc
+ - qcom,sc7180-gem-noc
+ - qcom,sc7180-ipa-virt
+ - qcom,sc7180-mc-virt
+ - qcom,sc7180-mmss-noc
+ - qcom,sc7180-npu-noc
+ - qcom,sc7180-qup-virt
+ - qcom,sc7180-system-noc
+
+ '#interconnect-cells':
+ const: 1
+
+ qcom,bcm-voters:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ List of phandles to qcom,bcm-voter nodes that are required by
+ this interconnect to send RPMh commands.
+
+ qcom,bcm-voter-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description: |
+ Names for each of the qcom,bcm-voters specified.
+
+required:
+ - compatible
+ - reg
+ - '#interconnect-cells'
+ - qcom,bcm-voters
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interconnect/qcom,sc7180.h>
+
+ config_noc: interconnect@1500000 {
+ compatible = "qcom,sc7180-config-noc";
+ reg = <0 0x01500000 0 0x28000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1620000 {
+ compatible = "qcom,sc7180-system-noc";
+ reg = <0 0x01620000 0 0x17080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ compatible = "qcom,sc7180-mmss-noc";
+ reg = <0 0x01740000 0 0x1c100>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
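A hedged sketch of a consumer voting on a path through these providers,
assuming the MASTER_MDP0/SLAVE_EBI1 defines from the header included above
and an mc_virt provider defined elsewhere; the client node is illustrative:

	// Illustrative consumer: the display controller voting on the
	// MDP -> DDR bandwidth path.
	mdss@ae00000 {
		compatible = "qcom,sc7180-mdss"; // assumed client
		reg = <0 0x0ae00000 0 0x1000>;
		interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>;
		interconnect-names = "mdp0-mem";
	};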
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
deleted file mode 100644
index 5c4f1d911630..000000000000
--- a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Qualcomm SDM845 Network-On-Chip interconnect driver binding
------------------------------------------------------------
-
-SDM845 interconnect providers support system bandwidth requirements through
-RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
-able to communicate with the BCM through the Resource State Coordinator (RSC)
-associated with each execution environment. Provider nodes must reside within
-an RPMh device node pertaining to their RSC and each provider maps to a single
-RPMh resource.
-
-Required properties :
-- compatible : shall contain only one of the following:
- "qcom,sdm845-rsc-hlos"
-- #interconnect-cells : should contain 1
-
-Examples:
-
-apps_rsc: rsc {
- rsc_hlos: interconnect {
- compatible = "qcom,sdm845-rsc-hlos";
- #interconnect-cells = <1>;
- };
-};
-
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml
new file mode 100644
index 000000000000..8b087e0b0b81
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,sdm845.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SDM845 Network-On-Chip Interconnect
+
+maintainers:
+ - Georgi Djakov <georgi.djakov@linaro.org>
+
+description: |
+ SDM845 interconnect providers support system bandwidth requirements through
+ RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
+ able to communicate with the BCM through the Resource State Coordinator (RSC)
+ associated with each execution environment. Provider nodes must point to at
+ least one RPMh device child node pertaining to their RSC and each provider
+ can map to multiple RPMh resources.
+
+properties:
+ reg:
+ maxItems: 1
+
+ compatible:
+ enum:
+ - qcom,sdm845-aggre1-noc
+ - qcom,sdm845-aggre2-noc
+ - qcom,sdm845-config-noc
+ - qcom,sdm845-dc-noc
+ - qcom,sdm845-gladiator-noc
+ - qcom,sdm845-mem-noc
+ - qcom,sdm845-mmss-noc
+ - qcom,sdm845-system-noc
+
+ '#interconnect-cells':
+ const: 1
+
+ qcom,bcm-voters:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ List of phandles to qcom,bcm-voter nodes that are required by
+ this interconnect to send RPMh commands.
+
+ qcom,bcm-voter-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description: |
+ Names for each of the qcom,bcm-voters specified.
+
+required:
+ - compatible
+ - reg
+ - '#interconnect-cells'
+ - qcom,bcm-voters
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interconnect/qcom,sdm845.h>
+
+ mem_noc: interconnect@1380000 {
+ compatible = "qcom,sdm845-mem-noc";
+ reg = <0 0x01380000 0 0x27200>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ compatible = "qcom,sdm845-mmss-noc";
+ reg = <0 0x01740000 0 0x1c1000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voter-names = "apps", "disp";
+ qcom,bcm-voters = <&apps_bcm_voter>, <&disp_bcm_voter>;
+ };
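
The &apps_bcm_voter and &disp_bcm_voter phandles are expected to reference
qcom,bcm-voter nodes hanging off the RPMh RSCs. A rough sketch of that
relationship (the RSC layout shown is an assumption; see the qcom,bcm-voter
binding for the authoritative description):

    apps_rsc: rsc {
        /* ... RSC properties omitted ... */
        apps_bcm_voter: bcm-voter {
            compatible = "qcom,bcm-voter";
        };
    };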
diff --git a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
index 507c141ea760..ccc507f384d2 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
@@ -44,6 +44,8 @@ required:
- interrupt-controller
- '#interrupt-cells'
+additionalProperties: false
+
examples:
- |
intcon: interrupt-controller@c8003000 {
diff --git a/Documentation/devicetree/bindings/interrupt-controller/msi.txt b/Documentation/devicetree/bindings/interrupt-controller/msi.txt
index c60c034dcf19..c20b51df7138 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/msi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/msi.txt
@@ -98,7 +98,7 @@ Example
};
msi_c: msi-controller@c {
- reg = <0xb 0xf00>;
+ reg = <0xc 0xf00>;
compatible = "vendor-b,another-controller";
msi-controller;
/* Each device has some unique ID */
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt
deleted file mode 100644
index 48e71d3ac2ad..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-UniPhier AIDET
-
-UniPhier AIDET (ARM Interrupt Detector) is an add-on block for ARM GIC (Generic
-Interrupt Controller). GIC itself can handle only high level and rising edge
-interrupts. The AIDET provides logic inverter to support low level and falling
-edge interrupts.
-
-Required properties:
-- compatible: Should be one of the following:
- "socionext,uniphier-ld4-aidet" - for LD4 SoC
- "socionext,uniphier-pro4-aidet" - for Pro4 SoC
- "socionext,uniphier-sld8-aidet" - for sLD8 SoC
- "socionext,uniphier-pro5-aidet" - for Pro5 SoC
- "socionext,uniphier-pxs2-aidet" - for PXs2/LD6b SoC
- "socionext,uniphier-ld11-aidet" - for LD11 SoC
- "socionext,uniphier-ld20-aidet" - for LD20 SoC
- "socionext,uniphier-pxs3-aidet" - for PXs3 SoC
-- reg: Specifies offset and length of the register set for the device.
-- interrupt-controller: Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an interrupt
- source. The value should be 2. The first cell defines the interrupt number
- (corresponds to the SPI interrupt number of GIC). The second cell specifies
- the trigger type as defined in interrupts.txt in this directory.
-
-Example:
-
- aidet: aidet@5fc20000 {
- compatible = "socionext,uniphier-pro4-aidet";
- reg = <0x5fc20000 0x200>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml
new file mode 100644
index 000000000000..f89ebde76dab
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/socionext,uniphier-aidet.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier AIDET
+
+description: |
+ UniPhier AIDET (ARM Interrupt Detector) is an add-on block for ARM GIC
+ (Generic Interrupt Controller). GIC itself can handle only high level and
+ rising edge interrupts. The AIDET provides logic inverter to support low
+ level and falling edge interrupts.
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - socionext,uniphier-ld4-aidet
+ - socionext,uniphier-pro4-aidet
+ - socionext,uniphier-sld8-aidet
+ - socionext,uniphier-pro5-aidet
+ - socionext,uniphier-pxs2-aidet
+ - socionext,uniphier-ld6b-aidet
+ - socionext,uniphier-ld11-aidet
+ - socionext,uniphier-ld20-aidet
+ - socionext,uniphier-pxs3-aidet
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description: |
+ The first cell defines the interrupt number (corresponds to the SPI
+ interrupt number of GIC). The second cell specifies the trigger type as
+ defined in interrupts.txt in this directory.
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ interrupt-controller@5fc20000 {
+ compatible = "socionext,uniphier-pro4-aidet";
+ reg = <0x5fc20000 0x200>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
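
Because the AIDET's job is to invert low-level and falling-edge interrupt
lines for the GIC, a typical consumer points interrupt-parent at the AIDET
instead of the GIC. A minimal sketch (the consumer node, the SPI number and
the &aidet label are illustrative assumptions):

    #include <dt-bindings/interrupt-controller/irq.h>

    serial@54006800 {
        /* hypothetical consumer with an active-low interrupt */
        interrupt-parent = <&aidet>;
        interrupts = <33 IRQ_TYPE_LEVEL_LOW>;
    };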
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
index 7cdd3aaa2ba4..0e33cd9e010e 100644
--- a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
@@ -80,6 +80,8 @@ required:
- clock-names
- "#iommu-cells"
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/exynos5250.h>
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index c60b994fe116..4c270fde4567 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -167,13 +167,13 @@ examples:
led-controller {
compatible = "gpio-leds";
- led0 {
+ led-0 {
function = LED_FUNCTION_STATUS;
linux,default-trigger = "heartbeat";
gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
};
- led1 {
+ led-1 {
function = LED_FUNCTION_USB;
gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
trigger-sources = <&ohci_port1>, <&ehci_port1>;
diff --git a/Documentation/devicetree/bindings/leds/leds-max77650.yaml b/Documentation/devicetree/bindings/leds/leds-max77650.yaml
index 8c43f1e1bf7d..c6f96cabd4d1 100644
--- a/Documentation/devicetree/bindings/leds/leds-max77650.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-max77650.yaml
@@ -49,3 +49,6 @@ required:
- compatible
- "#address-cells"
- "#size-cells"
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
index b50f4bcc98f1..90edf9d33b33 100644
--- a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
+++ b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
@@ -50,3 +50,6 @@ patternProperties:
required:
- compatible
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml b/Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml
new file mode 100644
index 000000000000..75d5d97305e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/allwinner,sun6i-a31-msgbox.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner sunxi Message Box
+
+maintainers:
+ - Samuel Holland <samuel@sholland.org>
+
+description: |
+ The hardware message box on sun6i, sun8i, sun9i, and sun50i SoCs is a
+ two-user mailbox controller containing 8 unidirectional FIFOs. An interrupt
+ is raised for received messages, but software must poll to know when a
+ transmitted message has been acknowledged by the remote user. Each FIFO can
+ hold four 32-bit messages; when a FIFO is full, clients must wait before
+ attempting more transmissions.
+
+ Refer to ./mailbox.txt for generic information about mailbox device-tree
+ bindings.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - allwinner,sun8i-a83t-msgbox
+ - allwinner,sun8i-h3-msgbox
+ - allwinner,sun9i-a80-msgbox
+ - allwinner,sun50i-a64-msgbox
+ - allwinner,sun50i-h6-msgbox
+ - const: allwinner,sun6i-a31-msgbox
+ - const: allwinner,sun6i-a31-msgbox
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: bus clock
+
+ resets:
+ maxItems: 1
+ description: bus reset
+
+ interrupts:
+ maxItems: 1
+
+ '#mbox-cells':
+ const: 1
+ description: first cell is the channel number (0-7)
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - interrupts
+ - '#mbox-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun8i-h3-ccu.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/sun8i-h3-ccu.h>
+
+ msgbox: mailbox@1c17000 {
+ compatible = "allwinner,sun8i-h3-msgbox",
+ "allwinner,sun6i-a31-msgbox";
+ reg = <0x01c17000 0x1000>;
+ clocks = <&ccu CLK_BUS_MSGBOX>;
+ resets = <&ccu RST_BUS_MSGBOX>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
+...
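
With '#mbox-cells = <1>', a client selects one of the eight FIFOs by channel
number. A minimal client sketch (the client node and channel number are
illustrative assumptions):

    firmware-message-channel {
        /* hypothetical client using channel 2 for TX */
        mboxes = <&msgbox 2>;
        mbox-names = "tx";
    };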
diff --git a/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml b/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
index 319280563648..aa2b3bf56b57 100644
--- a/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
@@ -41,6 +41,8 @@ required:
- interrupts
- "#mbox-cells"
+additionalProperties: false
+
examples:
- |
mailbox@c883c404 {
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
index 9c43357c5924..31486c9f6443 100644
--- a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
@@ -23,6 +23,8 @@ Required properties:
be included together with SoC specific compatible.
There is a version 1.0 MU on imx7ulp, use "fsl,imx7ulp-mu"
compatible to support it.
+ To communicate with the i.MX8 SCU, the "fsl,imx8-mu-scu"
+ compatible can be used for fast IPC.
- reg : Should contain the registers location and length
- interrupts : Interrupt number. The interrupt specifier format depends
on the interrupt controller parent.
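
A sketch of an MU instance dedicated to SCU IPC, with the new compatible
prepended to the SoC-specific ones (the label, address and interrupt below
are illustrative assumptions, not taken from this binding):

    lsio_mu1: mailbox@5d1c0000 {
        compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
        reg = <0x5d1c0000 0x10000>;
        interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
        #mbox-cells = <2>;
    };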
diff --git a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
index 7b13787ab13d..0b5b2a6bcc48 100644
--- a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
+++ b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
@@ -14,13 +14,11 @@ Required properties:
- interrupts: The interrupt signal from the GCE block
- clock: Clocks according to the common clock binding
- clock-names: Must be "gce" to stand for GCE clock
-- #mbox-cells: Should be 3.
- <&phandle channel priority atomic_exec>
+- #mbox-cells: Should be 2.
+ <&phandle channel priority>
phandle: Label name of a gce node.
channel: Channel of mailbox. Be equal to the thread id of GCE.
priority: Priority of GCE thread.
- atomic_exec: GCE processing continuous packets of commands in atomic
- way.
Required properties for a client device:
- mboxes: Client use mailbox to communicate with GCE, it should have this
@@ -54,8 +52,8 @@ Example for a client device:
mmsys: clock-controller@14000000 {
compatible = "mediatek,mt8173-mmsys";
- mboxes = <&gce 0 CMDQ_THR_PRIO_LOWEST 1>,
- <&gce 1 CMDQ_THR_PRIO_LOWEST 1>;
+ mboxes = <&gce 0 CMDQ_THR_PRIO_LOWEST>,
+ <&gce 1 CMDQ_THR_PRIO_LOWEST>;
mutex-event-eof = <CMDQ_EVENT_MUTEX0_STREAM_EOF
CMDQ_EVENT_MUTEX1_STREAM_EOF>;
mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x3000 0x1000>,
diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
index 335717e15970..37d77e065491 100644
--- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -124,6 +124,8 @@ required:
- amlogic,ao-sysctrl
- amlogic,canvas
+additionalProperties: false
+
examples:
- |
vdec: video-decoder@c8820000 {
diff --git a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
index 41197578f19a..95ffa8bc0533 100644
--- a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
@@ -24,6 +24,12 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+
interrupts:
maxItems: 1
@@ -47,7 +53,6 @@ allOf:
- description: AO-CEC clock
clock-names:
- maxItems: 1
items:
- const: core
@@ -66,7 +71,6 @@ allOf:
- description: AO-CEC clock generator source
clock-names:
- maxItems: 1
items:
- const: oscin
@@ -78,6 +82,8 @@ required:
- clocks
- clock-names
+additionalProperties: false
+
examples:
- |
cec_AO: cec@100 {
@@ -88,4 +94,3 @@ examples:
clock-names = "core";
hdmi-phandle = <&hdmi_tx>;
};
-
diff --git a/Documentation/devicetree/bindings/media/renesas,ceu.yaml b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
index 8e9251a0f9ef..fcb5f13704a5 100644
--- a/Documentation/devicetree/bindings/media/renesas,ceu.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
@@ -59,6 +59,8 @@ required:
- interrupts
- port
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.txt b/Documentation/devicetree/bindings/media/renesas,vin.txt
deleted file mode 100644
index 5eefd62ac5c5..000000000000
--- a/Documentation/devicetree/bindings/media/renesas,vin.txt
+++ /dev/null
@@ -1,217 +0,0 @@
-Renesas R-Car Video Input driver (rcar_vin)
--------------------------------------------
-
-The rcar_vin device provides video input capabilities for the Renesas R-Car
-family of devices.
-
-Each VIN instance has a single parallel input that supports RGB and YUV video,
-with both external synchronization and BT.656 synchronization for the latter.
-Depending on the instance the VIN input is connected to external SoC pins, or
-on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
-
- - compatible: Must be one or more of the following
- - "renesas,vin-r8a7743" for the R8A7743 device
- - "renesas,vin-r8a7744" for the R8A7744 device
- - "renesas,vin-r8a7745" for the R8A7745 device
- - "renesas,vin-r8a77470" for the R8A77470 device
- - "renesas,vin-r8a774a1" for the R8A774A1 device
- - "renesas,vin-r8a774b1" for the R8A774B1 device
- - "renesas,vin-r8a774c0" for the R8A774C0 device
- - "renesas,vin-r8a7778" for the R8A7778 device
- - "renesas,vin-r8a7779" for the R8A7779 device
- - "renesas,vin-r8a7790" for the R8A7790 device
- - "renesas,vin-r8a7791" for the R8A7791 device
- - "renesas,vin-r8a7792" for the R8A7792 device
- - "renesas,vin-r8a7793" for the R8A7793 device
- - "renesas,vin-r8a7794" for the R8A7794 device
- - "renesas,vin-r8a7795" for the R8A7795 device
- - "renesas,vin-r8a7796" for the R8A7796 device
- - "renesas,vin-r8a77965" for the R8A77965 device
- - "renesas,vin-r8a77970" for the R8A77970 device
- - "renesas,vin-r8a77980" for the R8A77980 device
- - "renesas,vin-r8a77990" for the R8A77990 device
- - "renesas,vin-r8a77995" for the R8A77995 device
- - "renesas,rcar-gen2-vin" for a generic R-Car Gen2 or RZ/G1 compatible
- device.
-
- When compatible with the generic version nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
- - reg: the register base and size for the device registers
- - interrupts: the interrupt for the device
- - clocks: Reference to the parent clock
-
-The per-board settings for Gen2 and RZ/G1 platforms:
-
-- port - sub-node describing a single endpoint connected to the VIN
- from external SoC pins as described in video-interfaces.txt[1].
- Only the first one will be considered as each vin interface has one
- input port.
-
- - Optional properties for endpoint nodes:
- - hsync-active: see [1] for description. Default is active high.
- - vsync-active: see [1] for description. Default is active high.
- If both HSYNC and VSYNC polarities are not specified, embedded
- synchronization is selected.
- - field-active-even: see [1] for description. Default is active high.
- - bus-width: see [1] for description. The selected bus width depends on
- the SoC type and selected input image format.
- Valid values are: 8, 10, 12, 16, 24 and 32.
- - data-shift: see [1] for description. Valid values are 0 and 8.
- - data-enable-active: polarity of CLKENB signal, see [1] for
- description. Default is active high.
-
-The per-board settings for Gen3 and RZ/G2 platforms:
-
-Gen3 and RZ/G2 platforms can support both a single connected parallel input
-source from external SoC pins (port@0) and/or multiple parallel input sources
-from local SoC CSI-2 receivers (port@1) depending on SoC.
-
-- renesas,id - ID number of the VIN, VINx in the documentation.
-- ports
- - port@0 - sub-node describing a single endpoint connected to the VIN
- from external SoC pins as described in video-interfaces.txt[1].
- Describing more than one endpoint in port@0 is invalid. Only VIN
- instances that are connected to external pins should have port@0.
-
- Endpoint nodes of port@0 support the optional properties listed in
- the Gen2 per-board settings description.
-
- - port@1 - sub-nodes describing one or more endpoints connected to
- the VIN from local SoC CSI-2 receivers. The endpoint numbers must
- use the following schema.
-
- - endpoint@0 - sub-node describing the endpoint connected to CSI20
- - endpoint@1 - sub-node describing the endpoint connected to CSI21
- - endpoint@2 - sub-node describing the endpoint connected to CSI40
- - endpoint@3 - sub-node describing the endpoint connected to CSI41
-
- Endpoint nodes of port@1 do not support any optional endpoint property.
-
-Device node example for Gen2 platforms
---------------------------------------
-
- aliases {
- vin0 = &vin0;
- };
-
- vin0: vin@e6ef0000 {
- compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin";
- clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
- reg = <0 0xe6ef0000 0 0x1000>;
- interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
-
-Board setup example for Gen2 platforms (vin1 composite video input)
--------------------------------------------------------------------
-
-&i2c2 {
- status = "okay";
- pinctrl-0 = <&i2c2_pins>;
- pinctrl-names = "default";
-
- adv7180@20 {
- compatible = "adi,adv7180";
- reg = <0x20>;
- remote = <&vin1>;
-
- port {
- adv7180: endpoint {
- bus-width = <8>;
- remote-endpoint = <&vin1ep0>;
- };
- };
- };
-};
-
-/* composite video input */
-&vin1 {
- pinctrl-0 = <&vin1_pins>;
- pinctrl-names = "default";
-
- status = "okay";
-
- port {
- vin1ep0: endpoint {
- remote-endpoint = <&adv7180>;
- bus-width = <8>;
- };
- };
-};
-
-Device node example for Gen3 platforms
---------------------------------------
-
- vin0: video@e6ef0000 {
- compatible = "renesas,vin-r8a7795";
- reg = <0 0xe6ef0000 0 0x1000>;
- interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 811>;
- power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
- resets = <&cpg 811>;
- renesas,id = <0>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg = <1>;
-
- vin0csi20: endpoint@0 {
- reg = <0>;
- remote-endpoint= <&csi20vin0>;
- };
- vin0csi21: endpoint@1 {
- reg = <1>;
- remote-endpoint= <&csi21vin0>;
- };
- vin0csi40: endpoint@2 {
- reg = <2>;
- remote-endpoint= <&csi40vin0>;
- };
- };
- };
- };
-
- csi20: csi2@fea80000 {
- compatible = "renesas,r8a7795-csi2";
- reg = <0 0xfea80000 0 0x10000>;
- interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 714>;
- power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
- resets = <&cpg 714>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- csi20_in: endpoint {
- clock-lanes = <0>;
- data-lanes = <1>;
- remote-endpoint = <&adv7482_txb>;
- };
- };
-
- port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg = <1>;
-
- csi20vin0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vin0csi20>;
- };
- };
- };
- };
-
-[1] video-interfaces.txt common video media interface
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
new file mode 100644
index 000000000000..1ec947b4781f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -0,0 +1,402 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2020 Renesas Electronics Corp.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/renesas,vin.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car Video Input (VIN)
+
+maintainers:
+ - Niklas Söderlund <niklas.soderlund@ragnatech.se>
+
+description:
+ The R-Car Video Input (VIN) device provides video input capabilities for the
+ Renesas R-Car family of devices.
+
+ Each VIN instance has a single parallel input that supports RGB and YUV video,
+ with both external synchronization and BT.656 synchronization for the latter.
+ Depending on the instance the VIN input is connected to external SoC pins, or
+ on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,vin-r8a7743 # RZ/G1M
+ - renesas,vin-r8a7744 # RZ/G1N
+ - renesas,vin-r8a7745 # RZ/G1E
+ - renesas,vin-r8a77470 # RZ/G1C
+ - renesas,vin-r8a7790 # R-Car H2
+ - renesas,vin-r8a7791 # R-Car M2-W
+ - renesas,vin-r8a7792 # R-Car V2H
+ - renesas,vin-r8a7793 # R-Car M2-N
+ - renesas,vin-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-vin # Generic R-Car Gen2 or RZ/G1
+
+ - items:
+ - enum:
+ - renesas,vin-r8a774a1 # RZ/G2M
+ - renesas,vin-r8a774b1 # RZ/G2N
+ - renesas,vin-r8a774c0 # RZ/G2E
+ - renesas,vin-r8a7778 # R-Car M1
+ - renesas,vin-r8a7779 # R-Car H1
+ - renesas,vin-r8a7795 # R-Car H3
+ - renesas,vin-r8a7796 # R-Car M3-W
+ - renesas,vin-r8a77965 # R-Car M3-N
+ - renesas,vin-r8a77970 # R-Car V3M
+ - renesas,vin-r8a77980 # R-Car V3H
+ - renesas,vin-r8a77990 # R-Car E3
+ - renesas,vin-r8a77995 # R-Car D3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ # The per-board settings for Gen2 and RZ/G1 platforms:
+ port:
+ type: object
+ description:
+ A node containing a parallel input with a single endpoint definition, as
+ documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ properties:
+ endpoint:
+ type: object
+
+ properties:
+ hsync-active:
+ description:
+ If both HSYNC and VSYNC polarities are not specified, embedded
+ synchronization is selected.
+ default: 1
+
+ vsync-active:
+ description:
+ If both HSYNC and VSYNC polarities are not specified, embedded
+ synchronization is selected.
+ default: 1
+
+ field-active-even: true
+
+ bus-width: true
+
+ data-shift: true
+
+ data-enable-active:
+ description: Polarity of CLKENB signal
+ default: 1
+
+ pclk-sample: true
+
+ data-active: true
+
+ remote-endpoint: true
+
+ required:
+ - remote-endpoint
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+ # The per-board settings for Gen3 and RZ/G2 platforms:
+ renesas,id:
+ description: VIN channel number
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ - maximum: 15
+
+ ports:
+ type: object
+ description:
+ A node containing input nodes with endpoint definitions as documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ properties:
+ port@0:
+ type: object
+ description:
+ Input port node, single endpoint describing a parallel input source.
+
+ properties:
+ reg:
+ const: 0
+
+ endpoint:
+ type: object
+
+ properties:
+ hsync-active:
+ description:
+ If both HSYNC and VSYNC polarities are not specified, embedded
+ synchronization is selected.
+ default: 1
+
+ vsync-active:
+ description:
+ If both HSYNC and VSYNC polarities are not specified, embedded
+ synchronization is selected.
+ default: 1
+
+ field-active-even: true
+
+ bus-width: true
+
+ data-shift: true
+
+ data-enable-active:
+ description: Polarity of CLKENB signal
+ default: 1
+
+ pclk-sample: true
+
+ data-active: true
+
+ remote-endpoint: true
+
+ required:
+ - remote-endpoint
+
+ additionalProperties: false
+
+ required:
+ - endpoint
+
+ additionalProperties: false
+
+ port@1:
+ type: object
+ description:
+ Input port node, multiple endpoints describing all the R-Car CSI-2
+ modules connected to the VIN.
+
+ properties:
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ reg:
+ const: 1
+
+ endpoint@0:
+ type: object
+ description: Endpoint connected to CSI20.
+
+ properties:
+ reg:
+ const: 0
+
+ remote-endpoint: true
+
+ required:
+ - reg
+ - remote-endpoint
+
+ additionalProperties: false
+
+ endpoint@1:
+ type: object
+ description: Endpoint connected to CSI21.
+
+ properties:
+ reg:
+ const: 1
+
+ remote-endpoint: true
+
+ required:
+ - reg
+ - remote-endpoint
+
+ additionalProperties: false
+
+ endpoint@2:
+ type: object
+ description: Endpoint connected to CSI40.
+
+ properties:
+ reg:
+ const: 2
+
+ remote-endpoint: true
+
+ required:
+ - reg
+ - remote-endpoint
+
+ additionalProperties: false
+
+ endpoint@3:
+ type: object
+ description: Endpoint connected to CSI41.
+
+ properties:
+ reg:
+ const: 3
+
+ remote-endpoint: true
+
+ required:
+ - reg
+ - remote-endpoint
+
+ additionalProperties: false
+
+ anyOf:
+ - required:
+ - endpoint@0
+ - required:
+ - endpoint@1
+ - required:
+ - endpoint@2
+ - required:
+ - endpoint@3
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+ - resets
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,vin-r8a7778
+ - renesas,vin-r8a7779
+ - renesas,rcar-gen2-vin
+then:
+ required:
+ - port
+else:
+ required:
+ - renesas,id
+ - ports
+
+additionalProperties: false
+
+examples:
+ # Device node example for Gen2 platform
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ vin1: vin@e6ef1000 {
+ compatible = "renesas,vin-r8a7790",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef1000 0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 810>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 810>;
+
+ port {
+ vin1ep0: endpoint {
+ remote-endpoint = <&adv7180>;
+ bus-width = <8>;
+ };
+ };
+ };
+
+ # Device node example for Gen3 platform with only CSI-2
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+
+ vin0: video@e6ef0000 {
+ compatible = "renesas,vin-r8a7795";
+ reg = <0 0xe6ef0000 0 0x1000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 811>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 811>;
+ renesas,id = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin0csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin0>;
+ };
+ vin0csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin0>;
+ };
+ };
+ };
+ };
+
+ # Device node example for Gen3 platform with CSI-2 and parallel
+ - |
+ #include <dt-bindings/clock/r8a77970-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a77970-sysc.h>
+
+ vin2: video@e6ef2000 {
+ compatible = "renesas,vin-r8a77970";
+ reg = <0 0xe6ef2000 0 0x1000>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 809>;
+ power-domains = <&sysc R8A77970_PD_ALWAYS_ON>;
+ resets = <&cpg 809>;
+ renesas,id = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ vin2_in: endpoint {
+ remote-endpoint = <&adv7612_out>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin2csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin2>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/max77650.yaml b/Documentation/devicetree/bindings/mfd/max77650.yaml
index 480385789394..b0a0f0d3d9d4 100644
--- a/Documentation/devicetree/bindings/mfd/max77650.yaml
+++ b/Documentation/devicetree/bindings/mfd/max77650.yaml
@@ -73,6 +73,8 @@ required:
- gpio-controller
- "#gpio-cells"
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
index 4fbb9e734284..3a6a1a26e2b3 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml
@@ -41,6 +41,9 @@ properties:
"#clock-cells":
const: 0
+ clock-output-names:
+ const: bd71828-32k-out
+
rohm,charger-sense-resistor-ohms:
minimum: 10000000
maximum: 50000000
@@ -74,6 +77,8 @@ required:
- gpio-controller
- "#gpio-cells"
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.txt b/Documentation/devicetree/bindings/mfd/st,stpmic1.txt
deleted file mode 100644
index afd45c089585..000000000000
--- a/Documentation/devicetree/bindings/mfd/st,stpmic1.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-* STMicroelectronics STPMIC1 Power Management IC
-
-Required properties:
-- compatible: : "st,stpmic1"
-- reg: : The I2C slave address for the STPMIC1 chip.
-- interrupts: : The interrupt line the device is connected to.
-- #interrupt-cells: : Should be 1.
-- interrupt-controller: : Marks the device node as an interrupt controller.
- Interrupt numbers are defined at
- dt-bindings/mfd/st,stpmic1.h.
-
-STPMIC1 consists in a varied group of sub-devices.
-Each sub-device binding is be described in own documentation file.
-
-Device Description
------- ------------
-st,stpmic1-onkey : Power on key, see ../input/st,stpmic1-onkey.txt
-st,stpmic1-regulators : Regulators, see ../regulator/st,stpmic1-regulator.txt
-st,stpmic1-wdt : Watchdog, see ../watchdog/st,stpmic1-wdt.txt
-
-Example:
-
-#include <dt-bindings/mfd/st,stpmic1.h>
-
-pmic: pmic@33 {
- compatible = "st,stpmic1";
- reg = <0x33>;
- interrupt-parent = <&gpioa>;
- interrupts = <0 2>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
-
- onkey {
- compatible = "st,stpmic1-onkey";
- interrupts = <IT_PONKEY_F 0>,<IT_PONKEY_R 1>;
- interrupt-names = "onkey-falling", "onkey-rising";
- power-off-time-sec = <10>;
- };
-
- watchdog {
- compatible = "st,stpmic1-wdt";
- };
-
- regulators {
- compatible = "st,stpmic1-regulators";
-
- vdd_core: buck1 {
- regulator-name = "vdd_core";
- regulator-boot-on;
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1200000>;
- };
- vdd: buck3 {
- regulator-name = "vdd";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- regulator-pull-down;
- };
- };
diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
new file mode 100644
index 000000000000..d9ad9260e348
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
@@ -0,0 +1,339 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/st,stpmic1.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STPMIC1 Power Management IC bindings
+
+description: STMicroelectronics STPMIC1 Power Management IC
+
+maintainers:
+ - Pascal Paillet <p.paillet@st.com>
+
+properties:
+ compatible:
+ const: st,stpmic1
+
+ reg:
+ const: 0x33
+
+ interrupts:
+ maxItems: 1
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupt-controller: true
+
+ onkey:
+ type: object
+
+ allOf:
+ - $ref: ../input/input.yaml
+
+ properties:
+ compatible:
+ const: st,stpmic1-onkey
+
+ interrupts:
+ items:
+ - description: onkey-falling, raised when onkey is pressed (IT_PONKEY_F of the PMIC)
+ - description: onkey-rising, raised when onkey is released (IT_PONKEY_R of the PMIC)
+
+ interrupt-names:
+ items:
+ - const: onkey-falling
+ - const: onkey-rising
+
+ st,onkey-clear-cc-flag:
+ description: onkey is able to power on after an over-current shutdown event.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ st,onkey-pu-inactive:
+ description: onkey pull up is not active
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ power-off-time-sec:
+ minimum: 1
+ maximum: 16
+
+ required:
+ - compatible
+ - interrupts
+ - interrupt-names
+
+ additionalProperties: false
+
+ watchdog:
+ type: object
+
+ allOf:
+ - $ref: ../watchdog/watchdog.yaml
+
+ properties:
+ compatible:
+ const: st,stpmic1-wdt
+
+ timeout-sec: true
+
+ required:
+ - compatible
+
+ additionalProperties: false
+
+ regulators:
+ type: object
+
+ description: |
+ Available Regulators in STPMIC1 device are:
+ - buck1 for Buck BUCK1
+ - buck2 for Buck BUCK2
+ - buck3 for Buck BUCK3
+ - buck4 for Buck BUCK4
+ - ldo1 for LDO LDO1
+ - ldo2 for LDO LDO2
+ - ldo3 for LDO LDO3
+ - ldo4 for LDO LDO4
+ - ldo5 for LDO LDO5
+ - ldo6 for LDO LDO6
+ - vref_ddr for LDO Vref DDR
+ - boost for Buck BOOST
+ - pwr_sw1 for VBUS_OTG switch
+ - pwr_sw2 for SW_OUT switch
+ Switches are fixed voltage regulators with only enable/disable capability.
+
+ properties:
+ compatible:
+ const: st,stpmic1-regulators
+
+ ldo3:
+ type: object
+
+ properties:
+ interrupts:
+ maxItems: 1
+
+ st,mask-reset:
+ description: mask reset for this regulator;
+ the regulator configuration is maintained during PMIC reset.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ regulator-name: true
+ regulator-boot-on: true
+ regulator-always-on: true
+ regulator-min-microvolt: true
+ regulator-max-microvolt: true
+ regulator-allow-bypass: true
+ regulator-over-current-protection: true
+
+ additionalProperties: false
+
+ ldo4:
+ type: object
+
+ properties:
+ interrupts:
+ maxItems: 1
+
+ st,mask-reset:
+ description: mask reset for this regulator;
+ the regulator configuration is maintained during PMIC reset.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ regulator-name: true
+ regulator-boot-on: true
+ regulator-always-on: true
+ regulator-over-current-protection: true
+
+ additionalProperties: false
+
+ vref_ddr:
+ type: object
+
+ properties:
+ interrupts:
+ maxItems: 1
+
+ st,mask-reset:
+ description: mask reset for this regulator;
+ the regulator configuration is maintained during PMIC reset.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ regulator-name: true
+ regulator-boot-on: true
+ regulator-always-on: true
+
+ additionalProperties: false
+
+ boost:
+ type: object
+
+ properties:
+ interrupts:
+ maxItems: 1
+
+ st,mask-reset:
+ description: mask reset for this regulator;
+ the regulator configuration is maintained during PMIC reset.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ regulator-name: true
+ regulator-boot-on: true
+ regulator-always-on: true
+ regulator-over-current-protection: true
+
+ additionalProperties: false
+
+ patternProperties:
+ "^(buck[1-4]|ldo[1-6]|boost|pwr_sw[1-2])-supply$":
+ description: STPMIC1 voltage regulators supplies
+
+ "^(buck[1-4]|ldo[1-6]|boost|vref_ddr|pwr_sw[1-2])$":
+ allOf:
+ - $ref: ../regulator/regulator.yaml
+
+ "^ldo[1-2,5-6]$":
+ type: object
+
+ properties:
+ interrupts:
+ maxItems: 1
+
+ st,mask-reset:
+ description: mask reset for this regulator;
+ the regulator configuration is maintained during PMIC reset.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ regulator-name: true
+ regulator-boot-on: true
+ regulator-always-on: true
+ regulator-min-microvolt: true
+ regulator-max-microvolt: true
+ regulator-over-current-protection: true
+ regulator-enable-ramp-delay: true
+
+ additionalProperties: false
+
+ "^buck[1-4]$":
+ type: object
+
+ properties:
+ interrupts:
+ maxItems: 1
+
+ st,mask-reset:
+ description: mask reset for this regulator;
+ the regulator configuration is maintained during PMIC reset.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ regulator-name: true
+ regulator-boot-on: true
+ regulator-always-on: true
+ regulator-min-microvolt: true
+ regulator-max-microvolt: true
+ regulator-initial-mode: true
+ regulator-pull-down: true
+ regulator-over-current-protection: true
+ regulator-enable-ramp-delay: true
+
+ additionalProperties: false
+
+ "^pwr_sw[1-2]$":
+ type: object
+
+ properties:
+ interrupts:
+ maxItems: 1
+
+ regulator-name: true
+ regulator-boot-on: true
+ regulator-always-on: true
+ regulator-over-current-protection: true
+ regulator-active-discharge: true
+
+ additionalProperties: false
+
+ required:
+ - compatible
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#interrupt-cells"
+ - interrupt-controller
+
+examples:
+ - |
+ #include <dt-bindings/mfd/st,stpmic1.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupt-parent = <&gpioa>;
+ interrupts = <0 2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 1>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ power-off-time-sec = <10>;
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ };
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+
+ ldo6-supply = <&v3v3>;
+
+ buck1 {
+ regulator-name = "vdd_core";
+ interrupts = <IT_CURLIM_BUCK1 0>;
+ st,mask-reset;
+ regulator-boot-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-pull-down;
+ };
+
+ buck4 {
+ regulator-name = "v3v3";
+ interrupts = <IT_CURLIM_BUCK4 0>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-over-current-protection;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml
index 0ea21a6f70b4..38ab0499102d 100644
--- a/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml
+++ b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml
@@ -38,6 +38,8 @@ required:
- reg
- interrupts
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml b/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml
new file mode 100644
index 000000000000..2f45dd0d04db
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml
@@ -0,0 +1,143 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/cdns,sdhci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence SD/SDIO/eMMC Host Controller (SD4HC)
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+ - Piotr Sroka <piotrs@cadence.com>
+
+allOf:
+ - $ref: mmc-controller.yaml
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - socionext,uniphier-sd4hc
+ - const: cdns,sd4hc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ # PHY DLL input delays:
+ # They are used to delay the data valid window and align the window to the
+ # sampling clock. The delay starts from 5ns (for a delay parameter equal to 0)
+ # and it is increased by 2.5ns in each step.
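+ # For example, a property value of 4 selects an input delay of roughly
+ # 5 ns + 4 * 2.5 ns = 15 ns.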
+
+ cdns,phy-input-delay-sd-highspeed:
+ description: Value of the delay in the input path for SD high-speed timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ cdns,phy-input-delay-legacy:
+ description: Value of the delay in the input path for legacy timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ cdns,phy-input-delay-sd-uhs-sdr12:
+ description: Value of the delay in the input path for SD UHS SDR12 timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ cdns,phy-input-delay-sd-uhs-sdr25:
+ description: Value of the delay in the input path for SD UHS SDR25 timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ cdns,phy-input-delay-sd-uhs-sdr50:
+ description: Value of the delay in the input path for SD UHS SDR50 timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ cdns,phy-input-delay-sd-uhs-ddr50:
+ description: Value of the delay in the input path for SD UHS DDR50 timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ cdns,phy-input-delay-mmc-highspeed:
+ description: Value of the delay in the input path for MMC high-speed timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ cdns,phy-input-delay-mmc-ddr:
+ description: Value of the delay in the input path for eMMC high-speed DDR timing
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x1f
+
+ # PHY DLL clock delays:
+ # Each delay property represents the fraction of the clock period.
+ # The approximate delay value will be
+ # (<delay property value>/128)*sdmclk_clock_period.
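+ # For example, a property value of 64 delays the clock by half of the
+ # sdmclk period (64 / 128 = 0.5).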
+
+ cdns,phy-dll-delay-sdclk:
+ description: |
+ Value of the delay introduced on the sdclk output for all modes except
+ HS200, HS400 and HS400_ES.
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x7f
+
+ cdns,phy-dll-delay-sdclk-hsmmc:
+ description: |
+ Value of the delay introduced on the sdclk output for HS200, HS400 and
+ HS400_ES speed modes.
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x7f
+
+ cdns,phy-dll-delay-strobe:
+ description: |
+ Value of the delay introduced on the dat_strobe input used in
+ HS400 / HS400_ES speed modes.
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ - maximum: 0x7f
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ emmc: mmc@5a000000 {
+ compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
+ reg = <0x5a000000 0x400>;
+ interrupts = <0 78 4>;
+ clocks = <&clk 4>;
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ cdns,phy-dll-delay-sdclk = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt b/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
deleted file mode 100644
index fa423c277853..000000000000
--- a/Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-* Cadence SD/SDIO/eMMC Host Controller
-
-Required properties:
-- compatible: should be one of the following:
- "cdns,sd4hc" - default of the IP
- "socionext,uniphier-sd4hc" - for Socionext UniPhier SoCs
-- reg: offset and length of the register set for the device.
-- interrupts: a single interrupt specifier.
-- clocks: phandle to the input clock.
-
-Optional properties:
-For eMMC configuration, supported speed modes are not indicated by the SDHCI
-Capabilities Register. Instead, the following properties should be specified
-if supported. See mmc.txt for details.
-- mmc-ddr-1_8v
-- mmc-ddr-1_2v
-- mmc-hs200-1_8v
-- mmc-hs200-1_2v
-- mmc-hs400-1_8v
-- mmc-hs400-1_2v
-
-Some PHY delays can be configured by following properties.
-PHY DLL input delays:
-They are used to delay the data valid window, and align the window
-to sampling clock. The delay starts from 5ns (for delay parameter equal to 0)
-and it is increased by 2.5ns in each step.
-- cdns,phy-input-delay-sd-highspeed:
- Value of the delay in the input path for SD high-speed timing
- Valid range = [0:0x1F].
-- cdns,phy-input-delay-legacy:
- Value of the delay in the input path for legacy timing
- Valid range = [0:0x1F].
-- cdns,phy-input-delay-sd-uhs-sdr12:
- Value of the delay in the input path for SD UHS SDR12 timing
- Valid range = [0:0x1F].
-- cdns,phy-input-delay-sd-uhs-sdr25:
- Value of the delay in the input path for SD UHS SDR25 timing
- Valid range = [0:0x1F].
-- cdns,phy-input-delay-sd-uhs-sdr50:
- Value of the delay in the input path for SD UHS SDR50 timing
- Valid range = [0:0x1F].
-- cdns,phy-input-delay-sd-uhs-ddr50:
- Value of the delay in the input path for SD UHS DDR50 timing
- Valid range = [0:0x1F].
-- cdns,phy-input-delay-mmc-highspeed:
- Value of the delay in the input path for MMC high-speed timing
- Valid range = [0:0x1F].
-- cdns,phy-input-delay-mmc-ddr:
- Value of the delay in the input path for eMMC high-speed DDR timing
- Valid range = [0:0x1F].
-
-PHY DLL clock delays:
-Each delay property represents the fraction of the clock period.
-The approximate delay value will be
-(<delay property value>/128)*sdmclk_clock_period.
-- cdns,phy-dll-delay-sdclk:
- Value of the delay introduced on the sdclk output
- for all modes except HS200, HS400 and HS400_ES.
- Valid range = [0:0x7F].
-- cdns,phy-dll-delay-sdclk-hsmmc:
- Value of the delay introduced on the sdclk output
- for HS200, HS400 and HS400_ES speed modes.
- Valid range = [0:0x7F].
-- cdns,phy-dll-delay-strobe:
- Value of the delay introduced on the dat_strobe input
- used in HS400 / HS400_ES speed modes.
- Valid range = [0:0x7F].
-
-Example:
- emmc: sdhci@5a000000 {
- compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
- reg = <0x5a000000 0x400>;
- interrupts = <0 78 4>;
- clocks = <&clk 4>;
- bus-width = <8>;
- mmc-ddr-1_8v;
- mmc-hs200-1_8v;
- mmc-hs400-1_8v;
- cdns,phy-dll-delay-sdclk = <0>;
- };
diff --git a/Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml b/Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml
new file mode 100644
index 000000000000..cdfac9b4411b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/socionext,uniphier-sd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier SD/SDIO/eMMC controller
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ compatible:
+ description: version 2.91, 3.1, 3.1.1, respectively
+ enum:
+ - socionext,uniphier-sd-v2.91
+ - socionext,uniphier-sd-v3.1
+ - socionext,uniphier-sd-v3.1.1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ reset-names:
+ description: |
+ There are at most three reset signals:
+ host: mandatory for all variants
+ bridge: exists only for version 2.91
+ hw: optional, exists if the eMMC hw reset line is available
+ oneOf:
+ - const: host
+ - items:
+ - const: host
+ - const: bridge
+ - items:
+ - const: host
+ - const: hw
+ - items:
+ - const: host
+ - const: bridge
+ - const: hw
+
+ resets:
+ minItems: 1
+ maxItems: 3
+
+allOf:
+ - $ref: mmc-controller.yaml
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-sd-v2.91
+ then:
+ properties:
+ reset-names:
+ contains:
+ const: bridge
+ else:
+ properties:
+ reset-names:
+ not:
+ contains:
+ const: bridge
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - reset-names
+ - resets
+
+examples:
+ - |
+ sd: mmc@5a400000 {
+ compatible = "socionext,uniphier-sd-v2.91";
+ reg = <0x5a400000 0x200>;
+ interrupts = <0 76 4>;
+ pinctrl-names = "default", "uhs";
+ pinctrl-0 = <&pinctrl_sd>;
+ pinctrl-1 = <&pinctrl_sd_uhs>;
+ clocks = <&mio_clk 0>;
+ reset-names = "host", "bridge";
+ resets = <&mio_rst 0>, <&mio_rst 3>;
+ dma-names = "rx-tx";
+ dmas = <&dmac 4>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ };
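
For completeness, a sketch of a non-v2.91 instance using the optional eMMC hw
reset line, where "bridge" must be absent per the schema above (the labels and
reset specifiers are illustrative assumptions):

    emmc: mmc@5a000000 {
        compatible = "socionext,uniphier-sd-v3.1.1";
        reg = <0x5a000000 0x800>;
        interrupts = <0 78 4>;
        clocks = <&sd_clk 0>;
        reset-names = "host", "hw";
        resets = <&sd_rst 0>, <&emmc_hw_rst 0>;
        bus-width = <8>;
        non-removable;
        cap-mmc-hw-reset;
    };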
diff --git a/Documentation/devicetree/bindings/mmc/uniphier-sd.txt b/Documentation/devicetree/bindings/mmc/uniphier-sd.txt
deleted file mode 100644
index e1d658755722..000000000000
--- a/Documentation/devicetree/bindings/mmc/uniphier-sd.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-UniPhier SD/eMMC controller
-
-Required properties:
-- compatible: should be one of the following:
- "socionext,uniphier-sd-v2.91" - IP version 2.91
- "socionext,uniphier-sd-v3.1" - IP version 3.1
- "socionext,uniphier-sd-v3.1.1" - IP version 3.1.1
-- reg: offset and length of the register set for the device.
-- interrupts: a single interrupt specifier.
-- clocks: a single clock specifier of the controller clock.
-- reset-names: should contain the following:
- "host" - mandatory for all versions
- "bridge" - should exist only for "socionext,uniphier-sd-v2.91"
- "hw" - should exist if eMMC hw reset line is available
-- resets: a list of reset specifiers, corresponding to the reset-names
-
-Optional properties:
-- pinctrl-names: if present, should contain the following:
- "default" - should exist for all instances
- "uhs" - should exist for SD instance with UHS support
-- pinctrl-0: pin control state for the default mode
-- pinctrl-1: pin control state for the UHS mode
-- dma-names: should be "rx-tx" if present.
- This property can exist only for "socionext,uniphier-sd-v2.91".
-- dmas: a single DMA channel specifier
- This property can exist only for "socionext,uniphier-sd-v2.91".
-- bus-width: see mmc.txt
-- cap-sd-highspeed: see mmc.txt
-- cap-mmc-highspeed: see mmc.txt
-- sd-uhs-sdr12: see mmc.txt
-- sd-uhs-sdr25: see mmc.txt
-- sd-uhs-sdr50: see mmc.txt
-- cap-mmc-hw-reset: should exist if reset-names contains "hw". see mmc.txt
-- non-removable: see mmc.txt
-
-Example:
-
- sd: sdhc@5a400000 {
- compatible = "socionext,uniphier-sd-v2.91";
- reg = <0x5a400000 0x200>;
- interrupts = <0 76 4>;
- pinctrl-names = "default", "uhs";
- pinctrl-0 = <&pinctrl_sd>;
- pinctrl-1 = <&pinctrl_sd_uhs>;
- clocks = <&mio_clk 0>;
- reset-names = "host", "bridge";
- resets = <&mio_rst 0>, <&mio_rst 3>;
- dma-names = "rx-tx";
- dmas = <&dmac 4>;
- bus-width = <4>;
- cap-sd-highspeed;
- sd-uhs-sdr12;
- sd-uhs-sdr25;
- sd-uhs-sdr50;
- };
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 82156dc8f304..05651a654c66 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -35,11 +35,11 @@ Required properties:
(optional) NAND flash cache range (if at non-standard offset)
- reg-names : a list of the names corresponding to the previous register
ranges. Should contain "nand" and (optionally)
- "flash-dma" and/or "nand-cache".
-- interrupts : The NAND CTLRDY interrupt and (if Flash DMA is available)
- FLASH_DMA_DONE
-- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done", if broken out as
- individual interrupts.
+ "flash-dma" or "flash-edu" and/or "nand-cache".
+- interrupts : The NAND CTLRDY interrupt, (if Flash DMA is available)
+ FLASH_DMA_DONE, and (if EDU is available and used) FLASH_EDU_DONE
+- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done" or "flash_edu_done",
+ if broken out as individual interrupts.
May be "nand", if the SoC has the individual NAND
interrupts multiplexed behind another custom piece of
hardware
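
A sketch of a controller node wired up for EDU (the register offsets and
interrupt numbers below are illustrative assumptions, not from this binding):

    nand-controller@41b800 {
        compatible = "brcm,brcmnand-v7.1", "brcm,brcmnand";
        reg = <0x41b800 0x600>, <0x41bc00 0x24>;
        reg-names = "nand", "flash-edu";
        interrupts = <30>, <31>;
        interrupt-names = "nand_ctlrdy", "flash_edu_done";
    };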
diff --git a/Documentation/devicetree/bindings/mtd/denali,nand.yaml b/Documentation/devicetree/bindings/mtd/denali,nand.yaml
new file mode 100644
index 000000000000..46e6b6726bc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/denali,nand.yaml
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/denali,nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Denali NAND controller
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ compatible:
+ enum:
+ - altr,socfpga-denali-nand
+ - socionext,uniphier-denali-nand-v5a
+ - socionext,uniphier-denali-nand-v5b
+
+ reg-names:
+ description: |
+ There are two register regions:
+ nand_data: host data/command interface
+ denali_reg: register interface
+ items:
+ - const: nand_data
+ - const: denali_reg
+
+ reg:
+ minItems: 2
+ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ description: |
+ There are three clocks:
+ nand: controller core clock
+ nand_x: bus interface clock
+ ecc: ECC circuit clock
+ items:
+ - const: nand
+ - const: nand_x
+ - const: ecc
+
+ clocks:
+ minItems: 3
+ maxItems: 3
+
+ reset-names:
+ description: |
+ There are two optional resets:
+ nand: controller core reset
+ reg: register reset
+ oneOf:
+ - items:
+ - const: nand
+ - const: reg
+ - const: nand
+ - const: reg
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+allOf:
+ - $ref: nand-controller.yaml
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: altr,socfpga-denali-nand
+ then:
+ patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ properties:
+ nand-ecc-strength:
+ enum:
+ - 8
+ - 15
+ nand-ecc-step-size:
+ enum:
+ - 512
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-denali-nand-v5a
+ then:
+ patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ properties:
+ nand-ecc-strength:
+ enum:
+ - 8
+ - 16
+ - 24
+ nand-ecc-step-size:
+ enum:
+ - 1024
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-denali-nand-v5b
+ then:
+ patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ properties:
+ nand-ecc-strength:
+ enum:
+ - 8
+ - 16
+ nand-ecc-step-size:
+ enum:
+ - 1024
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clock-names
+ - clocks
+
+examples:
+ - |
+ nand-controller@ff900000 {
+ compatible = "altr,socfpga-denali-nand";
+ reg-names = "nand_data", "denali_reg";
+ reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
+ interrupts = <0 144 4>;
+ clock-names = "nand", "nand_x", "ecc";
+ clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
+ reset-names = "nand", "reg";
+ resets = <&nand_rst>, <&nand_reg_rst>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ };
+ };
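
The per-compatible ECC constraints above can be exercised in the chip node;
for instance, for "altr,socfpga-denali-nand" the following values satisfy the
schema (a sketch, not an exhaustive configuration):

    nand@0 {
        reg = <0>;
        nand-ecc-strength = <8>;
        nand-ecc-step-size = <512>;
    };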
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
deleted file mode 100644
index 98916a84bbf6..000000000000
--- a/Documentation/devicetree/bindings/mtd/denali-nand.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-* Denali NAND controller
-
-Required properties:
- - compatible : should be one of the following:
- "altr,socfpga-denali-nand" - for Altera SOCFPGA
- "socionext,uniphier-denali-nand-v5a" - for Socionext UniPhier (v5a)
- "socionext,uniphier-denali-nand-v5b" - for Socionext UniPhier (v5b)
- - reg : should contain registers location and length for data and reg.
- - reg-names: Should contain the reg names "nand_data" and "denali_reg"
- - #address-cells: should be 1. The cell encodes the chip select connection.
- - #size-cells : should be 0.
- - interrupts : The interrupt number.
- - clocks: should contain phandle of the controller core clock, the bus
- interface clock, and the ECC circuit clock.
- - clock-names: should contain "nand", "nand_x", "ecc"
-
-Optional properties:
- - resets: may contain phandles to the controller core reset, the register
- reset
- - reset-names: may contain "nand", "reg"
-
-Sub-nodes:
- Sub-nodes represent available NAND chips.
-
- Required properties:
- - reg: should contain the bank ID of the controller to which each chip
- select is connected.
-
- Optional properties:
- - nand-ecc-step-size: see nand-controller.yaml for details.
- If present, the value must be
- 512 for "altr,socfpga-denali-nand"
- 1024 for "socionext,uniphier-denali-nand-v5a"
- 1024 for "socionext,uniphier-denali-nand-v5b"
- - nand-ecc-strength: see nand-controller.yaml for details. Valid values are:
- 8, 15 for "altr,socfpga-denali-nand"
- 8, 16, 24 for "socionext,uniphier-denali-nand-v5a"
- 8, 16 for "socionext,uniphier-denali-nand-v5b"
- - nand-ecc-maximize: see nand-controller.yaml for details
-
-The chip nodes may optionally contain sub-nodes describing partitions of the
-address space. See partition.txt for more detail.
-
-Examples:
-
-nand: nand@ff900000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "altr,socfpga-denali-nand";
- reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
- reg-names = "nand_data", "denali_reg";
- clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
- clock-names = "nand", "nand_x", "ecc";
- resets = <&nand_rst>, <&nand_reg_rst>;
- reset-names = "nand", "reg";
- interrupts = <0 144 4>;
-
- nand@0 {
- reg = <0>;
- }
-};
diff --git a/Documentation/devicetree/bindings/mtd/nand-macronix.txt b/Documentation/devicetree/bindings/mtd/nand-macronix.txt
new file mode 100644
index 000000000000..ffab28a2c4d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nand-macronix.txt
@@ -0,0 +1,27 @@
+Macronix NANDs Device Tree Bindings
+-----------------------------------
+
+Macronix NANDs support randomizer operation for scrambling user data,
+which can be enabled with a SET_FEATURE command. The penalties of using
+the randomizer are that subpage accesses are prohibited and the program
+operation takes longer, i.e., tPROG rises from 300us to 340us when the
+randomizer is enabled. Enabling the randomizer is a one-time, persistent
+and non-reversible operation.
+
+For high-reliability use cases, if subpage write is not available with
+hardware ECC and is not enabled at the UBI level, then enabling the
+randomizer is recommended. This is done by adding a dedicated property
+to the NAND chip child nodes.
+
+Required NAND chip property in child nodes:
+- mxic,enable-randomizer-otp: boolean flag that enables the randomizer
+
+Example:
+
+ nand: nand-controller@unit-address {
+
+ nand@0 {
+ reg = <0>;
+ mxic,enable-randomizer-otp;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
index dd258674633c..a7d57ba5f2ac 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
@@ -20,7 +20,7 @@ Required properties:
Optional properties:
- - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
+ - max-speed: see Documentation/devicetree/bindings/serial/serial.yaml
- shutdown-gpios: GPIO specifier, used to enable the BT module
- device-wakeup-gpios: GPIO specifier, used to wakeup the controller
- host-wakeup-gpios: GPIO specifier, used to wakeup the host processor.
diff --git a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
new file mode 100644
index 000000000000..cccf8202c8f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/bosch,m_can.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bosch MCAN controller Bindings
+
+description: Bosch MCAN controller for CAN bus
+
+maintainers:
+ - Sriram Dash <sriram.dash@samsung.com>
+
+properties:
+ compatible:
+ const: bosch,m_can
+
+ reg:
+ items:
+ - description: M_CAN registers map
+ - description: message RAM
+
+ reg-names:
+ items:
+ - const: m_can
+ - const: message_ram
+
+ interrupts:
+ items:
+ - description: interrupt line0
+ - description: interrupt line1
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - const: int0
+ - const: int1
+ minItems: 1
+ maxItems: 2
+
+ clocks:
+ items:
+ - description: peripheral clock
+ - description: bus clock
+
+ clock-names:
+ items:
+ - const: hclk
+ - const: cclk
+
+ bosch,mram-cfg:
+ description: |
+ Message RAM configuration data.
+ Multiple M_CAN instances can share the same Message RAM
+ and the number of each element (e.g. an Rx FIFO or Tx Buffer)
+ in Message RAM is also configurable, so this property tells
+ the driver how the shared or private Message RAM is used
+ by this M_CAN controller.
+
+ The format should be as follows:
+ <offset sidf_elems xidf_elems rxf0_elems rxf1_elems rxb_elems txe_elems txb_elems>
+ The 'offset' is an address offset of the Message RAM where
+ the following elements start from. This is usually set to
+ 0x0 if you're using a private Message RAM. The remaining cells
+ specify how many elements are used for each FIFO/Buffer; a decoded
+ sample value follows the example at the end of this schema.
+
+ M_CAN includes the following elements according to user manual:
+ 11-bit Filter 0-128 elements / 0-128 words
+ 29-bit Filter 0-64 elements / 0-128 words
+ Rx FIFO 0 0-64 elements / 0-1152 words
+ Rx FIFO 1 0-64 elements / 0-1152 words
+ Rx Buffers 0-64 elements / 0-1152 words
+ Tx Event FIFO 0-32 elements / 0-64 words
+ Tx Buffers 0-32 elements / 0-576 words
+
+ Please refer to 2.4.1 Message RAM Configuration in Bosch
+ M_CAN user manual for details.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/int32-array
+ - items:
+ items:
+ - description: The 'offset' is an address offset of the Message RAM
+ where the following elements start from. This is usually
+ set to 0x0 if you're using a private Message RAM.
+ default: 0
+ - description: 11-bit Filter 0-128 elements / 0-128 words
+ minimum: 0
+ maximum: 128
+ - description: 29-bit Filter 0-64 elements / 0-128 words
+ minimum: 0
+ maximum: 64
+ - description: Rx FIFO 0 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Rx FIFO 1 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Rx Buffers 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Tx Event FIFO 0-32 elements / 0-64 words
+ minimum: 0
+ maximum: 32
+ - description: Tx Buffers 0-32 elements / 0-576 words
+ minimum: 0
+ maximum: 32
+ maxItems: 1
+
+ can-transceiver:
+ $ref: can-transceiver.yaml#
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - bosch,mram-cfg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6sx-clock.h>
+ can@20e8000 {
+ compatible = "bosch,m_can";
+ reg = <0x020e8000 0x4000>, <0x02298000 0x4000>;
+ reg-names = "m_can", "message_ram";
+ interrupts = <0 114 0x04>, <0 114 0x04>;
+ interrupt-names = "int0", "int1";
+ clocks = <&clks IMX6SX_CLK_CANFD>,
+ <&clks IMX6SX_CLK_CANFD>;
+ clock-names = "hclk", "cclk";
+ bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
+
+ can-transceiver {
+ max-bitrate = <5000000>;
+ };
+ };
+
+...
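As a gloss on the example above (not an additional requirement), the
bosch,mram-cfg cells decode in the order given by the property
description:

    /* <offset sidf xidf rxf0 rxf1 rxb txe txb> */
    bosch,mram-cfg = <0x0  /* private Message RAM, elements start at 0x0 */
                      0    /* no 11-bit filter elements */
                      0    /* no 29-bit filter elements */
                      32   /* 32 Rx FIFO 0 elements */
                      0    /* Rx FIFO 1 unused */
                      0    /* no dedicated Rx buffers */
                      0    /* Tx event FIFO unused */
                      1>;  /* one Tx buffer */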
diff --git a/Documentation/devicetree/bindings/net/can/can-transceiver.txt b/Documentation/devicetree/bindings/net/can/can-transceiver.txt
deleted file mode 100644
index 0011f53ff159..000000000000
--- a/Documentation/devicetree/bindings/net/can/can-transceiver.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Generic CAN transceiver Device Tree binding
-------------------------------
-
-CAN transceiver typically limits the max speed in standard CAN and CAN FD
-modes. Typically these limitations are static and the transceivers themselves
-provide no way to detect this limitation at runtime. For this situation,
-the "can-transceiver" node can be used.
-
-Required Properties:
- max-bitrate: a positive non 0 value that determines the max
- speed that CAN/CAN-FD can run. Any other value
- will be ignored.
-
-Examples:
-
-Based on Texas Instrument's TCAN1042HGV CAN Transceiver
-
-m_can0 {
- ....
- can-transceiver {
- max-bitrate = <5000000>;
- };
- ...
-};
diff --git a/Documentation/devicetree/bindings/net/can/can-transceiver.yaml b/Documentation/devicetree/bindings/net/can/can-transceiver.yaml
new file mode 100644
index 000000000000..6396977d29e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/can-transceiver.yaml
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/can-transceiver.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: CAN transceiver Bindings
+
+description: CAN transceiver generic properties bindings
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+properties:
+ max-bitrate:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: a positive, non-zero value specifying the maximum bit rate at which CAN/CAN-FD can run.
+ minimum: 1
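A minimal usage sketch inside a CAN controller node; the 5 Mbit/s value
is the one used in the M_CAN example above:

    can-transceiver {
        max-bitrate = <5000000>;
    };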
diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt
deleted file mode 100644
index ed614383af9c..000000000000
--- a/Documentation/devicetree/bindings/net/can/m_can.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-Bosch MCAN controller Device Tree Bindings
--------------------------------------------------
-
-Required properties:
-- compatible : Should be "bosch,m_can" for M_CAN controllers
-- reg : physical base address and size of the M_CAN
- registers map and Message RAM
-- reg-names : Should be "m_can" and "message_ram"
-- interrupts : Should be the interrupt number of M_CAN interrupt
- line 0 and line 1, could be same if sharing
- the same interrupt.
-- interrupt-names : Should contain "int0" and "int1"
-- clocks : Clocks used by controller, should be host clock
- and CAN clock.
-- clock-names : Should contain "hclk" and "cclk"
-- pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
-- pinctrl-names : Names corresponding to the numbered pinctrl states
-- bosch,mram-cfg : Message RAM configuration data.
- Multiple M_CAN instances can share the same Message
- RAM and each element(e.g Rx FIFO or Tx Buffer and etc)
- number in Message RAM is also configurable,
- so this property is telling driver how the shared or
- private Message RAM are used by this M_CAN controller.
-
- The format should be as follows:
- <offset sidf_elems xidf_elems rxf0_elems rxf1_elems
- rxb_elems txe_elems txb_elems>
- The 'offset' is an address offset of the Message RAM
- where the following elements start from. This is
- usually set to 0x0 if you're using a private Message
- RAM. The remain cells are used to specify how many
- elements are used for each FIFO/Buffer.
-
- M_CAN includes the following elements according to user manual:
- 11-bit Filter 0-128 elements / 0-128 words
- 29-bit Filter 0-64 elements / 0-128 words
- Rx FIFO 0 0-64 elements / 0-1152 words
- Rx FIFO 1 0-64 elements / 0-1152 words
- Rx Buffers 0-64 elements / 0-1152 words
- Tx Event FIFO 0-32 elements / 0-64 words
- Tx Buffers 0-32 elements / 0-576 words
-
- Please refer to 2.4.1 Message RAM Configuration in
- Bosch M_CAN user manual for details.
-
-Optional Subnode:
-- can-transceiver : Can-transceiver subnode describing maximum speed
- that can be used for CAN/CAN-FD modes. See
- Documentation/devicetree/bindings/net/can/can-transceiver.txt
- for details.
-Example:
-SoC dtsi:
-m_can1: can@20e8000 {
- compatible = "bosch,m_can";
- reg = <0x020e8000 0x4000>, <0x02298000 0x4000>;
- reg-names = "m_can", "message_ram";
- interrupts = <0 114 0x04>,
- <0 114 0x04>;
- interrupt-names = "int0", "int1";
- clocks = <&clks IMX6SX_CLK_CANFD>,
- <&clks IMX6SX_CLK_CANFD>;
- clock-names = "hclk", "cclk";
- bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
-};
-
-Board dts:
-&m_can1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_m_can1>;
- status = "enabled";
-
- can-transceiver {
- max-bitrate = <5000000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index 6bdcc3f84bd3..3613c2c8f75d 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -14,7 +14,7 @@ Required properties:
the interrupt.
- interrupts: interrupt specification for data-ready.
-See Documentation/devicetree/bindings/net/can/m_can.txt for additional
+See Documentation/devicetree/bindings/net/can/bosch,m_can.yaml for additional
required property details.
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 5b88fae0307d..ff8b0f211aa1 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -22,6 +22,8 @@ Optional properties:
- fsl,err006687-workaround-present: If present indicates that the system has
the hardware workaround for ERR006687 applied and does not need a software
workaround.
+- gpr: phandle of the SoC general purpose register (GPR) node. Required for
+ wake-on-LAN on some SoCs (see the sketch below)
-interrupt-names: names of the interrupts listed in interrupts property in
the same order. The defaults if not specified are
__Number of interrupts__ __Default__
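A hedged sketch of the new gpr property in a board dts; the &fec1 and
&gpr labels and the exact GPR node are assumptions for illustration:

    &fec1 {
        /* assumed label for the SoC general purpose register syscon */
        gpr = <&gpr>;
    };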
diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
index 112011c51d5e..219bcbd0d344 100644
--- a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
@@ -42,7 +42,7 @@ child node of the serial node with UART.
Please refer to the following documents for generic properties:
- Documentation/devicetree/bindings/serial/slave-device.txt
+ Documentation/devicetree/bindings/serial/serial.yaml
Required properties:
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt
index 21c36e524993..8f5ae0b84eec 100644
--- a/Documentation/devicetree/bindings/net/qca,qca7000.txt
+++ b/Documentation/devicetree/bindings/net/qca,qca7000.txt
@@ -68,7 +68,7 @@ Required properties:
Optional properties:
- local-mac-address : see ./ethernet.txt
- current-speed : current baud rate of QCA7000 which defaults to 115200
- if absent, see also ../serial/slave-device.txt
+ if absent, see also ../serial/serial.yaml
UART Example:
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
index ac8c76369a86..976f139bb66e 100644
--- a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -37,6 +37,12 @@ properties:
description:
The physical base address and size of full the CPSW module IO range
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
ranges: true
clocks:
@@ -111,13 +117,6 @@ properties:
- reg
- phys
- mdio:
- type: object
- allOf:
- - $ref: "ti,davinci-mdio.yaml#"
- description:
- CPSW MDIO bus.
-
cpts:
type: object
description:
@@ -148,6 +147,15 @@ properties:
- clocks
- clock-names
+patternProperties:
+ "^mdio@":
+ type: object
+ allOf:
+ - $ref: "ti,davinci-mdio.yaml#"
+ description:
+ CPSW MDIO bus.
+
+
required:
- compatible
- reg
@@ -159,6 +167,8 @@ required:
- '#address-cells'
- '#size-cells'
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
@@ -174,7 +184,6 @@ examples:
#address-cells = <1>;
#size-cells = <1>;
syscon = <&scm_conf>;
- inctrl-names = "default", "sleep";
interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/net/ti-bluetooth.txt b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
index 6d03ff8c7068..f48c17b38f58 100644
--- a/Documentation/devicetree/bindings/net/ti-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
@@ -15,8 +15,7 @@ standard BT HCI protocol with additional channels for the other functions.
TI WiLink devices also have a separate WiFi interface as described in
wireless/ti,wlcore.txt.
-This bindings follows the UART slave device binding in
-../serial/slave-device.txt.
+This binding follows the UART slave device binding in ../serial/serial.yaml.
Required properties:
- compatible: should be one of the following:
diff --git a/Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml b/Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml
new file mode 100644
index 000000000000..1485d3fbabfd
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/ingenic,jz4780-efuse.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/ingenic,jz4780-efuse.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic JZ EFUSE driver bindings
+
+maintainers:
+ - PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - ingenic,jz4780-efuse
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ # Phandle to the AHB clock for the efuse.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4780-cgu.h>
+
+ efuse@134100d0 {
+ compatible = "ingenic,jz4780-efuse";
+ reg = <0x134100d0 0x2c>;
+
+ clocks = <&cgu JZ4780_CLK_AHB2>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt b/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt
index 84fdc422792e..b6acbe694ffb 100644
--- a/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt
@@ -18,7 +18,6 @@ Required properties:
- reg-names: Must be
- "elbi" External local bus interface registers
- "cfg" Meson specific registers
- - "phy" Meson PCIE PHY registers for AXG SoC Family
- "config" PCIe configuration space
- reset-gpios: The GPIO to generate PCIe PERST# assert and deassert signal.
- clocks: Must contain an entry for each entry in clock-names.
@@ -26,13 +25,13 @@ Required properties:
- "pclk" PCIe GEN 100M PLL clock
- "port" PCIe_x(A or B) RC clock gate
- "general" PCIe Phy clock
- - "mipi" PCIe_x(A or B) 100M ref clock gate for AXG SoC Family
- resets: phandle to the reset lines.
-- reset-names: must contain "phy" "port" and "apb"
- - "phy" Share PHY reset for AXG SoC Family
+- reset-names: must contain "port" and "apb"
- "port" Port A or B reset
- "apb" Share APB reset
-- phys: should contain a phandle to the shared phy for G12A SoC Family
+- phys: should contain a phandle to the PCIE phy
+- phy-names: must contain "pcie"
+
- device_type:
should be "pci". As specified in designware-pcie.txt
@@ -43,9 +42,8 @@ Example configuration:
compatible = "amlogic,axg-pcie", "snps,dw-pcie";
reg = <0x0 0xf9800000 0x0 0x400000
0x0 0xff646000 0x0 0x2000
- 0x0 0xff644000 0x0 0x2000
0x0 0xf9f00000 0x0 0x100000>;
- reg-names = "elbi", "cfg", "phy", "config";
+ reg-names = "elbi", "cfg", "config";
reset-gpios = <&gpio GPIOX_19 GPIO_ACTIVE_HIGH>;
interrupts = <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>;
#interrupt-cells = <1>;
@@ -58,17 +56,15 @@ Example configuration:
ranges = <0x82000000 0 0 0x0 0xf9c00000 0 0x00300000>;
clocks = <&clkc CLKID_USB
- &clkc CLKID_MIPI_ENABLE
&clkc CLKID_PCIE_A
&clkc CLKID_PCIE_CML_EN0>;
clock-names = "general",
- "mipi",
"pclk",
"port";
- resets = <&reset RESET_PCIE_PHY>,
- <&reset RESET_PCIE_A>,
+ resets = <&reset RESET_PCIE_A>,
<&reset RESET_PCIE_APB>;
- reset-names = "phy",
- "port",
+ reset-names = "port",
"apb";
+ phys = <&pcie_phy>;
+ phy-names = "pcie";
};
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
deleted file mode 100644
index 4a0475e2ba7e..000000000000
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Cadence PCIe endpoint controller
-
-Required properties:
-- compatible: Should contain "cdns,cdns-pcie-ep" to identify the IP used.
-- reg: Should contain the controller register base address and AXI interface
- region base address respectively.
-- reg-names: Must be "reg" and "mem" respectively.
-- cdns,max-outbound-regions: Set to maximum number of outbound regions
-
-Optional properties:
-- max-functions: Maximum number of functions that can be configured (default 1).
-- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
- than one in the list. If only one PHY listed it must manage all lanes.
-- phy-names: List of names to identify the PHY.
-
-Example:
-
-pcie@fc000000 {
- compatible = "cdns,cdns-pcie-ep";
- reg = <0x0 0xfc000000 0x0 0x01000000>,
- <0x0 0x80000000 0x0 0x40000000>;
- reg-names = "reg", "mem";
- cdns,max-outbound-regions = <16>;
- max-functions = /bits/ 8 <8>;
- phys = <&ep_phy0 &ep_phy1>;
- phy-names = "pcie-lane0","pcie-lane1";
-};
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
new file mode 100644
index 000000000000..2996f8d4777c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/cdns,cdns-pcie-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence PCIe EP Controller
+
+maintainers:
+ - Tom Joseph <tjoseph@cadence.com>
+
+allOf:
+ - $ref: "cdns-pcie.yaml#"
+ - $ref: "pci-ep.yaml#"
+
+properties:
+ compatible:
+ const: cdns,cdns-pcie-ep
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: reg
+ - const: mem
+
+required:
+ - reg
+ - reg-names
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie-ep@fc000000 {
+ compatible = "cdns,cdns-pcie-ep";
+ reg = <0x0 0xfc000000 0x0 0x01000000>,
+ <0x0 0x80000000 0x0 0x40000000>;
+ reg-names = "reg", "mem";
+ cdns,max-outbound-regions = <16>;
+ max-functions = /bits/ 8 <8>;
+ phys = <&pcie_phy0>;
+ phy-names = "pcie-phy";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
deleted file mode 100644
index 91de69c713a9..000000000000
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-* Cadence PCIe host controller
-
-This PCIe controller inherits the base properties defined in
-host-generic-pci.txt.
-
-Required properties:
-- compatible: Should contain "cdns,cdns-pcie-host" to identify the IP used.
-- reg: Should contain the controller register base address, PCIe configuration
- window base address, and AXI interface region base address respectively.
-- reg-names: Must be "reg", "cfg" and "mem" respectively.
-- #address-cells: Set to <3>
-- #size-cells: Set to <2>
-- device_type: Set to "pci"
-- ranges: Ranges for the PCI memory and I/O regions
-- #interrupt-cells: Set to <1>
-- interrupt-map-mask and interrupt-map: Standard PCI properties to define the
- mapping of the PCIe interface to interrupt numbers.
-
-Optional properties:
-- cdns,max-outbound-regions: Set to maximum number of outbound regions
- (default 32)
-- cdns,no-bar-match-nbits: Set into the no BAR match register to configure the
- number of least significant bits kept during inbound (PCIe -> AXI) address
- translations (default 32)
-- vendor-id: The PCI vendor ID (16 bits, default is design dependent)
-- device-id: The PCI device ID (16 bits, default is design dependent)
-- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
- than one in the list. If only one PHY listed it must manage all lanes.
-- phy-names: List of names to identify the PHY.
-
-Example:
-
-pcie@fb000000 {
- compatible = "cdns,cdns-pcie-host";
- device_type = "pci";
- #address-cells = <3>;
- #size-cells = <2>;
- bus-range = <0x0 0xff>;
- linux,pci-domain = <0>;
- cdns,max-outbound-regions = <16>;
- cdns,no-bar-match-nbits = <32>;
- vendor-id = /bits/ 16 <0x17cd>;
- device-id = /bits/ 16 <0x0200>;
-
- reg = <0x0 0xfb000000 0x0 0x01000000>,
- <0x0 0x41000000 0x0 0x00001000>,
- <0x0 0x40000000 0x0 0x04000000>;
- reg-names = "reg", "cfg", "mem";
-
- ranges = <0x02000000 0x0 0x42000000 0x0 0x42000000 0x0 0x1000000>,
- <0x01000000 0x0 0x43000000 0x0 0x43000000 0x0 0x0010000>;
-
- #interrupt-cells = <0x1>;
-
- interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 14 0x1
- 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 15 0x1
- 0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 16 0x1
- 0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 17 0x1>;
-
- interrupt-map-mask = <0x0 0x0 0x0 0x7>;
-
- msi-parent = <&its_pci>;
-
- phys = <&pcie_phy0>;
- phy-names = "pcie-phy";
-};
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
new file mode 100644
index 000000000000..cabbe46ff578
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/cdns,cdns-pcie-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence PCIe host controller
+
+maintainers:
+ - Tom Joseph <tjoseph@cadence.com>
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: "cdns-pcie-host.yaml#"
+
+properties:
+ compatible:
+ const: cdns,cdns-pcie-host
+
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: reg
+ - const: cfg
+ - const: mem
+
+ msi-parent: true
+
+required:
+ - reg
+ - reg-names
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pcie@fb000000 {
+ compatible = "cdns,cdns-pcie-host";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x0 0xff>;
+ linux,pci-domain = <0>;
+ cdns,max-outbound-regions = <16>;
+ cdns,no-bar-match-nbits = <32>;
+ vendor-id = <0x17cd>;
+ device-id = <0x0200>;
+
+ reg = <0x0 0xfb000000 0x0 0x01000000>,
+ <0x0 0x41000000 0x0 0x00001000>,
+ <0x0 0x40000000 0x0 0x04000000>;
+ reg-names = "reg", "cfg", "mem";
+
+ ranges = <0x02000000 0x0 0x42000000 0x0 0x42000000 0x0 0x1000000>,
+ <0x01000000 0x0 0x43000000 0x0 0x43000000 0x0 0x0010000>;
+
+ #interrupt-cells = <0x1>;
+
+ interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 14 0x1>,
+ <0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 15 0x1>,
+ <0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 16 0x1>,
+ <0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 17 0x1>;
+
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+
+ msi-parent = <&its_pci>;
+
+ phys = <&pcie_phy0>;
+ phy-names = "pcie-phy";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml b/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
new file mode 100644
index 000000000000..ab6e43b636ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/pci/cdns-pcie-host.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence PCIe Host
+
+maintainers:
+ - Tom Joseph <tjoseph@cadence.com>
+
+allOf:
+ - $ref: "/schemas/pci/pci-bus.yaml#"
+ - $ref: "cdns-pcie.yaml#"
+
+properties:
+ cdns,no-bar-match-nbits:
+ description:
+ Set into the no BAR match register to configure the number of least
+ significant bits kept during inbound (PCIe -> AXI) address translations
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 64
+ default: 32
+
+ msi-parent: true
diff --git a/Documentation/devicetree/bindings/pci/cdns-pcie.yaml b/Documentation/devicetree/bindings/pci/cdns-pcie.yaml
new file mode 100644
index 000000000000..6887ccc339cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/cdns-pcie.yaml
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/pci/cdns-pcie.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence PCIe Core
+
+maintainers:
+ - Tom Joseph <tjoseph@cadence.com>
+
+properties:
+ cdns,max-outbound-regions:
+ description: maximum number of outbound regions
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+ default: 32
+
+ phys:
+ description:
+ One PHY per lane if more than one is listed. If only one PHY is listed,
+ it must manage all lanes; see the sketch after this schema for the
+ multi-lane case.
+ minItems: 1
+ maxItems: 16
+
+ phy-names:
+ items:
+ - const: pcie-phy
+ # FIXME: names when more than 1
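Given the open FIXME, only the single-PHY name is pinned down by this
schema. For the multi-lane case, the deleted cdns,cdns-pcie-ep.txt
binding above used one PHY per lane with per-lane names; treat that
convention as historical rather than guaranteed here:

    /* two lanes, one PHY each; naming taken from the old text binding */
    phys = <&ep_phy0>, <&ep_phy1>;
    phy-names = "pcie-lane0", "pcie-lane1";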
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt b/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
new file mode 100644
index 000000000000..b40fb5d15d3d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
@@ -0,0 +1,52 @@
+NXP Layerscape PCIe Gen4 controller
+
+This PCIe controller is based on the Mobiveil PCIe IP and thus inherits all
+the common properties defined in mobiveil-pcie.txt.
+
+Required properties:
+- compatible: should contain the platform identifier such as:
+ "fsl,lx2160a-pcie"
+- reg: base addresses and lengths of the PCIe controller register blocks.
+ "csr_axi_slave": Bridge config registers
+ "config_axi_slave": PCIe controller registers
+- interrupts: A list of interrupt outputs of the controller. Must contain an
+ entry for each entry in the interrupt-names property.
+- interrupt-names: May include the following entries:
+ "intr": The interrupt that is asserted for controller interrupts
+ "aer": Asserted for the AER interrupt when the chip reports AER events
+ through a dedicated interrupt line rather than MSI/MSI-X/INTx.
+ "pme": Asserted for the PME interrupt when the chip reports PME events
+ through a dedicated interrupt line rather than MSI/MSI-X/INTx.
+- dma-coherent: Indicates that the hardware IP block can ensure the coherency
+ of the data transferred from/to the IP block. This can avoid the software
+ cache flush/invalidate actions and improve performance significantly.
+- msi-parent : See the generic MSI binding described in
+ Documentation/devicetree/bindings/interrupt-controller/msi.txt.
+
+Example:
+
+ pcie@3400000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x80 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ apio-wins = <8>;
+ ppio-wins = <8>;
+ dma-coherent;
+ bus-range = <0x0 0xff>;
+ msi-parent = <&its>;
+ ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
index b739f92da58e..bd43f3c3ece4 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt
@@ -1,11 +1,11 @@
NVIDIA Tegra PCIe controller (Synopsys DesignWare Core based)
-This PCIe host controller is based on the Synopsis Designware PCIe IP
+This PCIe controller is based on the Synopsys DesignWare PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.
+Some of the controller instances are dual-mode; they can work either in
+root port mode or in endpoint mode, but only in one mode at a time.
Required properties:
-- compatible: For Tegra19x, must contain "nvidia,tegra194-pcie".
-- device_type: Must be "pci"
- power-domains: A phandle to the node that controls power to the respective
PCIe controller and a specifier name for the PCIe controller. Following are
the specifiers for the different PCIe controllers
@@ -32,6 +32,32 @@ Required properties:
entry for each entry in the interrupt-names property.
- interrupt-names: Must include the following entries:
"intr": The Tegra interrupt that is asserted for controller interrupts
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - core
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - apb
+ - core
+- phys: Must contain a phandle to P2U PHY for each entry in phy-names.
+- phy-names: Must include an entry for each active lane.
+ "p2u-N": where N ranges from 0 to one less than the total number of lanes
+- nvidia,bpmp: Must contain a phandle to the BPMP controller node followed
+ by a controller-id. Following are the controller ids for each controller.
+ 0: C0
+ 1: C1
+ 2: C2
+ 3: C3
+ 4: C4
+ 5: C5
+- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals
+
+RC mode:
+- compatible: Tegra19x must contain "nvidia,tegra194-pcie"
+- device_type: Must be "pci" for RC mode
+- interrupt-names: Must include the following entries:
"msi": The Tegra interrupt that is asserted when an MSI is received
- bus-range: Range of bus numbers associated with this controller
- #address-cells: Address representation for root ports (must be 3)
@@ -60,27 +86,15 @@ Required properties:
- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
Please refer to the standard PCI bus binding document for a more detailed
explanation.
-- clocks: Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names: Must include the following entries:
- - core
-- resets: Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names: Must include the following entries:
- - apb
- - core
-- phys: Must contain a phandle to P2U PHY for each entry in phy-names.
-- phy-names: Must include an entry for each active lane.
- "p2u-N": where N ranges from 0 to one less than the total number of lanes
-- nvidia,bpmp: Must contain a pair of phandle to BPMP controller node followed
- by controller-id. Following are the controller ids for each controller.
- 0: C0
- 1: C1
- 2: C2
- 3: C3
- 4: C4
- 5: C5
-- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals
+
+EP mode:
+In Tegra194, only controllers C0, C4 and C5 support EP mode.
+- compatible: Tegra19x must contain "nvidia,tegra194-pcie-ep"
+- reg-names: Must include the following entries:
+ "addr_space": Used to map remote RC address space
+- reset-gpios: Must contain a phandle to a GPIO controller followed by the
+ GPIO that is used as the PERST# input signal. Please refer to the pci.txt
+ document.
Optional properties:
- pinctrl-names: A list of pinctrl state names.
@@ -104,6 +118,8 @@ Optional properties:
specified in microseconds
- nvidia,aspm-l0s-entrance-latency-us: ASPM L0s entrance latency to be
specified in microseconds
+
+RC mode:
- vpcie3v3-supply: A phandle to the regulator node that supplies 3.3V to the slot
if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
in p2972-0000 platform).
@@ -111,14 +127,21 @@ Optional properties:
if the platform has one such slot. (Ex:- x16 slot owned by C5 controller
in p2972-0000 platform).
+EP mode:
+- nvidia,refclk-select-gpios: Must contain a phandle to a GPIO controller
+ followed by the GPIO that is used to enable REFCLK to the controller from
+ the host
+
+NOTE: On Tegra194's P2972-0000 platform, only the C5 controller can operate
+in endpoint mode, because of the way the platform is designed.
+
Examples:
=========
-Tegra194:
---------
+Tegra194 RC mode:
+-----------------
pcie@14180000 {
- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+ compatible = "nvidia,tegra194-pcie";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */
@@ -169,3 +192,53 @@ Tegra194:
<&p2u_hsio_5>;
phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
};
+
+Tegra194 EP mode:
+-----------------
+
+ pcie_ep@141a0000 {
+ compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
+ reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+ 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */
+ 0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
+ reg-names = "appl", "atu_dma", "dbi", "addr_space";
+
+ num-lanes = <8>;
+ num-ib-windows = <2>;
+ num-ob-windows = <8>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&clkreq_c5_bi_dir_state>;
+
+ clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>,
+ <&bpmp TEGRA194_RESET_PEX1_CORE_5>;
+ reset-names = "apb", "core";
+
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "intr";
+
+ nvidia,bpmp = <&bpmp 5>;
+
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+
+ vddio-pex-ctl-supply = <&vdd_1v8ao>;
+
+ reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>;
+
+ nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5)
+ GPIO_ACTIVE_HIGH>;
+
+ phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
+ <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
+ <&p2u_nvhs_6>, <&p2u_nvhs_7>;
+
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
+ "p2u-5", "p2u-6", "p2u-7";
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml
new file mode 100644
index 000000000000..b3df100705b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/pci-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PCI Endpoint Controller Schema
+
+description: |
+ Common properties for PCI Endpoint Controller Nodes.
+
+maintainers:
+ - Kishon Vijay Abraham I <kishon@ti.com>
+
+properties:
+ $nodename:
+ pattern: "^pcie-ep@"
+
+ max-functions:
+ description: Maximum number of functions that can be configured
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint8
+ minimum: 1
+ default: 1
+ maximum: 255
+
+ max-link-speed:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2, 3, 4 ]
+
+ num-lanes:
+ description: maximum number of lanes
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ default: 1
+ maximum: 16
+
+required:
+ - compatible
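A minimal sketch tying these common endpoint properties together; the
compatible and reg layout are borrowed from the Cadence EP example
earlier in this series, while the speed and lane values are assumptions
within the schema's bounds:

    pcie-ep@fc000000 {
        compatible = "cdns,cdns-pcie-ep";
        reg = <0x0 0xfc000000 0x0 0x01000000>,
              <0x0 0x80000000 0x0 0x40000000>;
        reg-names = "reg", "mem";
        max-functions = /bits/ 8 <8>;
        max-link-speed = <2>;   /* assumed Gen2 link */
        num-lanes = <2>;        /* assumed x2 link width */
    };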
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml
index e5922b427342..c03b83103e87 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml
@@ -34,6 +34,8 @@ required:
- resets
- "#phy-cells"
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/sun50i-h6-ccu.h>
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
new file mode 100644
index 000000000000..88683db6cf81
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/amlogic,meson-axg-mipi-pcie-analog.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic AXG shared MIPI/PCIE analog PHY
+
+maintainers:
+ - Remi Pommarel <repk@triplefau.lt>
+
+properties:
+ compatible:
+ const: amlogic,axg-mipi-pcie-analog-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ mpphy: phy@0 {
+ compatible = "amlogic,axg-mipi-pcie-analog-phy";
+ reg = <0x0 0x0 0x0 0xc>;
+ #phy-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml
new file mode 100644
index 000000000000..086478aec946
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/amlogic,meson-axg-pcie.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic AXG PCIE PHY
+
+maintainers:
+ - Remi Pommarel <repk@triplefau.lt>
+
+properties:
+ compatible:
+ const: amlogic,axg-pcie-phy
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: analog
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - phys
+ - phy-names
+ - resets
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/reset/amlogic,meson-axg-reset.h>
+ #include <dt-bindings/phy/phy.h>
+ pcie_phy: pcie-phy@ff644000 {
+ compatible = "amlogic,axg-pcie-phy";
+ reg = <0x0 0xff644000 0x0 0x1c>;
+ resets = <&reset RESET_PCIE_PHY>;
+ phys = <&mipi_analog_phy PHY_TYPE_PCIE>;
+ phy-names = "analog";
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
index 346f9c35427c..453c083cf44c 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
@@ -44,6 +44,8 @@ required:
- reset-names
- "#phy-cells"
+additionalProperties: false
+
examples:
- |
phy@46000 {
diff --git a/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
index 0ccee64c6962..9a346d6290d9 100644
--- a/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
@@ -40,6 +40,8 @@ required:
- reg
- clocks
+additionalProperties: false
+
examples:
- |
sysconf: chiptop@e0200000 {
diff --git a/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
index 5ab436189f3b..00609ace677c 100644
--- a/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
@@ -31,6 +31,8 @@ required:
- reset-gpios
- "#phy-cells"
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml
new file mode 100644
index 000000000000..c97043eaa8fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/marvell,mmp3-usb-phy.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+# Copyright 2019,2020 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/marvell,mmp3-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MMP3 USB PHY bindings
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+ $nodename:
+ pattern: '^usb-phy@[a-f0-9]+$'
+
+ compatible:
+ const: marvell,mmp3-usb-phy
+
+ reg:
+ maxItems: 1
+ description: base address of the device
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@d4207000 {
+ compatible = "marvell,mmp3-usb-phy";
+ reg = <0xd4207000 0x40>;
+ #phy-cells = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
index 9fb682e47c29..38c5fa21f435 100644
--- a/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
+++ b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
@@ -37,6 +37,7 @@ Required properties:
- Tegra132: "nvidia,tegra132-xusb-padctl", "nvidia,tegra124-xusb-padctl"
- Tegra210: "nvidia,tegra210-xusb-padctl"
- Tegra186: "nvidia,tegra186-xusb-padctl"
+ - Tegra194: "nvidia,tegra194-xusb-padctl"
- reg: Physical base address and length of the controller's registers.
- resets: Must contain an entry for each entry in reset-names.
- reset-names: Must include the following entries:
@@ -62,6 +63,10 @@ For Tegra186:
- vclamp-usb-supply: Bias rail for USB pad. Must supply 1.8 V.
- vddio-hsic-supply: HSIC PHY power supply. Must supply 1.2 V.
+For Tegra194:
+- avdd-usb-supply: USB I/Os, VBUS, ID, REXT, D+/D- power supply. Must supply
+ 3.3 V.
+- vclamp-usb-supply: Bias rail for USB pad. Must supply 1.8 V.
Pad nodes:
==========
@@ -154,6 +159,11 @@ For Tegra210, the list of valid PHY nodes is given below:
- sata: sata-0
- functions: "usb3-ss", "sata"
+For Tegra194, the list of valid PHY nodes is given below:
+- usb2: usb2-0, usb2-1, usb2-2, usb2-3
+ - functions: "xusb"
+- usb3: usb3-0, usb3-1, usb3-2, usb3-3
+ - functions: "xusb"
Port nodes:
===========
@@ -174,6 +184,12 @@ Required properties:
- "device": for USB device mode
- "otg": for USB OTG mode
+Required properties for OTG/Peripheral capable USB2 ports:
+- usb-role-switch: Boolean property to indicate that the port supports OTG or
+ peripheral mode. If present, the port supports switching between USB host
+ and peripheral roles. A connector should be added as a subnode.
+ See usb/usb-conn-gpio.txt.
+
Optional properties:
- nvidia,internal: A boolean property whose presence determines that a port
is internal. In the absence of this property the port is considered to be
@@ -221,6 +237,11 @@ Optional properties:
is internal. In the absence of this property the port is considered to be
external.
+- maximum-speed: Only for Tegra194. A string property that specifies the
+ maximum supported speed of a usb3 port (see the fragment under Examples
+ below). Valid values are:
+ - "super-speed-plus": default, the usb3 port supports USB 3.1 Gen 2 speed.
+ - "super-speed": the usb3 port supports USB 3.1 Gen 1 speed only.
+
For Tegra124 and Tegra132, the XUSB pad controller exposes the following
ports:
- 3x USB2: usb2-0, usb2-1, usb2-2
@@ -233,6 +254,9 @@ For Tegra210, the XUSB pad controller exposes the following ports:
- 2x HSIC: hsic-0, hsic-1
- 4x super-speed USB: usb3-0, usb3-1, usb3-2, usb3-3
+For Tegra194, the XUSB pad controller exposes the following ports:
+- 4x USB2: usb2-0, usb2-1, usb2-2, usb2-3
+- 4x super-speed USB: usb3-0, usb3-1, usb3-2, usb3-3
Examples:
=========
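A hedged fragment showing the two new Tegra194 port properties inside a
pad controller's ports node; the surrounding pad controller context is
omitted and the exact port layout is an assumption for illustration:

    ports {
        usb2-0 {
            mode = "otg";
            usb-role-switch;
            /* connector subnode per usb/usb-conn-gpio.txt goes here */
        };

        usb3-0 {
            maximum-speed = "super-speed";
        };
    };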
diff --git a/Documentation/devicetree/bindings/phy/phy-mmp3-usb.txt b/Documentation/devicetree/bindings/phy/phy-mmp3-usb.txt
deleted file mode 100644
index 7183b9102f91..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-mmp3-usb.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Marvell MMP3 USB PHY
---------------------
-
-Required properties:
-- compatible: must be "marvell,mmp3-usb-phy"
-- #phy-cells: must be 0
-
-Example:
- usb-phy: usb-phy@d4207000 {
- compatible = "marvell,mmp3-usb-phy";
- reg = <0xd4207000 0x40>;
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
deleted file mode 100644
index 541f5298827c..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
+++ /dev/null
@@ -1,81 +0,0 @@
-ROCKCHIP USB2.0 PHY WITH INNO IP BLOCK
-
-Required properties (phy (parent) node):
- - compatible : should be one of the listed compatibles:
- * "rockchip,px30-usb2phy"
- * "rockchip,rk3228-usb2phy"
- * "rockchip,rk3328-usb2phy"
- * "rockchip,rk3366-usb2phy"
- * "rockchip,rk3399-usb2phy"
- * "rockchip,rv1108-usb2phy"
- - reg : the address offset of grf for usb-phy configuration.
- - #clock-cells : should be 0.
- - clock-output-names : specify the 480m output clock name.
-
-Optional properties:
- - clocks : phandle + phy specifier pair, for the input clock of phy.
- - clock-names : input clock name of phy, must be "phyclk".
- - assigned-clocks : phandle of usb 480m clock.
- - assigned-clock-parents : parent of usb 480m clock, select between
- usb-phy output 480m and xin24m.
- Refer to clk/clock-bindings.txt for generic clock
- consumer properties.
- - rockchip,usbgrf : phandle to the syscon managing the "usb general
- register files". When set driver will request its
- phandle as one companion-grf for some special SoCs
- (e.g RV1108).
- - extcon : phandle to the extcon device providing the cable state for
- the otg phy.
-
-Required nodes : a sub-node is required for each port the phy provides.
- The sub-node name is used to identify host or otg port,
- and shall be the following entries:
- * "otg-port" : the name of otg port.
- * "host-port" : the name of host port.
-
-Required properties (port (child) node):
- - #phy-cells : must be 0. See ./phy-bindings.txt for details.
- - interrupts : specify an interrupt for each entry in interrupt-names.
- - interrupt-names : a list which should be one of the following cases:
- Regular case:
- * "otg-id" : for the otg id interrupt.
- * "otg-bvalid" : for the otg vbus interrupt.
- * "linestate" : for the host/otg linestate interrupt.
- Some SoCs use one interrupt with the above muxed together, so for these
- * "otg-mux" : otg-port interrupt, which mux otg-id/otg-bvalid/linestate
- to one.
-
-Optional properties:
- - phy-supply : phandle to a regulator that provides power to VBUS.
- See ./phy-bindings.txt for details.
-
-Example:
-
-grf: syscon@ff770000 {
- compatible = "rockchip,rk3366-grf", "syscon", "simple-mfd";
- #address-cells = <1>;
- #size-cells = <1>;
-
-...
-
- u2phy: usb2-phy@700 {
- compatible = "rockchip,rk3366-usb2phy";
- reg = <0x700 0x2c>;
- #clock-cells = <0>;
- clock-output-names = "sclk_otgphy0_480m";
-
- u2phy_otg: otg-port {
- #phy-cells = <0>;
- interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "otg-id", "otg-bvalid", "linestate";
- };
-
- u2phy_host: host-port {
- #phy-cells = <0>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "linestate";
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
new file mode 100644
index 000000000000..cb71561a21b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/phy-rockchip-inno-usb2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip USB2.0 phy with inno IP block
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,px30-usb2phy
+ - rockchip,rk3228-usb2phy
+ - rockchip,rk3328-usb2phy
+ - rockchip,rk3366-usb2phy
+ - rockchip,rk3399-usb2phy
+ - rockchip,rv1108-usb2phy
+
+ reg:
+ maxItems: 1
+
+ clock-output-names:
+ description:
+ The usb 480m output clock name.
+
+ "#clock-cells":
+ const: 0
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: phyclk
+
+ assigned-clocks:
+ description:
+ Phandle of the usb 480m clock.
+
+ assigned-clock-parents:
+ description:
+ Parent of the usb 480m clock.
+ Select between usb-phy output 480m and xin24m.
+ Refer to clk/clock-bindings.txt for generic clock consumer properties.
+
+ extcon:
+ description:
+ Phandle to the extcon device providing the cable state for the otg phy.
+
+ rockchip,usbgrf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the 'usb general register files'.
+ When set, the driver will request this phandle as a companion GRF
+ for some special SoCs (e.g. RV1108).
+
+ host-port:
+ type: object
+ additionalProperties: false
+
+ properties:
+ "#phy-cells":
+ const: 0
+
+ interrupts:
+ description: host linestate interrupt
+
+ interrupt-names:
+ const: linestate
+
+ phy-supply:
+ description:
+ Phandle to a regulator that provides power to VBUS.
+ See ./phy-bindings.txt for details.
+
+ required:
+ - "#phy-cells"
+ - interrupts
+ - interrupt-names
+
+ otg-port:
+ type: object
+ additionalProperties: false
+
+ properties:
+ "#phy-cells":
+ const: 0
+
+ interrupts:
+ minItems: 1
+ maxItems: 3
+
+ interrupt-names:
+ oneOf:
+ - const: linestate
+ - const: otg-mux
+ - items:
+ - const: otg-bvalid
+ - const: otg-id
+ - const: linestate
+
+ phy-supply:
+ description:
+ Phandle to a regulator that provides power to VBUS.
+ See ./phy-bindings.txt for details.
+
+ required:
+ - "#phy-cells"
+ - interrupts
+ - interrupt-names
+
+required:
+ - compatible
+ - reg
+ - clock-output-names
+ - "#clock-cells"
+ - "#phy-cells"
+ - host-port
+ - otg-port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3399-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ u2phy0: usb2-phy@e450 {
+ compatible = "rockchip,rk3399-usb2phy";
+ reg = <0xe450 0x10>;
+ clocks = <&cru SCLK_USB2PHY0_REF>;
+ clock-names = "phyclk";
+ clock-output-names = "clk_usbphy0_480m";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+
+ u2phy0_host: host-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "linestate";
+ };
+
+ u2phy0_otg: otg-port {
+ #phy-cells = <0>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "otg-bvalid", "otg-id", "linestate";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
index 452cee1aed32..fd1982c56104 100644
--- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
@@ -159,6 +159,8 @@ required:
- "#reset-cells"
- ranges
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/soc/ti,sci_pm_domain.h>
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index 135c7dfbc180..7651a675ab2d 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -57,6 +57,8 @@ patternProperties:
required:
- compatible
+additionalProperties: false
+
examples:
- |
syscon: scu@1e6e2000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index 824f7fd1d51b..36feaf5e2dff 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -70,6 +70,8 @@ required:
- compatible
- aspeed,external-nodes
+additionalProperties: false
+
examples:
- |
apb {
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index ac8d1c30a8ed..45af29bc3202 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -92,6 +92,8 @@ patternProperties:
required:
- compatible
+additionalProperties: false
+
examples:
- |
syscon: scu@1e6e2000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
index eb39f5051159..e8abbdad7b5d 100644
--- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
@@ -38,7 +38,7 @@ Bank: 3 (A, B and C)
0xffffffff 0x7fff3ccf /* pioB */
0xffffffff 0x007fffff /* pioC */
-For each peripheral/bank we will descibe in a u32 if a pin can be
+For each peripheral/bank we will describe in a u32 whether a pin can be
configured in it by putting 1 to the pin bit (1 << pin)
Let's take the pioA on peripheral B
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt
deleted file mode 100644
index e4e01c05cf83..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-* Freescale IMX8MM IOMUX Controller
-
-Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
-for common binding part and usage.
-
-Required properties:
-- compatible: "fsl,imx8mm-iomuxc"
-- reg: should contain the base physical address and size of the iomuxc
- registers.
-
-Required properties in sub-nodes:
-- fsl,pins: each entry consists of 6 integers and represents the mux and config
- setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
- input_val> are specified using a PIN_FUNC_ID macro, which can be found in
- <arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h>. The last integer CONFIG is
- the pad setting value like pull-up on this pin. Please refer to i.MX8M Mini
- Reference Manual for detailed CONFIG settings.
-
-Examples:
-
-&uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1>;
-};
-
-iomuxc: pinctrl@30330000 {
- compatible = "fsl,imx8mm-iomuxc";
- reg = <0x0 0x30330000 0x0 0x10000>;
-
- pinctrl_uart1: uart1grp {
- fsl,pins = <
- MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
- MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
- >;
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
new file mode 100644
index 000000000000..d98a3866add8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imx8mm-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale IMX8MM IOMUX Controller
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description:
+ Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+ for the common binding parts and usage.
+
+properties:
+ compatible:
+ const: fsl,imx8mm-iomuxc
+
+ reg:
+ maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+
+ properties:
+ fsl,pins:
+ description:
+ each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
+ mux_val input_val> are specified using a PIN_FUNC_ID macro, which can
+ be found in <arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h>. The last
+ integer CONFIG is the pad setting value like pull-up on this pin. Please
+ refer to i.MX8M Mini Reference Manual for detailed CONFIG settings.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ - items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
+
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mm-iomuxc";
+ reg = <0x30330000 0x10000>;
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins =
+ <0x23C 0x4A4 0x4FC 0x0 0x0 0x140>,
+ <0x240 0x4A8 0x000 0x0 0x0 0x140>;
+ };
+ };
+
+...
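The consumer side keeps the pattern shown in the removed text binding: a
client device selects a group via the generic pinctrl properties. A sketch
(the uart2 label is an assumption; pinctrl_uart2 is the group from the
example above):

    &uart2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart2>;
    };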
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.txt
deleted file mode 100644
index 330716c971b9..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-* Freescale IMX8MN IOMUX Controller
-
-Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
-for common binding part and usage.
-
-Required properties:
-- compatible: "fsl,imx8mn-iomuxc"
-- reg: should contain the base physical address and size of the iomuxc
- registers.
-
-Required properties in sub-nodes:
-- fsl,pins: each entry consists of 6 integers and represents the mux and config
- setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
- input_val> are specified using a PIN_FUNC_ID macro, which can be found in
- <arch/arm64/boot/dts/freescale/imx8mn-pinfunc.h>. The last integer CONFIG is
- the pad setting value like pull-up on this pin. Please refer to i.MX8M Nano
- Reference Manual for detailed CONFIG settings.
-
-Examples:
-
-&uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1>;
-};
-
-iomuxc: pinctrl@30330000 {
- compatible = "fsl,imx8mn-iomuxc";
- reg = <0x0 0x30330000 0x0 0x10000>;
-
- pinctrl_uart1: uart1grp {
- fsl,pins = <
- MX8MN_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
- MX8MN_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
- MX8MN_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x140
- MX8MN_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x140
- MX8MN_IOMUXC_SD1_DATA4_GPIO2_IO6 0x19
- >;
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
new file mode 100644
index 000000000000..b9aa180e07e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imx8mn-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale IMX8MN IOMUX Controller
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description:
+ Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+ for the common binding parts and usage.
+
+properties:
+ compatible:
+ const: fsl,imx8mn-iomuxc
+
+ reg:
+ maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+
+ properties:
+ fsl,pins:
+ description:
+ each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
+ mux_val input_val> are specified using a PIN_FUNC_ID macro, which can
+ be found in <arch/arm64/boot/dts/freescale/imx8mn-pinfunc.h>. The last
+ integer CONFIG is the pad setting value like pull-up on this pin. Please
+ refer to i.MX8M Nano Reference Manual for detailed CONFIG settings.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ - items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
+
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mn-iomuxc";
+ reg = <0x30330000 0x10000>;
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins =
+ <0x23C 0x4A4 0x4FC 0x0 0x0 0x140>,
+ <0x240 0x4A8 0x000 0x0 0x0 0x140>;
+ };
+ };
+
+...
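As with the i.MX8MM binding above, a client device picks up the group through
the generic pinctrl properties; a sketch carried over from the removed text
binding (the uart2 label is an assumption):

    &uart2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart2>;
    };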
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
index 2e31e120395e..6297e78418cf 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
@@ -30,8 +30,6 @@ patternProperties:
properties:
fsl,pins:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
description:
each entry consists of 6 integers and represents the mux and config
setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
@@ -39,6 +37,22 @@ patternProperties:
be found in <arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h>. The last
integer CONFIG is the pad setting value like pull-up on this pin. Please
refer to i.MX8M Plus Reference Manual for detailed CONFIG settings.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ - items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
required:
- fsl,pins
@@ -59,10 +73,9 @@ examples:
reg = <0x30330000 0x10000>;
pinctrl_uart2: uart2grp {
- fsl,pins = <
- 0x228 0x488 0x5F0 0x0 0x6 0x49
- 0x228 0x488 0x000 0x0 0x0 0x49
- >;
+ fsl,pins =
+ <0x228 0x488 0x5F0 0x0 0x6 0x49>,
+ <0x228 0x488 0x000 0x0 0x0 0x49>;
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt
deleted file mode 100644
index 66de75090458..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-* Freescale IMX8MQ IOMUX Controller
-
-Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
-for common binding part and usage.
-
-Required properties:
-- compatible: "fsl,imx8mq-iomuxc"
-- reg: should contain the base physical address and size of the iomuxc
- registers.
-
-Required properties in sub-nodes:
-- fsl,pins: each entry consists of 6 integers and represents the mux and config
- setting for one pin. The first 5 integers <mux_reg conf_reg input_reg mux_val
- input_val> are specified using a PIN_FUNC_ID macro, which can be found in
- imx8mq-pinfunc.h under device tree source folder. The last integer CONFIG is
- the pad setting value like pull-up on this pin. Please refer to i.MX8M Quad
- Reference Manual for detailed CONFIG settings.
-
-Examples:
-
-&uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart1>;
-};
-
-iomuxc: pinctrl@30330000 {
- compatible = "fsl,imx8mq-iomuxc";
- reg = <0x0 0x30330000 0x0 0x10000>;
-
- pinctrl_uart1: uart1grp {
- fsl,pins = <
- MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49
- MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x49
- >;
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
new file mode 100644
index 000000000000..b30c704fcfa1
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imx8mq-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale IMX8MQ IOMUX Controller
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description:
+ Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+ for the common binding parts and usage.
+
+properties:
+ compatible:
+ const: fsl,imx8mq-iomuxc
+
+ reg:
+ maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+
+ properties:
+ fsl,pins:
+ description:
+ each entry consists of 6 integers and represents the mux and config
+ setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
+ mux_val input_val> are specified using a PIN_FUNC_ID macro, which can
+ be found in <arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h>. The last
+ integer CONFIG is the pad setting value like pull-up on this pin. Please
+ refer to i.MX8M Quad Reference Manual for detailed CONFIG settings.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ - items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
+
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mq-iomuxc";
+ reg = <0x30330000 0x10000>;
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins =
+ <0x234 0x49C 0x4F4 0x0 0x0 0x49>,
+ <0x238 0x4A0 0x4F4 0x0 0x0 0x49>;
+ };
+ };
+
+...
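Consumer usage follows the removed text binding; a sketch (the uart1 label is
an assumption matching the example above):

    &uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart1>;
    };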
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
new file mode 100644
index 000000000000..63d1cfe86c6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
@@ -0,0 +1,153 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,ipq6018-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. IPQ6018 TLMM block
+
+maintainers:
+ - Sricharan R <sricharan@codeaurora.org>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer block found in the
+ IPQ6018 platform.
+
+properties:
+ compatible:
+ const: qcom,ipq6018-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: Specifies the TLMM summary IRQ
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description:
+ Specifies the pin numbers and flags, as defined in
+ include/dt-bindings/interrupt-controller/irq.h
+ const: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description: Specifies the pin number and flags, as defined in
+ include/dt-bindings/gpio/gpio.h
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+#PIN CONFIGURATION NODES
+patternProperties:
+ '-pinmux$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+ Client device subnodes use the standard properties below.
+ allOf:
+ - $ref: "/schemas/pinctrl/pincfg-node.yaml"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([1-9]|[1-7][0-9]|80)$"
+ - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+ sdc2_data, qdsd_cmd, qdsd_data0, qdsd_data1, qdsd_data2,
+ qdsd_data3 ]
+ minItems: 1
+ maxItems: 4
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ adsp_ext, alsp_int, atest_bbrx0, atest_bbrx1, atest_char,
+ atest_char0, atest_char1, atest_char2, atest_char3, atest_combodac,
+ atest_gpsadc0, atest_gpsadc1, atest_tsens, atest_wlan0,
+ atest_wlan1, backlight_en, bimc_dte0, bimc_dte1, blsp1_i2c,
+ blsp2_i2c, blsp3_i2c, blsp4_i2c, blsp5_i2c, blsp6_i2c, blsp1_spi,
+ blsp1_spi_cs1, blsp1_spi_cs2, blsp1_spi_cs3, blsp2_spi,
+ blsp2_spi_cs1, blsp2_spi_cs2, blsp2_spi_cs3, blsp3_spi,
+ blsp3_spi_cs1, blsp3_spi_cs2, blsp3_spi_cs3, blsp4_spi, blsp5_spi,
+ blsp6_spi, blsp1_uart, blsp2_uart, blsp1_uim, blsp2_uim, cam1_rst,
+ cam1_standby, cam_mclk0, cam_mclk1, cci_async, cci_i2c, cci_timer0,
+ cci_timer1, cci_timer2, cdc_pdm0, codec_mad, dbg_out, display_5v,
+ dmic0_clk, dmic0_data, dsi_rst, ebi0_wrcdc, euro_us, ext_lpass,
+ flash_strobe, gcc_gp1_clk_a, gcc_gp1_clk_b, gcc_gp2_clk_a,
+ gcc_gp2_clk_b, gcc_gp3_clk_a, gcc_gp3_clk_b, gpio, gsm0_tx0,
+ gsm0_tx1, gsm1_tx0, gsm1_tx1, gyro_accl, kpsns0, kpsns1, kpsns2,
+ ldo_en, ldo_update, mag_int, mdp_vsync, modem_tsync, m_voc,
+ nav_pps, nav_tsync, pa_indicator, pbs0, pbs1, pbs2, pri_mi2s,
+ pri_mi2s_ws, prng_rosc, pwr_crypto_enabled_a, pwr_crypto_enabled_b,
+ pwr_modem_enabled_a, pwr_modem_enabled_b, pwr_nav_enabled_a,
+ pwr_nav_enabled_b, qdss_ctitrig_in_a0, qdss_ctitrig_in_a1,
+ qdss_ctitrig_in_b0, qdss_ctitrig_in_b1, qdss_ctitrig_out_a0,
+ qdss_ctitrig_out_a1, qdss_ctitrig_out_b0, qdss_ctitrig_out_b1,
+ qdss_traceclk_a, qdss_traceclk_b, qdss_tracectl_a, qdss_tracectl_b,
+ qdss_tracedata_a, qdss_tracedata_b, reset_n, sd_card, sd_write,
+ sec_mi2s, smb_int, ssbi_wtr0, ssbi_wtr1, uim1, uim2, uim3,
+ uim_batt, wcss_bt, wcss_fm, wcss_wlan, webcam1_rst ]
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+ default: 2
+ description:
+ Selects the drive strength for the specified pins, in mA.
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,ipq6018-pinctrl";
+ reg = <0x01000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 80>;
+
+ serial3-pinmux {
+ pins = "gpio44", "gpio45";
+ function = "blsp2_uart";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+ };
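A client device would then select this state through the generic pinctrl
properties, assuming a label (say serial3_pins) were attached to the
serial3-pinmux node above; the consumer node itself is a hypothetical sketch:

    serial@78b1000 {
        pinctrl-names = "default";
        pinctrl-0 = <&serial3_pins>;
    };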
diff --git a/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.txt
deleted file mode 100644
index 8173b12138ad..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-UniPhier SoCs pin controller
-
-Required properties:
-- compatible: should be one of the following:
- "socionext,uniphier-ld4-pinctrl" - for LD4 SoC
- "socionext,uniphier-pro4-pinctrl" - for Pro4 SoC
- "socionext,uniphier-sld8-pinctrl" - for sLD8 SoC
- "socionext,uniphier-pro5-pinctrl" - for Pro5 SoC
- "socionext,uniphier-pxs2-pinctrl" - for PXs2 SoC
- "socionext,uniphier-ld6b-pinctrl" - for LD6b SoC
- "socionext,uniphier-ld11-pinctrl" - for LD11 SoC
- "socionext,uniphier-ld20-pinctrl" - for LD20 SoC
- "socionext,uniphier-pxs3-pinctrl" - for PXs3 SoC
-
-Note:
-The UniPhier pinctrl should be a subnode of a "syscon" compatible node.
-
-Example:
- soc-glue@5f800000 {
- compatible = "socionext,uniphier-pro4-soc-glue",
- "simple-mfd", "syscon";
- reg = <0x5f800000 0x2000>;
-
- pinctrl: pinctrl {
- compatible = "socionext,uniphier-pro4-pinctrl";
- };
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
new file mode 100644
index 000000000000..f8a93d8680f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/socionext,uniphier-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier SoCs pin controller
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ $nodename:
+ pattern: "pinctrl"
+
+ compatible:
+ enum:
+ - socionext,uniphier-ld4-pinctrl
+ - socionext,uniphier-pro4-pinctrl
+ - socionext,uniphier-sld8-pinctrl
+ - socionext,uniphier-pro5-pinctrl
+ - socionext,uniphier-pxs2-pinctrl
+ - socionext,uniphier-ld6b-pinctrl
+ - socionext,uniphier-ld11-pinctrl
+ - socionext,uniphier-ld20-pinctrl
+ - socionext,uniphier-pxs3-pinctrl
+
+required:
+ - compatible
+
+examples:
+ - |
+ // The UniPhier pinctrl should be a subnode of a "syscon" compatible node.
+
+ soc-glue@5f800000 {
+ compatible = "socionext,uniphier-pro4-soc-glue", "simple-mfd", "syscon";
+ reg = <0x5f800000 0x2000>;
+
+ pinctrl: pinctrl {
+ compatible = "socionext,uniphier-pro4-pinctrl";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index ef4de32cb17c..46a0478cb924 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -194,6 +194,8 @@ required:
- ranges
- pins-are-numbered
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/pinctrl/stm32-pinfunc.h>
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
index d3098c924b25..6c6079fe1351 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -68,6 +68,8 @@ required:
- "#power-domain-cells"
- amlogic,ao-sysctrl
+additionalProperties: false
+
examples:
- |
pwrc: power-controller {
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
new file mode 100644
index 000000000000..bc4e037f3f73
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+# Copyright (c) 2019 Amlogic, Inc
+# Author: Jianxin Pan <jianxin.pan@amlogic.com>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/amlogic,meson-sec-pwrc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson Secure Power Domains
+
+maintainers:
+ - Jianxin Pan <jianxin.pan@amlogic.com>
+
+description: |+
+ Secure Power Domains used in Meson A1/C1 SoCs. This node should be a child
+ of the secure-monitor node.
+
+properties:
+ compatible:
+ enum:
+ - amlogic,meson-a1-pwrc
+
+ "#power-domain-cells":
+ const: 1
+
+required:
+ - compatible
+ - "#power-domain-cells"
+
+examples:
+ - |
+ secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+
+ pwrc: power-controller {
+ compatible = "amlogic,meson-a1-pwrc";
+ #power-domain-cells = <1>;
+ };
+ };
+
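A consumer references its domain with a single specifier cell, matching the
"#power-domain-cells" value above; a hedged sketch (the device node and the
domain index 0 are placeholders, not defined by this binding):

    audio-dsp@f0000000 {
        compatible = "vendor,example-dsp"; /* placeholder */
        reg = <0xf0000000 0x1000>;
        power-domains = <&pwrc 0>;
    };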
diff --git a/Documentation/devicetree/bindings/power/power-domain.yaml b/Documentation/devicetree/bindings/power/power-domain.yaml
index 6047aacd7766..ff5936e4a215 100644
--- a/Documentation/devicetree/bindings/power/power-domain.yaml
+++ b/Documentation/devicetree/bindings/power/power-domain.yaml
@@ -114,18 +114,18 @@ examples:
domain-idle-states = <&DOMAIN_PWR_DN>;
};
- DOMAIN_RET: state@0 {
- compatible = "domain-idle-state";
- reg = <0x0 0x0>;
- entry-latency-us = <1000>;
- exit-latency-us = <2000>;
- min-residency-us = <10000>;
- };
-
- DOMAIN_PWR_DN: state@1 {
- compatible = "domain-idle-state";
- reg = <0x1 0x0>;
- entry-latency-us = <5000>;
- exit-latency-us = <8000>;
- min-residency-us = <7000>;
+ domain-idle-states {
+ DOMAIN_RET: domain-retention {
+ compatible = "domain-idle-state";
+ entry-latency-us = <1000>;
+ exit-latency-us = <2000>;
+ min-residency-us = <10000>;
+ };
+
+ DOMAIN_PWR_DN: domain-pwr-dn {
+ compatible = "domain-idle-state";
+ entry-latency-us = <5000>;
+ exit-latency-us = <8000>;
+ min-residency-us = <7000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.txt b/Documentation/devicetree/bindings/power/renesas,apmu.txt
deleted file mode 100644
index 5f24586c8cf3..000000000000
--- a/Documentation/devicetree/bindings/power/renesas,apmu.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-DT bindings for the Renesas Advanced Power Management Unit
-
-Renesas R-Car and RZ/G1 SoCs utilize one or more APMU hardware units
-for CPU core power domain control including SMP boot and CPU Hotplug.
-
-Required properties:
-
-- compatible: Should be "renesas,<soctype>-apmu", "renesas,apmu" as fallback.
- Examples with soctypes are:
- - "renesas,r8a7743-apmu" (RZ/G1M)
- - "renesas,r8a7744-apmu" (RZ/G1N)
- - "renesas,r8a7745-apmu" (RZ/G1E)
- - "renesas,r8a77470-apmu" (RZ/G1C)
- - "renesas,r8a7790-apmu" (R-Car H2)
- - "renesas,r8a7791-apmu" (R-Car M2-W)
- - "renesas,r8a7792-apmu" (R-Car V2H)
- - "renesas,r8a7793-apmu" (R-Car M2-N)
- - "renesas,r8a7794-apmu" (R-Car E2)
-
-- reg: Base address and length of the I/O registers used by the APMU.
-
-- cpus: This node contains a list of CPU cores, which should match the order
- of CPU cores used by the WUPCR and PSTR registers in the Advanced Power
- Management Unit section of the device's datasheet.
-
-
-Example:
-
-This shows the r8a7791 APMU that can control CPU0 and CPU1.
-
- apmu@e6152000 {
- compatible = "renesas,r8a7791-apmu", "renesas,apmu";
- reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
- };
diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.yaml b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
new file mode 100644
index 000000000000..078b2cb40fe3
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/renesas,apmu.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas Advanced Power Management Unit
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Magnus Damm <magnus.damm@gmail.com>
+
+description:
+ Renesas R-Car Gen2 and RZ/G1 SoCs utilize one or more APMU hardware units for
+ CPU core power domain control including SMP boot and CPU Hotplug.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r8a7743-apmu # RZ/G1M
+ - renesas,r8a7744-apmu # RZ/G1N
+ - renesas,r8a7745-apmu # RZ/G1E
+ - renesas,r8a77470-apmu # RZ/G1C
+ - renesas,r8a7790-apmu # R-Car H2
+ - renesas,r8a7791-apmu # R-Car M2-W
+ - renesas,r8a7792-apmu # R-Car V2H
+ - renesas,r8a7793-apmu # R-Car M2-N
+ - renesas,r8a7794-apmu # R-Car E2
+ - const: renesas,apmu
+
+ reg:
+ maxItems: 1
+
+ cpus:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ Array of phandles pointing to CPU cores, which should match the order of
+ CPU cores used by the WUPCR and PSTR registers in the Advanced Power
+ Management Unit section of the device's datasheet.
+
+required:
+ - compatible
+ - reg
+ - cpus
+
+additionalProperties: false
+
+examples:
+ - |
+ apmu@e6152000 {
+ compatible = "renesas,r8a7791-apmu", "renesas,apmu";
+ reg = <0xe6152000 0x188>;
+ cpus = <&cpu0 &cpu1>;
+ };
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
deleted file mode 100644
index acb41fade926..000000000000
--- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-DT bindings for the Renesas R-Car (RZ/G) System Controller
-
-== System Controller Node ==
-
-The R-Car (RZ/G) System Controller provides power management for the CPU cores
-and various coprocessors.
-
-Required properties:
- - compatible: Must contain exactly one of the following:
- - "renesas,r8a7743-sysc" (RZ/G1M)
- - "renesas,r8a7744-sysc" (RZ/G1N)
- - "renesas,r8a7745-sysc" (RZ/G1E)
- - "renesas,r8a77470-sysc" (RZ/G1C)
- - "renesas,r8a774a1-sysc" (RZ/G2M)
- - "renesas,r8a774b1-sysc" (RZ/G2N)
- - "renesas,r8a774c0-sysc" (RZ/G2E)
- - "renesas,r8a7779-sysc" (R-Car H1)
- - "renesas,r8a7790-sysc" (R-Car H2)
- - "renesas,r8a7791-sysc" (R-Car M2-W)
- - "renesas,r8a7792-sysc" (R-Car V2H)
- - "renesas,r8a7793-sysc" (R-Car M2-N)
- - "renesas,r8a7794-sysc" (R-Car E2)
- - "renesas,r8a7795-sysc" (R-Car H3)
- - "renesas,r8a7796-sysc" (R-Car M3-W)
- - "renesas,r8a77961-sysc" (R-Car M3-W+)
- - "renesas,r8a77965-sysc" (R-Car M3-N)
- - "renesas,r8a77970-sysc" (R-Car V3M)
- - "renesas,r8a77980-sysc" (R-Car V3H)
- - "renesas,r8a77990-sysc" (R-Car E3)
- - "renesas,r8a77995-sysc" (R-Car D3)
- - reg: Address start and address range for the device.
- - #power-domain-cells: Must be 1.
-
-
-Example:
-
- sysc: system-controller@e6180000 {
- compatible = "renesas,r8a7791-sysc";
- reg = <0 0xe6180000 0 0x0200>;
- #power-domain-cells = <1>;
- };
-
-
-== PM Domain Consumers ==
-
-Devices residing in a power area must refer to that power area, as documented
-by the generic PM domain bindings in
-Documentation/devicetree/bindings/power/power_domain.txt.
-
-Required properties:
- - power-domains: A phandle and symbolic PM domain specifier, as defined in
- <dt-bindings/power/r8a77*-sysc.h>.
-
-
-Example:
-
- L2_CA15: cache-controller@0 {
- compatible = "cache";
- power-domains = <&sysc R8A7791_PD_CA15_SCU>;
- cache-unified;
- cache-level = <2>;
- };
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
new file mode 100644
index 000000000000..e59331e1d944
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/renesas,rcar-sysc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas R-Car and RZ/G System Controller
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Magnus Damm <magnus.damm@gmail.com>
+
+description:
+ The R-Car (RZ/G) System Controller provides power management for the CPU
+ cores and various coprocessors.
+
+properties:
+ compatible:
+ enum:
+ - renesas,r8a7743-sysc # RZ/G1M
+ - renesas,r8a7744-sysc # RZ/G1N
+ - renesas,r8a7745-sysc # RZ/G1E
+ - renesas,r8a77470-sysc # RZ/G1C
+ - renesas,r8a774a1-sysc # RZ/G2M
+ - renesas,r8a774b1-sysc # RZ/G2N
+ - renesas,r8a774c0-sysc # RZ/G2E
+ - renesas,r8a7779-sysc # R-Car H1
+ - renesas,r8a7790-sysc # R-Car H2
+ - renesas,r8a7791-sysc # R-Car M2-W
+ - renesas,r8a7792-sysc # R-Car V2H
+ - renesas,r8a7793-sysc # R-Car M2-N
+ - renesas,r8a7794-sysc # R-Car E2
+ - renesas,r8a7795-sysc # R-Car H3
+ - renesas,r8a77961-sysc # R-Car M3-W+
+ - renesas,r8a77965-sysc # R-Car M3-N
+ - renesas,r8a7796-sysc # R-Car M3-W
+ - renesas,r8a77970-sysc # R-Car V3M
+ - renesas,r8a77980-sysc # R-Car V3H
+ - renesas,r8a77990-sysc # R-Car E3
+ - renesas,r8a77995-sysc # R-Car D3
+
+ reg:
+ maxItems: 1
+
+ '#power-domain-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ // System Controller node
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,r8a7791-sysc";
+ reg = <0xe6180000 0x0200>;
+ #power-domain-cells = <1>;
+ };
+
+ - |
+ // Power Domain consumers
+ #include <dt-bindings/power/r8a7791-sysc.h>
+
+ cache-controller-0 {
+ compatible = "cache";
+ power-domains = <&sysc R8A7791_PD_CA15_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml b/Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml
index 520e07e6f21b..3412fe7e1e80 100644
--- a/Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml
+++ b/Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml
@@ -41,6 +41,8 @@ required:
- regmap
- offset
+additionalProperties: false
+
allOf:
- if:
not:
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
index d38006b1f1f4..b80772cb9f06 100644
--- a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
@@ -41,6 +41,8 @@ required:
- regmap
- offset
+additionalProperties: false
+
allOf:
- if:
not:
diff --git a/Documentation/devicetree/bindings/power/supply/max77650-charger.yaml b/Documentation/devicetree/bindings/power/supply/max77650-charger.yaml
index deef010ec535..62eeddb65aed 100644
--- a/Documentation/devicetree/bindings/power/supply/max77650-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/max77650-charger.yaml
@@ -32,3 +32,6 @@ properties:
required:
- compatible
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml b/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
index 9e21b83d717e..239b49fad805 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
+++ b/Documentation/devicetree/bindings/ptp/ptp-idtcm.yaml
@@ -55,6 +55,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
i2c@1 {
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
index 95536d83c5f2..29adff59c479 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
@@ -19,10 +19,15 @@ Required properties:
- "pwm1-8": the eight per PWM clocks for mt2712
- "pwm1-6": the six per PWM clocks for mt7622
- "pwm1-5": the five per PWM clocks for mt7623
+ - "pwm1" : the PWM1 clock for mt7629
- pinctrl-names: Must contain a "default" entry.
- pinctrl-0: One property must exist for each entry in pinctrl-names.
See pinctrl/pinctrl-bindings.txt for details of the property values.
+Optional properties:
+- assigned-clocks: Reference to the PWM clock entries.
+- assigned-clock-parents: The phandle of the parent clock of the PWM clock.
+
Example:
pwm0: pwm@11006000 {
compatible = "mediatek,mt7623-pwm";
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
index 4969a954993c..4bf62a3d5bba 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
@@ -19,6 +19,10 @@ properties:
- renesas,tpu-r8a7744 # RZ/G1N
- renesas,tpu-r8a7745 # RZ/G1E
- renesas,tpu-r8a7790 # R-Car H2
+ - renesas,tpu-r8a7791 # R-Car M2-W
+ - renesas,tpu-r8a7792 # R-Car V2H
+ - renesas,tpu-r8a7793 # R-Car M2-N
+ - renesas,tpu-r8a7794 # R-Car E2
- renesas,tpu-r8a7795 # R-Car H3
- renesas,tpu-r8a7796 # R-Car M3-W
- renesas,tpu-r8a77965 # R-Car M3-N
diff --git a/Documentation/devicetree/bindings/regulator/max77650-regulator.yaml b/Documentation/devicetree/bindings/regulator/max77650-regulator.yaml
index 7d724159f890..ce0a4021ae7f 100644
--- a/Documentation/devicetree/bindings/regulator/max77650-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/max77650-regulator.yaml
@@ -24,8 +24,11 @@ properties:
const: maxim,max77650-regulator
patternProperties:
- "^regulator@[0-3]$":
+ "^regulator-(ldo|sbb[0-2])$":
$ref: "regulator.yaml#"
required:
- compatible
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt
deleted file mode 100644
index 6189df71ea98..000000000000
--- a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-STMicroelectronics STPMIC1 Voltage regulators
-
-Regulator Nodes are optional depending on needs.
-
-Available Regulators in STPMIC1 device are:
- - buck1 for Buck BUCK1
- - buck2 for Buck BUCK2
- - buck3 for Buck BUCK3
- - buck4 for Buck BUCK4
- - ldo1 for LDO LDO1
- - ldo2 for LDO LDO2
- - ldo3 for LDO LDO3
- - ldo4 for LDO LDO4
- - ldo5 for LDO LDO5
- - ldo6 for LDO LDO6
- - vref_ddr for LDO Vref DDR
- - boost for Buck BOOST
- - pwr_sw1 for VBUS_OTG switch
- - pwr_sw2 for SW_OUT switch
-
-Switches are fixed voltage regulators with only enable/disable capability.
-
-Optional properties:
-- st,mask-reset: mask reset for this regulator: the regulator configuration
- is maintained during pmic reset.
-- regulator-over-current-protection:
- if set, all regulators are switched off in case of over-current detection
- on this regulator,
- if not set, the driver only sends an over-current event.
-- interrupts: index of current limit detection interrupt
-- <regulator>-supply: phandle to the parent supply/regulator node
- each regulator supply can be described except vref_ddr.
-- regulator-active-discharge: can be used on pwr_sw1 and pwr_sw2.
-
-Example:
-regulators {
- compatible = "st,stpmic1-regulators";
-
- ldo6-supply = <&v3v3>;
-
- vdd_core: buck1 {
- regulator-name = "vdd_core";
- interrupts = <IT_CURLIM_BUCK1 0>;
- st,mask-reset;
- regulator-pull-down;
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1200000>;
- };
-
- v3v3: buck4 {
- regulator-name = "v3v3";
- interrupts = <IT_CURLIM_BUCK4 0>;
-
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- v1v8: ldo6 {
- regulator-name = "v1v8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-over-current-protection;
- };
-};
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
new file mode 100644
index 000000000000..084960a8f17a
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/ti,omap-remoteproc.yaml
@@ -0,0 +1,324 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/ti,omap-remoteproc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OMAP4+ Remoteproc Devices
+
+maintainers:
+ - Suman Anna <s-anna@ti.com>
+
+description:
+ The OMAP family of SoCs usually has one or more slave processor sub-systems
+ that are used to offload some of the processor-intensive tasks, or to manage
+ other hardware accelerators, for achieving various system level goals.
+
+ The processor cores in the sub-system are usually behind an IOMMU, and may
+ contain additional sub-modules like Internal RAM and/or ROMs, L1 and/or L2
+ caches, an Interrupt Controller, a Cache Controller etc.
+
+ The OMAP SoCs usually have a DSP processor sub-system and/or an IPU processor
+ sub-system. The DSP processor sub-system can contain any of the TI's C64x,
+ C66x or C67x family of DSP cores as the main execution unit. The IPU processor
+ sub-system usually contains either a Dual-Core Cortex-M3 or Dual-Core
+ Cortex-M4 processors.
+
+ Each remote processor sub-system is represented as a single DT node. Each node
+ has a number of required or optional properties that enable the OS running on
+ the host processor (MPU) to perform the device management of the remote
+ processor and to communicate with the remote processor. The various properties
+ can be classified as constant or variable. The constant properties are
+ dictated by the SoC and do not change from one board to another having the
+ same SoC. Examples of constant properties include 'iommus', 'reg'. The
+ variable properties are dictated by the system integration aspects such as
+ memory on the board, or configuration used within the corresponding firmware
+ image. Examples of variable properties include 'mboxes', 'memory-region',
+ 'timers', 'watchdog-timers' etc.
+
+properties:
+ compatible:
+ enum:
+ - ti,omap4-dsp
+ - ti,omap5-dsp
+ - ti,dra7-dsp
+ - ti,omap4-ipu
+ - ti,omap5-ipu
+ - ti,dra7-ipu
+
+ iommus:
+ minItems: 1
+ maxItems: 2
+ description: |
+ phandles to OMAP IOMMU nodes that need to be programmed
+ for this remote processor to access any external RAM memory or
+ other peripheral device address spaces. This property usually
+ has only a single phandle. Multiple phandles are used only in
+ cases where the sub-system has different ports for different
+ sub-modules within the processor sub-system (e.g. DRA7 DSPs)
+ and needs the same programming in both the MMUs.
+
+ mboxes:
+ minItems: 1
+ maxItems: 2
+ description: |
+ OMAP Mailbox specifier denoting the sub-mailbox, to be used for
+ communication with the remote processor. The specifier format is
+ as per the bindings,
+ Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
+ This property should match with the sub-mailbox node used in
+ the firmware image.
+
+ clocks:
+ description: |
+ Main functional clock for the remote processor
+
+ resets:
+ description: |
+ Reset handles for the remote processor
+
+ firmware-name:
+ description: |
+ Default name of the firmware to load to the remote processor.
+
+# Optional properties:
+# --------------------
+# Some of these properties are mandatory on some SoCs, and some are optional
+# depending on the configuration of the firmware image to be executed on the
+# remote processor. The conditions are mentioned for each property.
+#
+# The following are the optional properties:
+
+ memory-region:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: |
+ phandle to the reserved memory node to be associated
+ with the remoteproc device. The reserved memory node
+ can be a CMA memory node, and should be defined as
+ per the bindings,
+ Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+ reg:
+ description: |
+ Address space for any remoteproc memories present on
+ the SoC. Should contain an entry for each value in
+ 'reg-names'. These are mandatory for all DSP and IPU
+ processors that have them (OMAP4/OMAP5 DSPs do not have
+ any RAMs).
+
+ reg-names:
+ description: |
+ Required names for each of the address spaces defined in
+ the 'reg' property. Expects the names from the following
+ list, in the specified order, each representing the corresponding
+ internal RAM memory region.
+ minItems: 1
+ maxItems: 3
+ items:
+ - const: l2ram
+ - const: l1pram
+ - const: l1dram
+
+ ti,bootreg:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ Should be a triple of the phandle to the System Control
+ Configuration region that contains the boot address
+ register, the register offset of the boot address
+ register within the System Control module, and the bit
+ shift within the register. This property is required for
+ all the DSP instances on OMAP4, OMAP5 and DRA7xx SoCs.
+
+ ti,autosuspend-delay-ms:
+ description: |
+ Custom autosuspend delay for the remoteproc in milliseconds.
+ Recommended values is preferable to be in the order of couple
+ of seconds. A negative value can also be used to disable the
+ autosuspend behavior.
+
+ ti,timers:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ One or more phandles to OMAP DMTimer nodes, that serve
+ as System/Tick timers for the OS running on the remote
+ processors. This will usually be a single timer if the
+ processor sub-system is running in SMP mode, or one per
+ core in the processor sub-system. This can also be used
+ to reserve specific timers to be dedicated to the
+ remote processors.
+
+ This property is mandatory on remote processors requiring
+ external tick wakeup, and to support Power Management
+ features. The timers to be used should match with the
+ timers used in the firmware image.
+
+ ti,watchdog-timers:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ One or more phandles to OMAP DMTimer nodes, used to
+ serve as Watchdog timers for the processor cores. This
+ will usually be one per executing processor core, even
+ if the processor sub-system is running a SMP OS.
+
+ The timers to be used should match with the watchdog
+ timers used in the firmware image.
+
+if:
+ properties:
+ compatible:
+ enum:
+ - ti,dra7-dsp
+then:
+ properties:
+ reg:
+ minItems: 3
+ maxItems: 3
+ required:
+ - reg
+ - reg-names
+ - ti,bootreg
+
+else:
+ if:
+ properties:
+ compatible:
+ enum:
+ - ti,omap4-ipu
+ - ti,omap5-ipu
+ - ti,dra7-ipu
+ then:
+ properties:
+ reg:
+ minItems: 1
+ maxItems: 1
+ ti,bootreg: false
+ required:
+ - reg
+ - reg-names
+
+ else:
+ properties:
+ reg: false
+ required:
+ - ti,bootreg
+
+required:
+ - compatible
+ - iommus
+ - mboxes
+ - clocks
+ - resets
+ - firmware-name
+
+additionalProperties: false
+
+examples:
+ - |
+
+ //Example 1: OMAP4 DSP
+
+ /* DSP Reserved Memory node */
+ #include <dt-bindings/clock/omap4.h>
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ dsp_memory_region: dsp-memory@98000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x98000000 0x800000>;
+ reusable;
+ };
+ };
+
+ /* DSP node */
+ ocp {
+ dsp: dsp {
+ compatible = "ti,omap4-dsp";
+ ti,bootreg = <&scm_conf 0x304 0>;
+ iommus = <&mmu_dsp>;
+ mboxes = <&mailbox &mbox_dsp>;
+ memory-region = <&dsp_memory_region>;
+ ti,timers = <&timer5>;
+ ti,watchdog-timers = <&timer6>;
+ clocks = <&tesla_clkctrl OMAP4_DSP_CLKCTRL 0>;
+ resets = <&prm_tesla 0>, <&prm_tesla 1>;
+ firmware-name = "omap4-dsp-fw.xe64T";
+ };
+ };
+
+ - |+
+
+ //Example 2: OMAP5 IPU
+
+ /* IPU Reserved Memory node */
+ #include <dt-bindings/clock/omap5.h>
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ipu_memory_region: ipu-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x95800000 0 0x3800000>;
+ reusable;
+ };
+ };
+
+ /* IPU node */
+ ocp {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ipu: ipu@55020000 {
+ compatible = "ti,omap5-ipu";
+ reg = <0x55020000 0x10000>;
+ reg-names = "l2ram";
+ iommus = <&mmu_ipu>;
+ mboxes = <&mailbox &mbox_ipu>;
+ memory-region = <&ipu_memory_region>;
+ ti,timers = <&timer3>, <&timer4>;
+ ti,watchdog-timers = <&timer9>, <&timer11>;
+ clocks = <&ipu_clkctrl OMAP5_MMU_IPU_CLKCTRL 0>;
+ resets = <&prm_core 2>;
+ firmware-name = "omap5-ipu-fw.xem4";
+ };
+ };
+
+ - |+
+
+ //Example 3: DRA7xx/AM57xx DSP
+
+ /* DSP1 Reserved Memory node */
+ #include <dt-bindings/clock/dra7.h>
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ };
+ };
+
+ /* DSP1 node */
+ ocp {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ dsp1: dsp@40800000 {
+ compatible = "ti,dra7-dsp";
+ reg = <0x40800000 0x48000>,
+ <0x40e00000 0x8000>,
+ <0x40f00000 0x8000>;
+ reg-names = "l2ram", "l1pram", "l1dram";
+ ti,bootreg = <&scm_conf 0x55c 0>;
+ iommus = <&mmu0_dsp1>, <&mmu1_dsp1>;
+ mboxes = <&mailbox5 &mbox_dsp1_ipc3x>;
+ memory-region = <&dsp1_memory_region>;
+ ti,timers = <&timer5>;
+ ti,watchdog-timers = <&timer10>;
+ resets = <&prm_dsp1 0>;
+ clocks = <&dsp1_clkctrl DRA7_DSP1_MMU0_DSP1_CLKCTRL 0>;
+ firmware-name = "dra7-dsp1-fw.xe66";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
index b3f57d81f007..92922d3afd14 100644
--- a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
@@ -29,6 +29,8 @@ required:
- reg
- "#reset-cells"
+additionalProperties: false
+
examples:
- |
reset-controller@c884404 {
diff --git a/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml b/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
index 411bd76f1b64..512a33bdb208 100644
--- a/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
+++ b/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
@@ -28,6 +28,8 @@ required:
- reg
- "#reset-cells"
+additionalProperties: false
+
examples:
- |
reset-controller@8b2c800 {
diff --git a/Documentation/devicetree/bindings/reset/renesas,rst.txt b/Documentation/devicetree/bindings/reset/renesas,rst.txt
deleted file mode 100644
index de7f06ccd003..000000000000
--- a/Documentation/devicetree/bindings/reset/renesas,rst.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-DT bindings for the Renesas R-Car and RZ/G Reset Controllers
-
-The R-Car and RZ/G Reset Controllers provide reset control, and implement the
-following functions:
- - Latching of the levels on mode pins when PRESET# is negated,
- - Mode monitoring register,
- - Reset control of peripheral devices (on R-Car Gen1),
- - Watchdog timer (on R-Car Gen1),
- - Register-based reset control and boot address registers for the various CPU
- cores (on R-Car Gen2 and Gen3, and on RZ/G).
-
-
-Required properties:
- - compatible: Should be
- - "renesas,<soctype>-reset-wdt" for R-Car Gen1,
- - "renesas,<soctype>-rst" for R-Car Gen2 and Gen3, and RZ/G
- Examples with soctypes are:
- - "renesas,r8a7743-rst" (RZ/G1M)
- - "renesas,r8a7744-rst" (RZ/G1N)
- - "renesas,r8a7745-rst" (RZ/G1E)
- - "renesas,r8a77470-rst" (RZ/G1C)
- - "renesas,r8a774a1-rst" (RZ/G2M)
- - "renesas,r8a774b1-rst" (RZ/G2N)
- - "renesas,r8a774c0-rst" (RZ/G2E)
- - "renesas,r8a7778-reset-wdt" (R-Car M1A)
- - "renesas,r8a7779-reset-wdt" (R-Car H1)
- - "renesas,r8a7790-rst" (R-Car H2)
- - "renesas,r8a7791-rst" (R-Car M2-W)
- - "renesas,r8a7792-rst" (R-Car V2H
- - "renesas,r8a7793-rst" (R-Car M2-N)
- - "renesas,r8a7794-rst" (R-Car E2)
- - "renesas,r8a7795-rst" (R-Car H3)
- - "renesas,r8a7796-rst" (R-Car M3-W)
- - "renesas,r8a77961-rst" (R-Car M3-W+)
- - "renesas,r8a77965-rst" (R-Car M3-N)
- - "renesas,r8a77970-rst" (R-Car V3M)
- - "renesas,r8a77980-rst" (R-Car V3H)
- - "renesas,r8a77990-rst" (R-Car E3)
- - "renesas,r8a77995-rst" (R-Car D3)
- - reg: Address start and address range for the device.
-
-
-Example:
-
- rst: reset-controller@e6160000 {
- compatible = "renesas,r8a7795-rst";
- reg = <0 0xe6160000 0 0x0200>;
- };
diff --git a/Documentation/devicetree/bindings/reset/renesas,rst.yaml b/Documentation/devicetree/bindings/reset/renesas,rst.yaml
new file mode 100644
index 000000000000..b5de1d196a13
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/renesas,rst.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/renesas,rst.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas R-Car and RZ/G Reset Controller
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Magnus Damm <magnus.damm@gmail.com>
+
+description: |
+ The R-Car and RZ/G Reset Controllers provide reset control, and implement the
+ following functions:
+ - Latching of the levels on mode pins when PRESET# is negated,
+ - Mode monitoring register,
+ - Reset control of peripheral devices (on R-Car Gen1),
+ - Watchdog timer (on R-Car Gen1),
+ - Register-based reset control and boot address registers for the various
+ CPU cores (on R-Car Gen2 and Gen3, and on RZ/G).
+
+properties:
+ compatible:
+ enum:
+ - renesas,r8a7743-rst # RZ/G1M
+ - renesas,r8a7744-rst # RZ/G1N
+ - renesas,r8a7745-rst # RZ/G1E
+ - renesas,r8a77470-rst # RZ/G1C
+ - renesas,r8a774a1-rst # RZ/G2M
+ - renesas,r8a774b1-rst # RZ/G2N
+ - renesas,r8a774c0-rst # RZ/G2E
+ - renesas,r8a7778-reset-wdt # R-Car M1A
+ - renesas,r8a7779-reset-wdt # R-Car H1
+ - renesas,r8a7790-rst # R-Car H2
+ - renesas,r8a7791-rst # R-Car M2-W
+ - renesas,r8a7792-rst # R-Car V2H
+ - renesas,r8a7793-rst # R-Car M2-N
+ - renesas,r8a7794-rst # R-Car E2
+ - renesas,r8a7795-rst # R-Car H3
+ - renesas,r8a7796-rst # R-Car M3-W
+ - renesas,r8a77961-rst # R-Car M3-W+
+ - renesas,r8a77965-rst # R-Car M3-N
+ - renesas,r8a77970-rst # R-Car V3M
+ - renesas,r8a77980-rst # R-Car V3H
+ - renesas,r8a77990-rst # R-Car E3
+ - renesas,r8a77995-rst # R-Car D3
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7795-rst";
+ reg = <0xe6160000 0x0200>;
+ };
diff --git a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
index a9ff3cb35c5e..444be32a8a29 100644
--- a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
+++ b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
@@ -29,6 +29,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
rng@c8834000 {
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
deleted file mode 100644
index aaac7975f61c..000000000000
--- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-BCM2835/6368 Random number generator
-
-Required properties:
-
-- compatible : should be one of
- "brcm,bcm2835-rng"
- "brcm,bcm-nsp-rng"
- "brcm,bcm5301x-rng" or
- "brcm,bcm6368-rng"
-- reg : Specifies base physical address and size of the registers.
-
-Optional properties:
-
-- clocks : phandle to clock-controller plus clock-specifier pair
-- clock-names : "ipsec" as a clock name
-
-Optional properties:
-
-- interrupts: specify the interrupt for the RNG block
-
-Example:
-
-rng {
- compatible = "brcm,bcm2835-rng";
- reg = <0x7e104000 0x10>;
- interrupts = <2 29>;
-};
-
-rng@18033000 {
- compatible = "brcm,bcm-nsp-rng";
- reg = <0x18033000 0x14>;
-};
-
-random: rng@10004180 {
- compatible = "brcm,bcm6368-rng";
- reg = <0x10004180 0x14>;
-
- clocks = <&periph_clk 18>;
- clock-names = "ipsec";
-};
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml
new file mode 100644
index 000000000000..89ab67f20a7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/brcm,bcm2835.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2835/6368 Random number generator
+
+maintainers:
+ - Stefan Wahren <stefan.wahren@i2se.com>
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Herbert Xu <herbert@gondor.apana.org.au>
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm2835-rng
+ - brcm,bcm-nsp-rng
+ - brcm,bcm5301x-rng
+ - brcm,bcm6368-rng
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: ipsec
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng {
+ compatible = "brcm,bcm2835-rng";
+ reg = <0x7e104000 0x10>;
+ interrupts = <2 29>;
+ };
+
+ - |
+ rng@18033000 {
+ compatible = "brcm,bcm-nsp-rng";
+ reg = <0x18033000 0x14>;
+ };
+
+ - |
+ rng@10004180 {
+ compatible = "brcm,bcm6368-rng";
+ reg = <0x10004180 0x14>;
+
+ clocks = <&periph_clk 18>;
+ clock-names = "ipsec";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt b/Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt
deleted file mode 100644
index 41c7ae18fd7b..000000000000
--- a/Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-JZ4740 and similar SoCs real-time clock driver
-
-Required properties:
-
-- compatible: One of:
- - "ingenic,jz4740-rtc" - for use with the JZ4740 SoC
- - "ingenic,jz4780-rtc" - for use with the JZ4780 SoC
-- reg: Address range of rtc register set
-- interrupts: IRQ number for the alarm interrupt
-- clocks: phandle to the "rtc" clock
-- clock-names: must be "rtc"
-
-Optional properties:
-- system-power-controller: To use this component as the
- system power controller
-- reset-pin-assert-time-ms: Reset pin low-level assertion
- time after wakeup (default 60ms; range 0-125ms if RTC clock
- at 32 kHz)
-- min-wakeup-pin-assert-time-ms: Minimum wakeup pin assertion
- time (default 100ms; range 0-2s if RTC clock at 32 kHz)
-
-Example:
-
-rtc@10003000 {
- compatible = "ingenic,jz4740-rtc";
- reg = <0x10003000 0x40>;
-
- interrupt-parent = <&intc>;
- interrupts = <32>;
-
- clocks = <&rtc_clock>;
- clock-names = "rtc";
-
- system-power-controller;
- reset-pin-assert-time-ms = <60>;
- min-wakeup-pin-assert-time-ms = <100>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml b/Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml
new file mode 100644
index 000000000000..4206bf8a2469
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/ingenic,rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs Real-Time Clock DT bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+allOf:
+ - $ref: rtc.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-rtc
+ - ingenic,jz4760-rtc
+ - items:
+ - const: ingenic,jz4725b-rtc
+ - const: ingenic,jz4740-rtc
+ - items:
+ - enum:
+ - ingenic,jz4770-rtc
+ - ingenic,jz4780-rtc
+ - const: ingenic,jz4760-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: rtc
+
+ system-power-controller:
+ description: |
+ Indicates that the RTC is responsible for powering OFF
+ the system.
+ type: boolean
+
+ ingenic,reset-pin-assert-time-ms:
+ minimum: 0
+ maximum: 125
+ default: 60
+ description: |
+ Reset pin low-level assertion time after wakeup
+ (assuming RTC clock at 32 kHz)
+
+ ingenic,min-wakeup-pin-assert-time-ms:
+ minimum: 0
+ maximum: 2000
+ default: 100
+ description: |
+ Minimum wakeup pin assertion time
+ (assuming RTC clock at 32 kHz)
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4740-cgu.h>
+ rtc_dev: rtc@10003000 {
+ compatible = "ingenic,jz4740-rtc";
+ reg = <0x10003000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+
+ clocks = <&cgu JZ4740_CLK_RTC>;
+ clock-names = "rtc";
+ };
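+
+ # A hedged sketch (not in the upstream binding): the same RTC wired up
+ # as the system power controller, using the optional assert-time
+ # properties. The 60 ms / 100 ms values are the defaults documented
+ # above.
+ - |
+ #include <dt-bindings/clock/jz4740-cgu.h>
+ rtc@10003000 {
+ compatible = "ingenic,jz4740-rtc";
+ reg = <0x10003000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+
+ clocks = <&cgu JZ4740_CLK_RTC>;
+ clock-names = "rtc";
+
+ system-power-controller;
+ ingenic,reset-pin-assert-time-ms = <60>;
+ ingenic,min-wakeup-pin-assert-time-ms = <100>;
+ };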
diff --git a/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml b/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
index dcff573cbdb1..b95cb017f469 100644
--- a/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
@@ -51,6 +51,8 @@ required:
- clocks
- clock-names
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/r7s72100-clock.h>
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt2712.txt b/Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
new file mode 100644
index 000000000000..c33d87e5e753
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
@@ -0,0 +1,14 @@
+Device-Tree bindings for MediaTek SoC based RTC
+
+Required properties:
+- compatible : Should be "mediatek,mt2712-rtc" for MT2712 SoC;
+- reg : Specifies base physical address and size of the registers;
+- interrupts : Should contain the interrupt for RTC alarm;
+
+Example:
+
+rtc: rtc@10011000 {
+ compatible = "mediatek,mt2712-rtc";
+ reg = <0 0x10011000 0 0x1000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
index 0a54296d7218..48c6cafca90c 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
@@ -111,6 +111,8 @@ required:
- clocks
- interrupts
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/mfd/stm32f4-rcc.h>
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
index 214fe8beddc3..d4178ab0d675 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -62,6 +62,8 @@ required:
- clocks
- clock-names
+additionalProperties: false
+
examples:
- |
serial@84c0 {
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index c8d677f9491f..9582fc2279ed 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -19,7 +19,7 @@ Optional properties:
the transceiver is actually CTS_B, not RTS_B. CTS_B is always output,
and RTS_B is input, regardless of dte-mode.
-Please check Documentation/devicetree/bindings/serial/serial.txt
+Please check Documentation/devicetree/bindings/serial/serial.yaml
for the complete list of generic properties.
Note: Each uart controller should have an alias correctly numbered
diff --git a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
new file mode 100644
index 000000000000..91101521ef07
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/renesas,hscif.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas High Speed Serial Communication Interface with FIFO (HSCIF)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,hscif-r8a7778 # R-Car M1
+ - renesas,hscif-r8a7779 # R-Car H1
+ - const: renesas,rcar-gen1-hscif # R-Car Gen1
+ - const: renesas,hscif # generic HSCIF compatible UART
+
+ - items:
+ - enum:
+ - renesas,hscif-r8a7743 # RZ/G1M
+ - renesas,hscif-r8a7744 # RZ/G1N
+ - renesas,hscif-r8a7745 # RZ/G1E
+ - renesas,hscif-r8a77470 # RZ/G1C
+ - renesas,hscif-r8a7790 # R-Car H2
+ - renesas,hscif-r8a7791 # R-Car M2-W
+ - renesas,hscif-r8a7792 # R-Car V2H
+ - renesas,hscif-r8a7793 # R-Car M2-N
+ - renesas,hscif-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-hscif # R-Car Gen2 and RZ/G1
+ - const: renesas,hscif # generic HSCIF compatible UART
+
+ - items:
+ - enum:
+ - renesas,hscif-r8a774a1 # RZ/G2M
+ - renesas,hscif-r8a774b1 # RZ/G2N
+ - renesas,hscif-r8a774c0 # RZ/G2E
+ - renesas,hscif-r8a7795 # R-Car H3
+ - renesas,hscif-r8a7796 # R-Car M3-W
+ - renesas,hscif-r8a77961 # R-Car M3-W+
+ - renesas,hscif-r8a77965 # R-Car M3-N
+ - renesas,hscif-r8a77970 # R-Car V3M
+ - renesas,hscif-r8a77980 # R-Car V3H
+ - renesas,hscif-r8a77990 # R-Car E3
+ - renesas,hscif-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-hscif # R-Car Gen3 and RZ/G2
+ - const: renesas,hscif # generic HSCIF compatible UART
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ minItems: 1
+ maxItems: 4
+ items:
+ enum:
+ - fck # UART functional clock
+ - hsck # optional external clock input
+ - brg_int # optional internal clock source for BRG frequency divider
+ - scif_clk # optional external clock source for BRG frequency divider
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ description:
+ Must contain a list of pairs of references to DMA specifiers, one for
+ transmission, and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-hscif
+ - renesas,rcar-gen3-hscif
+then:
+ required:
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+ aliases {
+ serial1 = &hscif1;
+ };
+
+ hscif1: serial@e6550000 {
+ compatible = "renesas,hscif-r8a7795", "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0xe6550000 96>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 519>, <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x33>, <&dmac1 0x32>, <&dmac2 0x33>, <&dmac2 0x32>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 519>;
+ uart-has-rtscts;
+ };
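+
+ # A hedged sketch (not in the upstream binding): a minimal R-Car Gen1
+ # HSCIF node carrying only the required properties. A single "fck"
+ # entry satisfies clocks/clock-names (minItems: 1), and "resets" is
+ # only required on Gen2/Gen3 parts. The unit address, interrupt number
+ # and clock/power-domain phandles are illustrative.
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ hscif0: serial@ffe40000 {
+ compatible = "renesas,hscif-r8a7778", "renesas,rcar-gen1-hscif",
+ "renesas,hscif";
+ reg = <0xffe40000 96>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks 0>;
+ clock-names = "fck";
+ power-domains = <&cpg_clocks>;
+ };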
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
deleted file mode 100644
index a5edf4b70c7a..000000000000
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ /dev/null
@@ -1,150 +0,0 @@
-* Renesas SH-Mobile Serial Communication Interface
-
-Required properties:
-
- - compatible: Must contain one or more of the following:
-
- - "renesas,scif-r7s72100" for R7S72100 (RZ/A1H) SCIF compatible UART.
- - "renesas,scif-r7s9210" for R7S9210 (RZ/A2) SCIF compatible UART.
- - "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
- - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
- - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
- - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
- - "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.
- - "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.
- - "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.
- - "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.
- - "renesas,scif-r8a7744" for R8A7744 (RZ/G1N) SCIF compatible UART.
- - "renesas,scifa-r8a7744" for R8A7744 (RZ/G1N) SCIFA compatible UART.
- - "renesas,scifb-r8a7744" for R8A7744 (RZ/G1N) SCIFB compatible UART.
- - "renesas,hscif-r8a7744" for R8A7744 (RZ/G1N) HSCIF compatible UART.
- - "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.
- - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
- - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
- - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
- - "renesas,scif-r8a77470" for R8A77470 (RZ/G1C) SCIF compatible UART.
- - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
- - "renesas,scif-r8a774a1" for R8A774A1 (RZ/G2M) SCIF compatible UART.
- - "renesas,hscif-r8a774a1" for R8A774A1 (RZ/G2M) HSCIF compatible UART.
- - "renesas,scif-r8a774b1" for R8A774B1 (RZ/G2N) SCIF compatible UART.
- - "renesas,hscif-r8a774b1" for R8A774B1 (RZ/G2N) HSCIF compatible UART.
- - "renesas,scif-r8a774c0" for R8A774C0 (RZ/G2E) SCIF compatible UART.
- - "renesas,hscif-r8a774c0" for R8A774C0 (RZ/G2E) HSCIF compatible UART.
- - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
- - "renesas,hscif-r8a7778" for R8A7778 (R-Car M1) HSCIF compatible UART.
- - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
- - "renesas,hscif-r8a7779" for R8A7779 (R-Car H1) HSCIF compatible UART.
- - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
- - "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART.
- - "renesas,scifb-r8a7790" for R8A7790 (R-Car H2) SCIFB compatible UART.
- - "renesas,hscif-r8a7790" for R8A7790 (R-Car H2) HSCIF compatible UART.
- - "renesas,scif-r8a7791" for R8A7791 (R-Car M2-W) SCIF compatible UART.
- - "renesas,scifa-r8a7791" for R8A7791 (R-Car M2-W) SCIFA compatible UART.
- - "renesas,scifb-r8a7791" for R8A7791 (R-Car M2-W) SCIFB compatible UART.
- - "renesas,hscif-r8a7791" for R8A7791 (R-Car M2-W) HSCIF compatible UART.
- - "renesas,scif-r8a7792" for R8A7792 (R-Car V2H) SCIF compatible UART.
- - "renesas,hscif-r8a7792" for R8A7792 (R-Car V2H) HSCIF compatible UART.
- - "renesas,scif-r8a7793" for R8A7793 (R-Car M2-N) SCIF compatible UART.
- - "renesas,scifa-r8a7793" for R8A7793 (R-Car M2-N) SCIFA compatible UART.
- - "renesas,scifb-r8a7793" for R8A7793 (R-Car M2-N) SCIFB compatible UART.
- - "renesas,hscif-r8a7793" for R8A7793 (R-Car M2-N) HSCIF compatible UART.
- - "renesas,scif-r8a7794" for R8A7794 (R-Car E2) SCIF compatible UART.
- - "renesas,scifa-r8a7794" for R8A7794 (R-Car E2) SCIFA compatible UART.
- - "renesas,scifb-r8a7794" for R8A7794 (R-Car E2) SCIFB compatible UART.
- - "renesas,hscif-r8a7794" for R8A7794 (R-Car E2) HSCIF compatible UART.
- - "renesas,scif-r8a7795" for R8A7795 (R-Car H3) SCIF compatible UART.
- - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
- - "renesas,scif-r8a7796" for R8A77960 (R-Car M3-W) SCIF compatible UART.
- - "renesas,hscif-r8a7796" for R8A77960 (R-Car M3-W) HSCIF compatible UART.
- - "renesas,scif-r8a77961" for R8A77961 (R-Car M3-W+) SCIF compatible UART.
- - "renesas,hscif-r8a77961" for R8A77961 (R-Car M3-W+) HSCIF compatible UART.
- - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
- - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
- - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
- - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
- - "renesas,scif-r8a77980" for R8A77980 (R-Car V3H) SCIF compatible UART.
- - "renesas,hscif-r8a77980" for R8A77980 (R-Car V3H) HSCIF compatible UART.
- - "renesas,scif-r8a77990" for R8A77990 (R-Car E3) SCIF compatible UART.
- - "renesas,hscif-r8a77990" for R8A77990 (R-Car E3) HSCIF compatible UART.
- - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
- - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART.
- - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
- - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
- - "renesas,rcar-gen1-scif" for R-Car Gen1 SCIF compatible UART,
- - "renesas,rcar-gen2-scif" for R-Car Gen2 and RZ/G1 SCIF compatible UART,
- - "renesas,rcar-gen3-scif" for R-Car Gen3 and RZ/G2 SCIF compatible UART,
- - "renesas,rcar-gen2-scifa" for R-Car Gen2 and RZ/G1 SCIFA compatible UART,
- - "renesas,rcar-gen2-scifb" for R-Car Gen2 and RZ/G1 SCIFB compatible UART,
- - "renesas,rcar-gen1-hscif" for R-Car Gen1 HSCIF compatible UART,
- - "renesas,rcar-gen2-hscif" for R-Car Gen2 and RZ/G1 HSCIF compatible UART,
- - "renesas,rcar-gen3-hscif" for R-Car Gen3 and RZ/G2 HSCIF compatible UART,
- - "renesas,scif" for generic SCIF compatible UART.
- - "renesas,scifa" for generic SCIFA compatible UART.
- - "renesas,scifb" for generic SCIFB compatible UART.
- - "renesas,hscif" for generic HSCIF compatible UART.
- - "renesas,sci" for generic SCI compatible UART.
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first, followed by the
- family-specific and/or generic versions.
-
- - reg: Base address and length of the I/O registers used by the UART.
- - interrupts: Must contain one or more interrupt-specifiers for the SCIx.
- If a single interrupt is expressed, then all events are
- multiplexed into this single interrupt.
-
- If multiple interrupts are provided by the hardware, the order
- in which the interrupts are listed must match order below. Note
- that some HW interrupt events may be muxed together resulting
- in duplicate entries.
- The interrupt order is as follows:
- 1. Error (ERI)
- 2. Receive buffer full (RXI)
- 3. Transmit buffer empty (TXI)
- 4. Break (BRI)
- 5. Data Ready (DRI)
- 6. Transmit End (TEI)
-
- - clocks: Must contain a phandle and clock-specifier pair for each entry
- in clock-names.
- - clock-names: Must contain "fck" for the SCIx UART functional clock.
- Apart from the divided functional clock, there may be other possible
- sources for the sampling clock, depending on SCIx variant.
- On (H)SCI(F) and some SCIFA, an additional clock may be specified:
- - "hsck" for the optional external clock input (on HSCIF),
- - "sck" for the optional external clock input (on other variants).
- On UARTs equipped with a Baud Rate Generator for External Clock (BRG)
- (some SCIF and HSCIF), additional clocks may be specified:
- - "brg_int" for the optional internal clock source for the frequency
- divider (typically the (AXI or SHwy) bus clock),
- - "scif_clk" for the optional external clock source for the frequency
- divider (SCIF_CLK).
-
-Note: Each enabled SCIx UART may have an optional "serialN" alias in the
-"aliases" node.
-
-Optional properties:
- - dmas: Must contain a list of two references to DMA specifiers, one for
- transmission, and one for reception.
- - dma-names: Must contain a list of two DMA names, "tx" and "rx".
- - {cts,dsr,dcd,rng,rts,dtr}-gpios: Specify GPIOs for modem lines, cfr. the
- generic serial DT bindings in serial.txt.
- - uart-has-rtscts: Indicates dedicated lines for RTS/CTS hardware flow
- control, cfr. the generic serial DT bindings in serial.txt.
-
-Example:
- aliases {
- serial0 = &scifa0;
- };
-
- scifa0: serial@e6c40000 {
- compatible = "renesas,scifa-r8a7790",
- "renesas,rcar-gen2-scifa", "renesas,scifa";
- reg = <0 0xe6c40000 0 64>;
- interrupt-parent = <&gic>;
- interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp2_clks R8A7790_CLK_SCIFA0>;
- clock-names = "fck";
- dmas = <&dmac0 0x21>, <&dmac0 0x22>;
- dma-names = "tx", "rx";
- };
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci.yaml b/Documentation/devicetree/bindings/serial/renesas,sci.yaml
new file mode 100644
index 000000000000..4183b7311f37
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/renesas,sci.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/renesas,sci.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas Serial Communication Interface
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ const: renesas,sci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Error interrupt
+ - description: Receive buffer full interrupt
+ - description: Transmit buffer empty interrupt
+ - description: Transmit end interrupt
+
+ interrupt-names:
+ items:
+ - const: eri
+ - const: rxi
+ - const: txi
+ - const: tei
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum:
+ - fck # UART functional clock
+ - sck # optional external clock input
+
+ uart-has-rtscts: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ aliases {
+ serial0 = &sci0;
+ };
+
+ sci0: serial@ffff78 {
+ compatible = "renesas,sci";
+ reg = <0xffff78 8>;
+ interrupts = <88 0>, <89 0>, <90 0>, <91 0>;
+ clocks = <&fclk>;
+ clock-names = "fck";
+ };
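+
+ # A hedged variant (not in the upstream binding) exercising the
+ # optional interrupt-names and the external "sck" clock input defined
+ # above; the unit address, interrupt numbers and the &sck_in phandle
+ # are illustrative.
+ - |
+ sci1: serial@ffff80 {
+ compatible = "renesas,sci";
+ reg = <0xffff80 8>;
+ interrupts = <92 0>, <93 0>, <94 0>, <95 0>;
+ interrupt-names = "eri", "rxi", "txi", "tei";
+ clocks = <&fclk>, <&sck_in>;
+ clock-names = "fck", "sck";
+ };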
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
new file mode 100644
index 000000000000..70392b9bd977
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/renesas,scif.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas Serial Communication Interface with FIFO (SCIF)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,scif-r7s72100 # RZ/A1H
+ - const: renesas,scif # generic SCIF compatible UART
+
+ - items:
+ - enum:
+ - renesas,scif-r7s9210 # RZ/A2
+
+ - items:
+ - enum:
+ - renesas,scif-r8a7778 # R-Car M1
+ - renesas,scif-r8a7779 # R-Car H1
+ - const: renesas,rcar-gen1-scif # R-Car Gen1
+ - const: renesas,scif # generic SCIF compatible UART
+
+ - items:
+ - enum:
+ - renesas,scif-r8a7743 # RZ/G1M
+ - renesas,scif-r8a7744 # RZ/G1N
+ - renesas,scif-r8a7745 # RZ/G1E
+ - renesas,scif-r8a77470 # RZ/G1C
+ - renesas,scif-r8a7790 # R-Car H2
+ - renesas,scif-r8a7791 # R-Car M2-W
+ - renesas,scif-r8a7792 # R-Car V2H
+ - renesas,scif-r8a7793 # R-Car M2-N
+ - renesas,scif-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-scif # R-Car Gen2 and RZ/G1
+ - const: renesas,scif # generic SCIF compatible UART
+
+ - items:
+ - enum:
+ - renesas,scif-r8a774a1 # RZ/G2M
+ - renesas,scif-r8a774b1 # RZ/G2N
+ - renesas,scif-r8a774c0 # RZ/G2E
+ - renesas,scif-r8a7795 # R-Car H3
+ - renesas,scif-r8a7796 # R-Car M3-W
+ - renesas,scif-r8a77961 # R-Car M3-W+
+ - renesas,scif-r8a77965 # R-Car M3-N
+ - renesas,scif-r8a77970 # R-Car V3M
+ - renesas,scif-r8a77980 # R-Car V3H
+ - renesas,scif-r8a77990 # R-Car E3
+ - renesas,scif-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-scif # R-Car Gen3 and RZ/G2
+ - const: renesas,scif # generic SCIF compatible UART
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ oneOf:
+ - items:
+ - description: A combined interrupt
+ - items:
+ - description: Error interrupt
+ - description: Receive buffer full interrupt
+ - description: Transmit buffer empty interrupt
+ - description: Transmit End interrupt
+ - items:
+ - description: Error interrupt
+ - description: Receive buffer full interrupt
+ - description: Transmit buffer empty interrupt
+ - description: Break interrupt
+ - description: Data Ready interrupt
+ - description: Transmit End interrupt
+
+ interrupt-names:
+ oneOf:
+ - items:
+ - const: eri
+ - const: rxi
+ - const: txi
+ - const: tei
+ - items:
+ - const: eri
+ - const: rxi
+ - const: txi
+ - const: bri
+ - const: dri
+ - const: tei
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ minItems: 1
+ maxItems: 4
+ items:
+ enum:
+ - fck # UART functional clock
+ - sck # optional external clock input
+ - brg_int # optional internal clock source for BRG frequency divider
+ - scif_clk # optional external clock source for BRG frequency divider
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ description:
+ Must contain a list of pairs of references to DMA specifiers, one for
+ transmission, and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-scif
+ - renesas,rcar-gen3-scif
+then:
+ required:
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7791-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7791-sysc.h>
+ aliases {
+ serial0 = &scif0;
+ };
+
+ scif0: serial@e6e60000 {
+ compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+ "renesas,scif";
+ reg = <0xe6e60000 64>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 721>, <&cpg CPG_CORE R8A7791_CLK_ZS>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x29>, <&dmac0 0x2a>, <&dmac1 0x29>, <&dmac1 0x2a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 721>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifa.yaml b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
new file mode 100644
index 000000000000..b28bcb268854
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/renesas,scifa.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas Serial Communications Interface with FIFO A (SCIFA)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,scifa-r8a73a4 # R-Mobile APE6
+ - renesas,scifa-r8a7740 # R-Mobile A1
+ - renesas,scifa-sh73a0 # SH-Mobile AG5
+ - const: renesas,scifa # generic SCIFA compatible UART
+
+ - items:
+ - enum:
+ - renesas,scifa-r8a7743 # R8A7743 RZ/G1M
+ - renesas,scifa-r8a7744 # R8A7744 RZ/G1N
+ - renesas,scifa-r8a7745 # R8A7745 RZ/G1E
+ - renesas,scifa-r8a7790 # R8A7790 R-Car H2
+ - renesas,scifa-r8a7791 # R8A7791 R-Car M2-W
+ - renesas,scifa-r8a7793 # R8A7793 R-Car M2-N
+ - renesas,scifa-r8a7794 # R8A7794 R-Car E2
+ - const: renesas,rcar-gen2-scifa # R-Car Gen2 and RZ/G1
+ - const: renesas,scifa # generic SCIFA compatible UART
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ enum:
+ - fck # UART functional clock
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ description:
+ Must contain a list of pairs of references to DMA specifiers, one for
+ transmission, and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-scifa
+then:
+ required:
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+ aliases {
+ serial0 = &scifa0;
+ };
+
+ scifa0: serial@e6c40000 {
+ compatible = "renesas,scifa-r8a7790", "renesas,rcar-gen2-scifa",
+ "renesas,scifa";
+ reg = <0xe6c40000 64>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 204>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
+ dmas = <&dmac0 0x21>, <&dmac0 0x22>, <&dmac1 0x21>, <&dmac1 0x22>;
+ dma-names = "tx", "rx", "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifb.yaml b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
new file mode 100644
index 000000000000..57205cb1dcd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/renesas,scifb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas Serial Communications Interface with FIFO B (SCIFB)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,scifb-r8a73a4 # R-Mobile APE6
+ - renesas,scifb-r8a7740 # R-Mobile A1
+ - renesas,scifb-sh73a0 # SH-Mobile AG5
+ - const: renesas,scifb # generic SCIFB compatible UART
+
+ - items:
+ - enum:
+ - renesas,scifb-r8a7743 # RZ/G1M
+ - renesas,scifb-r8a7744 # RZ/G1N
+ - renesas,scifb-r8a7745 # RZ/G1E
+ - renesas,scifb-r8a7790 # R-Car H2
+ - renesas,scifb-r8a7791 # R-Car M2-W
+ - renesas,scifb-r8a7793 # R-Car M2-N
+ - renesas,scifb-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-scifb # R-Car Gen2 and RZ/G1
+ - const: renesas,scifb # generic SCIFB compatible UART
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ enum:
+ - fck # UART functional clock
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ description:
+ Must contain a list of pairs of references to DMA specifiers, one for
+ transmission, and one for reception.
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ - tx
+ - rx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-scifb
+then:
+ required:
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7740-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ scifb: serial@e6c30000 {
+ compatible = "renesas,scifb-r8a7740", "renesas,scifb";
+ reg = <0xe6c30000 0x100>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A7740_CLK_SCIFB>;
+ clock-names = "fck";
+ power-domains = <&pd_a3sp>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/serial.txt b/Documentation/devicetree/bindings/serial/serial.txt
deleted file mode 100644
index 863c2893759e..000000000000
--- a/Documentation/devicetree/bindings/serial/serial.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Generic Serial DT Bindings
-
-This document lists a set of generic properties for describing UARTs in a
-device tree. Whether these properties apply to a particular device depends on
-the DT bindings for the actual device.
-
-Optional properties:
- - cts-gpios: Must contain a GPIO specifier, referring to the GPIO pin to be
- used as the UART's CTS line.
- - dcd-gpios: Must contain a GPIO specifier, referring to the GPIO pin to be
- used as the UART's DCD line.
- - dsr-gpios: Must contain a GPIO specifier, referring to the GPIO pin to be
- used as the UART's DSR line.
- - dtr-gpios: Must contain a GPIO specifier, referring to the GPIO pin to be
- used as the UART's DTR line.
- - rng-gpios: Must contain a GPIO specifier, referring to the GPIO pin to be
- used as the UART's RNG line.
- - rts-gpios: Must contain a GPIO specifier, referring to the GPIO pin to be
- used as the UART's RTS line.
-
- - uart-has-rtscts: The presence of this property indicates that the
- UART has dedicated lines for RTS/CTS hardware flow control, and that
- they are available for use (wired and enabled by pinmux configuration).
- This depends on both the UART hardware and the board wiring.
- Note that this property is mutually-exclusive with "cts-gpios" and
- "rts-gpios" above, unless support is provided to switch between modes
- dynamically.
-
-
-Examples:
-
- uart1: serial@48022000 {
- compatible = "ti,am3352-uart", "ti,omap3-uart";
- ti,hwmods = "uart2";
- clock-frequency = <48000000>;
- reg = <0x48022000 0x2000>;
- interrupts = <73>;
- dmas = <&edma 28 0>, <&edma 29 0>;
- dma-names = "tx", "rx";
- dtr-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;
- dsr-gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
- dcd-gpios = <&gpio2 24 GPIO_ACTIVE_LOW>;
- rng-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>;
- cts-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
- rts-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
- };
-
- scifa4: serial@e6c80000 {
- compatible = "renesas,scifa-sh73a0", "renesas,scifa";
- reg = <0xe6c80000 0x100>;
- interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp2_clks SH73A0_CLK_SCIFA4>;
- clock-names = "fck";
- power-domains = <&pd_a3sp>;
- uart-has-rtscts;
- };
diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
new file mode 100644
index 000000000000..53204d90d0c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/serial.yaml
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/serial.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Serial Interface Generic DT Bindings
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+description:
+ This document lists a set of generic properties for describing UARTs in a
+ device tree. Whether these properties apply to a particular device depends
+ on the DT bindings for the actual device.
+
+ Each enabled UART may have an optional "serialN" alias in the "aliases" node,
+ where N is the port number (non-negative decimal integer) as printed on the
+ label next to the physical port.
+
+properties:
+ $nodename:
+ pattern: "^serial(@.*)?$"
+
+ cts-gpios:
+ maxItems: 1
+ description:
+ Must contain a GPIO specifier, referring to the GPIO pin to be used as
+ the UART's CTS line.
+
+ dcd-gpios:
+ maxItems: 1
+ description:
+ Must contain a GPIO specifier, referring to the GPIO pin to be used as
+ the UART's DCD line.
+
+ dsr-gpios:
+ maxItems: 1
+ description:
+ Must contain a GPIO specifier, referring to the GPIO pin to be used as
+ the UART's DSR line.
+
+ dtr-gpios:
+ maxItems: 1
+ description:
+ Must contain a GPIO specifier, referring to the GPIO pin to be used as
+ the UART's DTR line.
+
+ rng-gpios:
+ maxItems: 1
+ description:
+ Must contain a GPIO specifier, referring to the GPIO pin to be used as
+ the UART's RNG line.
+
+ rts-gpios:
+ maxItems: 1
+ description:
+ Must contain a GPIO specifier, referring to the GPIO pin to be used as
+ the UART's RTS line.
+
+ uart-has-rtscts:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The presence of this property indicates that the UART has dedicated lines
+ for RTS/CTS hardware flow control, and that they are available for use
+ (wired and enabled by pinmux configuration). This depends on both the
+ UART hardware and the board wiring.
+
+if:
+ required:
+ - uart-has-rtscts
+then:
+ properties:
+ cts-gpios: false
+ rts-gpios: false
+
+patternProperties:
+ ".*":
+ if:
+ type: object
+ then:
+ description:
+ A serial-attached device shall be represented as a child node of the
+ host UART device it is attached to. It is expected that the attached
+ device is the only child node of the UART device. The slave device
+ node name shall reflect the generic type of device for the node.
+
+ properties:
+ compatible:
+ description:
+ Compatible of the device connected to the serial port.
+
+ max-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The maximum baud rate the device operates at.
+ This should only be present if the maximum is less than the slave
+ device can support. For example, a particular board has some
+ signal quality issue or the host processor can't support higher
+ baud rates.
+
+ current-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ The current baud rate the device operates at.
+ This should only be present in case a driver has no chance to know
+ the baud rate of the slave device.
+ Examples:
+ * device supports auto-baud
+ * the rate is setup by a bootloader and there is no way to reset
+ the device
+ * device baud rate is configured by its firmware but there is no
+ way to request the actual settings
+
+ required:
+ - compatible
+
+examples:
+ - |
+ serial@1234 {
+ compatible = "ns16550a";
+ reg = <0x1234 0x20>;
+ interrupts = <1>;
+
+ bluetooth {
+ compatible = "brcm,bcm43341-bt";
+ interrupt-parent = <&gpio>;
+ interrupts = <10>;
+ };
+ };
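+
+ # A hedged variant (not in the upstream binding) showing the optional
+ # child-node speed property from the schema above; the 3000000 baud
+ # figure is illustrative, not taken from a device datasheet.
+ - |
+ serial@4567 {
+ compatible = "ns16550a";
+ reg = <0x4567 0x20>;
+ interrupts = <2>;
+
+ bluetooth {
+ compatible = "brcm,bcm43341-bt";
+ interrupt-parent = <&gpio>;
+ interrupts = <10>;
+ max-speed = <3000000>;
+ };
+ };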
diff --git a/Documentation/devicetree/bindings/serial/slave-device.txt b/Documentation/devicetree/bindings/serial/slave-device.txt
deleted file mode 100644
index 40110e019620..000000000000
--- a/Documentation/devicetree/bindings/serial/slave-device.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Serial Slave Device DT binding
-
-This documents the binding structure and common properties for serial
-attached devices. Common examples include Bluetooth, WiFi, NFC and GPS
-devices.
-
-Serial attached devices shall be a child node of the host UART device the
-slave device is attached to. It is expected that the attached device is
-the only child node of the UART device. The slave device node name shall
-reflect the generic type of device for the node.
-
-Required Properties:
-
-- compatible : A string reflecting the vendor and specific device the node
- represents.
-
-Optional Properties:
-
-- max-speed : The maximum baud rate the device operates at. This should
- only be present if the maximum is less than the slave device
- can support. For example, a particular board has some signal
- quality issue or the host processor can't support higher
- baud rates.
-- current-speed : The current baud rate the device operates at. This should
- only be present in case a driver has no chance to know
- the baud rate of the slave device.
- Examples:
- * device supports auto-baud
- * the rate is setup by a bootloader and there is no
- way to reset the device
- * device baud rate is configured by its firmware but
- there is no way to request the actual settings
-
-Example:
-
-serial@1234 {
- compatible = "ns16550a";
- interrupts = <1>;
-
- bluetooth {
- compatible = "brcm,bcm43341-bt";
- interrupt-parent = <&gpio>;
- interrupts = <10>;
- };
-};
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
index b42002542690..b962f8db4ce9 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -27,6 +27,7 @@ properties:
- rockchip,rk3066-uart
- rockchip,rk3188-uart
- rockchip,rk3288-uart
+ - rockchip,rk3308-uart
- rockchip,rk3328-uart
- rockchip,rk3368-uart
- rockchip,rk3399-uart
diff --git a/Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml b/Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml
new file mode 100644
index 000000000000..09a30300850c
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/socionext,uniphier-uart.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/socionext,uniphier-uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UniPhier UART controller
+
+maintainers:
+ - Masahiro Yamada <yamada.masahiro@socionext.com>
+
+properties:
+ compatible:
+ const: socionext,uniphier-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+ auto-flow-control:
+ description: enable automatic flow control support.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ aliases {
+ serial0 = &serial0;
+ };
+
+ serial0: serial@54006800 {
+ compatible = "socionext,uniphier-uart";
+ reg = <0x54006800 0x40>;
+ interrupts = <0 33 4>;
+ clocks = <&uart_clk>;
+ };
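+
+ # A hedged variant (not in the upstream binding) with the optional
+ # auto-flow-control flag; the unit address and interrupt are
+ # illustrative.
+ - |
+ serial@54006900 {
+ compatible = "socionext,uniphier-uart";
+ reg = <0x54006900 0x40>;
+ interrupts = <0 35 4>;
+ clocks = <&uart_clk>;
+ auto-flow-control;
+ };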
diff --git a/Documentation/devicetree/bindings/serial/uniphier-uart.txt b/Documentation/devicetree/bindings/serial/uniphier-uart.txt
deleted file mode 100644
index 7a1bf02bb869..000000000000
--- a/Documentation/devicetree/bindings/serial/uniphier-uart.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-UniPhier UART controller
-
-Required properties:
-- compatible: should be "socionext,uniphier-uart".
-- reg: offset and length of the register set for the device.
-- interrupts: a single interrupt specifier.
-- clocks: phandle to the input clock.
-
-Optional properties:
--auto-flow-control: enable automatic flow control support.
-
-Example:
- aliases {
- serial0 = &serial0;
- };
-
- serial0: serial@54006800 {
- compatible = "socionext,uniphier-uart";
- reg = <0x54006800 0x40>;
- interrupts = <0 33 4>;
- clocks = <&uart_clk>;
- };
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
index f548594d020b..cb008fd188d8 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
@@ -40,6 +40,8 @@ required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
canvas: video-lut@48 {
diff --git a/Documentation/devicetree/bindings/soc/imx/fsl,aips-bus.yaml b/Documentation/devicetree/bindings/soc/imx/fsl,aips-bus.yaml
new file mode 100644
index 000000000000..3cbf2d28a188
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/imx/fsl,aips-bus.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/imx/fsl,aips-bus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: i.MX AHB to IP Bridge
+
+maintainers:
+ - Peng Fan <peng.fan@nxp.com>
+
+description: |
+ This peripheral is designed as the bridge between the AHB bus and
+ peripherals on the lower-bandwidth IP Slave (IPS) buses.
+
+select:
+ properties:
+ compatible:
+ contains:
+ const: fsl,aips-bus
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: fsl,aips-bus
+ - const: simple-bus
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ bus@30000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x30000000 0x400000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ };
+...
diff --git a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
index 7a32404c6114..ecac2bbeae45 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
@@ -20,6 +20,7 @@ Required properties in pwrap device node.
- compatible:
"mediatek,mt2701-pwrap" for MT2701/7623 SoCs
"mediatek,mt6765-pwrap" for MT6765 SoCs
+ "mediatek,mt6779-pwrap" for MT6779 SoCs
"mediatek,mt6797-pwrap" for MT6797 SoCs
"mediatek,mt7622-pwrap" for MT7622 SoCs
"mediatek,mt8135-pwrap" for MT8135 SoCs
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
index db501269f47b..f8fa71f5d84b 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
@@ -45,6 +45,18 @@ by the individual bindings for the specific service
12 - Ultrasound stream manager.
13 - Listen stream manager.
+- qcom,protection-domain
+ Usage: optional
+ Value type: <stringlist>
+ Definition: Must list the protection domain service name and path
+ that the particular apr service has a dependency on.
+ Possible values are:
+ "avs/audio", "msm/adsp/audio_pd".
+ "kernel/elf_loader", "msm/modem/wlan_pd".
+ "tms/servreg", "msm/adsp/audio_pd".
+ "tms/servreg", "msm/modem/wlan_pd".
+ "tms/servreg", "msm/slpi/sensor_pd".
+
= EXAMPLE
The following example represents a QDSP based sound card on a MSM8996 device
which uses apr as communication between Apps and QDSP.
@@ -82,3 +94,41 @@ which uses apr as communication between Apps and QDSP.
...
};
};
+
+= EXAMPLE 2
+The following example represents a QDSP based sound card with protection domain
+dependencies specified. Here some of the apr services depend on services
+running on protection domains hosted on the ADSP/SLPI remote processors,
+while others have no such dependency.
+
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,glink-channels = "apr_audio_svc";
+ qcom,apr-domain = <APR_DOMAIN_ADSP>;
+
+ q6core {
+ compatible = "qcom,q6core";
+ reg = <APR_SVC_ADSP_CORE>;
+ };
+
+ q6afe: q6afe {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ ...
+ };
+
+ q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd";
+ ...
+ };
+
+ q6adm: q6adm {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/adi,adau7118.yaml b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
index 75e0cbe6be70..76ee695097bf 100644
--- a/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
+++ b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
@@ -59,6 +59,8 @@ required:
- iovdd-supply
- dvdd-supply
+additionalProperties: false
+
examples:
- |
i2c {
diff --git a/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml b/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
new file mode 100644
index 000000000000..a61bccf915d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/amlogic,aiu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic AIU audio output controller
+
+maintainers:
+ - Jerome Brunet <jbrunet@baylibre.com>
+
+properties:
+ $nodename:
+ pattern: "^audio-controller@.*"
+
+ "#sound-dai-cells":
+ const: 2
+
+ compatible:
+ items:
+ - enum:
+ - amlogic,aiu-gxbb
+ - amlogic,aiu-gxl
+ - amlogic,aiu-meson8
+ - amlogic,aiu-meson8b
+ - const:
+ amlogic,aiu
+
+ clocks:
+ items:
+ - description: AIU peripheral clock
+ - description: I2S peripheral clock
+ - description: I2S output clock
+ - description: I2S master clock
+ - description: I2S mixer clock
+ - description: SPDIF peripheral clock
+ - description: SPDIF output clock
+ - description: SPDIF master clock
+ - description: SPDIF master clock multiplexer
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: i2s_pclk
+ - const: i2s_aoclk
+ - const: i2s_mclk
+ - const: i2s_mixer
+ - const: spdif_pclk
+ - const: spdif_aoclk
+ - const: spdif_mclk
+ - const: spdif_mclk_sel
+
+ interrupts:
+ items:
+ - description: I2S interrupt line
+ - description: SPDIF interrupt line
+
+ interrupt-names:
+ items:
+ - const: i2s
+ - const: spdif
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - reg
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/clock/gxbb-clkc.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
+
+ aiu: audio-controller@5400 {
+ compatible = "amlogic,aiu-gxl", "amlogic,aiu";
+ #sound-dai-cells = <2>;
+ reg = <0x0 0x5400 0x0 0x2ac>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 50 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "i2s", "spdif";
+ clocks = <&clkc CLKID_AIU_GLUE>,
+ <&clkc CLKID_I2S_OUT>,
+ <&clkc CLKID_AOCLK_GATE>,
+ <&clkc CLKID_CTS_AMCLK>,
+ <&clkc CLKID_MIXER_IFACE>,
+ <&clkc CLKID_IEC958>,
+ <&clkc CLKID_IEC958_GATE>,
+ <&clkc CLKID_CTS_MCLK_I958>,
+ <&clkc CLKID_CTS_I958>;
+ clock-names = "pclk",
+ "i2s_pclk",
+ "i2s_aoclk",
+ "i2s_mclk",
+ "i2s_mixer",
+ "spdif_pclk",
+ "spdif_aoclk",
+ "spdif_mclk",
+ "spdif_mclk_sel";
+ resets = <&reset RESET_AIU>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml b/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
new file mode 100644
index 000000000000..f778d3371fde
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/amlogic,g12a-toacodec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic G12a Internal DAC Control Glue
+
+maintainers:
+ - Jerome Brunet <jbrunet@baylibre.com>
+
+properties:
+ $nodename:
+ pattern: "^audio-controller@.*"
+
+ "#sound-dai-cells":
+ const: 1
+
+ compatible:
+ oneOf:
+ - items:
+ - const:
+ amlogic,g12a-toacodec
+ - items:
+ - enum:
+ - amlogic,sm1-toacodec
+ - const:
+ amlogic,g12a-toacodec
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - reg
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/reset/amlogic,meson-g12a-audio-reset.h>
+
+ toacodec: audio-controller@740 {
+ compatible = "amlogic,g12a-toacodec";
+ reg = <0x0 0x740 0x0 0x4>;
+ #sound-dai-cells = <1>;
+ resets = <&clkc_audio AUD_RESET_TOACODEC>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
new file mode 100644
index 000000000000..fb374c659be1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/amlogic,gx-sound-card.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic GX sound card
+
+maintainers:
+ - Jerome Brunet <jbrunet@baylibre.com>
+
+properties:
+ compatible:
+ items:
+ - const: amlogic,gx-sound-card
+
+ audio-aux-devs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: list of auxiliary devices
+
+ audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ minItems: 2
+ description: |-
+ A list of the connections between audio components. Each entry is a
+ pair of strings, the first being the connection's sink, the second
+ being the connection's source.
+
+ audio-widgets:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ minItems: 2
+ description: |-
+ A list of component DAPM widgets. Each entry is a pair of strings,
+ the first being the widget type, the second being the widget name.
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: User specified audio sound card name
+
+patternProperties:
+ "^dai-link-[0-9]+$":
+ type: object
+ description: |-
+ dai-link child nodes:
+ Container for dai-link level properties and the CODEC sub-nodes.
+ There should be at least one (and probably more) subnode of this type
+
+ properties:
+ dai-format:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [ i2s, left-j, dsp_a ]
+
+ mclk-fs:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |-
+ Multiplication factor between the frame rate and master clock
+ rate
+
+ sound-dai:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle of the CPU DAI
+
+ patternProperties:
+ "^codec-[0-9]+$":
+ type: object
+ description: |-
+ Codecs:
+ A dai-link representing a backend link should have at least one
+ subnode, one for each codec of the dai-link. A dai-link representing
+ a frontend link has no codec and therefore has no subnodes.
+
+ properties:
+ sound-dai:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle of the codec DAI
+
+ required:
+ - sound-dai
+
+ required:
+ - sound-dai
+
+required:
+ - model
+ - dai-link-0
+
+examples:
+ - |
+ sound {
+ compatible = "amlogic,gx-sound-card";
+ model = "GXL-ACME-S905X-FOO";
+ audio-aux-devs = <&amp>;
+ audio-routing = "I2S ENCODER I2S IN", "I2S FIFO Playback";
+
+ dai-link-0 {
+ sound-dai = <&i2s_fifo>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&i2s_encoder>;
+ dai-format = "i2s";
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&codec0>;
+ };
+
+ codec-1 {
+ sound-dai = <&codec1>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml b/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
new file mode 100644
index 000000000000..b7c38c2b5b54
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/amlogic,t9015.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Amlogic T9015 Internal Audio DAC
+
+maintainers:
+ - Jerome Brunet <jbrunet@baylibre.com>
+
+properties:
+ $nodename:
+ pattern: "^audio-controller@.*"
+
+ "#sound-dai-cells":
+ const: 0
+
+ compatible:
+ items:
+ - const: amlogic,t9015
+
+ clocks:
+ items:
+ - description: Peripheral clock
+
+ clock-names:
+ items:
+ - const: pclk
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+
+examples:
+ - |
+ #include <dt-bindings/clock/g12a-clkc.h>
+ #include <dt-bindings/reset/amlogic,meson-g12a-reset.h>
+
+ acodec: audio-controller@32000 {
+ compatible = "amlogic,t9015";
+ reg = <0x0 0x32000 0x0 0x14>;
+ #sound-dai-cells = <0>;
+ clocks = <&clkc CLKID_AUDIO_CODEC>;
+ clock-names = "pclk";
+ resets = <&reset RESET_AUDIO_CODEC>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/brcm,bcm63xx-audio.txt b/Documentation/devicetree/bindings/sound/brcm,bcm63xx-audio.txt
new file mode 100644
index 000000000000..007f524b4d15
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/brcm,bcm63xx-audio.txt
@@ -0,0 +1,29 @@
+Broadcom DSL/PON BCM63xx Audio I2S controller
+
+Required properties:
+- compatible: Should be "brcm,bcm63xx-i2s".
+- #address-cells: 32bit valued, 1 cell.
+- #size-cells: 32bit valued, 0 cell.
+- reg: Should contain audio registers location and length
+- interrupts: Should contain the interrupt for the controller.
+- clocks: Must contain an entry for each entry in clock-names.
+ Please refer to clock-bindings.txt.
+- clock-names: One of each entry matching the clocks phandles list:
+ - "i2sclk" (generated clock) Required.
+ - "i2sosc" (fixed 200MHz clock) Required.
+
+(1) : The generated clock ("i2sclk") is required only when TX or RX
+ operates in master mode.
+(2) : The fixed 200MHz clock ("i2sosc") is provided internally by the
+ chip and is always on.
+
+Example:
+
+ i2s: bcm63xx-i2s {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm63xx-i2s";
+ reg = <0xFF802080 0xFF>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&i2sclk>, <&osc>;
+ clock-names = "i2sclk", "i2sosc";
+ };
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml
new file mode 100644
index 000000000000..efce847a3408
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/cirrus,cs42l51.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: CS42L51 audio codec DT bindings
+
+maintainers:
+ - Olivier Moysan <olivier.moysan@st.com>
+
+properties:
+ compatible:
+ const: cirrus,cs42l51
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: MCLK
+
+ reset-gpios:
+ maxItems: 1
+
+ VL-supply:
+ description: phandle to voltage regulator of digital interface section
+
+ VD-supply:
+ description: phandle to voltage regulator of digital internal section
+
+ VA-supply:
+ description: phandle to voltage regulator of analog internal section
+
+ VAHP-supply:
+ description: phandle to voltage regulator of headphone
+
+required:
+ - compatible
+ - reg
+ - "#sound-dai-cells"
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cs42l51@4a {
+ compatible = "cirrus,cs42l51";
+ reg = <0x4a>;
+ #sound-dai-cells = <0>;
+ clocks = <&mclk_prov>;
+ clock-names = "MCLK";
+ VL-supply = <&reg_audio>;
+ VD-supply = <&reg_audio>;
+ VA-supply = <&reg_audio>;
+ VAHP-supply = <&reg_audio>;
+ reset-gpios = <&gpiog 9 GPIO_ACTIVE_LOW>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/sound/cs42l51.txt b/Documentation/devicetree/bindings/sound/cs42l51.txt
deleted file mode 100644
index acbd68ddd2cb..000000000000
--- a/Documentation/devicetree/bindings/sound/cs42l51.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-CS42L51 audio CODEC
-
-Required properties:
-
- - compatible : "cirrus,cs42l51"
-
- - reg : the I2C address of the device for I2C.
-
-Optional properties:
- - VL-supply, VD-supply, VA-supply, VAHP-supply: power supplies for the device,
- as covered in Documentation/devicetree/bindings/regulator/regulator.txt.
-
- - reset-gpios : GPIO specification for the reset pin. If specified, it will be
- deasserted before starting the communication with the codec.
-
- - clocks : a list of phandles + clock-specifiers, one for each entry in
- clock-names
-
- - clock-names : must contain "MCLK"
-
-Example:
-
-cs42l51: cs42l51@4a {
- compatible = "cirrus,cs42l51";
- reg = <0x4a>;
- clocks = <&mclk_prov>;
- clock-names = "MCLK";
- VL-supply = <&reg_audio>;
- VD-supply = <&reg_audio>;
- VA-supply = <&reg_audio>;
- VAHP-supply = <&reg_audio>;
- reset-gpios = <&gpiog 9 GPIO_ACTIVE_LOW>;
-};
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
deleted file mode 100644
index 8ca52dcc5572..000000000000
--- a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Audio codec controlled by ChromeOS EC
-
-Google's ChromeOS EC codec is a digital mic codec provided by the
-Embedded Controller (EC) and is controlled via a host-command interface.
-
-An EC codec node should only be found as a sub-node of the EC node (see
-Documentation/devicetree/bindings/mfd/cros-ec.txt).
-
-Required properties:
-- compatible: Must contain "google,cros-ec-codec"
-- #sound-dai-cells: Should be 1. The cell specifies number of DAIs.
-
-Optional properties:
-- reg: Pysical base address and length of shared memory region from EC.
- It contains 3 unsigned 32-bit integer. The first 2 integers
- combine to become an unsigned 64-bit physical address. The last
- one integer is length of the shared memory.
-- memory-region: Shared memory region to EC. A "shared-dma-pool". See
- ../reserved-memory/reserved-memory.txt for details.
-
-Example:
-
-{
- ...
-
- reserved_mem: reserved_mem {
- compatible = "shared-dma-pool";
- reg = <0 0x52800000 0 0x100000>;
- no-map;
- };
-}
-
-cros-ec@0 {
- compatible = "google,cros-ec-spi";
-
- ...
-
- cros_ec_codec: ec-codec {
- compatible = "google,cros-ec-codec";
- #sound-dai-cells = <1>;
- reg = <0x0 0x10500000 0x80000>;
- memory-region = <&reserved_mem>;
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
new file mode 100644
index 000000000000..c84e656afb0a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/google,cros-ec-codec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Audio codec controlled by ChromeOS EC
+
+maintainers:
+ - Cheng-Yi Chiang <cychiang@chromium.org>
+
+description: |
+ Google's ChromeOS EC codec is a digital mic codec provided by the
+ Embedded Controller (EC) and is controlled via a host-command interface.
+ An EC codec node should only be found as a sub-node of the EC node (see
+ Documentation/devicetree/bindings/mfd/cros-ec.txt).
+
+properties:
+ compatible:
+ const: google,cros-ec-codec
+
+ "#sound-dai-cells":
+ const: 1
+
+ reg:
+ items:
+ - description: |
+ Physical base address and length of shared memory region from EC.
+ It contains 3 unsigned 32-bit integers. The first 2 integers
+ combine to form an unsigned 64-bit physical address.
+ The last integer is the length of the shared memory.
+
+ memory-region:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description: |
+ Shared memory region to EC. A "shared-dma-pool".
+ See ../reserved-memory/reserved-memory.txt for details.
+
+required:
+ - compatible
+ - '#sound-dai-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ reserved_mem: reserved-mem@52800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x52800000 0x100000>;
+ no-map;
+ };
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cros-ec@0 {
+ compatible = "google,cros-ec-spi";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ reg = <0>;
+ cros_ec_codec: ec-codec@10500000 {
+ compatible = "google,cros-ec-codec";
+ #sound-dai-cells = <1>;
+ reg = <0x0 0x10500000 0x80000>;
+ memory-region = <&reserved_mem>;
+ };
+ };
+ };
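Note: in the example above, the EC node sets #address-cells = <2> and
#size-cells = <1>, so reg = <0x0 0x10500000 0x80000> encodes the unsigned
64-bit shared-memory base address 0x10500000 (upper 32 bits 0x0) and a
length of 0x80000 bytes.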
diff --git a/Documentation/devicetree/bindings/sound/ingenic,aic.yaml b/Documentation/devicetree/bindings/sound/ingenic,aic.yaml
new file mode 100644
index 000000000000..44f49bebb267
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ingenic,aic.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ingenic,aic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs AC97 / I2S Controller (AIC) DT bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+ $nodename:
+ pattern: '^audio-controller@'
+
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-i2s
+ - ingenic,jz4760-i2s
+ - ingenic,jz4770-i2s
+ - ingenic,jz4780-i2s
+ - items:
+ - const: ingenic,jz4725b-i2s
+ - const: ingenic,jz4740-i2s
+
+ '#sound-dai-cells':
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: AIC clock
+ - description: I2S clock
+ - description: EXT clock
+ - description: PLL/2 clock
+
+ clock-names:
+ items:
+ - const: aic
+ - const: i2s
+ - const: ext
+ - const: pll half
+
+ dmas:
+ items:
+ - description: DMA controller phandle and request line for I2S RX
+ - description: DMA controller phandle and request line for I2S TX
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+ - '#sound-dai-cells'
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4740-cgu.h>
+ aic: audio-controller@10020000 {
+ compatible = "ingenic,jz4740-i2s";
+ reg = <0x10020000 0x38>;
+
+ #sound-dai-cells = <0>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <18>;
+
+ clocks = <&cgu JZ4740_CLK_AIC>,
+ <&cgu JZ4740_CLK_I2S>,
+ <&cgu JZ4740_CLK_EXT>,
+ <&cgu JZ4740_CLK_PLL_HALF>;
+ clock-names = "aic", "i2s", "ext", "pll half";
+
+ dmas = <&dmac 25 0xffffffff>, <&dmac 24 0xffffffff>;
+ dma-names = "rx", "tx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt b/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt
deleted file mode 100644
index b623d50004fb..000000000000
--- a/Documentation/devicetree/bindings/sound/ingenic,jz4740-i2s.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Ingenic JZ4740 I2S controller
-
-Required properties:
-- compatible : "ingenic,jz4740-i2s" or "ingenic,jz4780-i2s"
-- reg : I2S registers location and length
-- clocks : AIC and I2S PLL clock specifiers.
-- clock-names: "aic" and "i2s"
-- dmas: DMA controller phandle and DMA request line for I2S Tx and Rx channels
-- dma-names: Must be "tx" and "rx"
-
-Example:
-
-i2s: i2s@10020000 {
- compatible = "ingenic,jz4740-i2s";
- reg = <0x10020000 0x94>;
-
- clocks = <&cgu JZ4740_CLK_AIC>, <&cgu JZ4740_CLK_I2SPLL>;
- clock-names = "aic", "i2s";
-
- dmas = <&dma 2>, <&dma 3>;
- dma-names = "tx", "rx";
-
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
index b795d282818d..a8f2b0c56c79 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
@@ -18,6 +18,7 @@ Required properties:
* Headphone Jack
* Int Spk
* Mic Jack
+ * Int Mic
- nvidia,i2s-controller : The phandle of the Tegra I2S1 controller
- nvidia,audio-codec : The phandle of the WM8903 audio codec
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
index 38eaf0c028f9..a495d5fc0d23 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
@@ -139,6 +139,8 @@ required:
- "#address-cells"
- "#size-cells"
+additionalProperties: false
+
examples:
- |
codec@1,0{
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.yaml b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
index 140a37fc3c0b..d1b65554e681 100644
--- a/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
@@ -63,6 +63,8 @@ required:
- reg
- interrupts
+additionalProperties: false
+
examples:
- |
sh_fsi2: sound@ec230000 {
diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt b/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt
index 2469588c7ccb..1ecd75d2032a 100644
--- a/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt
@@ -10,6 +10,11 @@ Required properties:
- clock-names: should be "pclk".
- spk-depop-time-ms: speaker depop time in msec.
+Optional properties:
+
+- mute-gpios: GPIO specifier for external line driver control (typically the
+ dedicated GPIO_MUTE pin)
+
Example for rk3328 internal codec:
codec: codec@ff410000 {
@@ -18,6 +23,6 @@ codec: codec@ff410000 {
rockchip,grf = <&grf>;
clocks = <&cru PCLK_ACODEC>;
clock-names = "pclk";
+ mute-gpios = <&grf_gpio 0 GPIO_ACTIVE_LOW>;
spk-depop-time-ms = <100>;
- status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
deleted file mode 100644
index 54aefab71f2c..000000000000
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-* Rockchip I2S controller
-
-The I2S bus (Inter-IC sound bus) is a serial link for digital
-audio data transfer between devices in the system.
-
-Required properties:
-
-- compatible: should be one of the following:
- - "rockchip,rk3066-i2s": for rk3066
- - "rockchip,px30-i2s", "rockchip,rk3066-i2s": for px30
- - "rockchip,rk3036-i2s", "rockchip,rk3066-i2s": for rk3036
- - "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188
- - "rockchip,rk3228-i2s", "rockchip,rk3066-i2s": for rk3228
- - "rockchip,rk3288-i2s", "rockchip,rk3066-i2s": for rk3288
- - "rockchip,rk3328-i2s", "rockchip,rk3066-i2s": for rk3328
- - "rockchip,rk3366-i2s", "rockchip,rk3066-i2s": for rk3366
- - "rockchip,rk3368-i2s", "rockchip,rk3066-i2s": for rk3368
- - "rockchip,rk3399-i2s", "rockchip,rk3066-i2s": for rk3399
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: should contain the I2S interrupt.
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
- Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: should include "tx" and "rx".
-- clocks: a list of phandle + clock-specifer pairs, one for each entry in clock-names.
-- clock-names: should contain the following:
- - "i2s_hclk": clock for I2S BUS
- - "i2s_clk" : clock for I2S controller
-- rockchip,playback-channels: max playback channels, if not set, 8 channels default.
-- rockchip,capture-channels: max capture channels, if not set, 2 channels default.
-
-Required properties for controller which support multi channels
-playback/capture:
-
-- rockchip,grf: the phandle of the syscon node for GRF register.
-
-Example for rk3288 I2S controller:
-
-i2s@ff890000 {
- compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
- reg = <0xff890000 0x10000>;
- interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&pdma1 0>, <&pdma1 1>;
- dma-names = "tx", "rx";
- clock-names = "i2s_hclk", "i2s_clk";
- clocks = <&cru HCLK_I2S0>, <&cru SCLK_I2S0>;
- rockchip,playback-channels = <8>;
- rockchip,capture-channels = <2>;
-};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml b/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
new file mode 100644
index 000000000000..7cd0e278ed85
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/rockchip-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip I2S controller
+
+description:
+ The I2S bus (Inter-IC sound bus) is a serial link for digital
+ audio data transfer between devices in the system.
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ oneOf:
+ - const: rockchip,rk3066-i2s
+ - items:
+ - enum:
+ - rockchip,px30-i2s
+ - rockchip,rk3036-i2s
+ - rockchip,rk3188-i2s
+ - rockchip,rk3228-i2s
+ - rockchip,rk3288-i2s
+ - rockchip,rk3328-i2s
+ - rockchip,rk3366-i2s
+ - rockchip,rk3368-i2s
+ - rockchip,rk3399-i2s
+ - const: rockchip,rk3066-i2s
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: clock for I2S controller
+ - description: clock for I2S BUS
+
+ clock-names:
+ items:
+ - const: i2s_clk
+ - const: i2s_hclk
+
+ dmas:
+ items:
+ - description: TX DMA Channel
+ - description: RX DMA Channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ rockchip,capture-channels:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ default: 2
+ description:
+ Maximum number of capture channels. Defaults to 2 if not set.
+
+ rockchip,playback-channels:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ default: 8
+ description:
+ Maximum number of playback channels. Defaults to 8 if not set.
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle of the syscon node for the GRF register.
+ Required for controllers that support multi-channel
+ playback/capture.
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3288-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2s@ff890000 {
+ compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
+ reg = <0xff890000 0x10000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S0>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ dmas = <&pdma1 0>, <&pdma1 1>;
+ dma-names = "tx", "rx";
+ rockchip,capture-channels = <2>;
+ rockchip,playback-channels = <8>;
+ #sound-dai-cells = <0>;
+ };
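Controllers that support multi-channel playback/capture would additionally
reference the GRF; a sketch of the extra property, assuming a &grf syscon
label is available:

        rockchip,grf = <&grf>;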
diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
index 30e927a28369..ade1ece8b45f 100644
--- a/Documentation/devicetree/bindings/sound/rt5682.txt
+++ b/Documentation/devicetree/bindings/sound/rt5682.txt
@@ -32,6 +32,18 @@ Optional properties:
The delay time is realtek,btndet-delay value multiple of 8.192 ms.
If absent, the default is 16.
+- #clock-cells : Should be set to '<1>', wclk and bclk sources provided.
+- clock-output-names : Name given for DAI clocks output.
+
+- clocks : phandle and clock specifier for codec MCLK.
+- clock-names : Clock name string for 'clocks' attribute, should be "mclk".
+
+- realtek,dmic-clk-rate-hz : Set the clock rate (hz) for the requirement of
+ the particular DMIC.
+
+- realtek,dmic-delay-ms : Set the delay time (ms) for the requirement of
+ the particular DMIC.
+
Pins on the device (for linking into audio routes) for RT5682:
* DMIC L1
@@ -53,4 +65,10 @@ rt5682 {
realtek,dmic1-clk-pin = <1>;
realtek,jd-src = <1>;
realtek,btndet-delay = <16>;
+
+ #clock-cells = <1>;
+ clock-output-names = "rt5682-dai-wclk", "rt5682-dai-bclk";
+
+ clocks = <&osc>;
+ clock-names = "mclk";
};
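On the consumer side, the two DAI clocks are selected by cell value in the
order given by clock-output-names above (0 for wclk, 1 for bclk); a minimal
sketch with a purely hypothetical consumer node and an assumed rt5682 label:

        sound-user {
                clocks = <&rt5682 0>, <&rt5682 1>; /* 0: wclk, 1: bclk */
                clock-names = "wclk", "bclk";
        };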
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
index c6b244352d05..8ff2d39e7d17 100644
--- a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
@@ -69,6 +69,8 @@ required:
- cpu
- codec
+additionalProperties: false
+
examples:
- |
sound {
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.yaml b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
index 53e3bad4178c..b2ad093d94df 100644
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
@@ -115,6 +115,8 @@ required:
- clocks
- clock-names
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/exynos-audss-clk.h>
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt b/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt
deleted file mode 100644
index cbf24bcd1b8d..000000000000
--- a/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-STMicroelectronics STM32 SPI/I2S Controller
-
-The SPI/I2S block supports I2S/PCM protocols when configured on I2S mode.
-Only some SPI instances support I2S.
-
-Required properties:
- - compatible: Must be "st,stm32h7-i2s"
- - reg: Offset and length of the device's register set.
- - interrupts: Must contain the interrupt line id.
- - clocks: Must contain phandle and clock specifier pairs for each entry
- in clock-names.
- - clock-names: Must contain "i2sclk", "pclk", "x8k" and "x11k".
- "i2sclk": clock which feeds the internal clock generator
- "pclk": clock which feeds the peripheral bus interface
- "x8k": I2S parent clock for sampling rates multiple of 8kHz.
- "x11k": I2S parent clock for sampling rates multiple of 11.025kHz.
- - dmas: DMA specifiers for tx and rx dma.
- See Documentation/devicetree/bindings/dma/stm32-dma.txt.
- - dma-names: Identifier for each DMA request line. Must be "tx" and "rx".
- - pinctrl-names: should contain only value "default"
- - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
-
-Optional properties:
- - resets: Reference to a reset controller asserting the reset controller
-
-The device node should contain one 'port' child node with one child 'endpoint'
-node, according to the bindings defined in Documentation/devicetree/bindings/
-graph.txt.
-
-Example:
-sound_card {
- compatible = "audio-graph-card";
- dais = <&i2s2_port>;
-};
-
-i2s2: audio-controller@40003800 {
- compatible = "st,stm32h7-i2s";
- reg = <0x40003800 0x400>;
- interrupts = <36>;
- clocks = <&rcc PCLK1>, <&rcc SPI2_CK>, <&rcc PLL1_Q>, <&rcc PLL2_P>;
- clock-names = "pclk", "i2sclk", "x8k", "x11k";
- dmas = <&dmamux2 2 39 0x400 0x1>,
- <&dmamux2 3 40 0x400 0x1>;
- dma-names = "rx", "tx";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2s2>;
-
- i2s2_port: port@0 {
- cpu_endpoint: endpoint {
- remote-endpoint = <&codec_endpoint>;
- format = "i2s";
- };
- };
-};
-
-audio-codec {
- codec_port: port@0 {
- codec_endpoint: endpoint {
- remote-endpoint = <&cpu_endpoint>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml b/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
new file mode 100644
index 000000000000..f32410890589
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,stm32-i2s.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/st,stm32-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 SPI/I2S Controller
+
+maintainers:
+ - Olivier Moysan <olivier.moysan@st.com>
+
+description:
+ The SPI/I2S block supports I2S/PCM protocols when configured in I2S mode.
+ Only some SPI instances support I2S.
+
+properties:
+ compatible:
+ enum:
+ - st,stm32h7-i2s
+
+ "#sound-dai-cells":
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: clock feeding the peripheral bus interface.
+ - description: clock feeding the internal clock generator.
+ - description: I2S parent clock for sampling rates multiple of 8kHz.
+ - description: I2S parent clock for sampling rates multiple of 11.025kHz.
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: i2sclk
+ - const: x8k
+ - const: x11k
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: audio capture DMA.
+ - description: audio playback DMA.
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#sound-dai-cells"
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - dmas
+ - dma-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ i2s2: audio-controller@4000b000 {
+ compatible = "st,stm32h7-i2s";
+ #sound-dai-cells = <0>;
+ reg = <0x4000b000 0x400>;
+ clocks = <&rcc SPI2>, <&rcc SPI2_K>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+ clock-names = "pclk", "i2sclk", "x8k", "x11k";
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 39 0x400 0x01>,
+ <&dmamux1 40 0x400 0x01>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2_pins_a>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
deleted file mode 100644
index ca9101777c44..000000000000
--- a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-STMicroelectronics STM32 S/PDIF receiver (SPDIFRX).
-
-The SPDIFRX peripheral, is designed to receive an S/PDIF flow compliant with
-IEC-60958 and IEC-61937.
-
-Required properties:
- - compatible: should be "st,stm32h7-spdifrx"
- - reg: cpu DAI IP base address and size
- - clocks: must contain an entry for kclk (used as S/PDIF signal reference)
- - clock-names: must contain "kclk"
- - interrupts: cpu DAI interrupt line
- - dmas: DMA specifiers for audio data DMA and iec control flow DMA
- See STM32 DMA bindings, Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
- - dma-names: two dmas have to be defined, "rx" and "rx-ctrl"
-
-Optional properties:
- - resets: Reference to a reset controller asserting the SPDIFRX
-
-The device node should contain one 'port' child node with one child 'endpoint'
-node, according to the bindings defined in Documentation/devicetree/bindings/
-graph.txt.
-
-Example:
-spdifrx: spdifrx@40004000 {
- compatible = "st,stm32h7-spdifrx";
- reg = <0x40004000 0x400>;
- clocks = <&rcc SPDIFRX_CK>;
- clock-names = "kclk";
- interrupts = <97>;
- dmas = <&dmamux1 2 93 0x400 0x0>,
- <&dmamux1 3 94 0x400 0x0>;
- dma-names = "rx", "rx-ctrl";
- pinctrl-0 = <&spdifrx_pins>;
- pinctrl-names = "default";
-
- spdifrx_port: port {
- cpu_endpoint: endpoint {
- remote-endpoint = <&codec_endpoint>;
- };
- };
-};
-
-spdif_in: spdif-in {
- compatible = "linux,spdif-dir";
-
- codec_port: port {
- codec_endpoint: endpoint {
- remote-endpoint = <&cpu_endpoint>;
- };
- };
-};
-
-soundcard {
- compatible = "audio-graph-card";
- dais = <&spdifrx_port>;
-};
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml
new file mode 100644
index 000000000000..b7f7dc452231
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/st,stm32-spdifrx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 S/PDIF receiver (SPDIFRX)
+
+maintainers:
+ - Olivier Moysan <olivier.moysan@st.com>
+
+description: |
+ The SPDIFRX peripheral is designed to receive an S/PDIF flow compliant with
+ IEC-60958 and IEC-61937.
+
+properties:
+ compatible:
+ enum:
+ - st,stm32h7-spdifrx
+
+ "#sound-dai-cells":
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: kclk
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: audio data capture DMA
+ - description: IEC status bits capture DMA
+
+ dma-names:
+ items:
+ - const: rx
+ - const: rx-ctrl
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#sound-dai-cells"
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - dmas
+ - dma-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ spdifrx: spdifrx@40004000 {
+ compatible = "st,stm32h7-spdifrx";
+ #sound-dai-cells = <0>;
+ reg = <0x40004000 0x400>;
+ clocks = <&rcc SPDIF_K>;
+ clock-names = "kclk";
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 2 93 0x400 0x0>,
+ <&dmamux1 3 94 0x400 0x0>;
+ dma-names = "rx", "rx-ctrl";
+ pinctrl-0 = <&spdifrx_pins>;
+ pinctrl-names = "default";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/tas2562.txt b/Documentation/devicetree/bindings/sound/tas2562.txt
index 658e1fb18a99..94796b547184 100644
--- a/Documentation/devicetree/bindings/sound/tas2562.txt
+++ b/Documentation/devicetree/bindings/sound/tas2562.txt
@@ -8,7 +8,7 @@ real time monitoring of loudspeaker behavior.
Required properties:
- #address-cells - Should be <1>.
- #size-cells - Should be <0>.
- - compatible: - Should contain "ti,tas2562".
+ - compatible: - Should contain "ti,tas2562" or "ti,tas2563".
- reg: - The i2c address. Should be 0x4c, 0x4d, 0x4e or 0x4f.
- ti,imon-slot-no:- TDM TX current sense time slot.
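A TAS2563 node would look the same apart from the compatible string; a
minimal sketch, where the I2C address and slot number are chosen purely for
illustration:

        tas2563: tas2563@4d {
                compatible = "ti,tas2563";
                reg = <0x4d>;
                ti,imon-slot-no = <0>; /* illustrative TDM TX slot */
        };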
diff --git a/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
new file mode 100644
index 000000000000..ab2268c0ee67
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause)
+# Copyright (C) 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/tlv320adcx140.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments TLV320ADCX140 Quad Channel Analog-to-Digital Converter
+
+maintainers:
+ - Dan Murphy <dmurphy@ti.com>
+
+description: |
+ The TLV320ADCX140 devices are multichannel (4-channel analog recording or
+ 8-channel digital PDM microphone recording) high-performance audio
+ analog-to-digital converters (ADC) with analog inputs supporting up to
+ 2V RMS. The TLV320ADCX140 family supports line and microphone inputs, and
+ offers programmable microphone bias or supply voltage generation.
+
+ Specifications can be found at:
+ http://www.ti.com/lit/ds/symlink/tlv320adc3140.pdf
+ http://www.ti.com/lit/ds/symlink/tlv320adc5140.pdf
+ http://www.ti.com/lit/ds/symlink/tlv320adc6140.pdf
+
+properties:
+ compatible:
+ oneOf:
+ - const: ti,tlv320adc3140
+ - const: ti,tlv320adc5140
+ - const: ti,tlv320adc6140
+
+ reg:
+ maxItems: 1
+ description: |
+ I2C address of the device. Can be one of 0x4c, 0x4d, 0x4e or 0x4f.
+
+ reset-gpios:
+ description: |
+ GPIO used for hardware reset.
+
+ areg-supply:
+ description: |
+ Regulator with AVDD at 3.3V. If not defined then the internal regulator
+ is enabled.
+
+ ti,mic-bias-source:
+ description: |
+ Indicates the source for MIC Bias.
+ 0 - Mic bias is set to VREF
+ 1 - Mic bias is set to VREF × 1.096
+ 6 - Mic bias is set to AVDD
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 6]
+
+ ti,vref-source:
+ description: |
+ Indicates the voltage level selected for VREF.
+ 0 - Set VREF to 2.75V
+ 1 - Set VREF to 2.5V
+ 2 - Set VREF to 1.375V
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2]
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ codec: codec@4c {
+ compatible = "ti,tlv320adc5140";
+ reg = <0x4c>;
+ ti,mic-bias-source = <6>;
+ reset-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+ };
+ };
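If, say, a 2.5 V reference were wanted instead of the default, the codec
node above could additionally set the property per the enum documented
earlier:

        ti,vref-source = <1>; /* 1: VREF = 2.5V */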
diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
index 0565dc49e449..243a6b1e66ea 100644
--- a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
@@ -18,9 +18,14 @@ properties:
"#size-cells": true
compatible:
- enum:
- - allwinner,sun6i-a31-spi
- - allwinner,sun8i-h3-spi
+ oneOf:
+ - const: allwinner,sun6i-a31-spi
+ - const: allwinner,sun8i-h3-spi
+ - items:
+ - enum:
+ - allwinner,sun8i-r40-spi
+ - allwinner,sun50i-h6-spi
+ - const: allwinner,sun8i-h3-spi
reg:
maxItems: 1
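In a DTS the new entries are written as the SoC-specific string followed by
the H3 fallback; a minimal sketch (the unit address is illustrative):

        spi@5010000 {
                compatible = "allwinner,sun50i-h6-spi", "allwinner,sun8i-h3-spi";
        };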
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt
deleted file mode 100644
index 1d64b61f5171..000000000000
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Qualcomm Quad Serial Peripheral Interface (QSPI)
-
-The QSPI controller allows SPI protocol communication in single, dual, or quad
-wire transmission modes for read/write access to slaves such as NOR flash.
-
-Required properties:
-- compatible: An SoC specific identifier followed by "qcom,qspi-v1", such as
- "qcom,sdm845-qspi", "qcom,qspi-v1"
-- reg: Should contain the base register location and length.
-- interrupts: Interrupt number used by the controller.
-- clocks: Should contain the core and AHB clock.
-- clock-names: Should be "core" for core clock and "iface" for AHB clock.
-
-SPI slave nodes must be children of the SPI master node and can contain
-properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Example:
-
- qspi: spi@88df000 {
- compatible = "qcom,sdm845-qspi", "qcom,qspi-v1";
- reg = <0x88df000 0x600>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "iface", "core";
- clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
- <&gcc GCC_QSPI_CORE_CLK>;
-
- flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <25000000>;
- spi-tx-bus-width = <2>;
- spi-rx-bus-width = <2>;
- };
- };
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
new file mode 100644
index 000000000000..0cf470eaf2a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/spi/qcom,spi-qcom-qspi.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Quad Serial Peripheral Interface (QSPI)
+
+maintainers:
+ - Mukesh Savaliya <msavaliy@codeaurora.org>
+ - Akash Asthana <akashast@codeaurora.org>
+
+description:
+ The QSPI controller allows SPI protocol communication in single, dual, or quad
+ wire transmission modes for read/write access to slaves such as NOR flash.
+
+allOf:
+ - $ref: /schemas/spi/spi-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: qcom,sdm845-qspi
+ - const: qcom,qspi-v1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: iface
+ - const: core
+
+ clocks:
+ items:
+ - description: AHB clock
+ - description: QSPI core clock
+
+ interconnects:
+ minItems: 1
+ maxItems: 2
+
+ interconnect-names:
+ items:
+ - const: qspi-config
+ - const: qspi-memory
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clock-names
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc: soc@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ qspi: spi@88df000 {
+ compatible = "qcom,sdm845-qspi", "qcom,qspi-v1";
+ reg = <0 0x88df000 0 0x600>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "iface", "core";
+ clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <&gcc GCC_QSPI_CORE_CLK>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ spi-tx-bus-width = <2>;
+ spi-rx-bus-width = <2>;
+ };
+
+ };
+ };
+...
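The optional interconnect properties would sit alongside the clocks in such
a node; a sketch where the provider phandles and path IDs are placeholders:

        interconnects = <&config_noc 0 &system_noc 1>,
                        <&aggre_noc 2 &mem_noc 3>;
        interconnect-names = "qspi-config", "qspi-memory";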
diff --git a/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml b/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml
index 222990f9923c..930188bc5e6a 100644
--- a/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml
+++ b/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml
@@ -43,6 +43,9 @@ properties:
'#size-cells':
const: 1
+ ranges:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -51,9 +54,12 @@ required:
- clock-names
- '#address-cells'
- '#size-cells'
+ - ranges
+
+additionalProperties: false
patternProperties:
- "^.+-sram$":
+ "-sram@[0-9a-f]+$":
type: object
description: A region of reserved memory.
@@ -61,12 +67,8 @@ patternProperties:
reg:
maxItems: 1
- ranges:
- maxItems: 1
-
required:
- reg
- - ranges
examples:
- |
@@ -88,9 +90,9 @@ examples:
#address-cells = <1>;
#size-cells = <1>;
+ ranges = <0 0xfec00000 0x100000>;
gmu-sram@0 {
reg = <0x0 0x100000>;
- ranges = <0 0 0xfec00000 0x100000>;
};
};
diff --git a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
index f761681e4c0d..e43ec50bda37 100644
--- a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
@@ -32,6 +32,8 @@ properties:
description: phandle to the ao-secure syscon
$ref: '/schemas/types.yaml#/definitions/phandle'
+ '#thermal-sensor-cells':
+ const: 0
required:
- compatible
@@ -40,6 +42,8 @@ required:
- clocks
- amlogic,ao-secure
+additionalProperties: false
+
examples:
- |
cpu_temp: temperature-sensor@ff634800 {
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index f3b441100890..b0bee7e42038 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -12,7 +12,7 @@ Required properties:
Note: these bindings are deprecated for AP806/CP110 and should instead
follow the rules described in:
-Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+Documentation/devicetree/bindings/arm/marvell/ap80x-system-controller.txt
Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
- reg: Device's register space.
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index eef13b9446a8..a57b76ad7dea 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -47,19 +47,30 @@ properties:
- description: TM registers
- description: SROT registers
+ interrupts:
+ minItems: 1
+ items:
+ - description: Combined interrupt if upper or lower threshold crossed
+ - description: Interrupt if critical threshold crossed
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: uplow
+ - const: critical
+
nvmem-cells:
minItems: 1
maxItems: 2
description:
Reference to an nvmem node for the calibration data
- nvmem-cells-names:
+ nvmem-cell-names:
minItems: 1
maxItems: 2
items:
- - enum:
- - caldata
- - calsel
+ - const: calib
+ - const: calib_sel
"#qcom,sensors":
allOf:
@@ -90,22 +101,16 @@ allOf:
then:
properties:
interrupts:
- items:
- - description: Combined interrupt if upper or lower threshold crossed
+ maxItems: 1
interrupt-names:
- items:
- - const: uplow
+ maxItems: 1
else:
properties:
interrupts:
- items:
- - description: Combined interrupt if upper or lower threshold crossed
- - description: Interrupt if critical threshold crossed
+ minItems: 2
interrupt-names:
- items:
- - const: uplow
- - const: critical
+ minItems: 2
required:
- compatible
@@ -115,6 +120,8 @@ required:
- interrupt-names
- "#thermal-sensor-cells"
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -125,7 +132,7 @@ examples:
<0x4a8000 0x1000>; /* SROT */
nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
- nvmem-cell-names = "caldata", "calsel";
+ nvmem-cell-names = "calib", "calib_sel";
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "uplow";
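On parts that also route the critical interrupt, both entries would be
listed; a sketch with placeholder interrupt numbers:

        interrupts = <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "uplow", "critical";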
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
deleted file mode 100644
index 196112d23b1e..000000000000
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-* Renesas R-Car Thermal
-
-Required properties:
-- compatible : "renesas,thermal-<soctype>",
- "renesas,rcar-gen2-thermal" (with thermal-zone) or
- "renesas,rcar-thermal" (without thermal-zone) as
- fallback except R-Car V3M/E3/D3 and RZ/G2E.
- Examples with soctypes are:
- - "renesas,thermal-r8a73a4" (R-Mobile APE6)
- - "renesas,thermal-r8a7743" (RZ/G1M)
- - "renesas,thermal-r8a7744" (RZ/G1N)
- - "renesas,thermal-r8a774c0" (RZ/G2E)
- - "renesas,thermal-r8a7779" (R-Car H1)
- - "renesas,thermal-r8a7790" (R-Car H2)
- - "renesas,thermal-r8a7791" (R-Car M2-W)
- - "renesas,thermal-r8a7792" (R-Car V2H)
- - "renesas,thermal-r8a7793" (R-Car M2-N)
- - "renesas,thermal-r8a77970" (R-Car V3M)
- - "renesas,thermal-r8a77990" (R-Car E3)
- - "renesas,thermal-r8a77995" (R-Car D3)
-- reg : Address range of the thermal registers.
- The 1st reg will be recognized as common register
- if it has "interrupts".
-
-Option properties:
-
-- interrupts : If present should contain 3 interrupts for
- R-Car V3M/E3/D3 and RZ/G2E or 1 interrupt otherwise.
-
-Example (non interrupt support):
-
-thermal@ffc48000 {
- compatible = "renesas,thermal-r8a7779", "renesas,rcar-thermal";
- reg = <0xffc48000 0x38>;
-};
-
-Example (interrupt support):
-
-thermal@e61f0000 {
- compatible = "renesas,thermal-r8a73a4", "renesas,rcar-thermal";
- reg = <0xe61f0000 0x14
- 0xe61f0100 0x38
- 0xe61f0200 0x38
- 0xe61f0300 0x38>;
- interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
-};
-
-Example (with thermal-zone):
-
-thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay-passive = <1000>;
- polling-delay = <5000>;
-
- thermal-sensors = <&thermal>;
-
- trips {
- cpu-crit {
- temperature = <115000>;
- hysteresis = <0>;
- type = "critical";
- };
- };
- cooling-maps {
- };
- };
-};
-
-thermal: thermal@e61f0000 {
- compatible = "renesas,thermal-r8a7790",
- "renesas,rcar-gen2-thermal",
- "renesas,rcar-thermal";
- reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
- interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
- power-domains = <&cpg_clocks>;
- #thermal-sensor-cells = <0>;
-};
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
new file mode 100644
index 000000000000..d2f4f1b063ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2020 Renesas Electronics Corp.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/rcar-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car Thermal
+
+maintainers:
+ - Niklas Söderlund <niklas.soderlund@ragnatech.se>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,thermal-r8a73a4 # R-Mobile APE6
+ - renesas,thermal-r8a7779 # R-Car H1
+ - const: renesas,rcar-thermal # Generic without thermal-zone
+ - items:
+ - enum:
+ - renesas,thermal-r8a7743 # RZ/G1M
+ - renesas,thermal-r8a7744 # RZ/G1N
+ - const: renesas,rcar-gen2-thermal # Generic thermal-zone
+ - items:
+ - enum:
+ - renesas,thermal-r8a7790 # R-Car H2
+ - renesas,thermal-r8a7791 # R-Car M2-W
+ - renesas,thermal-r8a7792 # R-Car V2H
+ - renesas,thermal-r8a7793 # R-Car M2-N
+ - const: renesas,rcar-gen2-thermal # Generic thermal-zone
+ - const: renesas,rcar-thermal # Generic without thermal-zone
+ - items:
+ - enum:
+ - renesas,thermal-r8a774c0 # RZ/G2E
+ - renesas,thermal-r8a77970 # R-Car V3M
+ - renesas,thermal-r8a77990 # R-Car E3
+ - renesas,thermal-r8a77995 # R-Car D3
+ reg:
+ description:
+ Address ranges of the thermal registers. If more than one range is
+ given, the first one must be the common registers, followed by each
+ sensor according to the datasheet.
+ minItems: 1
+ maxItems: 4
+
+ interrupts:
+ minItems: 1
+ maxItems: 3
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,thermal-r8a73a4 # R-Mobile APE6
+ - renesas,thermal-r8a7779 # R-Car H1
+then:
+ required:
+ - compatible
+ - reg
+else:
+ required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+ - resets
+
+examples:
+ # Example (non interrupt support)
+ - |
+ thermal@ffc48000 {
+ compatible = "renesas,thermal-r8a7779", "renesas,rcar-thermal";
+ reg = <0xffc48000 0x38>;
+ };
+
+ # Example (interrupt support)
+ - |
+ #include <dt-bindings/clock/r8a73a4-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ thermal@e61f0000 {
+ compatible = "renesas,thermal-r8a73a4", "renesas,rcar-thermal";
+ reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>,
+ <0 0xe61f0200 0 0x38>, <0 0xe61f0300 0 0x38>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks R8A73A4_CLK_THERMAL>;
+ power-domains = <&pd_c5>;
+ };
+
+ # Example (with thermal-zone)
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ thermal: thermal@e61f0000 {
+ compatible = "renesas,thermal-r8a7790",
+ "renesas,rcar-gen2-thermal",
+ "renesas,rcar-thermal";
+ reg = <0 0xe61f0000 0 0x10>, <0 0xe61f0100 0 0x38>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+
+ thermal-sensors = <&thermal>;
+
+ trips {
+ cpu-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ cooling-maps {
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
index 6deead07728e..fa255672e8e5 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
@@ -82,6 +82,8 @@ properties:
required:
- compatible
+additionalProperties: false
+
oneOf:
- required:
- interrupts
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
index 102f319833d9..582bbef62b95 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
@@ -32,6 +32,8 @@ properties:
'#size-cells':
const: 1
+ ranges: true
+
clock-frequency:
description: The frequency of the main counter, in Hz. Should be present
only where necessary to work around broken firmware which does not configure
@@ -93,6 +95,8 @@ required:
- '#address-cells'
- '#size-cells'
+additionalProperties: false
+
examples:
- |
timer@f0000000 {
diff --git a/Documentation/devicetree/bindings/timer/arm,global_timer.yaml b/Documentation/devicetree/bindings/timer/arm,global_timer.yaml
index 21c24a8e28fd..4956c8f409d2 100644
--- a/Documentation/devicetree/bindings/timer/arm,global_timer.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,global_timer.yaml
@@ -35,6 +35,8 @@ required:
- reg
- clocks
+additionalProperties: false
+
examples:
- |
timer@2c000600 {
diff --git a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
index 2807225db902..1a721d8af67a 100644
--- a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
@@ -32,6 +32,8 @@ required:
- reg
- interrupts
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
diff --git a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
index 273e359854dd..37bd01a62c52 100644
--- a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
+++ b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
@@ -52,6 +52,8 @@ required:
- interrupts
- reg
+additionalProperties: false
+
examples:
- |
// In this example, the IP contains two local timers, using separate
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 330cab25cc92..4165352a590a 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -34,10 +34,6 @@ properties:
- adi,adt7461
# +/-1C TDM Extended Temp Range I.C
- adt7461
- # Three-Axis Digital Accelerometer
- - adi,adxl345
- # Three-Axis Digital Accelerometer (backward-compatibility value "adi,adxl345" must be listed too)
- - adi,adxl346
# AMS iAQ-Core VOC Sensor
- ams,iaq-core
# i2c serial eeprom (24cxx)
@@ -367,4 +363,6 @@ required:
- compatible
- reg
+additionalProperties: false
+
...
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index 6baf00e7d0a9..0d6d850a7f17 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -32,20 +32,19 @@ properties:
- const: lantiq,arx100-usb
- const: lantiq,xrx200-usb
- items:
- - const: amlogic,meson8-usb
- - const: snps,dwc2
- - items:
- - const: amlogic,meson8b-usb
- - const: snps,dwc2
- - const: amlogic,meson-gxbb-usb
- - items:
- - const: amlogic,meson-g12a-usb
+ - enum:
+ - amlogic,meson8-usb
+ - amlogic,meson8b-usb
+ - amlogic,meson-gxbb-usb
+ - amlogic,meson-g12a-usb
- const: snps,dwc2
- const: amcc,dwc-otg
- const: snps,dwc2
- const: st,stm32f4x9-fsotg
- const: st,stm32f4x9-hsotg
- const: st,stm32f7-hsotg
+ - const: st,stm32mp15-fsotg
+ - const: st,stm32mp15-hsotg
- const: samsung,s3c6400-hsotg
reg:
@@ -91,6 +90,10 @@ properties:
vusb_a-supply:
description: phandle to voltage regulator of analog section.
+ vusb33d-supply:
+ description: reference to the supply of the VBUS and ID sensing
+ comparators, required for OTG operation; used on STM32MP15 SoCs.
+
dr_mode:
enum: [host, peripheral, otg]
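A sketch of an STM32MP15 instance using the new compatible and supply; the
address, interrupt number and regulator phandle here are illustrative:

        usb-otg@49000000 {
                compatible = "st,stm32mp15-hsotg", "snps,dwc2";
                reg = <0x49000000 0x10000>;
                interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&rcc USBO_K>;
                clock-names = "otg";
                dr_mode = "otg";
                vusb33d-supply = <&vdd_usb>;
        };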
diff --git a/Documentation/devicetree/bindings/usb/exynos-usb.txt b/Documentation/devicetree/bindings/usb/exynos-usb.txt
index 66c394f9e11f..6aae1544f240 100644
--- a/Documentation/devicetree/bindings/usb/exynos-usb.txt
+++ b/Documentation/devicetree/bindings/usb/exynos-usb.txt
@@ -78,7 +78,14 @@ Required properties:
- ranges: allows valid 1:1 translation between child's address space and
parent's address space
- clocks: Clock IDs array as required by the controller.
- - clock-names: names of clocks correseponding to IDs in the clock property
+ - clock-names: Names of clocks corresponding to IDs in the clock property.
+ Following clock names shall be provided for different
+ compatibles:
+ - samsung,exynos5250-dwusb3: "usbdrd30",
+ - samsung,exynos5433-dwusb3: "aclk", "susp_clk", "pipe_pclk",
+ "phyclk",
+ - samsung,exynos7-dwusb3: "usbdrd30", "usbdrd30_susp_clk",
+ "usbdrd30_axius_clk"
- vdd10-supply: 1.0V power supply
- vdd33-supply: 3.0V/3.3V power supply
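For example, a samsung,exynos5433-dwusb3 wrapper would carry the four names
listed above; the clock specifiers and regulator phandles here are
placeholders:

        usbdrd30: usb@15400000 {
                compatible = "samsung,exynos5433-dwusb3";
                #address-cells = <1>;
                #size-cells = <1>;
                ranges;
                clocks = <&cmu_fsys 1>, <&cmu_fsys 2>,
                         <&cmu_fsys 3>, <&cmu_fsys 4>;
                clock-names = "aclk", "susp_clk", "pipe_pclk", "phyclk";
                vdd10-supply = <&ldo1_reg>;
                vdd33-supply = <&ldo2_reg>;
        };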
diff --git a/Documentation/devicetree/bindings/usb/fcs,fusb302.txt b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
index ba2e32d500c0..60e4654297af 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
+++ b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
@@ -9,7 +9,7 @@ Required sub-node:
- connector : The "usb-c-connector" attached to the FUSB302 IC. The bindings
of the connector node are specified in:
- Documentation/devicetree/bindings/connector/usb-connector.txt
+ Documentation/devicetree/bindings/connector/usb-connector.yaml
Example:
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
index 67c51759a642..ba472e7aefc9 100644
--- a/Documentation/devicetree/bindings/usb/generic.txt
+++ b/Documentation/devicetree/bindings/usb/generic.txt
@@ -34,7 +34,7 @@ Optional properties:
- usb-role-switch: boolean, indicates that the device is capable of assigning
the USB data role (USB host or USB device) for a given
USB connector, such as Type-C, Type-B(micro).
- see connector/usb-connector.txt.
+ see connector/usb-connector.yaml.
- role-switch-default-mode: indicating if usb-role-switch is enabled, the
device default operation mode of controller while usb
role is USB_ROLE_NONE. Valid arguments are "host" and
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
index e0ae6096f7ac..a82ca438aec1 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
@@ -34,7 +34,7 @@ Optional properties:
dual-role mode.
it's considered valid for compatibility reasons, not allowed for
new bindings, and put into a usb-connector node.
- see connector/usb-connector.txt.
+ see connector/usb-connector.yaml.
- pinctrl-names : a pinctrl state named "default" is optional, and need be
defined if auto drd switch is enabled, that means the property dr_mode
is set as "otg", and meanwhile the property "mediatek,enable-manual-drd"
diff --git a/Documentation/devicetree/bindings/usb/mediatek,musb.txt b/Documentation/devicetree/bindings/usb/mediatek,musb.txt
index 2b8a87c90d9e..5eedb0296562 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,musb.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,musb.txt
@@ -23,7 +23,7 @@ Optional properties:
MTCMOS
Required child nodes:
- usb connector node as defined in bindings/connector/usb-connector.txt
+ usb connector node as defined in bindings/connector/usb-connector.yaml
Optional properties:
- id-gpios : input GPIO for USB ID pin.
- vbus-gpios : input GPIO for USB VBUS pin.
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
new file mode 100644
index 000000000000..b84ed8ee8cfc
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
@@ -0,0 +1,190 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/nvidia,tegra-xudc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Device tree binding for NVIDIA Tegra XUSB device mode controller (XUDC)
+
+description:
+ The Tegra XUDC controller supports both USB 2.0 HighSpeed/FullSpeed and
+ USB 3.0 SuperSpeed protocols.
+
+maintainers:
+ - Nagarjuna Kristam <nkristam@nvidia.com>
+ - JC Kuo <jckuo@nvidia.com>
+ - Thierry Reding <treding@nvidia.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - nvidia,tegra210-xudc # For Tegra210
+ - nvidia,tegra186-xudc # For Tegra186
+
+ reg:
+ minItems: 2
+ maxItems: 3
+ items:
+ - description: XUSB device controller registers
+ - description: XUSB device PCI Config registers
+ - description: XUSB device registers.
+
+ reg-names:
+ minItems: 2
+ maxItems: 3
+ items:
+ - const: base
+ - const: fpci
+ - const: ipfs
+
+ interrupts:
+ maxItems: 1
+ description: Must contain the XUSB device interrupt.
+
+ clocks:
+ minItems: 4
+ maxItems: 5
+ items:
+ - description: Clock to enable core XUSB dev clock.
+ - description: Clock to enable XUSB super speed clock.
+ - description: Clock to enable XUSB super speed dev clock.
+ - description: Clock to enable XUSB high speed dev clock.
+ - description: Clock to enable XUSB full speed dev clock.
+
+ clock-names:
+ minItems: 4
+ maxItems: 5
+ items:
+ - const: dev
+ - const: ss
+ - const: ss_src
+ - const: fs_src
+ - const: hs_src
+
+ power-domains:
+ maxItems: 2
+ items:
+ - description: XUSBB(device) power-domain
+ - description: XUSBA(superspeed) power-domain
+
+ power-domain-names:
+ maxItems: 2
+ items:
+ - const: dev
+ - const: ss
+
+ nvidia,xusb-padctl:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ phandle to the XUSB pad controller that is used to configure the USB pads
+ used by the XUDC controller.
+
+ phys:
+ minItems: 1
+ description:
+ Must contain an entry for each entry in phy-names.
+ See ../phy/phy-bindings.txt for details.
+
+ phy-names:
+ minItems: 1
+ items:
+ - const: usb2-0
+ - const: usb2-1
+ - const: usb2-2
+ - const: usb2-3
+ - const: usb3-0
+ - const: usb3-1
+ - const: usb3-2
+ - const: usb3-3
+
+ avddio-usb-supply:
+ description: PCIe/USB3 analog logic power supply. Must supply 1.05 V.
+
+ hvdd-usb-supply:
+ description: USB controller power supply. Must supply 3.3 V.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+ - power-domain-names
+ - nvidia,xusb-padctl
+ - phys
+ - phy-names
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra210-xudc
+ then:
+ properties:
+ reg:
+ minItems: 3
+ reg-names:
+ minItems: 3
+ clocks:
+ minItems: 5
+ clock-names:
+ minItems: 5
+ required:
+ - avddio-usb-supply
+ - hvdd-usb-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra186-xudc
+ then:
+ properties:
+ reg:
+ maxItems: 2
+ reg-names:
+ maxItems: 2
+ clocks:
+ maxItems: 4
+ clock-names:
+ maxItems: 4
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra210-car.h>
+ #include <dt-bindings/gpio/tegra-gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ usb@700d0000 {
+ compatible = "nvidia,tegra210-xudc";
+ reg = <0x0 0x700d0000 0x0 0x8000>,
+ <0x0 0x700d8000 0x0 0x1000>,
+ <0x0 0x700d9000 0x0 0x1000>;
+ reg-names = "base", "fpci", "ipfs";
+
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&tegra_car TEGRA210_CLK_XUSB_DEV>,
+ <&tegra_car TEGRA210_CLK_XUSB_SS>,
+ <&tegra_car TEGRA210_CLK_XUSB_SSP_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_FS_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_HS_SRC>;
+ clock-names = "dev", "ss", "ss_src", "fs_src", "hs_src";
+
+ power-domains = <&pd_xusbdev>, <&pd_xusbss>;
+ power-domain-names = "dev", "ss";
+
+ nvidia,xusb-padctl = <&padctl>;
+
+ phys = <&micro_b>;
+ phy-names = "usb2-0";
+
+ avddio-usb-supply = <&vdd_pex_1v05>;
+ hvdd-usb-supply = <&vdd_3v3_sys>;
+ };
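For comparison, a Tegra186 instance omits the ipfs region and the hs_src
clock per the if/then blocks above; a sketch where the addresses, interrupt
and provider phandles are illustrative:

        usb@3550000 {
                compatible = "nvidia,tegra186-xudc";
                reg = <0x0 0x03550000 0x0 0x8000>,
                      <0x0 0x03558000 0x0 0x1000>;
                reg-names = "base", "fpci";
                interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&bpmp 0>, <&bpmp 1>, <&bpmp 2>, <&bpmp 3>;
                clock-names = "dev", "ss", "ss_src", "fs_src";
                power-domains = <&bpmp 4>, <&bpmp 5>;
                power-domain-names = "dev", "ss";
                nvidia,xusb-padctl = <&padctl>;
                phys = <&phy_usb2_0>;
                phy-names = "usb2-0";
        };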
diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
index e3fc57e605ed..6f8115db2ea9 100644
--- a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
+++ b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
@@ -9,7 +9,7 @@ Required properties:
Required sub-node:
- connector: The "usb-c-connector" attached to the tcpci chip, the bindings
of connector node are specified in
- Documentation/devicetree/bindings/connector/usb-connector.txt
+ Documentation/devicetree/bindings/connector/usb-connector.yaml
Example :
rt1711h@4e {
diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt
index 25780e945b15..2bd21b22ce95 100644
--- a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt
+++ b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt
@@ -9,7 +9,7 @@ Required sub-node:
- connector: The "usb-c-connector" attached to the hd3ss3220 chip. The
bindings of the connector node are specified in:
- Documentation/devicetree/bindings/connector/usb-connector.txt
+ Documentation/devicetree/bindings/connector/usb-connector.yaml
Example:
hd3ss3220@47 {
diff --git a/Documentation/devicetree/bindings/usb/typec-tcpci.txt b/Documentation/devicetree/bindings/usb/typec-tcpci.txt
index 0dd1469e7318..2082522b1c32 100644
--- a/Documentation/devicetree/bindings/usb/typec-tcpci.txt
+++ b/Documentation/devicetree/bindings/usb/typec-tcpci.txt
@@ -13,7 +13,7 @@ Required properties:
Required sub-node:
- connector: The "usb-c-connector" attached to the tcpci chip, the bindings
of connector node are specified in
- Documentation/devicetree/bindings/connector/usb-connector.txt
+ Documentation/devicetree/bindings/connector/usb-connector.yaml
Example:
diff --git a/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt b/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt
index 3d05ae56cb0d..ec80641208a5 100644
--- a/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt
+++ b/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt
@@ -8,11 +8,11 @@ Required properties:
- compatible : should include "gpio-usb-b-connector" and "usb-b-connector".
- id-gpios, vbus-gpios : input gpios, either one of them must be present,
and both can be present as well.
- see connector/usb-connector.txt
+ see connector/usb-connector.yaml
Optional properties:
- vbus-supply : can be present if needed when supports dual role mode.
- see connector/usb-connector.txt
+ see connector/usb-connector.yaml
- Sub-nodes:
- port : can be present.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index fba343fa0205..d3891386d671 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -141,6 +141,8 @@ patternProperties:
description: Shenzhen AZW Technology Co., Ltd.
"^bananapi,.*":
description: BIPAI KEJI LIMITED
+ "^beacon,.*":
+ description: Compass Electronics Group, LLC
"^bhf,.*":
description: Beckhoff Automation GmbH & Co. KG
"^bitmain,.*":
@@ -235,6 +237,8 @@ patternProperties:
description: DataImage, Inc.
"^davicom,.*":
description: DAVICOM Semiconductor, Inc.
+ "^dell,.*":
+ description: Dell Inc.
"^delta,.*":
description: Delta Electronics, Inc.
"^denx,.*":
@@ -289,6 +293,8 @@ patternProperties:
description: Elan Microelectronic Corp.
"^elgin,.*":
description: Elgin S/A.
+ "^elida,.*":
+ description: Shenzhen Elida Technology Co., Ltd.
"^embest,.*":
description: Shenzhen Embest Technology Co., Ltd.
"^emlid,.*":
@@ -301,6 +307,8 @@ patternProperties:
description: emtrion GmbH
"^endless,.*":
description: Endless Mobile, Inc.
+ "^ene,.*":
+ description: ENE Technology, Inc.
"^energymicro,.*":
description: Silicon Laboratories (formerly Energy Micro AS)
"^engicam,.*":
@@ -341,12 +349,16 @@ patternProperties:
description: Fastrax Oy
"^fcs,.*":
description: Fairchild Semiconductor
+ "^feixin,.*":
+ description: Shenzhen Feixin Photoelectric Co., Ltd
"^feiyang,.*":
description: Shenzhen Fly Young Technology Co.,LTD.
"^firefly,.*":
description: Firefly
"^focaltech,.*":
description: FocalTech Systems Co.,Ltd
+ "^frida,.*":
+ description: Shenzhen Frida LCD Co., Ltd.
"^friendlyarm,.*":
description: Guangzhou FriendlyARM Computer Tech Co., Ltd
"^fsl,.*":
@@ -425,6 +437,8 @@ patternProperties:
description: Shenzhen Hugsun Technology Co. Ltd.
"^hwacom,.*":
description: HwaCom Systems Inc.
+ "^hydis,.*":
+ description: Hydis Technologies
"^hyundai,.*":
description: Hyundai Technology
"^i2se,.*":
@@ -473,6 +487,8 @@ patternProperties:
description: Intersil
"^issi,.*":
description: Integrated Silicon Solutions Inc.
+ "^ite,.*":
+ description: ITE Tech, Inc.
"^itead,.*":
description: ITEAD Intelligent Systems Co.Ltd
"^iwave,.*":
@@ -549,6 +565,8 @@ patternProperties:
description: LinkSprite Technologies, Inc.
"^linksys,.*":
description: Belkin International, Inc. (Linksys)
+ "^linutronix,.*":
+ description: Linutronix GmbH
"^linux,.*":
description: Linux-specific binding
"^linx,.*":
@@ -557,6 +575,8 @@ patternProperties:
description: Linear Technology Corporation
"^logicpd,.*":
description: Logic PD, Inc.
+ "^logictechno,.*":
+ description: Logic Technologies Limited
"^longcheer,.*":
description: Longcheer Technology (Shanghai) Co., Ltd.
"^loongson,.*":
@@ -633,6 +653,9 @@ patternProperties:
description: Monolithic Power Systems Inc.
"^mqmaker,.*":
description: mqmaker Inc.
+ "^mrvl,.*":
+ description: Marvell Technology Group Ltd.
+ deprecated: true
"^mscc,.*":
description: Microsemi Corporation
"^msi,.*":
@@ -663,6 +686,8 @@ patternProperties:
description: Netron DY
"^netxeon,.*":
description: Shenzhen Netxeon Technology CO., LTD
+ "^neweast,.*":
+ description: Guangdong Neweast Optoelectronics CO., LTD
"^nexbox,.*":
description: Nexbox
"^nextthing,.*":
@@ -731,6 +756,8 @@ patternProperties:
description: OmniVision Technologies
"^oxsemi,.*":
description: Oxford Semiconductor, Ltd.
+ "^ozzmaker,.*":
+ description: OzzMaker
"^panasonic,.*":
description: Panasonic Corporation
"^parade,.*":
@@ -765,6 +792,8 @@ patternProperties:
description: Broadcom Corporation (formerly PLX Technology)
"^pni,.*":
description: PNI Sensor Corporation
+ "^pocketbook,.*":
+ description: PocketBook International SA
"^polaroid,.*":
description: Polaroid Corporation
"^portwell,.*":
@@ -863,6 +892,8 @@ patternProperties:
description: Small Form Factor Committee
"^sgd,.*":
description: Solomon Goldentek Display Corporation
+ "^sgmicro,.*":
+ description: SG Micro Corp
"^sgx,.*":
description: SGX Sensortech
"^sharp,.*":
@@ -986,6 +1017,8 @@ patternProperties:
"^toppoly,.*":
description: TPO (deprecated, use tpo)
deprecated: true
+ "^topwise,.*":
+ description: Topwise Communication Co., Ltd.
"^toradex,.*":
description: Toradex AG
"^toshiba,.*":
@@ -1056,6 +1089,8 @@ patternProperties:
description: Vision Optical Technology Co., Ltd.
"^vxt,.*":
description: VXT Ltd
+ "^waveshare,.*":
+ description: Waveshare Electronics
"^wd,.*":
description: Western Digital Corp.
"^wetek,.*":
@@ -1082,6 +1117,8 @@ patternProperties:
description: X-Powers
"^xes,.*":
description: Extreme Engineering Solutions (X-ES)
+ "^xiaomi,.*":
+ description: Xiaomi Technology Co., Ltd.
"^xillybus,.*":
description: Xillybus Ltd.
"^xinpeng,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt b/Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt
deleted file mode 100644
index 7cc1407f15cb..000000000000
--- a/Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-STMicroelectronics STPMIC1 Watchdog
-
-Required properties:
-
-- compatible : should be "st,stpmic1-wdt"
-
-Example:
-
-watchdog {
- compatible = "st,stpmic1-wdt";
-};
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index 871922529332..9809f593c0ab 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -416,7 +416,7 @@ The preferred way to set up the helpers is to fill in the
struct gpio_irq_chip inside struct gpio_chip before adding the gpio_chip.
If you do this, the additional irq_chip will be set up by gpiolib at the
same time as setting up the rest of the GPIO functionality. The following
-is a typical example of a cascaded interrupt handler using gpio_irq_chip::
+is a typical example of a cascaded interrupt handler using gpio_irq_chip:
.. code-block:: c
@@ -453,7 +453,7 @@ is a typical example of a cascaded interrupt handler using gpio_irq_chip::
return devm_gpiochip_add_data(dev, &g->gc, g);
The helpers support using hierarchical interrupt controllers as well.
-In this case the typical set-up will look like this::
+In this case the typical set-up will look like this:
.. code-block:: c
diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
index 207f0d24de69..e2f87b82b074 100644
--- a/Documentation/driver-api/libata.rst
+++ b/Documentation/driver-api/libata.rst
@@ -401,7 +401,7 @@ Error handling
==============
This chapter describes how errors are handled under libata. Readers are
-advised to read SCSI EH (Documentation/scsi/scsi_eh.txt) and ATA
+advised to read SCSI EH (Documentation/scsi/scsi_eh.rst) and ATA
exceptions doc first.
Origins of commands
diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst
index 5351bd2f34a8..8bceece51554 100644
--- a/Documentation/driver-api/soundwire/stream.rst
+++ b/Documentation/driver-api/soundwire/stream.rst
@@ -156,22 +156,27 @@ Below shows the SoundWire stream states and state transition diagram. ::
+-----------+ +------------+ +----------+ +----------+
| ALLOCATED +---->| CONFIGURED +---->| PREPARED +---->| ENABLED |
| STATE | | STATE | | STATE | | STATE |
- +-----------+ +------------+ +----------+ +----+-----+
- ^
- |
- |
- v
- +----------+ +------------+ +----+-----+
+ +-----------+ +------------+ +---+--+---+ +----+-----+
+ ^ ^ ^
+ | | |
+ __| |___________ |
+ | | |
+ v | v
+ +----------+ +-----+------+ +-+--+-----+
| RELEASED |<----------+ DEPREPARED |<-------+ DISABLED |
| STATE | | STATE | | STATE |
+----------+ +------------+ +----------+
-NOTE: State transition between prepare and deprepare is supported in Spec
-but not in the software (subsystem)
+NOTE: State transitions between ``SDW_STREAM_ENABLED`` and
+``SDW_STREAM_DISABLED`` are only relevant when the INFO_PAUSE flag is
+supported at the ALSA/ASoC level. Likewise, the transition between
+``SDW_DISABLED_STATE`` and ``SDW_PREPARED_STATE`` depends on the
+INFO_RESUME flag.
-NOTE2: Stream state transition checks need to be handled by caller
-framework, for example ALSA/ASoC. No checks for stream transition exist in
-SoundWire subsystem.
+NOTE2: The framework implements basic state transition checks, but
+does not, for example, check whether a transition from DISABLED to
+ENABLED is valid
+on a specific platform. Such tests need to be added at the ALSA/ASoC
+level.
Stream State Operations
-----------------------
@@ -246,6 +251,9 @@ SDW_STREAM_PREPARED
Prepare state of stream. Operations performed before entering in this state:
+ (0) Steps 1 and 2 are omitted in the case of a resume operation,
+ where the bus bandwidth is known.
+
(1) Bus parameters such as bandwidth, frame shape, clock frequency,
are computed based on current stream as well as already active
stream(s) on Bus. Re-computation is required to accommodate current
@@ -270,9 +278,11 @@ Prepare state of stream. Operations performed before entering in this state:
After all above operations are successful, stream state is set to
``SDW_STREAM_PREPARED``.
-Bus implements below API for PREPARE state which needs to be called once per
-stream. From ASoC DPCM framework, this stream state is linked to
-.prepare() operation.
+Bus implements below API for PREPARE state which needs to be called
+once per stream. From ASoC DPCM framework, this stream state is linked
+to .prepare() operation. Since the .trigger() operations may not
+follow the .prepare(), a direct transition from
+``SDW_STREAM_PREPARED`` to ``SDW_STREAM_DEPREPARED`` is allowed.
.. code-block:: c
@@ -332,6 +342,14 @@ Bus implements below API for DISABLED state which needs to be called once
per stream. From ASoC DPCM framework, this stream state is linked to
.trigger() stop operation.
+When the INFO_PAUSE flag is supported, a direct transition to
+``SDW_STREAM_ENABLED`` is allowed.
+
+For resume operations where ASoC will use the .prepare() callback, the
+stream can transition from ``SDW_STREAM_DISABLED`` to
+``SDW_STREAM_PREPARED``, with all required settings restored but
+without updating the bandwidth and bit allocation.
+
.. code-block:: c
int sdw_disable_stream(struct sdw_stream_runtime * stream);
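+
+As an illustration, a DAI driver's .trigger() callback might map the
+ALSA triggers onto these stream operations as sketched below. This is
+not taken from an existing driver; how the stream runtime is retrieved
+is driver specific, here assumed to be stored as DAI dma data:
+
+.. code-block:: c
+
+   static int example_trigger(struct snd_pcm_substream *substream,
+                              int cmd, struct snd_soc_dai *dai)
+   {
+           /* Retrieval of the stream runtime is driver specific. */
+           struct sdw_stream_runtime *stream =
+                   snd_soc_dai_get_dma_data(dai, substream);
+
+           switch (cmd) {
+           case SNDRV_PCM_TRIGGER_START:
+           case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+                   return sdw_enable_stream(stream);
+           case SNDRV_PCM_TRIGGER_STOP:
+           case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+                   return sdw_disable_stream(stream);
+           default:
+                   return -EOPNOTSUPP;
+           }
+   }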
@@ -353,9 +371,18 @@ state:
After all above operations are successful, stream state is set to
``SDW_STREAM_DEPREPARED``.
-Bus implements below API for DEPREPARED state which needs to be called once
-per stream. From ASoC DPCM framework, this stream state is linked to
-.trigger() stop operation.
+Bus implements below API for DEPREPARED state which needs to be called
+once per stream. ALSA/ASoC do not have a concept of 'deprepare', and
+the mapping from this stream state to ALSA/ASoC operation may be
+implementation specific.
+
+When the INFO_PAUSE flag is supported, the stream state is linked to
+the .hw_free() operation - the stream is not deprepared on a
+TRIGGER_STOP.
+
+Other implementations may transition to the ``SDW_STREAM_DEPREPARED``
+state on TRIGGER_STOP, should they require a transition through the
+``SDW_STREAM_PREPARED`` state.
.. code-block:: c
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index d681203728d7..87d794bc75a4 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -243,8 +243,8 @@ checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enabl
hide up to all remaining free space. The actual space that
would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable
This space is reclaimed once checkpoint=enable.
-compress_algorithm=%s Control compress algorithm, currently f2fs supports "lzo"
- and "lz4" algorithm.
+compress_algorithm=%s Control compress algorithm, currently f2fs supports "lzo",
+ "lz4" and "zstd" algorithm.
compress_log_size=%u Support configuring compress cluster size, the size will
be 4KB * (1 << %u), 16KB is minimum size, also it's
default size.
diff --git a/Documentation/filesystems/fiemap.txt b/Documentation/filesystems/fiemap.txt
index f6d9c99103a4..ac87e6fda842 100644
--- a/Documentation/filesystems/fiemap.txt
+++ b/Documentation/filesystems/fiemap.txt
@@ -115,8 +115,10 @@ data. Note that the opposite is not true - it would be valid for
FIEMAP_EXTENT_NOT_ALIGNED to appear alone.
* FIEMAP_EXTENT_LAST
-This is the last extent in the file. A mapping attempt past this
-extent will return nothing.
+This is generally the last extent in the file. A mapping attempt past
+this extent may return nothing. Some implementations set this flag to
+indicate this extent is the last one in the range queried by the user
+(via fiemap->fm_length).
* FIEMAP_EXTENT_UNKNOWN
The location of this extent is currently unknown. This may indicate
diff --git a/Documentation/filesystems/path-lookup.rst b/Documentation/filesystems/path-lookup.rst
index a3216979298b..f46b05e9b96c 100644
--- a/Documentation/filesystems/path-lookup.rst
+++ b/Documentation/filesystems/path-lookup.rst
@@ -404,11 +404,8 @@ that is the "next" component in the pathname.
``int last_type``
~~~~~~~~~~~~~~~~~
-This is one of ``LAST_NORM``, ``LAST_ROOT``, ``LAST_DOT``, ``LAST_DOTDOT``, or
-``LAST_BIND``. The ``last`` field is only valid if the type is
-``LAST_NORM``. ``LAST_BIND`` is used when following a symlink and no
-components of the symlink have been processed yet. Others should be
-fairly self-explanatory.
+This is one of ``LAST_NORM``, ``LAST_ROOT``, ``LAST_DOT`` or ``LAST_DOTDOT``.
+The ``last`` field is only valid if the type is ``LAST_NORM``.
``struct path root``
~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 9668a7fe2408..ee730457bf4e 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -139,11 +139,17 @@ Overview
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
:doc: overview
-Default bridge callback sequence
---------------------------------
+Bridge Operations
+-----------------
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
- :doc: bridge callbacks
+ :doc: bridge operations
+
+Bridge Connector Helper
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+ :doc: overview
Bridge Helper Reference
@@ -155,6 +161,12 @@ Bridge Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
:export:
+Bridge Connector Helper Reference
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+ :export:
+
Panel-Bridge Helper Reference
-----------------------------
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index cc74e24ca3b5..f6d363b6756e 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -332,7 +332,7 @@ This process is dubbed relocation.
GEM BO Management Implementation Details
----------------------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_vma.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
:doc: Virtual Memory Address
Buffer Object Eviction
@@ -382,7 +382,7 @@ Logical Rings, Logical Ring Contexts and Execlists
Global GTT views
----------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
:doc: Global GTT views
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index bc869b23fc39..439656f55c5d 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -72,6 +72,28 @@ Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
Level: Advanced
+Improve plane atomic_check helpers
+----------------------------------
+
+Aside from the clipped coordinates right above, there are a few suboptimal things
+with the current helpers:
+
+- drm_plane_helper_funcs->atomic_check gets called for enabled or disabled
+ planes. At best this seems to confuse drivers, at worst it means they blow up
+ when the plane is disabled without the CRTC. The only special handling is
+ resetting values in the plane state structures, which instead should be moved
+ into the drm_plane_funcs->atomic_duplicate_state functions.
+
+- Once that's done, helpers could stop calling ->atomic_check for disabled
+ planes.
+
+- Then we could go through all the drivers and remove the more-or-less confused
+ checks for plane_state->fb and plane_state->crtc.
+
+Contact: Daniel Vetter
+
+Level: Advanced
+
Convert early atomic drivers to async commit helpers
----------------------------------------------------
@@ -337,23 +359,6 @@ Contact: Sean Paul
Level: Starter
-drm_fb_helper tasks
--------------------
-
-- drm_fb_helper_restore_fbdev_mode_unlocked() should call restore_fbdev_mode()
- not the _force variant so it can bail out if there is a master. But first
- these igt tests need to be fixed: kms_fbcon_fbt@psr and
- kms_fbcon_fbt@psr-suspend.
-
-- The max connector argument for drm_fb_helper_init() isn't used anymore and
- can be removed.
-
-- The helper doesn't keep an array of connectors anymore so these can be
- removed: drm_fb_helper_single_add_all_connectors(),
- drm_fb_helper_add_one_connector() and drm_fb_helper_remove_one_connector().
-
-Level: Intermediate
-
connector register/unregister fixes
-----------------------------------
@@ -385,6 +390,20 @@ Contact: Daniel Vetter
Level: Intermediate
+Replace drm_detect_hdmi_monitor() with drm_display_info.is_hdmi
+---------------------------------------------------------------
+
+Once EDID is parsed, the monitor HDMI support information is available through
+drm_display_info.is_hdmi. Many drivers still call drm_detect_hdmi_monitor() to
+retrieve the same information, which is less efficient.
+
+Audit each individual driver calling drm_detect_hdmi_monitor() and switch to
+drm_display_info.is_hdmi if applicable.
+
+Contact: Laurent Pinchart, respective driver maintainers
+
+Level: Intermediate
+
Core refactorings
=================
diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst
index c122ed239f7f..c2e29633071e 100644
--- a/Documentation/i2c/smbus-protocol.rst
+++ b/Documentation/i2c/smbus-protocol.rst
@@ -274,7 +274,7 @@ to know which slave triggered the interrupt.
This is implemented the following way in the Linux kernel:
* I2C bus drivers which support SMBus alert should call
- i2c_setup_smbus_alert() to setup SMBus alert support.
+ i2c_new_smbus_alert_device() to install SMBus alert support.
* I2C drivers for devices which can trigger SMBus alerts should implement
the optional alert() callback.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 9df95bab4de8..9599c0f3eea8 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -131,8 +131,10 @@ needed).
bpf/index
usb/index
PCI/index
+ scsi/index
misc-devices/index
scheduler/index
+ mhi/index
Architecture-agnostic documentation
-----------------------------------
diff --git a/Documentation/mhi/index.rst b/Documentation/mhi/index.rst
new file mode 100644
index 000000000000..1d8dec302780
--- /dev/null
+++ b/Documentation/mhi/index.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===
+MHI
+===
+
+.. toctree::
+ :maxdepth: 1
+
+ mhi
+ topology
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/mhi/mhi.rst b/Documentation/mhi/mhi.rst
new file mode 100644
index 000000000000..803ff84f7d7b
--- /dev/null
+++ b/Documentation/mhi/mhi.rst
@@ -0,0 +1,218 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+MHI (Modem Host Interface)
+==========================
+
+This document provides information about the MHI protocol.
+
+Overview
+========
+
+MHI is a protocol developed by Qualcomm Innovation Center, Inc. It is used
+by the host processors to control and communicate with modem devices over high
+speed peripheral buses or shared memory. Even though MHI can be easily adapted
+to any peripheral buses, it is primarily used with PCIe based devices. MHI
+provides logical channels over the physical buses and allows transporting the
+modem protocols, such as IP data packets, modem control messages, and
+diagnostics over at least one of those logical channels. The MHI
+protocol also provides a data acknowledgment feature and manages the
+power state of the modems via one or more logical channels.
+
+MHI Internals
+=============
+
+MMIO
+----
+
+MMIO (Memory mapped IO) consists of a set of registers in the device hardware,
+which are mapped to the host memory space by the peripheral buses like PCIe.
+The following are the major components of the MMIO register space:
+
+MHI control registers: Access to the MHI configuration registers
+
+MHI BHI registers: BHI (Boot Host Interface) registers are used by the host
+for downloading the firmware to the device before MHI initialization.
+
+Channel Doorbell array: Channel Doorbell (DB) registers used by the host to
+notify the device when there is new work to do.
+
+Event Doorbell array: Associated with event context array, the Event Doorbell
+(DB) registers are used by the host to notify the device when new events are
+available.
+
+Debug registers: A set of registers and counters used by the device to expose
+debugging information like performance, functional, and stability to the host.
+
+Data structures
+---------------
+
+All data structures used by MHI are in the host system memory. Using the
+physical interface, the device accesses those data structures. MHI data
+structures and data buffers in the host system memory regions are mapped for
+the device.
+
+Channel context array: All channel configurations are organized in channel
+context data array.
+
+Transfer rings: Used by the host to schedule work items for a channel. The
+transfer rings are organized as a circular queue of Transfer Descriptors (TD).
+
+Event context array: All event configurations are organized in the event context
+data array.
+
+Event rings: Used by the device to send completion and state transition messages
+to the host.
+
+Command context array: All command configurations are organized in command
+context data array.
+
+Command rings: Used by the host to send MHI commands to the device. The command
+rings are organized as a circular queue of Command Descriptors (CD).
+
+Channels
+--------
+
+MHI channels are logical, unidirectional data pipes between a host and a device.
+The concept of channels in MHI is similar to endpoints in USB. MHI supports up
+to 256 channels. However, specific device implementations may support fewer
+than the maximum number of channels allowed.
+
+Two unidirectional channels with their associated transfer rings form a
+bidirectional data pipe, which can be used by the upper-layer protocols to
+transport application data packets (such as IP packets, modem control messages,
+diagnostics messages, and so on). Each channel is associated with a single
+transfer ring.
+
+Transfer rings
+--------------
+
+Transfers between the host and device are organized by channels and defined by
+Transfer Descriptors (TD). TDs are managed through transfer rings, which are
+defined for each channel between the device and host and reside in the host
+memory. TDs consist of one or more ring elements (or transfer blocks)::
+
+ [Read Pointer (RP)] ----------->[Ring Element] } TD
+ [Write Pointer (WP)]- [Ring Element]
+ - [Ring Element]
+ --------->[Ring Element]
+ [Ring Element]
+
+Below is the basic usage of transfer rings (see the sketch after the list):
+
+* Host allocates memory for transfer ring.
+* Host sets the base pointer, read pointer, and write pointer in corresponding
+ channel context.
+* Ring is considered empty when RP == WP.
+* Ring is considered full when WP + 1 == RP.
+* RP indicates the next element to be serviced by the device.
+* When the host has a new buffer to send, it updates the ring element with
+ buffer information, increments the WP to the next element and rings the
+ associated channel DB.
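+
+As a minimal illustration of the empty/full rules above (not from the
+MHI specification; index handling is simplified to values wrapped at
+the ring size)::
+
+   /* Hypothetical helpers: one slot is sacrificed so that a full
+    * ring can be distinguished from an empty one. */
+   static bool ring_empty(u32 rp, u32 wp)
+   {
+           return rp == wp;
+   }
+
+   static bool ring_full(u32 rp, u32 wp, u32 size)
+   {
+           return ((wp + 1) % size) == rp;
+   }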
+
+Event rings
+-----------
+
+Events from the device to host are organized in event rings and defined by Event
+Descriptors (ED). Event rings are used by the device to report events such as
+data transfer completion status, command completion status, and state changes
+to the host. An event ring is an array of EDs that resides in the host
+memory. EDs consist of one or more ring elements (or transfer blocks)::
+
+ [Read Pointer (RP)] ----------->[Ring Element] } ED
+ [Write Pointer (WP)]- [Ring Element]
+ - [Ring Element]
+ --------->[Ring Element]
+ [Ring Element]
+
+Below is the basic usage of event rings (see the sketch after the list):
+
+* Host allocates memory for event ring.
+* Host sets the base pointer, read pointer, and write pointer in corresponding
+ channel context.
+* Both host and device have local copies of RP and WP.
+* Ring is considered empty (no events to service) when WP + 1 == RP.
+* Ring is considered full of events when RP == WP.
+* When there is a new event the device needs to send, the device updates ED
+ pointed by RP, increments the RP to the next element and triggers the
+ interrupt.
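+
+As an illustration of the conventions above, the host side might drain
+an event ring like this (a hypothetical helper with an illustrative
+``struct ed``; real code also translates ring addresses and
+synchronizes with the device)::
+
+   static void drain_event_ring(struct ed *ring, u32 size, u32 rp, u32 *wp)
+   {
+           /* Events are pending until WP + 1 == RP (ring empty). */
+           while (((*wp + 1) % size) != rp) {
+                   *wp = (*wp + 1) % size;
+                   process_event(&ring[*wp]);   /* hypothetical */
+           }
+   }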
+
+Ring Element
+------------
+
+A Ring Element is a data structure used to transfer a single block
+of data between the host and the device. Transfer ring element types contain a
+single buffer pointer, the size of the buffer, and additional control
+information. Other ring element types may only contain control and status
+information. For single buffer operations, a ring descriptor is composed of a
+single element. For large multi-buffer operations (such as scatter and gather),
+elements can be chained to form a longer descriptor.
+
+MHI Operations
+==============
+
+MHI States
+----------
+
+MHI_STATE_RESET
+~~~~~~~~~~~~~~~
+MHI is in reset state after power-up or hardware reset. The host is not allowed
+to access device MMIO register space.
+
+MHI_STATE_READY
+~~~~~~~~~~~~~~~
+MHI is ready for initialization. The host can start MHI initialization by
+programming MMIO registers.
+
+MHI_STATE_M0
+~~~~~~~~~~~~
+MHI is running and operational in the device. The host can start channels by
+issuing channel start command.
+
+MHI_STATE_M1
+~~~~~~~~~~~~
+MHI operation is suspended by the device. This state is entered when the
+device detects inactivity at the physical interface within a preset time.
+
+MHI_STATE_M2
+~~~~~~~~~~~~
+MHI is in low power state. MHI operation is suspended and the device may
+enter lower power mode.
+
+MHI_STATE_M3
+~~~~~~~~~~~~
+MHI operation stopped by the host. This state is entered when the host suspends
+MHI operation.
+
+MHI Initialization
+------------------
+
+After the system boots, the device is enumerated over the physical interface.
+In the case of PCIe, the device is enumerated and assigned BAR-0 for
+the device's MMIO register space. To initialize the MHI in a device,
+the host performs the following operations:
+
+* Allocates the MHI context for event, channel and command arrays.
+* Initializes the context array, and prepares interrupts.
+* Waits until the device enters READY state.
+* Programs MHI MMIO registers and sets device into MHI_M0 state.
+* Waits for the device to enter M0 state.
+
+MHI Data Transfer
+-----------------
+
+MHI data transfer is initiated by the host to transfer data to the device.
+The following is the sequence of operations performed by the host to
+transfer data to the device:
+
+* Host prepares TD with buffer information.
+* Host increments the WP of the corresponding channel transfer ring.
+* Host rings the channel DB register.
+* Device wakes up to process the TD.
+* Device generates a completion event for the processed TD by updating ED.
+* Device increments the RP of the corresponding event ring.
+* Device triggers IRQ to wake up the host.
+* Host wakes up and checks the event ring for completion event.
+* Host updates the WP of the corresponding event ring to indicate that the
+ data transfer has been completed successfully.
+
diff --git a/Documentation/mhi/topology.rst b/Documentation/mhi/topology.rst
new file mode 100644
index 000000000000..dc7799d03294
--- /dev/null
+++ b/Documentation/mhi/topology.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+MHI Topology
+============
+
+This document provides information about the MHI topology modeling and
+representation in the kernel.
+
+MHI Controller
+--------------
+
+MHI controller driver manages the interaction with the MHI client devices
+such as the external modems and WiFi chipsets. It is also the MHI bus master
+which is in charge of managing the physical link between the host and device.
+It is, however, not involved in the actual data transfer, as that is
+taken care of by the physical bus such as PCIe. Each controller driver exposes
+channels and events based on the client device type.
+
+Below are the roles of the MHI controller driver:
+
+* Turns on the physical bus and establishes the link to the device
+* Configures IRQs, IOMMU, and IOMEM
+* Allocates struct mhi_controller and registers with the MHI bus framework
+ with channel and event configurations using mhi_register_controller.
+* Initiates power on and shutdown sequence
+* Initiates suspend and resume power management operations of the device.
+
+MHI Device
+----------
+
+MHI device is the logical device which binds to a maximum of two MHI channels
+for bi-directional communication. Once MHI is in the powered-on state, the
+MHI core will create MHI devices based on the channel configuration exposed
+by the controller. There can be a single MHI device for each channel or for
+a pair of channels.
+
+Each supported device is enumerated in::
+
+ /sys/bus/mhi/devices/
+
+MHI Driver
+----------
+
+MHI driver is the client driver which binds to one or more MHI devices. The MHI
+driver sends and receives the upper-layer protocol packets like IP packets,
+modem control messages, and diagnostics messages over MHI. The MHI core will
+bind the MHI devices to the MHI driver.
+
+Each supported driver is enumerated in::
+
+ /sys/bus/mhi/drivers/
+
+Below are the roles of the MHI driver (a skeleton sketch follows the list):
+
+* Registers the driver with the MHI bus framework using mhi_driver_register.
+* Prepares the device for transfer by calling mhi_prepare_for_transfer.
+* Initiates data transfer by calling mhi_queue_transfer.
+* Once the data transfer is finished, calls mhi_unprepare_from_transfer to
+ end data transfer.
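+
+A skeleton of such a client driver might look as follows. This is a
+minimal, illustrative sketch: the id table, the data transfer callbacks
+and the exact queueing call signatures are device and kernel version
+specific, so only the calls named above are spelled out::
+
+   static int example_probe(struct mhi_device *mhi_dev,
+                            const struct mhi_device_id *id)
+   {
+           /* Move the device's channels into a transfer-ready state. */
+           return mhi_prepare_for_transfer(mhi_dev);
+   }
+
+   static void example_remove(struct mhi_device *mhi_dev)
+   {
+           mhi_unprepare_from_transfer(mhi_dev);
+   }
+
+   static struct mhi_driver example_driver = {
+           .probe = example_probe,
+           .remove = example_remove,
+           /* .id_table and the data transfer callbacks go here;
+            * data is queued with mhi_queue_transfer(). */
+   };
+
+   /* In module init: mhi_driver_register(&example_driver); */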
diff --git a/Documentation/misc-devices/uacce.rst b/Documentation/misc-devices/uacce.rst
new file mode 100644
index 000000000000..1db412e9b1a3
--- /dev/null
+++ b/Documentation/misc-devices/uacce.rst
@@ -0,0 +1,176 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Introduction of Uacce
+---------------------
+
+Uacce (Unified/User-space-access-intended Accelerator Framework) aims to
+provide Shared Virtual Addressing (SVA) between accelerators and processes,
+so that an accelerator can access any data structure of the main CPU.
+This differs from conventional data sharing between the CPU and an IO
+device, which shares only data content rather than addresses.
+Because of the unified address space, the hardware and the user space of a
+process can share the same virtual addresses in their communication.
+Uacce treats the hardware accelerator as a heterogeneous processor: through
+the IOMMU it shares the same CPU page tables, and as a result the same
+translation from va to pa.
+
+::
+
+ __________________________ __________________________
+ | | | |
+ | User application (CPU) | | Hardware Accelerator |
+ |__________________________| |__________________________|
+
+ | |
+ | va | va
+ V V
+ __________ __________
+ | | | |
+ | MMU | | IOMMU |
+ |__________| |__________|
+ | |
+ | |
+ V pa V pa
+ _______________________________________
+ | |
+ | Memory |
+ |_______________________________________|
+
+
+
+Architecture
+------------
+
+Uacce is the kernel module that takes charge of the IOMMU and address
+sharing. The user-space drivers and libraries are called WarpDrive.
+
+The uacce device, built around the IOMMU SVA API, can access multiple
+address spaces, including the one without PASID.
+
+A virtual concept, the queue, is used for communication. It provides a
+FIFO-like interface and maintains a unified address space between the
+application and all involved hardware.
+
+::
+
+ ___________________ ________________
+ | | user API | |
+ | WarpDrive library | ------------> | user driver |
+ |___________________| |________________|
+ | |
+ | |
+ | queue fd |
+ | |
+ | |
+ v |
+ ___________________ _________ |
+ | | | | | mmap memory
+ | Other framework | | uacce | | r/w interface
+ | crypto/nic/others | |_________| |
+ |___________________| |
+ | | |
+ | register | register |
+ | | |
+ | | |
+ | _________________ __________ |
+ | | | | | |
+ ------------- | Device Driver | | IOMMU | |
+ |_________________| |__________| |
+ | |
+ | V
+ | ___________________
+ | | |
+ -------------------------- | Device(Hardware) |
+ |___________________|
+
+
+How does it work
+----------------
+
+Uacce uses mmap and the IOMMU to do the job.
+
+Uacce creates a chrdev for every device registered to it. A new queue is
+created when a user application opens the chrdev. The file descriptor is
+used as the user handle of the queue.
+The accelerator device presents itself as a Uacce object, which is exported
+as a chrdev to the user space. The user application communicates with the
+hardware by ioctl (as the control path) or shared memory (as the data path).
+
+The control path to the hardware is via file operations, while the data
+path is via the mmap space of the queue fd.
+
+The queue file address space:
+
+::
+
+ /**
+ * enum uacce_qfrt: qfrt type
+ * @UACCE_QFRT_MMIO: device mmio region
+ * @UACCE_QFRT_DUS: device user share region
+ */
+ enum uacce_qfrt {
+ UACCE_QFRT_MMIO = 0,
+ UACCE_QFRT_DUS = 1,
+ };
+
+All regions are optional and differ from one device type to another.
+Each region can be mmapped only once; otherwise -EEXIST is returned.
+
+The device mmio region is mapped to the hardware mmio space. It is generally
+used for doorbells or other notifications to the hardware. It is not fast
+enough to serve as a data channel.
+
+The device user share region is used to share data buffers between the user
+process and the device.
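+
+From user space, the flow described above might look roughly like this.
+This is an illustrative sketch only: the device node name is
+hypothetical, the region size (``dus_size``) is device specific and
+learned from the device's sysfs attributes, and the region is assumed
+to be selected by passing its qfrt index in the mmap offset, in units
+of the page size::
+
+   /* Needs <fcntl.h>, <unistd.h> and <sys/mman.h>. */
+   long page_size = sysconf(_SC_PAGESIZE);
+   int fd = open("/dev/example_accel-0", O_RDWR); /* one queue per open */
+
+   /* Map the device user share (DUS) region as the data path. */
+   void *dus = mmap(NULL, dus_size, PROT_READ | PROT_WRITE,
+                    MAP_SHARED, fd, UACCE_QFRT_DUS * page_size);
+
+   /* Start the queue and exchange data through the shared region,
+    * following the device-specific user driver protocol. */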
+
+
+The Uacce register API
+----------------------
+
+The register API is defined in uacce.h.
+
+::
+
+ struct uacce_interface {
+ char name[UACCE_MAX_NAME_SIZE];
+ unsigned int flags;
+ const struct uacce_ops *ops;
+ };
+
+According to the IOMMU capability, uacce_interface flags can be:
+
+::
+
+ /**
+ * UACCE Device flags:
+ * UACCE_DEV_SVA: Shared Virtual Addresses
+ * Support PASID
+ * Support device page faults (PCI PRI or SMMU Stall)
+ */
+ #define UACCE_DEV_SVA BIT(0)
+
+ struct uacce_device *uacce_alloc(struct device *parent,
+ struct uacce_interface *interface);
+ int uacce_register(struct uacce_device *uacce);
+ void uacce_remove(struct uacce_device *uacce);
+
+uacce_alloc and uacce_register can result in:
+
+a. ERR_PTR(-ENODEV), if the uacce module is not compiled in
+
+b. Success with the desired flags
+
+c. Success with negotiated flags, for example
+
+ uacce_interface.flags = UACCE_DEV_SVA but uacce->flags = ~UACCE_DEV_SVA
+
+ So the user driver needs to check the return value as well as the
+ negotiated uacce->flags.
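+
+A minimal sketch of the registration flow, using only the calls and
+fields shown above (error handling abbreviated; the ops table and the
+parent device come from the accelerator driver)::
+
+   struct uacce_interface interface = {
+           .name = "example_accel",
+           .flags = UACCE_DEV_SVA,
+           .ops = &example_ops, /* provided by the driver */
+   };
+   struct uacce_device *uacce;
+
+   uacce = uacce_alloc(parent, &interface);
+   if (IS_ERR(uacce))
+           return PTR_ERR(uacce);
+
+   /* SVA may have been dropped during flag negotiation. */
+   if (!(uacce->flags & UACCE_DEV_SVA))
+           dev_info(parent, "running without SVA\n");
+
+   return uacce_register(uacce);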
+
+
+The user driver
+---------------
+
+The queue file mmap space will need a user driver to wrap the communication
+protocol. Uacce provides some attributes in sysfs for the user driver to
+match the right accelerator accordingly.
+See Documentation/ABI/testing/sysfs-driver-uacce for more details.
diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst
index a09971c2115c..fe089acb7783 100644
--- a/Documentation/networking/devlink/devlink-trap.rst
+++ b/Documentation/networking/devlink/devlink-trap.rst
@@ -257,6 +257,8 @@ drivers:
* :doc:`netdevsim`
* :doc:`mlxsw`
+.. _Generic-Packet-Trap-Groups:
+
Generic Packet Trap Groups
==========================
diff --git a/Documentation/openrisc/openrisc_port.rst b/Documentation/openrisc/openrisc_port.rst
index a18747a8d191..4b2c437942a0 100644
--- a/Documentation/openrisc/openrisc_port.rst
+++ b/Documentation/openrisc/openrisc_port.rst
@@ -37,8 +37,8 @@ or Stafford's toolchain build and release scripts.
Build the Linux kernel as usual::
- make ARCH=openrisc defconfig
- make ARCH=openrisc
+ make ARCH=openrisc CROSS_COMPILE="or1k-linux-" defconfig
+ make ARCH=openrisc CROSS_COMPILE="or1k-linux-"
3) Running on FPGA (optional)
diff --git a/Documentation/powerpc/firmware-assisted-dump.rst b/Documentation/powerpc/firmware-assisted-dump.rst
index 0455a78486d5..b3f3ee135dbe 100644
--- a/Documentation/powerpc/firmware-assisted-dump.rst
+++ b/Documentation/powerpc/firmware-assisted-dump.rst
@@ -112,13 +112,13 @@ to ensure that crash data is preserved to process later.
-- On OPAL based machines (PowerNV), if the kernel is build with
CONFIG_OPAL_CORE=y, OPAL memory at the time of crash is also
- exported as /sys/firmware/opal/core file. This procfs file is
+ exported as /sys/firmware/opal/mpipl/core file. This sysfs file is
helpful in debugging OPAL crashes with GDB. The kernel memory
used for exporting this sysfs file can be released by echo'ing
- '1' to /sys/kernel/fadump_release_opalcore node.
+ '1' to /sys/firmware/opal/mpipl/release_core node.
e.g.
- # echo 1 > /sys/kernel/fadump_release_opalcore
+ # echo 1 > /sys/firmware/opal/mpipl/release_core
Implementation details:
-----------------------
@@ -268,6 +268,11 @@ Here is the list of files under kernel sysfs:
be handled and vmcore will not be captured. This interface can be
easily integrated with kdump service start/stop.
+ /sys/kernel/fadump/mem_reserved
+
+ This is used to display the memory reserved by FADump for saving the
+ crash dump.
+
/sys/kernel/fadump_release_mem
This file is available only when FADump is active during
second kernel. This is used to release the reserved memory
@@ -283,14 +288,29 @@ Here is the list of files under kernel sysfs:
enhanced to use this interface to release the memory reserved for
dump and continue without 2nd reboot.
- /sys/kernel/fadump_release_opalcore
+Note: The /sys/kernel/fadump_release_opalcore sysfs file has moved to
+ /sys/firmware/opal/mpipl/release_core
+
+ /sys/firmware/opal/mpipl/release_core
This file is available only on OPAL based machines when FADump is
active during capture kernel. This is used to release the memory
- used by the kernel to export /sys/firmware/opal/core file. To
+ used by the kernel to export /sys/firmware/opal/mpipl/core file. To
release this memory, echo '1' to it:
- echo 1 > /sys/kernel/fadump_release_opalcore
+ echo 1 > /sys/firmware/opal/mpipl/release_core
+
+Note: The following FADump sysfs files are deprecated.
+
++----------------------------------+--------------------------------+
+| Deprecated | Alternative |
++----------------------------------+--------------------------------+
+| /sys/kernel/fadump_enabled | /sys/kernel/fadump/enabled |
++----------------------------------+--------------------------------+
+| /sys/kernel/fadump_registered | /sys/kernel/fadump/registered |
++----------------------------------+--------------------------------+
+| /sys/kernel/fadump_release_mem | /sys/kernel/fadump/release_mem |
++----------------------------------+--------------------------------+
Here is the list of files under powerpc debugfs:
(Assuming debugfs is mounted on /sys/kernel/debug directory.)
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index a19d084f9b2c..43cdc67e4f8e 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -246,7 +246,8 @@ an involved disclosed party. The current ambassadors list:
============= ========================================================
ARM Grant Likely <grant.likely@arm.com>
AMD Tom Lendacky <tom.lendacky@amd.com>
- IBM
+ IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
+ IBM Power Anton Blanchard <anton@linux.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <tsoni@codeaurora.org>
diff --git a/Documentation/remoteproc.txt b/Documentation/remoteproc.txt
index 03c3d2e568b0..2be1147256e0 100644
--- a/Documentation/remoteproc.txt
+++ b/Documentation/remoteproc.txt
@@ -230,7 +230,7 @@ in the used rings.
Binary Firmware Structure
=========================
-At this point remoteproc only supports ELF32 firmware binaries. However,
+At this point remoteproc supports ELF32 and ELF64 firmware binaries. However,
it is quite expected that other platforms/devices which we'd want to
support with this framework will be based on different binary formats.
diff --git a/Documentation/scsi/53c700.txt b/Documentation/scsi/53c700.rst
index e31aceb6df15..53a0e9f9c198 100644
--- a/Documentation/scsi/53c700.txt
+++ b/Documentation/scsi/53c700.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+The 53c700 Driver Notes
+=======================
+
General Description
===================
@@ -16,9 +22,9 @@ fill in to get the driver working.
Compile Time Flags
==================
-A compile time flag is:
+A compile time flag is::
-CONFIG_53C700_LE_ON_BE
+ CONFIG_53C700_LE_ON_BE
define if the chipset must be supported in little endian mode on a big
endian architecture (used for the 700 on parisc).
@@ -51,9 +57,11 @@ consistent with the best operation of the chip (although some choose
to drive it off the CPU or bus clock rather than going to the expense
of an extra clock chip). The best operation clock speeds are:
-53c700 - 25MHz
-53c700-66 - 50MHz
-53c710 - 40Mhz
+========= =====
+53c700 25MHz
+53c700-66 50MHz
+53c710 40MHz
+========= =====
Writing Your Glue Driver
========================
@@ -69,7 +77,7 @@ parameters that matter to you (see below), plumb the NCR_700_intr
routine into the interrupt line and call NCR_700_detect with the host
template and the new parameters as arguments. You should also call
the relevant request_*_region function and place the register base
-address into the `base' pointer of the host parameters.
+address into the 'base' pointer of the host parameters.
In the release routine, you must free the NCR_700_Host_Parameters that
you allocated, call the corresponding release_*_region and free the
@@ -78,7 +86,7 @@ interrupt.
Handling Interrupts
-------------------
-In general, you should just plumb the card's interrupt line in with
+In general, you should just plumb the card's interrupt line in with
request_irq(irq, NCR_700_intr, <irq flags>, <driver name>, host);
@@ -95,41 +103,32 @@ Settable NCR_700_Host_Parameters
The following are a list of the user settable parameters:
clock: (MANDATORY)
-
-Set to the clock speed of the chip in MHz.
+ Set to the clock speed of the chip in MHz.
base: (MANDATORY)
-
-set to the base of the io or mem region for the register set. On 64
-bit architectures this is only 32 bits wide, so the registers must be
-mapped into the low 32 bits of memory.
+ Set to the base of the io or mem region for the register set. On 64
+ bit architectures this is only 32 bits wide, so the registers must be
+ mapped into the low 32 bits of memory.
pci_dev: (OPTIONAL)
-
-set to the PCI board device. Leave NULL for a non-pci board. This is
-used for the pci_alloc_consistent() and pci_map_*() functions.
+ Set to the PCI board device. Leave NULL for a non-pci board. This is
+ used for the pci_alloc_consistent() and pci_map_*() functions.
dmode_extra: (OPTIONAL, 53c710 only)
-
-extra flags for the DMODE register. These are used to control bus
-output pins on the 710. The settings should be a combination of
-DMODE_FC1 and DMODE_FC2. What these pins actually do is entirely up
-to the board designer. Usually it is safe to ignore this setting.
+ Extra flags for the DMODE register. These are used to control bus
+ output pins on the 710. The settings should be a combination of
+ DMODE_FC1 and DMODE_FC2. What these pins actually do is entirely up
+ to the board designer. Usually it is safe to ignore this setting.
differential: (OPTIONAL)
-
-set to 1 if the chip drives a differential bus.
+ Set to 1 if the chip drives a differential bus.
force_le_on_be: (OPTIONAL, only if CONFIG_53C700_LE_ON_BE is set)
-
-set to 1 if the chip is operating in little endian mode on a big
-endian architecture.
+ Set to 1 if the chip is operating in little endian mode on a big
+ endian architecture.
chip710: (OPTIONAL)
-
-set to 1 if the chip is a 53c710.
+ Set to 1 if the chip is a 53c710.
burst_disable: (OPTIONAL, 53c710 only)
-
-disable 8 byte bursting for DMA transfers.
-
+ Disable 8 byte bursting for DMA transfers.
diff --git a/Documentation/scsi/BusLogic.txt b/Documentation/scsi/BusLogic.rst
index 48e982cd6fe7..b60169812358 100644
--- a/Documentation/scsi/BusLogic.txt
+++ b/Documentation/scsi/BusLogic.rst
@@ -1,6 +1,11 @@
- BusLogic MultiMaster and FlashPoint SCSI Driver for Linux
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================================
+BusLogic MultiMaster and FlashPoint SCSI Driver for Linux
+=========================================================
Version 2.0.15 for Linux 2.0
+
Version 2.1.15 for Linux 2.1
PRODUCTION RELEASE
@@ -8,13 +13,16 @@
17 August 1998
Leonard N. Zubkoff
+
Dandelion Digital
+
lnz@dandelion.com
Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
- INTRODUCTION
+Introduction
+============
BusLogic, Inc. designed and manufactured a variety of high performance SCSI
host adapters which share a common programming interface across a diverse
@@ -86,9 +94,11 @@ Contact information for offices in Europe and Japan is available on the Web
site.
- DRIVER FEATURES
+Driver Features
+===============
-o Configuration Reporting and Testing
+Configuration Reporting and Testing
+-----------------------------------
During system initialization, the driver reports extensively on the host
adapter hardware configuration, including the synchronous transfer parameters
@@ -130,7 +140,8 @@ o Configuration Reporting and Testing
The status of Wide Negotiation, Disconnect/Reconnect, and Tagged Queuing
is reported as "Enabled", "Disabled", or a sequence of "Y" and "N" letters.
-o Performance Features
+Performance Features
+--------------------
BusLogic SCSI Host Adapters directly implement SCSI-2 Tagged Queuing, and so
support has been included in the driver to utilize tagged queuing with any
@@ -150,7 +161,8 @@ o Performance Features
queue depth of 1 is selected. Tagged queuing is also disabled for individual
target devices if disconnect/reconnect is disabled for that device.
-o Robustness Features
+Robustness Features
+-------------------
The driver implements extensive error recovery procedures. When the higher
level parts of the SCSI subsystem request that a timed out command be reset,
@@ -174,7 +186,8 @@ o Robustness Features
lock up or crash, and thereby allowing a clean shutdown and restart after the
offending component is removed.
-o PCI Configuration Support
+PCI Configuration Support
+-------------------------
On PCI systems running kernels compiled with PCI BIOS support enabled, this
driver will interrogate the PCI configuration space and use the I/O port
@@ -184,19 +197,22 @@ o PCI Configuration Support
used to disable the ISA compatible I/O port entirely as it is not necessary.
The ISA compatible I/O port is disabled by default on the BT-948/958/958D.
-o /proc File System Support
+/proc File System Support
+-------------------------
Copies of the host adapter configuration information together with updated
data transfer and error recovery statistics are available through the
/proc/scsi/BusLogic/<N> interface.
-o Shared Interrupts Support
+Shared Interrupts Support
+-------------------------
On systems that support shared interrupts, any number of BusLogic Host
Adapters may share the same interrupt request channel.
- SUPPORTED HOST ADAPTERS
+Supported Host Adapters
+=======================
The following list comprises the supported BusLogic SCSI Host Adapters as of
the date of this document. It is recommended that anyone purchasing a BusLogic
@@ -205,6 +221,7 @@ that it is or will be supported.
FlashPoint Series PCI Host Adapters:
+======================= =============================================
FlashPoint LT (BT-930) Ultra SCSI-3
FlashPoint LT (BT-930R) Ultra SCSI-3 with RAIDPlus
FlashPoint LT (BT-920) Ultra SCSI-3 (BT-930 without BIOS)
@@ -214,15 +231,19 @@ FlashPoint LW (BT-950) Wide Ultra SCSI-3
FlashPoint LW (BT-950R) Wide Ultra SCSI-3 with RAIDPlus
FlashPoint DW (BT-952) Dual Channel Wide Ultra SCSI-3
FlashPoint DW (BT-952R) Dual Channel Wide Ultra SCSI-3 with RAIDPlus
+======================= =============================================
MultiMaster "W" Series Host Adapters:
+======= === ==============================
BT-948 PCI Ultra SCSI-3
BT-958 PCI Wide Ultra SCSI-3
BT-958D PCI Wide Differential Ultra SCSI-3
+======= === ==============================
MultiMaster "C" Series Host Adapters:
+======== ==== ==============================
BT-946C PCI Fast SCSI-2
BT-956C PCI Wide Fast SCSI-2
BT-956CD PCI Wide Differential Fast SCSI-2
@@ -232,9 +253,11 @@ BT-757C EISA Wide Fast SCSI-2
BT-757CD EISA Wide Differential Fast SCSI-2
BT-545C ISA Fast SCSI-2
BT-540CF ISA Fast SCSI-2
+======== ==== ==============================
MultiMaster "S" Series Host Adapters:
+======= ==== ==============================
BT-445S VLB Fast SCSI-2
BT-747S EISA Fast SCSI-2
BT-747D EISA Differential Fast SCSI-2
@@ -244,11 +267,14 @@ BT-545S ISA Fast SCSI-2
BT-542D ISA Differential Fast SCSI-2
BT-742A EISA SCSI-2 (742A revision H)
BT-542B ISA SCSI-2 (542B revision H)
+======= ==== ==============================
MultiMaster "A" Series Host Adapters:
+======= ==== ==============================
BT-742A EISA SCSI-2 (742A revisions A - G)
BT-542B ISA SCSI-2 (542B revisions A - G)
+======= ==== ==============================
AMI FastDisk Host Adapters that are true BusLogic MultiMaster clones are also
supported by this driver.
@@ -260,9 +286,11 @@ list. The retail kit includes the bare board and manual as well as cabling and
driver media and documentation that are not provided with bare boards.
- FLASHPOINT INSTALLATION NOTES
+FlashPoint Installation Notes
+=============================
-o RAIDPlus Support
+RAIDPlus Support
+----------------
FlashPoint Host Adapters now include RAIDPlus, Mylex's bootable software
RAID. RAIDPlus is not supported on Linux, and there are no plans to support
@@ -273,7 +301,8 @@ o RAIDPlus Support
than RAIDPlus, so there is little impetus to include RAIDPlus support in the
BusLogic driver.
-o Enabling UltraSCSI Transfers
+Enabling UltraSCSI Transfers
+----------------------------
FlashPoint Host Adapters ship with their configuration set to "Factory
Default" settings that are conservative and do not allow for UltraSCSI speed
@@ -287,12 +316,14 @@ o Enabling UltraSCSI Transfers
the "Optimum Performance" settings are loaded.
- BT-948/958/958D INSTALLATION NOTES
+BT-948/958/958D Installation Notes
+==================================
The BT-948/958/958D PCI Ultra SCSI Host Adapters have some features which may
require attention in some circumstances when installing Linux.
-o PCI I/O Port Assignments
+PCI I/O Port Assignments
+------------------------
When configured to factory default settings, the BT-948/958/958D will only
recognize the PCI I/O port assignments made by the motherboard's PCI BIOS.
@@ -312,7 +343,8 @@ o PCI I/O Port Assignments
possible future I/O port conflicts. The older BT-946C/956C/956CD also have
this configuration option, but the factory default setting is "Primary".
-o PCI Slot Scanning Order
+PCI Slot Scanning Order
+-----------------------
In systems with multiple BusLogic PCI Host Adapters, the order in which the
PCI slots are scanned may appear reversed with the BT-948/958/958D as
@@ -339,7 +371,8 @@ o PCI Slot Scanning Order
so as to recognize the host adapters in the same order as they are enumerated
by the host adapter's BIOS.
-o Enabling UltraSCSI Transfers
+Enabling UltraSCSI Transfers
+----------------------------
The BT-948/958/958D ship with their configuration set to "Factory Default"
settings that are conservative and do not allow for UltraSCSI speed to be
@@ -353,7 +386,8 @@ o Enabling UltraSCSI Transfers
"Optimum Performance" settings are loaded.
- DRIVER OPTIONS
+Driver Options
+==============
BusLogic Driver Options may be specified either via the Linux Kernel Command
Line or via the Loadable Kernel Module Installation Facility. Driver Options
@@ -520,30 +554,34 @@ The following examples demonstrate setting the Queue Depth for Target Devices
Devices on the second host adapter to 31, and the Bus Settle Time on the
second host adapter to 30 seconds.
-Linux Kernel Command Line:
+Linux Kernel Command Line::
linux BusLogic=QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30
-LILO Linux Boot Loader (in /etc/lilo.conf):
+LILO Linux Boot Loader (in /etc/lilo.conf)::
append = "BusLogic=QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30"
-INSMOD Loadable Kernel Module Installation Facility:
+INSMOD Loadable Kernel Module Installation Facility::
insmod BusLogic.o \
'BusLogic="QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30"'
-NOTE: Module Utilities 2.1.71 or later is required for correct parsing
+
+.. Note::
+
+ Module Utilities 2.1.71 or later is required for correct parsing
of driver options containing commas.
- DRIVER INSTALLATION
+Driver Installation
+===================
This distribution was prepared for Linux kernel version 2.0.35, but should be
compatible with 2.0.4 or any later 2.0 series kernel.
To install the new BusLogic SCSI driver, you may use the following commands,
-replacing "/usr/src" with wherever you keep your Linux kernel source tree:
+replacing "/usr/src" with wherever you keep your Linux kernel source tree::
cd /usr/src
tar -xvzf BusLogic-2.0.15.tar.gz
@@ -557,7 +595,8 @@ Then install "arch/x86/boot/zImage" as your standard kernel, run lilo if
appropriate, and reboot.
- BUSLOGIC ANNOUNCEMENTS MAILING LIST
+BusLogic Announcements Mailing List
+===================================
The BusLogic Announcements Mailing List provides a forum for informing Linux
users of new driver releases and other announcements regarding Linux support
diff --git a/Documentation/scsi/FlashPoint.rst b/Documentation/scsi/FlashPoint.rst
new file mode 100644
index 000000000000..ef3c07e94ad6
--- /dev/null
+++ b/Documentation/scsi/FlashPoint.rst
@@ -0,0 +1,176 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+The BusLogic FlashPoint SCSI Driver
+===================================
+
+The BusLogic FlashPoint SCSI Host Adapters are now fully supported on Linux.
+The upgrade program described below has been officially terminated effective
+31 March 1997 since it is no longer needed.
+
+::
+
+ MYLEX INTRODUCES LINUX OPERATING SYSTEM SUPPORT FOR ITS
+ BUSLOGIC FLASHPOINT LINE OF SCSI HOST ADAPTERS
+
+
+ FREMONT, CA, -- October 8, 1996 -- Mylex Corporation has expanded Linux
+ operating system support to its BusLogic brand of FlashPoint Ultra SCSI
+ host adapters. All of BusLogic's other SCSI host adapters, including the
+ MultiMaster line, currently support the Linux operating system. Linux
+ drivers and information will be available on October 15th at
+ http://sourceforge.net/projects/dandelion/.
+
+ "Mylex is committed to supporting the Linux community," says Peter Shambora,
+ vice president of marketing for Mylex. "We have supported Linux driver
+ development and provided technical support for our host adapters for several
+ years, and are pleased to now make our FlashPoint products available to this
+ user base."
+
+The Linux Operating System
+==========================
+
+Linux is a freely-distributed implementation of UNIX for Intel x86, Sun
+SPARC, SGI MIPS, Motorola 68k, Digital Alpha AXP and Motorola PowerPC
+machines. It supports a wide range of software, including the X Window
+System, Emacs, and TCP/IP networking. Further information is available at
+http://www.linux.org and http://www.ssc.com/.
+
+FlashPoint Host Adapters
+========================
+
+The FlashPoint family of Ultra SCSI host adapters, designed for workstation
+and file server environments, are available in narrow, wide, dual channel,
+and dual channel wide versions. These adapters feature SeqEngine
+automation technology, which minimizes SCSI command overhead and reduces
+the number of interrupts generated to the CPU.
+
+About Mylex
+===========
+
+Mylex Corporation (NASDAQ/NM SYMBOL: MYLX), founded in 1983, is a leading
+producer of RAID technology and network management products. The company
+produces high performance disk array (RAID) controllers, and complementary
+computer products for network servers, mass storage systems, workstations
+and system boards. Through its wide range of RAID controllers and its
+BusLogic line of Ultra SCSI host adapter products, Mylex provides enabling
+intelligent I/O technologies that increase network management control,
+enhance CPU utilization, optimize I/O performance, and ensure data security
+and availability. Products are sold globally through a network of OEMs,
+major distributors, VARs, and system integrators. Mylex Corporation is
+headquartered at 34551 Ardenwood Blvd., Fremont, CA.
+
+Contact:
+========
+
+::
+
+ Peter Shambora
+ Vice President of Marketing
+ Mylex Corp.
+ 510/796-6100
+ peters@mylex.com
+
+
+::
+
+ ANNOUNCEMENT
+ BusLogic FlashPoint LT/BT-948 Upgrade Program
+ 1 February 1996
+
+ ADDITIONAL ANNOUNCEMENT
+ BusLogic FlashPoint LW/BT-958 Upgrade Program
+ 14 June 1996
+
+ Ever since its introduction last October, the BusLogic FlashPoint LT has
+ been problematic for members of the Linux community, in that no Linux
+ drivers have been available for this new Ultra SCSI product. Despite its
+ officially being positioned as a desktop workstation product, and not being
+ particularly well suited for a high performance multitasking operating
+ system like Linux, the FlashPoint LT has been touted by computer system
+ vendors as the latest thing, and has been sold even on many of their high
+ end systems, to the exclusion of the older MultiMaster products. This has
+ caused grief for many people who inadvertently purchased a system expecting
+ that all BusLogic SCSI Host Adapters were supported by Linux, only to
+ discover that the FlashPoint was not supported and would not be for quite
+ some time, if ever.
+
+ After this problem was identified, BusLogic contacted its major OEM
+ customers to make sure the BT-946C/956C MultiMaster cards would still be
+ made available, and that Linux users who mistakenly ordered systems with
+ the FlashPoint would be able to upgrade to the BT-946C. While this helped
+ many purchasers of new systems, it was only a partial solution to the
+ overall problem of FlashPoint support for Linux users. It did nothing to
+ assist the people who initially purchased a FlashPoint for a supported
+ operating system and then later decided to run Linux, or those who had
+ ended up with a FlashPoint LT, believing it was supported, and were unable
+ to return it.
+
+ In the middle of December, I asked to meet with BusLogic's senior
+ management to discuss the issues related to Linux and free software support
+ for the FlashPoint. Rumors of varying accuracy had been circulating
+ publicly about BusLogic's attitude toward the Linux community, and I felt
+ it was best that these issues be addressed directly. I sent an email
+ message after 11pm one evening, and the meeting took place the next
+ afternoon. Unfortunately, corporate wheels sometimes grind slowly,
+ especially when a company is being acquired, and so it's taken until now
+ before the details were completely determined and a public statement could
+ be made.
+
+ BusLogic is not prepared at this time to release the information necessary
+ for third parties to write drivers for the FlashPoint. The only existing
+ FlashPoint drivers have been written directly by BusLogic Engineering, and
+ there is no FlashPoint documentation sufficiently detailed to allow outside
+ developers to write a driver without substantial assistance. While there
+ are people at BusLogic who would rather not release the details of the
+ FlashPoint architecture at all, that debate has not yet been settled either
+ way. In any event, even if documentation were available today it would
+ take quite a while for a usable driver to be written, especially since I'm
+ not convinced that the effort required would be worthwhile.
+
+ However, BusLogic does remain committed to providing a high performance
+ SCSI solution for the Linux community, and does not want to see anyone left
+ unable to run Linux because they have a FlashPoint LT.  Therefore, BusLogic
+ has put in place a direct upgrade program to allow any Linux user worldwide
+ to trade in their FlashPoint LT for the new BT-948 MultiMaster PCI Ultra
+ SCSI Host Adapter. The BT-948 is the Ultra SCSI successor to the BT-946C
+ and has all the best features of both the BT-946C and FlashPoint LT,
+ including smart termination and a flash PROM for easy firmware updates, and
+ is of course compatible with the present Linux driver. The price for this
+ upgrade has been set at US $45 plus shipping and handling, and the upgrade
+ program will be administered through BusLogic Technical Support, which can
+ be reached by electronic mail at techsup@buslogic.com, by Voice at +1 408
+ 654-0760, or by FAX at +1 408 492-1542.
+
+ As of 14 June 1996, the original BusLogic FlashPoint LT to BT-948 upgrade
+ program has now been extended to encompass the FlashPoint LW Wide Ultra
+ SCSI Host Adapter. Any Linux user worldwide may trade in their FlashPoint
+ LW (BT-950) for a BT-958 MultiMaster PCI Ultra SCSI Host Adapter. The
+ price for this upgrade has been set at US $65 plus shipping and handling.
+
+ I was a beta test site for the BT-948/958, and versions 1.2.1 and 1.3.1 of
+ my BusLogic driver already included latent support for the BT-948/958.
+ Additional cosmetic support for the Ultra SCSI MultiMaster cards was added
+ in subsequent releases.  As a result of this cooperative testing process,
+ several firmware bugs were found and corrected. My heavily loaded Linux
+ test system provided an ideal environment for testing error recovery
+ processes that are much more rarely exercised in production systems, but
+ are crucial to overall system stability. It was especially convenient
+ being able to work directly with their firmware engineer in demonstrating
+ the problems under control of the firmware debugging environment; things
+ sure have come a long way since the last time I worked on firmware for an
+ embedded system. I am presently working on some performance testing and
+ expect to have some data to report in the not too distant future.
+
+ BusLogic asked me to send this announcement since a large percentage of the
+ questions regarding support for the FlashPoint have either been sent to me
+ directly via email, or have appeared in the Linux newsgroups in which I
+ participate. To summarize, BusLogic is offering Linux users an upgrade
+ from the unsupported FlashPoint LT (BT-930) to the supported BT-948 for US
+ $45 plus shipping and handling, or from the unsupported FlashPoint LW
+ (BT-950) to the supported BT-958 for $65 plus shipping and handling.
+ Contact BusLogic Technical Support at techsup@buslogic.com or +1 408
+ 654-0760 to take advantage of their offer.
+
+ Leonard N. Zubkoff
+ lnz@dandelion.com
diff --git a/Documentation/scsi/FlashPoint.txt b/Documentation/scsi/FlashPoint.txt
deleted file mode 100644
index 5b5f29cb9f8b..000000000000
--- a/Documentation/scsi/FlashPoint.txt
+++ /dev/null
@@ -1,163 +0,0 @@
-The BusLogic FlashPoint SCSI Host Adapters are now fully supported on Linux.
-The upgrade program described below has been officially terminated effective
-31 March 1997 since it is no longer needed.
-
-
-
- MYLEX INTRODUCES LINUX OPERATING SYSTEM SUPPORT FOR ITS
- BUSLOGIC FLASHPOINT LINE OF SCSI HOST ADAPTERS
-
-
-FREMONT, CA, -- October 8, 1996 -- Mylex Corporation has expanded Linux
-operating system support to its BusLogic brand of FlashPoint Ultra SCSI
-host adapters. All of BusLogic's other SCSI host adapters, including the
-MultiMaster line, currently support the Linux operating system. Linux
-drivers and information will be available on October 15th at
-http://sourceforge.net/projects/dandelion/.
-
-"Mylex is committed to supporting the Linux community," says Peter Shambora,
-vice president of marketing for Mylex. "We have supported Linux driver
-development and provided technical support for our host adapters for several
-years, and are pleased to now make our FlashPoint products available to this
-user base."
-
-The Linux Operating System
-
-Linux is a freely-distributed implementation of UNIX for Intel x86, Sun
-SPARC, SGI MIPS, Motorola 68k, Digital Alpha AXP and Motorola PowerPC
-machines. It supports a wide range of software, including the X Window
-System, Emacs, and TCP/IP networking. Further information is available at
-http://www.linux.org and http://www.ssc.com/.
-
-FlashPoint Host Adapters
-
-The FlashPoint family of Ultra SCSI host adapters, designed for workstation
-and file server environments, are available in narrow, wide, dual channel,
-and dual channel wide versions. These adapters feature SeqEngine
-automation technology, which minimizes SCSI command overhead and reduces
-the number of interrupts generated to the CPU.
-
-About Mylex
-
-Mylex Corporation (NASDAQ/NM SYMBOL: MYLX), founded in 1983, is a leading
-producer of RAID technology and network management products. The company
-produces high performance disk array (RAID) controllers, and complementary
-computer products for network servers, mass storage systems, workstations
-and system boards. Through its wide range of RAID controllers and its
-BusLogic line of Ultra SCSI host adapter products, Mylex provides enabling
-intelligent I/O technologies that increase network management control,
-enhance CPU utilization, optimize I/O performance, and ensure data security
-and availability. Products are sold globally through a network of OEMs,
-major distributors, VARs, and system integrators. Mylex Corporation is
-headquartered at 34551 Ardenwood Blvd., Fremont, CA.
-
- ####
-
-Contact:
-
-Peter Shambora
-Vice President of Marketing
-Mylex Corp.
-510/796-6100
-peters@mylex.com
-
- ANNOUNCEMENT
- BusLogic FlashPoint LT/BT-948 Upgrade Program
- 1 February 1996
-
- ADDITIONAL ANNOUNCEMENT
- BusLogic FlashPoint LW/BT-958 Upgrade Program
- 14 June 1996
-
-Ever since its introduction last October, the BusLogic FlashPoint LT has
-been problematic for members of the Linux community, in that no Linux
-drivers have been available for this new Ultra SCSI product. Despite its
-officially being positioned as a desktop workstation product, and not being
-particularly well suited for a high performance multitasking operating
-system like Linux, the FlashPoint LT has been touted by computer system
-vendors as the latest thing, and has been sold even on many of their high
-end systems, to the exclusion of the older MultiMaster products. This has
-caused grief for many people who inadvertently purchased a system expecting
-that all BusLogic SCSI Host Adapters were supported by Linux, only to
-discover that the FlashPoint was not supported and would not be for quite
-some time, if ever.
-
-After this problem was identified, BusLogic contacted its major OEM
-customers to make sure the BT-946C/956C MultiMaster cards would still be
-made available, and that Linux users who mistakenly ordered systems with
-the FlashPoint would be able to upgrade to the BT-946C. While this helped
-many purchasers of new systems, it was only a partial solution to the
-overall problem of FlashPoint support for Linux users. It did nothing to
-assist the people who initially purchased a FlashPoint for a supported
-operating system and then later decided to run Linux, or those who had
-ended up with a FlashPoint LT, believing it was supported, and were unable
-to return it.
-
-In the middle of December, I asked to meet with BusLogic's senior
-management to discuss the issues related to Linux and free software support
-for the FlashPoint. Rumors of varying accuracy had been circulating
-publicly about BusLogic's attitude toward the Linux community, and I felt
-it was best that these issues be addressed directly. I sent an email
-message after 11pm one evening, and the meeting took place the next
-afternoon. Unfortunately, corporate wheels sometimes grind slowly,
-especially when a company is being acquired, and so it's taken until now
-before the details were completely determined and a public statement could
-be made.
-
-BusLogic is not prepared at this time to release the information necessary
-for third parties to write drivers for the FlashPoint. The only existing
-FlashPoint drivers have been written directly by BusLogic Engineering, and
-there is no FlashPoint documentation sufficiently detailed to allow outside
-developers to write a driver without substantial assistance. While there
-are people at BusLogic who would rather not release the details of the
-FlashPoint architecture at all, that debate has not yet been settled either
-way. In any event, even if documentation were available today it would
-take quite a while for a usable driver to be written, especially since I'm
-not convinced that the effort required would be worthwhile.
-
-However, BusLogic does remain committed to providing a high performance
-SCSI solution for the Linux community, and does not want to see anyone left
-unable to run Linux because they have a Flashpoint LT. Therefore, BusLogic
-has put in place a direct upgrade program to allow any Linux user worldwide
-to trade in their FlashPoint LT for the new BT-948 MultiMaster PCI Ultra
-SCSI Host Adapter. The BT-948 is the Ultra SCSI successor to the BT-946C
-and has all the best features of both the BT-946C and FlashPoint LT,
-including smart termination and a flash PROM for easy firmware updates, and
-is of course compatible with the present Linux driver. The price for this
-upgrade has been set at US $45 plus shipping and handling, and the upgrade
-program will be administered through BusLogic Technical Support, which can
-be reached by electronic mail at techsup@buslogic.com, by Voice at +1 408
-654-0760, or by FAX at +1 408 492-1542.
-
-As of 14 June 1996, the original BusLogic FlashPoint LT to BT-948 upgrade
-program has now been extended to encompass the FlashPoint LW Wide Ultra
-SCSI Host Adapter. Any Linux user worldwide may trade in their FlashPoint
-LW (BT-950) for a BT-958 MultiMaster PCI Ultra SCSI Host Adapter. The
-price for this upgrade has been set at US $65 plus shipping and handling.
-
-I was a beta test site for the BT-948/958, and versions 1.2.1 and 1.3.1 of
-my BusLogic driver already included latent support for the BT-948/958.
-Additional cosmetic support for the Ultra SCSI MultiMaster cards was added
-subsequent releases. As a result of this cooperative testing process,
-several firmware bugs were found and corrected. My heavily loaded Linux
-test system provided an ideal environment for testing error recovery
-processes that are much more rarely exercised in production systems, but
-are crucial to overall system stability. It was especially convenient
-being able to work directly with their firmware engineer in demonstrating
-the problems under control of the firmware debugging environment; things
-sure have come a long way since the last time I worked on firmware for an
-embedded system. I am presently working on some performance testing and
-expect to have some data to report in the not too distant future.
-
-BusLogic asked me to send this announcement since a large percentage of the
-questions regarding support for the FlashPoint have either been sent to me
-directly via email, or have appeared in the Linux newsgroups in which I
-participate. To summarize, BusLogic is offering Linux users an upgrade
-from the unsupported FlashPoint LT (BT-930) to the supported BT-948 for US
-$45 plus shipping and handling, or from the unsupported FlashPoint LW
-(BT-950) to the supported BT-958 for $65 plus shipping and handling.
-Contact BusLogic Technical Support at techsup@buslogic.com or +1 408
-654-0760 to take advantage of their offer.
-
- Leonard N. Zubkoff
- lnz@dandelion.com
diff --git a/Documentation/scsi/NinjaSCSI.rst b/Documentation/scsi/NinjaSCSI.rst
new file mode 100644
index 000000000000..999a6ed5bf7e
--- /dev/null
+++ b/Documentation/scsi/NinjaSCSI.rst
@@ -0,0 +1,164 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================
+WorkBiT NinjaSCSI-3/32Bi driver for Linux
+=========================================
+
+1. Comment
+==========
+
+This is the Workbit Corp. (http://www.workbit.co.jp/) NinjaSCSI-3 driver
+for Linux.
+
+2. My Linux environment
+=======================
+
+:Linux kernel: 2.4.7 / 2.2.19
+:pcmcia-cs: 3.1.27
+:gcc: gcc-2.95.4
+:PC card: I-O data PCSC-F (NinjaSCSI-3),
+ I-O data CBSC-II in 16 bit mode (NinjaSCSI-32Bi)
+:SCSI device: I-O data CDPS-PX24 (CD-ROM drive),
+ Media Intelligent MMO-640GT (Optical disk drive)
+
+3. Install
+==========
+
+(a) Check that your PC card is a true "NinjaSCSI-3" card.
+
+ If you have already installed pcmcia-cs, pcmcia reports your card as an
+ UNKNOWN card, and writes ["WBT", "NinjaSCSI-3", "R1.0"] or some other
+ string to your console or log file.
+
+ You can also use the "cardctl" program (included in the pcmcia-cs source
+ code) to get more info.
+
+ ::
+
+ # cat /var/log/messages
+ ...
+ Jan 2 03:45:06 lindberg cardmgr[78]: unsupported card in socket 1
+ Jan 2 03:45:06 lindberg cardmgr[78]: product info: "WBT", "NinjaSCSI-3", "R1.0"
+ ...
+ # cardctl ident
+ Socket 0:
+ no product info available
+ Socket 1:
+ product info: "IO DATA", "CBSC16 ", "1"
+
+
+(b) Get the Linux kernel source, and extract it to /usr/src.
+ Because the NinjaSCSI driver requires some SCSI header files in Linux
+ kernel source, I recommend rebuilding your kernel; this eliminates
+ some versioning problems.
+
+ ::
+
+ $ cd /usr/src
+ $ tar -zxvf linux-x.x.x.tar.gz
+ $ cd linux
+ $ make config
+ ...
+
+(c) If you use this driver with Kernel 2.2, unpack pcmcia-cs in some directory
+ and make & install. This driver requires the pcmcia-cs header file.
+
+ ::
+
+ $ cd /usr/src
+ $ tar zxvf cs-pcmcia-cs-3.x.x.tar.gz
+ ...
+
+(d) Extract this driver's archive somewhere, edit the Makefile, then run make::
+
+ $ tar -zxvf nsp_cs-x.x.tar.gz
+ $ cd nsp_cs-x.x
+ $ emacs Makefile
+ ...
+ $ make
+
+(e) Copy nsp_cs.ko to a suitable place, such as /lib/modules/<Kernel version>/pcmcia/ .
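+
+ For example (a sketch only; the exact module directory depends on your
+ kernel version, which "uname -r" is assumed to report)::
+
+ # cp nsp_cs.ko /lib/modules/`uname -r`/pcmcia/
+ # depmod -a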
+
+(f) Add these lines to /etc/pcmcia/config .
+
+ If you use pcmcia-cs-3.1.8 or later, you can use the "nsp_cs.conf" file
+ instead, so you don't need to edit the config file; just copy it to
+ /etc/pcmcia/ .
+
+ ::
+
+ device "nsp_cs"
+ class "scsi" module "nsp_cs"
+
+ card "WorkBit NinjaSCSI-3"
+ version "WBT", "NinjaSCSI-3", "R1.0"
+ bind "nsp_cs"
+
+ card "WorkBit NinjaSCSI-32Bi (16bit)"
+ version "WORKBIT", "UltraNinja-16", "1"
+ bind "nsp_cs"
+
+ # OEM
+ card "WorkBit NinjaSCSI-32Bi (16bit) / IO-DATA"
+ version "IO DATA", "CBSC16 ", "1"
+ bind "nsp_cs"
+
+ # OEM
+ card "WorkBit NinjaSCSI-32Bi (16bit) / KME-1"
+ version "KME ", "SCSI-CARD-001", "1"
+ bind "nsp_cs"
+ card "WorkBit NinjaSCSI-32Bi (16bit) / KME-2"
+ version "KME ", "SCSI-CARD-002", "1"
+ bind "nsp_cs"
+ card "WorkBit NinjaSCSI-32Bi (16bit) / KME-3"
+ version "KME ", "SCSI-CARD-003", "1"
+ bind "nsp_cs"
+ card "WorkBit NinjaSCSI-32Bi (16bit) / KME-4"
+ version "KME ", "SCSI-CARD-004", "1"
+ bind "nsp_cs"
+
+(g) Start (or restart) pcmcia-cs::
+
+ # /etc/rc.d/rc.pcmcia start (BSD style)
+
+ or::
+
+ # /etc/init.d/pcmcia start (SYSV style)
+
+
+4. History
+==========
+
+See README.nin_cs .
+
+5. Caution
+==========
+
+If you eject the card while an operation on your SCSI device is in
+progress, or suspend your computer, you may encounter a *BAD* error such
+as a disk crash.
+
+The driver works well when used correctly, but I cannot guarantee your
+data. Please back up your data when you use this driver.
+
+6. Known Bugs
+=============
+
+With the 2.4 kernel, you can't use a 640MB optical disk. This error comes
+from the high-level SCSI driver.
+
+7. Testing
+==========
+
+Please send me reports (bug reports, etc.) about this software.
+When you send a report, please include at least the following:
+
+ - card name
+ - kernel version
+ - your SCSI device name (hard drive, CD-ROM, etc.)
+
+8. Copyright
+============
+
+ See GPL.
+
+
+2001/08/08 yokota@netlab.is.tsukuba.ac.jp <YOKOTA Hiroshi>
diff --git a/Documentation/scsi/NinjaSCSI.txt b/Documentation/scsi/NinjaSCSI.txt
deleted file mode 100644
index ac8db8ceec77..000000000000
--- a/Documentation/scsi/NinjaSCSI.txt
+++ /dev/null
@@ -1,128 +0,0 @@
-
- WorkBiT NinjaSCSI-3/32Bi driver for Linux
-
-1. Comment
- This is Workbit corp.'s(http://www.workbit.co.jp/) NinjaSCSI-3
-for Linux.
-
-2. My Linux environment
-Linux kernel: 2.4.7 / 2.2.19
-pcmcia-cs: 3.1.27
-gcc: gcc-2.95.4
-PC card: I-O data PCSC-F (NinjaSCSI-3)
- I-O data CBSC-II in 16 bit mode (NinjaSCSI-32Bi)
-SCSI device: I-O data CDPS-PX24 (CD-ROM drive)
- Media Intelligent MMO-640GT (Optical disk drive)
-
-3. Install
-[1] Check your PC card is true "NinjaSCSI-3" card.
- If you installed pcmcia-cs already, pcmcia reports your card as UNKNOWN
- card, and write ["WBT", "NinjaSCSI-3", "R1.0"] or some other string to
- your console or log file.
- You can also use "cardctl" program (this program is in pcmcia-cs source
- code) to get more info.
-
-# cat /var/log/messages
-...
-Jan 2 03:45:06 lindberg cardmgr[78]: unsupported card in socket 1
-Jan 2 03:45:06 lindberg cardmgr[78]: product info: "WBT", "NinjaSCSI-3", "R1.0"
-...
-# cardctl ident
-Socket 0:
- no product info available
-Socket 1:
- product info: "IO DATA", "CBSC16 ", "1"
-
-
-[2] Get the Linux kernel source, and extract it to /usr/src.
- Because the NinjaSCSI driver requires some SCSI header files in Linux
- kernel source, I recommend rebuilding your kernel; this eliminates
- some versioning problems.
-$ cd /usr/src
-$ tar -zxvf linux-x.x.x.tar.gz
-$ cd linux
-$ make config
-...
-
-[3] If you use this driver with Kernel 2.2, unpack pcmcia-cs in some directory
- and make & install. This driver requires the pcmcia-cs header file.
-$ cd /usr/src
-$ tar zxvf cs-pcmcia-cs-3.x.x.tar.gz
-...
-
-[4] Extract this driver's archive somewhere, and edit Makefile, then do make.
-$ tar -zxvf nsp_cs-x.x.tar.gz
-$ cd nsp_cs-x.x
-$ emacs Makefile
-...
-$ make
-
-[5] Copy nsp_cs.ko to suitable place, like /lib/modules/<Kernel version>/pcmcia/ .
-
-[6] Add these lines to /etc/pcmcia/config .
- If you use pcmcia-cs-3.1.8 or later, we can use "nsp_cs.conf" file.
- So, you don't need to edit file. Just copy to /etc/pcmcia/ .
-
--------------------------------------
-device "nsp_cs"
- class "scsi" module "nsp_cs"
-
-card "WorkBit NinjaSCSI-3"
- version "WBT", "NinjaSCSI-3", "R1.0"
- bind "nsp_cs"
-
-card "WorkBit NinjaSCSI-32Bi (16bit)"
- version "WORKBIT", "UltraNinja-16", "1"
- bind "nsp_cs"
-
-# OEM
-card "WorkBit NinjaSCSI-32Bi (16bit) / IO-DATA"
- version "IO DATA", "CBSC16 ", "1"
- bind "nsp_cs"
-
-# OEM
-card "WorkBit NinjaSCSI-32Bi (16bit) / KME-1"
- version "KME ", "SCSI-CARD-001", "1"
- bind "nsp_cs"
-card "WorkBit NinjaSCSI-32Bi (16bit) / KME-2"
- version "KME ", "SCSI-CARD-002", "1"
- bind "nsp_cs"
-card "WorkBit NinjaSCSI-32Bi (16bit) / KME-3"
- version "KME ", "SCSI-CARD-003", "1"
- bind "nsp_cs"
-card "WorkBit NinjaSCSI-32Bi (16bit) / KME-4"
- version "KME ", "SCSI-CARD-004", "1"
- bind "nsp_cs"
--------------------------------------
-
-[7] Start (or restart) pcmcia-cs.
-# /etc/rc.d/rc.pcmcia start (BSD style)
-or
-# /etc/init.d/pcmcia start (SYSV style)
-
-
-4. History
-See README.nin_cs .
-
-5. Caution
- If you eject card when doing some operation for your SCSI device or suspend
-your computer, you encount some *BAD* error like disk crash.
- It works good when I using this driver right way. But I'm not guarantee
-your data. Please backup your data when you use this driver.
-
-6. Known Bugs
- In 2.4 kernel, you can't use 640MB Optical disk. This error comes from
-high level SCSI driver.
-
-7. Testing
- Please send me some reports(bug reports etc..) of this software.
-When you send report, please tell me these or more.
- card name
- kernel version
- your SCSI device name(hard drive, CD-ROM, etc...)
-
-8. Copyright
- See GPL.
-
-
-2001/08/08 yokota@netlab.is.tsukuba.ac.jp <YOKOTA Hiroshi>
diff --git a/Documentation/scsi/aacraid.txt b/Documentation/scsi/aacraid.rst
index 30f643f611b2..1904674b94f3 100644
--- a/Documentation/scsi/aacraid.txt
+++ b/Documentation/scsi/aacraid.rst
@@ -1,7 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
AACRAID Driver for Linux (take two)
+===================================
Introduction
--------------------------
+============
The aacraid driver adds support for Adaptec (http://www.adaptec.com)
RAID controllers. This is a major rewrite from the original
Adaptec supplied driver. It has significantly cleaned up both the code
@@ -9,8 +13,11 @@ and the running binary size (the module is less than half the size of
the original).
Supported Cards/Chipsets
--------------------------
+========================
+
+ =================== ======= =======================================
PCI ID (pci.ids) OEM Product
+ =================== ======= =======================================
9005:0285:9005:0285 Adaptec 2200S (Vulcan)
9005:0285:9005:0286 Adaptec 2120S (Crusader)
9005:0285:9005:0287 Adaptec 2200S (Vulcan-2m)
@@ -117,34 +124,54 @@ Supported Cards/Chipsets
9005:0285:108e:0286 SUN STK RAID INT (Cougar)
9005:0285:108e:0287 SUN STK RAID EXT (Prometheus)
9005:0285:108e:7aae SUN STK RAID EM (Narvi)
+ =================== ======= =======================================
People
--------------------------
+======
+
Alan Cox <alan@lxorguk.ukuu.org.uk>
-Christoph Hellwig <hch@infradead.org> (updates for new-style PCI probing and SCSI host registration,
- small cleanups/fixes)
-Matt Domsch <matt_domsch@dell.com> (revision ioctl, adapter messages)
-Deanna Bonds (non-DASD support, PAE fibs and 64 bit, added new adaptec controllers
- added new ioctls, changed scsi interface to use new error handler,
- increased the number of fibs and outstanding commands to a container)
-
- (fixed 64bit and 64G memory model, changed confusing naming convention
- where fibs that go to the hardware are consistently called hw_fibs and
- not just fibs like the name of the driver tracking structure)
-Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas. Performance tuning, card failover and bug mitigations.
+
+Christoph Hellwig <hch@infradead.org>
+
+- updates for new-style PCI probing and SCSI host registration,
+ small cleanups/fixes
+
+Matt Domsch <matt_domsch@dell.com>
+
+- revision ioctl, adapter messages
+
+Deanna Bonds
+
+- non-DASD support, PAE fibs and 64 bit, added new adaptec controllers
+ added new ioctls, changed scsi interface to use new error handler,
+ increased the number of fibs and outstanding commands to a container
+- fixed 64bit and 64G memory model, changed confusing naming convention
+ where fibs that go to the hardware are consistently called hw_fibs and
+ not just fibs like the name of the driver tracking structure
+
+Mark Salyzyn <Mark_Salyzyn@adaptec.com>
+
+- Fixed panic issues and added some new product ids for upcoming hbas.
+- Performance tuning, card failover and bug mitigations.
+
Achim Leubner <Achim_Leubner@adaptec.com>
-Original Driver
+- Original Driver
+
-------------------------
+
Adaptec Unix OEM Product Group
Mailing List
--------------------------
+============
+
linux-scsi@vger.kernel.org (Interested parties troll here)
Also note this is very different to Brian's original driver
so don't expect him to support it.
+
Adaptec does support this driver. Contact Adaptec tech support or
aacraid@adaptec.com
Original by Brian Boerner February 2001
+
Rewritten by Alan Cox, November 2001
diff --git a/Documentation/scsi/advansys.txt b/Documentation/scsi/advansys.rst
index 4a3db62b7424..e0367e179696 100644
--- a/Documentation/scsi/advansys.txt
+++ b/Documentation/scsi/advansys.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+AdvanSys Driver Notes
+=====================
+
AdvanSys (Advanced System Products, Inc.) manufactures the following
RISC-based, Bus-Mastering, Fast (10 Mhz) and Ultra (20 Mhz) Narrow
(8-bit transfer) SCSI Host Adapters for the ISA, EISA, VL, and PCI
@@ -12,50 +18,51 @@ adapter detected. The number of CDBs used by the driver can be
lowered in the BIOS by changing the 'Host Queue Size' adapter setting.
Laptop Products:
- ABP-480 - Bus-Master CardBus (16 CDB)
+ - ABP-480 - Bus-Master CardBus (16 CDB)
Connectivity Products:
- ABP510/5150 - Bus-Master ISA (240 CDB)
- ABP5140 - Bus-Master ISA PnP (16 CDB)
- ABP5142 - Bus-Master ISA PnP with floppy (16 CDB)
- ABP902/3902 - Bus-Master PCI (16 CDB)
- ABP3905 - Bus-Master PCI (16 CDB)
- ABP915 - Bus-Master PCI (16 CDB)
- ABP920 - Bus-Master PCI (16 CDB)
- ABP3922 - Bus-Master PCI (16 CDB)
- ABP3925 - Bus-Master PCI (16 CDB)
- ABP930 - Bus-Master PCI (16 CDB)
- ABP930U - Bus-Master PCI Ultra (16 CDB)
- ABP930UA - Bus-Master PCI Ultra (16 CDB)
- ABP960 - Bus-Master PCI MAC/PC (16 CDB)
- ABP960U - Bus-Master PCI MAC/PC Ultra (16 CDB)
+ - ABP510/5150 - Bus-Master ISA (240 CDB)
+ - ABP5140 - Bus-Master ISA PnP (16 CDB)
+ - ABP5142 - Bus-Master ISA PnP with floppy (16 CDB)
+ - ABP902/3902 - Bus-Master PCI (16 CDB)
+ - ABP3905 - Bus-Master PCI (16 CDB)
+ - ABP915 - Bus-Master PCI (16 CDB)
+ - ABP920 - Bus-Master PCI (16 CDB)
+ - ABP3922 - Bus-Master PCI (16 CDB)
+ - ABP3925 - Bus-Master PCI (16 CDB)
+ - ABP930 - Bus-Master PCI (16 CDB)
+ - ABP930U - Bus-Master PCI Ultra (16 CDB)
+ - ABP930UA - Bus-Master PCI Ultra (16 CDB)
+ - ABP960 - Bus-Master PCI MAC/PC (16 CDB)
+ - ABP960U - Bus-Master PCI MAC/PC Ultra (16 CDB)
Single Channel Products:
- ABP542 - Bus-Master ISA with floppy (240 CDB)
- ABP742 - Bus-Master EISA (240 CDB)
- ABP842 - Bus-Master VL (240 CDB)
- ABP940 - Bus-Master PCI (240 CDB)
- ABP940U - Bus-Master PCI Ultra (240 CDB)
- ABP940UA/3940UA - Bus-Master PCI Ultra (240 CDB)
- ABP970 - Bus-Master PCI MAC/PC (240 CDB)
- ABP970U - Bus-Master PCI MAC/PC Ultra (240 CDB)
- ABP3960UA - Bus-Master PCI MAC/PC Ultra (240 CDB)
- ABP940UW/3940UW - Bus-Master PCI Ultra-Wide (253 CDB)
- ABP970UW - Bus-Master PCI MAC/PC Ultra-Wide (253 CDB)
- ABP3940U2W - Bus-Master PCI LVD/Ultra2-Wide (253 CDB)
+ - ABP542 - Bus-Master ISA with floppy (240 CDB)
+ - ABP742 - Bus-Master EISA (240 CDB)
+ - ABP842 - Bus-Master VL (240 CDB)
+ - ABP940 - Bus-Master PCI (240 CDB)
+ - ABP940U - Bus-Master PCI Ultra (240 CDB)
+ - ABP940UA/3940UA - Bus-Master PCI Ultra (240 CDB)
+ - ABP970 - Bus-Master PCI MAC/PC (240 CDB)
+ - ABP970U - Bus-Master PCI MAC/PC Ultra (240 CDB)
+ - ABP3960UA - Bus-Master PCI MAC/PC Ultra (240 CDB)
+ - ABP940UW/3940UW - Bus-Master PCI Ultra-Wide (253 CDB)
+ - ABP970UW - Bus-Master PCI MAC/PC Ultra-Wide (253 CDB)
+ - ABP3940U2W - Bus-Master PCI LVD/Ultra2-Wide (253 CDB)
Multi-Channel Products:
- ABP752 - Dual Channel Bus-Master EISA (240 CDB Per Channel)
- ABP852 - Dual Channel Bus-Master VL (240 CDB Per Channel)
- ABP950 - Dual Channel Bus-Master PCI (240 CDB Per Channel)
- ABP950UW - Dual Channel Bus-Master PCI Ultra-Wide (253 CDB Per Channel)
- ABP980 - Four Channel Bus-Master PCI (240 CDB Per Channel)
- ABP980U - Four Channel Bus-Master PCI Ultra (240 CDB Per Channel)
- ABP980UA/3980UA - Four Channel Bus-Master PCI Ultra (16 CDB Per Chan.)
- ABP3950U2W - Bus-Master PCI LVD/Ultra2-Wide and Ultra-Wide (253 CDB)
- ABP3950U3W - Bus-Master PCI Dual LVD2/Ultra3-Wide (253 CDB)
+ - ABP752 - Dual Channel Bus-Master EISA (240 CDB Per Channel)
+ - ABP852 - Dual Channel Bus-Master VL (240 CDB Per Channel)
+ - ABP950 - Dual Channel Bus-Master PCI (240 CDB Per Channel)
+ - ABP950UW - Dual Channel Bus-Master PCI Ultra-Wide (253 CDB Per Channel)
+ - ABP980 - Four Channel Bus-Master PCI (240 CDB Per Channel)
+ - ABP980U - Four Channel Bus-Master PCI Ultra (240 CDB Per Channel)
+ - ABP980UA/3980UA - Four Channel Bus-Master PCI Ultra (16 CDB Per Chan.)
+ - ABP3950U2W - Bus-Master PCI LVD/Ultra2-Wide and Ultra-Wide (253 CDB)
+ - ABP3950U3W - Bus-Master PCI Dual LVD2/Ultra3-Wide (253 CDB)
Driver Compile Time Options and Debugging
+=========================================
The following constants can be defined in the source file.
@@ -88,26 +95,30 @@ The following constants can be defined in the source file.
first three hex digits of the pseudo I/O Port must be set to
'deb' and the fourth hex digit specifies the debug level: 0 - F.
The following command line will look for an adapter at 0x330
- and set the debug level to 2.
+ and set the debug level to 2::
linux advansys=0x330,0,0,0,0xdeb2
If the driver is built as a loadable module this variable can be
defined when the driver is loaded. The following insmod command
- will set the debug level to one.
+ will set the debug level to one::
insmod advansys.o asc_dbglvl=1
Debugging Message Levels:
- 0: Errors Only
- 1: High-Level Tracing
- 2-N: Verbose Tracing
+
+
+ ==== ==================
+ 0 Errors Only
+ 1 High-Level Tracing
+ 2-N Verbose Tracing
+ ==== ==================
To enable debug output to console, please make sure that:
a. System and kernel logging is enabled (syslogd, klogd running).
b. Kernel messages are routed to console output. Check
- /etc/syslog.conf for an entry similar to this:
+ /etc/syslog.conf for an entry similar to this::
kern.* /dev/console
@@ -120,8 +131,11 @@ The following constants can be defined in the source file.
Alternatively you can enable printk() to console with this
program. However, this is not the 'official' way to do this.
+
Debug output is logged in /var/log/messages.
+ ::
+
main()
{
syscall(103, 7, 0, 0);
@@ -144,11 +158,11 @@ The following constants can be defined in the source file.
Statistics are only available for kernels greater than or equal
to v1.3.0 with the CONFIG_PROC_FS (/proc) file system configured.
- AdvanSys SCSI adapter files have the following path name format:
+ AdvanSys SCSI adapter files have the following path name format::
/proc/scsi/advansys/{0,1,2,3,...}
- This information can be displayed with cat. For example:
+ This information can be displayed with cat. For example::
cat /proc/scsi/advansys/0
@@ -156,6 +170,7 @@ The following constants can be defined in the source file.
contain adapter and device configuration information.
Driver LILO Option
+==================
If init/main.c is modified as described in the 'Directions for Adding
the AdvanSys Driver to Linux' section (B.4.) above, the driver will
@@ -167,17 +182,30 @@ affects searching for ISA and VL boards.
Examples:
1. Eliminate I/O port scanning:
- boot: linux advansys=
- or
- boot: linux advansys=0x0
+
+ boot::
+
+ linux advansys=
+
+ or::
+
+ linux advansys=0x0
+
2. Limit I/O port scanning to one I/O port:
- boot: linux advansys=0x110
+
+ boot::
+
+ linux advansys=0x110
+
3. Limit I/O port scanning to four I/O ports:
- boot: linux advansys=0x110,0x210,0x230,0x330
+
+ boot::
+
+ linux advansys=0x110,0x210,0x230,0x330
For a loadable module the same effect can be achieved by setting
the 'asc_iopflag' variable and 'asc_ioport' array when loading
-the driver, e.g.
+the driver, e.g.::
insmod advansys.o asc_iopflag=1 asc_ioport=0x110,0x330
@@ -187,6 +215,7 @@ the 'Driver Compile Time Options and Debugging' section above for
more information.
Credits (Chronological Order)
+=============================
Bob Frey <bfrey@turbolinux.com.cn> wrote the AdvanSys SCSI driver
and maintained it up to 3.3F. He continues to answer questions
diff --git a/Documentation/scsi/aha152x.txt b/Documentation/scsi/aha152x.rst
index 94848734ac66..7012b5c46d5d 100644
--- a/Documentation/scsi/aha152x.txt
+++ b/Documentation/scsi/aha152x.rst
@@ -1,7 +1,12 @@
-$Id: README.aha152x,v 1.2 1999/12/25 15:32:30 fischer Exp fischer $
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=====================================================
Adaptec AHA-1520/1522 SCSI driver for Linux (aha152x)
+=====================================================
+
+Copyright |copy| 1993-1999 Jürgen Fischer <fischer@norbit.de>
-Copyright 1993-1999 Jürgen Fischer <fischer@norbit.de>
TC1550 patches by Luuk van Dijk (ldz@xs4all.nl)
@@ -14,8 +19,10 @@ less polling loops), has slightly higher throughput (at
least on my ancient test box; a i486/33Mhz/20MB).
-CONFIGURATION ARGUMENTS:
+Configuration Arguments
+=======================
+============ ======================================== ======================
IOPORT base io address (0x340/0x140)
IRQ interrupt level (9-12; default 11)
SCSI_ID scsi id of controller (0-7; default 7)
@@ -25,31 +32,38 @@ SYNCHRONOUS enable synchronous transfers (0/1; default 1 [on])
DELAY: bus reset delay (default 100)
EXT_TRANS: enable extended translation (0/1: default 0 [off])
(see NOTES)
+============ ======================================== ======================
+
+Compile Time Configuration
+==========================
+
+(go into AHA152X in drivers/scsi/Makefile):
-COMPILE TIME CONFIGURATION (go into AHA152X in drivers/scsi/Makefile):
+- DAUTOCONF
+ use configuration the controller reports (AHA-152x only)
--DAUTOCONF
- use configuration the controller reports (AHA-152x only)
+- DSKIP_BIOSTEST
+ Don't test for BIOS signature (AHA-1510 or disabled BIOS)
--DSKIP_BIOSTEST
- Don't test for BIOS signature (AHA-1510 or disabled BIOS)
+- DSETUP0="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
+ override for the first controller
--DSETUP0="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
- override for the first controller
+- DSETUP1="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
+ override for the second controller
--DSETUP1="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
- override for the second controller
+- DAHA152X_DEBUG
+ enable debugging output
--DAHA152X_DEBUG
- enable debugging output
+- DAHA152X_STAT
+ enable some statistics
--DAHA152X_STAT
- enable some statistics
+LILO Command Line Options
+=========================
-LILO COMMAND LINE OPTIONS:
+ ::
-aha152x=<IOPORT>[,<IRQ>[,<SCSI-ID>[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY> [,<EXT_TRANS]]]]]]]
+ aha152x=<IOPORT>[,<IRQ>[,<SCSI-ID>[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY> [,<EXT_TRANS]]]]]]]
The normal configuration can be overridden by specifying a command line.
When you do this, the BIOS test is skipped. Entered values have to be
@@ -58,17 +72,21 @@ aha152x=<IOPORT>[,<IRQ>[,<SCSI-ID>[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY
For two controllers use the aha152x statement twice.
-SYMBOLS FOR MODULE CONFIGURATION:
+Symbols for Module Configuration
+================================
Choose from 2 alternatives:
-1. specify everything (old)
+1. specify everything (old)::
+
+ aha152x=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
-aha152x=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
configuration override for first controller
+ ::
+
+ aha152x1=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
-aha152x1=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
configuration override for second controller
2. specify only what you need to (irq or io is required; new)
@@ -101,7 +119,8 @@ exttrans=EXTTRANS0[,EXTTRANS1]
If you use both alternatives the first will be taken.
-NOTES ON EXT_TRANS:
+Notes on EXT_TRANS
+==================
SCSI uses block numbers to address blocks/sectors on a device.
The BIOS uses a cylinder/head/sector addressing scheme (C/H/S)
@@ -150,8 +169,9 @@ geometry right in most cases:
- for disks<1GB: use default translation (C/32/64)
- for disks>1GB:
+
- take current geometry from the partition table
- (using scsicam_bios_param and accept only `valid' geometries,
+ (using scsicam_bios_param and accepting only 'valid' geometries,
ie. either (C/32/64) or (C/63/255)). This can be extended translation
even if it's not enabled in the driver.
@@ -161,7 +181,8 @@ geometry right in most cases:
disks.
-REFERENCES USED:
+References Used
+===============
"AIC-6260 SCSI Chip Specification", Adaptec Corporation.
@@ -177,7 +198,7 @@ REFERENCES USED:
Drew Eckhardt (drew@cs.colorado.edu)
- Eric Youngdale (eric@andante.org)
+ Eric Youngdale (eric@andante.org)
special thanks to Eric Youngdale for the free(!) supplying the
documentation on the chip.
diff --git a/Documentation/scsi/aic79xx.rst b/Documentation/scsi/aic79xx.rst
new file mode 100644
index 000000000000..071ff5111a4f
--- /dev/null
+++ b/Documentation/scsi/aic79xx.rst
@@ -0,0 +1,593 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+===================================
+Adaptec Ultra320 Family Manager Set
+===================================
+
+README for The Linux Operating System
+
+.. The following information is available in this file:
+
+ 1. Supported Hardware
+ 2. Version History
+ 3. Command Line Options
+ 4. Additional Notes
+ 5. Contacting Adaptec
+
+
+1. Supported Hardware
+=====================
+
+ The following Adaptec SCSI Host Adapters are supported by this
+ driver set.
+
+ ============= =========================================
+ Ultra320 ASIC Description
+ ============= =========================================
+ AIC-7901A Single Channel 64-bit PCI-X 133MHz to
+ Ultra320 SCSI ASIC
+ AIC-7901B Single Channel 64-bit PCI-X 133MHz to
+ Ultra320 SCSI ASIC with Retained Training
+ AIC-7902A4 Dual Channel 64-bit PCI-X 133MHz to
+ Ultra320 SCSI ASIC
+ AIC-7902B Dual Channel 64-bit PCI-X 133MHz to
+ Ultra320 SCSI ASIC with Retained Training
+ ============= =========================================
+
+ ========================== ===================================== ============
+ Ultra320 Adapters Description ASIC
+ ========================== ===================================== ============
+ Adaptec SCSI Card 39320 Dual Channel 64-bit PCI-X 133MHz to 7902A4/7902B
+ Ultra320 SCSI Card (one external
+ 68-pin, two internal 68-pin)
+ Adaptec SCSI Card 39320A Dual Channel 64-bit PCI-X 133MHz to 7902B
+ Ultra320 SCSI Card (one external
+ 68-pin, two internal 68-pin)
+ Adaptec SCSI Card 39320D Dual Channel 64-bit PCI-X 133MHz to 7902A4
+ Ultra320 SCSI Card (two external VHDC
+ and one internal 68-pin)
+ Adaptec SCSI Card 39320D Dual Channel 64-bit PCI-X 133MHz to 7902A4
+ Ultra320 SCSI Card (two external VHDC
+ and one internal 68-pin) based on the
+ AIC-7902B ASIC
+ Adaptec SCSI Card 29320 Single Channel 64-bit PCI-X 133MHz to 7901A
+ Ultra320 SCSI Card (one external
+ 68-pin, two internal 68-pin, one
+ internal 50-pin)
+ Adaptec SCSI Card 29320A Single Channel 64-bit PCI-X 133MHz to 7901B
+ Ultra320 SCSI Card (one external
+ 68-pin, two internal 68-pin, one
+ internal 50-pin)
+ Adaptec SCSI Card 29320LP Single Channel 64-bit Low Profile 7901A
+ PCI-X 133MHz to Ultra320 SCSI Card
+ (One external VHDC, one internal
+ 68-pin)
+ Adaptec SCSI Card 29320ALP Single Channel 64-bit Low Profile 7901B
+ PCI-X 133MHz to Ultra320 SCSI Card
+ (One external VHDC, one internal
+ 68-pin)
+ ========================== ===================================== ============
+
+2. Version History
+==================
+
+
+ * 3.0 (December 1st, 2005)
+ - Updated driver to use SCSI transport class infrastructure
+ - Upported sequencer and core fixes from Adaptec's released
+ version 2.0.15 of the driver.
+
+ * 1.3.11 (July 11, 2003)
+ - Fix several deadlock issues.
+ - Add 29320ALP and 39320B Id's.
+
+ * 1.3.10 (June 3rd, 2003)
+ - Align the SCB_TAG field on a 16byte boundary. This avoids
+ SCB corruption on some PCI-33 busses.
+ - Correct non-zero luns on Rev B. hardware.
+ - Update for change in 2.5.X SCSI proc FS interface.
+ - When negotiating async via an 8bit WDTR message, send
+ an SDTR with an offset of 0 to be sure the target
+ knows we are async. This works around a firmware defect
+ in the Quantum Atlas 10K.
+ - Implement controller suspend and resume.
+ - Clear PCI error state during driver attach so that we
+ don't disable memory mapped I/O due to a stray write
+ by some other driver probe that occurred before we
+ claimed the controller.
+
+ * 1.3.9 (May 22nd, 2003)
+ - Fix compiler errors.
+ - Remove S/G splitting for segments that cross a 4GB boundary.
+ This is guaranteed not to happen in Linux.
+ - Add support for scsi_report_device_reset() found in
+ 2.5.X kernels.
+ - Add 7901B support.
+ - Simplify handling of the packetized lun Rev A workaround.
+ - Correct and simplify handling of the ignore wide residue
+ message. The previous code would fail to report a residual
+ if the transaction data length was even and we received
+ an IWR message.
+
+ * 1.3.8 (April 29th, 2003)
+ - Fix types accessed via the command line interface code.
+ - Perform a few firmware optimizations.
+ - Fix "Unexpected PKT busfree" errors.
+ - Use a sequencer interrupt to notify the host of
+ commands with bad status. We defer the notification
+ until there are no outstanding selections to ensure
+ that the host is interrupted for as short a time as
+ possible.
+ - Remove pre-2.2.X support.
+ - Add support for new 2.5.X interrupt API.
+ - Correct big-endian architecture support.
+
+ * 1.3.7 (April 16th, 2003)
+ - Use del_timer_sync() to ensure that no timeouts
+ are pending during controller shutdown.
+ - For pre-2.5.X kernels, carefully adjust our segment
+ list size to avoid SCSI malloc pool fragmentation.
+ - Cleanup channel display in our /proc output.
+ - Workaround duplicate device entries in the mid-layer
+ device list during add-single-device.
+
+ * 1.3.6 (March 28th, 2003)
+ - Correct a double free in the Domain Validation code.
+ - Correct a reference to free'ed memory during controller
+ shutdown.
+ - Reset the bus on an SE->LVD change. This is required
+ to reset our transceivers.
+
+ * 1.3.5 (March 24th, 2003)
+ - Fix a few register window mode bugs.
+ - Include read streaming in the PPR flags we display in
+ diagnostics as well as /proc.
+ - Add PCI hot plug support for 2.5.X kernels.
+ - Correct default precompensation value for RevA hardware.
+ - Fix Domain Validation thread shutdown.
+ - Add a firmware workaround to make the LED blink
+ brighter during packetized operations on the H2A4.
+ - Correct /proc display of user read streaming settings.
+ - Simplify driver locking by releasing the io_request_lock
+ upon driver entry from the mid-layer.
+ - Cleanup command line parsing and move much of this code
+ to aiclib.
+
+ * 1.3.4 (February 28th, 2003)
+ - Correct a race condition in our error recovery handler.
+ - Allow Test Unit Ready commands to take a full 5 seconds
+ during Domain Validation.
+
+ * 1.3.2 (February 19th, 2003)
+ - Correct a Rev B. regression due to the GEM318
+ compatibility fix included in 1.3.1.
+
+ * 1.3.1 (February 11th, 2003)
+ - Add support for the 39320A.
+ - Improve recovery for certain PCI-X errors.
+ - Fix handling of LQ/DATA/LQ/DATA for the
+ same write transaction that can occur without
+ interveining training.
+ - Correct compatibility issues with the GEM318
+ enclosure services device.
+ - Correct data corruption issue that occurred under
+ high tag depth write loads.
+ - Adapt to a change in the 2.5.X daemonize() API.
+ - Correct a "Missing case in ahd_handle_scsiint" panic.
+
+ * 1.3.0 (January 21st, 2003)
+ - Full regression testing for all U320 products completed.
+ - Added abort and target/lun reset error recovery handler and
+ interrupt coalescing.
+
+ * 1.2.0 (November 14th, 2002)
+ - Added support for Domain Validation
+ - Add support for the Hewlett-Packard version of the 39320D
+ and AIC-7902 adapters.
+
+ Support for previous adapters has not been fully tested and should
+ only be used at the customer's own risk.
+
+ * 1.1.1 (September 24th, 2002)
+ - Added support for the Linux 2.5.X kernel series
+
+ * 1.1.0 (September 17th, 2002)
+ - Added support for four additional SCSI products:
+ ASC-39320, ASC-29320, ASC-29320LP, AIC-7901.
+
+ * 1.0.0 (May 30th, 2002)
+ - Initial driver release.
+
+ * 2.1. Software/Hardware Features
+ - Support for the SPI-4 "Ultra320" standard:
+ - 320MB/s transfer rates
+ - Packetized SCSI Protocol at 160MB/s and 320MB/s
+ - Quick Arbitration Selection (QAS)
+ - Retained Training Information (Rev B. ASIC only)
+ - Interrupt Coalescing
+ - Initiator Mode (target mode not currently
+ supported)
+ - Support for the PCI-X standard up to 133MHz
+ - Support for the PCI v2.2 standard
+ - Domain Validation
+
+ * 2.2. Operating System Support:
+ - Redhat Linux 7.2, 7.3, 8.0, Advanced Server 2.1
+ - SuSE Linux 7.3, 8.0, 8.1, Enterprise Server 7
+ - only Intel and AMD x86 supported at this time
+ - >4GB memory configurations supported.
+
+ Refer to the User's Guide for more details on this.
+
+3. Command Line Options
+=======================
+
+ .. Warning::
+
+ ALTERING OR ADDING THESE DRIVER PARAMETERS
+ INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
+ USE THEM WITH CAUTION.
+
+ Put a .conf file in the /etc/modprobe.d/ directory and add/edit a
+ line containing ``options aic79xx aic79xx=[command[,command...]]`` where
+ ``command`` is one or more of the following:
+
+
+verbose
+ :Definition: enable additional informative messages during driver operation.
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+debug:[value]
+ :Definition: Enables various levels of debugging information
+ The bit definitions for the debugging mask can
+ be found in drivers/scsi/aic7xxx/aic79xx.h under
+ the "Debug" heading.
+ :Possible Values: 0x0000 = no debugging, 0xffff = full debugging
+ :Default Value: 0x0000
+
+no_reset
+ :Definition: Do not reset the bus during the initial probe
+ phase
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+extended
+ :Definition: Force extended translation on the controller
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+periodic_otag
+ :Definition: Send an ordered tag periodically to prevent
+ tag starvation. Needed for some older devices
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+reverse_scan
+ :Definition: Probe the scsi bus in reverse order, starting with target 15
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+global_tag_depth
+ :Definition: Global tag depth for all targets on all busses.
+ This option sets the default tag depth which
+ may be selectively overridden via the tag_info
+ option.
+
+ :Possible Values: 1 - 253
+ :Default Value: 32
+
+tag_info:{{value[,value...]}[,{value[,value...]}...]}
+ :Definition: Set the per-target tagged queue depth on a
+ per controller basis. Both controllers and targets
+ may be omitted indicating that they should retain
+ the default tag depth.
+
+ :Possible Values: 1 - 253
+ :Default Value: 32
+
+ Examples:
+
+
+ ::
+
+ tag_info:{{16,32,32,64,8,8,,32,32,32,32,32,32,32,32,32}}
+
+ On Controller 0
+
+ - specifies a tag depth of 16 for target 0
+ - specifies a tag depth of 64 for target 3
+ - specifies a tag depth of 8 for targets 4 and 5
+ - leaves target 6 at the default
+ - specifies a tag depth of 32 for targets 1,2,7-15
+
+ All other targets retain the default depth.
+
+ ::
+
+ tag_info:{{},{32,,32}}
+
+ On Controller 1
+
+ - specifies a tag depth of 32 for targets 0 and 2
+
+ All other targets retain the default depth.
+
+
+rd_strm: {rd_strm_bitmask[,rd_strm_bitmask...]}
+ :Definition: Enable read streaming on a per target basis.
+ The rd_strm_bitmask is a 16 bit hex value in which
+ each bit represents a target. Setting the target's
+ bit to '1' enables read streaming for that
+ target. Controllers may be omitted indicating that
+ they should retain the default read streaming setting.
+
+ Examples:
+
+ ::
+
+ rd_strm:{0x0041}
+
+ On Controller 0
+
+ - enables read streaming for targets 0 and 6.
+ - disables read streaming for targets 1-5,7-15.
+
+ All other targets retain the default read
+ streaming setting.
+
+ ::
+
+ rd_strm:{0x0023,,0xFFFF}
+
+ On Controller 0
+
+ - enables read streaming for targets 1,2, and 5.
+ - disables read streaming for targets 3,4,6-15.
+
+ On Controller 2
+
+ - enables read streaming for all targets.
+
+ All other targets retain the default read
+ streaming setting.
+
+ :Possible Values: 0x0000 - 0xffff
+ :Default Value: 0x0000
+
+dv: {value[,value...]}
+ :Definition: Set Domain Validation Policy on a per-controller basis.
+ Controllers may be omitted indicating that
+ they should retain the default read streaming setting.
+
+ :Possible Values:
+
+ ==== ===============================
+ < 0 Use setting from serial EEPROM.
+ 0 Disable DV
+ > 0 Enable DV
+ ==== ===============================
+
+ :Default Value: DV Serial EEPROM configuration setting.
+
+ Example:
+
+ ::
+
+ dv:{-1,0,,1,1,0}
+
+ - On Controller 0 leave DV at its default setting.
+ - On Controller 1 disable DV.
+ - Skip configuration on Controller 2.
+ - On Controllers 3 and 4 enable DV.
+ - On Controller 5 disable DV.
+
+seltime:[value]
+ :Definition: Specifies the selection timeout value
+ :Possible Values: 0 = 256ms, 1 = 128ms, 2 = 64ms, 3 = 32ms
+ :Default Value: 0
+
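+ For example, an illustrative options line that selects the 64ms
+ selection timeout (value 2 in the table above)::
+
+ options aic79xx aic79xx=seltime:2
+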
+.. Warning::
+
+ The following three options should only be changed at
+ the direction of a technical support representative.
+
+
+precomp: {value[,value...]}
+ :Definition: Set IO Cell precompensation value on a per-controller basis.
+ Controllers may be omitted indicating that
+ they should retain the default precompensation setting.
+
+ :Possible Values: 0 - 7
+ :Default Value: Varies based on chip revision
+
+ Examples:
+
+ ::
+
+ precomp:{0x1}
+
+ On Controller 0 set precompensation to 1.
+
+ ::
+
+ precomp:{1,,7}
+
+ - On Controller 0 set precompensation to 1.
+ - On Controller 2 set precompensation to 7.
+
+slewrate: {value[,value...]}
+ :Definition: Set IO Cell slew rate on a per-controller basis.
+ Controllers may be omitted indicating that
+ they should retain the default slew rate setting.
+
+ :Possible Values: 0 - 15
+ :Default Value: Varies based on chip revision
+
+ Examples:
+
+ ::
+
+ slewrate:{0x1}
+
+ - On Controller 0 set slew rate to 1.
+
+ ::
+
+ slewrate:{1,,8}
+
+ - On Controller 0 set slew rate to 1.
+ - On Controller 2 set slew rate to 8.
+
+amplitude: {value[,value...]}
+ :Definition: Set IO Cell signal amplitude on a per-controller basis.
+ Controllers may be omitted indicating that
+ they should retain the default read streaming setting.
+
+ :Possible Values: 1 - 7
+ :Default Value: Varies based on chip revision
+
+ Examples:
+
+ ::
+
+ amplitude:{0x1}
+
+ On Controller 0 set amplitude to 1.
+
+ ::
+
+ amplitude:{1,,7}
+
+ - On Controller 0 set amplitude to 1.
+ - On Controller 2 set amplitude to 7.
+
+Example::
+
+ options aic79xx aic79xx=verbose,rd_strm:{{0x0041}}
+
+enables verbose output in the driver and turns read streaming on
+for targets 0 and 6 of Controller 0.
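+
+As a concrete sketch, such an options line would typically live in a small
+file under /etc/modprobe.d/, e.g. a hypothetical
+/etc/modprobe.d/aic79xx.conf containing::
+
+ # /etc/modprobe.d/aic79xx.conf (illustrative file name)
+ options aic79xx aic79xx=verbose,rd_strm:{{0x0041}}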
+
+4. Additional Notes
+===================
+
+4.1. Known/Unresolved or FYI Issues
+-----------------------------------
+
+ * Under SuSE Linux Enterprise 7, the driver may fail to operate
+ correctly due to a problem with PCI interrupt routing in the
+ Linux kernel. Please contact SuSE for an updated Linux
+ kernel.
+
+4.2. Third-Party Compatibility Issues
+-------------------------------------
+
+ * Adaptec only supports Ultra320 hard drives running
+ the latest firmware available. Please check with
+ your hard drive manufacturer to ensure you have the
+ latest version.
+
+4.3. Operating System or Technology Limitations
+-----------------------------------------------
+
+ * PCI Hot Plug is untested and may cause the operating system
+ to stop responding.
+ * Luns that are not numbered contiguously starting with 0 might not
+ be automatically probed during system startup. This is a limitation
+ of the OS. Please contact your Linux vendor for instructions on
+ manually probing non-contiguous luns.
+ * Using the Driver Update Disk version of this package during OS
+ installation under RedHat might result in two versions of this
+ driver being installed into the system module directory. This
+ might cause problems with the /sbin/mkinitrd program and/or
+ other RPM packages that try to install system modules. The best
+ way to correct this once the system is running is to install
+ the latest RPM package version of this driver, available from
+ http://www.adaptec.com.
+
+
+5. Adaptec Customer Support
+===========================
+
+ A Technical Support Identification (TSID) Number is required for
+ Adaptec technical support.
+
+ - The 12-digit TSID can be found on the white barcode-type label
+ included inside the box with your product. The TSID helps us
+ provide more efficient service by accurately identifying your
+ product and support status.
+
+ Support Options
+ - Search the Adaptec Support Knowledgebase (ASK) at
+ http://ask.adaptec.com for articles, troubleshooting tips, and
+ frequently asked questions about your product.
+ - For support via Email, submit your question to Adaptec's
+ Technical Support Specialists at http://ask.adaptec.com/.
+
+ North America
+ - Visit our Web site at http://www.adaptec.com/.
+ - For information about Adaptec's support options, call
+ 408-957-2550, 24 hours a day, 7 days a week.
+ - To speak with a Technical Support Specialist,
+
+ * For hardware products, call 408-934-7274,
+ Monday to Friday, 3:00 am to 5:00 pm, PDT.
+ * For RAID and Fibre Channel products, call 321-207-2000,
+ Monday to Friday, 3:00 am to 5:00 pm, PDT.
+
+ To expedite your service, have your computer with you.
+ - To order Adaptec products, including accessories and cables,
+ call 408-957-7274. To order cables online go to
+ http://www.adaptec.com/buy-cables/.
+
+ Europe
+ - Visit our Web site at http://www.adaptec.com/en-US/_common/world_index.
+ - To speak with a Technical Support Specialist, call, or email,
+
+ * German: +49 89 4366 5522, Monday-Friday, 9:00-17:00 CET,
+ http://ask-de.adaptec.com/.
+ * French: +49 89 4366 5533, Monday-Friday, 9:00-17:00 CET,
+ http://ask-fr.adaptec.com/.
+ * English: +49 89 4366 5544, Monday-Friday, 9:00-17:00 GMT,
+ http://ask.adaptec.com/.
+
+ - You can order Adaptec cables online at
+ http://www.adaptec.com/buy-cables/.
+
+ Japan
+ - Visit our web site at http://www.adaptec.co.jp/.
+ - To speak with a Technical Support Specialist, call
+ +81 3 5308 6120, Monday-Friday, 9:00 a.m. to 12:00 p.m.,
+ 1:00 p.m. to 6:00 p.m.
+
+Copyright |copy| 2003 Adaptec Inc. 691 S. Milpitas Blvd., Milpitas CA 95035 USA.
+All rights reserved.
+
+You are permitted to redistribute, use and modify this README file in whole
+or in part in conjunction with redistribution of software governed by the
+General Public License, provided that the following conditions are met:
+
+1. Redistributions of README file must retain the above copyright
+ notice, this list of conditions, and the following disclaimer,
+ without modification.
+2. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+3. Modifications or new contributions must be attributed in a copyright
+ notice identifying the author ("Contributor") and added below the
+ original copyright notice. The copyright notice is for purposes of
+ identifying contributors and should not be deemed as permission to alter
+ the permissions given by Adaptec.
+
+THIS README FILE IS PROVIDED BY ADAPTEC AND CONTRIBUTORS ``AS IS`` AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ANY
+WARRANTIES OF NON-INFRINGEMENT OR THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ADAPTEC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS README
+FILE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Documentation/scsi/aic79xx.txt b/Documentation/scsi/aic79xx.txt
deleted file mode 100644
index e2d3273000d4..000000000000
--- a/Documentation/scsi/aic79xx.txt
+++ /dev/null
@@ -1,497 +0,0 @@
-====================================================================
-= Adaptec Ultra320 Family Manager Set =
-= =
-= README for =
-= The Linux Operating System =
-====================================================================
-
-The following information is available in this file:
-
- 1. Supported Hardware
- 2. Version History
- 3. Command Line Options
- 4. Additional Notes
- 5. Contacting Adaptec
-
-
-1. Supported Hardware
-
- The following Adaptec SCSI Host Adapters are supported by this
- driver set.
-
- Ultra320 ASIC Description
- ----------------------------------------------------------------
- AIC-7901A Single Channel 64-bit PCI-X 133MHz to
- Ultra320 SCSI ASIC
- AIC-7901B Single Channel 64-bit PCI-X 133MHz to
- Ultra320 SCSI ASIC with Retained Training
- AIC-7902A4 Dual Channel 64-bit PCI-X 133MHz to
- Ultra320 SCSI ASIC
- AIC-7902B Dual Channel 64-bit PCI-X 133MHz to
- Ultra320 SCSI ASIC with Retained Training
-
- Ultra320 Adapters Description ASIC
- --------------------------------------------------------------------------
- Adaptec SCSI Card 39320 Dual Channel 64-bit PCI-X 133MHz to 7902A4/7902B
- Ultra320 SCSI Card (one external
- 68-pin, two internal 68-pin)
- Adaptec SCSI Card 39320A Dual Channel 64-bit PCI-X 133MHz to 7902B
- Ultra320 SCSI Card (one external
- 68-pin, two internal 68-pin)
- Adaptec SCSI Card 39320D Dual Channel 64-bit PCI-X 133MHz to 7902A4
- Ultra320 SCSI Card (two external VHDC
- and one internal 68-pin)
- Adaptec SCSI Card 39320D Dual Channel 64-bit PCI-X 133MHz to 7902A4
- Ultra320 SCSI Card (two external VHDC
- and one internal 68-pin) based on the
- AIC-7902B ASIC
- Adaptec SCSI Card 29320 Single Channel 64-bit PCI-X 133MHz to 7901A
- Ultra320 SCSI Card (one external
- 68-pin, two internal 68-pin, one
- internal 50-pin)
- Adaptec SCSI Card 29320A Single Channel 64-bit PCI-X 133MHz to 7901B
- Ultra320 SCSI Card (one external
- 68-pin, two internal 68-pin, one
- internal 50-pin)
- Adaptec SCSI Card 29320LP Single Channel 64-bit Low Profile 7901A
- PCI-X 133MHz to Ultra320 SCSI Card
- (One external VHDC, one internal
- 68-pin)
- Adaptec SCSI Card 29320ALP Single Channel 64-bit Low Profile 7901B
- PCI-X 133MHz to Ultra320 SCSI Card
- (One external VHDC, one internal
- 68-pin)
-2. Version History
-
- 3.0 (December 1st, 2005)
- - Updated driver to use SCSI transport class infrastructure
- - Upported sequencer and core fixes from adaptec released
- version 2.0.15 of the driver.
-
- 1.3.11 (July 11, 2003)
- - Fix several deadlock issues.
- - Add 29320ALP and 39320B Id's.
-
- 1.3.10 (June 3rd, 2003)
- - Align the SCB_TAG field on a 16byte boundary. This avoids
- SCB corruption on some PCI-33 busses.
- - Correct non-zero luns on Rev B. hardware.
- - Update for change in 2.5.X SCSI proc FS interface.
- - When negotiation async via an 8bit WDTR message, send
- an SDTR with an offset of 0 to be sure the target
- knows we are async. This works around a firmware defect
- in the Quantum Atlas 10K.
- - Implement controller suspend and resume.
- - Clear PCI error state during driver attach so that we
- don't disable memory mapped I/O due to a stray write
- by some other driver probe that occurred before we
- claimed the controller.
-
- 1.3.9 (May 22nd, 2003)
- - Fix compiler errors.
- - Remove S/G splitting for segments that cross a 4GB boundary.
- This is guaranteed not to happen in Linux.
- - Add support for scsi_report_device_reset() found in
- 2.5.X kernels.
- - Add 7901B support.
- - Simplify handling of the packetized lun Rev A workaround.
- - Correct and simplify handling of the ignore wide residue
- message. The previous code would fail to report a residual
- if the transaction data length was even and we received
- an IWR message.
-
- 1.3.8 (April 29th, 2003)
- - Fix types accessed via the command line interface code.
- - Perform a few firmware optimizations.
- - Fix "Unexpected PKT busfree" errors.
- - Use a sequencer interrupt to notify the host of
- commands with bad status. We defer the notification
- until there are no outstanding selections to ensure
- that the host is interrupted for as short a time as
- possible.
- - Remove pre-2.2.X support.
- - Add support for new 2.5.X interrupt API.
- - Correct big-endian architecture support.
-
- 1.3.7 (April 16th, 2003)
- - Use del_timer_sync() to ensure that no timeouts
- are pending during controller shutdown.
- - For pre-2.5.X kernels, carefully adjust our segment
- list size to avoid SCSI malloc pool fragmentation.
- - Cleanup channel display in our /proc output.
- - Workaround duplicate device entries in the mid-layer
- device list during add-single-device.
-
- 1.3.6 (March 28th, 2003)
- - Correct a double free in the Domain Validation code.
- - Correct a reference to free'ed memory during controller
- shutdown.
- - Reset the bus on an SE->LVD change. This is required
- to reset our transceivers.
-
- 1.3.5 (March 24th, 2003)
- - Fix a few register window mode bugs.
- - Include read streaming in the PPR flags we display in
- diagnostics as well as /proc.
- - Add PCI hot plug support for 2.5.X kernels.
- - Correct default precompensation value for RevA hardware.
- - Fix Domain Validation thread shutdown.
- - Add a firmware workaround to make the LED blink
- brighter during packetized operations on the H2A4.
- - Correct /proc display of user read streaming settings.
- - Simplify driver locking by releasing the io_request_lock
- upon driver entry from the mid-layer.
- - Cleanup command line parsing and move much of this code
- to aiclib.
-
- 1.3.4 (February 28th, 2003)
- - Correct a race condition in our error recovery handler.
- - Allow Test Unit Ready commands to take a full 5 seconds
- during Domain Validation.
-
- 1.3.2 (February 19th, 2003)
- - Correct a Rev B. regression due to the GEM318
- compatibility fix included in 1.3.1.
-
- 1.3.1 (February 11th, 2003)
- - Add support for the 39320A.
- - Improve recovery for certain PCI-X errors.
- - Fix handling of LQ/DATA/LQ/DATA for the
- same write transaction that can occur without
- interveining training.
- - Correct compatibility issues with the GEM318
- enclosure services device.
- - Correct data corruption issue that occurred under
- high tag depth write loads.
- - Adapt to a change in the 2.5.X daemonize() API.
- - Correct a "Missing case in ahd_handle_scsiint" panic.
-
- 1.3.0 (January 21st, 2003)
- - Full regression testing for all U320 products completed.
- - Added abort and target/lun reset error recovery handler and
- interrupt coalescing.
-
- 1.2.0 (November 14th, 2002)
- - Added support for Domain Validation
- - Add support for the Hewlett-Packard version of the 39320D
- and AIC-7902 adapters.
- Support for previous adapters has not been fully tested and should
- only be used at the customer's own risk.
-
- 1.1.1 (September 24th, 2002)
- - Added support for the Linux 2.5.X kernel series
-
- 1.1.0 (September 17th, 2002)
- - Added support for four additional SCSI products:
- ASC-39320, ASC-29320, ASC-29320LP, AIC-7901.
-
- 1.0.0 (May 30th, 2002)
- - Initial driver release.
-
- 2.1. Software/Hardware Features
- - Support for the SPI-4 "Ultra320" standard:
- - 320MB/s transfer rates
- - Packetized SCSI Protocol at 160MB/s and 320MB/s
- - Quick Arbitration Selection (QAS)
- - Retained Training Information (Rev B. ASIC only)
- - Interrupt Coalescing
- - Initiator Mode (target mode not currently
- supported)
- - Support for the PCI-X standard up to 133MHz
- - Support for the PCI v2.2 standard
- - Domain Validation
-
- 2.2. Operating System Support:
- - Redhat Linux 7.2, 7.3, 8.0, Advanced Server 2.1
- - SuSE Linux 7.3, 8.0, 8.1, Enterprise Server 7
- - only Intel and AMD x86 supported at this time
- - >4GB memory configurations supported.
-
- Refer to the User's Guide for more details on this.
-
-3. Command Line Options
-
- WARNING: ALTERING OR ADDING THESE DRIVER PARAMETERS
- INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
- USE THEM WITH CAUTION.
-
- Put a .conf file in the /etc/modprobe.d/ directory and add/edit a
- line containing 'options aic79xx aic79xx=[command[,command...]]' where
- 'command' is one or more of the following:
- -----------------------------------------------------------------
- Option: verbose
- Definition: enable additional informative messages during
- driver operation.
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: debug:[value]
- Definition: Enables various levels of debugging information
- The bit definitions for the debugging mask can
- be found in drivers/scsi/aic7xxx/aic79xx.h under
- the "Debug" heading.
- Possible Values: 0x0000 = no debugging, 0xffff = full debugging
- Default Value: 0x0000
- -----------------------------------------------------------------
- Option: no_reset
- Definition: Do not reset the bus during the initial probe
- phase
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: extended
- Definition: Force extended translation on the controller
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: periodic_otag
- Definition: Send an ordered tag periodically to prevent
- tag starvation. Needed for some older devices
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: reverse_scan
- Definition: Probe the scsi bus in reverse order, starting
- with target 15
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: global_tag_depth
- Definition: Global tag depth for all targets on all busses.
- This option sets the default tag depth which
- may be selectively overridden vi the tag_info
- option.
- Possible Values: 1 - 253
- Default Value: 32
- -----------------------------------------------------------------
- Option: tag_info:{{value[,value...]}[,{value[,value...]}...]}
- Definition: Set the per-target tagged queue depth on a
- per controller basis. Both controllers and targets
- may be omitted indicating that they should retain
- the default tag depth.
- Examples: tag_info:{{16,32,32,64,8,8,,32,32,32,32,32,32,32,32,32}
- On Controller 0
- specifies a tag depth of 16 for target 0
- specifies a tag depth of 64 for target 3
- specifies a tag depth of 8 for targets 4 and 5
- leaves target 6 at the default
- specifies a tag depth of 32 for targets 1,2,7-15
- All other targets retain the default depth.
-
- tag_info:{{},{32,,32}}
- On Controller 1
- specifies a tag depth of 32 for targets 0 and 2
- All other targets retain the default depth.
-
- Possible Values: 1 - 253
- Default Value: 32
- -----------------------------------------------------------------
- Option: rd_strm: {rd_strm_bitmask[,rd_strm_bitmask...]}
- Definition: Enable read streaming on a per target basis.
- The rd_strm_bitmask is a 16 bit hex value in which
- each bit represents a target. Setting the target's
- bit to '1' enables read streaming for that
- target. Controllers may be omitted indicating that
- they should retain the default read streaming setting.
- Example: rd_strm:{0x0041}
- On Controller 0
- enables read streaming for targets 0 and 6.
- disables read streaming for targets 1-5,7-15.
- All other targets retain the default read
- streaming setting.
- Example: rd_strm:{0x0023,,0xFFFF}
- On Controller 0
- enables read streaming for targets 1,2, and 5.
- disables read streaming for targets 3,4,6-15.
- On Controller 2
- enables read streaming for all targets.
- All other targets retain the default read
- streaming setting.
-
- Possible Values: 0x0000 - 0xffff
- Default Value: 0x0000
- -----------------------------------------------------------------
- Option: dv: {value[,value...]}
- Definition: Set Domain Validation Policy on a per-controller basis.
- Controllers may be omitted indicating that
- they should retain the default read streaming setting.
- Example: dv:{-1,0,,1,1,0}
- On Controller 0 leave DV at its default setting.
- On Controller 1 disable DV.
- Skip configuration on Controller 2.
- On Controllers 3 and 4 enable DV.
- On Controller 5 disable DV.
-
- Possible Values: < 0 Use setting from serial EEPROM.
- 0 Disable DV
- > 0 Enable DV
- Default Value: DV Serial EEPROM configuration setting.
- -----------------------------------------------------------------
- Option: seltime:[value]
- Definition: Specifies the selection timeout value
- Possible Values: 0 = 256ms, 1 = 128ms, 2 = 64ms, 3 = 32ms
- Default Value: 0
- -----------------------------------------------------------------
-
- *** The following three options should only be changed at ***
- *** the direction of a technical support representative. ***
-
- -----------------------------------------------------------------
- Option: precomp: {value[,value...]}
- Definition: Set IO Cell precompensation value on a per-controller
- basis.
- Controllers may be omitted indicating that
- they should retain the default precompensation setting.
- Example: precomp:{0x1}
- On Controller 0 set precompensation to 1.
- Example: precomp:{1,,7}
- On Controller 0 set precompensation to 1.
- On Controller 2 set precompensation to 8.
-
- Possible Values: 0 - 7
- Default Value: Varies based on chip revision
- -----------------------------------------------------------------
- Option: slewrate: {value[,value...]}
- Definition: Set IO Cell slew rate on a per-controller basis.
- Controllers may be omitted indicating that
- they should retain the default slew rate setting.
- Example: slewrate:{0x1}
- On Controller 0 set slew rate to 1.
- Example: slewrate :{1,,8}
- On Controller 0 set slew rate to 1.
- On Controller 2 set slew rate to 8.
-
- Possible Values: 0 - 15
- Default Value: Varies based on chip revision
- -----------------------------------------------------------------
- Option: amplitude: {value[,value...]}
- Definition: Set IO Cell signal amplitude on a per-controller basis.
- Controllers may be omitted indicating that
- they should retain the default read streaming setting.
- Example: amplitude:{0x1}
- On Controller 0 set amplitude to 1.
- Example: amplitude :{1,,7}
- On Controller 0 set amplitude to 1.
- On Controller 2 set amplitude to 7.
-
- Possible Values: 1 - 7
- Default Value: Varies based on chip revision
- -----------------------------------------------------------------
-
- Example: 'options aic79xx aic79xx=verbose,rd_strm:{{0x0041}}'
- enables verbose output in the driver and turns read streaming on
- for targets 0 and 6 of Controller 0.
-
-4. Additional Notes
-
- 4.1. Known/Unresolved or FYI Issues
-
- * Under SuSE Linux Enterprise 7, the driver may fail to operate
- correctly due to a problem with PCI interrupt routing in the
- Linux kernel. Please contact SuSE for an updated Linux
- kernel.
-
- 4.2. Third-Party Compatibility Issues
-
- * Adaptec only supports Ultra320 hard drives running
- the latest firmware available. Please check with
- your hard drive manufacturer to ensure you have the
- latest version.
-
- 4.3. Operating System or Technology Limitations
-
- * PCI Hot Plug is untested and may cause the operating system
- to stop responding.
- * Luns that are not numbered contiguously starting with 0 might not
- be automatically probed during system startup. This is a limitation
- of the OS. Please contact your Linux vendor for instructions on
- manually probing non-contiguous luns.
- * Using the Driver Update Disk version of this package during OS
- installation under RedHat might result in two versions of this
- driver being installed into the system module directory. This
- might cause problems with the /sbin/mkinitrd program and/or
- other RPM packages that try to install system modules. The best
- way to correct this once the system is running is to install
- the latest RPM package version of this driver, available from
- http://www.adaptec.com.
-
-
-5. Adaptec Customer Support
-
- A Technical Support Identification (TSID) Number is required for
- Adaptec technical support.
- - The 12-digit TSID can be found on the white barcode-type label
- included inside the box with your product. The TSID helps us
- provide more efficient service by accurately identifying your
- product and support status.
-
- Support Options
- - Search the Adaptec Support Knowledgebase (ASK) at
- http://ask.adaptec.com for articles, troubleshooting tips, and
- frequently asked questions about your product.
- - For support via Email, submit your question to Adaptec's
- Technical Support Specialists at http://ask.adaptec.com/.
-
- North America
- - Visit our Web site at http://www.adaptec.com/.
- - For information about Adaptec's support options, call
- 408-957-2550, 24 hours a day, 7 days a week.
- - To speak with a Technical Support Specialist,
- * For hardware products, call 408-934-7274,
- Monday to Friday, 3:00 am to 5:00 pm, PDT.
- * For RAID and Fibre Channel products, call 321-207-2000,
- Monday to Friday, 3:00 am to 5:00 pm, PDT.
- To expedite your service, have your computer with you.
- - To order Adaptec products, including accessories and cables,
- call 408-957-7274. To order cables online go to
- http://www.adaptec.com/buy-cables/.
-
- Europe
- - Visit our Web site at http://www.adaptec.com/en-US/_common/world_index.
- - To speak with a Technical Support Specialist, call, or email,
- * German: +49 89 4366 5522, Monday-Friday, 9:00-17:00 CET,
- http://ask-de.adaptec.com/.
- * French: +49 89 4366 5533, Monday-Friday, 9:00-17:00 CET,
- http://ask-fr.adaptec.com/.
- * English: +49 89 4366 5544, Monday-Friday, 9:00-17:00 GMT,
- http://ask.adaptec.com/.
- - You can order Adaptec cables online at
- http://www.adaptec.com/buy-cables/.
-
- Japan
- - Visit our web site at http://www.adaptec.co.jp/.
- - To speak with a Technical Support Specialist, call
- +81 3 5308 6120, Monday-Friday, 9:00 a.m. to 12:00 p.m.,
- 1:00 p.m. to 6:00 p.m.
-
--------------------------------------------------------------------
-/*
- * Copyright (c) 2003 Adaptec Inc. 691 S. Milpitas Blvd., Milpitas CA 95035 USA.
- * All rights reserved.
- *
- * You are permitted to redistribute, use and modify this README file in whole
- * or in part in conjunction with redistribution of software governed by the
- * General Public License, provided that the following conditions are met:
- * 1. Redistributions of README file must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- * 3. Modifications or new contributions must be attributed in a copyright
- * notice identifying the author ("Contributor") and added below the
- * original copyright notice. The copyright notice is for purposes of
- * identifying contributors and should not be deemed as permission to alter
- * the permissions given by Adaptec.
- *
- * THIS README FILE IS PROVIDED BY ADAPTEC AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ANY
- * WARRANTIES OF NON-INFRINGEMENT OR THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * ADAPTEC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS README
- * FILE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
diff --git a/Documentation/scsi/aic7xxx.rst b/Documentation/scsi/aic7xxx.rst
new file mode 100644
index 000000000000..bad0e5567b21
--- /dev/null
+++ b/Documentation/scsi/aic7xxx.rst
@@ -0,0 +1,458 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+========================================================
+Adaptec Aic7xxx Fast -> Ultra160 Family Manager Set v7.0
+========================================================
+
+README for The Linux Operating System
+
+The following information is available in this file:
+
+ 1. Supported Hardware
+ 2. Version History
+ 3. Command Line Options
+ 4. Contacting Adaptec
+
+1. Supported Hardware
+=====================
+
+ The following Adaptec SCSI Chips and Host Adapters are supported by
+ the aic7xxx driver.
+
+ ======== ===== ========= ======== ========= ===== ===============
+ Chip MIPS Host Bus MaxSync MaxWidth SCBs Notes
+ ======== ===== ========= ======== ========= ===== ===============
+ aic7770 10 EISA/VL 10MHz 16Bit 4 1
+ aic7850 10 PCI/32 10MHz 8Bit 3
+ aic7855 10 PCI/32 10MHz 8Bit 3
+ aic7856 10 PCI/32 10MHz 8Bit 3
+ aic7859 10 PCI/32 20MHz 8Bit 3
+ aic7860 10 PCI/32 20MHz 8Bit 3
+ aic7870 10 PCI/32 10MHz 16Bit 16
+ aic7880 10 PCI/32 20MHz 16Bit 16
+ aic7890 20 PCI/32 40MHz 16Bit 16 3 4 5 6 7 8
+ aic7891 20 PCI/64 40MHz 16Bit 16 3 4 5 6 7 8
+ aic7892 20 PCI/64-66 80MHz 16Bit 16 3 4 5 6 7 8
+ aic7895 15 PCI/32 20MHz 16Bit 16 2 3 4 5
+ aic7895C 15 PCI/32 20MHz 16Bit 16 2 3 4 5 8
+ aic7896 20 PCI/32 40MHz 16Bit 16 2 3 4 5 6 7 8
+ aic7897 20 PCI/64 40MHz 16Bit 16 2 3 4 5 6 7 8
+ aic7899 20 PCI/64-66 80MHz 16Bit 16 2 3 4 5 6 7 8
+ ======== ===== ========= ======== ========= ===== ===============
+
+ 1. Multiplexed Twin Channel Device - One controller servicing two
+ busses.
+ 2. Multi-function Twin Channel Device - Two controllers on one chip.
+ 3. Command Channel Secondary DMA Engine - Allows scatter gather list
+ and SCB prefetch.
+ 4. 64 Byte SCB Support - Allows disconnected, untagged request table
+ for all possible target/lun combinations.
+ 5. Block Move Instruction Support - Doubles the speed of certain
+ sequencer operations.
+ 6. 'Bayonet' style Scatter Gather Engine - Improves S/G prefetch
+ performance.
+ 7. Queuing Registers - Allows queuing of new transactions without
+ pausing the sequencer.
+ 8. Multiple Target IDs - Allows the controller to respond to selection
+ as a target on multiple SCSI IDs.
+
+ ============== ======= =========== =============== =============== =========
+ Controller Chip Host-Bus Int-Connectors Ext-Connectors Notes
+ ============== ======= =========== =============== =============== =========
+ AHA-274X[A] aic7770 EISA SE-50M SE-HD50F
+ AHA-274X[A]W aic7770 EISA SE-HD68F SE-HD68F
+ SE-50M
+ AHA-274X[A]T aic7770 EISA 2 X SE-50M SE-HD50F
+ AHA-2842 aic7770 VL SE-50M SE-HD50F
+ AHA-2940AU aic7860 PCI/32 SE-50M SE-HD50F
+ AVA-2902I aic7860 PCI/32 SE-50M
+ AVA-2902E aic7860 PCI/32 SE-50M
+ AVA-2906 aic7856 PCI/32 SE-50M SE-DB25F
+ APC-7850 aic7850 PCI/32 SE-50M 1
+ AVA-2940 aic7860 PCI/32 SE-50M
+ AHA-2920B aic7860 PCI/32 SE-50M
+ AHA-2930B aic7860 PCI/32 SE-50M
+ AHA-2920C aic7856 PCI/32 SE-50M SE-HD50F
+  AHA-2930C      aic7860 PCI/32      SE-50M
+ AHA-2910C aic7860 PCI/32 SE-50M
+ AHA-2915C aic7860 PCI/32 SE-50M
+ AHA-2940AU/CN aic7860 PCI/32 SE-50M SE-HD50F
+ AHA-2944W aic7870 PCI/32 HVD-HD68F HVD-HD68F
+ HVD-50M
+ AHA-3940W aic7870 PCI/32 2 X SE-HD68F SE-HD68F 2
+ AHA-2940UW aic7880 PCI/32 SE-HD68F
+ SE-50M SE-HD68F
+ AHA-2940U aic7880 PCI/32 SE-50M SE-HD50F
+ AHA-2940D aic7880 PCI/32
+  AHA-2940 A/T   aic7880 PCI/32
+ AHA-2940D A/T aic7880 PCI/32
+ AHA-3940UW aic7880 PCI/32 2 X SE-HD68F SE-HD68F 3
+ AHA-3940UWD aic7880 PCI/32 2 X SE-HD68F 2 X SE-VHD68F 3
+ AHA-3940U aic7880 PCI/32 2 X SE-50M SE-HD50F 3
+ AHA-2944UW aic7880 PCI/32 HVD-HD68F HVD-HD68F
+ HVD-50M
+ AHA-3944UWD aic7880 PCI/32 2 X HVD-HD68F 2 X HVD-VHD68F 3
+ AHA-4944UW aic7880 PCI/32
+ AHA-2930UW aic7880 PCI/32
+ AHA-2940UW Pro aic7880 PCI/32 SE-HD68F SE-HD68F 4
+ SE-50M
+ AHA-2940UW/CN aic7880 PCI/32
+ AHA-2940UDual aic7895 PCI/32
+ AHA-2940UWDual aic7895 PCI/32
+ AHA-3940UWD aic7895 PCI/32
+ AHA-3940AUW aic7895 PCI/32
+ AHA-3940AUWD aic7895 PCI/32
+ AHA-3940AU aic7895 PCI/32
+ AHA-3944AUWD aic7895 PCI/32 2 X HVD-HD68F 2 X HVD-VHD68F
+ AHA-2940U2B aic7890 PCI/32 LVD-HD68F LVD-HD68F
+ AHA-2940U2 OEM aic7891 PCI/64
+ AHA-2940U2W aic7890 PCI/32 LVD-HD68F LVD-HD68F
+ SE-HD68F
+ SE-50M
+ AHA-2950U2B aic7891 PCI/64 LVD-HD68F LVD-HD68F
+ AHA-2930U2 aic7890 PCI/32 LVD-HD68F SE-HD50F
+ SE-50M
+ AHA-3950U2B aic7897 PCI/64
+ AHA-3950U2D aic7897 PCI/64
+ AHA-29160 aic7892 PCI/64-66
+ AHA-29160 CPQ aic7892 PCI/64-66
+ AHA-29160N aic7892 PCI/32 LVD-HD68F SE-HD50F
+ SE-50M
+ AHA-29160LP aic7892 PCI/64-66
+ AHA-19160 aic7892 PCI/64-66
+ AHA-29150LP aic7892 PCI/64-66
+ AHA-29130LP aic7892 PCI/64-66
+ AHA-3960D aic7899 PCI/64-66 2 X LVD-HD68F 2 X LVD-VHD68F
+ LVD-50M
+ AHA-3960D CPQ aic7899 PCI/64-66 2 X LVD-HD68F 2 X LVD-VHD68F
+ LVD-50M
+ AHA-39160 aic7899 PCI/64-66 2 X LVD-HD68F 2 X LVD-VHD68F
+ LVD-50M
+ ============== ======= =========== =============== =============== =========
+
+ 1. No BIOS support
+ 2. DEC21050 PCI-PCI bridge with multiple controller chips on secondary bus
+ 3. DEC2115X PCI-PCI bridge with multiple controller chips on secondary bus
+ 4. All three SCSI connectors may be used simultaneously without
+ SCSI "stub" effects.
+
+2. Version History
+==================
+
+ * 7.0 (4th August, 2005)
+ - Updated driver to use SCSI transport class infrastructure
+    - Upported sequencer and core fixes from the last Adaptec-released
+      version of the driver.
+
+ * 6.2.36 (June 3rd, 2003)
+ - Correct code that disables PCI parity error checking.
+ - Correct and simplify handling of the ignore wide residue
+ message. The previous code would fail to report a residual
+ if the transaction data length was even and we received
+ an IWR message.
+ - Add support for the 2.5.X EISA framework.
+ - Update for change in 2.5.X SCSI proc FS interface.
+ - Correct Domain Validation command-line option parsing.
+    - When negotiating async via an 8-bit WDTR message, send
+ an SDTR with an offset of 0 to be sure the target
+ knows we are async. This works around a firmware defect
+ in the Quantum Atlas 10K.
+ - Clear PCI error state during driver attach so that we
+ don't disable memory mapped I/O due to a stray write
+ by some other driver probe that occurred before we
+ claimed the controller.
+
+ * 6.2.35 (May 14th, 2003)
+ - Fix a few GCC 3.3 compiler warnings.
+ - Correct operation on EISA Twin Channel controller.
+ - Add support for 2.5.X's scsi_report_device_reset().
+
+ * 6.2.34 (May 5th, 2003)
+ - Fix locking regression introduced in 6.2.29 that
+ could cause a lock order reversal between the io_request_lock
+ and our per-softc lock. This was only possible on RH9,
+ SuSE, and kernel.org 2.4.X kernels.
+
+ * 6.2.33 (April 30th, 2003)
+ - Dynamically disable PCI parity error reporting after
+ 10 errors are reported to the user. These errors are
+ the result of some other device issuing PCI transactions
+ with bad parity. Once the user has been informed of the
+ problem, continuing to report the errors just degrades
+ our performance.
+
+ * 6.2.32 (March 28th, 2003)
+ - Dynamically sized S/G lists to avoid SCSI malloc
+ pool fragmentation and SCSI mid-layer deadlock.
+
+ * 6.2.28 (January 20th, 2003)
+ - Domain Validation Fixes
+ - Add ability to disable PCI parity error checking.
+ - Enhanced Memory Mapped I/O probe
+
+ * 6.2.20 (November 7th, 2002)
+ - Added Domain Validation.
+
+3. Command Line Options
+=======================
+
+
+ .. Warning::
+
+ ALTERING OR ADDING THESE DRIVER PARAMETERS
+ INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
+ USE THEM WITH CAUTION.
+
+ Put a .conf file in the /etc/modprobe.d directory and add/edit a
+ line containing ``options aic7xxx aic7xxx=[command[,command...]]`` where
+ ``command`` is one or more of the following:
+
+verbose
+
+ :Definition: enable additional informative messages during driver operation.
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+
+debug:[value]
+
+ :Definition: Enables various levels of debugging information
+ :Possible Values: 0x0000 = no debugging, 0xffff = full debugging
+ :Default Value: 0x0000
+
+no_probe
+
+probe_eisa_vl
+
+ :Definition: Do not probe for EISA/VLB controllers.
+ This is a toggle. If the driver is compiled
+ to not probe EISA/VLB controllers by default,
+ specifying "no_probe" will enable this probing.
+ If the driver is compiled to probe EISA/VLB
+ controllers by default, specifying "no_probe"
+ will disable this probing.
+
+ :Possible Values: This option is a toggle
+ :Default Value: EISA/VLB probing is disabled by default.
+
+pci_parity
+
+ :Definition: Toggles the detection of PCI parity errors.
+ On many motherboards with VIA chipsets,
+ PCI parity is not generated correctly on the
+ PCI bus. It is impossible for the hardware to
+ differentiate between these "spurious" parity
+ errors and real parity errors. The symptom of
+ this problem is a stream of the message::
+
+ "scsi0: Data Parity Error Detected during address or write data phase"
+
+ output by the driver.
+
+ :Possible Values: This option is a toggle
+ :Default Value: PCI Parity Error reporting is disabled
+
+no_reset
+
+ :Definition: Do not reset the bus during the initial probe
+ phase
+
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+extended
+
+ :Definition: Force extended translation on the controller
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+periodic_otag
+
+ :Definition: Send an ordered tag periodically to prevent
+ tag starvation. Needed for some older devices
+
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+reverse_scan
+
+ :Definition: Probe the scsi bus in reverse order, starting
+ with target 15
+
+ :Possible Values: This option is a flag
+ :Default Value: disabled
+
+global_tag_depth:[value]
+
+ :Definition: Global tag depth for all targets on all busses.
+ This option sets the default tag depth which
+                 may be selectively overridden via the tag_info
+ option.
+
+ :Possible Values: 1 - 253
+ :Default Value: 32
+
+tag_info:{{value[,value...]}[,{value[,value...]}...]}
+
+ :Definition: Set the per-target tagged queue depth on a
+ per controller basis. Both controllers and targets
+ may be omitted indicating that they should retain
+ the default tag depth.
+
+ :Possible Values: 1 - 253
+ :Default Value: 32
+
+ Examples:
+
+ ::
+
+      tag_info:{{16,32,32,64,8,8,,32,32,32,32,32,32,32,32,32}}
+
+ On Controller 0:
+
+ - specifies a tag depth of 16 for target 0
+ - specifies a tag depth of 64 for target 3
+ - specifies a tag depth of 8 for targets 4 and 5
+ - leaves target 6 at the default
+ - specifies a tag depth of 32 for targets 1,2,7-15
+ - All other targets retain the default depth.
+
+ ::
+
+ tag_info:{{},{32,,32}}
+
+ On Controller 1:
+
+ - specifies a tag depth of 32 for targets 0 and 2
+ - All other targets retain the default depth.
+
+seltime:[value]
+
+ :Definition: Specifies the selection timeout value
+ :Possible Values: 0 = 256ms, 1 = 128ms, 2 = 64ms, 3 = 32ms
+ :Default Value: 0
+
+dv: {value[,value...]}
+
+ :Definition: Set Domain Validation Policy on a per-controller basis.
+                 Controllers may be omitted, indicating that
+                 they should retain the default DV setting.
+
+ :Possible Values:
+
+ ==== ===============================
+ < 0 Use setting from serial EEPROM.
+ 0 Disable DV
+ > 0 Enable DV
+ ==== ===============================
+
+
+ :Default Value: SCSI-Select setting on controllers with a SCSI Select
+ option for DV. Otherwise, on for controllers supporting
+ U160 speeds and off for all other controller types.
+
+ Example:
+
+ ::
+
+ dv:{-1,0,,1,1,0}
+
+ - On Controller 0 leave DV at its default setting.
+ - On Controller 1 disable DV.
+ - Skip configuration on Controller 2.
+ - On Controllers 3 and 4 enable DV.
+ - On Controller 5 disable DV.
+
+Example::
+
+ options aic7xxx aic7xxx=verbose,no_probe,tag_info:{{},{,,10}},seltime:1
+
+This enables verbose logging, disables EISA/VLB probing,
+and sets the tag depth on Controller 1/Target 2 to 10 tags.
+
+4. Adaptec Customer Support
+===========================
+
+ A Technical Support Identification (TSID) Number is required for
+ Adaptec technical support.
+
+ - The 12-digit TSID can be found on the white barcode-type label
+ included inside the box with your product. The TSID helps us
+ provide more efficient service by accurately identifying your
+ product and support status.
+
+ Support Options
+ - Search the Adaptec Support Knowledgebase (ASK) at
+ http://ask.adaptec.com for articles, troubleshooting tips, and
+ frequently asked questions about your product.
+ - For support via Email, submit your question to Adaptec's
+ Technical Support Specialists at http://ask.adaptec.com/.
+
+ North America
+ - Visit our Web site at http://www.adaptec.com/.
+ - For information about Adaptec's support options, call
+ 408-957-2550, 24 hours a day, 7 days a week.
+ - To speak with a Technical Support Specialist,
+
+ * For hardware products, call 408-934-7274,
+ Monday to Friday, 3:00 am to 5:00 pm, PDT.
+ * For RAID and Fibre Channel products, call 321-207-2000,
+ Monday to Friday, 3:00 am to 5:00 pm, PDT.
+
+ To expedite your service, have your computer with you.
+ - To order Adaptec products, including accessories and cables,
+ call 408-957-7274. To order cables online go to
+ http://www.adaptec.com/buy-cables/.
+
+ Europe
+ - Visit our Web site at http://www.adaptec.com/en-US/_common/world_index.
+  - To speak with a Technical Support Specialist, call or email:
+
+ * German: +49 89 4366 5522, Monday-Friday, 9:00-17:00 CET,
+ http://ask-de.adaptec.com/.
+ * French: +49 89 4366 5533, Monday-Friday, 9:00-17:00 CET,
+ http://ask-fr.adaptec.com/.
+ * English: +49 89 4366 5544, Monday-Friday, 9:00-17:00 GMT,
+ http://ask.adaptec.com/.
+
+ - You can order Adaptec cables online at
+ http://www.adaptec.com/buy-cables/.
+
+ Japan
+ - Visit our web site at http://www.adaptec.co.jp/.
+ - To speak with a Technical Support Specialist, call
+ +81 3 5308 6120, Monday-Friday, 9:00 a.m. to 12:00 p.m.,
+ 1:00 p.m. to 6:00 p.m.
+
+Copyright |copy| 2003 Adaptec Inc. 691 S. Milpitas Blvd., Milpitas CA 95035 USA.
+
+All rights reserved.
+
+You are permitted to redistribute, use and modify this README file in whole
+or in part in conjunction with redistribution of software governed by the
+General Public License, provided that the following conditions are met:
+
+1. Redistributions of README file must retain the above copyright
+ notice, this list of conditions, and the following disclaimer,
+ without modification.
+2. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+3. Modifications or new contributions must be attributed in a copyright
+ notice identifying the author ("Contributor") and added below the
+ original copyright notice. The copyright notice is for purposes of
+ identifying contributors and should not be deemed as permission to alter
+ the permissions given by Adaptec.
+
+THIS README FILE IS PROVIDED BY ADAPTEC AND CONTRIBUTORS ``AS IS`` AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ANY
+WARRANTIES OF NON-INFRINGEMENT OR THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ADAPTEC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS README
+FILE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Documentation/scsi/aic7xxx.txt b/Documentation/scsi/aic7xxx.txt
deleted file mode 100644
index 7c5d0223d444..000000000000
--- a/Documentation/scsi/aic7xxx.txt
+++ /dev/null
@@ -1,394 +0,0 @@
-====================================================================
-= Adaptec Aic7xxx Fast -> Ultra160 Family Manager Set v7.0 =
-= README for =
-= The Linux Operating System =
-====================================================================
-
-The following information is available in this file:
-
- 1. Supported Hardware
- 2. Version History
- 3. Command Line Options
- 4. Contacting Adaptec
-
-1. Supported Hardware
-
- The following Adaptec SCSI Chips and Host Adapters are supported by
- the aic7xxx driver.
-
- Chip MIPS Host Bus MaxSync MaxWidth SCBs Notes
- ---------------------------------------------------------------
- aic7770 10 EISA/VL 10MHz 16Bit 4 1
- aic7850 10 PCI/32 10MHz 8Bit 3
- aic7855 10 PCI/32 10MHz 8Bit 3
- aic7856 10 PCI/32 10MHz 8Bit 3
- aic7859 10 PCI/32 20MHz 8Bit 3
- aic7860 10 PCI/32 20MHz 8Bit 3
- aic7870 10 PCI/32 10MHz 16Bit 16
- aic7880 10 PCI/32 20MHz 16Bit 16
- aic7890 20 PCI/32 40MHz 16Bit 16 3 4 5 6 7 8
- aic7891 20 PCI/64 40MHz 16Bit 16 3 4 5 6 7 8
- aic7892 20 PCI/64-66 80MHz 16Bit 16 3 4 5 6 7 8
- aic7895 15 PCI/32 20MHz 16Bit 16 2 3 4 5
- aic7895C 15 PCI/32 20MHz 16Bit 16 2 3 4 5 8
- aic7896 20 PCI/32 40MHz 16Bit 16 2 3 4 5 6 7 8
- aic7897 20 PCI/64 40MHz 16Bit 16 2 3 4 5 6 7 8
- aic7899 20 PCI/64-66 80MHz 16Bit 16 2 3 4 5 6 7 8
-
- 1. Multiplexed Twin Channel Device - One controller servicing two
- busses.
- 2. Multi-function Twin Channel Device - Two controllers on one chip.
- 3. Command Channel Secondary DMA Engine - Allows scatter gather list
- and SCB prefetch.
- 4. 64 Byte SCB Support - Allows disconnected, untagged request table
- for all possible target/lun combinations.
- 5. Block Move Instruction Support - Doubles the speed of certain
- sequencer operations.
- 6. `Bayonet' style Scatter Gather Engine - Improves S/G prefetch
- performance.
- 7. Queuing Registers - Allows queuing of new transactions without
- pausing the sequencer.
- 8. Multiple Target IDs - Allows the controller to respond to selection
- as a target on multiple SCSI IDs.
-
- Controller Chip Host-Bus Int-Connectors Ext-Connectors Notes
- --------------------------------------------------------------------------
- AHA-274X[A] aic7770 EISA SE-50M SE-HD50F
- AHA-274X[A]W aic7770 EISA SE-HD68F SE-HD68F
- SE-50M
- AHA-274X[A]T aic7770 EISA 2 X SE-50M SE-HD50F
- AHA-2842 aic7770 VL SE-50M SE-HD50F
- AHA-2940AU aic7860 PCI/32 SE-50M SE-HD50F
- AVA-2902I aic7860 PCI/32 SE-50M
- AVA-2902E aic7860 PCI/32 SE-50M
- AVA-2906 aic7856 PCI/32 SE-50M SE-DB25F
- APC-7850 aic7850 PCI/32 SE-50M 1
- AVA-2940 aic7860 PCI/32 SE-50M
- AHA-2920B aic7860 PCI/32 SE-50M
- AHA-2930B aic7860 PCI/32 SE-50M
- AHA-2920C aic7856 PCI/32 SE-50M SE-HD50F
- AHA-2930C aic7860 PCI/32 SE-50M
- AHA-2930C aic7860 PCI/32 SE-50M
- AHA-2910C aic7860 PCI/32 SE-50M
- AHA-2915C aic7860 PCI/32 SE-50M
- AHA-2940AU/CN aic7860 PCI/32 SE-50M SE-HD50F
- AHA-2944W aic7870 PCI/32 HVD-HD68F HVD-HD68F
- HVD-50M
- AHA-3940W aic7870 PCI/32 2 X SE-HD68F SE-HD68F 2
- AHA-2940UW aic7880 PCI/32 SE-HD68F
- SE-50M SE-HD68F
- AHA-2940U aic7880 PCI/32 SE-50M SE-HD50F
- AHA-2940D aic7880 PCI/32
- aHA-2940 A/T aic7880 PCI/32
- AHA-2940D A/T aic7880 PCI/32
- AHA-3940UW aic7880 PCI/32 2 X SE-HD68F SE-HD68F 3
- AHA-3940UWD aic7880 PCI/32 2 X SE-HD68F 2 X SE-VHD68F 3
- AHA-3940U aic7880 PCI/32 2 X SE-50M SE-HD50F 3
- AHA-2944UW aic7880 PCI/32 HVD-HD68F HVD-HD68F
- HVD-50M
- AHA-3944UWD aic7880 PCI/32 2 X HVD-HD68F 2 X HVD-VHD68F 3
- AHA-4944UW aic7880 PCI/32
- AHA-2930UW aic7880 PCI/32
- AHA-2940UW Pro aic7880 PCI/32 SE-HD68F SE-HD68F 4
- SE-50M
- AHA-2940UW/CN aic7880 PCI/32
- AHA-2940UDual aic7895 PCI/32
- AHA-2940UWDual aic7895 PCI/32
- AHA-3940UWD aic7895 PCI/32
- AHA-3940AUW aic7895 PCI/32
- AHA-3940AUWD aic7895 PCI/32
- AHA-3940AU aic7895 PCI/32
- AHA-3944AUWD aic7895 PCI/32 2 X HVD-HD68F 2 X HVD-VHD68F
- AHA-2940U2B aic7890 PCI/32 LVD-HD68F LVD-HD68F
- AHA-2940U2 OEM aic7891 PCI/64
- AHA-2940U2W aic7890 PCI/32 LVD-HD68F LVD-HD68F
- SE-HD68F
- SE-50M
- AHA-2950U2B aic7891 PCI/64 LVD-HD68F LVD-HD68F
- AHA-2930U2 aic7890 PCI/32 LVD-HD68F SE-HD50F
- SE-50M
- AHA-3950U2B aic7897 PCI/64
- AHA-3950U2D aic7897 PCI/64
- AHA-29160 aic7892 PCI/64-66
- AHA-29160 CPQ aic7892 PCI/64-66
- AHA-29160N aic7892 PCI/32 LVD-HD68F SE-HD50F
- SE-50M
- AHA-29160LP aic7892 PCI/64-66
- AHA-19160 aic7892 PCI/64-66
- AHA-29150LP aic7892 PCI/64-66
- AHA-29130LP aic7892 PCI/64-66
- AHA-3960D aic7899 PCI/64-66 2 X LVD-HD68F 2 X LVD-VHD68F
- LVD-50M
- AHA-3960D CPQ aic7899 PCI/64-66 2 X LVD-HD68F 2 X LVD-VHD68F
- LVD-50M
- AHA-39160 aic7899 PCI/64-66 2 X LVD-HD68F 2 X LVD-VHD68F
- LVD-50M
-
- 1. No BIOS support
- 2. DEC21050 PCI-PCI bridge with multiple controller chips on secondary bus
- 3. DEC2115X PCI-PCI bridge with multiple controller chips on secondary bus
- 4. All three SCSI connectors may be used simultaneously without
- SCSI "stub" effects.
-
-2. Version History
- 7.0 (4th August, 2005)
- - Updated driver to use SCSI transport class infrastructure
- - Upported sequencer and core fixes from last adaptec released
- version of the driver.
- 6.2.36 (June 3rd, 2003)
- - Correct code that disables PCI parity error checking.
- - Correct and simplify handling of the ignore wide residue
- message. The previous code would fail to report a residual
- if the transaction data length was even and we received
- an IWR message.
- - Add support for the 2.5.X EISA framework.
- - Update for change in 2.5.X SCSI proc FS interface.
- - Correct Domain Validation command-line option parsing.
- - When negotiation async via an 8bit WDTR message, send
- an SDTR with an offset of 0 to be sure the target
- knows we are async. This works around a firmware defect
- in the Quantum Atlas 10K.
- - Clear PCI error state during driver attach so that we
- don't disable memory mapped I/O due to a stray write
- by some other driver probe that occurred before we
- claimed the controller.
-
- 6.2.35 (May 14th, 2003)
- - Fix a few GCC 3.3 compiler warnings.
- - Correct operation on EISA Twin Channel controller.
- - Add support for 2.5.X's scsi_report_device_reset().
-
- 6.2.34 (May 5th, 2003)
- - Fix locking regression introduced in 6.2.29 that
- could cause a lock order reversal between the io_request_lock
- and our per-softc lock. This was only possible on RH9,
- SuSE, and kernel.org 2.4.X kernels.
-
- 6.2.33 (April 30th, 2003)
- - Dynamically disable PCI parity error reporting after
- 10 errors are reported to the user. These errors are
- the result of some other device issuing PCI transactions
- with bad parity. Once the user has been informed of the
- problem, continuing to report the errors just degrades
- our performance.
-
- 6.2.32 (March 28th, 2003)
- - Dynamically sized S/G lists to avoid SCSI malloc
- pool fragmentation and SCSI mid-layer deadlock.
-
- 6.2.28 (January 20th, 2003)
- - Domain Validation Fixes
- - Add ability to disable PCI parity error checking.
- - Enhanced Memory Mapped I/O probe
-
- 6.2.20 (November 7th, 2002)
- - Added Domain Validation.
-
-3. Command Line Options
-
- WARNING: ALTERING OR ADDING THESE DRIVER PARAMETERS
- INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
- USE THEM WITH CAUTION.
-
- Put a .conf file in the /etc/modprobe.d directory and add/edit a
- line containing 'options aic7xxx aic7xxx=[command[,command...]]' where
- 'command' is one or more of the following:
- -----------------------------------------------------------------
- Option: verbose
- Definition: enable additional informative messages during
- driver operation.
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: debug:[value]
- Definition: Enables various levels of debugging information
- Possible Values: 0x0000 = no debugging, 0xffff = full debugging
- Default Value: 0x0000
- -----------------------------------------------------------------
- Option: no_probe
- Option: probe_eisa_vl
- Definition: Do not probe for EISA/VLB controllers.
- This is a toggle. If the driver is compiled
- to not probe EISA/VLB controllers by default,
- specifying "no_probe" will enable this probing.
- If the driver is compiled to probe EISA/VLB
- controllers by default, specifying "no_probe"
- will disable this probing.
- Possible Values: This option is a toggle
- Default Value: EISA/VLB probing is disabled by default.
- -----------------------------------------------------------------
- Option: pci_parity
- Definition: Toggles the detection of PCI parity errors.
- On many motherboards with VIA chipsets,
- PCI parity is not generated correctly on the
- PCI bus. It is impossible for the hardware to
- differentiate between these "spurious" parity
- errors and real parity errors. The symptom of
- this problem is a stream of the message:
- "scsi0: Data Parity Error Detected during address or write data phase"
- output by the driver.
- Possible Values: This option is a toggle
- Default Value: PCI Parity Error reporting is disabled
- -----------------------------------------------------------------
- Option: no_reset
- Definition: Do not reset the bus during the initial probe
- phase
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: extended
- Definition: Force extended translation on the controller
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: periodic_otag
- Definition: Send an ordered tag periodically to prevent
- tag starvation. Needed for some older devices
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: reverse_scan
- Definition: Probe the scsi bus in reverse order, starting
- with target 15
- Possible Values: This option is a flag
- Default Value: disabled
- -----------------------------------------------------------------
- Option: global_tag_depth:[value]
- Definition: Global tag depth for all targets on all busses.
- This option sets the default tag depth which
- may be selectively overridden vi the tag_info
- option.
- Possible Values: 1 - 253
- Default Value: 32
- -----------------------------------------------------------------
- Option: tag_info:{{value[,value...]}[,{value[,value...]}...]}
- Definition: Set the per-target tagged queue depth on a
- per controller basis. Both controllers and targets
- may be omitted indicating that they should retain
- the default tag depth.
- Examples: tag_info:{{16,32,32,64,8,8,,32,32,32,32,32,32,32,32,32}
- On Controller 0
- specifies a tag depth of 16 for target 0
- specifies a tag depth of 64 for target 3
- specifies a tag depth of 8 for targets 4 and 5
- leaves target 6 at the default
- specifies a tag depth of 32 for targets 1,2,7-15
- All other targets retain the default depth.
-
- tag_info:{{},{32,,32}}
- On Controller 1
- specifies a tag depth of 32 for targets 0 and 2
- All other targets retain the default depth.
-
- Possible Values: 1 - 253
- Default Value: 32
- -----------------------------------------------------------------
- Option: seltime:[value]
- Definition: Specifies the selection timeout value
- Possible Values: 0 = 256ms, 1 = 128ms, 2 = 64ms, 3 = 32ms
- Default Value: 0
- -----------------------------------------------------------------
- Option: dv: {value[,value...]}
- Definition: Set Domain Validation Policy on a per-controller basis.
- Controllers may be omitted indicating that
- they should retain the default read streaming setting.
- Example: dv:{-1,0,,1,1,0}
- On Controller 0 leave DV at its default setting.
- On Controller 1 disable DV.
- Skip configuration on Controller 2.
- On Controllers 3 and 4 enable DV.
- On Controller 5 disable DV.
-
- Possible Values: < 0 Use setting from serial EEPROM.
- 0 Disable DV
- > 0 Enable DV
-
- Default Value: SCSI-Select setting on controllers with a SCSI Select
- option for DV. Otherwise, on for controllers supporting
- U160 speeds and off for all other controller types.
- -----------------------------------------------------------------
-
- Example:
- 'options aic7xxx aic7xxx=verbose,no_probe,tag_info:{{},{,,10}},seltime:1'
- enables verbose logging, Disable EISA/VLB probing,
- and set tag depth on Controller 1/Target 2 to 10 tags.
-
-4. Adaptec Customer Support
-
- A Technical Support Identification (TSID) Number is required for
- Adaptec technical support.
- - The 12-digit TSID can be found on the white barcode-type label
- included inside the box with your product. The TSID helps us
- provide more efficient service by accurately identifying your
- product and support status.
-
- Support Options
- - Search the Adaptec Support Knowledgebase (ASK) at
- http://ask.adaptec.com for articles, troubleshooting tips, and
- frequently asked questions about your product.
- - For support via Email, submit your question to Adaptec's
- Technical Support Specialists at http://ask.adaptec.com/.
-
- North America
- - Visit our Web site at http://www.adaptec.com/.
- - For information about Adaptec's support options, call
- 408-957-2550, 24 hours a day, 7 days a week.
- - To speak with a Technical Support Specialist,
- * For hardware products, call 408-934-7274,
- Monday to Friday, 3:00 am to 5:00 pm, PDT.
- * For RAID and Fibre Channel products, call 321-207-2000,
- Monday to Friday, 3:00 am to 5:00 pm, PDT.
- To expedite your service, have your computer with you.
- - To order Adaptec products, including accessories and cables,
- call 408-957-7274. To order cables online go to
- http://www.adaptec.com/buy-cables/.
-
- Europe
- - Visit our Web site at http://www.adaptec.com/en-US/_common/world_index.
- - To speak with a Technical Support Specialist, call, or email,
- * German: +49 89 4366 5522, Monday-Friday, 9:00-17:00 CET,
- http://ask-de.adaptec.com/.
- * French: +49 89 4366 5533, Monday-Friday, 9:00-17:00 CET,
- http://ask-fr.adaptec.com/.
- * English: +49 89 4366 5544, Monday-Friday, 9:00-17:00 GMT,
- http://ask.adaptec.com/.
- - You can order Adaptec cables online at
- http://www.adaptec.com/buy-cables/.
-
- Japan
- - Visit our web site at http://www.adaptec.co.jp/.
- - To speak with a Technical Support Specialist, call
- +81 3 5308 6120, Monday-Friday, 9:00 a.m. to 12:00 p.m.,
- 1:00 p.m. to 6:00 p.m.
-
--------------------------------------------------------------------
-/*
- * Copyright (c) 2003 Adaptec Inc. 691 S. Milpitas Blvd., Milpitas CA 95035 USA.
- * All rights reserved.
- *
- * You are permitted to redistribute, use and modify this README file in whole
- * or in part in conjunction with redistribution of software governed by the
- * General Public License, provided that the following conditions are met:
- * 1. Redistributions of README file must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- * 3. Modifications or new contributions must be attributed in a copyright
- * notice identifying the author ("Contributor") and added below the
- * original copyright notice. The copyright notice is for purposes of
- * identifying contributors and should not be deemed as permission to alter
- * the permissions given by Adaptec.
- *
- * THIS README FILE IS PROVIDED BY ADAPTEC AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ANY
- * WARRANTIES OF NON-INFRINGEMENT OR THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * ADAPTEC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS README
- * FILE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
diff --git a/Documentation/scsi/arcmsr_spec.rst b/Documentation/scsi/arcmsr_spec.rst
new file mode 100644
index 000000000000..83dd53bcff78
--- /dev/null
+++ b/Documentation/scsi/arcmsr_spec.rst
@@ -0,0 +1,907 @@
+ARECA FIRMWARE SPEC
+===================
+
+Usage of IOP331 adapter
+=======================
+
+(All In/Out is in IOP331's view)
+
+1. Message 0
+------------
+
+- InitThread message and return code
+
+2. Doorbell is used for RS-232 emulation
+----------------------------------------
+
+inDoorBell:
+    bit0
+        data in ready
+        (DRIVER DATA WRITE OK)
+    bit1
+        data out has been read
+        (DRIVER DATA READ OK)
+
+outDoorBell:
+ bit0
+ data out ready
+ (IOP331 DATA WRITE OK)
+ bit1
+ data in has been read
+ (IOP331 DATA READ OK)
+
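+The bit assignments above can be summarized as C masks; this is only a
+sketch, and the identifier names are hypothetical rather than taken from
+the driver source::
+
+    /* inDoorBell (written by the driver, read by the IOP331) */
+    #define IN_DOORBELL_DATA_IN_READY   0x01 /* bit0: driver data write ok */
+    #define IN_DOORBELL_DATA_OUT_READ   0x02 /* bit1: driver data read ok */
+
+    /* outDoorBell (written by the IOP331, read by the driver) */
+    #define OUT_DOORBELL_DATA_OUT_READY 0x01 /* bit0: IOP331 data write ok */
+    #define OUT_DOORBELL_DATA_IN_READ   0x02 /* bit1: IOP331 data read ok */
+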
+3. Index Memory Usage
+---------------------
+
+============ ==========================================
+offset 0xf00 for RS232 out (request buffer)
+offset 0xe00 for RS232 in (scratch buffer)
+offset 0xa00 for inbound message code message_rwbuffer
+ (driver send to IOP331)
+offset 0xa00 for outbound message code message_rwbuffer
+ (IOP331 send to driver)
+============ ==========================================
+
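+For reference, the same offsets expressed as a C sketch (the enumerator
+names are hypothetical)::
+
+    enum
+    {
+        INDEX_RS232_OUT = 0xf00, /* RS232 out: request buffer */
+        INDEX_RS232_IN  = 0xe00, /* RS232 in: scratch buffer */
+        INDEX_MSG_RWBUF = 0xa00  /* message_rwbuffer, both directions */
+    };
+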
+4. RS-232 emulation
+-------------------
+
+Currently a 128-byte buffer is used:
+
+============ =====================
+1st uint32_t Data length (1--124)
+Byte 4--127 Max 124 bytes of data
+============ =====================
+
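+Expressed as a C struct, the layout might look like this sketch (the
+struct name is hypothetical)::
+
+    #include <stdint.h>
+
+    struct rs232_emul_buffer
+    {
+        uint32_t data_length; /* 1..124 */
+        uint8_t  data[124];   /* bytes 4..127, max 124 bytes of data */
+    };
+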
+5. PostQ
+--------
+
+All SCSI commands must be sent through the postQ:
+
+ (inbound queue port)
+ Request frame must be 32 bytes aligned:
+
+ #bit27--bit31
+ flag for post ccb
+ #bit0--bit26
+ real address (bit27--bit31) of post arcmsr_cdb
+
+ ===== ===================
+ bit31 == ===============
+ 0 256 bytes frame
+ 1 512 bytes frame
+ == ===============
+ bit30 == ==============
+ 0 normal request
+ 1 BIOS request
+ == ==============
+ bit29 reserved
+ bit28 reserved
+ bit27 reserved
+ ===== ===================
+
+    (outbound queue port)
+ Request reply:
+
+ #bit27--bit31
+ flag for reply
+ #bit0--bit26
+ real address (bit27--bit31) of reply arcmsr_cdb
+
+ ===== =======================================================
+ bit31 must be 0 (for this type of reply)
+ bit30 reserved for BIOS handshake
+ bit29 reserved
+ bit28 == ===================================================
+ 0 no error, ignore AdapStatus/DevStatus/SenseData
+ 1 Error, error code in AdapStatus/DevStatus/SenseData
+ == ===================================================
+ bit27 reserved
+ ===== =======================================================
+
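+One plausible reading of the inbound encoding above, as a hedged C
+sketch: since the arcmsr_cdb frame is 32-byte aligned, its low 5 address
+bits are zero, so shifting the address right by 5 lets it fit in bits
+0--26 while bits 27--31 carry the flags (names are hypothetical)::
+
+    #include <stdint.h>
+
+    #define POSTQ_FRAME_512 (1u << 31) /* 0: 256-byte, 1: 512-byte frame */
+    #define POSTQ_BIOS_REQ  (1u << 30) /* 0: normal, 1: BIOS request */
+
+    static inline uint32_t postq_entry(uint32_t cdb_phyaddr, uint32_t flags)
+    {
+        return (cdb_phyaddr >> 5) | flags; /* address in bits 0..26 */
+    }
+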
+6. BIOS request
+---------------
+
+All BIOS requests are the same as requests from the PostQ,
+except that the request frame is sent from configuration space:
+
+ ============ ==========================
+ offset: 0x78 Request Frame (bit30 == 1)
+ offset: 0x18 writeonly to generate
+ IRQ to IOP331
+ ============ ==========================
+
+Completion of request::
+
+ (bit30 == 0, bit28==err flag)
+
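+A hedged sketch of posting such a BIOS request from Linux with the
+standard PCI config-space accessors; the value written to offset 0x18 is
+an assumption, since the spec only says the write generates the IRQ::
+
+    #include <linux/pci.h>
+
+    static void post_bios_request(struct pci_dev *pdev, u32 frame)
+    {
+        pci_write_config_dword(pdev, 0x78, frame); /* frame, bit30 == 1 */
+        pci_write_config_dword(pdev, 0x18, 0);     /* write-only: raise IRQ */
+    }
+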
+7. Definition of SGL entry (structure)
+--------------------------------------
+
+8. Message1 Out - Diag Status Code (????)
+-----------------------------------------
+
+9. Message0 message code
+------------------------
+
+====== =================================================================
+0x00 NOP
+0x01 Get Config
+       -> offset 0xa00: for outbound message code message_rwbuffer
+ (IOP331 send to driver)
+
+ ===================== ==========================================
+ Signature 0x87974060(4)
+ Request len 0x00000200(4)
+ numbers of queue 0x00000100(4)
+ SDRAM Size 0x00000100(4)-->256 MB
+ IDE Channels 0x00000008(4)
+ vendor 40 bytes char
+ model 8 bytes char
+ FirmVer 16 bytes char
+ Device Map 16 bytes char
+ FirmwareVersion DWORD
+
+ - Added for checking of
+ new firmware capability
+ ===================== ==========================================
+0x02 Set Config
+       -> offset 0xa00: for inbound message code message_rwbuffer
+ (driver send to IOP331)
+
+ ========================= ==================
+ Signature 0x87974063(4)
+ UPPER32 of Request Frame (4)-->Driver Only
+ ========================= ==================
+0x03 Reset (Abort all queued Command)
+0x04 Stop Background Activity
+0x05 Flush Cache
+0x06 Start Background Activity
+ (re-start if background is halted)
+0x07 Check If Host Command Pending
+ (Novell May Need This Function)
+0x08 Set controller time
+       -> offset 0xa00: for inbound message code message_rwbuffer
+ (driver to IOP331)
+
+ ====== ==================
+ byte 0 0xaa <-- signature
+ byte 1 0x55 <-- signature
+ byte 2 year (04)
+ byte 3 month (1..12)
+ byte 4 date (1..31)
+ byte 5 hour (0..23)
+ byte 6 minute (0..59)
+ byte 7 second (0..59)
+ ====== ==================
+====== =================================================================
+
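+As an illustration of the Set-controller-time body (message code 0x08),
+a hedged C sketch; the helper name is hypothetical::
+
+    #include <stdint.h>
+
+    static void pack_set_time(uint8_t buf[8], int year, int month, int day,
+                              int hour, int minute, int second)
+    {
+        buf[0] = 0xaa;            /* signature */
+        buf[1] = 0x55;            /* signature */
+        buf[2] = (uint8_t)year;   /* e.g. 04 */
+        buf[3] = (uint8_t)month;  /* 1..12 */
+        buf[4] = (uint8_t)day;    /* 1..31 */
+        buf[5] = (uint8_t)hour;   /* 0..23 */
+        buf[6] = (uint8_t)minute; /* 0..59 */
+        buf[7] = (uint8_t)second; /* 0..59 */
+    }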
+
+RS-232 Interface for Areca Raid Controller
+==========================================
+
+    The low-level command interface is exclusive with the VT100 terminal.
+
+1. Sequence of command execution
+--------------------------------
+
+ (A) Header
+ 3 bytes sequence (0x5E, 0x01, 0x61)
+
+ (B) Command block
+ variable length of data including length,
+ command code, data and checksum byte
+
+ (C) Return data
+ variable length of data
+
+2. Command block
+----------------
+
+ (A) 1st byte
+ command block length (low byte)
+
+ (B) 2nd byte
+ command block length (high byte)
+
+    .. Note:: the command block length must not exceed 2040 bytes;
+       the length excludes these two bytes
+
+ (C) 3rd byte
+ command code
+
+ (D) 4th and following bytes
+ variable length data bytes
+
+ depends on command code
+
+ (E) last byte
+ checksum byte (sum of 1st byte until last data byte); see the sketch below
+
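+A minimal sketch of assembling (A)--(E) above (illustrative only; it
+assumes the length field counts the command code, data and checksum
+bytes, since the text only says it excludes the two length bytes)::
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Sketch: build [len_lo][len_hi][code][data...][checksum].
+     * out must have room for n + 4 bytes. The checksum is the 8-bit
+     * sum of the 1st byte through the last data byte, per (E). */
+    static size_t build_cmd_block(uint8_t *out, uint8_t code,
+                                  const uint8_t *data, size_t n)
+    {
+            size_t len = 1 + n + 1;        /* code + data + checksum (assumed) */
+            uint8_t sum = 0;
+            size_t i;
+
+            out[0] = len & 0xff;           /* length, low byte first */
+            out[1] = (len >> 8) & 0xff;
+            out[2] = code;
+            for (i = 0; i < n; i++)
+                    out[3 + i] = data[i];
+            for (i = 0; i < 3 + n; i++)    /* 1st byte .. last data byte */
+                    sum += out[i];
+            out[3 + n] = sum;
+            return 4 + n;                  /* total bytes to transmit */
+    }
+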
+3. Command code and associated data
+-----------------------------------
+
+The following are the command codes defined by the raid controller.
+Command codes 0x10--0x1? are used for system level management;
+no password checking is needed, and they should be implemented in a
+separate, well controlled utility and not exposed to end users.
+Command codes 0x20--0x?? always check the password;
+the password must be entered to enable these commands::
+
+ enum
+ {
+ GUI_SET_SERIAL=0x10,
+ GUI_SET_VENDOR,
+ GUI_SET_MODEL,
+ GUI_IDENTIFY,
+ GUI_CHECK_PASSWORD,
+ GUI_LOGOUT,
+ GUI_HTTP,
+ GUI_SET_ETHERNET_ADDR,
+ GUI_SET_LOGO,
+ GUI_POLL_EVENT,
+ GUI_GET_EVENT,
+ GUI_GET_HW_MONITOR,
+ // GUI_QUICK_CREATE=0x20, (function removed)
+ GUI_GET_INFO_R=0x20,
+ GUI_GET_INFO_V,
+ GUI_GET_INFO_P,
+ GUI_GET_INFO_S,
+ GUI_CLEAR_EVENT,
+ GUI_MUTE_BEEPER=0x30,
+ GUI_BEEPER_SETTING,
+ GUI_SET_PASSWORD,
+ GUI_HOST_INTERFACE_MODE,
+ GUI_REBUILD_PRIORITY,
+ GUI_MAX_ATA_MODE,
+ GUI_RESET_CONTROLLER,
+ GUI_COM_PORT_SETTING,
+ GUI_NO_OPERATION,
+ GUI_DHCP_IP,
+ GUI_CREATE_PASS_THROUGH=0x40,
+ GUI_MODIFY_PASS_THROUGH,
+ GUI_DELETE_PASS_THROUGH,
+ GUI_IDENTIFY_DEVICE,
+ GUI_CREATE_RAIDSET=0x50,
+ GUI_DELETE_RAIDSET,
+ GUI_EXPAND_RAIDSET,
+ GUI_ACTIVATE_RAIDSET,
+ GUI_CREATE_HOT_SPARE,
+ GUI_DELETE_HOT_SPARE,
+ GUI_CREATE_VOLUME=0x60,
+ GUI_MODIFY_VOLUME,
+ GUI_DELETE_VOLUME,
+ GUI_START_CHECK_VOLUME,
+ GUI_STOP_CHECK_VOLUME
+ };
+
+Command description
+^^^^^^^^^^^^^^^^^^^
+
+GUI_SET_SERIAL
+ Set the controller serial#
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x10
+ byte 3 password length (should be 0x0f)
+ byte 4-0x13 should be "ArEcATecHnoLogY"
+ byte 0x14--0x23 Serial number string (must be 16 bytes)
+ ================ =============================================
+
+GUI_SET_VENDOR
+ Set vendor string for the controller
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x11
+ byte 3 password length (should be 0x08)
+ byte 4-0x13 should be "ArEcAvAr"
+ byte 0x14--0x3B vendor string (must be 40 bytes)
+ ================ =============================================
+
+GUI_SET_MODEL
+ Set the model name of the controller
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x12
+ byte 3 password length (should be 0x08)
+ byte 4-0x13 should be "ArEcAvAr"
+ byte 0x14--0x1B model string (must be 8 bytes)
+ ================ =============================================
+
+GUI_IDENTIFY
+ Identify device
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x13
+ return "Areca RAID Subsystem "
+ ================ =============================================
+
+GUI_CHECK_PASSWORD
+ Verify password
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x14
+ byte 3 password length
+ byte 4-0x?? user password to be checked
+ ================ =============================================
+
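+    Continuing the command-block sketch from section 2, a password check
+    might be issued like this (illustrative only; serial_write() is a
+    hypothetical transport helper, and build_cmd_block() is the earlier
+    sketch)::
+
+        #include <stdint.h>
+        #include <string.h>
+
+        /* serial_write() is a hypothetical transport helper. */
+        extern int serial_write(const void *buf, size_t len);
+
+        static int gui_check_password(const char *pw)
+        {
+                static const uint8_t hdr[3] = { 0x5e, 0x01, 0x61 };
+                uint8_t data[32], blk[64];
+                size_t pwlen = strlen(pw), n;
+
+                if (pwlen > 15)                  /* per GUI_SET_PASSWORD */
+                        return -1;
+                data[0] = (uint8_t)pwlen;        /* byte 3: password length */
+                memcpy(&data[1], pw, pwlen);     /* byte 4..: the password */
+                n = build_cmd_block(blk, 0x14 /* GUI_CHECK_PASSWORD */,
+                                    data, pwlen + 1);
+                serial_write(hdr, sizeof(hdr));  /* 3-byte header */
+                return serial_write(blk, n);     /* command block */
+        }
+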
+GUI_LOGOUT
+ Logout GUI (force password checking on next command)
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x15
+ ================ =============================================
+
+GUI_HTTP
+ HTTP interface (reserved for HTTP proxy service) (0x16)
+
+GUI_SET_ETHERNET_ADDR
+ Set the ethernet MAC address
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x17
+ byte 3 password length (should be 0x08)
+ byte 4-0x13 should be "ArEcAvAr"
+ byte 0x14--0x19 Ethernet MAC address (must be 6 bytes)
+ ================ =============================================
+
+GUI_SET_LOGO
+ Set logo in HTTP
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x18
+ byte 3 Page# (0/1/2/3) (0xff --> clear OEM logo)
+ byte 4/5/6/7 0x55/0xaa/0xa5/0x5a
+ byte 8 TITLE.JPG data (each page must be 2000 bytes)
+
+ .. Note:: the 1st 2 bytes of page 0 must be the
+ actual length of the JPG file
+ ================ =============================================
+
+GUI_POLL_EVENT
+ Poll If Event Log Changed
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x19
+ ================ =============================================
+
+GUI_GET_EVENT
+ Read Event
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x1a
+ byte 3 Event Page (0:1st page/1/2/3:last page)
+ ================ =============================================
+
+GUI_GET_HW_MONITOR
+ Get HW monitor data
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x1b
+ byte 3 # of FANs (example: 2)
+ byte 4 # of voltage sensors (example: 3)
+ byte 5 # of temperature sensors (example: 2)
+ byte 6 # of power supplies
+ byte 7/8 Fan#0 (RPM)
+ byte 9/10 Fan#1
+ byte 11/12 Voltage#0 original value in ``*1000``
+ byte 13/14 Voltage#0 value
+ byte 15/16 Voltage#1 org
+ byte 17/18 Voltage#1
+ byte 19/20 Voltage#2 org
+ byte 21/22 Voltage#2
+ byte 23 Temp#0
+ byte 24 Temp#1
+ byte 25 Power indicator (bit0 power#0,
+ bit1 power#1)
+ byte 26 UPS indicator
+ ================ =============================================
+
+GUI_QUICK_CREATE
+ Quick create raid/volume set
+
+ ================ ==============================================
+ byte 0,1 length
+ byte 2 command code 0x20
+ byte 3/4/5/6 raw capacity
+ byte 7 raid level
+ byte 8 stripe size
+ byte 9 spare
+ byte 10/11/12/13 device mask (the devices to create raid/volume)
+ ================ ==============================================
+
+ This function has been removed. Applications that need
+ quick-create functionality should use the GUI_CREATE_RAIDSET
+ and GUI_CREATE_VOLUMESET functions instead.
+
+GUI_GET_INFO_R
+ Get Raid Set Information
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x20
+ byte 3 raidset#
+ ================ =============================================
+
+ ::
+
+ typedef struct sGUI_RAIDSET
+ {
+ BYTE grsRaidSetName[16];
+ DWORD grsCapacity;
+ DWORD grsCapacityX;
+ DWORD grsFailMask;
+ BYTE grsDevArray[32];
+ BYTE grsMemberDevices;
+ BYTE grsNewMemberDevices;
+ BYTE grsRaidState;
+ BYTE grsVolumes;
+ BYTE grsVolumeList[16];
+ BYTE grsRes1;
+ BYTE grsRes2;
+ BYTE grsRes3;
+ BYTE grsFreeSegments;
+ DWORD grsRawStripes[8];
+ DWORD grsRes4;
+ DWORD grsRes5; // Total to 128 bytes
+ DWORD grsRes6; // Total to 128 bytes
+ } sGUI_RAIDSET, *pGUI_RAIDSET;
+
+GUI_GET_INFO_V
+ Get Volume Set Information
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x21
+ byte 3 volumeset#
+ ================ =============================================
+
+ ::
+
+ typedef struct sGUI_VOLUMESET
+ {
+ BYTE gvsVolumeName[16]; // 16
+ DWORD gvsCapacity;
+ DWORD gvsCapacityX;
+ DWORD gvsFailMask;
+ DWORD gvsStripeSize;
+ DWORD gvsNewFailMask;
+ DWORD gvsNewStripeSize;
+ DWORD gvsVolumeStatus;
+ DWORD gvsProgress; // 32
+ sSCSI_ATTR gvsScsi;
+ BYTE gvsMemberDisks;
+ BYTE gvsRaidLevel; // 8
+ BYTE gvsNewMemberDisks;
+ BYTE gvsNewRaidLevel;
+ BYTE gvsRaidSetNumber;
+ BYTE gvsRes0; // 4
+ BYTE gvsRes1[4]; // 64 bytes
+ } sGUI_VOLUMESET, *pGUI_VOLUMESET;
+
+GUI_GET_INFO_P
+ Get Physical Drive Information
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x22
+ byte 3 drive # (from 0 to max-channels - 1)
+ ================ =============================================
+
+ ::
+
+ typedef struct sGUI_PHY_DRV
+ {
+ BYTE gpdModelName[40];
+ BYTE gpdSerialNumber[20];
+ BYTE gpdFirmRev[8];
+ DWORD gpdCapacity;
+ DWORD gpdCapacityX; // Reserved for expansion
+ BYTE gpdDeviceState;
+ BYTE gpdPioMode;
+ BYTE gpdCurrentUdmaMode;
+ BYTE gpdUdmaMode;
+ BYTE gpdDriveSelect;
+ BYTE gpdRaidNumber; // 0xff if not belongs to a raid set
+ sSCSI_ATTR gpdScsi;
+ BYTE gpdReserved[40]; // Total to 128 bytes
+ } sGUI_PHY_DRV, *pGUI_PHY_DRV;
+
+GUI_GET_INFO_S
+ Get System Information
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x23
+ ================ =============================================
+
+ ::
+
+ typedef struct sCOM_ATTR
+ {
+ BYTE comBaudRate;
+ BYTE comDataBits;
+ BYTE comStopBits;
+ BYTE comParity;
+ BYTE comFlowControl;
+ } sCOM_ATTR, *pCOM_ATTR;
+ typedef struct sSYSTEM_INFO
+ {
+ BYTE gsiVendorName[40];
+ BYTE gsiSerialNumber[16];
+ BYTE gsiFirmVersion[16];
+ BYTE gsiBootVersion[16];
+ BYTE gsiMbVersion[16];
+ BYTE gsiModelName[8];
+ BYTE gsiLocalIp[4];
+ BYTE gsiCurrentIp[4];
+ DWORD gsiTimeTick;
+ DWORD gsiCpuSpeed;
+ DWORD gsiICache;
+ DWORD gsiDCache;
+ DWORD gsiScache;
+ DWORD gsiMemorySize;
+ DWORD gsiMemorySpeed;
+ DWORD gsiEvents;
+ BYTE gsiMacAddress[6];
+ BYTE gsiDhcp;
+ BYTE gsiBeeper;
+ BYTE gsiChannelUsage;
+ BYTE gsiMaxAtaMode;
+ BYTE gsiSdramEcc; // 1:if ECC enabled
+ BYTE gsiRebuildPriority;
+ sCOM_ATTR gsiComA; // 5 bytes
+ sCOM_ATTR gsiComB; // 5 bytes
+ BYTE gsiIdeChannels;
+ BYTE gsiScsiHostChannels;
+ BYTE gsiIdeHostChannels;
+ BYTE gsiMaxVolumeSet;
+ BYTE gsiMaxRaidSet;
+ BYTE gsiEtherPort; // 1:if ether net port supported
+ BYTE gsiRaid6Engine; // 1:Raid6 engine supported
+ BYTE gsiRes[75];
+ } sSYSTEM_INFO, *pSYSTEM_INFO;
+
+GUI_CLEAR_EVENT
+ Clear System Event
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x24
+ ================ =============================================
+
+GUI_MUTE_BEEPER
+ Mute current beeper
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x30
+ ================ =============================================
+GUI_BEEPER_SETTING
+ Disable beeper
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x31
+ byte 3 0->disable, 1->enable
+ ================ =============================================
+
+GUI_SET_PASSWORD
+ Change password
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x32
+ byte 3 password length (must be <= 15)
+ byte 4 password (must be alpha-numerical)
+ ================ =============================================
+
+GUI_HOST_INTERFACE_MODE
+ Set host interface mode
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x33
+ byte 3 0->Independent, 1->cluster
+ ================ =============================================
+
+GUI_REBUILD_PRIORITY
+ Set rebuild priority
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x34
+ byte 3 0/1/2/3 (low->high)
+ ================ =============================================
+
+GUI_MAX_ATA_MODE
+ Set maximum ATA mode to be used
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x35
+ byte 3 0/1/2/3 (133/100/66/33)
+ ================ =============================================
+
+GUI_RESET_CONTROLLER
+ Reset Controller
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x36
+ * Response with VT100 screen (discard it)
+ ================ =============================================
+
+GUI_COM_PORT_SETTING
+ COM port setting
+
+ ================ =================================================
+ byte 0,1 length
+ byte 2 command code 0x37
+ byte 3 0->COMA (term port),
+ 1->COMB (debug port)
+ byte 4 0/1/2/3/4/5/6/7
+ (1200/2400/4800/9600/19200/38400/57600/115200)
+ byte 5 data bit
+ (0: 7 bit, 1: 8 bit; must be 8 bit)
+ byte 6 stop bit (0:1, 1:2 stop bits)
+ byte 7 parity (0: none, 1: odd, 2: even)
+ byte 8 flow control
+ (0:none, 1:xon/xoff, 2:hardware => must use none)
+ ================ =================================================
+
+GUI_NO_OPERATION
+ No operation
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x38
+ ================ =============================================
+
+GUI_DHCP_IP
+ Set DHCP option and local IP address
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x39
+ byte 3 0:dhcp disabled, 1:dhcp enabled
+ byte 4/5/6/7 IP address
+ ================ =============================================
+
+GUI_CREATE_PASS_THROUGH
+ Create pass through disk
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x40
+ byte 3 device #
+ byte 4 scsi channel (0/1)
+ byte 5 scsi id (0-->15)
+ byte 6 scsi lun (0-->7)
+ byte 7 tagged queue (1 enabled)
+ byte 8 cache mode (1 enabled)
+ byte 9 max speed (0/1/2/3/4,
+ async/20/40/80/160 for scsi)
+ (0/1/2/3/4, 33/66/100/133/150 for ide )
+ ================ =============================================
+
+GUI_MODIFY_PASS_THROUGH
+ Modify pass through disk
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x41
+ byte 3 device #
+ byte 4 scsi channel (0/1)
+ byte 5 scsi id (0-->15)
+ byte 6 scsi lun (0-->7)
+ byte 7 tagged queue (1 enabled)
+ byte 8 cache mode (1 enabled)
+ byte 9 max speed (0/1/2/3/4,
+ async/20/40/80/160 for scsi)
+ (0/1/2/3/4, 33/66/100/133/150 for ide )
+ ================ =============================================
+
+GUI_DELETE_PASS_THROUGH
+ Delete pass through disk
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x42
+ byte 3 device# to be deleted
+ ================ =============================================
+GUI_IDENTIFY_DEVICE
+ Identify Device
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x43
+ byte 3 Flash Method
+ (0:flash selected, 1:flash not selected)
+ byte 4/5/6/7 IDE device mask to be flashed
+ .. Note:: no response data available
+ ================ =============================================
+
+GUI_CREATE_RAIDSET
+ Create Raid Set
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x50
+ byte 3/4/5/6 device mask
+ byte 7-22 raidset name (if byte 7 == 0:use default)
+ ================ =============================================
+
+GUI_DELETE_RAIDSET
+ Delete Raid Set
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x51
+ byte 3 raidset#
+ ================ =============================================
+
+GUI_EXPAND_RAIDSET
+ Expand Raid Set
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x52
+ byte 3 raidset#
+ byte 4/5/6/7 device mask for expansion
+ byte 8/9/10 (8:0 no change, 1 change, 0xff:terminate,
+ 9:new raid level,
+ 10:new stripe size
+ 0/1/2/3/4/5->4/8/16/32/64/128K )
+ byte 11/12/13 repeat for each volume in the raidset
+ ================ =============================================
+
+GUI_ACTIVATE_RAIDSET
+ Activate incomplete raid set
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x53
+ byte 3 raidset#
+ ================ =============================================
+
+GUI_CREATE_HOT_SPARE
+ Create hot spare disk
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x54
+ byte 3/4/5/6 device mask for hot spare creation
+ ================ =============================================
+
+GUI_DELETE_HOT_SPARE
+ Delete hot spare disk
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x55
+ byte 3/4/5/6 device mask for hot spare deletion
+ ================ =============================================
+
+GUI_CREATE_VOLUME
+ Create volume set
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x60
+ byte 3 raidset#
+ byte 4-19 volume set name
+ (if byte4 == 0, use default)
+ byte 20-27 volume capacity (blocks)
+ byte 28 raid level
+ byte 29 stripe size
+ (0/1/2/3/4/5->4/8/16/32/64/128K)
+ byte 30 channel
+ byte 31 ID
+ byte 32 LUN
+ byte 33 1 enable tag
+ byte 34 1 enable cache
+ byte 35 speed
+ (0/1/2/3/4->async/20/40/80/160 for scsi)
+ (0/1/2/3/4->33/66/100/133/150 for IDE )
+ byte 36 1 to select quick init
+ ================ =============================================
+
+GUI_MODIFY_VOLUME
+ Modify volume Set
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x61
+ byte 3 volumeset#
+ byte 4-19 new volume set name
+ (if byte4 == 0, not change)
+ byte 20-27 new volume capacity (reserved)
+ byte 28 new raid level
+ byte 29 new stripe size
+ (0/1/2/3/4/5->4/8/16/32/64/128K)
+ byte 30 new channel
+ byte 31 new ID
+ byte 32 new LUN
+ byte 33 1 enable tag
+ byte 34 1 enable cache
+ byte 35 speed
+ (0/1/2/3/4->async/20/40/80/160 for scsi)
+ (0/1/2/3/4->33/66/100/133/150 for IDE )
+ ================ =============================================
+
+GUI_DELETE_VOLUME
+ Delete volume set
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x62
+ byte 3 volumeset#
+ ================ =============================================
+
+GUI_START_CHECK_VOLUME
+ Start volume consistency check
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x63
+ byte 3 volumeset#
+ ================ =============================================
+
+GUI_STOP_CHECK_VOLUME
+ Stop volume consistency check
+
+ ================ =============================================
+ byte 0,1 length
+ byte 2 command code 0x64
+ ================ =============================================
+
+4. Returned data
+----------------
+
+(A) Header
+ 3 bytes sequence (0x5E, 0x01, 0x61)
+(B) Length
+ 2 bytes
+ (low byte 1st, excludes length and checksum byte)
+(C) Status or data:
+
+ 1) If length == 1 ==> 1 byte status code::
+
+ #define GUI_OK 0x41
+ #define GUI_RAIDSET_NOT_NORMAL 0x42
+ #define GUI_VOLUMESET_NOT_NORMAL 0x43
+ #define GUI_NO_RAIDSET 0x44
+ #define GUI_NO_VOLUMESET 0x45
+ #define GUI_NO_PHYSICAL_DRIVE 0x46
+ #define GUI_PARAMETER_ERROR 0x47
+ #define GUI_UNSUPPORTED_COMMAND 0x48
+ #define GUI_DISK_CONFIG_CHANGED 0x49
+ #define GUI_INVALID_PASSWORD 0x4a
+ #define GUI_NO_DISK_SPACE 0x4b
+ #define GUI_CHECKSUM_ERROR 0x4c
+ #define GUI_PASSWORD_REQUIRED 0x4d
+
+ 2) If length > 1:
+
+ a data block is returned from the controller;
+ its contents depend on the command code
+
+(D) Checksum
+ checksum of length and status or data byte
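+
+As a closing sketch (hedged like the earlier examples; per (D) above,
+the checksum covers the two length bytes plus the payload)::
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Sketch: validate a reply of the form
+     * [0x5E 0x01 0x61][len_lo][len_hi][payload...][checksum].
+     * Returns the payload length, or -1 on a malformed reply. */
+    static int parse_reply(const uint8_t *buf, size_t total)
+    {
+            uint16_t len;
+            uint8_t sum = 0;
+            size_t i;
+
+            if (total < 7 || buf[0] != 0x5e || buf[1] != 0x01 || buf[2] != 0x61)
+                    return -1;
+            len = buf[3] | (buf[4] << 8);          /* low byte first */
+            if (total < (size_t)len + 6)
+                    return -1;
+            for (i = 3; i < 5 + (size_t)len; i++)  /* length + payload */
+                    sum += buf[i];
+            if (sum != buf[5 + len])
+                    return -1;                     /* checksum mismatch */
+            return len;  /* len == 1: buf[5] is one of the status codes above */
+    }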
+
diff --git a/Documentation/scsi/arcmsr_spec.txt b/Documentation/scsi/arcmsr_spec.txt
deleted file mode 100644
index 45d9482c1517..000000000000
--- a/Documentation/scsi/arcmsr_spec.txt
+++ /dev/null
@@ -1,574 +0,0 @@
-*******************************************************************************
-** ARECA FIRMWARE SPEC
-*******************************************************************************
-** Usage of IOP331 adapter
-** (All In/Out is in IOP331's view)
-** 1. Message 0 --> InitThread message and return code
-** 2. Doorbell is used for RS-232 emulation
-** inDoorBell : bit0 -- data in ready
-** (DRIVER DATA WRITE OK)
-** bit1 -- data out has been read
-** (DRIVER DATA READ OK)
-** outDooeBell: bit0 -- data out ready
-** (IOP331 DATA WRITE OK)
-** bit1 -- data in has been read
-** (IOP331 DATA READ OK)
-** 3. Index Memory Usage
-** offset 0xf00 : for RS232 out (request buffer)
-** offset 0xe00 : for RS232 in (scratch buffer)
-** offset 0xa00 : for inbound message code message_rwbuffer
-** (driver send to IOP331)
-** offset 0xa00 : for outbound message code message_rwbuffer
-** (IOP331 send to driver)
-** 4. RS-232 emulation
-** Currently 128 byte buffer is used
-** 1st uint32_t : Data length (1--124)
-** Byte 4--127 : Max 124 bytes of data
-** 5. PostQ
-** All SCSI Command must be sent through postQ:
-** (inbound queue port) Request frame must be 32 bytes aligned
-** #bit27--bit31 => flag for post ccb
-** #bit0--bit26 => real address (bit27--bit31) of post arcmsr_cdb
-** bit31 :
-** 0 : 256 bytes frame
-** 1 : 512 bytes frame
-** bit30 :
-** 0 : normal request
-** 1 : BIOS request
-** bit29 : reserved
-** bit28 : reserved
-** bit27 : reserved
-** ---------------------------------------------------------------------------
-** (outbount queue port) Request reply
-** #bit27--bit31
-** => flag for reply
-** #bit0--bit26
-** => real address (bit27--bit31) of reply arcmsr_cdb
-** bit31 : must be 0 (for this type of reply)
-** bit30 : reserved for BIOS handshake
-** bit29 : reserved
-** bit28 :
-** 0 : no error, ignore AdapStatus/DevStatus/SenseData
-** 1 : Error, error code in AdapStatus/DevStatus/SenseData
-** bit27 : reserved
-** 6. BIOS request
-** All BIOS request is the same with request from PostQ
-** Except :
-** Request frame is sent from configuration space
-** offset: 0x78 : Request Frame (bit30 == 1)
-** offset: 0x18 : writeonly to generate
-** IRQ to IOP331
-** Completion of request:
-** (bit30 == 0, bit28==err flag)
-** 7. Definition of SGL entry (structure)
-** 8. Message1 Out - Diag Status Code (????)
-** 9. Message0 message code :
-** 0x00 : NOP
-** 0x01 : Get Config
-** ->offset 0xa00 :for outbound message code message_rwbuffer
-** (IOP331 send to driver)
-** Signature 0x87974060(4)
-** Request len 0x00000200(4)
-** numbers of queue 0x00000100(4)
-** SDRAM Size 0x00000100(4)-->256 MB
-** IDE Channels 0x00000008(4)
-** vendor 40 bytes char
-** model 8 bytes char
-** FirmVer 16 bytes char
-** Device Map 16 bytes char
-** FirmwareVersion DWORD <== Added for checking of
-** new firmware capability
-** 0x02 : Set Config
-** ->offset 0xa00 :for inbound message code message_rwbuffer
-** (driver send to IOP331)
-** Signature 0x87974063(4)
-** UPPER32 of Request Frame (4)-->Driver Only
-** 0x03 : Reset (Abort all queued Command)
-** 0x04 : Stop Background Activity
-** 0x05 : Flush Cache
-** 0x06 : Start Background Activity
-** (re-start if background is halted)
-** 0x07 : Check If Host Command Pending
-** (Novell May Need This Function)
-** 0x08 : Set controller time
-** ->offset 0xa00 : for inbound message code message_rwbuffer
-** (driver to IOP331)
-** byte 0 : 0xaa <-- signature
-** byte 1 : 0x55 <-- signature
-** byte 2 : year (04)
-** byte 3 : month (1..12)
-** byte 4 : date (1..31)
-** byte 5 : hour (0..23)
-** byte 6 : minute (0..59)
-** byte 7 : second (0..59)
-*******************************************************************************
-*******************************************************************************
-** RS-232 Interface for Areca Raid Controller
-** The low level command interface is exclusive with VT100 terminal
-** --------------------------------------------------------------------
-** 1. Sequence of command execution
-** --------------------------------------------------------------------
-** (A) Header : 3 bytes sequence (0x5E, 0x01, 0x61)
-** (B) Command block : variable length of data including length,
-** command code, data and checksum byte
-** (C) Return data : variable length of data
-** --------------------------------------------------------------------
-** 2. Command block
-** --------------------------------------------------------------------
-** (A) 1st byte : command block length (low byte)
-** (B) 2nd byte : command block length (high byte)
-** note ..command block length shouldn't > 2040 bytes,
-** length excludes these two bytes
-** (C) 3rd byte : command code
-** (D) 4th and following bytes : variable length data bytes
-** depends on command code
-** (E) last byte : checksum byte (sum of 1st byte until last data byte)
-** --------------------------------------------------------------------
-** 3. Command code and associated data
-** --------------------------------------------------------------------
-** The following are command code defined in raid controller Command
-** code 0x10--0x1? are used for system level management,
-** no password checking is needed and should be implemented in separate
-** well controlled utility and not for end user access.
-** Command code 0x20--0x?? always check the password,
-** password must be entered to enable these command.
-** enum
-** {
-** GUI_SET_SERIAL=0x10,
-** GUI_SET_VENDOR,
-** GUI_SET_MODEL,
-** GUI_IDENTIFY,
-** GUI_CHECK_PASSWORD,
-** GUI_LOGOUT,
-** GUI_HTTP,
-** GUI_SET_ETHERNET_ADDR,
-** GUI_SET_LOGO,
-** GUI_POLL_EVENT,
-** GUI_GET_EVENT,
-** GUI_GET_HW_MONITOR,
-** // GUI_QUICK_CREATE=0x20, (function removed)
-** GUI_GET_INFO_R=0x20,
-** GUI_GET_INFO_V,
-** GUI_GET_INFO_P,
-** GUI_GET_INFO_S,
-** GUI_CLEAR_EVENT,
-** GUI_MUTE_BEEPER=0x30,
-** GUI_BEEPER_SETTING,
-** GUI_SET_PASSWORD,
-** GUI_HOST_INTERFACE_MODE,
-** GUI_REBUILD_PRIORITY,
-** GUI_MAX_ATA_MODE,
-** GUI_RESET_CONTROLLER,
-** GUI_COM_PORT_SETTING,
-** GUI_NO_OPERATION,
-** GUI_DHCP_IP,
-** GUI_CREATE_PASS_THROUGH=0x40,
-** GUI_MODIFY_PASS_THROUGH,
-** GUI_DELETE_PASS_THROUGH,
-** GUI_IDENTIFY_DEVICE,
-** GUI_CREATE_RAIDSET=0x50,
-** GUI_DELETE_RAIDSET,
-** GUI_EXPAND_RAIDSET,
-** GUI_ACTIVATE_RAIDSET,
-** GUI_CREATE_HOT_SPARE,
-** GUI_DELETE_HOT_SPARE,
-** GUI_CREATE_VOLUME=0x60,
-** GUI_MODIFY_VOLUME,
-** GUI_DELETE_VOLUME,
-** GUI_START_CHECK_VOLUME,
-** GUI_STOP_CHECK_VOLUME
-** };
-** Command description :
-** GUI_SET_SERIAL : Set the controller serial#
-** byte 0,1 : length
-** byte 2 : command code 0x10
-** byte 3 : password length (should be 0x0f)
-** byte 4-0x13 : should be "ArEcATecHnoLogY"
-** byte 0x14--0x23 : Serial number string (must be 16 bytes)
-** GUI_SET_VENDOR : Set vendor string for the controller
-** byte 0,1 : length
-** byte 2 : command code 0x11
-** byte 3 : password length (should be 0x08)
-** byte 4-0x13 : should be "ArEcAvAr"
-** byte 0x14--0x3B : vendor string (must be 40 bytes)
-** GUI_SET_MODEL : Set the model name of the controller
-** byte 0,1 : length
-** byte 2 : command code 0x12
-** byte 3 : password length (should be 0x08)
-** byte 4-0x13 : should be "ArEcAvAr"
-** byte 0x14--0x1B : model string (must be 8 bytes)
-** GUI_IDENTIFY : Identify device
-** byte 0,1 : length
-** byte 2 : command code 0x13
-** return "Areca RAID Subsystem "
-** GUI_CHECK_PASSWORD : Verify password
-** byte 0,1 : length
-** byte 2 : command code 0x14
-** byte 3 : password length
-** byte 4-0x?? : user password to be checked
-** GUI_LOGOUT : Logout GUI (force password checking on next command)
-** byte 0,1 : length
-** byte 2 : command code 0x15
-** GUI_HTTP : HTTP interface (reserved for Http proxy service)(0x16)
-**
-** GUI_SET_ETHERNET_ADDR : Set the ethernet MAC address
-** byte 0,1 : length
-** byte 2 : command code 0x17
-** byte 3 : password length (should be 0x08)
-** byte 4-0x13 : should be "ArEcAvAr"
-** byte 0x14--0x19 : Ethernet MAC address (must be 6 bytes)
-** GUI_SET_LOGO : Set logo in HTTP
-** byte 0,1 : length
-** byte 2 : command code 0x18
-** byte 3 : Page# (0/1/2/3) (0xff --> clear OEM logo)
-** byte 4/5/6/7 : 0x55/0xaa/0xa5/0x5a
-** byte 8 : TITLE.JPG data (each page must be 2000 bytes)
-** note page0 1st 2 byte must be
-** actual length of the JPG file
-** GUI_POLL_EVENT : Poll If Event Log Changed
-** byte 0,1 : length
-** byte 2 : command code 0x19
-** GUI_GET_EVENT : Read Event
-** byte 0,1 : length
-** byte 2 : command code 0x1a
-** byte 3 : Event Page (0:1st page/1/2/3:last page)
-** GUI_GET_HW_MONITOR : Get HW monitor data
-** byte 0,1 : length
-** byte 2 : command code 0x1b
-** byte 3 : # of FANs(example 2)
-** byte 4 : # of Voltage sensor(example 3)
-** byte 5 : # of temperature sensor(example 2)
-** byte 6 : # of power
-** byte 7/8 : Fan#0 (RPM)
-** byte 9/10 : Fan#1
-** byte 11/12 : Voltage#0 original value in *1000
-** byte 13/14 : Voltage#0 value
-** byte 15/16 : Voltage#1 org
-** byte 17/18 : Voltage#1
-** byte 19/20 : Voltage#2 org
-** byte 21/22 : Voltage#2
-** byte 23 : Temp#0
-** byte 24 : Temp#1
-** byte 25 : Power indicator (bit0 : power#0,
-** bit1 : power#1)
-** byte 26 : UPS indicator
-** GUI_QUICK_CREATE : Quick create raid/volume set
-** byte 0,1 : length
-** byte 2 : command code 0x20
-** byte 3/4/5/6 : raw capacity
-** byte 7 : raid level
-** byte 8 : stripe size
-** byte 9 : spare
-** byte 10/11/12/13: device mask (the devices to create raid/volume)
-** This function is removed, application like
-** to implement quick create function
-** need to use GUI_CREATE_RAIDSET and GUI_CREATE_VOLUMESET function.
-** GUI_GET_INFO_R : Get Raid Set Information
-** byte 0,1 : length
-** byte 2 : command code 0x20
-** byte 3 : raidset#
-** typedef struct sGUI_RAIDSET
-** {
-** BYTE grsRaidSetName[16];
-** DWORD grsCapacity;
-** DWORD grsCapacityX;
-** DWORD grsFailMask;
-** BYTE grsDevArray[32];
-** BYTE grsMemberDevices;
-** BYTE grsNewMemberDevices;
-** BYTE grsRaidState;
-** BYTE grsVolumes;
-** BYTE grsVolumeList[16];
-** BYTE grsRes1;
-** BYTE grsRes2;
-** BYTE grsRes3;
-** BYTE grsFreeSegments;
-** DWORD grsRawStripes[8];
-** DWORD grsRes4;
-** DWORD grsRes5; // Total to 128 bytes
-** DWORD grsRes6; // Total to 128 bytes
-** } sGUI_RAIDSET, *pGUI_RAIDSET;
-** GUI_GET_INFO_V : Get Volume Set Information
-** byte 0,1 : length
-** byte 2 : command code 0x21
-** byte 3 : volumeset#
-** typedef struct sGUI_VOLUMESET
-** {
-** BYTE gvsVolumeName[16]; // 16
-** DWORD gvsCapacity;
-** DWORD gvsCapacityX;
-** DWORD gvsFailMask;
-** DWORD gvsStripeSize;
-** DWORD gvsNewFailMask;
-** DWORD gvsNewStripeSize;
-** DWORD gvsVolumeStatus;
-** DWORD gvsProgress; // 32
-** sSCSI_ATTR gvsScsi;
-** BYTE gvsMemberDisks;
-** BYTE gvsRaidLevel; // 8
-** BYTE gvsNewMemberDisks;
-** BYTE gvsNewRaidLevel;
-** BYTE gvsRaidSetNumber;
-** BYTE gvsRes0; // 4
-** BYTE gvsRes1[4]; // 64 bytes
-** } sGUI_VOLUMESET, *pGUI_VOLUMESET;
-** GUI_GET_INFO_P : Get Physical Drive Information
-** byte 0,1 : length
-** byte 2 : command code 0x22
-** byte 3 : drive # (from 0 to max-channels - 1)
-** typedef struct sGUI_PHY_DRV
-** {
-** BYTE gpdModelName[40];
-** BYTE gpdSerialNumber[20];
-** BYTE gpdFirmRev[8];
-** DWORD gpdCapacity;
-** DWORD gpdCapacityX; // Reserved for expansion
-** BYTE gpdDeviceState;
-** BYTE gpdPioMode;
-** BYTE gpdCurrentUdmaMode;
-** BYTE gpdUdmaMode;
-** BYTE gpdDriveSelect;
-** BYTE gpdRaidNumber; // 0xff if not belongs to a raid set
-** sSCSI_ATTR gpdScsi;
-** BYTE gpdReserved[40]; // Total to 128 bytes
-** } sGUI_PHY_DRV, *pGUI_PHY_DRV;
-** GUI_GET_INFO_S : Get System Information
-** byte 0,1 : length
-** byte 2 : command code 0x23
-** typedef struct sCOM_ATTR
-** {
-** BYTE comBaudRate;
-** BYTE comDataBits;
-** BYTE comStopBits;
-** BYTE comParity;
-** BYTE comFlowControl;
-** } sCOM_ATTR, *pCOM_ATTR;
-** typedef struct sSYSTEM_INFO
-** {
-** BYTE gsiVendorName[40];
-** BYTE gsiSerialNumber[16];
-** BYTE gsiFirmVersion[16];
-** BYTE gsiBootVersion[16];
-** BYTE gsiMbVersion[16];
-** BYTE gsiModelName[8];
-** BYTE gsiLocalIp[4];
-** BYTE gsiCurrentIp[4];
-** DWORD gsiTimeTick;
-** DWORD gsiCpuSpeed;
-** DWORD gsiICache;
-** DWORD gsiDCache;
-** DWORD gsiScache;
-** DWORD gsiMemorySize;
-** DWORD gsiMemorySpeed;
-** DWORD gsiEvents;
-** BYTE gsiMacAddress[6];
-** BYTE gsiDhcp;
-** BYTE gsiBeeper;
-** BYTE gsiChannelUsage;
-** BYTE gsiMaxAtaMode;
-** BYTE gsiSdramEcc; // 1:if ECC enabled
-** BYTE gsiRebuildPriority;
-** sCOM_ATTR gsiComA; // 5 bytes
-** sCOM_ATTR gsiComB; // 5 bytes
-** BYTE gsiIdeChannels;
-** BYTE gsiScsiHostChannels;
-** BYTE gsiIdeHostChannels;
-** BYTE gsiMaxVolumeSet;
-** BYTE gsiMaxRaidSet;
-** BYTE gsiEtherPort; // 1:if ether net port supported
-** BYTE gsiRaid6Engine; // 1:Raid6 engine supported
-** BYTE gsiRes[75];
-** } sSYSTEM_INFO, *pSYSTEM_INFO;
-** GUI_CLEAR_EVENT : Clear System Event
-** byte 0,1 : length
-** byte 2 : command code 0x24
-** GUI_MUTE_BEEPER : Mute current beeper
-** byte 0,1 : length
-** byte 2 : command code 0x30
-** GUI_BEEPER_SETTING : Disable beeper
-** byte 0,1 : length
-** byte 2 : command code 0x31
-** byte 3 : 0->disable, 1->enable
-** GUI_SET_PASSWORD : Change password
-** byte 0,1 : length
-** byte 2 : command code 0x32
-** byte 3 : pass word length ( must <= 15 )
-** byte 4 : password (must be alpha-numerical)
-** GUI_HOST_INTERFACE_MODE : Set host interface mode
-** byte 0,1 : length
-** byte 2 : command code 0x33
-** byte 3 : 0->Independent, 1->cluster
-** GUI_REBUILD_PRIORITY : Set rebuild priority
-** byte 0,1 : length
-** byte 2 : command code 0x34
-** byte 3 : 0/1/2/3 (low->high)
-** GUI_MAX_ATA_MODE : Set maximum ATA mode to be used
-** byte 0,1 : length
-** byte 2 : command code 0x35
-** byte 3 : 0/1/2/3 (133/100/66/33)
-** GUI_RESET_CONTROLLER : Reset Controller
-** byte 0,1 : length
-** byte 2 : command code 0x36
-** *Response with VT100 screen (discard it)
-** GUI_COM_PORT_SETTING : COM port setting
-** byte 0,1 : length
-** byte 2 : command code 0x37
-** byte 3 : 0->COMA (term port),
-** 1->COMB (debug port)
-** byte 4 : 0/1/2/3/4/5/6/7
-** (1200/2400/4800/9600/19200/38400/57600/115200)
-** byte 5 : data bit
-** (0:7 bit, 1:8 bit : must be 8 bit)
-** byte 6 : stop bit (0:1, 1:2 stop bits)
-** byte 7 : parity (0:none, 1:off, 2:even)
-** byte 8 : flow control
-** (0:none, 1:xon/xoff, 2:hardware => must use none)
-** GUI_NO_OPERATION : No operation
-** byte 0,1 : length
-** byte 2 : command code 0x38
-** GUI_DHCP_IP : Set DHCP option and local IP address
-** byte 0,1 : length
-** byte 2 : command code 0x39
-** byte 3 : 0:dhcp disabled, 1:dhcp enabled
-** byte 4/5/6/7 : IP address
-** GUI_CREATE_PASS_THROUGH : Create pass through disk
-** byte 0,1 : length
-** byte 2 : command code 0x40
-** byte 3 : device #
-** byte 4 : scsi channel (0/1)
-** byte 5 : scsi id (0-->15)
-** byte 6 : scsi lun (0-->7)
-** byte 7 : tagged queue (1 : enabled)
-** byte 8 : cache mode (1 : enabled)
-** byte 9 : max speed (0/1/2/3/4,
-** async/20/40/80/160 for scsi)
-** (0/1/2/3/4, 33/66/100/133/150 for ide )
-** GUI_MODIFY_PASS_THROUGH : Modify pass through disk
-** byte 0,1 : length
-** byte 2 : command code 0x41
-** byte 3 : device #
-** byte 4 : scsi channel (0/1)
-** byte 5 : scsi id (0-->15)
-** byte 6 : scsi lun (0-->7)
-** byte 7 : tagged queue (1 : enabled)
-** byte 8 : cache mode (1 : enabled)
-** byte 9 : max speed (0/1/2/3/4,
-** async/20/40/80/160 for scsi)
-** (0/1/2/3/4, 33/66/100/133/150 for ide )
-** GUI_DELETE_PASS_THROUGH : Delete pass through disk
-** byte 0,1 : length
-** byte 2 : command code 0x42
-** byte 3 : device# to be deleted
-** GUI_IDENTIFY_DEVICE : Identify Device
-** byte 0,1 : length
-** byte 2 : command code 0x43
-** byte 3 : Flash Method
-** (0:flash selected, 1:flash not selected)
-** byte 4/5/6/7 : IDE device mask to be flashed
-** note .... no response data available
-** GUI_CREATE_RAIDSET : Create Raid Set
-** byte 0,1 : length
-** byte 2 : command code 0x50
-** byte 3/4/5/6 : device mask
-** byte 7-22 : raidset name (if byte 7 == 0:use default)
-** GUI_DELETE_RAIDSET : Delete Raid Set
-** byte 0,1 : length
-** byte 2 : command code 0x51
-** byte 3 : raidset#
-** GUI_EXPAND_RAIDSET : Expand Raid Set
-** byte 0,1 : length
-** byte 2 : command code 0x52
-** byte 3 : raidset#
-** byte 4/5/6/7 : device mask for expansion
-** byte 8/9/10 : (8:0 no change, 1 change, 0xff:terminate,
-** 9:new raid level,
-** 10:new stripe size
-** 0/1/2/3/4/5->4/8/16/32/64/128K )
-** byte 11/12/13 : repeat for each volume in the raidset
-** GUI_ACTIVATE_RAIDSET : Activate incomplete raid set
-** byte 0,1 : length
-** byte 2 : command code 0x53
-** byte 3 : raidset#
-** GUI_CREATE_HOT_SPARE : Create hot spare disk
-** byte 0,1 : length
-** byte 2 : command code 0x54
-** byte 3/4/5/6 : device mask for hot spare creation
-** GUI_DELETE_HOT_SPARE : Delete hot spare disk
-** byte 0,1 : length
-** byte 2 : command code 0x55
-** byte 3/4/5/6 : device mask for hot spare deletion
-** GUI_CREATE_VOLUME : Create volume set
-** byte 0,1 : length
-** byte 2 : command code 0x60
-** byte 3 : raidset#
-** byte 4-19 : volume set name
-** (if byte4 == 0, use default)
-** byte 20-27 : volume capacity (blocks)
-** byte 28 : raid level
-** byte 29 : stripe size
-** (0/1/2/3/4/5->4/8/16/32/64/128K)
-** byte 30 : channel
-** byte 31 : ID
-** byte 32 : LUN
-** byte 33 : 1 enable tag
-** byte 34 : 1 enable cache
-** byte 35 : speed
-** (0/1/2/3/4->async/20/40/80/160 for scsi)
-** (0/1/2/3/4->33/66/100/133/150 for IDE )
-** byte 36 : 1 to select quick init
-**
-** GUI_MODIFY_VOLUME : Modify volume Set
-** byte 0,1 : length
-** byte 2 : command code 0x61
-** byte 3 : volumeset#
-** byte 4-19 : new volume set name
-** (if byte4 == 0, not change)
-** byte 20-27 : new volume capacity (reserved)
-** byte 28 : new raid level
-** byte 29 : new stripe size
-** (0/1/2/3/4/5->4/8/16/32/64/128K)
-** byte 30 : new channel
-** byte 31 : new ID
-** byte 32 : new LUN
-** byte 33 : 1 enable tag
-** byte 34 : 1 enable cache
-** byte 35 : speed
-** (0/1/2/3/4->async/20/40/80/160 for scsi)
-** (0/1/2/3/4->33/66/100/133/150 for IDE )
-** GUI_DELETE_VOLUME : Delete volume set
-** byte 0,1 : length
-** byte 2 : command code 0x62
-** byte 3 : volumeset#
-** GUI_START_CHECK_VOLUME : Start volume consistency check
-** byte 0,1 : length
-** byte 2 : command code 0x63
-** byte 3 : volumeset#
-** GUI_STOP_CHECK_VOLUME : Stop volume consistency check
-** byte 0,1 : length
-** byte 2 : command code 0x64
-** ---------------------------------------------------------------------
-** 4. Returned data
-** ---------------------------------------------------------------------
-** (A) Header : 3 bytes sequence (0x5E, 0x01, 0x61)
-** (B) Length : 2 bytes
-** (low byte 1st, excludes length and checksum byte)
-** (C) status or data :
-** <1> If length == 1 ==> 1 byte status code
-** #define GUI_OK 0x41
-** #define GUI_RAIDSET_NOT_NORMAL 0x42
-** #define GUI_VOLUMESET_NOT_NORMAL 0x43
-** #define GUI_NO_RAIDSET 0x44
-** #define GUI_NO_VOLUMESET 0x45
-** #define GUI_NO_PHYSICAL_DRIVE 0x46
-** #define GUI_PARAMETER_ERROR 0x47
-** #define GUI_UNSUPPORTED_COMMAND 0x48
-** #define GUI_DISK_CONFIG_CHANGED 0x49
-** #define GUI_INVALID_PASSWORD 0x4a
-** #define GUI_NO_DISK_SPACE 0x4b
-** #define GUI_CHECKSUM_ERROR 0x4c
-** #define GUI_PASSWORD_REQUIRED 0x4d
-** <2> If length > 1 ==>
-** data block returned from controller
-** and the contents depends on the command code
-** (E) Checksum : checksum of length and status or data byte
-**************************************************************************
diff --git a/Documentation/scsi/bfa.txt b/Documentation/scsi/bfa.rst
index 3cc4d80d6092..3abc0411857d 100644
--- a/Documentation/scsi/bfa.txt
+++ b/Documentation/scsi/bfa.rst
@@ -1,5 +1,8 @@
-Linux driver for Brocade FC/FCOE adapters
+.. SPDX-License-Identifier: GPL-2.0
+=========================================
+Linux driver for Brocade FC/FCOE adapters
+=========================================
Supported Hardware
------------------
@@ -7,8 +10,9 @@ Supported Hardware
bfa 3.0.2.2 driver supports all Brocade FC/FCOE adapters. Below is a list of
adapter models with corresponding PCIIDs.
- PCIID Model
-
+ =================== ===========================================
+ PCIID Model
+ =================== ===========================================
1657:0013:1657:0014 425 4Gbps dual port FC HBA
1657:0013:1657:0014 825 8Gbps PCIe dual port FC HBA
1657:0013:103c:1742 HP 82B 8Gbps PCIedual port FC HBA
@@ -26,6 +30,7 @@ adapter models with corresponding PCIIDs.
1657:0022:1657:0024 1860 16Gbps FC HBA
1657:0022:1657:0022 1860 10Gbps CNA - FCOE
+ =================== ===========================================
Firmware download
@@ -37,9 +42,11 @@ http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
and then click following respective util package link:
- Version Link
-
+ ========= =======================================================
+ Version Link
+ ========= =======================================================
v3.0.0.0 Linux Adapter Firmware package for RHEL 6.2, SLES 11SP2
+ ========= =======================================================
Configuration & Management utility download
@@ -52,9 +59,11 @@ http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
and then click following respective util package link
- Version Link
-
+ ========= =======================================================
+ Version Link
+ ========= =======================================================
v3.0.2.0 Linux Adapter Firmware package for RHEL 6.2, SLES 11SP2
+ ========= =======================================================
Documentation
@@ -69,10 +78,11 @@ http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
and use the following inbox and out-of-box driver version mapping to find
the corresponding documentation:
+ ============= ==================
Inbox Version Out-of-box Version
-
+ ============= ==================
v3.0.2.2 v3.0.0.0
-
+ ============= ==================
Support
-------
diff --git a/Documentation/scsi/bnx2fc.txt b/Documentation/scsi/bnx2fc.rst
index 80823556d62f..2fef2dff80c7 100644
--- a/Documentation/scsi/bnx2fc.txt
+++ b/Documentation/scsi/bnx2fc.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
Operating FCoE using bnx2fc
===========================
Broadcom FCoE offload through bnx2fc is full stateful hardware offload that
@@ -24,6 +27,7 @@ Driver Usage Model:
2. Configure the interfaces on which bnx2fc driver has to operate on.
Here are the steps to configure:
+
a. cd /etc/fcoe
b. copy cfg-ethx to cfg-eth5 if FCoE has to be enabled on eth5.
c. Repeat this for all the interfaces where FCoE has to be enabled.
@@ -39,8 +43,10 @@ discovery and log into the targets.
5. "Symbolic Name" in 'fcoeadm -i' output would display if bnx2fc has claimed
the interface.
-Eg:
-[root@bh2 ~]# fcoeadm -i
+
+Eg::
+
+ [root@bh2 ~]# fcoeadm -i
Description: NetXtreme II BCM57712 10 Gigabit Ethernet
Revision: 01
Manufacturer: Broadcom Corporation
@@ -60,16 +66,16 @@ Eg:
State: Online
6. Verify the vlan discovery is performed by running ifconfig and notice
-<INTERFACE>.<VLAN>-fcoe interfaces are automatically created.
+ <INTERFACE>.<VLAN>-fcoe interfaces are automatically created.
Refer to fcoeadm manpage for more information on fcoeadm operations to
create/destroy interfaces or to display lun/target information.
-NOTE:
+NOTE
====
** Broadcom FCoE capable devices implement a DCBX/LLDP client on-chip. Only one
LLDP client is allowed per interface. For proper operation all host software
based DCBX/LLDP clients (e.g. lldpad) must be disabled. To disable lldpad on a
-given interface, run the following command:
+given interface, run the following command::
-lldptool set-lldp -i <interface_name> adminStatus=disabled
+ lldptool set-lldp -i <interface_name> adminStatus=disabled
diff --git a/Documentation/scsi/cxgb3i.txt b/Documentation/scsi/cxgb3i.rst
index 7ac8032ee9b2..e01f18fbfa9f 100644
--- a/Documentation/scsi/cxgb3i.txt
+++ b/Documentation/scsi/cxgb3i.rst
@@ -1,4 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================
Chelsio S3 iSCSI Driver for Linux
+=================================
Introduction
============
@@ -49,7 +53,8 @@ The following steps need to be taken to accelerates the open-iscsi initiator:
The cxgb3i module registers a new transport class "cxgb3i" with open-iscsi.
- * in the case of recompiling the kernel, the cxgb3i selection is located at
+ * in the case of recompiling the kernel, the cxgb3i selection is located at::
+
Device Drivers
SCSI device support --->
[*] SCSI low-level drivers --->
@@ -58,25 +63,26 @@ The following steps need to be taken to accelerates the open-iscsi initiator:
2. Create an interface file located under /etc/iscsi/ifaces/ for the new
transport class "cxgb3i".
- The content of the file should be in the following format:
+ The content of the file should be in the following format::
+
iface.transport_name = cxgb3i
iface.net_ifacename = <ethX>
iface.ipaddress = <iscsi ip address>
* if iface.ipaddress is specified, <iscsi ip address> needs to be either the
- same as the ethX's ip address or an address on the same subnet. Make
- sure the ip address is unique in the network.
+ same as the ethX's ip address or an address on the same subnet. Make
+ sure the ip address is unique in the network.
3. edit /etc/iscsi/iscsid.conf
The default setting for MaxRecvDataSegmentLength (131072) is too big;
- replace with a value no bigger than 15360 (for example 8192):
+ replace with a value no bigger than 15360 (for example 8192)::
node.conn[0].iscsi.MaxRecvDataSegmentLength = 8192
* The login would fail for a normal session if MaxRecvDataSegmentLength is
- too big. A error message in the format of
- "cxgb3i: ERR! MaxRecvSegmentLength <X> too big. Need to be <= <Y>."
- would be logged to dmesg.
+    too big. An error message in the format of
+ "cxgb3i: ERR! MaxRecvSegmentLength <X> too big. Need to be <= <Y>."
+ would be logged to dmesg.
4. To direct open-iscsi traffic to go through cxgb3i's accelerated path,
"-I <iface file name>" option needs to be specified with most of the
diff --git a/Documentation/scsi/dc395x.txt b/Documentation/scsi/dc395x.rst
index 88219f96633d..d779e782b1cb 100644
--- a/Documentation/scsi/dc395x.txt
+++ b/Documentation/scsi/dc395x.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
README file for the dc395x SCSI driver
-==========================================
+======================================
Status
------
@@ -18,14 +21,14 @@ http://lists.twibble.org/mailman/listinfo/dc395x/
Parameters
----------
-The driver uses the settings from the EEPROM set in the SCSI BIOS
+The driver uses the settings from the EEPROM set in the SCSI BIOS
setup. If there is no EEPROM, the driver uses default values.
Both can be overridden by command line parameters (module or kernel
parameters).
The following parameters are available:
- - safe
+safe
Default: 0, Acceptable values: 0 or 1
If safe is set to 1 then the adapter will use conservative
@@ -33,52 +36,63 @@ The following parameters are available:
shortcut for dc395x=7,4,9,15,2,10
- - adapter_id
+adapter_id
Default: 7, Acceptable values: 0 to 15
Sets the host adapter SCSI ID.
- - max_speed
+max_speed
Default: 1, Acceptable value: 0 to 7
- 0 = 20 Mhz
- 1 = 12.2 Mhz
- 2 = 10 Mhz
- 3 = 8 Mhz
- 4 = 6.7 Mhz
- 5 = 5.8 Hhz
- 6 = 5 Mhz
- 7 = 4 Mhz
-
- - dev_mode
+
+ == ========
+   0  20 MHz
+   1  12.2 MHz
+   2  10 MHz
+   3  8 MHz
+   4  6.7 MHz
+   5  5.8 MHz
+   6  5 MHz
+   7  4 MHz
+ == ========
+
+dev_mode
Bitmap for device configuration
DevMode bit definition:
+
+ === ======== ======== =========================================
Bit Val(hex) Val(dec) Meaning
- *0 0x01 1 Parity check
- *1 0x02 2 Synchronous Negotiation
- *2 0x04 4 Disconnection
- *3 0x08 8 Send Start command on startup. (Not used)
- *4 0x10 16 Tagged Command Queueing
- *5 0x20 32 Wide Negotiation
-
- - adapter_mode
+ === ======== ======== =========================================
+ 0 0x01 1 Parity check
+ 1 0x02 2 Synchronous Negotiation
+ 2 0x04 4 Disconnection
+ 3 0x08 8 Send Start command on startup. (Not used)
+ 4 0x10 16 Tagged Command Queueing
+ 5 0x20 32 Wide Negotiation
+ === ======== ======== =========================================
+
+adapter_mode
Bitmap for adapter configuration
AdaptMode bit definition
+
+ ===== ======== ======== ====================================================
Bit Val(hex) Val(dec) Meaning
- *0 0x01 1 Support more than two drives. (Not used)
- *1 0x02 2 Use DOS compatible mapping for HDs greater than 1GB.
- *2 0x04 4 Reset SCSI Bus on startup.
- *3 0x08 8 Active Negation: Improves SCSI Bus noise immunity.
+ ===== ======== ======== ====================================================
+ 0 0x01 1 Support more than two drives. (Not used)
+ 1 0x02 2 Use DOS compatible mapping for HDs greater than 1GB.
+ 2 0x04 4 Reset SCSI Bus on startup.
+ 3 0x08 8 Active Negation: Improves SCSI Bus noise immunity.
4 0x10 16 Immediate return on BIOS seek command. (Not used)
(*)5 0x20 32 Check for LUNs >= 1.
+ ===== ======== ======== ====================================================
- - tags
+tags
Default: 3, Acceptable values: 0-5
-
+
The number of tags is 1<<x, if x has been specified
- - reset_delay
+reset_delay
Default: 1, Acceptable values: 0-180
The seconds to not accept commands after a SCSI Reset
@@ -95,8 +109,9 @@ License (GPL). Please read it, before using this driver. It should be
included in your kernel sources and with your distribution. It carries the
filename COPYING. If you don't have it, please ask me to send you one by
email.
-Note: The GNU GPL says also something about warranty and liability.
+
+Note: The GNU GPL also says something about warranty and liability.
Please be aware the following: While we do my best to provide a working and
-reliable driver, there is a chance, that it will kill your valuable data.
+reliable driver, there is a chance that it will kill your valuable data.
We refuse to take any responsibility for that. The driver is provided as-is
and YOU USE IT AT YOUR OWN RESPONSIBILITY.
diff --git a/Documentation/scsi/dpti.rst b/Documentation/scsi/dpti.rst
new file mode 100644
index 000000000000..0496919d87d3
--- /dev/null
+++ b/Documentation/scsi/dpti.rst
@@ -0,0 +1,92 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+Adaptec dpti driver
+===================
+
+Redistribution and use in source form, with or without modification, are
+permitted provided that redistributions of source code must retain the
+above copyright notice, this list of conditions and the following disclaimer.
+
+This software is provided ``as is`` by Adaptec and
+any express or implied warranties, including, but not limited to, the
+implied warranties of merchantability and fitness for a particular purpose,
+are disclaimed. In no event shall Adaptec be
+liable for any direct, indirect, incidental, special, exemplary or
+consequential damages (including, but not limited to, procurement of
+substitute goods or services; loss of use, data, or profits; or business
+interruptions) however caused and on any theory of liability, whether in
+contract, strict liability, or tort (including negligence or otherwise)
+arising in any way out of the use of this driver software, even if advised
+of the possibility of such damage.
+
+This driver supports the Adaptec I2O RAID and DPT SmartRAID V I2O boards.
+
+Credits
+=======
+
+The original linux driver was ported to Linux by Karen White while at
+Dell Computer. It was ported from Bob Pasteur's (of DPT) original
+non-Linux driver. Mark Salyzyn and Bob Pasteur consulted on the original
+driver.
+
+2.0 version of the driver by Deanna Bonds and Mark Salyzyn.
+
+History
+=======
+
+The driver was originally ported to linux version 2.0.34
+
+==== ==========================================================================
+V2.0 Rewrite of driver. Re-architectured based on i2o subsystem.
+ This was the first full GPL version since the last version used
+ i2osig headers which were not GPL. Developer Testing version.
+V2.1 Internal testing
+V2.2 First released version
+
+V2.3 Changes:
+
+ - Added Raptor Support
+ - Fixed bug causing system to hang under extreme load with
+   management utilities running (removed GFP_DMA from kmalloc flags)
+
+V2.4 First version ready to be submitted to be embedded in the kernel
+
+ Changes:
+
+ - Implemented suggestions from Alan Cox
+ - Added calculation of resid for sg layer
+ - Better error handling
+ - Added checking underflow conditions
+ - Added DATAPROTECT checking
+ - Changed error return codes
+ - Fixed pointer bug in bus reset routine
+ - Enabled hba reset from ioctls (allows a FW flash to reboot and use
+ the new FW without having to reboot)
+ - Changed proc output
+==== ==========================================================================
+
+TODO
+====
+- Add 64 bit Scatter Gather when compiled on 64 bit architectures
+- Add sparse lun scanning
+- Add code that checks if a device that had been taken offline is
+ now online (at the FW level) when test unit ready or inquiry
+ command from scsi-core
+- Add proc read interface
+- busrescan command
+- rescan command
+- Add code to rescan routine that notifies scsi-core about new devices
+- Add support for C-PCI (hotplug stuff)
+- Add ioctl passthru error recovery
+
+Notes
+=====
+The DPT card optimizes the order of processing commands. Consequently,
+a command may take up to 6 minutes to complete after it has been sent
+to the board.
+
+The files dpti_ioctl.h, dptsig.h, osd_defs.h, osd_util.h and sys_info.h are part
+of the interface files for Adaptec's management routines. They define the
+structures used in the ioctls and are written to be portable. They are hard to
+read, but they must be used 'as is', or changes in the interface may be missed.
diff --git a/Documentation/scsi/dpti.txt b/Documentation/scsi/dpti.txt
deleted file mode 100644
index f36dc0e7c8da..000000000000
--- a/Documentation/scsi/dpti.txt
+++ /dev/null
@@ -1,83 +0,0 @@
- /* TERMS AND CONDITIONS OF USE
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Adaptec and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Adaptec be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- ****************************************************************
- * This driver supports the Adaptec I2O RAID and DPT SmartRAID V I2O boards.
- *
- * CREDITS:
- * The original linux driver was ported to Linux by Karen White while at
- * Dell Computer. It was ported from Bob Pasteur's (of DPT) original
- * non-Linux driver. Mark Salyzyn and Bob Pasteur consulted on the original
- * driver.
- *
- * 2.0 version of the driver by Deanna Bonds and Mark Salyzyn.
- *
- * HISTORY:
- * The driver was originally ported to linux version 2.0.34
- *
- * V2.0 Rewrite of driver. Re-architectured based on i2o subsystem.
- * This was the first full GPL version since the last version used
- * i2osig headers which were not GPL. Developer Testing version.
- * V2.1 Internal testing
- * V2.2 First released version
- *
- * V2.3
- * Changes:
- * Added Raptor Support
- * Fixed bug causing system to hang under extreme load with
- * management utilities running (removed GFP_DMA from kmalloc flags)
- *
- *
- * V2.4 First version ready to be submitted to be embedded in the kernel
- * Changes:
- * Implemented suggestions from Alan Cox
- * Added calculation of resid for sg layer
- * Better error handling
- * Added checking underflow conditions
- * Added DATAPROTECT checking
- * Changed error return codes
- * Fixed pointer bug in bus reset routine
- * Enabled hba reset from ioctls (allows a FW flash to reboot and use the new
- * FW without having to reboot)
- * Changed proc output
- *
- * TODO:
- * Add 64 bit Scatter Gather when compiled on 64 bit architectures
- * Add sparse lun scanning
- * Add code that checks if a device that had been taken offline is
- * now online (at the FW level) when test unit ready or inquiry
- * command from scsi-core
- * Add proc read interface
- * busrescan command
- * rescan command
- * Add code to rescan routine that notifies scsi-core about new devices
- * Add support for C-PCI (hotplug stuff)
- * Add ioctl passthru error recovery
- *
- * NOTES:
- * The DPT card optimizes the order of processing commands. Consequently,
- * a command may take up to 6 minutes to complete after it has been sent
- * to the board.
- *
- * The files dpti_ioctl.h dptsig.h osd_defs.h osd_util.h sys_info.h are part of the
- * interface files for Adaptec's management routines. These define the structures used
- * in the ioctls. They are written to be portable. They are hard to read, but I need
- * to use them 'as is' or I can miss changes in the interface.
- *
- */
-
diff --git a/Documentation/scsi/g_NCR5380.rst b/Documentation/scsi/g_NCR5380.rst
new file mode 100644
index 000000000000..a282059fec43
--- /dev/null
+++ b/Documentation/scsi/g_NCR5380.rst
@@ -0,0 +1,93 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==========================================
+README file for the Linux g_NCR5380 driver
+==========================================
+
+Copyright |copy| 1993 Drew Eckhardt
+
+NCR53c400 extensions Copyright |copy| 1994,1995,1996 Kevin Lentin
+
+This file documents the NCR53c400 extensions by Kevin Lentin and some
+enhancements to the NCR5380 core.
+
+This driver supports NCR5380 and NCR53c400 and compatible cards in port or
+memory mapped modes.
+
+Use of an interrupt is recommended, if supported by the board, as this will
+allow targets to disconnect and thereby improve SCSI bus utilization.
+
+If the irq parameter is 254 or is omitted entirely, the driver will probe
+for the correct IRQ line automatically. If the irq parameter is 0 or 255
+then no IRQ will be used.
+
+The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
+supported by the driver.
+
+This driver provides some information on what it has detected in
+/proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot
+time. More info to come in the future.
+
+This driver works as a module.
+When included as a module, parameters can be passed on the insmod/modprobe
+command line:
+
+ ============= ===============================================================
+ irq=xx[,...] the interrupt(s)
+ base=xx[,...] the port or base address(es) (for port or memory mapped, resp.)
+ card=xx[,...] card type(s):
+
+ == ======================================
+ 0 NCR5380
+ 1 NCR53C400
+ 2 NCR53C400A
+ 3 Domex Technology Corp 3181E (DTC3181E)
+ 4 Hewlett Packard C2502
+ == ======================================
+ ============= ===============================================================
+
+These old-style parameters can support only one card:
+
+ ============= =================================================
+ ncr_irq=xx the interrupt
+ ncr_addr=xx the port or base address (for port or memory
+ mapped, resp.)
+ ncr_5380=1 to set up for a NCR5380 board
+ ncr_53c400=1 to set up for a NCR53C400 board
+ ncr_53c400a=1 to set up for a NCR53C400A board
+ dtc_3181e=1 to set up for a Domex Technology Corp 3181E board
+ hp_c2502=1 to set up for a Hewlett Packard C2502 board
+ ============= =================================================
+
+E.g. Trantor T130B in its default configuration::
+
+ modprobe g_NCR5380 irq=5 base=0x350 card=1
+
+or alternatively, using the old syntax::
+
+ modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_53c400=1
+
+E.g. a port mapped NCR5380 board, driver to probe for IRQ::
+
+ modprobe g_NCR5380 base=0x350 card=0
+
+or alternatively::
+
+ modprobe g_NCR5380 ncr_addr=0x350 ncr_5380=1
+
+E.g. a memory mapped NCR53C400 board with no IRQ::
+
+ modprobe g_NCR5380 irq=255 base=0xc8000 card=1
+
+or alternatively::
+
+ modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+
+E.g. two cards, DTC3181E (in non-PnP mode) at 0x240 with no IRQ
+and HP C2502 at 0x300 with IRQ 7::
+
+ modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
+
+Kevin Lentin
+K.Lentin@cs.monash.edu.au
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt
deleted file mode 100644
index 37b1967a00a9..000000000000
--- a/Documentation/scsi/g_NCR5380.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-README file for the Linux g_NCR5380 driver.
-
-(c) 1993 Drew Eckhard
-NCR53c400 extensions (c) 1994,1995,1996 Kevin Lentin
-
-This file documents the NCR53c400 extensions by Kevin Lentin and some
-enhancements to the NCR5380 core.
-
-This driver supports NCR5380 and NCR53c400 and compatible cards in port or
-memory mapped modes.
-
-Use of an interrupt is recommended, if supported by the board, as this will
-allow targets to disconnect and thereby improve SCSI bus utilization.
-
-If the irq parameter is 254 or is omitted entirely, the driver will probe
-for the correct IRQ line automatically. If the irq parameter is 0 or 255
-then no IRQ will be used.
-
-The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
-supported by the driver.
-
-This driver provides some information on what it has detected in
-/proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot
-time. More info to come in the future.
-
-This driver works as a module.
-When included as a module, parameters can be passed on the insmod/modprobe
-command line:
- irq=xx[,...] the interrupt(s)
- base=xx[,...] the port or base address(es) (for port or memory mapped, resp.)
- card=xx[,...] card type(s):
- 0 = NCR5380,
- 1 = NCR53C400,
- 2 = NCR53C400A,
- 3 = Domex Technology Corp 3181E (DTC3181E)
- 4 = Hewlett Packard C2502
-
-These old-style parameters can support only one card:
- ncr_irq=xx the interrupt
- ncr_addr=xx the port or base address (for port or memory
- mapped, resp.)
- ncr_5380=1 to set up for a NCR5380 board
- ncr_53c400=1 to set up for a NCR53C400 board
- ncr_53c400a=1 to set up for a NCR53C400A board
- dtc_3181e=1 to set up for a Domex Technology Corp 3181E board
- hp_c2502=1 to set up for a Hewlett Packard C2502 board
-
-E.g. Trantor T130B in its default configuration:
-modprobe g_NCR5380 irq=5 base=0x350 card=1
-or alternatively, using the old syntax,
-modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_53c400=1
-
-E.g. a port mapped NCR5380 board, driver to probe for IRQ:
-modprobe g_NCR5380 base=0x350 card=0
-or alternatively,
-modprobe g_NCR5380 ncr_addr=0x350 ncr_5380=1
-
-E.g. a memory mapped NCR53C400 board with no IRQ:
-modprobe g_NCR5380 irq=255 base=0xc8000 card=1
-or alternatively,
-modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
-
-E.g. two cards, DTC3181 (in non-PnP mode) at 0x240 with no IRQ
-and HP C2502 at 0x300 with IRQ 7:
-modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
-
-Kevin Lentin
-K.Lentin@cs.monash.edu.au
diff --git a/Documentation/scsi/hpsa.txt b/Documentation/scsi/hpsa.rst
index 891435a72fce..340e10c6e35f 100644
--- a/Documentation/scsi/hpsa.txt
+++ b/Documentation/scsi/hpsa.rst
@@ -1,6 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================
HPSA - Hewlett Packard Smart Array driver
------------------------------------------
+=========================================
This file describes the hpsa SCSI driver for HP Smart Array controllers.
The hpsa driver is intended to supplant the cciss driver for newer
@@ -11,17 +13,17 @@ driver (for logical drives) AND a SCSI driver (for tape drives). This
complexity and eliminating that complexity is one of the reasons
for hpsa to exist.
-Supported devices:
-------------------
+Supported devices
+=================
-Smart Array P212
-Smart Array P410
-Smart Array P410i
-Smart Array P411
-Smart Array P812
-Smart Array P712m
-Smart Array P711m
-StorageWorks P1210m
+- Smart Array P212
+- Smart Array P410
+- Smart Array P410i
+- Smart Array P411
+- Smart Array P812
+- Smart Array P712m
+- Smart Array P711m
+- StorageWorks P1210m
Additionally, older Smart Arrays may work with the hpsa driver if the kernel
boot parameter "hpsa_allow_any=1" is specified, however these are not tested
@@ -35,18 +37,20 @@ mode, each command completion requires an interrupt, while with "performant mode
command completions indicated by a single interrupt.
HPSA specific entries in /sys
------------------------------
+=============================
In addition to the generic SCSI attributes available in /sys, hpsa supports
the following attributes:
- HPSA specific host attributes:
- ------------------------------
+HPSA specific host attributes
+-----------------------------
+
+ ::
- /sys/class/scsi_host/host*/rescan
- /sys/class/scsi_host/host*/firmware_revision
- /sys/class/scsi_host/host*/resettable
- /sys/class/scsi_host/host*/transport_mode
+ /sys/class/scsi_host/host*/rescan
+ /sys/class/scsi_host/host*/firmware_revision
+ /sys/class/scsi_host/host*/resettable
+ /sys/class/scsi_host/host*/transport_mode
the host "rescan" attribute is a write only attribute. Writing to this
attribute will cause the driver to scan for new, changed, or removed devices
@@ -58,7 +62,7 @@ HPSA specific entries in /sys
tape drives, or entire storage boxes containing pre-configured logical drives.
The "firmware_revision" attribute contains the firmware version of the Smart Array.
- For example:
+ For example::
root@host:/sys/class/scsi_host/host4# cat firmware_revision
7.14
@@ -78,16 +82,18 @@ HPSA specific entries in /sys
kexec tools to warn the user if they attempt to designate a device which is
unable to honor the reset_devices kernel parameter as a dump device.
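+
+ A hypothetical user-space sketch (not part of the original document)
+ of using the write-only "rescan" attribute described above; the host
+ number is an assumption, and any value written should trigger the
+ scan::
+
+    #include <fcntl.h>
+    #include <unistd.h>
+
+    int trigger_rescan(void)
+    {
+            /* hypothetical host number; adjust to your system */
+            int fd = open("/sys/class/scsi_host/host4/rescan", O_WRONLY);
+
+            if (fd < 0)
+                    return -1;
+            write(fd, "1", 1);      /* any write starts a rescan */
+            return close(fd);
+    }
+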
- HPSA specific disk attributes:
- ------------------------------
+HPSA specific disk attributes
+-----------------------------
+
+ ::
- /sys/class/scsi_disk/c:b:t:l/device/unique_id
- /sys/class/scsi_disk/c:b:t:l/device/raid_level
- /sys/class/scsi_disk/c:b:t:l/device/lunid
+ /sys/class/scsi_disk/c:b:t:l/device/unique_id
+ /sys/class/scsi_disk/c:b:t:l/device/raid_level
+ /sys/class/scsi_disk/c:b:t:l/device/lunid
(where c:b:t:l are the controller, bus, target and lun of the device)
- For example:
+ For example::
root@host:/sys/class/scsi_disk/4:0:0:0/device# cat unique_id
600508B1001044395355323037570F77
@@ -96,35 +102,28 @@ HPSA specific entries in /sys
root@host:/sys/class/scsi_disk/4:0:0:0/device# cat raid_level
RAID 0
-HPSA specific ioctls:
----------------------
+HPSA specific ioctls
+====================
For compatibility with applications written for the cciss driver, many, but
not all of the ioctls supported by the cciss driver are also supported by the
hpsa driver. The data structures used by these are described in
include/linux/cciss_ioctl.h
- CCISS_DEREGDISK
- CCISS_REGNEWDISK
- CCISS_REGNEWD
-
- The above three ioctls all do exactly the same thing, which is to cause the driver
- to rescan for new devices. This does exactly the same thing as writing to the
- hpsa specific host "rescan" attribute.
+ CCISS_DEREGDISK, CCISS_REGNEWDISK, CCISS_REGNEWD
+ The above three ioctls all do exactly the same thing, which is to cause the driver
+ to rescan for new devices. This does exactly the same thing as writing to the
+ hpsa specific host "rescan" attribute.
CCISS_GETPCIINFO
-
Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).
CCISS_GETDRIVVER
+ Returns driver version in three bytes encoded as::
- Returns driver version in three bytes encoded as:
(major_version << 16) | (minor_version << 8) | (subminor_version)
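+
+ A hypothetical user-space sketch of fetching and decoding this
+ value; the device node used is an assumption::
+
+    #include <stdio.h>
+    #include <fcntl.h>
+    #include <sys/ioctl.h>
+    #include <linux/cciss_ioctl.h>
+
+    int main(void)
+    {
+            unsigned int v;
+            int fd = open("/dev/sg0", O_RDONLY);
+
+            if (fd < 0 || ioctl(fd, CCISS_GETDRIVVER, &v) < 0)
+                    return 1;
+            /* decode (major << 16) | (minor << 8) | subminor */
+            printf("%u.%u.%u\n",
+                   (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
+            return 0;
+    }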
- CCISS_PASSTHRU
- CCISS_BIG_PASSTHRU
-
+ CCISS_PASSTHRU, CCISS_BIG_PASSTHRU
Allows "BMIC" and "CISS" commands to be passed through to the Smart Array.
These are used extensively by the HP Array Configuration Utility, SNMP storage
agents, etc. See cciss_vol_status at http://cciss.sf.net for some examples.
-
diff --git a/Documentation/scsi/hptiop.txt b/Documentation/scsi/hptiop.rst
index 12ecfd308e55..23ae7ae36971 100644
--- a/Documentation/scsi/hptiop.txt
+++ b/Documentation/scsi/hptiop.rst
@@ -1,15 +1,25 @@
-HIGHPOINT ROCKETRAID 3xxx/4xxx ADAPTER DRIVER (hptiop)
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+======================================================
+Highpoint RocketRAID 3xxx/4xxx Adapter Driver (hptiop)
+======================================================
Controller Register Map
--------------------------
+-----------------------
-For RR44xx Intel IOP based adapters, the controller IOP is accessed via PCI BAR0 and BAR2:
+For RR44xx Intel IOP based adapters, the controller IOP is accessed via PCI BAR0 and BAR2:
+ ============== ==================================
BAR0 offset Register
+ ============== ==================================
0x11C5C Link Interface IRQ Set
0x11C60 Link Interface IRQ Clear
+ ============== ==================================
+ ============== ==================================
BAR2 offset Register
+ ============== ==================================
0x10 Inbound Message Register 0
0x14 Inbound Message Register 1
0x18 Outbound Message Register 0
@@ -21,10 +31,13 @@ For RR44xx Intel IOP based adapters, the controller IOP is accessed via PCI BAR0
0x34 Outbound Interrupt Mask Register
0x40 Inbound Queue Port
0x44 Outbound Queue Port
+ ============== ==================================
For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
+ ============== ==================================
BAR0 offset Register
+ ============== ==================================
0x10 Inbound Message Register 0
0x14 Inbound Message Register 1
0x18 Outbound Message Register 0
@@ -36,16 +49,22 @@ For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
0x34 Outbound Interrupt Mask Register
0x40 Inbound Queue Port
0x44 Outbound Queue Port
+ ============== ==================================
For Marvell not Frey IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
+ ============== ==================================
BAR0 offset Register
+ ============== ==================================
0x20400 Inbound Doorbell Register
0x20404 Inbound Interrupt Mask Register
0x20408 Outbound Doorbell Register
0x2040C Outbound Interrupt Mask Register
+ ============== ==================================
+ ============== ==================================
BAR1 offset Register
+ ============== ==================================
0x0 Inbound Queue Head Pointer
0x4 Inbound Queue Tail Pointer
0x8 Outbound Queue Head Pointer
@@ -53,14 +72,20 @@ For Marvell not Frey IOP based adapters, the IOP is accessed via PCI BAR0 and BA
0x10 Inbound Message Register
0x14 Outbound Message Register
0x40-0x1040 Inbound Queue
- 0x1040-0x2040 Outbound Queue
+ 0x1040-0x2040 Outbound Queue
+ ============== ==================================
For Marvell Frey IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
+ ============== ==================================
BAR0 offset Register
+ ============== ==================================
0x0 IOP configuration information.
+ ============== ==================================
+ ============== ===================================================
BAR1 offset Register
+ ============== ===================================================
0x4000 Inbound List Base Address Low
0x4004 Inbound List Base Address High
0x4018 Inbound List Write Pointer
@@ -76,10 +101,11 @@ For Marvell Frey IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
0x10420 CPU to PCIe Function 0 Message A
0x10480 CPU to PCIe Function 0 Doorbell
0x10484 CPU to PCIe Function 0 Doorbell Enable
+ ============== ===================================================
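+
+As an illustrative sketch only (the function and cleanup are invented,
+offsets are from the tables above): the driver reaches these registers
+through the mapped BARs, e.g. on a Marvell Frey adapter::
+
+    #include <linux/io.h>
+    #include <linux/pci.h>
+
+    static void my_dump_frey_regs(struct pci_dev *pdev)
+    {
+            void __iomem *bar0 = pci_ioremap_bar(pdev, 0);
+            void __iomem *bar1 = pci_ioremap_bar(pdev, 1);
+
+            if (!bar0 || !bar1)
+                    return; /* unmap/cleanup omitted in this sketch */
+
+            pr_info("IOP config %#x, inbound wptr %#x\n",
+                    readl(bar0 + 0x0),      /* IOP configuration info */
+                    readl(bar1 + 0x4018));  /* Inbound List Write Pointer */
+    }
+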
I/O Request Workflow of Not Marvell Frey
-------------------------------------------
+----------------------------------------
All queued requests are handled via inbound/outbound queue port.
A request packet can be allocated in either IOP or host memory.
@@ -124,7 +150,7 @@ of an inbound message.
I/O Request Workflow of Marvell Frey
---------------------------------------
+------------------------------------
All queued requests are handled via inbound/outbound list.
@@ -167,13 +193,17 @@ User-level Interface
The driver exposes following sysfs attributes:
+ ================== === ========================
NAME R/W Description
+ ================== === ========================
driver-version R driver version string
firmware-version R firmware version string
+ ================== === ========================
-----------------------------------------------------------------------------
-Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
+
+Copyright |copy| 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -181,4 +211,5 @@ Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
GNU General Public License for more details.
linux@highpoint-tech.com
+
http://www.highpoint-tech.com
diff --git a/Documentation/scsi/index.rst b/Documentation/scsi/index.rst
new file mode 100644
index 000000000000..7c5f5f8f614e
--- /dev/null
+++ b/Documentation/scsi/index.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+Linux SCSI Subsystem
+====================
+
+.. toctree::
+ :maxdepth: 1
+
+ 53c700
+ aacraid
+ advansys
+ aha152x
+ aic79xx
+ aic7xxx
+ arcmsr_spec
+ bfa
+ bnx2fc
+ BusLogic
+ cxgb3i
+ dc395x
+ dpti
+ FlashPoint
+ g_NCR5380
+ hpsa
+ hptiop
+ libsas
+ link_power_management_policy
+ lpfc
+ megaraid
+ ncr53c8xx
+ NinjaSCSI
+ ppa
+ qlogicfas
+ scsi-changer
+ scsi_eh
+ scsi_fc_transport
+ scsi-generic
+ scsi_mid_low_api
+ scsi-parameters
+ scsi
+ sd-parameters
+ smartpqi
+ st
+ sym53c500_cs
+ sym53c8xx_2
+ tcm_qla2xxx
+ ufs
+ wd719x
+
+ scsi_transport_srp/figures
diff --git a/Documentation/scsi/libsas.txt b/Documentation/scsi/libsas.rst
index 8cac6492aade..7216b5d25800 100644
--- a/Documentation/scsi/libsas.txt
+++ b/Documentation/scsi/libsas.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========
SAS Layer
----------
+=========
The SAS Layer is a management infrastructure which manages
SAS LLDDs. It sits between SCSI Core and SAS LLDDs. The
@@ -37,16 +40,21 @@ It will then return. Then you enable your phys to actually
start OOB (at which point your driver will start calling the
notify_* event callbacks).
-Structure descriptions:
+Structure descriptions
+======================
+
+``struct sas_phy``
+------------------
-struct sas_phy --------------------
Normally this is statically embedded to your driver's
-phy structure:
- struct my_phy {
- blah;
- struct sas_phy sas_phy;
- bleh;
- };
+phy structure::
+
+ struct my_phy {
+ blah;
+ struct sas_phy sas_phy;
+ bleh;
+ };
+
And then all the phys are an array of my_phy in your HA
struct (shown below).
@@ -63,94 +71,122 @@ There is a scheme where the LLDD can RW certain fields,
and the SAS layer can only read such ones, and vice versa.
The idea is to avoid unnecessary locking.
-enabled -- must be set (0/1)
-id -- must be set [0,MAX_PHYS)
-class, proto, type, role, oob_mode, linkrate -- must be set
-oob_mode -- you set this when OOB has finished and then notify
-the SAS Layer.
-
-sas_addr -- this normally points to an array holding the sas
-address of the phy, possibly somewhere in your my_phy
-struct.
-
-attached_sas_addr -- set this when you (LLDD) receive an
-IDENTIFY frame or a FIS frame, _before_ notifying the SAS
-layer. The idea is that sometimes the LLDD may want to fake
-or provide a different SAS address on that phy/port and this
-allows it to do this. At best you should copy the sas
-address from the IDENTIFY frame or maybe generate a SAS
-address for SATA directly attached devices. The Discover
-process may later change this.
-
-frame_rcvd -- this is where you copy the IDENTIFY/FIS frame
-when you get it; you lock, copy, set frame_rcvd_size and
-unlock the lock, and then call the event. It is a pointer
-since there's no way to know your hw frame size _exactly_,
-so you define the actual array in your phy struct and let
-this pointer point to it. You copy the frame from your
-DMAable memory to that area holding the lock.
-
-sas_prim -- this is where primitives go when they're
-received. See sas.h. Grab the lock, set the primitive,
-release the lock, notify.
-
-port -- this points to the sas_port if the phy belongs
-to a port -- the LLDD only reads this. It points to the
-sas_port this phy is part of. Set by the SAS Layer.
-
-ha -- may be set; the SAS layer sets it anyway.
-
-lldd_phy -- you should set this to point to your phy so you
-can find your way around faster when the SAS layer calls one
-of your callbacks and passes you a phy. If the sas_phy is
-embedded you can also use container_of -- whatever you
-prefer.
-
-
-struct sas_port --------------------
+enabled
+ - must be set (0/1)
+
+id
+ - must be set [0,MAX_PHYS)
+
+class, proto, type, role, oob_mode, linkrate
+ - must be set
+
+oob_mode
+ - you set this when OOB has finished and then notify
+ the SAS Layer.
+
+sas_addr
+ - this normally points to an array holding the sas
+ address of the phy, possibly somewhere in your my_phy
+ struct.
+
+attached_sas_addr
+ - set this when you (LLDD) receive an
+ IDENTIFY frame or a FIS frame, _before_ notifying the SAS
+ layer. The idea is that sometimes the LLDD may want to fake
+ or provide a different SAS address on that phy/port and this
+ allows it to do this. At best you should copy the sas
+ address from the IDENTIFY frame or maybe generate a SAS
+ address for SATA directly attached devices. The Discover
+ process may later change this.
+
+frame_rcvd
+ - this is where you copy the IDENTIFY/FIS frame
+ when you get it; you lock, copy, set frame_rcvd_size and
+ unlock the lock, and then call the event. It is a pointer
+ since there's no way to know your hw frame size _exactly_,
+ so you define the actual array in your phy struct and let
+ this pointer point to it. You copy the frame from your
+ DMAable memory to that area holding the lock.
+
+sas_prim
+ - this is where primitives go when they're
+ received. See sas.h. Grab the lock, set the primitive,
+ release the lock, notify.
+
+port
+ - this points to the sas_port if the phy belongs
+ to a port -- the LLDD only reads this. It points to the
+ sas_port this phy is part of. Set by the SAS Layer.
+
+ha
+ - may be set; the SAS layer sets it anyway.
+
+lldd_phy
+ - you should set this to point to your phy so you
+ can find your way around faster when the SAS layer calls one
+ of your callbacks and passes you a phy. If the sas_phy is
+ embedded you can also use container_of -- whatever you
+ prefer.
+
+
+``struct sas_port``
+-------------------
+
The LLDD doesn't set any fields of this struct -- it only
reads them. They should be self explanatory.
phy_mask is 32 bit, this should be enough for now, as I
haven't heard of a HA having more than 8 phys.
-lldd_port -- I haven't found use for that -- maybe other
-LLDD who wish to have internal port representation can make
-use of this.
+lldd_port
+ - I haven't found a use for this -- maybe other
+ LLDDs that wish to have an internal port representation
+ can make use of this.
+``struct sas_ha_struct``
+------------------------
-struct sas_ha_struct --------------------
It normally is statically declared in your own LLDD
-structure describing your adapter:
-struct my_sas_ha {
- blah;
- struct sas_ha_struct sas_ha;
- struct my_phy phys[MAX_PHYS];
- struct sas_port sas_ports[MAX_PHYS]; /* (1) */
- bleh;
-};
+structure describing your adapter::
+
+ struct my_sas_ha {
+ blah;
+ struct sas_ha_struct sas_ha;
+ struct my_phy phys[MAX_PHYS];
+ struct sas_port sas_ports[MAX_PHYS]; /* (1) */
+ bleh;
+ };
-(1) If your LLDD doesn't have its own port representation.
+ (1) If your LLDD doesn't have its own port representation.
What needs to be initialized (sample function given below).
pcidev
-sas_addr -- since the SAS layer doesn't want to mess with
+^^^^^^
+
+sas_addr
+ - since the SAS layer doesn't want to mess with
memory allocation, etc, this points to statically
allocated array somewhere (say in your host adapter
structure) and holds the SAS address of the host
adapter as given by you or the manufacturer, etc.
+
sas_port
-sas_phy -- an array of pointers to structures. (see
+^^^^^^^^
+
+sas_phy
+ - an array of pointers to structures. (see
note above on sas_addr).
These must be set. See more notes below.
-num_phys -- the number of phys present in the sas_phy array,
+
+num_phys
+ - the number of phys present in the sas_phy array,
and the number of ports present in the sas_port
array. There can be a maximum num_phys ports (one per
port) so we drop the num_ports, and only use
num_phys.
-The event interface:
+The event interface::
/* LLDD calls these to notify the class of an event. */
void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
@@ -161,7 +197,7 @@ When sas_register_ha() returns, those are set and can be
called by the LLDD to notify the SAS layer of such events.
-The port notification:
+The port notification::
/* The class calls these to notify the LLDD of an event. */
void (*lldd_port_formed)(struct sas_phy *);
@@ -171,7 +207,7 @@ If the LLDD wants notification when a port has been formed
or deformed it sets those to a function satisfying the type.
A SAS LLDD should also implement at least one of the Task
-Management Functions (TMFs) described in SAM:
+Management Functions (TMFs) described in SAM::
/* Task Management Functions. Must be called from process context. */
int (*lldd_abort_task)(struct sas_task *);
@@ -184,7 +220,7 @@ Management Functions (TMFs) described in SAM:
For more information please read SAM from T10.org.
-Port and Adapter management:
+Port and Adapter management::
/* Port and Adapter management */
int (*lldd_clear_nexus_port)(struct sas_port *);
@@ -192,75 +228,78 @@ Port and Adapter management:
A SAS LLDD should implement at least one of those.
-Phy management:
+Phy management::
/* Phy management */
int (*lldd_control_phy)(struct sas_phy *, enum phy_func);
-lldd_ha -- set this to point to your HA struct. You can also
-use container_of if you embedded it as shown above.
+lldd_ha
+ - set this to point to your HA struct. You can also
+ use container_of if you embedded it as shown above.
A sample initialization and registration function
can look like this (called last thing from probe())
-*but* before you enable the phys to do OOB:
+*but* before you enable the phys to do OOB::
-static int register_sas_ha(struct my_sas_ha *my_ha)
-{
- int i;
- static struct sas_phy *sas_phys[MAX_PHYS];
- static struct sas_port *sas_ports[MAX_PHYS];
+ static int register_sas_ha(struct my_sas_ha *my_ha)
+ {
+ int i;
+ static struct sas_phy *sas_phys[MAX_PHYS];
+ static struct sas_port *sas_ports[MAX_PHYS];
- my_ha->sas_ha.sas_addr = &my_ha->sas_addr[0];
+ my_ha->sas_ha.sas_addr = &my_ha->sas_addr[0];
- for (i = 0; i < MAX_PHYS; i++) {
- sas_phys[i] = &my_ha->phys[i].sas_phy;
- sas_ports[i] = &my_ha->sas_ports[i];
- }
+ for (i = 0; i < MAX_PHYS; i++) {
+ sas_phys[i] = &my_ha->phys[i].sas_phy;
+ sas_ports[i] = &my_ha->sas_ports[i];
+ }
- my_ha->sas_ha.sas_phy = sas_phys;
- my_ha->sas_ha.sas_port = sas_ports;
- my_ha->sas_ha.num_phys = MAX_PHYS;
+ my_ha->sas_ha.sas_phy = sas_phys;
+ my_ha->sas_ha.sas_port = sas_ports;
+ my_ha->sas_ha.num_phys = MAX_PHYS;
- my_ha->sas_ha.lldd_port_formed = my_port_formed;
+ my_ha->sas_ha.lldd_port_formed = my_port_formed;
- my_ha->sas_ha.lldd_dev_found = my_dev_found;
- my_ha->sas_ha.lldd_dev_gone = my_dev_gone;
+ my_ha->sas_ha.lldd_dev_found = my_dev_found;
+ my_ha->sas_ha.lldd_dev_gone = my_dev_gone;
- my_ha->sas_ha.lldd_execute_task = my_execute_task;
+ my_ha->sas_ha.lldd_execute_task = my_execute_task;
- my_ha->sas_ha.lldd_abort_task = my_abort_task;
- my_ha->sas_ha.lldd_abort_task_set = my_abort_task_set;
- my_ha->sas_ha.lldd_clear_aca = my_clear_aca;
- my_ha->sas_ha.lldd_clear_task_set = my_clear_task_set;
- my_ha->sas_ha.lldd_I_T_nexus_reset= NULL; (2)
- my_ha->sas_ha.lldd_lu_reset = my_lu_reset;
- my_ha->sas_ha.lldd_query_task = my_query_task;
+ my_ha->sas_ha.lldd_abort_task = my_abort_task;
+ my_ha->sas_ha.lldd_abort_task_set = my_abort_task_set;
+ my_ha->sas_ha.lldd_clear_aca = my_clear_aca;
+ my_ha->sas_ha.lldd_clear_task_set = my_clear_task_set;
+ my_ha->sas_ha.lldd_I_T_nexus_reset = NULL; /* (2) */
+ my_ha->sas_ha.lldd_lu_reset = my_lu_reset;
+ my_ha->sas_ha.lldd_query_task = my_query_task;
- my_ha->sas_ha.lldd_clear_nexus_port = my_clear_nexus_port;
- my_ha->sas_ha.lldd_clear_nexus_ha = my_clear_nexus_ha;
+ my_ha->sas_ha.lldd_clear_nexus_port = my_clear_nexus_port;
+ my_ha->sas_ha.lldd_clear_nexus_ha = my_clear_nexus_ha;
- my_ha->sas_ha.lldd_control_phy = my_control_phy;
+ my_ha->sas_ha.lldd_control_phy = my_control_phy;
- return sas_register_ha(&my_ha->sas_ha);
-}
+ return sas_register_ha(&my_ha->sas_ha);
+ }
(2) SAS 1.1 does not define I_T Nexus Reset TMF.
Events
-------
+======
-Events are _the only way_ a SAS LLDD notifies the SAS layer
+Events are **the only way** a SAS LLDD notifies the SAS layer
of anything. There is no other method or way for an LLDD to tell
the SAS layer of anything happening internally or in the SAS
domain.
-Phy events:
+Phy events::
+
PHYE_LOSS_OF_SIGNAL, (C)
PHYE_OOB_DONE,
PHYE_OOB_ERROR, (C)
PHYE_SPINUP_HOLD.
-Port events, passed on a _phy_:
+Port events, passed on a _phy_::
+
PORTE_BYTES_DMAED, (M)
PORTE_BROADCAST_RCVD, (E)
PORTE_LINK_RESET_ERR, (C)
@@ -271,6 +310,7 @@ Host Adapter event:
HAE_RESET
A SAS LLDD should be able to generate
+
- at least one event from group C (choice),
- events marked M (mandatory) are mandatory (only one),
- events marked E (expander) if it wants the SAS layer
@@ -279,26 +319,42 @@ A SAS LLDD should be able to generate
Meaning:
-HAE_RESET -- when your HA got internal error and was reset.
+HAE_RESET
+ - when your HA got internal error and was reset.
+
+PORTE_BYTES_DMAED
+ - on receiving an IDENTIFY/FIS frame
+
+PORTE_BROADCAST_RCVD
+ - on receiving a primitive
+
+PORTE_LINK_RESET_ERR
+ - timer expired, loss of signal, loss of DWS, etc. [1]_
-PORTE_BYTES_DMAED -- on receiving an IDENTIFY/FIS frame
-PORTE_BROADCAST_RCVD -- on receiving a primitive
-PORTE_LINK_RESET_ERR -- timer expired, loss of signal, loss
-of DWS, etc. (*)
-PORTE_TIMER_EVENT -- DWS reset timeout timer expired (*)
-PORTE_HARD_RESET -- Hard Reset primitive received.
+PORTE_TIMER_EVENT
+ - DWS reset timeout timer expired [1]_
-PHYE_LOSS_OF_SIGNAL -- the device is gone (*)
-PHYE_OOB_DONE -- OOB went fine and oob_mode is valid
-PHYE_OOB_ERROR -- Error while doing OOB, the device probably
-got disconnected. (*)
-PHYE_SPINUP_HOLD -- SATA is present, COMWAKE not sent.
+PORTE_HARD_RESET
+ - Hard Reset primitive received.
-(*) should set/clear the appropriate fields in the phy,
- or alternatively call the inlined sas_phy_disconnected()
- which is just a helper, from their tasklet.
+PHYE_LOSS_OF_SIGNAL
+ - the device is gone [1]_
-The Execute Command SCSI RPC:
+PHYE_OOB_DONE
+ - OOB went fine and oob_mode is valid
+
+PHYE_OOB_ERROR
+ - Error while doing OOB, the device probably
+ got disconnected. [1]_
+
+PHYE_SPINUP_HOLD
+ - SATA is present, COMWAKE not sent.
+
+.. [1] should set/clear the appropriate fields in the phy,
+ or alternatively call the inlined sas_phy_disconnected()
+ which is just a helper, from their tasklet.
+
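+A minimal sketch, assuming the notify_* callbacks installed by
+sas_register_ha() as described earlier (the driver-side names are
+invented), of how an LLDD might raise one of these events::
+
+    static void my_phy_up(struct my_phy *phy)
+    {
+            struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
+
+            /* frame_rcvd must already hold the IDENTIFY/FIS frame */
+            sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
+    }
+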
+The Execute Command SCSI RPC::
int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags);
@@ -311,23 +367,28 @@ That is, when lldd_execute_task() is called, the command
go out on the transport *immediately*. There is *no*
queuing of any sort and at any level in a SAS LLDD.
-Returns: -SAS_QUEUE_FULL, -ENOMEM, nothing was queued;
- 0, the task(s) were queued.
+Returns:
-struct sas_task {
- dev -- the device this task is destined to
- task_proto -- _one_ of enum sas_proto
- scatter -- pointer to scatter gather list array
- num_scatter -- number of elements in scatter
- total_xfer_len -- total number of bytes expected to be transferred
- data_dir -- PCI_DMA_...
- task_done -- callback when the task has finished execution
-};
+ * -SAS_QUEUE_FULL, -ENOMEM, nothing was queued;
+ * 0, the task(s) were queued.
-DISCOVERY
----------
+::
+
+ struct sas_task {
+ dev -- the device this task is destined to
+ task_proto -- _one_ of enum sas_proto
+ scatter -- pointer to scatter gather list array
+ num_scatter -- number of elements in scatter
+ total_xfer_len -- total number of bytes expected to be transferred
+ data_dir -- PCI_DMA_...
+ task_done -- callback when the task has finished execution
+ };
+
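+A hedged sketch of task completion (struct my_cmd and the surrounding
+driver are invented; the sas_task fields are those described above)::
+
+    static void my_complete_task(struct my_cmd *cmd)
+    {
+            struct sas_task *task = cmd->task;
+
+            task->task_status.resp = SAS_TASK_COMPLETE;
+            task->task_status.stat = SAM_STAT_GOOD;
+            task->task_done(task);  /* hand the result to the SAS layer */
+    }
+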
+Discovery
+=========
The sysfs tree has the following purposes:
+
a) It shows you the physical layout of the SAS domain at
the current time, i.e. how the domain looks in the
physical world right now.
@@ -336,6 +397,7 @@ The sysfs tree has the following purposes:
This is a link to the tree(1) program, very useful in
viewing the SAS domain:
ftp://mama.indstate.edu/linux/tree/
+
I expect user space applications to actually create a
graphical interface of this.
@@ -359,7 +421,7 @@ contents of the domain_device structure, but it never creates
or destroys one.
Expander management from User Space
------------------------------------
+===================================
In each expander directory in sysfs, there is a file called
"smp_portal". It is a binary sysfs attribute file, which
@@ -371,15 +433,23 @@ Functionality is deceptively simple:
1. Build the SMP frame you want to send. The format and layout
is described in the SAS spec. Leave the CRC field equal 0.
+
open(2)
+
2. Open the expander's SMP portal sysfs file in RW mode.
+
write(2)
+
3. Write the frame you built in 1.
+
read(2)
+
4. Read the amount of data you expect to receive for the frame you built.
If you receive a different amount of data than you expected,
then there was some kind of error.
+
close(2)
+
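+As a user-space sketch of the open/write/read/close sequence above
+(error handling trimmed; frame layout and sizes are up to you, per
+the SAS spec)::
+
+    #include <fcntl.h>
+    #include <unistd.h>
+
+    int do_smp(const char *portal, const void *req, size_t req_len,
+               void *resp, size_t resp_len)
+    {
+            int fd = open(portal, O_RDWR);                  /* step 2 */
+
+            if (fd < 0)
+                    return -1;
+            if (write(fd, req, req_len) != (ssize_t)req_len ||   /* step 3 */
+                read(fd, resp, resp_len) != (ssize_t)resp_len) { /* step 4 */
+                    close(fd);
+                    return -1;      /* short read: some kind of error */
+            }
+            return close(fd);
+    }
+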
All this process is shown in detail in the function do_smp_func()
and its callers, in the file "expander_conf.c".
diff --git a/Documentation/scsi/link_power_management_policy.txt b/Documentation/scsi/link_power_management_policy.rst
index d18993d01884..64288dcf10f6 100644
--- a/Documentation/scsi/link_power_management_policy.txt
+++ b/Documentation/scsi/link_power_management_policy.rst
@@ -1,8 +1,15 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+Link Power Management Policy
+============================
+
This parameter allows the user to set the link (interface) power management.
There are 3 possible options:
+===================== =====================================================
Value Effect
-----------------------------------------------------------------------------
+===================== =====================================================
min_power Tell the controller to try to make the link use the
least possible power when possible. This may
sacrifice some performance due to increased latency
@@ -15,5 +22,4 @@ max_performance Generally, this means no power management. Tell
medium_power Tell the controller to enter a lower power state
when possible, but do not enter the lowest power
state, thus improving latency over min_power setting.
-
-
+===================== =====================================================
diff --git a/Documentation/scsi/lpfc.txt b/Documentation/scsi/lpfc.rst
index 5741ea8aa88a..6e217e82b9b9 100644
--- a/Documentation/scsi/lpfc.txt
+++ b/Documentation/scsi/lpfc.rst
@@ -1,10 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
-LPFC Driver Release Notes:
+=========================
+LPFC Driver Release Notes
+=========================
-=============================================================================
-
- IMPORTANT:
+.. important::
Starting in the 8.0.17 release, the driver began to be targeted strictly
toward the upstream kernel. As such, we removed #ifdefs for older kernels
@@ -22,9 +23,6 @@ LPFC Driver Release Notes:
Please heed these dependencies....
- ********************************************************************
-
-
The following information is provided for additional background on the
history of the driver as we push for upstream acceptance.
@@ -64,6 +62,7 @@ Cable pull and temporary device Loss:
Kernel Support
+==============
This source package is targeted for the upstream kernel only. (See notes
at the top of this file). It relies on interfaces that are slowing
@@ -77,7 +76,6 @@ Kernel Support
Patches
+=======
Thankfully, at this time, patches are not needed.
-
-
diff --git a/Documentation/scsi/megaraid.txt b/Documentation/scsi/megaraid.rst
index 3c7cea51e687..22b75a86ba72 100644
--- a/Documentation/scsi/megaraid.txt
+++ b/Documentation/scsi/megaraid.rst
@@ -1,7 +1,10 @@
- Notes on Management Module
- ~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. SPDX-License-Identifier: GPL-2.0
-Overview:
+==========================
+Notes on Management Module
+==========================
+
+Overview
--------
Different classes of controllers from LSI Logic accept and respond to the
@@ -25,28 +28,32 @@ ioctl commands. But this module is envisioned to handle all user space level
interactions. So any 'proc', 'sysfs' implementations will be localized in this
common module.
-Credits:
+Credits
-------
-"Shared code in a third module, a "library module", is an acceptable
-solution. modprobe automatically loads dependent modules, so users
-running "modprobe driver1" or "modprobe driver2" would automatically
-load the shared library module."
+::
+
+ "Shared code in a third module, a "library module", is an acceptable
+ solution. modprobe automatically loads dependent modules, so users
+ running "modprobe driver1" or "modprobe driver2" would automatically
+ load the shared library module."
+
+- Jeff Garzik (jgarzik@pobox.com), 02.25.2004 LKML
- - Jeff Garzik (jgarzik@pobox.com), 02.25.2004 LKML
+::
-"As Jeff hinted, if your userspace<->driver API is consistent between
-your new MPT-based RAID controllers and your existing megaraid driver,
-then perhaps you need a single small helper module (lsiioctl or some
-better name), loaded by both mptraid and megaraid automatically, which
-handles registering the /dev/megaraid node dynamically. In this case,
-both mptraid and megaraid would register with lsiioctl for each
-adapter discovered, and lsiioctl would essentially be a switch,
-redirecting userspace tool ioctls to the appropriate driver."
+ "As Jeff hinted, if your userspace<->driver API is consistent between
+ your new MPT-based RAID controllers and your existing megaraid driver,
+ then perhaps you need a single small helper module (lsiioctl or some
+ better name), loaded by both mptraid and megaraid automatically, which
+ handles registering the /dev/megaraid node dynamically. In this case,
+ both mptraid and megaraid would register with lsiioctl for each
+ adapter discovered, and lsiioctl would essentially be a switch,
+ redirecting userspace tool ioctls to the appropriate driver."
- - Matt Domsch, (Matt_Domsch@dell.com), 02.25.2004 LKML
+- Matt Domsch, (Matt_Domsch@dell.com), 02.25.2004 LKML
-Design:
+Design
------
The Common Management Module is implemented in megaraid_mm.[ch] files. This
@@ -61,7 +68,7 @@ uioc_t. The management module converts the older ioctl packets from the older
applications into uioc_t. After driver handles the uioc_t, the common module
will convert that back into the old format before returning to applications.
-As new applications evolve and replace the old ones, the old packet format
+As new applications evolve and replace the old ones, the old packet format
will be retired.
Common module dedicates one uioc_t packet to each controller registered. This
diff --git a/Documentation/scsi/ncr53c8xx.txt b/Documentation/scsi/ncr53c8xx.rst
index 8586efff1e99..c41cec99f07c 100644
--- a/Documentation/scsi/ncr53c8xx.txt
+++ b/Documentation/scsi/ncr53c8xx.rst
@@ -1,106 +1,114 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================================
The Linux NCR53C8XX/SYM53C8XX drivers README file
+=================================================
Written by Gerard Roudier <groudier@free.fr>
+
21 Rue Carnot
+
95170 DEUIL LA BARRE - FRANCE
29 May 1999
-===============================================================================
-
-1. Introduction
-2. Supported chips and SCSI features
-3. Advantages of the enhanced 896 driver
- 3.1 Optimized SCSI SCRIPTS
- 3.2 New features of the SYM53C896 (64 bit PCI dual LVD SCSI controller)
-4. Memory mapped I/O versus normal I/O
-5. Tagged command queueing
-6. Parity checking
-7. Profiling information
-8. Control commands
- 8.1 Set minimum synchronous period
- 8.2 Set wide size
- 8.3 Set maximum number of concurrent tagged commands
- 8.4 Set order type for tagged command
- 8.5 Set debug mode
- 8.6 Clear profile counters
- 8.7 Set flag (no_disc)
- 8.8 Set verbose level
- 8.9 Reset all logical units of a target
- 8.10 Abort all tasks of all logical units of a target
-9. Configuration parameters
-10. Boot setup commands
- 10.1 Syntax
- 10.2 Available arguments
- 10.2.1 Master parity checking
- 10.2.2 Scsi parity checking
- 10.2.3 Scsi disconnections
- 10.2.4 Special features
- 10.2.5 Ultra SCSI support
- 10.2.6 Default number of tagged commands
- 10.2.7 Default synchronous period factor
- 10.2.8 Negotiate synchronous with all devices
- 10.2.9 Verbosity level
- 10.2.10 Debug mode
- 10.2.11 Burst max
- 10.2.12 LED support
- 10.2.13 Max wide
- 10.2.14 Differential mode
- 10.2.15 IRQ mode
- 10.2.16 Reverse probe
- 10.2.17 Fix up PCI configuration space
- 10.2.18 Serial NVRAM
- 10.2.19 Check SCSI BUS
- 10.2.20 Exclude a host from being attached
- 10.2.21 Suggest a default SCSI id for hosts
- 10.2.22 Enable use of IMMEDIATE ARBITRATION
- 10.3 Advised boot setup commands
- 10.4 PCI configuration fix-up boot option
- 10.5 Serial NVRAM support boot option
- 10.6 SCSI BUS checking boot option
- 10.7 IMMEDIATE ARBITRATION boot option
-11. Some constants and flags of the ncr53c8xx.h header file
-12. Installation
-13. Architecture dependent features
-14. Known problems
- 14.1 Tagged commands with Iomega Jaz device
- 14.2 Device names change when another controller is added
- 14.3 Using only 8 bit devices with a WIDE SCSI controller.
- 14.4 Possible data corruption during a Memory Write and Invalidate
- 14.5 IRQ sharing problems
-15. SCSI problem troubleshooting
- 15.1 Problem tracking
- 15.2 Understanding hardware error reports
-16. Synchronous transfer negotiation tables
- 16.1 Synchronous timings for 53C875 and 53C860 Ultra-SCSI controllers
- 16.2 Synchronous timings for fast SCSI-2 53C8XX controllers
-17. Serial NVRAM support (by Richard Waltham)
- 17.1 Features
- 17.2 Symbios NVRAM layout
- 17.3 Tekram NVRAM layout
-18. Support for Big Endian
- 18.1 Big Endian CPU
- 18.2 NCR chip in Big Endian mode of operations
-===============================================================================
+.. Contents:
+
+ 1. Introduction
+ 2. Supported chips and SCSI features
+ 3. Advantages of the enhanced 896 driver
+ 3.1 Optimized SCSI SCRIPTS
+ 3.2 New features of the SYM53C896 (64 bit PCI dual LVD SCSI controller)
+ 4. Memory mapped I/O versus normal I/O
+ 5. Tagged command queueing
+ 6. Parity checking
+ 7. Profiling information
+ 8. Control commands
+ 8.1 Set minimum synchronous period
+ 8.2 Set wide size
+ 8.3 Set maximum number of concurrent tagged commands
+ 8.4 Set order type for tagged command
+ 8.5 Set debug mode
+ 8.6 Clear profile counters
+ 8.7 Set flag (no_disc)
+ 8.8 Set verbose level
+ 8.9 Reset all logical units of a target
+ 8.10 Abort all tasks of all logical units of a target
+ 9. Configuration parameters
+ 10. Boot setup commands
+ 10.1 Syntax
+ 10.2 Available arguments
+ 10.2.1 Master parity checking
+ 10.2.2 Scsi parity checking
+ 10.2.3 Scsi disconnections
+ 10.2.4 Special features
+ 10.2.5 Ultra SCSI support
+ 10.2.6 Default number of tagged commands
+ 10.2.7 Default synchronous period factor
+ 10.2.8 Negotiate synchronous with all devices
+ 10.2.9 Verbosity level
+ 10.2.10 Debug mode
+ 10.2.11 Burst max
+ 10.2.12 LED support
+ 10.2.13 Max wide
+ 10.2.14 Differential mode
+ 10.2.15 IRQ mode
+ 10.2.16 Reverse probe
+ 10.2.17 Fix up PCI configuration space
+ 10.2.18 Serial NVRAM
+ 10.2.19 Check SCSI BUS
+ 10.2.20 Exclude a host from being attached
+ 10.2.21 Suggest a default SCSI id for hosts
+ 10.2.22 Enable use of IMMEDIATE ARBITRATION
+ 10.3 Advised boot setup commands
+ 10.4 PCI configuration fix-up boot option
+ 10.5 Serial NVRAM support boot option
+ 10.6 SCSI BUS checking boot option
+ 10.7 IMMEDIATE ARBITRATION boot option
+ 11. Some constants and flags of the ncr53c8xx.h header file
+ 12. Installation
+ 13. Architecture dependent features
+ 14. Known problems
+ 14.1 Tagged commands with Iomega Jaz device
+ 14.2 Device names change when another controller is added
+ 14.3 Using only 8 bit devices with a WIDE SCSI controller.
+ 14.4 Possible data corruption during a Memory Write and Invalidate
+ 14.5 IRQ sharing problems
+ 15. SCSI problem troubleshooting
+ 15.1 Problem tracking
+ 15.2 Understanding hardware error reports
+ 16. Synchronous transfer negotiation tables
+ 16.1 Synchronous timings for 53C875 and 53C860 Ultra-SCSI controllers
+ 16.2 Synchronous timings for fast SCSI-2 53C8XX controllers
+ 17. Serial NVRAM support (by Richard Waltham)
+ 17.1 Features
+ 17.2 Symbios NVRAM layout
+ 17.3 Tekram NVRAM layout
+ 18. Support for Big Endian
+ 18.1 Big Endian CPU
+ 18.2 NCR chip in Big Endian mode of operations
1. Introduction
+===============
-The initial Linux ncr53c8xx driver has been a port of the ncr driver from
+The initial Linux ncr53c8xx driver is a port of the ncr driver from
FreeBSD, done in November 1995 by:
- Gerard Roudier <groudier@free.fr>
+
+ - Gerard Roudier <groudier@free.fr>
The original driver has been written for 386bsd and FreeBSD by:
- Wolfgang Stanglmeier <wolf@cologne.de>
- Stefan Esser <se@mi.Uni-Koeln.de>
+
+ - Wolfgang Stanglmeier <wolf@cologne.de>
+ - Stefan Esser <se@mi.Uni-Koeln.de>
It is now available as a bundle of 2 drivers:
-- ncr53c8xx generic driver that supports all the SYM53C8XX family including
+- ncr53c8xx generic driver that supports all the SYM53C8XX family including
the earliest 810 rev. 1, the latest 896 (2 channel LVD SCSI controller) and
the new 895A (1 channel LVD SCSI controller).
-- sym53c8xx enhanced driver (a.k.a. 896 drivers) that drops support of oldest
- chips in order to gain advantage of new features, as LOAD/STORE instructions
- available since the 810A and hardware phase mismatch available with the
+- sym53c8xx enhanced driver (a.k.a. 896 drivers) that drops support of oldest
+ chips in order to gain advantage of new features, as LOAD/STORE instructions
+ available since the 810A and hardware phase mismatch available with the
896 and the 895A.
You can find technical information about the NCR 8xx family in the
@@ -109,119 +117,145 @@ Drew Eckhardt.
Information about new chips is available at LSILOGIC web server:
- http://www.lsilogic.com/
+ - http://www.lsilogic.com/
SCSI standard documentations are available at SYMBIOS ftp server:
- ftp://ftp.symbios.com/
+ - ftp://ftp.symbios.com/
Useful SCSI tools written by Eric Youngdale are available at tsx-11:
- ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/scsiinfo-X.Y.tar.gz
- ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/scsidev-X.Y.tar.gz
+ - ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/scsiinfo-X.Y.tar.gz
+ - ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/scsidev-X.Y.tar.gz
These tools are not ALPHA but quite clean and work quite well.
It is essential you have the 'scsiinfo' package.
This short documentation describes the features of the generic and enhanced
-drivers, configuration parameters and control commands available through
+drivers, configuration parameters and control commands available through
the proc SCSI file system read / write operations.
This driver has been tested OK with linux/i386, Linux/Alpha and Linux/PPC.
Latest driver version and patches are available at:
- ftp://ftp.tux.org/pub/people/gerard-roudier
+ - ftp://ftp.tux.org/pub/people/gerard-roudier
+
or
- ftp://ftp.symbios.com/mirror/ftp.tux.org/pub/tux/roudier/drivers
+
+ - ftp://ftp.symbios.com/mirror/ftp.tux.org/pub/tux/roudier/drivers
I am not a native speaker of English and there are probably lots of
mistakes in this README file. Any help will be welcome.
2. Supported chips and SCSI features
+====================================
The following features are supported for all chips:
- Synchronous negotiation
- Disconnection
- Tagged command queuing
- SCSI parity checking
- Master parity checking
+ - Synchronous negotiation
+ - Disconnection
+ - Tagged command queuing
+ - SCSI parity checking
+ - Master parity checking
"Wide negotiation" is supported for chips that allow it. The
-following table shows some characteristics of NCR 8xx family chips
+following table shows some characteristics of NCR 8xx family chips
and what drivers support them.
- Supported by Supported by
- On board the generic the enhanced
-Chip SDMS BIOS Wide SCSI std. Max. sync driver driver
----- --------- ---- --------- ---------- ------------ -------------
-810 N N FAST10 10 MB/s Y N
-810A N N FAST10 10 MB/s Y Y
-815 Y N FAST10 10 MB/s Y N
-825 Y Y FAST10 20 MB/s Y N
-825A Y Y FAST10 20 MB/s Y Y
-860 N N FAST20 20 MB/s Y Y
-875 Y Y FAST20 40 MB/s Y Y
-876 Y Y FAST20 40 MB/s Y Y
-895 Y Y FAST40 80 MB/s Y Y
-895A Y Y FAST40 80 MB/s Y Y
-896 Y Y FAST40 80 MB/s Y Y
-897 Y Y FAST40 80 MB/s Y Y
-1510D Y Y FAST40 80 MB/s Y Y
-1010 Y Y FAST80 160 MB/s N Y
-1010_66* Y Y FAST80 160 MB/s N Y
-
-* Chip supports 33MHz and 66MHz PCI buses.
++--------+-----------+-----+-----------+------------+------------+------------+
+| | | | | |Supported by|Supported by|
+| |On board | | | |the generic |the enhanced|
+|Chip |SDMS BIOS |Wide |SCSI std. | Max. sync |driver |driver |
++--------+-----------+-----+-----------+------------+------------+------------+
+|810 | N | N | FAST10 | 10 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+------------+
+|810A | N | N | FAST10 | 10 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|815 | Y | N | FAST10 | 10 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+------------+
+|825 | Y | Y | FAST10 | 20 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+------------+
+|825A | Y | Y | FAST10 | 20 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|860 | N | N | FAST20 | 20 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|875 | Y | Y | FAST20 | 40 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|876 | Y | Y | FAST20 | 40 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|895 | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|895A | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|896 | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|897 | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|1510D | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|1010 | Y | Y | FAST80 |160 MB/s | N | Y |
++--------+-----------+-----+-----------+------------+------------+------------+
+|1010_66 | Y | Y | FAST80 |160 MB/s | N | Y |
+|[1]_ | | | | | | |
++--------+-----------+-----+-----------+------------+------------+------------+
+
+.. [1] Chip supports 33MHz and 66MHz PCI buses.
Summary of other supported features:
-Module: allow to load the driver
-Memory mapped I/O: increases performance
-Profiling information: read operations from the proc SCSI file system
-Control commands: write operations to the proc SCSI file system
-Debugging information: written to syslog (expert only)
-Scatter / gather
-Shared interrupt
-Boot setup commands
-Serial NVRAM: Symbios and Tekram formats
+:Module: allow to load the driver
+:Memory mapped I/O: increases performance
+:Profiling information: read operations from the proc SCSI file system
+:Control commands: write operations to the proc SCSI file system
+:Debugging information: written to syslog (expert only)
+:Serial NVRAM: Symbios and Tekram formats
+
+- Scatter / gather
+- Shared interrupt
+- Boot setup commands
3. Advantages of the enhanced 896 driver
+========================================
-3.1 Optimized SCSI SCRIPTS.
+3.1 Optimized SCSI SCRIPTS
+--------------------------
-The 810A, 825A, 875, 895, 896 and 895A support new SCSI SCRIPTS instructions
-named LOAD and STORE that allow to move up to 1 DWORD from/to an IO register
-to/from memory much faster that the MOVE MEMORY instruction that is supported
+The 810A, 825A, 875, 895, 896 and 895A support new SCSI SCRIPTS instructions
+named LOAD and STORE that allow moving up to 1 DWORD from/to an I/O register
+to/from memory much faster than the MOVE MEMORY instruction that is supported
by the 53c7xx and 53c8xx family.
-The LOAD/STORE instructions support absolute and DSA relative addressing
-modes. The SCSI SCRIPTS had been entirely rewritten using LOAD/STORE instead
+The LOAD/STORE instructions support absolute and DSA relative addressing
+modes. The SCSI SCRIPTS had been entirely rewritten using LOAD/STORE instead
of MOVE MEMORY instructions.
3.2 New features of the SYM53C896 (64 bit PCI dual LVD SCSI controller)
+-----------------------------------------------------------------------
-The 896 and the 895A allows handling of the phase mismatch context from
-SCRIPTS (avoids the phase mismatch interrupt that stops the SCSI processor
+The 896 and the 895A allow handling of the phase mismatch context from
+SCRIPTS (avoids the phase mismatch interrupt that stops the SCSI processor
until the C code has saved the context of the transfer).
-Implementing this without using LOAD/STORE instructions would be painful
+Implementing this without using LOAD/STORE instructions would be painful
and I didn't even want to try it.
-The 896 chip supports 64 bit PCI transactions and addressing, while the
+The 896 chip supports 64 bit PCI transactions and addressing, while the
895A supports 32 bit PCI transactions and 64 bit addressing.
-The SCRIPTS processor of these chips is not true 64 bit, but uses segment
-registers for bit 32-63. Another interesting feature is that LOAD/STORE
+The SCRIPTS processor of these chips is not true 64 bit, but uses segment
+registers for bit 32-63. Another interesting feature is that LOAD/STORE
instructions that address the on-chip RAM (8k) remain internal to the chip.
-Due to the use of LOAD/STORE SCRIPTS instructions, this driver does not
+Due to the use of LOAD/STORE SCRIPTS instructions, this driver does not
support the following chips:
+
- SYM53C810 revision < 0x10 (16)
- SYM53C815 all revisions
- SYM53C825 revision < 0x10 (16)
4. Memory mapped I/O versus normal I/O
+======================================
Memory mapped I/O has less latency than normal I/O. Since
linux-1.3.x, memory mapped I/O is used rather than normal I/O. Memory
@@ -233,17 +267,18 @@ driver to use normal I/O in all cases.
5. Tagged command queueing
+==========================
-Queuing more than 1 command at a time to a device allows it to perform
-optimizations based on actual head positions and its mechanical
+Queuing more than 1 command at a time to a device allows it to perform
+optimizations based on actual head positions and its mechanical
characteristics. This feature may also reduce average command latency.
-In order to really gain advantage of this feature, devices must have
-a reasonable cache size (No miracle is to be expected for a low-end
+In order to really gain advantage of this feature, devices must have
+a reasonable cache size (No miracle is to be expected for a low-end
hard disk with 128 KB or less).
Some known SCSI devices do not properly support tagged command queuing.
-Generally, firmware revisions that fix this kind of problems are available
+Generally, firmware revisions that fix this kind of problem are available
at respective vendor web/ftp sites.
-All I can say is that the hard disks I use on my machines behave well with
+All I can say is that the hard disks I use on my machines behave well with
this driver with tagged command queuing enabled:
- IBM S12 0662
@@ -251,9 +286,9 @@ this driver with tagged command queuing enabled:
- Quantum Atlas I
- Quantum Atlas II
-If your controller has NVRAM, you can configure this feature per target
-from the user setup tool. The Tekram Setup program allows to tune the
-maximum number of queued commands up to 32. The Symbios Setup only allows
+If your controller has NVRAM, you can configure this feature per target
+from the user setup tool. The Tekram Setup program allows tuning the
+maximum number of queued commands up to 32. The Symbios Setup only allows
to enable or disable this feature.
The maximum number of simultaneous tagged commands queued to a device
@@ -261,16 +296,16 @@ is currently set to 8 by default. This value is suitable for most SCSI
disks. With large SCSI disks (>= 2GB, cache >= 512KB, average seek time
<= 10 ms), using a larger value may give better performance.
-The sym53c8xx driver supports up to 255 commands per device, and the
-generic ncr53c8xx driver supports up to 64, but using more than 32 is
-generally not worth-while, unless you are using a very large disk or disk
-array. It is noticeable that most of recent hard disks seem not to accept
-more than 64 simultaneous commands. So, using more than 64 queued commands
+The sym53c8xx driver supports up to 255 commands per device, and the
+generic ncr53c8xx driver supports up to 64, but using more than 32 is
+generally not worthwhile, unless you are using a very large disk or disk
+array. Notably, most recent hard disks seem not to accept more than 64
+simultaneous commands. So, using more than 64 queued commands
is probably just a waste of resources.
-If your controller does not have NVRAM or if it is managed by the SDMS
-BIOS/SETUP, you can configure tagged queueing feature and device queue
-depths from the boot command-line. For example:
+If your controller does not have NVRAM or if it is managed by the SDMS
+BIOS/SETUP, you can configure the tagged queueing feature and device queue
+depths from the boot command-line. For example::

	ncr53c8xx=tags:4/t2t3q15-t4q7/t1u0q32

+(Here 't2t3q15' sets queue depth 15 for targets 2 and 3 of controller 0,
+'t4q7' sets depth 7 for target 4 of controller 0, 't1u0q32' sets depth 32
+for target 1 logical unit 0 of controller 1, and all other logical units
+get depth 4; the syntax is detailed in section 10.2.6.)
@@ -286,80 +321,85 @@ In some special conditions, some SCSI disk firmwares may return a
QUEUE FULL status for a SCSI command. This behaviour is managed by the
driver using the following heuristic:
-- Each time a QUEUE FULL status is returned, tagged queue depth is reduced
- to the actual number of disconnected commands.
+- Each time a QUEUE FULL status is returned, tagged queue depth is reduced
+ to the actual number of disconnected commands.
- Every 1000 successfully completed SCSI commands, if allowed by the
current limit, the maximum number of queueable commands is incremented.
-Since QUEUE FULL status reception and handling is resource wasting, the
-driver notifies by default this problem to user by indicating the actual
-number of commands used and their status, as well as its decision on the
+Since QUEUE FULL status reception and handling wastes resources, the
+driver by default notifies the user of this problem by indicating the actual
+number of commands used and their status, as well as its decision on the
device queue depth change.
-The heuristic used by the driver in handling QUEUE FULL ensures that the
-impact on performances is not too bad. You can get rid of the messages by
+The heuristic used by the driver in handling QUEUE FULL ensures that the
+impact on performance is not too bad. You can get rid of the messages by
setting the verbose level to zero, as follows:
-1st method: boot your system using 'ncr53c8xx=verb:0' option.
-2nd method: apply "setverbose 0" control command to the proc fs entry
+1st method:
+    boot your system using the 'ncr53c8xx=verb:0' option.
+
+2nd method:
+    apply the "setverbose 0" control command to the proc fs entry
corresponding to your controller after boot-up.
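+
+For instance, assuming controller number 0, the second method would be::
+
+	echo "setverbose 0" >/proc/scsi/ncr53c8xx/0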
6. Parity checking
+==================
The driver supports SCSI parity checking and PCI bus master parity
checking. These features must be enabled in order to ensure safe data
transfers. However, some flawed devices or motherboards will have
-problems with parity. You can disable either PCI parity or SCSI parity
+problems with parity. You can disable either PCI parity or SCSI parity
checking by entering appropriate options from the boot command line.
(See 10: Boot setup commands).
7. Profiling information
+========================
Profiling information is available through the proc SCSI file system.
-Since gathering profiling information may impact performances, this
-feature is disabled by default and requires a compilation configuration
+Since gathering profiling information may impact performance, this
+feature is disabled by default and requires a compilation configuration
option to be set to Y.
-The device associated with a host has the following pathname:
+The device associated with a host has the following pathname::
/proc/scsi/ncr53c8xx/N (N=0,1,2 ....)
-Generally, only 1 board is used on hardware configuration, and that device is:
+Generally, only 1 board is used per hardware configuration, and that device is::
+
/proc/scsi/ncr53c8xx/0
However, if the driver has been built as a module, the number of the
hosts is incremented each time the driver is loaded.
-In order to display profiling information, just enter:
+In order to display profiling information, just enter::
cat /proc/scsi/ncr53c8xx/0
-and you will get something like the following text:
-
--------------------------------------------------------
-General information:
- Chip NCR53C810, device id 0x1, revision id 0x2
- IO port address 0x6000, IRQ number 10
- Using memory mapped IO at virtual address 0x282c000
- Synchronous transfer period 25, max commands per lun 4
-Profiling information:
- num_trans = 18014
- num_kbytes = 671314
- num_disc = 25763
- num_break = 1673
- num_int = 1685
- num_fly = 18038
- ms_setup = 4940
- ms_data = 369940
- ms_disc = 183090
- ms_post = 1320
--------------------------------------------------------
+and you will get something like the following text::
+
+ General information:
+ Chip NCR53C810, device id 0x1, revision id 0x2
+ IO port address 0x6000, IRQ number 10
+ Using memory mapped IO at virtual address 0x282c000
+ Synchronous transfer period 25, max commands per lun 4
+ Profiling information:
+ num_trans = 18014
+ num_kbytes = 671314
+ num_disc = 25763
+ num_break = 1673
+ num_int = 1685
+ num_fly = 18038
+ ms_setup = 4940
+ ms_data = 369940
+ ms_disc = 183090
+ ms_post = 1320
General information is easy to understand. The device ID and the
revision ID identify the SCSI chip as follows:
+======= ============= ===========
Chip Device id Revision Id
----- --------- -----------
+======= ============= ===========
810 0x1 < 0x10
810A 0x1 >= 0x10
815 0x4
@@ -368,6 +408,7 @@ Chip Device id Revision Id
825A 0x3 >= 0x10
875 0xf
895 0xc
+======= ============= ===========
The profiling information is updated upon completion of SCSI commands.
A data structure is allocated and zeroed when the host adapter is
@@ -425,15 +466,16 @@ Due to the 1/100 second tick of the system clock, "ms_post" time may
be wrong.
In the example above, we got 18038 interrupts "on the fly" and only
-1673 script breaks generally due to disconnections inside a segment
+1673 script breaks generally due to disconnections inside a segment
of the scatter list.
8. Control commands
+===================
Control commands can be sent to the driver with write operations to
the proc SCSI file system. The generic command syntax is the
-following:
+following::
echo "<verb> <parameters>" >/proc/scsi/ncr53c8xx/0
(assumes controller number is 0)
@@ -444,66 +486,81 @@ apply to all targets of the SCSI chain (except the controller).
Available commands:
8.1 Set minimum synchronous period factor
+-----------------------------------------
setsync <target> <period factor>
- target: target number
- period: minimum synchronous period.
+ :target: target number
+ :period: minimum synchronous period.
Maximum speed = 1000/(4*period factor) except for special
cases below.
Specify a period of 255 to force asynchronous transfer mode.
- 10 means 25 nano-seconds synchronous period
- 11 means 30 nano-seconds synchronous period
- 12 means 50 nano-seconds synchronous period
+ - 10 means 25 nano-seconds synchronous period
+ - 11 means 30 nano-seconds synchronous period
+ - 12 means 50 nano-seconds synchronous period
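+
+	For example, the following would request a 50 nano-seconds period
+	(factor 12) for target 2, assuming controller number 0::
+
+		echo "setsync 2 12" >/proc/scsi/ncr53c8xx/0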
8.2 Set wide size
+-----------------
setwide <target> <size>
- target: target number
- size: 0=8 bits, 1=16bits
+ :target: target number
+ :size: 0=8 bits, 1=16bits
8.3 Set maximum number of concurrent tagged commands
-
+----------------------------------------------------
+
settags <target> <tags>
- target: target number
- tags: number of concurrent tagged commands
+ :target: target number
+ :tags: number of concurrent tagged commands
must not be greater than SCSI_NCR_MAX_TAGS (default: 8)
8.4 Set order type for tagged command
+-------------------------------------
setorder <order>
- order: 3 possible values:
- simple: use SIMPLE TAG for all operations (read and write)
- ordered: use ORDERED TAG for all operations
- default: use default tag type,
+ :order: 3 possible values:
+
+ simple:
+ use SIMPLE TAG for all operations (read and write)
+
+ ordered:
+ use ORDERED TAG for all operations
+
+ default:
+	 use the default tag type:
SIMPLE TAG for read operations
ORDERED TAG for write operations
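+
+	For example, to use SIMPLE TAG for all operations, assuming
+	controller number 0::
+
+		echo "setorder simple" >/proc/scsi/ncr53c8xx/0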
8.5 Set debug mode
+------------------
setdebug <list of debug flags>
Available debug flags:
- alloc: print info about memory allocations (ccb, lcb)
- queue: print info about insertions into the command start queue
- result: print sense data on CHECK CONDITION status
- scatter: print info about the scatter process
- scripts: print info about the script binding process
- tiny: print minimal debugging information
- timing: print timing information of the NCR chip
- nego: print information about SCSI negotiations
- phase: print information on script interruptions
+
+ ======== ========================================================
+ alloc print info about memory allocations (ccb, lcb)
+ queue print info about insertions into the command start queue
+ result print sense data on CHECK CONDITION status
+ scatter print info about the scatter process
+ scripts print info about the script binding process
+ tiny print minimal debugging information
+ timing print timing information of the NCR chip
+ nego print information about SCSI negotiations
+ phase print information on script interruptions
+ ======== ========================================================
Use "setdebug" with no argument to reset debug flags.
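+
+	For example, the following would enable negotiation debugging and
+	then reset all debug flags, assuming controller number 0::
+
+		echo "setdebug nego" >/proc/scsi/ncr53c8xx/0
+		echo "setdebug" >/proc/scsi/ncr53c8xx/0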
8.6 Clear profile counters
+--------------------------
clearprof
@@ -513,7 +570,8 @@ Available commands:
8.7 Set flag (no_disc)
-
+----------------------
+
setflag <target> <flag>
target: target number
@@ -523,38 +581,47 @@ Available commands:
no_disc:   do not allow the target to disconnect.
Do not specify any flag in order to reset the flag. For example:
- - setflag 4
+
+ setflag 4
will reset the no_disc flag for target 4, thus allowing it to disconnect.
- - setflag all
+
+ setflag all
will allow disconnection for all devices on the SCSI bus.
8.8 Set verbose level
+---------------------
setverbose #level
- The driver default verbose level is 1. This command allows to change
+	The driver default verbose level is 1. This command allows changing
the driver verbose level after boot-up.
8.9 Reset all logical units of a target
+---------------------------------------
resetdev <target>
- target: target number
+ :target: target number
+
The driver will try to send a BUS DEVICE RESET message to the target.
(Only supported by the SYM53C8XX driver and provided for test purposes)
8.10 Abort all tasks of all logical units of a target
+-----------------------------------------------------
cleardev <target>
- target: target number
- The driver will try to send a ABORT message to all the logical units
+ :target: target number
+
+	The driver will try to send an ABORT message to all the logical units
of the target.
+
(Only supported by the SYM53C8XX driver and provided for test purposes)
9. Configuration parameters
+===========================
If the firmware of all your devices is perfect enough, all the
features supported by the driver can be enabled at start-up. However,
@@ -564,6 +631,7 @@ this feature after boot-up only for devices that support it safely.
CONFIG_SCSI_NCR53C8XX_IOMAPPED (default answer: n)
Answer "y" if you suspect your motherboard does not allow memory mapped I/O.
+
May slow down performance a little. This option is required by
Linux/PPC and is used no matter what you select here. Linux/PPC
suffers no performance loss with this option since all IO is memory
@@ -573,35 +641,37 @@ CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS (default answer: 8)
Default tagged command queue depth.
CONFIG_SCSI_NCR53C8XX_MAX_TAGS (default answer: 8)
- This option allows you to specify the maximum number of tagged commands
+ This option allows you to specify the maximum number of tagged commands
that can be queued to a device. The maximum supported value is 32.
CONFIG_SCSI_NCR53C8XX_SYNC (default answer: 5)
- This option allows you to specify the frequency in MHz the driver
+ This option allows you to specify the frequency in MHz the driver
will use at boot time for synchronous data transfer negotiations.
This frequency can be changed later with the "setsync" control command.
0 means "asynchronous data transfers".
CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO (default answer: n)
Force synchronous negotiation for all SCSI-2 devices.
- Some SCSI-2 devices do not report this feature in byte 7 of inquiry
+
+ Some SCSI-2 devices do not report this feature in byte 7 of inquiry
response but do support it properly (TAMARACK scanners for example).
CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT (default and only reasonable answer: n)
If you suspect a device of yours does not properly support disconnections,
- you can answer "y". Then, all SCSI devices will never disconnect the bus
+	you can answer "y". Then, no SCSI device will ever disconnect the bus
even while performing long SCSI operations.
CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT
- Genuine SYMBIOS boards use GPIO0 in output for controller LED and GPIO3
+ Genuine SYMBIOS boards use GPIO0 in output for controller LED and GPIO3
bit as a flag indicating single-ended/differential interface.
If all the boards of your system are genuine SYMBIOS boards or use
BIOS and drivers from SYMBIOS, you would want to enable this option.
- This option must NOT be enabled if your system has at least one 53C8XX
+
+ This option must NOT be enabled if your system has at least one 53C8XX
based scsi board with a vendor-specific BIOS.
- For example, Tekram DC-390/U, DC-390/W and DC-390/F scsi controllers
- use a vendor-specific BIOS and are known to not use SYMBIOS compatible
- GPIO wiring. So, this option must not be enabled if your system has
+ For example, Tekram DC-390/U, DC-390/W and DC-390/F scsi controllers
+ use a vendor-specific BIOS and are known to not use SYMBIOS compatible
+ GPIO wiring. So, this option must not be enabled if your system has
such a board installed.
CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
@@ -610,7 +680,7 @@ CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
systems with more than one Symbios compatible controller where at least
one has a serial NVRAM, or for a system with a mixture of Symbios and
Tekram cards. Enables setting the boot order of host adaptors
- to something other than the default order or "reverse probe" order.
+ to something other than the default order or "reverse probe" order.
Also enables Symbios and Tekram cards to be distinguished so
CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT may be set in a system with a
mixture of Symbios and Tekram cards so the Symbios cards can make use of
@@ -618,243 +688,364 @@ CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
causing problems for the Tekram card(s).
10. Boot setup commands
+=======================
10.1 Syntax
+-----------
-Setup commands can be passed to the driver either at boot time or as a
+Setup commands can be passed to the driver either at boot time or as a
string variable using 'insmod'.
-A boot setup command for the ncr53c8xx (sym53c8xx) driver begins with the
-driver name "ncr53c8xx="(sym53c8xx). The kernel syntax parser then expects
-an optional list of integers separated with comma followed by an optional
-list of comma-separated strings. Example of boot setup command under lilo
-prompt:
+A boot setup command for the ncr53c8xx (sym53c8xx) driver begins with the
+driver name "ncr53c8xx=" ("sym53c8xx="). The kernel syntax parser then expects
+an optional list of integers separated by commas, followed by an optional
+list of comma-separated strings. Example of a boot setup command under the lilo
+prompt::
-lilo: linux root=/dev/hda2 ncr53c8xx=tags:4,sync:10,debug:0x200
+ lilo: linux root=/dev/hda2 ncr53c8xx=tags:4,sync:10,debug:0x200
- enable tagged commands, up to 4 tagged commands queued.
- set synchronous negotiation speed to 10 Mega-transfers / second.
- set DEBUG_NEGO flag.
-Since comma seems not to be allowed when defining a string variable using
-'insmod', the driver also accepts <space> as option separator.
-The following command will install driver module with the same options as
-above.
+Since commas seem not to be allowed when defining a string variable using
+'insmod', the driver also accepts <space> as an option separator.
+The following command will install the driver module with the same options as
+above::
insmod ncr53c8xx.o ncr53c8xx="tags:4 sync:10 debug:0x200"
-For the moment, the integer list of arguments is discarded by the driver.
+For the moment, the integer list of arguments is discarded by the driver.
It will be used in the future in order to allow a per controller setup.
-Each string argument must be specified as "keyword:value". Only lower-case
+Each string argument must be specified as "keyword:value". Only lower-case
characters and digits are allowed.
-In a system that contains multiple 53C8xx adapters insmod will install the
+In a system that contains multiple 53C8xx adapters, insmod will install the
specified driver on each adapter. To exclude a chip use the 'excl' keyword.
-The sequence of commands,
+The sequence of commands::
insmod sym53c8xx sym53c8xx=excl:0x1400
insmod ncr53c8xx
-installs the sym53c8xx driver on all adapters except the one at IO port
-address 0x1400 and then installs the ncr53c8xx driver to the adapter at IO
+installs the sym53c8xx driver on all adapters except the one at IO port
+address 0x1400 and then installs the ncr53c8xx driver to the adapter at IO
port address 0x1400.
10.2 Available arguments
+------------------------
10.2.1 Master parity checking
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ====== ========
mpar:y enabled
mpar:n disabled
+ ====== ========
10.2.2 Scsi parity checking
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ====== ========
spar:y enabled
spar:n disabled
+ ====== ========
10.2.3 Scsi disconnections
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ====== ========
disc:y enabled
disc:n disabled
-
+ ====== ========
+
10.2.4 Special features
+^^^^^^^^^^^^^^^^^^^^^^^^
+
This option only applies to 810A, 825A, 860, 875 and 895 controllers.
It has no effect on other ones.
+
+ ======= =================================================
specf:y (or 1) enabled
specf:n (or 0) disabled
specf:3 enabled except Memory Write And Invalidate
- The default driver setup is 'specf:3'. As a consequence, option 'specf:y'
- must be specified in the boot setup command to enable Memory Write And
+ ======= =================================================
+
+ The default driver setup is 'specf:3'. As a consequence, option 'specf:y'
+ must be specified in the boot setup command to enable Memory Write And
Invalidate.
10.2.5 Ultra SCSI support
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
This option only applies to 860, 875, 895, 895a, 896, 1010 and 1010_66
controllers. It has no effect on other ones.
+
+ ======= ========================
ultra:n All ultra speeds enabled
ultra:2 Ultra2 enabled
ultra:1 Ultra enabled
ultra:0 Ultra speeds disabled
+ ======= ========================
10.2.6 Default number of tagged commands
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ======================= ===============================
tags:0 (or tags:1 ) tagged command queuing disabled
tags:#tags (#tags > 1) tagged command queuing enabled
+ ======================= ===============================
+
#tags will be truncated to the max queued commands configuration parameter.
- This option also allows to specify a command queue depth for each device
+	This option also allows specifying a command queue depth for each device
that supports tagged command queueing.
- Example:
+
+ Example::
+
ncr53c8xx=tags:10/t2t3q16-t5q24/t1u2q32
- will set devices queue depth as follow:
+
+	will set device queue depths as follows:
+
- controller #0 target #2 and target #3 -> 16 commands,
- controller #0 target #5 -> 24 commands,
- controller #1 target #1 logical unit #2 -> 32 commands,
- all other logical units (all targets, all controllers) -> 10 commands.
10.2.7 Default synchronous period factor
- sync:255 disabled (asynchronous transfer mode)
- sync:#factor
- #factor = 10 Ultra-2 SCSI 40 Mega-transfers / second
- #factor = 11 Ultra-2 SCSI 33 Mega-transfers / second
- #factor < 25 Ultra SCSI 20 Mega-transfers / second
- #factor < 50 Fast SCSI-2
-
- In all cases, the driver will use the minimum transfer period supported by
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+============ ========================================================
+sync:255 disabled (asynchronous transfer mode)
+sync:#factor
+ ============ =======================================
+ #factor = 10 Ultra-2 SCSI 40 Mega-transfers / second
+ #factor = 11 Ultra-2 SCSI 33 Mega-transfers / second
+ #factor < 25 Ultra SCSI 20 Mega-transfers / second
+ #factor < 50 Fast SCSI-2
+ ============ =======================================
+============ ========================================================
+
+ In all cases, the driver will use the minimum transfer period supported by
the controller according to the NCR53C8XX chip type.
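+
+As a worked example of the "1000/(4*period factor)" rule from section 8.1,
+'sync:25' requests a 100 nano-seconds period, i.e. 10 Mega-transfers /
+second (fast SCSI-2)::
+
+	ncr53c8xx=sync:25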
10.2.8 Negotiate synchronous with all devices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(force sync nego)
+
+ ===== =========
fsn:y enabled
fsn:n disabled
+ ===== =========
10.2.9 Verbosity level
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ ====== =========
verb:0 minimal
verb:1 normal
verb:2 too much
+ ====== =========
10.2.10 Debug mode
- debug:0 clear debug flags
- debug:#x set debug flags
- #x is an integer value combining the following power-of-2 values:
- DEBUG_ALLOC 0x1
- DEBUG_PHASE 0x2
- DEBUG_POLL 0x4
- DEBUG_QUEUE 0x8
- DEBUG_RESULT 0x10
- DEBUG_SCATTER 0x20
- DEBUG_SCRIPT 0x40
- DEBUG_TINY 0x80
- DEBUG_TIMING 0x100
- DEBUG_NEGO 0x200
- DEBUG_TAGS 0x400
- DEBUG_FREEZE 0x800
- DEBUG_RESTART 0x1000
-
- You can play safely with DEBUG_NEGO. However, some of these flags may
- generate bunches of syslog messages.
+^^^^^^^^^^^^^^^^^^
+
+======== ==================================================================
+debug:0 clear debug flags
+debug:#x set debug flags
+
+ #x is an integer value combining the following power-of-2 values:
+
+ ============= ======
+ DEBUG_ALLOC 0x1
+ DEBUG_PHASE 0x2
+ DEBUG_POLL 0x4
+ DEBUG_QUEUE 0x8
+ DEBUG_RESULT 0x10
+ DEBUG_SCATTER 0x20
+ DEBUG_SCRIPT 0x40
+ DEBUG_TINY 0x80
+ DEBUG_TIMING 0x100
+ DEBUG_NEGO 0x200
+ DEBUG_TAGS 0x400
+ DEBUG_FREEZE 0x800
+ DEBUG_RESTART 0x1000
+ ============= ======
+======== ==================================================================
+
+ You can play safely with DEBUG_NEGO. However, some of these flags may
+ generate bunches of syslog messages.
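+
+For example, combining DEBUG_NEGO (0x200) with DEBUG_PHASE (0x2) gives
+0x202; any other combination of the values above is built the same way::
+
+	ncr53c8xx=debug:0x202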
10.2.11 Burst max
- burst:0 burst disabled
- burst:255 get burst length from initial IO register settings.
- burst:#x burst enabled (1<<#x burst transfers max)
- #x is an integer value which is log base 2 of the burst transfers max.
- The NCR53C875 and NCR53C825A support up to 128 burst transfers (#x = 7).
- Other chips only support up to 16 (#x = 4).
- This is a maximum value. The driver set the burst length according to chip
- and revision ids. By default the driver uses the maximum value supported
- by the chip.
+^^^^^^^^^^^^^^^^^
+
+========= ==================================================================
+burst:0 burst disabled
+burst:255 get burst length from initial IO register settings.
+burst:#x burst enabled (1<<#x burst transfers max)
+
+ #x is an integer value which is log base 2 of the burst transfers
+ max.
+
+ The NCR53C875 and NCR53C825A support up to 128 burst transfers
+ (#x = 7).
+
+ Other chips only support up to 16 (#x = 4).
+
+           This is a maximum value. The driver sets the burst length according
+           to chip and revision ids. By default the driver uses the maximum
+           value supported by the chip.
+========= ==================================================================
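+
+For example, 'burst:6' would allow up to 1<<6 = 64 burst transfers (an
+illustrative value, only meaningful on chips that support it)::
+
+	ncr53c8xx=burst:6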
10.2.12 LED support
+^^^^^^^^^^^^^^^^^^^
+
+ ===== ===================
led:1 enable LED support
led:0 disable LED support
+ ===== ===================
+
Do not enable LED support if your scsi board does not use SDMS BIOS.
(See 'Configuration parameters')
10.2.13 Max wide
+^^^^^^^^^^^^^^^^
+
+ ====== ===================
wide:1 wide scsi enabled
wide:0 wide scsi disabled
+ ====== ===================
+
Some scsi boards use a 875 (ultra wide) and only supply narrow connectors.
- If you have connected a wide device with a 50 pins to 68 pins cable
+  If you have connected a wide device with a 50-pin to 68-pin cable
converter, any accepted wide negotiation will break further data transfers.
- In such a case, using "wide:0" in the bootup command will be helpful.
+ In such a case, using "wide:0" in the bootup command will be helpful.
10.2.14 Differential mode
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ====== =================================
diff:0 never set up diff mode
diff:1 set up diff mode if BIOS set it
diff:2 always set up diff mode
diff:3 set diff mode if GPIO3 is not set
+ ====== =================================
10.2.15 IRQ mode
+^^^^^^^^^^^^^^^^
+
+ ========= ========================================================
irqm:0 always open drain
irqm:1 same as initial settings (assumed BIOS settings)
irqm:2 always totem pole
irqm:0x10 driver will not use IRQF_SHARED flag when requesting irq
+ ========= ========================================================
(Bits 0x10 and 0x20 can be combined with hardware irq mode option)
10.2.16 Reverse probe
+^^^^^^^^^^^^^^^^^^^^^
+
+ ========= ========================================================
revprob:n probe chip ids from the PCI configuration in this order:
810, 815, 820, 860, 875, 885, 895, 896
revprob:y probe chip ids in the reverse order.
+ ========= ========================================================
10.2.17 Fix up PCI configuration space
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
pcifix:<option bits>
Available option bits:
- 0x0: No attempt to fix PCI configuration space registers values.
- 0x1: Set PCI cache-line size register if not set.
- 0x2: Set write and invalidate bit in PCI command register.
- 0x4: Increase if necessary PCI latency timer according to burst max.
+
+ === ===============================================================
+    0x0 No attempt to fix PCI configuration space register values.
+ 0x1 Set PCI cache-line size register if not set.
+ 0x2 Set write and invalidate bit in PCI command register.
+ 0x4 Increase if necessary PCI latency timer according to burst max.
+ === ===============================================================
Use 'pcifix:7' in order to allow the driver to fix up all PCI features.
10.2.18 Serial NVRAM
+^^^^^^^^^^^^^^^^^^^^
+
+ ======= =========================================
nvram:n do not look for serial NVRAM
nvram:y test controllers for onboard serial NVRAM
+ ======= =========================================
+
(alternate binary form)
nvram:<bits options>
+
+ ==== =================================================================
0x01 look for NVRAM (equivalent to nvram=y)
0x02 ignore NVRAM "Synchronous negotiation" parameters for all devices
0x04 ignore NVRAM "Wide negotiation" parameter for all devices
0x08 ignore NVRAM "Scan at boot time" parameter for all devices
0x80 also attach controllers set to OFF in the NVRAM (sym53c8xx only)
+ ==== =================================================================
+
+10.2.19 Check SCSI BUS
+^^^^^^^^^^^^^^^^^^^^^^
-10.2.19 Check SCSI BUS
buschk:<option bits>
Available option bits:
+
+ ==== ================================================
0x0  No check.
- 0x1: Check and do not attach the controller on error.
+    0x1  Check and do not attach the controller on error.
0x2  Check and just warn on error.
0x4  Disable SCSI bus integrity checking.
+ ==== ================================================
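+
+	For example, 'buschk:0x2' would check the SCSI BUS and just warn on
+	error; this is an illustrative value built from the option bits above::
+
+		ncr53c8xx=buschk:0x2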
10.2.20 Exclude a host from being attached
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
excl=<io_address>
Prevent host at a given io address from being attached.
- For example 'ncr53c8xx=excl:0xb400,excl:0xc000' indicate to the
+	For example 'ncr53c8xx=excl:0xb400,excl:0xc000' indicates to the
ncr53c8xx driver not to attach hosts at addresses 0xb400 and 0xc000.
10.2.21 Suggest a default SCSI id for hosts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ========== ==========================================
hostid:255 no id suggested.
- hostid:#x (0 < x < 7) x suggested for hosts SCSI id.
+  hostid:#x  (0 < x < 7) x suggested for the host SCSI id.
+ ========== ==========================================
- If a host SCSI id is available from the NVRAM, the driver will ignore
- any value suggested as boot option. Otherwise, if a suggested value
- different from 255 has been supplied, it will use it. Otherwise, it will
- try to deduce the value previously set in the hardware and use value
+ If a host SCSI id is available from the NVRAM, the driver will ignore
+  any value suggested as a boot option. Otherwise, if a suggested value
+ different from 255 has been supplied, it will use it. Otherwise, it will
+ try to deduce the value previously set in the hardware and use value
7 if the hardware value is zero.
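+
+For example, 'hostid:6' would suggest SCSI id 6 for the host, subject to
+the NVRAM precedence described above (an illustrative value)::
+
+	ncr53c8xx=hostid:6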
10.2.22 Enable use of IMMEDIATE ARBITRATION
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
(only supported by the sym53c8xx driver. See 10.7 for more details)
- iarb:0 do not use this feature.
- iarb:#x use this feature according to bit fields as follow:
- bit 0 (1) : enable IARB each time the initiator has been reselected
- when it arbitrated for the SCSI BUS.
- (#x >> 4) : maximum number of successive settings of IARB if the initiator
- win arbitration and it has other commands to send to a device.
+======= =================================================================
+iarb:0 do not use this feature.
+iarb:#x use this feature according to bit fields as follows:
+
+ ========= =======================================================
+ bit 0 (1) enable IARB each time the initiator has been reselected
+ when it arbitrated for the SCSI BUS.
+ (#x >> 4) maximum number of successive settings of IARB if the
+                     initiator wins arbitration and it has other commands
+ to send to a device.
+ ========= =======================================================
+======= =================================================================
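+
+As a hedged illustration of these bit fields, 'iarb:0x11' would enable
+IARB on reselection (bit 0 set) with at most 1 successive IARB setting
+(0x11 >> 4 = 1)::
+
+	sym53c8xx=iarb:0x11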
Boot fail safe
safe:y load the following assumed fail safe initial setup
+ ======================== ====================== ==========
master parity disabled mpar:n
scsi parity enabled spar:y
disconnections not allowed disc:n
@@ -876,189 +1067,222 @@ Boot fail safe
irq mode from BIOS settings irqm:1
SCSI BUS check do not attach on error buschk:1
immediate arbitration disabled iarb:0
+ ======================== ====================== ==========
10.3 Advised boot setup commands
+--------------------------------
-If the driver has been configured with default options, the equivalent
-boot setup is:
+If the driver has been configured with default options, the equivalent
+boot setup is::
ncr53c8xx=mpar:y,spar:y,disc:y,specf:3,fsn:n,ultra:2,revprob:n,verb:1\
tags:0,sync:50,debug:0,burst:7,led:0,wide:1,settle:2,diff:0,irqm:0
For an installation diskette or a safe but not fast system,
-boot setup can be:
+boot setup can be::
ncr53c8xx=safe:y,mpar:y,disc:y
ncr53c8xx=safe:y,disc:y
ncr53c8xx=safe:y,mpar:y
ncr53c8xx=safe:y
-My personal system works flawlessly with the following equivalent setup:
+My personal system works flawlessly with the following equivalent setup::
ncr53c8xx=mpar:y,spar:y,disc:y,specf:1,fsn:n,ultra:2,revprob:n,verb:1\
tags:32,sync:12,debug:0,burst:7,led:1,wide:1,settle:2,diff:0,irqm:0
-The driver prints its actual setup when verbosity level is 2. You can try
-"ncr53c8xx=verb:2" to get the "static" setup of the driver, or add "verb:2"
-to your boot setup command in order to check the actual setup the driver is
+The driver prints its actual setup when verbosity level is 2. You can try
+"ncr53c8xx=verb:2" to get the "static" setup of the driver, or add "verb:2"
+to your boot setup command in order to check the actual setup the driver is
using.
10.4 PCI configuration fix-up boot option
+-----------------------------------------
pcifix:<option bits>
Available option bits:
- 0x1: Set PCI cache-line size register if not set.
- 0x2: Set write and invalidate bit in PCI command register.
+
+ === =====================================================
+ 0x1 Set PCI cache-line size register if not set.
+ 0x2 Set write and invalidate bit in PCI command register.
+ === =====================================================
Use 'pcifix:3' in order to allow the driver to fix both PCI features.
-These options only apply to new SYMBIOS chips 810A, 825A, 860, 875
+These options only apply to new SYMBIOS chips 810A, 825A, 860, 875
and 895 and are only supported for Pentium and 486 class processors.
-Recent SYMBIOS 53C8XX scsi processors are able to use PCI read multiple
-and PCI write and invalidate commands. These features require the
-cache line size register to be properly set in the PCI configuration
-space of the chips. On the other hand, chips will use PCI write and
-invalidate commands only if the corresponding bit is set to 1 in the
+Recent SYMBIOS 53C8XX scsi processors are able to use PCI read multiple
+and PCI write and invalidate commands. These features require the
+cache line size register to be properly set in the PCI configuration
+space of the chips. On the other hand, chips will use PCI write and
+invalidate commands only if the corresponding bit is set to 1 in the
PCI command register.
+Not all PCI BIOSes set the PCI cache line register and the PCI write and
+Not all PCI bioses set the PCI cache line register and the PCI write and
invalidate bit in the PCI configuration space of 53C8XX chips.
-Optimized PCI accesses may be broken for some PCI/memory controllers or
+Optimized PCI accesses may be broken for some PCI/memory controllers or
cause problems with some PCI boards.
This fix-up worked flawlessly on my previous system.
(MB Triton HX / 53C875 / 53C810A)
-I use these options at my own risks as you will do if you decide to
+I use these options at my own risk, as will you if you decide to
use them too.
10.5 Serial NVRAM support boot option
+-------------------------------------
+======= =========================================
nvram:n do not look for serial NVRAM
nvram:y test controllers for onboard serial NVRAM
+======= =========================================
-This option can also been entered as an hexadecimal value that allows
-to control what information the driver will get from the NVRAM and what
+This option can also be entered as a hexadecimal value that allows you
+to control what information the driver will get from the NVRAM and what
information it will ignore.
For details see '17. Serial NVRAM support'.
-When this option is enabled, the driver tries to detect all boards using
+When this option is enabled, the driver tries to detect all boards using
a Serial NVRAM. This memory is used to hold user set up parameters.
-The parameters the driver is able to get from the NVRAM depend on the
+The parameters the driver is able to get from the NVRAM depend on the
data format used, as follows:
- Tekram format Symbios format
-General and host parameters
- Boot order N Y
- Host SCSI ID Y Y
- SCSI parity checking Y Y
- Verbose boot messages N Y
-SCSI devices parameters
- Synchronous transfer speed Y Y
- Wide 16 / Narrow Y Y
- Tagged Command Queuing enabled Y Y
- Disconnections enabled Y Y
- Scan at boot time N Y
-
-In order to speed up the system boot, for each device configured without
-the "scan at boot time" option, the driver forces an error on the
++-------------------------------+------------------+--------------+
+| |Tekram format |Symbios format|
++-------------------------------+------------------+--------------+
+|General and host parameters | | |
++-------------------------------+------------------+--------------+
+| * Boot order | N | Y |
++-------------------------------+------------------+--------------+
+| * Host SCSI ID | Y | Y |
++-------------------------------+------------------+--------------+
+| * SCSI parity checking | Y | Y |
++-------------------------------+------------------+--------------+
+| * Verbose boot messages | N | Y |
++-------------------------------+------------------+--------------+
+|SCSI devices parameters |
++-------------------------------+------------------+--------------+
+| * Synchronous transfer speed | Y | Y |
++-------------------------------+------------------+--------------+
+| * Wide 16 / Narrow | Y | Y |
++-------------------------------+------------------+--------------+
+| * Tagged Command Queuing | Y | Y |
+| enabled | | |
++-------------------------------+------------------+--------------+
+| * Disconnections enabled | Y | Y |
++-------------------------------+------------------+--------------+
+| * Scan at boot time | N | Y |
++-------------------------------+------------------+--------------+
+
+In order to speed up the system boot, for each device configured without
+the "scan at boot time" option, the driver forces an error on the
first TEST UNIT READY command received for this device.
-Some SDMS BIOS revisions seem to be unable to boot cleanly with very fast
-hard disks. In such a situation you cannot configure the NVRAM with
+Some SDMS BIOS revisions seem to be unable to boot cleanly with very fast
+hard disks. In such a situation you cannot configure the NVRAM with
optimized parameter values.
-The 'nvram' boot option can be entered in hexadecimal form in order
+The 'nvram' boot option can be entered in hexadecimal form in order
+to ignore some options configured in the NVRAM, as follows:
nvram:<bits options>
+
+ ==== =================================================================
0x01 look for NVRAM (equivalent to nvram=y)
0x02 ignore NVRAM "Synchronous negotiation" parameters for all devices
0x04 ignore NVRAM "Wide negotiation" parameter for all devices
0x08 ignore NVRAM "Scan at boot time" parameter for all devices
0x80 also attach controllers set to OFF in the NVRAM (sym53c8xx only)
+ ==== =================================================================
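+
+For example, combining 0x01 (look for NVRAM) with 0x08 (ignore "Scan at
+boot time") gives 0x09; this is an illustrative value built from the bits
+above::
+
+	ncr53c8xx=nvram:0x09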
-Option 0x80 is only supported by the sym53c8xx driver and is disabled by
-default. Result is that, by default (option not set), the sym53c8xx driver
+Option 0x80 is only supported by the sym53c8xx driver and is disabled by
+default. The result is that, by default (option not set), the sym53c8xx driver
will not attach controllers set to OFF in the NVRAM.
-The ncr53c8xx always tries to attach all the controllers. Option 0x80 has
-not been added to the ncr53c8xx driver, since it has been reported to
-confuse users who use this driver since a long time. If you desire a
-controller not to be attached by the ncr53c8xx driver at Linux boot, you
+The ncr53c8xx driver always tries to attach all the controllers. Option 0x80
+has not been added to the ncr53c8xx driver, since it has been reported to
+confuse users who have used this driver for a long time. If you do not want
+a controller to be attached by the ncr53c8xx driver at Linux boot, you
must use the 'excl' driver boot option.
10.6 SCSI BUS checking boot option
+----------------------------------
-When this option is set to a non-zero value, the driver checks SCSI lines
+When this option is set to a non-zero value, the driver checks the SCSI
lines' logic state 100 micro-seconds after having asserted the SCSI RESET
line. The driver just reads the SCSI lines and checks that all lines read
FALSE, except RESET.
-Since SCSI devices shall release the BUS at most 800 nano-seconds after SCSI
+Since SCSI devices shall release the BUS at most 800 nano-seconds after SCSI
RESET has been asserted, any signal reading TRUE may indicate a SCSI BUS problem.
Unfortunately, the following common SCSI BUS problems are not detected:
+
- Only 1 terminator installed.
- Misplaced terminators.
- Bad quality terminators.
-On the other hand, either bad cabling, broken devices, not conformant
+
+On the other hand, bad cabling, broken devices, or non-conformant
devices may cause a SCSI signal to be wrong when the driver reads it.
10.7 IMMEDIATE ARBITRATION boot option
+--------------------------------------
This option is only supported by the SYM53C8XX driver (not by the NCR53C8XX).
-SYMBIOS 53C8XX chips are able to arbitrate for the SCSI BUS as soon as they
-have detected an expected disconnection (BUS FREE PHASE). For this process
-to be started, bit 1 of SCNTL1 IO register must be set when the chip is
+SYMBIOS 53C8XX chips are able to arbitrate for the SCSI BUS as soon as they
+have detected an expected disconnection (BUS FREE PHASE). For this process
+to be started, bit 1 of SCNTL1 IO register must be set when the chip is
connected to the SCSI BUS.
-When this feature has been enabled for the current connection, the chip has
-every chance to win arbitration if only devices with lower priority are
-competing for the SCSI BUS. By the way, when the chip is using SCSI id 7,
+When this feature has been enabled for the current connection, the chip has
+every chance to win arbitration if only devices with lower priority are
+competing for the SCSI BUS. Incidentally, when the chip is using SCSI id 7,
it is certain to win the next SCSI BUS arbitration.
-Since, there is no way to know what devices are trying to arbitrate for the
+Since there is no way to know what devices are trying to arbitrate for the
BUS, using this feature can be extremely unfair. So, you are not advised
-to enable it, or at most enable this feature for the case the chip lost
+to enable it, or at most to enable it for the case where the chip lost
the previous arbitration (boot option 'iarb:1').
This feature has the following advantages:
a) Allow the initiator with ID 7 to win arbitration when it wants to.
-b) Overlap at least 4 micro-seconds of arbitration time with the execution
- of SCRIPTS that deal with the end of the current connection and that
+b) Overlap at least 4 micro-seconds of arbitration time with the execution
+ of SCRIPTS that deal with the end of the current connection and that
start the next job.
-Hmmm... But (a) may just prevent other devices from reselecting the initiator,
-and delay data transfers or status/completions, and (b) may just waste
+Hmmm... But (a) may just prevent other devices from reselecting the initiator,
+and delay data transfers or status/completions, and (b) may just waste
SCSI BUS bandwidth if the SCRIPTS execution lasts more than 4 micro-seconds.
-The use of IARB needs the SCSI_NCR_IARB_SUPPORT option to have been defined
-at compile time and the 'iarb' boot option to have been set to a non zero
-value at boot time. It is not that useful for real work, but can be used
-to stress SCSI devices or for some applications that can gain advantage of
-it. By the way, if you experience badnesses like 'unexpected disconnections',
-'bad reselections', etc... when using IARB on heavy IO load, you should not
-be surprised, because force-feeding anything and blocking its arse at the
+The use of IARB needs the SCSI_NCR_IARB_SUPPORT option to have been defined
+at compile time and the 'iarb' boot option to have been set to a non zero
+value at boot time. It is not that useful for real work, but can be used
+to stress SCSI devices or for some applications that can take advantage of
+it. By the way, if you experience badnesses like 'unexpected disconnections',
+'bad reselections', etc... when using IARB on heavy IO load, you should not
+be surprised, because force-feeding anything and blocking its arse at the
same time cannot work for a long time. :-))
11. Some constants and flags of the ncr53c8xx.h header file
+===========================================================
Some of these are defined from the configuration parameters. To
change other "defines", you must edit the header file. Do that only
if you know what you are doing.
SCSI_NCR_SETUP_SPECIAL_FEATURES (default: defined)
- If defined, the driver will enable some special features according
+ If defined, the driver will enable some special features according
to chip and revision id.
- For 810A, 860, 825A, 875 and 895 scsi chips, this option enables
- support of features that reduce load of PCI bus and memory accesses
- during scsi transfer processing: burst op-code fetch, read multiple,
- read line, prefetch, cache line, write and invalidate,
+
+ For 810A, 860, 825A, 875 and 895 scsi chips, this option enables
+ support of features that reduce load of PCI bus and memory accesses
+ during scsi transfer processing: burst op-code fetch, read multiple,
+ read line, prefetch, cache line, write and invalidate,
burst 128 (875 only), large dma fifo (875 only), offset 16 (875 only).
- Can be changed by the following boot setup command:
+ Can be changed by the following boot setup command::
+
ncr53c8xx=specf:n
SCSI_NCR_IOMAPPED (default: not defined)
@@ -1066,22 +1290,26 @@ SCSI_NCR_IOMAPPED (default: not defined)
SCSI_NCR_SHARE_IRQ (default: defined)
If defined, request shared IRQ.
-
+
SCSI_NCR_MAX_TAGS (default: 8)
Maximum number of simultaneous tagged commands to a device.
+
Can be changed by "settags <target> <maxtags>"
SCSI_NCR_SETUP_DEFAULT_SYNC (default: 50)
- Transfer period factor the driver will use at boot time for synchronous
+ Transfer period factor the driver will use at boot time for synchronous
negotiation. 0 means asynchronous.
+
Can be changed by "setsync <target> <period factor>"
SCSI_NCR_SETUP_DEFAULT_TAGS (default: 8)
Default number of simultaneous tagged commands to a device.
+
< 1 means tagged command queuing disabled at start-up.
SCSI_NCR_ALWAYS_SIMPLE_TAG (default: defined)
Use SIMPLE TAG for read and write commands.
+
Can be changed by "setorder <ordered|simple|default>"
SCSI_NCR_SETUP_DISCONNECTION (default: defined)
@@ -1089,6 +1317,7 @@ SCSI_NCR_SETUP_DISCONNECTION (default: defined)
SCSI_NCR_SETUP_FORCE_SYNC_NEGO (default: not defined)
If defined, synchronous negotiation is tried for all SCSI-2 devices.
+
Can be changed by "setsync <target> <period>"
SCSI_NCR_SETUP_MASTER_PARITY (default: defined)
@@ -1115,6 +1344,7 @@ SCSI_NCR_SETTLE_TIME (default: 2)
SCSI_NCR_TIMEOUT_ALERT (default: 3)
If a pending command will time out after this amount of seconds,
an ordered tag is used for the next command.
+
Avoids timeouts for unordered tagged commands.
SCSI_NCR_CAN_QUEUE (default: 7*SCSI_NCR_MAX_TAGS)
@@ -1131,34 +1361,38 @@ SCSI_NCR_MAX_LUN (default: 8)
12. Installation
+================
This driver is part of the linux kernel distribution.
-Driver files are located in the sub-directory "drivers/scsi" of the
+Driver files are located in the sub-directory "drivers/scsi" of the
kernel source tree.
-Driver files:
+Driver files::
README.ncr53c8xx : this file
ChangeLog.ncr53c8xx : change log
ncr53c8xx.h : definitions
ncr53c8xx.c : the driver code
-New driver versions are made available separately in order to allow testing
-changes and new features prior to including them into the linux kernel
-distribution. The following URL provides information on latest available
-patches:
+New driver versions are made available separately in order to allow testing
+changes and new features prior to including them into the linux kernel
+distribution. The following URL provides information on the latest available
+patches:
ftp://ftp.tux.org/pub/people/gerard-roudier/README
-13. Architecture dependent features.
+13. Architecture dependent features
+===================================
<Not yet written>
14. Known problems
+==================
14.1 Tagged commands with Iomega Jaz device
+-------------------------------------------
I have not tried this device; however, the following has been reported to
me: This device is capable of Tagged command queuing. However
@@ -1170,14 +1404,15 @@ other problem that may appear is timeouts. The only way to avoid
timeouts seems to edit linux/drivers/scsi/sd.c and to increase the
current timeout values.
-14.2 Device names change when another controller is added.
+14.2 Device names change when another controller is added
+---------------------------------------------------------
-When you add a new NCR53C8XX chip based controller to a system that already
-has one or more controllers of this family, it may happen that the order
-the driver registers them to the kernel causes problems due to device
+When you add a new NCR53C8XX chip based controller to a system that already
+has one or more controllers of this family, it may happen that the order
+in which the driver registers them with the kernel causes problems due to device
name changes.
-When at least one controller uses NvRAM, SDMS BIOS version 4 allows you to
-define the order the BIOS will scan the scsi boards. The driver attaches
+When at least one controller uses NvRAM, SDMS BIOS version 4 allows you to
+define the order in which the BIOS will scan the scsi boards. The driver attaches
controllers according to BIOS information if NvRAM detect option is set.
If your controllers do not have NvRAM, you can:
@@ -1187,52 +1422,58 @@ If your controllers do not have NvRAM, you can:
- Make appropriate changes in the fstab.
- Use the 'scsidev' tool from Eric Youngdale.
-14.3 Using only 8 bit devices with a WIDE SCSI controller.
+14.3 Using only 8 bit devices with a WIDE SCSI controller
+---------------------------------------------------------
-When only 8 bit NARROW devices are connected to a 16 bit WIDE SCSI controller,
+When only 8 bit NARROW devices are connected to a 16 bit WIDE SCSI controller,
you must ensure that lines of the wide part of the SCSI BUS are pulled-up.
-This can be achieved by ENABLING the WIDE TERMINATOR portion of the SCSI
+This can be achieved by ENABLING the WIDE TERMINATOR portion of the SCSI
controller card.
+
The TYAN 1365 documentation revision 1.2 is not correct about such settings.
(page 10, figure 3.3).
14.4 Possible data corruption during a Memory Write and Invalidate
+------------------------------------------------------------------
This problem is described in SYMBIOS DEL 397, Part Number 69-039241, ITEM 4.
-In some complex situations, 53C875 chips revision <= 3 may start a PCI
+In some complex situations, 53C875 chips revision <= 3 may start a PCI
Write and Invalidate Command at a not cache-line-aligned 4 DWORDS boundary.
This is only possible when Cache Line Size is 8 DWORDS or greater.
-Pentium systems use a 8 DWORDS cache line size and so are concerned by
+Pentium systems use an 8 DWORDS cache line size and so are affected by
this chip bug, unlike i486 systems that use a 4 DWORDS cache line size.
-When this situation occurs, the chip may complete the Write and Invalidate
-command after having only filled part of the last cache line involved in
+When this situation occurs, the chip may complete the Write and Invalidate
+command after having only filled part of the last cache line involved in
the transfer, leaving the remainder of this cache line exposed to data corruption.
-Not using Write And Invalidate obviously gets rid of this chip bug, and so
+Not using Write And Invalidate obviously gets rid of this chip bug, and so
it is now the default setting of the driver.
-However, for people like me who want to enable this feature, I have added
-part of a work-around suggested by SYMBIOS. This work-around resets the
-addressing logic when the DATA IN phase is entered and so prevents the bug
-from being triggered for the first SCSI MOVE of the phase. This work-around
+However, for people like me who want to enable this feature, I have added
+part of a work-around suggested by SYMBIOS. This work-around resets the
+addressing logic when the DATA IN phase is entered and so prevents the bug
+from being triggered for the first SCSI MOVE of the phase. This work-around
should be enough according to the following:
-The only driver internal data structure that is greater than 8 DWORDS and
-that is moved by the SCRIPTS processor is the 'CCB header' that contains
-the context of the SCSI transfer. This data structure is aligned on 8 DWORDS
-boundary (Pentium Cache Line Size), and so is immune to this chip bug, at
+The only driver internal data structure that is greater than 8 DWORDS and
+that is moved by the SCRIPTS processor is the 'CCB header' that contains
+the context of the SCSI transfer. This data structure is aligned on 8 DWORDS
+boundary (Pentium Cache Line Size), and so is immune to this chip bug, at
least on Pentium systems.
-But the conditions of this bug can be met when a SCSI read command is
+
+But the conditions of this bug can be met when a SCSI read command is
performed using a buffer that is 4 DWORDS but not cache-line aligned.
-This cannot happen under Linux when scatter/gather lists are used since
-they only refer to system buffers that are well aligned. So, a work around
-may only be needed under Linux when a scatter/gather list is not used and
+This cannot happen under Linux when scatter/gather lists are used since
+they only refer to system buffers that are well aligned. So, a work-around
+may only be needed under Linux when a scatter/gather list is not used and
when the SCSI DATA IN phase is reentered after a phase mismatch.
15. SCSI problem troubleshooting
+================================
15.1 Problem tracking
+---------------------
Most SCSI problems are due to a non-conformant SCSI bus or to buggy
devices. If unfortunately you have SCSI problems, you can check the
@@ -1267,193 +1508,286 @@ tagged commands queuing.
Try to enable one feature at a time with control commands. For example:
-- echo "setsync all 25" >/proc/scsi/ncr53c8xx/0
- Will enable fast synchronous data transfer negotiation for all targets.
+::
+
+ echo "setsync all 25" >/proc/scsi/ncr53c8xx/0
+
+Will enable fast synchronous data transfer negotiation for all targets.
+
+::
-- echo "setflag 3" >/proc/scsi/ncr53c8xx/0
- Will reset flags (no_disc) for target 3, and so will allow it to disconnect
- the SCSI Bus.
+ echo "setflag 3" >/proc/scsi/ncr53c8xx/0
-- echo "settags 3 8" >/proc/scsi/ncr53c8xx/0
- Will enable tagged command queuing for target 3 if that device supports it.
+Will reset flags (no_disc) for target 3, and so will allow it to disconnect
+the SCSI Bus.
-Once you have found the device and the feature that cause problems, just
+::
+
+ echo "settags 3 8" >/proc/scsi/ncr53c8xx/0
+
+Will enable tagged command queuing for target 3 if that device supports it.
+
+Once you have found the device and the feature that cause problems, just
disable that feature for that device.
15.2 Understanding hardware error reports
+-----------------------------------------
-When the driver detects an unexpected error condition, it may display a
-message of the following pattern.
+When the driver detects an unexpected error condition, it may display a
+message of the following pattern::
-sym53c876-0:1: ERROR (0:48) (1-21-65) (f/95) @ (script 7c0:19000000).
-sym53c876-0: script cmd = 19000000
-sym53c876-0: regdump: da 10 80 95 47 0f 01 07 75 01 81 21 80 01 09 00.
+ sym53c876-0:1: ERROR (0:48) (1-21-65) (f/95) @ (script 7c0:19000000).
+ sym53c876-0: script cmd = 19000000
+ sym53c876-0: regdump: da 10 80 95 47 0f 01 07 75 01 81 21 80 01 09 00.
-Some fields in such a message may help you understand the cause of the
-problem, as follows:
+Some fields in such a message may help you understand the cause of the
+problem, as follows::
-sym53c876-0:1: ERROR (0:48) (1-21-65) (f/95) @ (script 7c0:19000000).
-............A.........B.C....D.E..F....G.H.......I.....J...K.......
+ sym53c876-0:1: ERROR (0:48) (1-21-65) (f/95) @ (script 7c0:19000000).
+ ............A.........B.C....D.E..F....G.H.......I.....J...K.......
Field A : target number.
- SCSI ID of the device the controller was talking with at the moment the
+ SCSI ID of the device the controller was talking with at the moment the
error occurred.
Field B : DSTAT io register (DMA STATUS)
- Bit 0x40 : MDPE Master Data Parity Error
+ ======== =============================================================
+ Bit 0x40 MDPE Master Data Parity Error
Data parity error detected on the PCI BUS.
- Bit 0x20 : BF Bus Fault
+ Bit 0x20 BF Bus Fault
PCI bus fault condition detected
- Bit 0x01 : IID Illegal Instruction Detected
- Set by the chip when it detects an Illegal Instruction format
+ Bit 0x01 IID Illegal Instruction Detected
+ Set by the chip when it detects an Illegal Instruction format
on some condition that makes an instruction illegal.
- Bit 0x80 : DFE Dma Fifo Empty
+ Bit 0x80 DFE Dma Fifo Empty
Pure status bit that does not indicate an error.
- If the reported DSTAT value contains a combination of MDPE (0x40),
+ ======== =============================================================
+
+	If the reported DSTAT value contains a combination of MDPE (0x40) and
BF (0x20), then the cause is likely a PCI BUS problem.
Field C : SIST io register (SCSI Interrupt Status)
- Bit 0x08 : SGE SCSI GROSS ERROR
- Indicates that the chip detected a severe error condition
+ ======== ==================================================================
+ Bit 0x08 SGE SCSI GROSS ERROR
+          Indicates that the chip detected a severe error condition
           on the SCSI BUS that prevents the SCSI protocol from functioning
           properly.
- Bit 0x04 : UDC Unexpected Disconnection
- Indicates that the device released the SCSI BUS when the chip
- was not expecting this to happen. A device may behave so to
- indicate the SCSI initiator that an error condition not reportable using the SCSI protocol has occurred.
- Bit 0x02 : RST SCSI BUS Reset
- Generally SCSI targets do not reset the SCSI BUS, although any
+ Bit 0x04 UDC Unexpected Disconnection
+          Indicates that the device released the SCSI BUS when the chip
+          was not expecting this to happen. A device may behave this way
+          to indicate to the SCSI initiator that an error condition not
+          reportable using the SCSI protocol has occurred.
+ Bit 0x02 RST SCSI BUS Reset
+          Generally SCSI targets do not reset the SCSI BUS, although any
           device on the BUS can reset it at any time.
- Bit 0x01 : PAR Parity
+ Bit 0x01 PAR Parity
           SCSI parity error detected.
- On a faulty SCSI BUS, any error condition among SGE (0x08), UDC (0x04) and
- PAR (0x01) may be detected by the chip. If your SCSI system sometimes
- encounters such error conditions, especially SCSI GROSS ERROR, then a SCSI
+ ======== ==================================================================
+
+ On a faulty SCSI BUS, any error condition among SGE (0x08), UDC (0x04) and
+ PAR (0x01) may be detected by the chip. If your SCSI system sometimes
+ encounters such error conditions, especially SCSI GROSS ERROR, then a SCSI
BUS problem is likely the cause of these errors.
-For fields D,E,F,G and H, you may look into the sym53c8xx_defs.h file
+For fields D, E, F, G and H, you may look into the sym53c8xx_defs.h file
that contains some minimal comments on IO register bits.
+
Field D : SOCL Scsi Output Control Latch
- This register reflects the state of the SCSI control lines the
+ This register reflects the state of the SCSI control lines the
 chip wants to drive or compare against.
+
Field E : SBCL Scsi Bus Control Lines
Actual value of control lines on the SCSI BUS.
+
Field F : SBDL Scsi Bus Data Lines
Actual value of data lines on the SCSI BUS.
+
Field G : SXFER SCSI Transfer
- Contains the setting of the Synchronous Period for output and
+ Contains the setting of the Synchronous Period for output and
the current Synchronous offset (offset 0 means asynchronous).
+
Field H : SCNTL3 Scsi Control Register 3
- Contains the setting of timing values for both asynchronous and
- synchronous data transfers.
+ Contains the setting of timing values for both asynchronous and
+ synchronous data transfers.
-Understanding Fields I, J, K and dumps requires to have good knowledge of
+Understanding Fields I, J, K and dumps requires good knowledge of
 SCSI standards, chip core functional details and internal driver data structures.
-You are not required to decode and understand them, unless you want to help
+You are not required to decode and understand them, unless you want to help
maintain the driver code.
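+
+As an illustration only (this helper is not part of the driver), the bit
+masks documented above for fields B and C can be checked with a few lines
+of user-space C::
+
+    #include <stdio.h>
+
+    #define DSTAT_MDPE 0x40  /* Master Data Parity Error */
+    #define DSTAT_BF   0x20  /* Bus Fault */
+    #define SIST_SGE   0x08  /* SCSI GROSS ERROR */
+    #define SIST_UDC   0x04  /* Unexpected Disconnection */
+    #define SIST_PAR   0x01  /* Parity error */
+
+    static void classify(unsigned int dstat, unsigned int sist)
+    {
+            if (dstat & (DSTAT_MDPE | DSTAT_BF))
+                    printf("PCI BUS problem is the likely cause\n");
+            if (sist & (SIST_SGE | SIST_UDC | SIST_PAR))
+                    printf("SCSI BUS problem is the likely cause\n");
+    }
+
+    int main(void)
+    {
+            classify(0x00, 0x48);  /* fields B and C of the sample message */
+            return 0;
+    }
+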
16. Synchronous transfer negotiation tables
+===========================================
 The tables below have been created by calling the routine the driver uses
for synchronisation negotiation timing calculation and chip setting.
-The first table corresponds to Ultra chips 53875 and 53C860 with 80 MHz
+The first table corresponds to Ultra chips 53C875 and 53C860 with 80 MHz
clock and 5 clock divisors.
-The second one has been calculated by setting the scsi clock to 40 Mhz
-and using 4 clock divisors and so applies to all NCR53C8XX chips in fast
+The second one has been calculated by setting the SCSI clock to 40 MHz
+and using 4 clock divisors and so applies to all NCR53C8XX chips in fast
SCSI-2 mode.
Periods are in nano-seconds and speeds are in Mega-transfers per second.
-1 Mega-transfers/second means 1 MB/s with 8 bits SCSI and 2 MB/s with
+1 Mega-transfers/second means 1 MB/s with 8 bits SCSI and 2 MB/s with
Wide16 SCSI.
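+
+As a worked check (not driver code), the speed columns follow directly
+from the period columns: 1000 divided by the period in nanoseconds gives
+mega-transfers per second, as the factor 25 rows of both tables show::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            double period_ns = 100.0;           /* factor 25 below */
+            double mtps = 1000.0 / period_ns;   /* 10.000 MT/s */
+
+            /* 1 MT/s is 1 MB/s on 8-bit SCSI, 2 MB/s on Wide16 */
+            printf("%.3f MT/s, %.1f MB/s narrow, %.1f MB/s wide\n",
+                   mtps, mtps, mtps * 2.0);
+            return 0;
+    }
+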
16.1 Synchronous timings for 53C895, 53C875 and 53C860 SCSI controllers
- ----------------------------------------------
+-----------------------------------------------------------------------
- Negotiated NCR settings
- Factor Period Speed Period Speed
- ------ ------ ------ ------ ------
- 10 25 40.000 25 40.000 (53C895 only)
- 11 30.2 33.112 31.25 32.000 (53C895 only)
- 12 50 20.000 50 20.000
- 13 52 19.230 62 16.000
- 14 56 17.857 62 16.000
- 15 60 16.666 62 16.000
- 16 64 15.625 75 13.333
- 17 68 14.705 75 13.333
- 18 72 13.888 75 13.333
- 19 76 13.157 87 11.428
- 20 80 12.500 87 11.428
- 21 84 11.904 87 11.428
- 22 88 11.363 93 10.666
- 23 92 10.869 93 10.666
- 24 96 10.416 100 10.000
- 25 100 10.000 100 10.000
- 26 104 9.615 112 8.888
- 27 108 9.259 112 8.888
- 28 112 8.928 112 8.888
- 29 116 8.620 125 8.000
- 30 120 8.333 125 8.000
- 31 124 8.064 125 8.000
- 32 128 7.812 131 7.619
- 33 132 7.575 150 6.666
- 34 136 7.352 150 6.666
- 35 140 7.142 150 6.666
- 36 144 6.944 150 6.666
- 37 148 6.756 150 6.666
- 38 152 6.578 175 5.714
- 39 156 6.410 175 5.714
- 40 160 6.250 175 5.714
- 41 164 6.097 175 5.714
- 42 168 5.952 175 5.714
- 43 172 5.813 175 5.714
- 44 176 5.681 187 5.333
- 45 180 5.555 187 5.333
- 46 184 5.434 187 5.333
- 47 188 5.319 200 5.000
- 48 192 5.208 200 5.000
- 49 196 5.102 200 5.000
-
++-----------------------------+--------+-------+--------------+
+|Negotiated                   |NCR settings    |              |
++-------+--------+------------+--------+-------+              |
+|Factor |Period  |Speed       |Period  |Speed  |              |
++-------+--------+------------+--------+-------+--------------+
+|10     | 25     |40.000      | 25     |40.000 | (53C895 only)|
++-------+--------+------------+--------+-------+--------------+
+|11     | 30.2   |33.112      | 31.25  |32.000 | (53C895 only)|
++-------+--------+------------+--------+-------+--------------+
+|12     | 50     |20.000      | 50     |20.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|13     | 52     |19.230      | 62     |16.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|14     | 56     |17.857      | 62     |16.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|15     | 60     |16.666      | 62     |16.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|16     | 64     |15.625      | 75     |13.333 |              |
++-------+--------+------------+--------+-------+--------------+
+|17     | 68     |14.705      | 75     |13.333 |              |
++-------+--------+------------+--------+-------+--------------+
+|18     | 72     |13.888      | 75     |13.333 |              |
++-------+--------+------------+--------+-------+--------------+
+|19     | 76     |13.157      | 87     |11.428 |              |
++-------+--------+------------+--------+-------+--------------+
+|20     | 80     |12.500      | 87     |11.428 |              |
++-------+--------+------------+--------+-------+--------------+
+|21     | 84     |11.904      | 87     |11.428 |              |
++-------+--------+------------+--------+-------+--------------+
+|22     | 88     |11.363      | 93     |10.666 |              |
++-------+--------+------------+--------+-------+--------------+
+|23     | 92     |10.869      | 93     |10.666 |              |
++-------+--------+------------+--------+-------+--------------+
+|24     | 96     |10.416      |100     |10.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|25     |100     |10.000      |100     |10.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|26     |104     | 9.615      |112     | 8.888 |              |
++-------+--------+------------+--------+-------+--------------+
+|27     |108     | 9.259      |112     | 8.888 |              |
++-------+--------+------------+--------+-------+--------------+
+|28     |112     | 8.928      |112     | 8.888 |              |
++-------+--------+------------+--------+-------+--------------+
+|29     |116     | 8.620      |125     | 8.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|30     |120     | 8.333      |125     | 8.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|31     |124     | 8.064      |125     | 8.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|32     |128     | 7.812      |131     | 7.619 |              |
++-------+--------+------------+--------+-------+--------------+
+|33     |132     | 7.575      |150     | 6.666 |              |
++-------+--------+------------+--------+-------+--------------+
+|34     |136     | 7.352      |150     | 6.666 |              |
++-------+--------+------------+--------+-------+--------------+
+|35     |140     | 7.142      |150     | 6.666 |              |
++-------+--------+------------+--------+-------+--------------+
+|36     |144     | 6.944      |150     | 6.666 |              |
++-------+--------+------------+--------+-------+--------------+
+|37     |148     | 6.756      |150     | 6.666 |              |
++-------+--------+------------+--------+-------+--------------+
+|38     |152     | 6.578      |175     | 5.714 |              |
++-------+--------+------------+--------+-------+--------------+
+|39     |156     | 6.410      |175     | 5.714 |              |
++-------+--------+------------+--------+-------+--------------+
+|40     |160     | 6.250      |175     | 5.714 |              |
++-------+--------+------------+--------+-------+--------------+
+|41     |164     | 6.097      |175     | 5.714 |              |
++-------+--------+------------+--------+-------+--------------+
+|42     |168     | 5.952      |175     | 5.714 |              |
++-------+--------+------------+--------+-------+--------------+
+|43     |172     | 5.813      |175     | 5.714 |              |
++-------+--------+------------+--------+-------+--------------+
+|44     |176     | 5.681      |187     | 5.333 |              |
++-------+--------+------------+--------+-------+--------------+
+|45     |180     | 5.555      |187     | 5.333 |              |
++-------+--------+------------+--------+-------+--------------+
+|46     |184     | 5.434      |187     | 5.333 |              |
++-------+--------+------------+--------+-------+--------------+
+|47     |188     | 5.319      |200     | 5.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|48     |192     | 5.208      |200     | 5.000 |              |
++-------+--------+------------+--------+-------+--------------+
+|49     |196     | 5.102      |200     | 5.000 |              |
++-------+--------+------------+--------+-------+--------------+
16.2 Synchronous timings for fast SCSI-2 53C8XX controllers
- ----------------------------------------------
+-----------------------------------------------------------
- Negotiated NCR settings
- Factor Period Speed Period Speed
- ------ ------ ------ ------ ------
- 25 100 10.000 100 10.000
- 26 104 9.615 125 8.000
- 27 108 9.259 125 8.000
- 28 112 8.928 125 8.000
- 29 116 8.620 125 8.000
- 30 120 8.333 125 8.000
- 31 124 8.064 125 8.000
- 32 128 7.812 131 7.619
- 33 132 7.575 150 6.666
- 34 136 7.352 150 6.666
- 35 140 7.142 150 6.666
- 36 144 6.944 150 6.666
- 37 148 6.756 150 6.666
- 38 152 6.578 175 5.714
- 39 156 6.410 175 5.714
- 40 160 6.250 175 5.714
- 41 164 6.097 175 5.714
- 42 168 5.952 175 5.714
- 43 172 5.813 175 5.714
- 44 176 5.681 187 5.333
- 45 180 5.555 187 5.333
- 46 184 5.434 187 5.333
- 47 188 5.319 200 5.000
- 48 192 5.208 200 5.000
- 49 196 5.102 200 5.000
-
-
-17. Serial NVRAM (added by Richard Waltham: dormouse@farsrobt.demon.co.uk)
++-----------------------------+----------------+
+|Negotiated                   |NCR settings    |
++-------+--------+------------+--------+-------+
+|Factor |Period  |Speed       |Period  |Speed  |
++-------+--------+------------+--------+-------+
+|25     |100     |10.000      |100     |10.000 |
++-------+--------+------------+--------+-------+
+|26     |104     |9.615       |125     | 8.000 |
++-------+--------+------------+--------+-------+
+|27     |108     |9.259       |125     | 8.000 |
++-------+--------+------------+--------+-------+
+|28     |112     |8.928       |125     | 8.000 |
++-------+--------+------------+--------+-------+
+|29     |116     |8.620       |125     | 8.000 |
++-------+--------+------------+--------+-------+
+|30     |120     |8.333       |125     | 8.000 |
++-------+--------+------------+--------+-------+
+|31     |124     |8.064       |125     | 8.000 |
++-------+--------+------------+--------+-------+
+|32     |128     |7.812       |131     | 7.619 |
++-------+--------+------------+--------+-------+
+|33     |132     |7.575       |150     | 6.666 |
++-------+--------+------------+--------+-------+
+|34     |136     |7.352       |150     | 6.666 |
++-------+--------+------------+--------+-------+
+|35     |140     |7.142       |150     | 6.666 |
++-------+--------+------------+--------+-------+
+|36     |144     |6.944       |150     | 6.666 |
++-------+--------+------------+--------+-------+
+|37     |148     |6.756       |150     | 6.666 |
++-------+--------+------------+--------+-------+
+|38     |152     |6.578       |175     | 5.714 |
++-------+--------+------------+--------+-------+
+|39     |156     |6.410       |175     | 5.714 |
++-------+--------+------------+--------+-------+
+|40     |160     |6.250       |175     | 5.714 |
++-------+--------+------------+--------+-------+
+|41     |164     |6.097       |175     | 5.714 |
++-------+--------+------------+--------+-------+
+|42     |168     |5.952       |175     | 5.714 |
++-------+--------+------------+--------+-------+
+|43     |172     |5.813       |175     | 5.714 |
++-------+--------+------------+--------+-------+
+|44     |176     |5.681       |187     | 5.333 |
++-------+--------+------------+--------+-------+
+|45     |180     |5.555       |187     | 5.333 |
++-------+--------+------------+--------+-------+
+|46     |184     |5.434       |187     | 5.333 |
++-------+--------+------------+--------+-------+
+|47     |188     |5.319       |200     | 5.000 |
++-------+--------+------------+--------+-------+
+|48     |192     |5.208       |200     | 5.000 |
++-------+--------+------------+--------+-------+
+|49     |196     |5.102       |200     | 5.000 |
++-------+--------+------------+--------+-------+
+
+
+17. Serial NVRAM
+================
+
+(added by Richard Waltham: dormouse@farsrobt.demon.co.uk)
17.1 Features
+-------------
Enabling serial NVRAM support enables detection of the serial NVRAM included
-on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
-serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
+on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
+serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
host adaptor and its attached drives.
The Symbios NVRAM also holds data on the boot order of host adaptors in a
@@ -1467,10 +1801,10 @@ NVRAM boot order settings can do this as well as change the order the same
types of cards are scanned in, something "reverse probe" cannot do.
Tekram boards using Symbios chips, DC390W/F/U, which have NVRAM are detected
-and this is used to distinguish between Symbios compatible and Tekram host
+and this is used to distinguish between Symbios compatible and Tekram host
adaptors. This is used to disable the Symbios compatible "diff" setting
-incorrectly set on Tekram boards if the CONFIG_SCSI_53C8XX_SYMBIOS_COMPAT
-configuration parameter is set enabling both Symbios and Tekram boards to be
+incorrectly set on Tekram boards if the CONFIG_SCSI_53C8XX_SYMBIOS_COMPAT
+configuration parameter is set enabling both Symbios and Tekram boards to be
used together with the Symbios cards using all their features, including
"diff" support. ("led pin" support for Symbios compatible cards can remain
enabled when using Tekram cards. It does nothing useful for Tekram host
@@ -1478,71 +1812,76 @@ adaptors but does not cause problems either.)
17.2 Symbios NVRAM layout
+-------------------------
+
+typical data at NVRAM address 0x100 (53c810a NVRAM)::
+
+ 00 00
+ 64 01
+ 8e 0b
+
+ 00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
+
+ 04 00 0f 00 00 10 00 50 00 00 01 00 00 62
+ 04 00 03 00 00 10 00 58 00 00 01 00 00 63
+ 04 00 01 00 00 10 00 48 00 00 01 00 00 61
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+
+ fe fe
+ 00 00
+ 00 00
-typical data at NVRAM address 0x100 (53c810a NVRAM)
------------------------------------------------------------
-00 00
-64 01
-8e 0b
-
-00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
-
-04 00 0f 00 00 10 00 50 00 00 01 00 00 62
-04 00 03 00 00 10 00 58 00 00 01 00 00 63
-04 00 01 00 00 10 00 48 00 00 01 00 00 61
-00 00 00 00 00 00 00 00 00 00 00 00 00 00
-
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-
-fe fe
-00 00
-00 00
------------------------------------------------------------
NVRAM layout details
-NVRAM Address 0x000-0x0ff not used
- 0x100-0x26f initialised data
- 0x270-0x7ff not used
+============= ================
+NVRAM Address
+============= ================
+0x000-0x0ff   not used
+0x100-0x26f   initialised data
+0x270-0x7ff   not used
+============= ================
-general layout
+general layout::
header - 6 bytes,
data - 356 bytes (checksum is byte sum of this data)
@@ -1550,7 +1889,7 @@ general layout
---
total 368 bytes
-data area layout
+data area layout::
controller set up - 20 bytes
boot configuration - 56 bytes (4x14 bytes)
@@ -1559,121 +1898,126 @@ data area layout
---
total 356 bytes
------------------------------------------------------------
-header
-
-00 00 - ?? start marker
-64 01 - byte count (lsb/msb excludes header/trailer)
-8e 0b - checksum (lsb/msb excludes header/trailer)
------------------------------------------------------------
-controller set up
-
-00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
- | | | |
- | | | -- host ID
- | | |
- | | --Removable Media Support
- | | 0x00 = none
- | | 0x01 = Bootable Device
- | | 0x02 = All with Media
- | |
- | --flag bits 2
- | 0x00000001= scan order hi->low
- | (default 0x00 - scan low->hi)
- --flag bits 1
- 0x00000001 scam enable
- 0x00000010 parity enable
- 0x00000100 verbose boot msgs
+header::
+
+ 00 00 - ?? start marker
+ 64 01 - byte count (lsb/msb excludes header/trailer)
+ 8e 0b - checksum (lsb/msb excludes header/trailer)
+
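+As a sketch (not the BIOS algorithm, and the 16-bit wrap of the byte sum
+is an assumption), the checksum can be recomputed from the 356 data bytes
+that follow the 6-byte header::
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    static int symbios_csum_ok(const uint8_t *nvram)
+    {
+            const uint8_t *data = nvram + 6;  /* data follows 6-byte header */
+            uint16_t sum = 0;
+            size_t i;
+
+            for (i = 0; i < 356; i++)
+                    sum += data[i];
+            /* header bytes 4/5 hold the checksum lsb/msb, e.g. 8e 0b */
+            return sum == (uint16_t)(nvram[4] | (nvram[5] << 8));
+    }
+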
+controller set up::
+
+ 00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
+ | | | |
+ | | | -- host ID
+ | | |
+ | | --Removable Media Support
+ | | 0x00 = none
+ | | 0x01 = Bootable Device
+ | | 0x02 = All with Media
+ | |
+ | --flag bits 2
+ | 0x00000001= scan order hi->low
+ | (default 0x00 - scan low->hi)
+ --flag bits 1
+ 0x00000001 scam enable
+ 0x00000010 parity enable
+ 0x00000100 verbose boot msgs
remaining bytes unknown - they do not appear to change in my
current set up for any of the controllers.
default set up is identical for 53c810a and 53c875 NVRAM
(Removable Media added Symbios BIOS version 4.09)
------------------------------------------------------------
+
boot configuration
-boot order set by order of the devices in this table
+boot order is set by the order of the devices in this table::
-04 00 0f 00 00 10 00 50 00 00 01 00 00 62 -- 1st controller
-04 00 03 00 00 10 00 58 00 00 01 00 00 63 2nd controller
-04 00 01 00 00 10 00 48 00 00 01 00 00 61 3rd controller
-00 00 00 00 00 00 00 00 00 00 00 00 00 00 4th controller
- | | | | | | | |
- | | | | | | ---- PCI io port adr
- | | | | | --0x01 init/scan at boot time
- | | | | --PCI device/function number (0xdddddfff)
- | | ----- ?? PCI vendor ID (lsb/msb)
- ----PCI device ID (lsb/msb)
+ 04 00 0f 00 00 10 00 50 00 00 01 00 00 62 -- 1st controller
+ 04 00 03 00 00 10 00 58 00 00 01 00 00 63 2nd controller
+ 04 00 01 00 00 10 00 48 00 00 01 00 00 61 3rd controller
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 4th controller
+ | | | | | | | |
+ | | | | | | ---- PCI io port adr
+ | | | | | --0x01 init/scan at boot time
+ | | | | --PCI device/function number (0xdddddfff)
+ | | ----- ?? PCI vendor ID (lsb/msb)
+ ----PCI device ID (lsb/msb)
-?? use of this data is a guess but seems reasonable
+ ?? use of this data is a guess but seems reasonable
remaining bytes unknown - they do not appear to change in my
current set up
default set up is identical for 53c810a and 53c875 NVRAM
------------------------------------------------------------
-device set up (up to 16 devices - includes controller)
-
-0f 00 08 08 64 00 0a 00 - id 0
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00 - id 15
- | | | | | |
- | | | | ----timeout (lsb/msb)
- | | | --synch period (0x?? 40 Mtrans/sec- fast 40) (probably 0x28)
- | | | (0x30 20 Mtrans/sec- fast 20)
- | | | (0x64 10 Mtrans/sec- fast )
- | | | (0xc8 5 Mtrans/sec)
- | | | (0x00 asynchronous)
- | | -- ?? max sync offset (0x08 in NVRAM on 53c810a)
- | | (0x10 in NVRAM on 53c875)
- | --device bus width (0x08 narrow)
- | (0x10 16 bit wide)
- --flag bits
- 0x00000001 - disconnect enabled
- 0x00000010 - scan at boot time
- 0x00000100 - scan luns
- 0x00001000 - queue tags enabled
+--------------------------------------------------------
+
+device set up (up to 16 devices - includes controller)::
+
+ 0f 00 08 08 64 00 0a 00 - id 0
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00 - id 15
+ | | | | | |
+ | | | | ----timeout (lsb/msb)
+ | | | --synch period (0x?? 40 Mtrans/sec- fast 40) (probably 0x28)
+ | | | (0x30 20 Mtrans/sec- fast 20)
+ | | | (0x64 10 Mtrans/sec- fast )
+ | | | (0xc8 5 Mtrans/sec)
+ | | | (0x00 asynchronous)
+ | | -- ?? max sync offset (0x08 in NVRAM on 53c810a)
+ | | (0x10 in NVRAM on 53c875)
+ | --device bus width (0x08 narrow)
+ | (0x10 16 bit wide)
+ --flag bits
+ 0x00000001 - disconnect enabled
+ 0x00000010 - scan at boot time
+ 0x00000100 - scan luns
+ 0x00001000 - queue tags enabled
remaining bytes unknown - they do not appear to change in my
current set up
-?? use of this data is a guess but seems reasonable
+?? use of this data is a guess but seems reasonable
(but it could be max bus width)
default set up for 53c810a NVRAM
-default set up for 53c875 NVRAM - bus width - 0x10
+default set up for 53c875 NVRAM
+
+ - bus width - 0x10
- sync offset ? - 0x10
- sync period - 0x30
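+
+As an aside, a flags byte like the default 0x0f can be tested with masks.
+Reading the 0x00000001-style values above as bit positions within the
+first setup byte is an interpretation, not something this document
+states::
+
+    #include <stdint.h>
+
+    #define DEV_DISC_ENABLE 0x01    /* disconnect enabled */
+    #define DEV_SCAN_BOOT   0x02    /* scan at boot time */
+    #define DEV_SCAN_LUNS   0x04    /* scan luns */
+    #define DEV_QUEUE_TAGS  0x08    /* queue tags enabled */
+
+    static int dev_flag_set(uint8_t flags, uint8_t mask)
+    {
+            return (flags & mask) != 0;  /* default 0x0f has all four set */
+    }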
------------------------------------------------------------
+
?? spare device space (32 bit bus ??)
-00 00 00 00 00 00 00 00 (19x8bytes)
-.
-.
-00 00 00 00 00 00 00 00
+::
+
+ 00 00 00 00 00 00 00 00 (19x8bytes)
+ .
+ .
+ 00 00 00 00 00 00 00 00
default set up is identical for 53c810a and 53c875 NVRAM
------------------------------------------------------------
-trailer
+--------------------------------------------------------
-fe fe - ? end marker ?
-00 00
-00 00
+trailer::
+
+ fe fe - ? end marker ?
+ 00 00
+ 00 00
default set up is identical for 53c810a and 53c875 NVRAM
-----------------------------------------------------------
@@ -1681,51 +2025,52 @@ default set up is identical for 53c810a and 53c875 NVRAM
17.3 Tekram NVRAM layout
+------------------------
nvram 64x16 (1024 bit)
-Drive settings
-
-Drive ID 0-15 (addr 0x0yyyy0 = device setup, yyyy = ID)
- (addr 0x0yyyy1 = 0x0000)
-
- x x x x x x x x x x x x x x x x
- | | | | | | | | |
- | | | | | | | | ----- parity check 0 - off
- | | | | | | | | 1 - on
- | | | | | | | |
- | | | | | | | ------- sync neg 0 - off
- | | | | | | | 1 - on
- | | | | | | |
- | | | | | | --------- disconnect 0 - off
- | | | | | | 1 - on
- | | | | | |
- | | | | | ----------- start cmd 0 - off
- | | | | | 1 - on
- | | | | |
- | | | | -------------- tagged cmds 0 - off
- | | | | 1 - on
- | | | |
- | | | ---------------- wide neg 0 - off
- | | | 1 - on
- | | |
- --------------------------- sync rate 0 - 10.0 Mtrans/sec
- 1 - 8.0
- 2 - 6.6
- 3 - 5.7
- 4 - 5.0
- 5 - 4.0
- 6 - 3.0
- 7 - 2.0
- 7 - 2.0
- 8 - 20.0
- 9 - 16.7
- a - 13.9
- b - 11.9
+Drive settings::
+
+ Drive ID 0-15 (addr 0x0yyyy0 = device setup, yyyy = ID)
+ (addr 0x0yyyy1 = 0x0000)
+
+ x x x x x x x x x x x x x x x x
+ | | | | | | | | |
+ | | | | | | | | ----- parity check 0 - off
+ | | | | | | | | 1 - on
+ | | | | | | | |
+ | | | | | | | ------- sync neg 0 - off
+ | | | | | | | 1 - on
+ | | | | | | |
+ | | | | | | --------- disconnect 0 - off
+ | | | | | | 1 - on
+ | | | | | |
+ | | | | | ----------- start cmd 0 - off
+ | | | | | 1 - on
+ | | | | |
+ | | | | -------------- tagged cmds 0 - off
+ | | | | 1 - on
+ | | | |
+ | | | ---------------- wide neg 0 - off
+ | | | 1 - on
+ | | |
+ --------------------------- sync rate 0 - 10.0 Mtrans/sec
+ 1 - 8.0
+ 2 - 6.6
+ 3 - 5.7
+ 4 - 5.0
+ 5 - 4.0
+ 6 - 3.0
+ 7 - 2.0
+ 8 - 20.0
+ 9 - 16.7
+ a - 13.9
+ b - 11.9
Global settings
-Host flags 0 (addr 0x100000, 32)
+Host flags 0 (addr 0x100000, 32)::
x x x x x x x x x x x x x x x x
| | | | | | | | | | | |
@@ -1733,7 +2078,7 @@ Host flags 0 (addr 0x100000, 32)
| | | | | | | |
| | | | | | | ----------------------- support for 0 - off
| | | | | | | > 2 drives 1 - on
- | | | | | | |
+ | | | | | | |
| | | | | | ------------------------- support drives 0 - off
| | | | | | > 1Gbytes 1 - on
| | | | | |
@@ -1753,10 +2098,10 @@ Host flags 0 (addr 0x100000, 32)
as BIOS dev 1 - boot device
2 - all
-Host flags 1 (addr 0x100001, 33)
+Host flags 1 (addr 0x100001, 33)::
x x x x x x x x x x x x x x x x
- | | | | | |
+ | | | | | |
| | | --------- boot delay 0 - 3 sec
| | | 1 - 5
| | | 2 - 10
@@ -1771,7 +2116,7 @@ Host flags 1 (addr 0x100001, 33)
3 - 16
4 - 32
-Host flags 2 (addr 0x100010, 34)
+Host flags 2 (addr 0x100010, 34)::
x x x x x x x x x x x x x x x x
|
@@ -1784,41 +2129,41 @@ checksum = 0x1234 - (sum addr 0-63)
----------------------------------------------------------------------------
-default nvram data:
+default nvram data::
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-0x0f07 0x0400 0x0001 0x0000 0x0000 0x0000 0x0000 0x0000
-0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
-0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
-0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0xfbbc
+ 0x0f07 0x0400 0x0001 0x0000 0x0000 0x0000 0x0000 0x0000
+ 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
+ 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
+ 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0xfbbc
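+
+The checksum rule shown above (checksum = 0x1234 - (sum addr 0-63))
+matches the trailing 0xfbbc word of the default data if the checksum word
+itself is excluded from the sum - a sketch, with that exclusion assumed::
+
+    #include <stdint.h>
+
+    static uint16_t tekram_csum(const uint16_t nv[64])
+    {
+            uint16_t sum = 0;
+            int i;
+
+            for (i = 0; i < 63; i++)   /* words 0..62; word 63 stores it */
+                    sum += nv[i];
+            return 0x1234 - sum;       /* 0xfbbc for the default data */
+    }
+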
18. Support for Big Endian
+==========================
The PCI local bus has been primarily designed for x86 architecture.
-As a consequence, PCI devices generally expect DWORDS using little endian
+As a consequence, PCI devices generally expect DWORDS using little endian
byte ordering.
18.1 Big Endian CPU
+-------------------
-In order to support NCR chips on a Big Endian architecture the driver has to
-perform byte reordering each time it is needed. This feature has been
-added to the driver by Cort <cort@cs.nmt.edu> and is available in driver
-version 2.5 and later ones. For the moment Big Endian support has only
+In order to support NCR chips on a Big Endian architecture the driver has to
+perform byte reordering each time it is needed. This feature has been
+added to the driver by Cort <cort@cs.nmt.edu> and is available in driver
+version 2.5 and later. For the moment, Big Endian support has only
been tested on Linux/PPC (PowerPC).
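+
+To illustrate the kind of reordering involved (generic C, not the
+driver's actual code), a little-endian DWORD read from the chip can be
+swapped on a Big Endian host like this::
+
+    #include <stdint.h>
+
+    /* Swap the four bytes of a 32-bit value read from a
+       little-endian PCI device. */
+    static uint32_t swab32(uint32_t v)
+    {
+            return (v >> 24) | ((v >> 8) & 0x0000ff00UL) |
+                   ((v << 8) & 0x00ff0000UL) | (v << 24);
+    }
+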
18.2 NCR chip in Big Endian mode of operations
+----------------------------------------------
-It can be read in SYMBIOS documentation that some chips support a special
+According to SYMBIOS documentation, some chips support a special
Big Endian mode, on paper: 53C815, 53C825A, 53C875, 53C875N, 53C895.
-This mode of operations is not software-selectable, but needs pin named
-BigLit to be pulled-up. Using this mode, most of byte reorderings should
+This mode of operation is not software-selectable, but requires a pin
+named BigLit to be pulled up. Using this mode, most byte reorderings can
be avoided when the driver is running on a Big Endian CPU.
Driver version 2.5 is also, in theory, ready for this feature.
-
-===============================================================================
-End of NCR53C8XX driver README file
diff --git a/Documentation/scsi/ppa.rst b/Documentation/scsi/ppa.rst
new file mode 100644
index 000000000000..5fe3859a6892
--- /dev/null
+++ b/Documentation/scsi/ppa.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
+Terse where to get ZIP Drive help info
+======================================
+
+General Iomega ZIP drive page for Linux:
+http://web.archive.org/web/%2E/http://www.torque.net/~campbell/
+
+Driver archive for old drivers:
+http://web.archive.org/web/%2E/http://www.torque.net/~campbell/ppa
+
+Linux Parport page (parallel port):
+http://web.archive.org/web/%2E/http://www.torque.net/parport/
+
+Email list for Linux Parport:
+linux-parport@torque.net
+
diff --git a/Documentation/scsi/ppa.txt b/Documentation/scsi/ppa.txt
deleted file mode 100644
index 05ff47dbe8d1..000000000000
--- a/Documentation/scsi/ppa.txt
+++ /dev/null
@@ -1,14 +0,0 @@
--------- Terse where to get ZIP Drive help info --------
-
-General Iomega ZIP drive page for Linux:
-http://web.archive.org/web/*/http://www.torque.net/~campbell/
-
-Driver archive for old drivers:
-http://web.archive.org/web/*/http://www.torque.net/~campbell/ppa
-
-Linux Parport page (parallel port)
-http://web.archive.org/web/*/http://www.torque.net/parport/
-
-Email list for Linux Parport
-linux-parport@torque.net
-
diff --git a/Documentation/scsi/qlogicfas.txt b/Documentation/scsi/qlogicfas.rst
index c211d827fef2..b17f1b3676c3 100644
--- a/Documentation/scsi/qlogicfas.txt
+++ b/Documentation/scsi/qlogicfas.rst
@@ -1,3 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================
+Qlogic FASXXX Family Driver Notes
+=================================
This driver supports the Qlogic FASXXX family of chips. This driver
only works with the ISA, VLB, and PCMCIA versions of the Qlogic
@@ -16,7 +21,8 @@ is provided by the qla1280 driver.
Nor does it support the PCI-Basic, which is supported by the
'am53c974' driver.
-PCMCIA SUPPORT
+PCMCIA Support
+==============
This currently only works if the card is enabled first from DOS. This
means you will have to load your socket and card services, and
@@ -31,7 +37,8 @@ it from configuring the card.
I am working with the PCMCIA group to make it more flexible, but that
may take a while.
-ALL CARDS
+All Cards
+=========
The top of the qlogic.c file has a number of defines that controls
configuration. As shipped, it provides a balance between speed and
@@ -46,7 +53,8 @@ command or something. It comes up faster if this is set to zero, and
if you have reliable hardware and connections it may be more useful to
not reset things.
-SOME TROUBLESHOOTING TIPS
+Some Troubleshooting Tips
+=========================
Make sure it works properly under DOS. You should also do an initial FDISK
on a new drive if you want partitions.
@@ -54,7 +62,8 @@ on a new drive if you want partitions.
Don't enable all the speedups first. If anything is wrong, they will make
any problem worse.
-IMPORTANT
+Important
+=========
The best way to test if your cables, termination, etc. are good is to
copy a very big file (e.g. a doublespace container file, or a very
diff --git a/Documentation/scsi/scsi-changer.txt b/Documentation/scsi/scsi-changer.rst
index ade046ea7c17..ab60e7e61a6c 100644
--- a/Documentation/scsi/scsi-changer.txt
+++ b/Documentation/scsi/scsi-changer.rst
@@ -1,4 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+========================================
README for the SCSI media changer driver
========================================
@@ -28,15 +30,17 @@ The SCSI changer model is complex, compared to - for example - IDE-CD
 changers. But it can handle nearly all possible cases. It knows
4 different types of changer elements:
- media transport - this one shuffles around the media, i.e. the
+ =============== ==================================================
+ media transport this one shuffles around the media, i.e. the
                  transport arm. Also known as "picker".
- storage - a slot which can hold a media.
- import/export - the same as above, but is accessible from outside,
+ storage         a slot which can hold a media.
+ import/export   the same as above, but is accessible from outside,
                  i.e. there the operator (you !) can use this to
                  fill in and remove media from the changer.
                  Sometimes named "mailslot".
- data transfer - this is the device which reads/writes, i.e. the
+ data transfer   this is the device which reads/writes, i.e. the
                  CD-ROM / Tape / whatever drive.
+ =============== ==================================================
None of these is limited to one: A huge Jukebox could have slots for
123 CD-ROM's, 5 CD-ROM readers (and therefore 6 SCSI ID's: the changer
@@ -131,24 +135,23 @@ timeout_init=<seconds>
timeout_move=<seconds>
timeout for all other commands (default: 120).
-dt_id=<id1>,<id2>,...
-dt_lun=<lun1>,<lun2>,...
+dt_id=<id1>,<id2>,... / dt_lun=<lun1>,<lun2>,...
 These two allow you to specify the SCSI ID and LUN for the data
transfer elements. You likely don't need this as the jukebox
should provide this information. But some devices don't ...
-vendor_firsts=
-vendor_counts=
-vendor_labels=
+vendor_firsts=, vendor_counts=, vendor_labels=
These insmod options can be used to tell the driver that there
are some vendor-specific element types. Grundig for example
does this. Some jukeboxes have a printer to label fresh burned
CDs, which is addressed as element 0xc000 (type 5). To tell the
- driver about this vendor-specific element, use this:
+ driver about this vendor-specific element, use this::
+
$ insmod ch \
vendor_firsts=0xc000 \
vendor_counts=1 \
vendor_labels=printer
+
All three insmod options accept up to four comma-separated
values, this way you can configure the element types 5-8.
You likely need the SCSI specs for the device in question to
@@ -162,13 +165,15 @@ Credits
I wrote this driver using the famous mailing-patches-around-the-world
method. With (more or less) help from:
- Daniel Moehwald <moehwald@hdg.de>
- Dane Jasper <dane@sonic.net>
- R. Scott Bailey <sbailey@dsddi.eds.com>
- Jonathan Corbet <corbet@lwn.net>
+ - Daniel Moehwald <moehwald@hdg.de>
+ - Dane Jasper <dane@sonic.net>
+ - R. Scott Bailey <sbailey@dsddi.eds.com>
+ - Jonathan Corbet <corbet@lwn.net>
Special thanks go to
- Martin Kuehne <martin.kuehne@bnbt.de>
+
+ - Martin Kuehne <martin.kuehne@bnbt.de>
+
 for an old, second-hand (but fully functional) cdrom jukebox which I use
to develop/test driver and tools now.
@@ -176,5 +181,4 @@ Have fun,
Gerd
---
Gerd Knorr <kraxel@bytesex.org>
diff --git a/Documentation/scsi/scsi-generic.txt b/Documentation/scsi/scsi-generic.rst
index 51be20a6a14d..258505e557a6 100644
--- a/Documentation/scsi/scsi-generic.txt
+++ b/Documentation/scsi/scsi-generic.rst
@@ -1,6 +1,11 @@
- Notes on Linux SCSI Generic (sg) driver
- ---------------------------------------
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+Notes on Linux SCSI Generic (sg) driver
+=======================================
+
20020126
+
Introduction
============
The SCSI Generic driver (sg) is one of the four "high level" SCSI device
@@ -18,7 +23,7 @@ and examples.
Major versions of the sg driver
===============================
There are three major versions of sg found in the linux kernel (lk):
- - sg version 1 (original) from 1992 to early 1999 (lk 2.2.5) .
+ - sg version 1 (original) from 1992 to early 1999 (lk 2.2.5) .
It is based in the sg_header interface structure.
- sg version 2 from lk 2.2.6 in the 2.2 series. It is based on
an extended version of the sg_header interface structure.
@@ -29,12 +34,16 @@ There are three major versions of sg found in the linux kernel (lk):
Sg driver documentation
=======================
The most recent documentation of the sg driver is kept at the Linux
-Documentation Project's (LDP) site:
-http://www.tldp.org/HOWTO/SCSI-Generic-HOWTO
+Documentation Project's (LDP) site:
+
+- http://www.tldp.org/HOWTO/SCSI-Generic-HOWTO
+
This describes the sg version 3 driver found in the lk 2.4 series.
+
The LDP renders documents in single and multiple page HTML, postscript
and pdf. This document can also be found at:
-http://sg.danny.cz/sg/p/sg_v3_ho.html
+
+- http://sg.danny.cz/sg/p/sg_v3_ho.html
Documentation for the version 2 sg driver found in the lk 2.2 series can
be found at http://sg.danny.cz/sg/. A larger version
@@ -45,23 +54,27 @@ found at http://www.torque.net/sg/p/original/SCSI-Programming-HOWTO.txt
and in the LDP archives.
A changelog with brief notes can be found in the
-/usr/src/linux/include/scsi/sg.h file. Note that the glibc maintainers copy
-and edit this file (removing its changelog for example) before placing it
-in /usr/include/scsi/sg.h . Driver debugging information and other notes
+/usr/src/linux/include/scsi/sg.h file. Note that the glibc maintainers copy
+and edit this file (removing its changelog for example) before placing it
+in /usr/include/scsi/sg.h . Driver debugging information and other notes
can be found at the top of the /usr/src/linux/drivers/scsi/sg.c file.
-A more general description of the Linux SCSI subsystem of which sg is a
+A more general description of the Linux SCSI subsystem of which sg is a
part can be found at http://www.tldp.org/HOWTO/SCSI-2.4-HOWTO .
Example code and utilities
==========================
There are two packages of sg utilities:
- - sg3_utils for the sg version 3 driver found in lk 2.4
- - sg_utils for the sg version 2 (and original) driver found in lk 2.2
+
+ ========= ==========================================================
+ sg3_utils for the sg version 3 driver found in lk 2.4
+ sg_utils  for the sg version 2 (and original) driver found in lk 2.2
            and earlier
+ ========= ==========================================================
+
 Both packages will work in the lk 2.4 series; however, sg3_utils offers more
-capabilities. They can be found at: http://sg.danny.cz/sg/sg3_utils.html and
+capabilities. They can be found at: http://sg.danny.cz/sg/sg3_utils.html and
freecode.com
Another approach is to look at the applications that use the sg driver.
@@ -72,30 +85,34 @@ Mapping of Linux kernel versions to sg driver versions
======================================================
Here is a list of linux kernels in the 2.4 series that had new version
of the sg driver:
- lk 2.4.0 : sg version 3.1.17
- lk 2.4.7 : sg version 3.1.19
- lk 2.4.10 : sg version 3.1.20 **
- lk 2.4.17 : sg version 3.1.22
-** There were 3 changes to sg version 3.1.20 by third parties in the
- next six linux kernel versions.
+ - lk 2.4.0 : sg version 3.1.17
+ - lk 2.4.7 : sg version 3.1.19
+ - lk 2.4.10 : sg version 3.1.20 [#]_
+ - lk 2.4.17 : sg version 3.1.22
+
+.. [#] There were 3 changes to sg version 3.1.20 by third parties in the
+ next six linux kernel versions.
-For reference here is a list of linux kernels in the 2.2 series that had
+For reference here is a list of linux kernels in the 2.2 series that had
new version of the sg driver:
- lk 2.2.0 : original sg version [with no version number]
- lk 2.2.6 : sg version 2.1.31
- lk 2.2.8 : sg version 2.1.32
- lk 2.2.10 : sg version 2.1.34 [SG_GET_VERSION_NUM ioctl first appeared]
- lk 2.2.14 : sg version 2.1.36
- lk 2.2.16 : sg version 2.1.38
- lk 2.2.17 : sg version 2.1.39
- lk 2.2.20 : sg version 2.1.40
+
+ - lk 2.2.0 : original sg version [with no version number]
+ - lk 2.2.6 : sg version 2.1.31
+ - lk 2.2.8 : sg version 2.1.32
+ - lk 2.2.10 : sg version 2.1.34 [SG_GET_VERSION_NUM ioctl first appeared]
+ - lk 2.2.14 : sg version 2.1.36
+ - lk 2.2.16 : sg version 2.1.38
+ - lk 2.2.17 : sg version 2.1.39
+ - lk 2.2.20 : sg version 2.1.40
The lk 2.5 development series has recently commenced and it currently
contains sg version 3.5.23 which is functionally equivalent to sg
-version 3.1.22 found in lk 2.4.17 .
+version 3.1.22 found in lk 2.4.17.
Douglas Gilbert
+
26th January 2002
+
dgilbert@interlog.com
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.rst
index 25a4b4cf04a6..9aba897c97ac 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.rst
@@ -1,31 +1,35 @@
- SCSI Kernel Parameters
- ~~~~~~~~~~~~~~~~~~~~~~
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+SCSI Kernel Parameters
+======================
See Documentation/admin-guide/kernel-parameters.rst for general information on
specifying module parameters.
This document may not be entirely up to date and comprehensive. The command
-"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
+``modinfo -p ${modulename}`` shows a current list of all parameters of a loadable
module. Loadable modules, after being loaded into the running kernel, also
reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
parameters may be changed at runtime by the command
-"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
+``echo -n ${value} > /sys/module/${modulename}/parameters/${parm}``.
+::
advansys= [HW,SCSI]
See header of drivers/scsi/advansys.c.
aha152x= [HW,SCSI]
- See Documentation/scsi/aha152x.txt.
+ See Documentation/scsi/aha152x.rst.
aha1542= [HW,SCSI]
Format: <portbase>[,<buson>,<busoff>[,<dmaspeed>]]
aic7xxx= [HW,SCSI]
- See Documentation/scsi/aic7xxx.txt.
+ See Documentation/scsi/aic7xxx.rst.
aic79xx= [HW,SCSI]
- See Documentation/scsi/aic79xx.txt.
+ See Documentation/scsi/aic79xx.rst.
atascsi= [HW,SCSI]
See drivers/scsi/atari_scsi.c.
@@ -57,19 +61,19 @@ parameters may be changed at runtime by the command
See header of drivers/scsi/NCR_D700.c.
ncr5380= [HW,SCSI]
- See Documentation/scsi/g_NCR5380.txt.
+ See Documentation/scsi/g_NCR5380.rst.
ncr53c400= [HW,SCSI]
- See Documentation/scsi/g_NCR5380.txt.
+ See Documentation/scsi/g_NCR5380.rst.
ncr53c400a= [HW,SCSI]
- See Documentation/scsi/g_NCR5380.txt.
+ See Documentation/scsi/g_NCR5380.rst.
ncr53c8xx= [HW,SCSI]
osst= [HW,SCSI] SCSI Tape Driver
Format: <buffer_size>,<write_threshold>
- See also Documentation/scsi/st.txt.
+ See also Documentation/scsi/st.rst.
scsi_debug_*= [SCSI]
See drivers/scsi/scsi_debug.c.
@@ -101,7 +105,7 @@ parameters may be changed at runtime by the command
See header of drivers/scsi/sim710.c.
st= [HW,SCSI] SCSI tape parameters (buffers, etc.)
- See Documentation/scsi/st.txt.
+ See Documentation/scsi/st.rst.
wd33c93= [HW,SCSI]
See header of drivers/scsi/wd33c93.c.
diff --git a/Documentation/scsi/scsi.txt b/Documentation/scsi/scsi.rst
index 3d99d38cb62a..276918eb4d74 100644
--- a/Documentation/scsi/scsi.txt
+++ b/Documentation/scsi/scsi.rst
@@ -1,44 +1,47 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
SCSI subsystem documentation
============================
+
The Linux Documentation Project (LDP) maintains a document describing
the SCSI subsystem in the Linux kernel (lk) 2.4 series. See:
http://www.tldp.org/HOWTO/SCSI-2.4-HOWTO . The LDP has single
and multiple page HTML renderings as well as postscript and pdf.
It can also be found at:
-http://web.archive.org/web/*/http://www.torque.net/scsi/SCSI-2.4-HOWTO
+http://web.archive.org/web/%2E/http://www.torque.net/scsi/SCSI-2.4-HOWTO
Notes on using modules in the SCSI subsystem
============================================
-The scsi support in the linux kernel can be modularized in a number of
+The scsi support in the linux kernel can be modularized in a number of
different ways depending upon the needs of the end user. To understand
your options, we should first define a few terms.
-The scsi-core (also known as the "mid level") contains the core of scsi
+The scsi-core (also known as the "mid level") contains the core of scsi
support. Without it you can do nothing with any of the other scsi drivers.
The scsi core support can be a module (scsi_mod.o), or it can be built into
-the kernel. If the core is a module, it must be the first scsi module
-loaded, and if you unload the modules, it will have to be the last one
+the kernel. If the core is a module, it must be the first scsi module
+loaded, and if you unload the modules, it will have to be the last one
unloaded. In practice the modprobe and rmmod commands (and "autoclean")
will enforce the correct ordering of loading and unloading modules in
the SCSI subsystem.
-The individual upper and lower level drivers can be loaded in any order
+The individual upper and lower level drivers can be loaded in any order
once the scsi core is present in the kernel (either compiled in or loaded
as a module). The disk driver (sd_mod.o), cdrom driver (sr_mod.o),
-tape driver ** (st.o) and scsi generics driver (sg.o) represent the upper
-level drivers to support the various assorted devices which can be
-controlled. You can for example load the tape driver to use the tape drive,
+tape driver [1]_ (st.o) and scsi generics driver (sg.o) represent the upper
+level drivers to support the various assorted devices which can be
+controlled. You can for example load the tape driver to use the tape drive,
and then unload it once you have no further need for the driver (and release
the associated memory).
The lower level drivers are the ones that support the individual cards that
are supported for the hardware platform that you are running under. Those
individual cards are often called Host Bus Adapters (HBAs). For example the
-aic7xxx.o driver is used to control all recent SCSI controller cards from
-Adaptec. Almost all lower level drivers can be built either as modules or
+aic7xxx.o driver is used to control all recent SCSI controller cards from
+Adaptec. Almost all lower level drivers can be built either as modules or
built into the kernel.
-
-** There is a variant of the st driver for controlling OnStream tape
- devices. Its module name is osst.o .
+.. [1] There is a variant of the st driver for controlling OnStream tape
+ devices. Its module name is osst.o .
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.rst
index 1b7436932a2b..7d78c2475615 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.rst
@@ -1,35 +1,39 @@
+.. SPDX-License-Identifier: GPL-2.0
+=======
SCSI EH
-======================================
+=======
- This document describes SCSI midlayer error handling infrastructure.
-Please refer to Documentation/scsi/scsi_mid_low_api.txt for more
+This document describes SCSI midlayer error handling infrastructure.
+Please refer to Documentation/scsi/scsi_mid_low_api.rst for more
information regarding SCSI midlayer.
-TABLE OF CONTENTS
+.. TABLE OF CONTENTS
-[1] How SCSI commands travel through the midlayer and to EH
- [1-1] struct scsi_cmnd
- [1-2] How do scmd's get completed?
- [1-2-1] Completing a scmd w/ scsi_done
- [1-2-2] Completing a scmd w/ timeout
- [1-3] How EH takes over
-[2] How SCSI EH works
- [2-1] EH through fine-grained callbacks
- [2-1-1] Overview
- [2-1-2] Flow of scmds through EH
- [2-1-3] Flow of control
- [2-2] EH through transportt->eh_strategy_handler()
- [2-2-1] Pre transportt->eh_strategy_handler() SCSI midlayer conditions
- [2-2-2] Post transportt->eh_strategy_handler() SCSI midlayer conditions
- [2-2-3] Things to consider
+ [1] How SCSI commands travel through the midlayer and to EH
+ [1-1] struct scsi_cmnd
+ [1-2] How do scmd's get completed?
+ [1-2-1] Completing a scmd w/ scsi_done
+ [1-2-2] Completing a scmd w/ timeout
+ [1-3] How EH takes over
+ [2] How SCSI EH works
+ [2-1] EH through fine-grained callbacks
+ [2-1-1] Overview
+ [2-1-2] Flow of scmds through EH
+ [2-1-3] Flow of control
+ [2-2] EH through transportt->eh_strategy_handler()
+ [2-2-1] Pre transportt->eh_strategy_handler() SCSI midlayer conditions
+ [2-2-2] Post transportt->eh_strategy_handler() SCSI midlayer conditions
+ [2-2-3] Things to consider
-[1] How SCSI commands travel through the midlayer and to EH
+1. How SCSI commands travel through the midlayer and to EH
+==========================================================
-[1-1] struct scsi_cmnd
+1.1 struct scsi_cmnd
+--------------------
- Each SCSI command is represented with struct scsi_cmnd (== scmd). A
+Each SCSI command is represented with struct scsi_cmnd (== scmd). A
scmd has two list_head's to link itself into lists. The two are
scmd->list and scmd->eh_entry. The former is used for free list or
per-device allocated scmd list and not of much interest to this EH
@@ -38,25 +42,28 @@ otherwise stated scmds are always linked using scmd->eh_entry in this
discussion.
-[1-2] How do scmd's get completed?
+1.2 How do scmd's get completed?
+--------------------------------
- Once LLDD gets hold of a scmd, either the LLDD will complete the
+Once LLDD gets hold of a scmd, either the LLDD will complete the
command by calling scsi_done callback passed from midlayer when
invoking hostt->queuecommand() or the block layer will time it out.
-[1-2-1] Completing a scmd w/ scsi_done
+1.2.1 Completing a scmd w/ scsi_done
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- For all non-EH commands, scsi_done() is the completion callback. It
+For all non-EH commands, scsi_done() is the completion callback. It
just calls blk_complete_request() to delete the block layer timer and
raise SCSI_SOFTIRQ
- SCSI_SOFTIRQ handler scsi_softirq calls scsi_decide_disposition() to
+SCSI_SOFTIRQ handler scsi_softirq calls scsi_decide_disposition() to
determine what to do with the command. scsi_decide_disposition()
looks at the scmd->result value and sense data to determine what to do
with the command.
- SUCCESS
+
scsi_finish_command() is invoked for the command. The
function does some maintenance chores and then calls
scsi_io_completion() to finish the I/O.
@@ -66,17 +73,21 @@ with the command.
of the data in case of an error.
- NEEDS_RETRY
+
- ADD_TO_MLQUEUE
+
scmd is requeued to blk queue.
- otherwise
+
scsi_eh_scmd_add(scmd) is invoked for the command. See
[1-3] for details of this function.
-[1-2-2] Completing a scmd w/ timeout
+1.2.2 Completing a scmd w/ timeout
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- The timeout handler is scsi_times_out(). When a timeout occurs, this
+The timeout handler is scsi_times_out(). When a timeout occurs, this
function
1. invokes optional hostt->eh_timed_out() callback. Return value can
@@ -101,18 +112,21 @@ function
3. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
command. See [1-4] for more information.
-[1-3] Asynchronous command aborts
+1.3 Asynchronous command aborts
+-------------------------------
After a timeout occurs a command abort is scheduled from
scsi_abort_command(). If the abort is successful the command
will either be retried (if the number of retries is not exhausted)
or terminated with DID_TIME_OUT.
+
Otherwise scsi_eh_scmd_add() is invoked for the command.
See [1-4] for more information.
-[1-4] How EH takes over
+1.4 How EH takes over
+---------------------
- scmds enter EH via scsi_eh_scmd_add(), which does the following.
+scmds enter EH via scsi_eh_scmd_add(), which does the following.
1. Links scmd->eh_entry to shost->eh_cmd_q
@@ -122,19 +136,19 @@ function
4. Wakes up SCSI EH thread if shost->host_busy == shost->host_failed
- As can be seen above, once any scmd is added to shost->eh_cmd_q,
+As can be seen above, once any scmd is added to shost->eh_cmd_q,
SHOST_RECOVERY shost_state bit is turned on. This prevents any new
 scmd from being issued from the blk queue to the host; eventually, all scmds on
the host either complete normally, fail and get added to eh_cmd_q, or
time out and get added to shost->eh_cmd_q.
- If all scmds either complete or fail, the number of in-flight scmds
+If all scmds either complete or fail, the number of in-flight scmds
becomes equal to the number of failed scmds - i.e. shost->host_busy ==
shost->host_failed. This wakes up SCSI EH thread. So, once woken up,
SCSI EH thread can expect that all in-flight commands have failed and
are linked on shost->eh_cmd_q.
- Note that this does not mean lower layers are quiescent. If a LLDD
+Note that this does not mean lower layers are quiescent. If a LLDD
completed a scmd with error status, the LLDD and lower layers are
assumed to forget about the scmd at that point. However, if a scmd
has timed out, unless hostt->eh_timed_out() made lower layers forget
@@ -143,13 +157,14 @@ active as long as lower layers are concerned and completion could
occur at any time. Of course, all such completions are ignored as the
timer has already expired.
- We'll talk about how SCSI EH takes actions to abort - make LLDD
+We'll talk about how SCSI EH takes actions to abort - make LLDD
forget about - timed out scmds later.
-[2] How SCSI EH works
+2. How SCSI EH works
+====================
- LLDD's can implement SCSI EH actions in one of the following two
+LLDD's can implement SCSI EH actions in one of the following two
ways.
- Fine-grained EH callbacks
@@ -162,7 +177,7 @@ ways.
handling. As such, it should do all chores the SCSI midlayer
performs during recovery. This will be discussed in [2-2].
- Once recovery is complete, SCSI EH resumes normal operation by
+Once recovery is complete, SCSI EH resumes normal operation by
calling scsi_restart_operations(), which
1. Checks if door locking is needed and locks door.
@@ -177,34 +192,38 @@ calling scsi_restart_operations(), which
4. Kicks queues in all devices on the host in the asses
-[2-1] EH through fine-grained callbacks
+2.1 EH through fine-grained callbacks
+-------------------------------------
-[2-1-1] Overview
+2.1.1 Overview
+^^^^^^^^^^^^^^
- If eh_strategy_handler() is not present, SCSI midlayer takes charge
+If eh_strategy_handler() is not present, SCSI midlayer takes charge
 of driving error handling. EH has two goals - make LLDD, host and
device forget about timed out scmds and make them ready for new
commands. A scmd is said to be recovered if the scmd is forgotten by
lower layers and lower layers are ready to process or fail the scmd
again.
- To achieve these goals, EH performs recovery actions with increasing
+To achieve these goals, EH performs recovery actions with increasing
severity. Some actions are performed by issuing SCSI commands and
others are performed by invoking one of the following fine-grained
hostt EH callbacks. Callbacks may be omitted and omitted ones are
 considered to always fail.
-int (* eh_abort_handler)(struct scsi_cmnd *);
-int (* eh_device_reset_handler)(struct scsi_cmnd *);
-int (* eh_bus_reset_handler)(struct scsi_cmnd *);
-int (* eh_host_reset_handler)(struct scsi_cmnd *);
+::
- Higher-severity actions are taken only when lower-severity actions
+ int (* eh_abort_handler)(struct scsi_cmnd *);
+ int (* eh_device_reset_handler)(struct scsi_cmnd *);
+ int (* eh_bus_reset_handler)(struct scsi_cmnd *);
+ int (* eh_host_reset_handler)(struct scsi_cmnd *);
+
+Higher-severity actions are taken only when lower-severity actions
cannot recover some of failed scmds. Also, note that failure of the
highest-severity action means EH failure and results in offlining of
all unrecovered devices.
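+
+A bare-bones shape of the first of these callbacks - purely a skeleton;
+example_hw_abort() is a made-up LLDD helper, not a real kernel API::
+
+    static int example_eh_abort_handler(struct scsi_cmnd *scmd)
+    {
+            /*
+             * SUCCESS tells the midlayer the LLDD has forgotten the
+             * command; FAILED escalates to the next, more severe
+             * recovery action.
+             */
+            if (example_hw_abort(scmd))
+                    return SUCCESS;
+            return FAILED;
+    }
+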
- During recovery, the following rules are followed
+During recovery, the following rules are followed
- Recovery actions are performed on failed scmds on the to do list,
eh_work_q. If a recovery action succeeds for a scmd, recovered
@@ -221,58 +240,72 @@ all unrecovered devices.
timed-out scmds, SCSI EH ensures that LLDD forgets about a scmd
before reusing it for EH commands.
- When a scmd is recovered, the scmd is moved from eh_work_q to EH
+When a scmd is recovered, the scmd is moved from eh_work_q to EH
local eh_done_q using scsi_eh_finish_cmd(). After all scmds are
recovered (eh_work_q is empty), scsi_eh_flush_done_q() is invoked to
either retry or error-finish (notify upper layer of failure) recovered
scmds.
- scmds are retried iff its sdev is still online (not offlined during
+A scmd is retried iff its sdev is still online (not offlined during
EH), REQ_FAILFAST is not set and ++scmd->retries is less than
scmd->allowed.
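+
+In sketch form (the two flag parameters are illustrative names, not the
+midlayer's actual variables)::
+
+    static int should_retry(const struct scsi_cmnd *scmd, int sdev_online,
+                            int req_failfast)
+    {
+            /* retries is compared after the increment, hence the +1 */
+            return sdev_online && !req_failfast &&
+                   scmd->retries + 1 < scmd->allowed;
+    }
+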
-[2-1-2] Flow of scmds through EH
+2.1.2 Flow of scmds through EH
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. Error completion / time out
- ACTION: scsi_eh_scmd_add() is invoked for scmd
+
+ :ACTION: scsi_eh_scmd_add() is invoked for scmd
+
- add scmd to shost->eh_cmd_q
- set SHOST_RECOVERY
- shost->host_failed++
- LOCKING: shost->host_lock
+
+ :LOCKING: shost->host_lock
2. EH starts
- ACTION: move all scmds to EH's local eh_work_q. shost->eh_cmd_q
- is cleared.
- LOCKING: shost->host_lock (not strictly necessary, just for
+
+ :ACTION: move all scmds to EH's local eh_work_q. shost->eh_cmd_q
+ is cleared.
+
+ :LOCKING: shost->host_lock (not strictly necessary, just for
consistency)
3. scmd recovered
- ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
+
+ :ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
+
- scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
- LOCKING: none
- CONCURRENCY: at most one thread per separate eh_work_q to
- keep queue manipulation lockless
+
+ :LOCKING: none
+
+ :CONCURRENCY: at most one thread per separate eh_work_q to
+ keep queue manipulation lockless
4. EH completes
- ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
- layer of failure. May be called concurrently but must have
- a no more than one thread per separate eh_work_q to
- manipulate the queue locklessly
- - scmd is removed from eh_done_q and scmd->eh_entry is cleared
- - if retry is necessary, scmd is requeued using
- scsi_queue_insert()
- - otherwise, scsi_finish_command() is invoked for scmd
- - zero shost->host_failed
- LOCKING: queue or finish function performs appropriate locking
+ :ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
+ layer of failure. May be called concurrently but must have
+    no more than one thread per separate eh_work_q to
+ manipulate the queue locklessly
-[2-1-3] Flow of control
+ - scmd is removed from eh_done_q and scmd->eh_entry is cleared
+ - if retry is necessary, scmd is requeued using
+ scsi_queue_insert()
+ - otherwise, scsi_finish_command() is invoked for scmd
+ - zero shost->host_failed
+
+ :LOCKING: queue or finish function performs appropriate locking
+
+
+2.1.3 Flow of control
+^^^^^^^^^^^^^^^^^^^^^^
EH through fine-grained callbacks start from scsi_unjam_host().
-<<scsi_unjam_host>>
+``scsi_unjam_host``
1. Lock shost->host_lock, splice_init shost->eh_cmd_q into local
eh_work_q and unlock host_lock. Note that shost->eh_cmd_q is
@@ -280,7 +313,7 @@ scmd->allowed.
2. Invoke scsi_eh_get_sense.
- <<scsi_eh_get_sense>>
+ ``scsi_eh_get_sense``
This action is taken for each error-completed
(!SCSI_EH_CANCEL_CMD) commands without valid sense data. Most
@@ -315,7 +348,7 @@ scmd->allowed.
3. If !list_empty(&eh_work_q), invoke scsi_eh_abort_cmds().
- <<scsi_eh_abort_cmds>>
+ ``scsi_eh_abort_cmds``
This action is taken for each timed out command when
no_async_abort is enabled in the host template.
@@ -339,14 +372,14 @@ scmd->allowed.
4. If !list_empty(&eh_work_q), invoke scsi_eh_ready_devs()
- <<scsi_eh_ready_devs>>
+ ``scsi_eh_ready_devs``
This function takes four increasingly more severe measures to
make failed sdevs ready for new commands.
1. Invoke scsi_eh_stu()
- <<scsi_eh_stu>>
+ ``scsi_eh_stu``
For each sdev which has failed scmds with valid sense data
of which scsi_check_sense()'s verdict is FAILED,
@@ -369,7 +402,7 @@ scmd->allowed.
2. If !list_empty(&eh_work_q), invoke scsi_eh_bus_device_reset().
- <<scsi_eh_bus_device_reset>>
+ ``scsi_eh_bus_device_reset``
This action is very similar to scsi_eh_stu() except that,
instead of issuing STU, hostt->eh_device_reset_handler()
@@ -379,7 +412,7 @@ scmd->allowed.
3. If !list_empty(&eh_work_q), invoke scsi_eh_bus_reset()
- <<scsi_eh_bus_reset>>
+ ``scsi_eh_bus_reset``
hostt->eh_bus_reset_handler() is invoked for each channel
with failed scmds. If bus reset succeeds, all failed
@@ -388,7 +421,7 @@ scmd->allowed.
4. If !list_empty(&eh_work_q), invoke scsi_eh_host_reset()
- <<scsi_eh_host_reset>>
+ ``scsi_eh_host_reset``
This is the last resort. hostt->eh_host_reset_handler()
is invoked. If host reset succeeds, all failed scmds on
@@ -396,14 +429,14 @@ scmd->allowed.
5. If !list_empty(&eh_work_q), invoke scsi_eh_offline_sdevs()
- <<scsi_eh_offline_sdevs>>
+ ``scsi_eh_offline_sdevs``
Take all sdevs which still have unrecovered scmds offline
and EH-finish the scmds.
5. Invoke scsi_eh_flush_done_q().
- <<scsi_eh_flush_done_q>>
+ ``scsi_eh_flush_done_q``
At this point all scmds are recovered (or given up) and
put on eh_done_q by scsi_eh_finish_cmd(). This function
@@ -411,9 +444,10 @@ scmd->allowed.
layer of failure of the scmds.
-[2-2] EH through transportt->eh_strategy_handler()
+2.2 EH through transportt->eh_strategy_handler()
+------------------------------------------------
- transportt->eh_strategy_handler() is invoked in the place of
+transportt->eh_strategy_handler() is invoked in the place of
scsi_unjam_host() and it is responsible for whole recovery process.
On completion, the handler should have made lower layers forget about
all failed scmds and either ready for new commands or offline. Also,
@@ -422,7 +456,8 @@ SCSI midlayer. IOW, of the steps described in [2-1-2], all steps
except for #1 must be implemented by eh_strategy_handler().
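+
+A rough, hedged outline of such a handler (the per-command recovery
+helper xyz_recover_scmd() is hypothetical; a real handler must also
+satisfy the post-conditions listed in [2-2-2])::
+
+    static void xyz_eh_strategy_handler(struct Scsi_Host *shost)
+    {
+            struct scsi_cmnd *scmd, *next;
+            LIST_HEAD(eh_done_q);
+
+            list_for_each_entry_safe(scmd, next, &shost->eh_cmd_q, eh_entry) {
+                    xyz_recover_scmd(scmd);         /* hypothetical */
+                    scsi_eh_finish_cmd(scmd, &eh_done_q);
+            }
+
+            /* retry or error-finish everything that was recovered */
+            scsi_eh_flush_done_q(&eh_done_q);
+    }
+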
-[2-2-1] Pre transportt->eh_strategy_handler() SCSI midlayer conditions
+2.2.1 Pre transportt->eh_strategy_handler() SCSI midlayer conditions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following conditions are true on entry to the handler.
@@ -435,7 +470,8 @@ except for #1 must be implemented by eh_strategy_handler().
- shost->host_failed == shost->host_busy
-[2-2-2] Post transportt->eh_strategy_handler() SCSI midlayer conditions
+2.2.2 Post transportt->eh_strategy_handler() SCSI midlayer conditions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following conditions must be true on exit from the handler.
@@ -453,7 +489,8 @@ except for #1 must be implemented by eh_strategy_handler().
->allowed to limit the number of retries.
-[2-2-3] Things to consider
+2.2.3 Things to consider
+^^^^^^^^^^^^^^^^^^^^^^^^
- Know that timed out scmds are still active on lower layers. Make
lower layers forget about them before doing anything else with
@@ -469,7 +506,7 @@ except for #1 must be implemented by eh_strategy_handler().
offline.
---
Tejun Heo
htejun@gmail.com
+
11th September 2005
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.rst
index f79282fc48d7..176c1862cb9b 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.rst
@@ -1,8 +1,13 @@
- SCSI FC Tansport
- =============================================
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+SCSI FC Transport
+=================
Date: 11/18/2008
-Kernel Revisions for features:
+
+Kernel Revisions for features::
+
rports : <<TBS>>
vports : 2.6.22
bsg support : 2.6.30 (?TBD?)
@@ -12,25 +17,27 @@ Introduction
============
This file documents the features and components of the SCSI FC Transport.
It also documents the API between the transport and FC LLDDs.
-The FC transport can be found at:
+
+The FC transport can be found at::
+
drivers/scsi/scsi_transport_fc.c
include/scsi/scsi_transport_fc.h
include/scsi/scsi_netlink_fc.h
include/scsi/scsi_bsg_fc.h
-This file is found at Documentation/scsi/scsi_fc_transport.txt
+This file is found at Documentation/scsi/scsi_fc_transport.rst
FC Remote Ports (rports)
-========================================================================
+========================
<< To Be Supplied >>
FC Virtual Ports (vports)
-========================================================================
+=========================
-Overview:
--------------------------------
+Overview
+--------
New FC standards have defined mechanisms which allow a single physical
port to appear as multiple communication ports. Using the N_Port Id
@@ -61,12 +68,14 @@ Overview:
Thus, whether a FC port is based on a physical port or on a virtual port,
each will appear as a unique scsi_host with its own target and lun space.
- Note: At this time, the transport is written to create only NPIV-based
+ .. Note::
+ At this time, the transport is written to create only NPIV-based
vports. However, consideration was given to VF-based vports and it
should be a minor change to add support if needed. The remaining
discussion will concentrate on NPIV.
- Note: World Wide Name assignment (and uniqueness guarantees) are left
+ .. Note::
+ World Wide Name assignment (and uniqueness guarantees) are left
up to an administrative entity controlling the vport. For example,
if vports are to be associated with virtual machines, a XEN mgmt
utility would be responsible for creating wwpn/wwnn's for the vport,
@@ -91,18 +100,29 @@ Device Trees and Vport Objects:
port's scsi_host.
Here's what to expect in the device tree :
- The typical Physical Port's Scsi_Host:
+
+ The typical Physical Port's Scsi_Host::
+
/sys/devices/.../host17/
- and it has the typical descendant tree:
+
+ and it has the typical descendant tree::
+
/sys/devices/.../host17/rport-17:0-0/target17:0:0/17:0:0:0:
- and then the vport is created on the Physical Port:
+
+ and then the vport is created on the Physical Port::
+
/sys/devices/.../host17/vport-17:0-0
- and the vport's Scsi_Host is then created:
+
+ and the vport's Scsi_Host is then created::
+
/sys/devices/.../host17/vport-17:0-0/host18
- and then the rest of the tree progresses, such as:
+
+ and then the rest of the tree progresses, such as::
+
/sys/devices/.../host17/vport-17:0-0/host18/rport-18:0-0/target18:0:0/18:0:0:0:
- Here's what to expect in the sysfs tree :
+ Here's what to expect in the sysfs tree::
+
scsi_hosts:
/sys/class/scsi_host/host17 physical port's scsi_host
/sys/class/scsi_host/host18 vport's scsi_host
@@ -116,8 +136,8 @@ Device Trees and Vport Objects:
/sys/class/fc_remote_ports/rport-18:0-0 rport on the vport
-Vport Attributes:
--------------------------------
+Vport Attributes
+----------------
The new fc_vport class object has the following attributes
@@ -184,16 +204,18 @@ Vport Attributes:
(e.g. 0x, x, etc).
-Vport States:
--------------------------------
+Vport States
+------------
Vport instantiation consists of two parts:
+
- Creation with the kernel and LLDD. This means all transport and
driver data structures are built up, and device objects created.
This is equivalent to a driver "attach" on an adapter, which is
independent of the adapter's link state.
- Instantiation of the vport on the FC link via ELS traffic, etc.
This is equivalent to a "link up" and successful link initialization.
+
Further information can be found in the interfaces section below for
Vport Creation.
@@ -227,6 +249,7 @@ Vport States:
FC_VPORT_NO_FABRIC_SUPP - No Fabric Support
The vport is not operational. One of the following conditions were
encountered:
+
- The FC topology is not Point-to-Point
- The FC port is not connected to an F_Port
- The F_Port has indicated that NPIV is not supported.
@@ -251,32 +274,53 @@ Vport States:
The following state table indicates the different state transitions:
- State Event New State
- --------------------------------------------------------------------
- n/a Initialization Unknown
- Unknown: Link Down Linkdown
- Link Up & Loop No Fabric Support
- Link Up & no Fabric No Fabric Support
- Link Up & FLOGI response No Fabric Support
- indicates no NPIV support
- Link Up & FDISC being sent Initializing
- Disable request Disable
- Linkdown: Link Up Unknown
- Initializing: FDISC ACC Active
- FDISC LS_RJT w/ no resources No Fabric Resources
- FDISC LS_RJT w/ invalid Fabric Rejected WWN
- pname or invalid nport_id
- FDISC LS_RJT failed for Vport Failed
- other reasons
- Link Down Linkdown
- Disable request Disable
- Disable: Enable request Unknown
- Active: LOGO received from fabric Fabric Logout
- Link Down Linkdown
- Disable request Disable
- Fabric Logout: Link still up Unknown
+ +------------------+--------------------------------+---------------------+
+ | State | Event | New State |
+ +==================+================================+=====================+
+ | n/a | Initialization | Unknown |
+ +------------------+--------------------------------+---------------------+
+ | Unknown: | Link Down | Linkdown |
+ | +--------------------------------+---------------------+
+ | | Link Up & Loop | No Fabric Support |
+ | +--------------------------------+---------------------+
+ | | Link Up & no Fabric | No Fabric Support |
+ | +--------------------------------+---------------------+
+ | | Link Up & FLOGI response | No Fabric Support |
+ | | indicates no NPIV support | |
+ | +--------------------------------+---------------------+
+ | | Link Up & FDISC being sent | Initializing |
+ | +--------------------------------+---------------------+
+ | | Disable request | Disable |
+ +------------------+--------------------------------+---------------------+
+ | Linkdown: | Link Up | Unknown |
+ +------------------+--------------------------------+---------------------+
+ | Initializing: | FDISC ACC | Active |
+ | +--------------------------------+---------------------+
+ | | FDISC LS_RJT w/ no resources | No Fabric Resources |
+ | +--------------------------------+---------------------+
+ | | FDISC LS_RJT w/ invalid | Fabric Rejected WWN |
+ | | pname or invalid nport_id | |
+ | +--------------------------------+---------------------+
+ | | FDISC LS_RJT failed for | Vport Failed |
+ | | other reasons | |
+ | +--------------------------------+---------------------+
+ | | Link Down | Linkdown |
+ | +--------------------------------+---------------------+
+ | | Disable request | Disable |
+ +------------------+--------------------------------+---------------------+
+ | Disable: | Enable request | Unknown |
+ +------------------+--------------------------------+---------------------+
+ | Active: | LOGO received from fabric | Fabric Logout |
+ | +--------------------------------+---------------------+
+ | | Link Down | Linkdown |
+ | +--------------------------------+---------------------+
+ | | Disable request | Disable |
+ +------------------+--------------------------------+---------------------+
+ | Fabric Logout: | Link still up | Unknown |
+ +------------------+--------------------------------+---------------------+
+
+The following 4 error states all have the same transitions::
- The following 4 error states all have the same transitions:
No Fabric Support:
No Fabric Resources:
Fabric Rejected WWN:
@@ -285,8 +329,8 @@ Vport States:
Link goes down Linkdown
-Transport <-> LLDD Interfaces :
--------------------------------
+Transport <-> LLDD Interfaces
+-----------------------------
Vport support by LLDD:
@@ -300,14 +344,17 @@ Vport support by LLDD:
Vport Creation:
- The LLDD vport_create() syntax is:
+ The LLDD vport_create() syntax is::
int vport_create(struct fc_vport *vport, bool disable)
- where:
- vport: Is the newly allocated vport object
- disable: If "true", the vport is to be created in a disabled stated.
+ where:
+
+ ======= ===========================================================
+ vport Is the newly allocated vport object
+ disable If "true", the vport is to be created in a disabled state.
If "false", the vport is to be enabled upon creation.
+ ======= ===========================================================
When a request is made to create a new vport (via sgio/netlink, or the
vport_create fc_host attribute), the transport will validate that the LLDD
@@ -317,6 +364,7 @@ Vport Creation:
LLDD's vport_create() function with the newly allocated vport object.
As mentioned above, vport creation is divided into two parts:
+
- Creation with the kernel and LLDD. This means all transport and
driver data structures are built up, and device objects created.
This is equivalent to a driver "attach" on an adapter, which is
@@ -329,6 +377,7 @@ Vport Creation:
infrastructure exists to support NPIV, and complete the first part of
vport creation (data structure build up) before returning. We do not
hinge vport_create() on the link-side operation mainly because:
+
- The link may be down. It is not a failure if it is. It simply
means the vport is in an inoperable state until the link comes up.
This is consistent with the link bouncing post vport creation.
@@ -337,11 +386,15 @@ Vport Creation:
FC adapter. The vport_create is synonymous with driver attachment
to the adapter, which is independent of link state.
- Note: special error codes have been defined to delineate infrastructure
+ .. Note::
+
+ special error codes have been defined to delineate infrastructure
failure cases for quicker resolution.
The expected behavior for the LLDD's vport_create() function is:
+
- Validate Infrastructure:
+
- If the driver or adapter cannot support another vport, whether
due to improper firmware, (a lie about) max_npiv, or a lack of
some other resource - return VPCERR_UNSUPPORTED.
@@ -349,17 +402,21 @@ Vport Creation:
the adapter and detects an overlap - return VPCERR_BAD_WWN.
- If the driver detects the topology is loop, non-fabric, or the
FLOGI did not support NPIV - return VPCERR_NO_FABRIC_SUPP.
+
- Allocate data structures. If errors are encountered, such as out
of memory conditions, return the respective negative Exxx error code.
- If the role is FCP Initiator, the LLDD is to:
+
- Call scsi_host_alloc() to allocate a scsi_host for the vport.
- Call scsi_add_host(new_shost, &vport->dev) to start the scsi_host
and bind it as a child of the vport device.
- Initialize the fc_host attribute values.
+
- Kick off further vport state transitions based on the disable flag and
link state - and return success (zero).
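+
+  A hedged sketch of that contract follows (the xyz_* helpers and
+  types are hypothetical, and error handling is abbreviated)::
+
+    static int xyz_vport_create(struct fc_vport *vport, bool disable)
+    {
+            struct Scsi_Host *parent = vport_to_shost(vport);
+            struct xyz_hba *hba = shost_priv(parent);
+            struct Scsi_Host *new_shost;
+
+            /* validate infrastructure */
+            if (!xyz_can_add_vport(hba))
+                    return VPCERR_UNSUPPORTED;
+            if (xyz_wwn_overlaps(hba, vport->port_name))
+                    return VPCERR_BAD_WWN;
+
+            /* allocate data structures; FCP initiator gets a scsi_host */
+            new_shost = scsi_host_alloc(&xyz_vport_template, 0);
+            if (!new_shost)
+                    return -ENOMEM;
+            if (scsi_add_host(new_shost, &vport->dev)) {
+                    scsi_host_put(new_shost);
+                    return -EIO;
+            }
+
+            /* kick off link-side (FDISC) processing unless disabled */
+            if (!disable)
+                    xyz_start_fdisc(hba, vport);
+            return 0;
+    }
+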
LLDD Implementers Notes:
+
- It is suggested that there be a different fc_function_templates for
the physical port and the virtual port. The physical port's template
would have the vport_create, vport_delete, and vport_disable functions,
@@ -373,14 +430,17 @@ Vport Creation:
Vport Disable/Enable:
- The LLDD vport_disable() syntax is:
+ The LLDD vport_disable() syntax is::
int vport_disable(struct fc_vport *vport, bool disable)
- where:
- vport: Is vport to be enabled or disabled
- disable: If "true", the vport is to be disabled.
+ where:
+
+ ======= =======================================
+ vport Is vport to be enabled or disabled
+ disable If "true", the vport is to be disabled.
If "false", the vport is to be enabled.
+ ======= =======================================
When a request is made to change the disabled state on a vport, the
transport will validate the request against the existing vport state.
@@ -401,11 +461,12 @@ Vport Disable/Enable:
Vport Deletion:
- The LLDD vport_delete() syntax is:
+ The LLDD vport_delete() syntax is::
int vport_delete(struct fc_vport *vport)
- where:
+ where:
+
vport: Is vport to delete
When a request is made to delete a vport (via sgio/netlink, or via the
@@ -443,39 +504,42 @@ Transport supplied functions
The following functions are supplied by the FC-transport for use by LLDs.
- fc_vport_create - create a vport
- fc_vport_terminate - detach and remove a vport
-
-Details:
-
-/**
- * fc_vport_create - Admin App or LLDD requests creation of a vport
- * @shost: scsi host the virtual port is connected to.
- * @ids: The world wide names, FC4 port roles, etc for
- * the virtual port.
- *
- * Notes:
- * This routine assumes no locks are held on entry.
- */
-struct fc_vport *
-fc_vport_create(struct Scsi_Host *shost, struct fc_vport_identifiers *ids)
-
-/**
- * fc_vport_terminate - Admin App or LLDD requests termination of a vport
- * @vport: fc_vport to be terminated
- *
- * Calls the LLDD vport_delete() function, then deallocates and removes
- * the vport from the shost and object tree.
- *
- * Notes:
- * This routine assumes no locks are held on entry.
- */
-int
-fc_vport_terminate(struct fc_vport *vport)
+ ================== =========================
+ fc_vport_create create a vport
+ fc_vport_terminate detach and remove a vport
+ ================== =========================
+
+Details::
+
+ /**
+ * fc_vport_create - Admin App or LLDD requests creation of a vport
+ * @shost: scsi host the virtual port is connected to.
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+ struct fc_vport *
+ fc_vport_create(struct Scsi_Host *shost, struct fc_vport_identifiers *ids)
+
+ /**
+ * fc_vport_terminate - Admin App or LLDD requests termination of a vport
+ * @vport: fc_vport to be terminated
+ *
+ * Calls the LLDD vport_delete() function, then deallocates and removes
+ * the vport from the shost and object tree.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+ int
+ fc_vport_terminate(struct fc_vport *vport)
FC BSG support (CT & ELS passthru, and more)
-========================================================================
+============================================
+
<< To Be Supplied >>
diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
new file mode 100644
index 000000000000..5358bc10689e
--- /dev/null
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -0,0 +1,1313 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================================
+SCSI mid_level - lower_level driver interface
+=============================================
+
+Introduction
+============
+This document outlines the interface between the Linux SCSI mid level and
+SCSI lower level drivers. Lower level drivers (LLDs) are variously called
+host bus adapter (HBA) drivers and host drivers (HD). A "host" in this
+context is a bridge between a computer IO bus (e.g. PCI or ISA) and a
+single SCSI initiator port on a SCSI transport. An "initiator" port
+(SCSI terminology, see SAM-3 at http://www.t10.org) sends SCSI commands
+to "target" SCSI ports (e.g. disks). There can be many LLDs in a running
+system, but only one per hardware type. Most LLDs can control one or more
+SCSI HBAs. Some HBAs contain multiple hosts.
+
+In some cases the SCSI transport is an external bus that already has
+its own subsystem in Linux (e.g. USB and ieee1394). In such cases the
+SCSI subsystem LLD is a software bridge to the other driver subsystem.
+Examples are the usb-storage driver (found in the drivers/usb/storage
+directory) and the ieee1394/sbp2 driver (found in the drivers/ieee1394
+directory).
+
+For example, the aic7xxx LLD controls Adaptec SCSI parallel interface
+(SPI) controllers based on that company's 7xxx chip series. The aic7xxx
+LLD can be built into the kernel or loaded as a module. There can only be
+one aic7xxx LLD running in a Linux system but it may be controlling many
+HBAs. These HBAs might be either on PCI daughter-boards or built into
+the motherboard (or both). Some aic7xxx based HBAs are dual controllers
+and thus represent two hosts. Like most modern HBAs, each aic7xxx host
+has its own PCI device address. [The one-to-one correspondence between
+a SCSI host and a PCI device is common but not required (e.g. with
+ISA adapters).]
+
+The SCSI mid level isolates an LLD from other layers such as the SCSI
+upper layer drivers and the block layer.
+
+This version of the document roughly matches Linux kernel version 2.6.8.
+
+Documentation
+=============
+There is a SCSI documentation directory within the kernel source tree,
+typically Documentation/scsi . Most documents are in plain
+(i.e. ASCII) text. This file is named scsi_mid_low_api.rst and can be
+found in that directory. A more recent copy of this document may be found
+at http://web.archive.org/web/20070107183357rn_1/sg.torque.net/scsi/.
+Many LLDs are documented there (e.g. aic7xxx.txt). The SCSI mid-level is
+briefly described in scsi.txt which contains a url to a document
+describing the SCSI subsystem in the lk 2.4 series. Two upper level
+drivers have documents in that directory: st.txt (SCSI tape driver) and
+scsi-generic.txt (for the sg driver).
+
+Some documentation (or urls) for LLDs may be found in the C source code
+or in the same directory as the C source code. For example to find a url
+about the USB mass storage driver see the
+/usr/src/linux/drivers/usb/storage directory.
+
+Driver structure
+================
+Traditionally an LLD for the SCSI subsystem has been at least two files in
+the drivers/scsi directory. For example, a driver called "xyz" has a header
+file "xyz.h" and a source file "xyz.c". [Actually there is no good reason
+why this couldn't all be in one file; the header file is superfluous.] Some
+drivers that have been ported to several operating systems have more than
+two files. For example the aic7xxx driver has separate files for generic
+and OS-specific code (e.g. FreeBSD and Linux). Such drivers tend to have
+their own directory under the drivers/scsi directory.
+
+When a new LLD is being added to Linux, the following files (found in the
+drivers/scsi directory) will need some attention: Makefile and Kconfig .
+It is probably best to study how existing LLDs are organized.
+
+As the 2.5 series development kernels evolve into the 2.6 production
+series, changes are being introduced into this interface. An
+example of this is driver initialization code where there are now 2 models
+available. The older one, similar to what was found in the lk 2.4 series,
+is based on hosts that are detected at HBA driver load time. This will be
+referred to the "passive" initialization model. The newer model allows HBAs
+to be hot plugged (and unplugged) during the lifetime of the LLD and will
+be referred to as the "hotplug" initialization model. The newer model is
+preferred as it can handle both traditional SCSI equipment that is
+permanently connected as well as modern "SCSI" devices (e.g. USB or
+IEEE 1394 connected digital cameras) that are hotplugged. Both
+initialization models are discussed in the following sections.
+
+An LLD interfaces to the SCSI subsystem in several ways:
+
+ a) directly invoking functions supplied by the mid level
+ b) passing a set of function pointers to a registration function
+ supplied by the mid level. The mid level will then invoke these
+ functions at some point in the future. The LLD will supply
+ implementations of these functions.
+ c) direct access to instances of well known data structures maintained
+ by the mid level
+
+Those functions in group a) are listed in a section entitled "Mid level
+supplied functions" below.
+
+Those functions in group b) are listed in a section entitled "Interface
+functions" below. Their function pointers are placed in the members of
+"struct scsi_host_template", an instance of which is passed to
+scsi_host_alloc() [#]_. Those interface functions that the LLD does not
+wish to supply should have NULL placed in the corresponding member of
+struct scsi_host_template. Defining an instance of struct
+scsi_host_template at file scope will cause NULL to be placed in function
+pointer members not explicitly initialized.
+
+Those usages in group c) should be handled with care, especially in a
+"hotplug" environment. LLDs should be aware of the lifetime of instances
+that are shared with the mid level and other layers.
+
+All functions defined within an LLD and all data defined at file scope
+should be static. For example the slave_alloc() function in an LLD
+called "xxx" could be defined as
+``static int xxx_slave_alloc(struct scsi_device * sdev) { /* code */ }``
+
+.. [#] the scsi_host_alloc() function is a replacement for the rather vaguely
+ named scsi_register() function in most situations.
+
+
+Hotplug initialization model
+============================
+In this model an LLD controls when SCSI hosts are introduced and removed
+from the SCSI subsystem. Hosts can be introduced as early as driver
+initialization and removed as late as driver shutdown. Typically a driver
+will respond to a sysfs probe() callback that indicates an HBA has been
+detected. After confirming that the new device is one that the LLD wants
+to control, the LLD will initialize the HBA and then register a new host
+with the SCSI mid level.
+
+During LLD initialization the driver should register itself with the
+appropriate IO bus on which it expects to find HBA(s) (e.g. the PCI bus).
+This can probably be done via sysfs. Any driver parameters (especially
+those that are writable after the driver is loaded) could also be
+registered with sysfs at this point. The SCSI mid level first becomes
+aware of an LLD when that LLD registers its first HBA.
+
+At some later time, the LLD becomes aware of an HBA and what follows
+is a typical sequence of calls between the LLD and the mid level.
+This example shows the mid level scanning the newly introduced HBA for 3
+scsi devices of which only the first 2 respond::
+
+ HBA PROBE: assume 2 SCSI devices found in scan
+ LLD mid level LLD
+ ===-------------------=========--------------------===------
+ scsi_host_alloc() -->
+ scsi_add_host() ---->
+ scsi_scan_host() -------+
+ |
+ slave_alloc()
+ slave_configure() --> scsi_change_queue_depth()
+ |
+ slave_alloc()
+ slave_configure()
+ |
+ slave_alloc() ***
+ slave_destroy() ***
+
+
+ *** For scsi devices that the mid level tries to scan but do not
+ respond, a slave_alloc(), slave_destroy() pair is called.
+
+If the LLD wants to adjust the default queue settings, it can invoke
+scsi_change_queue_depth() in its slave_configure() routine.
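+
+The sequence above maps onto LLD code roughly as in this hedged sketch
+of a PCI probe() routine (xyz_template and struct xyz_hba are
+hypothetical, and hardware setup is elided)::
+
+    static int xyz_probe(struct pci_dev *pdev,
+                         const struct pci_device_id *id)
+    {
+            struct Scsi_Host *shost;
+
+            shost = scsi_host_alloc(&xyz_template, sizeof(struct xyz_hba));
+            if (!shost)
+                    return -ENOMEM;
+
+            /* ... initialize the HBA hardware here ... */
+
+            if (scsi_add_host(shost, &pdev->dev)) {
+                    scsi_host_put(shost);
+                    return -ENODEV;
+            }
+            pci_set_drvdata(pdev, shost);
+            scsi_scan_host(shost);  /* drives slave_alloc()/slave_configure() */
+            return 0;
+    }
+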
+
+When an HBA is being removed it could be as part of an orderly shutdown
+associated with the LLD module being unloaded (e.g. with the "rmmod"
+command) or in response to a "hot unplug" indicated by sysfs's
+remove() callback being invoked. In either case, the sequence is the
+same::
+
+ HBA REMOVE: assume 2 SCSI devices attached
+ LLD mid level LLD
+ ===----------------------=========-----------------===------
+ scsi_remove_host() ---------+
+ |
+ slave_destroy()
+ slave_destroy()
+ scsi_host_put()
+
+It may be useful for an LLD to keep track of struct Scsi_Host instances
+(a pointer is returned by scsi_host_alloc()). Such instances are "owned"
+by the mid-level. struct Scsi_Host instances are freed from
+scsi_host_put() when the reference count hits zero.
+
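+A hedged sketch of the corresponding remove() path, matching the probe()
+sketch above::
+
+    static void xyz_remove(struct pci_dev *pdev)
+    {
+            struct Scsi_Host *shost = pci_get_drvdata(pdev);
+
+            scsi_remove_host(shost);        /* slave_destroy() per device */
+            /* ... quiesce and release HBA hardware here ... */
+            scsi_host_put(shost);           /* drop the final reference */
+    }
+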
+Hot unplugging an HBA that controls a disk which is processing SCSI
+commands on a mounted file system is an interesting situation. Reference
+counting logic is being introduced into the mid level to cope with many
+of the issues involved. See the section on reference counting below.
+
+
+The hotplug concept may be extended to SCSI devices. Currently, when an
+HBA is added, the scsi_scan_host() function causes a scan for SCSI devices
+attached to the HBA's SCSI transport. On newer SCSI transports the HBA
+may become aware of a new SCSI device _after_ the scan has completed.
+An LLD can use this sequence to make the mid level aware of a SCSI device::
+
+ SCSI DEVICE hotplug
+ LLD mid level LLD
+ ===-------------------=========--------------------===------
+ scsi_add_device() ------+
+ |
+ slave_alloc()
+ slave_configure() [--> scsi_change_queue_depth()]
+
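+A hedged sketch of the LLD side of that sequence (struct xyz_event and
+the surrounding handler are hypothetical)::
+
+    static void xyz_handle_device_hotplug(struct xyz_hba *hba,
+                                          struct xyz_event *ev)
+    {
+            struct scsi_device *sdev;
+
+            sdev = scsi_add_device(hba->shost, ev->channel, ev->id, ev->lun);
+            if (IS_ERR(sdev))
+                    printk(KERN_WARNING "xyz: no device at %u:%u:%u\n",
+                           ev->channel, ev->id, ev->lun);
+    }
+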
+In a similar fashion, an LLD may become aware that a SCSI device has been
+removed (unplugged) or the connection to it has been interrupted. Some
+existing SCSI transports (e.g. SPI) may not become aware that a SCSI
+device has been removed until a subsequent SCSI command fails which will
+probably cause that device to be set offline by the mid level. An LLD that
+detects the removal of a SCSI device can instigate its removal from
+upper layers with this sequence::
+
+ SCSI DEVICE hot unplug
+ LLD mid level LLD
+ ===----------------------=========-----------------===------
+ scsi_remove_device() -------+
+ |
+ slave_destroy()
+
+It may be useful for an LLD to keep track of struct scsi_device instances
+(a pointer is passed as the parameter to slave_alloc() and
+slave_configure() callbacks). Such instances are "owned" by the mid-level.
+struct scsi_device instances are freed after slave_destroy().
+
+
+Reference Counting
+==================
+The Scsi_Host structure has had reference counting infrastructure added.
+This effectively spreads the ownership of struct Scsi_Host instances
+across the various SCSI layers which use them. Previously such instances
+were exclusively owned by the mid level. LLDs would not usually need to
+directly manipulate these reference counts but there may be some cases
+where they do.
+
+There are 3 reference counting functions of interest associated with
+struct Scsi_Host:
+
+ - scsi_host_alloc():
+ returns a pointer to new instance of struct
+    Scsi_Host which has its reference count set to 1
+
+ - scsi_host_get():
+ adds 1 to the reference count of the given instance
+
+ - scsi_host_put():
+ decrements 1 from the reference count of the given
+ instance. If the reference count reaches 0 then the given instance
+ is freed
+
+The scsi_device structure has had reference counting infrastructure added.
+This effectively spreads the ownership of struct scsi_device instances
+across the various SCSI layers which use them. Previously such instances
+were exclusively owned by the mid level. See the access functions declared
+towards the end of include/scsi/scsi_device.h . If an LLD wants to keep
+a copy of a pointer to a scsi_device instance it should use scsi_device_get()
+to bump its reference count. When it is finished with the pointer it can
+use scsi_device_put() to decrement its reference count (and potentially
+delete it).
+
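+A minimal hedged usage pattern (the hba->monitored_sdev pointer is a
+hypothetical place an LLD might stash the reference)::
+
+    if (scsi_device_get(sdev) == 0)
+            hba->monitored_sdev = sdev;     /* refcount bumped */
+
+    /* ... later, when finished with the pointer ... */
+    scsi_device_put(hba->monitored_sdev);
+    hba->monitored_sdev = NULL;
+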
+.. Note::
+
+ struct Scsi_Host actually has 2 reference counts which are manipulated
+ in parallel by these functions.
+
+
+Conventions
+===========
+First, Linus Torvalds's thoughts on C coding style can be found in the
+Documentation/process/coding-style.rst file.
+
+Next, there is a movement to "outlaw" typedefs introducing synonyms for
+struct tags. Both can still be found in the SCSI subsystem, but
+the typedefs have been moved to a single file, scsi_typedefs.h, to
+make their future removal easier, for example:
+"typedef struct scsi_cmnd Scsi_Cmnd;"
+
+Also, most C99 enhancements are encouraged to the extent they are supported
+by the relevant gcc compilers. So C99 style structure and array
+initializers are encouraged where appropriate. Don't go too far,
+VLAs are not properly supported yet. An exception to this is the use of
+``//`` style comments; ``/*...*/`` comments are still preferred in Linux.
+
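+For instance, C99 designated initializers suit the host template well;
+a hedged sketch for a hypothetical driver "xyz"::
+
+    static struct scsi_host_template xyz_template = {
+            .module          = THIS_MODULE,
+            .name            = "xyz",
+            .proc_name       = "xyz",
+            .queuecommand    = xyz_queuecommand,
+            .slave_configure = xyz_slave_configure,
+            .this_id         = -1,
+    };
+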
+Well written, tested and documented code, need not be re-formatted to
+comply with the above conventions. For example, the aic7xxx driver
+comes to Linux from FreeBSD and Adaptec's own labs. No doubt FreeBSD
+and Adaptec have their own coding conventions.
+
+
+Mid level supplied functions
+============================
+These functions are supplied by the SCSI mid level for use by LLDs.
+The names (i.e. entry points) of these functions are exported
+so an LLD that is a module can access them. The kernel will
+arrange for the SCSI mid level to be loaded and initialized before any LLD
+is initialized. The functions below are listed alphabetically and their
+names all start with ``scsi_``.
+
+Summary:
+
+ - scsi_add_device - creates new scsi device (lu) instance
+ - scsi_add_host - perform sysfs registration and set up transport class
+ - scsi_change_queue_depth - change the queue depth on a SCSI device
+ - scsi_bios_ptable - return copy of block device's partition table
+ - scsi_block_requests - prevent further commands being queued to given host
+ - scsi_host_alloc - return a new scsi_host instance whose refcount==1
+ - scsi_host_get - increments Scsi_Host instance's refcount
+ - scsi_host_put - decrements Scsi_Host instance's refcount (free if 0)
+ - scsi_register - create and register a scsi host adapter instance.
+ - scsi_remove_device - detach and remove a SCSI device
+ - scsi_remove_host - detach and remove all SCSI devices owned by host
+ - scsi_report_bus_reset - report scsi _bus_ reset observed
+ - scsi_scan_host - scan SCSI bus
+ - scsi_track_queue_full - track successive QUEUE_FULL events
+ - scsi_unblock_requests - allow further commands to be queued to given host
+ - scsi_unregister - [calls scsi_host_put()]
+
+
+Details::
+
+ /**
+ * scsi_add_device - creates new scsi device (lu) instance
+ * @shost: pointer to scsi host instance
+ * @channel: channel number (rarely other than 0)
+ * @id: target id number
+ * @lun: logical unit number
+ *
+ * Returns pointer to new struct scsi_device instance or
+ * ERR_PTR(-ENODEV) (or some other bent pointer) if something is
+ * wrong (e.g. no lu responds at given address)
+ *
+ * Might block: yes
+ *
+ * Notes: This call is usually performed internally during a scsi
+ * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
+ * should only be called if the HBA becomes aware of a new scsi
+ * device (lu) after scsi_scan_host() has completed. If successful
+ * this call can lead to slave_alloc() and slave_configure() callbacks
+ * into the LLD.
+ *
+ * Defined in: drivers/scsi/scsi_scan.c
+ **/
+ struct scsi_device * scsi_add_device(struct Scsi_Host *shost,
+ unsigned int channel,
+ unsigned int id, unsigned int lun)
+
+
+ /**
+ * scsi_add_host - perform sysfs registration and set up transport class
+ * @shost: pointer to scsi host instance
+ * @dev: pointer to struct device of type scsi class
+ *
+ * Returns 0 on success, negative errno of failure (e.g. -ENOMEM)
+ *
+ * Might block: no
+ *
+ * Notes: Only required in "hotplug initialization model" after a
+ * successful call to scsi_host_alloc(). This function does not
+ * scan the bus; this can be done by calling scsi_scan_host() or
+ * in some other transport-specific way. The LLD must set up
+ * the transport template before calling this function and may only
+ * access the transport class data after this function has been called.
+ *
+ * Defined in: drivers/scsi/hosts.c
+ **/
+ int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
+
+
+ /**
+ * scsi_change_queue_depth - allow LLD to change queue depth on a SCSI device
+ * @sdev: pointer to SCSI device to change queue depth on
+ * @tags: Number of tags allowed if tagged queuing enabled,
+ * or number of commands the LLD can queue up
+ * in non-tagged mode (as per cmd_per_lun).
+ *
+ * Returns nothing
+ *
+ * Might block: no
+ *
+ * Notes: Can be invoked any time on a SCSI device controlled by this
+ * LLD. [Specifically during and after slave_configure() and prior to
+ * slave_destroy().] Can safely be invoked from interrupt code.
+ *
+ * Defined in: drivers/scsi/scsi.c [see source code for more notes]
+ *
+ **/
+ int scsi_change_queue_depth(struct scsi_device *sdev, int tags)
+
+
+ /**
+ * scsi_bios_ptable - return copy of block device's partition table
+ * @dev: pointer to block device
+ *
+ * Returns pointer to partition table, or NULL for failure
+ *
+ * Might block: yes
+ *
+ * Notes: Caller owns memory returned (free with kfree() )
+ *
+ * Defined in: drivers/scsi/scsicam.c
+ **/
+ unsigned char *scsi_bios_ptable(struct block_device *dev)
+
+
+ /**
+ * scsi_block_requests - prevent further commands being queued to given host
+ *
+ * @shost: pointer to host to block commands on
+ *
+ * Returns nothing
+ *
+ * Might block: no
+ *
+ * Notes: There is no timer nor any other means by which the requests
+ * get unblocked other than the LLD calling scsi_unblock_requests().
+ *
+ * Defined in: drivers/scsi/scsi_lib.c
+ **/
+ void scsi_block_requests(struct Scsi_Host * shost)
+
+
+ /**
+ * scsi_host_alloc - create a scsi host adapter instance and perform basic
+ * initialization.
+ * @sht: pointer to scsi host template
+ * @privsize: extra bytes to allocate in hostdata array (which is the
+ * last member of the returned Scsi_Host instance)
+ *
+ * Returns pointer to new Scsi_Host instance or NULL on failure
+ *
+ * Might block: yes
+ *
+ * Notes: When this call returns to the LLD, the SCSI bus scan on
+ * this host has _not_ yet been done.
+ * The hostdata array (by default zero length) is a per host scratch
+ * area for the LLD's exclusive use.
+ * Both associated refcounting objects have their refcount set to 1.
+ * Full registration (in sysfs) and a bus scan are performed later when
+ * scsi_add_host() and scsi_scan_host() are called.
+ *
+ * Defined in: drivers/scsi/hosts.c .
+ **/
+ struct Scsi_Host * scsi_host_alloc(struct scsi_host_template * sht,
+ int privsize)
+
+
+ /**
+ * scsi_host_get - increment Scsi_Host instance refcount
+ * @shost: pointer to struct Scsi_Host instance
+ *
+ * Returns nothing
+ *
+ * Might block: currently may block but may be changed to not block
+ *
+ * Notes: Actually increments the counts in two sub-objects
+ *
+ * Defined in: drivers/scsi/hosts.c
+ **/
+ void scsi_host_get(struct Scsi_Host *shost)
+
+
+ /**
+ * scsi_host_put - decrement Scsi_Host instance refcount, free if 0
+ * @shost: pointer to struct Scsi_Host instance
+ *
+ * Returns nothing
+ *
+ * Might block: currently may block but may be changed to not block
+ *
+ * Notes: Actually decrements the counts in two sub-objects. If the
+ * latter refcount reaches 0, the Scsi_Host instance is freed.
+ * The LLD need not worry exactly when the Scsi_Host instance is
+ * freed, it just shouldn't access the instance after it has balanced
+ * out its refcount usage.
+ *
+ * Defined in: drivers/scsi/hosts.c
+ **/
+ void scsi_host_put(struct Scsi_Host *shost)
+
+
+ /**
+ * scsi_register - create and register a scsi host adapter instance.
+ * @sht: pointer to scsi host template
+ * @privsize: extra bytes to allocate in hostdata array (which is the
+ * last member of the returned Scsi_Host instance)
+ *
+ * Returns pointer to new Scsi_Host instance or NULL on failure
+ *
+ * Might block: yes
+ *
+ * Notes: When this call returns to the LLD, the SCSI bus scan on
+ * this host has _not_ yet been done.
+ * The hostdata array (by default zero length) is a per host scratch
+ * area for the LLD.
+ *
+ * Defined in: drivers/scsi/hosts.c .
+ **/
+ struct Scsi_Host * scsi_register(struct scsi_host_template * sht,
+ int privsize)
+
+
+ /**
+ * scsi_remove_device - detach and remove a SCSI device
+ * @sdev: a pointer to a scsi device instance
+ *
+ * Returns value: 0 on success, -EINVAL if device not attached
+ *
+ * Might block: yes
+ *
+ * Notes: If an LLD becomes aware that a scsi device (lu) has
+ * been removed but its host is still present then it can request
+ * the removal of that scsi device. If successful this call will
+ * lead to the slave_destroy() callback being invoked. sdev is an
+ * invalid pointer after this call.
+ *
+ * Defined in: drivers/scsi/scsi_sysfs.c .
+ **/
+ int scsi_remove_device(struct scsi_device *sdev)
+
+
+ /**
+ * scsi_remove_host - detach and remove all SCSI devices owned by host
+ * @shost: a pointer to a scsi host instance
+ *
+ * Returns value: 0 on success, 1 on failure (e.g. LLD busy ??)
+ *
+ * Might block: yes
+ *
+ * Notes: Should only be invoked if the "hotplug initialization
+ * model" is being used. It should be called _prior_ to
+ * scsi_unregister().
+ *
+ * Defined in: drivers/scsi/hosts.c .
+ **/
+ int scsi_remove_host(struct Scsi_Host *shost)
+
+
+ /**
+ * scsi_report_bus_reset - report scsi _bus_ reset observed
+ * @shost: a pointer to a scsi host involved
+ * @channel: channel (within) host on which scsi bus reset occurred
+ *
+ * Returns nothing
+ *
+ * Might block: no
+ *
+ * Notes: This only needs to be called if the reset is one which
+ * originates from an unknown location. Resets originated by the
+ * mid level itself don't need to call this, but there should be
+ * no harm. The main purpose of this is to make sure that a
+ * CHECK_CONDITION is properly treated.
+ *
+ * Defined in: drivers/scsi/scsi_error.c .
+ **/
+ void scsi_report_bus_reset(struct Scsi_Host * shost, int channel)
+
+
+ /**
+ * scsi_scan_host - scan SCSI bus
+ * @shost: a pointer to a scsi host instance
+ *
+ * Might block: yes
+ *
+ * Notes: Should be called after scsi_add_host()
+ *
+ * Defined in: drivers/scsi/scsi_scan.c
+ **/
+ void scsi_scan_host(struct Scsi_Host *shost)
+
+
+ /**
+ * scsi_track_queue_full - track successive QUEUE_FULL events on given
+ * device to determine if and when there is a need
+ * to adjust the queue depth on the device.
+ * @sdev: pointer to SCSI device instance
+ * @depth: Current number of outstanding SCSI commands on this device,
+ * not counting the one returned as QUEUE_FULL.
+ *
+ * Returns 0 - no change needed
+ * >0 - adjust queue depth to this new depth
+ * -1 - drop back to untagged operation using host->cmd_per_lun
+ * as the untagged command depth
+ *
+ * Might block: no
+ *
+ * Notes: LLDs may call this at any time and we will do "The Right
+ * Thing"; interrupt context safe.
+ *
+ * Defined in: drivers/scsi/scsi.c .
+ **/
+ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
+
+
+ /**
+ * scsi_unblock_requests - allow further commands to be queued to given host
+ *
+ * @shost: pointer to host to unblock commands on
+ *
+ * Returns nothing
+ *
+ * Might block: no
+ *
+ * Defined in: drivers/scsi/scsi_lib.c .
+ **/
+ void scsi_unblock_requests(struct Scsi_Host * shost)
+
+
+ /**
+ * scsi_unregister - unregister and free memory used by host instance
+ * @shp: pointer to scsi host instance to unregister.
+ *
+ * Returns nothing
+ *
+ * Might block: no
+ *
+ * Notes: Should not be invoked if the "hotplug initialization
+ * model" is being used. Called internally by exit_this_scsi_driver()
+ * in the "passive initialization model". Hence a LLD has no need to
+ * call this function directly.
+ *
+ * Defined in: drivers/scsi/hosts.c .
+ **/
+ void scsi_unregister(struct Scsi_Host * shp)
+
+
+
+
+Interface Functions
+===================
+Interface functions are supplied (defined) by LLDs and their function
+pointers are placed in an instance of struct scsi_host_template which
+is passed to scsi_host_alloc() [or scsi_register() / init_this_scsi_driver()].
+Some are mandatory. Interface functions should be declared static. The
+accepted convention is that driver "xyz" will declare its slave_configure()
+function as::
+
+ static int xyz_slave_configure(struct scsi_device * sdev);
+
+and so forth for all interface functions listed below.
+
+A pointer to this function should be placed in the 'slave_configure' member
+of a "struct scsi_host_template" instance. A pointer to such an instance
+should be passed to the mid level's scsi_host_alloc() [or scsi_register() /
+init_this_scsi_driver()].
+
+The interface functions are also described in the include/scsi/scsi_host.h
+file immediately above their definition point in "struct scsi_host_template".
+In some cases more detail is given in scsi_host.h than below.
+
+The interface functions are listed below in alphabetical order.
+
+Summary:
+
+ - bios_param - fetch head, sector, cylinder info for a disk
+ - eh_timed_out - notify the host that a command timer expired
+ - eh_abort_handler - abort given command
+ - eh_bus_reset_handler - issue SCSI bus reset
+ - eh_device_reset_handler - issue SCSI device reset
+ - eh_host_reset_handler - reset host (host bus adapter)
+ - info - supply information about given host
+ - ioctl - driver can respond to ioctls
+ - proc_info - supports /proc/scsi/{driver_name}/{host_no}
+ - queuecommand - queue scsi command, invoke 'done' on completion
+ - slave_alloc - prior to any commands being sent to a new device
+ - slave_configure - driver fine tuning for given device after attach
+ - slave_destroy - given device is about to be shut down
+
+
+Details::
+
+ /**
+ * bios_param - fetch head, sector, cylinder info for a disk
+ * @sdev: pointer to scsi device context (defined in
+ * include/scsi/scsi_device.h)
+ * @bdev: pointer to block device context (defined in fs.h)
+ * @capacity: device size (in 512 byte sectors)
+ * @params: three element array to place output:
+ * params[0] number of heads (max 255)
+ * params[1] number of sectors (max 63)
+ * params[2] number of cylinders
+ *
+ * Return value is ignored
+ *
+ * Locks: none
+ *
+ * Calling context: process (sd)
+ *
+ * Notes: an arbitrary geometry (based on READ CAPACITY) is used
+ * if this function is not provided. The params array is
+ * pre-initialized with made up values just in case this function
+ * doesn't output anything.
+ *
+ * Optionally defined in: LLD
+ **/
+ int bios_param(struct scsi_device * sdev, struct block_device *bdev,
+ sector_t capacity, int params[3])
+
+
+ /**
+ * eh_timed_out - The timer for the command has just fired
+ * @scp: identifies command timing out
+ *
+ * Returns:
+ *
+ * EH_HANDLED: I fixed the error, please complete the command
+ * EH_RESET_TIMER: I need more time, reset the timer and
+ * begin counting again
+ * EH_NOT_HANDLED Begin normal error recovery
+ *
+ *
+ * Locks: None held
+ *
+ * Calling context: interrupt
+ *
+ * Notes: This is to give the LLD an opportunity to do local recovery.
+ * This recovery is limited to determining if the outstanding command
+ * will ever complete. You may not abort and restart the command from
+ * this callback.
+ *
+ * Optionally defined in: LLD
+ **/
+ int eh_timed_out(struct scsi_cmnd * scp)
+
+
+ /**
+ * eh_abort_handler - abort command associated with scp
+ * @scp: identifies command to be aborted
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ *
+ * Locks: None held
+ *
+ * Calling context: kernel thread
+ *
+ * Notes: If 'no_async_abort' is defined this callback
+ * will be invoked from scsi_eh thread. No other commands
+ * will then be queued on current host during eh.
+ * Otherwise it will be called whenever scsi_times_out()
+ * is called due to a command timeout.
+ *
+ * Optionally defined in: LLD
+ **/
+ int eh_abort_handler(struct scsi_cmnd * scp)
+
+
+ /**
+ * eh_bus_reset_handler - issue SCSI bus reset
+ * @scp: SCSI bus that contains this device should be reset
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ *
+ * Locks: None held
+ *
+ * Calling context: kernel thread
+ *
+ * Notes: Invoked from scsi_eh thread. No other commands will be
+ * queued on current host during eh.
+ *
+ * Optionally defined in: LLD
+ **/
+ int eh_bus_reset_handler(struct scsi_cmnd * scp)
+
+
+ /**
+ * eh_device_reset_handler - issue SCSI device reset
+ * @scp: identifies SCSI device to be reset
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ *
+ * Locks: None held
+ *
+ * Calling context: kernel thread
+ *
+ * Notes: Invoked from scsi_eh thread. No other commands will be
+ * queued on current host during eh.
+ *
+ * Optionally defined in: LLD
+ **/
+ int eh_device_reset_handler(struct scsi_cmnd * scp)
+
+
+ /**
+ * eh_host_reset_handler - reset host (host bus adapter)
+ * @scp: SCSI host that contains this device should be reset
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ *
+ * Locks: None held
+ *
+ * Calling context: kernel thread
+ *
+ * Notes: Invoked from scsi_eh thread. No other commands will be
+ * queued on current host during eh.
+ * With the default eh_strategy in place, if none of the _abort_,
+ * _device_reset_, _bus_reset_ or this eh handler function are
+ * defined (or they all return FAILED) then the device in question
+ * will be set offline whenever eh is invoked.
+ *
+ * Optionally defined in: LLD
+ **/
+ int eh_host_reset_handler(struct scsi_cmnd * scp)
+
+
+ /**
+ * info - supply information about given host: driver name plus data
+ * to distinguish given host
+ * @shp: host to supply information about
+ *
+ * Return ASCII null terminated string. [This driver is assumed to
+ * manage the memory pointed to and maintain it, typically for the
+ * lifetime of this host.]
+ *
+ * Locks: none
+ *
+ * Calling context: process
+ *
+ * Notes: Often supplies PCI or ISA information such as IO addresses
+ * and interrupt numbers. If not supplied struct Scsi_Host::name used
+ * instead. It is assumed the returned information fits on one line
+ * (i.e. does not include embedded newlines).
+ * The SCSI_IOCTL_PROBE_HOST ioctl yields the string returned by this
+ * function (or struct Scsi_Host::name if this function is not
+ * available).
+ * In a similar manner, init_this_scsi_driver() outputs to the console
+ * each host's "info" (or name) for the driver it is registering.
+ * Also if proc_info() is not supplied, the output of this function
+ * is used instead.
+ *
+ * Optionally defined in: LLD
+ **/
+ const char * info(struct Scsi_Host * shp)
+
+
+ /**
+ * ioctl - driver can respond to ioctls
+ * @sdp: device that ioctl was issued for
+ * @cmd: ioctl number
+ * @arg: pointer to read or write data from. Since it points to
+ * user space, should use appropriate kernel functions
+ * (e.g. copy_from_user() ). In the Unix style this argument
+ * can also be viewed as an unsigned long.
+ *
+ * Returns negative "errno" value when there is a problem. 0 or a
+ * positive value indicates success and is returned to the user space.
+ *
+ * Locks: none
+ *
+ * Calling context: process
+ *
+ * Notes: The SCSI subsystem uses a "trickle down" ioctl model.
+ * The user issues an ioctl() against an upper level driver
+ * (e.g. /dev/sdc) and if the upper level driver doesn't recognize
+ * the 'cmd' then it is passed to the SCSI mid level. If the SCSI
+ * mid level does not recognize it, then the LLD that controls
+ * the device receives the ioctl. According to recent Unix standards
+ * unsupported ioctl() 'cmd' numbers should return -ENOTTY.
+ *
+ * Optionally defined in: LLD
+ **/
+ int ioctl(struct scsi_device *sdp, int cmd, void *arg)
+
+
+ /**
+ * proc_info - supports /proc/scsi/{driver_name}/{host_no}
+ * @buffer: anchor point to output to (0==writeto1_read0) or fetch from
+ * (1==writeto1_read0).
+ * @start: where "interesting" data is written to. Ignored when
+ * 1==writeto1_read0.
+ * @offset: offset within buffer 0==writeto1_read0 is actually
+ * interested in. Ignored when 1==writeto1_read0 .
+ * @length: maximum (or actual) extent of buffer
+ * @host_no: host number of interest (struct Scsi_Host::host_no)
+ * @writeto1_read0: 1 -> data coming from user space towards driver
+ * (e.g. "echo some_string > /proc/scsi/xyz/2")
+ * 0 -> user wants data from this driver
+ * (e.g. "cat /proc/scsi/xyz/2")
+ *
+ * Returns length when 1==writeto1_read0. Otherwise number of chars
+ * output to buffer past offset.
+ *
+ * Locks: none held
+ *
+ * Calling context: process
+ *
+ * Notes: Driven from scsi_proc.c which interfaces to proc_fs. proc_fs
+ * support can now be configured out of the scsi subsystem.
+ *
+ * Optionally defined in: LLD
+ **/
+ int proc_info(char * buffer, char ** start, off_t offset,
+ int length, int host_no, int writeto1_read0)
+
+
+ /**
+ * queuecommand - queue scsi command, invoke scp->scsi_done on completion
+ * @shost: pointer to the scsi host object
+ * @scp: pointer to scsi command object
+ *
+ * Returns 0 on success.
+ *
+ * If there's a failure, return either:
+ *
+ * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
+ * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
+ *
+ * On both of these returns, the mid-layer will requeue the I/O
+ *
+ * - if the return is SCSI_MLQUEUE_DEVICE_BUSY, only that particular
+ * device will be paused, and it will be unpaused when a command to
+ * the device returns (or after a brief delay if there are no more
+ * outstanding commands to it). Commands to other devices continue
+ * to be processed normally.
+ *
+ * - if the return is SCSI_MLQUEUE_HOST_BUSY, all I/O to the host
+ * is paused and will be unpaused when any command returns from
+ * the host (or after a brief delay if there are no outstanding
+ * commands to the host).
+ *
+ * For compatibility with earlier versions of queuecommand, any
+ * other return value is treated the same as
+ * SCSI_MLQUEUE_HOST_BUSY.
+ *
+ * Other types of errors that are detected immediately may be
+ * flagged by setting scp->result to an appropriate value,
+ * invoking the scp->scsi_done callback, and then returning 0
+ * from this function. If the command is not performed
+ * immediately (and the LLD is starting (or will start) the given
+ * command) then this function should place 0 in scp->result and
+ * return 0.
+ *
+ * Command ownership. If the driver returns zero, it owns the
+ * command and must take responsibility for ensuring the
+ * scp->scsi_done callback is executed. Note: the driver may
+ * call scp->scsi_done before returning zero, but after it has
+ * called scp->scsi_done, it may not return any value other than
+ * zero. If the driver makes a non-zero return, it must not
+ * execute the command's scsi_done callback at any time.
+ *
+ * Locks: up to and including 2.6.36, struct Scsi_Host::host_lock
+ * held on entry (with "irqsave") and is expected to be
+ * held on return. From 2.6.37 onwards, queuecommand is
+ * called without any locks held.
+ *
+ * Calling context: in interrupt (soft irq) or process context
+ *
+ * Notes: This function should be relatively fast. Normally it
+ * will not wait for IO to complete. Hence the scp->scsi_done
+ * callback is invoked (often directly from an interrupt service
+ * routine) some time after this function has returned. In some
+ * cases (e.g. pseudo adapter drivers that manufacture the
+ * response to a SCSI INQUIRY) the scp->scsi_done callback may be
+ * invoked before this function returns. If the scp->scsi_done
+ * callback is not invoked within a certain period the SCSI mid
+ * level will commence error processing. If a status of CHECK
+ * CONDITION is placed in "result" when the scp->scsi_done
+ * callback is invoked, then the LLD should perform
+ * autosense and fill in the struct scsi_cmnd::sense_buffer
+ * array. The scsi_cmnd::sense_buffer array is zeroed prior to
+ * the mid level queuing a command to an LLD.
+ *
+ * Defined in: LLD
+ **/
+ int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp)
+
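+ /*
+  * Illustration only: a skeletal queuecommand() for a hypothetical
+  * driver "xyz" showing the ownership rules described above.
+  * struct xyz_hba, xyz_hw_queue_full() and xyz_start_io() are
+  * made-up LLD internals.
+  */
+ static int xyz_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scp)
+ {
+         struct xyz_hba *hba = shost_priv(shost);
+
+         if (xyz_hw_queue_full(hba))
+                 return SCSI_MLQUEUE_HOST_BUSY;  /* mid level requeues */
+
+         if (xyz_start_io(hba, scp) < 0) {
+                 /* immediate failure: flag it, complete, return 0 */
+                 scp->result = DID_ERROR << 16;
+                 scp->scsi_done(scp);
+                 return 0;                       /* we owned the command */
+         }
+
+         /* started: scsi_done will be invoked later, e.g. from the ISR */
+         return 0;
+ }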
+
+ /**
+ * slave_alloc - prior to any commands being sent to a new device
+ * (i.e. just prior to scan) this call is made
+ * @sdp: pointer to new device (about to be scanned)
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ *
+ * Locks: none
+ *
+ * Calling context: process
+ *
+ * Notes: Allows the driver to allocate any resources for a device
+ * prior to its initial scan. The corresponding scsi device may not
+ * exist but the mid level is just about to scan for it (i.e. send
+ * an INQUIRY command plus ...). If a device is found then
+ * slave_configure() will be called; if a device is not found then
+ * slave_destroy() is called.
+ * For more details see the include/scsi/scsi_host.h file.
+ *
+ * Optionally defined in: LLD
+ **/
+ int slave_alloc(struct scsi_device *sdp)
+
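+ /*
+  * Illustration only: slave_alloc() setting up per-device data in
+  * the hypothetical driver "xyz" (struct xyz_dev is made up).
+  */
+ static int xyz_slave_alloc(struct scsi_device *sdp)
+ {
+         struct xyz_dev *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+         if (!priv)
+                 return -ENOMEM;
+         sdp->hostdata = priv;   /* per-device scratch pointer */
+         return 0;
+ }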
+
+ /**
+ * slave_configure - driver fine tuning for given device just after it
+ * has been first scanned (i.e. it responded to an
+ * INQUIRY)
+ * @sdp: device that has just been attached
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is taken offline. [offline devices will _not_ have
+ * slave_destroy() called on them so clean up resources.]
+ *
+ * Locks: none
+ *
+ * Calling context: process
+ *
+ * Notes: Allows the driver to inspect the response to the initial
+ * INQUIRY done by the scanning code and take appropriate action.
+ * For more details see the include/scsi/scsi_host.h file.
+ *
+ * Optionally defined in: LLD
+ **/
+ int slave_configure(struct scsi_device *sdp)
+
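+ /*
+  * Illustration only: slave_configure() raising the queue depth for
+  * devices whose INQUIRY response reported tagged queuing support
+  * (hypothetical driver "xyz"; the depth of 64 is arbitrary).
+  */
+ static int xyz_slave_configure(struct scsi_device *sdp)
+ {
+         if (sdp->tagged_supported)
+                 scsi_change_queue_depth(sdp, 64);
+         return 0;
+ }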
+
+ /**
+ * slave_destroy - given device is about to be shut down. All
+ * activity has ceased on this device.
+ * @sdp: device that is about to be shut down
+ *
+ * Returns nothing
+ *
+ * Locks: none
+ *
+ * Calling context: process
+ *
+ * Notes: Mid level structures for given device are still in place
+ * but are about to be torn down. Any per device resources allocated
+ * by this driver for given device should be freed now. No further
+ * commands will be sent for this sdp instance. [However the device
+ * could be re-attached in the future in which case a new instance
+ * of struct scsi_device would be supplied by future slave_alloc()
+ * and slave_configure() calls.]
+ *
+ * Optionally defined in: LLD
+ **/
+ void slave_destroy(struct scsi_device *sdp)
+
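+ /*
+  * Illustration only: slave_destroy() releasing what the
+  * xyz_slave_alloc() sketch above set up.
+  */
+ static void xyz_slave_destroy(struct scsi_device *sdp)
+ {
+         kfree(sdp->hostdata);
+         sdp->hostdata = NULL;
+ }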
+
+
+Data Structures
+===============
+struct scsi_host_template
+-------------------------
+There is one "struct scsi_host_template" instance per LLD [#]_. It is
+typically initialized as a file scope static in a driver's header file. That
+way members that are not explicitly initialized will be set to 0 or NULL.
+Members of interest:
+
+ name
+ - name of driver (may contain spaces, please limit to
+ less than 80 characters)
+
+ proc_name
+ - name used in "/proc/scsi/<proc_name>/<host_no>" and
+ by sysfs in one of its "drivers" directories. Hence
+ "proc_name" should only contain characters acceptable
+ to a Unix file name.
+
+ ``(*queuecommand)()``
+ - primary callback that the mid level uses to inject
+ SCSI commands into an LLD.
+
+The structure is defined and commented in include/scsi/scsi_host.h
+
+.. [#] In extreme situations a single driver may have several instances
+ if it controls several different classes of hardware (e.g. an LLD
+ that handles both ISA and PCI cards and has a separate instance of
+ struct scsi_host_template for each class).
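+
+As an illustration, a minimal file scope template for a hypothetical
+driver "xyz" might be set up with C99 designated initializers (a real
+driver would normally fill in many more members)::
+
+    static struct scsi_host_template xyz_template = {
+            .module       = THIS_MODULE,
+            .name         = "Example XYZ driver",
+            .proc_name    = "xyz",
+            .queuecommand = xyz_queuecommand,
+            .this_id      = -1,
+            .can_queue    = 32,
+            .sg_tablesize = SG_ALL,
+            .cmd_per_lun  = 2,
+    };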
+
+struct Scsi_Host
+----------------
+There is one struct Scsi_Host instance per host (HBA) that an LLD
+controls. The struct Scsi_Host structure has many members in common
+with "struct scsi_host_template". When a new struct Scsi_Host instance
+is created (in scsi_host_alloc() in hosts.c) those common members are
+initialized from the driver's struct scsi_host_template instance. Members
+of interest:
+
+ host_no
+ - system wide unique number that is used for identifying
+ this host. Issued in ascending order from 0.
+ can_queue
+ - must be greater than 0; do not send more than can_queue
+ commands to the adapter.
+ this_id
+ - scsi id of host (scsi initiator) or -1 if not known
+ sg_tablesize
+ - maximum scatter gather elements allowed by host.
+ Set this to SG_ALL or less to avoid chained SG lists.
+ Must be at least 1.
+ max_sectors
+ - maximum number of sectors (each usually 512 bytes) allowed
+ in a single SCSI command. The default value of 0 leads
+ to a setting of SCSI_DEFAULT_MAX_SECTORS (defined in
+ scsi_host.h) which is currently set to 1024. So for a
+ disk the maximum transfer size is 512 KB when max_sectors
+ is not defined. Note that this size may not be sufficient
+ for disk firmware uploads.
+ cmd_per_lun
+ - maximum number of commands that can be queued on devices
+ controlled by the host. Overridden by LLD calls to
+ scsi_change_queue_depth().
+ unchecked_isa_dma
+ - 1=>only use bottom 16 MB of RAM (ISA DMA addressing
+ restriction), 0=>can use full 32 bit (or better) DMA
+ address space
+ no_async_abort
+ - 1=>Asynchronous aborts are not supported
+ - 0=>Timed-out commands will be aborted asynchronously
+ hostt
+ - pointer to driver's struct scsi_host_template from which
+ this struct Scsi_Host instance was spawned
+ hostt->proc_name
+ - name of LLD. This is the driver name that sysfs uses
+ transportt
+ - pointer to driver's struct scsi_transport_template instance
+ (if any). FC and SPI transports currently supported.
+ sh_list
+ - a doubly linked list of pointers to all struct Scsi_Host
+ instances (currently ordered by ascending host_no)
+ my_devices
+ - a doubly linked list of pointers to struct scsi_device
+ instances that belong to this host.
+ hostdata[0]
+ - area reserved for LLD at end of struct Scsi_Host. Size
+ is set by the second argument (named 'xtr_bytes') to
+ scsi_host_alloc() or scsi_register().
+ vendor_id
+ - a unique value that identifies the vendor supplying
+ the LLD for the Scsi_Host. Used most often in validating
+ vendor-specific message requests. Value consists of an
+ identifier type and a vendor-specific value.
+ See scsi_netlink.h for a description of valid formats.
+
+The scsi_host structure is defined in include/scsi/scsi_host.h
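+
+The hostdata scratch area is normally reached via shost_priv(). A
+sketch, assuming the hypothetical per-host structure below::
+
+    struct xyz_hba {            /* made-up per-host LLD state */
+            void __iomem *regs;
+            spinlock_t lock;
+    };
+
+    /* in the LLD's (hypothetical) probe routine: */
+    struct Scsi_Host *shost = scsi_host_alloc(&xyz_template,
+                                              sizeof(struct xyz_hba));
+    struct xyz_hba *hba = shost_priv(shost);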
+
+struct scsi_device
+------------------
+Generally, there is one instance of this structure for each SCSI logical unit
+on a host. Scsi devices connected to a host are uniquely identified by a
+channel number, target id and logical unit number (lun).
+The structure is defined in include/scsi/scsi_device.h
+
+struct scsi_cmnd
+----------------
+Instances of this structure convey SCSI commands to the LLD and responses
+back to the mid level. The SCSI mid level will ensure that no more SCSI
+commands become queued against the LLD than are indicated by
+scsi_change_queue_depth() (or struct Scsi_Host::cmd_per_lun). There will
+be at least one instance of struct scsi_cmnd available for each SCSI device.
+Members of interest:
+
+ cmnd
+ - array containing SCSI command
+ cmd_len
+ - length (in bytes) of SCSI command
+ sc_data_direction
+ - direction of data transfer in data phase. See
+ "enum dma_data_direction" in include/linux/dma-mapping.h
+ request_bufflen
+ - number of data bytes to transfer (0 if no data phase)
+ use_sg
+ - ==0 -> no scatter gather list, hence transfer data
+ to/from request_buffer
+ - >0 -> scatter gather list (actually an array) in
+ request_buffer with use_sg elements
+ request_buffer
+ - either contains data buffer or scatter gather list
+ depending on the setting of use_sg. Scatter gather
+ elements are defined by 'struct scatterlist' found
+ in include/linux/scatterlist.h .
+ done
+ - function pointer that should be invoked by LLD when the
+ SCSI command is completed (successfully or otherwise).
+ Should only be called by an LLD if the LLD has accepted
+ the command (i.e. queuecommand() returned or will return
+ 0). The LLD may invoke 'done' prior to queuecommand()
+ finishing.
+ result
+ - should be set by LLD prior to calling 'done'. A value
+ of 0 implies a successfully completed command (and all
+ data (if any) has been transferred to or from the SCSI
+ target device). 'result' is a 32 bit unsigned integer that
+ can be viewed as 4 related bytes. The SCSI status value is
+ in the LSB. See include/scsi/scsi.h status_byte(),
+ msg_byte(), host_byte() and driver_byte() macros and
+ related constants.
+ sense_buffer
+ - an array (maximum size: SCSI_SENSE_BUFFERSIZE bytes) that
+ should be written when the SCSI status (LSB of 'result')
+ is set to CHECK_CONDITION (2). When CHECK_CONDITION is
+ set, if the top nibble of sense_buffer[0] has the value 7
+ then the mid level will assume the sense_buffer array
+ contains a valid SCSI sense buffer; otherwise the mid
+ level will issue a REQUEST_SENSE SCSI command to
+ retrieve the sense buffer. The latter strategy is error
+ prone in the presence of command queuing so the LLD should
+ always "auto-sense".
+ device
+ - pointer to scsi_device object that this command is
+ associated with.
+ resid
+ - an LLD should set this signed integer to the requested
+ transfer length (i.e. 'request_bufflen') less the number
+ of bytes that are actually transferred. 'resid' is
+ preset to 0 so an LLD can ignore it if it cannot detect
+ underruns (overruns should be rare). If possible an LLD
+ should set 'resid' prior to invoking 'done'. The most
+ interesting case is data transfers from a SCSI target
+ device (e.g. READs) that underrun.
+ underflow
+ - LLD should place (DID_ERROR << 16) in 'result' if
+ actual number of bytes transferred is less than this
+ figure. Not many LLDs implement this check and some that
+ do just output an error message to the log rather than
+ report a DID_ERROR. Better for an LLD to implement
+ 'resid'.
+
+It is recommended that an LLD set 'resid' on data transfers from a SCSI
+target device (e.g. READs). It is especially important that 'resid' is set
+when such data transfers have sense keys of MEDIUM ERROR and HARDWARE ERROR
+(and possibly RECOVERED ERROR). In these cases, if an LLD is in doubt about
+how much data has been received, the safest approach is to indicate that no
+bytes have been received. For example, to indicate that no valid data has
+been received an LLD might use these helpers::
+
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
+
+where 'SCpnt' is a pointer to a scsi_cmnd object. To indicate that only
+three 512-byte blocks have been received, 'resid' could be set like this::
+
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - (3 * 512));
+
+The scsi_cmnd structure is defined in include/scsi/scsi_cmnd.h
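+
+For reference, an LLD typically composes 'result' with shifts, and the
+bytes can be picked apart again with the macros mentioned above. A
+sketch::
+
+    scp->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+
+    /* later, on the decoding side: */
+    if (host_byte(scp->result) != DID_OK)
+            pr_err("transport problem: 0x%x\n", host_byte(scp->result));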
+
+
+Locks
+=====
+Each struct Scsi_Host instance has a spin_lock called struct
+Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in
+hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer
+is initialized to point at default_lock. Thereafter lock and unlock
+operations performed by the mid level use the struct Scsi_Host::host_lock
+pointer. Previously drivers could override the host_lock pointer but
+this is not allowed anymore.
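+
+An LLD that needs this lock in its own code takes it through the
+host_lock pointer, for example::
+
+    unsigned long flags;
+
+    spin_lock_irqsave(shost->host_lock, flags);
+    /* touch state shared with the SCSI mid level */
+    spin_unlock_irqrestore(shost->host_lock, flags);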
+
+
+Autosense
+=========
+Autosense (or auto-sense) is defined in the SAM-2 document as "the
+automatic return of sense data to the application client coincident
+with the completion of a SCSI command" when a status of CHECK CONDITION
+occurs. LLDs should perform autosense. This should be done when the LLD
+detects a CHECK CONDITION status by either:
+
+ a) instructing the SCSI protocol (e.g. SCSI Parallel Interface (SPI))
+ to perform an extra data in phase on such responses
+ b) or, the LLD issuing a REQUEST SENSE command itself
+
+Either way, when a status of CHECK CONDITION is detected, the mid level
+decides whether the LLD has performed autosense by checking struct
+scsi_cmnd::sense_buffer[0]. If this byte has an upper nibble of 7 (or 0xf)
+then autosense is assumed to have taken place. If it has another value (and
+this byte is initialized to 0 before each command) then the mid level will
+issue a REQUEST SENSE command.
+
+In the presence of queued commands, the "nexus" that associates the sense
+buffer data with the command that failed may get out of synchronization
+before a following REQUEST SENSE is serviced. This is why it is best for
+the LLD to perform autosense.
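+
+For example, an LLD that has already fetched the sense data for a
+failed command might complete it like this (sketch; 'sense' and
+'sense_len' are assumed to come from the HBA)::
+
+    memcpy(scp->sense_buffer, sense,
+           min_t(size_t, sense_len, SCSI_SENSE_BUFFERSIZE));
+    scp->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+    scp->scsi_done(scp);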
+
+
+Changes since lk 2.4 series
+===========================
+io_request_lock has been replaced by several finer grained locks. The lock
+relevant to LLDs is struct Scsi_Host::host_lock and there is
+one per SCSI host.
+
+The older error handling mechanism has been removed. This means the
+LLD interface functions abort() and reset() have been removed.
+The struct scsi_host_template::use_new_eh_code flag has been removed.
+
+In the 2.4 series the SCSI subsystem configuration descriptions were
+aggregated with the configuration descriptions from all other Linux
+subsystems in the Documentation/Configure.help file. In the 2.6 series,
+the SCSI subsystem now has its own (much smaller) drivers/scsi/Kconfig
+file that contains both configuration and help information.
+
+struct SHT has been renamed to struct scsi_host_template.
+
+The "hotplug initialization model" has been added, together with many
+extra functions to support it.
+
+
+Credits
+=======
+The following people have contributed to this document:
+
+ - Mike Anderson <andmike at us dot ibm dot com>
+ - James Bottomley <James dot Bottomley at hansenpartnership dot com>
+ - Patrick Mansfield <patmans at us dot ibm dot com>
+ - Christoph Hellwig <hch at infradead dot org>
+ - Doug Ledford <dledford at redhat dot com>
+ - Andries Brouwer <Andries dot Brouwer at cwi dot nl>
+ - Randy Dunlap <rdunlap at xenotime dot net>
+ - Alan Stern <stern at rowland dot harvard dot edu>
+
+
+Douglas Gilbert
+dgilbert at interlog dot com
+
+21st September 2004
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
deleted file mode 100644
index 537f04728487..000000000000
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ /dev/null
@@ -1,1259 +0,0 @@
- Linux Kernel 2.6 series
- SCSI mid_level - lower_level driver interface
- =============================================
-
-Introduction
-============
-This document outlines the interface between the Linux SCSI mid level and
-SCSI lower level drivers. Lower level drivers (LLDs) are variously called
-host bus adapter (HBA) drivers and host drivers (HD). A "host" in this
-context is a bridge between a computer IO bus (e.g. PCI or ISA) and a
-single SCSI initiator port on a SCSI transport. An "initiator" port
-(SCSI terminology, see SAM-3 at http://www.t10.org) sends SCSI commands
-to "target" SCSI ports (e.g. disks). There can be many LLDs in a running
-system, but only one per hardware type. Most LLDs can control one or more
-SCSI HBAs. Some HBAs contain multiple hosts.
-
-In some cases the SCSI transport is an external bus that already has
-its own subsystem in Linux (e.g. USB and ieee1394). In such cases the
-SCSI subsystem LLD is a software bridge to the other driver subsystem.
-Examples are the usb-storage driver (found in the drivers/usb/storage
-directory) and the ieee1394/sbp2 driver (found in the drivers/ieee1394
-directory).
-
-For example, the aic7xxx LLD controls Adaptec SCSI parallel interface
-(SPI) controllers based on that company's 7xxx chip series. The aic7xxx
-LLD can be built into the kernel or loaded as a module. There can only be
-one aic7xxx LLD running in a Linux system but it may be controlling many
-HBAs. These HBAs might be either on PCI daughter-boards or built into
-the motherboard (or both). Some aic7xxx based HBAs are dual controllers
-and thus represent two hosts. Like most modern HBAs, each aic7xxx host
-has its own PCI device address. [The one-to-one correspondence between
-a SCSI host and a PCI device is common but not required (e.g. with
-ISA adapters).]
-
-The SCSI mid level isolates an LLD from other layers such as the SCSI
-upper layer drivers and the block layer.
-
-This version of the document roughly matches linux kernel version 2.6.8 .
-
-Documentation
-=============
-There is a SCSI documentation directory within the kernel source tree,
-typically Documentation/scsi . Most documents are in plain
-(i.e. ASCII) text. This file is named scsi_mid_low_api.txt and can be
-found in that directory. A more recent copy of this document may be found
-at http://web.archive.org/web/20070107183357rn_1/sg.torque.net/scsi/.
-Many LLDs are documented there (e.g. aic7xxx.txt). The SCSI mid-level is
-briefly described in scsi.txt which contains a url to a document
-describing the SCSI subsystem in the lk 2.4 series. Two upper level
-drivers have documents in that directory: st.txt (SCSI tape driver) and
-scsi-generic.txt (for the sg driver).
-
-Some documentation (or urls) for LLDs may be found in the C source code
-or in the same directory as the C source code. For example to find a url
-about the USB mass storage driver see the
-/usr/src/linux/drivers/usb/storage directory.
-
-Driver structure
-================
-Traditionally an LLD for the SCSI subsystem has been at least two files in
-the drivers/scsi directory. For example, a driver called "xyz" has a header
-file "xyz.h" and a source file "xyz.c". [Actually there is no good reason
-why this couldn't all be in one file; the header file is superfluous.] Some
-drivers that have been ported to several operating systems have more than
-two files. For example the aic7xxx driver has separate files for generic
-and OS-specific code (e.g. FreeBSD and Linux). Such drivers tend to have
-their own directory under the drivers/scsi directory.
-
-When a new LLD is being added to Linux, the following files (found in the
-drivers/scsi directory) will need some attention: Makefile and Kconfig .
-It is probably best to study how existing LLDs are organized.
-
-As the 2.5 series development kernels evolve into the 2.6 series
-production series, changes are being introduced into this interface. An
-example of this is driver initialization code where there are now 2 models
-available. The older one, similar to what was found in the lk 2.4 series,
-is based on hosts that are detected at HBA driver load time. This will be
-referred to the "passive" initialization model. The newer model allows HBAs
-to be hot plugged (and unplugged) during the lifetime of the LLD and will
-be referred to as the "hotplug" initialization model. The newer model is
-preferred as it can handle both traditional SCSI equipment that is
-permanently connected as well as modern "SCSI" devices (e.g. USB or
-IEEE 1394 connected digital cameras) that are hotplugged. Both
-initialization models are discussed in the following sections.
-
-An LLD interfaces to the SCSI subsystem several ways:
- a) directly invoking functions supplied by the mid level
- b) passing a set of function pointers to a registration function
- supplied by the mid level. The mid level will then invoke these
- functions at some point in the future. The LLD will supply
- implementations of these functions.
- c) direct access to instances of well known data structures maintained
- by the mid level
-
-Those functions in group a) are listed in a section entitled "Mid level
-supplied functions" below.
-
-Those functions in group b) are listed in a section entitled "Interface
-functions" below. Their function pointers are placed in the members of
-"struct scsi_host_template", an instance of which is passed to
-scsi_host_alloc() ** . Those interface functions that the LLD does not
-wish to supply should have NULL placed in the corresponding member of
-struct scsi_host_template. Defining an instance of struct
-scsi_host_template at file scope will cause NULL to be placed in function
- pointer members not explicitly initialized.
-
-Those usages in group c) should be handled with care, especially in a
-"hotplug" environment. LLDs should be aware of the lifetime of instances
-that are shared with the mid level and other layers.
-
-All functions defined within an LLD and all data defined at file scope
-should be static. For example the slave_alloc() function in an LLD
-called "xxx" could be defined as
-"static int xxx_slave_alloc(struct scsi_device * sdev) { /* code */ }"
-
-** the scsi_host_alloc() function is a replacement for the rather vaguely
-named scsi_register() function in most situations.
-
-
-Hotplug initialization model
-============================
-In this model an LLD controls when SCSI hosts are introduced and removed
-from the SCSI subsystem. Hosts can be introduced as early as driver
-initialization and removed as late as driver shutdown. Typically a driver
-will respond to a sysfs probe() callback that indicates an HBA has been
-detected. After confirming that the new device is one that the LLD wants
-to control, the LLD will initialize the HBA and then register a new host
-with the SCSI mid level.
-
-During LLD initialization the driver should register itself with the
-appropriate IO bus on which it expects to find HBA(s) (e.g. the PCI bus).
-This can probably be done via sysfs. Any driver parameters (especially
-those that are writable after the driver is loaded) could also be
-registered with sysfs at this point. The SCSI mid level first becomes
-aware of an LLD when that LLD registers its first HBA.
-
-At some later time, the LLD becomes aware of an HBA and what follows
-is a typical sequence of calls between the LLD and the mid level.
-This example shows the mid level scanning the newly introduced HBA for 3
-scsi devices of which only the first 2 respond:
-
- HBA PROBE: assume 2 SCSI devices found in scan
-LLD mid level LLD
-===-------------------=========--------------------===------
-scsi_host_alloc() -->
-scsi_add_host() ---->
-scsi_scan_host() -------+
- |
- slave_alloc()
- slave_configure() --> scsi_change_queue_depth()
- |
- slave_alloc()
- slave_configure()
- |
- slave_alloc() ***
- slave_destroy() ***
-------------------------------------------------------------
-
-If the LLD wants to adjust the default queue settings, it can invoke
-scsi_change_queue_depth() in its slave_configure() routine.
-
-*** For scsi devices that the mid level tries to scan but do not
- respond, a slave_alloc(), slave_destroy() pair is called.
-
-When an HBA is being removed it could be as part of an orderly shutdown
-associated with the LLD module being unloaded (e.g. with the "rmmod"
-command) or in response to a "hot unplug" indicated by sysfs()'s
-remove() callback being invoked. In either case, the sequence is the
-same:
-
- HBA REMOVE: assume 2 SCSI devices attached
-LLD mid level LLD
-===----------------------=========-----------------===------
-scsi_remove_host() ---------+
- |
- slave_destroy()
- slave_destroy()
-scsi_host_put()
-------------------------------------------------------------
-
-It may be useful for a LLD to keep track of struct Scsi_Host instances
-(a pointer is returned by scsi_host_alloc()). Such instances are "owned"
-by the mid-level. struct Scsi_Host instances are freed from
-scsi_host_put() when the reference count hits zero.
-
-Hot unplugging an HBA that controls a disk which is processing SCSI
-commands on a mounted file system is an interesting situation. Reference
-counting logic is being introduced into the mid level to cope with many
-of the issues involved. See the section on reference counting below.
-
-
-The hotplug concept may be extended to SCSI devices. Currently, when an
-HBA is added, the scsi_scan_host() function causes a scan for SCSI devices
-attached to the HBA's SCSI transport. On newer SCSI transports the HBA
-may become aware of a new SCSI device _after_ the scan has completed.
-An LLD can use this sequence to make the mid level aware of a SCSI device:
-
- SCSI DEVICE hotplug
-LLD mid level LLD
-===-------------------=========--------------------===------
-scsi_add_device() ------+
- |
- slave_alloc()
- slave_configure() [--> scsi_change_queue_depth()]
-------------------------------------------------------------
-
-In a similar fashion, an LLD may become aware that a SCSI device has been
-removed (unplugged) or the connection to it has been interrupted. Some
-existing SCSI transports (e.g. SPI) may not become aware that a SCSI
-device has been removed until a subsequent SCSI command fails which will
-probably cause that device to be set offline by the mid level. An LLD that
-detects the removal of a SCSI device can instigate its removal from
-upper layers with this sequence:
-
- SCSI DEVICE hot unplug
-LLD mid level LLD
-===----------------------=========-----------------===------
-scsi_remove_device() -------+
- |
- slave_destroy()
-------------------------------------------------------------
-
-It may be useful for an LLD to keep track of struct scsi_device instances
-(a pointer is passed as the parameter to slave_alloc() and
-slave_configure() callbacks). Such instances are "owned" by the mid-level.
-struct scsi_device instances are freed after slave_destroy().
-
-
-Reference Counting
-==================
-The Scsi_Host structure has had reference counting infrastructure added.
-This effectively spreads the ownership of struct Scsi_Host instances
-across the various SCSI layers which use them. Previously such instances
-were exclusively owned by the mid level. LLDs would not usually need to
-directly manipulate these reference counts but there may be some cases
-where they do.
-
-There are 3 reference counting functions of interest associated with
-struct Scsi_Host:
- - scsi_host_alloc(): returns a pointer to new instance of struct
- Scsi_Host which has its reference count ^^ set to 1
- - scsi_host_get(): adds 1 to the reference count of the given instance
- - scsi_host_put(): decrements 1 from the reference count of the given
- instance. If the reference count reaches 0 then the given instance
- is freed
-
-The scsi_device structure has had reference counting infrastructure added.
-This effectively spreads the ownership of struct scsi_device instances
-across the various SCSI layers which use them. Previously such instances
-were exclusively owned by the mid level. See the access functions declared
-towards the end of include/scsi/scsi_device.h . If an LLD wants to keep
-a copy of a pointer to a scsi_device instance it should use scsi_device_get()
-to bump its reference count. When it is finished with the pointer it can
-use scsi_device_put() to decrement its reference count (and potentially
-delete it).
-
-^^ struct Scsi_Host actually has 2 reference counts which are manipulated
-in parallel by these functions.
-
-
-Conventions
-===========
-First, Linus Torvalds's thoughts on C coding style can be found in the
-Documentation/process/coding-style.rst file.
-
-Next, there is a movement to "outlaw" typedefs introducing synonyms for
-struct tags. Both can be still found in the SCSI subsystem, but
-the typedefs have been moved to a single file, scsi_typedefs.h to
-make their future removal easier, for example:
-"typedef struct scsi_cmnd Scsi_Cmnd;"
-
-Also, most C99 enhancements are encouraged to the extent they are supported
-by the relevant gcc compilers. So C99 style structure and array
-initializers are encouraged where appropriate. Don't go too far,
-VLAs are not properly supported yet. An exception to this is the use of
-"//" style comments; /*...*/ comments are still preferred in Linux.
-
-Well written, tested and documented code, need not be re-formatted to
-comply with the above conventions. For example, the aic7xxx driver
-comes to Linux from FreeBSD and Adaptec's own labs. No doubt FreeBSD
-and Adaptec have their own coding conventions.
-
-
-Mid level supplied functions
-============================
-These functions are supplied by the SCSI mid level for use by LLDs.
-The names (i.e. entry points) of these functions are exported
-so an LLD that is a module can access them. The kernel will
-arrange for the SCSI mid level to be loaded and initialized before any LLD
-is initialized. The functions below are listed alphabetically and their
-names all start with "scsi_".
-
-Summary:
- scsi_add_device - creates new scsi device (lu) instance
- scsi_add_host - perform sysfs registration and set up transport class
- scsi_change_queue_depth - change the queue depth on a SCSI device
- scsi_bios_ptable - return copy of block device's partition table
- scsi_block_requests - prevent further commands being queued to given host
- scsi_host_alloc - return a new scsi_host instance whose refcount==1
- scsi_host_get - increments Scsi_Host instance's refcount
- scsi_host_put - decrements Scsi_Host instance's refcount (free if 0)
- scsi_register - create and register a scsi host adapter instance.
- scsi_remove_device - detach and remove a SCSI device
- scsi_remove_host - detach and remove all SCSI devices owned by host
- scsi_report_bus_reset - report scsi _bus_ reset observed
- scsi_scan_host - scan SCSI bus
- scsi_track_queue_full - track successive QUEUE_FULL events
- scsi_unblock_requests - allow further commands to be queued to given host
- scsi_unregister - [calls scsi_host_put()]
-
-
-Details:
-
-/**
- * scsi_add_device - creates new scsi device (lu) instance
- * @shost: pointer to scsi host instance
- * @channel: channel number (rarely other than 0)
- * @id: target id number
- * @lun: logical unit number
- *
- * Returns pointer to new struct scsi_device instance or
- * ERR_PTR(-ENODEV) (or some other bent pointer) if something is
- * wrong (e.g. no lu responds at given address)
- *
- * Might block: yes
- *
- * Notes: This call is usually performed internally during a scsi
- * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
- * should only be called if the HBA becomes aware of a new scsi
- * device (lu) after scsi_scan_host() has completed. If successful
- * this call can lead to slave_alloc() and slave_configure() callbacks
- * into the LLD.
- *
- * Defined in: drivers/scsi/scsi_scan.c
- **/
-struct scsi_device * scsi_add_device(struct Scsi_Host *shost,
- unsigned int channel,
- unsigned int id, unsigned int lun)
-
-
-/**
- * scsi_add_host - perform sysfs registration and set up transport class
- * @shost: pointer to scsi host instance
- * @dev: pointer to struct device of type scsi class
- *
- * Returns 0 on success, negative errno of failure (e.g. -ENOMEM)
- *
- * Might block: no
- *
- * Notes: Only required in "hotplug initialization model" after a
- * successful call to scsi_host_alloc(). This function does not
- * scan the bus; this can be done by calling scsi_scan_host() or
- * in some other transport-specific way. The LLD must set up
- * the transport template before calling this function and may only
- * access the transport class data after this function has been called.
- *
- * Defined in: drivers/scsi/hosts.c
- **/
-int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
-
-
-/**
- * scsi_change_queue_depth - allow LLD to change queue depth on a SCSI device
- * @sdev: pointer to SCSI device to change queue depth on
- * @tags Number of tags allowed if tagged queuing enabled,
- * or number of commands the LLD can queue up
- * in non-tagged mode (as per cmd_per_lun).
- *
- * Returns nothing
- *
- * Might block: no
- *
- * Notes: Can be invoked any time on a SCSI device controlled by this
- * LLD. [Specifically during and after slave_configure() and prior to
- * slave_destroy().] Can safely be invoked from interrupt code.
- *
- * Defined in: drivers/scsi/scsi.c [see source code for more notes]
- *
- **/
-int scsi_change_queue_depth(struct scsi_device *sdev, int tags)
-
-
-/**
- * scsi_bios_ptable - return copy of block device's partition table
- * @dev: pointer to block device
- *
- * Returns pointer to partition table, or NULL for failure
- *
- * Might block: yes
- *
- * Notes: Caller owns memory returned (free with kfree() )
- *
- * Defined in: drivers/scsi/scsicam.c
- **/
-unsigned char *scsi_bios_ptable(struct block_device *dev)
-
-
-/**
- * scsi_block_requests - prevent further commands being queued to given host
- *
- * @shost: pointer to host to block commands on
- *
- * Returns nothing
- *
- * Might block: no
- *
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the LLD calling scsi_unblock_requests().
- *
- * Defined in: drivers/scsi/scsi_lib.c
-**/
-void scsi_block_requests(struct Scsi_Host * shost)
-
-
-/**
- * scsi_host_alloc - create a scsi host adapter instance and perform basic
- * initialization.
- * @sht: pointer to scsi host template
- * @privsize: extra bytes to allocate in hostdata array (which is the
- * last member of the returned Scsi_Host instance)
- *
- * Returns pointer to new Scsi_Host instance or NULL on failure
- *
- * Might block: yes
- *
- * Notes: When this call returns to the LLD, the SCSI bus scan on
- * this host has _not_ yet been done.
- * The hostdata array (by default zero length) is a per host scratch
- * area for the LLD's exclusive use.
- * Both associated refcounting objects have their refcount set to 1.
- * Full registration (in sysfs) and a bus scan are performed later when
- * scsi_add_host() and scsi_scan_host() are called.
- *
- * Defined in: drivers/scsi/hosts.c .
- **/
-struct Scsi_Host * scsi_host_alloc(struct scsi_host_template * sht,
- int privsize)
-
-
-/**
- * scsi_host_get - increment Scsi_Host instance refcount
- * @shost: pointer to struct Scsi_Host instance
- *
- * Returns nothing
- *
- * Might block: currently may block but may be changed to not block
- *
- * Notes: Actually increments the counts in two sub-objects
- *
- * Defined in: drivers/scsi/hosts.c
- **/
-void scsi_host_get(struct Scsi_Host *shost)
-
-
-/**
- * scsi_host_put - decrement Scsi_Host instance refcount, free if 0
- * @shost: pointer to struct Scsi_Host instance
- *
- * Returns nothing
- *
- * Might block: currently may block but may be changed to not block
- *
- * Notes: Actually decrements the counts in two sub-objects. If the
- * latter refcount reaches 0, the Scsi_Host instance is freed.
- * The LLD need not worry exactly when the Scsi_Host instance is
- * freed, it just shouldn't access the instance after it has balanced
- * out its refcount usage.
- *
- * Defined in: drivers/scsi/hosts.c
- **/
-void scsi_host_put(struct Scsi_Host *shost)
-
-
-/**
- * scsi_register - create and register a scsi host adapter instance.
- * @sht: pointer to scsi host template
- * @privsize: extra bytes to allocate in hostdata array (which is the
- * last member of the returned Scsi_Host instance)
- *
- * Returns pointer to new Scsi_Host instance or NULL on failure
- *
- * Might block: yes
- *
- * Notes: When this call returns to the LLD, the SCSI bus scan on
- * this host has _not_ yet been done.
- * The hostdata array (by default zero length) is a per host scratch
- * area for the LLD.
- *
- * Defined in: drivers/scsi/hosts.c .
- **/
-struct Scsi_Host * scsi_register(struct scsi_host_template * sht,
- int privsize)
-
-
-/**
- * scsi_remove_device - detach and remove a SCSI device
- * @sdev: a pointer to a scsi device instance
- *
- * Returns value: 0 on success, -EINVAL if device not attached
- *
- * Might block: yes
- *
- * Notes: If an LLD becomes aware that a scsi device (lu) has
- * been removed but its host is still present then it can request
- * the removal of that scsi device. If successful this call will
- * lead to the slave_destroy() callback being invoked. sdev is an
- * invalid pointer after this call.
- *
- * Defined in: drivers/scsi/scsi_sysfs.c .
- **/
-int scsi_remove_device(struct scsi_device *sdev)
-
-
-/**
- * scsi_remove_host - detach and remove all SCSI devices owned by host
- * @shost: a pointer to a scsi host instance
- *
- * Returns value: 0 on success, 1 on failure (e.g. LLD busy ??)
- *
- * Might block: yes
- *
- * Notes: Should only be invoked if the "hotplug initialization
- * model" is being used. It should be called _prior_ to
- * scsi_unregister().
- *
- * Defined in: drivers/scsi/hosts.c .
- **/
-int scsi_remove_host(struct Scsi_Host *shost)
-
-
-/**
- * scsi_report_bus_reset - report scsi _bus_ reset observed
- * @shost: a pointer to a scsi host involved
- * @channel: channel (within) host on which scsi bus reset occurred
- *
- * Returns nothing
- *
- * Might block: no
- *
- * Notes: This only needs to be called if the reset is one which
- * originates from an unknown location. Resets originated by the
- * mid level itself don't need to call this, but there should be
- * no harm. The main purpose of this is to make sure that a
- * CHECK_CONDITION is properly treated.
- *
- * Defined in: drivers/scsi/scsi_error.c .
- **/
-void scsi_report_bus_reset(struct Scsi_Host * shost, int channel)
-
-
-/**
- * scsi_scan_host - scan SCSI bus
- * @shost: a pointer to a scsi host instance
- *
- * Might block: yes
- *
- * Notes: Should be called after scsi_add_host()
- *
- * Defined in: drivers/scsi/scsi_scan.c
- **/
-void scsi_scan_host(struct Scsi_Host *shost)
-
-
-/**
- * scsi_track_queue_full - track successive QUEUE_FULL events on given
- * device to determine if and when there is a need
- * to adjust the queue depth on the device.
- * @sdev: pointer to SCSI device instance
- * @depth: Current number of outstanding SCSI commands on this device,
- * not counting the one returned as QUEUE_FULL.
- *
- * Returns 0 - no change needed
- * >0 - adjust queue depth to this new depth
- * -1 - drop back to untagged operation using host->cmd_per_lun
- * as the untagged command depth
- *
- * Might block: no
- *
- * Notes: LLDs may call this at any time and we will do "The Right
- * Thing"; interrupt context safe.
- *
- * Defined in: drivers/scsi/scsi.c .
- **/
-int scsi_track_queue_full(struct scsi_device *sdev, int depth)
-
-
-/**
- * scsi_unblock_requests - allow further commands to be queued to given host
- *
- * @shost: pointer to host to unblock commands on
- *
- * Returns nothing
- *
- * Might block: no
- *
- * Defined in: drivers/scsi/scsi_lib.c .
-**/
-void scsi_unblock_requests(struct Scsi_Host * shost)
-
-
-/**
- * scsi_unregister - unregister and free memory used by host instance
- * @shp: pointer to scsi host instance to unregister.
- *
- * Returns nothing
- *
- * Might block: no
- *
- * Notes: Should not be invoked if the "hotplug initialization
- * model" is being used. Called internally by exit_this_scsi_driver()
- * in the "passive initialization model". Hence a LLD has no need to
- * call this function directly.
- *
- * Defined in: drivers/scsi/hosts.c .
- **/
-void scsi_unregister(struct Scsi_Host * shp)
-
-
-
-
-Interface Functions
-===================
-Interface functions are supplied (defined) by LLDs and their function
-pointers are placed in an instance of struct scsi_host_template which
-is passed to scsi_host_alloc() [or scsi_register() / init_this_scsi_driver()].
-Some are mandatory. Interface functions should be declared static. The
-accepted convention is that driver "xyz" will declare its slave_configure()
-function as:
- static int xyz_slave_configure(struct scsi_device * sdev);
-and so forth for all interface functions listed below.
-
-A pointer to this function should be placed in the 'slave_configure' member
-of a "struct scsi_host_template" instance. A pointer to such an instance
-should be passed to the mid level's scsi_host_alloc() [or scsi_register() /
-init_this_scsi_driver()].
-
-The interface functions are also described in the include/scsi/scsi_host.h
-file immediately above their definition point in "struct scsi_host_template".
-In some cases more detail is given in scsi_host.h than below.
-
-The interface functions are listed below in alphabetical order.
-
-Summary:
- bios_param - fetch head, sector, cylinder info for a disk
- eh_timed_out - notify the host that a command timer expired
- eh_abort_handler - abort given command
- eh_bus_reset_handler - issue SCSI bus reset
- eh_device_reset_handler - issue SCSI device reset
- eh_host_reset_handler - reset host (host bus adapter)
- info - supply information about given host
- ioctl - driver can respond to ioctls
- proc_info - supports /proc/scsi/{driver_name}/{host_no}
- queuecommand - queue scsi command, invoke 'done' on completion
- slave_alloc - prior to any commands being sent to a new device
- slave_configure - driver fine tuning for given device after attach
- slave_destroy - given device is about to be shut down
-
-
-Details:
-
-/**
- * bios_param - fetch head, sector, cylinder info for a disk
- * @sdev: pointer to scsi device context (defined in
- * include/scsi/scsi_device.h)
- * @bdev: pointer to block device context (defined in fs.h)
- * @capacity: device size (in 512 byte sectors)
- * @params: three element array to place output:
- * params[0] number of heads (max 255)
- * params[1] number of sectors (max 63)
- * params[2] number of cylinders
- *
- * Return value is ignored
- *
- * Locks: none
- *
- * Calling context: process (sd)
- *
- * Notes: an arbitrary geometry (based on READ CAPACITY) is used
- * if this function is not provided. The params array is
- * pre-initialized with made up values just in case this function
- * doesn't output anything.
- *
- * Optionally defined in: LLD
- **/
- int bios_param(struct scsi_device * sdev, struct block_device *bdev,
- sector_t capacity, int params[3])
-
-
-/**
- * eh_timed_out - The timer for the command has just fired
- * @scp: identifies command timing out
- *
- * Returns:
- *
- * EH_HANDLED: I fixed the error, please complete the command
- * EH_RESET_TIMER: I need more time, reset the timer and
- * begin counting again
- * EH_NOT_HANDLED Begin normal error recovery
- *
- *
- * Locks: None held
- *
- * Calling context: interrupt
- *
- * Notes: This is to give the LLD an opportunity to do local recovery.
- * This recovery is limited to determining if the outstanding command
- * will ever complete. You may not abort and restart the command from
- * this callback.
- *
- * Optionally defined in: LLD
- **/
- int eh_timed_out(struct scsi_cmnd * scp)
-
-
-/**
- * eh_abort_handler - abort command associated with scp
- * @scp: identifies command to be aborted
- *
- * Returns SUCCESS if command aborted else FAILED
- *
- * Locks: None held
- *
- * Calling context: kernel thread
- *
- * Notes: If 'no_async_abort' is defined this callback
- * will be invoked from scsi_eh thread. No other commands
- * will then be queued on current host during eh.
- * Otherwise it will be called whenever scsi_times_out()
- * is called due to a command timeout.
- *
- * Optionally defined in: LLD
- **/
- int eh_abort_handler(struct scsi_cmnd * scp)
-
-
-/**
- * eh_bus_reset_handler - issue SCSI bus reset
- * @scp: SCSI bus that contains this device should be reset
- *
- * Returns SUCCESS if command aborted else FAILED
- *
- * Locks: None held
- *
- * Calling context: kernel thread
- *
- * Notes: Invoked from scsi_eh thread. No other commands will be
- * queued on current host during eh.
- *
- * Optionally defined in: LLD
- **/
- int eh_bus_reset_handler(struct scsi_cmnd * scp)
-
-
-/**
- * eh_device_reset_handler - issue SCSI device reset
- * @scp: identifies SCSI device to be reset
- *
- * Returns SUCCESS if command aborted else FAILED
- *
- * Locks: None held
- *
- * Calling context: kernel thread
- *
- * Notes: Invoked from scsi_eh thread. No other commands will be
- * queued on current host during eh.
- *
- * Optionally defined in: LLD
- **/
- int eh_device_reset_handler(struct scsi_cmnd * scp)
-
-
-/**
- * eh_host_reset_handler - reset host (host bus adapter)
- * @scp: SCSI host that contains this device should be reset
- *
- * Returns SUCCESS if command aborted else FAILED
- *
- * Locks: None held
- *
- * Calling context: kernel thread
- *
- * Notes: Invoked from scsi_eh thread. No other commands will be
- * queued on current host during eh.
- * With the default eh_strategy in place, if none of the _abort_,
- * _device_reset_, _bus_reset_ or this eh handler function are
- * defined (or they all return FAILED) then the device in question
- * will be set offline whenever eh is invoked.
- *
- * Optionally defined in: LLD
- **/
- int eh_host_reset_handler(struct scsi_cmnd * scp)
-
-
-/**
- * info - supply information about given host: driver name plus data
- * to distinguish given host
- * @shp: host to supply information about
- *
- * Return ASCII null terminated string. [This driver is assumed to
- * manage the memory pointed to and maintain it, typically for the
- * lifetime of this host.]
- *
- * Locks: none
- *
- * Calling context: process
- *
- * Notes: Often supplies PCI or ISA information such as IO addresses
- * and interrupt numbers. If not supplied struct Scsi_Host::name used
- * instead. It is assumed the returned information fits on one line
- * (i.e. does not included embedded newlines).
- * The SCSI_IOCTL_PROBE_HOST ioctl yields the string returned by this
- * function (or struct Scsi_Host::name if this function is not
- * available).
- * In a similar manner, init_this_scsi_driver() outputs to the console
- * each host's "info" (or name) for the driver it is registering.
- * Also if proc_info() is not supplied, the output of this function
- * is used instead.
- *
- * Optionally defined in: LLD
- **/
- const char * info(struct Scsi_Host * shp)
-
-
-/**
- * ioctl - driver can respond to ioctls
- * @sdp: device that ioctl was issued for
- * @cmd: ioctl number
- * @arg: pointer to read or write data from. Since it points to
- * user space, should use appropriate kernel functions
- * (e.g. copy_from_user() ). In the Unix style this argument
- * can also be viewed as an unsigned long.
- *
- * Returns negative "errno" value when there is a problem. 0 or a
- * positive value indicates success and is returned to the user space.
- *
- * Locks: none
- *
- * Calling context: process
- *
- * Notes: The SCSI subsystem uses a "trickle down" ioctl model.
- * The user issues an ioctl() against an upper level driver
- * (e.g. /dev/sdc) and if the upper level driver doesn't recognize
- * the 'cmd' then it is passed to the SCSI mid level. If the SCSI
- * mid level does not recognize it, then the LLD that controls
- * the device receives the ioctl. According to recent Unix standards
- * unsupported ioctl() 'cmd' numbers should return -ENOTTY.
- *
- * Optionally defined in: LLD
- **/
- int ioctl(struct scsi_device *sdp, int cmd, void *arg)
-
-
-/**
- * proc_info - supports /proc/scsi/{driver_name}/{host_no}
- * @buffer: anchor point to output to (0==writeto1_read0) or fetch from
- * (1==writeto1_read0).
- * @start: where "interesting" data is written to. Ignored when
- * 1==writeto1_read0.
- * @offset: offset within buffer 0==writeto1_read0 is actually
- * interested in. Ignored when 1==writeto1_read0 .
- * @length: maximum (or actual) extent of buffer
- * @host_no: host number of interest (struct Scsi_Host::host_no)
- * @writeto1_read0: 1 -> data coming from user space towards driver
- * (e.g. "echo some_string > /proc/scsi/xyz/2")
- * 0 -> user what data from this driver
- * (e.g. "cat /proc/scsi/xyz/2")
- *
- * Returns length when 1==writeto1_read0. Otherwise number of chars
- * output to buffer past offset.
- *
- * Locks: none held
- *
- * Calling context: process
- *
- * Notes: Driven from scsi_proc.c which interfaces to proc_fs. proc_fs
- * support can now be configured out of the scsi subsystem.
- *
- * Optionally defined in: LLD
- **/
- int proc_info(char * buffer, char ** start, off_t offset,
- int length, int host_no, int writeto1_read0)
-
-
-/**
- * queuecommand - queue scsi command, invoke scp->scsi_done on completion
- * @shost: pointer to the scsi host object
- * @scp: pointer to scsi command object
- *
- * Returns 0 on success.
- *
- * If there's a failure, return either:
- *
- * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
- * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
- *
- * On both of these returns, the mid-layer will requeue the I/O
- *
- * - if the return is SCSI_MLQUEUE_DEVICE_BUSY, only that particular
- * device will be paused, and it will be unpaused when a command to
- * the device returns (or after a brief delay if there are no more
- * outstanding commands to it). Commands to other devices continue
- * to be processed normally.
- *
- * - if the return is SCSI_MLQUEUE_HOST_BUSY, all I/O to the host
- * is paused and will be unpaused when any command returns from
- * the host (or after a brief delay if there are no outstanding
- * commands to the host).
- *
- * For compatibility with earlier versions of queuecommand, any
- * other return value is treated the same as
- * SCSI_MLQUEUE_HOST_BUSY.
- *
- * Other types of errors that are detected immediately may be
- * flagged by setting scp->result to an appropriate value,
- * invoking the scp->scsi_done callback, and then returning 0
- * from this function. If the command is not performed
- * immediately (and the LLD is starting (or will start) the given
- * command) then this function should place 0 in scp->result and
- * return 0.
- *
- * Command ownership. If the driver returns zero, it owns the
- * command and must take responsibility for ensuring the
- * scp->scsi_done callback is executed. Note: the driver may
- * call scp->scsi_done before returning zero, but after it has
- * called scp->scsi_done, it may not return any value other than
- * zero. If the driver makes a non-zero return, it must not
- * execute the command's scsi_done callback at any time.
- *
- * Locks: up to and including 2.6.36, struct Scsi_Host::host_lock
- * held on entry (with "irqsave") and is expected to be
- * held on return. From 2.6.37 onwards, queuecommand is
- * called without any locks held.
- *
- * Calling context: in interrupt (soft irq) or process context
- *
- * Notes: This function should be relatively fast. Normally it
- * will not wait for IO to complete. Hence the scp->scsi_done
- * callback is invoked (often directly from an interrupt service
- * routine) some time after this function has returned. In some
- * cases (e.g. pseudo adapter drivers that manufacture the
- * response to a SCSI INQUIRY) the scp->scsi_done callback may be
- * invoked before this function returns. If the scp->scsi_done
- * callback is not invoked within a certain period the SCSI mid
- * level will commence error processing. If a status of CHECK
- * CONDITION is placed in "result" when the scp->scsi_done
- * callback is invoked, then the LLD driver should perform
- * autosense and fill in the struct scsi_cmnd::sense_buffer
- * array. The scsi_cmnd::sense_buffer array is zeroed prior to
- * the mid level queuing a command to an LLD.
- *
- * Defined in: LLD
- **/
- int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp)
-
-
-/**
- * slave_alloc - prior to any commands being sent to a new device
- * (i.e. just prior to scan) this call is made
- * @sdp: pointer to new device (about to be scanned)
- *
- * Returns 0 if ok. Any other return is assumed to be an error and
- * the device is ignored.
- *
- * Locks: none
- *
- * Calling context: process
- *
- * Notes: Allows the driver to allocate any resources for a device
- * prior to its initial scan. The corresponding scsi device may not
- * exist but the mid level is just about to scan for it (i.e. send
- * and INQUIRY command plus ...). If a device is found then
- * slave_configure() will be called while if a device is not found
- * slave_destroy() is called.
- * For more details see the include/scsi/scsi_host.h file.
- *
- * Optionally defined in: LLD
- **/
- int slave_alloc(struct scsi_device *sdp)
-
-
-/**
- * slave_configure - driver fine tuning for given device just after it
- * has been first scanned (i.e. it responded to an
- * INQUIRY)
- * @sdp: device that has just been attached
- *
- * Returns 0 if ok. Any other return is assumed to be an error and
- * the device is taken offline. [offline devices will _not_ have
- * slave_destroy() called on them so clean up resources.]
- *
- * Locks: none
- *
- * Calling context: process
- *
- * Notes: Allows the driver to inspect the response to the initial
- * INQUIRY done by the scanning code and take appropriate action.
- * For more details see the include/scsi/scsi_host.h file.
- *
- * Optionally defined in: LLD
- **/
- int slave_configure(struct scsi_device *sdp)
-
-
-/**
- * slave_destroy - given device is about to be shut down. All
- * activity has ceased on this device.
- * @sdp: device that is about to be shut down
- *
- * Returns nothing
- *
- * Locks: none
- *
- * Calling context: process
- *
- * Notes: Mid level structures for given device are still in place
- * but are about to be torn down. Any per device resources allocated
- * by this driver for given device should be freed now. No further
- * commands will be sent for this sdp instance. [However the device
- * could be re-attached in the future in which case a new instance
- * of struct scsi_device would be supplied by future slave_alloc()
- * and slave_configure() calls.]
- *
- * Optionally defined in: LLD
- **/
- void slave_destroy(struct scsi_device *sdp)
-
-
-
-Data Structures
-===============
-struct scsi_host_template
--------------------------
-There is one "struct scsi_host_template" instance per LLD ***. It is
-typically initialized as a file scope static in a driver's header file. That
-way members that are not explicitly initialized will be set to 0 or NULL.
-Member of interest:
- name - name of driver (may contain spaces, please limit to
- less than 80 characters)
- proc_name - name used in "/proc/scsi/<proc_name>/<host_no>" and
- by sysfs in one of its "drivers" directories. Hence
- "proc_name" should only contain characters acceptable
- to a Unix file name.
- (*queuecommand)() - primary callback that the mid level uses to inject
- SCSI commands into an LLD.
-The structure is defined and commented in include/scsi/scsi_host.h
-
-*** In extreme situations a single driver may have several instances
- if it controls several different classes of hardware (e.g. an LLD
- that handles both ISA and PCI cards and has a separate instance of
- struct scsi_host_template for each class).
-
-struct Scsi_Host
-----------------
-There is one struct Scsi_Host instance per host (HBA) that an LLD
-controls. The struct Scsi_Host structure has many members in common
-with "struct scsi_host_template". When a new struct Scsi_Host instance
-is created (in scsi_host_alloc() in hosts.c) those common members are
-initialized from the driver's struct scsi_host_template instance. Members
-of interest:
- host_no - system wide unique number that is used for identifying
- this host. Issued in ascending order from 0.
- can_queue - must be greater than 0; do not send more than can_queue
- commands to the adapter.
- this_id - scsi id of host (scsi initiator) or -1 if not known
- sg_tablesize - maximum scatter gather elements allowed by host.
- Set this to SG_ALL or less to avoid chained SG lists.
- Must be at least 1.
- max_sectors - maximum number of sectors (usually 512 bytes) allowed
- in a single SCSI command. The default value of 0 leads
- to a setting of SCSI_DEFAULT_MAX_SECTORS (defined in
- scsi_host.h) which is currently set to 1024. So for a
- disk the maximum transfer size is 512 KB when max_sectors
- is not defined. Note that this size may not be sufficient
- for disk firmware uploads.
- cmd_per_lun - maximum number of commands that can be queued on devices
- controlled by the host. Overridden by LLD calls to
- scsi_change_queue_depth().
- unchecked_isa_dma - 1=>only use bottom 16 MB of ram (ISA DMA addressing
- restriction), 0=>can use full 32 bit (or better) DMA
- address space
- no_async_abort - 1=>Asynchronous aborts are not supported
- 0=>Timed-out commands will be aborted asynchronously
- hostt - pointer to driver's struct scsi_host_template from which
- this struct Scsi_Host instance was spawned
- hostt->proc_name - name of LLD. This is the driver name that sysfs uses
- transportt - pointer to driver's struct scsi_transport_template instance
- (if any). FC and SPI transports currently supported.
- sh_list - a double linked list of pointers to all struct Scsi_Host
- instances (currently ordered by ascending host_no)
- my_devices - a double linked list of pointers to struct scsi_device
- instances that belong to this host.
- hostdata[0] - area reserved for LLD at end of struct Scsi_Host. Size
- is set by the second argument (named 'xtr_bytes') to
- scsi_host_alloc() or scsi_register().
- vendor_id - a unique value that identifies the vendor supplying
- the LLD for the Scsi_Host. Used most often in validating
- vendor-specific message requests. Value consists of an
- identifier type and a vendor-specific value.
- See scsi_netlink.h for a description of valid formats.
-
-The scsi_host structure is defined in include/scsi/scsi_host.h
-
-struct scsi_device
-------------------
-Generally, there is one instance of this structure for each SCSI logical unit
-on a host. Scsi devices connected to a host are uniquely identified by a
-channel number, target id and logical unit number (lun).
-The structure is defined in include/scsi/scsi_device.h
-
-struct scsi_cmnd
-----------------
-Instances of this structure convey SCSI commands to the LLD and responses
-back to the mid level. The SCSI mid level will ensure that no more SCSI
-commands become queued against the LLD than are indicated by
-scsi_change_queue_depth() (or struct Scsi_Host::cmd_per_lun). There will
-be at least one instance of struct scsi_cmnd available for each SCSI device.
-Members of interest:
- cmnd - array containing SCSI command
- cmnd_len - length (in bytes) of SCSI command
- sc_data_direction - direction of data transfer in data phase. See
- "enum dma_data_direction" in include/linux/dma-mapping.h
- request_bufflen - number of data bytes to transfer (0 if no data phase)
- use_sg - ==0 -> no scatter gather list, hence transfer data
- to/from request_buffer
- - >0 -> scatter gather list (actually an array) in
- request_buffer with use_sg elements
- request_buffer - either contains data buffer or scatter gather list
- depending on the setting of use_sg. Scatter gather
- elements are defined by 'struct scatterlist' found
- in include/linux/scatterlist.h .
- done - function pointer that should be invoked by LLD when the
- SCSI command is completed (successfully or otherwise).
- Should only be called by an LLD if the LLD has accepted
- the command (i.e. queuecommand() returned or will return
- 0). The LLD may invoke 'done' prior to queuecommand()
- finishing.
- result - should be set by LLD prior to calling 'done'. A value
- of 0 implies a successfully completed command (and all
- data (if any) has been transferred to or from the SCSI
- target device). 'result' is a 32 bit unsigned integer that
- can be viewed as 4 related bytes. The SCSI status value is
- in the LSB. See include/scsi/scsi.h status_byte(),
- msg_byte(), host_byte() and driver_byte() macros and
- related constants.
- sense_buffer - an array (maximum size: SCSI_SENSE_BUFFERSIZE bytes) that
- should be written when the SCSI status (LSB of 'result')
- is set to CHECK_CONDITION (2). When CHECK_CONDITION is
- set, if the top nibble of sense_buffer[0] has the value 7
- then the mid level will assume the sense_buffer array
- contains a valid SCSI sense buffer; otherwise the mid
- level will issue a REQUEST_SENSE SCSI command to
- retrieve the sense buffer. The latter strategy is error
- prone in the presence of command queuing so the LLD should
- always "auto-sense".
- device - pointer to scsi_device object that this command is
- associated with.
- resid - an LLD should set this signed integer to the requested
- transfer length (i.e. 'request_bufflen') less the number
- of bytes that are actually transferred. 'resid' is
- preset to 0 so an LLD can ignore it if it cannot detect
- underruns (overruns should be rare). If possible an LLD
- should set 'resid' prior to invoking 'done'. The most
- interesting case is data transfers from a SCSI target
- device (e.g. READs) that underrun.
- underflow - LLD should place (DID_ERROR << 16) in 'result' if
- actual number of bytes transferred is less than this
- figure. Not many LLDs implement this check and some that
- do just output an error message to the log rather than
- report a DID_ERROR. Better for an LLD to implement
- 'resid'.
-
-It is recommended that an LLD set 'resid' on data transfers from a SCSI
-target device (e.g. READs). It is especially important that 'resid' is set
-when such data transfers have sense keys of MEDIUM ERROR and HARDWARE ERROR
-(and possibly RECOVERED ERROR). In these cases, if an LLD is in doubt about
-how much data has been received, the safest approach is to indicate that no
-bytes have been received. For example, to indicate that no valid data has
-been received an LLD might use this helper:
- scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
-where 'SCpnt' is a pointer to a scsi_cmnd object. To indicate that only
-three 512-byte blocks have been received, 'resid' could be set like this:
- scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - (3 * 512));
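-
-Putting the above together, a hypothetical LLD completion path for a READ
-that transferred fewer bytes than requested might look like this (a sketch
-only; 'got' and the saved 'done' pointer are assumptions):
-    static void example_complete(struct scsi_cmnd *SCpnt, unsigned int got,
-                                 void (*done)(struct scsi_cmnd *))
-    {
-            scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - got);
-            SCpnt->result = DID_OK << 16;  /* successful completion */
-            done(SCpnt);                   /* hand the command back */
-    }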
-
-The scsi_cmnd structure is defined in include/scsi/scsi_cmnd.h
-
-
-Locks
-=====
-Each struct Scsi_Host instance has a spin_lock called struct
-Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in
-hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer
-is initialized to point at default_lock. Thereafter lock and unlock
-operations performed by the mid level use the struct Scsi_Host::host_lock
-pointer. Previously drivers could override the host_lock pointer but
-this is not allowed anymore.
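-
-For example, an LLD that needs to serialize against the mid level might
-bracket the critical section like this (sketch; 'shost' is the LLD's
-struct Scsi_Host pointer):
-    unsigned long flags;
-
-    spin_lock_irqsave(shost->host_lock, flags);
-    /* ... touch state shared with the mid level ... */
-    spin_unlock_irqrestore(shost->host_lock, flags);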
-
-
-Autosense
-=========
-Autosense (or auto-sense) is defined in the SAM-2 document as "the
-automatic return of sense data to the application client coincident
-with the completion of a SCSI command" when a status of CHECK CONDITION
-occurs. LLDs should perform autosense. This should be done when the LLD
-detects a CHECK CONDITION status by either:
- a) instructing the SCSI protocol (e.g. SCSI Parallel Interface (SPI))
- to perform an extra data in phase on such responses
- b) or, the LLD issuing a REQUEST SENSE command itself
-
-Either way, when a status of CHECK CONDITION is detected, the mid level
-decides whether the LLD has performed autosense by checking struct
-scsi_cmnd::sense_buffer[0] . If this byte has an upper nibble of 7 (or 0xf)
-then autosense is assumed to have taken place. If it has another value (and
-this byte is initialized to 0 before each command) then the mid level will
-issue a REQUEST SENSE command.
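-
-In code terms, the mid level's decision amounts to something like this
-(a sketch, not the exact mid level source):
-    if ((SCpnt->sense_buffer[0] & 0x70) == 0x70) {
-            /* LLD auto-sensed; sense_buffer holds valid sense data */
-    } else {
-            /* mid level issues a REQUEST SENSE command itself */
-    }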
-
-In the presence of queued commands the "nexus" that maintains sense
-buffer data from the command that failed until a following REQUEST SENSE
-may get out of synchronization. This is why it is best for the LLD
-to perform autosense.
-
-
-Changes since lk 2.4 series
-===========================
-io_request_lock has been replaced by several finer grained locks. The lock
-relevant to LLDs is struct Scsi_Host::host_lock and there is
-one per SCSI host.
-
-The older error handling mechanism has been removed. This means the
-LLD interface functions abort() and reset() have been removed.
-The struct scsi_host_template::use_new_eh_code flag has been removed.
-
-In the 2.4 series the SCSI subsystem configuration descriptions were
-aggregated with the configuration descriptions from all other Linux
-subsystems in the Documentation/Configure.help file. In the 2.6 series,
-the SCSI subsystem now has its own (much smaller) drivers/scsi/Kconfig
-file that contains both configuration and help information.
-
-struct SHT has been renamed to struct scsi_host_template.
-
-Addition of the "hotplug initialization model" and many extra functions
-to support it.
-
-
-Credits
-=======
-The following people have contributed to this document:
- Mike Anderson <andmike at us dot ibm dot com>
- James Bottomley <James dot Bottomley at hansenpartnership dot com>
- Patrick Mansfield <patmans at us dot ibm dot com>
- Christoph Hellwig <hch at infradead dot org>
- Doug Ledford <dledford at redhat dot com>
- Andries Brouwer <Andries dot Brouwer at cwi dot nl>
- Randy Dunlap <rdunlap at xenotime dot net>
- Alan Stern <stern at rowland dot harvard dot edu>
-
-
-Douglas Gilbert
-dgilbert at interlog dot com
-21st September 2004
diff --git a/Documentation/scsi/scsi_transport_srp/Makefile b/Documentation/scsi/scsi_transport_srp/Makefile
deleted file mode 100644
index 5f6b567e955c..000000000000
--- a/Documentation/scsi/scsi_transport_srp/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all: rport_state_diagram.svg rport_state_diagram.png
-
-rport_state_diagram.svg: rport_state_diagram.dot
- dot -Tsvg -o $@ $<
-
-rport_state_diagram.png: rport_state_diagram.dot
- dot -Tpng -o $@ $<
diff --git a/Documentation/scsi/scsi_transport_srp/figures.rst b/Documentation/scsi/scsi_transport_srp/figures.rst
new file mode 100644
index 000000000000..6c8f8dd6301b
--- /dev/null
+++ b/Documentation/scsi/scsi_transport_srp/figures.rst
@@ -0,0 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+SCSI RDMA (SRP) transport class diagram
+=======================================
+
+.. kernel-figure:: rport_state_diagram.dot
diff --git a/Documentation/scsi/sd-parameters.rst b/Documentation/scsi/sd-parameters.rst
new file mode 100644
index 000000000000..87d554008bfb
--- /dev/null
+++ b/Documentation/scsi/sd-parameters.rst
@@ -0,0 +1,27 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
+Linux SCSI Disk Driver (sd) Parameters
+======================================
+
+cache_type (RW)
+---------------
+Enable/disable drive write & read cache.
+
+=========================== === === =========== ==========
+ cache_type string WCE RCD Write cache Read cache
+=========================== === === =========== ==========
+ write through 0 0 off on
+ none 0 1 off off
+ write back 1 0 on on
+ write back, no read (daft) 1 1 on off
+=========================== === === =========== ==========
+
+To set cache type to "write back" and save this setting to the drive::
+
+ # echo "write back" > cache_type
+
+To modify the caching mode without making the change persistent, prepend
+"temporary " to the cache type string. E.g.::
+
+ # echo "temporary write back" > cache_type
diff --git a/Documentation/scsi/sd-parameters.txt b/Documentation/scsi/sd-parameters.txt
deleted file mode 100644
index 8e5af00d88e7..000000000000
--- a/Documentation/scsi/sd-parameters.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Linux SCSI Disk Driver (sd) Parameters
-======================================
-
-cache_type (RW)
----------------
-Enable/disable drive write & read cache.
-
- cache_type string | WCE RCD | Write cache | Read cache
-----------------------------+---------+-------------+------------
- write through | 0 0 | off | on
- none | 0 1 | off | off
- write back | 1 0 | on | on
- write back, no read (daft) | 1 1 | on | off
-
-To set cache type to "write back" and save this setting to the drive:
-
- # echo "write back" > cache_type
-
-To modify the caching mode without making the change persistent, prepend
-"temporary " to the cache type string. E.g.:
-
- # echo "temporary write back" > cache_type
diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.rst
index df129f55ace5..a7de27352c6f 100644
--- a/Documentation/scsi/smartpqi.txt
+++ b/Documentation/scsi/smartpqi.rst
@@ -1,6 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+=====================================
SMARTPQI - Microsemi Smart PQI Driver
------------------------------------------
+=====================================
This file describes the smartpqi SCSI driver for Microsemi
(http://www.microsemi.com) PQI controllers. The smartpqi driver
@@ -16,20 +18,21 @@ For Microsemi smartpqi controller support, enable the smartpqi driver
when configuring the kernel.
For more information on the PQI Queuing Interface, please see:
-http://www.t10.org/drafts.htm
-http://www.t10.org/members/w_pqi2.htm
-Supported devices:
-------------------
+- http://www.t10.org/drafts.htm
+- http://www.t10.org/members/w_pqi2.htm
+
+Supported devices
+=================
<Controller names to be added as they become publicly available.>
smartpqi specific entries in /sys
------------------------------
+=================================
- smartpqi host attributes:
- -------------------------
- /sys/class/scsi_host/host*/rescan
- /sys/class/scsi_host/host*/driver_version
+smartpqi host attributes
+------------------------
+ - /sys/class/scsi_host/host*/rescan
+ - /sys/class/scsi_host/host*/driver_version
The host rescan attribute is a write only attribute. Writing to this
attribute will trigger the driver to scan for new, changed, or removed
@@ -37,12 +40,13 @@ smartpqi specific entries in /sys
The version attribute is read-only and will return the driver version
and the controller firmware version.
- For example:
+ For example::
+
driver: 0.9.13-370
firmware: 0.01-522
- smartpqi sas device attributes
- ------------------------------
+smartpqi sas device attributes
+------------------------------
HBA devices are added to the SAS transport layer. These attributes are
automatically added by the SAS transport layer.
@@ -50,31 +54,25 @@ smartpqi specific entries in /sys
/sys/class/sas_device/end_device-X:X/enclosure_identifier
/sys/class/sas_device/end_device-X:X/scsi_target_id
-smartpqi specific ioctls:
--------------------------
+smartpqi specific ioctls
+========================
For compatibility with applications written for the cciss protocol.
- CCISS_DEREGDISK
- CCISS_REGNEWDISK
- CCISS_REGNEWD
-
- The above three ioctls all do exactly the same thing, which is to cause the driver
- to rescan for new devices. This does exactly the same thing as writing to the
- smartpqi specific host "rescan" attribute.
+ CCISS_DEREGDISK, CCISS_REGNEWDISK, CCISS_REGNEWD
+  All three ioctls do the same thing: they cause the driver to rescan
+  for new devices, exactly as writing to the smartpqi specific host
+  "rescan" attribute does.
CCISS_GETPCIINFO
-
Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).
CCISS_GETDRIVVER
+ Returns driver version in three bytes encoded as::
- Returns driver version in three bytes encoded as:
- (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | (DRIVER_RELEASE << 16) | DRIVER_REVISION;
+ (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | (DRIVER_RELEASE << 16) | DRIVER_REVISION;
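+
+	A user-space sketch decoding that value (hypothetical helper, not
+	part of the driver's sources)::
+
+		#include <stdio.h>
+
+		static void print_driver_version(unsigned int v)
+		{
+			printf("%u.%u.%u-%u\n",
+			       (v >> 28) & 0xf,    /* DRIVER_MAJOR */
+			       (v >> 24) & 0xf,    /* DRIVER_MINOR */
+			       (v >> 16) & 0xff,   /* DRIVER_RELEASE */
+			       v & 0xffff);        /* DRIVER_REVISION */
+		}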
CCISS_PASSTHRU
-
Allows "BMIC" and "CISS" commands to be passed through to the Smart Storage Array.
These are used extensively by the SSA Array Configuration Utility, SNMP storage
agents, etc.
-
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.rst
index ec0acf6acccd..d3b28c28d74c 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+The SCSI Tape Driver
+====================
+
This file contains brief information about the SCSI tape driver.
The driver is currently maintained by Kai Mäkisara (email
Kai.Makisara@kolumbus.fi)
@@ -5,7 +11,8 @@ Kai.Makisara@kolumbus.fi)
Last modified: Tue Feb 9 21:54:16 2016 by kai.makisara
-BASICS
+Basics
+======
The driver is generic, i.e., it does not contain any code tailored
to any specific tape drive. The tape parameters can be specified with
@@ -110,15 +117,17 @@ tape in the drive (commands trying to write something return error if
attempted).
-MINOR NUMBERS
+Minor Numbers
+=============
The tape driver currently supports up to 2^17 drives if 4 modes for
each drive are used.
-The minor numbers consist of the following bit fields:
+The minor numbers consist of the following bit fields::
+
+ dev_upper non-rew mode dev-lower
+ 20 - 8 7 6 5 4 0
-dev_upper non-rew mode dev-lower
- 20 - 8 7 6 5 4 0
The non-rewind bit is always bit 7 (the uppermost bit in the lowermost
byte). The bits defining the mode are below the non-rewind bit. The
remaining bits define the tape device number. This numbering is
@@ -126,7 +135,8 @@ backward compatible with the numbering used when the minor number was
only 8 bits wide.
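+
+As a sketch, composing a minor number from the fields above (the helper
+name is illustrative, not from the driver)::
+
+	static unsigned int st_minor_nr(unsigned int dev, unsigned int mode,
+					unsigned int non_rew)
+	{
+		return ((dev >> 5) << 8) |	/* dev_upper: bits 20 - 8 */
+		       ((non_rew & 1) << 7) |	/* non-rewind: bit 7 */
+		       ((mode & 3) << 5) |	/* mode: bits 6 - 5 */
+		       (dev & 0x1f);		/* dev_lower: bits 4 - 0 */
+	}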
-SYSFS SUPPORT
+Sysfs Support
+=============
The driver creates the directory /sys/class/scsi_tape and populates it with
directories corresponding to the existing tape devices. There are autorewind
@@ -148,10 +158,11 @@ bit definitions are the same as those used with MTSETDRVBUFFER in setting the
options.
A link named 'tape' is made from the SCSI device directory to the class
-directory corresponding to the mode 0 auto-rewind device (e.g., st0).
+directory corresponding to the mode 0 auto-rewind device (e.g., st0).
-SYSFS AND STATISTICS FOR TAPE DEVICES
+Sysfs and Statistics for Tape Devices
+=====================================
The st driver maintains statistics for tape drives inside the sysfs filesystem.
The following method can be used to locate the statistics that are
@@ -160,10 +171,10 @@ available (assuming that sysfs is mounted at /sys):
1. Use opendir(3) on the directory /sys/class/scsi_tape
2. Use readdir(3) to read the directory contents
3. Use regcomp(3)/regexec(3) to match directory entries to the extended
- regular expression "^st[0-9]+$"
+ regular expression "^st[0-9]+$"
4. Access the statistics from the /sys/class/scsi_tape/<match>/stats
- directory (where <match> is a directory entry from /sys/class/scsi_tape
- that matched the extended regular expression)
+ directory (where <match> is a directory entry from /sys/class/scsi_tape
+ that matched the extended regular expression)
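+
+A user-space sketch of those four steps (error handling trimmed for
+brevity)::
+
+	#include <dirent.h>
+	#include <regex.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		DIR *d = opendir("/sys/class/scsi_tape");
+		struct dirent *e;
+		regex_t re;
+
+		regcomp(&re, "^st[0-9]+$", REG_EXTENDED);
+		while (d && (e = readdir(d)))
+			if (regexec(&re, e->d_name, 0, NULL, 0) == 0)
+				printf("/sys/class/scsi_tape/%s/stats\n",
+				       e->d_name);
+		regfree(&re);
+		if (d)
+			closedir(d);
+		return 0;
+	}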
The reason for using this approach is that all the character devices
pointing to the same tape drive use the same statistics. That means
@@ -171,29 +182,41 @@ that st0 would have the same statistics as nst0.
The directory contains the following statistics files:
-1. in_flight - The number of I/Os currently outstanding to this device.
-2. io_ns - The amount of time spent waiting (in nanoseconds) for all I/O
+1. in_flight
+ - The number of I/Os currently outstanding to this device.
+2. io_ns
+ - The amount of time spent waiting (in nanoseconds) for all I/O
to complete (including read and write). This includes tape movement
commands such as seeking between file or set marks and implicit tape
movement such as when rewind on close tape devices are used.
-3. other_cnt - The number of I/Os issued to the tape drive other than read or
+3. other_cnt
+ - The number of I/Os issued to the tape drive other than read or
write commands. The time taken to complete these commands is calculated
as io_ns - read_ns - write_ns.
-4. read_byte_cnt - The number of bytes read from the tape drive.
-5. read_cnt - The number of read requests issued to the tape drive.
-6. read_ns - The amount of time (in nanoseconds) spent waiting for read
+4. read_byte_cnt
+ - The number of bytes read from the tape drive.
+5. read_cnt
+ - The number of read requests issued to the tape drive.
+6. read_ns
+ - The amount of time (in nanoseconds) spent waiting for read
requests to complete.
-7. write_byte_cnt - The number of bytes written to the tape drive.
-8. write_cnt - The number of write requests issued to the tape drive.
-9. write_ns - The amount of time (in nanoseconds) spent waiting for write
+7. write_byte_cnt
+ - The number of bytes written to the tape drive.
+8. write_cnt
+ - The number of write requests issued to the tape drive.
+9. write_ns
+ - The amount of time (in nanoseconds) spent waiting for write
requests to complete.
-10. resid_cnt - The number of times during a read or write we found
+10. resid_cnt
+ - The number of times during a read or write we found
the residual amount to be non-zero. This usually means that a program
is issuing a read larger than the block size on tape. For writes it
means that not all data made it to tape.
-Note: The in_flight value is incremented when an I/O starts the I/O
-itself is not added to the statistics until it completes.
+.. Note::
+
+ The in_flight value is incremented when an I/O starts; the I/O
+ itself is not added to the statistics until it completes.
The sum of read_cnt, write_cnt, and other_cnt may not add up to the same
value as iodone_cnt at the device level. The tape statistics only count
@@ -210,7 +233,8 @@ The value of in_flight is 0 when there are no I/Os outstanding that are
issued by the st driver. Tape statistics do not take into account any
I/O performed via the sg device.
-BSD AND SYS V SEMANTICS
+BSD and Sys V Semantics
+=======================
The user can choose between these two behaviours of the tape driver by
defining the value of the symbol ST_SYSV. The semantics differ when a
@@ -221,13 +245,15 @@ filemark unless the filemark has just been crossed.
The default is BSD semantics.
-BUFFERING
+Buffering
+=========
The driver tries to do transfers directly to/from user space. If this
is not possible, a driver buffer allocated at run-time is used. If
direct i/o is not possible for the whole transfer, the driver buffer
is used (i.e., bounce buffers for individual pages are not
used). Direct i/o can be impossible for several reasons, e.g.:
+
- one or more pages are at addresses not reachable by the HBA
- the number of pages in the transfer exceeds the number of
scatter/gather segments permitted by the HBA
@@ -269,28 +295,30 @@ in the physical memory) are used if contiguous buffers can't be
allocated. To support all SCSI adapters (including those not
supporting scatter/gather), buffer allocation is using the following
three kinds of chunks:
+
1. The initial segment that is used for all SCSI adapters including
-those not supporting scatter/gather. The size of this buffer will be
-(PAGE_SIZE << ST_FIRST_ORDER) bytes if the system can give a chunk of
-this size (and it is not larger than the buffer size specified by
-ST_BUFFER_BLOCKS). If this size is not available, the driver halves
-the size and tries again until the size of one page. The default
-settings in st_options.h make the driver to try to allocate all of the
-buffer as one chunk.
+ those not supporting scatter/gather. The size of this buffer will be
+ (PAGE_SIZE << ST_FIRST_ORDER) bytes if the system can give a chunk of
+ this size (and it is not larger than the buffer size specified by
+ ST_BUFFER_BLOCKS). If this size is not available, the driver halves
+ the size and tries again until the size of one page. The default
+ settings in st_options.h make the driver try to allocate all of the
+ buffer as one chunk (see the sketch after this list).
2. The scatter/gather segments to fill the specified buffer size are
-allocated so that as many segments as possible are used but the number
-of segments does not exceed ST_FIRST_SG.
+ allocated so that as many segments as possible are used but the number
+ of segments does not exceed ST_FIRST_SG.
3. The remaining segments between ST_MAX_SG (or the module parameter
-max_sg_segs) and the number of segments used in phases 1 and 2
-are used to extend the buffer at run-time if this is necessary. The
-number of scatter/gather segments allowed for the SCSI adapter is not
-exceeded if it is smaller than the maximum number of scatter/gather
-segments specified. If the maximum number allowed for the SCSI adapter
-is smaller than the number of segments used in phases 1 and 2,
-extending the buffer will always fail.
+ max_sg_segs) and the number of segments used in phases 1 and 2
+ are used to extend the buffer at run-time if this is necessary. The
+ number of scatter/gather segments allowed for the SCSI adapter is not
+ exceeded if it is smaller than the maximum number of scatter/gather
+ segments specified. If the maximum number allowed for the SCSI adapter
+ is smaller than the number of segments used in phases 1 and 2,
+ extending the buffer will always fail.
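+
+As an illustration of the halving strategy described in item 1 above
+(a sketch only, not the st driver's actual code)::
+
+	int order = ST_FIRST_ORDER;
+	unsigned long buf = 0;
+
+	while (order >= 0 &&
+	       !(buf = __get_free_pages(GFP_KERNEL, order)))
+		order--;	/* halve the size and try again */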
-EOM BEHAVIOUR WHEN WRITING
+EOM Behaviour When Writing
+==========================
When the end of medium early warning is encountered, the current write
is finished and the number of bytes is returned. The next write
@@ -300,12 +328,13 @@ bytes is returned. After this, -1 and the number of bytes are
alternately returned until the physical end of medium (or some other
error) is encountered.
-
-MODULE PARAMETERS
+Module Parameters
+=================
The buffer size, write threshold, and the maximum number of allocated buffers
are configurable when the driver is loaded as a module. The keywords are:
+========================== ===========================================
buffer_kbs=xxx the buffer size for fixed block mode is set
to xxx kilobytes
write_threshold_kbs=xxx the write threshold in kilobytes set to xxx
@@ -313,12 +342,14 @@ max_sg_segs=xxx the maximum number of scatter/gather
segments
try_direct_io=x try direct transfer between user buffer and
tape drive if this is non-zero
+========================== ===========================================
Note that if the buffer size is changed but the write threshold is not
set, the write threshold is set to the new buffer size - 2 kB.
-BOOT TIME CONFIGURATION
+Boot Time Configuration
+=======================
If the driver is compiled into the kernel, the same parameters can
also be set using, e.g., the LILO command line. The preferred syntax is
@@ -332,21 +363,23 @@ versions is supported. The same keywords can be used as when loading
the driver as module. If several parameters are set, the keyword-value
pairs are separated with a comma (no spaces allowed). A colon can be
used instead of the equal mark. The definition is prepended by the
-string st=. Here is an example:
+string st=. Here is an example::
st=buffer_kbs:64,write_threshold_kbs:60
-The following syntax used by the old kernel versions is also supported:
+The following syntax used by the old kernel versions is also supported::
st=aa[,bb[,dd]]
-where
- aa is the buffer size for fixed block mode in 1024 byte units
- bb is the write threshold in 1024 byte units
- dd is the maximum number of scatter/gather segments
+where:
+ - aa is the buffer size for fixed block mode in 1024 byte units
+ - bb is the write threshold in 1024 byte units
+ - dd is the maximum number of scatter/gather segments
-IOCTLS
+
+IOCTLs
+======
The tape is positioned and the drive parameters are set with ioctls
defined in mtio.h. The tape control program 'mt' uses these ioctls. Try
@@ -359,55 +392,80 @@ The supported ioctls are:
The following ioctls use the structure mtop (a usage sketch follows the list):
-MTFSF Space forward over count filemarks. Tape positioned after filemark.
-MTFSFM As above but tape positioned before filemark.
-MTBSF Space backward over count filemarks. Tape positioned before
+MTFSF
+ Space forward over count filemarks. Tape positioned after filemark.
+MTFSFM
+ As above but tape positioned before filemark.
+MTBSF
+ Space backward over count filemarks. Tape positioned before
filemark.
-MTBSFM As above but ape positioned after filemark.
-MTFSR Space forward over count records.
-MTBSR Space backward over count records.
-MTFSS Space forward over count setmarks.
-MTBSS Space backward over count setmarks.
-MTWEOF Write count filemarks.
-MTWEOFI Write count filemarks with immediate bit set (i.e., does not
+MTBSFM
+ As above but tape positioned after filemark.
+MTFSR
+ Space forward over count records.
+MTBSR
+ Space backward over count records.
+MTFSS
+ Space forward over count setmarks.
+MTBSS
+ Space backward over count setmarks.
+MTWEOF
+ Write count filemarks.
+MTWEOFI
+ Write count filemarks with immediate bit set (i.e., does not
wait until data is on tape)
-MTWSM Write count setmarks.
-MTREW Rewind tape.
-MTOFFL Set device off line (often rewind plus eject).
-MTNOP Do nothing except flush the buffers.
-MTRETEN Re-tension tape.
-MTEOM Space to end of recorded data.
-MTERASE Erase tape. If the argument is zero, the short erase command
+MTWSM
+ Write count setmarks.
+MTREW
+ Rewind tape.
+MTOFFL
+ Set device off line (often rewind plus eject).
+MTNOP
+ Do nothing except flush the buffers.
+MTRETEN
+ Re-tension tape.
+MTEOM
+ Space to end of recorded data.
+MTERASE
+ Erase tape. If the argument is zero, the short erase command
is used. The long erase command is used with all other values
of the argument.
-MTSEEK Seek to tape block count. Uses Tandberg-compatible seek (QFA)
+MTSEEK
+ Seek to tape block count. Uses Tandberg-compatible seek (QFA)
for SCSI-1 drives and SCSI-2 seek for SCSI-2 drives. The file and
block numbers in the status are not valid after a seek.
-MTSETBLK Set the drive block size. Setting to zero sets the drive into
+MTSETBLK
+ Set the drive block size. Setting to zero sets the drive into
variable block mode (if applicable).
-MTSETDENSITY Sets the drive density code to arg. See drive
+MTSETDENSITY
+ Sets the drive density code to arg. See drive
documentation for available codes.
-MTLOCK and MTUNLOCK Explicitly lock/unlock the tape drive door.
-MTLOAD and MTUNLOAD Explicitly load and unload the tape. If the
+MTLOCK and MTUNLOCK
+ Explicitly lock/unlock the tape drive door.
+MTLOAD and MTUNLOAD
+ Explicitly load and unload the tape. If the
command argument x is between MT_ST_HPLOADER_OFFSET + 1 and
MT_ST_HPLOADER_OFFSET + 6, the number x is sent to the
drive with the command and selects the tape slot to use of the
HP C1553A changer.
-MTCOMPRESSION Sets compressing or uncompressing drive mode using the
+MTCOMPRESSION
+ Sets compressing or uncompressing drive mode using the
SCSI mode page 15. Note that some drives use other methods for
control of compression. Some drives (like the Exabytes) use
density codes for compression control. Some drives use another
mode page but this page has not been implemented in the
driver. Some drives without compression capability will accept
any compression mode without error.
-MTSETPART Moves the tape to the partition given by the argument at the
+MTSETPART
+ Moves the tape to the partition given by the argument at the
next tape operation. The block at which the tape is positioned
is the block where the tape was previously positioned in the
new active partition unless the next tape operation is
MTSEEK. In this case the tape is moved directly to the block
specified by MTSEEK. MTSETPART is inactive unless
MT_ST_CAN_PARTITIONS set.
-MTMKPART Formats the tape with one partition (argument zero) or two
+MTMKPART
+ Formats the tape with one partition (argument zero) or two
partitions (argument non-zero). If the argument is positive,
it specifies the size of partition 1 in megabytes. For DDS
drives and several early drives this is the physically first
@@ -422,64 +480,81 @@ MTSETDRVBUFFER
with mask MT_SET_OPTIONS, the low order bits are used as argument.
This command is only allowed for the superuser (root). The
subcommands are:
- 0
+
+ * 0
The drive buffer option is set to the argument. Zero means
no buffering.
- MT_ST_BOOLEANS
+ * MT_ST_BOOLEANS
Sets the buffering options. The bits are the new states
(enabled/disabled) the following options (in the
parenthesis is specified whether the option is global or
can be specified differently for each mode):
- MT_ST_BUFFER_WRITES write buffering (mode)
- MT_ST_ASYNC_WRITES asynchronous writes (mode)
- MT_ST_READ_AHEAD read ahead (mode)
- MT_ST_TWO_FM writing of two filemarks (global)
- MT_ST_FAST_EOM using the SCSI spacing to EOD (global)
- MT_ST_AUTO_LOCK automatic locking of the drive door (global)
- MT_ST_DEF_WRITES the defaults are meant only for writes (mode)
- MT_ST_CAN_BSR backspacing over more than one records can
+
+ MT_ST_BUFFER_WRITES
+ write buffering (mode)
+ MT_ST_ASYNC_WRITES
+ asynchronous writes (mode)
+ MT_ST_READ_AHEAD
+ read ahead (mode)
+ MT_ST_TWO_FM
+ writing of two filemarks (global)
+ MT_ST_FAST_EOM
+ using the SCSI spacing to EOD (global)
+ MT_ST_AUTO_LOCK
+ automatic locking of the drive door (global)
+ MT_ST_DEF_WRITES
+ the defaults are meant only for writes (mode)
+ MT_ST_CAN_BSR
+ backspacing over more than one record can
be used for repositioning the tape (global)
- MT_ST_NO_BLKLIMS the driver does not ask the block limits
+ MT_ST_NO_BLKLIMS
+ the driver does not ask the block limits
from the drive (block size can be changed only to
variable) (global)
- MT_ST_CAN_PARTITIONS enables support for partitioned
+ MT_ST_CAN_PARTITIONS
+ enables support for partitioned
tapes (global)
- MT_ST_SCSI2LOGICAL the logical block number is used in
+ MT_ST_SCSI2LOGICAL
+ the logical block number is used in
the MTSEEK and MTIOCPOS for SCSI-2 drives instead of
the device dependent address. It is recommended to set
this flag unless there are tapes using the device
dependent (from the old times) (global)
- MT_ST_SYSV sets the SYSV semantics (mode)
- MT_ST_NOWAIT enables immediate mode (i.e., don't wait for
+ MT_ST_SYSV
+ sets the SYSV semantics (mode)
+ MT_ST_NOWAIT
+ enables immediate mode (i.e., don't wait for
the command to finish) for some commands (e.g., rewind)
- MT_ST_NOWAIT_EOF enables immediate filemark mode (i.e. when
+ MT_ST_NOWAIT_EOF
+ enables immediate filemark mode (i.e. when
writing a filemark, don't wait for it to complete). Please
see the note in the Basics section about MTWEOFI with respect to the
possible dangers of writing immediate filemarks.
- MT_ST_SILI enables setting the SILI bit in SCSI commands when
+ MT_ST_SILI
+ enables setting the SILI bit in SCSI commands when
reading in variable block mode to enhance performance when
reading blocks shorter than the byte count; set this only
if you are sure that the drive supports SILI and the HBA
correctly returns transfer residuals
- MT_ST_DEBUGGING debugging (global; debugging must be
+ MT_ST_DEBUGGING
+ debugging (global; debugging must be
compiled into the driver)
- MT_ST_SETBOOLEANS
- MT_ST_CLEARBOOLEANS
+
+ * MT_ST_SETBOOLEANS, MT_ST_CLEARBOOLEANS
Sets or clears the option bits.
- MT_ST_WRITE_THRESHOLD
+ * MT_ST_WRITE_THRESHOLD
Sets the write threshold for this device to kilobytes
specified by the lowest bits.
- MT_ST_DEF_BLKSIZE
+ * MT_ST_DEF_BLKSIZE
Defines the default block size set automatically. Value
0xffffff means that the default is not used any more.
- MT_ST_DEF_DENSITY
- MT_ST_DEF_DRVBUFFER
+ * MT_ST_DEF_DENSITY, MT_ST_DEF_DRVBUFFER
Used to set or clear the density (8 bits), and drive buffer
state (3 bits). If the value is MT_ST_CLEAR_DEFAULT
(0xfffff) the default will not be used any more. Otherwise
the lowermost bits of the value contain the new value of
the parameter.
- MT_ST_DEF_COMPRESSION
+ * MT_ST_DEF_COMPRESSION
The compression default will not be used if the value of
the lowermost byte is 0xff. Otherwise the lowermost bit
contains the new default. If the bits 8-15 are set to a
@@ -487,17 +562,17 @@ MTSETDRVBUFFER
used as the compression algorithm. The value
MT_ST_CLEAR_DEFAULT can be used to clear the compression
default.
- MT_ST_SET_TIMEOUT
+ * MT_ST_SET_TIMEOUT
Set the normal timeout in seconds for this device. The
default is 900 seconds (15 minutes). The timeout should be
long enough for the retries done by the device while
reading/writing.
- MT_ST_SET_LONG_TIMEOUT
+ * MT_ST_SET_LONG_TIMEOUT
Set the long timeout that is used for operations that are
known to take a long time. The default is 14000 seconds
(3.9 hours). For erase this value is further multiplied by
eight.
- MT_ST_SET_CLN
+ * MT_ST_SET_CLN
Set the cleaning request interpretation parameters using
the lowest 24 bits of the argument. The driver can set the
generic status bit GMT_CLN if a cleaning request bit pattern
@@ -506,7 +581,7 @@ MTSETDRVBUFFER
cleaning. The bits are device-dependent. The driver is
given the number of the sense data byte (the lowest eight
bits of the argument; must be >= 18 (values 1 - 17
- reserved) and <= the maximum requested sense data sixe),
+ reserved) and <= the maximum requested sense data size),
a mask to select the relevant bits (the bits 9-16), and the
bit pattern (bits 17-23). If the bit pattern is zero, one
or more bits under the mask indicate cleaning request. If
@@ -518,12 +593,16 @@ MTSETDRVBUFFER
MT_ST_SET_CLN.)
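+
+A user-space sketch of issuing one of the mtop commands above (the
+device path is just an example)::
+
+	#include <fcntl.h>
+	#include <sys/ioctl.h>
+	#include <sys/mtio.h>
+
+	int main(void)
+	{
+		int fd = open("/dev/nst0", O_RDONLY);
+		struct mtop op = { .mt_op = MTFSF, .mt_count = 1 };
+
+		if (fd >= 0)
+			ioctl(fd, MTIOCTOP, &op); /* space over 1 filemark */
+		return 0;
+	}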
The following ioctl uses the structure mtpos:
-MTIOCPOS Reads the current position from the drive. Uses
+
+MTIOCPOS
+ Reads the current position from the drive. Uses
Tandberg-compatible QFA for SCSI-1 drives and the SCSI-2
command for the SCSI-2 drives.
The following ioctl uses the structure mtget to return the status:
-MTIOCGET Returns some status information.
+
+MTIOCGET
+ Returns some status information.
The file number and block number within file are returned. The
block is -1 when it can't be determined (e.g., after MTBSF).
The drive type is either MTISSCSI1 or MTISSCSI2.
@@ -537,7 +616,8 @@ MTIOCGET Returns some status information.
end of recorded data or end of tape. GMT_EOT means end of tape.
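+
+	Continuing the sketch above (same 'fd'), the status could be read
+	like this::
+
+		struct mtget status;
+
+		if (ioctl(fd, MTIOCGET, &status) == 0 &&
+		    GMT_EOF(status.mt_gstat))
+			;	/* positioned just after a filemark */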
-MISCELLANEOUS COMPILE OPTIONS
+Miscellaneous Compile Options
+=============================
The recovered write errors are considered fatal if ST_RECOVERED_WRITE_FATAL
is defined.
@@ -568,7 +648,8 @@ time or the MT_ST_CAN_BSR bit is set for the drive with an ioctl.
user does not request data that far.)
-DEBUGGING HINTS
+Debugging Hints
+===============
Debugging code is now compiled in by default but debugging is turned off
with the kernel module parameter debug_flag defaulting to 0. Debugging
diff --git a/Documentation/scsi/sym53c500_cs.txt b/Documentation/scsi/sym53c500_cs.rst
index 75febcf9298c..55464861bbd5 100644
--- a/Documentation/scsi/sym53c500_cs.txt
+++ b/Documentation/scsi/sym53c500_cs.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+The sym53c500_cs Driver
+=======================
+
The sym53c500_cs driver originated as an add-on to David Hinds' pcmcia-cs
package, and was written by Tom Corner (tcorner@via.at). A rewrite was
long overdue, and the current version addresses the following concerns:
@@ -20,4 +26,4 @@ Through the years, there have been a number of downloads of the pcmcia-cs
version of this driver, and I guess it worked for those users. It worked
for Tom Corner, and it works for me. Your mileage will probably vary.
---Bob Tracy (rct@frus.com)
+Bob Tracy (rct@frus.com)
diff --git a/Documentation/scsi/sym53c8xx_2.txt b/Documentation/scsi/sym53c8xx_2.rst
index d28186553fb0..8de44a7baa9b 100644
--- a/Documentation/scsi/sym53c8xx_2.txt
+++ b/Documentation/scsi/sym53c8xx_2.rst
@@ -1,99 +1,111 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================
The Linux SYM-2 driver documentation file
+=========================================
Written by Gerard Roudier <groudier@free.fr>
+
21 Rue Carnot
+
95170 DEUIL LA BARRE - FRANCE
Updated by Matthew Wilcox <matthew@wil.cx>
2004-10-09
-===============================================================================
-
-1. Introduction
-2. Supported chips and SCSI features
-3. Advantages of this driver for newer chips.
- 3.1 Optimized SCSI SCRIPTS
- 3.2 New features appeared with the SYM53C896
-4. Memory mapped I/O versus normal I/O
-5. Tagged command queueing
-6. Parity checking
-7. Profiling information
-8. Control commands
- 8.1 Set minimum synchronous period
- 8.2 Set wide size
- 8.3 Set maximum number of concurrent tagged commands
- 8.4 Set debug mode
- 8.5 Set flag (no_disc)
- 8.6 Set verbose level
- 8.7 Reset all logical units of a target
- 8.8 Abort all tasks of all logical units of a target
-9. Configuration parameters
-10. Boot setup commands
- 10.1 Syntax
- 10.2 Available arguments
- 10.2.1 Default number of tagged commands
- 10.2.2 Burst max
- 10.2.3 LED support
- 10.2.4 Differential mode
- 10.2.5 IRQ mode
- 10.2.6 Check SCSI BUS
- 10.2.7 Suggest a default SCSI id for hosts
- 10.2.8 Verbosity level
- 10.2.9 Debug mode
- 10.2.10 Settle delay
- 10.2.11 Serial NVRAM
- 10.2.12 Exclude a host from being attached
- 10.3 Converting from old options
- 10.4 SCSI BUS checking boot option
-11. SCSI problem troubleshooting
- 15.1 Problem tracking
- 15.2 Understanding hardware error reports
-12. Serial NVRAM support (by Richard Waltham)
- 17.1 Features
- 17.2 Symbios NVRAM layout
- 17.3 Tekram NVRAM layout
-
-===============================================================================
+
+.. Contents
+
+ 1. Introduction
+ 2. Supported chips and SCSI features
+ 3. Advantages of this driver for newer chips.
+ 3.1 Optimized SCSI SCRIPTS
+ 3.2 New features appeared with the SYM53C896
+ 4. Memory mapped I/O versus normal I/O
+ 5. Tagged command queueing
+ 6. Parity checking
+ 7. Profiling information
+ 8. Control commands
+ 8.1 Set minimum synchronous period
+ 8.2 Set wide size
+ 8.3 Set maximum number of concurrent tagged commands
+ 8.4 Set debug mode
+ 8.5 Set flag (no_disc)
+ 8.6 Set verbose level
+ 8.7 Reset all logical units of a target
+ 8.8 Abort all tasks of all logical units of a target
+ 9. Configuration parameters
+ 10. Boot setup commands
+ 10.1 Syntax
+ 10.2 Available arguments
+ 10.2.1 Default number of tagged commands
+ 10.2.2 Burst max
+ 10.2.3 LED support
+ 10.2.4 Differential mode
+ 10.2.5 IRQ mode
+ 10.2.6 Check SCSI BUS
+ 10.2.7 Suggest a default SCSI id for hosts
+ 10.2.8 Verbosity level
+ 10.2.9 Debug mode
+ 10.2.10 Settle delay
+ 10.2.11 Serial NVRAM
+ 10.2.12 Exclude a host from being attached
+ 10.3 Converting from old options
+ 10.4 SCSI BUS checking boot option
+ 11. SCSI problem troubleshooting
+ 15.1 Problem tracking
+ 15.2 Understanding hardware error reports
+ 12. Serial NVRAM support (by Richard Waltham)
+ 17.1 Features
+ 17.2 Symbios NVRAM layout
+ 17.3 Tekram NVRAM layout
+
1. Introduction
+===============
This driver supports the whole SYM53C8XX family of PCI-SCSI controllers.
-It also support the subset of LSI53C10XX PCI-SCSI controllers that are based
+It also supports the subset of LSI53C10XX PCI-SCSI controllers that are based
on the SYM53C8XX SCRIPTS language.
-It replaces the sym53c8xx+ncr53c8xx driver bundle and shares its core code
-with the FreeBSD SYM-2 driver. The `glue' that allows this driver to work
+It replaces the sym53c8xx+ncr53c8xx driver bundle and shares its core code
+with the FreeBSD SYM-2 driver. The 'glue' that allows this driver to work
under Linux is contained in 2 files named sym_glue.h and sym_glue.c.
-Other drivers files are intended not to depend on the Operating System
+Other driver files are intended not to depend on the Operating System
on which the driver is used.
The history of this driver can be summarized as follows:
1993: ncr driver written for 386bsd and FreeBSD by:
- Wolfgang Stanglmeier <wolf@cologne.de>
- Stefan Esser <se@mi.Uni-Koeln.de>
+
+ - Wolfgang Stanglmeier <wolf@cologne.de>
+ - Stefan Esser <se@mi.Uni-Koeln.de>
1996: port of the ncr driver to Linux-1.2.13 and rename it ncr53c8xx.
- Gerard Roudier
-1998: new sym53c8xx driver for Linux based on LOAD/STORE instruction and that
+ - Gerard Roudier
+
+1998: new sym53c8xx driver for Linux based on LOAD/STORE instructions that
adds full support for the 896 but drops support for early NCR devices.
- Gerard Roudier
-1999: port of the sym53c8xx driver to FreeBSD and support for the LSI53C1010
- 33 MHz and 66MHz Ultra-3 controllers. The new driver is named `sym'.
- Gerard Roudier
+ - Gerard Roudier
-2000: Add support for early NCR devices to FreeBSD `sym' driver.
- Break the driver into several sources and separate the OS glue
+1999: port of the sym53c8xx driver to FreeBSD and support for the LSI53C1010
+ 33 MHz and 66 MHz Ultra-3 controllers. The new driver is named 'sym'.
+
+ - Gerard Roudier
+
+2000: Add support for early NCR devices to FreeBSD 'sym' driver.
+ Break the driver into several sources and separate the OS glue
code from the core code that can be shared among different O/Ses.
Write glue code for Linux.
- Gerard Roudier
+
+ - Gerard Roudier
2004: Remove FreeBSD compatibility code. Remove support for versions of
Linux before 2.6. Start using Linux facilities.
-This README file addresses the Linux version of the driver. Under FreeBSD,
+This README file addresses the Linux version of the driver. Under FreeBSD,
the driver documentation is the sym.8 man page.
Information about new chips is available at LSILOGIC web server:
@@ -104,113 +116,145 @@ SCSI standard documentations are available at T10 site:
http://www.t10.org/
-Useful SCSI tools written by Eric Youngdale are part of most Linux
+Useful SCSI tools written by Eric Youngdale are part of most Linux
distributions:
- scsiinfo: command line tool
- scsi-config: TCL/Tk tool using scsiinfo
+
+ ============ ==========================
+ scsiinfo command line tool
+ scsi-config TCL/Tk tool using scsiinfo
+ ============ ==========================
2. Supported chips and SCSI features
+====================================
The following features are supported for all chips:
- Synchronous negotiation
- Disconnection
- Tagged command queuing
- SCSI parity checking
- PCI Master parity checking
+ - Synchronous negotiation
+ - Disconnection
+ - Tagged command queuing
+ - SCSI parity checking
+ - PCI Master parity checking
Other features depend on chip capabilities.
-The driver notably uses optimized SCRIPTS for devices that support
-LOAD/STORE and handles PHASE MISMATCH from SCRIPTS for devices that
+
+The driver notably uses optimized SCRIPTS for devices that support
+LOAD/STORE and handles PHASE MISMATCH from SCRIPTS for devices that
support the corresponding feature.
The following table shows some characteristics of the chip family.
- On board LOAD/STORE HARDWARE
-Chip SDMS BIOS Wide SCSI std. Max. sync SCRIPTS PHASE MISMATCH
----- --------- ---- --------- ---------- ---------- --------------
-810 N N FAST10 10 MB/s N N
-810A N N FAST10 10 MB/s Y N
-815 Y N FAST10 10 MB/s N N
-825 Y Y FAST10 20 MB/s N N
-825A Y Y FAST10 20 MB/s Y N
-860 N N FAST20 20 MB/s Y N
-875 Y Y FAST20 40 MB/s Y N
-875A Y Y FAST20 40 MB/s Y Y
-876 Y Y FAST20 40 MB/s Y N
-895 Y Y FAST40 80 MB/s Y N
-895A Y Y FAST40 80 MB/s Y Y
-896 Y Y FAST40 80 MB/s Y Y
-897 Y Y FAST40 80 MB/s Y Y
-1510D Y Y FAST40 80 MB/s Y Y
-1010 Y Y FAST80 160 MB/s Y Y
-1010_66* Y Y FAST80 160 MB/s Y Y
-
-* Chip supports 33MHz and 66MHz PCI bus clock.
++--------+-----------+-----+-----------+------------+------------+---------+
+| | | | | |Load/store |Hardware |
+| |On board | | | |scripts |phase |
+|Chip |SDMS BIOS |Wide |SCSI std. | Max. sync | |mismatch |
++--------+-----------+-----+-----------+------------+------------+---------+
+|810 | N | N | FAST10 | 10 MB/s | N | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|810A | N | N | FAST10 | 10 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|815 | Y | N | FAST10 | 10 MB/s | N | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|825 | Y | Y | FAST10 | 20 MB/s | N | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|825A | Y | Y | FAST10 | 20 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|860 | N | N | FAST20 | 20 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|875 | Y | Y | FAST20 | 40 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|875A | Y | Y | FAST20 | 40 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+---------+
+|876 | Y | Y | FAST20 | 40 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|895 | Y | Y | FAST40 | 80 MB/s | Y | N |
++--------+-----------+-----+-----------+------------+------------+---------+
+|895A | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+---------+
+|896 | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+---------+
+|897 | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+---------+
+|1510D | Y | Y | FAST40 | 80 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+---------+
+|1010 | Y | Y | FAST80 |160 MB/s | Y | Y |
++--------+-----------+-----+-----------+------------+------------+---------+
+|1010_66 | Y | Y | FAST80 |160 MB/s | Y | Y |
+|[1]_ | | | | | | |
++--------+-----------+-----+-----------+------------+------------+---------+
+
+.. [1] Chip supports 33 MHz and 66 MHz PCI bus clock.
Summary of other supported features:
-Module: allow to load the driver
-Memory mapped I/O: increases performance
-Control commands: write operations to the proc SCSI file system
-Debugging information: written to syslog (expert only)
-Scatter / gather
-Shared interrupt
-Boot setup commands
-Serial NVRAM: Symbios and Tekram formats
+:Module: allows the driver to be loaded as a module
+:Memory mapped I/O: increases performance
+:Control commands: write operations to the proc SCSI file system
+:Debugging information: written to syslog (expert only)
+:Serial NVRAM: Symbios and Tekram formats
+
+- Scatter / gather
+- Shared interrupt
+- Boot setup commands
3. Advantages of this driver for newer chips.
+=============================================
-3.1 Optimized SCSI SCRIPTS.
+3.1 Optimized SCSI SCRIPTS
+--------------------------
-All chips except the 810, 815 and 825, support new SCSI SCRIPTS instructions
-named LOAD and STORE that allow to move up to 1 DWORD from/to an IO register
-to/from memory much faster that the MOVE MEMORY instruction that is supported
+All chips except the 810, 815 and 825 support new SCSI SCRIPTS instructions
+named LOAD and STORE that allow moving up to 1 DWORD from/to an IO register
+to/from memory much faster than the MOVE MEMORY instruction that is supported
by the 53c7xx and 53c8xx family.
-The LOAD/STORE instructions support absolute and DSA relative addressing
-modes. The SCSI SCRIPTS had been entirely rewritten using LOAD/STORE instead
+The LOAD/STORE instructions support absolute and DSA relative addressing
+modes. The SCSI SCRIPTS have been entirely rewritten using LOAD/STORE instead
of MOVE MEMORY instructions.
-Due to the lack of LOAD/STORE SCRIPTS instructions by earlier chips, this
-driver also incorporates a different SCRIPTS set based on MEMORY MOVE, in
+Due to the lack of LOAD/STORE SCRIPTS instructions in earlier chips, this
+driver also incorporates a different SCRIPTS set based on MEMORY MOVE, in
order to provide support for the entire SYM53C8XX chips family.
3.2 New features appeared with the SYM53C896
+--------------------------------------------
-Newer chips (see above) allows handling of the phase mismatch context from
-SCRIPTS (avoids the phase mismatch interrupt that stops the SCSI processor
+Newer chips (see above) allow handling of the phase mismatch context from
+SCRIPTS (avoids the phase mismatch interrupt that stops the SCSI processor
until the C code has saved the context of the transfer).
-The 896 and 1010 chips support 64 bit PCI transactions and addressing,
+The 896 and 1010 chips support 64 bit PCI transactions and addressing,
while the 895A supports 32 bit PCI transactions and 64 bit addressing.
-The SCRIPTS processor of these chips is not true 64 bit, but uses segment
-registers for bit 32-63. Another interesting feature is that LOAD/STORE
+The SCRIPTS processor of these chips is not true 64 bit, but uses segment
+registers for bits 32-63. Another interesting feature is that LOAD/STORE
instructions that address the on-chip RAM (8k) remain internal to the chip.
4. Memory mapped I/O versus normal I/O
+======================================
-Memory mapped I/O has less latency than normal I/O and is the recommended
-way for doing IO with PCI devices. Memory mapped I/O seems to work fine on
-most hardware configurations, but some poorly designed chipsets may break
-this feature. A configuration option is provided for normal I/O to be
+Memory mapped I/O has less latency than normal I/O and is the recommended
+way for doing IO with PCI devices. Memory mapped I/O seems to work fine on
+most hardware configurations, but some poorly designed chipsets may break
+this feature. A configuration option is provided for normal I/O to be
used but the driver defaults to MMIO.
5. Tagged command queueing
+==========================
-Queuing more than 1 command at a time to a device allows it to perform
-optimizations based on actual head positions and its mechanical
+Queuing more than 1 command at a time to a device allows it to perform
+optimizations based on actual head positions and its mechanical
characteristics. This feature may also reduce average command latency.
-In order to really gain advantage of this feature, devices must have
-a reasonable cache size (No miracle is to be expected for a low-end
+In order to really take advantage of this feature, devices must have
+a reasonable cache size (no miracle is to be expected for a low-end
hard disk with 128 KB of cache or less).
+
Some known old SCSI devices do not properly support tagged command queuing.
-Generally, firmware revisions that fix this kind of problems are available
+Generally, firmware revisions that fix this kind of problem are available
at respective vendor web/ftp sites.
-All I can say is that I never have had problem with tagged queuing using
-this driver and its predecessors. Hard disks that behaved correctly for
+
+All I can say is that I have never had problems with tagged queuing using
+this driver and its predecessors. Hard disks that behaved correctly for
me using tagged commands are the following:
- IBM S12 0662
@@ -223,9 +267,9 @@ me using tagged commands are the following:
- Quantum Atlas IV
- Seagate Cheetah II
-If your controller has NVRAM, you can configure this feature per target
-from the user setup tool. The Tekram Setup program allows to tune the
-maximum number of queued commands up to 32. The Symbios Setup only allows
+If your controller has NVRAM, you can configure this feature per target
+from the user setup tool. The Tekram Setup program allows tuning the
+maximum number of queued commands up to 32. The Symbios Setup only allows
enabling or disabling this feature.
The maximum number of simultaneous tagged commands queued to a device
@@ -233,15 +277,15 @@ is currently set to 16 by default. This value is suitable for most SCSI
disks. With large SCSI disks (>= 2GB, cache >= 512KB, average seek time
<= 10 ms), using a larger value may give better performances.
-This driver supports up to 255 commands per device, and but using more than
-64 is generally not worth-while, unless you are using a very large disk or
-disk arrays. It is noticeable that most of recent hard disks seem not to
-accept more than 64 simultaneous commands. So, using more than 64 queued
+This driver supports up to 255 commands per device, but using more than
+64 is generally not worthwhile, unless you are using a very large disk or
+disk arrays. It is noticeable that most recent hard disks seem not to
+accept more than 64 simultaneous commands. So, using more than 64 queued
commands is probably just resource wasting.
-If your controller does not have NVRAM or if it is managed by the SDMS
-BIOS/SETUP, you can configure tagged queueing feature and device queue
-depths from the boot command-line. For example:
+If your controller does not have NVRAM or if it is managed by the SDMS
+BIOS/SETUP, you can configure tagged queueing feature and device queue
+depths from the boot command-line. For example::
sym53c8xx=tags:4/t2t3q15-t4q7/t1u0q32
@@ -257,25 +301,28 @@ In some special conditions, some SCSI disk firmwares may return a
QUEUE FULL status for a SCSI command. This behaviour is managed by the
driver using the following heuristic:
-- Each time a QUEUE FULL status is returned, tagged queue depth is reduced
- to the actual number of disconnected commands.
+- Each time a QUEUE FULL status is returned, tagged queue depth is reduced
+ to the actual number of disconnected commands.
- Every 200 successfully completed SCSI commands, if allowed by the
current limit, the maximum number of queueable commands is incremented.
-Since QUEUE FULL status reception and handling is resource wasting, the
-driver notifies by default this problem to user by indicating the actual
-number of commands used and their status, as well as its decision on the
+Since receiving and handling QUEUE FULL status wastes resources, the
+driver by default notifies the user of this problem by indicating the actual
+number of commands used and their status, as well as its decision on the
device queue depth change.
-The heuristic used by the driver in handling QUEUE FULL ensures that the
-impact on performances is not too bad. You can get rid of the messages by
+The heuristic used by the driver in handling QUEUE FULL ensures that the
+impact on performance is not too bad. You can get rid of the messages by
setting the verbose level to zero, as follows:
-1st method: boot your system using 'sym53c8xx=verb:0' option.
-2nd method: apply "setverbose 0" control command to the proc fs entry
+1st method:
+ boot your system using 'sym53c8xx=verb:0' option.
+2nd method:
+ apply "setverbose 0" control command to the proc fs entry
corresponding to your controller after boot-up.
6. Parity checking
+==================
The driver supports SCSI parity checking and PCI bus master parity
checking. These features must be enabled in order to ensure safe
@@ -284,17 +331,19 @@ with parity. The options to defeat parity checking have been removed
from the driver.
7. Profiling information
+========================
This driver does not provide profiling information as did its predecessors.
-This feature was not this useful and added complexity to the code.
-As the driver code got more complex, I have decided to remove everything
+This feature was not that useful and it added complexity to the code.
+As the driver code got more complex, I have decided to remove everything
that didn't seem actually useful.
8. Control commands
+===================
Control commands can be sent to the driver with write operations to
the proc SCSI file system. The generic command syntax is the
-following:
+following::
echo "<verb> <parameters>" >/proc/scsi/sym53c8xx/0
(assumes controller number is 0)
@@ -305,97 +354,112 @@ apply to all targets of the SCSI chain (except the controller).
Available commands:
8.1 Set minimum synchronous period factor
+-----------------------------------------
setsync <target> <period factor>
- target: target number
- period: minimum synchronous period.
+ :target: target number
+ :period: minimum synchronous period.
Maximum speed = 1000/(4*period factor) except for special
cases below.
Specify a period of 0, to force asynchronous transfer mode.
- 9 means 12.5 nano-seconds synchronous period
- 10 means 25 nano-seconds synchronous period
- 11 means 30 nano-seconds synchronous period
- 12 means 50 nano-seconds synchronous period
+ - 9 means 12.5 nano-seconds synchronous period
+ - 10 means 25 nano-seconds synchronous period
+ - 11 means 30 nano-seconds synchronous period
+ - 12 means 50 nano-seconds synchronous period
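+
+   For example, a sketch asking target 2 to negotiate a 25 ns period
+   (controller number 0 assumed)::
+
+      echo "setsync 2 10" >/proc/scsi/sym53c8xx/0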
8.2 Set wide size
+-----------------
setwide <target> <size>
- target: target number
- size: 0=8 bits, 1=16bits
+ :target: target number
+ :size: 0=8 bits, 1=16bits
8.3 Set maximum number of concurrent tagged commands
-
+----------------------------------------------------
+
settags <target> <tags>
- target: target number
- tags: number of concurrent tagged commands
+ :target: target number
+ :tags: number of concurrent tagged commands
must not be greater than configured (default: 16)
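+
+   For example, a sketch limiting target 3 to 8 concurrent tagged commands
+   (controller number 0 assumed)::
+
+      echo "settags 3 8" >/proc/scsi/sym53c8xx/0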
8.4 Set debug mode
+------------------
setdebug <list of debug flags>
Available debug flags:
- alloc: print info about memory allocations (ccb, lcb)
- queue: print info about insertions into the command start queue
- result: print sense data on CHECK CONDITION status
- scatter: print info about the scatter process
- scripts: print info about the script binding process
- tiny: print minimal debugging information
- timing: print timing information of the NCR chip
- nego: print information about SCSI negotiations
- phase: print information on script interruptions
+
+ ======== ========================================================
+ alloc print info about memory allocations (ccb, lcb)
+ queue print info about insertions into the command start queue
+ result print sense data on CHECK CONDITION status
+ scatter print info about the scatter process
+ scripts print info about the script binding process
+ tiny print minimal debugging information
+ timing print timing information of the NCR chip
+ nego print information about SCSI negotiations
+ phase print information on script interruptions
+ ======== ========================================================
Use "setdebug" with no argument to reset debug flags.
8.5 Set flag (no_disc)
-
+----------------------
+
setflag <target> <flag>
- target: target number
+ :target: target number
For the moment, only one flag is available:
no_disc: do not allow the target to disconnect.
Do not specify any flag in order to reset the flag. For example:
- - setflag 4
+
+ setflag 4
will reset the no_disc flag for target 4, allowing it to disconnect again.
- - setflag all
+ setflag all
will allow disconnection for all devices on the SCSI bus.
8.6 Set verbose level
+---------------------
setverbose #level
- The driver default verbose level is 1. This command allows to change
+ The driver default verbose level is 1. This command allows changing
the driver verbose level after boot-up.
8.7 Reset all logical units of a target
+---------------------------------------
resetdev <target>
- target: target number
+ :target: target number
+
The driver will try to send a BUS DEVICE RESET message to the target.
8.8 Abort all tasks of all logical units of a target
+----------------------------------------------------
cleardev <target>
- target: target number
- The driver will try to send a ABORT message to all the logical units
+ :target: target number
+
+ The driver will try to send an ABORT message to all the logical units
of the target.
9. Configuration parameters
+===========================
-Under kernel configuration tools (make menuconfig, for example), it is
+Under kernel configuration tools (make menuconfig, for example), it is
possible to change some default driver configuration parameters.
If the firmware of all your devices is perfect enough, all the
features supported by the driver can be enabled at start-up. However,
@@ -414,166 +478,238 @@ Default tagged command queue depth (default answer: 16)
This parameter can be specified from the boot command line.
Maximum number of queued commands (default answer: 32)
- This option allows you to specify the maximum number of tagged commands
+ This option allows you to specify the maximum number of tagged commands
that can be queued to a device. The maximum supported value is 255.
Synchronous transfers frequency (default answer: 80)
- This option allows you to specify the frequency in MHz the driver
+ This option allows you to specify the frequency in MHz the driver
will use at boot time for synchronous data transfer negotiations.
0 means "asynchronous data transfers".
10. Boot setup commands
+=======================
10.1 Syntax
+-----------
Setup commands can be passed to the driver either at boot time or as
parameters to modprobe, as described in Documentation/admin-guide/kernel-parameters.rst
-Example of boot setup command under lilo prompt:
+Example of boot setup command under lilo prompt::
-lilo: linux root=/dev/sda2 sym53c8xx.cmd_per_lun=4 sym53c8xx.sync=10 sym53c8xx.debug=0x200
+ lilo: linux root=/dev/sda2 sym53c8xx.cmd_per_lun=4 sym53c8xx.sync=10 sym53c8xx.debug=0x200
- enable tagged commands, up to 4 tagged commands queued.
- set synchronous negotiation speed to 10 Mega-transfers / second.
- set DEBUG_NEGO flag.
The following command will install the driver module with the same
-options as above.
+options as above::
modprobe sym53c8xx cmd_per_lun=4 sync=10 debug=0x200
10.2 Available arguments
+------------------------
10.2.1 Default number of tagged commands
- cmd_per_lun=0 (or cmd_per_lun=1) tagged command queuing disabled
- cmd_per_lun=#tags (#tags > 1) tagged command queuing enabled
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ - cmd_per_lun=0 (or cmd_per_lun=1) tagged command queuing disabled
+ - cmd_per_lun=#tags (#tags > 1) tagged command queuing enabled
+
#tags will be truncated to the max queued commands configuration parameter.
10.2.2 Burst max
+^^^^^^^^^^^^^^^^
+
+ ========== ======================================================
burst=0 burst disabled
burst=255 get burst length from initial IO register settings.
burst=#x burst enabled (1<<#x burst transfers max)
- #x is an integer value which is log base 2 of the burst transfers max.
+
+ #x is an integer value which is log base 2 of the burst
+ transfers max.
+ ========== ======================================================
+
By default the driver uses the maximum value supported by the chip.
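+
+   For example, burst=4 would enable bursts of up to 16 (1<<4) transfers.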
10.2.3 LED support
+^^^^^^^^^^^^^^^^^^
+
+ ===== ===================
led=1 enable LED support
led=0 disable LED support
+ ===== ===================
+
Do not enable LED support if your scsi board does not use SDMS BIOS.
(See 'Configuration parameters')
10.2.4 Differential mode
- diff=0 never set up diff mode
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ====== =================================
+ diff=0 never set up diff mode
diff=1 set up diff mode if BIOS set it
diff=2 always set up diff mode
diff=3 set diff mode if GPIO3 is not set
+ ====== =================================
10.2.5 IRQ mode
+^^^^^^^^^^^^^^^
+
+ ====== ================================================
irqm=0 always open drain
irqm=1 same as initial settings (assumed BIOS settings)
irqm=2 always totem pole
+ ====== ================================================
+
+10.2.6 Check SCSI BUS
+^^^^^^^^^^^^^^^^^^^^^
-10.2.6 Check SCSI BUS
buschk=<option bits>
Available option bits:
- 0x0: No check.
- 0x1: Check and do not attach the controller on error.
- 0x2: Check and just warn on error.
+
+ === ================================================
+ 0x0 No check.
+ 0x1 Check and do not attach the controller on error.
+ 0x2 Check and just warn on error.
+ === ================================================
10.2.7 Suggest a default SCSI id for hosts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ ========== ==========================================
hostid=255 no id suggested.
- hostid=#x (0 < x < 7) x suggested for hosts SCSI id.
+ hostid=#x (0 < x < 7) x suggested for hosts SCSI id.
+ ========== ==========================================
- If a host SCSI id is available from the NVRAM, the driver will ignore
- any value suggested as boot option. Otherwise, if a suggested value
- different from 255 has been supplied, it will use it. Otherwise, it will
- try to deduce the value previously set in the hardware and use value
+ If a host SCSI id is available from the NVRAM, the driver will ignore
+ any value suggested as boot option. Otherwise, if a suggested value
+ different from 255 has been supplied, it will use it. Otherwise, it will
+ try to deduce the value previously set in the hardware and use value
7 if the hardware value is zero.
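+
+   For example, a sketch suggesting SCSI id 5 when loading the module::
+
+      modprobe sym53c8xx hostid=5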
10.2.8 Verbosity level
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ ====== ========
verb=0 minimal
verb=1 normal
verb=2 too much
+ ====== ========
10.2.9 Debug mode
- debug=0 clear debug flags
- debug=#x set debug flags
- #x is an integer value combining the following power-of-2 values:
- DEBUG_ALLOC 0x1
- DEBUG_PHASE 0x2
- DEBUG_POLL 0x4
- DEBUG_QUEUE 0x8
- DEBUG_RESULT 0x10
- DEBUG_SCATTER 0x20
- DEBUG_SCRIPT 0x40
- DEBUG_TINY 0x80
- DEBUG_TIMING 0x100
- DEBUG_NEGO 0x200
- DEBUG_TAGS 0x400
- DEBUG_FREEZE 0x800
- DEBUG_RESTART 0x1000
-
- You can play safely with DEBUG_NEGO. However, some of these flags may
- generate bunches of syslog messages.
+^^^^^^^^^^^^^^^^^
+
+ ========= ====================================
+ debug=0 clear debug flags
+ debug=#x set debug flags
+
+ #x is an integer value combining the
+ following power-of-2 values:
+
+ ============= ======
+ DEBUG_ALLOC 0x1
+ DEBUG_PHASE 0x2
+ DEBUG_POLL 0x4
+ DEBUG_QUEUE 0x8
+ DEBUG_RESULT 0x10
+ DEBUG_SCATTER 0x20
+ DEBUG_SCRIPT 0x40
+ DEBUG_TINY 0x80
+ DEBUG_TIMING 0x100
+ DEBUG_NEGO 0x200
+ DEBUG_TAGS 0x400
+ DEBUG_FREEZE 0x800
+ DEBUG_RESTART 0x1000
+ ============= ======
+ ========= ====================================
+
+ You can play safely with DEBUG_NEGO. However, some of these flags may
+ generate bunches of syslog messages.
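+
+   For example, debug=0x202 combines DEBUG_NEGO (0x200) with
+   DEBUG_PHASE (0x2).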
10.2.10 Settle delay
+^^^^^^^^^^^^^^^^^^^^
+
+ ======== ===================
settle=n delay for n seconds
+ ======== ===================
After a bus reset, the driver will delay for n seconds before talking
to any device on the bus. The default is 3 seconds and safe mode will
default it to 10.
10.2.11 Serial NVRAM
- NB: option not currently implemented.
+^^^^^^^^^^^^^^^^^^^^
+
+ .. Note:: option not currently implemented.
+
+ ======= =========================================
nvram=n do not look for serial NVRAM
nvram=y test controllers for onboard serial NVRAM
+ ======= =========================================
+
(alternate binary form)
+
nvram=<bits options>
+
+ ==== =================================================================
0x01 look for NVRAM (equivalent to nvram=y)
0x02 ignore NVRAM "Synchronous negotiation" parameters for all devices
0x04 ignore NVRAM "Wide negotiation" parameter for all devices
0x08 ignore NVRAM "Scan at boot time" parameter for all devices
0x80 also attach controllers set to OFF in the NVRAM (sym53c8xx only)
+ ==== =================================================================
10.2.12 Exclude a host from being attached
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
excl=<io_address>,...
Prevent host at a given io address from being attached.
- For example 'excl=0xb400,0xc000' indicate to the
+ For example, 'excl=0xb400,0xc000' tells the
driver not to attach hosts at address 0xb400 and 0xc000.
10.3 Converting from old style options
+--------------------------------------
+
+Previously, the sym2 driver accepted arguments of the form::
-Previously, the sym2 driver accepted arguments of the form
sym53c8xx=tags:4,sync:10,debug:0x200
As a result of the new module parameters, this is no longer available.
Most of the options have remained the same, but tags has become
cmd_per_lun to reflect its different purposes. The sample above would
-be specified as:
+be specified as::
+
modprobe sym53c8xx cmd_per_lun=4 sync=10 debug=0x200
-or on the kernel boot line as:
+or on the kernel boot line as::
+
sym53c8xx.cmd_per_lun=4 sym53c8xx.sync=10 sym53c8xx.debug=0x200
-10.4 SCSI BUS checking boot option.
+10.4 SCSI BUS checking boot option
+----------------------------------
-When this option is set to a non-zero value, the driver checks SCSI lines
+When this option is set to a non-zero value, the driver checks SCSI lines
logic state, 100 micro-seconds after having asserted the SCSI RESET line.
The driver just reads SCSI lines and checks all lines read FALSE except RESET.
-Since SCSI devices shall release the BUS at most 800 nano-seconds after SCSI
+Since SCSI devices shall release the BUS at most 800 nano-seconds after SCSI
RESET has been asserted, any signal to TRUE may indicate a SCSI BUS problem.
Unfortunately, the following common SCSI BUS problems are not detected:
+
- Only 1 terminator installed.
- Misplaced terminators.
- Bad quality terminators.
-On the other hand, either bad cabling, broken devices, not conformant
+
+On the other hand, bad cabling, broken devices, non-conformant
devices, etc. may cause a SCSI signal to be wrong when the driver reads it.
15. SCSI problem troubleshooting
+================================
15.1 Problem tracking
+---------------------
Most SCSI problems are due to a non conformant SCSI bus or too buggy
devices. If unfortunately you have SCSI problems, you can check the
@@ -607,86 +743,96 @@ hard disks. Good SCSI hard disks with a large cache gain advantage of
tagged commands queuing.
15.2 Understanding hardware error reports
+-----------------------------------------
-When the driver detects an unexpected error condition, it may display a
-message of the following pattern.
+When the driver detects an unexpected error condition, it may display a
+message of the following pattern::
-sym0:1: ERROR (0:48) (1-21-65) (f/95/0) @ (script 7c0:19000000).
-sym0: script cmd = 19000000
-sym0: regdump: da 10 80 95 47 0f 01 07 75 01 81 21 80 01 09 00.
+ sym0:1: ERROR (0:48) (1-21-65) (f/95/0) @ (script 7c0:19000000).
+ sym0: script cmd = 19000000
+ sym0: regdump: da 10 80 95 47 0f 01 07 75 01 81 21 80 01 09 00.
-Some fields in such a message may help you understand the cause of the
-problem, as follows:
+Some fields in such a message may help you understand the cause of the
+problem, as follows::
-sym0:1: ERROR (0:48) (1-21-65) (f/95/0) @ (script 7c0:19000000).
-.....A.........B.C....D.E..F....G.H..I.......J.....K...L.......
+ sym0:1: ERROR (0:48) (1-21-65) (f/95/0) @ (script 7c0:19000000).
+ .....A.........B.C....D.E..F....G.H..I.......J.....K...L.......
Field A : target number.
- SCSI ID of the device the controller was talking with at the moment the
+ SCSI ID of the device the controller was talking with at the moment the
error occurs.
Field B : DSTAT io register (DMA STATUS)
- Bit 0x40 : MDPE Master Data Parity Error
+ ======== =============================================================
+ Bit 0x40 MDPE Master Data Parity Error
Data parity error detected on the PCI BUS.
- Bit 0x20 : BF Bus Fault
+ Bit 0x20 BF Bus Fault
PCI bus fault condition detected
- Bit 0x01 : IID Illegal Instruction Detected
- Set by the chip when it detects an Illegal Instruction format
+ Bit 0x01 IID Illegal Instruction Detected
+ Set by the chip when it detects an Illegal Instruction format
on some condition that makes an instruction illegal.
- Bit 0x80 : DFE Dma Fifo Empty
+ Bit 0x80 DFE Dma Fifo Empty
Pure status bit that does not indicate an error.
- If the reported DSTAT value contains a combination of MDPE (0x40),
+ ======== =============================================================
+
+ If the reported DSTAT value contains a combination of MDPE (0x40),
BF (0x20), then the cause may be likely due to a PCI BUS problem.
Field C : SIST io register (SCSI Interrupt Status)
- Bit 0x08 : SGE SCSI GROSS ERROR
- Indicates that the chip detected a severe error condition
+ ======== ==================================================================
+ Bit 0x08 SGE SCSI GROSS ERROR
+ Indicates that the chip detected a severe error condition
on the SCSI BUS that prevents the SCSI protocol from functioning
properly.
- Bit 0x04 : UDC Unexpected Disconnection
- Indicates that the device released the SCSI BUS when the chip
- was not expecting this to happen. A device may behave so to
+ Bit 0x04 UDC Unexpected Disconnection
+ Indicates that the device released the SCSI BUS when the chip
+ was not expecting this to happen. A device may behave this way to
indicate to the SCSI initiator that an error condition not reportable using the SCSI protocol has occurred.
- Bit 0x02 : RST SCSI BUS Reset
- Generally SCSI targets do not reset the SCSI BUS, although any
+ Bit 0x02 RST SCSI BUS Reset
+ Generally SCSI targets do not reset the SCSI BUS, although any
device on the BUS can reset it at any time.
- Bit 0x01 : PAR Parity
+ Bit 0x01 PAR Parity
SCSI parity error detected.
- On a faulty SCSI BUS, any error condition among SGE (0x08), UDC (0x04) and
- PAR (0x01) may be detected by the chip. If your SCSI system sometimes
- encounters such error conditions, especially SCSI GROSS ERROR, then a SCSI
+ ======== ==================================================================
+
+ On a faulty SCSI BUS, any error condition among SGE (0x08), UDC (0x04) and
+ PAR (0x01) may be detected by the chip. If your SCSI system sometimes
+ encounters such error conditions, especially SCSI GROSS ERROR, then a SCSI
BUS problem is likely the cause of these errors.
-For fields D,E,F,G and H, you may look into the sym53c8xx_defs.h file
+For fields D,E,F,G and H, you may look into the sym53c8xx_defs.h file
that contains some minimal comments on IO register bits.
+
Field D : SOCL Scsi Output Control Latch
- This register reflects the state of the SCSI control lines the
+ This register reflects the state of the SCSI control lines the
chip wants to drive or compare against.
Field E : SBCL Scsi Bus Control Lines
Actual value of control lines on the SCSI BUS.
Field F : SBDL Scsi Bus Data Lines
Actual value of data lines on the SCSI BUS.
Field G : SXFER SCSI Transfer
- Contains the setting of the Synchronous Period for output and
+ Contains the setting of the Synchronous Period for output and
the current Synchronous offset (offset 0 means asynchronous).
Field H : SCNTL3 Scsi Control Register 3
- Contains the setting of timing values for both asynchronous and
- synchronous data transfers.
+ Contains the setting of timing values for both asynchronous and
+ synchronous data transfers.
Field I : SCNTL4 Scsi Control Register 4
Only meaningful for 53C1010 Ultra3 controllers.
-Understanding Fields J, K, L and dumps requires to have good knowledge of
+Understanding Fields J, K, L and dumps requires a good knowledge of
SCSI standards, chip core functionality and internal driver data structures.
-You are not required to decode and understand them, unless you want to help
+You are not required to decode and understand them, unless you want to help
maintain the driver code.
17. Serial NVRAM (added by Richard Waltham: dormouse@farsrobt.demon.co.uk)
+==========================================================================
17.1 Features
+-------------
Enabling serial NVRAM support enables detection of the serial NVRAM included
-on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
-serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
+on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
+serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
host adaptor and its attached drives.
The Symbios NVRAM also holds data on the boot order of host adaptors in a
@@ -694,102 +840,122 @@ system with more than one host adaptor. This information is no longer used
as it's fundamentally incompatible with the hotplug PCI model.
Tekram boards using Symbios chips, DC390W/F/U, which have NVRAM are detected
-and this is used to distinguish between Symbios compatible and Tekram host
+and this is used to distinguish between Symbios compatible and Tekram host
adaptors. This is used to disable the Symbios compatible "diff" setting
-incorrectly set on Tekram boards if the CONFIG_SCSI_53C8XX_SYMBIOS_COMPAT
-configuration parameter is set enabling both Symbios and Tekram boards to be
+incorrectly set on Tekram boards if the CONFIG_SCSI_53C8XX_SYMBIOS_COMPAT
+configuration parameter is set enabling both Symbios and Tekram boards to be
used together with the Symbios cards using all their features, including
"diff" support. ("led pin" support for Symbios compatible cards can remain
enabled when using Tekram cards. It does nothing useful for Tekram host
adaptors but does not cause problems either.)
-The parameters the driver is able to get from the NVRAM depend on the
+The parameters the driver is able to get from the NVRAM depend on the
data format used, as follows:
- Tekram format Symbios format
-General and host parameters
- Boot order N Y
- Host SCSI ID Y Y
- SCSI parity checking Y Y
- Verbose boot messages N Y
-SCSI devices parameters
- Synchronous transfer speed Y Y
- Wide 16 / Narrow Y Y
- Tagged Command Queuing enabled Y Y
- Disconnections enabled Y Y
- Scan at boot time N Y
-
-In order to speed up the system boot, for each device configured without
-the "scan at boot time" option, the driver forces an error on the
++-------------------------------+------------------+--------------+
+| |Tekram format |Symbios format|
++-------------------------------+------------------+--------------+
+|General and host parameters | | |
++-------------------------------+------------------+--------------+
+| * Boot order | N | Y |
++-------------------------------+------------------+--------------+
+| * Host SCSI ID | Y | Y |
++-------------------------------+------------------+--------------+
+| * SCSI parity checking | Y | Y |
++-------------------------------+------------------+--------------+
+| * Verbose boot messages | N | Y |
++-------------------------------+------------------+--------------+
+|SCSI devices parameters |
++-------------------------------+------------------+--------------+
+| * Synchronous transfer speed | Y | Y |
++-------------------------------+------------------+--------------+
+| * Wide 16 / Narrow | Y | Y |
++-------------------------------+------------------+--------------+
+| * Tagged Command Queuing | Y | Y |
+| enabled | | |
++-------------------------------+------------------+--------------+
+| * Disconnections enabled | Y | Y |
++-------------------------------+------------------+--------------+
+| * Scan at boot time | N | Y |
++-------------------------------+------------------+--------------+
+
+
+In order to speed up the system boot, for each device configured without
+the "scan at boot time" option, the driver forces an error on the
first TEST UNIT READY command received for this device.
17.2 Symbios NVRAM layout
+-------------------------
+
+typical data at NVRAM address 0x100 (53c810a NVRAM)::
+
+ 00 00
+ 64 01
+ 8e 0b
+
+ 00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
+
+ 04 00 0f 00 00 10 00 50 00 00 01 00 00 62
+ 04 00 03 00 00 10 00 58 00 00 01 00 00 63
+ 04 00 01 00 00 10 00 48 00 00 01 00 00 61
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00
+
+ fe fe
+ 00 00
+ 00 00
-typical data at NVRAM address 0x100 (53c810a NVRAM)
------------------------------------------------------------
-00 00
-64 01
-8e 0b
-
-00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
-
-04 00 0f 00 00 10 00 50 00 00 01 00 00 62
-04 00 03 00 00 10 00 58 00 00 01 00 00 63
-04 00 01 00 00 10 00 48 00 00 01 00 00 61
-00 00 00 00 00 00 00 00 00 00 00 00 00 00
-
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-00 00 00 00 00 00 00 00
-
-fe fe
-00 00
-00 00
------------------------------------------------------------
NVRAM layout details
-NVRAM Address 0x000-0x0ff not used
- 0x100-0x26f initialised data
- 0x270-0x7ff not used
+============= =================
+NVRAM Address
+============= =================
+0x000-0x0ff not used
+0x100-0x26f initialised data
+0x270-0x7ff not used
+============= =================
-general layout
+general layout::
header - 6 bytes,
data - 356 bytes (checksum is byte sum of this data)
@@ -797,7 +963,7 @@ general layout
---
total 368 bytes
-data area layout
+data area layout::
controller set up - 20 bytes
boot configuration - 56 bytes (4x14 bytes)
@@ -806,52 +972,51 @@ data area layout
---
total 356 bytes
------------------------------------------------------------
-header
-
-00 00 - ?? start marker
-64 01 - byte count (lsb/msb excludes header/trailer)
-8e 0b - checksum (lsb/msb excludes header/trailer)
------------------------------------------------------------
-controller set up
-
-00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
- | | | |
- | | | -- host ID
- | | |
- | | --Removable Media Support
- | | 0x00 = none
- | | 0x01 = Bootable Device
- | | 0x02 = All with Media
- | |
- | --flag bits 2
- | 0x00000001= scan order hi->low
- | (default 0x00 - scan low->hi)
- --flag bits 1
- 0x00000001 scam enable
- 0x00000010 parity enable
- 0x00000100 verbose boot msgs
+header::
+
+ 00 00 - ?? start marker
+ 64 01 - byte count (lsb/msb excludes header/trailer)
+ 8e 0b - checksum (lsb/msb excludes header/trailer)
+
+controller set up::
+
+ 00 30 00 00 00 00 07 00 00 00 00 00 00 00 07 04 10 04 00 00
+ | | | |
+ | | | -- host ID
+ | | |
+ | | --Removable Media Support
+ | | 0x00 = none
+ | | 0x01 = Bootable Device
+ | | 0x02 = All with Media
+ | |
+ | --flag bits 2
+ | 0x00000001= scan order hi->low
+ | (default 0x00 - scan low->hi)
+ --flag bits 1
+ 0x00000001 scam enable
+ 0x00000010 parity enable
+ 0x00000100 verbose boot msgs
remaining bytes unknown - they do not appear to change in my
current set up for any of the controllers.
default set up is identical for 53c810a and 53c875 NVRAM
(Removable Media added Symbios BIOS version 4.09)
------------------------------------------------------------
+
boot configuration
-boot order set by order of the devices in this table
+boot order is set by the order of the devices in this table::
-04 00 0f 00 00 10 00 50 00 00 01 00 00 62 -- 1st controller
-04 00 03 00 00 10 00 58 00 00 01 00 00 63 2nd controller
-04 00 01 00 00 10 00 48 00 00 01 00 00 61 3rd controller
-00 00 00 00 00 00 00 00 00 00 00 00 00 00 4th controller
- | | | | | | | |
- | | | | | | ---- PCI io port adr
- | | | | | --0x01 init/scan at boot time
- | | | | --PCI device/function number (0xdddddfff)
- | | ----- ?? PCI vendor ID (lsb/msb)
- ----PCI device ID (lsb/msb)
+ 04 00 0f 00 00 10 00 50 00 00 01 00 00 62 -- 1st controller
+ 04 00 03 00 00 10 00 58 00 00 01 00 00 63 2nd controller
+ 04 00 01 00 00 10 00 48 00 00 01 00 00 61 3rd controller
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 4th controller
+ | | | | | | | |
+ | | | | | | ---- PCI io port adr
+ | | | | | --0x01 init/scan at boot time
+ | | | | --PCI device/function number (0xdddddfff)
+ | | ----- ?? PCI vendor ID (lsb/msb)
+ ----PCI device ID (lsb/msb)
?? use of this data is a guess but seems reasonable
@@ -859,120 +1024,120 @@ remaining bytes unknown - they do not appear to change in my
current set up
default set up is identical for 53c810a and 53c875 NVRAM
------------------------------------------------------------
-device set up (up to 16 devices - includes controller)
-
-0f 00 08 08 64 00 0a 00 - id 0
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00
-0f 00 08 08 64 00 0a 00 - id 15
- | | | | | |
- | | | | ----timeout (lsb/msb)
- | | | --synch period (0x?? 40 Mtrans/sec- fast 40) (probably 0x28)
- | | | (0x30 20 Mtrans/sec- fast 20)
- | | | (0x64 10 Mtrans/sec- fast )
- | | | (0xc8 5 Mtrans/sec)
- | | | (0x00 asynchronous)
- | | -- ?? max sync offset (0x08 in NVRAM on 53c810a)
- | | (0x10 in NVRAM on 53c875)
- | --device bus width (0x08 narrow)
- | (0x10 16 bit wide)
- --flag bits
- 0x00000001 - disconnect enabled
- 0x00000010 - scan at boot time
- 0x00000100 - scan luns
- 0x00001000 - queue tags enabled
+
+device set up (up to 16 devices - includes controller)::
+
+ 0f 00 08 08 64 00 0a 00 - id 0
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00
+ 0f 00 08 08 64 00 0a 00 - id 15
+ | | | | | |
+ | | | | ----timeout (lsb/msb)
+ | | | --synch period (0x?? 40 Mtrans/sec- fast 40) (probably 0x28)
+ | | | (0x30 20 Mtrans/sec- fast 20)
+ | | | (0x64 10 Mtrans/sec- fast )
+ | | | (0xc8 5 Mtrans/sec)
+ | | | (0x00 asynchronous)
+ | | -- ?? max sync offset (0x08 in NVRAM on 53c810a)
+ | | (0x10 in NVRAM on 53c875)
+ | --device bus width (0x08 narrow)
+ | (0x10 16 bit wide)
+ --flag bits
+ 0x00000001 - disconnect enabled
+ 0x00000010 - scan at boot time
+ 0x00000100 - scan luns
+ 0x00001000 - queue tags enabled
remaining bytes unknown - they do not appear to change in my
current set up
-?? use of this data is a guess but seems reasonable
+?? use of this data is a guess but seems reasonable
(but it could be max bus width)
default set up for 53c810a NVRAM
-default set up for 53c875 NVRAM - bus width - 0x10
+default set up for 53c875 NVRAM
+
+ - bus width - 0x10
- sync offset ? - 0x10
- sync period - 0x30
------------------------------------------------------------
-?? spare device space (32 bit bus ??)
-
-00 00 00 00 00 00 00 00 (19x8bytes)
-.
-.
-00 00 00 00 00 00 00 00
-default set up is identical for 53c810a and 53c875 NVRAM
------------------------------------------------------------
-trailer
+?? spare device space (32 bit bus ??)::
+
-fe fe - ? end marker ?
-00 00
-00 00
+ 00 00 00 00 00 00 00 00 (19x8bytes)
+ .
+ .
+ 00 00 00 00 00 00 00 00
default set up is identical for 53c810a and 53c875 NVRAM
------------------------------------------------------------
+trailer::
+
+ fe fe - ? end marker ?
+ 00 00
+ 00 00
+
+default set up is identical for 53c810a and 53c875 NVRAM
17.3 Tekram NVRAM layout
+------------------------
nvram 64x16 (1024 bit)
-Drive settings
-
-Drive ID 0-15 (addr 0x0yyyy0 = device setup, yyyy = ID)
- (addr 0x0yyyy1 = 0x0000)
-
- x x x x x x x x x x x x x x x x
- | | | | | | | | |
- | | | | | | | | ----- parity check 0 - off
- | | | | | | | | 1 - on
- | | | | | | | |
- | | | | | | | ------- sync neg 0 - off
- | | | | | | | 1 - on
- | | | | | | |
- | | | | | | --------- disconnect 0 - off
- | | | | | | 1 - on
- | | | | | |
- | | | | | ----------- start cmd 0 - off
- | | | | | 1 - on
- | | | | |
- | | | | -------------- tagged cmds 0 - off
- | | | | 1 - on
- | | | |
- | | | ---------------- wide neg 0 - off
- | | | 1 - on
- | | |
- --------------------------- sync rate 0 - 10.0 Mtrans/sec
- 1 - 8.0
- 2 - 6.6
- 3 - 5.7
- 4 - 5.0
- 5 - 4.0
- 6 - 3.0
- 7 - 2.0
- 7 - 2.0
- 8 - 20.0
- 9 - 16.7
- a - 13.9
- b - 11.9
+Drive settings::
+
+ Drive ID 0-15 (addr 0x0yyyy0 = device setup, yyyy = ID)
+ (addr 0x0yyyy1 = 0x0000)
+
+ x x x x x x x x x x x x x x x x
+ | | | | | | | | |
+ | | | | | | | | ----- parity check 0 - off
+ | | | | | | | | 1 - on
+ | | | | | | | |
+ | | | | | | | ------- sync neg 0 - off
+ | | | | | | | 1 - on
+ | | | | | | |
+ | | | | | | --------- disconnect 0 - off
+ | | | | | | 1 - on
+ | | | | | |
+ | | | | | ----------- start cmd 0 - off
+ | | | | | 1 - on
+ | | | | |
+ | | | | -------------- tagged cmds 0 - off
+ | | | | 1 - on
+ | | | |
+ | | | ---------------- wide neg 0 - off
+ | | | 1 - on
+ | | |
+ --------------------------- sync rate 0 - 10.0 Mtrans/sec
+ 1 - 8.0
+ 2 - 6.6
+ 3 - 5.7
+ 4 - 5.0
+ 5 - 4.0
+ 6 - 3.0
+ 7 - 2.0
+ 8 - 20.0
+ 9 - 16.7
+ a - 13.9
+ b - 11.9
Global settings
-Host flags 0 (addr 0x100000, 32)
+Host flags 0 (addr 0x100000, 32)::
x x x x x x x x x x x x x x x x
| | | | | | | | | | | |
@@ -980,7 +1145,7 @@ Host flags 0 (addr 0x100000, 32)
| | | | | | | |
| | | | | | | ----------------------- support for 0 - off
| | | | | | | > 2 drives 1 - on
- | | | | | | |
+ | | | | | | |
| | | | | | ------------------------- support drives 0 - off
| | | | | | > 1Gbytes 1 - on
| | | | | |
@@ -1000,10 +1165,10 @@ Host flags 0 (addr 0x100000, 32)
as BIOS dev 1 - boot device
2 - all
-Host flags 1 (addr 0x100001, 33)
+Host flags 1 (addr 0x100001, 33)::
x x x x x x x x x x x x x x x x
- | | | | | |
+ | | | | | |
| | | --------- boot delay 0 - 3 sec
| | | 1 - 5
| | | 2 - 10
@@ -1018,7 +1183,7 @@ Host flags 1 (addr 0x100001, 33)
3 - 16
4 - 32
-Host flags 2 (addr 0x100010, 34)
+Host flags 2 (addr 0x100010, 34)::
x x x x x x x x x x x x x x x x
|
@@ -1031,18 +1196,14 @@ checksum = 0x1234 - (sum addr 0-63)
----------------------------------------------------------------------------
-default nvram data:
-
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-
-0x0f07 0x0400 0x0001 0x0000 0x0000 0x0000 0x0000 0x0000
-0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
-0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
-0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0xfbbc
+default nvram data::
+
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
+ 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000 0x0037 0x0000
-===============================================================================
-End of Linux SYM-2 driver documentation file
+ 0x0f07 0x0400 0x0001 0x0000 0x0000 0x0000 0x0000 0x0000
+ 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
+ 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000
+ 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0x0000 0xfbbc
diff --git a/Documentation/scsi/tcm_qla2xxx.txt b/Documentation/scsi/tcm_qla2xxx.rst
index c3a670a25e2b..91bc1fcd369e 100644
--- a/Documentation/scsi/tcm_qla2xxx.txt
+++ b/Documentation/scsi/tcm_qla2xxx.rst
@@ -1,22 +1,36 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================
+tcm_qla2xxx Driver Notes
+========================
+
tcm_qla2xxx jam_host attribute
------------------------------
There is now a new module endpoint attribute called jam_host
-attribute: jam_host: boolean=0/1
+attribute::
+
+ jam_host: boolean=0/1
+
This attribute and accompanying code is only included if the
Kconfig parameter TCM_QLA2XXX_DEBUG is set to Y
+
By default this jammer code and functionality is disabled
Use this attribute to control the discarding of SCSI commands to a
selected host.
+
This may be useful for testing error handling and simulating slow drain
and other fabric issues.
Setting a boolean of 1 for the jam_host attribute for a particular host
- will discard the commands for that host.
+will discard the commands for that host.
+
Reset back to 0 to stop the jamming.
-Enable host 4 to be jammed
-echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
+Enable host 4 to be jammed::
+
+ echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
+
+Disable jamming on host 4::
-Disable jamming on host 4
-echo 0 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
+ echo 0 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.rst
index 81842ec3e116..a920c0a5a1f6 100644
--- a/Documentation/scsi/ufs.txt
+++ b/Documentation/scsi/ufs.rst
@@ -1,24 +1,26 @@
- Universal Flash Storage
- =======================
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+Universal Flash Storage
+=======================
-Contents
---------
-1. Overview
-2. UFS Architecture Overview
- 2.1 Application Layer
- 2.2 UFS Transport Protocol(UTP) layer
- 2.3 UFS Interconnect(UIC) Layer
-3. UFSHCD Overview
- 3.1 UFS controller initialization
- 3.2 UTP Transfer requests
- 3.3 UFS error handling
- 3.4 SCSI Error handling
+.. Contents
+
+ 1. Overview
+ 2. UFS Architecture Overview
+ 2.1 Application Layer
+ 2.2 UFS Transport Protocol(UTP) layer
+ 2.3 UFS Interconnect(UIC) Layer
+ 3. UFSHCD Overview
+ 3.1 UFS controller initialization
+ 3.2 UTP Transfer requests
+ 3.3 UFS error handling
+ 3.4 SCSI Error handling
1. Overview
------------
+===========
Universal Flash Storage(UFS) is a storage specification for flash devices.
It is aimed to provide a universal storage interface for both
@@ -28,19 +30,25 @@ is defined by JEDEC Solid State Technology Association. UFS is based
on MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
physical layer and MIPI Unipro as the link layer.
-The main goals of UFS is to provide,
+The main goals of UFS are to provide:
+
* Optimized performance:
- For UFS version 1.0 and 1.1 the target performance is as follows,
- Support for Gear1 is mandatory (rate A: 1248Mbps, rate B: 1457.6Mbps)
- Support for Gear2 is optional (rate A: 2496Mbps, rate B: 2915.2Mbps)
+
+ For UFS version 1.0 and 1.1 the target performance is as follows:
+
+ - Support for Gear1 is mandatory (rate A: 1248Mbps, rate B: 1457.6Mbps)
+ - Support for Gear2 is optional (rate A: 2496Mbps, rate B: 2915.2Mbps)
+
Future version of the standard,
- Gear3 (rate A: 4992Mbps, rate B: 5830.4Mbps)
+
+ - Gear3 (rate A: 4992Mbps, rate B: 5830.4Mbps)
+
* Low power consumption
* High random IOPs and low latency
2. UFS Architecture Overview
-----------------------------
+============================
UFS has a layered communication architecture which is based on SCSI
SAM-5 architectural model.
@@ -48,16 +56,22 @@ SAM-5 architectural model.
UFS communication architecture consists of the following layers,
2.1 Application Layer
+---------------------
The Application layer is composed of UFS command set layer(UCS),
Task Manager and Device manager. The UFS interface is designed to be
protocol agnostic, however SCSI has been selected as a baseline
protocol for versions 1.0 and 1.1 of UFS protocol layer.
+
UFS supports subset of SCSI commands defined by SPC-4 and SBC-3.
- * UCS: It handles SCSI commands supported by UFS specification.
- * Task manager: It handles task management functions defined by the
+
+ * UCS:
+ It handles SCSI commands supported by UFS specification.
+ * Task manager:
+ It handles task management functions defined by the
UFS which are meant for command queue control.
- * Device manager: It handles device level operations and device
+ * Device manager:
+ It handles device level operations and device
configuration operations. Device level operations mainly involve
device power management operations and commands to Interconnect
layers. Device level configurations involve handling of query
@@ -65,10 +79,12 @@ UFS communication architecture consists of following layers,
information of the device.
2.2 UFS Transport Protocol(UTP) layer
+-------------------------------------
UTP layer provides services for
the higher layers through Service Access Points. UTP defines 3
service access points for higher layers.
+
* UDM_SAP: Device manager service access point is exposed to device
manager for device level operations. These device level operations
are done through query requests.
@@ -76,20 +92,23 @@ UFS communication architecture consists of following layers,
set layer(UCS) to transport commands.
* UTP_TM_SAP: Task management service access point is exposed to task
manager to transport task management functions.
+
UTP transports messages through UFS protocol information unit(UPIU).
2.3 UFS Interconnect(UIC) Layer
+-------------------------------
UIC is the lowest layer of UFS layered architecture. It handles
connection between UFS host and UFS device. UIC consists of
MIPI UniPro and MIPI M-PHY. UIC provides 2 service access points
to upper layer,
+
* UIC_SAP: To transport UPIU between UFS host and UFS device.
* UIO_SAP: To issue commands to Unipro layers.
3. UFSHCD Overview
-------------------
+==================
The UFS host controller driver is based on Linux SCSI Framework.
UFSHCD is a low level device driver which acts as an interface between
@@ -98,12 +117,14 @@ SCSI Midlayer and PCIe based UFS host controllers.
The current UFSHCD implementation supports the following functionality,
3.1 UFS controller initialization
+---------------------------------
The initialization module brings UFS host controller to active state
and prepares the controller to transfer commands/response between
UFSHCD and UFS device.
3.2 UTP Transfer requests
+-------------------------
Transfer request handling module of UFSHCD receives SCSI commands
from SCSI Midlayer, forms UPIUs and issues the UPIUs to UFS Host
@@ -112,11 +133,13 @@ The current UFSHCD implementation supports following functionality,
of the status of the command.
3.3 UFS error handling
+----------------------
Error handling module handles Host controller fatal errors,
Device fatal errors and UIC interconnect layer related errors.
3.4 SCSI Error handling
+-----------------------
This is done through UFSHCD SCSI error handling routines registered
with SCSI Midlayer. Examples of some of the error handling commands
@@ -129,7 +152,7 @@ In this version of UFSHCD Query requests and power management
functionality are not implemented.
4. BSG Support
-------------------
+==============
This transport driver supports exchanging UFS protocol information units
(UPIUs) with a UFS device. Typically, user space will allocate
@@ -138,7 +161,7 @@ request_upiu and reply_upiu respectively. Filling those UPIUs should
be done in accordance with JEDEC spec UFS2.1 paragraph 10.7.
*Caveat emptor*: The driver makes no further input validations and sends the
UPIU to the device as it is. Open the bsg device in /dev/ufs-bsg and
-send SG_IO with the applicable sg_io_v4:
+send SG_IO with the applicable sg_io_v4::
io_hdr_v4.guard = 'Q';
io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
@@ -166,6 +189,7 @@ upiu-based protocol is available at:
For more detailed information about the tool and its supported
features, please see the tool's README.
-UFS Specifications can be found at,
-UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
-UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
+UFS Specifications can be found at:
+
+- UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
+- UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
diff --git a/Documentation/scsi/wd719x.rst b/Documentation/scsi/wd719x.rst
new file mode 100644
index 000000000000..a35015dfedd9
--- /dev/null
+++ b/Documentation/scsi/wd719x.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============================================================
+Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards
+===============================================================
+
+The card requires firmware that can be cut out of the Windows NT driver that
+can be downloaded from WD at:
+http://support.wdc.com/product/download.asp?groupid=801&sid=27&lang=en
+
+There is no license anywhere in the file or on the page - so the firmware
+probably cannot be added to linux-firmware.
+
+This script downloads and extracts the firmware, creating wd719x-risc.bin and
+wd719x-wcs.bin files. Put them in /lib/firmware/::
+
+ #!/bin/sh
+ wget http://support.wdc.com/download/archive/pciscsi.exe
+ lha xi pciscsi.exe pci-scsi.exe
+ lha xi pci-scsi.exe nt/wd7296a.sys
+ rm pci-scsi.exe
+ dd if=wd7296a.sys of=wd719x-risc.bin bs=1 skip=5760 count=14336
+ dd if=wd7296a.sys of=wd719x-wcs.bin bs=1 skip=20096 count=514
+ rm wd7296a.sys
diff --git a/Documentation/scsi/wd719x.txt b/Documentation/scsi/wd719x.txt
deleted file mode 100644
index 0816b0220238..000000000000
--- a/Documentation/scsi/wd719x.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards
----------------------------------------------------------------
-
-The card requires firmware that can be cut out of the Windows NT driver that
-can be downloaded from WD at:
-http://support.wdc.com/product/download.asp?groupid=801&sid=27&lang=en
-
-There is no license anywhere in the file or on the page - so the firmware
-probably cannot be added to linux-firmware.
-
-This script downloads and extracts the firmware, creating wd719x-risc.bin and
-d719x-wcs.bin files. Put them in /lib/firmware/.
-
-#!/bin/sh
-wget http://support.wdc.com/download/archive/pciscsi.exe
-lha xi pciscsi.exe pci-scsi.exe
-lha xi pci-scsi.exe nt/wd7296a.sys
-rm pci-scsi.exe
-dd if=wd7296a.sys of=wd719x-risc.bin bs=1 skip=5760 count=14336
-dd if=wd7296a.sys of=wd719x-wcs.bin bs=1 skip=20096 count=514
-rm wd7296a.sys
diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst
index 392875a1b94e..72f97d4b01a7 100644
--- a/Documentation/sound/alsa-configuration.rst
+++ b/Documentation/sound/alsa-configuration.rst
@@ -2234,6 +2234,19 @@ use_vmalloc
buffers. If mmap is used on such architectures, turn off this
option, so that the DMA-coherent buffers are allocated and used
instead.
+delayed_register
+ The option is needed for devices that have multiple streams
+ defined in multiple USB interfaces. The driver may invoke
+ registrations multiple times (once per interface) and this may
+ lead to insufficient device enumeration.
+ This option receives an array of strings, and you can pass
+ ID:INTERFACE like ``0123abcd:4`` for performing the delayed
+ registration to the given device. In this example, when a USB
+ device 0123:abcd is probed, the driver delays its registration
+ until USB interface 4 has been probed.
+ The driver prints a message like "Found post-registration device
+ assignment: 1234abcd:04" for such a device, so that the user can
+ notice the need.
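+ As an illustrative sketch (the snd-usb-audio module and the device ID
+ 0123:abcd are assumptions)::
+
+   modprobe snd-usb-audio delayed_register=0123abcd:4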
This module supports multiple devices, autoprobe and hotplugging.
diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst
index f8a72ffffe66..6e12de9fc34e 100644
--- a/Documentation/sound/hd-audio/index.rst
+++ b/Documentation/sound/hd-audio/index.rst
@@ -8,3 +8,4 @@ HD-Audio
models
controls
dp-mst
+ realtek-pc-beep
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 11298f0ce44d..0ea967d34583 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -216,8 +216,6 @@ alc298-dell-aio
ALC298 fixups on Dell AIO machines
alc275-dell-xps
ALC275 fixups on Dell XPS models
-alc256-dell-xps13
- ALC256 fixups on Dell XPS13
lenovo-spk-noise
Workaround for speaker noise on Lenovo machines
lenovo-hotkey
diff --git a/Documentation/sound/hd-audio/realtek-pc-beep.rst b/Documentation/sound/hd-audio/realtek-pc-beep.rst
new file mode 100644
index 000000000000..be47c6f76a6e
--- /dev/null
+++ b/Documentation/sound/hd-audio/realtek-pc-beep.rst
@@ -0,0 +1,129 @@
+===============================
+Realtek PC Beep Hidden Register
+===============================
+
+This file documents the "PC Beep Hidden Register", which is present in certain
+Realtek HDA codecs and controls a muxer and pair of passthrough mixers that can
+route audio between pins but aren't themselves exposed as HDA widgets. As far
+as I can tell, these hidden routes are designed to allow flexible PC Beep output
+for codecs that don't have mixer widgets in their output paths. Why it's easier
+to hide a mixer behind an undocumented vendor register than to just expose it
+as a widget, I have no idea.
+
+Register Description
+====================
+
+The register is accessed via processing coefficient 0x36 on NID 20h. Bits not
+identified below have no discernible effect on my machine, a Dell XPS 13 9350::
+
+ MSB LSB
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |h|S|L| | B |R| | Known bits
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ |0|0|1|1| 0x7 |0|0x0|1| 0x7 | Reset value
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+1Ah input select (B): 2 bits
+ When zero, expose the PC Beep line (from the internal beep generator, when
+ enabled with the Set Beep Generation verb on NID 01h, or else from the
+ external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone
+ jack (or possibly Line In on some machines) input instead. If PC Beep is
+ selected, the 1Ah boost control has no effect.
+
+Amplify 1Ah loopback, left (L): 1 bit
+ Amplify the left channel of 1Ah before mixing it into outputs as specified
+ by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
+
+Amplify 1Ah loopback, right (R): 1 bit
+ Amplify the right channel of 1Ah before mixing it into outputs as specified
+ by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
+
+Loopback 1Ah to 21h [active low] (h): 1 bit
+ When zero, mix 1Ah (possibly with amplification, depending on L and R bits)
+ into 21h (headphone jack on my machine). Mixed signal respects the mute
+ setting on 21h.
+
+Loopback 1Ah to 14h (S): 1 bit
+ When one, mix 1Ah (possibly with amplification, depending on L and R bits)
+ into 14h (internal speaker on my machine). Mixed signal **ignores** the mute
+ setting on 14h and is present whenever 14h is configured as an output.
+
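+As a hedged aside, one way to inspect the register from user space is the
+``hda-verb`` tool from alsa-tools, using the standard HDA coefficient verbs
+(0x500 = Set Coefficient Index, 0xc00 = Get Processing Coefficient); the
+hwdep path /dev/snd/hwC0D0 is an assumption that varies per machine::
+
+  # select coefficient 0x36 on the vendor node 0x20, then read it back
+  hda-verb /dev/snd/hwC0D0 0x20 0x500 0x36
+  hda-verb /dev/snd/hwC0D0 0x20 0xc00 0x0
+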
+Path diagrams
+=============
+
+1Ah input selection (DIV is the PC Beep divider set on NID 01h)::
+
+ <Beep generator> <PCBEEP pin> <Headphone jack>
+ | | |
+ +--DIV--+--!DIV--+ {1Ah boost control}
+ | |
+ +--(b == 0)--+--(b != 0)--+
+ |
+ >1Ah (Beep/Headphone Mic/Line In)<
+
+Loopback of 1Ah to 21h/14h::
+
+ <1Ah (Beep/Headphone Mic/Line In)>
+ |
+ {amplify if L/R}
+ |
+ +-----!h-----+-----S-----+
+ | |
+ {21h mute control} |
+ | |
+ >21h (Headphone)< >14h (Internal Speaker)<
+
+Background
+==========
+
+All Realtek HDA codecs have a vendor-defined widget with node ID 20h which
+provides access to a bank of registers that control various codec functions.
+Registers are read and written via the standard HDA processing coefficient
+verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is
+named "Realtek Vendor Registers" in public datasheets' verb listings and,
+apart from that, is entirely undocumented.
+
+This particular register, exposed at coefficient 0x36 and named in commits from
+Realtek, is of note: unlike most registers, which seem to control detailed
+amplifier parameters not in scope of the HDA specification, it controls audio
+routing which could just as easily have been defined using standard HDA mixer
+and selector widgets.
+
+Specifically, it selects between two sources for the input pin widget with Node
+ID (NID) 1Ah: the widget's signal can come either from an audio jack (on my
+laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in Realtek
+commits indicate that it might be a Line In on some machines) or from the PC
+Beep line (which is itself multiplexed between the codec's internal beep
+generator and external PCBEEP pin, depending on if the beep generator is
+enabled via verbs on NID 01h). Additionally, it can mix (with optional
+amplification) that signal onto the 21h and/or 14h output pins.
+
+The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is
+then amplified and mixed into both the headphones and the speakers. Not only
+does this violate the HDA specification, which says that "[a vendor defined
+beep input pin] connection may be maintained *only* while the Link reset
+(**RST#**) is asserted", it means that we cannot ignore the register if we care
+about the input that 1Ah would otherwise expose or if the PCBEEP trace is
+poorly shielded and picks up chassis noise (both of which are the case on my
+machine).
+
+Unfortunately, there are lots of ways to get this register configuration wrong.
+Linux, it seems, has gone through most of them. For one, the register resets
+after S3 suspend: judging by existing code, this isn't the case for all vendor
+registers, and it's led to some fixes that improve behavior on cold boot but
+don't last after suspend. Other fixes have successfully switched the 1Ah input
+away from PC Beep but have failed to disable both loopback paths. On my
+machine, this means that the headphone input is amplified and looped back to
+the headphone output, which uses the exact same pins! As you might expect, this
+causes terrible headphone noise, the character of which is controlled by the
+1Ah boost control. (If you've seen instructions online to fix XPS 13 headphone
+noise by changing "Headphone Mic Boost" in ALSA, now you know why.)
+
+The information here has been obtained through black-box reverse engineering of
+the ALC256 codec's behavior and is not guaranteed to be correct. It likely
+also applies for the ALC255, ALC257, ALC235, and ALC236, since those codecs
+seem to be close relatives of the ALC256. (They all share one initialization
+function.) Additionally, other codecs like the ALC225 and ALC285 also have this
+register, judging by existing fixups in ``patch_realtek.c``, but specific
+data (e.g. node IDs, bit positions, pin mappings) for those codecs may differ
+from what I've described here.
diff --git a/Documentation/sound/soc/codec-to-codec.rst b/Documentation/sound/soc/codec-to-codec.rst
index 810109d7500d..4eaa9a0c41fc 100644
--- a/Documentation/sound/soc/codec-to-codec.rst
+++ b/Documentation/sound/soc/codec-to-codec.rst
@@ -104,5 +104,10 @@ Make sure to name your corresponding cpu and codec playback and capture
dai names ending with "Playback" and "Capture" respectively as dapm core
will link and power those dais based on the name.
-Note that in current device tree there is no way to mark a dai_link
-as codec to codec. However, it may change in future.
+A dai_link in a "simple-audio-card" will automatically be detected as
+codec to codec when all DAIs on the link belong to codec components.
+The dai_link will be initialized with the subset of stream parameters
+(channels, format, sample rate) supported by all DAIs on the link. Since
+there is no way to provide these parameters in the device tree, this is
+mostly useful for communication with simple fixed-function codecs, such
+as a Bluetooth controller or cellular modem.
diff --git a/Documentation/trace/coresight/coresight-ect.rst b/Documentation/trace/coresight/coresight-ect.rst
new file mode 100644
index 000000000000..ecc1e57012ef
--- /dev/null
+++ b/Documentation/trace/coresight/coresight-ect.rst
@@ -0,0 +1,222 @@
+.. SPDX-License-Identifier: GPL-2.0
+=============================================
+CoreSight Embedded Cross Trigger (CTI & CTM).
+=============================================
+
+ :Author: Mike Leach <mike.leach@linaro.org>
+ :Date: November 2019
+
+Hardware Description
+--------------------
+
+The CoreSight Cross Trigger Interface (CTI) is a hardware device that takes
+individual input and output hardware signals, known as triggers, to and from
+devices, and interconnects them to other devices via numbered channels on the
+Cross Trigger Matrix (CTM), in order to propagate events between devices.
+
+e.g.::
+
+ 0000000 in_trigs :::::::
+ 0 C 0----------->: : +======>(other CTI channel IO)
+ 0 P 0<-----------: : v
+ 0 U 0 out_trigs : : Channels ***** :::::::
+ 0000000 : CTI :<=========>*CTM*<====>: CTI :---+
+ ####### in_trigs : : (id 0-3) ***** ::::::: v
+ # ETM #----------->: : ^ #######
+ # #<-----------: : +---# ETR #
+ ####### out_trigs ::::::: #######
+
+The CTI driver enables the programming of the CTI to attach triggers to
+channels. When an input trigger becomes active, the attached channel will
+become active. Any output trigger attached to that channel will also
+become active. The active channel is propagated to other CTIs via the CTM,
+activating connected output triggers there, unless filtered by the CTI
+channel gate.
+
+It is also possible to activate a channel by having system software directly
+program registers in the CTI.
+
+The CTIs are registered by the system to be associated with CPUs and/or other
+CoreSight devices on the trace data path. When these devices are enabled the
+attached CTIs will also be enabled. By default/on power up the CTIs have
+no programmed trigger/channel attachments, so will not affect the system
+until explicitly programmed.
+
+The hardware trigger connections between CTIs and devices are implementation
+defined, unless the CPU/ETM combination is a v8 architecture, in which case
+the connections have an architecturally defined standard layout.
+
+The hardware trigger signals can also be connected to non-CoreSight devices
+(e.g. UART), or be propagated off chip as hardware IO lines.
+
+All the CTI devices are associated with a CTM. On many systems there will be a
+single effective CTM (one CTM, or multiple CTMs all interconnected), but it is
+possible for systems to have nets of CTIs+CTM that are not interconnected with
+each other. On these systems a CTM index is declared to associate the CTI
+devices that are interconnected via a given CTM.
+
+Sysfs files and directories
+---------------------------
+
+The CTI devices appear on the existing CoreSight bus alongside the other
+CoreSight devices::
+
+ >$ ls /sys/bus/coresight/devices
+ cti_cpu0 cti_cpu2 cti_sys0 etm0 etm2 funnel0 replicator0 tmc_etr0
+ cti_cpu1 cti_cpu3 cti_sys1 etm1 etm3 funnel1 tmc_etf0 tpiu0
+
+The ``cti_cpu<N>`` named CTIs are associated with a CPU, and any ETM used by
+that core. The ``cti_sys<N>`` CTIs are general system infrastructure CTIs that
+can be associated with other CoreSight devices, or other system hardware
+capable of generating or using trigger signals::
+
+ >$ ls /sys/bus/coresight/devices/etm0/cti_cpu0
+ channels ctmid enable nr_trigger_cons mgmt power powered regs
+ subsystem triggers0 triggers1 uevent
+
+*Key file items are:-*
+ * ``enable``: enables/disables the CTI. Read to determine current state.
+   If this shows as enabled (1), but ``powered`` shows unpowered (0), then
+   the enable indicates a request to enable the CTI once the device is
+   powered.
+ * ``ctmid`` : associated CTM - only relevant if system has multiple CTI+CTM
+ clusters that are not interconnected.
+ * ``nr_trigger_cons`` : total number of connections - i.e. the number of
+   ``triggers<N>`` directories.
+ * ``powered`` : Read to determine if the CTI is currently powered.
+
+*Sub-directories:-*
+ * ``triggers<N>``: contains list of triggers for an individual connection.
+ * ``channels``: Contains the channel API - CTI main programming interface.
+ * ``regs``: Gives access to the raw programmable CTI regs.
+ * ``mgmt``: the standard CoreSight management registers.
+
+
+triggers<N> directories
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Individual trigger connection information. This describes trigger signals for
+CoreSight and non-CoreSight connections.
+
+Each triggers directory has a set of parameters describing the triggers for
+the connection.
+
+ * ``name`` : name of connection
+ * ``in_signals`` : input trigger signal indexes used in this connection.
+ * ``in_types`` : functional types for in signals.
+ * ``out_signals`` : output trigger signals for this connection.
+ * ``out_types`` : functional types for out signals.
+
+e.g::
+
+ >$ ls ./cti_cpu0/triggers0/
+ in_signals in_types name out_signals out_types
+ >$ cat ./cti_cpu0/triggers0/name
+ cpu0
+ >$ cat ./cti_cpu0/triggers0/out_signals
+ 0-2
+ >$ cat ./cti_cpu0/triggers0/out_types
+ pe_edbgreq pe_dbgrestart pe_ctiirq
+ >$ cat ./cti_cpu0/triggers0/in_signals
+ 0-1
+ >$ cat ./cti_cpu0/triggers0/in_types
+ pe_dbgtrigger pe_pmuirq
+
+If a connection has zero signals in either the 'in' or 'out' triggers then
+those parameters will be omitted.
+
+Channels API Directory
+~~~~~~~~~~~~~~~~~~~~~~
+
+This provides an easy way to attach triggers to channels, without needing
+the multiple register operations that are required if manipulating the
+'regs' sub-directory elements directly.
+
+A number of files provide this API::
+
+ >$ ls ./cti_sys0/channels/
+ chan_clear chan_inuse chan_xtrigs_out trigin_attach
+ chan_free chan_pulse chan_xtrigs_reset trigin_detach
+ chan_gate_disable chan_set chan_xtrigs_sel trigout_attach
+ chan_gate_enable chan_xtrigs_in trig_filter_enable trigout_detach
+ trigout_filtered
+
+Most accesses to these elements take the form::
+
+ echo <chan> [<trigger>] > /<device_path>/<operation>
+
+where the optional <trigger> is only needed for trigXX_attach | detach
+operations.
+
+e.g.::
+
+ >$ echo 0 1 > ./cti_sys0/channels/trigout_attach
+ >$ echo 0 > ./cti_sys0/channels/chan_set
+
+This attaches trigout(1) to channel(0), then activates channel(0), generating
+a set state on cti_sys0.trigout(1).
+
+
+*API operations*
+
+ * ``trigin_attach, trigout_attach``: Attach a channel to a trigger signal.
+ * ``trigin_detach, trigout_detach``: Detach a channel from a trigger signal.
+ * ``chan_set``: Set the channel - the set state will be propagated around
+ the CTM to other connected devices.
+ * ``chan_clear``: Clear the channel.
+ * ``chan_pulse``: Set the channel for a single CoreSight clock cycle.
+ * ``chan_gate_enable``: Write operation sets the CTI gate to propagate
+ (enable) the channel to other devices. This operation takes a channel
+ number. CTI gate is enabled for all channels by default at power up. Read
+ to list the currently enabled channels on the gate.
+ * ``chan_gate_disable``: Write channel number to disable gate for that
+ channel.
+ * ``chan_inuse``: Show the current channels attached to any signal.
+ * ``chan_free``: Show channels with no attached signals.
+ * ``chan_xtrigs_sel``: Write a channel number to select a channel to view,
+   read to show the selected channel number.
+ * ``chan_xtrigs_in``: Read to show the input triggers attached to
+ the selected view channel.
+ * ``chan_xtrigs_out``: Read to show the output triggers attached to
+   the selected view channel.
+ * ``trig_filter_enable``: Defaults to enabled; disable to allow potentially
+   dangerous output signals to be set.
+ * ``trigout_filtered``: Trigger out signals that are prevented from being
+   set while filtering (``trig_filter_enable``) is enabled. One use is to
+   prevent accidental ``EDBGREQ`` signals from stopping a core.
+ * ``chan_xtrigs_reset``: Write 1 to clear all channel / trigger programming.
+ Resets device hardware to default state.
+
+
+The example below attaches input trigger index 1 to channel 2, and output
+trigger index 6 to the same channel. It then examines the state of the
+channel / trigger connections using the appropriate sysfs attributes.
+
+The settings mean that if either input trigger 1 or channel 2 goes active,
+then trigger out 6 will go active. We then enable the CTI, and use the software
+channel control to activate channel 2. We see the active channel on the
+``choutstatus`` register and the active signal on the ``trigoutstatus``
+register. Finally clearing the channel removes this.
+
+e.g.::
+
+ .../cti_sys0/channels# echo 2 1 > trigin_attach
+ .../cti_sys0/channels# echo 2 6 > trigout_attach
+ .../cti_sys0/channels# cat chan_free
+ 0-1,3
+ .../cti_sys0/channels# cat chan_inuse
+ 2
+ .../cti_sys0/channels# echo 2 > chan_xtrigs_sel
+ .../cti_sys0/channels# cat chan_xtrigs_in
+ 1
+ .../cti_sys0/channels# cat chan_xtrigs_out
+ 6
+ .../cti_sys0/# echo 1 > enable
+ .../cti_sys0/channels# echo 2 > chan_set
+ .../cti_sys0/channels# cat ../regs/choutstatus
+ 0x4
+ .../cti_sys0/channels# cat ../regs/trigoutstatus
+ 0x40
+ .../cti_sys0/channels# echo 2 > chan_clear
+ .../cti_sys0/channels# cat ../regs/trigoutstatus
+ 0x0
+ .../cti_sys0/channels# cat ../regs/choutstatus
+ 0x0
diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst
index a566719f8e7e..108600ee1e12 100644
--- a/Documentation/trace/coresight/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -491,8 +491,21 @@ interface provided for that purpose by the generic STM API::
Details on how to use the generic STM API can be found here:- :doc:`../stm` [#second]_.
+The CTI & CTM Modules
+---------------------
+
+The CTI (Cross Trigger Interface) provides a set of trigger signals between
+individual CTIs and components, and can propagate these between all CTIs via
+channels on the CTM (Cross Trigger Matrix).
+
+A separate documentation file is provided to explain the use of these devices.
+(:doc:`coresight-ect`) [#fourth]_.
+
+
.. [#first] Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
.. [#second] Documentation/trace/stm.rst
.. [#third] https://github.com/Linaro/perf-opencsd
+
+.. [#fourth] Documentation/trace/coresight/coresight-ect.rst
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index ff658e27d25b..3b5614b1d1a5 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -125,10 +125,13 @@ of ftrace. Here is a list of some of the key files:
trace:
This file holds the output of the trace in a human
- readable format (described below). Note, tracing is temporarily
- disabled when the file is open for reading. Once all readers
- are closed, tracing is re-enabled. Opening this file for
+ readable format (described below). Opening this file for
writing with the O_TRUNC flag clears the ring buffer content.
+ Note, this file is not a consumer. If tracing is off
+ (no tracer running, or tracing_on is zero), it will produce
+ the same output each time it is read. When tracing is on,
+ it may produce inconsistent results as it tries to read
+ the entire buffer without consuming it.
trace_pipe:
@@ -142,9 +145,7 @@ of ftrace. Here is a list of some of the key files:
will not be read again with a sequential read. The
"trace" file is static, and if the tracer is not
adding more data, it will display the same
- information every time it is read. Unlike the
- "trace" file, opening this file for reading will not
- temporarily disable tracing.
+ information every time it is read.
trace_options:
@@ -262,6 +263,20 @@ of ftrace. Here is a list of some of the key files:
traced by the function tracer as well. This option will also
cause PIDs of tasks that exit to be removed from the file.
+ set_ftrace_notrace_pid:
+
+ Have the function tracer ignore threads whose PID are listed in
+ this file.
+
+ If the "function-fork" option is set, then when a task whose
+ PID is listed in this file forks, the child's PID will
+ automatically be added to this file, and the child will not be
+ traced by the function tracer either. This option will also
+ cause PIDs of tasks that exit to be removed from the file.
+
+ If a PID is in both this file and "set_ftrace_pid", then this
+ file takes precedence, and the thread will not be traced.
+
set_event_pid:
Have the events only trace a task with a PID listed in this file.
@@ -273,6 +288,19 @@ of ftrace. Here is a list of some of the key files:
cause the PIDs of tasks to be removed from this file when the task
exits.
+ set_event_notrace_pid:
+
+ Have the events not trace a task with a PID listed in this file.
+ Note, the sched_switch and sched_wakeup events will still trace a
+ thread whose PID is listed in this file, if those events also
+ involve a thread that should be traced.
+
+ To have the PIDs of children of tasks with their PID in this file
+ added on fork, enable the "event-fork" option. That option will also
+ cause the PIDs of tasks to be removed from this file when the task
+ exits.
+
set_graph_function:
Functions listed in this file will cause the function graph
@@ -1125,6 +1153,12 @@ Here are the available options:
the trace displays additional information about the
latency, as described in "Latency trace format".
+ pause-on-trace
+ When set, opening the trace file for read will pause
+ writing to the ring buffer (as if tracing_on was set to zero).
+ This simulates the original behavior of the trace file.
+ When the file is closed, tracing will be enabled again.
+
record-cmd
When any event or tracer is enabled, a hook is enabled
in the sched_switch trace point to fill comm cache
@@ -1176,6 +1210,8 @@ Here are the available options:
tasks fork. Also, when tasks with PIDs in set_event_pid exit,
their PIDs will be removed from the file.
+ This affects PIDs listed in set_event_notrace_pid as well.
+
function-trace
The latency tracers will enable function tracing
if this option is enabled (default it is). When
@@ -1190,6 +1226,8 @@ Here are the available options:
set_ftrace_pid exit, their PIDs will be removed from the
file.
+ This affects PIDs in set_ftrace_notrace_pid as well.
+
display-graph
When set, the latency tracers (irqsoff, wakeup, etc) will
use function graph tracing instead of function tracing.
@@ -2126,6 +2164,8 @@ periodically make a CPU constantly busy with interrupts disabled.
# cat trace
# tracer: hwlat
#
+ # entries-in-buffer/entries-written: 13/13 #P:8
+ #
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
@@ -2133,12 +2173,18 @@ periodically make a CPU constantly busy with interrupts disabled.
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
- <...>-3638 [001] d... 19452.055471: #1 inner/outer(us): 12/14 ts:1499801089.066141940
- <...>-3638 [003] d... 19454.071354: #2 inner/outer(us): 11/9 ts:1499801091.082164365
- <...>-3638 [002] dn.. 19461.126852: #3 inner/outer(us): 12/9 ts:1499801098.138150062
- <...>-3638 [001] d... 19488.340960: #4 inner/outer(us): 8/12 ts:1499801125.354139633
- <...>-3638 [003] d... 19494.388553: #5 inner/outer(us): 8/12 ts:1499801131.402150961
- <...>-3638 [003] d... 19501.283419: #6 inner/outer(us): 0/12 ts:1499801138.297435289 nmi-total:4 nmi-count:1
+ <...>-1729 [001] d... 678.473449: #1 inner/outer(us): 11/12 ts:1581527483.343962693 count:6
+ <...>-1729 [004] d... 689.556542: #2 inner/outer(us): 16/9 ts:1581527494.889008092 count:1
+ <...>-1729 [005] d... 714.756290: #3 inner/outer(us): 16/16 ts:1581527519.678961629 count:5
+ <...>-1729 [001] d... 718.788247: #4 inner/outer(us): 9/17 ts:1581527523.889012713 count:1
+ <...>-1729 [002] d... 719.796341: #5 inner/outer(us): 13/9 ts:1581527524.912872606 count:1
+ <...>-1729 [006] d... 844.787091: #6 inner/outer(us): 9/12 ts:1581527649.889048502 count:2
+ <...>-1729 [003] d... 849.827033: #7 inner/outer(us): 18/9 ts:1581527654.889013793 count:1
+ <...>-1729 [007] d... 853.859002: #8 inner/outer(us): 9/12 ts:1581527658.889065736 count:1
+ <...>-1729 [001] d... 855.874978: #9 inner/outer(us): 9/11 ts:1581527660.861991877 count:1
+ <...>-1729 [001] d... 863.938932: #10 inner/outer(us): 9/11 ts:1581527668.970010500 count:1 nmi-total:7 nmi-count:1
+ <...>-1729 [007] d... 878.050780: #11 inner/outer(us): 9/12 ts:1581527683.385002600 count:1 nmi-total:5 nmi-count:1
+ <...>-1729 [007] d... 886.114702: #12 inner/outer(us): 9/12 ts:1581527691.385001600 count:1
The above output is somewhat the same in the header. All events will have
@@ -2148,7 +2194,7 @@ interrupts disabled 'd'. Under the FUNCTION title there is:
This is the count of events recorded that were greater than the
tracing_threshold (See below).
- inner/outer(us): 12/14
+ inner/outer(us): 11/12
This shows two numbers as "inner latency" and "outer latency". The test
runs in a loop checking a timestamp twice. The latency detected within
@@ -2156,11 +2202,15 @@ interrupts disabled 'd'. Under the FUNCTION title there is:
after the previous timestamp and the next timestamp in the loop is
the "outer latency".
- ts:1499801089.066141940
+ ts:1581527483.343962693
+
+ The absolute timestamp that the first latency was recorded in the window.
+
+ count:6
- The absolute timestamp that the event happened.
+ The number of times a latency was detected during the window.
- nmi-total:4 nmi-count:1
+ nmi-total:7 nmi-count:1
On architectures that support it, if an NMI comes in during the
test, the time spent in NMI is reported in "nmi-total" (in
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index ebd383fba939..efbbe570aa9b 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1574,8 +1574,8 @@ This ioctl would set vcpu's xcr to the value userspace specified.
};
#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
- #define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
- #define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
+ #define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1) /* deprecated */
+ #define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2) /* deprecated */
struct kvm_cpuid_entry2 {
__u32 function;
@@ -1626,13 +1626,6 @@ emulate them efficiently. The fields in each entry are defined as follows:
KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
if the index field is valid
- KVM_CPUID_FLAG_STATEFUL_FUNC:
- if cpuid for this function returns different values for successive
- invocations; there will be several entries with the same function,
- all with this flag set
- KVM_CPUID_FLAG_STATE_READ_NEXT:
- for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
- the first entry to be read by a cpu
eax, ebx, ecx, edx:
the values returned by the cpuid instruction for
@@ -2117,7 +2110,8 @@ Errors:
 ======== ============================================================
  ENOENT   no such register
-  EINVAL   invalid register ID, or no such register
+  EINVAL   invalid register ID, or no such register or used with VMs in
+           protected virtualization mode on s390
  EPERM    (arm64) register access not allowed before vcpu finalization
 ======== ============================================================
@@ -2552,7 +2546,8 @@ Errors include:
 ======== ============================================================
  ENOENT   no such register
-  EINVAL   invalid register ID, or no such register
+  EINVAL   invalid register ID, or no such register or used with VMs in
+           protected virtualization mode on s390
  EPERM    (arm64) register access not allowed before vcpu finalization
 ======== ============================================================
@@ -3347,8 +3342,8 @@ The member 'flags' is used for passing flags from userspace.
::
#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
- #define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
- #define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
+ #define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1) /* deprecated */
+ #define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2) /* deprecated */
struct kvm_cpuid_entry2 {
__u32 function;
@@ -3394,13 +3389,6 @@ The fields in each entry are defined as follows:
KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
if the index field is valid
- KVM_CPUID_FLAG_STATEFUL_FUNC:
- if cpuid for this function returns different values for successive
- invocations; there will be several entries with the same function,
- all with this flag set
- KVM_CPUID_FLAG_STATE_READ_NEXT:
- for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
- the first entry to be read by a cpu
eax, ebx, ecx, edx:
@@ -4649,6 +4637,60 @@ the clear cpu reset definition in the POP. However, the cpu is not put
into ESA mode. This reset is a superset of the initial reset.
+4.125 KVM_S390_PV_COMMAND
+-------------------------
+
+:Capability: KVM_CAP_S390_PROTECTED
+:Architectures: s390
+:Type: vm ioctl
+:Parameters: struct kvm_pv_cmd
+:Returns: 0 on success, < 0 on error
+
+::
+
+ struct kvm_pv_cmd {
+ __u32 cmd; /* Command to be executed */
+ __u16 rc; /* Ultravisor return code */
+ __u16 rrc; /* Ultravisor return reason code */
+ __u64 data; /* Data or address */
+ __u32 flags; /* flags for future extensions. Must be 0 for now */
+ __u32 reserved[3];
+ };
+
+cmd values:
+
+KVM_PV_ENABLE
+ Allocate memory and register the VM with the Ultravisor, thereby
+ donating memory to the Ultravisor that will become inaccessible to
+ KVM. All existing CPUs are converted to protected ones. After this
+ command has succeeded, any CPU added via hotplug will become
+ protected during its creation as well.
+
+ Errors:
+
+ ===== =============================
+ EINTR an unmasked signal is pending
+ ===== =============================
+
+KVM_PV_DISABLE
+ Deregister the VM from the Ultravisor and reclaim the memory that
+ had been donated to the Ultravisor, making it usable by the kernel
+ again. All registered VCPUs are converted back to non-protected
+ ones.
+
+KVM_PV_SET_SEC_PARMS
+  Pass the image header from VM memory to the Ultravisor in
+  preparation of image unpacking and verification.
+
+KVM_PV_UNPACK
+  Unpack (protect and decrypt) a page of the encrypted boot image.
+
+KVM_PV_VERIFY
+  Verify the integrity of the unpacked image. Only if this succeeds is
+  KVM allowed to start protected VCPUs.
+
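+As an illustration, a userspace VMM might drive the transition into
+protected mode roughly as follows (a minimal sketch, assuming a ``vm_fd``
+obtained from KVM_CREATE_VM; error handling and the subsequent
+SET_SEC_PARMS/UNPACK/VERIFY steps are omitted)::
+
+  struct kvm_pv_cmd cmd = {
+          .cmd = KVM_PV_ENABLE,
+          /* data and flags are unused for KVM_PV_ENABLE */
+  };
+
+  /* convert the VM and all of its VCPUs to protected mode */
+  if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) < 0)
+          fprintf(stderr, "KVM_PV_ENABLE: rc=0x%x rrc=0x%x\n",
+                  cmd.rc, cmd.rrc);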
+
5. The kvm_run structure
========================
@@ -5707,8 +5749,13 @@ and injected exceptions.
:Architectures: x86, arm, arm64, mips
:Parameters: args[0] whether feature should be enabled or not
-With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
-clear and write-protect all pages that are returned as dirty.
+Valid flags are::
+
+ #define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
+ #define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
+
+When KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is set, KVM_GET_DIRTY_LOG will not
+automatically clear and write-protect all pages that are returned as dirty.
Rather, userspace will have to do this operation separately using
KVM_CLEAR_DIRTY_LOG.
@@ -5719,18 +5766,42 @@ than requiring to sync a full memslot; this ensures that KVM does not
take spinlocks for an extended period of time. Second, in some cases a
large amount of time can pass between a call to KVM_GET_DIRTY_LOG and
userspace actually using the data in the page. Pages can be modified
-during this time, which is inefficint for both the guest and userspace:
+during this time, which is inefficient for both the guest and userspace:
the guest will incur a higher penalty due to write protection faults,
while userspace can see false reports of dirty pages. Manual reprotection
helps reducing this time, improving guest performance and reducing the
number of dirty log false positives.
+With KVM_DIRTY_LOG_INITIALLY_SET set, all the bits of the dirty bitmap
+will be initialized to 1 when it is created. This also improves performance
+because dirty logging can be enabled gradually in small chunks on the first
+call to KVM_CLEAR_DIRTY_LOG. KVM_DIRTY_LOG_INITIALLY_SET depends on
+KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (it is also only available on
+x86 for now).
+
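+As an illustration, enabling both flags from userspace could look like
+this (a sketch, assuming a ``vm_fd`` from KVM_CREATE_VM and that the
+capability was reported as available)::
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
+          .args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+                     KVM_DIRTY_LOG_INITIALLY_SET,
+  };
+
+  /* per-VM ioctl; affects dirty bitmaps created afterwards */
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+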
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make
it hard or impossible to use it correctly. The availability of
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
+7.19 KVM_CAP_PPC_SECURE_GUEST
+------------------------------
+
+:Architectures: ppc
+
+This capability indicates that KVM is running on a host that has
+ultravisor firmware and thus can support a secure guest. On such a
+system, a guest can ask the ultravisor to make it a secure guest,
+one whose memory is inaccessible to the host except for pages which
+are explicitly requested to be shared with the host. The ultravisor
+notifies KVM when a guest requests to become a secure guest, and KVM
+has the opportunity to veto the transition.
+
+If present, this capability can be enabled for a VM, meaning that KVM
+will allow the transition to secure guest mode. Otherwise KVM will
+veto the transition.
+
8. Other capabilities.
======================
@@ -6027,3 +6098,14 @@ Architectures: s390
This capability indicates that the KVM_S390_NORMAL_RESET and
KVM_S390_CLEAR_RESET ioctls are available.
+
+8.23 KVM_CAP_S390_PROTECTED
+---------------------------
+
+Architectures: s390
+
+This capability indicates that the Ultravisor has been initialized and
+KVM can therefore start protected VMs.
+This capability governs the KVM_S390_PV_COMMAND ioctl and the
+KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected
+guests when the state change is invalid.
diff --git a/Documentation/virt/kvm/arm/hyp-abi.rst b/Documentation/virt/kvm/arm/hyp-abi.rst
index d1fc27d848e9..d9eba93aa364 100644
--- a/Documentation/virt/kvm/arm/hyp-abi.rst
+++ b/Documentation/virt/kvm/arm/hyp-abi.rst
@@ -11,6 +11,11 @@ hypervisor when running as a guest (under Xen, KVM or any other
hypervisor), or any hypervisor-specific interaction when the kernel is
used as a host.
+Note: KVM/arm has been removed from the kernel. The API described
+here is still valid though, as it allows the kernel to kexec when
+booted at HYP. It can also be used by a hypervisor other than KVM
+if necessary.
+
On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
mode, but still needs to interact with it, allowing a built-in
hypervisor to be either installed or torn down.
diff --git a/Documentation/virt/kvm/devices/s390_flic.rst b/Documentation/virt/kvm/devices/s390_flic.rst
index 954190da7d04..ea96559ba501 100644
--- a/Documentation/virt/kvm/devices/s390_flic.rst
+++ b/Documentation/virt/kvm/devices/s390_flic.rst
@@ -108,16 +108,9 @@ Groups:
mask or unmask the adapter, as specified in mask
KVM_S390_IO_ADAPTER_MAP
- perform a gmap translation for the guest address provided in addr,
- pin a userspace page for the translated address and add it to the
- list of mappings
-
- .. note:: A new mapping will be created unconditionally; therefore,
- the calling code should avoid making duplicate mappings.
-
+ This is now a no-op. The mapping is purely done by the irq route.
KVM_S390_IO_ADAPTER_UNMAP
- release a userspace page for the translated address specified in addr
- from the list of mappings
+ This is now a no-op. The mapping is purely done by the irq route.
KVM_DEV_FLIC_AISM
modify the adapter-interruption-suppression mode for a given isc if the
diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst
index 774deaebf7fa..dcc252634cf9 100644
--- a/Documentation/virt/kvm/index.rst
+++ b/Documentation/virt/kvm/index.rst
@@ -18,6 +18,8 @@ KVM
nested-vmx
ppc-pv
s390-diag
+ s390-pv
+ s390-pv-boot
timekeeping
vcpu-requests
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index c02291beac3f..b21a34c34a21 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -96,19 +96,18 @@ will happen:
We dirty-log for gfn1, that means gfn2 is lost in dirty-bitmap.
For direct sp, we can easily avoid it since the spte of direct sp is fixed
-to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
-to pin gfn to pfn, because after gfn_to_pfn_atomic():
+to gfn. For indirect sp, we disable fast page fault for simplicity.
+
+A solution for indirect sp could be to pin the gfn, for example via
+kvm_vcpu_gfn_to_pfn_atomic, before the cmpxchg. After the pinning:
- We have held the refcount of pfn that means the pfn can not be freed and
be reused for another gfn.
-- The pfn is writable that means it can not be shared between different gfns
+- The pfn is writable and therefore it cannot be shared between different gfns
by KSM.
Then, we can ensure the dirty bitmaps is correctly set for a gfn.
-Currently, to simplify the whole things, we disable fast page fault for
-indirect shadow page.
-
2) Dirty bit tracking
In the origin code, the spte can be fast updated (non-atomically) if the
diff --git a/Documentation/virt/kvm/s390-pv-boot.rst b/Documentation/virt/kvm/s390-pv-boot.rst
new file mode 100644
index 000000000000..8b8fa0390409
--- /dev/null
+++ b/Documentation/virt/kvm/s390-pv-boot.rst
@@ -0,0 +1,84 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
+s390 (IBM Z) Boot/IPL of Protected VMs
+======================================
+
+Summary
+-------
+The memory of Protected Virtual Machines (PVMs) is not accessible to
+I/O or the hypervisor. In those cases where the hypervisor needs to
+access the memory of a PVM, that memory must be made accessible.
+Memory made accessible to the hypervisor will be encrypted. See
+:doc:`s390-pv` for details.
+
+On IPL (boot) a small plaintext bootloader is started, which provides
+information about the encrypted components and necessary metadata to
+KVM to decrypt the protected virtual machine.
+
+Based on this data, KVM will make the protected virtual machine known
+to the Ultravisor (UV) and instruct it to secure the memory of the
+PVM, decrypt the components and verify the data and address list
+hashes, to ensure integrity. Afterwards KVM can run the PVM via the
+SIE instruction which the UV will intercept and execute on KVM's
+behalf.
+
+As the guest image is just an opaque kernel image that does the
+switch into PV mode itself, the user can load encrypted guest
+executables and data via every available method (network, dasd, scsi,
+direct kernel, ...) without the need to change the boot process.
+
+
+Diag308
+-------
+This diagnose instruction is the basic mechanism to handle IPL and
+related operations for virtual machines. The VM can set and retrieve
+IPL information blocks that specify the IPL method/devices and
+request VM memory and subsystem resets, as well as IPLs.
+
+For PVMs this concept has been extended with new subcodes:
+
+* Subcode 8: Set an IPL Information Block of type 5 (information block
+  for PVMs)
+* Subcode 9: Store the saved block in guest memory
+* Subcode 10: Move into Protected Virtualization mode
+
+The new PV load-device-specific-parameters field specifies all data
+that is necessary to move into PV mode.
+
+* PV Header origin
+* PV Header length
+* List of Components composed of
+
+  * AES-XTS Tweak prefix
+  * Origin
+  * Size
+
+The PV header contains the keys and hashes, which the UV will use to
+decrypt and verify the PV, as well as control flags and a start PSW.
+
+The components are for instance an encrypted kernel, kernel parameters
+and initrd. The components are decrypted by the UV.
+
+After the initial import of the encrypted data, all defined pages will
+contain the guest content. All non-specified pages will start out as
+zero pages on first access.
+
+When running in protected virtualization mode, some subcodes will result in
+exceptions or return error codes.
+
+Subcodes 4 and 7, which specify operations that do not clear the guest
+memory, will result in specification exceptions. This is because the
+UV will clear all memory when a secure VM is removed, and therefore
+non-clearing IPL subcodes are not allowed.
+
+Subcodes 8, 9 and 10 will result in specification exceptions.
+Re-IPL into protected mode is only possible via a detour into
+non-protected mode.
+
+Keys
+----
+Every CEC (Central Electronics Complex, i.e. the physical machine) will
+have a unique public key to enable tooling to build encrypted images.
+See `s390-tools <https://github.com/ibm-s390-tools/s390-tools/>`_
+for the tooling.
diff --git a/Documentation/virt/kvm/s390-pv.rst b/Documentation/virt/kvm/s390-pv.rst
new file mode 100644
index 000000000000..774a8c606091
--- /dev/null
+++ b/Documentation/virt/kvm/s390-pv.rst
@@ -0,0 +1,116 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================
+s390 (IBM Z) Ultravisor and Protected VMs
+=========================================
+
+Summary
+-------
+Protected virtual machines (PVMs) are KVM VMs that do not allow KVM to
+access VM state like guest memory or guest registers. Instead, the
+PVMs are mostly managed by a new entity called Ultravisor (UV). The UV
+provides an API that can be used by PVMs and KVM to request management
+actions.
+
+Each guest starts in non-protected mode and then may make a request to
+transition into protected mode. On transition, KVM registers the guest
+and its VCPUs with the Ultravisor and prepares everything for running
+it.
+
+The Ultravisor will secure and decrypt the guest's boot memory
+(i.e. kernel/initrd). It will safeguard state changes like VCPU
+starts/stops and injected interrupts while the guest is running.
+
+As access to the guest's state, such as the SIE state description, is
+normally needed to be able to run a VM, some changes have been made in
+the behavior of the SIE instruction. A new format 4 state description
+has been introduced, where some fields have different meanings for a
+PVM. SIE exits are minimized as much as possible to improve speed and
+reduce exposed guest state.
+
+
+Interrupt injection
+-------------------
+Interrupt injection is safeguarded by the Ultravisor. As KVM doesn't
+have access to the VCPUs' lowcores, injection is handled via the
+format 4 state description.
+
+Machine check, external, IO and restart interruptions each can be
+injected on SIE entry via a bit in the interrupt injection control
+field (offset 0x54). If the guest cpu is not enabled for the interrupt
+at the time of injection, a validity interception is recognized. The
+format 4 state description contains fields in the interception data
+block where data associated with the interrupt can be transported.
+
+Program and Service Call exceptions have another layer of
+safeguarding; they can only be injected for instructions that have
+been intercepted into KVM. The exceptions need to be a valid outcome
+of an instruction emulation by KVM, e.g. we can never inject an
+addressing exception, as those are reported by the SIE, since KVM has
+no access to the guest memory.
+
+
+Mask notification interceptions
+-------------------------------
+KVM cannot intercept lctl(g) and lpsw(e) anymore in order to be
+notified when a PVM enables a certain class of interrupt. As a
+replacement, two new interception codes have been introduced: One
+indicating that the contents of CRs 0, 6, or 14 have been changed,
+indicating different interruption subclasses; and one indicating that
+PSW bit 13 has been changed, indicating that a machine check
+intervention was requested and those are now enabled.
+
+Instruction emulation
+---------------------
+With the format 4 state description for PVMs, the SIE instruction already
+interprets more instructions than it does with format 2. It is not able
+to interpret every instruction, but needs to hand some tasks to KVM;
+therefore, the SIE and the ultravisor safeguard emulation inputs and outputs.
+
+The control structures associated with SIE provide the Secure
+Instruction Data Area (SIDA), the Interception Parameters (IP) and the
+Secure Interception General Register Save Area. Guest GRs and most of
+the instruction data, such as I/O data structures, are filtered.
+Instruction data is copied to and from the SIDA when needed. Guest
+GRs are put into / retrieved from the Secure Interception General
+Register Save Area.
+
+Only GR values needed to emulate an instruction will be copied into this
+save area and the real register numbers will be hidden.
+
+The Interception Parameters state description field still contains
+the bytes of the instruction text, but with pre-set register values
+instead of the actual ones. I.e. each instruction always uses the same
+instruction text, in order not to leak guest instruction text.
+This also implies that the register content that a guest had in r<n>
+may be in r<m> from the hypervisor's point of view.
+
+The Secure Instruction Data Area contains instruction storage
+data. Instruction data, i.e. data being referenced by an instruction
+like the SCCB for sclp, is moved via the SIDA. When an instruction is
+intercepted, the SIE will only allow data and program interrupts for
+this instruction to be moved to the guest via the two data areas
+discussed before. Other data is either ignored or results in validity
+interceptions.
+
+
+Instruction emulation interceptions
+-----------------------------------
+There are two types of SIE secure instruction intercepts: the normal
+and the notification type. Normal secure instruction intercepts will
+make the guest pending for instruction completion of the intercepted
+instruction type, i.e. on SIE entry it is attempted to complete
+emulation of the instruction with the data provided by KVM. That might
+be a program exception or instruction completion.
+
+The notification type intercepts inform KVM about guest environment
+changes due to guest instruction interpretation. Such an interception
+is recognized, for example, for the store prefix instruction to provide
+the new lowcore location. On SIE reentry, any KVM data in the data areas
+is ignored and execution continues as if the guest instruction had
+completed. For that reason KVM is not allowed to inject a program
+interrupt.
+
+Links
+-----
+`KVM Forum 2019 presentation <https://static.sched.com/hosted_files/kvmforum2019/3b/ibm_protected_vms_s390x.pdf>`_
diff --git a/Documentation/vm/.gitignore b/Documentation/vm/.gitignore
index 09b164a5700f..bc74f5643008 100644
--- a/Documentation/vm/.gitignore
+++ b/Documentation/vm/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
page-types
slabinfo
diff --git a/Documentation/vm/free_page_reporting.rst b/Documentation/vm/free_page_reporting.rst
new file mode 100644
index 000000000000..8c05e62d8b2b
--- /dev/null
+++ b/Documentation/vm/free_page_reporting.rst
@@ -0,0 +1,40 @@
+.. _free_page_reporting:
+
+=====================
+Free Page Reporting
+=====================
+
+Free page reporting is an API by which a device can register to receive
+lists of pages that are currently unused by the system. This is useful in
+the case of virtualization where a guest is then able to use this data to
+notify the hypervisor that it is no longer using certain pages in memory.
+
+For the driver, typically a balloon driver, to use this functionality
+it will allocate and initialize a page_reporting_dev_info structure. The
+field within the structure that it will populate is the "report" function
+pointer used to process the scatterlist. It must also guarantee that it can
+handle at least PAGE_REPORTING_CAPACITY worth of scatterlist entries per
+call to the function. A call to page_reporting_register will register the
+page reporting interface with the reporting framework assuming no other
+page reporting devices are already registered.
+
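+A minimal sketch of the driver side might look as follows (the
+``my_report`` callback and ``notify_hypervisor`` helper are hypothetical;
+the in-tree consumer of this API is the virtio balloon driver)::
+
+  #include <linux/page_reporting.h>
+  #include <linux/scatterlist.h>
+
+  static int my_report(struct page_reporting_dev_info *prdev,
+                       struct scatterlist *sgl, unsigned int nents)
+  {
+          struct scatterlist *sg;
+          int i;
+
+          /* each scatterlist entry describes one free page range */
+          for_each_sg(sgl, sg, nents, i)
+                  notify_hypervisor(page_to_phys(sg_page(sg)), sg->length);
+
+          /* returning 0 lets the pages go back to the free lists */
+          return 0;
+  }
+
+  static struct page_reporting_dev_info my_prdev = {
+          .report = my_report,
+  };
+
+  /* from the driver's probe path */
+  err = page_reporting_register(&my_prdev);
+
+  /* ... and before the driver is removed */
+  page_reporting_unregister(&my_prdev);
+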
+Once registered the page reporting API will begin reporting batches of
+pages to the driver. The API will start reporting pages 2 seconds after
+the interface is registered and will continue to do so 2 seconds after any
+page of a sufficiently high order is freed.
+
+Pages reported will be stored in the scatterlist passed to the reporting
+function, with the end bit set in the final entry (entry nents - 1).
+While pages are being processed by the report function they will not be
+accessible to the allocator. Once the report function has completed,
+the pages will be returned to the free area from which they were obtained.
+
+Prior to removing a driver that is making use of free page reporting it
+is necessary to call page_reporting_unregister to remove the
+page_reporting_dev_info structure that is currently in use by free page
+reporting. Doing this will prevent further reports from being
+issued via the interface. If another driver or the same driver is
+registered it is possible for it to resume where the previous driver had
+left off in terms of reporting free pages.
+
+Alexander Duyck, Dec 04, 2019
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 95fec5968362..4e3e9362afeb 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -161,13 +161,11 @@ device must complete the update before the driver callback returns.
When the device driver wants to populate a range of virtual addresses, it can
use::
- long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+ long hmm_range_fault(struct hmm_range *range);
-With the HMM_RANGE_SNAPSHOT flag, it will only fetch present CPU page table
-entries and will not trigger a page fault on missing or non-present entries.
-Without that flag, it does trigger a page fault on missing or read-only entries
-if write access is requested (see below). Page faults use the generic mm page
-fault code path just like a CPU page fault.
+It will trigger a page fault on missing or read-only entries if write access is
+requested (see below). Page faults use the generic mm page fault code path just
+like a CPU page fault.
Both functions copy CPU page table entries into their pfns array argument. Each
entry in that array corresponds to an address in the virtual range. HMM
@@ -197,7 +195,7 @@ The usage pattern is::
again:
range.notifier_seq = mmu_interval_read_begin(&interval_sub);
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
+ ret = hmm_range_fault(&range);
if (ret) {
up_read(&mm->mmap_sem);
if (ret == -EBUSY)
diff --git a/Documentation/vm/zswap.rst b/Documentation/vm/zswap.rst
index 61f6185188cd..f8c6a79d7c70 100644
--- a/Documentation/vm/zswap.rst
+++ b/Documentation/vm/zswap.rst
@@ -35,9 +35,11 @@ Zswap evicts pages from compressed cache on an LRU basis to the backing swap
device when the compressed pool reaches its size limit. This requirement had
been identified in prior community discussions.
-Zswap is disabled by default but can be enabled at boot time by setting
-the ``enabled`` attribute to 1 at boot time. ie: ``zswap.enabled=1``. Zswap
-can also be enabled and disabled at runtime using the sysfs interface.
+Whether Zswap is enabled at boot time depends on whether
+the ``CONFIG_ZSWAP_DEFAULT_ON`` Kconfig option is enabled or not.
+This setting can then be overridden by providing the kernel command line
+``zswap.enabled=`` option, for example ``zswap.enabled=0``.
+Zswap can also be enabled and disabled at runtime using the sysfs interface.
An example command to enable zswap at runtime, assuming sysfs is mounted
at ``/sys``, is::
@@ -64,9 +66,10 @@ allocation in zpool is not directly accessible by address. Rather, a handle is
returned by the allocation routine and that handle must be mapped before being
accessed. The compressed memory pool grows on demand and shrinks as compressed
pages are freed. The pool is not preallocated. By default, a zpool
-of type zbud is created, but it can be selected at boot time by
-setting the ``zpool`` attribute, e.g. ``zswap.zpool=zbud``. It can
-also be changed at runtime using the sysfs ``zpool`` attribute, e.g.::
+of the type selected by the ``CONFIG_ZSWAP_ZPOOL_DEFAULT`` Kconfig option is created,
+but it can be overridden at boot time by setting the ``zpool`` attribute,
+e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs
+``zpool`` attribute, e.g.::
echo zbud > /sys/module/zswap/parameters/zpool
@@ -97,8 +100,9 @@ controlled policy:
* max_pool_percent - The maximum percentage of memory that the compressed
pool can occupy.
-The default compressor is lzo, but it can be selected at boot time by
-setting the ``compressor`` attribute, e.g. ``zswap.compressor=lzo``.
+The default compressor is selected by the ``CONFIG_ZSWAP_COMPRESSOR_DEFAULT``
+Kconfig option, but it can be overridden at boot time by setting the
+``compressor`` attribute, e.g. ``zswap.compressor=lzo``.
It can also be changed at runtime using the sysfs "compressor"
attribute, e.g.::
diff --git a/MAINTAINERS b/MAINTAINERS
index 195052943574..9271068bde63 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -77,21 +77,13 @@ Tips for patch submitters
8. Happy hacking.
-Descriptions of section entries
--------------------------------
+Descriptions of section entries and preferred order
+---------------------------------------------------
M: *Mail* patches to: FullName <address@domain>
R: Designated *Reviewer*: FullName <address@domain>
These reviewers should be CCed on patches.
L: *Mailing list* that is relevant to this area
- W: *Web-page* with status/info
- B: URI for where to file *bugs*. A web-page with detailed bug
- filing info, a direct bug tracker link, or a mailto: URI.
- C: URI for *chat* protocol, server and channel where developers
- usually hang out, for example irc://server/channel.
- Q: *Patchwork* web based patch tracking system site
- T: *SCM* tree type and location.
- Type is one of: git, hg, quilt, stgit, topgit
S: *Status*, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -102,30 +94,39 @@ Descriptions of section entries
Obsolete: Old code. Something tagged obsolete generally means
it has been replaced by a better system and you
should be using that.
+ W: *Web-page* with status/info
+ Q: *Patchwork* web based patch tracking system site
+ B: URI for where to file *bugs*. A web-page with detailed bug
+ filing info, a direct bug tracker link, or a mailto: URI.
+ C: URI for *chat* protocol, server and channel where developers
+ usually hang out, for example irc://server/channel.
P: Subsystem Profile document for more details submitting
patches to the given subsystem. This is either an in-tree file,
or a URI. See Documentation/maintainer/maintainer-entry-profile.rst
for details.
+ T: *SCM* tree type and location.
+ Type is one of: git, hg, quilt, stgit, topgit
F: *Files* and directories wildcard patterns.
A trailing slash includes all files and subdirectory files.
F: drivers/net/ all files in and below drivers/net
F: drivers/net/* all files in drivers/net, but not below
F: */net/* all files in "any top level directory"/net
One pattern per line. Multiple F: lines acceptable.
+ X: *Excluded* files and directories that are NOT maintained, same
+ rules as F:. Files exclusions are tested before file matches.
+ Can be useful for excluding a specific subdirectory, for instance:
+ F: net/
+ X: net/ipv6/
+ matches all files in and below net excluding net/ipv6/
N: Files and directories *Regex* patterns.
- N: [^a-z]tegra all files whose path contains the word tegra
+ N: [^a-z]tegra all files whose path contains tegra
+ (not including files like integrator)
One pattern per line. Multiple N: lines acceptable.
scripts/get_maintainer.pl has different behavior for files that
match F: pattern and matches of N: patterns. By default,
get_maintainer will not look at git log history when an F: pattern
match occurs. When an N: match occurs, git log history is used
to also notify the people that have git commit signatures.
- X: *Excluded* files and directories that are NOT maintained, same
- rules as F:. Files exclusions are tested before file matches.
- Can be useful for excluding a specific subdirectory, for instance:
- F: net/
- X: net/ipv6/
- matches all files in and below net excluding net/ipv6/
K: *Content regex* (perl extended) pattern match in a patch or file.
For instance:
K: of_get_profile
@@ -236,7 +237,7 @@ M: Adaptec OEM Raid Solutions <aacraid@microsemi.com>
L: linux-scsi@vger.kernel.org
W: http://www.adaptec.com/
S: Supported
-F: Documentation/scsi/aacraid.txt
+F: Documentation/scsi/aacraid.rst
F: drivers/scsi/aacraid/
ABI/API
@@ -540,7 +541,7 @@ M: Matthew Wilcox <willy@infradead.org>
M: Hannes Reinecke <hare@suse.com>
L: linux-scsi@vger.kernel.org
S: Maintained
-F: Documentation/scsi/advansys.txt
+F: Documentation/scsi/advansys.rst
F: drivers/scsi/advansys.c
ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
@@ -1297,7 +1298,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/arm-boards
F: Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
-F: Documentation/devicetree/bindings/clock/arm-integrator.txt
+F: Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
F: Documentation/devicetree/bindings/i2c/i2c-versatile.txt
F: Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
F: Documentation/devicetree/bindings/mtd/arm-versatile.txt
@@ -1622,7 +1623,7 @@ F: Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
F: Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt
ARM/CALXEDA HIGHBANK ARCHITECTURE
-M: Rob Herring <robh@kernel.org>
+M: Andre Przywara <andre.przywara@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-highbank/
@@ -1693,12 +1694,15 @@ F: arch/arm/mach-ep93xx/micro9.c
ARM/CORESIGHT FRAMEWORK AND DRIVERS
M: Mathieu Poirier <mathieu.poirier@linaro.org>
R: Suzuki K Poulose <suzuki.poulose@arm.com>
+R: Mike Leach <mike.leach@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/hwtracing/coresight/*
+F: include/dt-bindings/arm/coresight-cti-dt.h
F: Documentation/trace/coresight/*
F: Documentation/devicetree/bindings/arm/coresight.txt
F: Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
+F: Documentation/devicetree/bindings/arm/coresight-cti.yaml
F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
F: tools/perf/arch/arm/util/pmu.c
F: tools/perf/arch/arm/util/auxtrace.c
@@ -1945,7 +1949,7 @@ F: Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt
F: arch/arm/boot/dts/lpc43*
F: drivers/i2c/busses/i2c-lpc2k.c
F: drivers/memory/pl172.c
-F: drivers/mtd/spi-nor/nxp-spifi.c
+F: drivers/mtd/spi-nor/controllers/nxp-spifi.c
F: drivers/rtc/rtc-lpc24xx.c
N: lpc18xx
@@ -2012,7 +2016,9 @@ M: Sean Wang <sean.wang@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
F: Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
+F: drivers/rtc/rtc-mt2712.c
F: drivers/rtc/rtc-mt6397.c
F: drivers/rtc/rtc-mt7622.c
@@ -2291,7 +2297,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-rockchip@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
S: Maintained
-F: Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+F: Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
F: Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
F: Documentation/devicetree/bindings/spi/spi-rockchip.yaml
F: arch/arm/boot/dts/rk3*
@@ -2564,9 +2570,9 @@ M: Masahiro Yamada <yamada.masahiro@socionext.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
S: Maintained
-F: Documentation/devicetree/bindings/arm/socionext/uniphier.txt
-F: Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
-F: Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.txt
+F: Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
+F: Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
+F: Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
F: arch/arm/boot/dts/uniphier*
F: arch/arm/include/asm/hardware/cache-uniphier.h
F: arch/arm/mach-uniphier/
@@ -2743,8 +2749,8 @@ L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
L: linux-gpio@vger.kernel.org
S: Maintained
-F: drivers/pinctrl/aspeed/
F: Documentation/devicetree/bindings/pinctrl/aspeed,*
+F: drivers/pinctrl/aspeed/
ASPEED SCU INTERRUPT CONTROLLER DRIVER
M: Eddie James <eajames@linux.ibm.com>
@@ -3470,7 +3476,7 @@ L: linux-i2c@vger.kernel.org
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
F: drivers/i2c/busses/i2c-brcmstb.c
-F: Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
+F: Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
M: Al Cooper <alcooperx@gmail.com>
@@ -4055,8 +4061,8 @@ F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
F: sound/soc/codecs/cros_ec_codec.*
CIRRUS LOGIC AUDIO CODEC DRIVERS
-M: Brian Austin <brian.austin@cirrus.com>
-M: Paul Handrigan <Paul.Handrigan@cirrus.com>
+M: James Schulman <james.schulman@cirrus.com>
+M: David Rhodes <david.rhodes@cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: sound/soc/codecs/cs*
@@ -4577,7 +4583,9 @@ S: Supported
F: drivers/scsi/cxgbi/cxgb3i
CXGB4 CRYPTO DRIVER (chcr)
-M: Atul Gupta <atul.gupta@chelsio.com>
+M: Ayush Sawal <ayush.sawal@chelsio.com>
+M: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+M: Rohit Maheshwari <rohitm@chelsio.com>
L: linux-crypto@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4716,7 +4724,7 @@ L: dc395x@twibble.org
W: http://twibble.org/dist/dc395x/
W: http://lists.twibble.org/mailman/listinfo/dc395x/
S: Maintained
-F: Documentation/scsi/dc395x.txt
+F: Documentation/scsi/dc395x.rst
F: drivers/scsi/dc395x.*
DCCP PROTOCOL
@@ -4956,6 +4964,7 @@ F: drivers/leds/leds-da90??.c
F: drivers/mfd/da903x.c
F: drivers/mfd/da90??-*.c
F: drivers/mfd/da91??-*.c
+F: drivers/pinctrl/pinctrl-da90??.c
F: drivers/power/supply/da9052-battery.c
F: drivers/power/supply/da91??-*.c
F: drivers/regulator/da903x.c
@@ -5056,7 +5065,7 @@ L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
F: drivers/dma-buf/
F: include/linux/dma-buf*
-F: include/linux/reservation.h
+F: include/linux/dma-resv.h
F: include/linux/*fence.h
F: Documentation/driver-api/dma-buf.rst
K: dma_(buf|fence|resv)
@@ -5297,6 +5306,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tve200/
+DRM DRIVER FOR FEIXIN K101 IM2BA02 MIPI-DSI LCD PANELS
+M: Icenowy Zheng <icenowy@aosc.io>
+S: Maintained
+F: drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+F: Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
+
DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
M: Jagan Teki <jagan@amarulasolutions.com>
S: Maintained
@@ -5316,6 +5331,13 @@ S: Maintained
F: drivers/gpu/drm/tiny/ili9225.c
F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
+DRM DRIVER FOR ILITEK ILI9486 PANELS
+M: Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/tiny/ili9486.c
+F: Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
+
DRM DRIVER FOR HX8357D PANELS
M: Eric Anholt <eric@anholt.net>
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -5357,6 +5379,13 @@ F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h
F: Documentation/devicetree/bindings/display/msm/
+DRM DRIVER FOR NOVATEK NT35510 PANELS
+M: Linus Walleij <linus.walleij@linaro.org>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
+F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
+
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs <bskeggs@redhat.com>
L: dri-devel@lists.freedesktop.org
@@ -5444,7 +5473,7 @@ M: David Lechner <david@lechnology.com>
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tiny/st7735r.c
-F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+F: Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
DRM DRIVER FOR SONY ACX424AKP PANELS
M: Linus Walleij <linus.walleij@linaro.org>
@@ -5524,6 +5553,7 @@ F: include/linux/vga*
DRM DRIVERS AND MISC GPU PATCHES
M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
M: Maxime Ripard <mripard@kernel.org>
+M: Thomas Zimmermann <tzimmermann@suse.de>
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -5603,7 +5633,6 @@ S: Supported
F: drivers/gpu/drm/fsl-dcu/
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
-F: Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR FREESCALE IMX
@@ -5622,12 +5651,13 @@ S: Maintained
F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
-M: Xinliang Liu <z.liuxinliang@hisilicon.com>
+M: Xinliang Liu <xinliang.liu@linaro.org>
M: Rongrong Zou <zourongrong@gmail.com>
+R: John Stultz <john.stultz@linaro.org>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
-T: git git://github.com/xin3liang/linux.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/hisilicon/
F: Documentation/devicetree/bindings/display/hisilicon/
@@ -5642,7 +5672,7 @@ F: include/uapi/drm/lima_drm.h
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR MEDIATEK
-M: CK Hu <ck.hu@mediatek.com>
+M: Chun-Kuang Hu <chunkuang.hu@kernel.org>
M: Philipp Zabel <p.zabel@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Supported
@@ -5719,6 +5749,17 @@ S: Maintained
F: drivers/gpu/drm/omapdrm/
F: Documentation/devicetree/bindings/display/ti/
+DRM DRIVERS FOR TI KEYSTONE
+M: Jyri Sarha <jsarha@ti.com>
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/tidss/
+F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
+F: Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+F: Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVERS FOR V3D
M: Eric Anholt <eric@anholt.net>
S: Supported
@@ -5745,7 +5786,7 @@ L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv/
F: include/uapi/drm/etnaviv_drm.h
-F: Documentation/devicetree/bindings/display/etnaviv/
+F: Documentation/devicetree/bindings/gpu/vivante,gc.yaml
DRM DRIVERS FOR ZTE ZX
M: Shawn Guo <shawnguo@kernel.org>
@@ -6345,6 +6386,13 @@ F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
+EXFAT FILE SYSTEM
+M: Namjae Jeon <namjae.jeon@samsung.com>
+M: Sungjong Seo <sj1557.seo@samsung.com>
+L: linux-fsdevel@vger.kernel.org
+S: Maintained
+F: fs/exfat/
+
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.com>
L: linux-ext4@vger.kernel.org
@@ -7456,7 +7504,7 @@ M: Don Brace <don.brace@microsemi.com>
L: esc.storagedev@microsemi.com
L: linux-scsi@vger.kernel.org
S: Supported
-F: Documentation/scsi/hpsa.txt
+F: Documentation/scsi/hpsa.rst
F: drivers/scsi/hpsa*.[ch]
F: include/linux/cciss*.h
F: include/uapi/linux/cciss*.h
@@ -7545,7 +7593,7 @@ HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
M: HighPoint Linux Team <linux@highpoint-tech.com>
W: http://www.highpoint-tech.com
S: Supported
-F: Documentation/scsi/hptiop.txt
+F: Documentation/scsi/hptiop.rst
F: drivers/scsi/hptiop.c
HIPPI
@@ -7816,6 +7864,10 @@ F: Documentation/ABI/testing/debugfs-hyperv
HYPERBUS SUPPORT
M: Vignesh Raghavendra <vigneshr@ti.com>
+L: linux-mtd@lists.infradead.org
+Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git cfi/next
+C: irc://irc.oftc.net/mtd
S: Supported
F: drivers/mtd/hyperbus/
F: include/linux/mtd/hyperbus.h
@@ -9210,7 +9262,7 @@ F: virt/kvm/*
F: tools/kvm/
F: tools/testing/selftests/kvm/
-KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
+KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Marc Zyngier <maz@kernel.org>
R: James Morse <james.morse@arm.com>
R: Julien Thierry <julien.thierry.kdev@gmail.com>
@@ -9219,9 +9271,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
S: Maintained
-F: arch/arm/include/uapi/asm/kvm*
-F: arch/arm/include/asm/kvm*
-F: arch/arm/kvm/
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/include/asm/kvm*
F: arch/arm64/kvm/
@@ -9256,6 +9305,7 @@ L: kvm@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
S: Supported
+F: Documentation/virt/kvm/s390*
F: arch/s390/include/uapi/asm/kvm*
F: arch/s390/include/asm/gmap.h
F: arch/s390/include/asm/kvm*
@@ -9436,7 +9486,7 @@ LASI 53c700 driver for PARISC
M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
L: linux-scsi@vger.kernel.org
S: Maintained
-F: Documentation/scsi/53c700.txt
+F: Documentation/scsi/53c700.rst
F: drivers/scsi/53c700*
LEAKING_ADDRESSES
@@ -9643,17 +9693,16 @@ F: include/uapi/linux/lightnvm.h
LINUX FOR POWER MACINTOSH
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Odd Fixes
F: arch/powerpc/platforms/powermac/
F: drivers/macintosh/
LINUX FOR POWERPC (32-BIT AND 64-BIT)
-M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-M: Paul Mackerras <paulus@samba.org>
M: Michael Ellerman <mpe@ellerman.id.au>
-W: https://github.com/linuxppc/linux/wiki
+R: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+R: Paul Mackerras <paulus@samba.org>
+W: https://github.com/linuxppc/wiki/wiki
L: linuxppc-dev@lists.ozlabs.org
Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
@@ -9670,6 +9719,8 @@ F: drivers/crypto/vmx/
F: drivers/i2c/busses/i2c-opal.c
F: drivers/net/ethernet/ibm/ibmveth.*
F: drivers/net/ethernet/ibm/ibmvnic.*
+F: drivers/*/*/*pasemi*
+F: drivers/*/*pasemi*
F: drivers/pci/hotplug/pnv_php.c
F: drivers/pci/hotplug/rpa*
F: drivers/rtc/rtc-opal.c
@@ -9686,51 +9737,31 @@ N: pseries
LINUX FOR POWERPC EMBEDDED MPC5XXX
M: Anatolij Gustschin <agust@denx.de>
L: linuxppc-dev@lists.ozlabs.org
-T: git git://git.denx.de/linux-denx-agust.git
-S: Maintained
+S: Odd Fixes
F: arch/powerpc/platforms/512x/
F: arch/powerpc/platforms/52xx/
LINUX FOR POWERPC EMBEDDED PPC4XX
-M: Alistair Popple <alistair@popple.id.au>
-M: Matt Porter <mporter@kernel.crashing.org>
-W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: arch/powerpc/platforms/40x/
F: arch/powerpc/platforms/44x/
LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
M: Scott Wood <oss@buserror.net>
-M: Kumar Gala <galak@kernel.crashing.org>
-W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux.git
-S: Maintained
+S:	Odd Fixes
F: arch/powerpc/platforms/83xx/
F: arch/powerpc/platforms/85xx/
F: Documentation/devicetree/bindings/powerpc/fsl/
LINUX FOR POWERPC EMBEDDED PPC8XX
-M: Vitaly Bordug <vitb@kernel.crashing.org>
-W: http://www.penguinppc.org/
+M: Christophe Leroy <christophe.leroy@c-s.fr>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: arch/powerpc/platforms/8xx/
-LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
-L: linuxppc-dev@lists.ozlabs.org
-S: Orphan
-F: arch/powerpc/*/*virtex*
-F: arch/powerpc/*/*/*virtex*
-
-LINUX FOR POWERPC PA SEMI PWRFICIENT
-L: linuxppc-dev@lists.ozlabs.org
-S: Orphan
-F: arch/powerpc/platforms/pasemi/
-F: drivers/*/*pasemi*
-F: drivers/*/*/*pasemi*
-
LINUX KERNEL DUMP TEST MODULE (LKDTM)
M: Kees Cook <keescook@chromium.org>
S: Maintained
@@ -10066,6 +10097,7 @@ F: Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt
MARVELL CRYPTO DRIVER
M: Boris Brezillon <bbrezillon@kernel.org>
M: Arnaud Ebalard <arno@natisbad.org>
+M: Srujana Challa <schalla@marvell.com>
F: drivers/crypto/marvell/
S: Maintained
L: linux-crypto@vger.kernel.org
@@ -10287,11 +10319,18 @@ M: Dan Murphy <dmurphy@ti.com>
M: Sriram Dash <sriram.dash@samsung.com>
L: linux-can@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/can/m_can.txt
+F: Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
F: drivers/net/can/m_can/m_can.c
F: drivers/net/can/m_can/m_can.h
F: drivers/net/can/m_can/m_can_platform.c
+MCP2221A MICROCHIP USB-HID TO I2C BRIDGE DRIVER
+M: Rishi Gupta <gupt21@gmail.com>
+L: linux-i2c@vger.kernel.org
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/hid/hid-mcp2221.c
+
MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS
M: Peter Rosin <peda@axentia.se>
L: linux-iio@vger.kernel.org
@@ -10491,7 +10530,7 @@ L: linux-renesas-soc@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Supported
F: Documentation/devicetree/bindings/media/renesas,csi2.yaml
-F: Documentation/devicetree/bindings/media/renesas,vin.txt
+F: Documentation/devicetree/bindings/media/renesas,vin.yaml
F: drivers/media/platform/rcar-vin/
MEDIA DRIVERS FOR RENESAS - VSP1
@@ -10696,7 +10735,7 @@ L: megaraidlinux.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
W: http://www.avagotech.com/support/
S: Maintained
-F: Documentation/scsi/megaraid.txt
+F: Documentation/scsi/megaraid.rst
F: drivers/scsi/megaraid.*
F: drivers/scsi/megaraid/
@@ -10948,6 +10987,16 @@ M: Vladimir Vid <vladimir.vid@sartura.hr>
S: Maintained
F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+MHI BUS
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Hemant Kumar <hemantk@codeaurora.org>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git
+F: Documentation/mhi/
+F: drivers/bus/mhi/
+F: include/linux/mhi.h
+
MICROBLAZE ARCHITECTURE
M: Michal Simek <monstr@monstr.eu>
W: http://www.monstr.eu/fdt/
@@ -11150,7 +11199,7 @@ F: drivers/scsi/smartpqi/Kconfig
F: drivers/scsi/smartpqi/Makefile
F: include/linux/cciss*.h
F: include/uapi/linux/cciss*.h
-F: Documentation/scsi/smartpqi.txt
+F: Documentation/scsi/smartpqi.rst
MICROSEMI ETHERNET SWITCH DRIVER
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
@@ -11511,6 +11560,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git nand/next
+C: irc://irc.oftc.net/mtd
S: Maintained
F: drivers/mtd/nand/
F: include/linux/mtd/*nand*.h
@@ -11531,7 +11581,7 @@ M: Finn Thain <fthain@telegraphics.com.au>
M: Michael Schmitz <schmitzmic@gmail.com>
L: linux-scsi@vger.kernel.org
S: Maintained
-F: Documentation/scsi/g_NCR5380.txt
+F: Documentation/scsi/g_NCR5380.rst
F: drivers/scsi/NCR5380.*
F: drivers/scsi/arm/cumana_1.c
F: drivers/scsi/arm/oak.c
@@ -11860,7 +11910,7 @@ NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER
M: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/
S: Maintained
-F: Documentation/scsi/NinjaSCSI.txt
+F: Documentation/scsi/NinjaSCSI.rst
F: drivers/scsi/pcmcia/nsp_*
NINJA SCSI-32Bi/UDE PCI/CARDBUS SCSI HOST ADAPTER DRIVER
@@ -11868,7 +11918,7 @@ M: GOTO Masanori <gotom@debian.or.jp>
M: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/
S: Maintained
-F: Documentation/scsi/NinjaSCSI.txt
+F: Documentation/scsi/NinjaSCSI.rst
F: drivers/scsi/nsp32*
NIOS2 ARCHITECTURE
@@ -12478,7 +12528,6 @@ F: Documentation/ABI/testing/sysfs-firmware-ofw
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
-M: Mark Rutland <mark.rutland@arm.com>
L: devicetree@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
@@ -12600,16 +12649,6 @@ W: http://wireless.kernel.org/en/users/Drivers/p54
S: Maintained
F: drivers/net/wireless/intersil/p54/
-PA SEMI ETHERNET DRIVER
-L: netdev@vger.kernel.org
-S: Orphan
-F: drivers/net/ethernet/pasemi/*
-
-PA SEMI SMBUS DRIVER
-L: linux-i2c@vger.kernel.org
-S: Orphan
-F: drivers/i2c/busses/i2c-pasemi.c
-
PACKING
M: Vladimir Oltean <olteanv@gmail.com>
L: netdev@vger.kernel.org
@@ -12799,7 +12838,7 @@ PCI DRIVER FOR CADENCE PCIE IP
M: Tom Joseph <tjoseph@cadence.com>
L: linux-pci@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/cdns,*.txt
+F: Documentation/devicetree/bindings/pci/cdns,*
F: drivers/pci/controller/cadence/
PCI DRIVER FOR FREESCALE LAYERSCAPE
@@ -12812,6 +12851,14 @@ L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: drivers/pci/controller/dwc/*layerscape*
+PCI DRIVER FOR NXP LAYERSCAPE GEN4 CONTROLLER
+M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
+F:	drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+
PCI DRIVER FOR GENERIC OF HOSTS
M: Will Deacon <will@kernel.org>
L: linux-pci@vger.kernel.org
@@ -12854,7 +12901,7 @@ M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F: drivers/pci/controller/pcie-mobiveil.c
+F: drivers/pci/controller/mobiveil/pcie-mobiveil*
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
@@ -13219,6 +13266,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
F: samples/pidfd/
F: tools/testing/selftests/pidfd/
+F: tools/testing/selftests/pid_namespace/
F: tools/testing/selftests/clone3/
K: (?i)pidfd
K: (?i)clone3
@@ -13227,21 +13275,13 @@ K: \b(clone_args|kernel_clone_args)\b
PIN CONTROL SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-gpio@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
F: Documentation/devicetree/bindings/pinctrl/
F: Documentation/driver-api/pinctl.rst
F: drivers/pinctrl/
F: include/linux/pinctrl/
-PIN CONTROLLER - MICROCHIP AT91
-M: Ludovic Desroches <ludovic.desroches@microchip.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: linux-gpio@vger.kernel.org
-S: Supported
-F: drivers/pinctrl/pinctrl-at91*
-F: drivers/gpio/gpio-sama5d2-piobu.c
-
PIN CONTROLLER - FREESCALE
M: Dong Aisheng <aisheng.dong@nxp.com>
M: Fabio Estevam <festevam@gmail.com>
@@ -13250,14 +13290,14 @@ M: Stefan Agner <stefan@agner.ch>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
L: linux-gpio@vger.kernel.org
S: Maintained
-F: drivers/pinctrl/freescale/
F: Documentation/devicetree/bindings/pinctrl/fsl,*
+F: drivers/pinctrl/freescale/
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Andy Shevchenko <andy@kernel.org>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
F: drivers/pinctrl/intel/
PIN CONTROLLER - MEDIATEK
@@ -13268,18 +13308,26 @@ F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
F: drivers/pinctrl/mediatek/
+PIN CONTROLLER - MICROCHIP AT91
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-gpio@vger.kernel.org
+S: Supported
+F: drivers/gpio/gpio-sama5d2-piobu.c
+F: drivers/pinctrl/pinctrl-at91*
+
PIN CONTROLLER - QUALCOMM
M: Bjorn Andersson <bjorn.andersson@linaro.org>
-S: Maintained
L: linux-arm-msm@vger.kernel.org
+S: Maintained
F: Documentation/devicetree/bindings/pinctrl/qcom,*.txt
F: drivers/pinctrl/qcom/
PIN CONTROLLER - RENESAS
M: Geert Uytterhoeven <geert+renesas@glider.be>
L: linux-renesas-soc@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
F: drivers/pinctrl/pinctrl-rz*
F: drivers/pinctrl/sh-pfc/
@@ -13289,12 +13337,12 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
-Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
+Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
+F: Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
F: drivers/pinctrl/samsung/
F: include/dt-bindings/pinctrl/samsung.h
-F: Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
PIN CONTROLLER - SINGLE
M: Tony Lindgren <tony@atomide.com>
@@ -13307,8 +13355,8 @@ F: drivers/pinctrl/pinctrl-single.c
PIN CONTROLLER - ST SPEAR
M: Viresh Kumar <vireshk@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.st.com/spear
S: Maintained
+W: http://www.st.com/spear
F: drivers/pinctrl/spear/
PISTACHIO SOC SUPPORT
@@ -14147,6 +14195,7 @@ S: Supported
F: arch/x86/kernel/cpu/resctrl/
F: arch/x86/include/asm/resctrl_sched.h
F: Documentation/x86/resctrl*
+F: tools/testing/selftests/resctrl/
READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@kernel.org>
@@ -14546,7 +14595,7 @@ F: Documentation/s390/
F: Documentation/driver-api/s390-drivers.rst
S390 COMMON I/O LAYER
-M: Sebastian Ott <sebott@linux.ibm.com>
+M: Vineeth Vijayan <vneethv@linux.ibm.com>
M: Peter Oberparleiter <oberpar@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -14588,7 +14637,7 @@ S: Supported
F: drivers/s390/net/
S390 PCI SUBSYSTEM
-M: Sebastian Ott <sebott@linux.ibm.com>
+M: Niklas Schnelle <schnelle@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -14882,7 +14931,7 @@ M: Doug Gilbert <dgilbert@interlog.com>
L: linux-scsi@vger.kernel.org
W: http://sg.danny.cz/sg
S: Maintained
-F: Documentation/scsi/scsi-generic.txt
+F: Documentation/scsi/scsi-generic.rst
F: drivers/scsi/sg.c
F: include/scsi/sg.h
@@ -14902,7 +14951,7 @@ SCSI TAPE DRIVER
M: Kai Mäkisara <Kai.Makisara@kolumbus.fi>
L: linux-scsi@vger.kernel.org
S: Maintained
-F: Documentation/scsi/st.txt
+F: Documentation/scsi/st.rst
F: drivers/scsi/st.*
F: drivers/scsi/st_*.h
@@ -15089,7 +15138,7 @@ SERIAL DEVICE BUS
M: Rob Herring <robh@kernel.org>
L: linux-serial@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/serial/slave-device.txt
+F: Documentation/devicetree/bindings/serial/serial.yaml
F: drivers/tty/serdev/
F: include/linux/serdev.h
@@ -15708,6 +15757,17 @@ F: sound/soc/
F: include/dt-bindings/sound/
F: include/sound/soc*
+SOUND - SOUND OPEN FIRMWARE (SOF) DRIVERS
+M: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+M: Liam Girdwood <lgirdwood@gmail.com>
+M: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+M: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+M: Daniel Baluta <daniel.baluta@nxp.com>
+L: sound-open-firmware@alsa-project.org (moderated for non-subscribers)
+W: https://github.com/thesofproject/linux/
+S: Supported
+F: sound/soc/sof/
+
SOUNDWIRE SUBSYSTEM
M: Vinod Koul <vkoul@kernel.org>
M: Sanyog Kale <sanyog.r.kale@intel.com>
@@ -15782,6 +15842,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git spi-nor/next
+C: irc://irc.oftc.net/mtd
S: Maintained
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
@@ -16015,7 +16076,6 @@ F: drivers/pwm/pwm-stm32*
F: include/linux/*/stm32-*tim*
F: Documentation/ABI/testing/*timer-stm32
F: Documentation/devicetree/bindings/*/*stm32-*timer*
-F: Documentation/devicetree/bindings/pwm/pwm-stm32*
STMMAC ETHERNET DRIVER
M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
@@ -17139,6 +17199,18 @@ W: http://linuxtv.org
S: Maintained
F: drivers/media/pci/tw686x/
+UACCE ACCELERATOR FRAMEWORK
+M: Zhangfei Gao <zhangfei.gao@linaro.org>
+M: Zhou Wang <wangzhou1@hisilicon.com>
+L: linux-accelerators@lists.ozlabs.org
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-driver-uacce
+F: Documentation/misc-devices/uacce.rst
+F: drivers/misc/uacce/
+F: include/linux/uacce.h
+F: include/uapi/misc/uacce/
+
UBI FILE SYSTEM (UBIFS)
M: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
@@ -17235,7 +17307,7 @@ R: Alim Akhtar <alim.akhtar@samsung.com>
R: Avri Altman <avri.altman@wdc.com>
L: linux-scsi@vger.kernel.org
S: Supported
-F: Documentation/scsi/ufs.txt
+F: Documentation/scsi/ufs.rst
F: drivers/scsi/ufs/
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
diff --git a/arch/.gitignore b/arch/.gitignore
index 741468920320..4191da401dbb 100644
--- a/arch/.gitignore
+++ b/arch/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
i386
x86_64
diff --git a/arch/Kconfig b/arch/Kconfig
index 516f2b05bd66..786a85d4ad40 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -248,11 +248,18 @@ config ARCH_HAS_SET_DIRECT_MAP
bool
#
-# Select if arch has an uncached kernel segment and provides the
-# uncached_kernel_address / cached_kernel_address symbols to use it
+# Select if the architecture provides the arch_dma_set_uncached symbol to
+# either provide an uncached segment alias for a DMA allocation, or
+# to remap the page tables in place.
#
-config ARCH_HAS_UNCACHED_SEGMENT
- select ARCH_HAS_DMA_PREP_COHERENT
+config ARCH_HAS_DMA_SET_UNCACHED
+ bool
+
+#
+# Select if the architecture provides the arch_dma_clear_uncached symbol
+# to undo an in-place page table remap for uncached access.
+#
+config ARCH_HAS_DMA_CLEAR_UNCACHED
bool
# Select if arch init_task must go in the __init_task_data section
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index f4ec420d7f2d..6293675db164 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -36,7 +36,6 @@ CONFIG_BLK_DEV_CY82C693=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
@@ -53,7 +52,8 @@ CONFIG_NET_PCI=y
CONFIG_YELLOWFIN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_REISERFS_FS=m
CONFIG_ISO9660_FS=y
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 89e87bbc987f..42911c8340c7 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -1,17 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += compat.h
-generic-y += exec.h
generic-y += export.h
-generic-y += fb.h
-generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += trace_clock.h
-generic-y += current.h
-generic-y += kprobes.h
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 7ee144f484f1..9b521c857436 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -8,8 +8,6 @@
#include <asm/smp.h>
-struct bootmem_data_t; /* stupid forward decl. */
-
/*
* Following are macros that are specific to this numa platform.
*/
diff --git a/arch/alpha/kernel/.gitignore b/arch/alpha/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/alpha/kernel/.gitignore
+++ b/arch/alpha/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index cd9a112d67ff..32850e45834b 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -187,10 +187,6 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
extern void pcibios_claim_one_bus(struct pci_bus *);
-static struct resource irongate_io = {
- .name = "Irongate PCI IO",
- .flags = IORESOURCE_IO,
-};
static struct resource irongate_mem = {
.name = "Irongate PCI MEM",
.flags = IORESOURCE_MEM,
@@ -208,17 +204,19 @@ nautilus_init_pci(void)
struct pci_controller *hose = hose_head;
struct pci_host_bridge *bridge;
struct pci_bus *bus;
- struct pci_dev *irongate;
unsigned long bus_align, bus_size, pci_mem;
unsigned long memtop = max_low_pfn << PAGE_SHIFT;
- int ret;
bridge = pci_alloc_host_bridge(0);
if (!bridge)
return;
+ /* Use default IO. */
pci_add_resource(&bridge->windows, &ioport_resource);
- pci_add_resource(&bridge->windows, &iomem_resource);
+	/* Irongate PCI memory aperture, calculate required size before
+ setting it up. */
+ pci_add_resource(&bridge->windows, &irongate_mem);
+
pci_add_resource(&bridge->windows, &busn_resource);
bridge->dev.parent = NULL;
bridge->sysdata = hose;
@@ -226,59 +224,49 @@ nautilus_init_pci(void)
bridge->ops = alpha_mv.pci_ops;
bridge->swizzle_irq = alpha_mv.pci_swizzle;
bridge->map_irq = alpha_mv.pci_map_irq;
+ bridge->size_windows = 1;
/* Scan our single hose. */
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret) {
+ if (pci_scan_root_bus_bridge(bridge)) {
pci_free_host_bridge(bridge);
return;
}
-
bus = hose->bus = bridge->bus;
pcibios_claim_one_bus(bus);
- irongate = pci_get_domain_bus_and_slot(pci_domain_nr(bus), 0, 0);
- bus->self = irongate;
- bus->resource[0] = &irongate_io;
- bus->resource[1] = &irongate_mem;
-
pci_bus_size_bridges(bus);
- /* IO port range. */
- bus->resource[0]->start = 0;
- bus->resource[0]->end = 0xffff;
-
- /* Set up PCI memory range - limit is hardwired to 0xffffffff,
- base must be at aligned to 16Mb. */
- bus_align = bus->resource[1]->start;
- bus_size = bus->resource[1]->end + 1 - bus_align;
+ /* Now we've got the size and alignment of PCI memory resources
+ stored in irongate_mem. Set up the PCI memory range: limit is
+	   hardwired to 0xffffffff, base must be aligned to 16MB. */
+ bus_align = irongate_mem.start;
+ bus_size = irongate_mem.end + 1 - bus_align;
if (bus_align < 0x1000000UL)
bus_align = 0x1000000UL;
pci_mem = (0x100000000UL - bus_size) & -bus_align;
+ irongate_mem.start = pci_mem;
+ irongate_mem.end = 0xffffffffUL;
- bus->resource[1]->start = pci_mem;
- bus->resource[1]->end = 0xffffffffUL;
- if (request_resource(&iomem_resource, bus->resource[1]) < 0)
+ /* Register our newly calculated PCI memory window in the resource
+ tree. */
+ if (request_resource(&iomem_resource, &irongate_mem) < 0)
printk(KERN_ERR "Failed to request MEM on hose 0\n");
+ printk(KERN_INFO "Irongate pci_mem %pR\n", &irongate_mem);
+
if (pci_mem < memtop)
memtop = pci_mem;
if (memtop > alpha_mv.min_mem_address) {
free_reserved_area(__va(alpha_mv.min_mem_address),
__va(memtop), -1, NULL);
- printk("nautilus_init_pci: %ldk freed\n",
+ printk(KERN_INFO "nautilus_init_pci: %ldk freed\n",
(memtop - alpha_mv.min_mem_address) >> 10);
}
-
if ((IRONGATE0->dev_vendor >> 16) > 0x7006) /* Albacore? */
IRONGATE0->pci_mem = pci_mem;
pci_bus_assign_resources(bus);
-
- /* pci_common_swizzle() relies on bus->self being NULL
- for the root bus, so just clear it. */
- bus->self = NULL;
pci_bus_add_devices(bus);
}
diff --git a/arch/alpha/kernel/syscalls/syscallhdr.sh b/arch/alpha/kernel/syscalls/syscallhdr.sh
index e5b99bd2e5e7..1780e861492a 100644
--- a/arch/alpha/kernel/syscalls/syscallhdr.sh
+++ b/arch/alpha/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 741e61ef9d3f..c2d7b6d7bac7 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,7 +89,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
const struct exception_table_entry *fixup;
int si_code = SEGV_MAPERR;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
(or is suppressed by the PALcode). Support that for older CPUs
@@ -150,7 +150,7 @@ retry:
the fault. */
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -169,7 +169,7 @@ retry:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
/* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
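
For context, FAULT_FLAG_DEFAULT replaces the open-coded flag pair above; in include/linux/mm.h of this series it is defined as the old flags plus the new FAULT_FLAG_INTERRUPTIBLE:

	#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
				     FAULT_FLAG_KILLABLE | \
				     FAULT_FLAG_INTERRUPTIBLE)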
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 7124ab82dfa3..ff306246d0f8 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -401,13 +401,61 @@ config ARC_HAS_DIV_REM
default y
config ARC_HAS_ACCL_REGS
- bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
+ bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6 and/or DSP)"
default y
help
Depending on the configuration, CPU can contain accumulator reg-pair
(also referred to as r58:r59). These can also be used by gcc as GPR so
kernel needs to save/restore per process
+config ARC_DSP_HANDLED
+ def_bool n
+
+config ARC_DSP_SAVE_RESTORE_REGS
+ def_bool n
+
+choice
+ prompt "DSP support"
+ default ARC_DSP_NONE
+ help
+ Depending on the configuration, CPU can contain DSP registers
+ (ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL).
+	  Below are options describing how to handle these registers in
+ interrupt entry / exit and in context switch.
+
+config ARC_DSP_NONE
+ bool "No DSP extension presence in HW"
+ help
+ No DSP extension presence in HW
+
+config ARC_DSP_KERNEL
+ bool "DSP extension in HW, no support for userspace"
+ select ARC_HAS_ACCL_REGS
+ select ARC_DSP_HANDLED
+ help
+ DSP extension presence in HW, no support for DSP-enabled userspace
+ applications. We don't save / restore DSP registers and only do
+	  some minimal preparations so userspace won't be able to break the kernel
+
+config ARC_DSP_USERSPACE
+ bool "Support DSP for userspace apps"
+ select ARC_HAS_ACCL_REGS
+ select ARC_DSP_HANDLED
+ select ARC_DSP_SAVE_RESTORE_REGS
+ help
+ DSP extension presence in HW, support save / restore DSP registers to
+ run DSP-enabled userspace applications
+
+config ARC_DSP_AGU_USERSPACE
+ bool "Support DSP with AGU for userspace apps"
+ select ARC_HAS_ACCL_REGS
+ select ARC_DSP_HANDLED
+ select ARC_DSP_SAVE_RESTORE_REGS
+ help
+ DSP and AGU extensions presence in HW, support save / restore DSP
+ and AGU registers to run DSP-enabled userspace applications
+endchoice
+
config ARC_IRQ_NO_AUTOSAVE
bool "Disable hardware autosave regfile on interrupts"
default n
diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore
index c4c5fd529c25..675db1494028 100644
--- a/arch/arc/boot/.gitignore
+++ b/arch/arc/boot/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
uImage
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 1d109b06e7d8..99d3e7175bf7 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -305,7 +305,6 @@
pgu@17000 {
compatible = "snps,arcpgu";
reg = <0x17000 0x400>;
- encoder-slave = <&adv7511>;
clocks = <&pguclk>;
clock-names = "pxlclk";
memory-region = <&frame_buffer>;
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 1b505694691e..81f4edec0c2a 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,28 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += bugs.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
generic-y += extable.h
-generic-y += ftrace.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += parport.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += topology.h
-generic-y += trace_clock.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index f7e432448e4b..2162023195c5 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -118,6 +118,32 @@
#define ARC_AUX_DPFP_2H 0x304
#define ARC_AUX_DPFP_STAT 0x305
+/*
+ * DSP-related registers
+ * Register names must correspond to dsp_callee_regs structure field names
+ * for automatic offset calculation in DSP_AUX_SAVE_RESTORE macros.
+ */
+#define ARC_AUX_DSP_BUILD 0x7A
+#define ARC_AUX_ACC0_LO 0x580
+#define ARC_AUX_ACC0_GLO 0x581
+#define ARC_AUX_ACC0_HI 0x582
+#define ARC_AUX_ACC0_GHI 0x583
+#define ARC_AUX_DSP_BFLY0 0x598
+#define ARC_AUX_DSP_CTRL 0x59F
+#define ARC_AUX_DSP_FFT_CTRL 0x59E
+
+#define ARC_AUX_AGU_BUILD 0xCC
+#define ARC_AUX_AGU_AP0 0x5C0
+#define ARC_AUX_AGU_AP1 0x5C1
+#define ARC_AUX_AGU_AP2 0x5C2
+#define ARC_AUX_AGU_AP3 0x5C3
+#define ARC_AUX_AGU_OS0 0x5D0
+#define ARC_AUX_AGU_OS1 0x5D1
+#define ARC_AUX_AGU_MOD0 0x5E0
+#define ARC_AUX_AGU_MOD1 0x5E1
+#define ARC_AUX_AGU_MOD2 0x5E2
+#define ARC_AUX_AGU_MOD3 0x5E3
+
#ifndef __ASSEMBLY__
#include <soc/arc/aux.h>
diff --git a/arch/arc/include/asm/asserts.h b/arch/arc/include/asm/asserts.h
new file mode 100644
index 000000000000..108f33be6aa5
--- /dev/null
+++ b/arch/arc/include/asm/asserts.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_ASSERTS_H
+#define __ASM_ARC_ASSERTS_H
+
+/* Helpers to sanitize config options. */
+
+void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena);
+void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena);
+
+/*
+ * Check required config option:
+ * - panic in case of OPT enabled but corresponding HW absent.
+ * - warn in case of OPT disabled but corresponding HW exists.
+*/
+#define CHK_OPT_STRICT(opt_name, hw_exists) \
+({ \
+ chk_opt_strict(#opt_name, hw_exists, IS_ENABLED(opt_name)); \
+})
+
+/*
+ * Check optional config option:
+ * - panic in case of OPT enabled but corresponding HW absent.
+*/
+#define CHK_OPT_WEAK(opt_name, hw_exists) \
+({ \
+ chk_opt_weak(#opt_name, hw_exists, IS_ENABLED(opt_name)); \
+})
+
+#endif /* __ASM_ARC_ASSERTS_H */
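
Since both helpers stringize their first argument, a call site expands mechanically; for illustration (this expansion is not itself part of the patch):

	/* CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present) becomes: */
	chk_opt_strict("CONFIG_ARC_FPU_SAVE_RESTORE", present,
		       IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE));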
diff --git a/arch/arc/include/asm/dsp-impl.h b/arch/arc/include/asm/dsp-impl.h
new file mode 100644
index 000000000000..e1aa212ca6eb
--- /dev/null
+++ b/arch/arc/include/asm/dsp-impl.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_DSP_IMPL_H
+#define __ASM_ARC_DSP_IMPL_H
+
+#include <asm/dsp.h>
+
+#define DSP_CTRL_DISABLED_ALL 0
+
+#ifdef __ASSEMBLY__
+
+/* clobbers r5 register */
+.macro DSP_EARLY_INIT
+ lr r5, [ARC_AUX_DSP_BUILD]
+ bmsk r5, r5, 7
+ breq r5, 0, 1f
+ mov r5, DSP_CTRL_DISABLED_ALL
+ sr r5, [ARC_AUX_DSP_CTRL]
+1:
+.endm
+
+/* clobbers r10, r11 registers pair */
+.macro DSP_SAVE_REGFILE_IRQ
+#if defined(CONFIG_ARC_DSP_KERNEL)
+ /*
+ * Drop any changes to DSP_CTRL made by userspace so userspace won't be
+	 * able to break the kernel - reset it to DSP_CTRL_DISABLED_ALL value
+ */
+ mov r10, DSP_CTRL_DISABLED_ALL
+ sr r10, [ARC_AUX_DSP_CTRL]
+
+#elif defined(CONFIG_ARC_DSP_SAVE_RESTORE_REGS)
+ /*
+ * Save DSP_CTRL register and reset it to value suitable for kernel
+ * (DSP_CTRL_DISABLED_ALL)
+ */
+ mov r10, DSP_CTRL_DISABLED_ALL
+ aex r10, [ARC_AUX_DSP_CTRL]
+ st r10, [sp, PT_DSP_CTRL]
+
+#endif
+.endm
+
+/* clobbers r10, r11 registers pair */
+.macro DSP_RESTORE_REGFILE_IRQ
+#if defined(CONFIG_ARC_DSP_SAVE_RESTORE_REGS)
+ ld r10, [sp, PT_DSP_CTRL]
+ sr r10, [ARC_AUX_DSP_CTRL]
+
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#include <linux/sched.h>
+#include <asm/asserts.h>
+#include <asm/switch_to.h>
+
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+
+/*
+ * As we save new and restore old AUX register value in the same place we
+ * can optimize a bit and use AEX instruction (swap contents of an auxiliary
+ * register with a core register) instead of LR + SR pair.
+ */
+#define AUX_SAVE_RESTORE(_saveto, _readfrom, _offt, _aux) \
+do { \
+ long unsigned int _scratch; \
+ \
+ __asm__ __volatile__( \
+ "ld %0, [%2, %4] \n" \
+ "aex %0, [%3] \n" \
+ "st %0, [%1, %4] \n" \
+ : \
+ "=&r" (_scratch) /* must be early clobber */ \
+ : \
+ "r" (_saveto), \
+ "r" (_readfrom), \
+ "Ir" (_aux), \
+ "Ir" (_offt) \
+ : \
+ "memory" \
+ ); \
+} while (0)
+
+#define DSP_AUX_SAVE_RESTORE(_saveto, _readfrom, _aux) \
+ AUX_SAVE_RESTORE(_saveto, _readfrom, \
+ offsetof(struct dsp_callee_regs, _aux), \
+ ARC_AUX_##_aux)
+
+static inline void dsp_save_restore(struct task_struct *prev,
+ struct task_struct *next)
+{
+ long unsigned int *saveto = &prev->thread.dsp.ACC0_GLO;
+ long unsigned int *readfrom = &next->thread.dsp.ACC0_GLO;
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, ACC0_GLO);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, ACC0_GHI);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, DSP_BFLY0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, DSP_FFT_CTRL);
+
+#ifdef CONFIG_ARC_DSP_AGU_USERSPACE
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP1);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP2);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP3);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_OS0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_OS1);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD1);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD2);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD3);
+#endif /* CONFIG_ARC_DSP_AGU_USERSPACE */
+}
+
+#else /* !CONFIG_ARC_DSP_SAVE_RESTORE_REGS */
+#define dsp_save_restore(p, n)
+#endif /* CONFIG_ARC_DSP_SAVE_RESTORE_REGS */
+
+static inline bool dsp_exist(void)
+{
+ struct bcr_generic bcr;
+
+ READ_BCR(ARC_AUX_DSP_BUILD, bcr);
+ return !!bcr.ver;
+}
+
+static inline bool agu_exist(void)
+{
+ struct bcr_generic bcr;
+
+ READ_BCR(ARC_AUX_AGU_BUILD, bcr);
+ return !!bcr.ver;
+}
+
+static inline void dsp_config_check(void)
+{
+ CHK_OPT_STRICT(CONFIG_ARC_DSP_HANDLED, dsp_exist());
+ CHK_OPT_WEAK(CONFIG_ARC_DSP_AGU_USERSPACE, agu_exist());
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_ARC_DSP_IMPL_H */
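
DSP_AUX_SAVE_RESTORE above hinges on a naming convention: each dsp_callee_regs field is named exactly after its AUX register, so a single macro argument yields both the struct offset (offsetof) and the register number (ARC_AUX_## token pasting). A self-contained user-space sketch of that pattern - the fake register file and simulated swap are illustrative only, the names mirror the patch:

	#include <stddef.h>
	#include <stdio.h>

	#define ARC_AUX_ACC0_GLO 0x581          /* register numbers from the patch */
	#define ARC_AUX_ACC0_GHI 0x583

	struct dsp_callee_regs {                /* field names == register names */
		unsigned long ACC0_GLO, ACC0_GHI;
	};

	static unsigned long aux_regs[0x600];   /* fake AUX register file */

	/* Save the live register into prev and load next's copy - the C
	   analogue of the AEX-based swap done per register in the real macro. */
	#define DSP_AUX_SAVE_RESTORE(_prev, _next, _aux)                \
	do {                                                            \
		size_t _off = offsetof(struct dsp_callee_regs, _aux);  \
		*(unsigned long *)((char *)(_prev) + _off) =            \
			aux_regs[ARC_AUX_##_aux];                       \
		aux_regs[ARC_AUX_##_aux] =                              \
			*(unsigned long *)((char *)(_next) + _off);     \
	} while (0)

	int main(void)
	{
		struct dsp_callee_regs prev = { 0 }, next = { .ACC0_GLO = 42 };

		aux_regs[ARC_AUX_ACC0_GLO] = 7;
		DSP_AUX_SAVE_RESTORE(&prev, &next, ACC0_GLO);
		printf("saved %lu, hw now %lu\n",
		       prev.ACC0_GLO, aux_regs[ARC_AUX_ACC0_GLO]); /* 7 and 42 */
		return 0;
	}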
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
new file mode 100644
index 000000000000..202c78e56704
--- /dev/null
+++ b/arch/arc/include/asm/dsp.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_DSP_H
+#define __ASM_ARC_DSP_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * DSP-related saved registers - need to be saved only when the task is
+ * scheduled out.
+ * Structure field names must correspond to aux register definitions for
+ * automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
+ */
+struct dsp_callee_regs {
+ unsigned long ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_FFT_CTRL;
+#ifdef CONFIG_ARC_DSP_AGU_USERSPACE
+ unsigned long AGU_AP0, AGU_AP1, AGU_AP2, AGU_AP3;
+ unsigned long AGU_OS0, AGU_OS1;
+ unsigned long AGU_MOD0, AGU_MOD1, AGU_MOD2, AGU_MOD3;
+#endif
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_ARC_DSP_H */
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 0b8b63d0bec1..ae0aa5323be1 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -4,6 +4,7 @@
#define __ASM_ARC_ENTRY_ARCV2_H
#include <asm/asm-offsets.h>
+#include <asm/dsp-impl.h>
#include <asm/irqflags-arcv2.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */
@@ -165,6 +166,8 @@
ST2 r58, r59, PT_r58
#endif
+ /* clobbers r10, r11 registers pair */
+ DSP_SAVE_REGFILE_IRQ
.endm
/*------------------------------------------------------------------------*/
@@ -189,6 +192,9 @@
ld r25, [sp, PT_user_r25]
#endif
+ /* clobbers r10, r11 registers pair */
+ DSP_RESTORE_REGFILE_IRQ
+
#ifdef CONFIG_ARC_HAS_ACCL_REGS
LD2 r58, r59, PT_r58
#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index ec532d1e0725..0fcea5bad343 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -14,6 +14,7 @@
#ifndef __ASSEMBLY__
#include <asm/ptrace.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#ifdef CONFIG_ARC_PLAT_EZNPS
@@ -31,6 +32,9 @@ struct thread_struct {
unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ struct dsp_callee_regs dsp;
+#endif
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
struct arc_fpu fpu;
#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index ba9854ef39e8..2fdb87addadc 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -91,6 +91,9 @@ struct pt_regs {
#ifdef CONFIG_ARC_HAS_ACCL_REGS
unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
#endif
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ unsigned long DSP_CTRL;
+#endif
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
index aadf65b2b56c..4a3d67989d19 100644
--- a/arch/arc/include/asm/switch_to.h
+++ b/arch/arc/include/asm/switch_to.h
@@ -9,6 +9,7 @@
#ifndef __ASSEMBLY__
#include <linux/sched.h>
+#include <asm/dsp-impl.h>
#include <asm/fpu.h>
#ifdef CONFIG_ARC_PLAT_EZNPS
@@ -24,6 +25,7 @@ struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
#define switch_to(prev, next, last) \
do { \
ARC_EZNPS_DP_PREV(prev, next); \
+ dsp_save_restore(prev, next); \
fpu_save_restore(prev, next); \
last = __switch_to(prev, next);\
mb(); \
diff --git a/arch/arc/kernel/.gitignore b/arch/arc/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/arc/kernel/.gitignore
+++ b/arch/arc/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index c783bcd35eb8..0e884036ab74 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
#include <asm/hardirq.h>
#include <asm/page.h>
+
int main(void)
{
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
@@ -75,6 +76,9 @@ int main(void)
OFFSET(PT_r58, pt_regs, r58);
OFFSET(PT_r59, pt_regs, r59);
#endif
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ OFFSET(PT_DSP_CTRL, pt_regs, DSP_CTRL);
+#endif
return 0;
}
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 6f41265f6250..6eb23f1545ee 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -14,6 +14,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/dsp-impl.h>
#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -59,6 +60,9 @@
#endif
kflag r5
#endif
+	; Config DSP_CTRL properly, so the kernel may use integer multiply,
+ ; multiply-accumulate, and divide operations
+ DSP_EARLY_INIT
.endm
.section .init.text, "ax",@progbits
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index aa41af6ef4ac..b2b1cb645d9e 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -19,6 +19,7 @@
#include <uapi/linux/mount.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
+#include <asm/asserts.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -26,6 +27,7 @@
#include <asm/unwind.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>
+#include <asm/dsp-impl.h>
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
@@ -389,11 +391,24 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
return buf;
}
+void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena)
+{
+ if (hw_exists && !opt_ena)
+ pr_warn(" ! Enable %s for working apps\n", opt_name);
+ else if (!hw_exists && opt_ena)
+ panic("Disable %s, hardware NOT present\n", opt_name);
+}
+
+void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
+{
+ if (!hw_exists && opt_ena)
+ panic("Disable %s, hardware NOT present\n", opt_name);
+}
+
static void arc_chk_core_config(void)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
- int saved = 0, present = 0;
- char *opt_nm = NULL;
+ int present = 0;
if (!cpu->extn.timer0)
panic("Timer0 is not present!\n");
@@ -425,23 +440,16 @@ static void arc_chk_core_config(void)
*/
if (is_isa_arcompact()) {
- opt_nm = "CONFIG_ARC_FPU_SAVE_RESTORE";
- saved = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
-
/* only DPDP checked since SP has no arch visible regs */
present = cpu->extn.fpu_dp;
+ CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
} else {
- opt_nm = "CONFIG_ARC_HAS_ACCL_REGS";
- saved = IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS);
-
/* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
- }
+ CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
- if (present && !saved)
- pr_warn("Enable %s for working apps\n", opt_nm);
- else if (!present && saved)
- panic("Disable %s, hardware NOT present\n", opt_nm);
+ dsp_config_check();
+ }
}
/*
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index fb86bc3e9b35..92b339c7adba 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -100,7 +100,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
(regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
- flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (write)
@@ -133,29 +133,20 @@ retry:
fault = handle_mm_fault(vma, address, flags);
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+ }
+
/*
- * Fault retry nuances
+ * Fault retry nuances, mmap_sem already relinquished by core mm
*/
- if (unlikely(fault & VM_FAULT_RETRY)) {
-
- /*
- * If fault needs to be retried, handle any pending signals
- * first (by returning to user mode).
- * mmap_sem already relinquished by core mm for RETRY case
- */
- if (fatal_signal_pending(current)) {
- if (!user_mode(regs))
- goto no_context;
- return;
- }
- /*
- * retry state machine
- */
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ if (unlikely((fault & VM_FAULT_RETRY) &&
+ (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
bad_area:
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 03bbfc312fe7..66a04f6f4775 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2090,5 +2090,3 @@ source "drivers/firmware/Kconfig"
if CRYPTO
source "arch/arm/crypto/Kconfig"
endif
-
-source "arch/arm/kvm/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index b70d7debf5ca..f46e18a77645 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1201,23 +1201,49 @@ choice
config STM32F4_DEBUG_UART
bool "Use STM32F4 UART for low-level debug"
- depends on ARCH_STM32
+ depends on MACH_STM32F429 || MACH_STM32F469
select DEBUG_STM32_UART
help
Say Y here if you want kernel low-level debugging support
on STM32F4 based platforms, which default UART is wired on
- USART1.
+ USART1, but another UART instance can be selected by modifying
+ CONFIG_DEBUG_UART_PHYS.
If unsure, say N.
config STM32F7_DEBUG_UART
bool "Use STM32F7 UART for low-level debug"
- depends on ARCH_STM32
+ depends on MACH_STM32F746 || MACH_STM32F769
select DEBUG_STM32_UART
help
Say Y here if you want kernel low-level debugging support
on STM32F7 based platforms, which default UART is wired on
- USART1.
+ USART1, but another UART instance can be selected by modifying
+ CONFIG_DEBUG_UART_PHYS.
+
+ If unsure, say N.
+
+ config STM32H7_DEBUG_UART
+ bool "Use STM32H7 UART for low-level debug"
+ depends on MACH_STM32H743
+ select DEBUG_STM32_UART
+ help
+ Say Y here if you want kernel low-level debugging support
+ on STM32H7 based platforms, which default UART is wired on
+ USART1, but another UART instance can be selected by modifying
+ CONFIG_DEBUG_UART_PHYS.
+
+ If unsure, say N.
+
+ config STM32MP1_DEBUG_UART
+ bool "Use STM32MP1 UART for low-level debug"
+ depends on MACH_STM32MP157
+ select DEBUG_STM32_UART
+ help
+ Say Y here if you want kernel low-level debugging support
+	  on STM32MP1 based platforms, which default UART is wired on
+ UART4, but another UART instance can be selected by modifying
+ CONFIG_DEBUG_UART_PHYS and CONFIG_DEBUG_UART_VIRT.
If unsure, say N.
@@ -1619,6 +1645,9 @@ config DEBUG_UART_PHYS
default 0x3e000000 if DEBUG_BCM_KONA_UART
default 0x3f201000 if DEBUG_BCM2836
default 0x4000e400 if DEBUG_LL_UART_EFM32
+ default 0x40010000 if STM32MP1_DEBUG_UART
+ default 0x40011000 if STM32F4_DEBUG_UART || STM32F7_DEBUG_UART || \
+ STM32H7_DEBUG_UART
default 0x40028000 if DEBUG_AT91_SAMV7_USART1
default 0x40081000 if DEBUG_LPC18XX_UART0
default 0x40090000 if DEBUG_LPC32XX
@@ -1713,7 +1742,7 @@ config DEBUG_UART_PHYS
DEBUG_S3C64XX_UART || \
DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
- DEBUG_AT91_UART
+ DEBUG_AT91_UART || DEBUG_STM32_UART
config DEBUG_UART_VIRT
hex "Virtual base address of debug UART"
@@ -1779,6 +1808,7 @@ config DEBUG_UART_VIRT
default 0xfcfe8600 if DEBUG_BCM63XX_UART
default 0xfd000000 if DEBUG_SPEAR3XX || DEBUG_SPEAR13XX
default 0xfd883000 if DEBUG_ALPINE_UART0
+ default 0xfe010000 if STM32MP1_DEBUG_UART
default 0xfe017000 if DEBUG_MMP_UART2
default 0xfe018000 if DEBUG_MMP_UART3
default 0xfe100000 if DEBUG_IMX23_UART || DEBUG_IMX28_UART
@@ -1823,7 +1853,7 @@ config DEBUG_UART_VIRT
DEBUG_S3C64XX_UART || \
DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
- DEBUG_AT91_UART
+ DEBUG_AT91_UART || DEBUG_STM32_UART
config DEBUG_UART_8250_SHIFT
int "Register offset shift for the 8250 debug UART"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1fc32b611f8a..7d5cd0f85461 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -152,6 +152,7 @@ textofs-$(CONFIG_PM_H1940) := 0x00108000
ifeq ($(CONFIG_ARCH_SA1100),y)
textofs-$(CONFIG_SA1111) := 0x00208000
endif
+textofs-$(CONFIG_ARCH_IPQ40XX) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
textofs-$(CONFIG_ARCH_MESON) := 0x00208000
@@ -278,7 +279,6 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
core-$(CONFIG_FPE_FASTFPE) += $(patsubst $(srctree)/%,%,$(wildcard $(srctree)/arch/arm/fastfpe/))
core-$(CONFIG_VFP) += arch/arm/vfp/
core-$(CONFIG_XEN) += arch/arm/xen/
-core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
core-$(CONFIG_VDSO) += arch/arm/vdso/
# If we have a machine-specific directory, then include it in the build.
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore
index ce1c5ff746e7..8c759326baf4 100644
--- a/arch/arm/boot/.gitignore
+++ b/arch/arm/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
Image
zImage
xipImage
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index 86b2f5d28240..db05c6ef3e31 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ashldi3.S
bswapsdi2.S
font.c
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 04f77214f050..cabdd8f4a248 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -151,6 +151,25 @@
.L_\@:
.endm
+ /*
+ * The kernel build system appends the size of the
+ * decompressed kernel at the end of the compressed data
+ * in little-endian form.
+ */
+ .macro get_inflated_image_size, res:req, tmp1:req, tmp2:req
+ adr \res, .Linflated_image_size_offset
+ ldr \tmp1, [\res]
+ add \tmp1, \tmp1, \res @ address of inflated image size
+
+ ldrb \res, [\tmp1] @ get_unaligned_le32
+ ldrb \tmp2, [\tmp1, #1]
+ orr \res, \res, \tmp2, lsl #8
+ ldrb \tmp2, [\tmp1, #2]
+ ldrb \tmp1, [\tmp1, #3]
+ orr \res, \res, \tmp2, lsl #16
+ orr \res, \res, \tmp1, lsl #24
+ .endm
+
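
The macro above is the assembly form of an unaligned little-endian 32-bit load; the equivalent C, for readers tracking the byte shuffling (illustrative only):

	#include <stdint.h>

	/* Byte-wise LE32 decode, legal at any alignment - the same
	   ldrb/orr sequence the macro uses on the appended size word. */
	static uint32_t get_unaligned_le32(const uint8_t *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}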
.section ".start", "ax"
/*
* sort out different calling conventions
@@ -268,15 +287,15 @@ not_angel:
*/
mov r0, pc
cmp r0, r4
- ldrcc r0, LC0+32
+ ldrcc r0, LC0+28
addcc r0, r0, pc
cmpcc r4, r0
orrcc r4, r4, #1 @ remember we skipped cache_on
blcs cache_on
restart: adr r0, LC0
- ldmia r0, {r1, r2, r3, r6, r10, r11, r12}
- ldr sp, [r0, #28]
+ ldmia r0, {r1, r2, r3, r6, r11, r12}
+ ldr sp, [r0, #24]
/*
* We might be running at a different address. We need
@@ -284,20 +303,8 @@ restart: adr r0, LC0
*/
sub r0, r0, r1 @ calculate the delta offset
add r6, r6, r0 @ _edata
- add r10, r10, r0 @ inflated kernel size location
- /*
- * The kernel build system appends the size of the
- * decompressed kernel at the end of the compressed data
- * in little-endian form.
- */
- ldrb r9, [r10, #0]
- ldrb lr, [r10, #1]
- orr r9, r9, lr, lsl #8
- ldrb lr, [r10, #2]
- ldrb r10, [r10, #3]
- orr r9, r9, lr, lsl #16
- orr r9, r9, r10, lsl #24
+ get_inflated_image_size r9, r10, lr
#ifndef CONFIG_ZBOOT_ROM
/* malloc space is above the relocated stack (64k max) */
@@ -521,11 +528,8 @@ dtb_check_done:
/* Preserve offset to relocated code. */
sub r6, r9, r6
-#ifndef CONFIG_ZBOOT_ROM
- /* cache_clean_flush may use the stack, so relocate it */
- add sp, sp, r6
-#endif
-
+ mov r0, r9 @ start of relocated zImage
+ add r1, sp, r6 @ end of relocated zImage
bl cache_clean_flush
badr r0, restart
@@ -622,6 +626,11 @@ not_relocated: mov r0, #0
add r2, sp, #0x10000 @ 64k max
mov r3, r7
bl decompress_kernel
+
+ get_inflated_image_size r1, r2, r3
+
+ mov r0, r4 @ start of inflated image
+ add r1, r1, r0 @ end of inflated image
bl cache_clean_flush
bl cache_off
@@ -652,13 +661,15 @@ LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
.word _edata @ r6
- .word input_data_end - 4 @ r10 (inflated size location)
.word _got_start @ r11
.word _got_end @ ip
.word .L_user_stack_end @ sp
.word _end - restart + 16384 + 1024*1024
.size LC0, . - LC0
+.Linflated_image_size_offset:
+ .long (input_data_end - 4) - .
+
#ifdef CONFIG_ARCH_RPC
.globl params
params: ldr r0, =0x10000100 @ params_phys for RPC
@@ -668,6 +679,24 @@ params: ldr r0, =0x10000100 @ params_phys for RPC
#endif
/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register
+ * on ARMv7.
+ */
+ .macro dcache_line_size, reg, tmp
+#ifdef CONFIG_CPU_V7M
+ movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ ldr \tmp, [\tmp]
+#else
+ mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+#endif
+ lsr \tmp, \tmp, #16
+ and \tmp, \tmp, #0xf @ cache line size encoding
+ mov \reg, #4 @ bytes per word
+ mov \reg, \reg, lsl \tmp @ actual cache line size
+ .endm
+
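
The field read above is CTR.DminLine (bits [19:16]), the log2 of the smallest D-cache line in words, so the computation reduces to a shift; restated in C (example value assumed):

	/* Line size in bytes: 4 bytes/word << DminLine. */
	static unsigned int dcache_line_bytes(unsigned int ctr)
	{
		return 4u << ((ctr >> 16) & 0xf);
	}
	/* e.g. DminLine == 3 -> 4u << 3 == 32-byte lines */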
+/*
* Turn on the cache. We need to setup some page tables so that we
* can have both the I and D caches on.
*
@@ -1159,8 +1188,6 @@ __armv7_mmu_cache_off:
bic r0, r0, #0x000c
#endif
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
- mov r12, lr
- bl __armv7_mmu_cache_flush
mov r0, #0
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
@@ -1168,11 +1195,14 @@ __armv7_mmu_cache_off:
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
mcr p15, 0, r0, c7, c10, 4 @ DSB
mcr p15, 0, r0, c7, c5, 4 @ ISB
- mov pc, r12
+ mov pc, lr
/*
* Clean and flush the cache to maintain consistency.
*
+ * On entry,
+ * r0 = start address
+ * r1 = end address (exclusive)
* On exit,
* r1, r2, r3, r9, r10, r11, r12 corrupted
* This routine must preserve:
@@ -1181,6 +1211,7 @@ __armv7_mmu_cache_off:
.align 5
cache_clean_flush:
mov r3, #16
+ mov r11, r1
b call_cache_fn
__armv4_mpu_cache_flush:
@@ -1231,51 +1262,16 @@ __armv7_mmu_cache_flush:
mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D
b iflush
hierarchical:
- mcr p15, 0, r10, c7, c10, 5 @ DMB
- stmfd sp!, {r0-r7, r9-r11}
- mrc p15, 1, r0, c0, c0, 1 @ read clidr
- ands r3, r0, #0x7000000 @ extract loc from clidr
- mov r3, r3, lsr #23 @ left align loc bit field
- beq finished @ if loc is 0, then no need to clean
- mov r10, #0 @ start clean at cache level 0
-loop1:
- add r2, r10, r10, lsr #1 @ work out 3x current cache level
- mov r1, r0, lsr r2 @ extract cache type bits from clidr
- and r1, r1, #7 @ mask of the bits for current cache only
- cmp r1, #2 @ see what cache we have at this level
- blt skip @ skip if no cache, or just i-cache
- mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
- mcr p15, 0, r10, c7, c5, 4 @ isb to sych the new cssr&csidr
- mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
- and r2, r1, #7 @ extract the length of the cache lines
- add r2, r2, #4 @ add 4 (line length offset)
- ldr r4, =0x3ff
- ands r4, r4, r1, lsr #3 @ find maximum number on the way size
- clz r5, r4 @ find bit position of way size increment
- ldr r7, =0x7fff
- ands r7, r7, r1, lsr #13 @ extract max number of the index size
-loop2:
- mov r9, r4 @ create working copy of max way size
-loop3:
- ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
- ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
- THUMB( lsl r6, r9, r5 )
- THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
- THUMB( lsl r6, r7, r2 )
- THUMB( orr r11, r11, r6 ) @ factor index number into r11
- mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
- subs r9, r9, #1 @ decrement the way
- bge loop3
- subs r7, r7, #1 @ decrement the index
- bge loop2
-skip:
- add r10, r10, #2 @ increment cache number
- cmp r3, r10
- bgt loop1
-finished:
- ldmfd sp!, {r0-r7, r9-r11}
- mov r10, #0 @ switch back to cache level 0
- mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dcache_line_size r1, r2 @ r1 := dcache min line size
+ sub r2, r1, #1 @ r2 := line size mask
+ bic r0, r0, r2 @ round down start to line size
+ sub r11, r11, #1 @ end address is exclusive
+ bic r11, r11, r2 @ round down end to line size
+0: cmp r0, r11 @ finished?
+ bgt iflush
+ mcr p15, 0, r0, c7, c14, 1 @ Dcache clean/invalidate by VA
+ add r0, r0, r1
+ b 0b
iflush:
mcr p15, 0, r10, c7, c10, 4 @ DSB
mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB
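The rewritten hierarchical path abandons the set/way walk in favour of cleaning and invalidating by virtual address over the [r0, r11) range passed to cache_clean_flush. A C rendering of the new loop, where dccimvac() stands in for the per-line "mcr p15, 0, rX, c7, c14, 1" instruction:

    #include <stdint.h>

    extern void dccimvac(uintptr_t va);  /* hypothetical per-line clean+invalidate */

    static void clean_flush_range(uintptr_t start, uintptr_t end, uint32_t line)
    {
        uintptr_t mask = line - 1;

        start &= ~mask;              /* round start down to a line */
        end = (end - 1) & ~mask;     /* end is exclusive, hence the -1 */
        for (uintptr_t va = start; va <= end; va += line)
            dccimvac(va);
    }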
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index fc7ed03d8b93..b247f399de71 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -36,7 +36,6 @@ SECTIONS
*(.start)
*(.text)
*(.text.*)
- *(.fixup)
*(.gnu.warning)
*(.glue_7t)
*(.glue_7)
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index d6546d2676b9..e8dd99201397 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -446,6 +446,10 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-nitrogen6x.dtb \
imx6dl-phytec-mira-rdk-nand.dtb \
imx6dl-phytec-pbab01.dtb \
+ imx6dl-pico-dwarf.dtb \
+ imx6dl-pico-hobbit.dtb \
+ imx6dl-pico-nymph.dtb \
+ imx6dl-pico-pi.dtb \
imx6dl-rex-basic.dtb \
imx6dl-riotboard.dtb \
imx6dl-sabreauto.dtb \
@@ -529,6 +533,10 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-phytec-mira-rdk-emmc.dtb \
imx6q-phytec-mira-rdk-nand.dtb \
imx6q-phytec-pbab01.dtb \
+ imx6q-pico-dwarf.dtb \
+ imx6q-pico-hobbit.dtb \
+ imx6q-pico-nymph.dtb \
+ imx6q-pico-pi.dtb \
imx6q-pistachio.dtb \
imx6q-rex-pro.dtb \
imx6q-sabreauto.dtb \
@@ -594,6 +602,7 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ul-kontron-n6310-s-43.dtb \
imx6ul-liteboard.dtb \
imx6ul-opos6uldev.dtb \
+ imx6ul-pico-dwarf.dtb \
imx6ul-pico-hobbit.dtb \
imx6ul-pico-pi.dtb \
imx6ul-phytec-segin-ff-rdk-nand.dtb \
@@ -610,12 +619,16 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ulz-14x14-evk.dtb
dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-cl-som-imx7.dtb \
+ imx7d-colibri-aster.dtb \
+ imx7d-colibri-emmc-aster.dtb \
imx7d-colibri-emmc-eval-v3.dtb \
imx7d-colibri-eval-v3.dtb \
imx7d-mba7.dtb \
imx7d-meerkat96.dtb \
imx7d-nitrogen7.dtb \
+ imx7d-pico-dwarf.dtb \
imx7d-pico-hobbit.dtb \
+ imx7d-pico-nymph.dtb \
imx7d-pico-pi.dtb \
imx7d-sbc-imx7.dtb \
imx7d-sdb.dtb \
@@ -623,6 +636,7 @@ dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-sdb-sht11.dtb \
imx7d-zii-rmu2.dtb \
imx7d-zii-rpu2.dtb \
+ imx7s-colibri-aster.dtb \
imx7s-colibri-eval-v3.dtb \
imx7s-mba7.dtb \
imx7s-warp.dtb
@@ -1016,6 +1030,7 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32h743i-disco.dtb \
stm32mp157a-avenger96.dtb \
stm32mp157a-dk1.dtb \
+ stm32mp157c-dhcom-pdk2.dtb \
stm32mp157c-dk2.dtb \
stm32mp157c-ed1.dtb \
stm32mp157c-ev1.dtb
@@ -1056,6 +1071,7 @@ dtb-$(CONFIG_MACH_SUN5I) += \
sun5i-a13-licheepi-one.dtb \
sun5i-a13-olinuxino.dtb \
sun5i-a13-olinuxino-micro.dtb \
+ sun5i-a13-pocketbook-touch-lux-3.dtb \
sun5i-a13-q8-tablet.dtb \
sun5i-a13-utoo-p66.dtb \
sun5i-gr8-chip-pro.dtb \
@@ -1086,6 +1102,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
sun7i-a20-i12-tvbox.dtb \
sun7i-a20-icnova-swac.dtb \
sun7i-a20-lamobo-r1.dtb \
+ sun7i-a20-linutronix-testbox-v2.dtb \
sun7i-a20-m3.dtb \
sun7i-a20-mk808c.dtb \
sun7i-a20-olimex-som-evb.dtb \
@@ -1202,7 +1219,8 @@ dtb-$(CONFIG_ARCH_U8500) += \
ste-hrefv60plus-stuib.dtb \
ste-hrefv60plus-tvk.dtb \
ste-href520-tvk.dtb \
- ste-ux500-samsung-golden.dtb
+ ste-ux500-samsung-golden.dtb \
+ ste-ux500-samsung-skomer.dtb
dtb-$(CONFIG_ARCH_UNIPHIER) += \
uniphier-ld4-ref.dtb \
uniphier-ld6b-ref.dtb \
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 4e2986f0c604..5ed7f3c58c0f 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -759,12 +759,27 @@
ranges = <0x0 0x200000 0x80000>;
};
- target-module@300000 { /* 0x4a300000, ap 9 04.0 */
- compatible = "ti,sysc";
- status = "disabled";
+ pruss_tm: target-module@300000 { /* 0x4a300000, ap 9 04.0 */
+ compatible = "ti,sysc-pruss", "ti,sysc";
+ reg = <0x326000 0x4>,
+ <0x326004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_PRUSS_STANDBY_INIT |
+ SYSC_PRUSS_SUB_MWAIT)>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&pruss_ocp_clkctrl AM3_PRUSS_OCP_PRUSS_CLKCTRL 0>;
+ clock-names = "fck";
+ resets = <&prm_per 1>;
+ reset-names = "rstctrl";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x300000 0x80000>;
+ status = "disabled";
};
};
};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 41dcfb37155a..a35f5052d76f 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -47,6 +47,7 @@
#size-cells = <0>;
cpu@0 {
compatible = "arm,cortex-a8";
+ enable-method = "ti,am3352";
device_type = "cpu";
reg = <0>;
@@ -56,6 +57,17 @@
clock-names = "cpu";
clock-latency = <300000>; /* From omap-cpufreq driver */
+ cpu-idle-states = <&mpu_gate>;
+ };
+
+ idle-states {
+ mpu_gate: mpu_gate {
+ compatible = "arm,idle-state";
+ entry-latency-us = <40>;
+ exit-latency-us = <90>;
+ min-residency-us = <300>;
+ ti,idle-wkup-m3;
+ };
};
};
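The three latency properties carry the cpuidle governor's cost model: entering and leaving mpu_gate costs entry-latency-us + exit-latency-us, and the state is only worth picking for expected sleeps of at least min-residency-us. A sketch of the consistency check this implies (field names mirror the DT properties, not a kernel struct):

    struct idle_state {
        unsigned int entry_latency_us;
        unsigned int exit_latency_us;
        unsigned int min_residency_us;
    };

    /* A sane state's break-even residency covers its round-trip cost. */
    static int idle_state_sane(const struct idle_state *s)
    {
        return s->min_residency_us >= s->entry_latency_us + s->exit_latency_us;
    }

    /* am33xx mpu_gate: 300 >= 40 + 90, so the numbers are consistent. */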
@@ -193,45 +205,100 @@
reg = <0x48200000 0x1000>;
};
- edma: edma@49000000 {
- compatible = "ti,edma3-tpcc";
- ti,hwmods = "tpcc";
- reg = <0x49000000 0x10000>;
- reg-names = "edma3_cc";
- interrupts = <12 13 14>;
- interrupt-names = "edma3_ccint", "edma3_mperr",
- "edma3_ccerrint";
- dma-requests = <64>;
- #dma-cells = <2>;
-
- ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
- <&edma_tptc2 0>;
-
- ti,edma-memcpy-channels = <20 21>;
+ target-module@49000000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49000000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3_clkctrl AM3_L3_TPCC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49000000 0x10000>;
+
+ edma: dma@0 {
+ compatible = "ti,edma3-tpcc";
+ reg = <0 0x10000>;
+ reg-names = "edma3_cc";
+ interrupts = <12 13 14>;
+ interrupt-names = "edma3_ccint", "edma3_mperr",
+ "edma3_ccerrint";
+ dma-requests = <64>;
+ #dma-cells = <2>;
+
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ <&edma_tptc2 0>;
+
+ ti,edma-memcpy-channels = <20 21>;
+ };
};
- edma_tptc0: tptc@49800000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc0";
- reg = <0x49800000 0x100000>;
- interrupts = <112>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49800000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49800000 0x4>,
+ <0x49800010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&l3_clkctrl AM3_L3_TPTC0_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49800000 0x100000>;
+
+ edma_tptc0: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <112>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc1: tptc@49900000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc1";
- reg = <0x49900000 0x100000>;
- interrupts = <113>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49900000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49900000 0x4>,
+ <0x49900010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&l3_clkctrl AM3_L3_TPTC1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49900000 0x100000>;
+
+ edma_tptc1: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <113>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc2: tptc@49a00000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc2";
- reg = <0x49a00000 0x100000>;
- interrupts = <114>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49a00000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49a00000 0x4>,
+ <0x49a00010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&l3_clkctrl AM3_L3_TPTC2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49a00000 0x100000>;
+
+ edma_tptc2: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <114>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
target-module@47810000 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index faa14dc0faff..dba87bfaf33e 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -45,6 +45,7 @@
#size-cells = <0>;
cpu: cpu@0 {
compatible = "arm,cortex-a9";
+ enable-method = "ti,am4372";
device_type = "cpu";
reg = <0>;
@@ -54,6 +55,17 @@
operating-points-v2 = <&cpu0_opp_table>;
clock-latency = <300000>; /* From omap-cpufreq driver */
+ cpu-idle-states = <&mpu_gate>;
+ };
+
+ idle-states {
+ mpu_gate: mpu_gate {
+ compatible = "arm,idle-state";
+ entry-latency-us = <40>;
+ exit-latency-us = <100>;
+ min-residency-us = <300>;
+ local-timer-stop;
+ };
};
};
@@ -185,47 +197,102 @@
&pm_sram_data>;
};
- edma: edma@49000000 {
- compatible = "ti,edma3-tpcc";
- ti,hwmods = "tpcc";
- reg = <0x49000000 0x10000>;
- reg-names = "edma3_cc";
- interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_ccint", "edma3_mperr",
- "edma3_ccerrint";
- dma-requests = <64>;
- #dma-cells = <2>;
-
- ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
- <&edma_tptc2 0>;
-
- ti,edma-memcpy-channels = <58 59>;
+ target-module@49000000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49000000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3_clkctrl AM4_L3_TPCC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49000000 0x10000>;
+
+ edma: dma@0 {
+ compatible = "ti,edma3-tpcc";
+ reg = <0 0x10000>;
+ reg-names = "edma3_cc";
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma3_ccint", "edma3_mperr",
+ "edma3_ccerrint";
+ dma-requests = <64>;
+ #dma-cells = <2>;
+
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ <&edma_tptc2 0>;
+
+ ti,edma-memcpy-channels = <58 59>;
+ };
};
- edma_tptc0: tptc@49800000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc0";
- reg = <0x49800000 0x100000>;
- interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49800000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49800000 0x4>,
+ <0x49800010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&l3_clkctrl AM4_L3_TPTC0_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49800000 0x100000>;
+
+ edma_tptc0: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc1: tptc@49900000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc1";
- reg = <0x49900000 0x100000>;
- interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49900000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49900000 0x4>,
+ <0x49900010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&l3_clkctrl AM4_L3_TPTC1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49900000 0x100000>;
+
+ edma_tptc1: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc2: tptc@49a00000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc2";
- reg = <0x49a00000 0x100000>;
- interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49a00000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49a00000 0x4>,
+ <0x49a00010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&l3_clkctrl AM4_L3_TPTC2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49a00000 0x100000>;
+
+ edma_tptc2: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
target-module@47810000 {
@@ -344,6 +411,28 @@
};
};
+ pruss_tm: target-module@54400000 {
+ compatible = "ti,sysc-pruss", "ti,sysc";
+ reg = <0x54426000 0x4>,
+ <0x54426004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_PRUSS_STANDBY_INIT |
+ SYSC_PRUSS_SUB_MWAIT)>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&pruss_ocp_clkctrl AM4_PRUSS_OCP_PRUSS_CLKCTRL 0>;
+ clock-names = "fck";
+ resets = <&prm_per 1>;
+ reset-names = "rstctrl";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x54400000 0x80000>;
+ };
+
gpmc: gpmc@50000000 {
compatible = "ti,am3352-gpmc";
ti,hwmods = "gpmc";
@@ -394,38 +483,6 @@
};
};
- dss: dss@4832a000 {
- compatible = "ti,omap3-dss";
- reg = <0x4832a000 0x200>;
- status = "disabled";
- ti,hwmods = "dss_core";
- clocks = <&disp_clk>;
- clock-names = "fck";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- dispc: dispc@4832a400 {
- compatible = "ti,omap3-dispc";
- reg = <0x4832a400 0x400>;
- interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "dss_dispc";
- clocks = <&disp_clk>;
- clock-names = "fck";
-
- max-memory-bandwidth = <230000000>;
- };
-
- rfbi: rfbi@4832a800 {
- compatible = "ti,omap3-rfbi";
- reg = <0x4832a800 0x100>;
- ti,hwmods = "dss_rfbi";
- clocks = <&disp_clk>;
- clock-names = "fck";
- status = "disabled";
- };
- };
-
ocmcram: sram@40300000 {
compatible = "mmio-sram";
reg = <0x40300000 0x40000>; /* 256k */
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index e18e17d31272..49c6a872052e 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -2117,7 +2117,6 @@
target-module@2a000 { /* 0x4832a000, ap 88 3c.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
- ti,hwmods = "dss_core";
reg = <0x2a000 0x4>,
<0x2a010 0x4>,
<0x2a014 0x4>;
@@ -2135,6 +2134,82 @@
<0x00000800 0x0002a800 0x00000400>,
<0x00000c00 0x0002ac00 0x00000400>,
<0x00001000 0x0002b000 0x00001000>;
+
+ dss: dss@0 {
+ compatible = "ti,omap3-dss";
+ reg = <0 0x200>;
+ status = "disabled";
+ clocks = <&disp_clk>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x00000000 0x00000400>,
+ <0x00000400 0x00000400 0x00000400>,
+ <0x00000800 0x00000800 0x00000400>,
+ <0x00000c00 0x00000c00 0x00000400>,
+ <0x00001000 0x00001000 0x00001000>;
+
+ target-module@400 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x400 0x4>,
+ <0x410 0x4>,
+ <0x414 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ clocks = <&dss_clkctrl AM4_DSS_DSS_CORE_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x400 0x400>;
+
+ dispc: dispc@0 {
+ compatible = "ti,omap3-dispc";
+ reg = <0 0x400>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&disp_clk>;
+ clock-names = "fck";
+
+ max-memory-bandwidth = <230000000>;
+ };
+ };
+
+ target-module@800 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x800 0x4>,
+ <0x810 0x4>,
+ <0x814 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ clocks = <&dss_clkctrl AM4_DSS_DSS_CORE_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x800 0x400>;
+
+ rfbi: rfbi@0 {
+ compatible = "ti,omap3-rfbi";
+ reg = <0 0x100>;
+ clocks = <&dss_clkctrl AM4_DSS_DSS_CORE_CLKCTRL 0>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+ };
+ };
};
target-module@3d000 { /* 0x4833d000, ap 102 6e.0 */
diff --git a/arch/arm/boot/dts/am57-pruss.dtsi b/arch/arm/boot/dts/am57-pruss.dtsi
new file mode 100644
index 000000000000..b1c583dee10b
--- /dev/null
+++ b/arch/arm/boot/dts/am57-pruss.dtsi
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Common PRUSS data for TI AM57xx platforms
+ */
+
+&ocp {
+ pruss1_tm: target-module@4b226000 {
+ compatible = "ti,sysc-pruss", "ti,sysc";
+ reg = <0x4b226000 0x4>,
+ <0x4b226004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_PRUSS_STANDBY_INIT |
+ SYSC_PRUSS_SUB_MWAIT)>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): coreaon_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_PRUSS1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x4b200000 0x80000>;
+ };
+
+ pruss2_tm: target-module@4b2a6000 {
+ compatible = "ti,sysc-pruss", "ti,sysc";
+ reg = <0x4b2a6000 0x4>,
+ <0x4b2a6004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_PRUSS_STANDBY_INIT |
+ SYSC_PRUSS_SUB_MWAIT)>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): coreaon_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_PRUSS2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x4b280000 0x80000>;
+ };
+};
diff --git a/arch/arm/boot/dts/am5718.dtsi b/arch/arm/boot/dts/am5718.dtsi
index d51007c3e8c4..a80c2e3eee2e 100644
--- a/arch/arm/boot/dts/am5718.dtsi
+++ b/arch/arm/boot/dts/am5718.dtsi
@@ -4,6 +4,7 @@
*/
#include "dra72x.dtsi"
+#include "am57-pruss.dtsi"
/ {
compatible = "ti,am5718", "ti,dra7";
diff --git a/arch/arm/boot/dts/am5728.dtsi b/arch/arm/boot/dts/am5728.dtsi
index 82e5427ef6a9..9a3810f5adcc 100644
--- a/arch/arm/boot/dts/am5728.dtsi
+++ b/arch/arm/boot/dts/am5728.dtsi
@@ -4,6 +4,7 @@
*/
#include "dra74x.dtsi"
+#include "am57-pruss.dtsi"
/ {
compatible = "ti,am5728", "ti,dra7";
diff --git a/arch/arm/boot/dts/am5748.dtsi b/arch/arm/boot/dts/am5748.dtsi
index 5e129759d04a..2b65317b1513 100644
--- a/arch/arm/boot/dts/am5748.dtsi
+++ b/arch/arm/boot/dts/am5748.dtsi
@@ -4,6 +4,7 @@
*/
#include "dra76x.dtsi"
+#include "am57-pruss.dtsi"
/ {
compatible = "ti,am5748", "ti,dra762", "ti,dra7";
diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi
index 09f3f544f3a7..f61bd59ae5ba 100644
--- a/arch/arm/boot/dts/arm-realview-pbx.dtsi
+++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi
@@ -210,7 +210,7 @@
};
};
- soc: soc@0 {
+ soc: soc {
compatible = "arm,realview-pbx-soc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi b/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi
index df0f0cc575c1..bea920b192b6 100644
--- a/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi
+++ b/arch/arm/boot/dts/at91-dvk_su60_somc_lcm.dtsi
@@ -17,7 +17,7 @@
};
panel: panel {
- compatible = "winstar,wf70gtiagdng0", "innolux,at070tn92", "simple-panel";
+ compatible = "winstar,wf70gtiagdng0", "innolux,at070tn92";
backlight = <&backlight>;
power-supply = <&vcc_lcd_reg>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
index 9f30132d7d7b..b484745bf2d4 100644
--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
@@ -645,3 +645,8 @@
&usb2 {
status = "okay";
};
+
+&watchdog {
+ status = "okay";
+};
+
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
index 0b9fa2942dff..6b8461278950 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -186,6 +186,11 @@
pinmux = <PIN_PA10__GPIO>;
bias-disable;
};
+
+ pinctrl_usba_vbus: usba_vbus {
+ pinmux = <PIN_PA16__GPIO>;
+ bias-disable;
+ };
};
&pwm0 {
@@ -248,6 +253,13 @@
status = "okay";
};
+&usb0 {
+ atmel,vbus-gpio = <&pioA PIN_PA16 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usba_vbus>;
+ status = "okay";
+};
+
&usb1 {
num-ports = <3>;
atmel,vbus-gpio = <0
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index ba7f3e646c26..1c24ac8019ba 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -180,8 +180,11 @@
i2c0: i2c@f8028000 {
dmas = <0>, <0>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ sda-gpios = <&pioA PIN_PD21 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD22 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -198,8 +201,11 @@
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_flx0_default>;
+ pinctrl-1 = <&pinctrl_flx0_gpio>;
+ sda-gpios = <&pioA PIN_PB28 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PB29 GPIO_ACTIVE_HIGH>;
atmel,fifo-size = <16>;
status = "okay";
};
@@ -226,8 +232,11 @@
i2c1: i2c@fc028000 {
dmas = <0>, <0>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ sda-gpios = <&pioA PIN_PC6 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PC7 GPIO_ACTIVE_HIGH>;
status = "okay";
at24@50 {
@@ -244,18 +253,36 @@
bias-disable;
};
+ pinctrl_flx0_gpio: flx0_gpio {
+ pinmux = <PIN_PB28__GPIO>,
+ <PIN_PB29__GPIO>;
+ bias-disable;
+ };
+
pinctrl_i2c0_default: i2c0_default {
pinmux = <PIN_PD21__TWD0>,
<PIN_PD22__TWCK0>;
bias-disable;
};
+ pinctrl_i2c0_gpio: i2c0_gpio {
+ pinmux = <PIN_PD21__GPIO>,
+ <PIN_PD22__GPIO>;
+ bias-disable;
+ };
+
pinctrl_i2c1_default: i2c1_default {
pinmux = <PIN_PC6__TWD1>,
<PIN_PC7__TWCK1>;
bias-disable;
};
+ pinctrl_i2c1_gpio: i2c1_gpio {
+ pinmux = <PIN_PC6__GPIO>,
+ <PIN_PC7__GPIO>;
+ bias-disable;
+ };
+
pinctrl_key_gpio_default: key_gpio_default {
pinmux = <PIN_PA10__GPIO>;
bias-pull-up;
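The added "gpio" pinctrl state together with sda-gpios/scl-gpios lets the I2C core take the pins back and bit-bang a bus recovery when a slave device holds SDA low. The standard procedure pulses SCL up to nine times until SDA releases; a self-contained sketch, where gpio_set_scl()/gpio_get_sda() are placeholder helpers rather than a real kernel API:

    extern void gpio_set_scl(int level);
    extern int gpio_get_sda(void);
    extern void udelay(unsigned int usec);

    /* Clock SCL until the stuck slave finishes its byte and releases SDA. */
    static int i2c_gpio_recover(void)
    {
        for (int i = 0; i < 9; i++) {
            if (gpio_get_sda())
                return 0;        /* SDA released: bus recovered */
            gpio_set_scl(0);
            udelay(5);           /* roughly a 100 kHz half-period */
            gpio_set_scl(1);
            udelay(5);
        }
        return -1;               /* still stuck after nine pulses */
    }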
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 9d0a7fbea725..055ee53e4773 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -129,8 +129,11 @@
i2c0: i2c@f8028000 {
dmas = <0>, <0>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ sda-gpios = <&pioA PIN_PD21 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD22 GPIO_ACTIVE_HIGH>;
i2c-sda-hold-time-ns = <350>;
status = "okay";
@@ -331,8 +334,11 @@
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_flx4_default>;
+ pinctrl-1 = <&pinctrl_flx4_gpio>;
+ sda-gpios = <&pioA PIN_PD12 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD13 GPIO_ACTIVE_HIGH>;
atmel,fifo-size = <16>;
i2c-analog-filter;
i2c-digital-filter;
@@ -343,11 +349,14 @@
i2c1: i2c@fc028000 {
dmas = <0>, <0>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1_default>;
i2c-analog-filter;
i2c-digital-filter;
i2c-digital-filter-width-ns = <35>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ sda-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD5 GPIO_ACTIVE_HIGH>;
status = "okay";
at24@54 {
@@ -441,18 +450,36 @@
bias-disable;
};
+ pinctrl_flx4_gpio: flx4_gpio {
+ pinmux = <PIN_PD12__GPIO>,
+ <PIN_PD13__GPIO>;
+ bias-disable;
+ };
+
pinctrl_i2c0_default: i2c0_default {
pinmux = <PIN_PD21__TWD0>,
<PIN_PD22__TWCK0>;
bias-disable;
};
+ pinctrl_i2c0_gpio: i2c0_gpio {
+ pinmux = <PIN_PD21__GPIO>,
+ <PIN_PD22__GPIO>;
+ bias-disable;
+ };
+
pinctrl_i2c1_default: i2c1_default {
pinmux = <PIN_PD4__TWD1>,
<PIN_PD5__TWCK1>;
bias-disable;
};
+ pinctrl_i2c1_gpio: i2c1_gpio {
+ pinmux = <PIN_PD4__GPIO>,
+ <PIN_PD5__GPIO>;
+ bias-disable;
+ };
+
pinctrl_i2s0_default: i2s0_default {
pinmux = <PIN_PC1__I2SC0_CK>,
<PIN_PC2__I2SC0_MCK>,
diff --git a/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts b/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
index af4969485c88..4d7cee569ff2 100644
--- a/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
@@ -136,7 +136,7 @@
panel: panel {
/* Actually Ampire 800480R2 */
- compatible = "foxlink,fl500wvr00-a0t", "simple-panel";
+ compatible = "foxlink,fl500wvr00-a0t";
backlight = <&backlight>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index ea5cef0b0974..d36e162a8817 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -238,7 +238,7 @@
};
panel: panel {
- compatible = "qiaodian,qd43003c0-40", "simple-panel";
+ compatible = "qiaodian,qd43003c0-40";
backlight = <&backlight>;
power-supply = <&panel_reg>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/at91sam9x5dm.dtsi b/arch/arm/boot/dts/at91sam9x5dm.dtsi
index 7f00c1f57b90..a9278038af3b 100644
--- a/arch/arm/boot/dts/at91sam9x5dm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5dm.dtsi
@@ -27,7 +27,7 @@
};
panel: panel {
- compatible = "foxlink,fl500wvr00-a0t", "simple-panel";
+ compatible = "foxlink,fl500wvr00-a0t";
backlight = <&backlight>;
power-supply = <&panel_reg>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
index efea891b1a76..e26ea9006378 100644
--- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
@@ -20,6 +20,7 @@
};
aliases {
+ emmc2bus = &emmc2bus;
ethernet0 = &genet;
pcie0 = &pcie0;
};
@@ -74,6 +75,79 @@
};
};
+&gpio {
+ /*
+ * Parts taken from rpi_SCH_4b_4p0_reduced.pdf and
+ * the official GPU firmware DT blob.
+ *
+ * Legend:
+ * "FOO" = GPIO line named "FOO" on the schematic
+ * "FOO_N" = GPIO line named "FOO" on schematic, active low
+ */
+ gpio-line-names = "ID_SDA",
+ "ID_SCL",
+ "SDA1",
+ "SCL1",
+ "GPIO_GCLK",
+ "GPIO5",
+ "GPIO6",
+ "SPI_CE1_N",
+ "SPI_CE0_N",
+ "SPI_MISO",
+ "SPI_MOSI",
+ "SPI_SCLK",
+ "GPIO12",
+ "GPIO13",
+ /* Serial port */
+ "TXD1",
+ "RXD1",
+ "GPIO16",
+ "GPIO17",
+ "GPIO18",
+ "GPIO19",
+ "GPIO20",
+ "GPIO21",
+ "GPIO22",
+ "GPIO23",
+ "GPIO24",
+ "GPIO25",
+ "GPIO26",
+ "GPIO27",
+ "RGMII_MDIO",
+ "RGMIO_MDC",
+ /* Used by BT module */
+ "CTS0",
+ "RTS0",
+ "TXD0",
+ "RXD0",
+ /* Used by Wifi */
+ "SD1_CLK",
+ "SD1_CMD",
+ "SD1_DATA0",
+ "SD1_DATA1",
+ "SD1_DATA2",
+ "SD1_DATA3",
+ /* Shared with SPI flash */
+ "PWM0_MISO",
+ "PWM1_MOSI",
+ "STATUS_LED_G_CLK",
+ "SPIFLASH_CE_N",
+ "SDA0",
+ "SCL0",
+ "RGMII_RXCLK",
+ "RGMII_RXCTL",
+ "RGMII_RXD0",
+ "RGMII_RXD1",
+ "RGMII_RXD2",
+ "RGMII_RXD3",
+ "RGMII_TXCLK",
+ "RGMII_TXCTL",
+ "RGMII_TXD0",
+ "RGMII_TXD1",
+ "RGMII_TXD2",
+ "RGMII_TXD3";
+};
+
&pwm1 {
pinctrl-names = "default";
pinctrl-0 = <&pwm1_0_gpio40 &pwm1_1_gpio41>;
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index d1e684d0acfd..a91cf68e3c4c 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -241,17 +241,32 @@
status = "disabled";
};
+ hvs@7e400000 {
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ /*
+ * emmc2 has different DMA constraints based on SoC revision. It was
+ * moved into its own bus so that the RPi4 firmware can update them:
+ * the firmware checks whether the emmc2bus alias is defined and, if
+ * so, edits the dma-ranges property below accordingly.
+ */
+ emmc2bus: emmc2bus {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ ranges = <0x0 0x7e000000 0x0 0xfe000000 0x01800000>;
+ dma-ranges = <0x0 0xc0000000 0x0 0x00000000 0x40000000>;
+
emmc2: emmc2@7e340000 {
compatible = "brcm,bcm2711-emmc2";
- reg = <0x7e340000 0x100>;
+ reg = <0x0 0x7e340000 0x100>;
interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clocks BCM2711_CLOCK_EMMC2>;
status = "disabled";
};
-
- hvs@7e400000 {
- interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
- };
};
arm-pmu {
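The dma-ranges triplet above is the property the firmware patches: it maps child bus addresses starting at 0xc0000000 onto CPU address 0x0 with a 1 GiB window. A sketch of the translation it expresses (constants copied from the node, helper name illustrative):

    #include <stdint.h>

    #define EMMC2_BUS_BASE 0xc0000000u  /* child (device) side */
    #define EMMC2_CPU_BASE 0x00000000u  /* parent (CPU) side */
    #define EMMC2_WINDOW   0x40000000u  /* 1 GiB */

    /* Bus address emmc2 must emit to DMA into a CPU buffer; returns -1
     * when the buffer lies outside the configured window. */
    static int emmc2_cpu_to_bus(uint32_t cpu, uint32_t *bus)
    {
        if (cpu - EMMC2_CPU_BASE >= EMMC2_WINDOW)
            return -1;
        *bus = EMMC2_BUS_BASE + (cpu - EMMC2_CPU_BASE);
        return 0;
    }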
diff --git a/arch/arm/boot/dts/dm814x-clocks.dtsi b/arch/arm/boot/dts/dm814x-clocks.dtsi
index e5e4d0affefa..f7939f43413b 100644
--- a/arch/arm/boot/dts/dm814x-clocks.dtsi
+++ b/arch/arm/boot/dts/dm814x-clocks.dtsi
@@ -362,4 +362,18 @@
#clock-cells = <2>;
};
};
+
+ alwon_ethernet_cm: alwon_ethernet_cm@15d4 {
+ compatible = "ti,omap4-cm";
+ reg = <0x15d4 0x4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x15d4 0x4>;
+
+ alwon_ethernet_clkctrl: clk@0 {
+ compatible = "ti,clkctrl";
+ reg = <0 0x4>;
+ #clock-cells = <2>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 95de9f214c14..44ed5a798164 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -4,6 +4,8 @@
* kind, whether express or implied.
*/
+#include <dt-bindings/bus/ti-sysc.h>
+#include <dt-bindings/clock/dm814.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/dm814x.h>
@@ -519,53 +521,123 @@
reg = <0x47810000 0x1000>;
};
- edma: edma@49000000 {
- compatible = "ti,edma3-tpcc";
- ti,hwmods = "tpcc";
- reg = <0x49000000 0x10000>;
- reg-names = "edma3_cc";
- interrupts = <12 13 14>;
- interrupt-names = "edma3_ccint", "edma3_mperr",
- "edma3_ccerrint";
- dma-requests = <64>;
- #dma-cells = <2>;
-
- ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
- <&edma_tptc2 3>, <&edma_tptc3 0>;
-
- ti,edma-memcpy-channels = <20 21>;
+ target-module@49000000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49000000 0x4>;
+ reg-names = "rev";
+ clocks = <&alwon_clkctrl DM814_TPCC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49000000 0x10000>;
+
+ edma: dma@0 {
+ compatible = "ti,edma3-tpcc";
+ reg = <0 0x10000>;
+ reg-names = "edma3_cc";
+ interrupts = <12 13 14>;
+ interrupt-names = "edma3_ccint", "edma3_mperr",
+ "edma3_ccerrint";
+ dma-requests = <64>;
+ #dma-cells = <2>;
+
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ <&edma_tptc2 3>, <&edma_tptc3 0>;
+
+ ti,edma-memcpy-channels = <20 21>;
+ };
};
- edma_tptc0: tptc@49800000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc0";
- reg = <0x49800000 0x100000>;
- interrupts = <112>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49800000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49800000 0x4>,
+ <0x49800010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM814_TPTC0_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49800000 0x100000>;
+
+ edma_tptc0: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <112>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc1: tptc@49900000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc1";
- reg = <0x49900000 0x100000>;
- interrupts = <113>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49900000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49900000 0x4>,
+ <0x49900010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM814_TPTC1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49900000 0x100000>;
+
+ edma_tptc1: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <113>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc2: tptc@49a00000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc2";
- reg = <0x49a00000 0x100000>;
- interrupts = <114>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49a00000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49a00000 0x4>,
+ <0x49a00010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM814_TPTC2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49a00000 0x100000>;
+
+ edma_tptc2: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <114>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc3: tptc@49b00000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc3";
- reg = <0x49b00000 0x100000>;
- interrupts = <115>;
- interrupt-names = "edma3_tcerrint";
+ target-module@49b00000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49b00000 0x4>,
+ <0x49b00010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM814_TPTC3_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49b00000 0x100000>;
+
+ edma_tptc3: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <115>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
/* See TRM "Table 1-318. L4HS Instance Summary" */
@@ -574,57 +646,73 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x4a000000 0x1b4040>;
- };
- /* REVISIT: Move to live under l4hs once driver is fixed */
- mac: ethernet@4a100000 {
- compatible = "ti,cpsw";
- ti,hwmods = "cpgmac0";
- clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
- clock-names = "fck", "cpts";
- cpdma_channels = <8>;
- ale_entries = <1024>;
- bd_ram_size = <0x2000>;
- mac_control = <0x20>;
- slaves = <2>;
- active_slave = <0>;
- cpts_clock_mult = <0x80000000>;
- cpts_clock_shift = <29>;
- reg = <0x4a100000 0x800
- 0x4a100900 0x100>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&intc>;
- /*
- * c0_rx_thresh_pend
- * c0_rx_pend
- * c0_tx_pend
- * c0_misc_pend
- */
- interrupts = <40 41 42 43>;
- ranges;
- syscon = <&scm_conf>;
-
- davinci_mdio: mdio@4a100800 {
- compatible = "ti,davinci_mdio";
+ target-module@100000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ reg = <0x100900 0x4>,
+ <0x100908 0x4>,
+ <0x100904 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <0>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>;
+ ti,syss-mask = <1>;
+ clocks = <&alwon_ethernet_clkctrl DM814_ETHERNET_CPGMAC0_CLKCTRL 0>;
+ clock-names = "fck";
#address-cells = <1>;
- #size-cells = <0>;
- ti,hwmods = "davinci_mdio";
- bus_freq = <1000000>;
- reg = <0x4a100800 0x100>;
- };
-
- cpsw_emac0: slave@4a100200 {
- /* Filled in by U-Boot */
- mac-address = [ 00 00 00 00 00 00 ];
- phys = <&phy_gmii_sel 1>;
+ #size-cells = <1>;
+ ranges = <0 0x100000 0x8000>;
+
+ mac: ethernet@0 {
+ compatible = "ti,cpsw";
+ clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ clock-names = "fck", "cpts";
+ cpdma_channels = <8>;
+ ale_entries = <1024>;
+ bd_ram_size = <0x2000>;
+ mac_control = <0x20>;
+ slaves = <2>;
+ active_slave = <0>;
+ cpts_clock_mult = <0x80000000>;
+ cpts_clock_shift = <29>;
+ reg = <0 0x800>,
+ <0x900 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /*
+ * c0_rx_thresh_pend
+ * c0_rx_pend
+ * c0_tx_pend
+ * c0_misc_pend
+ */
+ interrupts = <40 41 42 43>;
+ ranges = <0 0 0x8000>;
+ syscon = <&scm_conf>;
+
+ davinci_mdio: mdio@800 {
+ compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
+ clocks = <&alwon_ethernet_clkctrl DM814_ETHERNET_CPGMAC0_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus_freq = <1000000>;
+ reg = <0x800 0x100>;
+ };
- };
+ cpsw_emac0: slave@200 {
+ /* Filled in by U-Boot */
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 1>;
+ };
- cpsw_emac1: slave@4a100300 {
- /* Filled in by U-Boot */
- mac-address = [ 00 00 00 00 00 00 ];
- phys = <&phy_gmii_sel 2>;
+ cpsw_emac1: slave@300 {
+ /* Filled in by U-Boot */
+ mac-address = [ 00 00 00 00 00 00 ];
+ phys = <&phy_gmii_sel 2>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 1edc2b48b254..2a4934b60ded 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -4,6 +4,8 @@
* kind, whether express or implied.
*/
+#include <dt-bindings/bus/ti-sysc.h>
+#include <dt-bindings/clock/dm816.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/omap.h>
@@ -138,13 +140,123 @@
};
};
- edma: edma@49000000 {
- compatible = "ti,edma3";
- ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2", "tptc3";
- reg = <0x49000000 0x10000>,
- <0x44e10f90 0x40>;
- interrupts = <12 13 14>;
- #dma-cells = <1>;
+ target-module@49000000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49000000 0x4>;
+ reg-names = "rev";
+ clocks = <&alwon_clkctrl DM816_TPCC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49000000 0x10000>;
+
+ edma: dma@0 {
+ compatible = "ti,edma3-tpcc";
+ reg = <0 0x10000>;
+ reg-names = "edma3_cc";
+ interrupts = <12 13 14>;
+ interrupt-names = "edma3_ccint", "edma3_mperr",
+ "edma3_ccerrint";
+ dma-requests = <64>;
+ #dma-cells = <2>;
+
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ <&edma_tptc2 3>, <&edma_tptc3 0>;
+
+ ti,edma-memcpy-channels = <20 21>;
+ };
+ };
+
+ target-module@49800000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49800000 0x4>,
+ <0x49800010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM816_TPTC0_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49800000 0x100000>;
+
+ edma_tptc0: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <112>;
+ interrupt-names = "edma3_tcerrint";
+ };
+ };
+
+ target-module@49900000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49900000 0x4>,
+ <0x49900010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM816_TPTC1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49900000 0x100000>;
+
+ edma_tptc1: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <113>;
+ interrupt-names = "edma3_tcerrint";
+ };
+ };
+
+ target-module@49a00000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49a00000 0x4>,
+ <0x49a00010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM816_TPTC2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49a00000 0x100000>;
+
+ edma_tptc2: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <114>;
+ interrupt-names = "edma3_tcerrint";
+ };
+ };
+
+ target-module@49b00000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x49b00000 0x4>,
+ <0x49b00010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_SMART>;
+ clocks = <&alwon_clkctrl DM816_TPTC3_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49b00000 0x100000>;
+
+ edma_tptc3: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <115>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
elm: elm@48080000 {
@@ -185,7 +297,7 @@
#address-cells = <2>;
#size-cells = <1>;
interrupts = <100>;
- dmas = <&edma 52>;
+ dmas = <&edma 52 0>;
dma-names = "rxtx";
gpmc,num-cs = <6>;
gpmc,num-waitpins = <2>;
@@ -202,7 +314,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <70>;
- dmas = <&edma 58 &edma 59>;
+ dmas = <&edma 58 0 &edma 59 0>;
dma-names = "tx", "rx";
};
@@ -213,7 +325,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <71>;
- dmas = <&edma 60 &edma 61>;
+ dmas = <&edma 60 0 &edma 61 0>;
dma-names = "tx", "rx";
};
@@ -311,10 +423,10 @@
interrupts = <65>;
ti,spi-num-cs = <4>;
ti,hwmods = "mcspi1";
- dmas = <&edma 16 &edma 17
- &edma 18 &edma 19
- &edma 20 &edma 21
- &edma 22 &edma 23>;
+ dmas = <&edma 16 0 &edma 17 0
+ &edma 18 0 &edma 19 0
+ &edma 20 0 &edma 21 0
+ &edma 22 0 &edma 23 0>;
dma-names = "tx0", "rx0", "tx1", "rx1",
"tx2", "rx2", "tx3", "rx3";
};
@@ -324,7 +436,7 @@
reg = <0x48060000 0x11000>;
ti,hwmods = "mmc1";
interrupts = <64>;
- dmas = <&edma 24 &edma 25>;
+ dmas = <&edma 24 0 &edma 25 0>;
dma-names = "tx", "rx";
};
@@ -392,7 +504,7 @@
reg = <0x48020000 0x2000>;
clock-frequency = <48000000>;
interrupts = <72>;
- dmas = <&edma 26 &edma 27>;
+ dmas = <&edma 26 0 &edma 27 0>;
dma-names = "tx", "rx";
};
@@ -402,7 +514,7 @@
reg = <0x48022000 0x2000>;
clock-frequency = <48000000>;
interrupts = <73>;
- dmas = <&edma 28 &edma 29>;
+ dmas = <&edma 28 0 &edma 29 0>;
dma-names = "tx", "rx";
};
@@ -412,7 +524,7 @@
reg = <0x48024000 0x2000>;
clock-frequency = <48000000>;
interrupts = <74>;
- dmas = <&edma 30 &edma 31>;
+ dmas = <&edma 30 0 &edma 31 0>;
dma-names = "tx", "rx";
};
diff --git a/arch/arm/boot/dts/dra62x.dtsi b/arch/arm/boot/dts/dra62x.dtsi
index d3cbb4ea35a8..cc4878aaa8ea 100644
--- a/arch/arm/boot/dts/dra62x.dtsi
+++ b/arch/arm/boot/dts/dra62x.dtsi
@@ -12,12 +12,12 @@
/* Compared to dm814x, dra62x has different offsets for Ethernet */
&mac {
- reg = <0x4a100000 0x800
- 0x4a101200 0x100>;
+ reg = <0 0x800>,
+ <0x1200 0x100>;
};
&davinci_mdio {
- reg = <0x4a101000 0x100>;
+ reg = <0x1000 0x100>;
};
#include "dra62x-clocks.dtsi"
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5f5ee16f07a3..4740989ed9c4 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -143,7 +143,7 @@
* the moment, just use a fake OCP bus entry to represent the whole bus
* hierarchy.
*/
- ocp {
+ ocp: ocp {
compatible = "ti,dra7-l3-noc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -334,43 +334,73 @@
#pinctrl-cells = <2>;
};
- edma: edma@43300000 {
- compatible = "ti,edma3-tpcc";
- ti,hwmods = "tpcc";
- reg = <0x43300000 0x100000>;
- reg-names = "edma3_cc";
- interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_ccint", "edma3_mperr",
- "edma3_ccerrint";
- dma-requests = <64>;
- #dma-cells = <2>;
+ target-module@43300000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x43300000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPCC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x43300000 0x100000>;
+
+ edma: dma@0 {
+ compatible = "ti,edma3-tpcc";
+ reg = <0 0x100000>;
+ reg-names = "edma3_cc";
+ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma3_ccint", "edma3_mperr",
+ "edma3_ccerrint";
+ dma-requests = <64>;
+ #dma-cells = <2>;
+
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 0>;
+
+ /*
+ * memcpy is disabled; it can be enabled with, for example:
+ * ti,edma-memcpy-channels = <20 21>;
+ * Note that these channels need to be masked in the
+ * xbar as well.
+ */
+ };
+ };
- ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 0>;
+ target-module@43400000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x43400000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPTC0_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x43400000 0x100000>;
- /*
- * memcpy is disabled, can be enabled with:
- * ti,edma-memcpy-channels = <20 21>;
- * for example. Note that these channels need to be
- * masked in the xbar as well.
- */
+ edma_tptc0: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
- edma_tptc0: tptc@43400000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc0";
- reg = <0x43400000 0x100000>;
- interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_tcerrint";
- };
+ target-module@43500000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x43500000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPTC1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x43500000 0x100000>;
- edma_tptc1: tptc@43500000 {
- compatible = "ti,edma3-tptc";
- ti,hwmods = "tptc1";
- reg = <0x43500000 0x100000>;
- interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_tcerrint";
+ edma_tptc1: dma@0 {
+ compatible = "ti,edma3-tptc";
+ reg = <0 0x100000>;
+ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma3_tcerrint";
+ };
};
dmm@4e000000 {
@@ -708,44 +738,99 @@
ti,irqs-safe-map = <0>;
};
- dss: dss@58000000 {
- compatible = "ti,dra7-dss";
- /* 'reg' defined in dra72x.dtsi and dra74x.dtsi */
- /* 'clocks' defined in dra72x.dtsi and dra74x.dtsi */
- status = "disabled";
- ti,hwmods = "dss_core";
- /* CTRL_CORE_DSS_PLL_CONTROL */
- syscon-pll-ctrl = <&scm_conf 0x538>;
+ target-module@58000000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x58000000 4>,
+ <0x58000014 4>;
+ reg-names = "rev", "syss";
+ ti,syss-mask = <1>;
+ clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 0>,
+ <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 10>,
+ <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 11>;
+ clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
#address-cells = <1>;
#size-cells = <1>;
- ranges;
-
- dispc@58001000 {
- compatible = "ti,dra7-dispc";
- reg = <0x58001000 0x1000>;
- interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "dss_dispc";
- clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
- /* CTRL_CORE_SMA_SW_1 */
- syscon-pol = <&scm_conf 0x534>;
- };
+ ranges = <0 0x58000000 0x800000>;
- hdmi: encoder@58060000 {
- compatible = "ti,dra7-hdmi";
- reg = <0x58040000 0x200>,
- <0x58040200 0x80>,
- <0x58040300 0x80>,
- <0x58060000 0x19000>;
- reg-names = "wp", "pll", "phy", "core";
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ dss: dss@0 {
+ compatible = "ti,dra7-dss";
+ /* 'reg' defined in dra72x.dtsi and dra74x.dtsi */
+ /* 'clocks' defined in dra72x.dtsi and dra74x.dtsi */
status = "disabled";
- ti,hwmods = "dss_hdmi";
- clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>;
- clock-names = "fck", "sys_clk";
- dmas = <&sdma_xbar 76>;
- dma-names = "audio_tx";
+ /* CTRL_CORE_DSS_PLL_CONTROL */
+ syscon-pll-ctrl = <&scm_conf 0x538>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x800000>;
+
+ target-module@1000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x1000 0x4>,
+ <0x1010 0x4>,
+ <0x1014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1000 0x1000>;
+
+ dispc@0 {
+ compatible = "ti,dra7-dispc";
+ reg = <0 0x1000>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ /* CTRL_CORE_SMA_SW_1 */
+ syscon-pol = <&scm_conf 0x534>;
+ };
+ };
+
+ target-module@40000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x40000 0x4>,
+ <0x40010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
+ clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck", "dss_clk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x40000 0x40000>;
+
+ hdmi: encoder@0 {
+ compatible = "ti,dra7-hdmi";
+ reg = <0 0x200>,
+ <0x200 0x80>,
+ <0x300 0x80>,
+ <0x20000 0x19000>;
+ reg-names = "wp", "pll", "phy", "core";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
+ dmas = <&sdma_xbar 76>;
+ dma-names = "audio_tx";
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index 82b57a35abc0..da334489b18f 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -60,9 +60,9 @@
};
&dss {
- reg = <0x58000000 0x80>,
- <0x58004054 0x4>,
- <0x58004300 0x20>;
+ reg = <0 0x80>,
+ <0x4054 0x4>,
+ <0x4300 0x20>;
reg-names = "dss", "pll1_clkctrl", "pll1";
clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>,
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index c5abc436ca1f..7b1c61298253 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -132,11 +132,11 @@
};
&dss {
- reg = <0x58000000 0x80>,
- <0x58004054 0x4>,
- <0x58004300 0x20>,
- <0x58009054 0x4>,
- <0x58009300 0x20>;
+ reg = <0 0x80>,
+ <0x4054 0x4>,
+ <0x4300 0x20>,
+ <0x9054 0x4>,
+ <0x9300 0x20>;
reg-names = "dss", "pll1_clkctrl", "pll1",
"pll2_clkctrl", "pll2";
diff --git a/arch/arm/boot/dts/ecx-2000.dts b/arch/arm/boot/dts/ecx-2000.dts
index 5651ae6dc969..f6eb71553b95 100644
--- a/arch/arm/boot/dts/ecx-2000.dts
+++ b/arch/arm/boot/dts/ecx-2000.dts
@@ -13,7 +13,6 @@
compatible = "calxeda,ecx-2000";
#address-cells = <2>;
#size-cells = <2>;
- clock-ranges;
cpus {
#address-cells = <1>;
@@ -83,8 +82,7 @@
intc: interrupt-controller@fff11000 {
compatible = "arm,cortex-a15-gic";
#interrupt-cells = <3>;
- #size-cells = <0>;
- #address-cells = <1>;
+ #address-cells = <0>;
interrupt-controller;
interrupts = <1 9 0xf04>;
reg = <0xfff11000 0x1000>,
@@ -95,7 +93,7 @@
pmu {
compatible = "arm,cortex-a9-pmu";
- interrupts = <0 76 4 0 75 4 0 74 4 0 73 4>;
+ interrupts = <0 76 4>, <0 75 4>, <0 74 4>, <0 73 4>;
};
};
};
diff --git a/arch/arm/boot/dts/ecx-common.dtsi b/arch/arm/boot/dts/ecx-common.dtsi
index 66ee1d34f72b..57a028a69373 100644
--- a/arch/arm/boot/dts/ecx-common.dtsi
+++ b/arch/arm/boot/dts/ecx-common.dtsi
@@ -27,10 +27,11 @@
reg = <0xffe08000 0x10000>;
interrupts = <0 83 4>;
dma-coherent;
- calxeda,port-phys = <&combophy5 0 &combophy0 0
- &combophy0 1 &combophy0 2
- &combophy0 3>;
- calxeda,sgpio-gpio =<&gpioh 5 1 &gpioh 6 1 &gpioh 7 1>;
+ calxeda,port-phys = < &combophy5 0>, <&combophy0 0>,
+ <&combophy0 1>, <&combophy0 2>,
+ <&combophy0 3>;
+ calxeda,sgpio-gpio =<&gpioh 5 1>, <&gpioh 6 1>,
+ <&gpioh 7 1>;
calxeda,led-order = <4 0 1 2 3>;
};
@@ -114,8 +115,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0xfff36000 0x1000>;
interrupts = <0 20 4>;
- clocks = <&pclk>;
- clock-names = "apb_pclk";
+ clocks = <&pclk>, <&pclk>;
+ clock-names = "uartclk", "apb_pclk";
};
smic@fff3a000 {
@@ -202,14 +203,14 @@
ethernet@fff50000 {
compatible = "calxeda,hb-xgmac";
reg = <0xfff50000 0x1000>;
- interrupts = <0 77 4 0 78 4 0 79 4>;
+ interrupts = <0 77 4>, <0 78 4>, <0 79 4>;
dma-coherent;
};
ethernet@fff51000 {
compatible = "calxeda,hb-xgmac";
reg = <0xfff51000 0x1000>;
- interrupts = <0 80 4 0 81 4 0 82 4>;
+ interrupts = <0 80 4>, <0 81 4>, <0 82 4>;
dma-coherent;
};
diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
index dee35e3a5c4b..b27a82072365 100644
--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
+++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
@@ -23,7 +23,7 @@
memory@40000000 {
device_type = "memory";
- reg = <0x40000000 0x1ff00000>;
+ reg = <0x40000000 0x1f800000>;
};
firmware@205f000 {
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index a1bdf7830a87..9dda6bdb9253 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -115,7 +115,7 @@
gpio-sck = <&gpy3 1 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpy3 3 GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
- cs-gpios = <&gpy4 3 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpy4 3 GPIO_ACTIVE_LOW>;
lcd@0 {
compatible = "samsung,ld9040";
@@ -124,8 +124,6 @@
vci-supply = <&ldo17_reg>;
reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>;
spi-max-frequency = <1200000>;
- spi-cpol;
- spi-cpha;
power-on-delay = <10>;
reset-delay = <10>;
panel-width-mm = <90>;
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 9c39e82e4ecb..73d6a71da88d 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -165,6 +165,15 @@
cpu0-supply = <&buck2_reg>;
};
+&cpu0_opp_table {
+ opp-1000000000 {
+ opp-suspend;
+ };
+ opp-800000000 {
+ /delete-property/opp-suspend;
+ };
+};
+
&pinctrl_1 {
gpio_power_key: power_key {
samsung,pins = "gpx1-3";
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index f8ebc620f42d..6904091d4837 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -93,22 +93,23 @@
compatible = "regulator-fixed";
reg = <0>;
regulator-name = "MAIN_DC";
+ regulator-always-on;
};
mmc_reg: regulator@1 {
compatible = "regulator-fixed";
reg = <1>;
- regulator-name = "VDD_33ON_2.8V";
+ regulator-name = "VDD_MMC";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
- gpio = <&gpx1 1 GPIO_ACTIVE_HIGH>;
- enable-active-high;
+ regulator-always-on;
};
reg_hdmi_en: regulator@2 {
compatible = "regulator-fixed";
reg = <2>;
regulator-name = "hdmi-en";
+ regulator-always-on;
};
vcc_1v2_reg: regulator@3 {
@@ -117,6 +118,7 @@
regulator-name = "VCC_1V2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
+ regulator-always-on;
};
vcc_1v8_reg: regulator@4 {
@@ -125,6 +127,7 @@
regulator-name = "VCC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
vcc_3v3_reg: regulator@5 {
@@ -133,6 +136,7 @@
regulator-name = "VCC_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
};
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index ee28d30f5476..e9a09dd0a49b 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -584,6 +584,7 @@
regulator-name = "PVDD_G3DS_1V0";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1100000>;
+ regulator-always-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -697,6 +698,7 @@
regulator-name = "PVDD_G3D_1V0";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1400000>;
+ regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
diff --git a/arch/arm/boot/dts/exynos5422-cpus.dtsi b/arch/arm/boot/dts/exynos5422-cpus.dtsi
index 1b8605cf2407..4b641b9b8179 100644
--- a/arch/arm/boot/dts/exynos5422-cpus.dtsi
+++ b/arch/arm/boot/dts/exynos5422-cpus.dtsi
@@ -31,6 +31,7 @@
operating-points-v2 = <&cluster_a7_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <539>;
+ dynamic-power-coefficient = <90>;
};
cpu1: cpu@101 {
@@ -43,6 +44,7 @@
operating-points-v2 = <&cluster_a7_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <539>;
+ dynamic-power-coefficient = <90>;
};
cpu2: cpu@102 {
@@ -55,6 +57,7 @@
operating-points-v2 = <&cluster_a7_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <539>;
+ dynamic-power-coefficient = <90>;
};
cpu3: cpu@103 {
@@ -67,6 +70,7 @@
operating-points-v2 = <&cluster_a7_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <539>;
+ dynamic-power-coefficient = <90>;
};
cpu4: cpu@0 {
@@ -79,6 +83,7 @@
operating-points-v2 = <&cluster_a15_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <310>;
};
cpu5: cpu@1 {
@@ -91,6 +96,7 @@
operating-points-v2 = <&cluster_a15_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <310>;
};
cpu6: cpu@2 {
@@ -103,6 +109,7 @@
operating-points-v2 = <&cluster_a15_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <310>;
};
cpu7: cpu@3 {
@@ -115,6 +122,7 @@
operating-points-v2 = <&cluster_a15_opp_table>;
#cooling-cells = <2>; /* min followed by max */
capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <310>;
};
};
};
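
dynamic-power-coefficient feeds the CPU energy model used by EAS and the IPA thermal governor: per the binding it is C in Pdyn = C * V^2 * f, expressed in uW/MHz/V^2, so the values above model an A15 core as roughly 3.4x the per-clock power of an A7. One CPU node from the hunk, annotated:

    cpu4: cpu@0 {
        capacity-dmips-mhz = <1024>;
        /* Pdyn = C * V^2 * f; C = 310 uW/MHz/V^2 for the A15 cluster */
        dynamic-power-coefficient = <310>;
    };
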
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index 5cf1aed20490..ab27ff8bc3dc 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -901,6 +901,7 @@
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1400000>;
regulator-boot-on;
+ regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
index f163206265bb..812659260278 100644
--- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
@@ -215,6 +215,36 @@
};
};
};
+ gpu_thermal: gpu-thermal {
+ thermal-sensors = <&tmu_gpu 0>;
+ trips {
+ gpu_alert0: gpu-alert-0 {
+ temperature = <70000>;
+ hysteresis = <10000>;
+ type = "active";
+ };
+ gpu_alert1: gpu-alert-1 {
+ temperature = <85000>;
+ hysteresis = <10000>;
+ type = "active";
+ };
+ gpu_crit0: gpu-crit-0 {
+ temperature = <120000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert0>;
+ cooling-device = <&gpu 0 2>;
+ };
+ map1 {
+ trip = <&gpu_alert1>;
+ cooling-device = <&gpu 3 6>;
+ };
+ };
+ };
};
};
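
The zone added above is a textbook instance of the thermal binding: thermal-sensors names the sensor (phandle plus channel index), trip temperatures and hysteresis are in millidegrees Celsius, "active" trips engage a cooling device while "critical" trips shut the machine down, and each cooling-device entry is a <phandle min-state max-state> triple, which is how the two alerts here step the GPU through throttle states 0-2 and then 3-6. Stripped to its skeleton (zone and trip labels hypothetical):

    thermal-zones {
        example_thermal: example-thermal {
            thermal-sensors = <&tmu_gpu 0>;      /* sensor phandle + channel */
            trips {
                ex_alert: ex-alert {
                    temperature = <70000>;       /* 70.000 degrees C */
                    hysteresis = <10000>;        /* 10 degrees */
                    type = "active";
                };
            };
            cooling-maps {
                map0 {
                    trip = <&ex_alert>;
                    cooling-device = <&gpu 0 2>; /* states 0..2 only */
                };
            };
        };
    };
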
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 1865a708b49f..5da2d81e3be2 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -357,6 +357,65 @@
};
};
};
+ gpu_thermal: gpu-thermal {
+ thermal-sensors = <&tmu_gpu 0>;
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+ trips {
+ gpu_alert0: gpu-alert-0 {
+ temperature = <50000>;
+ hysteresis = <5000>;
+ type = "active";
+ };
+ gpu_alert1: gpu-alert-1 {
+ temperature = <60000>;
+ hysteresis = <5000>;
+ type = "active";
+ };
+ gpu_alert2: gpu-alert-2 {
+ temperature = <70000>;
+ hysteresis = <5000>;
+ type = "active";
+ };
+ gpu_crit0: gpu-crit-0 {
+ temperature = <120000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ gpu_alert3: gpu-alert-3 {
+ temperature = <70000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ gpu_alert4: gpu-alert-4 {
+ temperature = <85000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert0>;
+ cooling-device = <&fan0 0 1>;
+ };
+ map1 {
+ trip = <&gpu_alert1>;
+ cooling-device = <&fan0 1 2>;
+ };
+ map2 {
+ trip = <&gpu_alert2>;
+ cooling-device = <&fan0 2 3>;
+ };
+ map3 {
+ trip = <&gpu_alert3>;
+ cooling-device = <&gpu 0 2>;
+ };
+ map4 {
+ trip = <&gpu_alert4>;
+ cooling-device = <&gpu 3 6>;
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index e2030ba16512..cc39289e99dd 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -119,13 +119,11 @@
/*
* This is a Sunon Maglev GM0502PFV2-8 cooling fan @10000 RPM.
- * Since the platform has no temperature sensor, this is controlled
- * from userspace by using the hard disks S.M.A.R.T. temperature
* sensor. It is turned on when the temperature exceeds 46 degrees
* and turned off when the temperatures goes below 41 degrees
* (celsius).
*/
- gpio-fan {
+ fan0: gpio-fan {
compatible = "gpio-fan";
/* Collides with IDE */
gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
@@ -133,6 +131,40 @@
#cooling-cells = <2>;
};
+ thermal-zones {
+ chassis-thermal {
+ /* Poll every 20 seconds */
+ polling-delay = <20000>;
+ /* Poll every 2nd second when cooling */
+ polling-delay-passive = <2000>;
+ /* Use the thermal sensor in the hard drive */
+ thermal-sensors = <&drive0>;
+
+ /* Tripping points from the fan.script in the rootfs */
+ trips {
+ alert: chassis-alert {
+ /* At 43 degrees turn on the fan */
+ temperature = <43000>;
+ hysteresis = <3000>;
+ type = "active";
+ };
+ crit: chassis-crit {
+ /* Just shut down at 60 degrees */
+ temperature = <60000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&alert>;
+ cooling-device = <&fan0 1 1>;
+ };
+ };
+ };
+ };
+
/*
* The touchpad input is connected to a GPIO bit-banged
* I2C bus.
@@ -443,8 +475,18 @@
};
};
- ata@63000000 {
+ ide@63000000 {
status = "okay";
+
+ /*
+ * This drive may have a temperature sensor with a
+ * thermal zone we can use for thermal control of the
+ * chassis temperature using the fan.
+ */
+ drive0: ide-port@0 {
+ reg = <0>;
+ #thermal-sensor-cells = <0>;
+ };
};
display-controller@6a000000 {
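
The DIR-685 conversion works because the new ide-port node doubles as a thermal-sensor provider: #thermal-sensor-cells = <0> means a sensor specifier is just the bare phandle, which the chassis zone then consumes. The provider/consumer pair in isolation:

    drive0: ide-port@0 {
        reg = <0>;
        #thermal-sensor-cells = <0>;    /* specifier = bare phandle */
    };

    chassis-thermal {
        polling-delay = <20000>;        /* sample the drive every 20 s */
        thermal-sensors = <&drive0>;
    };
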
diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
index 360642a02a48..c6f3d90e3e90 100644
--- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
@@ -297,7 +297,7 @@
};
};
- ata@63000000 {
+ ide@63000000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
index 521714f38eed..43c45f7e1e0a 100644
--- a/arch/arm/boot/dts/gemini-nas4220b.dts
+++ b/arch/arm/boot/dts/gemini-nas4220b.dts
@@ -170,11 +170,11 @@
};
};
- ata@63000000 {
+ ide@63000000 {
status = "okay";
};
- ata@63400000 {
+ ide@63400000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/gemini-sl93512r.dts b/arch/arm/boot/dts/gemini-sl93512r.dts
index a98af0351906..a0916d3c1059 100644
--- a/arch/arm/boot/dts/gemini-sl93512r.dts
+++ b/arch/arm/boot/dts/gemini-sl93512r.dts
@@ -293,11 +293,11 @@
};
};
- ata@63000000 {
+ ide@63000000 {
status = "okay";
};
- ata@63400000 {
+ ide@63400000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/gemini-sq201.dts b/arch/arm/boot/dts/gemini-sq201.dts
index 239dfacaae4d..0c6e6d35bfaa 100644
--- a/arch/arm/boot/dts/gemini-sq201.dts
+++ b/arch/arm/boot/dts/gemini-sq201.dts
@@ -289,7 +289,7 @@
};
};
- ata@63000000 {
+ ide@63000000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
index 8cf67b11751f..065ed10a79fa 100644
--- a/arch/arm/boot/dts/gemini.dtsi
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -356,7 +356,7 @@
};
};
- ata@63000000 {
+ ide@63000000 {
compatible = "cortina,gemini-pata", "faraday,ftide010";
reg = <0x63000000 0x1000>;
interrupts = <4 IRQ_TYPE_EDGE_RISING>;
@@ -365,9 +365,11 @@
clock-names = "PCLK";
sata = <&sata>;
status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
};
- ata@63400000 {
+ ide@63400000 {
compatible = "cortina,gemini-pata", "faraday,ftide010";
reg = <0x63400000 0x1000>;
interrupts = <5 IRQ_TYPE_EDGE_RISING>;
@@ -376,6 +378,8 @@
clock-names = "PCLK";
sata = <&sata>;
status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
};
dma-controller@67000000 {
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index f4e4dca6f7e7..b6b0225a769e 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -13,7 +13,6 @@
compatible = "calxeda,highbank";
#address-cells = <1>;
#size-cells = <1>;
- clock-ranges;
cpus {
#address-cells = <1>;
@@ -96,7 +95,7 @@
};
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x00000000 0xff900000>;
@@ -128,14 +127,12 @@
intc: interrupt-controller@fff11000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
- #size-cells = <0>;
- #address-cells = <1>;
interrupt-controller;
reg = <0xfff11000 0x1000>,
<0xfff10100 0x100>;
};
- L2: l2-cache {
+ L2: cache-controller {
compatible = "arm,pl310-cache";
reg = <0xfff12000 0x1000>;
interrupts = <0 70 4>;
@@ -145,14 +142,14 @@
pmu {
compatible = "arm,cortex-a9-pmu";
- interrupts = <0 76 4 0 75 4 0 74 4 0 73 4>;
+ interrupts = <0 76 4>, <0 75 4>, <0 74 4>, <0 73 4>;
};
sregs@fff3c200 {
compatible = "calxeda,hb-sregs-l2-ecc";
reg = <0xfff3c200 0x100>;
- interrupts = <0 71 4 0 72 4>;
+ interrupts = <0 71 4>, <0 72 4>;
};
};
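
Most of the highbank churn is mechanical dtc/schema cleanup: the memory node gains a unit address matching its first reg cell, the GIC loses #address-cells/#size-cells it never needed, and flat interrupt lists are split into one bracketed specifier per interrupt. The two conventions most worth copying:

    memory@0 {
        device_type = "memory";
        reg = <0x00000000 0xff900000>;  /* unit address = first reg address */
    };

    pmu {
        compatible = "arm,cortex-a9-pmu";
        /* one <type number flags> triple per interrupt */
        interrupts = <0 76 4>, <0 75 4>, <0 74 4>, <0 73 4>;
    };
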
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
index 4c9aafe00b5d..0729e72f2283 100644
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -23,7 +23,7 @@
ssp0: spi@80010000 {
compatible = "fsl,imx23-mmc";
pinctrl-names = "default";
- pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_pins_fixup>;
+ pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_sck_cfg>;
bus-width = <4>;
broken-cd;
status = "okay";
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 8257630f7a49..c5edff381213 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -267,6 +267,14 @@
fsl,pull-up = <MXS_PULL_DISABLE>;
};
+ mmc0_sck_cfg: mmc0-sck-cfg@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX23_PAD_SSP1_SCK__SSP1_SCK
+ >;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
mmc1_4bit_pins_a: mmc1-4bit@0 {
reg = <0>;
fsl,pinmux-ids = <
@@ -422,7 +430,7 @@
clocks = <&clks 16>;
};
- dcp@80028000 {
+ dcp: crypto@80028000 {
compatible = "fsl,imx23-dcp";
reg = <0x80028000 0x2000>;
interrupts = <53 54>;
diff --git a/arch/arm/boot/dts/imx25-pinfunc.h b/arch/arm/boot/dts/imx25-pinfunc.h
index f4516ccf2c1a..111bfdcbe552 100644
--- a/arch/arm/boot/dts/imx25-pinfunc.h
+++ b/arch/arm/boot/dts/imx25-pinfunc.h
@@ -82,6 +82,7 @@
#define MX25_PAD_EB0__EB0 0x040 0x258 0x000 0x00 0x000
#define MX25_PAD_EB0__AUD4_TXD 0x040 0x258 0x464 0x04 0x000
#define MX25_PAD_EB0__GPIO_2_12 0x040 0x258 0x000 0x05 0x000
+#define MX25_PAD_EB0__CSPI3_SS0 0x040 0x258 0x4bc 0x06 0x000
#define MX25_PAD_EB1__EB1 0x044 0x25c 0x000 0x00 0x000
#define MX25_PAD_EB1__AUD4_RXD 0x044 0x25c 0x460 0x04 0x000
@@ -102,11 +103,13 @@
#define MX25_PAD_CS4__NF_CE1 0x054 0x264 0x000 0x01 0x000
#define MX25_PAD_CS4__UART5_CTS 0x054 0x264 0x000 0x03 0x000
#define MX25_PAD_CS4__GPIO_3_20 0x054 0x264 0x000 0x05 0x000
+#define MX25_PAD_CS4__CSPI3_MOSI 0x054 0x264 0x4b8 0x06 0x000
#define MX25_PAD_CS5__CS5 0x058 0x268 0x000 0x00 0x000
#define MX25_PAD_CS5__NF_CE2 0x058 0x268 0x000 0x01 0x000
#define MX25_PAD_CS5__UART5_RTS 0x058 0x268 0x574 0x03 0x000
#define MX25_PAD_CS5__GPIO_3_21 0x058 0x268 0x000 0x05 0x000
+#define MX25_PAD_CS5__CSPI3_MISO 0x058 0x268 0x4b4 0x06 0x000
#define MX25_PAD_NF_CE0__NF_CE0 0x05c 0x26c 0x000 0x00 0x000
#define MX25_PAD_NF_CE0__GPIO_3_22 0x05c 0x26c 0x000 0x05 0x000
@@ -114,6 +117,7 @@
#define MX25_PAD_ECB__ECB 0x060 0x270 0x000 0x00 0x000
#define MX25_PAD_ECB__UART5_TXD 0x060 0x270 0x000 0x03 0x000
#define MX25_PAD_ECB__GPIO_3_23 0x060 0x270 0x000 0x05 0x000
+#define MX25_PAD_ECB__CSPI3_SCLK 0x060 0x270 0x4ac 0x06 0x000
#define MX25_PAD_LBA__LBA 0x064 0x274 0x000 0x00 0x000
#define MX25_PAD_LBA__UART5_RXD 0x064 0x274 0x578 0x03 0x000
@@ -251,10 +255,12 @@
#define MX25_PAD_LD12__LD12 0x0f8 0x2f0 0x000 0x00 0x000
#define MX25_PAD_LD12__CSPI2_MOSI 0x0f8 0x2f0 0x4a0 0x02 0x000
+#define MX25_PAD_LD12__KPP_ROW6 0x0f8 0x2f0 0x544 0x04 0x000
#define MX25_PAD_LD12__FEC_RDATA3 0x0f8 0x2f0 0x510 0x05 0x001
#define MX25_PAD_LD13__LD13 0x0fc 0x2f4 0x000 0x00 0x000
#define MX25_PAD_LD13__CSPI2_MISO 0x0fc 0x2f4 0x49c 0x02 0x000
+#define MX25_PAD_LD13__KPP_ROW7 0x0fc 0x2f4 0x548 0x04 0x000
#define MX25_PAD_LD13__FEC_TDATA2 0x0fc 0x2f4 0x000 0x05 0x000
#define MX25_PAD_LD14__LD14 0x100 0x2f8 0x000 0x00 0x000
@@ -512,9 +518,11 @@
#define MX25_PAD_FEC_TX_EN__FEC_TX_EN 0x1d8 0x3d0 0x000 0x00 0x000
#define MX25_PAD_FEC_TX_EN__GPIO_3_9 0x1d8 0x3d0 0x000 0x05 0x000
+#define MX25_PAD_FEC_TX_EN__KPP_ROW4 0x1d8 0x3d0 0x53c 0x06 0x000
#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 0x1dc 0x3d4 0x000 0x00 0x000
#define MX25_PAD_FEC_RDATA0__GPIO_3_10 0x1dc 0x3d4 0x000 0x05 0x000
+#define MX25_PAD_FEC_RDATA0__KPP_ROW5 0x1dc 0x3d4 0x540 0x06 0x000
#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 0x1e0 0x3d8 0x000 0x00 0x000
/*
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 40b95a290bd6..1123e683025c 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -75,7 +75,7 @@
interrupt-parent = <&asic>;
ranges;
- aips@43f00000 { /* AIPS1 */
+ bus@43f00000 { /* AIPS1 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -332,7 +332,7 @@
};
};
- aips@53f00000 { /* AIPS2 */
+ bus@53f00000 { /* AIPS2 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
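
The aips -> bus renames here, like the dcp -> crypto, sahara -> crypto and iomuxc -> pinctrl renames in the surrounding i.MX files, enforce the devicetree-spec rule that node names describe a device's class rather than its vendor IP block; the compatible strings are untouched, so driver binding is unaffected:

    bus@43f00000 {          /* formerly aips@43f00000; same compatible */
        compatible = "fsl,aips-bus", "simple-bus";
        #address-cells = <1>;
        #size-cells = <1>;
    };
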
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index f3464cf52e49..002cd223f22d 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -525,7 +525,7 @@
reg = <0x10024600 0x200>;
};
- sahara2: sahara@10025000 {
+ sahara2: crypto@10025000 {
compatible = "fsl,imx27-sahara";
reg = <0x10025000 0x1000>;
interrupts = <59>;
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
index 3a184d13887b..c5acc19c982d 100644
--- a/arch/arm/boot/dts/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -183,10 +183,20 @@
pinctrl-0 = <&auart2_2pins_a>;
status = "okay";
};
+
+ usbphy1: usbphy@8007e000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_pins_a>;
+ status = "okay";
+ };
};
};
ahb@80080000 {
+ usb1: usb@80090000 {
+ status = "okay";
+ };
+
mac0: ethernet@800f0000 {
phy-mode = "rmii";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index e14d8ef0158b..a1cbbeb39a4f 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -998,7 +998,7 @@
clocks = <&clks 26>;
};
- dcp: dcp@80028000 {
+ dcp: crypto@80028000 {
compatible = "fsl,imx28-dcp", "fsl,imx23-dcp";
reg = <0x80028000 0x2000>;
interrupts = <52 53 54>;
diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
index 6b62f0745b82..18270ec648fe 100644
--- a/arch/arm/boot/dts/imx31.dtsi
+++ b/arch/arm/boot/dts/imx31.dtsi
@@ -63,7 +63,7 @@
ranges = <0 0x1fffc000 0x4000>;
};
- aips@43f00000 { /* AIPS1 */
+ bus@43f00000 { /* AIPS1 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -225,7 +225,7 @@
};
};
- aips@53f00000 { /* AIPS2 */
+ bus@53f00000 { /* AIPS2 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 9cbdc1a15cda..2ebf2c1fa682 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -66,7 +66,7 @@
cache-level = <2>;
};
- aips1: aips@43f00000 {
+ aips1: bus@43f00000 {
compatible = "fsl,aips", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -199,7 +199,7 @@
};
};
- aips2: aips@53f00000 {
+ aips2: bus@53f00000 {
compatible = "fsl,aips", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index 0bfe7c91d0eb..d325658901c5 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -101,7 +101,7 @@
interrupt-parent = <&tzic>;
ranges;
- aips@50000000 { /* AIPS1 */
+ bus@50000000 { /* AIPS1 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -389,7 +389,7 @@
};
};
- aips@60000000 { /* AIPS2 */
+ bus@60000000 { /* AIPS2 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index 3596060f52e7..e559ab0c3645 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -217,8 +217,8 @@
simple-audio-card,widgets =
"Headphone", "Headphone Jack";
simple-audio-card,routing =
- "Headphone Jack", "HPLEFT",
- "Headphone Jack", "HPRIGHT";
+ "Headphone Jack", "TPA6130A2 HPLEFT",
+ "Headphone Jack", "TPA6130A2 HPRIGHT";
simple-audio-card,aux-devs = <&hpa1>;
sound_cpu: simple-audio-card,cpu {
@@ -470,6 +470,7 @@
compatible = "ti,tpa6130a2";
reg = <0x60>;
Vdd-supply = <&reg_3p3v>;
+ sound-name-prefix = "TPA6130A2";
};
ds1341: rtc@68 {
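
sound-name-prefix namespaces every widget and control the TPA6130A2 amplifier exposes, which is why the simple-audio-card routing above must now name "TPA6130A2 HPLEFT"/"TPA6130A2 HPRIGHT" rather than the bare widget names. Both halves of that contract (the amp's node name is illustrative; the reg and label come from this file):

    hpa1: amplifier@60 {
        compatible = "ti,tpa6130a2";
        reg = <0x60>;
        sound-name-prefix = "TPA6130A2";    /* prefixes HPLEFT/HPRIGHT */
    };

    sound {
        simple-audio-card,routing =
            "Headphone Jack", "TPA6130A2 HPLEFT",
            "Headphone Jack", "TPA6130A2 HPRIGHT";
    };
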
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index dea86b98e9c3..92fbb90bec57 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -104,6 +104,11 @@
#phy-cells = <0>;
};
+ capture-subsystem {
+ compatible = "fsl,imx-capture-subsystem";
+ ports = <&ipu_csi0>, <&ipu_csi1>;
+ };
+
display-subsystem {
compatible = "fsl,imx-display-subsystem";
ports = <&ipu_di0>, <&ipu_di1>;
@@ -143,6 +148,14 @@
clock-names = "bus", "di0", "di1";
resets = <&src 2>;
+ ipu_csi0: port@0 {
+ reg = <0>;
+ };
+
+ ipu_csi1: port@1 {
+ reg = <1>;
+ };
+
ipu_di0: port@2 {
reg = <2>;
@@ -158,7 +171,7 @@
};
};
- aips@70000000 { /* AIPS1 */
+ bus@70000000 { /* AIPS1 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -440,7 +453,7 @@
};
};
- aips@80000000 { /* AIPS2 */
+ bus@80000000 { /* AIPS2 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index ed341cfd9d09..8536f59f59e6 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -222,7 +222,7 @@
clock-names = "core_clk", "mem_iface_clk";
};
- aips@50000000 { /* AIPS1 */
+ bus@50000000 { /* AIPS1 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -654,7 +654,7 @@
};
};
- aips@60000000 { /* AIPS2 */
+ bus@60000000 { /* AIPS2 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
index 84fcc203a2e4..65359aece950 100644
--- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
@@ -1,44 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014-2016 Toradex AG
+ * Copyright 2014-2020 Toradex
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6dl-pico-dwarf.dts b/arch/arm/boot/dts/imx6dl-pico-dwarf.dts
new file mode 100644
index 000000000000..659a8e8714ea
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-pico-dwarf.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-pico-pi.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 DualLite/Solo Board and Dwarf baseboard";
+ compatible = "technexion,imx6dl-pico", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-pico-hobbit.dts b/arch/arm/boot/dts/imx6dl-pico-hobbit.dts
new file mode 100644
index 000000000000..d7403c5c4337
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-pico-hobbit.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-pico-hobbit.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 DualLite/Solo Board and Hobbit baseboard";
+ compatible = "technexion,imx6dl-pico", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-pico-nymph.dts b/arch/arm/boot/dts/imx6dl-pico-nymph.dts
new file mode 100644
index 000000000000..b282dbf953aa
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-pico-nymph.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-pico-pi.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 DualLite/Solo Board and Nymph baseboard";
+ compatible = "technexion,imx6dl-pico", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-pico-pi.dts b/arch/arm/boot/dts/imx6dl-pico-pi.dts
new file mode 100644
index 000000000000..b7b1c07f96f3
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-pico-pi.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-pico-pi.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 DualLite/Solo Board and PI baseboard";
+ compatible = "technexion,imx6dl-pico", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 829654e1835a..065d3ab0f50a 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -89,11 +89,25 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii-id";
- phy-reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
+ phy-handle = <&rgmii_phy>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
fsl,err006687-workaround-present;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Atheros AR8035 PHY */
+ rgmii_phy: ethernet-phy@4 {
+ reg = <4>;
+ interrupts-extended = <&gpio1 28 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <1000>;
+ };
+ };
};
&gpio1 {
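
phy-reset-gpios on the MAC node is deprecated; the RIoTboard and (below) the MarSBoard instead describe the AR8035 as a child of the FEC's MDIO bus, where the PHY library handles the reset line with explicit assert/deassert timing and the MAC attaches through phy-handle. The pattern, condensed from the hunk above:

    &fec {
        phy-mode = "rgmii-id";
        phy-handle = <&rgmii_phy>;

        mdio {
            #address-cells = <1>;
            #size-cells = <0>;

            rgmii_phy: ethernet-phy@4 {
                reg = <4>;                      /* MDIO address 4 */
                reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
                reset-assert-us = <10000>;      /* hold reset 10 ms */
                reset-deassert-us = <1000>;     /* settle 1 ms after release */
            };
        };
    };
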
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index 80ed5f16a76e..2b9423d55c37 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -562,6 +562,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh1>;
vbus-supply = <&reg_usb_h1_vbus>;
+ over-current-active-low;
status = "disabled";
};
@@ -569,6 +570,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotg>;
vbus-supply = <&reg_usb_otg_vbus>;
+ over-current-active-low;
srp-disable;
hnp-disable;
adp-disable;
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 008312ee0c31..77b65a402e19 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -44,6 +44,8 @@
arm-supply = <&reg_arm>;
pu-supply = <&reg_pu>;
soc-supply = <&reg_soc>;
+ nvmem-cells = <&cpu_speed_grade>;
+ nvmem-cell-names = "speed_grade";
};
cpu@1 {
@@ -85,8 +87,8 @@
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
- aips1: aips-bus@2000000 {
- iomuxc: iomuxc@20e0000 {
+ aips1: bus@2000000 {
+ iomuxc: pinctrl@20e0000 {
compatible = "fsl,imx6dl-iomuxc";
};
@@ -101,7 +103,7 @@
};
};
- aips2: aips-bus@2100000 {
+ aips2: bus@2100000 {
i2c4: i2c@21f8000 {
#address-cells = <1>;
#size-cells = <0>;
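
The nvmem-cells/"speed_grade" pair lets the i.MX6 cpufreq driver read the OCOTP speed-grading fuse and discard OPPs the silicon is not rated for, instead of each board pruning the table by hand. The consumer side is just two properties; the cpu_speed_grade cell itself is defined under the OCOTP node elsewhere in the SoC dtsi:

    cpu@0 {
        nvmem-cells = <&cpu_speed_grade>;   /* OCOTP fuse word */
        nvmem-cell-names = "speed_grade";
    };
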
diff --git a/arch/arm/boot/dts/imx6q-apalis-eval.dts b/arch/arm/boot/dts/imx6q-apalis-eval.dts
index 4665e15b196d..fab83abb6466 100644
--- a/arch/arm/boot/dts/imx6q-apalis-eval.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-eval.dts
@@ -1,44 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014-2017 Toradex AG
+ * Copyright 2014-2020 Toradex
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts
index a3fa04a97d81..1614b1ae501d 100644
--- a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts
@@ -1,44 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014-2017 Toradex AG
+ * Copyright 2014-2020 Toradex
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora.dts b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
index 5ba49d0f4880..fa9f98dd15ac 100644
--- a/arch/arm/boot/dts/imx6q-apalis-ixora.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
@@ -1,44 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014-2017 Toradex AG
+ * Copyright 2014-2020 Toradex
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts
index 84b30bd6908f..05ee28388229 100644
--- a/arch/arm/boot/dts/imx6q-marsboard.dts
+++ b/arch/arm/boot/dts/imx6q-marsboard.dts
@@ -111,8 +111,22 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii-id";
- phy-reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
+ phy-handle = <&rgmii_phy>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Atheros AR8035 PHY */
+ rgmii_phy: ethernet-phy@4 {
+ reg = <4>;
+ interrupts-extended = <&gpio1 28 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <1000>;
+ };
+ };
};
&hdmi {
diff --git a/arch/arm/boot/dts/imx6q-novena.dts b/arch/arm/boot/dts/imx6q-novena.dts
index 61347a545d6c..69f170ff31c5 100644
--- a/arch/arm/boot/dts/imx6q-novena.dts
+++ b/arch/arm/boot/dts/imx6q-novena.dts
@@ -107,7 +107,7 @@
};
panel: panel {
- compatible = "innolux,n133hse-ea1", "simple-panel";
+ compatible = "innolux,n133hse-ea1";
backlight = <&backlight>;
};
diff --git a/arch/arm/boot/dts/imx6q-pico-dwarf.dts b/arch/arm/boot/dts/imx6q-pico-dwarf.dts
new file mode 100644
index 000000000000..618d2743e1e9
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-pico-dwarf.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-pico-pi.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 Quad Board and Dwarf baseboard";
+ compatible = "technexion,imx6q-pico", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-pico-hobbit.dts b/arch/arm/boot/dts/imx6q-pico-hobbit.dts
new file mode 100644
index 000000000000..7a666507b456
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-pico-hobbit.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-pico-hobbit.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 Quad Board and Hobbit baseboard";
+ compatible = "technexion,imx6q-pico", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-pico-nymph.dts b/arch/arm/boot/dts/imx6q-pico-nymph.dts
new file mode 100644
index 000000000000..fe5a7becc9e5
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-pico-nymph.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-pico-pi.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 Quad Board and Nymph baseboard";
+ compatible = "technexion,imx6q-pico", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-pico-pi.dts b/arch/arm/boot/dts/imx6q-pico-pi.dts
new file mode 100644
index 000000000000..9413f0a68f54
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-pico-pi.dts
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-pico-pi.dtsi"
+
+/ {
+ model = "TechNexion PICO-IMX6 Quad Board and PI baseboard";
+ compatible = "technexion,imx6q-pico", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 9d3be1cc6b64..78a4d64929f3 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -49,6 +49,8 @@
arm-supply = <&reg_arm>;
pu-supply = <&reg_pu>;
soc-supply = <&reg_soc>;
+ nvmem-cells = <&cpu_speed_grade>;
+ nvmem-cell-names = "speed_grade";
};
cpu1: cpu@1 {
@@ -164,7 +166,7 @@
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
- aips-bus@2000000 { /* AIPS1 */
+ bus@2000000 { /* AIPS1 */
spba-bus@2000000 {
ecspi5: spi@2018000 {
#address-cells = <1>;
@@ -181,7 +183,7 @@
};
};
- iomuxc: iomuxc@20e0000 {
+ iomuxc: pinctrl@20e0000 {
compatible = "fsl,imx6q-iomuxc";
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index 347a5edc6927..e34be8fabd93 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -1,44 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014-2017 Toradex AG
+ * Copyright 2014-2020 Toradex
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
index d03dff23863d..6e3c6b4925a7 100644
--- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
@@ -1,44 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014-2016 Toradex AG
+ * Copyright 2014-2020 Toradex
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 1a9a9d98f284..60563ff0b7ce 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -313,6 +313,11 @@
interrupts = <12 2>;
wakeup-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
};
+
+ accel@1e {
+ compatible = "nxp,fxos8700";
+ reg = <0x1e>;
+ };
};
&ldb {
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 54b2beadd7a2..8942bec65c5c 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -304,6 +304,11 @@
interrupts = <11 2>;
wakeup-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
+
+ accel@1e {
+ compatible = "nxp,fxos8700";
+ reg = <0x1e>;
+ };
};
&ldb {
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 1b6c1331c220..c40583dbd96d 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -361,6 +361,11 @@
interrupts = <12 2>;
wakeup-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
};
+
+ accel@1e {
+ compatible = "nxp,fxos8700";
+ reg = <0x1e>;
+ };
};
&ldb {
diff --git a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
index a1066897be18..ee85031c3916 100644
--- a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
@@ -173,6 +173,25 @@
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
+ magn@1c {
+ compatible = "st,lsm9ds1-magn";
+ reg = <0x1c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mag>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ imu@6a {
+ compatible = "st,lsm9ds1-imu";
+ reg = <0x6a>;
+ st,drdy-int-pin = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_imu>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
ltc3676: pmic@3c {
compatible = "lltc,ltc3676";
reg = <0x3c>;
@@ -426,6 +445,12 @@
>;
};
+ pinctrl_imu: imugrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x1b0b0
+ >;
+ };
+
pinctrl_ipu1_csi0: ipu1csi0grp {
fsl,pins = <
MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0
@@ -449,6 +474,12 @@
>;
};
+ pinctrl_mag: maggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0
+ >;
+ };
+
pinctrl_pcie: pciegrp {
fsl,pins = <
MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
index be1af7482f89..30fe47ff64a4 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
@@ -220,6 +220,14 @@
status = "okay";
};
+/* cc1352 */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
/* Sterling-LWB Bluetooth */
&uart4 {
pinctrl-names = "default";
@@ -411,6 +419,23 @@
>;
};
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D23__UART3_RTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_D31__UART3_CTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x4001b0b1 /* DIO20 */
+ MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x4001b0b1 /* DIO14 */
+ MX6QDL_PAD_DISP0_DAT12__GPIO5_IO06 0x4001b0b1 /* DIO15 */
+ MX6QDL_PAD_DISP0_DAT14__GPIO5_IO08 0x1b0b1 /* TMS */
+ MX6QDL_PAD_DISP0_DAT15__GPIO5_IO09 0x1b0b1 /* TCK */
+ MX6QDL_PAD_DISP0_DAT16__GPIO5_IO10 0x1b0b1 /* TDO */
+ MX6QDL_PAD_DISP0_DAT17__GPIO5_IO11 0x1b0b1 /* TDI */
+ MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17 0x4001b0b1 /* RST# */
+ >;
+ };
+
pinctrl_uart4: uart4grp {
fsl,pins = <
MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
index 77d871340eb7..41ebe4599e43 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
@@ -88,7 +88,7 @@
reg = <0x50>;
};
- pmic@58 {
+ pmic: pmic@58 {
compatible = "dlg,da9062";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
@@ -96,6 +96,8 @@
interrupt-parent = <&gpio1>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ gpio-controller;
+ #gpio-cells = <2>;
da9062_rtc: rtc {
compatible = "dlg,da9062-rtc";
@@ -107,6 +109,7 @@
watchdog {
compatible = "dlg,da9062-watchdog";
+ dlg,use-sw-pm;
};
regulators {
diff --git a/arch/arm/boot/dts/imx6qdl-pico-dwarf.dtsi b/arch/arm/boot/dts/imx6qdl-pico-dwarf.dtsi
new file mode 100644
index 000000000000..3a968782e854
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-pico-dwarf.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+//
+// Copyright 2017 NXP
+
+#include "imx6qdl-pico.dtsi"
+
+/ {
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led {
+ label = "gpio-led";
+ gpios = <&gpio5 31 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+};
+
+&i2c1 {
+ mpl3115@60 {
+ compatible = "fsl,mpl3115";
+ reg = <0x60>;
+ };
+};
+
+&i2c2 {
+ io-expander@25 {
+ compatible = "nxp,pca9554";
+ reg = <0x25>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+
+};
+
+&iomuxc {
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT13__GPIO5_IO31 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-pico-hobbit.dtsi b/arch/arm/boot/dts/imx6qdl-pico-hobbit.dtsi
new file mode 100644
index 000000000000..144c4727fbc7
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-pico-hobbit.dtsi
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+//
+// Copyright 2017 NXP
+
+#include "imx6qdl-pico.dtsi"
+
+/ {
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led {
+ label = "gpio-led";
+ gpios = <&gpio5 31 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+};
+
+&i2c2 {
+ status = "okay";
+
+ adc081c: adc@50 {
+ compatible = "ti,adc081c";
+ reg = <0x50>;
+ vref-supply = <&reg_3p3v>;
+ };
+};
+
+&iomuxc {
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT13__GPIO5_IO31 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-pico-nymph.dtsi b/arch/arm/boot/dts/imx6qdl-pico-nymph.dtsi
new file mode 100644
index 000000000000..3d56a4216448
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-pico-nymph.dtsi
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+#include "imx6qdl-pico.dtsi"
+
+/ {
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led {
+ label = "gpio-led";
+ gpios = <&gpio5 31 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+};
+
+&i2c1 {
+ adc@52 {
+ compatible = "ti,adc081c";
+ reg = <0x52>;
+ vref-supply = <&reg_2p5v>;
+ };
+};
+
+&i2c2 {
+ io-expander@25 {
+ compatible = "nxp,pca9554";
+ reg = <0x25>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ };
+};
+
+&i2c3 {
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+};
+
+&pcie {
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT13__GPIO5_IO31 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-pico-pi.dtsi b/arch/arm/boot/dts/imx6qdl-pico-pi.dtsi
new file mode 100644
index 000000000000..b823dce62e63
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-pico-pi.dtsi
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+//
+// Copyright 2017 NXP
+
+#include "imx6qdl-pico.dtsi"
+
+/ {
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led {
+ label = "gpio-led";
+ gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+};
+
+&hdmi {
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_PIXCLK__GPIO5_IO18 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-pico.dtsi b/arch/arm/boot/dts/imx6qdl-pico.dtsi
new file mode 100644
index 000000000000..39dfd90c2c6b
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-pico.dtsi
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+//
+// Copyright 2018 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ reg_2p5v: regulator-2p5v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P5V";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_1p5v: regulator-1p5v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P5V";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+
+ reg_2p8v: regulator-2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg_vbus>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_LOW>;
+ };
+
+ codec_osc: clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-sgtl5000";
+ model = "imx6-pico-sgtl5000";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <1>;
+ mux-ext-port = <3>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm4 0 50000 0>;
+ brightness-levels = <0 36 72 108 144 180 216 255>;
+ default-brightness-level = <6>;
+ status = "okay";
+ };
+
+ reg_lcd_3v3: regulator-lcd-3v3 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_lcd>;
+ regulator-name = "lcd-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ lcd_display: disp0 {
+ compatible = "fsl,imx-parallel-display";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1>;
+ status = "okay";
+
+ port@0 {
+ reg = <0>;
+
+ lcd_display_in: endpoint {
+ remote-endpoint = <&ipu1_di0_disp0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ lcd_display_out: endpoint {
+ remote-endpoint = <&lcd_panel_in>;
+ };
+ };
+ };
+
+ panel {
+ compatible = "vxt,vl050-8048nt-c01";
+ backlight = <&backlight>;
+ power-supply = <&reg_lcd_3v3>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ status = "okay";
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+ <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ cs-gpios = <&gpio2 27 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii-id";
+ phy-reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c2>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ sgtl5000: audio-codec@a {
+ #sound-dai-cells = <0>;
+ reg = <0x0a>;
+ compatible = "fsl,sgtl5000";
+ clocks = <&codec_osc>;
+ VDDA-supply = <&reg_2p5v>;
+ VDDIO-supply = <&reg_1p8v>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5x06";
+ reg = <0x38>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ wakeup-source;
+ };
+
+ camera@3c {
+ compatible = "ovti,ov5645";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ov5645>;
+ reg = <0x3c>;
+ clocks = <&clks IMX6QDL_CLK_CKO2>;
+ clock-names = "xclk";
+ clock-frequency = <24000000>;
+ vdddo-supply = <&reg_1p8v>;
+ vdda-supply = <&reg_2p8v>;
+ vddd-supply = <&reg_1p5v>;
+ enable-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+
+ port {
+ ov5645_to_mipi_csi2: endpoint {
+ remote-endpoint = <&mipi_csi2_in>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+&ipu1_di0_disp0 {
+ remote-endpoint = <&lcd_display_in>;
+};
+
+&mipi_csi {
+ status = "okay";
+
+ port@0 {
+ reg = <0>;
+
+ mipi_csi2_in: endpoint {
+ remote-endpoint = <&ov5645_to_mipi_csi2>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie_reset>;
+ reset-gpio = <&gpio5 21 GPIO_ACTIVE_LOW>;
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>;
+ status = "okay";
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+ status = "okay";
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+ status = "okay";
+};
+
+&ssi1 {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 { /* Bluetooth module */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usbh1 {
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <8>;
+ cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&usdhc2 { /* Wifi/BT */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <4>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_MCLK__GPIO5_IO19 0x4001b0b5 /* PICO_P24 */
+ MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21 0x4001b0b5 /* PICO_P26 */
+ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x4001b0b5 /* PICO_P28 */
+ MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x4001b0b5 /* PICO_P30 */
+ MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x4001b0b5 /* PICO_P32 */
+ MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x4001b0b5 /* PICO_P34 */
+ MX6QDL_PAD_CSI0_DAT12__GPIO5_IO30 0x4001b0b5 /* PICO_P42 */
+ MX6QDL_PAD_CSI0_DAT13__GPIO5_IO31 0x4001b0b5 /* PICO_P44 */
+ MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x4001b0b5 /* PICO_P48 */
+ >;
+ };
+
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT7__AUD3_RXD 0x130b0
+ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
+ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x110b0
+ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x000f0b0
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_OE__ECSPI2_MISO 0x1b0b1
+ MX6QDL_PAD_EIM_CS1__ECSPI2_MOSI 0x1b0b1
+ MX6QDL_PAD_EIM_CS0__ECSPI2_SCLK 0x1b0b1
+ MX6QDL_PAD_EIM_RW__GPIO2_IO26 0x000f0b0
+ MX6QDL_PAD_EIM_LBA__GPIO2_IO27 0x000f0b0
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1f0b1
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x1b0b0
+ MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b0b0
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x1b0b0
+ MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_ipu1: ipu1grp {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
+ MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x10
+ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10
+ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10
+ MX6QDL_PAD_DI0_PIN4__IPU1_DI0_PIN04 0x10
+ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10
+ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10
+ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10
+ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10
+ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10
+ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10
+ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10
+ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10
+ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10
+ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10
+ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10
+ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10
+ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10
+ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10
+ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10
+ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10
+ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10
+ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10
+ MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10
+ MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10
+ MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10
+ MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10
+ MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10
+ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10
+ >;
+ };
+
+ pinctrl_ov5645: ov5645grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x0b0b0
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x0b0b0
+ MX6QDL_PAD_GPIO_3__CCM_CLKO2 0x000b0
+ >;
+ };
+
+ pinctrl_pcie_reset: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21 0x130b0
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_9__PWM1_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm2: pwm2grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__PWM2_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT2__PWM4_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_reg_lcd: reglcdgrp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT3__GPIO2_IO11 0x1b0b0
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT5__UART2_RTS_B 0x1b0b1
+ MX6QDL_PAD_SD4_DAT6__UART2_CTS_B 0x1b0b1
+ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_D31__UART3_RTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+ >;
+ };
+
+ pinctrl_usbotg_vbus: usbotgvbusgrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17071
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x17071
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17071
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17071
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17071
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17071
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0xb0b1
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi
index 44a97ba93a95..352ac585ca6b 100644
--- a/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sr-som-ti.dtsi
@@ -153,6 +153,7 @@
bus-width = <4>;
keep-power-in-suspend;
mmc-pwrseq = <&pwrseq_ti_wifi>;
+ cap-power-off-card;
non-removable;
vmmc-supply = <&vcc_3v3>;
/* vqmmc-supply = <&nvcc_sd1>; - MMC layer doesn't like it! */
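For context, cap-power-off-card tells the MMC core that it is safe to cut power to the soldered-down SDIO card, so the TI Wi-Fi module is taken through its mmc-pwrseq handler on every power cycle instead of being left half-initialized. A minimal sketch of such a host node, assuming the usdhc1 label and reusing the pwrseq/regulator names visible in the hunk above:

&usdhc1 {
	bus-width = <4>;
	non-removable;		/* soldered-down SDIO module, no card detect */
	keep-power-in-suspend;
	cap-power-off-card;	/* powering the card off is safe */
	mmc-pwrseq = <&pwrseq_ti_wifi>;
	vmmc-supply = <&vcc_3v3>;
	status = "okay";
};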
diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
index 0075637f9b0b..20350e803377 100644
--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
@@ -216,6 +216,13 @@
};
};
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>;
+};
+
&cpu0 {
fsl,soc-operating-points = <
/* ARM kHz SOC-PU uV */
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6b4b8525f98..98da446aa0f2 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -294,7 +294,7 @@
status = "disabled";
};
- aips-bus@2000000 { /* AIPS1 */
+ bus@2000000 { /* AIPS1 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -574,7 +574,7 @@
status = "disabled";
};
- gpt: gpt@2098000 {
+ gpt: timer@2098000 {
compatible = "fsl,imx6q-gpt", "fsl,imx31-gpt";
reg = <0x02098000 0x4000>;
interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -661,7 +661,7 @@
#interrupt-cells = <2>;
};
- kpp: kpp@20b8000 {
+ kpp: keypad@20b8000 {
compatible = "fsl,imx6q-kpp", "fsl,imx21-kpp";
reg = <0x020b8000 0x4000>;
interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
@@ -669,14 +669,14 @@
status = "disabled";
};
- wdog1: wdog@20bc000 {
+ wdog1: watchdog@20bc000 {
compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
reg = <0x020bc000 0x4000>;
interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_IPG>;
};
- wdog2: wdog@20c0000 {
+ wdog2: watchdog@20c0000 {
compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
reg = <0x020c0000 0x4000>;
interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
@@ -684,7 +684,7 @@
status = "disabled";
};
- clks: ccm@20c4000 {
+ clks: clock-controller@20c4000 {
compatible = "fsl,imx6q-ccm";
reg = <0x020c4000 0x4000>;
interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
@@ -908,7 +908,7 @@
};
};
- iomuxc: iomuxc@20e0000 {
+ iomuxc: pinctrl@20e0000 {
compatible = "fsl,imx6dl-iomuxc", "fsl,imx6q-iomuxc";
reg = <0x20e0000 0x4000>;
};
@@ -935,14 +935,14 @@
};
};
- aips-bus@2100000 { /* AIPS2 */
+ bus@2100000 { /* AIPS2 */
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x02100000 0x100000>;
ranges;
- crypto: caam@2100000 {
+ crypto: crypto@2100000 {
compatible = "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
@@ -954,13 +954,13 @@
<&clks IMX6QDL_CLK_EIM_SLOW>;
clock-names = "mem", "aclk", "ipg", "emi_slow";
- sec_jr0: jr0@1000 {
+ sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr1: jr1@2000 {
+ sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
@@ -1039,13 +1039,13 @@
compatible = "fsl,imx6q-fec";
reg = <0x02188000 0x4000>;
interrupt-names = "int0", "pps";
- interrupts-extended =
- <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>,
- <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
+ <0 119 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_ENET>,
<&clks IMX6QDL_CLK_ENET>,
<&clks IMX6QDL_CLK_ENET_REF>;
clock-names = "ipg", "ahb", "ptp";
+ gpr = <&gpr>;
status = "disabled";
};
@@ -1161,10 +1161,16 @@
status = "disabled";
};
- ocotp: ocotp@21bc000 {
+ ocotp: ocotp-ctrl@21bc000 {
compatible = "fsl,imx6q-ocotp", "syscon";
reg = <0x021bc000 0x4000>;
clocks = <&clks IMX6QDL_CLK_IIM>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_speed_grade: speed-grade@10 {
+ reg = <0x10 4>;
+ };
};
tzasc@21d0000 { /* TZASC1 */
diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
index 5f51f8e5c1fa..b310f13a53f2 100644
--- a/arch/arm/boot/dts/imx6qp.dtsi
+++ b/arch/arm/boot/dts/imx6qp.dtsi
@@ -18,7 +18,7 @@
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
- aips-bus@2100000 {
+ bus@2100000 {
pre1: pre@21c8000 {
compatible = "fsl,imx6qp-pre";
reg = <0x021c8000 0x1000>;
@@ -77,7 +77,6 @@
};
&fec {
- /delete-property/interrupts-extended;
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
<0 119 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 59c54e6ad09a..8230b45057a1 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -74,6 +74,8 @@
arm-supply = <&reg_arm>;
pu-supply = <&reg_pu>;
soc-supply = <&reg_soc>;
+ nvmem-cells = <&cpu_speed_grade>;
+ nvmem-cell-names = "speed_grade";
};
};
@@ -143,7 +145,7 @@
arm,data-latency = <4 2 3>;
};
- aips1: aips-bus@2000000 {
+ aips1: bus@2000000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -380,7 +382,7 @@
clock-names = "ipg", "per";
};
- gpt: gpt@2098000 {
+ gpt: timer@2098000 {
compatible = "fsl,imx6sl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -491,7 +493,7 @@
<&iomuxc 21 161 1>;
};
- kpp: kpp@20b8000 {
+ kpp: keypad@20b8000 {
compatible = "fsl,imx6sl-kpp", "fsl,imx21-kpp";
reg = <0x020b8000 0x4000>;
interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
@@ -499,14 +501,14 @@
status = "disabled";
};
- wdog1: wdog@20bc000 {
+ wdog1: watchdog@20bc000 {
compatible = "fsl,imx6sl-wdt", "fsl,imx21-wdt";
reg = <0x020bc000 0x4000>;
interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_IPG>;
};
- wdog2: wdog@20c0000 {
+ wdog2: watchdog@20c0000 {
compatible = "fsl,imx6sl-wdt", "fsl,imx21-wdt";
reg = <0x020c0000 0x4000>;
interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
@@ -514,7 +516,7 @@
status = "disabled";
};
- clks: ccm@20c4000 {
+ clks: clock-controller@20c4000 {
compatible = "fsl,imx6sl-ccm";
reg = <0x020c4000 0x4000>;
interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
@@ -728,7 +730,7 @@
reg = <0x020e0000 0x38>;
};
- iomuxc: iomuxc@20e0000 {
+ iomuxc: pinctrl@20e0000 {
compatible = "fsl,imx6sl-iomuxc";
reg = <0x020e0000 0x4000>;
};
@@ -777,7 +779,7 @@
power-domains = <&pd_disp>;
};
- dcp: dcp@20fc000 {
+ dcp: crypto@20fc000 {
compatible = "fsl,imx6sl-dcp", "fsl,imx28-dcp";
reg = <0x020fc000 0x4000>;
interrupts = <0 99 IRQ_TYPE_LEVEL_HIGH>,
@@ -786,7 +788,7 @@
};
};
- aips2: aips-bus@2100000 {
+ aips2: bus@2100000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -949,10 +951,16 @@
status = "disabled";
};
- ocotp: ocotp@21bc000 {
+ ocotp: ocotp-ctrl@21bc000 {
compatible = "fsl,imx6sl-ocotp", "syscon";
reg = <0x021bc000 0x4000>;
clocks = <&clks IMX6SL_CLK_OCOTP>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_speed_grade: speed-grade@10 {
+ reg = <0x10 4>;
+ };
};
audmux: audmux@21d8000 {
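For context, the ocotp additions above pair with the new nvmem-cells consumer in the cpu node: the OCOTP block becomes an nvmem provider exposing the 4-byte fuse word at offset 0x10, and cpufreq reads it back as "speed_grade" to restrict the operating points to what the part is fused for. The pattern, condensed from the hunks above:

ocotp: ocotp-ctrl@21bc000 {
	compatible = "fsl,imx6sl-ocotp", "syscon";
	reg = <0x021bc000 0x4000>;
	clocks = <&clks IMX6SL_CLK_OCOTP>;
	#address-cells = <1>;
	#size-cells = <1>;

	/* 4-byte fuse word holding the CPU speed grade */
	cpu_speed_grade: speed-grade@10 {
		reg = <0x10 4>;
	};
};

&cpu0 {
	nvmem-cells = <&cpu_speed_grade>;
	nvmem-cell-names = "speed_grade";
};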
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index a1bc5bb31756..edd3abb9a9f1 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -72,6 +72,8 @@
<&clks IMX6SLL_CLK_PLL1_SYS>;
clock-names = "arm", "pll2_pfd2_396m", "step",
"pll1_sw", "pll1_sys";
+ nvmem-cells = <&cpu_speed_grade>;
+ nvmem-cell-names = "speed_grade";
};
};
@@ -144,7 +146,7 @@
arm,data-latency = <4 2 3>;
};
- aips1: aips-bus@2000000 {
+ aips1: bus@2000000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -652,7 +654,7 @@
status = "disabled";
};
- dcp: dcp@20fc000 {
+ dcp: crypto@20fc000 {
compatible = "fsl,imx28-dcp";
reg = <0x020fc000 0x4000>;
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
@@ -663,7 +665,7 @@
};
};
- aips2: aips-bus@2100000 {
+ aips2: bus@2100000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -791,6 +793,10 @@
reg = <0x021bc000 0x4000>;
clocks = <&clks IMX6SLL_CLK_OCOTP>;
+ cpu_speed_grade: speed-grade@10 {
+ reg = <0x10 4>;
+ };
+
tempmon_calib: calib@38 {
reg = <0x38 4>;
};
diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
index 832b5c5d7441..d84ea6999377 100644
--- a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
+++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
@@ -484,31 +484,31 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX6SX_PAD_GPIO1_IO04__UART1_TX 0x1b0b1
- MX6SX_PAD_GPIO1_IO05__UART1_RX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX 0x1b0b1
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
- MX6SX_PAD_GPIO1_IO06__UART2_TX 0x1b0b1
- MX6SX_PAD_GPIO1_IO07__UART2_RX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO06__UART2_DCE_TX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO07__UART2_DCE_RX 0x1b0b1
>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <
- MX6SX_PAD_QSPI1B_SS0_B__UART3_TX 0x1b0b1
- MX6SX_PAD_QSPI1B_SCLK__UART3_RX 0x1b0b1
+ MX6SX_PAD_QSPI1B_SS0_B__UART3_DCE_TX 0x1b0b1
+ MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x1b0b1
>;
};
pinctrl_uart5: uart5grp {
fsl,pins = <
- MX6SX_PAD_KEY_COL3__UART5_TX 0x1b0b1
- MX6SX_PAD_KEY_ROW3__UART5_RX 0x1b0b1
- MX6SX_PAD_SD3_DATA6__UART3_RTS_B 0x1b0b1
- MX6SX_PAD_SD3_DATA7__UART3_CTS_B 0x1b0b1
+ MX6SX_PAD_KEY_COL3__UART5_DCE_TX 0x1b0b1
+ MX6SX_PAD_KEY_ROW3__UART5_DCE_RX 0x1b0b1
+ MX6SX_PAD_SD3_DATA6__UART3_DCE_RTS 0x1b0b1
+ MX6SX_PAD_SD3_DATA7__UART3_DCE_CTS 0x1b0b1
>;
};
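For context, the board changes above track the rename in imx6sx-pinfunc.h below: each UART pad now carries explicit UARTn_DCE_* and UARTn_DTE_* variants, since the same ball is TX in one role and RX in the other (note how the daisy-chain register/select columns swap between the paired defines). Board files pick the set matching the wiring; the old role-less names survive only as the deprecated aliases at the end of the header. For example, with the SoC wired as the DCE:

pinctrl_uart1: uart1grp {
	fsl,pins = <
		/* SoC drives TX and samples RX, i.e. acts as the DCE */
		MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX	0x1b0b1
		MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX	0x1b0b1
	>;
};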
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h
index aa194a2fdd53..0b02c7e60c17 100644
--- a/arch/arm/boot/dts/imx6sx-pinfunc.h
+++ b/arch/arm/boot/dts/imx6sx-pinfunc.h
@@ -42,8 +42,8 @@
#define MX6SX_PAD_GPIO1_IO03__GPIO1_IO_3 0x0020 0x0368 0x0000 0x5 0x0
#define MX6SX_PAD_GPIO1_IO03__CCM_PLL3_BYP 0x0020 0x0368 0x0000 0x6 0x0
#define MX6SX_PAD_GPIO1_IO03__PHY_TCK 0x0020 0x0368 0x0000 0x7 0x0
-#define MX6SX_PAD_GPIO1_IO04__UART1_RX 0x0024 0x036C 0x0830 0x0 0x0
-#define MX6SX_PAD_GPIO1_IO04__UART1_TX 0x0024 0x036C 0x0000 0x0 0x0
+#define MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX 0x0024 0x036C 0x0000 0x0 0x0
+#define MX6SX_PAD_GPIO1_IO04__UART1_DTE_RX 0x0024 0x036C 0x0830 0x0 0x0
#define MX6SX_PAD_GPIO1_IO04__USDHC2_RESET_B 0x0024 0x036C 0x0000 0x1 0x0
#define MX6SX_PAD_GPIO1_IO04__ENET1_MDC 0x0024 0x036C 0x0000 0x2 0x0
#define MX6SX_PAD_GPIO1_IO04__OSC32K_32K_OUT 0x0024 0x036C 0x0000 0x3 0x0
@@ -51,8 +51,8 @@
#define MX6SX_PAD_GPIO1_IO04__GPIO1_IO_4 0x0024 0x036C 0x0000 0x5 0x0
#define MX6SX_PAD_GPIO1_IO04__CCM_PLL2_BYP 0x0024 0x036C 0x0000 0x6 0x0
#define MX6SX_PAD_GPIO1_IO04__PHY_TMS 0x0024 0x036C 0x0000 0x7 0x0
-#define MX6SX_PAD_GPIO1_IO05__UART1_RX 0x0028 0x0370 0x0830 0x0 0x1
-#define MX6SX_PAD_GPIO1_IO05__UART1_TX 0x0028 0x0370 0x0000 0x0 0x0
+#define MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX 0x0028 0x0370 0x0830 0x0 0x1
+#define MX6SX_PAD_GPIO1_IO05__UART1_DTE_TX 0x0028 0x0370 0x0000 0x0 0x0
#define MX6SX_PAD_GPIO1_IO05__USDHC2_VSELECT 0x0028 0x0370 0x0000 0x1 0x0
#define MX6SX_PAD_GPIO1_IO05__ENET1_MDIO 0x0028 0x0370 0x0764 0x2 0x0
#define MX6SX_PAD_GPIO1_IO05__ASRC_ASRC_EXT_CLK 0x0028 0x0370 0x0000 0x3 0x0
@@ -60,21 +60,23 @@
#define MX6SX_PAD_GPIO1_IO05__GPIO1_IO_5 0x0028 0x0370 0x0000 0x5 0x0
#define MX6SX_PAD_GPIO1_IO05__SRC_TESTER_ACK 0x0028 0x0370 0x0000 0x6 0x0
#define MX6SX_PAD_GPIO1_IO05__PHY_TDO 0x0028 0x0370 0x0000 0x7 0x0
-#define MX6SX_PAD_GPIO1_IO06__UART2_RX 0x002C 0x0374 0x0838 0x0 0x0
-#define MX6SX_PAD_GPIO1_IO06__UART2_TX 0x002C 0x0374 0x0000 0x0 0x0
+#define MX6SX_PAD_GPIO1_IO06__UART2_DCE_TX 0x002C 0x0374 0x0000 0x0 0x0
+#define MX6SX_PAD_GPIO1_IO06__UART2_DTE_RX 0x002C 0x0374 0x0838 0x0 0x0
#define MX6SX_PAD_GPIO1_IO06__USDHC2_CD_B 0x002C 0x0374 0x086C 0x1 0x1
#define MX6SX_PAD_GPIO1_IO06__ENET2_MDC 0x002C 0x0374 0x0000 0x2 0x0
#define MX6SX_PAD_GPIO1_IO06__CSI1_MCLK 0x002C 0x0374 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO06__UART1_RTS_B 0x002C 0x0374 0x082C 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO06__UART1_DCE_RTS 0x002C 0x0374 0x082C 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO06__UART1_DTE_CTS 0x002C 0x0374 0x0000 0x4 0x0
#define MX6SX_PAD_GPIO1_IO06__GPIO1_IO_6 0x002C 0x0374 0x0000 0x5 0x0
#define MX6SX_PAD_GPIO1_IO06__SRC_ANY_PU_RESET 0x002C 0x0374 0x0000 0x6 0x0
#define MX6SX_PAD_GPIO1_IO06__OCOTP_CTRL_WRAPPER_FUSE_LATCHED 0x002C 0x0374 0x0000 0x7 0x0
-#define MX6SX_PAD_GPIO1_IO07__UART2_RX 0x0030 0x0378 0x0838 0x0 0x1
-#define MX6SX_PAD_GPIO1_IO07__UART2_TX 0x0030 0x0378 0x0000 0x0 0x0
+#define MX6SX_PAD_GPIO1_IO07__UART2_DCE_RX 0x0030 0x0378 0x0838 0x0 0x1
+#define MX6SX_PAD_GPIO1_IO07__UART2_DTE_TX 0x0030 0x0378 0x0000 0x0 0x0
#define MX6SX_PAD_GPIO1_IO07__USDHC2_WP 0x0030 0x0378 0x0870 0x1 0x1
#define MX6SX_PAD_GPIO1_IO07__ENET2_MDIO 0x0030 0x0378 0x0770 0x2 0x0
#define MX6SX_PAD_GPIO1_IO07__AUDMUX_MCLK 0x0030 0x0378 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B 0x0030 0x0378 0x0000 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO07__UART1_DCE_CTS 0x0030 0x0378 0x0000 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO07__UART1_DTE_RTS 0x0030 0x0378 0x082C 0x4 0x1
#define MX6SX_PAD_GPIO1_IO07__GPIO1_IO_7 0x0030 0x0378 0x0000 0x5 0x0
#define MX6SX_PAD_GPIO1_IO07__SRC_EARLY_RESET 0x0030 0x0378 0x0000 0x6 0x0
#define MX6SX_PAD_GPIO1_IO07__DCIC2_OUT 0x0030 0x0378 0x0000 0x7 0x0
@@ -83,7 +85,8 @@
#define MX6SX_PAD_GPIO1_IO08__WDOG1_WDOG_B 0x0034 0x037C 0x0000 0x1 0x0
#define MX6SX_PAD_GPIO1_IO08__SDMA_EXT_EVENT_0 0x0034 0x037C 0x081C 0x2 0x0
#define MX6SX_PAD_GPIO1_IO08__CCM_PMIC_RDY 0x0034 0x037C 0x069C 0x3 0x1
-#define MX6SX_PAD_GPIO1_IO08__UART2_RTS_B 0x0034 0x037C 0x0834 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO08__UART2_DCE_RTS 0x0034 0x037C 0x0834 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO08__UART2_DTE_CTS 0x0034 0x037C 0x0000 0x4 0x0
#define MX6SX_PAD_GPIO1_IO08__GPIO1_IO_8 0x0034 0x037C 0x0000 0x5 0x0
#define MX6SX_PAD_GPIO1_IO08__SRC_SYSTEM_RESET 0x0034 0x037C 0x0000 0x6 0x0
#define MX6SX_PAD_GPIO1_IO08__DCIC1_OUT 0x0034 0x037C 0x0000 0x7 0x0
@@ -92,7 +95,8 @@
#define MX6SX_PAD_GPIO1_IO09__WDOG2_WDOG_B 0x0038 0x0380 0x0000 0x1 0x0
#define MX6SX_PAD_GPIO1_IO09__SDMA_EXT_EVENT_1 0x0038 0x0380 0x0820 0x2 0x0
#define MX6SX_PAD_GPIO1_IO09__CCM_OUT0 0x0038 0x0380 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B 0x0038 0x0380 0x0000 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO09__UART2_DCE_CTS 0x0038 0x0380 0x0000 0x4 0x0
+#define MX6SX_PAD_GPIO1_IO09__UART2_DTE_RTS 0x0038 0x0380 0x0834 0x4 0x1
#define MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9 0x0038 0x0380 0x0000 0x5 0x0
#define MX6SX_PAD_GPIO1_IO09__SRC_INT_BOOT 0x0038 0x0380 0x0000 0x6 0x0
#define MX6SX_PAD_GPIO1_IO09__OBSERVE_MUX_OUT_4 0x0038 0x0380 0x0000 0x7 0x0
@@ -177,8 +181,8 @@
#define MX6SX_PAD_CSI_DATA04__ESAI_TX1 0x005C 0x03A4 0x0794 0x1 0x1
#define MX6SX_PAD_CSI_DATA04__SPDIF_OUT 0x005C 0x03A4 0x0000 0x2 0x0
#define MX6SX_PAD_CSI_DATA04__KPP_COL_6 0x005C 0x03A4 0x07CC 0x3 0x0
-#define MX6SX_PAD_CSI_DATA04__UART6_RX 0x005C 0x03A4 0x0858 0x4 0x0
-#define MX6SX_PAD_CSI_DATA04__UART6_TX 0x005C 0x03A4 0x0000 0x4 0x0
+#define MX6SX_PAD_CSI_DATA04__UART6_DCE_RX 0x005C 0x03A4 0x0858 0x4 0x0
+#define MX6SX_PAD_CSI_DATA04__UART6_DTE_TX 0x005C 0x03A4 0x0000 0x4 0x0
#define MX6SX_PAD_CSI_DATA04__GPIO1_IO_18 0x005C 0x03A4 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_DATA04__WEIM_DATA_19 0x005C 0x03A4 0x0000 0x6 0x0
#define MX6SX_PAD_CSI_DATA04__PWM5_OUT 0x005C 0x03A4 0x0000 0x7 0x0
@@ -188,8 +192,8 @@
#define MX6SX_PAD_CSI_DATA05__ESAI_TX4_RX1 0x0060 0x03A8 0x07A0 0x1 0x1
#define MX6SX_PAD_CSI_DATA05__SPDIF_IN 0x0060 0x03A8 0x0824 0x2 0x1
#define MX6SX_PAD_CSI_DATA05__KPP_ROW_6 0x0060 0x03A8 0x07D8 0x3 0x0
-#define MX6SX_PAD_CSI_DATA05__UART6_RX 0x0060 0x03A8 0x0858 0x4 0x1
-#define MX6SX_PAD_CSI_DATA05__UART6_TX 0x0060 0x03A8 0x0000 0x4 0x0
+#define MX6SX_PAD_CSI_DATA05__UART6_DCE_TX 0x0060 0x03A8 0x0000 0x4 0x0
+#define MX6SX_PAD_CSI_DATA05__UART6_DTE_RX 0x0060 0x03A8 0x0858 0x4 0x1
#define MX6SX_PAD_CSI_DATA05__GPIO1_IO_19 0x0060 0x03A8 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_DATA05__WEIM_DATA_18 0x0060 0x03A8 0x0000 0x6 0x0
#define MX6SX_PAD_CSI_DATA05__PWM6_OUT 0x0060 0x03A8 0x0000 0x7 0x0
@@ -199,7 +203,8 @@
#define MX6SX_PAD_CSI_DATA06__ESAI_TX2_RX3 0x0064 0x03AC 0x0798 0x1 0x1
#define MX6SX_PAD_CSI_DATA06__I2C4_SCL 0x0064 0x03AC 0x07C0 0x2 0x2
#define MX6SX_PAD_CSI_DATA06__KPP_COL_7 0x0064 0x03AC 0x07D0 0x3 0x0
-#define MX6SX_PAD_CSI_DATA06__UART6_RTS_B 0x0064 0x03AC 0x0854 0x4 0x0
+#define MX6SX_PAD_CSI_DATA06__UART6_DCE_RTS 0x0064 0x03AC 0x0854 0x4 0x0
+#define MX6SX_PAD_CSI_DATA06__UART6_DTE_CTS 0x0064 0x03AC 0x0000 0x4 0x0
#define MX6SX_PAD_CSI_DATA06__GPIO1_IO_20 0x0064 0x03AC 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_DATA06__WEIM_DATA_17 0x0064 0x03AC 0x0000 0x6 0x0
#define MX6SX_PAD_CSI_DATA06__DCIC2_OUT 0x0064 0x03AC 0x0000 0x7 0x0
@@ -209,7 +214,8 @@
#define MX6SX_PAD_CSI_DATA07__ESAI_TX3_RX2 0x0068 0x03B0 0x079C 0x1 0x1
#define MX6SX_PAD_CSI_DATA07__I2C4_SDA 0x0068 0x03B0 0x07C4 0x2 0x2
#define MX6SX_PAD_CSI_DATA07__KPP_ROW_7 0x0068 0x03B0 0x07DC 0x3 0x0
-#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B 0x0068 0x03B0 0x0000 0x4 0x0
+#define MX6SX_PAD_CSI_DATA07__UART6_DCE_CTS 0x0068 0x03B0 0x0000 0x4 0x0
+#define MX6SX_PAD_CSI_DATA07__UART6_DTE_RTS 0x0068 0x03B0 0x0854 0x4 0x1
#define MX6SX_PAD_CSI_DATA07__GPIO1_IO_21 0x0068 0x03B0 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_DATA07__WEIM_DATA_16 0x0068 0x03B0 0x0000 0x6 0x0
#define MX6SX_PAD_CSI_DATA07__DCIC1_OUT 0x0068 0x03B0 0x0000 0x7 0x0
@@ -218,7 +224,8 @@
#define MX6SX_PAD_CSI_HSYNC__CSI1_HSYNC 0x006C 0x03B4 0x0700 0x0 0x0
#define MX6SX_PAD_CSI_HSYNC__ESAI_TX0 0x006C 0x03B4 0x0790 0x1 0x1
#define MX6SX_PAD_CSI_HSYNC__AUDMUX_AUD6_TXD 0x006C 0x03B4 0x0678 0x2 0x1
-#define MX6SX_PAD_CSI_HSYNC__UART4_RTS_B 0x006C 0x03B4 0x0844 0x3 0x2
+#define MX6SX_PAD_CSI_HSYNC__UART4_DCE_RTS 0x006C 0x03B4 0x0844 0x3 0x2
+#define MX6SX_PAD_CSI_HSYNC__UART4_DTE_CTS 0x006C 0x03B4 0x0000 0x3 0x0
#define MX6SX_PAD_CSI_HSYNC__MQS_LEFT 0x006C 0x03B4 0x0000 0x4 0x0
#define MX6SX_PAD_CSI_HSYNC__GPIO1_IO_22 0x006C 0x03B4 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_HSYNC__WEIM_DATA_25 0x006C 0x03B4 0x0000 0x6 0x0
@@ -228,8 +235,8 @@
#define MX6SX_PAD_CSI_MCLK__CSI1_MCLK 0x0070 0x03B8 0x0000 0x0 0x0
#define MX6SX_PAD_CSI_MCLK__ESAI_TX_HF_CLK 0x0070 0x03B8 0x0784 0x1 0x1
#define MX6SX_PAD_CSI_MCLK__OSC32K_32K_OUT 0x0070 0x03B8 0x0000 0x2 0x0
-#define MX6SX_PAD_CSI_MCLK__UART4_RX 0x0070 0x03B8 0x0848 0x3 0x2
-#define MX6SX_PAD_CSI_MCLK__UART4_TX 0x0070 0x03B8 0x0000 0x3 0x0
+#define MX6SX_PAD_CSI_MCLK__UART4_DCE_RX 0x0070 0x03B8 0x0848 0x3 0x2
+#define MX6SX_PAD_CSI_MCLK__UART4_DTE_TX 0x0070 0x03B8 0x0000 0x3 0x0
#define MX6SX_PAD_CSI_MCLK__ANATOP_32K_OUT 0x0070 0x03B8 0x0000 0x4 0x0
#define MX6SX_PAD_CSI_MCLK__GPIO1_IO_23 0x0070 0x03B8 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_MCLK__WEIM_DATA_26 0x0070 0x03B8 0x0000 0x6 0x0
@@ -239,8 +246,8 @@
#define MX6SX_PAD_CSI_PIXCLK__CSI1_PIXCLK 0x0074 0x03BC 0x0704 0x0 0x0
#define MX6SX_PAD_CSI_PIXCLK__ESAI_RX_HF_CLK 0x0074 0x03BC 0x0780 0x1 0x1
#define MX6SX_PAD_CSI_PIXCLK__AUDMUX_MCLK 0x0074 0x03BC 0x0000 0x2 0x0
-#define MX6SX_PAD_CSI_PIXCLK__UART4_RX 0x0074 0x03BC 0x0848 0x3 0x3
-#define MX6SX_PAD_CSI_PIXCLK__UART4_TX 0x0074 0x03BC 0x0000 0x3 0x0
+#define MX6SX_PAD_CSI_PIXCLK__UART4_DCE_TX 0x0074 0x03BC 0x0000 0x3 0x0
+#define MX6SX_PAD_CSI_PIXCLK__UART4_DTE_RX 0x0074 0x03BC 0x0848 0x3 0x3
#define MX6SX_PAD_CSI_PIXCLK__ANATOP_24M_OUT 0x0074 0x03BC 0x0000 0x4 0x0
#define MX6SX_PAD_CSI_PIXCLK__GPIO1_IO_24 0x0074 0x03BC 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_PIXCLK__WEIM_DATA_27 0x0074 0x03BC 0x0000 0x6 0x0
@@ -250,7 +257,8 @@
#define MX6SX_PAD_CSI_VSYNC__CSI1_VSYNC 0x0078 0x03C0 0x0708 0x0 0x0
#define MX6SX_PAD_CSI_VSYNC__ESAI_TX5_RX0 0x0078 0x03C0 0x07A4 0x1 0x1
#define MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD 0x0078 0x03C0 0x0674 0x2 0x1
-#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B 0x0078 0x03C0 0x0000 0x3 0x0
+#define MX6SX_PAD_CSI_VSYNC__UART4_DCE_CTS 0x0078 0x03C0 0x0000 0x3 0x0
+#define MX6SX_PAD_CSI_VSYNC__UART4_DTE_RTS 0x0078 0x03C0 0x0844 0x3 0x3
#define MX6SX_PAD_CSI_VSYNC__MQS_RIGHT 0x0078 0x03C0 0x0000 0x4 0x0
#define MX6SX_PAD_CSI_VSYNC__GPIO1_IO_25 0x0078 0x03C0 0x0000 0x5 0x0
#define MX6SX_PAD_CSI_VSYNC__WEIM_DATA_24 0x0078 0x03C0 0x0000 0x6 0x0
@@ -330,8 +338,8 @@
#define MX6SX_PAD_ENET2_COL__ENET2_COL 0x0094 0x03DC 0x0000 0x0 0x0
#define MX6SX_PAD_ENET2_COL__ENET1_MDC 0x0094 0x03DC 0x0000 0x1 0x0
#define MX6SX_PAD_ENET2_COL__AUDMUX_AUD4_RXC 0x0094 0x03DC 0x064C 0x2 0x1
-#define MX6SX_PAD_ENET2_COL__UART1_RX 0x0094 0x03DC 0x0830 0x3 0x2
-#define MX6SX_PAD_ENET2_COL__UART1_TX 0x0094 0x03DC 0x0000 0x3 0x0
+#define MX6SX_PAD_ENET2_COL__UART1_DCE_RX 0x0094 0x03DC 0x0830 0x3 0x2
+#define MX6SX_PAD_ENET2_COL__UART1_DTE_TX 0x0094 0x03DC 0x0000 0x3 0x0
#define MX6SX_PAD_ENET2_COL__SPDIF_IN 0x0094 0x03DC 0x0824 0x4 0x3
#define MX6SX_PAD_ENET2_COL__GPIO2_IO_6 0x0094 0x03DC 0x0000 0x5 0x0
#define MX6SX_PAD_ENET2_COL__ANATOP_OTG1_ID 0x0094 0x03DC 0x0624 0x6 0x1
@@ -341,8 +349,8 @@
#define MX6SX_PAD_ENET2_CRS__ENET2_CRS 0x0098 0x03E0 0x0000 0x0 0x0
#define MX6SX_PAD_ENET2_CRS__ENET1_MDIO 0x0098 0x03E0 0x0764 0x1 0x2
#define MX6SX_PAD_ENET2_CRS__AUDMUX_AUD4_RXFS 0x0098 0x03E0 0x0650 0x2 0x1
-#define MX6SX_PAD_ENET2_CRS__UART1_RX 0x0098 0x03E0 0x0830 0x3 0x3
-#define MX6SX_PAD_ENET2_CRS__UART1_TX 0x0098 0x03E0 0x0000 0x3 0x0
+#define MX6SX_PAD_ENET2_CRS__UART1_DCE_TX 0x0098 0x03E0 0x0000 0x3 0x0
+#define MX6SX_PAD_ENET2_CRS__UART1_DTE_RX 0x0098 0x03E0 0x0830 0x3 0x3
#define MX6SX_PAD_ENET2_CRS__MLB_SIG 0x0098 0x03E0 0x07F0 0x4 0x1
#define MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x0098 0x03E0 0x0000 0x5 0x0
#define MX6SX_PAD_ENET2_CRS__ANATOP_OTG2_ID 0x0098 0x03E0 0x0628 0x6 0x1
@@ -352,7 +360,8 @@
#define MX6SX_PAD_ENET2_RX_CLK__ENET2_RX_CLK 0x009C 0x03E4 0x0774 0x0 0x0
#define MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x009C 0x03E4 0x0000 0x1 0x0
#define MX6SX_PAD_ENET2_RX_CLK__I2C3_SCL 0x009C 0x03E4 0x07B8 0x2 0x1
-#define MX6SX_PAD_ENET2_RX_CLK__UART1_RTS_B 0x009C 0x03E4 0x082C 0x3 0x2
+#define MX6SX_PAD_ENET2_RX_CLK__UART1_DCE_RTS 0x009C 0x03E4 0x082C 0x3 0x2
+#define MX6SX_PAD_ENET2_RX_CLK__UART1_DTE_CTS 0x009C 0x03E4 0x0000 0x3 0x0
#define MX6SX_PAD_ENET2_RX_CLK__MLB_DATA 0x009C 0x03E4 0x07EC 0x4 0x1
#define MX6SX_PAD_ENET2_RX_CLK__GPIO2_IO_8 0x009C 0x03E4 0x0000 0x5 0x0
#define MX6SX_PAD_ENET2_RX_CLK__USB_OTG2_OC 0x009C 0x03E4 0x085C 0x6 0x1
@@ -362,7 +371,8 @@
#define MX6SX_PAD_ENET2_TX_CLK__ENET2_TX_CLK 0x00A0 0x03E8 0x0000 0x0 0x0
#define MX6SX_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x00A0 0x03E8 0x076C 0x1 0x1
#define MX6SX_PAD_ENET2_TX_CLK__I2C3_SDA 0x00A0 0x03E8 0x07BC 0x2 0x1
-#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B 0x00A0 0x03E8 0x0000 0x3 0x0
+#define MX6SX_PAD_ENET2_TX_CLK__UART1_DCE_CTS 0x00A0 0x03E8 0x0000 0x3 0x0
+#define MX6SX_PAD_ENET2_TX_CLK__UART1_DTE_RTS 0x00A0 0x03E8 0x082C 0x3 0x3
#define MX6SX_PAD_ENET2_TX_CLK__MLB_CLK 0x00A0 0x03E8 0x07E8 0x4 0x1
#define MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9 0x00A0 0x03E8 0x0000 0x5 0x0
#define MX6SX_PAD_ENET2_TX_CLK__USB_OTG2_PWR 0x00A0 0x03E8 0x0000 0x6 0x0
@@ -371,7 +381,8 @@
#define MX6SX_PAD_ENET2_TX_CLK__PCIE_CTRL_DEBUG_24 0x00A0 0x03E8 0x0000 0x9 0x0
#define MX6SX_PAD_KEY_COL0__KPP_COL_0 0x00A4 0x03EC 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_COL0__USDHC3_CD_B 0x00A4 0x03EC 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_COL0__UART6_RTS_B 0x00A4 0x03EC 0x0854 0x2 0x2
+#define MX6SX_PAD_KEY_COL0__UART6_DCE_RTS 0x00A4 0x03EC 0x0854 0x2 0x2
+#define MX6SX_PAD_KEY_COL0__UART6_DTE_CTS 0x00A4 0x03EC 0x0000 0x2 0x0
#define MX6SX_PAD_KEY_COL0__ECSPI1_SCLK 0x00A4 0x03EC 0x0710 0x3 0x0
#define MX6SX_PAD_KEY_COL0__AUDMUX_AUD5_TXC 0x00A4 0x03EC 0x066C 0x4 0x0
#define MX6SX_PAD_KEY_COL0__GPIO2_IO_10 0x00A4 0x03EC 0x0000 0x5 0x0
@@ -380,8 +391,8 @@
#define MX6SX_PAD_KEY_COL0__VADC_DATA_0 0x00A4 0x03EC 0x0000 0x8 0x0
#define MX6SX_PAD_KEY_COL1__KPP_COL_1 0x00A8 0x03F0 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_COL1__USDHC3_RESET_B 0x00A8 0x03F0 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_COL1__UART6_RX 0x00A8 0x03F0 0x0858 0x2 0x2
-#define MX6SX_PAD_KEY_COL1__UART6_TX 0x00A8 0x03F0 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_COL1__UART6_DCE_TX 0x00A8 0x03F0 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_COL1__UART6_DTE_RX 0x00A8 0x03F0 0x0858 0x2 0x2
#define MX6SX_PAD_KEY_COL1__ECSPI1_MISO 0x00A8 0x03F0 0x0714 0x3 0x0
#define MX6SX_PAD_KEY_COL1__AUDMUX_AUD5_TXFS 0x00A8 0x03F0 0x0670 0x4 0x0
#define MX6SX_PAD_KEY_COL1__GPIO2_IO_11 0x00A8 0x03F0 0x0000 0x5 0x0
@@ -389,7 +400,8 @@
#define MX6SX_PAD_KEY_COL1__SAI2_TX_SYNC 0x00A8 0x03F0 0x0818 0x7 0x0
#define MX6SX_PAD_KEY_COL2__KPP_COL_2 0x00AC 0x03F4 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_COL2__USDHC4_CD_B 0x00AC 0x03F4 0x0874 0x1 0x1
-#define MX6SX_PAD_KEY_COL2__UART5_RTS_B 0x00AC 0x03F4 0x084C 0x2 0x2
+#define MX6SX_PAD_KEY_COL2__UART5_DCE_RTS 0x00AC 0x03F4 0x084C 0x2 0x2
+#define MX6SX_PAD_KEY_COL2__UART5_DTE_CTS 0x00AC 0x03F4 0x0000 0x2 0x0
#define MX6SX_PAD_KEY_COL2__CAN1_TX 0x00AC 0x03F4 0x0000 0x3 0x0
#define MX6SX_PAD_KEY_COL2__CANFD_TX1 0x00AC 0x03F4 0x0000 0x4 0x0
#define MX6SX_PAD_KEY_COL2__GPIO2_IO_12 0x00AC 0x03F4 0x0000 0x5 0x0
@@ -397,8 +409,8 @@
#define MX6SX_PAD_KEY_COL2__ECSPI1_RDY 0x00AC 0x03F4 0x0000 0x7 0x0
#define MX6SX_PAD_KEY_COL3__KPP_COL_3 0x00B0 0x03F8 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_COL3__USDHC4_LCTL 0x00B0 0x03F8 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_COL3__UART5_RX 0x00B0 0x03F8 0x0850 0x2 0x2
-#define MX6SX_PAD_KEY_COL3__UART5_TX 0x00B0 0x03F8 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_COL3__UART5_DCE_TX 0x00B0 0x03F8 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_COL3__UART5_DTE_RX 0x00B0 0x03F8 0x0850 0x2 0x2
#define MX6SX_PAD_KEY_COL3__CAN2_TX 0x00B0 0x03F8 0x0000 0x3 0x0
#define MX6SX_PAD_KEY_COL3__CANFD_TX2 0x00B0 0x03F8 0x0000 0x4 0x0
#define MX6SX_PAD_KEY_COL3__GPIO2_IO_13 0x00B0 0x03F8 0x0000 0x5 0x0
@@ -414,7 +426,8 @@
#define MX6SX_PAD_KEY_COL4__SAI2_RX_BCLK 0x00B4 0x03FC 0x0808 0x7 0x0
#define MX6SX_PAD_KEY_ROW0__KPP_ROW_0 0x00B8 0x0400 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_ROW0__USDHC3_WP 0x00B8 0x0400 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B 0x00B8 0x0400 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_ROW0__UART6_DCE_CTS 0x00B8 0x0400 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_ROW0__UART6_DTE_RTS 0x00B8 0x0400 0x0854 0x2 0x3
#define MX6SX_PAD_KEY_ROW0__ECSPI1_MOSI 0x00B8 0x0400 0x0718 0x3 0x0
#define MX6SX_PAD_KEY_ROW0__AUDMUX_AUD5_TXD 0x00B8 0x0400 0x0660 0x4 0x0
#define MX6SX_PAD_KEY_ROW0__GPIO2_IO_15 0x00B8 0x0400 0x0000 0x5 0x0
@@ -423,8 +436,8 @@
#define MX6SX_PAD_KEY_ROW0__GPU_IDLE 0x00B8 0x0400 0x0000 0x8 0x0
#define MX6SX_PAD_KEY_ROW1__KPP_ROW_1 0x00BC 0x0404 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_ROW1__USDHC4_VSELECT 0x00BC 0x0404 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_ROW1__UART6_RX 0x00BC 0x0404 0x0858 0x2 0x3
-#define MX6SX_PAD_KEY_ROW1__UART6_TX 0x00BC 0x0404 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_ROW1__UART6_DCE_RX 0x00BC 0x0404 0x0858 0x2 0x3
+#define MX6SX_PAD_KEY_ROW1__UART6_DTE_TX 0x00BC 0x0404 0x0000 0x2 0x0
#define MX6SX_PAD_KEY_ROW1__ECSPI1_SS0 0x00BC 0x0404 0x071C 0x3 0x0
#define MX6SX_PAD_KEY_ROW1__AUDMUX_AUD5_RXD 0x00BC 0x0404 0x065C 0x4 0x0
#define MX6SX_PAD_KEY_ROW1__GPIO2_IO_16 0x00BC 0x0404 0x0000 0x5 0x0
@@ -433,7 +446,8 @@
#define MX6SX_PAD_KEY_ROW1__M4_NMI 0x00BC 0x0404 0x0000 0x8 0x0
#define MX6SX_PAD_KEY_ROW2__KPP_ROW_2 0x00C0 0x0408 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_ROW2__USDHC4_WP 0x00C0 0x0408 0x0878 0x1 0x1
-#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B 0x00C0 0x0408 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_ROW2__UART5_DCE_CTS 0x00C0 0x0408 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_ROW2__UART5_DTE_RTS 0x00C0 0x0408 0x084C 0x2 0x3
#define MX6SX_PAD_KEY_ROW2__CAN1_RX 0x00C0 0x0408 0x068C 0x3 0x1
#define MX6SX_PAD_KEY_ROW2__CANFD_RX1 0x00C0 0x0408 0x0694 0x4 0x1
#define MX6SX_PAD_KEY_ROW2__GPIO2_IO_17 0x00C0 0x0408 0x0000 0x5 0x0
@@ -441,8 +455,8 @@
#define MX6SX_PAD_KEY_ROW2__ECSPI1_SS3 0x00C0 0x0408 0x0000 0x7 0x0
#define MX6SX_PAD_KEY_ROW3__KPP_ROW_3 0x00C4 0x040C 0x0000 0x0 0x0
#define MX6SX_PAD_KEY_ROW3__USDHC3_LCTL 0x00C4 0x040C 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_ROW3__UART5_RX 0x00C4 0x040C 0x0850 0x2 0x3
-#define MX6SX_PAD_KEY_ROW3__UART5_TX 0x00C4 0x040C 0x0000 0x2 0x0
+#define MX6SX_PAD_KEY_ROW3__UART5_DCE_RX 0x00C4 0x040C 0x0850 0x2 0x3
+#define MX6SX_PAD_KEY_ROW3__UART5_DTE_TX 0x00C4 0x040C 0x0000 0x2 0x0
#define MX6SX_PAD_KEY_ROW3__CAN2_RX 0x00C4 0x040C 0x0690 0x3 0x1
#define MX6SX_PAD_KEY_ROW3__CANFD_RX2 0x00C4 0x040C 0x0698 0x4 0x1
#define MX6SX_PAD_KEY_ROW3__GPIO2_IO_18 0x00C4 0x040C 0x0000 0x5 0x0
@@ -815,7 +829,8 @@
#define MX6SX_PAD_NAND_DATA04__RAWNAND_DATA04 0x0160 0x04A8 0x0000 0x0 0x0
#define MX6SX_PAD_NAND_DATA04__USDHC2_DATA4 0x0160 0x04A8 0x0000 0x1 0x0
#define MX6SX_PAD_NAND_DATA04__QSPI2_B_SS1_B 0x0160 0x04A8 0x0000 0x2 0x0
-#define MX6SX_PAD_NAND_DATA04__UART3_RTS_B 0x0160 0x04A8 0x083C 0x3 0x0
+#define MX6SX_PAD_NAND_DATA04__UART3_DCE_RTS 0x0160 0x04A8 0x083C 0x3 0x0
+#define MX6SX_PAD_NAND_DATA04__UART3_DTE_CTS 0x0160 0x04A8 0x0000 0x3 0x0
#define MX6SX_PAD_NAND_DATA04__AUDMUX_AUD4_RXFS 0x0160 0x04A8 0x0650 0x4 0x0
#define MX6SX_PAD_NAND_DATA04__GPIO4_IO_8 0x0160 0x04A8 0x0000 0x5 0x0
#define MX6SX_PAD_NAND_DATA04__WEIM_AD_4 0x0160 0x04A8 0x0000 0x6 0x0
@@ -825,7 +840,8 @@
#define MX6SX_PAD_NAND_DATA05__RAWNAND_DATA05 0x0164 0x04AC 0x0000 0x0 0x0
#define MX6SX_PAD_NAND_DATA05__USDHC2_DATA5 0x0164 0x04AC 0x0000 0x1 0x0
#define MX6SX_PAD_NAND_DATA05__QSPI2_B_DQS 0x0164 0x04AC 0x0000 0x2 0x0
-#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B 0x0164 0x04AC 0x0000 0x3 0x0
+#define MX6SX_PAD_NAND_DATA05__UART3_DCE_CTS 0x0164 0x04AC 0x0000 0x3 0x0
+#define MX6SX_PAD_NAND_DATA05__UART3_DTE_RTS 0x0164 0x04AC 0x083C 0x3 0x1
#define MX6SX_PAD_NAND_DATA05__AUDMUX_AUD4_RXC 0x0164 0x04AC 0x064C 0x4 0x0
#define MX6SX_PAD_NAND_DATA05__GPIO4_IO_9 0x0164 0x04AC 0x0000 0x5 0x0
#define MX6SX_PAD_NAND_DATA05__WEIM_AD_5 0x0164 0x04AC 0x0000 0x6 0x0
@@ -835,8 +851,8 @@
#define MX6SX_PAD_NAND_DATA06__RAWNAND_DATA06 0x0168 0x04B0 0x0000 0x0 0x0
#define MX6SX_PAD_NAND_DATA06__USDHC2_DATA6 0x0168 0x04B0 0x0000 0x1 0x0
#define MX6SX_PAD_NAND_DATA06__QSPI2_A_SS1_B 0x0168 0x04B0 0x0000 0x2 0x0
-#define MX6SX_PAD_NAND_DATA06__UART3_RX 0x0168 0x04B0 0x0840 0x3 0x0
-#define MX6SX_PAD_NAND_DATA06__UART3_TX 0x0168 0x04B0 0x0000 0x3 0x0
+#define MX6SX_PAD_NAND_DATA06__UART3_DCE_RX 0x0168 0x04B0 0x0840 0x3 0x0
+#define MX6SX_PAD_NAND_DATA06__UART3_DTE_TX 0x0168 0x04B0 0x0000 0x3 0x0
#define MX6SX_PAD_NAND_DATA06__PWM3_OUT 0x0168 0x04B0 0x0000 0x4 0x0
#define MX6SX_PAD_NAND_DATA06__GPIO4_IO_10 0x0168 0x04B0 0x0000 0x5 0x0
#define MX6SX_PAD_NAND_DATA06__WEIM_AD_6 0x0168 0x04B0 0x0000 0x6 0x0
@@ -846,8 +862,8 @@
#define MX6SX_PAD_NAND_DATA07__RAWNAND_DATA07 0x016C 0x04B4 0x0000 0x0 0x0
#define MX6SX_PAD_NAND_DATA07__USDHC2_DATA7 0x016C 0x04B4 0x0000 0x1 0x0
#define MX6SX_PAD_NAND_DATA07__QSPI2_A_DQS 0x016C 0x04B4 0x0000 0x2 0x0
-#define MX6SX_PAD_NAND_DATA07__UART3_RX 0x016C 0x04B4 0x0840 0x3 0x1
-#define MX6SX_PAD_NAND_DATA07__UART3_TX 0x016C 0x04B4 0x0000 0x3 0x0
+#define MX6SX_PAD_NAND_DATA07__UART3_DCE_TX 0x016C 0x04B4 0x0000 0x3 0x0
+#define MX6SX_PAD_NAND_DATA07__UART3_DTE_RX 0x016C 0x04B4 0x0840 0x3 0x1
#define MX6SX_PAD_NAND_DATA07__PWM4_OUT 0x016C 0x04B4 0x0000 0x4 0x0
#define MX6SX_PAD_NAND_DATA07__GPIO4_IO_11 0x016C 0x04B4 0x0000 0x5 0x0
#define MX6SX_PAD_NAND_DATA07__WEIM_AD_7 0x016C 0x04B4 0x0000 0x6 0x0
@@ -967,7 +983,8 @@
#define MX6SX_PAD_QSPI1A_SS1_B__SIM_M_HADDR_12 0x019C 0x04E4 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1A_SS1_B__SDMA_DEBUG_PC_3 0x019C 0x04E4 0x0000 0x9 0x0
#define MX6SX_PAD_QSPI1B_DATA0__QSPI1_B_DATA_0 0x01A0 0x04E8 0x0000 0x0 0x0
-#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B 0x01A0 0x04E8 0x0000 0x1 0x0
+#define MX6SX_PAD_QSPI1B_DATA0__UART3_DCE_CTS 0x01A0 0x04E8 0x0000 0x1 0x0
+#define MX6SX_PAD_QSPI1B_DATA0__UART3_DTE_RTS 0x01A0 0x04E8 0x083C 0x1 0x4
#define MX6SX_PAD_QSPI1B_DATA0__ECSPI3_MOSI 0x01A0 0x04E8 0x0738 0x2 0x1
#define MX6SX_PAD_QSPI1B_DATA0__ESAI_RX_FS 0x01A0 0x04E8 0x0778 0x3 0x2
#define MX6SX_PAD_QSPI1B_DATA0__CSI1_DATA_22 0x01A0 0x04E8 0x06F4 0x4 0x1
@@ -975,7 +992,8 @@
#define MX6SX_PAD_QSPI1B_DATA0__WEIM_DATA_14 0x01A0 0x04E8 0x0000 0x6 0x0
#define MX6SX_PAD_QSPI1B_DATA0__SIM_M_HADDR_9 0x01A0 0x04E8 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_DATA1__QSPI1_B_DATA_1 0x01A4 0x04EC 0x0000 0x0 0x0
-#define MX6SX_PAD_QSPI1B_DATA1__UART3_RTS_B 0x01A4 0x04EC 0x083C 0x1 0x5
+#define MX6SX_PAD_QSPI1B_DATA1__UART3_DCE_RTS 0x01A4 0x04EC 0x083C 0x1 0x5
+#define MX6SX_PAD_QSPI1B_DATA1__UART3_DTE_CTS 0x01A4 0x04EC 0x0000 0x1 0x0
#define MX6SX_PAD_QSPI1B_DATA1__ECSPI3_MISO 0x01A4 0x04EC 0x0734 0x2 0x1
#define MX6SX_PAD_QSPI1B_DATA1__ESAI_RX_CLK 0x01A4 0x04EC 0x0788 0x3 0x2
#define MX6SX_PAD_QSPI1B_DATA1__CSI1_DATA_21 0x01A4 0x04EC 0x06F0 0x4 0x1
@@ -1007,8 +1025,8 @@
#define MX6SX_PAD_QSPI1B_DQS__WEIM_DATA_15 0x01B0 0x04F8 0x0000 0x6 0x0
#define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0
-#define MX6SX_PAD_QSPI1B_SCLK__UART3_RX 0x01B4 0x04FC 0x0840 0x1 0x4
-#define MX6SX_PAD_QSPI1B_SCLK__UART3_TX 0x01B4 0x04FC 0x0000 0x0 0x0
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0
#define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1
#define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2
#define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1
@@ -1016,8 +1034,8 @@
#define MX6SX_PAD_QSPI1B_SCLK__WEIM_DATA_8 0x01B4 0x04FC 0x0000 0x6 0x0
#define MX6SX_PAD_QSPI1B_SCLK__SIM_M_HADDR_11 0x01B4 0x04FC 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_SS0_B__QSPI1_B_SS0_B 0x01B8 0x0500 0x0000 0x0 0x0
-#define MX6SX_PAD_QSPI1B_SS0_B__UART3_RX 0x01B8 0x0500 0x0840 0x1 0x5
-#define MX6SX_PAD_QSPI1B_SS0_B__UART3_TX 0x01B8 0x0500 0x0000 0x1 0x0
+#define MX6SX_PAD_QSPI1B_SS0_B__UART3_DCE_TX 0x01B8 0x0500 0x0000 0x1 0x0
+#define MX6SX_PAD_QSPI1B_SS0_B__UART3_DTE_RX 0x01B8 0x0500 0x0840 0x1 0x5
#define MX6SX_PAD_QSPI1B_SS0_B__ECSPI3_SS0 0x01B8 0x0500 0x073C 0x2 0x1
#define MX6SX_PAD_QSPI1B_SS0_B__ESAI_TX_HF_CLK 0x01B8 0x0500 0x0784 0x3 0x3
#define MX6SX_PAD_QSPI1B_SS0_B__CSI1_DATA_17 0x01B8 0x0500 0x06E0 0x4 0x1
@@ -1224,8 +1242,8 @@
#define MX6SX_PAD_SD1_DATA0__AUDMUX_AUD5_RXD 0x0228 0x0570 0x065C 0x1 0x1
#define MX6SX_PAD_SD1_DATA0__CAAM_WRAPPER_RNG_OSC_OBS 0x0228 0x0570 0x0000 0x2 0x0
#define MX6SX_PAD_SD1_DATA0__GPT_CAPTURE1 0x0228 0x0570 0x0000 0x3 0x0
-#define MX6SX_PAD_SD1_DATA0__UART2_RX 0x0228 0x0570 0x0838 0x4 0x2
-#define MX6SX_PAD_SD1_DATA0__UART2_TX 0x0228 0x0570 0x0000 0x4 0x0
+#define MX6SX_PAD_SD1_DATA0__UART2_DCE_RX 0x0228 0x0570 0x0838 0x4 0x2
+#define MX6SX_PAD_SD1_DATA0__UART2_DTE_TX 0x0228 0x0570 0x0000 0x4 0x0
#define MX6SX_PAD_SD1_DATA0__GPIO6_IO_2 0x0228 0x0570 0x0000 0x5 0x0
#define MX6SX_PAD_SD1_DATA0__ENET1_1588_EVENT1_IN 0x0228 0x0570 0x0000 0x6 0x0
#define MX6SX_PAD_SD1_DATA0__CCM_OUT2 0x0228 0x0570 0x0000 0x7 0x0
@@ -1235,8 +1253,8 @@
#define MX6SX_PAD_SD1_DATA1__AUDMUX_AUD5_TXC 0x022C 0x0574 0x066C 0x1 0x1
#define MX6SX_PAD_SD1_DATA1__PWM4_OUT 0x022C 0x0574 0x0000 0x2 0x0
#define MX6SX_PAD_SD1_DATA1__GPT_CAPTURE2 0x022C 0x0574 0x0000 0x3 0x0
-#define MX6SX_PAD_SD1_DATA1__UART2_RX 0x022C 0x0574 0x0838 0x4 0x3
-#define MX6SX_PAD_SD1_DATA1__UART2_TX 0x022C 0x0574 0x0000 0x4 0x0
+#define MX6SX_PAD_SD1_DATA1__UART2_DCE_TX 0x022C 0x0574 0x0000 0x4 0x0
+#define MX6SX_PAD_SD1_DATA1__UART2_DTE_RX 0x022C 0x0574 0x0838 0x4 0x3
#define MX6SX_PAD_SD1_DATA1__GPIO6_IO_3 0x022C 0x0574 0x0000 0x5 0x0
#define MX6SX_PAD_SD1_DATA1__ENET1_1588_EVENT1_OUT 0x022C 0x0574 0x0000 0x6 0x0
#define MX6SX_PAD_SD1_DATA1__CCM_CLKO2 0x022C 0x0574 0x0000 0x7 0x0
@@ -1246,7 +1264,8 @@
#define MX6SX_PAD_SD1_DATA2__AUDMUX_AUD5_TXFS 0x0230 0x0578 0x0670 0x1 0x1
#define MX6SX_PAD_SD1_DATA2__PWM3_OUT 0x0230 0x0578 0x0000 0x2 0x0
#define MX6SX_PAD_SD1_DATA2__GPT_COMPARE2 0x0230 0x0578 0x0000 0x3 0x0
-#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B 0x0230 0x0578 0x0000 0x4 0x0
+#define MX6SX_PAD_SD1_DATA2__UART2_DCE_CTS 0x0230 0x0578 0x0000 0x4 0x0
+#define MX6SX_PAD_SD1_DATA2__UART2_DTE_RTS 0x0230 0x0578 0x0834 0x4 0x2
#define MX6SX_PAD_SD1_DATA2__GPIO6_IO_4 0x0230 0x0578 0x0000 0x5 0x0
#define MX6SX_PAD_SD1_DATA2__ECSPI4_RDY 0x0230 0x0578 0x0000 0x6 0x0
#define MX6SX_PAD_SD1_DATA2__CCM_OUT0 0x0230 0x0578 0x0000 0x7 0x0
@@ -1255,7 +1274,8 @@
#define MX6SX_PAD_SD1_DATA3__AUDMUX_AUD5_TXD 0x0234 0x057C 0x0660 0x1 0x1
#define MX6SX_PAD_SD1_DATA3__AUDMUX_AUD5_RXD 0x0234 0x057C 0x065C 0x2 0x2
#define MX6SX_PAD_SD1_DATA3__GPT_COMPARE3 0x0234 0x057C 0x0000 0x3 0x0
-#define MX6SX_PAD_SD1_DATA3__UART2_RTS_B 0x0234 0x057C 0x0834 0x4 0x3
+#define MX6SX_PAD_SD1_DATA3__UART2_DCE_RTS 0x0234 0x057C 0x0834 0x4 0x3
+#define MX6SX_PAD_SD1_DATA3__UART2_DTE_CTS 0x0234 0x057C 0x0000 0x4 0x0
#define MX6SX_PAD_SD1_DATA3__GPIO6_IO_5 0x0234 0x057C 0x0000 0x5 0x0
#define MX6SX_PAD_SD1_DATA3__ECSPI4_SS1 0x0234 0x057C 0x0000 0x6 0x0
#define MX6SX_PAD_SD1_DATA3__CCM_PMIC_RDY 0x0234 0x057C 0x069C 0x7 0x2
@@ -1287,8 +1307,8 @@
#define MX6SX_PAD_SD2_DATA0__I2C4_SDA 0x0240 0x0588 0x07C4 0x4 0x3
#define MX6SX_PAD_SD2_DATA0__GPIO6_IO_8 0x0240 0x0588 0x0000 0x5 0x0
#define MX6SX_PAD_SD2_DATA0__ECSPI4_SS3 0x0240 0x0588 0x0000 0x6 0x0
-#define MX6SX_PAD_SD2_DATA0__UART4_RX 0x0240 0x0588 0x0848 0x7 0x4
-#define MX6SX_PAD_SD2_DATA0__UART4_TX 0x0240 0x0588 0x0000 0x7 0x0
+#define MX6SX_PAD_SD2_DATA0__UART4_DCE_RX 0x0240 0x0588 0x0848 0x7 0x4
+#define MX6SX_PAD_SD2_DATA0__UART4_DTE_TX 0x0240 0x0588 0x0000 0x7 0x0
#define MX6SX_PAD_SD2_DATA0__VADC_CLAMP_CURRENT_0 0x0240 0x0588 0x0000 0x8 0x0
#define MX6SX_PAD_SD2_DATA0__MMDC_DEBUG_50 0x0240 0x0588 0x0000 0x9 0x0
#define MX6SX_PAD_SD2_DATA1__USDHC2_DATA1 0x0244 0x058C 0x0000 0x0 0x0
@@ -1298,8 +1318,8 @@
#define MX6SX_PAD_SD2_DATA1__I2C4_SCL 0x0244 0x058C 0x07C0 0x4 0x3
#define MX6SX_PAD_SD2_DATA1__GPIO6_IO_9 0x0244 0x058C 0x0000 0x5 0x0
#define MX6SX_PAD_SD2_DATA1__ECSPI4_SS2 0x0244 0x058C 0x0000 0x6 0x0
-#define MX6SX_PAD_SD2_DATA1__UART4_RX 0x0244 0x058C 0x0848 0x7 0x5
-#define MX6SX_PAD_SD2_DATA1__UART4_TX 0x0244 0x058C 0x0000 0x7 0x0
+#define MX6SX_PAD_SD2_DATA1__UART4_DCE_TX 0x0244 0x058C 0x0000 0x7 0x0
+#define MX6SX_PAD_SD2_DATA1__UART4_DTE_RX 0x0244 0x058C 0x0848 0x7 0x5
#define MX6SX_PAD_SD2_DATA1__VADC_CLAMP_CURRENT_1 0x0244 0x058C 0x0000 0x8 0x0
#define MX6SX_PAD_SD2_DATA1__MMDC_DEBUG_49 0x0244 0x058C 0x0000 0x9 0x0
#define MX6SX_PAD_SD2_DATA2__USDHC2_DATA2 0x0248 0x0590 0x0000 0x0 0x0
@@ -1309,8 +1329,8 @@
#define MX6SX_PAD_SD2_DATA2__SDMA_EXT_EVENT_0 0x0248 0x0590 0x081C 0x4 0x2
#define MX6SX_PAD_SD2_DATA2__GPIO6_IO_10 0x0248 0x0590 0x0000 0x5 0x0
#define MX6SX_PAD_SD2_DATA2__SPDIF_OUT 0x0248 0x0590 0x0000 0x6 0x0
-#define MX6SX_PAD_SD2_DATA2__UART6_RX 0x0248 0x0590 0x0858 0x7 0x4
-#define MX6SX_PAD_SD2_DATA2__UART6_TX 0x0248 0x0590 0x0000 0x7 0x0
+#define MX6SX_PAD_SD2_DATA2__UART6_DCE_RX 0x0248 0x0590 0x0858 0x7 0x4
+#define MX6SX_PAD_SD2_DATA2__UART6_DTE_TX 0x0248 0x0590 0x0000 0x7 0x0
#define MX6SX_PAD_SD2_DATA2__VADC_CLAMP_CURRENT_2 0x0248 0x0590 0x0000 0x8 0x0
#define MX6SX_PAD_SD2_DATA2__MMDC_DEBUG_32 0x0248 0x0590 0x0000 0x9 0x0
#define MX6SX_PAD_SD2_DATA3__USDHC2_DATA3 0x024C 0x0594 0x0000 0x0 0x0
@@ -1320,12 +1340,13 @@
#define MX6SX_PAD_SD2_DATA3__MLB_DATA 0x024C 0x0594 0x07EC 0x4 0x2
#define MX6SX_PAD_SD2_DATA3__GPIO6_IO_11 0x024C 0x0594 0x0000 0x5 0x0
#define MX6SX_PAD_SD2_DATA3__SPDIF_IN 0x024C 0x0594 0x0824 0x6 0x4
-#define MX6SX_PAD_SD2_DATA3__UART6_RX 0x024C 0x0594 0x0858 0x7 0x5
-#define MX6SX_PAD_SD2_DATA3__UART6_TX 0x024C 0x0594 0x0000 0x7 0x0
+#define MX6SX_PAD_SD2_DATA3__UART6_DCE_TX 0x024C 0x0594 0x0000 0x7 0x0
+#define MX6SX_PAD_SD2_DATA3__UART6_DTE_RX 0x024C 0x0594 0x0858 0x7 0x5
#define MX6SX_PAD_SD2_DATA3__VADC_CLAMP_CURRENT_3 0x024C 0x0594 0x0000 0x8 0x0
#define MX6SX_PAD_SD2_DATA3__MMDC_DEBUG_31 0x024C 0x0594 0x0000 0x9 0x0
#define MX6SX_PAD_SD3_CLK__USDHC3_CLK 0x0250 0x0598 0x0000 0x0 0x0
-#define MX6SX_PAD_SD3_CLK__UART4_CTS_B 0x0250 0x0598 0x0000 0x1 0x0
+#define MX6SX_PAD_SD3_CLK__UART4_DCE_CTS 0x0250 0x0598 0x0000 0x1 0x0
+#define MX6SX_PAD_SD3_CLK__UART4_DTE_RTS 0x0250 0x0598 0x0844 0x1 0x0
#define MX6SX_PAD_SD3_CLK__ECSPI4_SCLK 0x0250 0x0598 0x0740 0x2 0x0
#define MX6SX_PAD_SD3_CLK__AUDMUX_AUD6_RXFS 0x0250 0x0598 0x0680 0x3 0x0
#define MX6SX_PAD_SD3_CLK__LCDIF2_VSYNC 0x0250 0x0598 0x0000 0x4 0x0
@@ -1334,8 +1355,8 @@
#define MX6SX_PAD_SD3_CLK__TPSMP_HDATA_29 0x0250 0x0598 0x0000 0x7 0x0
#define MX6SX_PAD_SD3_CLK__SDMA_DEBUG_EVENT_CHANNEL_5 0x0250 0x0598 0x0000 0x9 0x0
#define MX6SX_PAD_SD3_CMD__USDHC3_CMD 0x0254 0x059C 0x0000 0x0 0x0
-#define MX6SX_PAD_SD3_CMD__UART4_RX 0x0254 0x059C 0x0848 0x1 0x0
-#define MX6SX_PAD_SD3_CMD__UART4_TX 0x0254 0x059C 0x0000 0x1 0x0
+#define MX6SX_PAD_SD3_CMD__UART4_DCE_TX 0x0254 0x059C 0x0000 0x1 0x0
+#define MX6SX_PAD_SD3_CMD__UART4_DTE_RX 0x0254 0x059C 0x0848 0x1 0x0
#define MX6SX_PAD_SD3_CMD__ECSPI4_MOSI 0x0254 0x059C 0x0748 0x2 0x0
#define MX6SX_PAD_SD3_CMD__AUDMUX_AUD6_RXC 0x0254 0x059C 0x067C 0x3 0x0
#define MX6SX_PAD_SD3_CMD__LCDIF2_HSYNC 0x0254 0x059C 0x07E4 0x4 0x1
@@ -1364,7 +1385,8 @@
#define MX6SX_PAD_SD3_DATA1__GPU_DEBUG_1 0x025C 0x05A4 0x0000 0x8 0x0
#define MX6SX_PAD_SD3_DATA1__SDMA_DEBUG_EVT_CHN_LINES_1 0x025C 0x05A4 0x0000 0x9 0x0
#define MX6SX_PAD_SD3_DATA2__USDHC3_DATA2 0x0260 0x05A8 0x0000 0x0 0x0
-#define MX6SX_PAD_SD3_DATA2__UART4_RTS_B 0x0260 0x05A8 0x0844 0x1 0x1
+#define MX6SX_PAD_SD3_DATA2__UART4_DCE_RTS 0x0260 0x05A8 0x0844 0x1 0x1
+#define MX6SX_PAD_SD3_DATA2__UART4_DTE_CTS 0x0260 0x05A8 0x0000 0x1 0x0
#define MX6SX_PAD_SD3_DATA2__ECSPI4_SS0 0x0260 0x05A8 0x074C 0x2 0x0
#define MX6SX_PAD_SD3_DATA2__AUDMUX_AUD6_TXFS 0x0260 0x05A8 0x0688 0x3 0x0
#define MX6SX_PAD_SD3_DATA2__LCDIF2_CLK 0x0260 0x05A8 0x0000 0x4 0x0
@@ -1374,8 +1396,8 @@
#define MX6SX_PAD_SD3_DATA2__GPU_DEBUG_2 0x0260 0x05A8 0x0000 0x8 0x0
#define MX6SX_PAD_SD3_DATA2__SDMA_DEBUG_EVENT_CHANNEL_2 0x0260 0x05A8 0x0000 0x9 0x0
#define MX6SX_PAD_SD3_DATA3__USDHC3_DATA3 0x0264 0x05AC 0x0000 0x0 0x0
-#define MX6SX_PAD_SD3_DATA3__UART4_RX 0x0264 0x05AC 0x0848 0x1 0x1
-#define MX6SX_PAD_SD3_DATA3__UART4_TX 0x0264 0x05AC 0x0000 0x1 0x0
+#define MX6SX_PAD_SD3_DATA3__UART4_DCE_RX 0x0264 0x05AC 0x0848 0x1 0x1
+#define MX6SX_PAD_SD3_DATA3__UART4_DTE_TX 0x0264 0x05AC 0x0000 0x1 0x0
#define MX6SX_PAD_SD3_DATA3__ECSPI4_MISO 0x0264 0x05AC 0x0744 0x2 0x0
#define MX6SX_PAD_SD3_DATA3__AUDMUX_AUD6_TXD 0x0264 0x05AC 0x0678 0x3 0x0
#define MX6SX_PAD_SD3_DATA3__LCDIF2_ENABLE 0x0264 0x05AC 0x0000 0x4 0x0
@@ -1387,8 +1409,8 @@
#define MX6SX_PAD_SD3_DATA4__USDHC3_DATA4 0x0268 0x05B0 0x0000 0x0 0x0
#define MX6SX_PAD_SD3_DATA4__CAN2_RX 0x0268 0x05B0 0x0690 0x1 0x0
#define MX6SX_PAD_SD3_DATA4__CANFD_RX2 0x0268 0x05B0 0x0698 0x2 0x0
-#define MX6SX_PAD_SD3_DATA4__UART3_RX 0x0268 0x05B0 0x0840 0x3 0x2
-#define MX6SX_PAD_SD3_DATA4__UART3_TX 0x0268 0x05B0 0x0000 0x3 0x0
+#define MX6SX_PAD_SD3_DATA4__UART3_DCE_RX 0x0268 0x05B0 0x0840 0x3 0x2
+#define MX6SX_PAD_SD3_DATA4__UART3_DTE_TX 0x0268 0x05B0 0x0000 0x3 0x0
#define MX6SX_PAD_SD3_DATA4__LCDIF2_DATA_3 0x0268 0x05B0 0x0000 0x4 0x0
#define MX6SX_PAD_SD3_DATA4__GPIO7_IO_6 0x0268 0x05B0 0x0000 0x5 0x0
#define MX6SX_PAD_SD3_DATA4__ENET2_1588_EVENT0_IN 0x0268 0x05B0 0x0000 0x6 0x0
@@ -1398,8 +1420,8 @@
#define MX6SX_PAD_SD3_DATA5__USDHC3_DATA5 0x026C 0x05B4 0x0000 0x0 0x0
#define MX6SX_PAD_SD3_DATA5__CAN1_TX 0x026C 0x05B4 0x0000 0x1 0x0
#define MX6SX_PAD_SD3_DATA5__CANFD_TX1 0x026C 0x05B4 0x0000 0x2 0x0
-#define MX6SX_PAD_SD3_DATA5__UART3_RX 0x026C 0x05B4 0x0840 0x3 0x3
-#define MX6SX_PAD_SD3_DATA5__UART3_TX 0x026C 0x05B4 0x0000 0x3 0x0
+#define MX6SX_PAD_SD3_DATA5__UART3_DCE_TX 0x026C 0x05B4 0x0000 0x3 0x0
+#define MX6SX_PAD_SD3_DATA5__UART3_DTE_RX 0x026C 0x05B4 0x0840 0x3 0x3
#define MX6SX_PAD_SD3_DATA5__LCDIF2_DATA_2 0x026C 0x05B4 0x0000 0x4 0x0
#define MX6SX_PAD_SD3_DATA5__GPIO7_IO_7 0x026C 0x05B4 0x0000 0x5 0x0
#define MX6SX_PAD_SD3_DATA5__ENET2_1588_EVENT0_OUT 0x026C 0x05B4 0x0000 0x6 0x0
@@ -1409,7 +1431,8 @@
#define MX6SX_PAD_SD3_DATA6__USDHC3_DATA6 0x0270 0x05B8 0x0000 0x0 0x0
#define MX6SX_PAD_SD3_DATA6__CAN2_TX 0x0270 0x05B8 0x0000 0x1 0x0
#define MX6SX_PAD_SD3_DATA6__CANFD_TX2 0x0270 0x05B8 0x0000 0x2 0x0
-#define MX6SX_PAD_SD3_DATA6__UART3_RTS_B 0x0270 0x05B8 0x083C 0x3 0x2
+#define MX6SX_PAD_SD3_DATA6__UART3_DCE_RTS 0x0270 0x05B8 0x083C 0x3 0x2
+#define MX6SX_PAD_SD3_DATA6__UART3_DTE_CTS 0x0270 0x05B8 0x0000 0x3 0x0
#define MX6SX_PAD_SD3_DATA6__LCDIF2_DATA_4 0x0270 0x05B8 0x0000 0x4 0x0
#define MX6SX_PAD_SD3_DATA6__GPIO7_IO_8 0x0270 0x05B8 0x0000 0x5 0x0
#define MX6SX_PAD_SD3_DATA6__ENET1_1588_EVENT0_OUT 0x0270 0x05B8 0x0000 0x6 0x0
@@ -1419,7 +1442,8 @@
#define MX6SX_PAD_SD3_DATA7__USDHC3_DATA7 0x0274 0x05BC 0x0000 0x0 0x0
#define MX6SX_PAD_SD3_DATA7__CAN1_RX 0x0274 0x05BC 0x068C 0x1 0x0
#define MX6SX_PAD_SD3_DATA7__CANFD_RX1 0x0274 0x05BC 0x0694 0x2 0x0
-#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B 0x0274 0x05BC 0x0000 0x3 0x0
+#define MX6SX_PAD_SD3_DATA7__UART3_DCE_CTS 0x0274 0x05BC 0x0000 0x3 0x0
+#define MX6SX_PAD_SD3_DATA7__UART3_DTE_RTS 0x0274 0x05BC 0x083C 0x3 0x3
#define MX6SX_PAD_SD3_DATA7__LCDIF2_DATA_5 0x0274 0x05BC 0x0000 0x4 0x0
#define MX6SX_PAD_SD3_DATA7__GPIO7_IO_9 0x0274 0x05BC 0x0000 0x5 0x0
#define MX6SX_PAD_SD3_DATA7__ENET1_1588_EVENT0_IN 0x0274 0x05BC 0x0000 0x6 0x0
@@ -1488,8 +1512,8 @@
#define MX6SX_PAD_SD4_DATA3__SDMA_DEBUG_MATCHED_DMBUS 0x028C 0x05D4 0x0000 0x9 0x0
#define MX6SX_PAD_SD4_DATA4__USDHC4_DATA4 0x0290 0x05D8 0x0000 0x0 0x0
#define MX6SX_PAD_SD4_DATA4__RAWNAND_DATA09 0x0290 0x05D8 0x0000 0x1 0x0
-#define MX6SX_PAD_SD4_DATA4__UART5_RX 0x0290 0x05D8 0x0850 0x2 0x0
-#define MX6SX_PAD_SD4_DATA4__UART5_TX 0x0290 0x05D8 0x0000 0x2 0x0
+#define MX6SX_PAD_SD4_DATA4__UART5_DCE_RX 0x0290 0x05D8 0x0850 0x2 0x0
+#define MX6SX_PAD_SD4_DATA4__UART5_DTE_TX 0x0290 0x05D8 0x0000 0x2 0x0
#define MX6SX_PAD_SD4_DATA4__ECSPI3_SCLK 0x0290 0x05D8 0x0730 0x3 0x0
#define MX6SX_PAD_SD4_DATA4__LCDIF2_DATA_8 0x0290 0x05D8 0x0000 0x4 0x0
#define MX6SX_PAD_SD4_DATA4__GPIO6_IO_18 0x0290 0x05D8 0x0000 0x5 0x0
@@ -1499,8 +1523,8 @@
#define MX6SX_PAD_SD4_DATA4__SDMA_DEBUG_RTBUFFER_WRITE 0x0290 0x05D8 0x0000 0x9 0x0
#define MX6SX_PAD_SD4_DATA5__USDHC4_DATA5 0x0294 0x05DC 0x0000 0x0 0x0
#define MX6SX_PAD_SD4_DATA5__RAWNAND_CE2_B 0x0294 0x05DC 0x0000 0x1 0x0
-#define MX6SX_PAD_SD4_DATA5__UART5_RX 0x0294 0x05DC 0x0850 0x2 0x1
-#define MX6SX_PAD_SD4_DATA5__UART5_TX 0x0294 0x05DC 0x0000 0x2 0x0
+#define MX6SX_PAD_SD4_DATA5__UART5_DCE_TX 0x0294 0x05DC 0x0000 0x2 0x0
+#define MX6SX_PAD_SD4_DATA5__UART5_DTE_RX 0x0294 0x05DC 0x0850 0x2 0x1
#define MX6SX_PAD_SD4_DATA5__ECSPI3_MOSI 0x0294 0x05DC 0x0738 0x3 0x0
#define MX6SX_PAD_SD4_DATA5__LCDIF2_DATA_7 0x0294 0x05DC 0x0000 0x4 0x0
#define MX6SX_PAD_SD4_DATA5__GPIO6_IO_19 0x0294 0x05DC 0x0000 0x5 0x0
@@ -1510,7 +1534,8 @@
#define MX6SX_PAD_SD4_DATA5__SDMA_DEBUG_EVENT_CHANNEL_0 0x0294 0x05DC 0x0000 0x9 0x0
#define MX6SX_PAD_SD4_DATA6__USDHC4_DATA6 0x0298 0x05E0 0x0000 0x0 0x0
#define MX6SX_PAD_SD4_DATA6__RAWNAND_CE3_B 0x0298 0x05E0 0x0000 0x1 0x0
-#define MX6SX_PAD_SD4_DATA6__UART5_RTS_B 0x0298 0x05E0 0x084C 0x2 0x0
+#define MX6SX_PAD_SD4_DATA6__UART5_DCE_RTS 0x0298 0x05E0 0x084C 0x2 0x0
+#define MX6SX_PAD_SD4_DATA6__UART5_DTE_CTS 0x0298 0x05E0 0x0000 0x2 0x0
#define MX6SX_PAD_SD4_DATA6__ECSPI3_MISO 0x0298 0x05E0 0x0734 0x3 0x0
#define MX6SX_PAD_SD4_DATA6__LCDIF2_DATA_6 0x0298 0x05E0 0x0000 0x4 0x0
#define MX6SX_PAD_SD4_DATA6__GPIO6_IO_20 0x0298 0x05E0 0x0000 0x5 0x0
@@ -1520,7 +1545,8 @@
#define MX6SX_PAD_SD4_DATA6__SDMA_DEBUG_EVENT_CHANNEL_1 0x0298 0x05E0 0x0000 0x9 0x0
#define MX6SX_PAD_SD4_DATA7__USDHC4_DATA7 0x029C 0x05E4 0x0000 0x0 0x0
#define MX6SX_PAD_SD4_DATA7__RAWNAND_DATA08 0x029C 0x05E4 0x0000 0x1 0x0
-#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B 0x029C 0x05E4 0x0000 0x2 0x0
+#define MX6SX_PAD_SD4_DATA7__UART5_DCE_CTS 0x029C 0x05E4 0x0000 0x2 0x0
+#define MX6SX_PAD_SD4_DATA7__UART5_DTE_RTS 0x029C 0x05E4 0x084C 0x2 0x1
#define MX6SX_PAD_SD4_DATA7__ECSPI3_SS0 0x029C 0x05E4 0x073C 0x3 0x0
#define MX6SX_PAD_SD4_DATA7__LCDIF2_DATA_15 0x029C 0x05E4 0x0000 0x4 0x0
#define MX6SX_PAD_SD4_DATA7__GPIO6_IO_21 0x029C 0x05E4 0x0000 0x5 0x0
@@ -1551,4 +1577,92 @@
#define MX6SX_PAD_USB_H_STROBE__WDOG3_WDOG_RST_B_DEB 0x02A8 0x05F0 0x0000 0x4 0x0
#define MX6SX_PAD_USB_H_STROBE__GPIO7_IO_11 0x02A8 0x05F0 0x0000 0x5 0x0
+/* These legacy names are deprecated; remove them after a transition period. */
+#define MX6SX_PAD_GPIO1_IO04__UART1_RX MX6SX_PAD_GPIO1_IO04__UART1_DTE_RX
+#define MX6SX_PAD_GPIO1_IO04__UART1_TX MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX
+#define MX6SX_PAD_GPIO1_IO05__UART1_RX MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX
+#define MX6SX_PAD_GPIO1_IO05__UART1_TX MX6SX_PAD_GPIO1_IO05__UART1_DTE_TX
+#define MX6SX_PAD_GPIO1_IO06__UART2_RX MX6SX_PAD_GPIO1_IO06__UART2_DTE_RX
+#define MX6SX_PAD_GPIO1_IO06__UART2_TX MX6SX_PAD_GPIO1_IO06__UART2_DCE_TX
+#define MX6SX_PAD_GPIO1_IO06__UART1_RTS_B MX6SX_PAD_GPIO1_IO06__UART1_DCE_RTS
+#define MX6SX_PAD_GPIO1_IO07__UART2_RX MX6SX_PAD_GPIO1_IO07__UART2_DCE_RX
+#define MX6SX_PAD_GPIO1_IO07__UART2_TX MX6SX_PAD_GPIO1_IO07__UART2_DTE_TX
+#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B MX6SX_PAD_GPIO1_IO07__UART1_DCE_CTS
+#define MX6SX_PAD_GPIO1_IO08__UART2_RTS_B MX6SX_PAD_GPIO1_IO08__UART2_DCE_RTS
+#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B MX6SX_PAD_GPIO1_IO09__UART2_DCE_CTS
+#define MX6SX_PAD_CSI_DATA04__UART6_RX MX6SX_PAD_CSI_DATA04__UART6_DCE_RX
+#define MX6SX_PAD_CSI_DATA04__UART6_TX MX6SX_PAD_CSI_DATA04__UART6_DTE_TX
+#define MX6SX_PAD_CSI_DATA05__UART6_RX MX6SX_PAD_CSI_DATA05__UART6_DTE_RX
+#define MX6SX_PAD_CSI_DATA05__UART6_TX MX6SX_PAD_CSI_DATA05__UART6_DCE_TX
+#define MX6SX_PAD_CSI_DATA06__UART6_RTS_B MX6SX_PAD_CSI_DATA06__UART6_DCE_RTS
+#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B MX6SX_PAD_CSI_DATA07__UART6_DCE_CTS
+#define MX6SX_PAD_CSI_HSYNC__UART4_RTS_B MX6SX_PAD_CSI_HSYNC__UART4_DCE_RTS
+#define MX6SX_PAD_CSI_MCLK__UART4_RX MX6SX_PAD_CSI_MCLK__UART4_DCE_RX
+#define MX6SX_PAD_CSI_MCLK__UART4_TX MX6SX_PAD_CSI_MCLK__UART4_DTE_TX
+#define MX6SX_PAD_CSI_PIXCLK__UART4_RX MX6SX_PAD_CSI_PIXCLK__UART4_DTE_RX
+#define MX6SX_PAD_CSI_PIXCLK__UART4_TX MX6SX_PAD_CSI_PIXCLK__UART4_DCE_TX
+#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B MX6SX_PAD_CSI_VSYNC__UART4_DCE_CTS
+#define MX6SX_PAD_ENET2_COL__UART1_RX MX6SX_PAD_ENET2_COL__UART1_DCE_RX
+#define MX6SX_PAD_ENET2_COL__UART1_TX MX6SX_PAD_ENET2_COL__UART1_DTE_TX
+#define MX6SX_PAD_ENET2_CRS__UART1_RX MX6SX_PAD_ENET2_CRS__UART1_DTE_RX
+#define MX6SX_PAD_ENET2_CRS__UART1_TX MX6SX_PAD_ENET2_CRS__UART1_DCE_TX
+#define MX6SX_PAD_ENET2_RX_CLK__UART1_RTS_B MX6SX_PAD_ENET2_RX_CLK__UART1_DCE_RTS
+#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B MX6SX_PAD_ENET2_TX_CLK__UART1_DCE_CTS
+#define MX6SX_PAD_KEY_COL0__UART6_RTS_B MX6SX_PAD_KEY_COL0__UART6_DCE_RTS
+#define MX6SX_PAD_KEY_COL1__UART6_RX MX6SX_PAD_KEY_COL1__UART6_DTE_RX
+#define MX6SX_PAD_KEY_COL1__UART6_TX MX6SX_PAD_KEY_COL1__UART6_DCE_TX
+#define MX6SX_PAD_KEY_COL2__UART5_RTS_B MX6SX_PAD_KEY_COL2__UART5_DCE_RTS
+#define MX6SX_PAD_KEY_COL3__UART5_RX MX6SX_PAD_KEY_COL3__UART5_DTE_RX
+#define MX6SX_PAD_KEY_COL3__UART5_TX MX6SX_PAD_KEY_COL3__UART5_DCE_TX
+#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B MX6SX_PAD_KEY_ROW0__UART6_DCE_CTS
+#define MX6SX_PAD_KEY_ROW1__UART6_RX MX6SX_PAD_KEY_ROW1__UART6_DCE_RX
+#define MX6SX_PAD_KEY_ROW1__UART6_TX MX6SX_PAD_KEY_ROW1__UART6_DTE_TX
+#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B MX6SX_PAD_KEY_ROW2__UART5_DCE_CTS
+#define MX6SX_PAD_KEY_ROW3__UART5_RX MX6SX_PAD_KEY_ROW3__UART5_DCE_RX
+#define MX6SX_PAD_KEY_ROW3__UART5_TX MX6SX_PAD_KEY_ROW3__UART5_DTE_TX
+#define MX6SX_PAD_NAND_DATA04__UART3_RTS_B MX6SX_PAD_NAND_DATA04__UART3_DCE_RTS
+#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B MX6SX_PAD_NAND_DATA05__UART3_DCE_CTS
+#define MX6SX_PAD_NAND_DATA06__UART3_RX MX6SX_PAD_NAND_DATA06__UART3_DCE_RX
+#define MX6SX_PAD_NAND_DATA06__UART3_TX MX6SX_PAD_NAND_DATA06__UART3_DTE_TX
+#define MX6SX_PAD_NAND_DATA07__UART3_RX MX6SX_PAD_NAND_DATA07__UART3_DTE_RX
+#define MX6SX_PAD_NAND_DATA07__UART3_TX MX6SX_PAD_NAND_DATA07__UART3_DCE_TX
+#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B MX6SX_PAD_QSPI1B_DATA0__UART3_DCE_CTS
+#define MX6SX_PAD_QSPI1B_DATA1__UART3_RTS_B MX6SX_PAD_QSPI1B_DATA1__UART3_DCE_RTS
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_RX MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_TX MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX
+#define MX6SX_PAD_QSPI1B_SS0_B__UART3_RX MX6SX_PAD_QSPI1B_SS0_B__UART3_DTE_RX
+#define MX6SX_PAD_QSPI1B_SS0_B__UART3_TX MX6SX_PAD_QSPI1B_SS0_B__UART3_DCE_TX
+#define MX6SX_PAD_SD1_DATA0__UART2_RX MX6SX_PAD_SD1_DATA0__UART2_DCE_RX
+#define MX6SX_PAD_SD1_DATA0__UART2_TX MX6SX_PAD_SD1_DATA0__UART2_DTE_TX
+#define MX6SX_PAD_SD1_DATA1__UART2_RX MX6SX_PAD_SD1_DATA1__UART2_DTE_RX
+#define MX6SX_PAD_SD1_DATA1__UART2_TX MX6SX_PAD_SD1_DATA1__UART2_DCE_TX
+#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B MX6SX_PAD_SD1_DATA2__UART2_DCE_CTS
+#define MX6SX_PAD_SD1_DATA3__UART2_RTS_B MX6SX_PAD_SD1_DATA3__UART2_DCE_RTS
+#define MX6SX_PAD_SD2_DATA0__UART4_RX MX6SX_PAD_SD2_DATA0__UART4_DCE_RX
+#define MX6SX_PAD_SD2_DATA0__UART4_TX MX6SX_PAD_SD2_DATA0__UART4_DTE_TX
+#define MX6SX_PAD_SD2_DATA1__UART4_RX MX6SX_PAD_SD2_DATA1__UART4_DTE_RX
+#define MX6SX_PAD_SD2_DATA1__UART4_TX MX6SX_PAD_SD2_DATA1__UART4_DCE_TX
+#define MX6SX_PAD_SD2_DATA2__UART6_RX MX6SX_PAD_SD2_DATA2__UART6_DCE_RX
+#define MX6SX_PAD_SD2_DATA2__UART6_TX MX6SX_PAD_SD2_DATA2__UART6_DTE_TX
+#define MX6SX_PAD_SD2_DATA3__UART6_RX MX6SX_PAD_SD2_DATA3__UART6_DTE_RX
+#define MX6SX_PAD_SD2_DATA3__UART6_TX MX6SX_PAD_SD2_DATA3__UART6_DCE_TX
+#define MX6SX_PAD_SD3_CLK__UART4_CTS_B MX6SX_PAD_SD3_CLK__UART4_DCE_CTS
+#define MX6SX_PAD_SD3_CMD__UART4_RX MX6SX_PAD_SD3_CMD__UART4_DTE_RX
+#define MX6SX_PAD_SD3_CMD__UART4_TX MX6SX_PAD_SD3_CMD__UART4_DCE_TX
+#define MX6SX_PAD_SD3_DATA2__UART4_RTS_B MX6SX_PAD_SD3_DATA2__UART4_DCE_RTS
+#define MX6SX_PAD_SD3_DATA3__UART4_RX MX6SX_PAD_SD3_DATA3__UART4_DCE_RX
+#define MX6SX_PAD_SD3_DATA3__UART4_TX MX6SX_PAD_SD3_DATA3__UART4_DTE_TX
+#define MX6SX_PAD_SD3_DATA4__UART3_RX MX6SX_PAD_SD3_DATA4__UART3_DCE_RX
+#define MX6SX_PAD_SD3_DATA4__UART3_TX MX6SX_PAD_SD3_DATA4__UART3_DTE_TX
+#define MX6SX_PAD_SD3_DATA5__UART3_RX MX6SX_PAD_SD3_DATA5__UART3_DTE_RX
+#define MX6SX_PAD_SD3_DATA5__UART3_TX MX6SX_PAD_SD3_DATA5__UART3_DCE_TX
+#define MX6SX_PAD_SD3_DATA6__UART3_RTS_B MX6SX_PAD_SD3_DATA6__UART3_DCE_RTS
+#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B MX6SX_PAD_SD3_DATA7__UART3_DCE_CTS
+#define MX6SX_PAD_SD4_DATA4__UART5_RX MX6SX_PAD_SD4_DATA4__UART5_DCE_RX
+#define MX6SX_PAD_SD4_DATA4__UART5_TX MX6SX_PAD_SD4_DATA4__UART5_DTE_TX
+#define MX6SX_PAD_SD4_DATA5__UART5_RX MX6SX_PAD_SD4_DATA5__UART5_DTE_RX
+#define MX6SX_PAD_SD4_DATA5__UART5_TX MX6SX_PAD_SD4_DATA5__UART5_DCE_TX
+#define MX6SX_PAD_SD4_DATA6__UART5_RTS_B MX6SX_PAD_SD4_DATA6__UART5_DCE_RTS
+#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B MX6SX_PAD_SD4_DATA7__UART5_DCE_CTS
+
#endif /* __DTS_IMX6SX_PINFUNC_H */
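
The rename above makes each i.MX6SX UART pad function explicit about the
DCE/DTE role, since RX/TX and RTS/CTS swap direction between the two roles;
the old generic names survive only as the deprecated aliases at the end of
the header. A minimal sketch of a board wiring UART5 as DTE with the new
names (pad config values are illustrative, and the pinctrl group is shown
next to the uart node for brevity rather than under &iomuxc where it
belongs):

	pinctrl_uart5: uart5grp {
		fsl,pins = <
			/* in DTE mode the COL3 pad receives, ROW3 transmits */
			MX6SX_PAD_KEY_COL3__UART5_DTE_RX	0x1b0b1
			MX6SX_PAD_KEY_ROW3__UART5_DTE_TX	0x1b0b1
		>;
	};

	&uart5 {
		pinctrl-names = "default";
		pinctrl-0 = <&pinctrl_uart5>;
		fsl,dte-mode;
		status = "okay";
	};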
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index 315044ccd65f..825924448ab4 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -229,8 +229,8 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX6SX_PAD_GPIO1_IO04__UART1_TX 0x1b0b1
- MX6SX_PAD_GPIO1_IO05__UART1_RX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX 0x1b0b1
>;
};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index f6972deb5e39..3e5fb72f21fc 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -564,17 +564,17 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX6SX_PAD_GPIO1_IO04__UART1_TX 0x1b0b1
- MX6SX_PAD_GPIO1_IO05__UART1_RX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX 0x1b0b1
>;
};
pinctrl_uart5: uart5grp {
fsl,pins = <
- MX6SX_PAD_KEY_ROW3__UART5_RX 0x1b0b1
- MX6SX_PAD_KEY_COL3__UART5_TX 0x1b0b1
- MX6SX_PAD_KEY_ROW2__UART5_CTS_B 0x1b0b1
- MX6SX_PAD_KEY_COL2__UART5_RTS_B 0x1b0b1
+ MX6SX_PAD_KEY_ROW3__UART5_DCE_RX 0x1b0b1
+ MX6SX_PAD_KEY_COL3__UART5_DCE_TX 0x1b0b1
+ MX6SX_PAD_KEY_ROW2__UART5_DCE_CTS 0x1b0b1
+ MX6SX_PAD_KEY_COL2__UART5_DCE_RTS 0x1b0b1
>;
};
diff --git a/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts b/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts
index 28563f21024e..6b728b03f1f2 100644
--- a/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts
+++ b/arch/arm/boot/dts/imx6sx-softing-vining-2000.dts
@@ -352,6 +352,12 @@
>;
};
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6SX_PAD_NAND_DATA02__GPIO4_IO_6 0x10b0
+ >;
+ };
+
pinctrl_pwm1: pwm1grp-1 {
fsl,pins = <
/* blue LED */
@@ -384,15 +390,15 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX6SX_PAD_GPIO1_IO04__UART1_TX 0x1b0b1
- MX6SX_PAD_GPIO1_IO05__UART1_RX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX 0x1b0b1
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
- MX6SX_PAD_GPIO1_IO06__UART2_TX 0x1b0b1
- MX6SX_PAD_GPIO1_IO07__UART2_RX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO06__UART2_DCE_TX 0x1b0b1
+ MX6SX_PAD_GPIO1_IO07__UART2_DCE_RX 0x1b0b1
>;
};
@@ -490,6 +496,14 @@
};
};
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio4 6 GPIO_ACTIVE_HIGH>;
+ reset-gpio-active-high;
+ status = "okay";
+};
+
&pwm1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm1>;
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 25d4aa985a69..ee645655090d 100644
--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -235,28 +235,28 @@
pinctrl_uart1: uart1grp {
fsl,pins =
- <MX6SX_PAD_GPIO1_IO04__UART1_TX 0x1b0b1>,
- <MX6SX_PAD_GPIO1_IO05__UART1_RX 0x1b0b1>;
+ <MX6SX_PAD_GPIO1_IO04__UART1_DCE_TX 0x1b0b1>,
+ <MX6SX_PAD_GPIO1_IO05__UART1_DCE_RX 0x1b0b1>;
};
pinctrl_uart2: uart2grp {
fsl,pins =
- <MX6SX_PAD_GPIO1_IO06__UART2_TX 0x1b0b1>,
- <MX6SX_PAD_GPIO1_IO07__UART2_RX 0x1b0b1>;
+ <MX6SX_PAD_GPIO1_IO06__UART2_DCE_TX 0x1b0b1>,
+ <MX6SX_PAD_GPIO1_IO07__UART2_DCE_RX 0x1b0b1>;
};
pinctrl_uart3: uart3grp {
fsl,pins =
- <MX6SX_PAD_SD3_DATA4__UART3_RX 0x13059>,
- <MX6SX_PAD_SD3_DATA5__UART3_TX 0x13059>,
- <MX6SX_PAD_SD3_DATA6__UART3_RTS_B 0x13059>,
- <MX6SX_PAD_SD3_DATA7__UART3_CTS_B 0x13059>;
+ <MX6SX_PAD_SD3_DATA4__UART3_DCE_RX 0x13059>,
+ <MX6SX_PAD_SD3_DATA5__UART3_DCE_TX 0x13059>,
+ <MX6SX_PAD_SD3_DATA6__UART3_DCE_RTS 0x13059>,
+ <MX6SX_PAD_SD3_DATA7__UART3_DCE_CTS 0x13059>;
};
pinctrl_uart5: uart5grp {
fsl,pins =
- <MX6SX_PAD_SD4_DATA4__UART5_RX 0x1b0b1>,
- <MX6SX_PAD_SD4_DATA5__UART5_TX 0x1b0b1>;
+ <MX6SX_PAD_SD4_DATA4__UART5_DCE_RX 0x1b0b1>,
+ <MX6SX_PAD_SD4_DATA5__UART5_DCE_TX 0x1b0b1>;
};
pinctrl_uart6: uart6grp {
@@ -265,10 +265,10 @@
<MX6SX_PAD_CSI_DATA01__UART6_DSR_B 0x1b0b1>,
<MX6SX_PAD_CSI_DATA02__UART6_DTR_B 0x1b0b1>,
<MX6SX_PAD_CSI_DATA03__UART6_DCD_B 0x1b0b1>,
- <MX6SX_PAD_CSI_DATA04__UART6_RX 0x1b0b1>,
- <MX6SX_PAD_CSI_DATA05__UART6_TX 0x1b0b1>,
- <MX6SX_PAD_CSI_DATA06__UART6_RTS_B 0x1b0b1>,
- <MX6SX_PAD_CSI_DATA07__UART6_CTS_B 0x1b0b1>;
+ <MX6SX_PAD_CSI_DATA04__UART6_DCE_RX 0x1b0b1>,
+ <MX6SX_PAD_CSI_DATA05__UART6_DCE_TX 0x1b0b1>,
+ <MX6SX_PAD_CSI_DATA06__UART6_DCE_RTS 0x1b0b1>,
+ <MX6SX_PAD_CSI_DATA07__UART6_DCE_CTS 0x1b0b1>;
};
pinctrl_otg1_reg: otg1grp {
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 59bad60a47dc..d6f831731460 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -87,6 +87,8 @@
"pll1_sw", "pll1_sys";
arm-supply = <&reg_arm>;
soc-supply = <&reg_soc>;
+ nvmem-cells = <&cpu_speed_grade>;
+ nvmem-cell-names = "speed_grade";
};
};
@@ -235,7 +237,7 @@
status = "disabled";
};
- aips1: aips-bus@2000000 {
+ aips1: bus@2000000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -466,7 +468,7 @@
status = "disabled";
};
- gpt: gpt@2098000 {
+ gpt: timer@2098000 {
compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -559,7 +561,7 @@
gpio-ranges = <&iomuxc 0 148 10>, <&iomuxc 10 169 2>;
};
- kpp: kpp@20b8000 {
+ kpp: keypad@20b8000 {
compatible = "fsl,imx6sx-kpp", "fsl,imx21-kpp";
reg = <0x020b8000 0x4000>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
@@ -567,14 +569,14 @@
status = "disabled";
};
- wdog1: wdog@20bc000 {
+ wdog1: watchdog@20bc000 {
compatible = "fsl,imx6sx-wdt", "fsl,imx21-wdt";
reg = <0x020bc000 0x4000>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_IPG>;
};
- wdog2: wdog@20c0000 {
+ wdog2: watchdog@20c0000 {
compatible = "fsl,imx6sx-wdt", "fsl,imx21-wdt";
reg = <0x020c0000 0x4000>;
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
@@ -582,7 +584,7 @@
status = "disabled";
};
- clks: ccm@20c4000 {
+ clks: clock-controller@20c4000 {
compatible = "fsl,imx6sx-ccm";
reg = <0x020c4000 0x4000>;
interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
@@ -806,7 +808,7 @@
};
};
- iomuxc: iomuxc@20e0000 {
+ iomuxc: pinctrl@20e0000 {
compatible = "fsl,imx6sx-iomuxc";
reg = <0x020e0000 0x4000>;
};
@@ -830,14 +832,14 @@
};
};
- aips2: aips-bus@2100000 {
+ aips2: bus@2100000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x02100000 0x100000>;
ranges;
- crypto: caam@2100000 {
+ crypto: crypto@2100000 {
compatible = "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
@@ -850,13 +852,13 @@
<&clks IMX6SX_CLK_EIM_SLOW>;
clock-names = "mem", "aclk", "ipg", "emi_slow";
- sec_jr0: jr0@1000 {
+ sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr1: jr1@2000 {
+ sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
@@ -1051,13 +1053,17 @@
status = "disabled";
};
- ocotp: ocotp@21bc000 {
+ ocotp: ocotp-ctrl@21bc000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,imx6sx-ocotp", "syscon";
reg = <0x021bc000 0x4000>;
clocks = <&clks IMX6SX_CLK_OCOTP>;
+ cpu_speed_grade: speed-grade@10 {
+ reg = <0x10 4>;
+ };
+
tempmon_calib: calib@38 {
reg = <0x38 4>;
};
@@ -1188,7 +1194,7 @@
};
};
- aips3: aips-bus@2200000 {
+ aips3: bus@2200000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -1289,7 +1295,7 @@
status = "disabled";
};
- wdog3: wdog@2288000 {
+ wdog3: watchdog@2288000 {
compatible = "fsl,imx6sx-wdt", "fsl,imx21-wdt";
reg = <0x02288000 0x4000>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
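
The two imx6sx.dtsi hunks above wire the CPU to its speed-grading fuses:
the OCOTP controller exposes the 4-byte fuse word at offset 0x10 as an
nvmem cell, and the cpu node consumes it as "speed_grade" so the cpufreq
driver can reject operating points above the fused grade. Consolidated,
the provider/consumer pair looks roughly like this (the &cpu0 label is
assumed here for illustration):

	&ocotp {
		/* fuse word holding the CPU speed grade */
		cpu_speed_grade: speed-grade@10 {
			reg = <0x10 4>;
		};
	};

	&cpu0 {
		nvmem-cells = <&cpu_speed_grade>;
		nvmem-cell-names = "speed_grade";
	};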
diff --git a/arch/arm/boot/dts/imx6ul-ccimx6ulsbcpro.dts b/arch/arm/boot/dts/imx6ul-ccimx6ulsbcpro.dts
index 3749fdda3611..5d3805b07032 100644
--- a/arch/arm/boot/dts/imx6ul-ccimx6ulsbcpro.dts
+++ b/arch/arm/boot/dts/imx6ul-ccimx6ulsbcpro.dts
@@ -25,7 +25,7 @@
};
panel {
- compatible = "auo,g101evn010", "simple-panel";
+ compatible = "auo,g101evn010";
power-supply = <&ldo4_ext>;
backlight = <&lcd_backlight>;
diff --git a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
new file mode 100644
index 000000000000..162dc259edc8
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright 2015 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+/dts-v1/;
+
+#include "imx6ul-pico.dtsi"
+/ {
+ model = "TechNexion PICO-IMX6UL and DWARF baseboard";
+ compatible = "technexion,imx6ul-pico-dwarf", "fsl,imx6ul";
+
+ sound {
+ compatible = "fsl,imx-audio-sgtl5000";
+ model = "imx6ul-sgtl5000";
+ audio-cpu = <&sai1>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "LINE_IN", "Line In Jack",
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ };
+
+ sys_mclk: clock-sys-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ sgtl5000: audio-codec@a {
+ reg = <0x0a>;
+ compatible = "fsl,sgtl5000";
+ clocks = <&sys_mclk>;
+ VDDA-supply = <&reg_2p5v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ pressure-sensor@60 {
+ compatible = "fsl,mpl3115";
+ reg = <0x60>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul-pico.dtsi b/arch/arm/boot/dts/imx6ul-pico.dtsi
index de9f83189ba8..df1da98ab10f 100644
--- a/arch/arm/boot/dts/imx6ul-pico.dtsi
+++ b/arch/arm/boot/dts/imx6ul-pico.dtsi
@@ -20,7 +20,7 @@
stdout-path = &uart6;
};
- backlight {
+ backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pwm3 0 5000000>;
brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -72,6 +72,17 @@
regulator-max-microvolt = <3300000>;
startup-delay-us = <200000>;
};
+
+ panel {
+ compatible = "vxt,vl050-8048nt-c01";
+ backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
};
&can1 {
@@ -154,31 +165,11 @@
&lcdif {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lcdif_dat &pinctrl_lcdif_ctrl>;
- display = <&display0>;
status = "okay";
- display0: display0 {
- bits-per-pixel = <32>;
- bus-width = <24>;
-
- display-timings {
- native-mode = <&timing0>;
-
- timing0: timing0 {
- clock-frequency = <33200000>;
- hactive = <800>;
- vactive = <480>;
- hfront-porch = <210>;
- hback-porch = <46>;
- hsync-len = <1>;
- vback-porch = <22>;
- vfront-porch = <23>;
- vsync-len = <1>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
};
};
};
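
The lcdif change above drops the legacy display-timings child node in
favour of the panel framework: the timings now come from the panel driver
matching "vxt,vl050-8048nt-c01", and the controller and panel find each
other through an of-graph endpoint pair whose remote-endpoint properties
point at one another. Stripped down to the link itself:

	panel {
		compatible = "vxt,vl050-8048nt-c01";
		backlight = <&backlight>;

		port {
			panel_in: endpoint {
				remote-endpoint = <&display_out>;
			};
		};
	};

	&lcdif {
		port {
			display_out: endpoint {
				remote-endpoint = <&panel_in>;
			};
		};
	};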
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index d9fdca12819b..2ccf67c4ac1a 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -204,7 +204,7 @@
status = "disabled";
};
- aips1: aips-bus@2000000 {
+ aips1: bus@2000000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -430,7 +430,7 @@
status = "disabled";
};
- gpt1: gpt@2098000 {
+ gpt1: timer@2098000 {
compatible = "fsl,imx6ul-gpt", "fsl,imx6sx-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -523,7 +523,7 @@
status = "disabled";
};
- kpp: kpp@20b8000 {
+ kpp: keypad@20b8000 {
compatible = "fsl,imx6ul-kpp", "fsl,imx6q-kpp", "fsl,imx21-kpp";
reg = <0x020b8000 0x4000>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
@@ -531,14 +531,14 @@
status = "disabled";
};
- wdog1: wdog@20bc000 {
+ wdog1: watchdog@20bc000 {
compatible = "fsl,imx6ul-wdt", "fsl,imx21-wdt";
reg = <0x020bc000 0x4000>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_WDOG1>;
};
- wdog2: wdog@20c0000 {
+ wdog2: watchdog@20c0000 {
compatible = "fsl,imx6ul-wdt", "fsl,imx21-wdt";
reg = <0x020c0000 0x4000>;
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
@@ -546,7 +546,7 @@
status = "disabled";
};
- clks: ccm@20c4000 {
+ clks: clock-controller@20c4000 {
compatible = "fsl,imx6ul-ccm";
reg = <0x020c4000 0x4000>;
interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
@@ -693,7 +693,7 @@
interrupt-parent = <&intc>;
};
- iomuxc: iomuxc@20e0000 {
+ iomuxc: pinctrl@20e0000 {
compatible = "fsl,imx6ul-iomuxc";
reg = <0x020e0000 0x4000>;
};
@@ -704,7 +704,7 @@
reg = <0x020e4000 0x4000>;
};
- gpt2: gpt@20e8000 {
+ gpt2: timer@20e8000 {
compatible = "fsl,imx6ul-gpt", "fsl,imx6sx-gpt";
reg = <0x020e8000 0x4000>;
interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
@@ -771,14 +771,14 @@
};
};
- aips2: aips-bus@2100000 {
+ aips2: bus@2100000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x02100000 0x100000>;
ranges;
- crypto: caam@2140000 {
+ crypto: crypto@2140000 {
compatible = "fsl,imx6ul-caam", "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
@@ -789,19 +789,19 @@
<&clks IMX6UL_CLK_CAAM_MEM>;
clock-names = "ipg", "aclk", "mem";
- sec_jr0: jr0@1000 {
+ sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr1: jr1@2000 {
+ sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr2: jr2@3000 {
+ sec_jr2: jr@3000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x3000 0x1000>;
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
@@ -1007,7 +1007,7 @@
status = "disabled";
};
- wdog3: wdog@21e4000 {
+ wdog3: watchdog@21e4000 {
compatible = "fsl,imx6ul-wdt", "fsl,imx21-wdt";
reg = <0x021e4000 0x4000>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/imx6ull.dtsi b/arch/arm/boot/dts/imx6ull.dtsi
index b7e67d121322..fcde7f77ae42 100644
--- a/arch/arm/boot/dts/imx6ull.dtsi
+++ b/arch/arm/boot/dts/imx6ull.dtsi
@@ -51,7 +51,7 @@
/ {
soc {
- aips3: aips-bus@2200000 {
+ aips3: bus@2200000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx7-colibri-aster.dtsi b/arch/arm/boot/dts/imx7-colibri-aster.dtsi
new file mode 100644
index 000000000000..9fa701bec2ec
--- /dev/null
+++ b/arch/arm/boot/dts/imx7-colibri-aster.dtsi
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2017-2020 Toradex AG
+ *
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpiokeys>;
+
+ power {
+ label = "Wake-Up";
+ gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+ };
+
+ panel: panel {
+ compatible = "edt,et057090dhu";
+ backlight = <&bl>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lcdif_out>;
+ };
+ };
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_usbh_vbus: regulator-usbh-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh_reg>;
+ regulator-name = "VCC_USB[1-4]";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ vin-supply = <&reg_5v0>;
+ };
+};
+
+&adc1 {
+ status = "okay";
+};
+
+/*
+ * ADC2 is not available on the Aster board and
+ * conflicts with the AD7879 resistive touchscreen.
+ */
+&adc2 {
+ status = "disabled";
+};
+
+&bl {
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ power-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&fec1 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+
+ /* Microchip/Atmel maxtouch controller */
+ touchscreen@4a {
+ compatible = "atmel,maxtouch";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpiotouch>;
+ reg = <0x4a>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <15 IRQ_TYPE_EDGE_FALLING>; /* SODIMM 107 */
+ reset-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* SODIMM 106 */
+ };
+
+ /* M41T0M6 real time clock on carrier board */
+ rtc: m41t0m6@68 {
+ compatible = "st,m41t0";
+ reg = <0x68>;
+ };
+};
+
+&iomuxc {
+ pinctrl_gpiotouch: touchgpios {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA15__GPIO2_IO15 0x74
+ MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x14
+ >;
+ };
+};
+
+&lcdif {
+ status = "okay";
+
+ port {
+ lcdif_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&pwm3 {
+ status = "okay";
+};
+
+&pwm4 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&usbotg1 {
+ status = "okay";
+};
+
+&usdhc1 {
+ keep-power-in-suspend;
+ no-1-8-v;
+ wakeup-source;
+ vmmc-supply = <&reg_3v3>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
index 6aa123cbdadb..97601375f264 100644
--- a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
@@ -1,46 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2016 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2020 Toradex
*/
/ {
+ aliases {
+ rtc0 = &rtc;
+ rtc1 = &snvs_rtc;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
index 04717cf69db0..e18e89dec879 100644
--- a/arch/arm/boot/dts/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -1,43 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2016 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2020 Toradex
*/
/ {
@@ -130,6 +93,184 @@
status = "disabled";
};
+&gpio1 {
+ gpio-line-names = "SODIMM_43",
+ "SODIMM_45",
+ "SODIMM_135",
+ "SODIMM_22",
+ "",
+ "",
+ "SODIMM_37",
+ "SODIMM_29",
+ "SODIMM_59",
+ "SODIMM_28",
+ "SODIMM_30",
+ "SODIMM_67",
+ "",
+ "",
+ "SODIMM_188",
+ "SODIMM_178";
+};
+
+&gpio2 {
+ gpio-line-names = "SODIMM_111",
+ "SODIMM_113",
+ "SODIMM_115",
+ "SODIMM_117",
+ "SODIMM_119",
+ "SODIMM_121",
+ "SODIMM_123",
+ "SODIMM_125",
+ "SODIMM_91",
+ "SODIMM_89",
+ "SODIMM_105",
+ "SODIMM_152",
+ "SODIMM_150",
+ "SODIMM_95",
+ "SODIMM_126",
+ "SODIMM_107",
+ "SODIMM_114",
+ "SODIMM_116",
+ "SODIMM_118",
+ "SODIMM_120",
+ "SODIMM_122",
+ "SODIMM_124",
+ "SODIMM_127",
+ "SODIMM_130",
+ "SODIMM_132",
+ "SODIMM_134",
+ "SODIMM_133",
+ "SODIMM_104",
+ "SODIMM_106",
+ "SODIMM_110",
+ "SODIMM_112",
+ "SODIMM_128";
+};
+
+&gpio3 {
+ gpio-line-names = "SODIMM_56",
+ "SODIMM_44",
+ "SODIMM_68",
+ "SODIMM_82",
+ "SODIMM_93",
+ "SODIMM_76",
+ "SODIMM_70",
+ "SODIMM_60",
+ "SODIMM_58",
+ "SODIMM_78",
+ "SODIMM_72",
+ "SODIMM_80",
+ "SODIMM_46",
+ "SODIMM_62",
+ "SODIMM_48",
+ "SODIMM_74",
+ "SODIMM_50",
+ "SODIMM_52",
+ "SODIMM_54",
+ "SODIMM_66",
+ "SODIMM_64",
+ "SODIMM_57",
+ "SODIMM_61",
+ "SODIMM_136",
+ "SODIMM_138",
+ "SODIMM_140",
+ "SODIMM_142",
+ "SODIMM_144",
+ "SODIMM_146";
+};
+
+&gpio4 {
+ gpio-line-names = "SODIMM_35",
+ "SODIMM_33",
+ "SODIMM_38",
+ "SODIMM_36",
+ "SODIMM_21",
+ "SODIMM_19",
+ "SODIMM_131",
+ "SODIMM_129",
+ "SODIMM_90",
+ "SODIMM_92",
+ "SODIMM_88",
+ "SODIMM_86",
+ "SODIMM_81",
+ "SODIMM_94",
+ "SODIMM_96",
+ "SODIMM_75",
+ "SODIMM_101",
+ "SODIMM_103",
+ "SODIMM_79",
+ "SODIMM_97",
+ "SODIMM_67",
+ "SODIMM_59",
+ "SODIMM_85",
+ "SODIMM_65";
+};
+
+&gpio5 {
+ gpio-line-names = "SODIMM_69",
+ "SODIMM_71",
+ "SODIMM_73",
+ "SODIMM_47",
+ "SODIMM_190",
+ "SODIMM_192",
+ "SODIMM_49",
+ "SODIMM_51",
+ "SODIMM_53",
+ "",
+ "",
+ "SODIMM_98",
+ "SODIMM_184",
+ "SODIMM_186",
+ "SODIMM_23",
+ "SODIMM_31",
+ "SODIMM_100",
+ "SODIMM_102";
+};
+
+&gpio6 {
+ gpio-line-names = "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SODIMM_169",
+ "",
+ "",
+ "",
+ "SODIMM_77",
+ "SODIMM_24",
+ "",
+ "SODIMM_25",
+ "SODIMM_27",
+ "SODIMM_32",
+ "SODIMM_34";
+};
+
+&gpio7 {
+ gpio-line-names = "",
+ "",
+ "SODIMM_63",
+ "SODIMM_55",
+ "",
+ "",
+ "",
+ "",
+ "SODIMM_196",
+ "SODIMM_194",
+ "",
+ "SODIMM_99",
+ "",
+ "",
+ "SODIMM_137";
+};
+
&gpmi {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpmi_nand>;
@@ -345,7 +486,7 @@
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio1 &pinctrl_gpio2 &pinctrl_gpio3 &pinctrl_gpio4
- &pinctrl_gpio7>;
+ &pinctrl_gpio7 &pinctrl_usbc_det>;
pinctrl_gpio1: gpio1-grp {
fsl,pins = <
@@ -356,7 +497,6 @@
MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x14 /* SODIMM 95 */
MX7D_PAD_ENET1_RGMII_TXC__GPIO7_IO11 0x14 /* SODIMM 99 */
MX7D_PAD_EPDC_DATA10__GPIO2_IO10 0x74 /* SODIMM 105 */
- MX7D_PAD_EPDC_DATA15__GPIO2_IO15 0x74 /* SODIMM 107 */
MX7D_PAD_EPDC_DATA00__GPIO2_IO0 0x14 /* SODIMM 111 */
MX7D_PAD_EPDC_DATA01__GPIO2_IO1 0x14 /* SODIMM 113 */
MX7D_PAD_EPDC_DATA02__GPIO2_IO2 0x14 /* SODIMM 115 */
@@ -373,7 +513,6 @@
MX7D_PAD_SD2_DATA2__GPIO5_IO16 0x14 /* SODIMM 100 */
MX7D_PAD_SD2_DATA3__GPIO5_IO17 0x14 /* SODIMM 102 */
MX7D_PAD_EPDC_GDSP__GPIO2_IO27 0x14 /* SODIMM 104 */
- MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x74 /* SODIMM 106 */
MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x14 /* SODIMM 110 */
MX7D_PAD_EPDC_PWR_COM__GPIO2_IO30 0x14 /* SODIMM 112 */
MX7D_PAD_EPDC_SDCLK__GPIO2_IO16 0x14 /* SODIMM 114 */
@@ -450,7 +589,6 @@
pinctrl_enet1: enet1grp {
fsl,pins = <
- MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x14
MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x73
MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x73
MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x73
@@ -648,6 +786,12 @@
>;
};
+ pinctrl_usbc_det: gpio-usbc-det {
+ fsl,pins = <
+ MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x14
+ >;
+ };
+
pinctrl_usbh_reg: gpio-usbh-vbus {
fsl,pins = <
MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14 /* SODIMM 129 USBH PEN */
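
Moving MX7D_PAD_ENET1_CRS out of pinctrl_enet1 into its own group, and
adding that group to the iomuxc pinctrl-0 list, turns the USB-C detect pad
into a hog: it is muxed as GPIO as soon as the pin controller probes,
independent of whether the FEC driver ever claims the Ethernet group.
Reduced to the pattern (the other hogged groups are elided):

	&iomuxc {
		pinctrl-names = "default";
		/* hogged groups are applied at pin-controller probe time */
		pinctrl-0 = <&pinctrl_usbc_det>;

		pinctrl_usbc_det: gpio-usbc-det {
			fsl,pins = <
				MX7D_PAD_ENET1_CRS__GPIO7_IO14	0x14
			>;
		};
	};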
diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
index 7646284e13a7..89267cd59037 100644
--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
@@ -85,12 +85,12 @@
pmic: pmic@8 {
compatible = "fsl,pfuze3000";
- reg = <0x08>;
+ reg = <0x8>;
regulators {
sw1a_reg: sw1a {
regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1475000>;
+ regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
regulator-ramp-delay = <6250>;
diff --git a/arch/arm/boot/dts/imx7d-colibri-aster.dts b/arch/arm/boot/dts/imx7d-colibri-aster.dts
new file mode 100644
index 000000000000..f3f0537d5a37
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-colibri-aster.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2017-2020 Toradex AG
+ *
+ */
+
+/dts-v1/;
+#include "imx7d-colibri.dtsi"
+#include "imx7-colibri-aster.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX7D on Aster Carrier Board";
+ compatible = "toradex,colibri-imx7d-aster", "toradex,colibri-imx7d",
+ "fsl,imx7d";
+};
+
+&usbotg2 {
+ vbus-supply = <&reg_usbh_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7d-colibri-emmc-aster.dts b/arch/arm/boot/dts/imx7d-colibri-emmc-aster.dts
new file mode 100644
index 000000000000..20480276cb0e
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-colibri-emmc-aster.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2017-2020 Toradex AG
+ *
+ */
+
+/dts-v1/;
+#include "imx7d-colibri-emmc.dtsi"
+#include "imx7-colibri-aster.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX7D 1GB (eMMC) on Aster Carrier Board";
+ compatible = "toradex,colibri-imx7d-emmc-aster",
+ "toradex,colibri-imx7d-emmc", "fsl,imx7d";
+};
+
+&usbotg2 {
+ vbus-supply = <&reg_usbh_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi
index 898f4b8d7421..af39e5370fa1 100644
--- a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi
+++ b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi
@@ -13,6 +13,32 @@
};
};
+&gpio6 {
+ gpio-line-names = "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SODIMM_169",
+ "SODIMM_157",
+ "",
+ "SODIMM_163",
+ "SODIMM_77",
+ "SODIMM_24",
+ "",
+ "SODIMM_25",
+ "SODIMM_27",
+ "SODIMM_32",
+ "SODIMM_34";
+};
+
&usbotg2 {
dr_mode = "host";
};
diff --git a/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts b/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
index 136e11ab4893..87b132bcd272 100644
--- a/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
@@ -1,43 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2016 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2020 Toradex
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx7d-colibri.dtsi b/arch/arm/boot/dts/imx7d-colibri.dtsi
index e2e327f437e3..c59d72e50920 100644
--- a/arch/arm/boot/dts/imx7d-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7d-colibri.dtsi
@@ -1,43 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2016 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2020 Toradex
*/
#include "imx7d.dtsi"
diff --git a/arch/arm/boot/dts/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
new file mode 100644
index 000000000000..5162fe227d1e
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-pico-dwarf.dts
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright 2015 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+/dts-v1/;
+
+#include "imx7d-pico.dtsi"
+/ {
+ model = "TechNexion PICO-IMX7D and DWARF baseboard";
+ compatible = "technexion,imx7d-pico-dwarf", "fsl,imx7d";
+
+ sound {
+ compatible = "fsl,imx-audio-sgtl5000";
+ model = "imx7d-sgtl5000";
+ audio-cpu = <&sai1>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "LINE_IN", "Line In Jack",
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ };
+
+ sys_mclk: clock-sys-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ sgtl5000: audio-codec@a {
+ reg = <0x0a>;
+ compatible = "fsl,sgtl5000";
+ clocks = <&sys_mclk>;
+ VDDA-supply = <&reg_2p5v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ pressure-sensor@60 {
+ compatible = "fsl,mpl3115";
+ reg = <0x60>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ pca9554: io-expander@25 {
+ compatible = "nxp,pca9554";
+ gpio-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ reg = <0x25>;
+ };
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5x06";
+ reg = <0x38>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_touchscreen>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&pca9554 4 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ };
+};
+
+&iomuxc {
+ pinctrl_touchscreen: touchscreengrp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x14
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx7d-pico-nymph.dts b/arch/arm/boot/dts/imx7d-pico-nymph.dts
new file mode 100644
index 000000000000..104a85254adb
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-pico-nymph.dts
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright 2015 Technexion Ltd.
+//
+// Author: Wig Cheng <wig.cheng@technexion.com>
+// Richard Hu <richard.hu@technexion.com>
+// Tapani Utriainen <tapani@technexion.com>
+/dts-v1/;
+
+#include "imx7d-pico.dtsi"
+/ {
+ model = "TechNexion PICO-IMX7 and NYMPH baseboard";
+ compatible = "technexion,imx7d-pico-nymph", "fsl,imx7d";
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led {
+ label = "gpio-led";
+ gpios = <&gpio2 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-sgtl5000";
+ model = "imx7d-sgtl5000";
+ audio-cpu = <&sai1>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "LINE_IN", "Line In Jack",
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ };
+
+ sys_mclk: clock-sys-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ sgtl5000: audio-codec@a {
+ reg = <0x0a>;
+ compatible = "fsl,sgtl5000";
+ clocks = <&sys_mclk>;
+ VDDA-supply = <&reg_2p5v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ adc@52 {
+ compatible = "ti,adc081c";
+ reg = <0x52>;
+ vref-supply = <&reg_2p5v>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+};
+
+&iomuxc {
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x14
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx7s-colibri-aster.dts b/arch/arm/boot/dts/imx7s-colibri-aster.dts
new file mode 100644
index 000000000000..fca4e0a95c1b
--- /dev/null
+++ b/arch/arm/boot/dts/imx7s-colibri-aster.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2017-2020 Toradex AG
+ *
+ */
+
+/dts-v1/;
+#include "imx7s-colibri.dtsi"
+#include "imx7-colibri-aster.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX7S on Aster Carrier Board";
+ compatible = "toradex,colibri-imx7s-aster", "toradex,colibri-imx7s",
+ "fsl,imx7s";
+};
diff --git a/arch/arm/boot/dts/imx7s-colibri-eval-v3.dts b/arch/arm/boot/dts/imx7s-colibri-eval-v3.dts
index bd2a49c1ade6..aa70d3f2e2e2 100644
--- a/arch/arm/boot/dts/imx7s-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx7s-colibri-eval-v3.dts
@@ -1,43 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2016 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2020 Toradex
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx7s-colibri.dtsi b/arch/arm/boot/dts/imx7s-colibri.dtsi
index 6d16e32aed89..94de220a5965 100644
--- a/arch/arm/boot/dts/imx7s-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7s-colibri.dtsi
@@ -1,43 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2016 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2020 Toradex
*/
#include "imx7s.dtsi"
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 568d7a984aa6..76e3ffbbbfbf 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -315,7 +315,7 @@
<0x31006000 0x2000>;
};
- aips1: aips-bus@30000000 {
+ aips1: bus@30000000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -406,14 +406,14 @@
gpio-ranges = <&iomuxc 0 139 16>;
};
- wdog1: wdog@30280000 {
+ wdog1: watchdog@30280000 {
compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
reg = <0x30280000 0x10000>;
interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_WDOG1_ROOT_CLK>;
};
- wdog2: wdog@30290000 {
+ wdog2: watchdog@30290000 {
compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
reg = <0x30290000 0x10000>;
interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
@@ -421,7 +421,7 @@
status = "disabled";
};
- wdog3: wdog@302a0000 {
+ wdog3: watchdog@302a0000 {
compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
reg = <0x302a0000 0x10000>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
@@ -429,7 +429,7 @@
status = "disabled";
};
- wdog4: wdog@302b0000 {
+ wdog4: watchdog@302b0000 {
compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
reg = <0x302b0000 0x10000>;
interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
@@ -443,7 +443,7 @@
fsl,input-sel = <&iomuxc>;
};
- gpt1: gpt@302d0000 {
+ gpt1: timer@302d0000 {
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302d0000 0x10000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
@@ -452,7 +452,7 @@
clock-names = "ipg", "per";
};
- gpt2: gpt@302e0000 {
+ gpt2: timer@302e0000 {
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302e0000 0x10000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
@@ -462,7 +462,7 @@
status = "disabled";
};
- gpt3: gpt@302f0000 {
+ gpt3: timer@302f0000 {
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302f0000 0x10000>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
@@ -472,7 +472,7 @@
status = "disabled";
};
- gpt4: gpt@30300000 {
+ gpt4: timer@30300000 {
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x30300000 0x10000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
@@ -482,7 +482,7 @@
status = "disabled";
};
- kpp: kpp@30320000 {
+ kpp: keypad@30320000 {
compatible = "fsl,imx7d-kpp", "fsl,imx21-kpp";
reg = <0x30320000 0x10000>;
interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
@@ -490,7 +490,7 @@
status = "disabled";
};
- iomuxc: iomuxc@30330000 {
+ iomuxc: pinctrl@30330000 {
compatible = "fsl,imx7d-iomuxc";
reg = <0x30330000 0x10000>;
};
@@ -606,13 +606,15 @@
compatible = "fsl,sec-v4.0-pwrkey";
regmap = <&snvs>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_SNVS_CLK>;
+ clock-names = "snvs-pwrkey";
linux,keycode = <KEY_POWER>;
wakeup-source;
status = "disabled";
};
};
- clks: ccm@30380000 {
+ clks: clock-controller@30380000 {
compatible = "fsl,imx7d-ccm";
reg = <0x30380000 0x10000>;
interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
@@ -663,7 +665,7 @@
};
};
- aips2: aips-bus@30400000 {
+ aips2: bus@30400000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -803,7 +805,7 @@
};
};
- aips3: aips-bus@30800000 {
+ aips3: bus@30800000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -932,7 +934,7 @@
};
};
- crypto: caam@30900000 {
+ crypto: crypto@30900000 {
compatible = "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
@@ -943,19 +945,19 @@
<&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
clock-names = "ipg", "aclk";
- sec_jr0: jr0@1000 {
+ sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr1: jr1@2000 {
+ sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr2: jr1@3000 {
+ sec_jr2: jr@3000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x3000 0x1000>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/imx7ulp-evk.dts b/arch/arm/boot/dts/imx7ulp-evk.dts
index a863a2b337d6..eff51e113db4 100644
--- a/arch/arm/boot/dts/imx7ulp-evk.dts
+++ b/arch/arm/boot/dts/imx7ulp-evk.dts
@@ -72,7 +72,7 @@
srp-disable;
hnp-disable;
adp-disable;
- over-current-active-low;
+ disable-over-current;
status = "okay";
};
@@ -110,7 +110,6 @@
pinctrl_usbotg1_id: otg1idgrp {
fsl,pins = <
IMX7ULP_PAD_PTC13__USB0_ID 0x10003
- IMX7ULP_PAD_PTC16__USB1_OC2 0x10003
>;
};
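
With the USB1_OC2 pad removed from the mux above, the port no longer has a
routed over-current input, so the controller node switches from describing
the signal's polarity (over-current-active-low) to disabling detection
outright. Both properties come from the ChipIdea usbmisc binding; a
minimal sketch of the result:

	&usbotg1 {
		/* no over-current signal routed on this board */
		disable-over-current;
		status = "okay";
	};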
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index ab91c98f2124..f7c4878534c8 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -132,13 +132,13 @@
<&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>;
clock-names = "aclk", "ipg";
- sec_jr0: jr0@1000 {
+ sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr1: jr1@2000 {
+ sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index 94d2ff9836d0..198d66181c50 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -9,7 +9,6 @@
/ {
model = "ARM Integrator/AP";
compatible = "arm,integrator-ap";
- dma-ranges = <0x80000000 0x0 0x80000000>;
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 51e1305c6471..2093b38d6e6d 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -148,7 +148,7 @@
reg = <0 0x10005000 0 0x1000>;
};
- scpsys: scpsys@10006000 {
+ scpsys: power-controller@10006000 {
compatible = "mediatek,mt2701-scpsys", "syscon";
#power-domain-cells = <1>;
reg = <0 0x10006000 0 0x1000>;
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
index a79f0b6c3429..f76b4a3c34b9 100644
--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -268,7 +268,7 @@
reg = <0 0x10005000 0 0x1000>;
};
- scpsys: scpsys@10006000 {
+ scpsys: power-controller@10006000 {
compatible = "mediatek,mt7623-scpsys",
"mediatek,mt2701-scpsys",
"syscon";
diff --git a/arch/arm/boot/dts/mt7629.dtsi b/arch/arm/boot/dts/mt7629.dtsi
index 867b88103b9d..5cbb3d244c75 100644
--- a/arch/arm/boot/dts/mt7629.dtsi
+++ b/arch/arm/boot/dts/mt7629.dtsi
@@ -90,7 +90,7 @@
#clock-cells = <1>;
};
- scpsys: scpsys@10006000 {
+ scpsys: power-controller@10006000 {
compatible = "mediatek,mt7629-scpsys",
"mediatek,mt7622-scpsys";
#power-domain-cells = <1>;
@@ -241,6 +241,20 @@
status = "disabled";
};
+ pwm: pwm@11006000 {
+ compatible = "mediatek,mt7629-pwm";
+ reg = <0x11006000 0x1000>;
+ #pwm-cells = <2>;
+ clocks = <&topckgen CLK_TOP_PWM_SEL>,
+ <&pericfg CLK_PERI_PWM_PD>,
+ <&pericfg CLK_PERI_PWM1_PD>;
+ clock-names = "top", "main", "pwm1";
+ assigned-clocks = <&topckgen CLK_TOP_PWM_SEL>;
+ assigned-clock-parents =
+ <&topckgen CLK_TOP_UNIVPLL2_D4>;
+ status = "disabled";
+ };
+
i2c: i2c@11007000 {
compatible = "mediatek,mt7629-i2c",
"mediatek,mt2712-i2c";
diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi
index 459fd7027591..ef59e4e97d7c 100644
--- a/arch/arm/boot/dts/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/omap4-l4.dtsi
@@ -1529,6 +1529,7 @@
};
};
+ /* Unused DSS L4 access, see L3 instead */
target-module@40000 { /* 0x48040000, ap 13 0a.0 */
compatible = "ti,sysc";
status = "disabled";
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 9a87440d0b9d..763bdea8c829 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -108,7 +108,6 @@
dsp {
compatible = "ti,omap3-c64";
- ti,hwmods = "dsp";
};
iva {
@@ -415,87 +414,213 @@
*/
};
- dss: dss@58000000 {
- compatible = "ti,omap4-dss";
- reg = <0x58000000 0x80>;
- status = "disabled";
- ti,hwmods = "dss_core";
- clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ /*
+ * DSS uses only the L3 mapping, without the L4 one, as noted in
+ * the omap4460 TRM section "10.1.3 DSS Register Manual".
+ */
+ target-module@58000000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x58000000 4>,
+ <0x58000014 4>;
+ reg-names = "rev", "syss";
+ ti,syss-mask = <1>;
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 0>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 9>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 11>;
+ clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
#address-cells = <1>;
#size-cells = <1>;
- ranges;
-
- dispc@58001000 {
- compatible = "ti,omap4-dispc";
- reg = <0x58001000 0x1000>;
- interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "dss_dispc";
- clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
- };
-
- rfbi: encoder@58002000 {
- compatible = "ti,omap4-rfbi";
- reg = <0x58002000 0x1000>;
- status = "disabled";
- ti,hwmods = "dss_rfbi";
- clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>, <&l3_div_ck>;
- clock-names = "fck", "ick";
- };
+ ranges = <0 0x58000000 0x1000000>;
- venc: encoder@58003000 {
- compatible = "ti,omap4-venc";
- reg = <0x58003000 0x1000>;
+ dss: dss@0 {
+ compatible = "ti,omap4-dss";
+ reg = <0 0x80>;
status = "disabled";
- ti,hwmods = "dss_venc";
- clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 11>;
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>;
clock-names = "fck";
- };
-
- dsi1: encoder@58004000 {
- compatible = "ti,omap4-dsi";
- reg = <0x58004000 0x200>,
- <0x58004200 0x40>,
- <0x58004300 0x20>;
- reg-names = "proto", "phy", "pll";
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- ti,hwmods = "dss_dsi1";
- clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>,
- <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
- clock-names = "fck", "sys_clk";
- };
-
- dsi2: encoder@58005000 {
- compatible = "ti,omap4-dsi";
- reg = <0x58005000 0x200>,
- <0x58005200 0x40>,
- <0x58005300 0x20>;
- reg-names = "proto", "phy", "pll";
- interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- ti,hwmods = "dss_dsi2";
- clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>,
- <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
- clock-names = "fck", "sys_clk";
- };
-
- hdmi: encoder@58006000 {
- compatible = "ti,omap4-hdmi";
- reg = <0x58006000 0x200>,
- <0x58006200 0x100>,
- <0x58006300 0x100>,
- <0x58006400 0x1000>;
- reg-names = "wp", "pll", "phy", "core";
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- ti,hwmods = "dss_hdmi";
- clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 9>,
- <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
- clock-names = "fck", "sys_clk";
- dmas = <&sdma 76>;
- dma-names = "audio_tx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x1000000>;
+
+ target-module@1000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x1000 0x4>,
+ <0x1010 0x4>,
+ <0x1014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1000 0x1000>;
+
+ dispc@0 {
+ compatible = "ti,omap4-dispc";
+ reg = <0 0x1000>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ };
+ };
+
+ target-module@2000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x2000 0x4>,
+ <0x2010 0x4>,
+ <0x2014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x1000>;
+
+ rfbi: encoder@0 {
+ reg = <0 0x1000>;
+ status = "disabled";
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>, <&l3_div_ck>;
+ clock-names = "fck", "ick";
+ };
+ };
+
+ target-module@3000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x3000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
+ clock-names = "sys_clk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x3000 0x1000>;
+
+ venc: encoder@0 {
+ compatible = "ti,omap4-venc";
+ reg = <0 0x1000>;
+ status = "disabled";
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 11>;
+ clock-names = "fck";
+ };
+ };
+
+ target-module@4000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x4000 0x4>,
+ <0x4010 0x4>,
+ <0x4014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x4000 0x1000>;
+
+ dsi1: encoder@0 {
+ compatible = "ti,omap4-dsi";
+ reg = <0 0x200>,
+ <0x200 0x40>,
+ <0x300 0x20>;
+ reg-names = "proto", "phy", "pll";
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
+ };
+ };
+
+ target-module@5000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x5000 0x4>,
+ <0x5010 0x4>,
+ <0x5014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x5000 0x1000>;
+
+ dsi2: encoder@0 {
+ compatible = "ti,omap4-dsi";
+ reg = <0 0x200>,
+ <0x200 0x40>,
+ <0x300 0x20>;
+ reg-names = "proto", "phy", "pll";
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
+ };
+ };
+
+ target-module@6000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x6000 0x4>,
+ <0x6010 0x4>;
+ reg-names = "rev", "sysc";
+ /*
+ * Has SYSC_IDLE_SMART and SYSC_IDLE_SMART_WKUP
+ * but HDMI audio will fail with them.
+ */
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>;
+ ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 9>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck", "dss_clk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x6000 0x2000>;
+
+ hdmi: encoder@0 {
+ compatible = "ti,omap4-hdmi";
+ reg = <0 0x200>,
+ <0x200 0x100>,
+ <0x300 0x100>,
+ <0x400 0x1000>;
+ reg-names = "wp", "pll", "phy", "core";
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ clocks = <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 9>,
+ <&l3_dss_clkctrl OMAP4_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
+ dmas = <&sdma 76>;
+ dma-names = "audio_tx";
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/omap5-l4-abe.dtsi b/arch/arm/boot/dts/omap5-l4-abe.dtsi
index 4ec7909df78b..bafd6adf9f45 100644
--- a/arch/arm/boot/dts/omap5-l4-abe.dtsi
+++ b/arch/arm/boot/dts/omap5-l4-abe.dtsi
@@ -426,8 +426,20 @@
};
target-module@f1000 { /* 0x401f1000, ap 32 20.0 */
- compatible = "ti,sysc";
- status = "disabled";
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0xf1000 0x4>,
+ <0xf1010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_AESS_CLKCTRL 0>;
+ clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0xf1000 0x1000>,
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index a7562d3deb1a..2ac7f021c284 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -293,78 +293,185 @@
*/
};
- dss: dss@58000000 {
- compatible = "ti,omap5-dss";
- reg = <0x58000000 0x80>;
- status = "disabled";
- ti,hwmods = "dss_core";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ target-module@58000000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x58000000 4>,
+ <0x58000014 4>;
+ reg-names = "rev", "syss";
+ ti,syss-mask = <1>;
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 0>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 11>;
+ clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
#address-cells = <1>;
#size-cells = <1>;
- ranges;
+ ranges = <0 0x58000000 0x1000000>;
- dispc@58001000 {
- compatible = "ti,omap5-dispc";
- reg = <0x58001000 0x1000>;
- interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "dss_dispc";
+ dss: dss@0 {
+ compatible = "ti,omap5-dss";
+ reg = <0 0x80>;
+ status = "disabled";
clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
clock-names = "fck";
- };
-
- rfbi: encoder@58002000 {
- compatible = "ti,omap5-rfbi";
- reg = <0x58002000 0x100>;
- status = "disabled";
- ti,hwmods = "dss_rfbi";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, <&l3_iclk_div>;
- clock-names = "fck", "ick";
- };
-
- dsi1: encoder@58004000 {
- compatible = "ti,omap5-dsi";
- reg = <0x58004000 0x200>,
- <0x58004200 0x40>,
- <0x58004300 0x40>;
- reg-names = "proto", "phy", "pll";
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- ti,hwmods = "dss_dsi1";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
- <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
- clock-names = "fck", "sys_clk";
- };
-
- dsi2: encoder@58005000 {
- compatible = "ti,omap5-dsi";
- reg = <0x58009000 0x200>,
- <0x58009200 0x40>,
- <0x58009300 0x40>;
- reg-names = "proto", "phy", "pll";
- interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- ti,hwmods = "dss_dsi2";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
- <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
- clock-names = "fck", "sys_clk";
- };
-
- hdmi: encoder@58060000 {
- compatible = "ti,omap5-hdmi";
- reg = <0x58040000 0x200>,
- <0x58040200 0x80>,
- <0x58040300 0x80>,
- <0x58060000 0x19000>;
- reg-names = "wp", "pll", "phy", "core";
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- ti,hwmods = "dss_hdmi";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
- clock-names = "fck", "sys_clk";
- dmas = <&sdma 76>;
- dma-names = "audio_tx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x1000000>;
+
+ target-module@1000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x1000 0x4>,
+ <0x1010 0x4>,
+ <0x1014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1000 0x1000>;
+
+ dispc@0 {
+ compatible = "ti,omap5-dispc";
+ reg = <0 0x1000>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ };
+ };
+
+ target-module@2000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x2000 0x4>,
+ <0x2010 0x4>,
+ <0x2014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x2000 0x1000>;
+
+ rfbi: encoder@0 {
+ compatible = "ti,omap5-rfbi";
+ reg = <0 0x100>;
+ status = "disabled";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, <&l3_iclk_div>;
+ clock-names = "fck", "ick";
+ };
+ };
+
+ target-module@5000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x5000 0x4>,
+ <0x5010 0x4>,
+ <0x5014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x5000 0x1000>;
+
+ dsi1: encoder@0 {
+ compatible = "ti,omap5-dsi";
+ reg = <0 0x200>,
+ <0x200 0x40>,
+ <0x300 0x40>;
+ reg-names = "proto", "phy", "pll";
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ };
+ };
+
+ target-module@9000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x9000 0x4>,
+ <0x9010 0x4>,
+ <0x9014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,syss-mask = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x9000 0x1000>;
+
+ dsi2: encoder@0 {
+ compatible = "ti,omap5-dsi";
+ reg = <0 0x200>,
+ <0x200 0x40>,
+ <0x300 0x40>;
+ reg-names = "proto", "phy", "pll";
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck";
+ };
+ };
+
+ target-module@40000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x40000 0x4>,
+ <0x40010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
+ clock-names = "fck", "dss_clk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x40000 0x40000>;
+
+ hdmi: encoder@0 {
+ compatible = "ti,omap5-hdmi";
+ reg = <0 0x200>,
+ <0x200 0x80>,
+ <0x300 0x80>,
+ <0x20000 0x19000>;
+ reg-names = "wp", "pll", "phy", "core";
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
+ dmas = <&sdma 76>;
+ dma-names = "audio_tx";
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 8b79b4112ee1..2687c4e890ba 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -350,6 +350,7 @@
reg = <0x800000 0x4000>;
gpio-controller;
+ gpio-ranges = <&tlmm_pinmux 0 0 90>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index fa1852eed37b..bf6a03506b45 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -403,6 +403,7 @@
compatible = "qcom,apq8084-pinctrl";
reg = <0xfd510000 0x4000>;
gpio-controller;
+ gpio-ranges = <&tlmm 0 0 147>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 71bb25a8afc0..bfa9ce4c6e69 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -201,6 +201,7 @@
compatible = "qcom,ipq4019-pinctrl";
reg = <0x01000000 0x300000>;
gpio-controller;
+ gpio-ranges = <&tlmm 0 0 100>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 16c0da97932c..4021f661cd11 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -119,6 +119,7 @@
reg = <0x800000 0x4000>;
gpio-controller;
+ gpio-ranges = <&qcom_pinmux 0 0 69>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 356e9535f7a6..347b4f7d7889 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -128,6 +128,7 @@
msmgpio: pinctrl@800000 {
compatible = "qcom,mdm9615-pinctrl";
gpio-controller;
+ gpio-ranges = <&msmgpio 0 0 88>;
#gpio-cells = <2>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index ec5cbc468bd3..480fc08cbe8e 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -115,6 +115,7 @@
reg = <0x800000 0x4000>;
gpio-controller;
+ gpio-ranges = <&tlmm 0 0 173>;
#gpio-cells = <2>;
interrupts = <0 16 0x4>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index f2aeaccdc1ad..172ea3c70eac 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -107,6 +107,7 @@
msmgpio: pinctrl@800000 {
compatible = "qcom,msm8960-pinctrl";
gpio-controller;
+ gpio-ranges = <&msmgpio 0 0 152>;
#gpio-cells = <2>;
interrupts = <0 16 0x4>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
index 3487daf98e81..32b474bfeec3 100644
--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -12,6 +12,7 @@
aliases {
serial0 = &blsp1_uart1;
+ serial1 = &blsp2_uart10;
};
chosen {
@@ -355,6 +356,57 @@
bias-disable;
};
};
+
+ bt_pin: bt {
+ hostwake {
+ pins = "gpio42";
+ function = "gpio";
+ };
+
+ devwake {
+ pins = "gpio62";
+ function = "gpio";
+ };
+
+ shutdown {
+ pins = "gpio41";
+ function = "gpio";
+ };
+ };
+
+ blsp2_uart10_pin_a: blsp2-uart10-pin-active {
+ tx {
+ pins = "gpio53";
+ function = "blsp_uart10";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx {
+ pins = "gpio54";
+ function = "blsp_uart10";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ cts {
+ pins = "gpio55";
+ function = "blsp_uart10";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ rts {
+ pins = "gpio56";
+ function = "blsp_uart10";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
};
sdhci@f9824900 {
@@ -418,6 +470,25 @@
};
};
+ serial@f9960000 {
+ status = "ok";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&blsp2_uart10_pin_a>;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ max-speed = <3000000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_pin>;
+
+ host-wakeup-gpios = <&msmgpio 42 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&msmgpio 62 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&msmgpio 41 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
i2c@f9967000 {
status = "ok";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index 450b8321e0a6..611bae9fe66b 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -260,6 +260,31 @@
};
&soc {
+ usb@f9a55000 {
+ status = "ok";
+
+ phys = <&usb_hs1_phy>;
+ phy-select = <&tcsr 0xb000 0>;
+ extcon = <&smbb>, <&usb_id>;
+ vbus-supply = <&chg_otg>;
+
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+
+ ulpi {
+ phy@a {
+ status = "ok";
+
+ v1p8-supply = <&pm8941_l6>;
+ v3p3-supply = <&pm8941_l24>;
+
+ extcon = <&smbb>;
+ qcom,init-seq = /bits/ 8 <0x1 0x64>;
+ };
+ };
+ };
+
sdhci@f9824900 {
status = "ok";
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 4b161b809dd5..2ea2308d91b3 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -715,6 +715,15 @@
status = "disabled";
};
+ blsp2_uart10: serial@f9960000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0xf9960000 0x1000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART4_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
sdhci@f9824900 {
compatible = "qcom,msm8974-sdhci", "qcom,sdhci-msm-v4";
reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
@@ -925,6 +934,7 @@
compatible = "qcom,msm8974-pinctrl";
reg = <0xfd510000 0x4000>;
gpio-controller;
+ gpio-ranges = <&msmgpio 0 0 146>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/r7s72100-gr-peach.dts b/arch/arm/boot/dts/r7s72100-gr-peach.dts
index fe1a4aa4d7cb..2562cc9b5356 100644
--- a/arch/arm/boot/dts/r7s72100-gr-peach.dts
+++ b/arch/arm/boot/dts/r7s72100-gr-peach.dts
@@ -41,6 +41,9 @@
bank-width = <4>;
device-width = <1>;
+ clocks = <&mstp9_clks R7S72100_CLK_SPIBSC0>;
+ power-domains = <&cpg_clocks>;
+
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index 75b2796ebfca..0a567d8ebc66 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -467,11 +467,12 @@
#clock-cells = <1>;
compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0xfcfe0438 4>;
- clocks = <&p0_clk>, <&p0_clk>, <&p0_clk>, <&p0_clk>;
+ clocks = <&p0_clk>, <&p0_clk>, <&p0_clk>, <&p0_clk>, <&b_clk>, <&b_clk>;
clock-indices = <
R7S72100_CLK_I2C0 R7S72100_CLK_I2C1 R7S72100_CLK_I2C2 R7S72100_CLK_I2C3
+ R7S72100_CLK_SPIBSC0 R7S72100_CLK_SPIBSC1
>;
- clock-output-names = "i2c0", "i2c1", "i2c2", "i2c3";
+ clock-output-names = "i2c0", "i2c1", "i2c2", "i2c3", "spibsc0", "spibsc1";
};
mstp10_clks: mstp10_clks@fcfe043c {
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index a5351ddbf506..b088e8e351d5 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -157,11 +157,8 @@
&cpu0 {
cpu0-supply = <&vdd_dvfs>;
- operating-points = <
- /* kHz uV */
- 1950000 1115000
- 1462500 995000
- >;
+ operating-points = <1950000 1115000>, /* kHz uV */
+ <1462500 995000>;
voltage-tolerance = <1>; /* 1% */
};
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index 1cd19a569bd0..e8b340bb99bc 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -1669,9 +1669,10 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi
index 1c82dd0abd76..def840b8b2d3 100644
--- a/arch/arm/boot/dts/r8a7744.dtsi
+++ b/arch/arm/boot/dts/r8a7744.dtsi
@@ -1655,9 +1655,10 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm-dbhd-ca.dts b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm-dbhd-ca.dts
index 2aeebfc9e4f1..92aa26ba423c 100644
--- a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm-dbhd-ca.dts
+++ b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm-dbhd-ca.dts
@@ -108,6 +108,12 @@
};
};
+&lcd_panel {
+ status = "disabled";
+
+ /delete-node/ port;
+};
+
&pfc {
can1_pins: can1 {
groups = "can1_data_b";
diff --git a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
index 58d369ad8279..b15b1b088a32 100644
--- a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
+++ b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
@@ -30,6 +30,7 @@
/dts-v1/;
#include "r8a7745-iwg22m.dtsi"
+#include <dt-bindings/pwm/pwm.h>
/ {
model = "iWave Systems RainboW-G22D-SODIMM board based on RZ/G1E";
@@ -78,6 +79,49 @@
gpios-states = <1>;
states = <3300000 1>, <1800000 0>;
};
+
+ vccq_panel: regulator-vccq-panel {
+ compatible = "regulator-fixed";
+ regulator-name = "Panel VccQ";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 13 GPIO_ACTIVE_LOW>;
+ enable-active-high;
+ };
+
+ backlight_lcd: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&tpu 3 5000000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ };
+
+ lcd_panel: lcd {
+ compatible = "edt,etm043080dh6gp";
+ power-supply = <&vccq_panel>;
+ backlight = <&backlight_lcd>;
+
+ port {
+ lcd_in: endpoint {
+ remote-endpoint = <&du_out_rgb0>;
+ };
+ };
+ };
+};
+
+&du {
+ pinctrl-0 = <&du0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ ports {
+ port@0 {
+ endpoint {
+ remote-endpoint = <&lcd_in>;
+ };
+ };
+ };
};
&avb {
@@ -127,6 +171,39 @@
status = "okay";
clock-frequency = <400000>;
+ stmpe811@44 {
+ compatible = "st,stmpe811";
+ reg = <0x44>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+
+ /* 3.25 MHz ADC clock speed */
+ st,adc-freq = <1>;
+ /* ADC conversion time: 80 clocks */
+ st,sample-time = <4>;
+ /* 12-bit ADC */
+ st,mod-12b = <1>;
+ /* internal ADC reference */
+ st,ref-sel = <0>;
+
+ stmpe_touchscreen {
+ compatible = "st,stmpe-ts";
+ /* 8 sample average control */
+ st,ave-ctrl = <3>;
+ /* 7-bit fractional part in z */
+ st,fraction-z = <7>;
+ /*
+ * Touchscreen driver current limit:
+ * 50 mA typical, 80 mA max
+ */
+ st,i-drive = <1>;
+ /* 1 ms panel driver settling time */
+ st,settling = <3>;
+ /* 5 ms touch detect interrupt delay */
+ st,touch-det-delay = <5>;
+ };
+ };
+
sgtl5000: codec@a {
compatible = "fsl,sgtl5000";
#sound-dai-cells = <0>;
@@ -149,11 +226,21 @@
function = "avb";
};
+ backlight_pins: backlight {
+ groups = "tpu_to3_c";
+ function = "tpu";
+ };
+
can0_pins: can0 {
groups = "can0_data";
function = "can0";
};
+ du0_pins: du0 {
+ groups = "du0_rgb666", "du0_sync", "du0_disp", "du0_clk0_out";
+ function = "du0";
+ };
+
hscif1_pins: hscif1 {
groups = "hscif1_data", "hscif1_ctrl";
function = "hscif1";
@@ -229,6 +316,12 @@
shared-pin;
};
+&tpu {
+ pinctrl-0 = <&backlight_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
&usbphy {
status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi
index 3f88a7e34af2..7ab58d8bb740 100644
--- a/arch/arm/boot/dts/r8a7745.dtsi
+++ b/arch/arm/boot/dts/r8a7745.dtsi
@@ -1506,11 +1506,12 @@
du: display@feb00000 {
compatible = "renesas,du-r8a7745";
reg = <0 0xfeb00000 0 0x40000>;
- reg-names = "du";
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a77470.dtsi b/arch/arm/boot/dts/r8a77470.dtsi
index 6efcef1670e1..f55153192276 100644
--- a/arch/arm/boot/dts/r8a77470.dtsi
+++ b/arch/arm/boot/dts/r8a77470.dtsi
@@ -942,9 +942,10 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 097fd9317c6e..69745def44d4 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -674,6 +674,7 @@
interrupt-parent = <&irqc0>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
+ reset-gpios = <&gpio5 31 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/r8a7790-stout.dts b/arch/arm/boot/dts/r8a7790-stout.dts
index a315ba749aa4..4138efb2766d 100644
--- a/arch/arm/boot/dts/r8a7790-stout.dts
+++ b/arch/arm/boot/dts/r8a7790-stout.dts
@@ -203,6 +203,7 @@
interrupt-parent = <&irqc0>;
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
+ reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 334ba19769b9..e5ef9fd4284a 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1719,6 +1719,8 @@
clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>,
<&cpg CPG_MOD 722>;
clock-names = "du.0", "du.1", "du.2";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 2b096d5e06fb..687167b70cb6 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -633,6 +633,7 @@
interrupt-parent = <&irqc0>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
+ reset-gpios = <&gpio5 22 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index f9ece7ab2010..a8e0335148a5 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -307,6 +307,7 @@
interrupt-parent = <&irqc0>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
+ reset-gpios = <&gpio5 22 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 59a55e87fcc6..6e5bd86731cd 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1681,9 +1681,10 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 39af16caa2ae..4627eefa502b 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -852,9 +852,10 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 22ca7cd1e7d2..cfe06a74ce89 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -591,6 +591,7 @@
interrupt-parent = <&irqc0>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
+ reset-gpios = <&gpio5 22 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index eef035c4d983..dadbda16161b 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -1341,9 +1341,10 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index f79fce74cd9c..935935c1dbac 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -343,6 +343,7 @@
interrupt-parent = <&irqc0>;
interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
+ reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index 2c16ad854300..9aaa96ea9943 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -394,6 +394,7 @@
interrupt-parent = <&irqc0>;
interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
+ reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 05ef79c6ed7f..2c9e7a1ebfec 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -1356,6 +1356,8 @@
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
+ resets = <&cpg 724>;
+ reset-names = "du.0";
status = "disabled";
ports {
diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts
index fb3cf005cc90..2ff9f152d29b 100644
--- a/arch/arm/boot/dts/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rk3036-kylin.dts
@@ -319,7 +319,7 @@
bus-width = <4>;
cap-sd-highspeed;
cap-sdio-irq;
- default-sample-phase = <90>;
+ rockchip,default-sample-phase = <90>;
keep-power-in-suspend;
mmc-pwrseq = <&sdio_pwrseq>;
non-removable;
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index cf36e25195b4..781ac7583522 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -54,7 +54,7 @@
};
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -101,7 +101,7 @@
#clock-cells = <0>;
};
- bus_intmem@10080000 {
+ bus_intmem: sram@10080000 {
compatible = "mmio-sram";
reg = <0x10080000 0x2000>;
#address-cells = <1>;
@@ -263,7 +263,7 @@
clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
<&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
- default-sample-phase = <158>;
+ rockchip,default-sample-phase = <158>;
disable-wp;
dmas = <&pdma 12>;
dma-names = "rx-tx";
@@ -281,8 +281,6 @@
compatible = "rockchip,rk3036-i2s", "rockchip,rk3066-i2s";
reg = <0x10220000 0x4000>;
interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
clock-names = "i2s_clk", "i2s_hclk";
clocks = <&cru SCLK_I2S>, <&cru HCLK_I2S>;
dmas = <&pdma 0>, <&pdma 1>;
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index 3d1b02f45ffd..b599394d149d 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -156,14 +156,12 @@
compatible = "rockchip,rk3066-i2s";
reg = <0x10118000 0x2000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
+ clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S0>;
+ clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac1_s 4>, <&dmac1_s 5>;
dma-names = "tx", "rx";
- clock-names = "i2s_hclk", "i2s_clk";
- clocks = <&cru HCLK_I2S0>, <&cru SCLK_I2S0>;
rockchip,playback-channels = <8>;
rockchip,capture-channels = <2>;
#sound-dai-cells = <0>;
@@ -174,14 +172,12 @@
compatible = "rockchip,rk3066-i2s";
reg = <0x1011a000 0x2000>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2s1_bus>;
+ clocks = <&cru SCLK_I2S1>, <&cru HCLK_I2S1>;
+ clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac1_s 6>, <&dmac1_s 7>;
dma-names = "tx", "rx";
- clock-names = "i2s_hclk", "i2s_clk";
- clocks = <&cru HCLK_I2S1>, <&cru SCLK_I2S1>;
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
#sound-dai-cells = <0>;
@@ -192,14 +188,12 @@
compatible = "rockchip,rk3066-i2s";
reg = <0x1011c000 0x2000>;
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2s2_bus>;
+ clocks = <&cru SCLK_I2S2>, <&cru HCLK_I2S2>;
+ clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac1_s 9>, <&dmac1_s 10>;
dma-names = "tx", "rx";
- clock-names = "i2s_hclk", "i2s_clk";
- clocks = <&cru HCLK_I2S2>, <&cru SCLK_I2S2>;
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
#sound-dai-cells = <0>;
diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
index ad1afd403052..66a0ff196eb1 100644
--- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts
+++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts
@@ -58,20 +58,25 @@
lvds-encoder {
compatible = "ti,sn75lvds83", "lvds-encoder";
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- lvds_in_vop0: endpoint {
- remote-endpoint = <&vop0_out_lvds>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ lvds_in_vop0: endpoint {
+ remote-endpoint = <&vop0_out_lvds>;
+ };
};
- };
- port@1 {
- reg = <1>;
- lvds_out_panel: endpoint {
- remote-endpoint = <&panel_in_lvds>;
+ port@1 {
+ reg = <1>;
+
+ lvds_out_panel: endpoint {
+ remote-endpoint = <&panel_in_lvds>;
+ };
};
};
};
@@ -465,7 +470,7 @@
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>;
- vmmcq-supply = <&vccio_wl>;
+ vqmmc-supply = <&vccio_wl>;
#address-cells = <1>;
#size-cells = <0>;
status = "okay";
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 10ede65d90f3..2298a8d840ba 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -166,14 +166,12 @@
compatible = "rockchip,rk3188-i2s", "rockchip,rk3066-i2s";
reg = <0x1011a000 0x2000>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
+ clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S0>;
+ clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac1_s 6>, <&dmac1_s 7>;
dma-names = "tx", "rx";
- clock-names = "i2s_hclk", "i2s_clk";
- clocks = <&cru HCLK_I2S0>, <&cru SCLK_I2S0>;
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
#sound-dai-cells = <0>;
@@ -184,8 +182,8 @@
compatible = "rockchip,rk3188-spdif", "rockchip,rk3066-spdif";
reg = <0x1011e000 0x2000>;
#sound-dai-cells = <0>;
- clock-names = "hclk", "mclk";
- clocks = <&cru HCLK_SPDIF>, <&cru SCLK_SPDIF>;
+ clocks = <&cru SCLK_SPDIF>, <&cru HCLK_SPDIF>;
+ clock-names = "mclk", "hclk";
dmas = <&dmac1_s 8>;
dma-names = "tx";
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 4e90efdc9630..06172ebbf0ce 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -95,7 +95,7 @@
};
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -152,8 +152,6 @@
compatible = "rockchip,rk3228-i2s", "rockchip,rk3066-i2s";
reg = <0x100b0000 0x4000>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
clock-names = "i2s_clk", "i2s_hclk";
clocks = <&cru SCLK_I2S1>, <&cru HCLK_I2S1_8CH>;
dmas = <&pdma 14>, <&pdma 15>;
@@ -167,8 +165,6 @@
compatible = "rockchip,rk3228-i2s", "rockchip,rk3066-i2s";
reg = <0x100c0000 0x4000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
clock-names = "i2s_clk", "i2s_hclk";
clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S0_8CH>;
dmas = <&pdma 11>, <&pdma 12>;
@@ -193,8 +189,6 @@
compatible = "rockchip,rk3228-i2s", "rockchip,rk3066-i2s";
reg = <0x100e0000 0x4000>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
clock-names = "i2s_clk", "i2s_hclk";
clocks = <&cru SCLK_I2S2>, <&cru HCLK_I2S2_2CH>;
dmas = <&pdma 0>, <&pdma 1>;
@@ -698,7 +692,7 @@
<&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
bus-width = <8>;
- default-sample-phase = <158>;
+ rockchip,default-sample-phase = <158>;
fifo-depth = <0x100>;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
@@ -718,7 +712,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <280>;
g-tx-fifo-size = <256 128 128 64 32 16>;
- g-use-dma;
phys = <&u2phy0_otg>;
phy-names = "usb2-phy";
status = "disabled";
@@ -729,7 +722,6 @@
reg = <0x30080000 0x20000>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST0>, <&u2phy0>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy0_host>;
phy-names = "usb";
status = "disabled";
@@ -740,7 +732,6 @@
reg = <0x300a0000 0x20000>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST0>, <&u2phy0>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy0_host>;
phy-names = "usb";
status = "disabled";
@@ -751,7 +742,6 @@
reg = <0x300c0000 0x20000>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST1>, <&u2phy1>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy1_otg>;
phy-names = "usb";
status = "disabled";
@@ -762,7 +752,6 @@
reg = <0x300e0000 0x20000>;
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST1>, <&u2phy1>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy1_otg>;
phy-names = "usb";
status = "disabled";
@@ -775,7 +764,6 @@
clocks = <&cru HCLK_HOST2>, <&u2phy1>;
phys = <&u2phy1_host>;
phy-names = "usb";
- clock-names = "usbhost", "utmi";
status = "disabled";
};
@@ -784,7 +772,6 @@
reg = <0x30120000 0x20000>;
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST2>, <&u2phy1>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy1_host>;
phy-names = "usb";
status = "disabled";
diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
index 80080767c365..be695b8c1f67 100644
--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
+++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
@@ -4,6 +4,7 @@
#include "rk3288-evb.dtsi"
/ {
+ model = "Rockchip RK3288 EVB ACT8846";
compatible = "rockchip,rk3288-evb-act8846", "rockchip,rk3288";
vcc_lcd: vcc-lcd {
diff --git a/arch/arm/boot/dts/rk3288-evb-rk808.dts b/arch/arm/boot/dts/rk3288-evb-rk808.dts
index 16788209625b..42384ea4ca21 100644
--- a/arch/arm/boot/dts/rk3288-evb-rk808.dts
+++ b/arch/arm/boot/dts/rk3288-evb-rk808.dts
@@ -4,6 +4,7 @@
#include "rk3288-evb.dtsi"
/ {
+ model = "Rockchip RK3288 EVB RK808";
compatible = "rockchip,rk3288-evb-rk808", "rockchip,rk3288";
};
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts
index 1574383fd2dc..8c38bda21a7c 100644
--- a/arch/arm/boot/dts/rk3288-firefly-reload.dts
+++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts
@@ -234,6 +234,7 @@
};
&saradc {
+ vref-supply = <&vcc_18>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/rk3288-r89.dts b/arch/arm/boot/dts/rk3288-r89.dts
index a6ffc381abaa..a258c7ae5329 100644
--- a/arch/arm/boot/dts/rk3288-r89.dts
+++ b/arch/arm/boot/dts/rk3288-r89.dts
@@ -9,6 +9,7 @@
#include "rk3288.dtsi"
/ {
+ model = "Netxeon R89";
compatible = "netxeon,r89", "rockchip,rk3288";
memory@0 {
diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
index 312582c1bd37..acfaa12ec239 100644
--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
+++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
@@ -276,6 +276,7 @@
};
vccio_sd: LDO_REG5 {
+ regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/rk3288-vyasa.dts b/arch/arm/boot/dts/rk3288-vyasa.dts
index ba06e9f97ddc..385dd59393e1 100644
--- a/arch/arm/boot/dts/rk3288-vyasa.dts
+++ b/arch/arm/boot/dts/rk3288-vyasa.dts
@@ -78,6 +78,18 @@
vin-supply = <&vcc_io>;
};
+ vcc50_hdmi: vcc50-hdmi {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc50_hdmi";
+ enable-active-high;
+ gpio = <&gpio7 RK_PB4 GPIO_ACTIVE_HIGH>; /* HDMI_EN */
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc50_hdmi_en>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vsus_5v>;
+ };
+
vusb1_5v: vusb1-5v {
compatible = "regulator-fixed";
regulator-name = "vusb1_5v";
@@ -150,7 +162,7 @@
};
&hdmi {
- ddc-i2c-bus = <&i2c2>;
+ ddc-i2c-bus = <&i2c5>;
status = "okay";
};
@@ -286,15 +298,15 @@
};
};
- vcc10_lcd: LDO_REG6 {
- regulator-name = "vcc10_lcd";
+ vdd10_lcd: LDO_REG6 {
+ regulator-name = "vdd10_lcd";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
- regulator-suspend-microvolt = <1800000>;
+ regulator-suspend-microvolt = <1000000>;
};
};
@@ -347,7 +359,7 @@
};
};
-&i2c2 {
+&i2c5 {
status = "okay";
};
@@ -446,6 +458,12 @@
};
};
+ hdmi {
+ vcc50_hdmi_en: vcc50-hdmi-en {
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
pmic_int: pmic-int {
rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 9beb662166aa..0cd88774db95 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -155,7 +155,7 @@
};
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
@@ -420,6 +420,8 @@
reg-io-width = <4>;
clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac_peri 1>, <&dmac_peri 2>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&uart0_xfer>;
status = "disabled";
@@ -433,6 +435,8 @@
reg-io-width = <4>;
clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac_peri 3>, <&dmac_peri 4>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&uart1_xfer>;
status = "disabled";
@@ -459,6 +463,8 @@
reg-io-width = <4>;
clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac_peri 7>, <&dmac_peri 8>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&uart3_xfer>;
status = "disabled";
@@ -472,6 +478,8 @@
reg-io-width = <4>;
clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac_peri 9>, <&dmac_peri 10>;
+ dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&uart4_xfer>;
status = "disabled";
@@ -601,7 +609,6 @@
reg = <0x0 0xff500000 0x0 0x100>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_USBHOST0>;
- clock-names = "usbhost";
phys = <&usbphy1>;
phy-names = "usb";
status = "disabled";
@@ -644,7 +651,6 @@
reg = <0x0 0xff5c0000 0x0 0x100>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HSIC>;
- clock-names = "usbhost";
status = "disabled";
};
@@ -718,7 +724,7 @@
status = "disabled";
};
- bus_intmem@ff700000 {
+ bus_intmem: sram@ff700000 {
compatible = "mmio-sram";
reg = <0x0 0xff700000 0x0 0x18000>;
#address-cells = <1>;
@@ -730,7 +736,7 @@
};
};
- sram@ff720000 {
+ pmu_sram: sram@ff720000 {
compatible = "rockchip,rk3288-pmu-sram", "mmio-sram";
reg = <0x0 0xff720000 0x0 0x1000>;
};
@@ -946,8 +952,8 @@
compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
reg = <0x0 0xff8b0000 0x0 0x10000>;
#sound-dai-cells = <0>;
- clock-names = "hclk", "mclk";
- clocks = <&cru HCLK_SPDIF8CH>, <&cru SCLK_SPDIF8CH>;
+ clocks = <&cru SCLK_SPDIF8CH>, <&cru HCLK_SPDIF8CH>;
+ clock-names = "mclk", "hclk";
dmas = <&dmac_bus_s 3>;
dma-names = "tx";
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
@@ -962,12 +968,10 @@
reg = <0x0 0xff890000 0x0 0x10000>;
#sound-dai-cells = <0>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
+ clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S0>;
+ clock-names = "i2s_clk", "i2s_hclk";
dmas = <&dmac_bus_s 0>, <&dmac_bus_s 1>;
dma-names = "tx", "rx";
- clock-names = "i2s_hclk", "i2s_clk";
- clocks = <&cru HCLK_I2S0>, <&cru SCLK_I2S0>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
rockchip,playback-channels = <8>;
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index 241f43e29c77..f9fcb7e9657b 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -32,7 +32,7 @@
spi1 = &spi1;
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -91,7 +91,7 @@
status = "disabled";
};
- L2: l2-cache-controller@10138000 {
+ L2: cache-controller@10138000 {
compatible = "arm,pl310-cache";
reg = <0x10138000 0x1000>;
cache-unified;
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index 1fd06e7cb983..f9cfe2c80791 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -85,7 +85,7 @@
#clock-cells = <0>;
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -102,7 +102,7 @@
};
};
- bus_intmem@10080000 {
+ bus_intmem: sram@10080000 {
compatible = "mmio-sram";
reg = <0x10080000 0x2000>;
#address-cells = <1>;
@@ -120,7 +120,6 @@
clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
clock-names = "baudclk", "apb_pclk";
dmas = <&pdma 6>, <&pdma 7>;
- #dma-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&uart2m0_xfer>;
status = "disabled";
@@ -136,7 +135,6 @@
clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
clock-names = "baudclk", "apb_pclk";
dmas = <&pdma 4>, <&pdma 5>;
- #dma-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&uart1_xfer>;
status = "disabled";
@@ -152,7 +150,6 @@
clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
clock-names = "baudclk", "apb_pclk";
dmas = <&pdma 2>, <&pdma 3>;
- #dma-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
status = "disabled";
@@ -208,7 +205,6 @@
clock-names = "spiclk", "apb_pclk";
dmas = <&pdma 8>, <&pdma 9>;
dma-names = "tx", "rx";
- #dma-cells = <2>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -370,7 +366,6 @@
reg = <0x1038c000 0x100>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
#io-channel-cells = <1>;
- clock-frequency = <1000000>;
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
clock-names = "saradc", "apb_pclk";
status = "disabled";
@@ -499,7 +494,6 @@
reg = <0x30140000 0x20000>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST0>, <&u2phy>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy_host>;
phy-names = "usb";
status = "disabled";
@@ -510,7 +504,6 @@
reg = <0x30160000 0x20000>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST0>, <&u2phy>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy_host>;
phy-names = "usb";
status = "disabled";
@@ -527,7 +520,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <280>;
g-tx-fifo-size = <256 128 128 64 32 16>;
- g-use-dma;
phys = <&u2phy_otg>;
phy-names = "usb2-phy";
status = "disabled";
diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
index 326b39328b58..6763423d64b8 100644
--- a/arch/arm/boot/dts/sam9x60.dtsi
+++ b/arch/arm/boot/dts/sam9x60.dtsi
@@ -686,6 +686,14 @@
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&clk32k 0>;
};
+
+ watchdog: watchdog@ffffff80 {
+ compatible = "microchip,sam9x60-wdt";
+ reg = <0xffffff80 0x24>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&clk32k 0>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 2012b7407c60..ab550d69db91 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -695,7 +695,7 @@
};
rtc: rtc@f80480b0 {
- compatible = "atmel,at91rm9200-rtc";
+ compatible = "atmel,sama5d2-rtc";
reg = <0xf80480b0 0x30>;
interrupts = <74 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&clk32k>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 203d40be70a5..f11b018e9173 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -159,8 +159,11 @@
dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(7)>,
<&dma0 2 AT91_DMA_CFG_PER_ID(8)>;
dma-names = "tx", "rx";
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ sda-gpios = <&pioA 30 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 31 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&twi0_clk>;
@@ -174,8 +177,11 @@
dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(9)>,
<&dma0 2 AT91_DMA_CFG_PER_ID(10)>;
dma-names = "tx", "rx";
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ sda-gpios = <&pioC 26 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioC 27 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&twi1_clk>;
@@ -357,8 +363,11 @@
dmas = <&dma1 2 AT91_DMA_CFG_PER_ID(11)>,
<&dma1 2 AT91_DMA_CFG_PER_ID(12)>;
dma-names = "tx", "rx";
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ sda-gpios = <&pioA 18 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 19 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&twi2_clk>;
@@ -639,6 +648,12 @@
<AT91_PIOA 30 AT91_PERIPH_A AT91_PINCTRL_NONE /* PA30 periph A TWD0 pin, conflicts with URXD1, ISI_VSYNC */
AT91_PIOA 31 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PA31 periph A TWCK0 pin, conflicts with UTXD1, ISI_HSYNC */
};
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ atmel,pins =
+ <AT91_PIOA 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOA 31 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
};
i2c1 {
@@ -647,6 +662,12 @@
<AT91_PIOC 26 AT91_PERIPH_B AT91_PINCTRL_NONE /* PC26 periph B TWD1 pin, conflicts with SPI1_NPCS1, ISI_D11 */
AT91_PIOC 27 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC27 periph B TWCK1 pin, conflicts with SPI1_NPCS2, ISI_D10 */
};
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ atmel,pins =
+ <AT91_PIOC 26 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOC 27 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
};
i2c2 {
@@ -655,6 +676,12 @@
<AT91_PIOA 18 AT91_PERIPH_B AT91_PINCTRL_NONE /* TWD2 pin, conflicts with LCDDAT18, ISI_D2 */
AT91_PIOA 19 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* TWCK2 pin, conflicts with LCDDAT19, ISI_D3 */
};
+
+ pinctrl_i2c2_gpio: i2c2-gpio {
+ atmel,pins =
+ <AT91_PIOA 18 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOA 19 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
};
isi {
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 6ab27a7b388d..c9c0316b5b0e 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -458,8 +458,11 @@
(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
| AT91_XDMAC_DT_PERID(3))>;
dma-names = "tx", "rx";
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ sda-gpios = <&pioA 30 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 31 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 32>;
@@ -477,8 +480,11 @@
(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
| AT91_XDMAC_DT_PERID(5))>;
dma-names = "tx", "rx";
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ sda-gpios = <&pioE 29 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioE 30 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 33>;
@@ -519,8 +525,11 @@
(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
| AT91_XDMAC_DT_PERID(7))>;
dma-names = "tx", "rx";
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ sda-gpios = <&pioB 29 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioB 30 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 34>;
@@ -884,7 +893,7 @@
};
rtc@fc0686b0 {
- compatible = "atmel,at91rm9200-rtc";
+ compatible = "atmel,sama5d4-rtc";
reg = <0xfc0686b0 0x30>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&clk32k>;
@@ -1122,6 +1131,12 @@
<AT91_PIOA 30 AT91_PERIPH_A AT91_PINCTRL_NONE
AT91_PIOA 31 AT91_PERIPH_A AT91_PINCTRL_NONE>;
};
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ atmel,pins =
+ <AT91_PIOA 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOA 31 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
};
i2c1 {
@@ -1130,6 +1145,12 @@
<AT91_PIOE 29 AT91_PERIPH_C AT91_PINCTRL_NONE /* TWD1, conflicts with UART0 RX and DIBP */
AT91_PIOE 30 AT91_PERIPH_C AT91_PINCTRL_NONE>; /* TWCK1, conflicts with UART0 TX and DIBN */
};
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ atmel,pins =
+ <AT91_PIOE 29 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOE 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
};
i2c2 {
@@ -1138,6 +1159,12 @@
<AT91_PIOB 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* TWD2, conflicts with RD0 and PWML1 */
AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* TWCK2, conflicts with RF0 */
};
+
+ pinctrl_i2c2_gpio: i2c2-gpio {
+ atmel,pins =
+ <AT91_PIOB 29 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOB 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
};
isi {
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g.dts b/arch/arm/boot/dts/sh73a0-kzm9g.dts
index 1916f31a30ff..5a8d92a061df 100644
--- a/arch/arm/boot/dts/sh73a0-kzm9g.dts
+++ b/arch/arm/boot/dts/sh73a0-kzm9g.dts
@@ -25,12 +25,9 @@
cpus {
cpu@0 {
cpu0-supply = <&vdd_dvfs>;
- operating-points = <
- /* kHz uV */
- 1196000 1315000
- 598000 1175000
- 398667 1065000
- >;
+ operating-points = <1196000 1315000>, /* kHz uV */
+ < 598000 1175000>,
+ < 398667 1065000>;
voltage-tolerance = <1>; /* 1% */
};
};
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 2a86e72d9791..3b8571b8b412 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -431,8 +431,8 @@
snps,perfect-filter-entries = <128>;
tx-fifo-depth = <4096>;
rx-fifo-depth = <16384>;
- clocks = <&l4_mp_clk>;
- clock-names = "stmmaceth";
+ clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ clock-names = "stmmaceth", "ptp_ref";
resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>;
reset-names = "stmmaceth", "stmmaceth-ocp";
snps,axi-config = <&socfpga_axi_setup>;
@@ -451,8 +451,8 @@
snps,perfect-filter-entries = <128>;
tx-fifo-depth = <4096>;
rx-fifo-depth = <16384>;
- clocks = <&l4_mp_clk>;
- clock-names = "stmmaceth";
+ clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ clock-names = "stmmaceth", "ptp_ref";
resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>;
reset-names = "stmmaceth", "stmmaceth-ocp";
snps,axi-config = <&socfpga_axi_setup>;
@@ -471,8 +471,8 @@
snps,perfect-filter-entries = <128>;
tx-fifo-depth = <4096>;
rx-fifo-depth = <16384>;
- clocks = <&l4_mp_clk>;
- clock-names = "stmmaceth";
+ clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ clock-names = "stmmaceth", "ptp_ref";
resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>;
reset-names = "stmmaceth", "stmmaceth-ocp";
snps,axi-config = <&socfpga_axi_setup>;
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_nand.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_nand.dts
index 9bd9e04c7361..9aa897b79544 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk_nand.dts
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_nand.dts
@@ -16,11 +16,11 @@
partition@0 {
label = "Boot and fpga data";
- reg = <0x0 0x1C00000>;
+ reg = <0x0 0x02000000>;
};
partition@1c00000 {
label = "Root Filesystem - JFFS2";
- reg = <0x1C00000 0x6400000>;
+ reg = <0x02000000 0x06000000>;
};
};
};
diff --git a/arch/arm/boot/dts/ste-ab8500.dtsi b/arch/arm/boot/dts/ste-ab8500.dtsi
index 14d4d8617d75..3cd6ee6d50e0 100644
--- a/arch/arm/boot/dts/ste-ab8500.dtsi
+++ b/arch/arm/boot/dts/ste-ab8500.dtsi
@@ -314,13 +314,13 @@
mcde@a0350000 {
vana-supply = <&ab8500_ldo_ana_reg>;
- dsi@a0351000 {
+ dsi-controller@a0351000 {
vana-supply = <&ab8500_ldo_ana_reg>;
};
- dsi@a0352000 {
+ dsi-controller@a0352000 {
vana-supply = <&ab8500_ldo_ana_reg>;
};
- dsi@a0353000 {
+ dsi-controller@a0353000 {
vana-supply = <&ab8500_ldo_ana_reg>;
};
};
diff --git a/arch/arm/boot/dts/ste-ab8505.dtsi b/arch/arm/boot/dts/ste-ab8505.dtsi
index c72aa250bf6f..67bc69e67b33 100644
--- a/arch/arm/boot/dts/ste-ab8505.dtsi
+++ b/arch/arm/boot/dts/ste-ab8505.dtsi
@@ -261,13 +261,13 @@
mcde@a0350000 {
vana-supply = <&ab8500_ldo_ana_reg>;
- dsi@a0351000 {
+ dsi-controller@a0351000 {
vana-supply = <&ab8500_ldo_ana_reg>;
};
- dsi@a0352000 {
+ dsi-controller@a0352000 {
vana-supply = <&ab8500_ldo_ana_reg>;
};
- dsi@a0353000 {
+ dsi-controller@a0353000 {
vana-supply = <&ab8500_ldo_ana_reg>;
};
};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 6671f74c9f03..3e10da3f8fd3 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -1097,7 +1097,7 @@
ranges;
status = "disabled";
- dsi0: dsi@a0351000 {
+ dsi0: dsi-controller@a0351000 {
compatible = "ste,mcde-dsi";
reg = <0xa0351000 0x1000>;
clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
@@ -1105,7 +1105,7 @@
#address-cells = <1>;
#size-cells = <0>;
};
- dsi1: dsi@a0352000 {
+ dsi1: dsi-controller@a0352000 {
compatible = "ste,mcde-dsi";
reg = <0xa0352000 0x1000>;
clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
@@ -1113,7 +1113,7 @@
#address-cells = <1>;
#size-cells = <0>;
};
- dsi2: dsi@a0353000 {
+ dsi2: dsi-controller@a0353000 {
compatible = "ste,mcde-dsi";
reg = <0xa0353000 0x1000>;
/* This DSI port only has the Low Power / Energy Save clock */
diff --git a/arch/arm/boot/dts/ste-href-stuib.dtsi b/arch/arm/boot/dts/ste-href-stuib.dtsi
index e32d0c36feb8..b8fd8f18ba16 100644
--- a/arch/arm/boot/dts/ste-href-stuib.dtsi
+++ b/arch/arm/boot/dts/ste-href-stuib.dtsi
@@ -199,7 +199,7 @@
mcde@a0350000 {
status = "okay";
- dsi@a0351000 {
+ dsi-controller@a0351000 {
panel {
compatible = "samsung,s6d16d0";
reg = <0>;
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
index e024520f4d47..de82b9db956f 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
@@ -66,7 +66,7 @@
mcde@a0350000 {
status = "okay";
- dsi@a0351000 {
+ dsi-controller@a0351000 {
panel {
compatible = "samsung,s6d16d0";
reg = <0>;
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
index cb3677f0a1cb..9f285c7cf914 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
@@ -45,7 +45,7 @@
mcde@a0350000 {
status = "okay";
- dsi@a0351000 {
+ dsi-controller@a0351000 {
panel {
compatible = "sony,acx424akp";
reg = <0>;
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
new file mode 100644
index 000000000000..292ed5286652
--- /dev/null
+++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Devicetree for the Samsung XCover 2 GT-S7710 also known as Skomer.
+ */
+
+/dts-v1/;
+#include "ste-db8500.dtsi"
+#include "ste-ab8505.dtsi"
+#include "ste-dbx5x0-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ model = "Samsung XCover 2 (GT-S7710)";
+ compatible = "samsung,skomer", "st-ericsson,u8500";
+
+ chosen {
+ stdout-path = &serial2;
+ };
+
+ /* TI TXS0206 level translator for 2.9 V */
+ sd_level_translator: regulator-gpio {
+ compatible = "regulator-fixed";
+
+ /* GPIO87 EN */
+ gpios = <&gpio2 23 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ regulator-name = "sd-level-translator";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-type = "voltage";
+
+ startup-delay-us = <200>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd_level_translator_default>;
+ };
+
+ /* External LDO MIC5366-3.3YMT for eMMC */
+ ldo_3v3_reg: regulator-gpio-ldo-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "en-3v3-fixed-supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio6 31 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <5000>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_ldo_en_default_mode>;
+ };
+
+ wlan_en: regulator-gpio-wlan-en {
+ compatible = "regulator-fixed";
+ regulator-name = "wl-reg-on";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ startup-delay-us = <200000>;
+ /* GPIO215 WLAN_EN */
+ gpio = <&gpio6 23 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_en_default_mode>;
+ };
+
+ vibrator {
+ compatible = "gpio-vibrator";
+ enable-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vibrator_default>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_default_mode>;
+
+ button-home {
+ linux,code = <KEY_HOME>;
+ label = "HOME";
+ /* GPIO91 */
+ gpios = <&gpio2 27 GPIO_ACTIVE_LOW>;
+ };
+ button-volup {
+ linux,code = <KEY_VOLUMEUP>;
+ label = "VOL+";
+ /* GPIO67 */
+ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+ };
+ button-voldown {
+ linux,code = <KEY_VOLUMEDOWN>;
+ label = "VOL-";
+ /* GPIO92 */
+ gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
+ };
+ button-menu {
+ linux,code = <KEY_MENU>;
+ label = "MENU";
+ /* GPIO204 */
+ gpios = <&gpio6 12 GPIO_ACTIVE_LOW>;
+ };
+ button-back {
+ linux,code = <KEY_BACK>;
+ label = "BACK";
+ /* GPIO205 */
+ gpios = <&gpio6 13 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ /*
+ * FIXME: this is not quite GPIO backlight. This is a
+ * KTD253 one-wire GPIO-controlled backlight. It can
+ * work as a GPIO backlight.
+ */
+ gpio_bl: backlight {
+ compatible = "gpio-backlight";
+ /* GPIO 69 */
+ gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_backlight_default_mode>;
+ };
+
+ i2c-gpio-0 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio4 16 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio4 15 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_gpio_0_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* TODO: this should be used by the NCP6914 Camera power management unit */
+ };
+
+ i2c-gpio-1 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio4 24 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio4 23 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_gpio_1_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* TODO: this should be used by the ALPS HSCDTD008A compass sensor */
+ };
+
+ soc {
+ // External Micro SD slot
+ sdi0_per1@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ /* All direction control is used */
+ st,sig-dir-cmd;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-pin-fbclk;
+ full-pwr-cycle;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+ vqmmc-supply = <&sd_level_translator>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mc0_a_1_default>;
+ pinctrl-1 = <&mc0_a_1_sleep>;
+ status = "okay";
+ };
+
+ // WLAN SDIO channel
+ sdi1_per2@80118000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ non-removable;
+ cap-sd-highspeed;
+ vmmc-supply = <&wlan_en>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mc1_a_2_default>;
+ pinctrl-1 = <&mc1_a_2_sleep>;
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
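+ /* The subnode unit address is the SDIO function number; the WLAN core is function 1 */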
+ wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ reg = <1>;
+ /* GPIO216 WL_HOST_WAKE */
+ interrupt-parent = <&gpio6>;
+ interrupts = <24 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_default_mode>;
+ };
+ };
+
+ // eMMC
+ sdi2_per3@80005000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ vmmc-supply = <&ldo_3v3_reg>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mc2_a_1_default>;
+ pinctrl-1 = <&mc2_a_1_sleep>;
+
+ status = "okay";
+ };
+
+ /* GBF (Bluetooth) UART */
+ uart@80120000 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&u0_a_1_default>;
+ pinctrl-1 = <&u0_a_1_sleep>;
+ status = "okay";
+
+ /* FIXME: not quite working yet, probably needs regulators */
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio3 1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bluetooth_default_mode>;
+ };
+ };
+
+ /* GPS UART */
+ uart@80121000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&u1rxtx_a_1_default &u1ctsrts_a_1_default>;
+ pinctrl-1 = <&u1rxtx_a_1_sleep &u1ctsrts_a_1_sleep>;
+ };
+
+ /* Debugging console UART connected to AB8505 USB */
+ uart@80007000 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&u2rxtx_c_1_default>;
+ pinctrl-1 = <&u2rxtx_c_1_sleep>;
+ };
+
+ prcmu@80157000 {
+ ab8505 {
+ ab8500_usb {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&usb_a_1_default>;
+ pinctrl-1 = <&usb_a_1_sleep>;
+ };
+
+ ab8505-regulators {
+ ab8500_ldo_aux1 {
+ /* Used for VDD for sensors */
+ regulator-name = "AUX1";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ab8500_ldo_aux2 {
+ /* Supplies the Cypress TMA140 touchscreen only with 3.3V */
+ regulator-name = "AUX2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ab8500_ldo_aux3 {
+ /* Used for voltage for external MMC/SD card */
+ regulator-name = "AUX3";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ab8500_ldo_aux4 {
+ regulator-name = "AUX4";
+ /* Hammer to 3.0V for the display */
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ ab8500_ldo_aux5 {
+ regulator-name = "AUX5";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <2790000>;
+ regulator-always-on;
+ };
+
+ ab8500_ldo_aux6 {
+ regulator-name = "AUX6";
+ /* Hammer to 1.8V for the display */
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ab8500_ldo_aux8 {
+ /* Mostly VIO for sensors */
+ regulator-name = "AUX8";
+ };
+ };
+ };
+ };
+
+ /* I2C0 */
+ i2c@80004000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c0_a_1_default>;
+ pinctrl-1 = <&i2c0_a_1_sleep>;
+
+ proximity@44 {
+ compatible = "sharp,gp2ap002s00f";
+ clock-frequency = <400000>;
+ reg = <0x44>;
+
+ interrupt-parent = <&gpio4>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vio-supply = <&ab8500_ldo_aux8_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gp2ap002_skomer_default>;
+ sharp,proximity-far-hysteresis = /bits/ 8 <0x2f>;
+ sharp,proximity-close-hysteresis = /bits/ 8 <0x0f>;
+ };
+ };
+
+ /* I2C2 */
+ i2c@80128000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c2_b_2_default>;
+ pinctrl-1 = <&i2c2_b_2_sleep>;
+
+ accel@18 {
+ compatible = "bosch,bma254";
+ clock-frequency = <400000>;
+ reg = <0x18>;
+
+ /* GPIO224 used as "smart alert" interrupt */
+ interrupt-parent = <&gpio7>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+
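+ /* Rotation matrix mapping the accelerometer's X/Y/Z axes onto the device axes */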
+ mount-matrix = "0", "1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vddio-supply = <&ab8500_ldo_aux8_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bma254_skomer_default>;
+ };
+ };
+
+ /* I2C3 */
+ i2c@80110000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c3_c_2_default>;
+ pinctrl-1 = <&i2c3_c_2_sleep>;
+
+ /* TODO: this should be used by the Cypress TMA140 touchscreen */
+ };
+
+ mcde@a0350000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsi_default_mode>;
+
+ dsi-controller@a0351000 {
+ panel {
+ /* NT35510-based Hydis HVA40WV1 */
+ compatible = "hydis,hva40wv1", "novatek,nt35510";
+ reg = <0>;
+ /* v_lcd_3v0 2.3-4.8V */
+ vdd-supply = <&ab8500_ldo_aux4_reg>;
+ /* v_lcd_1v8 1.65-3.3V */
+ vddi-supply = <&ab8500_ldo_aux6_reg>;
+ /* GPIO 139 */
+ reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&display_default_mode>;
+ backlight = <&gpio_bl>;
+ };
+ };
+ };
+ };
+};
+
+&pinctrl {
+ /*
+ * This extends the MC0 default config to include DAT31DIR
+ * which is used by this machine. If we don't do this the
+ * SD card does not work.
+ */
+ sdi0 {
+ mc0_a_1_default {
+ default_mux {
+ function = "mc0";
+ /* This machine uses the DAT31 pin */
+ groups = "mc0_a_1", "mc0dat31dir_a_1";
+ };
+ default_cfg5 {
+ pins = "GPIO21_AB3"; /* DAT31DIR */
+ ste,config = <&out_hi>;
+ };
+ };
+ };
+
+ mcde {
+ dsi_default_mode: dsi_default {
+ default_mux1 {
+ /* Mux in VSI0 used for DSI TE */
+ function = "lcd";
+ groups = "lcdvsi0_a_1"; /* VSI0 for LCD */
+ };
+ default_cfg1 {
+ pins = "GPIO68_E1"; /* VSI0 */
+ ste,config = <&in_nopull>;
+ };
+ };
+ };
+
+ /* Two GPIO lines used by the display */
+ display {
+ display_default_mode: display_default {
+ skomer_cfg1 {
+ /*
+ * OLED DETECT or check_pba, this appears to be high
+ * on "PBA" which I guess is "prototype board A".
+ */
+ pins = "GPIO93_B7";
+ ste,config = <&gpio_in_nopull>;
+ };
+ skomer_cfg2 {
+ pins = "GPIO139_C9";
+ /*
+ * MIPI_DSI0_RESET_N resets the display, leave high
+ * (de-asserted) so we only assert reset explicitly
+ * from the display driver.
+ */
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ backlight {
+ gpio_backlight_default_mode: backlight_default {
+ skomer_cfg1 {
+ pins = "GPIO69_E2"; /* LCD_BL_CTRL */
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+ /* GPIO that enables the 2.9V SD card level translator */
+ sd-level-translator {
+ sd_level_translator_default: sd_level_translator_default {
+ /* level shifter on GPIO87 */
+ skomer_cfg1 {
+ pins = "GPIO87_B3";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* GPIO that enables the LDO regulator for the eMMC */
+ emmc-ldo {
+ emmc_ldo_en_default_mode: emmc_ldo_default {
+ /* LDO enable on GPIO223 */
+ skomer_cfg1 {
+ pins = "GPIO223_AH9";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ /* GPIO keys */
+ gpio-keys {
+ gpio_keys_default_mode: gpio_keys_default {
+ skomer_cfg1 {
+ pins = "GPIO67_G2", /* VOL UP */
+ "GPIO91_B6", /* HOME */
+ "GPIO92_D6", /* VOL DOWN */
+ "GPIO204_AF23", /* MENU */
+ "GPIO205_AG23"; /* BACK */
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ /* Interrupt line for BMA254 */
+ bma254 {
+ bma254_skomer_default: bma254_skomer {
+ skomer_cfg1 {
+ pins = "GPIO224_AG9";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ /* Interrupt line for light/proximity sensor GP2AP002 */
+ gp2ap002 {
+ gp2ap002_skomer_default: gp2ap002_skomer {
+ skomer_cfg1 {
+ pins = "GPIO146_D13";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* GPIO-based I2C bus for NCP6914 */
+ i2c-gpio-0 {
+ i2c_gpio_0_default: i2c_gpio_0 {
+ skomer_cfg1 {
+ pins = "GPIO143_D12", "GPIO144_B13";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ /* GPIO-based I2C bus for ALPS HSCD compass */
+ i2c-gpio-1 {
+ i2c_gpio_1_default: i2c_gpio_1 {
+ skomer_cfg1 {
+ pins = "GPIO151_B17", "GPIO152_D16";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ wlan {
+ wlan_default_mode: wlan_default {
+ skomer_cfg1 {
+ pins = "GPIO216_AG12";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ wlan_en_default_mode: wlan_en_default {
+ skomer_cfg2 {
+ pins = "GPIO215_AH13";
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+ bluetooth {
+ bluetooth_default_mode: bluetooth_default {
+ skomer_cfg1 {
+ pins = "GPIO199_AH23", "GPIO222_AJ9";
+ ste,config = <&gpio_out_lo>;
+ };
+ skomer_cfg2 {
+ pins = "GPIO97_D9";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+ vibrator {
+ vibrator_default: vibrator_default {
+ skomer_cfg1 {
+ pins = "GPIO195_AG28"; /* MOT_EN */
+ ste,config = <&gpio_out_lo>;
+ };
+ };
+ };
+};
+
+&ab8505_gpio {
+ /* Hog a few default settings */
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_default>;
+
+ gpio {
+ gpio_default: gpio_default {
+ skomer_mux {
+ /* Change unused pins to GPIO mode */
+ function = "gpio";
+ groups = "gpio3_a_1", /* default: SysClkReq4 */
+ "gpio14_a_1"; /* default: PWMOut1 */
+ };
+ skomer_cfg1 {
+ pins = "GPIO11_B17", "GPIO13_D17", "GPIO50_L4";
+ bias-disable;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/stm32746g-eval.dts b/arch/arm/boot/dts/stm32746g-eval.dts
index fcc804e3c158..4ea3f98dd275 100644
--- a/arch/arm/boot/dts/stm32746g-eval.dts
+++ b/arch/arm/boot/dts/stm32746g-eval.dts
@@ -165,7 +165,7 @@
interrupts = <8 IRQ_TYPE_EDGE_RISING>;
interrupt-parent = <&gpioi>;
- stmfx_pinctrl: stmfx-pin-controller {
+ stmfx_pinctrl: pinctrl {
compatible = "st,stmfx-0300-pinctrl";
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
index 0237d4ddaa92..73c07f0dfad2 100644
--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
@@ -162,6 +162,40 @@
};
};
+ ethernet0_rmii_pins_a: rmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, AF11)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('G', 14, AF11)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('A', 1, AF0)>, /* ETH1_RMII_REF_CLK */
+ <STM32_PINMUX('A', 2, AF11)>, /* ETH1_MDIO */
+ <STM32_PINMUX('C', 1, AF11)>; /* ETH1_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH1_RMII_CRS_DV */
+ bias-disable;
+ };
+ };
+
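+ /* Sleep state: all RMII pins go to analog mode (high impedance) for lowest leakage */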
+ ethernet0_rmii_pins_sleep_a: rmii-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, ANALOG)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('G', 14, ANALOG)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH1_MDIO */
+ <STM32_PINMUX('C', 1, ANALOG)>, /* ETH1_MDC */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH1_RMII_REF_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH1_RMII_CRS_DV */
+ };
+ };
+
fmc_pins_a: fmc-0 {
pins1 {
pinmux = <STM32_PINMUX('D', 4, AF12)>, /* FMC_NOE */
@@ -685,6 +719,26 @@
};
};
+
+ sai2a_pins_b: sai2a-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('I', 6, AF10)>, /* SAI2_SD_A */
+ <STM32_PINMUX('I', 7, AF10)>, /* SAI2_FS_A */
+ <STM32_PINMUX('D', 13, AF10)>; /* SAI2_SCK_A */
+ slew-rate = <0>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sai2a_sleep_pins_b: sai2a-sleep-3 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 6, ANALOG)>, /* SAI2_SD_A */
+ <STM32_PINMUX('I', 7, ANALOG)>, /* SAI2_FS_A */
+ <STM32_PINMUX('D', 13, ANALOG)>; /* SAI2_SCK_A */
+ };
+ };
+
sai2b_pins_a: sai2b-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 12, AF10)>, /* SAI2_SCK_B */
@@ -1000,6 +1054,19 @@
};
};
+ usart3_pins_a: usart3-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 10, AF7)>; /* USART3_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
+ bias-disable;
+ };
+ };
+
uart4_pins_a: uart4-0 {
pins1 {
pinmux = <STM32_PINMUX('G', 11, AF6)>; /* UART4_TX */
@@ -1040,6 +1107,32 @@
bias-disable;
};
};
+
+ uart8_pins_a: uart8-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('E', 1, AF8)>; /* UART8_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 0, AF8)>; /* UART8_RX */
+ bias-disable;
+ };
+ };
+
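+ /* OTG pads are kept in analog mode so the USB PHY, not the GPIO block, owns them */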
+ usbotg_hs_pins_a: usbotg-hs-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 10, ANALOG)>; /* OTG_ID */
+ };
+ };
+
+ usbotg_fs_dp_dm_pins_a: usbotg-fs-dp-dm-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 11, ANALOG)>, /* OTG_FS_DM */
+ <STM32_PINMUX('A', 12, ANALOG)>; /* OTG_FS_DP */
+ };
+ };
};
&pinctrl_z {
diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi
index fb41d0778b00..3ea05ba48215 100644
--- a/arch/arm/boot/dts/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/stm32mp151.dtsi
@@ -17,6 +17,7 @@
cpu0: cpu@0 {
compatible = "arm,cortex-a7";
+ clock-frequency = <650000000>;
device_type = "cpu";
reg = <0>;
};
@@ -483,6 +484,7 @@
resets = <&rcc I2C1_R>;
#address-cells = <1>;
#size-cells = <0>;
+ wakeup-source;
status = "disabled";
};
@@ -496,6 +498,7 @@
resets = <&rcc I2C2_R>;
#address-cells = <1>;
#size-cells = <0>;
+ wakeup-source;
status = "disabled";
};
@@ -509,6 +512,7 @@
resets = <&rcc I2C3_R>;
#address-cells = <1>;
#size-cells = <0>;
+ wakeup-source;
status = "disabled";
};
@@ -522,6 +526,7 @@
resets = <&rcc I2C5_R>;
#address-cells = <1>;
#size-cells = <0>;
+ wakeup-source;
status = "disabled";
};
@@ -959,6 +964,7 @@
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc DMA1>;
+ resets = <&rcc DMA1_R>;
#dma-cells = <4>;
st,mem2mem;
dma-requests = <8>;
@@ -976,6 +982,7 @@
<GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc DMA2>;
+ resets = <&rcc DMA2_R>;
#dma-cells = <4>;
st,mem2mem;
dma-requests = <8>;
@@ -989,6 +996,7 @@
dma-masters = <&dma1 &dma2>;
dma-channels = <16>;
clocks = <&rcc DMAMUX>;
+ resets = <&rcc DMAMUX_R>;
};
adc: adc@48003000 {
@@ -1044,7 +1052,7 @@
};
usbotg_hs: usb-otg@49000000 {
- compatible = "snps,dwc2";
+ compatible = "st,stm32mp15-hsotg", "snps,dwc2";
reg = <0x49000000 0x10000>;
clocks = <&rcc USBO_K>;
clock-names = "otg";
@@ -1055,6 +1063,7 @@
g-np-tx-fifo-size = <32>;
g-tx-fifo-size = <128 128 64 64 64 64 32 32>;
dr_mode = "otg";
+ usb33d-supply = <&usb33>;
status = "disabled";
};
@@ -1280,6 +1289,7 @@
reg = <0x58000000 0x1000>;
interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc MDMA>;
+ resets = <&rcc MDMA_R>;
#dma-cells = <5>;
dma-channels = <32>;
dma-requests = <48>;
@@ -1369,10 +1379,12 @@
clock-names = "stmmaceth",
"mac-clk-tx",
"mac-clk-rx",
+ "eth-ck",
"ethstp";
clocks = <&rcc ETHMAC>,
<&rcc ETHTX>,
<&rcc ETHRX>,
+ <&rcc ETHCK_K>,
<&rcc ETHSTP>;
st,syscon = <&syscfg 0x4>;
snps,mixed-burst;
@@ -1473,6 +1485,7 @@
resets = <&rcc I2C4_R>;
#address-cells = <1>;
#size-cells = <0>;
+ wakeup-source;
status = "disabled";
};
@@ -1508,6 +1521,7 @@
resets = <&rcc I2C6_R>;
#address-cells = <1>;
#size-cells = <0>;
+ wakeup-source;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/stm32mp153.dtsi b/arch/arm/boot/dts/stm32mp153.dtsi
index 2d759fc6015c..6d9ab08667fc 100644
--- a/arch/arm/boot/dts/stm32mp153.dtsi
+++ b/arch/arm/boot/dts/stm32mp153.dtsi
@@ -10,6 +10,7 @@
cpus {
cpu1: cpu@1 {
compatible = "arm,cortex-a7";
+ clock-frequency = <650000000>;
device_type = "cpu";
reg = <1>;
};
diff --git a/arch/arm/boot/dts/stm32mp157a-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-avenger96.dts
index cbfa4075907e..425175f7d83c 100644
--- a/arch/arm/boot/dts/stm32mp157a-avenger96.dts
+++ b/arch/arm/boot/dts/stm32mp157a-avenger96.dts
@@ -135,10 +135,6 @@
#interrupt-cells = <2>;
status = "okay";
- st,main-control-register = <0x04>;
- st,vin-control-register = <0xc0>;
- st,usb-control-register = <0x30>;
-
regulators {
compatible = "st,stpmic1-regulators";
@@ -173,7 +169,6 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
- st,mask_reset;
regulator-initial-mode = <0>;
regulator-over-current-protection;
};
@@ -213,8 +208,6 @@
vdd_usb: ldo4 {
regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
interrupts = <IT_CURLIM_LDO4 0>;
interrupt-parent = <&pmic>;
};
@@ -240,7 +233,6 @@
vref_ddr: vref_ddr {
regulator-name = "vref_ddr";
regulator-always-on;
- regulator-over-current-protection;
};
bst_out: boost {
@@ -300,7 +292,8 @@
pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
- broken-cd;
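+ /* Card detect uses a dedicated GPIO, active low with internal pull-up */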
+ cd-gpios = <&gpioi 8 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
st,sig-dir;
st,neg-edge;
st,use-ckin;
diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
new file mode 100644
index 000000000000..af99e132e1b1
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (C) 2019 Marek Vasut <marex@denx.de>
+ */
+
+#include "stm32mp157c-dhcom-som.dtsi"
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ model = "STMicroelectronics STM32MP157C DHCOM Premium Developer Kit (2)";
+ compatible = "dh,stm32mp157c-dhcom-pdk2", "st,stm32mp157";
+
+ aliases {
+ serial0 = &uart4;
+ serial1 = &usart3;
+ serial2 = &uart8;
+ ethernet0 = &ethernet0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clk_ext_audio_codec: clock-codec {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ display_bl: display-bl {
+ compatible = "pwm-backlight";
+ pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
+ default-brightness-level = <8>;
+ enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ ethernet_vio: vioregulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vio";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ panel {
+ compatible = "edt,etm0700g0edh6";
+ backlight = <&display_bl>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "audio-graph-card";
+ routing =
+ "MIC_IN", "Capture",
+ "Capture", "Mic Bias",
+ "Playback", "HP_OUT";
+ dais = <&sai2a_port &sai2b_port>;
+ status = "okay";
+ };
+};
+
+&cec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cec_pins_a>;
+ status = "okay";
+};
+
+&ethernet0 {
+ status = "okay";
+ pinctrl-0 = <&ethernet0_rmii_pins_a>;
+ pinctrl-1 = <&ethernet0_rmii_pins_sleep_a>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rmii";
+ max-speed = <100>;
+ phy-handle = <&phy0>;
+ st,eth-ref-clk-sel;
+ phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+};
+
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ sgtl5000: codec@a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ #sound-dai-cells = <0>;
+ clocks = <&clk_ext_audio_codec>;
+ VDDA-supply = <&v3v3>;
+ VDDIO-supply = <&vdd>;
+
+ sgtl5000_port: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sgtl5000_tx_endpoint: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&sai2a_endpoint>;
+ frame-master;
+ bitclock-master;
+ };
+
+ sgtl5000_rx_endpoint: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&sai2b_endpoint>;
+ frame-master;
+ bitclock-master;
+ };
+ };
+ };
+
+ polytouch@38 {
+ compatible = "edt,edt-ft5x06";
+ reg = <0x38>;
+ interrupt-parent = <&gpiog>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
+ linux,wakeup;
+ };
+};
+
+&ltdc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ltdc_pins_b>;
+ pinctrl-1 = <&ltdc_pins_sleep_b>;
+ status = "okay";
+
+ port {
+ lcd_display_out: endpoint {
+ remote-endpoint = <&lcd_panel_in>;
+ };
+ };
+};
+
+&m_can1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can1_pins_a>;
+ pinctrl-1 = <&m_can1_sleep_pins_a>;
+ status = "okay";
+};
+
+&sai2 {
+ clocks = <&rcc SAI2>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+ clock-names = "pclk", "x8k", "x11k";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sai2a_pins_b &sai2b_pins_b>;
+ pinctrl-1 = <&sai2a_sleep_pins_b &sai2b_sleep_pins_b>;
+ status = "okay";
+
+ sai2a: audio-controller@4400b004 {
+ #clock-cells = <0>;
+ dma-names = "tx";
+ clocks = <&rcc SAI2_K>;
+ clock-names = "sai_ck";
+ status = "okay";
+
+ sai2a_port: port {
+ sai2a_endpoint: endpoint {
+ remote-endpoint = <&sgtl5000_tx_endpoint>;
+ format = "i2s";
+ mclk-fs = <512>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+ };
+ };
+
+ sai2b: audio-controller@4400b024 {
+ dma-names = "rx";
+ st,sync = <&sai2a 2>;
+ clocks = <&rcc SAI2_K>, <&sai2a>;
+ clock-names = "sai_ck", "MCLK";
+ status = "okay";
+
+ sai2b_port: port {
+ sai2b_endpoint: endpoint {
+ remote-endpoint = <&sgtl5000_rx_endpoint>;
+ format = "i2s";
+ mclk-fs = <512>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+ };
+ };
+};
+
+&timers2 {
+ /* spare dmas for other usage (un-delete to enable pwm capture) */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+ pwm2: pwm {
+ pinctrl-0 = <&pwm2_pins_a>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
+ timer@1 {
+ status = "okay";
+ };
+};
+
+&usart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usart3_pins_a>;
+ status = "okay";
+};
+
+&uart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart8_pins_a>;
+ status = "okay";
+};
+
+&usbh_ehci {
+ phys = <&usbphyc_port0>;
+ status = "okay";
+};
+
+&usbotg_hs {
+ dr_mode = "peripheral";
+ phys = <&usbphyc_port1 0>;
+ phy-names = "usb2-phy";
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi
new file mode 100644
index 000000000000..f81dc3134135
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2019 Marek Vasut <marex@denx.de>
+ */
+/dts-v1/;
+
+#include "stm32mp157.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/ {
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xC0000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
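+ /* Carveouts for the Cortex-M4 coprocessor: code/data RAM plus rpmsg vrings and buffers */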
+ mcuram2: mcuram2@10000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10000000 0x40000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@10040000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10040000 0x1000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@10041000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10041000 0x1000>;
+ no-map;
+ };
+
+ vdev0buffer: vdev0buffer@10042000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10042000 0x4000>;
+ no-map;
+ };
+
+ mcuram: mcuram@30000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x30000000 0x40000>;
+ no-map;
+ };
+
+ retram: retram@38000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x38000000 0x10000>;
+ no-map;
+ };
+ };
+};
+
+&adc {
+ vdd-supply = <&vdd>;
+ vdda-supply = <&vdda>;
+ vref-supply = <&vdda>;
+ status = "okay";
+
+ adc1: adc@0 {
+ st,min-sample-time-nsecs = <5000>;
+ st,adc-channels = <0>;
+ status = "okay";
+ };
+
+ adc2: adc@100 {
+ st,adc-channels = <1>;
+ st,min-sample-time-nsecs = <5000>;
+ status = "okay";
+ };
+};
+
+&dac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&dac_ch1_pins_a &dac_ch2_pins_a>;
+ vref-supply = <&vdda>;
+ status = "okay";
+
+ dac1: dac@1 {
+ status = "okay";
+ };
+ dac2: dac@2 {
+ status = "okay";
+ };
+};
+
+&dts {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ rtc@32 {
+ compatible = "microcrystal,rv8803";
+ reg = <0x32>;
+ };
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "okay";
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+ ldo1-supply = <&v3v3>;
+ ldo2-supply = <&v3v3>;
+ ldo3-supply = <&vdd_ddr>;
+ ldo5-supply = <&v3v3>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ st,mask-reset;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ vdda: ldo1 {
+ regulator-name = "vdda";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO1 0>;
+ };
+
+ v2v8: ldo2 {
+ regulator-name = "v2v8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ interrupts = <IT_CURLIM_LDO2 0>;
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdd_sd: ldo5 {
+ regulator-name = "vdd_sd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ regulator-boot-on;
+ };
+
+ v1v8: ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 0>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ power-off-time-sec = <10>;
+ status = "okay";
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+
+ touchscreen@49 {
+ compatible = "ti,tsc2004";
+ reg = <0x49>;
+ vio-supply = <&v3v3>;
+ interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
+
+&ipcc {
+ status = "okay";
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&m4_rproc {
+ memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ <&vdev0vring1>, <&vdev0buffer>;
+ mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+ mbox-names = "vq0", "vq1", "shutdown";
+ interrupt-parent = <&exti>;
+ interrupts = <68 1>;
+ status = "okay";
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&qspi {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a &qspi_bk2_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a &qspi_bk2_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash0: mx66l51235l@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&rng1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
+ broken-cd;
+ st,sig-dir;
+ st,neg-edge;
+ st,use-ckin;
+ bus-width = <4>;
+ vmmc-supply = <&vdd_sd>;
+ status = "okay";
+};
+
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_a>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_a>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ st,neg-edge;
+ bus-width = <8>;
+ vmmc-supply = <&v3v3>;
+ vqmmc-supply = <&v3v3>;
+ mmc-ddr-3_3v;
+ status = "okay";
+};
+
+&sdmmc3 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc3_b4_pins_a>;
+ pinctrl-1 = <&sdmmc3_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc3_b4_sleep_pins_a>;
+ broken-cd;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ vqmmc-supply = <&v3v3>;
+ mmc-ddr-3_3v;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index 1fc43251d697..9d2592db630c 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -130,10 +130,12 @@
};
&i2c4 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c4_pins_a>;
+ pinctrl-1 = <&i2c4_pins_sleep_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
+ clock-frequency = <400000>;
status = "okay";
/* spare dmas for other usage */
/delete-property/dmas;
@@ -218,8 +220,6 @@
vdd_usb: ldo4 {
regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
interrupts = <IT_CURLIM_LDO4 0>;
};
@@ -241,7 +241,6 @@
vref_ddr: vref_ddr {
regulator-name = "vref_ddr";
regulator-always-on;
- regulator-over-current-protection;
};
bst_out: boost {
@@ -313,7 +312,8 @@
pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
- broken-cd;
+ cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
st,sig-dir;
st,neg-edge;
st,use-ckin;
@@ -334,7 +334,7 @@
st,neg-edge;
bus-width = <8>;
vmmc-supply = <&v3v3>;
- vqmmc-supply = <&v3v3>;
+ vqmmc-supply = <&vdd>;
mmc-ddr-3_3v;
status = "okay";
};
@@ -355,6 +355,10 @@
status = "okay";
};
+&usbotg_hs {
+ vbus-supply = <&vbus_otg>;
+};
+
&usbphyc_port0 {
phy-supply = <&vdd_usb>;
vdda1v1-supply = <&reg11>;
diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
index 228e35e16884..8a4c7ff31a92 100644
--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
@@ -174,8 +174,9 @@
};
&i2c2 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c2_pins_a>;
+ pinctrl-1 = <&i2c2_pins_sleep_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
status = "okay";
@@ -210,7 +211,7 @@
interrupt-parent = <&gpioi>;
vdd-supply = <&v3v3>;
- stmfx_pinctrl: stmfx-pin-controller {
+ stmfx_pinctrl: pinctrl {
compatible = "st,stmfx-0300-pinctrl";
gpio-controller;
#gpio-cells = <2>;
@@ -218,7 +219,7 @@
#interrupt-cells = <2>;
gpio-ranges = <&stmfx_pinctrl 0 0 24>;
- joystick_pins: joystick {
+ joystick_pins: joystick-pins {
pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
bias-pull-down;
};
@@ -227,8 +228,9 @@
};
&i2c5 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c5_pins_a>;
+ pinctrl-1 = <&i2c5_pins_sleep_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
status = "okay";
@@ -353,7 +355,8 @@
};
&usbotg_hs {
- dr_mode = "peripheral";
+ pinctrl-0 = <&usbotg_hs_pins_a>;
+ pinctrl-names = "default";
phys = <&usbphyc_port1 0>;
phy-names = "usb2-phy";
status = "okay";
diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
index f6672e87aef3..d946e0a02f5c 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
@@ -216,10 +216,12 @@
};
&i2c4 {
- pinctrl-names = "default";
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c4_pins_a>;
+ pinctrl-1 = <&i2c4_pins_sleep_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
+ clock-frequency = <400000>;
status = "okay";
/* spare dmas for other usage */
/delete-property/dmas;
@@ -304,8 +306,6 @@
vdd_usb: ldo4 {
regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
interrupts = <IT_CURLIM_LDO4 0>;
};
@@ -328,7 +328,6 @@
vref_ddr: vref_ddr {
regulator-name = "vref_ddr";
regulator-always-on;
- regulator-over-current-protection;
};
bst_out: boost {
@@ -479,7 +478,8 @@
pinctrl-0 = <&sdmmc1_b4_pins_a>;
pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
- broken-cd;
+ cd-gpios = <&gpiob 7 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
st,neg-edge;
bus-width = <4>;
vmmc-supply = <&v3v3>;
diff --git a/arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts b/arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts
new file mode 100644
index 000000000000..e9ef97c9c893
--- /dev/null
+++ b/arch/arm/boot/dts/sun5i-a13-pocketbook-touch-lux-3.dts
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2019 Ondrej Jirman <megous@megous.com>
+ */
+
+/dts-v1/;
+#include "sun5i-a13.dtsi"
+#include "sunxi-common-regulators.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ model = "PocketBook Touch Lux 3";
+ compatible = "pocketbook,touch-lux-3", "allwinner,sun5i-a13";
+
+ aliases {
+ serial0 = &uart1;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
+ enable-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
+ brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
+ default-brightness-level = <8>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ gpios = <&pio 4 8 GPIO_ACTIVE_LOW>; /* PE8 */
+ default-state = "on";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ label = "GPIO Keys";
+
+ key-right {
+ label = "Right";
+ linux,code = <KEY_RIGHT>;
+ gpios = <&pio 6 9 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PG9 */
+ };
+
+ key-left {
+ label = "Left";
+ linux,code = <KEY_LEFT>;
+ gpios = <&pio 6 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PG10 */
+ };
+ };
+
+ reg_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-1v8-nor-ctp";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&pio 2 15 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_1v8_nor: regulator-nor {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-nor";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&pio 2 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_1v8>;
+ regulator-always-on;
+ };
+
+ reg_1v8_ctp: regulator-ctp {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-ctp";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&pio 2 13 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_1v8>;
+ };
+
+ reg_3v3_mmc0: regulator-mmc0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-mmc0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 4 4 GPIO_ACTIVE_LOW>; /* PE4 */
+ vin-supply = <&reg_vcc3v3>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ axp209: pmic@34 {
+ reg = <0x34>;
+ interrupts = <0>;
+ };
+};
+
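+/* axp209.dtsi extends the pmic@34 node above with the AXP209 cells and regulators */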
+#include "axp209.dtsi"
+
+&i2c1 {
+ status = "okay";
+
+ pcf8563: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ /* Touchpanel is connected here. */
+};
+
+&lradc {
+ vref-supply = <&reg_ldo2>;
+ status = "okay";
+
+ button-200 {
+ label = "Home";
+ linux,code = <KEY_HOME>;
+ channel = <0>;
+ voltage = <200000>;
+ };
+
+ button-400 {
+ label = "Menu";
+ linux,code = <KEY_MENU>;
+ channel = <0>;
+ voltage = <400000>;
+ };
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_3v3_mmc0>;
+ bus-width = <4>;
+ cd-gpios = <&pio 6 0 GPIO_ACTIVE_LOW>; /* PG0 */
+ status = "okay";
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_4bit_pc_pins>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&otg_sram {
+ status = "okay";
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pin>;
+ status = "okay";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdd-int-pll";
+};
+
+&reg_ldo1 {
+ regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+};
+
+&reg_ldo3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+ /* We need this otherwise the LDO3 would overload */
+ regulator-soft-start;
+ regulator-ramp-delay = <1600>;
+};
+
+&spi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pe_pins>, <&spi2_cs0_pe_pin>;
+ status = "okay";
+
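+ /* SPI NOR for the e-paper display; the epd_flash label suggests it holds EPD waveform data */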
+ epd_flash: flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "macronix,mx25u4033", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <4000000>;
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pg_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ usb1_vbus-supply = <&reg_ldo3>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index 0b526e6e5a95..e6b036734a64 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -186,6 +186,8 @@
compatible = "allwinner,sun5i-a13-mbus";
reg = <0x01c01000 0x1000>;
clocks = <&ccu CLK_MBUS>;
+ #address-cells = <1>;
+ #size-cells = <1>;
dma-ranges = <0x00000000 0x40000000 0x20000000>;
#interconnect-cells = <1>;
};
@@ -771,9 +773,6 @@
interconnect-names = "dma-mem";
status = "disabled";
- assigned-clocks = <&ccu CLK_DE_BE>;
- assigned-clock-rates = <300000000>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 7762fbd9a133..f3425a66fc0a 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -1139,9 +1139,6 @@
"ram";
resets = <&ccu RST_AHB1_BE1>;
- assigned-clocks = <&ccu CLK_BE1>;
- assigned-clock-rates = <300000000>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -1185,9 +1182,6 @@
"ram";
resets = <&ccu RST_AHB1_DRC1>;
- assigned-clocks = <&ccu CLK_IEP_DRC1>;
- assigned-clock-rates = <300000000>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -1231,9 +1225,6 @@
"ram";
resets = <&ccu RST_AHB1_BE0>;
- assigned-clocks = <&ccu CLK_BE0>;
- assigned-clock-rates = <300000000>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -1274,9 +1265,6 @@
"ram";
resets = <&ccu RST_AHB1_DRC0>;
- assigned-clocks = <&ccu CLK_IEP_DRC0>;
- assigned-clock-rates = <300000000>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun7i-a20-linutronix-testbox-v2.dts b/arch/arm/boot/dts/sun7i-a20-linutronix-testbox-v2.dts
new file mode 100644
index 000000000000..da5a2eea4ce3
--- /dev/null
+++ b/arch/arm/boot/dts/sun7i-a20-linutronix-testbox-v2.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020 Linutronix GmbH
+ * Author: Benedikt Spranger <b.spranger@linutronix.de>
+ */
+
+/dts-v1/;
+#include "sun7i-a20-lamobo-r1.dts"
+
+/ {
+ model = "Lamobo R1";
+ compatible = "linutronix,testbox-v2", "lamobo,lamobo-r1", "allwinner,sun7i-a20";
+
+ leds {
+ led-opto1 {
+ label = "lamobo_r1:opto:powerswitch";
+ gpios = <&pio 7 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-opto2 {
+ label = "lamobo_r1:opto:relay";
+ gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ eeprom: eeprom@50 {
+ compatible = "atmel,24c08";
+ reg = <0x50>;
+ status = "okay";
+ };
+
+ atecc508a@60 {
+ compatible = "atmel,atecc508a";
+ reg = <0x60>;
+ };
+};
+
+&can0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can_ph_pins>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 92b5be97085d..ffe1d10a1a84 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -47,6 +47,7 @@
#include <dt-bindings/dma/sun4i-a10.h>
#include <dt-bindings/clock/sun7i-a20-ccu.h>
#include <dt-bindings/reset/sun4i-a10-ccu.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
interrupt-parent = <&gic>;
@@ -404,11 +405,12 @@
};
tcon0: lcd-controller@1c0c000 {
- compatible = "allwinner,sun7i-a20-tcon";
+ compatible = "allwinner,sun7i-a20-tcon0",
+ "allwinner,sun7i-a20-tcon";
reg = <0x01c0c000 0x1000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
- resets = <&ccu RST_TCON0>;
- reset-names = "lcd";
+ resets = <&ccu RST_TCON0>, <&ccu RST_LVDS>;
+ reset-names = "lcd", "lvds";
clocks = <&ccu CLK_AHB_LCD0>,
<&ccu CLK_TCON0_CH0>,
<&ccu CLK_TCON0_CH1>;
@@ -454,7 +456,8 @@
};
tcon1: lcd-controller@1c0d000 {
- compatible = "allwinner,sun7i-a20-tcon";
+ compatible = "allwinner,sun7i-a20-tcon1",
+ "allwinner,sun7i-a20-tcon";
reg = <0x01c0d000 0x1000>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
resets = <&ccu RST_TCON1>;
@@ -932,6 +935,20 @@
};
/omit-if-no-ref/
+ lcd_lvds0_pins: lcd-lvds0-pins {
+ pins = "PD0", "PD1", "PD2", "PD3", "PD4",
+ "PD5", "PD6", "PD7", "PD8", "PD9";
+ function = "lvds0";
+ };
+
+ /omit-if-no-ref/
+ lcd_lvds1_pins: lcd-lvds1-pins {
+ pins = "PD10", "PD11", "PD12", "PD13", "PD14",
+ "PD15", "PD16", "PD17", "PD18", "PD19";
+ function = "lvds1";
+ };
+
+ /omit-if-no-ref/
mmc0_pins: mmc0-pins {
pins = "PF0", "PF1", "PF2",
"PF3", "PF4", "PF5";
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 48487f6d4ab9..c1362d0f0ff8 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -684,9 +684,6 @@
clock-names = "ahb", "mod", "ram";
resets = <&ccu RST_BUS_DRC>;
- assigned-clocks = <&ccu CLK_DRC>;
- assigned-clock-rates = <300000000>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index a2c37adacf77..cfd3858afb3e 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -372,8 +372,6 @@
"ram", "sat";
resets = <&ccu RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>;
reset-names = "be", "sat";
- assigned-clocks = <&ccu CLK_DE_BE>;
- assigned-clock-rates = <300000000>;
};
&ccu {
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index e8b3669e0e5d..bfc9bb277a49 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -493,7 +493,6 @@
};
&usb_otg {
- dr_mode = "otg";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index e7b9bef1be6b..655404d6d3a3 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -72,7 +72,7 @@
#cooling-cells = <2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
compatible = "arm,cortex-a7";
device_type = "cpu";
clocks = <&ccu CLK_C0CPUX>;
@@ -83,7 +83,7 @@
#cooling-cells = <2>;
};
- cpu@2 {
+ cpu2: cpu@2 {
compatible = "arm,cortex-a7";
device_type = "cpu";
clocks = <&ccu CLK_C0CPUX>;
@@ -94,7 +94,7 @@
#cooling-cells = <2>;
};
- cpu@3 {
+ cpu3: cpu@3 {
compatible = "arm,cortex-a7";
device_type = "cpu";
clocks = <&ccu CLK_C0CPUX>;
@@ -116,7 +116,7 @@
#cooling-cells = <2>;
};
- cpu@101 {
+ cpu101: cpu@101 {
compatible = "arm,cortex-a7";
device_type = "cpu";
clocks = <&ccu CLK_C1CPUX>;
@@ -127,7 +127,7 @@
#cooling-cells = <2>;
};
- cpu@102 {
+ cpu102: cpu@102 {
compatible = "arm,cortex-a7";
device_type = "cpu";
clocks = <&ccu CLK_C1CPUX>;
@@ -138,7 +138,7 @@
#cooling-cells = <2>;
};
- cpu@103 {
+ cpu103: cpu@103 {
compatible = "arm,cortex-a7";
device_type = "cpu";
clocks = <&ccu CLK_C1CPUX>;
@@ -314,7 +314,7 @@
display_clocks: clock@1000000 {
compatible = "allwinner,sun8i-a83t-de2-clk";
- reg = <0x01000000 0x100000>;
+ reg = <0x01000000 0x10000>;
clocks = <&ccu CLK_BUS_DE>,
<&ccu CLK_PLL_DE>;
clock-names = "bus",
@@ -324,6 +324,17 @@
#reset-cells = <1>;
};
+ rotate: rotate@1020000 {
+ compatible = "allwinner,sun8i-a83t-de2-rotate";
+ reg = <0x1020000 0x10000>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&display_clocks CLK_BUS_ROT>,
+ <&display_clocks CLK_ROT>;
+ clock-names = "bus",
+ "mod";
+ resets = <&display_clocks RST_ROT>;
+ };
+
mixer0: mixer@1100000 {
compatible = "allwinner,sun8i-a83t-de2-mixer-0";
reg = <0x01100000 0x100000>;
@@ -1188,12 +1199,60 @@
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors = <&ths 0>;
+
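+ /* Passive trip throttles the CPUs through the cooling map below; the critical trip triggers shutdown */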
+ trips {
+ cpu0_hot: cpu-hot {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu0_very_hot: cpu-very-hot {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ cpu-hot-limit {
+ trip = <&cpu0_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
cpu1_thermal: cpu1-thermal {
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors = <&ths 1>;
+
+ trips {
+ cpu1_hot: cpu-hot {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu1_very_hot: cpu-very-hot {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ cpu-hot-limit {
+ trip = <&cpu1_hot>;
+ cooling-device = <&cpu100 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu101 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu102 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu103 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
gpu_thermal: gpu-thermal {
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index 20217e2ca4d3..e83aa6866e7e 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -41,6 +41,7 @@
*/
#include "sunxi-h3-h5.dtsi"
+#include <dt-bindings/thermal/thermal.h>
/ {
cpu0_opp_table: opp_table0 {
@@ -227,6 +228,30 @@
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors = <&ths 0>;
+
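+ /* Throttle passively from 80C via the cooling map; the 100C critical trip shuts the system down */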
+ trips {
+ cpu_hot_trip: cpu-hot {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_very_hot_trip: cpu-very-hot {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ cpu-hot-limit {
+ trip = <&cpu_hot_trip>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index a9d5d6ddbd71..b782041e0e04 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -47,6 +47,7 @@
#include <dt-bindings/clock/sun8i-tcon-top.h>
#include <dt-bindings/reset/sun8i-r40-ccu.h>
#include <dt-bindings/reset/sun8i-de2.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
#address-cells = <1>;
@@ -110,6 +111,22 @@
status = "disabled";
};
+ thermal-zones {
+ cpu_thermal: cpu0-thermal {
+ /* milliseconds */
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&ths 0>;
+ };
+
+ gpu_thermal: gpu-thermal {
+ /* milliseconds */
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&ths 1>;
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -119,7 +136,7 @@
display_clocks: clock@1000000 {
compatible = "allwinner,sun8i-r40-de2-clk",
"allwinner,sun8i-h3-de2-clk";
- reg = <0x01000000 0x100000>;
+ reg = <0x01000000 0x10000>;
clocks = <&ccu CLK_BUS_DE>,
<&ccu CLK_DE>;
clock-names = "bus",
@@ -562,6 +579,17 @@
clocks = <&osc24M>;
};
+ ths: thermal-sensor@1c24c00 {
+ compatible = "allwinner,sun8i-r40-ths";
+ reg = <0x01c24c00 0x100>;
+ clocks = <&ccu CLK_BUS_THS>, <&ccu CLK_THS>;
+ clock-names = "bus", "mod";
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&ccu RST_BUS_THS>;
+ /* TODO: add nvmem-cells for calibration */
+ #thermal-sensor-cells = <1>;
+ };
+
uart0: serial@1c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
@@ -738,6 +766,8 @@
compatible = "allwinner,sun8i-r40-mbus";
reg = <0x01c62000 0x1000>;
clocks = <&ccu 155>;
+ #address-cells = <1>;
+ #size-cells = <1>;
dma-ranges = <0x00000000 0x40000000 0x80000000>;
#interconnect-cells = <1>;
};
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index 81ea50838cd5..e5312869c0d2 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -105,7 +105,7 @@
display_clocks: clock@1000000 {
compatible = "allwinner,sun8i-v3s-de2-clk";
- reg = <0x01000000 0x100000>;
+ reg = <0x01000000 0x10000>;
clocks = <&ccu CLK_BUS_DE>,
<&ccu CLK_DE>;
clock-names = "bus",
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 5e9c3060aa08..01a5df9aa71b 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -114,7 +114,7 @@
display_clocks: clock@1000000 {
/* compatible is in per SoC .dtsi file */
- reg = <0x01000000 0x100000>;
+ reg = <0x01000000 0x10000>;
clocks = <&ccu CLK_BUS_DE>,
<&ccu CLK_DE>;
clock-names = "bus",
@@ -560,6 +560,8 @@
compatible = "allwinner,sun8i-h3-mbus";
reg = <0x01c62000 0x1000>;
clocks = <&ccu CLK_MBUS>;
+ #address-cells = <1>;
+ #size-cells = <1>;
dma-ranges = <0x00000000 0x40000000 0xc0000000>;
#interconnect-cells = <1>;
};
@@ -892,6 +894,21 @@
pins = "PL0", "PL1";
function = "s_i2c";
};
+
+ r_pwm_pin: r-pwm-pin {
+ pins = "PL10";
+ function = "s_pwm";
+ };
+ };
+
+ r_pwm: pwm@1f03800 {
+ compatible = "allwinner,sun8i-h3-pwm";
+ reg = <0x01f03800 0x8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&r_pwm_pin>;
+ clocks = <&osc24M>;
+ #pwm-cells = <3>;
+ status = "disabled";
};
};
};
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 97a5c3504bbe..d3e032e7d21a 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -1296,7 +1296,13 @@
clocks = <&tegra_car TEGRA114_CLK_PLL_A>,
<&tegra_car TEGRA114_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA114_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA114_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA114_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA114_CLK_EXTERN1>;
};
};
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 0d7a6327e404..450a1f1b12a0 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/memory/tegra114-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/tegra-pmc.h>
/ {
compatible = "nvidia,tegra114";
@@ -514,11 +515,12 @@
status = "disabled";
};
- pmc@7000e400 {
+ tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra114-pmc";
reg = <0x7000e400 0x400>;
clocks = <&tegra_car TEGRA114_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
+ #clock-cells = <1>;
};
fuse@7000f800 {
diff --git a/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi b/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi
index 0462ed2dd8b8..de499f736bda 100644
--- a/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi
@@ -2009,8 +2009,14 @@
nvidia,audio-codec = <&sgtl5000>;
clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
<&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA124_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
};
thermal-zones {
diff --git a/arch/arm/boot/dts/tegra124-apalis.dtsi b/arch/arm/boot/dts/tegra124-apalis.dtsi
index d1e8593ef0d9..d70a86da4ee4 100644
--- a/arch/arm/boot/dts/tegra124-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis.dtsi
@@ -2001,8 +2001,14 @@
nvidia,audio-codec = <&sgtl5000>;
clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
<&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA124_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
};
thermal-zones {
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index d5fd642f8b77..1b567e2d5ce0 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1782,12 +1782,6 @@
};
ports {
- /* Micro A/B */
- usb2-0 {
- status = "okay";
- mode = "otg";
- };
-
/* Mini PCIe */
usb2-1 {
status = "okay";
@@ -2058,8 +2052,14 @@
clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
<&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA124_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
};
thermal-zones {
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index 3b10f475037f..9b1af50cd4b8 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -788,9 +788,15 @@
clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
<&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+ assigned-clocks = <&tegra_car TEGRA124_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
+
nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(I, 7) GPIO_ACTIVE_HIGH>;
nvidia,mic-det-gpios =
<&gpio TEGRA_GPIO(R, 7) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 7309393bfced..8c2ee6e7d6f1 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -1266,8 +1266,14 @@
clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
<&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA124_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
};
};
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 7f330b1f150f..94cac13d3e50 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/tegra124-car.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
+#include <dt-bindings/soc/tegra-pmc.h>
/ {
compatible = "nvidia,tegra124";
@@ -595,11 +596,12 @@
clocks = <&tegra_car TEGRA124_CLK_RTC>;
};
- pmc@7000e400 {
+ tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra124-pmc";
reg = <0x0 0x7000e400 0x0 0x400>;
clocks = <&tegra_car TEGRA124_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
+ #clock-cells = <1>;
};
fuse@7000f800 {
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9c58e7fcf5c0..c3b8ad53b967 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/memory/tegra20-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/tegra-pmc.h>
/ {
compatible = "nvidia,tegra20";
@@ -608,11 +609,12 @@
status = "disabled";
};
- pmc@7000e400 {
+ tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra20-pmc";
reg = <0x7000e400 0x400>;
clocks = <&tegra_car TEGRA20_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
+ #clock-cells = <1>;
};
mc: memory-controller@7000f000 {
diff --git a/arch/arm/boot/dts/tegra30-apalis-v1.1.dtsi b/arch/arm/boot/dts/tegra30-apalis-v1.1.dtsi
index 8b7a827d604d..387b17458e22 100644
--- a/arch/arm/boot/dts/tegra30-apalis-v1.1.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis-v1.1.dtsi
@@ -1189,7 +1189,13 @@
nvidia,audio-codec = <&sgtl5000>;
clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
<&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA30_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA30_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA30_CLK_EXTERN1>;
};
};
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi
index c18f6f61d764..6648506f3aa4 100644
--- a/arch/arm/boot/dts/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis.dtsi
@@ -1171,7 +1171,13 @@
nvidia,audio-codec = <&sgtl5000>;
clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
<&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA30_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA30_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA30_CLK_EXTERN1>;
};
};
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index a3b0f3555cd2..45ef6002b225 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -2111,7 +2111,13 @@
clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
<&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA30_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA30_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA30_CLK_EXTERN1>;
};
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 7ce61edd52f5..4b4f49a49394 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -619,8 +619,14 @@
clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
<&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA30_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA30_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA30_CLK_EXTERN1>;
};
gpio-keys {
diff --git a/arch/arm/boot/dts/tegra30-colibri.dtsi b/arch/arm/boot/dts/tegra30-colibri.dtsi
index 1f9198bb24ff..adba554381c7 100644
--- a/arch/arm/boot/dts/tegra30-colibri.dtsi
+++ b/arch/arm/boot/dts/tegra30-colibri.dtsi
@@ -1030,8 +1030,14 @@
nvidia,audio-codec = <&sgtl5000>;
clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
<&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA30_CLK_EXTERN1>;
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
+
+ assigned-clocks = <&tegra_car TEGRA30_CLK_EXTERN1>,
+ <&tegra_pmc TEGRA_PMC_CLK_OUT_1>;
+
+ assigned-clock-parents = <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA30_CLK_EXTERN1>;
};
};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 55ae050042ce..d2d05f1da274 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/memory/tegra30-mc.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/tegra-pmc.h>
/ {
compatible = "nvidia,tegra30";
@@ -714,11 +715,12 @@
status = "disabled";
};
- pmc@7000e400 {
+ tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra30-pmc";
reg = <0x7000e400 0x400>;
clocks = <&tegra_car TEGRA30_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
+ #clock-cells = <1>;
};
mc: memory-controller@7000f000 {
diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index 64ec46c72a4c..06e7400d2940 100644
--- a/arch/arm/boot/dts/uniphier-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -51,7 +51,7 @@
ranges;
interrupt-parent = <&intc>;
- l2: l2-cache@500c0000 {
+ l2: cache-controller@500c0000 {
compatible = "socionext,uniphier-system-cache";
reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
<0x506c0000 0x400>;
@@ -245,7 +245,7 @@
#dma-cells = <1>;
};
- sd: sdhc@5a400000 {
+ sd: mmc@5a400000 {
compatible = "socionext,uniphier-sd-v2.91";
status = "disabled";
reg = <0x5a400000 0x200>;
@@ -265,7 +265,7 @@
sd-uhs-sdr50;
};
- emmc: sdhc@5a500000 {
+ emmc: mmc@5a500000 {
compatible = "socionext,uniphier-sd-v2.91";
status = "disabled";
reg = <0x5a500000 0x200>;
@@ -375,7 +375,7 @@
interrupt-controller;
};
- aidet: aidet@61830000 {
+ aidet: interrupt-controller@61830000 {
compatible = "socionext,uniphier-ld4-aidet";
reg = <0x61830000 0x200>;
interrupt-controller;
@@ -398,7 +398,7 @@
};
};
- nand: nand@68000000 {
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5a";
status = "disabled";
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index 2ec04d7972ef..1c866f0306fc 100644
--- a/arch/arm/boot/dts/uniphier-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -59,7 +59,7 @@
ranges;
interrupt-parent = <&intc>;
- l2: l2-cache@500c0000 {
+ l2: cache-controller@500c0000 {
compatible = "socionext,uniphier-system-cache";
reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
<0x506c0000 0x400>;
@@ -279,7 +279,7 @@
#dma-cells = <1>;
};
- sd: sdhc@5a400000 {
+ sd: mmc@5a400000 {
compatible = "socionext,uniphier-sd-v2.91";
status = "disabled";
reg = <0x5a400000 0x200>;
@@ -299,7 +299,7 @@
sd-uhs-sdr50;
};
- emmc: sdhc@5a500000 {
+ emmc: mmc@5a500000 {
compatible = "socionext,uniphier-sd-v2.91";
status = "disabled";
reg = <0x5a500000 0x200>;
@@ -317,7 +317,7 @@
non-removable;
};
- sd1: sdhc@5a600000 {
+ sd1: mmc@5a600000 {
compatible = "socionext,uniphier-sd-v2.91";
status = "disabled";
reg = <0x5a600000 0x200>;
@@ -426,7 +426,7 @@
};
};
- aidet: aidet@5fc20000 {
+ aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pro4-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
@@ -588,7 +588,7 @@
};
};
- nand: nand@68000000 {
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5a";
status = "disabled";
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index ea3961f920a0..8f1ae0957f5f 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -131,7 +131,7 @@
ranges;
interrupt-parent = <&intc>;
- l2: l2-cache@500c0000 {
+ l2: cache-controller@500c0000 {
compatible = "socionext,uniphier-system-cache";
reg = <0x500c0000 0x2000>, <0x503c0100 0x8>,
<0x506c0000 0x400>;
@@ -144,7 +144,7 @@
next-level-cache = <&l3>;
};
- l3: l3-cache@500c8000 {
+ l3: cache-controller@500c8000 {
compatible = "socionext,uniphier-system-cache";
reg = <0x500c8000 0x2000>, <0x503c8100 0x8>,
<0x506c8000 0x400>;
@@ -174,8 +174,8 @@
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&peri_clk 11>;
- resets = <&peri_rst 11>;
+ clocks = <&peri_clk 11>; /* common with spi0 */
+ resets = <&peri_rst 12>;
};
serial0: serial@54006800 {
@@ -408,7 +408,7 @@
};
};
- aidet: aidet@5fc20000 {
+ aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pro5-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
@@ -453,7 +453,155 @@
};
};
- nand: nand@68000000 {
+ usb0: usb@65a00000 {
+ compatible = "socionext,uniphier-dwc3", "snps,dwc3";
+ status = "disabled";
+ reg = <0x65a00000 0xcd00>;
+ interrupt-names = "host";
+ interrupts = <0 134 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0>;
+ clock-names = "ref", "bus_early", "suspend";
+ clocks = <&sys_clk 12>, <&sys_clk 12>, <&sys_clk 12>;
+ resets = <&usb0_rst 15>;
+ phys = <&usb0_hsphy0>, <&usb0_ssphy0>;
+ dr_mode = "host";
+ };
+
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-pro5-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb0_rst: reset@0 {
+ compatible = "socionext,uniphier-pro5-usb3-reset";
+ reg = <0x0 0x4>;
+ #reset-cells = <1>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 14>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 14>;
+ };
+
+ usb0_vbus0: regulator@100 {
+ compatible = "socionext,uniphier-pro5-usb3-regulator";
+ reg = <0x100 0x10>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 14>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 14>;
+ };
+
+ usb0_hsphy0: hs-phy@280 {
+ compatible = "socionext,uniphier-pro5-usb3-hsphy";
+ reg = <0x280 0x10>;
+ #phy-cells = <0>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 14>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 14>;
+ vbus-supply = <&usb0_vbus0>;
+ };
+
+ usb0_ssphy0: ss-phy@380 {
+ compatible = "socionext,uniphier-pro5-usb3-ssphy";
+ reg = <0x380 0x10>;
+ #phy-cells = <0>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 14>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 14>;
+ vbus-supply = <&usb0_vbus0>;
+ };
+ };
+
+ usb1: usb@65c00000 {
+ compatible = "socionext,uniphier-dwc3", "snps,dwc3";
+ status = "disabled";
+ reg = <0x65c00000 0xcd00>;
+ interrupt-names = "host";
+ interrupts = <0 137 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb2>;
+ clock-names = "ref", "bus_early", "suspend";
+ clocks = <&sys_clk 12>, <&sys_clk 12>, <&sys_clk 12>;
+ resets = <&usb1_rst 15>;
+ phys = <&usb1_hsphy0>, <&usb1_hsphy1>, <&usb1_ssphy0>;
+ dr_mode = "host";
+ };
+
+ usb-glue@65d00000 {
+ compatible = "socionext,uniphier-pro5-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65d00000 0x400>;
+
+ usb1_rst: reset@0 {
+ compatible = "socionext,uniphier-pro5-usb3-reset";
+ reg = <0x0 0x4>;
+ #reset-cells = <1>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 15>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 15>;
+ };
+
+ usb1_vbus0: regulator@100 {
+ compatible = "socionext,uniphier-pro5-usb3-regulator";
+ reg = <0x100 0x10>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 15>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 15>;
+ };
+
+ usb1_vbus1: regulator@110 {
+ compatible = "socionext,uniphier-pro5-usb3-regulator";
+ reg = <0x110 0x10>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 15>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 15>;
+ };
+
+ usb1_hsphy0: hs-phy@280 {
+ compatible = "socionext,uniphier-pro5-usb3-hsphy";
+ reg = <0x280 0x10>;
+ #phy-cells = <0>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 15>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 15>;
+ vbus-supply = <&usb1_vbus0>;
+ };
+
+ usb1_hsphy1: hs-phy@290 {
+ compatible = "socionext,uniphier-pro5-usb3-hsphy";
+ reg = <0x290 0x10>;
+ #phy-cells = <0>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 15>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 15>;
+ vbus-supply = <&usb1_vbus1>;
+ };
+
+ usb1_ssphy0: ss-phy@380 {
+ compatible = "socionext,uniphier-pro5-usb3-ssphy";
+ reg = <0x380 0x10>;
+ #phy-cells = <0>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 15>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 15>;
+ vbus-supply = <&usb1_vbus0>;
+ };
+ };
+
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5b";
status = "disabled";
reg-names = "nand_data", "denali_reg";
@@ -469,7 +617,7 @@
resets = <&sys_rst 2>, <&sys_rst 2>;
};
- emmc: sdhc@68400000 {
+ emmc: mmc@68400000 {
compatible = "socionext,uniphier-sd-v3.1";
status = "disabled";
reg = <0x68400000 0x800>;
@@ -485,7 +633,7 @@
non-removable;
};
- sd: sdhc@68800000 {
+ sd: mmc@68800000 {
compatible = "socionext,uniphier-sd-v3.1";
status = "disabled";
reg = <0x68800000 0x800>;
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 13b0d4a7741f..2f2a24994c69 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -157,7 +157,7 @@
ranges;
interrupt-parent = <&intc>;
- l2: l2-cache@500c0000 {
+ l2: cache-controller@500c0000 {
compatible = "socionext,uniphier-system-cache";
reg = <0x500c0000 0x2000>, <0x503c0100 0x8>,
<0x506c0000 0x400>;
@@ -187,8 +187,8 @@
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&peri_clk 11>;
- resets = <&peri_rst 11>;
+ clocks = <&peri_clk 12>;
+ resets = <&peri_rst 12>;
};
serial0: serial@54006800 {
@@ -446,7 +446,7 @@
};
};
- emmc: sdhc@5a000000 {
+ emmc: mmc@5a000000 {
compatible = "socionext,uniphier-sd-v3.1.1";
status = "disabled";
reg = <0x5a000000 0x800>;
@@ -462,7 +462,7 @@
non-removable;
};
- sd: sdhc@5a400000 {
+ sd: mmc@5a400000 {
compatible = "socionext,uniphier-sd-v3.1.1";
status = "disabled";
reg = <0x5a400000 0x800>;
@@ -508,7 +508,7 @@
};
};
- aidet: aidet@5fc20000 {
+ aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pxs2-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
@@ -761,7 +761,7 @@
};
};
- nand: nand@68000000 {
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5b";
status = "disabled";
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm/boot/dts/uniphier-ref-daughter.dtsi b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
index 04e60c295319..a11897669c26 100644
--- a/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
+++ b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
@@ -7,7 +7,7 @@
&i2c0 {
eeprom@50 {
- compatible = "microchip,24lc128";
+ compatible = "microchip,24lc128", "atmel,24c128";
reg = <0x50>;
pagesize = <64>;
};
diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index 4fc6676f5486..09992163e1f4 100644
--- a/arch/arm/boot/dts/uniphier-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -51,7 +51,7 @@
ranges;
interrupt-parent = <&intc>;
- l2: l2-cache@500c0000 {
+ l2: cache-controller@500c0000 {
compatible = "socionext,uniphier-system-cache";
reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
<0x506c0000 0x400>;
@@ -249,7 +249,7 @@
#dma-cells = <1>;
};
- sd: sdhc@5a400000 {
+ sd: mmc@5a400000 {
compatible = "socionext,uniphier-sd-v2.91";
status = "disabled";
reg = <0x5a400000 0x200>;
@@ -269,7 +269,7 @@
sd-uhs-sdr50;
};
- emmc: sdhc@5a500000 {
+ emmc: mmc@5a500000 {
compatible = "socionext,uniphier-sd-v2.91";
status = "disabled";
reg = <0x5a500000 0x200>;
@@ -379,7 +379,7 @@
interrupt-controller;
};
- aidet: aidet@61830000 {
+ aidet: interrupt-controller@61830000 {
compatible = "socionext,uniphier-sld8-aidet";
reg = <0x61830000 0x200>;
interrupt-controller;
@@ -402,7 +402,7 @@
};
};
- nand: nand@68000000 {
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5a";
status = "disabled";
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm/boot/dts/versatile-ab-ib2.dts b/arch/arm/boot/dts/versatile-ab-ib2.dts
index 5890cb974f78..c577ff4bb4be 100644
--- a/arch/arm/boot/dts/versatile-ab-ib2.dts
+++ b/arch/arm/boot/dts/versatile-ab-ib2.dts
@@ -10,7 +10,7 @@
model = "ARM Versatile AB + IB2 board";
/* Special IB2 control register */
- ib2_syscon@27000000 {
+ syscon@27000000 {
compatible = "arm,versatile-ib2-syscon", "syscon", "simple-mfd";
reg = <0x27000000 0x4>;
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index dfae90adbb7c..5c183483ec3b 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -19,7 +19,7 @@
*/
/ {
- smb@8000000 {
+ bus@8000000 {
motherboard {
model = "V2M-P1";
arm,hbi = <0x190>;
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index 2e29d7790497..5e48b641068a 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -19,7 +19,7 @@
*/
/ {
- smb@4000000 {
+ bus@4000000 {
motherboard {
model = "V2M-P1";
arm,hbi = <0x190>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 0dc4277d5f8b..f82fa34c90be 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -236,7 +236,7 @@
};
};
- smb@8000000 {
+ bus@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 1de0a658adf1..3ac95a179452 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -608,7 +608,7 @@
};
};
- smb: smb@8000000 {
+ smb: bus@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index d5b47d526f9e..7aa64ae25779 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -206,7 +206,7 @@
};
};
- smb: smb@8000000 {
+ smb: bus@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index d796efaadbe3..623246f37448 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -295,7 +295,7 @@
};
};
- smb: smb@4000000 {
+ smb: bus@4000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
index e2da122a63f4..c12a1b8bc086 100644
--- a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2014-2020 Toradex
*/
/ {
diff --git a/arch/arm/boot/dts/vf-colibri.dtsi b/arch/arm/boot/dts/vf-colibri.dtsi
index fba37b8756f7..cc1e069c44e6 100644
--- a/arch/arm/boot/dts/vf-colibri.dtsi
+++ b/arch/arm/boot/dts/vf-colibri.dtsi
@@ -1,42 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014 Toradex AG
+ * Copyright 2014-2020 Toradex
*
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/ {
diff --git a/arch/arm/boot/dts/vf500-colibri-eval-v3.dts b/arch/arm/boot/dts/vf500-colibri-eval-v3.dts
index 076998968fb5..088964f8dc4b 100644
--- a/arch/arm/boot/dts/vf500-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/vf500-colibri-eval-v3.dts
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2014-2020 Toradex
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/vf500-colibri.dtsi b/arch/arm/boot/dts/vf500-colibri.dtsi
index 92255f8893ce..8af7ed56e653 100644
--- a/arch/arm/boot/dts/vf500-colibri.dtsi
+++ b/arch/arm/boot/dts/vf500-colibri.dtsi
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2014-2020 Toradex
*/
#include "vf500.dtsi"
diff --git a/arch/arm/boot/dts/vf500.dtsi b/arch/arm/boot/dts/vf500.dtsi
index b0ec475017ad..0c0dd442300a 100644
--- a/arch/arm/boot/dts/vf500.dtsi
+++ b/arch/arm/boot/dts/vf500.dtsi
@@ -23,7 +23,7 @@
};
soc {
- aips-bus@40000000 {
+ bus@40000000 {
intc: interrupt-controller@40003000 {
compatible = "arm,cortex-a9-gic";
@@ -43,7 +43,7 @@
};
};
- aips-bus@40080000 {
+ bus@40080000 {
pmu@40089000 {
compatible = "arm,cortex-a5-pmu";
interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/vf610-colibri-eval-v3.dts b/arch/arm/boot/dts/vf610-colibri-eval-v3.dts
index ef9b4d6209f6..fb661e8a2dc6 100644
--- a/arch/arm/boot/dts/vf610-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/vf610-colibri-eval-v3.dts
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2014-2020 Toradex
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/vf610-colibri.dtsi b/arch/arm/boot/dts/vf610-colibri.dtsi
index 05c9a39509b8..607cec2df861 100644
--- a/arch/arm/boot/dts/vf610-colibri.dtsi
+++ b/arch/arm/boot/dts/vf610-colibri.dtsi
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
- * Copyright 2014 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2014-2020 Toradex
*/
#include "vf610.dtsi"
diff --git a/arch/arm/boot/dts/vf610-zii-cfu1.dts b/arch/arm/boot/dts/vf610-zii-cfu1.dts
index 28732249cfc0..ce1920c052fc 100644
--- a/arch/arm/boot/dts/vf610-zii-cfu1.dts
+++ b/arch/arm/boot/dts/vf610-zii-cfu1.dts
@@ -71,6 +71,14 @@
los-gpio = <&gpio4 4 GPIO_ACTIVE_HIGH>;
tx-disable-gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
};
+
+ supply-voltage-monitor {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 8>, /* 28VDC_IN */
+ <&adc0 9>, /* +3.3V */
+ <&adc1 8>, /* VCC_1V5 */
+ <&adc1 9>; /* VCC_1V2 */
+ };
};
&adc0 {
diff --git a/arch/arm/boot/dts/vf610-zii-dev.dtsi b/arch/arm/boot/dts/vf610-zii-dev.dtsi
index a1b4ccee2a10..95d0060fb56c 100644
--- a/arch/arm/boot/dts/vf610-zii-dev.dtsi
+++ b/arch/arm/boot/dts/vf610-zii-dev.dtsi
@@ -84,6 +84,14 @@
regulator-boot-on;
gpio = <&gpio0 6 0>;
};
+
+ supply-voltage-monitor {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 8>, /* VCC_1V5 */
+ <&adc0 9>, /* VCC_1V8 */
+ <&adc1 8>, /* VCC_1V0 */
+ <&adc1 9>; /* VCC_1V2 */
+ };
};
&adc0 {
diff --git a/arch/arm/boot/dts/vf610-zii-spb4.dts b/arch/arm/boot/dts/vf610-zii-spb4.dts
index 77e1484211e4..55b4201e27f6 100644
--- a/arch/arm/boot/dts/vf610-zii-spb4.dts
+++ b/arch/arm/boot/dts/vf610-zii-spb4.dts
@@ -42,6 +42,14 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ supply-voltage-monitor {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 8>, /* 28V_SW */
+ <&adc0 9>, /* +3.3V */
+ <&adc1 8>, /* VCC_1V5 */
+ <&adc1 9>; /* VCC_1V2 */
+ };
};
&adc0 {
diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
index 847c5858fea1..a6c22a79779e 100644
--- a/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
+++ b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
@@ -46,6 +46,14 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ supply-voltage-monitor {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 8>, /* 12V_MAIN */
+ <&adc0 9>, /* +3.3V */
+ <&adc1 8>, /* VCC_1V5 */
+ <&adc1 9>; /* VCC_1V2 */
+ };
};
&adc0 {
diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
index 453fce80f858..3d05c894bdc0 100644
--- a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
+++ b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
@@ -46,6 +46,14 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ supply-voltage-monitor {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 8>, /* 12V_MAIN */
+ <&adc0 9>, /* +3.3V */
+ <&adc1 8>, /* VCC_1V5 */
+ <&adc1 9>; /* VCC_1V2 */
+ };
};
&adc0 {
diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts
index d4bc0e3f2f11..2c2db47af441 100644
--- a/arch/arm/boot/dts/vf610m4-colibri.dts
+++ b/arch/arm/boot/dts/vf610m4-colibri.dts
@@ -1,45 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* Device tree for Colibri VF61 Cortex-M4 support
*
* Copyright (C) 2015 Stefan Agner
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 028e0ec30e0c..2d547e7b21ad 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -59,7 +59,7 @@
interrupt-parent = <&mscm_ir>;
ranges;
- aips0: aips-bus@40000000 {
+ aips0: bus@40000000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -471,7 +471,7 @@
};
};
- aips1: aips-bus@40080000 {
+ aips1: bus@40080000 {
compatible = "fsl,aips-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index f66bb98a5cce..4a0ba2ae1a25 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -118,6 +118,7 @@ CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_AT91SAM9X_WATCHDOG=y
+CONFIG_SAMA5D4_WATCHDOG=y
CONFIG_MFD_ATMEL_FLEXCOM=y
CONFIG_MFD_ATMEL_HLCDC=y
CONFIG_REGULATOR=y
diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig
index 6ea7dafa4c9e..46075216ee6d 100644
--- a/arch/arm/configs/axm55xx_defconfig
+++ b/arch/arm/configs/axm55xx_defconfig
@@ -236,5 +236,3 @@ CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_SHA256=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=y
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 0afcae9f7cf8..8e7a3ed2a4df 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -72,6 +72,7 @@ CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_NETDEVICES=y
+CONFIG_BCMGENET=y
CONFIG_USB_LAN78XX=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC95XX=y
@@ -96,9 +97,13 @@ CONFIG_SPI_BCM2835AUX=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_RASPBERRYPI_HWMON=m
CONFIG_THERMAL=y
+CONFIG_BCM2711_THERMAL=y
CONFIG_BCM2835_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_BCM2835_WDT=y
+CONFIG_MFD_SYSCON=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_GPIO=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_DRM=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index b5ba8d731a25..e849367c0566 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -158,7 +158,7 @@ CONFIG_VIDEO_TVP514X=m
CONFIG_VIDEO_ADV7343=m
CONFIG_DRM=m
CONFIG_DRM_TILCDC=m
-CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
CONFIG_DRM_TINYDRM=m
CONFIG_TINYDRM_ST7586=m
CONFIG_FB=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index c8e0c14092e8..3c3a00fc71f6 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -8,6 +8,7 @@ CONFIG_PERF_EVENTS=y
CONFIG_ARCH_EXYNOS=y
CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND=y
CONFIG_SMP=y
+CONFIG_SCHED_MC=y
CONFIG_BIG_LITTLE=y
CONFIG_NR_CPUS=8
CONFIG_HIGHMEM=y
@@ -17,6 +18,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
+CONFIG_ENERGY_MODEL=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
@@ -92,7 +94,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
@@ -111,11 +113,13 @@ CONFIG_USB_LAN78XX=m
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
+CONFIG_BRCMFMAC=m
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_KEYBOARD_SAMSUNG=y
+CONFIG_KEYBOARD_TM2_TOUCHKEY=y
CONFIG_KEYBOARD_CROS_EC=y
# CONFIG_MOUSE_PS2 is not set
CONFIG_MOUSE_CYAPA=y
@@ -204,6 +208,9 @@ CONFIG_V4L_TEST_DRIVERS=y
CONFIG_VIDEO_VIVID=m
CONFIG_CEC_PLATFORM_DRIVERS=y
CONFIG_VIDEO_SAMSUNG_S5P_CEC=m
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_S5K6A3=m
+CONFIG_VIDEO_S5C73M3=m
CONFIG_DRM=y
CONFIG_DRM_EXYNOS=y
CONFIG_DRM_EXYNOS_FIMD=y
@@ -276,6 +283,7 @@ CONFIG_MMC_DW_EXYNOS=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_AAT1290=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_PWM=y
CONFIG_LEDS_MAX77693=y
@@ -320,6 +328,7 @@ CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CRAMFS=y
+CONFIG_SQUASHFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 8f216a599735..5a20d12d62bd 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -179,6 +179,9 @@ CONFIG_MOUSE_PS2=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_TOUCHSCREEN_AD7879=y
+CONFIG_TOUCHSCREEN_AD7879_I2C=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_TOUCHSCREEN_DA9052=y
CONFIG_TOUCHSCREEN_EGALAX=y
CONFIG_TOUCHSCREEN_GOODIX=y
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 2f0a762dc3a0..a9755c501bec 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -55,7 +55,7 @@ CONFIG_SMC91X=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_DRM=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_MATROX=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 017d65f86eba..f8e45351c3f9 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -113,6 +113,7 @@ CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_ARM_ZYNQ_CPUIDLE=y
CONFIG_ARM_EXYNOS_CPUIDLE=y
+CONFIG_ARM_TEGRA_CPUIDLE=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_RASPBERRYPI_FIRMWARE=y
CONFIG_TRUSTED_FOUNDATIONS=y
@@ -640,6 +641,7 @@ CONFIG_CEC_PLATFORM_DRIVERS=y
CONFIG_VIDEO_SAMSUNG_S5P_CEC=m
CONFIG_VIDEO_ADV7180=m
CONFIG_VIDEO_ML86V7667=m
+CONFIG_IMX_IPUV3_CORE=m
CONFIG_DRM=y
# CONFIG_DRM_I2C_CH7006 is not set
# CONFIG_DRM_I2C_SIL164 is not set
@@ -655,6 +657,11 @@ CONFIG_ROCKCHIP_ANALOGIX_DP=y
CONFIG_ROCKCHIP_DW_HDMI=y
CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_DRM_IMX=m
+CONFIG_DRM_IMX_PARALLEL_DISPLAY=m
+CONFIG_DRM_IMX_TVE=m
+CONFIG_DRM_IMX_LDB=m
+CONFIG_DRM_IMX_HDMI=m
CONFIG_DRM_ATMEL_HLCDC=m
CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_RCAR_LVDS=y
@@ -670,11 +677,11 @@ CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m
CONFIG_DRM_PANEL_RAYDIUM_RM68200=m
CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_DUMB_VGA_DAC=m
CONFIG_DRM_NXP_PTN3460=m
CONFIG_DRM_PARADE_PS8622=m
CONFIG_DRM_SII902X=m
CONFIG_DRM_SII9234=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
CONFIG_DRM_TOSHIBA_TC358764=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_I2C_ADV7511_AUDIO=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 847f9874ccc4..3cc3ca5fa027 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -56,6 +56,8 @@ CONFIG_CPUFREQ_DT=m
# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
CONFIG_ARM_TI_CPUFREQ=y
CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_DT_IDLE_STATES=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_DEBUG=y
CONFIG_ARM_CRYPTO=y
@@ -78,6 +80,8 @@ CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=y
CONFIG_CMA=y
+CONFIG_ZSMALLOC=m
+CONFIG_PGTABLE_MAPPING=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -130,6 +134,7 @@ CONFIG_PCI_EPF_TEST=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_OMAP_OCP2SCP=y
+CONFIG_SIMPLE_PM_BUS=y
CONFIG_CONNECTOR=m
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -139,7 +144,6 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_M25P80=m
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
CONFIG_MTD_ONENAND_OMAP2=y
@@ -149,6 +153,8 @@ CONFIG_MTD_NAND_OMAP2=y
CONFIG_MTD_NAND_OMAP_BCH=y
CONFIG_MTD_SPI_NOR=m
CONFIG_MTD_UBI=y
+CONFIG_ZRAM=m
+CONFIG_ZRAM_WRITEBACK=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
@@ -186,10 +192,10 @@ CONFIG_TI_CPSW_SWITCHDEV=y
CONFIG_TI_CPTS=y
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_AT803X_PHY=y
CONFIG_DP83848_PHY=y
CONFIG_DP83867_PHY=y
CONFIG_MICREL_PHY=y
+CONFIG_AT803X_PHY=y
CONFIG_SMSC_PHY=y
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -278,6 +284,7 @@ CONFIG_GPIO_PALMAS=y
CONFIG_GPIO_TWL4030=y
CONFIG_W1=m
CONFIG_HDQ_MASTER_OMAP=m
+CONFIG_W1_SLAVE_DS250X=m
CONFIG_POWER_AVS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO=y
@@ -350,20 +357,19 @@ CONFIG_DRM_OMAP=m
CONFIG_OMAP5_DSS_HDMI=y
CONFIG_OMAP2_DSS_SDI=y
CONFIG_OMAP2_DSS_DSI=y
-CONFIG_DRM_OMAP_ENCODER_OPA362=m
-CONFIG_DRM_OMAP_ENCODER_TPD12S015=m
-CONFIG_DRM_OMAP_CONNECTOR_HDMI=m
-CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
CONFIG_DRM_OMAP_PANEL_DSI_CM=m
CONFIG_DRM_TILCDC=m
CONFIG_DRM_PANEL_SIMPLE=m
-CONFIG_DRM_TI_TFP410=m
CONFIG_DRM_PANEL_LG_LB035Q02=m
CONFIG_DRM_PANEL_NEC_NL8048HL11=m
CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
CONFIG_DRM_PANEL_SONY_ACX565AKM=m
CONFIG_DRM_PANEL_TPO_TD028TTEC1=m
CONFIG_DRM_PANEL_TPO_TD043MTEA1=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
+CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_TI_TPD12S015=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
@@ -483,6 +489,8 @@ CONFIG_RTC_DRV_PALMAS=m
CONFIG_RTC_DRV_OMAP=m
CONFIG_RTC_DRV_CPCAP=m
CONFIG_DMADEVICES=y
+CONFIG_CLK_TWL6040=m
+CONFIG_COMMON_CLK_PALMAS=m
CONFIG_OMAP_IOMMU=y
CONFIG_REMOTEPROC=y
CONFIG_OMAP_REMOTEPROC=m
@@ -499,7 +507,9 @@ CONFIG_IIO_SW_DEVICE=m
CONFIG_IIO_SW_TRIGGER=m
CONFIG_IIO_ST_ACCEL_3AXIS=m
CONFIG_CPCAP_ADC=m
+CONFIG_INA2XX_ADC=m
CONFIG_TI_AM335X_ADC=m
+CONFIG_SENSORS_ISL29028=m
CONFIG_BMP280=m
CONFIG_PWM=y
CONFIG_PWM_OMAP_DMTIMER=m
@@ -515,6 +525,7 @@ CONFIG_TI_PIPE3=y
CONFIG_TWL4030_USB=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_FANOTIFY=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
@@ -553,10 +564,10 @@ CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_PRINTK_TIME=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_SPLIT=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_SCHEDSTATS=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index cdc75ef7d529..c882167e1496 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -50,6 +50,11 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
+CONFIG_QRTR=m
+CONFIG_QRTR_SMD=m
+CONFIG_BT=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_BCM=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_RFKILL=y
@@ -111,6 +116,7 @@ CONFIG_SERIO_LIBPS2=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig
index 3b82b64950d9..c090643b1ecb 100644
--- a/arch/arm/configs/rpc_defconfig
+++ b/arch/arm/configs/rpc_defconfig
@@ -32,7 +32,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 73ed73a8785a..153009130dab 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -202,7 +202,6 @@ CONFIG_EEPROM_AT24=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 64fa849f8bbe..838307a9bb92 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -125,9 +125,9 @@ CONFIG_VIDEO_ML86V7667=y
CONFIG_DRM=y
CONFIG_DRM_RCAR_DU=y
CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_DUMB_VGA_DAC=y
CONFIG_DRM_LVDS_CODEC=y
CONFIG_DRM_SII902X=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_DRM_I2C_ADV7511_AUDIO=y
CONFIG_FB_SH_MOBILE_LCDC=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index e9fb57374b9f..61b8be19e527 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -101,7 +101,7 @@ CONFIG_RC_DEVICES=y
CONFIG_IR_SUNXI=y
CONFIG_DRM=y
CONFIG_DRM_SUN4I=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_FB_SIMPLE=y
CONFIG_SOUND=y
CONFIG_SND=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index a27592d3b1fa..aa94369bdd0f 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -25,6 +25,7 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPUFREQ_DT=y
CONFIG_CPU_IDLE=y
+CONFIG_ARM_TEGRA_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_TRUSTED_FOUNDATIONS=y
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index fe4d4b596585..767935337413 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -59,7 +59,7 @@ CONFIG_GPIO_PL061=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_ARM_VERSATILE=y
CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
index 31e1f538df7d..8d7f4bcaec2c 100644
--- a/arch/arm/crypto/.gitignore
+++ b/arch/arm/crypto/.gitignore
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
aesbs-core.S
sha256-core.S
sha512-core.S
+poly1305-core.S
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index e85839a8aaeb..e6fd32919c81 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -138,6 +138,7 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
kernel_neon_begin();
aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
kernel_neon_end();
+ memzero_explicit(&rk, sizeof(rk));
return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
}
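
The memzero_explicit() call added here wipes the expanded key schedule
(rk lives on the stack) before the function returns; a plain memset()
of a variable that is never read again may be deleted by the compiler
as a dead store. A minimal user-space sketch of the same idea, with
secure_wipe() as a hypothetical stand-in for the kernel helper:

    #include <string.h>

    /* The empty asm consumes the pointer and clobbers memory, so the
     * compiler must assume the zeroed bytes are observed and cannot
     * drop the memset(). */
    static void secure_wipe(void *p, size_t n)
    {
            memset(p, 0, n);
            __asm__ __volatile__("" : : "r" (p) : "memory");
    }
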
diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S
index 534c9647726d..9f51e3fa4526 100644
--- a/arch/arm/crypto/ghash-ce-core.S
+++ b/arch/arm/crypto/ghash-ce-core.S
@@ -8,6 +8,9 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+ .arch armv8-a
+ .fpu crypto-neon-fp-armv8
+
SHASH .req q0
T1 .req q1
XL .req q2
@@ -88,8 +91,6 @@
T3_H .req d17
.text
- .arch armv8-a
- .fpu crypto-neon-fp-armv8
.macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
vmull.p64 \rd, \rn, \rm
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index fa579b23b4df..383635b68763 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,22 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += compat.h
-generic-y += current.h
generic-y += early_ioremap.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
generic-y += flat.h
-generic-y += irq_regs.h
-generic-y += kdebug.h
-generic-y += local.h
generic-y += local64.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += parport.h
-generic-y += preempt.h
generic-y += seccomp.h
-generic-y += serial.h
-generic-y += trace_clock.h
generated-y += mach-types.h
generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index c815477b4303..413abfb42989 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -38,71 +38,6 @@
#define ICC_AP1R2 __ICC_AP1Rx(2)
#define ICC_AP1R3 __ICC_AP1Rx(3)
-#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
-
-#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
-#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
-#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
-#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
-#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
-#define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
-#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
-
-#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
-#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)
-
-#define ICH_LR0 __LR0(0)
-#define ICH_LR1 __LR0(1)
-#define ICH_LR2 __LR0(2)
-#define ICH_LR3 __LR0(3)
-#define ICH_LR4 __LR0(4)
-#define ICH_LR5 __LR0(5)
-#define ICH_LR6 __LR0(6)
-#define ICH_LR7 __LR0(7)
-#define ICH_LR8 __LR8(0)
-#define ICH_LR9 __LR8(1)
-#define ICH_LR10 __LR8(2)
-#define ICH_LR11 __LR8(3)
-#define ICH_LR12 __LR8(4)
-#define ICH_LR13 __LR8(5)
-#define ICH_LR14 __LR8(6)
-#define ICH_LR15 __LR8(7)
-
-/* LR top half */
-#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
-#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)
-
-#define ICH_LRC0 __LRC0(0)
-#define ICH_LRC1 __LRC0(1)
-#define ICH_LRC2 __LRC0(2)
-#define ICH_LRC3 __LRC0(3)
-#define ICH_LRC4 __LRC0(4)
-#define ICH_LRC5 __LRC0(5)
-#define ICH_LRC6 __LRC0(6)
-#define ICH_LRC7 __LRC0(7)
-#define ICH_LRC8 __LRC8(0)
-#define ICH_LRC9 __LRC8(1)
-#define ICH_LRC10 __LRC8(2)
-#define ICH_LRC11 __LRC8(3)
-#define ICH_LRC12 __LRC8(4)
-#define ICH_LRC13 __LRC8(5)
-#define ICH_LRC14 __LRC8(6)
-#define ICH_LRC15 __LRC8(7)
-
-#define __ICH_AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
-#define ICH_AP0R0 __ICH_AP0Rx(0)
-#define ICH_AP0R1 __ICH_AP0Rx(1)
-#define ICH_AP0R2 __ICH_AP0Rx(2)
-#define ICH_AP0R3 __ICH_AP0Rx(3)
-
-#define __ICH_AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
-#define ICH_AP1R0 __ICH_AP1Rx(0)
-#define ICH_AP1R1 __ICH_AP1Rx(1)
-#define ICH_AP1R2 __ICH_AP1Rx(2)
-#define ICH_AP1R3 __ICH_AP1Rx(3)
-
-/* A32-to-A64 mappings used by VGIC save/restore */
-
#define CPUIF_MAP(a32, a64) \
static inline void write_ ## a64(u32 val) \
{ \
@@ -113,21 +48,6 @@ static inline u32 read_ ## a64(void) \
return read_sysreg(a32); \
} \
-#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
-static inline void write_ ## a64(u64 val) \
-{ \
- write_sysreg(lower_32_bits(val), a32lo);\
- write_sysreg(upper_32_bits(val), a32hi);\
-} \
-static inline u64 read_ ## a64(void) \
-{ \
- u64 val = read_sysreg(a32lo); \
- \
- val |= (u64)read_sysreg(a32hi) << 32; \
- \
- return val; \
-}
-
CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
@@ -138,40 +58,6 @@ CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
-CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
-CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
-CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
-CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
-CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
-CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
-CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
-CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
-CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
-CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
-CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
-CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
-CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
-CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
-CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
-CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
-
-CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
-CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
-CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
-CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
-CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
-CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
-CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
-CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
-CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
-CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
-CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
-CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
-CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
-CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
-CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
-CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
-
#define read_gicreg(r) read_##r()
#define write_gicreg(v, r) write_##r(v)
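
For reference, each CPUIF_MAP() invocation kept above generates a
read/write accessor pair that backs an AArch64 register name with the
32-bit CP15 register. Roughly, CPUIF_MAP(ICC_PMR, ICC_PMR_EL1) expands
to the sketch below (read_sysreg/write_sysreg as defined by the CP15
accessors elsewhere in the tree):

    static inline void write_ICC_PMR_EL1(u32 val)
    {
            write_sysreg(val, ICC_PMR);     /* 32-bit CP15 access */
    }

    static inline u32 read_ICC_PMR_EL1(void)
    {
            return read_sysreg(ICC_PMR);
    }
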
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 772f48ef84b7..86405cc81385 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
-int arm_dma_supported(struct device *dev, u64 mask);
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
deleted file mode 100644
index 9c04bd810d07..000000000000
--- a/arch/arm/include/asm/kvm_arm.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ARM_H__
-#define __ARM_KVM_ARM_H__
-
-#include <linux/const.h>
-#include <linux/types.h>
-
-/* Hyp Configuration Register (HCR) bits */
-#define HCR_TGE (1 << 27)
-#define HCR_TVM (1 << 26)
-#define HCR_TTLB (1 << 25)
-#define HCR_TPU (1 << 24)
-#define HCR_TPC (1 << 23)
-#define HCR_TSW (1 << 22)
-#define HCR_TAC (1 << 21)
-#define HCR_TIDCP (1 << 20)
-#define HCR_TSC (1 << 19)
-#define HCR_TID3 (1 << 18)
-#define HCR_TID2 (1 << 17)
-#define HCR_TID1 (1 << 16)
-#define HCR_TID0 (1 << 15)
-#define HCR_TWE (1 << 14)
-#define HCR_TWI (1 << 13)
-#define HCR_DC (1 << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (1 << 10)
-#define HCR_FB (1 << 9)
-#define HCR_VA (1 << 8)
-#define HCR_VI (1 << 7)
-#define HCR_VF (1 << 6)
-#define HCR_AMO (1 << 5)
-#define HCR_IMO (1 << 4)
-#define HCR_FMO (1 << 3)
-#define HCR_PTW (1 << 2)
-#define HCR_SWIO (1 << 1)
-#define HCR_VM 1
-
-/*
- * The bits we set in HCR:
- * TAC: Trap ACTLR
- * TSC: Trap SMC
- * TVM: Trap VM ops (until MMU and caches are on)
- * TSW: Trap cache operations by set/way
- * TWI: Trap WFI
- * TWE: Trap WFE
- * TIDCP: Trap L2CTLR/L2ECTLR
- * BSU_IS: Upgrade barriers to the inner shareable domain
- * FB: Force broadcast of all maintenance operations
- * AMO: Override CPSR.A and enable signaling with VA
- * IMO: Override CPSR.I and enable signaling with VI
- * FMO: Override CPSR.F and enable signaling with VF
- * SWIO: Turn set/way invalidates into set/way clean+invalidate
- */
-#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
- HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
-
-/* System Control Register (SCTLR) bits */
-#define SCTLR_TE (1 << 30)
-#define SCTLR_EE (1 << 25)
-#define SCTLR_V (1 << 13)
-
-/* Hyp System Control Register (HSCTLR) bits */
-#define HSCTLR_TE (1 << 30)
-#define HSCTLR_EE (1 << 25)
-#define HSCTLR_FI (1 << 21)
-#define HSCTLR_WXN (1 << 19)
-#define HSCTLR_I (1 << 12)
-#define HSCTLR_C (1 << 2)
-#define HSCTLR_A (1 << 1)
-#define HSCTLR_M 1
-#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
- HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
-
-/* TTBCR and HTCR Registers bits */
-#define TTBCR_EAE (1 << 31)
-#define TTBCR_IMP (1 << 30)
-#define TTBCR_SH1 (3 << 28)
-#define TTBCR_ORGN1 (3 << 26)
-#define TTBCR_IRGN1 (3 << 24)
-#define TTBCR_EPD1 (1 << 23)
-#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (7 << 16)
-#define TTBCR_SH0 (3 << 12)
-#define TTBCR_ORGN0 (3 << 10)
-#define TTBCR_IRGN0 (3 << 8)
-#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ (7 << 0)
-#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
-
-/* Hyp System Trap Register */
-#define HSTR_T(x) (1 << x)
-#define HSTR_TTEE (1 << 16)
-#define HSTR_TJDBX (1 << 17)
-
-/* Hyp Coprocessor Trap Register */
-#define HCPTR_TCP(x) (1 << x)
-#define HCPTR_TCP_MASK (0x3fff)
-#define HCPTR_TASE (1 << 15)
-#define HCPTR_TTA (1 << 20)
-#define HCPTR_TCPAC (1 << 31)
-
-/* Hyp Debug Configuration Register bits */
-#define HDCR_TDRA (1 << 11)
-#define HDCR_TDOSA (1 << 10)
-#define HDCR_TDA (1 << 9)
-#define HDCR_TDE (1 << 8)
-#define HDCR_HPME (1 << 7)
-#define HDCR_TPM (1 << 6)
-#define HDCR_TPMCR (1 << 5)
-#define HDCR_HPMN_MASK (0x1F)
-
-/*
- * The architecture supports 40-bit IPA as input to the 2nd stage translations
- * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
- * space.
- */
-#define KVM_PHYS_SHIFT (40)
-
-#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
-
-/* Virtualization Translation Control Register (VTCR) bits */
-#define VTCR_SH0 (3 << 12)
-#define VTCR_ORGN0 (3 << 10)
-#define VTCR_IRGN0 (3 << 8)
-#define VTCR_SL0 (3 << 6)
-#define VTCR_S (1 << 4)
-#define VTCR_T0SZ (0xf)
-#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
- VTCR_S | VTCR_T0SZ)
-#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
-#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
-#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
-#define KVM_VTCR_SL0 VTCR_SL_L1
-/* stage-2 input address range defined as 2^(32-T0SZ) */
-#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
-#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
-#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
-
-/* Virtualization Translation Table Base Register (VTTBR) bits */
-#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
-#define VTTBR_X (14 - KVM_T0SZ)
-#else
-#define VTTBR_X (5 - KVM_T0SZ)
-#endif
-#define VTTBR_CNP_BIT _AC(1, UL)
-#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
-#define VTTBR_VMID_SHIFT _AC(48, ULL)
-#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
-
-/* Hyp Syndrome Register (HSR) bits */
-#define HSR_EC_SHIFT (26)
-#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
-#define HSR_IL (_AC(1, UL) << 25)
-#define HSR_ISS (HSR_IL - 1)
-#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
-#define HSR_SRT_SHIFT (16)
-#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
-#define HSR_CM (1 << 8)
-#define HSR_FSC (0x3f)
-#define HSR_FSC_TYPE (0x3c)
-#define HSR_SSE (1 << 21)
-#define HSR_WNR (1 << 6)
-#define HSR_CV_SHIFT (24)
-#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
-#define HSR_COND_SHIFT (20)
-#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
-
-#define FSC_FAULT (0x04)
-#define FSC_ACCESS (0x08)
-#define FSC_PERM (0x0c)
-#define FSC_SEA (0x10)
-#define FSC_SEA_TTW0 (0x14)
-#define FSC_SEA_TTW1 (0x15)
-#define FSC_SEA_TTW2 (0x16)
-#define FSC_SEA_TTW3 (0x17)
-#define FSC_SECC (0x18)
-#define FSC_SECC_TTW0 (0x1c)
-#define FSC_SECC_TTW1 (0x1d)
-#define FSC_SECC_TTW2 (0x1e)
-#define FSC_SECC_TTW3 (0x1f)
-
-/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xf)
-
-#define HSR_EC_UNKNOWN (0x00)
-#define HSR_EC_WFI (0x01)
-#define HSR_EC_CP15_32 (0x03)
-#define HSR_EC_CP15_64 (0x04)
-#define HSR_EC_CP14_MR (0x05)
-#define HSR_EC_CP14_LS (0x06)
-#define HSR_EC_CP_0_13 (0x07)
-#define HSR_EC_CP10_ID (0x08)
-#define HSR_EC_JAZELLE (0x09)
-#define HSR_EC_BXJ (0x0A)
-#define HSR_EC_CP14_64 (0x0C)
-#define HSR_EC_SVC_HYP (0x11)
-#define HSR_EC_HVC (0x12)
-#define HSR_EC_SMC (0x13)
-#define HSR_EC_IABT (0x20)
-#define HSR_EC_IABT_HYP (0x21)
-#define HSR_EC_DABT (0x24)
-#define HSR_EC_DABT_HYP (0x25)
-#define HSR_EC_MAX (0x3f)
-
-#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
-
-#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
-
-#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
-#define HSR_DABT_CM (_AC(1, UL) << 8)
-
-#define kvm_arm_exception_type \
- {0, "RESET" }, \
- {1, "UNDEFINED" }, \
- {2, "SOFTWARE" }, \
- {3, "PREF_ABORT" }, \
- {4, "DATA_ABORT" }, \
- {5, "IRQ" }, \
- {6, "FIQ" }, \
- {7, "HVC" }
-
-#define HSRECN(x) { HSR_EC_##x, #x }
-
-#define kvm_arm_exception_class \
- HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
- HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
- HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
- HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
- HSRECN(DABT), HSRECN(DABT_HYP)
-
-
-#endif /* __ARM_KVM_ARM_H__ */
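
The header removed above defined the AArch32 Hyp Syndrome Register
layout: EC in bits [31:26], IL in bit 25, and the class-specific ISS
in bits [24:0]. A minimal sketch of decoding a syndrome with those
HSR_* constants (the helper names are illustrative only):

    static inline u32 hsr_exception_class(u32 hsr)
    {
            return (hsr & HSR_EC) >> HSR_EC_SHIFT;  /* e.g. HSR_EC_DABT */
    }

    static inline bool hsr_is_32bit_instr(u32 hsr)
    {
            return hsr & HSR_IL;    /* clear for a 16-bit Thumb instr */
    }

    static inline u32 hsr_iss(u32 hsr)
    {
            return hsr & HSR_ISS;   /* meaning depends on the class */
    }
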
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
deleted file mode 100644
index f615830f9f57..000000000000
--- a/arch/arm/include/asm/kvm_asm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ASM_H__
-#define __ARM_KVM_ASM_H__
-
-#include <asm/virt.h>
-
-#define ARM_EXIT_WITH_ABORT_BIT 31
-#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
-#define ARM_EXCEPTION_IS_TRAP(x) \
- (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \
- ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \
- ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
-#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
-
-#define ARM_EXCEPTION_RESET 0
-#define ARM_EXCEPTION_UNDEFINED 1
-#define ARM_EXCEPTION_SOFTWARE 2
-#define ARM_EXCEPTION_PREF_ABORT 3
-#define ARM_EXCEPTION_DATA_ABORT 4
-#define ARM_EXCEPTION_IRQ 5
-#define ARM_EXCEPTION_FIQ 6
-#define ARM_EXCEPTION_HVC 7
-#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
-/*
- * The rr_lo_hi macro swaps a pair of registers depending on
- * current endianness. It is used in conjunction with ldrd and strd
- * instructions that load/store a 64-bit value from/to memory to/from
- * a pair of registers which are used with the mrrc and mcrr instructions.
- * If used with the ldrd/strd instructions, the a1 parameter is the first
- * source/destination register and the a2 parameter is the second
- * source/destination register. Note that the ldrd/strd instructions
- * already swap the bytes within the words correctly according to the
- * endianness setting, but the order of the registers needs to be effectively
- * swapped when used with the mrrc/mcrr instructions.
- */
-#ifdef CONFIG_CPU_ENDIAN_BE8
-#define rr_lo_hi(a1, a2) a2, a1
-#else
-#define rr_lo_hi(a1, a2) a1, a2
-#endif
-
-#define kvm_ksym_ref(kva) (kva)
-
-#ifndef __ASSEMBLY__
-struct kvm;
-struct kvm_vcpu;
-
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
-
-extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
-
-extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
-
-/* no VHE on 32-bit :( */
-static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
-
-extern void __init_stage2_translation(void);
-
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
-extern void __vgic_v3_init_lrs(void);
-
-#endif
-
-#endif /* __ARM_KVM_ASM_H__ */
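
One detail of the removed header worth a gloss: an exit reason packs
an ARM_EXCEPTION_* code in the low bits and a pending-abort flag in
bit 31, which ARM_EXCEPTION_CODE() strips and ARM_ABORT_PENDING()
tests. A sketch of the encoding side (make_exit_code() is
illustrative, not a kernel helper):

    static inline u32 make_exit_code(u32 code, bool abort_pending)
    {
            /* e.g. ARM_EXCEPTION_DATA_ABORT with bit 31 set */
            return code | ((u32)abort_pending << ARM_EXIT_WITH_ABORT_BIT);
    }
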
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
deleted file mode 100644
index a23826117dd6..000000000000
--- a/arch/arm/include/asm/kvm_coproc.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Rusty Russell IBM Corporation
- */
-
-#ifndef __ARM_KVM_COPROC_H__
-#define __ARM_KVM_COPROC_H__
-#include <linux/kvm_host.h>
-
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_coproc_target_table {
- unsigned target;
- const struct coproc_reg *table;
- size_t num;
-};
-void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
-
-int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-void kvm_coproc_table_init(void);
-
-struct kvm_one_reg;
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
deleted file mode 100644
index 3944305e81df..000000000000
--- a/arch/arm/include/asm/kvm_emulate.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_EMULATE_H__
-#define __ARM_KVM_EMULATE_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/cputype.h>
-
-/* arm64 compatibility macros */
-#define PSR_AA32_MODE_FIQ FIQ_MODE
-#define PSR_AA32_MODE_SVC SVC_MODE
-#define PSR_AA32_MODE_ABT ABT_MODE
-#define PSR_AA32_MODE_UND UND_MODE
-#define PSR_AA32_T_BIT PSR_T_BIT
-#define PSR_AA32_F_BIT PSR_F_BIT
-#define PSR_AA32_I_BIT PSR_I_BIT
-#define PSR_AA32_A_BIT PSR_A_BIT
-#define PSR_AA32_E_BIT PSR_E_BIT
-#define PSR_AA32_IT_MASK PSR_IT_MASK
-#define PSR_AA32_GE_MASK 0x000f0000
-#define PSR_AA32_DIT_BIT 0x00200000
-#define PSR_AA32_PAN_BIT 0x00400000
-#define PSR_AA32_SSBS_BIT 0x00800000
-#define PSR_AA32_Q_BIT PSR_Q_BIT
-#define PSR_AA32_V_BIT PSR_V_BIT
-#define PSR_AA32_C_BIT PSR_C_BIT
-#define PSR_AA32_Z_BIT PSR_Z_BIT
-#define PSR_AA32_N_BIT PSR_N_BIT
-
-unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-
-static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
-{
- return vcpu_reg(vcpu, reg_num);
-}
-
-unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
-
-static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
-{
- return *__vcpu_spsr(vcpu);
-}
-
-static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
-{
- *__vcpu_spsr(vcpu) = v;
-}
-
-static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
-{
- return spsr;
-}
-
-static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
- u8 reg_num)
-{
- return *vcpu_reg(vcpu, reg_num);
-}
-
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
- unsigned long val)
-{
- *vcpu_reg(vcpu, reg_num) = val;
-}
-
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
-void kvm_inject_undef32(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-
-static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
-{
- kvm_inject_undef32(vcpu);
-}
-
-static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- kvm_inject_dabt32(vcpu, addr);
-}
-
-static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- kvm_inject_pabt32(vcpu, addr);
-}
-
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
-{
- return kvm_condition_valid32(vcpu);
-}
-
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- kvm_skip_instr32(vcpu, is_wide_instr);
-}
-
-static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr = HCR_GUEST_MASK;
-}
-
-static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
-{
- return (unsigned long *)&vcpu->arch.hcr;
-}
-
-static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr &= ~HCR_TWE;
-}
-
-static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr |= HCR_TWE;
-}
-
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
-{
- return true;
-}
-
-static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
-}
-
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
-{
- return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
-}
-
-static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-}
-
-static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
-}
-
-static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return cpsr_mode > USR_MODE;
-}
-
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hsr;
-}
-
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
-{
- u32 hsr = kvm_vcpu_get_hsr(vcpu);
-
- if (hsr & HSR_CV)
- return (hsr & HSR_COND) >> HSR_COND_SHIFT;
-
- return -1;
-}
-
-static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hxfar;
-}
-
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
-{
- return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
-}
-
-static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
-}
-
-static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
-}
-
-static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
-}
-
-static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
-}
-
-static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
-{
- return false;
-}
-
-static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
-{
- return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
-}
-
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
-}
-
-static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
-}
-
-/* Get Access Size from a data abort */
-static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
-{
- switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
- case 0:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- default:
- kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
- return 4;
- }
-}
-
-/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
-}
-
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
-}
-
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
-}
-
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
- switch (kvm_vcpu_trap_get_fault(vcpu)) {
- case FSC_SEA:
- case FSC_SEA_TTW0:
- case FSC_SEA_TTW1:
- case FSC_SEA_TTW2:
- case FSC_SEA_TTW3:
- case FSC_SECC:
- case FSC_SECC_TTW0:
- case FSC_SECC_TTW1:
- case FSC_SECC_TTW2:
- case FSC_SECC_TTW3:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
-static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
-}
-
-static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
-{
- return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
-}
-
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
- return false;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
- bool flag)
-{
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
- return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- default:
- return be32_to_cpu(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return le16_to_cpu(data & 0xffff);
- default:
- return le32_to_cpu(data);
- }
- }
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- default:
- return cpu_to_be32(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_le16(data & 0xffff);
- default:
- return cpu_to_le32(data);
- }
- }
-}
-
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
-
-#endif /* __ARM_KVM_EMULATE_H__ */
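
Two helpers in the file deleted above repay a second look.
kvm_vcpu_dabt_get_as() decodes the SAS field in HSR[23:22], which is
simply log2 of the access size in bytes, and vcpu_data_guest_to_host()
byte-swaps MMIO data for big-endian guests. A sketch of the SAS decode
written as arithmetic rather than a switch (same behaviour, including
the fallback for the reserved 0b11 encoding):

    static inline unsigned int dabt_access_size(u32 hsr)
    {
            unsigned int sas = (hsr >> 22) & 0x3;   /* 0, 1 or 2 */

            return (sas == 3) ? 4 : (1u << sas);    /* 1, 2 or 4 bytes */
    }
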
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
deleted file mode 100644
index a827b4d60d38..000000000000
--- a/arch/arm/include/asm/kvm_host.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_HOST_H__
-#define __ARM_KVM_HOST_H__
-
-#include <linux/arm-smccc.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/kvm_types.h>
-#include <asm/cputype.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/fpstate.h>
-#include <kvm/arm_arch_timer.h>
-
-#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_HAVE_ONE_REG
-#define KVM_HALT_POLL_NS_DEFAULT 500000
-
-#define KVM_VCPU_MAX_FEATURES 2
-
-#include <kvm/arm_vgic.h>
-
-
-#ifdef CONFIG_ARM_GIC_V3
-#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#else
-#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
-#endif
-
-#define KVM_REQ_SLEEP \
- KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-
-DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-
-static inline int kvm_arm_init_sve(void) { return 0; }
-
-u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
-int __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_vmid {
- /* The VMID generation used for the virt. memory system */
- u64 vmid_gen;
- u32 vmid;
-};
-
-struct kvm_arch {
- /* The last vcpu id that ran on each physical CPU */
- int __percpu *last_vcpu_ran;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* The VMID generation used for the virt. memory system */
- struct kvm_vmid vmid;
-
- /* Stage-2 page table */
- pgd_t *pgd;
- phys_addr_t pgd_phys;
-
- /* Interrupt controller */
- struct vgic_dist vgic;
- int max_vcpus;
-
- /* Mandated version of PSCI */
- u32 psci_version;
-
- /*
- * If we encounter a data abort without valid instruction syndrome
- * information, report this to user space. User space can (and
- * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
- * supported.
- */
- bool return_nisv_io_abort_to_user;
-};
-
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
-struct kvm_vcpu_fault_info {
- u32 hsr; /* Hyp Syndrome Register */
- u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
- u32 hpfar; /* Hyp IPA Fault Address Register */
-};
-
-/*
- * 0 is reserved as an invalid value.
- * Order should be kept in sync with the save/restore code.
- */
-enum vcpu_sysreg {
- __INVALID_SYSREG__,
- c0_MPIDR, /* MultiProcessor ID Register */
- c0_CSSELR, /* Cache Size Selection Register */
- c1_SCTLR, /* System Control Register */
- c1_ACTLR, /* Auxiliary Control Register */
- c1_CPACR, /* Coprocessor Access Control */
- c2_TTBR0, /* Translation Table Base Register 0 */
- c2_TTBR0_high, /* TTBR0 top 32 bits */
- c2_TTBR1, /* Translation Table Base Register 1 */
- c2_TTBR1_high, /* TTBR1 top 32 bits */
- c2_TTBCR, /* Translation Table Base Control R. */
- c3_DACR, /* Domain Access Control Register */
- c5_DFSR, /* Data Fault Status Register */
- c5_IFSR, /* Instruction Fault Status Register */
- c5_ADFSR, /* Auxiliary Data Fault Status R */
- c5_AIFSR, /* Auxiliary Instruction Fault Status R */
- c6_DFAR, /* Data Fault Address Register */
- c6_IFAR, /* Instruction Fault Address Register */
- c7_PAR, /* Physical Address Register */
- c7_PAR_high, /* PAR top 32 bits */
- c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
- c10_PRRR, /* Primary Region Remap Register */
- c10_NMRR, /* Normal Memory Remap Register */
- c12_VBAR, /* Vector Base Address Register */
- c13_CID, /* Context ID Register */
- c13_TID_URW, /* Thread ID, User R/W */
- c13_TID_URO, /* Thread ID, User R/O */
- c13_TID_PRIV, /* Thread ID, Privileged */
- c14_CNTKCTL, /* Timer Control Register (PL1) */
- c10_AMAIR0, /* Auxiliary Memory Attribute Indirection Reg0 */
- c10_AMAIR1, /* Auxiliary Memory Attribute Indirection Reg1 */
- NR_CP15_REGS /* Number of regs (incl. invalid) */
-};
-
-struct kvm_cpu_context {
- struct kvm_regs gp_regs;
- struct vfp_hard_struct vfp;
- u32 cp15[NR_CP15_REGS];
-};
-
-struct kvm_host_data {
- struct kvm_cpu_context host_ctxt;
-};
-
-typedef struct kvm_host_data kvm_host_data_t;
-
-static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
-{
- /* The host's MPIDR is immutable, so let's set it up at boot time */
- cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
-}
-
-struct vcpu_reset_state {
- unsigned long pc;
- unsigned long r0;
- bool be;
- bool reset;
-};
-
-struct kvm_vcpu_arch {
- struct kvm_cpu_context ctxt;
-
- int target; /* Processor target */
- DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
-
- /* The CPU type we expose to the VM */
- u32 midr;
-
- /* HYP trapping configuration */
- u32 hcr;
-
- /* Exception Information */
- struct kvm_vcpu_fault_info fault;
-
- /* Host FP context */
- struct kvm_cpu_context *host_cpu_context;
-
- /* VGIC state */
- struct vgic_cpu vgic_cpu;
- struct arch_timer_cpu timer_cpu;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* vcpu power-off state */
- bool power_off;
-
- /* Don't run the guest (internal implementation need) */
- bool pause;
-
- /* Cache some mmu pages needed inside spinlock regions */
- struct kvm_mmu_memory_cache mmu_page_cache;
-
- struct vcpu_reset_state reset_state;
-
- /* Detect first run of a vcpu */
- bool has_run_once;
-};
-
-struct kvm_vm_stat {
- ulong remote_tlb_flush;
-};
-
-struct kvm_vcpu_stat {
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
- u64 hvc_exit_stat;
- u64 wfe_exit_stat;
- u64 wfi_exit_stat;
- u64 mmio_exit_user;
- u64 mmio_exit_kernel;
- u64 exits;
-};
-
-#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
-
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-
-unsigned long __kvm_call_hyp(void *hypfn, ...);
-
-/*
- * The has_vhe() part doesn't get emitted, but is used for type-checking.
- */
-#define kvm_call_hyp(f, ...) \
- do { \
- if (has_vhe()) { \
- f(__VA_ARGS__); \
- } else { \
- __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
- } \
- } while(0)
-
-#define kvm_call_hyp_ret(f, ...) \
- ({ \
- typeof(f(__VA_ARGS__)) ret; \
- \
- if (has_vhe()) { \
- ret = f(__VA_ARGS__); \
- } else { \
- ret = __kvm_call_hyp(kvm_ksym_ref(f), \
- ##__VA_ARGS__); \
- } \
- \
- ret; \
- })
-
-void force_vm_exit(const cpumask_t *mask);
-int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_events *events);
-
-int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_events *events);
-
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-
-void kvm_arm_halt_guest(struct kvm *kvm);
-void kvm_arm_resume_guest(struct kvm *kvm);
-
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index);
-
-static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index) {}
-
-/* MMIO helpers */
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
-
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- phys_addr_t fault_ipa);
-
-static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
- unsigned long hyp_stack_ptr,
- unsigned long vector_ptr)
-{
- /*
- * Call initialization code, and switch to the full blown HYP
- * code. The init code doesn't need to preserve these
- * registers as r0-r3 are already caller-saved according to
- * the AAPCS.
- * Note that we slightly misuse the prototype by casting the
- * stack pointer to a void *.
- *
- * The PGDs are always passed as the third argument, in order
- * to be passed into r2-r3 to the init code (yes, this is
- * compliant with the PCS!).
- */
-
- __kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
-}
-
-static inline void __cpu_init_stage2(void)
-{
- kvm_call_hyp(__init_stage2_translation);
-}
-
-static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
- return 0;
-}
-
-int kvm_perf_init(void);
-int kvm_perf_teardown(void);
-
-static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
-{
- return SMCCC_RET_NOT_SUPPORTED;
-}
-
-static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
-{
- return GPA_INVALID;
-}
-
-static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
-{
-}
-
-static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
-{
- return false;
-}
-
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
-
-struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-
-static inline bool kvm_arch_requires_vhe(void) { return false; }
-static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_arm_init_debug(void) {}
-static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-
-int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr);
-
-/*
- * VFP/NEON switching is all done by the hyp switch code, so no need to
- * coordinate with host context handling for this state:
- */
-static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
-
-static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
-
-#define KVM_BP_HARDEN_UNKNOWN -1
-#define KVM_BP_HARDEN_WA_NEEDED 0
-#define KVM_BP_HARDEN_NOT_REQUIRED 1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
- switch(read_cpuid_part()) {
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
- case ARM_CPU_PART_BRAHMA_B15:
- case ARM_CPU_PART_CORTEX_A12:
- case ARM_CPU_PART_CORTEX_A15:
- case ARM_CPU_PART_CORTEX_A17:
- return KVM_BP_HARDEN_WA_NEEDED;
-#endif
- case ARM_CPU_PART_CORTEX_A7:
- return KVM_BP_HARDEN_NOT_REQUIRED;
- default:
- return KVM_BP_HARDEN_UNKNOWN;
- }
-}
-
-#define KVM_SSBD_UNKNOWN -1
-#define KVM_SSBD_FORCE_DISABLE 0
-#define KVM_SSBD_KERNEL 1
-#define KVM_SSBD_FORCE_ENABLE 2
-#define KVM_SSBD_MITIGATED 3
-
-static inline int kvm_arm_have_ssbd(void)
-{
- /* No way to detect it yet, pretend it is not there. */
- return KVM_SSBD_UNKNOWN;
-}
-
-static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
-
-#define __KVM_HAVE_ARCH_VM_ALLOC
-struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
-
-static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
-{
- /*
- * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
- * so any non-zero value used as type is illegal.
- */
- if (type)
- return -EINVAL;
- return 0;
-}
-
-static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
-{
- return -EINVAL;
-}
-
-static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
-{
- return true;
-}
-
-#endif /* __ARM_KVM_HOST_H__ */
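
kvm_call_hyp_ret() in the deleted header leans on a GNU statement
expression: a ({ ... }) block evaluates to its last expression, and
typeof(f(__VA_ARGS__)) declares a temporary of the callee's return
type without having to name it. A stripped-down sketch of the pattern
(call_checked() is illustrative, not a kernel macro):

    #define call_checked(f, ...)                    \
            ({                                      \
                    typeof(f(__VA_ARGS__)) __ret;   \
                                                    \
                    __ret = f(__VA_ARGS__);         \
                    __ret;                          \
            })

With this shape, int x = call_checked(fn, arg); type-checks against
fn's actual return type at compile time.
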
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
deleted file mode 100644
index 3c1b55ecc578..000000000000
--- a/arch/arm/include/asm/kvm_hyp.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#ifndef __ARM_KVM_HYP_H__
-#define __ARM_KVM_HYP_H__
-
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-#include <asm/cp15.h>
-#include <asm/kvm_arm.h>
-#include <asm/vfp.h>
-
-#define __hyp_text __section(.hyp.text) notrace
-
-#define __ACCESS_VFP(CRn) \
- "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
-
-#define write_special(v, r) \
- asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
-#define read_special(r) ({ \
- u32 __val; \
- asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
- __val; \
-})
-
-#define TTBR0 __ACCESS_CP15_64(0, c2)
-#define TTBR1 __ACCESS_CP15_64(1, c2)
-#define VTTBR __ACCESS_CP15_64(6, c2)
-#define PAR __ACCESS_CP15_64(0, c7)
-#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
-#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
-#define CNTVOFF __ACCESS_CP15_64(4, c14)
-
-#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
-#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
-#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
-#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
-#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
-#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
-#define HCR __ACCESS_CP15(c1, 4, c1, 0)
-#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
-#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
-#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
-#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
-#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
-#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
-#define DACR __ACCESS_CP15(c3, 0, c0, 0)
-#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
-#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
-#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
-#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
-#define HSR __ACCESS_CP15(c5, 4, c2, 0)
-#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
-#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
-#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
-#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
-#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
-#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
-#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
-#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
-#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
-#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
-#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
-#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
-#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
-#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
-#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
-#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
-#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
-#define CID __ACCESS_CP15(c13, 0, c0, 1)
-#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
-#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
-#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
-#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
-#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
-#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
-#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
-#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
-
-#define VFP_FPEXC __ACCESS_VFP(FPEXC)
-
-/* AArch64 compatibility macros, only for the timer so far */
-#define read_sysreg_el0(r) read_sysreg(r##_EL0)
-#define write_sysreg_el0(v, r) write_sysreg(v, r##_EL0)
-
-#define SYS_CNTP_CTL_EL0 CNTP_CTL
-#define SYS_CNTP_CVAL_EL0 CNTP_CVAL
-#define SYS_CNTV_CTL_EL0 CNTV_CTL
-#define SYS_CNTV_CVAL_EL0 CNTV_CVAL
-
-#define cntvoff_el2 CNTVOFF
-#define cnthctl_el2 CNTHCTL
-
-void __timer_enable_traps(struct kvm_vcpu *vcpu);
-void __timer_disable_traps(struct kvm_vcpu *vcpu);
-
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-
-void __sysreg_save_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
-
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
-
-asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
-asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
-static inline bool __vfp_enabled(void)
-{
- return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
-}
-
-void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
-void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
-
-asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
- struct kvm_cpu_context *host);
-asmlinkage int __hyp_do_panic(const char *, int, u32);
-
-#endif /* __ARM_KVM_HYP_H__ */
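
The read_special()/write_special() macros above each wrap a single
mrs/msr instruction, with __stringify() pasting the register name into
the asm template. Roughly, read_special(ELR_hyp) expands to the sketch
below (ELR_hyp is just an illustrative banked register):

    u32 __val;
    asm volatile("mrs %0, ELR_hyp" : "=r" (__val));
    /* __val becomes the value of the surrounding ({ ... }) */
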
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
deleted file mode 100644
index 0d84d50bf9ba..000000000000
--- a/arch/arm/include/asm/kvm_mmu.h
+++ /dev/null
@@ -1,435 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_MMU_H__
-#define __ARM_KVM_MMU_H__
-
-#include <asm/memory.h>
-#include <asm/page.h>
-
-/*
- * We directly use the kernel VA for the HYP, as we can directly share
- * the mapping (HTTBR "covers" TTBR1).
- */
-#define kern_hyp_va(kva) (kva)
-
-/* Contrary to arm64, there is no need to generate a PC-relative address */
-#define hyp_symbol_addr(s) \
- ({ \
- typeof(s) *addr = &(s); \
- addr; \
- })
-
-#ifndef __ASSEMBLY__
-
-#include <linux/highmem.h>
-#include <asm/cacheflush.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_hyp.h>
-#include <asm/pgalloc.h>
-#include <asm/stage2_pgtable.h>
-
-/* Ensure compatibility with arm64 */
-#define VA_BITS 32
-
-#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT
-#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm))
-#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL)
-#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK
-
-#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t))
-
-int create_hyp_mappings(void *from, void *to, pgprot_t prot);
-int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
- void __iomem **kaddr,
- void __iomem **haddr);
-int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
- void **haddr);
-void free_hyp_pgds(void);
-
-void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size, bool writable);
-
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
-
-phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_get_idmap_vector(void);
-int kvm_mmu_init(void);
-void kvm_clear_hyp_idmap(void);
-
-#define kvm_mk_pmd(ptep) __pmd(__pa(ptep) | PMD_TYPE_TABLE)
-#define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE)
-#define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; })
-
-#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot)
-#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot)
-#define kvm_pfn_pud(pfn, prot) (__pud(0))
-
-#define kvm_pud_pfn(pud) ({ WARN_ON(1); 0; })
-
-
-#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
-/* No support for pud hugepages */
-#define kvm_pud_mkhuge(pud) ( {WARN_ON(1); pud; })
-
-/*
- * The following kvm_*pud*() functions are provided strictly to allow
- * sharing code with arm64. They should never be called in practice.
- */
-static inline void kvm_set_s2pud_readonly(pud_t *pud)
-{
- WARN_ON(1);
-}
-
-static inline bool kvm_s2pud_readonly(pud_t *pud)
-{
- WARN_ON(1);
- return false;
-}
-
-static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
-{
- WARN_ON(1);
-}
-
-static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
-{
- WARN_ON(1);
- return pud;
-}
-
-static inline pud_t kvm_s2pud_mkexec(pud_t pud)
-{
- WARN_ON(1);
- return pud;
-}
-
-static inline bool kvm_s2pud_exec(pud_t *pud)
-{
- WARN_ON(1);
- return false;
-}
-
-static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
-{
- BUG();
- return pud;
-}
-
-static inline bool kvm_s2pud_young(pud_t pud)
-{
- WARN_ON(1);
- return false;
-}
-
-static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
-{
- pte_val(pte) |= L_PTE_S2_RDWR;
- return pte;
-}
-
-static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
-{
- pmd_val(pmd) |= L_PMD_S2_RDWR;
- return pmd;
-}
-
-static inline pte_t kvm_s2pte_mkexec(pte_t pte)
-{
- pte_val(pte) &= ~L_PTE_XN;
- return pte;
-}
-
-static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
-{
- pmd_val(pmd) &= ~PMD_SECT_XN;
- return pmd;
-}
-
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
-{
- pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
-}
-
-static inline bool kvm_s2pte_readonly(pte_t *pte)
-{
- return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
-}
-
-static inline bool kvm_s2pte_exec(pte_t *pte)
-{
- return !(pte_val(*pte) & L_PTE_XN);
-}
-
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
-{
- pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
-{
- return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
-}
-
-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
-{
- return !(pmd_val(*pmd) & PMD_SECT_XN);
-}
-
-static inline bool kvm_page_empty(void *ptr)
-{
- struct page *ptr_page = virt_to_page(ptr);
- return page_count(ptr_page) == 1;
-}
-
-#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
-#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(kvm, pudp) false
-
-#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define hyp_pud_table_empty(pudp) false
-
-struct kvm;
-
-#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
-
-static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
-{
- return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
-}
-
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- /*
- * Clean the dcache to the Point of Coherency.
- *
- * We need to do this through a kernel mapping (using the
- * user-space mapping has proved to be the wrong
- * solution). For that, we need to kmap one page at a time,
- * and iterate over the range.
- */
-
- VM_BUG_ON(size & ~PAGE_MASK);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- size -= PAGE_SIZE;
- pfn++;
-
- kunmap_atomic(va);
- }
-}
-
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
- unsigned long size)
-{
- u32 iclsz;
-
- /*
- * If we are going to insert an instruction page and the icache is
- * either VIPT or PIPT, there is a potential problem where the host
- * (or another VM) may have used the same page as this guest, and we
- * read incorrect data from the icache. If we're using a PIPT cache,
- * we can invalidate just that page, but if we are using a VIPT cache
- * we need to invalidate the entire icache - damn shame - as written
- * in the ARM ARM (DDI 0406C.b - Page B3-1393).
- *
- * VIVT caches are tagged using both the ASID and the VMID and don't
- * need any kind of flushing (DDI 0406C.b - Page B3-1392).
- */
-
- VM_BUG_ON(size & ~PAGE_MASK);
-
- if (icache_is_vivt_asid_tagged())
- return;
-
- if (!icache_is_pipt()) {
- /* any kind of VIPT cache */
- __flush_icache_all();
- return;
- }
-
- /*
- * CTR IminLine contains Log2 of the number of words in the
- * cache line, so we can get the number of words as
- * 2 << (IminLine - 1). To get the number of bytes, we
- * multiply by 4 (the number of bytes in a 32-bit word), and
- * get 4 << (IminLine).
- */
- iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
- void *end = va + PAGE_SIZE;
- void *addr = va;
-
- do {
- write_sysreg(addr, ICIMVAU);
- addr += iclsz;
- } while (addr < end);
-
- dsb(ishst);
- isb();
-
- size -= PAGE_SIZE;
- pfn++;
-
- kunmap_atomic(va);
- }
-
- /* Check if we need to invalidate the BTB */
- if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
- write_sysreg(0, BPIALLIS);
- dsb(ishst);
- isb();
- }
-}
-
-static inline void __kvm_flush_dcache_pte(pte_t pte)
-{
- void *va = kmap_atomic(pte_page(pte));
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- kunmap_atomic(va);
-}
-
-static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
-{
- unsigned long size = PMD_SIZE;
- kvm_pfn_t pfn = pmd_pfn(pmd);
-
- while (size) {
- void *va = kmap_atomic_pfn(pfn);
-
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-
- pfn++;
- size -= PAGE_SIZE;
-
- kunmap_atomic(va);
- }
-}
-
-static inline void __kvm_flush_dcache_pud(pud_t pud)
-{
-}
-
-#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
-
-void kvm_set_way_flush(struct kvm_vcpu *vcpu);
-void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
-
-static inline bool __kvm_cpu_uses_extended_idmap(void)
-{
- return false;
-}
-
-static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
-{
- return PTRS_PER_PGD;
-}
-
-static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
- pgd_t *hyp_pgd,
- pgd_t *merged_hyp_pgd,
- unsigned long hyp_idmap_start) { }
-
-static inline unsigned int kvm_get_vmid_bits(void)
-{
- return 8;
-}
-
-/*
- * We are not in the kvm->srcu critical section most of the time, so we take
- * the SRCU read lock here. Since we copy the data from the user page, we
- * can immediately drop the lock again.
- */
-static inline int kvm_read_guest_lock(struct kvm *kvm,
- gpa_t gpa, void *data, unsigned long len)
-{
- int srcu_idx = srcu_read_lock(&kvm->srcu);
- int ret = kvm_read_guest(kvm, gpa, data, len);
-
- srcu_read_unlock(&kvm->srcu, srcu_idx);
-
- return ret;
-}
-
-static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
- const void *data, unsigned long len)
-{
- int srcu_idx = srcu_read_lock(&kvm->srcu);
- int ret = kvm_write_guest(kvm, gpa, data, len);
-
- srcu_read_unlock(&kvm->srcu, srcu_idx);
-
- return ret;
-}
-
-static inline void *kvm_get_hyp_vector(void)
-{
- switch(read_cpuid_part()) {
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
- case ARM_CPU_PART_CORTEX_A12:
- case ARM_CPU_PART_CORTEX_A17:
- {
- extern char __kvm_hyp_vector_bp_inv[];
- return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
- }
-
- case ARM_CPU_PART_BRAHMA_B15:
- case ARM_CPU_PART_CORTEX_A15:
- {
- extern char __kvm_hyp_vector_ic_inv[];
- return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
- }
-#endif
- default:
- {
- extern char __kvm_hyp_vector[];
- return kvm_ksym_ref(__kvm_hyp_vector);
- }
- }
-}
-
-static inline int kvm_map_vectors(void)
-{
- return 0;
-}
-
-static inline int hyp_map_aux_data(void)
-{
- return 0;
-}
-
-#define kvm_phys_to_vttbr(addr) (addr)
-
-static inline void kvm_set_ipa_limit(void) {}
-
-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
-{
- struct kvm_vmid *vmid = &kvm->arch.vmid;
- u64 vmid_field, baddr;
-
- baddr = kvm->arch.pgd_phys;
- vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
- return kvm_phys_to_vttbr(baddr) | vmid_field;
-}
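
A quick sketch of the VTTBR composition above, assuming the usual ARMv7 LPAE layout (VMID in bits 55:48); the shift value and sample numbers here are illustrative, the kernel takes them from kvm_arm.h:

#include <stdint.h>
#include <stdio.h>

#define VMID_SHIFT 48	/* assumed VTTBR.VMID position */

int main(void)
{
	uint64_t baddr = 0x8f400000ull;	/* hypothetical stage-2 pgd address */
	uint64_t vmid  = 0x2a;		/* fits in the 8 VMID bits reported above */
	uint64_t vttbr = baddr | (vmid << VMID_SHIFT);

	printf("VTTBR = 0x%016llx\n", (unsigned long long)vttbr);
	return 0;
}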
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_ras.h b/arch/arm/include/asm/kvm_ras.h
deleted file mode 100644
index e9577292dfe4..000000000000
--- a/arch/arm/include/asm/kvm_ras.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 - Arm Ltd */
-
-#ifndef __ARM_KVM_RAS_H__
-#define __ARM_KVM_RAS_H__
-
-#include <linux/types.h>
-
-static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
-{
- return -1;
-}
-
-#endif /* __ARM_KVM_RAS_H__ */
diff --git a/arch/arm/include/asm/nwflash.h b/arch/arm/include/asm/nwflash.h
index 0ec6f07c2d8a..66b7e68c9b58 100644
--- a/arch/arm/include/asm/nwflash.h
+++ b/arch/arm/include/asm/nwflash.h
@@ -2,7 +2,6 @@
#ifndef _FLASH_H
#define _FLASH_H
-#define FLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
#define CMD_WRITE_DISABLE 0
#define CMD_WRITE_ENABLE 0x28
#define CMD_WRITE_BASE64K_ENABLE 0x47
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index ad55ab068dbf..36805f94939e 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -104,26 +104,6 @@
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
-/*
- * 2nd stage PTE definitions for LPAE.
- */
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
-#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
-#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
-
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
-
-#define L_PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[1] */
-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
-
-/*
- * Hyp-mode PL2 PTE definitions for LPAE.
- */
-#define L_PTE_HYP L_PTE_USER
-
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index eabcb48a7840..0483cf413315 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -80,9 +80,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
-extern pgprot_t pgprot_hyp_device;
-extern pgprot_t pgprot_s2;
-extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
@@ -95,12 +92,6 @@ extern pgprot_t pgprot_s2_device;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
-#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
-#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
-#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
-#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
index 4ceb4f757d4d..700b8bcdf9bd 100644
--- a/arch/arm/include/asm/sections.h
+++ b/arch/arm/include/asm/sections.h
@@ -10,8 +10,6 @@ extern char __idmap_text_start[];
extern char __idmap_text_end[];
extern char __entry_text_start[];
extern char __entry_text_end[];
-extern char __hyp_idmap_text_start[];
-extern char __hyp_idmap_text_end[];
static inline bool in_entry_text(unsigned long addr)
{
@@ -22,9 +20,7 @@ static inline bool in_entry_text(unsigned long addr)
static inline bool in_idmap_text(unsigned long addr)
{
void *a = (void *)addr;
- return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) ||
- memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end,
- a, 1);
+ return memory_contains(__idmap_text_start, __idmap_text_end, a, 1);
}
#endif /* _ASM_ARM_SECTIONS_H */
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
deleted file mode 100644
index aaceec7855ec..000000000000
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 - ARM Ltd
- *
- * stage2 page table helpers
- */
-
-#ifndef __ARM_S2_PGTABLE_H_
-#define __ARM_S2_PGTABLE_H_
-
-/*
- * kvm_mmu_cache_min_pages() is the number of pages required
- * to install a stage-2 translation. We pre-allocate the entry
- * level table at VM creation. Since we have a 3 level page-table,
- * we need only two pages to add a new mapping.
- */
-#define kvm_mmu_cache_min_pages(kvm) 2
-
-#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
-#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
-#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
-#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(kvm, pud) do { } while (0)
-
-#define stage2_pud_none(kvm, pud) pud_none(pud)
-#define stage2_pud_clear(kvm, pud) pud_clear(pud)
-#define stage2_pud_present(kvm, pud) pud_present(pud)
-#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
-
-#define stage2_pud_huge(kvm, pud) pud_huge(pud)
-
-/* Open-coded p*d_addr_end that can deal with 64-bit addresses */
-static inline phys_addr_t
-stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
-
- return (boundary - 1 < end - 1) ? boundary : end;
-}
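
The "boundary - 1 < end - 1" comparison above is what keeps the helper correct when addr + PGDIR_SIZE wraps past the top of the address space. A standalone sketch with a hypothetical 1 MiB granule and deliberately narrow 32-bit addresses shows the difference against the naive test:

#include <stdint.h>
#include <stdio.h>

#define GRAN_SIZE 0x100000u		/* hypothetical 1 MiB granule */
#define GRAN_MASK (~(GRAN_SIZE - 1))

static uint32_t addr_end(uint32_t addr, uint32_t end)
{
	uint32_t boundary = (addr + GRAN_SIZE) & GRAN_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* boundary wraps to 0; a naive "boundary < end" would return 0 */
	printf("0x%x\n", addr_end(0xfff80000u, 0xffffffffu));	/* 0xffffffff */
	printf("0x%x\n", addr_end(0x00010000u, 0x00300000u));	/* 0x00100000 */
	return 0;
}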
-
-#define stage2_pud_addr_end(kvm, addr, end) (end)
-
-static inline phys_addr_t
-stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
-
- return (boundary - 1 < end - 1) ? boundary : end;
-}
-
-#define stage2_pgd_index(kvm, addr) pgd_index(addr)
-
-#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
-#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
-#define stage2_pud_table_empty(kvm, pudp) false
-
-static inline bool kvm_stage2_has_pud(struct kvm *kvm)
-{
- return false;
-}
-
-#define S2_PMD_MASK PMD_MASK
-#define S2_PMD_SIZE PMD_SIZE
-#define S2_PUD_MASK PUD_MASK
-#define S2_PUD_SIZE PUD_SIZE
-
-static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
-{
- return true;
-}
-
-#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 17c26ccd126d..dd9697b2bde8 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -39,8 +39,6 @@ static inline void sync_boot_mode(void)
sync_cache_r(&__boot_cpu_mode);
}
-void __hyp_set_vectors(unsigned long phys_vector_base);
-void __hyp_reset_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
#define sync_boot_mode()
@@ -67,18 +65,6 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
-static inline bool has_vhe(void)
-{
- return false;
-}
-
-/* The section containing the hypervisor idmap text */
-extern char __hyp_idmap_text_start[];
-extern char __hyp_idmap_text_end[];
-
-/* The section containing the hypervisor text */
-extern char __hyp_text_start[];
-extern char __hyp_text_end[];
#endif
#else
@@ -87,9 +73,6 @@ extern char __hyp_text_end[];
#define HVC_SET_VECTORS 0
#define HVC_SOFT_RESTART 1
-#define HVC_RESET_VECTORS 2
-
-#define HVC_STUB_HCALL_NR 3
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/debug/stm32.S b/arch/arm/include/debug/stm32.S
index 1abb32f685fd..f3c4a37210ed 100644
--- a/arch/arm/include/debug/stm32.S
+++ b/arch/arm/include/debug/stm32.S
@@ -4,14 +4,13 @@
* Author: Gerald Baeza <gerald.baeza@st.com> for STMicroelectronics.
*/
-#define STM32_UART_BASE 0x40011000 /* USART1 */
-
#ifdef CONFIG_STM32F4_DEBUG_UART
#define STM32_USART_SR_OFF 0x00
#define STM32_USART_TDR_OFF 0x04
#endif
-#ifdef CONFIG_STM32F7_DEBUG_UART
+#if defined(CONFIG_STM32F7_DEBUG_UART) || defined(CONFIG_STM32H7_DEBUG_UART) || \
+ defined(CONFIG_STM32MP1_DEBUG_UART)
#define STM32_USART_SR_OFF 0x1C
#define STM32_USART_TDR_OFF 0x28
#endif
@@ -20,8 +19,8 @@
#define STM32_USART_TXE (1 << 7) /* Tx data reg empty */
.macro addruart, rp, rv, tmp
- ldr \rp, =STM32_UART_BASE @ physical base
- ldr \rv, =STM32_UART_BASE @ virt base /* NoMMU */
+ ldr \rp, =CONFIG_DEBUG_UART_PHYS @ physical base
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT @ virt base
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
deleted file mode 100644
index 03cd7c19a683..000000000000
--- a/arch/arm/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,314 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __ARM_KVM_H__
-#define __ARM_KVM_H__
-
-#include <linux/types.h>
-#include <linux/psci.h>
-#include <asm/ptrace.h>
-
-#define __KVM_HAVE_GUEST_DEBUG
-#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_READONLY_MEM
-#define __KVM_HAVE_VCPU_EVENTS
-
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
-/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
-#define KVM_ARM_SVC_sp svc_regs[0]
-#define KVM_ARM_SVC_lr svc_regs[1]
-#define KVM_ARM_SVC_spsr svc_regs[2]
-#define KVM_ARM_ABT_sp abt_regs[0]
-#define KVM_ARM_ABT_lr abt_regs[1]
-#define KVM_ARM_ABT_spsr abt_regs[2]
-#define KVM_ARM_UND_sp und_regs[0]
-#define KVM_ARM_UND_lr und_regs[1]
-#define KVM_ARM_UND_spsr und_regs[2]
-#define KVM_ARM_IRQ_sp irq_regs[0]
-#define KVM_ARM_IRQ_lr irq_regs[1]
-#define KVM_ARM_IRQ_spsr irq_regs[2]
-
-/* Valid only for fiq_regs in struct kvm_regs */
-#define KVM_ARM_FIQ_r8 fiq_regs[0]
-#define KVM_ARM_FIQ_r9 fiq_regs[1]
-#define KVM_ARM_FIQ_r10 fiq_regs[2]
-#define KVM_ARM_FIQ_fp fiq_regs[3]
-#define KVM_ARM_FIQ_ip fiq_regs[4]
-#define KVM_ARM_FIQ_sp fiq_regs[5]
-#define KVM_ARM_FIQ_lr fiq_regs[6]
-#define KVM_ARM_FIQ_spsr fiq_regs[7]
-
-struct kvm_regs {
- struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
- unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
- unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
- unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
- unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
- unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
-};
-
-/* Supported Processor Types */
-#define KVM_ARM_TARGET_CORTEX_A15 0
-#define KVM_ARM_TARGET_CORTEX_A7 1
-#define KVM_ARM_NUM_TARGETS 2
-
-/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
-#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
-#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
-
-/* Supported device IDs */
-#define KVM_ARM_DEVICE_VGIC_V2 0
-
-/* Supported VGIC address types */
-#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
-#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
-
-#define KVM_VGIC_V2_DIST_SIZE 0x1000
-#define KVM_VGIC_V2_CPU_SIZE 0x2000
-
-/* Supported VGICv3 address types */
-#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
-#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
-#define KVM_VGIC_ITS_ADDR_TYPE 4
-#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
-
-#define KVM_VGIC_V3_DIST_SIZE SZ_64K
-#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
-#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
-
-#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
-#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
-
-struct kvm_vcpu_init {
- __u32 target;
- __u32 features[7];
-};
-
-struct kvm_sregs {
-};
-
-struct kvm_fpu {
-};
-
-struct kvm_guest_debug_arch {
-};
-
-struct kvm_debug_exit_arch {
-};
-
-struct kvm_sync_regs {
- /* Used with KVM_CAP_ARM_USER_IRQ */
- __u64 device_irq_level;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-/* for KVM_GET/SET_VCPU_EVENTS */
-struct kvm_vcpu_events {
- struct {
- __u8 serror_pending;
- __u8 serror_has_esr;
- __u8 ext_dabt_pending;
- /* Align it to 8 bytes */
- __u8 pad[5];
- __u64 serror_esr;
- } exception;
- __u32 reserved[12];
-};
-
-/* If you need to interpret the index values, here is the key: */
-#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
-#define KVM_REG_ARM_COPROC_SHIFT 16
-#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
-#define KVM_REG_ARM_32_OPC2_SHIFT 0
-#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
-#define KVM_REG_ARM_OPC1_SHIFT 3
-#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
-#define KVM_REG_ARM_CRM_SHIFT 7
-#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
-#define KVM_REG_ARM_32_CRN_SHIFT 11
-/*
- * For KVM currently all guest registers are nonsecure, but we reserve a bit
- * in the encoding to distinguish secure from nonsecure for AArch32 system
- * registers that are banked by security. This is 1 for the secure banked
- * register, and 0 for the nonsecure banked register or if the register is
- * not banked by security.
- */
-#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000
-#define KVM_REG_ARM_SECURE_SHIFT 28
-
-#define ARM_CP15_REG_SHIFT_MASK(x,n) \
- (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
-
-#define __ARM_CP15_REG(op1,crn,crm,op2) \
- (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
- ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
- ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
- ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
- ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
-
-#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
-
-#define __ARM_CP15_REG64(op1,crm) \
- (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
-#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
-
-/* PL1 Physical Timer Registers */
-#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
-#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
-#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
-
-/* Virtual Timer Registers */
-#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
-#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
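
The shift/mask helpers above pack the MRC/MCR operands into the low bits of a register ID. Here is a sketch of just those encoding fields; the arch, size and coprocessor bits of the full ID are deliberately left out, so the printed value is only the low part of an ID such as KVM_REG_ARM_TIMER_CTL:

#include <stdint.h>
#include <stdio.h>

/* Field positions copied from the masks above. */
#define OPC2_SHIFT	0
#define OPC1_SHIFT	3
#define CRM_SHIFT	7
#define CRN_SHIFT	11

static uint64_t cp15_encode(unsigned op1, unsigned crn,
			    unsigned crm, unsigned op2)
{
	return ((uint64_t)op1 << OPC1_SHIFT) | ((uint64_t)crn << CRN_SHIFT) |
	       ((uint64_t)crm << CRM_SHIFT) | ((uint64_t)op2 << OPC2_SHIFT);
}

int main(void)
{
	/* CNTV_CTL: mrc p15, 0, <Rt>, c14, c3, 1 */
	printf("0x%llx\n", (unsigned long long)cp15_encode(0, 14, 3, 1));
	return 0;
}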
-
-/* Normal registers are mapped as coprocessor 16. */
-#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
-
-/* Some registers need more space to represent values. */
-#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
-#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
-#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
-#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
-#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
-
-/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
-#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
-#define KVM_REG_ARM_VFP_BASE_REG 0x0
-#define KVM_REG_ARM_VFP_FPSID 0x1000
-#define KVM_REG_ARM_VFP_FPSCR 0x1001
-#define KVM_REG_ARM_VFP_MVFR1 0x1006
-#define KVM_REG_ARM_VFP_MVFR0 0x1007
-#define KVM_REG_ARM_VFP_FPEXC 0x1008
-#define KVM_REG_ARM_VFP_FPINST 0x1009
-#define KVM_REG_ARM_VFP_FPINST2 0x100A
-
-/* KVM-as-firmware specific pseudo-registers */
-#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
-#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
- KVM_REG_ARM_FW | ((r) & 0xffff))
-#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
- /* Higher values mean better protection. */
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
- /* Higher values mean better protection. */
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
-#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
-
-/* Device Control API: ARM VGIC */
-#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
-#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
-#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
-#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
-#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
-#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
-#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
- (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
-#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
-#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
-#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
-#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
-#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
-#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
-#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
-#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
-#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
- (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
-#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
-#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-
-/* Device Control API on vcpu fd */
-#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_TIMER_CTRL 1
-#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
-#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
-
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
-#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
-#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
-#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
-#define KVM_DEV_ARM_ITS_CTRL_RESET 4
-
-/* KVM_IRQ_LINE irq field index values */
-#define KVM_ARM_IRQ_VCPU2_SHIFT 28
-#define KVM_ARM_IRQ_VCPU2_MASK 0xf
-#define KVM_ARM_IRQ_TYPE_SHIFT 24
-#define KVM_ARM_IRQ_TYPE_MASK 0xf
-#define KVM_ARM_IRQ_VCPU_SHIFT 16
-#define KVM_ARM_IRQ_VCPU_MASK 0xff
-#define KVM_ARM_IRQ_NUM_SHIFT 0
-#define KVM_ARM_IRQ_NUM_MASK 0xffff
-
-/* irq_type field */
-#define KVM_ARM_IRQ_TYPE_CPU 0
-#define KVM_ARM_IRQ_TYPE_SPI 1
-#define KVM_ARM_IRQ_TYPE_PPI 2
-
-/* out-of-kernel GIC cpu interrupt injection irq_number field */
-#define KVM_ARM_IRQ_CPU_IRQ 0
-#define KVM_ARM_IRQ_CPU_FIQ 1
-
-/*
- * This used to hold the highest supported SPI, but it is now obsolete
- * and only here to provide source code level compatibility with older
- * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
- */
-#ifndef __KERNEL__
-#define KVM_ARM_IRQ_GIC_MAX 127
-#endif
-
-/* One single KVM irqchip, ie. the VGIC */
-#define KVM_NR_IRQCHIPS 1
-
-/* PSCI interface */
-#define KVM_PSCI_FN_BASE 0x95c1ba5e
-#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
-
-#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
-#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
-#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
-#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
-
-#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
-#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
-#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
-#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
-
-#endif /* __ARM_KVM_H__ */
diff --git a/arch/arm/kernel/.gitignore b/arch/arm/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/arm/kernel/.gitignore
+++ b/arch/arm/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c773b829ee8e..c036a4a2f8e2 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -11,9 +11,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#ifdef CONFIG_KVM_ARM_HOST
-#include <linux/kvm_host.h>
-#endif
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
@@ -167,14 +164,6 @@ int main(void)
DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
-#ifdef CONFIG_KVM_ARM_HOST
- DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
- DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
- DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
- DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
- DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
-#endif
- BLANK();
#ifdef CONFIG_VDSO
DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
#endif
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 6607fa817bba..26d8e03b1dd3 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -189,19 +189,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
ENDPROC(__hyp_stub_install_secondary)
__hyp_stub_do_trap:
+#ifdef ZIMAGE
teq r0, #HVC_SET_VECTORS
bne 1f
+ /* Only the ZIMAGE stubs can change the HYP vectors */
mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
b __hyp_stub_exit
+#endif
1: teq r0, #HVC_SOFT_RESTART
- bne 1f
+ bne 2f
bx r1
-1: teq r0, #HVC_RESET_VECTORS
- beq __hyp_stub_exit
-
- ldr r0, =HVC_STUB_ERR
+2: ldr r0, =HVC_STUB_ERR
__ERET
__hyp_stub_exit:
@@ -210,26 +210,9 @@ __hyp_stub_exit:
ENDPROC(__hyp_stub_do_trap)
/*
- * __hyp_set_vectors: Call this after boot to set the initial hypervisor
- * vectors as part of hypervisor installation. On an SMP system, this should
- * be called on each CPU.
- *
- * r0 must be the physical address of the new vector table (which must lie in
- * the bottom 4GB of physical address space).
- *
- * r0 must be 32-byte aligned.
- *
- * Before calling this, you must check that the stub hypervisor is installed
- * everywhere, by waiting for any secondary CPUs to be brought up and then
- * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
- *
- * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
- * something else went wrong... in such cases, trying to install a new
- * hypervisor is unlikely to work as desired.
- *
- * When you call into your shiny new hypervisor, sp_hyp will contain junk,
- * so you will need to set that to something sensible at the new hypervisor's
- * initialisation entry point.
+ * __hyp_set_vectors is only used when ZIMAGE must bounce between HYP
+ * and SVC. For the kernel itself, the vectors are set once and for
+ * all by the stubs.
*/
ENTRY(__hyp_set_vectors)
mov r1, r0
@@ -245,12 +228,6 @@ ENTRY(__hyp_soft_restart)
ret lr
ENDPROC(__hyp_soft_restart)
-ENTRY(__hyp_reset_vectors)
- mov r0, #HVC_RESET_VECTORS
- __HVC(0)
- ret lr
-ENDPROC(__hyp_reset_vectors)
-
#ifndef ZIMAGE
.align 2
.L__boot_cpu_mode_offset:
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 7eaa2ae7aff5..72a08786e16e 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -25,26 +25,26 @@ ENTRY(relocate_new_kernel)
ldr r3, [r0],#4
	/* Is it a destination page? If so, put the destination address in r4 */
- tst r3,#1,0
+ tst r3,#1
beq 1f
bic r4,r3,#1
b 0b
1:
	/* Is it an indirection page? */
- tst r3,#2,0
+ tst r3,#2
beq 1f
bic r0,r3,#2
b 0b
1:
	/* Are we done? */
- tst r3,#4,0
+ tst r3,#4
beq 1f
b 2f
1:
	/* Is it a source page? */
- tst r3,#8,0
+ tst r3,#8
beq 0b
bic r3,r3,#8
mov r6,#1024
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 4574e6aea0a5..11a964fd66f4 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -444,7 +444,7 @@ int unwind_frame(struct stackframe *frame)
ctrl.vrs[PC] = ctrl.vrs[LR];
/* check for infinite loop */
- if (frame->pc == ctrl.vrs[PC])
+ if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
return -URC_FAILURE;
frame->fp = ctrl.vrs[FP];
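
The hunk above tightens the loop detector: unwinding only fails to make progress when neither pc nor sp changes, whereas the old pc-only test would abort a legitimate unwind through recursive calls (same pc, different sp). A small sketch of the intent, with made-up addresses:

#include <stdio.h>

struct frame { unsigned long pc, sp; };

static int stuck(const struct frame *prev, const struct frame *next)
{
	return prev->pc == next->pc && prev->sp == next->sp;
}

int main(void)
{
	struct frame a = { 0x80001234, 0xc0001000 };
	struct frame b = { 0x80001234, 0xc0001040 };	/* recursion: same pc */

	printf("recursive frame stuck? %d\n", stuck(&a, &b));	/* 0: continue */
	printf("identical frame stuck? %d\n", stuck(&a, &a));	/* 1: bail out */
	return 0;
}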
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 21b8b271c80d..6d2be994ae58 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -162,14 +162,6 @@ SECTIONS
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
-/*
- * The HYP init code can't be more than a page long,
- * and should not cross a page boundary.
- * The above comment applies as well.
- */
-ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
- "HYP init code too big or misaligned")
-
#ifdef CONFIG_XIP_DEFLATED_DATA
/*
* The .bss is used as a stack area for __inflate_kernel_data() whose stack
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 319ccb10846a..88a720da443b 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -170,12 +170,4 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
-/*
- * The HYP init code can't be more than a page long,
- * and should not cross a page boundary.
- * The above comment applies as well.
- */
-ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
- "HYP init code too big or misaligned")
-
#endif /* CONFIG_XIP_KERNEL */
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
index 8247bc15addc..381a8e105fa5 100644
--- a/arch/arm/kernel/vmlinux.lds.h
+++ b/arch/arm/kernel/vmlinux.lds.h
@@ -31,20 +31,11 @@
*(.proc.info.init) \
__proc_info_end = .;
-#define HYPERVISOR_TEXT \
- __hyp_text_start = .; \
- *(.hyp.text) \
- __hyp_text_end = .;
-
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
__idmap_text_start = .; \
*(.idmap.text) \
__idmap_text_end = .; \
- . = ALIGN(PAGE_SIZE); \
- __hyp_idmap_text_start = .; \
- *(.hyp.idmap.text) \
- __hyp_idmap_text_end = .;
#define ARM_DISCARD \
*(.ARM.exidx.exit.text) \
@@ -72,7 +63,6 @@
SCHED_TEXT \
CPUIDLE_TEXT \
LOCK_TEXT \
- HYPERVISOR_TEXT \
KPROBES_TEXT \
*(.gnu.warning) \
*(.glue_7) \
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
deleted file mode 100644
index f591026347a5..000000000000
--- a/arch/arm/kvm/Kconfig
+++ /dev/null
@@ -1,59 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# KVM configuration
-#
-
-source "virt/kvm/Kconfig"
-source "virt/lib/Kconfig"
-
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- ---help---
- Say Y here to get to see options for using your Linux host to run
- other operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and
- disabled.
-
-if VIRTUALIZATION
-
-config KVM
- bool "Kernel-based Virtual Machine (KVM) support"
- depends on MMU && OF
- select PREEMPT_NOTIFIERS
- select ARM_GIC
- select ARM_GIC_V3
- select ARM_GIC_V3_ITS
- select HAVE_KVM_CPU_RELAX_INTERCEPT
- select HAVE_KVM_ARCH_TLB_FLUSH_ALL
- select KVM_MMIO
- select KVM_ARM_HOST
- select KVM_GENERIC_DIRTYLOG_READ_PROTECT
- select SRCU
- select MMU_NOTIFIER
- select KVM_VFIO
- select HAVE_KVM_EVENTFD
- select HAVE_KVM_IRQFD
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQ_ROUTING
- select HAVE_KVM_MSI
- select IRQ_BYPASS_MANAGER
- select HAVE_KVM_IRQ_BYPASS
- depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
- ---help---
- Support hosting virtualized guest machines.
-
- This module provides access to the hardware capabilities through
- a character device node named /dev/kvm.
-
- If unsure, say N.
-
-config KVM_ARM_HOST
- bool
- ---help---
- Provides host support for ARM processors.
-
-source "drivers/vhost/Kconfig"
-
-endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
deleted file mode 100644
index e442d82821df..000000000000
--- a/arch/arm/kvm/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Kernel-based Virtual Machine module
-#
-
-plus_virt := $(call as-instr,.arch_extension virt,+virt)
-ifeq ($(plus_virt),+virt)
- plus_virt_def := -DREQUIRES_VIRT=1
-endif
-
-KVM := ../../../virt/kvm
-
-ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
-CFLAGS_$(KVM)/arm/arm.o := $(plus_virt_def)
-
-AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
-AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
-
-kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
-
-obj-$(CONFIG_KVM_ARM_HOST) += hyp/
-
-obj-y += kvm-arm.o init.o interrupts.o
-obj-y += handle_exit.o guest.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
-obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
-obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o
-obj-y += $(KVM)/arm/aarch32.o
-
-obj-y += $(KVM)/arm/vgic/vgic.o
-obj-y += $(KVM)/arm/vgic/vgic-init.o
-obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
-obj-y += $(KVM)/arm/vgic/vgic-v2.o
-obj-y += $(KVM)/arm/vgic/vgic-v3.o
-obj-y += $(KVM)/arm/vgic/vgic-v4.o
-obj-y += $(KVM)/arm/vgic/vgic-mmio.o
-obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
-obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
-obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
-obj-y += $(KVM)/arm/vgic/vgic-its.o
-obj-y += $(KVM)/arm/vgic/vgic-debug.o
-obj-y += $(KVM)/irqchip.o
-obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
deleted file mode 100644
index 07745ee022a1..000000000000
--- a/arch/arm/kvm/coproc.c
+++ /dev/null
@@ -1,1455 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Authors: Rusty Russell <rusty@rustcorp.com.au>
- * Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/bsearch.h>
-#include <linux/mm.h>
-#include <linux/kvm_host.h>
-#include <linux/uaccess.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_mmu.h>
-#include <asm/cacheflush.h>
-#include <asm/cputype.h>
-#include <trace/events/kvm.h>
-#include <asm/vfp.h>
-#include "../vfp/vfpinstr.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-#include "coproc.h"
-
-
-/******************************************************************************
- * Co-processor emulation
- *****************************************************************************/
-
-static bool write_to_read_only(struct kvm_vcpu *vcpu,
- const struct coproc_params *params)
-{
- WARN_ONCE(1, "CP15 write to read-only register\n");
- print_cp_instr(params);
- kvm_inject_undefined(vcpu);
- return false;
-}
-
-static bool read_from_write_only(struct kvm_vcpu *vcpu,
- const struct coproc_params *params)
-{
-	WARN_ONCE(1, "CP15 read from write-only register\n");
- print_cp_instr(params);
- kvm_inject_undefined(vcpu);
- return false;
-}
-
-/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
-static u32 cache_levels;
-
-/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
-#define CSSELR_MAX 12
-
-/*
- * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
- * of cp15 registers can be viewed either as couple of two u32 registers
- * or one u64 register. Current u64 register encoding is that least
- * significant u32 word is followed by most significant u32 word.
- */
-static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
- const struct coproc_reg *r,
- u64 val)
-{
- vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
- vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
-}
-
-static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
- const struct coproc_reg *r)
-{
- u64 val;
-
- val = vcpu_cp15(vcpu, r->reg + 1);
- val = val << 32;
- val = val | vcpu_cp15(vcpu, r->reg);
- return val;
-}
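
The split/join convention described in the comment above (least significant word first) round-trips like this; a trivial standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cp15[2];
	uint64_t val = 0x1122334455667788ull;

	cp15[0] = val & 0xffffffff;	/* least significant word first */
	cp15[1] = val >> 32;

	uint64_t back = ((uint64_t)cp15[1] << 32) | cp15[0];

	printf("%s\n", back == val ? "round-trip ok" : "mismatch");
	return 0;
}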
-
-int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /*
- * We can get here, if the host has been built without VFPv3 support,
- * but the guest attempted a floating point operation.
- */
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- /*
- * Compute guest MPIDR. We build a virtual cluster out of the
- * vcpu_id, but we read the 'U' bit from the underlying
- * hardware directly.
- */
- vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
- ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
- (vcpu->vcpu_id & 3));
-}
-
-/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
-static bool access_actlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
- return true;
-}
-
-/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
-static bool access_cbar(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return write_to_read_only(vcpu, p);
- return read_zero(vcpu, p);
-}
-
-/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
-static bool access_l2ctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
- return true;
-}
-
-static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- u32 l2ctlr, ncores;
-
- asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
- l2ctlr &= ~(3 << 24);
- ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
- /* How many cores in the current cluster and the next ones */
- ncores -= (vcpu->vcpu_id & ~3);
- /* Cap it to the maximum number of cores in a single cluster */
- ncores = min(ncores, 3U);
- l2ctlr |= (ncores & 3) << 24;
-
- vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
-}
-
-static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- u32 actlr;
-
- /* ACTLR contains SMP bit: make sure you create all cpus first! */
- asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
- /* Make the SMP bit consistent with the guest configuration */
- if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
- actlr |= 1U << 6;
- else
- actlr &= ~(1U << 6);
-
- vcpu_cp15(vcpu, c1_ACTLR) = actlr;
-}
-
-/*
- * TRM entries: A7:4.3.50, A15:4.3.49
- * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
- */
-static bool access_l2ectlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = 0;
- return true;
-}
-
-/*
- * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
- */
-static bool access_dcsw(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (!p->is_write)
- return read_from_write_only(vcpu, p);
-
- kvm_set_way_flush(vcpu);
- return true;
-}
-
-/*
- * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set. If the guest enables the MMU, we stop trapping the VM
- * sys_regs and leave it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_vm_reg(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- bool was_enabled = vcpu_has_cache_enabled(vcpu);
-
- BUG_ON(!p->is_write);
-
- vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
- if (p->is_64bit)
- vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
-
- kvm_toggle_cache(vcpu, was_enabled);
- return true;
-}
-
-static bool access_gic_sgi(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- u64 reg;
- bool g1;
-
- if (!p->is_write)
- return read_from_write_only(vcpu, p);
-
- reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
-	reg |= *vcpu_reg(vcpu, p->Rt1);
-
- /*
- * In a system where GICD_CTLR.DS=1, a ICC_SGI0R access generates
- * Group0 SGIs only, while ICC_SGI1R can generate either group,
- * depending on the SGI configuration. ICC_ASGI1R is effectively
- * equivalent to ICC_SGI0R, as there is no "alternative" secure
- * group.
- */
- switch (p->Op1) {
- default: /* Keep GCC quiet */
- case 0: /* ICC_SGI1R */
- g1 = true;
- break;
- case 1: /* ICC_ASGI1R */
- case 2: /* ICC_SGI0R */
- g1 = false;
- break;
- }
-
- vgic_v3_dispatch_sgi(vcpu, reg, g1);
-
- return true;
-}
-
-static bool access_gic_sre(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
-
- *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
-
- return true;
-}
-
-static bool access_cntp_tval(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- u32 val;
-
- if (p->is_write) {
- val = *vcpu_reg(vcpu, p->Rt1);
- kvm_arm_timer_write_sysreg(vcpu,
- TIMER_PTIMER, TIMER_REG_TVAL, val);
- } else {
- val = kvm_arm_timer_read_sysreg(vcpu,
- TIMER_PTIMER, TIMER_REG_TVAL);
- *vcpu_reg(vcpu, p->Rt1) = val;
- }
-
- return true;
-}
-
-static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- u32 val;
-
- if (p->is_write) {
- val = *vcpu_reg(vcpu, p->Rt1);
- kvm_arm_timer_write_sysreg(vcpu,
- TIMER_PTIMER, TIMER_REG_CTL, val);
- } else {
- val = kvm_arm_timer_read_sysreg(vcpu,
- TIMER_PTIMER, TIMER_REG_CTL);
- *vcpu_reg(vcpu, p->Rt1) = val;
- }
-
- return true;
-}
-
-static bool access_cntp_cval(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- u64 val;
-
- if (p->is_write) {
- val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
- val |= *vcpu_reg(vcpu, p->Rt1);
- kvm_arm_timer_write_sysreg(vcpu,
- TIMER_PTIMER, TIMER_REG_CVAL, val);
- } else {
- val = kvm_arm_timer_read_sysreg(vcpu,
- TIMER_PTIMER, TIMER_REG_CVAL);
- *vcpu_reg(vcpu, p->Rt1) = val;
- *vcpu_reg(vcpu, p->Rt2) = val >> 32;
- }
-
- return true;
-}
-
-/*
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- */
-static bool trap_raz_wi(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- if (p->is_write)
- return ignore_write(vcpu, p);
- else
- return read_zero(vcpu, p);
-}
-
-#define access_pmcr trap_raz_wi
-#define access_pmcntenset trap_raz_wi
-#define access_pmcntenclr trap_raz_wi
-#define access_pmovsr trap_raz_wi
-#define access_pmselr trap_raz_wi
-#define access_pmceid0 trap_raz_wi
-#define access_pmceid1 trap_raz_wi
-#define access_pmccntr trap_raz_wi
-#define access_pmxevtyper trap_raz_wi
-#define access_pmxevcntr trap_raz_wi
-#define access_pmuserenr trap_raz_wi
-#define access_pmintenset trap_raz_wi
-#define access_pmintenclr trap_raz_wi
-
-/* Architected CP15 registers.
- * CRn denotes the primary register number, but is copied to the CRm in the
- * user space API for 64-bit register access in line with the terminology used
- * in the ARM ARM.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
- * registers preceding 32-bit ones.
- */
-static const struct coproc_reg cp15_regs[] = {
- /* MPIDR: we use VMPIDR for guest access. */
- { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
- NULL, reset_mpidr, c0_MPIDR },
-
- /* CSSELR: swapped by interrupt.S. */
- { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
- NULL, reset_unknown, c0_CSSELR },
-
- /* ACTLR: trapped by HCR.TAC bit. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
- access_actlr, reset_actlr, c1_ACTLR },
-
- /* CPACR: swapped by interrupt.S. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
- NULL, reset_val, c1_CPACR, 0x00000000 },
-
- /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
- { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
- { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_unknown, c2_TTBR0 },
- { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
- access_vm_reg, reset_unknown, c2_TTBR1 },
- { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
- access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
- { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },
-
-
- /* DACR: swapped by interrupt.S. */
- { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_unknown, c3_DACR },
-
- /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
- { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_unknown, c5_DFSR },
- { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
- access_vm_reg, reset_unknown, c5_IFSR },
- { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_unknown, c5_ADFSR },
- { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
- access_vm_reg, reset_unknown, c5_AIFSR },
-
- /* DFAR/IFAR: swapped by interrupt.S. */
- { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_unknown, c6_DFAR },
- { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
- access_vm_reg, reset_unknown, c6_IFAR },
-
- /* PAR swapped by interrupt.S */
- { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
-
- /*
- * DC{C,I,CI}SW operations:
- */
- { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
- { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
- { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
- /*
- * L2CTLR access (guest wants to know #CPUs).
- */
- { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
- access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
- { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
-
- /*
- * Dummy performance monitor implementation.
- */
- { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
- { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
- { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
- { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
- { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
- { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
- { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
- { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
- { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
- { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
- { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
- { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
- { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
-
- /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
- { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_unknown, c10_PRRR},
- { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
- access_vm_reg, reset_unknown, c10_NMRR},
-
- /* AMAIR0/AMAIR1: swapped by interrupt.S. */
- { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_unknown, c10_AMAIR0},
- { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
- access_vm_reg, reset_unknown, c10_AMAIR1},
-
- /* ICC_SGI1R */
- { CRm64(12), Op1( 0), is64, access_gic_sgi},
-
- /* VBAR: swapped by interrupt.S. */
- { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
- NULL, reset_val, c12_VBAR, 0x00000000 },
-
- /* ICC_ASGI1R */
- { CRm64(12), Op1( 1), is64, access_gic_sgi},
- /* ICC_SGI0R */
- { CRm64(12), Op1( 2), is64, access_gic_sgi},
- /* ICC_SRE */
- { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
-
- /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
- { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
- access_vm_reg, reset_val, c13_CID, 0x00000000 },
- { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
- NULL, reset_unknown, c13_TID_URW },
- { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
- NULL, reset_unknown, c13_TID_URO },
- { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
- NULL, reset_unknown, c13_TID_PRIV },
-
- /* CNTP */
- { CRm64(14), Op1( 2), is64, access_cntp_cval},
-
- /* CNTKCTL: swapped by interrupt.S. */
- { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
- NULL, reset_val, c14_CNTKCTL, 0x00000000 },
-
- /* CNTP */
- { CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval },
- { CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl },
-
- /* The Configuration Base Address Register. */
- { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
-};
-
-static int check_reg_table(const struct coproc_reg *table, unsigned int n)
-{
- unsigned int i;
-
- for (i = 1; i < n; i++) {
- if (cmp_reg(&table[i-1], &table[i]) >= 0) {
- kvm_err("reg table %p out of order (%d)\n", table, i - 1);
- return 1;
- }
- }
-
- return 0;
-}
-
-/* Target specific emulation tables */
-static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
-
-void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
-{
- BUG_ON(check_reg_table(table->table, table->num));
- target_tables[table->target] = table;
-}
-
-/* Get specific register table for this target. */
-static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
-{
- struct kvm_coproc_target_table *table;
-
- table = target_tables[target];
- *num = table->num;
- return table->table;
-}
-
-#define reg_to_match_value(x) \
- ({ \
- unsigned long val; \
- val = (x)->CRn << 11; \
- val |= (x)->CRm << 7; \
- val |= (x)->Op1 << 4; \
- val |= (x)->Op2 << 1; \
- val |= !(x)->is_64bit; \
- val; \
- })
-
-static int match_reg(const void *key, const void *elt)
-{
- const unsigned long pval = (unsigned long)key;
- const struct coproc_reg *r = elt;
-
- return pval - reg_to_match_value(r);
-}
-
-static const struct coproc_reg *find_reg(const struct coproc_params *params,
- const struct coproc_reg table[],
- unsigned int num)
-{
- unsigned long pval = reg_to_match_value(params);
-
- return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
-}
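
find_reg() works by flattening the operands into a single comparable key and handing it to bsearch() disguised as the key pointer. A standalone sketch of the same idea (the is_64bit bit is dropped for brevity, and the comparison avoids the subtraction wrap-around the kernel macro tolerates):

#include <stdlib.h>
#include <stdio.h>

struct reg { unsigned crn, crm, op1, op2; };

static unsigned long key_of(const struct reg *r)
{
	return ((unsigned long)r->crn << 11) | (r->crm << 7) |
	       (r->op1 << 4) | (r->op2 << 1);
}

static int cmp(const void *key, const void *elt)
{
	unsigned long pval = (unsigned long)key;
	unsigned long eval = key_of(elt);

	return (pval > eval) - (pval < eval);
}

int main(void)
{
	/* Must stay sorted by packed key, as check_reg_table() enforces. */
	static const struct reg table[] = {
		{ 0, 0, 0, 5 },		/* MPIDR */
		{ 1, 0, 0, 1 },		/* ACTLR */
		{ 9, 0, 1, 2 },		/* L2CTLR */
	};
	struct reg want = { 1, 0, 0, 1 };
	const struct reg *hit = bsearch((void *)key_of(&want), table,
					sizeof(table) / sizeof(table[0]),
					sizeof(table[0]), cmp);

	printf("%s\n", hit ? "found the ACTLR entry" : "not found");
	return 0;
}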
-
-static int emulate_cp15(struct kvm_vcpu *vcpu,
- const struct coproc_params *params)
-{
- size_t num;
- const struct coproc_reg *table, *r;
-
- trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
- params->CRm, params->Op2, params->is_write);
-
- table = get_target_table(vcpu->arch.target, &num);
-
- /* Search target-specific then generic table. */
- r = find_reg(params, table, num);
- if (!r)
- r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
-
- if (likely(r)) {
- /* If we don't have an accessor, we should never get here! */
- BUG_ON(!r->access);
-
- if (likely(r->access(vcpu, params, r))) {
- /* Skip instruction, since it was emulated */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- }
- } else {
- /* If access function fails, it should complain. */
- kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n",
- *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
- print_cp_instr(params);
- kvm_inject_undefined(vcpu);
- }
-
- return 1;
-}
-
-static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
-{
- struct coproc_params params;
-
- params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
- params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
- params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
- params.is_64bit = true;
-
- params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
- params.Op2 = 0;
- params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
- params.CRm = 0;
-
- return params;
-}
-
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- struct coproc_params params = decode_64bit_hsr(vcpu);
-
- return emulate_cp15(vcpu, &params);
-}
-
-/**
- * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- struct coproc_params params = decode_64bit_hsr(vcpu);
-
- /* raz_wi cp14 */
- trap_raz_wi(vcpu, &params, NULL);
-
- /* handled */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 1;
-}
-
-static void reset_coproc_regs(struct kvm_vcpu *vcpu,
- const struct coproc_reg *table, size_t num,
- unsigned long *bmap)
-{
- unsigned long i;
-
- for (i = 0; i < num; i++)
- if (table[i].reset) {
- int reg = table[i].reg;
-
- table[i].reset(vcpu, &table[i]);
- if (reg > 0 && reg < NR_CP15_REGS) {
- set_bit(reg, bmap);
- if (table[i].is_64bit)
- set_bit(reg + 1, bmap);
- }
- }
-}
-
-static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
-{
- struct coproc_params params;
-
- params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
- params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
- params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
- params.is_64bit = false;
-
- params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
- params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
- params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
- params.Rt2 = 0;
-
- return params;
-}
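
The same field extraction as decode_32bit_hsr(), run on a hand-built syndrome value so the decode is visible end to end (the value itself is hypothetical):

#include <stdio.h>

int main(void)
{
	/* CRn=14, CRm=3, Op1=0, Op2=1, Rt=5, direction bit set (read) */
	unsigned long hsr = (14ul << 10) | (3ul << 1) | (1ul << 17) |
			    (5ul << 5) | 1ul;

	unsigned crm = (hsr >> 1) & 0xf;
	unsigned rt  = (hsr >> 5) & 0xf;
	unsigned crn = (hsr >> 10) & 0xf;
	unsigned op1 = (hsr >> 14) & 0x7;
	unsigned op2 = (hsr >> 17) & 0x7;
	int is_write = (hsr & 1) == 0;

	printf("CRn=%u CRm=%u Op1=%u Op2=%u Rt=%u %s\n",
	       crn, crm, op1, op2, rt, is_write ? "write" : "read");
	return 0;
}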
-
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- struct coproc_params params = decode_32bit_hsr(vcpu);
- return emulate_cp15(vcpu, &params);
-}
-
-/**
- * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- struct coproc_params params = decode_32bit_hsr(vcpu);
-
- /* raz_wi cp14 */
- trap_raz_wi(vcpu, &params, NULL);
-
- /* handled */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 1;
-}
-
-/******************************************************************************
- * Userspace API
- *****************************************************************************/
-
-static bool index_to_params(u64 id, struct coproc_params *params)
-{
- switch (id & KVM_REG_SIZE_MASK) {
- case KVM_REG_SIZE_U32:
-		/* Any unused index bits mean it's not valid. */
- if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
- | KVM_REG_ARM_COPROC_MASK
- | KVM_REG_ARM_32_CRN_MASK
- | KVM_REG_ARM_CRM_MASK
- | KVM_REG_ARM_OPC1_MASK
- | KVM_REG_ARM_32_OPC2_MASK))
- return false;
-
- params->is_64bit = false;
- params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
- >> KVM_REG_ARM_32_CRN_SHIFT);
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
- >> KVM_REG_ARM_CRM_SHIFT);
- params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
- >> KVM_REG_ARM_OPC1_SHIFT);
- params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
- >> KVM_REG_ARM_32_OPC2_SHIFT);
- return true;
- case KVM_REG_SIZE_U64:
-		/* Any unused index bits mean it's not valid. */
- if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
- | KVM_REG_ARM_COPROC_MASK
- | KVM_REG_ARM_CRM_MASK
- | KVM_REG_ARM_OPC1_MASK))
- return false;
- params->is_64bit = true;
- /* CRm to CRn: see cp15_to_index for details */
- params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
- >> KVM_REG_ARM_CRM_SHIFT);
- params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
- >> KVM_REG_ARM_OPC1_SHIFT);
- params->Op2 = 0;
- params->CRm = 0;
- return true;
- default:
- return false;
- }
-}
-
-/* Decode an index value, and find the cp15 coproc_reg entry. */
-static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
- u64 id)
-{
- size_t num;
- const struct coproc_reg *table, *r;
- struct coproc_params params;
-
- /* We only do cp15 for now. */
- if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
- return NULL;
-
- if (!index_to_params(id, &params))
- return NULL;
-
- table = get_target_table(vcpu->arch.target, &num);
- r = find_reg(&params, table, num);
- if (!r)
- r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
-
- /* Not saved in the cp15 array? */
- if (r && !r->reg)
- r = NULL;
-
- return r;
-}
-
-/*
- * These are the invariant cp15 registers: we let the guest see the host
- * versions of these, so they're part of the guest state.
- *
- * A future CPU may provide a mechanism to present different values to
- * the guest, or a future kvm may trap them.
- */
-/* Unfortunately, there's no register-argument for mrc, so generate. */
-#define FUNCTION_FOR32(crn, crm, op1, op2, name) \
- static void get_##name(struct kvm_vcpu *v, \
- const struct coproc_reg *r) \
- { \
- u32 val; \
- \
- asm volatile("mrc p15, " __stringify(op1) \
- ", %0, c" __stringify(crn) \
- ", c" __stringify(crm) \
- ", " __stringify(op2) "\n" : "=r" (val)); \
- ((struct coproc_reg *)r)->val = val; \
- }
-
-FUNCTION_FOR32(0, 0, 0, 0, MIDR)
-FUNCTION_FOR32(0, 0, 0, 1, CTR)
-FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
-FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
-FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
-FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
-FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
-FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
-FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
-FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
-FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
-FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
-FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
-FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
-FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
-FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
-FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
-FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
-FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
-FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
-FUNCTION_FOR32(0, 0, 1, 7, AIDR)
-
-/* ->val is filled in by kvm_invariant_coproc_table_init() */
-static struct coproc_reg invariant_cp15[] = {
- { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
- { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
- { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
- { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
- { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
-
- { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
- { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
-
- { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
- { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
- { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
- { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
- { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
- { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
- { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
- { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },
-
- { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
- { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
- { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
- { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
- { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
- { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
-};
-
-/*
- * Reads a register value from a userspace address to a kernel
- * variable. Make sure that register size matches sizeof(*__val).
- */
-static int reg_from_user(void *val, const void __user *uaddr, u64 id)
-{
- if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
- return -EFAULT;
- return 0;
-}
-
-/*
- * Writes a register value to a userspace address from a kernel variable.
- * Make sure that register size matches sizeof(*__val).
- */
-static int reg_to_user(void __user *uaddr, const void *val, u64 id)
-{
- if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
- return -EFAULT;
- return 0;
-}
-
-static int get_invariant_cp15(u64 id, void __user *uaddr)
-{
- struct coproc_params params;
- const struct coproc_reg *r;
- int ret;
-
- if (!index_to_params(id, &params))
- return -ENOENT;
-
- r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
- if (!r)
- return -ENOENT;
-
- ret = -ENOENT;
- if (KVM_REG_SIZE(id) == 4) {
- u32 val = r->val;
-
- ret = reg_to_user(uaddr, &val, id);
- } else if (KVM_REG_SIZE(id) == 8) {
- ret = reg_to_user(uaddr, &r->val, id);
- }
- return ret;
-}
-
-static int set_invariant_cp15(u64 id, void __user *uaddr)
-{
- struct coproc_params params;
- const struct coproc_reg *r;
- int err;
- u64 val;
-
- if (!index_to_params(id, &params))
- return -ENOENT;
- r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
- if (!r)
- return -ENOENT;
-
- err = -ENOENT;
- if (KVM_REG_SIZE(id) == 4) {
- u32 val32;
-
- err = reg_from_user(&val32, uaddr, id);
- if (!err)
- val = val32;
- } else if (KVM_REG_SIZE(id) == 8) {
- err = reg_from_user(&val, uaddr, id);
- }
- if (err)
- return err;
-
- /* This is what we mean by invariant: you can't change it. */
- if (r->val != val)
- return -EINVAL;
-
- return 0;
-}
-
-static bool is_valid_cache(u32 val)
-{
- u32 level, ctype;
-
- if (val >= CSSELR_MAX)
- return false;
-
- /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
- level = (val >> 1);
- ctype = (cache_levels >> (level * 3)) & 7;
-
- switch (ctype) {
- case 0: /* No cache */
- return false;
- case 1: /* Instruction cache only */
- return (val & 1);
- case 2: /* Data cache only */
- case 4: /* Unified cache */
- return !(val & 1);
- case 3: /* Separate instruction and data caches */
- return true;
- default: /* Reserved: we can't know instruction or data. */
- return false;
- }
-}
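
As an aside for readers studying the decode above: it is self-contained enough
to check in isolation. The sketch below mirrors is_valid_cache() in plain C for
a hypothetical CLIDR value (separate L1 I/D caches, unified L2; the value, the
is_valid_cache_demo name and the standalone harness are all invented for
illustration, and CSSELR_MAX is assumed to be 12, matching the cap used here).

#include <stdbool.h>
#include <stdio.h>

#define CSSELR_MAX 12	/* assumed to match the constant used above */

/* Hypothetical CLIDR: Ctype1 = 0b011 (separate I/D), Ctype2 = 0b100 (unified) */
static const unsigned int clidr = (3 << 0) | (4 << 3);

static bool is_valid_cache_demo(unsigned int val)
{
	unsigned int level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	level = val >> 1;			/* bit 0 selects I vs D */
	ctype = (clidr >> (level * 3)) & 7;	/* 3-bit Ctype field per level */

	switch (ctype) {
	case 0:	return false;		/* no cache at this level */
	case 1:	return val & 1;		/* instruction cache only */
	case 2:
	case 4:	return !(val & 1);	/* data or unified cache */
	case 3:	return true;		/* separate I and D caches */
	default: return false;		/* reserved encoding */
	}
}

int main(void)
{
	unsigned int csselr;

	for (csselr = 0; csselr < CSSELR_MAX; csselr++)
		printf("CSSELR=%2u -> %s\n", csselr,
		       is_valid_cache_demo(csselr) ? "valid" : "invalid");
	return 0;
}

With this CLIDR, only CSSELR 0 and 1 (L1 data/instruction) and 2 (L2 unified,
selected through the data-side encoding) come out valid.
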
-
-/* Which cache CCSIDR represents depends on CSSELR value. */
-static u32 get_ccsidr(u32 csselr)
-{
- u32 ccsidr;
-
-	/* Make sure no one else changes CSSELR during this! */
- local_irq_disable();
- /* Put value into CSSELR */
- asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
- isb();
- /* Read result out of CCSIDR */
- asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
- local_irq_enable();
-
- return ccsidr;
-}
-
-static int demux_c15_get(u64 id, void __user *uaddr)
-{
- u32 val;
- u32 __user *uval = uaddr;
-
- /* Fail if we have unknown bits set. */
- if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
- | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
- return -ENOENT;
-
- switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
- case KVM_REG_ARM_DEMUX_ID_CCSIDR:
- if (KVM_REG_SIZE(id) != 4)
- return -ENOENT;
- val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
- >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
- if (!is_valid_cache(val))
- return -ENOENT;
-
- return put_user(get_ccsidr(val), uval);
- default:
- return -ENOENT;
- }
-}
-
-static int demux_c15_set(u64 id, void __user *uaddr)
-{
- u32 val, newval;
- u32 __user *uval = uaddr;
-
- /* Fail if we have unknown bits set. */
- if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
- | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
- return -ENOENT;
-
- switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
- case KVM_REG_ARM_DEMUX_ID_CCSIDR:
- if (KVM_REG_SIZE(id) != 4)
- return -ENOENT;
- val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
- >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
- if (!is_valid_cache(val))
- return -ENOENT;
-
- if (get_user(newval, uval))
- return -EFAULT;
-
- /* This is also invariant: you can't change it. */
- if (newval != get_ccsidr(val))
- return -EINVAL;
- return 0;
- default:
- return -ENOENT;
- }
-}
-
-#ifdef CONFIG_VFPv3
-static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
- KVM_REG_ARM_VFP_FPSCR,
- KVM_REG_ARM_VFP_FPINST,
- KVM_REG_ARM_VFP_FPINST2,
- KVM_REG_ARM_VFP_MVFR0,
- KVM_REG_ARM_VFP_MVFR1,
- KVM_REG_ARM_VFP_FPSID };
-
-static unsigned int num_fp_regs(void)
-{
- if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
- return 32;
- else
- return 16;
-}
-
-static unsigned int num_vfp_regs(void)
-{
- /* Normal FP regs + control regs. */
- return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
-}
-
-static int copy_vfp_regids(u64 __user *uindices)
-{
- unsigned int i;
- const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
- const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
-
- for (i = 0; i < num_fp_regs(); i++) {
- if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
- uindices))
- return -EFAULT;
- uindices++;
- }
-
- for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
- if (put_user(u32reg | vfp_sysregs[i], uindices))
- return -EFAULT;
- uindices++;
- }
-
- return num_vfp_regs();
-}
-
-static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
-{
- u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
- u32 val;
-
- /* Fail if we have unknown bits set. */
- if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
- | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
- return -ENOENT;
-
- if (vfpid < num_fp_regs()) {
- if (KVM_REG_SIZE(id) != 8)
- return -ENOENT;
- return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
- id);
- }
-
- /* FP control registers are all 32 bit. */
- if (KVM_REG_SIZE(id) != 4)
- return -ENOENT;
-
- switch (vfpid) {
- case KVM_REG_ARM_VFP_FPEXC:
- return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
- case KVM_REG_ARM_VFP_FPSCR:
- return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
- case KVM_REG_ARM_VFP_FPINST:
- return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
- case KVM_REG_ARM_VFP_FPINST2:
- return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
- case KVM_REG_ARM_VFP_MVFR0:
- val = fmrx(MVFR0);
- return reg_to_user(uaddr, &val, id);
- case KVM_REG_ARM_VFP_MVFR1:
- val = fmrx(MVFR1);
- return reg_to_user(uaddr, &val, id);
- case KVM_REG_ARM_VFP_FPSID:
- val = fmrx(FPSID);
- return reg_to_user(uaddr, &val, id);
- default:
- return -ENOENT;
- }
-}
-
-static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
-{
- u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
- u32 val;
-
- /* Fail if we have unknown bits set. */
- if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
- | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
- return -ENOENT;
-
- if (vfpid < num_fp_regs()) {
- if (KVM_REG_SIZE(id) != 8)
- return -ENOENT;
- return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
- uaddr, id);
- }
-
- /* FP control registers are all 32 bit. */
- if (KVM_REG_SIZE(id) != 4)
- return -ENOENT;
-
- switch (vfpid) {
- case KVM_REG_ARM_VFP_FPEXC:
- return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
- case KVM_REG_ARM_VFP_FPSCR:
- return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
- case KVM_REG_ARM_VFP_FPINST:
- return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
- case KVM_REG_ARM_VFP_FPINST2:
- return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
- /* These are invariant. */
- case KVM_REG_ARM_VFP_MVFR0:
- if (reg_from_user(&val, uaddr, id))
- return -EFAULT;
- if (val != fmrx(MVFR0))
- return -EINVAL;
- return 0;
- case KVM_REG_ARM_VFP_MVFR1:
- if (reg_from_user(&val, uaddr, id))
- return -EFAULT;
- if (val != fmrx(MVFR1))
- return -EINVAL;
- return 0;
- case KVM_REG_ARM_VFP_FPSID:
- if (reg_from_user(&val, uaddr, id))
- return -EFAULT;
- if (val != fmrx(FPSID))
- return -EINVAL;
- return 0;
- default:
- return -ENOENT;
- }
-}
-#else /* !CONFIG_VFPv3 */
-static unsigned int num_vfp_regs(void)
-{
- return 0;
-}
-
-static int copy_vfp_regids(u64 __user *uindices)
-{
- return 0;
-}
-
-static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
-{
- return -ENOENT;
-}
-
-static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
-{
- return -ENOENT;
-}
-#endif /* !CONFIG_VFPv3 */
-
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- const struct coproc_reg *r;
- void __user *uaddr = (void __user *)(long)reg->addr;
- int ret;
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
- return demux_c15_get(reg->id, uaddr);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
- return vfp_get_reg(vcpu, reg->id, uaddr);
-
- r = index_to_coproc_reg(vcpu, reg->id);
- if (!r)
- return get_invariant_cp15(reg->id, uaddr);
-
- ret = -ENOENT;
- if (KVM_REG_SIZE(reg->id) == 8) {
- u64 val;
-
- val = vcpu_cp15_reg64_get(vcpu, r);
- ret = reg_to_user(uaddr, &val, reg->id);
- } else if (KVM_REG_SIZE(reg->id) == 4) {
- ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
- }
-
- return ret;
-}
-
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- const struct coproc_reg *r;
- void __user *uaddr = (void __user *)(long)reg->addr;
- int ret;
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
- return demux_c15_set(reg->id, uaddr);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
- return vfp_set_reg(vcpu, reg->id, uaddr);
-
- r = index_to_coproc_reg(vcpu, reg->id);
- if (!r)
- return set_invariant_cp15(reg->id, uaddr);
-
- ret = -ENOENT;
- if (KVM_REG_SIZE(reg->id) == 8) {
- u64 val;
-
- ret = reg_from_user(&val, uaddr, reg->id);
- if (!ret)
- vcpu_cp15_reg64_set(vcpu, r, val);
- } else if (KVM_REG_SIZE(reg->id) == 4) {
- ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
- }
-
- return ret;
-}
-
-static unsigned int num_demux_regs(void)
-{
- unsigned int i, count = 0;
-
- for (i = 0; i < CSSELR_MAX; i++)
- if (is_valid_cache(i))
- count++;
-
- return count;
-}
-
-static int write_demux_regids(u64 __user *uindices)
-{
- u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
- unsigned int i;
-
- val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
- for (i = 0; i < CSSELR_MAX; i++) {
- if (!is_valid_cache(i))
- continue;
- if (put_user(val | i, uindices))
- return -EFAULT;
- uindices++;
- }
- return 0;
-}
-
-static u64 cp15_to_index(const struct coproc_reg *reg)
-{
- u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
- if (reg->is_64bit) {
- val |= KVM_REG_SIZE_U64;
- val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
-		/*
-		 * CRn always denotes the primary coprocessor register
-		 * number for the in-kernel representation, but the user
-		 * space API uses CRm for the encoding, because it is
-		 * modelled after the MRRC/MCRR instructions: see the
-		 * ARM ARM rev. c page B3-1445.
-		 */
- val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
- } else {
- val |= KVM_REG_SIZE_U32;
- val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
- val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
- val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
- val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
- }
- return val;
-}
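
A sketch of what cp15_to_index() produces may help here. The program below
composes just the low index bits for two sample registers; index32()/index64()
are invented helper names, and the shift values are assumptions chosen to
mirror the KVM_REG_ARM_* shifts used above (Op2 in bits 0-2, Op1 in bits 3-6,
CRm in bits 7-10, CRn in bits 11-14). The arch and size bits ORed in above are
left out for brevity.

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout, mirroring the shifts used by cp15_to_index(). */
#define OPC2_SHIFT	0
#define OPC1_SHIFT	3
#define CRM_SHIFT	7
#define CRN_SHIFT	11

/* 32-bit register: all four fields are encoded. */
static uint32_t index32(uint32_t crn, uint32_t crm, uint32_t op1, uint32_t op2)
{
	return (op2 << OPC2_SHIFT) | (op1 << OPC1_SHIFT) |
	       (crm << CRM_SHIFT) | (crn << CRN_SHIFT);
}

/* 64-bit register: CRn lands in the CRm field, per the comment above. */
static uint32_t index64(uint32_t crn, uint32_t op1)
{
	return (op1 << OPC1_SHIFT) | (crn << CRM_SHIFT);
}

int main(void)
{
	/* SCTLR: c1, c0, Op1 0, Op2 0 */
	printf("SCTLR bits: 0x%04x\n", (unsigned)index32(1, 0, 0, 0));
	/* TTBR0: the 64-bit c2 register, accessed via MRRC with Op1 0 */
	printf("TTBR0 bits: 0x%04x\n", (unsigned)index64(2, 0));
	return 0;
}
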
-
-static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
-{
- if (!*uind)
- return true;
-
- if (put_user(cp15_to_index(reg), *uind))
- return false;
-
- (*uind)++;
- return true;
-}
-
-/* The tables are assumed to be ordered; see kvm_coproc_table_init. */
-static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
-{
- const struct coproc_reg *i1, *i2, *end1, *end2;
- unsigned int total = 0;
- size_t num;
-
- /* We check for duplicates here, to allow arch-specific overrides. */
- i1 = get_target_table(vcpu->arch.target, &num);
- end1 = i1 + num;
- i2 = cp15_regs;
- end2 = cp15_regs + ARRAY_SIZE(cp15_regs);
-
- BUG_ON(i1 == end1 || i2 == end2);
-
- /* Walk carefully, as both tables may refer to the same register. */
- while (i1 || i2) {
- int cmp = cmp_reg(i1, i2);
- /* target-specific overrides generic entry. */
- if (cmp <= 0) {
- /* Ignore registers we trap but don't save. */
- if (i1->reg) {
- if (!copy_reg_to_user(i1, &uind))
- return -EFAULT;
- total++;
- }
- } else {
- /* Ignore registers we trap but don't save. */
- if (i2->reg) {
- if (!copy_reg_to_user(i2, &uind))
- return -EFAULT;
- total++;
- }
- }
-
- if (cmp <= 0 && ++i1 == end1)
- i1 = NULL;
- if (cmp >= 0 && ++i2 == end2)
- i2 = NULL;
- }
- return total;
-}
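
walk_cp15() is a textbook merge of two sorted tables in which a tie resolves
in favour of the first (target-specific) table and the duplicate generic entry
is skipped. A standalone sketch of the same stepping logic, with plain ints
standing in for coproc_reg entries (merge_with_override is an invented name):

#include <stdio.h>

/* Merge two sorted arrays; on equal keys, the first array wins. */
static void merge_with_override(const int *a, int na, const int *b, int nb)
{
	const int *i1 = a, *e1 = a + na;
	const int *i2 = b, *e2 = b + nb;

	while (i1 || i2) {
		int cmp;

		if (!i1)
			cmp = 1;		/* only b entries left */
		else if (!i2)
			cmp = -1;		/* only a entries left */
		else
			cmp = *i1 - *i2;

		printf("%d\n", cmp <= 0 ? *i1 : *i2);

		/* On a tie both cursors advance, skipping the duplicate. */
		if (cmp <= 0 && ++i1 == e1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == e2)
			i2 = NULL;
	}
}

int main(void)
{
	int target[]  = { 2, 5 };		/* target-specific overrides */
	int generic[] = { 1, 2, 3, 5, 8 };

	merge_with_override(target, 2, generic, 5);
	return 0;
}

The cmp <= 0 / cmp >= 0 pair of advances is the same trick used above: on a
tie both iterators step forward, so the overridden entry is emitted once.
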
-
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
-{
- return ARRAY_SIZE(invariant_cp15)
- + num_demux_regs()
- + num_vfp_regs()
- + walk_cp15(vcpu, (u64 __user *)NULL);
-}
-
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- unsigned int i;
- int err;
-
- /* Then give them all the invariant registers' indices. */
- for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
- if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
- return -EFAULT;
- uindices++;
- }
-
- err = walk_cp15(vcpu, uindices);
- if (err < 0)
- return err;
- uindices += err;
-
- err = copy_vfp_regids(uindices);
- if (err < 0)
- return err;
- uindices += err;
-
- return write_demux_regids(uindices);
-}
-
-void kvm_coproc_table_init(void)
-{
- unsigned int i;
-
- /* Make sure tables are unique and in order. */
- BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
- BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));
-
- /* We abuse the reset function to overwrite the table itself. */
- for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
- invariant_cp15[i].reset(NULL, &invariant_cp15[i]);
-
- /*
- * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
- *
- * If software reads the Cache Type fields from Ctype1
- * upwards, once it has seen a value of 0b000, no caches
- * exist at further-out levels of the hierarchy. So, for
- * example, if Ctype3 is the first Cache Type field with a
- * value of 0b000, the values of Ctype4 to Ctype7 must be
- * ignored.
- */
- asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
- for (i = 0; i < 7; i++)
- if (((cache_levels >> (i*3)) & 7) == 0)
- break;
- /* Clear all higher bits. */
- cache_levels &= (1 << (i*3))-1;
-}
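
For a concrete feel of the truncation loop just above, here is the same
arithmetic run on an invented CLIDR value that has a bogus Ctype4 field beyond
the first empty level (a standalone sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Invented CLIDR: Ctype1 = 0b011, Ctype2 = 0b100, bogus Ctype4 */
	unsigned int cache_levels = (3 << 0) | (4 << 3) | (5 << 9);
	unsigned int i;

	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i * 3)) & 7) == 0)
			break;			/* first empty level: i == 2 */

	/* Clear all higher bits, as above. */
	cache_levels &= (1 << (i * 3)) - 1;
	printf("cache_levels = 0x%x\n", cache_levels);	/* prints 0x23 */
	return 0;
}
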
-
-/**
- * kvm_reset_coprocs - sets cp15 registers to reset value
- * @vcpu: The VCPU pointer
- *
- * This function finds the right table above and sets the registers on the
- * virtual CPU struct to their architecturally defined reset values.
- */
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
-{
- size_t num;
- const struct coproc_reg *table;
- DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
-
- /* Generic chip reset first (so target could override). */
- reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
-
- table = get_target_table(vcpu->arch.target, &num);
- reset_coproc_regs(vcpu, table, num, bmap);
-
- for (num = 1; num < NR_CP15_REGS; num++)
- WARN(!test_bit(num, bmap),
- "Didn't reset vcpu_cp15(vcpu, %zi)", num);
-}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
deleted file mode 100644
index 637065b13012..000000000000
--- a/arch/arm/kvm/coproc.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_COPROC_LOCAL_H__
-#define __ARM_KVM_COPROC_LOCAL_H__
-
-struct coproc_params {
- unsigned long CRn;
- unsigned long CRm;
- unsigned long Op1;
- unsigned long Op2;
- unsigned long Rt1;
- unsigned long Rt2;
- bool is_64bit;
- bool is_write;
-};
-
-struct coproc_reg {
- /* MRC/MCR/MRRC/MCRR instruction which accesses it. */
- unsigned long CRn;
- unsigned long CRm;
- unsigned long Op1;
- unsigned long Op2;
-
- bool is_64bit;
-
- /* Trapped access from guest, if non-NULL. */
- bool (*access)(struct kvm_vcpu *,
- const struct coproc_params *,
- const struct coproc_reg *);
-
- /* Initialization for vcpu. */
- void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
-
- /* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
- unsigned long reg;
-
- /* Value (usually reset value) */
- u64 val;
-};
-
-static inline void print_cp_instr(const struct coproc_params *p)
-{
- /* Look, we even formatted it for you to paste into the table! */
- if (p->is_64bit) {
- kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
- p->CRn, p->Op1, p->is_write ? "write" : "read");
- } else {
- kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
- " func_%s },\n",
- p->CRn, p->CRm, p->Op1, p->Op2,
- p->is_write ? "write" : "read");
- }
-}
-
-static inline bool ignore_write(struct kvm_vcpu *vcpu,
- const struct coproc_params *p)
-{
- return true;
-}
-
-static inline bool read_zero(struct kvm_vcpu *vcpu,
- const struct coproc_params *p)
-{
- *vcpu_reg(vcpu, p->Rt1) = 0;
- return true;
-}
-
-/* Reset functions */
-static inline void reset_unknown(struct kvm_vcpu *vcpu,
- const struct coproc_reg *r)
-{
- BUG_ON(!r->reg);
- BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
- vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
-}
-
-static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
-{
- BUG_ON(!r->reg);
- BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
- vcpu_cp15(vcpu, r->reg) = r->val;
-}
-
-static inline void reset_unknown64(struct kvm_vcpu *vcpu,
- const struct coproc_reg *r)
-{
- BUG_ON(!r->reg);
- BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
-
- vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
- vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
-}
-
-static inline int cmp_reg(const struct coproc_reg *i1,
- const struct coproc_reg *i2)
-{
- BUG_ON(i1 == i2);
- if (!i1)
- return 1;
- else if (!i2)
- return -1;
- if (i1->CRn != i2->CRn)
- return i1->CRn - i2->CRn;
- if (i1->CRm != i2->CRm)
- return i1->CRm - i2->CRm;
- if (i1->Op1 != i2->Op1)
- return i1->Op1 - i2->Op1;
- if (i1->Op2 != i2->Op2)
- return i1->Op2 - i2->Op2;
- return i2->is_64bit - i1->is_64bit;
-}
-
-
-#define CRn(_x) .CRn = _x
-#define CRm(_x) .CRm = _x
-#define CRm64(_x) .CRn = _x, .CRm = 0
-#define Op1(_x) .Op1 = _x
-#define Op2(_x) .Op2 = _x
-#define is64 .is_64bit = true
-#define is32 .is_64bit = false
-
-bool access_vm_reg(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r);
-
-#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
deleted file mode 100644
index 36bf15421ae8..000000000000
--- a/arch/arm/kvm/coproc_a15.c
+++ /dev/null
@@ -1,39 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Authors: Rusty Russell <rusty@rustcorp.au>
- * Christoffer Dall <c.dall@virtualopensystems.com>
- */
-#include <linux/kvm_host.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_emulate.h>
-#include <linux/init.h>
-
-#include "coproc.h"
-
-/*
- * A15-specific CP15 registers.
- * CRn denotes the primary register number, but is copied to the CRm in the
- * user space API for 64-bit register access in line with the terminology used
- * in the ARM ARM.
- * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
- * registers preceding 32-bit ones.
- */
-static const struct coproc_reg a15_regs[] = {
- /* SCTLR: swapped by interrupt.S. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
-};
-
-static struct kvm_coproc_target_table a15_target_table = {
- .target = KVM_ARM_TARGET_CORTEX_A15,
- .table = a15_regs,
- .num = ARRAY_SIZE(a15_regs),
-};
-
-static int __init coproc_a15_init(void)
-{
- kvm_register_target_coproc_table(&a15_target_table);
- return 0;
-}
-late_initcall(coproc_a15_init);
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
deleted file mode 100644
index 40f643e1e05c..000000000000
--- a/arch/arm/kvm/coproc_a7.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Copyright (C) 2013 - ARM Ltd
- *
- * Authors: Rusty Russell <rusty@rustcorp.au>
- * Christoffer Dall <c.dall@virtualopensystems.com>
- * Jonathan Austin <jonathan.austin@arm.com>
- */
-#include <linux/kvm_host.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_emulate.h>
-#include <linux/init.h>
-
-#include "coproc.h"
-
-/*
- * Cortex-A7 specific CP15 registers.
- * CRn denotes the primary register number, but is copied to the CRm in the
- * user space API for 64-bit register access in line with the terminology used
- * in the ARM ARM.
- * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
- * registers preceding 32-bit ones.
- */
-static const struct coproc_reg a7_regs[] = {
- /* SCTLR: swapped by interrupt.S. */
- { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
-};
-
-static struct kvm_coproc_target_table a7_target_table = {
- .target = KVM_ARM_TARGET_CORTEX_A7,
- .table = a7_regs,
- .num = ARRAY_SIZE(a7_regs),
-};
-
-static int __init coproc_a7_init(void)
-{
- kvm_register_target_coproc_table(&a7_target_table);
- return 0;
-}
-late_initcall(coproc_a7_init);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
deleted file mode 100644
index 29bb852140c5..000000000000
--- a/arch/arm/kvm/emulate.c
+++ /dev/null
@@ -1,166 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/mm.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_emulate.h>
-#include <asm/opcodes.h>
-#include <trace/events/kvm.h>
-
-#include "trace.h"
-
-#define VCPU_NR_MODES 6
-#define VCPU_REG_OFFSET_USR 0
-#define VCPU_REG_OFFSET_FIQ 1
-#define VCPU_REG_OFFSET_IRQ 2
-#define VCPU_REG_OFFSET_SVC 3
-#define VCPU_REG_OFFSET_ABT 4
-#define VCPU_REG_OFFSET_UND 5
-#define REG_OFFSET(_reg) \
- (offsetof(struct kvm_regs, _reg) / sizeof(u32))
-
-#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
-
-static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
- /* USR/SYS Registers */
- [VCPU_REG_OFFSET_USR] = {
- USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
- USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
- USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
- USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
- USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
- },
-
- /* FIQ Registers */
- [VCPU_REG_OFFSET_FIQ] = {
- USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
- USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
- USR_REG_OFFSET(6), USR_REG_OFFSET(7),
- REG_OFFSET(fiq_regs[0]), /* r8 */
- REG_OFFSET(fiq_regs[1]), /* r9 */
- REG_OFFSET(fiq_regs[2]), /* r10 */
- REG_OFFSET(fiq_regs[3]), /* r11 */
- REG_OFFSET(fiq_regs[4]), /* r12 */
- REG_OFFSET(fiq_regs[5]), /* r13 */
- REG_OFFSET(fiq_regs[6]), /* r14 */
- },
-
- /* IRQ Registers */
- [VCPU_REG_OFFSET_IRQ] = {
- USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
- USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
- USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
- USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
- USR_REG_OFFSET(12),
- REG_OFFSET(irq_regs[0]), /* r13 */
- REG_OFFSET(irq_regs[1]), /* r14 */
- },
-
- /* SVC Registers */
- [VCPU_REG_OFFSET_SVC] = {
- USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
- USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
- USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
- USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
- USR_REG_OFFSET(12),
- REG_OFFSET(svc_regs[0]), /* r13 */
- REG_OFFSET(svc_regs[1]), /* r14 */
- },
-
- /* ABT Registers */
- [VCPU_REG_OFFSET_ABT] = {
- USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
- USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
- USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
- USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
- USR_REG_OFFSET(12),
- REG_OFFSET(abt_regs[0]), /* r13 */
- REG_OFFSET(abt_regs[1]), /* r14 */
- },
-
- /* UND Registers */
- [VCPU_REG_OFFSET_UND] = {
- USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
- USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
- USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
- USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
- USR_REG_OFFSET(12),
- REG_OFFSET(und_regs[0]), /* r13 */
- REG_OFFSET(und_regs[1]), /* r14 */
- },
-};
-
-/*
- * Return a pointer to the register, identified by reg_num, that is
- * valid in the current mode of the virtual CPU.
- */
-unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
-{
- unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
- unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
-
- switch (mode) {
- case USR_MODE...SVC_MODE:
- mode &= ~MODE32_BIT; /* 0 ... 3 */
- break;
-
- case ABT_MODE:
- mode = VCPU_REG_OFFSET_ABT;
- break;
-
- case UND_MODE:
- mode = VCPU_REG_OFFSET_UND;
- break;
-
- case SYSTEM_MODE:
- mode = VCPU_REG_OFFSET_USR;
- break;
-
- default:
- BUG();
- }
-
- return reg_array + vcpu_reg_offsets[mode][reg_num];
-}
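
The REG_OFFSET() trick treats struct kvm_regs as a flat array of 32-bit words,
so each entry in the per-mode tables above is just a word index computed at
compile time with offsetof(). A self-contained illustration of the same idea,
using an invented two-mode register file (struct regs and reg_ptr() are made
up for this sketch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A made-up register file: r0-r2 shared, r3 banked per mode. */
struct regs {
	uint32_t common[3];
	uint32_t r3_usr;
	uint32_t r3_fiq;
};

#define REG_OFFSET(field) (offsetof(struct regs, field) / sizeof(uint32_t))

/* Per-mode tables of word offsets, as in vcpu_reg_offsets[][]. */
static const unsigned long offsets[2][4] = {
	[0] = { REG_OFFSET(common[0]), REG_OFFSET(common[1]),
		REG_OFFSET(common[2]), REG_OFFSET(r3_usr) },
	[1] = { REG_OFFSET(common[0]), REG_OFFSET(common[1]),
		REG_OFFSET(common[2]), REG_OFFSET(r3_fiq) },
};

static uint32_t *reg_ptr(struct regs *r, int mode, int num)
{
	return (uint32_t *)r + offsets[mode][num];
}

int main(void)
{
	struct regs r = { .r3_usr = 0x11, .r3_fiq = 0x22 };

	printf("mode0 r3 = 0x%x\n", *reg_ptr(&r, 0, 3));	/* 0x11 */
	printf("mode1 r3 = 0x%x\n", *reg_ptr(&r, 1, 3));	/* 0x22 */
	return 0;
}
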
-
-/*
- * Return the SPSR for the current mode of the virtual CPU.
- */
-unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
-{
- unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
- switch (mode) {
- case SVC_MODE:
- return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
- case ABT_MODE:
- return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
- case UND_MODE:
- return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
- case IRQ_MODE:
- return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
- case FIQ_MODE:
- return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
- default:
- BUG();
- }
-}
-
-/******************************************************************************
- * Inject exceptions into the guest
- */
-
-/**
- * kvm_inject_vabt - inject an async abort / SError into the guest
- * @vcpu: The VCPU to receive the exception
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_vabt(struct kvm_vcpu *vcpu)
-{
- *vcpu_hcr(vcpu) |= HCR_VA;
-}
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
deleted file mode 100644
index 9f7ae0d8690f..000000000000
--- a/arch/arm/kvm/guest.c
+++ /dev/null
@@ -1,387 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <kvm/arm_psci.h>
-#include <asm/cputype.h>
-#include <linux/uaccess.h>
-#include <asm/kvm.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-
-#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
-#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
-
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT(halt_successful_poll),
- VCPU_STAT(halt_attempted_poll),
- VCPU_STAT(halt_poll_invalid),
- VCPU_STAT(halt_wakeup),
- VCPU_STAT(hvc_exit_stat),
- VCPU_STAT(wfe_exit_stat),
- VCPU_STAT(wfi_exit_stat),
- VCPU_STAT(mmio_exit_user),
- VCPU_STAT(mmio_exit_kernel),
- VCPU_STAT(exits),
- { NULL }
-};
-
-static u64 core_reg_offset_from_id(u64 id)
-{
- return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
-}
-
-static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- u32 __user *uaddr = (u32 __user *)(long)reg->addr;
- struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
- u64 off;
-
- if (KVM_REG_SIZE(reg->id) != 4)
- return -ENOENT;
-
- /* Our ID is an index into the kvm_regs struct. */
- off = core_reg_offset_from_id(reg->id);
- if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
- return -ENOENT;
-
- return put_user(((u32 *)regs)[off], uaddr);
-}
-
-static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- u32 __user *uaddr = (u32 __user *)(long)reg->addr;
- struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
- u64 off, val;
-
- if (KVM_REG_SIZE(reg->id) != 4)
- return -ENOENT;
-
- /* Our ID is an index into the kvm_regs struct. */
- off = core_reg_offset_from_id(reg->id);
- if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
- return -ENOENT;
-
- if (get_user(val, uaddr) != 0)
- return -EFAULT;
-
- if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
- unsigned long mode = val & MODE_MASK;
- switch (mode) {
- case USR_MODE:
- case FIQ_MODE:
- case IRQ_MODE:
- case SVC_MODE:
- case ABT_MODE:
- case UND_MODE:
- break;
- default:
- return -EINVAL;
- }
- }
-
- ((u32 *)regs)[off] = val;
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
- return -EINVAL;
-}
-
-#define NUM_TIMER_REGS 3
-
-static bool is_timer_reg(u64 index)
-{
- switch (index) {
- case KVM_REG_ARM_TIMER_CTL:
- case KVM_REG_ARM_TIMER_CNT:
- case KVM_REG_ARM_TIMER_CVAL:
- return true;
- }
- return false;
-}
-
-static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
- return -EFAULT;
- uindices++;
- if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
- return -EFAULT;
- uindices++;
- if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
- return -EFAULT;
-
- return 0;
-}
-
-static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- void __user *uaddr = (void __user *)(long)reg->addr;
- u64 val;
- int ret;
-
- ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
- if (ret != 0)
- return -EFAULT;
-
- return kvm_arm_timer_set_reg(vcpu, reg->id, val);
-}
-
-static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- void __user *uaddr = (void __user *)(long)reg->addr;
- u64 val;
-
- val = kvm_arm_timer_get_reg(vcpu, reg->id);
- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
-}
-
-static unsigned long num_core_regs(void)
-{
- return sizeof(struct kvm_regs) / sizeof(u32);
-}
-
-/**
- * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
- *
- * This is for all registers.
- */
-unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
-{
- return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
- + kvm_arm_get_fw_num_regs(vcpu)
- + NUM_TIMER_REGS;
-}
-
-/**
- * kvm_arm_copy_reg_indices - get indices of all registers.
- *
- * We do core registers right here, then we append coproc regs.
- */
-int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- unsigned int i;
- const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
- int ret;
-
- for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
- if (put_user(core_reg | i, uindices))
- return -EFAULT;
- uindices++;
- }
-
- ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
- if (ret)
- return ret;
- uindices += kvm_arm_get_fw_num_regs(vcpu);
-
- ret = copy_timer_indices(vcpu, uindices);
- if (ret)
- return ret;
- uindices += NUM_TIMER_REGS;
-
- return kvm_arm_copy_coproc_indices(vcpu, uindices);
-}
-
-int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- /* We currently use nothing arch-specific in upper 32 bits */
- if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
- return -EINVAL;
-
- /* Register group 16 means we want a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return get_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_get_fw_reg(vcpu, reg);
-
- if (is_timer_reg(reg->id))
- return get_timer_reg(vcpu, reg);
-
- return kvm_arm_coproc_get_reg(vcpu, reg);
-}
-
-int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
-{
- /* We currently use nothing arch-specific in upper 32 bits */
- if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
- return -EINVAL;
-
- /* Register group 16 means we set a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return set_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_set_fw_reg(vcpu, reg);
-
- if (is_timer_reg(reg->id))
- return set_timer_reg(vcpu, reg);
-
- return kvm_arm_coproc_set_reg(vcpu, reg);
-}
-
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- return -EINVAL;
-}
-
-
-int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_events *events)
-{
- events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
-
- /*
- * We never return a pending ext_dabt here because we deliver it to
- * the virtual CPU directly when setting the event and it's no longer
- * 'pending' at this point.
- */
-
- return 0;
-}
-
-int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_events *events)
-{
- bool serror_pending = events->exception.serror_pending;
- bool has_esr = events->exception.serror_has_esr;
- bool ext_dabt_pending = events->exception.ext_dabt_pending;
-
- if (serror_pending && has_esr)
- return -EINVAL;
- else if (serror_pending)
- kvm_inject_vabt(vcpu);
-
- if (ext_dabt_pending)
- kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-
- return 0;
-}
-
-int __attribute_const__ kvm_target_cpu(void)
-{
- switch (read_cpuid_part()) {
- case ARM_CPU_PART_CORTEX_A7:
- return KVM_ARM_TARGET_CORTEX_A7;
- case ARM_CPU_PART_CORTEX_A15:
- return KVM_ARM_TARGET_CORTEX_A15;
- default:
- return -EINVAL;
- }
-}
-
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
-{
- int target = kvm_target_cpu();
-
- if (target < 0)
- return -ENODEV;
-
- memset(init, 0, sizeof(*init));
-
-	/*
-	 * For now, we don't return any features.
-	 * In the future, we might use features to return
-	 * target-specific features available for the preferred
-	 * target type.
-	 */
- init->target = (__u32)target;
-
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
- struct kvm_translation *tr)
-{
- return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
-{
- return -EINVAL;
-}
-
-int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret;
-
- switch (attr->group) {
- case KVM_ARM_VCPU_TIMER_CTRL:
- ret = kvm_arm_timer_set_attr(vcpu, attr);
- break;
- default:
- ret = -ENXIO;
- break;
- }
-
- return ret;
-}
-
-int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret;
-
- switch (attr->group) {
- case KVM_ARM_VCPU_TIMER_CTRL:
- ret = kvm_arm_timer_get_attr(vcpu, attr);
- break;
- default:
- ret = -ENXIO;
- break;
- }
-
- return ret;
-}
-
-int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret;
-
- switch (attr->group) {
- case KVM_ARM_VCPU_TIMER_CTRL:
- ret = kvm_arm_timer_has_attr(vcpu, attr);
- break;
- default:
- ret = -ENXIO;
- break;
- }
-
- return ret;
-}
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
deleted file mode 100644
index e58a89d2f13f..000000000000
--- a/arch/arm/kvm/handle_exit.c
+++ /dev/null
@@ -1,175 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
-#include <trace/events/kvm.h>
-
-#include "trace.h"
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- int ret;
-
- trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
- kvm_vcpu_hvc_get_imm(vcpu));
- vcpu->stat.hvc_exit_stat++;
-
- ret = kvm_hvc_call_handler(vcpu);
- if (ret < 0) {
- vcpu_set_reg(vcpu, 0, ~0UL);
- return 1;
- }
-
- return ret;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- /*
- * "If an SMC instruction executed at Non-secure EL1 is
- * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
- * Trap exception, not a Secure Monitor Call exception [...]"
- *
- * We need to advance the PC after the trap, as it would
- * otherwise return to the same address...
- */
- vcpu_set_reg(vcpu, 0, ~0UL);
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 1;
-}
-
-/**
- * kvm_handle_wfx - handle WFI or WFE instructions trapped in guests
- * @vcpu: the vcpu pointer
- * @run: the kvm_run structure pointer
- *
- * WFE: Yield the CPU and come back to this vcpu when the scheduler
- * decides to.
- * WFI: Simply call kvm_vcpu_block(), which will halt execution of
- * world switches and schedule other host processes until there is an
- * incoming IRQ or FIQ for the VM.
- */
-static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
- trace_kvm_wfx(*vcpu_pc(vcpu), true);
- vcpu->stat.wfe_exit_stat++;
- kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
- } else {
- trace_kvm_wfx(*vcpu_pc(vcpu), false);
- vcpu->stat.wfi_exit_stat++;
- kvm_vcpu_block(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- }
-
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-
- return 1;
-}
-
-static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- u32 hsr = kvm_vcpu_get_hsr(vcpu);
-
- kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
- hsr);
-
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-static exit_handle_fn arm_exit_handlers[] = {
- [0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
- [HSR_EC_WFI] = kvm_handle_wfx,
- [HSR_EC_CP15_32] = kvm_handle_cp15_32,
- [HSR_EC_CP15_64] = kvm_handle_cp15_64,
- [HSR_EC_CP14_MR] = kvm_handle_cp14_32,
- [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [HSR_EC_CP14_64] = kvm_handle_cp14_64,
- [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
- [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
- [HSR_EC_HVC] = handle_hvc,
- [HSR_EC_SMC] = handle_smc,
- [HSR_EC_IABT] = kvm_handle_guest_abort,
- [HSR_EC_DABT] = kvm_handle_guest_abort,
-};
-
-static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
-{
- u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-
- return arm_exit_handlers[hsr_ec];
-}
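
The handler table above leans on the designated range initializer
([0 ... HSR_EC_MAX]) to give every exception class a safe default before the
known classes override individual slots. A minimal sketch of the same dispatch
pattern; the EC numbers and handler names are invented, and the range
initializer is a GNU C extension (GCC and clang both accept it):

#include <stdio.h>

#define EC_MAX 7

typedef int (*handler_fn)(int ec);

static int handle_unknown(int ec)
{
	printf("unknown class %d\n", ec);
	return 1;
}

static int handle_call(int ec)
{
	printf("hypercall (class %d)\n", ec);
	return 1;
}

/* Default every slot, then override the classes we know about. */
static handler_fn handlers[] = {
	[0 ... EC_MAX] = handle_unknown,
	[2]            = handle_call,
};

int main(void)
{
	handlers[2](2);		/* dispatches to handle_call */
	handlers[5](5);		/* falls back to handle_unknown */
	return 0;
}
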
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to userspace.
- */
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
- int exception_index)
-{
- exit_handle_fn exit_handler;
-
- if (ARM_ABORT_PENDING(exception_index)) {
- u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-
-		/*
-		 * HVC/SMC already have an adjusted PC, which we need
-		 * to wind back in order to return to the right address
-		 * after having injected the abort.
-		 */
- if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
- u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
- *vcpu_pc(vcpu) -= adj;
- }
-
- kvm_inject_vabt(vcpu);
- return 1;
- }
-
- exception_index = ARM_EXCEPTION_CODE(exception_index);
-
- switch (exception_index) {
- case ARM_EXCEPTION_IRQ:
- return 1;
- case ARM_EXCEPTION_HVC:
- /*
- * See ARM ARM B1.14.1: "Hyp traps on instructions
- * that fail their condition code check"
- */
- if (!kvm_condition_valid(vcpu)) {
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 1;
- }
-
- exit_handler = kvm_get_exit_handler(vcpu);
-
- return exit_handler(vcpu, run);
- case ARM_EXCEPTION_DATA_ABORT:
- kvm_inject_vabt(vcpu);
- return 1;
- case ARM_EXCEPTION_HYP_GONE:
- /*
- * HYP has been reset to the hyp-stub. This happens
- * when a guest is pre-empted by kvm_reboot()'s
- * shutdown call.
- */
- run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- return 0;
- default:
- kvm_pr_unimpl("Unsupported exception type: %d",
- exception_index);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return 0;
- }
-}
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
deleted file mode 100644
index ba88b1eca93c..000000000000
--- a/arch/arm/kvm/hyp/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Kernel-based Virtual Machine module, HYP part
-#
-
-ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
-
-KVM=../../../../virt/kvm
-
-CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
-
-obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
-obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
-obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
-
-obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
-obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
-obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
-obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
-CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
-
-obj-$(CONFIG_KVM_ARM_HOST) += entry.o
-obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
-obj-$(CONFIG_KVM_ARM_HOST) += switch.o
-CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
-obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
-
-# KVM code is run at a different exception code with a different map, so
-# compiler instrumentation that inserts callbacks or checks into the code may
-# cause crashes. Just disable it.
-GCOV_PROFILE := n
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
deleted file mode 100644
index c4632ed9e819..000000000000
--- a/arch/arm/kvm/hyp/banked-sr.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Original code:
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <asm/kvm_hyp.h>
-
-/*
- * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
- * trick the assembler.
- */
-__asm__(".arch_extension virt");
-
-void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
-{
- ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);
- ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);
- ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);
- ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);
- ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);
- ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
- ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);
- ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);
- ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);
- ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und);
- ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und);
- ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und);
- ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq);
- ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq);
- ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq);
- ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq);
- ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq);
- ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq);
- ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq);
- ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq);
- ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq);
- ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq);
- ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq);
-}
-
-void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
-{
- write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr);
- write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp);
- write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf);
- write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc);
- write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc);
- write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc);
- write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt);
- write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt);
- write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt);
- write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und);
- write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und);
- write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und);
- write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq);
- write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq);
- write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq);
- write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq);
-}
diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c
deleted file mode 100644
index e6923306f698..000000000000
--- a/arch/arm/kvm/hyp/cp15-sr.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Original code:
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <asm/kvm_hyp.h>
-
-static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
-{
- return (u64 *)(ctxt->cp15 + idx);
-}
-
-void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
-{
- ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
- ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
- ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
- *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
- *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
- ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
- ctxt->cp15[c3_DACR] = read_sysreg(DACR);
- ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
- ctxt->cp15[c5_IFSR] = read_sysreg(IFSR);
- ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR);
- ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
- ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
- ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
- *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
- ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
- ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
- ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
- ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1);
- ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);
- ctxt->cp15[c13_CID] = read_sysreg(CID);
- ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW);
- ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO);
- ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV);
- ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL);
-}
-
-void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
-{
- write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR);
- write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
- write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
- write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
- write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
- write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
- write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
- write_sysreg(ctxt->cp15[c3_DACR], DACR);
- write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
- write_sysreg(ctxt->cp15[c5_IFSR], IFSR);
- write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR);
- write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
- write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
- write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
- write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
- write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
- write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
- write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
- write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1);
- write_sysreg(ctxt->cp15[c12_VBAR], VBAR);
- write_sysreg(ctxt->cp15[c13_CID], CID);
- write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW);
- write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO);
- write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV);
- write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL);
-}
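
A note on cp15_64() above: it works because 64-bit registers such as TTBR0
occupy two consecutive u32 slots in the cp15 array, and the kernel is built
with -fno-strict-aliasing, which makes the u64 cast well defined there. A
quick standalone illustration of the aliasing (little-endian assumed; compile
with -fno-strict-aliasing to match the kernel's rules):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	alignas(uint64_t) uint32_t cp15[4] = { 0 };
	int idx = 2;		/* pretend this is c2_TTBR0's slot */

	/* Store a 64-bit value through the aliasing pointer... */
	*(uint64_t *)(cp15 + idx) = 0x1122334455667788ULL;

	/* ...and observe the two 32-bit halves (little-endian). */
	printf("low  word: 0x%08x\n", (unsigned)cp15[idx]);	/* 0x55667788 */
	printf("high word: 0x%08x\n", (unsigned)cp15[idx + 1]);	/* 0x11223344 */
	return 0;
}
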
diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
deleted file mode 100644
index 4bd1f6a74180..000000000000
--- a/arch/arm/kvm/hyp/entry.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
-*/
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-
- .arch_extension virt
-
- .text
- .pushsection .hyp.text, "ax"
-
-#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR)
-
-/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
-ENTRY(__guest_enter)
- @ Save host registers
- add r1, r1, #(USR_REGS_OFFSET + S_R4)
- stm r1!, {r4-r12}
- str lr, [r1, #4] @ Skip SP_usr (already saved)
-
- @ Restore guest registers
- add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
- ldr lr, [r0, #S_LR]
- ldm r0, {r0-r12}
-
- clrex
- eret
-ENDPROC(__guest_enter)
-
-ENTRY(__guest_exit)
- /*
- * return convention:
- * guest r0, r1, r2 saved on the stack
- * r0: vcpu pointer
- * r1: exception code
- */
-
- add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
- stm r2!, {r3-r12}
- str lr, [r2, #4]
- add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
- pop {r3, r4, r5} @ r0, r1, r2
- stm r2, {r3-r5}
-
- ldr r0, [r0, #VCPU_HOST_CTXT]
- add r0, r0, #(USR_REGS_OFFSET + S_R4)
- ldm r0!, {r4-r12}
- ldr lr, [r0, #4]
-
- mov r0, r1
- mrs r1, SPSR
- mrs r2, ELR_hyp
- mrc p15, 4, r3, c5, c2, 0 @ HSR
-
- /*
- * Force loads and stores to complete before unmasking aborts
- * and forcing the delivery of the exception. This gives us a
- * single instruction window, which the handler will try to
- * match.
- */
- dsb sy
- cpsie a
-
- .global abort_guest_exit_start
-abort_guest_exit_start:
-
- isb
-
- .global abort_guest_exit_end
-abort_guest_exit_end:
-
-	/*
-	 * If we took an abort, r0[31] will be set, and cmp will set
-	 * the N flag in the CPSR.
-	 */
- cmp r0, #0
- msrmi SPSR_cxsf, r1
- msrmi ELR_hyp, r2
- mcrmi p15, 4, r3, c5, c2, 0 @ HSR
-
- bx lr
-ENDPROC(__guest_exit)
-
-/*
- * If VFPv3 support is not available, then we will not switch the VFP
- * registers; however, cp10 and cp11 accesses will still trap and fall back
- * to the regular coprocessor emulation code, which currently will
- * inject an undefined exception into the guest.
- */
-#ifdef CONFIG_VFPv3
-ENTRY(__vfp_guest_restore)
- push {r3, r4, lr}
-
- @ NEON/VFP used. Turn on VFP access.
- mrc p15, 4, r1, c1, c1, 2 @ HCPTR
- bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
- mcr p15, 4, r1, c1, c1, 2 @ HCPTR
- isb
-
- @ Switch VFP/NEON hardware state to the guest's
- mov r4, r0
- ldr r0, [r0, #VCPU_HOST_CTXT]
- add r0, r0, #CPU_CTXT_VFP
- bl __vfp_save_state
- add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
- bl __vfp_restore_state
-
- pop {r3, r4, lr}
- pop {r0, r1, r2}
- clrex
- eret
-ENDPROC(__vfp_guest_restore)
-#endif
-
- .popsection
-
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
deleted file mode 100644
index fe3d7811a908..000000000000
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ /dev/null
@@ -1,295 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/arm-smccc.h>
-#include <linux/linkage.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-
- .arch_extension virt
-
- .text
- .pushsection .hyp.text, "ax"
-
-.macro load_vcpu reg
- mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR
-.endm
-
-/********************************************************************
- * Hypervisor exception vector and handlers
- *
- *
- * The KVM/ARM Hypervisor ABI is defined as follows:
- *
- * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
- * instruction is issued since all traps are disabled when running the host
- * kernel as per the Hyp-mode initialization at boot time.
- *
- * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
- * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
- * instructions are called from within Hyp-mode.
- *
- * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
- * Switching to Hyp mode is done through a simple HVC #0 instruction. The
- * exception vector code will check that the HVC comes from VMID==0.
- * - r0 contains a pointer to a HYP function
- * - r1, r2, and r3 contain arguments to the above function.
- * - The HYP function will be called with its arguments in r0, r1 and r2.
- * On HYP function return, we return directly to SVC.
- *
- * Note that the above is used to execute code in Hyp-mode from a host-kernel
- * point of view, and is a different concept from performing a world-switch and
- * executing guest code in SVC mode (with a VMID != 0).
- */
-
- .align 5
-__kvm_hyp_vector:
- .global __kvm_hyp_vector
-
- @ Hyp-mode exception vector
- W(b) hyp_reset
- W(b) hyp_undef
- W(b) hyp_svc
- W(b) hyp_pabt
- W(b) hyp_dabt
- W(b) hyp_hvc
- W(b) hyp_irq
- W(b) hyp_fiq
-
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
- .align 5
-__kvm_hyp_vector_ic_inv:
- .global __kvm_hyp_vector_ic_inv
-
- /*
- * We encode the exception entry in the bottom 3 bits of
-	 * SP, and we must guarantee that it is 8-byte aligned.
- */
- W(add) sp, sp, #1 /* Reset 7 */
- W(add) sp, sp, #1 /* Undef 6 */
- W(add) sp, sp, #1 /* Syscall 5 */
- W(add) sp, sp, #1 /* Prefetch abort 4 */
- W(add) sp, sp, #1 /* Data abort 3 */
- W(add) sp, sp, #1 /* HVC 2 */
- W(add) sp, sp, #1 /* IRQ 1 */
- W(nop) /* FIQ 0 */
-
- mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
- isb
-
- b decode_vectors
-
- .align 5
-__kvm_hyp_vector_bp_inv:
- .global __kvm_hyp_vector_bp_inv
-
- /*
- * We encode the exception entry in the bottom 3 bits of
-	 * SP, and we must guarantee that it is 8-byte aligned.
- */
- W(add) sp, sp, #1 /* Reset 7 */
- W(add) sp, sp, #1 /* Undef 6 */
- W(add) sp, sp, #1 /* Syscall 5 */
- W(add) sp, sp, #1 /* Prefetch abort 4 */
- W(add) sp, sp, #1 /* Data abort 3 */
- W(add) sp, sp, #1 /* HVC 2 */
- W(add) sp, sp, #1 /* IRQ 1 */
- W(nop) /* FIQ 0 */
-
- mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
- isb
-
-decode_vectors:
-
-#ifdef CONFIG_THUMB2_KERNEL
- /*
- * Yet another silly hack: Use VPIDR as a temp register.
- * Thumb2 is really a pain, as SP cannot be used with most
- * of the bitwise instructions. The vect_br macro ensures
-	 * things get cleaned up.
- */
- mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
- mov r0, sp
- and r0, r0, #7
- sub sp, sp, r0
- push {r1, r2}
- mov r1, r0
- mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
- mrc p15, 0, r2, c0, c0, 0 /* MIDR */
- mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
-#endif
-
-.macro vect_br val, targ
-ARM( eor sp, sp, #\val )
-ARM( tst sp, #7 )
-ARM( eorne sp, sp, #\val )
-
-THUMB( cmp r1, #\val )
-THUMB( popeq {r1, r2} )
-
- beq \targ
-.endm
-
- vect_br 0, hyp_fiq
- vect_br 1, hyp_irq
- vect_br 2, hyp_hvc
- vect_br 3, hyp_dabt
- vect_br 4, hyp_pabt
- vect_br 5, hyp_svc
- vect_br 6, hyp_undef
- vect_br 7, hyp_reset
-#endif
-
-.macro invalid_vector label, cause
- .align
-\label: mov r0, #\cause
- b __hyp_panic
-.endm
-
- invalid_vector hyp_reset ARM_EXCEPTION_RESET
- invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED
- invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE
- invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT
- invalid_vector hyp_fiq ARM_EXCEPTION_FIQ
-
-ENTRY(__hyp_do_panic)
- mrs lr, cpsr
- bic lr, lr, #MODE_MASK
- orr lr, lr, #SVC_MODE
-THUMB( orr lr, lr, #PSR_T_BIT )
- msr spsr_cxsf, lr
- ldr lr, =panic
- msr ELR_hyp, lr
- ldr lr, =__kvm_call_hyp
- clrex
- eret
-ENDPROC(__hyp_do_panic)
-
-hyp_hvc:
- /*
-	 * Getting here means either a trap from a guest, or an HVC
-	 * executed from the host kernel, which means
- * "do something in Hyp mode".
- */
- push {r0, r1, r2}
-
- @ Check syndrome register
- mrc p15, 4, r1, c5, c2, 0 @ HSR
- lsr r0, r1, #HSR_EC_SHIFT
- cmp r0, #HSR_EC_HVC
- bne guest_trap @ Not HVC instr.
-
- /*
-	 * Let's check if the HVC came from VMID 0 and allow a simple
-	 * switch to Hyp mode.
- */
- mrrc p15, 6, r0, r2, c2
- lsr r2, r2, #16
- and r2, r2, #0xff
- cmp r2, #0
- bne guest_hvc_trap @ Guest called HVC
-
-	/*
-	 * Getting here means the host called HVC, so we shift the
-	 * parameters and branch to the Hyp function.
-	 */
- pop {r0, r1, r2}
-
-	/*
-	 * Check if we have a kernel function pointer, whose address is
-	 * guaranteed to be bigger than the maximum hyp-stub hypercall
-	 * number.
-	 */
- cmp r0, #HVC_STUB_HCALL_NR
- bhs 1f
-
- /*
- * Not a kernel function, treat it as a stub hypercall.
- * Compute the physical address for __kvm_handle_stub_hvc
-	 * (as the code lives in the idmapped page) and branch there.
- * We hijack ip (r12) as a tmp register.
- */
- push {r1}
- ldr r1, =kimage_voffset
- ldr r1, [r1]
- ldr ip, =__kvm_handle_stub_hvc
- sub ip, ip, r1
- pop {r1}
-
- bx ip
-
-1:
- /*
- * Pushing r2 here is just a way of keeping the stack aligned to
- * 8 bytes on any path that can trigger a HYP exception. Here,
- * we may well be about to jump into the guest, and the guest
- * exit would otherwise be badly decoded by our fancy
- * "decode-exception-without-a-branch" code...
- */
- push {r2, lr}
-
- mov lr, r0
- mov r0, r1
- mov r1, r2
- mov r2, r3
-
-THUMB( orr lr, #1)
- blx lr @ Call the HYP function
-
- pop {r2, lr}
- eret
-
-guest_hvc_trap:
- movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
- movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
- ldr r0, [sp] @ Guest's r0
- teq r0, r2
- bne guest_trap
- add sp, sp, #12
- @ Returns:
- @ r0 = 0
- @ r1 = HSR value (perfectly predictable)
- @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
- mov r0, #0
- eret
-
-guest_trap:
- load_vcpu r0 @ Load VCPU pointer to r0
-
-#ifdef CONFIG_VFPv3
- @ Check for a VFP access
- lsr r1, r1, #HSR_EC_SHIFT
- cmp r1, #HSR_EC_CP_0_13
- beq __vfp_guest_restore
-#endif
-
- mov r1, #ARM_EXCEPTION_HVC
- b __guest_exit
-
-hyp_irq:
- push {r0, r1, r2}
- mov r1, #ARM_EXCEPTION_IRQ
- load_vcpu r0 @ Load VCPU pointer to r0
- b __guest_exit
-
-hyp_dabt:
- push {r0, r1}
- mrs r0, ELR_hyp
- ldr r1, =abort_guest_exit_start
-THUMB( add r1, r1, #1)
- cmp r0, r1
- ldrne r1, =abort_guest_exit_end
-THUMB( addne r1, r1, #1)
- cmpne r0, r1
- pop {r0, r1}
- bne __hyp_panic
-
- orr r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
- eret
-
- .ltorg
-
- .popsection
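
The hyp_hvc path above separates stub hypercalls from host function calls purely by the magnitude of r0: small values are stub hypercall numbers, and anything at or above HVC_STUB_HCALL_NR is treated as a kernel function pointer to branch to. A minimal standalone model of that dispatch rule (the HVC_STUB_HCALL_NR value below is assumed for illustration, not taken from the headers):

    #include <stdint.h>
    #include <stdio.h>

    #define HVC_STUB_HCALL_NR 2u    /* assumed value, for illustration only */

    static const char *classify_hvc_r0(uint32_t r0)
    {
        /* Kernel function pointers are always >= HVC_STUB_HCALL_NR,
         * so a bare magnitude check is enough to tell them apart. */
        return (r0 < HVC_STUB_HCALL_NR) ? "stub hypercall" : "HYP function call";
    }

    int main(void)
    {
        printf("%s\n", classify_hvc_r0(1));          /* stub hypercall    */
        printf("%s\n", classify_hvc_r0(0xc0100000)); /* HYP function call */
        return 0;
    }
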
diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c
deleted file mode 100644
index 5dfbea5adf65..000000000000
--- a/arch/arm/kvm/hyp/s2-setup.c
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2016 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/types.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_hyp.h>
-
-void __hyp_text __init_stage2_translation(void)
-{
- u64 val;
-
- val = read_sysreg(VTCR) & ~VTCR_MASK;
-
- val |= read_sysreg(HTCR) & VTCR_HTCR_SH;
- val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S;
-
- write_sysreg(val, VTCR);
-}
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
deleted file mode 100644
index 1efeef3fd0ee..000000000000
--- a/arch/arm/kvm/hyp/switch.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-#include <linux/jump_label.h>
-
-#include <asm/kvm_asm.h>
-#include <asm/kvm_hyp.h>
-#include <asm/kvm_mmu.h>
-
-__asm__(".arch_extension virt");
-
-/*
- * Activate the traps, saving the host's fpexc register before
- * overwriting it. We'll restore it on VM exit.
- */
-static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
-{
- u32 val;
-
- /*
- * We are about to set HCPTR.TCP10/11 to trap all floating point
- * register accesses to HYP, however, the ARM ARM clearly states that
- * traps are only taken to HYP if the operation would not otherwise
- * trap to SVC. Therefore, always make sure that for 32-bit guests,
- * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
- */
- val = read_sysreg(VFP_FPEXC);
- *fpexc_host = val;
- if (!(val & FPEXC_EN)) {
- write_sysreg(val | FPEXC_EN, VFP_FPEXC);
- isb();
- }
-
- write_sysreg(vcpu->arch.hcr, HCR);
- /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
- write_sysreg(HSTR_T(15), HSTR);
- write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
- val = read_sysreg(HDCR);
- val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
- val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
- write_sysreg(val, HDCR);
-}
-
-static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
-{
- u32 val;
-
- /*
- * If we pended a virtual abort, preserve it until it gets
- * cleared. See B1.9.9 (Virtual Abort exception) for details,
- * but the crucial bit is the zeroing of HCR.VA in the
- * pseudocode.
- */
- if (vcpu->arch.hcr & HCR_VA)
- vcpu->arch.hcr = read_sysreg(HCR);
-
- write_sysreg(0, HCR);
- write_sysreg(0, HSTR);
- val = read_sysreg(HDCR);
- write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
- write_sysreg(0, HCPTR);
-}
-
-static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- write_sysreg(kvm_get_vttbr(kvm), VTTBR);
- write_sysreg(vcpu->arch.midr, VPIDR);
-}
-
-static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
-{
- write_sysreg(0, VTTBR);
- write_sysreg(read_sysreg(MIDR), VPIDR);
-}
-
-
-static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
-{
- if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
- __vgic_v3_save_state(vcpu);
- __vgic_v3_deactivate_traps(vcpu);
- }
-}
-
-static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
-{
- if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
- __vgic_v3_activate_traps(vcpu);
- __vgic_v3_restore_state(vcpu);
- }
-}
-
-static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
-{
- u32 hsr = read_sysreg(HSR);
- u8 ec = hsr >> HSR_EC_SHIFT;
- u32 hpfar, far;
-
- vcpu->arch.fault.hsr = hsr;
-
- if (ec == HSR_EC_IABT)
- far = read_sysreg(HIFAR);
- else if (ec == HSR_EC_DABT)
- far = read_sysreg(HDFAR);
- else
- return true;
-
- /*
- * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
- *
- * Abort on the stage 2 translation for a memory access from a
- * Non-secure PL1 or PL0 mode:
- *
- * For any Access flag fault or Translation fault, and also for any
- * Permission fault on the stage 2 translation of a memory access
- * made as part of a translation table walk for a stage 1 translation,
- * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
- * is UNKNOWN.
- */
- if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
- u64 par, tmp;
-
- par = read_sysreg(PAR);
- write_sysreg(far, ATS1CPR);
- isb();
-
- tmp = read_sysreg(PAR);
- write_sysreg(par, PAR);
-
- if (unlikely(tmp & 1))
- return false; /* Translation failed, back to guest */
-
- hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
- } else {
- hpfar = read_sysreg(HPFAR);
- }
-
- vcpu->arch.fault.hxfar = far;
- vcpu->arch.fault.hpfar = hpfar;
- return true;
-}
-
-int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpu_context *host_ctxt;
- struct kvm_cpu_context *guest_ctxt;
- bool fp_enabled;
- u64 exit_code;
- u32 fpexc;
-
- vcpu = kern_hyp_va(vcpu);
- write_sysreg(vcpu, HTPIDR);
-
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
- guest_ctxt = &vcpu->arch.ctxt;
-
- __sysreg_save_state(host_ctxt);
- __banked_save_state(host_ctxt);
-
- __activate_traps(vcpu, &fpexc);
- __activate_vm(vcpu);
-
- __vgic_restore_state(vcpu);
- __timer_enable_traps(vcpu);
-
- __sysreg_restore_state(guest_ctxt);
- __banked_restore_state(guest_ctxt);
-
- /* Jump in the fire! */
-again:
- exit_code = __guest_enter(vcpu, host_ctxt);
- /* And we're baaack! */
-
- if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
- goto again;
-
- fp_enabled = __vfp_enabled();
-
- __banked_save_state(guest_ctxt);
- __sysreg_save_state(guest_ctxt);
- __timer_disable_traps(vcpu);
-
- __vgic_save_state(vcpu);
-
- __deactivate_traps(vcpu);
- __deactivate_vm(vcpu);
-
- __banked_restore_state(host_ctxt);
- __sysreg_restore_state(host_ctxt);
-
- if (fp_enabled) {
- __vfp_save_state(&guest_ctxt->vfp);
- __vfp_restore_state(&host_ctxt->vfp);
- }
-
- write_sysreg(fpexc, VFP_FPEXC);
-
- return exit_code;
-}
-
-static const char * const __hyp_panic_string[] = {
- [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x",
- [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
- [ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x",
- [ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
- [ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
- [ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x",
- [ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x",
- [ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x",
-};
-
-void __hyp_text __noreturn __hyp_panic(int cause)
-{
- u32 elr = read_special(ELR_hyp);
- u32 val;
-
- if (cause == ARM_EXCEPTION_DATA_ABORT)
- val = read_sysreg(HDFAR);
- else
- val = read_special(SPSR);
-
- if (read_sysreg(VTTBR)) {
- struct kvm_vcpu *vcpu;
- struct kvm_cpu_context *host_ctxt;
-
- vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
- __timer_disable_traps(vcpu);
- __deactivate_traps(vcpu);
- __deactivate_vm(vcpu);
- __banked_restore_state(host_ctxt);
- __sysreg_restore_state(host_ctxt);
- }
-
- /* Call panic for real */
- __hyp_do_panic(__hyp_panic_string[cause], elr, val);
-
- unreachable();
-}
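
The PAR-to-HPFAR repackaging in __populate_fault_info above is the trickiest bit manipulation in this file: PAR[39:12] carries the translated physical address, and HPFAR wants PA[39:12] placed in bits [31:4]. A standalone sketch of the same computation:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t par_to_hpfar(uint64_t par)
    {
        /* Keep PA bits [39:12] from the PAR and shift them into
         * HPFAR bits [31:4], exactly as the kernel expression
         * ((tmp >> 12) & ((1UL << 28) - 1)) << 4 does. */
        return (uint32_t)(((par >> 12) & ((1ULL << 28) - 1)) << 4);
    }

    int main(void)
    {
        uint64_t par = 0x80001000ULL;   /* made-up translated PA */
        printf("HPFAR = 0x%08x\n", par_to_hpfar(par)); /* prints 0x00800010 */
        return 0;
    }
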
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
deleted file mode 100644
index 848f27bbad9d..000000000000
--- a/arch/arm/kvm/hyp/tlb.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Original code:
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <asm/kvm_hyp.h>
-#include <asm/kvm_mmu.h>
-
-/**
- * Flush per-VMID TLBs
- *
- * __kvm_tlb_flush_vmid(struct kvm *kvm);
- *
- * We rely on the hardware to broadcast the TLB invalidation to all CPUs
- * inside the inner-shareable domain (which is the case for all v7
- * implementations). If we come across a non-IS SMP implementation, we'll
- * have to use an IPI based mechanism. Until then, we stick to the simple
- * hardware assisted version.
- *
- * As v7 does not support flushing per IPA, just nuke the whole TLB
- * instead, ignoring the ipa value.
- */
-void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
-{
- dsb(ishst);
-
- /* Switch to requested VMID */
- kvm = kern_hyp_va(kvm);
- write_sysreg(kvm_get_vttbr(kvm), VTTBR);
- isb();
-
- write_sysreg(0, TLBIALLIS);
- dsb(ish);
- isb();
-
- write_sysreg(0, VTTBR);
-}
-
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
-{
- __kvm_tlb_flush_vmid(kvm);
-}
-
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
-
- /* Switch to requested VMID */
- write_sysreg(kvm_get_vttbr(kvm), VTTBR);
- isb();
-
- write_sysreg(0, TLBIALL);
- dsb(nsh);
- isb();
-
- write_sysreg(0, VTTBR);
-}
-
-void __hyp_text __kvm_flush_vm_context(void)
-{
- write_sysreg(0, TLBIALLNSNHIS);
- write_sysreg(0, ICIALLUIS);
- dsb(ish);
-}
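
All three flush helpers above share one skeleton, which the header comment describes: borrow the target VMID in VTTBR, issue the broadcast invalidation, then drop back to VMID 0. A hedged sketch of that common shape, using the same helpers this file uses (not a drop-in kernel function):

    static void __hyp_text flush_under_vmid(u64 vttbr)
    {
        dsb(ishst);                 /* make prior PTE updates visible */
        write_sysreg(vttbr, VTTBR); /* borrow the guest's VMID        */
        isb();
        write_sysreg(0, TLBIALLIS); /* invalidate, inner-shareable    */
        dsb(ish);
        isb();
        write_sysreg(0, VTTBR);     /* back to VMID 0                 */
    }
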
diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S
deleted file mode 100644
index 675a52348d8d..000000000000
--- a/arch/arm/kvm/hyp/vfp.S
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/vfpmacros.h>
-
- .text
- .pushsection .hyp.text, "ax"
-
-/* void __vfp_save_state(struct vfp_hard_struct *vfp); */
-ENTRY(__vfp_save_state)
- push {r4, r5}
- VFPFMRX r1, FPEXC
-
- @ Make sure VFP is *really* enabled so we can touch the registers.
- orr r5, r1, #FPEXC_EN
- tst r5, #FPEXC_EX @ Check for VFP Subarchitecture
- bic r5, r5, #FPEXC_EX @ FPEXC_EX disable
- VFPFMXR FPEXC, r5
- isb
-
- VFPFMRX r2, FPSCR
- beq 1f
-
- @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
- @ we only need to save them if FPEXC_EX is set.
- VFPFMRX r3, FPINST
- tst r5, #FPEXC_FP2V
- VFPFMRX r4, FPINST2, ne @ vmrsne
-1:
- VFPFSTMIA r0, r5 @ Save VFP registers
- stm r0, {r1-r4} @ Save FPEXC, FPSCR, FPINST, FPINST2
- pop {r4, r5}
- bx lr
-ENDPROC(__vfp_save_state)
-
-/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
- * Assume FPEXC_EN is on and FPEXC_EX is off */
-ENTRY(__vfp_restore_state)
- VFPFLDMIA r0, r1 @ Load VFP registers
- ldm r0, {r0-r3} @ Load FPEXC, FPSCR, FPINST, FPINST2
-
- VFPFMXR FPSCR, r1
- tst r0, #FPEXC_EX @ Check for VFP Subarchitecture
- beq 1f
- VFPFMXR FPINST, r2
- tst r0, #FPEXC_FP2V
- VFPFMXR FPINST2, r3, ne
-1:
- VFPFMXR FPEXC, r0 @ FPEXC (last, in case !EN)
- bx lr
-ENDPROC(__vfp_restore_state)
-
- .popsection
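
The conditional FPINST/FPINST2 handling above is easier to follow in C. This is the same decision tree with the register reads taken as parameters; the struct is hypothetical, and the bit positions are the usual asm/vfp.h values (EX is bit 31, FP2V bit 28):

    #include <stdint.h>

    #define FPEXC_EX   (1u << 31)  /* exceptional state: FPINST is valid     */
    #define FPEXC_FP2V (1u << 28)  /* FPINST2 valid (only meaningful w/ EX)  */

    struct vfp_ctrl { uint32_t fpexc, fpscr, fpinst, fpinst2; };

    static void save_vfp_ctrl(struct vfp_ctrl *c, uint32_t fpexc,
                              uint32_t fpscr, uint32_t fpinst, uint32_t fpinst2)
    {
        c->fpexc = fpexc;
        c->fpscr = fpscr;
        if (fpexc & FPEXC_EX) {       /* otherwise FPINST* are unpredictable */
            c->fpinst = fpinst;
            if (fpexc & FPEXC_FP2V)
                c->fpinst2 = fpinst2;
        }
    }
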
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
deleted file mode 100644
index 33e34b6d24b2..000000000000
--- a/arch/arm/kvm/init.S
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/unified.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/virt.h>
-
-/********************************************************************
- * Hypervisor initialization
- * - should be called with:
- * r0 = top of Hyp stack (kernel VA)
- * r1 = pointer to hyp vectors
- * r2,r3 = Hypervisor pgd pointer
- *
- * The init scenario is:
- * - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack,
- * runtime vectors
- * - Invalidate TLBs
- * - Set stack and vectors
- * - Setup the page tables
- * - Enable the MMU
- * - Profit! (or eret, if you only care about the code).
- *
- * Another possibility is to get a HYP stub hypercall.
- * We discriminate between the two by checking if r0 contains a value
- * that is less than HVC_STUB_HCALL_NR.
- */
-
- .text
- .pushsection .hyp.idmap.text,"ax"
- .align 5
-__kvm_hyp_init:
- .globl __kvm_hyp_init
-
- @ Hyp-mode exception vector
- W(b) .
- W(b) .
- W(b) .
- W(b) .
- W(b) .
- W(b) __do_hyp_init
- W(b) .
- W(b) .
-
-__do_hyp_init:
- @ Check for a stub hypercall
- cmp r0, #HVC_STUB_HCALL_NR
- blo __kvm_handle_stub_hvc
-
- @ Set stack pointer
- mov sp, r0
-
- @ Set HVBAR to point to the HYP vectors
- mcr p15, 4, r1, c12, c0, 0 @ HVBAR
-
- @ Set the HTTBR to point to the hypervisor PGD pointer passed
- mcrr p15, 4, rr_lo_hi(r2, r3), c2
-
- @ Set the HTCR and VTCR to the same shareability and cacheability
- @ settings as the non-secure TTBCR and with T0SZ == 0.
- mrc p15, 4, r0, c2, c0, 2 @ HTCR
- ldr r2, =HTCR_MASK
- bic r0, r0, r2
- mrc p15, 0, r1, c2, c0, 2 @ TTBCR
- and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
- orr r0, r0, r1
- mcr p15, 4, r0, c2, c0, 2 @ HTCR
-
- @ Use the same memory attributes for hyp. accesses as the kernel
- @ (copy MAIRx to HMAIRx).
- mrc p15, 0, r0, c10, c2, 0
- mcr p15, 4, r0, c10, c2, 0
- mrc p15, 0, r0, c10, c2, 1
- mcr p15, 4, r0, c10, c2, 1
-
- @ Invalidate the stale TLBs from Bootloader
- mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
- dsb ish
-
- @ Set the HSCTLR to:
- @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
- @ - Endianness: Kernel config
- @ - Fast Interrupt Features: Kernel config
- @ - Write permission implies XN: disabled
- @ - Instruction cache: enabled
- @ - Data/Unified cache: enabled
- @ - MMU: enabled (this code must be run from an identity mapping)
- mrc p15, 4, r0, c1, c0, 0 @ HSCTLR
- ldr r2, =HSCTLR_MASK
- bic r0, r0, r2
- mrc p15, 0, r1, c1, c0, 0 @ SCTLR
- ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
- and r1, r1, r2
- ARM( ldr r2, =(HSCTLR_M) )
- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
- orr r1, r1, r2
- orr r0, r0, r1
- mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
- isb
-
- eret
-
-ENTRY(__kvm_handle_stub_hvc)
- cmp r0, #HVC_SOFT_RESTART
- bne 1f
-
- /* The target is expected in r1 */
- msr ELR_hyp, r1
- mrs r0, cpsr
- bic r0, r0, #MODE_MASK
- orr r0, r0, #HYP_MODE
-THUMB( orr r0, r0, #PSR_T_BIT )
- msr spsr_cxsf, r0
- b reset
-
-1: cmp r0, #HVC_RESET_VECTORS
- bne 1f
-
-reset:
- /* We're now in idmap, disable MMU */
- mrc p15, 4, r1, c1, c0, 0 @ HSCTLR
- ldr r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
- bic r1, r1, r0
- mcr p15, 4, r1, c1, c0, 0 @ HSCTLR
-
- /*
- * Install stub vectors, using ardb's VA->PA trick.
- */
-0: adr r0, 0b @ PA(0)
- movw r1, #:lower16:__hyp_stub_vectors - 0b @ VA(stub) - VA(0)
- movt r1, #:upper16:__hyp_stub_vectors - 0b
- add r1, r1, r0 @ PA(stub)
- mcr p15, 4, r1, c12, c0, 0 @ HVBAR
- b exit
-
-1: ldr r0, =HVC_STUB_ERR
- eret
-
-exit:
- mov r0, #0
- eret
-ENDPROC(__kvm_handle_stub_hvc)
-
- .ltorg
-
- .globl __kvm_hyp_init_end
-__kvm_hyp_init_end:
-
- .popsection
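
The stub-vector installation above relies on a position-independence trick worth spelling out: adr yields the physical address of the local anchor (this code runs from the idmap, so VA == PA there), and since the kernel's VA-to-PA offset is a single constant, PA(stub) = PA(anchor) + (VA(stub) - VA(anchor)). A standalone model with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* All addresses are invented for the example. */
        uintptr_t va_anchor = 0xc0100000, pa_anchor = 0x80100000;
        uintptr_t va_stub   = 0xc0108040;

        /* movw/movt materialise (VA(stub) - VA(anchor)); adr gives
         * PA(anchor); their sum is the physical stub address. */
        uintptr_t pa_stub = pa_anchor + (va_stub - va_anchor);
        printf("PA(stub) = 0x%08lx\n", (unsigned long)pa_stub); /* 0x80108040 */
        return 0;
    }
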
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
deleted file mode 100644
index 064f4f118ca7..000000000000
--- a/arch/arm/kvm/interrupts.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/linkage.h>
-
- .text
-
-/********************************************************************
- * Call function in Hyp mode
- *
- *
- * unsigned long kvm_call_hyp(void *hypfn, ...);
- *
- * This is not really a variadic function in the classic C way, and care must
- * be taken when calling this to ensure parameters are passed in registers
- * only, since the stack will change between the caller and the callee.
- *
- * Call the function with the first argument containing a pointer to the
- * function you wish to call in Hyp mode, and subsequent arguments will be
- * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
- * function pointer can be passed). The function being called must be mapped
- * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
- * passed in r0 (strictly 32-bit).
- *
- * The calling convention follows the standard AAPCS:
- * r0 - r3: caller save
- * r12: caller save
- * rest: callee save
- */
-ENTRY(__kvm_call_hyp)
- hvc #0
- bx lr
-ENDPROC(__kvm_call_hyp)
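
Given the constraints in the comment above, a call site looks like an ordinary C call, but every argument must fit in r0-r2 and the callee must be HYP-mapped. A hedged usage sketch (the flush helper is the one removed from hyp/tlb.c earlier in this diff; the call site itself is illustrative only):

    struct kvm;
    void __kvm_tlb_flush_vmid(struct kvm *kvm);
    unsigned long kvm_call_hyp(void *hypfn, ...);

    static void example_flush(struct kvm *kvm)
    {
        /* One pointer argument: lands in r0, well within the
         * three-register limit. Runs the callee in Hyp mode. */
        kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
    }
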
diff --git a/arch/arm/kvm/irq.h b/arch/arm/kvm/irq.h
deleted file mode 100644
index 0d257de42c10..000000000000
--- a/arch/arm/kvm/irq.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * irq.h: in kernel interrupt controller related definitions
- * Copyright (c) 2016 Red Hat, Inc.
- *
- * This header is included by irqchip.c. However, on ARM, interrupt
- * controller declarations are located in include/kvm/arm_vgic.h since
- * they are mostly shared between arm and arm64.
- */
-
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#include <kvm/arm_vgic.h>
-
-#endif
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
deleted file mode 100644
index eb4174f6ebbd..000000000000
--- a/arch/arm/kvm/reset.c
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-
-#include <asm/unified.h>
-#include <asm/ptrace.h>
-#include <asm/cputype.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_emulate.h>
-
-#include <kvm/arm_arch_timer.h>
-
-/******************************************************************************
- * Cortex-A15 and Cortex-A7 Reset Values
- */
-
-static struct kvm_regs cortexa_regs_reset = {
- .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
-};
-
-
-/*******************************************************************************
- * Exported reset function
- */
-
-/**
- * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
- * @vcpu: The VCPU pointer
- *
- * This function finds the right table above and sets the registers on the
- * virtual CPU struct to their architecturally defined reset values.
- */
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
-{
- struct kvm_regs *reset_regs;
-
- switch (vcpu->arch.target) {
- case KVM_ARM_TARGET_CORTEX_A7:
- case KVM_ARM_TARGET_CORTEX_A15:
- reset_regs = &cortexa_regs_reset;
- vcpu->arch.midr = read_cpuid_id();
- break;
- default:
- return -ENODEV;
- }
-
- /* Reset core registers */
- memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
-
- /* Reset CP15 registers */
- kvm_reset_coprocs(vcpu);
-
- /*
- * Additional reset state handling that PSCI may have imposed on us.
- * Must be done after all the sys_reg reset.
- */
- if (READ_ONCE(vcpu->arch.reset_state.reset)) {
- unsigned long target_pc = vcpu->arch.reset_state.pc;
-
- /* Gracefully handle Thumb2 entry point */
- if (target_pc & 1) {
- target_pc &= ~1UL;
- vcpu_set_thumb(vcpu);
- }
-
- /* Propagate caller endianness */
- if (vcpu->arch.reset_state.be)
- kvm_vcpu_set_be(vcpu);
-
- *vcpu_pc(vcpu) = target_pc;
- vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
-
- vcpu->arch.reset_state.reset = false;
- }
-
- /* Reset arch_timer context */
- return kvm_timer_vcpu_reset(vcpu);
-}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
deleted file mode 100644
index 69a9d62a0ac6..000000000000
--- a/arch/arm/kvm/trace.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_ARM_KVM_H
-
-#include <linux/tracepoint.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kvm
-
-/* Architecturally implementation defined CP15 register access */
-TRACE_EVENT(kvm_emulate_cp15_imp,
- TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
- unsigned long CRm, unsigned long Op2, bool is_write),
- TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
-
- TP_STRUCT__entry(
- __field( unsigned int, Op1 )
- __field( unsigned int, Rt1 )
- __field( unsigned int, CRn )
- __field( unsigned int, CRm )
- __field( unsigned int, Op2 )
- __field( bool, is_write )
- ),
-
- TP_fast_assign(
- __entry->is_write = is_write;
- __entry->Op1 = Op1;
- __entry->Rt1 = Rt1;
- __entry->CRn = CRn;
- __entry->CRm = CRm;
- __entry->Op2 = Op2;
- ),
-
- TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
- (__entry->is_write) ? "mcr" : "mrc",
- __entry->Op1, __entry->Rt1, __entry->CRn,
- __entry->CRm, __entry->Op2)
-);
-
-TRACE_EVENT(kvm_wfx,
- TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
- TP_ARGS(vcpu_pc, is_wfe),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( bool, is_wfe )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->is_wfe = is_wfe;
- ),
-
- TP_printk("guest executed wf%c at: 0x%08lx",
- __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_hvc,
- TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
- TP_ARGS(vcpu_pc, r0, imm),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( unsigned long, r0 )
- __field( unsigned long, imm )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->r0 = r0;
- __entry->imm = imm;
- ),
-
- TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx",
- __entry->vcpu_pc, __entry->r0, __entry->imm)
-);
-
-#endif /* _TRACE_ARM_KVM_H */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/arch/arm/kvm/vgic-v3-coproc.c b/arch/arm/kvm/vgic-v3-coproc.c
deleted file mode 100644
index ed3b2e4759ce..000000000000
--- a/arch/arm/kvm/vgic-v3-coproc.c
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * VGIC system registers handling functions for AArch32 mode
- */
-
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include "vgic.h"
-
-int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
- u64 *reg)
-{
- /*
- * TODO: Implement for AArch32
- */
- return -ENXIO;
-}
-
-int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
- u64 *reg)
-{
- /*
- * TODO: Implement for AArch32
- */
- return -ENXIO;
-}
diff --git a/arch/arm/mach-at91/.gitignore b/arch/arm/mach-at91/.gitignore
index 2ecd6f51c8a9..f6d47389675e 100644
--- a/arch/arm/mach-at91/.gitignore
+++ b/arch/arm/mach-at91/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
pm_data-offsets.h
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 7979420bd48b..ccd7e80ce943 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -153,7 +153,6 @@ config HAVE_AT91_USB_CLK
config COMMON_CLK_AT91
bool
- select COMMON_CLK
select MFD_SYSCON
config HAVE_AT91_SMD
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 374b9d155558..074bde64064e 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -736,13 +736,36 @@ backup_default:
struct pmc_info {
unsigned long uhp_udp_mask;
+ unsigned long mckr;
+ unsigned long version;
};
static const struct pmc_info pmc_infos[] __initconst = {
- { .uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP },
- { .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP },
- { .uhp_udp_mask = AT91SAM926x_PMC_UHP },
- { .uhp_udp_mask = 0 },
+ {
+ .uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
+ .mckr = 0x30,
+ .version = AT91_PMC_V1,
+ },
+ {
+ .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
+ .mckr = 0x30,
+ .version = AT91_PMC_V1,
+ },
+ {
+ .uhp_udp_mask = AT91SAM926x_PMC_UHP,
+ .mckr = 0x30,
+ .version = AT91_PMC_V1,
+ },
+ {
+ .uhp_udp_mask = 0,
+ .mckr = 0x30,
+ .version = AT91_PMC_V1,
+ },
+ {
+ .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
+ .mckr = 0x28,
+ .version = AT91_PMC_V2,
+ },
};
static const struct of_device_id atmel_pmc_ids[] __initconst = {
@@ -757,7 +780,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
- { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] },
+ { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
{ /* sentinel */ },
};
@@ -779,6 +802,8 @@ static void __init at91_pm_init(void (*pm_idle)(void))
pmc = of_id->data;
soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
+ soc_pm.data.pmc_mckr_offset = pmc->mckr;
+ soc_pm.data.pmc_version = pmc->version;
if (pm_idle)
arm_pm_idle = pm_idle;
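
The point of carrying mckr and version in pmc_infos is that PMC v1 (most SoCs) has MCKR at offset 0x30 while PMC v2 on sam9x60 moved it to 0x28, so the low-level suspend code must index the register through the stored offset instead of hard-coding AT91_PMC_MCKR. A sketch of the consumer side in C (pm_suspend.S below does the same read-modify-write with .mckr_offset):

    /* Sketch only: mirrors what the assembly does with the saved offset. */
    static void mck_to_slow_clock(void __iomem *pmc, unsigned int mckr_offset)
    {
        u32 mckr = readl(pmc + mckr_offset);

        mckr &= ~AT91_PMC_CSS;            /* CSS == 0 selects slow clock */
        writel(mckr, pmc + mckr_offset);
        /* ... then poll MCKRDY, as the wait_mckrdy macro does. */
    }
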
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 9fa4f483f2b5..218e8d1a30fb 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -33,6 +33,8 @@ struct at91_pm_data {
void __iomem *sfrbu;
unsigned int standby_mode;
unsigned int suspend_mode;
+ unsigned int pmc_mckr_offset;
+ unsigned int pmc_version;
};
#endif
diff --git a/arch/arm/mach-at91/pm_data-offsets.c b/arch/arm/mach-at91/pm_data-offsets.c
index f2d893c03cd9..82089ff258c0 100644
--- a/arch/arm/mach-at91/pm_data-offsets.c
+++ b/arch/arm/mach-at91/pm_data-offsets.c
@@ -12,6 +12,10 @@ int main(void)
DEFINE(PM_DATA_MODE, offsetof(struct at91_pm_data, mode));
DEFINE(PM_DATA_SHDWC, offsetof(struct at91_pm_data, shdwc));
DEFINE(PM_DATA_SFRBU, offsetof(struct at91_pm_data, sfrbu));
+ DEFINE(PM_DATA_PMC_MCKR_OFFSET, offsetof(struct at91_pm_data,
+ pmc_mckr_offset));
+ DEFINE(PM_DATA_PMC_VERSION, offsetof(struct at91_pm_data,
+ pmc_version));
return 0;
}
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index ed57c879d4e1..be9764e8d3fa 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -18,6 +18,7 @@
pmc .req r0
tmp1 .req r4
tmp2 .req r5
+tmp3 .req r6
/*
* Wait until master clock is ready (after switching master clock source)
@@ -93,13 +94,17 @@ ENTRY(at91_pm_suspend_in_sram)
str tmp1, .memtype
ldr tmp1, [r0, #PM_DATA_MODE]
str tmp1, .pm_mode
+ ldr tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET]
+ str tmp1, .mckr_offset
+ ldr tmp1, [r0, #PM_DATA_PMC_VERSION]
+ str tmp1, .pmc_version
/* Both ldrne below are here to preload their addresses into the TLB */
ldr tmp1, [r0, #PM_DATA_SHDWC]
str tmp1, .shdwc
cmp tmp1, #0
ldrne tmp2, [tmp1, #0]
ldr tmp1, [r0, #PM_DATA_SFRBU]
- str tmp1, .sfr
+ str tmp1, .sfrbu
cmp tmp1, #0
ldrne tmp2, [tmp1, #0x10]
@@ -138,14 +143,15 @@ ENDPROC(at91_pm_suspend_in_sram)
ENTRY(at91_backup_mode)
/* Switch the master clock source to slow clock. */
ldr pmc, .pmc_base
- ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ ldr tmp2, .mckr_offset
+ ldr tmp1, [pmc, tmp2]
bic tmp1, tmp1, #AT91_PMC_CSS
- str tmp1, [pmc, #AT91_PMC_MCKR]
+ str tmp1, [pmc, tmp2]
wait_mckrdy
/*BUMEN*/
- ldr r0, .sfr
+ ldr r0, .sfrbu
mov tmp1, #0x1
str tmp1, [r0, #0x10]
@@ -218,6 +224,7 @@ ENDPROC(at91_backup_mode)
*/
.macro at91_pm_ulp1_mode
ldr pmc, .pmc_base
+ ldr tmp2, .mckr_offset
/* Save RC oscillator state and check if it is enabled. */
ldr tmp1, [pmc, #AT91_PMC_SR]
@@ -254,10 +261,10 @@ ENDPROC(at91_backup_mode)
str tmp1, [pmc, #AT91_CKGR_MOR]
/* Switch the master clock source to main clock */
- ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ ldr tmp1, [pmc, tmp2]
bic tmp1, tmp1, #AT91_PMC_CSS
orr tmp1, tmp1, #AT91_PMC_CSS_MAIN
- str tmp1, [pmc, #AT91_PMC_MCKR]
+ str tmp1, [pmc, tmp2]
wait_mckrdy
@@ -268,6 +275,10 @@ ENDPROC(at91_backup_mode)
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
+ /* Quirk for SAM9X60's PMC */
+ nop
+ nop
+
wait_mckrdy
/* Enable the crystal oscillator */
@@ -280,9 +291,9 @@ ENDPROC(at91_backup_mode)
wait_moscrdy
/* Switch the master clock source to slow clock */
- ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ ldr tmp1, [pmc, tmp2]
bic tmp1, tmp1, #AT91_PMC_CSS
- str tmp1, [pmc, #AT91_PMC_MCKR]
+ str tmp1, [pmc, tmp2]
wait_mckrdy
@@ -296,10 +307,10 @@ ENDPROC(at91_backup_mode)
wait_moscsels
/* Switch the master clock source to main clock */
- ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ ldr tmp1, [pmc, tmp2]
bic tmp1, tmp1, #AT91_PMC_CSS
orr tmp1, tmp1, #AT91_PMC_CSS_MAIN
- str tmp1, [pmc, #AT91_PMC_MCKR]
+ str tmp1, [pmc, tmp2]
wait_mckrdy
@@ -323,21 +334,160 @@ ENDPROC(at91_backup_mode)
3:
.endm
+.macro at91_plla_disable
+ /* Save PLLA setting and disable it */
+ ldr tmp1, .pmc_version
+ cmp tmp1, #AT91_PMC_V1
+ beq 1f
+
+#ifdef CONFIG_SOC_SAM9X60
+ /* Save PLLA settings. */
+ ldr tmp2, [pmc, #AT91_PMC_PLL_UPDT]
+ bic tmp2, tmp2, #AT91_PMC_PLL_UPDT_ID
+ str tmp2, [pmc, #AT91_PMC_PLL_UPDT]
+
+ /* save div. */
+ mov tmp1, #0
+ ldr tmp2, [pmc, #AT91_PMC_PLL_CTRL0]
+ bic tmp2, tmp2, #0xffffff00
+ orr tmp1, tmp1, tmp2
+
+ /* save mul. */
+ ldr tmp2, [pmc, #AT91_PMC_PLL_CTRL1]
+ bic tmp2, tmp2, #0xffffff
+ orr tmp1, tmp1, tmp2
+ str tmp1, .saved_pllar
+
+ /* step 2. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
+ str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+
+ /* step 3. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
+ bic tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLLCK
+ orr tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLL
+ str tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
+
+ /* step 4. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+ orr tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
+ str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+
+ /* step 5. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
+ bic tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLL
+ str tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
+
+ /* step 7. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+ orr tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
+ str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+
+ b 2f
+#endif
+
+1: /* Save PLLA setting and disable it */
+ ldr tmp1, [pmc, #AT91_CKGR_PLLAR]
+ str tmp1, .saved_pllar
+
+ /* Disable PLLA. */
+ mov tmp1, #AT91_PMC_PLLCOUNT
+ orr tmp1, tmp1, #(1 << 29) /* bit 29 always set */
+ str tmp1, [pmc, #AT91_CKGR_PLLAR]
+2:
+.endm
+
+.macro at91_plla_enable
+ ldr tmp2, .saved_pllar
+ ldr tmp3, .pmc_version
+ cmp tmp3, #AT91_PMC_V1
+ beq 4f
+
+#ifdef CONFIG_SOC_SAM9X60
+ /* step 1. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
+ str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+
+ /* step 2. */
+ ldr tmp1, =AT91_PMC_PLL_ACR_DEFAULT_PLLA
+ str tmp1, [pmc, #AT91_PMC_PLL_ACR]
+
+ /* step 3. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_CTRL1]
+ mov tmp3, tmp2
+ bic tmp3, tmp3, #0xffffff
+ orr tmp1, tmp1, tmp3
+ str tmp1, [pmc, #AT91_PMC_PLL_CTRL1]
+
+ /* step 8. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
+ orr tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
+ str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+
+ /* step 9. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
+ orr tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENLOCK
+ orr tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLL
+ orr tmp1, tmp1, #AT91_PMC_PLL_CTRL0_ENPLLCK
+ bic tmp1, tmp1, #0xff
+ mov tmp3, tmp2
+ bic tmp3, tmp3, #0xffffff00
+ orr tmp1, tmp1, tmp3
+ str tmp1, [pmc, #AT91_PMC_PLL_CTRL0]
+
+ /* step 10. */
+ ldr tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+ orr tmp1, tmp1, #AT91_PMC_PLL_UPDT_UPDATE
+ bic tmp1, tmp1, #AT91_PMC_PLL_UPDT_ID
+ str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+
+ /* step 11. */
+3: ldr tmp1, [pmc, #AT91_PMC_PLL_ISR0]
+ tst tmp1, #0x1
+ beq 3b
+ b 2f
+#endif
+
+ /* Restore PLLA setting */
+4: str tmp2, [pmc, #AT91_CKGR_PLLAR]
+
+ /* Enable PLLA. */
+ tst tmp2, #(AT91_PMC_MUL & 0xff0000)
+ bne 1f
+ tst tmp2, #(AT91_PMC_MUL & ~0xff0000)
+ beq 2f
+
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_LOCKA
+ beq 1b
+2:
+.endm
+
ENTRY(at91_ulp_mode)
ldr pmc, .pmc_base
+ ldr tmp2, .mckr_offset
/* Save Master clock setting */
- ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ ldr tmp1, [pmc, tmp2]
str tmp1, .saved_mckr
/*
* Set the Master clock source to slow clock
*/
bic tmp1, tmp1, #AT91_PMC_CSS
- str tmp1, [pmc, #AT91_PMC_MCKR]
+ str tmp1, [pmc, tmp2]
wait_mckrdy
+ at91_plla_disable
+
ldr r0, .pm_mode
cmp r0, #AT91_PM_ULP1
beq ulp1_mode
@@ -352,11 +502,14 @@ ulp1_mode:
ulp_exit:
ldr pmc, .pmc_base
+ at91_plla_enable
+
/*
* Restore master clock setting
*/
- ldr tmp1, .saved_mckr
- str tmp1, [pmc, #AT91_PMC_MCKR]
+ ldr tmp1, .mckr_offset
+ ldr tmp2, .saved_mckr
+ str tmp2, [pmc, tmp1]
wait_mckrdy
@@ -496,14 +649,20 @@ ENDPROC(at91_sramc_self_refresh)
.word 0
.shdwc:
.word 0
-.sfr:
+.sfrbu:
.word 0
.memtype:
.word 0
.pm_mode:
.word 0
+.mckr_offset:
+ .word 0
+.pmc_version:
+ .word 0
.saved_mckr:
.word 0
+.saved_pllar:
+ .word 0
.saved_sam9_lpr:
.word 0
.saved_sam9_lpr1:
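
The numbered steps in at91_plla_disable follow the PMC v2 update protocol: a CTRL0/CTRL1 change only takes effect once PLL_UPDT is written with the UPDATE bit for the selected PLL ID. A condensed C rendering of the disable half (register and bit names as in the assembly above; a sketch, not a tested driver path):

    static void plla_disable_v2(void __iomem *pmc)
    {
        u32 updt, ctrl0;

        /* Select PLL id 0 without latching anything yet. */
        updt = readl(pmc + AT91_PMC_PLL_UPDT);
        updt &= ~(AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID);
        writel(updt, pmc + AT91_PMC_PLL_UPDT);

        /* Drop the output clock enable while keeping the PLL running. */
        ctrl0 = readl(pmc + AT91_PMC_PLL_CTRL0);
        ctrl0 &= ~AT91_PMC_PLL_CTRL0_ENPLLCK;
        ctrl0 |= AT91_PMC_PLL_CTRL0_ENPLL;
        writel(ctrl0, pmc + AT91_PMC_PLL_CTRL0);

        /* Latch, then disable the PLL itself and latch again. */
        writel(updt | AT91_PMC_PLL_UPDT_UPDATE, pmc + AT91_PMC_PLL_UPDT);
        ctrl0 &= ~AT91_PMC_PLL_CTRL0_ENPLL;
        writel(ctrl0, pmc + AT91_PMC_PLL_CTRL0);
        writel(updt | AT91_PMC_PLL_UPDT_UPDATE, pmc + AT91_PMC_PLL_UPDT);
    }
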
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index fcfe2a0e8058..6aa938b949db 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -20,7 +20,6 @@ config ARCH_BCM_IPROC
select GPIOLIB
select ARM_AMBA
select PINCTRL
- select PCI_DOMAINS_GENERIC if PCI
help
This enables support for systems based on Broadcom IPROC architected SoCs.
The IPROC complex contains one or more ARM CPUs along with common
@@ -54,7 +53,6 @@ config ARCH_BCM_NSP
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select ARM_ERRATA_764369 if SMP
- select HAVE_SMP
select THERMAL
select THERMAL_OF
help
@@ -73,7 +71,6 @@ config ARCH_BCM_5301X
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select ARM_ERRATA_764369 if SMP
- select HAVE_SMP
help
Support for Broadcom BCM470X and BCM5301X SoCs with ARM CPU cores.
@@ -109,7 +106,6 @@ config ARCH_BCM_281XX
bool "Broadcom BCM281XX SoC family"
depends on ARCH_MULTI_V7
select ARCH_BCM_MOBILE
- select HAVE_SMP
help
Enable support for the BCM281XX family, which includes
BCM11130, BCM11140, BCM11351, BCM28145 and BCM28155
@@ -119,7 +115,6 @@ config ARCH_BCM_21664
bool "Broadcom BCM21664 SoC family"
depends on ARCH_MULTI_V7
select ARCH_BCM_MOBILE
- select HAVE_SMP
help
Enable support for the BCM21664 family, which includes
BCM21663 and BCM21664 variants.
@@ -128,7 +123,6 @@ config ARCH_BCM_23550
bool "Broadcom BCM23550 SoC"
depends on ARCH_MULTI_V7
select ARCH_BCM_MOBILE
- select HAVE_SMP
help
Enable support for the BCM23550.
@@ -165,7 +159,6 @@ config ARCH_BCM2835
select ZONE_DMA if ARCH_MULTI_V7
select ARM_TIMER_SP804
select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
- select TIMER_OF
select BCM2835_TIMER
select PINCTRL
select PINCTRL_BCM2835
@@ -201,7 +194,6 @@ config ARCH_BCM_63XX
select HAVE_ARM_ARCH_TIMER
select HAVE_ARM_TWD if SMP
select HAVE_ARM_SCU if SMP
- select HAVE_SMP
help
This enables support for systems based on Broadcom DSL SoCs.
It currently supports the 'BCM63XX' ARM-based family, which includes
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 1d61a7701c11..e4f4b20b83a2 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -189,12 +189,6 @@ static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction cns3xxx_timer_irq = {
- .name = "timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = cns3xxx_timer_interrupt,
-};
-
/*
* Set up the clock source and clock events devices
*/
@@ -245,7 +239,9 @@ static void __init __cns3xxx_timer_init(unsigned int timer_irq)
writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
/* Make irqs happen for the system timer */
- setup_irq(timer_irq, &cns3xxx_timer_irq);
+ if (request_irq(timer_irq, cns3xxx_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", timer_irq);
cns3xxx_clockevents_init(timer_irq);
}
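
This hunk is the template for the rest of the commit: each legacy static struct irqaction plus setup_irq() pair is folded into a request_irq() call that passes the same handler, flags, name, and dev_id, with the one behavioural addition that a failed request is now reported. Side by side:

    /* before: flags and name lived in a static irqaction */
    static struct irqaction timer_irq = {
        .name    = "timer",
        .flags   = IRQF_TIMER | IRQF_IRQPOLL,
        .handler = timer_interrupt,
    };
    setup_irq(irq, &timer_irq);      /* return value ignored */

    /* after: one call, same parameters, failure logged */
    if (request_irq(irq, timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
                    "timer", NULL))
        pr_err("Failed to request irq %d (timer)\n", irq);
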
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 01b830afcea9..dbe970e37895 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -48,7 +48,7 @@
/*****************************************************************************
* I/O Address Mapping
****************************************************************************/
-static struct map_desc dove_io_desc[] __initdata = {
+static struct map_desc __maybe_unused dove_io_desc[] __initdata = {
{
.virtual = (unsigned long) DOVE_SB_REGS_VIRT_BASE,
.pfn = __phys_to_pfn(DOVE_SB_REGS_PHYS_BASE),
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index da2ff4f61d6b..575b2e2b6759 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -201,17 +201,13 @@ ebsa110_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction ebsa110_timer_irq = {
- .name = "EBSA110 Timer Tick",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = ebsa110_timer_interrupt,
-};
-
/*
* Set up timer interrupt.
*/
void __init ebsa110_timer_init(void)
{
+ int irq = IRQ_EBSA110_TIMER0;
+
arch_gettimeoffset = ebsa110_gettimeoffset;
/*
@@ -221,7 +217,9 @@ void __init ebsa110_timer_init(void)
__raw_writeb(COUNT & 0xff, PIT_T1);
__raw_writeb(COUNT >> 8, PIT_T1);
- setup_irq(IRQ_EBSA110_TIMER0, &ebsa110_timer_irq);
+ if (request_irq(irq, ebsa110_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "EBSA110 Timer Tick", NULL))
+ pr_err("Failed to request irq %d (EBSA110 Timer Tick)\n", irq);
}
static struct plat_serial8250_port serial_platform_data[] = {
diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
index de998830f534..dd4b164d1831 100644
--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
+++ b/arch/arm/mach-ep93xx/timer-ep93xx.c
@@ -117,15 +117,11 @@ static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction ep93xx_timer_irq = {
- .name = "ep93xx timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = ep93xx_timer_interrupt,
- .dev_id = &ep93xx_clockevent,
-};
-
void __init ep93xx_timer_init(void)
{
+ int irq = IRQ_EP93XX_TIMER3;
+ unsigned long flags = IRQF_TIMER | IRQF_IRQPOLL;
+
/* Enable and register clocksource and sched_clock on timer 4 */
writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE,
EP93XX_TIMER4_VALUE_HIGH);
@@ -136,7 +132,9 @@ void __init ep93xx_timer_init(void)
EP93XX_TIMER4_RATE);
/* Set up clockevent on timer 3 */
- setup_irq(IRQ_EP93XX_TIMER3, &ep93xx_timer_irq);
+ if (request_irq(irq, ep93xx_timer_interrupt, flags, "ep93xx timer",
+ &ep93xx_clockevent))
+ pr_err("Failed to request irq %d (ep93xx timer)\n", irq);
clockevents_config_and_register(&ep93xx_clockevent,
EP93XX_TIMER123_RATE,
1,
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index cbbe03e96de8..76838255b5fa 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -21,7 +21,7 @@ menuconfig ARCH_EXYNOS
select EXYNOS_SROM
select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
select GPIOLIB
- select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5 && VIRTUALIZATION
+ select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5
select HAVE_ARM_SCU if SMP
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index f76212d2dbf1..2908c9ef3c9b 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -101,13 +101,6 @@ static irqreturn_t timer1_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction footbridge_timer_irq = {
- .name = "dc21285_timer1",
- .handler = timer1_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .dev_id = &ckevt_dc21285,
-};
-
/*
* Set up timer interrupt.
*/
@@ -118,7 +111,9 @@ void __init footbridge_timer_init(void)
clocksource_register_hz(&cksrc_dc21285, rate);
- setup_irq(ce->irq, &footbridge_timer_irq);
+ if (request_irq(ce->irq, timer1_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "dc21285_timer1", &ckevt_dc21285))
+ pr_err("Failed to request irq %d (dc21285_timer1)", ce->irq);
ce->cpumask = cpumask_of(smp_processor_id());
clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index 88a553932c33..842ddb4121ef 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -96,11 +96,6 @@ static void isa_irq_handler(struct irq_desc *desc)
generic_handle_irq(isa_irq);
}
-static struct irqaction irq_cascade = {
- .handler = no_action,
- .name = "cascade",
-};
-
static struct resource pic1_resource = {
.name = "pic1",
.start = 0x20,
@@ -160,7 +155,10 @@ void __init isa_init_irq(unsigned int host_irq)
request_resource(&ioport_resource, &pic1_resource);
request_resource(&ioport_resource, &pic2_resource);
- setup_irq(IRQ_ISA_CASCADE, &irq_cascade);
+
+ irq = IRQ_ISA_CASCADE;
+ if (request_irq(irq, no_action, 0, "cascade", NULL))
+ pr_err("Failed to request irq %u (cascade)\n", irq);
irq_set_chained_handler(host_irq, isa_irq_handler);
diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c
index 82f45591fb2c..723e3eae995d 100644
--- a/arch/arm/mach-footbridge/isa-timer.c
+++ b/arch/arm/mach-footbridge/isa-timer.c
@@ -25,17 +25,12 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction pit_timer_irq = {
- .name = "pit",
- .handler = pit_timer_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .dev_id = &i8253_clockevent,
-};
-
void __init isa_timer_init(void)
{
clocksource_i8253_init();
- setup_irq(i8253_clockevent.irq, &pit_timer_irq);
+ if (request_irq(i8253_clockevent.irq, pit_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "pit", &i8253_clockevent))
+ pr_err("Failed to request irq %d(pit)\n", i8253_clockevent.irq);
clockevent_i8253_init(false);
}
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 95584ee02b55..e7d7b90e2cf8 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -471,8 +471,6 @@ config SOC_IMX53
config SOC_IMX6
bool
select ARM_CPU_SUSPEND if (PM || CPU_IDLE)
- select ARM_ERRATA_754322
- select ARM_ERRATA_775420
select ARM_GIC
select HAVE_IMX_ANATOP
select HAVE_IMX_GPC
@@ -484,6 +482,8 @@ config SOC_IMX6
config SOC_IMX6Q
bool "i.MX6 Quad/DualLite support"
select ARM_ERRATA_764369 if SMP
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_775420
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD
select PINCTRL_IMX6Q
@@ -494,6 +494,8 @@ config SOC_IMX6Q
config SOC_IMX6SL
bool "i.MX6 SoloLite support"
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_775420
select PINCTRL_IMX6SL
select SOC_IMX6
@@ -502,6 +504,8 @@ config SOC_IMX6SL
config SOC_IMX6SLL
bool "i.MX6 SoloLiteLite support"
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_775420
select PINCTRL_IMX6SLL
select SOC_IMX6
@@ -510,6 +514,8 @@ config SOC_IMX6SLL
config SOC_IMX6SX
bool "i.MX6 SoloX support"
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_775420
select PINCTRL_IMX6SX
select SOC_IMX6
diff --git a/arch/arm/mach-imx/anatop.c b/arch/arm/mach-imx/anatop.c
index 8fb68c0ec34c..d841bed8664d 100644
--- a/arch/arm/mach-imx/anatop.c
+++ b/arch/arm/mach-imx/anatop.c
@@ -89,12 +89,11 @@ void imx_anatop_post_resume(void)
if (cpu_is_imx6sl())
imx_anatop_disconnect_high_snvs(false);
-
}
void __init imx_init_revision_from_anatop(void)
{
- struct device_node *np;
+ struct device_node *np, *src_np;
void __iomem *anatop_base;
unsigned int revision;
u32 digprog;
@@ -135,9 +134,10 @@ void __init imx_init_revision_from_anatop(void)
void __iomem *src_base;
u32 sbmr2;
- np = of_find_compatible_node(NULL, NULL,
+ src_np = of_find_compatible_node(NULL, NULL,
"fsl,imx6ul-src");
- src_base = of_iomap(np, 0);
+ src_base = of_iomap(src_np, 0);
+ of_node_put(src_np);
WARN_ON(!src_base);
sbmr2 = readl_relaxed(src_base + SRC_SBMR2);
iounmap(src_base);
@@ -149,6 +149,7 @@ void __init imx_init_revision_from_anatop(void)
}
}
}
+ of_node_put(np);
mxc_set_cpu_type(digprog >> 16 & 0xff);
imx_set_soc_revision(revision);
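
The other recurring fix in the imx hunks is reference-count hygiene: of_find_compatible_node() returns its node with an elevated refcount, and the mapping created by of_iomap() does not depend on that reference, so the lookup reference should be dropped as soon as the mapping exists. The minimal correct shape (the compatible string is hypothetical):

    struct device_node *np;
    void __iomem *base;

    np = of_find_compatible_node(NULL, NULL, "vendor,example-block");
    base = of_iomap(np, 0);  /* mapping outlives the node reference */
    of_node_put(np);         /* balance of_find_compatible_node()   */
    if (!base)
        return;              /* or WARN_ON(!base), as above         */
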
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index b5b557fe2c49..ebc4339b8be4 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -10,7 +10,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/irqchip/arm-gic.h>
+
#include "common.h"
#include "hardware.h"
@@ -111,7 +111,6 @@ void imx_gpc_mask_all(void)
gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
writel_relaxed(~0, reg_imr1 + i * 4);
}
-
}
void imx_gpc_restore_all(void)
@@ -282,4 +281,5 @@ void __init imx_gpc_check_dt(void)
/* map GPC, so that at least CPUidle and WARs keep working */
gpc_base = of_iomap(np, 0);
}
+ of_node_put(np);
}
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index edd26e0ffeec..284bce1112d2 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -5,29 +5,16 @@
*/
#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/irq.h>
#include <linux/irqchip.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/pm_opp.h>
#include <linux/pci.h>
#include <linux/phy.h>
-#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/micrel_phy.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/system_misc.h>
#include "common.h"
#include "cpuidle.h"
diff --git a/arch/arm/mach-imx/mach-imx6sl.c b/arch/arm/mach-imx/mach-imx6sl.c
index e00818abe54d..e27a6889cc56 100644
--- a/arch/arm/mach-imx/mach-imx6sl.c
+++ b/arch/arm/mach-imx/mach-imx6sl.c
@@ -4,7 +4,6 @@
*/
#include <linux/irqchip.h>
-#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index 311f5e4ff723..3b0e16ccd59d 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -25,7 +25,6 @@ static void __init imx6ul_enet_clk_init(void)
IMX6UL_GPR1_ENET_CLK_OUTPUT);
else
pr_err("failed to find fsl,imx6ul-iomux-gpr regmap\n");
-
}
static int ksz8081_phy_fixup(struct phy_device *dev)
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index 2aa26928221d..cf4e9335831c 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -109,6 +109,7 @@ static void __init ls1021a_smp_prepare_cpus(unsigned int max_cpus)
np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-dcfg");
dcfg_base = of_iomap(np, 0);
+ of_node_put(np);
BUG_ON(!dcfg_base);
paddr = __pa_symbol(secondary_startup);
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 1c0ecad3620e..dd34dff13762 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -655,6 +655,8 @@ void __init imx6_pm_ccm_init(const char *ccm_compat)
if (of_property_read_bool(np, "fsl,pmic-stby-poweroff"))
imx6_pm_stby_poweroff_probe();
+
+ of_node_put(np);
}
void __init imx6q_pm_init(void)
diff --git a/arch/arm/mach-imx/pm-imx7ulp.c b/arch/arm/mach-imx/pm-imx7ulp.c
index 7b2f7387e662..2e756d8191fa 100644
--- a/arch/arm/mach-imx/pm-imx7ulp.c
+++ b/arch/arm/mach-imx/pm-imx7ulp.c
@@ -62,6 +62,7 @@ void __init imx7ulp_pm_init(void)
np = of_find_compatible_node(NULL, NULL, "fsl,imx7ulp-smc1");
smc1_base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!smc1_base);
imx7ulp_set_lpm(ULP_PM_RUN);
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 0beea6d0217f..f52f371292ac 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -43,9 +43,6 @@ static int imx_src_reset_module(struct reset_controller_dev *rcdev,
int bit;
u32 val;
- if (!src_base)
- return -ENODEV;
-
if (sw_reset_idx >= ARRAY_SIZE(sw_reset_bits))
return -EINVAL;
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 1ecbea5331d6..6f875ded8419 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -410,13 +410,10 @@ static int __ref impd1_probe(struct lm_device *dev)
* 5 = Key lower right
*/
/* We need the two MMCI GPIO entries */
- lookup->table[0].chip_label = chipname;
- lookup->table[0].chip_hwnum = 3;
- lookup->table[0].con_id = "wp";
- lookup->table[1].chip_label = chipname;
- lookup->table[1].chip_hwnum = 4;
- lookup->table[1].con_id = "cd";
- lookup->table[1].flags = GPIO_ACTIVE_LOW;
+ lookup->table[0] = (struct gpiod_lookup)
+ GPIO_LOOKUP(chipname, 3, "wp", 0);
+ lookup->table[1] = (struct gpiod_lookup)
+ GPIO_LOOKUP(chipname, 4, "cd", GPIO_ACTIVE_LOW);
gpiod_add_lookup_table(lookup);
}
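
The impd1 change is pure notation: GPIO_LOOKUP() expands to a designated initializer for struct gpiod_lookup, so the seven field assignments collapse into two table entries with identical meaning, and the compound-literal cast is what allows assigning into an existing array slot. Roughly (a sketch of the expansion, not the exact header text):

    /* GPIO_LOOKUP(chip, hwnum, con_id, flags) is approximately: */
    { .chip_label = chip, .chip_hwnum = hwnum, .con_id = con_id, .flags = flags }

    /* so the assignment above is equivalent to: */
    lookup->table[1] = (struct gpiod_lookup)
        GPIO_LOOKUP(chipname, 4, "cd", GPIO_ACTIVE_LOW);
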
diff --git a/arch/arm/mach-iop32x/time.c b/arch/arm/mach-iop32x/time.c
index 18a4df5c1baa..ae533b66fefd 100644
--- a/arch/arm/mach-iop32x/time.c
+++ b/arch/arm/mach-iop32x/time.c
@@ -137,13 +137,6 @@ iop_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction iop_timer_irq = {
- .name = "IOP Timer Tick",
- .handler = iop_timer_interrupt,
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .dev_id = &iop_clockevent,
-};
-
static unsigned long iop_tick_rate;
unsigned long get_iop_tick_rate(void)
{
@@ -154,6 +147,7 @@ EXPORT_SYMBOL(get_iop_tick_rate);
void __init iop_init_time(unsigned long tick_rate)
{
u32 timer_ctl;
+ int irq = IRQ_IOP32X_TIMER0;
sched_clock_register(iop_read_sched_clock, 32, tick_rate);
@@ -168,7 +162,9 @@ void __init iop_init_time(unsigned long tick_rate)
*/
write_tmr0(timer_ctl & ~IOP_TMR_EN);
write_tisr(1);
- setup_irq(IRQ_IOP32X_TIMER0, &iop_timer_irq);
+ if (request_irq(irq, iop_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "IOP Timer Tick", &iop_clockevent))
+ pr_err("Failed to request irq() %d (IOP Timer Tick)\n", irq);
iop_clockevent.cpumask = cpumask_of(0);
clockevents_config_and_register(&iop_clockevent, tick_rate,
0xf, 0xfffffffe);
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index c65cfc1ad99b..049a65f47b42 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -175,13 +175,6 @@ static void __init timer_config(void)
__raw_writel(0x2, mmp_timer_base + TMR_CER);
}
-static struct irqaction timer_irq = {
- .name = "timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = timer_interrupt,
- .dev_id = &ckevt,
-};
-
void __init mmp_timer_init(int irq, unsigned long rate)
{
timer_config();
@@ -190,7 +183,9 @@ void __init mmp_timer_init(int irq, unsigned long rate)
ckevt.cpumask = cpumask_of(0);
- setup_irq(irq, &timer_irq);
+ if (request_irq(irq, timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "timer", &ckevt))
+ pr_err("Failed to request irq %d (timer)\n", irq);
clocksource_register_hz(&cksrc, rate);
clockevents_config_and_register(&ckevt, rate, MIN_DELTA, MAX_DELTA);
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index a2aa7a12b374..8d32894ecd2e 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -17,6 +17,8 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/leds.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/fixed.h>
@@ -294,9 +296,42 @@ struct modem_private_data {
static struct modem_private_data modem_priv;
+/*
+ * Define partitions for flash device
+ */
+
+static struct mtd_partition partition_info[] = {
+ { .name = "Kernel",
+ .offset = 0,
+ .size = 3 * SZ_1M + SZ_512K },
+ { .name = "u-boot",
+ .offset = 3 * SZ_1M + SZ_512K,
+ .size = SZ_256K },
+ { .name = "u-boot params",
+ .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
+ .size = SZ_256K },
+ { .name = "Amstrad LDR",
+ .offset = 4 * SZ_1M,
+ .size = SZ_256K },
+ { .name = "File system",
+ .offset = 4 * SZ_1M + 1 * SZ_256K,
+ .size = 27 * SZ_1M },
+ { .name = "PBL reserved",
+ .offset = 32 * SZ_1M - 3 * SZ_256K,
+ .size = 3 * SZ_256K },
+};
+
+static struct gpio_nand_platdata nand_platdata = {
+ .parts = partition_info,
+ .num_parts = ARRAY_SIZE(partition_info),
+};
+
static struct platform_device ams_delta_nand_device = {
.name = "ams-delta-nand",
.id = -1,
+ .dev = {
+ .platform_data = &nand_platdata,
+ },
};
#define OMAP_GPIO_LABEL "gpio-0-15"
@@ -306,10 +341,14 @@ static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
.table = {
GPIO_LOOKUP(OMAP_GPIO_LABEL, AMS_DELTA_GPIO_PIN_NAND_RB, "rdy",
0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NCE, "nce", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NRE, "nre", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWP, "nwp", 0),
- GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe", 0),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NCE, "nce",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NRE, "nre",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWP, "nwp",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe",
+ GPIO_ACTIVE_LOW),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_ALE, "ale", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_CLE, "cle", 0),
GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 0, "data", 0, 0),
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index d068958d6f8a..2c1e2b32b9b3 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -596,11 +596,6 @@ static irqreturn_t omap_wakeup_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction omap_wakeup_irq = {
- .name = "peripheral wakeup",
- .handler = omap_wakeup_interrupt
-};
-
static const struct platform_suspend_ops omap_pm_ops = {
@@ -613,6 +608,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
static int __init omap_pm_init(void)
{
int error = 0;
+ int irq = -1;
if (!cpu_class_is_omap1())
return -ENODEV;
@@ -656,9 +652,12 @@ static int __init omap_pm_init(void)
arm_pm_idle = omap1_pm_idle;
if (cpu_is_omap7xx())
- setup_irq(INT_7XX_WAKE_UP_REQ, &omap_wakeup_irq);
+ irq = INT_7XX_WAKE_UP_REQ;
else if (cpu_is_omap16xx())
- setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
+ irq = INT_1610_WAKE_UP_REQ;
+ if (irq >= 0 && request_irq(irq, omap_wakeup_interrupt, 0,
+ "peripheral wakeup", NULL))
+ pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
/* Program new power ramp-up time
* (0 for most boards since we don't lower voltage when in deep sleep)
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 524977a31a49..de590a85a42b 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -155,15 +155,11 @@ static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction omap_mpu_timer1_irq = {
- .name = "mpu_timer1",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = omap_mpu_timer1_interrupt,
-};
-
static __init void omap_init_mpu_timer(unsigned long rate)
{
- setup_irq(INT_TIMER1, &omap_mpu_timer1_irq);
+ if (request_irq(INT_TIMER1, omap_mpu_timer1_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "mpu_timer1", NULL))
+ pr_err("Failed to request irq %d (mpu_timer1)\n", INT_TIMER1);
omap_mpu_timer_start(0, (rate / HZ) - 1, 1);
clockevent_mpu_timer1.cpumask = cpumask_of(0);
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 0ae6c52a7d70..780fdf03c3ce 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -148,15 +148,11 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction omap_32k_timer_irq = {
- .name = "32KHz timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = omap_32k_timer_interrupt,
-};
-
static __init void omap_init_32k_timer(void)
{
- setup_irq(INT_OS_TIMER, &omap_32k_timer_irq);
+ if (request_irq(INT_OS_TIMER, omap_32k_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "32KHz timer", NULL))
+ pr_err("Failed to request irq %d(32KHz timer)\n", INT_OS_TIMER);
clockevent_32k_timer.cpumask = cpumask_of(0);
clockevents_config_and_register(&clockevent_32k_timer,
diff --git a/arch/arm/mach-omap2/.gitignore b/arch/arm/mach-omap2/.gitignore
index 79a8d6ea7152..dc7be7556736 100644
--- a/arch/arm/mach-omap2/.gitignore
+++ b/arch/arm/mach-omap2/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
pm-asm-offsets.h
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 532a3e4b98c6..090a8aafb25e 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
int index)
{
struct omap3_idle_statedata *cx = &omap3_idle_data[index];
+ int error;
if (omap_irq_pending() || need_resched())
goto return_sleep_time;
@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP context is saved.
*/
- if (cx->mpu_state == PWRDM_POWER_OFF)
- cpu_pm_enter();
+ if (cx->mpu_state == PWRDM_POWER_OFF) {
+ error = cpu_pm_enter();
+ if (error)
+ goto out_clkdm_set;
+ }
/* Execute ARM wfi */
omap_sram_idle();
@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
cpu_pm_exit();
+out_clkdm_set:
/* Re-allow idle for C1 */
if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
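
The change above makes omap3_enter_idle() honour the return value of cpu_pm_enter(), which can fail when a CPU PM notifier (for example a driver that cannot save its context) refuses the transition; the idle path must then unwind rather than power off the MPU with unsaved state. A hedged sketch of the control flow, not the literal OMAP code:

	/*
	 * Illustrative sketch only: bail out of a context-losing idle
	 * state when the CPU PM notifier chain reports an error.
	 */
	static int my_enter_idle(void)
	{
		int error;

		error = cpu_pm_enter();
		if (error)
			goto out;	/* a notifier refused; stay shallow */

		/* ... program power domains and execute WFI ... */

		cpu_pm_exit();
	out:
		return error;
	}
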
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index fe75d4fa6073..6f5f89711f25 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
{
struct idle_statedata *cx = state_ptr + index;
u32 mpuss_can_lose_context = 0;
+ int error;
/*
* CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved.
*/
- cpu_pm_enter();
+ error = cpu_pm_enter();
+ if (error)
+ goto cpu_pm_out;
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU cluster PM enter notifier chain
* to save GIC and wakeupgen context.
*/
- if (mpuss_can_lose_context)
- cpu_cluster_pm_enter();
+ if (mpuss_can_lose_context) {
+ error = cpu_cluster_pm_enter();
+ if (error)
+ goto cpu_cluster_pm_out;
+ }
}
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu_done[dev->cpu] = true;
+cpu_cluster_pm_out:
/* Wakeup CPU1 only if it is not offlined */
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
@@ -198,18 +205,19 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
}
/*
- * Call idle CPU PM exit notifier chain to restore
- * VFP and per CPU IRQ context.
- */
- cpu_pm_exit();
-
- /*
* Call idle CPU cluster PM exit notifier chain
* to restore GIC and wakeupgen context.
*/
if (dev->cpu == 0 && mpuss_can_lose_context)
cpu_cluster_pm_exit();
+ /*
+ * Call idle CPU PM exit notifier chain to restore
+ * VFP and per CPU IRQ context.
+ */
+ cpu_pm_exit();
+
+cpu_pm_out:
tick_broadcast_exit();
fail:
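
The cpuidle44xx hunk additionally reorders the exit path so the notifier nesting is symmetric: enter is cpu_pm_enter() then cpu_cluster_pm_enter(), so exit must run cpu_cluster_pm_exit() before cpu_pm_exit(). A hedged sketch of the resulting nesting (hypothetical function and label names, not the literal driver code):

	/*
	 * Illustrative sketch only: symmetric CPU/cluster PM notifier
	 * nesting, with the innermost scope exiting first.
	 */
	static int my_enter_cluster_idle(void)
	{
		int error;

		error = cpu_pm_enter();
		if (error)
			return error;

		error = cpu_cluster_pm_enter();
		if (error)
			goto cpu_exit;

		/* ... enter the cluster low-power state ... */

		cpu_cluster_pm_exit();
	cpu_exit:
		cpu_pm_exit();
		return error;
	}
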
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 17d337ed18be..82706af307de 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3148,15 +3148,14 @@ static int omap_hwmod_check_sysc(struct device *dev,
/**
* omap_hwmod_init_regbits - init sysconfig specific register bits
* @dev: struct device
+ * @oh: struct omap_hwmod being set up
* @data: module data
* @sysc_fields: new sysc configuration
*/
-static int omap_hwmod_init_regbits(struct device *dev,
+static int omap_hwmod_init_regbits(struct device *dev, struct omap_hwmod *oh,
const struct ti_sysc_module_data *data,
struct sysc_regbits **sysc_fields)
{
- *sysc_fields = NULL;
-
switch (data->cap->type) {
case TI_SYSC_OMAP2:
case TI_SYSC_OMAP2_TIMER:
@@ -3191,6 +3190,12 @@ static int omap_hwmod_init_regbits(struct device *dev,
*sysc_fields = &omap_hwmod_sysc_type_usb_host_fs;
break;
default:
+ *sysc_fields = NULL;
+ if (!oh->class->sysc->sysc_fields)
+ return 0;
+
+ dev_err(dev, "sysc_fields not found\n");
+
return -EINVAL;
}
@@ -3356,9 +3361,9 @@ static int omap_hwmod_check_module(struct device *dev,
if (!oh->class->sysc)
return -ENODEV;
- if (sysc_fields != oh->class->sysc->sysc_fields)
- dev_warn(dev, "sysc_fields %p != %p\n", sysc_fields,
- oh->class->sysc->sysc_fields);
+ if (oh->class->sysc->sysc_fields &&
+ sysc_fields != oh->class->sysc->sysc_fields)
+ dev_warn(dev, "sysc_fields mismatch\n");
if (rev_offs != oh->class->sysc->rev_offs)
dev_warn(dev, "rev_offs %08x != %08x\n", rev_offs,
@@ -3574,7 +3579,7 @@ int omap_hwmod_init_module(struct device *dev,
cookie->data = oh;
- error = omap_hwmod_init_regbits(dev, data, &sysc_fields);
+ error = omap_hwmod_init_regbits(dev, oh, data, &sysc_fields);
if (error)
return error;
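
With the default case relaxed above, a module whose legacy hwmod class data deliberately omits sysc_fields can now be probed through ti-sysc instead of failing with -EINVAL, and the warning in omap_hwmod_check_module() only fires when legacy fields actually exist to compare against. A hedged sketch of the resolution policy (detected_type_known and fields_for_type are hypothetical names):

	/*
	 * Illustrative sketch only: resolving sysc_fields when matching
	 * ti-sysc probed data against legacy hwmod class data.
	 */
	if (detected_type_known) {
		*sysc_fields = fields_for_type; /* e.g. &omap_hwmod_sysc_type2 */
	} else {
		*sysc_fields = NULL;
		if (oh->class->sysc->sysc_fields)
			return -EINVAL;	/* legacy data expected fields */
		/* nothing to compare against, accept the module */
	}
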
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
index 5ef76fe3f33d..fa2ff41f84b9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
@@ -24,16 +24,11 @@ extern struct omap_hwmod_ocp_if am33xx_l3_s__l4_wkup;
extern struct omap_hwmod_ocp_if am33xx_l3_main__l3_instr;
extern struct omap_hwmod_ocp_if am33xx_mpu__prcm;
extern struct omap_hwmod_ocp_if am33xx_l3_s__l3_main;
-extern struct omap_hwmod_ocp_if am33xx_pruss__l3_main;
extern struct omap_hwmod_ocp_if am33xx_gfx__l3_main;
extern struct omap_hwmod_ocp_if am33xx_l3_main__gfx;
extern struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc;
extern struct omap_hwmod_ocp_if am33xx_l3_s__gpmc;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer2;
-extern struct omap_hwmod_ocp_if am33xx_l3_main__tpcc;
-extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc0;
-extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc1;
-extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc2;
extern struct omap_hwmod_ocp_if am33xx_l3_main__ocmc;
extern struct omap_hwmod am33xx_l3_main_hwmod;
@@ -42,7 +37,6 @@ extern struct omap_hwmod am33xx_l3_instr_hwmod;
extern struct omap_hwmod am33xx_l4_ls_hwmod;
extern struct omap_hwmod am33xx_l4_wkup_hwmod;
extern struct omap_hwmod am33xx_mpu_hwmod;
-extern struct omap_hwmod am33xx_pruss_hwmod;
extern struct omap_hwmod am33xx_gfx_hwmod;
extern struct omap_hwmod am33xx_prcm_hwmod;
extern struct omap_hwmod am33xx_ocmcram_hwmod;
@@ -52,10 +46,6 @@ extern struct omap_hwmod am33xx_gpmc_hwmod;
extern struct omap_hwmod am33xx_rtc_hwmod;
extern struct omap_hwmod am33xx_timer1_hwmod;
extern struct omap_hwmod am33xx_timer2_hwmod;
-extern struct omap_hwmod am33xx_tpcc_hwmod;
-extern struct omap_hwmod am33xx_tptc0_hwmod;
-extern struct omap_hwmod am33xx_tptc1_hwmod;
-extern struct omap_hwmod am33xx_tptc2_hwmod;
extern struct omap_hwmod_class am33xx_emif_hwmod_class;
extern struct omap_hwmod_class am33xx_l4_hwmod_class;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
index ac7d5bb1a02f..0ebbfbb4fb1c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
@@ -74,14 +74,6 @@ struct omap_hwmod_ocp_if am33xx_l3_s__l3_main = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* pru-icss -> l3 main */
-struct omap_hwmod_ocp_if am33xx_pruss__l3_main = {
- .master = &am33xx_pruss_hwmod,
- .slave = &am33xx_l3_main_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* gfx -> l3 main */
struct omap_hwmod_ocp_if am33xx_gfx__l3_main = {
.master = &am33xx_gfx_hwmod,
@@ -122,38 +114,6 @@ struct omap_hwmod_ocp_if am33xx_l4_ls__timer2 = {
.user = OCP_USER_MPU,
};
-/* l3 main -> tpcc */
-struct omap_hwmod_ocp_if am33xx_l3_main__tpcc = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tpcc_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l3 main -> tpcc0 */
-struct omap_hwmod_ocp_if am33xx_l3_main__tptc0 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tptc0_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l3 main -> tpcc1 */
-struct omap_hwmod_ocp_if am33xx_l3_main__tptc1 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tptc1_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l3 main -> tpcc2 */
-struct omap_hwmod_ocp_if am33xx_l3_main__tptc2 = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_tptc2_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l3 main -> ocmc */
struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
.master = &am33xx_l3_main_hwmod,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 78ec1bc8e3a1..dca5a3a7b97c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -133,34 +133,6 @@ struct omap_hwmod_class am33xx_wkup_m3_hwmod_class = {
.name = "wkup_m3",
};
-/*
- * 'pru-icss' class
- * Programmable Real-Time Unit and Industrial Communication Subsystem
- */
-static struct omap_hwmod_class am33xx_pruss_hwmod_class = {
- .name = "pruss",
-};
-
-static struct omap_hwmod_rst_info am33xx_pruss_resets[] = {
- { .name = "pruss", .rst_shift = 1 },
-};
-
-/* pru-icss */
-/* Pseudo hwmod for reset control purpose only */
-struct omap_hwmod am33xx_pruss_hwmod = {
- .name = "pruss",
- .class = &am33xx_pruss_hwmod_class,
- .clkdm_name = "pruss_ocp_clkdm",
- .main_clk = "pruss_ocp_gclk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .rst_lines = am33xx_pruss_resets,
- .rst_lines_cnt = ARRAY_SIZE(am33xx_pruss_resets),
-};
-
/* gfx */
/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod_class am33xx_gfx_hwmod_class = {
@@ -393,80 +365,6 @@ struct omap_hwmod am33xx_timer2_hwmod = {
},
};
-/* tpcc */
-static struct omap_hwmod_class am33xx_tpcc_hwmod_class = {
- .name = "tpcc",
-};
-
-struct omap_hwmod am33xx_tpcc_hwmod = {
- .name = "tpcc",
- .class = &am33xx_tpcc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_class_sysconfig am33xx_tptc_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x10,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_MIDLEMODE),
- .idlemodes = (SIDLE_FORCE | SIDLE_SMART | MSTANDBY_FORCE),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-/* 'tptc' class */
-static struct omap_hwmod_class am33xx_tptc_hwmod_class = {
- .name = "tptc",
- .sysc = &am33xx_tptc_sysc,
-};
-
-/* tptc0 */
-struct omap_hwmod am33xx_tptc0_hwmod = {
- .name = "tptc0",
- .class = &am33xx_tptc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* tptc1 */
-struct omap_hwmod am33xx_tptc1_hwmod = {
- .name = "tptc1",
- .class = &am33xx_tptc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* tptc2 */
-struct omap_hwmod am33xx_tptc2_hwmod = {
- .name = "tptc2",
- .class = &am33xx_tptc_hwmod_class,
- .clkdm_name = "l3_clkdm",
- .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
- .main_clk = "l3_gclk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
static void omap_hwmod_am33xx_clkctrl(void)
{
CLKCTRL(am33xx_timer2_hwmod, AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET);
@@ -481,12 +379,7 @@ static void omap_hwmod_am33xx_clkctrl(void)
CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_wkup_hwmod, AM33XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l3_main_hwmod, AM33XX_CM_PER_L3_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tpcc_hwmod, AM33XX_CM_PER_TPCC_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tptc0_hwmod, AM33XX_CM_PER_TPTC0_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tptc1_hwmod, AM33XX_CM_PER_TPTC1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tptc2_hwmod, AM33XX_CM_PER_TPTC2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gfx_hwmod, AM33XX_CM_GFX_GFX_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_pruss_hwmod, AM33XX_CM_PER_PRUSS_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mpu_hwmod , AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l3_instr_hwmod , AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET);
CLKCTRL(am33xx_ocmcram_hwmod , AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
@@ -494,7 +387,6 @@ static void omap_hwmod_am33xx_clkctrl(void)
static void omap_hwmod_am33xx_rst(void)
{
- RSTCTRL(am33xx_pruss_hwmod, AM33XX_RM_PER_RSTCTRL_OFFSET);
RSTCTRL(am33xx_gfx_hwmod, AM33XX_RM_GFX_RSTCTRL_OFFSET);
RSTST(am33xx_gfx_hwmod, AM33XX_RM_GFX_RSTST_OFFSET);
}
@@ -518,12 +410,7 @@ static void omap_hwmod_am43xx_clkctrl(void)
CLKCTRL(am33xx_l4_ls_hwmod, AM43XX_CM_PER_L4LS_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_wkup_hwmod, AM43XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l3_main_hwmod, AM43XX_CM_PER_L3_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tpcc_hwmod, AM43XX_CM_PER_TPCC_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tptc0_hwmod, AM43XX_CM_PER_TPTC0_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tptc1_hwmod, AM43XX_CM_PER_TPTC1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_tptc2_hwmod, AM43XX_CM_PER_TPTC2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gfx_hwmod, AM43XX_CM_GFX_GFX_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_pruss_hwmod, AM43XX_CM_PER_PRUSS_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mpu_hwmod , AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l3_instr_hwmod , AM43XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET);
CLKCTRL(am33xx_ocmcram_hwmod , AM43XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
@@ -531,9 +418,7 @@ static void omap_hwmod_am43xx_clkctrl(void)
static void omap_hwmod_am43xx_rst(void)
{
- RSTCTRL(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTCTRL_OFFSET);
RSTCTRL(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTCTRL_OFFSET);
- RSTST(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTST_OFFSET);
RSTST(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTST_OFFSET);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index f1ea8c604595..c64b735c8acc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -233,14 +233,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__wkup_m3 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4 hs -> pru-icss */
-static struct omap_hwmod_ocp_if am33xx_l4_hs__pruss = {
- .master = &am33xx_l4_hs_hwmod,
- .slave = &am33xx_pruss_hwmod,
- .clk = "dpll_core_m4_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main -> debugss */
static struct omap_hwmod_ocp_if am33xx_l3_main__debugss = {
.master = &am33xx_l3_main_hwmod,
@@ -292,7 +284,6 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l3_main__l3_instr,
&am33xx_l3_main__gfx,
&am33xx_l3_s__l3_main,
- &am33xx_pruss__l3_main,
&am33xx_wkup_m3__l4_wkup,
&am33xx_gfx__l3_main,
&am33xx_l3_main__debugss,
@@ -302,13 +293,8 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_wkup__smartreflex1,
&am33xx_l4_wkup__timer1,
&am33xx_l4_wkup__rtc,
- &am33xx_l4_hs__pruss,
&am33xx_l4_ls__timer2,
- &am33xx_l3_main__tpcc,
&am33xx_l3_s__gpmc,
- &am33xx_l3_main__tptc0,
- &am33xx_l3_main__tptc1,
- &am33xx_l3_main__tptc2,
&am33xx_l3_main__ocmc,
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index d0867dbd788e..d2203f44af88 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -156,75 +156,6 @@ static struct omap_hwmod am43xx_usb_otg_ss1_hwmod = {
},
};
-/* dss */
-
-static struct omap_hwmod am43xx_dss_core_hwmod = {
- .name = "dss_core",
- .class = &omap2_dss_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "disp_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM43XX_CM_PER_DSS_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* dispc */
-
-static struct omap_dss_dispc_dev_attr am43xx_dss_dispc_dev_attr = {
- .manager_count = 1,
- .has_framedonetv_irq = 0
-};
-
-static struct omap_hwmod_class_sysconfig am43xx_dispc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_MIDLEMODE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am43xx_dispc_hwmod_class = {
- .name = "dispc",
- .sysc = &am43xx_dispc_sysc,
-};
-
-static struct omap_hwmod am43xx_dss_dispc_hwmod = {
- .name = "dss_dispc",
- .class = &am43xx_dispc_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "disp_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM43XX_CM_PER_DSS_CLKCTRL_OFFSET,
- },
- },
- .dev_attr = &am43xx_dss_dispc_dev_attr,
- .parent_hwmod = &am43xx_dss_core_hwmod,
-};
-
-/* rfbi */
-
-static struct omap_hwmod am43xx_dss_rfbi_hwmod = {
- .name = "dss_rfbi",
- .class = &omap2_rfbi_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "disp_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM43XX_CM_PER_DSS_CLKCTRL_OFFSET,
- },
- },
- .parent_hwmod = &am43xx_dss_core_hwmod,
-};
-
-
/* Interfaces */
static struct omap_hwmod_ocp_if am43xx_l3_main__emif = {
.master = &am33xx_l3_main_hwmod,
@@ -254,13 +185,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_wkup__wkup_m3 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-static struct omap_hwmod_ocp_if am43xx_l3_main__pruss = {
- .master = &am33xx_l3_main_hwmod,
- .slave = &am33xx_pruss_hwmod,
- .clk = "dpll_core_m4_ck",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_ocp_if am43xx_l4_wkup__smartreflex0 = {
.master = &am33xx_l4_wkup_hwmod,
.slave = &am33xx_smartreflex0_hwmod,
@@ -310,37 +234,8 @@ static struct omap_hwmod_ocp_if am43xx_l3_s__usbotgss1 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-static struct omap_hwmod_ocp_if am43xx_dss__l3_main = {
- .master = &am43xx_dss_core_hwmod,
- .slave = &am33xx_l3_main_hwmod,
- .clk = "l3_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__dss = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am43xx_dss_core_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__dss_dispc = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am43xx_dss_dispc_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__dss_rfbi = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am43xx_dss_rfbi_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_wkup__synctimer,
- &am43xx_l3_main__pruss,
&am33xx_mpu__l3_main,
&am33xx_mpu__prcm,
&am33xx_l3_s__l4_ls,
@@ -351,7 +246,6 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l3_main__gfx,
&am33xx_l3_s__l3_main,
&am43xx_l3_main__emif,
- &am33xx_pruss__l3_main,
&am43xx_wkup_m3__l4_wkup,
&am33xx_gfx__l3_main,
&am43xx_l4_wkup__wkup_m3,
@@ -360,18 +254,10 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am43xx_l4_wkup__smartreflex1,
&am43xx_l4_wkup__timer1,
&am33xx_l4_ls__timer2,
- &am33xx_l3_main__tpcc,
&am33xx_l3_s__gpmc,
- &am33xx_l3_main__tptc0,
- &am33xx_l3_main__tptc1,
- &am33xx_l3_main__tptc2,
&am33xx_l3_main__ocmc,
&am43xx_l3_s__usbotgss0,
&am43xx_l3_s__usbotgss1,
- &am43xx_dss__l3_main,
- &am43xx_l4_ls__dss,
- &am43xx_l4_ls__dss_dispc,
- &am43xx_l4_ls__dss_rfbi,
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index b7c51ea8c9a6..33f6596c03f7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -356,306 +356,6 @@ static struct omap_hwmod omap44xx_debugss_hwmod = {
};
/*
- * 'dsp' class
- * dsp sub-system
- */
-
-static struct omap_hwmod_class omap44xx_dsp_hwmod_class = {
- .name = "dsp",
-};
-
-/* dsp */
-static struct omap_hwmod_rst_info omap44xx_dsp_resets[] = {
- { .name = "dsp", .rst_shift = 0 },
-};
-
-static struct omap_hwmod omap44xx_dsp_hwmod = {
- .name = "dsp",
- .class = &omap44xx_dsp_hwmod_class,
- .clkdm_name = "tesla_clkdm",
- .rst_lines = omap44xx_dsp_resets,
- .rst_lines_cnt = ARRAY_SIZE(omap44xx_dsp_resets),
- .main_clk = "dpll_iva_m4x2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET,
- .rstctrl_offs = OMAP4_RM_TESLA_RSTCTRL_OFFSET,
- .context_offs = OMAP4_RM_TESLA_TESLA_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * 'dss' class
- * display sub-system
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
- .rev_offs = 0x0000,
- .syss_offs = 0x0014,
- .sysc_flags = SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
- .name = "dss",
- .sysc = &omap44xx_dss_sysc,
- .reset = omap_dss_reset,
-};
-
-/* dss */
-static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
- { .role = "tv_clk", .clk = "dss_tv_clk" },
- { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
-static struct omap_hwmod omap44xx_dss_hwmod = {
- .name = "dss_core",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .class = &omap44xx_dss_hwmod_class,
- .clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = dss_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
-};
-
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_dispc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_dispc_hwmod_class = {
- .name = "dispc",
- .sysc = &omap44xx_dispc_sysc,
-};
-
-/* dss_dispc */
-static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
- .manager_count = 3,
- .has_framedonetv_irq = 1
-};
-
-static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
- .name = "dss_dispc",
- .class = &omap44xx_dispc_hwmod_class,
- .clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
- },
- },
- .dev_attr = &omap44xx_dss_dispc_dev_attr,
- .parent_hwmod = &omap44xx_dss_hwmod,
-};
-
-/*
- * 'dsi' class
- * display serial interface controller
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_dsi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_dsi_hwmod_class = {
- .name = "dsi",
- .sysc = &omap44xx_dsi_sysc,
-};
-
-/* dss_dsi1 */
-static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
-};
-
-static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
- .name = "dss_dsi1",
- .class = &omap44xx_dsi_hwmod_class,
- .clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
- },
- },
- .opt_clks = dss_dsi1_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks),
- .parent_hwmod = &omap44xx_dss_hwmod,
-};
-
-/* dss_dsi2 */
-static struct omap_hwmod_opt_clk dss_dsi2_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
-};
-
-static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
- .name = "dss_dsi2",
- .class = &omap44xx_dsi_hwmod_class,
- .clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
- },
- },
- .opt_clks = dss_dsi2_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dsi2_opt_clks),
- .parent_hwmod = &omap44xx_dss_hwmod,
-};
-
-/*
- * 'hdmi' class
- * hdmi controller
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_hdmi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_hdmi_hwmod_class = {
- .name = "hdmi",
- .sysc = &omap44xx_hdmi_sysc,
-};
-
-/* dss_hdmi */
-static struct omap_hwmod_opt_clk dss_hdmi_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
- { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
-static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
- .name = "dss_hdmi",
- .class = &omap44xx_hdmi_hwmod_class,
- .clkdm_name = "l3_dss_clkdm",
- /*
- * HDMI audio requires to use no-idle mode. Hence,
- * set idle mode by software.
- */
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_OPT_CLKS_NEEDED,
- .main_clk = "dss_48mhz_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
- },
- },
- .opt_clks = dss_hdmi_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_hdmi_opt_clks),
- .parent_hwmod = &omap44xx_dss_hwmod,
-};
-
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_rfbi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_rfbi_hwmod_class = {
- .name = "rfbi",
- .sysc = &omap44xx_rfbi_sysc,
-};
-
-/* dss_rfbi */
-static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
- { .role = "ick", .clk = "l3_div_ck" },
-};
-
-static struct omap_hwmod omap44xx_dss_rfbi_hwmod = {
- .name = "dss_rfbi",
- .class = &omap44xx_rfbi_hwmod_class,
- .clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
- },
- },
- .opt_clks = dss_rfbi_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
- .parent_hwmod = &omap44xx_dss_hwmod,
-};
-
-/*
- * 'venc' class
- * video encoder
- */
-
-static struct omap_hwmod_class omap44xx_venc_hwmod_class = {
- .name = "venc",
-};
-
-/* dss_venc */
-static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
- { .role = "tv_clk", .clk = "dss_tv_clk" },
-};
-
-static struct omap_hwmod omap44xx_dss_venc_hwmod = {
- .name = "dss_venc",
- .class = &omap44xx_venc_hwmod_class,
- .clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_tv_clk",
- .flags = HWMOD_OPT_CLKS_NEEDED,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
- },
- },
- .parent_hwmod = &omap44xx_dss_hwmod,
- .opt_clks = dss_venc_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks),
-};
-
-
-
-/*
* 'emif' class
* external memory interface no1
*/
@@ -737,39 +437,6 @@ static struct omap_hwmod omap44xx_gpmc_hwmod = {
},
};
-
-/*
- * 'ipu' class
- * imaging processor unit
- */
-
-static struct omap_hwmod_class omap44xx_ipu_hwmod_class = {
- .name = "ipu",
-};
-
-/* ipu */
-static struct omap_hwmod_rst_info omap44xx_ipu_resets[] = {
- { .name = "cpu0", .rst_shift = 0 },
- { .name = "cpu1", .rst_shift = 1 },
-};
-
-static struct omap_hwmod omap44xx_ipu_hwmod = {
- .name = "ipu",
- .class = &omap44xx_ipu_hwmod_class,
- .clkdm_name = "ducati_clkdm",
- .rst_lines = omap44xx_ipu_resets,
- .rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_resets),
- .main_clk = "ducati_clk_mux_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET,
- .rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
- .context_offs = OMAP4_RM_DUCATI_DUCATI_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
/*
* 'iss' class
* external images sensor pixel data processor
@@ -1236,22 +903,6 @@ static struct omap_hwmod_ocp_if omap44xx_ocp_wp_noc__l3_instr = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* dsp -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap44xx_dsp__l3_main_1 = {
- .master = &omap44xx_dsp_hwmod,
- .slave = &omap44xx_l3_main_1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* dss -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap44xx_dss__l3_main_1 = {
- .master = &omap44xx_dss_hwmod,
- .slave = &omap44xx_l3_main_1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_2 -> l3_main_1 */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -1284,14 +935,6 @@ static struct omap_hwmod_ocp_if omap44xx_debugss__l3_main_2 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* ipu -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_ipu__l3_main_2 = {
- .master = &omap44xx_ipu_hwmod,
- .slave = &omap44xx_l3_main_2_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* iss -> l3_main_2 */
static struct omap_hwmod_ocp_if omap44xx_iss__l3_main_2 = {
.master = &omap44xx_iss_hwmod,
@@ -1364,14 +1007,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* dsp -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_dsp__l4_abe = {
- .master = &omap44xx_dsp_hwmod,
- .slave = &omap44xx_l4_abe_hwmod,
- .clk = "ocp_abe_iclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_1 -> l4_abe */
static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_abe = {
.master = &omap44xx_l3_main_1_hwmod,
@@ -1476,142 +1111,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_instr__debugss = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* dsp -> iva */
-static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
- .master = &omap44xx_dsp_hwmod,
- .slave = &omap44xx_iva_hwmod,
- .clk = "dpll_iva_m5x2_ck",
- .user = OCP_USER_DSP,
-};
-
-/* dsp -> sl2if */
-static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
- .master = &omap44xx_dsp_hwmod,
- .slave = &omap44xx_sl2if_hwmod,
- .clk = "dpll_iva_m5x2_ck",
- .user = OCP_USER_DSP,
-};
-
-/* l4_cfg -> dsp */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dsp = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_dsp_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> dss */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_dss_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* l4_per -> dss */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__dss = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_dss_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> dss_dispc */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dispc = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_dss_dispc_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* l4_per -> dss_dispc */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_dss_dispc_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> dss_dsi1 */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi1 = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_dss_dsi1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* l4_per -> dss_dsi1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi1 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_dss_dsi1_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> dss_dsi2 */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi2 = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_dss_dsi2_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* l4_per -> dss_dsi2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi2 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_dss_dsi2_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> dss_hdmi */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_hdmi = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_dss_hdmi_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* l4_per -> dss_hdmi */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_hdmi = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_dss_hdmi_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> dss_rfbi */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_rfbi = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_dss_rfbi_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* l4_per -> dss_rfbi */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_rfbi = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_dss_rfbi_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> dss_venc */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_venc = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_dss_venc_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* l4_per -> dss_venc */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_dss_venc_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU,
-};
-
/* l3_main_2 -> gpmc */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpmc = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -1620,14 +1119,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpmc = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l3_main_2 -> ipu */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ipu = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_ipu_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_2 -> iss */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -1762,13 +1253,10 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_iva__l3_instr,
&omap44xx_l3_main_3__l3_instr,
&omap44xx_ocp_wp_noc__l3_instr,
- &omap44xx_dsp__l3_main_1,
- &omap44xx_dss__l3_main_1,
&omap44xx_l3_main_2__l3_main_1,
&omap44xx_l4_cfg__l3_main_1,
&omap44xx_mpu__l3_main_1,
&omap44xx_debugss__l3_main_2,
- &omap44xx_ipu__l3_main_2,
&omap44xx_iss__l3_main_2,
&omap44xx_iva__l3_main_2,
&omap44xx_l3_main_1__l3_main_2,
@@ -1778,7 +1266,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l3_main_1__l3_main_3,
&omap44xx_l3_main_2__l3_main_3,
&omap44xx_l4_cfg__l3_main_3,
- &omap44xx_dsp__l4_abe,
&omap44xx_l3_main_1__l4_abe,
&omap44xx_mpu__l4_abe,
&omap44xx_l3_main_1__l4_cfg,
@@ -1792,25 +1279,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_wkup__ctrl_module_wkup,
&omap44xx_l4_wkup__ctrl_module_pad_wkup,
&omap44xx_l3_instr__debugss,
- &omap44xx_dsp__iva,
- /* &omap44xx_dsp__sl2if, */
- &omap44xx_l4_cfg__dsp,
- &omap44xx_l3_main_2__dss,
- &omap44xx_l4_per__dss,
- &omap44xx_l3_main_2__dss_dispc,
- &omap44xx_l4_per__dss_dispc,
- &omap44xx_l3_main_2__dss_dsi1,
- &omap44xx_l4_per__dss_dsi1,
- &omap44xx_l3_main_2__dss_dsi2,
- &omap44xx_l4_per__dss_dsi2,
- &omap44xx_l3_main_2__dss_hdmi,
- &omap44xx_l4_per__dss_hdmi,
- &omap44xx_l3_main_2__dss_rfbi,
- &omap44xx_l4_per__dss_rfbi,
- &omap44xx_l3_main_2__dss_venc,
- &omap44xx_l4_per__dss_venc,
&omap44xx_l3_main_2__gpmc,
- &omap44xx_l3_main_2__ipu,
&omap44xx_l3_main_2__iss,
/* &omap44xx_iva__sl2if, */
&omap44xx_l3_main_2__iva,
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index ad398f6bc011..08f34f4732fd 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -227,240 +227,6 @@ static struct omap_hwmod omap54xx_counter_32k_hwmod = {
};
/*
- * 'dss' class
- * display sub-system
- */
-static struct omap_hwmod_class_sysconfig omap54xx_dss_sysc = {
- .rev_offs = 0x0000,
- .syss_offs = 0x0014,
- .sysc_flags = SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class omap54xx_dss_hwmod_class = {
- .name = "dss",
- .sysc = &omap54xx_dss_sysc,
- .reset = omap_dss_reset,
-};
-
-/* dss */
-static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "32khz_clk", .clk = "dss_32khz_clk" },
- { .role = "sys_clk", .clk = "dss_sys_clk" },
- { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
-static struct omap_hwmod omap54xx_dss_hwmod = {
- .name = "dss_core",
- .class = &omap54xx_dss_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_DSS_DSS_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = dss_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
-};
-
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_dispc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_dispc_hwmod_class = {
- .name = "dispc",
- .sysc = &omap54xx_dispc_sysc,
-};
-
-/* dss_dispc */
-static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
-};
-
-/* dss_dispc dev_attr */
-static struct omap_dss_dispc_dev_attr dss_dispc_dev_attr = {
- .has_framedonetv_irq = 1,
- .manager_count = 4,
-};
-
-static struct omap_hwmod omap54xx_dss_dispc_hwmod = {
- .name = "dss_dispc",
- .class = &omap54xx_dispc_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
- .opt_clks = dss_dispc_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dispc_opt_clks),
- .dev_attr = &dss_dispc_dev_attr,
- .parent_hwmod = &omap54xx_dss_hwmod,
-};
-
-/*
- * 'dsi1' class
- * display serial interface controller
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_dsi1_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_dsi1_hwmod_class = {
- .name = "dsi1",
- .sysc = &omap54xx_dsi1_sysc,
-};
-
-/* dss_dsi1_a */
-static struct omap_hwmod_opt_clk dss_dsi1_a_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
-};
-
-static struct omap_hwmod omap54xx_dss_dsi1_a_hwmod = {
- .name = "dss_dsi1",
- .class = &omap54xx_dsi1_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
- .opt_clks = dss_dsi1_a_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_a_opt_clks),
- .parent_hwmod = &omap54xx_dss_hwmod,
-};
-
-/* dss_dsi1_c */
-static struct omap_hwmod_opt_clk dss_dsi1_c_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
-};
-
-static struct omap_hwmod omap54xx_dss_dsi1_c_hwmod = {
- .name = "dss_dsi2",
- .class = &omap54xx_dsi1_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
- .opt_clks = dss_dsi1_c_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_c_opt_clks),
- .parent_hwmod = &omap54xx_dss_hwmod,
-};
-
-/*
- * 'hdmi' class
- * hdmi controller
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_hdmi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap54xx_hdmi_hwmod_class = {
- .name = "hdmi",
- .sysc = &omap54xx_hdmi_sysc,
-};
-
-static struct omap_hwmod_opt_clk dss_hdmi_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
-};
-
-static struct omap_hwmod omap54xx_dss_hdmi_hwmod = {
- .name = "dss_hdmi",
- .class = &omap54xx_hdmi_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "dss_48mhz_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
- .opt_clks = dss_hdmi_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_hdmi_opt_clks),
- .parent_hwmod = &omap54xx_dss_hwmod,
-};
-
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_rfbi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_rfbi_hwmod_class = {
- .name = "rfbi",
- .sysc = &omap54xx_rfbi_sysc,
-};
-
-/* dss_rfbi */
-static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
- { .role = "ick", .clk = "l3_iclk_div" },
-};
-
-static struct omap_hwmod omap54xx_dss_rfbi_hwmod = {
- .name = "dss_rfbi",
- .class = &omap54xx_rfbi_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
- .opt_clks = dss_rfbi_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
- .parent_hwmod = &omap54xx_dss_hwmod,
-};
-
-/*
* 'emif' class
* external memory interface no1 (wrapper)
*/
@@ -908,54 +674,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_wkup__counter_32k = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l3_main_2 -> dss */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__dss = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_dss_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> dss_dispc */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__dss_dispc = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_dss_dispc_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> dss_dsi1_a */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__dss_dsi1_a = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_dss_dsi1_a_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> dss_dsi1_c */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__dss_dsi1_c = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_dss_dsi1_c_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> dss_hdmi */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__dss_hdmi = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_dss_hdmi_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> dss_rfbi */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__dss_rfbi = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_dss_rfbi_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* mpu -> emif1 */
static struct omap_hwmod_ocp_if omap54xx_mpu__emif1 = {
.master = &omap54xx_mpu_hwmod,
@@ -1030,12 +748,6 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
&omap54xx_l3_main_1__l4_wkup,
&omap54xx_mpu__mpu_private,
&omap54xx_l4_wkup__counter_32k,
- &omap54xx_l3_main_2__dss,
- &omap54xx_l3_main_2__dss_dispc,
- &omap54xx_l3_main_2__dss_dsi1_a,
- &omap54xx_l3_main_2__dss_dsi1_c,
- &omap54xx_l3_main_2__dss_hdmi,
- &omap54xx_l3_main_2__dss_rfbi,
&omap54xx_mpu__emif1,
&omap54xx_mpu__emif2,
&omap54xx_l4_cfg__mpu,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index acef3733db4c..e95668bdbc3f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -277,203 +277,6 @@ static struct omap_hwmod dra7xx_ctrl_module_wkup_hwmod = {
};
/*
- * 'tpcc' class
- *
- */
-static struct omap_hwmod_class dra7xx_tpcc_hwmod_class = {
- .name = "tpcc",
-};
-
-static struct omap_hwmod dra7xx_tpcc_hwmod = {
- .name = "tpcc",
- .class = &dra7xx_tpcc_hwmod_class,
- .clkdm_name = "l3main1_clkdm",
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3MAIN1_TPCC_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3MAIN1_TPCC_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'tptc' class
- *
- */
-static struct omap_hwmod_class dra7xx_tptc_hwmod_class = {
- .name = "tptc",
-};
-
-/* tptc0 */
-static struct omap_hwmod dra7xx_tptc0_hwmod = {
- .name = "tptc0",
- .class = &dra7xx_tptc_hwmod_class,
- .clkdm_name = "l3main1_clkdm",
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3MAIN1_TPTC1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3MAIN1_TPTC1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/* tptc1 */
-static struct omap_hwmod dra7xx_tptc1_hwmod = {
- .name = "tptc1",
- .class = &dra7xx_tptc_hwmod_class,
- .clkdm_name = "l3main1_clkdm",
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3MAIN1_TPTC2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3MAIN1_TPTC2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * 'dss' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_dss_sysc = {
- .rev_offs = 0x0000,
- .syss_offs = 0x0014,
- .sysc_flags = SYSS_HAS_RESET_STATUS,
-};
-
-static struct omap_hwmod_class dra7xx_dss_hwmod_class = {
- .name = "dss",
- .sysc = &dra7xx_dss_sysc,
- .reset = omap_dss_reset,
-};
-
-/* dss */
-static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "dss_clk", .clk = "dss_dss_clk" },
- { .role = "hdmi_phy_clk", .clk = "dss_48mhz_clk" },
- { .role = "32khz_clk", .clk = "dss_32khz_clk" },
- { .role = "video2_clk", .clk = "dss_video2_clk" },
- { .role = "video1_clk", .clk = "dss_video1_clk" },
- { .role = "hdmi_clk", .clk = "dss_hdmi_clk" },
- { .role = "hdcp_clk", .clk = "dss_deshdcp_clk" },
-};
-
-static struct omap_hwmod dra7xx_dss_hwmod = {
- .name = "dss_core",
- .class = &dra7xx_dss_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_DSS_DSS_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = dss_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
-};
-
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_dispc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_dispc_hwmod_class = {
- .name = "dispc",
- .sysc = &dra7xx_dispc_sysc,
-};
-
-/* dss_dispc */
-/* dss_dispc dev_attr */
-static struct omap_dss_dispc_dev_attr dss_dispc_dev_attr = {
- .has_framedonetv_irq = 1,
- .manager_count = 4,
-};
-
-static struct omap_hwmod dra7xx_dss_dispc_hwmod = {
- .name = "dss_dispc",
- .class = &dra7xx_dispc_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "dss_dss_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
- .dev_attr = &dss_dispc_dev_attr,
- .parent_hwmod = &dra7xx_dss_hwmod,
-};
-
-/*
- * 'hdmi' class
- * hdmi controller
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_hdmi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class dra7xx_hdmi_hwmod_class = {
- .name = "hdmi",
- .sysc = &dra7xx_hdmi_sysc,
-};
-
-/* dss_hdmi */
-
-static struct omap_hwmod_opt_clk dss_hdmi_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_hdmi_clk" },
-};
-
-static struct omap_hwmod dra7xx_dss_hdmi_hwmod = {
- .name = "dss_hdmi",
- .class = &dra7xx_hdmi_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "dss_48mhz_clk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_DSS_DSS_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
- .opt_clks = dss_hdmi_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_hdmi_opt_clks),
- .parent_hwmod = &dra7xx_dss_hwmod,
-};
-
-
-
-
-
-/*
* 'gpmc' class
*
*/
@@ -1077,54 +880,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_wkup__ctrl_module_wkup = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l3_main_1 -> tpcc */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__tpcc = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_tpcc_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_1 -> tptc0 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__tptc0 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_tptc0_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_1 -> tptc1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__tptc1 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_tptc1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_1 -> dss */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dss = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_dss_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> dispc */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dispc = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_dss_dispc_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> dispc */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_dss_hdmi_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_1 -> gpmc */
static struct omap_hwmod_ocp_if dra7xx_l3_main_1__gpmc = {
.master = &dra7xx_l3_main_1_hwmod,
@@ -1309,12 +1064,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l3_main_1__bb2d,
&dra7xx_l4_wkup__counter_32k,
&dra7xx_l4_wkup__ctrl_module_wkup,
- &dra7xx_l3_main_1__tpcc,
- &dra7xx_l3_main_1__tptc0,
- &dra7xx_l3_main_1__tptc1,
- &dra7xx_l3_main_1__dss,
- &dra7xx_l3_main_1__dispc,
- &dra7xx_l3_main_1__hdmi,
&dra7xx_l3_main_1__gpmc,
&dra7xx_l4_cfg__mpu,
&dra7xx_l3_main_1__pciess1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index 83230d9ce5ed..6a9f1ad9d413 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -129,13 +129,6 @@ static struct omap_hwmod dm81xx_alwon_l3_med_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-static struct omap_hwmod dm81xx_alwon_l3_fast_hwmod = {
- .name = "l3_fast",
- .clkdm_name = "alwon_l3_fast_clkdm",
- .class = &l3_hwmod_class,
- .flags = HWMOD_NO_IDLEST,
-};
-
/*
* L4 standard peripherals, see TRM table 1-12 for devices using this.
* See TRM table 1-73 for devices using the 125MHz SYSCLK6 clock.
@@ -867,62 +860,6 @@ static struct omap_hwmod_ocp_if dm816x_l4_ls__timer7 = {
.user = OCP_USER_MPU,
};
-/* CPSW on dm814x */
-static struct omap_hwmod_class_sysconfig dm814x_cpgmac_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x8,
- .syss_offs = 0x4,
- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
- SYSS_HAS_RESET_STATUS,
- .idlemodes = SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE |
- MSTANDBY_NO,
- .sysc_fields = &omap_hwmod_sysc_type3,
-};
-
-static struct omap_hwmod_class dm814x_cpgmac0_hwmod_class = {
- .name = "cpgmac0",
- .sysc = &dm814x_cpgmac_sysc,
-};
-
-static struct omap_hwmod dm814x_cpgmac0_hwmod = {
- .name = "cpgmac0",
- .class = &dm814x_cpgmac0_hwmod_class,
- .clkdm_name = "alwon_ethernet_clkdm",
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
- .main_clk = "cpsw_125mhz_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_class dm814x_mdio_hwmod_class = {
- .name = "davinci_mdio",
-};
-
-static struct omap_hwmod dm814x_mdio_hwmod = {
- .name = "davinci_mdio",
- .class = &dm814x_mdio_hwmod_class,
- .clkdm_name = "alwon_ethernet_clkdm",
- .main_clk = "cpsw_125mhz_gclk",
-};
-
-static struct omap_hwmod_ocp_if dm814x_l4_hs__cpgmac0 = {
- .master = &dm81xx_l4_hs_hwmod,
- .slave = &dm814x_cpgmac0_hwmod,
- .clk = "cpsw_125mhz_gclk",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if dm814x_cpgmac0__mdio = {
- .master = &dm814x_cpgmac0_hwmod,
- .slave = &dm814x_mdio_hwmod,
- .user = OCP_USER_MPU,
- .flags = HWMOD_NO_IDLEST,
-};
-
/* EMAC Ethernet */
static struct omap_hwmod_class_sysconfig dm816x_emac_sysc = {
.rev_offs = 0x0,
@@ -1321,154 +1258,6 @@ static struct omap_hwmod_ocp_if dm81xx_l4_ls__spinbox = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_class dm81xx_tpcc_hwmod_class = {
- .name = "tpcc",
-};
-
-static struct omap_hwmod dm81xx_tpcc_hwmod = {
- .name = "tpcc",
- .class = &dm81xx_tpcc_hwmod_class,
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "sysclk4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM81XX_CM_ALWON_TPCC_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tpcc = {
- .master = &dm81xx_alwon_l3_fast_hwmod,
- .slave = &dm81xx_tpcc_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_class dm81xx_tptc0_hwmod_class = {
- .name = "tptc0",
-};
-
-static struct omap_hwmod dm81xx_tptc0_hwmod = {
- .name = "tptc0",
- .class = &dm81xx_tptc0_hwmod_class,
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "sysclk4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM81XX_CM_ALWON_TPTC0_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc0 = {
- .master = &dm81xx_alwon_l3_fast_hwmod,
- .slave = &dm81xx_tptc0_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if dm81xx_tptc0__alwon_l3_fast = {
- .master = &dm81xx_tptc0_hwmod,
- .slave = &dm81xx_alwon_l3_fast_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_class dm81xx_tptc1_hwmod_class = {
- .name = "tptc1",
-};
-
-static struct omap_hwmod dm81xx_tptc1_hwmod = {
- .name = "tptc1",
- .class = &dm81xx_tptc1_hwmod_class,
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "sysclk4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM81XX_CM_ALWON_TPTC1_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc1 = {
- .master = &dm81xx_alwon_l3_fast_hwmod,
- .slave = &dm81xx_tptc1_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if dm81xx_tptc1__alwon_l3_fast = {
- .master = &dm81xx_tptc1_hwmod,
- .slave = &dm81xx_alwon_l3_fast_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_class dm81xx_tptc2_hwmod_class = {
- .name = "tptc2",
-};
-
-static struct omap_hwmod dm81xx_tptc2_hwmod = {
- .name = "tptc2",
- .class = &dm81xx_tptc2_hwmod_class,
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "sysclk4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM81XX_CM_ALWON_TPTC2_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc2 = {
- .master = &dm81xx_alwon_l3_fast_hwmod,
- .slave = &dm81xx_tptc2_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if dm81xx_tptc2__alwon_l3_fast = {
- .master = &dm81xx_tptc2_hwmod,
- .slave = &dm81xx_alwon_l3_fast_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_class dm81xx_tptc3_hwmod_class = {
- .name = "tptc3",
-};
-
-static struct omap_hwmod dm81xx_tptc3_hwmod = {
- .name = "tptc3",
- .class = &dm81xx_tptc3_hwmod_class,
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "sysclk4_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM81XX_CM_ALWON_TPTC3_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc3 = {
- .master = &dm81xx_alwon_l3_fast_hwmod,
- .slave = &dm81xx_tptc3_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if dm81xx_tptc3__alwon_l3_fast = {
- .master = &dm81xx_tptc3_hwmod,
- .slave = &dm81xx_alwon_l3_fast_hwmod,
- .clk = "sysclk4_ck",
- .user = OCP_USER_MPU,
-};
-
/*
* REVISIT: Test and enable the following once clocks work:
* dm81xx_l4_ls__mailbox
@@ -1499,19 +1288,8 @@ static struct omap_hwmod_ocp_if *dm814x_hwmod_ocp_ifs[] __initdata = {
&dm814x_l4_ls__mmc1,
&dm814x_l4_ls__mmc2,
&ti81xx_l4_ls__rtc,
- &dm81xx_alwon_l3_fast__tpcc,
- &dm81xx_alwon_l3_fast__tptc0,
- &dm81xx_alwon_l3_fast__tptc1,
- &dm81xx_alwon_l3_fast__tptc2,
- &dm81xx_alwon_l3_fast__tptc3,
- &dm81xx_tptc0__alwon_l3_fast,
- &dm81xx_tptc1__alwon_l3_fast,
- &dm81xx_tptc2__alwon_l3_fast,
- &dm81xx_tptc3__alwon_l3_fast,
&dm814x_l4_ls__timer1,
&dm814x_l4_ls__timer2,
- &dm814x_l4_hs__cpgmac0,
- &dm814x_cpgmac0__mdio,
&dm81xx_alwon_l3_slow__gpmc,
&dm814x_default_l3_slow__usbss,
&dm814x_alwon_l3_med__mmc3,
@@ -1554,15 +1332,6 @@ static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = {
&dm81xx_emac0__mdio,
&dm816x_l4_hs__emac1,
&dm81xx_l4_hs__sata,
- &dm81xx_alwon_l3_fast__tpcc,
- &dm81xx_alwon_l3_fast__tptc0,
- &dm81xx_alwon_l3_fast__tptc1,
- &dm81xx_alwon_l3_fast__tptc2,
- &dm81xx_alwon_l3_fast__tptc3,
- &dm81xx_tptc0__alwon_l3_fast,
- &dm81xx_tptc1__alwon_l3_fast,
- &dm81xx_tptc2__alwon_l3_fast,
- &dm81xx_tptc3__alwon_l3_fast,
&dm81xx_alwon_l3_slow__gpmc,
&dm816x_default_l3_slow__usbss,
NULL,
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index dbb7c2acef31..2a4fe3e68b82 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -397,10 +397,16 @@ static int ti_sysc_shutdown_module(struct device *dev,
return omap_hwmod_shutdown(cookie->data);
}
+static bool ti_sysc_soc_type_gp(void)
+{
+ return omap_type() == OMAP2_DEVICE_TYPE_GP;
+}
+
static struct of_dev_auxdata omap_auxdata_lookup[];
static struct ti_sysc_platform_data ti_sysc_pdata = {
.auxdata = omap_auxdata_lookup,
+ .soc_type_gp = ti_sysc_soc_type_gp,
.init_clockdomain = ti_sysc_clkdm_init,
.clkdm_deny_idle = ti_sysc_clkdm_deny_idle,
.clkdm_allow_idle = ti_sysc_clkdm_allow_idle,
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
index 7461b0346549..5455fc98c60e 100644
--- a/arch/arm/mach-omap2/pm33xx-core.c
+++ b/arch/arm/mach-omap2/pm33xx-core.c
@@ -6,11 +6,14 @@
* Dave Gerlach
*/
+#include <linux/cpuidle.h>
+#include <linux/platform_data/pm33xx.h>
+#include <asm/cpuidle.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <linux/errno.h>
-#include <linux/platform_data/pm33xx.h>
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/wkup_m3_ipc.h>
@@ -35,6 +38,14 @@ static struct clockdomain *gfx_l4ls_clkdm;
static void __iomem *scu_base;
static struct omap_hwmod *rtc_oh;
+static int (*idle_fn)(u32 wfi_flags);
+
+struct amx3_idle_state {
+ int wfi_flags;
+};
+
+static struct amx3_idle_state *idle_states;
+
static int am43xx_map_scu(void)
{
scu_base = ioremap(scu_a9_get_base(), SZ_256);
@@ -68,7 +79,7 @@ static int am43xx_check_off_mode_enable(void)
return 0;
}
-static int amx3_common_init(void)
+static int amx3_common_init(int (*idle)(u32 wfi_flags))
{
gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
per_pwrdm = pwrdm_lookup("per_pwrdm");
@@ -88,10 +99,12 @@ static int amx3_common_init(void)
else
omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
+ idle_fn = idle;
+
return 0;
}
-static int am33xx_suspend_init(void)
+static int am33xx_suspend_init(int (*idle)(u32 wfi_flags))
{
int ret;
@@ -102,12 +115,12 @@ static int am33xx_suspend_init(void)
return -ENODEV;
}
- ret = amx3_common_init();
+ ret = amx3_common_init(idle);
return ret;
}
-static int am43xx_suspend_init(void)
+static int am43xx_suspend_init(int (*idle)(u32 wfi_flags))
{
int ret = 0;
@@ -117,11 +130,17 @@ static int am43xx_suspend_init(void)
return ret;
}
- ret = amx3_common_init();
+ ret = amx3_common_init(idle);
return ret;
}
+static int amx3_suspend_deinit(void)
+{
+ idle_fn = NULL;
+ return 0;
+}
+
static void amx3_pre_suspend_common(void)
{
omap_set_pwrdm_state(gfx_pwrdm, PWRDM_POWER_OFF);
@@ -201,6 +220,43 @@ static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
return ret;
}
+static int am33xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
+{
+ int ret = 0;
+
+ if (omap_irq_pending() || need_resched())
+ return ret;
+
+ ret = cpu_suspend(args, fn);
+
+ return ret;
+}
+
+static int am43xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
+{
+ int ret = 0;
+
+ if (!scu_base)
+ return 0;
+
+ scu_power_mode(scu_base, SCU_PM_DORMANT);
+ ret = cpu_suspend(args, fn);
+ scu_power_mode(scu_base, SCU_PM_NORMAL);
+
+ return ret;
+}
+
+static void amx3_begin_suspend(void)
+{
+ cpu_idle_poll_ctrl(true);
+}
+
+static void amx3_finish_suspend(void)
+{
+ cpu_idle_poll_ctrl(false);
+}
+
static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void)
{
if (soc_is_am33xx())
@@ -253,7 +309,11 @@ static void am43xx_prepare_rtc_resume(void)
static struct am33xx_pm_platform_data am33xx_ops = {
.init = am33xx_suspend_init,
+ .deinit = amx3_suspend_deinit,
.soc_suspend = am33xx_suspend,
+ .cpu_suspend = am33xx_cpu_suspend,
+ .begin_suspend = amx3_begin_suspend,
+ .finish_suspend = amx3_finish_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
.save_context = am33xx_save_context,
.restore_context = am33xx_restore_context,
@@ -265,7 +325,11 @@ static struct am33xx_pm_platform_data am33xx_ops = {
static struct am33xx_pm_platform_data am43xx_ops = {
.init = am43xx_suspend_init,
+ .deinit = amx3_suspend_deinit,
.soc_suspend = am43xx_suspend,
+ .cpu_suspend = am43xx_cpu_suspend,
+ .begin_suspend = amx3_begin_suspend,
+ .finish_suspend = amx3_finish_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
.save_context = am43xx_save_context,
.restore_context = am43xx_restore_context,
@@ -301,3 +365,64 @@ int __init amx3_common_pm_init(void)
return 0;
}
+
+static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
+{
+ struct device_node *state_node;
+ struct amx3_idle_state states[CPUIDLE_STATE_MAX];
+ int i;
+ int state_count = 1;
+
+ for (i = 0; ; i++) {
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ if (!state_node)
+ break;
+
+ if (!of_device_is_available(state_node))
+ continue;
+
+ if (i == CPUIDLE_STATE_MAX) {
+ pr_warn("%s: cpuidle states reached max possible\n",
+ __func__);
+ break;
+ }
+
+ states[state_count].wfi_flags = 0;
+
+ if (of_property_read_bool(state_node, "ti,idle-wkup-m3"))
+ states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 |
+ WFI_FLAG_FLUSH_CACHE;
+
+ state_count++;
+ }
+
+ idle_states = kcalloc(state_count, sizeof(*idle_states), GFP_KERNEL);
+ if (!idle_states)
+ return -ENOMEM;
+
+ for (i = 1; i < state_count; i++)
+ idle_states[i].wfi_flags = states[i].wfi_flags;
+
+ return 0;
+}
+
+static int amx3_idle_enter(unsigned long index)
+{
+ struct amx3_idle_state *idle_state = &idle_states[index];
+
+ if (!idle_state)
+ return -EINVAL;
+
+ if (idle_fn)
+ idle_fn(idle_state->wfi_flags);
+
+ return 0;
+}
+
+static struct cpuidle_ops amx3_cpuidle_ops __initdata = {
+ .init = amx3_idle_init,
+ .suspend = amx3_idle_enter,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(pm33xx_idle, "ti,am3352", &amx3_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(pm43xx_idle, "ti,am4372", &amx3_cpuidle_ops);
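The two CPUIDLE_METHOD_OF_DECLARE() entries above bind amx3_cpuidle_ops to the "ti,am3352"/"ti,am4372" enable-method strings on the cpu nodes; the generic ARM cpuidle glue then calls ->init() once per CPU at boot (amx3_idle_init() reserves index 0 for plain WFI and fills idle_states[] from index 1) and ->suspend(index) whenever the governor picks a state. A minimal stand-alone model of that dispatch, assuming nothing beyond standard C (the demo_* names are illustrative, not kernel API):

	#include <stdio.h>

	struct cpuidle_ops {
		int (*init)(int cpu);			/* parse DT, build state table */
		int (*suspend)(unsigned long index);	/* enter the chosen state */
	};

	static int demo_init(int cpu)
	{
		printf("init: parsed idle states for cpu%d\n", cpu);
		return 0;
	}

	static int demo_suspend(unsigned long index)
	{
		printf("suspend: entering state %lu\n", index);
		return 0;
	}

	static struct cpuidle_ops demo_ops = { demo_init, demo_suspend };

	int main(void)
	{
		demo_ops.init(0);	/* boot time, once per CPU */
		demo_ops.suspend(1);	/* governor selected state 1 */
		return 0;
	}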
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index e66e9948636c..6df395fff971 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -191,6 +191,7 @@ void omap_sram_idle(void)
int per_next_state = PWRDM_POWER_ON;
int core_next_state = PWRDM_POWER_ON;
u32 sdrc_pwr = 0;
+ int error;
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
switch (mpu_next_state) {
@@ -219,8 +220,11 @@ void omap_sram_idle(void)
pwrdm_pre_transition(NULL);
/* PER */
- if (per_next_state == PWRDM_POWER_OFF)
- cpu_cluster_pm_enter();
+ if (per_next_state == PWRDM_POWER_OFF) {
+ error = cpu_cluster_pm_enter();
+ if (error)
+ return;
+ }
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
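The new error check exists because cpu_cluster_pm_enter() and cpu_cluster_pm_exit() form a strictly paired notifier transaction: on failure, cpu_cluster_pm_enter() already unwinds with CPU_CLUSTER_PM_ENTER_FAILED notifications, so the caller must bail out without powering down and without calling the exit side. A hedged sketch of that pairing contract (example_cluster_idle() is an illustrative name, not an existing kernel function):

	#include <linux/cpu_pm.h>

	static void example_cluster_idle(void)
	{
		/* Tell drivers the whole cluster is about to lose state */
		if (cpu_cluster_pm_enter())
			return;	/* a notifier vetoed entry; ENTER_FAILED was
				 * already broadcast, so no exit call here */

		/* ... power the cluster down and bring it back up ... */

		/* Let drivers restore cluster-wide state */
		cpu_cluster_pm_exit();
	}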
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 0d0a731cb476..8b09cdacc30d 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -91,12 +91,6 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction omap2_gp_timer_irq = {
- .name = "gp_timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = omap2_gp_timer_interrupt,
-};
-
static int omap2_gp_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
@@ -382,8 +376,9 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
&clockevent_gpt.name, OMAP_TIMER_POSTED);
BUG_ON(res);
- omap2_gp_timer_irq.dev_id = &clkev;
- setup_irq(clkev.irq, &omap2_gp_timer_irq);
+ if (request_irq(clkev.irq, omap2_gp_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "gp_timer", &clkev))
+ pr_err("Failed to request irq %d (gp_timer)\n", clkev.irq);
__omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);
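This conversion (repeated below for orion5x, rpc, spear and plat-orion) replaces the old static struct irqaction + setup_irq() pattern, which is being removed from the kernel, with a plain request_irq() call. A generic sketch of the modernized form, assuming a driver-local handler (my_timer_interrupt and my_timer_init are placeholders):

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>

	static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
	{
		/* ... acknowledge the timer and feed the clockevent ... */
		return IRQ_HANDLED;
	}

	static void __init my_timer_init(int irq)
	{
		/* IRQF_TIMER marks the tick interrupt; IRQF_IRQPOLL lets the
		 * irqpoll fallback use it; the last argument is handed back
		 * to the handler as dev_id */
		if (request_irq(irq, my_timer_interrupt,
				IRQF_TIMER | IRQF_IRQPOLL, "my_timer", NULL))
			pr_err("Failed to request irq %d (my_timer)\n", irq);
	}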
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index cf9cb3d2590e..e94a61901ffd 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -3,7 +3,6 @@ menuconfig ARCH_ORION5X
bool "Marvell Orion"
depends on MMU && ARCH_MULTI_V5
select CPU_FEROCEON
- select GENERIC_CLOCKEVENTS
select GPIOLIB
select MVEBU_MBUS
select FORCE_PCI
@@ -18,7 +17,6 @@ if ARCH_ORION5X
config ARCH_ORION5X_DT
bool "Marvell Orion5x Flattened Device Tree"
- select USE_OF
select ORION_CLK
select ORION_IRQCHIP
select ORION_TIMER
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index fda9b75c3a33..a39764faf2a0 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -398,7 +398,6 @@ static int ts78xx_fpga_load_devices(void)
static int ts78xx_fpga_unload_devices(void)
{
- int ret = 0;
if (ts78xx_fpga.supports.ts_rtc.present == 1)
ts78xx_ts_rtc_unload();
@@ -407,7 +406,7 @@ static int ts78xx_fpga_unload_devices(void)
if (ts78xx_fpga.supports.ts_rng.present == 1)
ts78xx_ts_rng_unload();
- return ret;
+ return 0;
}
static int ts78xx_fpga_load(void)
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index ecbf3c4eb878..1772eccb5caf 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -12,6 +12,11 @@ menuconfig ARCH_QCOM
if ARCH_QCOM
+config ARCH_IPQ40XX
+ bool "Enable support for IPQ40XX"
+ select CLKSRC_QCOM
+ select HAVE_ARM_ARCH_TIMER
+
config ARCH_MSM8X60
bool "Enable support for MSM8X60"
select CLKSRC_QCOM
diff --git a/arch/arm/mach-rpc/time.c b/arch/arm/mach-rpc/time.c
index 1d750152b160..da85cac761ba 100644
--- a/arch/arm/mach-rpc/time.c
+++ b/arch/arm/mach-rpc/time.c
@@ -85,11 +85,6 @@ ioc_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction ioc_timer_irq = {
- .name = "timer",
- .handler = ioc_timer_interrupt
-};
-
/*
* Set up timer interrupt.
*/
@@ -97,5 +92,6 @@ void __init ioc_timer_init(void)
{
WARN_ON(clocksource_register_hz(&ioctime_clocksource, RPC_CLOCK_FREQ));
ioctime_init();
- setup_irq(IRQ_TIMER0, &ioc_timer_irq);
+ if (request_irq(IRQ_TIMER0, ioc_timer_interrupt, 0, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", IRQ_TIMER0);
}
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index b13ec9088ce5..86406e3f9b22 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -72,7 +72,6 @@ static const char *const r8a7779_compat_dt[] __initconst = {
DT_MACHINE_START(R8A7779_DT, "Generic R8A7779 (Flattened Device Tree)")
.smp = smp_ops(r8a7779_smp_ops),
.map_io = r8a7779_map_io,
- .init_early = shmobile_init_delay,
.init_irq = r8a7779_init_irq_dt,
.init_late = shmobile_init_late,
.dt_compat = r8a7779_compat_dt,
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 2fd3aa6f3212..1ee5cd2840e0 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -7,7 +7,6 @@
* Copyright (C) 2014 Ulrich Hecht
*/
-#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
@@ -15,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/of.h>
+#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/psci.h>
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index cc08aa752244..eb4a62fa4289 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -56,7 +56,6 @@ static const char *const sh73a0_boards_compat_dt[] __initconst = {
DT_MACHINE_START(SH73A0_DT, "Generic SH73A0 (Flattened Device Tree)")
.smp = smp_ops(sh73a0_smp_ops),
.map_io = sh73a0_map_io,
- .init_early = shmobile_init_delay,
.init_machine = sh73a0_generic_init,
.init_late = shmobile_init_late,
.dt_compat = sh73a0_boards_compat_dt,
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c
index 289e036c9c30..d1fdb6066f7b 100644
--- a/arch/arm/mach-spear/time.c
+++ b/arch/arm/mach-spear/time.c
@@ -181,12 +181,6 @@ static irqreturn_t spear_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction spear_timer_irq = {
- .name = "timer",
- .flags = IRQF_TIMER,
- .handler = spear_timer_interrupt
-};
-
static void __init spear_clockevent_init(int irq)
{
u32 tick_rate;
@@ -201,7 +195,8 @@ static void __init spear_clockevent_init(int irq)
clockevents_config_and_register(&clkevt, tick_rate, 3, 0xfff0);
- setup_irq(irq, &spear_timer_irq);
+ if (request_irq(irq, spear_timer_interrupt, IRQF_TIMER, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", irq);
}
static const struct of_device_id timer_of_match[] __initconst = {
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 933b6930f024..06da2747a90b 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -10,9 +10,9 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/of_clk.h>
#include <linux/platform_device.h>
#include <linux/reset/sunxi.h>
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 6c1dff2eccc2..07572b5373b8 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -8,29 +8,14 @@ obj-y += reset.o
obj-y += reset-handler.o
obj-y += sleep.o
obj-y += tegra.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += sleep-tegra20.o
+obj-y += sleep-tegra20.o
+obj-y += sleep-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pm-tegra20.o
-ifeq ($(CONFIG_CPU_IDLE),y)
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += cpuidle-tegra20.o
-endif
-obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += sleep-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += pm-tegra30.o
-ifeq ($(CONFIG_CPU_IDLE),y)
-obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += cpuidle-tegra30.o
-endif
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_ARCH_TEGRA_114_SOC) += sleep-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += pm-tegra30.o
-ifeq ($(CONFIG_CPU_IDLE),y)
-obj-$(CONFIG_ARCH_TEGRA_114_SOC) += cpuidle-tegra114.o
-endif
-obj-$(CONFIG_ARCH_TEGRA_124_SOC) += sleep-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += pm-tegra30.o
-ifeq ($(CONFIG_CPU_IDLE),y)
-obj-$(CONFIG_ARCH_TEGRA_124_SOC) += cpuidle-tegra114.o
-endif
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += board-paz00.o
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
deleted file mode 100644
index 5118f777fd66..000000000000
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
- */
-
-#include <asm/firmware.h>
-#include <linux/tick.h>
-#include <linux/cpuidle.h>
-#include <linux/cpu_pm.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <linux/firmware/trusted_foundations.h>
-
-#include <asm/cpuidle.h>
-#include <asm/smp_plat.h>
-#include <asm/suspend.h>
-#include <asm/psci.h>
-
-#include "cpuidle.h"
-#include "pm.h"
-#include "sleep.h"
-
-#ifdef CONFIG_PM_SLEEP
-#define TEGRA114_MAX_STATES 2
-#else
-#define TEGRA114_MAX_STATES 1
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int tegra114_idle_power_down(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- local_fiq_disable();
-
- tegra_set_cpu_in_lp2();
- cpu_pm_enter();
-
- call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
-
- /* Do suspend by ourselves if the firmware does not implement it */
- if (call_firmware_op(do_idle, 0) == -ENOSYS)
- cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
-
- cpu_pm_exit();
- tegra_clear_cpu_in_lp2();
-
- local_fiq_enable();
-
- return index;
-}
-
-static void tegra114_idle_enter_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- tegra114_idle_power_down(dev, drv, index);
-}
-#endif
-
-static struct cpuidle_driver tegra_idle_driver = {
- .name = "tegra_idle",
- .owner = THIS_MODULE,
- .state_count = TEGRA114_MAX_STATES,
- .states = {
- [0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
-#ifdef CONFIG_PM_SLEEP
- [1] = {
- .enter = tegra114_idle_power_down,
- .enter_s2idle = tegra114_idle_enter_s2idle,
- .exit_latency = 500,
- .target_residency = 1000,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
- .power_usage = 0,
- .name = "powered-down",
- .desc = "CPU power gated",
- },
-#endif
- },
-};
-
-int __init tegra114_cpuidle_init(void)
-{
- if (!psci_smp_available())
- return cpuidle_register(&tegra_idle_driver, NULL);
-
- return 0;
-}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
deleted file mode 100644
index 69f3fa270fbe..000000000000
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CPU idle driver for Tegra CPUs
- *
- * Copyright (c) 2010-2012, NVIDIA Corporation.
- * Copyright (c) 2011 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- * Gary King <gking@nvidia.com>
- *
- * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com>
- */
-
-#include <linux/clk/tegra.h>
-#include <linux/tick.h>
-#include <linux/cpuidle.h>
-#include <linux/cpu_pm.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <soc/tegra/flowctrl.h>
-
-#include <asm/cpuidle.h>
-#include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-#include "cpuidle.h"
-#include "iomap.h"
-#include "irq.h"
-#include "pm.h"
-#include "reset.h"
-#include "sleep.h"
-
-#ifdef CONFIG_PM_SLEEP
-static bool abort_flag;
-static atomic_t abort_barrier;
-static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
-#define TEGRA20_MAX_STATES 2
-#else
-#define TEGRA20_MAX_STATES 1
-#endif
-
-static struct cpuidle_driver tegra_idle_driver = {
- .name = "tegra_idle",
- .owner = THIS_MODULE,
- .states = {
- ARM_CPUIDLE_WFI_STATE_PWR(600),
-#ifdef CONFIG_PM_SLEEP
- {
- .enter = tegra20_idle_lp2_coupled,
- .exit_latency = 5000,
- .target_residency = 10000,
- .power_usage = 0,
- .flags = CPUIDLE_FLAG_COUPLED |
- CPUIDLE_FLAG_TIMER_STOP,
- .name = "powered-down",
- .desc = "CPU power gated",
- },
-#endif
- },
- .state_count = TEGRA20_MAX_STATES,
- .safe_state_index = 0,
-};
-
-#ifdef CONFIG_PM_SLEEP
-#ifdef CONFIG_SMP
-static int tegra20_reset_sleeping_cpu_1(void)
-{
- int ret = 0;
-
- tegra_pen_lock();
-
- if (readb(tegra20_cpu1_resettable_status) == CPU_RESETTABLE)
- tegra20_cpu_shutdown(1);
- else
- ret = -EINVAL;
-
- tegra_pen_unlock();
-
- return ret;
-}
-
-static void tegra20_wake_cpu1_from_reset(void)
-{
- tegra_pen_lock();
-
- tegra20_cpu_clear_resettable();
-
- /* enable cpu clock on cpu */
- tegra_enable_cpu_clock(1);
-
- /* take the CPU out of reset */
- tegra_cpu_out_of_reset(1);
-
- /* unhalt the cpu */
- flowctrl_write_cpu_halt(1, 0);
-
- tegra_pen_unlock();
-}
-
-static int tegra20_reset_cpu_1(void)
-{
- if (!cpu_online(1) || !tegra20_reset_sleeping_cpu_1())
- return 0;
-
- tegra20_wake_cpu1_from_reset();
- return -EBUSY;
-}
-#else
-static inline void tegra20_wake_cpu1_from_reset(void)
-{
-}
-
-static inline int tegra20_reset_cpu_1(void)
-{
- return 0;
-}
-#endif
-
-static bool tegra20_cpu_cluster_power_down(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- while (tegra20_cpu_is_resettable_soon())
- cpu_relax();
-
- if (tegra20_reset_cpu_1() || !tegra_cpu_rail_off_ready())
- return false;
-
- tegra_idle_lp2_last();
-
- if (cpu_online(1))
- tegra20_wake_cpu1_from_reset();
-
- return true;
-}
-
-#ifdef CONFIG_SMP
-static bool tegra20_idle_enter_lp2_cpu_1(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- cpu_suspend(0, tegra20_sleep_cpu_secondary_finish);
-
- tegra20_cpu_clear_resettable();
-
- return true;
-}
-#else
-static inline bool tegra20_idle_enter_lp2_cpu_1(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- return true;
-}
-#endif
-
-static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- bool entered_lp2 = false;
-
- if (tegra_pending_sgi())
- WRITE_ONCE(abort_flag, true);
-
- cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
-
- if (abort_flag) {
- cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
- abort_flag = false; /* clean flag for next coming */
- return -EINTR;
- }
-
- local_fiq_disable();
-
- tegra_set_cpu_in_lp2();
- cpu_pm_enter();
-
- if (dev->cpu == 0)
- entered_lp2 = tegra20_cpu_cluster_power_down(dev, drv, index);
- else
- entered_lp2 = tegra20_idle_enter_lp2_cpu_1(dev, drv, index);
-
- cpu_pm_exit();
- tegra_clear_cpu_in_lp2();
-
- local_fiq_enable();
-
- smp_rmb();
-
- return entered_lp2 ? index : 0;
-}
-#endif
-
-/*
- * Tegra20 HW appears to have a bug such that PCIe device interrupts, whether
- * they are legacy IRQs or MSI, are lost when LP2 is enabled. To work around
- * this, simply disable LP2 if the PCI driver and DT node are both enabled.
- */
-void tegra20_cpuidle_pcie_irqs_in_use(void)
-{
- pr_info_once(
- "Disabling cpuidle LP2 state, since PCIe IRQs are in use\n");
- cpuidle_driver_state_disabled(&tegra_idle_driver, 1, true);
-}
-
-int __init tegra20_cpuidle_init(void)
-{
- return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
-}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
deleted file mode 100644
index c6128526877d..000000000000
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CPU idle driver for Tegra CPUs
- *
- * Copyright (c) 2010-2012, NVIDIA Corporation.
- * Copyright (c) 2011 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- * Gary King <gking@nvidia.com>
- *
- * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com>
- */
-
-#include <linux/clk/tegra.h>
-#include <linux/tick.h>
-#include <linux/cpuidle.h>
-#include <linux/cpu_pm.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/cpuidle.h>
-#include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-#include "cpuidle.h"
-#include "pm.h"
-#include "sleep.h"
-
-#ifdef CONFIG_PM_SLEEP
-static int tegra30_idle_lp2(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
-#endif
-
-static struct cpuidle_driver tegra_idle_driver = {
- .name = "tegra_idle",
- .owner = THIS_MODULE,
-#ifdef CONFIG_PM_SLEEP
- .state_count = 2,
-#else
- .state_count = 1,
-#endif
- .states = {
- [0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
-#ifdef CONFIG_PM_SLEEP
- [1] = {
- .enter = tegra30_idle_lp2,
- .exit_latency = 2000,
- .target_residency = 2200,
- .power_usage = 0,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
- .name = "powered-down",
- .desc = "CPU power gated",
- },
-#endif
- },
-};
-
-#ifdef CONFIG_PM_SLEEP
-static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- /* All CPUs entering LP2 is not working.
- * Don't let CPU0 enter LP2 when any secondary CPU is online.
- */
- if (num_online_cpus() > 1 || !tegra_cpu_rail_off_ready()) {
- cpu_do_idle();
- return false;
- }
-
- tegra_idle_lp2_last();
-
- return true;
-}
-
-#ifdef CONFIG_SMP
-static bool tegra30_cpu_core_power_down(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- smp_wmb();
-
- cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
-
- return true;
-}
-#else
-static inline bool tegra30_cpu_core_power_down(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- return true;
-}
-#endif
-
-static int tegra30_idle_lp2(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- bool entered_lp2 = false;
- bool last_cpu;
-
- local_fiq_disable();
-
- last_cpu = tegra_set_cpu_in_lp2();
- cpu_pm_enter();
-
- if (dev->cpu == 0) {
- if (last_cpu)
- entered_lp2 = tegra30_cpu_cluster_power_down(dev, drv,
- index);
- else
- cpu_do_idle();
- } else {
- entered_lp2 = tegra30_cpu_core_power_down(dev, drv, index);
- }
-
- cpu_pm_exit();
- tegra_clear_cpu_in_lp2();
-
- local_fiq_enable();
-
- smp_rmb();
-
- return (entered_lp2) ? index : 0;
-}
-#endif
-
-int __init tegra30_cpuidle_init(void)
-{
- return cpuidle_register(&tegra_idle_driver, NULL);
-}
diff --git a/arch/arm/mach-tegra/cpuidle.c b/arch/arm/mach-tegra/cpuidle.c
deleted file mode 100644
index d565c44cfc93..000000000000
--- a/arch/arm/mach-tegra/cpuidle.c
+++ /dev/null
@@ -1,50 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/mach-tegra/cpuidle.c
- *
- * CPU idle driver for Tegra CPUs
- *
- * Copyright (c) 2010-2012, NVIDIA Corporation.
- * Copyright (c) 2011 Google, Inc.
- * Author: Colin Cross <ccross@android.com>
- * Gary King <gking@nvidia.com>
- *
- * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <soc/tegra/fuse.h>
-
-#include "cpuidle.h"
-
-void __init tegra_cpuidle_init(void)
-{
- switch (tegra_get_chip_id()) {
- case TEGRA20:
- if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
- tegra20_cpuidle_init();
- break;
- case TEGRA30:
- if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC))
- tegra30_cpuidle_init();
- break;
- case TEGRA114:
- case TEGRA124:
- if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
- IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
- tegra114_cpuidle_init();
- break;
- }
-}
-
-void tegra_cpuidle_pcie_irqs_in_use(void)
-{
- switch (tegra_get_chip_id()) {
- case TEGRA20:
- if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
- tegra20_cpuidle_pcie_irqs_in_use();
- break;
- }
-}
diff --git a/arch/arm/mach-tegra/cpuidle.h b/arch/arm/mach-tegra/cpuidle.h
deleted file mode 100644
index 4e1f459f5bd8..000000000000
--- a/arch/arm/mach-tegra/cpuidle.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
- */
-
-#ifndef __MACH_TEGRA_CPUIDLE_H
-#define __MACH_TEGRA_CPUIDLE_H
-
-#ifdef CONFIG_CPU_IDLE
-int tegra20_cpuidle_init(void);
-void tegra20_cpuidle_pcie_irqs_in_use(void);
-int tegra30_cpuidle_init(void);
-int tegra114_cpuidle_init(void);
-void tegra_cpuidle_init(void);
-void tegra_cpuidle_pcie_irqs_in_use(void);
-#else
-static inline void tegra_cpuidle_init(void) {}
-static inline void tegra_cpuidle_pcie_irqs_in_use(void) {}
-#endif
-
-#endif
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index ace7a390b5fe..4e1ee70b2a3f 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -18,9 +18,10 @@
#include <linux/of.h>
#include <linux/syscore_ops.h>
+#include <soc/tegra/irq.h>
+
#include "board.h"
#include "iomap.h"
-#include "irq.h"
#define SGI_MASK 0xFFFF
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 3cab81b82866..d1e1a61b12cf 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -110,7 +110,7 @@ static void suspend_cpu_complex(void)
flowctrl_cpu_suspend_enter(cpu);
}
-void tegra_clear_cpu_in_lp2(void)
+void tegra_pm_clear_cpu_in_lp2(void)
{
int phy_cpu_id = cpu_logical_map(smp_processor_id());
u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;
@@ -123,11 +123,9 @@ void tegra_clear_cpu_in_lp2(void)
spin_unlock(&tegra_lp2_lock);
}
-bool tegra_set_cpu_in_lp2(void)
+void tegra_pm_set_cpu_in_lp2(void)
{
int phy_cpu_id = cpu_logical_map(smp_processor_id());
- bool last_cpu = false;
- cpumask_t *cpu_lp2_mask = tegra_cpu_lp2_mask;
u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;
spin_lock(&tegra_lp2_lock);
@@ -135,22 +133,15 @@ bool tegra_set_cpu_in_lp2(void)
BUG_ON((*cpu_in_lp2 & BIT(phy_cpu_id)));
*cpu_in_lp2 |= BIT(phy_cpu_id);
- if ((phy_cpu_id == 0) && cpumask_equal(cpu_lp2_mask, cpu_online_mask))
- last_cpu = true;
- else if (tegra_get_chip_id() == TEGRA20 && phy_cpu_id == 1)
- tegra20_cpu_set_resettable_soon();
-
spin_unlock(&tegra_lp2_lock);
- return last_cpu;
-}
-
-int tegra_cpu_do_idle(void)
-{
- return cpu_do_idle();
}
static int tegra_sleep_cpu(unsigned long v2p)
{
+ if (tegra_cpu_car_ops->rail_off_ready &&
+ WARN_ON(!tegra_cpu_rail_off_ready()))
+ return -EBUSY;
+
/*
* L2 cache disabling using kernel API only allowed when all
* secondary CPU's are offline. Cache have to be disabled with
@@ -159,9 +150,10 @@ static int tegra_sleep_cpu(unsigned long v2p)
* if any of secondary CPU's is online and this is the LP2-idle
* code-path only for Tegra20/30.
*/
- if (trusted_foundations_registered())
- outer_disable();
-
+#ifdef CONFIG_OUTER_CACHE
+ if (trusted_foundations_registered() && outer_cache.disable)
+ outer_cache.disable();
+#endif
/*
* Note that besides setting up the CPU reset vector this firmware
* call may also do the following, depending on the FW version:
@@ -202,14 +194,16 @@ static void tegra_pm_set(enum tegra_suspend_mode mode)
tegra_pmc_enter_suspend_mode(mode);
}
-void tegra_idle_lp2_last(void)
+int tegra_pm_enter_lp2(void)
{
+ int err;
+
tegra_pm_set(TEGRA_SUSPEND_LP2);
cpu_cluster_pm_enter();
suspend_cpu_complex();
- cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);
+ err = cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);
/*
* Resume L2 cache if it wasn't re-enabled early during resume,
@@ -221,6 +215,8 @@ void tegra_idle_lp2_last(void)
restore_cpu_complex();
cpu_cluster_pm_exit();
+
+ return err;
}
enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
@@ -365,7 +361,7 @@ static int tegra_suspend_enter(suspend_state_t state)
tegra_suspend_enter_lp1();
break;
case TEGRA_SUSPEND_LP2:
- tegra_set_cpu_in_lp2();
+ tegra_pm_set_cpu_in_lp2();
break;
default:
break;
@@ -386,7 +382,7 @@ static int tegra_suspend_enter(suspend_state_t state)
tegra_suspend_exit_lp1();
break;
case TEGRA_SUSPEND_LP2:
- tegra_clear_cpu_in_lp2();
+ tegra_pm_clear_cpu_in_lp2();
break;
default:
break;
@@ -436,4 +432,18 @@ void __init tegra_init_suspend(void)
suspend_set_ops(&tegra_suspend_ops);
}
+
+int tegra_pm_park_secondary_cpu(unsigned long cpu)
+{
+ if (cpu > 0) {
+ tegra_disable_clean_inv_dcache(TEGRA_FLUSH_CACHE_LOUIS);
+
+ if (tegra_get_chip_id() == TEGRA20)
+ tegra20_hotplug_shutdown();
+ else
+ tegra30_hotplug_shutdown();
+ }
+
+ return -EINVAL;
+}
#endif
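tegra_pm_park_secondary_cpu() is written as a cpu_suspend() "finisher": such a function is expected to power the CPU off and never return, and any value it does return propagates back as cpu_suspend()'s failure code, hence the unconditional -EINVAL at the end. A hedged sketch of that calling convention (park_cpu/enter_idle are illustrative names):

	#include <asm/suspend.h>
	#include <linux/printk.h>

	static int park_cpu(unsigned long cpu)
	{
		/* ... flush caches, then trigger the hardware power-off ... */
		return -EINVAL;	/* reached only if the power-off did not happen */
	}

	static void enter_idle(unsigned long cpu)
	{
		/* cpu_suspend() saves CPU context, calls park_cpu(cpu), and on
		 * a wake-by-reset resumes through the saved context with 0 */
		if (cpu_suspend(cpu, park_cpu))
			pr_err("CPU%lu refused to power down\n", cpu);
	}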
diff --git a/arch/arm/mach-tegra/pm.h b/arch/arm/mach-tegra/pm.h
index 569151b3edc0..81525f5f4a44 100644
--- a/arch/arm/mach-tegra/pm.h
+++ b/arch/arm/mach-tegra/pm.h
@@ -23,10 +23,6 @@ void tegra20_sleep_core_init(void);
void tegra30_lp1_iram_hook(void);
void tegra30_sleep_core_init(void);
-void tegra_clear_cpu_in_lp2(void);
-bool tegra_set_cpu_in_lp2(void);
-int tegra_cpu_do_idle(void);
-void tegra_idle_lp2_last(void);
extern void (*tegra_tear_down_cpu)(void);
#ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index e3f34815c9da..53123ae4ac3b 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -183,17 +183,6 @@ after_errata:
bleq __die @ CPU not present (to OS)
#endif
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
- /* Are we on Tegra20? */
- cmp r6, #TEGRA20
- bne 1f
- /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
- mov r0, #CPU_NOT_RESETTABLE
- cmp r10, #0
- strbne r0, [r12, #RESET_DATA(RESETTABLE_STATUS)]
-1:
-#endif
-
/* Waking up from LP1? */
ldr r8, [r12, #RESET_DATA(MASK_LP1)]
tst r8, r11 @ if in_lp1
diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
index a4cfc08159f6..51265592cb1a 100644
--- a/arch/arm/mach-tegra/reset.h
+++ b/arch/arm/mach-tegra/reset.h
@@ -16,9 +16,8 @@
#define TEGRA_RESET_STARTUP_SECONDARY 3
#define TEGRA_RESET_STARTUP_LP2 4
#define TEGRA_RESET_STARTUP_LP1 5
-#define TEGRA_RESET_RESETTABLE_STATUS 6
-#define TEGRA_RESET_TF_PRESENT 7
-#define TEGRA_RESET_DATA_SIZE 8
+#define TEGRA_RESET_TF_PRESENT 6
+#define TEGRA_RESET_DATA_SIZE 7
#define RESET_DATA(x) ((TEGRA_RESET_##x)*4)
@@ -42,10 +41,6 @@ void __tegra_cpu_reset_handler_end(void);
(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
(u32)__tegra_cpu_reset_handler_start)))
-#define tegra20_cpu1_resettable_status \
- (IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
- ((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_RESETTABLE_STATUS] - \
- (u32)__tegra_cpu_reset_handler_start)))
#endif
#define tegra_cpu_reset_handler_offset \
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index 9a89f30d53ca..0e00ba8cf646 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -43,9 +43,6 @@
#define APB_MISC_XM2CFGCPADCTRL2 0x8e4
#define APB_MISC_XM2CFGDPADCTRL2 0x8e8
-#define __tegra20_cpu1_resettable_status_offset \
- (__tegra_cpu_reset_handler_data_offset + RESET_DATA(RESETTABLE_STATUS))
-
.macro pll_enable, rd, r_car_base, pll_base
ldr \rd, [\r_car_base, #\pll_base]
tst \rd, #(1 << 30)
@@ -90,10 +87,6 @@ ENDPROC(tegra20_hotplug_shutdown)
ENTRY(tegra20_cpu_shutdown)
cmp r0, #0
reteq lr @ must not be called for CPU 0
- mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
- ldr r2, =__tegra20_cpu1_resettable_status_offset
- mov r12, #CPU_RESETTABLE
- strb r12, [r1, r2]
cpu_to_halt_reg r1, r0
ldr r3, =TEGRA_FLOW_CTRL_VIRT
@@ -117,107 +110,6 @@ ENDPROC(tegra20_cpu_shutdown)
#ifdef CONFIG_PM_SLEEP
/*
- * tegra_pen_lock
- *
- * spinlock implementation with no atomic test-and-set and no coherence
- * using Peterson's algorithm on strongly-ordered registers
- * used to synchronize a cpu waking up from wfi with entering lp2 on idle
- *
- * The reference link of Peterson's algorithm:
- * http://en.wikipedia.org/wiki/Peterson's_algorithm
- *
- * SCRATCH37 = r1 = !turn (inverted from Peterson's algorithm)
- * on cpu 0:
- * r2 = flag[0] (in SCRATCH38)
- * r3 = flag[1] (in SCRATCH39)
- * on cpu1:
- * r2 = flag[1] (in SCRATCH39)
- * r3 = flag[0] (in SCRATCH38)
- *
- * must be called with MMU on
- * corrupts r0-r3, r12
- */
-ENTRY(tegra_pen_lock)
- mov32 r3, TEGRA_PMC_VIRT
- cpu_id r0
- add r1, r3, #PMC_SCRATCH37
- cmp r0, #0
- addeq r2, r3, #PMC_SCRATCH38
- addeq r3, r3, #PMC_SCRATCH39
- addne r2, r3, #PMC_SCRATCH39
- addne r3, r3, #PMC_SCRATCH38
-
- mov r12, #1
- str r12, [r2] @ flag[cpu] = 1
- dsb
- str r12, [r1] @ !turn = cpu
-1: dsb
- ldr r12, [r3]
- cmp r12, #1 @ flag[!cpu] == 1?
- ldreq r12, [r1]
- cmpeq r12, r0 @ !turn == cpu?
- beq 1b @ while !turn == cpu && flag[!cpu] == 1
-
- ret lr @ locked
-ENDPROC(tegra_pen_lock)
-
-ENTRY(tegra_pen_unlock)
- dsb
- mov32 r3, TEGRA_PMC_VIRT
- cpu_id r0
- cmp r0, #0
- addeq r2, r3, #PMC_SCRATCH38
- addne r2, r3, #PMC_SCRATCH39
- mov r12, #0
- str r12, [r2]
- ret lr
-ENDPROC(tegra_pen_unlock)
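For reference, the Peterson's-algorithm lock deleted above maps onto the following portable C sketch (a stand-alone illustration using C11 atomics, not kernel code; the assembly kept the turn variable inverted in PMC_SCRATCH37, which the sketch does not reproduce):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool flag[2];	/* flag[cpu]: cpu wants the lock */
	static atomic_int  turn;	/* whose turn it is to wait */

	static void pen_lock(int cpu)
	{
		int other = 1 - cpu;

		atomic_store(&flag[cpu], true);	/* announce intent */
		atomic_store(&turn, cpu);	/* defer to the other CPU */
		/* spin while the other CPU also wants the lock and it is our
		 * turn to wait; seq_cst atomics provide the ordering the asm
		 * obtained from dsb */
		while (atomic_load(&flag[other]) && atomic_load(&turn) == cpu)
			;
	}

	static void pen_unlock(int cpu)
	{
		atomic_store(&flag[cpu], false);
	}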
-
-/*
- * tegra20_cpu_clear_resettable(void)
- *
- * Called to clear the "resettable soon" flag in IRAM variable when
- * it is expected that the secondary CPU will be idle soon.
- */
-ENTRY(tegra20_cpu_clear_resettable)
- mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
- ldr r2, =__tegra20_cpu1_resettable_status_offset
- mov r12, #CPU_NOT_RESETTABLE
- strb r12, [r1, r2]
- ret lr
-ENDPROC(tegra20_cpu_clear_resettable)
-
-/*
- * tegra20_cpu_set_resettable_soon(void)
- *
- * Called to set the "resettable soon" flag in IRAM variable when
- * it is expected that the secondary CPU will be idle soon.
- */
-ENTRY(tegra20_cpu_set_resettable_soon)
- mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
- ldr r2, =__tegra20_cpu1_resettable_status_offset
- mov r12, #CPU_RESETTABLE_SOON
- strb r12, [r1, r2]
- ret lr
-ENDPROC(tegra20_cpu_set_resettable_soon)
-
-/*
- * tegra20_cpu_is_resettable_soon(void)
- *
- * Returns true if the "resettable soon" flag in IRAM variable has been
- * set because it is expected that the secondary CPU will be idle soon.
- */
-ENTRY(tegra20_cpu_is_resettable_soon)
- mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
- ldr r2, =__tegra20_cpu1_resettable_status_offset
- ldrb r12, [r1, r2]
- cmp r12, #CPU_RESETTABLE_SOON
- moveq r0, #1
- movne r0, #0
- ret lr
-ENDPROC(tegra20_cpu_is_resettable_soon)
-
-/*
* tegra20_sleep_core_finish(unsigned long v2p)
*
* Enters suspend in LP0 or LP1 by turning off the mmu and jumping to
@@ -243,68 +135,6 @@ ENTRY(tegra20_sleep_core_finish)
ENDPROC(tegra20_sleep_core_finish)
/*
- * tegra20_sleep_cpu_secondary_finish(unsigned long v2p)
- *
- * Enters WFI on secondary CPU by exiting coherency.
- */
-ENTRY(tegra20_sleep_cpu_secondary_finish)
- stmfd sp!, {r4-r11, lr}
-
- mrc p15, 0, r11, c1, c0, 1 @ save actlr before exiting coherency
-
- /* Flush and disable the L1 data cache */
- mov r0, #TEGRA_FLUSH_CACHE_LOUIS
- bl tegra_disable_clean_inv_dcache
-
- mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT
- ldr r4, =__tegra20_cpu1_resettable_status_offset
- mov r3, #CPU_RESETTABLE
- strb r3, [r0, r4]
-
- bl tegra_cpu_do_idle
-
- /*
- * cpu may be reset while in wfi, which will return through
- * tegra_resume to cpu_resume
- * or interrupt may wake wfi, which will return here
- * cpu state is unchanged - MMU is on, cache is on, coherency
- * is off, and the data cache is off
- *
- * r11 contains the original actlr
- */
-
- bl tegra_pen_lock
-
- mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT
- ldr r4, =__tegra20_cpu1_resettable_status_offset
- mov r3, #CPU_NOT_RESETTABLE
- strb r3, [r0, r4]
-
- bl tegra_pen_unlock
-
- /* Re-enable the data cache */
- mrc p15, 0, r10, c1, c0, 0
- orr r10, r10, #CR_C
- mcr p15, 0, r10, c1, c0, 0
- isb
-
- mcr p15, 0, r11, c1, c0, 1 @ reenable coherency
-
- /* Invalidate the TLBs & BTAC */
- mov r1, #0
- mcr p15, 0, r1, c8, c3, 0 @ invalidate shared TLBs
- mcr p15, 0, r1, c7, c1, 6 @ invalidate shared BTAC
- dsb
- isb
-
- /* the cpu was running with coherency disabled,
- * caches may be out of date */
- bl v7_flush_kern_cache_louis
-
- ldmfd sp!, {r4 - r11, pc}
-ENDPROC(tegra20_sleep_cpu_secondary_finish)
-
-/*
* tegra20_tear_down_cpu
*
* Switches the CPU cluster to PLL-P and enters sleep.
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index 02cc6ff96f30..e7bcf7dc4675 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -265,11 +265,11 @@ ENTRY(tegra30_sleep_core_finish)
ENDPROC(tegra30_sleep_core_finish)
/*
- * tegra30_sleep_cpu_secondary_finish(unsigned long v2p)
+ * tegra30_pm_secondary_cpu_suspend(unsigned long unused_arg)
*
* Enters LP2 on secondary CPU by exiting coherency and powergating the CPU.
*/
-ENTRY(tegra30_sleep_cpu_secondary_finish)
+ENTRY(tegra30_pm_secondary_cpu_suspend)
mov r7, lr
/* Flush and disable the L1 data cache */
@@ -281,7 +281,7 @@ ENTRY(tegra30_sleep_cpu_secondary_finish)
bl tegra30_cpu_shutdown
mov r0, #1 @ never return here
ret r7
-ENDPROC(tegra30_sleep_cpu_secondary_finish)
+ENDPROC(tegra30_pm_secondary_cpu_suspend)
/*
* tegra30_tear_down_cpu
diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
index 78ef32a907c8..4718a3cb45a1 100644
--- a/arch/arm/mach-tegra/sleep.h
+++ b/arch/arm/mach-tegra/sleep.h
@@ -114,29 +114,14 @@
.endm
#else
-void tegra_pen_lock(void);
-void tegra_pen_unlock(void);
void tegra_resume(void);
int tegra_sleep_cpu_finish(unsigned long);
void tegra_disable_clean_inv_dcache(u32 flag);
-#ifdef CONFIG_HOTPLUG_CPU
void tegra20_hotplug_shutdown(void);
void tegra30_hotplug_shutdown(void);
-#endif
-
-void tegra20_cpu_shutdown(int cpu);
-int tegra20_cpu_is_resettable_soon(void);
-void tegra20_cpu_clear_resettable(void);
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
-void tegra20_cpu_set_resettable_soon(void);
-#else
-static inline void tegra20_cpu_set_resettable_soon(void) {}
-#endif
-int tegra20_sleep_cpu_secondary_finish(unsigned long);
void tegra20_tear_down_cpu(void);
-int tegra30_sleep_cpu_secondary_finish(unsigned long);
void tegra30_tear_down_cpu(void);
#endif
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index e512e606eabd..f1ce2857a251 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -36,13 +36,12 @@
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach-types.h>
+#include <asm/psci.h>
#include <asm/setup.h>
#include "board.h"
#include "common.h"
-#include "cpuidle.h"
#include "iomap.h"
-#include "irq.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"
@@ -86,7 +85,6 @@ static void __init tegra_dt_init(void)
static void __init tegra_dt_init_late(void)
{
tegra_init_suspend();
- tegra_cpuidle_init();
if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) &&
of_machine_is_compatible("compal,paz00"))
@@ -95,6 +93,9 @@ static void __init tegra_dt_init_late(void)
if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) &&
of_machine_is_compatible("nvidia,tegra20"))
platform_device_register_simple("tegra20-cpufreq", -1, NULL, 0);
+
+ if (IS_ENABLED(CONFIG_ARM_TEGRA_CPUIDLE) && !psci_smp_available())
+ platform_device_register_simple("tegra-cpuidle", -1, NULL, 0);
}
static const char * const tegra_dt_board_compat[] = {
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 3a4248fd7962..a9dd2f71cd19 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -12,10 +12,10 @@
#include <linux/cpumask.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/clk-provider.h>
#include <linux/clk/zynq.h>
#include <linux/clocksource.h>
#include <linux/of_address.h>
+#include <linux/of_clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of.h>
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9414d72f664b..8a8949174b1c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static int arm_dma_supported(struct device *dev, u64 mask)
+{
+ unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ return dma_to_pfn(dev, mask) >= max_dma_pfn;
+}
+
const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -219,49 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
-{
- unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
-
- /*
- * Translate the device's DMA mask to a PFN limit. This
- * PFN number includes the page which we can DMA to.
- */
- if (dma_to_pfn(dev, mask) < max_dma_pfn) {
- if (warn)
- dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
- mask,
- dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
- max_dma_pfn + 1);
- return 0;
- }
-
- return 1;
-}
-
-static u64 get_coherent_dma_mask(struct device *dev)
-{
- u64 mask = (u64)DMA_BIT_MASK(32);
-
- if (dev) {
- mask = dev->coherent_dma_mask;
-
- /*
- * Sanity check the DMA mask - it must be non-zero, and
- * must be able to be satisfied by a DMA allocation.
- */
- if (mask == 0) {
- dev_warn(dev, "coherent DMA mask is unset\n");
- return 0;
- }
-
- if (!__dma_supported(dev, mask, true))
- return 0;
- }
-
- return mask;
-}
-
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
/*
@@ -688,7 +662,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, pgprot_t prot, bool is_coherent,
unsigned long attrs, const void *caller)
{
- u64 mask = get_coherent_dma_mask(dev);
+ u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
struct page *page = NULL;
void *addr;
bool allowblock, cma;
@@ -712,9 +686,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
}
#endif
- if (!mask)
- return NULL;
-
buf = kzalloc(sizeof(*buf),
gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
if (!buf)
@@ -1087,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
dir);
}
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-int arm_dma_supported(struct device *dev, u64 mask)
-{
- return __dma_supported(dev, mask, false);
-}
-
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
/*
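arm_dma_supported() is the platform hook behind the generic dma_supported() check; a driver exercises it indirectly by declaring its addressing limit. A hedged driver-side sketch matching the 24-bit example in the comment above (foo_probe is a placeholder, not an existing function):

	#include <linux/dma-mapping.h>

	static int foo_probe(struct device *dev)
	{
		/* A device that can only drive 24 address lines during bus
		 * mastering declares that limit; the DMA core consults the
		 * platform's dma_supported() hook to accept or reject it. */
		int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));

		if (ret)
			dev_err(dev, "no usable DMA configuration\n");
		return ret;
	}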
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bd0f4821f7e1..b598e6978b29 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -241,7 +241,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
struct mm_struct *mm;
int sig, code;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
if (kprobe_page_fault(regs, fsr))
return 0;
@@ -295,7 +295,7 @@ retry:
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return 0;
@@ -319,9 +319,6 @@ retry:
regs, addr);
}
if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
goto retry;
}
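FAULT_FLAG_DEFAULT bundles FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | FAULT_FLAG_INTERRUPTIBLE, and with the starvation comment gone ALLOW_RETRY now stays set across retries, so a fault may legitimately retry more than once. A condensed model of the resulting arch loop, assuming the v5.7-era three-argument handle_mm_fault() (demo_fault_loop is illustrative; error paths and the trylock fast path are elided):

	#include <linux/mm.h>
	#include <linux/sched/signal.h>

	static int demo_fault_loop(struct vm_area_struct *vma, unsigned long addr,
				   struct pt_regs *regs)
	{
		struct mm_struct *mm = vma->vm_mm;
		unsigned int flags = FAULT_FLAG_DEFAULT;
		vm_fault_t fault;

		down_read(&mm->mmap_sem);
	retry:
		fault = handle_mm_fault(vma, addr, flags);

		/* Central signal check: fatal signals always win; with
		 * FAULT_FLAG_INTERRUPTIBLE even ordinary signals abort a
		 * user-mode fault. When VM_FAULT_RETRY is set the core has
		 * already dropped mmap_sem, so just return. */
		if (fault_signal_pending(fault, regs))
			return 0;

		if (fault & VM_FAULT_RETRY) {
			/* ALLOW_RETRY is no longer cleared, so this can
			 * loop more than once */
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem); /* re-take the dropped lock */
			goto retry;
		}

		up_read(&mm->mmap_sem);
		return 0;
	}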
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 5d0d0f86e790..69a337df619f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -63,9 +63,6 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
-pgprot_t pgprot_hyp_device;
-pgprot_t pgprot_s2;
-pgprot_t pgprot_s2_device;
EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
@@ -75,15 +72,8 @@ struct cachepolicy {
unsigned int cr_mask;
pmdval_t pmd;
pteval_t pte;
- pteval_t pte_s2;
};
-#ifdef CONFIG_ARM_LPAE
-#define s2_policy(policy) policy
-#else
-#define s2_policy(policy) 0
-#endif
-
unsigned long kimage_voffset __ro_after_init;
static struct cachepolicy cache_policies[] __initdata = {
@@ -92,31 +82,26 @@ static struct cachepolicy cache_policies[] __initdata = {
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
.pte = L_PTE_MT_UNCACHED,
- .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
.pte = L_PTE_MT_BUFFERABLE,
- .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
.pte = L_PTE_MT_WRITETHROUGH,
- .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
.pte = L_PTE_MT_WRITEBACK,
- .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
.pte = L_PTE_MT_WRITEALLOC,
- .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
}
};
@@ -246,9 +231,6 @@ static struct mem_type mem_types[] __ro_after_init = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
- .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
- s2_policy(L_PTE_S2_MT_DEV_SHARED) |
- L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@@ -434,7 +416,6 @@ static void __init build_mem_type_table(void)
struct cachepolicy *cp;
unsigned int cr = get_cr();
pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
- pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -558,9 +539,6 @@ static void __init build_mem_type_table(void)
*/
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
- s2_pgprot = cp->pte_s2;
- hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
- s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
#ifndef CONFIG_ARM_LPAE
/*
@@ -604,7 +582,6 @@ static void __init build_mem_type_table(void)
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
- s2_pgprot |= L_PTE_SHARED;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -666,9 +643,6 @@ static void __init build_mem_type_table(void)
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | kern_pgprot);
- pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
- pgprot_s2_device = __pgprot(s2_device_pgprot);
- pgprot_hyp_device = __pgprot(hyp_device_pgprot);
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index ffb93db68e9c..509d4824dc1c 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -177,12 +177,6 @@ static irqreturn_t orion_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irqaction orion_timer_irq = {
- .name = "orion_tick",
- .flags = IRQF_TIMER,
- .handler = orion_timer_interrupt
-};
-
void __init
orion_time_set_base(void __iomem *_timer_base)
{
@@ -236,7 +230,9 @@ orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask,
/*
* Setup clockevent timer (interrupt-driven).
*/
- setup_irq(irq, &orion_timer_irq);
+ if (request_irq(irq, orion_timer_interrupt, IRQF_TIMER, "orion_tick",
+ NULL))
+ pr_err("Failed to request irq %u (orion_tick)\n", irq);
orion_clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&orion_clkevt, tclk, 1, 0xfffffffe);
}
diff --git a/arch/arm/vdso/.gitignore b/arch/arm/vdso/.gitignore
index 6b47f6e0b032..dfa06f5365cf 100644
--- a/arch/arm/vdso/.gitignore
+++ b/arch/arm/vdso/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
vdso.so.raw
vdsomunge
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 76778b2f520d..55d70cfe0f9e 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -39,7 +39,6 @@ config ARCH_BCM2835
select ARM_AMBA
select ARM_GIC
select ARM_TIMER_SP804
- select HAVE_ARM_ARCH_TIMER
help
This enables support for the Broadcom BCM2837 and BCM2711 SoC.
These SoCs are used in the Raspberry Pi 3 and 4 devices.
@@ -301,7 +300,6 @@ config ARCH_ZX
config ARCH_ZYNQMP
bool "Xilinx ZynqMP Family"
- select ZYNQMP_FIRMWARE
help
This enables support for Xilinx ZynqMP Family
diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore
index 8dab0bb6ae66..9a7a9009d43a 100644
--- a/arch/arm64/boot/.gitignore
+++ b/arch/arm64/boot/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
Image
Image.gz
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index cf4f78617c3f..e4d3cd0ac5bb 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -9,6 +9,9 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-orangepi-win.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-lts.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-plus.dtb sun50i-a64-pine64.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinebook.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.0.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.1.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinetab.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-sopine-baseboard.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-teres-i.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-bananapi-m2-plus.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
index 6dfafa1c879b..5fa9ca0191a8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
@@ -158,6 +158,17 @@
*/
};
+&pio {
+ vcc-pa-supply = <&reg_dcdc1>;
+ vcc-pb-supply = <&reg_dcdc1>;
+ vcc-pc-supply = <&reg_dcdc1>;
+ vcc-pd-supply = <&reg_dcdc1>;
+ vcc-pe-supply = <&reg_aldo1>;
+ vcc-pf-supply = <&reg_dcdc1>;
+ vcc-pg-supply = <&reg_dldo4>;
+ vcc-ph-supply = <&reg_dcdc1>;
+};
+
&r_rsb {
status = "okay";
@@ -170,6 +181,12 @@
};
};
+/* VCC-PL is powered by aldo2 but we cannot add it as the RSB */
+/* interface used to talk to the PMIC is on the PL pins */
+/* &r_pio { */
+/* vcc-pl-supply = <&reg_aldo2>; */
+/* }; */
+
#include "axp803.dtsi"
&ac_power_supply {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 3d894b208901..64b1c54f87c0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -21,32 +21,17 @@
ethernet0 = &rtl8723cs;
};
- vdd_bl: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "bl-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
- enable-active-high;
- };
-
backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pwm 0 50000 0>;
brightness-levels = <0 5 10 15 20 30 40 55 70 85 100>;
default-brightness-level = <2>;
enable-gpios = <&pio 3 23 GPIO_ACTIVE_HIGH>; /* PD23 */
- power-supply = <&vdd_bl>;
+ power-supply = <&reg_vbklt>;
};
chosen {
stdout-path = "serial0:115200n8";
-
- framebuffer-lcd {
- panel-supply = <&reg_dc1sw>;
- dvdd25-supply = <&reg_dldo2>;
- dvdd12-supply = <&reg_fldo1>;
- };
};
gpio_keys {
@@ -63,11 +48,34 @@
};
};
- reg_vcc3v3: vcc3v3 {
+ panel_edp: panel-edp {
+ compatible = "neweast,wjfh116008a";
+ backlight = <&backlight>;
+ power-supply = <&reg_dc1sw>;
+
+ port {
+ panel_edp_in: endpoint {
+ remote-endpoint = <&anx6345_out_edp>;
+ };
+ };
+ };
+
+ reg_vbklt: vbklt {
+ compatible = "regulator-fixed";
+ regulator-name = "vbklt";
+ regulator-min-microvolt = <18000000>;
+ regulator-max-microvolt = <18000000>;
+ gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
+ enable-active-high;
+ };
+
+ reg_vcc5v0: vcc5v0 {
compatible = "regulator-fixed";
- regulator-name = "vcc3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&axp_gpio 0 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
};
wifi_pwrseq: wifi_pwrseq {
@@ -77,12 +85,7 @@
speaker_amp: audio-amplifier {
compatible = "simple-audio-amplifier";
- /*
- * TODO This is actually a fixed regulator controlled by
- * the GPIO line on the PMIC. This should be corrected
- * once GPIO support is added for this PMIC.
- */
- VCC-supply = <&reg_ldo_io0>;
+ VCC-supply = <&reg_vcc5v0>;
enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
sound-name-prefix = "Speaker Amp";
};
@@ -118,6 +121,10 @@
status = "okay";
};
+&de {
+ status = "okay";
+};
+
&ehci0 {
phys = <&usbphy 0>;
phy-names = "usb";
@@ -128,11 +135,15 @@
status = "okay";
};
+&mixer0 {
+ status = "okay";
+};
+
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_dcdc1>;
- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
disable-wp;
bus-width = <4>;
status = "okay";
@@ -175,10 +186,61 @@
status = "okay";
};
+&pio {
+ vcc-pc-supply = <&reg_eldo1>;
+ vcc-pd-supply = <&reg_dcdc1>;
+ vcc-pe-supply = <&reg_aldo1>;
+ vcc-pg-supply = <&reg_eldo1>;
+};
+
&pwm {
status = "okay";
};
+&r_i2c {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&r_i2c_pl89_pins>;
+ status = "okay";
+
+ anx6345: anx6345@38 {
+ compatible = "analogix,anx6345";
+ reg = <0x38>;
+ reset-gpios = <&pio 3 24 GPIO_ACTIVE_LOW>; /* PD24 */
+ dvdd25-supply = <&reg_dldo2>;
+ dvdd12-supply = <&reg_fldo1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ anx6345_in: port@0 {
+ reg = <0>;
+ anx6345_in_tcon0: endpoint {
+ remote-endpoint = <&tcon0_out_anx6345>;
+ };
+ };
+
+ anx6345_out: port@1 {
+ reg = <1>;
+ anx6345_out_edp: endpoint {
+ remote-endpoint = <&panel_edp_in>;
+ };
+ };
+ };
+ };
+};
+
+&r_pio {
+ /*
+ * FIXME: We can't add that supply for now since it would
+ * create a circular dependency between pinctrl, the regulator
+ * and the RSB Bus.
+ *
+ * vcc-pl-supply = <&reg_aldo2>;
+ */
+};
+
&r_rsb {
status = "okay";
@@ -190,14 +252,6 @@
};
};
-/* The ANX6345 eDP-bridge is on r_i2c */
-&r_i2c {
- clock-frequency = <100000>;
- pinctrl-names = "default";
- pinctrl-0 = <&r_i2c_pl89_pins>;
- status = "okay";
-};
-
#include "axp803.dtsi"
&ac_power_supply {
@@ -209,9 +263,7 @@
};
&reg_aldo1 {
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-name = "vcc-csi";
+ regulator-name = "vcc-pe";
};
&reg_aldo2 {
@@ -274,12 +326,6 @@
regulator-name = "vcc-edp";
};
-&reg_dldo3 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "avdd-csi";
-};
-
&reg_dldo4 {
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -293,12 +339,6 @@
regulator-name = "cpvdd";
};
-&reg_eldo3 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-name = "vdd-1v8-csi";
-};
-
&reg_fldo1 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
@@ -312,17 +352,16 @@
regulator-name = "vdd-cpus";
};
-&reg_ldo_io0 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-usb";
- status = "okay";
-};
-
&reg_rtc_ldo {
regulator-name = "vcc-rtc";
};
+&simplefb_lcd {
+ panel-supply = <&reg_dc1sw>;
+ dvdd25-supply = <&reg_dldo2>;
+ dvdd12-supply = <&reg_fldo1>;
+};
+
&simplefb_hdmi {
vcc-hdmi-supply = <&reg_dldo1>;
};
@@ -350,6 +389,19 @@
"MIC2", "Internal Microphone Right";
};
+&tcon0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_rgb666_pins>;
+
+ status = "okay";
+};
+
+&tcon0_out {
+ tcon0_out_anx6345: endpoint {
+ remote-endpoint = <&anx6345_in_tcon0>;
+ };
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pb_pins>;
@@ -361,7 +413,7 @@
};
&usbphy {
- usb0_vbus-supply = <&reg_ldo_io0>;
- usb1_vbus-supply = <&reg_ldo_io0>;
+ usb0_vbus-supply = <&reg_vcc5v0>;
+ usb1_vbus-supply = <&reg_vcc5v0>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
new file mode 100644
index 000000000000..0c42272106af
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2020 Ondrej Jirman <megous@megous.com>
+
+/dts-v1/;
+
+#include "sun50i-a64-pinephone.dtsi"
+
+/ {
+ model = "Pine64 PinePhone Developer Batch (1.0)";
+ compatible = "pine64,pinephone-1.0", "allwinner,sun50i-a64";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
new file mode 100644
index 000000000000..06a775c41664
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2020 Ondrej Jirman <megous@megous.com>
+
+/dts-v1/;
+
+#include "sun50i-a64-pinephone.dtsi"
+
+/ {
+ model = "Pine64 PinePhone Braveheart (1.1)";
+ compatible = "pine64,pinephone-1.1", "allwinner,sun50i-a64";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
new file mode 100644
index 000000000000..cefda145c3c9
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2019 Icenowy Zheng <icenowy@aosc.xyz>
+// Copyright (C) 2020 Martijn Braam <martijn@brixit.nl>
+// Copyright (C) 2020 Ondrej Jirman <megous@megous.com>
+
+#include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ blue {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pio 3 20 GPIO_ACTIVE_HIGH>; /* PD20 */
+ };
+
+ green {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pio 3 18 GPIO_ACTIVE_HIGH>; /* PD18 */
+ };
+
+ red {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&pio 3 19 GPIO_ACTIVE_HIGH>; /* PD19 */
+ };
+ };
+
+ speaker_amp: audio-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&pio 2 7 GPIO_ACTIVE_HIGH>; /* PC7 */
+ sound-name-prefix = "Speaker Amp";
+ };
+
+ vibrator {
+ compatible = "gpio-vibrator";
+ enable-gpios = <&pio 3 2 GPIO_ACTIVE_HIGH>; /* PD2 */
+ vcc-supply = <&reg_dcdc1>;
+ };
+};
+
+&codec {
+ status = "okay";
+};
+
+&codec_analog {
+ cpvdd-supply = <&reg_eldo1>;
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&dai {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ /* Magnetometer */
+ lis3mdl@1e {
+ compatible = "st,lis3mdl-magn";
+ reg = <0x1e>;
+ vdd-supply = <&reg_dldo1>;
+ vddio-supply = <&reg_dldo1>;
+ };
+
+ /* Accelerometer/gyroscope */
+ mpu6050@68 {
+ compatible = "invensense,mpu6050";
+ reg = <0x68>;
+ interrupt-parent = <&pio>;
+ interrupts = <7 5 IRQ_TYPE_EDGE_RISING>; /* PH5 */
+ vdd-supply = <&reg_dldo1>;
+ vddio-supply = <&reg_dldo1>;
+ };
+};
+
+/* Connected to pogo pins (external spring based pinheader for user addons) */
+&i2c2 {
+ status = "okay";
+};
+
+&lradc {
+ vref-supply = <&reg_aldo3>;
+ status = "okay";
+
+ button-200 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <200000>;
+ };
+
+ button-400 {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <400000>;
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_dcdc1>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ disable-wp;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_dcdc1>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pb-supply = <&reg_dcdc1>;
+ vcc-pc-supply = <&reg_dcdc1>;
+ vcc-pd-supply = <&reg_dcdc1>;
+ vcc-pe-supply = <&reg_aldo1>;
+ vcc-pf-supply = <&reg_dcdc1>;
+ vcc-pg-supply = <&reg_dldo4>;
+ vcc-ph-supply = <&reg_dcdc1>;
+};
+
+&r_pio {
+ /*
+ * FIXME: We can't add that supply for now since it would
+ * create a circular dependency between pinctrl, the regulator
+ * and the RSB Bus.
+ *
+ * vcc-pl-supply = <&reg_aldo2>;
+ */
+};
+
+&r_rsb {
+ status = "okay";
+
+ axp803: pmic@3a3 {
+ compatible = "x-powers,axp803";
+ reg = <0x3a3>;
+ interrupt-parent = <&r_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+#include "axp803.dtsi"
+
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
+&reg_aldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "dovdd-csi";
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pl";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pll-avcc";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-name = "vdd-cpux";
+};
+
+/* DCDC3 is polyphased with DCDC2 */
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dcdc6 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-sys";
+};
+
+&reg_dldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-dsi-sensor";
+};
+
+&reg_dldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-mipi-io";
+};
+
+&reg_dldo3 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "avdd-csi";
+};
+
+&reg_dldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi-io";
+};
+
+&reg_eldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-lpddr";
+};
+
+&reg_eldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "dvdd-1v8-csi";
+};
+
+&reg_fldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-1v2-hsic";
+};
+
+&reg_fldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpus";
+};
+
+&reg_ldo_io0 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-lcd-ctp-stk";
+ status = "okay";
+};
+
+&reg_ldo_io1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-1v8-typec";
+ status = "okay";
+};
+
+&reg_rtc_ldo {
+ regulator-name = "vcc-rtc";
+};
+
+&sound {
+ status = "okay";
+ simple-audio-card,aux-devs = <&codec_analog>, <&speaker_amp>;
+ simple-audio-card,widgets = "Microphone", "Headset Microphone",
+ "Microphone", "Internal Microphone",
+ "Headphone", "Headphone Jack",
+ "Speaker", "Internal Earpiece",
+ "Speaker", "Internal Speaker";
+ simple-audio-card,routing =
+ "Headphone Jack", "HP",
+ "Internal Earpiece", "EARPIECE",
+ "Internal Speaker", "Speaker Amp OUTL",
+ "Internal Speaker", "Speaker Amp OUTR",
+ "Speaker Amp INL", "LINEOUT",
+ "Speaker Amp INR", "LINEOUT",
+ "Left DAC", "AIF1 Slot 0 Left",
+ "Right DAC", "AIF1 Slot 0 Right",
+ "AIF1 Slot 0 Left ADC", "Left ADC",
+ "AIF1 Slot 0 Right ADC", "Right ADC",
+ "Internal Microphone", "MBIAS",
+ "MIC1", "Internal Microphone",
+ "Headset Microphone", "HBIAS",
+ "MIC2", "Headset Microphone";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+/* Connected to the modem (hardware flow control can't be used) */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
new file mode 100644
index 000000000000..316e8a443913
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2019 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ */
+
+/dts-v1/;
+
+#include "sun50i-a64.dtsi"
+#include "sun50i-a64-cpu-opp.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ model = "PineTab";
+ compatible = "pine64,pinetab", "allwinner,sun50i-a64";
+
+ aliases {
+ serial0 = &uart0;
+ ethernet0 = &rtl8723cs;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 16 18 20 22 24 26 29 32 35 38 42 46 51 56 62 68 75 83 91 100>;
+ default-brightness-level = <15>;
+ enable-gpios = <&pio 3 23 GPIO_ACTIVE_HIGH>; /* PD23 */
+ power-supply = <&vdd_bl>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ i2c-csi {
+ compatible = "i2c-gpio";
+ sda-gpios = <&pio 4 13 GPIO_ACTIVE_HIGH>; /* PE13 */
+ scl-gpios = <&pio 4 12 GPIO_ACTIVE_HIGH>; /* PE12 */
+ i2c-gpio,delay-us = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Rear camera */
+ ov5640: camera@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&csi_mclk_pin>;
+ clocks = <&ccu CLK_CSI_MCLK>;
+ clock-names = "xclk";
+
+ AVDD-supply = <&reg_dldo3>;
+ DOVDD-supply = <&reg_aldo1>;
+ DVDD-supply = <&reg_eldo3>;
+ reset-gpios = <&pio 4 14 GPIO_ACTIVE_LOW>; /* PE14 */
+ powerdown-gpios = <&pio 4 15 GPIO_ACTIVE_HIGH>; /* PE15 */
+
+ port {
+ ov5640_ep: endpoint {
+ remote-endpoint = <&csi_ep>;
+ bus-width = <8>;
+ hsync-active = <1>; /* Active high */
+ vsync-active = <0>; /* Active low */
+ data-active = <1>; /* Active high */
+ pclk-sample = <1>; /* Rising */
+ };
+ };
+ };
+ };
+
+ speaker_amp: audio-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+ sound-name-prefix = "Speaker Amp";
+ };
+
+ vdd_bl: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "bl-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
+ enable-active-high;
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 */
+ post-power-on-delay-ms = <200>;
+ };
+};
+
+&codec {
+ status = "okay";
+};
+
+&codec_analog {
+ hpvcc-supply = <&reg_eldo1>;
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu1 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu2 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&cpu3 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&csi {
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi_ep: endpoint {
+ remote-endpoint = <&ov5640_ep>;
+ bus-width = <8>;
+ hsync-active = <1>; /* Active high */
+ vsync-active = <0>; /* Active low */
+ data-active = <1>; /* Active high */
+ pclk-sample = <1>; /* Rising */
+ };
+ };
+};
+
+&dai {
+ status = "okay";
+};
+
+&de {
+ status = "okay";
+};
+
+&dphy {
+ status = "okay";
+};
+
+&dsi {
+ vcc-dsi-supply = <&reg_dldo1>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "feixin,k101-im2ba02";
+ reg = <0>;
+ avdd-supply = <&reg_dc1sw>;
+ dvdd-supply = <&reg_dc1sw>;
+ cvdd-supply = <&reg_ldo_io1>;
+ reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
+ backlight = <&backlight>;
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ touchscreen@5d {
+ compatible = "goodix,gt9271";
+ reg = <0x5d>;
+ interrupt-parent = <&pio>;
+ interrupts = <7 4 IRQ_TYPE_LEVEL_HIGH>; /* PH4 */
+ irq-gpios = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ reset-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>; /* PH8 */
+ AVDD28-supply = <&reg_ldo_io1>;
+ };
+};
+
+&i2c0_pins {
+ bias-pull-up;
+};
+
+&i2c1 {
+ status = "okay";
+
+ /* TODO: add Bosch BMA223 accelerometer here */
+};
+
+&lradc {
+ vref-supply = <&reg_aldo3>;
+ status = "okay";
+
+ button-200 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <200000>;
+ };
+
+ button-400 {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <400000>;
+ };
+};
+
+&mixer1 {
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ vmmc-supply = <&reg_dcdc1>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ vmmc-supply = <&reg_dldo4>;
+ vqmmc-supply = <&reg_eldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ rtl8723cs: wifi@1 {
+ reg = <1>;
+ };
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_dcdc1>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&pwm {
+ status = "okay";
+};
+
+&r_rsb {
+ status = "okay";
+
+ axp803: pmic@3a3 {
+ compatible = "x-powers,axp803";
+ reg = <0x3a3>;
+ interrupt-parent = <&r_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ x-powers,drive-vbus-en;
+ };
+};
+
+#include "axp803.dtsi"
+
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
+&reg_aldo1 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "dovdd-csi";
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pl";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pll-avcc";
+};
+
+&reg_dc1sw {
+ regulator-name = "vcc-lcd";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-name = "vdd-cpux";
+};
+
+/* DCDC3 is polyphased with DCDC2 */
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dcdc6 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-sys";
+};
+
+&reg_dldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-hdmi-dsi-sensor";
+};
+
+&reg_dldo3 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-name = "avdd-csi";
+};
+
+&reg_dldo4 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
+&reg_drivevbus {
+ regulator-name = "usb0-vbus";
+ status = "okay";
+};
+
+&reg_eldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "cpvdd";
+};
+
+&reg_eldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca-1v8";
+};
+
+&reg_eldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "dvdd-1v8-csi";
+};
+
+&reg_fldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-1v2-hsic";
+};
+
+&reg_fldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpus";
+};
+
+&reg_ldo_io0 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-usb";
+ status = "okay";
+};
+
+&reg_ldo_io1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <3500000>;
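+ /* enable ramp is in microseconds: 3.5 s before the rail is treated as stable */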
+ regulator-name = "vcc-touchscreen";
+ status = "okay";
+};
+
+&reg_rtc_ldo {
+ regulator-name = "vcc-rtc";
+};
+
+&sound {
+ status = "okay";
+ simple-audio-card,aux-devs = <&codec_analog>, <&speaker_amp>;
+ simple-audio-card,widgets = "Microphone", "Internal Microphone Left",
+ "Microphone", "Internal Microphone Right",
+ "Headphone", "Headphone Jack",
+ "Speaker", "Internal Speaker";
+ simple-audio-card,routing =
+ "Left DAC", "AIF1 Slot 0 Left",
+ "Right DAC", "AIF1 Slot 0 Right",
+ "Speaker Amp INL", "LINEOUT",
+ "Speaker Amp INR", "LINEOUT",
+ "Internal Speaker", "Speaker Amp OUTL",
+ "Internal Speaker", "Speaker Amp OUTR",
+ "Headphone Jack", "HP",
+ "AIF1 Slot 0 Left ADC", "Left ADC",
+ "AIF1 Slot 0 Right ADC", "Right ADC",
+ "Internal Microphone Left", "MBIAS",
+ "MIC1", "Internal Microphone Left",
+ "Internal Microphone Right", "HBIAS",
+ "MIC2", "Internal Microphone Right";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ usb0_id_det-gpios = <&pio 7 9 GPIO_ACTIVE_HIGH>; /* PH9 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb0_vbus-supply = <&reg_drivevbus>;
+ usb1_vbus-supply = <&reg_ldo_io0>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 862b47dc9dc9..31143fe64d91 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -264,7 +264,7 @@
display_clocks: clock@0 {
compatible = "allwinner,sun50i-a64-de2-clk";
- reg = <0x0 0x100000>;
+ reg = <0x0 0x10000>;
clocks = <&ccu CLK_BUS_DE>,
<&ccu CLK_DE>;
clock-names = "bus",
@@ -274,6 +274,18 @@
#reset-cells = <1>;
};
+ rotate: rotate@20000 {
+ compatible = "allwinner,sun50i-a64-de2-rotate",
+ "allwinner,sun8i-a83t-de2-rotate";
+ reg = <0x20000 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&display_clocks CLK_BUS_ROT>,
+ <&display_clocks CLK_ROT>;
+ clock-names = "bus",
+ "mod";
+ resets = <&display_clocks RST_ROT>;
+ };
+
mixer0: mixer@100000 {
compatible = "allwinner,sun50i-a64-de2-mixer-0";
reg = <0x100000 0x100000>;
@@ -671,6 +683,11 @@
function = "i2c1";
};
+ i2c2_pins: i2c2-pins {
+ pins = "PE14", "PE15";
+ function = "i2c2";
+ };
+
/omit-if-no-ref/
lcd_rgb666_pins: lcd-rgb666-pins {
pins = "PD0", "PD1", "PD2", "PD3", "PD4",
@@ -958,12 +975,13 @@
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_BUS_I2C2>;
resets = <&ccu RST_BUS_I2C2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
};
-
spi0: spi@1c68000 {
compatible = "allwinner,sun8i-h3-spi";
reg = <0x01c68000 0x1000>;
@@ -1061,6 +1079,14 @@
status = "disabled";
};
+ mbus: dram-controller@1c62000 {
+ compatible = "allwinner,sun50i-a64-mbus";
+ reg = <0x01c62000 0x1000>;
+ clocks = <&ccu 112>;
+ dma-ranges = <0x00000000 0x40000000 0xc0000000>;
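+ /* dma-ranges: DMA bus address 0x0 maps to DRAM at 0x40000000, 3 GiB window */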
+ #interconnect-cells = <1>;
+ };
+
csi: csi@1cb0000 {
compatible = "allwinner,sun50i-a64-csi";
reg = <0x01cb0000 0x1000>;
@@ -1106,6 +1132,20 @@
#phy-cells = <0>;
};
+ deinterlace: deinterlace@1e00000 {
+ compatible = "allwinner,sun50i-a64-deinterlace",
+ "allwinner,sun8i-h3-deinterlace";
+ reg = <0x01e00000 0x20000>;
+ clocks = <&ccu CLK_BUS_DEINTERLACE>,
+ <&ccu CLK_DEINTERLACE>,
+ <&ccu CLK_DRAM_DEINTERLACE>;
+ clock-names = "bus", "mod", "ram";
+ resets = <&ccu RST_BUS_DEINTERLACE>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&mbus 9>;
+ interconnect-names = "dma-mem";
+ };
+
hdmi: hdmi@1ee0000 {
compatible = "allwinner,sun50i-a64-dw-hdmi",
"allwinner,sun8i-a83t-dw-hdmi";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index 70b5f0998421..7d7aad18f078 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -61,6 +61,7 @@
label = "sw4";
linux,code = <BTN_0>;
gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+ wakeup-source;
};
};
@@ -93,6 +94,10 @@
status = "okay";
};
+&cpu0 {
+ cpu-supply = <&reg_vdd_cpux>;
+};
+
&de {
status = "okay";
};
@@ -168,6 +173,22 @@
status = "okay";
};
+&r_i2c {
+ status = "okay";
+
+ reg_vdd_cpux: regulator@65 {
+ compatible = "silergy,sy8106a";
+ reg = <0x65>;
+ regulator-name = "vdd-cpux";
+ silergy,fixed-microvolt = <1100000>;
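+ /* voltage used while I2C voltage control is disabled (per the sy8106a binding) */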
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <200>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
&spi0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 9893aa64dd0b..4462a68c0681 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -38,8 +38,7 @@
};
pmu {
- compatible = "arm,cortex-a53-pmu",
- "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index df6d872c34e2..8f09d209359b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -32,6 +32,13 @@
};
};
+ ext_osc32k: ext_osc32k_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "ext_osc32k";
+ };
+
leds {
compatible = "gpio-leds";
@@ -275,6 +282,10 @@
vcc-pm-supply = <&reg_aldo1>;
};
+&rtc {
+ clocks = <&ext_osc32k>;
+};
+
&spdif {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
index c311eee52a35..47f579610dcc 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -32,6 +32,13 @@
};
};
+ ext_osc32k: ext_osc32k_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "ext_osc32k";
+ };
+
leds {
compatible = "gpio-leds";
@@ -144,6 +151,15 @@
};
};
+&mmc2 {
+ vmmc-supply = <&reg_cldo1>;
+ vqmmc-supply = <&reg_bldo2>;
+ cap-mmc-hw-reset;
+ non-removable;
+ bus-width = <8>;
+ status = "okay";
+};
+
&ohci0 {
status = "okay";
};
@@ -276,6 +292,10 @@
status = "okay";
};
+&rtc {
+ clocks = <&ext_osc32k>;
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_ph_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
index 83aab7368889..fceb298bfd53 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
@@ -7,4 +7,37 @@
/ {
model = "OrangePi One Plus";
compatible = "xunlong,orangepi-one-plus", "allwinner,sun50i-h6";
+
+ aliases {
+ ethernet0 = &emac;
+ };
+
+ reg_gmac_3v3: gmac-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-gmac-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <100000>;
+ enable-active-high;
+ gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>; /* PD6 */
+ vin-supply = <&reg_aldo2>;
+ };
+};
+
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ext_rgmii_pins>;
+ phy-mode = "rgmii";
+ phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_gmac_3v3>;
+ allwinner,rx-delay-ps = <200>;
+ allwinner,tx-delay-ps = <200>;
+ status = "okay";
+};
+
+&mdio {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index 37f4c57597d4..9287976c4a50 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -20,6 +20,25 @@
stdout-path = "serial0:115200n8";
};
+ connector {
+ compatible = "hdmi-connector";
+ type = "a";
+ ddc-en-gpios = <&pio 7 2 GPIO_ACTIVE_HIGH>; /* PH2 */
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ ext_osc32k: ext_osc32k_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "ext_osc32k";
+ };
+
leds {
compatible = "gpio-leds";
@@ -45,6 +64,10 @@
};
};
+&de {
+ status = "okay";
+};
+
&ehci0 {
status = "okay";
};
@@ -58,6 +81,16 @@
status = "okay";
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
&mmc0 {
vmmc-supply = <&reg_cldo1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
@@ -197,6 +230,10 @@
status = "okay";
};
+&rtc {
+ clocks = <&ext_osc32k>;
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_ph_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index d1c2aa5b3a20..b0642d841933 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -14,12 +14,20 @@
aliases {
ethernet0 = &emac;
serial0 = &uart0;
+ spi0 = &spi0;
};
chosen {
stdout-path = "serial0:115200n8";
};
+ ext_osc32k: ext_osc32k_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "ext_osc32k";
+ };
+
hdmi_connector: connector {
compatible = "hdmi-connector";
type = "a";
@@ -278,6 +286,28 @@
vcc-pm-supply = <&reg_aldo1>;
};
+&rtc {
+ clocks = <&ext_osc32k>;
+};
+
+/*
+ * The CS pin is shared with the MMC2 CMD pin, so we cannot have the SPI
+ * flash and eMMC at the same time, as one of them would fail probing.
+ * Disable SPI0 here to prefer the more useful eMMC. U-Boot can
+ * fix this up if no eMMC is connected.
+ */
+&spi0 {
+ pinctrl-0 = <&spi0_pins>, <&spi0_cs_pin>;
+ pinctrl-names = "default";
+ status = "disabled";
+
+ flash@0 {
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <4000000>;
+ };
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_ph_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 3329283e38ab..b9ab7d8fa8af 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -62,16 +62,8 @@
clock-output-names = "osc24M";
};
- ext_osc32k: ext_osc32k_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- clock-output-names = "ext_osc32k";
- };
-
pmu {
- compatible = "arm,cortex-a53-pmu",
- "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
@@ -338,6 +330,31 @@
bias-pull-up;
};
+ /omit-if-no-ref/
+ spi0_pins: spi0-pins {
+ pins = "PC0", "PC2", "PC3";
+ function = "spi0";
+ };
+
+ /* pin shared with MMC2-CMD (eMMC) */
+ /omit-if-no-ref/
+ spi0_cs_pin: spi0-cs-pin {
+ pins = "PC5";
+ function = "spi0";
+ };
+
+ /omit-if-no-ref/
+ spi1_pins: spi1-pins {
+ pins = "PH4", "PH5", "PH6";
+ function = "spi1";
+ };
+
+ /omit-if-no-ref/
+ spi1_cs_pin: spi1-cs-pin {
+ pins = "PH3";
+ function = "spi1";
+ };
+
spdif_tx_pin: spdif-tx-pin {
pins = "PH7";
function = "spdif";
@@ -504,6 +521,36 @@
#size-cells = <0>;
};
+ spi0: spi@5010000 {
+ compatible = "allwinner,sun50i-h6-spi",
+ "allwinner,sun8i-h3-spi";
+ reg = <0x05010000 0x1000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma 22>, <&dma 22>;
+ dma-names = "rx", "tx";
+ resets = <&ccu RST_BUS_SPI0>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi1: spi@5011000 {
+ compatible = "allwinner,sun50i-h6-spi",
+ "allwinner,sun8i-h3-spi";
+ reg = <0x05011000 0x1000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma 23>, <&dma 23>;
+ dma-names = "rx", "tx";
+ resets = <&ccu RST_BUS_SPI1>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
emac: ethernet@5020000 {
compatible = "allwinner,sun50i-h6-emac",
"allwinner,sun50i-a64-emac";
@@ -800,7 +847,6 @@
interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
clock-output-names = "osc32k", "osc32k-out", "iosc";
- clocks = <&ext_osc32k>;
#clock-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index fb11ef05d556..f6c4a15079d3 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -178,12 +178,12 @@
qspi_boot: partition@0 {
label = "Boot and fpga data";
- reg = <0x0 0x034B0000>;
+ reg = <0x0 0x03FE0000>;
};
- qspi_rootfs: partition@4000000 {
+ qspi_rootfs: partition@3FE0000 {
label = "Root Filesystem - JFFS2";
- reg = <0x034B0000 0x0EB50000>;
+ reg = <0x03FE0000 0x0C020000>;
};
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
index 4dec518c4dde..755b4ad15184 100644
--- a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
@@ -60,6 +60,12 @@
sm: secure-monitor {
compatible = "amlogic,meson-gxbb-sm";
+
+ pwrc: power-controller {
+ compatible = "amlogic,meson-a1-pwrc";
+ #power-domain-cells = <1>;
+ status = "okay";
+ };
};
soc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
index 4cd2d5951822..cb1360ae1211 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
@@ -313,15 +313,15 @@
dai-tdm-slot-rx-mask-1 = <1 1>;
mclk-fs = <256>;
- codec@0 {
+ codec-0 {
sound-dai = <&lineout>;
};
- codec@1 {
+ codec-1 {
sound-dai = <&speaker_amp1>;
};
- codec@2 {
+ codec-2 {
sound-dai = <&linein>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index abe04f4ad7d8..0882ea215b88 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -295,17 +295,9 @@
};
};
- emmc_pins: emmc {
+ emmc_ctrl_pins: emmc-ctrl {
mux-0 {
- groups = "emmc_nand_d0",
- "emmc_nand_d1",
- "emmc_nand_d2",
- "emmc_nand_d3",
- "emmc_nand_d4",
- "emmc_nand_d5",
- "emmc_nand_d6",
- "emmc_nand_d7",
- "emmc_cmd";
+ groups = "emmc_cmd";
function = "emmc";
bias-pull-up;
drive-strength-microamp = <4000>;
@@ -319,6 +311,34 @@
};
};
+ emmc_data_4b_pins: emmc-data-4b {
+ mux-0 {
+ groups = "emmc_nand_d0",
+ "emmc_nand_d1",
+ "emmc_nand_d2",
+ "emmc_nand_d3";
+ function = "emmc";
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ emmc_data_8b_pins: emmc-data-8b {
+ mux-0 {
+ groups = "emmc_nand_d0",
+ "emmc_nand_d1",
+ "emmc_nand_d2",
+ "emmc_nand_d3",
+ "emmc_nand_d4",
+ "emmc_nand_d5",
+ "emmc_nand_d6",
+ "emmc_nand_d7";
+ function = "emmc";
+ bias-pull-up;
+ drive-strength-microamp = <4000>;
+ };
+ };
+
emmc_ds_pins: emmc-ds {
mux {
groups = "emmc_nand_ds";
@@ -573,6 +593,17 @@
};
};
+ nor_pins: nor {
+ mux {
+ groups = "nor_d",
+ "nor_q",
+ "nor_c",
+ "nor_cs";
+ function = "nor";
+ bias-disable;
+ };
+ };
+
pdm_din0_a_pins: pdm-din0-a {
mux {
groups = "pdm_din0_a";
@@ -957,6 +988,57 @@
};
};
+ spicc0_x_pins: spicc0-x {
+ mux {
+ groups = "spi0_mosi_x",
+ "spi0_miso_x",
+ "spi0_clk_x";
+ function = "spi0";
+ drive-strength-microamp = <4000>;
+ bias-disable;
+ };
+ };
+
+ spicc0_ss0_x_pins: spicc0-ss0-x {
+ mux {
+ groups = "spi0_ss0_x";
+ function = "spi0";
+ drive-strength-microamp = <4000>;
+ bias-disable;
+ };
+ };
+
+ spicc0_c_pins: spicc0-c {
+ mux {
+ groups = "spi0_mosi_c",
+ "spi0_miso_c",
+ "spi0_ss0_c",
+ "spi0_clk_c";
+ function = "spi0";
+ drive-strength-microamp = <4000>;
+ bias-disable;
+ };
+ };
+
+ spicc1_pins: spicc1 {
+ mux {
+ groups = "spi1_mosi",
+ "spi1_miso",
+ "spi1_clk";
+ function = "spi1";
+ drive-strength-microamp = <4000>;
+ };
+ };
+
+ spicc1_ss0_pins: spicc1-ss0 {
+ mux {
+ groups = "spi1_ss0";
+ function = "spi1";
+ drive-strength-microamp = <4000>;
+ bias-disable;
+ };
+ };
+
tdm_a_din0_pins: tdm-a-din0 {
mux {
groups = "tdm_a_din0";
@@ -2051,6 +2133,39 @@
amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
};
+ spicc0: spi@13000 {
+ compatible = "amlogic,meson-g12a-spicc";
+ reg = <0x0 0x13000 0x0 0x44>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_SPICC0>,
+ <&clkc CLKID_SPICC0_SCLK>;
+ clock-names = "core", "pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spicc1: spi@15000 {
+ compatible = "amlogic,meson-g12a-spicc";
+ reg = <0x0 0x15000 0x0 0x44>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_SPICC1>,
+ <&clkc CLKID_SPICC1_SCLK>;
+ clock-names = "core", "pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spifc: spi@14000 {
+ compatible = "amlogic,meson-gxbb-spifc";
+ status = "disabled";
+ reg = <0x0 0x14000 0x0 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc CLKID_CLK81>;
+ };
+
pwm_ef: pwm@19000 {
compatible = "amlogic,meson-g12a-ee-pwm";
reg = <0x0 0x19000 0x0 0x20>;
@@ -2220,6 +2335,7 @@
dr_mode = "host";
snps,dis_u2_susphy_quirk;
snps,quirk-frame-length-adjustment;
+ snps,parkmode-disable-ss-quirk;
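+ /* disable SuperSpeed park mode in the DWC3 core (quirk from the snps,dwc3 binding) */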
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
index 03054c478896..783e5a397f86 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
@@ -56,6 +56,7 @@
<&clkc_audio AUD_CLKID_PDM_DCLK>,
<&clkc_audio AUD_CLKID_PDM_SYSCLK>;
clock-names = "pclk", "dclk", "sysclk";
+ resets = <&clkc_audio AUD_RESET_PDM>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index 2ac9e3a43b96..b00d0468c753 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -269,7 +269,7 @@
dai-tdm-slot-tx-mask-3 = <1 1>;
mclk-fs = <256>;
- codec@0 {
+ codec {
sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
};
};
@@ -472,7 +472,7 @@
/* eMMC */
&sd_emmc_c {
status = "okay";
- pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
index 2a324f0136e3..a26bfe72550f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
@@ -271,7 +271,7 @@
/* eMMC */
&sd_emmc_c {
status = "okay";
- pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
index 4f2596d82989..1b07c8c06eac 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
@@ -443,7 +443,7 @@
/* eMMC */
&sd_emmc_c {
status = "okay";
- pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
index 554863429aa6..c33e85fbdaba 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
@@ -8,6 +8,8 @@
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
/ {
+ model = "Khadas VIM3";
+
vddcpu_a: regulator-vddcpu-a {
/*
* MP8756GD Regulator.
@@ -48,7 +50,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12A-KHADAS-VIM3";
+ model = "G12B-KHADAS-VIM3";
audio-aux-devs = <&tdmout_b>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
index 0e54c1dc2842..169ea283d4ee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
@@ -208,7 +208,7 @@
sound {
compatible = "amlogic,axg-sound-card";
- model = "G12A-ODROIDN2";
+ model = "G12B-ODROID-N2";
audio-aux-devs = <&tdmout_b>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
@@ -435,7 +435,7 @@
/* eMMC */
&sd_emmc_c {
status = "okay";
- pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
@@ -451,6 +451,27 @@
vqmmc-supply = <&flash_1v8>;
};
+/*
+ * The EMMC_D4, EMMC_D5, EMMC_D6 and EMMC_D7 pins are shared between the
+ * SPI NOR and eMMC data 4 to 7 pins.
+ * Replace emmc_data_8b_pins with emmc_data_4b_pins in the sd_emmc_c
+ * pinctrl-0 and change bus-width to 4; then spifc can be enabled.
+ * The SW1 slide should also be set to the correct position.
+ */
+&spifc {
+ status = "disabled";
+ pinctrl-0 = <&nor_pins>;
+ pinctrl-names = "default";
+
+ mx25u64: spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mxicy,mx25u6435f", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ };
+};
+
&tdmif_b {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
index ccd0bced01e8..325e448eb09c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
@@ -485,7 +485,7 @@
/* eMMC */
&sd_emmc_c {
status = "okay";
- pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 40db06e28b66..03f79fe045b7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&gic>;
@@ -83,6 +84,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 0>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -92,6 +94,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 0>;
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -101,6 +104,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 0>;
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -110,6 +114,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 0>;
+ #cooling-cells = <2>;
};
l2: l2-cache0 {
@@ -117,6 +122,53 @@
};
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <250>; /* milliseconds */
+ polling-delay = <1000>; /* milliseconds */
+
+ thermal-sensors = <&scpi_sensors 0>;
+
+ trips {
+ cpu_passive: cpu-passive {
+ temperature = <80000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+
+ cpu_hot: cpu-hot {
+ temperature = <90000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "hot";
+ };
+
+ cpu_critical: cpu-critical {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cpu_cooling_maps: cooling-maps {
+ map0 {
+ trip = <&cpu_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&cpu_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
arm-pmu {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 6ded279c40c8..b46ef985bb44 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -248,6 +248,7 @@
status = "okay";
pinctrl-0 = <&remote_input_ao_pins>;
pinctrl-names = "default";
+ linux,rc-map-name = "rc-odroid";
};
&gpio_ao {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 5eab3dfdbd55..45cb83625951 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -245,6 +245,9 @@
bluetooth {
compatible = "brcm,bcm43438-bt";
shutdown-gpios = <&gpio GPIOX_20 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+ clock-names = "lpo";
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index 43eb7d149e36..6ac678f88bd8 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -15,7 +15,6 @@
/ {
aliases {
serial0 = &uart_AO;
- serial1 = &uart_A;
ethernet0 = &ethmac;
};
@@ -180,6 +179,14 @@
pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
pinctrl-names = "default";
uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+ clock-names = "lpo";
+ };
};
&uart_AO {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index d5dc12878dfe..27eeab71ec77 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -8,7 +8,6 @@
/dts-v1/;
#include <dt-bindings/input/input.h>
-#include <dt-bindings/thermal/thermal.h>
#include "meson-gxm.dtsi"
@@ -100,49 +99,6 @@
clock-names = "ext_clock";
};
- thermal-zones {
- cpu-thermal {
- polling-delay-passive = <250>; /* milliseconds */
- polling-delay = <1000>; /* milliseconds */
-
- thermal-sensors = <&scpi_sensors 0>;
-
- trips {
- cpu_alert0: cpu-alert0 {
- temperature = <70000>; /* millicelsius */
- hysteresis = <2000>; /* millicelsius */
- type = "active";
- };
-
- cpu_alert1: cpu-alert1 {
- temperature = <80000>; /* millicelsius */
- hysteresis = <2000>; /* millicelsius */
- type = "passive";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&cpu_alert0>;
- cooling-device = <&gpio_fan THERMAL_NO_LIMIT 1>;
- };
-
- map1 {
- trip = <&cpu_alert1>;
- cooling-device = <&gpio_fan 2 THERMAL_NO_LIMIT>,
- <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- };
- };
- };
-
hdmi_5v: regulator-hdmi-5v {
compatible = "regulator-fixed";
@@ -198,36 +154,23 @@
hdmi-phandle = <&hdmi_tx>;
};
-&cpu0 {
- #cooling-cells = <2>;
-};
-
-&cpu1 {
- #cooling-cells = <2>;
-};
-
-&cpu2 {
- #cooling-cells = <2>;
-};
-
-&cpu3 {
- #cooling-cells = <2>;
-};
-
-&cpu4 {
- #cooling-cells = <2>;
-};
-&cpu5 {
- #cooling-cells = <2>;
-};
-
-&cpu6 {
- #cooling-cells = <2>;
-};
+&cpu_cooling_maps {
+ map0 {
+ cooling-device = <&gpio_fan THERMAL_NO_LIMIT 1>;
+ };
-&cpu7 {
- #cooling-cells = <2>;
+ map1 {
+ cooling-device = <&gpio_fan 2 THERMAL_NO_LIMIT>,
+ <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
};
&ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index 5ff64a0d2dcf..b6f89f108e28 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -49,6 +49,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 1>;
+ #cooling-cells = <2>;
};
cpu5: cpu@101 {
@@ -58,6 +59,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 1>;
+ #cooling-cells = <2>;
};
cpu6: cpu@102 {
@@ -67,6 +69,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 1>;
+ #cooling-cells = <2>;
};
cpu7: cpu@103 {
@@ -76,6 +79,7 @@
enable-method = "psci";
next-level-cache = <&l2>;
clocks = <&scpi_dvfs 1>;
+ #cooling-cells = <2>;
};
};
};
@@ -124,6 +128,30 @@
compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc";
};
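+/* Override the cooling maps from meson-gx.dtsi so they cover all eight GXM cores */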
+&cpu_cooling_maps {
+ map0 {
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+};
+
&saradc {
compatible = "amlogic,meson-gxm-saradc", "amlogic,meson-saradc";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
index 90815fa25ec6..094ecf2222bb 100644
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
@@ -9,8 +9,6 @@
#include <dt-bindings/gpio/meson-g12a-gpio.h>
/ {
- model = "Khadas VIM3";
-
aliases {
serial0 = &uart_AO;
ethernet0 = &ethmac;
@@ -312,7 +310,7 @@
/* eMMC */
&sd_emmc_c {
status = "okay";
- pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
@@ -328,6 +326,26 @@
vqmmc-supply = <&emmc_1v8>;
};
+/*
+ * The EMMC_D4, EMMC_D5, EMMC_D6 and EMMC_D7 pins are shared between the
+ * SPI NOR chip select and eMMC data 4 to 7 pins.
+ * Replace emmc_data_8b_pins with emmc_data_4b_pins in the sd_emmc_c
+ * pinctrl-0 and change bus-width to 4; then spifc can be enabled.
+ */
+&spifc {
+ status = "disabled";
+ pinctrl-0 = <&nor_pins>;
+ pinctrl-names = "default";
+
+ w25q32: spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128fw", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <104000000>;
+ };
+};
+
&uart_A {
status = "okay";
pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index cb1b48f5b8b1..dfb2438851c0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -518,7 +518,7 @@
/* eMMC */
&sd_emmc_c {
status = "okay";
- pinctrl-0 = <&emmc_pins>, <&emmc_ds_pins>;
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
pinctrl-1 = <&emmc_clk_gate_pins>;
pinctrl-names = "default", "clk-gate";
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index d847a3fcbc85..d4ec735fb1a5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -448,6 +448,7 @@
<&clkc_audio AUD_CLKID_PDM_DCLK>,
<&clkc_audio AUD_CLKID_PDM_SYSCLK>;
clock-names = "pclk", "dclk", "sysclk";
+ resets = <&clkc_audio AUD_RESET_PDM>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 3f78373f708a..12f039fa3dad 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -92,7 +92,7 @@
timeout-sec = <30>;
};
- smb@8000000 {
+ bus@8000000 {
compatible = "arm,vexpress,v2m-p1", "simple-bus";
arm,v2m-memory-map = "rs1";
#address-cells = <2>; /* SMB chipselect number and offset */
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 335fff762451..66381d89c1ce 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -206,7 +206,7 @@
};
};
- smb@8000000 {
+ bus@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 1f3c80aafbd7..f5889281545f 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -800,7 +800,7 @@
<0x00000008 0x80000000 0x1 0x80000000>;
};
- smb@8000000 {
+ bus@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 9f60dacb4f80..e3983ded3c3c 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -8,7 +8,7 @@
*/
/ {
- smb@8000000 {
+ bus@8000000 {
mb_clk24mhz: clk24mhz {
compatible = "fixed-clock";
#clock-cells = <0>;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index 6e685d883303..c5d15cbd8cf6 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -132,7 +132,7 @@
};
};
- smb@8000000 {
+ bus@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
index 57b0b9d7f3fa..60703b5763c6 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
@@ -5,7 +5,7 @@
* "rs2" extension for the v2m motherboard
*/
/ {
- smb@8000000 {
+ bus@8000000 {
motherboard {
arm,v2m-memory-map = "rs2";
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index 03a7bf079c8f..e333c8d2d0e4 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -8,7 +8,7 @@
* VEMotherBoard.lisa
*/
/ {
- smb@8000000 {
+ bus@8000000 {
motherboard {
arm,v2m-memory-map = "rs1";
compatible = "arm,vexpress,v2m-p1", "simple-bus";
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
index 22383c26bb03..d859914500a7 100644
--- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -144,7 +144,7 @@
};
};
- smb: smb@8000000 {
+ smb: bus@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index bac293e6ee33..a39f0a1723e0 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -4,6 +4,11 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frwy.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-oxalis.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-kbox-a-230-ls.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var2.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var3-ads2.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-kontron-sl28-var4.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb
@@ -26,6 +31,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-hummingboard-pulse.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-devkit.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
new file mode 100644
index 000000000000..4b4cc6a1573d
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device Tree file for the Kontron KBox A-230-LS.
+ *
+ * This consists of a Kontron SMARC-sAL28 (Dual PHY) and a special
+ * carrier (s1914).
+ *
+ * Copyright (C) 2019 Michael Walle <michael@walle.cc>
+ *
+ */
+
+/dts-v1/;
+#include "fsl-ls1028a-kontron-sl28-var4.dts"
+
+/ {
+ model = "Kontron KBox A-230-LS";
+ compatible = "kontron,kbox-a-230-ls", "kontron,sl28-var4",
+ "kontron,sl28", "fsl,ls1028a";
+};
+
+&enetc_mdio_pf3 {
+ /* BCM54140 QSGMII quad PHY */
+ qsgmii_phy0: ethernet-phy@7 {
+ reg = <7>;
+ };
+
+ qsgmii_phy1: ethernet-phy@8 {
+ reg = <8>;
+ };
+
+ qsgmii_phy2: ethernet-phy@9 {
+ reg = <9>;
+ };
+
+ qsgmii_phy3: ethernet-phy@10 {
+ reg = <10>;
+ };
+};
+
+&enetc_port2 {
+ status = "okay";
+};
+
+&i2c3 {
+ eeprom@57 {
+ compatible = "atmel,24c32";
+ reg = <0x57>;
+ pagesize = <32>;
+ };
+};
+
+&mscc_felix {
+ status = "okay";
+};
+
+&mscc_felix_port0 {
+ label = "swp0";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy0>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port1 {
+ label = "swp1";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy1>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port2 {
+ label = "swp2";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy2>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port3 {
+ label = "swp3";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy3>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port4 {
+ ethernet = <&enetc_port2>;
+ status = "okay";
+};
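
A note on the switch wiring in the board file above: the ethernet = <&enetc_port2> property on the internal port marks it as the DSA CPU port, so the referenced ENETC MAC becomes the conduit interface for the four front-panel ports. That internal link is MAC-to-MAC with no PHY, which is why each end carries a fixed-link subnode pinning the link parameters; a minimal sketch of the switch-side end, matching the labels this patch adds to fsl-ls1028a.dtsi further down:

	mscc_felix_port4: port@4 {
		reg = <4>;
		phy-mode = "internal";	/* SoC-internal lanes, no external pins */

		/* no PHY to autonegotiate with: fix speed and duplex */
		fixed-link {
			speed = <2500>;
			full-duplex;
		};
	};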
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
new file mode 100644
index 000000000000..901b5b161def
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device Tree file for the Kontron SMARC-sAL28 board.
+ *
+ * This is for the network variant 2, which has two ethernet ports. These
+ * ports are connected to the internal switch.
+ *
+ * Copyright (C) 2020 Michael Walle <michael@walle.cc>
+ *
+ */
+
+/dts-v1/;
+#include "fsl-ls1028a-kontron-sl28.dts"
+
+/ {
+ model = "Kontron SMARC-sAL28 (TSN-on-module)";
+ compatible = "kontron,sl28-var2", "kontron,sl28", "fsl,ls1028a";
+};
+
+&enetc_mdio_pf3 {
+ phy0: ethernet-phy@5 {
+ reg = <0x5>;
+ eee-broken-1000t;
+ eee-broken-100tx;
+ };
+
+ phy1: ethernet-phy@4 {
+ reg = <0x4>;
+ eee-broken-1000t;
+ eee-broken-100tx;
+ };
+};
+
+&enetc_port0 {
+ status = "disabled";
+ /*
+ * In the base device tree the PHY was registered in the mdio
+ * subnode, as it is the PHY for this port. On this module the PHY
+ * is connected to a switch port instead and is registered above.
+ * Therefore, delete the mdio subnode as well as the phy-handle
+ * property here.
+ */
+ /delete-property/ phy-handle;
+ /delete-node/ mdio;
+};
+
+&enetc_port2 {
+ status = "okay";
+};
+
+&mscc_felix {
+ status = "okay";
+};
+
+&mscc_felix_port0 {
+ label = "gbe0";
+ phy-handle = <&phy0>;
+ phy-mode = "sgmii";
+ status = "okay";
+};
+
+&mscc_felix_port1 {
+ label = "gbe1";
+ phy-handle = <&phy1>;
+ phy-mode = "sgmii";
+ status = "okay";
+};
+
+&mscc_felix_port4 {
+ ethernet = <&enetc_port2>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
new file mode 100644
index 000000000000..0973a6a45217
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var3-ads2.dts
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device Tree file for the Kontron SMARC-sAL28 board on a SMARC Eval 2.0
+ * carrier (ADS2).
+ *
+ * Copyright (C) 2019 Michael Walle <michael@walle.cc>
+ *
+ */
+
+/dts-v1/;
+#include "fsl-ls1028a-kontron-sl28.dts"
+
+/ {
+ model = "Kontron SMARC-sAL28 (Single PHY) on SMARC Eval 2.0 carrier";
+ compatible = "kontron,sl28-var3-ads2", "kontron,sl28-var3",
+ "kontron,sl28", "fsl,ls1028a";
+
+ sound {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "simple-audio-card";
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack",
+ "Line", "Line Out Jack",
+ "Microphone", "Microphone Jack",
+ "Line", "Line In Jack";
+ simple-audio-card,routing =
+ "Line Out Jack", "LINEOUTR",
+ "Line Out Jack", "LINEOUTL",
+ "Headphone Jack", "HPOUTR",
+ "Headphone Jack", "HPOUTL",
+ "IN1L", "Line In Jack",
+ "IN1R", "Line In Jack",
+ "Microphone Jack", "MICBIAS",
+ "IN2L", "Microphone Jack",
+ "IN2R", "Microphone Jack";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,dai-link@0 {
+ reg = <0>;
+ bitclock-master = <&dailink0_master>;
+ frame-master = <&dailink0_master>;
+ format = "i2s";
+
+ cpu {
+ sound-dai = <&sai6>;
+ };
+
+ dailink0_master: codec {
+ sound-dai = <&wm8904>;
+ };
+ };
+
+ simple-audio-card,dai-link@1 {
+ reg = <1>;
+ bitclock-master = <&dailink1_master>;
+ frame-master = <&dailink1_master>;
+ format = "i2s";
+
+ cpu {
+ sound-dai = <&sai5>;
+ };
+
+ dailink1_master: codec {
+ sound-dai = <&wm8904>;
+ };
+ };
+ };
+};
+
+&dspi2 {
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ m25p,fast-read;
+ spi-max-frequency = <100000000>;
+ reg = <0>;
+ };
+};
+
+&i2c3 {
+ eeprom@57 {
+ compatible = "atmel,24c64";
+ reg = <0x57>;
+ pagesize = <32>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+
+ wm8904: audio-codec@1a {
+ #sound-dai-cells = <0>;
+ compatible = "wlf,wm8904";
+ reg = <0x1a>;
+ clocks = <&mclk>;
+ clock-names = "mclk";
+ assigned-clocks = <&mclk>;
+ assigned-clock-rates = <1250000>;
+ };
+};
+
+&sai5 {
+ status = "okay";
+};
+
+&sai6 {
+ status = "okay";
+};
+
+&soc {
+ mclk: clock-mclk@f130080 {
+ compatible = "fsl,vf610-sai-clock";
+ reg = <0x0 0xf130080 0x0 0x80>;
+ clocks = <&clockgen 4 1>;
+ #clock-cells = <0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
new file mode 100644
index 000000000000..df212ed5bb94
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device Tree file for the Kontron SMARC-sAL28 board.
+ *
+ * This is for the network variant 4, which has two ethernet ports. It
+ * extends the base with one more port, connected via RGMII.
+ *
+ * Copyright (C) 2019 Michael Walle <michael@walle.cc>
+ *
+ */
+
+/dts-v1/;
+#include "fsl-ls1028a-kontron-sl28.dts"
+#include <dt-bindings/net/qca-ar803x.h>
+
+/ {
+ model = "Kontron SMARC-sAL28 (Dual PHY)";
+ compatible = "kontron,sl28-var4", "kontron,sl28", "fsl,ls1028a";
+};
+
+&enetc_port1 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@4 {
+ reg = <0x4>;
+ eee-broken-1000t;
+ eee-broken-100tx;
+
+ qca,clk-out-frequency = <125000000>;
+ qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+
+ vddio-supply = <&vddh>;
+
+ vddio: vddio-regulator {
+ regulator-name = "VDDIO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vddh: vddh-regulator {
+ regulator-name = "VDDH";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
new file mode 100644
index 000000000000..1648a04ea79f
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Device Tree file for the Kontron SMARC-sAL28 board.
+ *
+ * Copyright (C) 2019 Michael Walle <michael@walle.cc>
+ *
+ */
+
+/dts-v1/;
+#include "fsl-ls1028a.dtsi"
+
+/ {
+ model = "Kontron SMARC-sAL28";
+ compatible = "kontron,sl28", "fsl,ls1028a";
+
+ aliases {
+ crypto = &crypto;
+ serial0 = &duart0;
+ serial1 = &duart1;
+ spi0 = &fspi;
+ spi1 = &dspi2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&dspi2 {
+ status = "okay";
+};
+
+&duart0 {
+ status = "okay";
+};
+
+&duart1 {
+ status = "okay";
+};
+
+&enetc_port0 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "sgmii";
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@5 {
+ reg = <0x5>;
+ eee-broken-1000t;
+ eee-broken-100tx;
+ };
+ };
+};
+
+&esdhc {
+ sd-uhs-sdr104;
+ sd-uhs-sdr50;
+ sd-uhs-sdr25;
+ sd-uhs-sdr12;
+ status = "okay";
+};
+
+&esdhc1 {
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ bus-width = <8>;
+ status = "okay";
+};
+
+&fspi {
+ status = "okay";
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ m25p,fast-read;
+ spi-max-frequency = <133000000>;
+ reg = <0>;
+ /* The following setting enables 1-1-2 (CMD-ADDR-DATA) mode */
+ spi-rx-bus-width = <2>; /* 2 SPI Rx lines */
+ spi-tx-bus-width = <1>; /* 1 SPI Tx line */
+
+ partition@0 {
+ reg = <0x000000 0x010000>;
+ label = "rcw";
+ read-only;
+ };
+
+ partition@10000 {
+ reg = <0x010000 0x0f0000>;
+ label = "failsafe bootloader";
+ read-only;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x040000>;
+ label = "failsafe DP firmware";
+ read-only;
+ };
+
+ partition@140000 {
+ reg = <0x140000 0x0a0000>;
+ label = "failsafe trusted firmware";
+ read-only;
+ };
+
+ partition@1e0000 {
+ reg = <0x1e0000 0x020000>;
+ label = "reserved";
+ read-only;
+ };
+
+ partition@200000 {
+ reg = <0x200000 0x010000>;
+ label = "configuration store";
+ };
+
+ partition@210000 {
+ reg = <0x210000 0x0f0000>;
+ label = "bootloader";
+ };
+
+ partition@300000 {
+ reg = <0x300000 0x040000>;
+ label = "DP firmware";
+ };
+
+ partition@340000 {
+ reg = <0x340000 0x0a0000>;
+ label = "trusted firmware";
+ };
+
+ partition@3e0000 {
+ reg = <0x3e0000 0x020000>;
+ label = "bootloader environment";
+ };
+ };
+};
+
+&gpio1 {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "TDO", "TCK",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio2 {
+ gpio-line-names =
+ "", "", "", "", "", "", "TMS", "TDI",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&i2c0 {
+ status = "okay";
+
+ rtc@32 {
+ compatible = "microcrystal,rv8803";
+ reg = <0x32>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+};
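
This base file is the hub of the include hierarchy: the var2 and var4 variants include it, and the KBox carrier file in turn includes var4, each layer overriding only labeled nodes. A hypothetical minimal variant (placeholder model name, not a real board), assuming the labels defined in this tree, needs nothing more than:

	/dts-v1/;
	#include "fsl-ls1028a-kontron-sl28.dts"	/* pull in the whole module description */

	/ {
		model = "Hypothetical sl28 carrier";
		compatible = "kontron,sl28", "fsl,ls1028a";
	};

	/* reference an inherited node by label and flip it on */
	&enetc_port2 {
		status = "okay";
	};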
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
index ca409d907b36..dd69c5b821e9 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
@@ -225,6 +225,7 @@
&enetc_port1 {
phy-handle = <&qds_phy1>;
phy-connection-type = "rgmii-id";
+ status = "okay";
};
&sai1 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index afb55653850d..c2dc1232f93f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -177,9 +177,29 @@
status = "okay";
};
+&enetc_mdio_pf3 {
+ /* VSC8514 QSGMII quad PHY */
+ qsgmii_phy0: ethernet-phy@10 {
+ reg = <0x10>;
+ };
+
+ qsgmii_phy1: ethernet-phy@11 {
+ reg = <0x11>;
+ };
+
+ qsgmii_phy2: ethernet-phy@12 {
+ reg = <0x12>;
+ };
+
+ qsgmii_phy3: ethernet-phy@13 {
+ reg = <0x13>;
+ };
+};
+
&enetc_port0 {
phy-handle = <&sgmii_phy0>;
phy-connection-type = "sgmii";
+ status = "okay";
mdio {
#address-cells = <1>;
@@ -190,8 +210,49 @@
};
};
-&enetc_port1 {
- status = "disabled";
+&enetc_port2 {
+ status = "okay";
+};
+
+&mscc_felix {
+ status = "okay";
+};
+
+&mscc_felix_port0 {
+ label = "swp0";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy0>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port1 {
+ label = "swp1";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy1>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port2 {
+ label = "swp2";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy2>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port3 {
+ label = "swp3";
+ managed = "in-band-status";
+ phy-handle = <&qsgmii_phy3>;
+ phy-mode = "qsgmii";
+ status = "okay";
+};
+
+&mscc_felix_port4 {
+ ethernet = <&enetc_port2>;
+ status = "okay";
};
&sai4 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index c28a25b145ae..2a7f70b71149 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -290,6 +290,45 @@
status = "disabled";
};
+ dspi0: spi@2100000 {
+ compatible = "fsl,ls1028a-dspi", "fsl,ls1021a-v1.0-dspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2100000 0x0 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "dspi";
+ clocks = <&clockgen 4 1>;
+ spi-num-chipselects = <4>;
+ little-endian;
+ status = "disabled";
+ };
+
+ dspi1: spi@2110000 {
+ compatible = "fsl,ls1028a-dspi", "fsl,ls1021a-v1.0-dspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2110000 0x0 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "dspi";
+ clocks = <&clockgen 4 1>;
+ spi-num-chipselects = <4>;
+ little-endian;
+ status = "disabled";
+ };
+
+ dspi2: spi@2120000 {
+ compatible = "fsl,ls1028a-dspi", "fsl,ls1021a-v1.0-dspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2120000 0x0 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "dspi";
+ clocks = <&clockgen 4 1>;
+ spi-num-chipselects = <3>;
+ little-endian;
+ status = "disabled";
+ };
+
esdhc: mmc@2140000 {
compatible = "fsl,ls1028a-esdhc", "fsl,esdhc";
reg = <0x0 0x2140000 0x0 0x10000>;
@@ -484,6 +523,60 @@
status = "disabled";
};
+ pcie@3400000 {
+ compatible = "fsl,ls1028a-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x80 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
+ interrupt-names = "pme", "aer";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-viewport = <8>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x80 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
+ pcie@3500000 {
+ compatible = "fsl,ls1028a-pcie";
+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
+ 0x88 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pme", "aer";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-viewport = <8>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x88 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
smmu: iommu@5000000 {
compatible = "arm,mmu-500";
reg = <0 0x5000000 0 0x800000>;
@@ -756,7 +849,6 @@
reg = <0x01 0xf0000000 0x0 0x100000>;
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
msi-parent = <&its>;
device_type = "pci";
bus-range = <0x0 0x0>;
@@ -774,27 +866,115 @@
/* PF1: VF0-1 BAR0 - non-prefetchable memory */
0x82000000 0x0 0x00000000 0x1 0xf8210000 0x0 0x020000
/* PF1: VF0-1 BAR2 - prefetchable memory */
- 0xc2000000 0x0 0x00000000 0x1 0xf8230000 0x0 0x020000>;
+ 0xc2000000 0x0 0x00000000 0x1 0xf8230000 0x0 0x020000
+ /* BAR4 (PF5) - non-prefetchable memory */
+ 0x82000000 0x0 0x00000000 0x1 0xfc000000 0x0 0x400000>;
enetc_port0: ethernet@0,0 {
compatible = "fsl,enetc";
reg = <0x000000 0 0 0 0>;
+ status = "disabled";
};
+
enetc_port1: ethernet@0,1 {
compatible = "fsl,enetc";
reg = <0x000100 0 0 0 0>;
+ status = "disabled";
+ };
+
+ enetc_port2: ethernet@0,2 {
+ compatible = "fsl,enetc";
+ reg = <0x000200 0 0 0 0>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
+
enetc_mdio_pf3: mdio@0,3 {
compatible = "fsl,enetc-mdio";
reg = <0x000300 0 0 0 0>;
#address-cells = <1>;
#size-cells = <0>;
};
+
ethernet@0,4 {
compatible = "fsl,enetc-ptp";
reg = <0x000400 0 0 0 0>;
clocks = <&clockgen 4 0>;
little-endian;
+ fsl,extts-fifo;
+ };
+
+ mscc_felix: ethernet-switch@0,5 {
+ reg = <0x000500 0 0 0 0>;
+ /* IEP INT_B */
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* External ports */
+ mscc_felix_port0: port@0 {
+ reg = <0>;
+ status = "disabled";
+ };
+
+ mscc_felix_port1: port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ mscc_felix_port2: port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ mscc_felix_port3: port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+
+ /* Internal ports */
+ mscc_felix_port4: port@4 {
+ reg = <4>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ };
+ };
+
+ mscc_felix_port5: port@5 {
+ reg = <5>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+
+ enetc_port3: ethernet@0,6 {
+ compatible = "fsl,enetc";
+ reg = <0x000600 0 0 0 0>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
};
};
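
For readers decoding the new pcie nodes: with #address-cells = <3> on the PCI side and two address plus two size cells on the parent bus, each ranges entry is seven cells, <flags pci-addr-hi pci-addr-lo cpu-addr-hi cpu-addr-lo size-hi size-lo>. The flags cell selects the PCI address space: 0x81000000 is I/O space, 0x82000000 is 32-bit non-prefetchable memory, and 0xc2000000 (used in the integrated ENETC node above) adds the prefetchable bit. The ls1028a entries, annotated:

	ranges = /* 64 KiB of PCI I/O space at CPU address 0x80_0001_0000 */
		 <0x81000000 0x0 0x00000000 0x80 0x00010000 0x0 0x00010000
		 /* 1 GiB of 32-bit non-prefetchable memory: PCI address
		  * 0x40000000 mapped at CPU address 0x80_4000_0000
		  */
		  0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>;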
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
index 4d77b345cebd..5633e59febc3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
@@ -83,6 +83,7 @@
};
&esdhc {
+ mmc-hs200-1_8v;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index 594566265e3d..36a799554620 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -393,6 +393,7 @@
reg = <0x0 0x2140000 0x0 0x10000>;
interrupts = <0 28 0x4>; /* Level high type */
clock-frequency = <0>;
+ clocks = <&clockgen 2 1>;
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
little-endian;
@@ -493,6 +494,7 @@
<0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
status = "disabled";
};
@@ -518,6 +520,7 @@
<0000 0 0 2 &gic 0 0 0 115 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 3 &gic 0 0 0 116 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 4 &gic 0 0 0 117 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
status = "disabled";
};
@@ -543,6 +546,7 @@
<0000 0 0 2 &gic 0 0 0 120 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 3 &gic 0 0 0 121 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 4 &gic 0 0 0 122 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index f96d06da96be..3944ef16ec60 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -648,6 +648,7 @@
<0000 0 0 2 &gic 0 0 0 110 4>,
<0000 0 0 3 &gic 0 0 0 111 4>,
<0000 0 0 4 &gic 0 0 0 112 4>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
status = "disabled";
};
@@ -669,6 +670,7 @@
<0000 0 0 2 &gic 0 0 0 115 4>,
<0000 0 0 3 &gic 0 0 0 116 4>,
<0000 0 0 4 &gic 0 0 0 117 4>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
status = "disabled";
};
@@ -690,6 +692,7 @@
<0000 0 0 2 &gic 0 0 0 120 4>,
<0000 0 0 3 &gic 0 0 0 121 4>,
<0000 0 0 4 &gic 0 0 0 122 4>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
status = "disabled";
};
@@ -711,6 +714,7 @@
<0000 0 0 2 &gic 0 0 0 125 4>,
<0000 0 0 3 &gic 0 0 0 126 4>,
<0000 0 0 4 &gic 0 0 0 127 4>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
index 071e21678f77..d87d16460875 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
@@ -59,6 +59,32 @@
#size-cells = <0>;
reg = <0x77>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c512";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,spd";
+ reg = <0x51>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,spd";
+ reg = <0x53>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c02";
+ reg = <0x57>;
+ };
+ };
+
i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -73,6 +99,17 @@
};
};
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ regulator@5c {
+ compatible = "lltc,ltc3882";
+ reg = <0x5c>;
+ };
+ };
+
i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
index 1a5acf62f23c..3b88e1efe4db 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
@@ -43,6 +43,21 @@
status = "okay";
};
+&fspi {
+ status = "okay";
+
+ mt35xu512aba0: flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ m25p,fast-read;
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ spi-rx-bus-width = <8>;
+ spi-tx-bus-width = <8>;
+ };
+};
+
&i2c0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
index 51615de102fe..22d0308eb13b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
@@ -84,7 +84,7 @@
mt35xu512aba0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "spansion,m25p80";
+ compatible = "jedec,spi-nor";
m25p,fast-read;
spi-max-frequency = <50000000>;
reg = <0>;
@@ -95,7 +95,7 @@
mt35xu512aba1: flash@1 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "spansion,m25p80";
+ compatible = "jedec,spi-nor";
m25p,fast-read;
spi-max-frequency = <50000000>;
reg = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index e5ee5591e52b..ae1b113ab162 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -834,6 +834,174 @@
status = "disabled";
};
+ pcie@3400000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x80 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ apio-wins = <8>;
+ ppio-wins = <8>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
+ pcie@3500000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
+ 0x88 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ apio-wins = <8>;
+ ppio-wins = <8>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
+ pcie@3600000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
+ 0x90 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ apio-wins = <256>;
+ ppio-wins = <24>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x82000000 0x0 0x40000000 0x90 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
+ pcie@3700000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
+ 0x98 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ apio-wins = <8>;
+ ppio-wins = <8>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x82000000 0x0 0x40000000 0x98 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
+ pcie@3800000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03800000 0x0 0x00100000 /* controller registers */
+ 0xa0 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ apio-wins = <256>;
+ ppio-wins = <24>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x82000000 0x0 0x40000000 0xa0 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
+ pcie@3900000 {
+ compatible = "fsl,lx2160a-pcie";
+ reg = <0x00 0x03900000 0x0 0x00100000 /* controller registers */
+ 0xa8 0x00000000 0x0 0x00001000>; /* configuration space */
+ reg-names = "csr_axi_slave", "config_axi_slave";
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "aer", "pme", "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ apio-wins = <8>;
+ ppio-wins = <8>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x82000000 0x0 0x40000000 0xa8 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 0 GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ iommu-map = <0 &smmu 0 1>; /* Fixed-up by bootloader */
+ status = "disabled";
+ };
+
smmu: iommu@5000000 {
compatible = "arm,mmu-500";
reg = <0 0x5000000 0 0x800000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
index 9e54747cf4e6..951e14a3de0e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
@@ -107,6 +107,8 @@
pinctrl-0 = <&pinctrl_fec1>;
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
+ phy-reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <10>;
fsl,magic-packet;
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 1e5e11592f7b..cc7152ecedd9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -68,6 +68,7 @@
nvmem-cells = <&cpu_speed_grade>;
nvmem-cell-names = "speed_grade";
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_1: cpu@1 {
@@ -80,6 +81,7 @@
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_2: cpu@2 {
@@ -92,6 +94,7 @@
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_3: cpu@3 {
@@ -104,6 +107,7 @@
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_L2: l2-cache0 {
@@ -204,6 +208,38 @@
arm,no-tick-in-suspend;
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tmu>;
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&A53_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
usbphynop1: usbphynop1 {
compatible = "usb-nop-xceiv";
clocks = <&clk IMX8MM_CLK_USB_PHY_REF>;
@@ -227,7 +263,8 @@
ranges = <0x0 0x0 0x0 0x3e000000>;
aips1: bus@30000000 {
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x301f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x30000000 0x30000000 0x400000>;
@@ -363,6 +400,13 @@
gpio-ranges = <&iomuxc 0 119 30>;
};
+ tmu: tmu@30260000 {
+ compatible = "fsl,imx8mm-tmu";
+ reg = <0x30260000 0x10000>;
+ clocks = <&clk IMX8MM_CLK_TMU_ROOT>;
+ #thermal-sensor-cells = <0>;
+ };
+
wdog1: watchdog@30280000 {
compatible = "fsl,imx8mm-wdt", "fsl,imx21-wdt";
reg = <0x30280000 0x10000>;
@@ -455,6 +499,8 @@
compatible = "fsl,sec-v4.0-pwrkey";
regmap = <&snvs>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SNVS_ROOT>;
+ clock-names = "snvs-pwrkey";
linux,keycode = <KEY_POWER>;
wakeup-source;
status = "disabled";
@@ -496,7 +542,8 @@
};
aips2: bus@30400000 {
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x305f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x30400000 0x30400000 0x400000>;
@@ -555,10 +602,12 @@
};
aips3: bus@30800000 {
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x309f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x30800000 0x30800000 0x400000>;
+ ranges = <0x30800000 0x30800000 0x400000>,
+ <0x8000000 0x8000000 0x10000000>;
ecspi1: spi@30820000 {
compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
@@ -760,6 +809,19 @@
status = "disabled";
};
+ flexspi: spi@30bb0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nxp,imx8mm-fspi";
+ reg = <0x30bb0000 0x10000>, <0x8000000 0x10000000>;
+ reg-names = "fspi_base", "fspi_mmap";
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_QSPI_ROOT>,
+ <&clk IMX8MM_CLK_QSPI_ROOT>;
+ clock-names = "fspi", "fspi_en";
+ status = "disabled";
+ };
+
sdma1: dma-controller@30bd0000 {
compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
reg = <0x30bd0000 0x10000>;
@@ -800,7 +862,8 @@
};
aips4: bus@32c00000 {
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x32df0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x32c00000 0x32c00000 0x400000>;
@@ -896,7 +959,6 @@
ddr-pmu@3d800000 {
compatible = "fsl,imx8mm-ddr-pmu", "fsl,imx8m-ddr-pmu";
reg = <0x3d800000 0x400000>;
- interrupt-parent = <&gic>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
};
};
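
Two conventions in the thermal-zones block above are easy to misread: temperature and hysteresis values are in millidegrees Celsius (85000 = 85°C passive throttling, 95000 = 95°C forced shutdown), and polling-delay-passive is the poll interval in milliseconds once the passive trip has fired. The cooling map works because each A53 node gained #cooling-cells = <2>: the two cells bound the usable cpufreq cooling states, and THERMAL_NO_LIMIT leaves both ends open. A hypothetical map that instead allows at most one OPP step of throttling would read:

	map0 {
		trip = <&cpu_alert0>;
		/* cooling states 0..1 only: mild throttling, never the deepest cuts */
		cooling-device = <&A53_0 0 1>, <&A53_1 0 1>,
				 <&A53_2 0 1>, <&A53_3 0 1>;
	};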
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
index 0d2ec4a2c7f2..85fc0aa38c4f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-evk.dtsi
@@ -102,6 +102,20 @@
};
};
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ pca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
&snvs_pwrkey {
status = "okay";
};
@@ -202,6 +216,13 @@
>;
};
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MN_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MN_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
pinctrl_reg_usdhc2_vmmc: regusdhc2vmmc {
fsl,pins = <
MX8MN_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index a44b5438e842..fa78f0163270 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
#include "imx8mn-pinfunc.h"
@@ -67,6 +68,7 @@
nvmem-cells = <&cpu_speed_grade>;
nvmem-cell-names = "speed_grade";
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_1: cpu@1 {
@@ -79,6 +81,7 @@
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_2: cpu@2 {
@@ -91,6 +94,7 @@
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_3: cpu@3 {
@@ -103,6 +107,7 @@
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
cpu-idle-states = <&cpu_pd_wait>;
+ #cooling-cells = <2>;
};
A53_L2: l2-cache0 {
@@ -116,7 +121,7 @@
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <850000>;
+ opp-microvolt = <950000>;
opp-supported-hw = <0xb00>, <0x7>;
clock-latency-ns = <150000>;
opp-suspend;
@@ -186,6 +191,38 @@
method = "smc";
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tmu>;
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&A53_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
@@ -203,8 +240,8 @@
ranges = <0x0 0x0 0x0 0x3e000000>;
aips1: bus@30000000 {
- compatible = "simple-bus";
- reg = <0x30000000 0x400000>;
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x301f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -274,6 +311,13 @@
gpio-ranges = <&iomuxc 0 119 30>;
};
+ tmu: tmu@30260000 {
+ compatible = "fsl,imx8mn-tmu", "fsl,imx8mm-tmu";
+ reg = <0x30260000 0x10000>;
+ clocks = <&clk IMX8MN_CLK_TMU_ROOT>;
+ #thermal-sensor-cells = <0>;
+ };
+
wdog1: watchdog@30280000 {
compatible = "fsl,imx8mn-wdt", "fsl,imx21-wdt";
reg = <0x30280000 0x10000>;
@@ -358,6 +402,7 @@
offset = <0x34>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_SNVS_ROOT>;
clock-names = "snvs-rtc";
};
@@ -365,6 +410,8 @@
compatible = "fsl,sec-v4.0-pwrkey";
regmap = <&snvs>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MN_CLK_SNVS_ROOT>;
+ clock-names = "snvs-pwrkey";
linux,keycode = <KEY_POWER>;
wakeup-source;
status = "disabled";
@@ -379,6 +426,16 @@
<&clk_ext3>, <&clk_ext4>;
clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
+ assigned-clocks = <&clk IMX8MN_CLK_NOC>,
+ <&clk IMX8MN_CLK_AUDIO_AHB>,
+ <&clk IMX8MN_CLK_IPG_AUDIO_ROOT>,
+ <&clk IMX8MN_SYS_PLL3>;
+ assigned-clock-parents = <&clk IMX8MN_SYS_PLL3_OUT>,
+ <&clk IMX8MN_SYS_PLL1_800M>;
+ assigned-clock-rates = <0>,
+ <400000000>,
+ <400000000>,
+ <600000000>;
};
src: reset-controller@30390000 {
@@ -390,8 +447,8 @@
};
aips2: bus@30400000 {
- compatible = "simple-bus";
- reg = <0x30400000 0x400000>;
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x305f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -450,8 +507,8 @@
};
aips3: bus@30800000 {
- compatible = "simple-bus";
- reg = <0x30800000 0x400000>;
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x309f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -543,19 +600,19 @@
<&clk IMX8MN_CLK_IPG_ROOT>;
clock-names = "aclk", "ipg";
- sec_jr0: jr0@1000 {
+ sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr1: jr1@2000 {
+ sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr2: jr2@3000 {
+ sec_jr2: jr@3000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x3000 0x1000>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
@@ -696,8 +753,8 @@
};
aips4: bus@32c00000 {
- compatible = "simple-bus";
- reg = <0x32c00000 0x400000>;
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x32df0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
new file mode 100644
index 000000000000..3da1fff3d6fd
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2019 NXP
+ */
+
+/dts-v1/;
+
+#include "imx8mp.dtsi"
+
+/ {
+ model = "NXP i.MX8MPlus EVK board";
+ compatible = "fsl,imx8mp-evk", "fsl,imx8mp";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_led>;
+
+ status {
+ label = "yellow:status";
+ gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0xc0000000>,
+ <0x1 0x00000000 0 0xc0000000>;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy1>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ eee-broken-1000t;
+ reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ pca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart2 {
+ /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&usdhc2 {
+ assigned-clocks = <&clk IMX8MP_CLK_USDHC2>;
+ assigned-clock-rates = <400000000>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&usdhc3 {
+ assigned-clocks = <&clk IMX8MP_CLK_USDHC3>;
+ assigned-clock-rates = <400000000>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+
+ pinctrl_fec: fecgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x3
+ MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x3
+ MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x91
+ MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x91
+ MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x91
+ MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x91
+ MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x91
+ MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x1f
+ MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x19
+ >;
+ };
+
+ pinctrl_gpio_led: gpioledgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x19
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3
+ MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmc {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x49
+ MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x49
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d0
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d0
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2grp-100mhz {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x194
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d4
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d4
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2grp-200mhz {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x196
+ MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x1d6
+ MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x1d6
+ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
+ MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
+ MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2grp-gpio {
+ fsl,pins = <
+ MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x1c4
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d0
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d0
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d0
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d0
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d0
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d0
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d0
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d0
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d0
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp-100mhz {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x194
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d4
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d4
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d4
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d4
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d4
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d4
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d4
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d4
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d4
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp-200mhz {
+ fsl,pins = <
+ MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x196
+ MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x1d6
+ MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x1d6
+ MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x1d6
+ MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x1d6
+ MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x1d6
+ MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x1d6
+ MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x1d6
+ MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x1d6
+ MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x1d6
+ MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
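
In the fsl,pins tables above, each row pairs a mux define from the new imx8mp-pinfunc.h header (added below) with a raw pad-configuration value packing drive strength, slew, pull and hysteresis bits into one number; that is why the default, 100 MHz and 200 MHz SD/eMMC states differ only in those trailing values (0x1d0, 0x1d4, 0x1d6, stepping up the low drive/slew bits for higher clock rates). Bit 30 (0x40000000), as in the 0x400001c3 I2C entries, is not a pad bit at all: the i.MX pinctrl binding reserves it as the SION flag, telling the driver to force the input path on so the I2C block can read back its open-drain lines. Annotated:

	pinctrl_i2c3: i2c3grp {
		fsl,pins = <
			/* 0x400001c3 = SION flag (bit 30) | open-drain/pull/hysteresis pad bits */
			MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL		0x400001c3
			MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA		0x400001c3
		>;
	};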
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h
new file mode 100644
index 000000000000..da78f89b6c98
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h
@@ -0,0 +1,931 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019 NXP
+ */
+
+#ifndef __DTS_IMX8MP_PINFUNC_H
+#define __DTS_IMX8MP_PINFUNC_H
+
+/*
+ * The pin function ID is a tuple of
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ */
+#define MX8MP_IOMUXC_GPIO1_IO00__GPIO1_IO00 0x014 0x274 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO00__CCMSRCGPCMIX_ENET_PHY_REF_CLK_ROOT 0x014 0x274 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO00__MEDIAMIX_ISP_FL_TRIG_0 0x014 0x274 0x5D4 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO00__ANAMIX_REF_CLK_32K 0x014 0x274 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO00__CCMSRCGPCMIX_EXT_CLK1 0x014 0x274 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO00__SJC_FAIL 0x014 0x274 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO01__GPIO1_IO01 0x018 0x278 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO01__PWM1_OUT 0x018 0x278 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO01__MEDIAMIX_ISP_SHUTTER_TRIG_0 0x018 0x278 0x5DC 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO01__ANAMIX_REF_CLK_24M 0x018 0x278 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO01__CCMSRCGPCMIX_EXT_CLK2 0x018 0x278 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO01__SJC_ACTIVE 0x018 0x278 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO02__GPIO1_IO02 0x01C 0x27C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_B 0x01C 0x27C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO02__MEDIAMIX_ISP_FLASH_TRIG_0 0x01C 0x27C 0x000 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO02__WDOG1_WDOG_ANY 0x01C 0x27C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO02__SJC_DE_B 0x01C 0x27C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x020 0x280 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO03__USDHC1_VSELECT 0x020 0x280 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO03__MEDIAMIX_ISP_PRELIGHT_TRIG_0 0x020 0x280 0x000 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO03__SDMA1_EXT_EVENT00 0x020 0x280 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO03__ANAMIX_XTAL_OK 0x020 0x280 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO03__SJC_DONE 0x020 0x280 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO04__GPIO1_IO04 0x024 0x284 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0x024 0x284 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO04__MEDIAMIX_ISP_SHUTTER_OPEN_0 0x024 0x284 0x000 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO04__SDMA1_EXT_EVENT01 0x024 0x284 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO04__ANAMIX_XTAL_OK_LV 0x024 0x284 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO04__USDHC1_TEST_TRIG 0x024 0x284 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO05__GPIO1_IO05 0x028 0x288 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO05__M7_NMI 0x028 0x288 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO05__MEDIAMIX_ISP_FL_TRIG_1 0x028 0x288 0x5D8 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO05__CCMSRCGPCMIX_PMIC_READY 0x028 0x288 0x554 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO05__CCMSRCGPCMIX_INT_BOOT 0x028 0x288 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO05__USDHC2_TEST_TRIG 0x028 0x288 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO06__GPIO1_IO06 0x02C 0x28C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO06__ENET_QOS_MDC 0x02C 0x28C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO06__MEDIAMIX_ISP_SHUTTER_TRIG_1 0x02C 0x28C 0x5E0 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO06__USDHC1_CD_B 0x02C 0x28C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO06__CCMSRCGPCMIX_EXT_CLK3 0x02C 0x28C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO06__ECSPI1_TEST_TRIG 0x02C 0x28C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO07__GPIO1_IO07 0x030 0x290 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO07__ENET_QOS_MDIO 0x030 0x290 0x590 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO07__MEDIAMIX_ISP_FLASH_TRIG_1 0x030 0x290 0x000 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO07__USDHC1_WP 0x030 0x290 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO07__CCMSRCGPCMIX_EXT_CLK4 0x030 0x290 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO07__ECSPI2_TEST_TRIG 0x030 0x290 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__GPIO1_IO08 0x034 0x294 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__ENET_QOS_1588_EVENT0_IN 0x034 0x294 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__PWM1_OUT 0x034 0x294 0x000 0x2 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__MEDIAMIX_ISP_PRELIGHT_TRIG_1 0x034 0x294 0x000 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__ENET_QOS_1588_EVENT0_AUX_IN 0x034 0x294 0x000 0x4 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__USDHC2_RESET_B 0x034 0x294 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__CCMSRCGPCMIX_WAIT 0x034 0x294 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO08__FLEXSPI_TEST_TRIG 0x034 0x294 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__GPIO1_IO09 0x038 0x298 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__ENET_QOS_1588_EVENT0_OUT 0x038 0x298 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__PWM2_OUT 0x038 0x298 0x000 0x2 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__MEDIAMIX_ISP_SHUTTER_OPEN_1 0x038 0x298 0x000 0x3 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__USDHC3_RESET_B 0x038 0x298 0x000 0x4 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__AUDIOMIX_EXT_EVENT00 0x038 0x298 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__CCMSRCGPCMIX_STOP 0x038 0x298 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO09__RAWNAND_TEST_TRIG 0x038 0x298 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO10__GPIO1_IO10 0x03C 0x29C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO10__HSIOMIX_usb1_OTG_ID 0x03C 0x29C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO10__PWM3_OUT 0x03C 0x29C 0x000 0x2 0x0
+#define MX8MP_IOMUXC_GPIO1_IO10__OCOTP_FUSE_LATCHED 0x03C 0x29C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO11__GPIO1_IO11 0x040 0x2A0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO11__HSIOMIX_usb2_OTG_ID 0x040 0x2A0 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO11__PWM2_OUT 0x040 0x2A0 0x000 0x2 0x0
+#define MX8MP_IOMUXC_GPIO1_IO11__USDHC3_VSELECT 0x040 0x2A0 0x000 0x4 0x0
+#define MX8MP_IOMUXC_GPIO1_IO11__CCMSRCGPCMIX_PMIC_READY 0x040 0x2A0 0x554 0x5 0x1
+#define MX8MP_IOMUXC_GPIO1_IO11__CCMSRCGPCMIX_OUT0 0x040 0x2A0 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO11__CAAM_RNG_OSC_OBS 0x040 0x2A0 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO12__GPIO1_IO12 0x044 0x2A4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO12__HSIOMIX_usb1_OTG_PWR 0x044 0x2A4 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO12__AUDIOMIX_EXT_EVENT01 0x044 0x2A4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO12__CCMSRCGPCMIX_OUT1 0x044 0x2A4 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO12__CSU_CSU_ALARM_AUT00 0x044 0x2A4 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO13__GPIO1_IO13 0x048 0x2A8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO13__HSIOMIX_usb1_OTG_OC 0x048 0x2A8 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO13__PWM2_OUT 0x048 0x2A8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO13__CCMSRCGPCMIX_OUT2 0x048 0x2A8 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO13__CSU_CSU_ALARM_AUT01 0x048 0x2A8 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO14__GPIO1_IO14 0x04C 0x2AC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO14__HSIOMIX_usb2_OTG_PWR 0x04C 0x2AC 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO14__USDHC3_CD_B 0x04C 0x2AC 0x608 0x4 0x0
+#define MX8MP_IOMUXC_GPIO1_IO14__PWM3_OUT 0x04C 0x2AC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO14__CCMSRCGPCMIX_CLKO1 0x04C 0x2AC 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO14__CSU_CSU_ALARM_AUT02 0x04C 0x2AC 0x000 0x7 0x0
+#define MX8MP_IOMUXC_GPIO1_IO15__GPIO1_IO15 0x050 0x2B0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_GPIO1_IO15__HSIOMIX_usb2_OTG_OC 0x050 0x2B0 0x000 0x1 0x0
+#define MX8MP_IOMUXC_GPIO1_IO15__USDHC3_WP 0x050 0x2B0 0x634 0x4 0x0
+#define MX8MP_IOMUXC_GPIO1_IO15__PWM4_OUT 0x050 0x2B0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_GPIO1_IO15__CCMSRCGPCMIX_CLKO2 0x050 0x2B0 0x000 0x6 0x0
+#define MX8MP_IOMUXC_GPIO1_IO15__CSU_CSU_INT_DEB 0x050 0x2B0 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x054 0x2B4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_MDC__AUDIOMIX_SAI6_TX_DATA00 0x054 0x2B4 0x000 0x2 0x0
+#define MX8MP_IOMUXC_ENET_MDC__GPIO1_IO16 0x054 0x2B4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_MDC__USDHC3_STROBE 0x054 0x2B4 0x630 0x6 0x0
+#define MX8MP_IOMUXC_ENET_MDC__SIM_M_HADDR15 0x054 0x2B4 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x058 0x2B8 0x590 0x0 0x1
+#define MX8MP_IOMUXC_ENET_MDIO__AUDIOMIX_SAI6_TX_SYNC 0x058 0x2B8 0x528 0x2 0x0
+#define MX8MP_IOMUXC_ENET_MDIO__GPIO1_IO17 0x058 0x2B8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_MDIO__USDHC3_DATA5 0x058 0x2B8 0x624 0x6 0x0
+#define MX8MP_IOMUXC_ENET_MDIO__SIM_M_HADDR16 0x058 0x2B8 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x05C 0x2BC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_TD3__AUDIOMIX_SAI6_TX_BCLK 0x05C 0x2BC 0x524 0x2 0x0
+#define MX8MP_IOMUXC_ENET_TD3__GPIO1_IO18 0x05C 0x2BC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_TD3__USDHC3_DATA6 0x05C 0x2BC 0x628 0x6 0x0
+#define MX8MP_IOMUXC_ENET_TD3__SIM_M_HADDR17 0x05C 0x2BC 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x060 0x2C0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_TD2__CCM_ENET_QOS_CLOCK_GENERATE_REF_CLK 0x060 0x2C0 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ENET_TD2__AUDIOMIX_SAI6_RX_DATA00 0x060 0x2C0 0x51C 0x2 0x0
+#define MX8MP_IOMUXC_ENET_TD2__GPIO1_IO19 0x060 0x2C0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_TD2__USDHC3_DATA7 0x060 0x2C0 0x62C 0x6 0x0
+#define MX8MP_IOMUXC_ENET_TD2__SIM_M_HADDR18 0x060 0x2C0 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x064 0x2C4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_TD1__AUDIOMIX_SAI6_RX_SYNC 0x064 0x2C4 0x520 0x2 0x0
+#define MX8MP_IOMUXC_ENET_TD1__GPIO1_IO20 0x064 0x2C4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_TD1__USDHC3_CD_B 0x064 0x2C4 0x608 0x6 0x1
+#define MX8MP_IOMUXC_ENET_TD1__SIM_M_HADDR19 0x064 0x2C4 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x068 0x2C8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_TD0__AUDIOMIX_SAI6_RX_BCLK 0x068 0x2C8 0x518 0x2 0x0
+#define MX8MP_IOMUXC_ENET_TD0__GPIO1_IO21 0x068 0x2C8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_TD0__USDHC3_WP 0x068 0x2C8 0x634 0x6 0x1
+#define MX8MP_IOMUXC_ENET_TD0__SIM_M_HADDR20 0x068 0x2C8 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x06C 0x2CC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_TX_CTL__AUDIOMIX_SAI6_MCLK 0x06C 0x2CC 0x514 0x2 0x0
+#define MX8MP_IOMUXC_ENET_TX_CTL__AUDIOMIX_SPDIF_OUT 0x06C 0x2CC 0x000 0x3 0x0
+#define MX8MP_IOMUXC_ENET_TX_CTL__GPIO1_IO22 0x06C 0x2CC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_TX_CTL__USDHC3_DATA0 0x06C 0x2CC 0x610 0x6 0x0
+#define MX8MP_IOMUXC_ENET_TX_CTL__SIM_M_HADDR21 0x06C 0x2CC 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x070 0x2D0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_TXC__ENET_QOS_TX_ER 0x070 0x2D0 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ENET_TXC__AUDIOMIX_SAI7_TX_DATA00 0x070 0x2D0 0x000 0x2 0x0
+#define MX8MP_IOMUXC_ENET_TXC__GPIO1_IO23 0x070 0x2D0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_TXC__USDHC3_DATA1 0x070 0x2D0 0x614 0x6 0x0
+#define MX8MP_IOMUXC_ENET_TXC__SIM_M_HADDR22 0x070 0x2D0 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x074 0x2D4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_RX_CTL__AUDIOMIX_SAI7_TX_SYNC 0x074 0x2D4 0x540 0x2 0x0
+#define MX8MP_IOMUXC_ENET_RX_CTL__AUDIOMIX_BIT_STREAM03 0x074 0x2D4 0x4CC 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RX_CTL__GPIO1_IO24 0x074 0x2D4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_RX_CTL__USDHC3_DATA2 0x074 0x2D4 0x618 0x6 0x0
+#define MX8MP_IOMUXC_ENET_RX_CTL__SIM_M_HADDR23 0x074 0x2D4 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x078 0x2D8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_RXC__ENET_QOS_RX_ER 0x078 0x2D8 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ENET_RXC__AUDIOMIX_SAI7_TX_BCLK 0x078 0x2D8 0x53C 0x2 0x0
+#define MX8MP_IOMUXC_ENET_RXC__AUDIOMIX_BIT_STREAM02 0x078 0x2D8 0x4C8 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RXC__GPIO1_IO25 0x078 0x2D8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_RXC__USDHC3_DATA3 0x078 0x2D8 0x61C 0x6 0x0
+#define MX8MP_IOMUXC_ENET_RXC__SIM_M_HADDR24 0x078 0x2D8 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x07C 0x2DC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_RD0__AUDIOMIX_SAI7_RX_DATA00 0x07C 0x2DC 0x534 0x2 0x0
+#define MX8MP_IOMUXC_ENET_RD0__AUDIOMIX_BIT_STREAM01 0x07C 0x2DC 0x4C4 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RD0__GPIO1_IO26 0x07C 0x2DC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_RD0__USDHC3_DATA4 0x07C 0x2DC 0x620 0x6 0x0
+#define MX8MP_IOMUXC_ENET_RD0__SIM_M_HADDR25 0x07C 0x2DC 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x080 0x2E0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_RD1__AUDIOMIX_SAI7_RX_SYNC 0x080 0x2E0 0x538 0x2 0x0
+#define MX8MP_IOMUXC_ENET_RD1__AUDIOMIX_BIT_STREAM00 0x080 0x2E0 0x4C0 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RD1__GPIO1_IO27 0x080 0x2E0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_RD1__USDHC3_RESET_B 0x080 0x2E0 0x000 0x6 0x0
+#define MX8MP_IOMUXC_ENET_RD1__SIM_M_HADDR26 0x080 0x2E0 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x084 0x2E4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_RD2__AUDIOMIX_SAI7_RX_BCLK 0x084 0x2E4 0x530 0x2 0x0
+#define MX8MP_IOMUXC_ENET_RD2__AUDIOMIX_CLK 0x084 0x2E4 0x000 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RD2__GPIO1_IO28 0x084 0x2E4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_RD2__USDHC3_CLK 0x084 0x2E4 0x604 0x6 0x0
+#define MX8MP_IOMUXC_ENET_RD2__SIM_M_HADDR27 0x084 0x2E4 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x088 0x2E8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_ENET_RD3__AUDIOMIX_SAI7_MCLK 0x088 0x2E8 0x52C 0x2 0x0
+#define MX8MP_IOMUXC_ENET_RD3__AUDIOMIX_SPDIF_IN 0x088 0x2E8 0x544 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RD3__GPIO1_IO29 0x088 0x2E8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ENET_RD3__USDHC3_CMD 0x088 0x2E8 0x60C 0x6 0x0
+#define MX8MP_IOMUXC_ENET_RD3__SIM_M_HADDR28 0x088 0x2E8 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x08C 0x2EC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_CLK__ENET1_MDC 0x08C 0x2EC 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SD1_CLK__I2C5_SCL 0x08C 0x2EC 0x5C4 0x3 0x0
+#define MX8MP_IOMUXC_SD1_CLK__UART1_DCE_TX 0x08C 0x2EC 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_CLK__UART1_DTE_RX 0x08C 0x2EC 0x5E8 0x4 0x0
+#define MX8MP_IOMUXC_SD1_CLK__GPIO2_IO00 0x08C 0x2EC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_CLK__SIM_M_HADDR29 0x08C 0x2EC 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x090 0x2F0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_CMD__ENET1_MDIO 0x090 0x2F0 0x57C 0x1 0x0
+#define MX8MP_IOMUXC_SD1_CMD__I2C5_SDA 0x090 0x2F0 0x5C8 0x3 0x0
+#define MX8MP_IOMUXC_SD1_CMD__UART1_DCE_RX 0x090 0x2F0 0x5E8 0x4 0x1
+#define MX8MP_IOMUXC_SD1_CMD__UART1_DTE_TX 0x090 0x2F0 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_CMD__GPIO2_IO01 0x090 0x2F0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_CMD__SIM_M_HADDR30 0x090 0x2F0 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x094 0x2F4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA0__ENET1_RGMII_TD1 0x094 0x2F4 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA0__I2C6_SCL 0x094 0x2F4 0x5CC 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA0__UART1_DCE_RTS 0x094 0x2F4 0x5E4 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA0__UART1_DTE_CTS 0x094 0x2F4 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA0__GPIO2_IO02 0x094 0x2F4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA0__SIM_M_HADDR31 0x094 0x2F4 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x098 0x2F8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA1__ENET1_RGMII_TD0 0x098 0x2F8 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA1__I2C6_SDA 0x098 0x2F8 0x5D0 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA1__UART1_DCE_CTS 0x098 0x2F8 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA1__UART1_DTE_RTS 0x098 0x2F8 0x5E4 0x4 0x1
+#define MX8MP_IOMUXC_SD1_DATA1__GPIO2_IO03 0x098 0x2F8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA1__SIM_M_HBURST00 0x098 0x2F8 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x09C 0x2FC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA2__ENET1_RGMII_RD0 0x09C 0x2FC 0x580 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA2__I2C4_SCL 0x09C 0x2FC 0x5BC 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA2__UART2_DCE_TX 0x09C 0x2FC 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA2__UART2_DTE_RX 0x09C 0x2FC 0x5F0 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA2__GPIO2_IO04 0x09C 0x2FC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA2__SIM_M_HBURST01 0x09C 0x2FC 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x0A0 0x300 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA3__ENET1_RGMII_RD1 0x0A0 0x300 0x584 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA3__I2C4_SDA 0x0A0 0x300 0x5C0 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA3__UART2_DCE_RX 0x0A0 0x300 0x5F0 0x4 0x1
+#define MX8MP_IOMUXC_SD1_DATA3__UART2_DTE_TX 0x0A0 0x300 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA3__GPIO2_IO05 0x0A0 0x300 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA3__SIM_M_HBURST02 0x0A0 0x300 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA4__USDHC1_DATA4 0x0A4 0x304 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA4__ENET1_RGMII_TX_CTL 0x0A4 0x304 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA4__I2C1_SCL 0x0A4 0x304 0x5A4 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA4__UART2_DCE_RTS 0x0A4 0x304 0x5EC 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA4__UART2_DTE_CTS 0x0A4 0x304 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06 0x0A4 0x304 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA4__SIM_M_HRESP 0x0A4 0x304 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA5__USDHC1_DATA5 0x0A8 0x308 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA5__ENET1_TX_ER 0x0A8 0x308 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA5__I2C1_SDA 0x0A8 0x308 0x5A8 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA5__UART2_DCE_CTS 0x0A8 0x308 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA5__UART2_DTE_RTS 0x0A8 0x308 0x5EC 0x4 0x1
+#define MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07 0x0A8 0x308 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA5__TPSMP_HDATA05 0x0A8 0x308 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA6__USDHC1_DATA6 0x0AC 0x30C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA6__ENET1_RGMII_RX_CTL 0x0AC 0x30C 0x588 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA6__I2C2_SCL 0x0AC 0x30C 0x5AC 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA6__UART3_DCE_TX 0x0AC 0x30C 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA6__UART3_DTE_RX 0x0AC 0x30C 0x5F8 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA6__GPIO2_IO08 0x0AC 0x30C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA6__TPSMP_HDATA06 0x0AC 0x30C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_DATA7__USDHC1_DATA7 0x0B0 0x310 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_DATA7__ENET1_RX_ER 0x0B0 0x310 0x58C 0x1 0x0
+#define MX8MP_IOMUXC_SD1_DATA7__I2C2_SDA 0x0B0 0x310 0x5B0 0x3 0x0
+#define MX8MP_IOMUXC_SD1_DATA7__UART3_DCE_RX 0x0B0 0x310 0x5F8 0x4 0x1
+#define MX8MP_IOMUXC_SD1_DATA7__UART3_DTE_TX 0x0B0 0x310 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_DATA7__GPIO2_IO09 0x0B0 0x310 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_DATA7__TPSMP_HDATA07 0x0B0 0x310 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_RESET_B__USDHC1_RESET_B 0x0B4 0x314 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_RESET_B__ENET1_TX_CLK 0x0B4 0x314 0x578 0x1 0x0
+#define MX8MP_IOMUXC_SD1_RESET_B__I2C3_SCL 0x0B4 0x314 0x5B4 0x3 0x0
+#define MX8MP_IOMUXC_SD1_RESET_B__UART3_DCE_RTS 0x0B4 0x314 0x5F4 0x4 0x0
+#define MX8MP_IOMUXC_SD1_RESET_B__UART3_DTE_CTS 0x0B4 0x314 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_RESET_B__GPIO2_IO10 0x0B4 0x314 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_RESET_B__ECSPI3_TEST_TRIG 0x0B4 0x314 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD1_STROBE__USDHC1_STROBE 0x0B8 0x318 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD1_STROBE__I2C3_SDA 0x0B8 0x318 0x5B8 0x3 0x0
+#define MX8MP_IOMUXC_SD1_STROBE__UART3_DCE_CTS 0x0B8 0x318 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD1_STROBE__UART3_DTE_RTS 0x0B8 0x318 0x5F4 0x4 0x1
+#define MX8MP_IOMUXC_SD1_STROBE__GPIO2_IO11 0x0B8 0x318 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD1_STROBE__USDHC3_TEST_TRIG 0x0B8 0x318 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD2_CD_B__USDHC2_CD_B 0x0BC 0x31C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_CD_B__GPIO2_IO12 0x0BC 0x31C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_CD_B__CCMSRCGPCMIX_TESTER_ACK 0x0BC 0x31C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x0C0 0x320 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_CLK__ECSPI2_SCLK 0x0C0 0x320 0x568 0x2 0x0
+#define MX8MP_IOMUXC_SD2_CLK__UART4_DCE_RX 0x0C0 0x320 0x600 0x3 0x0
+#define MX8MP_IOMUXC_SD2_CLK__UART4_DTE_TX 0x0C0 0x320 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SD2_CLK__GPIO2_IO13 0x0C0 0x320 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_CLK__CCMSRCGPCMIX_OBSERVE0 0x0C0 0x320 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_CLK__OBSERVE_MUX_OUT00 0x0C0 0x320 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD2_CMD__USDHC2_CMD 0x0C4 0x324 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_CMD__ECSPI2_MOSI 0x0C4 0x324 0x570 0x2 0x0
+#define MX8MP_IOMUXC_SD2_CMD__UART4_DCE_TX 0x0C4 0x324 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SD2_CMD__UART4_DTE_RX 0x0C4 0x324 0x600 0x3 0x1
+#define MX8MP_IOMUXC_SD2_CMD__AUDIOMIX_CLK 0x0C4 0x324 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SD2_CMD__GPIO2_IO14 0x0C4 0x324 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_CMD__CCMSRCGPCMIX_OBSERVE1 0x0C4 0x324 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_CMD__OBSERVE_MUX_OUT01 0x0C4 0x324 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD2_DATA0__USDHC2_DATA0 0x0C8 0x328 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_DATA0__I2C4_SDA 0x0C8 0x328 0x5C0 0x2 0x1
+#define MX8MP_IOMUXC_SD2_DATA0__UART2_DCE_RX 0x0C8 0x328 0x5F0 0x3 0x2
+#define MX8MP_IOMUXC_SD2_DATA0__UART2_DTE_TX 0x0C8 0x328 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SD2_DATA0__AUDIOMIX_BIT_STREAM00 0x0C8 0x328 0x4C0 0x4 0x1
+#define MX8MP_IOMUXC_SD2_DATA0__GPIO2_IO15 0x0C8 0x328 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_DATA0__CCMSRCGPCMIX_OBSERVE2 0x0C8 0x328 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_DATA0__OBSERVE_MUX_OUT02 0x0C8 0x328 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x0CC 0x32C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_DATA1__I2C4_SCL 0x0CC 0x32C 0x5BC 0x2 0x1
+#define MX8MP_IOMUXC_SD2_DATA1__UART2_DCE_TX 0x0CC 0x32C 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SD2_DATA1__UART2_DTE_RX 0x0CC 0x32C 0x5F0 0x3 0x3
+#define MX8MP_IOMUXC_SD2_DATA1__AUDIOMIX_BIT_STREAM01 0x0CC 0x32C 0x4C4 0x4 0x1
+#define MX8MP_IOMUXC_SD2_DATA1__GPIO2_IO16 0x0CC 0x32C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_DATA1__CCMSRCGPCMIX_WAIT 0x0CC 0x32C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_DATA1__OBSERVE_MUX_OUT03 0x0CC 0x32C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x0D0 0x330 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_DATA2__ECSPI2_SS0 0x0D0 0x330 0x574 0x2 0x0
+#define MX8MP_IOMUXC_SD2_DATA2__AUDIOMIX_SPDIF_OUT 0x0D0 0x330 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SD2_DATA2__AUDIOMIX_BIT_STREAM02 0x0D0 0x330 0x4C8 0x4 0x1
+#define MX8MP_IOMUXC_SD2_DATA2__GPIO2_IO17 0x0D0 0x330 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_DATA2__CCMSRCGPCMIX_STOP 0x0D0 0x330 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_DATA2__OBSERVE_MUX_OUT04 0x0D0 0x330 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x0D4 0x334 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_DATA3__ECSPI2_MISO 0x0D4 0x334 0x56C 0x2 0x0
+#define MX8MP_IOMUXC_SD2_DATA3__AUDIOMIX_SPDIF_IN 0x0D4 0x334 0x544 0x3 0x1
+#define MX8MP_IOMUXC_SD2_DATA3__AUDIOMIX_BIT_STREAM03 0x0D4 0x334 0x4CC 0x4 0x1
+#define MX8MP_IOMUXC_SD2_DATA3__GPIO2_IO18 0x0D4 0x334 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_DATA3__CCMSRCGPCMIX_EARLY_RESET 0x0D4 0x334 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_RESET_B__USDHC2_RESET_B 0x0D8 0x338 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x0D8 0x338 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_RESET_B__CCMSRCGPCMIX_SYSTEM_RESET 0x0D8 0x338 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_WP__USDHC2_WP 0x0DC 0x33C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SD2_WP__GPIO2_IO20 0x0DC 0x33C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SD2_WP__CORESIGHT_EVENTI 0x0DC 0x33C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SD2_WP__SIM_M_HMASTLOCK 0x0DC 0x33C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_ALE__RAWNAND_ALE 0x0E0 0x340 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_ALE__FLEXSPI_A_SCLK 0x0E0 0x340 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_ALE__AUDIOMIX_SAI3_TX_BCLK 0x0E0 0x340 0x4E8 0x2 0x0
+#define MX8MP_IOMUXC_NAND_ALE__MEDIAMIX_ISP_FL_TRIG_0 0x0E0 0x340 0x5D4 0x3 0x1
+#define MX8MP_IOMUXC_NAND_ALE__UART3_DCE_RX 0x0E0 0x340 0x5F8 0x4 0x2
+#define MX8MP_IOMUXC_NAND_ALE__UART3_DTE_TX 0x0E0 0x340 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_ALE__GPIO3_IO00 0x0E0 0x340 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_ALE__CORESIGHT_TRACE_CLK 0x0E0 0x340 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_ALE__SIM_M_HPROT00 0x0E0 0x340 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_CE0_B__RAWNAND_CE0_B 0x0E4 0x344 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_CE0_B__FLEXSPI_A_SS0_B 0x0E4 0x344 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_CE0_B__AUDIOMIX_SAI3_TX_DATA00 0x0E4 0x344 0x000 0x2 0x0
+#define MX8MP_IOMUXC_NAND_CE0_B__MEDIAMIX_ISP_SHUTTER_TRIG_0 0x0E4 0x344 0x5DC 0x3 0x1
+#define MX8MP_IOMUXC_NAND_CE0_B__UART3_DCE_TX 0x0E4 0x344 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_CE0_B__UART3_DTE_RX 0x0E4 0x344 0x5F8 0x4 0x3
+#define MX8MP_IOMUXC_NAND_CE0_B__GPIO3_IO01 0x0E4 0x344 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_CE0_B__CORESIGHT_TRACE_CTL 0x0E4 0x344 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_CE0_B__SIM_M_HPROT01 0x0E4 0x344 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_CE1_B__RAWNAND_CE1_B 0x0E8 0x348 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_CE1_B__FLEXSPI_A_SS1_B 0x0E8 0x348 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_CE1_B__USDHC3_STROBE 0x0E8 0x348 0x630 0x2 0x1
+#define MX8MP_IOMUXC_NAND_CE1_B__I2C4_SCL 0x0E8 0x348 0x5BC 0x4 0x2
+#define MX8MP_IOMUXC_NAND_CE1_B__GPIO3_IO02 0x0E8 0x348 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_CE1_B__CORESIGHT_TRACE00 0x0E8 0x348 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_CE1_B__SIM_M_HPROT02 0x0E8 0x348 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_CE2_B__RAWNAND_CE2_B 0x0EC 0x34C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_CE2_B__FLEXSPI_B_SS0_B 0x0EC 0x34C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_CE2_B__USDHC3_DATA5 0x0EC 0x34C 0x624 0x2 0x1
+#define MX8MP_IOMUXC_NAND_CE2_B__I2C4_SDA 0x0EC 0x34C 0x5C0 0x4 0x2
+#define MX8MP_IOMUXC_NAND_CE2_B__GPIO3_IO03 0x0EC 0x34C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_CE2_B__CORESIGHT_TRACE01 0x0EC 0x34C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_CE2_B__SIM_M_HPROT03 0x0EC 0x34C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_CE3_B__RAWNAND_CE3_B 0x0F0 0x350 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_CE3_B__FLEXSPI_B_SS1_B 0x0F0 0x350 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_CE3_B__USDHC3_DATA6 0x0F0 0x350 0x628 0x2 0x1
+#define MX8MP_IOMUXC_NAND_CE3_B__I2C3_SDA 0x0F0 0x350 0x5B8 0x4 0x1
+#define MX8MP_IOMUXC_NAND_CE3_B__GPIO3_IO04 0x0F0 0x350 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_CE3_B__CORESIGHT_TRACE02 0x0F0 0x350 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_CE3_B__SIM_M_HADDR00 0x0F0 0x350 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_CLE__RAWNAND_CLE 0x0F4 0x354 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_CLE__FLEXSPI_B_SCLK 0x0F4 0x354 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_CLE__USDHC3_DATA7 0x0F4 0x354 0x62C 0x2 0x1
+#define MX8MP_IOMUXC_NAND_CLE__UART4_DCE_RX 0x0F4 0x354 0x600 0x4 0x2
+#define MX8MP_IOMUXC_NAND_CLE__UART4_DTE_TX 0x0F4 0x354 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_CLE__GPIO3_IO05 0x0F4 0x354 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_CLE__CORESIGHT_TRACE03 0x0F4 0x354 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_CLE__SIM_M_HADDR01 0x0F4 0x354 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__RAWNAND_DATA00 0x0F8 0x358 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__FLEXSPI_A_DATA00 0x0F8 0x358 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__AUDIOMIX_SAI3_RX_DATA00 0x0F8 0x358 0x4E4 0x2 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__MEDIAMIX_ISP_FLASH_TRIG_0 0x0F8 0x358 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__UART4_DCE_RX 0x0F8 0x358 0x600 0x4 0x3
+#define MX8MP_IOMUXC_NAND_DATA00__UART4_DTE_TX 0x0F8 0x358 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__GPIO3_IO06 0x0F8 0x358 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__CORESIGHT_TRACE04 0x0F8 0x358 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA00__SIM_M_HADDR02 0x0F8 0x358 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__RAWNAND_DATA01 0x0FC 0x35C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__FLEXSPI_A_DATA01 0x0FC 0x35C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__AUDIOMIX_SAI3_TX_SYNC 0x0FC 0x35C 0x4EC 0x2 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__MEDIAMIX_ISP_PRELIGHT_TRIG_0 0x0FC 0x35C 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__UART4_DCE_TX 0x0FC 0x35C 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__UART4_DTE_RX 0x0FC 0x35C 0x600 0x4 0x4
+#define MX8MP_IOMUXC_NAND_DATA01__GPIO3_IO07 0x0FC 0x35C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__CORESIGHT_TRACE05 0x0FC 0x35C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA01__SIM_M_HADDR03 0x0FC 0x35C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA02__RAWNAND_DATA02 0x100 0x360 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA02__FLEXSPI_A_DATA02 0x100 0x360 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA02__USDHC3_CD_B 0x100 0x360 0x608 0x2 0x2
+#define MX8MP_IOMUXC_NAND_DATA02__UART4_DCE_CTS 0x100 0x360 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA02__UART4_DTE_RTS 0x100 0x360 0x5FC 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA02__I2C4_SDA 0x100 0x360 0x5C0 0x4 0x3
+#define MX8MP_IOMUXC_NAND_DATA02__GPIO3_IO08 0x100 0x360 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA02__CORESIGHT_TRACE06 0x100 0x360 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA02__SIM_M_HADDR04 0x100 0x360 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA03__RAWNAND_DATA03 0x104 0x364 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA03__FLEXSPI_A_DATA03 0x104 0x364 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA03__USDHC3_WP 0x104 0x364 0x634 0x2 0x2
+#define MX8MP_IOMUXC_NAND_DATA03__UART4_DCE_RTS 0x104 0x364 0x5FC 0x3 0x1
+#define MX8MP_IOMUXC_NAND_DATA03__UART4_DTE_CTS 0x104 0x364 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA03__MEDIAMIX_ISP_FL_TRIG_1 0x104 0x364 0x5D8 0x4 0x1
+#define MX8MP_IOMUXC_NAND_DATA03__GPIO3_IO09 0x104 0x364 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA03__CORESIGHT_TRACE07 0x104 0x364 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA03__SIM_M_HADDR05 0x104 0x364 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA04__RAWNAND_DATA04 0x108 0x368 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA04__FLEXSPI_B_DATA00 0x108 0x368 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA04__USDHC3_DATA0 0x108 0x368 0x610 0x2 0x1
+#define MX8MP_IOMUXC_NAND_DATA04__FLEXSPI_A_DATA04 0x108 0x368 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA04__MEDIAMIX_ISP_SHUTTER_TRIG_1 0x108 0x368 0x5E0 0x4 0x1
+#define MX8MP_IOMUXC_NAND_DATA04__GPIO3_IO10 0x108 0x368 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA04__CORESIGHT_TRACE08 0x108 0x368 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA04__SIM_M_HADDR06 0x108 0x368 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA05__RAWNAND_DATA05 0x10C 0x36C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA05__FLEXSPI_B_DATA01 0x10C 0x36C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA05__USDHC3_DATA1 0x10C 0x36C 0x614 0x2 0x1
+#define MX8MP_IOMUXC_NAND_DATA05__FLEXSPI_A_DATA05 0x10C 0x36C 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA05__MEDIAMIX_ISP_FLASH_TRIG_1 0x10C 0x36C 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_DATA05__GPIO3_IO11 0x10C 0x36C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA05__CORESIGHT_TRACE09 0x10C 0x36C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA05__SIM_M_HADDR07 0x10C 0x36C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA06__RAWNAND_DATA06 0x110 0x370 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA06__FLEXSPI_B_DATA02 0x110 0x370 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA06__USDHC3_DATA2 0x110 0x370 0x618 0x2 0x1
+#define MX8MP_IOMUXC_NAND_DATA06__FLEXSPI_A_DATA06 0x110 0x370 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA06__MEDIAMIX_ISP_PRELIGHT_TRIG_1 0x110 0x370 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_DATA06__GPIO3_IO12 0x110 0x370 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA06__CORESIGHT_TRACE10 0x110 0x370 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA06__SIM_M_HADDR08 0x110 0x370 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DATA07__RAWNAND_DATA07 0x114 0x374 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DATA07__FLEXSPI_B_DATA03 0x114 0x374 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DATA07__USDHC3_DATA3 0x114 0x374 0x61C 0x2 0x1
+#define MX8MP_IOMUXC_NAND_DATA07__FLEXSPI_A_DATA07 0x114 0x374 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DATA07__MEDIAMIX_ISP_SHUTTER_OPEN_1 0x114 0x374 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_DATA07__GPIO3_IO13 0x114 0x374 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DATA07__CORESIGHT_TRACE11 0x114 0x374 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DATA07__SIM_M_HADDR09 0x114 0x374 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_DQS__RAWNAND_DQS 0x118 0x378 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_DQS__FLEXSPI_A_DQS 0x118 0x378 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_DQS__AUDIOMIX_SAI3_MCLK 0x118 0x378 0x4E0 0x2 0x0
+#define MX8MP_IOMUXC_NAND_DQS__MEDIAMIX_ISP_SHUTTER_OPEN_0 0x118 0x378 0x000 0x3 0x0
+#define MX8MP_IOMUXC_NAND_DQS__I2C3_SCL 0x118 0x378 0x5B4 0x4 0x1
+#define MX8MP_IOMUXC_NAND_DQS__GPIO3_IO14 0x118 0x378 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_DQS__CORESIGHT_TRACE12 0x118 0x378 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_DQS__SIM_M_HADDR10 0x118 0x378 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_RE_B__RAWNAND_RE_B 0x11C 0x37C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_RE_B__FLEXSPI_B_DQS 0x11C 0x37C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_NAND_RE_B__USDHC3_DATA4 0x11C 0x37C 0x620 0x2 0x1
+#define MX8MP_IOMUXC_NAND_RE_B__UART4_DCE_TX 0x11C 0x37C 0x000 0x4 0x0
+#define MX8MP_IOMUXC_NAND_RE_B__UART4_DTE_RX 0x11C 0x37C 0x600 0x4 0x5
+#define MX8MP_IOMUXC_NAND_RE_B__GPIO3_IO15 0x11C 0x37C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_RE_B__CORESIGHT_TRACE13 0x11C 0x37C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_RE_B__SIM_M_HADDR11 0x11C 0x37C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_READY_B__RAWNAND_READY_B 0x120 0x380 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_READY_B__USDHC3_RESET_B 0x120 0x380 0x000 0x2 0x0
+#define MX8MP_IOMUXC_NAND_READY_B__I2C3_SCL 0x120 0x380 0x5B4 0x4 0x2
+#define MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x120 0x380 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_READY_B__CORESIGHT_TRACE14 0x120 0x380 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_READY_B__SIM_M_HADDR12 0x120 0x380 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_WE_B__RAWNAND_WE_B 0x124 0x384 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x124 0x384 0x604 0x2 0x1
+#define MX8MP_IOMUXC_NAND_WE_B__I2C3_SDA 0x124 0x384 0x5B8 0x4 0x2
+#define MX8MP_IOMUXC_NAND_WE_B__GPIO3_IO17 0x124 0x384 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_WE_B__CORESIGHT_TRACE15 0x124 0x384 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_WE_B__SIM_M_HADDR13 0x124 0x384 0x000 0x7 0x0
+#define MX8MP_IOMUXC_NAND_WP_B__RAWNAND_WP_B 0x128 0x388 0x000 0x0 0x0
+#define MX8MP_IOMUXC_NAND_WP_B__USDHC3_CMD 0x128 0x388 0x60C 0x2 0x1
+#define MX8MP_IOMUXC_NAND_WP_B__I2C4_SCL 0x128 0x388 0x5BC 0x4 0x3
+#define MX8MP_IOMUXC_NAND_WP_B__GPIO3_IO18 0x128 0x388 0x000 0x5 0x0
+#define MX8MP_IOMUXC_NAND_WP_B__CORESIGHT_EVENTO 0x128 0x388 0x000 0x6 0x0
+#define MX8MP_IOMUXC_NAND_WP_B__SIM_M_HADDR14 0x128 0x388 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI5_RXFS__AUDIOMIX_SAI5_RX_SYNC 0x12C 0x38C 0x508 0x0 0x0
+#define MX8MP_IOMUXC_SAI5_RXFS__AUDIOMIX_SAI1_TX_DATA00 0x12C 0x38C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI5_RXFS__PWM4_OUT 0x12C 0x38C 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI5_RXFS__I2C6_SCL 0x12C 0x38C 0x5CC 0x3 0x1
+#define MX8MP_IOMUXC_SAI5_RXFS__GPIO3_IO19 0x12C 0x38C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI5_RXC__AUDIOMIX_SAI5_RX_BCLK 0x130 0x390 0x4F4 0x0 0x0
+#define MX8MP_IOMUXC_SAI5_RXC__AUDIOMIX_SAI1_TX_DATA01 0x130 0x390 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI5_RXC__PWM3_OUT 0x130 0x390 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI5_RXC__I2C6_SDA 0x130 0x390 0x5D0 0x3 0x1
+#define MX8MP_IOMUXC_SAI5_RXC__AUDIOMIX_CLK 0x130 0x390 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20 0x130 0x390 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_SAI5_RX_DATA00 0x134 0x394 0x4F8 0x0 0x0
+#define MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_SAI1_TX_DATA02 0x134 0x394 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI5_RXD0__PWM2_OUT 0x134 0x394 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI5_RXD0__I2C5_SCL 0x134 0x394 0x5C4 0x3 0x1
+#define MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_BIT_STREAM00 0x134 0x394 0x4C0 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD0__GPIO3_IO21 0x134 0x394 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI5_RX_DATA01 0x138 0x398 0x4FC 0x0 0x0
+#define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI1_TX_DATA03 0x138 0x398 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI1_TX_SYNC 0x138 0x398 0x4D8 0x2 0x0
+#define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI5_TX_SYNC 0x138 0x398 0x510 0x3 0x0
+#define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_BIT_STREAM01 0x138 0x398 0x4C4 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD1__GPIO3_IO22 0x138 0x398 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI5_RXD1__CAN1_TX 0x138 0x398 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI5_RX_DATA02 0x13C 0x39C 0x500 0x0 0x0
+#define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI1_TX_DATA04 0x13C 0x39C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI1_TX_SYNC 0x13C 0x39C 0x4D8 0x2 0x1
+#define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI5_TX_BCLK 0x13C 0x39C 0x50C 0x3 0x0
+#define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_BIT_STREAM02 0x13C 0x39C 0x4C8 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD2__GPIO3_IO23 0x13C 0x39C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI5_RXD2__CAN1_RX 0x13C 0x39C 0x54C 0x6 0x0
+#define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI5_RX_DATA03 0x140 0x3A0 0x504 0x0 0x0
+#define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI1_TX_DATA05 0x140 0x3A0 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI1_TX_SYNC 0x140 0x3A0 0x4D8 0x2 0x2
+#define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI5_TX_DATA00 0x140 0x3A0 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_BIT_STREAM03 0x140 0x3A0 0x4CC 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD3__GPIO3_IO24 0x140 0x3A0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI5_RXD3__CAN2_TX 0x140 0x3A0 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SAI5_MCLK__AUDIOMIX_SAI5_MCLK 0x144 0x3A4 0x4F0 0x0 0x0
+#define MX8MP_IOMUXC_SAI5_MCLK__AUDIOMIX_SAI1_TX_BCLK 0x144 0x3A4 0x4D4 0x1 0x0
+#define MX8MP_IOMUXC_SAI5_MCLK__PWM1_OUT 0x144 0x3A4 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI5_MCLK__I2C5_SDA 0x144 0x3A4 0x5C8 0x3 0x1
+#define MX8MP_IOMUXC_SAI5_MCLK__GPIO3_IO25 0x144 0x3A4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI5_MCLK__CAN2_RX 0x144 0x3A4 0x550 0x6 0x0
+#define MX8MP_IOMUXC_SAI1_RXFS__AUDIOMIX_SAI1_RX_SYNC 0x148 0x3A8 0x4D0 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXFS__AUDIOMIX_SAI5_RX_SYNC 0x148 0x3A8 0x508 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXFS__ENET1_1588_EVENT0_IN 0x148 0x3A8 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_RXFS__GPIO4_IO00 0x148 0x3A8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXC__AUDIOMIX_SAI1_RX_BCLK 0x14C 0x3AC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXC__AUDIOMIX_SAI5_RX_BCLK 0x14C 0x3AC 0x4F4 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXC__AUDIOMIX_CLK 0x14C 0x3AC 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI1_RXC__ENET1_1588_EVENT0_OUT 0x14C 0x3AC 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_RXC__GPIO4_IO01 0x14C 0x3AC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_SAI1_RX_DATA00 0x150 0x3B0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_SAI5_RX_DATA00 0x150 0x3B0 0x4F8 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_SAI1_TX_DATA01 0x150 0x3B0 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_BIT_STREAM00 0x150 0x3B0 0x4C0 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD0__ENET1_1588_EVENT1_IN 0x150 0x3B0 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02 0x150 0x3B0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD1__AUDIOMIX_SAI1_RX_DATA01 0x154 0x3B4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD1__AUDIOMIX_SAI5_RX_DATA01 0x154 0x3B4 0x4FC 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXD1__AUDIOMIX_BIT_STREAM01 0x154 0x3B4 0x4C4 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD1__ENET1_1588_EVENT1_OUT 0x154 0x3B4 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_RXD1__GPIO4_IO03 0x154 0x3B4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD2__AUDIOMIX_SAI1_RX_DATA02 0x158 0x3B8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD2__AUDIOMIX_SAI5_RX_DATA02 0x158 0x3B8 0x500 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXD2__AUDIOMIX_BIT_STREAM02 0x158 0x3B8 0x4C8 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC 0x158 0x3B8 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_RXD2__GPIO4_IO04 0x158 0x3B8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD3__AUDIOMIX_SAI1_RX_DATA03 0x15C 0x3BC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD3__AUDIOMIX_SAI5_RX_DATA03 0x15C 0x3BC 0x504 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXD3__AUDIOMIX_BIT_STREAM03 0x15C 0x3BC 0x4CC 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO 0x15C 0x3BC 0x57C 0x4 0x1
+#define MX8MP_IOMUXC_SAI1_RXD3__GPIO4_IO05 0x15C 0x3BC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD4__AUDIOMIX_SAI1_RX_DATA04 0x160 0x3C0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD4__AUDIOMIX_SAI6_TX_BCLK 0x160 0x3C0 0x524 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXD4__AUDIOMIX_SAI6_RX_BCLK 0x160 0x3C0 0x518 0x2 0x1
+#define MX8MP_IOMUXC_SAI1_RXD4__ENET1_RGMII_RD0 0x160 0x3C0 0x580 0x4 0x1
+#define MX8MP_IOMUXC_SAI1_RXD4__GPIO4_IO06 0x160 0x3C0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD5__AUDIOMIX_SAI1_RX_DATA05 0x164 0x3C4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD5__AUDIOMIX_SAI6_TX_DATA00 0x164 0x3C4 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI1_RXD5__AUDIOMIX_SAI6_RX_DATA00 0x164 0x3C4 0x51C 0x2 0x1
+#define MX8MP_IOMUXC_SAI1_RXD5__AUDIOMIX_SAI1_RX_SYNC 0x164 0x3C4 0x4D0 0x3 0x1
+#define MX8MP_IOMUXC_SAI1_RXD5__ENET1_RGMII_RD1 0x164 0x3C4 0x584 0x4 0x1
+#define MX8MP_IOMUXC_SAI1_RXD5__GPIO4_IO07 0x164 0x3C4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD6__AUDIOMIX_SAI1_RX_DATA06 0x168 0x3C8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD6__AUDIOMIX_SAI6_TX_SYNC 0x168 0x3C8 0x528 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXD6__AUDIOMIX_SAI6_RX_SYNC 0x168 0x3C8 0x520 0x2 0x1
+#define MX8MP_IOMUXC_SAI1_RXD6__ENET1_RGMII_RD2 0x168 0x3C8 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_RXD6__GPIO4_IO08 0x168 0x3C8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_RXD7__AUDIOMIX_SAI1_RX_DATA07 0x16C 0x3CC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_RXD7__AUDIOMIX_SAI6_MCLK 0x16C 0x3CC 0x514 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_RXD7__AUDIOMIX_SAI1_TX_SYNC 0x16C 0x3CC 0x4D8 0x2 0x3
+#define MX8MP_IOMUXC_SAI1_RXD7__AUDIOMIX_SAI1_TX_DATA04 0x16C 0x3CC 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI1_RXD7__ENET1_RGMII_RD3 0x16C 0x3CC 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_RXD7__GPIO4_IO09 0x16C 0x3CC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXFS__AUDIOMIX_SAI1_TX_SYNC 0x170 0x3D0 0x4D8 0x0 0x4
+#define MX8MP_IOMUXC_SAI1_TXFS__AUDIOMIX_SAI5_TX_SYNC 0x170 0x3D0 0x510 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_TXFS__ENET1_RGMII_RX_CTL 0x170 0x3D0 0x588 0x4 0x1
+#define MX8MP_IOMUXC_SAI1_TXFS__GPIO4_IO10 0x170 0x3D0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXC__AUDIOMIX_SAI1_TX_BCLK 0x174 0x3D4 0x4D4 0x0 0x1
+#define MX8MP_IOMUXC_SAI1_TXC__AUDIOMIX_SAI5_TX_BCLK 0x174 0x3D4 0x50C 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_TXC__ENET1_RGMII_RXC 0x174 0x3D4 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXC__GPIO4_IO11 0x174 0x3D4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD0__AUDIOMIX_SAI1_TX_DATA00 0x178 0x3D8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD0__AUDIOMIX_SAI5_TX_DATA00 0x178 0x3D8 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI1_TXD0__ENET1_RGMII_TD0 0x178 0x3D8 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXD0__GPIO4_IO12 0x178 0x3D8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD1__AUDIOMIX_SAI1_TX_DATA01 0x17C 0x3DC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD1__AUDIOMIX_SAI5_TX_DATA01 0x17C 0x3DC 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI1_TXD1__ENET1_RGMII_TD1 0x17C 0x3DC 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXD1__GPIO4_IO13 0x17C 0x3DC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD2__AUDIOMIX_SAI1_TX_DATA02 0x180 0x3E0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD2__AUDIOMIX_SAI5_TX_DATA02 0x180 0x3E0 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI1_TXD2__ENET1_RGMII_TD2 0x180 0x3E0 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXD2__GPIO4_IO14 0x180 0x3E0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD3__AUDIOMIX_SAI1_TX_DATA03 0x184 0x3E4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD3__AUDIOMIX_SAI5_TX_DATA03 0x184 0x3E4 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI1_TXD3__ENET1_RGMII_TD3 0x184 0x3E4 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXD3__GPIO4_IO15 0x184 0x3E4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD4__AUDIOMIX_SAI1_TX_DATA04 0x188 0x3E8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD4__AUDIOMIX_SAI6_RX_BCLK 0x188 0x3E8 0x518 0x1 0x2
+#define MX8MP_IOMUXC_SAI1_TXD4__AUDIOMIX_SAI6_TX_BCLK 0x188 0x3E8 0x524 0x2 0x2
+#define MX8MP_IOMUXC_SAI1_TXD4__ENET1_RGMII_TX_CTL 0x188 0x3E8 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXD4__GPIO4_IO16 0x188 0x3E8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD5__AUDIOMIX_SAI1_TX_DATA05 0x18C 0x3EC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD5__AUDIOMIX_SAI6_RX_DATA00 0x18C 0x3EC 0x51C 0x1 0x2
+#define MX8MP_IOMUXC_SAI1_TXD5__AUDIOMIX_SAI6_TX_DATA00 0x18C 0x3EC 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI1_TXD5__ENET1_RGMII_TXC 0x18C 0x3EC 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXD5__GPIO4_IO17 0x18C 0x3EC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD6__AUDIOMIX_SAI1_TX_DATA06 0x190 0x3F0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD6__AUDIOMIX_SAI6_RX_SYNC 0x190 0x3F0 0x520 0x1 0x2
+#define MX8MP_IOMUXC_SAI1_TXD6__AUDIOMIX_SAI6_TX_SYNC 0x190 0x3F0 0x528 0x2 0x2
+#define MX8MP_IOMUXC_SAI1_TXD6__ENET1_RX_ER 0x190 0x3F0 0x58C 0x4 0x1
+#define MX8MP_IOMUXC_SAI1_TXD6__GPIO4_IO18 0x190 0x3F0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_TXD7__AUDIOMIX_SAI1_TX_DATA07 0x194 0x3F4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_TXD7__AUDIOMIX_SAI6_MCLK 0x194 0x3F4 0x514 0x1 0x2
+#define MX8MP_IOMUXC_SAI1_TXD7__AUDIOMIX_CLK 0x194 0x3F4 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI1_TXD7__ENET1_TX_ER 0x194 0x3F4 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI1_TXD7__GPIO4_IO19 0x194 0x3F4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI1_MCLK__AUDIOMIX_SAI1_MCLK 0x198 0x3F8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI1_MCLK__AUDIOMIX_SAI5_MCLK 0x198 0x3F8 0x4F0 0x1 0x1
+#define MX8MP_IOMUXC_SAI1_MCLK__AUDIOMIX_SAI1_TX_BCLK 0x198 0x3F8 0x4D4 0x2 0x2
+#define MX8MP_IOMUXC_SAI1_MCLK__ENET1_TX_CLK 0x198 0x3F8 0x578 0x4 0x1
+#define MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x198 0x3F8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_RXFS__AUDIOMIX_SAI2_RX_SYNC 0x19C 0x3FC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI2_RXFS__AUDIOMIX_SAI5_TX_SYNC 0x19C 0x3FC 0x510 0x1 0x2
+#define MX8MP_IOMUXC_SAI2_RXFS__AUDIOMIX_SAI5_TX_DATA01 0x19C 0x3FC 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI2_RXFS__AUDIOMIX_SAI2_RX_DATA01 0x19C 0x3FC 0x4DC 0x3 0x0
+#define MX8MP_IOMUXC_SAI2_RXFS__UART1_DCE_TX 0x19C 0x3FC 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI2_RXFS__UART1_DTE_RX 0x19C 0x3FC 0x5E8 0x4 0x2
+#define MX8MP_IOMUXC_SAI2_RXFS__GPIO4_IO21 0x19C 0x3FC 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_RXFS__AUDIOMIX_BIT_STREAM02 0x19C 0x3FC 0x4C8 0x6 0x4
+#define MX8MP_IOMUXC_SAI2_RXFS__SIM_M_HSIZE00 0x19C 0x3FC 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI2_RXC__AUDIOMIX_SAI2_RX_BCLK 0x1A0 0x400 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI2_RXC__AUDIOMIX_SAI5_TX_BCLK 0x1A0 0x400 0x50C 0x1 0x2
+#define MX8MP_IOMUXC_SAI2_RXC__CAN1_TX 0x1A0 0x400 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI2_RXC__UART1_DCE_RX 0x1A0 0x400 0x5E8 0x4 0x3
+#define MX8MP_IOMUXC_SAI2_RXC__UART1_DTE_TX 0x1A0 0x400 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x1A0 0x400 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_RXC__AUDIOMIX_BIT_STREAM01 0x1A0 0x400 0x4C4 0x6 0x4
+#define MX8MP_IOMUXC_SAI2_RXC__SIM_M_HSIZE01 0x1A0 0x400 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_SAI2_RX_DATA00 0x1A4 0x404 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_SAI5_TX_DATA00 0x1A4 0x404 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI2_RXD0__ENET_QOS_1588_EVENT2_OUT 0x1A4 0x404 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_SAI2_TX_DATA01 0x1A4 0x404 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI2_RXD0__UART1_DCE_RTS 0x1A4 0x404 0x5E4 0x4 0x2
+#define MX8MP_IOMUXC_SAI2_RXD0__UART1_DTE_CTS 0x1A4 0x404 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI2_RXD0__GPIO4_IO23 0x1A4 0x404 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_BIT_STREAM03 0x1A4 0x404 0x4CC 0x6 0x4
+#define MX8MP_IOMUXC_SAI2_RXD0__SIM_M_HSIZE02 0x1A4 0x404 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC 0x1A8 0x408 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI5_TX_DATA01 0x1A8 0x408 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI2_TXFS__ENET_QOS_1588_EVENT3_OUT 0x1A8 0x408 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_DATA01 0x1A8 0x408 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI2_TXFS__UART1_DCE_CTS 0x1A8 0x408 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI2_TXFS__UART1_DTE_RTS 0x1A8 0x408 0x5E4 0x4 0x3
+#define MX8MP_IOMUXC_SAI2_TXFS__GPIO4_IO24 0x1A8 0x408 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_BIT_STREAM02 0x1A8 0x408 0x4C8 0x6 0x5
+#define MX8MP_IOMUXC_SAI2_TXFS__SIM_M_HWRITE 0x1A8 0x408 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK 0x1AC 0x40C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI5_TX_DATA02 0x1AC 0x40C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI2_TXC__CAN1_RX 0x1AC 0x40C 0x54C 0x3 0x1
+#define MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25 0x1AC 0x40C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_BIT_STREAM01 0x1AC 0x40C 0x4C4 0x6 0x5
+#define MX8MP_IOMUXC_SAI2_TXC__SIM_M_HREADYOUT 0x1AC 0x40C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00 0x1B0 0x410 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI5_TX_DATA03 0x1B0 0x410 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__ENET_QOS_1588_EVENT2_IN 0x1B0 0x410 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__CAN2_TX 0x1B0 0x410 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__ENET_QOS_1588_EVENT2_AUX_IN 0x1B0 0x410 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__GPIO4_IO26 0x1B0 0x410 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__CCMSRCGPCMIX_BOOT_MODE04 0x1B0 0x410 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SAI2_TXD0__TPSMP_CLK 0x1B0 0x410 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK 0x1B4 0x414 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI5_MCLK 0x1B4 0x414 0x4F0 0x1 0x2
+#define MX8MP_IOMUXC_SAI2_MCLK__ENET_QOS_1588_EVENT3_IN 0x1B4 0x414 0x000 0x2 0x0
+#define MX8MP_IOMUXC_SAI2_MCLK__CAN2_RX 0x1B4 0x414 0x550 0x3 0x1
+#define MX8MP_IOMUXC_SAI2_MCLK__ENET_QOS_1588_EVENT3_AUX_IN 0x1B4 0x414 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI2_MCLK__GPIO4_IO27 0x1B4 0x414 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI3_MCLK 0x1B4 0x414 0x4E0 0x6 0x1
+#define MX8MP_IOMUXC_SAI2_MCLK__TPSMP_HDATA_DIR 0x1B4 0x414 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SAI3_RX_SYNC 0x1B8 0x418 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SAI2_RX_DATA01 0x1B8 0x418 0x4DC 0x1 0x1
+#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SAI5_RX_SYNC 0x1B8 0x418 0x508 0x2 0x2
+#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SAI3_RX_DATA01 0x1B8 0x418 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SPDIF_IN 0x1B8 0x418 0x544 0x4 0x2
+#define MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x1B8 0x418 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_BIT_STREAM00 0x1B8 0x418 0x4C0 0x6 0x4
+#define MX8MP_IOMUXC_SAI3_RXFS__TPSMP_HTRANS00 0x1B8 0x418 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI3_RXC__AUDIOMIX_SAI3_RX_BCLK 0x1BC 0x41C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI3_RXC__AUDIOMIX_SAI2_RX_DATA02 0x1BC 0x41C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI3_RXC__AUDIOMIX_SAI5_RX_BCLK 0x1BC 0x41C 0x4F4 0x2 0x2
+#define MX8MP_IOMUXC_SAI3_RXC__GPT1_CLK 0x1BC 0x41C 0x59C 0x3 0x0
+#define MX8MP_IOMUXC_SAI3_RXC__UART2_DCE_CTS 0x1BC 0x41C 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI3_RXC__UART2_DTE_RTS 0x1BC 0x41C 0x5EC 0x4 0x2
+#define MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0x1BC 0x41C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI3_RXC__AUDIOMIX_CLK 0x1BC 0x41C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SAI3_RXC__TPSMP_HTRANS01 0x1BC 0x41C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_SAI3_RX_DATA00 0x1C0 0x420 0x4E4 0x0 0x1
+#define MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_SAI2_RX_DATA03 0x1C0 0x420 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_SAI5_RX_DATA00 0x1C0 0x420 0x4F8 0x2 0x2
+#define MX8MP_IOMUXC_SAI3_RXD__UART2_DCE_RTS 0x1C0 0x420 0x5EC 0x4 0x3
+#define MX8MP_IOMUXC_SAI3_RXD__UART2_DTE_CTS 0x1C0 0x420 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI3_RXD__GPIO4_IO30 0x1C0 0x420 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_BIT_STREAM01 0x1C0 0x420 0x4C4 0x6 0x6
+#define MX8MP_IOMUXC_SAI3_RXD__TPSMP_HDATA00 0x1C0 0x420 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI3_TX_SYNC 0x1C4 0x424 0x4EC 0x0 0x1
+#define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI2_TX_DATA01 0x1C4 0x424 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI5_RX_DATA01 0x1C4 0x424 0x4FC 0x2 0x2
+#define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI3_TX_DATA01 0x1C4 0x424 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SAI3_TXFS__UART2_DCE_RX 0x1C4 0x424 0x5F0 0x4 0x4
+#define MX8MP_IOMUXC_SAI3_TXFS__UART2_DTE_TX 0x1C4 0x424 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI3_TXFS__GPIO4_IO31 0x1C4 0x424 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_BIT_STREAM03 0x1C4 0x424 0x4CC 0x6 0x5
+#define MX8MP_IOMUXC_SAI3_TXFS__TPSMP_HDATA01 0x1C4 0x424 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI3_TX_BCLK 0x1C8 0x428 0x4E8 0x0 0x1
+#define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI2_TX_DATA02 0x1C8 0x428 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI5_RX_DATA02 0x1C8 0x428 0x500 0x2 0x2
+#define MX8MP_IOMUXC_SAI3_TXC__GPT1_CAPTURE1 0x1C8 0x428 0x594 0x3 0x0
+#define MX8MP_IOMUXC_SAI3_TXC__UART2_DCE_TX 0x1C8 0x428 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI3_TXC__UART2_DTE_RX 0x1C8 0x428 0x5F0 0x4 0x5
+#define MX8MP_IOMUXC_SAI3_TXC__GPIO5_IO00 0x1C8 0x428 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_BIT_STREAM02 0x1C8 0x428 0x4C8 0x6 0x6
+#define MX8MP_IOMUXC_SAI3_TXC__TPSMP_HDATA02 0x1C8 0x428 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI3_TX_DATA00 0x1CC 0x42C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI2_TX_DATA03 0x1CC 0x42C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI5_RX_DATA03 0x1CC 0x42C 0x504 0x2 0x2
+#define MX8MP_IOMUXC_SAI3_TXD__GPT1_CAPTURE2 0x1CC 0x42C 0x598 0x3 0x0
+#define MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SPDIF_EXT_CLK 0x1CC 0x42C 0x548 0x4 0x0
+#define MX8MP_IOMUXC_SAI3_TXD__GPIO5_IO01 0x1CC 0x42C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI3_TXD__CCMSRCGPCMIX_BOOT_MODE05 0x1CC 0x42C 0x000 0x6 0x0
+#define MX8MP_IOMUXC_SAI3_TXD__TPSMP_HDATA03 0x1CC 0x42C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SAI3_MCLK__AUDIOMIX_SAI3_MCLK 0x1D0 0x430 0x4E0 0x0 0x2
+#define MX8MP_IOMUXC_SAI3_MCLK__PWM4_OUT 0x1D0 0x430 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SAI3_MCLK__AUDIOMIX_SAI5_MCLK 0x1D0 0x430 0x4F0 0x2 0x3
+#define MX8MP_IOMUXC_SAI3_MCLK__AUDIOMIX_SPDIF_OUT 0x1D0 0x430 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SAI3_MCLK__GPIO5_IO02 0x1D0 0x430 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SAI3_MCLK__AUDIOMIX_SPDIF_IN 0x1D0 0x430 0x544 0x6 0x3
+#define MX8MP_IOMUXC_SAI3_MCLK__TPSMP_HDATA04 0x1D0 0x430 0x000 0x7 0x0
+#define MX8MP_IOMUXC_SPDIF_TX__AUDIOMIX_SPDIF_OUT 0x1D4 0x434 0x000 0x0 0x0
+#define MX8MP_IOMUXC_SPDIF_TX__PWM3_OUT 0x1D4 0x434 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SPDIF_TX__I2C5_SCL 0x1D4 0x434 0x5C4 0x2 0x2
+#define MX8MP_IOMUXC_SPDIF_TX__GPT1_COMPARE1 0x1D4 0x434 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SPDIF_TX__CAN1_TX 0x1D4 0x434 0x000 0x4 0x0
+#define MX8MP_IOMUXC_SPDIF_TX__GPIO5_IO03 0x1D4 0x434 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SPDIF_RX__AUDIOMIX_SPDIF_IN 0x1D8 0x438 0x544 0x0 0x4
+#define MX8MP_IOMUXC_SPDIF_RX__PWM2_OUT 0x1D8 0x438 0x000 0x1 0x0
+#define MX8MP_IOMUXC_SPDIF_RX__I2C5_SDA 0x1D8 0x438 0x5C8 0x2 0x2
+#define MX8MP_IOMUXC_SPDIF_RX__GPT1_COMPARE2 0x1D8 0x438 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SPDIF_RX__CAN1_RX 0x1D8 0x438 0x54C 0x4 0x2
+#define MX8MP_IOMUXC_SPDIF_RX__GPIO5_IO04 0x1D8 0x438 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SPDIF_EXT_CLK__GPT1_COMPARE3 0x1DC 0x43C 0x000 0x3 0x0
+#define MX8MP_IOMUXC_SPDIF_EXT_CLK__GPIO5_IO05 0x1DC 0x43C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_SPDIF_EXT_CLK__AUDIOMIX_SPDIF_EXT_CLK 0x1DC 0x43C 0x548 0x0 0x1
+#define MX8MP_IOMUXC_SPDIF_EXT_CLK__PWM1_OUT 0x1DC 0x43C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x1E0 0x440 0x558 0x0 0x0
+#define MX8MP_IOMUXC_ECSPI1_SCLK__UART3_DCE_RX 0x1E0 0x440 0x5F8 0x1 0x4
+#define MX8MP_IOMUXC_ECSPI1_SCLK__UART3_DTE_TX 0x1E0 0x440 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI1_SCLK__I2C1_SCL 0x1E0 0x440 0x5A4 0x2 0x1
+#define MX8MP_IOMUXC_ECSPI1_SCLK__AUDIOMIX_SAI7_RX_SYNC 0x1E0 0x440 0x538 0x3 0x1
+#define MX8MP_IOMUXC_ECSPI1_SCLK__GPIO5_IO06 0x1E0 0x440 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI1_SCLK__TPSMP_HDATA08 0x1E0 0x440 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x1E4 0x444 0x560 0x0 0x0
+#define MX8MP_IOMUXC_ECSPI1_MOSI__UART3_DCE_TX 0x1E4 0x444 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI1_MOSI__UART3_DTE_RX 0x1E4 0x444 0x5F8 0x1 0x5
+#define MX8MP_IOMUXC_ECSPI1_MOSI__I2C1_SDA 0x1E4 0x444 0x5A8 0x2 0x1
+#define MX8MP_IOMUXC_ECSPI1_MOSI__AUDIOMIX_SAI7_RX_BCLK 0x1E4 0x444 0x530 0x3 0x1
+#define MX8MP_IOMUXC_ECSPI1_MOSI__GPIO5_IO07 0x1E4 0x444 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI1_MOSI__TPSMP_HDATA09 0x1E4 0x444 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x1E8 0x448 0x55C 0x0 0x0
+#define MX8MP_IOMUXC_ECSPI1_MISO__UART3_DCE_CTS 0x1E8 0x448 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI1_MISO__UART3_DTE_RTS 0x1E8 0x448 0x5F4 0x1 0x2
+#define MX8MP_IOMUXC_ECSPI1_MISO__I2C2_SCL 0x1E8 0x448 0x5AC 0x2 0x1
+#define MX8MP_IOMUXC_ECSPI1_MISO__AUDIOMIX_SAI7_RX_DATA00 0x1E8 0x448 0x534 0x3 0x1
+#define MX8MP_IOMUXC_ECSPI1_MISO__GPIO5_IO08 0x1E8 0x448 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI1_MISO__TPSMP_HDATA10 0x1E8 0x448 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ECSPI1_SS0__ECSPI1_SS0 0x1EC 0x44C 0x564 0x0 0x0
+#define MX8MP_IOMUXC_ECSPI1_SS0__UART3_DCE_RTS 0x1EC 0x44C 0x5F4 0x1 0x3
+#define MX8MP_IOMUXC_ECSPI1_SS0__UART3_DTE_CTS 0x1EC 0x44C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI1_SS0__I2C2_SDA 0x1EC 0x44C 0x5B0 0x2 0x1
+#define MX8MP_IOMUXC_ECSPI1_SS0__AUDIOMIX_SAI7_TX_SYNC 0x1EC 0x44C 0x540 0x3 0x1
+#define MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x1EC 0x44C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI1_SS0__TPSMP_HDATA11 0x1EC 0x44C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ECSPI2_SCLK__ECSPI2_SCLK 0x1F0 0x450 0x568 0x0 0x1
+#define MX8MP_IOMUXC_ECSPI2_SCLK__UART4_DCE_RX 0x1F0 0x450 0x600 0x1 0x6
+#define MX8MP_IOMUXC_ECSPI2_SCLK__UART4_DTE_TX 0x1F0 0x450 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI2_SCLK__I2C3_SCL 0x1F0 0x450 0x5B4 0x2 0x3
+#define MX8MP_IOMUXC_ECSPI2_SCLK__AUDIOMIX_SAI7_TX_BCLK 0x1F0 0x450 0x53C 0x3 0x1
+#define MX8MP_IOMUXC_ECSPI2_SCLK__GPIO5_IO10 0x1F0 0x450 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI2_SCLK__TPSMP_HDATA12 0x1F0 0x450 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ECSPI2_MOSI__ECSPI2_MOSI 0x1F4 0x454 0x570 0x0 0x1
+#define MX8MP_IOMUXC_ECSPI2_MOSI__UART4_DCE_TX 0x1F4 0x454 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI2_MOSI__UART4_DTE_RX 0x1F4 0x454 0x600 0x1 0x7
+#define MX8MP_IOMUXC_ECSPI2_MOSI__I2C3_SDA 0x1F4 0x454 0x5B8 0x2 0x3
+#define MX8MP_IOMUXC_ECSPI2_MOSI__AUDIOMIX_SAI7_TX_DATA00 0x1F4 0x454 0x000 0x3 0x0
+#define MX8MP_IOMUXC_ECSPI2_MOSI__GPIO5_IO11 0x1F4 0x454 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI2_MOSI__TPSMP_HDATA13 0x1F4 0x454 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ECSPI2_MISO__GPIO5_IO12 0x1F8 0x458 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI2_MISO__TPSMP_HDATA14 0x1F8 0x458 0x000 0x7 0x0
+#define MX8MP_IOMUXC_ECSPI2_MISO__ECSPI2_MISO 0x1F8 0x458 0x56C 0x0 0x1
+#define MX8MP_IOMUXC_ECSPI2_MISO__UART4_DCE_CTS 0x1F8 0x458 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI2_MISO__UART4_DTE_RTS 0x1F8 0x458 0x5FC 0x1 0x2
+#define MX8MP_IOMUXC_ECSPI2_MISO__I2C4_SCL 0x1F8 0x458 0x5BC 0x2 0x4
+#define MX8MP_IOMUXC_ECSPI2_MISO__AUDIOMIX_SAI7_MCLK 0x1F8 0x458 0x52C 0x3 0x1
+#define MX8MP_IOMUXC_ECSPI2_MISO__CCMSRCGPCMIX_CLKO1 0x1F8 0x458 0x000 0x4 0x0
+#define MX8MP_IOMUXC_ECSPI2_SS0__ECSPI2_SS0 0x1FC 0x45C 0x574 0x0 0x1
+#define MX8MP_IOMUXC_ECSPI2_SS0__UART4_DCE_RTS 0x1FC 0x45C 0x5FC 0x1 0x3
+#define MX8MP_IOMUXC_ECSPI2_SS0__UART4_DTE_CTS 0x1FC 0x45C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_ECSPI2_SS0__I2C4_SDA 0x1FC 0x45C 0x5C0 0x2 0x4
+#define MX8MP_IOMUXC_ECSPI2_SS0__CCMSRCGPCMIX_CLKO2 0x1FC 0x45C 0x000 0x4 0x0
+#define MX8MP_IOMUXC_ECSPI2_SS0__GPIO5_IO13 0x1FC 0x45C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_ECSPI2_SS0__TPSMP_HDATA15 0x1FC 0x45C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x200 0x460 0x5A4 0x0 0x2
+#define MX8MP_IOMUXC_I2C1_SCL__ENET_QOS_MDC 0x200 0x460 0x000 0x1 0x0
+#define MX8MP_IOMUXC_I2C1_SCL__ECSPI1_SCLK 0x200 0x460 0x558 0x3 0x1
+#define MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x200 0x460 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C1_SCL__TPSMP_HDATA16 0x200 0x460 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x204 0x464 0x5A8 0x0 0x2
+#define MX8MP_IOMUXC_I2C1_SDA__ENET_QOS_MDIO 0x204 0x464 0x590 0x1 0x2
+#define MX8MP_IOMUXC_I2C1_SDA__ECSPI1_MOSI 0x204 0x464 0x560 0x3 0x1
+#define MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x204 0x464 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C1_SDA__TPSMP_HDATA17 0x204 0x464 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x208 0x468 0x5AC 0x0 0x2
+#define MX8MP_IOMUXC_I2C2_SCL__ENET_QOS_1588_EVENT1_IN 0x208 0x468 0x000 0x1 0x0
+#define MX8MP_IOMUXC_I2C2_SCL__USDHC3_CD_B 0x208 0x468 0x608 0x2 0x3
+#define MX8MP_IOMUXC_I2C2_SCL__ECSPI1_MISO 0x208 0x468 0x55C 0x3 0x1
+#define MX8MP_IOMUXC_I2C2_SCL__ENET_QOS_1588_EVENT1_AUX_IN 0x208 0x468 0x000 0x4 0x0
+#define MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x208 0x468 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C2_SCL__TPSMP_HDATA18 0x208 0x468 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x20C 0x46C 0x5B0 0x0 0x2
+#define MX8MP_IOMUXC_I2C2_SDA__ENET_QOS_1588_EVENT1_OUT 0x20C 0x46C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_I2C2_SDA__USDHC3_WP 0x20C 0x46C 0x634 0x2 0x3
+#define MX8MP_IOMUXC_I2C2_SDA__ECSPI1_SS0 0x20C 0x46C 0x564 0x3 0x1
+#define MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x20C 0x46C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C2_SDA__TPSMP_HDATA19 0x20C 0x46C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x210 0x470 0x5B4 0x0 0x4
+#define MX8MP_IOMUXC_I2C3_SCL__PWM4_OUT 0x210 0x470 0x000 0x1 0x0
+#define MX8MP_IOMUXC_I2C3_SCL__GPT2_CLK 0x210 0x470 0x000 0x2 0x0
+#define MX8MP_IOMUXC_I2C3_SCL__ECSPI2_SCLK 0x210 0x470 0x568 0x3 0x2
+#define MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x210 0x470 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C3_SCL__TPSMP_HDATA20 0x210 0x470 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x214 0x474 0x5B8 0x0 0x4
+#define MX8MP_IOMUXC_I2C3_SDA__PWM3_OUT 0x214 0x474 0x000 0x1 0x0
+#define MX8MP_IOMUXC_I2C3_SDA__GPT3_CLK 0x214 0x474 0x000 0x2 0x0
+#define MX8MP_IOMUXC_I2C3_SDA__ECSPI2_MOSI 0x214 0x474 0x570 0x3 0x2
+#define MX8MP_IOMUXC_I2C3_SDA__GPIO5_IO19 0x214 0x474 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C3_SDA__TPSMP_HDATA21 0x214 0x474 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x218 0x478 0x5BC 0x0 0x5
+#define MX8MP_IOMUXC_I2C4_SCL__PWM2_OUT 0x218 0x478 0x000 0x1 0x0
+#define MX8MP_IOMUXC_I2C4_SCL__HSIOMIX_PCIE_CLKREQ_B 0x218 0x478 0x5A0 0x2 0x0
+#define MX8MP_IOMUXC_I2C4_SCL__ECSPI2_MISO 0x218 0x478 0x56C 0x3 0x2
+#define MX8MP_IOMUXC_I2C4_SCL__GPIO5_IO20 0x218 0x478 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C4_SCL__TPSMP_HDATA22 0x218 0x478 0x000 0x7 0x0
+#define MX8MP_IOMUXC_I2C4_SDA__I2C4_SDA 0x21C 0x47C 0x5C0 0x0 0x5
+#define MX8MP_IOMUXC_I2C4_SDA__PWM1_OUT 0x21C 0x47C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_I2C4_SDA__ECSPI2_SS0 0x21C 0x47C 0x574 0x3 0x2
+#define MX8MP_IOMUXC_I2C4_SDA__GPIO5_IO21 0x21C 0x47C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_I2C4_SDA__TPSMP_HDATA23 0x21C 0x47C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x220 0x480 0x5E8 0x0 0x4
+#define MX8MP_IOMUXC_UART1_RXD__UART1_DTE_TX 0x220 0x480 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART1_RXD__ECSPI3_SCLK 0x220 0x480 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART1_RXD__GPIO5_IO22 0x220 0x480 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART1_RXD__TPSMP_HDATA24 0x220 0x480 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x224 0x484 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART1_TXD__UART1_DTE_RX 0x224 0x484 0x5E8 0x0 0x5
+#define MX8MP_IOMUXC_UART1_TXD__ECSPI3_MOSI 0x224 0x484 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART1_TXD__GPIO5_IO23 0x224 0x484 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART1_TXD__TPSMP_HDATA25 0x224 0x484 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x228 0x488 0x5F0 0x0 0x6
+#define MX8MP_IOMUXC_UART2_RXD__UART2_DTE_TX 0x228 0x488 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART2_RXD__ECSPI3_MISO 0x228 0x488 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART2_RXD__GPT1_COMPARE3 0x228 0x488 0x000 0x3 0x0
+#define MX8MP_IOMUXC_UART2_RXD__GPIO5_IO24 0x228 0x488 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART2_RXD__TPSMP_HDATA26 0x228 0x488 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x22C 0x48C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART2_TXD__UART2_DTE_RX 0x22C 0x48C 0x5F0 0x0 0x7
+#define MX8MP_IOMUXC_UART2_TXD__ECSPI3_SS0 0x22C 0x48C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART2_TXD__GPT1_COMPARE2 0x22C 0x48C 0x000 0x3 0x0
+#define MX8MP_IOMUXC_UART2_TXD__GPIO5_IO25 0x22C 0x48C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART2_TXD__TPSMP_HDATA27 0x22C 0x48C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART3_RXD__UART3_DCE_RX 0x230 0x490 0x5F8 0x0 0x6
+#define MX8MP_IOMUXC_UART3_RXD__UART3_DTE_TX 0x230 0x490 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART3_RXD__UART1_DCE_CTS 0x230 0x490 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART3_RXD__UART1_DTE_RTS 0x230 0x490 0x5E4 0x1 0x4
+#define MX8MP_IOMUXC_UART3_RXD__USDHC3_RESET_B 0x230 0x490 0x000 0x2 0x0
+#define MX8MP_IOMUXC_UART3_RXD__GPT1_CAPTURE2 0x230 0x490 0x598 0x3 0x1
+#define MX8MP_IOMUXC_UART3_RXD__CAN2_TX 0x230 0x490 0x000 0x4 0x0
+#define MX8MP_IOMUXC_UART3_RXD__GPIO5_IO26 0x230 0x490 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART3_RXD__TPSMP_HDATA28 0x230 0x490 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART3_TXD__UART3_DCE_TX 0x234 0x494 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART3_TXD__UART3_DTE_RX 0x234 0x494 0x5F8 0x0 0x7
+#define MX8MP_IOMUXC_UART3_TXD__UART1_DCE_RTS 0x234 0x494 0x5E4 0x1 0x5
+#define MX8MP_IOMUXC_UART3_TXD__UART1_DTE_CTS 0x234 0x494 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART3_TXD__USDHC3_VSELECT 0x234 0x494 0x000 0x2 0x0
+#define MX8MP_IOMUXC_UART3_TXD__GPT1_CLK 0x234 0x494 0x59C 0x3 0x1
+#define MX8MP_IOMUXC_UART3_TXD__CAN2_RX 0x234 0x494 0x550 0x4 0x2
+#define MX8MP_IOMUXC_UART3_TXD__GPIO5_IO27 0x234 0x494 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART3_TXD__TPSMP_HDATA29 0x234 0x494 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART4_RXD__UART4_DCE_RX 0x238 0x498 0x600 0x0 0x8
+#define MX8MP_IOMUXC_UART4_RXD__UART4_DTE_TX 0x238 0x498 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART4_RXD__UART2_DCE_CTS 0x238 0x498 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART4_RXD__UART2_DTE_RTS 0x238 0x498 0x5EC 0x1 0x4
+#define MX8MP_IOMUXC_UART4_RXD__HSIOMIX_PCIE_CLKREQ_B 0x238 0x498 0x5A0 0x2 0x1
+#define MX8MP_IOMUXC_UART4_RXD__GPT1_COMPARE1 0x238 0x498 0x000 0x3 0x0
+#define MX8MP_IOMUXC_UART4_RXD__I2C6_SCL 0x238 0x498 0x5CC 0x4 0x2
+#define MX8MP_IOMUXC_UART4_RXD__GPIO5_IO28 0x238 0x498 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART4_RXD__TPSMP_HDATA30 0x238 0x498 0x000 0x7 0x0
+#define MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x23C 0x49C 0x000 0x0 0x0
+#define MX8MP_IOMUXC_UART4_TXD__UART4_DTE_RX 0x23C 0x49C 0x600 0x0 0x9
+#define MX8MP_IOMUXC_UART4_TXD__UART2_DCE_RTS 0x23C 0x49C 0x5EC 0x1 0x5
+#define MX8MP_IOMUXC_UART4_TXD__UART2_DTE_CTS 0x23C 0x49C 0x000 0x1 0x0
+#define MX8MP_IOMUXC_UART4_TXD__GPT1_CAPTURE1 0x23C 0x49C 0x594 0x3 0x1
+#define MX8MP_IOMUXC_UART4_TXD__I2C6_SDA 0x23C 0x49C 0x5D0 0x4 0x2
+#define MX8MP_IOMUXC_UART4_TXD__GPIO5_IO29 0x23C 0x49C 0x000 0x5 0x0
+#define MX8MP_IOMUXC_UART4_TXD__TPSMP_HDATA31 0x23C 0x49C 0x000 0x7 0x0
+#define MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_EARC_SCL 0x240 0x4A0 0x000 0x0 0x0
+#define MX8MP_IOMUXC_HDMI_DDC_SCL__I2C5_SCL 0x240 0x4A0 0x5C4 0x3 0x3
+#define MX8MP_IOMUXC_HDMI_DDC_SCL__CAN1_TX 0x240 0x4A0 0x000 0x4 0x0
+#define MX8MP_IOMUXC_HDMI_DDC_SCL__GPIO3_IO26 0x240 0x4A0 0x000 0x5 0x0
+#define MX8MP_IOMUXC_HDMI_DDC_SCL__AUDIOMIX_test_out00 0x240 0x4A0 0x000 0x6 0x0
+#define MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_EARC_SDA 0x244 0x4A4 0x000 0x0 0x0
+#define MX8MP_IOMUXC_HDMI_DDC_SDA__I2C5_SDA 0x244 0x4A4 0x5C8 0x3 0x3
+#define MX8MP_IOMUXC_HDMI_DDC_SDA__CAN1_RX 0x244 0x4A4 0x54C 0x4 0x3
+#define MX8MP_IOMUXC_HDMI_DDC_SDA__GPIO3_IO27 0x244 0x4A4 0x000 0x5 0x0
+#define MX8MP_IOMUXC_HDMI_DDC_SDA__AUDIOMIX_test_out01 0x244 0x4A4 0x000 0x6 0x0
+#define MX8MP_IOMUXC_HDMI_CEC__HDMIMIX_EARC_CEC 0x248 0x4A8 0x000 0x0 0x0
+#define MX8MP_IOMUXC_HDMI_CEC__I2C6_SCL 0x248 0x4A8 0x5CC 0x3 0x3
+#define MX8MP_IOMUXC_HDMI_CEC__CAN2_TX 0x248 0x4A8 0x000 0x4 0x0
+#define MX8MP_IOMUXC_HDMI_CEC__GPIO3_IO28 0x248 0x4A8 0x000 0x5 0x0
+#define MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_EARC_DC_HPD 0x24C 0x4AC 0x000 0x0 0x0
+#define MX8MP_IOMUXC_HDMI_HPD__AUDIOMIX_EARC_HDMI_HPD_O 0x24C 0x4AC 0x000 0x1 0x0
+#define MX8MP_IOMUXC_HDMI_HPD__I2C6_SDA 0x24C 0x4AC 0x5D0 0x3 0x3
+#define MX8MP_IOMUXC_HDMI_HPD__CAN2_RX 0x24C 0x4AC 0x550 0x4 0x3
+#define MX8MP_IOMUXC_HDMI_HPD__GPIO3_IO29 0x24C 0x4AC 0x000 0x5 0x0
+
+#endif /* __DTS_IMX8MP_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
new file mode 100644
index 000000000000..9b1616e59d58
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <dt-bindings/clock/imx8mp-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "imx8mp-pinfunc.h"
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ ethernet0 = &fec;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc3;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ A53_0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ clock-latency = <61036>;
+ clocks = <&clk IMX8MP_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A53_1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x1>;
+ clock-latency = <61036>;
+ clocks = <&clk IMX8MP_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A53_2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x2>;
+ clock-latency = <61036>;
+ clocks = <&clk IMX8MP_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A53_3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x3>;
+ clock-latency = <61036>;
+ clocks = <&clk IMX8MP_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A53_L2: l2-cache0 {
+ compatible = "cache";
+ };
+ };
+
+ osc_32k: clock-osc-32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "osc_32k";
+ };
+
+ osc_24m: clock-osc-24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "osc_24m";
+ };
+
+ clk_ext1: clock-ext1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext1";
+ };
+
+ clk_ext2: clock-ext2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext2";
+ };
+
+ clk_ext3: clock-ext3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext3";
+ };
+
+ clk_ext4: clock-ext4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext4";
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
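+ /* system counter frequency; only needed if firmware leaves CNTFRQ unprogrammed */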
+ clock-frequency = <8000000>;
+ arm,no-tick-in-suspend;
+ };
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x0 0x3e000000>;
+
+ aips1: bus@30000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x301f0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gpio1: gpio@30200000 {
+ compatible = "fsl,imx8mp-gpio", "fsl,imx35-gpio";
+ reg = <0x30200000 0x10000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_GPIO1_ROOT>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
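+ /* gpio-ranges: <&pinctrl gpio-offset pin-offset pin-count> */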
+ gpio-ranges = <&iomuxc 0 5 30>;
+ };
+
+ gpio2: gpio@30210000 {
+ compatible = "fsl,imx8mp-gpio", "fsl,imx35-gpio";
+ reg = <0x30210000 0x10000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_GPIO2_ROOT>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 35 21>;
+ };
+
+ gpio3: gpio@30220000 {
+ compatible = "fsl,imx8mp-gpio", "fsl,imx35-gpio";
+ reg = <0x30220000 0x10000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_GPIO3_ROOT>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
+ };
+
+ gpio4: gpio@30230000 {
+ compatible = "fsl,imx8mp-gpio", "fsl,imx35-gpio";
+ reg = <0x30230000 0x10000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_GPIO4_ROOT>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 82 32>;
+ };
+
+ gpio5: gpio@30240000 {
+ compatible = "fsl,imx8mp-gpio", "fsl,imx35-gpio";
+ reg = <0x30240000 0x10000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_GPIO5_ROOT>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&iomuxc 0 114 30>;
+ };
+
+ wdog1: watchdog@30280000 {
+ compatible = "fsl,imx8mp-wdt", "fsl,imx21-wdt";
+ reg = <0x30280000 0x10000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_WDOG1_ROOT>;
+ status = "disabled";
+ };
+
+ iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mp-iomuxc";
+ reg = <0x30330000 0x10000>;
+ };
+
+ gpr: iomuxc-gpr@30340000 {
+ compatible = "fsl,imx8mp-iomuxc-gpr", "syscon";
+ reg = <0x30340000 0x10000>;
+ };
+
+ ocotp: ocotp-ctrl@30350000 {
+ compatible = "fsl,imx8mp-ocotp", "syscon";
+ reg = <0x30350000 0x10000>;
+ clocks = <&clk IMX8MP_CLK_OCOTP_ROOT>;
+ /* For nvmem subnodes */
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_speed_grade: speed-grade@10 {
+ reg = <0x10 4>;
+ };
+ };
+
+ anatop: anatop@30360000 {
+ compatible = "fsl,imx8mp-anatop", "fsl,imx8mm-anatop",
+ "syscon";
+ reg = <0x30360000 0x10000>;
+ };
+
+ snvs: snvs@30370000 {
+ compatible = "fsl,sec-v4.0-mon","syscon", "simple-mfd";
+ reg = <0x30370000 0x10000>;
+
+ snvs_rtc: snvs-rtc-lp {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ regmap = <&snvs>;
+ offset = <0x34>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_SNVS_ROOT>;
+ clock-names = "snvs-rtc";
+ };
+
+ snvs_pwrkey: snvs-powerkey {
+ compatible = "fsl,sec-v4.0-pwrkey";
+ regmap = <&snvs>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_SNVS_ROOT>;
+ clock-names = "snvs-pwrkey";
+ linux,keycode = <KEY_POWER>;
+ wakeup-source;
+ status = "disabled";
+ };
+ };
+
+ clk: clock-controller@30380000 {
+ compatible = "fsl,imx8mp-ccm";
+ reg = <0x30380000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
+ <&clk_ext3>, <&clk_ext4>;
+ clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4";
+ assigned-clocks = <&clk IMX8MP_CLK_NOC>,
+ <&clk IMX8MP_CLK_NOC_IO>,
+ <&clk IMX8MP_CLK_GIC>,
+ <&clk IMX8MP_CLK_AUDIO_AHB>,
+ <&clk IMX8MP_CLK_AUDIO_AXI_SRC>,
+ <&clk IMX8MP_CLK_IPG_AUDIO_ROOT>,
+ <&clk IMX8MP_AUDIO_PLL1>,
+ <&clk IMX8MP_AUDIO_PLL2>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
+ <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL2_500M>,
+ <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <1000000000>,
+ <800000000>,
+ <500000000>,
+ <400000000>,
+ <800000000>,
+ <400000000>,
+ <393216000>,
+ <361267200>;
+ };
+
+ src: reset-controller@30390000 {
+ compatible = "fsl,imx8mp-src", "syscon";
+ reg = <0x30390000 0x10000>;
+ #reset-cells = <1>;
+ };
+ };
+
+ aips2: bus@30400000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x305f0000 0x400000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pwm1: pwm@30660000 {
+ compatible = "fsl,imx8mp-pwm", "fsl,imx27-pwm";
+ reg = <0x30660000 0x10000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_PWM1_ROOT>,
+ <&clk IMX8MP_CLK_PWM1_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@30670000 {
+ compatible = "fsl,imx8mp-pwm", "fsl,imx27-pwm";
+ reg = <0x30670000 0x10000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_PWM2_ROOT>,
+ <&clk IMX8MP_CLK_PWM2_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@30680000 {
+ compatible = "fsl,imx8mp-pwm", "fsl,imx27-pwm";
+ reg = <0x30680000 0x10000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_PWM3_ROOT>,
+ <&clk IMX8MP_CLK_PWM3_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@30690000 {
+ compatible = "fsl,imx8mp-pwm", "fsl,imx27-pwm";
+ reg = <0x30690000 0x10000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_PWM4_ROOT>,
+ <&clk IMX8MP_CLK_PWM4_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ system_counter: timer@306a0000 {
+ compatible = "nxp,sysctr-timer";
+ reg = <0x306a0000 0x20000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc_24m>;
+ clock-names = "per";
+ };
+ };
+
+ aips3: bus@30800000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x309f0000 0x400000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ ecspi1: spi@30820000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mp-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30820000 0x10000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_ECSPI1_ROOT>,
+ <&clk IMX8MP_CLK_ECSPI1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ ecspi2: spi@30830000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mp-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30830000 0x10000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_ECSPI2_ROOT>,
+ <&clk IMX8MP_CLK_ECSPI2_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ ecspi3: spi@30840000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mp-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30840000 0x10000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_ECSPI3_ROOT>,
+ <&clk IMX8MP_CLK_ECSPI3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart1: serial@30860000 {
+ compatible = "fsl,imx8mp-uart", "fsl,imx6q-uart";
+ reg = <0x30860000 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_UART1_ROOT>,
+ <&clk IMX8MP_CLK_UART1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 22 4 0>, <&sdma1 23 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart3: serial@30880000 {
+ compatible = "fsl,imx8mp-uart", "fsl,imx6q-uart";
+ reg = <0x30880000 0x10000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_UART3_ROOT>,
+ <&clk IMX8MP_CLK_UART3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 26 4 0>, <&sdma1 27 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart2: serial@30890000 {
+ compatible = "fsl,imx8mp-uart", "fsl,imx6q-uart";
+ reg = <0x30890000 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_UART2_ROOT>,
+ <&clk IMX8MP_CLK_UART2_ROOT>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ crypto: crypto@30900000 {
+ compatible = "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30900000 0x40000>;
+ ranges = <0 0x30900000 0x40000>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_AHB>,
+ <&clk IMX8MP_CLK_IPG_ROOT>;
+ clock-names = "aclk", "ipg";
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ i2c1: i2c@30a20000 {
+ compatible = "fsl,imx8mp-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30a20000 0x10000>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_I2C1_ROOT>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@30a30000 {
+ compatible = "fsl,imx8mp-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30a30000 0x10000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_I2C2_ROOT>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@30a40000 {
+ compatible = "fsl,imx8mp-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30a40000 0x10000>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_I2C3_ROOT>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@30a50000 {
+ compatible = "fsl,imx8mp-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30a50000 0x10000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_I2C4_ROOT>;
+ status = "disabled";
+ };
+
+ uart4: serial@30a60000 {
+ compatible = "fsl,imx8mp-uart", "fsl,imx6q-uart";
+ reg = <0x30a60000 0x10000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_UART4_ROOT>,
+ <&clk IMX8MP_CLK_UART4_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 28 4 0>, <&sdma1 29 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ i2c5: i2c@30ad0000 {
+ compatible = "fsl,imx8mp-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30ad0000 0x10000>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_I2C5_ROOT>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@30ae0000 {
+ compatible = "fsl,imx8mp-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30ae0000 0x10000>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_I2C6_ROOT>;
+ status = "disabled";
+ };
+
+ usdhc1: mmc@30b40000 {
+ compatible = "fsl,imx8mp-usdhc", "fsl,imx7d-usdhc";
+ reg = <0x30b40000 0x10000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_DUMMY>,
+ <&clk IMX8MP_CLK_NAND_USDHC_BUS>,
+ <&clk IMX8MP_CLK_USDHC1_ROOT>;
+ clock-names = "ipg", "ahb", "per";
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc2: mmc@30b50000 {
+ compatible = "fsl,imx8mp-usdhc", "fsl,imx7d-usdhc";
+ reg = <0x30b50000 0x10000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_DUMMY>,
+ <&clk IMX8MP_CLK_NAND_USDHC_BUS>,
+ <&clk IMX8MP_CLK_USDHC2_ROOT>;
+ clock-names = "ipg", "ahb", "per";
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc3: mmc@30b60000 {
+ compatible = "fsl,imx8mp-usdhc", "fsl,imx7d-usdhc";
+ reg = <0x30b60000 0x10000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_DUMMY>,
+ <&clk IMX8MP_CLK_NAND_USDHC_BUS>,
+ <&clk IMX8MP_CLK_USDHC3_ROOT>;
+ clock-names = "ipg", "ahb", "per";
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ sdma1: dma-controller@30bd0000 {
+ compatible = "fsl,imx8mp-sdma", "fsl,imx8mq-sdma";
+ reg = <0x30bd0000 0x10000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>,
+ <&clk IMX8MP_CLK_SDMA1_ROOT>;
+ clock-names = "ipg", "ahb";
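+ /* cells in a dma specifier: <request-id peripheral-type priority> */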
+ #dma-cells = <3>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+ };
+
+ fec: ethernet@30be0000 {
+ compatible = "fsl,imx8mp-fec", "fsl,imx8mq-fec";
+ reg = <0x30be0000 0x10000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_ENET1_ROOT>,
+ <&clk IMX8MP_CLK_SIM_ENET_ROOT>,
+ <&clk IMX8MP_CLK_ENET_TIMER>,
+ <&clk IMX8MP_CLK_ENET_REF>,
+ <&clk IMX8MP_CLK_ENET_PHY_REF>;
+ clock-names = "ipg", "ahb", "ptp",
+ "enet_clk_ref", "enet_out";
+ assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
+ <&clk IMX8MP_CLK_ENET_TIMER>,
+ <&clk IMX8MP_CLK_ENET_REF>,
+ <&clk IMX8MP_CLK_ENET_TIMER>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
+ <&clk IMX8MP_SYS_PLL2_100M>,
+ <&clk IMX8MP_SYS_PLL2_125M>;
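+ /* a rate of 0 leaves that clock's rate unchanged */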
+ assigned-clock-rates = <0>, <0>, <125000000>, <100000000>;
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ status = "disabled";
+ };
+ };
+
+ gic: interrupt-controller@38800000 {
+ compatible = "arm,gic-v3";
+ reg = <0x38800000 0x10000>,
+ <0x38880000 0xc0000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 94066d49d6ed..c9241abf0df3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -137,6 +137,8 @@
pinctrl-0 = <&pinctrl_fec1>;
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
+ phy-reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <10>;
fsl,magic-packet;
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index 764a4cb4e125..10eca94194be 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -55,6 +55,15 @@
wakeup-source;
linux,code = <KEY_HP>;
};
+
+ wwan-wake {
+ label = "WWAN_WAKE";
+ gpios = <&gpio3 8 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <8 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ linux,code = <KEY_PHONE>;
+ };
};
leds {
@@ -148,6 +157,55 @@
regulator-always-on;
};
+ wwan_codec: sound-wwan-codec {
+ compatible = "option,gtm601";
+ #sound-dai-cells = <0>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "sgtl5000";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "Speaker Ext",
+ "Line", "Line In Jack";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "Microphone Jack", "Mic Bias",
+ "LINE_IN", "Line In Jack",
+ "Headphone Jack", "HP_OUT",
+ "Speaker Ext", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&sgtl5000>;
+ clocks = <&clk IMX8MQ_CLK_SAI2_ROOT>;
+ frame-master;
+ bitclock-master;
+ };
+ };
+
+ sound-wwan {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "SIMCom SIM7100";
+ simple-audio-card,format = "dsp_a";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai6>;
+ };
+
+ telephony_link_master: simple-audio-card,codec {
+ sound-dai = <&wwan_codec>;
+ frame-master;
+ bitclock-master;
+ };
+ };
+
vibrator {
compatible = "gpio-vibrator";
pinctrl-names = "default";
@@ -169,6 +227,22 @@
};
};
+&A53_0 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2_reg>;
+};
+
&clk {
assigned-clocks = <&clk IMX8MQ_AUDIO_PLL1>, <&clk IMX8MQ_AUDIO_PLL2>;
assigned-clock-rates = <786432000>, <722534400>;
@@ -354,10 +428,10 @@
PDO_FIXED_USB_COMM |
PDO_FIXED_DUAL_ROLE |
PDO_FIXED_DATA_SWAP )>;
- sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM |
+ sink-pdos = <PDO_FIXED(5000, 3500, PDO_FIXED_USB_COMM |
PDO_FIXED_DUAL_ROLE |
PDO_FIXED_DATA_SWAP )
- PDO_VAR(5000, 3000, 3000)>;
+ PDO_VAR(5000, 5000, 3500)>;
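+ /* PDO_FIXED(mV, mA, flags); PDO_VAR(min-mV, max-mV, max-mA) */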
op-sink-microwatt = <10000000>;
ports {
@@ -426,6 +500,19 @@
vddio-supply = <&reg_3v3_p>;
};
+ sgtl5000: audio-codec@a {
+ compatible = "fsl,sgtl5000";
+ clocks = <&clk IMX8MQ_CLK_SAI2_ROOT>;
+ assigned-clocks = <&clk IMX8MQ_CLK_SAI2>;
+ assigned-clock-parents = <&clk IMX8MQ_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ #sound-dai-cells = <0>;
+ reg = <0x0a>;
+ VDDD-supply = <&reg_1v8_p>;
+ VDDIO-supply = <&reg_3v3_p>;
+ VDDA-supply = <&reg_3v3_p>;
+ };
+
touchscreen@5d {
compatible = "goodix,gt5688";
reg = <0x5d>;
@@ -441,11 +528,20 @@
VDDIO-supply = <&reg_1v8_p>;
};
+ proximity-sensor@60 {
+ compatible = "vishay,vcnl4040";
+ reg = <0x60>;
+ pinctrl-0 = <&pinctrl_prox>;
+ };
+
accel-gyro@6a {
compatible = "st,lsm9ds1-imu";
reg = <0x6a>;
vdd-supply = <&reg_3v3_p>;
vddio-supply = <&reg_3v3_p>;
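+ /* identity rotation with the sensor Z axis inverted */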
+ mount-matrix = "1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "-1";
};
};
@@ -508,6 +604,7 @@
MX8MQ_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x16
MX8MQ_IOMUXC_SAI2_RXC_GPIO4_IO22 0x16
MX8MQ_IOMUXC_SAI5_RXC_GPIO3_IO20 0x180 /* HP_DET */
+ MX8MQ_IOMUXC_NAND_DATA02_GPIO3_IO8 0x80 /* nWoWWAN */
>;
};
@@ -543,6 +640,12 @@
>;
};
+ pinctrl_prox: proxgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x80 /* prox intr */
+ >;
+ };
+
pinctrl_pwr_en: pwrengrp {
fsl,pins = <
MX8MQ_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x06
@@ -555,6 +658,25 @@
>;
};
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0xd6
+ MX8MQ_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0xd6
+ MX8MQ_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0xd6
+ MX8MQ_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0xd6
+ MX8MQ_IOMUXC_SAI2_MCLK_SAI2_MCLK 0xd6
+ >;
+ };
+
+ pinctrl_sai6: sai6grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI1_RXD5_SAI6_RX_DATA0 0xd6
+ MX8MQ_IOMUXC_SAI1_RXD6_SAI6_RX_SYNC 0xd6
+ MX8MQ_IOMUXC_SAI1_TXD4_SAI6_RX_BCLK 0xd6
+ MX8MQ_IOMUXC_SAI1_TXD5_SAI6_TX_DATA0 0xd6
+ >;
+ };
+
pinctrl_typec: typecgrp {
fsl,pins = <
MX8MQ_IOMUXC_NAND_DATA06_GPIO3_IO12 0x16
@@ -730,6 +852,25 @@
status = "okay";
};
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ assigned-clocks = <&clk IMX8MQ_CLK_SAI2>;
+ assigned-clock-parents = <&clk IMX8MQ_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ status = "okay";
+};
+
+&sai6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai6>;
+ assigned-clocks = <&clk IMX8MQ_CLK_SAI6>;
+ assigned-clock-parents = <&clk IMX8MQ_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ fsl,sai-synchronous-rx;
+ status = "okay";
+};
+
&uart1 { /* console */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -750,6 +891,7 @@
};
&usb3_phy0 {
+ vbus-supply = <&reg_5v_p>;
status = "okay";
};
@@ -808,7 +950,7 @@
bus-width = <4>;
vmmc-supply = <&reg_usdhc2_vmmc>;
power-supply = <&wifi_pwr_en>;
- non-removable;
+ broken-cd;
disable-wp;
cap-sdio-irq;
keep-power-in-suspend;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts b/arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts
index 3f2a489a4ad8..77ab568fae67 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-phanbell.dts
@@ -35,6 +35,16 @@
gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+
+ fan: gpio-fan {
+ compatible = "gpio-fan";
+ gpio-fan,speed-map = <0 0 8600 1>;
+ gpios = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+ #cooling-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_fan>;
+ status = "okay";
+ };
};
&A53_0 {
@@ -53,6 +63,53 @@
cpu-supply = <&buck2>;
};
+&cpu_thermal {
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_alert1: trip1 {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip3 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ fan_toggle0: trip4 {
+ temperature = <65000>;
+ hysteresis = <10000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&A53_0 0 1>; /* Exclude highest OPP */
+ };
+
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device =
+ <&A53_0 0 2>; /* Exclude two highest OPPs */
+ };
+
+ map4 {
+ trip = <&fan_toggle0>;
+ cooling-device = <&fan 0 1>;
+ };
+ };
+};
+
&i2c1 {
clock-frequency = <400000>;
pinctrl-names = "default";
@@ -201,6 +258,27 @@
};
};
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <10>;
+ phy-reset-post-delay = <50>;
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -254,6 +332,32 @@
};
&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MQ_IOMUXC_ENET_MDIO_ENET1_MDIO 0x23
+ MX8MQ_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MQ_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MQ_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MQ_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MQ_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MQ_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MQ_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MQ_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MQ_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MQ_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MQ_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MQ_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MQ_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19
+ >;
+ };
+
+ pinctrl_gpio_fan: gpiofangrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_CLE_GPIO3_IO5 0x16
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000007f
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 6a1e83922c71..75b384217a23 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -198,7 +198,7 @@
};
thermal-zones {
- cpu-thermal {
+ cpu_thermal: cpu-thermal {
polling-delay-passive = <250>;
polling-delay = <2000>;
thermal-sensors = <&tmu 0>;
@@ -290,7 +290,8 @@
dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>;
bus@30000000 { /* AIPS1 */
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x301f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x30000000 0x30000000 0x400000>;
@@ -521,7 +522,7 @@
status = "disabled";
};
- iomuxc: iomuxc@30330000 {
+ iomuxc: pinctrl@30330000 {
compatible = "fsl,imx8mq-iomuxc";
reg = <0x30330000 0x10000>;
};
@@ -574,6 +575,8 @@
compatible = "fsl,sec-v4.0-pwrkey";
regmap = <&snvs>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_SNVS_ROOT>;
+ clock-names = "snvs-pwrkey";
linux,keycode = <KEY_POWER>;
wakeup-source;
status = "disabled";
@@ -692,7 +695,8 @@
};
bus@30400000 { /* AIPS2 */
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x305f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x30400000 0x30400000 0x400000>;
@@ -751,7 +755,8 @@
};
bus@30800000 { /* AIPS3 */
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x309f0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x30800000 0x30800000 0x400000>,
@@ -1023,7 +1028,8 @@
};
bus@32c00000 { /* AIPS4 */
- compatible = "simple-bus";
+ compatible = "fsl,aips-bus", "simple-bus";
+ reg = <0x32df0000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x32c00000 0x32c00000 0x400000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
index fb5f752b15fe..e8ffb7590656 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
@@ -11,6 +11,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/pinctrl/pads-imx8qxp.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&gic>;
@@ -189,6 +190,11 @@
compatible = "fsl,imx8qxp-sc-wdt", "fsl,imx-sc-wdt";
timeout-sec = <60>;
};
+
+ tsens: thermal-sensor {
+ compatible = "fsl,imx8qxp-sc-thermal", "fsl,imx-sc-thermal";
+ #thermal-sensor-cells = <1>;
+ };
};
timer {
@@ -586,4 +592,37 @@
#clock-cells = <1>;
};
};
+
+ thermal_zones: thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tsens IMX_SC_R_SYSTEM>;
+
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <107000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <127000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/s32v234.dtsi b/arch/arm64/boot/dts/freescale/s32v234.dtsi
index e746b9c48f7a..ba0b5305d481 100644
--- a/arch/arm64/boot/dts/freescale/s32v234.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32v234.dtsi
@@ -104,7 +104,7 @@
interrupt-parent = <&gic>;
ranges;
- aips0: aips-bus@40000000 {
+ aips0: bus@40000000 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
@@ -120,7 +120,7 @@
};
};
- aips1: aips-bus@40080000 {
+ aips1: bus@40080000 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
index e794a12ba7c5..51d948323bfd 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
@@ -121,12 +121,12 @@
qspi_boot: partition@0 {
label = "Boot and fpga data";
- reg = <0x0 0x034B0000>;
+ reg = <0x0 0x03FE0000>;
};
- qspi_rootfs: partition@34B0000 {
+ qspi_rootfs: partition@3FE0000 {
label = "Root Filesystem - JFFS2";
- reg = <0x034B0000 0x0EB50000>;
+ reg = <0x03FE0000 0x0C020000>;
};
};
};
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
index f1b5127f0b89..3e5f2e7a040c 100644
--- a/arch/arm64/boot/dts/marvell/Makefile
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -2,6 +2,9 @@
# Mvebu SoC Family
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin-emmc.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin-v7.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin-v7-emmc.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-turris-mox.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-uDPU.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts
index bd9ed9dc9c3e..ec72a11ed80f 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts
@@ -11,6 +11,8 @@
* Schematic available at http://espressobin.net/wp-content/uploads/2017/08/ESPRESSObin_V5_Schematics.pdf
*/
+/dts-v1/;
+
#include "armada-3720-espressobin.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
index 6e876a6d9532..03733fd92732 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
@@ -11,6 +11,8 @@
* Schematic available at http://wiki.espressobin.net/tiki-download_file.php?fileId=200
*/
+/dts-v1/;
+
#include "armada-3720-espressobin.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
index 0f8405d085fd..8570c5f47d7d 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
@@ -11,6 +11,8 @@
* Schematic available at http://wiki.espressobin.net/tiki-download_file.php?fileId=200
*/
+/dts-v1/;
+
#include "armada-3720-espressobin.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
index 53b8ac55a7f3..42e992f9c8a5 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
@@ -7,12 +7,16 @@
*
*/
-/dts-v1/;
-
#include <dt-bindings/gpio/gpio.h>
#include "armada-372x.dtsi"
/ {
+ aliases {
+ ethernet0 = &eth0;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index a211a046b2f2..b90d78a5724b 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -367,6 +367,7 @@
pinctrl-0 = <&cp0_copper_eth_phy_reset>;
reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
reset-assert-us = <10000>;
+ reset-deassert-us = <10000>;
};
switch0: switch0@4 {
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
index 572e2610e0a3..cbcb210cb6d8 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
@@ -71,6 +71,7 @@
tx-fault-gpio = <&cp1_gpio1 26 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&cp1_sfpp0_pins>;
+ maximum-power-milliwatt = <2000>;
};
sfp_eth1: sfp-eth1 {
@@ -83,6 +84,7 @@
tx-fault-gpio = <&cp0_gpio2 30 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&cp1_sfpp1_pins &cp0_sfpp1_pins>;
+ maximum-power-milliwatt = <2000>;
};
sfp_eth3: sfp-eth3 {
@@ -95,6 +97,7 @@
tx-fault-gpio = <&cp0_gpio2 19 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&cp0_sfp_1g_pins &cp1_sfp_1g_pins>;
+ maximum-power-milliwatt = <2000>;
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
index 09849558a776..fcab5173fe67 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
@@ -53,4 +53,9 @@
cache-sets = <512>;
};
};
+
+ thermal-zones {
+ /delete-node/ ap-thermal-cpu2;
+ /delete-node/ ap-thermal-cpu3;
+ };
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi
index 840466e143b4..68782f161f12 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi
@@ -17,7 +17,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x000>;
enable-method = "psci";
#cooling-cells = <2>;
@@ -32,7 +32,7 @@
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x001>;
enable-method = "psci";
#cooling-cells = <2>;
@@ -47,7 +47,7 @@
};
cpu2: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x100>;
enable-method = "psci";
#cooling-cells = <2>;
@@ -62,7 +62,7 @@
};
cpu3: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a72", "arm,armv8";
+ compatible = "arm,cortex-a72";
reg = <0x101>;
enable-method = "psci";
#cooling-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 458bbc422a94..2f8967cb8717 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -8,3 +8,4 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-bananapi-bpi-r64.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 43307bad3f0d..2cd8b33886e5 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -278,7 +278,7 @@
interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
};
- scpsys: scpsys@10006000 {
+ scpsys: power-controller@10006000 {
compatible = "mediatek,mt2712-scpsys", "syscon";
#power-domain-cells = <1>;
reg = <0 0x10006000 0 0x1000>;
@@ -303,6 +303,12 @@
status = "disabled";
};
+ rtc: rtc@10011000 {
+ compatible = "mediatek,mt2712-rtc";
+ reg = <0 0x10011000 0 0x1000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_LOW>;
+ };
+
spis1: spi@10013000 {
compatible = "mediatek,mt2712-spi-slave";
reg = <0 0x10013000 0 0x100>;
diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
index 2b2a69c7567f..136ef9527a0d 100644
--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
@@ -157,7 +157,7 @@
};
};
- scpsys: scpsys@10006000 {
+ scpsys: power-controller@10006000 {
compatible = "mediatek,mt6797-scpsys";
#power-domain-cells = <1>;
reg = <0 0x10006000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index dac51e98204c..339dc9f88f43 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -230,7 +230,7 @@
#reset-cells = <1>;
};
- scpsys: scpsys@10006000 {
+ scpsys: power-controller@10006000 {
compatible = "mediatek,mt7622-scpsys",
"syscon";
#power-domain-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 8b4e806d5119..ccb8e88a60c5 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -331,6 +331,7 @@
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ arm,no-tick-in-suspend;
};
soc {
@@ -436,7 +437,7 @@
};
};
- scpsys: scpsys@10006000 {
+ scpsys: power-controller@10006000 {
compatible = "mediatek,mt8173-scpsys";
#power-domain-cells = <1>;
reg = <0 0x10006000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 124f9d3e09f5..97863adb7bc0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -74,6 +74,9 @@
reg = <0x000>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <84>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -82,6 +85,9 @@
reg = <0x001>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <84>;
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -90,6 +96,9 @@
reg = <0x002>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <84>;
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -98,6 +107,9 @@
reg = <0x003>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <84>;
+ #cooling-cells = <2>;
};
cpu4: cpu@100 {
@@ -106,6 +118,9 @@
reg = <0x100>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <211>;
+ #cooling-cells = <2>;
};
cpu5: cpu@101 {
@@ -114,6 +129,9 @@
reg = <0x101>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <211>;
+ #cooling-cells = <2>;
};
cpu6: cpu@102 {
@@ -122,6 +140,9 @@
reg = <0x102>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <211>;
+ #cooling-cells = <2>;
};
cpu7: cpu@103 {
@@ -130,6 +151,31 @@
reg = <0x103>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <211>;
+ #cooling-cells = <2>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP: cpu-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x00010001>;
+ entry-latency-us = <200>;
+ exit-latency-us = <200>;
+ min-residency-us = <800>;
+ };
+
+ CLUSTER_SLEEP: cluster-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x01010001>;
+ entry-latency-us = <250>;
+ exit-latency-us = <400>;
+ min-residency-us = <1300>;
+ };
};
};
@@ -255,6 +301,13 @@
#interrupt-cells = <2>;
};
+ watchdog: watchdog@10007000 {
+ compatible = "mediatek,mt8183-wdt",
+ "mediatek,mt6589-wdt";
+ reg = <0 0x10007000 0 0x100>;
+ #reset-cells = <1>;
+ };
+
apmixedsys: syscon@1000c000 {
compatible = "mediatek,mt8183-apmixedsys", "syscon";
reg = <0 0x1000c000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8516-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt8516-pinfunc.h
new file mode 100644
index 000000000000..73339bb48f0d
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8516-pinfunc.h
@@ -0,0 +1,663 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __DTS_MT8516_PINFUNC_H
+#define __DTS_MT8516_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
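+/* Each value ORs the pin number (MTK_PIN_NO, upper bits) with the mux function index. */
+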
+#define MT8516_PIN_0_EINT0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8516_PIN_0_EINT0__FUNC_PWM_B (MTK_PIN_NO(0) | 1)
+#define MT8516_PIN_0_EINT0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3)
+#define MT8516_PIN_0_EINT0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4)
+#define MT8516_PIN_0_EINT0__FUNC_SQICS (MTK_PIN_NO(0) | 6)
+#define MT8516_PIN_0_EINT0__FUNC_DBG_MON_A_6 (MTK_PIN_NO(0) | 7)
+
+#define MT8516_PIN_1_EINT1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8516_PIN_1_EINT1__FUNC_PWM_C (MTK_PIN_NO(1) | 1)
+#define MT8516_PIN_1_EINT1__FUNC_I2S2_DI (MTK_PIN_NO(1) | 3)
+#define MT8516_PIN_1_EINT1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4)
+#define MT8516_PIN_1_EINT1__FUNC_CONN_MCU_TDO (MTK_PIN_NO(1) | 5)
+#define MT8516_PIN_1_EINT1__FUNC_SQISO (MTK_PIN_NO(1) | 6)
+#define MT8516_PIN_1_EINT1__FUNC_DBG_MON_A_7 (MTK_PIN_NO(1) | 7)
+
+#define MT8516_PIN_2_EINT2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8516_PIN_2_EINT2__FUNC_CLKM0 (MTK_PIN_NO(2) | 1)
+#define MT8516_PIN_2_EINT2__FUNC_I2S2_LRCK (MTK_PIN_NO(2) | 3)
+#define MT8516_PIN_2_EINT2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4)
+#define MT8516_PIN_2_EINT2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(2) | 5)
+#define MT8516_PIN_2_EINT2__FUNC_SQISI (MTK_PIN_NO(2) | 6)
+#define MT8516_PIN_2_EINT2__FUNC_DBG_MON_A_8 (MTK_PIN_NO(2) | 7)
+
+#define MT8516_PIN_3_EINT3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8516_PIN_3_EINT3__FUNC_CLKM1 (MTK_PIN_NO(3) | 1)
+#define MT8516_PIN_3_EINT3__FUNC_SPI_MI (MTK_PIN_NO(3) | 3)
+#define MT8516_PIN_3_EINT3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4)
+#define MT8516_PIN_3_EINT3__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(3) | 5)
+#define MT8516_PIN_3_EINT3__FUNC_SQIWP (MTK_PIN_NO(3) | 6)
+#define MT8516_PIN_3_EINT3__FUNC_DBG_MON_A_9 (MTK_PIN_NO(3) | 7)
+
+#define MT8516_PIN_4_EINT4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8516_PIN_4_EINT4__FUNC_CLKM2 (MTK_PIN_NO(4) | 1)
+#define MT8516_PIN_4_EINT4__FUNC_SPI_MO (MTK_PIN_NO(4) | 3)
+#define MT8516_PIN_4_EINT4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4)
+#define MT8516_PIN_4_EINT4__FUNC_CONN_MCU_TCK (MTK_PIN_NO(4) | 5)
+#define MT8516_PIN_4_EINT4__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(4) | 6)
+#define MT8516_PIN_4_EINT4__FUNC_DBG_MON_A_10 (MTK_PIN_NO(4) | 7)
+
+#define MT8516_PIN_5_EINT5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8516_PIN_5_EINT5__FUNC_UCTS2 (MTK_PIN_NO(5) | 1)
+#define MT8516_PIN_5_EINT5__FUNC_SPI_CSB (MTK_PIN_NO(5) | 3)
+#define MT8516_PIN_5_EINT5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4)
+#define MT8516_PIN_5_EINT5__FUNC_CONN_MCU_TDI (MTK_PIN_NO(5) | 5)
+#define MT8516_PIN_5_EINT5__FUNC_CONN_TEST_CK (MTK_PIN_NO(5) | 6)
+#define MT8516_PIN_5_EINT5__FUNC_DBG_MON_A_11 (MTK_PIN_NO(5) | 7)
+
+#define MT8516_PIN_6_EINT6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8516_PIN_6_EINT6__FUNC_URTS2 (MTK_PIN_NO(6) | 1)
+#define MT8516_PIN_6_EINT6__FUNC_SPI_CLK (MTK_PIN_NO(6) | 3)
+#define MT8516_PIN_6_EINT6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4)
+#define MT8516_PIN_6_EINT6__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(6) | 5)
+#define MT8516_PIN_6_EINT6__FUNC_DBG_MON_A_12 (MTK_PIN_NO(6) | 7)
+
+#define MT8516_PIN_7_EINT7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8516_PIN_7_EINT7__FUNC_SQIRST (MTK_PIN_NO(7) | 1)
+#define MT8516_PIN_7_EINT7__FUNC_SDA1_0 (MTK_PIN_NO(7) | 3)
+#define MT8516_PIN_7_EINT7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4)
+#define MT8516_PIN_7_EINT7__FUNC_CONN_MCU_TMS (MTK_PIN_NO(7) | 5)
+#define MT8516_PIN_7_EINT7__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(7) | 6)
+#define MT8516_PIN_7_EINT7__FUNC_DBG_MON_A_13 (MTK_PIN_NO(7) | 7)
+
+#define MT8516_PIN_8_EINT8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8516_PIN_8_EINT8__FUNC_SQICK (MTK_PIN_NO(8) | 1)
+#define MT8516_PIN_8_EINT8__FUNC_CLKM3 (MTK_PIN_NO(8) | 2)
+#define MT8516_PIN_8_EINT8__FUNC_SCL1_0 (MTK_PIN_NO(8) | 3)
+#define MT8516_PIN_8_EINT8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4)
+#define MT8516_PIN_8_EINT8__FUNC_ANT_SEL0 (MTK_PIN_NO(8) | 5)
+#define MT8516_PIN_8_EINT8__FUNC_DBG_MON_A_14 (MTK_PIN_NO(8) | 7)
+
+#define MT8516_PIN_9_EINT9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8516_PIN_9_EINT9__FUNC_CLKM4 (MTK_PIN_NO(9) | 1)
+#define MT8516_PIN_9_EINT9__FUNC_SDA2_0 (MTK_PIN_NO(9) | 2)
+#define MT8516_PIN_9_EINT9__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3)
+#define MT8516_PIN_9_EINT9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4)
+#define MT8516_PIN_9_EINT9__FUNC_ANT_SEL1 (MTK_PIN_NO(9) | 5)
+#define MT8516_PIN_9_EINT9__FUNC_DBG_MON_A_15 (MTK_PIN_NO(9) | 7)
+
+#define MT8516_PIN_10_EINT10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8516_PIN_10_EINT10__FUNC_CLKM5 (MTK_PIN_NO(10) | 1)
+#define MT8516_PIN_10_EINT10__FUNC_SCL2_0 (MTK_PIN_NO(10) | 2)
+#define MT8516_PIN_10_EINT10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 3)
+#define MT8516_PIN_10_EINT10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4)
+#define MT8516_PIN_10_EINT10__FUNC_ANT_SEL2 (MTK_PIN_NO(10) | 5)
+#define MT8516_PIN_10_EINT10__FUNC_DBG_MON_A_16 (MTK_PIN_NO(10) | 7)
+
+#define MT8516_PIN_11_EINT11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8516_PIN_11_EINT11__FUNC_CLKM4 (MTK_PIN_NO(11) | 1)
+#define MT8516_PIN_11_EINT11__FUNC_PWM_C (MTK_PIN_NO(11) | 2)
+#define MT8516_PIN_11_EINT11__FUNC_CONN_TEST_CK (MTK_PIN_NO(11) | 3)
+#define MT8516_PIN_11_EINT11__FUNC_ANT_SEL3 (MTK_PIN_NO(11) | 4)
+#define MT8516_PIN_11_EINT11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 6)
+#define MT8516_PIN_11_EINT11__FUNC_DBG_MON_A_17 (MTK_PIN_NO(11) | 7)
+
+#define MT8516_PIN_12_EINT12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8516_PIN_12_EINT12__FUNC_CLKM5 (MTK_PIN_NO(12) | 1)
+#define MT8516_PIN_12_EINT12__FUNC_PWM_A (MTK_PIN_NO(12) | 2)
+#define MT8516_PIN_12_EINT12__FUNC_SPDIF_OUT (MTK_PIN_NO(12) | 3)
+#define MT8516_PIN_12_EINT12__FUNC_ANT_SEL4 (MTK_PIN_NO(12) | 4)
+#define MT8516_PIN_12_EINT12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 6)
+#define MT8516_PIN_12_EINT12__FUNC_DBG_MON_A_18 (MTK_PIN_NO(12) | 7)
+
+#define MT8516_PIN_13_EINT13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8516_PIN_13_EINT13__FUNC_TSF_IN (MTK_PIN_NO(13) | 3)
+#define MT8516_PIN_13_EINT13__FUNC_ANT_SEL5 (MTK_PIN_NO(13) | 4)
+#define MT8516_PIN_13_EINT13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 6)
+#define MT8516_PIN_13_EINT13__FUNC_DBG_MON_A_19 (MTK_PIN_NO(13) | 7)
+
+#define MT8516_PIN_14_EINT14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8516_PIN_14_EINT14__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(14) | 2)
+#define MT8516_PIN_14_EINT14__FUNC_TDM_RX_MCK (MTK_PIN_NO(14) | 3)
+#define MT8516_PIN_14_EINT14__FUNC_ANT_SEL1 (MTK_PIN_NO(14) | 4)
+#define MT8516_PIN_14_EINT14__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(14) | 5)
+#define MT8516_PIN_14_EINT14__FUNC_NCLE (MTK_PIN_NO(14) | 6)
+#define MT8516_PIN_14_EINT14__FUNC_DBG_MON_B_8 (MTK_PIN_NO(14) | 7)
+
+#define MT8516_PIN_15_EINT15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8516_PIN_15_EINT15__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(15) | 2)
+#define MT8516_PIN_15_EINT15__FUNC_TDM_RX_BCK (MTK_PIN_NO(15) | 3)
+#define MT8516_PIN_15_EINT15__FUNC_ANT_SEL2 (MTK_PIN_NO(15) | 4)
+#define MT8516_PIN_15_EINT15__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(15) | 5)
+#define MT8516_PIN_15_EINT15__FUNC_NCEB1 (MTK_PIN_NO(15) | 6)
+#define MT8516_PIN_15_EINT15__FUNC_DBG_MON_B_9 (MTK_PIN_NO(15) | 7)
+
+#define MT8516_PIN_16_EINT16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8516_PIN_16_EINT16__FUNC_I2S_8CH_BCK (MTK_PIN_NO(16) | 2)
+#define MT8516_PIN_16_EINT16__FUNC_TDM_RX_LRCK (MTK_PIN_NO(16) | 3)
+#define MT8516_PIN_16_EINT16__FUNC_ANT_SEL3 (MTK_PIN_NO(16) | 4)
+#define MT8516_PIN_16_EINT16__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(16) | 5)
+#define MT8516_PIN_16_EINT16__FUNC_NCEB0 (MTK_PIN_NO(16) | 6)
+#define MT8516_PIN_16_EINT16__FUNC_DBG_MON_B_10 (MTK_PIN_NO(16) | 7)
+
+#define MT8516_PIN_17_EINT17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8516_PIN_17_EINT17__FUNC_I2S_8CH_MCK (MTK_PIN_NO(17) | 2)
+#define MT8516_PIN_17_EINT17__FUNC_TDM_RX_DI (MTK_PIN_NO(17) | 3)
+#define MT8516_PIN_17_EINT17__FUNC_IDDIG (MTK_PIN_NO(17) | 4)
+#define MT8516_PIN_17_EINT17__FUNC_ANT_SEL4 (MTK_PIN_NO(17) | 5)
+#define MT8516_PIN_17_EINT17__FUNC_NREB (MTK_PIN_NO(17) | 6)
+#define MT8516_PIN_17_EINT17__FUNC_DBG_MON_B_11 (MTK_PIN_NO(17) | 7)
+
+#define MT8516_PIN_18_EINT18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8516_PIN_18_EINT18__FUNC_USB_DRVVBUS (MTK_PIN_NO(18) | 2)
+#define MT8516_PIN_18_EINT18__FUNC_I2S3_LRCK (MTK_PIN_NO(18) | 3)
+#define MT8516_PIN_18_EINT18__FUNC_CLKM1 (MTK_PIN_NO(18) | 4)
+#define MT8516_PIN_18_EINT18__FUNC_ANT_SEL3 (MTK_PIN_NO(18) | 5)
+#define MT8516_PIN_18_EINT18__FUNC_I2S2_BCK (MTK_PIN_NO(18) | 6)
+#define MT8516_PIN_18_EINT18__FUNC_DBG_MON_A_20 (MTK_PIN_NO(18) | 7)
+
+#define MT8516_PIN_19_EINT19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8516_PIN_19_EINT19__FUNC_UCTS1 (MTK_PIN_NO(19) | 1)
+#define MT8516_PIN_19_EINT19__FUNC_IDDIG (MTK_PIN_NO(19) | 2)
+#define MT8516_PIN_19_EINT19__FUNC_I2S3_BCK (MTK_PIN_NO(19) | 3)
+#define MT8516_PIN_19_EINT19__FUNC_CLKM2 (MTK_PIN_NO(19) | 4)
+#define MT8516_PIN_19_EINT19__FUNC_ANT_SEL4 (MTK_PIN_NO(19) | 5)
+#define MT8516_PIN_19_EINT19__FUNC_I2S2_DI (MTK_PIN_NO(19) | 6)
+#define MT8516_PIN_19_EINT19__FUNC_DBG_MON_A_21 (MTK_PIN_NO(19) | 7)
+
+#define MT8516_PIN_20_EINT20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8516_PIN_20_EINT20__FUNC_URTS1 (MTK_PIN_NO(20) | 1)
+#define MT8516_PIN_20_EINT20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 3)
+#define MT8516_PIN_20_EINT20__FUNC_CLKM3 (MTK_PIN_NO(20) | 4)
+#define MT8516_PIN_20_EINT20__FUNC_ANT_SEL5 (MTK_PIN_NO(20) | 5)
+#define MT8516_PIN_20_EINT20__FUNC_I2S2_LRCK (MTK_PIN_NO(20) | 6)
+#define MT8516_PIN_20_EINT20__FUNC_DBG_MON_A_22 (MTK_PIN_NO(20) | 7)
+
+#define MT8516_PIN_21_EINT21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8516_PIN_21_EINT21__FUNC_NRNB (MTK_PIN_NO(21) | 1)
+#define MT8516_PIN_21_EINT21__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 2)
+#define MT8516_PIN_21_EINT21__FUNC_I2S_8CH_DO4 (MTK_PIN_NO(21) | 3)
+#define MT8516_PIN_21_EINT21__FUNC_DBG_MON_B_31 (MTK_PIN_NO(21) | 7)
+
+#define MT8516_PIN_22_EINT22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8516_PIN_22_EINT22__FUNC_I2S_8CH_DO2 (MTK_PIN_NO(22) | 2)
+#define MT8516_PIN_22_EINT22__FUNC_TSF_IN (MTK_PIN_NO(22) | 3)
+#define MT8516_PIN_22_EINT22__FUNC_USB_DRVVBUS (MTK_PIN_NO(22) | 4)
+#define MT8516_PIN_22_EINT22__FUNC_SPDIF_OUT (MTK_PIN_NO(22) | 5)
+#define MT8516_PIN_22_EINT22__FUNC_NRE_C (MTK_PIN_NO(22) | 6)
+#define MT8516_PIN_22_EINT22__FUNC_DBG_MON_B_12 (MTK_PIN_NO(22) | 7)
+
+#define MT8516_PIN_23_EINT23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8516_PIN_23_EINT23__FUNC_I2S_8CH_DO3 (MTK_PIN_NO(23) | 2)
+#define MT8516_PIN_23_EINT23__FUNC_CLKM0 (MTK_PIN_NO(23) | 3)
+#define MT8516_PIN_23_EINT23__FUNC_IR (MTK_PIN_NO(23) | 4)
+#define MT8516_PIN_23_EINT23__FUNC_SPDIF_IN (MTK_PIN_NO(23) | 5)
+#define MT8516_PIN_23_EINT23__FUNC_NDQS_C (MTK_PIN_NO(23) | 6)
+#define MT8516_PIN_23_EINT23__FUNC_DBG_MON_B_13 (MTK_PIN_NO(23) | 7)
+
+#define MT8516_PIN_24_EINT24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8516_PIN_24_EINT24__FUNC_ANT_SEL1 (MTK_PIN_NO(24) | 3)
+#define MT8516_PIN_24_EINT24__FUNC_UCTS2 (MTK_PIN_NO(24) | 4)
+#define MT8516_PIN_24_EINT24__FUNC_PWM_A (MTK_PIN_NO(24) | 5)
+#define MT8516_PIN_24_EINT24__FUNC_I2S0_MCK (MTK_PIN_NO(24) | 6)
+#define MT8516_PIN_24_EINT24__FUNC_DBG_MON_A_0 (MTK_PIN_NO(24) | 7)
+
+#define MT8516_PIN_25_EINT25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8516_PIN_25_EINT25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 3)
+#define MT8516_PIN_25_EINT25__FUNC_URTS2 (MTK_PIN_NO(25) | 4)
+#define MT8516_PIN_25_EINT25__FUNC_PWM_B (MTK_PIN_NO(25) | 5)
+#define MT8516_PIN_25_EINT25__FUNC_I2S_8CH_MCK (MTK_PIN_NO(25) | 6)
+#define MT8516_PIN_25_EINT25__FUNC_DBG_MON_A_1 (MTK_PIN_NO(25) | 7)
+
+#define MT8516_PIN_26_PWRAP_SPI0_MI__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8516_PIN_26_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(26) | 1)
+#define MT8516_PIN_26_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(26) | 2)
+
+#define MT8516_PIN_27_PWRAP_SPI0_MO__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8516_PIN_27_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(27) | 1)
+#define MT8516_PIN_27_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(27) | 2)
+
+#define MT8516_PIN_28_PWRAP_INT__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S0_MCK (MTK_PIN_NO(28) | 1)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S_8CH_MCK (MTK_PIN_NO(28) | 4)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S2_MCK (MTK_PIN_NO(28) | 5)
+#define MT8516_PIN_28_PWRAP_INT__FUNC_I2S3_MCK (MTK_PIN_NO(28) | 6)
+
+#define MT8516_PIN_29_PWRAP_SPI0_CK__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8516_PIN_29_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(29) | 1)
+
+#define MT8516_PIN_30_PWRAP_SPI0_CSN__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8516_PIN_30_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(30) | 1)
+
+#define MT8516_PIN_31_RTC32K_CK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8516_PIN_31_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(31) | 1)
+
+#define MT8516_PIN_32_WATCHDOG__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8516_PIN_32_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(32) | 1)
+
+#define MT8516_PIN_33_SRCLKENA__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8516_PIN_33_SRCLKENA__FUNC_SRCLKENA0 (MTK_PIN_NO(33) | 1)
+
+#define MT8516_PIN_34_URXD2__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8516_PIN_34_URXD2__FUNC_URXD2 (MTK_PIN_NO(34) | 1)
+#define MT8516_PIN_34_URXD2__FUNC_UTXD2 (MTK_PIN_NO(34) | 3)
+#define MT8516_PIN_34_URXD2__FUNC_DBG_SCL (MTK_PIN_NO(34) | 4)
+#define MT8516_PIN_34_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(34) | 6)
+#define MT8516_PIN_34_URXD2__FUNC_DBG_MON_B_0 (MTK_PIN_NO(34) | 7)
+
+#define MT8516_PIN_35_UTXD2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8516_PIN_35_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(35) | 1)
+#define MT8516_PIN_35_UTXD2__FUNC_URXD2 (MTK_PIN_NO(35) | 3)
+#define MT8516_PIN_35_UTXD2__FUNC_DBG_SDA (MTK_PIN_NO(35) | 4)
+#define MT8516_PIN_35_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(35) | 6)
+#define MT8516_PIN_35_UTXD2__FUNC_DBG_MON_B_1 (MTK_PIN_NO(35) | 7)
+
+#define MT8516_PIN_36_MRG_CLK__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8516_PIN_36_MRG_CLK__FUNC_MRG_CLK (MTK_PIN_NO(36) | 1)
+#define MT8516_PIN_36_MRG_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(36) | 3)
+#define MT8516_PIN_36_MRG_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(36) | 4)
+#define MT8516_PIN_36_MRG_CLK__FUNC_PCM0_CLK (MTK_PIN_NO(36) | 5)
+#define MT8516_PIN_36_MRG_CLK__FUNC_IR (MTK_PIN_NO(36) | 6)
+#define MT8516_PIN_36_MRG_CLK__FUNC_DBG_MON_A_2 (MTK_PIN_NO(36) | 7)
+
+#define MT8516_PIN_37_MRG_SYNC__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_MRG_SYNC (MTK_PIN_NO(37) | 1)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(37) | 3)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(37) | 4)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_PCM0_SYNC (MTK_PIN_NO(37) | 5)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_EXT_COL (MTK_PIN_NO(37) | 6)
+#define MT8516_PIN_37_MRG_SYNC__FUNC_DBG_MON_A_3 (MTK_PIN_NO(37) | 7)
+
+#define MT8516_PIN_38_MRG_DI__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8516_PIN_38_MRG_DI__FUNC_MRG_DI (MTK_PIN_NO(38) | 1)
+#define MT8516_PIN_38_MRG_DI__FUNC_I2S0_DI (MTK_PIN_NO(38) | 3)
+#define MT8516_PIN_38_MRG_DI__FUNC_I2S3_DO (MTK_PIN_NO(38) | 4)
+#define MT8516_PIN_38_MRG_DI__FUNC_PCM0_DI (MTK_PIN_NO(38) | 5)
+#define MT8516_PIN_38_MRG_DI__FUNC_EXT_MDIO (MTK_PIN_NO(38) | 6)
+#define MT8516_PIN_38_MRG_DI__FUNC_DBG_MON_A_4 (MTK_PIN_NO(38) | 7)
+
+#define MT8516_PIN_39_MRG_DO__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8516_PIN_39_MRG_DO__FUNC_MRG_DO (MTK_PIN_NO(39) | 1)
+#define MT8516_PIN_39_MRG_DO__FUNC_I2S0_MCK (MTK_PIN_NO(39) | 3)
+#define MT8516_PIN_39_MRG_DO__FUNC_I2S3_MCK (MTK_PIN_NO(39) | 4)
+#define MT8516_PIN_39_MRG_DO__FUNC_PCM0_DO (MTK_PIN_NO(39) | 5)
+#define MT8516_PIN_39_MRG_DO__FUNC_EXT_MDC (MTK_PIN_NO(39) | 6)
+#define MT8516_PIN_39_MRG_DO__FUNC_DBG_MON_A_5 (MTK_PIN_NO(39) | 7)
+
+#define MT8516_PIN_40_KPROW0__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8516_PIN_40_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(40) | 1)
+#define MT8516_PIN_40_KPROW0__FUNC_DBG_MON_B_4 (MTK_PIN_NO(40) | 7)
+
+#define MT8516_PIN_41_KPROW1__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8516_PIN_41_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(41) | 1)
+#define MT8516_PIN_41_KPROW1__FUNC_IDDIG (MTK_PIN_NO(41) | 2)
+#define MT8516_PIN_41_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 3)
+#define MT8516_PIN_41_KPROW1__FUNC_DBG_MON_B_5 (MTK_PIN_NO(41) | 7)
+
+#define MT8516_PIN_42_KPCOL0__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8516_PIN_42_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(42) | 1)
+#define MT8516_PIN_42_KPCOL0__FUNC_DBG_MON_B_6 (MTK_PIN_NO(42) | 7)
+
+#define MT8516_PIN_43_KPCOL1__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8516_PIN_43_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(43) | 1)
+#define MT8516_PIN_43_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(43) | 2)
+#define MT8516_PIN_43_KPCOL1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(43) | 3)
+#define MT8516_PIN_43_KPCOL1__FUNC_TSF_IN (MTK_PIN_NO(43) | 4)
+#define MT8516_PIN_43_KPCOL1__FUNC_DBG_MON_B_7 (MTK_PIN_NO(43) | 7)
+
+#define MT8516_PIN_44_JTMS__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8516_PIN_44_JTMS__FUNC_JTMS (MTK_PIN_NO(44) | 1)
+#define MT8516_PIN_44_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(44) | 2)
+#define MT8516_PIN_44_JTMS__FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(44) | 3)
+#define MT8516_PIN_44_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(44) | 5)
+#define MT8516_PIN_44_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(44) | 6)
+
+#define MT8516_PIN_45_JTCK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8516_PIN_45_JTCK__FUNC_JTCK (MTK_PIN_NO(45) | 1)
+#define MT8516_PIN_45_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(45) | 2)
+#define MT8516_PIN_45_JTCK__FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(45) | 3)
+
+#define MT8516_PIN_46_JTDI__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8516_PIN_46_JTDI__FUNC_JTDI (MTK_PIN_NO(46) | 1)
+#define MT8516_PIN_46_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(46) | 2)
+
+#define MT8516_PIN_47_JTDO__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8516_PIN_47_JTDO__FUNC_JTDO (MTK_PIN_NO(47) | 1)
+#define MT8516_PIN_47_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(47) | 2)
+
+#define MT8516_PIN_48_SPI_CS__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8516_PIN_48_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(48) | 1)
+#define MT8516_PIN_48_SPI_CS__FUNC_I2S0_DI (MTK_PIN_NO(48) | 3)
+#define MT8516_PIN_48_SPI_CS__FUNC_I2S2_BCK (MTK_PIN_NO(48) | 4)
+#define MT8516_PIN_48_SPI_CS__FUNC_DBG_MON_A_23 (MTK_PIN_NO(48) | 7)
+
+#define MT8516_PIN_49_SPI_CK__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8516_PIN_49_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(49) | 1)
+#define MT8516_PIN_49_SPI_CK__FUNC_I2S0_LRCK (MTK_PIN_NO(49) | 3)
+#define MT8516_PIN_49_SPI_CK__FUNC_I2S2_DI (MTK_PIN_NO(49) | 4)
+#define MT8516_PIN_49_SPI_CK__FUNC_DBG_MON_A_24 (MTK_PIN_NO(49) | 7)
+
+#define MT8516_PIN_50_SPI_MI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8516_PIN_50_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(50) | 1)
+#define MT8516_PIN_50_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(50) | 2)
+#define MT8516_PIN_50_SPI_MI__FUNC_I2S0_BCK (MTK_PIN_NO(50) | 3)
+#define MT8516_PIN_50_SPI_MI__FUNC_I2S2_LRCK (MTK_PIN_NO(50) | 4)
+#define MT8516_PIN_50_SPI_MI__FUNC_DBG_MON_A_25 (MTK_PIN_NO(50) | 7)
+
+#define MT8516_PIN_51_SPI_MO__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8516_PIN_51_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(51) | 1)
+#define MT8516_PIN_51_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(51) | 2)
+#define MT8516_PIN_51_SPI_MO__FUNC_I2S0_MCK (MTK_PIN_NO(51) | 3)
+#define MT8516_PIN_51_SPI_MO__FUNC_I2S2_MCK (MTK_PIN_NO(51) | 4)
+#define MT8516_PIN_51_SPI_MO__FUNC_DBG_MON_A_26 (MTK_PIN_NO(51) | 7)
+
+#define MT8516_PIN_52_SDA1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8516_PIN_52_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(52) | 1)
+
+#define MT8516_PIN_53_SCL1__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8516_PIN_53_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(53) | 1)
+
+#define MT8516_PIN_54_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8516_PIN_54_GPIO54__FUNC_PWM_B (MTK_PIN_NO(54) | 2)
+#define MT8516_PIN_54_GPIO54__FUNC_DBG_MON_B_2 (MTK_PIN_NO(54) | 7)
+
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(55) | 1)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_UCTS0 (MTK_PIN_NO(55) | 2)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(55) | 3)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(55) | 4)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(55) | 5)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_I2S2_BCK (MTK_PIN_NO(55) | 6)
+#define MT8516_PIN_55_I2S_DATA_IN__FUNC_DBG_MON_A_28 (MTK_PIN_NO(55) | 7)
+
+#define MT8516_PIN_56_I2S_LRCK__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(56) | 1)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(56) | 3)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(56) | 4)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(56) | 5)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_I2S2_DI (MTK_PIN_NO(56) | 6)
+#define MT8516_PIN_56_I2S_LRCK__FUNC_DBG_MON_A_29 (MTK_PIN_NO(56) | 7)
+
+#define MT8516_PIN_57_I2S_BCK__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(57) | 1)
+#define MT8516_PIN_57_I2S_BCK__FUNC_URTS0 (MTK_PIN_NO(57) | 2)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(57) | 3)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S_8CH_BCK (MTK_PIN_NO(57) | 4)
+#define MT8516_PIN_57_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(57) | 5)
+#define MT8516_PIN_57_I2S_BCK__FUNC_I2S2_LRCK (MTK_PIN_NO(57) | 6)
+#define MT8516_PIN_57_I2S_BCK__FUNC_DBG_MON_A_30 (MTK_PIN_NO(57) | 7)
+
+#define MT8516_PIN_58_SDA0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8516_PIN_58_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(58) | 1)
+
+#define MT8516_PIN_59_SCL0__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8516_PIN_59_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(59) | 1)
+
+#define MT8516_PIN_60_SDA2__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8516_PIN_60_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(60) | 1)
+#define MT8516_PIN_60_SDA2__FUNC_PWM_B (MTK_PIN_NO(60) | 2)
+
+#define MT8516_PIN_61_SCL2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8516_PIN_61_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(61) | 1)
+#define MT8516_PIN_61_SCL2__FUNC_PWM_C (MTK_PIN_NO(61) | 2)
+
+#define MT8516_PIN_62_URXD0__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8516_PIN_62_URXD0__FUNC_URXD0 (MTK_PIN_NO(62) | 1)
+#define MT8516_PIN_62_URXD0__FUNC_UTXD0 (MTK_PIN_NO(62) | 2)
+
+#define MT8516_PIN_63_UTXD0__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8516_PIN_63_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(63) | 1)
+#define MT8516_PIN_63_UTXD0__FUNC_URXD0 (MTK_PIN_NO(63) | 2)
+
+#define MT8516_PIN_64_URXD1__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8516_PIN_64_URXD1__FUNC_URXD1 (MTK_PIN_NO(64) | 1)
+#define MT8516_PIN_64_URXD1__FUNC_UTXD1 (MTK_PIN_NO(64) | 2)
+#define MT8516_PIN_64_URXD1__FUNC_DBG_MON_A_27 (MTK_PIN_NO(64) | 7)
+
+#define MT8516_PIN_65_UTXD1__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8516_PIN_65_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(65) | 1)
+#define MT8516_PIN_65_UTXD1__FUNC_URXD1 (MTK_PIN_NO(65) | 2)
+#define MT8516_PIN_65_UTXD1__FUNC_DBG_MON_A_31 (MTK_PIN_NO(65) | 7)
+
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(68) | 1)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_I2S_8CH_DO4 (MTK_PIN_NO(68) | 2)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_SDA1_0 (MTK_PIN_NO(68) | 3)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_USB_SDA (MTK_PIN_NO(68) | 5)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_I2S3_BCK (MTK_PIN_NO(68) | 6)
+#define MT8516_PIN_68_MSDC2_CMD__FUNC_DBG_MON_B_15 (MTK_PIN_NO(68) | 7)
+
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(69) | 1)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_I2S_8CH_DO3 (MTK_PIN_NO(69) | 2)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_SCL1_0 (MTK_PIN_NO(69) | 3)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_USB_SCL (MTK_PIN_NO(69) | 5)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_I2S3_LRCK (MTK_PIN_NO(69) | 6)
+#define MT8516_PIN_69_MSDC2_CLK__FUNC_DBG_MON_B_16 (MTK_PIN_NO(69) | 7)
+
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(70) | 1)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_I2S_8CH_DO2 (MTK_PIN_NO(70) | 2)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_UTXD0 (MTK_PIN_NO(70) | 5)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_I2S3_DO (MTK_PIN_NO(70) | 6)
+#define MT8516_PIN_70_MSDC2_DAT0__FUNC_DBG_MON_B_17 (MTK_PIN_NO(70) | 7)
+
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(71) | 1)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_I2S_8CH_DO1 (MTK_PIN_NO(71) | 2)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_PWM_A (MTK_PIN_NO(71) | 3)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_I2S3_MCK (MTK_PIN_NO(71) | 4)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_URXD0 (MTK_PIN_NO(71) | 5)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_PWM_B (MTK_PIN_NO(71) | 6)
+#define MT8516_PIN_71_MSDC2_DAT1__FUNC_DBG_MON_B_18 (MTK_PIN_NO(71) | 7)
+
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(72) | 1)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_I2S_8CH_LRCK (MTK_PIN_NO(72) | 2)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_SDA2_0 (MTK_PIN_NO(72) | 3)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_UTXD1 (MTK_PIN_NO(72) | 5)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_PWM_C (MTK_PIN_NO(72) | 6)
+#define MT8516_PIN_72_MSDC2_DAT2__FUNC_DBG_MON_B_19 (MTK_PIN_NO(72) | 7)
+
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(73) | 1)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_I2S_8CH_BCK (MTK_PIN_NO(73) | 2)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_SCL2_0 (MTK_PIN_NO(73) | 3)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(73) | 4)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_URXD1 (MTK_PIN_NO(73) | 5)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_PWM_A (MTK_PIN_NO(73) | 6)
+#define MT8516_PIN_73_MSDC2_DAT3__FUNC_DBG_MON_B_20 (MTK_PIN_NO(73) | 7)
+
+#define MT8516_PIN_74_TDN3__FUNC_GPI74 (MTK_PIN_NO(74) | 0)
+#define MT8516_PIN_74_TDN3__FUNC_TDN3 (MTK_PIN_NO(74) | 1)
+
+#define MT8516_PIN_75_TDP3__FUNC_GPI75 (MTK_PIN_NO(75) | 0)
+#define MT8516_PIN_75_TDP3__FUNC_TDP3 (MTK_PIN_NO(75) | 1)
+
+#define MT8516_PIN_76_TDN2__FUNC_GPI76 (MTK_PIN_NO(76) | 0)
+#define MT8516_PIN_76_TDN2__FUNC_TDN2 (MTK_PIN_NO(76) | 1)
+
+#define MT8516_PIN_77_TDP2__FUNC_GPI77 (MTK_PIN_NO(77) | 0)
+#define MT8516_PIN_77_TDP2__FUNC_TDP2 (MTK_PIN_NO(77) | 1)
+
+#define MT8516_PIN_78_TCN__FUNC_GPI78 (MTK_PIN_NO(78) | 0)
+#define MT8516_PIN_78_TCN__FUNC_TCN (MTK_PIN_NO(78) | 1)
+
+#define MT8516_PIN_79_TCP__FUNC_GPI79 (MTK_PIN_NO(79) | 0)
+#define MT8516_PIN_79_TCP__FUNC_TCP (MTK_PIN_NO(79) | 1)
+
+#define MT8516_PIN_80_TDN1__FUNC_GPI80 (MTK_PIN_NO(80) | 0)
+#define MT8516_PIN_80_TDN1__FUNC_TDN1 (MTK_PIN_NO(80) | 1)
+
+#define MT8516_PIN_81_TDP1__FUNC_GPI81 (MTK_PIN_NO(81) | 0)
+#define MT8516_PIN_81_TDP1__FUNC_TDP1 (MTK_PIN_NO(81) | 1)
+
+#define MT8516_PIN_82_TDN0__FUNC_GPI82 (MTK_PIN_NO(82) | 0)
+#define MT8516_PIN_82_TDN0__FUNC_TDN0 (MTK_PIN_NO(82) | 1)
+
+#define MT8516_PIN_83_TDP0__FUNC_GPI83 (MTK_PIN_NO(83) | 0)
+#define MT8516_PIN_83_TDP0__FUNC_TDP0 (MTK_PIN_NO(83) | 1)
+
+#define MT8516_PIN_84_RDN0__FUNC_GPI84 (MTK_PIN_NO(84) | 0)
+#define MT8516_PIN_84_RDN0__FUNC_RDN0 (MTK_PIN_NO(84) | 1)
+
+#define MT8516_PIN_85_RDP0__FUNC_GPI85 (MTK_PIN_NO(85) | 0)
+#define MT8516_PIN_85_RDP0__FUNC_RDP0 (MTK_PIN_NO(85) | 1)
+
+#define MT8516_PIN_86_RDN1__FUNC_GPI86 (MTK_PIN_NO(86) | 0)
+#define MT8516_PIN_86_RDN1__FUNC_RDN1 (MTK_PIN_NO(86) | 1)
+
+#define MT8516_PIN_87_RDP1__FUNC_GPI87 (MTK_PIN_NO(87) | 0)
+#define MT8516_PIN_87_RDP1__FUNC_RDP1 (MTK_PIN_NO(87) | 1)
+
+#define MT8516_PIN_88_RCN__FUNC_GPI88 (MTK_PIN_NO(88) | 0)
+#define MT8516_PIN_88_RCN__FUNC_RCN (MTK_PIN_NO(88) | 1)
+
+#define MT8516_PIN_89_RCP__FUNC_GPI89 (MTK_PIN_NO(89) | 0)
+#define MT8516_PIN_89_RCP__FUNC_RCP (MTK_PIN_NO(89) | 1)
+
+#define MT8516_PIN_90_RDN2__FUNC_GPI90 (MTK_PIN_NO(90) | 0)
+#define MT8516_PIN_90_RDN2__FUNC_RDN2 (MTK_PIN_NO(90) | 1)
+#define MT8516_PIN_90_RDN2__FUNC_CMDAT8 (MTK_PIN_NO(90) | 2)
+
+#define MT8516_PIN_91_RDP2__FUNC_GPI91 (MTK_PIN_NO(91) | 0)
+#define MT8516_PIN_91_RDP2__FUNC_RDP2 (MTK_PIN_NO(91) | 1)
+#define MT8516_PIN_91_RDP2__FUNC_CMDAT9 (MTK_PIN_NO(91) | 2)
+
+#define MT8516_PIN_92_RDN3__FUNC_GPI92 (MTK_PIN_NO(92) | 0)
+#define MT8516_PIN_92_RDN3__FUNC_RDN3 (MTK_PIN_NO(92) | 1)
+#define MT8516_PIN_92_RDN3__FUNC_CMDAT4 (MTK_PIN_NO(92) | 2)
+
+#define MT8516_PIN_93_RDP3__FUNC_GPI93 (MTK_PIN_NO(93) | 0)
+#define MT8516_PIN_93_RDP3__FUNC_RDP3 (MTK_PIN_NO(93) | 1)
+#define MT8516_PIN_93_RDP3__FUNC_CMDAT5 (MTK_PIN_NO(93) | 2)
+
+#define MT8516_PIN_94_RCN_A__FUNC_GPI94 (MTK_PIN_NO(94) | 0)
+#define MT8516_PIN_94_RCN_A__FUNC_RCN_A (MTK_PIN_NO(94) | 1)
+#define MT8516_PIN_94_RCN_A__FUNC_CMDAT6 (MTK_PIN_NO(94) | 2)
+
+#define MT8516_PIN_95_RCP_A__FUNC_GPI95 (MTK_PIN_NO(95) | 0)
+#define MT8516_PIN_95_RCP_A__FUNC_RCP_A (MTK_PIN_NO(95) | 1)
+#define MT8516_PIN_95_RCP_A__FUNC_CMDAT7 (MTK_PIN_NO(95) | 2)
+
+#define MT8516_PIN_96_RDN1_A__FUNC_GPI96 (MTK_PIN_NO(96) | 0)
+#define MT8516_PIN_96_RDN1_A__FUNC_RDN1_A (MTK_PIN_NO(96) | 1)
+#define MT8516_PIN_96_RDN1_A__FUNC_CMDAT2 (MTK_PIN_NO(96) | 2)
+#define MT8516_PIN_96_RDN1_A__FUNC_CMCSD2 (MTK_PIN_NO(96) | 3)
+
+#define MT8516_PIN_97_RDP1_A__FUNC_GPI97 (MTK_PIN_NO(97) | 0)
+#define MT8516_PIN_97_RDP1_A__FUNC_RDP1_A (MTK_PIN_NO(97) | 1)
+#define MT8516_PIN_97_RDP1_A__FUNC_CMDAT3 (MTK_PIN_NO(97) | 2)
+#define MT8516_PIN_97_RDP1_A__FUNC_CMCSD3 (MTK_PIN_NO(97) | 3)
+
+#define MT8516_PIN_98_RDN0_A__FUNC_GPI98 (MTK_PIN_NO(98) | 0)
+#define MT8516_PIN_98_RDN0_A__FUNC_RDN0_A (MTK_PIN_NO(98) | 1)
+#define MT8516_PIN_98_RDN0_A__FUNC_CMHSYNC (MTK_PIN_NO(98) | 2)
+
+#define MT8516_PIN_99_RDP0_A__FUNC_GPI99 (MTK_PIN_NO(99) | 0)
+#define MT8516_PIN_99_RDP0_A__FUNC_RDP0_A (MTK_PIN_NO(99) | 1)
+#define MT8516_PIN_99_RDP0_A__FUNC_CMVSYNC (MTK_PIN_NO(99) | 2)
+
+#define MT8516_PIN_100_CMDAT0__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8516_PIN_100_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(100) | 1)
+#define MT8516_PIN_100_CMDAT0__FUNC_CMCSD0 (MTK_PIN_NO(100) | 2)
+#define MT8516_PIN_100_CMDAT0__FUNC_ANT_SEL2 (MTK_PIN_NO(100) | 3)
+#define MT8516_PIN_100_CMDAT0__FUNC_TDM_RX_MCK (MTK_PIN_NO(100) | 5)
+#define MT8516_PIN_100_CMDAT0__FUNC_DBG_MON_B_21 (MTK_PIN_NO(100) | 7)
+
+#define MT8516_PIN_101_CMDAT1__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8516_PIN_101_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(101) | 1)
+#define MT8516_PIN_101_CMDAT1__FUNC_CMCSD1 (MTK_PIN_NO(101) | 2)
+#define MT8516_PIN_101_CMDAT1__FUNC_ANT_SEL3 (MTK_PIN_NO(101) | 3)
+#define MT8516_PIN_101_CMDAT1__FUNC_CMFLASH (MTK_PIN_NO(101) | 4)
+#define MT8516_PIN_101_CMDAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(101) | 5)
+#define MT8516_PIN_101_CMDAT1__FUNC_DBG_MON_B_22 (MTK_PIN_NO(101) | 7)
+
+#define MT8516_PIN_102_CMMCLK__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8516_PIN_102_CMMCLK__FUNC_CMMCLK (MTK_PIN_NO(102) | 1)
+#define MT8516_PIN_102_CMMCLK__FUNC_ANT_SEL4 (MTK_PIN_NO(102) | 3)
+#define MT8516_PIN_102_CMMCLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(102) | 5)
+#define MT8516_PIN_102_CMMCLK__FUNC_DBG_MON_B_23 (MTK_PIN_NO(102) | 7)
+
+#define MT8516_PIN_103_CMPCLK__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8516_PIN_103_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(103) | 1)
+#define MT8516_PIN_103_CMPCLK__FUNC_CMCSK (MTK_PIN_NO(103) | 2)
+#define MT8516_PIN_103_CMPCLK__FUNC_ANT_SEL5 (MTK_PIN_NO(103) | 3)
+#define MT8516_PIN_103_CMPCLK__FUNC_TDM_RX_DI (MTK_PIN_NO(103) | 5)
+#define MT8516_PIN_103_CMPCLK__FUNC_DBG_MON_B_24 (MTK_PIN_NO(103) | 7)
+
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(104) | 1)
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_SQICS (MTK_PIN_NO(104) | 4)
+#define MT8516_PIN_104_MSDC1_CMD__FUNC_DBG_MON_B_25 (MTK_PIN_NO(104) | 7)
+
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(105) | 1)
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_SQISO (MTK_PIN_NO(105) | 4)
+#define MT8516_PIN_105_MSDC1_CLK__FUNC_DBG_MON_B_26 (MTK_PIN_NO(105) | 7)
+
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(106) | 1)
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_SQISI (MTK_PIN_NO(106) | 4)
+#define MT8516_PIN_106_MSDC1_DAT0__FUNC_DBG_MON_B_27 (MTK_PIN_NO(106) | 7)
+
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(107) | 1)
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_SQIWP (MTK_PIN_NO(107) | 4)
+#define MT8516_PIN_107_MSDC1_DAT1__FUNC_DBG_MON_B_28 (MTK_PIN_NO(107) | 7)
+
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(108) | 1)
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_SQIRST (MTK_PIN_NO(108) | 4)
+#define MT8516_PIN_108_MSDC1_DAT2__FUNC_DBG_MON_B_29 (MTK_PIN_NO(108) | 7)
+
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(109) | 1)
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_SQICK (MTK_PIN_NO(109) | 4)
+#define MT8516_PIN_109_MSDC1_DAT3__FUNC_DBG_MON_B_30 (MTK_PIN_NO(109) | 7)
+
+#define MT8516_PIN_110_MSDC0_DAT7__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8516_PIN_110_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(110) | 1)
+#define MT8516_PIN_110_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(110) | 4)
+
+#define MT8516_PIN_111_MSDC0_DAT6__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8516_PIN_111_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(111) | 1)
+#define MT8516_PIN_111_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(111) | 4)
+
+#define MT8516_PIN_112_MSDC0_DAT5__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8516_PIN_112_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(112) | 1)
+#define MT8516_PIN_112_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(112) | 4)
+
+#define MT8516_PIN_113_MSDC0_DAT4__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8516_PIN_113_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(113) | 1)
+#define MT8516_PIN_113_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(113) | 4)
+
+#define MT8516_PIN_114_MSDC0_RSTB__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8516_PIN_114_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(114) | 1)
+#define MT8516_PIN_114_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(114) | 4)
+
+#define MT8516_PIN_115_MSDC0_CMD__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8516_PIN_115_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(115) | 1)
+#define MT8516_PIN_115_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(115) | 4)
+
+#define MT8516_PIN_116_MSDC0_CLK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8516_PIN_116_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(116) | 1)
+#define MT8516_PIN_116_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(116) | 4)
+
+#define MT8516_PIN_117_MSDC0_DAT3__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8516_PIN_117_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(117) | 1)
+#define MT8516_PIN_117_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(117) | 4)
+
+#define MT8516_PIN_118_MSDC0_DAT2__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8516_PIN_118_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(118) | 1)
+#define MT8516_PIN_118_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(118) | 4)
+
+#define MT8516_PIN_119_MSDC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8516_PIN_119_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(119) | 1)
+#define MT8516_PIN_119_MSDC0_DAT1__FUNC_NLD8 (MTK_PIN_NO(119) | 4)
+
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(120) | 1)
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_WATCHDOG (MTK_PIN_NO(120) | 4)
+#define MT8516_PIN_120_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(120) | 5)
+
+#endif /* __DTS_MT8516_PINFUNC_H */
diff --git a/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
new file mode 100644
index 000000000000..cce642c53812
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8516-pumpkin.dts
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include "mt8516.dtsi"
+#include "pumpkin-common.dtsi"
+
+/ {
+ model = "Pumpkin MT8516";
+ compatible = "mediatek,mt8516";
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x40000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
new file mode 100644
index 000000000000..2f8adf042195
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <dt-bindings/clock/mt8516-clk.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "mt8516-pinfunc.h"
+
+/ {
+ compatible = "mediatek,mt8516";
+ interrupt-parent = <&sysirq>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
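+ /* Operating points shared by all four Cortex-A35 cores (opp-shared) */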
+ cluster0_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+ opp-598000000 {
+ opp-hz = /bits/ 64 <598000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-747500000 {
+ opp-hz = /bits/ 64 <747500000>;
+ opp-microvolt = <1150000>;
+ };
+ opp-1040000000 {
+ opp-hz = /bits/ 64 <1040000000>;
+ opp-microvolt = <1200000>;
+ };
+ opp-1196000000 {
+ opp-hz = /bits/ 64 <1196000000>;
+ opp-microvolt = <1250000>;
+ };
+ opp-1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1300000>;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+ <&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+ clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+ <&topckgen CLK_TOP_MAINPLL_D2>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x1>;
+ enable-method = "psci";
+ cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+ <&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+ clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+ <&topckgen CLK_TOP_MAINPLL_D2>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x2>;
+ enable-method = "psci";
+ cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+ <&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+ clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+ <&topckgen CLK_TOP_MAINPLL_D2>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x3>;
+ enable-method = "psci";
+ cpu-idle-states = <&CLUSTER_SLEEP_0 &CLUSTER_SLEEP_0>,
+ <&CPU_SLEEP_0_0 &CPU_SLEEP_0_0 &CPU_SLEEP_0_0>;
+ clocks = <&infracfg CLK_IFR_MUX1_SEL>,
+ <&topckgen CLK_TOP_MAINPLL_D2>;
+ clock-names = "cpu", "intermediate", "armpll";
+ operating-points-v2 = <&cluster0_opp>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP_0_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ entry-latency-us = <600>;
+ exit-latency-us = <600>;
+ min-residency-us = <1200>;
+ arm,psci-suspend-param = <0x0010000>;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ entry-latency-us = <800>;
+ exit-latency-us = <1000>;
+ min-residency-us = <2000>;
+ arm,psci-suspend-param = <0x2010000>;
+ };
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ clk26m: clk26m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ clock-output-names = "clk26m";
+ };
+
+ clk32k: clk32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32000>;
+ clock-output-names = "clk32k";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* 128 KiB reserved for ARM Trusted Firmware (BL31) */
+ bl31_secmon_reserved: secmon@43000000 {
+ no-map;
+ reg = <0 0x43000000 0 0x20000>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "simple-bus";
+ ranges;
+
+ topckgen: topckgen@10000000 {
+ compatible = "mediatek,mt8516-topckgen", "syscon";
+ reg = <0 0x10000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ infracfg: infracfg@10001000 {
+ compatible = "mediatek,mt8516-infracfg", "syscon";
+ reg = <0 0x10001000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ apmixedsys: apmixedsys@10018000 {
+ compatible = "mediatek,mt8516-apmixedsys", "syscon";
+ reg = <0 0x10018000 0 0x710>;
+ #clock-cells = <1>;
+ };
+
+ toprgu: toprgu@10007000 {
+ compatible = "mediatek,mt8516-wdt",
+ "mediatek,mt6589-wdt";
+ reg = <0 0x10007000 0 0x1000>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_EDGE_FALLING>;
+ #reset-cells = <1>;
+ };
+
+ timer: timer@10008000 {
+ compatible = "mediatek,mt8516-timer",
+ "mediatek,mt6577-timer";
+ reg = <0 0x10008000 0 0x1000>;
+ interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_CLK26M_D2>,
+ <&topckgen CLK_TOP_APXGPT>;
+ clock-names = "clk13m", "bus";
+ };
+
+ syscfg_pctl: syscfg-pctl@10005000 {
+ compatible = "syscon";
+ reg = <0 0x10005000 0 0x1000>;
+ };
+
+ pio: pinctrl@1000b000 {
+ compatible = "mediatek,mt8516-pinctrl";
+ reg = <0 0x1000b000 0 0x1000>;
+ mediatek,pctl-regmap = <&syscfg_pctl>;
+ pins-are-numbered;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pwrap: pwrap@1000f000 {
+ compatible = "mediatek,mt8516-pwrap";
+ reg = <0 0x1000f000 0 0x1000>;
+ reg-names = "pwrap";
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_PMICWRAP_26M>,
+ <&topckgen CLK_TOP_PMICWRAP_AP>;
+ clock-names = "spi", "wrap";
+ };
+
+ sysirq: interrupt-controller@10200620 {
+ compatible = "mediatek,mt8516-sysirq",
+ "mediatek,mt6577-sysirq";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10200620 0 0x20>;
+ };
+
+ gic: interrupt-controller@10310000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ interrupt-controller;
+ reg = <0 0x10310000 0 0x1000>,
+ <0 0x10320000 0 0x1000>,
+ <0 0x10340000 0 0x2000>,
+ <0 0x10360000 0 0x2000>;
+ interrupts = <GIC_PPI 9
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ uart0: serial@11005000 {
+ compatible = "mediatek,mt8516-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11005000 0 0x1000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UART0_SEL>,
+ <&topckgen CLK_TOP_UART0>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ uart1: serial@11006000 {
+ compatible = "mediatek,mt8516-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11006000 0 0x1000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UART1_SEL>,
+ <&topckgen CLK_TOP_UART1>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ uart2: serial@11007000 {
+ compatible = "mediatek,mt8516-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11007000 0 0x1000>;
+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UART2_SEL>,
+ <&topckgen CLK_TOP_UART2>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ i2c0: i2c@11009000 {
+ compatible = "mediatek,mt8516-i2c",
+ "mediatek,mt2712-i2c";
+ reg = <0 0x11009000 0 0x90>,
+ <0 0x11000180 0 0x80>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_AHB_INFRA_D2>,
+ <&infracfg CLK_IFR_I2C0_SEL>,
+ <&topckgen CLK_TOP_I2C0>,
+ <&topckgen CLK_TOP_APDMA>;
+ clock-names = "main-source",
+ "main-sel",
+ "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@1100a000 {
+ compatible = "mediatek,mt8516-i2c",
+ "mediatek,mt2712-i2c";
+ reg = <0 0x1100a000 0 0x90>,
+ <0 0x11000200 0 0x80>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_AHB_INFRA_D2>,
+ <&infracfg CLK_IFR_I2C1_SEL>,
+ <&topckgen CLK_TOP_I2C1>,
+ <&topckgen CLK_TOP_APDMA>;
+ clock-names = "main-source",
+ "main-sel",
+ "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@1100b000 {
+ compatible = "mediatek,mt8516-i2c",
+ "mediatek,mt2712-i2c";
+ reg = <0 0x1100b000 0 0x90>,
+ <0 0x11000280 0 0x80>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_AHB_INFRA_D2>,
+ <&infracfg CLK_IFR_I2C2_SEL>,
+ <&topckgen CLK_TOP_I2C2>,
+ <&topckgen CLK_TOP_APDMA>;
+ clock-names = "main-source",
+ "main-sel",
+ "main",
+ "dma";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi: spi@1100c000 {
+ compatible = "mediatek,mt8516-spi",
+ "mediatek,mt2712-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0 0x1100c000 0 0x1000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_UNIVPLL_D12>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+ <&topckgen CLK_TOP_SPI>;
+ clock-names = "parent-clk", "sel-clk", "spi-clk";
+ status = "disabled";
+ };
+
+ mmc0: mmc@11120000 {
+ compatible = "mediatek,mt8516-mmc";
+ reg = <0 0x11120000 0 0x1000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_MSDC0>,
+ <&topckgen CLK_TOP_AHB_INFRA_SEL>,
+ <&topckgen CLK_TOP_MSDC0_INFRA>;
+ clock-names = "source", "hclk", "source_cg";
+ status = "disabled";
+ };
+
+ mmc1: mmc@11130000 {
+ compatible = "mediatek,mt8516-mmc";
+ reg = <0 0x11130000 0 0x1000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_MSDC1>,
+ <&topckgen CLK_TOP_AHB_INFRA_SEL>,
+ <&topckgen CLK_TOP_MSDC1_INFRA>;
+ clock-names = "source", "hclk", "source_cg";
+ status = "disabled";
+ };
+
+ mmc2: mmc@11170000 {
+ compatible = "mediatek,mt8516-mmc";
+ reg = <0 0x11170000 0 0x1000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_MSDC2>,
+ <&topckgen CLK_TOP_RG_MSDC2>,
+ <&topckgen CLK_TOP_MSDC2_INFRA>;
+ clock-names = "source", "hclk", "source_cg";
+ status = "disabled";
+ };
+
+ rng: rng@1020c000 {
+ compatible = "mediatek,mt8516-rng",
+ "mediatek,mt7623-rng";
+ reg = <0 0x1020c000 0 0x100>;
+ clocks = <&topckgen CLK_TOP_TRNG>;
+ clock-names = "rng";
+ };
+
+ pwm: pwm@11008000 {
+ compatible = "mediatek,mt8516-pwm";
+ reg = <0 0x11008000 0 0x1000>;
+ #pwm-cells = <2>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_PWM>,
+ <&topckgen CLK_TOP_PWM_B>,
+ <&topckgen CLK_TOP_PWM1_FB>,
+ <&topckgen CLK_TOP_PWM2_FB>,
+ <&topckgen CLK_TOP_PWM3_FB>,
+ <&topckgen CLK_TOP_PWM4_FB>,
+ <&topckgen CLK_TOP_PWM5_FB>;
+ clock-names = "top", "main", "pwm1", "pwm2", "pwm3",
+ "pwm4", "pwm5";
+ };
+
+ usb0: usb@11100000 {
+ compatible = "mediatek,mtk-musb";
+ reg = <0 0x11100000 0 0x1000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "mc";
+ phys = <&usb0_port PHY_TYPE_USB2>;
+ clocks = <&topckgen CLK_TOP_USB>,
+ <&topckgen CLK_TOP_USBIF>,
+ <&topckgen CLK_TOP_USB_1P>;
+ clock-names = "main","mcu","univpll";
+ status = "disabled";
+ };
+
+ usb0_phy: usb@11110000 {
+ compatible = "mediatek,generic-tphy-v1";
+ reg = <0 0x11110000 0 0x800>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ usb0_port: usb-phy@11110800 {
+ reg = <0 0x11110800 0 0x100>;
+ clocks = <&topckgen CLK_TOP_USB_PHY48M>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
new file mode 100644
index 000000000000..a31093d7142b
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:921600n8";
+ };
+
+ firmware {
+ optee: optee@4fd00000 {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_default>;
+
+ volume-up {
+ gpios = <&pio 42 GPIO_ACTIVE_LOW>;
+ label = "volume_up";
+ linux,code = <115>; /* KEY_VOLUMEUP */
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+
+ volume-down {
+ gpios = <&pio 43 GPIO_ACTIVE_LOW>;
+ label = "volume_down";
+ linux,code = <114>; /* KEY_VOLUMEDOWN */
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+&i2c0 {
+ clock-div = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ tca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ rst-gpio = <&pio 65 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tca6416_pins>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
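+ /* Board straps, mux selects and power enables hogged on the TCA6416 expander */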
+ eint20_mux_sel0 {
+ gpio-hog;
+ gpios = <0 0>;
+ input;
+ line-name = "eint20_mux_sel0";
+ };
+
+ expcon_mux_sel1 {
+ gpio-hog;
+ gpios = <1 0>;
+ input;
+ line-name = "expcon_mux_sel1";
+ };
+
+ mrg_di_mux_sel2 {
+ gpio-hog;
+ gpios = <2 0>;
+ input;
+ line-name = "mrg_di_mux_sel2";
+ };
+
+ sd_sdio_mux_sel3 {
+ gpio-hog;
+ gpios = <3 0>;
+ input;
+ line-name = "sd_sdio_mux_sel3";
+ };
+
+ sd_sdio_mux_ctrl7 {
+ gpio-hog;
+ gpios = <7 0>;
+ output-low;
+ line-name = "sd_sdio_mux_ctrl7";
+ };
+
+ hw_id0 {
+ gpio-hog;
+ gpios = <8 0>;
+ input;
+ line-name = "hw_id0";
+ };
+
+ hw_id1 {
+ gpio-hog;
+ gpios = <9 0>;
+ input;
+ line-name = "hw_id1";
+ };
+
+ hw_id2 {
+ gpio-hog;
+ gpios = <10 0>;
+ input;
+ line-name = "hw_id2";
+ };
+
+ fg_int_n {
+ gpio-hog;
+ gpios = <11 0>;
+ input;
+ line-name = "fg_int_n";
+ };
+
+ usba_pwr_en {
+ gpio-hog;
+ gpios = <12 0>;
+ output-high;
+ line-name = "usba_pwr_en";
+ };
+
+ wifi_3v3_pg {
+ gpio-hog;
+ gpios = <13 0>;
+ input;
+ line-name = "wifi_3v3_pg";
+ };
+
+ cam_rst {
+ gpio-hog;
+ gpios = <14 0>;
+ output-low;
+ line-name = "cam_rst";
+ };
+
+ cam_pwdn {
+ gpio-hog;
+ gpios = <15 0>;
+ output-low;
+ line-name = "cam_pwdn";
+ };
+ };
+};
+
+&i2c2 {
+ clock-div = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+ dr_mode = "peripheral";
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ };
+};
+
+&usb0_phy {
+ status = "okay";
+};
+
+&pio {
+ gpio_keys_default: gpiodefault {
+ pins_cmd_dat {
+ pinmux = <MT8516_PIN_42_KPCOL0__FUNC_GPIO42>,
+ <MT8516_PIN_43_KPCOL1__FUNC_GPIO43>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+
+ i2c0_pins_a: i2c0@0 {
+ pins1 {
+ pinmux = <MT8516_PIN_58_SDA0__FUNC_SDA0_0>,
+ <MT8516_PIN_59_SCL0__FUNC_SCL0_0>;
+ bias-disable;
+ };
+ };
+
+ i2c2_pins_a: i2c2@0 {
+ pins1 {
+ pinmux = <MT8516_PIN_60_SDA2__FUNC_SDA2_0>,
+ <MT8516_PIN_61_SCL2__FUNC_SCL2_0>;
+ bias-disable;
+ };
+ };
+
+ tca6416_pins: pinmux_tca6416_pins {
+ gpio_mux_rst_n_pin {
+ pinmux = <MT8516_PIN_65_UTXD1__FUNC_GPIO65>;
+ output-high;
+ };
+
+ gpio_mux_int_n_pin {
+ pinmux = <MT8516_PIN_64_URXD1__FUNC_GPIO64>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 6238e6e274b4..11a1bb428595 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
+#include <dt-bindings/soc/tegra-pmc.h>
/ {
compatible = "nvidia,tegra132", "nvidia,tegra124";
@@ -577,11 +578,12 @@
clock-names = "rtc";
};
- pmc@7000e400 {
+ tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra124-pmc";
reg = <0x0 0x7000e400 0x0 0x400>;
clocks = <&tegra_car TEGRA124_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
+ #clock-cells = <1>;
};
fuse@7000f800 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index f1de4ff6230a..1af7f9ffb7b6 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -131,7 +131,7 @@
status = "okay";
lanes {
- usb2-0 {
+ micro_b: usb2-0 {
nvidia,function = "xusb";
status = "okay";
};
@@ -174,8 +174,20 @@
usb2-0 {
status = "okay";
mode = "otg";
-
vbus-supply = <&vdd_usb0>;
+
+ usb-role-switch;
+ connector {
+ compatible = "usb-b-connector",
+ "gpio-usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ vbus-gpio = <&gpio
+ TEGRA186_MAIN_GPIO(X, 7)
+ GPIO_ACTIVE_LOW>;
+ id-gpio = <&pmic 0 GPIO_ACTIVE_HIGH>;
+ };
+
};
usb2-1 {
@@ -201,12 +213,20 @@
phy-names = "usb2-0", "usb2-1", "usb3-0";
};
+ usb@3550000 {
+ status = "okay";
+
+ phys = <&micro_b>;
+ phy-names = "usb2-0";
+ };
+
i2c@c250000 {
/* carrier board ID EEPROM */
eeprom@57 {
compatible = "atmel,24c02";
reg = <0x57>;
+ vcc-supply = <&vdd_1v8>;
address-bits = <8>;
page-size = <8>;
size = <256>;
@@ -258,7 +278,7 @@
status = "okay";
avdd-io-hdmi-dp-supply = <&vdd_hdmi_1v05>;
- vdd-hdmi-dp-pll = <&vdd_1v8_ap>;
+ vdd-hdmi-dp-pll-supply = <&vdd_1v8_ap>;
nvidia,dpaux = <&dpaux>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index 947744d0f04c..da96de04d003 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -171,6 +171,7 @@
compatible = "atmel,24c02";
reg = <0x50>;
+ vcc-supply = <&vdd_1v8>;
address-bits = <8>;
page-size = <8>;
size = <256>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index c905527c26ef..58100fb9cd8b 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -572,6 +572,25 @@
nvidia,xusb-padctl = <&padctl>;
};
+ usb@3550000 {
+ compatible = "nvidia,tegra186-xudc";
+ reg = <0x0 0x03550000 0x0 0x8000>,
+ <0x0 0x03558000 0x0 0x1000>;
+ reg-names = "base", "fpci";
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&bpmp TEGRA186_CLK_XUSB_CORE_DEV>,
+ <&bpmp TEGRA186_CLK_XUSB_SS>,
+ <&bpmp TEGRA186_CLK_XUSB_CORE_SS>,
+ <&bpmp TEGRA186_CLK_XUSB_FS>;
+ clock-names = "dev", "ss", "ss_src", "fs_src";
+ iommus = <&smmu TEGRA186_SID_XUSB_DEV>;
+ power-domains = <&bpmp TEGRA186_POWER_DOMAIN_XUSBB>,
+ <&bpmp TEGRA186_POWER_DOMAIN_XUSBA>;
+ power-domain-names = "dev", "ss";
+ nvidia,xusb-padctl = <&padctl>;
+ status = "disabled";
+ };
+
fuse@3820000 {
compatible = "nvidia,tegra186-efuse";
reg = <0x0 0x03820000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index bdd33ff4e324..623f7d7d216b 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -71,6 +71,29 @@
vmmc-supply = <&vdd_emmc_3v3>;
};
+ padctl@3520000 {
+ avdd-usb-supply = <&vdd_usb_3v3>;
+ vclamp-usb-supply = <&vdd_1v8ao>;
+
+ ports {
+ usb2-1 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb2-3 {
+ vbus-supply = <&vdd_5v_sata>;
+ };
+
+ usb3-0 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+
+ usb3-3 {
+ vbus-supply = <&vdd_5v0_sys>;
+ };
+ };
+ };
+
rtc@c2a0000 {
status = "okay";
};
@@ -234,7 +257,7 @@
regulator-max-microvolt = <3300000>;
};
- ldo5 {
+ vdd_usb_3v3: ldo5 {
regulator-name = "VDD_USB_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -317,5 +340,16 @@
gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_HIGH>;
regulator-boot-on;
};
+
+ vdd_5v_sata: regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <4>;
+
+ regulator-name = "VDD_5V_SATA";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA194_MAIN_GPIO(Z, 1) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index 985e7d84f161..e15d1eac05f5 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -37,6 +37,69 @@
status = "okay";
};
+ padctl@3520000 {
+ status = "okay";
+
+ pads {
+ usb2 {
+ lanes {
+ usb2-1 {
+ status = "okay";
+ };
+
+ usb2-3 {
+ status = "okay";
+ };
+ };
+ };
+
+ usb3 {
+ lanes {
+ usb3-0 {
+ status = "okay";
+ };
+
+ usb3-3 {
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-1 {
+ mode = "host";
+ status = "okay";
+ };
+
+ usb2-3 {
+ mode = "host";
+ status = "okay";
+ };
+
+ usb3-0 {
+ nvidia,usb2-companion = <1>;
+ status = "okay";
+ };
+
+ usb3-3 {
+ nvidia,usb2-companion = <3>;
+ maximum-speed = "super-speed";
+ status = "okay";
+ };
+ };
+ };
+
+ usb@3610000 {
+ status = "okay";
+
+ phys = <&{/cbb@0/padctl@3520000/pads/usb2/lanes/usb2-1}>,
+ <&{/cbb@0/padctl@3520000/pads/usb2/lanes/usb2-3}>,
+ <&{/cbb@0/padctl@3520000/pads/usb3/lanes/usb3-0}>,
+ <&{/cbb@0/padctl@3520000/pads/usb3/lanes/usb3-3}>;
+ phy-names = "usb2-1", "usb2-3", "usb3-0", "usb3-3";
+ };
+
pwm@c340000 {
status = "okay";
};
@@ -136,6 +199,24 @@
"p2u-5", "p2u-6", "p2u-7";
};
+ pcie_ep@141a0000 {
+ status = "disabled";
+
+ vddio-pex-ctl-supply = <&vdd_1v8ao>;
+
+ reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>;
+
+ nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5)
+ GPIO_ACTIVE_HIGH>;
+
+ phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
+ <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>,
+ <&p2u_nvhs_6>, <&p2u_nvhs_7>;
+
+ phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4",
+ "p2u-5", "p2u-6", "p2u-7";
+ };
+
fan: fan {
compatible = "pwm-fan";
pwms = <&pwm4 0 45334>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index ccac43be12ac..f4ede86e32b4 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -537,6 +537,145 @@
status = "disabled";
};
+ xusb_padctl: padctl@3520000 {
+ compatible = "nvidia,tegra194-xusb-padctl";
+ reg = <0x03520000 0x1000>,
+ <0x03540000 0x1000>;
+ reg-names = "padctl", "ao";
+
+ resets = <&bpmp TEGRA194_RESET_XUSB_PADCTL>;
+ reset-names = "padctl";
+
+ status = "disabled";
+
+ pads {
+ usb2 {
+ clocks = <&bpmp TEGRA194_CLK_USB2_TRK>;
+ clock-names = "trk";
+
+ lanes {
+ usb2-0 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-1 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-2 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-3 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ usb3 {
+ lanes {
+ usb3-0 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb3-1 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb3-2 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb3-3 {
+ nvidia,function = "xusb";
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ status = "disabled";
+ };
+
+ usb2-1 {
+ status = "disabled";
+ };
+
+ usb2-2 {
+ status = "disabled";
+ };
+
+ usb2-3 {
+ status = "disabled";
+ };
+
+ usb3-0 {
+ status = "disabled";
+ };
+
+ usb3-1 {
+ status = "disabled";
+ };
+
+ usb3-2 {
+ status = "disabled";
+ };
+
+ usb3-3 {
+ status = "disabled";
+ };
+ };
+ };
+
+ usb@3610000 {
+ compatible = "nvidia,tegra194-xusb";
+ reg = <0x03610000 0x40000>,
+ <0x03600000 0x10000>;
+ reg-names = "hcd", "fpci";
+
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&bpmp TEGRA194_CLK_XUSB_CORE_HOST>,
+ <&bpmp TEGRA194_CLK_XUSB_FALCON>,
+ <&bpmp TEGRA194_CLK_XUSB_CORE_SS>,
+ <&bpmp TEGRA194_CLK_XUSB_SS>,
+ <&bpmp TEGRA194_CLK_CLK_M>,
+ <&bpmp TEGRA194_CLK_XUSB_FS>,
+ <&bpmp TEGRA194_CLK_UTMIPLL>,
+ <&bpmp TEGRA194_CLK_CLK_M>,
+ <&bpmp TEGRA194_CLK_PLLE>;
+ clock-names = "xusb_host", "xusb_falcon_src",
+ "xusb_ss", "xusb_ss_src", "xusb_hs_src",
+ "xusb_fs_src", "pll_u_480m", "clk_m",
+ "pll_e";
+
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBC>,
+ <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
+ power-domain-names = "xusb_host", "xusb_ss";
+
+ nvidia,xusb-padctl = <&xusb_padctl>;
+ status = "disabled";
+ };
+
fuse@3820000 {
compatible = "nvidia,tegra194-efuse";
reg = <0x03820000 0x10000>;
@@ -1208,7 +1347,7 @@
};
pcie@14100000 {
- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+ compatible = "nvidia,tegra194-pcie";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
reg = <0x00 0x14100000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x30000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1253,7 +1392,7 @@
};
pcie@14120000 {
- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+ compatible = "nvidia,tegra194-pcie";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
reg = <0x00 0x14120000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x32000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1298,7 +1437,7 @@
};
pcie@14140000 {
- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+ compatible = "nvidia,tegra194-pcie";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX1A>;
reg = <0x00 0x14140000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x34000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1343,7 +1482,7 @@
};
pcie@14160000 {
- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+ compatible = "nvidia,tegra194-pcie";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x36000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1388,7 +1527,7 @@
};
pcie@14180000 {
- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+ compatible = "nvidia,tegra194-pcie";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x38000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1433,7 +1572,7 @@
};
pcie@141a0000 {
- compatible = "nvidia,tegra194-pcie", "snps,dw-pcie";
+ compatible = "nvidia,tegra194-pcie";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */
@@ -1481,6 +1620,105 @@
0x82000000 0x0 0x40000000 0x1f 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
};
+ pcie_ep@14160000 {
+ compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
+ reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x36040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+ 0x00 0x36080000 0x0 0x00040000 /* DBI reg space (256K) */
+ 0x14 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
+ reg-names = "appl", "atu_dma", "dbi", "addr_space";
+
+ status = "disabled";
+
+ num-lanes = <4>;
+ num-ib-windows = <2>;
+ num-ob-windows = <8>;
+
+ clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_4>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA194_RESET_PEX0_CORE_4_APB>,
+ <&bpmp TEGRA194_RESET_PEX0_CORE_4>;
+ reset-names = "apb", "core";
+
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "intr";
+
+ nvidia,bpmp = <&bpmp 4>;
+
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+ };
+
+ pcie_ep@14180000 {
+ compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
+ reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x38040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+ 0x00 0x38080000 0x0 0x00040000 /* DBI reg space (256K) */
+ 0x18 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
+ reg-names = "appl", "atu_dma", "dbi", "addr_space";
+
+ status = "disabled";
+
+ num-lanes = <8>;
+ num-ib-windows = <2>;
+ num-ob-windows = <8>;
+
+ clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>,
+ <&bpmp TEGRA194_RESET_PEX0_CORE_0>;
+ reset-names = "apb", "core";
+
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "intr";
+
+ nvidia,bpmp = <&bpmp 0>;
+
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+ };
+
+ pcie_ep@141a0000 {
+ compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
+ reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+ 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */
+ 0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */
+ reg-names = "appl", "atu_dma", "dbi", "addr_space";
+
+ status = "disabled";
+
+ num-lanes = <8>;
+ num-ib-windows = <2>;
+ num-ob-windows = <8>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&clkreq_c5_bi_dir_state>;
+
+ clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>;
+ clock-names = "core";
+
+ resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>,
+ <&bpmp TEGRA194_RESET_PEX1_CORE_5>;
+ reset-names = "apb", "core";
+
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
+ interrupt-names = "intr";
+
+ nvidia,bpmp = <&bpmp 5>;
+
+ nvidia,aspm-cmrt-us = <60>;
+ nvidia,aspm-pwr-on-t-us = <20>;
+ nvidia,aspm-l0s-entrance-latency-us = <3>;
+ };
+
sysram@40000000 {
compatible = "nvidia,tegra194-sysram", "mmio-sram";
reg = <0x0 0x40000000 0x0 0x50000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index cb58f79deb48..f87d2437d11c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -265,11 +265,14 @@
};
i2c@7000c500 {
+ status = "okay";
+
/* module ID EEPROM */
eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
+ vcc-supply = <&vdd_1v8>;
address-bits = <8>;
page-size = <8>;
size = <256>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index a3cafe39ba4c..ea0e1efa6973 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -56,6 +56,7 @@
backlight: backlight@2c {
compatible = "ti,lp8557";
reg = <0x2c>;
+ power-supply = <&vdd_3v3_sys>;
dev-ctrl = /bits/ 8 <0x80>;
init-brt = /bits/ 8 <0xff>;
@@ -85,6 +86,7 @@
compatible = "atmel,24c02";
reg = <0x57>;
+ vcc-supply = <&vdd_1v8>;
address-bits = <8>;
page-size = <8>;
size = <256>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index b0095072bc28..313a4c29d37a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -5,6 +5,10 @@
model = "NVIDIA Tegra210 P2597 I/O board";
compatible = "nvidia,p2597", "nvidia,tegra210";
+ aliases {
+ ethernet = "/usb@70090000/ethernet@1";
+ };
+
host1x@50000000 {
dpaux@54040000 {
status = "okay";
@@ -1336,7 +1340,6 @@
<&{/padctl@7009f000/pads/pcie/lanes/pcie-5}>;
phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3", "usb3-0",
"usb3-1";
-
dvddio-pex-supply = <&vdd_pex_1v05>;
hvddio-pex-supply = <&vdd_1v8>;
avdd-usb-supply = <&vdd_3v3_sys>;
@@ -1347,6 +1350,13 @@
hvdd-usb-ss-pll-e-supply = <&vdd_1v8>;
status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@1 {
+ reg = <1>;
+ };
};
padctl@7009f000 {
@@ -1362,7 +1372,7 @@
status = "okay";
lanes {
- usb2-0 {
+ micro_b: usb2-0 {
nvidia,function = "xusb";
status = "okay";
};
@@ -1440,7 +1450,19 @@
ports {
usb2-0 {
status = "okay";
+ vbus-supply = <&vdd_usb_vbus_otg>;
mode = "otg";
+
+ usb-role-switch;
+ connector {
+ compatible = "usb-b-connector",
+ "gpio-usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ vbus-gpio = <&gpio TEGRA_GPIO(Z, 0)
+ GPIO_ACTIVE_LOW>;
+ id-gpio = <&pmic 0 GPIO_ACTIVE_HIGH>;
+ };
};
usb2-1 {
@@ -1483,6 +1505,14 @@
vmmc-supply = <&vdd_3v3_sd>;
};
+ usb@700d0000 {
+ status = "okay";
+ phys = <&micro_b>;
+ phy-names = "usb2-0";
+ avddio-usb-supply = <&vdd_3v3_sys>;
+ hvdd-usb-supply = <&vdd_1v8>;
+ };
+
regulators {
compatible = "simple-bus";
#address-cells = <1>;
@@ -1606,6 +1636,17 @@
vin-supply = <&vdd_5v0_sys>;
};
+ vdd_usb_vbus_otg: regulator@11 {
+ compatible = "regulator-fixed";
+ reg = <11>;
+ regulator-name = "USB_VBUS_EN0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
vdd_hdmi: regulator@10 {
compatible = "regulator-fixed";
reg = <10>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
index 9101d3a39cd2..21ed1756b889 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -114,6 +114,7 @@
compatible = "atmel,24c02";
reg = <0x50>;
+ vcc-supply = <&vdd_1v8>;
address-bits = <8>;
page-size = <8>;
size = <256>;
@@ -124,6 +125,7 @@
compatible = "atmel,24c02";
reg = <0x57>;
+ vcc-supply = <&vdd_1v8>;
address-bits = <8>;
page-size = <8>;
size = <256>;
@@ -443,7 +445,7 @@
status = "okay";
lanes {
- usb2-0 {
+ micro_b: usb2-0 {
nvidia,function = "xusb";
status = "okay";
};
@@ -505,7 +507,17 @@
ports {
usb2-0 {
status = "okay";
- mode = "otg";
+ mode = "peripheral";
+
+ usb-role-switch;
+ connector {
+ compatible = "usb-b-connector",
+ "gpio-usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ vbus-gpio = <&gpio TEGRA_GPIO(CC, 4)
+ GPIO_ACTIVE_LOW>;
+ };
};
usb2-1 {
@@ -536,6 +548,14 @@
vmmc-supply = <&vdd_3v3_sd>;
};
+ usb@700d0000 {
+ status = "okay";
+ phys = <&micro_b>;
+ phy-names = "usb2-0";
+ avddio-usb-supply = <&vdd_3v3_sys>;
+ hvdd-usb-supply = <&vdd_1v8>;
+ };
+
sdhci@700b0400 {
status = "okay";
bus-width = <4>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index 72c7a04ac1df..2faab6390552 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -1592,7 +1592,7 @@
reg = <0x1a>;
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(E, 6) IRQ_TYPE_LEVEL_LOW>;
- clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
+ clocks = <&tegra_pmc TEGRA_PMC_CLK_OUT_2>;
clock-names = "mclk";
nuvoton,jkdet-enable;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 48c63256ba7f..64c46ce3849d 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/reset/tegra210-car.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/tegra124-soctherm.h>
+#include <dt-bindings/soc/tegra-pmc.h>
/ {
compatible = "nvidia,tegra210";
@@ -770,16 +771,17 @@
compatible = "nvidia,tegra210-rtc", "nvidia,tegra20-rtc";
reg = <0x0 0x7000e000 0x0 0x100>;
interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-parent = <&pmc>;
+ interrupt-parent = <&tegra_pmc>;
clocks = <&tegra_car TEGRA210_CLK_RTC>;
clock-names = "rtc";
};
- pmc: pmc@7000e400 {
+ tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra210-pmc";
reg = <0x0 0x7000e400 0x0 0x400>;
clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
+ #clock-cells = <1>;
#interrupt-cells = <2>;
interrupt-controller;
@@ -1207,6 +1209,25 @@
status = "disabled";
};
+ usb@700d0000 {
+ compatible = "nvidia,tegra210-xudc";
+ reg = <0x0 0x700d0000 0x0 0x8000>,
+ <0x0 0x700d8000 0x0 0x1000>,
+ <0x0 0x700d9000 0x0 0x1000>;
+ reg-names = "base", "fpci", "ipfs";
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_XUSB_DEV>,
+ <&tegra_car TEGRA210_CLK_XUSB_SS>,
+ <&tegra_car TEGRA210_CLK_XUSB_SSP_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_FS_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_HS_SRC>;
+ clock-names = "dev", "ss", "ss_src", "fs_src", "hs_src";
+ power-domains = <&pd_xusbdev>, <&pd_xusbss>;
+ power-domain-names = "dev", "ss";
+ nvidia,xusb-padctl = <&padctl>;
+ status = "disabled";
+ };
+
mipi: mipi@700e3000 {
compatible = "nvidia,tegra210-mipi";
reg = <0x0 0x700e3000 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 973c0f079659..cc103f7020fd 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -2,6 +2,7 @@
dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-ifc6640.dtb
+dtb-$(CONFIG_ARCH_QCOM) += ipq6018-cp01-c1.dtb
dtb-$(CONFIG_ARCH_QCOM) += ipq8074-hk01.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8916-longcheer-l8150.dtb
@@ -22,5 +23,6 @@ dtb-$(CONFIG_ARCH_QCOM) += sdm845-db845c.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm850-lenovo-yoga-c630.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8150-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8250-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-1000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-4000.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 037e26b3f8d5..06aab44d798c 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -562,7 +562,6 @@
&smd_rpm_regulators {
vdd_l1_l2_l3-supply = <&pm8916_s3>;
- vdd_l5-supply = <&pm8916_s3>;
vdd_l4_l5_l6-supply = <&pm8916_s4>;
vdd_l7-supply = <&pm8916_s4>;
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index fff6115f2670..af87350b5547 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -999,13 +999,7 @@
vdda-phy-supply = <&vreg_l28a_0p925>;
vdda-pll-supply = <&vreg_l12a_1p8>;
-
- vdda-phy-max-microamp = <18380>;
- vdda-pll-max-microamp = <9440>;
-
vddp-ref-clk-supply = <&vreg_l25a_1p2>;
- vddp-ref-clk-max-microamp = <100>;
- vddp-ref-clk-always-on;
};
&ufshc {
diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
new file mode 100644
index 000000000000..b31117a93995
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * IPQ6018 CP01 board device tree source
+ *
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "ipq6018.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. IPQ6018/AP-CP01-C1";
+ compatible = "qcom,ipq6018-cp01", "qcom,ipq6018";
+
+ aliases {
+ serial0 = &blsp1_uart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ bootargs-append = " swiotlb=1";
+ };
+};
+
+&blsp1_uart3 {
+ pinctrl-0 = <&serial_3_pins>;
+ pinctrl-names = "default";
+ status = "ok";
+};
+
+&i2c_1 {
+ pinctrl-0 = <&i2c_1_pins>;
+ pinctrl-names = "default";
+ status = "ok";
+};
+
+&spi_0 {
+ cs-select = <0>;
+ status = "ok";
+
+ m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+ compatible = "n25q128a11";
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&tlmm {
+ i2c_1_pins: i2c-1-pins {
+ pins = "gpio42", "gpio43";
+ function = "blsp2_i2c";
+ drive-strength = <8>;
+ };
+
+ spi_0_pins: spi-0-pins {
+ pins = "gpio38", "gpio39", "gpio40", "gpio41";
+ function = "blsp0_spi";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
new file mode 100644
index 000000000000..1aa8d8579463
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * IPQ6018 SoC device tree source
+ *
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-ipq6018.h>
+#include <dt-bindings/reset/qcom,gcc-ipq6018.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&intc>;
+
+ clocks {
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+
+ xo: xo {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ cpus: cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x1>;
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x2>;
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x3>;
+ next-level-cache = <&L2_0>;
+ };
+
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <0x2>;
+ };
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm";
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x80>;
+ #hwlock-cells = <1>;
+ };
+
+ pmuv8: pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ psci: psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ tz: tz@48500000 {
+ reg = <0x0 0x48500000 0x0 0x00200000>;
+ no-map;
+ };
+
+ smem_region: memory@4aa00000 {
+ reg = <0x0 0x4aa00000 0x0 0x00100000>;
+ no-map;
+ };
+
+ q6_region: memory@4ab00000 {
+ reg = <0x0 0x4ab00000 0x0 0x02800000>;
+ no-map;
+ };
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_region>;
+ hwlocks = <&tcsr_mutex 0>;
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ dma-ranges;
+ compatible = "simple-bus";
+
+ prng: qrng@e3000 {
+ compatible = "qcom,prng-ee";
+ reg = <0xe3000 0x1000>;
+ clocks = <&gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "core";
+ };
+
+ cryptobam: dma@704000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x00704000 0x20000>;
+ interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_CRYPTO_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <1>;
+ qcom,controlled-remotely = <1>;
+ qcom,config-pipe-trust-reg = <0>;
+ };
+
+ crypto: crypto@73a000 {
+ compatible = "qcom,crypto-v5.1";
+ reg = <0x0073a000 0x6000>;
+ clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
+ <&gcc GCC_CRYPTO_AXI_CLK>,
+ <&gcc GCC_CRYPTO_CLK>;
+ clock-names = "iface", "bus", "core";
+ dmas = <&cryptobam 2>, <&cryptobam 3>;
+ dma-names = "rx", "tx";
+ };
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,ipq6018-pinctrl";
+ reg = <0x01000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 80>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ serial_3_pins: serial3-pinmux {
+ pins = "gpio44", "gpio45";
+ function = "blsp2_uart";
+ drive-strength = <8>;
+ bias-pull-down;
+ };
+ };
+
+ gcc: gcc@1800000 {
+ compatible = "qcom,gcc-ipq6018";
+ reg = <0x01800000 0x80000>;
+ clocks = <&xo>, <&sleep_clk>;
+ clock-names = "xo", "sleep_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ tcsr_mutex_regs: syscon@1905000 {
+ compatible = "syscon";
+ reg = <0x01905000 0x8000>;
+ };
+
+ tcsr_q6: syscon@1945000 {
+ compatible = "syscon";
+ reg = <0x01945000 0xe000>;
+ };
+
+ blsp_dma: dma@7884000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x07884000 0x2b000>;
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
+
+ blsp1_uart3: serial@78b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078b1000 0x200>;
+ interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ spi_0: spi@78b5000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x078b5000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ spi-max-frequency = <50000000>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 12>, <&blsp_dma 13>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ spi_1: spi@78b6000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x078b6000 0x600>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ spi-max-frequency = <50000000>;
+ clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 14>, <&blsp_dma 15>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2c_0: i2c@78b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x078b6000 0x600>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ clock-frequency = <400000>;
+ dmas = <&blsp_dma 15>, <&blsp_dma 14>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ i2c_1: i2c@78b7000 { /* BLSP1 QUP2 */
+ compatible = "qcom,i2c-qup-v2.2.1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x078b7000 0x600>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ clock-frequency = <400000>;
+ dmas = <&blsp_dma 17>, <&blsp_dma 16>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ intc: interrupt-controller@b000000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ #interrupt-cells = <0x3>;
+ reg = <0x0b000000 0x1000>, /*GICD*/
+ <0x0b002000 0x1000>, /*GICC*/
+ <0x0b001000 0x1000>, /*GICH*/
+ <0x0b004000 0x1000>; /*GICV*/
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ watchdog@b017000 {
+ compatible = "qcom,kpss-wdt";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
+ reg = <0x0b017000 0x40>;
+ clocks = <&sleep_clk>;
+ timeout-sec = <10>;
+ };
+
+ apcs_glb: mailbox@b111000 {
+ compatible = "qcom,ipq8074-apcs-apps-global";
+ reg = <0x0b111000 0xc>;
+
+ #mbox-cells = <1>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ timer@b120000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0b120000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@b120000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b121000 0x1000>,
+ <0x0b122000 0x1000>;
+ };
+
+ frame@b123000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b123000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b124000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b124000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b125000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b125000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b126000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b126000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b127000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b127000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b128000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b128000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ q6v5_wcss: remoteproc@cd00000 {
+ compatible = "qcom,ipq8074-wcss-pil";
+ reg = <0x0cd00000 0x4040>,
+ <0x004ab000 0x20>;
+ reg-names = "qdsp6",
+ "rmb";
+ interrupts-extended = <&intc GIC_SPI 325 IRQ_TYPE_EDGE_RISING>,
+ <&wcss_smp2p_in 0 0>,
+ <&wcss_smp2p_in 1 0>,
+ <&wcss_smp2p_in 2 0>,
+ <&wcss_smp2p_in 3 0>;
+ interrupt-names = "wdog",
+ "fatal",
+ "ready",
+ "handover",
+ "stop-ack";
+
+ resets = <&gcc GCC_WCSSAON_RESET>,
+ <&gcc GCC_WCSS_BCR>,
+ <&gcc GCC_WCSS_Q6_BCR>;
+
+ reset-names = "wcss_aon_reset",
+ "wcss_reset",
+ "wcss_q6_reset";
+
+ clocks = <&gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "prng";
+
+ qcom,halt-regs = <&tcsr_q6 0xa000 0xd000 0x0>;
+
+ qcom,smem-states = <&wcss_smp2p_out 0>,
+ <&wcss_smp2p_out 1>;
+ qcom,smem-state-names = "shutdown",
+ "stop";
+
+ memory-region = <&q6_region>;
+
+ glink-edge {
+ interrupts = <GIC_SPI 321 IRQ_TYPE_EDGE_RISING>;
+ qcom,remote-pid = <1>;
+ mboxes = <&apcs_glb 8>;
+
+ qrtr_requests {
+ qcom,glink-channels = "IPCRTR";
+ };
+ };
+ };
+
+ };
+
+ wcss: wcss-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 322 IRQ_TYPE_EDGE_RISING>;
+
+ mboxes = <&apcs_glb 9>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ wcss_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ wcss_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 67ee5f560104..2b31823d3ccd 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -21,6 +21,7 @@
reg = <0x1000000 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&tlmm 0 0 70>;
#gpio-cells = <0x2>;
interrupt-controller;
#interrupt-cells = <0x2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
index bd1eb3eeca53..43c5e0f882f1 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
@@ -15,6 +15,14 @@
stdout-path = "serial0";
};
+ reserved-memory {
+ /* Additional memory used by Samsung firmware modifications */
+ tz-apps@85500000 {
+ reg = <0x0 0x85500000 0x0 0xb00000>;
+ no-map;
+ };
+ };
+
soc {
sdhci@7824000 {
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 9f31064f2374..a88a15f2352b 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -423,6 +423,7 @@
reg = <0x1000000 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&msmgpio 0 0 122>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -860,7 +861,7 @@
};
tsens: thermal-sensor@4a9000 {
- compatible = "qcom,msm8916-tsens";
+ compatible = "qcom,msm8916-tsens", "qcom,tsens-v0_1";
reg = <0x4a9000 0x1000>, /* TM */
<0x4a8000 0x1000>; /* SROT */
nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
@@ -1129,6 +1130,20 @@
qcom,remote-pid = <1>;
label = "hexagon";
+
+ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,smd-channels = "fastrpcsmd-apps-dsp";
+ label = "adsp";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cb@1 {
+ compatible = "qcom,fastrpc-compute-cb";
+ reg = <1>;
+ };
+ };
};
};
@@ -1415,6 +1430,7 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ arm,coresight-loses-context-with-cpu;
cpu = <&CPU0>;
@@ -1433,6 +1449,7 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ arm,coresight-loses-context-with-cpu;
cpu = <&CPU1>;
@@ -1451,6 +1468,7 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ arm,coresight-loses-context-with-cpu;
cpu = <&CPU2>;
@@ -1469,6 +1487,7 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ arm,coresight-loses-context-with-cpu;
cpu = <&CPU3>;
diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
index 8be60c08a9ab..2021795c99ad 100644
--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
@@ -171,6 +171,7 @@
reg = <0xfd510000 0x4000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&msmgpio 0 0 146>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index 3932757f78b7..b1c2d7d6a0f2 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -133,6 +133,7 @@
reg = <0xfd510000 0x4000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&msmgpio 0 0 146>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 7ae082ea14ea..14827adebd94 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -443,10 +443,13 @@
#reset-cells = <1>;
#power-domain-cells = <1>;
reg = <0x00300000 0x90000>;
+
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>;
+ clock-names = "cxo2";
};
tsens0: thermal-sensor@4a9000 {
- compatible = "qcom,msm8996-tsens";
+ compatible = "qcom,msm8996-tsens", "qcom,tsens-v2";
reg = <0x004a9000 0x1000>, /* TM */
<0x004a8000 0x1000>; /* SROT */
#qcom,sensors = <13>;
@@ -457,7 +460,7 @@
};
tsens1: thermal-sensor@4ad000 {
- compatible = "qcom,msm8996-tsens";
+ compatible = "qcom,msm8996-tsens", "qcom,tsens-v2";
reg = <0x004ad000 0x1000>, /* TM */
<0x004ac000 0x1000>; /* SROT */
#qcom,sensors = <8>;
@@ -695,6 +698,7 @@
reg = <0x01010000 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
+ gpio-ranges = <&msmgpio 0 0 150>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -882,7 +886,7 @@
reg = <0x00624000 0x2500>;
interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&ufsphy>;
+ phys = <&ufsphy_lane>;
phy-names = "ufsphy";
power-domains = <&gcc UFS_GDSC>;
@@ -934,16 +938,25 @@
};
ufsphy: phy@627000 {
- compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
- reg = <0x00627000 0xda8>;
- reg-names = "phy_mem";
- #phy-cells = <0>;
+ compatible = "qcom,msm8996-qmp-ufs-phy";
+ reg = <0x00627000 0x1c4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_UFS_CLKREF_CLK>;
+ clock-names = "ref";
- clock-names = "ref_clk_src", "ref_clk";
- clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
- <&gcc GCC_UFS_CLKREF_CLK>;
resets = <&ufshc 0>;
+ reset-names = "ufsphy";
status = "disabled";
+
+ ufsphy_lane: lanes@627400 {
+ reg = <0x627400 0x12c>,
+ <0x627600 0x200>,
+ <0x627c00 0x1b4>;
+ #phy-cells = <0>;
+ };
};
camss: camss@a00000 {
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index 0e0b9bc12945..8a14b2bf7bca 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -95,11 +95,15 @@
};
&funnel4 {
- status = "okay";
+ // FIXME: Figure out why clock late_initcall crashes the board with
+ // this enabled.
+ // status = "okay";
};
&funnel5 {
- status = "okay";
+ // FIXME: Figure out why clock late_initcall crashes the board with
+ // this enabled.
+ // status = "okay";
};
&pm8005_lsid1 {
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 91f7f2d07597..c07fee6fd7eb 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -130,7 +130,7 @@
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x0>;
enable-method = "psci";
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
@@ -149,7 +149,7 @@
CPU1: cpu@1 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x1>;
enable-method = "psci";
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
@@ -164,7 +164,7 @@
CPU2: cpu@2 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x2>;
enable-method = "psci";
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
@@ -179,7 +179,7 @@
CPU3: cpu@3 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x3>;
enable-method = "psci";
cpu-idle-states = <&LITTLE_CPU_SLEEP_0 &LITTLE_CPU_SLEEP_1>;
@@ -194,7 +194,7 @@
CPU4: cpu@100 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x100>;
enable-method = "psci";
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
@@ -213,7 +213,7 @@
CPU5: cpu@101 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x101>;
enable-method = "psci";
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
@@ -228,7 +228,7 @@
CPU6: cpu@102 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x102>;
enable-method = "psci";
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
@@ -243,7 +243,7 @@
CPU7: cpu@103 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo280";
reg = <0x0 0x103>;
enable-method = "psci";
cpu-idle-states = <&BIG_CPU_SLEEP_0 &BIG_CPU_SLEEP_1>;
diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi
index 23534639f455..57af0b4a384d 100644
--- a/arch/arm64/boot/dts/qcom/pm6150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi
@@ -20,7 +20,7 @@
mode-bootloader = <0x2>;
mode-recovery = <0x1>;
- pwrkey {
+ pm6150_pwrkey: pwrkey {
compatible = "qcom,pm8941-pwrkey";
interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
debounce = <15625>;
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index dc2ce23cde05..67283d60e2ac 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -45,7 +45,7 @@
mode-bootloader = <0x2>;
mode-recovery = <0x1>;
- pwrkey {
+ pm8998_pwrkey: pwrkey {
compatible = "qcom,pm8941-pwrkey";
interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
debounce = <15625>;
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index 522d3ef72df5..afe69e8f3114 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -200,6 +200,7 @@
&sdcc1 {
status = "ok";
+ supports-cqe;
mmc-ddr-1_8v;
mmc-hs400-1_8v;
bus-width = <8>;
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index 4ee1e3d5f123..f149a538c1cc 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -685,9 +685,9 @@
};
sdcc1: sdcc@7804000 {
- compatible = "qcom,sdhci-msm-v5";
+ compatible = "qcom,qcs404-sdhci", "qcom,sdhci-msm-v5";
reg = <0x07804000 0x1000>, <0x7805000 0x1000>;
- reg-names = "hc_mem", "cmdq_mem";
+ reg-names = "hc", "cqhci";
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-idp.dts b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
index 388f50ad4fde..043c9b9b5024 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-idp.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
@@ -7,6 +7,7 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "sc7180.dtsi"
#include "pm6150.dtsi"
@@ -17,6 +18,7 @@
compatible = "qcom,sc7180-idp", "qcom,sc7180";
aliases {
+ bluetooth0 = &bluetooth;
hsuart0 = &uart3;
serial0 = &uart8;
};
@@ -101,9 +103,9 @@
};
vreg_l12a_1p8: ldo12 {
- regulator-min-microvolt = <1696000>;
- regulator-max-microvolt = <1952000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13a_1p8: ldo13 {
@@ -143,9 +145,9 @@
};
vreg_l19a_2p9: ldo19 {
- regulator-min-microvolt = <2696000>;
- regulator-max-microvolt = <3304000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
};
@@ -189,9 +191,9 @@
};
vreg_l6c_2p9: ldo6 {
- regulator-min-microvolt = <2696000>;
- regulator-max-microvolt = <3304000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c_3p0: ldo7 {
@@ -207,9 +209,9 @@
};
vreg_l9c_2p9: ldo9 {
- regulator-min-microvolt = <2952000>;
- regulator-max-microvolt = <3304000>;
- regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c_3p3: ldo10 {
@@ -254,8 +256,40 @@
status = "okay";
};
+&sdhc_1 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_on>;
+ pinctrl-1 = <&sdc1_off>;
+ vmmc-supply = <&vreg_l19a_2p9>;
+ vqmmc-supply = <&vreg_l12a_1p8>;
+};
+
+&sdhc_2 {
+ status = "okay";
+
+ pinctrl-names = "default","sleep";
+ pinctrl-0 = <&sdc2_on>;
+ pinctrl-1 = <&sdc2_off>;
+ vmmc-supply = <&vreg_l9c_2p9>;
+ vqmmc-supply = <&vreg_l6c_2p9>;
+
+ cd-gpios = <&tlmm 69 GPIO_ACTIVE_LOW>;
+};
+
&uart3 {
status = "okay";
+
+ bluetooth: wcn3990-bt {
+ compatible = "qcom,wcn3990-bt";
+ vddio-supply = <&vreg_l10a_1p8>;
+ vddxo-supply = <&vreg_l1c_1p8>;
+ vddrf-supply = <&vreg_l2c_1p3>;
+ vddch0-supply = <&vreg_l10c_3p3>;
+ max-speed = <3200000>;
+ clocks = <&rpmhcc RPMH_RF_CLK2>;
+ };
};
&uart8 {
@@ -287,6 +321,12 @@
vdda-pll-supply = <&vreg_l4a_0p8>;
};
+&venus {
+ video-firmware {
+ iommus = <&apps_smmu 0x0c42 0x0>;
+ };
+};
+
/* PINCTRL - additions to nodes defined in sc7180.dtsi */
&qspi_clk {
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 8011c5fe2a31..998f101ad623 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -5,8 +5,11 @@
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dispcc-sc7180.h>
#include <dt-bindings/clock/qcom,gcc-sc7180.h>
+#include <dt-bindings/clock/qcom,gpucc-sc7180.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/clock/qcom,videocc-sc7180.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/phy/phy-qcom-qusb2.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
@@ -75,6 +78,11 @@
reg = <0x0 0x80900000 0x0 0x200000>;
no-map;
};
+
+ venus_mem: memory@8f600000 {
+ reg = <0 0x8f600000 0 0x500000>;
+ no-map;
+ };
};
cpus {
@@ -86,6 +94,8 @@
compatible = "arm,armv8";
reg = <0x0 0x0>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
next-level-cache = <&L2_0>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 0>;
@@ -103,6 +113,8 @@
compatible = "arm,armv8";
reg = <0x0 0x100>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
next-level-cache = <&L2_100>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 0>;
@@ -117,6 +129,8 @@
compatible = "arm,armv8";
reg = <0x0 0x200>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
next-level-cache = <&L2_200>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 0>;
@@ -131,6 +145,8 @@
compatible = "arm,armv8";
reg = <0x0 0x300>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
next-level-cache = <&L2_300>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 0>;
@@ -145,6 +161,8 @@
compatible = "arm,armv8";
reg = <0x0 0x400>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
next-level-cache = <&L2_400>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 0>;
@@ -159,6 +177,8 @@
compatible = "arm,armv8";
reg = <0x0 0x500>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ dynamic-power-coefficient = <100>;
next-level-cache = <&L2_500>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 0>;
@@ -173,6 +193,8 @@
compatible = "arm,armv8";
reg = <0x0 0x600>;
enable-method = "psci";
+ capacity-dmips-mhz = <1740>;
+ dynamic-power-coefficient = <405>;
next-level-cache = <&L2_600>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 1>;
@@ -187,6 +209,8 @@
compatible = "arm,armv8";
reg = <0x0 0x700>;
enable-method = "psci";
+ capacity-dmips-mhz = <1740>;
+ dynamic-power-coefficient = <405>;
next-level-cache = <&L2_700>;
#cooling-cells = <2>;
qcom,freq-domain = <&cpufreq_hw 1>;
@@ -195,6 +219,42 @@
next-level-cache = <&L3_0>;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+
+ core2 {
+ cpu = <&CPU2>;
+ };
+
+ core3 {
+ cpu = <&CPU3>;
+ };
+
+ core4 {
+ cpu = <&CPU4>;
+ };
+
+ core5 {
+ cpu = <&CPU5>;
+ };
+
+ core6 {
+ cpu = <&CPU6>;
+ };
+
+ core7 {
+ cpu = <&CPU7>;
+ };
+ };
+ };
};
memory@80000000 {
@@ -299,7 +359,7 @@
method = "smc";
};
- soc: soc {
+ soc: soc@0 {
#address-cells = <2>;
#size-cells = <2>;
ranges = <0 0 0 0 0x10 0>;
@@ -310,8 +370,9 @@
compatible = "qcom,gcc-sc7180";
reg = <0 0x00100000 0 0x1f0000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&rpmhcc RPMH_CXO_CLK_A>;
- clock-names = "bi_tcxo", "bi_tcxo_ao";
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -329,6 +390,33 @@
};
};
+ sdhc_1: sdhci@7c4000 {
+ compatible = "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0 0x7c4000 0 0x1000>,
+ <0 0x07c5000 0 0x1000>;
+ reg-names = "hc", "cqhci";
+
+ iommus = <&apps_smmu 0x60 0x0>;
+ interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 644 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC1_APPS_CLK>,
+ <&gcc GCC_SDCC1_AHB_CLK>;
+ clock-names = "core", "iface";
+
+ bus-width = <8>;
+ non-removable;
+ supports-cqe;
+
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+
+ status = "disabled";
+ };
+
qupv3_id_0: geniqup@8c0000 {
compatible = "qcom,geni-se-qup";
reg = <0 0x008c0000 0 0x6000>;
@@ -338,6 +426,7 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ iommus = <&apps_smmu 0x43 0x0>;
status = "disabled";
i2c0: i2c@880000 {
@@ -546,6 +635,7 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ iommus = <&apps_smmu 0x4c3 0x0>;
status = "disabled";
i2c6: i2c@a80000 {
@@ -745,6 +835,69 @@
};
};
+ config_noc: interconnect@1500000 {
+ compatible = "qcom,sc7180-config-noc";
+ reg = <0 0x01500000 0 0x28000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1620000 {
+ compatible = "qcom,sc7180-system-noc";
+ reg = <0 0x01620000 0 0x17080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mc_virt: interconnect@1638000 {
+ compatible = "qcom,sc7180-mc-virt";
+ reg = <0 0x01638000 0 0x1000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ qup_virt: interconnect@1650000 {
+ compatible = "qcom,sc7180-qup-virt";
+ reg = <0 0x01650000 0 0x1000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@16e0000 {
+ compatible = "qcom,sc7180-aggre1-noc";
+ reg = <0 0x016e0000 0 0x15080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre2_noc: interconnect@1705000 {
+ compatible = "qcom,sc7180-aggre2-noc";
+ reg = <0 0x01705000 0 0x9000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ compute_noc: interconnect@170e000 {
+ compatible = "qcom,sc7180-compute-noc";
+ reg = <0 0x0170e000 0 0x6000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ compatible = "qcom,sc7180-mmss-noc";
+ reg = <0 0x01740000 0 0x1c100>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ ipa_virt: interconnect@1e00000 {
+ compatible = "qcom,sc7180-ipa-virt";
+ reg = <0 0x01e00000 0 0x1000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
tcsr_mutex_regs: syscon@1f40000 {
compatible = "syscon";
reg = <0 0x01f40000 0 0x40000>;
@@ -1037,6 +1190,140 @@
function = "qup15";
};
};
+
+ sdc1_on: sdc1-on {
+ pinconf-clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc1_off: sdc1-off {
+ pinconf-clk {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-data {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-rclk {
+ pins = "sdc1_rclk";
+ bias-pull-down;
+ };
+ };
+
+ sdc2_on: sdc2-on {
+ pinconf-clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+
+ pinconf-sd-cd {
+ pins = "gpio69";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+
+ sdc2_off: sdc2-off {
+ pinconf-clk {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ pinconf-cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-data {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pinconf-sd-cd {
+ pins = "gpio69";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
+ };
+
+ sdhc_2: sdhci@8804000 {
+ compatible = "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0 0x08804000 0 0x1000>;
+
+ iommus = <&apps_smmu 0x80 0>;
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC2_APPS_CLK>,
+ <&gcc GCC_SDCC2_AHB_CLK>;
+ clock-names = "core", "iface";
+
+ bus-width = <4>;
+
+ status = "disabled";
+ };
+
+ gpucc: clock-controller@5090000 {
+ compatible = "qcom,sc7180-gpucc";
+ reg = <0 0x05090000 0 0x9000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ clock-names = "bi_tcxo",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
};
qspi: spi@88dc000 {
@@ -1081,8 +1368,8 @@
<&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
clock-names = "aux", "cfg_ahb", "ref", "com_aux";
- resets = <&gcc GCC_USB3_DP_PHY_PRIM_BCR>,
- <&gcc GCC_USB3_PHY_PRIM_BCR>;
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";
usb_1_ssphy: phy@88e9200 {
@@ -1100,6 +1387,13 @@
};
};
+ dc_noc: interconnect@9160000 {
+ compatible = "qcom,sc7180-dc-noc";
+ reg = <0 0x09160000 0 0x03200>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
system-cache-controller@9200000 {
compatible = "qcom,sc7180-llcc";
reg = <0 0x09200000 0 0x200000>, <0 0x09600000 0 0x50000>;
@@ -1107,6 +1401,20 @@
interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
};
+ gem_noc: interconnect@9680000 {
+ compatible = "qcom,sc7180-gem-noc";
+ reg = <0 0x09680000 0 0x3e200>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ npu_noc: interconnect@9990000 {
+ compatible = "qcom,sc7180-npu-noc";
+ reg = <0 0x09990000 0 0x1600>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
usb_1: usb@a6f8800 {
compatible = "qcom,sc7180-dwc3", "qcom,dwc3";
reg = <0 0x0a6f8800 0 0x400>;
@@ -1151,6 +1459,201 @@
};
};
+ venus: video-codec@aa00000 {
+ compatible = "qcom,sc7180-venus";
+ reg = <0 0x0aa00000 0 0xff000>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&videocc VENUS_GDSC>,
+ <&videocc VCODEC0_GDSC>;
+ power-domain-names = "venus", "vcodec0";
+ clocks = <&videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+ <&videocc VIDEO_CC_VENUS_AHB_CLK>,
+ <&videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
+ <&videocc VIDEO_CC_VCODEC0_CORE_CLK>,
+ <&videocc VIDEO_CC_VCODEC0_AXI_CLK>;
+ clock-names = "core", "iface", "bus",
+ "vcodec0_core", "vcodec0_bus";
+ iommus = <&apps_smmu 0x0c00 0x60>;
+ memory-region = <&venus_mem>;
+
+ video-decoder {
+ compatible = "venus-decoder";
+ };
+
+ video-encoder {
+ compatible = "venus-encoder";
+ };
+ };
+
+ videocc: clock-controller@ab00000 {
+ compatible = "qcom,sc7180-videocc";
+ reg = <0 0x0ab00000 0 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "bi_tcxo";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ camnoc_virt: interconnect@ac00000 {
+ compatible = "qcom,sc7180-camnoc-virt";
+ reg = <0 0x0ac00000 0 0x1000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mdss: mdss@ae00000 {
+ compatible = "qcom,sc7180-mdss";
+ reg = <0 0x0ae00000 0 0x1000>;
+ reg-names = "mdss";
+
+ power-domains = <&dispcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_DISP_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>;
+ clock-names = "iface", "bus", "ahb", "core";
+
+ assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;
+ assigned-clock-rates = <300000000>;
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_smmu 0x800 0x2>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ status = "disabled";
+
+ mdp: mdp@ae01000 {
+ compatible = "qcom,sc7180-dpu";
+ reg = <0 0x0ae01000 0 0x8f000>,
+ <0 0x0aeb0000 0 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_ROT_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "iface", "rot", "lut", "core",
+ "vsync";
+ assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <300000000>,
+ <19200000>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+ };
+ };
+
+ dsi0: dsi@ae94000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ reg = <0 0x0ae94000 0 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC0_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&gcc GCC_DISP_HF_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ phys = <&dsi_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ };
+ };
+ };
+ };
+
+ dsi_phy: dsi-phy@ae94400 {
+ compatible = "qcom,dsi-phy-10nm";
+ reg = <0 0x0ae94400 0 0x200>,
+ <0 0x0ae94600 0 0x280>,
+ <0 0x0ae94a00 0 0x1e0>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+
+ status = "disabled";
+ };
+ };
+
+ dispcc: clock-controller@af00000 {
+ compatible = "qcom,sc7180-dispcc";
+ reg = <0 0x0af00000 0 0x200000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+ <&dsi_phy 0>,
+ <&dsi_phy 1>,
+ <0>,
+ <0>;
+ clock-names = "bi_tcxo",
+ "gcc_disp_gpll0_clk_src",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi0_phy_pll_out_dsiclk",
+ "dp_phy_pll_link_clk",
+ "dp_phy_pll_vco_div_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sc7180-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x30000>;
@@ -1478,6 +1981,20 @@
};
};
};
+
+ apps_bcm_voter: bcm_voter {
+ compatible = "qcom,bcm-voter";
+ };
+ };
+
+ osm_l3: interconnect@18321000 {
+ compatible = "qcom,sc7180-osm-l3";
+ reg = <0 0x18321000 0 0x1400>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #interconnect-cells = <1>;
};
cpufreq_hw: cpufreq@18323000 {
@@ -1953,6 +2470,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ aoss0_crit: aoss0_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2008,6 +2531,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ gpuss0_crit: gpuss0_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2023,6 +2552,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ gpuss1_crit: gpuss1_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2038,6 +2573,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ aoss1_crit: aoss1_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2053,6 +2594,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ cwlan_crit: cwlan_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2068,6 +2615,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ audio_crit: audio_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2083,6 +2636,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ ddr_crit: ddr_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2098,6 +2657,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ q6_hvx_crit: q6_hvx_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2113,6 +2678,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ camera_crit: camera_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2128,6 +2699,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ mdm_crit: mdm_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2143,6 +2720,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ mdm_dsp_crit: mdm_dsp_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2158,6 +2741,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ npu_crit: npu_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
@@ -2173,6 +2762,12 @@
hysteresis = <2000>;
type = "hot";
};
+
+ video_crit: video_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
index 7b53b3c7ffe6..9070be43a309 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
@@ -614,6 +614,11 @@ ap_ts_i2c: &i2c14 {
};
};
+&ipa {
+ status = "okay";
+ modem-init;
+};
+
&lpasscc {
status = "okay";
};
@@ -626,6 +631,10 @@ ap_ts_i2c: &i2c14 {
status = "okay";
};
+&pm8998_pwrkey {
+ status = "disabled";
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -1292,3 +1301,9 @@ ap_ts_i2c: &i2c14 {
};
};
};
+
+&venus {
+ video-firmware {
+ iommus = <&apps_smmu 0x10b2 0x0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index eb77aaa6a819..a2e05926b429 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -8,6 +8,8 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
#include "sdm845.dtsi"
#include "pm8998.dtsi"
#include "pmi8998.dtsi"
@@ -359,11 +361,56 @@
};
};
+&i2c11 {
+ /* On Low speed expansion */
+ label = "LS-I2C1";
+ status = "okay";
+};
+
+&i2c14 {
+ /* On Low speed expansion */
+ label = "LS-I2C0";
+ status = "okay";
+};
+
&mss_pil {
status = "okay";
firmware-name = "qcom/sdm845/mba.mbn", "qcom/sdm845/modem.mbn";
};
+&pcie0 {
+ status = "okay";
+ perst-gpio = <&tlmm 35 GPIO_ACTIVE_LOW>;
+ enable-gpio = <&tlmm 134 GPIO_ACTIVE_HIGH>;
+
+ vddpe-3v3-supply = <&pcie0_3p3v_dual>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_default_state>;
+};
+
+&pcie0_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l1a_0p875>;
+ vdda-pll-supply = <&vreg_l26a_1p2>;
+};
+
+&pcie1 {
+ status = "okay";
+ perst-gpio = <&tlmm 102 GPIO_ACTIVE_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_default_state>;
+};
+
+&pcie1_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l1a_0p875>;
+ vdda-pll-supply = <&vreg_l26a_1p2>;
+};
+
&pm8998_gpio {
vol_up_pin_a: vol-up-active {
pins = "gpio6";
@@ -384,6 +431,37 @@
};
};
+/* QUAT I2S Uses 4 I2S SD Lines for audio on LT9611 HDMI Bridge */
+&q6afedai {
+ qi2s@22 {
+ reg = <22>;
+ qcom,sd-lines = <0 1 2 3>;
+ };
+};
+
+&q6asmdai {
+ dai@0 {
+ reg = <0>;
+ direction = <2>;
+ };
+
+ dai@1 {
+ reg = <1>;
+ direction = <2>;
+ };
+
+ dai@2 {
+ reg = <2>;
+ direction = <1>;
+ };
+
+ dai@3 {
+ reg = <3>;
+ direction = <2>;
+ is-compress-dai;
+ };
+};
+
&qupv3_id_0 {
status = "okay";
};
@@ -405,7 +483,121 @@
cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
};
+&sound {
+ compatible = "qcom,db845c-sndcard";
+ pinctrl-0 = <&quat_mi2s_active
+ &quat_mi2s_sd0_active
+ &quat_mi2s_sd1_active
+ &quat_mi2s_sd2_active
+ &quat_mi2s_sd3_active>;
+ pinctrl-names = "default";
+ model = "DB845c";
+ audio-routing =
+ "RX_BIAS", "MCLK",
+ "AMIC1", "MIC BIAS1",
+ "AMIC2", "MIC BIAS2",
+ "DMIC0", "MIC BIAS1",
+ "DMIC1", "MIC BIAS1",
+ "DMIC2", "MIC BIAS3",
+ "DMIC3", "MIC BIAS3",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT",
+ "MM_DL1", "MultiMedia1 Playback",
+ "MM_DL2", "MultiMedia2 Playback",
+ "MM_DL4", "MultiMedia4 Playback",
+ "MultiMedia3 Capture", "MM_UL3";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ mm4-dai-link {
+ link-name = "MultiMedia4";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA4>;
+ };
+ };
+
+ slim-dai-link {
+ link-name = "SLIM Playback";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swm 0>, <&wcd9340 0>;
+ };
+ };
+
+ slimcap-dai-link {
+ link-name = "SLIM Capture";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_TX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&wcd9340 1>;
+ };
+ };
+};
+
+&spi2 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+};
+
&tlmm {
+ pcie0_default_state: pcie0-default {
+ clkreq {
+ pins = "gpio36";
+ function = "pci_e0";
+ bias-pull-up;
+ };
+
+ reset-n {
+ pins = "gpio35";
+ function = "gpio";
+
+ drive-strength = <2>;
+ output-low;
+ bias-pull-down;
+ };
+
+ wake-n {
+ pins = "gpio37";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
pcie0_pwren_state: pcie0-pwren {
pins = "gpio90";
function = "gpio";
@@ -414,6 +606,39 @@
bias-disable;
};
+ pcie1_default_state: pcie1-default {
+ perst-n {
+ pins = "gpio102";
+ function = "gpio";
+
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ clkreq {
+ pins = "gpio103";
+ function = "pci_e1";
+ bias-pull-up;
+ };
+
+ wake-n {
+ pins = "gpio11";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ reset-n {
+ pins = "gpio75";
+ function = "gpio";
+
+ drive-strength = <16>;
+ bias-pull-up;
+ output-high;
+ };
+ };
+
sdc2_default_state: sdc2-default {
clk {
pins = "sdc2_clk";
@@ -444,6 +669,20 @@
function = "gpio";
bias-pull-up;
};
+
+ wcd_intr_default: wcd_intr_default {
+ pins = <54>;
+ function = "gpio";
+
+ input-enable;
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+};
+
+&uart3 {
+ label = "LS-UART0";
+ status = "disabled";
};
&uart6 {
@@ -461,6 +700,7 @@
};
&uart9 {
+ label = "LS-UART1";
status = "okay";
};
@@ -534,6 +774,39 @@
vdda-pll-supply = <&vreg_l26a_1p2>;
};
+&wcd9340 {
+ pinctrl-0 = <&wcd_intr_default>;
+ pinctrl-names = "default";
+ clock-names = "extclk";
+ clocks = <&rpmhcc RPMH_LN_BB_CLK2>;
+ reset-gpios = <&tlmm 64 0>;
+ vdd-buck-supply = <&vreg_s4a_1p8>;
+ vdd-buck-sido-supply = <&vreg_s4a_1p8>;
+ vdd-tx-supply = <&vreg_s4a_1p8>;
+ vdd-rx-supply = <&vreg_s4a_1p8>;
+ vdd-io-supply = <&vreg_s4a_1p8>;
+
+ swm: swm@c85 {
+ left_spkr: wsa8810-left {
+ compatible = "sdw10217201000";
+ reg = <0 1>;
+ powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ };
+
+ right_spkr: wsa8810-right {
+ compatible = "sdw10217201000";
+ powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+ reg = <0 2>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ };
+ };
+};
+
&wifi {
status = "okay";
@@ -546,6 +819,16 @@
};
/* PINCTRL - additions to nodes defined in sdm845.dtsi */
+&qup_spi2_default {
+ drive-strength = <16>;
+};
+
+&qup_uart3_default {
+ pinmux {
+ pins = "gpio41", "gpio42", "gpio43", "gpio44";
+ function = "qup3";
+ };
+};
&qup_uart6_default {
pinmux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 09ad37b0dd71..023e8b04c7f6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -50,6 +50,7 @@
&adsp_pas {
status = "okay";
+ firmware-name = "qcom/sdm845/adsp.mdt";
};
&apps_rsc {
@@ -350,6 +351,81 @@
&cdsp_pas {
status = "okay";
+ firmware-name = "qcom/sdm845/cdsp.mdt";
+};
+
+&dsi0 {
+ status = "okay";
+ vdda-supply = <&vdda_mipi_dsi0_1p2>;
+
+ qcom,dual-dsi-mode;
+ qcom,master-dsi;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&truly_in_0>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+
+ panel@0 {
+ compatible = "truly,nt35597-2K-display";
+ reg = <0>;
+ vdda-supply = <&vreg_l14a_1p88>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+ mode-gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ truly_in_0: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ truly_in_1: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+ };
+ };
+};
+
+&dsi0_phy {
+ status = "okay";
+ vdds-supply = <&vdda_mipi_dsi0_pll>;
+};
+
+&dsi1 {
+ status = "okay";
+ vdda-supply = <&vdda_mipi_dsi1_1p2>;
+
+ qcom,dual-dsi-mode;
+
+ ports {
+ port@1 {
+ endpoint {
+ remote-endpoint = <&truly_in_1>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&dsi1_phy {
+ status = "okay";
+ vdds-supply = <&vdda_mipi_dsi1_pll>;
};
&gcc {
@@ -372,6 +448,19 @@
clock-frequency = <400000>;
};
+&mdss {
+ status = "okay";
+};
+
+&mdss_mdp {
+ status = "okay";
+};
+
+&mss_pil {
+ status = "okay";
+ firmware-name = "qcom/sdm845/mba.mbn", "qcom/sdm845/modem.mbn";
+};
+
&qupv3_id_1 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index d42302b8889b..8f926b5234d4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -17,6 +17,7 @@
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>
#include <dt-bindings/reset/qcom,sdm845-pdc.h>
+#include <dt-bindings/soc/qcom,apr.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/thermal/thermal.h>
@@ -491,6 +492,57 @@
label = "lpass";
qcom,remote-pid = <2>;
mboxes = <&apss_shared 8>;
+
+ apr {
+ compatible = "qcom,apr-v2";
+ qcom,glink-channels = "apr_audio_svc";
+ qcom,apr-domain = <APR_DOMAIN_ADSP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,intents = <512 20>;
+
+ apr-service@3 {
+ reg = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ };
+
+ q6afe: apr-service@4 {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ q6afedai: dais {
+ compatible = "qcom,q6afe-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ };
+ };
+
+ q6asm: apr-service@7 {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ iommus = <&apps_smmu 0x1821 0x0>;
+ };
+ };
+
+ q6adm: apr-service@8 {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+ q6routing: routing {
+ compatible = "qcom,q6adm-routing";
+ #sound-dai-cells = <0>;
+ };
+ };
+ };
+
fastrpc {
compatible = "qcom,fastrpc";
qcom,glink-channels = "fastrpcglink-apps-dsp";
@@ -675,6 +727,17 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
smp2p-slpi {
@@ -1364,6 +1427,267 @@
interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
};
+ pcie0: pci@1c00000 {
+ compatible = "qcom,pcie-sdm845", "snps,dw-pcie";
+ reg = <0 0x01c00000 0 0x2000>,
+ <0 0x60000000 0 0xf1d>,
+ <0 0x60000f20 0 0xa8>,
+ <0 0x60100000 0 0x100000>;
+ reg-names = "parf", "dbi", "elbi", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
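+ /* 0x01000000: downstream I/O space; 0x02000000: 32-bit non-prefetchable memory */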
+ ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0xd00000>;
+
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 150 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 151 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 152 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>,
+ <&gcc GCC_PCIE_0_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "tbu";
+
+ iommus = <&apps_smmu 0x1c10 0xf>;
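+ /* iommu-map entries: <RID base> <&smmu> <SID base> <RID count> */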
+ iommu-map = <0x0 &apps_smmu 0x1c10 0x1>,
+ <0x100 &apps_smmu 0x1c11 0x1>,
+ <0x200 &apps_smmu 0x1c12 0x1>,
+ <0x300 &apps_smmu 0x1c13 0x1>,
+ <0x400 &apps_smmu 0x1c14 0x1>,
+ <0x500 &apps_smmu 0x1c15 0x1>,
+ <0x600 &apps_smmu 0x1c16 0x1>,
+ <0x700 &apps_smmu 0x1c17 0x1>,
+ <0x800 &apps_smmu 0x1c18 0x1>,
+ <0x900 &apps_smmu 0x1c19 0x1>,
+ <0xa00 &apps_smmu 0x1c1a 0x1>,
+ <0xb00 &apps_smmu 0x1c1b 0x1>,
+ <0xc00 &apps_smmu 0x1c1c 0x1>,
+ <0xd00 &apps_smmu 0x1c1d 0x1>,
+ <0xe00 &apps_smmu 0x1c1e 0x1>,
+ <0xf00 &apps_smmu 0x1c1f 0x1>;
+
+ resets = <&gcc GCC_PCIE_0_BCR>;
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_0_GDSC>;
+
+ phys = <&pcie0_lane>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+ };
+
+ pcie0_phy: phy@1c06000 {
+ compatible = "qcom,sdm845-qmp-pcie-phy";
+ reg = <0 0x01c06000 0 0x18c>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_CLKREF_CLK>,
+ <&gcc GCC_PCIE_PHY_REFGEN_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "refgen";
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE_PHY_REFGEN_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+
+ pcie0_lane: lanes@1c06200 {
+ reg = <0 0x01c06200 0 0x128>,
+ <0 0x01c06400 0 0x1fc>,
+ <0 0x01c06800 0 0x218>,
+ <0 0x01c06600 0 0x70>;
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+
+ #phy-cells = <0>;
+ clock-output-names = "pcie_0_pipe_clk";
+ };
+ };
+
+ pcie1: pci@1c08000 {
+ compatible = "qcom,pcie-sdm845", "snps,dw-pcie";
+ reg = <0 0x01c08000 0 0x2000>,
+ <0 0x40000000 0 0xf1d>,
+ <0 0x40000f20 0 0xa8>,
+ <0 0x40100000 0 0x100000>;
+ reg-names = "parf", "dbi", "elbi", "config";
+ device_type = "pci";
+ linux,pci-domain = <1>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+
+ interrupts = <GIC_SPI 307 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 434 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 435 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 438 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 439 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>,
+ <&gcc GCC_PCIE_1_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_PCIE_1_CLKREF_CLK>,
+ <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
+ clock-names = "pipe",
+ "aux",
+ "cfg",
+ "bus_master",
+ "bus_slave",
+ "slave_q2a",
+ "ref",
+ "tbu";
+
+ assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
+ assigned-clock-rates = <19200000>;
+
+ iommus = <&apps_smmu 0x1c00 0xf>;
+ iommu-map = <0x0 &apps_smmu 0x1c00 0x1>,
+ <0x100 &apps_smmu 0x1c01 0x1>,
+ <0x200 &apps_smmu 0x1c02 0x1>,
+ <0x300 &apps_smmu 0x1c03 0x1>,
+ <0x400 &apps_smmu 0x1c04 0x1>,
+ <0x500 &apps_smmu 0x1c05 0x1>,
+ <0x600 &apps_smmu 0x1c06 0x1>,
+ <0x700 &apps_smmu 0x1c07 0x1>,
+ <0x800 &apps_smmu 0x1c08 0x1>,
+ <0x900 &apps_smmu 0x1c09 0x1>,
+ <0xa00 &apps_smmu 0x1c0a 0x1>,
+ <0xb00 &apps_smmu 0x1c0b 0x1>,
+ <0xc00 &apps_smmu 0x1c0c 0x1>,
+ <0xd00 &apps_smmu 0x1c0d 0x1>,
+ <0xe00 &apps_smmu 0x1c0e 0x1>,
+ <0xf00 &apps_smmu 0x1c0f 0x1>;
+
+ resets = <&gcc GCC_PCIE_1_BCR>;
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_1_GDSC>;
+
+ phys = <&pcie1_lane>;
+ phy-names = "pciephy";
+
+ status = "disabled";
+ };
+
+ pcie1_phy: phy@1c0a000 {
+ compatible = "qcom,sdm845-qhp-pcie-phy";
+ reg = <0 0x01c0a000 0 0x800>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_CLKREF_CLK>,
+ <&gcc GCC_PCIE_PHY_REFGEN_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "refgen";
+
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "phy";
+
+ assigned-clocks = <&gcc GCC_PCIE_PHY_REFGEN_CLK>;
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+
+ pcie1_lane: lanes@1c0a800 {
+ reg = <0 0x01c0a800 0 0x800>,
+ <0 0x01c0a800 0 0x800>,
+ <0 0x01c0b800 0 0x400>;
+ clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-names = "pipe0";
+
+ #phy-cells = <0>;
+ clock-output-names = "pcie_1_pipe_clk";
+ };
+ };
+
+ mem_noc: interconnect@1380000 {
+ compatible = "qcom,sdm845-mem-noc";
+ reg = <0 0x01380000 0 0x27200>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ dc_noc: interconnect@14e0000 {
+ compatible = "qcom,sdm845-dc-noc";
+ reg = <0 0x014e0000 0 0x400>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ config_noc: interconnect@1500000 {
+ compatible = "qcom,sdm845-config-noc";
+ reg = <0 0x01500000 0 0x5080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1620000 {
+ compatible = "qcom,sdm845-system-noc";
+ reg = <0 0x01620000 0 0x18080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@16e0000 {
+ compatible = "qcom,sdm845-aggre1-noc";
+ reg = <0 0x016e0000 0 0x15080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre2_noc: interconnect@1700000 {
+ compatible = "qcom,sdm845-aggre2-noc";
+ reg = <0 0x01700000 0 0x1f300>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ compatible = "qcom,sdm845-mmss-noc";
+ reg = <0 0x01740000 0 0x1c100>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
ufs_mem_hc: ufshc@1d84000 {
compatible = "qcom,sdm845-ufshc", "qcom,ufshc",
"jedec,ufs-2.0";
@@ -1435,6 +1759,44 @@
};
};
+ ipa: ipa@1e40000 {
+ compatible = "qcom,sdm845-ipa";
+ reg = <0 0x1e40000 0 0x7000>,
+ <0 0x1e47000 0 0x2000>,
+ <0 0x1e04000 0 0x2c000>;
+ reg-names = "ipa-reg",
+ "ipa-shared",
+ "gsi";
+
+ interrupts-extended = <&intc 0 311 IRQ_TYPE_EDGE_RISING>,
+ <&intc 0 432 IRQ_TYPE_LEVEL_HIGH>,
+ <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ipa",
+ "gsi",
+ "ipa-clock-query",
+ "ipa-setup-ready";
+
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ clock-names = "core";
+
+ interconnects = <&aggre2_noc MASTER_IPA &mem_noc SLAVE_EBI1>,
+ <&aggre2_noc MASTER_IPA &system_noc SLAVE_IMEM>,
+ <&gladiator_noc MASTER_APPSS_PROC &config_noc SLAVE_IPA_CFG>;
+ interconnect-names = "memory",
+ "imem",
+ "config";
+
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+ "ipa-clock-enabled";
+
+ modem-remoteproc = <&mss_pil>;
+
+ status = "disabled";
+ };
+
tcsr_mutex_regs: syscon@1f40000 {
compatible = "syscon";
reg = <0 0x01f40000 0 0x40000>;
@@ -1837,6 +2199,142 @@
function = "qup15";
};
};
+
+ quat_mi2s_sleep: quat_mi2s_sleep {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ quat_mi2s_active: quat_mi2s_active {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <8>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ quat_mi2s_sd0_sleep: quat_mi2s_sd0_sleep {
+ mux {
+ pins = "gpio60";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd0_active: quat_mi2s_sd0_active {
+ mux {
+ pins = "gpio60";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
+ quat_mi2s_sd1_sleep: quat_mi2s_sd1_sleep {
+ mux {
+ pins = "gpio61";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd1_active: quat_mi2s_sd1_active {
+ mux {
+ pins = "gpio61";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
+ quat_mi2s_sd2_sleep: quat_mi2s_sd2_sleep {
+ mux {
+ pins = "gpio62";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd2_active: quat_mi2s_sd2_active {
+ mux {
+ pins = "gpio62";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
+ quat_mi2s_sd3_sleep: quat_mi2s_sd3_sleep {
+ mux {
+ pins = "gpio63";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio63";
+ drive-strength = <2>;
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ quat_mi2s_sd3_active: quat_mi2s_sd3_active {
+ mux {
+ pins = "gpio63";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio63";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
};
mss_pil: remoteproc@4080000 {
@@ -1903,8 +2401,12 @@
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
- clocks = <&rpmhcc RPMH_CXO_CLK>;
- clock-names = "xo";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_GPU_GPLL0_CLK_SRC>,
+ <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
+ clock-names = "bi_tcxo",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src";
};
stm@6002000 {
@@ -2386,6 +2888,87 @@
status = "disabled";
};
+ slim: slim@171c0000 {
+ compatible = "qcom,slim-ngd-v2.1.0";
+ reg = <0 0x171c0000 0 0x2c000>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+
+ qcom,apps-ch-pipes = <0x780000>;
+ qcom,ea-pc = <0x270>;
+ status = "okay";
+ dmas = <&slimbam 3>, <&slimbam 4>,
+ <&slimbam 5>, <&slimbam 6>;
+ dma-names = "rx", "tx", "tx2", "rx2";
+
+ iommus = <&apps_smmu 0x1806 0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ngd@1 {
+ reg = <1>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wcd9340_ifd: ifd@0 {
+ compatible = "slim217,250";
+ reg = <0 0>;
+ };
+
+ wcd9340: codec@1 {
+ compatible = "slim217,250";
+ reg = <1 0>;
+ slim-ifc-dev = <&wcd9340_ifd>;
+
+ #sound-dai-cells = <1>;
+
+ interrupts-extended = <&tlmm 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ #clock-cells = <0>;
+ clock-frequency = <9600000>;
+ clock-output-names = "mclk";
+ qcom,micbias1-millivolt = <1800>;
+ qcom,micbias2-millivolt = <1800>;
+ qcom,micbias3-millivolt = <1800>;
+ qcom,micbias4-millivolt = <1800>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ wcdgpio: gpio-controller@42 {
+ compatible = "qcom,wcd9340-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x42 0x2>;
+ };
+
+ swm: swm@c85 {
+ compatible = "qcom,soundwire-v1.3.0";
+ reg = <0xc85 0x40>;
+ interrupts-extended = <&wcd9340 20>;
+
+ qcom,dout-ports = <6>;
+ qcom,din-ports = <2>;
+ qcom,ports-sinterval-low = /bits/ 8 <0x07 0x1F 0x3F 0x07 0x1F 0x3F 0x0F 0x0F>;
+ qcom,ports-offset1 = /bits/ 8 <0x01 0x02 0x0C 0x06 0x12 0x0D 0x07 0x0A>;
+ qcom,ports-offset2 = /bits/ 8 <0x00 0x00 0x1F 0x00 0x00 0x1F 0x00 0x00>;
+
+ #sound-dai-cells = <1>;
+ clocks = <&wcd9340>;
+ clock-names = "iface";
+ #address-cells = <2>;
+ #size-cells = <0>;
+ };
+ };
+ };
+ };
+
+ sound: sound {
+ };
+
usb_1_hsphy: phy@88e2000 {
compatible = "qcom,sdm845-qusb2-phy";
reg = <0 0x088e2000 0 0x400>;
@@ -2570,39 +3153,42 @@
};
};
- video-codec@aa00000 {
- compatible = "qcom,sdm845-venus";
+ venus: video-codec@aa00000 {
+ compatible = "qcom,sdm845-venus-v2";
reg = <0 0x0aa00000 0 0xff000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
- power-domains = <&videocc VENUS_GDSC>;
+ power-domains = <&videocc VENUS_GDSC>,
+ <&videocc VCODEC0_GDSC>,
+ <&videocc VCODEC1_GDSC>;
+ power-domain-names = "venus", "vcodec0", "vcodec1";
clocks = <&videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
<&videocc VIDEO_CC_VENUS_AHB_CLK>,
- <&videocc VIDEO_CC_VENUS_CTL_AXI_CLK>;
- clock-names = "core", "iface", "bus";
+ <&videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
+ <&videocc VIDEO_CC_VCODEC0_CORE_CLK>,
+ <&videocc VIDEO_CC_VCODEC0_AXI_CLK>,
+ <&videocc VIDEO_CC_VCODEC1_CORE_CLK>,
+ <&videocc VIDEO_CC_VCODEC1_AXI_CLK>;
+ clock-names = "core", "iface", "bus",
+ "vcodec0_core", "vcodec0_bus",
+ "vcodec1_core", "vcodec1_bus";
iommus = <&apps_smmu 0x10a0 0x8>,
<&apps_smmu 0x10b0 0x0>;
memory-region = <&venus_mem>;
video-core0 {
compatible = "venus-decoder";
- clocks = <&videocc VIDEO_CC_VCODEC0_CORE_CLK>,
- <&videocc VIDEO_CC_VCODEC0_AXI_CLK>;
- clock-names = "core", "bus";
- power-domains = <&videocc VCODEC0_GDSC>;
};
video-core1 {
compatible = "venus-encoder";
- clocks = <&videocc VIDEO_CC_VCODEC1_CORE_CLK>,
- <&videocc VIDEO_CC_VCODEC1_AXI_CLK>;
- clock-names = "core", "bus";
- power-domains = <&videocc VCODEC1_GDSC>;
};
};
videocc: clock-controller@ab00000 {
compatible = "qcom,sdm845-videocc";
reg = <0 0x0ab00000 0 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "bi_tcxo";
#clock-cells = <1>;
#power-domain-cells = <1>;
#reset-cells = <1>;
@@ -2933,6 +3519,24 @@
dispcc: clock-controller@af00000 {
compatible = "qcom,sdm845-dispcc";
reg = <0 0x0af00000 0 0x10000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+ <&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
+ <&dsi0_phy 0>,
+ <&dsi0_phy 1>,
+ <&dsi1_phy 0>,
+ <&dsi1_phy 1>,
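+ /* DisplayPort PHY clocks are not wired up yet, hence the empty <0> phandles */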
+ <0>,
+ <0>;
+ clock-names = "bi_tcxo",
+ "gcc_disp_gpll0_clk_src",
+ "gcc_disp_gpll0_div_clk_src",
+ "dsi0_phy_pll_out_byteclk",
+ "dsi0_phy_pll_out_dsiclk",
+ "dsi1_phy_pll_out_byteclk",
+ "dsi1_phy_pll_out_dsiclk",
+ "dp_link_clk_divsel_ten",
+ "dp_vco_divided_clk_src_mux";
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -3098,6 +3702,13 @@
status = "disabled";
};
+ gladiator_noc: interconnect@17900000 {
+ compatible = "qcom,sdm845-gladiator-noc";
+ reg = <0 0x17900000 0 0xd080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
watchdog@17980000 {
compatible = "qcom,apss-wdt-sdm845", "qcom,kpss-wdt";
reg = <0 0x17980000 0 0x1000>;
@@ -3127,6 +3738,10 @@
<WAKE_TCS 3>,
<CONTROL_TCS 1>;
+ apps_bcm_voter: bcm-voter {
+ compatible = "qcom,bcm-voter";
+ };
+
rpmhcc: clock-controller {
compatible = "qcom,sdm845-rpmh-clk";
#clock-cells = <1>;
@@ -3183,11 +3798,6 @@
};
};
};
-
- rsc_hlos: interconnect {
- compatible = "qcom,sdm845-rsc-hlos";
- #interconnect-cells = <1>;
- };
};
intc: interrupt-controller@17a00000 {
@@ -3210,6 +3820,18 @@
};
};
+ slimbam: dma@17184000 {
+ compatible = "qcom,bam-v1.7.0";
+ qcom,controlled-remotely;
+ reg = <0 0x17184000 0 0x2a000>;
+ num-channels = <31>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ qcom,ee = <1>;
+ qcom,num-ees = <2>;
+ iommus = <&apps_smmu 0x1806 0x0>;
+ };
+
timer@17c90000 {
#address-cells = <2>;
#size-cells = <2>;
@@ -3268,6 +3890,16 @@
};
};
+ osm_l3: interconnect@17d41000 {
+ compatible = "qcom,sdm845-osm-l3";
+ reg = <0 0x17d41000 0 0x1400>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #interconnect-cells = <1>;
+ };
+
cpufreq_hw: cpufreq@17d43000 {
compatible = "qcom,cpufreq-hw";
reg = <0 0x17d43000 0 0x1400>, <0 0x17d45800 0 0x1400>;
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index b255be3a4a0a..3b617a75fafa 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -7,7 +7,10 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
#include "sdm845.dtsi"
#include "pm8998.dtsi"
@@ -353,6 +356,75 @@
status = "okay";
};
+&q6asmdai {
+ dai@0 {
+ reg = <0>;
+ direction = <2>;
+ };
+
+ dai@1 {
+ reg = <1>;
+ direction = <1>;
+ };
+};
+
+&sound {
+ compatible = "qcom,db845c-sndcard";
+ model = "Lenovo-YOGA-C630-13Q50";
+
+ audio-routing =
+ "RX_BIAS", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT",
+ "MM_DL1", "MultiMedia1 Playback",
+ "MultiMedia2 Capture", "MM_UL2";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ slim-dai-link {
+ link-name = "SLIM Playback";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swm 0>, <&wcd9340 0>;
+ };
+ };
+
+ slimcap-dai-link {
+ link-name = "SLIM Capture";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_TX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&wcd9340 1>;
+ };
+ };
+};
+
&tlmm {
gpio-reserved-ranges = <0 4>, <81 4>;
@@ -382,6 +454,15 @@
bias-pull-up;
drive-strength = <2>;
};
+
+ wcd_intr_default: wcd_intr_default {
+ pins = "gpio54";
+ function = "gpio";
+
+ input-enable;
+ bias-pull-down;
+ drive-strength = <2>;
+ };
};
&uart6 {
@@ -465,3 +546,36 @@
vdda-phy-supply = <&vdda_usb2_ss_1p2>;
vdda-pll-supply = <&vdda_usb2_ss_core>;
};
+
+&wcd9340 {
+ pinctrl-0 = <&wcd_intr_default>;
+ pinctrl-names = "default";
+ clock-names = "extclk";
+ clocks = <&rpmhcc RPMH_LN_BB_CLK2>;
+ reset-gpios = <&tlmm 64 0>;
+ vdd-buck-supply = <&vreg_s4a_1p8>;
+ vdd-buck-sido-supply = <&vreg_s4a_1p8>;
+ vdd-tx-supply = <&vreg_s4a_1p8>;
+ vdd-rx-supply = <&vreg_s4a_1p8>;
+ vdd-io-supply = <&vreg_s4a_1p8>;
+
+ swm: swm@c85 {
+ left_spkr: wsa8810-left {
+ compatible = "sdw10217211000";
+ reg = <0 3>;
+ powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ };
+
+ right_spkr: wsa8810-right {
+ compatible = "sdw10217211000";
+ powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
+ reg = <0 4>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
new file mode 100644
index 000000000000..224d0f1ea6f9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "sm8250.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SM8250 MTP";
+ compatible = "qcom,sm8250-mtp";
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
new file mode 100644
index 000000000000..891d83b2afea
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo485";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ firmware {
+ scm: scm {
+ compatible = "qcom,scm";
+ #reset-cells = <1>;
+ };
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ hyp_mem: memory@80000000 {
+ reg = <0x0 0x80000000 0x0 0x600000>;
+ no-map;
+ };
+
+ xbl_aop_mem: memory@80700000 {
+ reg = <0x0 0x80700000 0x0 0x160000>;
+ no-map;
+ };
+
+ cmd_db: memory@80860000 {
+ compatible = "qcom,cmd-db";
+ reg = <0x0 0x80860000 0x0 0x20000>;
+ no-map;
+ };
+
+ smem_mem: memory@80900000 {
+ reg = <0x0 0x80900000 0x0 0x200000>;
+ no-map;
+ };
+
+ removed_mem: memory@80b00000 {
+ reg = <0x0 0x80b00000 0x0 0x5300000>;
+ no-map;
+ };
+
+ camera_mem: memory@86200000 {
+ reg = <0x0 0x86200000 0x0 0x500000>;
+ no-map;
+ };
+
+ wlan_mem: memory@86700000 {
+ reg = <0x0 0x86700000 0x0 0x100000>;
+ no-map;
+ };
+
+ ipa_fw_mem: memory@86800000 {
+ reg = <0x0 0x86800000 0x0 0x10000>;
+ no-map;
+ };
+
+ ipa_gsi_mem: memory@86810000 {
+ reg = <0x0 0x86810000 0x0 0xa000>;
+ no-map;
+ };
+
+ gpu_mem: memory@8681a000 {
+ reg = <0x0 0x8681a000 0x0 0x2000>;
+ no-map;
+ };
+
+ npu_mem: memory@86900000 {
+ reg = <0x0 0x86900000 0x0 0x500000>;
+ no-map;
+ };
+
+ video_mem: memory@86e00000 {
+ reg = <0x0 0x86e00000 0x0 0x500000>;
+ no-map;
+ };
+
+ cvp_mem: memory@87300000 {
+ reg = <0x0 0x87300000 0x0 0x500000>;
+ no-map;
+ };
+
+ cdsp_mem: memory@87800000 {
+ reg = <0x0 0x87800000 0x0 0x1400000>;
+ no-map;
+ };
+
+ slpi_mem: memory@88c00000 {
+ reg = <0x0 0x88c00000 0x0 0x1500000>;
+ no-map;
+ };
+
+ adsp_mem: memory@8a100000 {
+ reg = <0x0 0x8a100000 0x0 0x1d00000>;
+ no-map;
+ };
+
+ spss_mem: memory@8be00000 {
+ reg = <0x0 0x8be00000 0x0 0x100000>;
+ no-map;
+ };
+
+ cdsp_secure_heap: memory@8bf00000 {
+ reg = <0x0 0x8bf00000 0x0 0x4600000>;
+ no-map;
+ };
+ };
+
+ smem: qcom,smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_mem>;
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc: soc@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
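+ /* Identity-map the first 64 GiB (0x10 00000000) of the address space */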
+ ranges = <0 0 0 0 0x10 0>;
+ dma-ranges = <0 0 0 0 0x10 0>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sm8250";
+ reg = <0x0 0x00100000 0x0 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clock-names = "bi_tcxo", "sleep_clk";
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&sleep_clk>;
+ };
+
+ qupv3_id_1: geniqup@ac0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x00ac0000 0x0 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc 133>, <&gcc 134>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ uart2: serial@a90000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0x0 0x00a90000 0x0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc 113>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x17a00000 0x0 0x10000>, /* GICD */
+ <0x0 0x17a60000 0x0 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,sm8250-pdc", "qcom,pdc";
+ reg = <0x0 0x0b220000 0x0 0x30000>, <0x0 0x17c000f0 0x0 0x60>;
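+ /* qcom,pdc-ranges: <PDC pin base> <GIC SPI base> <pin count> */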
+ qcom,pdc-ranges = <0 480 94>, <94 609 31>,
+ <125 63 1>, <126 716 12>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
+ spmi: qcom,spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0 0x0c440000 0x0 0x0001100>,
+ <0x0 0x0c600000 0x0 0x2000000>,
+ <0x0 0x0e600000 0x0 0x0100000>,
+ <0x0 0x0e700000 0x0 0x00a0000>,
+ <0x0 0x0c40a000 0x0 0x0026000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ apps_rsc: rsc@18200000 {
+ label = "apps_rsc";
+ compatible = "qcom,rpmh-rsc";
+ reg = <0x0 0x18200000 0x0 0x10000>,
+ <0x0 0x18210000 0x0 0x10000>,
+ <0x0 0x18220000 0x0 0x10000>;
+ reg-names = "drv-0", "drv-1", "drv-2";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <2>;
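+ /* qcom,tcs-config: <TCS type> <number of TCSes> pairs */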
+ qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
+ <WAKE_TCS 3>, <CONTROL_TCS 1>;
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,sm8250-rpmh-clk";
+ #clock-cells = <1>;
+ clock-names = "xo";
+ clocks = <&xo_board>;
+ };
+ };
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+ reg = <0x0 0x01f40000 0x0 0x40000>;
+ };
+
+ timer@17c20000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0 0x17c20000 0x0 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@17c21000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c21000 0x0 0x1000>,
+ <0x0 0x17c22000 0x0 0x1000>;
+ };
+
+ frame@17c23000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c23000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c25000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c25000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c27000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c27000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c29000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c29000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c2b000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c2b000 0x0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c2d000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0x17c2d000 0x0 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 12
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 2153842321ce..a7ec7a7065d5 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -1,14 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m.dtb
dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m-ex.dtb
+dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m-ex-idk-1110wr.dtb
dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n.dtb
dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n-ex.dtb
dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-cat874.dtb r8a774c0-ek874.dtb \
r8a774c0-ek874-idk-2121wr.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a77950-salvator-x.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a77950-ulcb.dtb r8a77950-ulcb-kf.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a77951-salvator-x.dtb r8a77951-salvator-xs.dtb
-dtb-$(CONFIG_ARCH_R8A7795) += r8a77951-ulcb.dtb r8a77951-ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A77950) += r8a77950-salvator-x.dtb
dtb-$(CONFIG_ARCH_R8A77950) += r8a77950-ulcb.dtb r8a77950-ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A77951) += r8a77951-salvator-x.dtb r8a77951-salvator-xs.dtb
@@ -16,6 +13,7 @@ dtb-$(CONFIG_ARCH_R8A77951) += r8a77951-ulcb.dtb r8a77951-ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A77960) += r8a77960-salvator-x.dtb r8a77960-salvator-xs.dtb
dtb-$(CONFIG_ARCH_R8A77960) += r8a77960-ulcb.dtb r8a77960-ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A77961) += r8a77961-salvator-xs.dtb
+dtb-$(CONFIG_ARCH_R8A77961) += r8a77961-ulcb.dtb
dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-salvator-x.dtb r8a77965-salvator-xs.dtb
dtb-$(CONFIG_ARCH_R8A77965) += r8a77965-ulcb.dtb r8a77965-ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A77970) += r8a77970-eagle.dtb r8a77970-v3msk.dtb
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-hihope-rzg2m-ex-idk-1110wr.dts b/arch/arm64/boot/dts/renesas/r8a774a1-hihope-rzg2m-ex-idk-1110wr.dts
new file mode 100644
index 000000000000..2ab5edd84e9b
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774a1-hihope-rzg2m-ex-idk-1110wr.dts
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the HiHope RZ/G2M sub board connected to an
+ * Advantech IDK-1110WR 10.1" LVDS panel
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include "r8a774a1-hihope-rzg2m-ex.dts"
+#include "rzg2-advantech-idk-1110wr-panel.dtsi"
+
+/ {
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm0 0 50000>;
+
+ brightness-levels = <0 2 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
+};
+
+&gpio1 {
+ /*
+ * When GP1_20 is LOW LVDS0 is connected to the LVDS connector
+ * When GP1_20 is HIGH LVDS0 is connected to the LT8918L
+ */
+ lvds-connector-en-gpio {
+ gpio-hog;
+ gpios = <20 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "lvds-connector-en-gpio";
+ };
+};
+
+&lvds0 {
+ status = "okay";
+};
+
+&pfc {
+ pwm0_pins: pwm0 {
+ groups = "pwm0";
+ function = "pwm0";
+ };
+};
+
+&pwm0 {
+ pinctrl-0 = <&pwm0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index 8f950dabca54..79023433a740 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -2634,13 +2634,14 @@
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>,
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>,
<&cpg CPG_MOD 722>;
clock-names = "du.0", "du.1", "du.2";
+ resets = <&cpg 724>, <&cpg 722>;
+ reset-names = "du.0", "du.2";
status = "disabled";
- vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>;
ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index c40ea300968e..3137f735974b 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -2480,13 +2480,14 @@
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>,
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>,
<&cpg CPG_MOD 721>;
clock-names = "du.0", "du.1", "du.3";
+ resets = <&cpg 724>, <&cpg 722>;
+ reset-names = "du.0", "du.3";
status = "disabled";
- vsps = <&vspd0 0>, <&vspd1 0>, <&vspd0 1>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd0 1>;
ports {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index a53cd5fcc401..22785cbddff5 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1810,10 +1810,12 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
- vsps = <&vspd0 0>, <&vspd1 0>;
+ resets = <&cpg 724>;
+ reset-names = "du.0";
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>;
+
status = "disabled";
ports {
diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
index 15216495e1c8..3975eecd50c4 100644
--- a/arch/arm64/boot/dts/renesas/r8a77950.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
@@ -30,7 +30,7 @@
};
&du {
- vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd3 0>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd3 0>;
};
&fcpvb1 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index a8729eb744db..52229546454c 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -3177,14 +3177,15 @@
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>,
- <&cpg CPG_MOD 722>,
- <&cpg CPG_MOD 721>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>,
+ <&cpg CPG_MOD 722>, <&cpg CPG_MOD 721>;
clock-names = "du.0", "du.1", "du.2", "du.3";
+ resets = <&cpg 724>, <&cpg 722>;
+ reset-names = "du.0", "du.2";
renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>, <&cmm3>;
- vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd0 1>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>,
+ <&vspd0 1>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 60f156cfd2d6..31282367d3ac 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -862,6 +862,15 @@
status = "disabled";
};
+ arm_cc630p: crypto@e6601000 {
+ compatible = "arm,cryptocell-630p-ree";
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0xe6601000 0 0x1000>;
+ clocks = <&cpg CPG_MOD 229>;
+ resets = <&cpg 229>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ };
+
dmac0: dma-controller@e6700000 {
compatible = "renesas,dmac-r8a7796",
"renesas,rcar-dmac";
@@ -2818,13 +2827,14 @@
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>,
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>,
<&cpg CPG_MOD 722>;
clock-names = "du.0", "du.1", "du.2";
+ resets = <&cpg 724>, <&cpg 722>;
+ reset-names = "du.0", "du.2";
renesas,cmms = <&cmm0>, <&cmm1>, <&cmm2>;
- vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts
index 4abd78ac1cd5..2ffc7e31dd58 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts
@@ -19,7 +19,7 @@
reg = <0x0 0x48000000 0x0 0x78000000>;
};
- memory@400000000 {
+ memory@480000000 {
device_type = "memory";
reg = <0x4 0x80000000 0x0 0x80000000>;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts b/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts
new file mode 100644
index 000000000000..7c6e60f6f32d
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the M3ULCB (R-Car Starter Kit Pro) board with R-Car
+ * M3-W+
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a77961.dtsi"
+#include "ulcb.dtsi"
+
+/ {
+ model = "Renesas M3ULCB board based on r8a77961";
+ compatible = "renesas,m3ulcb", "renesas,r8a77961";
+
+ memory@48000000 {
+ device_type = "memory";
+ /* first 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x0 0x78000000>;
+ };
+
+ memory@480000000 {
+ device_type = "memory";
+ reg = <0x4 0x80000000 0x0 0x80000000>;
+ };
+
+ memory@600000000 {
+ device_type = "memory";
+ reg = <0x6 0x00000000 0x1 0x00000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index be3824bda632..0d96f2d3492b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -474,6 +474,20 @@
#power-domain-cells = <1>;
};
+ tsc: thermal@e6198000 {
+ compatible = "renesas,r8a77961-thermal";
+ reg = <0 0xe6198000 0 0x100>,
+ <0 0xe61a0000 0 0x100>,
+ <0 0xe61a8000 0 0x100>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ #thermal-sensor-cells = <1>;
+ };
+
intc_ex: interrupt-controller@e61c0000 {
#interrupt-cells = <2>;
interrupt-controller;
@@ -629,6 +643,15 @@
/* placeholder */
};
+ arm_cc630p: crypto@e6601000 {
+ compatible = "arm,cryptocell-630p-ree";
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0xe6601000 0 0x1000>;
+ clocks = <&cpg CPG_MOD 229>;
+ resets = <&cpg 229>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ };
+
dmac0: dma-controller@e6700000 {
compatible = "renesas,dmac-r8a77961",
"renesas,rcar-dmac";
@@ -861,6 +884,7 @@
rcar_sound,ssi {
ssi0: ssi-0 { };
ssi1: ssi-1 { };
+ ssi2: ssi-2 { };
};
};
@@ -1064,6 +1088,71 @@
};
};
+ thermal-zones {
+ sensor_thermal1: sensor-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 0>;
+ sustainable-power = <3874>;
+
+ trips {
+ sensor1_crit: sensor1-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ sensor_thermal2: sensor-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 1>;
+ sustainable-power = <3874>;
+
+ trips {
+ sensor2_crit: sensor2-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ sensor_thermal3: sensor-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 2>;
+ sustainable-power = <3874>;
+
+ cooling-maps {
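+ /* cooling-device: <&cpu min-cooling-state max-cooling-state> */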
+ map0 {
+ trip = <&target>;
+ cooling-device = <&a57_0 2 4>;
+ contribution = <1024>;
+ };
+ map1 {
+ trip = <&target>;
+ cooling-device = <&a53_0 0 2>;
+ contribution = <1024>;
+ };
+ };
+ trips {
+ target: trip-point1 {
+ temperature = <100000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ sensor3_crit: sensor3-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index c17d90bd160e..d82dd4e67b62 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -111,6 +111,7 @@
power-domains = <&sysc R8A77965_PD_CA57_CPU0>;
next-level-cache = <&L2_CA57>;
enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
#cooling-cells = <2>;
dynamic-power-coefficient = <854>;
clocks = <&cpg CPG_CORE R8A77965_CLK_Z>;
@@ -124,6 +125,7 @@
power-domains = <&sysc R8A77965_PD_CA57_CPU1>;
next-level-cache = <&L2_CA57>;
enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
clocks = <&cpg CPG_CORE R8A77965_CLK_Z>;
operating-points-v2 = <&cluster0_opp>;
};
@@ -134,6 +136,19 @@
cache-unified;
cache-level = <2>;
};
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x0010000>;
+ local-timer-stop;
+ entry-latency-us = <400>;
+ exit-latency-us = <500>;
+ min-residency-us = <4000>;
+ };
+ };
};
extal_clk: extal {
@@ -717,6 +732,15 @@
status = "disabled";
};
+ arm_cc630p: crypto@e6601000 {
+ compatible = "arm,cryptocell-630p-ree";
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0xe6601000 0 0x1000>;
+ clocks = <&cpg CPG_MOD 229>;
+ resets = <&cpg 229>;
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ };
+
dmac0: dma-controller@e6700000 {
compatible = "renesas,dmac-r8a77965",
"renesas,rcar-dmac";
@@ -2494,13 +2518,14 @@
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>,
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>,
<&cpg CPG_MOD 721>;
clock-names = "du.0", "du.1", "du.3";
+ resets = <&cpg 724>, <&cpg 722>;
+ reset-names = "du.0", "du.3";
renesas,cmms = <&cmm0>, <&cmm1>, <&cmm3>;
- vsps = <&vspd0 0>, <&vspd1 0>, <&vspd0 1>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd0 1>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 664a73a2cc69..a009c0ebc8b4 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -1121,7 +1121,9 @@
clock-names = "du.0";
power-domains = <&sysc R8A77970_PD_ALWAYS_ON>;
resets = <&cpg 724>;
- vsps = <&vspd0 0>;
+ reset-names = "du.0";
+ renesas,vsps = <&vspd0 0>;
+
status = "disabled";
ports {
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index b340fb469999..e01b0508a18f 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1484,15 +1484,16 @@
};
du: display@feb00000 {
- compatible = "renesas,du-r8a77980",
- "renesas,du-r8a77970";
+ compatible = "renesas,du-r8a77980";
reg = <0 0xfeb00000 0 0x80000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 724>;
clock-names = "du.0";
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 724>;
- vsps = <&vspd0 0>;
+ reset-names = "du.0";
+ renesas,vsps = <&vspd0 0>;
+
status = "disabled";
ports {
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 32d91f210246..1543f18e834f 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -88,6 +88,7 @@
power-domains = <&sysc R8A77990_PD_CA53_CPU0>;
next-level-cache = <&L2_CA53>;
enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
dynamic-power-coefficient = <277>;
clocks = <&cpg CPG_CORE R8A77990_CLK_Z2>;
operating-points-v2 = <&cluster1_opp>;
@@ -100,6 +101,7 @@
power-domains = <&sysc R8A77990_PD_CA53_CPU1>;
next-level-cache = <&L2_CA53>;
enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
clocks = <&cpg CPG_CORE R8A77990_CLK_Z2>;
operating-points-v2 = <&cluster1_opp>;
};
@@ -110,6 +112,19 @@
cache-unified;
cache-level = <2>;
};
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x0010000>;
+ local-timer-stop;
+ entry-latency-us = <700>;
+ exit-latency-us = <700>;
+ min-residency-us = <5000>;
+ };
+ };
};
extal_clk: extal {
@@ -667,6 +682,15 @@
dma-channels = <2>;
};
+ arm_cc630p: crypto@e6601000 {
+ compatible = "arm,cryptocell-630p-ree";
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0xe6601000 0 0x1000>;
+ clocks = <&cpg CPG_MOD 229>;
+ resets = <&cpg 229>;
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ };
+
dmac0: dma-controller@e6700000 {
compatible = "renesas,dmac-r8a77990",
"renesas,rcar-dmac";
@@ -1784,14 +1808,13 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
resets = <&cpg 724>;
reset-names = "du.0";
renesas,cmms = <&cmm0>, <&cmm1>;
- vsps = <&vspd0 0>, <&vspd1 0>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index 9503007c34c0..e8d2290fe79d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -389,6 +389,15 @@
dma-channels = <2>;
};
+ arm_cc630p: crypto@e6601000 {
+ compatible = "arm,cryptocell-630p-ree";
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0 0xe6601000 0 0x1000>;
+ clocks = <&cpg CPG_MOD 229>;
+ resets = <&cpg 229>;
+ power-domains = <&sysc R8A77995_PD_ALWAYS_ON>;
+ };
+
canfd: can@e66c0000 {
compatible = "renesas,r8a77995-canfd",
"renesas,rcar-gen3-canfd";
@@ -1017,14 +1026,13 @@
reg = <0 0xfeb00000 0 0x40000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 724>,
- <&cpg CPG_MOD 723>;
+ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>;
clock-names = "du.0", "du.1";
resets = <&cpg 724>;
reset-names = "du.0";
renesas,cmms = <&cmm0>, <&cmm1>;
- vsps = <&vspd0 0>, <&vspd1 0>;
+ renesas,vsps = <&vspd0 0>, <&vspd1 0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 60d9437096c7..ae7621309e92 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopc-t4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-neo4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-orangepi.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-pinebook-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc-mezzanine.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index 75908c587511..f809dd6d5dc3 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -413,27 +413,30 @@
lvds: lvds {
compatible = "rockchip,px30-lvds";
- #address-cells = <1>;
- #size-cells = <0>;
phys = <&dsi_dphy>;
phy-names = "dphy";
rockchip,grf = <&grf>;
rockchip,output = "lvds";
status = "disabled";
- port@0 {
- reg = <0>;
+ ports {
#address-cells = <1>;
#size-cells = <0>;
- lvds_vopb_in: endpoint@0 {
+ port@0 {
reg = <0>;
- remote-endpoint = <&vopb_out_lvds>;
- };
-
- lvds_vopl_in: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_lvds>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lvds_vopb_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_lvds>;
+ };
+
+ lvds_vopl_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_lvds>;
+ };
};
};
};
@@ -700,7 +703,7 @@
clock-names = "pclk", "timer";
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
@@ -870,7 +873,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <280>;
g-tx-fifo-size = <256 128 128 64 32 16>;
- g-use-dma;
phys = <&u2phy_otg>;
phy-names = "usb2-phy";
power-domains = <&power PX30_PD_USB>;
@@ -882,7 +884,6 @@
reg = <0x0 0xff340000 0x0 0x10000>;
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST>;
- clock-names = "usbhost";
phys = <&u2phy_host>;
phy-names = "usb";
power-domains = <&power PX30_PD_USB>;
@@ -894,7 +895,6 @@
reg = <0x0 0xff350000 0x0 0x10000>;
interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST>;
- clock-names = "usbhost";
phys = <&u2phy_host>;
phy-names = "usb";
power-domains = <&power PX30_PD_USB>;
@@ -1031,7 +1031,6 @@
reset-names = "axi", "ahb", "dclk";
iommus = <&vopb_mmu>;
power-domains = <&power PX30_PD_VO>;
- rockchip,grf = <&grf>;
status = "disabled";
vopb_out: port {
@@ -1073,7 +1072,6 @@
reset-names = "axi", "ahb", "dclk";
iommus = <&vopl_mmu>;
power-domains = <&power PX30_PD_VO>;
- rockchip,grf = <&grf>;
status = "disabled";
vopl_out: port {
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 116f1900effb..ac43bc3f7031 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -40,7 +40,7 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x0>;
enable-method = "psci";
clocks = <&cru ARMCLK>;
@@ -53,7 +53,7 @@
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x1>;
enable-method = "psci";
operating-points-v2 = <&cpu0_opp_table>;
@@ -63,7 +63,7 @@
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x2>;
enable-method = "psci";
operating-points-v2 = <&cpu0_opp_table>;
@@ -73,7 +73,7 @@
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a35", "arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x3>;
enable-method = "psci";
operating-points-v2 = <&cpu0_opp_table>;
@@ -513,7 +513,7 @@
status = "disabled";
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
index 16f1656d5203..797e90a3ac92 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
@@ -60,6 +60,7 @@
};
&codec {
+ mute-gpios = <&grf_gpio 0 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 62936b432f9a..bf3e546f5266 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -104,6 +104,7 @@
};
&codec {
+ mute-gpios = <&grf_gpio 0 GPIO_ACTIVE_LOW>;
status = "okay";
port@0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 1f53ead52c7f..7e88d88aab98 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -142,7 +142,7 @@
};
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
@@ -906,6 +906,7 @@
resets = <&cru SRST_GMAC2IO_A>;
reset-names = "stmmaceth";
rockchip,grf = <&grf>;
+ snps,txpbl = <0x4>;
status = "disabled";
};
@@ -927,6 +928,7 @@
reset-names = "stmmaceth", "mac-phy";
phy-mode = "rmii";
phy-handle = <&phy>;
+ snps,txpbl = <0x4>;
status = "disabled";
mdio {
@@ -957,7 +959,6 @@
g-np-tx-fifo-size = <16>;
g-rx-fifo-size = <280>;
g-tx-fifo-size = <256 128 128 64 32 16>;
- g-use-dma;
phys = <&u2phy_otg>;
phy-names = "usb2-phy";
status = "disabled";
@@ -968,7 +969,6 @@
reg = <0x0 0xff5c0000 0x0 0x10000>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST0>, <&u2phy>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy_host>;
phy-names = "usb";
status = "disabled";
@@ -979,7 +979,6 @@
reg = <0x0 0xff5d0000 0x0 0x10000>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST0>, <&u2phy>;
- clock-names = "usbhost", "utmi";
phys = <&u2phy_host>;
phy-names = "usb";
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
index 231db0305a03..5ffd7b4d3036 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
@@ -239,7 +239,6 @@
cap-mmc-highspeed;
cap-sd-highspeed;
card-detect-delay = <200>;
- no-emmc;
no-sdio;
sd-uhs-sdr12;
sd-uhs-sdr25;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index a0df61c61925..1ebb0eef42da 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -136,7 +136,7 @@
};
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
@@ -513,7 +513,6 @@
reg = <0x0 0xff500000 0x0 0x100>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_HOST0>;
- clock-names = "usbhost";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
index 77008dca45bc..694b0d08d644 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -9,8 +9,7 @@
/ {
model = "Rockchip RK3399 Evaluation Board";
- compatible = "rockchip,rk3399-evb", "rockchip,rk3399",
- "google,rk3399evb-rev2";
+ compatible = "rockchip,rk3399-evb", "rockchip,rk3399";
backlight: backlight {
compatible = "pwm-backlight";
@@ -48,10 +47,22 @@
240 241 242 243 244 245 246 247
248 249 250 251 252 253 254 255>;
default-brightness-level = <200>;
- enable-gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>;
pwms = <&pwm0 0 25000 0>;
};
+ edp_panel: edp-panel {
+ compatible ="lg,lp079qx1-sp0v";
+ backlight = <&backlight>;
+ enable-gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>;
+ power-supply = <&vcc3v3_s0>;
+
+ port {
+ panel_in_edp: endpoint {
+ remote-endpoint = <&edp_out_panel>;
+ };
+ };
+ };
+
clkin_gmac: external-gmac-clock {
compatible = "fixed-clock";
clock-frequency = <125000000>;
@@ -114,6 +125,24 @@
};
+&edp {
+ status = "okay";
+ force-hpd;
+
+ ports {
+ edp_out: port@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp_out_panel: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_in_edp>;
+ };
+ };
+ };
+};
+
&emmc_phy {
status = "okay";
};
@@ -134,6 +163,228 @@
status = "okay";
};
+&i2c0 {
+ status = "okay";
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+ clock-output-names = "rk808-clkout1", "rk808-clkout2";
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+ vcc10-supply = <&vcc3v3_sys>;
+ vcc11-supply = <&vcc3v3_sys>;
+ vcc12-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcc1v8_pmu>;
+
+ regulators {
+ vdd_log: DCDC_REG1 {
+ regulator-name = "vdd_log";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vdd_cpu_l: DCDC_REG2 {
+ regulator-name = "vdd_cpu_l";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG4 {
+ regulator-name = "vcc_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v8_dvp: LDO_REG1 {
+ regulator-name = "vcc1v8_dvp";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v0_tp: LDO_REG2 {
+ regulator-name = "vcc3v0_tp";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v8_pmu: LDO_REG3 {
+ regulator-name = "vcc1v8_pmu";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_sd: LDO_REG4 {
+ regulator-name = "vcc_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcca3v0_codec: LDO_REG5 {
+ regulator-name = "vcca3v0_codec";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v5: LDO_REG6 {
+ regulator-name = "vcc_1v5";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1500000>;
+ };
+ };
+
+ vcca1v8_codec: LDO_REG7 {
+ regulator-name = "vcca1v8_codec";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v0: LDO_REG8 {
+ regulator-name = "vcc_3v0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc3v3_s3: SWITCH_REG1 {
+ regulator-name = "vcc3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc3v3_s0: SWITCH_REG2 {
+ regulator-name = "vcc3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ vdd_cpu_b: regulator@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_b";
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: regulator@41 {
+ compatible = "silergy,syr828";
+ reg = <0x41>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_gpu";
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
&pwm0 {
status = "okay";
};
@@ -210,11 +461,6 @@
rockchip,pins =
<1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
};
-
- pmic_dvs2: pmic-dvs2 {
- rockchip,pins =
- <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_down>;
- };
};
usb2 {
@@ -224,3 +470,11 @@
};
};
};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
index 7cd6d470c1cb..1384dabbdf40 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
@@ -291,11 +291,9 @@ ap_i2c_tp: &i2c5 {
#pwm-cells = <1>;
};
- usbc_extcon1: extcon@1 {
+ usbc_extcon1: extcon1 {
compatible = "google,extcon-usbc-cros-ec";
google,usb-port-id = <1>;
-
- #extcon-cells = <0>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index dd5624975c9b..2f3997740068 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -570,11 +570,9 @@ ap_i2c_audio: &i2c8 {
#size-cells = <0>;
};
- usbc_extcon0: extcon@0 {
+ usbc_extcon0: extcon0 {
compatible = "google,extcon-usbc-cros-ec";
google,usb-port-id = <0>;
-
- #extcon-cells = <0>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
index d69a613fb65a..aee484a05181 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
@@ -29,6 +29,26 @@
regulator-max-microvolt = <5000000>;
};
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_rx>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&power_led_gpio>;
+
+ led-0 {
+ label = "blue:power";
+ gpios = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "default-on";
+ };
+ };
+
vcc_sys: vcc-sys {
compatible = "regulator-fixed";
regulator-name = "vcc_sys";
@@ -483,6 +503,18 @@
};
};
+ ir {
+ ir_rx: ir-rx {
+ rockchip,pins = <0 RK_PA6 1 &pcfg_pull_none>;
+ };
+ };
+
+ leds {
+ power_led_gpio: power-led-gpio {
+ rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins =
@@ -539,10 +571,6 @@
};
};
-&pwm0 {
- status = "okay";
-};
-
&pwm2 {
status = "okay";
pinctrl-0 = <&pwm2_pin_pull_down>;
@@ -555,7 +583,7 @@
&sdmmc {
clock-frequency = <150000000>;
- clock-freq-min-max = <200000 150000000>;
+ max-frequency = <150000000>;
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
@@ -610,7 +638,6 @@
&spi1 {
status = "okay";
- max-freq = <10000000>;
flash@0 {
compatible = "jedec,spi-nor";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
index 9c659f3115c8..f9f7246d4d2f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
@@ -202,14 +202,27 @@
clock_in_out = "input";
phy-supply = <&vcc3v3_s3>;
phy-mode = "rgmii";
+ phy-handle = <&rtl8211e>;
pinctrl-names = "default";
- pinctrl-0 = <&rgmii_pins>;
- snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
- snps,reset-active-low;
- snps,reset-delays-us = <0 10000 50000>;
+ pinctrl-0 = <&rgmii_pins>, <&phy_intb>, <&phy_rstb>;
tx_delay = <0x28>;
rx_delay = <0x11>;
status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtl8211e: phy@1 {
+ reg = <1>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <30000>;
+ reset-gpios = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ };
+ };
};
&gpu {
@@ -419,6 +432,8 @@
compatible = "silergy,syr827";
reg = <0x40>;
fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cpu_b_sleep>;
regulator-name = "vdd_cpu_b";
regulator-min-microvolt = <712500>;
regulator-max-microvolt = <1500000>;
@@ -436,6 +451,8 @@
compatible = "silergy,syr828";
reg = <0x41>;
fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpu_sleep>;
regulator-name = "vdd_gpu";
regulator-min-microvolt = <712500>;
regulator-max-microvolt = <1500000>;
@@ -537,7 +554,25 @@
};
};
+ phy {
+ phy_intb: phy-intb {
+ rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ phy_rstb: phy-rstb {
+ rockchip,pins = <3 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
+ cpu_b_sleep: cpu-b-sleep {
+ rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ gpu_sleep: gpu-sleep {
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
pmic_int_l: pmic-int-l {
rockchip,pins =
<1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
new file mode 100644
index 000000000000..5ea281b55fe2
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2018 Akash Gajjar <Akash_Gajjar@mentor.com>
+ * Copyright (c) 2020 Tobias Schramm <t.schramm@manjaro.org>
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/usb/pd.h>
+#include <dt-bindings/leds/common.h>
+#include "rk3399.dtsi"
+#include "rk3399-opp.dtsi"
+
+/ {
+ model = "Pine64 Pinebook Pro";
+ compatible = "pine64,pinebook-pro", "rockchip,rk3399";
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ backlight: edp-backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&vcc_12v>;
+ pwms = <&pwm0 0 740740 0>;
+ };
+
+ edp_panel: edp-panel {
+ compatible = "boe,nv140fhmn49";
+ backlight = <&backlight>;
+ enable-gpios = <&gpio1 RK_PA0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_en_gpio>;
+ power-supply = <&vcc3v3_panel>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel_in_edp: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&edp_out_panel>;
+ };
+ };
+ };
+ };
+
+ /*
+ * Use separate nodes for gpio-keys to allow for selective deactivation
+ * of wakeup sources via sysfs without disabling the whole key
+ */
+ gpio-key-lid {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lidbtn_gpio>;
+
+ lid {
+ debounce-interval = <20>;
+ gpios = <&gpio1 RK_PA1 GPIO_ACTIVE_LOW>;
+ label = "Lid";
+ linux,code = <SW_LID>;
+ linux,input-type = <EV_SW>;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ wakeup-source;
+ };
+ };
+
+ gpio-key-power {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwrbtn_gpio>;
+
+ power {
+ debounce-interval = <20>;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
+ label = "Power";
+ linux,code = <KEY_POWER>;
+ wakeup-source;
+ };
+ };
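+
+ /*
+ * With one input device per key, each wakeup source can be toggled
+ * individually from userspace, e.g. (illustrative path, the exact
+ * device name depends on the running kernel):
+ * echo disabled > /sys/devices/platform/gpio-key-lid/power/wakeup
+ */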
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwrled_gpio &slpled_gpio>;
+
+ green-led {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio0 RK_PB3 GPIO_ACTIVE_HIGH>;
+ label = "green:power";
+ };
+
+ red-led {
+ color = <LED_COLOR_ID_RED>;
+ default-state = "off";
+ function = LED_FUNCTION_STANDBY;
+ gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+ label = "red:standby";
+ panic-indicator;
+ retain-state-suspended;
+ };
+ };
+
+ /* Power sequence for SDIO WiFi module */
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h_gpio>;
+ post-power-on-delay-ms = <100>;
+ power-off-delay-us = <500000>;
+
+ /* WL_REG_ON on module */
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ };
+
+ /* Audio components */
+ es8316-sound {
+ compatible = "simple-audio-card";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hp_det_gpio>;
+ simple-audio-card,name = "rockchip,es8316-codec";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <256>;
+
+ simple-audio-card,widgets =
+ "Microphone", "Mic Jack",
+ "Headphone", "Headphones",
+ "Speaker", "Speaker";
+ simple-audio-card,routing =
+ "MIC1", "Mic Jack",
+ "Headphones", "HPOL",
+ "Headphones", "HPOR",
+ "Speaker Amplifier INL", "HPOL",
+ "Speaker Amplifier INR", "HPOR",
+ "Speaker", "Speaker Amplifier OUTL",
+ "Speaker", "Speaker Amplifier OUTR";
+
+ simple-audio-card,hp-det-gpio = <&gpio0 RK_PB0 GPIO_ACTIVE_LOW>;
+ simple-audio-card,aux-devs = <&speaker_amp>;
+ simple-audio-card,pin-switches = "Speaker";
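+ /* the "Speaker" pin switch lets userspace mute the amplifier path */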
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s1>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&es8316>;
+ };
+ };
+
+ speaker_amp: speaker-amplifier {
+ compatible = "simple-audio-amplifier";
+ enable-gpios = <&gpio4 RK_PD3 GPIO_ACTIVE_HIGH>;
+ sound-name-prefix = "Speaker Amplifier";
+ VCC-supply = <&pa_5v>;
+ };
+
+ /* Power tree */
+ /* Root power source */
+ vcc_sysin: vcc-sysin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sysin";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ /* Regulators supplied by vcc_sysin */
+ /* LCD backlight supply */
+ vcc_12v: vcc-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_12v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ vin-supply = <&vcc_sysin>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ /* Main 3.3 V supply */
+ vcc3v3_sys: wifi_bat: vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_sysin>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ /* 5 V USB power supply */
+ vcc5v0_usb: pa_5v: vcc5v0-usb-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_5v_gpio>;
+ regulator-name = "vcc5v0_usb";
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_sysin>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ /* RK3399 logic supply */
+ vdd_log: vdd-log {
+ compatible = "pwm-regulator";
+ pwms = <&pwm2 0 25000 1>;
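+ /* 25000 ns period (40 kHz); trailing 1 is PWM_POLARITY_INVERTED */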
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ vin-supply = <&vcc_sysin>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ /* Regulators supplied by vcc3v3_sys */
+ /* 0.9 V supply, always on */
+ vcc_0v9: vcc-0v9 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ /* S3 1.8 V supply, switched by vcc1v8_s3 */
+ vcca1v8_s3: vcc1v8-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca1v8_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ /* micro SD card power */
+ vcc3v0_sd: vcc3v0-sd {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_pwr_h_gpio>;
+ regulator-name = "vcc3v0_sd";
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ /* LCD panel power, called VCC3V3_S0 in schematic */
+ vcc3v3_panel: vcc3v3-panel {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PC6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdvcc_en_gpio>;
+ regulator-name = "vcc3v3_panel";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <100000>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ /* M.2 adapter power, switched by vcc1v8_s3 */
+ vcc3v3_ssd: vcc3v3-ssd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_ssd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ /* Regulators supplied by vcc5v0_usb */
+ /* USB 3 port power supply regulator */
+ vcc5v0_otg: vcc5v0-otg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en_gpio>;
+ regulator-name = "vcc5v0_otg";
+ regulator-always-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_usb>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ /* Regulators supplied by vcc5v0_usb */
+ /* Type C port power supply regulator */
+ vbus_5vout: vbus_typec: vbus-5vout {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_typec0_en_gpio>;
+ regulator-name = "vbus_5vout";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_usb>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ /* Regulators supplied by vcc_1v8 */
+ /* Primary 0.9 V LDO */
+ vcca0v9_s3: vcca0v9-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc0v9_s3";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc_1v8>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ mains_charger: dc-charger {
+ compatible = "gpio-charger";
+ charger-type = "mains";
+ gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>;
+
+ /* Also triggered by USB charger */
+ pinctrl-names = "default";
+ pinctrl-0 = <&dc_det_gpio>;
+ };
+};
+
+&cdn_dp {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&edp {
+ force-hpd;
+ pinctrl-names = "default";
+ pinctrl-0 = <&edp_hpd>;
+ status = "okay";
+
+ ports {
+ edp_out: port@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp_out_panel: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_in_edp>;
+ };
+ };
+ };
+};
+
+&emmc_phy {
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-falling-time-ns = <4>;
+ i2c-scl-rising-time-ns = <168>;
+ status = "okay";
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk808-clkout2";
+ interrupt-parent = <&gpio3>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l_gpio>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc_sysin>;
+ vcc2-supply = <&vcc_sysin>;
+ vcc3-supply = <&vcc_sysin>;
+ vcc4-supply = <&vcc_sysin>;
+ vcc6-supply = <&vcc_sysin>;
+ vcc7-supply = <&vcc_sysin>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc_sysin>;
+ vcc10-supply = <&vcc_sysin>;
+ vcc11-supply = <&vcc_sysin>;
+ vcc12-supply = <&vcc3v3_sys>;
+ vcc13-supply = <&vcc_sysin>;
+ vcc14-supply = <&vcc_sysin>;
+
+ regulators {
+ /* rk3399 center logic supply */
+ vdd_center: DCDC_REG1 {
+ regulator-name = "vdd_center";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_l: DCDC_REG2 {
+ regulator-name = "vdd_cpu_l";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_1v8: vcc_wl: DCDC_REG4 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ /* not used */
+ LDO_REG1 {
+ };
+
+ /* not used */
+ LDO_REG2 {
+ };
+
+ vcc1v8_pmupll: LDO_REG3 {
+ regulator-name = "vcc1v8_pmupll";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_sdio: LDO_REG4 {
+ regulator-name = "vcc_sdio";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcca3v0_codec: LDO_REG5 {
+ regulator-name = "vcca3v0_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v5: LDO_REG6 {
+ regulator-name = "vcc_1v5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1500000>;
+ };
+ };
+
+ vcca1v8_codec: LDO_REG7 {
+ regulator-name = "vcca1v8_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v0: LDO_REG8 {
+ regulator-name = "vcc_3v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc3v3_s3: SWITCH_REG1 {
+ regulator-name = "vcc3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_s0: SWITCH_REG2 {
+ regulator-name = "vcc3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ vdd_cpu_b: regulator@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vsel1_gpio>;
+ regulator-name = "vdd_cpu_b";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ vin-supply = <&vcc_1v8>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: regulator@41 {
+ compatible = "silergy,syr828";
+ reg = <0x41>;
+ fcs,suspend-voltage-selector = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vsel2_gpio>;
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ vin-supply = <&vcc_1v8>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ i2c-scl-falling-time-ns = <4>;
+ i2c-scl-rising-time-ns = <168>;
+ status = "okay";
+
+ es8316: es8316@11 {
+ compatible = "everest,es8316";
+ reg = <0x11>;
+ clocks = <&cru SCLK_I2S_8CH_OUT>;
+ clock-names = "mclk";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&i2c3 {
+ i2c-scl-falling-time-ns = <15>;
+ i2c-scl-rising-time-ns = <450>;
+ status = "okay";
+};
+
+&i2c4 {
+ i2c-scl-falling-time-ns = <20>;
+ i2c-scl-rising-time-ns = <600>;
+ status = "okay";
+
+ fusb0: fusb30x@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ fcs,int_n = <&gpio1 RK_PA2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&fusb0_int_gpio>;
+ vbus-supply = <&vbus_typec>;
+
+ connector {
+ compatible = "usb-c-connector";
+ data-role = "host";
+ label = "USB-C";
+ op-sink-microwatt = <1000000>;
+ power-role = "dual";
+ sink-pdos =
+ <PDO_FIXED(5000, 2500, PDO_FIXED_USB_COMM)>;
+ source-pdos =
+ <PDO_FIXED(5000, 1400, PDO_FIXED_USB_COMM)>;
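+ /* PDO_FIXED(mV, mA, flags): sink up to 5 V / 2.5 A, source 5 V / 1.4 A */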
+ try-power-role = "sink";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc_hs: endpoint {
+ remote-endpoint =
+ <&u2phy0_typec_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc_ss: endpoint {
+ remote-endpoint =
+ <&tcphy0_typec_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usbc_dp: endpoint {
+ remote-endpoint =
+ <&tcphy0_typec_dp>;
+ };
+ };
+ };
+ };
+ };
+};
+
+&i2s1 {
+ #sound-dai-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_8ch_mclk_gpio>, <&i2s1_2ch_bus>;
+ rockchip,capture-channels = <8>;
+ rockchip,playback-channels = <8>;
+ status = "okay";
+};
+
+&io_domains {
+ audio-supply = <&vcc_3v0>;
+ gpio1830-supply = <&vcc_3v0>;
+ sdmmc-supply = <&vcc_sdio>;
+ status = "okay";
+};
+
+&pcie_phy {
+ status = "okay";
+};
+
+&pcie0 {
+ bus-scan-delay-ms = <1000>;
+ ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
+ max-link-speed = <2>;
+ num-lanes = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreqn_cpm>;
+ vpcie0v9-supply = <&vcca0v9_s3>;
+ vpcie1v8-supply = <&vcca1v8_s3>;
+ vpcie3v3-supply = <&vcc3v3_ssd>;
+ status = "okay";
+};
+
+&pinctrl {
+ buttons {
+ pwrbtn_gpio: pwrbtn-gpio {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ lidbtn_gpio: lidbtn-gpio {
+ rockchip,pins = <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ dc-charger {
+ dc_det_gpio: dc-det-gpio {
+ rockchip,pins = <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ es8316 {
+ hp_det_gpio: hp-det-gpio {
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ fusb302x {
+ fusb0_int_gpio: fusb0-int-gpio {
+ rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ i2s1 {
+ i2s_8ch_mclk_gpio: i2s-8ch-mclk-gpio {
+ rockchip,pins = <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+ };
+
+ lcd-panel {
+ lcdvcc_en_gpio: lcdvcc-en-gpio {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ panel_en_gpio: panel-en-gpio {
+ rockchip,pins = <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ lcd_panel_reset_gpio: lcd-panel-reset-gpio {
+ rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ leds {
+ pwrled_gpio: pwrled-gpio {
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ slpled_gpio: slpled-gpio {
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int_l_gpio: pmic-int-l-gpio {
+ rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ vsel1_gpio: vsel1-gpio {
+ rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ vsel2_gpio: vsel2-gpio {
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ sdcard {
+ sdmmc0_pwr_h_gpio: sdmmc0-pwr-h-gpio {
+ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_enable_h_gpio: wifi-enable-h-gpio {
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb-typec {
+ vcc5v0_typec0_en_gpio: vcc5v0-typec0-en-gpio {
+ rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb2 {
+ pwr_5v_gpio: pwr-5v-gpio {
+ rockchip,pins = <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_host_en_gpio: vcc5v0-host-en-gpio {
+ rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wireless-bluetooth {
+ bt_wake_gpio: bt-wake-gpio {
+ rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_gpio: bt-host-wake-gpio {
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_reset_gpio: bt-reset-gpio {
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmu1830-supply = <&vcc_3v0>;
+ status = "okay";
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca1v8_s3>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v0_sd>;
+ vqmmc-supply = <&vcc_sdio>;
+ status = "okay";
+};
+
+&sdio0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+ sd-uhs-sdr104;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ mmc-hs200-1_8v;
+ non-removable;
+ status = "okay";
+};
+
+&spi1 {
+ max-freq = <10000000>;
+ status = "okay";
+
+ spiflash: flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ m25p,fast-read;
+ spi-max-frequency = <10000000>;
+ };
+};
+
+&tcphy0 {
+ status = "okay";
+};
+
+&tcphy0_dp {
+ port {
+ tcphy0_typec_dp: endpoint {
+ remote-endpoint = <&usbc_dp>;
+ };
+ };
+};
+
+&tcphy0_usb3 {
+ port {
+ tcphy0_typec_ss: endpoint {
+ remote-endpoint = <&usbc_ss>;
+ };
+ };
+};
+
+&tcphy1 {
+ status = "okay";
+};
+
+&tsadc {
+ /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-mode = <1>;
+ /* tshut polarity 0:LOW 1:HIGH */
+ rockchip,hw-tshut-polarity = <1>;
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+
+ u2phy0_otg: otg-port {
+ status = "okay";
+ };
+
+ u2phy0_host: host-port {
+ phy-supply = <&vcc5v0_otg>;
+ status = "okay";
+ };
+
+ port {
+ u2phy0_typec_hs: endpoint {
+ remote-endpoint = <&usbc_hs>;
+ };
+ };
+};
+
+&u2phy1 {
+ status = "okay";
+
+ u2phy1_otg: otg-port {
+ status = "okay";
+ };
+
+ u2phy1_host: host-port {
+ phy-supply = <&vcc5v0_otg>;
+ status = "okay";
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4345c5";
+ clocks = <&rk808 1>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>;
+ max-speed = <1500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_gpio &bt_wake_gpio &bt_reset_gpio>;
+ shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
+ vbat-supply = <&wifi_bat>;
+ vddio-supply = <&vcc_wl>;
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usbdrd3_0 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbdrd3_1 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index c1edca3872c7..07694b196fdb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -480,7 +480,7 @@
};
&sdmmc {
- vqmmc = <&vcc_sd>;
+ vqmmc-supply = <&vcc_sd>;
};
&spi1 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index b69f0f2cbd67..ba7c75c9f2a1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -542,7 +542,7 @@
cap-mmc-highspeed;
cap-sd-highspeed;
clock-frequency = <100000000>;
- clock-freq-min-max = <100000 100000000>;
+ max-frequency = <100000000>;
cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
disable-wp;
sd-uhs-sdr104;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index b4d8f60b7e44..73e269a8ae0c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -230,7 +230,5 @@
};
&spdif {
- i2c-scl-rising-time-ns = <450>;
- i2c-scl-falling-time-ns = <15>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 33cc21fcf4c1..74f2c3d49095 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -197,7 +197,7 @@
#clock-cells = <0>;
};
- amba {
+ amba: bus {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
@@ -288,6 +288,7 @@
resets = <&cru SRST_A_GMAC>;
reset-names = "stmmaceth";
rockchip,grf = <&grf>;
+ snps,txpbl = <0x4>;
status = "disabled";
};
@@ -349,8 +350,6 @@
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>,
<&u2phy0>;
- clock-names = "usbhost", "arbiter",
- "utmi";
phys = <&u2phy0_host>;
phy-names = "usb";
status = "disabled";
@@ -362,8 +361,6 @@
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>,
<&u2phy0>;
- clock-names = "usbhost", "arbiter",
- "utmi";
phys = <&u2phy0_host>;
phy-names = "usb";
status = "disabled";
@@ -375,8 +372,6 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>,
<&u2phy1>;
- clock-names = "usbhost", "arbiter",
- "utmi";
phys = <&u2phy1_host>;
phy-names = "usb";
status = "disabled";
@@ -388,8 +383,6 @@
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>,
<&u2phy1>;
- clock-names = "usbhost", "arbiter",
- "utmi";
phys = <&u2phy1_host>;
phy-names = "usb";
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
index b42f94179538..a1783e7f769a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399pro-rock-pi-n10.dts
@@ -13,5 +13,6 @@
/ {
model = "Radxa ROCK Pi N10";
- compatible = "radxa,rockpi-n10", "rockchip,rk3399pro";
+ compatible = "radxa,rockpi-n10", "vamrs,rk3399pro-vmarc-som",
+ "rockchip,rk3399pro";
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index 5b18bda9c5a6..2ca2d3dc8d6c 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -143,8 +143,8 @@
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&peri_clk 11>;
- resets = <&peri_rst 11>;
+ clocks = <&peri_clk 12>;
+ resets = <&peri_rst 12>;
};
serial0: serial@54006800 {
@@ -433,7 +433,7 @@
};
};
- emmc: sdhc@5a000000 {
+ emmc: mmc@5a000000 {
compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
reg = <0x5a000000 0x400>;
interrupts = <0 78 4>;
@@ -566,7 +566,7 @@
};
};
- aidet: aidet@5fc20000 {
+ aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-ld11-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
@@ -621,7 +621,7 @@
};
};
- nand: nand@68000000 {
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5b";
status = "disabled";
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index f2dc5f695020..a93148c2088f 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -248,8 +248,8 @@
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&peri_clk 11>;
- resets = <&peri_rst 11>;
+ clocks = <&peri_clk 12>;
+ resets = <&peri_rst 12>;
};
spi2: spi@54006200 {
@@ -259,8 +259,8 @@
interrupts = <0 229 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi2>;
- clocks = <&peri_clk 11>;
- resets = <&peri_rst 11>;
+ clocks = <&peri_clk 13>;
+ resets = <&peri_rst 13>;
};
spi3: spi@54006300 {
@@ -270,8 +270,8 @@
interrupts = <0 230 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi3>;
- clocks = <&peri_clk 11>;
- resets = <&peri_rst 11>;
+ clocks = <&peri_clk 14>;
+ resets = <&peri_rst 14>;
};
serial0: serial@54006800 {
@@ -559,7 +559,7 @@
};
};
- emmc: sdhc@5a000000 {
+ emmc: mmc@5a000000 {
compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
reg = <0x5a000000 0x400>;
interrupts = <0 78 4>;
@@ -578,7 +578,7 @@
cdns,phy-dll-delay-sdclk-hsmmc = <21>;
};
- sd: sdhc@5a400000 {
+ sd: mmc@5a400000 {
compatible = "socionext,uniphier-sd-v3.1.1";
status = "disabled";
reg = <0x5a400000 0x800>;
@@ -664,7 +664,7 @@
};
};
- aidet: aidet@5fc20000 {
+ aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-ld20-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
@@ -925,7 +925,7 @@
socionext,syscon = <&soc_glue>;
};
- nand: nand@68000000 {
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5b";
status = "disabled";
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
index 754315bbd1c8..4d00ff9548e1 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
@@ -27,6 +27,8 @@
i2c2 = &i2c2;
i2c3 = &i2c3;
i2c6 = &i2c6;
+ spi0 = &spi0;
+ spi1 = &spi1;
};
memory@80000000 {
@@ -39,6 +41,14 @@
interrupts = <4 8>;
};
+&spi0 {
+ status = "okay";
+};
+
+&spi1 {
+ status = "okay";
+};
+
&serial0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 73e7e1203b09..616835b38106 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/gpio/uniphier-gpio.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
compatible = "socionext,uniphier-pxs3";
@@ -42,6 +43,7 @@
clocks = <&sys_clk 33>;
enable-method = "psci";
operating-points-v2 = <&cluster0_opp>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -51,6 +53,7 @@
clocks = <&sys_clk 33>;
enable-method = "psci";
operating-points-v2 = <&cluster0_opp>;
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -60,6 +63,7 @@
clocks = <&sys_clk 33>;
enable-method = "psci";
operating-points-v2 = <&cluster0_opp>;
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -69,6 +73,7 @@
clocks = <&sys_clk 33>;
enable-method = "psci";
operating-points-v2 = <&cluster0_opp>;
+ #cooling-cells = <2>;
};
};
@@ -136,6 +141,37 @@
<1 10 4>;
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <250>; /* 250ms */
+ polling-delay = <1000>; /* 1000ms */
+ thermal-sensors = <&pvtctl>;
+
+ trips {
+ cpu_crit: cpu-crit {
+ temperature = <110000>; /* 110C */
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ cpu_alert: cpu-alert {
+ temperature = <100000>; /* 100C */
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -171,8 +207,8 @@
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&peri_clk 11>;
- resets = <&peri_rst 11>;
+ clocks = <&peri_clk 12>;
+ resets = <&peri_rst 12>;
};
serial0: serial@54006800 {
@@ -353,7 +389,7 @@
};
};
- emmc: sdhc@5a000000 {
+ emmc: mmc@5a000000 {
compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
reg = <0x5a000000 0x400>;
interrupts = <0 78 4>;
@@ -372,7 +408,7 @@
cdns,phy-dll-delay-sdclk-hsmmc = <21>;
};
- sd: sdhc@5a400000 {
+ sd: mmc@5a400000 {
compatible = "socionext,uniphier-sd-v3.1.1";
status = "disabled";
reg = <0x5a400000 0x800>;
@@ -462,7 +498,7 @@
};
};
- aidet: aidet@5fc20000 {
+ aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pxs3-aidet";
reg = <0x5fc20000 0x200>;
interrupt-controller;
@@ -496,6 +532,13 @@
watchdog {
compatible = "socionext,uniphier-wdt";
};
+
+ pvtctl: pvtctl {
+ compatible = "socionext,uniphier-pxs3-thermal";
+ interrupts = <0 3 4>;
+ #thermal-sensor-cells = <0>;
+ socionext,tmod-calibration = <0x0f22 0x68ee>;
+ };
};
eth0: ethernet@65000000 {
@@ -783,7 +826,7 @@
socionext,syscon = <&soc_glue>;
};
- nand: nand@68000000 {
+ nand: nand-controller@68000000 {
compatible = "socionext,uniphier-denali-nand-v5b";
status = "disabled";
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index e5df20a2d2f9..11887c72f23a 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -189,6 +189,8 @@
power-domains = <&k3_pds 137 TI_SCI_PD_EXCLUSIVE>;
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
+ dma-names = "tx0", "rx0";
};
main_spi1: spi@2110000 {
@@ -296,6 +298,7 @@
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
dma-coherent;
power-domains = <&k3_pds 151 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 151 2>, <&k3_clks 151 7>;
assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>;
assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
<&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */
@@ -335,6 +338,7 @@
interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
dma-coherent;
power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 152 2>;
assigned-clocks = <&k3_clks 152 2>;
assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index cbf97b621931..353d1e2532a7 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -12,6 +12,12 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x40f00000 0x20000>;
+
+ phy_gmii_sel: phy@4040 {
+ compatible = "ti,am654-phy-gmii-sel";
+ reg = <0x4040 0x4>;
+ #phy-cells = <1>;
+ };
};
mcu_uart0: serial@40a00000 {
@@ -82,6 +88,9 @@
assigned-clocks = <&k3_clks 0 2>;
assigned-clock-rates = <60000000>;
clock-names = "adc_tsc_fck";
+ dmas = <&mcu_udmap 0x7100>,
+ <&mcu_udmap 0x7101>;
+ dma-names = "fifo0", "fifo1";
adc {
#io-channel-cells = <1>;
@@ -97,6 +106,9 @@
assigned-clocks = <&k3_clks 1 2>;
assigned-clock-rates = <60000000>;
clock-names = "adc_tsc_fck";
+ dmas = <&mcu_udmap 0x7102>,
+ <&mcu_udmap 0x7103>;
+ dma-names = "fifo0", "fifo1";
adc {
#io-channel-cells = <1>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index bfe91f2a52cb..3d6064125b40 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -34,6 +34,20 @@
};
};
+ mcu_conf: syscon@40f00000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0x40f00000 0x0 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x40f00000 0x20000>;
+
+ phy_gmii_sel: phy@4040 {
+ compatible = "ti,am654-phy-gmii-sel";
+ reg = <0x4040 0x4>;
+ #phy-cells = <1>;
+ };
+ };
+
wkup_pmx0: pinmux@4301c000 {
compatible = "pinctrl-single";
/* Proxy 0 addressing */
@@ -203,6 +217,9 @@
assigned-clocks = <&k3_clks 0 3>;
assigned-clock-rates = <60000000>;
clock-names = "adc_tsc_fck";
+ dmas = <&main_udmap 0x7400>,
+ <&main_udmap 0x7401>;
+ dma-names = "fifo0", "fifo1";
adc {
#io-channel-cells = <1>;
@@ -219,6 +236,9 @@
assigned-clocks = <&k3_clks 1 3>;
assigned-clock-rates = <60000000>;
clock-names = "adc_tsc_fck";
+ dmas = <&main_udmap 0x7402>,
+ <&main_udmap 0x7403>;
+ dma-names = "fifo0", "fifo1";
adc {
#io-channel-cells = <1>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index a6c0d02d9928..24e534d85045 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -91,6 +91,7 @@ CONFIG_ARM_QCOM_CPUFREQ_NVMEM=y
CONFIG_ARM_QCOM_CPUFREQ_HW=y
CONFIG_ARM_RASPBERRYPI_CPUFREQ=m
CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_QORIQ_CPUFREQ=y
CONFIG_ARM_SCPI_PROTOCOL=y
CONFIG_RASPBERRYPI_FIRMWARE=y
CONFIG_INTEL_STRATIX10_SERVICE=y
@@ -155,6 +156,7 @@ CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_BRIDGE=m
CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_NET_DSA=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
@@ -162,6 +164,10 @@ CONFIG_QRTR=m
CONFIG_QRTR_SMD=m
CONFIG_QRTR_TUN=m
CONFIG_BPF_JIT=y
+CONFIG_CAN=m
+CONFIG_CAN_RCAR=m
+CONFIG_CAN_RCAR_CANFD=m
+CONFIG_CAN_FLEXCAN=m
CONFIG_BT=m
CONFIG_BT_HIDP=m
# CONFIG_BT_HS is not set
@@ -196,6 +202,7 @@ CONFIG_PCI_HOST_THUNDER_ECAM=y
CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PCIE_BRCMSTB=m
CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCIE_LAYERSCAPE_GEN4=y
CONFIG_PCI_HISI=y
CONFIG_PCIE_QCOM=y
CONFIG_PCIE_ARMADA_8K=y
@@ -208,11 +215,22 @@ CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_HISILICON_LPC=y
CONFIG_SIMPLE_PM_BUS=y
+CONFIG_FSL_MC_BUS=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_SST25L=y
CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_DENALI_DT=y
CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_NAND_QCOM=y
CONFIG_MTD_SPI_NOR=y
CONFIG_SPI_CADENCE_QUADSPI=y
@@ -221,12 +239,14 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_NVME=m
CONFIG_SRAM=y
+CONFIG_EEPROM_AT24=m
CONFIG_EEPROM_AT25=m
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SAS_ATA=y
CONFIG_SCSI_HISI_SAS=y
CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_MEGARAID_SAS=y
CONFIG_SCSI_MPT3SAS=m
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
@@ -254,6 +274,7 @@ CONFIG_MACVTAP=m
CONFIG_TUN=y
CONFIG_VETH=m
CONFIG_VIRTIO_NET=y
+CONFIG_NET_DSA_MSCC_FELIX=m
CONFIG_AMD_XGBE=y
CONFIG_NET_XGENE=y
CONFIG_ATL1C=m
@@ -262,12 +283,18 @@ CONFIG_BNX2X=m
CONFIG_MACB=y
CONFIG_THUNDER_NIC_PF=y
CONFIG_FEC=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_DPAA2_ETH=y
+CONFIG_FSL_ENETC=y
+CONFIG_FSL_ENETC_VF=y
CONFIG_HIX5HD2_GMAC=y
CONFIG_HNS_DSAF=y
CONFIG_HNS_ENET=y
CONFIG_HNS3=y
CONFIG_HNS3_HCLGE=y
CONFIG_HNS3_ENET=y
+CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IGBVF=y
@@ -286,13 +313,17 @@ CONFIG_SNI_NETSEC=y
CONFIG_STMMAC_ETH=m
CONFIG_TI_K3_AM65_CPSW_NUSS=y
CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MDIO_BUS_MUX_MULTIPLEXER=y
+CONFIG_AQUANTIA_PHY=y
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=m
CONFIG_MESON_GXL_PHY=m
CONFIG_MICREL_PHY=y
+CONFIG_MICROSEMI_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_REALTEK_PHY=m
CONFIG_ROCKCHIP_PHY=y
+CONFIG_VITESSE_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
@@ -322,6 +353,7 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_PM8XXX_VIBRATOR=m
CONFIG_INPUT_HISI_POWERKEY=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
@@ -390,8 +422,11 @@ CONFIG_SPI=y
CONFIG_SPI_ARMADA_3700=y
CONFIG_SPI_BCM2835=m
CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_FSL_LPSPI=y
+CONFIG_SPI_FSL_QUADSPI=y
CONFIG_SPI_NXP_FLEXSPI=y
CONFIG_SPI_IMX=m
+CONFIG_SPI_FSL_DSPI=y
CONFIG_SPI_MESON_SPICC=m
CONFIG_SPI_MESON_SPIFC=m
CONFIG_SPI_ORION=y
@@ -401,6 +436,7 @@ CONFIG_SPI_QCOM_QSPI=m
CONFIG_SPI_QUP=y
CONFIG_SPI_QCOM_GENI=m
CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SH_MSIOF=m
CONFIG_SPI_SUN6I=y
CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
@@ -411,9 +447,11 @@ CONFIG_PINCTRL_S700=y
CONFIG_PINCTRL_S900=y
CONFIG_PINCTRL_IMX8MM=y
CONFIG_PINCTRL_IMX8MN=y
+CONFIG_PINCTRL_IMX8MP=y
CONFIG_PINCTRL_IMX8MQ=y
CONFIG_PINCTRL_IMX8QXP=y
CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_IPQ6018=y
CONFIG_PINCTRL_MSM8916=y
CONFIG_PINCTRL_MSM8994=y
CONFIG_PINCTRL_MSM8996=y
@@ -426,14 +464,17 @@ CONFIG_PINCTRL_SM8150=y
CONFIG_GPIO_ALTERA=m
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_MPC8XXX=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_RCAR=y
CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_WCD934X=m
CONFIG_GPIO_XGENE=y
CONFIG_GPIO_XGENE_SB=y
CONFIG_GPIO_MAX732X=y
CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_BD9571MWV=m
CONFIG_GPIO_MAX77620=y
CONFIG_POWER_AVS=y
CONFIG_QCOM_CPR=y
@@ -455,6 +496,8 @@ CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_QORIQ_THERMAL=m
CONFIG_SUN8I_THERMAL=y
+CONFIG_IMX_SC_THERMAL=m
+CONFIG_IMX8MM_THERMAL=m
CONFIG_ROCKCHIP_THERMAL=m
CONFIG_RCAR_THERMAL=y
CONFIG_RCAR_GEN3_THERMAL=y
@@ -469,14 +512,15 @@ CONFIG_QCOM_SPMI_TEMP_ALARM=m
CONFIG_UNIPHIER_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_ARM_SBSA_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=m
CONFIG_IMX2_WDT=y
CONFIG_IMX_SC_WDT=m
+CONFIG_QCOM_WDT=m
CONFIG_MESON_GXBB_WATCHDOG=m
CONFIG_MESON_WATCHDOG=m
-CONFIG_QCOM_WDT=m
CONFIG_RENESAS_WDT=y
CONFIG_UNIPHIER_WATCHDOG=y
CONFIG_BCM2835_WDT=y
@@ -492,6 +536,7 @@ CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_RK808=y
CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_ROHM_BD718XX=y
+CONFIG_MFD_WCD934X=m
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_BD718XX=y
@@ -519,21 +564,28 @@ CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
# CONFIG_DVB_NET is not set
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_RCAR_CSI2=m
+CONFIG_VIDEO_RCAR_VIN=m
CONFIG_VIDEO_SUN6I_CSI=m
CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FDP1=m
CONFIG_VIDEO_RENESAS_FCP=m
CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_SDR_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_RCAR_DRIF=m
CONFIG_DRM=m
CONFIG_DRM_I2C_NXP_TDA998X=m
+CONFIG_DRM_MALI_DISPLAY=m
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_EXYNOS=m
CONFIG_DRM_EXYNOS5433_DECON=y
@@ -549,16 +601,23 @@ CONFIG_ROCKCHIP_DW_HDMI=y
CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_DW_HDMI=m
CONFIG_DRM_SUN4I=m
CONFIG_DRM_SUN6I_DSI=m
CONFIG_DRM_SUN8I_DW_HDMI=m
CONFIG_DRM_SUN8I_MIXER=m
CONFIG_DRM_MSM=m
CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_LVDS=m
CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
CONFIG_DRM_SII902X=m
+CONFIG_DRM_THINE_THC63LVD1024=m
CONFIG_DRM_TI_SN65DSI86=m
CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
+CONFIG_DRM_DW_HDMI_CEC=m
CONFIG_DRM_VC4=m
CONFIG_DRM_ETNAVIV=m
CONFIG_DRM_HISI_HIBMC=m
@@ -583,6 +642,7 @@ CONFIG_SND_HDA_CODEC_HDMI=m
CONFIG_SND_SOC=y
CONFIG_SND_BCM2835_SOC_I2S=m
CONFIG_SND_MESON_AXG_SOUND_CARD=m
+CONFIG_SND_SOC_SDM845=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
@@ -595,11 +655,13 @@ CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
CONFIG_SND_SOC_PCM3168A_I2C=m
CONFIG_SND_SOC_TAS571X=m
+CONFIG_SND_SOC_WCD934X=m
+CONFIG_SND_SOC_WSA881X=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_AUDIO_GRAPH_CARD=m
CONFIG_I2C_HID=m
-CONFIG_USB=y
CONFIG_USB_CONN_GPIO=m
+CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_TEGRA=y
@@ -609,6 +671,7 @@ CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_EXYNOS=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS_HCD=m
CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_MUSB_HDRC=y
@@ -621,7 +684,6 @@ CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_ISP1760=y
CONFIG_USB_HSIC_USB3503=y
CONFIG_NOP_USB_XCEIV=y
-CONFIG_USB_ULPI=y
CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_RENESAS_USB3=m
@@ -669,11 +731,14 @@ CONFIG_LEDS_TRIGGER_PANIC=y
CONFIG_EDAC=y
CONFIG_EDAC_GHES=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_PCF85363=m
CONFIG_RTC_DRV_RX8581=m
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_PCF2127=m
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_CROS_EC=y
CONFIG_RTC_DRV_S3C=y
@@ -716,8 +781,8 @@ CONFIG_COMMON_CLK_RK808=y
CONFIG_COMMON_CLK_SCPI=y
CONFIG_COMMON_CLK_CS2000_CP=y
CONFIG_COMMON_CLK_S2MPS11=y
-CONFIG_CLK_QORIQ=y
CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_VC5=y
CONFIG_CLK_RASPBERRYPI=m
CONFIG_CLK_IMX8MM=y
CONFIG_CLK_IMX8MN=y
@@ -731,12 +796,15 @@ CONFIG_QCOM_CLK_APCS_MSM8916=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_QCOM_CLK_RPMH=y
CONFIG_IPQ_GCC_8074=y
+CONFIG_IPQ_GCC_6018=y
CONFIG_MSM_GCC_8916=y
CONFIG_MSM_GCC_8994=y
CONFIG_MSM_MMCC_8996=y
CONFIG_MSM_GCC_8998=y
CONFIG_QCS_GCC_404=y
CONFIG_SDM_GCC_845=y
+CONFIG_SDM_GPUCC_845=y
+CONFIG_SDM_DISPCC_845=y
CONFIG_SM_GCC_8150=y
CONFIG_QCOM_HFPLL=y
CONFIG_HWSPINLOCK=y
@@ -758,11 +826,14 @@ CONFIG_QCOM_SYSMON=m
CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=m
CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_SOUNDWIRE=m
+CONFIG_SOUNDWIRE_QCOM=m
CONFIG_OWL_PM_DOMAINS=y
CONFIG_RASPBERRYPI_POWER=y
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_MC_DPIO=y
CONFIG_IMX_SCU_SOC=y
CONFIG_QCOM_AOSS_QMP=y
-CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_GENI_SE=y
CONFIG_QCOM_GLINK_SSR=m
CONFIG_QCOM_RMTFS_MEM=m
@@ -773,6 +844,7 @@ CONFIG_QCOM_SMD_RPM=y
CONFIG_QCOM_SMP2P=y
CONFIG_QCOM_SMSM=y
CONFIG_QCOM_SOCINFO=m
+CONFIG_QCOM_APR=m
CONFIG_ARCH_R8A774A1=y
CONFIG_ARCH_R8A774B1=y
CONFIG_ARCH_R8A774C0=y
@@ -784,7 +856,6 @@ CONFIG_ARCH_R8A77970=y
CONFIG_ARCH_R8A77980=y
CONFIG_ARCH_R8A77990=y
CONFIG_ARCH_R8A77995=y
-CONFIG_QCOM_PDC=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
@@ -795,9 +866,9 @@ CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
-CONFIG_MEMORY=y
CONFIG_IIO=y
CONFIG_EXYNOS_ADC=y
+CONFIG_MAX9611=m
CONFIG_QCOM_SPMI_ADC5=m
CONFIG_ROCKCHIP_SARADC=m
CONFIG_IIO_CROS_EC_SENSORS_CORE=m
@@ -815,6 +886,7 @@ CONFIG_PWM_ROCKCHIP=y
CONFIG_PWM_SAMSUNG=y
CONFIG_PWM_SUN4I=m
CONFIG_PWM_TEGRA=m
+CONFIG_QCOM_PDC=y
CONFIG_RESET_QCOM_AOSS=y
CONFIG_RESET_QCOM_PDC=m
CONFIG_RESET_TI_SCI=y
@@ -858,6 +930,10 @@ CONFIG_FPGA_REGION=m
CONFIG_OF_FPGA_REGION=m
CONFIG_TEE=y
CONFIG_OPTEE=y
+CONFIG_SLIMBUS=m
+CONFIG_SLIM_QCOM_CTRL=m
+CONFIG_SLIM_QCOM_NGD_CTRL=m
+CONFIG_MUX_MMIO=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -886,16 +962,18 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_DEV_SUN8I_CE=m
CONFIG_CRYPTO_DEV_FSL_CAAM=m
-CONFIG_CRYPTO_DEV_HISI_ZIP=m
-CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=m
CONFIG_CRYPTO_DEV_QCOM_RNG=m
+CONFIG_CRYPTO_DEV_CCREE=m
+CONFIG_CRYPTO_DEV_HISI_ZIP=m
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arm64/crypto/.gitignore b/arch/arm64/crypto/.gitignore
index 879df8781ed5..fcf2d731e6c1 100644
--- a/arch/arm64/crypto/.gitignore
+++ b/arch/arm64/crypto/.gitignore
@@ -1,2 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
sha256-core.S
sha512-core.S
+poly1305-core.S
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index e3e27349a9fe..fb507d569922 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -151,6 +151,7 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
kernel_neon_begin();
aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
kernel_neon_end();
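+ /* wipe the temporary copy of the expanded key schedule off the stack */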
+ memzero_explicit(&rk, sizeof(rk));
return 0;
}
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 63c875d3314b..565ef604ca04 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -91,12 +91,32 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
return sha1_base_finish(desc, out);
}
+static int sha1_ce_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, &sctx->sst, sizeof(struct sha1_state));
+ return 0;
+}
+
+static int sha1_ce_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(&sctx->sst, in, sizeof(struct sha1_state));
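+ /* the generic struct sha1_state carries no 'finalize' flag, so clear it */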
+ sctx->finalize = 0;
+ return 0;
+}
+
static struct shash_alg alg = {
.init = sha1_base_init,
.update = sha1_ce_update,
.final = sha1_ce_final,
.finup = sha1_ce_finup,
+ .import = sha1_ce_import,
+ .export = sha1_ce_export,
.descsize = sizeof(struct sha1_ce_state),
+ .statesize = sizeof(struct sha1_state),
.digestsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "sha1",
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index a8e67bafba3d..9450d19b9e6e 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -109,12 +109,32 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
return sha256_base_finish(desc, out);
}
+static int sha256_ce_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, &sctx->sst, sizeof(struct sha256_state));
+ return 0;
+}
+
+static int sha256_ce_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(&sctx->sst, in, sizeof(struct sha256_state));
+ sctx->finalize = 0;
+ return 0;
+}
+
static struct shash_alg algs[] = { {
.init = sha224_base_init,
.update = sha256_ce_update,
.final = sha256_ce_final,
.finup = sha256_ce_finup,
+ .export = sha256_ce_export,
+ .import = sha256_ce_import,
.descsize = sizeof(struct sha256_ce_state),
+ .statesize = sizeof(struct sha256_state),
.digestsize = SHA224_DIGEST_SIZE,
.base = {
.cra_name = "sha224",
@@ -128,7 +148,10 @@ static struct shash_alg algs[] = { {
.update = sha256_ce_update,
.final = sha256_ce_final,
.finup = sha256_ce_finup,
+ .export = sha256_ce_export,
+ .import = sha256_ce_import,
.descsize = sizeof(struct sha256_ce_state),
+ .statesize = sizeof(struct sha256_state),
.digestsize = SHA256_DIGEST_SIZE,
.base = {
.cra_name = "sha256",
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index d3077c991962..ff9cbb631212 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,26 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += bugs.h
-generic-y += delay.h
-generic-y += div64.h
-generic-y += dma.h
-generic-y += dma-mapping.h
generic-y += early_ioremap.h
-generic-y += emergency-restart.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += serial.h
generic-y += set_memory.h
-generic-y += switch_to.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
generic-y += user.h
-generic-y += vga.h
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 3fe02da70004..fc1594a0710e 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -4,6 +4,8 @@
#ifdef CONFIG_ARCH_RANDOM
+#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/random.h>
#include <asm/cpufeature.h>
@@ -66,6 +68,18 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
}
+static inline bool __init __must_check
+arch_get_random_seed_long_early(unsigned long *v)
+{
+ WARN_ON(system_state != SYSTEM_BOOTING);
+
+ if (!__early_cpu_has_rndr())
+ return false;
+
+ return __arm64_rndr(v);
+}
+#define arch_get_random_seed_long_early arch_get_random_seed_long_early
+
#else
static inline bool __arm64_rndr(unsigned long *v) { return false; }
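
Note: the _early variant exists because the regular arch_get_random_seed_long() depends on cpufeature state that is not yet established when the core RNG first seeds itself, so this hook probes ID_AA64ISAR0 directly. The generic side only calls it when a macro of the same name is defined; the fallback pattern in include/linux/random.h looks roughly like this (a sketch, simplified):

    /* Sketch of the generic fallback this arm64 hook overrides
     * (per the pattern in include/linux/random.h, simplified). */
    #ifndef arch_get_random_seed_long_early
    static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
    {
        WARN_ON(system_state != SYSTEM_BOOTING);
        return arch_get_random_seed_long(v);
    }
    #endif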
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f658dda12364..a30b4eec7cb4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -89,7 +89,8 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 &= ~HCR_TWE;
- if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+ if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
+ vcpu->kvm->arch.vgic.nassgireq)
vcpu->arch.hcr_el2 &= ~HCR_TWI;
else
vcpu->arch.hcr_el2 |= HCR_TWI;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 57fd46acd058..32c8a675e5a4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
diff --git a/arch/arm64/kernel/.gitignore b/arch/arm64/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/arm64/kernel/.gitignore
+++ b/arch/arm64/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
index f8b69d84238e..652e31d82582 100644
--- a/arch/arm64/kernel/vdso/.gitignore
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
diff --git a/arch/arm64/kernel/vdso32/.gitignore b/arch/arm64/kernel/vdso32/.gitignore
index 4fea950fa5ed..3542fa24e26b 100644
--- a/arch/arm64/kernel/vdso32/.gitignore
+++ b/arch/arm64/kernel/vdso32/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
vdso.so.raw
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 525010504f9d..e329a36b2bee 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -11,7 +11,6 @@
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2bd92301d32f..23ebe51410f0 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,7 +25,6 @@
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
-#include <asm/kvm_host.h>
#include <asm/sigcontext.h>
#include "trace.h"
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index eaa05c3c7235..8a1e81a400e0 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,7 +17,6 @@
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 090f46d3add1..51db934702b6 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -22,7 +22,6 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 2b4a3e2d1b89..9cb6b4c8355a 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -12,7 +12,6 @@
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 85566d32958f..1027851d469a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -446,7 +446,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct mm_struct *mm = current->mm;
vm_fault_t fault, major = 0;
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
- unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int mm_flags = FAULT_FLAG_DEFAULT;
if (kprobe_page_fault(regs, esr))
return 0;
@@ -513,25 +513,15 @@ retry:
fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
major |= fault & VM_FAULT_MAJOR;
- if (fault & VM_FAULT_RETRY) {
- /*
- * If we need to retry but a fatal signal is pending,
- * handle the signal first. We do not need to release
- * the mmap_sem because it would already be released
- * in __lock_page_or_retry in mm/filemap.c.
- */
- if (fatal_signal_pending(current)) {
- if (!user_mode(regs))
- goto no_context;
- return 0;
- }
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return 0;
+ }
- /*
- * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
- * starvation.
- */
+ if (fault & VM_FAULT_RETRY) {
if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
- mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
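
Note: fault_signal_pending() is the cross-arch helper that replaces the removed open-coded block: it bundles the VM_FAULT_RETRY check with both the fatal-signal test and, for user-mode faults, ordinary pending signals. Its shape is roughly the following (a sketch following the generic helper in include/linux/sched/signal.h):

    /* Sketch of the helper the rewritten path relies on (per the
     * generic definition in include/linux/sched/signal.h). */
    static inline bool fault_signal_pending(vm_fault_t fault_flags,
                                            struct pt_regs *regs)
    {
        return unlikely((fault_flags & VM_FAULT_RETRY) &&
                        (fatal_signal_pending(current) ||
                         (user_mode(regs) && signal_pending(current))));
    }

Note also that the retry branch no longer clears FAULT_FLAG_ALLOW_RETRY: with FAULT_FLAG_TRIED set, the core mm now bounds repeated retries itself.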
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index a036e05fc3c6..a4ef93a1f7ae 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -1,42 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += atomic.h
-generic-y += barrier.h
-generic-y += bugs.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += futex.h
-generic-y += hw_irq.h
-generic-y += io.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += mmu.h
-generic-y += mmu_context.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += pgalloc.h
-generic-y += preempt.h
-generic-y += serial.h
-generic-y += shmparam.h
-generic-y += tlbflush.h
-generic-y += topology.h
-generic-y += trace_clock.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/c6x/include/asm/unaligned.h b/arch/c6x/include/asm/unaligned.h
index b56ba7110f5a..d628cc170564 100644
--- a/arch/c6x/include/asm/unaligned.h
+++ b/arch/c6x/include/asm/unaligned.h
@@ -10,6 +10,7 @@
#define _ASM_C6X_UNALIGNED_H
#include <linux/swab.h>
+#include <linux/unaligned/generic.h>
/*
* The C64x+ can do unaligned word and dword accesses in hardware
@@ -100,68 +101,4 @@ static inline void put_unaligned64(u64 val, const void *p)
#endif
-/*
- * Cause a link-time error if we try an unaligned access other than
- * 1,2,4 or 8 bytes long
- */
-extern int __bad_unaligned_access_size(void);
-
-#define __get_unaligned_le(ptr) (typeof(*(ptr)))({ \
- sizeof(*(ptr)) == 1 ? *(ptr) : \
- (sizeof(*(ptr)) == 2 ? get_unaligned_le16((ptr)) : \
- (sizeof(*(ptr)) == 4 ? get_unaligned_le32((ptr)) : \
- (sizeof(*(ptr)) == 8 ? get_unaligned_le64((ptr)) : \
- __bad_unaligned_access_size()))); \
- })
-
-#define __get_unaligned_be(ptr) (__force typeof(*(ptr)))({ \
- sizeof(*(ptr)) == 1 ? *(ptr) : \
- (sizeof(*(ptr)) == 2 ? get_unaligned_be16((ptr)) : \
- (sizeof(*(ptr)) == 4 ? get_unaligned_be32((ptr)) : \
- (sizeof(*(ptr)) == 8 ? get_unaligned_be64((ptr)) : \
- __bad_unaligned_access_size()))); \
- })
-
-#define __put_unaligned_le(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_le16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_le32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_le64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
-#define __put_unaligned_be(val, ptr) ({ \
- void *__gu_p = (ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- *(u8 *)__gu_p = (__force u8)(val); \
- break; \
- case 2: \
- put_unaligned_be16((__force u16)(val), __gu_p); \
- break; \
- case 4: \
- put_unaligned_be32((__force u32)(val), __gu_p); \
- break; \
- case 8: \
- put_unaligned_be64((__force u64)(val), __gu_p); \
- break; \
- default: \
- __bad_unaligned_access_size(); \
- break; \
- } \
- (void)0; })
-
#endif /* _ASM_C6X_UNALIGNED_H */
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 047427f71d83..94545d50d40f 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -3,6 +3,7 @@ config CSKY
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_DMA_PREP_COHERENT
+ select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
@@ -38,16 +39,22 @@ config CSKY
select HAVE_ARCH_AUDITSYSCALL
select HAVE_COPY_THREAD_TLS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
+ select HAVE_KPROBES if !CPU_CK610
+ select HAVE_KPROBES_ON_FTRACE if !CPU_CK610
+ select HAVE_KRETPROBES if !CPU_CK610
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_DMA_CONTIGUOUS
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select MAY_HAVE_SPARSE_IRQ
@@ -65,6 +72,12 @@ config CSKY
select PCI_SYSCALL if PCI
select PCI_MSI if PCI
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config ARCH_SUPPORTS_UPROBES
+ def_bool y if !CPU_CK610
+
config CPU_HAS_CACHEV2
bool
diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
index f35a9f3315ee..5056ebb902d1 100644
--- a/arch/csky/abiv1/inc/abi/entry.h
+++ b/arch/csky/abiv1/inc/abi/entry.h
@@ -172,10 +172,7 @@
addi r6, 0xe
cpwcr r6, cpcr30
- lsri r6, 28
- addi r6, 2
- lsli r6, 28
- addi r6, 0xe
+ movi r6, 0
cpwcr r6, cpcr31
.endm
diff --git a/arch/csky/abiv2/fpu.c b/arch/csky/abiv2/fpu.c
index 86d187d4e5af..5acc5c2e544e 100644
--- a/arch/csky/abiv2/fpu.c
+++ b/arch/csky/abiv2/fpu.c
@@ -10,11 +10,6 @@
#define MTCR_DIST 0xC0006420
#define MFCR_DIST 0xC0006020
-void __init init_fpu(void)
-{
- mtcr("cr<1, 2>", 0);
-}
-
/*
* fpu_libc_helper() helps libc execute:
* - mfcr %a, cr<1, 2>
diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
index 94a7a58765df..a99aff555a0a 100644
--- a/arch/csky/abiv2/inc/abi/entry.h
+++ b/arch/csky/abiv2/inc/abi/entry.h
@@ -100,6 +100,66 @@
rte
.endm
+.macro SAVE_REGS_FTRACE
+ subi sp, 152
+ stw tls, (sp, 0)
+ stw lr, (sp, 4)
+
+ mfcr lr, psr
+ stw lr, (sp, 12)
+
+ addi lr, sp, 152
+ stw lr, (sp, 16)
+
+ stw a0, (sp, 20)
+ stw a0, (sp, 24)
+ stw a1, (sp, 28)
+ stw a2, (sp, 32)
+ stw a3, (sp, 36)
+
+ addi sp, 40
+ stm r4-r13, (sp)
+
+ addi sp, 40
+ stm r16-r30, (sp)
+#ifdef CONFIG_CPU_HAS_HILO
+ mfhi lr
+ stw lr, (sp, 60)
+ mflo lr
+ stw lr, (sp, 64)
+ mfcr lr, cr14
+ stw lr, (sp, 68)
+#endif
+ subi sp, 80
+.endm
+
+.macro RESTORE_REGS_FTRACE
+ ldw tls, (sp, 0)
+ ldw a0, (sp, 16)
+ mtcr a0, ss0
+
+#ifdef CONFIG_CPU_HAS_HILO
+ ldw a0, (sp, 140)
+ mthi a0
+ ldw a0, (sp, 144)
+ mtlo a0
+ ldw a0, (sp, 148)
+ mtcr a0, cr14
+#endif
+
+ ldw a0, (sp, 24)
+ ldw a1, (sp, 28)
+ ldw a2, (sp, 32)
+ ldw a3, (sp, 36)
+
+ addi sp, 40
+ ldm r4-r13, (sp)
+ addi sp, 40
+ ldm r16-r30, (sp)
+ addi sp, 72
+ mfcr sp, ss0
+.endm
+
.macro SAVE_SWITCH_STACK
subi sp, 64
stm r4-r11, (sp)
@@ -230,11 +290,8 @@
addi r6, 0x1ce
mtcr r6, cr<30, 15> /* Set MSA0 */
- lsri r6, 28
- addi r6, 2
- lsli r6, 28
- addi r6, 0x1ce
- mtcr r6, cr<31, 15> /* Set MSA1 */
+ movi r6, 0
+ mtcr r6, cr<31, 15> /* Clr MSA1 */
/* enable MMU */
mfcr r6, cr18
diff --git a/arch/csky/abiv2/inc/abi/fpu.h b/arch/csky/abiv2/inc/abi/fpu.h
index 22ca3cf2794a..09e2700a3693 100644
--- a/arch/csky/abiv2/inc/abi/fpu.h
+++ b/arch/csky/abiv2/inc/abi/fpu.h
@@ -9,7 +9,8 @@
int fpu_libc_helper(struct pt_regs *regs);
void fpu_fpe(struct pt_regs *regs);
-void __init init_fpu(void);
+
+static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); }
void save_to_user_fp(struct user_fp *user_fp);
void restore_from_user_fp(struct user_fp *user_fp);
diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S
index 326402e65f9e..9331c7ed5958 100644
--- a/arch/csky/abiv2/mcount.S
+++ b/arch/csky/abiv2/mcount.S
@@ -3,6 +3,8 @@
#include <linux/linkage.h>
#include <asm/ftrace.h>
+#include <abi/entry.h>
+#include <asm/asm-offsets.h>
/*
* csky-gcc with -pg will put the following asm after prologue:
@@ -44,6 +46,22 @@
jmp t1
.endm
+.macro mcount_enter_regs
+ subi sp, 8
+ stw lr, (sp, 0)
+ stw r8, (sp, 4)
+ SAVE_REGS_FTRACE
+.endm
+
+.macro mcount_exit_regs
+ RESTORE_REGS_FTRACE
+ ldw t1, (sp, 0)
+ ldw r8, (sp, 4)
+ ldw lr, (sp, 8)
+ addi sp, 12
+ jmp t1
+.endm
+
.macro save_return_regs
subi sp, 16
stw a0, (sp, 0)
@@ -122,6 +140,8 @@ ENTRY(ftrace_caller)
ldw a0, (sp, 16)
subi a0, 4
ldw a1, (sp, 24)
+ lrw a2, function_trace_op
+ ldw a2, (a2, 0)
nop
GLOBAL(ftrace_call)
@@ -157,3 +177,31 @@ ENTRY(return_to_handler)
jmp lr
END(return_to_handler)
#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+ mcount_enter_regs
+
+ lrw t1, PT_FRAME_SIZE
+ add t1, sp
+
+ ldw a0, (t1, 0)
+ subi a0, 4
+ ldw a1, (t1, 8)
+ lrw a2, function_trace_op
+ ldw a2, (a2, 0)
+ mov a3, sp
+
+ nop
+GLOBAL(ftrace_regs_call)
+ nop32_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ nop
+GLOBAL(ftrace_graph_regs_call)
+ nop32_stub
+#endif
+
+ mcount_exit_regs
+ENDPROC(ftrace_regs_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
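
Note: ftrace_regs_caller is what a ftrace_ops registered with FTRACE_OPS_FL_SAVE_REGS is routed through: SAVE_REGS_FTRACE builds a full pt_regs and passes it as the callback's fourth argument, which is also what lets kprobes-on-ftrace rewrite regs->pc. A hedged consumer sketch (the callback and ops names are hypothetical):

    #include <linux/ftrace.h>
    #include <linux/kernel.h>

    /* Hypothetical callback that asks for pt_regs, so it is dispatched
     * through ftrace_regs_caller rather than the plain ftrace_caller. */
    static void demo_trace_cb(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *ops, struct pt_regs *regs)
    {
        if (regs)
            pr_debug("traced %pS from %pS\n",
                     (void *)ip, (void *)parent_ip);
    }

    static struct ftrace_ops demo_trace_ops = {
        .func  = demo_trace_cb,
        .flags = FTRACE_OPS_FL_SAVE_REGS,
    };

    /* register_ftrace_function(&demo_trace_ops) attaches it;
     * unregister_ftrace_function() detaches. */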
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 4130e3eaa766..93372255984d 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -1,44 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
-generic-y += bugs.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += delay.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += fb.h
-generic-y += futex.h
generic-y += gpio.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += linkage.h
-generic-y += local.h
generic-y += local64.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += module.h
-generic-y += percpu.h
-generic-y += preempt.h
generic-y += qrwlock.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += timex.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
generic-y += user.h
-generic-y += vga.h
generic-y += vmlinux.lds.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
index ba35d93ecda2..fae72b0b1374 100644
--- a/arch/csky/include/asm/ftrace.h
+++ b/arch/csky/include/asm/ftrace.h
@@ -10,6 +10,8 @@
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
#define MCOUNT_ADDR ((unsigned long)_mcount)
#ifndef __ASSEMBLY__
diff --git a/arch/csky/include/asm/kprobes.h b/arch/csky/include/asm/kprobes.h
new file mode 100644
index 000000000000..b647bbde4d6d
--- /dev/null
+++ b/arch/csky/include/asm/kprobes.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_KPROBES_H
+#define __ASM_CSKY_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* Single step context for kprobe */
+struct kprobe_step_ctx {
+ unsigned long ss_pending;
+ unsigned long match_addr;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_sr;
+ struct prev_kprobe prev_kprobe;
+ struct kprobe_step_ctx ss_ctx;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
+int kprobe_breakpoint_handler(struct pt_regs *regs);
+int kprobe_single_step_handler(struct pt_regs *regs);
+void kretprobe_trampoline(void);
+void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
+
+#endif /* CONFIG_KPROBES */
+#endif /* __ASM_CSKY_KPROBES_H */
diff --git a/arch/csky/include/asm/probes.h b/arch/csky/include/asm/probes.h
new file mode 100644
index 000000000000..5e526334e6f9
--- /dev/null
+++ b/arch/csky/include/asm/probes.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_PROBES_H
+#define __ASM_CSKY_PROBES_H
+
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_probe_insn {
+ probe_opcode_t *insn;
+ probes_handler_t *handler;
+ /* restore address after simulation */
+ unsigned long restore;
+};
+
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
+
+#endif /* __ASM_CSKY_PROBES_H */
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 21e0bd5293dd..c6bcd7f7c720 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -43,6 +43,7 @@ extern struct cpuinfo_csky cpu_data[];
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long sr; /* saved status register */
+ unsigned long trap_no; /* saved trap number */
/* FPU regs */
struct user_fp __aligned(16) user_fp;
diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h
index d0aba7b32417..aae5aa96cf54 100644
--- a/arch/csky/include/asm/ptrace.h
+++ b/arch/csky/include/asm/ptrace.h
@@ -7,11 +7,14 @@
#include <uapi/asm/ptrace.h>
#include <asm/traps.h>
#include <linux/types.h>
+#include <linux/compiler.h>
#ifndef __ASSEMBLY__
#define PS_S 0x80000000 /* Supervisor Mode */
+#define USR_BKPT 0x1464
+
#define arch_has_single_step() (1)
#define current_pt_regs() \
({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
@@ -22,6 +25,18 @@
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->pc = val;
+}
+
+#if defined(__CSKYABIV2__)
+#define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr)
+#else
+#define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9])
+#endif
+
static inline bool in_syscall(struct pt_regs const *regs)
{
return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
@@ -37,5 +52,33 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->a0;
}
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->usp;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+/*
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which the register value is read
+ * @offset: offset of the register within struct pt_regs.
+ *
+ * regs_get_register() returns the value of the register at @offset from
+ * @regs. If @offset is bigger than MAX_REG_OFFSET, it returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_PTRACE_H */
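
Note: regs_query_register_offset() plus regs_get_register() is the pair the kprobe event parser relies on: a register name is resolved to a pt_regs byte offset once at probe-definition time, then read cheaply on every hit. A sketch of that split (the helpers below are hypothetical; the actual name table lives in the accompanying ptrace.c):

    /* Hypothetical helpers: resolve once, read per hit. */
    static int demo_resolve_reg(const char *name)
    {
        /* e.g. "a0", "usp"; returns < 0 when the name is unknown */
        return regs_query_register_offset(name);
    }

    static unsigned long demo_read_reg(struct pt_regs *regs, int offset)
    {
        /* returns 0 when offset > MAX_REG_OFFSET */
        return regs_get_register(regs, offset);
    }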
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
index 0b546a55a8bf..442fedad0260 100644
--- a/arch/csky/include/asm/thread_info.h
+++ b/arch/csky/include/asm/thread_info.h
@@ -57,6 +57,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing */
+#define TIF_UPROBE 6 /* uprobe breakpoint or singlestep */
#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
@@ -68,6 +69,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
diff --git a/arch/csky/include/asm/uprobes.h b/arch/csky/include/asm/uprobes.h
new file mode 100644
index 000000000000..600388eb93c6
--- /dev/null
+++ b/arch/csky/include/asm/uprobes.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_UPROBES_H
+#define __ASM_CSKY_UPROBES_H
+
+#include <asm/probes.h>
+
+#define MAX_UINSN_BYTES 4
+
+#define UPROBE_SWBP_INSN USR_BKPT
+#define UPROBE_SWBP_INSN_SIZE 2
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_no;
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ unsigned long insn_size;
+ bool simulate;
+};
+
+int uprobe_breakpoint_handler(struct pt_regs *regs);
+int uprobe_single_step_handler(struct pt_regs *regs);
+
+#endif /* __ASM_CSKY_UPROBES_H */
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index 071d659f37b7..fd6d9dc8b7f3 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -4,6 +4,7 @@ extra-y := head.o vmlinux.lds
obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
obj-y += power.o syscall.o syscall_table.o setup.o
obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
+obj-y += probes/
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
index 9b48b1b1a61b..f8be348df9e4 100644
--- a/arch/csky/kernel/asm-offsets.c
+++ b/arch/csky/kernel/asm-offsets.c
@@ -72,6 +72,7 @@ int main(void)
DEFINE(PT_RLO, offsetof(struct pt_regs, rlo));
#endif
DEFINE(PT_USP, offsetof(struct pt_regs, usp));
+ DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs));
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index 007706328000..364819536f2e 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -128,7 +128,10 @@ tlbop_end 1
ENTRY(csky_systemcall)
SAVE_ALL TRAP0_SIZE
zero_fp
-
+#ifdef CONFIG_DEBUG_RSEQ
+ mov a0, sp
+ jbsr rseq_syscall
+#endif
psrset ee, ie
lrw r11, __NR_syscalls
@@ -218,10 +221,17 @@ ret_from_exception:
andn r9, r10
ldw r12, (r9, TINFO_FLAGS)
- andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+ andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE)
cmpnei r12, 0
bt exit_work
1:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ ld r10, (sp, LSAVE_PSR)
+ btsti r10, 6
+ bf 2f
+ jbsr trace_hardirqs_on
+2:
+#endif
RESTORE_ALL
exit_work:
@@ -277,6 +287,10 @@ ENTRY(csky_irq)
zero_fp
psrset ee
+#ifdef CONFIG_TRACE_IRQFLAGS
+ jbsr trace_hardirqs_off
+#endif
+
#ifdef CONFIG_PREEMPTION
mov r9, sp /* Get current stack pointer */
bmaski r10, THREAD_SHIFT
diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c
index 44f4880179b7..44628e3f7fa6 100644
--- a/arch/csky/kernel/ftrace.c
+++ b/arch/csky/kernel/ftrace.c
@@ -3,6 +3,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -126,6 +127,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = ftrace_modify_code((unsigned long)&ftrace_call,
(unsigned long)func, true, true);
+ if (!ret)
+ ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
+ (unsigned long)func, true, true);
return ret;
}
@@ -135,6 +139,14 @@ int __init ftrace_dyn_arch_init(void)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return ftrace_modify_code(rec->ip, addr, true, true);
+}
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
@@ -190,5 +202,35 @@ int ftrace_disable_ftrace_graph_caller(void)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifndef CONFIG_CPU_HAS_ICACHE_INS
+struct ftrace_modify_param {
+ int command;
+ atomic_t cpu_count;
+};
+
+static int __ftrace_modify_code(void *data)
+{
+ struct ftrace_modify_param *param = data;
+
+ if (atomic_inc_return(&param->cpu_count) == 1) {
+ ftrace_modify_all_code(param->command);
+ atomic_inc(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ local_icache_inv_all(NULL);
+ }
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
+
+ stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+}
+#endif
+
/* _mcount is defined in abi's mcount.S */
EXPORT_SYMBOL(_mcount);
diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S
index 61989f9241c0..17ed9d250480 100644
--- a/arch/csky/kernel/head.S
+++ b/arch/csky/kernel/head.S
@@ -21,6 +21,11 @@ END(_start)
ENTRY(_start_smp_secondary)
SETUP_MMU
+ /* copy msa1 from CPU0 */
+ lrw r6, secondary_msa1
+ ld.w r6, (r6, 0)
+ mtcr r6, cr<31, 15>
+
/* set stack point */
lrw r6, secondary_stack
ld.w r6, (r6, 0)
diff --git a/arch/csky/kernel/probes/Makefile b/arch/csky/kernel/probes/Makefile
new file mode 100644
index 000000000000..1c7c6e6cb25b
--- /dev/null
+++ b/arch/csky/kernel/probes/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
+obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
+obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
+
+CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/probes/decode-insn.c b/arch/csky/kernel/probes/decode-insn.c
new file mode 100644
index 000000000000..bbc4edc25dc9
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <asm/sections.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+/* Return:
+ * INSN_REJECTED     if the instruction cannot be probed,
+ * INSN_GOOD_NO_SLOT if the instruction is simulated and needs no slot,
+ * INSN_GOOD         otherwise.
+ */
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
+{
+ probe_opcode_t insn = le32_to_cpu(*addr);
+
+ CSKY_INSN_SET_SIMULATE(br16, insn);
+ CSKY_INSN_SET_SIMULATE(bt16, insn);
+ CSKY_INSN_SET_SIMULATE(bf16, insn);
+ CSKY_INSN_SET_SIMULATE(jmp16, insn);
+ CSKY_INSN_SET_SIMULATE(jsr16, insn);
+ CSKY_INSN_SET_SIMULATE(lrw16, insn);
+ CSKY_INSN_SET_SIMULATE(pop16, insn);
+
+ CSKY_INSN_SET_SIMULATE(br32, insn);
+ CSKY_INSN_SET_SIMULATE(bt32, insn);
+ CSKY_INSN_SET_SIMULATE(bf32, insn);
+ CSKY_INSN_SET_SIMULATE(jmp32, insn);
+ CSKY_INSN_SET_SIMULATE(jsr32, insn);
+ CSKY_INSN_SET_SIMULATE(lrw32, insn);
+ CSKY_INSN_SET_SIMULATE(pop32, insn);
+
+ CSKY_INSN_SET_SIMULATE(bez32, insn);
+ CSKY_INSN_SET_SIMULATE(bnez32, insn);
+ CSKY_INSN_SET_SIMULATE(bnezad32, insn);
+ CSKY_INSN_SET_SIMULATE(bhsz32, insn);
+ CSKY_INSN_SET_SIMULATE(bhz32, insn);
+ CSKY_INSN_SET_SIMULATE(blsz32, insn);
+ CSKY_INSN_SET_SIMULATE(blz32, insn);
+ CSKY_INSN_SET_SIMULATE(bsr32, insn);
+ CSKY_INSN_SET_SIMULATE(jmpi32, insn);
+ CSKY_INSN_SET_SIMULATE(jsri32, insn);
+
+ return INSN_GOOD;
+}
diff --git a/arch/csky/kernel/probes/decode-insn.h b/arch/csky/kernel/probes/decode-insn.h
new file mode 100644
index 000000000000..9c4ad48fee0d
--- /dev/null
+++ b/arch/csky/kernel/probes/decode-insn.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H
+
+#include <asm/sections.h>
+#include <asm/kprobes.h>
+
+enum probe_insn {
+ INSN_REJECTED,
+ INSN_GOOD_NO_SLOT,
+ INSN_GOOD,
+};
+
+#define is_insn32(insn) (((insn) & 0xc000) == 0xc000)
+
+enum probe_insn __kprobes
+csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
+
+#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..5264763d05be
--- /dev/null
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kprobes.h>
+
+int arch_check_ftrace_location(struct kprobe *p)
+{
+ if (ftrace_location((unsigned long)p->addr))
+ p->flags |= KPROBE_FLAG_FTRACE;
+ return 0;
+}
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ bool lr_saver = false;
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ /* Preempt is disabled by ftrace */
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (!p) {
+ p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
+ if (unlikely(!p) || kprobe_disabled(p))
+ return;
+ lr_saver = true;
+ }
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(regs);
+
+ if (lr_saver)
+ ip -= MCOUNT_INSN_SIZE;
+ instruction_pointer_set(regs, ip);
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ /*
+ * Emulate singlestep (and also recover regs->pc)
+ * as if there is a nop
+ */
+ instruction_pointer_set(regs,
+ (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ instruction_pointer_set(regs, orig_ip);
+ }
+ /*
+ * If the pre_handler returned nonzero, it already changed
+ * regs->pc, so skip emulating the post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
+ }
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.api.insn = NULL;
+ return 0;
+}
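
Note: with this file built in, placing a kprobe on a function entry no longer patches a breakpoint: arch_check_ftrace_location() tags the probe KPROBE_FLAG_FTRACE and kprobe_ftrace_handler() invokes it from the ftrace callback instead. Registration is unchanged from an ordinary kprobe; a minimal sketch (module boilerplate assumed, the target symbol is only an example):

    #include <linux/kprobes.h>
    #include <linux/module.h>

    static int demo_pre(struct kprobe *p, struct pt_regs *regs)
    {
        pr_info("hit %ps\n", p->addr);
        return 0;   /* 0: let the normal post-handling run */
    }

    static struct kprobe demo_kp = {
        .symbol_name = "do_sys_open",   /* example target */
        .pre_handler = demo_pre,
    };

    static int __init demo_init(void)
    {
        /* lands on an ftrace site, so KPROBE_FLAG_FTRACE gets set */
        return register_kprobe(&demo_kp);
    }

    static void __exit demo_exit(void)
    {
        unregister_kprobe(&demo_kp);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");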
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
new file mode 100644
index 000000000000..f0f733b7ac5a
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kprobes.h>
+#include <linux/extable.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
+struct csky_insn_patch {
+ kprobe_opcode_t *addr;
+ u32 opcode;
+ atomic_t cpu_count;
+};
+
+static int __kprobes patch_text_cb(void *priv)
+{
+ struct csky_insn_patch *param = priv;
+ unsigned int addr = (unsigned int)param->addr;
+
+ if (atomic_inc_return(&param->cpu_count) == 1) {
+ *(u16 *) addr = cpu_to_le16(param->opcode);
+ dcache_wb_range(addr, addr + 2);
+ atomic_inc(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ }
+
+ icache_inv_range(addr, addr + 2);
+
+ return 0;
+}
+
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+ struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
+
+ return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
+}
+
+static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+{
+ unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+ p->ainsn.api.restore = (unsigned long)p->addr + offset;
+
+ patch_text(p->ainsn.api.insn, p->opcode);
+}
+
+static void __kprobes arch_prepare_simulate(struct kprobe *p)
+{
+ p->ainsn.api.restore = 0;
+}
+
+static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (p->ainsn.api.handler)
+ p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
+
+ post_kprobe_handler(kcb, regs);
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long probe_addr = (unsigned long)p->addr;
+
+ if (probe_addr & 0x1) {
+ pr_warn("Address not aligned.\n");
+ return -EINVAL;
+ }
+
+ /* copy instruction */
+ p->opcode = le32_to_cpu(*p->addr);
+
+ /* decode instruction */
+ switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
+ case INSN_REJECTED: /* insn not supported */
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT: /* insn needs simulation */
+ p->ainsn.api.insn = NULL;
+ break;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.api.insn = get_insn_slot();
+ if (!p->ainsn.api.insn)
+ return -ENOMEM;
+ break;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.api.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+
+/* install breakpoint in text */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, USR_BKPT);
+}
+
+/* remove breakpoint from text */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * If interrupts stayed enabled on the local CPU, an interrupt arriving
+ * between the exception return and the start of the out-of-line single
+ * step could make us wrongly single-step into the interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_sr = regs->sr;
+ regs->sr &= ~BIT(6);
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ regs->sr = kcb->saved_sr;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
+{
+ unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
+
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + offset;
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+#define TRACE_MODE_SI BIT(14)
+#define TRACE_MODE_MASK ~(0x3 << 14)
+#define TRACE_MODE_RUN 0
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.api.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.api.insn;
+
+ set_ss_context(kcb, slot, p); /* mark pending ss */
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+ instruction_pointer_set(regs, slot);
+ } else {
+ /* insn simulation */
+ arch_simulate_insn(p, regs);
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* restore pc if the insn was single-stepped rather than simulated */
+ if (cur->ainsn.api.restore != 0)
+ regs->pc = cur->ainsn.api.restore;
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler) {
+ /* post_handler can hit a breakpoint and single step
+ * again, so enable the D-flag for recursive exceptions.
+ */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the ip points back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->pc = (unsigned long) cur->addr;
+ if (!instruction_pointer(regs))
+ BUG();
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting; the
+ * npre/npostfault counts could also be used to account
+ * for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+ }
+ return 0;
+}
+
+int __kprobes
+kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur_kprobe;
+ struct kprobe_ctlblk *kcb;
+ unsigned long addr = instruction_pointer(regs);
+
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe((kprobe_opcode_t *) addr);
+
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return 1;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it modified
+ * the execution path and no single stepping is
+ * needed; just reset the current kprobe and exit.
+ *
+ * The pre_handler itself can hit a breakpoint and
+ * single step through it before returning.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ setup_singlestep(p, regs, kcb, 0);
+ else
+ reset_current_kprobe();
+ }
+ return 1;
+ }
+
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Return back to original instruction, and continue.
+ */
+ return 0;
+}
+
+int __kprobes
+kprobe_single_step_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if ((kcb->ss_ctx.ss_pending)
+ && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
+ clear_ss_context(kcb); /* clear pending ss */
+
+ kprobes_restore_local_irqflag(kcb, regs);
+ regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+ post_kprobe_handler(kcb, regs);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ int ret;
+
+ ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+ return ret;
+}
+
+void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address =
+ (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path have
+ * return probes installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always pushed into the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the (chronologically) first instance's ret_addr
+ * will be the real return address, and all the rest will
+ * point to kretprobe_trampoline.
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (ri->rp && ri->rp->handler) {
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_hash_unlock(current, &flags);
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ return (void *)orig_ret_address;
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->lr;
+ regs->lr = (unsigned long) &kretprobe_trampoline;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
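
Note: the kretprobe plumbing above works by having arch_prepare_kretprobe() swap the probed function's lr for kretprobe_trampoline; when the function returns, trampoline_probe_handler() walks the per-task instance list to recover the genuine return address and fires the handler. A minimal consumer sketch (module boilerplate as in the earlier example; the target symbol is version-dependent and only illustrative):

    #include <linux/kprobes.h>

    static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
        pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
                regs_return_value(regs));
        return 0;
    }

    static struct kretprobe demo_rp = {
        .kp.symbol_name = "_do_fork",   /* illustrative target */
        .handler        = demo_ret,
        .maxactive      = 16,   /* concurrent instances before misses */
    };

    /* register_kretprobe(&demo_rp) arms it; unregister_kretprobe()
     * disarms and waits for in-flight instances. */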
diff --git a/arch/csky/kernel/probes/kprobes_trampoline.S b/arch/csky/kernel/probes/kprobes_trampoline.S
new file mode 100644
index 000000000000..b1fe3af24f03
--- /dev/null
+++ b/arch/csky/kernel/probes/kprobes_trampoline.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#include <linux/linkage.h>
+
+#include <abi/entry.h>
+
+ENTRY(kretprobe_trampoline)
+ SAVE_REGS_FTRACE
+
+ mov a0, sp /* pt_regs */
+
+ jbsr trampoline_probe_handler
+
+ /* use the result as the return-address */
+ mov lr, a0
+
+ RESTORE_REGS_FTRACE
+ rts
+ENDPROC(kretprobe_trampoline)
diff --git a/arch/csky/kernel/probes/simulate-insn.c b/arch/csky/kernel/probes/simulate-insn.c
new file mode 100644
index 000000000000..4e464fed52ec
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "decode-insn.h"
+#include "simulate-insn.h"
+
+static inline bool csky_insn_reg_get_val(struct pt_regs *regs,
+ unsigned long index,
+ unsigned long *ptr)
+{
+ /* r0-r13 sit in a contiguous block starting at a0 */
+ if (index < 14) {
+ *ptr = *(&regs->a0 + index);
+ return true;
+ }
+
+ /* r16-r30 live in exregs[] */
+ if (index > 15 && index < 31) {
+ *ptr = *(&regs->exregs[0] + index - 16);
+ return true;
+ }
+
+ switch (index) {
+ case 14:
+ *ptr = regs->usp;
+ break;
+ case 15:
+ *ptr = regs->lr;
+ break;
+ case 31:
+ *ptr = regs->tls;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool csky_insn_reg_set_val(struct pt_regs *regs,
+ unsigned long index,
+ unsigned long val)
+{
+ /* r0-r13 sit in a contiguous block starting at a0 */
+ if (index < 14) {
+ *(&regs->a0 + index) = val;
+ return true;
+ }
+
+ /* r16-r30 live in exregs[] */
+ if (index > 15 && index < 31) {
+ *(&regs->exregs[0] + index - 16) = val;
+ return true;
+ }
+
+ switch (index) {
+ case 14:
+ regs->usp = val;
+ break;
+ case 15:
+ regs->lr = val;
+ break;
+ case 31:
+ regs->tls = val;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+void __kprobes
+simulate_br16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+}
+
+void __kprobes
+simulate_br32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+}
+
+void __kprobes
+simulate_bt16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (regs->sr & 1)
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+ else
+ instruction_pointer_set(regs, addr + 2);
+}
+
+void __kprobes
+simulate_bt32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (regs->sr & 1)
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bf16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (!(regs->sr & 1))
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0x3ff) << 1, 9));
+ else
+ instruction_pointer_set(regs, addr + 2);
+}
+
+void __kprobes
+simulate_bf32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ if (!(regs->sr & 1))
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = (opcode >> 2) & 0xf;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = (opcode >> 2) & 0xf;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ regs->lr = addr + 2;
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ regs->lr = addr + 4;
+
+ instruction_pointer_set(regs, tmp & 0xfffffffe);
+}
+
+void __kprobes
+simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long tmp = (opcode & 0x300) >> 3;
+ unsigned long offset = ((opcode & 0x1f) | tmp) << 2;
+
+ tmp = (opcode & 0xe0) >> 5;
+
+ val = *(unsigned int *)(instruction_pointer(regs) + offset);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = (opcode & 0xffff0000) >> 14;
+ unsigned long tmp = opcode & 0x0000001f;
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_pop16(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long *tmp = (unsigned long *)regs->usp;
+ int i;
+
+ for (i = 0; i < (opcode & 0xf); i++) {
+ csky_insn_reg_set_val(regs, i + 4, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x10) {
+ csky_insn_reg_set_val(regs, 15, *tmp);
+ tmp += 1;
+ }
+
+ regs->usp = (unsigned long)tmp;
+
+ instruction_pointer_set(regs, regs->lr);
+}
+
+void __kprobes
+simulate_pop32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long *tmp = (unsigned long *)regs->usp;
+ int i;
+
+ for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) {
+ csky_insn_reg_set_val(regs, i + 4, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x100000) {
+ csky_insn_reg_set_val(regs, 15, *tmp);
+ tmp += 1;
+ }
+
+ for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) {
+ csky_insn_reg_set_val(regs, i + 16, *tmp);
+ tmp += 1;
+ }
+
+ if (opcode & 0x1000000) {
+ csky_insn_reg_set_val(regs, 29, *tmp);
+ tmp += 1;
+ }
+
+ regs->usp = (unsigned long)tmp;
+
+ instruction_pointer_set(regs, regs->lr);
+}
+
+void __kprobes
+simulate_bez32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ if (tmp == 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+
+ csky_insn_reg_get_val(regs, tmp, &tmp);
+
+ if (tmp != 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+}
+
+void __kprobes
+simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ val -= 1;
+
+ if ((long)val > 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long)val >= 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long)val > 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long)val <= 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp = opcode & 0x1f;
+ unsigned long val;
+
+ csky_insn_reg_get_val(regs, tmp, &val);
+
+ if ((long)val < 0) {
+ instruction_pointer_set(regs,
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
+ } else
+ instruction_pointer_set(regs, addr + 4);
+
+ csky_insn_reg_set_val(regs, tmp, val);
+}
+
+void __kprobes
+simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long tmp;
+
+ tmp = (opcode & 0xffff) << 16;
+ tmp |= (opcode & 0xffff0000) >> 16;
+
+ instruction_pointer_set(regs,
+ addr + sign_extend32((tmp & 0x3ffffff) << 1, 15));
+
+ regs->lr = addr + 4;
+}
+
+void __kprobes
+simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ instruction_pointer_set(regs, val);
+}
+
+void __kprobes
+simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs)
+{
+ unsigned long val;
+ unsigned long offset = ((opcode & 0xffff0000) >> 14);
+
+ val = *(unsigned int *)
+ ((instruction_pointer(regs) + offset) & 0xfffffffc);
+
+ regs->lr = addr + 4;
+
+ instruction_pointer_set(regs, val);
+}
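
The conditional-branch simulators above all compute their target the same way: take the 16-bit immediate from the top half of the opcode, shift it into a byte offset, and sign-extend it before adding it to the probed address. A minimal userspace sketch of that arithmetic, with a local copy of the kernel's sign_extend32() helper (the opcode value below is invented for illustration, not a real trace):

#include <stdint.h>
#include <stdio.h>

/* Local copy of the include/linux/bitops.h helper: sign-extend from bit 'index'. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t opcode = 0xfff0e900;	/* invented bez32-style opcode, imm16 = 0xfff0 */
	unsigned long addr = 0x1000;	/* address of the probed branch */

	/* Mirrors the target computation in simulate_bez32() above. */
	unsigned long taken = addr + sign_extend32((opcode & 0xffff0000) >> 15, 15);

	printf("taken: pc = 0x%lx, fall-through: pc = 0x%lx\n", taken, addr + 4);
	return 0;
}
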
diff --git a/arch/csky/kernel/probes/simulate-insn.h b/arch/csky/kernel/probes/simulate-insn.h
new file mode 100644
index 000000000000..ba4cb7ef062e
--- /dev/null
+++ b/arch/csky/kernel/probes/simulate-insn.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
+
+#define __CSKY_INSN_FUNCS(name, mask, val) \
+static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+void simulate_##name(u32 opcode, long addr, struct pt_regs *regs);
+
+#define CSKY_INSN_SET_SIMULATE(name, code) \
+ do { \
+ if (csky_insn_is_##name(code)) { \
+ api->handler = simulate_##name; \
+ return INSN_GOOD_NO_SLOT; \
+ } \
+ } while (0)
+
+__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400)
+__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800)
+__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00)
+__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800)
+__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801)
+__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000)
+__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480)
+
+__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800)
+__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860)
+__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840)
+__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0)
+__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0)
+__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80)
+__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0)
+
+__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900)
+__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920)
+__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820)
+__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0)
+__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940)
+__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960)
+__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980)
+__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000)
+__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0)
+__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0)
+
+#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */
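
Everything in this header reduces instruction classification to (mask, value) pairs: an opcode belongs to class name when (code & mask) == val, and the BUILD_BUG_ON rejects tables where the value has bits outside the mask. A self-contained sketch of the same decode pattern, reusing two encodings from the table above (the rest are omitted for brevity):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct insn_class {
	const char *name;
	uint32_t mask;
	uint32_t val;
};

/* Two entries lifted from the table above. */
static const struct insn_class classes[] = {
	{ "br32",  0x0000ffff, 0x0000e800 },
	{ "bez32", 0x0000ffe0, 0x0000e900 },
};

static const char *classify(uint32_t code)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		if ((code & classes[i].mask) == classes[i].val)
			return classes[i].name;
	return "unknown";
}

int main(void)
{
	/* 0xe905 carries the bez32 signature with rz = 5 in the low bits. */
	printf("%s\n", classify(0x0000e905));
	return 0;
}
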
diff --git a/arch/csky/kernel/probes/uprobes.c b/arch/csky/kernel/probes/uprobes.c
new file mode 100644
index 000000000000..b3a56c260e3e
--- /dev/null
+++ b/arch/csky/kernel/probes/uprobes.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ */
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ probe_opcode_t insn;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+ auprobe->insn_size = is_insn32(insn) ? 4 : 2;
+
+ switch (csky_probe_decode_insn(&insn, &auprobe->api)) {
+ case INSN_REJECTED:
+ return -EINVAL;
+
+ case INSN_GOOD_NO_SLOT:
+ auprobe->simulate = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ utask->autask.saved_trap_no = current->thread.trap_no;
+ current->thread.trap_no = UPROBE_TRAP_NR;
+
+ instruction_pointer_set(regs, utask->xol_vaddr);
+
+ user_enable_single_step(current);
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
+
+ instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
+
+ user_disable_single_step(current);
+
+ return 0;
+}
+
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.trap_no != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ probe_opcode_t insn;
+ unsigned long addr;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+ auprobe->api.handler(insn, addr, regs);
+
+ return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+ * Task has received a fatal signal, so reset back to the
+ * probed address.
+ */
+ instruction_pointer_set(regs, utask->vaddr);
+
+ user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->usp <= ret->stack;
+ else
+ return regs->usp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->lr;
+
+ regs->lr = trampoline_vaddr;
+
+ return ra;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+int uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ if (uprobe_pre_sstep_notifier(regs))
+ return 1;
+
+ return 0;
+}
+
+int uprobe_single_step_handler(struct pt_regs *regs)
+{
+ if (uprobe_post_sstep_notifier(regs))
+ return 1;
+
+ return 0;
+}
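
The pre/post-XOL handlers above are symmetric bookkeeping around a single-step: pre_xol saves the trap number and points the PC at the out-of-line slot holding a copy of the probed instruction; post_xol lands the PC just past the original. A toy userspace model of that PC shuffling (struct names and addresses here are invented):

#include <stdio.h>

struct regs { unsigned long pc; };

struct xol_state {
	unsigned long vaddr;		/* address of the probed instruction */
	unsigned long xol_vaddr;	/* out-of-line slot holding a copy of it */
	unsigned int insn_size;		/* 2 or 4 bytes on csky */
};

static void pre_xol(const struct xol_state *st, struct regs *r)
{
	r->pc = st->xol_vaddr;			/* single-step the copy */
}

static void post_xol(const struct xol_state *st, struct regs *r)
{
	r->pc = st->vaddr + st->insn_size;	/* resume after the original */
}

int main(void)
{
	struct xol_state st = { 0x400000, 0x7f0000, 4 };
	struct regs r = { st.vaddr };

	pre_xol(&st, &r);
	printf("stepping at 0x%lx\n", r.pc);
	post_xol(&st, &r);
	printf("resuming at 0x%lx\n", r.pc);
	return 0;
}
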
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 313623a19ecb..21ac2608f205 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -193,6 +193,109 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_csky_view;
}
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(tls),
+ REG_OFFSET_NAME(lr),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(usp),
+ REG_OFFSET_NAME(orig_a0),
+ REG_OFFSET_NAME(a0),
+ REG_OFFSET_NAME(a1),
+ REG_OFFSET_NAME(a2),
+ REG_OFFSET_NAME(a3),
+ REG_OFFSET_NAME(regs[0]),
+ REG_OFFSET_NAME(regs[1]),
+ REG_OFFSET_NAME(regs[2]),
+ REG_OFFSET_NAME(regs[3]),
+ REG_OFFSET_NAME(regs[4]),
+ REG_OFFSET_NAME(regs[5]),
+ REG_OFFSET_NAME(regs[6]),
+ REG_OFFSET_NAME(regs[7]),
+ REG_OFFSET_NAME(regs[8]),
+ REG_OFFSET_NAME(regs[9]),
+#if defined(__CSKYABIV2__)
+ REG_OFFSET_NAME(exregs[0]),
+ REG_OFFSET_NAME(exregs[1]),
+ REG_OFFSET_NAME(exregs[2]),
+ REG_OFFSET_NAME(exregs[3]),
+ REG_OFFSET_NAME(exregs[4]),
+ REG_OFFSET_NAME(exregs[5]),
+ REG_OFFSET_NAME(exregs[6]),
+ REG_OFFSET_NAME(exregs[7]),
+ REG_OFFSET_NAME(exregs[8]),
+ REG_OFFSET_NAME(exregs[9]),
+ REG_OFFSET_NAME(exregs[10]),
+ REG_OFFSET_NAME(exregs[11]),
+ REG_OFFSET_NAME(exregs[12]),
+ REG_OFFSET_NAME(exregs[13]),
+ REG_OFFSET_NAME(exregs[14]),
+ REG_OFFSET_NAME(rhi),
+ REG_OFFSET_NAME(rlo),
+ REG_OFFSET_NAME(dcsr),
+#endif
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_within_kernel_stack() - check whether an address is in the kernel stack
+ * @regs: pt_regs that contains the kernel stack pointer.
+ * @addr: address to check.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack
+ * page(s). It returns true if @addr is within the kernel stack, false otherwise.
+ */
+static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return (addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get the Nth entry of the kernel stack
+ * @regs: pt_regs that contains the kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
+ * specified by @regs. If the @n-th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);
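
regs_query_register_offset() is a plain name-to-offsetof() lookup; kprobes fetch-args use it to resolve register names like "lr" once at probe-registration time. A minimal sketch of the same table technique against a mock pt_regs (the struct below is a stand-in, not the real csky layout):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct mock_pt_regs {
	unsigned long lr;
	unsigned long pc;
	unsigned long usp;
};

struct reg_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) { #r, offsetof(struct mock_pt_regs, r) }

static const struct reg_offset table[] = {
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(usp),
	{ NULL, 0 },
};

static int query_register_offset(const char *name)
{
	const struct reg_offset *roff;

	for (roff = table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -1;	/* the kernel version returns -EINVAL */
}

int main(void)
{
	printf("pc lives at offset %d\n", query_register_offset("pc"));
	return 0;
}
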
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index 3821e55742f4..819a9a7bf786 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -24,26 +24,9 @@ struct screen_info screen_info = {
};
#endif
-phys_addr_t __init_memblock memblock_end_of_REG0(void)
-{
- return (memblock.memory.regions[0].base +
- memblock.memory.regions[0].size);
-}
-
-phys_addr_t __init_memblock memblock_start_of_REG1(void)
-{
- return memblock.memory.regions[1].base;
-}
-
-size_t __init_memblock memblock_size_of_REG1(void)
-{
- return memblock.memory.regions[1].size;
-}
-
static void __init csky_memblock_init(void)
{
unsigned long zone_size[MAX_NR_ZONES];
- unsigned long zhole_size[MAX_NR_ZONES];
signed long size;
memblock_reserve(__pa(_stext), _end - _stext);
@@ -54,54 +37,36 @@ static void __init csky_memblock_init(void)
memblock_dump_all();
memset(zone_size, 0, sizeof(zone_size));
- memset(zhole_size, 0, sizeof(zhole_size));
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
- max_pfn = PFN_DOWN(memblock_end_of_DRAM());
-
- max_low_pfn = PFN_UP(memblock_end_of_REG0());
- if (max_low_pfn == 0)
- max_low_pfn = max_pfn;
+ max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM());
size = max_pfn - min_low_pfn;
- if (memblock.memory.cnt > 1) {
- zone_size[ZONE_NORMAL] =
- PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn;
- zhole_size[ZONE_NORMAL] =
- PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn;
+ if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET))
+ zone_size[ZONE_NORMAL] = size;
+ else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) {
+ zone_size[ZONE_NORMAL] =
+ PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
+ max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
} else {
- if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET))
- zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn;
- else {
- zone_size[ZONE_NORMAL] =
+ zone_size[ZONE_NORMAL] =
PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
- max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
- }
+ max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
+ write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
}
#ifdef CONFIG_HIGHMEM
- size = 0;
- if (memblock.memory.cnt > 1) {
- size = PFN_DOWN(memblock_size_of_REG1());
- highstart_pfn = PFN_DOWN(memblock_start_of_REG1());
- } else {
- size = max_pfn - min_low_pfn -
- PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
- highstart_pfn = min_low_pfn +
- PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
- }
-
- if (size > 0)
- zone_size[ZONE_HIGHMEM] = size;
+ zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
- highend_pfn = max_pfn;
+ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
#endif
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(0);
- free_area_init_node(0, zone_size, min_low_pfn, zhole_size);
+ free_area_init_node(0, zone_size, min_low_pfn, NULL);
}
void __init setup_arch(char **cmdline_p)
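
The rewritten csky_memblock_init() replaces the per-memblock-region zone carving with a three-way split: memory that fits in one SSEG goes entirely to ZONE_NORMAL, larger configurations cap lowmem at SSEG_SIZE or LOWMEM_LIMIT, and whatever is left becomes ZONE_HIGHMEM. A toy model of that split, with invented pfn counts (the real code works in PFN_DOWN()-converted units):

#include <stdio.h>

/*
 * Toy model of the zone sizing above: up to one SSEG of memory fits
 * entirely in ZONE_NORMAL; beyond that, lowmem is capped (at the SSEG
 * span or at the LOWMEM_LIMIT span) and the rest becomes ZONE_HIGHMEM.
 */
static void size_zones(unsigned long total, unsigned long sseg,
		       unsigned long lowmem,
		       unsigned long *normal, unsigned long *high)
{
	if (total <= sseg)
		*normal = total;
	else if (total < lowmem)
		*normal = sseg;
	else
		*normal = lowmem;

	*high = total - *normal;
}

int main(void)
{
	unsigned long normal, high;

	/* Invented pfn counts: 256K total, 128K per SSEG, 384K lowmem limit. */
	size_zones(1UL << 18, 1UL << 17, 3UL << 17, &normal, &high);
	printf("normal=%lu pfns, highmem=%lu pfns\n", normal, high);
	return 0;
}
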
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 9b1b7c039ddf..9452d6570b7e 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -175,6 +175,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
+ rseq_signal_deliver(ksig, regs);
+
/* Are we from a system call? */
if (in_syscall(regs)) {
/* Avoid additional syscall restarting via ret_from_exception */
@@ -251,6 +253,9 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_info_flags)
{
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
/* Handle pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -258,5 +263,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
}
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 0bb0954d5570..b5c5bc3afeb5 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -22,6 +22,9 @@
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
+#ifdef CONFIG_CPU_HAS_FPU
+#include <abi/fpu.h>
+#endif
struct ipi_data_struct {
unsigned long bits ____cacheline_aligned;
@@ -156,6 +159,8 @@ volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
+unsigned long secondary_msa1;
+
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
unsigned long mask = 1 << cpu;
@@ -164,6 +169,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
secondary_hint = mfcr("cr31");
secondary_ccr = mfcr("cr18");
+ secondary_msa1 = read_mmu_msa1();
/*
* Because other CPUs are in reset status, we must flush data
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index b057480e7463..fcc3a69831ad 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -14,6 +14,7 @@
#include <linux/kallsyms.h>
#include <linux/rtc.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -109,14 +110,14 @@ void buserr(struct pt_regs *regs)
force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc);
}
-#define USR_BKPT 0x1464
asmlinkage void trap_c(struct pt_regs *regs)
{
int sig;
unsigned long vector;
siginfo_t info;
+ struct task_struct *tsk = current;
- vector = (mfcr("psr") >> 16) & 0xff;
+ vector = (regs->sr >> 16) & 0xff;
switch (vector) {
case VEC_ZERODIV:
@@ -125,10 +126,27 @@ asmlinkage void trap_c(struct pt_regs *regs)
break;
/* ptrace */
case VEC_TRACE:
+#ifdef CONFIG_KPROBES
+ if (kprobe_single_step_handler(regs))
+ return;
+#endif
+#ifdef CONFIG_UPROBES
+ if (uprobe_single_step_handler(regs))
+ return;
+#endif
info.si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_ILLEGAL:
+ tsk->thread.trap_no = vector;
+#ifdef CONFIG_KPROBES
+ if (kprobe_breakpoint_handler(regs))
+ return;
+#endif
+#ifdef CONFIG_UPROBES
+ if (uprobe_breakpoint_handler(regs))
+ return;
+#endif
die_if_kernel("Kernel mode ILLEGAL", regs, vector);
#ifndef CONFIG_CPU_NO_USER_BKPT
if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)
@@ -146,16 +164,20 @@ asmlinkage void trap_c(struct pt_regs *regs)
sig = SIGTRAP;
break;
case VEC_ACCESS:
+ tsk->thread.trap_no = vector;
return buserr(regs);
#ifdef CONFIG_CPU_NEED_SOFTALIGN
case VEC_ALIGN:
+ tsk->thread.trap_no = vector;
return csky_alignment(regs);
#endif
#ifdef CONFIG_CPU_HAS_FPU
case VEC_FPE:
+ tsk->thread.trap_no = vector;
die_if_kernel("Kernel mode FPE", regs, vector);
return fpu_fpe(regs);
case VEC_PRIV:
+ tsk->thread.trap_no = vector;
die_if_kernel("Kernel mode PRIV", regs, vector);
if (fpu_libc_helper(regs))
return;
@@ -164,5 +186,8 @@ asmlinkage void trap_c(struct pt_regs *regs)
sig = SIGSEGV;
break;
}
+
+ tsk->thread.trap_no = vector;
+
send_sig(sig, current, 0);
}
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
index bc419f8039d3..7a9664adce43 100644
--- a/arch/csky/mm/cachev2.c
+++ b/arch/csky/mm/cachev2.c
@@ -7,8 +7,12 @@
#include <asm/cache.h>
#include <asm/barrier.h>
+/* for L1-cache */
#define INS_CACHE (1 << 0)
+#define DATA_CACHE (1 << 1)
#define CACHE_INV (1 << 4)
+#define CACHE_CLR (1 << 5)
+#define CACHE_OMS (1 << 6)
void local_icache_inv_all(void *priv)
{
@@ -16,11 +20,6 @@ void local_icache_inv_all(void *priv)
sync_is();
}
-void icache_inv_all(void)
-{
- on_each_cpu(local_icache_inv_all, NULL, 1);
-}
-
#ifdef CONFIG_CPU_HAS_ICACHE_INS
void icache_inv_range(unsigned long start, unsigned long end)
{
@@ -31,9 +30,43 @@ void icache_inv_range(unsigned long start, unsigned long end)
sync_is();
}
#else
+struct cache_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+static DEFINE_SPINLOCK(cache_lock);
+
+static inline void cache_op_line(unsigned long i, unsigned int val)
+{
+ mtcr("cr22", i);
+ mtcr("cr17", val);
+}
+
+void local_icache_inv_range(void *priv)
+{
+ struct cache_range *param = priv;
+ unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache_lock, flags);
+
+ for (; i < param->end; i += L1_CACHE_BYTES)
+ cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
+
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+ sync_is();
+}
+
void icache_inv_range(unsigned long start, unsigned long end)
{
- icache_inv_all();
+ struct cache_range param = { start, end };
+
+ if (irqs_disabled())
+ local_icache_inv_range(&param);
+ else
+ on_each_cpu(local_icache_inv_range, &param, 1);
}
#endif
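
When the core lacks icache line instructions, the new fallback walks the range one line at a time through cr22/cr17 under a spinlock and broadcasts the walk with on_each_cpu(), instead of invalidating the whole icache. The line-aligned iteration is the heart of it; a host-side sketch (line size assumed, with a printf standing in for the register writes):

#include <stdio.h>

#define L1_CACHE_BYTES 64	/* assumed line size for the demo */

static void cache_op_line(unsigned long addr)
{
	/* Stand-in for the cr22/cr17 writes in cache_op_line() above. */
	printf("invalidate line at 0x%lx\n", addr);
}

static void inv_range(unsigned long start, unsigned long end)
{
	unsigned long i;

	/* Round down to a line boundary, then step one line at a time. */
	for (i = start & ~(L1_CACHE_BYTES - 1UL); i < end; i += L1_CACHE_BYTES)
		cache_op_line(i);
}

int main(void)
{
	inv_range(0x1010, 0x10a0);	/* touches lines 0x1000, 0x1040, 0x1080 */
	return 0;
}
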
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index f76618b630f9..4e6dc68f3258 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
+#include <linux/kprobes.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
@@ -53,6 +54,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
int fault;
unsigned long address = mmu_meh & PAGE_MASK;
+ if (kprobe_page_fault(regs, tsk->thread.trap_no))
+ return;
+
si_code = SEGV_MAPERR;
#ifndef CONFIG_CPU_HAS_TLBI
@@ -137,7 +141,7 @@ good_area:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
- if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ if (unlikely(!vma_is_accessible(vma)))
goto bad_area;
}
@@ -179,11 +183,14 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
+ tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
no_context:
+ tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -198,6 +205,8 @@ no_context:
die_if_kernel("Oops", regs, write);
out_of_memory:
+ tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
@@ -206,6 +215,8 @@ out_of_memory:
return;
do_sigbus:
+ tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
+
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 7f618190e1a9..ddf04f32b546 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,54 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
-generic-y += barrier.h
-generic-y += bugs.h
-generic-y += cacheflush.h
-generic-y += checksum.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += delay.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += ftrace.h
-generic-y += futex.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += linkage.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += mmu.h
-generic-y += mmu_context.h
-generic-y += module.h
generic-y += parport.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += pgalloc.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += shmparam.h
generic-y += spinlock.h
-generic-y += timex.h
-generic-y += tlbflush.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += uaccess.h
-generic-y += unaligned.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 84bb1ed1b931..373964bb177e 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -1,39 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += barrier.h
-generic-y += bug.h
-generic-y += bugs.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += ftrace.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
generic-y += iomap.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += shmparam.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index b3bc71680ae4..72334b26317a 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -41,7 +41,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
int si_code = SEGV_MAPERR;
vm_fault_t fault;
const struct exception_table_entry *fixup;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
/*
* If we're in an interrupt or have no user context,
@@ -91,7 +91,7 @@ good_area:
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
/* The most common case -- we are done. */
@@ -102,7 +102,6 @@ good_area:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
goto retry;
}
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index b630bd7351c4..f3ba813a5b80 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -57,7 +57,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
-CONFIG_EFI_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_AGP=m
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 661d90b3e148..cb267a07c57f 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -94,7 +94,8 @@ CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
-CONFIG_EFI_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
CONFIG_RAW_DRIVER=m
CONFIG_HPET=y
CONFIG_AGP=m
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 7844e6a956a4..7e25f2f031b6 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -82,7 +82,8 @@ CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
-CONFIG_EFI_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
CONFIG_RAW_DRIVER=m
CONFIG_HPET=y
CONFIG_AGP=m
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 1d6e2a01452b..3f486d5bdc2d 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -86,7 +86,8 @@ CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
-CONFIG_EFI_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
CONFIG_RAW_DRIVER=m
CONFIG_HPET=y
CONFIG_AGP=m
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 8c92e095f8bb..261e98e1f5fe 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -35,7 +35,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_CHR_DEV_OSST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
@@ -69,7 +68,8 @@ CONFIG_SERIAL_8250_NR_UARTS=8
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
-CONFIG_EFI_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
CONFIG_I2C_CHARDEV=y
CONFIG_AGP=y
CONFIG_AGP_HP_ZX1=y
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 390393667d3b..f994c1daf9d4 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,12 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += compat.h
-generic-y += exec.h
-generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += preempt.h
-generic-y += trace_clock.h
generic-y += vtime.h
-generic-y += word-at-a-time.h
diff --git a/arch/ia64/kernel/.gitignore b/arch/ia64/kernel/.gitignore
index 21cb0da5ded8..0374827206e7 100644
--- a/arch/ia64/kernel/.gitignore
+++ b/arch/ia64/kernel/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
gate.lds
vmlinux.lds
diff --git a/arch/ia64/kernel/syscalls/syscallhdr.sh b/arch/ia64/kernel/syscalls/syscallhdr.sh
index 0c2d2c748565..f407b6e53283 100644
--- a/arch/ia64/kernel/syscalls/syscallhdr.sh
+++ b/arch/ia64/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 1ec6b703c5b4..6b5652ee76f9 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -54,6 +54,8 @@ SECTIONS {
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
*(.gnu.linkonce.t*)
}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index c2f299fe9e04..30d0c1fca99e 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -65,7 +65,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
struct mm_struct *mm = current->mm;
unsigned long mask;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
@@ -141,7 +141,7 @@ retry:
*/
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -167,7 +167,6 @@ retry:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/* No need to up_read(&mm->mmap_sem) as we would
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 9818d0ed143e..5b3a273ae3da 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -333,7 +333,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index db2402c75300..0bf0907a7c80 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -318,7 +318,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 07a28b48eb43..876e69292294 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -333,7 +333,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 033589367cb1..aa59c242e715 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -315,7 +315,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ee4d02a90214..308cd93929a9 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -317,7 +317,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 442eed608ebc..0bc210ace870 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -324,7 +324,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 1de7da7c1041..3b3b832dee80 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -357,7 +357,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index ced341e047db..e3633c66926f 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -314,7 +314,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 006188f1f063..88b3f7f9f146 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -315,7 +315,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index c65d985ee642..3dd5b536921e 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -323,7 +323,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 32905fe56cd7..715e015ed270 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -312,7 +312,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 32b0969668fe..f9ff129ac7c2 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -312,7 +312,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 63f12afc4874..a0765aa60ea9 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,32 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += barrier.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += futex.h
generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += shmparam.h
generic-y += spinlock.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/m68k/kernel/.gitignore b/arch/m68k/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/m68k/kernel/.gitignore
+++ b/arch/m68k/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index e9b1d7585b43..3bfb5c8ac3c7 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -71,7 +71,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
@@ -125,7 +125,7 @@ good_area:
case 1: /* read, present */
goto acc_err;
case 0: /* read, not present */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ if (unlikely(!vma_is_accessible(vma)))
goto acc_err;
}
@@ -138,7 +138,7 @@ good_area:
fault = handle_mm_fault(vma, address, flags);
pr_debug("handle_mm_fault returns %x\n", fault);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return 0;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -162,9 +162,6 @@ good_area:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/*
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 242f58ec086b..9606c244b5b8 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -8,7 +8,7 @@ config MICROBLAZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_UNCACHED_SEGMENT if !MMU
+ select ARCH_HAS_DMA_SET_UNCACHED if !MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
@@ -47,8 +47,6 @@ config MICROBLAZE
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE if MMU
select SPARSE_IRQ
- select GENERIC_IRQ_MULTI_HANDLER
- select HANDLE_DOMAIN_IRQ
# Endianness selection
choice
diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore
index 679502d64a97..11a9e229f3c0 100644
--- a/arch/microblaze/boot/.gitignore
+++ b/arch/microblaze/boot/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
linux.bin*
simpleImage.*
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index abb33619299b..2e87a9b6d312 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,40 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += bitops.h
-generic-y += bug.h
-generic-y += bugs.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += hardirq.h
generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += linkage.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += parport.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += serial.h
-generic-y += shmparam.h
generic-y += syscalls.h
generic-y += tlb.h
-generic-y += topology.h
-generic-y += trace_clock.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index cb6ab55d1d01..0a28e80bbab0 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -11,4 +11,7 @@
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
+/* should be defined in each interrupt controller driver */
+extern unsigned int xintc_get_irq(void);
+
#endif /* _ASM_MICROBLAZE_IRQ_H */
diff --git a/arch/microblaze/kernel/.gitignore b/arch/microblaze/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/microblaze/kernel/.gitignore
+++ b/arch/microblaze/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 0b37dde60a1e..903dad822fad 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -20,10 +20,29 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>
+static u32 concurrent_irq;
+
void __irq_entry do_IRQ(struct pt_regs *regs)
{
+ unsigned int irq;
+ struct pt_regs *old_regs = set_irq_regs(regs);
trace_hardirqs_off();
- handle_arch_irq(regs);
+
+ irq_enter();
+ irq = xintc_get_irq();
+next_irq:
+ BUG_ON(!irq);
+ generic_handle_irq(irq);
+
+ irq = xintc_get_irq();
+ if (irq != -1U) {
+ pr_debug("next irq: %u\n", irq);
+ ++concurrent_irq;
+ goto next_irq;
+ }
+
+ irq_exit();
+ set_irq_regs(old_regs);
trace_hardirqs_on();
}
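
With GENERIC_IRQ_MULTI_HANDLER backed out, do_IRQ() polls the Xilinx controller directly: service the first pending source, then keep calling xintc_get_irq() until it returns -1U. A userspace model of that drain loop with a fake controller (names and numbers invented):

#include <stdio.h>

/* Fake controller: returns pending IRQ numbers, then -1U once drained. */
static unsigned int fake_pending[] = { 3, 7, 0xffffffff };
static unsigned int idx;

static unsigned int fake_get_irq(void)
{
	return fake_pending[idx++];
}

static void handle_irq(unsigned int irq)
{
	printf("handling irq %u\n", irq);
}

int main(void)
{
	unsigned int irq = fake_get_irq();

	/* Mirror of the do_IRQ() loop: service until the controller drains. */
	do {
		handle_irq(irq);
		irq = fake_get_irq();
	} while (irq != -1U);

	return 0;
}
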
diff --git a/arch/microblaze/kernel/syscalls/syscallhdr.sh b/arch/microblaze/kernel/syscalls/syscallhdr.sh
index 2e9062a926a3..a914854f8d9f 100644
--- a/arch/microblaze/kernel/syscalls/syscallhdr.sh
+++ b/arch/microblaze/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 8c5f0c332d8b..e09b66e43cb6 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
{
unsigned long addr = (unsigned long)ptr;
@@ -49,11 +49,4 @@ void *uncached_kernel_address(void *ptr)
pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
return (void *)addr;
}
-
-void *cached_kernel_address(void *ptr)
-{
- unsigned long addr = (unsigned long)ptr;
-
- return (void *)(addr & ~UNCACHED_SHADOW_MASK);
-}
#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index e6a810b0c7ad..3248141f8ed5 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -91,7 +91,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
int code = SEGV_MAPERR;
int is_write = error_code & ESR_S;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
regs->ear = address;
regs->esr = error_code;
@@ -217,7 +217,7 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -236,7 +236,6 @@ good_area:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/*
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 58cc4965bd3e..60a58c0015f2 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -433,10 +433,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
pr_debug("Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
- pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
- range.pci_space, range.pci_addr);
- pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
- range.cpu_addr, range.size);
/* If we failed translation or got a zero-sized region
* (some FW try to feed us with non sensical zero sized regions
@@ -486,7 +482,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
range.cpu_addr, range.cpu_addr + range.size - 1,
range.pci_addr,
- (range.pci_space & 0x40000000) ?
+ (range.flags & IORESOURCE_PREFETCH) ?
"Prefetch" : "");
/* We support only 3 memory ranges */
@@ -1121,4 +1117,3 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
{
return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
-
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f8f3dbdd7ad5..690718b3701a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1192,8 +1192,9 @@ config DMA_NONCOHERENT
# significant advantages.
#
select ARCH_HAS_DMA_WRITE_COMBINE
+ select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_UNCACHED_SEGMENT
+ select ARCH_HAS_DMA_SET_UNCACHED
select DMA_NONCOHERENT_MMAP
select DMA_NONCOHERENT_CACHE_SYNC
select NEED_DMA_MAP_STATE
diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore
index a73d6e2c4f64..2adc8581a175 100644
--- a/arch/mips/boot/.gitignore
+++ b/arch/mips/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mkboot
elf2ecoff
vmlinux.*
diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore
index ebae133f1d00..d358395614c9 100644
--- a/arch/mips/boot/compressed/.gitignore
+++ b/arch/mips/boot/compressed/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
ashldi3.c
bswapsi.c
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index 410e61ebaf9e..aa0b2d39c902 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -403,8 +403,8 @@
compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
#address-cells = <1>;
#size-cells = <0>;
- reg-names = "nand";
- reg = <0x41b800 0x400>;
+ reg-names = "nand", "flash-edu";
+ reg = <0x41b800 0x400>, <0x41bc00 0x24>;
interrupt-parent = <&hif_l2_intc>;
interrupts = <24>;
status = "disabled";
diff --git a/arch/mips/boot/tools/.gitignore b/arch/mips/boot/tools/.gitignore
index be0ed065249b..d36dc7cf9115 100644
--- a/arch/mips/boot/tools/.gitignore
+++ b/arch/mips/boot/tools/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
relocs
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index f14ad0538f4e..eea9b613bb74 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -112,7 +112,6 @@ CONFIG_BLK_DEV_TC86C001=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_CHR_DEV_SCH=m
CONFIG_ATA=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 1788ae23bff9..6466e83067b4 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -99,7 +99,6 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOWLEVEL is not set
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 82d942a6026e..638d7cf5ef01 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -99,7 +99,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 370884018aad..7b1fab518317 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -50,7 +50,6 @@ CONFIG_RAID_ATTRS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 92085df3e367..8c223035921f 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -42,7 +42,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ISCSI_TCP=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 59eedf55419d..211bd3d6e6cb 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -239,7 +239,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 8ef612552a19..62b1969b4f55 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -247,7 +247,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index d2a008c9907c..9185e0a0aa45 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -245,7 +245,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 970df6d42728..636311d67a53 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -245,7 +245,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 2c7adea7638f..30d7c3db884e 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -203,7 +203,6 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 4ebd8ce254ce..8643d313890e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -4,23 +4,10 @@ generated-y += syscall_table_32_o32.h
generated-y += syscall_table_64_n32.h
generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
-generic-y += current.h
-generic-y += device.h
-generic-y += emergency-restart.h
generic-y += export.h
-generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
generic-y += parport.h
-generic-y += percpu.h
-generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
generic-y += user.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 41204a49cf95..2c343c346b79 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -1133,7 +1133,7 @@ extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
- struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+ struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/mips/kernel/.gitignore
+++ b/arch/mips/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh
index d2bcfa8f4d1a..2e241e713a7d 100644
--- a/arch/mips/kernel/syscalls/syscallhdr.sh
+++ b/arch/mips/kernel/syscalls/syscallhdr.sh
@@ -32,6 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
- printf "\n"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 71244bf87c3a..8f05dd0a0f4e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -118,12 +118,12 @@ void kvm_arch_hardware_disable(void)
kvm_mips_callbacks->hardware_disable();
}
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
{
return 0;
}
-int kvm_arch_check_processor_compat(void)
+int kvm_arch_check_processor_compat(void *opaque)
{
return 0;
}
@@ -188,12 +188,6 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
return -ENOIOCTLCMD;
}
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
/* Flush whole GPA */
@@ -230,7 +224,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
@@ -984,69 +978,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
return r;
}
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
- if (flush) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
- /* Let implementation handle TLB/GVA invalidation */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
- }
-
- mutex_unlock(&kvm->slots_lock);
- return r;
}
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
- if (flush) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
- /* Let implementation handle TLB/GVA invalidation */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
- }
-
- mutex_unlock(&kvm->slots_lock);
- return r;
+ /* Let implementation handle TLB/GVA invalidation */
+ kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index dc42ffc83825..fcea92d95d86 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -49,16 +49,11 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
dma_cache_wback_inv((unsigned long)page_address(page), size);
}
-void *uncached_kernel_address(void *addr)
+void *arch_dma_set_uncached(void *addr, size_t size)
{
return (void *)(__pa(addr) + UNCAC_BASE);
}
-void *cached_kernel_address(void *addr)
-{
- return __va(addr) - UNCAC_BASE;
-}
-
static inline void dma_sync_virt(void *addr, size_t size,
enum dma_data_direction dir)
{
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 1e8d00793784..f8d62cd83b36 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -44,7 +44,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
const int field = sizeof(unsigned long) * 2;
int si_code;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
@@ -142,7 +142,7 @@ good_area:
goto bad_area;
}
} else {
- if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ if (unlikely(!vma_is_accessible(vma)))
goto bad_area;
}
}
@@ -154,7 +154,7 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -178,7 +178,6 @@ good_area:
tsk->min_flt++;
}
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/*
diff --git a/arch/mips/tools/.gitignore b/arch/mips/tools/.gitignore
index b0209450d9ff..794817dfb389 100644
--- a/arch/mips/tools/.gitignore
+++ b/arch/mips/tools/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
elf-entry
loongson3-llsc-check
diff --git a/arch/mips/vdso/.gitignore b/arch/mips/vdso/.gitignore
index 5286a7d73d79..1f43f6dd8142 100644
--- a/arch/mips/vdso/.gitignore
+++ b/arch/mips/vdso/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.so*
vdso-*image.c
genvdso
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 77eae62036b5..ff1e94299317 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -1,46 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
-generic-y += atomic.h
-generic-y += bitops.h
-generic-y += bug.h
-generic-y += bugs.h
-generic-y += checksum.h
generic-y += cmpxchg.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += export.h
-generic-y += fb.h
generic-y += gpio.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += parport.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += switch_to.h
-generic-y += timex.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += xor.h
-generic-y += unaligned.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
diff --git a/arch/nds32/kernel/.gitignore b/arch/nds32/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/nds32/kernel/.gitignore
+++ b/arch/nds32/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/nds32/kernel/vdso/.gitignore b/arch/nds32/kernel/vdso/.gitignore
index f8b69d84238e..652e31d82582 100644
--- a/arch/nds32/kernel/vdso/.gitignore
+++ b/arch/nds32/kernel/vdso/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S
index f679d3397436..7a6c1cefe3fe 100644
--- a/arch/nds32/kernel/vmlinux.lds.S
+++ b/arch/nds32/kernel/vmlinux.lds.S
@@ -47,6 +47,7 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
*(.fixup)
}
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index 906dfb25353c..0cf0c08c7da2 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -80,7 +80,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
int si_code;
vm_fault_t fault;
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
tsk = current;
@@ -214,7 +214,7 @@ good_area:
* signal first. We do not need to release the mmap_sem because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return;
@@ -246,7 +246,6 @@ good_area:
1, regs, addr);
}
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/* No need to up_read(&mm->mmap_sem) as we would
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 44b5da37e8bd..2fc4ed210b5f 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -2,9 +2,10 @@
config NIOS2
def_bool y
select ARCH_32BIT_OFF_T
+ select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_UNCACHED_SEGMENT
+ select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_NO_SWAP
select TIMER_OF
select GENERIC_ATOMIC64
diff --git a/arch/nios2/boot/.gitignore b/arch/nios2/boot/.gitignore
index 64386a8dedd8..ef37cac5bcc0 100644
--- a/arch/nios2/boot/.gitignore
+++ b/arch/nios2/boot/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmImage
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 68093999bd26..7fe7437555fb 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -1,45 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += atomic.h
-generic-y += barrier.h
-generic-y += bitops.h
-generic-y += bug.h
-generic-y += bugs.h
generic-y += cmpxchg.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += ftrace.h
-generic-y += futex.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += module.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += serial.h
generic-y += spinlock.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/nios2/kernel/.gitignore b/arch/nios2/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/nios2/kernel/.gitignore
+++ b/arch/nios2/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 0ed711e37902..fd887d5f3f9a 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -67,7 +67,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
flush_dcache_range(start, start + size);
}
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
{
unsigned long addr = (unsigned long)ptr;
@@ -75,13 +75,3 @@ void *uncached_kernel_address(void *ptr)
return (void *)ptr;
}
-
-void *cached_kernel_address(void *ptr)
-{
- unsigned long addr = (unsigned long)ptr;
-
- addr &= ~CONFIG_NIOS2_IO_REGION_BASE;
- addr |= CONFIG_NIOS2_KERNEL_REGION_BASE;
-
- return (void *)ptr;
-}
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 6a2e716b959f..ec9d8a9c426f 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -47,7 +47,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
struct mm_struct *mm = tsk->mm;
int code = SEGV_MAPERR;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
cause >>= 2;
@@ -133,7 +133,7 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -157,9 +157,6 @@ good_area:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/*
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 1928e061ff96..8588996165ae 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -7,6 +7,8 @@
config OPENRISC
def_bool y
select ARCH_32BIT_OFF_T
+ select ARCH_HAS_DMA_SET_UNCACHED
+ select ARCH_HAS_DMA_CLEAR_UNCACHED
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select OF
select OF_EARLY_FLATTREE
@@ -14,6 +16,7 @@ config OPENRISC
select HANDLE_DOMAIN_IRQ
select GPIOLIB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_COPY_THREAD_TLS
select SPARSE_IRQ
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
index d8ff4f8ffb88..75f2da324d0e 100644
--- a/arch/openrisc/configs/or1ksim_defconfig
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="or1k-linux-"
CONFIG_NO_HZ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/openrisc/configs/simple_smp_defconfig b/arch/openrisc/configs/simple_smp_defconfig
index 64278992df9c..ff49d868e040 100644
--- a/arch/openrisc/configs/simple_smp_defconfig
+++ b/arch/openrisc/configs/simple_smp_defconfig
@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="or1k-linux-"
CONFIG_LOCALVERSION="-simple-smp"
CONFIG_NO_HZ=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index e12d6c1735a0..ca5987e11053 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,45 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += barrier.h
-generic-y += bug.h
-generic-y += bugs.h
-generic-y += checksum.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += ftrace.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += module.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += preempt.h
generic-y += qspinlock_types.h
generic-y += qspinlock.h
generic-y += qrwlock_types.h
generic-y += qrwlock.h
-generic-y += sections.h
-generic-y += shmparam.h
-generic-y += switch_to.h
-generic-y += topology.h
-generic-y += trace_clock.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index 566f8c4f8047..fae34c60fa88 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -24,6 +24,7 @@
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/arch/openrisc/kernel/.gitignore b/arch/openrisc/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/openrisc/kernel/.gitignore
+++ b/arch/openrisc/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index adec711ad39d..c152a68811dd 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -11,8 +11,6 @@
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* DMA mapping callbacks...
- * As alloc_coherent is the only DMA callback being used currently, that's
- * the only thing implemented properly. The rest need looking into...
*/
#include <linux/dma-noncoherent.h>
@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = {
.pte_entry = page_clear_nocache,
};
-/*
- * Alloc "coherent" memory, which for OpenRISC means simply uncached.
- *
- * This function effectively just calls __get_free_pages, sets the
- * cache-inhibit bit on those pages, and makes sure that the pages are
- * flushed out of the cache before they are used.
- *
- * If the NON_CONSISTENT attribute is set, then this function just
- * returns "normal", cachable memory.
- *
- * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
- * into consideration here, too. All current known implementations of
- * the OR1K support only strongly ordered memory accesses, so that flag
- * is being ignored for now; uncached but write-combined memory is a
- * missing feature of the OR1K.
- */
-void *
-arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
+void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
- unsigned long va;
- void *page;
-
- page = alloc_pages_exact(size, gfp | __GFP_ZERO);
- if (!page)
- return NULL;
-
- /* This gives us the real physical address of the first page. */
- *dma_handle = __pa(page);
-
- va = (unsigned long)page;
+ unsigned long va = (unsigned long)cpu_addr;
+ int error;
/*
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
*/
- if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
- NULL)) {
- free_pages_exact(page, size);
- return NULL;
- }
-
- return (void *)va;
+ error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+ NULL);
+ if (error)
+ return ERR_PTR(error);
+ return cpu_addr;
}
-void
-arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
+void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
- unsigned long va = (unsigned long)vaddr;
+ unsigned long va = (unsigned long)cpu_addr;
/* walk_page_range shouldn't be able to fail here */
WARN_ON(walk_page_range(&init_mm, va, va + size,
&clear_nocache_walk_ops, NULL));
-
- free_pages_exact(vaddr, size);
}
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
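
Unlike MIPS and nios2, openrisc has no fixed uncached window: it rewrites PTEs, so its arch_dma_set_uncached() can fail and reports that via ERR_PTR(), and a matching arch_dma_clear_uncached() (selected through ARCH_HAS_DMA_CLEAR_UNCACHED) undoes the PTE changes at free time. A small model of how a caller would consume such an interface; the ERR_PTR/IS_ERR pair below re-implements the kernel idiom for userspace, and the failing stub is invented:

#include <errno.h>
#include <stdio.h>

/* Minimal ERR_PTR/IS_ERR model, as used throughout the kernel. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error) { return (void *)error; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stub hook: pretend the page-table walk failed with -ENOMEM. */
static void *arch_dma_set_uncached_stub(void *cpu_addr, size_t size)
{
	(void)cpu_addr; (void)size;
	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	char buf[64];
	void *ret = arch_dma_set_uncached_stub(buf, sizeof(buf));

	if (IS_ERR(ret)) {
		/* the allocation path would free the pages and fail */
		printf("set_uncached failed: %ld\n", (long)ret);
		return 1;
	}
	return 0;
}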
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index b06f84f6676f..6bcdca424e11 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -117,12 +117,12 @@ void release_thread(struct task_struct *dead_task)
extern asmlinkage void ret_from_fork(void);
/*
- * copy_thread
+ * copy_thread_tls
* @clone_flags: flags
* @usp: user stack pointer or fn for kernel thread
* @arg: arg to fn for kernel thread; always NULL for userspace thread
* @p: the newly created task
- * @regs: CPU context to copy for userspace thread; always NULL for kthread
+ * @tls: the Thread Local Storage pointer for the new process
*
* At the top of a newly initialized kernel stack are two stacked pt_regs
* structures. The first (topmost) is the userspace context of the thread.
@@ -148,8 +148,8 @@ extern asmlinkage void ret_from_fork(void);
*/
int
-copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *userregs;
struct pt_regs *kregs;
@@ -179,16 +179,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
userregs->sp = usp;
/*
- * For CLONE_SETTLS set "tp" (r10) to the TLS pointer passed to sys_clone.
- *
- * The kernel entry is:
- * int clone (long flags, void *child_stack, int *parent_tid,
- * int *child_tid, struct void *tls)
- *
- * This makes the source r7 in the kernel registers.
+ * For CLONE_SETTLS set "tp" (r10) to the TLS pointer.
*/
if (clone_flags & CLONE_SETTLS)
- userregs->gpr[10] = userregs->gpr[7];
+ userregs->gpr[10] = tls;
userregs->gpr[11] = 0; /* Result from fork() */
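
With HAVE_COPY_THREAD_TLS the TLS pointer arrives as an explicit argument instead of being fished out of the syscall register image (r7 on openrisc); that is what makes the __ARCH_WANT_SYS_CLONE3 addition above safe, since clone3() passes TLS in a struct rather than in the legacy argument register. A toy comparison of the two shapes (the register-file layout is illustrative):

#include <stdio.h>

#define CLONE_SETTLS 0x00080000

struct toy_regs {
	unsigned long gpr[32]; /* illustrative register file */
};

/* Old shape: TLS implicitly read from the clone() argument register. */
static void copy_thread_old(unsigned long flags, struct toy_regs *user)
{
	if (flags & CLONE_SETTLS)
		user->gpr[10] = user->gpr[7]; /* wrong for clone3() */
}

/* New shape: the caller passes TLS explicitly, whatever the syscall ABI. */
static void copy_thread_tls_new(unsigned long flags, struct toy_regs *user,
				unsigned long tls)
{
	if (flags & CLONE_SETTLS)
		user->gpr[10] = tls;
}

int main(void)
{
	struct toy_regs regs = { .gpr = { [7] = 0x1000 } };

	copy_thread_old(CLONE_SETTLS, &regs);
	printf("old: tp = %#lx\n", regs.gpr[10]);
	copy_thread_tls_new(CLONE_SETTLS, &regs, 0x2000);
	printf("new: tp = %#lx\n", regs.gpr[10]);
	return 0;
}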
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 7d518ee8bddc..bd1e660bbc89 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
@@ -113,7 +114,7 @@ asmlinkage __init void secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
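
mmgrab() is the dedicated helper for taking an mm_count reference, which keeps the mm_struct itself alive; it replaces the open-coded atomic_inc() and is distinct from mmget(), which pins the address space via mm_users. A minimal model of the counter it bumps:

#include <stdio.h>

/* Two refcounts on an mm: mm_users pins the address space,
 * mm_count pins the mm_struct itself. */
struct mm_model {
	int mm_users;
	int mm_count;
};

static void mmgrab_model(struct mm_model *mm)
{
	mm->mm_count++; /* what atomic_inc(&mm->mm_count) open-coded */
}

int main(void)
{
	struct mm_model init_mm = { .mm_users = 1, .mm_count = 1 };

	/* A secondary CPU adopting init_mm only needs the struct alive: */
	mmgrab_model(&init_mm);
	printf("mm_count=%d\n", init_mm.mm_count);
	return 0;
}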
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 932a8ec2b520..c11aa2e17ce0 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -55,13 +55,6 @@ void show_stack(struct task_struct *task, unsigned long *esp)
unwind_stack(NULL, esp, print_trace);
}
-void show_trace_task(struct task_struct *tsk)
-{
- /*
- * TODO: SysRq-T trace dump...
- */
-}
-
void show_registers(struct pt_regs *regs)
{
int i;
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 5d4d3a9691d0..8af1cc78c4fb 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -50,7 +50,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
struct vm_area_struct *vma;
int si_code;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
@@ -161,7 +161,7 @@ good_area:
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -181,7 +181,6 @@ good_area:
else
tsk->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/* No need to up_read(&mm->mmap_sem) as we would
diff --git a/arch/parisc/boot/.gitignore b/arch/parisc/boot/.gitignore
index 017d5912ad2d..adf2ae0e7eda 100644
--- a/arch/parisc/boot/.gitignore
+++ b/arch/parisc/boot/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
image
bzImage
diff --git a/arch/parisc/boot/compressed/.gitignore b/arch/parisc/boot/compressed/.gitignore
index 926cd41c1069..b9853a356ab2 100644
--- a/arch/parisc/boot/compressed/.gitignore
+++ b/arch/parisc/boot/compressed/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
firmware.c
real2.S
sizes.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 9ceedf6393c4..e3ee5c0bfe80 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -2,26 +2,8 @@
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += percpu.h
-generic-y += preempt.h
generic-y += seccomp.h
-generic-y += trace_clock.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 197d2247e4db..70fecb8dc4e2 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -10,25 +10,34 @@
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
+ smp_mb();
return *a == 0;
}
-#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+static inline void arch_spin_lock(arch_spinlock_t *x)
+{
+ volatile unsigned int *a;
+
+ a = __ldcw_align(x);
+ while (__ldcw(a) == 0)
+ while (*a == 0)
+ cpu_relax();
+}
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
+ unsigned long flags_dis;
a = __ldcw_align(x);
- while (__ldcw(a) == 0)
+ while (__ldcw(a) == 0) {
+ local_save_flags(flags_dis);
+ local_irq_restore(flags);
while (*a == 0)
- if (flags & PSW_SM_I) {
- local_irq_enable();
- cpu_relax();
- local_irq_disable();
- } else
- cpu_relax();
+ cpu_relax();
+ local_irq_restore(flags_dis);
+ }
}
#define arch_spin_lock_flags arch_spin_lock_flags
@@ -58,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers. With care, they can also be taken in interrupt context.
+ * Unfair locking: writers can be starved indefinitely by reader(s)
*
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
*/
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
+ int ret = 0;
unsigned long flags;
- local_irq_save(flags);
- arch_spin_lock_flags(&rw->lock, flags);
- rw->counter++;
- arch_spin_unlock(&rw->lock);
- local_irq_restore(flags);
-}
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned long flags;
local_irq_save(flags);
- arch_spin_lock_flags(&rw->lock, flags);
- rw->counter--;
- arch_spin_unlock(&rw->lock);
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * Zero means a writer holds the lock exclusively; deny the reader.
+ * Otherwise grant the lock to the first/subsequent reader.
+ */
+ if (rw->counter > 0) {
+ rw->counter--;
+ ret = 1;
+ }
+
+ arch_spin_unlock(&(rw->lock_mutex));
local_irq_restore(flags);
+
+ return ret;
}
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
+ int ret = 0;
unsigned long flags;
- retry:
+
local_irq_save(flags);
- if (arch_spin_trylock(&rw->lock)) {
- rw->counter++;
- arch_spin_unlock(&rw->lock);
- local_irq_restore(flags);
- return 1;
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny the writer; otherwise, if unlocked, grant it to the writer.
+ * Hence the claim that Linux rwlocks are unfair to writers:
+ * they can be starved for an indefinite time by readers.
+ */
+ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ rw->counter = 0;
+ ret = 1;
}
-
+ arch_spin_unlock(&(rw->lock_mutex));
local_irq_restore(flags);
- /* If write-locked, we fail to acquire the lock */
- if (rw->counter < 0)
- return 0;
- /* Wait until we have a realistic chance at the lock */
- while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
+ return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (!arch_read_trylock(rw))
cpu_relax();
+}
- goto retry;
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (!arch_write_trylock(rw))
+ cpu_relax();
}
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
-retry:
- local_irq_save(flags);
- arch_spin_lock_flags(&rw->lock, flags);
- if (rw->counter != 0) {
- arch_spin_unlock(&rw->lock);
- local_irq_restore(flags);
-
- while (rw->counter != 0)
- cpu_relax();
-
- goto retry;
- }
-
- rw->counter = -1; /* mark as write-locked */
- mb();
+ local_irq_save(flags);
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter++;
+ arch_spin_unlock(&(rw->lock_mutex));
local_irq_restore(flags);
}
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
-{
- rw->counter = 0;
- arch_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
- int result = 0;
local_irq_save(flags);
- if (arch_spin_trylock(&rw->lock)) {
- if (rw->counter == 0) {
- rw->counter = -1;
- result = 1;
- } else {
- /* Read-locked. Oh well. */
- arch_spin_unlock(&rw->lock);
- }
- }
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ arch_spin_unlock(&(rw->lock_mutex));
local_irq_restore(flags);
-
- return result;
}
#endif /* __ASM_SPINLOCK_H */
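
The rewritten rwlock, apparently modeled on the ARC port, keeps all state in @counter and serializes access to it with the inner @lock_mutex spinlock: the trylock variants do the real work with IRQs disabled, and the blocking paths reduce to spinning on trylock (the reworked arch_spin_lock_flags() above similarly restores the caller's interrupt state while it spins). A compilable model, with a pthread mutex standing in for the inner spinlock:

#include <pthread.h>
#include <stdio.h>

#define RW_UNLOCKED 0x01000000 /* models __ARCH_RW_LOCK_UNLOCKED__ */

struct rw_model {
	pthread_mutex_t lock_mutex; /* models the inner arch_spinlock_t */
	unsigned int counter;
};

static int read_trylock(struct rw_model *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->lock_mutex);
	if (rw->counter > 0) { /* zero means write-locked */
		rw->counter--;
		ret = 1;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

static int write_trylock(struct rw_model *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->lock_mutex);
	if (rw->counter == RW_UNLOCKED) { /* no readers, no writer */
		rw->counter = 0;
		ret = 1;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

int main(void)
{
	struct rw_model rw = { PTHREAD_MUTEX_INITIALIZER, RW_UNLOCKED };

	printf("reader: %d\n", read_trylock(&rw));  /* 1: granted */
	printf("writer: %d\n", write_trylock(&rw)); /* 0: reader held */
	return 0;
}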
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 42979c5704dc..ca39ee350c3f 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -12,11 +12,19 @@ typedef struct {
#endif
} arch_spinlock_t;
+
+/* counter:
+ * Unlocked : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
typedef struct {
- arch_spinlock_t lock;
- volatile int counter;
+ arch_spinlock_t lock_mutex;
+ volatile unsigned int counter;
} arch_rwlock_t;
-#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
+ .counter = __ARCH_RW_LOCK_UNLOCKED__ }
#endif
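
A worked check of the state table in the new comment: readers decrement from 0x0100_0000, and a writer requires the exact unlocked value, parking the counter at zero.

#include <assert.h>

#define RW_UNLOCKED 0x01000000u /* __ARCH_RW_LOCK_UNLOCKED__ */

int main(void)
{
	unsigned int counter = RW_UNLOCKED;

	counter--;                       /* first reader:  0x00FFFFFF */
	counter--;                       /* second reader: 0x00FFFFFE */
	assert(counter > 0);             /* more readers may enter */
	assert(counter != RW_UNLOCKED);  /* a writer must wait */

	counter += 2;                    /* both readers unlock */
	assert(counter == RW_UNLOCKED);  /* writer may take it (sets 0) */
	return 0;
}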
diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/parisc/kernel/.gitignore
+++ b/arch/parisc/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
index 3c66d5c4d90d..fa28c4c9f972 100644
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -25,6 +25,22 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *entry;
int index = 0, applied = 0;
int num_cpus = num_online_cpus();
+ u32 cond_check;
+
+ cond_check = ALT_COND_ALWAYS |
+ ((num_cpus == 1) ? ALT_COND_NO_SMP : 0) |
+ ((cache_info.dc_size == 0) ? ALT_COND_NO_DCACHE : 0) |
+ ((cache_info.ic_size == 0) ? ALT_COND_NO_ICACHE : 0) |
+ (running_on_qemu ? ALT_COND_RUN_ON_QEMU : 0) |
+ ((split_tlb == 0) ? ALT_COND_NO_SPLIT_TLB : 0) |
+ /*
+ * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR bit
+ * set (bit #61, big endian), we have to flush and sync every
+ * time IO-PDIR is changed in Ike/Astro.
+ */
+ (((boot_cpu_data.cpu_type > pcxw_) &&
+ ((boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) == 0))
+ ? ALT_COND_NO_IOC_FDC : 0);
for (entry = start; entry < end; entry++, index++) {
@@ -38,29 +54,14 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
WARN_ON(!cond);
- if (cond != ALT_COND_ALWAYS && no_alternatives)
+ if ((cond & ALT_COND_ALWAYS) == 0 && no_alternatives)
continue;
pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
index, cond, len, from, replacement);
- if ((cond & ALT_COND_NO_SMP) && (num_cpus != 1))
- continue;
- if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
- continue;
- if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
- continue;
- if ((cond & ALT_COND_RUN_ON_QEMU) && !running_on_qemu)
- continue;
-
- /*
- * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
- * set (bit #61, big endian), we have to flush and sync every
- * time IO-PDIR is changed in Ike/Astro.
- */
- if ((cond & ALT_COND_NO_IOC_FDC) &&
- ((boot_cpu_data.cpu_type <= pcxw_) ||
- (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)))
+ /* Bounce out if none of the conditions are true. */
+ if ((cond & cond_check) == 0)
continue;
/* Want to replace pdtlb by a pdtlb,l instruction? */
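
Rather than testing each ALT_COND_* flag against system state for every alternatives entry, the patch computes a cond_check mask of all conditions that hold on this machine once, so each entry costs a single AND; note that ALT_COND_ALWAYS must be part of the mask (and the no_alternatives test above relaxes to a bit test) for unconditional entries to keep firing. A standalone model of the idea, with invented bit values:

#include <stdio.h>

#define ALT_COND_ALWAYS    0x01 /* illustrative bit values */
#define ALT_COND_NO_SMP    0x02
#define ALT_COND_NO_DCACHE 0x04

int main(void)
{
	int num_cpus = 1, dcache_size = 32768;
	unsigned int cond_check;
	unsigned int entries[] = { ALT_COND_ALWAYS, ALT_COND_NO_SMP,
				   ALT_COND_NO_DCACHE };

	/* Built once: every condition that is true on this system. */
	cond_check = ALT_COND_ALWAYS |
		     (num_cpus == 1 ? ALT_COND_NO_SMP : 0) |
		     (dcache_size == 0 ? ALT_COND_NO_DCACHE : 0);

	for (unsigned int i = 0; i < 3; i++) {
		/* One test per entry replaces the old if-ladder. */
		if ((entries[i] & cond_check) == 0)
			printf("entry %u skipped\n", i);
		else
			printf("entry %u patched\n", i);
	}
	return 0;
}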
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index e5fcfb70cc7c..e76c86619949 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -560,33 +560,23 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto out;
}
-static struct irqaction timer_action = {
- .handler = timer_interrupt,
- .name = "timer",
- .flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
-};
-
-#ifdef CONFIG_SMP
-static struct irqaction ipi_action = {
- .handler = ipi_interrupt,
- .name = "IPI",
- .flags = IRQF_PERCPU,
-};
-#endif
-
static void claim_cpu_irqs(void)
{
+ unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
int i;
+
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
irq_set_chip_and_handler(i, &cpu_interrupt_type,
handle_percpu_irq);
}
irq_set_handler(TIMER_IRQ, handle_percpu_irq);
- setup_irq(TIMER_IRQ, &timer_action);
+ if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
+ pr_err("Failed to register timer interrupt\n");
#ifdef CONFIG_SMP
irq_set_handler(IPI_IRQ, handle_percpu_irq);
- setup_irq(IPI_IRQ, &ipi_action);
+ if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
+ pr_err("Failed to register IPI interrupt\n");
#endif
}
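
setup_irq() with a static struct irqaction existed to register interrupts before the memory allocators were ready; request_irq() is now usable that early, so the static structures go away, the flags move into the call, and the caller is forced to handle failure. A userspace model of the call shape (request_irq_model() and the flag values are invented stand-ins for the kernel API):

#include <stdio.h>

typedef void (*irq_handler_model)(int irq, void *dev);

/* Models request_irq(): returns 0 on success, nonzero on failure. */
static int request_irq_model(unsigned int irq, irq_handler_model handler,
			     unsigned long flags, const char *name, void *dev)
{
	(void)handler; (void)flags; (void)dev;
	printf("registered IRQ %u as \"%s\"\n", irq, name);
	return 0;
}

static void timer_interrupt_model(int irq, void *dev)
{
	(void)irq; (void)dev;
}

int main(void)
{
	/* Flags travel as arguments; no static struct irqaction needed. */
	unsigned long flags = 0x1 | 0x2; /* stand-ins for IRQF_TIMER|IRQF_PERCPU */

	if (request_irq_model(0, timer_interrupt_model, flags, "timer", NULL))
		fprintf(stderr, "Failed to register timer interrupt\n");
	return 0;
}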
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 97ac707c6bff..f05c9d5b6b9e 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -935,7 +935,7 @@ ENTRY(lws_table)
END(lws_table)
/* End of lws table */
-#define __SYSCALL(nr, entry, nargs) ASM_ULONG_INSN entry
+#define __SYSCALL(nr, entry) ASM_ULONG_INSN entry
.align 8
ENTRY(sys_call_table)
.export sys_call_table,data
diff --git a/arch/parisc/kernel/syscalls/syscallhdr.sh b/arch/parisc/kernel/syscalls/syscallhdr.sh
index 50242b747d7c..730db288fe54 100644
--- a/arch/parisc/kernel/syscalls/syscallhdr.sh
+++ b/arch/parisc/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh
index 45b5bae26240..f7393a7b18aa 100644
--- a/arch/parisc/kernel/syscalls/syscalltbl.sh
+++ b/arch/parisc/kernel/syscalls/syscalltbl.sh
@@ -13,10 +13,10 @@ emit() {
t_entry="$3"
while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
t_nxt=$((t_nxt+1))
done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+ printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
}
grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index adbd5e2144a3..86e8c848f3d7 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -274,7 +274,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
if (!mm)
goto no_context;
- flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
@@ -304,7 +304,7 @@ good_area:
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -328,14 +328,12 @@ good_area:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
-
/*
* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
-
+ flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6f40af294685..5fc45364e86e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -980,6 +980,7 @@ config PPC_SECURE_BOOT
bool
depends on PPC_POWERNV
depends on IMA_ARCH_POLICY
+ imply IMA_SECURE_AND_OR_TRUSTED_BOOT
help
Systems with firmware secure boot enabled need to define security
policies to extend secure boot to the OS. This config allows a user
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index f35730548e42..f310c32e88a4 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -239,10 +239,8 @@ KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
KBUILD_CFLAGS += $(call cc-option,-mno-spe)
KBUILD_CFLAGS += $(call cc-option,-mspe=no)
-# FIXME: the module load should be taught about the additional relocs
-# generated by this.
-# revert to pre-gcc-4.4 behaviour of .eh_frame
-KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+# Don't emit .eh_frame since we have no use for it
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Never use string load/store instructions as they are
# often slow when they are implemented at all
@@ -298,6 +296,7 @@ $(BOOT_TARGETS2): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+PHONY += bootwrapper_install
bootwrapper_install:
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
@@ -403,9 +402,11 @@ define archhelp
@echo ' (minus the .dts extension).'
endef
+PHONY += install
install:
$(Q)$(MAKE) $(build)=$(boot) install
+PHONY += vdso_install
vdso_install:
ifdef CONFIG_PPC64
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
@@ -425,6 +426,7 @@ archheaders:
ifdef CONFIG_STACKPROTECTOR
prepare: stack_protector_prepare
+PHONY += stack_protector_prepare
stack_protector_prepare: prepare0
ifdef CONFIG_PPC64
$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
@@ -436,10 +438,12 @@ endif
ifdef CONFIG_SMP
prepare: task_cpu_prepare
+PHONY += task_cpu_prepare
task_cpu_prepare: prepare0
$(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h))
endif
+PHONY += checkbin
# Check toolchain versions:
# - gcc-4.6 is the minimum kernel-wide version so nothing required.
checkbin:
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 6610665fcf5e..1eee61b82341 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
addnote
decompress_inflate.c
empty.c
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 0556bf4fc9e9..c53a1b8bba8b 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -445,6 +445,8 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
zInstall: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
+PHONY += install zInstall
+
# anything not in $(targets)
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 92608f34d312..1d83966f5ef6 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -44,9 +44,6 @@ p_end: .long _end
p_pstack: .long _platform_stack_top
#endif
- .globl _zimage_start
- /* Clang appears to require the .weak directive to be after the symbol
- * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
.weak _zimage_start
_zimage_start:
.globl _zimage_start_lib
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
index 9575a38c9155..b507df6ac69f 100644
--- a/arch/powerpc/configs/85xx-hw.config
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -2,7 +2,6 @@ CONFIG_AQUANTIA_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_ATA=y
CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_BLK_DEV_SR=y
CONFIG_BROADCOM_PHY=y
CONFIG_C293_PCIE=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index f6d140f2d922..200bb1ecb560 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -44,7 +44,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index 502a75d49789..a4a805b87469 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -42,7 +42,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index fbfcc85e4dc0..a68c7f3af10e 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -62,7 +62,6 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 2975e64629aa..161351a18517 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -41,7 +41,6 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_IPR=y
CONFIG_ATA=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 4b6d31d4474e..08b7f4cef243 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -60,7 +60,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_CHR_DEV_OSST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index f492e7d35925..05e325ca3fbd 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -117,7 +117,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 71749377d164..df8bdbaa5d8f 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -108,7 +108,6 @@ CONFIG_BLK_DEV_NVME=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 7e68cb222c7b..bae8170d7401 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -110,7 +110,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 0d746774c2bd..33a01a9e86be 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -60,7 +60,6 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 3e2f44f38ac5..feb5d47d8d1e 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -368,7 +368,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 6b68109e248f..0bea4d3ffb85 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -97,7 +97,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 1b6bdad36b13..ad6739ac63dc 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -84,7 +84,6 @@ CONFIG_EEPROM_AT24=m
# CONFIG_OCXL is not set
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index d0a23d0db863..dadbcf3a0b1e 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -3,12 +3,8 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
-generic-y += div64.h
-generic-y += dma-mapping.h
generic-y += export.h
-generic-y += irq_regs.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += preempt.h
generic-y += vtime.h
generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 983c0084fb3f..7d81e86a1e5d 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -97,6 +97,10 @@ ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
unsigned long __init early_init(unsigned long dt_ptr);
void __init machine_init(u64 dt_ptr);
#endif
+long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
+notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
@@ -104,14 +108,6 @@ long sys_switch_endian(void);
notrace unsigned int __check_irq_replay(void);
void notrace restore_interrupts(void);
-/* ptrace */
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
-
-/* process */
-void restore_math(struct pt_regs *regs);
-void restore_tm_state(struct pt_regs *regs);
-
/* prom_init (OpenFirmware) */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
unsigned long pp,
@@ -122,9 +118,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
void __init early_setup(unsigned long dt_ptr);
void early_setup_secondary(void);
-/* time */
-void accumulate_stolen_time(void);
-
/* misc runtime */
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
index 2a0a467d2985..34a7215ae81e 100644
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ b/arch/powerpc/include/asm/book3s/32/hash.h
@@ -17,9 +17,9 @@
* updating the accessed and modified bits in the page table tree.
*/
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_USER 0x001 /* usermode access allowed */
+#define _PAGE_RW 0x002 /* software: user write access allowed */
+#define _PAGE_PRESENT 0x004 /* software: pte contains a translation */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
@@ -27,7 +27,7 @@
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_HASHPTE 0x400 /* hash_page has made an HPTE for this pte */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
#ifdef CONFIG_PTE_64BIT
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 5b39c11e884a..7549393c4c43 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -366,10 +366,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
- ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
- (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte)
+#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
+static inline void pte_unmap(pte_t *pte) { }
/*
* Encode and decode a swap entry.
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 8fd8599c9395..3f9ae3585ab9 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -156,6 +156,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int hash__has_transparent_hugepage(void);
#endif
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ BUG();
+ return pmd;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index d1d9177d9ebd..0729c034e56f 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -246,7 +246,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
*/
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
- return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
+ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
(_PAGE_PTE | H_PAGE_THP_HUGE));
}
@@ -272,6 +272,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
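
The hash__pmd_trans_huge() change adds _PAGE_DEVMAP to the mask while the compared value keeps only _PAGE_PTE | H_PAGE_THP_HUGE, so a devmap PMD (which hash__pmd_mkdevmap() above stamps with all three bits) no longer tests as a transparent hugepage. The bit arithmetic, with illustrative flag values:

#include <assert.h>

/* Illustrative stand-ins for the real PTE flag values. */
#define _PAGE_PTE        0x1
#define H_PAGE_THP_HUGE  0x2
#define _PAGE_DEVMAP     0x4

static int pmd_trans_huge_new(unsigned long pmd)
{
	/* the devmap bit is in the mask but not the expected value */
	return (pmd & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
	       (_PAGE_PTE | H_PAGE_THP_HUGE);
}

int main(void)
{
	unsigned long thp = _PAGE_PTE | H_PAGE_THP_HUGE;
	unsigned long devmap = thp | _PAGE_DEVMAP; /* hash__pmd_mkdevmap() */

	assert(pmd_trans_huge_new(thp));     /* real THP: yes */
	assert(!pmd_trans_huge_new(devmap)); /* devmap PMD: no  */
	return 0;
}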
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 90dd3a3fc8c7..3bcef989a35d 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -3,6 +3,7 @@
#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
#include <linux/const.h>
+#include <asm/reg.h>
#define AMR_KUAP_BLOCK_READ UL(0x4000000000000000)
#define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000)
@@ -56,7 +57,20 @@
#ifdef CONFIG_PPC_KUAP
-#include <asm/reg.h>
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
+
+static inline void kuap_restore_amr(struct pt_regs *regs)
+{
+ if (mmu_has_feature(MMU_FTR_RADIX_KUAP))
+ mtspr(SPRN_AMR, regs->kuap);
+}
+
+static inline void kuap_check_amr(void)
+{
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
+ WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
+}
/*
* We support individually allowing read or write, but we don't support nesting
@@ -127,6 +141,14 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
+#else /* CONFIG_PPC_KUAP */
+static inline void kuap_restore_amr(struct pt_regs *regs)
+{
+}
+
+static inline void kuap_check_amr(void)
+{
+}
#endif /* CONFIG_PPC_KUAP */
#endif /* __ASSEMBLY__ */
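
kuap_restore_amr() and kuap_check_amr() let the new C interrupt/syscall exit code (see the prototypes added to asm-prototypes.h above) touch the AMR without any #ifdef at the call sites, because the CONFIG_PPC_KUAP=n build supplies empty inlines. A model of the no-op-stub pattern in isolation (the call site is an assumed shape, not the actual exit path):

#include <stdio.h>

#define CONFIG_KUAP_MODEL 0 /* flip to 1 to model CONFIG_PPC_KUAP=y */

#if CONFIG_KUAP_MODEL
static void kuap_check_model(void) { printf("AMR checked\n"); }
#else
/* The stub keeps the caller ifdef-free; the compiler removes the call. */
static void kuap_check_model(void) { }
#endif

static void interrupt_exit_model(void)
{
	kuap_check_model(); /* unconditional call, zero cost when off */
	printf("returning from interrupt\n");
}

int main(void)
{
	interrupt_exit_model();
	return 0;
}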
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 201a69e6a355..368b136517e0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1303,7 +1303,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm);
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
- return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+ if (radix_enabled())
+ return radix__pmd_mkdevmap(pmd);
+ return hash__pmd_mkdevmap(pmd);
}
static inline int pmd_devmap(pmd_t pmd)
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index d97db3ad9aae..a1c60d5b50af 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -263,6 +263,11 @@ static inline int radix__has_transparent_hugepage(void)
}
#endif
+static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 72b81015cebe..609cab1d58f2 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -97,7 +97,7 @@ static inline u32 l1_icache_bytes(void)
#endif
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 4a1c9f0200e1..e92191b390f3 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -65,17 +65,13 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
- if (IS_ENABLED(CONFIG_PPC64)) {
+ if (IS_ENABLED(CONFIG_PPC64))
mb(); /* sync */
- isync();
- }
for (i = 0; i < size >> shift; i++, addr += bytes)
dcbf(addr);
mb(); /* sync */
- if (IS_ENABLED(CONFIG_PPC64))
- isync();
}
/*
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 2431b4ada2fa..0fccd5ea1e9a 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -43,9 +43,12 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
*/
#ifdef CONFIG_PPC64
#define get_accounting(tsk) (&get_paca()->accounting)
+#define raw_get_accounting(tsk) (&local_paca->accounting)
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+
#else
#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
+#define raw_get_accounting(tsk) get_accounting(tsk)
/*
* Called from the context switch with interrupts disabled, to charge all
* accumulated times to the current process, and to prepare accounting on
@@ -60,6 +63,36 @@ static inline void arch_vtime_task_switch(struct task_struct *prev)
}
#endif
+/*
+ * account_cpu_user_entry/exit runs "unreconciled", so it can't trace
+ * and can't use get_paca()
+ */
+static notrace inline void account_cpu_user_entry(void)
+{
+ unsigned long tb = mftb();
+ struct cpu_accounting_data *acct = raw_get_accounting(current);
+
+ acct->utime += (tb - acct->starttime_user);
+ acct->starttime = tb;
+}
+
+static notrace inline void account_cpu_user_exit(void)
+{
+ unsigned long tb = mftb();
+ struct cpu_accounting_data *acct = raw_get_accounting(current);
+
+ acct->stime += (tb - acct->starttime);
+ acct->starttime_user = tb;
+}
+
#endif /* __KERNEL__ */
+#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void account_cpu_user_entry(void)
+{
+}
+static inline void account_cpu_user_exit(void)
+{
+}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
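
The new helpers slice the timebase at each kernel boundary: entry closes the user interval (tb - starttime_user accrues to utime) and opens a system one, and exit does the reverse. They are notrace and go through raw_get_accounting()/local_paca because they run before the entry code has reconciled the irq state, where tracing and get_paca() are unsafe. A userspace model of the bookkeeping, with a plain counter standing in for mftb():

#include <stdio.h>

struct acct_model {
	unsigned long utime, stime;
	unsigned long starttime, starttime_user;
};

static unsigned long tb; /* stands in for mftb() */

static void user_entry(struct acct_model *a) /* kernel entry */
{
	a->utime += tb - a->starttime_user;
	a->starttime = tb;
}

static void user_exit(struct acct_model *a) /* return to user */
{
	a->stime += tb - a->starttime;
	a->starttime_user = tb;
}

int main(void)
{
	struct acct_model a = { 0, 0, 0, 0 };

	tb = 100; user_entry(&a); /* 100 ticks charged as user time */
	tb = 130; user_exit(&a);  /* 30 ticks charged as system time */
	printf("utime=%lu stime=%lu\n", a.utime, a.stime);
	return 0;
}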
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index 1b4f0254868f..6161a9596196 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -151,10 +151,9 @@
#define DMA2_EXT_REG 0x4D6
#ifndef __powerpc64__
- /* in arch/ppc/kernel/setup.c -- Cort */
+ /* in arch/powerpc/kernel/setup_32.c -- Cort */
extern unsigned int DMA_MODE_WRITE;
extern unsigned int DMA_MODE_READ;
- extern unsigned long ISA_DMA_THRESHOLD;
#else
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
index 3d76e1c388c2..28c3d936fdf3 100644
--- a/arch/powerpc/include/asm/drmem.h
+++ b/arch/powerpc/include/asm/drmem.h
@@ -27,12 +27,12 @@ struct drmem_lmb_info {
extern struct drmem_lmb_info *drmem_info;
#define for_each_drmem_lmb_in_range(lmb, start, end) \
- for ((lmb) = (start); (lmb) <= (end); (lmb)++)
+ for ((lmb) = (start); (lmb) < (end); (lmb)++)
#define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \
&drmem_info->lmbs[0], \
- &drmem_info->lmbs[drmem_info->n_lmbs - 1])
+ &drmem_info->lmbs[drmem_info->n_lmbs])
/*
* The of_drconf_cell_v1 struct defines the layout of the LMB data
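
The iterator change swaps an inclusive bound (&lmbs[n_lmbs - 1], compared with <=) for the conventional half-open form (&lmbs[n_lmbs], compared with <). Besides matching the usual one-past-the-end idiom, the half-open form never materializes a pointer one element before the array, which is undefined in C when n_lmbs is 0. A compilable demonstration:

#include <stdio.h>

struct lmb_model { int id; };

/* Old, inclusive form, shown for contrast only: */
#define for_each_lmb_old(lmb, start, last) \
	for ((lmb) = (start); (lmb) <= (last); (lmb)++)
/* New, half-open form, as in the patch: */
#define for_each_lmb_new(lmb, start, end) \
	for ((lmb) = (start); (lmb) < (end); (lmb)++)

int main(void)
{
	struct lmb_model lmbs[4] = { {0}, {1}, {2}, {3} };
	struct lmb_model *lmb;
	int visits = 0;
	unsigned int n = 0; /* an empty drmem array */

	/* Nothing is visited and no out-of-bounds pointer
	 * (&lmbs[n - 1]) is ever formed. */
	for_each_lmb_new(lmb, &lmbs[0], &lmbs[n])
		visits++;
	printf("empty: visited %d (expected 0)\n", visits);

	n = 4;
	for_each_lmb_new(lmb, &lmbs[0], &lmbs[n])
		visits++;
	printf("full: visited %d (expected 4)\n", visits);
	return 0;
}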
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 6f9b2a12540a..964a54292b36 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -215,7 +215,7 @@ enum {
struct eeh_ops {
char *name;
int (*init)(void);
- void* (*probe)(struct pci_dn *pdn, void *data);
+ struct eeh_dev *(*probe)(struct pci_dev *pdev);
int (*set_option)(struct eeh_pe *pe, int option);
int (*get_pe_addr)(struct eeh_pe *pe);
int (*get_state)(struct eeh_pe *pe, int *delay);
@@ -301,11 +301,7 @@ int __exit eeh_ops_unregister(const char *name);
int eeh_check_failure(const volatile void __iomem *token);
int eeh_dev_check_failure(struct eeh_dev *edev);
void eeh_addr_cache_init(void);
-void eeh_add_device_early(struct pci_dn *);
-void eeh_add_device_tree_early(struct pci_dn *);
-void eeh_add_device_late(struct pci_dev *);
-void eeh_add_device_tree_late(struct pci_bus *);
-void eeh_add_sysfs_files(struct pci_bus *);
+void eeh_probe_device(struct pci_dev *pdev);
void eeh_remove_device(struct pci_dev *);
int eeh_unfreeze_pe(struct eeh_pe *pe);
int eeh_pe_reset_and_recover(struct eeh_pe *pe);
@@ -360,15 +356,7 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
static inline void eeh_addr_cache_init(void) { }
-static inline void eeh_add_device_early(struct pci_dn *pdn) { }
-
-static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
-
-static inline void eeh_add_device_late(struct pci_dev *dev) { }
-
-static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
-
-static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+static inline void eeh_probe_device(struct pci_dev *dev) { }
static inline void eeh_remove_device(struct pci_dev *dev) { }
@@ -376,6 +364,14 @@ static inline void eeh_remove_device(struct pci_dev *dev) { }
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH)
+void pseries_eeh_init_edev(struct pci_dn *pdn);
+void pseries_eeh_init_edev_recursive(struct pci_dn *pdn);
+#else
+static inline void pseries_eeh_init_edev(struct pci_dn *pdn) { }
+static inline void pseries_eeh_init_edev_recursive(struct pci_dn *pdn) { }
+#endif
+
#ifdef CONFIG_PPC64
/*
* MMIO read/write operations with EEH support.
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 33f4f72eb035..47bd4ea0837d 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -33,11 +33,7 @@
#include <asm/feature-fixups.h>
/* PACA save area size in u64 units (exgen, exmc, etc) */
-#if defined(CONFIG_RELOCATABLE)
#define EX_SIZE 10
-#else
-#define EX_SIZE 9
-#endif
/*
* maximum recursive depth of MCE exceptions
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e3a905e3d573..e0e71777961f 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -52,7 +52,7 @@
#ifndef __ASSEMBLY__
extern void replay_system_reset(void);
-extern void __replay_interrupt(unsigned int vector);
+extern void replay_soft_interrupts(void);
extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
@@ -228,9 +228,13 @@ static inline bool arch_irqs_disabled(void)
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable() wrtee(MSR_EE)
#define __hard_irq_disable() wrtee(0)
+#define __hard_EE_RI_disable() wrtee(0)
+#define __hard_RI_enable() do { } while (0)
#else
#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
+#define __hard_EE_RI_disable() __mtmsrd(0, 1)
+#define __hard_RI_enable() __mtmsrd(MSR_RI, 1)
#endif
#define hard_irq_disable() do { \
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 635fb154b33f..a3633560493b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -150,4 +150,7 @@
#define KVM_INST_FETCH_FAILED -1
+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
index 5a9834e0e2d1..9cb7d8be2366 100644
--- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -5,6 +5,7 @@
#ifdef CONFIG_PPC_UV
int kvmppc_uvmem_init(void);
void kvmppc_uvmem_free(void);
+bool kvmppc_uvmem_available(void);
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
void kvmppc_uvmem_slot_free(struct kvm *kvm,
const struct kvm_memory_slot *slot);
@@ -30,6 +31,11 @@ static inline int kvmppc_uvmem_init(void)
static inline void kvmppc_uvmem_free(void) { }
+static inline bool kvmppc_uvmem_available(void)
+{
+ return false;
+}
+
static inline int
kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 6e8b8ffd06ad..1dc63101ffe1 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -303,13 +303,12 @@ struct kvm_arch {
u8 radix;
u8 fwnmi_enabled;
u8 secure_guest;
+ u8 svm_enabled;
bool threads_indep;
bool nested_enable;
pgd_t *pgtable;
u64 process_table;
struct dentry *debugfs_dir;
- struct dentry *htab_dentry;
- struct dentry *radix_dentry;
struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -830,7 +829,6 @@ struct kvm_vcpu_arch {
struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
struct dentry *debugfs_dir;
- struct dentry *debugfs_timings;
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index bc2494e5710a..94f5a32acaf1 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -107,8 +107,6 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
@@ -200,14 +198,11 @@ extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
- struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- unsigned long npages);
+ struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
@@ -280,7 +275,8 @@ struct kvmppc_ops {
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
int (*prepare_memory_region)(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change);
void (*commit_memory_region)(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
@@ -291,11 +287,7 @@ struct kvmppc_ops {
int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
- void (*mmu_destroy)(struct kvm_vcpu *vcpu);
- void (*free_memslot)(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
- int (*create_memslot)(struct kvm_memory_slot *slot,
- unsigned long npages);
+ void (*free_memslot)(struct kvm_memory_slot *slot);
int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm);
int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
@@ -321,6 +313,7 @@ struct kvmppc_ops {
int size);
int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
int size);
+ int (*enable_svm)(struct kvm *kvm);
int (*svm_off)(struct kvm *kvm);
};
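The new svm_enabled flag, the enable_svm op and kvmppc_uvmem_available() together form one feature gate. A hedged reconstruction of the HV-side hook, which is added elsewhere in this series and not shown in these hunks:

    /* Reconstructed sketch of the HV backend behind ->enable_svm */
    static int kvmhv_enable_svm(struct kvm *kvm)
    {
            if (!kvmppc_uvmem_available())  /* no ultravisor backing memory */
                    return -EINVAL;

            /* kvm == NULL means the caller is only probing the capability */
            if (kvm)
                    kvm->arch.svm_enabled = 1;

            return 0;
    }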
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index 6a6ddaabdb34..376a395daf32 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -218,6 +218,8 @@ extern void machine_check_queue_event(void);
extern void machine_check_print_event_info(struct machine_check_event *evt,
bool user_mode, bool in_guest);
unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr);
+extern void mce_common_process_ue(struct pt_regs *regs,
+ struct mce_error_info *mce_err);
#ifdef CONFIG_PPC_BOOK3S_64
void flush_and_reload_slb(void);
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 60c4d829152e..b04ba257fddb 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -372,10 +372,8 @@ static inline int pte_young(pte_t pte)
#define pte_offset_kernel(dir, addr) \
(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
pte_index(addr))
-#define pte_offset_map(dir, addr) \
- ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
- (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte)
+#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
+static inline void pte_unmap(pte_t *pte) { }
/*
* Encode and decode a swap entry.
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index c1f25a760eb1..1dffa3cb16ba 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -1067,6 +1067,7 @@ enum {
OPAL_REBOOT_PLATFORM_ERROR = 1,
OPAL_REBOOT_FULL_IPL = 2,
OPAL_REBOOT_MPIPL = 3,
+ OPAL_REBOOT_FAST = 4,
};
/* Argument to OPAL_PCI_TCE_KILL */
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 7426d7a90e1e..eed3954082fa 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -32,7 +32,7 @@
do { \
(regs)->result = 0; \
(regs)->nip = __ip; \
- (regs)->gpr[1] = current_stack_pointer(); \
+ (regs)->gpr[1] = current_stack_frame(); \
asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
} while (0)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 8cc543ed114c..b1f1d5339735 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,6 +41,25 @@ struct mm_struct;
#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC32
+static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
+{
+ return pmd_offset(pud_offset(pgd_offset(mm, va), va), va);
+}
+
+static inline pmd_t *pmd_ptr_k(unsigned long va)
+{
+ return pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+}
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+ pmd_t *pmd = pmd_ptr_k(vaddr);
+
+ return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
+}
+#endif
+
#include <asm/tlbflush.h>
/* Keep these as macros to avoid include dependency mess */
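The helpers above collapse the usual pgd/pud/pmd walk into a single call. A short sketch of a PPC32 caller (kernel_addr_mapped() is hypothetical):

    /* Sketch: does a kernel address currently have a present PTE? */
    static bool kernel_addr_mapped(unsigned long addr)
    {
            pte_t *ptep = virt_to_kpte(addr);  /* NULL when the PMD is empty */

            return ptep && pte_present(*ptep);
    }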
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index ee3ada66deb5..e0195e6b892b 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -138,6 +138,9 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc(regs) instruction_pointer(regs)
#endif
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
@@ -276,6 +279,8 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#endif /* __ASSEMBLY__ */
#ifndef __powerpc64__
+/* We need PT_SOFTE defined at all times to avoid #ifdefs */
+#define PT_SOFTE PT_MQ
#else /* __powerpc64__ */
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1aa46dff0957..da5cab038e25 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1448,7 +1448,9 @@ static inline void mtsrin(u32 val, u32 idx)
#define proc_trap() asm volatile("trap")
-extern unsigned long current_stack_pointer(void);
+extern unsigned long current_stack_frame(void);
+
+register unsigned long current_stack_pointer asm("r1");
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);
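current_stack_pointer is now a register variable bound to r1, so reading it is free, while the renamed current_stack_frame() keeps the old call-based semantics. A rough illustration (stack_bytes_free() is hypothetical, and the mask trick assumes a THREAD_SIZE-aligned stack):

    /* Sketch: r1 is read directly, no function call needed */
    static unsigned long stack_bytes_free(void)
    {
            /* the stack grows down inside a THREAD_SIZE-aligned region,
             * so the low bits of r1 approximate the space still free */
            return current_stack_pointer & (THREAD_SIZE - 1);
    }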
diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
index e9f81bb3f83b..f798e80e4106 100644
--- a/arch/powerpc/include/asm/setjmp.h
+++ b/arch/powerpc/include/asm/setjmp.h
@@ -7,7 +7,9 @@
#define JMP_BUF_LEN 23
-extern long setjmp(long *) __attribute__((returns_twice));
-extern void longjmp(long *, long) __attribute__((noreturn));
+typedef long jmp_buf[JMP_BUF_LEN];
+
+extern int setjmp(jmp_buf env) __attribute__((returns_twice));
+extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
#endif /* _ASM_POWERPC_SETJMP_H */
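With jmp_buf now a real type, callers follow the standard C idiom. A hedged sketch (probe_value() is hypothetical; the fault-handler wiring that performs the longjmp() is omitted):

    static jmp_buf bus_error_jmp;   /* 23 longs, per JMP_BUF_LEN */

    /* Sketch: read a possibly-faulting address under setjmp() protection */
    static int probe_value(unsigned long *addr, unsigned long *val)
    {
            if (setjmp(bus_error_jmp) != 0)
                    return -EFAULT;  /* a fault handler longjmp()ed here */

            *val = *addr;
            return 0;
    }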
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index 0803ca8b9149..99e1c6de27bc 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -6,4 +6,7 @@
#include <uapi/asm/signal.h>
#include <uapi/asm/ptrace.h>
+struct pt_regs;
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
+
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 5b03d8a82409..b867b58b1093 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -5,6 +5,7 @@
#ifndef _ASM_POWERPC_SWITCH_TO_H
#define _ASM_POWERPC_SWITCH_TO_H
+#include <linux/sched.h>
#include <asm/reg.h>
struct thread_struct;
@@ -22,6 +23,16 @@ extern void switch_booke_debug_regs(struct debug_reg *new_debug);
extern int emulate_altivec(struct pt_regs *);
+#ifdef CONFIG_PPC_BOOK3S_64
+void restore_math(struct pt_regs *regs);
+#else
+static inline void restore_math(struct pt_regs *regs)
+{
+}
+#endif
+
+void restore_tm_state(struct pt_regs *regs);
+
extern void flush_all_to_thread(struct task_struct *);
extern void giveup_all(struct task_struct *);
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 08dbe3e6831c..39ce95016a3a 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -24,7 +24,6 @@ extern struct clock_event_device decrementer_clockevent;
extern void generic_calibrate_decr(void);
-extern void hdec_interrupt(struct pt_regs *regs);
/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
@@ -195,5 +194,8 @@ DECLARE_PER_CPU(u64, decrementers_next_tb);
/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);
+/* SPLPAR */
+void accumulate_stolen_time(void);
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */
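accumulate_stolen_time() gains a prototype because the dispatch-trace-log check deleted from entry_64.S below now runs in C. A hedged reconstruction of that check:

    /* Sketch: the SPLPAR stolen-time test as it moves from asm to C */
    static inline void check_stolen_time(void)
    {
            if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
                firmware_has_feature(FW_FEATURE_SPLPAR)) {
                    struct lppaca *lp = local_paca->lppaca_ptr;

                    if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
                            accumulate_stolen_time();
            }
    }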
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 2f7e1ea5089e..2db7ba789720 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -98,7 +98,6 @@ extern int stop_topology_update(void);
extern int prrn_is_enabled(void);
extern int find_and_online_cpu_nid(int cpu);
extern int timed_topology_update(int nsecs);
-extern void __init shared_proc_topology_init(void);
#else
static inline int start_topology_update(void)
{
@@ -121,9 +120,6 @@ static inline int timed_topology_update(int nsecs)
return 0;
}
-#ifdef CONFIG_SMP
-static inline void shared_proc_topology_init(void) {}
-#endif
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#include <asm-generic/topology.h>
@@ -134,7 +130,13 @@ static inline void shared_proc_topology_init(void) {}
#ifdef CONFIG_PPC64
#include <asm/smp.h>
+#ifdef CONFIG_PPC_SPLPAR
+int get_physical_package_id(int cpu);
+#define topology_physical_package_id(cpu) (get_physical_package_id(cpu))
+#else
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
+#endif
+
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
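cpu_to_chip_id() can return -1 on shared-processor LPARs, which is why the macro now routes through get_physical_package_id(). The implementation added elsewhere in this patch is approximately:

    /* Sketch: fall back to the device-tree NUMA association when the
     * ibm,chip-id property is absent (shared-processor LPAR case) */
    int get_physical_package_id(int cpu)
    {
            int pkg_id = cpu_to_chip_id(cpu);

            if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
                IS_ENABLED(CONFIG_PPC_SPLPAR)) {
                    struct device_node *np = of_get_cpu_node(cpu, NULL);

                    if (np) {
                            pkg_id = of_node_to_nid(np);
                            of_node_put(np);
                    }
            }

            return pkg_id;
    }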
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index b5e1f8f8a05c..2ff884853f97 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -13,9 +13,6 @@
#define VDSO_VERSION_STRING LINUX_2.6.15
-/* Define if 64 bits VDSO has procedure descriptors */
-#undef VDS64_HAS_DESCRIPTORS
-
#ifndef __ASSEMBLY__
/* Offsets relative to thread->vdso_base */
@@ -28,25 +25,6 @@ int vdso_getcpu_init(void);
#else /* __ASSEMBLY__ */
#ifdef __VDSO64__
-#ifdef VDS64_HAS_DESCRIPTORS
-#define V_FUNCTION_BEGIN(name) \
- .globl name; \
- .section ".opd","a"; \
- .align 3; \
- name: \
- .quad .name,.TOC.@tocbase,0; \
- .previous; \
- .globl .name; \
- .type .name,@function; \
- .name: \
-
-#define V_FUNCTION_END(name) \
- .size .name,.-.name;
-
-#define V_LOCAL_FUNC(name) (.name)
-
-#else /* VDS64_HAS_DESCRIPTORS */
-
#define V_FUNCTION_BEGIN(name) \
.globl name; \
name: \
@@ -55,8 +33,6 @@ int vdso_getcpu_init(void);
.size name,.-name;
#define V_LOCAL_FUNC(name) (name)
-
-#endif /* VDS64_HAS_DESCRIPTORS */
#endif /* __VDSO64__ */
#ifdef __VDSO32__
diff --git a/arch/powerpc/kernel/.gitignore b/arch/powerpc/kernel/.gitignore
index 67ebd3003c05..d71179d3ffe9 100644
--- a/arch/powerpc/kernel/.gitignore
+++ b/arch/powerpc/kernel/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
prom_init_check
vmlinux.lds
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 78a1b22d4fd8..570660efbb3d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -3,8 +3,6 @@
# Makefile for the linux kernel.
#
-CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
ifdef CONFIG_PPC64
CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
endif
@@ -41,16 +39,17 @@ CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y := cputable.o ptrace.o syscalls.o \
+obj-y := cputable.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
of_platform.o prom_parse.o
-obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
- signal_64.o ptrace32.o \
- paca.o nvram_64.o firmware.o note.o
+obj-y += ptrace/
+obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o signal_64.o \
+ paca.o nvram_64.o firmware.o note.o \
+ syscall_64.o
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 6dfceaa820e4..f57712a55815 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -26,7 +26,7 @@
static void scrollscreen(void);
#endif
-#define __force_data __attribute__((__section__(".data")))
+#define __force_data __section(.data)
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 245be4fafe13..13eba2eb46fe 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2198,7 +2198,6 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
*/
if (old.oprofile_cpu_type != NULL) {
t->oprofile_cpu_type = old.oprofile_cpu_type;
- t->oprofile_type = old.oprofile_type;
t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
}
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 182b4047c1ef..36bc0d5c4f3a 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -139,7 +139,6 @@ static void __init cpufeatures_setup_cpu(void)
/* Initialize the base environment -- clear FSCR/HFSCR. */
hv_mode = !!(mfmsr() & MSR_HV);
if (hv_mode) {
- /* CPU_FTR_HVMODE is used early in PACA setup */
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
mtspr(SPRN_HFSCR, 0);
}
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 17cb3e9b5697..7cdcb413bb44 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1107,87 +1107,43 @@ static int eeh_init(void)
core_initcall_sync(eeh_init);
/**
- * eeh_add_device_early - Enable EEH for the indicated device node
- * @pdn: PCI device node for which to set up EEH
- *
- * This routine must be used to perform EEH initialization for PCI
- * devices that were added after system boot (e.g. hotplug, dlpar).
- * This routine must be called before any i/o is performed to the
- * adapter (including any config-space i/o).
- * Whether this actually enables EEH or not for this device depends
- * on the CEC architecture, type of the device, on earlier boot
- * command-line arguments & etc.
- */
-void eeh_add_device_early(struct pci_dn *pdn)
-{
- struct pci_controller *phb = pdn ? pdn->phb : NULL;
- struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
-
- if (!edev)
- return;
-
- if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
- return;
-
- /* USB Bus children of PCI devices will not have BUID's */
- if (NULL == phb ||
- (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
- return;
-
- eeh_ops->probe(pdn, NULL);
-}
-
-/**
- * eeh_add_device_tree_early - Enable EEH for the indicated device
- * @pdn: PCI device node
- *
- * This routine must be used to perform EEH initialization for the
- * indicated PCI device that was added after system boot (e.g.
- * hotplug, dlpar).
- */
-void eeh_add_device_tree_early(struct pci_dn *pdn)
-{
- struct pci_dn *n;
-
- if (!pdn)
- return;
-
- list_for_each_entry(n, &pdn->child_list, list)
- eeh_add_device_tree_early(n);
- eeh_add_device_early(pdn);
-}
-EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
-
-/**
- * eeh_add_device_late - Perform EEH initialization for the indicated pci device
+ * eeh_probe_device() - Perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
*
* This routine must be used to complete EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
*/
-void eeh_add_device_late(struct pci_dev *dev)
+void eeh_probe_device(struct pci_dev *dev)
{
- struct pci_dn *pdn;
struct eeh_dev *edev;
- if (!dev)
+ pr_debug("EEH: Adding device %s\n", pci_name(dev));
+
+ /*
+ * pci_dev_to_eeh_dev() can only work if eeh_probe_dev() was
+ * already called for this device.
+ */
+ if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
+ pci_dbg(dev, "Already bound to an eeh_dev!\n");
return;
+ }
- pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
- edev = pdn_to_eeh_dev(pdn);
- eeh_edev_dbg(edev, "Adding device\n");
- if (edev->pdev == dev) {
- eeh_edev_dbg(edev, "Device already referenced!\n");
+ edev = eeh_ops->probe(dev);
+ if (!edev) {
+ pr_debug("EEH: Adding device failed\n");
return;
}
/*
- * The EEH cache might not be removed correctly because of
- * unbalanced kref to the device during unplug time, which
- * relies on pcibios_release_device(). So we have to remove
- * that here explicitly.
+ * FIXME: We rely on pcibios_release_device() to remove the
+ * existing EEH state. The release function is only called if
+ * the pci_dev's refcount drops to zero so if something is
+ * keeping a ref to a device (e.g. a filesystem) we need to
+ * remove the old EEH state.
+ *
+ * FIXME: HEY MA, LOOK AT ME, NO LOCKING!
*/
- if (edev->pdev) {
+ if (edev->pdev && edev->pdev != dev) {
eeh_rmv_from_parent_pe(edev);
eeh_addr_cache_rmv_dev(edev->pdev);
eeh_sysfs_remove_device(edev->pdev);
@@ -1198,69 +1154,16 @@ void eeh_add_device_late(struct pci_dev *dev)
* into error handler afterwards.
*/
edev->mode |= EEH_DEV_NO_HANDLER;
-
- edev->pdev = NULL;
- dev->dev.archdata.edev = NULL;
}
- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
- eeh_ops->probe(pdn, NULL);
-
+ /* bind the pdev and the edev together */
edev->pdev = dev;
dev->dev.archdata.edev = edev;
-
eeh_addr_cache_insert_dev(dev);
+ eeh_sysfs_add_device(dev);
}
/**
- * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
- * @bus: PCI bus
- *
- * This routine must be used to perform EEH initialization for PCI
- * devices which are attached to the indicated PCI bus. The PCI bus
- * is added after system boot through hotplug or dlpar.
- */
-void eeh_add_device_tree_late(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- if (eeh_has_flag(EEH_FORCE_DISABLED))
- return;
- list_for_each_entry(dev, &bus->devices, bus_list) {
- eeh_add_device_late(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- struct pci_bus *subbus = dev->subordinate;
- if (subbus)
- eeh_add_device_tree_late(subbus);
- }
- }
-}
-EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
-
-/**
- * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
- * @bus: PCI bus
- *
- * This routine must be used to add EEH sysfs files for PCI
- * devices which are attached to the indicated PCI bus. The PCI bus
- * is added after system boot through hotplug or dlpar.
- */
-void eeh_add_sysfs_files(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- eeh_sysfs_add_device(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- struct pci_bus *subbus = dev->subordinate;
- if (subbus)
- eeh_add_sysfs_files(subbus);
- }
- }
-}
-EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
-
-/**
* eeh_remove_device - Undo EEH setup for the indicated pci device
* @dev: pci device to be removed
*
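With the early/late/sysfs trio folded into eeh_probe_device(), a hotplug call site shrinks to a single probe. A hedged sketch of the new caller shape (the function name is illustrative):

    /* Sketch: one call replaces eeh_add_device_late() plus
     * eeh_add_sysfs_files() in a bus-add hook */
    static void bus_add_device_sketch(struct pci_dev *pdev)
    {
            if (eeh_has_flag(EEH_FORCE_DISABLED))
                    return;

            eeh_probe_device(pdev);
    }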
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 16af0d8d90a8..a6371fb8f761 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -246,9 +246,8 @@ reenable_mmu:
* r3 can be different from GPR3(r1) at this point, r9 and r11
* contains the old MSR and handler address respectively,
* r4 & r5 can contain page fault arguments that need to be passed
- * along as well. r12, CCR, CTR, XER etc... are left clobbered as
- * they aren't useful past this point (aren't syscall arguments),
- * the rest is restored from the exception frame.
+ * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
+ * clobbered as they aren't useful past this point.
*/
stwu r1,-32(r1)
@@ -262,16 +261,12 @@ reenable_mmu:
* lockdep
*/
1: bl trace_hardirqs_off
-2: lwz r5,24(r1)
+ lwz r5,24(r1)
lwz r4,20(r1)
lwz r3,16(r1)
lwz r11,12(r1)
lwz r9,8(r1)
addi r1,r1,32
- lwz r0,GPR0(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
mtctr r11
mtlr r9
bctr /* jump to handler */
@@ -575,6 +570,33 @@ syscall_exit_work:
bl do_syscall_trace_leave
b ret_from_except_full
+ /*
+ * System call was called from kernel. We get here with SRR1 in r9.
+ * Mark the exception as recoverable once we have retrieved SRR0,
+ * trap a warning and return ENOSYS with CR[SO] set.
+ */
+ .globl ret_from_kernel_syscall
+ret_from_kernel_syscall:
+ mfspr r9, SPRN_SRR0
+ mfspr r10, SPRN_SRR1
+#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
+ mtmsr r11
+#endif
+
+0: trap
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+
+ li r3, ENOSYS
+ crset so
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
+ mtspr SPRN_NRI, r0
+#endif
+ mtspr SPRN_SRR0, r9
+ mtspr SPRN_SRR1, r10
+ SYNC
+ RFI
+
/*
* The fork/clone functions need to copy the full register set into
* the child process. Therefore we need to save all the nonvolatile
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6ba675b0cf7d..63f0a4414618 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -69,6 +70,7 @@ BEGIN_FTR_SECTION
bne .Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
+_ASM_NOKPROBE_SYMBOL(system_call_common)
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
@@ -76,342 +78,122 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
+ std r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
- ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
- std r2,GPR2(r1)
+ ld r2,PACATOC(r13)
+ mfcr r12
+ li r11,0
+	/* Can we avoid saving r3-r8 in the common case? */
std r3,GPR3(r1)
- mfcr r2
std r4,GPR4(r1)
std r5,GPR5(r1)
std r6,GPR6(r1)
std r7,GPR7(r1)
std r8,GPR8(r1)
- li r11,0
+	/* Zero r9-r12; this should only be required when restoring all GPRs */
std r11,GPR9(r1)
std r11,GPR10(r1)
std r11,GPR11(r1)
std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
std r11,_XER(r1)
std r11,_CTR(r1)
- std r9,GPR13(r1)
mflr r10
+
/*
* This clears CR0.SO (bit 28), which is the error indication on
* return from this system call.
*/
- rldimi r2,r11,28,(63-28)
- li r11,0xc01
+ rldimi r12,r11,28,(63-28)
+ li r11,0xc00
std r10,_LINK(r1)
std r11,_TRAP(r1)
+ std r12,_CCR(r1)
std r3,ORIG_GPR3(r1)
- std r2,_CCR(r1)
- ld r2,PACATOC(r13)
- addi r9,r1,STACK_FRAME_OVERHEAD
+ addi r10,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
- std r11,-16(r9) /* "regshere" marker */
-
- kuap_check_amr r10, r11
-
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
-BEGIN_FW_FTR_SECTION
- /* see if there are any DTL entries to process */
- ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
- ld r11,PACA_DTL_RIDX(r13) /* get log read index */
- addi r10,r10,LPPACA_DTLIDX
- LDX_BE r10,0,r10 /* get log write index */
- cmpd r11,r10
- beq+ 33f
- bl accumulate_stolen_time
- REST_GPR(0,r1)
- REST_4GPRS(3,r1)
- REST_2GPRS(7,r1)
- addi r9,r1,STACK_FRAME_OVERHEAD
-33:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
-
- /*
- * A syscall should always be called with interrupts enabled
- * so we just unconditionally hard-enable here. When some kind
- * of irq tracing is used, we additionally check that condition
- * is correct
- */
-#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
- lbz r10,PACAIRQSOFTMASK(r13)
-1: tdnei r10,IRQS_ENABLED
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
-
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
-#else
- li r11,MSR_RI
- ori r11,r11,MSR_EE
- mtmsrd r11,1
-#endif /* CONFIG_PPC_BOOK3E */
-
-system_call: /* label this so stack traces look sane */
- /* We do need to set SOFTE in the stack frame or the return
- * from interrupt will be painful
- */
- li r10,IRQS_ENABLED
- std r10,SOFTE(r1)
-
- ld r11, PACA_THREAD_INFO(r13)
- ld r10,TI_FLAGS(r11)
- andi. r11,r10,_TIF_SYSCALL_DOTRACE
- bne .Lsyscall_dotrace /* does not return */
- cmpldi 0,r0,NR_syscalls
- bge- .Lsyscall_enosys
+ std r11,-16(r10) /* "regshere" marker */
-.Lsyscall:
-/*
- * Need to vector to 32 Bit or default sys_call_table here,
- * based on caller's run-mode / personality.
- */
- ld r11,SYS_CALL_TABLE@toc(2)
- andis. r10,r10,_TIF_32BIT@h
- beq 15f
- ld r11,COMPAT_SYS_CALL_TABLE@toc(2)
- clrldi r3,r3,32
- clrldi r4,r4,32
- clrldi r5,r5,32
- clrldi r6,r6,32
- clrldi r7,r7,32
- clrldi r8,r8,32
-15:
- slwi r0,r0,3
-
- barrier_nospec_asm
/*
- * Prevent the load of the handler below (based on the user-passed
- * system call number) being speculatively executed until the test
- * against NR_syscalls and branch to .Lsyscall_enosys above has
- * committed.
+ * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
+ * would clobber syscall parameters. Also we always enter with IRQs
+ * enabled and nothing pending. system_call_exception() will call
+ * trace_hardirqs_off().
*/
+ li r11,IRQS_ALL_DISABLED
+ li r12,PACA_IRQ_HARD_DIS
+ stb r11,PACAIRQSOFTMASK(r13)
+ stb r12,PACAIRQHAPPENED(r13)
- ldx r12,r11,r0 /* Fetch system call handler [ptr] */
- mtctr r12
- bctrl /* Call handler */
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ mr r9,r0
+ bl system_call_exception
- /* syscall_exit can exit to kernel mode, via ret_from_kernel_thread */
.Lsyscall_exit:
- std r3,RESULT(r1)
-
-#ifdef CONFIG_DEBUG_RSEQ
- /* Check whether the syscall is issued inside a restartable sequence */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl rseq_syscall
- ld r3,RESULT(r1)
-#endif
-
- ld r12, PACA_THREAD_INFO(r13)
-
- ld r8,_MSR(r1)
-
-/*
- * This is a few instructions into the actual syscall exit path (which actually
- * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
- * number of visible symbols for profiling purposes.
- *
- * We can probe from system_call until this point as MSR_RI is set. But once it
- * is cleared below, we won't be able to take a trap.
- *
- * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
- */
-system_call_exit:
- /*
- * Disable interrupts so current_thread_info()->flags can't change,
- * and so that we don't get interrupted after loading SRR0/1.
- *
- * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we
- * could fault on the load of the TI_FLAGS below.
- */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- li r11,MSR_RI
- mtmsrd r11,1
-#endif /* CONFIG_PPC_BOOK3E */
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ bl syscall_exit_prepare
- ld r9,TI_FLAGS(r12)
- li r11,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
- bne- .Lsyscall_exit_work
+ ld r2,_CCR(r1)
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+ ld r6,_LINK(r1)
- andi. r0,r8,MSR_FP
- beq 2f
-#ifdef CONFIG_ALTIVEC
- andis. r0,r8,MSR_VEC@h
- bne 3f
-#endif
-2: addi r3,r1,STACK_FRAME_OVERHEAD
- bl restore_math
- ld r8,_MSR(r1)
- ld r3,RESULT(r1)
- li r11,-MAX_ERRNO
-
-3: cmpld r3,r11
- ld r5,_CCR(r1)
- bge- .Lsyscall_error
-.Lsyscall_error_cont:
- ld r7,_NIP(r1)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- andi. r6,r8,MSR_PR
- ld r4,_LINK(r1)
- kuap_check_amr r10, r11
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+ mtlr r6
-#ifdef CONFIG_PPC_BOOK3S
- /*
- * Clear MSR_RI, MSR_EE is already and remains disabled. We could do
- * this later, but testing shows that doing it here causes less slow
- * down than doing it closer to the rfid.
- */
+ cmpdi r3,0
+ bne .Lsyscall_restore_regs
+ /* Zero volatile regs that may contain sensitive kernel data */
+ li r0,0
+ li r4,0
+ li r5,0
+ li r6,0
+ li r7,0
+ li r8,0
+ li r9,0
+ li r10,0
li r11,0
- mtmsrd r11,1
-#endif
-
- beq- 1f
- ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
+ li r12,0
+ mtctr r0
+ mtspr SPRN_XER,r0
+.Lsyscall_restore_regs_cont:
BEGIN_FTR_SECTION
HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- std r8, PACATMSCRATCH(r13)
-#endif
-
/*
* We don't need to restore AMR on the way back to userspace for KUAP.
* The value of AMR only matters while we're in the kernel.
*/
- ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
+ mtcr r2
ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r13,GPR13(r1)
ld r1,GPR1(r1)
- mtlr r4
- mtcr r5
- mtspr SPRN_SRR0,r7
- mtspr SPRN_SRR1,r8
RFI_TO_USER
b . /* prevent speculative execution */
-1: /* exit to kernel */
- kuap_restore_amr r2
-
- ld r2,GPR2(r1)
- ld r1,GPR1(r1)
- mtlr r4
- mtcr r5
- mtspr SPRN_SRR0,r7
- mtspr SPRN_SRR1,r8
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-.Lsyscall_error:
- oris r5,r5,0x1000 /* Set SO bit in CR */
- neg r3,r3
- std r5,_CCR(r1)
- b .Lsyscall_error_cont
-
-/* Traced system call support */
-.Lsyscall_dotrace:
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_enter
-
- /*
- * We use the return value of do_syscall_trace_enter() as the syscall
- * number. If the syscall was rejected for any reason do_syscall_trace_enter()
- * returns an invalid syscall number and the test below against
- * NR_syscalls will fail.
- */
- mr r0,r3
-
- /* Restore argument registers just clobbered and/or possibly changed. */
- ld r3,GPR3(r1)
- ld r4,GPR4(r1)
- ld r5,GPR5(r1)
- ld r6,GPR6(r1)
- ld r7,GPR7(r1)
- ld r8,GPR8(r1)
-
- /* Repopulate r9 and r10 for the syscall path */
- addi r9,r1,STACK_FRAME_OVERHEAD
- ld r10, PACA_THREAD_INFO(r13)
- ld r10,TI_FLAGS(r10)
-
- cmpldi r0,NR_syscalls
- blt+ .Lsyscall
-
- /* Return code is already in r3 thanks to do_syscall_trace_enter() */
- b .Lsyscall_exit
-
-
-.Lsyscall_enosys:
- li r3,-ENOSYS
- b .Lsyscall_exit
-
-.Lsyscall_exit_work:
- /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
- If TIF_NOERROR is set, just save r3 as it is. */
-
- andi. r0,r9,_TIF_RESTOREALL
- beq+ 0f
+.Lsyscall_restore_regs:
+ ld r3,_CTR(r1)
+ ld r4,_XER(r1)
REST_NVGPRS(r1)
- b 2f
-0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
- blt+ 1f
- andi. r0,r9,_TIF_NOERROR
- bne- 1f
- ld r5,_CCR(r1)
- neg r3,r3
- oris r5,r5,0x1000 /* Set SO bit in CR */
- std r5,_CCR(r1)
-1: std r3,GPR3(r1)
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
- beq 4f
-
- /* Clear per-syscall TIF flags if any are set. */
-
- li r11,_TIF_PERSYSCALL_MASK
- addi r12,r12,TI_FLAGS
-3: ldarx r10,0,r12
- andc r10,r10,r11
- stdcx. r10,0,r12
- bne- 3b
- subi r12,r12,TI_FLAGS
-
-4: /* Anything else left to do? */
-BEGIN_FTR_SECTION
- lis r3,DEFAULT_PPR@highest /* Set default PPR */
- sldi r3,r3,32 /* bits 11-13 are used for ppr */
- std r3,_PPR(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
- beq ret_from_except_lite
-
- /* Re-enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
-#else
- li r10,MSR_RI
- ori r10,r10,MSR_EE
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_leave
- b ret_from_except
+ mtctr r3
+ mtspr SPRN_XER,r4
+ ld r0,GPR0(r1)
+ REST_8GPRS(4, r1)
+ ld r12,GPR12(r1)
+ b .Lsyscall_restore_regs_cont
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
@@ -439,64 +221,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
RFI_TO_USER
b . /* prevent speculative execution */
#endif
-_ASM_NOKPROBE_SYMBOL(system_call_common);
-_ASM_NOKPROBE_SYMBOL(system_call_exit);
-
-/* Save non-volatile GPRs, if not already saved. */
-_GLOBAL(save_nvgprs)
- ld r11,_TRAP(r1)
- andi. r0,r11,1
- beqlr-
- SAVE_NVGPRS(r1)
- clrrdi r0,r11,1
- std r0,_TRAP(r1)
- blr
-_ASM_NOKPROBE_SYMBOL(save_nvgprs);
-
-
-/*
- * The sigsuspend and rt_sigsuspend system calls can call do_signal
- * and thus put the process into the stopped state where we might
- * want to examine its user state with ptrace. Therefore we need
- * to save all the nonvolatile registers (r14 - r31) before calling
- * the C code. Similarly, fork, vfork and clone need the full
- * register state on the stack so that it can be copied to the child.
- */
-
-_GLOBAL(ppc_fork)
- bl save_nvgprs
- bl sys_fork
- b .Lsyscall_exit
-
-_GLOBAL(ppc_vfork)
- bl save_nvgprs
- bl sys_vfork
- b .Lsyscall_exit
-
-_GLOBAL(ppc_clone)
- bl save_nvgprs
- bl sys_clone
- b .Lsyscall_exit
-
-_GLOBAL(ppc_clone3)
- bl save_nvgprs
- bl sys_clone3
- b .Lsyscall_exit
-
-_GLOBAL(ppc32_swapcontext)
- bl save_nvgprs
- bl compat_sys_swapcontext
- b .Lsyscall_exit
-
-_GLOBAL(ppc64_swapcontext)
- bl save_nvgprs
- bl sys_swapcontext
- b .Lsyscall_exit
-
-_GLOBAL(ppc_switch_endian)
- bl save_nvgprs
- bl sys_switch_endian
- b .Lsyscall_exit
_GLOBAL(ret_from_fork)
bl schedule_tail
@@ -516,6 +240,19 @@ _GLOBAL(ret_from_kernel_thread)
li r3,0
b .Lsyscall_exit
+#ifdef CONFIG_PPC_BOOK3E
+/* Save non-volatile GPRs, if not already saved. */
+_GLOBAL(save_nvgprs)
+ ld r11,_TRAP(r1)
+ andi. r0,r11,1
+ beqlr-
+ SAVE_NVGPRS(r1)
+ clrrdi r0,r11,1
+ std r0,_TRAP(r1)
+ blr
+_ASM_NOKPROBE_SYMBOL(save_nvgprs);
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
#define FLUSH_COUNT_CACHE \
@@ -578,7 +315,7 @@ flush_count_cache:
* state of one is saved on its kernel stack. Then the state
* of the other is restored from its kernel stack. The memory
* management hardware is updated to the second process's state.
- * Finally, we can return to the second process, via ret_from_except.
+ * Finally, we can return to the second process, via interrupt_return.
* On entry, r3 points to the THREAD for the current task, r4
* points to the THREAD for the new task.
*
@@ -730,408 +467,146 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr
- .align 7
-_GLOBAL(ret_from_except)
- ld r11,_TRAP(r1)
- andi. r0,r11,1
- bne ret_from_except_lite
- REST_NVGPRS(r1)
-
-_GLOBAL(ret_from_except_lite)
- /*
- * Disable interrupts so that current_thread_info()->flags
- * can't change between when we test it and when we return
- * from the interrupt.
- */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- li r10,MSR_RI
- mtmsrd r10,1 /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-
- ld r9, PACA_THREAD_INFO(r13)
- ld r3,_MSR(r1)
-#ifdef CONFIG_PPC_BOOK3E
- ld r10,PACACURRENT(r13)
-#endif /* CONFIG_PPC_BOOK3E */
- ld r4,TI_FLAGS(r9)
- andi. r3,r3,MSR_PR
- beq resume_kernel
-#ifdef CONFIG_PPC_BOOK3E
- lwz r3,(THREAD+THREAD_DBCR0)(r10)
-#endif /* CONFIG_PPC_BOOK3E */
-
- /* Check current_thread_info()->flags */
- andi. r0,r4,_TIF_USER_WORK_MASK
- bne 1f
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3S
/*
- * Check to see if the dbcr0 register is set up to debug.
- * Use the internal debug mode bit to do this.
- */
- andis. r0,r3,DBCR0_IDM@h
- beq restore
- mfmsr r0
- rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
- mtmsr r0
- mtspr SPRN_DBCR0,r3
- li r10, -1
- mtspr SPRN_DBSR,r10
- b restore
-#else
+ * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
+ * touched, AMR not set, no exit work created, then this can be used.
+ */
+ .balign IFETCH_ALIGN_BYTES
+ .globl fast_interrupt_return
+fast_interrupt_return:
+_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
+ ld r4,_MSR(r1)
+ andi. r0,r4,MSR_PR
+ bne .Lfast_user_interrupt_return
+ andi. r0,r4,MSR_RI
+ li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
+ bne+ .Lfast_kernel_interrupt_return
addi r3,r1,STACK_FRAME_OVERHEAD
- bl restore_math
- b restore
-#endif
-1: andi. r0,r4,_TIF_NEED_RESCHED
- beq 2f
- bl restore_interrupts
- SCHEDULE_USER
- b ret_from_except_lite
-2:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
- bne 3f /* only restore TM if nothing else to do */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl restore_tm_state
- b restore
-3:
-#endif
- bl save_nvgprs
- /*
- * Use a non volatile GPR to save and restore our thread_info flags
- * across the call to restore_interrupts.
- */
- mr r30,r4
- bl restore_interrupts
- mr r4,r30
+ bl unrecoverable_exception
+ b . /* should not get here */
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl interrupt_return
+interrupt_return:
+_ASM_NOKPROBE_SYMBOL(interrupt_return)
+ ld r4,_MSR(r1)
+ andi. r0,r4,MSR_PR
+ beq .Lkernel_interrupt_return
addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_notify_resume
- b ret_from_except
-
-resume_kernel:
- /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
- beq+ 1f
+ bl interrupt_exit_user_prepare
+ cmpdi r3,0
+ bne- .Lrestore_nvgprs
- addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+.Lfast_user_interrupt_return:
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+BEGIN_FTR_SECTION
+ ld r10,_PPR(r1)
+ mtspr SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
- ld r3,GPR1(r1)
- subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
- mr r4,r1 /* src: current exception frame */
- mr r1,r3 /* Reroute the trampoline frame to r1 */
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- /* Copy from the original to the trampoline. */
- li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
- li r6,0 /* start offset: 0 */
- mtctr r5
-2: ldx r0,r6,r4
- stdx r0,r6,r3
- addi r6,r6,8
- bdnz 2b
-
- /* Do real store operation to complete stdu */
- ld r5,GPR1(r1)
- std r8,0(r5)
-
- /* Clear _TIF_EMULATE_STACK_STORE flag */
- lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r9,TI_FLAGS
-0: ldarx r4,0,r5
- andc r4,r4,r11
- stdcx. r4,0,r5
- bne- 0b
-1:
-
-#ifdef CONFIG_PREEMPTION
- /* Check if we need to preempt */
- andi. r0,r4,_TIF_NEED_RESCHED
- beq+ restore
- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr0,r8,0
- bne restore
- ld r0,SOFTE(r1)
- andi. r0,r0,IRQS_DISABLED
- bne restore
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ li r0,0
- /*
- * Here we are preempting the current task. We want to make
- * sure we are soft-disabled first and reconcile irq state.
- */
- RECONCILE_IRQ_STATE(r3,r4)
- bl preempt_schedule_irq
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+ REST_GPR(13, r1)
- /*
- * arch_local_irq_restore() from preempt_schedule_irq above may
- * enable hard interrupt but we really should disable interrupts
- * when we return from the interrupt, and so that we don't get
- * interrupted after loading SRR0/1.
- */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- li r10,MSR_RI
- mtmsrd r10,1 /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-#endif /* CONFIG_PREEMPTION */
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtspr SPRN_XER,r6
- .globl fast_exc_return_irq
-fast_exc_return_irq:
-restore:
- /*
- * This is the main kernel exit path. First we check if we
- * are about to re-enable interrupts
- */
- ld r5,SOFTE(r1)
- lbz r6,PACAIRQSOFTMASK(r13)
- andi. r5,r5,IRQS_DISABLED
- bne .Lrestore_irq_off
+ REST_4GPRS(2, r1)
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ RFI_TO_USER
+ b . /* prevent speculative execution */
- /* We are enabling, were we already enabled ? Yes, just return */
- andi. r6,r6,IRQS_DISABLED
- beq cr0,.Ldo_restore
+.Lrestore_nvgprs:
+ REST_NVGPRS(r1)
+ b .Lfast_user_interrupt_return
- /*
- * We are about to soft-enable interrupts (we are hard disabled
- * at this point). We check if there's anything that needs to
- * be replayed first.
- */
- lbz r0,PACAIRQHAPPENED(r13)
- cmpwi cr0,r0,0
- bne- .Lrestore_check_irq_replay
+ .balign IFETCH_ALIGN_BYTES
+.Lkernel_interrupt_return:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_kernel_prepare
- /*
- * Get here when nothing happened while soft-disabled, just
- * soft-enable and move-on. We will hard-enable as a side
- * effect of rfi
- */
-.Lrestore_no_replay:
- TRACE_ENABLE_INTS
- li r0,IRQS_ENABLED
- stb r0,PACAIRQSOFTMASK(r13);
+.Lfast_kernel_interrupt_return:
+ cmpdi cr1,r3,0
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
- /*
- * Final return path. BookE is handled in a different file
- */
-.Ldo_restore:
-#ifdef CONFIG_PPC_BOOK3E
- b exception_return_book3e
-#else
- /*
- * Clear the reservation. If we know the CPU tracks the address of
- * the reservation then we can potentially save some cycles and use
- * a larx. On POWER6 and POWER7 this is significantly faster.
- */
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
- ldarx r4,0,r1
+ ldarx r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- /*
- * Some code path such as load_up_fpu or altivec return directly
- * here. They run entirely hard disabled and do not alter the
- * interrupt state. They also don't use lwarx/stwcx. and thus
- * are known not to leave dangling reservations.
- */
- .globl fast_exception_return
-fast_exception_return:
- ld r3,_MSR(r1)
+ ld r3,_LINK(r1)
ld r4,_CTR(r1)
- ld r0,_LINK(r1)
- mtctr r4
- mtlr r0
- ld r4,_XER(r1)
- mtspr SPRN_XER,r4
-
- kuap_check_amr r5, r6
-
- REST_8GPRS(5, r1)
-
- andi. r0,r3,MSR_RI
- beq- .Lunrecov_restore
-
- /*
- * Clear RI before restoring r13. If we are returning to
- * userspace and we take an exception after restoring r13,
- * we end up corrupting the userspace r13 value.
- */
- li r4,0
- mtmsrd r4,1
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* TM debug */
- std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
-#endif
- /*
- * r13 is our per cpu area, only restore it if we are returning to
- * userspace the value stored in the stack frame may belong to
- * another CPU.
- */
- andi. r0,r3,MSR_PR
- beq 1f
-BEGIN_FTR_SECTION
- /* Restore PPR */
- ld r2,_PPR(r1)
- mtspr SPRN_PPR,r2
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
- REST_GPR(13, r1)
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtspr SPRN_SRR1,r3
-
- ld r2,_CCR(r1)
- mtcrf 0xFF,r2
- ld r2,_NIP(r1)
- mtspr SPRN_SRR0,r2
-
- ld r0,GPR0(r1)
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r4,GPR4(r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
+ ld r5,_XER(r1)
+ ld r6,_CCR(r1)
+ li r0,0
-1: mtspr SPRN_SRR1,r3
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
- ld r2,_CCR(r1)
- mtcrf 0xFF,r2
- ld r2,_NIP(r1)
- mtspr SPRN_SRR0,r2
+ mtlr r3
+ mtctr r4
+ mtspr SPRN_XER,r5
/*
* Leaving a stale exception_marker on the stack can confuse
* the reliable stack unwinder later on. Clear it.
*/
- li r2,0
- std r2,STACK_FRAME_OVERHEAD-16(r1)
-
- ld r0,GPR0(r1)
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
+ std r0,STACK_FRAME_OVERHEAD-16(r1)
- kuap_restore_amr r4
+ REST_4GPRS(2, r1)
- ld r4,GPR4(r1)
- ld r1,GPR1(r1)
+ bne- cr1,1f /* emulate stack store */
+ mtcr r6
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
RFI_TO_KERNEL
b . /* prevent speculative execution */
-#endif /* CONFIG_PPC_BOOK3E */
-
- /*
- * We are returning to a context with interrupts soft disabled.
- *
- * However, we may also about to hard enable, so we need to
- * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
- * or that bit can get out of sync and bad things will happen
- */
-.Lrestore_irq_off:
- ld r3,_MSR(r1)
- lbz r7,PACAIRQHAPPENED(r13)
- andi. r0,r3,MSR_EE
- beq 1f
- rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
- stb r7,PACAIRQHAPPENED(r13)
-1:
-#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
- /* The interrupt should not have soft enabled. */
- lbz r7,PACAIRQSOFTMASK(r13)
-1: tdeqi r7,IRQS_ENABLED
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
- b .Ldo_restore
-
- /*
- * Something did happen, check if a re-emit is needed
- * (this also clears paca->irq_happened)
- */
-.Lrestore_check_irq_replay:
- /* XXX: We could implement a fast path here where we check
- * for irq_happened being just 0x01, in which case we can
- * clear it and return. That means that we would potentially
- * miss a decrementer having wrapped all the way around.
- *
- * Still, this might be useful for things like hash_page
- */
- bl __check_irq_replay
- cmpwi cr0,r3,0
- beq .Lrestore_no_replay
-
- /*
- * We need to re-emit an interrupt. We do so by re-using our
- * existing exception frame. We first change the trap value,
- * but we need to ensure we preserve the low nibble of it
- */
- ld r4,_TRAP(r1)
- clrldi r4,r4,60
- or r4,r4,r3
- std r4,_TRAP(r1)
-
- /*
- * PACA_IRQ_HARD_DIS won't always be set here, so set it now
- * to reconcile the IRQ state. Tracing is already accounted for.
- */
- lbz r4,PACAIRQHAPPENED(r13)
- ori r4,r4,PACA_IRQ_HARD_DIS
- stb r4,PACAIRQHAPPENED(r13)
-
- /*
- * Then find the right handler and call it. Interrupts are
- * still soft-disabled and we keep them that way.
- */
- cmpwi cr0,r3,0x500
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl do_IRQ
- b ret_from_except
-1: cmpwi cr0,r3,0xf00
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl performance_monitor_exception
- b ret_from_except
-1: cmpwi cr0,r3,0xe60
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl handle_hmi_exception
- b ret_from_except
-1: cmpwi cr0,r3,0x900
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl timer_interrupt
- b ret_from_except
-#ifdef CONFIG_PPC_DOORBELL
-1:
-#ifdef CONFIG_PPC_BOOK3E
- cmpwi cr0,r3,0x280
-#else
- cmpwi cr0,r3,0xa00
-#endif /* CONFIG_PPC_BOOK3E */
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl doorbell_exception
-#endif /* CONFIG_PPC_DOORBELL */
-1: b ret_from_except /* What else to do here ? */
-
-.Lunrecov_restore:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b .Lunrecov_restore
-
-_ASM_NOKPROBE_SYMBOL(ret_from_except);
-_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
-_ASM_NOKPROBE_SYMBOL(resume_kernel);
-_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
-_ASM_NOKPROBE_SYMBOL(restore);
-_ASM_NOKPROBE_SYMBOL(fast_exception_return);
+1: /*
+ * Emulate stack store with update. New r1 value was already calculated
+ * and updated in our interrupt regs by emulate_loadstore, but we can't
+ * store the previous value of r1 to the stack before re-loading our
+ * registers from it, otherwise they could be clobbered. Use
+ * PACA_EXGEN as temporary storage to hold the store data, as
+ * interrupts are disabled here so it won't be clobbered.
+ */
+ mtcr r6
+ std r9,PACA_EXGEN+0(r13)
+ addi r9,r1,INT_FRAME_SIZE /* get original r1 */
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ std r9,0(r1) /* perform store component of stdu */
+ ld r9,PACA_EXGEN+0(r13)
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_RTAS
/*
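Most of the assembly deleted above reappears as C in the new syscall_64.c (added by the Makefile hunk earlier). A rough, hedged reconstruction of the entry side, with compat dispatch and several details omitted:

    /* Sketch of the C syscall entry the slimmed asm stub branches to;
     * per the asm calling convention above, r9/r10 arrive as (orig r0, regs) */
    notrace long system_call_exception(long r3, long r4, long r5,
                                       long r6, long r7, long r8,
                                       unsigned long r0, struct pt_regs *regs)
    {
            syscall_fn f;   /* assumption: a long (*)(long, ...) typedef */

            trace_hardirqs_off();   /* finish the reconcile begun in asm */
            local_irq_enable();     /* syscalls always enter with EE set */

            if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
                    r0 = do_syscall_trace_enter(regs);
                    /* tracers may rewrite both the arguments and the number */
                    r3 = regs->gpr[3]; r4 = regs->gpr[4]; r5 = regs->gpr[5];
                    r6 = regs->gpr[6]; r7 = regs->gpr[7]; r8 = regs->gpr[8];
            }

            if (unlikely(r0 >= NR_syscalls))
                    return -ENOSYS;

            barrier_nospec();       /* same Spectre-v1 guard the asm carried */
            f = (void *)sys_call_table[r0];

            return f(r3, r4, r5, r6, r7, r8);
    }

The exit half works the same way: syscall_exit_prepare(), called at .Lsyscall_exit above, decides in C whether the fast register-clearing return or the full .Lsyscall_restore_regs path is taken, signalled through the return value tested by the cmpdi before RFI_TO_USER.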
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index e4076e3c072d..d9ed79415100 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -24,6 +24,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
+#include <asm/context_tracking.h>
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -1003,38 +1004,6 @@ masked_interrupt_book3e_0x2c0:
masked_interrupt_book3e PACA_IRQ_DBELL 0
/*
- * Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
- * to indicate the kind of interrupt. MSR:EE is already off.
- * We generate a stackframe like if a real interrupt had happened.
- *
- * Note: While MSR:EE is off, we need to make sure that _MSR
- * in the generated frame has EE set to 1 or the exception
- * handler will not properly re-enable them.
- */
-_GLOBAL(__replay_interrupt)
- /* We are going to jump to the exception common code which
- * will retrieve various register values from the PACA which
- * we don't give a damn about.
- */
- mflr r10
- mfmsr r11
- mfcr r4
- mtspr SPRN_SPRG_GEN_SCRATCH,r13;
- std r1,PACA_EXGEN+EX_R1(r13);
- stw r4,PACA_EXGEN+EX_CR(r13);
- ori r11,r11,MSR_EE
- subi r1,r1,INT_FRAME_SIZE;
- cmpwi cr0,r3,0x500
- beq exc_0x500_common
- cmpwi cr0,r3,0x900
- beq exc_0x900_common
- cmpwi cr0,r3,0x280
- beq exc_0x280_common
- blr
-
-
-/*
* This is called from 0x300 and 0x400 handlers after the prologs with
* r14 and r15 containing the fault address and error code, with the
* original values stashed away in the PACA
@@ -1073,17 +1042,161 @@ alignment_more:
bl alignment_exception
b ret_from_except
-/*
- * We branch here from entry_64.S for the last stage of the exception
- * return code path. MSR:EE is expected to be off at that point
- */
-_GLOBAL(exception_return_book3e)
- b 1f
+ .align 7
+_GLOBAL(ret_from_except)
+ ld r11,_TRAP(r1)
+ andi. r0,r11,1
+ bne ret_from_except_lite
+ REST_NVGPRS(r1)
+
+_GLOBAL(ret_from_except_lite)
+ /*
+ * Disable interrupts so that current_thread_info()->flags
+ * can't change between when we test it and when we return
+ * from the interrupt.
+ */
+ wrteei 0
+
+ ld r9, PACA_THREAD_INFO(r13)
+ ld r3,_MSR(r1)
+ ld r10,PACACURRENT(r13)
+ ld r4,TI_FLAGS(r9)
+ andi. r3,r3,MSR_PR
+ beq resume_kernel
+ lwz r3,(THREAD+THREAD_DBCR0)(r10)
+
+ /* Check current_thread_info()->flags */
+ andi. r0,r4,_TIF_USER_WORK_MASK
+ bne 1f
+ /*
+ * Check to see if the dbcr0 register is set up to debug.
+ * Use the internal debug mode bit to do this.
+ */
+ andis. r0,r3,DBCR0_IDM@h
+ beq restore
+ mfmsr r0
+ rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
+ mtmsr r0
+ mtspr SPRN_DBCR0,r3
+ li r10, -1
+ mtspr SPRN_DBSR,r10
+ b restore
+1: andi. r0,r4,_TIF_NEED_RESCHED
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+ b ret_from_except_lite
+2:
+ bl save_nvgprs
+ /*
+	 * Use a non-volatile GPR to save and restore our thread_info flags
+ * across the call to restore_interrupts.
+ */
+ mr r30,r4
+ bl restore_interrupts
+ mr r4,r30
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_notify_resume
+ b ret_from_except
+
+resume_kernel:
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+ andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ ld r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mtctr r5
+2: ldx r0,r6,r4
+ stdx r0,r6,r3
+ addi r6,r6,8
+ bdnz 2b
+
+ /* Do real store operation to complete stdu */
+ ld r5,GPR1(r1)
+ std r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+0: ldarx r4,0,r5
+ andc r4,r4,r11
+ stdcx. r4,0,r5
+ bne- 0b
+1:
+
+#ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq+ restore
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi cr0,r8,0
+ bne restore
+ ld r0,SOFTE(r1)
+ andi. r0,r0,IRQS_DISABLED
+ bne restore
+
+ /*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first and reconcile irq state.
+ */
+ RECONCILE_IRQ_STATE(r3,r4)
+ bl preempt_schedule_irq
+
+ /*
+	 * arch_local_irq_restore() from preempt_schedule_irq above may
+	 * enable hard interrupts, but we really should disable interrupts
+	 * when we return from the interrupt so that we don't get
+	 * interrupted after loading SRR0/1.
+ */
+ wrteei 0
+#endif /* CONFIG_PREEMPT */
+
+restore:
+ /*
+ * This is the main kernel exit path. First we check if we
+ * are about to re-enable interrupts
+ */
+ ld r5,SOFTE(r1)
+ lbz r6,PACAIRQSOFTMASK(r13)
+ andi. r5,r5,IRQS_DISABLED
+ bne .Lrestore_irq_off
+
+ /* We are enabling, were we already enabled ? Yes, just return */
+ andi. r6,r6,IRQS_DISABLED
+ beq cr0,fast_exception_return
+
+ /*
+ * We are about to soft-enable interrupts (we are hard disabled
+ * at this point). We check if there's anything that needs to
+ * be replayed first.
+ */
+ lbz r0,PACAIRQHAPPENED(r13)
+ cmpwi cr0,r0,0
+ bne- .Lrestore_check_irq_replay
+
+ /*
+	 * Get here when nothing happened while soft-disabled, just
+	 * soft-enable and move on. We will hard-enable as a side
+	 * effect of rfi.
+ */
+.Lrestore_no_replay:
+ TRACE_ENABLE_INTS
+ li r0,IRQS_ENABLED
+ stb r0,PACAIRQSOFTMASK(r13);
/* This is the return from load_up_fpu fast path which could do with
* less GPR restores in fact, but for now we have a single return path
*/
- .globl fast_exception_return
fast_exception_return:
wrteei 0
1: mr r0,r13
@@ -1124,6 +1237,102 @@ fast_exception_return:
mfspr r13,SPRN_SPRG_GEN_SCRATCH
rfi
+ /*
+ * We are returning to a context with interrupts soft disabled.
+ *
+	 * However, we may also be about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen.
+ */
+.Lrestore_irq_off:
+ ld r3,_MSR(r1)
+ lbz r7,PACAIRQHAPPENED(r13)
+ andi. r0,r3,MSR_EE
+ beq 1f
+ rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
+ stb r7,PACAIRQHAPPENED(r13)
+1:
+#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
+ /* The interrupt should not have soft enabled. */
+ lbz r7,PACAIRQSOFTMASK(r13)
+1: tdeqi r7,IRQS_ENABLED
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
+ b fast_exception_return
+
+ /*
+	 * Something did happen; check if a re-emit is needed
+ * (this also clears paca->irq_happened)
+ */
+.Lrestore_check_irq_replay:
+ /* XXX: We could implement a fast path here where we check
+ * for irq_happened being just 0x01, in which case we can
+ * clear it and return. That means that we would potentially
+ * miss a decrementer having wrapped all the way around.
+ *
+ * Still, this might be useful for things like hash_page
+ */
+ bl __check_irq_replay
+ cmpwi cr0,r3,0
+ beq .Lrestore_no_replay
+
+ /*
+ * We need to re-emit an interrupt. We do so by re-using our
+ * existing exception frame. We first change the trap value,
+ * but we need to ensure we preserve the low nibble of it
+ */
+ ld r4,_TRAP(r1)
+ clrldi r4,r4,60
+ or r4,r4,r3
+ std r4,_TRAP(r1)
+
+ /*
+ * PACA_IRQ_HARD_DIS won't always be set here, so set it now
+ * to reconcile the IRQ state. Tracing is already accounted for.
+ */
+ lbz r4,PACAIRQHAPPENED(r13)
+ ori r4,r4,PACA_IRQ_HARD_DIS
+ stb r4,PACAIRQHAPPENED(r13)
+
+ /*
+ * Then find the right handler and call it. Interrupts are
+ * still soft-disabled and we keep them that way.
+ */
+ cmpwi cr0,r3,0x500
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl do_IRQ
+ b ret_from_except
+1: cmpwi cr0,r3,0xf00
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl performance_monitor_exception
+ b ret_from_except
+1: cmpwi cr0,r3,0xe60
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl handle_hmi_exception
+ b ret_from_except
+1: cmpwi cr0,r3,0x900
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl timer_interrupt
+ b ret_from_except
+#ifdef CONFIG_PPC_DOORBELL
+1:
+ cmpwi cr0,r3,0x280
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl doorbell_exception
+#endif /* CONFIG_PPC_DOORBELL */
+1: b ret_from_except /* What else to do here ? */
+
+_ASM_NOKPROBE_SYMBOL(ret_from_except);
+_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
+_ASM_NOKPROBE_SYMBOL(resume_kernel);
+_ASM_NOKPROBE_SYMBOL(restore);
+_ASM_NOKPROBE_SYMBOL(fast_exception_return);
+
/*
* Trampolines used when spotting a bad kernel stack pointer in
* the exception entry code.
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ffc15f4f079d..18bbce143084 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -32,16 +32,10 @@
#define EX_CCR 52
#define EX_CFAR 56
#define EX_PPR 64
-#if defined(CONFIG_RELOCATABLE)
#define EX_CTR 72
.if EX_SIZE != 10
.error "EX_SIZE is wrong"
.endif
-#else
-.if EX_SIZE != 9
- .error "EX_SIZE is wrong"
-.endif
-#endif
/*
* Following are fixed section helper macros.
@@ -50,7 +44,6 @@
* EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
* TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
* TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use)
- * TRAMP_KVM_BEGIN - KVM handlers, these are put into real, unrelocated
* EXC_COMMON - After switching to virtual, relocated mode.
*/
@@ -80,13 +73,6 @@ name:
#define TRAMP_VIRT_BEGIN(name) \
FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#define TRAMP_KVM_BEGIN(name) \
- TRAMP_VIRT_BEGIN(name)
-#else
-#define TRAMP_KVM_BEGIN(name)
-#endif
-
#define EXC_REAL_NONE(start, size) \
FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)
@@ -119,67 +105,6 @@ name:
ori reg,reg,(ABS_ADDR(label))@l; \
addis reg,reg,(ABS_ADDR(label))@h
-/* Exception register prefixes */
-#define EXC_HV_OR_STD 2 /* depends on HVMODE */
-#define EXC_HV 1
-#define EXC_STD 0
-
-#if defined(CONFIG_RELOCATABLE)
-/*
- * If we support interrupts with relocation on AND we're a relocatable kernel,
- * we need to use CTR to get to the 2nd level handler. So, save/restore it
- * when required.
- */
-#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
-#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
-#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
-#else
-/* ...else CTR is unused and in register. */
-#define SAVE_CTR(reg, area)
-#define GET_CTR(reg, area) mfctr reg
-#define RESTORE_CTR(reg, area)
-#endif
-
-/*
- * PPR save/restore macros used in exceptions-64s.S
- * Used for P7 or later processors
- */
-#define SAVE_PPR(area, ra) \
-BEGIN_FTR_SECTION_NESTED(940) \
- ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
- std ra,_PPR(r1); \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
-
-#define RESTORE_PPR_PACA(area, ra) \
-BEGIN_FTR_SECTION_NESTED(941) \
- ld ra,area+EX_PPR(r13); \
- mtspr SPRN_PPR,ra; \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
-
-/*
- * Get an SPR into a register if the CPU has the given feature
- */
-#define OPT_GET_SPR(ra, spr, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- mfspr ra,spr; \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Set an SPR from a register if the CPU has the given feature
- */
-#define OPT_SET_SPR(ra, spr, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- mtspr spr,ra; \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Save a register to the PACA if the CPU has the given feature
- */
-#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- std ra,offset(r13); \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
/*
* Branch to label using its 0xC000 address. This results in instruction
* address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
@@ -193,89 +118,199 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mtctr reg; \
bctr
-.macro INT_KVM_HANDLER name, vec, hsrr, area, skip
- TRAMP_KVM_BEGIN(\name\()_kvm)
- KVM_HANDLER \vec, \hsrr, \area, \skip
+/*
+ * Interrupt code generation macros
+ */
+#define IVEC .L_IVEC_\name\() /* Interrupt vector address */
+#define IHSRR .L_IHSRR_\name\() /* Sets SRR or HSRR registers */
+#define IHSRR_IF_HVMODE .L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
+#define IAREA .L_IAREA_\name\() /* PACA save area */
+#define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */
+#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */
+#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */
+#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */
+#define ISET_RI .L_ISET_RI_\name\() /* Run common code w/ MSR[RI]=1 */
+#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
+#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
+#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
+#define IKVM_SKIP .L_IKVM_SKIP_\name\() /* Generate KVM skip handler */
+#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */
+#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name
+#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */
+#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */
+#define __ISTACK(name) .L_ISTACK_ ## name
+#define IRECONCILE .L_IRECONCILE_\name\() /* Do RECONCILE_IRQ_STATE */
+#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */
+
+#define INT_DEFINE_BEGIN(n) \
+.macro int_define_ ## n name
+
+#define INT_DEFINE_END(n) \
+.endm ; \
+int_define_ ## n n ; \
+do_define_int n
+
+.macro do_define_int name
+ .ifndef IVEC
+ .error "IVEC not defined"
+ .endif
+ .ifndef IHSRR
+ IHSRR=0
+ .endif
+ .ifndef IHSRR_IF_HVMODE
+ IHSRR_IF_HVMODE=0
+ .endif
+ .ifndef IAREA
+ IAREA=PACA_EXGEN
+ .endif
+ .ifndef IVIRT
+ IVIRT=1
+ .endif
+ .ifndef IISIDE
+ IISIDE=0
+ .endif
+ .ifndef IDAR
+ IDAR=0
+ .endif
+ .ifndef IDSISR
+ IDSISR=0
+ .endif
+ .ifndef ISET_RI
+ ISET_RI=1
+ .endif
+ .ifndef IBRANCH_TO_COMMON
+ IBRANCH_TO_COMMON=1
+ .endif
+ .ifndef IREALMODE_COMMON
+ IREALMODE_COMMON=0
+ .else
+ .if ! IBRANCH_TO_COMMON
+ .error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
+ .endif
+ .endif
+ .ifndef IMASK
+ IMASK=0
+ .endif
+ .ifndef IKVM_SKIP
+ IKVM_SKIP=0
+ .endif
+ .ifndef IKVM_REAL
+ IKVM_REAL=0
+ .endif
+ .ifndef IKVM_VIRT
+ IKVM_VIRT=0
+ .endif
+ .ifndef ISTACK
+ ISTACK=1
+ .endif
+ .ifndef IRECONCILE
+ IRECONCILE=1
+ .endif
+ .ifndef IKUAP
+ IKUAP=1
+ .endif
.endm
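
A hedged C model of the defaulting performed by do_define_int may help; the struct below is purely illustrative (the real parameters are assembler symbols, not a C struct), with each field annotated with its default:

    /* Illustrative model only: defaults applied by do_define_int. */
    #define PACA_EXGEN 0               /* illustrative save-area id */

    struct int_define {
        unsigned int vec;              /* IVEC: no default, must be set */
        int hsrr;                      /* IHSRR=0 */
        int hsrr_if_hvmode;            /* IHSRR_IF_HVMODE=0 */
        int area;                      /* IAREA=PACA_EXGEN */
        int virt;                      /* IVIRT=1 */
        int iside;                     /* IISIDE=0 */
        int dar;                       /* IDAR=0 */
        int dsisr;                     /* IDSISR=0 */
        int set_ri;                    /* ISET_RI=1 */
        int branch_to_common;          /* IBRANCH_TO_COMMON=1 */
        int realmode_common;           /* IREALMODE_COMMON=0 */
        unsigned int mask;             /* IMASK=0 */
        int kvm_skip;                  /* IKVM_SKIP=0 */
        int kvm_real;                  /* IKVM_REAL=0 */
        int kvm_virt;                  /* IKVM_VIRT=0 */
        int stack;                     /* ISTACK=1 */
        int reconcile;                 /* IRECONCILE=1 */
        int kuap;                      /* IKUAP=1 */
    };

    /* Mirrors the .error checks: IVEC is mandatory, and
     * IREALMODE_COMMON=1 requires IBRANCH_TO_COMMON=1. */
    static int int_define_valid(const struct int_define *d)
    {
        return d->vec != 0 &&
               !(d->realmode_common && !d->branch_to_common);
    }
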
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
- * If hv is possible, interrupts come into to the hv version
- * of the kvmppc_interrupt code, which then jumps to the PR handler,
- * kvmppc_interrupt_pr, if the guest is a PR guest.
+ * All interrupts which set HSRR registers, as well as SRESET and MCE and
+ * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
+ * so they all generally need to test whether they were taken in guest context.
+ *
+ * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
+ * taken with MSR[HV]=0.
+ *
+ * Interrupts which set SRR registers (with the above exceptions) do not
+ * elevate to MSR[HV]=1 mode, though most can be taken when running with
+ * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
+ * not need to test whether a guest is running because they get delivered to
+ * the guest directly, including nested HV KVM guests.
+ *
+ * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
+ * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
+ * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
+ * delivered to the real-mode entry point, therefore such interrupts only test
+ * KVM in their real mode handlers, and only when PR KVM is possible.
+ *
+ * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
+ * delivered in real-mode when the MMU is in hash mode because the MMU
+ * registers are not set appropriately to translate host addresses. In nested
+ * radix mode these can be delivered in virt-mode as the host translations are
+ * used implicitly (see: effective LPID, effective PID).
+ */
+
+/*
+ * If an interrupt is taken while a guest is running, it is immediately routed
+ * to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go first
+ * to kvmppc_interrupt_hv, which handles the PR guest case.
*/
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
-.macro KVMTEST name, hsrr, n
+.macro KVMTEST name
lbz r10,HSTATE_IN_GUEST(r13)
cmpwi r10,0
bne \name\()_kvm
.endm
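
In C terms, KVMTEST reduces to a single guarded branch; this hedged sketch uses a plain variable and a function pointer in place of the paca field and the generated label:

    /* Illustrative model only: divert to the KVM handler when the
     * interrupt arrived while a guest was running on this CPU. */
    static unsigned char hstate_in_guest;  /* stands in for
                                              HSTATE_IN_GUEST(r13) */

    static void kvmtest(void (*name_kvm)(void))
    {
        if (hstate_in_guest != 0)
            name_kvm();                    /* bne \name\()_kvm */
    }
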
-.macro KVM_HANDLER vec, hsrr, area, skip
- .if \skip
+.macro GEN_KVM name
+ .balign IFETCH_ALIGN_BYTES
+\name\()_kvm:
+
+ .if IKVM_SKIP
cmpwi r10,KVM_GUEST_MODE_SKIP
beq 89f
.else
-BEGIN_FTR_SECTION_NESTED(947)
- ld r10,\area+EX_CFAR(r13)
+BEGIN_FTR_SECTION
+ ld r10,IAREA+EX_CFAR(r13)
std r10,HSTATE_CFAR(r13)
-END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.endif
-BEGIN_FTR_SECTION_NESTED(948)
- ld r10,\area+EX_PPR(r13)
+ ld r10,PACA_EXGEN+EX_CTR(r13)
+ mtctr r10
+BEGIN_FTR_SECTION
+ ld r10,IAREA+EX_PPR(r13)
std r10,HSTATE_PPR(r13)
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
- ld r10,\area+EX_R10(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ld r11,IAREA+EX_R11(r13)
+ ld r12,IAREA+EX_R12(r13)
std r12,HSTATE_SCRATCH0(r13)
sldi r12,r9,32
+ ld r9,IAREA+EX_R9(r13)
+ ld r10,IAREA+EX_R10(r13)
/* HSRR variants have the 0x2 bit added to their trap number */
- .if \hsrr == EXC_HV_OR_STD
+ .if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
- ori r12,r12,(\vec + 0x2)
+ ori r12,r12,(IVEC + 0x2)
FTR_SECTION_ELSE
- ori r12,r12,(\vec)
+ ori r12,r12,(IVEC)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- ori r12,r12,(\vec + 0x2)
+ .elseif IHSRR
+ ori r12,r12,(IVEC + 0x2)
.else
- ori r12,r12,(\vec)
+ ori r12,r12,(IVEC)
.endif
-
-#ifdef CONFIG_RELOCATABLE
- /*
- * KVM requires __LOAD_FAR_HANDLER beause kvmppc_interrupt lives
- * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
- * to be saved in HSTATE_SCRATCH1.
- */
- mfctr r9
- std r9,HSTATE_SCRATCH1(r13)
- __LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
- mtctr r9
- ld r9,\area+EX_R9(r13)
- bctr
-#else
- ld r9,\area+EX_R9(r13)
b kvmppc_interrupt
-#endif
-
- .if \skip
+ .if IKVM_SKIP
89: mtocrf 0x80,r9
- ld r9,\area+EX_R9(r13)
- ld r10,\area+EX_R10(r13)
- .if \hsrr == EXC_HV_OR_STD
+ ld r10,PACA_EXGEN+EX_CTR(r13)
+ mtctr r10
+ ld r9,IAREA+EX_R9(r13)
+ ld r10,IAREA+EX_R10(r13)
+ ld r11,IAREA+EX_R11(r13)
+ ld r12,IAREA+EX_R12(r13)
+ .if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
b kvmppc_skip_Hinterrupt
FTR_SECTION_ELSE
b kvmppc_skip_interrupt
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
+ .elseif IHSRR
b kvmppc_skip_Hinterrupt
.else
b kvmppc_skip_interrupt
@@ -284,107 +319,12 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
.endm
#else
-.macro KVMTEST name, hsrr, n
+.macro KVMTEST name
.endm
-.macro KVM_HANDLER name, vec, hsrr, area, skip
+.macro GEN_KVM name
.endm
#endif
-.macro INT_SAVE_SRR_AND_JUMP label, hsrr, set_ri
- ld r10,PACAKMSR(r13) /* get MSR value for kernel */
- .if ! \set_ri
- xori r10,r10,MSR_RI /* Clear MSR_RI */
- .endif
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- mtspr SPRN_HSRR1,r10
- FTR_SECTION_ELSE
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- mtspr SPRN_SRR1,r10
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- mtspr SPRN_HSRR1,r10
- .else
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- mtspr SPRN_SRR1,r10
- .endif
- LOAD_HANDLER(r10, \label\())
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mtspr SPRN_HSRR0,r10
- HRFI_TO_KERNEL
- FTR_SECTION_ELSE
- mtspr SPRN_SRR0,r10
- RFI_TO_KERNEL
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mtspr SPRN_HSRR0,r10
- HRFI_TO_KERNEL
- .else
- mtspr SPRN_SRR0,r10
- RFI_TO_KERNEL
- .endif
- b . /* prevent speculative execution */
-.endm
-
-/* INT_SAVE_SRR_AND_JUMP works for real or virt, this is faster but virt only */
-.macro INT_VIRT_SAVE_SRR_AND_JUMP label, hsrr
-#ifdef CONFIG_RELOCATABLE
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- FTR_SECTION_ELSE
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- .else
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- .endif
- LOAD_HANDLER(r12, \label\())
- mtctr r12
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- FTR_SECTION_ELSE
- mfspr r12,SPRN_SRR1 /* and HSRR1 */
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- .else
- mfspr r12,SPRN_SRR1 /* and HSRR1 */
- .endif
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
- bctr
-#else
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- FTR_SECTION_ELSE
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- .else
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- .endif
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
- b \label
-#endif
-.endm
-
/*
* This is the BOOK3S interrupt entry code macro.
*
@@ -405,14 +345,41 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
* - Fall through and continue executing in real, unrelocated mode.
* This is done if early=2.
*/
-.macro INT_HANDLER name, vec, ool=0, early=0, virt=0, hsrr=0, area=PACA_EXGEN, ri=1, dar=0, dsisr=0, bitmask=0, kvm=0
+
+.macro GEN_BRANCH_TO_COMMON name, virt
+ .if IREALMODE_COMMON
+ LOAD_HANDLER(r10, \name\()_common)
+ mtctr r10
+ bctr
+ .else
+ .if \virt
+#ifndef CONFIG_RELOCATABLE
+ b \name\()_common_virt
+#else
+ LOAD_HANDLER(r10, \name\()_common_virt)
+ mtctr r10
+ bctr
+#endif
+ .else
+ LOAD_HANDLER(r10, \name\()_common_real)
+ mtctr r10
+ bctr
+ .endif
+ .endif
+.endm
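
The decision tree in GEN_BRANCH_TO_COMMON can be sketched in C as below; the three function pointers stand in for the generated _common, _common_virt and _common_real labels, and the indirect calls model the mtctr/bctr sequences:

    /* Illustrative model only. */
    static void gen_branch_to_common(int realmode_common, int virt,
                                     void (*common)(void),
                                     void (*common_virt)(void),
                                     void (*common_real)(void))
    {
        if (realmode_common)
            common();           /* LOAD_HANDLER + mtctr + bctr */
        else if (virt)
            common_virt();      /* direct branch if !RELOCATABLE */
        else
            common_real();      /* LOAD_HANDLER + mtctr + bctr */
    }
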
+
+.macro GEN_INT_ENTRY name, virt, ool=0
SET_SCRATCH0(r13) /* save r13 */
GET_PACA(r13)
- std r9,\area\()+EX_R9(r13) /* save r9 */
- OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
+ std r9,IAREA+EX_R9(r13) /* save r9 */
+BEGIN_FTR_SECTION
+ mfspr r9,SPRN_PPR
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
HMT_MEDIUM
- std r10,\area\()+EX_R10(r13) /* save r10 - r12 */
- OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+ std r10,IAREA+EX_R10(r13) /* save r10 - r12 */
+BEGIN_FTR_SECTION
+ mfspr r10,SPRN_CFAR
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.if \ool
.if !\virt
b tramp_real_\name
@@ -425,47 +392,18 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
.endif
.endif
- OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
- OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ std r9,IAREA+EX_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+BEGIN_FTR_SECTION
+ std r10,IAREA+EX_CFAR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
INTERRUPT_TO_KERNEL
- SAVE_CTR(r10, \area\())
+ mfctr r10
+ std r10,IAREA+EX_CTR(r13)
mfcr r9
- .if \kvm
- KVMTEST \name \hsrr \vec
- .endif
- .if \bitmask
- lbz r10,PACAIRQSOFTMASK(r13)
- andi. r10,r10,\bitmask
- /* Associate vector numbers with bits in paca->irq_happened */
- .if \vec == 0x500 || \vec == 0xea0
- li r10,PACA_IRQ_EE
- .elseif \vec == 0x900
- li r10,PACA_IRQ_DEC
- .elseif \vec == 0xa00 || \vec == 0xe80
- li r10,PACA_IRQ_DBELL
- .elseif \vec == 0xe60
- li r10,PACA_IRQ_HMI
- .elseif \vec == 0xf00
- li r10,PACA_IRQ_PMI
- .else
- .abort "Bad maskable vector"
- .endif
-
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- bne masked_Hinterrupt
- FTR_SECTION_ELSE
- bne masked_interrupt
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- bne masked_Hinterrupt
- .else
- bne masked_interrupt
- .endif
- .endif
-
- std r11,\area\()+EX_R11(r13)
- std r12,\area\()+EX_R12(r13)
+ std r11,IAREA+EX_R11(r13)
+ std r12,IAREA+EX_R12(r13)
/*
* DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
@@ -473,49 +411,134 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
* not recoverable if they are live.
*/
GET_SCRATCH0(r10)
- std r10,\area\()+EX_R13(r13)
- .if \dar
- .if \hsrr
+ std r10,IAREA+EX_R13(r13)
+ .if IDAR && !IISIDE
+ .if IHSRR
mfspr r10,SPRN_HDAR
.else
mfspr r10,SPRN_DAR
.endif
- std r10,\area\()+EX_DAR(r13)
+ std r10,IAREA+EX_DAR(r13)
.endif
- .if \dsisr
- .if \hsrr
+ .if IDSISR && !IISIDE
+ .if IHSRR
mfspr r10,SPRN_HDSISR
.else
mfspr r10,SPRN_DSISR
.endif
- stw r10,\area\()+EX_DSISR(r13)
+ stw r10,IAREA+EX_DSISR(r13)
.endif
- .if \early == 2
- /* nothing more */
- .elseif \early
- mfctr r10 /* save ctr, even for !RELOCATABLE */
- BRANCH_TO_C000(r11, \name\()_early_common)
- .elseif !\virt
- INT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr, \ri
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ mfspr r12,SPRN_HSRR1 /* and HSRR1 */
+ FTR_SECTION_ELSE
+ mfspr r11,SPRN_SRR0 /* save SRR0 */
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ mfspr r12,SPRN_HSRR1 /* and HSRR1 */
.else
- INT_VIRT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr
+ mfspr r11,SPRN_SRR0 /* save SRR0 */
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+ .endif
+
+ .if IBRANCH_TO_COMMON
+ GEN_BRANCH_TO_COMMON \name \virt
.endif
+
.if \ool
.popsection
.endif
.endm
/*
- * On entry r13 points to the paca, r9-r13 are saved in the paca,
- * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
- * SRR1, and relocation is on.
+ * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
+ * entry, except in the case of the real-mode handlers which require
+ * __GEN_REALMODE_COMMON_ENTRY.
*
- * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
- * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
+ * This switches to virtual mode and sets MSR[RI].
+ */
+.macro __GEN_COMMON_ENTRY name
+DEFINE_FIXED_SYMBOL(\name\()_common_real)
+\name\()_common_real:
+ .if IKVM_REAL
+ KVMTEST \name
+ .endif
+
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ /* MSR[RI] is clear iff using SRR regs */
+ .if IHSRR == EXC_HV_OR_STD
+ BEGIN_FTR_SECTION
+ xori r10,r10,MSR_RI
+ END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
+ .elseif ! IHSRR
+ xori r10,r10,MSR_RI
+ .endif
+ mtmsrd r10
+
+ .if IVIRT
+ .if IKVM_VIRT
+ b 1f /* skip the virt test coming from real */
+ .endif
+
+ .balign IFETCH_ALIGN_BYTES
+DEFINE_FIXED_SYMBOL(\name\()_common_virt)
+\name\()_common_virt:
+ .if IKVM_VIRT
+ KVMTEST \name
+1:
+ .endif
+ .endif /* IVIRT */
+.endm
+
+/*
+ * Don't switch to virt mode. Used for early MCE and HMI handlers that
+ * want to run in real mode.
*/
-.macro INT_COMMON vec, area, stack, kuap, reconcile, dar, dsisr
- .if \stack
+.macro __GEN_REALMODE_COMMON_ENTRY name
+DEFINE_FIXED_SYMBOL(\name\()_common_real)
+\name\()_common_real:
+ .if IKVM_REAL
+ KVMTEST \name
+ .endif
+.endm
+
+.macro __GEN_COMMON_BODY name
+ .if IMASK
+ lbz r10,PACAIRQSOFTMASK(r13)
+ andi. r10,r10,IMASK
+ /* Associate vector numbers with bits in paca->irq_happened */
+ .if IVEC == 0x500 || IVEC == 0xea0
+ li r10,PACA_IRQ_EE
+ .elseif IVEC == 0x900
+ li r10,PACA_IRQ_DEC
+ .elseif IVEC == 0xa00 || IVEC == 0xe80
+ li r10,PACA_IRQ_DBELL
+ .elseif IVEC == 0xe60
+ li r10,PACA_IRQ_HMI
+ .elseif IVEC == 0xf00
+ li r10,PACA_IRQ_PMI
+ .else
+ .abort "Bad maskable vector"
+ .endif
+
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ bne masked_Hinterrupt
+ FTR_SECTION_ELSE
+ bne masked_interrupt
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ bne masked_Hinterrupt
+ .else
+ bne masked_interrupt
+ .endif
+ .endif
+
+ .if ISTACK
andi. r10,r12,MSR_PR /* See if coming from user */
mr r10,r1 /* Save r1 */
subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */
@@ -532,54 +555,67 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
- .if \stack
- .if \kuap
+ .if ISET_RI
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Set MSR_RI */
+ .endif
+
+ .if ISTACK
+ .if IKUAP
kuap_save_amr_and_lock r9, r10, cr1, cr0
.endif
beq 101f /* if from kernel mode */
ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
- SAVE_PPR(\area, r9)
+BEGIN_FTR_SECTION
+ ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */
+ std r9,_PPR(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
.else
- .if \kuap
+ .if IKUAP
kuap_save_amr_and_lock r9, r10, cr1
.endif
.endif
/* Save original regs values from save area to stack frame. */
- ld r9,\area+EX_R9(r13) /* move r9, r10 to stackframe */
- ld r10,\area+EX_R10(r13)
+ ld r9,IAREA+EX_R9(r13) /* move r9, r10 to stackframe */
+ ld r10,IAREA+EX_R10(r13)
std r9,GPR9(r1)
std r10,GPR10(r1)
- ld r9,\area+EX_R11(r13) /* move r11 - r13 to stackframe */
- ld r10,\area+EX_R12(r13)
- ld r11,\area+EX_R13(r13)
+ ld r9,IAREA+EX_R11(r13) /* move r11 - r13 to stackframe */
+ ld r10,IAREA+EX_R12(r13)
+ ld r11,IAREA+EX_R13(r13)
std r9,GPR11(r1)
std r10,GPR12(r1)
std r11,GPR13(r1)
- .if \dar
- .if \dar == 2
+
+ SAVE_NVGPRS(r1)
+
+ .if IDAR
+ .if IISIDE
ld r10,_NIP(r1)
.else
- ld r10,\area+EX_DAR(r13)
+ ld r10,IAREA+EX_DAR(r13)
.endif
std r10,_DAR(r1)
.endif
- .if \dsisr
- .if \dsisr == 2
+
+ .if IDSISR
+ .if IISIDE
ld r10,_MSR(r1)
lis r11,DSISR_SRR1_MATCH_64S@h
and r10,r10,r11
.else
- lwz r10,\area+EX_DSISR(r13)
+ lwz r10,IAREA+EX_DSISR(r13)
.endif
std r10,_DSISR(r1)
.endif
-BEGIN_FTR_SECTION_NESTED(66)
- ld r10,\area+EX_CFAR(r13)
+
+BEGIN_FTR_SECTION
+ ld r10,IAREA+EX_CFAR(r13)
std r10,ORIG_GPR3(r1)
-END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66)
- GET_CTR(r10, \area)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r10,IAREA+EX_CTR(r13)
std r10,_CTR(r1)
std r2,GPR2(r1) /* save r2 in stackframe */
SAVE_4GPRS(3, r1) /* save r3 - r6 in stackframe */
@@ -591,32 +627,42 @@ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66)
mfspr r11,SPRN_XER /* save XER in stackframe */
std r10,SOFTE(r1)
std r11,_XER(r1)
- li r9,(\vec)+1
+ li r9,IVEC
std r9,_TRAP(r1) /* set trap number */
li r10,0
ld r11,exception_marker@toc(r2)
std r10,RESULT(r1) /* clear regs->result */
std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
- .if \stack
+ .if ISTACK
ACCOUNT_STOLEN_TIME
.endif
- .if \reconcile
+ .if IRECONCILE
RECONCILE_IRQ_STATE(r10, r11)
.endif
.endm
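
Focusing on the soft-mask test at the top of __GEN_COMMON_BODY, this hedged C sketch shows the vector-to-bit mapping and the masked-path decision; the bit values are illustrative, not the kernel's:

    /* Illustrative model only. */
    #include <stdint.h>

    enum paca_irq_bit {                 /* illustrative values */
        PACA_IRQ_DBELL = 0x02,
        PACA_IRQ_EE    = 0x04,
        PACA_IRQ_DEC   = 0x08,
        PACA_IRQ_HMI   = 0x10,
        PACA_IRQ_PMI   = 0x20,
    };

    /* Associate vector numbers with bits in paca->irq_happened. */
    static int irq_happened_bit(unsigned int vec)
    {
        switch (vec) {
        case 0x500: case 0xea0: return PACA_IRQ_EE;
        case 0x900:             return PACA_IRQ_DEC;
        case 0xa00: case 0xe80: return PACA_IRQ_DBELL;
        case 0xe60:             return PACA_IRQ_HMI;
        case 0xf00:             return PACA_IRQ_PMI;
        default:                return -1;  /* "Bad maskable vector" */
        }
    }

    /* True when the interrupt must take the masked_interrupt path. */
    static int soft_masked(uint8_t irq_soft_mask, unsigned int imask)
    {
        return (irq_soft_mask & imask) != 0;
    }
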
/*
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ *
+ * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
+ * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
+ */
+.macro GEN_COMMON name
+ __GEN_COMMON_ENTRY \name
+ __GEN_COMMON_BODY \name
+.endm
+
+/*
* Restore all registers including H/SRR0/1 saved in a stack frame of a
* standard exception.
*/
-.macro EXCEPTION_RESTORE_REGS hsrr
+.macro EXCEPTION_RESTORE_REGS hsrr=0
/* Move original SRR0 and SRR1 into the respective regs */
ld r9,_MSR(r1)
- .if \hsrr == EXC_HV_OR_STD
- .error "EXC_HV_OR_STD Not implemented for EXCEPTION_RESTORE_REGS"
- .endif
.if \hsrr
mtspr SPRN_HSRR1,r9
.else
@@ -670,28 +716,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#define FINISH_NAP
#endif
-#define EXC_COMMON(name, realvec, hdlr) \
- EXC_COMMON_BEGIN(name); \
- INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ; \
- bl save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b ret_from_except
-
-/*
- * Like EXC_COMMON, but for exceptions that can occur in the idle task and
- * therefore need the special idle handling (finish nap and runlatch)
- */
-#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
- EXC_COMMON_BEGIN(name); \
- INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ; \
- FINISH_NAP; \
- RUNLATCH_ON; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b ret_from_except_lite
-
-
/*
* There are a few constraints to be concerned with.
* - Real mode exceptions code/data must be located at their physical location.
@@ -778,6 +802,53 @@ __start_interrupts:
EXC_VIRT_NONE(0x4000, 0x100)
+/**
+ * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
+ * This is a non-maskable, asynchronous interrupt always taken in real-mode.
+ * It is caused by:
+ * - Wake from power-saving state, on powernv.
+ * - An NMI from another CPU, triggered by firmware or hypercall.
+ * - As crash/debug signal injected from BMC, firmware or hypervisor.
+ *
+ * Handling:
+ * Power-save wakeup is the only performance-critical path, so this is
+ * determined as quickly as possible first. In this case volatile registers
+ * can be discarded and SPRs like CFAR don't need to be read.
+ *
+ * If not a powersave wakeup, then it's run as a regular interrupt; however,
+ * it uses its own stack and PACA save area to preserve the regular kernel
+ * environment for debugging.
+ *
+ * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
+ * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
+ * correct to switch to virtual mode to run the regular interrupt handler
+ * because the interrupt may have been taken while the MMU was in a bad state
+ * (e.g., SLB is clear).
+ *
+ * FWNMI:
+ * PAPR specifies a "fwnmi" facility which sends the sreset to a different
+ * entry point with a different register set up. Some hypervisors will
+ * send the sreset to 0x100 in the guest if it is not fwnmi capable.
+ *
+ * KVM:
+ * Unlike most SRR interrupts, this may be taken by the host while executing
+ * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
+ * mode and then raise the sreset.
+ */
+INT_DEFINE_BEGIN(system_reset)
+ IVEC=0x100
+ IAREA=PACA_EXNMI
+ IVIRT=0 /* no virt entry point */
+ /*
+ * MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
+ * being used, so a nested NMI exception would corrupt them.
+ */
+ ISET_RI=0
+ ISTACK=0
+ IRECONCILE=0
+ IKVM_REAL=1
+INT_DEFINE_END(system_reset)
+
EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -815,11 +886,8 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
- INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0, kvm=1
+ GEN_INT_ENTRY system_reset, virt=0
/*
- * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
- * being used, so a nested NMI exception would corrupt it.
- *
* In theory, we should not enable relocation here if it was disabled
* in SRR1, because the MMU may not be configured to support it (e.g.,
* SLB may have been cleared). In practice, there should only be a few
@@ -828,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
*/
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)
-INT_KVM_HANDLER system_reset 0x100, EXC_STD, PACA_EXNMI, 0
#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
@@ -843,12 +910,14 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
* Vectors for the FWNMI option. Share common code.
*/
TRAMP_REAL_BEGIN(system_reset_fwnmi)
- /* See comment at system_reset exception, don't turn on RI */
- INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0
+ /* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
+ __IKVM_REAL(system_reset)=0
+ GEN_INT_ENTRY system_reset, virt=0
#endif /* CONFIG_PPC_PSERIES */
EXC_COMMON_BEGIN(system_reset_common)
+ __GEN_COMMON_ENTRY system_reset
/*
* Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
* to recover, but nested NMI will notice in_nmi and not recover
@@ -864,21 +933,21 @@ EXC_COMMON_BEGIN(system_reset_common)
mr r10,r1
ld r1,PACA_NMI_EMERG_SP(r13)
subi r1,r1,INT_FRAME_SIZE
- INT_COMMON 0x100, PACA_EXNMI, 0, 1, 0, 0, 0
- bl save_nvgprs
+ __GEN_COMMON_BODY system_reset
/*
- * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
+ * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
* the right thing. We do not want to reconcile because that goes
* through irq tracing which we don't want in NMI.
*
- * Save PACAIRQHAPPENED because some code will do a hard disable
- * (e.g., xmon). So we want to restore this back to where it was
- * when we return. DAR is unused in the stack, so save it there.
+ * Save PACAIRQHAPPENED to _DAR (otherwise unused), and set HARD_DIS
+ * as we are running with MSR[EE]=0.
*/
li r10,IRQS_ALL_DISABLED
stb r10,PACAIRQSOFTMASK(r13)
lbz r10,PACAIRQHAPPENED(r13)
std r10,_DAR(r1)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
addi r3,r1,STACK_FRAME_OVERHEAD
bl system_reset_exception
@@ -902,28 +971,95 @@ EXC_COMMON_BEGIN(system_reset_common)
ld r10,SOFTE(r1)
stb r10,PACAIRQSOFTMASK(r13)
- EXCEPTION_RESTORE_REGS EXC_STD
+ EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
+ GEN_KVM system_reset
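
A hedged C sketch of the irq-state bookkeeping in system_reset_common follows; saving to and restoring from the frame's _DAR and SOFTE slots is modelled with locals, and the constant values are illustrative:

    /* Illustrative model only. */
    #include <stdint.h>

    #define IRQS_ALL_DISABLED 0x03u     /* illustrative value */
    #define PACA_IRQ_HARD_DIS 0x01u     /* illustrative value */

    struct fake_paca { uint8_t irq_soft_mask, irq_happened; };

    static void sreset_nmi(struct fake_paca *paca,
                           void (*system_reset_exception)(void))
    {
        uint8_t saved_soft = paca->irq_soft_mask;     /* SOFTE slot */
        uint8_t saved_happened = paca->irq_happened;  /* _DAR slot */

        /* No reconcile: irq tracing must not run in an NMI. */
        paca->irq_soft_mask = IRQS_ALL_DISABLED;
        paca->irq_happened |= PACA_IRQ_HARD_DIS;      /* MSR[EE]=0 */

        system_reset_exception();

        paca->irq_happened = saved_happened;
        paca->irq_soft_mask = saved_soft;
    }
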
-EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
- INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1
+
+/**
+ * Interrupt 0x200 - Machine Check Interrupt (MCE).
+ * This is a non-maskable interrupt always taken in real-mode. It can be
+ * synchronous or asynchronous, caused by hardware or software, and it may be
+ * taken in a power-saving state.
+ *
+ * Handling:
+ * Similarly to system reset, this uses its own stack and PACA save area;
+ * the difference is that re-entrancy is allowed on the machine check stack.
+ *
+ * machine_check_early is run in real mode, and carefully decodes the
+ * machine check and tries to handle it (e.g., flush the SLB if there was an
+ * error detected there), determines if it was recoverable and logs the
+ * event.
+ *
+ * This early code does not "reconcile" irq soft-mask state like SRESET or
+ * regular interrupts do, so irqs_disabled() among other things may not work
+ * properly (irq disable/enable already doesn't work because irq tracing can
+ * not work in real mode).
+ *
+ * Then, depending on the execution context when the interrupt is taken, there
+ * are 3 main actions:
+ * - Executing in kernel mode. The event is queued with irq_work, which means
+ * it is handled when it is next safe to do so (i.e., the kernel has enabled
+ * interrupts), which could be immediately when the interrupt returns. This
+ * avoids nasty issues like switching to virtual mode when the MMU is in a
+ * bad state, or when executing OPAL code. (SRESET is exposed to such issues,
+ * but it has different priorities). Check to see if the CPU was in power
+ * save, and return via the wake up code if it was.
+ *
+ * - Executing in user mode. machine_check_exception is run like a normal
+ * interrupt handler, which processes the data generated by the early handler.
+ *
+ * - Executing in guest mode. The interrupt is run with its KVM test, and
+ * branches to KVM to deal with. KVM may queue the event for the host
+ * to report later.
+ *
+ * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
+ * or SCRATCH0 is in use, it may cause a crash.
+ *
+ * KVM:
+ * See SRESET.
+ */
+INT_DEFINE_BEGIN(machine_check_early)
+ IVEC=0x200
+ IAREA=PACA_EXMC
+ IVIRT=0 /* no virt entry point */
+ IREALMODE_COMMON=1
/*
* MSR_RI is not enabled, because PACA_EXMC is being used, so a
* nested machine check corrupts it. machine_check_common enables
* MSR_RI.
*/
+ ISET_RI=0
+ ISTACK=0
+ IDAR=1
+ IDSISR=1
+ IRECONCILE=0
+ IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
+INT_DEFINE_END(machine_check_early)
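
The three actions listed in the comment above can be summarized in this hedged C sketch; every name below is a stand-in chosen for illustration, not the kernel's actual call graph:

    /* Illustrative model only: what happens after machine_check_early
     * has decoded and logged the event, by interrupted context. */
    struct pt_regs;
    extern int interrupted_guest;          /* stand-in predicates */
    extern int interrupted_user;
    void route_to_kvm(struct pt_regs *);   /* stand-in actions */
    void machine_check_exception(struct pt_regs *);
    void queue_mce_irq_work(struct pt_regs *);

    static void mce_dispatch(struct pt_regs *regs)
    {
        if (interrupted_guest)
            route_to_kvm(regs);            /* KVM may report later */
        else if (interrupted_user)
            machine_check_exception(regs); /* normal handler */
        else
            queue_mce_irq_work(regs);      /* run when next safe */
    }
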
+
+INT_DEFINE_BEGIN(machine_check)
+ IVEC=0x200
+ IAREA=PACA_EXMC
+ IVIRT=0 /* no virt entry point */
+ ISET_RI=0
+ IDAR=1
+ IDSISR=1
+ IKVM_SKIP=1
+ IKVM_REAL=1
+INT_DEFINE_END(machine_check)
+
+EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
+ GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
/* See comment at machine_check exception, don't turn on RI */
- INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1
+ GEN_INT_ENTRY machine_check_early, virt=0
#endif
-INT_KVM_HANDLER machine_check 0x200, EXC_STD, PACA_EXMC, 1
-
#define MACHINE_CHECK_HANDLER_WINDUP \
/* Clear MSR_RI before setting SRR0 and SRR1. */\
li r9,0; \
@@ -932,12 +1068,10 @@ INT_KVM_HANDLER machine_check 0x200, EXC_STD, PACA_EXMC, 1
lhz r12,PACA_IN_MCE(r13); \
subi r12,r12,1; \
sth r12,PACA_IN_MCE(r13); \
- EXCEPTION_RESTORE_REGS EXC_STD
+ EXCEPTION_RESTORE_REGS
EXC_COMMON_BEGIN(machine_check_early_common)
- mtctr r10 /* Restore ctr */
- mfspr r11,SPRN_SRR0
- mfspr r12,SPRN_SRR1
+ __GEN_REALMODE_COMMON_ENTRY machine_check_early
/*
* Switch to mc_emergency stack and handle re-entrancy (we limit
@@ -974,8 +1108,7 @@ EXC_COMMON_BEGIN(machine_check_early_common)
bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- /* We don't touch AMR here, we never go to virtual mode */
- INT_COMMON 0x200, PACA_EXMC, 0, 0, 0, 1, 1
+ __GEN_COMMON_BODY machine_check_early
BEGIN_FTR_SECTION
bl enable_machine_check
@@ -983,7 +1116,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
li r10,MSR_RI
mtmsrd r10,1
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
std r3,RESULT(r1) /* Save result */
@@ -1063,23 +1195,25 @@ BEGIN_FTR_SECTION
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MACHINE_CHECK_HANDLER_WINDUP
- /* See comment at machine_check exception, don't turn on RI */
- INT_HANDLER machine_check, 0x200, area=PACA_EXMC, ri=0, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY machine_check, virt=0
EXC_COMMON_BEGIN(machine_check_common)
/*
* Machine check is different because we use a different
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
- INT_COMMON 0x200, PACA_EXMC, 1, 1, 1, 1, 1
+ GEN_COMMON machine_check
+
FINISH_NAP
/* Enable MSR_RI when finished with PACA_EXMC */
li r10,MSR_RI
mtmsrd r10,1
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
- b ret_from_except
+ b interrupt_return
+
+ GEN_KVM machine_check
+
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -1144,21 +1278,48 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
b .
+/**
+ * Interrupt 0x300 - Data Storage Interrupt (DSI).
+ * This is a synchronous interrupt generated due to a data access exception,
+ * e.g., a load or store which does not have a valid page table entry with
+ * permissions. DAWR matches also fault here, as do RC updates, and minor misc
+ * errors, e.g., copy/paste, AMO, certain invalid CI accesses, etc.
+ *
+ * Handling:
+ * - Hash MMU
+ * Go to do_hash_page first to see if the HPT can be filled from an entry in
+ * the Linux page table. Hash faults can hit in kernel mode in a fairly
+ * arbitrary state (e.g., interrupts disabled, locks held) when accessing
+ * "non-bolted" regions, e.g., vmalloc space. However these should always be
+ * backed by Linux page tables.
+ *
+ * If none is found, do a Linux page fault. Linux page faults can happen in
+ * kernel mode due to user copy operations of course.
+ *
+ * - Radix MMU
+ * The hardware loads from the Linux page table directly, so a fault goes
+ * immediately to Linux page fault.
+ *
+ * Conditions like DAWR match are handled on the way in to Linux page fault.
+ */
+INT_DEFINE_BEGIN(data_access)
+ IVEC=0x300
+ IDAR=1
+ IDSISR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_SKIP=1
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(data_access)
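
A hedged C sketch of the handling strategy described above; hash_page_refill and the other names are illustrative stand-ins:

    /* Illustrative model only: hash first tries to refill the HPT
     * from the Linux page table; radix faults go straight to the
     * Linux page fault path. */
    struct pt_regs;
    int hash_page_refill(unsigned long dar, unsigned long dsisr);
    void handle_page_fault(struct pt_regs *, unsigned long dar,
                           unsigned long dsisr);

    static void data_storage(struct pt_regs *regs, unsigned long dar,
                             unsigned long dsisr, int radix_mmu)
    {
        if (!radix_mmu && hash_page_refill(dar, dsisr) == 0)
            return;                  /* HPT entry installed */
        handle_page_fault(regs, dar, dsisr);
    }
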
+
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
- INT_HANDLER data_access, 0x300, ool=1, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
- INT_HANDLER data_access, 0x300, virt=1, dar=1, dsisr=1
+ GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
-INT_KVM_HANDLER data_access, 0x300, EXC_STD, PACA_EXGEN, 1
EXC_COMMON_BEGIN(data_access_common)
- /*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- * EX_DAR and EX_DSISR have saved DAR/DSISR
- */
- INT_COMMON 0x300, PACA_EXGEN, 1, 1, 1, 1, 1
+ GEN_COMMON data_access
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
@@ -1169,16 +1330,46 @@ MMU_FTR_SECTION_ELSE
b handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+ GEN_KVM data_access
+
+
+/**
+ * Interrupt 0x380 - Data Segment Interrupt (DSLB).
+ * This is a synchronous interrupt in response to an MMU fault missing SLB
+ * entry for HPT, or an address outside RPT translation range.
+ *
+ * Handling:
+ * - HPT:
+ * This refills the SLB, or reports an access fault similarly to a bad page
+ * fault. When coming from user-mode, the SLB handler may access any kernel
+ * data, though it may itself take a DSLB. When coming from kernel mode,
+ * recursive faults must be avoided so access is restricted to the kernel
+ * image text/data, kernel stack, and any data allocated below
+ * ppc64_bolted_size (first segment). The kernel handler must avoid stomping
+ * on user-handler data structures.
+ *
+ * A dedicated save area EXSLB is used (XXX: but it actually need not be
+ * these days, we could use EXGEN).
+ */
+INT_DEFINE_BEGIN(data_access_slb)
+ IVEC=0x380
+ IAREA=PACA_EXSLB
+ IRECONCILE=0
+ IDAR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_SKIP=1
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(data_access_slb)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
- INT_HANDLER data_access_slb, 0x380, ool=1, area=PACA_EXSLB, dar=1, kvm=1
+ GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
- INT_HANDLER data_access_slb, 0x380, virt=1, area=PACA_EXSLB, dar=1
+ GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
-INT_KVM_HANDLER data_access_slb, 0x380, EXC_STD, PACA_EXSLB, 1
EXC_COMMON_BEGIN(data_access_slb_common)
- INT_COMMON 0x380, PACA_EXSLB, 1, 1, 0, 1, 0
+ GEN_COMMON data_access_slb
ld r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
@@ -1186,31 +1377,50 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_exception_return
+ b fast_interrupt_return
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
ld r4,_DAR(r1)
ld r5,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b ret_from_except
+ b interrupt_return
+
+ GEN_KVM data_access_slb
+/**
+ * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
+ * This is a synchronous interrupt in response to an MMU fault due to an
+ * instruction fetch.
+ *
+ * Handling:
+ * Similar to DSI, though in response to fetch. The faulting address is found
+ * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
+ */
+INT_DEFINE_BEGIN(instruction_access)
+ IVEC=0x400
+ IISIDE=1
+ IDAR=1
+ IDSISR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(instruction_access)
+
EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
- INT_HANDLER instruction_access, 0x400, kvm=1
+ GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
- INT_HANDLER instruction_access, 0x400, virt=1
+ GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
-INT_KVM_HANDLER instruction_access, 0x400, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(instruction_access_common)
- INT_COMMON 0x400, PACA_EXGEN, 1, 1, 1, 2, 2
+ GEN_COMMON instruction_access
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
@@ -1221,16 +1431,37 @@ MMU_FTR_SECTION_ELSE
b handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+ GEN_KVM instruction_access
+
+
+/**
+ * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
+ * This is a synchronous interrupt in response to an MMU fault due to an
+ * instruction fetch.
+ *
+ * Handling:
+ * Similar to DSLB, though in response to fetch. The faulting address is found
+ * in SRR0 (rather than DAR).
+ */
+INT_DEFINE_BEGIN(instruction_access_slb)
+ IVEC=0x480
+ IAREA=PACA_EXSLB
+ IRECONCILE=0
+ IISIDE=1
+ IDAR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(instruction_access_slb)
EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
- INT_HANDLER instruction_access_slb, 0x480, area=PACA_EXSLB, kvm=1
+ GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
- INT_HANDLER instruction_access_slb, 0x480, virt=1, area=PACA_EXSLB
+ GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
-INT_KVM_HANDLER instruction_access_slb, 0x480, EXC_STD, PACA_EXSLB, 0
EXC_COMMON_BEGIN(instruction_access_slb_common)
- INT_COMMON 0x480, PACA_EXSLB, 1, 1, 0, 2, 0
+ GEN_COMMON instruction_access_slb
ld r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
@@ -1238,54 +1469,125 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_exception_return
+ b fast_interrupt_return
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
ld r4,_DAR(r1)
ld r5,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b ret_from_except
+ b interrupt_return
+
+ GEN_KVM instruction_access_slb
+
+
+/**
+ * Interrupt 0x500 - External Interrupt.
+ * This is an asynchronous maskable interrupt in response to an "external
+ * exception" from the interrupt controller or hypervisor (e.g., device
+ * interrupt). It is maskable in hardware by clearing MSR[EE], and
+ * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
+ *
+ * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
+ * interrupts are delivered with HSRR registers; guests use SRRs, which
+ * requires IHSRR_IF_HVMODE.
+ *
+ * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
+ * external interrupts are delivered as Hypervisor Virtualization Interrupts
+ * rather than External Interrupts.
+ *
+ * Handling:
+ * This calls into the Linux IRQ handler. NVGPRs are not saved to reduce
+ * overhead; because the interrupt is asynchronous, the registers at the time
+ * it is taken are not important.
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, and clear MSR[EE] in the interrupted context.
+ */
+INT_DEFINE_BEGIN(hardware_interrupt)
+ IVEC=0x500
+ IHSRR_IF_HVMODE=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(hardware_interrupt)
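
The masked-path behaviour described above can be sketched in hedged C; the constants and set_dec-style helpers are illustrative stand-ins, and the returned MSR models the "clear MSR[EE] in the interrupted context" step:

    /* Illustrative model only: a soft-masked external interrupt is
     * noted for replay and MSR[EE] is cleared in the interrupted
     * context, since the line stays asserted until handled. */
    #include <stdint.h>

    #define PACA_IRQ_EE 0x04u           /* illustrative value */
    #define MSR_EE      0x8000u         /* illustrative value */

    struct fake_paca { uint8_t irq_happened; };

    static uint64_t masked_external(struct fake_paca *paca, uint64_t srr1)
    {
        paca->irq_happened |= PACA_IRQ_EE;      /* replay later */
        return srr1 & ~(uint64_t)MSR_EE;        /* return with EE clear */
    }
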
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
- INT_HANDLER hardware_interrupt, 0x500, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
- INT_HANDLER hardware_interrupt, 0x500, virt=1, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
-INT_KVM_HANDLER hardware_interrupt, 0x500, EXC_HV_OR_STD, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
+EXC_COMMON_BEGIN(hardware_interrupt_common)
+ GEN_COMMON hardware_interrupt
+ FINISH_NAP
+ RUNLATCH_ON
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_IRQ
+ b interrupt_return
+ GEN_KVM hardware_interrupt
+
+
+/**
+ * Interrupt 0x600 - Alignment Interrupt
+ * This is a synchronous interrupt in response to data alignment fault.
+ */
+INT_DEFINE_BEGIN(alignment)
+ IVEC=0x600
+ IDAR=1
+ IDSISR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(alignment)
EXC_REAL_BEGIN(alignment, 0x600, 0x100)
- INT_HANDLER alignment, 0x600, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
- INT_HANDLER alignment, 0x600, virt=1, dar=1, dsisr=1
+ GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
-INT_KVM_HANDLER alignment, 0x600, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(alignment_common)
- INT_COMMON 0x600, PACA_EXGEN, 1, 1, 1, 1, 1
- bl save_nvgprs
+ GEN_COMMON alignment
addi r3,r1,STACK_FRAME_OVERHEAD
bl alignment_exception
- b ret_from_except
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ b interrupt_return
+
+ GEN_KVM alignment
+
+/**
+ * Interrupt 0x700 - Program Interrupt (program check).
+ * This is a synchronous interrupt in response to various instruction faults:
+ * traps, privilege errors, TM errors, floating point exceptions.
+ *
+ * Handling:
+ * This interrupt may use the "emergency stack" in some cases when being taken
+ * from kernel context, which complicates handling.
+ */
+INT_DEFINE_BEGIN(program_check)
+ IVEC=0x700
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(program_check)
EXC_REAL_BEGIN(program_check, 0x700, 0x100)
- INT_HANDLER program_check, 0x700, kvm=1
+ GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
- INT_HANDLER program_check, 0x700, virt=1
+ GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
-INT_KVM_HANDLER program_check, 0x700, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(program_check_common)
+ __GEN_COMMON_ENTRY program_check
+
/*
* It's possible to receive a TM Bad Thing type program check with
* userspace register values (in particular r1), but with SRR1 reporting
@@ -1310,28 +1612,47 @@ EXC_COMMON_BEGIN(program_check_common)
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- INT_COMMON 0x700, PACA_EXGEN, 0, 1, 1, 0, 0
+ __ISTACK(program_check)=0
+ __GEN_COMMON_BODY program_check
b 3f
2:
- INT_COMMON 0x700, PACA_EXGEN, 1, 1, 1, 0, 0
+ __ISTACK(program_check)=1
+ __GEN_COMMON_BODY program_check
3:
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
- b ret_from_except
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ b interrupt_return
+ GEN_KVM program_check
+
+
+/*
+ * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
+ * This is a synchronous interrupt in response to executing an fp instruction
+ * with MSR[FP]=0.
+ *
+ * Handling:
+ * This will load FP registers and enable the FP bit if coming from userspace,
+ * otherwise report a bad kernel use of FP.
+ */
+INT_DEFINE_BEGIN(fp_unavailable)
+ IVEC=0x800
+ IRECONCILE=0
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(fp_unavailable)
EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
- INT_HANDLER fp_unavailable, 0x800, kvm=1
+ GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
- INT_HANDLER fp_unavailable, 0x800, virt=1
+ GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
-INT_KVM_HANDLER fp_unavailable, 0x800, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(fp_unavailable_common)
- INT_COMMON 0x800, PACA_EXGEN, 1, 1, 0, 0, 0
+ GEN_COMMON fp_unavailable
bne 1f /* if from user, just load it up */
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_fp_unavailable_exception
@@ -1348,64 +1669,168 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl load_up_fpu
- b fast_exception_return
+ b fast_interrupt_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
- b ret_from_except
+ b interrupt_return
#endif
+ GEN_KVM fp_unavailable
+
+
+/**
+ * Interrupt 0x900 - Decrementer Interrupt.
+ * This is an asynchronous interrupt in response to a decrementer exception
+ * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
+ * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
+ * local_irq_disable()).
+ *
+ * Handling:
+ * This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
+ * in the interrupted context.
+ * If PPC_WATCHDOG is configured, the soft masked handler will actually set
+ * things back up to run soft_nmi_interrupt as a regular interrupt handler
+ * on the emergency stack.
+ */
+INT_DEFINE_BEGIN(decrementer)
+ IVEC=0x900
+ IMASK=IRQS_DISABLED
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(decrementer)
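
By contrast with the external interrupt, the masked decrementer leaves MSR[EE] set; this hedged C sketch models that, with set_dec and the bit value as illustrative stand-ins:

    /* Illustrative model only: a soft-masked decrementer is noted for
     * replay and DEC is pushed far into the future so it stops
     * re-firing; MSR[EE] stays enabled in the interrupted context. */
    #include <stdint.h>

    #define PACA_IRQ_DEC 0x08u          /* illustrative value */

    struct fake_paca { uint8_t irq_happened; };
    void set_dec(uint64_t ticks);       /* stand-in for mtspr SPRN_DEC */

    static void masked_decrementer(struct fake_paca *paca)
    {
        paca->irq_happened |= PACA_IRQ_DEC;     /* replay later */
        set_dec(0x7fffffffu);                   /* "bump to a high value" */
    }
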
EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
- INT_HANDLER decrementer, 0x900, ool=1, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
- INT_HANDLER decrementer, 0x900, virt=1, bitmask=IRQS_DISABLED
+ GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
-INT_KVM_HANDLER decrementer, 0x900, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+EXC_COMMON_BEGIN(decrementer_common)
+ GEN_COMMON decrementer
+ FINISH_NAP
+ RUNLATCH_ON
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl timer_interrupt
+ b interrupt_return
+
+ GEN_KVM decrementer
+/**
+ * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
+ * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
+ * register.
+ *
+ * Handling:
+ * Linux does not use this outside KVM, where it is used to keep a host timer
+ * while the guest is given control of DEC. It should normally be caught by
+ * the KVM test and routed there.
+ */
+INT_DEFINE_BEGIN(hdecrementer)
+ IVEC=0x980
+ IHSRR=1
+ ISTACK=0
+ IRECONCILE=0
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(hdecrementer)
+
EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
- INT_HANDLER hdecrementer, 0x980, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
- INT_HANDLER hdecrementer, 0x980, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
-INT_KVM_HANDLER hdecrementer, 0x980, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
+EXC_COMMON_BEGIN(hdecrementer_common)
+ __GEN_COMMON_ENTRY hdecrementer
+ /*
+ * Hypervisor decrementer interrupts not caught by the KVM test
+ * shouldn't occur but are sometimes left pending on exit from a KVM
+ * guest. We don't need to do anything to clear them, as they are
+ * edge-triggered.
+ *
+ * Be careful to avoid touching the kernel stack.
+ */
+ ld r10,PACA_EXGEN+EX_CTR(r13)
+ mtctr r10
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ HRFI_TO_KERNEL
+
+ GEN_KVM hdecrementer
+/**
+ * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
+ * This is an asynchronous interrupt in response to a msgsndp doorbell.
+ * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
+ * IRQS_DISABLED mask (i.e., local_irq_disable()).
+ *
+ * Handling:
+ * Guests may use this for IPIs between threads in a core if the
+ * hypervisor supports it. NVGPRS are not saved (see 0x500).
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, leaving MSR[EE] enabled in the interrupted context because the
+ * doorbells are edge triggered.
+ */
+INT_DEFINE_BEGIN(doorbell_super)
+ IVEC=0xa00
+ IMASK=IRQS_DISABLED
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(doorbell_super)
+
EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
- INT_HANDLER doorbell_super, 0xa00, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
- INT_HANDLER doorbell_super, 0xa00, virt=1, bitmask=IRQS_DISABLED
+ GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
-INT_KVM_HANDLER doorbell_super, 0xa00, EXC_STD, PACA_EXGEN, 0
+EXC_COMMON_BEGIN(doorbell_super_common)
+ GEN_COMMON doorbell_super
+ FINISH_NAP
+ RUNLATCH_ON
+ addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
-EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
+ bl doorbell_exception
#else
-EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
+ bl unknown_exception
#endif
+ b interrupt_return
+
+ GEN_KVM doorbell_super
EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)
-/*
- * system call / hypercall (0xc00, 0x4c00)
- *
- * The system call exception is invoked with "sc 0" and does not alter HV bit.
- *
- * The hypercall is invoked with "sc 1" and sets HV=1.
+/**
+ * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
+ * This is a synchronous interrupt invoked with the "sc" instruction. The
+ * system call is invoked with "sc 0" and does not alter the HV bit, so it
+ * is directed to the currently running OS. The hypercall is invoked with
+ * "sc 1" and it sets HV=1, so it elevates to hypervisor.
*
* In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
* 0x4c00 virtual mode.
*
+ * Handling:
+ * If the KVM test fires then it was due to a hypercall and is accordingly
+ * routed to KVM. Otherwise this executes a normal Linux system call.
+ *
* Call convention:
*
* syscall and hypercalls register conventions are documented in
@@ -1417,6 +1842,12 @@ EXC_VIRT_NONE(0x4b00, 0x100)
* without saving, though xer is not a good idea to use, as hardware may
* interpret some bits so it may be costly to change them.
*/
+INT_DEFINE_BEGIN(system_call)
+ IVEC=0xc00
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(system_call)
+
.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
@@ -1431,7 +1862,7 @@ EXC_VIRT_NONE(0x4b00, 0x100)
GET_PACA(r13)
std r10,PACA_EXGEN+EX_R10(r13)
INTERRUPT_TO_KERNEL
- KVMTEST system_call EXC_STD 0xc00 /* uses r10, branch to system_call_kvm */
+ KVMTEST system_call /* uses r10, branch to system_call_kvm */
mfctr r9
#else
mr r9,r13
@@ -1490,6 +1921,7 @@ EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
EXC_VIRT_END(system_call, 0x4c00, 0x100)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+TRAMP_REAL_BEGIN(system_call_kvm)
/*
* This is a hcall, so register convention is as above, with these
* differences:
@@ -1497,43 +1929,95 @@ EXC_VIRT_END(system_call, 0x4c00, 0x100)
* ctr = orig r13
* orig r10 saved in PACA
*/
-TRAMP_KVM_BEGIN(system_call_kvm)
/*
* Save the PPR (on systems that support it) before changing to
* HMT_MEDIUM. That allows the KVM code to save that value into the
* guest state (it is the guest's PPR value).
*/
- OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
+BEGIN_FTR_SECTION
+ mfspr r10,SPRN_PPR
+ std r10,HSTATE_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
HMT_MEDIUM
- OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
mfctr r10
SET_SCRATCH0(r10)
- std r9,PACA_EXGEN+EX_R9(r13)
- mfcr r9
- KVM_HANDLER 0xc00, EXC_STD, PACA_EXGEN, 0
+ mfcr r10
+ std r12,HSTATE_SCRATCH0(r13)
+ sldi r12,r10,32
+ ori r12,r12,0xc00
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
+ * outside the head section.
+ */
+ __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
+ mtctr r10
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ bctr
+#else
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ b kvmppc_interrupt
+#endif
#endif
+/**
+ * Interrupt 0xd00 - Trace Interrupt.
+ * This is a synchronous interrupt in response to instruction step or
+ * breakpoint faults.
+ */
+INT_DEFINE_BEGIN(single_step)
+ IVEC=0xd00
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(single_step)
+
EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
- INT_HANDLER single_step, 0xd00, kvm=1
+ GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
- INT_HANDLER single_step, 0xd00, virt=1
+ GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
-INT_KVM_HANDLER single_step, 0xd00, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON(single_step_common, 0xd00, single_step_exception)
+EXC_COMMON_BEGIN(single_step_common)
+ GEN_COMMON single_step
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl single_step_exception
+ b interrupt_return
+ GEN_KVM single_step
+
+
+/**
+ * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
+ * This is a synchronous interrupt in response to an MMU fault caused by a
+ * guest data access.
+ *
+ * Handling:
+ * This should always get routed to KVM. In radix MMU mode, this is caused
+ * by a guest nested radix access that can't be performed due to the
+ * partition scope page table. In hash mode, this can be caused by guests
+ * running with translation disabled (virtual real mode) or with VPM enabled.
+ * KVM will update the page table structures or disallow the access.
+ */
+INT_DEFINE_BEGIN(h_data_storage)
+ IVEC=0xe00
+ IHSRR=1
+ IDAR=1
+ IDSISR=1
+ IKVM_SKIP=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_data_storage)
EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
- INT_HANDLER h_data_storage, 0xe00, ool=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
- INT_HANDLER h_data_storage, 0xe00, ool=1, virt=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
-INT_KVM_HANDLER h_data_storage, 0xe00, EXC_HV, PACA_EXGEN, 1
EXC_COMMON_BEGIN(h_data_storage_common)
- INT_COMMON 0xe00, PACA_EXGEN, 1, 1, 1, 1, 1
- bl save_nvgprs
+ GEN_COMMON h_data_storage
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
ld r4,_DAR(r1)
@@ -1542,56 +2026,125 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
- b ret_from_except
+ b interrupt_return
+ GEN_KVM h_data_storage
+
+
+/**
+ * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
+ * This is a synchronous interrupt in response to an MMU fault caused by a
+ * guest instruction fetch, similar to HDSI.
+ */
+INT_DEFINE_BEGIN(h_instr_storage)
+ IVEC=0xe20
+ IHSRR=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_instr_storage)
EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
- INT_HANDLER h_instr_storage, 0xe20, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
- INT_HANDLER h_instr_storage, 0xe20, ool=1, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
-INT_KVM_HANDLER h_instr_storage, 0xe20, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
+EXC_COMMON_BEGIN(h_instr_storage_common)
+ GEN_COMMON h_instr_storage
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unknown_exception
+ b interrupt_return
+ GEN_KVM h_instr_storage
+
+
+/**
+ * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
+ */
+INT_DEFINE_BEGIN(emulation_assist)
+ IVEC=0xe40
+ IHSRR=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(emulation_assist)
EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
- INT_HANDLER emulation_assist, 0xe40, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
- INT_HANDLER emulation_assist, 0xe40, ool=1, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
-INT_KVM_HANDLER emulation_assist, 0xe40, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
+EXC_COMMON_BEGIN(emulation_assist_common)
+ GEN_COMMON emulation_assist
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl emulation_assist_interrupt
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ b interrupt_return
+ GEN_KVM emulation_assist
-/*
- * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
- * first, and then eventaully from there to the trampoline to get into virtual
- * mode.
+
+/**
+ * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
+ * This is an asynchronous interrupt caused by a Hypervisor Maintenance
+ * Exception. It is always taken in real mode but uses HSRR registers
+ * unlike SRESET and MCE.
+ *
+ * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
+ * with IRQS_DISABLED mask (i.e., local_irq_disable()).
+ *
+ * Handling:
+ * This is a special case: it is handled similarly to machine checks, with an
+ * initial real mode handler that is not soft-masked and attempts to fix the
+ * problem, followed by a regular handler which is soft-maskable and reports
+ * the problem.
+ *
+ * The emergency stack is used for the early real mode handler.
+ *
+ * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
+ * either use soft-masking for the MCE, or use irq_work for the HMI.
+ *
+ * KVM:
+ * Unlike MCE, this calls into KVM without calling the real mode handler
+ * first.
*/
+INT_DEFINE_BEGIN(hmi_exception_early)
+ IVEC=0xe60
+ IHSRR=1
+ IREALMODE_COMMON=1
+ ISTACK=0
+ IRECONCILE=0
+ IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
+ IKVM_REAL=1
+INT_DEFINE_END(hmi_exception_early)
+
+INT_DEFINE_BEGIN(hmi_exception)
+ IVEC=0xe60
+ IHSRR=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+INT_DEFINE_END(hmi_exception)
+
EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
- INT_HANDLER hmi_exception, 0xe60, ool=1, early=1, hsrr=EXC_HV, ri=0, kvm=1
+ GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)
-INT_KVM_HANDLER hmi_exception, 0xe60, EXC_HV, PACA_EXGEN, 0
+
EXC_COMMON_BEGIN(hmi_exception_early_common)
- mtctr r10 /* Restore ctr */
- mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
+ __GEN_REALMODE_COMMON_ENTRY hmi_exception_early
+
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- /* We don't touch AMR here, we never go to virtual mode */
- INT_COMMON 0xe60, PACA_EXGEN, 0, 0, 0, 0, 0
+ __GEN_COMMON_BODY hmi_exception_early
addi r3,r1,STACK_FRAME_OVERHEAD
bl hmi_exception_realmode
cmpdi cr0,r3,0
bne 1f
- EXCEPTION_RESTORE_REGS EXC_HV
+ EXCEPTION_RESTORE_REGS hsrr=1
HRFI_TO_USER_OR_KERNEL
1:
@@ -1599,41 +2152,84 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
* Go to virtual mode and pull the HMI event information from
* firmware.
*/
- EXCEPTION_RESTORE_REGS EXC_HV
- INT_HANDLER hmi_exception, 0xe60, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ EXCEPTION_RESTORE_REGS hsrr=1
+ GEN_INT_ENTRY hmi_exception, virt=0
+
+ GEN_KVM hmi_exception_early
EXC_COMMON_BEGIN(hmi_exception_common)
- INT_COMMON 0xe60, PACA_EXGEN, 1, 1, 1, 0, 0
+ GEN_COMMON hmi_exception
FINISH_NAP
RUNLATCH_ON
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
- b ret_from_except
+ b interrupt_return
+
+ GEN_KVM hmi_exception
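
A hedged userspace sketch of the two-phase HMI scheme described above; hmi_fix_in_realmode() and hmi_report_event() are hypothetical stand-ins for hmi_exception_realmode() and handle_hmi_exception():

#include <stdbool.h>
#include <stdio.h>

static bool hmi_fix_in_realmode(void)	/* phase 1: not soft-masked */
{
	return true;			/* pretend the problem was fixed */
}

static void hmi_report_event(void)	/* phase 2: soft-maskable */
{
	puts("HMI: pulling event from firmware and logging it");
}

static void handle_hmi(void)
{
	if (hmi_fix_in_realmode())
		return;			/* recovered: return from real mode */
	hmi_report_event();		/* otherwise take the regular path */
}

int main(void) { handle_hmi(); return 0; }
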
+/**
+ * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
+ * This is an asynchronous interrupt in response to a msgsnd doorbell.
+ * Similar to the 0xa00 doorbell but for host rather than guest.
+ */
+INT_DEFINE_BEGIN(h_doorbell)
+ IVEC=0xe80
+ IHSRR=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_doorbell)
+
EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
- INT_HANDLER h_doorbell, 0xe80, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
- INT_HANDLER h_doorbell, 0xe80, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
-INT_KVM_HANDLER h_doorbell, 0xe80, EXC_HV, PACA_EXGEN, 0
+EXC_COMMON_BEGIN(h_doorbell_common)
+ GEN_COMMON h_doorbell
+ FINISH_NAP
+ RUNLATCH_ON
+ addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
-EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
+ bl doorbell_exception
#else
-EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
+ bl unknown_exception
#endif
+ b interrupt_return
+ GEN_KVM h_doorbell
+
+
+/**
+ * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
+ * This is an asynchronous interrupt in response to an "external exception".
+ * Similar to 0x500 but for host only.
+ */
+INT_DEFINE_BEGIN(h_virt_irq)
+ IVEC=0xea0
+ IHSRR=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_virt_irq)
EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
- INT_HANDLER h_virt_irq, 0xea0, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
- INT_HANDLER h_virt_irq, 0xea0, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
-INT_KVM_HANDLER h_virt_irq, 0xea0, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
+EXC_COMMON_BEGIN(h_virt_irq_common)
+ GEN_COMMON h_virt_irq
+ FINISH_NAP
+ RUNLATCH_ON
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_IRQ
+ b interrupt_return
+
+ GEN_KVM h_virt_irq
EXC_REAL_NONE(0xec0, 0x20)
@@ -1642,25 +2238,69 @@ EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)
+/**
+ * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
+ * This is an asynchronous interrupt in response to a PMU exception.
+ * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
+ * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
+ *
+ * Handling:
+ * This calls into the perf subsystem.
+ *
+ * Like the watchdog soft-nmi, it appears to Linux as an NMI, in that it
+ * can run while local_irq_disable() is in effect. However it may be
+ * soft-masked by powerpc-specific code.
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, and clear MSR[EE] in the interrupted context.
+ */
+INT_DEFINE_BEGIN(performance_monitor)
+ IVEC=0xf00
+ IMASK=IRQS_PMI_DISABLED
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(performance_monitor)
+
EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
- INT_HANDLER performance_monitor, 0xf00, ool=1, bitmask=IRQS_PMI_DISABLED, kvm=1
+ GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
- INT_HANDLER performance_monitor, 0xf00, ool=1, virt=1, bitmask=IRQS_PMI_DISABLED
+ GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
-INT_KVM_HANDLER performance_monitor, 0xf00, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
+EXC_COMMON_BEGIN(performance_monitor_common)
+ GEN_COMMON performance_monitor
+ FINISH_NAP
+ RUNLATCH_ON
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl performance_monitor_exception
+ b interrupt_return
+ GEN_KVM performance_monitor
+
+
+/**
+ * Interrupt 0xf20 - Vector Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing a vector (or altivec) instruction with MSR[VEC]=0.
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(altivec_unavailable)
+ IVEC=0xf20
+ IRECONCILE=0
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(altivec_unavailable)
EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
- INT_HANDLER altivec_unavailable, 0xf20, ool=1, kvm=1
+ GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
- INT_HANDLER altivec_unavailable, 0xf20, ool=1, virt=1
+ GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
-INT_KVM_HANDLER altivec_unavailable, 0xf20, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(altivec_unavailable_common)
- INT_COMMON 0xf20, PACA_EXGEN, 1, 1, 0, 0, 0
+ GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
beq 1f
@@ -1674,34 +2314,47 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl load_up_altivec
- b fast_exception_return
+ b fast_interrupt_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
- b ret_from_except
+ b interrupt_return
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b ret_from_except
+ b interrupt_return
+ GEN_KVM altivec_unavailable
+
+
+/**
+ * Interrupt 0xf40 - VSX Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing a VSX instruction with MSR[VSX]=0.
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(vsx_unavailable)
+ IVEC=0xf40
+ IRECONCILE=0
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(vsx_unavailable)
EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
- INT_HANDLER vsx_unavailable, 0xf40, ool=1, kvm=1
+ GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
- INT_HANDLER vsx_unavailable, 0xf40, ool=1, virt=1
+ GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
-INT_KVM_HANDLER vsx_unavailable, 0xf40, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(vsx_unavailable_common)
- INT_COMMON 0xf40, PACA_EXGEN, 1, 1, 0, 0, 0
+ GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
beq 1f
@@ -1717,40 +2370,78 @@ BEGIN_FTR_SECTION
b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
- b ret_from_except
+ b interrupt_return
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
- bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
- b ret_from_except
+ b interrupt_return
+
+ GEN_KVM vsx_unavailable
+
+/**
+ * Interrupt 0xf60 - Facility Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing an instruction without access to the facility that can be
+ * resolved by the OS (e.g., FSCR, MSR).
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(facility_unavailable)
+ IVEC=0xf60
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(facility_unavailable)
EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
- INT_HANDLER facility_unavailable, 0xf60, ool=1, kvm=1
+ GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
- INT_HANDLER facility_unavailable, 0xf60, ool=1, virt=1
+ GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
-INT_KVM_HANDLER facility_unavailable, 0xf60, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
+EXC_COMMON_BEGIN(facility_unavailable_common)
+ GEN_COMMON facility_unavailable
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl facility_unavailable_exception
+ b interrupt_return
+
+ GEN_KVM facility_unavailable
+/**
+ * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing an instruction without access to the facility that can only
+ * be resolved in HV mode (e.g., HFSCR).
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(h_facility_unavailable)
+ IVEC=0xf80
+ IHSRR=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_facility_unavailable)
+
EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
- INT_HANDLER h_facility_unavailable, 0xf80, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
- INT_HANDLER h_facility_unavailable, 0xf80, ool=1, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
-INT_KVM_HANDLER h_facility_unavailable, 0xf80, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
+EXC_COMMON_BEGIN(h_facility_unavailable_common)
+ GEN_COMMON h_facility_unavailable
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl facility_unavailable_exception
+ b interrupt_return
+
+ GEN_KVM h_facility_unavailable
EXC_REAL_NONE(0xfa0, 0x20)
@@ -1766,56 +2457,95 @@ EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)
#ifdef CONFIG_CBE_RAS
+INT_DEFINE_BEGIN(cbe_system_error)
+ IVEC=0x1200
+ IHSRR=1
+ IKVM_SKIP=1
+ IKVM_REAL=1
+INT_DEFINE_END(cbe_system_error)
+
EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
- INT_HANDLER cbe_system_error, 0x1200, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
-INT_KVM_HANDLER cbe_system_error, 0x1200, EXC_HV, PACA_EXGEN, 1
-EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
+EXC_COMMON_BEGIN(cbe_system_error_common)
+ GEN_COMMON cbe_system_error
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl cbe_system_error_exception
+ b interrupt_return
+
+ GEN_KVM cbe_system_error
+
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif
+INT_DEFINE_BEGIN(instruction_breakpoint)
+ IVEC=0x1300
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_SKIP=1
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(instruction_breakpoint)
+
EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
- INT_HANDLER instruction_breakpoint, 0x1300, kvm=1
+ GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
- INT_HANDLER instruction_breakpoint, 0x1300, virt=1
+ GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
-INT_KVM_HANDLER instruction_breakpoint, 0x1300, EXC_STD, PACA_EXGEN, 1
-EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
+EXC_COMMON_BEGIN(instruction_breakpoint_common)
+ GEN_COMMON instruction_breakpoint
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl instruction_breakpoint_exception
+ b interrupt_return
+
+ GEN_KVM instruction_breakpoint
EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)
-EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
- INT_HANDLER denorm_exception_hv, 0x1500, early=2, hsrr=EXC_HV
+/**
+ * Interrupt 0x1500 - Soft Patch Interrupt
+ *
+ * Handling:
+ * This is an implementation-specific interrupt which can be used for a
+ * range of exceptions.
+ *
+ * This interrupt handler is unique in that it runs the denormal assist
+ * code even for guests (and even in guest context) without going to KVM,
+ * for speed. POWER9 does not raise denorm exceptions, so this special
+ * case could eventually be phased out.
+ */
+INT_DEFINE_BEGIN(denorm_exception)
+ IVEC=0x1500
+ IHSRR=1
+ IBRANCH_COMMON=0
+ IKVM_REAL=1
+INT_DEFINE_END(denorm_exception)
+
+EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
+ GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
- mfspr r10,SPRN_HSRR1
- andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
+ andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
#endif
- KVMTEST denorm_exception_hv, EXC_HV 0x1500
- INT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV, 1
-EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
-
+ GEN_BRANCH_TO_COMMON denorm_exception, virt=0
+EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
- INT_HANDLER denorm_exception, 0x1500, 0, 2, 1, EXC_HV, PACA_EXGEN, 1, 0, 0, 0, 0
- mfspr r10,SPRN_HSRR1
- andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
+ GEN_INT_ENTRY denorm_exception, virt=1
+ andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
- INT_VIRT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV
+ GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif
-INT_KVM_HANDLER denorm_exception_hv, 0x1500, EXC_HV, PACA_EXGEN, 0
-
#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
@@ -1872,7 +2602,10 @@ denorm_done:
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
- RESTORE_PPR_PACA(PACA_EXGEN, r10)
+BEGIN_FTR_SECTION
+ ld r10,PACA_EXGEN+EX_PPR(r13)
+ mtspr SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
@@ -1885,43 +2618,88 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
b .
#endif
-EXC_COMMON(denorm_common, 0x1500, unknown_exception)
+EXC_COMMON_BEGIN(denorm_exception_common)
+ GEN_COMMON denorm_exception
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unknown_exception
+ b interrupt_return
+
+ GEN_KVM denorm_exception
#ifdef CONFIG_CBE_RAS
+INT_DEFINE_BEGIN(cbe_maintenance)
+ IVEC=0x1600
+ IHSRR=1
+ IKVM_SKIP=1
+ IKVM_REAL=1
+INT_DEFINE_END(cbe_maintenance)
+
EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
- INT_HANDLER cbe_maintenance, 0x1600, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY cbe_maintenance, virt=0
EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
-INT_KVM_HANDLER cbe_maintenance, 0x1600, EXC_HV, PACA_EXGEN, 1
-EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
+EXC_COMMON_BEGIN(cbe_maintenance_common)
+ GEN_COMMON cbe_maintenance
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl cbe_maintenance_exception
+ b interrupt_return
+
+ GEN_KVM cbe_maintenance
+
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif
+INT_DEFINE_BEGIN(altivec_assist)
+ IVEC=0x1700
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(altivec_assist)
+
EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
- INT_HANDLER altivec_assist, 0x1700, kvm=1
+ GEN_INT_ENTRY altivec_assist, virt=0
EXC_REAL_END(altivec_assist, 0x1700, 0x100)
EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
- INT_HANDLER altivec_assist, 0x1700, virt=1
+ GEN_INT_ENTRY altivec_assist, virt=1
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
-INT_KVM_HANDLER altivec_assist, 0x1700, EXC_STD, PACA_EXGEN, 0
+EXC_COMMON_BEGIN(altivec_assist_common)
+ GEN_COMMON altivec_assist
+ addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
-EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
+ bl altivec_assist_exception
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
#else
-EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
+ bl unknown_exception
#endif
+ b interrupt_return
+
+ GEN_KVM altivec_assist
#ifdef CONFIG_CBE_RAS
+INT_DEFINE_BEGIN(cbe_thermal)
+ IVEC=0x1800
+ IHSRR=1
+ IKVM_SKIP=1
+ IKVM_REAL=1
+INT_DEFINE_END(cbe_thermal)
+
EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
- INT_HANDLER cbe_thermal, 0x1800, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY cbe_thermal, virt=0
EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
-INT_KVM_HANDLER cbe_thermal, 0x1800, EXC_HV, PACA_EXGEN, 1
-EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
+EXC_COMMON_BEGIN(cbe_thermal_common)
+ GEN_COMMON cbe_thermal
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl cbe_thermal_exception
+ b interrupt_return
+
+ GEN_KVM cbe_thermal
+
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
@@ -1930,14 +2708,11 @@ EXC_VIRT_NONE(0x5800, 0x100)
#ifdef CONFIG_PPC_WATCHDOG
-#define MASKED_DEC_HANDLER_LABEL 3f
-
-#define MASKED_DEC_HANDLER(_H) \
-3: /* soft-nmi */ \
- std r12,PACA_EXGEN+EX_R12(r13); \
- GET_SCRATCH0(r10); \
- std r10,PACA_EXGEN+EX_R13(r13); \
- INT_SAVE_SRR_AND_JUMP soft_nmi_common, _H, 1
+INT_DEFINE_BEGIN(soft_nmi)
+ IVEC=0x900
+ ISTACK=0
+ IRECONCILE=0 /* Soft-NMI may fire under local_irq_disable */
+INT_DEFINE_END(soft_nmi)
/*
* Branch to soft_nmi_interrupt using the emergency stack. The emergency
@@ -1949,18 +2724,42 @@ EXC_VIRT_NONE(0x5800, 0x100)
* and run it entirely with interrupts hard disabled.
*/
EXC_COMMON_BEGIN(soft_nmi_common)
+ mfspr r11,SPRN_SRR0
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
- INT_COMMON 0x900, PACA_EXGEN, 0, 1, 1, 0, 0
- bl save_nvgprs
+ __GEN_COMMON_BODY soft_nmi
+
+ /*
+ * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
+ * system_reset_common)
+ */
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+ lbz r10,PACAIRQHAPPENED(r13)
+ std r10,_DAR(r1)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
+
addi r3,r1,STACK_FRAME_OVERHEAD
bl soft_nmi_interrupt
- b ret_from_except
-#else /* CONFIG_PPC_WATCHDOG */
-#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
-#define MASKED_DEC_HANDLER(_H)
+ /* Clear MSR_RI before setting SRR0 and SRR1. */
+ li r9,0
+ mtmsrd r9,1
+
+ /*
+ * Restore soft mask settings.
+ */
+ ld r10,_DAR(r1)
+ stb r10,PACAIRQHAPPENED(r13)
+ ld r10,SOFTE(r1)
+ stb r10,PACAIRQSOFTMASK(r13)
+
+ kuap_restore_amr r10
+ EXCEPTION_RESTORE_REGS hsrr=0
+ RFI_TO_KERNEL
+
#endif /* CONFIG_PPC_WATCHDOG */
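
The soft-NMI path above stashes the interrupted context's PACAIRQHAPPENED in the _DAR slot and its soft-mask state in SOFTE, runs fully masked, then restores both so the NMI is transparent to the interrupted code. A self-contained C model of that save/run/restore shape (values illustrative):

#include <assert.h>
#include <stdint.h>

#define IRQS_ENABLED		0
#define IRQS_ALL_DISABLED	3	/* illustrative values */

static uint8_t irq_soft_mask = IRQS_ENABLED;
static uint8_t irq_happened = 0x01;

static void soft_nmi_interrupt(void) { /* run the watchdog check */ }

static void soft_nmi_model(void)
{
	/* Save the interrupted context's mask state, as the asm does on
	 * the emergency stack frame... */
	uint8_t saved_mask = irq_soft_mask;
	uint8_t saved_happened = irq_happened;

	irq_soft_mask = IRQS_ALL_DISABLED;	/* run as if fully masked */
	soft_nmi_interrupt();

	/* ...and restore it exactly: an NMI must be transparent. */
	irq_happened = saved_happened;
	irq_soft_mask = saved_mask;
}

int main(void)
{
	soft_nmi_model();
	assert(irq_soft_mask == IRQS_ENABLED);
	return 0;
}
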
/*
@@ -1973,13 +2772,12 @@ EXC_COMMON_BEGIN(soft_nmi_common)
* - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
* This is called with r10 containing the value to OR to the paca field.
*/
-.macro MASKED_INTERRUPT hsrr
+.macro MASKED_INTERRUPT hsrr=0
.if \hsrr
masked_Hinterrupt:
.else
masked_interrupt:
.endif
- std r11,PACA_EXGEN+EX_R11(r13)
lbz r11,PACAIRQHAPPENED(r13)
or r11,r11,r10
stb r11,PACAIRQHAPPENED(r13)
@@ -1988,26 +2786,30 @@ masked_interrupt:
lis r10,0x7fff
ori r10,r10,0xffff
mtspr SPRN_DEC,r10
- b MASKED_DEC_HANDLER_LABEL
+#ifdef CONFIG_PPC_WATCHDOG
+ b soft_nmi_common
+#else
+ b 2f
+#endif
1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
beq 2f
+ xori r12,r12,MSR_EE /* clear MSR_EE */
.if \hsrr
- mfspr r10,SPRN_HSRR1
- xori r10,r10,MSR_EE /* clear MSR_EE */
- mtspr SPRN_HSRR1,r10
+ mtspr SPRN_HSRR1,r12
.else
- mfspr r10,SPRN_SRR1
- xori r10,r10,MSR_EE /* clear MSR_EE */
- mtspr SPRN_SRR1,r10
+ mtspr SPRN_SRR1,r12
.endif
ori r11,r11,PACA_IRQ_HARD_DIS
stb r11,PACAIRQHAPPENED(r13)
2: /* done */
+ ld r10,PACA_EXGEN+EX_CTR(r13)
+ mtctr r10
mtcrf 0x80,r9
std r1,PACAR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
/* returns to kernel where r13 must be set up, so don't restore it */
.if \hsrr
HRFI_TO_KERNEL
@@ -2015,7 +2817,6 @@ masked_interrupt:
RFI_TO_KERNEL
.endif
b .
- MASKED_DEC_HANDLER(\hsrr\())
.endm
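
The MASKED_INTERRUPT macro implements the bookkeeping described in the comment above it: latch the source in paca->irq_happened for later replay, and for PACA_IRQ_MUST_HARD_MASK sources also clear MSR[EE] in the saved MSR so the line cannot immediately re-fire. A userspace C model, with made-up bit values standing in for asm/hw_irq.h:

#include <assert.h>
#include <stdint.h>

/* Illustrative bit values -- the real ones live in asm/hw_irq.h. */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#define MSR_EE			0x8000u

static uint8_t irq_happened;

static void masked_interrupt(uint8_t src, uint64_t *saved_msr)
{
	irq_happened |= src;			/* note it for later replay */
	if (src & PACA_IRQ_MUST_HARD_MASK) {
		*saved_msr &= ~(uint64_t)MSR_EE; /* like: xori r12,r12,MSR_EE */
		irq_happened |= PACA_IRQ_HARD_DIS;
	}
}

int main(void)
{
	uint64_t msr = MSR_EE;

	masked_interrupt(PACA_IRQ_EE, &msr);	/* masked external interrupt */
	assert(!(msr & MSR_EE));
	assert(irq_happened == (PACA_IRQ_EE | PACA_IRQ_HARD_DIS));

	masked_interrupt(PACA_IRQ_DEC, &msr);	/* decrementer: replay only */
	assert(irq_happened & PACA_IRQ_DEC);
	return 0;
}
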
TRAMP_REAL_BEGIN(stf_barrier_fallback)
@@ -2117,17 +2918,12 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
GET_SCRATCH0(r13);
hrfid
-/*
- * Real mode exceptions actually use this too, but alternate
- * instruction code patches (which end up in the common .text area)
- * cannot reach these if they are put there.
- */
-USE_FIXED_SECTION(virt_trampolines)
- MASKED_INTERRUPT EXC_STD
- MASKED_INTERRUPT EXC_HV
+USE_TEXT_SECTION()
+ MASKED_INTERRUPT
+ MASKED_INTERRUPT hsrr=1
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
+kvmppc_skip_interrupt:
/*
* Here all GPRs are unchanged from when the interrupt happened
* except for r13, which is saved in SPRG_SCRATCH0.
@@ -2139,7 +2935,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
RFI_TO_KERNEL
b .
-TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
+kvmppc_skip_Hinterrupt:
/*
* Here all GPRs are unchanged from when the interrupt happened
* except for r13, which is saved in SPRG_SCRATCH0.
@@ -2152,16 +2948,6 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
b .
#endif
-/*
- * Ensure that any handlers that get invoked from the exception prologs
- * above are below the first 64KB (0x10000) of the kernel image because
- * the prologs assemble the addresses of these handlers using the
- * LOAD_HANDLER macro, which uses an ori instruction.
- */
-
-/*** Common interrupt handlers ***/
-
-
/*
* Relocation-on interrupts: A subset of the interrupts can be delivered
* with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
@@ -2275,7 +3061,7 @@ do_hash_page:
cmpdi r3,0 /* see if __hash_page succeeded */
/* Success */
- beq fast_exc_return_irq /* Return from exception on success */
+ beq interrupt_return /* Return from exception on success */
/* Error */
blt- 13f
@@ -2292,39 +3078,36 @@ handle_page_fault:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
cmpdi r3,0
- beq+ ret_from_except_lite
- bl save_nvgprs
+ beq+ interrupt_return
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
ld r4,_DAR(r1)
bl bad_page_fault
- b ret_from_except
+ b interrupt_return
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
- bl save_nvgprs
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_break
/*
* do_break() may have changed the NV GPRS while handling a breakpoint.
- * If so, we need to restore them with their updated values. Don't use
- * ret_from_except_lite here.
+ * If so, we need to restore them with their updated values.
*/
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
#ifdef CONFIG_PPC_BOOK3S_64
/* We have a page fault that hash_page could handle but HV refused
* the PTE insertion
*/
-13: bl save_nvgprs
- mr r5,r3
+13: mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
ld r4,_DAR(r1)
bl low_hash_fault
- b ret_from_except
+ b interrupt_return
#endif
/*
@@ -2334,11 +3117,10 @@ handle_dabr_fault:
* were soft-disabled. We want to invoke the exception handler for
* the access, or panic if there isn't a handler.
*/
-77: bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
+77: addi r3,r1,STACK_FRAME_OVERHEAD
li r5,SIGSEGV
bl bad_page_fault
- b ret_from_except
+ b interrupt_return
/*
* When doorbell is triggered from system reset wakeup, the message is
@@ -2352,56 +3134,9 @@ handle_dabr_fault:
h_doorbell_common_msgclr:
LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
PPC_MSGCLR(3)
- b h_doorbell_common
+ b h_doorbell_common_virt
doorbell_super_common_msgclr:
LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
PPC_MSGCLRP(3)
- b doorbell_super_common
-
-/*
- * Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
- * which kind of interrupt. MSR:EE is already off. We generate a
- * stackframe like if a real interrupt had happened.
- *
- * Note: While MSR:EE is off, we need to make sure that _MSR
- * in the generated frame has EE set to 1 or the exception
- * handler will not properly re-enable them.
- *
- * Note that we don't specify LR as the NIP (return address) for
- * the interrupt because that would unbalance the return branch
- * predictor.
- */
-_GLOBAL(__replay_interrupt)
- /* We are going to jump to the exception common code which
- * will retrieve various register values from the PACA which
- * we don't give a damn about, so we don't bother storing them.
- */
- mfmsr r12
- LOAD_REG_ADDR(r11, replay_interrupt_return)
- mfcr r9
- ori r12,r12,MSR_EE
- cmpwi r3,0x900
- beq decrementer_common
- cmpwi r3,0x500
-BEGIN_FTR_SECTION
- beq h_virt_irq_common
-FTR_SECTION_ELSE
- beq hardware_interrupt_common
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
- cmpwi r3,0xf00
- beq performance_monitor_common
-BEGIN_FTR_SECTION
- cmpwi r3,0xa00
- beq h_doorbell_common_msgclr
- cmpwi r3,0xe60
- beq hmi_exception_common
-FTR_SECTION_ELSE
- cmpwi r3,0xa00
- beq doorbell_super_common_msgclr
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
-replay_interrupt_return:
- blr
-
-_ASM_NOKPROBE_SYMBOL(__replay_interrupt)
+ b doorbell_super_common_virt
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index ff0114aeba9b..59e60a9a9f5c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -36,6 +36,8 @@ static struct fw_dump fw_dump;
static void __init fadump_reserve_crash_area(u64 base);
+struct kobject *fadump_kobj;
+
#ifndef CONFIG_PRESERVE_FA_DUMP
static DEFINE_MUTEX(fadump_mutex);
struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
@@ -1323,9 +1325,9 @@ static void fadump_invalidate_release_mem(void)
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
}
-static ssize_t fadump_release_memory_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t release_mem_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int input = -1;
@@ -1350,23 +1352,40 @@ static ssize_t fadump_release_memory_store(struct kobject *kobj,
return count;
}
-static ssize_t fadump_enabled_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+/* Release the reserved memory and disable the FADump */
+static void unregister_fadump(void)
+{
+ fadump_cleanup();
+ fadump_release_memory(fw_dump.reserve_dump_area_start,
+ fw_dump.reserve_dump_area_size);
+ fw_dump.fadump_enabled = 0;
+ kobject_put(fadump_kobj);
+}
+
+static ssize_t enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}
-static ssize_t fadump_register_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static ssize_t mem_reserved_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size);
+}
+
+static ssize_t registered_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", fw_dump.dump_registered);
}
-static ssize_t fadump_register_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t registered_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int ret = 0;
int input = -1;
@@ -1418,45 +1437,82 @@ static int fadump_region_show(struct seq_file *m, void *private)
return 0;
}
-static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
- 0200, NULL,
- fadump_release_memory_store);
-static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
- 0444, fadump_enabled_show,
- NULL);
-static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
- 0644, fadump_register_show,
- fadump_register_store);
+static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
+static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
+static struct kobj_attribute register_attr = __ATTR_RW(registered);
+static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);
+
+static struct attribute *fadump_attrs[] = {
+ &enable_attr.attr,
+ &register_attr.attr,
+ &mem_reserved_attr.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fadump);
DEFINE_SHOW_ATTRIBUTE(fadump_region);
static void fadump_init_files(void)
{
- struct dentry *debugfs_file;
int rc = 0;
- rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
- if (rc)
- printk(KERN_ERR "fadump: unable to create sysfs file"
- " fadump_enabled (%d)\n", rc);
+ fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
+ if (!fadump_kobj) {
+ pr_err("failed to create fadump kobject\n");
+ return;
+ }
- rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
- if (rc)
- printk(KERN_ERR "fadump: unable to create sysfs file"
- " fadump_registered (%d)\n", rc);
+ debugfs_create_file("fadump_region", 0444, powerpc_debugfs_root, NULL,
+ &fadump_region_fops);
- debugfs_file = debugfs_create_file("fadump_region", 0444,
- powerpc_debugfs_root, NULL,
- &fadump_region_fops);
- if (!debugfs_file)
- printk(KERN_ERR "fadump: unable to create debugfs file"
- " fadump_region\n");
+ if (fw_dump.dump_active) {
+ rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
+ if (rc)
+ pr_err("unable to create release_mem sysfs file (%d)\n",
+ rc);
+ }
+
+ rc = sysfs_create_groups(fadump_kobj, fadump_groups);
+ if (rc) {
+ pr_err("sysfs group creation failed (%d), unregistering FADump",
+ rc);
+ unregister_fadump();
+ return;
+ }
+
+ /*
+	 * The FADump sysfs files have moved from kernel_kobj to fadump_kobj;
+	 * create symlinks at the old locations to maintain backward compatibility.
+ *
+ * - fadump_enabled -> fadump/enabled
+ * - fadump_registered -> fadump/registered
+ * - fadump_release_mem -> fadump/release_mem
+ */
+ rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
+ "enabled", "fadump_enabled");
+ if (rc) {
+ pr_err("unable to create fadump_enabled symlink (%d)", rc);
+ return;
+ }
+
+ rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
+ "registered",
+ "fadump_registered");
+ if (rc) {
+ pr_err("unable to create fadump_registered symlink (%d)", rc);
+ sysfs_remove_link(kernel_kobj, "fadump_enabled");
+ return;
+ }
if (fw_dump.dump_active) {
- rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
+ rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
+ fadump_kobj,
+ "release_mem",
+ "fadump_release_mem");
if (rc)
- printk(KERN_ERR "fadump: unable to create sysfs file"
- " fadump_release_mem (%d)\n", rc);
+ pr_err("unable to create fadump_release_mem symlink (%d)",
+ rc);
}
return;
}
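
For readers unfamiliar with the __ATTR_* helpers adopted above: they derive the callback names from the attribute name by token pasting, which is why the handlers were renamed to enabled_show(), registered_store(), and so on. A compressed sketch of the pattern (kernel build context assumed; the demo_* names are hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", 1);
}

/* __ATTR_RO(demo) binds demo_show; __ATTR_WO and __ATTR_RW bind
 * demo_store / both, purely by pasting on the attribute name. */
static struct kobj_attribute demo_attr = __ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
	&demo_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);	/* emits demo_groups[] for sysfs_create_groups() */
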
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 97c887950c3c..daaa153950c2 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -348,7 +348,7 @@ BEGIN_MMU_FTR_SECTION
andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
- rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
+ rlwinm r3, r5, 32 - 24, 30, 30 /* DSISR_STORE -> _PAGE_RW */
bl hash_page
b handle_page_fault_tramp_1
FTR_SECTION_ELSE
@@ -497,7 +497,6 @@ InstructionTLBMiss:
andc. r1,r1,r0 /* check access & ~permission */
bne- InstructionAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
ori r1, r1, 0xe06 /* clear out reserved bits */
andc r1, r0, r1 /* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
@@ -565,9 +564,8 @@ DataLoadTLBMiss:
* we would need to update the pte atomically with lwarx/stwcx.
*/
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
- rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
- rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
+ rlwinm r1,r0,0,30,30 /* _PAGE_RW -> PP msb */
+ rlwimi r0,r0,1,30,30 /* _PAGE_USER -> PP msb */
ori r1,r1,0xe04 /* clear out reserved bits */
andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
@@ -645,7 +643,6 @@ DataStoreTLBMiss:
* we would need to update the pte atomically with lwarx/stwcx.
*/
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
li r1,0xe06 /* clear out reserved bits & PP msb */
andc r1,r0,r1 /* PP = user? 1: 0 */
BEGIN_FTR_SECTION
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 9db162f79fe6..9abec6cd099c 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -130,37 +130,36 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
.macro SYSCALL_ENTRY trapno
mfspr r12,SPRN_SPRG_THREAD
+ mfspr r9, SPRN_SRR1
#ifdef CONFIG_VMAP_STACK
- mfspr r9, SPRN_SRR0
- mfspr r11, SPRN_SRR1
- stw r9, SRR0(r12)
- stw r11, SRR1(r12)
+ mfspr r11, SPRN_SRR0
+ mtctr r11
#endif
- mfcr r10
+ andi. r11, r9, MSR_PR
lwz r11,TASK_STACK-THREAD(r12)
- rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
+ beq- 99f
addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
- li r9, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
- mtmsr r9
+ li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+ mtmsr r10
isync
#endif
tovirt_vmstack r12, r12
tophys_novmstack r11, r11
- mflr r9
- stw r10,_CCR(r11) /* save registers */
- stw r9, _LINK(r11)
+ mflr r10
+ stw r10, _LINK(r11)
#ifdef CONFIG_VMAP_STACK
- lwz r10, SRR0(r12)
- lwz r9, SRR1(r12)
+ mfctr r10
#else
mfspr r10,SPRN_SRR0
- mfspr r9,SPRN_SRR1
#endif
stw r1,GPR1(r11)
stw r1,0(r11)
tovirt_novmstack r1, r11 /* set new kernel sp */
stw r10,_NIP(r11)
+ mfcr r10
+ rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
+ stw r10,_CCR(r11) /* save registers */
#ifdef CONFIG_40x
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
#else
@@ -228,6 +227,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
mtspr SPRN_SRR0,r11
SYNC
RFI /* jump to handler, enable MMU */
+99: b ret_from_kernel_syscall
.endm
.macro save_dar_dsisr_on_stack reg1, reg2, sp
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ad79fddb974d..ddfbd02140d9 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -537,6 +537,7 @@ __start_initialization_multiplatform:
b __after_prom_start
#endif /* CONFIG_PPC_BOOK3E */
+__REF
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
/* Save parameters */
@@ -574,6 +575,7 @@ __boot_from_prom:
/* We never return. We also hit that trap if trying to boot
* from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
trap
+ .previous
__after_prom_start:
#ifdef CONFIG_RELOCATABLE
@@ -977,7 +979,6 @@ start_here_multiplatform:
RFI
b . /* prevent speculative execution */
- .previous
/* This is where all platforms converge execution */
start_here_common:
@@ -1001,6 +1002,7 @@ start_here_common:
/* Not reached */
trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
+ .previous
/*
* We put a few things here that have to be page-aligned.
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 37fc84ed90e3..bd2e5ed8dd50 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -104,16 +104,18 @@ FTR_SECTION_ELSE
#ifdef CONFIG_KVM_BOOKE_HV
ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
#endif
+ mfspr r9, SPRN_SRR1
BOOKE_CLEAR_BTB(r11)
+ andi. r11, r9, MSR_PR
lwz r11, TASK_STACK - THREAD(r10)
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
+ beq- 99f
ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
stw r12, _CCR(r11) /* save various registers */
mflr r12
stw r12,_LINK(r11)
mfspr r12,SPRN_SRR0
stw r1, GPR1(r11)
- mfspr r9,SPRN_SRR1
stw r1, 0(r11)
mr r1, r11
stw r12,_NIP(r11)
@@ -176,6 +178,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
mtspr SPRN_SRR0,r11
SYNC
RFI /* jump to handler, enable MMU */
+99: b ret_from_kernel_syscall
.endm
/* To handle the additional exception priority levels on 40x and Book-E
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index d0854320bb50..72f461bd70fb 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -429,3 +429,19 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
+
+void ptrace_triggered(struct perf_event *bp,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event_attr attr;
+
+ /*
+ * Disable the breakpoint request here since ptrace has defined a
+ * one-shot behaviour for breakpoint exceptions in PPC64.
+	 * The SIGTRAP signal is generated automatically for us in do_break().
+	 * We don't have to do anything about that here.
+ */
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5c9b11878555..a25ed47087ee 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -70,6 +70,7 @@
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
+#include <asm/dbell.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
@@ -109,6 +110,8 @@ static inline notrace int decrementer_check_overflow(void)
return now >= *next_tb;
}
+#ifdef CONFIG_PPC_BOOK3E
+
/* This is called whenever we are re-enabling interrupts
* and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
* there's an EE, DEC or DBELL to generate.
@@ -168,6 +171,67 @@ notrace unsigned int __check_irq_replay(void)
}
}
+ if (happened & PACA_IRQ_DEC) {
+ local_paca->irq_happened &= ~PACA_IRQ_DEC;
+ return 0x900;
+ }
+
+ if (happened & PACA_IRQ_EE) {
+ local_paca->irq_happened &= ~PACA_IRQ_EE;
+ return 0x500;
+ }
+
+ /*
+	 * Check if an EPR external interrupt happened. This bit is typically
+ * set if we need to handle another "edge" interrupt from within the
+ * MPIC "EPR" handler.
+ */
+ if (happened & PACA_IRQ_EE_EDGE) {
+ local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
+ return 0x500;
+ }
+
+ if (happened & PACA_IRQ_DBELL) {
+ local_paca->irq_happened &= ~PACA_IRQ_DBELL;
+ return 0x280;
+ }
+
+ /* There should be nothing left ! */
+ BUG_ON(local_paca->irq_happened != 0);
+
+ return 0;
+}
+#endif /* CONFIG_PPC_BOOK3E */
+
+void replay_soft_interrupts(void)
+{
+ /*
+ * We use local_paca rather than get_paca() to avoid all
+	 * the debug_smp_processor_id() business in this low-level
+	 * function.
+ */
+ unsigned char happened = local_paca->irq_happened;
+ struct pt_regs regs;
+
+ ppc_save_regs(&regs);
+ regs.softe = IRQS_ALL_DISABLED;
+
+again:
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ if (happened & PACA_IRQ_HARD_DIS) {
+ /*
+ * We may have missed a decrementer interrupt if hard disabled.
+ * Check the decrementer register in case we had a rollover
+ * while hard disabled.
+ */
+ if (!(happened & PACA_IRQ_DEC)) {
+ if (decrementer_check_overflow())
+ happened |= PACA_IRQ_DEC;
+ }
+ }
+
/*
* Force the delivery of pending soft-disabled interrupts on PS3.
* Any HV call will have this side effect.
@@ -182,58 +246,78 @@ notrace unsigned int __check_irq_replay(void)
* This is a higher priority interrupt than the others, so
* replay it first.
*/
- if (happened & PACA_IRQ_HMI) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
local_paca->irq_happened &= ~PACA_IRQ_HMI;
- return 0xe60;
+ regs.trap = 0xe60;
+ handle_hmi_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
}
if (happened & PACA_IRQ_DEC) {
local_paca->irq_happened &= ~PACA_IRQ_DEC;
- return 0x900;
- }
-
- if (happened & PACA_IRQ_PMI) {
- local_paca->irq_happened &= ~PACA_IRQ_PMI;
- return 0xf00;
+ regs.trap = 0x900;
+ timer_interrupt(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
}
if (happened & PACA_IRQ_EE) {
local_paca->irq_happened &= ~PACA_IRQ_EE;
- return 0x500;
+ regs.trap = 0x500;
+ do_IRQ(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
}
-#ifdef CONFIG_PPC_BOOK3E
/*
* Check if an EPR external interrupt happened this bit is typically
* set if we need to handle another "edge" interrupt from within the
* MPIC "EPR" handler.
*/
- if (happened & PACA_IRQ_EE_EDGE) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E) && (happened & PACA_IRQ_EE_EDGE)) {
local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
- return 0x500;
+ regs.trap = 0x500;
+ do_IRQ(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
}
- if (happened & PACA_IRQ_DBELL) {
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- return 0x280;
- }
-#else
- if (happened & PACA_IRQ_DBELL) {
+ if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- return 0xa00;
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E))
+ regs.trap = 0x280;
+ else
+ regs.trap = 0xa00;
+ doorbell_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
}
-#endif /* CONFIG_PPC_BOOK3E */
- /* There should be nothing left ! */
- BUG_ON(local_paca->irq_happened != 0);
+ /* Book3E does not support soft-masking PMI interrupts */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
+ local_paca->irq_happened &= ~PACA_IRQ_PMI;
+ regs.trap = 0xf00;
+ performance_monitor_exception(&regs);
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ }
- return 0;
+ happened = local_paca->irq_happened;
+ if (happened & ~PACA_IRQ_HARD_DIS) {
+ /*
+ * We are responding to the next interrupt, so interrupt-off
+ * latencies should be reset here.
+ */
+ trace_hardirqs_on();
+ trace_hardirqs_off();
+ goto again;
+ }
}
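
replay_soft_interrupts() dispatches each latched source as a direct C call rather than faking an interrupt vector as __replay_interrupt did. A self-contained userspace model of the loop structure (bit values illustrative):

#include <stdint.h>
#include <stdio.h>

#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_EE		0x04	/* illustrative bit values */
#define PACA_IRQ_DEC		0x08

static uint8_t irq_happened = PACA_IRQ_HARD_DIS | PACA_IRQ_DEC | PACA_IRQ_EE;

static void replay_soft_interrupts_model(void)
{
	uint8_t happened = irq_happened;

again:
	if (happened & PACA_IRQ_DEC) {
		irq_happened &= ~PACA_IRQ_DEC;
		puts("replay: timer_interrupt()");
	}
	if (happened & PACA_IRQ_EE) {
		irq_happened &= ~PACA_IRQ_EE;
		puts("replay: do_IRQ()");
	}
	happened = irq_happened;	/* a handler may have latched more */
	if (happened & ~PACA_IRQ_HARD_DIS)
		goto again;
}

int main(void) { replay_soft_interrupts_model(); return 0; }
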
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
- unsigned int replay;
/* Write the new soft-enabled value */
irq_soft_mask_set(mask);
@@ -255,24 +339,16 @@ notrace void arch_local_irq_restore(unsigned long mask)
*/
irq_happened = get_irq_happened();
if (!irq_happened) {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-#endif
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
return;
}
- /*
- * We need to hard disable to get a trusted value from
- * __check_irq_replay(). We also need to soft-disable
- * again to avoid warnings in there due to the use of
- * per-cpu variables.
- */
+ /* We need to hard disable to replay. */
if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-#endif
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
__hard_irq_disable();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
} else {
/*
* We should already be hard disabled here. We had bugs
@@ -280,35 +356,26 @@ notrace void arch_local_irq_restore(unsigned long mask)
* warn if we are wrong. Only do that when IRQ tracing
* is enabled as mfmsr() can be costly.
*/
- if (WARN_ON_ONCE(mfmsr() & MSR_EE))
- __hard_irq_disable();
-#endif
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+
+ if (irq_happened == PACA_IRQ_HARD_DIS) {
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ return;
+ }
}
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
- /*
- * Check if anything needs to be re-emitted. We haven't
- * soft-enabled yet to avoid warnings in decrementer_check_overflow
- * accessing per-cpu variables
- */
- replay = __check_irq_replay();
+ replay_soft_interrupts();
+ local_paca->irq_happened = 0;
- /* We can soft-enable now */
trace_hardirqs_on();
irq_soft_mask_set(IRQS_ENABLED);
-
- /*
- * And replay if we have to. This will return with interrupts
- * hard-enabled.
- */
- if (replay) {
- __replay_interrupt(replay);
- return;
- }
-
- /* Finally, let's ensure we are hard enabled */
__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
@@ -599,17 +666,18 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
static inline void check_stack_overflow(void)
{
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
long sp;
- sp = current_stack_pointer() & (THREAD_SIZE-1);
+ if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
+ return;
+
+ sp = current_stack_pointer & (THREAD_SIZE - 1);
/* check for stack overflow: is there less than 2KB free? */
if (unlikely(sp < 2048)) {
pr_err("do_IRQ: stack overflow: %ld\n", sp);
dump_stack();
}
-#endif
}
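
check_stack_overflow() now uses IS_ENABLED() rather than #ifdef, so the body is always compiled (catching bitrot) and folded away by the optimizer when the option is off. The macro trick behind IS_ENABLED(), reproduced from the kernel's <linux/kconfig.h> as a standalone demo:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_DEBUG_STACKOVERFLOW 1	/* comment out to get 0 below */

int main(void)
{
	printf("IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW) = %d\n",
	       IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW));
	return 0;
}
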
void __do_irq(struct pt_regs *regs)
@@ -647,7 +715,7 @@ void do_IRQ(struct pt_regs *regs)
void *cursp, *irqsp, *sirqsp;
/* Switch to the irq stack to handle this */
- cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
+ cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
irqsp = hardirq_ctx[raw_smp_processor_id()];
sirqsp = softirq_ctx[raw_smp_processor_id()];
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2d27ec4feee4..81efb605113e 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -264,6 +264,9 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs))
return 0;
+ if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+ return 0;
+
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
@@ -271,54 +274,6 @@ int kprobe_handler(struct pt_regs *regs)
preempt_disable();
kcb = get_kprobe_ctlblk();
- /* Check we're not actually recursing */
- if (kprobe_running()) {
- p = get_kprobe(addr);
- if (p) {
- kprobe_opcode_t insn = *p->ainsn.insn;
- if (kcb->kprobe_status == KPROBE_HIT_SS &&
- is_trap(insn)) {
- /* Turn off 'trace' bits */
- regs->msr &= ~MSR_SINGLESTEP;
- regs->msr |= kcb->kprobe_saved_msr;
- goto no_kprobe;
- }
- /* We have reentered the kprobe_handler(), since
- * another probe was hit while within the handler.
- * We here save the original kprobes variables and
- * just single step on the instruction of the new probe
- * without calling any user handlers.
- */
- save_previous_kprobe(kcb);
- set_current_kprobe(p, regs, kcb);
- kprobes_inc_nmissed_count(p);
- kcb->kprobe_status = KPROBE_REENTER;
- if (p->ainsn.boostable >= 0) {
- ret = try_to_emulate(p, regs);
-
- if (ret > 0) {
- restore_previous_kprobe(kcb);
- preempt_enable_no_resched();
- return 1;
- }
- }
- prepare_singlestep(p, regs);
- return 1;
- } else if (*addr != BREAKPOINT_INSTRUCTION) {
- /* If trap variant, then it belongs not to us */
- kprobe_opcode_t cur_insn = *addr;
-
- if (is_trap(cur_insn))
- goto no_kprobe;
- /* The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
- }
- goto no_kprobe;
- }
-
p = get_kprobe(addr);
if (!p) {
if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -343,6 +298,39 @@ int kprobe_handler(struct pt_regs *regs)
goto no_kprobe;
}
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ kprobe_opcode_t insn = *p->ainsn.insn;
+ if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
+ /* Turn off 'trace' bits */
+ regs->msr &= ~MSR_SINGLESTEP;
+ regs->msr |= kcb->kprobe_saved_msr;
+ goto no_kprobe;
+ }
+
+ /*
+ * We have reentered the kprobe_handler(), since another probe
+ * was hit while within the handler. We here save the original
+ * kprobes variables and just single step on the instruction of
+ * the new probe without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ if (p->ainsn.boostable >= 0) {
+ ret = try_to_emulate(p, regs);
+
+ if (ret > 0) {
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ return 1;
+ }
+ }
+ prepare_singlestep(p, regs);
+ return 1;
+ }
+
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
if (p->pre_handler && p->pre_handler(p, regs)) {
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 34c1001e9e8b..8077b5fb18a7 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -15,6 +15,7 @@
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
+#include <linux/extable.h>
#include <asm/machdep.h>
#include <asm/mce.h>
@@ -251,6 +252,19 @@ void machine_check_queue_event(void)
/* Queue irq work to process this event later. */
irq_work_queue(&mce_event_process_work);
}
+
+void mce_common_process_ue(struct pt_regs *regs,
+ struct mce_error_info *mce_err)
+{
+ const struct exception_table_entry *entry;
+
+ entry = search_kernel_exception_table(regs->nip);
+ if (entry) {
+ mce_err->ignore_event = true;
+ regs->nip = extable_fixup(entry);
+ }
+}
+
/*
* process pending MCE event from the mce event queue. This function will be
* called during syscall exit.
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 1cbf7f1a4e3d..067b094bfeff 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -579,14 +579,10 @@ static long mce_handle_ue_error(struct pt_regs *regs,
struct mce_error_info *mce_err)
{
long handled = 0;
- const struct exception_table_entry *entry;
- entry = search_kernel_exception_table(regs->nip);
- if (entry) {
- mce_err->ignore_event = true;
- regs->nip = extable_fixup(entry);
+ mce_common_process_ue(regs, mce_err);
+ if (mce_err->ignore_event)
return 1;
- }
/*
* On specific SCOM read via MMIO we may get a machine check
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 974f65f79a8e..65f9f731c229 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -110,7 +110,7 @@ _GLOBAL(longjmp)
li r3, 1
blr
-_GLOBAL(current_stack_pointer)
+_GLOBAL(current_stack_frame)
PPC_LL r3,0(r1)
blr
-EXPORT_SYMBOL(current_stack_pointer)
+EXPORT_SYMBOL(current_stack_frame)
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 427fc22f72b6..71a3f97dc988 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -62,13 +62,9 @@ static int of_pci_phb_probe(struct platform_device *dev)
/* Init pci_dn data structures */
pci_devs_phb_init_dynamic(phb);
- /* Create EEH devices for the PHB */
+ /* Create EEH PEs for the PHB */
eeh_dev_phb_init_dynamic(phb);
- /* Register devices with EEH */
- if (dev->dev.of_node->child)
- eeh_add_device_tree_early(PCI_DN(dev->dev.of_node));
-
/* Scan the bus */
pcibios_scan_phb(phb);
if (phb->bus == NULL)
@@ -80,15 +76,9 @@ static int of_pci_phb_probe(struct platform_device *dev)
*/
pcibios_claim_one_bus(phb->bus);
- /* Finish EEH setup */
- eeh_add_device_tree_late(phb->bus);
-
/* Add probed PCI devices to the device model */
pci_bus_add_devices(phb->bus);
- /* sysfs files should only be added after devices are added */
- eeh_add_sysfs_files(phb->bus);
-
return 0;
}
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 949eceb254d8..3f91ccaa9c74 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -176,7 +176,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);
-void __init initialise_paca(struct paca_struct *new_paca, int cpu)
+void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
new_paca->lppaca_ptr = NULL;
@@ -205,7 +205,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
}
/* Put the paca pointer into r13 and SPRG_PACA */
-void setup_paca(struct paca_struct *new_paca)
+void __nostackprotector setup_paca(struct paca_struct *new_paca)
{
/* Setup r13 */
local_paca = new_paca;
@@ -214,11 +214,15 @@ void setup_paca(struct paca_struct *new_paca)
/* On Book3E, initialize the TLB miss exception frames */
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
- /* In HV mode, we setup both HPACA and PACA to avoid problems
+ /*
+	 * In HV mode, we set up both HPACA and PACA to avoid problems
* if we do a GET_PACA() before the feature fixups have been
- * applied
+ * applied.
+ *
+ * Normally you should test against CPU_FTR_HVMODE, but CPU features
+ * are not yet set up when we first reach here.
*/
- if (early_cpu_has_feature(CPU_FTR_HVMODE))
+ if (mfmsr() & MSR_HV)
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);
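Testing MSR_HV directly works because mfmsr() reads live hardware state and is safe arbitrarily early, whereas CPU_FTR_HVMODE only becomes reliable once the feature fixups have been applied. A sketch of the distinction, with an illustrative helper name:

	/* Illustrative helper, not kernel API: decide HV mode before
	 * CPU feature bits are trustworthy. */
	static inline bool early_in_hv_mode(void)
	{
		return !!(mfmsr() & MSR_HV);	/* live MSR, always valid */
	}

	/* Once feature fixups have run, the feature bit is the normal
	 * test:  if (cpu_has_feature(CPU_FTR_HVMODE)) ...          */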
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index c6c03416a151..be108616a721 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -728,7 +728,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
range.cpu_addr, range.cpu_addr + range.size - 1,
range.pci_addr,
- (range.pci_space & 0x40000000) ?
+ (range.flags & IORESOURCE_PREFETCH) ?
"Prefetch" : "");
/* We support only 3 memory ranges */
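The prefetch test now keys off the parsed resource flags rather than open-coding bit 0x40000000 of the raw PCI address-space word: the of_pci_range parser translates that bit into IORESOURCE_PREFETCH, so the generic flag is the clearer test. A sketch of the equivalent check:

	/* of_pci_range_parser_one() fills range->flags from the high
	 * cell of the PCI address, so callers can test generic
	 * IORESOURCE_* flags instead of raw address-space bits. */
	static bool range_is_prefetchable(const struct of_pci_range *range)
	{
		return !!(range->flags & IORESOURCE_PREFETCH);
	}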
@@ -1399,14 +1399,8 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
pci_assign_unassigned_bus_resources(bus);
}
- /* Fixup EEH */
- eeh_add_device_tree_late(bus);
-
/* Add new devices to global lists. Register in proc, sysfs. */
pci_bus_add_devices(bus);
-
- /* sysfs files should only be added after devices are added */
- eeh_add_sysfs_files(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index d6a67f814983..bf83f76563a3 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -112,8 +112,6 @@ void pci_hp_add_devices(struct pci_bus *bus)
struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);
- eeh_add_device_tree_early(PCI_DN(dn));
-
phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index fad50db9dcf2..9c21288f8645 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -236,23 +236,9 @@ void enable_kernel_fp(void)
}
}
EXPORT_SYMBOL(enable_kernel_fp);
-
-static int restore_fp(struct task_struct *tsk)
-{
- if (tsk->thread.load_fp) {
- load_fp_state(&current->thread.fp_state);
- current->thread.load_fp++;
- return 1;
- }
- return 0;
-}
-#else
-static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
-#define loadvec(thr) ((thr).load_vec)
-
static void __giveup_altivec(struct task_struct *tsk)
{
unsigned long msr;
@@ -318,21 +304,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
-
-static int restore_altivec(struct task_struct *tsk)
-{
- if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
- load_vr_state(&tsk->thread.vr_state);
- tsk->thread.used_vr = 1;
- tsk->thread.load_vec++;
-
- return 1;
- }
- return 0;
-}
-#else
-#define loadvec(thr) 0
-static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -400,18 +371,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
-
-static int restore_vsx(struct task_struct *tsk)
-{
- if (cpu_has_feature(CPU_FTR_VSX)) {
- tsk->thread.used_vsr = 1;
- return 1;
- }
-
- return 0;
-}
-#else
-static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -511,6 +470,53 @@ void giveup_all(struct task_struct *tsk)
}
EXPORT_SYMBOL(giveup_all);
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_FPU
+static int restore_fp(struct task_struct *tsk)
+{
+ if (tsk->thread.load_fp) {
+ load_fp_state(&current->thread.fp_state);
+ current->thread.load_fp++;
+ return 1;
+ }
+ return 0;
+}
+#else
+static int restore_fp(struct task_struct *tsk) { return 0; }
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+#define loadvec(thr) ((thr).load_vec)
+static int restore_altivec(struct task_struct *tsk)
+{
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
+ load_vr_state(&tsk->thread.vr_state);
+ tsk->thread.used_vr = 1;
+ tsk->thread.load_vec++;
+
+ return 1;
+ }
+ return 0;
+}
+#else
+#define loadvec(thr) 0
+static inline int restore_altivec(struct task_struct *tsk) { return 0; }
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static int restore_vsx(struct task_struct *tsk)
+{
+ if (cpu_has_feature(CPU_FTR_VSX)) {
+ tsk->thread.used_vsr = 1;
+ return 1;
+ }
+
+ return 0;
+}
+#else
+static inline int restore_vsx(struct task_struct *tsk) { return 0; }
+#endif /* CONFIG_VSX */
+
/*
* The exception exit path calls restore_math() with interrupts hard disabled
* but the soft irq state not "reconciled". ftrace code that calls
@@ -551,6 +557,7 @@ void notrace restore_math(struct pt_regs *regs)
regs->msr = msr;
}
+#endif
static void save_all(struct task_struct *tsk)
{
@@ -1634,11 +1641,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
p->thread.regs = childregs;
childregs->gpr[3] = 0; /* Result from fork() */
if (clone_flags & CLONE_SETTLS) {
-#ifdef CONFIG_PPC64
if (!is_32bit_task())
childregs->gpr[13] = tls;
else
-#endif
childregs->gpr[2] = tls;
}
@@ -1976,6 +1981,32 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
return 0;
}
+static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes)
+{
+#ifdef CONFIG_PPC64
+ unsigned long stack_page;
+ unsigned long cpu = task_cpu(p);
+
+ stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+# ifdef CONFIG_PPC_BOOK3S_64
+ stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+ stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+# endif
+#endif
+
+ return 0;
+}
+
+
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
@@ -1987,7 +2018,10 @@ int validate_sp(unsigned long sp, struct task_struct *p,
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
- return valid_irq_stack(sp, p, nbytes);
+ if (valid_irq_stack(sp, p, nbytes))
+ return 1;
+
+ return valid_emergency_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
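With this change a stack pointer is accepted if it lies in the task stack, an IRQ stack, or one of the per-CPU emergency stacks (plus the NMI and machine-check emergency stacks on Book3S-64). Every branch applies the same range test; a worked sketch of that test, factored out for clarity (illustrative helper, not kernel API):

	/* 'stack_top' is a paca *_sp value: the high end of a
	 * THREAD_SIZE region. The frame is valid if at least 'nbytes'
	 * of it fit inside the region. */
	static int sp_in_stack(unsigned long sp, unsigned long stack_top,
			       unsigned long nbytes)
	{
		unsigned long base = stack_top - THREAD_SIZE;

		return sp >= base && sp <= base + THREAD_SIZE - nbytes;
	}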
@@ -2053,7 +2087,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
sp = (unsigned long) stack;
if (sp == 0) {
if (tsk == current)
- sp = current_stack_pointer();
+ sp = current_stack_frame();
else
sp = tsk->thread.ksp;
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 577345382b23..806be751c336 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1773,6 +1773,9 @@ static void __init prom_rtas_os_term(char *str)
if (token == 0)
prom_panic("Could not get token for ibm,os-term\n");
os_term_args.token = cpu_to_be32(token);
+ os_term_args.nargs = cpu_to_be32(1);
+ os_term_args.nret = cpu_to_be32(1);
+ os_term_args.args[0] = cpu_to_be32(__pa(str));
prom_rtas_hcall((uint64_t)&os_term_args);
}
#endif /* CONFIG_PPC_SVM */
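The fix matters because ibm,os-term takes one input argument (the physical address of the termination string) and returns one status word; before this change nargs, nret and args[0] were left uninitialized. The populated argument block, annotated (fields are big-endian per the RTAS calling convention):

	os_term_args.token   = cpu_to_be32(token);	/* ibm,os-term token    */
	os_term_args.nargs   = cpu_to_be32(1);		/* one input argument   */
	os_term_args.nret    = cpu_to_be32(1);		/* one status word      */
	os_term_args.args[0] = cpu_to_be32(__pa(str));	/* physical addr of msg */
	prom_rtas_hcall((uint64_t)&os_term_args);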
@@ -3474,7 +3477,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
hdr = dt_header_start;
- /* Don't print anything after quiesce under OPAL, it crashes OFW */
prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
prom_debug("->dt_header_start=0x%lx\n", hdr);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
deleted file mode 100644
index 25c0424e8868..000000000000
--- a/arch/powerpc/kernel/ptrace.c
+++ /dev/null
@@ -1,3468 +0,0 @@
-/*
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/m68k/kernel/ptrace.c"
- * Copyright (C) 1994 by Hamish Macdonald
- * Taken from linux/kernel/ptrace.c and modified for M680x0.
- * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@hq.fsmlabs.com)
- * and Paul Mackerras (paulus@samba.org).
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/regset.h>
-#include <linux/tracehook.h>
-#include <linux/elf.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/signal.h>
-#include <linux/seccomp.h>
-#include <linux/audit.h>
-#include <trace/syscall.h>
-#include <linux/hw_breakpoint.h>
-#include <linux/perf_event.h>
-#include <linux/context_tracking.h>
-#include <linux/nospec.h>
-
-#include <linux/uaccess.h>
-#include <linux/pkeys.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/switch_to.h>
-#include <asm/tm.h>
-#include <asm/asm-prototypes.h>
-#include <asm/debug.h>
-#include <asm/hw_breakpoint.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
-/*
- * The parameter save area on the stack is used to store arguments being passed
- * to callee function and is located at fixed offset from stack pointer.
- */
-#ifdef CONFIG_PPC32
-#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
-#else /* CONFIG_PPC32 */
-#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
-#endif
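These offsets locate the parameter save area relative to r1: 24 bytes on ppc32, 48 bytes on 64-bit (ELFv1). A hedged sketch of how a word would be fetched from that area, assuming the offsets above (hypothetical helper, no access checks):

	static unsigned long param_save_word(struct pt_regs *regs, unsigned int n)
	{
		/* r1 is the stack pointer; the save area sits at a fixed
		 * offset above it, one machine word per argument slot. */
		unsigned long *slot = (unsigned long *)(regs->gpr[1] +
						PARAMETER_SAVE_AREA_OFFSET);

		return slot[n];
	}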
-
-struct pt_regs_offset {
- const char *name;
- int offset;
-};
-
-#define STR(s) #s /* convert to string */
-#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
-#define GPR_OFFSET_NAME(num) \
- {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
- {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
-#define REG_OFFSET_END {.name = NULL, .offset = 0}
-
-#define TVSO(f) (offsetof(struct thread_vr_state, f))
-#define TFSO(f) (offsetof(struct thread_fp_state, f))
-#define TSO(f) (offsetof(struct thread_struct, f))
-
-static const struct pt_regs_offset regoffset_table[] = {
- GPR_OFFSET_NAME(0),
- GPR_OFFSET_NAME(1),
- GPR_OFFSET_NAME(2),
- GPR_OFFSET_NAME(3),
- GPR_OFFSET_NAME(4),
- GPR_OFFSET_NAME(5),
- GPR_OFFSET_NAME(6),
- GPR_OFFSET_NAME(7),
- GPR_OFFSET_NAME(8),
- GPR_OFFSET_NAME(9),
- GPR_OFFSET_NAME(10),
- GPR_OFFSET_NAME(11),
- GPR_OFFSET_NAME(12),
- GPR_OFFSET_NAME(13),
- GPR_OFFSET_NAME(14),
- GPR_OFFSET_NAME(15),
- GPR_OFFSET_NAME(16),
- GPR_OFFSET_NAME(17),
- GPR_OFFSET_NAME(18),
- GPR_OFFSET_NAME(19),
- GPR_OFFSET_NAME(20),
- GPR_OFFSET_NAME(21),
- GPR_OFFSET_NAME(22),
- GPR_OFFSET_NAME(23),
- GPR_OFFSET_NAME(24),
- GPR_OFFSET_NAME(25),
- GPR_OFFSET_NAME(26),
- GPR_OFFSET_NAME(27),
- GPR_OFFSET_NAME(28),
- GPR_OFFSET_NAME(29),
- GPR_OFFSET_NAME(30),
- GPR_OFFSET_NAME(31),
- REG_OFFSET_NAME(nip),
- REG_OFFSET_NAME(msr),
- REG_OFFSET_NAME(ctr),
- REG_OFFSET_NAME(link),
- REG_OFFSET_NAME(xer),
- REG_OFFSET_NAME(ccr),
-#ifdef CONFIG_PPC64
- REG_OFFSET_NAME(softe),
-#else
- REG_OFFSET_NAME(mq),
-#endif
- REG_OFFSET_NAME(trap),
- REG_OFFSET_NAME(dar),
- REG_OFFSET_NAME(dsisr),
- REG_OFFSET_END,
-};
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-static void flush_tmregs_to_thread(struct task_struct *tsk)
-{
- /*
- * If task is not current, it will have been flushed already to
- * its thread_struct during __switch_to().
- *
- * A reclaim flushes ALL the state, or, if we are not in a transaction,
- * the TM SPRs are saved from the live registers into the appropriate
- * thread structures.
- */
-
- if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
- return;
-
- if (MSR_TM_SUSPENDED(mfmsr())) {
- tm_reclaim_current(TM_CAUSE_SIGNAL);
- } else {
- tm_enable();
- tm_save_sprs(&(tsk->thread));
- }
-}
-#else
-static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
-#endif
-
-/**
- * regs_query_register_offset() - query register offset from its name
- * @name: the name of a register
- *
- * regs_query_register_offset() returns the offset of a register in struct
- * pt_regs from its name. If the name is invalid, this returns -EINVAL.
- */
-int regs_query_register_offset(const char *name)
-{
- const struct pt_regs_offset *roff;
- for (roff = regoffset_table; roff->name != NULL; roff++)
- if (!strcmp(roff->name, name))
- return roff->offset;
- return -EINVAL;
-}
-
-/**
- * regs_query_register_name() - query register name from its offset
- * @offset: the offset of a register in struct pt_regs.
- *
- * regs_query_register_name() returns the name of a register from its
- * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
- */
-const char *regs_query_register_name(unsigned int offset)
-{
- const struct pt_regs_offset *roff;
- for (roff = regoffset_table; roff->name != NULL; roff++)
- if (roff->offset == offset)
- return roff->name;
- return NULL;
-}
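Together these two helpers give tracing code a symbolic bridge between register names and pt_regs offsets; kprobe event arguments use exactly this kind of lookup. A small usage sketch, with an illustrative helper name:

	/* Fetch a register from a live pt_regs by its symbolic name.
	 * Returns true on success; illustrative helper, not kernel API. */
	static bool read_reg_by_name(struct pt_regs *regs, const char *name,
				     unsigned long *val)
	{
		int off = regs_query_register_offset(name);

		if (off < 0)
			return false;
		*val = *(unsigned long *)((char *)regs + off);
		return true;
	}

	/* e.g. read_reg_by_name(regs, "nip", &pc) retrieves the PC. */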
-
-/*
- * Does not yet catch signals sent when the child dies,
- * in exit.c or in signal.c.
- */
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-#define MSR_DEBUGCHANGE 0
-#else
-#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
-#endif
-
-/*
- * Max register writeable via put_reg
- */
-#ifdef CONFIG_PPC32
-#define PT_MAX_PUT_REG PT_MQ
-#else
-#define PT_MAX_PUT_REG PT_CCR
-#endif
-
-static unsigned long get_user_msr(struct task_struct *task)
-{
- return task->thread.regs->msr | task->thread.fpexc_mode;
-}
-
-static int set_user_msr(struct task_struct *task, unsigned long msr)
-{
- task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
- task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
- return 0;
-}
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-static unsigned long get_user_ckpt_msr(struct task_struct *task)
-{
- return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
-}
-
-static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
-{
- task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
- task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
- return 0;
-}
-
-static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
-{
- task->thread.ckpt_regs.trap = trap & 0xfff0;
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PPC64
-static int get_user_dscr(struct task_struct *task, unsigned long *data)
-{
- *data = task->thread.dscr;
- return 0;
-}
-
-static int set_user_dscr(struct task_struct *task, unsigned long dscr)
-{
- task->thread.dscr = dscr;
- task->thread.dscr_inherit = 1;
- return 0;
-}
-#else
-static int get_user_dscr(struct task_struct *task, unsigned long *data)
-{
- return -EIO;
-}
-
-static int set_user_dscr(struct task_struct *task, unsigned long dscr)
-{
- return -EIO;
-}
-#endif
-
-/*
- * We prevent mucking around with the reserved area of trap
- * which are used internally by the kernel.
- */
-static int set_user_trap(struct task_struct *task, unsigned long trap)
-{
- task->thread.regs->trap = trap & 0xfff0;
- return 0;
-}
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
-{
- unsigned int regs_max;
-
- if ((task->thread.regs == NULL) || !data)
- return -EIO;
-
- if (regno == PT_MSR) {
- *data = get_user_msr(task);
- return 0;
- }
-
- if (regno == PT_DSCR)
- return get_user_dscr(task, data);
-
-#ifdef CONFIG_PPC64
- /*
- * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
- * is no longer used as a flag, force userspace to always see the softe
- * value as 1, which means interrupts are not soft disabled.
- */
- if (regno == PT_SOFTE) {
- *data = 1;
- return 0;
- }
-#endif
-
- regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
- if (regno < regs_max) {
- regno = array_index_nospec(regno, regs_max);
- *data = ((unsigned long *)task->thread.regs)[regno];
- return 0;
- }
-
- return -EIO;
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
- if (task->thread.regs == NULL)
- return -EIO;
-
- if (regno == PT_MSR)
- return set_user_msr(task, data);
- if (regno == PT_TRAP)
- return set_user_trap(task, data);
- if (regno == PT_DSCR)
- return set_user_dscr(task, data);
-
- if (regno <= PT_MAX_PUT_REG) {
- regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
- ((unsigned long *)task->thread.regs)[regno] = data;
- return 0;
- }
- return -EIO;
-}
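Both accessors clamp the register index with array_index_nospec() after the bounds check, so a mispredicted branch cannot be used to read or write out of bounds speculatively (Spectre v1). The pattern in isolation, as a sketch:

	/* Sketch of the Spectre-v1 hardening pattern used above. */
	static unsigned long read_slot(const unsigned long *arr, size_t nr,
				       size_t idx)
	{
		if (idx >= nr)
			return 0;
		/* Clamp idx under speculation so it can never index
		 * past nr, even if the branch above mispredicts. */
		idx = array_index_nospec(idx, nr);
		return arr[idx];
	}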
-
-static int gpr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int i, ret;
-
- if (target->thread.regs == NULL)
- return -EIO;
-
- if (!FULL_REGS(target->thread.regs)) {
- /* We have a partial register set. Fill 14-31 with bogus values */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
- }
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- target->thread.regs,
- 0, offsetof(struct pt_regs, msr));
- if (!ret) {
- unsigned long msr = get_user_msr(target);
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
- offsetof(struct pt_regs, msr),
- offsetof(struct pt_regs, msr) +
- sizeof(msr));
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->orig_gpr3,
- offsetof(struct pt_regs, orig_gpr3),
- sizeof(struct user_pt_regs));
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct user_pt_regs), -1);
-
- return ret;
-}
-
-static int gpr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- unsigned long reg;
- int ret;
-
- if (target->thread.regs == NULL)
- return -EIO;
-
- CHECK_FULL_REGS(target->thread.regs);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- target->thread.regs,
- 0, PT_MSR * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_MSR * sizeof(reg),
- (PT_MSR + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_msr(target, reg);
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->orig_gpr3,
- PT_ORIG_R3 * sizeof(reg),
- (PT_MAX_PUT_REG + 1) * sizeof(reg));
-
- if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_MAX_PUT_REG + 1) * sizeof(reg),
- PT_TRAP * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_TRAP * sizeof(reg),
- (PT_TRAP + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_trap(target, reg);
- }
-
- if (!ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_TRAP + 1) * sizeof(reg), -1);
-
- return ret;
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- * };
- */
-static int fpr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
-#ifdef CONFIG_VSX
- u64 buf[33];
- int i;
-
- flush_fp_to_thread(target);
-
- /* copy to local buffer then write that out */
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_FPR(i);
- buf[32] = target->thread.fp_state.fpscr;
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
-#else
- BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32]));
-
- flush_fp_to_thread(target);
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fp_state, 0, -1);
-#endif
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- * };
- *
- */
-static int fpr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
-#ifdef CONFIG_VSX
- u64 buf[33];
- int i;
-
- flush_fp_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_FPR(i);
- buf[32] = target->thread.fp_state.fpscr;
-
- /* copy to local buffer then write that out */
- i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
- if (i)
- return i;
-
- for (i = 0; i < 32 ; i++)
- target->thread.TS_FPR(i) = buf[i];
- target->thread.fp_state.fpscr = buf[32];
- return 0;
-#else
- BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32]));
-
- flush_fp_to_thread(target);
-
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fp_state, 0, -1);
-#endif
-}
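From userspace this regset is reached with PTRACE_GETREGSET/PTRACE_SETREGSET and the NT_PRFPREG note type, using the buffer layout documented above. A hedged userspace sketch (struct name is illustrative; error handling omitted):

	#include <stdint.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>
	#include <elf.h>		/* NT_PRFPREG */

	/* Matches the layout in the comment above: 32 u64 FPRs, then fpscr. */
	struct ppc_fp_regs {
		uint64_t fpr[32];
		uint64_t fpscr;
	};

	static long read_fpregs(pid_t pid, struct ppc_fp_regs *out)
	{
		struct iovec iov = { .iov_base = out, .iov_len = sizeof(*out) };

		return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRFPREG, &iov);
	}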
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadwords. Quadwords 0-31 contain the
- * corresponding vector registers. Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword. Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the
- * same structures. This also simplifies the implementation of a bi-arch
- * (combined 32- and 64-bit) gdb.
- */
-
-static int vr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- flush_altivec_to_thread(target);
- return target->thread.used_vr ? regset->n : 0;
-}
-
-/*
- * Regardless of transactions, 'vr_state' holds the current running
- * value of all the VMX registers and 'ckvr_state' holds the last
- * checkpointed value of all the VMX registers for the current
- * transaction to fall back on in case it aborts.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- * };
- */
-static int vr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- flush_altivec_to_thread(target);
-
- BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
- offsetof(struct thread_vr_state, vr[32]));
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
- 33 * sizeof(vector128));
- if (!ret) {
- /*
- * Copy out only the low-order word of vrsave.
- */
- int start, end;
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
-
- vrsave.word = target->thread.vrsave;
-
- start = 33 * sizeof(vector128);
- end = start + sizeof(vrsave);
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
- start, end);
- }
-
- return ret;
-}
-
-/*
- * Regardless of transactions, 'vr_state' holds the current running
- * value of all the VMX registers and 'ckvr_state' holds the last
- * checkpointed value of all the VMX registers for the current
- * transaction to fall back on in case it aborts.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- * };
- */
-static int vr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- flush_altivec_to_thread(target);
-
- BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
- offsetof(struct thread_vr_state, vr[32]));
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
- 33 * sizeof(vector128));
- if (!ret && count > 0) {
- /*
- * We use only the first word of vrsave.
- */
- int start, end;
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
-
- vrsave.word = target->thread.vrsave;
-
- start = 33 * sizeof(vector128);
- end = start + sizeof(vrsave);
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
- start, end);
- if (!ret)
- target->thread.vrsave = vrsave.word;
- }
-
- return ret;
-}
-#endif /* CONFIG_ALTIVEC */
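The 34-quadword layout above maps onto a flat structure: quadwords 0-31 are vr0-vr31, vscr occupies the last word (offset 12) of quadword 32, and vrsave the first word of quadword 33. A sketch of the corresponding view, assuming the word offsets documented in the comment:

	#include <stdint.h>

	/* Userspace-visible NT_PPC_VMX layout, per the comment above.
	 * Sketch only; word placement follows the documented offsets. */
	struct ppc_vmx_regs {
		uint8_t		vr[32][16];	/* vr0..vr31              */
		uint8_t		pad0[12];
		uint32_t	vscr;		/* word 3 of quadword 32  */
		uint32_t	vrsave;		/* word 0 of quadword 33  */
		uint8_t		pad1[12];
	};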
-
-#ifdef CONFIG_VSX
-/*
- * Currently, to set and get all the VSX state, you need to call the
- * FP and VMX calls as well. This only gets/sets the lower 32
- * 128-bit VSX registers.
- */
-
-static int vsr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- flush_vsx_to_thread(target);
- return target->thread.used_vsr ? regset->n : 0;
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last
- * checkpointed value of all FPR registers for the current
- * transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 vsx[32];
- * };
- */
-static int vsr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- u64 buf[32];
- int ret, i;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
-
- return ret;
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last
- * checkpointed value of all FPR registers for the current
- * transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 vsx[32];
- * };
- */
-static int vsr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 buf[32];
- int ret, i;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
- if (!ret)
- for (i = 0; i < 32 ; i++)
- target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
-
- return ret;
-}
-#endif /* CONFIG_VSX */
-
-#ifdef CONFIG_SPE
-
-/*
- * For get_evrregs/set_evrregs functions 'data' has the following layout:
- *
- * struct {
- * u32 evr[32];
- * u64 acc;
- * u32 spefscr;
- * }
- */
-
-static int evr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- flush_spe_to_thread(target);
- return target->thread.used_spe ? regset->n : 0;
-}
-
-static int evr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- flush_spe_to_thread(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.evr,
- 0, sizeof(target->thread.evr));
-
- BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
- offsetof(struct thread_struct, spefscr));
-
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.acc,
- sizeof(target->thread.evr), -1);
-
- return ret;
-}
-
-static int evr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- flush_spe_to_thread(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.evr,
- 0, sizeof(target->thread.evr));
-
- BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
- offsetof(struct thread_struct, spefscr));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.acc,
- sizeof(target->thread.evr), -1);
-
- return ret;
-}
-#endif /* CONFIG_SPE */
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/**
- * tm_cgpr_active - get active number of registers in CGPR
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the transaction checkpointed GPR category.
- */
-static int tm_cgpr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- return regset->n;
-}
-
-/**
- * tm_cgpr_get - get CGPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets transaction checkpointed GPR registers.
- *
- * When the transaction is active, 'ckpt_regs' holds all the checkpointed
- * GPR register values for the current transaction to fall back on if it
- * aborts in between. This function gets those checkpointed GPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * struct pt_regs ckpt_regs;
- * };
- */
-static int tm_cgpr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs,
- 0, offsetof(struct pt_regs, msr));
- if (!ret) {
- unsigned long msr = get_user_ckpt_msr(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
- offsetof(struct pt_regs, msr),
- offsetof(struct pt_regs, msr) +
- sizeof(msr));
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs.orig_gpr3,
- offsetof(struct pt_regs, orig_gpr3),
- sizeof(struct user_pt_regs));
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct user_pt_regs), -1);
-
- return ret;
-}
-
-/*
- * tm_cgpr_set - set the CGPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets the transaction checkpointed GPR registers.
- *
- * When the transaction is active, 'ckpt_regs' holds the checkpointed
- * GPR register values for the current transaction to fall back on if it
- * aborts in between. This function sets those checkpointed GPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * struct pt_regs ckpt_regs;
- * };
- */
-static int tm_cgpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- unsigned long reg;
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs,
- 0, PT_MSR * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_MSR * sizeof(reg),
- (PT_MSR + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_ckpt_msr(target, reg);
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs.orig_gpr3,
- PT_ORIG_R3 * sizeof(reg),
- (PT_MAX_PUT_REG + 1) * sizeof(reg));
-
- if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_MAX_PUT_REG + 1) * sizeof(reg),
- PT_TRAP * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_TRAP * sizeof(reg),
- (PT_TRAP + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_ckpt_trap(target, reg);
- }
-
- if (!ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_TRAP + 1) * sizeof(reg), -1);
-
- return ret;
-}
-
-/**
- * tm_cfpr_active - get active number of registers in CFPR
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the transaction checkpointed FPR category.
- */
-static int tm_cfpr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- return regset->n;
-}
-
-/**
- * tm_cfpr_get - get CFPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets the transaction checkpointed FPR registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * values for the current transaction to fall back on if it aborts
- * in between. This function gets those checkpointed FPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- *};
- */
-static int tm_cfpr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- u64 buf[33];
- int i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- /* copy to local buffer then write that out */
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_CKFPR(i);
- buf[32] = target->thread.ckfp_state.fpscr;
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
-}
-
-/**
- * tm_cfpr_set - set CFPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets the transaction checkpointed FPR registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * FPR register values for the current transaction to fall back on
- * if it aborts in between. This function sets these checkpointed
- * FPR registers. The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- *};
- */
-static int tm_cfpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 buf[33];
- int i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- for (i = 0; i < 32; i++)
- buf[i] = target->thread.TS_CKFPR(i);
- buf[32] = target->thread.ckfp_state.fpscr;
-
- /* copy to local buffer then write that out */
- i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
- if (i)
- return i;
- for (i = 0; i < 32 ; i++)
- target->thread.TS_CKFPR(i) = buf[i];
- target->thread.ckfp_state.fpscr = buf[32];
- return 0;
-}
-
-/**
- * tm_cvmx_active - get active number of registers in CVMX
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the checkpointed VMX category.
- */
-static int tm_cvmx_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- return regset->n;
-}
-
-/**
- * tm_cvmx_get - get CVMX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets the transaction checkpointed VMX registers.
- *
- * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
- * the checkpointed values for the current transaction to fall
- * back on if it aborts in between. The userspace interface buffer
- * layout is as follows.
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- *};
- */
-static int tm_cvmx_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- /* Flush the state */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckvr_state, 0,
- 33 * sizeof(vector128));
- if (!ret) {
- /*
- * Copy out only the low-order word of vrsave.
- */
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
- vrsave.word = target->thread.ckvrsave;
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
- 33 * sizeof(vector128), -1);
- }
-
- return ret;
-}
-
-/**
- * tm_cvmx_set - set CVMX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets the transaction checkpointed VMX registers.
- *
- * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
- * the checkpointed values for the current transaction to fall
- * back on if it aborts in between. The userspace interface buffer
- * layout is as follows.
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- *};
- */
-static int tm_cvmx_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckvr_state, 0,
- 33 * sizeof(vector128));
- if (!ret && count > 0) {
- /*
- * We use only the low-order word of vrsave.
- */
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
- vrsave.word = target->thread.ckvrsave;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
- 33 * sizeof(vector128), -1);
- if (!ret)
- target->thread.ckvrsave = vrsave.word;
- }
-
- return ret;
-}
-
-/**
- * tm_cvsx_active - get active number of registers in CVSX
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the transaction checkpointed VSX category.
- */
-static int tm_cvsx_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- flush_vsx_to_thread(target);
- return target->thread.used_vsr ? regset->n : 0;
-}
-
-/**
- * tm_cvsx_get - get CVSX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets the transaction checkpointed VSX registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * values for the current transaction to fall back on if it aborts
- * in between. This function gets those checkpointed VSX registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 vsx[32];
- *};
- */
-static int tm_cvsx_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- u64 buf[32];
- int ret, i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- /* Flush the state */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
-
- return ret;
-}
-
-/**
- * tm_cvsx_set - set CVSX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets the transaction checkpointed VSX registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * VSX register values for the current transaction to fall back on
- * if it aborts in between. This function sets these checkpointed
- * VSX registers. The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 vsx[32];
- *};
- */
-static int tm_cvsx_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 buf[32];
- int ret, i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- /* Flush the state */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
- if (!ret)
- for (i = 0; i < 32 ; i++)
- target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
-
- return ret;
-}
-
-/**
- * tm_spr_active - get active number of registers in TM SPR
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks the active number of available
- * registers in the transactional memory SPR category.
- */
-static int tm_spr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- return regset->n;
-}
-
-/**
- * tm_spr_get - get the TM related SPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets transactional memory related SPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct {
- * u64 tm_tfhar;
- * u64 tm_texasr;
- * u64 tm_tfiar;
- * };
- */
-static int tm_spr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
- BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
- BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- /* Flush the states */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- /* TFHAR register */
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfhar, 0, sizeof(u64));
-
- /* TEXASR register */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_texasr, sizeof(u64),
- 2 * sizeof(u64));
-
- /* TFIAR register */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfiar,
- 2 * sizeof(u64), 3 * sizeof(u64));
- return ret;
-}
-
-/**
- * tm_spr_set - set the TM related SPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets transactional memory related SPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct {
- * u64 tm_tfhar;
- * u64 tm_texasr;
- * u64 tm_tfiar;
- * };
- */
-static int tm_spr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
- BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
- BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- /* Flush the states */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- /* TFHAR register */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfhar, 0, sizeof(u64));
-
- /* TEXASR register */
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_texasr, sizeof(u64),
- 2 * sizeof(u64));
-
- /* TFIAR register */
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfiar,
- 2 * sizeof(u64), 3 * sizeof(u64));
- return ret;
-}
-
-static int tm_tar_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- return regset->n;
-
- return 0;
-}
-
-static int tm_tar_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tar, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_tar_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tar, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_ppr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- return regset->n;
-
- return 0;
-}
-
-
-static int tm_ppr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_ppr, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_ppr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_ppr, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_dscr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- return regset->n;
-
- return 0;
-}
-
-static int tm_dscr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_dscr, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_dscr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_dscr, 0, sizeof(u64));
- return ret;
-}
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
-#ifdef CONFIG_PPC64
-static int ppr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->ppr, 0, sizeof(u64));
-}
-
-static int ppr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->ppr, 0, sizeof(u64));
-}
-
-static int dscr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.dscr, 0, sizeof(u64));
-}
-static int dscr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.dscr, 0, sizeof(u64));
-}
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
-static int tar_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tar, 0, sizeof(u64));
-}
-static int tar_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tar, 0, sizeof(u64));
-}
-
-static int ebb_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- if (target->thread.used_ebb)
- return regset->n;
-
- return 0;
-}
-
-static int ebb_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- /* Build tests */
- BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
- BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- if (!target->thread.used_ebb)
- return -ENODATA;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
-}
-
-static int ebb_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret = 0;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
- BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- if (target->thread.used_ebb)
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ebbrr, 0, sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ebbhr, sizeof(unsigned long),
- 2 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.bescr,
- 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
-
- return ret;
-}
-static int pmu_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- return regset->n;
-}
-
-static int pmu_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- /* Build tests */
- BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
- BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
- BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
- BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.siar, 0,
- 5 * sizeof(unsigned long));
-}
-
-static int pmu_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret = 0;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
- BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
- BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
- BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.siar, 0,
- sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.sdar, sizeof(unsigned long),
- 2 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.sier, 2 * sizeof(unsigned long),
- 3 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.mmcr2, 3 * sizeof(unsigned long),
- 4 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.mmcr0, 4 * sizeof(unsigned long),
- 5 * sizeof(unsigned long));
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PPC_MEM_KEYS
-static int pkey_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!arch_pkeys_enabled())
- return -ENODEV;
-
- return regset->n;
-}
-
-static int pkey_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
- BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
-
- if (!arch_pkeys_enabled())
- return -ENODEV;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.amr, 0,
- ELF_NPKEY * sizeof(unsigned long));
-}
-
-static int pkey_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 new_amr;
- int ret;
-
- if (!arch_pkeys_enabled())
- return -ENODEV;
-
- /* Only the AMR can be set from userspace */
- if (pos != 0 || count != sizeof(new_amr))
- return -EINVAL;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &new_amr, 0, sizeof(new_amr));
- if (ret)
- return ret;
-
- /* UAMOR determines which bits of the AMR can be set from userspace. */
- target->thread.amr = (new_amr & target->thread.uamor) |
- (target->thread.amr & ~target->thread.uamor);
-
- return 0;
-}
-#endif /* CONFIG_PPC_MEM_KEYS */
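pkey_set() applies a masked merge: UAMOR selects which AMR bits a tracer may change, and every other bit keeps its previous value. A worked sketch of the update:

	#include <stdint.h>

	/* Masked merge, as in pkey_set() above: only bits set in uamor
	 * may change. Example values are illustrative. */
	static uint64_t merge_amr(uint64_t old_amr, uint64_t new_amr,
				  uint64_t uamor)
	{
		return (new_amr & uamor) | (old_amr & ~uamor);
	}

	/* e.g. merge_amr(0xF0, 0x0F, 0x3C) == 0xCC: only bits 2-5 change. */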
-
-/*
- * These are our native regset flavors.
- */
-enum powerpc_regset {
- REGSET_GPR,
- REGSET_FPR,
-#ifdef CONFIG_ALTIVEC
- REGSET_VMX,
-#endif
-#ifdef CONFIG_VSX
- REGSET_VSX,
-#endif
-#ifdef CONFIG_SPE
- REGSET_SPE,
-#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- REGSET_TM_CGPR, /* TM checkpointed GPR registers */
- REGSET_TM_CFPR, /* TM checkpointed FPR registers */
- REGSET_TM_CVMX, /* TM checkpointed VMX registers */
- REGSET_TM_CVSX, /* TM checkpointed VSX registers */
- REGSET_TM_SPR, /* TM specific SPR registers */
- REGSET_TM_CTAR, /* TM checkpointed TAR register */
- REGSET_TM_CPPR, /* TM checkpointed PPR register */
- REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
-#endif
-#ifdef CONFIG_PPC64
- REGSET_PPR, /* PPR register */
- REGSET_DSCR, /* DSCR register */
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- REGSET_TAR, /* TAR register */
- REGSET_EBB, /* EBB registers */
- REGSET_PMR, /* Performance Monitor Registers */
-#endif
-#ifdef CONFIG_PPC_MEM_KEYS
- REGSET_PKEY, /* AMR register */
-#endif
-};
-
-static const struct user_regset native_regsets[] = {
- [REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
- .size = sizeof(long), .align = sizeof(long),
- .get = gpr_get, .set = gpr_set
- },
- [REGSET_FPR] = {
- .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .get = fpr_get, .set = fpr_set
- },
-#ifdef CONFIG_ALTIVEC
- [REGSET_VMX] = {
- .core_note_type = NT_PPC_VMX, .n = 34,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = vr_active, .get = vr_get, .set = vr_set
- },
-#endif
-#ifdef CONFIG_VSX
- [REGSET_VSX] = {
- .core_note_type = NT_PPC_VSX, .n = 32,
- .size = sizeof(double), .align = sizeof(double),
- .active = vsr_active, .get = vsr_get, .set = vsr_set
- },
-#endif
-#ifdef CONFIG_SPE
- [REGSET_SPE] = {
- .core_note_type = NT_PPC_SPE, .n = 35,
- .size = sizeof(u32), .align = sizeof(u32),
- .active = evr_active, .get = evr_get, .set = evr_set
- },
-#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- [REGSET_TM_CGPR] = {
- .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
- .size = sizeof(long), .align = sizeof(long),
- .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
- },
- [REGSET_TM_CFPR] = {
- .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
- },
- [REGSET_TM_CVMX] = {
- .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
- },
- [REGSET_TM_CVSX] = {
- .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
- },
- [REGSET_TM_SPR] = {
- .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
- },
- [REGSET_TM_CTAR] = {
- .core_note_type = NT_PPC_TM_CTAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
- },
- [REGSET_TM_CPPR] = {
- .core_note_type = NT_PPC_TM_CPPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
- },
- [REGSET_TM_CDSCR] = {
- .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
- },
-#endif
-#ifdef CONFIG_PPC64
- [REGSET_PPR] = {
- .core_note_type = NT_PPC_PPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = ppr_get, .set = ppr_set
- },
- [REGSET_DSCR] = {
- .core_note_type = NT_PPC_DSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = dscr_get, .set = dscr_set
- },
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- [REGSET_TAR] = {
- .core_note_type = NT_PPC_TAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = tar_get, .set = tar_set
- },
- [REGSET_EBB] = {
- .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = ebb_active, .get = ebb_get, .set = ebb_set
- },
- [REGSET_PMR] = {
- .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = pmu_active, .get = pmu_get, .set = pmu_set
- },
-#endif
-#ifdef CONFIG_PPC_MEM_KEYS
- [REGSET_PKEY] = {
- .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = pkey_active, .get = pkey_get, .set = pkey_set
- },
-#endif
-};
-
-static const struct user_regset_view user_ppc_native_view = {
- .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
- .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
-};
-
-#ifdef CONFIG_PPC64
-#include <linux/compat.h>
-
-static int gpr32_get_common(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf,
- unsigned long *regs)
-{
- compat_ulong_t *k = kbuf;
- compat_ulong_t __user *u = ubuf;
- compat_ulong_t reg;
-
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf)
- for (; count > 0 && pos < PT_MSR; --count)
- *k++ = regs[pos++];
- else
- for (; count > 0 && pos < PT_MSR; --count)
- if (__put_user((compat_ulong_t) regs[pos++], u++))
- return -EFAULT;
-
- if (count > 0 && pos == PT_MSR) {
- reg = get_user_msr(target);
- if (kbuf)
- *k++ = reg;
- else if (__put_user(reg, u++))
- return -EFAULT;
- ++pos;
- --count;
- }
-
- if (kbuf)
- for (; count > 0 && pos < PT_REGS_COUNT; --count)
- *k++ = regs[pos++];
- else
- for (; count > 0 && pos < PT_REGS_COUNT; --count)
- if (__put_user((compat_ulong_t) regs[pos++], u++))
- return -EFAULT;
-
- kbuf = k;
- ubuf = u;
- pos *= sizeof(reg);
- count *= sizeof(reg);
- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- PT_REGS_COUNT * sizeof(reg), -1);
-}
-
-static int gpr32_set_common(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf,
- unsigned long *regs)
-{
- const compat_ulong_t *k = kbuf;
- const compat_ulong_t __user *u = ubuf;
- compat_ulong_t reg;
-
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf)
- for (; count > 0 && pos < PT_MSR; --count)
- regs[pos++] = *k++;
- else
- for (; count > 0 && pos < PT_MSR; --count) {
- if (__get_user(reg, u++))
- return -EFAULT;
- regs[pos++] = reg;
- }
-
- if (count > 0 && pos == PT_MSR) {
- if (kbuf)
- reg = *k++;
- else if (__get_user(reg, u++))
- return -EFAULT;
- set_user_msr(target, reg);
- ++pos;
- --count;
- }
-
- if (kbuf) {
- for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
- regs[pos++] = *k++;
- for (; count > 0 && pos < PT_TRAP; --count, ++pos)
- ++k;
- } else {
- for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
- if (__get_user(reg, u++))
- return -EFAULT;
- regs[pos++] = reg;
- }
- for (; count > 0 && pos < PT_TRAP; --count, ++pos)
- if (__get_user(reg, u++))
- return -EFAULT;
- }
-
- if (count > 0 && pos == PT_TRAP) {
- if (kbuf)
- reg = *k++;
- else if (__get_user(reg, u++))
- return -EFAULT;
- set_user_trap(target, reg);
- ++pos;
- --count;
- }
-
- kbuf = k;
- ubuf = u;
- pos *= sizeof(reg);
- count *= sizeof(reg);
- return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- (PT_TRAP + 1) * sizeof(reg), -1);
-}
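/*
 * A tracer-side sketch of the 32-bit view gpr32_get_common() builds,
 * assuming the generic PTRACE_GETREGSET interface: the regset is
 * PT_REGS_COUNT (44) 32-bit slots -- gpr0-31 followed by nip, msr,
 * orig_gpr3, ctr, link, xer, ccr, mq/softe, trap, dar, dsisr and
 * result -- each value truncated to compat_ulong_t.
 */
#include <stdint.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/ptrace.h>
#include <elf.h>

static int dump_compat_gprs(pid_t pid)
{
	uint32_t regs[44];	/* PT_REGS_COUNT slots, 32 bits each */
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };

	/* For a 32-bit tracee, NT_PRSTATUS selects this compat regset. */
	return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}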
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-static int tm_cgpr32_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.ckpt_regs.gpr[0]);
-}
-
-static int tm_cgpr32_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.ckpt_regs.gpr[0]);
-}
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
-static int gpr32_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int i;
-
- if (target->thread.regs == NULL)
- return -EIO;
-
- if (!FULL_REGS(target->thread.regs)) {
- /*
- * We have a partial register set.
- * Fill 14-31 with bogus values.
- */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
- }
- return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.regs->gpr[0]);
-}
-
-static int gpr32_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- if (target->thread.regs == NULL)
- return -EIO;
-
- CHECK_FULL_REGS(target->thread.regs);
- return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.regs->gpr[0]);
-}
-
-/*
- * These are the regset flavors matching the CONFIG_PPC32 native set.
- */
-static const struct user_regset compat_regsets[] = {
- [REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
- .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
- .get = gpr32_get, .set = gpr32_set
- },
- [REGSET_FPR] = {
- .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .get = fpr_get, .set = fpr_set
- },
-#ifdef CONFIG_ALTIVEC
- [REGSET_VMX] = {
- .core_note_type = NT_PPC_VMX, .n = 34,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = vr_active, .get = vr_get, .set = vr_set
- },
-#endif
-#ifdef CONFIG_SPE
- [REGSET_SPE] = {
- .core_note_type = NT_PPC_SPE, .n = 35,
- .size = sizeof(u32), .align = sizeof(u32),
- .active = evr_active, .get = evr_get, .set = evr_set
- },
-#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- [REGSET_TM_CGPR] = {
- .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
- .size = sizeof(long), .align = sizeof(long),
- .active = tm_cgpr_active,
- .get = tm_cgpr32_get, .set = tm_cgpr32_set
- },
- [REGSET_TM_CFPR] = {
- .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
- },
- [REGSET_TM_CVMX] = {
- .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
- },
- [REGSET_TM_CVSX] = {
- .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
- },
- [REGSET_TM_SPR] = {
- .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
- },
- [REGSET_TM_CTAR] = {
- .core_note_type = NT_PPC_TM_CTAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
- },
- [REGSET_TM_CPPR] = {
- .core_note_type = NT_PPC_TM_CPPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
- },
- [REGSET_TM_CDSCR] = {
- .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
- },
-#endif
-#ifdef CONFIG_PPC64
- [REGSET_PPR] = {
- .core_note_type = NT_PPC_PPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = ppr_get, .set = ppr_set
- },
- [REGSET_DSCR] = {
- .core_note_type = NT_PPC_DSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = dscr_get, .set = dscr_set
- },
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- [REGSET_TAR] = {
- .core_note_type = NT_PPC_TAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = tar_get, .set = tar_set
- },
- [REGSET_EBB] = {
- .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = ebb_active, .get = ebb_get, .set = ebb_set
- },
-#endif
-};
-
-static const struct user_regset_view user_ppc_compat_view = {
- .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
- .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
-};
-#endif /* CONFIG_PPC64 */
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
-#ifdef CONFIG_PPC64
- if (test_tsk_thread_flag(task, TIF_32BIT))
- return &user_ppc_compat_view;
-#endif
- return &user_ppc_native_view;
-}
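/*
 * A minimal tracer-side sketch of consuming the GPR regset selected by
 * task_user_regset_view() above, via the generic PTRACE_GETREGSET
 * request; the struct mirrors the uapi user_pt_regs layout on ppc64.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/ptrace.h>
#include <elf.h>

struct ppc64_user_regs {	/* same layout as struct user_pt_regs */
	unsigned long gpr[32];
	unsigned long nip, msr, orig_gpr3, ctr, link, xer, ccr, softe;
	unsigned long trap, dar, dsisr, result;
};

static int dump_gprs(pid_t pid)
{
	struct ppc64_user_regs regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == -1)
		return -1;
	printf("nip=%lx msr=%lx r1=%lx\n", regs.nip, regs.msr, regs.gpr[1]);
	return 0;
}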
-
-
-void user_enable_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- task->thread.debug.dbcr0 &= ~DBCR0_BT;
- task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
-#else
- regs->msr &= ~MSR_BE;
- regs->msr |= MSR_SE;
-#endif
- }
- set_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
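/*
 * The MSR_SE/DBCR0_IC machinery above backs the generic PTRACE_SINGLESTEP
 * request; a minimal tracer-side sketch:
 */
#include <sys/types.h>
#include <sys/ptrace.h>

static long step_one_insn(pid_t pid)
{
	/* Resume the stopped tracee for exactly one instruction. */
	return ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
}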
-
-void user_enable_block_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- task->thread.debug.dbcr0 &= ~DBCR0_IC;
- task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
- regs->msr |= MSR_DE;
-#else
- regs->msr &= ~MSR_SE;
- regs->msr |= MSR_BE;
-#endif
- }
- set_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-void user_disable_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- /*
- * The logic to disable single stepping should be as
- * simple as turning off the Instruction Complete flag.
- * And, after doing so, if all debug flags are off, turn
- * off DBCR0(IDM) and MSR(DE) .... Torez
- */
- task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
- /*
- * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
- */
- if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
- task->thread.debug.dbcr1)) {
- /*
- * All debug events were off.....
- */
- task->thread.debug.dbcr0 &= ~DBCR0_IDM;
- regs->msr &= ~MSR_DE;
- }
-#else
- regs->msr &= ~(MSR_SE | MSR_BE);
-#endif
- }
- clear_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-void ptrace_triggered(struct perf_event *bp,
- struct perf_sample_data *data, struct pt_regs *regs)
-{
- struct perf_event_attr attr;
-
-	/*
-	 * Disable the breakpoint request here since ptrace has defined a
-	 * one-shot behaviour for breakpoint exceptions in PPC64.
-	 * The SIGTRAP signal is generated automatically for us in do_dabr().
-	 * We don't have to do anything about that here.
-	 */
- attr = bp->attr;
- attr.disabled = true;
- modify_user_hw_breakpoint(bp, &attr);
-}
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
-static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
- unsigned long data)
-{
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- int ret;
- struct thread_struct *thread = &(task->thread);
- struct perf_event *bp;
- struct perf_event_attr attr;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- bool set_bp = true;
- struct arch_hw_breakpoint hw_brk;
-#endif
-
-	/* For ppc64 we support one DABR and no IABRs at the moment.
-	 * For embedded processors we support one DAC and no IACs at the
-	 * moment.
-	 */
- if (addr > 0)
- return -EINVAL;
-
- /* The bottom 3 bits in dabr are flags */
- if ((data & ~0x7UL) >= TASK_SIZE)
- return -EIO;
-
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
-	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
-	 * It was assumed, in previous implementations, that 3 bits were
-	 * passed together with the data address, fitting the design of the
-	 * DABR register, as follows:
-	 *
-	 * bit 0: Read flag
-	 * bit 1: Write flag
-	 * bit 2: Breakpoint translation
-	 *
-	 * Thus, we use them here as such.
-	 */
-
- /* Ensure breakpoint translation bit is set */
- if (data && !(data & HW_BRK_TYPE_TRANSLATE))
- return -EIO;
- hw_brk.address = data & (~HW_BRK_TYPE_DABR);
- hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
- hw_brk.len = DABR_MAX_LEN;
- hw_brk.hw_len = DABR_MAX_LEN;
- set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- bp = thread->ptrace_bps[0];
- if (!set_bp) {
- if (bp) {
- unregister_hw_breakpoint(bp);
- thread->ptrace_bps[0] = NULL;
- }
- return 0;
- }
- if (bp) {
- attr = bp->attr;
- attr.bp_addr = hw_brk.address;
- attr.bp_len = DABR_MAX_LEN;
- arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
-
- /* Enable breakpoint */
- attr.disabled = false;
-
- ret = modify_user_hw_breakpoint(bp, &attr);
- if (ret) {
- return ret;
- }
- thread->ptrace_bps[0] = bp;
- thread->hw_brk = hw_brk;
- return 0;
- }
-
- /* Create a new breakpoint request if one doesn't exist already */
- hw_breakpoint_init(&attr);
- attr.bp_addr = hw_brk.address;
- attr.bp_len = DABR_MAX_LEN;
- arch_bp_generic_fields(hw_brk.type,
- &attr.bp_type);
-
- thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
- ptrace_triggered, NULL, task);
- if (IS_ERR(bp)) {
- thread->ptrace_bps[0] = NULL;
- return PTR_ERR(bp);
- }
-
-#else /* !CONFIG_HAVE_HW_BREAKPOINT */
- if (set_bp && (!ppc_breakpoint_available()))
- return -ENODEV;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
- task->thread.hw_brk = hw_brk;
-#else /* CONFIG_PPC_ADV_DEBUG_REGS */
-	/* As described above, it was assumed 3 bits were passed with the data
-	 * address, but we will assume only the mode bits will be passed
-	 * so as not to cause alignment restrictions for DAC-based processors.
-	 */
-
- /* DAC's hold the whole address without any mode flags */
- task->thread.debug.dac1 = data & ~0x3UL;
-
- if (task->thread.debug.dac1 == 0) {
- dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
- if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
- task->thread.debug.dbcr1)) {
- task->thread.regs->msr &= ~MSR_DE;
- task->thread.debug.dbcr0 &= ~DBCR0_IDM;
- }
- return 0;
- }
-
- /* Read or Write bits must be set */
-
- if (!(data & 0x3UL))
- return -EINVAL;
-
-	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */
- task->thread.debug.dbcr0 |= DBCR0_IDM;
-
-	/* Check for write and read flags and set DBCR0 accordingly */
- dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
- if (data & 0x1UL)
- dbcr_dac(task) |= DBCR_DAC1R;
- if (data & 0x2UL)
- dbcr_dac(task) |= DBCR_DAC1W;
- task->thread.regs->msr |= MSR_DE;
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
- return 0;
-}
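/*
 * A tracer-side sketch of arming the single DABR-style watchpoint decoded
 * above, assuming the PTRACE_SET_DEBUGREG request from the powerpc
 * asm/ptrace.h uapi header. The low three bits carry the flags from the
 * comment (bit 0 read, bit 1 write, bit 2 translate); on the DABR path
 * the translate bit must be set or the request fails with -EIO, while
 * DAC-based processors interpret only the low two mode bits.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

static long watch_rw(pid_t pid, unsigned long addr)
{
	unsigned long data = (addr & ~0x7UL) | 0x1UL /* read */
			   | 0x2UL /* write */ | 0x4UL /* translate */;

	return ptrace(PTRACE_SET_DEBUGREG, pid, 0, data);
}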
-
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
- /* make sure the single step bit is not set. */
- user_disable_single_step(child);
-}
-
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-static long set_instruction_bp(struct task_struct *child,
- struct ppc_hw_breakpoint *bp_info)
-{
- int slot;
- int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
- int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
- int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
- int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
-
- if (dbcr_iac_range(child) & DBCR_IAC12MODE)
- slot2_in_use = 1;
- if (dbcr_iac_range(child) & DBCR_IAC34MODE)
- slot4_in_use = 1;
-
- if (bp_info->addr >= TASK_SIZE)
- return -EIO;
-
- if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
-
- /* Make sure range is valid. */
- if (bp_info->addr2 >= TASK_SIZE)
- return -EIO;
-
-		/* We need a pair of IAC registers */
- if ((!slot1_in_use) && (!slot2_in_use)) {
- slot = 1;
- child->thread.debug.iac1 = bp_info->addr;
- child->thread.debug.iac2 = bp_info->addr2;
- child->thread.debug.dbcr0 |= DBCR0_IAC1;
- if (bp_info->addr_mode ==
- PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
- dbcr_iac_range(child) |= DBCR_IAC12X;
- else
- dbcr_iac_range(child) |= DBCR_IAC12I;
-#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- } else if ((!slot3_in_use) && (!slot4_in_use)) {
- slot = 3;
- child->thread.debug.iac3 = bp_info->addr;
- child->thread.debug.iac4 = bp_info->addr2;
- child->thread.debug.dbcr0 |= DBCR0_IAC3;
- if (bp_info->addr_mode ==
- PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
- dbcr_iac_range(child) |= DBCR_IAC34X;
- else
- dbcr_iac_range(child) |= DBCR_IAC34I;
-#endif
- } else
- return -ENOSPC;
- } else {
-		/* We only need one. If possible, leave a pair free in
-		 * case a range is needed later.
-		 */
- if (!slot1_in_use) {
- /*
- * Don't use iac1 if iac1-iac2 are free and either
- * iac3 or iac4 (but not both) are free
- */
- if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
- slot = 1;
- child->thread.debug.iac1 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC1;
- goto out;
- }
- }
- if (!slot2_in_use) {
- slot = 2;
- child->thread.debug.iac2 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC2;
-#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- } else if (!slot3_in_use) {
- slot = 3;
- child->thread.debug.iac3 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC3;
- } else if (!slot4_in_use) {
- slot = 4;
- child->thread.debug.iac4 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC4;
-#endif
- } else
- return -ENOSPC;
- }
-out:
- child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
-
- return slot;
-}
-
-static int del_instruction_bp(struct task_struct *child, int slot)
-{
- switch (slot) {
- case 1:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
- /* address range - clear slots 1 & 2 */
- child->thread.debug.iac2 = 0;
- dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
- }
- child->thread.debug.iac1 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
- break;
- case 2:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC12MODE)
- /* used in a range */
- return -EINVAL;
- child->thread.debug.iac2 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
- break;
-#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- case 3:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
- /* address range - clear slots 3 & 4 */
- child->thread.debug.iac4 = 0;
- dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
- }
- child->thread.debug.iac3 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
- break;
- case 4:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC34MODE)
- /* Used in a range */
- return -EINVAL;
- child->thread.debug.iac4 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
- break;
-#endif
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
-{
- int byte_enable =
- (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
- & 0xf;
- int condition_mode =
- bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
- int slot;
-
- if (byte_enable && (condition_mode == 0))
- return -EINVAL;
-
- if (bp_info->addr >= TASK_SIZE)
- return -EIO;
-
- if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
- slot = 1;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- dbcr_dac(child) |= DBCR_DAC1R;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- dbcr_dac(child) |= DBCR_DAC1W;
- child->thread.debug.dac1 = (unsigned long)bp_info->addr;
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- if (byte_enable) {
- child->thread.debug.dvc1 =
- (unsigned long)bp_info->condition_value;
- child->thread.debug.dbcr2 |=
- ((byte_enable << DBCR2_DVC1BE_SHIFT) |
- (condition_mode << DBCR2_DVC1M_SHIFT));
- }
-#endif
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
- /* Both dac1 and dac2 are part of a range */
- return -ENOSPC;
-#endif
- } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
- slot = 2;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- dbcr_dac(child) |= DBCR_DAC2R;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- dbcr_dac(child) |= DBCR_DAC2W;
- child->thread.debug.dac2 = (unsigned long)bp_info->addr;
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- if (byte_enable) {
- child->thread.debug.dvc2 =
- (unsigned long)bp_info->condition_value;
- child->thread.debug.dbcr2 |=
- ((byte_enable << DBCR2_DVC2BE_SHIFT) |
- (condition_mode << DBCR2_DVC2M_SHIFT));
- }
-#endif
- } else
- return -ENOSPC;
- child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
-
- return slot + 4;
-}
-
-static int del_dac(struct task_struct *child, int slot)
-{
- if (slot == 1) {
- if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
- return -ENOENT;
-
- child->thread.debug.dac1 = 0;
- dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
- child->thread.debug.dac2 = 0;
- child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
- }
- child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
-#endif
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- child->thread.debug.dvc1 = 0;
-#endif
- } else if (slot == 2) {
- if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
- return -ENOENT;
-
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
- /* Part of a range */
- return -EINVAL;
- child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
-#endif
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- child->thread.debug.dvc2 = 0;
-#endif
- child->thread.debug.dac2 = 0;
- dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
- } else
- return -EINVAL;
-
- return 0;
-}
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-static int set_dac_range(struct task_struct *child,
- struct ppc_hw_breakpoint *bp_info)
-{
- int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
-
- /* We don't allow range watchpoints to be used with DVC */
- if (bp_info->condition_mode)
- return -EINVAL;
-
- /*
- * Best effort to verify the address range. The user/supervisor bits
- * prevent trapping in kernel space, but let's fail on an obvious bad
- * range. The simple test on the mask is not fool-proof, and any
- * exclusive range will spill over into kernel space.
- */
- if (bp_info->addr >= TASK_SIZE)
- return -EIO;
- if (mode == PPC_BREAKPOINT_MODE_MASK) {
- /*
- * dac2 is a bitmask. Don't allow a mask that makes a
- * kernel space address from a valid dac1 value
- */
- if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
- return -EIO;
- } else {
- /*
- * For range breakpoints, addr2 must also be a valid address
- */
- if (bp_info->addr2 >= TASK_SIZE)
- return -EIO;
- }
-
- if (child->thread.debug.dbcr0 &
- (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
- return -ENOSPC;
-
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
- child->thread.debug.dac1 = bp_info->addr;
- child->thread.debug.dac2 = bp_info->addr2;
- if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
- child->thread.debug.dbcr2 |= DBCR2_DAC12M;
- else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
- child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
- else /* PPC_BREAKPOINT_MODE_MASK */
- child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
- child->thread.regs->msr |= MSR_DE;
-
- return 5;
-}
-#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
-
-static long ppc_set_hwdebug(struct task_struct *child,
- struct ppc_hw_breakpoint *bp_info)
-{
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- int len = 0;
- struct thread_struct *thread = &(child->thread);
- struct perf_event *bp;
- struct perf_event_attr attr;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- struct arch_hw_breakpoint brk;
-#endif
-
- if (bp_info->version != 1)
- return -ENOTSUPP;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- /*
- * Check for invalid flags and combinations
- */
- if ((bp_info->trigger_type == 0) ||
- (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
- PPC_BREAKPOINT_TRIGGER_RW)) ||
- (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
- (bp_info->condition_mode &
- ~(PPC_BREAKPOINT_CONDITION_MODE |
- PPC_BREAKPOINT_CONDITION_BE_ALL)))
- return -EINVAL;
-#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
- if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
- return -EINVAL;
-#endif
-
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
- if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
- (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
- return -EINVAL;
- return set_instruction_bp(child, bp_info);
- }
- if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
- return set_dac(child, bp_info);
-
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- return set_dac_range(child, bp_info);
-#else
- return -EINVAL;
-#endif
-#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
- /*
- * We only support one data breakpoint
- */
- if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
- (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
- bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
- return -EINVAL;
-
- if ((unsigned long)bp_info->addr >= TASK_SIZE)
- return -EIO;
-
- brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
- brk.type = HW_BRK_TYPE_TRANSLATE;
- brk.len = DABR_MAX_LEN;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- brk.type |= HW_BRK_TYPE_READ;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- brk.type |= HW_BRK_TYPE_WRITE;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
- len = bp_info->addr2 - bp_info->addr;
- else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
- len = 1;
- else
- return -EINVAL;
- bp = thread->ptrace_bps[0];
- if (bp)
- return -ENOSPC;
-
- /* Create a new breakpoint request if one doesn't exist already */
- hw_breakpoint_init(&attr);
- attr.bp_addr = (unsigned long)bp_info->addr;
- attr.bp_len = len;
- arch_bp_generic_fields(brk.type, &attr.bp_type);
-
- thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
- ptrace_triggered, NULL, child);
- if (IS_ERR(bp)) {
- thread->ptrace_bps[0] = NULL;
- return PTR_ERR(bp);
- }
-
- return 1;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
- if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
- return -EINVAL;
-
- if (child->thread.hw_brk.address)
- return -ENOSPC;
-
- if (!ppc_breakpoint_available())
- return -ENODEV;
-
- child->thread.hw_brk = brk;
-
- return 1;
-#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
-}
-
-static long ppc_del_hwdebug(struct task_struct *child, long data)
-{
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- int ret = 0;
- struct thread_struct *thread = &(child->thread);
- struct perf_event *bp;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- int rc;
-
- if (data <= 4)
- rc = del_instruction_bp(child, (int)data);
- else
- rc = del_dac(child, (int)data - 4);
-
- if (!rc) {
- if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
- child->thread.debug.dbcr1)) {
- child->thread.debug.dbcr0 &= ~DBCR0_IDM;
- child->thread.regs->msr &= ~MSR_DE;
- }
- }
- return rc;
-#else
- if (data != 1)
- return -EINVAL;
-
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- bp = thread->ptrace_bps[0];
- if (bp) {
- unregister_hw_breakpoint(bp);
- thread->ptrace_bps[0] = NULL;
- } else
- ret = -ENOENT;
- return ret;
-#else /* CONFIG_HAVE_HW_BREAKPOINT */
- if (child->thread.hw_brk.address == 0)
- return -ENOENT;
-
- child->thread.hw_brk.address = 0;
- child->thread.hw_brk.type = 0;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
- return 0;
-#endif
-}
-
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- int ret = -EPERM;
- void __user *datavp = (void __user *) data;
- unsigned long __user *datalp = datavp;
-
- switch (request) {
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR: {
- unsigned long index, tmp;
-
- ret = -EIO;
- /* convert to index and check */
-#ifdef CONFIG_PPC32
- index = addr >> 2;
- if ((addr & 3) || (index > PT_FPSCR)
- || (child->thread.regs == NULL))
-#else
- index = addr >> 3;
- if ((addr & 7) || (index > PT_FPSCR))
-#endif
- break;
-
- CHECK_FULL_REGS(child->thread.regs);
- if (index < PT_FPR0) {
- ret = ptrace_get_reg(child, (int) index, &tmp);
- if (ret)
- break;
- } else {
- unsigned int fpidx = index - PT_FPR0;
-
- flush_fp_to_thread(child);
- if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&tmp, &child->thread.TS_FPR(fpidx),
- sizeof(long));
- else
- tmp = child->thread.fp_state.fpscr;
- }
- ret = put_user(tmp, datalp);
- break;
- }
-
- /* write the word at location addr in the USER area */
- case PTRACE_POKEUSR: {
- unsigned long index;
-
- ret = -EIO;
- /* convert to index and check */
-#ifdef CONFIG_PPC32
- index = addr >> 2;
- if ((addr & 3) || (index > PT_FPSCR)
- || (child->thread.regs == NULL))
-#else
- index = addr >> 3;
- if ((addr & 7) || (index > PT_FPSCR))
-#endif
- break;
-
- CHECK_FULL_REGS(child->thread.regs);
- if (index < PT_FPR0) {
- ret = ptrace_put_reg(child, index, data);
- } else {
- unsigned int fpidx = index - PT_FPR0;
-
- flush_fp_to_thread(child);
- if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&child->thread.TS_FPR(fpidx), &data,
- sizeof(long));
- else
- child->thread.fp_state.fpscr = data;
- ret = 0;
- }
- break;
- }
-
- case PPC_PTRACE_GETHWDBGINFO: {
- struct ppc_debug_info dbginfo;
-
- dbginfo.version = 1;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
- dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
- dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
- dbginfo.data_bp_alignment = 4;
- dbginfo.sizeof_condition = 4;
- dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
- PPC_DEBUG_FEATURE_INSN_BP_MASK;
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- dbginfo.features |=
- PPC_DEBUG_FEATURE_DATA_BP_RANGE |
- PPC_DEBUG_FEATURE_DATA_BP_MASK;
-#endif
-#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
- dbginfo.num_instruction_bps = 0;
- if (ppc_breakpoint_available())
- dbginfo.num_data_bps = 1;
- else
- dbginfo.num_data_bps = 0;
- dbginfo.num_condition_regs = 0;
-#ifdef CONFIG_PPC64
- dbginfo.data_bp_alignment = 8;
-#else
- dbginfo.data_bp_alignment = 4;
-#endif
- dbginfo.sizeof_condition = 0;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
- if (dawr_enabled())
- dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
-#else
- dbginfo.features = 0;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-
- if (copy_to_user(datavp, &dbginfo,
- sizeof(struct ppc_debug_info)))
- return -EFAULT;
- return 0;
- }
-
- case PPC_PTRACE_SETHWDEBUG: {
- struct ppc_hw_breakpoint bp_info;
-
- if (copy_from_user(&bp_info, datavp,
- sizeof(struct ppc_hw_breakpoint)))
- return -EFAULT;
- return ppc_set_hwdebug(child, &bp_info);
- }
-
- case PPC_PTRACE_DELHWDEBUG: {
- ret = ppc_del_hwdebug(child, data);
- break;
- }
-
- case PTRACE_GET_DEBUGREG: {
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- unsigned long dabr_fake;
-#endif
- ret = -EINVAL;
-		/* We only support one DABR and no IABRs at the moment */
- if (addr > 0)
- break;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- ret = put_user(child->thread.debug.dac1, datalp);
-#else
- dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
- (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
- ret = put_user(dabr_fake, datalp);
-#endif
- break;
- }
-
- case PTRACE_SET_DEBUGREG:
- ret = ptrace_set_debugreg(child, addr, data);
- break;
-
-#ifdef CONFIG_PPC64
- case PTRACE_GETREGS64:
-#endif
- case PTRACE_GETREGS: /* Get all pt_regs from the child. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_GPR,
- 0, sizeof(struct user_pt_regs),
- datavp);
-
-#ifdef CONFIG_PPC64
- case PTRACE_SETREGS64:
-#endif
- case PTRACE_SETREGS: /* Set all gp regs in the child. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_GPR,
- 0, sizeof(struct user_pt_regs),
- datavp);
-
- case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_FPR,
- 0, sizeof(elf_fpregset_t),
- datavp);
-
- case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_FPR,
- 0, sizeof(elf_fpregset_t),
- datavp);
-
-#ifdef CONFIG_ALTIVEC
- case PTRACE_GETVRREGS:
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_VMX,
- 0, (33 * sizeof(vector128) +
- sizeof(u32)),
- datavp);
-
- case PTRACE_SETVRREGS:
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_VMX,
- 0, (33 * sizeof(vector128) +
- sizeof(u32)),
- datavp);
-#endif
-#ifdef CONFIG_VSX
- case PTRACE_GETVSRREGS:
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_VSX,
- 0, 32 * sizeof(double),
- datavp);
-
- case PTRACE_SETVSRREGS:
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_VSX,
- 0, 32 * sizeof(double),
- datavp);
-#endif
-#ifdef CONFIG_SPE
- case PTRACE_GETEVRREGS:
- /* Get the child spe register state. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_SPE, 0, 35 * sizeof(u32),
- datavp);
-
- case PTRACE_SETEVRREGS:
- /* Set the child spe register state. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_SPE, 0, 35 * sizeof(u32),
- datavp);
-#endif
-
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
- return ret;
-}
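/*
 * A userspace sketch of the PPC_PTRACE_GETHWDBGINFO and
 * PPC_PTRACE_SETHWDEBUG requests handled above, assuming the structures
 * and constants from the powerpc asm/ptrace.h uapi header.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

static int arm_write_watchpoint(pid_t pid, unsigned long addr)
{
	struct ppc_debug_info info;
	struct ppc_hw_breakpoint bp = {
		.version        = 1,
		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
		.addr           = addr,
	};

	if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &info) == -1)
		return -1;
	if (info.num_data_bps == 0)
		return -1;	/* no data breakpoint hardware */

	/* Returns a positive slot handle for PPC_PTRACE_DELHWDEBUG. */
	return ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
}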
-
-#ifdef CONFIG_SECCOMP
-static int do_seccomp(struct pt_regs *regs)
-{
- if (!test_thread_flag(TIF_SECCOMP))
- return 0;
-
- /*
- * The ABI we present to seccomp tracers is that r3 contains
- * the syscall return value and orig_gpr3 contains the first
- * syscall parameter. This is different to the ptrace ABI where
- * both r3 and orig_gpr3 contain the first syscall parameter.
- */
- regs->gpr[3] = -ENOSYS;
-
- /*
- * We use the __ version here because we have already checked
- * TIF_SECCOMP. If this fails, there is nothing left to do, we
- * have already loaded -ENOSYS into r3, or seccomp has put
- * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
- */
- if (__secure_computing(NULL))
- return -1;
-
- /*
- * The syscall was allowed by seccomp, restore the register
- * state to what audit expects.
- * Note that we use orig_gpr3, which means a seccomp tracer can
- * modify the first syscall parameter (in orig_gpr3) and also
- * allow the syscall to proceed.
- */
- regs->gpr[3] = regs->orig_gpr3;
-
- return 0;
-}
-#else
-static inline int do_seccomp(struct pt_regs *regs) { return 0; }
-#endif /* CONFIG_SECCOMP */
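/*
 * The tracer-side view of the ABI described above, as a sketch: on
 * powerpc the first syscall argument is read from orig_gpr3, since r3 is
 * where the return value will land. PT_ORIG_R3 is assumed from the
 * powerpc asm/ptrace.h uapi header.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

static long first_syscall_arg(pid_t pid)
{
	return ptrace(PTRACE_PEEKUSER, pid,
		      PT_ORIG_R3 * sizeof(unsigned long), 0);
}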
-
-/**
- * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
- * @regs: the pt_regs of the task to trace (current)
- *
- * Performs various types of tracing on syscall entry. This includes seccomp,
- * ptrace, syscall tracepoints and audit.
- *
- * The pt_regs are potentially visible to userspace via ptrace, so their
- * contents are ABI.
- *
- * One or more of the tracers may modify the contents of pt_regs, in particular
- * to modify arguments or even the syscall number itself.
- *
- * It's also possible that a tracer can choose to reject the system call. In
- * that case this function will return an illegal syscall number, and will put
- * an appropriate return value in regs->gpr[3].
- *
- * Return: the (possibly changed) syscall number.
- */
-long do_syscall_trace_enter(struct pt_regs *regs)
-{
- u32 flags;
-
- user_exit();
-
- flags = READ_ONCE(current_thread_info()->flags) &
- (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
-
- if (flags) {
- int rc = tracehook_report_syscall_entry(regs);
-
- if (unlikely(flags & _TIF_SYSCALL_EMU)) {
- /*
- * A nonzero return code from
- * tracehook_report_syscall_entry() tells us to prevent
- * the syscall execution, but we are not going to
- * execute it anyway.
- *
- * Returning -1 will skip the syscall execution. We want
- * to avoid clobbering any registers, so we don't goto
- * the skip label below.
- */
- return -1;
- }
-
- if (rc) {
- /*
- * The tracer decided to abort the syscall. Note that
- * the tracer may also just change regs->gpr[0] to an
- * invalid syscall number, that is handled below on the
- * exit path.
- */
- goto skip;
- }
- }
-
- /* Run seccomp after ptrace; allow it to set gpr[3]. */
- if (do_seccomp(regs))
- return -1;
-
- /* Avoid trace and audit when syscall is invalid. */
- if (regs->gpr[0] >= NR_syscalls)
- goto skip;
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->gpr[0]);
-
-#ifdef CONFIG_PPC64
- if (!is_32bit_task())
- audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
- regs->gpr[5], regs->gpr[6]);
- else
-#endif
- audit_syscall_entry(regs->gpr[0],
- regs->gpr[3] & 0xffffffff,
- regs->gpr[4] & 0xffffffff,
- regs->gpr[5] & 0xffffffff,
- regs->gpr[6] & 0xffffffff);
-
- /* Return the possibly modified but valid syscall number */
- return regs->gpr[0];
-
-skip:
- /*
- * If we are aborting explicitly, or if the syscall number is
- * now invalid, set the return value to -ENOSYS.
- */
- regs->gpr[3] = -ENOSYS;
- return -1;
-}
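/*
 * A sketch of the tracer-side counterpart to the skip path above: a
 * tracer stopped at syscall entry can veto the call by rewriting r0 (the
 * syscall number) to an invalid value, after which the kernel delivers
 * -ENOSYS to the tracee. PT_R0 is assumed from the powerpc asm/ptrace.h
 * uapi header.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

static long cancel_pending_syscall(pid_t pid)
{
	return ptrace(PTRACE_POKEUSER, pid,
		      PT_R0 * sizeof(unsigned long), (unsigned long)-1);
}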
-
-void do_syscall_trace_leave(struct pt_regs *regs)
-{
- int step;
-
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->result);
-
- step = test_thread_flag(TIF_SINGLESTEP);
- if (step || test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, step);
-
- user_enter();
-}
-
-void __init pt_regs_check(void);
-
-/*
- * Dummy function, its purpose is to break the build if struct pt_regs and
- * struct user_pt_regs don't match.
- */
-void __init pt_regs_check(void)
-{
- BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
- offsetof(struct user_pt_regs, gpr));
- BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
- offsetof(struct user_pt_regs, nip));
- BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
- offsetof(struct user_pt_regs, msr));
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct user_pt_regs, orig_gpr3));
- BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
- offsetof(struct user_pt_regs, ctr));
- BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
- offsetof(struct user_pt_regs, link));
- BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
- offsetof(struct user_pt_regs, xer));
- BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
- offsetof(struct user_pt_regs, ccr));
-#ifdef __powerpc64__
- BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
- offsetof(struct user_pt_regs, softe));
-#else
- BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
- offsetof(struct user_pt_regs, mq));
-#endif
- BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
- offsetof(struct user_pt_regs, trap));
- BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
- offsetof(struct user_pt_regs, dar));
- BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
- offsetof(struct user_pt_regs, dsisr));
- BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
- offsetof(struct user_pt_regs, result));
-
- BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
-
- // Now check that the pt_regs offsets match the uapi #defines
- #define CHECK_REG(_pt, _reg) \
- BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
- sizeof(unsigned long)));
-
- CHECK_REG(PT_R0, gpr[0]);
- CHECK_REG(PT_R1, gpr[1]);
- CHECK_REG(PT_R2, gpr[2]);
- CHECK_REG(PT_R3, gpr[3]);
- CHECK_REG(PT_R4, gpr[4]);
- CHECK_REG(PT_R5, gpr[5]);
- CHECK_REG(PT_R6, gpr[6]);
- CHECK_REG(PT_R7, gpr[7]);
- CHECK_REG(PT_R8, gpr[8]);
- CHECK_REG(PT_R9, gpr[9]);
- CHECK_REG(PT_R10, gpr[10]);
- CHECK_REG(PT_R11, gpr[11]);
- CHECK_REG(PT_R12, gpr[12]);
- CHECK_REG(PT_R13, gpr[13]);
- CHECK_REG(PT_R14, gpr[14]);
- CHECK_REG(PT_R15, gpr[15]);
- CHECK_REG(PT_R16, gpr[16]);
- CHECK_REG(PT_R17, gpr[17]);
- CHECK_REG(PT_R18, gpr[18]);
- CHECK_REG(PT_R19, gpr[19]);
- CHECK_REG(PT_R20, gpr[20]);
- CHECK_REG(PT_R21, gpr[21]);
- CHECK_REG(PT_R22, gpr[22]);
- CHECK_REG(PT_R23, gpr[23]);
- CHECK_REG(PT_R24, gpr[24]);
- CHECK_REG(PT_R25, gpr[25]);
- CHECK_REG(PT_R26, gpr[26]);
- CHECK_REG(PT_R27, gpr[27]);
- CHECK_REG(PT_R28, gpr[28]);
- CHECK_REG(PT_R29, gpr[29]);
- CHECK_REG(PT_R30, gpr[30]);
- CHECK_REG(PT_R31, gpr[31]);
- CHECK_REG(PT_NIP, nip);
- CHECK_REG(PT_MSR, msr);
- CHECK_REG(PT_ORIG_R3, orig_gpr3);
- CHECK_REG(PT_CTR, ctr);
- CHECK_REG(PT_LNK, link);
- CHECK_REG(PT_XER, xer);
- CHECK_REG(PT_CCR, ccr);
-#ifdef CONFIG_PPC64
- CHECK_REG(PT_SOFTE, softe);
-#else
- CHECK_REG(PT_MQ, mq);
-#endif
- CHECK_REG(PT_TRAP, trap);
- CHECK_REG(PT_DAR, dar);
- CHECK_REG(PT_DSISR, dsisr);
- CHECK_REG(PT_RESULT, result);
- #undef CHECK_REG
-
- BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
-
- /*
- * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
- * real registers.
- */
- BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
-}
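/*
 * The PT_* indices validated above are exactly what userspace scales by
 * the register size for PTRACE_PEEKUSR; a minimal sketch:
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

static long read_nip(pid_t pid)
{
	return ptrace(PTRACE_PEEKUSER, pid,
		      PT_NIP * sizeof(unsigned long), 0);
}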
diff --git a/arch/powerpc/kernel/ptrace/Makefile b/arch/powerpc/kernel/ptrace/Makefile
new file mode 100644
index 000000000000..e9d97c2d063e
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+CFLAGS_ptrace-view.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
+obj-y += ptrace.o ptrace-view.o
+obj-$(CONFIG_PPC64) += ptrace32.o
+obj-$(CONFIG_VSX) += ptrace-vsx.o
+ifneq ($(CONFIG_VSX),y)
+obj-y += ptrace-novsx.o
+endif
+obj-$(CONFIG_ALTIVEC) += ptrace-altivec.o
+obj-$(CONFIG_SPE) += ptrace-spe.o
+obj-$(CONFIG_PPC_TRANSACTIONAL_MEM) += ptrace-tm.o
+obj-$(CONFIG_PPC_ADV_DEBUG_REGS) += ptrace-adv.o
+ifneq ($(CONFIG_PPC_ADV_DEBUG_REGS),y)
+obj-y += ptrace-noadv.o
+endif
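# Note: the ifneq blocks select the fallback objects, so exactly one of
# ptrace-vsx.o/ptrace-novsx.o and one of ptrace-adv.o/ptrace-noadv.o is
# ever linked into the kernel.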
diff --git a/arch/powerpc/kernel/ptrace/ptrace-adv.c b/arch/powerpc/kernel/ptrace/ptrace-adv.c
new file mode 100644
index 000000000000..3990c01ef8cf
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-adv.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
+
+#include "ptrace-decl.h"
+
+void user_enable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ task->thread.debug.dbcr0 &= ~DBCR0_BT;
+ task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+ regs->msr |= MSR_DE;
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_enable_block_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ task->thread.debug.dbcr0 &= ~DBCR0_IC;
+ task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
+ regs->msr |= MSR_DE;
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ /*
+ * The logic to disable single stepping should be as
+ * simple as turning off the Instruction Complete flag.
+ * And, after doing so, if all debug flags are off, turn
+ * off DBCR0(IDM) and MSR(DE) .... Torez
+ */
+ task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
+ /*
+ * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
+ */
+ if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+ task->thread.debug.dbcr1)) {
+ /*
+ * All debug events were off.....
+ */
+ task->thread.debug.dbcr0 &= ~DBCR0_IDM;
+ regs->msr &= ~MSR_DE;
+ }
+ }
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
+{
+ dbginfo->version = 1;
+ dbginfo->num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
+ dbginfo->num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
+ dbginfo->num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
+ dbginfo->data_bp_alignment = 4;
+ dbginfo->sizeof_condition = 4;
+ dbginfo->features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
+ PPC_DEBUG_FEATURE_INSN_BP_MASK;
+ if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_DAC_RANGE))
+ dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_RANGE |
+ PPC_DEBUG_FEATURE_DATA_BP_MASK;
+}
+
+int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
+ unsigned long __user *datalp)
+{
+	/* We only support one DABR and no IABRs at the moment */
+ if (addr > 0)
+ return -EINVAL;
+ return put_user(child->thread.debug.dac1, datalp);
+}
+
+int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret;
+ struct thread_struct *thread = &task->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+	/* For ppc64 we support one DABR and no IABRs at the moment.
+	 * For embedded processors we support one DAC and no IACs at the
+	 * moment.
+	 */
+ if (addr > 0)
+ return -EINVAL;
+
+ /* The bottom 3 bits in dabr are flags */
+ if ((data & ~0x7UL) >= TASK_SIZE)
+ return -EIO;
+
+	/* As described above, it was assumed 3 bits were passed with the data
+	 * address, but we will assume only the mode bits will be passed
+	 * so as not to cause alignment restrictions for DAC-based processors.
+	 */
+
+ /* DAC's hold the whole address without any mode flags */
+ task->thread.debug.dac1 = data & ~0x3UL;
+
+ if (task->thread.debug.dac1 == 0) {
+ dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+ if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+ task->thread.debug.dbcr1)) {
+ task->thread.regs->msr &= ~MSR_DE;
+ task->thread.debug.dbcr0 &= ~DBCR0_IDM;
+ }
+ return 0;
+ }
+
+ /* Read or Write bits must be set */
+
+ if (!(data & 0x3UL))
+ return -EINVAL;
+
+ /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */
+ task->thread.debug.dbcr0 |= DBCR0_IDM;
+
+ /* Check for write and read flags and set DBCR0 accordingly */
+ dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+ if (data & 0x1UL)
+ dbcr_dac(task) |= DBCR_DAC1R;
+ if (data & 0x2UL)
+ dbcr_dac(task) |= DBCR_DAC1W;
+ task->thread.regs->msr |= MSR_DE;
+ return 0;
+}
+
+static long set_instruction_bp(struct task_struct *child,
+ struct ppc_hw_breakpoint *bp_info)
+{
+ int slot;
+ int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
+ int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
+ int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
+ int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
+
+ if (dbcr_iac_range(child) & DBCR_IAC12MODE)
+ slot2_in_use = 1;
+ if (dbcr_iac_range(child) & DBCR_IAC34MODE)
+ slot4_in_use = 1;
+
+ if (bp_info->addr >= TASK_SIZE)
+ return -EIO;
+
+ if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+ /* Make sure range is valid. */
+ if (bp_info->addr2 >= TASK_SIZE)
+ return -EIO;
+
+		/* We need a pair of IAC registers */
+ if (!slot1_in_use && !slot2_in_use) {
+ slot = 1;
+ child->thread.debug.iac1 = bp_info->addr;
+ child->thread.debug.iac2 = bp_info->addr2;
+ child->thread.debug.dbcr0 |= DBCR0_IAC1;
+ if (bp_info->addr_mode ==
+ PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+ dbcr_iac_range(child) |= DBCR_IAC12X;
+ else
+ dbcr_iac_range(child) |= DBCR_IAC12I;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ } else if ((!slot3_in_use) && (!slot4_in_use)) {
+ slot = 3;
+ child->thread.debug.iac3 = bp_info->addr;
+ child->thread.debug.iac4 = bp_info->addr2;
+ child->thread.debug.dbcr0 |= DBCR0_IAC3;
+ if (bp_info->addr_mode ==
+ PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+ dbcr_iac_range(child) |= DBCR_IAC34X;
+ else
+ dbcr_iac_range(child) |= DBCR_IAC34I;
+#endif
+ } else {
+ return -ENOSPC;
+ }
+ } else {
+		/* We only need one. If possible, leave a pair free in
+		 * case a range is needed later.
+		 */
+ if (!slot1_in_use) {
+ /*
+ * Don't use iac1 if iac1-iac2 are free and either
+ * iac3 or iac4 (but not both) are free
+ */
+ if (slot2_in_use || slot3_in_use == slot4_in_use) {
+ slot = 1;
+ child->thread.debug.iac1 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC1;
+ goto out;
+ }
+ }
+ if (!slot2_in_use) {
+ slot = 2;
+ child->thread.debug.iac2 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC2;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ } else if (!slot3_in_use) {
+ slot = 3;
+ child->thread.debug.iac3 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC3;
+ } else if (!slot4_in_use) {
+ slot = 4;
+ child->thread.debug.iac4 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC4;
+#endif
+ } else {
+ return -ENOSPC;
+ }
+ }
+out:
+ child->thread.debug.dbcr0 |= DBCR0_IDM;
+ child->thread.regs->msr |= MSR_DE;
+
+ return slot;
+}
+
+static int del_instruction_bp(struct task_struct *child, int slot)
+{
+ switch (slot) {
+ case 1:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
+ /* address range - clear slots 1 & 2 */
+ child->thread.debug.iac2 = 0;
+ dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
+ }
+ child->thread.debug.iac1 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
+ break;
+ case 2:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC12MODE)
+ /* used in a range */
+ return -EINVAL;
+ child->thread.debug.iac2 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
+ break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ case 3:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
+ /* address range - clear slots 3 & 4 */
+ child->thread.debug.iac4 = 0;
+ dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
+ }
+ child->thread.debug.iac3 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
+ break;
+ case 4:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC34MODE)
+ /* Used in a range */
+ return -EINVAL;
+ child->thread.debug.iac4 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
+{
+ int byte_enable =
+ (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
+ & 0xf;
+ int condition_mode =
+ bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
+ int slot;
+
+ if (byte_enable && condition_mode == 0)
+ return -EINVAL;
+
+ if (bp_info->addr >= TASK_SIZE)
+ return -EIO;
+
+ if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
+ slot = 1;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ dbcr_dac(child) |= DBCR_DAC1R;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ dbcr_dac(child) |= DBCR_DAC1W;
+ child->thread.debug.dac1 = (unsigned long)bp_info->addr;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ if (byte_enable) {
+ child->thread.debug.dvc1 =
+ (unsigned long)bp_info->condition_value;
+ child->thread.debug.dbcr2 |=
+ ((byte_enable << DBCR2_DVC1BE_SHIFT) |
+ (condition_mode << DBCR2_DVC1M_SHIFT));
+ }
+#endif
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
+ /* Both dac1 and dac2 are part of a range */
+ return -ENOSPC;
+#endif
+ } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
+ slot = 2;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ dbcr_dac(child) |= DBCR_DAC2R;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ dbcr_dac(child) |= DBCR_DAC2W;
+ child->thread.debug.dac2 = (unsigned long)bp_info->addr;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ if (byte_enable) {
+ child->thread.debug.dvc2 =
+ (unsigned long)bp_info->condition_value;
+ child->thread.debug.dbcr2 |=
+ ((byte_enable << DBCR2_DVC2BE_SHIFT) |
+ (condition_mode << DBCR2_DVC2M_SHIFT));
+ }
+#endif
+ } else {
+ return -ENOSPC;
+ }
+ child->thread.debug.dbcr0 |= DBCR0_IDM;
+ child->thread.regs->msr |= MSR_DE;
+
+ return slot + 4;
+}
+
+static int del_dac(struct task_struct *child, int slot)
+{
+ if (slot == 1) {
+ if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
+ return -ENOENT;
+
+ child->thread.debug.dac1 = 0;
+ dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
+ child->thread.debug.dac2 = 0;
+ child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
+ }
+ child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
+#endif
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ child->thread.debug.dvc1 = 0;
+#endif
+ } else if (slot == 2) {
+ if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
+ return -ENOENT;
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
+ /* Part of a range */
+ return -EINVAL;
+ child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
+#endif
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ child->thread.debug.dvc2 = 0;
+#endif
+ child->thread.debug.dac2 = 0;
+ dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+static int set_dac_range(struct task_struct *child,
+ struct ppc_hw_breakpoint *bp_info)
+{
+ int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
+
+ /* We don't allow range watchpoints to be used with DVC */
+ if (bp_info->condition_mode)
+ return -EINVAL;
+
+ /*
+ * Best effort to verify the address range. The user/supervisor bits
+ * prevent trapping in kernel space, but let's fail on an obvious bad
+ * range. The simple test on the mask is not fool-proof, and any
+ * exclusive range will spill over into kernel space.
+ */
+ if (bp_info->addr >= TASK_SIZE)
+ return -EIO;
+ if (mode == PPC_BREAKPOINT_MODE_MASK) {
+ /*
+ * dac2 is a bitmask. Don't allow a mask that makes a
+ * kernel space address from a valid dac1 value
+ */
+ if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
+ return -EIO;
+ } else {
+ /*
+ * For range breakpoints, addr2 must also be a valid address
+ */
+ if (bp_info->addr2 >= TASK_SIZE)
+ return -EIO;
+ }
+
+ if (child->thread.debug.dbcr0 &
+ (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
+ return -ENOSPC;
+
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
+ child->thread.debug.dac1 = bp_info->addr;
+ child->thread.debug.dac2 = bp_info->addr2;
+ if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
+ child->thread.debug.dbcr2 |= DBCR2_DAC12M;
+ else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+ child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
+ else /* PPC_BREAKPOINT_MODE_MASK */
+ child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
+ child->thread.regs->msr |= MSR_DE;
+
+ return 5;
+}
+#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
+
+long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
+{
+ if (bp_info->version != 1)
+ return -ENOTSUPP;
+ /*
+ * Check for invalid flags and combinations
+ */
+ if (bp_info->trigger_type == 0 ||
+ (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
+ PPC_BREAKPOINT_TRIGGER_RW)) ||
+ (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
+ (bp_info->condition_mode &
+ ~(PPC_BREAKPOINT_CONDITION_MODE |
+ PPC_BREAKPOINT_CONDITION_BE_ALL)))
+ return -EINVAL;
+#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
+ if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
+ return -EINVAL;
+#endif
+
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
+ if (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE ||
+ bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
+ return -EINVAL;
+ return set_instruction_bp(child, bp_info);
+ }
+ if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+ return set_dac(child, bp_info);
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ return set_dac_range(child, bp_info);
+#else
+ return -EINVAL;
+#endif
+}
+
+long ppc_del_hwdebug(struct task_struct *child, long data)
+{
+ int rc;
+
+ if (data <= 4)
+ rc = del_instruction_bp(child, (int)data);
+ else
+ rc = del_dac(child, (int)data - 4);
+
+ if (!rc) {
+ if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
+ child->thread.debug.dbcr1)) {
+ child->thread.debug.dbcr0 &= ~DBCR0_IDM;
+ child->thread.regs->msr &= ~MSR_DE;
+ }
+ }
+ return rc;
+}
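
A note on the handle values that tie these two entry points together: set_dac() returns slot + 4 (i.e. 5 or 6), set_dac_range() returns 5, and instruction slots use 1-4, which is exactly the split ppc_del_hwdebug() undoes with its data <= 4 test. Below is a minimal user-side sketch of the round trip; it assumes the powerpc <asm/ptrace.h> definitions, and pid/watch_addr are placeholders:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>         /* struct ppc_hw_breakpoint, PPC_PTRACE_* */

/* Sketch only: arm a write watchpoint, then delete it by its handle. */
static void hwdebug_roundtrip(pid_t pid, unsigned long watch_addr)
{
        struct ppc_hw_breakpoint bp = {
                .version        = 1,
                .trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
                .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
                .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
                .addr           = watch_addr,
        };
        long handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);

        if (handle > 0)         /* 1-4: instruction slot, 5-6: DAC slot */
                ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, handle);
}
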
diff --git a/arch/powerpc/kernel/ptrace/ptrace-altivec.c b/arch/powerpc/kernel/ptrace/ptrace-altivec.c
new file mode 100644
index 000000000000..dd8b75dfbd06
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-altivec.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/elf.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the
+ * same structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ */
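
Sketched as a user-side declaration, the 34-quadword layout described above would look as follows; this is purely illustrative (the struct name is made up) and the regset code below remains the authoritative definition:

/* Illustration of the VMX regset layout: 34 * 16 = 544 bytes. */
struct vmx_regset_view {                /* hypothetical name */
        __vector128 vr[32];     /* quadwords 0-31: the vector registers */
        __vector128 vscr;       /* quadword 32: vscr in the word at offset 12 */
        __vector128 vrsave;     /* quadword 33: vrsave in the word at offset 0 */
};
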
+
+int vr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ flush_altivec_to_thread(target);
+ return target->thread.used_vr ? regset->n : 0;
+}
+
+/*
+ * Regardless of transactions, 'vr_state' holds the current running
+ * value of all the VMX registers and 'ckvr_state' holds the last
+ * checkpointed value of all the VMX registers for the current
+ * transaction to fall back on in case it aborts.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int vr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ flush_altivec_to_thread(target);
+
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.vr_state, 0,
+ 33 * sizeof(vector128));
+ if (!ret) {
+ /*
+ * Copy out only the low-order word of vrsave.
+ */
+ int start, end;
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+
+ vrsave.word = target->thread.vrsave;
+
+ start = 33 * sizeof(vector128);
+ end = start + sizeof(vrsave);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
+ start, end);
+ }
+
+ return ret;
+}
+
+/*
+ * Regardless of transactions, 'vr_state' holds the current running
+ * value of all the VMX registers and 'ckvr_state' holds the last
+ * checkpointed value of all the VMX registers for the current
+ * transaction to fall back on in case it aborts.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int vr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ flush_altivec_to_thread(target);
+
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.vr_state, 0,
+ 33 * sizeof(vector128));
+ if (!ret && count > 0) {
+ /*
+ * We use only the first word of vrsave.
+ */
+ int start, end;
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+
+ vrsave.word = target->thread.vrsave;
+
+ start = 33 * sizeof(vector128);
+ end = start + sizeof(vrsave);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
+ start, end);
+ if (!ret)
+ target->thread.vrsave = vrsave.word;
+ }
+
+ return ret;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-decl.h b/arch/powerpc/kernel/ptrace/ptrace-decl.h
new file mode 100644
index 000000000000..3c8a81999292
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-decl.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * Set of msr bits that gdb can change on behalf of a process.
+ */
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#define MSR_DEBUGCHANGE 0
+#else
+#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
+#endif
+
+/*
+ * Max register writeable via put_reg
+ */
+#ifdef CONFIG_PPC32
+#define PT_MAX_PUT_REG PT_MQ
+#else
+#define PT_MAX_PUT_REG PT_CCR
+#endif
+
+#define TVSO(f) (offsetof(struct thread_vr_state, f))
+#define TFSO(f) (offsetof(struct thread_fp_state, f))
+#define TSO(f) (offsetof(struct thread_struct, f))
+
+/*
+ * These are our native regset flavors.
+ */
+enum powerpc_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+#ifdef CONFIG_ALTIVEC
+ REGSET_VMX,
+#endif
+#ifdef CONFIG_VSX
+ REGSET_VSX,
+#endif
+#ifdef CONFIG_SPE
+ REGSET_SPE,
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ REGSET_TM_CGPR, /* TM checkpointed GPR registers */
+ REGSET_TM_CFPR, /* TM checkpointed FPR registers */
+ REGSET_TM_CVMX, /* TM checkpointed VMX registers */
+ REGSET_TM_CVSX, /* TM checkpointed VSX registers */
+ REGSET_TM_SPR, /* TM specific SPR registers */
+ REGSET_TM_CTAR, /* TM checkpointed TAR register */
+ REGSET_TM_CPPR, /* TM checkpointed PPR register */
+ REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
+#endif
+#ifdef CONFIG_PPC64
+ REGSET_PPR, /* PPR register */
+ REGSET_DSCR, /* DSCR register */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ REGSET_TAR, /* TAR register */
+ REGSET_EBB, /* EBB registers */
+ REGSET_PMR, /* Performance Monitor Registers */
+#endif
+#ifdef CONFIG_PPC_MEM_KEYS
+ REGSET_PKEY, /* AMR register */
+#endif
+};
+
+/* ptrace-(no)vsx */
+
+int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-vsx */
+
+int vsr_active(struct task_struct *target, const struct user_regset *regset);
+int vsr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int vsr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-altivec */
+
+int vr_active(struct task_struct *target, const struct user_regset *regset);
+int vr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int vr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-spe */
+
+int evr_active(struct task_struct *target, const struct user_regset *regset);
+int evr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int evr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace */
+
+int gpr32_get_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf,
+ unsigned long *regs);
+int gpr32_set_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ unsigned long *regs);
+
+/* ptrace-tm */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void flush_tmregs_to_thread(struct task_struct *tsk);
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+#endif
+
+int tm_cgpr_active(struct task_struct *target, const struct user_regset *regset);
+int tm_cgpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_cgpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_cfpr_active(struct task_struct *target, const struct user_regset *regset);
+int tm_cfpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_cfpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_cvmx_active(struct task_struct *target, const struct user_regset *regset);
+int tm_cvmx_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_cvmx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_cvsx_active(struct task_struct *target, const struct user_regset *regset);
+int tm_cvsx_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_cvsx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_spr_active(struct task_struct *target, const struct user_regset *regset);
+int tm_spr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_spr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_tar_active(struct task_struct *target, const struct user_regset *regset);
+int tm_tar_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_tar_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_ppr_active(struct task_struct *target, const struct user_regset *regset);
+int tm_ppr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_ppr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_dscr_active(struct task_struct *target, const struct user_regset *regset);
+int tm_dscr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_dscr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_cgpr32_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf);
+int tm_cgpr32_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-view */
+
+extern const struct user_regset_view user_ppc_native_view;
+
+/* ptrace-(no)adv */
+void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
+int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
+ unsigned long __user *datalp);
+int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data);
+long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info);
+long ppc_del_hwdebug(struct task_struct *child, long data);
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
new file mode 100644
index 000000000000..f87e7c5c3bf3
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
+
+#include <asm/debug.h>
+
+#include "ptrace-decl.h"
+
+void user_enable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ regs->msr &= ~MSR_BE;
+ regs->msr |= MSR_SE;
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_enable_block_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ regs->msr &= ~MSR_SE;
+ regs->msr |= MSR_BE;
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL)
+ regs->msr &= ~(MSR_SE | MSR_BE);
+
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
+{
+ dbginfo->version = 1;
+ dbginfo->num_instruction_bps = 0;
+ if (ppc_breakpoint_available())
+ dbginfo->num_data_bps = 1;
+ else
+ dbginfo->num_data_bps = 0;
+ dbginfo->num_condition_regs = 0;
+ dbginfo->data_bp_alignment = sizeof(long);
+ dbginfo->sizeof_condition = 0;
+ if (IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT)) {
+ dbginfo->features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
+ if (dawr_enabled())
+ dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
+ } else {
+ dbginfo->features = 0;
+ }
+}
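
Userspace reaches this through the PPC_PTRACE_GETHWDBGINFO request; a minimal sketch, assuming struct ppc_debug_info and the feature flags from the powerpc <asm/ptrace.h>:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

/* Sketch only: probe the data-breakpoint capabilities filled in above. */
static int have_dawr(pid_t pid)
{
        struct ppc_debug_info dbginfo;

        if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &dbginfo) != 0)
                return 0;
        return !!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_DAWR);
}
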
+
+int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
+ unsigned long __user *datalp)
+{
+ unsigned long dabr_fake;
+
+ /* We only support one DABR and no IABRS at the moment */
+ if (addr > 0)
+ return -EINVAL;
+ dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
+ (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
+ return put_user(dabr_fake, datalp);
+}
+
+int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret;
+ struct thread_struct *thread = &task->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ bool set_bp = true;
+ struct arch_hw_breakpoint hw_brk;
+
+ /* For ppc64 we support one DABR and no IABRs at the moment.
+ * For embedded processors we support one DAC and no IACs at the
+ * moment.
+ */
+ if (addr > 0)
+ return -EINVAL;
+
+ /* The bottom 3 bits in dabr are flags */
+ if ((data & ~0x7UL) >= TASK_SIZE)
+ return -EIO;
+
+ /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
+ * It was assumed, on previous implementations, that 3 bits were
+ * passed together with the data address, fitting the design of the
+ * DABR register, as follows:
+ *
+ * bit 0: Read flag
+ * bit 1: Write flag
+ * bit 2: Breakpoint translation
+ *
+ * Thus, we use them here accordingly.
+ */
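/*
 * For instance (illustration only, not part of the patch): a debugger
 * arming a write-only watchpoint on some address 'addr' through
 * PTRACE_SET_DEBUGREG would pass
 *
 *	data = (addr & ~7UL) | HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_WRITE;
 *
 * i.e. bit 2 (translate) set, as enforced just below, bit 1 (write) set
 * and bit 0 (read) clear.
 */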
+
+ /* Ensure breakpoint translation bit is set */
+ if (data && !(data & HW_BRK_TYPE_TRANSLATE))
+ return -EIO;
+ hw_brk.address = data & (~HW_BRK_TYPE_DABR);
+ hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
+ hw_brk.len = DABR_MAX_LEN;
+ hw_brk.hw_len = DABR_MAX_LEN;
+ set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ bp = thread->ptrace_bps[0];
+ if (!set_bp) {
+ if (bp) {
+ unregister_hw_breakpoint(bp);
+ thread->ptrace_bps[0] = NULL;
+ }
+ return 0;
+ }
+ if (bp) {
+ attr = bp->attr;
+ attr.bp_addr = hw_brk.address;
+ attr.bp_len = DABR_MAX_LEN;
+ arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
+
+ /* Enable breakpoint */
+ attr.disabled = false;
+
+ ret = modify_user_hw_breakpoint(bp, &attr);
+ if (ret)
+ return ret;
+
+ thread->ptrace_bps[0] = bp;
+ thread->hw_brk = hw_brk;
+ return 0;
+ }
+
+ /* Create a new breakpoint request if one doesn't exist already */
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = hw_brk.address;
+ attr.bp_len = DABR_MAX_LEN;
+ arch_bp_generic_fields(hw_brk.type,
+ &attr.bp_type);
+
+ thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
+ ptrace_triggered, NULL, task);
+ if (IS_ERR(bp)) {
+ thread->ptrace_bps[0] = NULL;
+ return PTR_ERR(bp);
+ }
+
+#else /* !CONFIG_HAVE_HW_BREAKPOINT */
+ if (set_bp && (!ppc_breakpoint_available()))
+ return -ENODEV;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ task->thread.hw_brk = hw_brk;
+ return 0;
+}
+
+long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int len = 0;
+ struct thread_struct *thread = &child->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ struct arch_hw_breakpoint brk;
+
+ if (bp_info->version != 1)
+ return -ENOTSUPP;
+ /*
+ * We only support one data breakpoint
+ */
+ if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
+ (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
+ bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
+ return -EINVAL;
+
+ if ((unsigned long)bp_info->addr >= TASK_SIZE)
+ return -EIO;
+
+ brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
+ brk.type = HW_BRK_TYPE_TRANSLATE;
+ brk.len = DABR_MAX_LEN;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ brk.type |= HW_BRK_TYPE_READ;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ brk.type |= HW_BRK_TYPE_WRITE;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
+ len = bp_info->addr2 - bp_info->addr;
+ else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+ len = 1;
+ else
+ return -EINVAL;
+ bp = thread->ptrace_bps[0];
+ if (bp)
+ return -ENOSPC;
+
+ /* Create a new breakpoint request if one doesn't exist already */
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = (unsigned long)bp_info->addr;
+ attr.bp_len = len;
+ arch_bp_generic_fields(brk.type, &attr.bp_type);
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child);
+ thread->ptrace_bps[0] = bp;
+ if (IS_ERR(bp)) {
+ thread->ptrace_bps[0] = NULL;
+ return PTR_ERR(bp);
+ }
+
+ return 1;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
+ return -EINVAL;
+
+ if (child->thread.hw_brk.address)
+ return -ENOSPC;
+
+ if (!ppc_breakpoint_available())
+ return -ENODEV;
+
+ child->thread.hw_brk = brk;
+
+ return 1;
+}
+
+long ppc_del_hwdebug(struct task_struct *child, long data)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret = 0;
+ struct thread_struct *thread = &child->thread;
+ struct perf_event *bp;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ if (data != 1)
+ return -EINVAL;
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ bp = thread->ptrace_bps[0];
+ if (bp) {
+ unregister_hw_breakpoint(bp);
+ thread->ptrace_bps[0] = NULL;
+ } else {
+ ret = -ENOENT;
+ }
+ return ret;
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
+ if (child->thread.hw_brk.address == 0)
+ return -ENOENT;
+
+ child->thread.hw_brk.address = 0;
+ child->thread.hw_brk.type = 0;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ return 0;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-novsx.c b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
new file mode 100644
index 000000000000..b2dc4e92d11a
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32]));
+
+ flush_fp_to_thread(target);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fp_state, 0, -1);
+}
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ *
+ */
+int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32]));
+
+ flush_fp_to_thread(target);
+
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fp_state, 0, -1);
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-spe.c b/arch/powerpc/kernel/ptrace/ptrace-spe.c
new file mode 100644
index 000000000000..68b86b4a4be4
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-spe.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * For get_evrregs/set_evrregs functions 'data' has the following layout:
+ *
+ * struct {
+ * u32 evr[32];
+ * u64 acc;
+ * u32 spefscr;
+ * }
+ */
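
This is the layout that the dedicated PTRACE_GETEVRREGS/PTRACE_SETEVRREGS requests transfer in one go; a minimal sketch of the read side, assuming the request constants from the powerpc <asm/ptrace.h> (the struct simply mirrors the comment above):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

/* Sketch only: fetch the SPE state, 35 32-bit words in total. */
static void get_evrregs(pid_t pid)
{
        struct {
                unsigned int evr[32];   /* upper halves of the 64-bit GPRs */
                unsigned long long acc;
                unsigned int spefscr;
        } evrregs;

        ptrace(PTRACE_GETEVRREGS, pid, 0, &evrregs);
}
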
+
+int evr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ flush_spe_to_thread(target);
+ return target->thread.used_spe ? regset->n : 0;
+}
+
+int evr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ flush_spe_to_thread(target);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.evr,
+ 0, sizeof(target->thread.evr));
+
+ BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
+ offsetof(struct thread_struct, spefscr));
+
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.acc,
+ sizeof(target->thread.evr), -1);
+
+ return ret;
+}
+
+int evr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ flush_spe_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.evr,
+ 0, sizeof(target->thread.evr));
+
+ BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
+ offsetof(struct thread_struct, spefscr));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.acc,
+ sizeof(target->thread.evr), -1);
+
+ return ret;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-tm.c b/arch/powerpc/kernel/ptrace/ptrace-tm.c
new file mode 100644
index 000000000000..d75aff31f637
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-tm.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+#include <asm/tm.h>
+#include <asm/asm-prototypes.h>
+
+#include "ptrace-decl.h"
+
+void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+ /*
+ * If the task is not current, it will already have been flushed to
+ * its thread_struct during __switch_to().
+ *
+ * A reclaim flushes ALL the state, or, if the task is not in a
+ * transaction, the TM SPRs are saved from the live registers into
+ * the appropriate thread structures.
+ */
+
+ if (!cpu_has_feature(CPU_FTR_TM) || tsk != current)
+ return;
+
+ if (MSR_TM_SUSPENDED(mfmsr())) {
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
+ } else {
+ tm_enable();
+ tm_save_sprs(&tsk->thread);
+ }
+}
+
+static unsigned long get_user_ckpt_msr(struct task_struct *task)
+{
+ return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
+}
+
+static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
+{
+ task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
+ task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
+ return 0;
+}
+
+static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
+{
+ task->thread.ckpt_regs.trap = trap & 0xfff0;
+ return 0;
+}
+
+/**
+ * tm_cgpr_active - get active number of registers in CGPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction-checkpointed GPR category.
+ */
+int tm_cgpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cgpr_get - get CGPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets the transaction-checkpointed GPR registers.
+ *
+ * When the transaction is active, 'ckpt_regs' holds all the checkpointed
+ * GPR register values for the current transaction to fall back on if it
+ * aborts in between. This function gets those checkpointed GPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * struct pt_regs ckpt_regs;
+ * };
+ */
+int tm_cgpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs,
+ 0, offsetof(struct pt_regs, msr));
+ if (!ret) {
+ unsigned long msr = get_user_ckpt_msr(target);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
+ offsetof(struct pt_regs, msr),
+ offsetof(struct pt_regs, msr) +
+ sizeof(msr));
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs.orig_gpr3,
+ offsetof(struct pt_regs, orig_gpr3),
+ sizeof(struct user_pt_regs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct user_pt_regs), -1);
+
+ return ret;
+}
+
+/**
+ * tm_cgpr_set - set the CGPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed GPR registers.
+ *
+ * When the transaction is active, 'ckpt_regs' holds the checkpointed
+ * GPR register values for the current transaction to fall back on if it
+ * aborts in between. This function sets those checkpointed GPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * struct pt_regs ckpt_regs;
+ * };
+ */
+int tm_cgpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long reg;
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs,
+ 0, PT_MSR * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_MSR * sizeof(reg),
+ (PT_MSR + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_ckpt_msr(target, reg);
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs.orig_gpr3,
+ PT_ORIG_R3 * sizeof(reg),
+ (PT_MAX_PUT_REG + 1) * sizeof(reg));
+
+ if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_MAX_PUT_REG + 1) * sizeof(reg),
+ PT_TRAP * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_TRAP * sizeof(reg),
+ (PT_TRAP + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_ckpt_trap(target, reg);
+ }
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+
+ return ret;
+}
+
+/**
+ * tm_cfpr_active - get active number of registers in CFPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction-checkpointed FPR category.
+ */
+int tm_cfpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cfpr_get - get CFPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets the transaction-checkpointed FPR registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed FPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int tm_cfpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ /* copy to local buffer then write that out */
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+}
+
+/**
+ * tm_cfpr_set - set CFPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed FPR registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * FPR register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * FPR registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int tm_cfpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ for (i = 0; i < 32; i++)
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
+
+ /* copy to local buffer then write that out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+ return i;
+ for (i = 0; i < 32 ; i++)
+ target->thread.TS_CKFPR(i) = buf[i];
+ target->thread.ckfp_state.fpscr = buf[32];
+ return 0;
+}
+
+/**
+ * tm_cvmx_active - get active number of registers in CVMX
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the checkpointed VMX category.
+ */
+int tm_cvmx_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cvmx_get - get CVMX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets the transaction-checkpointed VMX registers.
+ *
+ * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int tm_cvmx_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state,
+ 0, 33 * sizeof(vector128));
+ if (!ret) {
+ /*
+ * Copy out only the low-order word of vrsave.
+ */
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.ckvrsave;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
+ 33 * sizeof(vector128), -1);
+ }
+
+ return ret;
+}
+
+/**
+ * tm_cvmx_set - set CVMX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed VMX registers.
+ *
+ * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int tm_cvmx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state,
+ 0, 33 * sizeof(vector128));
+ if (!ret && count > 0) {
+ /*
+ * We use only the low-order word of vrsave.
+ */
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.ckvrsave;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
+ 33 * sizeof(vector128), -1);
+ if (!ret)
+ target->thread.ckvrsave = vrsave.word;
+ }
+
+ return ret;
+}
+
+/**
+ * tm_cvsx_active - get active number of registers in CVSX
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction-checkpointed VSX category.
+ */
+int tm_cvsx_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ flush_vsx_to_thread(target);
+ return target->thread.used_vsr ? regset->n : 0;
+}
+
+/**
+ * tm_cvsx_get - get CVSX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets the transaction-checkpointed VSX registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed VSX registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int tm_cvsx_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+
+ return ret;
+}
+
+/**
+ * tm_cvsx_set - set CVSX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed VSX registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * VSX register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * VSX registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int tm_cvsx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+ if (!ret)
+ for (i = 0; i < 32 ; i++)
+ target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+
+ return ret;
+}
+
+/**
+ * tm_spr_active - get active number of registers in TM SPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks the active number of available
+ * registers in the transactional memory SPR category.
+ */
+int tm_spr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ return regset->n;
+}
+
+/**
+ * tm_spr_get - get the TM related SPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+int tm_spr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
+ BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
+ BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ /* Flush the states */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ /* TFHAR register */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfhar, 0, sizeof(u64));
+
+ /* TEXASR register */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_texasr, sizeof(u64),
+ 2 * sizeof(u64));
+
+ /* TFIAR register */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfiar,
+ 2 * sizeof(u64), 3 * sizeof(u64));
+ return ret;
+}
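
From userspace these three SPRs come back through the regset interface; a minimal sketch of the read side, assuming NT_PPC_TM_SPR from <linux/elf.h>:

#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/ptrace.h>
#include <linux/elf.h>          /* NT_PPC_TM_SPR */

/* Sketch only: dump TFHAR/TEXASR/TFIAR of a stopped tracee. */
static void dump_tm_sprs(pid_t pid)
{
        struct { unsigned long long tfhar, texasr, tfiar; } sprs;
        struct iovec iov = { .iov_base = &sprs, .iov_len = sizeof(sprs) };

        if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov) == 0)
                printf("tfhar=%llx texasr=%llx tfiar=%llx\n",
                       sprs.tfhar, sprs.texasr, sprs.tfiar);
}
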
+
+/**
+ * tm_spr_set - set the TM related SPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+int tm_spr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
+ BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
+ BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ /* Flush the states */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ /* TFHAR register */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfhar, 0, sizeof(u64));
+
+ /* TEXASR register */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_texasr, sizeof(u64),
+ 2 * sizeof(u64));
+
+ /* TFIAR register */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfiar,
+ 2 * sizeof(u64), 3 * sizeof(u64));
+ return ret;
+}
+
+int tm_tar_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+int tm_tar_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tar, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_tar_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tar, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_ppr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+
+int tm_ppr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_ppr, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_ppr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_ppr, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_dscr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+int tm_dscr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_dscr, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_dscr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_dscr, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_cgpr32_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.ckpt_regs.gpr[0]);
+}
+
+int tm_cgpr32_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.ckpt_regs.gpr[0]);
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
new file mode 100644
index 000000000000..15e3b79b6395
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -0,0 +1,904 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/elf.h>
+#include <linux/nospec.h>
+#include <linux/pkeys.h>
+
+#include "ptrace-decl.h"
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define STR(s) #s /* convert to string */
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define GPR_OFFSET_NAME(num) \
+ {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
+ {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ GPR_OFFSET_NAME(0),
+ GPR_OFFSET_NAME(1),
+ GPR_OFFSET_NAME(2),
+ GPR_OFFSET_NAME(3),
+ GPR_OFFSET_NAME(4),
+ GPR_OFFSET_NAME(5),
+ GPR_OFFSET_NAME(6),
+ GPR_OFFSET_NAME(7),
+ GPR_OFFSET_NAME(8),
+ GPR_OFFSET_NAME(9),
+ GPR_OFFSET_NAME(10),
+ GPR_OFFSET_NAME(11),
+ GPR_OFFSET_NAME(12),
+ GPR_OFFSET_NAME(13),
+ GPR_OFFSET_NAME(14),
+ GPR_OFFSET_NAME(15),
+ GPR_OFFSET_NAME(16),
+ GPR_OFFSET_NAME(17),
+ GPR_OFFSET_NAME(18),
+ GPR_OFFSET_NAME(19),
+ GPR_OFFSET_NAME(20),
+ GPR_OFFSET_NAME(21),
+ GPR_OFFSET_NAME(22),
+ GPR_OFFSET_NAME(23),
+ GPR_OFFSET_NAME(24),
+ GPR_OFFSET_NAME(25),
+ GPR_OFFSET_NAME(26),
+ GPR_OFFSET_NAME(27),
+ GPR_OFFSET_NAME(28),
+ GPR_OFFSET_NAME(29),
+ GPR_OFFSET_NAME(30),
+ GPR_OFFSET_NAME(31),
+ REG_OFFSET_NAME(nip),
+ REG_OFFSET_NAME(msr),
+ REG_OFFSET_NAME(ctr),
+ REG_OFFSET_NAME(link),
+ REG_OFFSET_NAME(xer),
+ REG_OFFSET_NAME(ccr),
+#ifdef CONFIG_PPC64
+ REG_OFFSET_NAME(softe),
+#else
+ REG_OFFSET_NAME(mq),
+#endif
+ REG_OFFSET_NAME(trap),
+ REG_OFFSET_NAME(dar),
+ REG_OFFSET_NAME(dsisr),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL;
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
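
Both lookups are simple linear scans over regoffset_table. Note that each GPR appears under two aliases ("rN" and "gprN") sharing one offset, so the reverse lookup returns the first alias. For illustration:

/* Illustration only: the two directions of the table lookup. */
int off = regs_query_register_offset("gpr1");      /* offsetof(struct pt_regs, gpr[1]) */
const char *name = regs_query_register_name(off);  /* "r1", the first alias */
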
+
+/*
+ * Does not yet catch signals sent when the child dies,
+ * in exit.c or in signal.c.
+ */
+
+static unsigned long get_user_msr(struct task_struct *task)
+{
+ return task->thread.regs->msr | task->thread.fpexc_mode;
+}
+
+static int set_user_msr(struct task_struct *task, unsigned long msr)
+{
+ task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
+ task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
+ return 0;
+}
+
+#ifdef CONFIG_PPC64
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
+{
+ *data = task->thread.dscr;
+ return 0;
+}
+
+static int set_user_dscr(struct task_struct *task, unsigned long dscr)
+{
+ task->thread.dscr = dscr;
+ task->thread.dscr_inherit = 1;
+ return 0;
+}
+#else
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
+{
+ return -EIO;
+}
+
+static int set_user_dscr(struct task_struct *task, unsigned long dscr)
+{
+ return -EIO;
+}
+#endif
+
+/*
+ * We prevent mucking around with the reserved area of trap
+ * which is used internally by the kernel.
+ */
+static int set_user_trap(struct task_struct *task, unsigned long trap)
+{
+ task->thread.regs->trap = trap & 0xfff0;
+ return 0;
+}
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
+{
+ unsigned int regs_max;
+
+ if (task->thread.regs == NULL || !data)
+ return -EIO;
+
+ if (regno == PT_MSR) {
+ *data = get_user_msr(task);
+ return 0;
+ }
+
+ if (regno == PT_DSCR)
+ return get_user_dscr(task, data);
+
+ /*
+ * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
+ * no longer used as a flag, force userspace to always see the softe value as 1,
+ * which means interrupts are not soft disabled.
+ */
+ if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
+ *data = 1;
+ return 0;
+ }
+
+ regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
+ if (regno < regs_max) {
+ regno = array_index_nospec(regno, regs_max);
+ *data = ((unsigned long *)task->thread.regs)[regno];
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
+{
+ if (task->thread.regs == NULL)
+ return -EIO;
+
+ if (regno == PT_MSR)
+ return set_user_msr(task, data);
+ if (regno == PT_TRAP)
+ return set_user_trap(task, data);
+ if (regno == PT_DSCR)
+ return set_user_dscr(task, data);
+
+ if (regno <= PT_MAX_PUT_REG) {
+ regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
+ ((unsigned long *)task->thread.regs)[regno] = data;
+ return 0;
+ }
+ return -EIO;
+}
+
+static int gpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ int i, ret;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ if (!FULL_REGS(target->thread.regs)) {
+ /* We have a partial register set. Fill 14-31 with bogus values */
+ for (i = 14; i < 32; i++)
+ target->thread.regs->gpr[i] = NV_REG_POISON;
+ }
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ target->thread.regs,
+ 0, offsetof(struct pt_regs, msr));
+ if (!ret) {
+ unsigned long msr = get_user_msr(target);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
+ offsetof(struct pt_regs, msr),
+ offsetof(struct pt_regs, msr) +
+ sizeof(msr));
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->orig_gpr3,
+ offsetof(struct pt_regs, orig_gpr3),
+ sizeof(struct user_pt_regs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct user_pt_regs), -1);
+
+ return ret;
+}
+
+static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ unsigned long reg;
+ int ret;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.regs,
+ 0, PT_MSR * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_MSR * sizeof(reg),
+ (PT_MSR + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_msr(target, reg);
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->orig_gpr3,
+ PT_ORIG_R3 * sizeof(reg),
+ (PT_MAX_PUT_REG + 1) * sizeof(reg));
+
+ if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_MAX_PUT_REG + 1) * sizeof(reg),
+ PT_TRAP * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_TRAP * sizeof(reg),
+ (PT_TRAP + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_trap(target, reg);
+ }
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+
+ return ret;
+}
+
+#ifdef CONFIG_PPC64
+static int ppr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->ppr, 0, sizeof(u64));
+}
+
+static int ppr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->ppr, 0, sizeof(u64));
+}
+
+static int dscr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.dscr, 0, sizeof(u64));
+}
+
+static int dscr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.dscr, 0, sizeof(u64));
+}
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+static int tar_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tar, 0, sizeof(u64));
+}
+static int tar_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tar, 0, sizeof(u64));
+}
+
+static int ebb_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (target->thread.used_ebb)
+ return regset->n;
+
+ return 0;
+}
+
+static int ebb_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ /* Build tests */
+ BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+ BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (!target->thread.used_ebb)
+ return -ENODATA;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.ebbrr,
+ 0, 3 * sizeof(unsigned long));
+}
+
+static int ebb_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ int ret = 0;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+ BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (target->thread.used_ebb)
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ebbrr,
+ 0, sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ebbhr, sizeof(unsigned long),
+ 2 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.bescr, 2 * sizeof(unsigned long),
+ 3 * sizeof(unsigned long));
+
+ return ret;
+}
+static int pmu_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ return regset->n;
+}
+
+static int pmu_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ /* Build tests */
+ BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+ BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+ BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+ BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.siar,
+ 0, 5 * sizeof(unsigned long));
+}
+
+static int pmu_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ int ret = 0;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+ BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+ BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+ BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.siar,
+ 0, sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.sdar, sizeof(unsigned long),
+ 2 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.sier, 2 * sizeof(unsigned long),
+ 3 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.mmcr2, 3 * sizeof(unsigned long),
+ 4 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.mmcr0, 4 * sizeof(unsigned long),
+ 5 * sizeof(unsigned long));
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PPC_MEM_KEYS
+static int pkey_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!arch_pkeys_enabled())
+ return -ENODEV;
+
+ return regset->n;
+}
+
+static int pkey_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
+ BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
+
+ if (!arch_pkeys_enabled())
+ return -ENODEV;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.amr,
+ 0, ELF_NPKEY * sizeof(unsigned long));
+}
+
+static int pkey_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ u64 new_amr;
+ int ret;
+
+ if (!arch_pkeys_enabled())
+ return -ENODEV;
+
+ /* Only the AMR can be set from userspace */
+ if (pos != 0 || count != sizeof(new_amr))
+ return -EINVAL;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &new_amr, 0, sizeof(new_amr));
+ if (ret)
+ return ret;
+
+ /* UAMOR determines which bits of the AMR can be set from userspace. */
+ target->thread.amr = (new_amr & target->thread.uamor) |
+ (target->thread.amr & ~target->thread.uamor);
+
+ return 0;
+}
+#endif /* CONFIG_PPC_MEM_KEYS */
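
The AMR update at the end of pkey_set() is a read-modify-write under a mask. A standalone restatement of the rule, with made-up example values:

/* Illustrative only: bits permitted by UAMOR take the new value,
 * all other bits keep their old value. */
static unsigned long masked_amr(unsigned long old_amr,
                                unsigned long new_amr,
                                unsigned long uamor)
{
        return (new_amr & uamor) | (old_amr & ~uamor);
}
/* e.g. masked_amr(0xF0, 0x0F, 0x3C) == 0xCC */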
+
+static const struct user_regset native_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .get = gpr_get, .set = gpr_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .get = fpr_get, .set = fpr_set
+ },
+#ifdef CONFIG_ALTIVEC
+ [REGSET_VMX] = {
+ .core_note_type = NT_PPC_VMX, .n = 34,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = vr_active, .get = vr_get, .set = vr_set
+ },
+#endif
+#ifdef CONFIG_VSX
+ [REGSET_VSX] = {
+ .core_note_type = NT_PPC_VSX, .n = 32,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = vsr_active, .get = vsr_get, .set = vsr_set
+ },
+#endif
+#ifdef CONFIG_SPE
+ [REGSET_SPE] = {
+ .core_note_type = NT_PPC_SPE, .n = 35,
+ .size = sizeof(u32), .align = sizeof(u32),
+ .active = evr_active, .get = evr_get, .set = evr_set
+ },
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ [REGSET_TM_CGPR] = {
+ .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
+ },
+ [REGSET_TM_CFPR] = {
+ .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
+ },
+ [REGSET_TM_CVMX] = {
+ .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
+ },
+ [REGSET_TM_CVSX] = {
+ .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
+ },
+ [REGSET_TM_SPR] = {
+ .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
+ },
+ [REGSET_TM_CTAR] = {
+ .core_note_type = NT_PPC_TM_CTAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
+ },
+ [REGSET_TM_CPPR] = {
+ .core_note_type = NT_PPC_TM_CPPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
+ },
+ [REGSET_TM_CDSCR] = {
+ .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC64
+ [REGSET_PPR] = {
+ .core_note_type = NT_PPC_PPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = ppr_get, .set = ppr_set
+ },
+ [REGSET_DSCR] = {
+ .core_note_type = NT_PPC_DSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = dscr_get, .set = dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ [REGSET_TAR] = {
+ .core_note_type = NT_PPC_TAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = tar_get, .set = tar_set
+ },
+ [REGSET_EBB] = {
+ .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = ebb_active, .get = ebb_get, .set = ebb_set
+ },
+ [REGSET_PMR] = {
+ .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = pmu_active, .get = pmu_get, .set = pmu_set
+ },
+#endif
+#ifdef CONFIG_PPC_MEM_KEYS
+ [REGSET_PKEY] = {
+ .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = pkey_active, .get = pkey_get, .set = pkey_set
+ },
+#endif
+};
+
+const struct user_regset_view user_ppc_native_view = {
+ .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
+ .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+#include <linux/compat.h>
+
+int gpr32_get_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf,
+ unsigned long *regs)
+{
+ compat_ulong_t *k = kbuf;
+ compat_ulong_t __user *u = ubuf;
+ compat_ulong_t reg;
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf)
+ for (; count > 0 && pos < PT_MSR; --count)
+ *k++ = regs[pos++];
+ else
+ for (; count > 0 && pos < PT_MSR; --count)
+ if (__put_user((compat_ulong_t)regs[pos++], u++))
+ return -EFAULT;
+
+ if (count > 0 && pos == PT_MSR) {
+ reg = get_user_msr(target);
+ if (kbuf)
+ *k++ = reg;
+ else if (__put_user(reg, u++))
+ return -EFAULT;
+ ++pos;
+ --count;
+ }
+
+ if (kbuf)
+ for (; count > 0 && pos < PT_REGS_COUNT; --count)
+ *k++ = regs[pos++];
+ else
+ for (; count > 0 && pos < PT_REGS_COUNT; --count)
+ if (__put_user((compat_ulong_t)regs[pos++], u++))
+ return -EFAULT;
+
+ kbuf = k;
+ ubuf = u;
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ PT_REGS_COUNT * sizeof(reg), -1);
+}
+
+int gpr32_set_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ unsigned long *regs)
+{
+ const compat_ulong_t *k = kbuf;
+ const compat_ulong_t __user *u = ubuf;
+ compat_ulong_t reg;
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf)
+ for (; count > 0 && pos < PT_MSR; --count)
+ regs[pos++] = *k++;
+ else
+ for (; count > 0 && pos < PT_MSR; --count) {
+ if (__get_user(reg, u++))
+ return -EFAULT;
+ regs[pos++] = reg;
+ }
+
+ if (count > 0 && pos == PT_MSR) {
+ if (kbuf)
+ reg = *k++;
+ else if (__get_user(reg, u++))
+ return -EFAULT;
+ set_user_msr(target, reg);
+ ++pos;
+ --count;
+ }
+
+ if (kbuf) {
+ for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
+ regs[pos++] = *k++;
+ for (; count > 0 && pos < PT_TRAP; --count, ++pos)
+ ++k;
+ } else {
+ for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
+ if (__get_user(reg, u++))
+ return -EFAULT;
+ regs[pos++] = reg;
+ }
+ for (; count > 0 && pos < PT_TRAP; --count, ++pos)
+ if (__get_user(reg, u++))
+ return -EFAULT;
+ }
+
+ if (count > 0 && pos == PT_TRAP) {
+ if (kbuf)
+ reg = *k++;
+ else if (__get_user(reg, u++))
+ return -EFAULT;
+ set_user_trap(target, reg);
+ ++pos;
+ --count;
+ }
+
+ kbuf = k;
+ ubuf = u;
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+ return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+}
+
+static int gpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int i;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ if (!FULL_REGS(target->thread.regs)) {
+ /*
+ * We have a partial register set.
+ * Fill 14-31 with bogus values.
+ */
+ for (i = 14; i < 32; i++)
+ target->thread.regs->gpr[i] = NV_REG_POISON;
+ }
+ return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.regs->gpr[0]);
+}
+
+static int gpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.regs->gpr[0]);
+}
+
+/*
+ * These are the regset flavors matching the CONFIG_PPC32 native set.
+ */
+static const struct user_regset compat_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+ .get = gpr32_get, .set = gpr32_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .get = fpr_get, .set = fpr_set
+ },
+#ifdef CONFIG_ALTIVEC
+ [REGSET_VMX] = {
+ .core_note_type = NT_PPC_VMX, .n = 34,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = vr_active, .get = vr_get, .set = vr_set
+ },
+#endif
+#ifdef CONFIG_SPE
+ [REGSET_SPE] = {
+ .core_note_type = NT_PPC_SPE, .n = 35,
+ .size = sizeof(u32), .align = sizeof(u32),
+ .active = evr_active, .get = evr_get, .set = evr_set
+ },
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ [REGSET_TM_CGPR] = {
+ .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .active = tm_cgpr_active,
+ .get = tm_cgpr32_get, .set = tm_cgpr32_set
+ },
+ [REGSET_TM_CFPR] = {
+ .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
+ },
+ [REGSET_TM_CVMX] = {
+ .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
+ },
+ [REGSET_TM_CVSX] = {
+ .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
+ },
+ [REGSET_TM_SPR] = {
+ .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
+ },
+ [REGSET_TM_CTAR] = {
+ .core_note_type = NT_PPC_TM_CTAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
+ },
+ [REGSET_TM_CPPR] = {
+ .core_note_type = NT_PPC_TM_CPPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
+ },
+ [REGSET_TM_CDSCR] = {
+ .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC64
+ [REGSET_PPR] = {
+ .core_note_type = NT_PPC_PPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = ppr_get, .set = ppr_set
+ },
+ [REGSET_DSCR] = {
+ .core_note_type = NT_PPC_DSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = dscr_get, .set = dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ [REGSET_TAR] = {
+ .core_note_type = NT_PPC_TAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = tar_get, .set = tar_set
+ },
+ [REGSET_EBB] = {
+ .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = ebb_active, .get = ebb_get, .set = ebb_set
+ },
+#endif
+};
+
+static const struct user_regset_view user_ppc_compat_view = {
+ .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
+ .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ if (IS_ENABLED(CONFIG_PPC64) && test_tsk_thread_flag(task, TIF_32BIT))
+ return &user_ppc_compat_view;
+ return &user_ppc_native_view;
+}
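
A hedged sketch of what view selection means for a tracer: the same NT_PRSTATUS read against a 32-bit tracee is served by gpr32_get() through the compat view above, so the entries are 32 bits wide. The helper is illustrative; the kernel shrinks iov_len to the number of bytes actually written.

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Read the compat GPR regset of a 32-bit tracee. On return, *len
 * holds the number of bytes the kernel actually filled in. */
static long read_gprs32(pid_t pid, uint32_t *regs32, size_t *len)
{
        struct iovec iov = { .iov_base = regs32, .iov_len = *len };
        long ret = ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

        *len = iov.iov_len;
        return ret;
}
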
diff --git a/arch/powerpc/kernel/ptrace/ptrace-vsx.c b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
new file mode 100644
index 000000000000..d53466d49cc0
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ flush_fp_to_thread(target);
+
+ /* copy to local buffer then write that out */
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+}
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ flush_fp_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+
+	/* copy the user data into the local buffer, then write it back out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+ return i;
+
+ for (i = 0; i < 32 ; i++)
+ target->thread.TS_FPR(i) = buf[i];
+ target->thread.fp_state.fpscr = buf[32];
+ return 0;
+}
+
+/*
+ * Currently, to set and get all the VSX state, you need to make the
+ * FP and VMX calls as well. This only gets/sets the lower 32 128-bit
+ * VSX registers.
+ */
+
+int vsr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ flush_vsx_to_thread(target);
+ return target->thread.used_vsr ? regset->n : 0;
+}
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last
+ * checkpointed value of all FPR registers for the current
+ * transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int vsr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+
+ return ret;
+}
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last
+ * checkpointed value of all FPR registers for the current
+ * transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int vsr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+ if (!ret)
+ for (i = 0; i < 32 ; i++)
+ target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+
+ return ret;
+}
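
As the comments above say, the full 128-bit VSR0-31 state spans two regsets: NT_PRFPREG supplies doubleword 0 (the FPRs) and NT_PPC_VSX supplies doubleword 1. A tracer-side sketch that stitches them together; the struct and helper names are illustrative.

#include <elf.h>          /* NT_PRFPREG, NT_PPC_VSX */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct vsr { uint64_t dw[2]; };

static int read_vsr_low32(pid_t pid, struct vsr vsr[32])
{
        uint64_t fp[33];       /* fpr0-31 + fpscr, as fpr_get() lays out */
        uint64_t vsx_low[32];  /* doubleword 1, as vsr_get() lays out */
        struct iovec iov;
        int i;

        iov.iov_base = fp;
        iov.iov_len = sizeof(fp);
        if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov))
                return -1;

        iov.iov_base = vsx_low;
        iov.iov_len = sizeof(vsx_low);
        if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_VSX, &iov))
                return -1;

        for (i = 0; i < 32; i++) {
                vsr[i].dw[0] = fp[i];       /* FPR = VSR doubleword 0 */
                vsr[i].dw[1] = vsx_low[i];  /* VSX regset = doubleword 1 */
        }
        return 0;
}
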
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
new file mode 100644
index 000000000000..f6e51be47c6e
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -0,0 +1,481 @@
+/*
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/m68k/kernel/ptrace.c"
+ * Copyright (C) 1994 by Hamish Macdonald
+ * Taken from linux/kernel/ptrace.c and modified for M680x0.
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * Modified by Cort Dougan (cort@hq.fsmlabs.com)
+ * and Paul Mackerras (paulus@samba.org).
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/audit.h>
+#include <linux/context_tracking.h>
+#include <linux/syscalls.h>
+
+#include <asm/switch_to.h>
+#include <asm/asm-prototypes.h>
+#include <asm/debug.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* make sure the single step bit is not set. */
+ user_disable_single_step(child);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EPERM;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = datavp;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long index, tmp;
+
+ ret = -EIO;
+ /* convert to index and check */
+#ifdef CONFIG_PPC32
+ index = addr >> 2;
+ if ((addr & 3) || (index > PT_FPSCR)
+ || (child->thread.regs == NULL))
+#else
+ index = addr >> 3;
+ if ((addr & 7) || (index > PT_FPSCR))
+#endif
+ break;
+
+ CHECK_FULL_REGS(child->thread.regs);
+ if (index < PT_FPR0) {
+ ret = ptrace_get_reg(child, (int) index, &tmp);
+ if (ret)
+ break;
+ } else {
+ unsigned int fpidx = index - PT_FPR0;
+
+ flush_fp_to_thread(child);
+ if (fpidx < (PT_FPSCR - PT_FPR0))
+ memcpy(&tmp, &child->thread.TS_FPR(fpidx),
+ sizeof(long));
+ else
+ tmp = child->thread.fp_state.fpscr;
+ }
+ ret = put_user(tmp, datalp);
+ break;
+ }
+
+ /* write the word at location addr in the USER area */
+ case PTRACE_POKEUSR: {
+ unsigned long index;
+
+ ret = -EIO;
+ /* convert to index and check */
+#ifdef CONFIG_PPC32
+ index = addr >> 2;
+ if ((addr & 3) || (index > PT_FPSCR)
+ || (child->thread.regs == NULL))
+#else
+ index = addr >> 3;
+ if ((addr & 7) || (index > PT_FPSCR))
+#endif
+ break;
+
+ CHECK_FULL_REGS(child->thread.regs);
+ if (index < PT_FPR0) {
+ ret = ptrace_put_reg(child, index, data);
+ } else {
+ unsigned int fpidx = index - PT_FPR0;
+
+ flush_fp_to_thread(child);
+ if (fpidx < (PT_FPSCR - PT_FPR0))
+ memcpy(&child->thread.TS_FPR(fpidx), &data,
+ sizeof(long));
+ else
+ child->thread.fp_state.fpscr = data;
+ ret = 0;
+ }
+ break;
+ }
+
+ case PPC_PTRACE_GETHWDBGINFO: {
+ struct ppc_debug_info dbginfo;
+
+ ppc_gethwdinfo(&dbginfo);
+
+ if (copy_to_user(datavp, &dbginfo,
+ sizeof(struct ppc_debug_info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ case PPC_PTRACE_SETHWDEBUG: {
+ struct ppc_hw_breakpoint bp_info;
+
+ if (copy_from_user(&bp_info, datavp,
+ sizeof(struct ppc_hw_breakpoint)))
+ return -EFAULT;
+ return ppc_set_hwdebug(child, &bp_info);
+ }
+
+ case PPC_PTRACE_DELHWDEBUG: {
+ ret = ppc_del_hwdebug(child, data);
+ break;
+ }
+
+ case PTRACE_GET_DEBUGREG:
+ ret = ptrace_get_debugreg(child, addr, datalp);
+ break;
+
+ case PTRACE_SET_DEBUGREG:
+ ret = ptrace_set_debugreg(child, addr, data);
+ break;
+
+#ifdef CONFIG_PPC64
+ case PTRACE_GETREGS64:
+#endif
+ case PTRACE_GETREGS: /* Get all pt_regs from the child. */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_GPR,
+ 0, sizeof(struct user_pt_regs),
+ datavp);
+
+#ifdef CONFIG_PPC64
+ case PTRACE_SETREGS64:
+#endif
+ case PTRACE_SETREGS: /* Set all gp regs in the child. */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_GPR,
+ 0, sizeof(struct user_pt_regs),
+ datavp);
+
+ case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_FPR,
+ 0, sizeof(elf_fpregset_t),
+ datavp);
+
+ case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_FPR,
+ 0, sizeof(elf_fpregset_t),
+ datavp);
+
+#ifdef CONFIG_ALTIVEC
+ case PTRACE_GETVRREGS:
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_VMX,
+ 0, (33 * sizeof(vector128) +
+ sizeof(u32)),
+ datavp);
+
+ case PTRACE_SETVRREGS:
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_VMX,
+ 0, (33 * sizeof(vector128) +
+ sizeof(u32)),
+ datavp);
+#endif
+#ifdef CONFIG_VSX
+ case PTRACE_GETVSRREGS:
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_VSX,
+ 0, 32 * sizeof(double),
+ datavp);
+
+ case PTRACE_SETVSRREGS:
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_VSX,
+ 0, 32 * sizeof(double),
+ datavp);
+#endif
+#ifdef CONFIG_SPE
+ case PTRACE_GETEVRREGS:
+ /* Get the child spe register state. */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_SPE, 0, 35 * sizeof(u32),
+ datavp);
+
+ case PTRACE_SETEVRREGS:
+ /* Set the child spe register state. */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_SPE, 0, 35 * sizeof(u32),
+ datavp);
+#endif
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+ return ret;
+}
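
A minimal userspace sketch of the PPC_PTRACE_GETHWDBGINFO request handled above. The helper is illustrative, and glibc may want the request value cast to its ptrace request type.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>   /* struct ppc_debug_info, PPC_PTRACE_GETHWDBGINFO */

/* Query the hardware debug capabilities of a stopped tracee. */
static long query_hwdebug(pid_t pid, struct ppc_debug_info *info)
{
        return ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, info);
}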
+
+#ifdef CONFIG_SECCOMP
+static int do_seccomp(struct pt_regs *regs)
+{
+ if (!test_thread_flag(TIF_SECCOMP))
+ return 0;
+
+ /*
+ * The ABI we present to seccomp tracers is that r3 contains
+ * the syscall return value and orig_gpr3 contains the first
+ * syscall parameter. This is different to the ptrace ABI where
+ * both r3 and orig_gpr3 contain the first syscall parameter.
+ */
+ regs->gpr[3] = -ENOSYS;
+
+ /*
+ * We use the __ version here because we have already checked
+ * TIF_SECCOMP. If this fails, there is nothing left to do, we
+ * have already loaded -ENOSYS into r3, or seccomp has put
+ * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+ */
+ if (__secure_computing(NULL))
+ return -1;
+
+ /*
+ * The syscall was allowed by seccomp, restore the register
+ * state to what audit expects.
+ * Note that we use orig_gpr3, which means a seccomp tracer can
+ * modify the first syscall parameter (in orig_gpr3) and also
+ * allow the syscall to proceed.
+ */
+ regs->gpr[3] = regs->orig_gpr3;
+
+ return 0;
+}
+#else
+static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_SECCOMP */
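
A tracer-side sketch of the ABI described in do_seccomp(): r3 holds the pre-loaded return value and orig_gpr3 the first syscall argument. PT_R3 and PT_ORIG_R3 are the uapi offsets; errno-based error checking of PTRACE_PEEKUSER is omitted for brevity.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>   /* PT_R3, PT_ORIG_R3 */

static void peek_seccomp_regs(pid_t pid, long *r3, long *arg0)
{
        *r3   = ptrace(PTRACE_PEEKUSER, pid, PT_R3 * sizeof(long), 0);
        *arg0 = ptrace(PTRACE_PEEKUSER, pid, PT_ORIG_R3 * sizeof(long), 0);
}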
+
+/**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+ *
+ * Performs various types of tracing on syscall entry. This includes seccomp,
+ * ptrace, syscall tracepoints and audit.
+ *
+ * The pt_regs are potentially visible to userspace via ptrace, so their
+ * contents are ABI.
+ *
+ * One or more of the tracers may modify the contents of pt_regs, in particular
+ * to modify arguments or even the syscall number itself.
+ *
+ * It's also possible that a tracer can choose to reject the system call. In
+ * that case this function will return an illegal syscall number, and will put
+ * an appropriate return value in regs->r3.
+ *
+ * Return: the (possibly changed) syscall number.
+ */
+long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ u32 flags;
+
+ user_exit();
+
+ flags = READ_ONCE(current_thread_info()->flags) &
+ (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
+
+ if (flags) {
+ int rc = tracehook_report_syscall_entry(regs);
+
+ if (unlikely(flags & _TIF_SYSCALL_EMU)) {
+ /*
+ * A nonzero return code from
+ * tracehook_report_syscall_entry() tells us to prevent
+ * the syscall execution, but we are not going to
+ * execute it anyway.
+ *
+ * Returning -1 will skip the syscall execution. We want
+ * to avoid clobbering any registers, so we don't goto
+ * the skip label below.
+ */
+ return -1;
+ }
+
+ if (rc) {
+ /*
+ * The tracer decided to abort the syscall. Note that
+ * the tracer may also just change regs->gpr[0] to an
+ * invalid syscall number, that is handled below on the
+ * exit path.
+ */
+ goto skip;
+ }
+ }
+
+ /* Run seccomp after ptrace; allow it to set gpr[3]. */
+ if (do_seccomp(regs))
+ return -1;
+
+ /* Avoid trace and audit when syscall is invalid. */
+ if (regs->gpr[0] >= NR_syscalls)
+ goto skip;
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->gpr[0]);
+
+ if (!is_32bit_task())
+ audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
+ regs->gpr[5], regs->gpr[6]);
+ else
+ audit_syscall_entry(regs->gpr[0],
+ regs->gpr[3] & 0xffffffff,
+ regs->gpr[4] & 0xffffffff,
+ regs->gpr[5] & 0xffffffff,
+ regs->gpr[6] & 0xffffffff);
+
+ /* Return the possibly modified but valid syscall number */
+ return regs->gpr[0];
+
+skip:
+ /*
+ * If we are aborting explicitly, or if the syscall number is
+ * now invalid, set the return value to -ENOSYS.
+ */
+ regs->gpr[3] = -ENOSYS;
+ return -1;
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->result);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+
+ user_enter();
+}
+
+void __init pt_regs_check(void);
+
+/*
+ * Dummy function whose purpose is to break the build if struct pt_regs and
+ * struct user_pt_regs don't match.
+ */
+void __init pt_regs_check(void)
+{
+ BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
+ offsetof(struct user_pt_regs, gpr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
+ offsetof(struct user_pt_regs, nip));
+ BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
+ offsetof(struct user_pt_regs, msr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct user_pt_regs, orig_gpr3));
+ BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
+ offsetof(struct user_pt_regs, ctr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
+ offsetof(struct user_pt_regs, link));
+ BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
+ offsetof(struct user_pt_regs, xer));
+ BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
+ offsetof(struct user_pt_regs, ccr));
+#ifdef __powerpc64__
+ BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
+ offsetof(struct user_pt_regs, softe));
+#else
+ BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
+ offsetof(struct user_pt_regs, mq));
+#endif
+ BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
+ offsetof(struct user_pt_regs, trap));
+ BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
+ offsetof(struct user_pt_regs, dar));
+ BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
+ offsetof(struct user_pt_regs, dsisr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
+ offsetof(struct user_pt_regs, result));
+
+ BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
+
+ // Now check that the pt_regs offsets match the uapi #defines
+ #define CHECK_REG(_pt, _reg) \
+ BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
+ sizeof(unsigned long)));
+
+ CHECK_REG(PT_R0, gpr[0]);
+ CHECK_REG(PT_R1, gpr[1]);
+ CHECK_REG(PT_R2, gpr[2]);
+ CHECK_REG(PT_R3, gpr[3]);
+ CHECK_REG(PT_R4, gpr[4]);
+ CHECK_REG(PT_R5, gpr[5]);
+ CHECK_REG(PT_R6, gpr[6]);
+ CHECK_REG(PT_R7, gpr[7]);
+ CHECK_REG(PT_R8, gpr[8]);
+ CHECK_REG(PT_R9, gpr[9]);
+ CHECK_REG(PT_R10, gpr[10]);
+ CHECK_REG(PT_R11, gpr[11]);
+ CHECK_REG(PT_R12, gpr[12]);
+ CHECK_REG(PT_R13, gpr[13]);
+ CHECK_REG(PT_R14, gpr[14]);
+ CHECK_REG(PT_R15, gpr[15]);
+ CHECK_REG(PT_R16, gpr[16]);
+ CHECK_REG(PT_R17, gpr[17]);
+ CHECK_REG(PT_R18, gpr[18]);
+ CHECK_REG(PT_R19, gpr[19]);
+ CHECK_REG(PT_R20, gpr[20]);
+ CHECK_REG(PT_R21, gpr[21]);
+ CHECK_REG(PT_R22, gpr[22]);
+ CHECK_REG(PT_R23, gpr[23]);
+ CHECK_REG(PT_R24, gpr[24]);
+ CHECK_REG(PT_R25, gpr[25]);
+ CHECK_REG(PT_R26, gpr[26]);
+ CHECK_REG(PT_R27, gpr[27]);
+ CHECK_REG(PT_R28, gpr[28]);
+ CHECK_REG(PT_R29, gpr[29]);
+ CHECK_REG(PT_R30, gpr[30]);
+ CHECK_REG(PT_R31, gpr[31]);
+ CHECK_REG(PT_NIP, nip);
+ CHECK_REG(PT_MSR, msr);
+ CHECK_REG(PT_ORIG_R3, orig_gpr3);
+ CHECK_REG(PT_CTR, ctr);
+ CHECK_REG(PT_LNK, link);
+ CHECK_REG(PT_XER, xer);
+ CHECK_REG(PT_CCR, ccr);
+#ifdef CONFIG_PPC64
+ CHECK_REG(PT_SOFTE, softe);
+#else
+ CHECK_REG(PT_MQ, mq);
+#endif
+ CHECK_REG(PT_TRAP, trap);
+ CHECK_REG(PT_DAR, dar);
+ CHECK_REG(PT_DSISR, dsisr);
+ CHECK_REG(PT_RESULT, result);
+ #undef CHECK_REG
+
+ BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
+
+ /*
+ * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
+ * real registers.
+ */
+ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
+}
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace/ptrace32.c
index f37eb53de1a1..7976ddf29c0e 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace/ptrace32.c
@@ -17,21 +17,10 @@
* this archive for more details.
*/
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/signal.h>
#include <linux/compat.h>
-#include <linux/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/switch_to.h>
/*
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 7f8c890360fe..f9c0d888ce8a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -787,8 +787,7 @@ EXPORT_SYMBOL(powerpc_debugfs_root);
static int powerpc_debugfs_init(void)
{
powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);
-
- return powerpc_debugfs_root == NULL;
+ return 0;
}
arch_initcall(powerpc_debugfs_init);
#endif
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 2dd0d9cb5a20..2ec835574cc9 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -8,6 +8,12 @@
#ifndef __ARCH_POWERPC_KERNEL_SETUP_H
#define __ARCH_POWERPC_KERNEL_SETUP_H
+#ifdef CONFIG_CC_IS_CLANG
+#define __nostackprotector
+#else
+#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
+#endif
+
void initialize_cache_info(void);
void irqstack_early_init(void);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 5b49b26eb154..305ca89d856f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid_phys);
int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);
-unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e05e6dd67ae6..438a9befce41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -279,24 +279,42 @@ void __init record_spr_defaults(void)
* device-tree is not accessible via normal means at this point.
*/
-void __init early_setup(unsigned long dt_ptr)
+void __init __nostackprotector early_setup(unsigned long dt_ptr)
{
static __initdata struct paca_struct boot_paca;
/* -------- printk is _NOT_ safe to use here ! ------- */
- /* Try new device tree based feature discovery ... */
- if (!dt_cpu_ftrs_init(__va(dt_ptr)))
- /* Otherwise use the old style CPU table */
- identify_cpu(0, mfspr(SPRN_PVR));
-
- /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
+ /*
+ * Assume we're on cpu 0 for now.
+ *
+ * We need to load a PACA very early for a few reasons.
+ *
+ * The stack protector canary is stored in the paca, so as soon as we
+ * call any stack protected code we need r13 pointing somewhere valid.
+ *
+ * If we are using kcov it will call in_task() in its instrumentation,
+ * which relies on the current task from the PACA.
+ *
+ * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
+ * printk(), which can trigger both stack protector and kcov.
+ *
+ * percpu variables and spin locks also use the paca.
+ *
+ * So set up a temporary paca. It will be replaced below once we know
+ * what CPU we are on.
+ */
initialise_paca(&boot_paca, 0);
setup_paca(&boot_paca);
fixup_boot_paca();
/* -------- printk is now safe to use ------- */
+ /* Try new device tree based feature discovery ... */
+ if (!dt_cpu_ftrs_init(__va(dt_ptr)))
+ /* Otherwise use the old style CPU table */
+ identify_cpu(0, mfspr(SPRN_PVR));
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 800433685888..d396efca4068 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -10,8 +10,6 @@
#ifndef _POWERPC_ARCH_SIGNAL_H
#define _POWERPC_ARCH_SIGNAL_H
-extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
-
extern void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
size_t frame_size, int is_32);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 84ed2e77ef9c..adfde59cf4ba 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -473,8 +473,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
+ /* Don't allow userspace to set the trap value */
+ regs->trap = 0;
+
/* These regs are not checkpointed; they can go in 'regs'. */
- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ea6adbf6a221..6d2a3a3666f0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1185,10 +1185,30 @@ static inline void add_cpu_to_smallcore_masks(int cpu)
}
}
+int get_physical_package_id(int cpu)
+{
+ int pkg_id = cpu_to_chip_id(cpu);
+
+ /*
+ * If the platform is PowerNV or Guest on KVM, ibm,chip-id is
+	 * defined. Hence we return the chip-id as the result of
+ * get_physical_package_id.
+ */
+ if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
+ IS_ENABLED(CONFIG_PPC_SPLPAR)) {
+ struct device_node *np = of_get_cpu_node(cpu, NULL);
+ pkg_id = of_node_to_nid(np);
+ of_node_put(np);
+ }
+
+ return pkg_id;
+}
+EXPORT_SYMBOL_GPL(get_physical_package_id);
+
static void add_cpu_to_masks(int cpu)
{
int first_thread = cpu_first_thread_sibling(cpu);
- int chipid = cpu_to_chip_id(cpu);
+ int pkg_id = get_physical_package_id(cpu);
int i;
/*
@@ -1217,11 +1237,11 @@ static void add_cpu_to_masks(int cpu)
for_each_cpu(i, cpu_l2_cache_mask(cpu))
set_cpus_related(cpu, i, cpu_core_mask);
- if (chipid == -1)
+ if (pkg_id == -1)
return;
for_each_cpu(i, cpu_online_mask)
- if (cpu_to_chip_id(i) == chipid)
+ if (get_physical_package_id(i) == pkg_id)
set_cpus_related(cpu, i, cpu_core_mask);
}
@@ -1359,11 +1379,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
if (smp_ops && smp_ops->bringup_done)
smp_ops->bringup_done();
- /*
- * On a shared LPAR, associativity needs to be requested.
- * Hence, get numa topology before dumping cpu topology
- */
- shared_proc_topology_init();
dump_numa_cpu_topology();
#ifdef CONFIG_SCHED_SMT
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index e2a46cfed5fd..c477b8585a29 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -57,7 +57,7 @@ void save_stack_trace(struct stack_trace *trace)
{
unsigned long sp;
- sp = current_stack_pointer();
+ sp = current_stack_frame();
save_context_stack(trace, sp, current, 1);
}
@@ -71,7 +71,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
return;
if (tsk == current)
- sp = current_stack_pointer();
+ sp = current_stack_frame();
else
sp = tsk->thread.ksp;
@@ -131,7 +131,7 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
}
if (tsk == current)
- sp = current_stack_pointer();
+ sp = current_stack_frame();
else
sp = tsk->thread.ksp;
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
new file mode 100644
index 000000000000..cf06eb443a80
--- /dev/null
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/err.h>
+#include <asm/asm-prototypes.h>
+#include <asm/book3s/64/kup-radix.h>
+#include <asm/cputime.h>
+#include <asm/hw_irq.h>
+#include <asm/kprobes.h>
+#include <asm/paca.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/signal.h>
+#include <asm/switch_to.h>
+#include <asm/syscall.h>
+#include <asm/time.h>
+#include <asm/unistd.h>
+
+typedef long (*syscall_fn)(long, long, long, long, long, long);
+
+/* Has to run notrace because it is entered not completely "reconciled" */
+notrace long system_call_exception(long r3, long r4, long r5,
+ long r6, long r7, long r8,
+ unsigned long r0, struct pt_regs *regs)
+{
+ unsigned long ti_flags;
+ syscall_fn f;
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+
+ trace_hardirqs_off(); /* finish reconciling */
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S))
+ BUG_ON(!(regs->msr & MSR_RI));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(!FULL_REGS(regs));
+ BUG_ON(regs->softe != IRQS_ENABLED);
+
+ account_cpu_user_entry();
+
+#ifdef CONFIG_PPC_SPLPAR
+ if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
+ firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ struct lppaca *lp = local_paca->lppaca_ptr;
+
+ if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
+ accumulate_stolen_time();
+ }
+#endif
+
+ kuap_check_amr();
+
+ /*
+ * This is not required for the syscall exit path, but makes the
+ * stack frame look nicer. If this was initialised in the first stack
+ * frame, or if the unwinder was taught the first stack frame always
+ * returns to user with IRQS_ENABLED, this store could be avoided!
+ */
+ regs->softe = IRQS_ENABLED;
+
+ local_irq_enable();
+
+ ti_flags = current_thread_info()->flags;
+ if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+ /*
+ * We use the return value of do_syscall_trace_enter() as the
+		 * syscall number. If the syscall was rejected for any reason,
+		 * do_syscall_trace_enter() returns an invalid syscall number,
+		 * the test against NR_syscalls below fails, and the value to
+		 * return is taken from regs->gpr[3].
+ */
+ r0 = do_syscall_trace_enter(regs);
+ if (unlikely(r0 >= NR_syscalls))
+ return regs->gpr[3];
+ r3 = regs->gpr[3];
+ r4 = regs->gpr[4];
+ r5 = regs->gpr[5];
+ r6 = regs->gpr[6];
+ r7 = regs->gpr[7];
+ r8 = regs->gpr[8];
+
+ } else if (unlikely(r0 >= NR_syscalls)) {
+ return -ENOSYS;
+ }
+
+ /* May be faster to do array_index_nospec? */
+ barrier_nospec();
+
+ if (unlikely(ti_flags & _TIF_32BIT)) {
+ f = (void *)compat_sys_call_table[r0];
+
+ r3 &= 0x00000000ffffffffULL;
+ r4 &= 0x00000000ffffffffULL;
+ r5 &= 0x00000000ffffffffULL;
+ r6 &= 0x00000000ffffffffULL;
+ r7 &= 0x00000000ffffffffULL;
+ r8 &= 0x00000000ffffffffULL;
+
+ } else {
+ f = (void *)sys_call_table[r0];
+ }
+
+ return f(r3, r4, r5, r6, r7, r8);
+}
+
+/*
+ * This should be called after a syscall returns, with r3 the return value
+ * from the syscall. If this function returns non-zero, the system call
+ * exit assembly should additionally load all GPR registers and CTR and XER
+ * from the interrupt frame.
+ *
+ * The function graph tracer cannot trace the return side of this function,
+ * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
+ */
+notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ struct pt_regs *regs)
+{
+ unsigned long *ti_flagsp = &current_thread_info()->flags;
+ unsigned long ti_flags;
+ unsigned long ret = 0;
+
+ regs->result = r3;
+
+ /* Check whether the syscall is issued inside a restartable sequence */
+ rseq_syscall(regs);
+
+ ti_flags = *ti_flagsp;
+
+ if (unlikely(r3 >= (unsigned long)-MAX_ERRNO)) {
+ if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
+ r3 = -r3;
+ regs->ccr |= 0x10000000; /* Set SO bit in CR */
+ }
+ }
+
+ if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
+ if (ti_flags & _TIF_RESTOREALL)
+ ret = _TIF_RESTOREALL;
+ else
+ regs->gpr[3] = r3;
+ clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
+ } else {
+ regs->gpr[3] = r3;
+ }
+
+ if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+ do_syscall_trace_leave(regs);
+ ret |= _TIF_RESTOREALL;
+ }
+
+again:
+ local_irq_disable();
+ ti_flags = READ_ONCE(*ti_flagsp);
+ while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ local_irq_enable();
+ if (ti_flags & _TIF_NEED_RESCHED) {
+ schedule();
+ } else {
+ /*
+ * SIGPENDING must restore signal handler function
+ * argument GPRs, and some non-volatiles (e.g., r1).
+ * Restore all for now. This could be made lighter.
+ */
+ if (ti_flags & _TIF_SIGPENDING)
+ ret |= _TIF_RESTOREALL;
+ do_notify_resume(regs, ti_flags);
+ }
+ local_irq_disable();
+ ti_flags = READ_ONCE(*ti_flagsp);
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely((ti_flags & _TIF_RESTORE_TM))) {
+ restore_tm_state(regs);
+ } else {
+ unsigned long mathflags = MSR_FP;
+
+ if (cpu_has_feature(CPU_FTR_VSX))
+ mathflags |= MSR_VEC | MSR_VSX;
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ mathflags |= MSR_VEC;
+
+ if ((regs->msr & mathflags) != mathflags)
+ restore_math(regs);
+ }
+ }
+
+ /* This must be done with RI=1 because tracing may touch vmaps */
+ trace_hardirqs_on();
+
+ /* This pattern matches prep_irq_for_idle */
+ __hard_EE_RI_disable();
+ if (unlikely(lazy_irq_pending())) {
+ __hard_RI_enable();
+ trace_hardirqs_off();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ local_irq_enable();
+ /* Took an interrupt, may have more exit work to do. */
+ goto again;
+ }
+ local_paca->irq_happened = 0;
+ irq_soft_mask_set(IRQS_ENABLED);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ local_paca->tm_scratch = regs->msr;
+#endif
+
+ kuap_check_amr();
+
+ account_cpu_user_exit();
+
+ return ret;
+}
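
A sketch of decoding the error convention established above: on error the kernel leaves a positive errno in r3 and sets the SO bit (0x10000000) in CR. The function name is illustrative.

/* Decode a raw syscall return: if CR0.SO is set, r3 carries a
 * positive errno. */
static long decode_syscall_return(unsigned long r3, unsigned long ccr)
{
        if (ccr & 0x10000000)           /* CR0.SO */
                return -(long)r3;       /* e.g. r3 == ENOSYS -> -ENOSYS */
        return (long)r3;
}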
+
+#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
+{
+#ifdef CONFIG_PPC_BOOK3E
+ struct thread_struct *ts = &current->thread;
+#endif
+ unsigned long *ti_flagsp = &current_thread_info()->flags;
+ unsigned long ti_flags;
+ unsigned long flags;
+ unsigned long ret = 0;
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S))
+ BUG_ON(!(regs->msr & MSR_RI));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(!FULL_REGS(regs));
+ BUG_ON(regs->softe != IRQS_ENABLED);
+
+ local_irq_save(flags);
+
+again:
+ ti_flags = READ_ONCE(*ti_flagsp);
+ while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ local_irq_enable(); /* returning to user: may enable */
+ if (ti_flags & _TIF_NEED_RESCHED) {
+ schedule();
+ } else {
+ if (ti_flags & _TIF_SIGPENDING)
+ ret |= _TIF_RESTOREALL;
+ do_notify_resume(regs, ti_flags);
+ }
+ local_irq_disable();
+ ti_flags = READ_ONCE(*ti_flagsp);
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely((ti_flags & _TIF_RESTORE_TM))) {
+ restore_tm_state(regs);
+ } else {
+ unsigned long mathflags = MSR_FP;
+
+ if (cpu_has_feature(CPU_FTR_VSX))
+ mathflags |= MSR_VEC | MSR_VSX;
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ mathflags |= MSR_VEC;
+
+ if ((regs->msr & mathflags) != mathflags)
+ restore_math(regs);
+ }
+ }
+
+ trace_hardirqs_on();
+ __hard_EE_RI_disable();
+ if (unlikely(lazy_irq_pending())) {
+ __hard_RI_enable();
+ trace_hardirqs_off();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ local_irq_enable();
+ local_irq_disable();
+ /* Took an interrupt, may have more exit work to do. */
+ goto again;
+ }
+ local_paca->irq_happened = 0;
+ irq_soft_mask_set(IRQS_ENABLED);
+
+#ifdef CONFIG_PPC_BOOK3E
+ if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
+ /*
+ * Check to see if the dbcr0 register is set up to debug.
+ * Use the internal debug mode bit to do this.
+ */
+ mtmsr(mfmsr() & ~MSR_DE);
+ mtspr(SPRN_DBCR0, ts->debug.dbcr0);
+ mtspr(SPRN_DBSR, -1);
+ }
+#endif
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ local_paca->tm_scratch = regs->msr;
+#endif
+
+ kuap_check_amr();
+
+ account_cpu_user_exit();
+
+ return ret;
+}
+
+void unrecoverable_exception(struct pt_regs *regs);
+void preempt_schedule_irq(void);
+
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
+{
+ unsigned long *ti_flagsp = &current_thread_info()->flags;
+ unsigned long flags;
+ unsigned long ret = 0;
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
+ unrecoverable_exception(regs);
+ BUG_ON(regs->msr & MSR_PR);
+ BUG_ON(!FULL_REGS(regs));
+
+ if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
+ clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
+ ret = 1;
+ }
+
+ local_irq_save(flags);
+
+ if (regs->softe == IRQS_ENABLED) {
+ /* Returning to a kernel context with local irqs enabled. */
+ WARN_ON_ONCE(!(regs->msr & MSR_EE));
+again:
+ if (IS_ENABLED(CONFIG_PREEMPT)) {
+ /* Return to preemptible kernel context */
+ if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
+ if (preempt_count() == 0)
+ preempt_schedule_irq();
+ }
+ }
+
+ trace_hardirqs_on();
+ __hard_EE_RI_disable();
+ if (unlikely(lazy_irq_pending())) {
+ __hard_RI_enable();
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ /*
+ * Can't local_irq_restore to replay if we were in
+ * interrupt context. Must replay directly.
+ */
+ if (irqs_disabled_flags(flags)) {
+ replay_soft_interrupts();
+ } else {
+ local_irq_restore(flags);
+ local_irq_save(flags);
+ }
+ /* Took an interrupt, may have more exit work to do. */
+ goto again;
+ }
+ local_paca->irq_happened = 0;
+ irq_soft_mask_set(IRQS_ENABLED);
+ } else {
+ /* Returning to a kernel context with local irqs disabled. */
+ __hard_EE_RI_disable();
+ if (regs->msr & MSR_EE)
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ local_paca->tm_scratch = regs->msr;
+#endif
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ kuap_restore_amr(regs);
+
+ return ret;
+}
+#endif
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 35b61bfc1b1a..220ae11555f2 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -9,7 +9,9 @@
#
0 nospu restart_syscall sys_restart_syscall
1 nospu exit sys_exit
-2 nospu fork ppc_fork
+2 32 fork ppc_fork sys_fork
+2 64 fork sys_fork
+2 spu fork sys_ni_syscall
3 common read sys_read
4 common write sys_write
5 common open sys_open compat_sys_open
@@ -158,7 +160,9 @@
119 32 sigreturn sys_sigreturn compat_sys_sigreturn
119 64 sigreturn sys_ni_syscall
119 spu sigreturn sys_ni_syscall
-120 nospu clone ppc_clone
+120 32 clone ppc_clone sys_clone
+120 64 clone sys_clone
+120 spu clone sys_ni_syscall
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common modify_ldt sys_ni_syscall
@@ -240,7 +244,9 @@
186 spu sendfile sys_sendfile64
187 common getpmsg sys_ni_syscall
188 common putpmsg sys_ni_syscall
-189 nospu vfork ppc_vfork
+189 32 vfork ppc_vfork sys_vfork
+189 64 vfork sys_vfork
+189 spu vfork sys_ni_syscall
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
191 common readahead sys_readahead compat_sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
@@ -316,8 +322,8 @@
248 32 clock_nanosleep sys_clock_nanosleep_time32
248 64 clock_nanosleep sys_clock_nanosleep
248 spu clock_nanosleep sys_clock_nanosleep
-249 32 swapcontext ppc_swapcontext ppc32_swapcontext
-249 64 swapcontext ppc64_swapcontext
+249 32 swapcontext ppc_swapcontext compat_sys_swapcontext
+249 64 swapcontext sys_swapcontext
249 spu swapcontext sys_ni_syscall
250 common tgkill sys_tgkill
251 32 utimes sys_utimes_time32
@@ -456,7 +462,7 @@
361 common bpf sys_bpf
362 nospu execveat sys_execveat compat_sys_execveat
363 32 switch_endian sys_ni_syscall
-363 64 switch_endian ppc_switch_endian
+363 64 switch_endian sys_switch_endian
363 spu switch_endian sys_ni_syscall
364 common userfaultfd sys_userfaultfd
365 common membarrier sys_membarrier
@@ -516,6 +522,8 @@
432 common fsmount sys_fsmount
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
-435 nospu clone3 ppc_clone3
+435 32 clone3 ppc_clone3 sys_clone3
+435 64 clone3 sys_clone3
+435 spu clone3 sys_ni_syscall
437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd
diff --git a/arch/powerpc/kernel/syscalls/syscallhdr.sh b/arch/powerpc/kernel/syscalls/syscallhdr.sh
index c0a9a32937f1..02d6751f3be3 100644
--- a/arch/powerpc/kernel/syscalls/syscallhdr.sh
+++ b/arch/powerpc/kernel/syscalls/syscallhdr.sh
@@ -32,6 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
- printf "\n"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 80a676da11cb..479c70680b76 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -87,6 +87,155 @@ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
#endif /* CONFIG_PPC64 */
+#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
+static void read_##NAME(void *val) \
+{ \
+ *(unsigned long *)val = mfspr(ADDRESS); \
+} \
+static void write_##NAME(void *val) \
+{ \
+ EXTRA; \
+ mtspr(ADDRESS, *(unsigned long *)val); \
+}
+
+#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
+static ssize_t show_##NAME(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct cpu *cpu = container_of(dev, struct cpu, dev); \
+ unsigned long val; \
+ smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
+ return sprintf(buf, "%lx\n", val); \
+} \
+static ssize_t __used \
+ store_##NAME(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct cpu *cpu = container_of(dev, struct cpu, dev); \
+ unsigned long val; \
+ int ret = sscanf(buf, "%lx", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
+ return count; \
+}
+
+#define SYSFS_PMCSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
+ __SYSFS_SPRSETUP_SHOW_STORE(NAME)
+#define SYSFS_SPRSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
+ __SYSFS_SPRSETUP_SHOW_STORE(NAME)
+
+#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
+ __SYSFS_SPRSETUP_SHOW_STORE(NAME)
+
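A hypothetical use of the macros above: one SYSFS_SPRSETUP() invocation generates the read_/write_/show_/store_ helpers for an SPR, which a DEVICE_ATTR() then exposes as a per-CPU sysfs attribute.

SYSFS_SPRSETUP(purr, SPRN_PURR);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
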
+#ifdef CONFIG_PPC64
+
+/*
+ * This is the system-wide DSCR register default value. Any
+ * change to this default value through the sysfs interface
+ * will update the per-CPU DSCR default values across the
+ * system, which are stored in their respective PACA structures.
+ */
+static unsigned long dscr_default;
+
+/**
+ * read_dscr() - Fetch the cpu specific DSCR default
+ * @val: Returned cpu specific DSCR default value
+ *
+ * This function returns the per-CPU DSCR default value
+ * for a given CPU, as stored in its PACA structure.
+ */
+static void read_dscr(void *val)
+{
+ *(unsigned long *)val = get_paca()->dscr_default;
+}
+
+/**
+ * write_dscr() - Update the cpu specific DSCR default
+ * @val: New cpu specific DSCR default value to update
+ *
+ * This function updates the per-CPU DSCR default value
+ * for a given CPU, as stored in its PACA structure.
+ */
+static void write_dscr(void *val)
+{
+ get_paca()->dscr_default = *(unsigned long *)val;
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = *(unsigned long *)val;
+ mtspr(SPRN_DSCR, *(unsigned long *)val);
+ }
+}
+
+SYSFS_SPRSETUP_SHOW_STORE(dscr);
+static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
+
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+ attr->attr.mode |= 0200;
+}
+
+/**
+ * show_dscr_default() - Fetch the system wide DSCR default
+ * @dev: Device structure
+ * @attr: Device attribute structure
+ * @buf: Interface buffer
+ *
+ * This function returns the system wide DSCR default value.
+ */
+static ssize_t show_dscr_default(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lx\n", dscr_default);
+}
+
+/**
+ * store_dscr_default() - Update the system wide DSCR default
+ * @dev: Device structure
+ * @attr: Device attribute structure
+ * @buf: Interface buffer
+ * @count: Size of the update
+ *
+ * This function updates the system wide DSCR default value.
+ */
+static ssize_t __used store_dscr_default(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int ret = 0;
+
+ ret = sscanf(buf, "%lx", &val);
+ if (ret != 1)
+ return -EINVAL;
+ dscr_default = val;
+
+ on_each_cpu(write_dscr, &val, 1);
+
+ return count;
+}
+
+static DEVICE_ATTR(dscr_default, 0600,
+ show_dscr_default, store_dscr_default);
+
+static void sysfs_create_dscr_default(void)
+{
+ if (cpu_has_feature(CPU_FTR_DSCR)) {
+ int err = 0;
+ int cpu;
+
+ dscr_default = spr_default_dscr;
+ for_each_possible_cpu(cpu)
+ paca_ptrs[cpu]->dscr_default = dscr_default;
+
+ err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
+ }
+}
+#endif /* CONFIG_PPC64 */
+
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_BIT 63
@@ -407,84 +556,35 @@ void ppc_enable_pmcs(void)
}
EXPORT_SYMBOL(ppc_enable_pmcs);
-#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
-static void read_##NAME(void *val) \
-{ \
- *(unsigned long *)val = mfspr(ADDRESS); \
-} \
-static void write_##NAME(void *val) \
-{ \
- EXTRA; \
- mtspr(ADDRESS, *(unsigned long *)val); \
-}
-#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
-static ssize_t show_##NAME(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct cpu *cpu = container_of(dev, struct cpu, dev); \
- unsigned long val; \
- smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
- return sprintf(buf, "%lx\n", val); \
-} \
-static ssize_t __used \
- store_##NAME(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct cpu *cpu = container_of(dev, struct cpu, dev); \
- unsigned long val; \
- int ret = sscanf(buf, "%lx", &val); \
- if (ret != 1) \
- return -EINVAL; \
- smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
- return count; \
-}
-
-#define SYSFS_PMCSETUP(NAME, ADDRESS) \
- __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
- __SYSFS_SPRSETUP_SHOW_STORE(NAME)
-#define SYSFS_SPRSETUP(NAME, ADDRESS) \
- __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
- __SYSFS_SPRSETUP_SHOW_STORE(NAME)
-
-#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
- __SYSFS_SPRSETUP_SHOW_STORE(NAME)
/* Let's define all possible registers, we'll only hook up the ones
* that are implemented on the current processor
*/
-#if defined(CONFIG_PPC64)
+#ifdef CONFIG_PMU_SYSFS
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
#define HAS_PPC_PMC_CLASSIC 1
#define HAS_PPC_PMC_IBM 1
-#define HAS_PPC_PMC_PA6T 1
-#elif defined(CONFIG_PPC_BOOK3S_32)
-#define HAS_PPC_PMC_CLASSIC 1
-#define HAS_PPC_PMC_IBM 1
-#define HAS_PPC_PMC_G4 1
#endif
+#ifdef CONFIG_PPC64
+#define HAS_PPC_PMC_PA6T 1
+#define HAS_PPC_PMC56 1
+#endif
-#ifdef HAS_PPC_PMC_CLASSIC
-SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
-SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
-SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
-SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
-SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
-SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
-SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
-SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
-
-#ifdef HAS_PPC_PMC_G4
-SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
+#ifdef CONFIG_PPC_BOOK3S_32
+#define HAS_PPC_PMC_G4 1
#endif
+#endif /* CONFIG_PMU_SYSFS */
+#if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
+#define HAS_PPC_PA6T
+#endif
+/*
+ * SPRs which are not related to PMU.
+ */
#ifdef CONFIG_PPC64
-SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
-SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
-
-SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(pir, SPRN_PIR);
@@ -495,115 +595,38 @@ SYSFS_SPRSETUP(tscr, SPRN_TSCR);
enable write when needed with a separate function.
Let's be conservative and default to pseries.
*/
-static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
+#endif /* CONFIG_PPC64 */
-/*
- * This is the system wide DSCR register default value. Any
- * change to this default value through the sysfs interface
- * will update all per cpu DSCR default values across the
- * system stored in their respective PACA structures.
- */
-static unsigned long dscr_default;
-
-/**
- * read_dscr() - Fetch the cpu specific DSCR default
- * @val: Returned cpu specific DSCR default value
- *
- * This function returns the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
- */
-static void read_dscr(void *val)
-{
- *(unsigned long *)val = get_paca()->dscr_default;
-}
-
-
-/**
- * write_dscr() - Update the cpu specific DSCR default
- * @val: New cpu specific DSCR default value to update
- *
- * This function updates the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
- */
-static void write_dscr(void *val)
-{
- get_paca()->dscr_default = *(unsigned long *)val;
- if (!current->thread.dscr_inherit) {
- current->thread.dscr = *(unsigned long *)val;
- mtspr(SPRN_DSCR, *(unsigned long *)val);
- }
-}
-
-SYSFS_SPRSETUP_SHOW_STORE(dscr);
-static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-
-static void add_write_permission_dev_attr(struct device_attribute *attr)
-{
- attr->attr.mode |= 0200;
-}
-
-/**
- * show_dscr_default() - Fetch the system wide DSCR default
- * @dev: Device structure
- * @attr: Device attribute structure
- * @buf: Interface buffer
- *
- * This function returns the system wide DSCR default value.
- */
-static ssize_t show_dscr_default(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%lx\n", dscr_default);
-}
-
-/**
- * store_dscr_default() - Update the system wide DSCR default
- * @dev: Device structure
- * @attr: Device attribute structure
- * @buf: Interface buffer
- * @count: Size of the update
- *
- * This function updates the system wide DSCR default value.
- */
-static ssize_t __used store_dscr_default(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- unsigned long val;
- int ret = 0;
-
- ret = sscanf(buf, "%lx", &val);
- if (ret != 1)
- return -EINVAL;
- dscr_default = val;
+#ifdef HAS_PPC_PMC_CLASSIC
+SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
+SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
+SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
+SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
+SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
+SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
+SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
+SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
+#endif
- on_each_cpu(write_dscr, &val, 1);
+#ifdef HAS_PPC_PMC_G4
+SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
+#endif
- return count;
-}
+#ifdef HAS_PPC_PMC56
+SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
+SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
-static DEVICE_ATTR(dscr_default, 0600,
- show_dscr_default, store_dscr_default);
+SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
-static void sysfs_create_dscr_default(void)
-{
- if (cpu_has_feature(CPU_FTR_DSCR)) {
- int err = 0;
- int cpu;
+static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+#endif /* HAS_PPC_PMC56 */
- dscr_default = spr_default_dscr;
- for_each_possible_cpu(cpu)
- paca_ptrs[cpu]->dscr_default = dscr_default;
- err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
- }
-}
-#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
@@ -612,7 +635,9 @@ SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
-#ifdef CONFIG_DEBUG_MISC
+#endif
+
+#ifdef HAS_PPC_PA6T
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
@@ -641,15 +666,14 @@ SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
-#endif /* CONFIG_DEBUG_MISC */
-#endif /* HAS_PPC_PMC_PA6T */
+#endif /* HAS_PPC_PA6T */
#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
-#endif /* HAS_PPC_PMC_G4 */
+#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
@@ -659,6 +683,7 @@ static struct device_attribute g4_common_attrs[] = {
};
#endif /* HAS_PPC_PMC_G4 */
+#ifdef HAS_PPC_PMC_CLASSIC
static struct device_attribute classic_pmc_attrs[] = {
__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
@@ -666,14 +691,16 @@ static struct device_attribute classic_pmc_attrs[] = {
__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
-#ifdef CONFIG_PPC64
+#ifdef HAS_PPC_PMC56
__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
+#endif
-#ifdef HAS_PPC_PMC_PA6T
+#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
static struct device_attribute pa6t_attrs[] = {
+#ifdef HAS_PPC_PMC_PA6T
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
@@ -682,7 +709,8 @@ static struct device_attribute pa6t_attrs[] = {
__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
-#ifdef CONFIG_DEBUG_MISC
+#endif
+#ifdef HAS_PPC_PA6T
__ATTR(hid0, 0600, show_hid0, store_hid0),
__ATTR(hid1, 0600, show_hid1, store_hid1),
__ATTR(hid4, 0600, show_hid4, store_hid4),
@@ -711,10 +739,9 @@ static struct device_attribute pa6t_attrs[] = {
__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
-#endif /* CONFIG_DEBUG_MISC */
+#endif /* HAS_PPC_PA6T */
};
-#endif /* HAS_PPC_PMC_PA6T */
-#endif /* HAS_PPC_PMC_CLASSIC */
+#endif
#ifdef CONFIG_PPC_SVM
static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
@@ -765,14 +792,14 @@ static int register_cpu_online(unsigned int cpu)
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
-#ifdef HAS_PPC_PMC_PA6T
+#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
pmc_attrs = NULL;
break;
-#endif /* HAS_PPC_PMC_PA6T */
+#endif
default:
attrs = NULL;
nattrs = 0;
@@ -787,8 +814,10 @@ static int register_cpu_online(unsigned int cpu)
device_create_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_create_file(s, &dev_attr_mmcra);
+#endif /* CONFIG_PMU_SYSFS */
if (cpu_has_feature(CPU_FTR_PURR)) {
if (!firmware_has_feature(FW_FEATURE_LPAR))
@@ -854,14 +883,14 @@ static int unregister_cpu_online(unsigned int cpu)
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
-#ifdef HAS_PPC_PMC_PA6T
+#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
pmc_attrs = NULL;
break;
-#endif /* HAS_PPC_PMC_PA6T */
+#endif
default:
attrs = NULL;
nattrs = 0;
@@ -876,8 +905,10 @@ static int unregister_cpu_online(unsigned int cpu)
device_remove_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_remove_file(s, &dev_attr_mmcra);
+#endif /* CONFIG_PMU_SYSFS */
if (cpu_has_feature(CPU_FTR_PURR))
device_remove_file(s, &dev_attr_purr);
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 5b905a2f4e4d..d34276f3c495 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -16,25 +16,22 @@
#ifdef CONFIG_PPC64
.p2align 3
+#define __SYSCALL(nr, entry) .8byte entry
+#else
+#define __SYSCALL(nr, entry) .long entry
#endif
.globl sys_call_table
sys_call_table:
#ifdef CONFIG_PPC64
-#define __SYSCALL(nr, entry) .8byte DOTSYM(entry)
#include <asm/syscall_table_64.h>
-#undef __SYSCALL
#else
-#define __SYSCALL(nr, entry) .long entry
#include <asm/syscall_table_32.h>
-#undef __SYSCALL
#endif
#ifdef CONFIG_COMPAT
.globl compat_sys_call_table
compat_sys_call_table:
#define compat_sys_sigsuspend sys_sigsuspend
-#define __SYSCALL(nr, entry) .8byte DOTSYM(entry)
#include <asm/syscall_table_c32.h>
-#undef __SYSCALL
#endif
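With __SYSCALL() now defined once per word size rather than redefined around each table, every line pulled in from the generated headers expands directly to a table slot. An illustrative expansion for a 64-bit build, using the clone3 entry from the syscall.tbl change above:

	#define __SYSCALL(nr, entry)	.8byte entry
	__SYSCALL(435, sys_clone3)	/* emits: .8byte sys_clone3 */
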
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1168e8b37e30..bda9cb4a0a5f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -663,15 +663,6 @@ void timer_broadcast_interrupt(void)
}
#endif
-/*
- * Hypervisor decrementer interrupts shouldn't occur but are sometimes
- * left pending on exit from a KVM guest. We don't need to do anything
- * to clear them, as they are edge-triggered.
- */
-void hdec_interrupt(struct pt_regs *regs)
-{
-}
-
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 82a3438300fd..3fca22276bb1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -2278,35 +2278,20 @@ void ppc_warn_emulated_print(const char *type)
static int __init ppc_warn_emulated_init(void)
{
- struct dentry *dir, *d;
+ struct dentry *dir;
unsigned int i;
struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
- if (!powerpc_debugfs_root)
- return -ENODEV;
-
dir = debugfs_create_dir("emulated_instructions",
powerpc_debugfs_root);
- if (!dir)
- return -ENOMEM;
- d = debugfs_create_u32("do_warn", 0644, dir,
- &ppc_warn_emulated);
- if (!d)
- goto fail;
+ debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);
- for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
- d = debugfs_create_u32(entries[i].name, 0644, dir,
- (u32 *)&entries[i].val.counter);
- if (!d)
- goto fail;
- }
+ for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
+ debugfs_create_u32(entries[i].name, 0644, dir,
+ (u32 *)&entries[i].val.counter);
return 0;
-
-fail:
- debugfs_remove_recursive(dir);
- return -ENOMEM;
}
device_initcall(ppc_warn_emulated_init);
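The deleted error handling relies on the debugfs convention that creation failures are never fatal: debugfs_create_dir() returns an ERR_PTR-encoded value on failure, and later debugfs_create_*() calls tolerate such a value as their parent, so callers can simply ignore the results. A minimal sketch of the resulting pattern:

	static int __init example_debugfs_init(void)
	{
		struct dentry *dir;

		/* no NULL/IS_ERR checks: debugfs failures are non-fatal and
		 * subsequent calls accept an ERR_PTR parent */
		dir = debugfs_create_dir("emulated_instructions",
					 powerpc_debugfs_root);
		debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);
		return 0;
	}
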
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b9a108411c0d..d3b77c15f9ce 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -391,12 +391,7 @@ static unsigned long __init find_function64(struct lib64_elfinfo *lib,
symname);
return 0;
}
-#ifdef VDS64_HAS_DESCRIPTORS
- return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) -
- VDSO64_LBASE;
-#else
return sym->st_value - VDSO64_LBASE;
-#endif
}
static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
diff --git a/arch/powerpc/kernel/vdso32/.gitignore b/arch/powerpc/kernel/vdso32/.gitignore
index fea5809857a5..824b863ec6bd 100644
--- a/arch/powerpc/kernel/vdso32/.gitignore
+++ b/arch/powerpc/kernel/vdso32/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso32.lds
vdso32.so.dbg
diff --git a/arch/powerpc/kernel/vdso64/.gitignore b/arch/powerpc/kernel/vdso64/.gitignore
index 77a0b423642c..84151a7ba31d 100644
--- a/arch/powerpc/kernel/vdso64/.gitignore
+++ b/arch/powerpc/kernel/vdso64/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso64.lds
vdso64.so.dbg
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 25c14a0981bf..d20c5e79e03c 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -134,7 +134,7 @@ _GLOBAL(load_up_vsx)
/* enable use of VSX after return */
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
- b fast_exception_return
+ b fast_interrupt_return
#endif /* CONFIG_VSX */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b4c89a1acebb..31a0f201fb6f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -256,6 +256,7 @@ SECTIONS
*(.dynamic)
}
.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
+ .gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
{
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index 378f6108a414..86380c69f5ce 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -3,9 +3,6 @@
# Makefile for the linux kernel.
#
-# Avoid clang warnings around longjmp/setjmp declarations
-CFLAGS_crash.o += -ffreestanding
-
obj-y += core.o crash.o core_$(BITS).o
obj-$(CONFIG_PPC32) += relocate_32.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d07a8e12fa15..5690a1f9b976 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -799,21 +799,19 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
+
}
-void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- kvm->arch.kvm_ops->free_memslot(free, dont);
+ return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}
-int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- return kvm->arch.kvm_ops->create_memslot(slot, npages);
+ kvm->arch.kvm_ops->free_memslot(slot);
}
void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
@@ -823,9 +821,11 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
- return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+ return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
+ change);
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
@@ -858,11 +858,6 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
return 0;
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
-}
-
int kvmppc_core_init_vm(struct kvm *kvm)
{
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 3a4613985949..eae259ee49af 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -16,6 +16,7 @@ extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index f21e73492ce3..3fbd570f9c1e 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -234,7 +234,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
case 2:
case 6:
pte->may_write = true;
- /* fall through */
+ fallthrough;
case 3:
case 5:
case 7:
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index d4cb3bcf41b6..e8e7b2c530d1 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -356,7 +356,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
-int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int err;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 599133256a95..26b8b27a3755 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -311,7 +311,7 @@ do_second:
case 2:
case 6:
gpte->may_write = true;
- /* fall through */
+ fallthrough;
case 3:
case 5:
case 7:
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 044dd49eeb9d..e452158a18d7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -384,7 +384,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
__destroy_context(to_book3s(vcpu)->context_id[0]);
}
-int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int err;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 6c372f5c61b6..6404df613ea3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -485,18 +485,18 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
__be64 *hptep;
unsigned long mmu_seq, psize, pte_size;
unsigned long gpa_base, gfn_base;
- unsigned long gpa, gfn, hva, pfn;
+ unsigned long gpa, gfn, hva, pfn, hpa;
struct kvm_memory_slot *memslot;
unsigned long *rmap;
struct revmap_entry *rev;
- struct page *page, *pages[1];
- long index, ret, npages;
+ struct page *page;
+ long index, ret;
bool is_ci;
- unsigned int writing, write_ok;
- struct vm_area_struct *vma;
+ bool writing, write_ok;
+ unsigned int shift;
unsigned long rcbits;
long mmio_update;
- struct mm_struct *mm;
+ pte_t pte, *ptep;
if (kvm_is_radix(kvm))
return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
@@ -570,59 +570,62 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
smp_rmb();
ret = -EFAULT;
- is_ci = false;
- pfn = 0;
page = NULL;
- mm = kvm->mm;
- pte_size = PAGE_SIZE;
writing = (dsisr & DSISR_ISSTORE) != 0;
/* If writing != 0, then the HPTE must allow writing, if we get here */
write_ok = writing;
hva = gfn_to_hva_memslot(memslot, gfn);
- npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
- if (npages < 1) {
- /* Check if it's an I/O mapping */
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, hva);
- if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
- (vma->vm_flags & VM_PFNMAP)) {
- pfn = vma->vm_pgoff +
- ((hva - vma->vm_start) >> PAGE_SHIFT);
- pte_size = psize;
- is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
- write_ok = vma->vm_flags & VM_WRITE;
- }
- up_read(&mm->mmap_sem);
- if (!pfn)
- goto out_put;
+
+ /*
+ * Do a fast check first, since __gfn_to_pfn_memslot() does not itself
+ * try the fast path when called with !atomic && !async, which is how
+ * we call it.
+ * We always ask for write permission since the common case
+ * is that the page is writable.
+ */
+ if (__get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ write_ok = true;
} else {
- page = pages[0];
- pfn = page_to_pfn(page);
- if (PageHuge(page)) {
- page = compound_head(page);
- pte_size <<= compound_order(page);
- }
- /* if the guest wants write access, see if that is OK */
- if (!writing && hpte_is_writable(r)) {
- pte_t *ptep, pte;
- unsigned long flags;
- /*
- * We need to protect against page table destruction
- * hugepage split and collapse.
- */
- local_irq_save(flags);
- ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL);
- if (ptep) {
- pte = kvmppc_read_update_linux_pte(ptep, 1);
- if (__pte_write(pte))
- write_ok = 1;
- }
- local_irq_restore(flags);
+ /* Call KVM generic code to do the slow-path check */
+ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+ writing, &write_ok);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+ page = NULL;
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ page = NULL;
}
}
+ /*
+ * Read the PTE from the process' radix tree and use that
+ * so we get the shift and attribute bits.
+ */
+ local_irq_disable();
+ ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ /*
+ * If the PTE disappeared temporarily due to a THP
+ * collapse, just return and let the guest try again.
+ */
+ if (!ptep) {
+ local_irq_enable();
+ if (page)
+ put_page(page);
+ return RESUME_GUEST;
+ }
+ pte = *ptep;
+ local_irq_enable();
+ hpa = pte_pfn(pte) << PAGE_SHIFT;
+ pte_size = PAGE_SIZE;
+ if (shift)
+ pte_size = 1ul << shift;
+ is_ci = pte_ci(pte);
+
if (psize > pte_size)
goto out_put;
+ if (pte_size > psize)
+ hpa |= hva & (pte_size - psize);
/* Check WIMG vs. the actual page we're accessing */
if (!hpte_cache_flags_ok(r, is_ci)) {
@@ -636,14 +639,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
/*
- * Set the HPTE to point to pfn.
- * Since the pfn is at PAGE_SIZE granularity, make sure we
+ * Set the HPTE to point to hpa.
+ * Since the hpa is at PAGE_SIZE granularity, make sure we
* don't mask out lower-order bits if psize < PAGE_SIZE.
*/
if (psize < PAGE_SIZE)
psize = PAGE_SIZE;
- r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
- ((pfn << PAGE_SHIFT) & ~(psize - 1));
+ r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
if (hpte_is_writable(r) && !write_ok)
r = hpte_make_readonly(r);
ret = RESUME_GUEST;
@@ -708,20 +710,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
asm volatile("ptesync" : : : "memory");
preempt_enable();
if (page && hpte_is_writable(r))
- SetPageDirty(page);
+ set_page_dirty_lock(page);
out_put:
trace_kvm_page_fault_exit(vcpu, hpte, ret);
- if (page) {
- /*
- * We drop pages[0] here, not page because page might
- * have been set to the head page of a compound, but
- * we have to drop the reference on the correct tail
- * page to match the get inside gup()
- */
- put_page(pages[0]);
- }
+ if (page)
+ put_page(page);
return ret;
out_unlock:
@@ -2138,9 +2133,8 @@ static const struct file_operations debugfs_htab_fops = {
void kvmppc_mmu_debugfs_init(struct kvm *kvm)
{
- kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
- kvm->arch.debugfs_dir, kvm,
- &debugfs_htab_fops);
+ debugfs_create_file("htab", 0400, kvm->arch.debugfs_dir, kvm,
+ &debugfs_htab_fops);
}
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
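One subtlety in the reworked fault path above is the sub-page fix-up, hpa |= hva & (pte_size - psize): when the backing Linux PTE maps a larger page than the guest HPTE covers, the host physical address must keep the offset of the HPTE-sized slot inside the larger page. A worked example, with sizes chosen for illustration:

	/* backing PTE maps 2 MiB (shift = 21), guest HPTE covers 64 KiB */
	unsigned long pte_size = 1ul << 21;	/* 0x200000 */
	unsigned long psize    = 1ul << 16;	/* 0x010000 */
	unsigned long hpa      = pte_pfn(pte) << PAGE_SHIFT; /* 2 MiB-aligned */

	/* pte_size - psize == 0x1f0000: exactly the hva bits that select
	 * which 64 KiB slot of the 2 MiB page is being mapped */
	hpa |= hva & (pte_size - psize);
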
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 803940d79b73..9f050064d2a2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -425,7 +425,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
unsigned int lpid)
{
if (full) {
- memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
+ memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
} else {
pte_t *p = pte;
unsigned long it;
@@ -1376,9 +1376,8 @@ static const struct file_operations debugfs_radix_fops = {
void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
- kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
- kvm->arch.debugfs_dir, kvm,
- &debugfs_radix_fops);
+ debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
+ &debugfs_radix_fops);
}
int kvmppc_radix_init(void)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index ee6c103bb7d5..50555ad1db93 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -27,7 +27,6 @@
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index ab6eeb8e753e..6fcaf1fa8e02 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -24,7 +24,6 @@
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2cefd071b848..93493f0cbfe8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -72,7 +72,6 @@
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
@@ -1074,25 +1073,35 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_PAGE_IN:
- ret = kvmppc_h_svm_page_in(vcpu->kvm,
- kvmppc_get_gpr(vcpu, 4),
- kvmppc_get_gpr(vcpu, 5),
- kvmppc_get_gpr(vcpu, 6));
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_page_in(vcpu->kvm,
+ kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_PAGE_OUT:
- ret = kvmppc_h_svm_page_out(vcpu->kvm,
- kvmppc_get_gpr(vcpu, 4),
- kvmppc_get_gpr(vcpu, 5),
- kvmppc_get_gpr(vcpu, 6));
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_page_out(vcpu->kvm,
+ kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_INIT_START:
- ret = kvmppc_h_svm_init_start(vcpu->kvm);
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_init_start(vcpu->kvm);
break;
case H_SVM_INIT_DONE:
- ret = kvmppc_h_svm_init_done(vcpu->kvm);
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_init_done(vcpu->kvm);
break;
case H_SVM_INIT_ABORT:
- ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_init_abort(vcpu->kvm);
break;
default:
@@ -2258,14 +2267,9 @@ static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
struct kvm *kvm = vcpu->kvm;
snprintf(buf, sizeof(buf), "vcpu%u", id);
- if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
- return;
vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
- if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
- return;
- vcpu->arch.debugfs_timings =
- debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
- vcpu, &debugfs_timings_ops);
+ debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu,
+ &debugfs_timings_ops);
}
#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
@@ -3616,6 +3620,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
kvmppc_nested_cede(vcpu);
+ kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
} else {
@@ -4400,7 +4405,7 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
slots = kvm_memslots(kvm);
memslot = id_to_memslot(slots, log->slot);
r = -ENOENT;
- if (!memslot->dirty_bitmap)
+ if (!memslot || !memslot->dirty_bitmap)
goto out;
/*
@@ -4447,29 +4452,26 @@ out:
return r;
}
-static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
{
- if (!dont || free->arch.rmap != dont->arch.rmap) {
- vfree(free->arch.rmap);
- free->arch.rmap = NULL;
- }
+ vfree(slot->arch.rmap);
+ slot->arch.rmap = NULL;
}
-static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
- unsigned long npages)
+static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ const struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
- slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap)));
- if (!slot->arch.rmap)
- return -ENOMEM;
+ unsigned long npages = mem->memory_size >> PAGE_SHIFT;
- return 0;
-}
+ if (change == KVM_MR_CREATE) {
+ slot->arch.rmap = vzalloc(array_size(npages,
+ sizeof(*slot->arch.rmap)));
+ if (!slot->arch.rmap)
+ return -ENOMEM;
+ }
-static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
-{
return 0;
}
@@ -4558,11 +4560,6 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
}
}
-static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
-{
- return;
-}
-
void kvmppc_setup_partition_table(struct kvm *kvm)
{
unsigned long dw0, dw1;
@@ -5427,6 +5424,21 @@ static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
}
/*
+ * Enable a guest to become a secure VM, or test whether
+ * that could be enabled.
+ * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
+ * tested (kvm == NULL) or enabled (kvm != NULL).
+ */
+static int kvmhv_enable_svm(struct kvm *kvm)
+{
+ if (!kvmppc_uvmem_available())
+ return -EINVAL;
+ if (kvm)
+ kvm->arch.svm_enabled = 1;
+ return 0;
+}
+
+/*
* IOCTL handler to turn off secure mode of guest
*
* - Release all device pages
@@ -5526,9 +5538,7 @@ static struct kvmppc_ops kvm_ops_hv = {
.age_hva = kvm_age_hva_hv,
.test_age_hva = kvm_test_age_hva_hv,
.set_spte_hva = kvm_set_spte_hva_hv,
- .mmu_destroy = kvmppc_mmu_destroy_hv,
.free_memslot = kvmppc_core_free_memslot_hv,
- .create_memslot = kvmppc_core_create_memslot_hv,
.init_vm = kvmppc_core_init_vm_hv,
.destroy_vm = kvmppc_core_destroy_vm_hv,
.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
@@ -5548,6 +5558,7 @@ static struct kvmppc_ops kvm_ops_hv = {
.enable_nested = kvmhv_enable_nested,
.load_from_eaddr = kvmhv_load_from_eaddr,
.store_to_eaddr = kvmhv_store_to_eaddr,
+ .enable_svm = kvmhv_enable_svm,
.svm_off = kvmhv_svm_off,
};
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index dbc2fecc37f0..780a499c7114 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1266,7 +1266,6 @@ kvmppc_interrupt_hv:
* R12 = (guest CR << 32) | interrupt vector
* R13 = PACA
* guest R12 saved in shadow VCPU SCRATCH0
- * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
* guest R13 saved in SPRN_SCRATCH0
*/
std r9, HSTATE_SCRATCH2(r13)
@@ -1367,12 +1366,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
11: stw r3,VCPU_HEIR(r9)
/* these are volatile across C function calls */
-#ifdef CONFIG_RELOCATABLE
- ld r3, HSTATE_SCRATCH1(r13)
- mtctr r3
-#else
mfctr r3
-#endif
mfxer r4
std r3, VCPU_CTR(r9)
std r4, VCPU_XER(r9)
@@ -3258,7 +3252,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
* r12 is (CR << 32) | vector
* r13 points to our PACA
* r12 is saved in HSTATE_SCRATCH0(r13)
- * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
* r9 is saved in HSTATE_SCRATCH2(r13)
* r13 is saved in HSPRG1
* cfar is saved in HSTATE_CFAR(r13)
@@ -3307,11 +3300,7 @@ kvmppc_bad_host_intr:
ld r5, HSTATE_CFAR(r13)
std r5, ORIG_GPR3(r1)
mflr r3
-#ifdef CONFIG_RELOCATABLE
- ld r4, HSTATE_SCRATCH1(r13)
-#else
mfctr r4
-#endif
mfxer r5
lbz r6, PACAIRQSOFTMASK(r13)
std r3, _LINK(r1)
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 0db937497169..cc90b8b82329 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -3,6 +3,8 @@
* Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
@@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
u64 newmsr, bescr;
int ra, rs;
- switch (instr & 0xfc0007ff) {
+ /*
+ * rfid, rfebb, and mtmsrd encode bit 31 = 0, since it is a reserved
+ * bit in these instructions, so masking bit 31 out does not change
+ * them. For treclaim., tsr., and trechkpt., a form with bit 31 = 0 is
+ * an invalid form per the ISA; however, the P9 User's Manual, section
+ * 4.6.10 "Book II Invalid Forms", states specifically that ignoring
+ * bit 31 is an acceptable way to handle these invalid forms. Moreover,
+ * for emulation purposes both forms (with and without bit 31 set) can
+ * generate a softpatch interrupt. Hence both forms are handled below
+ * for these instructions, so they behave the same way.
+ */
+ switch (instr & PO_XOP_OPCODE_MASK) {
case PPC_INST_RFID:
/* XXX do we need to check for PR=0 here? */
newmsr = vcpu->arch.shregs.srr1;
@@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr = newmsr;
return RESUME_GUEST;
- case PPC_INST_TSR:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
/* check for PR=1 and arch 2.06 bit set in PCR */
if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
/* generate an illegal instruction interrupt */
@@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr = msr;
return RESUME_GUEST;
- case PPC_INST_TRECLAIM:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
/* check for TM disabled in the HFSCR or MSR */
if (!(vcpu->arch.hfscr & HFSCR_TM)) {
/* generate an illegal instruction interrupt */
@@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
return RESUME_GUEST;
- case PPC_INST_TRECHKPT:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
/* XXX do we need to check for PR=0 here? */
/* check for TM disabled in the HFSCR or MSR */
if (!(vcpu->arch.hfscr & HFSCR_TM)) {
@@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
}
/* What should we do here? We didn't recognize the instruction */
- WARN_ON_ONCE(1);
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation\n", instr);
+
return RESUME_GUEST;
}
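The old literal mask 0xfc0007ff kept bit 31, so the invalid bit-31-clear forms fell through to the default case; PO_XOP_OPCODE_MASK drops that bit. A minimal standalone check of the collapsing behaviour, assuming the mask value is 0xfc0007fe (primary opcode plus extended opcode, without bit 31) and using a hypothetical encoding for tsr.:

	#include <assert.h>

	#define PO_XOP_OPCODE_MASK 0xfc0007feu	/* assumed value */

	int main(void)
	{
		unsigned int tsr_valid   = 0x7c0005ddu;	/* hypothetical encoding, bit 31 = 1 */
		unsigned int tsr_invalid = tsr_valid & ~1u;	/* invalid form, bit 31 = 0 */

		/* both forms land on the same switch case after masking */
		assert((tsr_valid & PO_XOP_OPCODE_MASK) ==
		       (tsr_invalid & PO_XOP_OPCODE_MASK));
		return 0;
	}
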
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index 217246279dfa..fad931f224ef 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
u64 newmsr, msr, bescr;
int rs;
- switch (instr & 0xfc0007ff) {
+ /*
+ * rfid, rfebb, and mtmsrd encode bit 31 = 0, since it is a reserved
+ * bit in these instructions, so masking bit 31 out does not change
+ * them. For tsr., a form with bit 31 = 0 is an invalid form per the
+ * ISA; however, the P9 User's Manual, section 4.6.10 "Book II Invalid
+ * Forms", states specifically that ignoring bit 31 is an acceptable
+ * way to handle TM-related invalid forms that have bit 31 = 0.
+ * Moreover, for emulation purposes both forms (with and without
+ * bit 31 set) can generate a softpatch interrupt. Hence both forms
+ * are handled below for tsr., to make them behave the same way.
+ */
+ switch (instr & PO_XOP_OPCODE_MASK) {
case PPC_INST_RFID:
/* XXX do we need to check for PR=0 here? */
newmsr = vcpu->arch.shregs.srr1;
@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr = newmsr;
return 1;
- case PPC_INST_TSR:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
/* we know the MSR has the TS field = S (0b01) here */
msr = vcpu->arch.shregs.msr;
/* check for PR=1 and arch 2.06 bit set in PCR */
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 79b1202b1c62..76d05c71fb1f 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -113,6 +113,15 @@ struct kvmppc_uvmem_page_pvt {
bool skip_page_out;
};
+bool kvmppc_uvmem_available(void)
+{
+ /*
+ * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
+ * and our data structures have been initialized successfully.
+ */
+ return !!kvmppc_uvmem_bitmap;
+}
+
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
struct kvmppc_uvmem_slot *p;
@@ -209,6 +218,8 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
int ret = H_SUCCESS;
int srcu_idx;
+ kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
+
if (!kvmppc_uvmem_bitmap)
return H_UNSUPPORTED;
@@ -216,6 +227,10 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
if (!kvm_is_radix(kvm))
return H_UNSUPPORTED;
+ /* NAK the transition to secure if not enabled */
+ if (!kvm->arch.svm_enabled)
+ return H_AUTHORITY;
+
srcu_idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
@@ -233,7 +248,6 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
goto out;
}
}
- kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
@@ -563,6 +577,7 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
mig.end = end;
mig.src = &src_pfn;
mig.dst = &dst_pfn;
+ mig.src_owner = &kvmppc_uvmem_pgmap;
mutex_lock(&kvm->arch.uvmem_lock);
/* The requested page is already paged-out, nothing to do */
@@ -779,6 +794,8 @@ int kvmppc_uvmem_init(void)
kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
kvmppc_uvmem_pgmap.res = *res;
kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
+ /* just one global instance: */
+ kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
@@ -806,6 +823,9 @@ out:
void kvmppc_uvmem_free(void)
{
+ if (!kvmppc_uvmem_bitmap)
+ return;
+
memunmap_pages(&kvmppc_uvmem_pgmap);
release_mem_region(kvmppc_uvmem_pgmap.res.start,
resource_size(&kvmppc_uvmem_pgmap.res));
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index db3a87319642..a0f6813f4560 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -740,7 +740,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
(vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
pte.raddr &= ~SPLIT_HACK_MASK;
- /* fall through */
+ fallthrough;
case MSR_IR:
vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -1795,7 +1795,7 @@ static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
- err = kvmppc_mmu_init(vcpu);
+ err = kvmppc_mmu_init_pr(vcpu);
if (err < 0)
goto free_shared_page;
@@ -1885,7 +1885,6 @@ out:
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
struct kvm_vcpu *vcpu;
ulong ga, ga_end;
@@ -1895,15 +1894,12 @@ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
if (r)
goto out;
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -1928,7 +1924,8 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
return 0;
}
@@ -1942,19 +1939,11 @@ static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
return;
}
-static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot)
{
return;
}
-static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
-
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
struct kvm_ppc_smmu_info *info)
@@ -2098,9 +2087,7 @@ static struct kvmppc_ops kvm_ops_pr = {
.age_hva = kvm_age_hva_pr,
.test_age_hva = kvm_test_age_hva_pr,
.set_spte_hva = kvm_set_spte_hva_pr,
- .mmu_destroy = kvmppc_mmu_destroy_pr,
.free_memslot = kvmppc_core_free_memslot_pr,
- .create_memslot = kvmppc_core_create_memslot_pr,
.init_vm = kvmppc_core_init_vm_pr,
.destroy_vm = kvmppc_core_destroy_vm_pr,
.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0169bab544dd..1f492aa4c8d6 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -167,16 +167,9 @@ kvmppc_interrupt_pr:
* R12 = (guest CR << 32) | exit handler id
* R13 = PACA
* HSTATE.SCRATCH0 = guest R12
- * HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
*/
#ifdef CONFIG_PPC64
/* Match 32-bit entry */
-#ifdef CONFIG_RELOCATABLE
- std r9, HSTATE_SCRATCH2(r13)
- ld r9, HSTATE_SCRATCH1(r13)
- mtctr r9
- ld r9, HSTATE_SCRATCH2(r13)
-#endif
rotldi r12, r12, 32 /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
srdi r12, r12, 32 /* shift trap into low half */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7b27604adadf..6c18ea88fd25 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -421,11 +421,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_DATA_STORAGE:
case BOOKE_IRQPRIO_ALIGNMENT:
update_dear = true;
- /* fall through */
+ fallthrough;
case BOOKE_IRQPRIO_INST_STORAGE:
case BOOKE_IRQPRIO_PROGRAM:
update_esr = true;
- /* fall through */
+ fallthrough;
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
@@ -459,7 +459,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_DECREMENTER:
case BOOKE_IRQPRIO_FIT:
keep_irq = true;
- /* fall through */
+ fallthrough;
case BOOKE_IRQPRIO_EXTERNAL:
case BOOKE_IRQPRIO_DBELL:
allowed = vcpu->arch.shared->msr & MSR_EE;
@@ -1766,25 +1766,24 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return r;
}
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- return -ENOTSUPP;
+
}
-void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
+ return -ENOTSUPP;
}
-int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- return 0;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
{
return 0;
}
@@ -2074,11 +2073,6 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
kvmppc_clear_dbsr();
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
-}
-
int kvmppc_core_init_vm(struct kvm *kvm)
{
return kvm->arch.kvm_ops->init_vm(kvm);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 9d3169fbce55..65b4d337d337 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -94,7 +94,6 @@ enum int_class {
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
@@ -102,7 +101,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index f2b4feaff6d2..7e8b69015d20 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -490,7 +490,6 @@ static struct kvmppc_ops kvm_ops_e500 = {
.vcpu_put = kvmppc_core_vcpu_put_e500,
.vcpu_create = kvmppc_core_vcpu_create_e500,
.vcpu_free = kvmppc_core_vcpu_free_e500,
- .mmu_destroy = kvmppc_mmu_destroy_e500,
.init_vm = kvmppc_core_init_vm_e500,
.destroy_vm = kvmppc_core_destroy_vm_e500,
.emulate_op = kvmppc_core_emulate_op_e500,
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 2d910b87e441..e131fbecdcc4 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -533,10 +533,6 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
-void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
-{
-}
-
/*****************************************/
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 425d13806645..df9989cf7ba3 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -422,7 +422,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
break;
}
} else if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_HUGETLB)) {
+ is_vm_hugetlb_page(vma)) {
unsigned long psize = vma_kernel_pagesize(vma);
tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index e6b06cb2b92c..1c189b5aadcc 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -376,7 +376,6 @@ static struct kvmppc_ops kvm_ops_e500mc = {
.vcpu_put = kvmppc_core_vcpu_put_e500mc,
.vcpu_create = kvmppc_core_vcpu_create_e500mc,
.vcpu_free = kvmppc_core_vcpu_free_e500mc,
- .mmu_destroy = kvmppc_mmu_destroy_e500,
.init_vm = kvmppc_core_init_vm_e500mc,
.destroy_vm = kvmppc_core_destroy_vm_e500mc,
.emulate_op = kvmppc_core_emulate_op_e500,
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index fe312c160d97..23e9c2bd9f27 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -32,7 +32,6 @@
#include <linux/uaccess.h>
#include <asm/mpic.h>
#include <asm/kvm_para.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
#include <kvm/iodev.h>
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 302e9dccdd6d..e15166b0a16d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -32,7 +32,6 @@
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
-#include <asm/kvm_host.h>
#include "timing.h"
#include "irq.h"
@@ -416,12 +415,12 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
{
return 0;
}
-int kvm_arch_check_processor_compat(void)
+int kvm_arch_check_processor_compat(void *opaque)
{
return kvmppc_core_check_processor_compat();
}
@@ -525,7 +524,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
- /* fall through */
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
@@ -671,6 +669,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
(hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
break;
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+ case KVM_CAP_PPC_SECURE_GUEST:
+ r = hv_enabled && kvmppc_hv_ops->enable_svm &&
+ !kvmppc_hv_ops->enable_svm(NULL);
+ break;
+#endif
default:
r = 0;
break;
@@ -685,16 +689,9 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
-{
- kvmppc_core_free_memslot(kvm, free, dont);
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- return kvmppc_core_create_memslot(kvm, slot, npages);
+ kvmppc_core_free_memslot(kvm, slot);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -702,12 +699,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
- return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
+ return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
@@ -2176,6 +2173,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = kvm->arch.kvm_ops->enable_nested(kvm);
break;
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+ case KVM_CAP_PPC_SECURE_GUEST:
+ r = -EINVAL;
+ if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
+ break;
+ r = kvm->arch.kvm_ops->enable_svm(kvm);
+ break;
+#endif
default:
r = -EINVAL;
break;
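From userspace, the new capability follows the standard KVM pattern: probe with KVM_CHECK_EXTENSION, then opt in with KVM_ENABLE_CAP on the VM file descriptor, which routes to kvm_vm_ioctl_enable_cap() above. A minimal sketch, with error handling elided:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_secure_guest(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_PPC_SECURE_GUEST,
		};

		/* > 0 only when HV KVM is active and an ultravisor is present */
		if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SECURE_GUEST) <= 0)
			return -1;
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
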
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index bfe4f106cffc..ba56a5cbba97 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -211,23 +211,14 @@ void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
current->pid, id);
- debugfs_file = debugfs_create_file(dbg_fname, 0666,
- kvm_debugfs_dir, vcpu,
- &kvmppc_exit_timing_fops);
-
- if (!debugfs_file) {
- printk(KERN_ERR"%s: error creating debugfs file %s\n",
- __func__, dbg_fname);
- return;
- }
+ debugfs_file = debugfs_create_file(dbg_fname, 0666, kvm_debugfs_dir,
+ vcpu, &kvmppc_exit_timing_fops);
vcpu->arch.debugfs_exit_timing = debugfs_file;
}
void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.debugfs_exit_timing) {
- debugfs_remove(vcpu->arch.debugfs_exit_timing);
- vcpu->arch.debugfs_exit_timing = NULL;
- }
+ debugfs_remove(vcpu->arch.debugfs_exit_timing);
+ vcpu->arch.debugfs_exit_timing = NULL;
}
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index ace65f9fed30..feef7885ba82 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -10,7 +10,6 @@
#define __POWERPC_KVM_EXITTIMING_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_host.h>
#ifdef CONFIG_KVM_EXIT_TIMING
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index c077acb983a1..5f3a7bd9d90d 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -3179,8 +3179,9 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
* entry code works. If that is changed, this will
* need to be changed also.
*/
- if (regs->gpr[0] == 0x1ebe &&
- cpu_has_feature(CPU_FTR_REAL_LE)) {
+ if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
+ cpu_has_feature(CPU_FTR_REAL_LE) &&
+ regs->gpr[0] == 0x1ebe) {
regs->msr ^= MSR_LE;
goto instr_done;
}
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 42347067739c..53df4146dd32 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -13,19 +13,20 @@
#include <asm/code-patching.h>
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
+#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
/*
* Defined with TEST_ prefix so it does not conflict with other
* definitions.
*/
#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
+ ___PPC_RA(base) | IMM_DS(i))
#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
+ ___PPC_RA(base) | IMM_DS(i))
#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b) | \
__PPC_EH(eh))
@@ -160,7 +161,7 @@ static void __init test_std(void)
/* std r5, 0(r3) */
stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
- if (stepped == 1 || regs.gpr[5] == a)
+ if (stepped == 1 && regs.gpr[5] == a)
show_result("std", "PASS");
else
show_result("std", "FAIL");
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 2015c4f96238..6d236080cb1a 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -35,7 +35,7 @@ mmu_hash_lock:
/*
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x400) if a write.
+ * _PAGE_RW (0x002) if a write.
* r9 contains the SRR1 value, from which we use the MSR_PR bit.
* SPRG_THREAD contains the physical address of the current task's thread.
*
@@ -69,7 +69,7 @@ _GLOBAL(hash_page)
blt+ 112f /* assume user more likely */
lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
- rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
+ rlwimi r3,r9,32-14,31,31 /* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
@@ -94,7 +94,7 @@ _GLOBAL(hash_page)
#else
rlwimi r8,r4,23,20,28 /* compute pte address */
#endif
- rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
+ rlwinm r0,r3,6,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
/*
@@ -310,11 +310,9 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
_GLOBAL(create_hpte)
/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
- rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */
rlwinm r0,r5,32-6,30,30 /* _PAGE_DIRTY -> PP msb */
- and r8,r8,r0 /* writable if _RW & _DIRTY */
- rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
- rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
+ and r8,r5,r0 /* writable if _RW & _DIRTY */
+ rlwimi r5,r5,1,30,30 /* _PAGE_USER -> PP msb */
ori r8,r8,0xe04 /* clear out reserved bits */
andc r8,r5,r8 /* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
@@ -566,7 +564,7 @@ _GLOBAL(flush_hash_pages)
33: lwarx r8,0,r5 /* fetch the pte flags word */
andi. r0,r8,_PAGE_HASHPTE
beq 8f /* done if HASHPTE is already clear */
- rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
+ rlwinm r8,r8,0,~_PAGE_HASHPTE /* clear HASHPTE bit */
stwcx. r8,0,r5 /* update the pte */
bne- 33b
@@ -690,18 +688,21 @@ _GLOBAL(_tlbia)
bne- 10b
stwcx. r8,0,r9
bne- 10b
+#endif /* CONFIG_SMP */
+ li r5, 32
+ lis r4, KERNELBASE@h
+ mtctr r5
sync
- tlbia
+0: tlbie r4
+ addi r4, r4, 0x1000
+ bdnz 0b
sync
+#ifdef CONFIG_SMP
TLBSYNC
li r0,0
stw r0,0(r9) /* clear mmu_hash_lock */
mtmsr r10
SYNC_601
isync
-#else /* CONFIG_SMP */
- sync
- tlbia
- sync
#endif /* CONFIG_SMP */
blr
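
A C rendering of the new flush loop, as a sketch only (the real code is the assembly above, and KERNELBASE is assumed from asm/page.h): since tlbia is optional on 32-bit hash MMUs, the TLB is flushed by issuing tlbie for 32 consecutive 4K pages, which touches every TLB congruence class on these cores:

static inline void tlbie(unsigned long ea)
{
	asm volatile("tlbie %0" : : "r" (ea) : "memory");
}

static void tlbia_sketch(void)
{
	unsigned long ea = KERNELBASE;
	int i;

	asm volatile("sync" : : : "memory");
	for (i = 0; i < 32; i++, ea += 0x1000)
		tlbie(ea);	/* one hit per TLB set */
	asm volatile("sync" : : : "memory");
}
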
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index f888cbb109b9..39ba53ca5bb5 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -312,7 +312,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
if (!Hash)
return;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
+ pmd = pmd_ptr(mm, ea);
if (!pmd_none(*pmd))
add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index 2fcd321040ff..dc9039a170aa 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -79,15 +79,18 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
int count;
unsigned int ctx = mm->context.id;
+ start &= PAGE_MASK;
if (!Hash) {
- _tlbia();
+ if (end - start <= PAGE_SIZE)
+ _tlbie(start);
+ else
+ _tlbia();
return;
}
- start &= PAGE_MASK;
if (start >= end)
return;
end = (end - 1) | ~PAGE_MASK;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
+ pmd = pmd_ptr(mm, start);
for (;;) {
pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
if (pmd_end > end)
@@ -145,7 +148,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
return;
}
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
+ pmd = pmd_ptr(mm, vmaddr);
if (!pmd_none(*pmd))
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 523d4d39d11e..7e5714a69a58 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2018,11 +2018,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n")
static int __init hash64_debugfs(void)
{
- if (!debugfs_create_file_unsafe("hpt_order", 0600, powerpc_debugfs_root,
- NULL, &fops_hpt_order)) {
- pr_err("lpar: unable to create hpt_order debugsfs file\n");
- }
-
+ debugfs_create_file("hpt_order", 0600, powerpc_debugfs_root, NULL,
+ &fops_hpt_order);
return 0;
}
machine_device_initcall(pseries, hash64_debugfs);
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index eba73ebd8ae5..fa05bbd1f682 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
goto free_exit;
}
- pageshift = PAGE_SHIFT;
- for (i = 0; i < entries; ++i) {
- struct page *page = mem->hpages[i];
-
- /*
- * Allow to use larger than 64k IOMMU pages. Only do that
- * if we are backed by hugetlb.
- */
- if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
- pageshift = page_shift(compound_head(page));
- mem->pageshift = min(mem->pageshift, pageshift);
- /*
- * We don't need struct page reference any more, switch
- * to physical address.
- */
- mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
- }
-
good_exit:
atomic64_set(&mem->mapped, 1);
mem->used = 1;
@@ -158,6 +140,27 @@ good_exit:
}
}
+ if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+ /*
+ * Allow to use larger than 64k IOMMU pages. Only do that
+ * if we are backed by hugetlb. Skip device memory as it is not
+ * backed with page structs.
+ */
+ pageshift = PAGE_SHIFT;
+ for (i = 0; i < entries; ++i) {
+ struct page *page = mem->hpages[i];
+
+ if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+ pageshift = page_shift(compound_head(page));
+ mem->pageshift = min(mem->pageshift, pageshift);
+ /*
+ * We don't need struct page reference any more, switch
+ * to physical address.
+ */
+ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+ }
+ }
+
list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
mutex_unlock(&mem_list_mutex);
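
A sketch of the computation the relocated block performs (max_iommu_pageshift is an illustrative helper, not in the patch): the usable IOMMU page shift starts at PAGE_SHIFT and may only grow for hugetlb-backed pages; device memory is skipped entirely because it has no struct pages to inspect.

static unsigned int max_iommu_pageshift(struct page **pages, long entries,
					unsigned int limit)
{
	unsigned int shift = limit;
	long i;

	for (i = 0; i < entries; i++) {
		unsigned int ps = PAGE_SHIFT;

		/* Larger-than-64k IOMMU pages only for hugetlb backing. */
		if (limit > PAGE_SHIFT && PageHuge(pages[i]))
			ps = page_shift(compound_head(pages[i]));
		shift = min(shift, ps);
	}
	return shift;
}
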
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 59e0ebbd8036..07527f1ed108 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -381,18 +381,6 @@ bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
* So do not enforce things if the VMA is not from the current mm, or if we are
* in a kernel thread.
*/
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
-{
- if (!current->mm)
- return true;
-
- /* if it is not our ->mm, it has to be foreign */
- if (current->mm != vma->vm_mm)
- return true;
-
- return false;
-}
-
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
bool execute, bool foreign)
{
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index dd1bea45325c..2a9a0cd79490 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -26,6 +26,7 @@
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
+#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 03f43c924e00..758ade2c2b6e 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -587,6 +587,11 @@ void radix__local_flush_all_mm(struct mm_struct *mm)
preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
+
+static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
+{
+ radix__local_flush_all_mm(mm);
+}
#endif /* CONFIG_SMP */
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
@@ -777,7 +782,7 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
EXPORT_SYMBOL(radix__flush_tlb_page);
#else /* CONFIG_SMP */
-#define radix__flush_all_mm radix__local_flush_all_mm
+static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
#endif /* CONFIG_SMP */
static void do_tlbiel_kernel(void *info)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8db0507619e2..84af6c8eecf7 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -314,7 +314,7 @@ static bool access_error(bool is_write, bool is_exec,
return false;
}
- if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+ if (unlikely(!vma_is_accessible(vma)))
return true;
/*
* We should ideally do the vma pkey access check here. But in the
@@ -434,7 +434,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
int is_exec = TRAP(regs) == 0x400;
int is_user = user_mode(regs);
int is_write = page_fault_is_write(error_code);
@@ -582,28 +582,18 @@ good_area:
major |= fault & VM_FAULT_MAJOR;
+ if (fault_signal_pending(fault, regs))
+ return user_mode(regs) ? 0 : SIGBUS;
+
/*
* Handle the retry right now, the mmap_sem has been released in that
* case.
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
- /* We retry only once */
if (flags & FAULT_FLAG_ALLOW_RETRY) {
- /*
- * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation.
- */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
- if (!fatal_signal_pending(current))
- goto retry;
+ goto retry;
}
-
- /*
- * User mode? Just return to handle the fatal exception otherwise
- * return to bad_page_fault
- */
- return is_user ? 0 : SIGBUS;
}
up_read(&current->mm->mmap_sem);
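
The shape the fault path takes after this hunk, sketched with the arch details and mmap_sem re-acquisition elided; fault_signal_pending() and FAULT_FLAG_DEFAULT are the generic-mm helpers this conversion targets:

static int fault_retry_sketch(struct pt_regs *regs,
			      struct vm_area_struct *vma,
			      unsigned long address)
{
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

retry:
	fault = handle_mm_fault(vma, address, flags);

	/* A fatal signal arrived while handling the fault. */
	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

	if (fault & VM_FAULT_RETRY) {
		/* Retries are no longer capped at one; the core mm
		 * prevents starvation internally. (Re-taking mmap_sem
		 * before retrying is elided in this sketch.) */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	return 0;
}
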
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index d2bed3fcb719..cbcad369fcb2 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -36,7 +36,7 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
unsigned long k_cur, k_next;
pte_t *new = NULL;
- pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
+ pmd = pmd_ptr_k(k_start);
for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
k_next = pgd_addr_end(k_cur, k_end);
@@ -78,7 +78,7 @@ static int __init kasan_init_region(void *start, size_t size)
block = memblock_alloc(k_end - k_start, PAGE_SIZE);
for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ pmd_t *pmd = pmd_ptr_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
@@ -101,8 +101,8 @@ static void __init kasan_remap_early_shadow_ro(void)
kasan_populate_pte(kasan_early_shadow_pte, prot);
- for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_ptr_k(k_cur);
pte_t *ptep = pte_offset_kernel(pmd, k_cur);
if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
@@ -196,7 +196,7 @@ void __init kasan_early_init(void)
unsigned long addr = KASAN_SHADOW_START;
unsigned long end = KASAN_SHADOW_END;
unsigned long next;
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
+ pmd_t *pmd = pmd_ptr_k(addr);
BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1c07d5a3f543..9b4f5fb719e0 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -66,12 +66,6 @@ pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
- vaddr), vaddr), vaddr);
-}
#endif
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index f348104eb461..82862723ab42 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -104,7 +104,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+ pmdp = pmd_ptr_k(v);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
@@ -119,7 +119,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+ pmdp = pmd_ptr_k(v);
*pmdp = __pmd(val);
v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 2ca407cedbe7..eaeee402f96e 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -397,7 +397,7 @@ _GLOBAL(set_context)
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
- * Must preserve r7, r8, r9, and r10
+ * Must preserve r7, r8, r9, r10 and r11
*/
_GLOBAL(loadcam_entry)
mflr r5
@@ -433,6 +433,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
*/
_GLOBAL(loadcam_multi)
mflr r8
+ /* Don't switch to AS=1 if already there */
+ mfmsr r11
+ andi. r11,r11,MSR_IS
+ bne 10f
/*
* Set up temporary TLB entry that is the same as what we're
@@ -458,6 +462,7 @@ _GLOBAL(loadcam_multi)
mtmsr r6
isync
+10:
mr r9,r3
add r10,r3,r4
2: bl loadcam_entry
@@ -466,6 +471,10 @@ _GLOBAL(loadcam_multi)
mr r3,r9
blt 2b
+ /* Don't return to AS=0 if we were in AS=1 at function start */
+ andi. r11,r11,MSR_IS
+ bne 3f
+
/* Return to AS=0 and clear the temporary entry */
mfmsr r6
rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
@@ -481,6 +490,7 @@ _GLOBAL(loadcam_multi)
tlbwe
isync
+3:
mtlr r8
blr
#endif
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3c7dec70cda0..9fcf2d195830 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -461,25 +461,69 @@ static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
return nid;
}
+#ifdef CONFIG_PPC_SPLPAR
+static int vphn_get_nid(long lcpu)
+{
+ __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+ long rc, hwid;
+
+ /*
+ * On a shared lpar, the device tree will not have node associativity.
+ * At this time the lppaca, or its __old_status field, may not be
+ * updated. Hence the kernel cannot detect if it's on a shared lpar, so
+ * request an explicit associativity irrespective of whether the
+ * lpar is shared or dedicated. Use the device tree property as a
+ * fallback. cpu_to_phys_id is only valid between
+ * smp_setup_cpu_maps() and smp_setup_pacas().
+ */
+ if (firmware_has_feature(FW_FEATURE_VPHN)) {
+ if (cpu_to_phys_id)
+ hwid = cpu_to_phys_id[lcpu];
+ else
+ hwid = get_hard_smp_processor_id(lcpu);
+
+ rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
+ if (rc == H_SUCCESS)
+ return associativity_to_nid(associativity);
+ }
+
+ return NUMA_NO_NODE;
+}
+#else
+static int vphn_get_nid(long unused)
+{
+ return NUMA_NO_NODE;
+}
+#endif /* CONFIG_PPC_SPLPAR */
+
/*
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
*/
static int numa_setup_cpu(unsigned long lcpu)
{
- int nid = NUMA_NO_NODE;
struct device_node *cpu;
+ int fcpu = cpu_first_thread_sibling(lcpu);
+ int nid = NUMA_NO_NODE;
/*
* If a valid cpu-to-node mapping is already available, use it
* directly instead of querying the firmware, since it represents
* the most recent mapping notified to us by the platform (eg: VPHN).
+ * The cpu_to_node binding remains the same for all threads in the
+ * core, so if a valid cpu-to-node mapping is already available for
+ * the first thread in the core, use it.
*/
- if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
+ nid = numa_cpu_lookup_table[fcpu];
+ if (nid >= 0) {
map_cpu_to_node(lcpu, nid);
return nid;
}
+ nid = vphn_get_nid(lcpu);
+ if (nid != NUMA_NO_NODE)
+ goto out_present;
+
cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
@@ -491,13 +535,26 @@ static int numa_setup_cpu(unsigned long lcpu)
}
nid = of_node_to_nid_single(cpu);
+ of_node_put(cpu);
out_present:
if (nid < 0 || !node_possible(nid))
nid = first_online_node;
+ /*
+ * Update for the first thread of the core. All threads of a core
+ * have to be part of the same node. This not only avoids querying
+ * for every other thread in the core, but also avoids a case
+ * where a virtual node associativity change causes subsequent
+ * threads of a core to be associated with different nids. However,
+ * if the first thread is already online, expect it to have a valid
+ * mapping.
+ */
+ if (fcpu != lcpu) {
+ WARN_ON(cpu_online(fcpu));
+ map_cpu_to_node(fcpu, nid);
+ }
+
map_cpu_to_node(lcpu, nid);
- of_node_put(cpu);
out:
return nid;
}
@@ -1191,23 +1248,30 @@ static long vphn_get_associativity(unsigned long cpu,
VPHN_FLAG_VCPU, associativity);
switch (rc) {
+ case H_SUCCESS:
+ dbg("VPHN hcall succeeded. Reset polling...\n");
+ timed_topology_update(0);
+ goto out;
+
case H_FUNCTION:
- printk_once(KERN_INFO
- "VPHN is not supported. Disabling polling...\n");
- stop_topology_update();
+ pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
break;
case H_HARDWARE:
- printk(KERN_ERR
- "hcall_vphn() experienced a hardware fault "
+ pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
"preventing VPHN. Disabling polling...\n");
- stop_topology_update();
break;
- case H_SUCCESS:
- dbg("VPHN hcall succeeded. Reset polling...\n");
- timed_topology_update(0);
+ case H_PARAMETER:
+ pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
+ "Disabling polling...\n");
+ break;
+ default:
+ pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
+ rc);
break;
}
+ stop_topology_update();
+out:
return rc;
}
@@ -1568,15 +1632,6 @@ int prrn_is_enabled(void)
return prrn_enabled;
}
-void __init shared_proc_topology_init(void)
-{
- if (lppaca_shared_proc(get_lppaca())) {
- bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
- nr_cpumask_bits);
- numa_update_cpu_topology(false);
- }
-}
-
static int topology_read(struct seq_file *file, void *v)
{
if (vphn_enabled || prrn_enabled)
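
The overall lookup order numa_setup_cpu() follows after this change, sketched below; nid_from_device_tree() is a hypothetical stand-in for the of_get_cpu_node()/of_node_to_nid_single() path, and the rest uses the helpers visible in the hunks above:

static int nid_for_cpu_sketch(long lcpu)
{
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = numa_cpu_lookup_table[fcpu];

	if (nid >= 0)			/* 1: reuse the sibling's node */
		return nid;

	nid = vphn_get_nid(lcpu);	/* 2: ask the hypervisor (VPHN) */
	if (nid == NUMA_NO_NODE)
		nid = nid_from_device_tree(lcpu);	/* 3: device tree */

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;		/* 4: last resort */
	return nid;
}
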
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5fb90edd865e..f62de06e3d07 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -63,7 +63,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
int err = -ENOMEM;
/* Use upper 10 bits of VA to index the first level map */
- pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+ pd = pmd_ptr_k(va);
/* Use middle 10 bits of VA to index the second-level map */
if (likely(slab_is_available()))
pg = pte_alloc_kernel(pd, va);
@@ -121,44 +121,9 @@ void __init mapin_ram(void)
}
}
-/* Scan the real Linux page tables and return a PTE pointer for
- * a virtual address in a context.
- * Returns true (1) if PTE was found, zero otherwise. The pointer to
- * the PTE pointer is unmodified if PTE is not found.
- */
-static int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int retval = 0;
-
- pgd = pgd_offset(mm, addr & PAGE_MASK);
- if (pgd) {
- pud = pud_offset(pgd, addr & PAGE_MASK);
- if (pud && pud_present(*pud)) {
- pmd = pmd_offset(pud, addr & PAGE_MASK);
- if (pmd_present(*pmd)) {
- pte = pte_offset_map(pmd, addr & PAGE_MASK);
- if (pte) {
- retval = 1;
- *ptep = pte;
- if (pmdp)
- *pmdp = pmd;
- /* XXX caller needs to do pte_unmap, yuck */
- }
- }
- }
- }
- return(retval);
-}
-
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
pte_t *kpte;
- pmd_t *kpmd;
unsigned long address;
BUG_ON(PageHighMem(page));
@@ -166,10 +131,10 @@ static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
if (v_block_mapped(address))
return 0;
- if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
+ kpte = virt_to_kpte(address);
+ if (!kpte)
return -EINVAL;
__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
- pte_unmap(kpte);
return 0;
}
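
For reference, a sketch of the walk virt_to_kpte() stands in for, using the same page-table accessors the removed get_pteptr() used (32-bit layout of this tree, error handling elided):

static inline pte_t *virt_to_kpte_sketch(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pud_t *pud = pud_offset(pgd, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}
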
diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c
index 4154feac1da3..d3a5d6b318d1 100644
--- a/arch/powerpc/mm/ptdump/bats.c
+++ b/arch/powerpc/mm/ptdump/bats.c
@@ -164,10 +164,8 @@ static const struct file_operations bats_fops = {
static int __init bats_init(void)
{
- struct dentry *debugfs_file;
-
- debugfs_file = debugfs_create_file("block_address_translation", 0400,
- powerpc_debugfs_root, NULL, &bats_fops);
- return debugfs_file ? 0 : -ENOMEM;
+ debugfs_create_file("block_address_translation", 0400,
+ powerpc_debugfs_root, NULL, &bats_fops);
+ return 0;
}
device_initcall(bats_init);
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index a07278027c6f..b6ed9578382f 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -527,13 +527,10 @@ static const struct file_operations ptdump_fops = {
static int ptdump_init(void)
{
- struct dentry *debugfs_file;
-
if (!radix_enabled()) {
populate_markers();
- debugfs_file = debugfs_create_file("kernel_hash_pagetable",
- 0400, NULL, NULL, &ptdump_fops);
- return debugfs_file ? 0 : -ENOMEM;
+ debugfs_create_file("kernel_hash_pagetable", 0400, NULL, NULL,
+ &ptdump_fops);
}
return 0;
}
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 206156255247..d92bb8ea229c 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -417,12 +417,10 @@ void ptdump_check_wx(void)
static int ptdump_init(void)
{
- struct dentry *debugfs_file;
-
populate_markers();
build_pgtable_complete_mask();
- debugfs_file = debugfs_create_file("kernel_page_tables", 0400, NULL,
- NULL, &ptdump_fops);
- return debugfs_file ? 0 : -ENOMEM;
+ debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
+ &ptdump_fops);
+ return 0;
}
device_initcall(ptdump_init);
diff --git a/arch/powerpc/mm/ptdump/segment_regs.c b/arch/powerpc/mm/ptdump/segment_regs.c
index 501843664bb9..dde2fe8de4b2 100644
--- a/arch/powerpc/mm/ptdump/segment_regs.c
+++ b/arch/powerpc/mm/ptdump/segment_regs.c
@@ -55,10 +55,8 @@ static const struct file_operations sr_fops = {
static int __init sr_init(void)
{
- struct dentry *debugfs_file;
-
- debugfs_file = debugfs_create_file("segment_registers", 0400,
- powerpc_debugfs_root, NULL, &sr_fops);
- return debugfs_file ? 0 : -ENOMEM;
+ debugfs_create_file("segment_registers", 0400, powerpc_debugfs_root,
+ NULL, &sr_fops);
+ return 0;
}
device_initcall(sr_init);
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 6620b64e4963..665f18e37efb 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -43,9 +43,6 @@ static int __init warp_probe(void)
if (!of_machine_is_compatible("pika,warp"))
return 0;
- /* For arch_dma_alloc */
- ISA_DMA_THRESHOLD = ~0L;
-
return 1;
}
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 61538869e88a..4514a6f7458a 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -205,7 +205,6 @@ static int __init efika_probe(void)
if (strcmp(model, "EFIKA5K2"))
return 0;
- ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index ada42f03915a..bcdc2c203ec9 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -53,17 +53,19 @@ static void quirk_mpc8360e_qe_enet10(void)
np_par = of_find_node_by_name(NULL, "par_io");
if (np_par == NULL) {
- pr_warn("%s couldn;t find par_io node\n", __func__);
+ pr_warn("%s couldn't find par_io node\n", __func__);
return;
}
/* Map Parallel I/O ports registers */
ret = of_address_to_resource(np_par, 0, &res);
if (ret) {
- pr_warn("%s couldn;t map par_io registers\n", __func__);
- return;
+ pr_warn("%s couldn't map par_io registers\n", __func__);
+ goto out;
}
base = ioremap(res.start, resource_size(&res));
+ if (!base)
+ goto out;
/*
* set output delay adjustments to default values according
@@ -111,6 +113,7 @@ static void quirk_mpc8360e_qe_enet10(void)
setbits32((base + 0xac), 0x0000c000);
}
iounmap(base);
+out:
of_node_put(np_par);
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 6b1436abe9b1..915ab6710b93 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -218,12 +218,6 @@ static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
-
-static struct irqaction mpc85xxcds_8259_irqaction = {
- .handler = mpc85xx_8259_cascade_action,
- .flags = IRQF_SHARED | IRQF_NO_THREAD,
- .name = "8259 cascade",
-};
#endif /* PPC_I8259 */
#endif /* CONFIG_PCI */
@@ -271,7 +265,10 @@ static int mpc85xx_cds_8259_attach(void)
* disabled when the last user of the shared IRQ line frees their
* interrupt.
*/
- if ((ret = setup_irq(cascade_irq, &mpc85xxcds_8259_irqaction))) {
+ ret = request_irq(cascade_irq, mpc85xx_8259_cascade_action,
+ IRQF_SHARED | IRQF_NO_THREAD, "8259 cascade",
+ cascade_node);
+ if (ret) {
printk(KERN_ERR "Failed to setup cascade interrupt\n");
return ret;
}
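
The same conversion pattern recurs through the following platform hunks: a file-scope struct irqaction plus setup_irq() collapses into a single request_irq() call carrying the same handler, flags and name. A minimal sketch (demo_* names are illustrative):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_init(unsigned int irq, void *dev_id)
{
	/* For IRQF_SHARED lines, dev_id must be non-NULL so the
	 * handler can be matched again at free_irq() time. */
	return request_irq(irq, demo_handler,
			   IRQF_SHARED | IRQF_NO_THREAD, "demo", dev_id);
}
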
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index a43ee7d1ff85..4db4ca2e1222 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -120,12 +120,6 @@ static irqreturn_t cpm_error_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction cpm_error_irqaction = {
- .handler = cpm_error_interrupt,
- .flags = IRQF_NO_THREAD,
- .name = "error",
-};
-
static const struct irq_domain_ops cpm_pic_host_ops = {
.map = cpm_pic_host_map,
};
@@ -187,7 +181,8 @@ unsigned int __init cpm_pic_init(void)
if (!eirq)
goto end;
- if (setup_irq(eirq, &cpm_error_irqaction))
+ if (request_irq(eirq, cpm_error_interrupt, IRQF_NO_THREAD, "error",
+ NULL))
printk(KERN_ERR "Could not allocate CPM error IRQ!");
setbits32(&cpic_reg->cpic_cicr, CICR_IEN);
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index f1c805c8adbc..df4d57d07f9a 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -39,12 +39,6 @@ static irqreturn_t timebase_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction tbint_irqaction = {
- .handler = timebase_interrupt,
- .flags = IRQF_NO_THREAD,
- .name = "tbint",
-};
-
/* per-board overridable init_internal_rtc() function. */
void __init __attribute__ ((weak))
init_internal_rtc(void)
@@ -157,7 +151,8 @@ void __init mpc8xx_calibrate_decr(void)
(TBSCR_TBF | TBSCR_TBE));
immr_unmap(sys_tmr2);
- if (setup_irq(virq, &tbint_irqaction))
+ if (request_irq(virq, timebase_interrupt, IRQF_NO_THREAD, "tbint",
+ NULL))
panic("Could not allocate timer IRQ!");
}
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 6caedc88474f..0c3c1902135c 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -397,7 +397,7 @@ config PPC_KUAP
config PPC_KUAP_DEBUG
bool "Extra debugging for Kernel Userspace Access Protection"
- depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
+ depends on PPC_KUAP && (PPC_RADIX_MMU || PPC_32)
help
Add extra debugging for Kernel Userspace Access Protection (KUAP)
If you're unsure, say N.
@@ -425,6 +425,12 @@ config PPC_MM_SLICES
config PPC_HAVE_PMU_SUPPORT
bool
+config PMU_SYSFS
+ bool "Create PMU SPRs sysfs file"
+ default n
+ help
+ This option enables sysfs file creation for PMU SPRs like MMCR* and PMC*.
+
config PPC_PERF_CTRS
def_bool y
depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index ea5e45e32683..f5d0bf999759 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -146,7 +146,6 @@ static int __init amigaone_probe(void)
*/
cur_cpu_spec->cpu_features &= ~CPU_FTR_NEED_COHERENT;
- ISA_DMA_THRESHOLD = 0x00ffffff;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 57c4e0e86c88..ca2555b8a0c2 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -480,10 +480,6 @@ void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
- if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
- msic, &fops_msic)) {
- pr_devel("axon_msi: debugfs_create_file failed!\n");
- return;
- }
+ debugfs_create_file(name, 0600, powerpc_debugfs_root, msic, &fops_msic);
}
#endif /* DEBUG */
diff --git a/arch/powerpc/platforms/cell/spufs/.gitignore b/arch/powerpc/platforms/cell/spufs/.gitignore
index a09ee8d84d6c..5f3eb224f653 100644
--- a/arch/powerpc/platforms/cell/spufs/.gitignore
+++ b/arch/powerpc/platforms/cell/spufs/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
spu_save_dump.h
spu_restore_dump.h
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 5c3f5d088c3b..d56b4e3241cd 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -177,7 +177,7 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
- /* fall through */
+ fallthrough;
case MFC_CNTL_SUSPEND_COMPLETE:
if (csa)
csa->priv2.mfc_control_RW =
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index fcf6f2342ef4..65a7e01a8f7d 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -451,13 +451,6 @@ static void __init chrp_find_openpic(void)
of_node_put(np);
}
-#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_XMON)
-static struct irqaction xmon_irqaction = {
- .handler = xmon_irq,
- .name = "XMON break",
-};
-#endif
-
static void __init chrp_find_8259(void)
{
struct device_node *np, *pic = NULL;
@@ -541,8 +534,11 @@ static void __init chrp_init_IRQ(void)
if (of_node_is_type(kbd->parent, "adb"))
break;
of_node_put(kbd);
- if (kbd)
- setup_irq(HYDRA_INT_ADB_NMI, &xmon_irqaction);
+ if (kbd) {
+ if (request_irq(HYDRA_INT_ADB_NMI, xmon_irq, 0, "XMON break",
+ NULL))
+ pr_err("Failed to register XMON break interrupt\n");
+ }
#endif
}
@@ -573,7 +569,6 @@ static int __init chrp_probe(void)
if (strcmp(dtype, "chrp"))
return 0;
- ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 6f019df37916..15b2c6eb506d 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -291,23 +291,6 @@ static int __init maple_probe(void)
return 1;
}
-define_machine(maple) {
- .name = "Maple",
- .probe = maple_probe,
- .setup_arch = maple_setup_arch,
- .init_IRQ = maple_init_IRQ,
- .pci_irq_fixup = maple_pci_irq_fixup,
- .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
- .restart = maple_restart,
- .halt = maple_halt,
- .get_boot_time = maple_get_boot_time,
- .set_rtc_time = maple_set_rtc_time,
- .get_rtc_time = maple_get_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = maple_progress,
- .power_save = power4_idle,
-};
-
#ifdef CONFIG_EDAC
/*
* Register a platform device for CPC925 memory controller on
@@ -364,3 +347,20 @@ static int __init maple_cpc925_edac_setup(void)
}
machine_device_initcall(maple, maple_cpc925_edac_setup);
#endif
+
+define_machine(maple) {
+ .name = "Maple",
+ .probe = maple_probe,
+ .setup_arch = maple_setup_arch,
+ .init_IRQ = maple_init_IRQ,
+ .pci_irq_fixup = maple_pci_irq_fixup,
+ .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
+ .restart = maple_restart,
+ .halt = maple_halt,
+ .get_boot_time = maple_get_boot_time,
+ .set_rtc_time = maple_set_rtc_time,
+ .get_rtc_time = maple_get_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = maple_progress,
+ .power_save = power4_idle,
+};
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 2e969073473d..4921bccf0376 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -250,20 +250,6 @@ static unsigned int pmac_pic_get_irq(void)
return irq_linear_revmap(pmac_pic_host, irq);
}
-#ifdef CONFIG_XMON
-static struct irqaction xmon_action = {
- .handler = xmon_irq,
- .flags = IRQF_NO_THREAD,
- .name = "NMI - XMON"
-};
-#endif
-
-static struct irqaction gatwick_cascade_action = {
- .handler = gatwick_action,
- .flags = IRQF_NO_THREAD,
- .name = "cascade",
-};
-
static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
@@ -384,12 +370,17 @@ static void __init pmac_pic_probe_oldstyle(void)
out_le32(&pmac_irq_hw[i]->enable, 0);
/* Hookup cascade irq */
- if (slave && pmac_irq_cascade)
- setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
+ if (slave && pmac_irq_cascade) {
+ if (request_irq(pmac_irq_cascade, gatwick_action,
+ IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to register cascade interrupt\n");
+ }
printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
- setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
+ i = irq_create_mapping(NULL, 20);
+ if (request_irq(i, xmon_irq, IRQF_NO_THREAD, "NMI - XMON", NULL))
+ pr_err("Failed to register NMI-XMON interrupt\n");
#endif
}
@@ -441,7 +432,9 @@ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
nmi_irq = irq_of_parse_and_map(pswitch, 0);
if (nmi_irq) {
mpic_irq_set_priority(nmi_irq, 9);
- setup_irq(nmi_irq, &xmon_action);
+ if (request_irq(nmi_irq, xmon_irq, IRQF_NO_THREAD,
+ "NMI - XMON", NULL))
+ pr_err("Failed to register NMI-XMON interrupt\n");
}
of_node_put(pswitch);
}
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index c6d5333729ed..95fb4feb6ccc 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -586,7 +586,6 @@ static int __init pmac_probe(void)
#ifdef CONFIG_PPC32
/* isa_io_base gets set in pmac_pci_init */
- ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 1;
DMA_MODE_WRITE = 2;
#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index f95fbdee6efe..be2ab5b11e57 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -399,21 +399,19 @@ static int __init smp_psurge_kick_cpu(int nr)
return 0;
}
-static struct irqaction psurge_irqaction = {
- .handler = psurge_ipi_intr,
- .flags = IRQF_PERCPU | IRQF_NO_THREAD,
- .name = "primary IPI",
-};
-
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
+ unsigned long flags = IRQF_PERCPU | IRQF_NO_THREAD;
+ int irq;
+
if (cpu_nr != 0 || !psurge_start)
return;
/* reset the entry point so if we get another intr we won't
* try to startup again */
out_be32(psurge_start, 0x100);
- if (setup_irq(irq_create_mapping(NULL, 30), &psurge_irqaction))
+ irq = irq_create_mapping(NULL, 30);
+ if (request_irq(irq, psurge_ipi_intr, flags, "primary IPI", NULL))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
@@ -660,13 +658,13 @@ static void smp_core99_gpio_tb_freeze(int freeze)
#endif /* !CONFIG_PPC64 */
-/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
-volatile static long int core99_l2_cache;
-volatile static long int core99_l3_cache;
-
static void core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
+ /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
+ static long int core99_l2_cache;
+ static long int core99_l3_cache;
+
if (!cpu_has_feature(CPU_FTR_L2CR))
return;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 6f300ab7f0e9..79409e005fcd 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -40,15 +40,8 @@ static int eeh_event_irq = -EINVAL;
void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
{
- struct pci_dn *pdn = pci_get_pdn(pdev);
-
- if (!pdn || eeh_has_flag(EEH_FORCE_DISABLED))
- return;
-
dev_dbg(&pdev->dev, "EEH: Setting up device\n");
- eeh_add_device_early(pdn);
- eeh_add_device_late(pdev);
- eeh_sysfs_add_device(pdev);
+ eeh_probe_device(pdev);
}
static int pnv_eeh_init(void)
@@ -347,23 +340,13 @@ static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
/**
* pnv_eeh_probe - Do probe on PCI device
- * @pdn: PCI device node
- * @data: unused
+ * @pdev: pci_dev to probe
*
- * When EEH module is installed during system boot, all PCI devices
- * are checked one by one to see if it supports EEH. The function
- * is introduced for the purpose. By default, EEH has been enabled
- * on all PCI devices. That's to say, we only need do necessary
- * initialization on the corresponding eeh device and create PE
- * accordingly.
- *
- * It's notable that's unsafe to retrieve the EEH device through
- * the corresponding PCI device. During the PCI device hotplug, which
- * was possiblly triggered by EEH core, the binding between EEH device
- * and the PCI device isn't built yet.
+ * Create, or find the existing, eeh_dev for this pci_dev.
*/
-static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
+static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev)
{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
struct pci_controller *hose = pdn->phb;
struct pnv_phb *phb = hose->private_data;
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
@@ -380,6 +363,14 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
if (!edev || edev->pe)
return NULL;
+ /* already configured? */
+ if (edev->pdev) {
+ pr_debug("%s: found existing edev for %04x:%02x:%02x.%01x\n",
+ __func__, hose->global_number, config_addr >> 8,
+ PCI_SLOT(config_addr), PCI_FUNC(config_addr));
+ return edev;
+ }
+
/* Skip for PCI-ISA bridge */
if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
return NULL;
@@ -471,7 +462,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
eeh_edev_dbg(edev, "EEH enabled on device\n");
- return NULL;
+ return edev;
}
/**
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index eb2e75dac369..13b369d2cc45 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -187,11 +187,6 @@ static int memtrace_init_debugfs(void)
snprintf(ent->name, 16, "%08x", ent->nid);
dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
- if (!dir) {
- pr_err("Failed to create debugfs directory for node %d\n",
- ent->nid);
- return -1;
- }
ent->dir = dir;
debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
@@ -236,16 +231,10 @@ static int memtrace_online(void)
continue;
}
- /*
- * If kernel isn't compiled with the auto online option
- * we need to online the memory ourselves.
- */
- if (!memhp_auto_online) {
- lock_device_hotplug();
- walk_memory_blocks(ent->start, ent->size, NULL,
- online_mem_block);
- unlock_device_hotplug();
- }
+ lock_device_hotplug();
+ walk_memory_blocks(ent->start, ent->size, NULL,
+ online_mem_block);
+ unlock_device_hotplug();
/*
* Memory was added successfully so clean up references to it
@@ -314,8 +303,6 @@ static int memtrace_init(void)
{
memtrace_debugfs_dir = debugfs_create_dir("memtrace",
powerpc_debugfs_root);
- if (!memtrace_debugfs_dir)
- return -1;
debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
NULL, &memtrace_init_fops);
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index ed895d82c048..6dba3b62269f 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -71,6 +71,7 @@ static LIST_HEAD(opalcore_list);
static struct opalcore_config *oc_conf;
static const struct opal_mpipl_fadump *opalc_metadata;
static const struct opal_mpipl_fadump *opalc_cpu_metadata;
+struct kobject *mpipl_kobj;
/*
* Set crashing CPU's signal to SIGUSR1. if the kernel is triggered
@@ -428,7 +429,7 @@ static void opalcore_cleanup(void)
return;
/* Remove OPAL core sysfs file */
- sysfs_remove_bin_file(opal_kobj, &opal_core_attr);
+ sysfs_remove_bin_file(mpipl_kobj, &opal_core_attr);
oc_conf->ptload_phdr = NULL;
oc_conf->ptload_cnt = 0;
@@ -563,9 +564,9 @@ error_out:
of_node_put(np);
}
-static ssize_t fadump_release_opalcore_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t release_core_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int input = -1;
@@ -589,9 +590,23 @@ static ssize_t fadump_release_opalcore_store(struct kobject *kobj,
return count;
}
-static struct kobj_attribute opalcore_rel_attr = __ATTR(fadump_release_opalcore,
- 0200, NULL,
- fadump_release_opalcore_store);
+static struct kobj_attribute opalcore_rel_attr = __ATTR_WO(release_core);
+
+static struct attribute *mpipl_attr[] = {
+ &opalcore_rel_attr.attr,
+ NULL,
+};
+
+static struct bin_attribute *mpipl_bin_attr[] = {
+ &opal_core_attr,
+ NULL,
+};
+
+static struct attribute_group mpipl_group = {
+ .attrs = mpipl_attr,
+ .bin_attrs = mpipl_bin_attr,
+};
static int __init opalcore_init(void)
{
@@ -609,7 +624,7 @@ static int __init opalcore_init(void)
* then capture the dump.
*/
if (!(is_opalcore_usable())) {
- pr_err("Failed to export /sys/firmware/opal/core\n");
+ pr_err("Failed to export /sys/firmware/opal/mpipl/core\n");
opalcore_cleanup();
return rc;
}
@@ -617,18 +632,28 @@ static int __init opalcore_init(void)
/* Set OPAL core file size */
opal_core_attr.size = oc_conf->opalcore_size;
+ mpipl_kobj = kobject_create_and_add("mpipl", opal_kobj);
+ if (!mpipl_kobj) {
+ pr_err("unable to create mpipl kobject\n");
+ return -ENOMEM;
+ }
+
/* Export OPAL core sysfs file */
- rc = sysfs_create_bin_file(opal_kobj, &opal_core_attr);
- if (rc != 0) {
- pr_err("Failed to export /sys/firmware/opal/core\n");
+ rc = sysfs_create_group(mpipl_kobj, &mpipl_group);
+ if (rc) {
+ pr_err("mpipl sysfs group creation failed (%d)", rc);
opalcore_cleanup();
return rc;
}
-
- rc = sysfs_create_file(kernel_kobj, &opalcore_rel_attr.attr);
+ /* The /sys/firmware/opal/core file has moved to the
+ * /sys/firmware/opal/mpipl/ directory; create a symlink at the old
+ * location to maintain backward compatibility.
+ */
+ rc = compat_only_sysfs_link_entry_to_kobj(opal_kobj, mpipl_kobj,
+ "core", NULL);
if (rc) {
- pr_warn("unable to create sysfs file fadump_release_opalcore (%d)\n",
- rc);
+ pr_err("unable to create core symlink (%d)\n", rc);
+ return rc;
}
return 0;
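
A sketch of the sysfs rearrangement above (demo_sysfs_init is illustrative, not from the patch): one attribute_group carries both the plain release_core attribute and the binary core file, and a compatibility symlink keeps the old /sys/firmware/opal/core path alive:

static int demo_sysfs_init(struct kobject *parent)
{
	struct kobject *dir;
	int rc;

	dir = kobject_create_and_add("mpipl", parent);
	if (!dir)
		return -ENOMEM;

	/* Registers both .attrs and .bin_attrs members in one call. */
	rc = sysfs_create_group(dir, &mpipl_group);
	if (rc)
		return rc;

	/* Old path keeps working via a symlink. */
	return compat_only_sysfs_link_entry_to_kobj(parent, dir, "core", NULL);
}
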
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 000b350d4060..968b9a4d1cd9 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -35,11 +35,10 @@ static int imc_mem_set(void *data, u64 val)
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");
-static struct dentry *imc_debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value)
+static void imc_debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value)
{
- return debugfs_create_file_unsafe(name, mode, parent,
- value, &fops_imc_x64);
+ debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64);
}
/*
@@ -59,9 +58,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
- if (!imc_debugfs_parent)
- return;
-
if (of_property_read_u32(node, "cb_offset", &cb_offset))
cb_offset = IMC_CNTL_BLK_OFFSET;
@@ -69,21 +65,15 @@ static void export_imc_mode_and_cmd(struct device_node *node,
loc = (u64)(ptr->vbase) + cb_offset;
imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
- if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
- imc_mode_addr))
- goto err;
+ imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
+ imc_mode_addr);
imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
- if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
- imc_cmd_addr))
- goto err;
+ imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
+ imc_cmd_addr);
ptr++;
}
- return;
-
-err:
- debugfs_remove_recursive(imc_debugfs_parent);
}
/*
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 22c22cd7bd82..57d3a6af1d52 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3174,11 +3174,6 @@ static void pnv_pci_ioda_create_dbgfs(void)
sprintf(name, "PCI%04x", hose->global_number);
phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
- if (!phb->dbgfs) {
- pr_warn("%s: Error on creating debugfs on PHB#%x\n",
- __func__, hose->global_number);
- continue;
- }
debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
phb, &pnv_pci_diag_data_fops);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 11fdae81b5dd..3bc188da82ba 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -229,7 +229,7 @@ static void __noreturn pnv_restart(char *cmd)
pnv_prepare_going_down();
do {
- if (!cmd)
+ if (!cmd || !strlen(cmd))
rc = opal_cec_reboot();
else if (strcmp(cmd, "full") == 0)
rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL);
@@ -237,6 +237,8 @@ static void __noreturn pnv_restart(char *cmd)
rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, NULL);
else if (strcmp(cmd, "error") == 0)
rc = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, NULL);
+ else if (strcmp(cmd, "fast") == 0)
+ rc = opal_cec_reboot2(OPAL_REBOOT_FAST, NULL);
else
rc = OPAL_UNSUPPORTED;
diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
index 09e63df53c30..44035a3d6414 100644
--- a/arch/powerpc/platforms/powernv/vas-debug.c
+++ b/arch/powerpc/platforms/powernv/vas-debug.c
@@ -115,7 +115,7 @@ void vas_window_free_dbgdir(struct vas_window *window)
void vas_window_init_dbgdir(struct vas_window *window)
{
- struct dentry *f, *d;
+ struct dentry *d;
if (!window->vinst->dbgdir)
return;
@@ -127,28 +127,10 @@ void vas_window_init_dbgdir(struct vas_window *window)
snprintf(window->dbgname, 16, "w%d", window->winid);
d = debugfs_create_dir(window->dbgname, window->vinst->dbgdir);
- if (IS_ERR(d))
- goto free_name;
-
window->dbgdir = d;
- f = debugfs_create_file("info", 0444, d, window, &info_fops);
- if (IS_ERR(f))
- goto remove_dir;
-
- f = debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops);
- if (IS_ERR(f))
- goto remove_dir;
-
- return;
-
-remove_dir:
- debugfs_remove_recursive(window->dbgdir);
- window->dbgdir = NULL;
-
-free_name:
- kfree(window->dbgname);
- window->dbgname = NULL;
+ debugfs_create_file("info", 0444, d, window, &info_fops);
+ debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops);
}
void vas_instance_init_dbgdir(struct vas_instance *vinst)
@@ -156,8 +138,6 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
struct dentry *d;
vas_init_dbgdir();
- if (!vas_debugfs)
- return;
vinst->dbgname = kzalloc(16, GFP_KERNEL);
if (!vinst->dbgname)
@@ -166,16 +146,7 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
snprintf(vinst->dbgname, 16, "v%d", vinst->vas_id);
d = debugfs_create_dir(vinst->dbgname, vas_debugfs);
- if (IS_ERR(d))
- goto free_name;
-
vinst->dbgdir = d;
- return;
-
-free_name:
- kfree(vinst->dbgname);
- vinst->dbgname = NULL;
- vinst->dbgdir = NULL;
}
/*
@@ -191,6 +162,4 @@ void vas_init_dbgdir(void)
first_time = false;
vas_debugfs = debugfs_create_dir("vas", NULL);
- if (IS_ERR(vas_debugfs))
- vas_debugfs = NULL;
}
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index a3c74a5cf20d..c8a2b0b05ac0 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -29,6 +29,4 @@ obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
-ifdef CONFIG_PPC_PSERIES
obj-$(CONFIG_SUSPEND) += suspend.o
-endif
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 893ba3f562c4..845342814edc 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -67,8 +67,7 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index];
}
#endif
- eeh_add_device_early(pdn);
- eeh_add_device_late(pdev);
+ pseries_eeh_init_edev(pdn);
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn) {
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
@@ -78,7 +77,7 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
eeh_add_to_parent_pe(edev); /* Add as VF PE type */
}
#endif
- eeh_sysfs_add_device(pdev);
+ eeh_probe_device(pdev);
}
/*
@@ -222,15 +221,16 @@ static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
}
/**
- * pseries_eeh_probe - EEH probe on the given device
+ * pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn
+ *
* @pdn: PCI device node
- * @data: Unused
*
- * When EEH module is installed during system boot, all PCI devices
- * are checked one by one to see if it supports EEH. The function
- * is introduced for the purpose.
+ * When we discover a new PCI device via the device-tree we create a
+ * corresponding pci_dn and we allocate, but don't initialise, an eeh_dev.
+ * This function takes care of the initialisation and inserts the eeh_dev
+ * into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
*/
-static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
+void pseries_eeh_init_edev(struct pci_dn *pdn)
{
struct eeh_dev *edev;
struct eeh_pe pe;
@@ -238,18 +238,35 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
int enable = 0;
int ret;
- /* Retrieve OF node and eeh device */
+ if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)))
+ return;
+
+ /*
+ * Find the eeh_dev for this pdn. The storage for the eeh_dev was
+ * allocated at the same time as the pci_dn.
+ *
+ * XXX: We should probably re-visit that.
+ */
edev = pdn_to_eeh_dev(pdn);
- if (!edev || edev->pe)
- return NULL;
+ if (!edev)
+ return;
+
+ /*
+ * If ->pe is set then we've already probed this device. We hit
+ * this path when a pci_dev is removed and rescanned while recovering
+ * a PE (i.e. for devices where the driver doesn't support error
+ * recovery).
+ */
+ if (edev->pe)
+ return;
/* Check class/vendor/device IDs */
if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
- return NULL;
+ return;
/* Skip for PCI-ISA bridge */
if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
- return NULL;
+ return;
eeh_edev_dbg(edev, "Probing device\n");
@@ -316,9 +333,49 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
/* Save memory bars */
eeh_save_bars(edev);
+}
+
+static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev)
+{
+ struct eeh_dev *edev;
+ struct pci_dn *pdn;
+
+ pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);
+ if (!pdn)
+ return NULL;
+
+ /*
+ * If the system supports EEH on this device then the eeh_dev was
+ * configured and inserted into a PE in pseries_eeh_init_edev()
+ */
+ edev = pdn_to_eeh_dev(pdn);
+ if (!edev || !edev->pe)
+ return NULL;
+
+ return edev;
+}
+
+/**
+ * pseries_eeh_init_edev_recursive - Enable EEH for the indicated device
+ * @pdn: PCI device node
+ *
+ * This routine must be used to perform EEH initialization for the
+ * indicated PCI device that was added after system boot (e.g.
+ * hotplug, dlpar).
+ */
+void pseries_eeh_init_edev_recursive(struct pci_dn *pdn)
+{
+ struct pci_dn *n;
+
+ if (!pdn)
+ return;
+
+ list_for_each_entry(n, &pdn->child_list, list)
+ pseries_eeh_init_edev_recursive(n);
- return NULL;
+ pseries_eeh_init_edev(pdn);
}
+EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive);
/**
* pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index a4d40a3ceea3..b2cde1732301 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -223,7 +223,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
struct drmem_lmb **end_lmb)
{
struct drmem_lmb *lmb, *start, *end;
- struct drmem_lmb *last_lmb;
+ struct drmem_lmb *limit;
start = NULL;
for_each_drmem_lmb(lmb) {
@@ -236,10 +236,10 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
if (!start)
return -EINVAL;
- end = &start[n_lmbs - 1];
+ end = &start[n_lmbs];
- last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
- if (end > last_lmb)
+ limit = &drmem_info->lmbs[drmem_info->n_lmbs];
+ if (end > limit)
return -EINVAL;
*start_lmb = start;
@@ -360,7 +360,7 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
for (i = 0; i < scns_per_block; i++) {
pfn = PFN_DOWN(phys_addr);
- if (!pfn_present(pfn)) {
+ if (!pfn_in_present_section(pfn)) {
phys_addr += MIN_MEMORY_BLOCK_SIZE;
continue;
}
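
The get_lmb_range() fix above switches to the exclusive-end convention; a generic sketch of why that is the safe form (pointing one past the last element is always valid C, and the comparison rejects any range that would run off the array):

/* Validate that [start, start + count) lies inside array[0 .. n). */
static int range_ok(const struct drmem_lmb *array, size_t n,
		    const struct drmem_lmb *start, size_t count)
{
	const struct drmem_lmb *end = start + count;	/* exclusive */
	const struct drmem_lmb *limit = array + n;	/* one past the end */

	return end <= limit;
}
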
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 3c3da25b445c..e4ed5317f117 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -636,8 +636,16 @@ static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
static int __init vcpudispatch_stats_procfs_init(void)
{
- if (!lppaca_shared_proc(get_lppaca()))
+ /*
+ * Avoid smp_processor_id while preemptible. All CPUs should have
+ * the same value for lppaca_shared_proc.
+ */
+ preempt_disable();
+ if (!lppaca_shared_proc(get_lppaca())) {
+ preempt_enable();
return 0;
+ }
+ preempt_enable();
if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
&vcpudispatch_stats_proc_ops))
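
The shape of the guard added above, sketched standalone: get_lppaca() dereferences a per-CPU pointer, so it must run non-preemptible to silence the debug_smp_processor_id() warning; since every CPU sees the same lppaca_shared_proc() answer, briefly pinning to any CPU is sufficient:

static bool shared_proc_sketch(void)
{
	bool shared;

	preempt_disable();
	shared = lppaca_shared_proc(get_lppaca());
	preempt_enable();

	return shared;
}
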
diff --git a/arch/powerpc/platforms/pseries/of_helpers.c b/arch/powerpc/platforms/pseries/of_helpers.c
index 66dfd8256712..23241c71ef37 100644
--- a/arch/powerpc/platforms/pseries/of_helpers.c
+++ b/arch/powerpc/platforms/pseries/of_helpers.c
@@ -88,7 +88,7 @@ int of_read_drc_info_cell(struct property **prop, const __be32 **curval,
return -EINVAL;
/* Should now know end of current entry */
- (*curval) = (void *)p2;
+ (*curval) = (void *)(++p2);
data->last_drc_index = data->drc_index_start +
((data->num_sequential_elems - 1) * data->sequential_inc);
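
The ++p2 fix above, as a sketch under the assumption that p2 points at the last cell of the entry just parsed: the cursor handed back through curval must point one cell past that value, otherwise the next of_read_drc_info_cell() call re-reads the final cell (read_last_cell is an illustrative helper):

static void read_last_cell(const __be32 **curval, const __be32 *p2, u32 *out)
{
	*out = be32_to_cpu(*p2);	/* consume the final cell */
	*curval = (void *)(++p2);	/* hand back a cursor past it */
}
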
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 0b4467e378e5..e4606100e286 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -246,8 +246,9 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
return 0;
}
-int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
- unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
+static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
{
struct nd_cmd_get_config_size *get_size_hdr;
struct papr_scm_priv *p;
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 361986e4354e..b3a38f5a6b68 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -37,7 +37,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
eeh_dev_phb_init_dynamic(phb);
if (dn->child)
- eeh_add_device_tree_early(PCI_DN(dn));
+ pseries_eeh_init_edev_recursive(PCI_DN(dn));
pcibios_scan_phb(phb);
pcibios_finish_adding_to_bus(phb->bus);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 1d7f973c647b..aa6208c8d4f0 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -558,6 +558,9 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
switch (mce_log->error_type) {
case MC_ERROR_TYPE_UE:
mce_err.error_type = MCE_ERROR_TYPE_UE;
+ mce_common_process_ue(regs, &mce_err);
+ if (mce_err.ignore_event)
+ disposition = RTAS_DISP_FULLY_RECOVERED;
switch (err_sub_type) {
case MC_ERROR_UE_IFETCH:
mce_err.u.ue_error_type = MCE_UE_ERROR_IFETCH;
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index f682b7babc09..37f1f25ba804 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1628,7 +1628,6 @@ const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
}
EXPORT_SYMBOL(vio_get_attribute);
-#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
* kobject name
*/
@@ -1698,7 +1697,6 @@ int vio_disable_interrupts(struct vio_dev *dev)
return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
-#endif /* CONFIG_PPC_PSERIES */
static int __init vio_init(void)
{
diff --git a/arch/powerpc/platforms/pseries/vphn.c b/arch/powerpc/platforms/pseries/vphn.c
index 3f07bf6c670e..cca474a2c396 100644
--- a/arch/powerpc/platforms/pseries/vphn.c
+++ b/arch/powerpc/platforms/pseries/vphn.c
@@ -82,7 +82,8 @@ long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
- vphn_unpack_associativity(retbuf, associativity);
+ if (rc == H_SUCCESS)
+ vphn_unpack_associativity(retbuf, associativity);
return rc;
}
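
The vphn.c change has a general shape worth noting: hypervisor out-buffers are only defined on H_SUCCESS, so hcall_vphn() now unpacks the associativity data only when the call succeeded. The same rule in miniature; fw_query() here is a hypothetical stand-in for the firmware call:

#include <linux/errno.h>

struct fw_buf { unsigned long words[6]; };

/* Hypothetical firmware call: fills @buf and returns 0 on success. */
extern int fw_query(unsigned int id, struct fw_buf *buf);

int read_fw_value(unsigned int id, unsigned long *out)
{
	struct fw_buf buf;
	int rc = fw_query(id, &buf);

	/* Only consume the buffer when firmware reported success. */
	if (rc)
		return rc;
	*out = buf.words[0];
	return 0;
}
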
diff --git a/arch/powerpc/purgatory/.gitignore b/arch/powerpc/purgatory/.gitignore
index e9e66f178a6d..b8dc6ff34254 100644
--- a/arch/powerpc/purgatory/.gitignore
+++ b/arch/powerpc/purgatory/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
kexec-purgatory.c
purgatory.ro
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 9651ca061828..b294f70f1a67 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/msi.h>
+#include <asm/debugfs.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -68,13 +69,6 @@ static u32 xive_ipi_irq;
/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
-/*
- * A "disabled" interrupt should never fire, to catch problems
- * we set its logical number to this
- */
-#define XIVE_BAD_IRQ 0x7fffffff
-#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
-
/* An invalid CPU target */
#define XIVE_INVALID_TARGET (-1)
@@ -265,11 +259,15 @@ notrace void xmon_xive_do_dump(int cpu)
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
int rc;
u32 target;
u8 prio;
u32 lirq;
+ if (!is_xive_irq(chip))
+ return -EINVAL;
+
rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
if (rc) {
xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
@@ -283,7 +281,10 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
u64 val = xive_esb_read(xd, XIVE_ESB_GET);
- xmon_printf("PQ=%c%c",
+ xmon_printf("flags=%c%c%c PQ=%c%c",
+ xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
val & XIVE_ESB_VAL_P ? 'P' : '-',
val & XIVE_ESB_VAL_Q ? 'Q' : '-');
}
@@ -1150,7 +1151,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
xc = per_cpu(xive_cpu, cpu);
/* Check if we are already setup */
- if (xc->hw_ipi != 0)
+ if (xc->hw_ipi != XIVE_BAD_IRQ)
return 0;
/* Grab an IPI from the backend, this will populate xc->hw_ipi */
@@ -1187,7 +1188,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
/* Disable the IPI and free the IRQ data */
/* Already cleaned up ? */
- if (xc->hw_ipi == 0)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
/* Mask the IPI */
@@ -1343,6 +1344,7 @@ static int xive_prepare_cpu(unsigned int cpu)
if (np)
xc->chip_id = of_get_ibm_chip_id(np);
of_node_put(np);
+ xc->hw_ipi = XIVE_BAD_IRQ;
per_cpu(xive_cpu, cpu) = xc;
}
@@ -1554,3 +1556,107 @@ static int __init xive_off(char *arg)
return 0;
}
__setup("xive=off", xive_off);
+
+void xive_debug_show_cpu(struct seq_file *m, int cpu)
+{
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+
+ seq_printf(m, "CPU %d:", cpu);
+ if (xc) {
+ seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
+
+#ifdef CONFIG_SMP
+ {
+ u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
+
+ seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
+ val & XIVE_ESB_VAL_P ? 'P' : '-',
+ val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ }
+#endif
+ {
+ struct xive_q *q = &xc->queue[xive_irq_priority];
+ u32 i0, i1, idx;
+
+ if (q->qpage) {
+ idx = q->idx;
+ i0 = be32_to_cpup(q->qpage + idx);
+ idx = (idx + 1) & q->msk;
+ i1 = be32_to_cpup(q->qpage + idx);
+ seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
+ q->idx, q->toggle, i0, i1);
+ }
+ }
+ }
+ seq_puts(m, "\n");
+}
+
+void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d)
+{
+ struct irq_chip *chip = irq_data_get_irq_chip(d);
+ int rc;
+ u32 target;
+ u8 prio;
+ u32 lirq;
+
+ if (!is_xive_irq(chip))
+ return;
+
+ rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
+ if (rc) {
+ seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
+ return;
+ }
+
+ seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
+ hw_irq, target, prio, lirq);
+
+ if (d) {
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+
+ seq_printf(m, "flags=%c%c%c PQ=%c%c",
+ xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
+ val & XIVE_ESB_VAL_P ? 'P' : '-',
+ val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ }
+ seq_puts(m, "\n");
+}
+
+static int xive_core_debug_show(struct seq_file *m, void *private)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+ int cpu;
+
+ if (xive_ops->debug_show)
+ xive_ops->debug_show(m, private);
+
+ for_each_possible_cpu(cpu)
+ xive_debug_show_cpu(m, cpu);
+
+ for_each_irq_desc(i, desc) {
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ unsigned int hw_irq;
+
+ if (!d)
+ continue;
+
+ hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+ /* IPIs are special (HW number 0) */
+ if (hw_irq)
+ xive_debug_show_irq(m, hw_irq, d);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(xive_core_debug);
+
+int xive_core_debug_init(void)
+{
+ debugfs_create_file("xive", 0400, powerpc_debugfs_root,
+ NULL, &xive_core_debug_fops);
+ return 0;
+}
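
The new xive debugfs entry uses the standard seq_file shortcut: DEFINE_SHOW_ATTRIBUTE() generates <name>_fops from a <name>_show() function via single_open(), and debugfs_create_file() then exposes it. A minimal self-contained sketch of the same wiring; the file name and contents below are illustrative only:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int answer_show(struct seq_file *m, void *private)
{
	seq_printf(m, "answer=%d\n", 42);
	return 0;
}
/* Generates answer_fops wrapping answer_show() through single_open(). */
DEFINE_SHOW_ATTRIBUTE(answer);

static int __init answer_debugfs_init(void)
{
	/* 0400: root-readable only, matching the xive entry above. */
	debugfs_create_file("answer", 0400, NULL, NULL, &answer_fops);
	return 0;
}
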
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 0ff6b739052c..5218fdc4b29a 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -19,6 +19,7 @@
#include <linux/cpumask.h>
#include <linux/mm.h>
+#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -312,7 +313,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
s64 rc;
/* Free the IPI */
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
@@ -320,7 +321,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
break;
}
}
@@ -850,3 +851,5 @@ int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
+
+machine_arch_initcall(powernv, xive_core_debug_init);
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 55dc61cb4867..7ab5c6780997 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/libfdt.h>
+#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -560,11 +561,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
xive_irq_bitmap_free(xc->hw_ipi);
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */
@@ -645,6 +646,21 @@ static void xive_spapr_sync_source(u32 hw_irq)
plpar_int_sync(0, hw_irq);
}
+static int xive_spapr_debug_show(struct seq_file *m, void *private)
+{
+ struct xive_irq_bitmap *xibm;
+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
+ memset(buf, 0, PAGE_SIZE);
+ bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
+ seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
+ }
+ kfree(buf);
+
+ return 0;
+}
+
static const struct xive_ops xive_spapr_ops = {
.populate_irq_data = xive_spapr_populate_irq_data,
.configure_irq = xive_spapr_configure_irq,
@@ -662,6 +678,7 @@ static const struct xive_ops xive_spapr_ops = {
#ifdef CONFIG_SMP
.get_ipi = xive_spapr_get_ipi,
.put_ipi = xive_spapr_put_ipi,
+ .debug_show = xive_spapr_debug_show,
#endif /* CONFIG_SMP */
.name = "spapr",
};
@@ -839,3 +856,5 @@ bool __init xive_spapr_init(void)
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
}
+
+machine_arch_initcall(pseries, xive_core_debug_init);
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index 59cd366e7933..b7b901da2168 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -5,6 +5,13 @@
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H
+/*
+ * A "disabled" interrupt should never fire, to catch problems
+ * we set its logical number to this
+ */
+#define XIVE_BAD_IRQ 0x7fffffff
+#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
+
/* Each CPU carry one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
@@ -50,12 +57,14 @@ struct xive_ops {
int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
+ int (*debug_show)(struct seq_file *m, void *private);
const char *name;
};
bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
+int xive_core_debug_init(void);
static inline u32 xive_alloc_order(u32 queue_shift)
{
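
Moving XIVE_BAD_IRQ into xive-internal.h lets both backends use a non-zero sentinel for "no IPI allocated": on some platforms hardware IRQ number 0 is valid, so the old `hw_ipi == 0` tests could mistake a real allocation for an empty slot. A sketch of the sentinel idiom with illustrative names:

#include <linux/types.h>

#define RES_NONE 0x7fffffff	/* sentinel: never a valid resource id */

struct slot {
	u32 res;
};

static void slot_init(struct slot *s)
{
	s->res = RES_NONE;	/* explicit init, since 0 is a valid id */
}

static bool slot_in_use(const struct slot *s)
{
	return s->res != RES_NONE;
}
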
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index c3842dbeb1b7..6f9cccea54f3 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for xmon
-# Avoid clang warnings around longjmp/setjmp declarations
-subdir-ccflags-y := -ffreestanding
-
GCOV_PROFILE := n
KCOV_INSTRUMENT := n
UBSAN_SANITIZE := n
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0ec9640335bb..7af840c0fc93 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -81,8 +81,9 @@ static bool xmon_is_ro = IS_ENABLED(CONFIG_XMON_DEFAULT_RO_MODE);
static unsigned long adrs;
static int size = 1;
-#define MAX_DUMP (128 * 1024)
+#define MAX_DUMP (64 * 1024)
static unsigned long ndump = 64;
+#define MAX_IDUMP (MAX_DUMP >> 2)
static unsigned long nidump = 16;
static unsigned long ncsum = 4096;
static int termch;
@@ -2712,7 +2713,12 @@ static void dump_by_size(unsigned long addr, long count, int size)
printf("%0*llx", size * 2, val);
}
- printf("\n");
+ printf(" |");
+ for (j = 0; j < 16; ++j) {
+ val = temp[j];
+ putchar(' ' <= val && val <= '~' ? val : '.');
+ }
+ printf("|\n");
}
}
@@ -2756,8 +2762,8 @@ dump(void)
scanhex(&nidump);
if (nidump == 0)
nidump = 16;
- else if (nidump > MAX_DUMP)
- nidump = MAX_DUMP;
+ else if (nidump > MAX_IDUMP)
+ nidump = MAX_IDUMP;
adrs += ppc_inst_dump(adrs, nidump, 1);
last_cmd = "di\n";
} else if (c == 'l') {
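
The dump_by_size() hunk appends the classic hexdump ASCII sidebar: bytes in the printable range 0x20..0x7e are echoed verbatim, everything else becomes '.'. The same check in isolation, shown here as ordinary C with <stdio.h> for brevity (xmon supplies its own printf()/putchar()):

#include <stdio.h>

/* Print the ASCII sidebar for one 16-byte line, hexdump style. */
static void print_ascii_column(const unsigned char *line)
{
	int i;

	putchar('|');
	for (i = 0; i < 16; i++)
		putchar(line[i] >= ' ' && line[i] <= '~' ? line[i] : '.');
	printf("|\n");
}
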
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
index 8a45a37d2af4..574c10f8ff68 100644
--- a/arch/riscv/boot/.gitignore
+++ b/arch/riscv/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
Image
Image.gz
loader
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index ec0ca8c6ab64..3d9410bb4de0 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -1,35 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += bugs.h
-generic-y += checksum.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += div64.h
generic-y += extable.h
generic-y += flat.h
-generic-y += dma.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += fb.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
-generic-y += mm-arch-hooks.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += shmparam.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
generic-y += user.h
-generic-y += vga.h
generic-y += vmlinux.lds.h
-generic-y += xor.h
diff --git a/arch/riscv/kernel/.gitignore b/arch/riscv/kernel/.gitignore
index b51634f6a7cd..e052ed331cc1 100644
--- a/arch/riscv/kernel/.gitignore
+++ b/arch/riscv/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
/vmlinux.lds
diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore
index 97c2d69d0289..11ebee9e4c1d 100644
--- a/arch/riscv/kernel/vdso/.gitignore
+++ b/arch/riscv/kernel/vdso/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
*.tmp
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index cf7248e07f43..be84e32adc4c 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -30,7 +30,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long addr, cause;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
int code = SEGV_MAPERR;
vm_fault_t fault;
@@ -117,7 +117,7 @@ good_area:
* signal first. We do not need to release the mmap_sem because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -144,11 +144,6 @@ good_area:
1, regs, addr);
}
if (fault & VM_FAULT_RETRY) {
- /*
- * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation.
- */
- flags &= ~(FAULT_FLAG_ALLOW_RETRY);
flags |= FAULT_FLAG_TRIED;
/*
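
The riscv fault.c hunks switch to the common helpers shared by all architectures: FAULT_FLAG_DEFAULT replaces the open-coded ALLOW_RETRY|KILLABLE combination, fault_signal_pending() folds in the fatal-signal test, and clearing FAULT_FLAG_ALLOW_RETRY on retry is no longer needed because core mm now arbitrates retries. The resulting canonical retry loop looks roughly like this; a fragment, not the full handler, with mm, vma, addr and regs assumed from the surrounding function:

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	/* ... vma and access checks elided ... */
	fault = handle_mm_fault(vma, addr, flags);

	/* One helper instead of the old open-coded fatal-signal test. */
	if (fault_signal_pending(fault, regs))
		return;	/* mmap_sem already dropped by the fault path */

	if (fault & VM_FAULT_RETRY) {
		/* Record that we tried once; the core limits further retries. */
		flags |= FAULT_FLAG_TRIED;
		goto retry;	/* __lock_page_or_retry dropped mmap_sem */
	}
	up_read(&mm->mmap_sem);
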
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8abe77536d9d..2167bce993ff 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -102,13 +102,13 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_KEEP_MEMBLOCK
- select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
@@ -195,6 +195,7 @@ config S390
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select SWIOTLB
select GENERIC_ALLOCATOR
+ imply IMA_SECURE_AND_OR_TRUSTED_BOOT
config SCHED_OMIT_FRAME_POINTER
@@ -450,14 +451,6 @@ config NR_CPUS
config HOTPLUG_CPU
def_bool y
-# Some NUMA nodes have memory ranges that span
-# other nodes. Even though a pfn is valid and
-# between a node's start and end pfns, it may not
-# reside on that node. See memmap_init_zone()
-# for details. <- They meant memory holes!
-config NODES_SPAN_OTHER_NODES
- def_bool NUMA
-
config NUMA
bool "NUMA support"
depends on SCHED_TOPOLOGY
@@ -467,58 +460,9 @@ config NUMA
This option adds NUMA support to the kernel.
- An operation mode can be selected by appending
- numa=<method> to the kernel command line.
-
- The default behaviour is identical to appending numa=plain to
- the command line. This will create just one node with all
- available memory and all CPUs in it.
-
config NODES_SHIFT
- int "Maximum NUMA nodes (as a power of 2)"
- range 1 10
- depends on NUMA
- default "4"
- help
- Specify the maximum number of NUMA nodes available on the target
- system. Increases memory reserved to accommodate various tables.
-
-menu "Select NUMA modes"
- depends on NUMA
-
-config NUMA_EMU
- bool "NUMA emulation"
- default y
- help
- Numa emulation mode will split the available system memory into
- equal chunks which then are distributed over the configured number
- of nodes in a round-robin manner.
-
- The number of fake nodes is limited by the number of available memory
- chunks (i.e. memory size / fake size) and the number of supported
- nodes in the kernel.
-
- The CPUs are assigned to the nodes in a way that partially respects
- the original machine topology (if supported by the machine).
- Fair distribution of the CPUs is not guaranteed.
-
-config EMU_SIZE
- hex "NUMA emulation memory chunk size"
- default 0x10000000
- range 0x400000 0x100000000
- depends on NUMA_EMU
- help
- Select the default size by which the memory is chopped and then
- assigned to emulated NUMA nodes.
-
- This can be overridden by specifying
-
- emu_size=<n>
-
- on the kernel command line where also suffixes K, M, G, and T are
- supported.
-
-endmenu
+ int
+ default "1"
config SCHED_SMT
def_bool n
@@ -866,15 +810,6 @@ config SECCOMP
If unsure, say Y.
-menu "Power Management"
-
-config ARCH_HIBERNATION_POSSIBLE
- def_bool y
-
-source "kernel/power/Kconfig"
-
-endmenu
-
config CCW
def_bool y
@@ -1009,7 +944,6 @@ config S390_GUEST
select TTY
select VIRTUALIZATION
select VIRTIO
- select VIRTIO_CONSOLE
help
Enabling this option adds support for virtio based paravirtual device
drivers on s390.
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 54f375627532..8bf46d705957 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -75,7 +75,7 @@ struct appldata_os_data {
(waiting for I/O) */
/* per cpu data */
- struct appldata_os_per_cpu os_cpu[0];
+ struct appldata_os_per_cpu os_cpu[];
} __attribute__((packed));
static struct appldata_os_data *appldata_os_data;
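
Replacing os_cpu[0] with os_cpu[] converts a GNU zero-length array into a C99 flexible array member, which lets the compiler and fortify checks know the array is genuinely variable-sized. Allocation typically pairs it with struct_size() to avoid overflow-prone manual arithmetic; a sketch with illustrative names:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct report {
	unsigned int nr;
	struct entry {
		u64 a, b;
	} items[];		/* flexible array member */
};

static struct report *report_alloc(unsigned int nr)
{
	/* struct_size() computes sizeof(*r) + nr * sizeof(r->items[0]) safely. */
	struct report *r = kzalloc(struct_size(r, items, nr), GFP_KERNEL);

	if (r)
		r->nr = nr;
	return r;
}
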
diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore
index 16ff906e4610..b265bfede188 100644
--- a/arch/s390/boot/.gitignore
+++ b/arch/s390/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
image
bzImage
section_cmp.*
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 0ff9261c915e..45b33b83de08 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -37,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o text_dma.o
-obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
+obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index e72fcd7ecebb..765a08f1bd77 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux
vmlinux.lds
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
index bed227f267ae..515b27a996b3 100644
--- a/arch/s390/boot/install.sh
+++ b/arch/s390/boot/install.sh
@@ -21,15 +21,10 @@
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
-# Default install - same as make zlilo
+echo "Warning: '${INSTALLKERNEL}' command not available - additional " \
+ "bootloader config required" >&2
+if [ -f $4/vmlinuz-$1 ]; then mv $4/vmlinuz-$1 $4/vmlinuz-$1.old; fi
+if [ -f $4/System.map-$1 ]; then mv $4/System.map-$1 $4/System.map-$1.old; fi
-if [ -f $4/vmlinuz ]; then
- mv $4/vmlinuz $4/vmlinuz.old
-fi
-
-if [ -f $4/System.map ]; then
- mv $4/System.map $4/System.old
-fi
-
-cat $2 > $4/vmlinuz
-cp $3 $4/System.map
+cat $2 > $4/vmlinuz-$1
+cp $3 $4/System.map-$1
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index 3f501159ee9f..8fde561f1d07 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -3,7 +3,13 @@
#include <asm/facility.h>
#include <asm/sections.h>
+/* will be used in arch/s390/kernel/uv.c */
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
+#endif
+#if IS_ENABLED(CONFIG_KVM)
+struct uv_info __bootdata_preserved(uv_info);
+#endif
void uv_query_info(void)
{
@@ -19,7 +25,21 @@ void uv_query_info(void)
if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
return;
+ if (IS_ENABLED(CONFIG_KVM)) {
+ memcpy(uv_info.inst_calls_list, uvcb.inst_calls_list, sizeof(uv_info.inst_calls_list));
+ uv_info.uv_base_stor_len = uvcb.uv_base_stor_len;
+ uv_info.guest_base_stor_len = uvcb.conf_base_phys_stor_len;
+ uv_info.guest_virt_base_stor_len = uvcb.conf_base_virt_stor_len;
+ uv_info.guest_virt_var_stor_len = uvcb.conf_virt_var_stor_len;
+ uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
+ uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
+ uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
+ uv_info.max_guest_cpus = uvcb.max_guest_cpus;
+ }
+
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
prot_virt_guest = 1;
+#endif
}
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 0c86ba19fa2b..46038bc58c9e 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -532,6 +532,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_NULL_TTY=m
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 6b27d861a9a3..7cd0648c1f4e 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -528,6 +528,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_NULL_TTY=m
+CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 1c23d84a9097..73044634d342 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -342,6 +342,7 @@ static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - n);
}
+ memzero_explicit(&param, sizeof(param));
return ret;
}
@@ -470,6 +471,8 @@ static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
walk.dst.virt.addr, walk.src.virt.addr, n);
ret = skcipher_walk_done(&walk, nbytes - n);
}
+ memzero_explicit(&pcc_param, sizeof(pcc_param));
+ memzero_explicit(&xts_param, sizeof(xts_param));
return ret;
}
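
The aes_s390 hunks wipe the on-stack parameter blocks that held key material before returning. memzero_explicit() is used rather than memset() because the compiler may elide a plain memset of a dying local as a dead store. A minimal sketch of the pattern (the structure and flow here are illustrative):

#include <linux/string.h>
#include <linux/types.h>

struct km_param {
	u8 key[32];
	u8 iv[16];
};

static int do_crypt(const u8 *key)
{
	struct km_param param;

	memcpy(param.key, key, sizeof(param.key));
	/* ... drive the crypto operation with &param ... */

	/* memset() could be optimized away here; memzero_explicit() cannot. */
	memzero_explicit(&param, sizeof(param));
	return 0;
}
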
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 1832ae6442ef..83f6e85de7bc 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -5,21 +5,6 @@ generated-y += syscall_table.h
generated-y += unistd_nr.h
generic-y += asm-offsets.h
-generic-y += cacheflush.h
-generic-y += device.h
-generic-y += dma-mapping.h
-generic-y += div64.h
-generic-y += emergency-restart.h
generic-y += export.h
-generic-y += fb.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kmap_types.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
-generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 37f96b6f0e61..a816fb4734b8 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -9,6 +9,7 @@
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H
+#include <linux/radix-tree.h>
#include <linux/refcount.h>
/* Generic bits for GMAP notification on DAT table entry changes. */
@@ -31,6 +32,7 @@
* @table: pointer to the page directory
* @asce: address space control element for gmap page table
* @pfault_enabled: defines if pfaults are applicable for the guest
+ * @guest_handle: protected virtual machine handle for the ultravisor
* @host_to_rmap: radix tree with gmap_rmap lists
* @children: list of shadow gmap structures
* @pt_list: list of all page tables used in the shadow guest address space
@@ -54,6 +56,8 @@ struct gmap {
unsigned long asce_end;
void *private;
bool pfault_enabled;
+ /* only set for protected virtual machines */
+ unsigned long guest_handle;
/* Additional data for shadow guest address spaces */
struct radix_tree_root host_to_rmap;
struct list_head children;
@@ -144,4 +148,6 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
+int gmap_mark_unmergeable(void);
+void s390_reset_acc(struct mm_struct *mm);
#endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h
index adae176757ae..9078b5b6b837 100644
--- a/arch/s390/include/asm/hw_irq.h
+++ b/arch/s390/include/asm/hw_irq.h
@@ -7,6 +7,5 @@
void __init init_airq_interrupts(void);
void __init init_cio_interrupts(void);
-void __init init_ext_interrupts(void);
#endif
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 084e71b7272a..b63bd66404b8 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -119,6 +119,7 @@ enum diag308_subcode {
DIAG308_LOAD_NORMAL_DUMP = 4,
DIAG308_SET = 5,
DIAG308_STORE = 6,
+ DIAG308_LOAD_NORMAL = 7,
};
enum diag308_rc {
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 1726224e7772..d6bcd34f3ec3 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -127,6 +127,12 @@ struct mcck_volatile_info {
#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \
CR14_EXTERNAL_DAMAGE_SUBMASK)
+#define SIDAD_SIZE_MASK 0xff
+#define sida_origin(sie_block) \
+ ((sie_block)->sidad & PAGE_MASK)
+#define sida_size(sie_block) \
+ ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
+
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
@@ -160,7 +166,13 @@ struct kvm_s390_sie_block {
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
__u32 prog0c; /* 0x000c */
- __u8 reserved10[16]; /* 0x0010 */
+ union {
+ __u8 reserved10[16]; /* 0x0010 */
+ struct {
+ __u64 pv_handle_cpu;
+ __u64 pv_handle_config;
+ };
+ };
#define PROG_BLOCK_SIE (1<<0)
#define PROG_REQUEST (1<<1)
atomic_t prog20; /* 0x0020 */
@@ -209,10 +221,23 @@ struct kvm_s390_sie_block {
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
#define ICPT_KSS 0x5c
+#define ICPT_MCHKREQ 0x60
+#define ICPT_INT_ENABLE 0x64
+#define ICPT_PV_INSTR 0x68
+#define ICPT_PV_NOTIFY 0x6c
+#define ICPT_PV_PREF 0x70
__u8 icptcode; /* 0x0050 */
__u8 icptstatus; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
- __u8 reserved54[2]; /* 0x0054 */
+ __u8 reserved54; /* 0x0054 */
+#define IICTL_CODE_NONE 0x00
+#define IICTL_CODE_MCHK 0x01
+#define IICTL_CODE_EXT 0x02
+#define IICTL_CODE_IO 0x03
+#define IICTL_CODE_RESTART 0x04
+#define IICTL_CODE_SPECIFICATION 0x10
+#define IICTL_CODE_OPERAND 0x11
+ __u8 iictl; /* 0x0055 */
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
@@ -233,7 +258,7 @@ struct kvm_s390_sie_block {
#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
__u32 scaol; /* 0x0064 */
- __u8 reserved68; /* 0x0068 */
+ __u8 sdf; /* 0x0068 */
__u8 epdx; /* 0x0069 */
__u8 reserved6a[2]; /* 0x006a */
__u32 todpr; /* 0x006c */
@@ -249,31 +274,58 @@ struct kvm_s390_sie_block {
#define HPID_KVM 0x4
#define HPID_VSIE 0x5
__u8 hpid; /* 0x00b8 */
- __u8 reservedb9[11]; /* 0x00b9 */
- __u16 extcpuaddr; /* 0x00c4 */
- __u16 eic; /* 0x00c6 */
+ __u8 reservedb9[7]; /* 0x00b9 */
+ union {
+ struct {
+ __u32 eiparams; /* 0x00c0 */
+ __u16 extcpuaddr; /* 0x00c4 */
+ __u16 eic; /* 0x00c6 */
+ };
+ __u64 mcic; /* 0x00c0 */
+ } __packed;
__u32 reservedc8; /* 0x00c8 */
- __u16 pgmilc; /* 0x00cc */
- __u16 iprcc; /* 0x00ce */
- __u32 dxc; /* 0x00d0 */
- __u16 mcn; /* 0x00d4 */
- __u8 perc; /* 0x00d6 */
- __u8 peratmid; /* 0x00d7 */
+ union {
+ struct {
+ __u16 pgmilc; /* 0x00cc */
+ __u16 iprcc; /* 0x00ce */
+ };
+ __u32 edc; /* 0x00cc */
+ } __packed;
+ union {
+ struct {
+ __u32 dxc; /* 0x00d0 */
+ __u16 mcn; /* 0x00d4 */
+ __u8 perc; /* 0x00d6 */
+ __u8 peratmid; /* 0x00d7 */
+ };
+ __u64 faddr; /* 0x00d0 */
+ } __packed;
__u64 peraddr; /* 0x00d8 */
__u8 eai; /* 0x00e0 */
__u8 peraid; /* 0x00e1 */
__u8 oai; /* 0x00e2 */
__u8 armid; /* 0x00e3 */
__u8 reservede4[4]; /* 0x00e4 */
- __u64 tecmc; /* 0x00e8 */
- __u8 reservedf0[12]; /* 0x00f0 */
+ union {
+ __u64 tecmc; /* 0x00e8 */
+ struct {
+ __u16 subchannel_id; /* 0x00e8 */
+ __u16 subchannel_nr; /* 0x00ea */
+ __u32 io_int_parm; /* 0x00ec */
+ __u32 io_int_word; /* 0x00f0 */
+ };
+ } __packed;
+ __u8 reservedf4[8]; /* 0x00f4 */
#define CRYCB_FORMAT_MASK 0x00000003
#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
- __u64 gbea; /* 0x0180 */
+ union {
+ __u64 gbea; /* 0x0180 */
+ __u64 sidad;
+ };
__u8 reserved188[8]; /* 0x0188 */
__u64 sdnxo; /* 0x0190 */
__u8 reserved198[8]; /* 0x0198 */
@@ -292,7 +344,7 @@ struct kvm_s390_sie_block {
__u64 itdba; /* 0x01e8 */
__u64 riccbd; /* 0x01f0 */
__u64 gvrd; /* 0x01f8 */
-} __attribute__((packed));
+} __packed __aligned(512);
struct kvm_s390_itdb {
__u8 data[256];
@@ -301,7 +353,9 @@ struct kvm_s390_itdb {
struct sie_page {
struct kvm_s390_sie_block sie_block;
struct mcck_volatile_info mcck_info; /* 0x0200 */
- __u8 reserved218[1000]; /* 0x0218 */
+ __u8 reserved218[360]; /* 0x0218 */
+ __u64 pv_grregs[16]; /* 0x0380 */
+ __u8 reserved400[512]; /* 0x0400 */
struct kvm_s390_itdb itdb; /* 0x0600 */
__u8 reserved700[2304]; /* 0x0700 */
};
@@ -476,6 +530,7 @@ enum irq_types {
IRQ_PEND_PFAULT_INIT,
IRQ_PEND_EXT_HOST,
IRQ_PEND_EXT_SERVICE,
+ IRQ_PEND_EXT_SERVICE_EV,
IRQ_PEND_EXT_TIMING,
IRQ_PEND_EXT_CPU_TIMER,
IRQ_PEND_EXT_CLOCK_COMP,
@@ -520,6 +575,7 @@ enum irq_types {
(1UL << IRQ_PEND_EXT_TIMING) | \
(1UL << IRQ_PEND_EXT_HOST) | \
(1UL << IRQ_PEND_EXT_SERVICE) | \
+ (1UL << IRQ_PEND_EXT_SERVICE_EV) | \
(1UL << IRQ_PEND_VIRTIO) | \
(1UL << IRQ_PEND_PFAULT_INIT) | \
(1UL << IRQ_PEND_PFAULT_DONE))
@@ -536,6 +592,13 @@ enum irq_types {
#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
(1UL << IRQ_PEND_MCHK_EX))
+#define IRQ_PEND_EXT_II_MASK ((1UL << IRQ_PEND_EXT_CPU_TIMER) | \
+ (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
+ (1UL << IRQ_PEND_EXT_EMERGENCY) | \
+ (1UL << IRQ_PEND_EXT_EXTERNAL) | \
+ (1UL << IRQ_PEND_EXT_SERVICE) | \
+ (1UL << IRQ_PEND_EXT_SERVICE_EV))
+
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@@ -594,6 +657,7 @@ struct kvm_s390_local_interrupt {
struct kvm_s390_float_interrupt {
unsigned long pending_irqs;
+ unsigned long masked_irqs;
spinlock_t lock;
struct list_head lists[FIRQ_LIST_COUNT];
int counters[FIRQ_MAX_COUNT];
@@ -645,6 +709,11 @@ struct kvm_guestdbg_info_arch {
unsigned long last_bp;
};
+struct kvm_s390_pv_vcpu {
+ u64 handle;
+ unsigned long stor_base;
+};
+
struct kvm_vcpu_arch {
struct kvm_s390_sie_block *sie_block;
/* if vsie is active, currently executed shadow sie control block */
@@ -673,6 +742,7 @@ struct kvm_vcpu_arch {
__u64 cputm_start;
bool gs_enabled;
bool skey_enabled;
+ struct kvm_s390_pv_vcpu pv;
};
struct kvm_vm_stat {
@@ -701,9 +771,6 @@ struct s390_io_adapter {
bool masked;
bool swap;
bool suppressible;
- struct rw_semaphore maps_lock;
- struct list_head maps;
- atomic_t nr_maps;
};
#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
@@ -846,6 +913,13 @@ struct kvm_s390_gisa_interrupt {
DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
};
+struct kvm_s390_pv {
+ u64 handle;
+ u64 guest_len;
+ unsigned long stor_base;
+ void *stor_var;
+};
+
struct kvm_arch{
void *sca;
int use_esca;
@@ -881,6 +955,7 @@ struct kvm_arch{
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
struct kvm_s390_gisa_interrupt gisa_int;
+ struct kvm_s390_pv pv;
};
#define KVM_HVA_ERR_BAD (-1UL)
@@ -921,7 +996,7 @@ static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
- struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+ struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
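
The kvm_host.h changes overlay the new protected-virtualization fields on previously reserved areas of the hardware-defined SIE control block, using anonymous unions so every field keeps its architecturally fixed offset, and the block itself becomes __packed __aligned(512). When layering fields like this it is common to pin the layout with compile-time offset assertions; a sketch over a hypothetical structure:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct hw_block {
	u8 flags;		/* 0x00 */
	u8 reserved01[7];	/* 0x01 */
	union {			/* 0x08: meaning depends on mode */
		u64 legacy;
		struct {
			u32 new_lo;
			u32 new_hi;
		};
	};
} __packed;

static inline void hw_block_layout_check(void)
{
	/* Catch accidental layout changes at compile time. */
	BUILD_BUG_ON(offsetof(struct hw_block, legacy) != 0x08);
	BUILD_BUG_ON(sizeof(struct hw_block) != 0x10);
}
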
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 237ee0c4169f..612ed3c6d581 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -141,7 +141,9 @@ struct lowcore {
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
- __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */
+ __u32 return_lpswe; /* 0x0402 */
+ __u32 return_mcck_lpswe; /* 0x0406 */
+ __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */
/*
* 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bcfb6371086f..e12ff0f29d1a 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -16,6 +16,8 @@ typedef struct {
unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
+ /* The mmu context belongs to a secure guest. */
+ atomic_t is_protected;
/*
* The following bitfields need a down_write on the mm
* semaphore when they are written to. As they are only
@@ -32,8 +34,6 @@ typedef struct {
unsigned int uses_cmm:1;
/* The gmaps associated with this context are allowed to use huge pages. */
unsigned int allow_gmap_hpage_1m:1;
- /* The mmu context is for compat task */
- unsigned int compat_mm:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8d04e6f3f796..c9f3d8a52756 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -18,14 +18,16 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ unsigned long asce_type, init_entry;
+
spin_lock_init(&mm->context.lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.flush_count, 0);
+ atomic_set(&mm->context.is_protected, 0);
mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
- mm->context.compat_mm = test_thread_flag(TIF_31BIT);
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||
@@ -36,33 +38,34 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.allow_gmap_hpage_1m = 0;
#endif
switch (mm->context.asce_limit) {
- case _REGION2_SIZE:
+ default:
/*
- * forked 3-level task, fall through to set new asce with new
- * mm->pgd
+ * context created by exec, the value of asce_limit can
+ * only be zero in this case
*/
- case 0:
- /* context created by exec, set asce limit to 4TB */
- mm->context.asce_limit = STACK_TOP_MAX;
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ VM_BUG_ON(mm->context.asce_limit);
+ /* continue as 3-level task */
+ mm->context.asce_limit = _REGION2_SIZE;
+ fallthrough;
+ case _REGION2_SIZE:
+ /* forked 3-level task */
+ init_entry = _REGION3_ENTRY_EMPTY;
+ asce_type = _ASCE_TYPE_REGION3;
break;
- case -PAGE_SIZE:
- /* forked 5-level task, set new asce with new_mm->pgd */
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
+ case TASK_SIZE_MAX:
+ /* forked 5-level task */
+ init_entry = _REGION1_ENTRY_EMPTY;
+ asce_type = _ASCE_TYPE_REGION1;
break;
case _REGION1_SIZE:
- /* forked 4-level task, set new asce with new mm->pgd */
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ /* forked 4-level task */
+ init_entry = _REGION2_ENTRY_EMPTY;
+ asce_type = _ASCE_TYPE_REGION2;
break;
- case _REGION3_SIZE:
- /* forked 2-level compat task, set new asce with new mm->pgd */
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
}
- crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | asce_type;
+ crst_table_init((unsigned long *) mm->pgd, init_entry);
return 0;
}
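
The rewritten init_new_context() normalizes every asce_limit case to an (init_entry, asce_type) pair and uses the kernel's fallthrough macro where the default case deliberately continues into the _REGION2_SIZE case. A minimal sketch of annotated fall-through, with illustrative logic:

#include <linux/compiler_attributes.h>

static int classify(int level)
{
	int bonus = 0;

	switch (level) {
	case 0:
		bonus += 1;
		fallthrough;	/* level 0 also earns the level-1 bonus */
	case 1:
		bonus += 2;
		break;
	default:
		bonus = -1;
	}
	return bonus;
}
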
diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
index 35f8cbe7e5bb..23cd5d1b734b 100644
--- a/arch/s390/include/asm/numa.h
+++ b/arch/s390/include/asm/numa.h
@@ -13,24 +13,13 @@
#ifdef CONFIG_NUMA
#include <linux/numa.h>
-#include <linux/cpumask.h>
void numa_setup(void);
-int numa_pfn_to_nid(unsigned long pfn);
-int __node_distance(int a, int b);
-void numa_update_cpu_topology(void);
-
-extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
-extern int numa_debug_enabled;
#else
static inline void numa_setup(void) { }
-static inline void numa_update_cpu_topology(void) { }
-static inline int numa_pfn_to_nid(unsigned long pfn)
-{
- return 0;
-}
#endif /* CONFIG_NUMA */
+
#endif /* _ASM_S390_NUMA_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 1019efd85b9d..f2d4c1bd3429 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -153,6 +153,11 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
+#if IS_ENABLED(CONFIG_PGSTE)
+int arch_make_page_accessible(struct page *page);
+#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+#endif
+
#endif /* !__ASSEMBLY__ */
#define __PAGE_OFFSET 0x0UL
@@ -161,20 +166,20 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define __pa(x) ((unsigned long)(x))
#define __va(x) ((void *)(unsigned long)(x))
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
+#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
+#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+
+#define pfn_to_virt(pfn) __va(pfn_to_phys(pfn))
+#define virt_to_pfn(kaddr) (phys_to_pfn(__pa(kaddr)))
#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
-#define phys_to_pfn(kaddr) ((kaddr) >> PAGE_SHIFT)
-#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
-
-#define phys_to_page(kaddr) pfn_to_page(phys_to_pfn(kaddr))
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b05187ce5dbd..7485ee561fec 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/iommu.h>
+#include <linux/pci_hotplug.h>
#include <asm-generic/pci.h>
#include <asm/pci_clp.h>
#include <asm/pci_debug.h>
@@ -25,6 +26,7 @@ int pci_proc_domain(struct pci_bus *);
#define ZPCI_NR_DMA_SPACES 1
#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
+#define ZPCI_DOMAIN_BITMAP_SIZE (1 << 16)
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
@@ -96,6 +98,7 @@ struct s390_domain;
struct zpci_dev {
struct pci_bus *bus;
struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
+ struct hotplug_slot hotplug_slot;
enum zpci_state state;
u32 fid; /* function ID, used by sclp */
@@ -186,6 +189,9 @@ int clp_enable_fh(struct zpci_dev *, u8);
int clp_disable_fh(struct zpci_dev *);
int clp_get_state(u32 fid, enum zpci_state *state);
+/* UID */
+void update_uid_checking(bool new);
+
/* IOMMU Interface */
int zpci_init_iommu(struct zpci_dev *zdev);
void zpci_destroy_iommu(struct zpci_dev *zdev);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 77606c4acd58..74a352f8c0d1 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -34,19 +34,21 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
memset64((u64 *)crst, entry, _CRST_ENTRIES);
}
-static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
+
+static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
+ unsigned long len)
{
- if (mm_pmd_folded(mm))
- return _SEGMENT_ENTRY_EMPTY;
- if (mm_pud_folded(mm))
- return _REGION3_ENTRY_EMPTY;
- if (mm_p4d_folded(mm))
- return _REGION2_ENTRY_EMPTY;
- return _REGION1_ENTRY_EMPTY;
-}
+ int rc;
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *);
+ if (addr + len > mm->context.asce_limit &&
+ addr + len <= TASK_SIZE) {
+ rc = crst_table_upgrade(mm, addr + len);
+ if (rc)
+ return (unsigned long) rc;
+ }
+ return addr;
+}
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
@@ -116,24 +118,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- unsigned long *table = crst_table_alloc(mm);
-
- if (!table)
- return NULL;
- if (mm->context.asce_limit == _REGION3_SIZE) {
- /* Forking a compat process with 2 page table levels */
- if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
- crst_table_free(mm, table);
- return NULL;
- }
- }
- return (pgd_t *) table;
+ return (pgd_t *) crst_table_alloc(mm);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- if (mm->context.asce_limit == _REGION3_SIZE)
- pgtable_pmd_page_dtor(virt_to_page(pgd));
crst_table_free(mm, (unsigned long *) pgd);
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 6d7c3b7e9281..6076c8c912d2 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -19,6 +19,7 @@
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
+#include <asm/uv.h>
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
@@ -520,6 +521,15 @@ static inline int mm_has_pgste(struct mm_struct *mm)
return 0;
}
+static inline int mm_is_protected(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (unlikely(atomic_read(&mm->context.is_protected)))
+ return 1;
+#endif
+ return 0;
+}
+
static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -1067,7 +1077,12 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+ pte_t res;
+
+ res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+ if (mm_is_protected(mm) && pte_present(res))
+ uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+ return res;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -1079,7 +1094,12 @@ void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
+ pte_t res;
+
+ res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
+ if (mm_is_protected(vma->vm_mm) && pte_present(res))
+ uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+ return res;
}
/*
@@ -1094,12 +1114,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, int full)
{
+ pte_t res;
+
if (full) {
- pte_t pte = *ptep;
+ res = *ptep;
*ptep = __pte(_PAGE_INVALID);
- return pte;
+ } else {
+ res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
- return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+ if (mm_is_protected(mm) && pte_present(res))
+ uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+ return res;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index aadb3d0e2adc..555d148ccf32 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -92,15 +92,15 @@ extern void __bpon(void);
*/
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \
- (1UL << 31) : -PAGE_SIZE)
+ _REGION3_SIZE : TASK_SIZE_MAX)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
- (1UL << 30) : (1UL << 41))
+ (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1))
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_SIZE_MAX (-PAGE_SIZE)
#define STACK_TOP (test_thread_flag(TIF_31BIT) ? \
- (1UL << 31) : (1UL << 42))
-#define STACK_TOP_MAX (1UL << 42)
+ _REGION3_SIZE : _REGION2_SIZE)
+#define STACK_TOP_MAX _REGION2_SIZE
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -161,6 +161,7 @@ typedef struct thread_struct thread_struct;
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
.fpu.regs = (void *) init_task.thread.fpu.fprs, \
+ .last_break = 1, \
}
/*
@@ -177,7 +178,6 @@ typedef struct thread_struct thread_struct;
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
- crst_table_downgrade(current->mm); \
execve_tail(); \
} while (0)
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b241ddb67caf..534f212753d6 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <uapi/asm/setup.h>
+#include <linux/build_bug.h>
#define EP_OFFSET 0x10008
#define EP_STRING "S390EP"
@@ -162,6 +163,12 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}
+static inline u32 gen_lpswe(unsigned long addr)
+{
+ BUILD_BUG_ON(addr > 0xfff);
+ return 0xb2b20000 | addr;
+}
+
#else /* __ASSEMBLY__ */
#define IPL_DEVICE (IPL_DEVICE_OFFSET)
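
gen_lpswe() builds the LPSWE opcode (0xb2b2) with the lowcore offset encoded in the low 12 bits, and the BUILD_BUG_ON() works only because the helper is inlined with compile-time-constant addresses, so the check folds away (or fires) during the build. The same trick in isolation; the opcode below is hypothetical:

#include <linux/build_bug.h>
#include <linux/types.h>

/* Encode an opcode in the high half plus a 12-bit displacement. */
static inline u32 encode_op(unsigned long disp)
{
	/*
	 * Effectively requires @disp to be a compile-time constant that
	 * fits in 12 bits; anything else breaks the build.
	 */
	BUILD_BUG_ON(disp > 0xfff);
	return 0xdead0000 | disp;	/* hypothetical opcode */
}
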
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index b157a81fb977..231a51e870fe 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -34,6 +34,7 @@ extern int smp_vcpu_scheduled(int cpu);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index cca406fdbe51..fbb507504a3b 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -16,8 +16,8 @@ struct cpu_topology_s390 {
unsigned short socket_id;
unsigned short book_id;
unsigned short drawer_id;
- unsigned short node_id;
unsigned short dedicated : 1;
+ int booted_cores;
cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
@@ -25,7 +25,6 @@ struct cpu_topology_s390 {
};
extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
-extern cpumask_t cpus_with_topology;
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
@@ -37,6 +36,7 @@ extern cpumask_t cpus_with_topology;
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
#define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated)
+#define topology_booted_cores(cpu) (cpu_topology[cpu].booted_cores)
#define mc_capable() 1
@@ -45,6 +45,7 @@ int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
void store_topology(struct sysinfo_15_1_x *info);
+void update_cpu_masks(void);
void topology_expect_change(void);
const struct cpumask *cpu_coregroup_mask(int cpu);
@@ -54,6 +55,8 @@ static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
+static inline int topology_booted_cores(int cpu_nr) { return 1; }
+static inline void update_cpu_masks(void) { }
static inline void topology_expect_change(void) { }
#endif /* CONFIG_SCHED_TOPOLOGY */
@@ -71,19 +74,23 @@ static inline void topology_expect_change(void) { }
#define cpu_to_node cpu_to_node
static inline int cpu_to_node(int cpu)
{
- return cpu_topology[cpu].node_id;
+ return 0;
}
/* Returns a pointer to the cpumask of CPUs on node 'node'. */
#define cpumask_of_node cpumask_of_node
static inline const struct cpumask *cpumask_of_node(int node)
{
- return &node_to_cpumask_map[node];
+ return cpu_possible_mask;
}
#define pcibus_to_node(bus) __pcibus_to_node(bus)
#define node_distance(a, b) __node_distance(a, b)
+static inline int __node_distance(int a, int b)
+{
+ return 0;
+}
#else /* !CONFIG_NUMA */
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 4093a2856929..cff4b4c99b75 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -14,23 +14,62 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
+#include <linux/sched.h>
#include <asm/page.h>
+#include <asm/gmap.h>
#define UVC_RC_EXECUTED 0x0001
#define UVC_RC_INV_CMD 0x0002
#define UVC_RC_INV_STATE 0x0003
#define UVC_RC_INV_LEN 0x0005
#define UVC_RC_NO_RESUME 0x0007
+#define UVC_RC_NEED_DESTROY 0x8000
#define UVC_CMD_QUI 0x0001
+#define UVC_CMD_INIT_UV 0x000f
+#define UVC_CMD_CREATE_SEC_CONF 0x0100
+#define UVC_CMD_DESTROY_SEC_CONF 0x0101
+#define UVC_CMD_CREATE_SEC_CPU 0x0120
+#define UVC_CMD_DESTROY_SEC_CPU 0x0121
+#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
+#define UVC_CMD_CONV_FROM_SEC_STOR 0x0201
+#define UVC_CMD_SET_SEC_CONF_PARAMS 0x0300
+#define UVC_CMD_UNPACK_IMG 0x0301
+#define UVC_CMD_VERIFY_IMG 0x0302
+#define UVC_CMD_CPU_RESET 0x0310
+#define UVC_CMD_CPU_RESET_INITIAL 0x0311
+#define UVC_CMD_PREPARE_RESET 0x0320
+#define UVC_CMD_CPU_RESET_CLEAR 0x0321
+#define UVC_CMD_CPU_SET_STATE 0x0330
+#define UVC_CMD_SET_UNSHARE_ALL 0x0340
+#define UVC_CMD_PIN_PAGE_SHARED 0x0341
+#define UVC_CMD_UNPIN_PAGE_SHARED 0x0342
#define UVC_CMD_SET_SHARED_ACCESS 0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
/* Bits in installed uv calls */
enum uv_cmds_inst {
BIT_UVC_CMD_QUI = 0,
+ BIT_UVC_CMD_INIT_UV = 1,
+ BIT_UVC_CMD_CREATE_SEC_CONF = 2,
+ BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
+ BIT_UVC_CMD_CREATE_SEC_CPU = 4,
+ BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
+ BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
+ BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
+ BIT_UVC_CMD_SET_SEC_PARMS = 11,
+ BIT_UVC_CMD_UNPACK_IMG = 13,
+ BIT_UVC_CMD_VERIFY_IMG = 14,
+ BIT_UVC_CMD_CPU_RESET = 15,
+ BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
+ BIT_UVC_CMD_CPU_SET_STATE = 17,
+ BIT_UVC_CMD_PREPARE_RESET = 18,
+ BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
+ BIT_UVC_CMD_UNSHARE_ALL = 20,
+ BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
+ BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};
struct uv_cb_header {
@@ -40,13 +79,127 @@ struct uv_cb_header {
u16 rrc; /* Return Reason Code */
} __packed __aligned(8);
+/* Query Ultravisor Information */
struct uv_cb_qui {
struct uv_cb_header header;
u64 reserved08;
u64 inst_calls_list[4];
- u64 reserved30[15];
+ u64 reserved30[2];
+ u64 uv_base_stor_len;
+ u64 reserved48;
+ u64 conf_base_phys_stor_len;
+ u64 conf_base_virt_stor_len;
+ u64 conf_virt_var_stor_len;
+ u64 cpu_stor_len;
+ u32 reserved70[3];
+ u32 max_num_sec_conf;
+ u64 max_guest_stor_addr;
+ u8 reserved88[158 - 136];
+ u16 max_guest_cpus;
+ u8 reserveda0[200 - 160];
} __packed __aligned(8);
+/* Initialize Ultravisor */
+struct uv_cb_init {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 stor_origin;
+ u64 stor_len;
+ u64 reserved28[4];
+} __packed __aligned(8);
+
+/* Create Guest Configuration */
+struct uv_cb_cgc {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 guest_handle;
+ u64 conf_base_stor_origin;
+ u64 conf_virt_stor_origin;
+ u64 reserved30;
+ u64 guest_stor_origin;
+ u64 guest_stor_len;
+ u64 guest_sca;
+ u64 guest_asce;
+ u64 reserved58[5];
+} __packed __aligned(8);
+
+/* Create Secure CPU */
+struct uv_cb_csc {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 cpu_handle;
+ u64 guest_handle;
+ u64 stor_origin;
+ u8 reserved30[6];
+ u16 num;
+ u64 state_origin;
+ u64 reserved40[4];
+} __packed __aligned(8);
+
+/* Convert to Secure */
+struct uv_cb_cts {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 guest_handle;
+ u64 gaddr;
+} __packed __aligned(8);
+
+/* Convert from Secure / Pin Page Shared */
+struct uv_cb_cfs {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 paddr;
+} __packed __aligned(8);
+
+/* Set Secure Config Parameter */
+struct uv_cb_ssc {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 guest_handle;
+ u64 sec_header_origin;
+ u32 sec_header_len;
+ u32 reserved2c;
+ u64 reserved30[4];
+} __packed __aligned(8);
+
+/* Unpack */
+struct uv_cb_unp {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 guest_handle;
+ u64 gaddr;
+ u64 tweak[2];
+ u64 reserved38[3];
+} __packed __aligned(8);
+
+#define PV_CPU_STATE_OPR 1
+#define PV_CPU_STATE_STP 2
+#define PV_CPU_STATE_CHKSTP 3
+#define PV_CPU_STATE_OPR_LOAD 5
+
+struct uv_cb_cpu_set_state {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 cpu_handle;
+ u8 reserved20[7];
+ u8 state;
+ u64 reserved28[5];
+};
+
+/*
+ * A common UV call struct for calls that take no payload
+ * Examples:
+ * Destroy cpu/config
+ * Verify
+ */
+struct uv_cb_nodata {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 handle;
+ u64 reserved20[4];
+} __packed __aligned(8);
+
+/* Set Shared Access */
struct uv_cb_share {
struct uv_cb_header header;
u64 reserved08[3];
@@ -54,21 +207,76 @@ struct uv_cb_share {
u64 reserved28;
} __packed __aligned(8);
-static inline int uv_call(unsigned long r1, unsigned long r2)
+static inline int __uv_call(unsigned long r1, unsigned long r2)
{
int cc;
asm volatile(
- "0: .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
- " brc 3,0b\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
+ " .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
: [cc] "=d" (cc)
: [r1] "a" (r1), [r2] "a" (r2)
: "memory", "cc");
return cc;
}
+static inline int uv_call(unsigned long r1, unsigned long r2)
+{
+ int cc;
+
+ do {
+ cc = __uv_call(r1, r2);
+ } while (cc > 1);
+ return cc;
+}
+
+/* Low level uv_call that avoids stalls for long running busy conditions */
+static inline int uv_call_sched(unsigned long r1, unsigned long r2)
+{
+ int cc;
+
+ do {
+ cc = __uv_call(r1, r2);
+ cond_resched();
+ } while (cc > 1);
+ return cc;
+}
+
+/*
+ * special variant of uv_call that only transports the cpu or guest
+ * handle and the command, like destroy or verify.
+ */
+static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
+{
+ struct uv_cb_nodata uvcb = {
+ .header.cmd = cmd,
+ .header.len = sizeof(uvcb),
+ .handle = handle,
+ };
+ int cc;
+
+ WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
+ cc = uv_call_sched(0, (u64)&uvcb);
+ *rc = uvcb.header.rc;
+ *rrc = uvcb.header.rrc;
+ return cc ? -EINVAL : 0;
+}
+
+struct uv_info {
+ unsigned long inst_calls_list[4];
+ unsigned long uv_base_stor_len;
+ unsigned long guest_base_stor_len;
+ unsigned long guest_virt_base_stor_len;
+ unsigned long guest_virt_var_stor_len;
+ unsigned long guest_cpu_stor_len;
+ unsigned long max_sec_stor_addr;
+ unsigned int max_num_sec_conf;
+ unsigned short max_guest_cpus;
+};
+
+extern struct uv_info uv_info;
+
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;
@@ -121,11 +329,40 @@ static inline int uv_remove_shared(unsigned long addr)
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
-void uv_query_info(void);
#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
+#endif
+
+#if IS_ENABLED(CONFIG_KVM)
+extern int prot_virt_host;
+
+static inline int is_prot_virt_host(void)
+{
+ return prot_virt_host;
+}
+
+int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
+int uv_convert_from_secure(unsigned long paddr);
+int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
+
+void setup_uv(void);
+void adjust_to_uv_max(unsigned long *vmax);
+#else
+#define is_prot_virt_host() 0
+static inline void setup_uv(void) {}
+static inline void adjust_to_uv_max(unsigned long *vmax) {}
+
+static inline int uv_convert_from_secure(unsigned long paddr)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
+void uv_query_info(void);
+#else
static inline void uv_query_info(void) {}
#endif
diff --git a/arch/s390/kernel/.gitignore b/arch/s390/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/s390/kernel/.gitignore
+++ b/arch/s390/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2b1203cf7be6..75f26d775027 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -54,7 +54,6 @@ CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
-obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
@@ -70,7 +69,7 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
-obj-$(CONFIG_IMA) += ima_arch.o
+obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
@@ -78,6 +77,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
+obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
# vdso
obj-y += vdso64/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ce33406cfe83..e80f0e6f5972 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -124,6 +124,8 @@ int main(void)
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
+ OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
+ OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index e9dac9a24d3f..61f2b0412345 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -84,7 +84,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
{
- return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
+ return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 9205add8481d..3ae64914bd14 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -115,26 +115,29 @@ _LPP_OFFSET = __LC_LPP
.macro SWITCH_ASYNC savearea,timer
tmhh %r8,0x0001 # interrupting from user ?
- jnz 1f
+ jnz 2f
lgr %r14,%r9
+ cghi %r14,__LC_RETURN_LPSWE
+ je 0f
slg %r14,BASED(.Lcritical_start)
clg %r14,BASED(.Lcritical_length)
- jhe 0f
+ jhe 1f
+0:
lghi %r11,\savearea # inside critical section, do cleanup
brasl %r14,cleanup_critical
tmhh %r8,0x0001 # retest problem state after cleanup
- jnz 1f
-0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
+ jnz 2f
+1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
- jnz 2f
+ jnz 3f
CHECK_STACK \savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 3f
-1: UPDATE_VTIME %r14,%r15,\timer
+ j 4f
+2: UPDATE_VTIME %r14,%r15,\timer
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-2: lg %r15,__LC_ASYNC_STACK # load async stack
-3: la %r11,STACK_FRAME_OVERHEAD(%r15)
+3: lg %r15,__LC_ASYNC_STACK # load async stack
+4: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME w1,w2,enter_timer
@@ -401,7 +404,7 @@ ENTRY(system_call)
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
+ b __LC_RETURN_LPSWE(%r0)
.Lsysc_done:
#
@@ -608,43 +611,50 @@ ENTRY(pgm_check_handler)
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_CURRENT
+ srag %r11,%r10,12
+ jnz 0f
+ /* If __LC_LAST_BREAK is < 4096, it contains one of
+ * the lpswe addresses in lowcore. Set it to 1 (initial state)
+ * to prevent leaking that address to userspace.
+ */
+ lghi %r10,1
+0: lg %r12,__LC_CURRENT
lghi %r11,0
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
- jnz 2f # -> fault in user space
+ jnz 3f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
slg %r14,BASED(.Lsie_critical_start)
clg %r14,BASED(.Lsie_critical_length)
- jhe 0f
+ jhe 1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
lghi %r11,_PIF_GUEST_FAULT
#endif
-0: tmhh %r8,0x4000 # PER bit set in old PSW ?
- jnz 1f # -> enabled, can't be a double fault
+1: tmhh %r8,0x4000 # PER bit set in old PSW ?
+ jnz 2f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
-1: CHECK_STACK __LC_SAVE_AREA_SYNC
+2: CHECK_STACK __LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- # CHECK_VMAP_STACK branches to stack_overflow or 4f
- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
-2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ # CHECK_VMAP_STACK branches to stack_overflow or 5f
+ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
+3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
- jz 3f
+ jz 4f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-3: stg %r10,__THREAD_last_break(%r14)
-4: lgr %r13,%r11
+4: stg %r10,__THREAD_last_break(%r14)
+5: lgr %r13,%r11
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
@@ -663,14 +673,14 @@ ENTRY(pgm_check_handler)
stg %r13,__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 5f
+ jz 6f
tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-5: REENABLE_IRQS
+6: REENABLE_IRQS
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
@@ -775,7 +785,7 @@ ENTRY(io_int_handler)
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
+ b __LC_RETURN_LPSWE(%r0)
.Lio_done:
#
@@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler)
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_MCCK_PSW
+ b __LC_RETURN_MCCK_LPSWE
.Lmcck_panic:
lg %r15,__LC_NODAT_STACK
@@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow)
#endif
ENTRY(cleanup_critical)
+ cghi %r9,__LC_RETURN_LPSWE
+ je .Lcleanup_lpswe
#if IS_ENABLED(CONFIG_KVM)
clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
jl 0f
@@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
+.Lcleanup_lpswe:
1: lmg %r8,%r9,__LC_RETURN_PSW
BR_EX %r14,%r11
.Lcleanup_sysc_restore_insn:
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 1d3927e01a5f..faca269d5f27 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -24,6 +24,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
+void do_secure_storage_access(struct pt_regs *regs);
+void do_non_secure_storage_access(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6837affc19e8..4a71061974fd 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -144,6 +144,9 @@ static struct ipl_parameter_block *dump_block_ccw;
static struct sclp_ipl_info sclp_ipl_info;
+static bool reipl_fcp_clear;
+static bool reipl_ccw_clear;
+
static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long) addr;
@@ -691,6 +694,21 @@ static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show,
reipl_fcp_loadparm_store);
+static ssize_t reipl_fcp_clear_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%u\n", reipl_fcp_clear);
+}
+
+static ssize_t reipl_fcp_clear_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (strtobool(buf, &reipl_fcp_clear) < 0)
+ return -EINVAL;
+ return len;
+}
+
static struct attribute *reipl_fcp_attrs[] = {
&sys_reipl_fcp_device_attr.attr,
&sys_reipl_fcp_wwpn_attr.attr,
@@ -706,6 +724,9 @@ static struct attribute_group reipl_fcp_attr_group = {
.bin_attrs = reipl_fcp_bin_attrs,
};
+static struct kobj_attribute sys_reipl_fcp_clear_attr =
+ __ATTR(clear, 0644, reipl_fcp_clear_show, reipl_fcp_clear_store);
+
/* CCW reipl device attributes */
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
@@ -741,16 +762,36 @@ static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show,
reipl_ccw_loadparm_store);
+static ssize_t reipl_ccw_clear_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%u\n", reipl_ccw_clear);
+}
+
+static ssize_t reipl_ccw_clear_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (strtobool(buf, &reipl_ccw_clear) < 0)
+ return -EINVAL;
+ return len;
+}
+
+static struct kobj_attribute sys_reipl_ccw_clear_attr =
+ __ATTR(clear, 0644, reipl_ccw_clear_show, reipl_ccw_clear_store);
+
static struct attribute *reipl_ccw_attrs_vm[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
&sys_reipl_ccw_vmparm_attr.attr,
+ &sys_reipl_ccw_clear_attr.attr,
NULL,
};
static struct attribute *reipl_ccw_attrs_lpar[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
+ &sys_reipl_ccw_clear_attr.attr,
NULL,
};
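
The new clear attribute selects the diag308 subcode used when the re-IPL is finally triggered (see __reipl_run() below): "1" requests a memory-clearing load, "0" keeps memory intact. From userspace this presumably surfaces as /sys/firmware/reipl/ccw/clear and /sys/firmware/reipl/fcp/clear. Condensed, the selection amounts to:

    /* Sketch of the subcode selection performed in __reipl_run(): */
    ccw_subcode = reipl_ccw_clear ? DIAG308_LOAD_CLEAR
                                  : DIAG308_LOAD_NORMAL_DUMP;
    fcp_subcode = reipl_fcp_clear ? DIAG308_LOAD_CLEAR
                                  : DIAG308_LOAD_NORMAL;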
@@ -892,11 +933,17 @@ static void __reipl_run(void *unused)
switch (reipl_type) {
case IPL_TYPE_CCW:
diag308(DIAG308_SET, reipl_block_ccw);
- diag308(DIAG308_LOAD_CLEAR, NULL);
+ if (reipl_ccw_clear)
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ else
+ diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
break;
case IPL_TYPE_FCP:
diag308(DIAG308_SET, reipl_block_fcp);
- diag308(DIAG308_LOAD_CLEAR, NULL);
+ if (reipl_fcp_clear)
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ else
+ diag308(DIAG308_LOAD_NORMAL, NULL);
break;
case IPL_TYPE_NSS:
diag308(DIAG308_SET, reipl_block_nss);
@@ -1008,11 +1055,16 @@ static int __init reipl_fcp_init(void)
}
rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
- if (rc) {
- kset_unregister(reipl_fcp_kset);
- free_page((unsigned long) reipl_block_fcp);
- return rc;
- }
+ if (rc)
+ goto out1;
+
+ if (test_facility(141)) {
+ rc = sysfs_create_file(&reipl_fcp_kset->kobj,
+ &sys_reipl_fcp_clear_attr.attr);
+ if (rc)
+ goto out2;
+ } else
+ reipl_fcp_clear = true;
if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, &ipl_block, sizeof(ipl_block));
@@ -1032,6 +1084,13 @@ static int __init reipl_fcp_init(void)
}
reipl_capabilities |= IPL_TYPE_FCP;
return 0;
+
+out2:
+ sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
+out1:
+ kset_unregister(reipl_fcp_kset);
+ free_page((unsigned long) reipl_block_fcp);
+ return rc;
}
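
The reworked error path follows the usual kernel goto-unwind idiom: teardown labels run in reverse order of acquisition, so the extra sysfs_create_file() failure point only costs one new label instead of duplicated cleanup. The general shape, with hypothetical step names:

    rc = acquire_a();
    if (rc)
            return rc;
    rc = acquire_b();
    if (rc)
            goto undo_a;            /* only undo what succeeded */
    rc = acquire_c();
    if (rc)
            goto undo_b;
    return 0;
    undo_b:
            release_b();
    undo_a:
            release_a();
            return rc;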
static int __init reipl_type_init(void)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8371855042dc..3514420f0259 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -95,14 +95,6 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
-void __init init_IRQ(void)
-{
- BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
- init_cio_interrupts();
- init_airq_interrupts();
- init_ext_interrupts();
-}
-
void do_IRQ(struct pt_regs *regs, int irq)
{
struct pt_regs *old_regs;
@@ -294,12 +286,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-static struct irqaction external_interrupt = {
- .name = "EXT",
- .handler = do_ext_interrupt,
-};
-
-void __init init_ext_interrupts(void)
+static void __init init_ext_interrupts(void)
{
int idx;
@@ -308,7 +295,16 @@ void __init init_ext_interrupts(void)
irq_set_chip_and_handler(EXT_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
- setup_irq(EXT_INTERRUPT, &external_interrupt);
+ if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
+ panic("Failed to register EXT interrupt\n");
+}
+
+void __init init_IRQ(void)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
+ init_cio_interrupts();
+ init_airq_interrupts();
+ init_ext_interrupts();
}
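
This follows the tree-wide replacement of setup_irq() with request_irq(): the static struct irqaction is dropped and the genirq core allocates one internally, which also lets init_ext_interrupts() become static with init_IRQ() moved below it. Side by side:

    /* old (removed): caller-provided irqaction */
    static struct irqaction external_interrupt = {
            .name    = "EXT",
            .handler = do_ext_interrupt,
    };
    setup_irq(EXT_INTERRUPT, &external_interrupt);

    /* new: the core allocates the irqaction itself */
    if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
            panic("Failed to register EXT interrupt\n");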
static DEFINE_SPINLOCK(irq_subclass_lock);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index cb8b1cc285c9..3a854cb5a4c6 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -14,7 +14,6 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
-#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
@@ -39,36 +38,6 @@ extern const unsigned long long relocate_kernel_len;
#ifdef CONFIG_CRASH_DUMP
/*
- * PM notifier callback for kdump
- */
-static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
- void *ptr)
-{
- switch (action) {
- case PM_SUSPEND_PREPARE:
- case PM_HIBERNATION_PREPARE:
- if (kexec_crash_image)
- arch_kexec_unprotect_crashkres();
- break;
- case PM_POST_SUSPEND:
- case PM_POST_HIBERNATION:
- if (kexec_crash_image)
- arch_kexec_protect_crashkres();
- break;
- default:
- return NOTIFY_DONE;
- }
- return NOTIFY_OK;
-}
-
-static int __init machine_kdump_pm_init(void)
-{
- pm_notifier(machine_kdump_pm_cb, 0);
- return 0;
-}
-arch_initcall(machine_kdump_pm_init);
-
-/*
* Reset the system, copy boot CPU registers to absolute zero,
* and jump to the kdump image
*/
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 8b33e03e47b8..1e3df52b2b65 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -238,6 +238,64 @@ CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z15, DTLB2_HPAGE_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z15, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z15, L1D_L2D_SOURCED_WRITES, 0x0085);
+CPUMF_EVENT_ATTR(cf_z15, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z15, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z15, L1I_L2I_SOURCED_WRITES, 0x0088);
+CPUMF_EVENT_ATTR(cf_z15, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z15, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z15, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z15, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z15, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z15, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_MEMORY_SOURCED_WRITES, 0x0091);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES, 0x0093);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x0094);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x0095);
+CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES, 0x0096);
+CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x0097);
+CPUMF_EVENT_ATTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x0098);
+CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES, 0x0099);
+CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x009a);
+CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x009b);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x009c);
+CPUMF_EVENT_ATTR(cf_z15, L1D_OFFDRAWER_L4_SOURCED_WRITES, 0x009d);
+CPUMF_EVENT_ATTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_RO, 0x009e);
+CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_MEMORY_SOURCED_WRITES, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z15, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z15, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
+CPUMF_EVENT_ATTR(cf_z15, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
+CPUMF_EVENT_ATTR(cf_z15, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z15, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z15, LAST_HOST_TRANSLATIONS, 0x00e8);
+CPUMF_EVENT_ATTR(cf_z15, TX_NC_TABORT, 0x00f3);
+CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_NO_SPECIAL, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
+CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109);
+CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
CPUMF_EVENT_PTR(cf_fvn1, INSTRUCTIONS),
@@ -516,6 +574,67 @@ static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
NULL,
};
+static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z15, L1D_RO_EXCL_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, DTLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, DTLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z15, DTLB2_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, DTLB2_GPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_L2D_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, ITLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, ITLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_L2I_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, TLB2_ENGINES_BUSY),
+ CPUMF_EVENT_PTR(cf_z15, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_z15, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_z15, L1C_TLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_OFFDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1D_ONCHIP_L3_SOURCED_WRITES_RO),
+ CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z15, L1I_ONDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, L1I_OFFDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z15, BCD_DFP_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z15, VX_BCD_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z15, DECIMAL_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_z15, LAST_HOST_TRANSLATIONS),
+ CPUMF_EVENT_PTR(cf_z15, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_z15, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z15, TX_C_TABORT_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
+ CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
+ CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
+ CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR),
+ CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+ CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+ NULL,
+};
+
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
@@ -624,9 +743,11 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
break;
case 0x3906:
case 0x3907:
+ model = cpumcf_z14_pmu_event_attr;
+ break;
case 0x8561:
case 0x8562:
- model = cpumcf_z14_pmu_event_attr;
+ model = cpumcf_z15_pmu_event_attr;
break;
default:
model = none;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index b095b1c78987..85a711d783eb 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -372,28 +372,33 @@ static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
- unsigned long n_sdb, freq, factor;
+ unsigned long n_sdb, freq;
size_t sample_size;
/* Calculate sampling buffers using 4K pages
*
- * 1. Determine the sample data size which depends on the used
- * sampling functions, for example, basic-sampling or
- * basic-sampling with diagnostic-sampling.
+ * 1. The sample size is 32 bytes for basic sampling and is the
+ * same for all machine types. Diagnostic sampling uses the
+ * auxiliary data buffer setup, which provides the memory for
+ * the SDBs via the common Linux auxiliary trace setup.
*
- * 2. Use the sampling frequency as input. The sampling buffer is
- * designed for almost one second. This can be adjusted through
- * the "factor" variable.
- * In any case, alloc_sampling_buffer() sets the Alert Request
+ * 2. Function alloc_sampling_buffer() sets the Alert Request
* Control indicator to trigger a measurement-alert to harvest
- * sample-data-blocks (sdb).
+ * sample-data-blocks (SDB). This is done per SDB. The
+ * measurement alert interrupt fires quickly enough to handle
+ * one SDB; on very high frequencies and workloads there might
+ * be 2 to 3 SDBs available for sample processing.
+ * There is currently no need to set up an alert request on
+ * every n-th page. That would be counterproductive, as one IRQ
+ * would then trigger a very high number of samples to process.
*
- * 3. Compute the number of sample-data-blocks and ensure a minimum
- * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not
- * exceed a "calculated" maximum. The symbolic maximum is
- * designed for basic-sampling only and needs to be increased if
- * diagnostic-sampling is active.
- * See also the remarks for these symbolic constants.
+ * 3. Use the sampling frequency as input.
+ * Compute the number of SDBs and ensure a minimum
+ * of CPUM_SF_MIN_SDB. Depending on the frequency add some
+ * more SDBs to handle a higher sampling rate: allow for 100
+ * samples (one SDB) per 10000 Hz frequency increment.
*
* 4. Compute the number of sample-data-block-tables (SDBT) and
* ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
@@ -401,10 +406,7 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
*/
sample_size = sizeof(struct hws_basic_entry);
freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
- factor = 1;
- n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
- if (n_sdb < CPUM_SF_MIN_SDB)
- n_sdb = CPUM_SF_MIN_SDB;
+ n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);
/* If there is already a sampling buffer allocated, it is very likely
* that the sampling facility is enabled too. If the event to be
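
The removed "factor" logic sized the buffer for roughly one second of samples; the new rule is a simple linear formula. A worked example, assuming CPUM_SF_MIN_SDB stays at 15 (assumption for illustration):

    /* freq = 42000 samples/s:
     * n_sdb = 15 + DIV_ROUND_UP(42000, 10000)
     *       = 15 + 5
     *       = 20 SDBs, i.e. twenty 4 KB pages (~80 KB)
     */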
@@ -1576,6 +1578,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
unsigned long range = 0, size;
unsigned long long overflow = 0;
struct perf_output_handle *handle = &cpuhw->handle;
+ unsigned long num_sdb;
aux = perf_get_aux(handle);
if (WARN_ON_ONCE(!aux))
@@ -1587,13 +1590,14 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
size >> PAGE_SHIFT);
perf_aux_output_end(handle, size);
+ num_sdb = aux->sfb.num_sdb;
while (!done) {
/* Get an output handle */
aux = perf_aux_output_begin(handle, cpuhw->event);
if (handle->size == 0) {
pr_err("The AUX buffer with %lu pages for the "
"diagnostic-sampling mode is full\n",
- aux->sfb.num_sdb);
+ num_sdb);
debug_sprintf_event(sfdbg, 1,
"%s: AUX buffer used up\n",
__func__);
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index eee3a482195a..2c27907a5ffc 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,8 +78,8 @@ PGM_CHECK(do_dat_exception) /* 39 */
PGM_CHECK(do_dat_exception) /* 3a */
PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
-PGM_CHECK_DEFAULT /* 3d */
-PGM_CHECK_DEFAULT /* 3e */
+PGM_CHECK(do_secure_storage_access) /* 3d */
+PGM_CHECK(do_non_secure_storage_access) /* 3e */
PGM_CHECK_DEFAULT /* 3f */
PGM_CHECK(monitor_event_exception) /* 40 */
PGM_CHECK_DEFAULT /* 41 */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6ccef5f29761..eb6e23ad15a2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -106,6 +106,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
p->thread.system_timer = 0;
p->thread.hardirq_timer = 0;
p->thread.softirq_timer = 0;
+ p->thread.last_break = 1;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 6ebc2117c66c..c92d04f876cb 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -151,10 +151,35 @@ static void show_cpu_summary(struct seq_file *m, void *v)
}
}
+static void show_cpu_topology(struct seq_file *m, unsigned long n)
+{
+#ifdef CONFIG_SCHED_TOPOLOGY
+ seq_printf(m, "physical id : %d\n", topology_physical_package_id(n));
+ seq_printf(m, "core id : %d\n", topology_core_id(n));
+ seq_printf(m, "book id : %d\n", topology_book_id(n));
+ seq_printf(m, "drawer id : %d\n", topology_drawer_id(n));
+ seq_printf(m, "dedicated : %d\n", topology_cpu_dedicated(n));
+ seq_printf(m, "address : %d\n", smp_cpu_get_cpu_address(n));
+ seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n)));
+ seq_printf(m, "cpu cores : %d\n", topology_booted_cores(n));
+#endif /* CONFIG_SCHED_TOPOLOGY */
+}
+
+static void show_cpu_ids(struct seq_file *m, unsigned long n)
+{
+ struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
+
+ seq_printf(m, "version : %02X\n", id->version);
+ seq_printf(m, "identification : %06X\n", id->ident);
+ seq_printf(m, "machine : %04X\n", id->machine);
+}
+
static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
+ if (!machine_has_cpu_mhz)
+ return;
seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static);
}
@@ -165,12 +190,13 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n)
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
+ unsigned long first = cpumask_first(cpu_online_mask);
- if (!n)
+ if (n == first)
show_cpu_summary(m, v);
- if (!machine_has_cpu_mhz)
- return 0;
seq_printf(m, "\ncpu number : %ld\n", n);
+ show_cpu_topology(m, n);
+ show_cpu_ids(m, n);
show_cpu_mhz(m, n);
return 0;
}
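
Together with the first-online check, each online CPU now gets a full stanza in /proc/cpuinfo rather than only the MHz lines. A purely illustrative excerpt (values invented):

    cpu number      : 0
    physical id     : 0
    core id         : 0
    book id         : 1
    drawer id       : 2
    dedicated       : 0
    address         : 0
    siblings        : 8
    cpu cores       : 4
    version         : FF
    identification  : 123456
    machine         : 8561
    cpu MHz dynamic : 5200
    cpu MHz static  : 5200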
@@ -179,6 +205,8 @@ static inline void *c_update(loff_t *pos)
{
if (*pos)
*pos = cpumask_next(*pos - 1, cpu_online_mask);
+ else
+ *pos = cpumask_first(cpu_online_mask);
return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b2c2f75860e8..36445dd40fdb 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -73,6 +73,7 @@
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
+#include <asm/asm-offsets.h>
#include "entry.h"
/*
@@ -92,10 +93,6 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
-#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
-int __bootdata_preserved(prot_virt_guest);
-#endif
-
int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
@@ -450,6 +447,8 @@ static void __init setup_lowcore_dat_off(void)
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
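
Unlike __LC_RETURN_PSW, return_lpswe holds an executable instruction rather than a PSW: gen_lpswe() presumably builds the LPSWE opcode (0xb2b2) with the lowcore offset as displacement, roughly:

    /* assumed shape of gen_lpswe(); the generated word is the
     * instruction "lpswe off(%r0)" placed in lowcore: */
    #define gen_lpswe(off)  ((u32)(0xb2b20000 | (off)))

entry.S then ends its exit paths with "b __LC_RETURN_LPSWE(%r0)", giving every interrupt return a single fixed final-instruction address; this is what the new cleanup_critical hook and the last-break scrubbing in pgm_check_handler rely on.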
@@ -564,6 +563,9 @@ static void __init setup_memory_end(void)
vmax = _REGION1_SIZE; /* 4-level kernel page table */
}
+ if (is_prot_virt_host())
+ adjust_to_uv_max(&vmax);
+
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
@@ -790,6 +792,7 @@ static void __init memblock_add_mem_detect_info(void)
memblock_physmem_add(start, end - start);
}
memblock_set_bottom_up(false);
+ memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
memblock_dump_all();
}
@@ -1138,6 +1141,8 @@ void __init setup_arch(char **cmdline_p)
*/
memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
+ if (is_prot_virt_host())
+ setup_uv();
setup_memory_end();
setup_memory();
dma_contiguous_reserve(memory_end);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index e6fca5498e1f..b295090e2ce6 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -487,7 +487,7 @@ void do_signal(struct pt_regs *regs)
regs->gprs[2] = -EINTR;
break;
}
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->gprs[2] = regs->orig_gpr2;
regs->psw.addr =
@@ -514,7 +514,7 @@ void do_signal(struct pt_regs *regs)
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
regs->int_code = __NR_restart_syscall;
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a08bd2522dd9..7eaabbab2213 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
goto out_async;
if (vdso_alloc_per_cpu(lc))
@@ -701,6 +703,11 @@ int smp_cpu_get_polarization(int cpu)
return pcpu_devices[cpu].polarization;
}
+int smp_cpu_get_cpu_address(int cpu)
+{
+ return pcpu_devices[cpu].address;
+}
+
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
static int use_sigp_detection;
@@ -851,12 +858,13 @@ static void smp_init_secondary(void)
init_cpu_timer();
vtime_init();
pfault_init();
- notify_cpu_starting(smp_processor_id());
+ notify_cpu_starting(cpu);
if (topology_cpu_dedicated(cpu))
set_cpu_flag(CIF_DEDICATED_CPU);
else
clear_cpu_flag(CIF_DEDICATED_CPU);
- set_cpu_online(smp_processor_id(), true);
+ set_cpu_online(cpu, true);
+ update_cpu_masks();
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -928,6 +936,7 @@ int __cpu_disable(void)
/* Handle possible pending IPIs */
smp_handle_ext_call();
set_cpu_online(smp_processor_id(), false);
+ update_cpu_masks();
/* Disable pseudo page faults on this cpu. */
pfault_fini();
/* Disable interrupt sources via control register. */
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
deleted file mode 100644
index 75b7b307946e..000000000000
--- a/arch/s390/kernel/suspend.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Suspend support specific for s390.
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- */
-
-#include <linux/pfn.h>
-#include <linux/suspend.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <asm/ctl_reg.h>
-#include <asm/ipl.h>
-#include <asm/cio.h>
-#include <asm/sections.h>
-#include "entry.h"
-
-/*
- * The restore of the saved pages in an hibernation image will set
- * the change and referenced bits in the storage key for each page.
- * Overindication of the referenced bits after an hibernation cycle
- * does not cause any harm but the overindication of the change bits
- * would cause trouble.
- * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
- * page to the most significant byte of the associated page frame
- * number in the hibernation image.
- */
-
-/*
- * Key storage is allocated as a linked list of pages.
- * The size of the keys array is (PAGE_SIZE - sizeof(long))
- */
-struct page_key_data {
- struct page_key_data *next;
- unsigned char data[];
-};
-
-#define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *))
-
-static struct page_key_data *page_key_data;
-static struct page_key_data *page_key_rp, *page_key_wp;
-static unsigned long page_key_rx, page_key_wx;
-unsigned long suspend_zero_pages;
-
-/*
- * For each page in the hibernation image one additional byte is
- * stored in the most significant byte of the page frame number.
- * On suspend no additional memory is required but on resume the
- * keys need to be memorized until the page data has been restored.
- * Only then can the storage keys be set to their old state.
- */
-unsigned long page_key_additional_pages(unsigned long pages)
-{
- return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
-}
-
-/*
- * Free page_key_data list of arrays.
- */
-void page_key_free(void)
-{
- struct page_key_data *pkd;
-
- while (page_key_data) {
- pkd = page_key_data;
- page_key_data = pkd->next;
- free_page((unsigned long) pkd);
- }
-}
-
-/*
- * Allocate page_key_data list of arrays with enough room to store
- * one byte for each page in the hibernation image.
- */
-int page_key_alloc(unsigned long pages)
-{
- struct page_key_data *pk;
- unsigned long size;
-
- size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
- while (size--) {
- pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
- if (!pk) {
- page_key_free();
- return -ENOMEM;
- }
- pk->next = page_key_data;
- page_key_data = pk;
- }
- page_key_rp = page_key_wp = page_key_data;
- page_key_rx = page_key_wx = 0;
- return 0;
-}
-
-/*
- * Save the storage key into the upper 8 bits of the page frame number.
- */
-void page_key_read(unsigned long *pfn)
-{
- struct page *page;
- unsigned long addr;
- unsigned char key;
-
- page = pfn_to_page(*pfn);
- addr = (unsigned long) page_address(page);
- key = (unsigned char) page_get_storage_key(addr) & 0x7f;
- if (arch_test_page_nodat(page))
- key |= 0x80;
- *(unsigned char *) pfn = key;
-}
-
-/*
- * Extract the storage key from the upper 8 bits of the page frame number
- * and store it in the page_key_data list of arrays.
- */
-void page_key_memorize(unsigned long *pfn)
-{
- page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
- *(unsigned char *) pfn = 0;
- if (++page_key_wx < PAGE_KEY_DATA_SIZE)
- return;
- page_key_wp = page_key_wp->next;
- page_key_wx = 0;
-}
-
-/*
- * Get the next key from the page_key_data list of arrays and set the
- * storage key of the page referred by @address. If @address refers to
- * a "safe" page the swsusp_arch_resume code will transfer the storage
- * key from the buffer page to the original page.
- */
-void page_key_write(void *address)
-{
- struct page *page;
- unsigned char key;
-
- key = page_key_rp->data[page_key_rx];
- page_set_storage_key((unsigned long) address, key & 0x7f, 0);
- page = virt_to_page(address);
- if (key & 0x80)
- arch_set_page_nodat(page, 0);
- else
- arch_set_page_dat(page, 0);
- if (++page_key_rx >= PAGE_KEY_DATA_SIZE)
- return;
- page_key_rp = page_key_rp->next;
- page_key_rx = 0;
-}
-
-int pfn_is_nosave(unsigned long pfn)
-{
- unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
- unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
- unsigned long end_rodata_pfn = PFN_DOWN(__pa(__end_rodata)) - 1;
- unsigned long stext_pfn = PFN_DOWN(__pa(_stext));
-
- /* Always save lowcore pages (LC protection might be enabled). */
- if (pfn <= LC_PAGES)
- return 0;
- if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
- return 1;
- /* Skip memory holes and read-only pages (DCSS, ...). */
- if (pfn >= stext_pfn && pfn <= end_rodata_pfn)
- return 0;
- if (tprot(PFN_PHYS(pfn)))
- return 1;
- return 0;
-}
-
-/*
- * PM notifier callback for suspend
- */
-static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
- void *ptr)
-{
- switch (action) {
- case PM_SUSPEND_PREPARE:
- case PM_HIBERNATION_PREPARE:
- suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
- if (!suspend_zero_pages)
- return NOTIFY_BAD;
- break;
- case PM_POST_SUSPEND:
- case PM_POST_HIBERNATION:
- free_pages(suspend_zero_pages, LC_ORDER);
- break;
- default:
- return NOTIFY_DONE;
- }
- return NOTIFY_OK;
-}
-
-static int __init suspend_pm_init(void)
-{
- pm_notifier(suspend_pm_cb, 0);
- return 0;
-}
-arch_initcall(suspend_pm_init);
-
-void save_processor_state(void)
-{
- /* swsusp_arch_suspend() actually saves all cpu register contents.
- * Machine checks must be disabled since swsusp_arch_suspend() stores
- * register contents to their lowcore save areas. That's the same
- * place where register contents on machine checks would be saved.
- * To avoid register corruption disable machine checks.
- * We must also disable machine checks in the new psw mask for
- * program checks, since swsusp_arch_suspend() may generate program
- * checks. Disabling machine checks for all other new psw masks is
- * just paranoia.
- */
- local_mcck_disable();
- /* Disable lowcore protection */
- __ctl_clear_bit(0,28);
- S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
- S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
- S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
- S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
-}
-
-void restore_processor_state(void)
-{
- S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
- S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
- S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
- S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
- /* Enable lowcore protection */
- __ctl_set_bit(0,28);
- local_mcck_enable();
-}
-
-/* Called at the end of swsusp_arch_resume */
-void s390_early_resume(void)
-{
- lgr_info_log();
- channel_subsystem_reinit();
- zpci_rescan();
-}
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
deleted file mode 100644
index a7baf0b5f818..000000000000
--- a/arch/s390/kernel/swsusp.S
+++ /dev/null
@@ -1,276 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * S390 64-bit swsusp implementation
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- * Michael Holzheu <holzheu@linux.vnet.ibm.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <asm/nospec-insn.h>
-#include <asm/sigp.h>
-
-/*
- * Save register context in absolute 0 lowcore and call swsusp_save() to
- * create in-memory kernel image. The context is saved in the designated
- * "store status" memory locations (see POP).
- * We return from this function twice. The first time during the suspend to
- * disk process. The second time via the swsusp_arch_resume() function
- * (see below) in the resume process.
- * This function runs with disabled interrupts.
- */
- GEN_BR_THUNK %r14
-
- .section .text
-ENTRY(swsusp_arch_suspend)
- lg %r1,__LC_NODAT_STACK
- stmg %r6,%r15,__SF_GPRS(%r1)
- aghi %r1,-STACK_FRAME_OVERHEAD
- stg %r15,__SF_BACKCHAIN(%r1)
- lgr %r15,%r1
-
- /* Store FPU registers */
- brasl %r14,save_fpu_regs
-
- /* Deactivate DAT */
- stnsm __SF_EMPTY(%r15),0xfb
-
- /* Store prefix register on stack */
- stpx __SF_EMPTY(%r15)
-
- /* Save prefix register contents for lowcore copy */
- llgf %r10,__SF_EMPTY(%r15)
-
- /* Get pointer to save area */
- lghi %r1,0x1000
-
- /* Save CPU address */
- stap __LC_EXT_CPU_ADDR(%r0)
-
- /* Store registers */
- mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
- stam %a0,%a15,0x340(%r1) /* store access registers */
- stctg %c0,%c15,0x380(%r1) /* store control registers */
- stmg %r0,%r15,0x280(%r1) /* store general registers */
-
- stpt 0x328(%r1) /* store timer */
- stck __SF_EMPTY(%r15) /* store clock */
- stckc 0x330(%r1) /* store clock comparator */
-
- /* Update cputime accounting before going to sleep */
- lg %r0,__LC_LAST_UPDATE_TIMER
- slg %r0,0x328(%r1)
- alg %r0,__LC_SYSTEM_TIMER
- stg %r0,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),0x328(%r1)
- lg %r0,__LC_LAST_UPDATE_CLOCK
- slg %r0,__SF_EMPTY(%r15)
- alg %r0,__LC_STEAL_TIMER
- stg %r0,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__SF_EMPTY(%r15)
-
- /* Activate DAT */
- stosm __SF_EMPTY(%r15),0x04
-
- /* Set prefix page to zero */
- xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
- spx __SF_EMPTY(%r15)
-
- /* Save absolute zero pages */
- larl %r2,suspend_zero_pages
- lg %r2,0(%r2)
- lghi %r4,0
- lghi %r3,2*PAGE_SIZE
- lghi %r5,2*PAGE_SIZE
-1: mvcle %r2,%r4,0
- jo 1b
-
- /* Copy lowcore to absolute zero lowcore */
- lghi %r2,0
- lgr %r4,%r10
- lghi %r3,2*PAGE_SIZE
- lghi %r5,2*PAGE_SIZE
-1: mvcle %r2,%r4,0
- jo 1b
-
- /* Save image */
- brasl %r14,swsusp_save
-
- /* Restore prefix register and return */
- lghi %r1,0x1000
- spx 0x318(%r1)
- lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
- lghi %r2,0
- BR_EX %r14
-ENDPROC(swsusp_arch_suspend)
-
-/*
- * Restore saved memory image to correct place and restore register context.
- * Then we return to the function that called swsusp_arch_suspend().
- * swsusp_arch_resume() runs with disabled interrupts.
- */
-ENTRY(swsusp_arch_resume)
- stmg %r6,%r15,__SF_GPRS(%r15)
- lgr %r1,%r15
- aghi %r15,-STACK_FRAME_OVERHEAD
- stg %r1,__SF_BACKCHAIN(%r15)
-
- /* Make all free pages stable */
- lghi %r2,1
- brasl %r14,arch_set_page_states
-
- /* Set prefix page to zero */
- xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
- spx __SF_EMPTY(%r15)
-
- /* Deactivate DAT */
- stnsm __SF_EMPTY(%r15),0xfb
-
- /* Restore saved image */
- larl %r1,restore_pblist
- lg %r1,0(%r1)
- ltgr %r1,%r1
- jz 2f
-0:
- lg %r2,8(%r1)
- lg %r4,0(%r1)
- iske %r0,%r4
- lghi %r3,PAGE_SIZE
- lghi %r5,PAGE_SIZE
-1:
- mvcle %r2,%r4,0
- jo 1b
- lg %r2,8(%r1)
- sske %r0,%r2
- lg %r1,16(%r1)
- ltgr %r1,%r1
- jnz 0b
-2:
- ptlb /* flush tlb */
-
- /* Reset System */
- larl %r1,.Lnew_pgm_check_psw
- epsw %r2,%r3
- stm %r2,%r3,0(%r1)
- mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1)
- larl %r1,__swsusp_reset_dma
- lg %r1,0(%r1)
- BASR_EX %r14,%r1
- larl %r1,smp_cpu_mt_shift
- icm %r1,15,0(%r1)
- jz smt_done
- llgfr %r1,%r1
-smt_loop:
- sigp %r1,%r0,SIGP_SET_MULTI_THREADING
- brc 8,smt_done /* accepted */
- brc 2,smt_loop /* busy, try again */
-smt_done:
- larl %r1,.Lnew_pgm_check_psw
- lpswe 0(%r1)
-pgm_check_entry:
-
- /* Switch to original suspend CPU */
- larl %r1,.Lresume_cpu /* Resume CPU address: r2 */
- stap 0(%r1)
- llgh %r2,0(%r1)
- llgh %r1,__LC_EXT_CPU_ADDR(%r0) /* Suspend CPU address: r1 */
- cgr %r1,%r2
- je restore_registers /* r1 = r2 -> nothing to do */
- larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
- mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
-3:
- sigp %r9,%r1,SIGP_INITIAL_CPU_RESET /* sigp initial cpu reset */
- brc 8,4f /* accepted */
- brc 2,3b /* busy, try again */
-
- /* Suspend CPU not available -> panic */
- larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
- larl %r2,.Lpanic_string
- brasl %r14,sclp_early_printk_force
- larl %r3,.Ldisabled_wait_31
- lpsw 0(%r3)
-4:
- /* Switch to suspend CPU */
- sigp %r9,%r1,SIGP_RESTART /* sigp restart to suspend CPU */
- brc 2,4b /* busy, try again */
-5:
- sigp %r9,%r2,SIGP_STOP /* sigp stop to current resume CPU */
- brc 2,5b /* busy, try again */
-6: j 6b
-
-restart_suspend:
- larl %r1,.Lresume_cpu
- llgh %r2,0(%r1)
-7:
- sigp %r9,%r2,SIGP_SENSE /* sigp sense, wait for resume CPU */
- brc 8,7b /* accepted, status 0, still running */
- brc 2,7b /* busy, try again */
- tmll %r9,0x40 /* Test if resume CPU is stopped */
- jz 7b
-
-restore_registers:
- /* Restore registers */
- lghi %r13,0x1000 /* %r1 = pointer to save area */
-
- /* Ignore time spent in suspended state. */
- llgf %r1,0x318(%r13)
- stck __LC_LAST_UPDATE_CLOCK(%r1)
- spt 0x328(%r13) /* reprogram timer */
- //sckc 0x330(%r13) /* set clock comparator */
-
- lctlg %c0,%c15,0x380(%r13) /* load control registers */
- lam %a0,%a15,0x340(%r13) /* load access registers */
-
- /* Load old stack */
- lg %r15,0x2f8(%r13)
-
- /* Save prefix register */
- mvc __SF_EMPTY(4,%r15),0x318(%r13)
-
- /* Restore absolute zero pages */
- lghi %r2,0
- larl %r4,suspend_zero_pages
- lg %r4,0(%r4)
- lghi %r3,2*PAGE_SIZE
- lghi %r5,2*PAGE_SIZE
-1: mvcle %r2,%r4,0
- jo 1b
-
- /* Restore prefix register */
- spx __SF_EMPTY(%r15)
-
- /* Activate DAT */
- stosm __SF_EMPTY(%r15),0x04
-
- /* Make all free pages unstable */
- lghi %r2,0
- brasl %r14,arch_set_page_states
-
- /* Call arch specific early resume code */
- brasl %r14,s390_early_resume
-
- /* Return 0 */
- lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
- lghi %r2,0
- BR_EX %r14
-ENDPROC(swsusp_arch_resume)
-
- .section .data..nosave,"aw",@progbits
- .align 8
-.Ldisabled_wait_31:
- .long 0x000a0000,0x00000000
-.Lpanic_string:
- .asciz "Resume not possible because suspend CPU is no longer available\n"
- .align 8
-.Lrestart_suspend_psw:
- .quad 0x0000000180000000,restart_suspend
-.Lnew_pgm_check_psw:
- .quad 0,pgm_check_entry
-.Lresume_cpu:
- .byte 0,0
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 3627953007ed..5f70cefc13e4 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -26,7 +26,6 @@
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
-#include <asm/numa.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
@@ -63,8 +62,6 @@ static struct mask_info drawer_info;
struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
-cpumask_t cpus_with_topology;
-
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
cpumask_t mask;
@@ -86,11 +83,12 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
cpumask_copy(&mask, cpu_present_mask);
break;
default:
- /* fallthrough */
+ fallthrough;
case TOPOLOGY_MODE_SINGLE:
cpumask_copy(&mask, cpumask_of(cpu));
break;
}
+ cpumask_and(&mask, &mask, cpu_online_mask);
return mask;
}
@@ -106,6 +104,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
for (i = 0; i <= smp_cpu_mtid; i++)
if (cpu_present(cpu + i))
cpumask_set_cpu(cpu + i, &mask);
+ cpumask_and(&mask, &mask, cpu_online_mask);
return mask;
}
@@ -138,7 +137,6 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
- cpumask_set_cpu(lcpu + i, &cpus_with_topology);
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
}
@@ -245,10 +243,10 @@ int topology_set_cpu_management(int fc)
return rc;
}
-static void update_cpu_masks(void)
+void update_cpu_masks(void)
{
- struct cpu_topology_s390 *topo;
- int cpu, id;
+ struct cpu_topology_s390 *topo, *topo_package, *topo_sibling;
+ int cpu, sibling, pkg_first, smt_first, id;
for_each_possible_cpu(cpu) {
topo = &cpu_topology[cpu];
@@ -256,6 +254,7 @@ static void update_cpu_masks(void)
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
+ topo->booted_cores = 0;
if (topology_mode != TOPOLOGY_MODE_HW) {
id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
topo->thread_id = cpu;
@@ -263,11 +262,23 @@ static void update_cpu_masks(void)
topo->socket_id = id;
topo->book_id = id;
topo->drawer_id = id;
- if (cpu_present(cpu))
- cpumask_set_cpu(cpu, &cpus_with_topology);
}
}
- numa_update_cpu_topology();
+ for_each_online_cpu(cpu) {
+ topo = &cpu_topology[cpu];
+ pkg_first = cpumask_first(&topo->core_mask);
+ topo_package = &cpu_topology[pkg_first];
+ if (cpu == pkg_first) {
+ for_each_cpu(sibling, &topo->core_mask) {
+ topo_sibling = &cpu_topology[sibling];
+ smt_first = cpumask_first(&topo_sibling->thread_mask);
+ if (sibling == smt_first)
+ topo_package->booted_cores++;
+ }
+ } else {
+ topo->booted_cores = topo_package->booted_cores;
+ }
+ }
}
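
booted_cores is computed once per package: the package's first online CPU counts the thread-group leaders among its core siblings, and every other CPU simply inherits that count. A worked example under an assumed topology:

    /*
     * Package with online CPUs 0-3, SMT-2 thread masks {0,1} and
     * {2,3}:
     *   cpu 0 (pkg_first): smt leaders are 0 and 2 -> booted_cores = 2
     *   cpus 1-3: copy cpu 0's booted_cores (2)
     */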
void store_topology(struct sysinfo_15_1_x *info)
@@ -289,7 +300,6 @@ static int __arch_update_cpu_topology(void)
int rc = 0;
mutex_lock(&smp_cpu_state_mutex);
- cpumask_clear(&cpus_with_topology);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
store_topology(info);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index dc75588d7894..ff9cc4c3290e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -271,7 +271,7 @@ void kernel_stack_overflow(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(kernel_stack_overflow);
-static void test_monitor_call(void)
+static void __init test_monitor_call(void)
{
int val = 1;
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
new file mode 100644
index 000000000000..c86d654351d1
--- /dev/null
+++ b/arch/s390/kernel/uv.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Ultravisor functions and initialization
+ *
+ * Copyright IBM Corp. 2019, 2020
+ */
+#define KMSG_COMPONENT "prot_virt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/bitmap.h>
+#include <linux/memblock.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+#include <asm/uv.h>
+
+/* the bootdata_preserved fields come from the ones in arch/s390/boot/uv.c */
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+int __bootdata_preserved(prot_virt_guest);
+#endif
+
+#if IS_ENABLED(CONFIG_KVM)
+int prot_virt_host;
+EXPORT_SYMBOL(prot_virt_host);
+struct uv_info __bootdata_preserved(uv_info);
+EXPORT_SYMBOL(uv_info);
+
+static int __init prot_virt_setup(char *val)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(val, &enabled);
+ if (!rc && enabled)
+ prot_virt_host = 1;
+
+ if (is_prot_virt_guest() && prot_virt_host) {
+ prot_virt_host = 0;
+ pr_warn("Protected virtualization not available in protected guests.");
+ }
+
+ if (prot_virt_host && !test_facility(158)) {
+ prot_virt_host = 0;
+ pr_warn("Protected virtualization not supported by the hardware.");
+ }
+
+ return rc;
+}
+early_param("prot_virt", prot_virt_setup);
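
This registers a "prot_virt=" early kernel command-line switch: booting with prot_virt=1 requests host-side protected virtualization, which is then vetoed when running as a protected guest or when the hardware lacks facility 158, the Ultravisor-call facility.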
+
+static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
+{
+ struct uv_cb_init uvcb = {
+ .header.cmd = UVC_CMD_INIT_UV,
+ .header.len = sizeof(uvcb),
+ .stor_origin = stor_base,
+ .stor_len = stor_len,
+ };
+
+ if (uv_call(0, (uint64_t)&uvcb)) {
+ pr_err("Ultravisor init failed with rc: 0x%x rrc: 0%x\n",
+ uvcb.header.rc, uvcb.header.rrc);
+ return -1;
+ }
+ return 0;
+}
+
+void __init setup_uv(void)
+{
+ unsigned long uv_stor_base;
+
+ uv_stor_base = (unsigned long)memblock_alloc_try_nid(
+ uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+ if (!uv_stor_base) {
+ pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
+ uv_info.uv_base_stor_len);
+ goto fail;
+ }
+
+ if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
+ memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
+ goto fail;
+ }
+
+ pr_info("Reserving %luMB as ultravisor base storage\n",
+ uv_info.uv_base_stor_len >> 20);
+ return;
+fail:
+ pr_info("Disabling support for protected virtualization");
+ prot_virt_host = 0;
+}
+
+void adjust_to_uv_max(unsigned long *vmax)
+{
+ *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+}
+
+/*
+ * Requests the Ultravisor to pin the page in the shared state. This will
+ * cause an intercept when the guest attempts to unshare the pinned page.
+ */
+static int uv_pin_shared(unsigned long paddr)
+{
+ struct uv_cb_cfs uvcb = {
+ .header.cmd = UVC_CMD_PIN_PAGE_SHARED,
+ .header.len = sizeof(uvcb),
+ .paddr = paddr,
+ };
+
+ if (uv_call(0, (u64)&uvcb))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Requests the Ultravisor to encrypt a guest page and make it
+ * accessible to the host for paging (export).
+ *
+ * @paddr: Absolute host address of page to be exported
+ */
+int uv_convert_from_secure(unsigned long paddr)
+{
+ struct uv_cb_cfs uvcb = {
+ .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
+ .header.len = sizeof(uvcb),
+ .paddr = paddr
+ };
+
+ if (uv_call(0, (u64)&uvcb))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Calculate the expected ref_count for a page that would otherwise have no
+ * further pins. This was cribbed from similar functions in other places in
+ * the kernel, but with some slight modifications. We know that a secure
+ * page can not be a huge page for example.
+ */
+static int expected_page_refs(struct page *page)
+{
+ int res;
+
+ res = page_mapcount(page);
+ if (PageSwapCache(page)) {
+ res++;
+ } else if (page_mapping(page)) {
+ res++;
+ if (page_has_private(page))
+ res++;
+ }
+ return res;
+}
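
The sum mirrors the references a quiescent page legitimately holds: one per mapping, plus one for swap-cache membership, or one for page-cache membership plus one more for attached private/buffer data. make_secure_pte() below freezes the refcount at exactly this value, so any transient extra reference (a pagevec, a concurrent GUP, ...) makes the freeze fail and triggers the drain-and-retry logic in gmap_make_secure(). An assumed scenario for illustration:

    /*
     * Anonymous page, mapped by exactly one process, not in swap
     * cache: expected_page_refs() == 1, so page_ref_freeze(page, 1)
     * succeeds only while that mapping holds the sole reference.
     */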
+
+static int make_secure_pte(pte_t *ptep, unsigned long addr,
+ struct page *exp_page, struct uv_cb_header *uvcb)
+{
+ pte_t entry = READ_ONCE(*ptep);
+ struct page *page;
+ int expected, rc = 0;
+
+ if (!pte_present(entry))
+ return -ENXIO;
+ if (pte_val(entry) & _PAGE_INVALID)
+ return -ENXIO;
+
+ page = pte_page(entry);
+ if (page != exp_page)
+ return -ENXIO;
+ if (PageWriteback(page))
+ return -EAGAIN;
+ expected = expected_page_refs(page);
+ if (!page_ref_freeze(page, expected))
+ return -EBUSY;
+ set_bit(PG_arch_1, &page->flags);
+ rc = uv_call(0, (u64)uvcb);
+ page_ref_unfreeze(page, expected);
+ /* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
+ if (rc)
+ rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
+ return rc;
+}
+
+/*
+ * Requests the Ultravisor to make a page accessible to a guest.
+ * If it's brought in the first time, it will be cleared. If
+ * it has been exported before, it will be decrypted and integrity
+ * checked.
+ */
+int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+{
+ struct vm_area_struct *vma;
+ bool local_drain = false;
+ spinlock_t *ptelock;
+ unsigned long uaddr;
+ struct page *page;
+ pte_t *ptep;
+ int rc;
+
+again:
+ rc = -EFAULT;
+ down_read(&gmap->mm->mmap_sem);
+
+ uaddr = __gmap_translate(gmap, gaddr);
+ if (IS_ERR_VALUE(uaddr))
+ goto out;
+ vma = find_vma(gmap->mm, uaddr);
+ if (!vma)
+ goto out;
+ /*
+ * Secure pages cannot be huge and userspace should not combine both.
+ * In case userspace does it anyway this will result in an -EFAULT for
+ * the unpack. The guest thus never reaches secure mode. If
+ * userspace plays dirty tricks by mapping huge pages later
+ * on, this will result in a segmentation fault.
+ */
+ if (is_vm_hugetlb_page(vma))
+ goto out;
+
+ rc = -ENXIO;
+ page = follow_page(vma, uaddr, FOLL_WRITE);
+ if (IS_ERR_OR_NULL(page))
+ goto out;
+
+ lock_page(page);
+ ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
+ rc = make_secure_pte(ptep, uaddr, page, uvcb);
+ pte_unmap_unlock(ptep, ptelock);
+ unlock_page(page);
+out:
+ up_read(&gmap->mm->mmap_sem);
+
+ if (rc == -EAGAIN) {
+ wait_on_page_writeback(page);
+ } else if (rc == -EBUSY) {
+ /*
+ * If we have tried a local drain and the page refcount
+ * still does not match our expected safe value, try with a
+ * system wide drain. This is needed if the pagevecs holding
+ * the page are on a different CPU.
+ */
+ if (local_drain) {
+ lru_add_drain_all();
+ /* We give up here, and let the caller try again */
+ return -EAGAIN;
+ }
+ /*
+ * We are here if the page refcount does not match the
+ * expected safe value. The main culprits are usually
+ * pagevecs. With lru_add_drain() we drain the pagevecs
+ * on the local CPU so that hopefully the refcount will
+ * reach the expected safe value.
+ */
+ lru_add_drain();
+ local_drain = true;
+ /* And now we try again immediately after draining */
+ goto again;
+ } else if (rc == -ENXIO) {
+ if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
+ return -EFAULT;
+ return -EAGAIN;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_make_secure);
+
+int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
+{
+ struct uv_cb_cts uvcb = {
+ .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
+ .header.len = sizeof(uvcb),
+ .guest_handle = gmap->guest_handle,
+ .gaddr = gaddr,
+ };
+
+ return gmap_make_secure(gmap, gaddr, &uvcb);
+}
+EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
+
+/*
+ * To be called with the page locked or with an extra reference! This will
+ * prevent gmap_make_secure from touching the page concurrently. Having two
+ * parallel arch_make_page_accessible calls is fine, as the UV calls become
+ * no-ops if the page is already exported.
+ */
+int arch_make_page_accessible(struct page *page)
+{
+ int rc = 0;
+
+ /* Hugepage cannot be protected, so nothing to do */
+ if (PageHuge(page))
+ return 0;
+
+ /*
+ * PG_arch_1 is used in 3 places:
+ * 1. for kernel page tables during early boot
+ * 2. for storage keys of huge pages and KVM
+ * 3. as an indication that this page might be secure. This can
+ * overindicate, e.g. we set the bit before calling
+ * convert_to_secure.
+ * As secure pages are never huge, all 3 variants can co-exist.
+ */
+ if (!test_bit(PG_arch_1, &page->flags))
+ return 0;
+
+ rc = uv_pin_shared(page_to_phys(page));
+ if (!rc) {
+ clear_bit(PG_arch_1, &page->flags);
+ return 0;
+ }
+
+ rc = uv_convert_from_secure(page_to_phys(page));
+ if (!rc) {
+ clear_bit(PG_arch_1, &page->flags);
+ return 0;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(arch_make_page_accessible);
+
+#endif
+
+#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
+static ssize_t uv_query_facilities(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
+ uv_info.inst_calls_list[0],
+ uv_info.inst_calls_list[1],
+ uv_info.inst_calls_list[2],
+ uv_info.inst_calls_list[3]);
+}
+
+static struct kobj_attribute uv_query_facilities_attr =
+ __ATTR(facilities, 0444, uv_query_facilities, NULL);
+
+static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ uv_info.max_guest_cpus);
+}
+
+static struct kobj_attribute uv_query_max_guest_cpus_attr =
+ __ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);
+
+static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ uv_info.max_num_sec_conf);
+}
+
+static struct kobj_attribute uv_query_max_guest_vms_attr =
+ __ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);
+
+static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lx\n",
+ uv_info.max_sec_stor_addr);
+}
+
+static struct kobj_attribute uv_query_max_guest_addr_attr =
+ __ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
+
+static struct attribute *uv_query_attrs[] = {
+ &uv_query_facilities_attr.attr,
+ &uv_query_max_guest_cpus_attr.attr,
+ &uv_query_max_guest_vms_attr.attr,
+ &uv_query_max_guest_addr_attr.attr,
+ NULL,
+};
+
+static struct attribute_group uv_query_attr_group = {
+ .attrs = uv_query_attrs,
+};
+
+static struct kset *uv_query_kset;
+static struct kobject *uv_kobj;
+
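+/*
+ * The query attributes are exposed under /sys/firmware/uv/query/
+ * (facilities, max_cpus, max_guests, max_address).
+ */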
+static int __init uv_info_init(void)
+{
+ int rc = -ENOMEM;
+
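+ /* facility bit 158 indicates the presence of the Ultravisor call facility */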
+ if (!test_facility(158))
+ return 0;
+
+ uv_kobj = kobject_create_and_add("uv", firmware_kobj);
+ if (!uv_kobj)
+ return -ENOMEM;
+
+ uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
+ if (!uv_query_kset)
+ goto out_kobj;
+
+ rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
+ if (!rc)
+ return 0;
+
+ kset_unregister(uv_query_kset);
+out_kobj:
+ kobject_del(uv_kobj);
+ kobject_put(uv_kobj);
+ return rc;
+}
+device_initcall(uv_info_init);
+#endif
diff --git a/arch/s390/kernel/vdso64/.gitignore b/arch/s390/kernel/vdso64/.gitignore
index 3fd18cf9fec2..4ec80685fecc 100644
--- a/arch/s390/kernel/vdso64/.gitignore
+++ b/arch/s390/kernel/vdso64/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso64.lds
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 05ee90a5ea08..12decca22e7c 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -9,6 +9,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-objs += diag.o gaccess.o guestdbg.o vsie.o
+kvm-objs += diag.o gaccess.o guestdbg.o vsie.o pv.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 3fb54ec2cf3e..563429dece03 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -2,7 +2,7 @@
/*
* handling diagnose instructions
*
- * Copyright IBM Corp. 2008, 2011
+ * Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -201,6 +201,10 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+ /*
+ * no need to check the return value of vcpu_stop as it can only fail
+ * for protvirt, but protvirt implies user controlled cpu state
+ */
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 07d30ffcfa41..47a67a958107 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -505,7 +505,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
switch (prot) {
case PROT_TYPE_IEP:
tec->b61 = 1;
- /* FALL THROUGH */
+ fallthrough;
case PROT_TYPE_LA:
tec->b56 = 1;
break;
@@ -514,12 +514,12 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
break;
case PROT_TYPE_ALC:
tec->b60 = 1;
- /* FALL THROUGH */
+ fallthrough;
case PROT_TYPE_DAT:
tec->b61 = 1;
break;
}
- /* FALL THROUGH */
+ fallthrough;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
case PGM_REGION_FIRST_TRANS:
@@ -534,7 +534,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
tec->addr = gva >> PAGE_SHIFT;
tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
- /* FALL THROUGH */
+ fallthrough;
case PGM_ALEN_TRANSLATION:
case PGM_ALE_SEQUENCE:
case PGM_ASTE_VALIDITY:
@@ -677,7 +677,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
dat_protection |= rfte.p;
ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
}
- /* fallthrough */
+ fallthrough;
case ASCE_TYPE_REGION2: {
union region2_table_entry rste;
@@ -695,7 +695,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
dat_protection |= rste.p;
ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
}
- /* fallthrough */
+ fallthrough;
case ASCE_TYPE_REGION3: {
union region3_table_entry rtte;
@@ -723,7 +723,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
dat_protection |= rtte.fc0.p;
ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
}
- /* fallthrough */
+ fallthrough;
case ASCE_TYPE_SEGMENT: {
union segment_table_entry ste;
@@ -1050,7 +1050,8 @@ shadow_r2t:
rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
if (rc)
return rc;
- } /* fallthrough */
+ }
+ fallthrough;
case ASCE_TYPE_REGION2: {
union region2_table_entry rste;
@@ -1076,7 +1077,8 @@ shadow_r3t:
rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
if (rc)
return rc;
- } /* fallthrough */
+ }
+ fallthrough;
case ASCE_TYPE_REGION3: {
union region3_table_entry rtte;
@@ -1111,7 +1113,8 @@ shadow_sgt:
rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
if (rc)
return rc;
- } /* fallthrough */
+ }
+ fallthrough;
case ASCE_TYPE_SEGMENT: {
union segment_table_entry ste;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index a389fa85cca2..e7a7c499a73f 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -2,7 +2,7 @@
/*
* in-kernel handling for sie intercepts
*
- * Copyright IBM Corp. 2008, 2014
+ * Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -12,10 +12,10 @@
#include <linux/errno.h>
#include <linux/pagemap.h>
-#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>
+#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -79,6 +79,10 @@ static int handle_stop(struct kvm_vcpu *vcpu)
return rc;
}
+ /*
+ * no need to check the return value of vcpu_stop as it can only fail
+ * for protvirt, but protvirt implies user controlled cpu state
+ */
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm_s390_vcpu_stop(vcpu);
return -EOPNOTSUPP;
@@ -231,6 +235,13 @@ static int handle_prog(struct kvm_vcpu *vcpu)
vcpu->stat.exit_program_interruption++;
+ /*
+ * Intercept 8 indicates a loop of specification exceptions
+ * for protected guests.
+ */
+ if (kvm_s390_pv_cpu_is_protected(vcpu))
+ return -EOPNOTSUPP;
+
if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
rc = kvm_s390_handle_per_event(vcpu);
if (rc)
@@ -384,7 +395,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
goto out;
}
- if (addr & ~PAGE_MASK)
+ if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
sctns = (void *)get_zeroed_page(GFP_KERNEL);
@@ -395,10 +406,15 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
out:
if (!cc) {
- r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
- if (r) {
- free_page((unsigned long)sctns);
- return kvm_s390_inject_prog_cond(vcpu, r);
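+ /*
+ * For protected guests the response is copied into the
+ * SIDA instead of guest memory.
+ */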
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
+ sctns, PAGE_SIZE);
+ } else {
+ r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
+ if (r) {
+ free_page((unsigned long)sctns);
+ return kvm_s390_inject_prog_cond(vcpu, r);
+ }
}
}
@@ -444,6 +460,77 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
+static int handle_pv_spx(struct kvm_vcpu *vcpu)
+{
+ u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
+
+ kvm_s390_set_prefix(vcpu, pref);
+ trace_kvm_s390_handle_prefix(vcpu, 1, pref);
+ return 0;
+}
+
+static int handle_pv_sclp(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ /*
+ * 2 cases:
+ * a: an sccb answering interrupt was already pending or in flight.
+ * As the sccb value is not known we can simply set some value to
+ * trigger delivery of a saved SCCB. UV will then use its saved
+ * copy of the SCCB value.
+ * b: an error SCCB interrupt needs to be injected so we also inject
+ * a fake SCCB address. Firmware will use the proper one.
+ * This makes sure, that both errors and real sccb returns will only
+ * be delivered after a notification intercept (instruction has
+ * finished) but not after others.
+ */
+ fi->srv_signal.ext_params |= 0x43000;
+ set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+ clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+static int handle_pv_uvc(struct kvm_vcpu *vcpu)
+{
+ struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
+ struct uv_cb_cts uvcb = {
+ .header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
+ .header.len = sizeof(uvcb),
+ .guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
+ .gaddr = guest_uvcb->paddr,
+ };
+ int rc;
+
+ if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
+ WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
+ guest_uvcb->header.cmd);
+ return 0;
+ }
+ rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
+ /*
+ * If the unpin did not succeed, the guest will exit again for the UVC
+ * and we will retry the unpin.
+ */
+ if (rc == -EINVAL)
+ return 0;
+ return rc;
+}
+
+static int handle_pv_notification(struct kvm_vcpu *vcpu)
+{
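+ /* instruction opcodes: 0xb210 SPX, 0xb220 SERVC, 0xb9a4 UVC */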
+ if (vcpu->arch.sie_block->ipa == 0xb210)
+ return handle_pv_spx(vcpu);
+ if (vcpu->arch.sie_block->ipa == 0xb220)
+ return handle_pv_sclp(vcpu);
+ if (vcpu->arch.sie_block->ipa == 0xb9a4)
+ return handle_pv_uvc(vcpu);
+
+ return handle_instruction(vcpu);
+}
+
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
int rc, per_rc = 0;
@@ -480,6 +567,28 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
case ICPT_KSS:
rc = kvm_s390_skey_check_enable(vcpu);
break;
+ case ICPT_MCHKREQ:
+ case ICPT_INT_ENABLE:
+ /*
+ * PSW bit 13 or a CR (0, 6, 14) changed and we might
+ * now be able to deliver interrupts. The pre-run code
+ * will take care of this.
+ */
+ rc = 0;
+ break;
+ case ICPT_PV_INSTR:
+ rc = handle_instruction(vcpu);
+ break;
+ case ICPT_PV_NOTIFY:
+ rc = handle_pv_notification(vcpu);
+ break;
+ case ICPT_PV_PREF:
+ rc = 0;
+ gmap_convert_to_secure(vcpu->arch.gmap,
+ kvm_s390_get_prefix(vcpu));
+ gmap_convert_to_secure(vcpu->arch.gmap,
+ kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c06c89d370a7..8191106bf7b9 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2,7 +2,7 @@
/*
* handling kvm guest interrupts
*
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
@@ -324,8 +324,11 @@ static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
- return vcpu->kvm->arch.float_int.pending_irqs |
- vcpu->arch.local_int.pending_irqs;
+ unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
+ vcpu->arch.local_int.pending_irqs;
+
+ pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
+ return pending;
}
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
@@ -383,10 +386,18 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
- if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
+ if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
+ __clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
+ }
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
+ /* PV guest cpus can have only a single interruption injected at a time. */
+ if (kvm_s390_pv_cpu_is_protected(vcpu) &&
+ vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
+ active_mask &= ~(IRQ_PEND_EXT_II_MASK |
+ IRQ_PEND_IO_MASK |
+ IRQ_PEND_MCHK_MASK);
/*
* Check both floating and local interrupt's cr14 because
* bit IRQ_PEND_MCHK_REP could be set in both cases.
@@ -479,19 +490,23 @@ static void set_intercept_indicators(struct kvm_vcpu *vcpu)
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- int rc;
+ int rc = 0;
vcpu->stat.deliver_cputm++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
0, 0);
-
- rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
- (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
+ vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
+ } else {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ }
clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
@@ -499,19 +514,23 @@ static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- int rc;
+ int rc = 0;
vcpu->stat.deliver_ckc++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
0, 0);
-
- rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
- (u16 __user *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
+ vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
+ } else {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
+ (u16 __user *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ }
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
@@ -553,6 +572,20 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
union mci mci;
int rc;
+ /*
+ * All other possible payload for a machine check (e.g. the register
+ * contents in the save area) will be handled by the ultravisor, as
+ * the hypervisor does not have the needed information for
+ * protected guests.
+ */
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
+ vcpu->arch.sie_block->mcic = mchk->mcic;
+ vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
+ vcpu->arch.sie_block->edc = mchk->ext_damage_code;
+ return 0;
+ }
+
mci.val = mchk->mcic;
/* take care of lazy register loading */
save_fpu_regs();
@@ -696,17 +729,21 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- int rc;
+ int rc = 0;
VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
vcpu->stat.deliver_restart_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
- rc = write_guest_lc(vcpu,
- offsetof(struct lowcore, restart_old_psw),
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
+ } else {
+ rc = write_guest_lc(vcpu,
+ offsetof(struct lowcore, restart_old_psw),
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ }
clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
return rc ? -EFAULT : 0;
}
@@ -748,6 +785,12 @@ static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
vcpu->stat.deliver_emergency_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
cpu_addr, 0);
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
+ vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
+ vcpu->arch.sie_block->extcpuaddr = cpu_addr;
+ return 0;
+ }
rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
(u16 *)__LC_EXT_INT_CODE);
@@ -776,6 +819,12 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_EXTERNAL_CALL,
extcall.code, 0);
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
+ vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
+ vcpu->arch.sie_block->extcpuaddr = extcall.code;
+ return 0;
+ }
rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
(u16 *)__LC_EXT_INT_CODE);
@@ -787,6 +836,21 @@ static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
+static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
+{
+ switch (code) {
+ case PGM_SPECIFICATION:
+ vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
+ break;
+ case PGM_OPERAND:
+ vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -807,6 +871,10 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
pgm_info.code, 0);
+ /* PER is handled by the ultravisor */
+ if (kvm_s390_pv_cpu_is_protected(vcpu))
+ return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);
+
switch (pgm_info.code & ~PGM_PER) {
case PGM_AFX_TRANSLATION:
case PGM_ASX_TRANSLATION:
@@ -818,7 +886,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
nullifying = true;
- /* fall through */
+ fallthrough;
case PGM_SPACE_SWITCH:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
@@ -902,20 +970,49 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
+#define SCCB_MASK 0xFFFFFFF8
+#define SCCB_EVENT_PENDING 0x3
+
+static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
+{
+ int rc;
+
+ if (kvm_s390_pv_cpu_get_handle(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
+ vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
+ vcpu->arch.sie_block->eiparams = parm;
+ return 0;
+ }
+
+ rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, parm,
+ (u32 *)__LC_EXT_PARAMS);
+
+ return rc ? -EFAULT : 0;
+}
+
static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_ext_info ext;
- int rc = 0;
spin_lock(&fi->lock);
- if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
+ if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
+ !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
spin_unlock(&fi->lock);
return 0;
}
ext = fi->srv_signal;
memset(&fi->srv_signal, 0, sizeof(ext));
clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+ clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
+ if (kvm_s390_pv_cpu_is_protected(vcpu))
+ set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
@@ -924,16 +1021,31 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
ext.ext_params, 0);
- rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
+ return write_sclp(vcpu, ext.ext_params);
+}
- return rc ? -EFAULT : 0;
+static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_ext_info ext;
+
+ spin_lock(&fi->lock);
+ if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
+ spin_unlock(&fi->lock);
+ return 0;
+ }
+ ext = fi->srv_signal;
+ /* only clear the event bit */
+ fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
+ clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+
+ VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
+ vcpu->stat.deliver_service_signal++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
+ ext.ext_params, 0);
+
+ return write_sclp(vcpu, SCCB_EVENT_PENDING);
}
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
@@ -1028,6 +1140,15 @@ static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
int rc;
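+ /*
+ * For protected guests the interrupt payload is handed to the
+ * ultravisor via the SIE injection controls instead of being
+ * written into the guest lowcore.
+ */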
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
+ vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
+ vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
+ vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
+ vcpu->arch.sie_block->io_int_word = io->io_int_word;
+ return 0;
+ }
+
rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
@@ -1329,6 +1450,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
case IRQ_PEND_EXT_SERVICE:
rc = __deliver_service(vcpu);
break;
+ case IRQ_PEND_EXT_SERVICE_EV:
+ rc = __deliver_service_ev(vcpu);
+ break;
case IRQ_PEND_PFAULT_DONE:
rc = __deliver_pfault_done(vcpu);
break;
@@ -1421,7 +1545,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
return -EINVAL;
- if (sclp.has_sigpif)
+ if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
return sca_inject_ext_call(vcpu, src_id);
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
@@ -1681,9 +1805,6 @@ out:
return inti;
}
-#define SCCB_MASK 0xFFFFFFF8
-#define SCCB_EVENT_PENDING 0x3
-
static int __inject_service(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
@@ -1692,6 +1813,11 @@ static int __inject_service(struct kvm *kvm,
kvm->stat.inject_service_signal++;
spin_lock(&fi->lock);
fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
+
+ /* We always allow events, track them separately from the sccb ints */
+ if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
+ set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
+
/*
* Early versions of the QEMU s390 bios will inject several
* service interrupts after another without handling a
@@ -1773,7 +1899,14 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
kvm->stat.inject_io++;
isc = int_word_to_isc(inti->io.io_int_word);
- if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
+ /*
+ * Do not make use of gisa in protected mode. We do not use the lock
+ * checking variant as this is just a performance optimization and we
+ * do not hold the lock here. This is ok as the code will pick
+ * interrupts from both "lists" for delivery.
+ */
+ if (!kvm_s390_pv_get_handle(kvm) &&
+ gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
gisa_set_ipm_gisc(gi->origin, isc);
kfree(inti);
@@ -1834,7 +1967,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
if (!(type & KVM_S390_INT_IO_AI_MASK &&
- kvm->arch.gisa_int.origin))
+ kvm->arch.gisa_int.origin) ||
+ kvm_s390_pv_cpu_get_handle(dst_vcpu))
kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
break;
default:
@@ -2080,6 +2214,10 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
int i;
+ mutex_lock(&kvm->lock);
+ if (!kvm_s390_pv_is_protected(kvm))
+ fi->masked_irqs = 0;
+ mutex_unlock(&kvm->lock);
spin_lock(&fi->lock);
fi->pending_irqs = 0;
memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
@@ -2146,7 +2284,8 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
n++;
}
}
- if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
+ if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
+ test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
@@ -2327,9 +2466,6 @@ static int register_io_adapter(struct kvm_device *dev,
if (!adapter)
return -ENOMEM;
- INIT_LIST_HEAD(&adapter->maps);
- init_rwsem(&adapter->maps_lock);
- atomic_set(&adapter->nr_maps, 0);
adapter->id = adapter_info.id;
adapter->isc = adapter_info.isc;
adapter->maskable = adapter_info.maskable;
@@ -2354,87 +2490,12 @@ int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
return ret;
}
-static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
-{
- struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
- struct s390_map_info *map;
- int ret;
-
- if (!adapter || !addr)
- return -EINVAL;
-
- map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map) {
- ret = -ENOMEM;
- goto out;
- }
- INIT_LIST_HEAD(&map->list);
- map->guest_addr = addr;
- map->addr = gmap_translate(kvm->arch.gmap, addr);
- if (map->addr == -EFAULT) {
- ret = -EFAULT;
- goto out;
- }
- ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
- if (ret < 0)
- goto out;
- BUG_ON(ret != 1);
- down_write(&adapter->maps_lock);
- if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
- list_add_tail(&map->list, &adapter->maps);
- ret = 0;
- } else {
- put_page(map->page);
- ret = -EINVAL;
- }
- up_write(&adapter->maps_lock);
-out:
- if (ret)
- kfree(map);
- return ret;
-}
-
-static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
-{
- struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
- struct s390_map_info *map, *tmp;
- int found = 0;
-
- if (!adapter || !addr)
- return -EINVAL;
-
- down_write(&adapter->maps_lock);
- list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
- if (map->guest_addr == addr) {
- found = 1;
- atomic_dec(&adapter->nr_maps);
- list_del(&map->list);
- put_page(map->page);
- kfree(map);
- break;
- }
- }
- up_write(&adapter->maps_lock);
-
- return found ? 0 : -EINVAL;
-}
-
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
int i;
- struct s390_map_info *map, *tmp;
- for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
- if (!kvm->arch.adapters[i])
- continue;
- list_for_each_entry_safe(map, tmp,
- &kvm->arch.adapters[i]->maps, list) {
- list_del(&map->list);
- put_page(map->page);
- kfree(map);
- }
+ for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
kfree(kvm->arch.adapters[i]);
- }
}
static int modify_io_adapter(struct kvm_device *dev,
@@ -2456,11 +2517,14 @@ static int modify_io_adapter(struct kvm_device *dev,
if (ret > 0)
ret = 0;
break;
+ /*
+ * The following operations are no longer needed and are therefore no-ops.
+ * The gpa to hva translation is done when an IRQ route is set up. The
+ * set_irq code uses get_user_pages_remote() to do the actual write.
+ */
case KVM_S390_IO_ADAPTER_MAP:
- ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
- break;
case KVM_S390_IO_ADAPTER_UNMAP:
- ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
+ ret = 0;
break;
default:
ret = -EINVAL;
@@ -2699,19 +2763,15 @@ static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
-static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
- u64 addr)
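+/* Pin the userspace page that backs the given adapter indicator address. */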
+static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
- struct s390_map_info *map;
+ struct page *page = NULL;
- if (!adapter)
- return NULL;
-
- list_for_each_entry(map, &adapter->maps, list) {
- if (map->guest_addr == addr)
- return map;
- }
- return NULL;
+ down_read(&kvm->mm->mmap_sem);
+ get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
+ &page, NULL, NULL);
+ up_read(&kvm->mm->mmap_sem);
+ return page;
}
static int adapter_indicators_set(struct kvm *kvm,
@@ -2720,30 +2780,35 @@ static int adapter_indicators_set(struct kvm *kvm,
{
unsigned long bit;
int summary_set, idx;
- struct s390_map_info *info;
+ struct page *ind_page, *summary_page;
void *map;
- info = get_map_info(adapter, adapter_int->ind_addr);
- if (!info)
+ ind_page = get_map_page(kvm, adapter_int->ind_addr);
+ if (!ind_page)
return -1;
- map = page_address(info->page);
- bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
- set_bit(bit, map);
- idx = srcu_read_lock(&kvm->srcu);
- mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
- set_page_dirty_lock(info->page);
- info = get_map_info(adapter, adapter_int->summary_addr);
- if (!info) {
- srcu_read_unlock(&kvm->srcu, idx);
+ summary_page = get_map_page(kvm, adapter_int->summary_addr);
+ if (!summary_page) {
+ put_page(ind_page);
return -1;
}
- map = page_address(info->page);
- bit = get_ind_bit(info->addr, adapter_int->summary_offset,
- adapter->swap);
+
+ idx = srcu_read_lock(&kvm->srcu);
+ map = page_address(ind_page);
+ bit = get_ind_bit(adapter_int->ind_addr,
+ adapter_int->ind_offset, adapter->swap);
+ set_bit(bit, map);
+ mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
+ set_page_dirty_lock(ind_page);
+ map = page_address(summary_page);
+ bit = get_ind_bit(adapter_int->summary_addr,
+ adapter_int->summary_offset, adapter->swap);
summary_set = test_and_set_bit(bit, map);
- mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
- set_page_dirty_lock(info->page);
+ mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
+ set_page_dirty_lock(summary_page);
srcu_read_unlock(&kvm->srcu, idx);
+
+ put_page(ind_page);
+ put_page(summary_page);
return summary_set ? 0 : 1;
}
@@ -2765,9 +2830,7 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
adapter = get_io_adapter(kvm, e->adapter.adapter_id);
if (!adapter)
return -1;
- down_read(&adapter->maps_lock);
ret = adapter_indicators_set(kvm, adapter, &e->adapter);
- up_read(&adapter->maps_lock);
if ((ret > 0) && !adapter->masked) {
ret = kvm_s390_inject_airq(kvm, adapter);
if (ret == 0)
@@ -2818,23 +2881,27 @@ int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
- int ret;
+ u64 uaddr;
switch (ue->type) {
+ /* we store the userspace addresses instead of the guest addresses */
case KVM_IRQ_ROUTING_S390_ADAPTER:
e->set = set_adapter_int;
- e->adapter.summary_addr = ue->u.adapter.summary_addr;
- e->adapter.ind_addr = ue->u.adapter.ind_addr;
+ uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
+ if (uaddr == -EFAULT)
+ return -EFAULT;
+ e->adapter.summary_addr = uaddr;
+ uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
+ if (uaddr == -EFAULT)
+ return -EFAULT;
+ e->adapter.ind_addr = uaddr;
e->adapter.summary_offset = ue->u.adapter.summary_offset;
e->adapter.ind_offset = ue->u.adapter.ind_offset;
e->adapter.adapter_id = ue->u.adapter.adapter_id;
- ret = 0;
- break;
+ return 0;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-
- return ret;
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c2e6d4ba4e23..19a81024fe16 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2,7 +2,7 @@
/*
* hosting IBM Z kernel virtual machines (s390x)
*
- * Copyright IBM Corp. 2008, 2018
+ * Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -44,6 +44,7 @@
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
+#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -184,6 +185,11 @@ static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
+/* if set to true, the GISA will be initialized and used if available */
+static bool use_gisa = true;
+module_param(use_gisa, bool, 0644);
+MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
+
/*
* For now we handle at most 16 double words as this is what the s390 base
* kernel handles and stores in the prefix page. If we ever need to go beyond
@@ -220,6 +226,7 @@ static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
+debug_info_t *kvm_s390_dbf_uv;
/* Section: not file related */
int kvm_arch_hardware_enable(void)
@@ -228,13 +235,15 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-int kvm_arch_check_processor_compat(void)
+int kvm_arch_check_processor_compat(void *opaque)
{
return 0;
}
+/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
unsigned long end);
+static int sca_switch_to_extended(struct kvm *kvm);
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
@@ -293,7 +302,7 @@ static struct notifier_block kvm_clock_notifier = {
.notifier_call = kvm_clock_sync,
};
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
{
gmap_notifier.notifier_call = kvm_gmap_notifier;
gmap_register_pte_notifier(&gmap_notifier);
@@ -460,7 +469,12 @@ int kvm_arch_init(void *opaque)
if (!kvm_s390_dbf)
return -ENOMEM;
- if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
+ kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
+ if (!kvm_s390_dbf_uv)
+ goto out;
+
+ if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
+ debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
goto out;
kvm_s390_cpu_feat_init();
@@ -487,6 +501,7 @@ void kvm_arch_exit(void)
{
kvm_s390_gib_destroy();
debug_unregister(kvm_s390_dbf);
+ debug_unregister(kvm_s390_dbf_uv);
}
/* Section: device related */
@@ -564,14 +579,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_BPB:
r = test_facility(82);
break;
+ case KVM_CAP_S390_PROTECTED:
+ r = is_prot_virt_host();
+ break;
default:
r = 0;
}
return r;
}
-static void kvm_s390_sync_dirty_log(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
int i;
gfn_t cur_gfn, last_gfn;
@@ -612,9 +629,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
{
int r;
unsigned long n;
- struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int is_dirty = 0;
+ int is_dirty;
if (kvm_is_ucontrol(kvm))
return -EINVAL;
@@ -625,14 +641,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
- r = -ENOENT;
- if (!memslot->dirty_bitmap)
- goto out;
-
- kvm_s390_sync_dirty_log(kvm, memslot);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
if (r)
goto out;
@@ -1993,6 +2002,9 @@ static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *ms;
+ if (unlikely(!slots->used_slots))
+ return 0;
+
cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
ms = gfn_to_memslot(kvm, cur_gfn);
args->count = 0;
@@ -2158,6 +2170,194 @@ out:
return r;
}
+static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
+{
+ struct kvm_vcpu *vcpu;
+ u16 rc, rrc;
+ int ret = 0;
+ int i;
+
+ /*
+ * We ignore failures and try to destroy as many CPUs as possible.
+ * At the same time we must not free the assigned resources when
+ * this fails, as the ultravisor has still access to that memory.
+ * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
+ * behind.
+ * We want to return the first failure rc and rrc, though.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ mutex_lock(&vcpu->mutex);
+ if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
+ *rcp = rc;
+ *rrcp = rrc;
+ ret = -EIO;
+ }
+ mutex_unlock(&vcpu->mutex);
+ }
+ return ret;
+}
+
+static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
+{
+ int i, r = 0;
+ u16 dummy;
+
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ mutex_lock(&vcpu->mutex);
+ r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
+ mutex_unlock(&vcpu->mutex);
+ if (r)
+ break;
+ }
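+ /* undo the partial conversion if any CPU failed to switch */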
+ if (r)
+ kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
+ return r;
+}
+
+static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
+{
+ int r = 0;
+ u16 dummy;
+ void __user *argp = (void __user *)cmd->data;
+
+ switch (cmd->cmd) {
+ case KVM_PV_ENABLE: {
+ r = -EINVAL;
+ if (kvm_s390_pv_is_protected(kvm))
+ break;
+
+ /*
+ * FMT 4 SIE needs esca. As we never switch back to bsca from
+ * esca, we need no cleanup in the error cases below.
+ */
+ r = sca_switch_to_extended(kvm);
+ if (r)
+ break;
+
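+ /* secure pages must not be shared, so turn off KSM merging */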
+ down_write(&current->mm->mmap_sem);
+ r = gmap_mark_unmergeable();
+ up_write(&current->mm->mmap_sem);
+ if (r)
+ break;
+
+ r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
+ if (r)
+ break;
+
+ r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
+ if (r)
+ kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
+
+ /* we need to block service interrupts from now on */
+ set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
+ break;
+ }
+ case KVM_PV_DISABLE: {
+ r = -EINVAL;
+ if (!kvm_s390_pv_is_protected(kvm))
+ break;
+
+ r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
+ /*
+ * If a CPU could not be destroyed, destroy VM will also fail.
+ * There is no point in trying to destroy it. Instead return
+ * the rc and rrc from the first CPU that failed destroying.
+ */
+ if (r)
+ break;
+ r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
+
+ /* no need to block service interrupts any more */
+ clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
+ break;
+ }
+ case KVM_PV_SET_SEC_PARMS: {
+ struct kvm_s390_pv_sec_parm parms = {};
+ void *hdr;
+
+ r = -EINVAL;
+ if (!kvm_s390_pv_is_protected(kvm))
+ break;
+
+ r = -EFAULT;
+ if (copy_from_user(&parms, argp, sizeof(parms)))
+ break;
+
+ /* Currently restricted to 8KB */
+ r = -EINVAL;
+ if (parms.length > PAGE_SIZE * 2)
+ break;
+
+ r = -ENOMEM;
+ hdr = vmalloc(parms.length);
+ if (!hdr)
+ break;
+
+ r = -EFAULT;
+ if (!copy_from_user(hdr, (void __user *)parms.origin,
+ parms.length))
+ r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
+ &cmd->rc, &cmd->rrc);
+
+ vfree(hdr);
+ break;
+ }
+ case KVM_PV_UNPACK: {
+ struct kvm_s390_pv_unp unp = {};
+
+ r = -EINVAL;
+ if (!kvm_s390_pv_is_protected(kvm))
+ break;
+
+ r = -EFAULT;
+ if (copy_from_user(&unp, argp, sizeof(unp)))
+ break;
+
+ r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
+ &cmd->rc, &cmd->rrc);
+ break;
+ }
+ case KVM_PV_VERIFY: {
+ r = -EINVAL;
+ if (!kvm_s390_pv_is_protected(kvm))
+ break;
+
+ r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
+ UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
+ cmd->rrc);
+ break;
+ }
+ case KVM_PV_PREP_RESET: {
+ r = -EINVAL;
+ if (!kvm_s390_pv_is_protected(kvm))
+ break;
+
+ r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
+ UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
+ cmd->rc, cmd->rrc);
+ break;
+ }
+ case KVM_PV_UNSHARE_ALL: {
+ r = -EINVAL;
+ if (!kvm_s390_pv_is_protected(kvm))
+ break;
+
+ r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
+ UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
+ cmd->rc, cmd->rrc);
+ break;
+ }
+ default:
+ r = -ENOTTY;
+ }
+ return r;
+}
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2255,6 +2455,33 @@ long kvm_arch_vm_ioctl(struct file *filp,
mutex_unlock(&kvm->slots_lock);
break;
}
+ case KVM_S390_PV_COMMAND: {
+ struct kvm_pv_cmd args;
+
+ /* protvirt means user sigp */
+ kvm->arch.user_cpu_state_ctrl = 1;
+ r = 0;
+ if (!is_prot_virt_host()) {
+ r = -EINVAL;
+ break;
+ }
+ if (copy_from_user(&args, argp, sizeof(args))) {
+ r = -EFAULT;
+ break;
+ }
+ if (args.flags) {
+ r = -EINVAL;
+ break;
+ }
+ mutex_lock(&kvm->lock);
+ r = kvm_s390_handle_pv(kvm, &args);
+ mutex_unlock(&kvm->lock);
+ if (copy_to_user(argp, &args, sizeof(args))) {
+ r = -EFAULT;
+ break;
+ }
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -2504,7 +2731,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.use_skf = sclp.has_skey;
spin_lock_init(&kvm->arch.start_stop_lock);
kvm_s390_vsie_init(kvm);
- kvm_s390_gisa_init(kvm);
+ if (use_gisa)
+ kvm_s390_gisa_init(kvm);
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
return 0;
@@ -2518,6 +2746,8 @@ out_err:
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ u16 rc, rrc;
+
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
kvm_s390_clear_local_irqs(vcpu);
@@ -2530,6 +2760,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.use_cmma)
kvm_s390_vcpu_unsetup_cmma(vcpu);
+ /* We cannot hold the vcpu mutex here, we are already dying */
+ if (kvm_s390_pv_cpu_get_handle(vcpu))
+ kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
free_page((unsigned long)(vcpu->arch.sie_block));
}
@@ -2551,10 +2784,20 @@ static void kvm_free_vcpus(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
+ u16 rc, rrc;
+
kvm_free_vcpus(kvm);
sca_dispose(kvm);
- debug_unregister(kvm->arch.dbf);
kvm_s390_gisa_destroy(kvm);
+ /*
+ * We are already at the end of life and kvm->lock is not taken.
+ * This is ok as the file descriptor is closed by now and nobody
+ * can mess with the pv state. To avoid lockdep_assert_held from
+ * complaining we do not use kvm_s390_pv_is_protected.
+ */
+ if (kvm_s390_pv_get_handle(kvm))
+ kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
+ debug_unregister(kvm->arch.dbf);
free_page((unsigned long)kvm->arch.sie_page2);
if (!kvm_is_ucontrol(kvm))
gmap_remove(kvm->arch.gmap);
@@ -2650,6 +2893,9 @@ static int sca_switch_to_extended(struct kvm *kvm)
unsigned int vcpu_idx;
u32 scaol, scaoh;
+ if (kvm->arch.use_esca)
+ return 0;
+
new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
if (!new_sca)
return -ENOMEM;
@@ -2901,6 +3147,7 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
+ u16 uvrc, uvrrc;
atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
CPUSTAT_SM |
@@ -2968,6 +3215,14 @@ static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_s390_vcpu_crypto_setup(vcpu);
+ mutex_lock(&vcpu->kvm->lock);
+ if (kvm_s390_pv_is_protected(vcpu->kvm)) {
+ rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
+ if (rc)
+ kvm_s390_vcpu_unsetup_cmma(vcpu);
+ }
+ mutex_unlock(&vcpu->kvm->lock);
+
return rc;
}
@@ -3277,7 +3532,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
kvm_s390_set_prefix(vcpu, 0);
kvm_s390_set_cpu_timer(vcpu, 0);
vcpu->arch.sie_block->ckc = 0;
- vcpu->arch.sie_block->todpr = 0;
memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
@@ -3295,9 +3549,17 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
vcpu->run->s.regs.pp = 0;
vcpu->run->s.regs.gbea = 1;
vcpu->run->s.regs.fpc = 0;
- vcpu->arch.sie_block->gbea = 1;
- vcpu->arch.sie_block->pp = 0;
- vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ /*
+ * Do not reset these registers in the protected case, as some of
+ * them are overlaid and they are not accessible in this case
+ * anyway.
+ */
+ if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
+ vcpu->arch.sie_block->gbea = 1;
+ vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ vcpu->arch.sie_block->todpr = 0;
+ }
}
static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
@@ -3487,14 +3749,20 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
switch (mp_state->mp_state) {
case KVM_MP_STATE_STOPPED:
- kvm_s390_vcpu_stop(vcpu);
+ rc = kvm_s390_vcpu_stop(vcpu);
break;
case KVM_MP_STATE_OPERATING:
- kvm_s390_vcpu_start(vcpu);
+ rc = kvm_s390_vcpu_start(vcpu);
break;
case KVM_MP_STATE_LOAD:
+ if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
+ rc = -ENXIO;
+ break;
+ }
+ rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
+ break;
case KVM_MP_STATE_CHECK_STOP:
- /* fall through - CHECK_STOP and LOAD are not supported yet */
+ fallthrough; /* CHECK_STOP and LOAD are not supported yet */
default:
rc = -ENXIO;
}
@@ -3844,9 +4112,11 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
return vcpu_post_run_fault_in_sie(vcpu);
}
+#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int rc, exit_reason;
+ struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
/*
* We try to hold kvm->srcu during most of vcpu_run (except when run-
@@ -3868,8 +4138,28 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
guest_enter_irqoff();
__disable_cpu_timer_accounting(vcpu);
local_irq_enable();
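+ /*
+ * For protected guests the general purpose registers are
+ * exchanged with the ultravisor via the SIE page, not via
+ * vcpu->run directly.
+ */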
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ memcpy(sie_page->pv_grregs,
+ vcpu->run->s.regs.gprs,
+ sizeof(sie_page->pv_grregs));
+ }
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ memcpy(vcpu->run->s.regs.gprs,
+ sie_page->pv_grregs,
+ sizeof(sie_page->pv_grregs));
+ /*
+ * We're not allowed to inject interrupts on intercepts
+ * that leave the guest state in an "in-between" state
+ * where the next SIE entry will do a continuation.
+ * Fence interrupts in our "internal" PSW.
+ */
+ if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
+ vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
+ vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
+ }
+ }
local_irq_disable();
__enable_cpu_timer_accounting(vcpu);
guest_exit_irqoff();
@@ -3883,7 +4173,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
}
-static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
@@ -3892,16 +4182,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
- if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
- kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
- if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
- memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
- /* some control register changes require a tlb flush */
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- }
if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
- kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
- vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
@@ -3942,6 +4223,36 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
}
+ if (MACHINE_HAS_GS) {
+ preempt_disable();
+ __ctl_set_bit(2, 4);
+ if (current->thread.gs_cb) {
+ vcpu->arch.host_gscb = current->thread.gs_cb;
+ save_gs_cb(vcpu->arch.host_gscb);
+ }
+ if (vcpu->arch.gs_enabled) {
+ current->thread.gs_cb = (struct gs_cb *)
+ &vcpu->run->s.regs.gscb;
+ restore_gs_cb(current->thread.gs_cb);
+ }
+ preempt_enable();
+ }
+ /* SIE will load etoken directly from SDNX and therefore kvm_run */
+}
+
+static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
+ kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+ if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
+ memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
+ /* some control register changes require a tlb flush */
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
+ if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
+ kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
+ vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
+ }
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
@@ -3956,23 +4267,47 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
+
+ /* Sync fmt2 only data */
+ if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
+ sync_regs_fmt2(vcpu, kvm_run);
+ } else {
+ /*
+ * In several places we have to modify our internal view to
+ * not do things that are disallowed by the ultravisor. For
+ * example we must not inject interrupts after specific exits
+ * (e.g. 112 prefix page not secure). We do this by turning
+ * off the machine check, external and I/O interrupt bits
+ * of our PSW copy. To avoid getting validity intercepts, we
+ * only accept the condition code from userspace.
+ */
+ vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
+ vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
+ PSW_MASK_CC;
+ }
+
+ kvm_run->kvm_dirty_regs = 0;
+}
+
+static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
+ kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
+ kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
if (MACHINE_HAS_GS) {
- preempt_disable();
__ctl_set_bit(2, 4);
- if (current->thread.gs_cb) {
- vcpu->arch.host_gscb = current->thread.gs_cb;
- save_gs_cb(vcpu->arch.host_gscb);
- }
- if (vcpu->arch.gs_enabled) {
- current->thread.gs_cb = (struct gs_cb *)
- &vcpu->run->s.regs.gscb;
- restore_gs_cb(current->thread.gs_cb);
- }
+ if (vcpu->arch.gs_enabled)
+ save_gs_cb(current->thread.gs_cb);
+ preempt_disable();
+ current->thread.gs_cb = vcpu->arch.host_gscb;
+ restore_gs_cb(vcpu->arch.host_gscb);
preempt_enable();
+ if (!vcpu->arch.host_gscb)
+ __ctl_clear_bit(2, 4);
+ vcpu->arch.host_gscb = NULL;
}
- /* SIE will load etoken directly from SDNX and therefore kvm_run */
-
- kvm_run->kvm_dirty_regs = 0;
+ /* SIE will save etoken directly into SDNX and therefore kvm_run */
}
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -3983,13 +4318,9 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
- kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
- kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
- kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
- kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
/* Save guest register state */
@@ -3998,19 +4329,8 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* Restore will be done lazily at return */
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
- if (MACHINE_HAS_GS) {
- __ctl_set_bit(2, 4);
- if (vcpu->arch.gs_enabled)
- save_gs_cb(current->thread.gs_cb);
- preempt_disable();
- current->thread.gs_cb = vcpu->arch.host_gscb;
- restore_gs_cb(vcpu->arch.host_gscb);
- preempt_enable();
- if (!vcpu->arch.host_gscb)
- __ctl_clear_bit(2, 4);
- vcpu->arch.host_gscb = NULL;
- }
- /* SIE will save etoken directly into SDNX and therefore kvm_run */
+ if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
+ store_regs_fmt2(vcpu, kvm_run);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -4034,6 +4354,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_sigset_activate(vcpu);
+ /*
+ * no need to check the return value of vcpu_start as it can only fail
+ * for protvirt, but protvirt implies user controlled cpu state
+ */
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm_s390_vcpu_start(vcpu);
} else if (is_vcpu_stopped(vcpu)) {
@@ -4171,18 +4495,27 @@ static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
-void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
+int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
- int i, online_vcpus, started_vcpus = 0;
+ int i, online_vcpus, r = 0, started_vcpus = 0;
if (!is_vcpu_stopped(vcpu))
- return;
+ return 0;
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
/* Only one cpu at a time may enter/leave the STOPPED state. */
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+ /* Let's tell the UV that we want to change into the operating state */
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
+ if (r) {
+ spin_unlock(&vcpu->kvm->arch.start_stop_lock);
+ return r;
+ }
+ }
+
for (i = 0; i < online_vcpus; i++) {
if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
started_vcpus++;
@@ -4202,27 +4535,43 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
/*
+ * The real PSW might have changed due to a RESTART interpreted by the
+ * ultravisor. We block all interrupts and let the next sie exit
+ * refresh our view.
+ */
+ if (kvm_s390_pv_cpu_is_protected(vcpu))
+ vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
+ /*
* Another VCPU might have used IBS while we were offline.
* Let's play safe and flush the VCPU at startup.
*/
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
- return;
+ return 0;
}
-void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
+int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
- int i, online_vcpus, started_vcpus = 0;
+ int i, online_vcpus, r = 0, started_vcpus = 0;
struct kvm_vcpu *started_vcpu = NULL;
if (is_vcpu_stopped(vcpu))
- return;
+ return 0;
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
/* Only one cpu at a time may enter/leave the STOPPED state. */
spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+ /* Let's tell the UV that we want to change into the stopped state */
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
+ if (r) {
+ spin_unlock(&vcpu->kvm->arch.start_stop_lock);
+ return r;
+ }
+ }
+
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
kvm_s390_clear_stop_irq(vcpu);
@@ -4245,7 +4594,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
}
spin_unlock(&vcpu->kvm->arch.start_stop_lock);
- return;
+ return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
@@ -4272,12 +4621,40 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
+static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mem_op *mop)
+{
+ void __user *uaddr = (void __user *)mop->buf;
+ int r = 0;
+
+ if (mop->flags || !mop->size)
+ return -EINVAL;
+ if (mop->size + mop->sida_offset < mop->size)
+ return -EINVAL;
+ if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
+ return -E2BIG;
+
+ switch (mop->op) {
+ case KVM_S390_MEMOP_SIDA_READ:
+ if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
+ mop->sida_offset), mop->size))
+ r = -EFAULT;
+
+ break;
+ case KVM_S390_MEMOP_SIDA_WRITE:
+ if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
+ mop->sida_offset), uaddr, mop->size))
+ r = -EFAULT;
+ break;
+ }
+ return r;
+}
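
For orientation, a hedged userspace sketch of driving the new SIDA sub-ops through the existing KVM_S390_MEM_OP vcpu ioctl. The sida_offset member shares a union with ar in struct kvm_s390_mem_op as added by this series; verify the layout against your linux/kvm.h before relying on it.

/* Sketch: read len bytes from a protected vcpu's SIDA. */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int sida_read(int vcpu_fd, void *buf, __u32 len, __u32 sida_offset)
{
	struct kvm_s390_mem_op mop;

	memset(&mop, 0, sizeof(mop));
	mop.op = KVM_S390_MEMOP_SIDA_READ;
	mop.buf = (__u64)(unsigned long)buf;
	mop.size = len;			/* must be non-zero */
	mop.sida_offset = sida_offset;	/* offset + size must fit the SIDA */
	mop.flags = 0;			/* no flags are accepted for SIDA ops */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
}
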
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
void *tmpbuf = NULL;
- int r, srcu_idx;
+ int r = 0;
const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
| KVM_S390_MEMOP_F_CHECK_ONLY;
@@ -4287,14 +4664,15 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
if (mop->size > MEM_OP_MAX_SIZE)
return -E2BIG;
+ if (kvm_s390_pv_cpu_is_protected(vcpu))
+ return -EINVAL;
+
if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
tmpbuf = vmalloc(mop->size);
if (!tmpbuf)
return -ENOMEM;
}
- srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-
switch (mop->op) {
case KVM_S390_MEMOP_LOGICAL_READ:
if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
@@ -4320,12 +4698,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
}
r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
break;
- default:
- r = -EINVAL;
}
- srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
-
if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
@@ -4333,6 +4707,31 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
return r;
}
+static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mem_op *mop)
+{
+ int r, srcu_idx;
+
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ switch (mop->op) {
+ case KVM_S390_MEMOP_LOGICAL_READ:
+ case KVM_S390_MEMOP_LOGICAL_WRITE:
+ r = kvm_s390_guest_mem_op(vcpu, mop);
+ break;
+ case KVM_S390_MEMOP_SIDA_READ:
+ case KVM_S390_MEMOP_SIDA_WRITE:
+ /* we are locked against sida going away by the vcpu->mutex */
+ r = kvm_s390_guest_sida_op(vcpu, mop);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ return r;
+}
+
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -4368,6 +4767,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int idx;
long r;
+ u16 rc, rrc;
vcpu_load(vcpu);
@@ -4389,18 +4789,40 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_S390_CLEAR_RESET:
r = 0;
kvm_arch_vcpu_ioctl_clear_reset(vcpu);
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
+ UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
+ VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
+ rc, rrc);
+ }
break;
case KVM_S390_INITIAL_RESET:
r = 0;
kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
+ UVC_CMD_CPU_RESET_INITIAL,
+ &rc, &rrc);
+ VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
+ rc, rrc);
+ }
break;
case KVM_S390_NORMAL_RESET:
r = 0;
kvm_arch_vcpu_ioctl_normal_reset(vcpu);
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
+ UVC_CMD_CPU_RESET, &rc, &rrc);
+ VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
+ rc, rrc);
+ }
break;
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
+ r = -EINVAL;
+ if (kvm_s390_pv_cpu_is_protected(vcpu))
+ break;
r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
break;
@@ -4463,7 +4885,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_s390_mem_op mem_op;
if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
- r = kvm_s390_guest_mem_op(vcpu, &mem_op);
+ r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
else
r = -EFAULT;
break;
@@ -4523,12 +4945,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -4549,12 +4965,15 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
return -EINVAL;
+ /* When we are protected, we should not change the memory slots */
+ if (kvm_s390_pv_get_handle(kvm))
+ return -EINVAL;
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
@@ -4570,7 +4989,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
old->npages * PAGE_SIZE);
if (rc)
break;
- /* FALLTHROUGH */
+ fallthrough;
case KVM_MR_CREATE:
rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
mem->guest_phys_addr, mem->memory_size);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 6d9448dbd052..79dcd647b378 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -2,7 +2,7 @@
/*
* definition for kvm on s390
*
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -15,6 +15,7 @@
#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>
@@ -25,6 +26,17 @@
#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
extern debug_info_t *kvm_s390_dbf;
+extern debug_info_t *kvm_s390_dbf_uv;
+
+#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
+ d_args); \
+ debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
+ "%d: " d_string "\n", (d_kvm)->userspace_pid, \
+ d_args); \
+} while (0)
+
#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
@@ -196,6 +208,39 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
return kvm->arch.user_cpu_state_ctrl != 0;
}
+/* implemented in pv.c */
+int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
+int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
+int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
+int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
+int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
+ u16 *rrc);
+int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
+ unsigned long tweak, u16 *rc, u16 *rrc);
+int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
+
+static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
+{
+ return kvm->arch.pv.handle;
+}
+
+static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pv.handle;
+}
+
+static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
+{
+ lockdep_assert_held(&kvm->lock);
+ return !!kvm_s390_pv_get_handle(kvm);
+}
+
+static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_held(&vcpu->mutex);
+ return !!kvm_s390_pv_cpu_get_handle(vcpu);
+}
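
The lockdep assertions above document the locking contract: the VM handle may only be inspected under kvm->lock, the vcpu handle only under vcpu->mutex (which vcpu_load() takes). A minimal, hypothetical call-site sketch:

/* Hypothetical example of a call site honoring the contract. */
static int example_vm_op(struct kvm *kvm)
{
	int r = -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm_s390_pv_is_protected(kvm)) {
		/* the handle cannot change while kvm->lock is held */
		r = 0;
	}
	mutex_unlock(&kvm->lock);
	return r;
}
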
+
/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
@@ -286,8 +331,8 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
-void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
+int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
+int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index ed52ffa8d5d4..69a824f9ef0b 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -2,7 +2,7 @@
/*
* handling privileged instructions
*
- * Copyright IBM Corp. 2008, 2018
+ * Copyright IBM Corp. 2008, 2020
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -872,7 +872,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
- if (operand2 & 0xfff)
+ if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
switch (fc) {
@@ -893,8 +893,13 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
handle_stsi_3_2_2(vcpu, (void *) mem);
break;
}
-
- rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
+ if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+ memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
+ PAGE_SIZE);
+ rc = 0;
+ } else {
+ rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
+ }
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
new file mode 100644
index 000000000000..63e330109b63
--- /dev/null
+++ b/arch/s390/kvm/pv.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hosting Protected Virtual Machines
+ *
+ * Copyright IBM Corp. 2019, 2020
+ * Author(s): Janosch Frank <frankja@linux.ibm.com>
+ */
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/pagemap.h>
+#include <linux/sched/signal.h>
+#include <asm/pgalloc.h>
+#include <asm/gmap.h>
+#include <asm/uv.h>
+#include <asm/mman.h>
+#include "kvm-s390.h"
+
+int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
+{
+ int cc = 0;
+
+ if (kvm_s390_pv_cpu_get_handle(vcpu)) {
+ cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
+ UVC_CMD_DESTROY_SEC_CPU, rc, rrc);
+
+ KVM_UV_EVENT(vcpu->kvm, 3,
+ "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
+ vcpu->vcpu_id, *rc, *rrc);
+ WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x",
+ *rc, *rrc);
+ }
+ /* Intended memory leak for something that should never happen. */
+ if (!cc)
+ free_pages(vcpu->arch.pv.stor_base,
+ get_order(uv_info.guest_cpu_stor_len));
+
+ free_page(sida_origin(vcpu->arch.sie_block));
+ vcpu->arch.sie_block->pv_handle_cpu = 0;
+ vcpu->arch.sie_block->pv_handle_config = 0;
+ memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
+ vcpu->arch.sie_block->sdf = 0;
+ /*
+ * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
+ * Use the reset value of gbea to avoid leaking the kernel pointer of
+ * the just freed sida.
+ */
+ vcpu->arch.sie_block->gbea = 1;
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
+ return cc ? -EIO : 0;
+}
+
+int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
+{
+ struct uv_cb_csc uvcb = {
+ .header.cmd = UVC_CMD_CREATE_SEC_CPU,
+ .header.len = sizeof(uvcb),
+ };
+ int cc;
+
+ if (kvm_s390_pv_cpu_get_handle(vcpu))
+ return -EINVAL;
+
+ vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,
+ get_order(uv_info.guest_cpu_stor_len));
+ if (!vcpu->arch.pv.stor_base)
+ return -ENOMEM;
+
+ /* Input */
+ uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
+ uvcb.num = vcpu->arch.sie_block->icpua;
+ uvcb.state_origin = (u64)vcpu->arch.sie_block;
+ uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;
+
+ /* Alloc Secure Instruction Data Area Designation */
+ vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (!vcpu->arch.sie_block->sidad) {
+ free_pages(vcpu->arch.pv.stor_base,
+ get_order(uv_info.guest_cpu_stor_len));
+ return -ENOMEM;
+ }
+
+ cc = uv_call(0, (u64)&uvcb);
+ *rc = uvcb.header.rc;
+ *rrc = uvcb.header.rrc;
+ KVM_UV_EVENT(vcpu->kvm, 3,
+ "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
+ vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
+ uvcb.header.rrc);
+
+ if (cc) {
+ u16 dummy;
+
+ kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
+ return -EIO;
+ }
+
+ /* Output */
+ vcpu->arch.pv.handle = uvcb.cpu_handle;
+ vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
+ vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
+ vcpu->arch.sie_block->sdf = 2;
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ return 0;
+}
+
+/* only free resources when the destroy was successful */
+static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
+{
+ vfree(kvm->arch.pv.stor_var);
+ free_pages(kvm->arch.pv.stor_base,
+ get_order(uv_info.guest_base_stor_len));
+ memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
+}
+
+static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
+{
+ unsigned long base = uv_info.guest_base_stor_len;
+ unsigned long virt = uv_info.guest_virt_var_stor_len;
+ unsigned long npages = 0, vlen = 0;
+ struct kvm_memory_slot *memslot;
+
+ kvm->arch.pv.stor_var = NULL;
+ kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL, get_order(base));
+ if (!kvm->arch.pv.stor_base)
+ return -ENOMEM;
+
+ /*
+ * Calculate current guest storage for allocation of the
+ * variable storage, which is based on the length in MB.
+ *
+ * Slots are sorted by GFN, so the first slot determines the
+ * highest guest address.
+ */
+ mutex_lock(&kvm->slots_lock);
+ memslot = kvm_memslots(kvm)->memslots;
+ npages = memslot->base_gfn + memslot->npages;
+ mutex_unlock(&kvm->slots_lock);
+
+ kvm->arch.pv.guest_len = npages * PAGE_SIZE;
+
+ /* Allocate variable storage */
+ vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
+ vlen += uv_info.guest_virt_base_stor_len;
+ kvm->arch.pv.stor_var = vzalloc(vlen);
+ if (!kvm->arch.pv.stor_var)
+ goto out_err;
+ return 0;
+
+out_err:
+ kvm_s390_pv_dealloc_vm(kvm);
+ return -ENOMEM;
+}
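
To make the sizing above concrete: with 4 KiB pages and 1 MiB segments (s390 HPAGE_SIZE), a 4 GiB guest (npages = 0x100000) has 4096 segments, so vlen becomes ALIGN(guest_virt_var_stor_len * 4096, PAGE_SIZE) plus guest_virt_base_stor_len. A standalone self-check of the arithmetic with assumed UV lengths (the real values come from uv_info):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_SIZE	(1UL << 20)	/* 1 MiB segments on s390 */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long npages = 0x100000;	/* 4 GiB of guest storage */
	unsigned long virt = 512;	/* assumed guest_virt_var_stor_len */
	unsigned long base = 8192;	/* assumed guest_virt_base_stor_len */
	unsigned long vlen;

	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += base;
	printf("vlen = %lu bytes\n", vlen);	/* 2 MiB + 8 KiB here */
	return 0;
}
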
+
+/* this should not fail, but if it does, we must not free the donated memory */
+int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
+{
+ int cc;
+
+ /* make all pages accessible before destroying the guest */
+ s390_reset_acc(kvm->mm);
+
+ cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
+ UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
+ WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
+ atomic_set(&kvm->mm->context.is_protected, 0);
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
+ WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
+ /* Intended memory leak on "impossible" error */
+ if (!cc)
+ kvm_s390_pv_dealloc_vm(kvm);
+ return cc ? -EIO : 0;
+}
+
+int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
+{
+ struct uv_cb_cgc uvcb = {
+ .header.cmd = UVC_CMD_CREATE_SEC_CONF,
+ .header.len = sizeof(uvcb)
+ };
+ int cc, ret;
+ u16 dummy;
+
+ ret = kvm_s390_pv_alloc_vm(kvm);
+ if (ret)
+ return ret;
+
+ /* Inputs */
+ uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
+ uvcb.guest_stor_len = kvm->arch.pv.guest_len;
+ uvcb.guest_asce = kvm->arch.gmap->asce;
+ uvcb.guest_sca = (unsigned long)kvm->arch.sca;
+ uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
+ uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
+
+ cc = uv_call(0, (u64)&uvcb);
+ *rc = uvcb.header.rc;
+ *rrc = uvcb.header.rrc;
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
+ uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);
+
+ /* Outputs */
+ kvm->arch.pv.handle = uvcb.guest_handle;
+
+ if (cc) {
+ if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
+ kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
+ else
+ kvm_s390_pv_dealloc_vm(kvm);
+ return -EIO;
+ }
+ kvm->arch.gmap->guest_handle = uvcb.guest_handle;
+ atomic_set(&kvm->mm->context.is_protected, 1);
+ return 0;
+}
+
+int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
+ u16 *rrc)
+{
+ struct uv_cb_ssc uvcb = {
+ .header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
+ .header.len = sizeof(uvcb),
+ .sec_header_origin = (u64)hdr,
+ .sec_header_len = length,
+ .guest_handle = kvm_s390_pv_get_handle(kvm),
+ };
+ int cc = uv_call(0, (u64)&uvcb);
+
+ *rc = uvcb.header.rc;
+ *rrc = uvcb.header.rrc;
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
+ *rc, *rrc);
+ return cc ? -EINVAL : 0;
+}
+
+static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
+ u64 offset, u16 *rc, u16 *rrc)
+{
+ struct uv_cb_unp uvcb = {
+ .header.cmd = UVC_CMD_UNPACK_IMG,
+ .header.len = sizeof(uvcb),
+ .guest_handle = kvm_s390_pv_get_handle(kvm),
+ .gaddr = addr,
+ .tweak[0] = tweak,
+ .tweak[1] = offset,
+ };
+ int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
+
+ *rc = uvcb.header.rc;
+ *rrc = uvcb.header.rrc;
+
+ if (ret && ret != -EAGAIN)
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
+ uvcb.gaddr, *rc, *rrc);
+ return ret;
+}
+
+int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
+ unsigned long tweak, u16 *rc, u16 *rrc)
+{
+ u64 offset = 0;
+ int ret = 0;
+
+ if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
+ return -EINVAL;
+
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
+ addr, size);
+
+ while (offset < size) {
+ ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
+ if (ret == -EAGAIN) {
+ cond_resched();
+ if (fatal_signal_pending(current))
+ break;
+ continue;
+ }
+ if (ret)
+ break;
+ addr += PAGE_SIZE;
+ offset += PAGE_SIZE;
+ }
+ if (!ret)
+ KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
+ return ret;
+}
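
For context, a hedged sketch of how userspace reaches this loop: the KVM_PV_UNPACK sub-command of the KVM_S390_PV_COMMAND vm ioctl passes a struct kvm_s390_pv_unp (addr, size, tweak). The names follow the uapi additions of this series; confirm them in linux/kvm.h.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int pv_unpack(int vm_fd, __u64 gaddr, __u64 size, __u64 tweak)
{
	struct kvm_s390_pv_unp unp = {
		.addr = gaddr,	/* page-aligned guest address */
		.size = size,	/* page-aligned and non-zero */
		.tweak = tweak,	/* component tweak from the SE header */
	};
	struct kvm_pv_cmd cmd = {
		.cmd = KVM_PV_UNPACK,
		.data = (__u64)(unsigned long)&unp,
	};

	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}
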
+
+int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
+{
+ struct uv_cb_cpu_set_state uvcb = {
+ .header.cmd = UVC_CMD_CPU_SET_STATE,
+ .header.len = sizeof(uvcb),
+ .cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
+ .state = state,
+ };
+ int cc;
+
+ cc = uv_call(0, (u64)&uvcb);
+ KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
+ vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
+ if (cc)
+ return -EINVAL;
+ return 0;
+}
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index a51c892f14f3..ae989b740376 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -19,7 +19,6 @@
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
-#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
@@ -49,7 +48,6 @@ static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
-static int cmm_suspended;
static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
@@ -151,9 +149,9 @@ static int cmm_thread(void *dummy)
while (1) {
rc = wait_event_interruptible(cmm_thread_wait,
- (!cmm_suspended && (cmm_pages != cmm_pages_target ||
- cmm_timed_pages != cmm_timed_pages_target)) ||
- kthread_should_stop());
+ cmm_pages != cmm_pages_target ||
+ cmm_timed_pages != cmm_timed_pages_target ||
+ kthread_should_stop());
if (kthread_should_stop() || rc == -ERESTARTSYS) {
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
@@ -390,38 +388,6 @@ static void cmm_smsg_target(const char *from, char *msg)
static struct ctl_table_header *cmm_sysctl_header;
-static int cmm_suspend(void)
-{
- cmm_suspended = 1;
- cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
- cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
- return 0;
-}
-
-static int cmm_resume(void)
-{
- cmm_suspended = 0;
- cmm_kick_thread();
- return 0;
-}
-
-static int cmm_power_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- switch (event) {
- case PM_POST_HIBERNATION:
- return cmm_resume();
- case PM_HIBERNATION_PREPARE:
- return cmm_suspend();
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block cmm_power_notifier = {
- .notifier_call = cmm_power_event,
-};
-
static int __init cmm_init(void)
{
int rc = -ENOMEM;
@@ -446,16 +412,11 @@ static int __init cmm_init(void)
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
goto out_oom_notify;
- rc = register_pm_notifier(&cmm_power_notifier);
- if (rc)
- goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (!IS_ERR(cmm_thread_ptr))
return 0;
rc = PTR_ERR(cmm_thread_ptr);
- unregister_pm_notifier(&cmm_power_notifier);
-out_pm:
unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
@@ -475,7 +436,6 @@ static void __exit cmm_exit(void)
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
- unregister_pm_notifier(&cmm_power_notifier);
unregister_oom_notifier(&cmm_oom_nb);
kthread_stop(cmm_thread_ptr);
del_timer_sync(&cmm_timer);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7b0bb475c166..d56f67745e3e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -38,17 +38,18 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
+#include <asm/uv.h>
#include "../kernel/entry.h"
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
-#define VM_FAULT_BADCONTEXT 0x010000
-#define VM_FAULT_BADMAP 0x020000
-#define VM_FAULT_BADACCESS 0x040000
-#define VM_FAULT_SIGNAL 0x080000
-#define VM_FAULT_PFAULT 0x100000
+#define VM_FAULT_BADCONTEXT ((__force vm_fault_t) 0x010000)
+#define VM_FAULT_BADMAP ((__force vm_fault_t) 0x020000)
+#define VM_FAULT_BADACCESS ((__force vm_fault_t) 0x040000)
+#define VM_FAULT_SIGNAL ((__force vm_fault_t) 0x080000)
+#define VM_FAULT_PFAULT ((__force vm_fault_t) 0x100000)
enum fault_type {
KERNEL_FAULT,
@@ -122,7 +123,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* fallthrough */
+ fallthrough;
case _ASCE_TYPE_REGION2:
table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
if (bad_address(table))
@@ -131,7 +132,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* fallthrough */
+ fallthrough;
case _ASCE_TYPE_REGION3:
table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
if (bad_address(table))
@@ -140,7 +141,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* fallthrough */
+ fallthrough;
case _ASCE_TYPE_SEGMENT:
table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
if (bad_address(table))
@@ -327,7 +328,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
case VM_FAULT_BADACCESS:
if (access == VM_EXEC && signal_return(regs) == 0)
break;
- /* fallthrough */
+ fallthrough;
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
@@ -337,9 +338,8 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
do_sigsegv(regs, si_code);
break;
}
- /* fallthrough */
+ fallthrough;
case VM_FAULT_BADCONTEXT:
- /* fallthrough */
case VM_FAULT_PFAULT:
do_no_context(regs);
break;
@@ -429,7 +429,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
@@ -480,8 +480,7 @@ retry:
* the fault.
*/
fault = handle_mm_fault(vma, address, flags);
- /* No reason to continue if interrupted by SIGKILL. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (fault_signal_pending(fault, regs)) {
fault = VM_FAULT_SIGNAL;
if (flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_up;
@@ -514,10 +513,7 @@ retry:
fault = VM_FAULT_PFAULT;
goto out_up;
}
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~(FAULT_FLAG_ALLOW_RETRY |
- FAULT_FLAG_RETRY_NOWAIT);
+ flags &= ~FAULT_FLAG_RETRY_NOWAIT;
flags |= FAULT_FLAG_TRIED;
down_read(&mm->mmap_sem);
goto retry;
@@ -816,3 +812,80 @@ out_extint:
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
+
+#if IS_ENABLED(CONFIG_PGSTE)
+void do_secure_storage_access(struct pt_regs *regs)
+{
+ unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ struct page *page;
+ int rc;
+
+ switch (get_fault_type(regs)) {
+ case USER_FAULT:
+ mm = current->mm;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, addr);
+ if (!vma) {
+ up_read(&mm->mmap_sem);
+ do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+ break;
+ }
+ page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
+ if (IS_ERR_OR_NULL(page)) {
+ up_read(&mm->mmap_sem);
+ break;
+ }
+ if (arch_make_page_accessible(page))
+ send_sig(SIGSEGV, current, 0);
+ put_page(page);
+ up_read(&mm->mmap_sem);
+ break;
+ case KERNEL_FAULT:
+ page = phys_to_page(addr);
+ if (unlikely(!try_get_page(page)))
+ break;
+ rc = arch_make_page_accessible(page);
+ put_page(page);
+ if (rc)
+ BUG();
+ break;
+ case VDSO_FAULT:
+ case GMAP_FAULT:
+ default:
+ do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+ WARN_ON_ONCE(1);
+ }
+}
+NOKPROBE_SYMBOL(do_secure_storage_access);
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+ unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
+ struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+
+ if (get_fault_type(regs) != GMAP_FAULT) {
+ do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
+ send_sig(SIGSEGV, current, 0);
+}
+NOKPROBE_SYMBOL(do_non_secure_storage_access);
+
+#else
+void do_secure_storage_access(struct pt_regs *regs)
+{
+ default_trap_handler(regs);
+}
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+ default_trap_handler(regs);
+}
+#endif
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index edcdca97e85e..2fbece47ef6f 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -804,7 +804,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* Fallthrough */
+ fallthrough;
case _ASCE_TYPE_REGION2:
table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
if (level == 3)
@@ -812,7 +812,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* Fallthrough */
+ fallthrough;
case _ASCE_TYPE_REGION3:
table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
if (level == 2)
@@ -820,7 +820,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* Fallthrough */
+ fallthrough;
case _ASCE_TYPE_SEGMENT:
table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
if (level == 1)
@@ -2548,6 +2548,23 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
+int gmap_mark_unmergeable(void)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int ret;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+ MADV_UNMERGEABLE, &vma->vm_flags);
+ if (ret)
+ return ret;
+ }
+ mm->def_flags &= ~VM_MERGEABLE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
+
/*
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
@@ -2593,7 +2610,6 @@ static const struct mm_walk_ops enable_skey_walk_ops = {
int s390_enable_skey(void)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
int rc = 0;
down_write(&mm->mmap_sem);
@@ -2601,16 +2617,11 @@ int s390_enable_skey(void)
goto out_up;
mm->context.uses_skeys = 1;
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
- MADV_UNMERGEABLE, &vma->vm_flags)) {
- mm->context.uses_skeys = 0;
- rc = -ENOMEM;
- goto out_up;
- }
+ rc = gmap_mark_unmergeable();
+ if (rc) {
+ mm->context.uses_skeys = 0;
+ goto out_up;
}
- mm->def_flags &= ~VM_MERGEABLE;
-
walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
out_up:
@@ -2640,3 +2651,38 @@ void s390_reset_cmma(struct mm_struct *mm)
up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
+/*
+ * make inaccessible pages accessible again
+ */
+static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t pte = READ_ONCE(*ptep);
+
+ if (pte_present(pte))
+ WARN_ON_ONCE(uv_convert_from_secure(pte_val(pte) & PAGE_MASK));
+ return 0;
+}
+
+static const struct mm_walk_ops reset_acc_walk_ops = {
+ .pte_entry = __s390_reset_acc,
+};
+
+#include <linux/sched/mm.h>
+void s390_reset_acc(struct mm_struct *mm)
+{
+ /*
+ * we might be called during:
+ * - reset: we walk the pages and clear
+ * - close of all kvm file descriptors: we walk the pages and clear
+ * - exit of process on fd closure: vma already gone, do nothing
+ */
+ if (!mmget_not_zero(mm))
+ return;
+ down_read(&mm->mmap_sem);
+ walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+}
+EXPORT_SYMBOL_GPL(s390_reset_acc);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 5674710a4841..f01daddcbc5e 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -326,7 +326,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- int rc;
if (len & ~huge_page_mask(h))
return -EINVAL;
@@ -353,15 +352,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
else
addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
- if (addr & ~PAGE_MASK)
+ if (offset_in_page(addr))
return addr;
check_asce_limit:
- if (addr + len > current->mm->context.asce_limit &&
- addr + len <= TASK_SIZE) {
- rc = crst_table_upgrade(mm, addr + len);
- if (rc)
- return (unsigned long) rc;
- }
- return addr;
+ return check_asce_limit(mm, addr, len);
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index cbc718ba6d78..1b78f630a9ca 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -72,14 +72,13 @@ static inline unsigned long mmap_base(unsigned long rnd,
return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
-unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
- int rc;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
@@ -105,30 +104,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
- if (addr & ~PAGE_MASK)
+ if (offset_in_page(addr))
return addr;
check_asce_limit:
- if (addr + len > current->mm->context.asce_limit &&
- addr + len <= TASK_SIZE) {
- rc = crst_table_upgrade(mm, addr + len);
- if (rc)
- return (unsigned long) rc;
- }
-
- return addr;
+ return check_asce_limit(mm, addr, len);
}
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
struct vm_unmapped_area_info info;
- int rc;
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
@@ -163,25 +152,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
* can happen with large stack limits and large mmap()
* allocations.
*/
- if (addr & ~PAGE_MASK) {
+ if (offset_in_page(addr)) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
- if (addr & ~PAGE_MASK)
+ if (offset_in_page(addr))
return addr;
}
check_asce_limit:
- if (addr + len > current->mm->context.asce_limit &&
- addr + len <= TASK_SIZE) {
- rc = crst_table_upgrade(mm, addr + len);
- if (rc)
- return (unsigned long) rc;
- }
-
- return addr;
+ return check_asce_limit(mm, addr, len);
}
/*
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index f8c6faab41f4..e22c06d5f206 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -367,20 +367,4 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
}
}
-#ifdef CONFIG_HIBERNATION
-bool kernel_page_present(struct page *page)
-{
- unsigned long addr;
- int cc;
-
- addr = page_to_phys(page);
- asm volatile(
- " lra %1,0(%1)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (cc), "+a" (addr) : : "cc");
- return cc == 0;
-}
-#endif /* CONFIG_HIBERNATION */
-
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 3dd253f81a77..498c98a312f4 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -77,67 +77,65 @@ static void __crst_table_upgrade(void *arg)
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
- unsigned long *table, *pgd;
- int rc, notify;
+ unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
+ unsigned long asce_limit = mm->context.asce_limit;
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
- VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
- rc = 0;
- notify = 0;
- while (mm->context.asce_limit < end) {
- table = crst_table_alloc(mm);
- if (!table) {
- rc = -ENOMEM;
- break;
- }
- spin_lock_bh(&mm->page_table_lock);
- pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit == _REGION2_SIZE) {
- crst_table_init(table, _REGION2_ENTRY_EMPTY);
- p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->context.asce_limit = _REGION1_SIZE;
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
- mm_inc_nr_puds(mm);
- } else {
- crst_table_init(table, _REGION1_ENTRY_EMPTY);
- pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->context.asce_limit = -PAGE_SIZE;
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
- }
- notify = 1;
- spin_unlock_bh(&mm->page_table_lock);
- }
- if (notify)
- on_each_cpu(__crst_table_upgrade, mm, 0);
- return rc;
-}
+ VM_BUG_ON(asce_limit < _REGION2_SIZE);
-void crst_table_downgrade(struct mm_struct *mm)
-{
- pgd_t *pgd;
+ if (end <= asce_limit)
+ return 0;
- /* downgrade should only happen from 3 to 2 levels (compat only) */
- VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
+ if (asce_limit == _REGION2_SIZE) {
+ p4d = crst_table_alloc(mm);
+ if (unlikely(!p4d))
+ goto err_p4d;
+ crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
+ }
+ if (end > _REGION1_SIZE) {
+ pgd = crst_table_alloc(mm);
+ if (unlikely(!pgd))
+ goto err_pgd;
+ crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
+ }
- if (current->active_mm == mm) {
- clear_user_asce();
- __tlb_flush_mm(mm);
+ spin_lock_bh(&mm->page_table_lock);
+
+ /*
+ * This routine gets called with mmap_sem held and there is no
+ * reason to optimize for the case where it is not. However, if
+ * that should ever change, the check below will let us know.
+ */
+ VM_BUG_ON(asce_limit != mm->context.asce_limit);
+
+ if (p4d) {
+ __pgd = (unsigned long *) mm->pgd;
+ p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
+ mm->pgd = (pgd_t *) p4d;
+ mm->context.asce_limit = _REGION1_SIZE;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm_inc_nr_puds(mm);
+ }
+ if (pgd) {
+ __pgd = (unsigned long *) mm->pgd;
+ pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
+ mm->pgd = (pgd_t *) pgd;
+ mm->context.asce_limit = TASK_SIZE_MAX;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
}
- pgd = mm->pgd;
- mm_dec_nr_pmds(mm);
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->context.asce_limit = _REGION3_SIZE;
- mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- crst_table_free(mm, (unsigned long *) pgd);
+ spin_unlock_bh(&mm->page_table_lock);
- if (current->active_mm == mm)
- set_user_asce(mm);
+ on_each_cpu(__crst_table_upgrade, mm, 0);
+
+ return 0;
+
+err_pgd:
+ crst_table_free(mm, p4d);
+err_p4d:
+ return -ENOMEM;
}
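
A summary of which tables the rewritten upgrade allocates, assuming asce_limit encodes the current depth (_REGION2_SIZE for 3 levels, _REGION1_SIZE for 4):

/*
 *   current asce_limit       end                allocations (new depth)
 *   _REGION2_SIZE (3 lvl)    <= _REGION1_SIZE   p4d only   (4 levels)
 *   _REGION2_SIZE (3 lvl)    >  _REGION1_SIZE   p4d + pgd  (5 levels)
 *   _REGION1_SIZE (4 lvl)    >  _REGION1_SIZE   pgd only   (5 levels)
 */
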
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
@@ -304,7 +302,7 @@ void __tlb_remove_table(void *_table)
mask >>= 24;
if (mask != 0)
break;
- /* fallthrough */
+ fallthrough;
case 3: /* 4K page table with pgstes */
if (mask & 3)
atomic_xor_bits(&page->_refcount, 3 << 24);
@@ -529,7 +527,7 @@ void base_asce_free(unsigned long asce)
base_region2_walk(table, 0, _REGION1_SIZE, 0);
break;
case _ASCE_TYPE_REGION1:
- base_region1_walk(table, 0, -_PAGE_SIZE, 0);
+ base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
break;
}
base_crst_free(table);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b403fa14847d..f810930aff42 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -415,6 +415,10 @@ void __init vmem_map_init(void)
SET_MEMORY_RO | SET_MEMORY_X);
__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
+
+ /* the lowcore must be executable for our LPSWE instructions */
+ set_memory_x(0, 1);
+
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/arch/s390/numa/Makefile b/arch/s390/numa/Makefile
index 66c2dff74895..c89d26f4f77d 100644
--- a/arch/s390/numa/Makefile
+++ b/arch/s390/numa/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += numa.o
-obj-y += toptree.o
-obj-$(CONFIG_NUMA_EMU) += mode_emu.o
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
deleted file mode 100644
index 72d742bb2d17..000000000000
--- a/arch/s390/numa/mode_emu.c
+++ /dev/null
@@ -1,577 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NUMA support for s390
- *
- * NUMA emulation (aka fake NUMA) distributes the available memory to nodes
- * without using real topology information about the physical memory of the
- * machine.
- *
- * It distributes the available CPUs to nodes while respecting the original
- * machine topology information. This is done by trying to avoid to separate
- * CPUs which reside on the same book or even on the same MC.
- *
- * Because the current Linux scheduler code requires a stable cpu to node
- * mapping, cores are pinned to nodes when the first CPU thread is set online.
- *
- * Copyright IBM Corp. 2015
- */
-
-#define KMSG_COMPONENT "numa_emu"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/cpumask.h>
-#include <linux/memblock.h>
-#include <linux/node.h>
-#include <linux/memory.h>
-#include <linux/slab.h>
-#include <asm/smp.h>
-#include <asm/topology.h>
-#include "numa_mode.h"
-#include "toptree.h"
-
-/* Distances between the different system components */
-#define DIST_EMPTY 0
-#define DIST_CORE 1
-#define DIST_MC 2
-#define DIST_BOOK 3
-#define DIST_DRAWER 4
-#define DIST_MAX 5
-
-/* Node distance reported to common code */
-#define EMU_NODE_DIST 10
-
-/* Node ID for free (not yet pinned) cores */
-#define NODE_ID_FREE -1
-
-/* Different levels of toptree */
-enum toptree_level {CORE, MC, BOOK, DRAWER, NODE, TOPOLOGY};
-
-/* The two toptree IDs */
-enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};
-
-/* Number of NUMA nodes */
-static int emu_nodes = 1;
-/* NUMA stripe size */
-static unsigned long emu_size;
-
-/*
- * Node to core pinning information updates are protected by
- * "sched_domains_mutex".
- */
-static struct {
- s32 to_node_id[CONFIG_NR_CPUS]; /* Pinned core to node mapping */
- int total; /* Total number of pinned cores */
- int per_node_target; /* Cores per node without extra cores */
- int per_node[MAX_NUMNODES]; /* Number of cores pinned to node */
-} *emu_cores;
-
-/*
- * Pin a core to a node
- */
-static void pin_core_to_node(int core_id, int node_id)
-{
- if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) {
- emu_cores->per_node[node_id]++;
- emu_cores->to_node_id[core_id] = node_id;
- emu_cores->total++;
- } else {
- WARN_ON(emu_cores->to_node_id[core_id] != node_id);
- }
-}
-
-/*
- * Number of pinned cores of a node
- */
-static int cores_pinned(struct toptree *node)
-{
- return emu_cores->per_node[node->id];
-}
-
-/*
- * ID of the node where the core is pinned (or NODE_ID_FREE)
- */
-static int core_pinned_to_node_id(struct toptree *core)
-{
- return emu_cores->to_node_id[core->id];
-}
-
-/*
- * Number of cores in the tree that are not yet pinned
- */
-static int cores_free(struct toptree *tree)
-{
- struct toptree *core;
- int count = 0;
-
- toptree_for_each(core, tree, CORE) {
- if (core_pinned_to_node_id(core) == NODE_ID_FREE)
- count++;
- }
- return count;
-}
-
-/*
- * Return node of core
- */
-static struct toptree *core_node(struct toptree *core)
-{
- return core->parent->parent->parent->parent;
-}
-
-/*
- * Return drawer of core
- */
-static struct toptree *core_drawer(struct toptree *core)
-{
- return core->parent->parent->parent;
-}
-
-/*
- * Return book of core
- */
-static struct toptree *core_book(struct toptree *core)
-{
- return core->parent->parent;
-}
-
-/*
- * Return mc of core
- */
-static struct toptree *core_mc(struct toptree *core)
-{
- return core->parent;
-}
-
-/*
- * Distance between two cores
- */
-static int dist_core_to_core(struct toptree *core1, struct toptree *core2)
-{
- if (core_drawer(core1)->id != core_drawer(core2)->id)
- return DIST_DRAWER;
- if (core_book(core1)->id != core_book(core2)->id)
- return DIST_BOOK;
- if (core_mc(core1)->id != core_mc(core2)->id)
- return DIST_MC;
- /* Same core or sibling on same MC */
- return DIST_CORE;
-}
-
-/*
- * Distance of a node to a core
- */
-static int dist_node_to_core(struct toptree *node, struct toptree *core)
-{
- struct toptree *core_node;
- int dist_min = DIST_MAX;
-
- toptree_for_each(core_node, node, CORE)
- dist_min = min(dist_min, dist_core_to_core(core_node, core));
- return dist_min == DIST_MAX ? DIST_EMPTY : dist_min;
-}
-
-/*
- * Unify will delete empty nodes, therefore recreate nodes.
- */
-static void toptree_unify_tree(struct toptree *tree)
-{
- int nid;
-
- toptree_unify(tree);
- for (nid = 0; nid < emu_nodes; nid++)
- toptree_get_child(tree, nid);
-}
-
-/*
- * Find the best/nearest node for a given core and ensure that no node
- * gets more than "emu_cores->per_node_target + extra" cores.
- */
-static struct toptree *node_for_core(struct toptree *numa, struct toptree *core,
- int extra)
-{
- struct toptree *node, *node_best = NULL;
- int dist_cur, dist_best, cores_target;
-
- cores_target = emu_cores->per_node_target + extra;
- dist_best = DIST_MAX;
- node_best = NULL;
- toptree_for_each(node, numa, NODE) {
- /* Already pinned cores must use their nodes */
- if (core_pinned_to_node_id(core) == node->id) {
- node_best = node;
- break;
- }
- /* Skip nodes that already have enough cores */
- if (cores_pinned(node) >= cores_target)
- continue;
- dist_cur = dist_node_to_core(node, core);
- if (dist_cur < dist_best) {
- dist_best = dist_cur;
- node_best = node;
- }
- }
- return node_best;
-}
-
-/*
- * Find the best node for each core with respect to "extra" core count
- */
-static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys,
- int extra)
-{
- struct toptree *node, *core, *tmp;
-
- toptree_for_each_safe(core, tmp, phys, CORE) {
- node = node_for_core(numa, core, extra);
- if (!node)
- return;
- toptree_move(core, node);
- pin_core_to_node(core->id, node->id);
- }
-}
-
-/*
- * Move structures of given level to specified NUMA node
- */
-static void move_level_to_numa_node(struct toptree *node, struct toptree *phys,
- enum toptree_level level, bool perfect)
-{
- int cores_free, cores_target = emu_cores->per_node_target;
- struct toptree *cur, *tmp;
-
- toptree_for_each_safe(cur, tmp, phys, level) {
- cores_free = cores_target - toptree_count(node, CORE);
- if (perfect) {
- if (cores_free == toptree_count(cur, CORE))
- toptree_move(cur, node);
- } else {
- if (cores_free >= toptree_count(cur, CORE))
- toptree_move(cur, node);
- }
- }
-}
-
-/*
- * Move structures of a given level to NUMA nodes. If "perfect" is specified
- * move only perfectly fitting structures. Otherwise move also smaller
- * than needed structures.
- */
-static void move_level_to_numa(struct toptree *numa, struct toptree *phys,
- enum toptree_level level, bool perfect)
-{
- struct toptree *node;
-
- toptree_for_each(node, numa, NODE)
- move_level_to_numa_node(node, phys, level, perfect);
-}
-
-/*
- * For the first run try to move the big structures
- */
-static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys)
-{
- struct toptree *core;
-
- /* Always try to move perfectly fitting structures first */
- move_level_to_numa(numa, phys, DRAWER, true);
- move_level_to_numa(numa, phys, DRAWER, false);
- move_level_to_numa(numa, phys, BOOK, true);
- move_level_to_numa(numa, phys, BOOK, false);
- move_level_to_numa(numa, phys, MC, true);
- move_level_to_numa(numa, phys, MC, false);
- /* Now pin all the moved cores */
- toptree_for_each(core, numa, CORE)
- pin_core_to_node(core->id, core_node(core)->id);
-}
-
-/*
- * Allocate new topology and create required nodes
- */
-static struct toptree *toptree_new(int id, int nodes)
-{
- struct toptree *tree;
- int nid;
-
- tree = toptree_alloc(TOPOLOGY, id);
- if (!tree)
- goto fail;
- for (nid = 0; nid < nodes; nid++) {
- if (!toptree_get_child(tree, nid))
- goto fail;
- }
- return tree;
-fail:
- panic("NUMA emulation could not allocate topology");
-}
-
-/*
- * Allocate and initialize core to node mapping
- */
-static void __ref create_core_to_node_map(void)
-{
- int i;
-
- emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
- if (!emu_cores)
- panic("%s: Failed to allocate %zu bytes align=0x%x\n",
- __func__, sizeof(*emu_cores), 8);
- for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
- emu_cores->to_node_id[i] = NODE_ID_FREE;
-}
-
-/*
- * Move cores from physical topology into NUMA target topology
- * and try to keep as much of the physical topology as possible.
- */
-static struct toptree *toptree_to_numa(struct toptree *phys)
-{
- static int first = 1;
- struct toptree *numa;
- int cores_total;
-
- cores_total = emu_cores->total + cores_free(phys);
- emu_cores->per_node_target = cores_total / emu_nodes;
- numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes);
- if (first) {
- toptree_to_numa_first(numa, phys);
- first = 0;
- }
- toptree_to_numa_single(numa, phys, 0);
- toptree_to_numa_single(numa, phys, 1);
- toptree_unify_tree(numa);
-
- WARN_ON(cpumask_weight(&phys->mask));
- return numa;
-}
-
-/*
- * Create a toptree out of the physical topology that we got from the hypervisor
- */
-static struct toptree *toptree_from_topology(void)
-{
- struct toptree *phys, *node, *drawer, *book, *mc, *core;
- struct cpu_topology_s390 *top;
- int cpu;
-
- phys = toptree_new(TOPTREE_ID_PHYS, 1);
-
- for_each_cpu(cpu, &cpus_with_topology) {
- top = &cpu_topology[cpu];
- node = toptree_get_child(phys, 0);
- drawer = toptree_get_child(node, top->drawer_id);
- book = toptree_get_child(drawer, top->book_id);
- mc = toptree_get_child(book, top->socket_id);
- core = toptree_get_child(mc, smp_get_base_cpu(cpu));
- if (!drawer || !book || !mc || !core)
- panic("NUMA emulation could not allocate memory");
- cpumask_set_cpu(cpu, &core->mask);
- toptree_update_mask(mc);
- }
- return phys;
-}
-
-/*
- * Add toptree core to topology and create correct CPU masks
- */
-static void topology_add_core(struct toptree *core)
-{
- struct cpu_topology_s390 *top;
- int cpu;
-
- for_each_cpu(cpu, &core->mask) {
- top = &cpu_topology[cpu];
- cpumask_copy(&top->thread_mask, &core->mask);
- cpumask_copy(&top->core_mask, &core_mc(core)->mask);
- cpumask_copy(&top->book_mask, &core_book(core)->mask);
- cpumask_copy(&top->drawer_mask, &core_drawer(core)->mask);
- cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
- top->node_id = core_node(core)->id;
- }
-}
-
-/*
- * Apply toptree to topology and create CPU masks
- */
-static void toptree_to_topology(struct toptree *numa)
-{
- struct toptree *core;
- int i;
-
- /* Clear all node masks */
- for (i = 0; i < MAX_NUMNODES; i++)
- cpumask_clear(&node_to_cpumask_map[i]);
-
- /* Rebuild all masks */
- toptree_for_each(core, numa, CORE)
- topology_add_core(core);
-}
-
-/*
- * Show the node to core mapping
- */
-static void print_node_to_core_map(void)
-{
- int nid, cid;
-
- if (!numa_debug_enabled)
- return;
- printk(KERN_DEBUG "NUMA node to core mapping\n");
- for (nid = 0; nid < emu_nodes; nid++) {
- printk(KERN_DEBUG " node %3d: ", nid);
- for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) {
- if (emu_cores->to_node_id[cid] == nid)
- printk(KERN_CONT "%d ", cid);
- }
- printk(KERN_CONT "\n");
- }
-}
-
-static void pin_all_possible_cpus(void)
-{
- int core_id, node_id, cpu;
- static int initialized;
-
- if (initialized)
- return;
- print_node_to_core_map();
- node_id = 0;
- for_each_possible_cpu(cpu) {
- core_id = smp_get_base_cpu(cpu);
- if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
- continue;
- pin_core_to_node(core_id, node_id);
- cpu_topology[cpu].node_id = node_id;
- node_id = (node_id + 1) % emu_nodes;
- }
- print_node_to_core_map();
- initialized = 1;
-}
-
-/*
- * Transfer physical topology into a NUMA topology and modify CPU masks
- * according to the NUMA topology.
- *
- * Must be called with "sched_domains_mutex" lock held.
- */
-static void emu_update_cpu_topology(void)
-{
- struct toptree *phys, *numa;
-
- if (emu_cores == NULL)
- create_core_to_node_map();
- phys = toptree_from_topology();
- numa = toptree_to_numa(phys);
- toptree_free(phys);
- toptree_to_topology(numa);
- toptree_free(numa);
- pin_all_possible_cpus();
-}
-
-/*
- * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum
- * alignment (needed for memory hotplug).
- */
-static unsigned long emu_setup_size_adjust(unsigned long size)
-{
- unsigned long size_new;
-
- size = size ? : CONFIG_EMU_SIZE;
- size_new = roundup(size, memory_block_size_bytes());
- if (size_new == size)
- return size;
- pr_warn("Increasing memory stripe size from %ld MB to %ld MB\n",
- size >> 20, size_new >> 20);
- return size_new;
-}
-
-/*
- * If we have not enough memory for the specified nodes, reduce the node count.
- */
-static int emu_setup_nodes_adjust(int nodes)
-{
- int nodes_max;
-
- nodes_max = memblock.memory.total_size / emu_size;
- nodes_max = max(nodes_max, 1);
- if (nodes_max >= nodes)
- return nodes;
- pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes);
- return nodes_max;
-}
-
-/*
- * Early emu setup
- */
-static void emu_setup(void)
-{
- int nid;
-
- emu_size = emu_setup_size_adjust(emu_size);
- emu_nodes = emu_setup_nodes_adjust(emu_nodes);
- for (nid = 0; nid < emu_nodes; nid++)
- node_set(nid, node_possible_map);
- pr_info("Creating %d nodes with memory stripe size %ld MB\n",
- emu_nodes, emu_size >> 20);
-}
-
-/*
- * Return node id for given page number
- */
-static int emu_pfn_to_nid(unsigned long pfn)
-{
- return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes;
-}
-
-/*
- * Return stripe size
- */
-static unsigned long emu_align(void)
-{
- return emu_size;
-}
-
-/*
- * Return distance between two nodes
- */
-static int emu_distance(int node1, int node2)
-{
- return (node1 != node2) * EMU_NODE_DIST;
-}
-
-/*
- * Define callbacks for generic s390 NUMA infrastructure
- */
-const struct numa_mode numa_mode_emu = {
- .name = "emu",
- .setup = emu_setup,
- .update_cpu_topology = emu_update_cpu_topology,
- .__pfn_to_nid = emu_pfn_to_nid,
- .align = emu_align,
- .distance = emu_distance,
-};
-
-/*
- * Kernel parameter: emu_nodes=<n>
- */
-static int __init early_parse_emu_nodes(char *p)
-{
- int count;
-
- if (!p || kstrtoint(p, 0, &count) != 0 || count <= 0)
- return 0;
- emu_nodes = min(count, MAX_NUMNODES);
- return 0;
-}
-early_param("emu_nodes", early_parse_emu_nodes);
-
-/*
- * Kernel parameter: emu_size=[<n>[k|M|G|T]]
- */
-static int __init early_parse_emu_size(char *p)
-{
- if (p)
- emu_size = memparse(p, NULL);
- return 0;
-}
-early_param("emu_size", early_parse_emu_size);
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index d2910fa834c8..51c5a9f6e525 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -7,165 +7,36 @@
* Copyright IBM Corp. 2015
*/
-#define KMSG_COMPONENT "numa"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
-#include <linux/slab.h>
#include <linux/node.h>
-
#include <asm/numa.h>
-#include "numa_mode.h"
-pg_data_t *node_data[MAX_NUMNODES];
+struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
-cpumask_t node_to_cpumask_map[MAX_NUMNODES];
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-static void plain_setup(void)
-{
- node_set(0, node_possible_map);
-}
-
-const struct numa_mode numa_mode_plain = {
- .name = "plain",
- .setup = plain_setup,
-};
-
-static const struct numa_mode *mode = &numa_mode_plain;
-
-int numa_pfn_to_nid(unsigned long pfn)
-{
- return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
-}
-
-void numa_update_cpu_topology(void)
-{
- if (mode->update_cpu_topology)
- mode->update_cpu_topology();
-}
-
-int __node_distance(int a, int b)
-{
- return mode->distance ? mode->distance(a, b) : 0;
-}
-EXPORT_SYMBOL(__node_distance);
-
-int numa_debug_enabled;
-
-/*
- * numa_setup_memory() - Assign bootmem to nodes
- *
- * The memory is first added to memblock without regard to nodes.
- * This is fixed before remaining memblock memory is handed over to the
- * buddy allocator.
- * An important side effect is that large bootmem allocations might easily
- * cross node boundaries, which can be needed for large allocations with
- * smaller memory stripes in each node (i.e. when using NUMA emulation).
- *
- * Memory defines nodes:
- * Therefore this routine also sets the nodes online with memory.
- */
-static void __init numa_setup_memory(void)
+void __init numa_setup(void)
{
- unsigned long cur_base, align, end_of_dram;
- int nid = 0;
-
- end_of_dram = memblock_end_of_DRAM();
- align = mode->align ? mode->align() : ULONG_MAX;
-
- /*
- * Step through all available memory and assign it to the nodes
- * indicated by the mode implementation.
- * All nodes which are seen here will be set online.
- */
- cur_base = 0;
- do {
- nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
- node_set_online(nid);
- memblock_set_node(cur_base, align, &memblock.memory, nid);
- cur_base += align;
- } while (cur_base < end_of_dram);
+ int nid;
- /* Allocate and fill out node_data */
+ nodes_clear(node_possible_map);
+ node_set(0, node_possible_map);
+ node_set_online(0);
for (nid = 0; nid < MAX_NUMNODES; nid++) {
NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
if (!NODE_DATA(nid))
panic("%s: Failed to allocate %zu bytes align=0x%x\n",
__func__, sizeof(pg_data_t), 8);
}
-
- for_each_online_node(nid) {
- unsigned long start_pfn, end_pfn;
- unsigned long t_start, t_end;
- int i;
-
- start_pfn = ULONG_MAX;
- end_pfn = 0;
- for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
- if (t_start < start_pfn)
- start_pfn = t_start;
- if (t_end > end_pfn)
- end_pfn = t_end;
- }
- NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
- NODE_DATA(nid)->node_id = nid;
- }
-}
-
-/*
- * numa_setup() - Earliest initialization
- *
- * Assign the mode and call the mode's setup routine.
- */
-void __init numa_setup(void)
-{
- pr_info("NUMA mode: %s\n", mode->name);
- nodes_clear(node_possible_map);
- /* Initially attach all possible CPUs to node 0. */
- cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
- if (mode->setup)
- mode->setup();
- numa_setup_memory();
- memblock_dump_all();
+ NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT;
+ NODE_DATA(0)->node_id = 0;
}
-/*
- * numa_init_late() - Initialization initcall
- *
- * Register NUMA nodes.
- */
static int __init numa_init_late(void)
{
- int nid;
-
- for_each_online_node(nid)
- register_one_node(nid);
+ register_one_node(0);
return 0;
}
arch_initcall(numa_init_late);
-
-static int __init parse_debug(char *parm)
-{
- numa_debug_enabled = 1;
- return 0;
-}
-early_param("numa_debug", parse_debug);
-
-static int __init parse_numa(char *parm)
-{
- if (!parm)
- return 1;
- if (strcmp(parm, numa_mode_plain.name) == 0)
- mode = &numa_mode_plain;
-#ifdef CONFIG_NUMA_EMU
- if (strcmp(parm, numa_mode_emu.name) == 0)
- mode = &numa_mode_emu;
-#endif
- return 0;
-}
-early_param("numa", parse_numa);
diff --git a/arch/s390/numa/numa_mode.h b/arch/s390/numa/numa_mode.h
deleted file mode 100644
index dfd3e2784081..000000000000
--- a/arch/s390/numa/numa_mode.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * NUMA support for s390
- *
- * Define declarations used for communication between NUMA mode
- * implementations and NUMA core functionality.
- *
- * Copyright IBM Corp. 2015
- */
-#ifndef __S390_NUMA_MODE_H
-#define __S390_NUMA_MODE_H
-
-struct numa_mode {
- char *name; /* Name of mode */
- void (*setup)(void); /* Initialize mode */
- void (*update_cpu_topology)(void); /* Called by topology code */
- int (*__pfn_to_nid)(unsigned long pfn); /* PFN to node ID */
- unsigned long (*align)(void); /* Minimum node alignment */
- int (*distance)(int a, int b); /* Distance between two nodes */
-};
-
-extern const struct numa_mode numa_mode_plain;
-extern const struct numa_mode numa_mode_emu;
-
-#endif /* __S390_NUMA_MODE_H */
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
deleted file mode 100644
index 71a608cd4f61..000000000000
--- a/arch/s390/numa/toptree.c
+++ /dev/null
@@ -1,351 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NUMA support for s390
- *
- * A tree structure used for machine topology mangling
- *
- * Copyright IBM Corp. 2015
- */
-
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/cpumask.h>
-#include <linux/list.h>
-#include <linux/list_sort.h>
-#include <linux/slab.h>
-#include <asm/numa.h>
-
-#include "toptree.h"
-
-/**
- * toptree_alloc - Allocate and initialize a new tree node.
- * @level: The node's vertical level; level 0 contains the leaves.
- * @id: ID number, explicitly not unique beyond scope of node's siblings
- *
- * Allocate a new tree node and initialize it.
- *
- * RETURNS:
- * Pointer to the new tree node or NULL on error
- */
-struct toptree __ref *toptree_alloc(int level, int id)
-{
- struct toptree *res;
-
- if (slab_is_available())
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- else
- res = memblock_alloc(sizeof(*res), 8);
- if (!res)
- return res;
-
- INIT_LIST_HEAD(&res->children);
- INIT_LIST_HEAD(&res->sibling);
- cpumask_clear(&res->mask);
- res->level = level;
- res->id = id;
- return res;
-}
-
-/**
- * toptree_remove - Remove a tree node from a tree
- * @cand: Pointer to the node to remove
- *
- * The node is detached from its parent node. The parent node's
- * masks will be updated to reflect the loss of the child.
- */
-static void toptree_remove(struct toptree *cand)
-{
- struct toptree *oldparent;
-
- list_del_init(&cand->sibling);
- oldparent = cand->parent;
- cand->parent = NULL;
- toptree_update_mask(oldparent);
-}
-
-/**
- * toptree_free - discard a tree node
- * @cand: Pointer to the tree node to discard
- *
- * Checks if @cand is attached to a parent node. Detaches it
- * cleanly using toptree_remove. Any children are freed
- * recursively. In the end @cand itself is freed.
- */
-void __ref toptree_free(struct toptree *cand)
-{
- struct toptree *child, *tmp;
-
- if (cand->parent)
- toptree_remove(cand);
- toptree_for_each_child_safe(child, tmp, cand)
- toptree_free(child);
- if (slab_is_available())
- kfree(cand);
- else
- memblock_free_early((unsigned long)cand, sizeof(*cand));
-}
-
-/**
- * toptree_update_mask - Update node bitmasks
- * @cand: Pointer to a tree node
- *
- * The node's cpumask will be updated by combining all children's
- * masks. Then toptree_update_mask is called recursively for the
- * parent if applicable.
- *
- * NOTE:
- * This must not be called on leaves. If called on a leaf, its
- * CPU mask is cleared and lost.
- */
-void toptree_update_mask(struct toptree *cand)
-{
- struct toptree *child;
-
- cpumask_clear(&cand->mask);
- list_for_each_entry(child, &cand->children, sibling)
- cpumask_or(&cand->mask, &cand->mask, &child->mask);
- if (cand->parent)
- toptree_update_mask(cand->parent);
-}
-
-/**
- * toptree_insert - Insert a tree node into tree
- * @cand: Pointer to the node to insert
- * @target: Pointer to the node to which @cand will be added as a child
- *
- * Insert a tree node into a tree. Masks will be updated automatically.
- *
- * RETURNS:
- * 0 on success, -1 if NULL is passed as argument or the node levels
- * don't fit.
- */
-static int toptree_insert(struct toptree *cand, struct toptree *target)
-{
- if (!cand || !target)
- return -1;
- if (target->level != (cand->level + 1))
- return -1;
- list_add_tail(&cand->sibling, &target->children);
- cand->parent = target;
- toptree_update_mask(target);
- return 0;
-}
-
-/**
- * toptree_move_children - Move all child nodes of a node to a new place
- * @cand: Pointer to the node whose children are to be moved
- * @target: Pointer to the node to which @cand's children will be attached
- *
- * Take all child nodes of @cand and move them using toptree_move.
- */
-static void toptree_move_children(struct toptree *cand, struct toptree *target)
-{
- struct toptree *child, *tmp;
-
- toptree_for_each_child_safe(child, tmp, cand)
- toptree_move(child, target);
-}
-
-/**
- * toptree_unify - Merge children with same ID
- * @cand: Pointer to node whose direct children should be made unique
- *
- * When mangling the tree it is possible that a node has two or more children
- * which have the same ID. This routine merges these children into one and
- * moves all children of the merged nodes into the unified node.
- */
-void toptree_unify(struct toptree *cand)
-{
- struct toptree *child, *tmp, *cand_copy;
-
- /* Threads cannot be split, cores are not split */
- if (cand->level < 2)
- return;
-
- cand_copy = toptree_alloc(cand->level, 0);
- toptree_for_each_child_safe(child, tmp, cand) {
- struct toptree *tmpchild;
-
- if (!cpumask_empty(&child->mask)) {
- tmpchild = toptree_get_child(cand_copy, child->id);
- toptree_move_children(child, tmpchild);
- }
- toptree_free(child);
- }
- toptree_move_children(cand_copy, cand);
- toptree_free(cand_copy);
-
- toptree_for_each_child(child, cand)
- toptree_unify(child);
-}
-
-/**
- * toptree_move - Move a node to another context
- * @cand: Pointer to the node to move
- * @target: Pointer to the node where @cand should go
- *
- * In the easiest case @cand is exactly on the level below @target
- * and will be immediately moved to the target.
- *
- * If @target's level is not the direct parent level of @cand,
- * nodes for the missing levels are created and put between
- * @cand and @target. The "stacking" nodes' IDs are taken from
- * @cand's parents.
- *
- * After this it is likely to have redundant nodes in the tree
- * which are addressed by means of toptree_unify.
- */
-void toptree_move(struct toptree *cand, struct toptree *target)
-{
- struct toptree *stack_target, *real_insert_point, *ptr, *tmp;
-
- if (cand->level + 1 == target->level) {
- toptree_remove(cand);
- toptree_insert(cand, target);
- return;
- }
-
- real_insert_point = NULL;
- ptr = cand;
- stack_target = NULL;
-
- do {
- tmp = stack_target;
- stack_target = toptree_alloc(ptr->level + 1,
- ptr->parent->id);
- toptree_insert(tmp, stack_target);
- if (!real_insert_point)
- real_insert_point = stack_target;
- ptr = ptr->parent;
- } while (stack_target->level < (target->level - 1));
-
- toptree_remove(cand);
- toptree_insert(cand, real_insert_point);
- toptree_insert(stack_target, target);
-}
-
-/**
- * toptree_get_child - Access a tree node's child by its ID
- * @cand: Pointer to the tree node whose child is to be accessed
- * @id: The desired child's ID
- *
- * @cand's children are searched for a child with matching ID.
- * If no match can be found, a new child with the desired ID
- * is created and returned.
- */
-struct toptree *toptree_get_child(struct toptree *cand, int id)
-{
- struct toptree *child;
-
- toptree_for_each_child(child, cand)
- if (child->id == id)
- return child;
- child = toptree_alloc(cand->level-1, id);
- toptree_insert(child, cand);
- return child;
-}
-
-/**
- * toptree_first - Find the first descendant on specified level
- * @context: Pointer to tree node whose descendants are to be used
- * @level: The level of interest
- *
- * RETURNS:
- * @context's first descendant on the specified level, or NULL
- * if there is no matching descendant
- */
-struct toptree *toptree_first(struct toptree *context, int level)
-{
- struct toptree *child, *tmp;
-
- if (context->level == level)
- return context;
-
- if (!list_empty(&context->children)) {
- list_for_each_entry(child, &context->children, sibling) {
- tmp = toptree_first(child, level);
- if (tmp)
- return tmp;
- }
- }
- return NULL;
-}
-
-/**
- * toptree_next_sibling - Return next sibling
- * @cur: Pointer to a tree node
- *
- * RETURNS:
- * If @cur has a parent and is not the last in the parent's children list,
- * the next sibling is returned, or NULL when there are no siblings left.
- */
-static struct toptree *toptree_next_sibling(struct toptree *cur)
-{
- if (cur->parent == NULL)
- return NULL;
-
- if (cur == list_last_entry(&cur->parent->children,
- struct toptree, sibling))
- return NULL;
- return (struct toptree *) list_next_entry(cur, sibling);
-}
-
-/**
- * toptree_next - Tree traversal function
- * @cur: Pointer to current element
- * @context: Pointer to the root node of the tree or subtree to
- * be traversed.
- * @level: The level of interest.
- *
- * RETURNS:
- * Pointer to the next node on level @level
- * or NULL when there is no next node.
- */
-struct toptree *toptree_next(struct toptree *cur, struct toptree *context,
- int level)
-{
- struct toptree *cur_context, *tmp;
-
- if (!cur)
- return NULL;
-
- if (context->level == level)
- return NULL;
-
- tmp = toptree_next_sibling(cur);
- if (tmp != NULL)
- return tmp;
-
- cur_context = cur;
- while (cur_context->level < context->level - 1) {
- /* Step up */
- cur_context = cur_context->parent;
- /* Step aside */
- tmp = toptree_next_sibling(cur_context);
- if (tmp != NULL) {
- /* Step down */
- tmp = toptree_first(tmp, level);
- if (tmp != NULL)
- return tmp;
- }
- }
- return NULL;
-}
-
-/**
- * toptree_count - Count descendants on specified level
- * @context: Pointer to node whose descendants are to be considered
- * @level: Only descendants on the specified level will be counted
- *
- * RETURNS:
- * Number of descendants on the specified level
- */
-int toptree_count(struct toptree *context, int level)
-{
- struct toptree *cur;
- int cnt = 0;
-
- toptree_for_each(cur, context, level)
- cnt++;
- return cnt;
-}
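
The core invariant of the deleted toptree code is that toptree_update_mask() ORs the children's CPU masks into the parent, recursively up to the root. A compact userspace sketch of that upward aggregation, using plain bitmasks instead of cpumask_t:

#include <stdio.h>

struct node {
	unsigned long mask;
	struct node *parent;
	struct node *child[2];
};

/* A node's mask is the OR of its children's masks, recomputed up the
 * tree whenever a child changes. */
static void update_mask(struct node *n)
{
	int i;

	n->mask = 0;
	for (i = 0; i < 2; i++)
		if (n->child[i])
			n->mask |= n->child[i]->mask;
	if (n->parent)
		update_mask(n->parent);
}

int main(void)
{
	struct node leaf0 = { .mask = 0x3 }, leaf1 = { .mask = 0xc };
	struct node root = { .child = { &leaf0, &leaf1 } };

	leaf0.parent = leaf1.parent = &root;
	update_mask(&root);
	printf("root mask: 0x%lx\n", root.mask);	/* 0xf */
	return 0;
}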
diff --git a/arch/s390/numa/toptree.h b/arch/s390/numa/toptree.h
deleted file mode 100644
index 5246371ec713..000000000000
--- a/arch/s390/numa/toptree.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * NUMA support for s390
- *
- * A tree structure used for machine topology mangling
- *
- * Copyright IBM Corp. 2015
- */
-#ifndef S390_TOPTREE_H
-#define S390_TOPTREE_H
-
-#include <linux/cpumask.h>
-#include <linux/list.h>
-
-struct toptree {
- int level;
- int id;
- cpumask_t mask;
- struct toptree *parent;
- struct list_head sibling;
- struct list_head children;
-};
-
-struct toptree *toptree_alloc(int level, int id);
-void toptree_free(struct toptree *cand);
-void toptree_update_mask(struct toptree *cand);
-void toptree_unify(struct toptree *cand);
-struct toptree *toptree_get_child(struct toptree *cand, int id);
-void toptree_move(struct toptree *cand, struct toptree *target);
-int toptree_count(struct toptree *context, int level);
-
-struct toptree *toptree_first(struct toptree *context, int level);
-struct toptree *toptree_next(struct toptree *cur, struct toptree *context,
- int level);
-
-#define toptree_for_each_child(child, ptree) \
- list_for_each_entry(child, &ptree->children, sibling)
-
-#define toptree_for_each_child_safe(child, ptmp, ptree) \
- list_for_each_entry_safe(child, ptmp, &ptree->children, sibling)
-
-#define toptree_is_last(ptree) \
- ((ptree->parent == NULL) || \
- (ptree->parent->children.prev == &ptree->sibling))
-
-#define toptree_for_each(ptree, cont, ttype) \
- for (ptree = toptree_first(cont, ttype); \
- ptree != NULL; \
- ptree = toptree_next(ptree, cont, ttype))
-
-#define toptree_for_each_safe(ptree, tmp, cont, ttype) \
- for (ptree = toptree_first(cont, ttype), \
- tmp = toptree_next(ptree, cont, ttype); \
- ptree != NULL; \
- ptree = tmp, \
- tmp = toptree_next(ptree, cont, ttype))
-
-#define toptree_for_each_sibling(ptree, start) \
- toptree_for_each(ptree, start->parent, start->level)
-
-#endif /* S390_TOPTREE_H */
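
The toptree_for_each macros above wrap a first()/next() traversal pair into a for loop. The same pattern works for any structure that exposes those two operations; a minimal sketch on a linked list (names invented):

#include <stdio.h>

struct item { int val; struct item *next; };

static struct item *it_first(struct item *head) { return head; }
static struct item *it_next(struct item *cur) { return cur->next; }

/* Any traversal with first() and next() can be wrapped like this. */
#define for_each_item(pos, head) \
	for (pos = it_first(head); pos != NULL; pos = it_next(pos))

int main(void)
{
	struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct item *pos;

	for_each_item(pos, &a)
		printf("%d\n", pos->val);
	return 0;
}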
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 60716d18ce5a..94ca121933de 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -40,8 +40,9 @@
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
-static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
+static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);
+static unsigned int zpci_num_domains_allocated;
#define ZPCI_IOMAP_ENTRIES \
min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \
@@ -607,57 +608,25 @@ void pcibios_disable_device(struct pci_dev *pdev)
zpci_debug_exit_device(zdev);
}
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-static int zpci_restore(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct zpci_dev *zdev = to_zpci(pdev);
- int ret = 0;
-
- if (zdev->state != ZPCI_FN_STATE_ONLINE)
- goto out;
-
- ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
- if (ret)
- goto out;
-
- zpci_map_resources(pdev);
- zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- (u64) zdev->dma_table);
-
-out:
- return ret;
-}
-
-static int zpci_freeze(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct zpci_dev *zdev = to_zpci(pdev);
-
- if (zdev->state != ZPCI_FN_STATE_ONLINE)
- return 0;
-
- zpci_unregister_ioat(zdev, 0);
- zpci_unmap_resources(pdev);
- return clp_disable_fh(zdev);
-}
-
-struct dev_pm_ops pcibios_pm_ops = {
- .thaw_noirq = zpci_restore,
- .freeze_noirq = zpci_freeze,
- .restore_noirq = zpci_restore,
- .poweroff_noirq = zpci_freeze,
-};
-#endif /* CONFIG_HIBERNATE_CALLBACKS */
-
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
+ spin_lock(&zpci_domain_lock);
+ if (zpci_num_domains_allocated > (ZPCI_NR_DEVICES - 1)) {
+ spin_unlock(&zpci_domain_lock);
+ pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
+ zdev->fid, ZPCI_NR_DEVICES);
+ return -ENOSPC;
+ }
+
if (zpci_unique_uid) {
zdev->domain = (u16) zdev->uid;
- if (zdev->domain >= ZPCI_NR_DEVICES)
- return 0;
+ if (zdev->domain == 0) {
+ pr_warn("UID checking is active but no UID is set for PCI function %08x, so automatic domain allocation is used instead\n",
+ zdev->fid);
+ update_uid_checking(false);
+ goto auto_allocate;
+ }
- spin_lock(&zpci_domain_lock);
if (test_bit(zdev->domain, zpci_domain)) {
spin_unlock(&zpci_domain_lock);
pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
@@ -665,30 +634,28 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
return -EEXIST;
}
set_bit(zdev->domain, zpci_domain);
+ zpci_num_domains_allocated++;
spin_unlock(&zpci_domain_lock);
return 0;
}
-
- spin_lock(&zpci_domain_lock);
+auto_allocate:
+ /*
+ * We can always auto allocate domains below ZPCI_NR_DEVICES.
+ * There is either a free domain or we have reached the maximum in
+ * which case we would have bailed earlier.
+ */
zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
- if (zdev->domain == ZPCI_NR_DEVICES) {
- spin_unlock(&zpci_domain_lock);
- pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
- zdev->fid, ZPCI_NR_DEVICES);
- return -ENOSPC;
- }
set_bit(zdev->domain, zpci_domain);
+ zpci_num_domains_allocated++;
spin_unlock(&zpci_domain_lock);
return 0;
}
static void zpci_free_domain(struct zpci_dev *zdev)
{
- if (zdev->domain >= ZPCI_NR_DEVICES)
- return;
-
spin_lock(&zpci_domain_lock);
clear_bit(zdev->domain, zpci_domain);
+ zpci_num_domains_allocated--;
spin_unlock(&zpci_domain_lock);
}
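
The new zpci_num_domains_allocated counter lets fixed, UID-derived domains (which may sit at or above the auto-allocation range) count toward the device limit, which the old "domain >= ZPCI_NR_DEVICES means untracked" convention could not do. A userspace sketch of the bitmap-plus-counter scheme (NBITS, LIMIT and the error codes are made-up stand-ins):

#include <stdio.h>

#define NBITS 64	/* hypothetical bitmap size */
#define LIMIT 16	/* hypothetical device limit */

static unsigned char bitmap[NBITS / 8];
static unsigned int allocated;

static int test_and_set(int bit)
{
	int old = bitmap[bit / 8] >> (bit % 8) & 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return old;
}

/* Every allocation, fixed or automatic, bumps one counter, so the
 * LIMIT applies uniformly. fixed_id is assumed < NBITS here. */
static int alloc_domain(int fixed_id)
{
	int bit;

	if (allocated >= LIMIT)
		return -1;		/* -ENOSPC */
	if (fixed_id >= 0) {
		if (test_and_set(fixed_id))
			return -2;	/* -EEXIST */
		allocated++;
		return fixed_id;
	}
	for (bit = 0; bit < NBITS; bit++)
		if (!(bitmap[bit / 8] >> (bit % 8) & 1))
			break;
	test_and_set(bit);
	allocated++;
	return bit;
}

int main(void)
{
	int a = alloc_domain(40);
	int b = alloc_domain(-1);
	int c = alloc_domain(-1);

	printf("fixed 40 -> %d, auto -> %d, auto -> %d\n", a, b, c);
	return 0;
}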
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 0d3d8f170ea4..ea794ae755ae 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -24,7 +24,7 @@
bool zpci_unique_uid;
-static void update_uid_checking(bool new)
+void update_uid_checking(bool new)
{
if (zpci_unique_uid != new)
zpci_dbg(1, "uid checking:%d\n", new);
diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore
index c82157f46b18..97ca52779457 100644
--- a/arch/s390/purgatory/.gitignore
+++ b/arch/s390/purgatory/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
purgatory
purgatory.chk
purgatory.lds
diff --git a/arch/s390/tools/.gitignore b/arch/s390/tools/.gitignore
index 71bd6f8eebaf..ea62f37b79ef 100644
--- a/arch/s390/tools/.gitignore
+++ b/arch/s390/tools/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
gen_facilities
gen_opcode_table
diff --git a/arch/sh/boot/.gitignore b/arch/sh/boot/.gitignore
index f50fdd9975c5..6603bbbc917d 100644
--- a/arch/sh/boot/.gitignore
+++ b/arch/sh/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
zImage
vmlinux*
uImage*
diff --git a/arch/sh/boot/compressed/.gitignore b/arch/sh/boot/compressed/.gitignore
index edff113f1b85..37aa53057369 100644
--- a/arch/sh/boot/compressed/.gitignore
+++ b/arch/sh/boot/compressed/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ashiftrt.S
ashldi3.c
ashlsi3.S
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index e5beb625ab88..87db9a84b5ec 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -46,7 +46,6 @@ CONFIG_BLK_DEV_IDETAPE=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 51a54df22c11..7435182ef846 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,22 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += delay.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
generic-y += parport.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += serial.h
-generic-y += trace_clock.h
-generic-y += xor.h
diff --git a/arch/sh/include/mach-common/mach/highlander.h b/arch/sh/include/mach-common/mach/highlander.h
index 66d3e40fd046..fb44c299d033 100644
--- a/arch/sh/include/mach-common/mach/highlander.h
+++ b/arch/sh/include/mach-common/mach/highlander.h
@@ -18,7 +18,7 @@
#define PA_IRLPRI4 (PA_BCR+0x000a) /* Interrupt Priority 4 */
#define PA_RSTCTL (PA_BCR+0x000c) /* Reset Control */
#define PA_PCIBD (PA_BCR+0x000e) /* PCI Board detect control */
-#define PA_PCICD (PA_BCR+0x0010) /* PCI Conector detect control */
+#define PA_PCICD (PA_BCR+0x0010) /* PCI Connector detect control */
#define PA_EXTGIO (PA_BCR+0x0016) /* Extension GPIO Control */
#define PA_IVDRMON (PA_BCR+0x0018) /* iVDR Monitor control */
#define PA_IVDRCTL (PA_BCR+0x001a) /* iVDR control */
@@ -80,7 +80,7 @@
#define PA_SDPOW (PA_BCR+0x0004) /* SD Power control */
#define PA_RSTCTL (PA_BCR+0x0006) /* Device Reset control */
#define PA_PCIBD (PA_BCR+0x0008) /* PCI Board detect control */
-#define PA_PCICD (PA_BCR+0x000a) /* PCI Conector detect control */
+#define PA_PCICD (PA_BCR+0x000a) /* PCI Connector detect control */
#define PA_ZIGIO1 (PA_BCR+0x000c) /* Zigbee IO control 1 */
#define PA_ZIGIO2 (PA_BCR+0x000e) /* Zigbee IO control 2 */
#define PA_ZIGIO3 (PA_BCR+0x0010) /* Zigbee IO control 3 */
diff --git a/arch/sh/kernel/.gitignore b/arch/sh/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/sh/kernel/.gitignore
+++ b/arch/sh/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/sh/kernel/syscalls/syscallhdr.sh b/arch/sh/kernel/syscalls/syscallhdr.sh
index 1de0334e577f..4c0519861e97 100644
--- a/arch/sh/kernel/syscalls/syscallhdr.sh
+++ b/arch/sh/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/sh/kernel/vsyscall/.gitignore b/arch/sh/kernel/vsyscall/.gitignore
index 40836ad9079c..530a3031a88d 100644
--- a/arch/sh/kernel/vsyscall/.gitignore
+++ b/arch/sh/kernel/vsyscall/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vsyscall.lds
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 5f51456f4fc7..5f23d7907597 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -302,25 +302,25 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
* Pagefault was interrupted by SIGKILL. We have no reason to
* continue pagefault.
*/
- if (fatal_signal_pending(current)) {
- if (!(fault & VM_FAULT_RETRY))
- up_read(&current->mm->mmap_sem);
+ if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
no_context(regs, error_code, address);
return 1;
}
+ /* Release mmap_sem first if necessary */
+ if (!(fault & VM_FAULT_RETRY))
+ up_read(&current->mm->mmap_sem);
+
if (!(fault & VM_FAULT_ERROR))
return 0;
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
- up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address);
return 1;
}
- up_read(&current->mm->mmap_sem);
/*
* We ran out of memory, call the OOM killer, and return the
@@ -355,7 +355,7 @@ static inline int access_error(int error_code, struct vm_area_struct *vma)
return 1;
/* read, not present: */
- if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+ if (unlikely(!vma_is_accessible(vma)))
return 1;
return 0;
@@ -380,7 +380,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
struct mm_struct *mm;
struct vm_area_struct * vma;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
mm = tsk->mm;
@@ -481,7 +481,6 @@ good_area:
regs, address);
}
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/*
diff --git a/arch/sparc/boot/.gitignore b/arch/sparc/boot/.gitignore
index fc6f3986c76c..f3d8569a21d1 100644
--- a/arch/sparc/boot/.gitignore
+++ b/arch/sparc/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
btfix.S
btfixupprep
image
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 6c325d53a20a..bde4d21a8ac8 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -73,7 +73,6 @@ CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 62de2eb2773d..5269a704801f 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -4,21 +4,7 @@
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
-generic-y += div64.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += export.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
generic-y += kvm_para.h
-generic-y += linkage.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += module.h
-generic-y += preempt.h
-generic-y += serial.h
-generic-y += trace_clock.h
-generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index ed32845bd2d2..2f051343612e 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -2,23 +2,12 @@
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
-#include <asm/cpu_type.h>
-
extern const struct dma_map_ops *dma_ops;
-extern struct bus_type pci_bus_type;
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
-#ifdef CONFIG_SPARC_LEON
- if (sparc_cpu_model == sparc_leon)
- return NULL;
-#endif
-#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
- if (bus == &pci_bus_type)
- return NULL;
-#endif
- return dma_ops;
+ /* sparc32 uses per-device dma_ops */
+ return IS_ENABLED(CONFIG_SPARC64) ? dma_ops : NULL;
}
#endif
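
The rewritten get_arch_dma_ops() leans on IS_ENABLED() folding the config check to a compile-time constant. A simplified, compilable sketch of that preprocessor trick (the real macro in include/linux/kconfig.h also handles =m module configs):

#include <stdio.h>

#define CONFIG_SPARC64 1	/* pretend this config symbol is set */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
	/* both expressions fold to integer constants at compile time */
	printf("sparc64: %d, sparc_leon: %d\n",
	       IS_ENABLED(CONFIG_SPARC64), IS_ENABLED(CONFIG_SPARC_LEON));
	return 0;
}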
diff --git a/arch/sparc/kernel/.gitignore b/arch/sparc/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/sparc/kernel/.gitignore
+++ b/arch/sparc/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index e59461d03b9a..d6874c9b639f 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -373,9 +373,6 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
dma_make_coherent(paddr, PAGE_ALIGN(size));
}
-const struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
#ifdef CONFIG_PROC_FS
static int sparc_io_proc_show(struct seq_file *m, void *v)
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index b32cc5610712..e717a56efc5d 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -67,6 +67,7 @@ void of_propagate_archdata(struct platform_device *bus)
op->dev.archdata.stc = bus_sd->stc;
op->dev.archdata.host_controller = bus_sd->host_controller;
op->dev.archdata.numa_node = bus_sd->numa_node;
+ op->dev.dma_ops = bus->dev.dma_ops;
if (dp->child)
of_propagate_archdata(op);
diff --git a/arch/sparc/kernel/syscalls/syscallhdr.sh b/arch/sparc/kernel/syscalls/syscallhdr.sh
index 626b5740a9f1..cf50a75cc0bb 100644
--- a/arch/sparc/kernel/syscalls/syscallhdr.sh
+++ b/arch/sparc/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 89976c9b936c..f6e0e601f857 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -168,7 +168,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
int from_user = !(regs->psr & PSR_PS);
int code;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
if (text_fault)
address = regs->pc;
@@ -237,7 +237,7 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -261,7 +261,6 @@ good_area:
1, regs, address);
}
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/* No need to up_read(&mm->mmap_sem) as we would
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 8b7ddbd14b65..c0c0dd471b6b 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -271,7 +271,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
int si_code, fault_code;
vm_fault_t fault;
unsigned long address, mm_rss;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
fault_code = get_thread_fault_code();
@@ -425,7 +425,7 @@ good_area:
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
goto exit_exception;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -449,7 +449,6 @@ good_area:
1, regs, address);
}
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/* No need to up_read(&mm->mmap_sem) as we would
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 33a0facd9eb5..289276b99b01 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -38,6 +38,8 @@
#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
+static const struct dma_map_ops iounit_dma_ops;
+
static void __init iounit_iommu_init(struct platform_device *op)
{
struct iounit_struct *iounit;
@@ -70,6 +72,8 @@ static void __init iounit_iommu_init(struct platform_device *op)
xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
for (; xpt < xptend; xpt++)
sbus_writel(0, xpt);
+
+ op->dev.dma_ops = &iounit_dma_ops;
}
static int __init iounit_init(void)
@@ -288,8 +292,3 @@ static const struct dma_map_ops iounit_dma_ops = {
.map_sg = iounit_map_sg,
.unmap_sg = iounit_unmap_sg,
};
-
-void __init ld_mmu_iounit(void)
-{
- dma_ops = &iounit_dma_ops;
-}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 4d3c6991f0ae..b00dde13681b 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -54,6 +54,9 @@ static pgprot_t dvma_prot; /* Consistent mapping pte flags */
#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops;
+
static void __init sbus_iommu_init(struct platform_device *op)
{
struct iommu_struct *iommu;
@@ -129,6 +132,11 @@ static void __init sbus_iommu_init(struct platform_device *op)
(int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
op->dev.archdata.iommu = iommu;
+
+ if (flush_page_for_dma_global)
+ op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
+ else
+ op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
}
static int __init iommu_init(void)
@@ -445,13 +453,6 @@ static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
void __init ld_mmu_iommu(void)
{
- if (flush_page_for_dma_global) {
- /* flush_page_for_dma flushes everything, no matter what page it is */
- dma_ops = &sbus_iommu_dma_gflush_ops;
- } else {
- dma_ops = &sbus_iommu_dma_pflush_ops;
- }
-
if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
index 0d0b06e952a5..ce750a99eea9 100644
--- a/arch/sparc/mm/mm_32.h
+++ b/arch/sparc/mm/mm_32.h
@@ -20,6 +20,3 @@ void __init srmmu_paging_init(void);
/* iommu.c */
void ld_mmu_iommu(void);
-
-/* io-unit.c */
-void ld_mmu_iounit(void);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index f56c3c9a9793..b7c94de70cca 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1865,9 +1865,7 @@ void __init load_mmu(void)
&smp_cachetlb_ops;
#endif
- if (sparc_cpu_model == sun4d)
- ld_mmu_iounit();
- else
+ if (sparc_cpu_model != sun4d)
ld_mmu_iommu();
#ifdef CONFIG_SMP
if (sparc_cpu_model == sun4d)
diff --git a/arch/sparc/vdso/.gitignore b/arch/sparc/vdso/.gitignore
index ef925b998222..8d4ebc990bf3 100644
--- a/arch/sparc/vdso/.gitignore
+++ b/arch/sparc/vdso/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
vdso-image-*.c
vdso2c
diff --git a/arch/sparc/vdso/vdso32/.gitignore b/arch/sparc/vdso/vdso32/.gitignore
index e45fba9d0ced..5167384843b9 100644
--- a/arch/sparc/vdso/vdso32/.gitignore
+++ b/arch/sparc/vdso/vdso32/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso32.lds
diff --git a/arch/sparc/vdso/vdso32/vclock_gettime.c b/arch/sparc/vdso/vdso32/vclock_gettime.c
index 026abb3b826c..d7f99e6745ea 100644
--- a/arch/sparc/vdso/vdso32/vclock_gettime.c
+++ b/arch/sparc/vdso/vdso32/vclock_gettime.c
@@ -4,10 +4,6 @@
#define BUILD_VDSO32
-#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-#undef CONFIG_OPTIMIZE_INLINING
-#endif
-
#ifdef CONFIG_SPARC64
/*
diff --git a/arch/um/.gitignore b/arch/um/.gitignore
index a73d3a1cc746..6323e5571887 100644
--- a/arch/um/.gitignore
+++ b/arch/um/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
kernel/config.c
kernel/config.tmp
kernel/vmlinux.lds
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 0917f8443c28..96ab7026b037 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -62,9 +62,12 @@ config NR_CPUS
source "arch/$(HEADER_ARCH)/um/Kconfig"
+config FORBID_STATIC_LINK
+ bool
+
config STATIC_LINK
bool "Force a static link"
- default n
+ depends on !FORBID_STATIC_LINK
help
This option gives you the ability to force a static link of UML.
Normally, UML is linked as a shared binary. This is inconvenient for
@@ -73,6 +76,9 @@ config STATIC_LINK
Additionally, this option enables using higher memory spaces (up to
2.75G) for UML.
+ NOTE: This option is incompatible with some networking features which
+ depend on libraries that must be dynamically loaded (like NSS).
+
config LD_SCRIPT_STATIC
bool
default y
@@ -191,6 +197,7 @@ config UML_TIME_TRAVEL_SUPPORT
prompt "Support time-travel mode (e.g. for test execution)"
# inf-cpu mode is incompatible with the benchmarking
depends on !RAID6_PQ_BENCHMARK
+ depends on !SMP
help
Enable this option to support time travel inside the UML instance.
diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
index 73e98bb57bf5..fb51bd206dbe 100644
--- a/arch/um/configs/i386_defconfig
+++ b/arch/um/configs/i386_defconfig
@@ -26,7 +26,7 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_IOSCHED_CFQ=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_SSL=y
CONFIG_NULL_CHAN=y
CONFIG_PORT_CHAN=y
diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
index 3281d7600225..477b87317424 100644
--- a/arch/um/configs/x86_64_defconfig
+++ b/arch/um/configs/x86_64_defconfig
@@ -24,7 +24,7 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_IOSCHED_CFQ=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_SSL=y
CONFIG_NULL_CHAN=y
CONFIG_PORT_CHAN=y
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 72d417055782..9160ead56e33 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -234,6 +234,7 @@ config UML_NET_DAEMON
config UML_NET_VECTOR
bool "Vector I/O high performance network devices"
depends on UML_NET
+ select FORBID_STATIC_LINK
help
This User-Mode Linux network driver uses multi-message send
and receive functions. The host running the UML guest must have
@@ -245,6 +246,7 @@ config UML_NET_VECTOR
config UML_NET_VDE
bool "VDE transport (obsolete)"
depends on UML_NET
+ select FORBID_STATIC_LINK
help
This User-Mode Linux network transport allows one or more running
UMLs on a single host to communicate with each other and also
@@ -292,6 +294,7 @@ config UML_NET_MCAST
config UML_NET_PCAP
bool "pcap transport (obsolete)"
depends on UML_NET
+ select FORBID_STATIC_LINK
help
The pcap transport makes a pcap packet stream on the host look
like an ethernet device inside UML. This is useful for making
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index b80a1d616e4e..30575bd92975 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -36,6 +36,8 @@
#include "mconsole_kern.h"
#include <os.h>
+static struct vfsmount *proc_mnt = NULL;
+
static int do_unlink_socket(struct notifier_block *notifier,
unsigned long what, void *data)
{
@@ -123,7 +125,7 @@ void mconsole_log(struct mc_request *req)
void mconsole_proc(struct mc_request *req)
{
- struct vfsmount *mnt = task_active_pid_ns(current)->proc_mnt;
+ struct vfsmount *mnt = proc_mnt;
char *buf;
int len;
struct file *file;
@@ -134,6 +136,10 @@ void mconsole_proc(struct mc_request *req)
ptr += strlen("proc");
ptr = skip_spaces(ptr);
+ if (!mnt) {
+ mconsole_reply(req, "Proc not available", 1, 0);
+ goto out;
+ }
file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
@@ -683,6 +689,24 @@ void mconsole_stack(struct mc_request *req)
with_console(req, stack_proc, to);
}
+static int __init mount_proc(void)
+{
+ struct file_system_type *proc_fs_type;
+ struct vfsmount *mnt;
+
+ proc_fs_type = get_fs_type("proc");
+ if (!proc_fs_type)
+ return -ENODEV;
+
+ mnt = kern_mount(proc_fs_type);
+ put_filesystem(proc_fs_type);
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+
+ proc_mnt = mnt;
+ return 0;
+}
+
/*
* Changed by mconsole_setup, which is __setup, and called before SMP is
* active.
@@ -696,6 +720,8 @@ static int __init mconsole_init(void)
int err;
char file[UNIX_PATH_MAX];
+ mount_proc();
+
if (umid_file_name("mconsole", file, sizeof(file)))
return -1;
snprintf(mconsole_socket_name, sizeof(file), "%s", file);
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 35ebeebfc1a8..1802cf4ef5a5 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -266,7 +266,6 @@ static void uml_net_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, "42", sizeof(info->version));
}
static const struct ethtool_ops uml_net_ethtool_ops = {
@@ -275,17 +274,6 @@ static const struct ethtool_ops uml_net_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-static void uml_net_user_timer_expire(struct timer_list *t)
-{
-#ifdef undef
- struct uml_net_private *lp = from_timer(lp, t, tl);
- struct connection *conn = &lp->user;
-
- dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
- do_connect(conn);
-#endif
-}
-
void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
unsigned char *addr = dev->dev_addr;
@@ -456,7 +444,6 @@ static void eth_configure(int n, void *init, char *mac,
.add_address = transport->user->add_address,
.delete_address = transport->user->delete_address });
- timer_setup(&lp->tl, uml_net_user_timer_expire, 0);
spin_lock_init(&lp->lock);
memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 1d5d3057e6f1..ce115fce52f0 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -23,8 +23,6 @@
#define RNG_VERSION "1.0.0"
#define RNG_MODULE_NAME "hw_random"
-#define RNG_MISCDEV_MINOR 183 /* official */
-
/* Changed at init time, in the non-modular case, and at module load
* time, in the module case. Presumably, the module subsystem
* protects against a module being loaded twice at the same time.
@@ -104,7 +102,7 @@ static const struct file_operations rng_chrdev_ops = {
/* rng_init shouldn't be called more than once at boot time */
static struct miscdevice rng_miscdev = {
- RNG_MISCDEV_MINOR,
+ HWRNG_MINOR,
RNG_MODULE_NAME,
&rng_chrdev_ops,
};
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 247f95da057b..eae8c83364f7 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1592,11 +1592,11 @@ int io_thread(void *arg)
&io_remainder_size,
UBD_REQ_BUFFER_SIZE
);
- if (n < 0) {
- if (n == -EAGAIN) {
+ if (n <= 0) {
+ if (n == -EAGAIN)
ubd_read_poll(-1);
- continue;
- }
+
+ continue;
}
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
@@ -1607,7 +1607,9 @@ int io_thread(void *arg)
written = 0;
do {
- res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n);
+ res = os_write_file(kernel_fd,
+ ((char *) io_req_buffer) + written,
+ n - written);
if (res >= 0) {
written += res;
}
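
The io_thread fix above is the classic short-write bug: each retry must pass the remaining byte count (n - written), never the original length n. A generic userspace sketch of the corrected loop:

#include <unistd.h>
#include <errno.h>

/* Write all n bytes, retrying after short writes and EINTR. */
static int write_all(int fd, const void *buf, size_t n)
{
	size_t written = 0;

	while (written < n) {
		ssize_t res = write(fd, (const char *)buf + written,
				    n - written);
		if (res >= 0)
			written += res;
		else if (errno != EINTR)
			return -errno;
	}
	return 0;
}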
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index e98304d0219e..8735c468230a 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -46,7 +46,6 @@
#define DRIVER_NAME "uml-vector"
-#define DRIVER_VERSION "01"
struct vector_cmd_line_arg {
struct list_head list;
int unit;
@@ -198,6 +197,9 @@ static int get_transport_options(struct arglist *def)
long parsed;
int result = 0;
+ if (transport == NULL)
+ return -EINVAL;
+
if (vector != NULL) {
if (kstrtoul(vector, 10, &parsed) == 0) {
if (parsed == 0) {
@@ -1378,7 +1380,6 @@ static void vector_net_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
static int vector_net_load_bpf_flash(struct net_device *dev,
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index ddcd917be0af..aa28e9eecb7b 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -221,8 +221,7 @@ static struct vector_fds *user_init_tap_fds(struct arglist *ifspec)
return result;
tap_cleanup:
printk(UM_KERN_ERR "user_init_tap: init failed, error %d", fd);
- if (result != NULL)
- kfree(result);
+ kfree(result);
return NULL;
}
@@ -266,8 +265,7 @@ static struct vector_fds *user_init_hybrid_fds(struct arglist *ifspec)
return result;
hybrid_cleanup:
printk(UM_KERN_ERR "user_init_hybrid: init failed");
- if (result != NULL)
- kfree(result);
+ kfree(result);
return NULL;
}
@@ -344,10 +342,8 @@ static struct vector_fds *user_init_unix_fds(struct arglist *ifspec, int id)
unix_cleanup:
if (fd >= 0)
os_close_file(fd);
- if (remote_addr != NULL)
- kfree(remote_addr);
- if (result != NULL)
- kfree(result);
+ kfree(remote_addr);
+ kfree(result);
return NULL;
}
@@ -382,8 +378,7 @@ static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
return result;
raw_cleanup:
printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err);
- if (result != NULL)
- kfree(result);
+ kfree(result);
return NULL;
}
diff --git a/arch/um/drivers/vhost_user.h b/arch/um/drivers/vhost_user.h
index 45ff5ea22fea..6c71b6005177 100644
--- a/arch/um/drivers/vhost_user.h
+++ b/arch/um/drivers/vhost_user.h
@@ -10,9 +10,10 @@
/* Feature bits */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* Protocol feature bits */
-#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
-#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
-#define VHOST_USER_PROTOCOL_F_CONFIG 9
+#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
+#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
+#define VHOST_USER_PROTOCOL_F_CONFIG 9
+#define VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS 14
/* Vring state index masks */
#define VHOST_USER_VRING_INDEX_MASK 0xff
#define VHOST_USER_VRING_POLL_MASK BIT(8)
@@ -24,7 +25,8 @@
/* Supported protocol features */
#define VHOST_USER_SUPPORTED_PROTOCOL_F (BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
- BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG))
+ BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG) | \
+ BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS))
enum vhost_user_request {
VHOST_USER_GET_FEATURES = 1,
@@ -52,12 +54,14 @@ enum vhost_user_request {
VHOST_USER_SET_VRING_ENDIAN = 23,
VHOST_USER_GET_CONFIG = 24,
VHOST_USER_SET_CONFIG = 25,
+ VHOST_USER_VRING_KICK = 35,
};
enum vhost_user_slave_request {
VHOST_USER_SLAVE_IOTLB_MSG = 1,
VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
+ VHOST_USER_SLAVE_VRING_CALL = 4,
};
struct vhost_user_header {
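
The VHOST_USER_SUPPORTED_PROTOCOL_F mask above is one side of a standard feature negotiation: the driver only uses a feature whose bit is set in both its own mask and the device's. A small sketch of that handshake (the device-side mask is invented):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))
#define F_REPLY_ACK		3
#define F_SLAVE_REQ		5
#define F_CONFIG		9
#define F_INBAND_NOTIFICATIONS	14

#define SUPPORTED (BIT_ULL(F_REPLY_ACK) | BIT_ULL(F_SLAVE_REQ) | \
		   BIT_ULL(F_CONFIG) | BIT_ULL(F_INBAND_NOTIFICATIONS))

int main(void)
{
	uint64_t device = BIT_ULL(F_REPLY_ACK) |
			  BIT_ULL(F_INBAND_NOTIFICATIONS);
	uint64_t negotiated = device & SUPPORTED;

	printf("in-band notifications: %s\n",
	       negotiated & BIT_ULL(F_INBAND_NOTIFICATIONS) ? "yes" : "no");
	return 0;
}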
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index 023ced2250ea..be54d368e73d 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -26,6 +26,7 @@
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
+#include <linux/time-internal.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
@@ -53,6 +54,7 @@ struct virtio_uml_device {
struct virtio_device vdev;
struct platform_device *pdev;
+ spinlock_t sock_lock;
int sock, req_fd;
u64 features;
u64 protocol_features;
@@ -63,6 +65,11 @@ struct virtio_uml_device {
struct virtio_uml_vq_info {
int kick_fd, call_fd;
char name[32];
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ struct virtqueue *vq;
+ vq_callback_t *callback;
+ struct time_travel_event defer;
+#endif
};
extern unsigned long long physmem_size, highmem;
@@ -117,10 +124,27 @@ static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
static int vhost_user_recv(struct virtio_uml_device *vu_dev,
int fd, struct vhost_user_msg *msg,
- size_t max_payload_size)
+ size_t max_payload_size, bool wait)
{
size_t size;
- int rc = vhost_user_recv_header(fd, msg);
+ int rc;
+
+ /*
+ * In virtio time-travel mode, we're handling all the vhost-user
+ * FDs by polling them whenever appropriate. However, we may get
+ * into a situation where we're sending out an interrupt message
+ * to a device (e.g. a net device) and need to handle a simulation
+ * time message while doing so, e.g. one that tells us to update
+ * our idea of how long we can run without scheduling.
+ *
+ * Thus, we need to not just read() from the given fd, but need
+ * to also handle messages for the simulation time - this function
+ * does that for us while waiting for the given fd to be readable.
+ */
+ if (wait)
+ time_travel_wait_readable(fd);
+
+ rc = vhost_user_recv_header(fd, msg);
if (rc == -ECONNRESET && vu_dev->registered) {
struct virtio_uml_platform_data *pdata;
@@ -142,7 +166,8 @@ static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg,
size_t max_payload_size)
{
- int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg, max_payload_size);
+ int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
+ max_payload_size, true);
if (rc)
return rc;
@@ -172,7 +197,8 @@ static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg,
size_t max_payload_size)
{
- int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg, max_payload_size);
+ int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
+ max_payload_size, false);
if (rc)
return rc;
@@ -189,6 +215,7 @@ static int vhost_user_send(struct virtio_uml_device *vu_dev,
int *fds, size_t num_fds)
{
size_t size = sizeof(msg->header) + msg->header.size;
+ unsigned long flags;
bool request_ack;
int rc;
@@ -207,24 +234,28 @@ static int vhost_user_send(struct virtio_uml_device *vu_dev,
if (request_ack)
msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;
+ spin_lock_irqsave(&vu_dev->sock_lock, flags);
rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
if (rc < 0)
- return rc;
+ goto out;
if (request_ack) {
uint64_t status;
rc = vhost_user_recv_u64(vu_dev, &status);
if (rc)
- return rc;
+ goto out;
if (status) {
vu_err(vu_dev, "slave reports error: %llu\n", status);
- return -EIO;
+ rc = -EIO;
+ goto out;
}
}
- return 0;
+out:
+ spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
+ return rc;
}
static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
@@ -324,6 +355,7 @@ static void vhost_user_reply(struct virtio_uml_device *vu_dev,
static irqreturn_t vu_req_interrupt(int irq, void *data)
{
struct virtio_uml_device *vu_dev = data;
+ struct virtqueue *vq;
int response = 1;
struct {
struct vhost_user_msg msg;
@@ -343,6 +375,15 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
virtio_config_changed(&vu_dev->vdev);
response = 0;
break;
+ case VHOST_USER_SLAVE_VRING_CALL:
+ virtio_device_for_each_vq((&vu_dev->vdev), vq) {
+ if (vq->index == msg.msg.payload.vring_state.index) {
+ response = 0;
+ vring_interrupt(0 /* ignored */, vq);
+ break;
+ }
+ }
+ break;
case VHOST_USER_SLAVE_IOTLB_MSG:
/* not supported - VIRTIO_F_IOMMU_PLATFORM */
case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
@@ -684,6 +725,17 @@ static bool vu_notify(struct virtqueue *vq)
const uint64_t n = 1;
int rc;
+ time_travel_propagate_time();
+
+ if (info->kick_fd < 0) {
+ struct virtio_uml_device *vu_dev;
+
+ vu_dev = to_virtio_uml_device(vq->vdev);
+
+ return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
+ vq->index, 0) == 0;
+ }
+
do {
rc = os_write_file(info->kick_fd, &n, sizeof(n));
} while (rc == -EINTR);
@@ -749,10 +801,13 @@ static void vu_del_vq(struct virtqueue *vq)
{
struct virtio_uml_vq_info *info = vq->priv;
- um_free_irq(VIRTIO_IRQ, vq);
+ if (info->call_fd >= 0) {
+ um_free_irq(VIRTIO_IRQ, vq);
+ os_close_file(info->call_fd);
+ }
- os_close_file(info->call_fd);
- os_close_file(info->kick_fd);
+ if (info->kick_fd >= 0)
+ os_close_file(info->kick_fd);
vring_del_virtqueue(vq);
kfree(info);
@@ -782,6 +837,15 @@ static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
int call_fds[2];
int rc;
+ /* no call FD needed/desired in this case */
+ if (vu_dev->protocol_features &
+ BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
+ vu_dev->protocol_features &
+ BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
+ info->call_fd = -1;
+ return 0;
+ }
+
/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
rc = os_pipe(call_fds, true, true);
if (rc < 0)
@@ -810,6 +874,23 @@ out:
return rc;
}
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+static void vu_defer_irq_handle(struct time_travel_event *d)
+{
+ struct virtio_uml_vq_info *info;
+
+ info = container_of(d, struct virtio_uml_vq_info, defer);
+ info->callback(info->vq);
+}
+
+static void vu_defer_irq_callback(struct virtqueue *vq)
+{
+ struct virtio_uml_vq_info *info = vq->priv;
+
+ time_travel_add_irq_event(&info->defer);
+}
+#endif
+
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
unsigned index, vq_callback_t *callback,
const char *name, bool ctx)
@@ -829,6 +910,19 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
pdev->id, name);
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ /*
+ * When we get an interrupt, we must bounce it through the simulation
+ * calendar (the simtime device), except for the simtime device itself
+ * since that's part of the simulation control.
+ */
+ if (time_travel_mode == TT_MODE_EXTERNAL && callback) {
+ info->callback = callback;
+ callback = vu_defer_irq_callback;
+ time_travel_set_event_fn(&info->defer, vu_defer_irq_handle);
+ }
+#endif
+
vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
ctx, vu_notify, callback, info->name);
if (!vq) {
@@ -837,11 +931,19 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
}
vq->priv = info;
num = virtqueue_get_vring_size(vq);
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+ info->vq = vq;
+#endif
- rc = os_eventfd(0, 0);
- if (rc < 0)
- goto error_kick;
- info->kick_fd = rc;
+ if (vu_dev->protocol_features &
+ BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
+ info->kick_fd = -1;
+ } else {
+ rc = os_eventfd(0, 0);
+ if (rc < 0)
+ goto error_kick;
+ info->kick_fd = rc;
+ }
rc = vu_setup_vq_call_fd(vu_dev, vq);
if (rc)
@@ -866,10 +968,13 @@ static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
return vq;
error_setup:
- um_free_irq(VIRTIO_IRQ, vq);
- os_close_file(info->call_fd);
+ if (info->call_fd >= 0) {
+ um_free_irq(VIRTIO_IRQ, vq);
+ os_close_file(info->call_fd);
+ }
error_call:
- os_close_file(info->kick_fd);
+ if (info->kick_fd >= 0)
+ os_close_file(info->kick_fd);
error_kick:
vring_del_virtqueue(vq);
error_create:
@@ -908,10 +1013,12 @@ static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
list_for_each_entry(vq, &vdev->vqs, list) {
struct virtio_uml_vq_info *info = vq->priv;
- rc = vhost_user_set_vring_kick(vu_dev, vq->index,
- info->kick_fd);
- if (rc)
- goto error_setup;
+ if (info->kick_fd >= 0) {
+ rc = vhost_user_set_vring_kick(vu_dev, vq->index,
+ info->kick_fd);
+ if (rc)
+ goto error_setup;
+ }
rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
if (rc)
@@ -1008,6 +1115,8 @@ static int virtio_uml_probe(struct platform_device *pdev)
return rc;
vu_dev->sock = rc;
+ spin_lock_init(&vu_dev->sock_lock);
+
rc = vhost_user_init(vu_dev);
if (rc)
goto error_init;
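
The vu_defer_irq_callback path added above bounces an interrupt through an event instead of running the virtqueue callback immediately; the handler recovers its context with container_of(). A userspace sketch of that deferral pattern (all names here are invented stand-ins for the vq_info/time_travel_event pair):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event {
	void (*fn)(struct event *e);
	struct event *next;
};

struct vq_info {
	const char *name;
	void (*callback)(struct vq_info *vq);
	struct event defer;
};

static struct event *pending;

static void event_add(struct event *e)
{
	e->next = pending;
	pending = e;
}

/* Runs later, from the event loop: recover the queue, fire its callback. */
static void defer_handle(struct event *e)
{
	struct vq_info *info = container_of(e, struct vq_info, defer);

	info->callback(info);
}

static void demo_callback(struct vq_info *vq)
{
	printf("callback for %s ran from the event loop\n", vq->name);
}

int main(void)
{
	struct vq_info info = { "vq0", demo_callback, { defer_handle, NULL } };

	event_add(&info.defer);		/* the "interrupt" arrives */
	while (pending) {		/* the simulation event loop */
		struct event *e = pending;

		pending = e->next;
		e->fn(e);
	}
	return 0;
}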
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index db7d9d4e30d8..8d435f8a6dec 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -3,7 +3,6 @@ generic-y += bpf_perf_event.h
generic-y += bug.h
generic-y += compat.h
generic-y += current.h
-generic-y += delay.h
generic-y += device.h
generic-y += emergency-restart.h
generic-y += exec.h
diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h
new file mode 100644
index 000000000000..56fc2b8f2dd0
--- /dev/null
+++ b/arch/um/include/asm/delay.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_DELAY_H
+#define __UM_DELAY_H
+#include <asm-generic/delay.h>
+#include <linux/time-internal.h>
+
+static inline void um_ndelay(unsigned long nsecs)
+{
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL) {
+ time_travel_ndelay(nsecs);
+ return;
+ }
+ ndelay(nsecs);
+}
+#undef ndelay
+#define ndelay um_ndelay
+
+static inline void um_udelay(unsigned long usecs)
+{
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL) {
+ time_travel_ndelay(1000 * usecs);
+ return;
+ }
+ udelay(usecs);
+}
+#undef udelay
+#define udelay um_udelay
+#endif /* __UM_DELAY_H */
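
The new delay.h uses a wrap-and-redefine idiom: the wrapper's body still resolves to the generic implementation, and only afterwards is the public name rebound to the wrapper. A compilable sketch of just that idiom (generic_udelay is a stand-in for the asm-generic routine):

#include <stdio.h>

static void generic_udelay(unsigned long usecs)
{
	printf("busy-waiting %lu us\n", usecs);
}
#define udelay generic_udelay

/* Inside the wrapper, udelay still means the generic version. */
static void um_udelay(unsigned long usecs)
{
	/* the kernel version short-circuits here in time-travel modes */
	udelay(usecs);
}
#undef udelay
#define udelay um_udelay

int main(void)
{
	udelay(5);	/* expands to um_udelay(5) */
	return 0;
}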
diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
new file mode 100644
index 000000000000..f3b03d39a854
--- /dev/null
+++ b/arch/um/include/linux/time-internal.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 - 2014 Cisco Systems
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __TIMER_INTERNAL_H__
+#define __TIMER_INTERNAL_H__
+#include <linux/list.h>
+
+#define TIMER_MULTIPLIER 256
+#define TIMER_MIN_DELTA 500
+
+enum time_travel_mode {
+ TT_MODE_OFF,
+ TT_MODE_BASIC,
+ TT_MODE_INFCPU,
+ TT_MODE_EXTERNAL,
+};
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+struct time_travel_event {
+ unsigned long long time;
+ void (*fn)(struct time_travel_event *d);
+ struct list_head list;
+ bool pending, onstack;
+};
+
+extern enum time_travel_mode time_travel_mode;
+
+void time_travel_sleep(unsigned long long duration);
+
+static inline void
+time_travel_set_event_fn(struct time_travel_event *e,
+ void (*fn)(struct time_travel_event *d))
+{
+ e->fn = fn;
+}
+
+void __time_travel_propagate_time(void);
+
+static inline void time_travel_propagate_time(void)
+{
+ if (time_travel_mode == TT_MODE_EXTERNAL)
+ __time_travel_propagate_time();
+}
+
+void __time_travel_wait_readable(int fd);
+
+static inline void time_travel_wait_readable(int fd)
+{
+ if (time_travel_mode == TT_MODE_EXTERNAL)
+ __time_travel_wait_readable(fd);
+}
+
+void time_travel_add_irq_event(struct time_travel_event *e);
+#else
+struct time_travel_event {
+};
+
+#define time_travel_mode TT_MODE_OFF
+
+static inline void time_travel_sleep(unsigned long long duration)
+{
+}
+
+/* this is a macro so the event/function need not exist */
+#define time_travel_set_event_fn(e, fn) do {} while (0)
+
+static inline void time_travel_propagate_time(void)
+{
+}
+
+static inline void time_travel_wait_readable(int fd)
+{
+}
+#endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
+
+/*
+ * Without CONFIG_UML_TIME_TRAVEL_SUPPORT this is a linker error if used,
+ * which is intentional since we really shouldn't link it in that case.
+ */
+void time_travel_ndelay(unsigned long nsec);
+#endif /* __TIMER_INTERNAL_H__ */
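
struct time_travel_event above is a node in a time-ordered pending list
(time_travel_events in arch/um/kernel/time.c below). A simplified
userspace sketch of that data structure, using a plain singly-linked
list instead of the kernel's list_head and ignoring the equal-time
onstack tie-breaking:

    #include <stdio.h>
    #include <stdbool.h>

    struct event {
            unsigned long long time;
            void (*fn)(struct event *e);
            struct event *next;
            bool pending;
    };

    static struct event *events;    /* list head, kept sorted by ->time */

    /* insert before the first entry with a higher time, roughly what
     * __time_travel_add_event() does with the kernel's list_head */
    static void add_event(struct event *e, unsigned long long time)
    {
            struct event **p = &events;

            if (e->pending)
                    return;
            e->time = time;
            e->pending = true;
            while (*p && (*p)->time <= time)
                    p = &(*p)->next;
            e->next = *p;
            *p = e;
    }

    static void fire(struct event *e)
    {
            printf("event at %llu\n", e->time);
    }

    int main(void)
    {
            struct event a = { .fn = fire }, b = { .fn = fire };

            add_event(&a, 2000);
            add_event(&b, 1000);
            while (events) {        /* delivers in time order: 1000, then 2000 */
                    struct event *e = events;

                    events = e->next;
                    e->pending = false;
                    e->fn(e);
            }
            return 0;
    }
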
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 0f30204b6afa..f467d28fc0b4 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -181,6 +181,7 @@ extern int os_falloc_punch(int fd, unsigned long long offset, int count);
extern int os_eventfd(unsigned int initval, int flags);
extern int os_sendmsg_fds(int fd, const void *buf, unsigned int len,
const int *fds, unsigned int fds_num);
+int os_poll(unsigned int n, const int *fds);
/* start_up.c */
extern void os_early_checks(void);
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
deleted file mode 100644
index 2d2d13c9b46f..000000000000
--- a/arch/um/include/shared/timer-internal.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 - 2014 Cisco Systems
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- */
-
-#ifndef __TIMER_INTERNAL_H__
-#define __TIMER_INTERNAL_H__
-
-#define TIMER_MULTIPLIER 256
-#define TIMER_MIN_DELTA 500
-
-enum time_travel_mode {
- TT_MODE_OFF,
- TT_MODE_BASIC,
- TT_MODE_INFCPU,
-};
-
-enum time_travel_timer_mode {
- TT_TMR_DISABLED,
- TT_TMR_ONESHOT,
- TT_TMR_PERIODIC,
-};
-
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
-extern enum time_travel_mode time_travel_mode;
-extern unsigned long long time_travel_time;
-extern enum time_travel_timer_mode time_travel_timer_mode;
-extern unsigned long long time_travel_timer_expiry;
-extern unsigned long long time_travel_timer_interval;
-
-static inline void time_travel_set_time(unsigned long long ns)
-{
- time_travel_time = ns;
-}
-
-static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
-{
- time_travel_timer_mode = mode;
-}
-
-static inline void time_travel_set_timer_expiry(unsigned long long expiry)
-{
- time_travel_timer_expiry = expiry;
-}
-
-static inline void time_travel_set_timer_interval(unsigned long long interval)
-{
- time_travel_timer_interval = interval;
-}
-#else
-#define time_travel_mode TT_MODE_OFF
-#define time_travel_time 0
-#define time_travel_timer_expiry 0
-#define time_travel_timer_interval 0
-
-static inline void time_travel_set_time(unsigned long long ns)
-{
-}
-
-static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
-{
-}
-
-static inline void time_travel_set_timer_expiry(unsigned long long expiry)
-{
-}
-
-static inline void time_travel_set_timer_interval(unsigned long long interval)
-{
-}
-
-#define time_travel_timer_mode TT_TMR_DISABLED
-#endif
-
-#endif
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
index 98bdf69e4c2e..e4abac6c9727 100644
--- a/arch/um/kernel/kmsg_dump.c
+++ b/arch/um/kernel/kmsg_dump.c
@@ -9,20 +9,19 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
static char line[1024];
-
+ struct console *con;
size_t len = 0;
- bool con_available = false;
/* only dump kmsg when no console is available */
if (!console_trylock())
return;
- if (console_drivers != NULL)
- con_available = true;
+ for_each_console(con)
+ break;
console_unlock();
- if (con_available == true)
+ if (con)
return;
printf("kmsg_dump:\n");
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 56a094182bf5..cbe33af2a880 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -32,7 +32,7 @@
#include <kern_util.h>
#include <os.h>
#include <skas.h>
-#include <timer-internal.h>
+#include <linux/time-internal.h>
/*
* This is a per-cpu array. A processor only modifies its entry and it only
@@ -203,43 +203,6 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
kmalloc_ok = save_kmalloc_ok;
}
-static void time_travel_sleep(unsigned long long duration)
-{
- unsigned long long next = time_travel_time + duration;
-
- if (time_travel_mode != TT_MODE_INFCPU)
- os_timer_disable();
-
- while (time_travel_timer_mode == TT_TMR_PERIODIC &&
- time_travel_timer_expiry < time_travel_time)
- time_travel_set_timer_expiry(time_travel_timer_expiry +
- time_travel_timer_interval);
-
- if (time_travel_timer_mode != TT_TMR_DISABLED &&
- time_travel_timer_expiry < next) {
- if (time_travel_timer_mode == TT_TMR_ONESHOT)
- time_travel_set_timer_mode(TT_TMR_DISABLED);
- /*
- * In basic mode, time_travel_time will be adjusted in
- * the timer IRQ handler so it works even when the signal
- * comes from the OS timer, see there.
- */
- if (time_travel_mode != TT_MODE_BASIC)
- time_travel_set_time(time_travel_timer_expiry);
-
- deliver_alarm();
- } else {
- time_travel_set_time(next);
- }
-
- if (time_travel_mode != TT_MODE_INFCPU) {
- if (time_travel_timer_mode == TT_TMR_PERIODIC)
- os_timer_set_interval(time_travel_timer_interval);
- else if (time_travel_timer_mode == TT_TMR_ONESHOT)
- os_timer_one_shot(time_travel_timer_expiry - next);
- }
-}
-
static void um_idle_sleep(void)
{
unsigned long long duration = UM_NSEC_PER_SEC;
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 40d90dddf3f1..0a12d5a09217 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -10,7 +10,7 @@
#include <sysdep/ptrace.h>
#include <sysdep/ptrace_user.h>
#include <sysdep/syscalls.h>
-#include <shared/timer-internal.h>
+#include <linux/time-internal.h>
void handle_syscall(struct uml_pt_regs *r)
{
@@ -24,7 +24,8 @@ void handle_syscall(struct uml_pt_regs *r)
* went to sleep, even if said userspace interacts with the kernel in
* various ways.
*/
- if (time_travel_mode == TT_MODE_INFCPU)
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL)
schedule();
/* Initialize the syscall number and default return value. */
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 94ea87bd231c..25eaa6a0c658 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
* Copyright (C) 2012-2014 Cisco Systems
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Copyright (C) 2019 Intel Corporation
*/
#include <linux/clockchips.h>
@@ -18,21 +19,484 @@
#include <asm/param.h>
#include <kern_util.h>
#include <os.h>
-#include <timer-internal.h>
+#include <linux/time-internal.h>
+#include <linux/um_timetravel.h>
#include <shared/init.h>
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
enum time_travel_mode time_travel_mode;
-unsigned long long time_travel_time;
-enum time_travel_timer_mode time_travel_timer_mode;
-unsigned long long time_travel_timer_expiry;
-unsigned long long time_travel_timer_interval;
+EXPORT_SYMBOL_GPL(time_travel_mode);
static bool time_travel_start_set;
static unsigned long long time_travel_start;
-#else
+static unsigned long long time_travel_time;
+static LIST_HEAD(time_travel_events);
+static unsigned long long time_travel_timer_interval;
+static unsigned long long time_travel_next_event;
+static struct time_travel_event time_travel_timer_event;
+static int time_travel_ext_fd = -1;
+static unsigned int time_travel_ext_waiting;
+static bool time_travel_ext_prev_request_valid;
+static unsigned long long time_travel_ext_prev_request;
+static bool time_travel_ext_free_until_valid;
+static unsigned long long time_travel_ext_free_until;
+
+static void time_travel_set_time(unsigned long long ns)
+{
+ if (unlikely(ns < time_travel_time))
+ panic("time-travel: time goes backwards %lld -> %lld\n",
+ time_travel_time, ns);
+ time_travel_time = ns;
+}
+
+enum time_travel_message_handling {
+ TTMH_IDLE,
+ TTMH_POLL,
+ TTMH_READ,
+};
+
+static void time_travel_handle_message(struct um_timetravel_msg *msg,
+ enum time_travel_message_handling mode)
+{
+ struct um_timetravel_msg resp = {
+ .op = UM_TIMETRAVEL_ACK,
+ };
+ int ret;
+
+ /*
+ * Poll outside the locked section (if we're not called to only read
+ * the response) so we can get interrupts for e.g. virtio while we're
+ * here, but then we need to lock to not get interrupted between the
+ * read of the message and write of the ACK.
+ */
+ if (mode != TTMH_READ) {
+ while (os_poll(1, &time_travel_ext_fd) != 0) {
+ if (mode == TTMH_IDLE) {
+ BUG_ON(!irqs_disabled());
+ local_irq_enable();
+ local_irq_disable();
+ }
+ }
+ }
+
+ ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
+
+ if (ret == 0)
+ panic("time-travel external link is broken\n");
+ if (ret != sizeof(*msg))
+ panic("invalid time-travel message - %d bytes\n", ret);
+
+ switch (msg->op) {
+ default:
+ WARN_ONCE(1, "time-travel: unexpected message %lld\n",
+ (unsigned long long)msg->op);
+ break;
+ case UM_TIMETRAVEL_ACK:
+ return;
+ case UM_TIMETRAVEL_RUN:
+ time_travel_set_time(msg->time);
+ break;
+ case UM_TIMETRAVEL_FREE_UNTIL:
+ time_travel_ext_free_until_valid = true;
+ time_travel_ext_free_until = msg->time;
+ break;
+ }
+
+ os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
+}
+
+static u64 time_travel_ext_req(u32 op, u64 time)
+{
+ static int seq;
+ int mseq = ++seq;
+ struct um_timetravel_msg msg = {
+ .op = op,
+ .time = time,
+ .seq = mseq,
+ };
+ unsigned long flags;
+
+ /*
+	 * We need to save interrupts here and only restore them once
+	 * we've got the ACK - otherwise we can get interrupted and send
+	 * another request while we're still waiting for an ACK, but
+	 * the peer doesn't know we got interrupted and will send
+	 * the ACKs in the same order as the messages, but we'd need
+ * to see them in the opposite order ...
+ *
+ * This wouldn't matter *too* much, but some ACKs carry the
+ * current time (for UM_TIMETRAVEL_GET) and getting another
+ * ACK without a time would confuse us a lot!
+ *
+ * The sequence number assignment that happens here lets us
+ * debug such message handling issues more easily.
+ */
+ local_irq_save(flags);
+ os_write_file(time_travel_ext_fd, &msg, sizeof(msg));
+
+ while (msg.op != UM_TIMETRAVEL_ACK)
+ time_travel_handle_message(&msg, TTMH_READ);
+
+ if (msg.seq != mseq)
+ panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
+ msg.op, msg.seq, mseq, msg.time);
+
+ if (op == UM_TIMETRAVEL_GET)
+ time_travel_set_time(msg.time);
+ local_irq_restore(flags);
+
+ return msg.time;
+}
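
time_travel_ext_req() is a blocking RPC: write the message, then
consume incoming messages until the matching ACK arrives, with the
sequence number as a sanity check. A toy demonstration of that
request/ACK discipline over a socketpair; the struct below only
loosely mirrors um_timetravel_msg, and the opcodes are made up:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    struct msg { unsigned int op, seq; unsigned long long time; };
    enum { OP_ACK = 1, OP_REQUEST = 2 };

    /* send one request, then consume messages until the matching ACK
     * arrives, checking the sequence number like time_travel_ext_req() */
    static unsigned long long do_req(int fd, unsigned long long t)
    {
            static unsigned int seq;
            struct msg m = { .op = OP_REQUEST, .seq = ++seq, .time = t };

            write(fd, &m, sizeof(m));
            do {
                    read(fd, &m, sizeof(m));
            } while (m.op != OP_ACK);
            if (m.seq != seq)
                    fprintf(stderr, "out-of-order ACK %u != %u\n", m.seq, seq);
            return m.time;
    }

    int main(void)
    {
            int sv[2];
            struct msg m;

            socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv);
            if (fork() == 0) {              /* toy "scheduler" peer */
                    read(sv[1], &m, sizeof(m));
                    m.op = OP_ACK;          /* echo seq/time back as the ACK */
                    write(sv[1], &m, sizeof(m));
                    _exit(0);
            }
            printf("acked time %llu\n", do_req(sv[0], 1234));
            return 0;
    }
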
+
+void __time_travel_wait_readable(int fd)
+{
+ int fds[2] = { fd, time_travel_ext_fd };
+ int ret;
+
+ if (time_travel_mode != TT_MODE_EXTERNAL)
+ return;
+
+ while ((ret = os_poll(2, fds))) {
+ struct um_timetravel_msg msg;
+
+ if (ret == 1)
+ time_travel_handle_message(&msg, TTMH_READ);
+ }
+}
+EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
+
+static void time_travel_ext_update_request(unsigned long long time)
+{
+ if (time_travel_mode != TT_MODE_EXTERNAL)
+ return;
+
+ /* asked for exactly this time previously */
+ if (time_travel_ext_prev_request_valid &&
+ time == time_travel_ext_prev_request)
+ return;
+
+ time_travel_ext_prev_request = time;
+ time_travel_ext_prev_request_valid = true;
+ time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
+}
+
+void __time_travel_propagate_time(void)
+{
+ time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
+}
+EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
+
+/* returns true if we must send a wait request to the simtime device */
+static bool time_travel_ext_request(unsigned long long time)
+{
+ /*
+ * If we received an external sync point ("free until") then we
+ * don't have to request/wait for anything until then, unless
+ * we're already waiting.
+ */
+ if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
+ time < time_travel_ext_free_until)
+ return false;
+
+ time_travel_ext_update_request(time);
+ return true;
+}
+
+static void time_travel_ext_wait(bool idle)
+{
+ struct um_timetravel_msg msg = {
+ .op = UM_TIMETRAVEL_ACK,
+ };
+
+ time_travel_ext_prev_request_valid = false;
+ time_travel_ext_waiting++;
+
+ time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);
+
+ /*
+ * Here we are deep in the idle loop, so we have to break out of the
+ * kernel abstraction in a sense and implement this in terms of the
+	 * UML system waiting on the VQ interrupt while sleeping; when we get
+	 * the signal, it'll call time_travel_ext_vq_notify_done(), completing
+	 * the call.
+ */
+ while (msg.op != UM_TIMETRAVEL_RUN)
+ time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);
+
+ time_travel_ext_waiting--;
+
+ /* we might request more stuff while polling - reset when we run */
+ time_travel_ext_prev_request_valid = false;
+}
+
+static void time_travel_ext_get_time(void)
+{
+ time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
+}
+
+static void __time_travel_update_time(unsigned long long ns, bool idle)
+{
+ if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
+ time_travel_ext_wait(idle);
+ else
+ time_travel_set_time(ns);
+}
+
+static struct time_travel_event *time_travel_first_event(void)
+{
+ return list_first_entry_or_null(&time_travel_events,
+ struct time_travel_event,
+ list);
+}
+
+static void __time_travel_add_event(struct time_travel_event *e,
+ unsigned long long time)
+{
+ struct time_travel_event *tmp;
+ bool inserted = false;
+
+ if (WARN(time_travel_mode == TT_MODE_BASIC &&
+ e != &time_travel_timer_event,
+ "only timer events can be handled in basic mode"))
+ return;
+
+ if (e->pending)
+ return;
+
+ e->pending = true;
+ e->time = time;
+
+ list_for_each_entry(tmp, &time_travel_events, list) {
+ /*
+ * Add the new entry before one with higher time,
+ * or if they're equal and both on stack, because
+ * in that case we need to unwind the stack in the
+ * right order, and the later event (timer sleep
+ * or such) must be dequeued first.
+ */
+ if ((tmp->time > e->time) ||
+ (tmp->time == e->time && tmp->onstack && e->onstack)) {
+ list_add_tail(&e->list, &tmp->list);
+ inserted = true;
+ break;
+ }
+ }
+
+ if (!inserted)
+ list_add_tail(&e->list, &time_travel_events);
+
+ tmp = time_travel_first_event();
+ time_travel_ext_update_request(tmp->time);
+ time_travel_next_event = tmp->time;
+}
+
+static void time_travel_add_event(struct time_travel_event *e,
+ unsigned long long time)
+{
+ if (WARN_ON(!e->fn))
+ return;
+
+ __time_travel_add_event(e, time);
+}
+
+void time_travel_periodic_timer(struct time_travel_event *e)
+{
+ time_travel_add_event(&time_travel_timer_event,
+ time_travel_time + time_travel_timer_interval);
+ deliver_alarm();
+}
+
+static void time_travel_deliver_event(struct time_travel_event *e)
+{
+ if (e == &time_travel_timer_event) {
+ /*
+ * deliver_alarm() does the irq_enter/irq_exit
+		 * by itself, so we must handle it specially here
+ */
+ e->fn(e);
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ irq_enter();
+ e->fn(e);
+ irq_exit();
+ local_irq_restore(flags);
+ }
+}
+
+static bool time_travel_del_event(struct time_travel_event *e)
+{
+ if (!e->pending)
+ return false;
+ list_del(&e->list);
+ e->pending = false;
+ return true;
+}
+
+static void time_travel_update_time(unsigned long long next, bool idle)
+{
+ struct time_travel_event ne = {
+ .onstack = true,
+ };
+ struct time_travel_event *e;
+ bool finished = idle;
+
+ /* add it without a handler - we deal with that specifically below */
+ __time_travel_add_event(&ne, next);
+
+ do {
+ e = time_travel_first_event();
+
+ BUG_ON(!e);
+ __time_travel_update_time(e->time, idle);
+
+ /* new events may have been inserted while we were waiting */
+ if (e == time_travel_first_event()) {
+ BUG_ON(!time_travel_del_event(e));
+ BUG_ON(time_travel_time != e->time);
+
+ if (e == &ne) {
+ finished = true;
+ } else {
+ if (e->onstack)
+ panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
+ time_travel_time, e->time, e);
+ time_travel_deliver_event(e);
+ }
+ }
+
+ e = time_travel_first_event();
+ if (e)
+ time_travel_ext_update_request(e->time);
+ } while (ne.pending && !finished);
+
+ time_travel_del_event(&ne);
+}
+
+void time_travel_ndelay(unsigned long nsec)
+{
+ time_travel_update_time(time_travel_time + nsec, false);
+}
+EXPORT_SYMBOL(time_travel_ndelay);
+
+void time_travel_add_irq_event(struct time_travel_event *e)
+{
+ BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
+
+ time_travel_ext_get_time();
+ /*
+	 * We could model interrupt latency here; for now, just
+ * don't have any latency at all and request the exact
+ * same time (again) to run the interrupt...
+ */
+ time_travel_add_event(e, time_travel_time);
+}
+EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
+
+static void time_travel_oneshot_timer(struct time_travel_event *e)
+{
+ deliver_alarm();
+}
+
+void time_travel_sleep(unsigned long long duration)
+{
+ unsigned long long next = time_travel_time + duration;
+
+ if (time_travel_mode == TT_MODE_BASIC)
+ os_timer_disable();
+
+ time_travel_update_time(next, true);
+
+ if (time_travel_mode == TT_MODE_BASIC &&
+ time_travel_timer_event.pending) {
+ if (time_travel_timer_event.fn == time_travel_periodic_timer) {
+ /*
+ * This is somewhat wrong - we should get the first
+ * one sooner like the os_timer_one_shot() below...
+ */
+ os_timer_set_interval(time_travel_timer_interval);
+ } else {
+ os_timer_one_shot(time_travel_timer_event.time - next);
+ }
+ }
+}
+
+static void time_travel_handle_real_alarm(void)
+{
+ time_travel_set_time(time_travel_next_event);
+
+ time_travel_del_event(&time_travel_timer_event);
+
+ if (time_travel_timer_event.fn == time_travel_periodic_timer)
+ time_travel_add_event(&time_travel_timer_event,
+ time_travel_time +
+ time_travel_timer_interval);
+}
+
+static void time_travel_set_interval(unsigned long long interval)
+{
+ time_travel_timer_interval = interval;
+}
+
+static int time_travel_connect_external(const char *socket)
+{
+ const char *sep;
+ unsigned long long id = (unsigned long long)-1;
+ int rc;
+
+ if ((sep = strchr(socket, ':'))) {
+ char buf[25] = {};
+ if (sep - socket > sizeof(buf) - 1)
+ goto invalid_number;
+
+ memcpy(buf, socket, sep - socket);
+ if (kstrtoull(buf, 0, &id)) {
+invalid_number:
+ panic("time-travel: invalid external ID in string '%s'\n",
+ socket);
+ return -EINVAL;
+ }
+
+ socket = sep + 1;
+ }
+
+ rc = os_connect_socket(socket);
+ if (rc < 0) {
+ panic("time-travel: failed to connect to external socket %s\n",
+ socket);
+ return rc;
+ }
+
+ time_travel_ext_fd = rc;
+
+ time_travel_ext_req(UM_TIMETRAVEL_START, id);
+
+ return 1;
+}
+#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
#define time_travel_start_set 0
#define time_travel_start 0
+#define time_travel_time 0
+
+static inline void time_travel_update_time(unsigned long long ns, bool retearly)
+{
+}
+
+static inline void time_travel_handle_real_alarm(void)
+{
+}
+
+static void time_travel_set_interval(unsigned long long interval)
+{
+}
+
+/* fail link if this actually gets used */
+extern u64 time_travel_ext_req(u32 op, u64 time);
+
+/* these are empty macros so the struct/fn need not exist */
+#define time_travel_add_event(e, time) do { } while (0)
+#define time_travel_del_event(e) do { } while (0)
#endif
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
@@ -48,7 +512,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
* never get any real signals from the OS.
*/
if (time_travel_mode == TT_MODE_BASIC)
- time_travel_set_time(time_travel_timer_expiry);
+ time_travel_handle_real_alarm();
local_irq_save(flags);
do_IRQ(TIMER_IRQ, regs);
@@ -58,9 +522,10 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
static int itimer_shutdown(struct clock_event_device *evt)
{
if (time_travel_mode != TT_MODE_OFF)
- time_travel_set_timer_mode(TT_TMR_DISABLED);
+ time_travel_del_event(&time_travel_timer_event);
- if (time_travel_mode != TT_MODE_INFCPU)
+ if (time_travel_mode != TT_MODE_INFCPU &&
+ time_travel_mode != TT_MODE_EXTERNAL)
os_timer_disable();
return 0;
@@ -71,12 +536,16 @@ static int itimer_set_periodic(struct clock_event_device *evt)
unsigned long long interval = NSEC_PER_SEC / HZ;
if (time_travel_mode != TT_MODE_OFF) {
- time_travel_set_timer_mode(TT_TMR_PERIODIC);
- time_travel_set_timer_expiry(time_travel_time + interval);
- time_travel_set_timer_interval(interval);
+ time_travel_del_event(&time_travel_timer_event);
+ time_travel_set_event_fn(&time_travel_timer_event,
+ time_travel_periodic_timer);
+ time_travel_set_interval(interval);
+ time_travel_add_event(&time_travel_timer_event,
+ time_travel_time + interval);
}
- if (time_travel_mode != TT_MODE_INFCPU)
+ if (time_travel_mode != TT_MODE_INFCPU &&
+ time_travel_mode != TT_MODE_EXTERNAL)
os_timer_set_interval(interval);
return 0;
@@ -88,11 +557,15 @@ static int itimer_next_event(unsigned long delta,
delta += 1;
if (time_travel_mode != TT_MODE_OFF) {
- time_travel_set_timer_mode(TT_TMR_ONESHOT);
- time_travel_set_timer_expiry(time_travel_time + delta);
+ time_travel_del_event(&time_travel_timer_event);
+ time_travel_set_event_fn(&time_travel_timer_event,
+ time_travel_oneshot_timer);
+ time_travel_add_event(&time_travel_timer_event,
+ time_travel_time + delta);
}
- if (time_travel_mode != TT_MODE_INFCPU)
+ if (time_travel_mode != TT_MODE_INFCPU &&
+ time_travel_mode != TT_MODE_EXTERNAL)
return os_timer_one_shot(delta);
return 0;
@@ -143,8 +616,17 @@ static u64 timer_read(struct clocksource *cs)
* stuck in loops that expect time to move more than the
* exact requested sleep amount, e.g. python's socket server,
* see https://bugs.python.org/issue37026.
+ *
+	 * However, don't do that when we're in an interrupt or such,
+	 * as then we might recurse into our own processing, and get to
+	 * even more waiting, and that's not good - it messes up the
+	 * "what do I do next" logic and the onstack event we use to know
+	 * when to return from time_travel_update_time().
*/
- time_travel_set_time(time_travel_time + TIMER_MULTIPLIER);
+ if (!irqs_disabled() && !in_interrupt() && !in_softirq())
+ time_travel_update_time(time_travel_time +
+ TIMER_MULTIPLIER,
+ false);
return time_travel_time / TIMER_MULTIPLIER;
}
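
The timer_read() change above makes the clocksource itself advance
virtual time on every read (by one TIMER_MULTIPLIER tick), so code
busy-polling the clock still observes progress. A self-contained
illustration of why that keeps a python-style busy loop from hanging:

    #include <stdio.h>

    #define MULT 256 /* plays the role of TIMER_MULTIPLIER */

    static unsigned long long simtime;

    /* each read nudges simulated time forward a little, so a loop that
     * just waits for "time to pass" still terminates even though no
     * real time elapses */
    static unsigned long long clock_read(void)
    {
            simtime += MULT;
            return simtime / MULT;
    }

    int main(void)
    {
            unsigned long long start = clock_read();

            while (clock_read() - start < 5)
                    ;               /* terminates: each read advances time */
            printf("simulated clock reached %llu\n", clock_read());
            return 0;
    }
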
@@ -188,6 +670,8 @@ void read_persistent_clock64(struct timespec64 *ts)
if (time_travel_start_set)
nsecs = time_travel_start + time_travel_time;
+ else if (time_travel_mode == TT_MODE_EXTERNAL)
+ nsecs = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
else
nsecs = os_persistent_clock_emulation();
@@ -204,7 +688,8 @@ void __init time_init(void)
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
unsigned long calibrate_delay_is_known(void)
{
- if (time_travel_mode == TT_MODE_INFCPU)
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL)
return 1;
return 0;
}
@@ -218,6 +703,13 @@ int setup_time_travel(char *str)
return 1;
}
+ if (strncmp(str, "=ext:", 5) == 0) {
+ time_travel_mode = TT_MODE_EXTERNAL;
+ timer_clockevent.name = "time-travel-timer-external";
+ timer_clocksource.name = "time-travel-clock-external";
+ return time_travel_connect_external(str + 5);
+ }
+
if (!*str) {
time_travel_mode = TT_MODE_BASIC;
timer_clockevent.name = "time-travel-timer";
@@ -242,7 +734,15 @@ __uml_help(setup_time_travel,
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
-"easily lead to getting stuck (e.g. if anything in the system busy loops).\n");
+"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
+"\n"
+"time-travel=ext:[ID:]/path/to/socket\n"
+"This enables time travel mode similar to =inf-cpu, except the system will\n"
+"use the given socket to coordinate with a central scheduler, in order to\n"
+"have more than one system simultaneously be on simulated time. The virtio\n"
+"driver code in UML knows about this so you can also simulate networks and\n"
+"devices using it, assuming the device has the right capabilities.\n"
+"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
int setup_time_travel_start(char *str)
{
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 818553064f04..8f18cf56b3dd 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -33,7 +33,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
pmd_t *pmd;
pte_t *pte;
int err = -EFAULT;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
*code_out = SEGV_MAPERR;
@@ -97,7 +97,6 @@ good_area:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
goto retry;
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 9f21443be2c9..3b6dab3d4501 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -19,10 +19,10 @@ SECTIONS
__binary_start = START;
. = START + SIZEOF_HEADERS;
+ . = ALIGN(PAGE_SIZE);
_text = .;
INIT_TEXT_SECTION(0)
- . = ALIGN(PAGE_SIZE);
.text :
{
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index fbda10535dab..26ecbd64c409 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -5,9 +5,11 @@
#include <stdio.h>
#include <unistd.h>
+#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
+#include <linux/falloc.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/socket.h>
@@ -16,6 +18,7 @@
#include <sys/un.h>
#include <sys/types.h>
#include <sys/eventfd.h>
+#include <poll.h>
#include <os.h>
static void copy_stat(struct uml_stat *dst, const struct stat64 *src)
@@ -664,3 +667,31 @@ int os_sendmsg_fds(int fd, const void *buf, unsigned int len, const int *fds,
return -errno;
return err;
}
+
+int os_poll(unsigned int n, const int *fds)
+{
+	/* we currently need at most 2 FDs, so avoid dynamic allocation */
+ struct pollfd pollfds[2] = {};
+ unsigned int i;
+ int ret;
+
+ if (n > ARRAY_SIZE(pollfds))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ pollfds[i].fd = fds[i];
+ pollfds[i].events = POLLIN;
+ }
+
+ ret = poll(pollfds, n, -1);
+ if (ret < 0)
+ return -errno;
+
+ /* Return the index of the available FD */
+ for (i = 0; i < n; i++) {
+ if (pollfds[i].revents)
+ return i;
+ }
+
+ return -EIO;
+}
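
A standalone userspace sketch of the same block-and-return-index
contract os_poll() implements, with illustrative names:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    /* same contract as os_poll() above: block until one of up to two
     * FDs is readable, then return that FD's index */
    static int poll_index(unsigned int n, const int *fds)
    {
            struct pollfd pfds[2] = {};
            unsigned int i;

            if (n > 2)
                    return -1;
            for (i = 0; i < n; i++) {
                    pfds[i].fd = fds[i];
                    pfds[i].events = POLLIN;
            }
            if (poll(pfds, n, -1) < 0)
                    return -1;
            for (i = 0; i < n; i++)
                    if (pfds[i].revents)
                            return i;
            return -1;
    }

    int main(void)
    {
            int p[2], fds[2];
            char c = 'x';

            pipe(p);
            fds[0] = STDIN_FILENO;
            fds[1] = p[0];
            write(p[1], &c, 1);     /* make the pipe end readable */
            /* prints 1, unless stdin happens to be readable too */
            printf("ready index: %d\n", poll_index(2, fds));
            return 0;
    }
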
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index 432f8e1f55c2..90f6de224c70 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -14,7 +14,6 @@
#include <kern_util.h>
#include <os.h>
#include <string.h>
-#include <timer-internal.h>
static timer_t event_high_res_timer = 0;
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index 44def53a11cd..9e16078a4bf8 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -220,11 +220,12 @@ static void __init create_pid_file(void)
char pid[sizeof("nnnnn\0")], *file;
int fd, n;
- file = malloc(strlen(uml_dir) + UMID_LEN + sizeof("/pid\0"));
+ n = strlen(uml_dir) + UMID_LEN + sizeof("/pid\0");
+ file = malloc(n);
if (!file)
return;
- if (umid_file_name("pid", file, sizeof(file)))
+ if (umid_file_name("pid", file, n))
goto out;
fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
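
The umid.c change fixes a classic sizeof-on-pointer bug: sizeof(file)
evaluates to the size of the char pointer (8 bytes on LP64), not the
malloc'd length, so umid_file_name() was being told the buffer could
hold only 8 bytes. A two-line illustration:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char *file = malloc(64);
            char buf[64];

            /* sizeof on a pointer yields the pointer size, not the allocation */
            printf("%zu %zu\n", sizeof(file), sizeof(buf)); /* e.g. "8 64" */
            free(file);
            return 0;
    }
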
diff --git a/arch/unicore32/.gitignore b/arch/unicore32/.gitignore
index 947e99c2a957..e82f3fb57ba0 100644
--- a/arch/unicore32/.gitignore
+++ b/arch/unicore32/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# Generated include files
#
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 98aa125a8f06..55026e8240d8 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -1,41 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += atomic.h
-generic-y += bugs.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += ftrace.h
-generic-y += futex.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += module.h
generic-y += parport.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += serial.h
-generic-y += shmparam.h
generic-y += syscalls.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += unaligned.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 76342de9cf8c..a9bd08fbe588 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -202,7 +202,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
struct mm_struct *mm;
int sig, code;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
mm = tsk->mm;
@@ -250,7 +250,7 @@ retry:
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return 0;
if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
@@ -259,9 +259,7 @@ retry:
else
tsk->min_flt++;
if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index 5a82bac5e0bc..677111acbaa3 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
boot/compressed/vmlinux
tools/test_get_len
tools/insn_sanity
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c1e1931f591f..8d078642b4be 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -149,6 +149,7 @@ config X86
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
+ select HAVE_ARCH_USERFAULTFD_WP if USERFAULTFD
select HAVE_ARCH_VMAP_STACK if X86_64
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_ASM_MODVERSIONS
@@ -230,6 +231,7 @@ config X86
select VIRT_TO_BUS
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
+ imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore
index 09d25dd09307..9cc7f1357b9b 100644
--- a/arch/x86/boot/.gitignore
+++ b/arch/x86/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
bootsect
bzImage
cpustr.h
diff --git a/arch/x86/boot/apm.c b/arch/x86/boot/apm.c
index b72fc10fc1be..bda15f9673d5 100644
--- a/arch/x86/boot/apm.c
+++ b/arch/x86/boot/apm.c
@@ -60,7 +60,7 @@ int query_apm_bios(void)
intcall(0x15, &ireg, &oreg);
if ((oreg.eflags & X86_EFLAGS_CF) || oreg.bx != 0x504d) {
- /* Failure with 32-bit connect, try to disconect and ignore */
+ /* Failure with 32-bit connect, try to disconnect and ignore */
ireg.al = 0x04;
intcall(0x15, &ireg, NULL);
return -1;
diff --git a/arch/x86/boot/compressed/.gitignore b/arch/x86/boot/compressed/.gitignore
index 4a46fab7162e..25805199a506 100644
--- a/arch/x86/boot/compressed/.gitignore
+++ b/arch/x86/boot/compressed/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
relocs
vmlinux.bin.all
vmlinux.relocs
diff --git a/arch/x86/boot/tools/.gitignore b/arch/x86/boot/tools/.gitignore
index 378eac25d311..ae91f4d0d78b 100644
--- a/arch/x86/boot/tools/.gitignore
+++ b/arch/x86/boot/tools/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
build
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5b602beb0b72..550904591e94 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -136,7 +136,6 @@ CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
@@ -286,7 +285,6 @@ CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_OPTIMIZE_INLINING=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index f3d1f36103b1..614961009075 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -134,7 +134,6 @@ CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
@@ -283,7 +282,6 @@ CONFIG_EARLY_PRINTK_DBGP=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_BOOT_PARAMS=y
-CONFIG_OPTIMIZE_INLINING=y
CONFIG_UNWINDER_ORC=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
diff --git a/arch/x86/crypto/.gitignore b/arch/x86/crypto/.gitignore
index 30be0400a439..580c839bb177 100644
--- a/arch/x86/crypto/.gitignore
+++ b/arch/x86/crypto/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
poly1305-x86_64-cryptogams.S
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index eec7d2d24239..8a17621f7d3a 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -1,8 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
- * Copyright (c) 2017 Armando Faz <armfazh@ic.unicamp.br>. All Rights Reserved.
- * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- * Copyright (C) 2018 Samuel Neves <sneves@dei.uc.pt>. All Rights Reserved.
+ * Copyright (C) 2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
*/
#include <crypto/curve25519.h>
@@ -16,2337 +15,1378 @@
#include <asm/cpufeature.h>
#include <asm/processor.h>
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_bmi2);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_adx);
-
-enum { NUM_WORDS_ELTFP25519 = 4 };
-typedef __aligned(32) u64 eltfp25519_1w[NUM_WORDS_ELTFP25519];
-typedef __aligned(32) u64 eltfp25519_1w_buffer[2 * NUM_WORDS_ELTFP25519];
-
-#define mul_eltfp25519_1w_adx(c, a, b) do { \
- mul_256x256_integer_adx(m.buffer, a, b); \
- red_eltfp25519_1w_adx(c, m.buffer); \
-} while (0)
-
-#define mul_eltfp25519_1w_bmi2(c, a, b) do { \
- mul_256x256_integer_bmi2(m.buffer, a, b); \
- red_eltfp25519_1w_bmi2(c, m.buffer); \
-} while (0)
-
-#define sqr_eltfp25519_1w_adx(a) do { \
- sqr_256x256_integer_adx(m.buffer, a); \
- red_eltfp25519_1w_adx(a, m.buffer); \
-} while (0)
-
-#define sqr_eltfp25519_1w_bmi2(a) do { \
- sqr_256x256_integer_bmi2(m.buffer, a); \
- red_eltfp25519_1w_bmi2(a, m.buffer); \
-} while (0)
-
-#define mul_eltfp25519_2w_adx(c, a, b) do { \
- mul2_256x256_integer_adx(m.buffer, a, b); \
- red_eltfp25519_2w_adx(c, m.buffer); \
-} while (0)
-
-#define mul_eltfp25519_2w_bmi2(c, a, b) do { \
- mul2_256x256_integer_bmi2(m.buffer, a, b); \
- red_eltfp25519_2w_bmi2(c, m.buffer); \
-} while (0)
-
-#define sqr_eltfp25519_2w_adx(a) do { \
- sqr2_256x256_integer_adx(m.buffer, a); \
- red_eltfp25519_2w_adx(a, m.buffer); \
-} while (0)
-
-#define sqr_eltfp25519_2w_bmi2(a) do { \
- sqr2_256x256_integer_bmi2(m.buffer, a); \
- red_eltfp25519_2w_bmi2(a, m.buffer); \
-} while (0)
-
-#define sqrn_eltfp25519_1w_adx(a, times) do { \
- int ____counter = (times); \
- while (____counter-- > 0) \
- sqr_eltfp25519_1w_adx(a); \
-} while (0)
-
-#define sqrn_eltfp25519_1w_bmi2(a, times) do { \
- int ____counter = (times); \
- while (____counter-- > 0) \
- sqr_eltfp25519_1w_bmi2(a); \
-} while (0)
-
-#define copy_eltfp25519_1w(C, A) do { \
- (C)[0] = (A)[0]; \
- (C)[1] = (A)[1]; \
- (C)[2] = (A)[2]; \
- (C)[3] = (A)[3]; \
-} while (0)
-
-#define setzero_eltfp25519_1w(C) do { \
- (C)[0] = 0; \
- (C)[1] = 0; \
- (C)[2] = 0; \
- (C)[3] = 0; \
-} while (0)
-
-__aligned(32) static const u64 table_ladder_8k[252 * NUM_WORDS_ELTFP25519] = {
- /* 1 */ 0xfffffffffffffff3UL, 0xffffffffffffffffUL,
- 0xffffffffffffffffUL, 0x5fffffffffffffffUL,
- /* 2 */ 0x6b8220f416aafe96UL, 0x82ebeb2b4f566a34UL,
- 0xd5a9a5b075a5950fUL, 0x5142b2cf4b2488f4UL,
- /* 3 */ 0x6aaebc750069680cUL, 0x89cf7820a0f99c41UL,
- 0x2a58d9183b56d0f4UL, 0x4b5aca80e36011a4UL,
- /* 4 */ 0x329132348c29745dUL, 0xf4a2e616e1642fd7UL,
- 0x1e45bb03ff67bc34UL, 0x306912d0f42a9b4aUL,
- /* 5 */ 0xff886507e6af7154UL, 0x04f50e13dfeec82fUL,
- 0xaa512fe82abab5ceUL, 0x174e251a68d5f222UL,
- /* 6 */ 0xcf96700d82028898UL, 0x1743e3370a2c02c5UL,
- 0x379eec98b4e86eaaUL, 0x0c59888a51e0482eUL,
- /* 7 */ 0xfbcbf1d699b5d189UL, 0xacaef0d58e9fdc84UL,
- 0xc1c20d06231f7614UL, 0x2938218da274f972UL,
- /* 8 */ 0xf6af49beff1d7f18UL, 0xcc541c22387ac9c2UL,
- 0x96fcc9ef4015c56bUL, 0x69c1627c690913a9UL,
- /* 9 */ 0x7a86fd2f4733db0eUL, 0xfdb8c4f29e087de9UL,
- 0x095e4b1a8ea2a229UL, 0x1ad7a7c829b37a79UL,
- /* 10 */ 0x342d89cad17ea0c0UL, 0x67bedda6cced2051UL,
- 0x19ca31bf2bb42f74UL, 0x3df7b4c84980acbbUL,
- /* 11 */ 0xa8c6444dc80ad883UL, 0xb91e440366e3ab85UL,
- 0xc215cda00164f6d8UL, 0x3d867c6ef247e668UL,
- /* 12 */ 0xc7dd582bcc3e658cUL, 0xfd2c4748ee0e5528UL,
- 0xa0fd9b95cc9f4f71UL, 0x7529d871b0675ddfUL,
- /* 13 */ 0xb8f568b42d3cbd78UL, 0x1233011b91f3da82UL,
- 0x2dce6ccd4a7c3b62UL, 0x75e7fc8e9e498603UL,
- /* 14 */ 0x2f4f13f1fcd0b6ecUL, 0xf1a8ca1f29ff7a45UL,
- 0xc249c1a72981e29bUL, 0x6ebe0dbb8c83b56aUL,
- /* 15 */ 0x7114fa8d170bb222UL, 0x65a2dcd5bf93935fUL,
- 0xbdc41f68b59c979aUL, 0x2f0eef79a2ce9289UL,
- /* 16 */ 0x42ecbf0c083c37ceUL, 0x2930bc09ec496322UL,
- 0xf294b0c19cfeac0dUL, 0x3780aa4bedfabb80UL,
- /* 17 */ 0x56c17d3e7cead929UL, 0xe7cb4beb2e5722c5UL,
- 0x0ce931732dbfe15aUL, 0x41b883c7621052f8UL,
- /* 18 */ 0xdbf75ca0c3d25350UL, 0x2936be086eb1e351UL,
- 0xc936e03cb4a9b212UL, 0x1d45bf82322225aaUL,
- /* 19 */ 0xe81ab1036a024cc5UL, 0xe212201c304c9a72UL,
- 0xc5d73fba6832b1fcUL, 0x20ffdb5a4d839581UL,
- /* 20 */ 0xa283d367be5d0fadUL, 0x6c2b25ca8b164475UL,
- 0x9d4935467caaf22eUL, 0x5166408eee85ff49UL,
- /* 21 */ 0x3c67baa2fab4e361UL, 0xb3e433c67ef35cefUL,
- 0x5259729241159b1cUL, 0x6a621892d5b0ab33UL,
- /* 22 */ 0x20b74a387555cdcbUL, 0x532aa10e1208923fUL,
- 0xeaa17b7762281dd1UL, 0x61ab3443f05c44bfUL,
- /* 23 */ 0x257a6c422324def8UL, 0x131c6c1017e3cf7fUL,
- 0x23758739f630a257UL, 0x295a407a01a78580UL,
- /* 24 */ 0xf8c443246d5da8d9UL, 0x19d775450c52fa5dUL,
- 0x2afcfc92731bf83dUL, 0x7d10c8e81b2b4700UL,
- /* 25 */ 0xc8e0271f70baa20bUL, 0x993748867ca63957UL,
- 0x5412efb3cb7ed4bbUL, 0x3196d36173e62975UL,
- /* 26 */ 0xde5bcad141c7dffcUL, 0x47cc8cd2b395c848UL,
- 0xa34cd942e11af3cbUL, 0x0256dbf2d04ecec2UL,
- /* 27 */ 0x875ab7e94b0e667fUL, 0xcad4dd83c0850d10UL,
- 0x47f12e8f4e72c79fUL, 0x5f1a87bb8c85b19bUL,
- /* 28 */ 0x7ae9d0b6437f51b8UL, 0x12c7ce5518879065UL,
- 0x2ade09fe5cf77aeeUL, 0x23a05a2f7d2c5627UL,
- /* 29 */ 0x5908e128f17c169aUL, 0xf77498dd8ad0852dUL,
- 0x74b4c4ceab102f64UL, 0x183abadd10139845UL,
- /* 30 */ 0xb165ba8daa92aaacUL, 0xd5c5ef9599386705UL,
- 0xbe2f8f0cf8fc40d1UL, 0x2701e635ee204514UL,
- /* 31 */ 0x629fa80020156514UL, 0xf223868764a8c1ceUL,
- 0x5b894fff0b3f060eUL, 0x60d9944cf708a3faUL,
- /* 32 */ 0xaeea001a1c7a201fUL, 0xebf16a633ee2ce63UL,
- 0x6f7709594c7a07e1UL, 0x79b958150d0208cbUL,
- /* 33 */ 0x24b55e5301d410e7UL, 0xe3a34edff3fdc84dUL,
- 0xd88768e4904032d8UL, 0x131384427b3aaeecUL,
- /* 34 */ 0x8405e51286234f14UL, 0x14dc4739adb4c529UL,
- 0xb8a2b5b250634ffdUL, 0x2fe2a94ad8a7ff93UL,
- /* 35 */ 0xec5c57efe843faddUL, 0x2843ce40f0bb9918UL,
- 0xa4b561d6cf3d6305UL, 0x743629bde8fb777eUL,
- /* 36 */ 0x343edd46bbaf738fUL, 0xed981828b101a651UL,
- 0xa401760b882c797aUL, 0x1fc223e28dc88730UL,
- /* 37 */ 0x48604e91fc0fba0eUL, 0xb637f78f052c6fa4UL,
- 0x91ccac3d09e9239cUL, 0x23f7eed4437a687cUL,
- /* 38 */ 0x5173b1118d9bd800UL, 0x29d641b63189d4a7UL,
- 0xfdbf177988bbc586UL, 0x2959894fcad81df5UL,
- /* 39 */ 0xaebc8ef3b4bbc899UL, 0x4148995ab26992b9UL,
- 0x24e20b0134f92cfbUL, 0x40d158894a05dee8UL,
- /* 40 */ 0x46b00b1185af76f6UL, 0x26bac77873187a79UL,
- 0x3dc0bf95ab8fff5fUL, 0x2a608bd8945524d7UL,
- /* 41 */ 0x26449588bd446302UL, 0x7c4bc21c0388439cUL,
- 0x8e98a4f383bd11b2UL, 0x26218d7bc9d876b9UL,
- /* 42 */ 0xe3081542997c178aUL, 0x3c2d29a86fb6606fUL,
- 0x5c217736fa279374UL, 0x7dde05734afeb1faUL,
- /* 43 */ 0x3bf10e3906d42babUL, 0xe4f7803e1980649cUL,
- 0xe6053bf89595bf7aUL, 0x394faf38da245530UL,
- /* 44 */ 0x7a8efb58896928f4UL, 0xfbc778e9cc6a113cUL,
- 0x72670ce330af596fUL, 0x48f222a81d3d6cf7UL,
- /* 45 */ 0xf01fce410d72caa7UL, 0x5a20ecc7213b5595UL,
- 0x7bc21165c1fa1483UL, 0x07f89ae31da8a741UL,
- /* 46 */ 0x05d2c2b4c6830ff9UL, 0xd43e330fc6316293UL,
- 0xa5a5590a96d3a904UL, 0x705edb91a65333b6UL,
- /* 47 */ 0x048ee15e0bb9a5f7UL, 0x3240cfca9e0aaf5dUL,
- 0x8f4b71ceedc4a40bUL, 0x621c0da3de544a6dUL,
- /* 48 */ 0x92872836a08c4091UL, 0xce8375b010c91445UL,
- 0x8a72eb524f276394UL, 0x2667fcfa7ec83635UL,
- /* 49 */ 0x7f4c173345e8752aUL, 0x061b47feee7079a5UL,
- 0x25dd9afa9f86ff34UL, 0x3780cef5425dc89cUL,
- /* 50 */ 0x1a46035a513bb4e9UL, 0x3e1ef379ac575adaUL,
- 0xc78c5f1c5fa24b50UL, 0x321a967634fd9f22UL,
- /* 51 */ 0x946707b8826e27faUL, 0x3dca84d64c506fd0UL,
- 0xc189218075e91436UL, 0x6d9284169b3b8484UL,
- /* 52 */ 0x3a67e840383f2ddfUL, 0x33eec9a30c4f9b75UL,
- 0x3ec7c86fa783ef47UL, 0x26ec449fbac9fbc4UL,
- /* 53 */ 0x5c0f38cba09b9e7dUL, 0x81168cc762a3478cUL,
- 0x3e23b0d306fc121cUL, 0x5a238aa0a5efdcddUL,
- /* 54 */ 0x1ba26121c4ea43ffUL, 0x36f8c77f7c8832b5UL,
- 0x88fbea0b0adcf99aUL, 0x5ca9938ec25bebf9UL,
- /* 55 */ 0xd5436a5e51fccda0UL, 0x1dbc4797c2cd893bUL,
- 0x19346a65d3224a08UL, 0x0f5034e49b9af466UL,
- /* 56 */ 0xf23c3967a1e0b96eUL, 0xe58b08fa867a4d88UL,
- 0xfb2fabc6a7341679UL, 0x2a75381eb6026946UL,
- /* 57 */ 0xc80a3be4c19420acUL, 0x66b1f6c681f2b6dcUL,
- 0x7cf7036761e93388UL, 0x25abbbd8a660a4c4UL,
- /* 58 */ 0x91ea12ba14fd5198UL, 0x684950fc4a3cffa9UL,
- 0xf826842130f5ad28UL, 0x3ea988f75301a441UL,
- /* 59 */ 0xc978109a695f8c6fUL, 0x1746eb4a0530c3f3UL,
- 0x444d6d77b4459995UL, 0x75952b8c054e5cc7UL,
- /* 60 */ 0xa3703f7915f4d6aaUL, 0x66c346202f2647d8UL,
- 0xd01469df811d644bUL, 0x77fea47d81a5d71fUL,
- /* 61 */ 0xc5e9529ef57ca381UL, 0x6eeeb4b9ce2f881aUL,
- 0xb6e91a28e8009bd6UL, 0x4b80be3e9afc3fecUL,
- /* 62 */ 0x7e3773c526aed2c5UL, 0x1b4afcb453c9a49dUL,
- 0xa920bdd7baffb24dUL, 0x7c54699f122d400eUL,
- /* 63 */ 0xef46c8e14fa94bc8UL, 0xe0b074ce2952ed5eUL,
- 0xbea450e1dbd885d5UL, 0x61b68649320f712cUL,
- /* 64 */ 0x8a485f7309ccbdd1UL, 0xbd06320d7d4d1a2dUL,
- 0x25232973322dbef4UL, 0x445dc4758c17f770UL,
- /* 65 */ 0xdb0434177cc8933cUL, 0xed6fe82175ea059fUL,
- 0x1efebefdc053db34UL, 0x4adbe867c65daf99UL,
- /* 66 */ 0x3acd71a2a90609dfUL, 0xe5e991856dd04050UL,
- 0x1ec69b688157c23cUL, 0x697427f6885cfe4dUL,
- /* 67 */ 0xd7be7b9b65e1a851UL, 0xa03d28d522c536ddUL,
- 0x28399d658fd2b645UL, 0x49e5b7e17c2641e1UL,
- /* 68 */ 0x6f8c3a98700457a4UL, 0x5078f0a25ebb6778UL,
- 0xd13c3ccbc382960fUL, 0x2e003258a7df84b1UL,
- /* 69 */ 0x8ad1f39be6296a1cUL, 0xc1eeaa652a5fbfb2UL,
- 0x33ee0673fd26f3cbUL, 0x59256173a69d2cccUL,
- /* 70 */ 0x41ea07aa4e18fc41UL, 0xd9fc19527c87a51eUL,
- 0xbdaacb805831ca6fUL, 0x445b652dc916694fUL,
- /* 71 */ 0xce92a3a7f2172315UL, 0x1edc282de11b9964UL,
- 0xa1823aafe04c314aUL, 0x790a2d94437cf586UL,
- /* 72 */ 0x71c447fb93f6e009UL, 0x8922a56722845276UL,
- 0xbf70903b204f5169UL, 0x2f7a89891ba319feUL,
- /* 73 */ 0x02a08eb577e2140cUL, 0xed9a4ed4427bdcf4UL,
- 0x5253ec44e4323cd1UL, 0x3e88363c14e9355bUL,
- /* 74 */ 0xaa66c14277110b8cUL, 0x1ae0391610a23390UL,
- 0x2030bd12c93fc2a2UL, 0x3ee141579555c7abUL,
- /* 75 */ 0x9214de3a6d6e7d41UL, 0x3ccdd88607f17efeUL,
- 0x674f1288f8e11217UL, 0x5682250f329f93d0UL,
- /* 76 */ 0x6cf00b136d2e396eUL, 0x6e4cf86f1014debfUL,
- 0x5930b1b5bfcc4e83UL, 0x047069b48aba16b6UL,
- /* 77 */ 0x0d4ce4ab69b20793UL, 0xb24db91a97d0fb9eUL,
- 0xcdfa50f54e00d01dUL, 0x221b1085368bddb5UL,
- /* 78 */ 0xe7e59468b1e3d8d2UL, 0x53c56563bd122f93UL,
- 0xeee8a903e0663f09UL, 0x61efa662cbbe3d42UL,
- /* 79 */ 0x2cf8ddddde6eab2aUL, 0x9bf80ad51435f231UL,
- 0x5deadacec9f04973UL, 0x29275b5d41d29b27UL,
- /* 80 */ 0xcfde0f0895ebf14fUL, 0xb9aab96b054905a7UL,
- 0xcae80dd9a1c420fdUL, 0x0a63bf2f1673bbc7UL,
- /* 81 */ 0x092f6e11958fbc8cUL, 0x672a81e804822fadUL,
- 0xcac8351560d52517UL, 0x6f3f7722c8f192f8UL,
- /* 82 */ 0xf8ba90ccc2e894b7UL, 0x2c7557a438ff9f0dUL,
- 0x894d1d855ae52359UL, 0x68e122157b743d69UL,
- /* 83 */ 0xd87e5570cfb919f3UL, 0x3f2cdecd95798db9UL,
- 0x2121154710c0a2ceUL, 0x3c66a115246dc5b2UL,
- /* 84 */ 0xcbedc562294ecb72UL, 0xba7143c36a280b16UL,
- 0x9610c2efd4078b67UL, 0x6144735d946a4b1eUL,
- /* 85 */ 0x536f111ed75b3350UL, 0x0211db8c2041d81bUL,
- 0xf93cb1000e10413cUL, 0x149dfd3c039e8876UL,
- /* 86 */ 0xd479dde46b63155bUL, 0xb66e15e93c837976UL,
- 0xdafde43b1f13e038UL, 0x5fafda1a2e4b0b35UL,
- /* 87 */ 0x3600bbdf17197581UL, 0x3972050bbe3cd2c2UL,
- 0x5938906dbdd5be86UL, 0x34fce5e43f9b860fUL,
- /* 88 */ 0x75a8a4cd42d14d02UL, 0x828dabc53441df65UL,
- 0x33dcabedd2e131d3UL, 0x3ebad76fb814d25fUL,
- /* 89 */ 0xd4906f566f70e10fUL, 0x5d12f7aa51690f5aUL,
- 0x45adb16e76cefcf2UL, 0x01f768aead232999UL,
- /* 90 */ 0x2b6cc77b6248febdUL, 0x3cd30628ec3aaffdUL,
- 0xce1c0b80d4ef486aUL, 0x4c3bff2ea6f66c23UL,
- /* 91 */ 0x3f2ec4094aeaeb5fUL, 0x61b19b286e372ca7UL,
- 0x5eefa966de2a701dUL, 0x23b20565de55e3efUL,
- /* 92 */ 0xe301ca5279d58557UL, 0x07b2d4ce27c2874fUL,
- 0xa532cd8a9dcf1d67UL, 0x2a52fee23f2bff56UL,
- /* 93 */ 0x8624efb37cd8663dUL, 0xbbc7ac20ffbd7594UL,
- 0x57b85e9c82d37445UL, 0x7b3052cb86a6ec66UL,
- /* 94 */ 0x3482f0ad2525e91eUL, 0x2cb68043d28edca0UL,
- 0xaf4f6d052e1b003aUL, 0x185f8c2529781b0aUL,
- /* 95 */ 0xaa41de5bd80ce0d6UL, 0x9407b2416853e9d6UL,
- 0x563ec36e357f4c3aUL, 0x4cc4b8dd0e297bceUL,
- /* 96 */ 0xa2fc1a52ffb8730eUL, 0x1811f16e67058e37UL,
- 0x10f9a366cddf4ee1UL, 0x72f4a0c4a0b9f099UL,
- /* 97 */ 0x8c16c06f663f4ea7UL, 0x693b3af74e970fbaUL,
- 0x2102e7f1d69ec345UL, 0x0ba53cbc968a8089UL,
- /* 98 */ 0xca3d9dc7fea15537UL, 0x4c6824bb51536493UL,
- 0xb9886314844006b1UL, 0x40d2a72ab454cc60UL,
- /* 99 */ 0x5936a1b712570975UL, 0x91b9d648debda657UL,
- 0x3344094bb64330eaUL, 0x006ba10d12ee51d0UL,
- /* 100 */ 0x19228468f5de5d58UL, 0x0eb12f4c38cc05b0UL,
- 0xa1039f9dd5601990UL, 0x4502d4ce4fff0e0bUL,
- /* 101 */ 0xeb2054106837c189UL, 0xd0f6544c6dd3b93cUL,
- 0x40727064c416d74fUL, 0x6e15c6114b502ef0UL,
- /* 102 */ 0x4df2a398cfb1a76bUL, 0x11256c7419f2f6b1UL,
- 0x4a497962066e6043UL, 0x705b3aab41355b44UL,
- /* 103 */ 0x365ef536d797b1d8UL, 0x00076bd622ddf0dbUL,
- 0x3bbf33b0e0575a88UL, 0x3777aa05c8e4ca4dUL,
- /* 104 */ 0x392745c85578db5fUL, 0x6fda4149dbae5ae2UL,
- 0xb1f0b00b8adc9867UL, 0x09963437d36f1da3UL,
- /* 105 */ 0x7e824e90a5dc3853UL, 0xccb5f6641f135cbdUL,
- 0x6736d86c87ce8fccUL, 0x625f3ce26604249fUL,
- /* 106 */ 0xaf8ac8059502f63fUL, 0x0c05e70a2e351469UL,
- 0x35292e9c764b6305UL, 0x1a394360c7e23ac3UL,
- /* 107 */ 0xd5c6d53251183264UL, 0x62065abd43c2b74fUL,
- 0xb5fbf5d03b973f9bUL, 0x13a3da3661206e5eUL,
- /* 108 */ 0xc6bd5837725d94e5UL, 0x18e30912205016c5UL,
- 0x2088ce1570033c68UL, 0x7fba1f495c837987UL,
- /* 109 */ 0x5a8c7423f2f9079dUL, 0x1735157b34023fc5UL,
- 0xe4f9b49ad2fab351UL, 0x6691ff72c878e33cUL,
- /* 110 */ 0x122c2adedc5eff3eUL, 0xf8dd4bf1d8956cf4UL,
- 0xeb86205d9e9e5bdaUL, 0x049b92b9d975c743UL,
- /* 111 */ 0xa5379730b0f6c05aUL, 0x72a0ffacc6f3a553UL,
- 0xb0032c34b20dcd6dUL, 0x470e9dbc88d5164aUL,
- /* 112 */ 0xb19cf10ca237c047UL, 0xb65466711f6c81a2UL,
- 0xb3321bd16dd80b43UL, 0x48c14f600c5fbe8eUL,
- /* 113 */ 0x66451c264aa6c803UL, 0xb66e3904a4fa7da6UL,
- 0xd45f19b0b3128395UL, 0x31602627c3c9bc10UL,
- /* 114 */ 0x3120dc4832e4e10dUL, 0xeb20c46756c717f7UL,
- 0x00f52e3f67280294UL, 0x566d4fc14730c509UL,
- /* 115 */ 0x7e3a5d40fd837206UL, 0xc1e926dc7159547aUL,
- 0x216730fba68d6095UL, 0x22e8c3843f69cea7UL,
- /* 116 */ 0x33d074e8930e4b2bUL, 0xb6e4350e84d15816UL,
- 0x5534c26ad6ba2365UL, 0x7773c12f89f1f3f3UL,
- /* 117 */ 0x8cba404da57962aaUL, 0x5b9897a81999ce56UL,
- 0x508e862f121692fcUL, 0x3a81907fa093c291UL,
- /* 118 */ 0x0dded0ff4725a510UL, 0x10d8cc10673fc503UL,
- 0x5b9d151c9f1f4e89UL, 0x32a5c1d5cb09a44cUL,
- /* 119 */ 0x1e0aa442b90541fbUL, 0x5f85eb7cc1b485dbUL,
- 0xbee595ce8a9df2e5UL, 0x25e496c722422236UL,
- /* 120 */ 0x5edf3c46cd0fe5b9UL, 0x34e75a7ed2a43388UL,
- 0xe488de11d761e352UL, 0x0e878a01a085545cUL,
- /* 121 */ 0xba493c77e021bb04UL, 0x2b4d1843c7df899aUL,
- 0x9ea37a487ae80d67UL, 0x67a9958011e41794UL,
- /* 122 */ 0x4b58051a6697b065UL, 0x47e33f7d8d6ba6d4UL,
- 0xbb4da8d483ca46c1UL, 0x68becaa181c2db0dUL,
- /* 123 */ 0x8d8980e90b989aa5UL, 0xf95eb14a2c93c99bUL,
- 0x51c6c7c4796e73a2UL, 0x6e228363b5efb569UL,
- /* 124 */ 0xc6bbc0b02dd624c8UL, 0x777eb47dec8170eeUL,
- 0x3cde15a004cfafa9UL, 0x1dc6bc087160bf9bUL,
- /* 125 */ 0x2e07e043eec34002UL, 0x18e9fc677a68dc7fUL,
- 0xd8da03188bd15b9aUL, 0x48fbc3bb00568253UL,
- /* 126 */ 0x57547d4cfb654ce1UL, 0xd3565b82a058e2adUL,
- 0xf63eaf0bbf154478UL, 0x47531ef114dfbb18UL,
- /* 127 */ 0xe1ec630a4278c587UL, 0x5507d546ca8e83f3UL,
- 0x85e135c63adc0c2bUL, 0x0aa7efa85682844eUL,
- /* 128 */ 0x72691ba8b3e1f615UL, 0x32b4e9701fbe3ffaUL,
- 0x97b6d92e39bb7868UL, 0x2cfe53dea02e39e8UL,
- /* 129 */ 0x687392cd85cd52b0UL, 0x27ff66c910e29831UL,
- 0x97134556a9832d06UL, 0x269bb0360a84f8a0UL,
- /* 130 */ 0x706e55457643f85cUL, 0x3734a48c9b597d1bUL,
- 0x7aee91e8c6efa472UL, 0x5cd6abc198a9d9e0UL,
- /* 131 */ 0x0e04de06cb3ce41aUL, 0xd8c6eb893402e138UL,
- 0x904659bb686e3772UL, 0x7215c371746ba8c8UL,
- /* 132 */ 0xfd12a97eeae4a2d9UL, 0x9514b7516394f2c5UL,
- 0x266fd5809208f294UL, 0x5c847085619a26b9UL,
- /* 133 */ 0x52985410fed694eaUL, 0x3c905b934a2ed254UL,
- 0x10bb47692d3be467UL, 0x063b3d2d69e5e9e1UL,
- /* 134 */ 0x472726eedda57debUL, 0xefb6c4ae10f41891UL,
- 0x2b1641917b307614UL, 0x117c554fc4f45b7cUL,
- /* 135 */ 0xc07cf3118f9d8812UL, 0x01dbd82050017939UL,
- 0xd7e803f4171b2827UL, 0x1015e87487d225eaUL,
- /* 136 */ 0xc58de3fed23acc4dUL, 0x50db91c294a7be2dUL,
- 0x0b94d43d1c9cf457UL, 0x6b1640fa6e37524aUL,
- /* 137 */ 0x692f346c5fda0d09UL, 0x200b1c59fa4d3151UL,
- 0xb8c46f760777a296UL, 0x4b38395f3ffdfbcfUL,
- /* 138 */ 0x18d25e00be54d671UL, 0x60d50582bec8aba6UL,
- 0x87ad8f263b78b982UL, 0x50fdf64e9cda0432UL,
- /* 139 */ 0x90f567aac578dcf0UL, 0xef1e9b0ef2a3133bUL,
- 0x0eebba9242d9de71UL, 0x15473c9bf03101c7UL,
- /* 140 */ 0x7c77e8ae56b78095UL, 0xb678e7666e6f078eUL,
- 0x2da0b9615348ba1fUL, 0x7cf931c1ff733f0bUL,
- /* 141 */ 0x26b357f50a0a366cUL, 0xe9708cf42b87d732UL,
- 0xc13aeea5f91cb2c0UL, 0x35d90c991143bb4cUL,
- /* 142 */ 0x47c1c404a9a0d9dcUL, 0x659e58451972d251UL,
- 0x3875a8c473b38c31UL, 0x1fbd9ed379561f24UL,
- /* 143 */ 0x11fabc6fd41ec28dUL, 0x7ef8dfe3cd2a2dcaUL,
- 0x72e73b5d8c404595UL, 0x6135fa4954b72f27UL,
- /* 144 */ 0xccfc32a2de24b69cUL, 0x3f55698c1f095d88UL,
- 0xbe3350ed5ac3f929UL, 0x5e9bf806ca477eebUL,
- /* 145 */ 0xe9ce8fb63c309f68UL, 0x5376f63565e1f9f4UL,
- 0xd1afcfb35a6393f1UL, 0x6632a1ede5623506UL,
- /* 146 */ 0x0b7d6c390c2ded4cUL, 0x56cb3281df04cb1fUL,
- 0x66305a1249ecc3c7UL, 0x5d588b60a38ca72aUL,
- /* 147 */ 0xa6ecbf78e8e5f42dUL, 0x86eeb44b3c8a3eecUL,
- 0xec219c48fbd21604UL, 0x1aaf1af517c36731UL,
- /* 148 */ 0xc306a2836769bde7UL, 0x208280622b1e2adbUL,
- 0x8027f51ffbff94a6UL, 0x76cfa1ce1124f26bUL,
- /* 149 */ 0x18eb00562422abb6UL, 0xf377c4d58f8c29c3UL,
- 0x4dbbc207f531561aUL, 0x0253b7f082128a27UL,
- /* 150 */ 0x3d1f091cb62c17e0UL, 0x4860e1abd64628a9UL,
- 0x52d17436309d4253UL, 0x356f97e13efae576UL,
- /* 151 */ 0xd351e11aa150535bUL, 0x3e6b45bb1dd878ccUL,
- 0x0c776128bed92c98UL, 0x1d34ae93032885b8UL,
- /* 152 */ 0x4ba0488ca85ba4c3UL, 0x985348c33c9ce6ceUL,
- 0x66124c6f97bda770UL, 0x0f81a0290654124aUL,
- /* 153 */ 0x9ed09ca6569b86fdUL, 0x811009fd18af9a2dUL,
- 0xff08d03f93d8c20aUL, 0x52a148199faef26bUL,
- /* 154 */ 0x3e03f9dc2d8d1b73UL, 0x4205801873961a70UL,
- 0xc0d987f041a35970UL, 0x07aa1f15a1c0d549UL,
- /* 155 */ 0xdfd46ce08cd27224UL, 0x6d0a024f934e4239UL,
- 0x808a7a6399897b59UL, 0x0a4556e9e13d95a2UL,
- /* 156 */ 0xd21a991fe9c13045UL, 0x9b0e8548fe7751b8UL,
- 0x5da643cb4bf30035UL, 0x77db28d63940f721UL,
- /* 157 */ 0xfc5eeb614adc9011UL, 0x5229419ae8c411ebUL,
- 0x9ec3e7787d1dcf74UL, 0x340d053e216e4cb5UL,
- /* 158 */ 0xcac7af39b48df2b4UL, 0xc0faec2871a10a94UL,
- 0x140a69245ca575edUL, 0x0cf1c37134273a4cUL,
- /* 159 */ 0xc8ee306ac224b8a5UL, 0x57eaee7ccb4930b0UL,
- 0xa1e806bdaacbe74fUL, 0x7d9a62742eeb657dUL,
- /* 160 */ 0x9eb6b6ef546c4830UL, 0x885cca1fddb36e2eUL,
- 0xe6b9f383ef0d7105UL, 0x58654fef9d2e0412UL,
- /* 161 */ 0xa905c4ffbe0e8e26UL, 0x942de5df9b31816eUL,
- 0x497d723f802e88e1UL, 0x30684dea602f408dUL,
- /* 162 */ 0x21e5a278a3e6cb34UL, 0xaefb6e6f5b151dc4UL,
- 0xb30b8e049d77ca15UL, 0x28c3c9cf53b98981UL,
- /* 163 */ 0x287fb721556cdd2aUL, 0x0d317ca897022274UL,
- 0x7468c7423a543258UL, 0x4a7f11464eb5642fUL,
- /* 164 */ 0xa237a4774d193aa6UL, 0xd865986ea92129a1UL,
- 0x24c515ecf87c1a88UL, 0x604003575f39f5ebUL,
- /* 165 */ 0x47b9f189570a9b27UL, 0x2b98cede465e4b78UL,
- 0x026df551dbb85c20UL, 0x74fcd91047e21901UL,
- /* 166 */ 0x13e2a90a23c1bfa3UL, 0x0cb0074e478519f6UL,
- 0x5ff1cbbe3af6cf44UL, 0x67fe5438be812dbeUL,
- /* 167 */ 0xd13cf64fa40f05b0UL, 0x054dfb2f32283787UL,
- 0x4173915b7f0d2aeaUL, 0x482f144f1f610d4eUL,
- /* 168 */ 0xf6210201b47f8234UL, 0x5d0ae1929e70b990UL,
- 0xdcd7f455b049567cUL, 0x7e93d0f1f0916f01UL,
- /* 169 */ 0xdd79cbf18a7db4faUL, 0xbe8391bf6f74c62fUL,
- 0x027145d14b8291bdUL, 0x585a73ea2cbf1705UL,
- /* 170 */ 0x485ca03e928a0db2UL, 0x10fc01a5742857e7UL,
- 0x2f482edbd6d551a7UL, 0x0f0433b5048fdb8aUL,
- /* 171 */ 0x60da2e8dd7dc6247UL, 0x88b4c9d38cd4819aUL,
- 0x13033ac001f66697UL, 0x273b24fe3b367d75UL,
- /* 172 */ 0xc6e8f66a31b3b9d4UL, 0x281514a494df49d5UL,
- 0xd1726fdfc8b23da7UL, 0x4b3ae7d103dee548UL,
- /* 173 */ 0xc6256e19ce4b9d7eUL, 0xff5c5cf186e3c61cUL,
- 0xacc63ca34b8ec145UL, 0x74621888fee66574UL,
- /* 174 */ 0x956f409645290a1eUL, 0xef0bf8e3263a962eUL,
- 0xed6a50eb5ec2647bUL, 0x0694283a9dca7502UL,
- /* 175 */ 0x769b963643a2dcd1UL, 0x42b7c8ea09fc5353UL,
- 0x4f002aee13397eabUL, 0x63005e2c19b7d63aUL,
- /* 176 */ 0xca6736da63023beaUL, 0x966c7f6db12a99b7UL,
- 0xace09390c537c5e1UL, 0x0b696063a1aa89eeUL,
- /* 177 */ 0xebb03e97288c56e5UL, 0x432a9f9f938c8be8UL,
- 0xa6a5a93d5b717f71UL, 0x1a5fb4c3e18f9d97UL,
- /* 178 */ 0x1c94e7ad1c60cdceUL, 0xee202a43fc02c4a0UL,
- 0x8dafe4d867c46a20UL, 0x0a10263c8ac27b58UL,
- /* 179 */ 0xd0dea9dfe4432a4aUL, 0x856af87bbe9277c5UL,
- 0xce8472acc212c71aUL, 0x6f151b6d9bbb1e91UL,
- /* 180 */ 0x26776c527ceed56aUL, 0x7d211cb7fbf8faecUL,
- 0x37ae66a6fd4609ccUL, 0x1f81b702d2770c42UL,
- /* 181 */ 0x2fb0b057eac58392UL, 0xe1dd89fe29744e9dUL,
- 0xc964f8eb17beb4f8UL, 0x29571073c9a2d41eUL,
- /* 182 */ 0xa948a18981c0e254UL, 0x2df6369b65b22830UL,
- 0xa33eb2d75fcfd3c6UL, 0x078cd6ec4199a01fUL,
- /* 183 */ 0x4a584a41ad900d2fUL, 0x32142b78e2c74c52UL,
- 0x68c4e8338431c978UL, 0x7f69ea9008689fc2UL,
- /* 184 */ 0x52f2c81e46a38265UL, 0xfd78072d04a832fdUL,
- 0x8cd7d5fa25359e94UL, 0x4de71b7454cc29d2UL,
- /* 185 */ 0x42eb60ad1eda6ac9UL, 0x0aad37dfdbc09c3aUL,
- 0x81004b71e33cc191UL, 0x44e6be345122803cUL,
- /* 186 */ 0x03fe8388ba1920dbUL, 0xf5d57c32150db008UL,
- 0x49c8c4281af60c29UL, 0x21edb518de701aeeUL,
- /* 187 */ 0x7fb63e418f06dc99UL, 0xa4460d99c166d7b8UL,
- 0x24dd5248ce520a83UL, 0x5ec3ad712b928358UL,
- /* 188 */ 0x15022a5fbd17930fUL, 0xa4f64a77d82570e3UL,
- 0x12bc8d6915783712UL, 0x498194c0fc620abbUL,
- /* 189 */ 0x38a2d9d255686c82UL, 0x785c6bd9193e21f0UL,
- 0xe4d5c81ab24a5484UL, 0x56307860b2e20989UL,
- /* 190 */ 0x429d55f78b4d74c4UL, 0x22f1834643350131UL,
- 0x1e60c24598c71fffUL, 0x59f2f014979983efUL,
- /* 191 */ 0x46a47d56eb494a44UL, 0x3e22a854d636a18eUL,
- 0xb346e15274491c3bUL, 0x2ceafd4e5390cde7UL,
- /* 192 */ 0xba8a8538be0d6675UL, 0x4b9074bb50818e23UL,
- 0xcbdab89085d304c3UL, 0x61a24fe0e56192c4UL,
- /* 193 */ 0xcb7615e6db525bcbUL, 0xdd7d8c35a567e4caUL,
- 0xe6b4153acafcdd69UL, 0x2d668e097f3c9766UL,
- /* 194 */ 0xa57e7e265ce55ef0UL, 0x5d9f4e527cd4b967UL,
- 0xfbc83606492fd1e5UL, 0x090d52beb7c3f7aeUL,
- /* 195 */ 0x09b9515a1e7b4d7cUL, 0x1f266a2599da44c0UL,
- 0xa1c49548e2c55504UL, 0x7ef04287126f15ccUL,
- /* 196 */ 0xfed1659dbd30ef15UL, 0x8b4ab9eec4e0277bUL,
- 0x884d6236a5df3291UL, 0x1fd96ea6bf5cf788UL,
- /* 197 */ 0x42a161981f190d9aUL, 0x61d849507e6052c1UL,
- 0x9fe113bf285a2cd5UL, 0x7c22d676dbad85d8UL,
- /* 198 */ 0x82e770ed2bfbd27dUL, 0x4c05b2ece996f5a5UL,
- 0xcd40a9c2b0900150UL, 0x5895319213d9bf64UL,
- /* 199 */ 0xe7cc5d703fea2e08UL, 0xb50c491258e2188cUL,
- 0xcce30baa48205bf0UL, 0x537c659ccfa32d62UL,
- /* 200 */ 0x37b6623a98cfc088UL, 0xfe9bed1fa4d6aca4UL,
- 0x04d29b8e56a8d1b0UL, 0x725f71c40b519575UL,
- /* 201 */ 0x28c7f89cd0339ce6UL, 0x8367b14469ddc18bUL,
- 0x883ada83a6a1652cUL, 0x585f1974034d6c17UL,
- /* 202 */ 0x89cfb266f1b19188UL, 0xe63b4863e7c35217UL,
- 0xd88c9da6b4c0526aUL, 0x3e035c9df0954635UL,
- /* 203 */ 0xdd9d5412fb45de9dUL, 0xdd684532e4cff40dUL,
- 0x4b5c999b151d671cUL, 0x2d8c2cc811e7f690UL,
- /* 204 */ 0x7f54be1d90055d40UL, 0xa464c5df464aaf40UL,
- 0x33979624f0e917beUL, 0x2c018dc527356b30UL,
- /* 205 */ 0xa5415024e330b3d4UL, 0x73ff3d96691652d3UL,
- 0x94ec42c4ef9b59f1UL, 0x0747201618d08e5aUL,
- /* 206 */ 0x4d6ca48aca411c53UL, 0x66415f2fcfa66119UL,
- 0x9c4dd40051e227ffUL, 0x59810bc09a02f7ebUL,
- /* 207 */ 0x2a7eb171b3dc101dUL, 0x441c5ab99ffef68eUL,
- 0x32025c9b93b359eaUL, 0x5e8ce0a71e9d112fUL,
- /* 208 */ 0xbfcccb92429503fdUL, 0xd271ba752f095d55UL,
- 0x345ead5e972d091eUL, 0x18c8df11a83103baUL,
- /* 209 */ 0x90cd949a9aed0f4cUL, 0xc5d1f4cb6660e37eUL,
- 0xb8cac52d56c52e0bUL, 0x6e42e400c5808e0dUL,
- /* 210 */ 0xa3b46966eeaefd23UL, 0x0c4f1f0be39ecdcaUL,
- 0x189dc8c9d683a51dUL, 0x51f27f054c09351bUL,
- /* 211 */ 0x4c487ccd2a320682UL, 0x587ea95bb3df1c96UL,
- 0xc8ccf79e555cb8e8UL, 0x547dc829a206d73dUL,
- /* 212 */ 0xb822a6cd80c39b06UL, 0xe96d54732000d4c6UL,
- 0x28535b6f91463b4dUL, 0x228f4660e2486e1dUL,
- /* 213 */ 0x98799538de8d3abfUL, 0x8cd8330045ebca6eUL,
- 0x79952a008221e738UL, 0x4322e1a7535cd2bbUL,
- /* 214 */ 0xb114c11819d1801cUL, 0x2016e4d84f3f5ec7UL,
- 0xdd0e2df409260f4cUL, 0x5ec362c0ae5f7266UL,
- /* 215 */ 0xc0462b18b8b2b4eeUL, 0x7cc8d950274d1afbUL,
- 0xf25f7105436b02d2UL, 0x43bbf8dcbff9ccd3UL,
- /* 216 */ 0xb6ad1767a039e9dfUL, 0xb0714da8f69d3583UL,
- 0x5e55fa18b42931f5UL, 0x4ed5558f33c60961UL,
- /* 217 */ 0x1fe37901c647a5ddUL, 0x593ddf1f8081d357UL,
- 0x0249a4fd813fd7a6UL, 0x69acca274e9caf61UL,
- /* 218 */ 0x047ba3ea330721c9UL, 0x83423fc20e7e1ea0UL,
- 0x1df4c0af01314a60UL, 0x09a62dab89289527UL,
- /* 219 */ 0xa5b325a49cc6cb00UL, 0xe94b5dc654b56cb6UL,
- 0x3be28779adc994a0UL, 0x4296e8f8ba3a4aadUL,
- /* 220 */ 0x328689761e451eabUL, 0x2e4d598bff59594aUL,
- 0x49b96853d7a7084aUL, 0x4980a319601420a8UL,
- /* 221 */ 0x9565b9e12f552c42UL, 0x8a5318db7100fe96UL,
- 0x05c90b4d43add0d7UL, 0x538b4cd66a5d4edaUL,
- /* 222 */ 0xf4e94fc3e89f039fUL, 0x592c9af26f618045UL,
- 0x08a36eb5fd4b9550UL, 0x25fffaf6c2ed1419UL,
- /* 223 */ 0x34434459cc79d354UL, 0xeeecbfb4b1d5476bUL,
- 0xddeb34a061615d99UL, 0x5129cecceb64b773UL,
- /* 224 */ 0xee43215894993520UL, 0x772f9c7cf14c0b3bUL,
- 0xd2e2fce306bedad5UL, 0x715f42b546f06a97UL,
- /* 225 */ 0x434ecdceda5b5f1aUL, 0x0da17115a49741a9UL,
- 0x680bd77c73edad2eUL, 0x487c02354edd9041UL,
- /* 226 */ 0xb8efeff3a70ed9c4UL, 0x56a32aa3e857e302UL,
- 0xdf3a68bd48a2a5a0UL, 0x07f650b73176c444UL,
- /* 227 */ 0xe38b9b1626e0ccb1UL, 0x79e053c18b09fb36UL,
- 0x56d90319c9f94964UL, 0x1ca941e7ac9ff5c4UL,
- /* 228 */ 0x49c4df29162fa0bbUL, 0x8488cf3282b33305UL,
- 0x95dfda14cabb437dUL, 0x3391f78264d5ad86UL,
- /* 229 */ 0x729ae06ae2b5095dUL, 0xd58a58d73259a946UL,
- 0xe9834262d13921edUL, 0x27fedafaa54bb592UL,
- /* 230 */ 0xa99dc5b829ad48bbUL, 0x5f025742499ee260UL,
- 0x802c8ecd5d7513fdUL, 0x78ceb3ef3f6dd938UL,
- /* 231 */ 0xc342f44f8a135d94UL, 0x7b9edb44828cdda3UL,
- 0x9436d11a0537cfe7UL, 0x5064b164ec1ab4c8UL,
- /* 232 */ 0x7020eccfd37eb2fcUL, 0x1f31ea3ed90d25fcUL,
- 0x1b930d7bdfa1bb34UL, 0x5344467a48113044UL,
- /* 233 */ 0x70073170f25e6dfbUL, 0xe385dc1a50114cc8UL,
- 0x2348698ac8fc4f00UL, 0x2a77a55284dd40d8UL,
- /* 234 */ 0xfe06afe0c98c6ce4UL, 0xc235df96dddfd6e4UL,
- 0x1428d01e33bf1ed3UL, 0x785768ec9300bdafUL,
- /* 235 */ 0x9702e57a91deb63bUL, 0x61bdb8bfe5ce8b80UL,
- 0x645b426f3d1d58acUL, 0x4804a82227a557bcUL,
- /* 236 */ 0x8e57048ab44d2601UL, 0x68d6501a4b3a6935UL,
- 0xc39c9ec3f9e1c293UL, 0x4172f257d4de63e2UL,
- /* 237 */ 0xd368b450330c6401UL, 0x040d3017418f2391UL,
- 0x2c34bb6090b7d90dUL, 0x16f649228fdfd51fUL,
- /* 238 */ 0xbea6818e2b928ef5UL, 0xe28ccf91cdc11e72UL,
- 0x594aaa68e77a36cdUL, 0x313034806c7ffd0fUL,
- /* 239 */ 0x8a9d27ac2249bd65UL, 0x19a3b464018e9512UL,
- 0xc26ccff352b37ec7UL, 0x056f68341d797b21UL,
- /* 240 */ 0x5e79d6757efd2327UL, 0xfabdbcb6553afe15UL,
- 0xd3e7222c6eaf5a60UL, 0x7046c76d4dae743bUL,
- /* 241 */ 0x660be872b18d4a55UL, 0x19992518574e1496UL,
- 0xc103053a302bdcbbUL, 0x3ed8e9800b218e8eUL,
- /* 242 */ 0x7b0b9239fa75e03eUL, 0xefe9fb684633c083UL,
- 0x98a35fbe391a7793UL, 0x6065510fe2d0fe34UL,
- /* 243 */ 0x55cb668548abad0cUL, 0xb4584548da87e527UL,
- 0x2c43ecea0107c1ddUL, 0x526028809372de35UL,
- /* 244 */ 0x3415c56af9213b1fUL, 0x5bee1a4d017e98dbUL,
- 0x13f6b105b5cf709bUL, 0x5ff20e3482b29ab6UL,
- /* 245 */ 0x0aa29c75cc2e6c90UL, 0xfc7d73ca3a70e206UL,
- 0x899fc38fc4b5c515UL, 0x250386b124ffc207UL,
- /* 246 */ 0x54ea28d5ae3d2b56UL, 0x9913149dd6de60ceUL,
- 0x16694fc58f06d6c1UL, 0x46b23975eb018fc7UL,
- /* 247 */ 0x470a6a0fb4b7b4e2UL, 0x5d92475a8f7253deUL,
- 0xabeee5b52fbd3adbUL, 0x7fa20801a0806968UL,
- /* 248 */ 0x76f3faf19f7714d2UL, 0xb3e840c12f4660c3UL,
- 0x0fb4cd8df212744eUL, 0x4b065a251d3a2dd2UL,
- /* 249 */ 0x5cebde383d77cd4aUL, 0x6adf39df882c9cb1UL,
- 0xa2dd242eb09af759UL, 0x3147c0e50e5f6422UL,
- /* 250 */ 0x164ca5101d1350dbUL, 0xf8d13479c33fc962UL,
- 0xe640ce4d13e5da08UL, 0x4bdee0c45061f8baUL,
- /* 251 */ 0xd7c46dc1a4edb1c9UL, 0x5514d7b6437fd98aUL,
- 0x58942f6bb2a1c00bUL, 0x2dffb2ab1d70710eUL,
- /* 252 */ 0xccdfcf2fc18b6d68UL, 0xa8ebcba8b7806167UL,
- 0x980697f95e2937e3UL, 0x02fbba1cd0126e8cUL
-};
-
-/* c is two 512-bit products: c0[0:7]=a0[0:3]*b0[0:3] and c1[8:15]=a1[4:7]*b1[4:7]
- * a is two 256-bit integers: a0[0:3] and a1[4:7]
- * b is two 256-bit integers: b0[0:3] and b1[4:7]
- */
-static void mul2_256x256_integer_adx(u64 *const c, const u64 *const a,
- const u64 *const b)
-{
- asm volatile(
- "xorl %%r14d, %%r14d ;"
- "movq (%1), %%rdx; " /* A[0] */
- "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */
- "xorl %%r10d, %%r10d ;"
- "movq %%r8, (%0) ;"
- "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */
- "adox %%r10, %%r15 ;"
- "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */
- "adox %%r8, %%rax ;"
- "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */
- "adox %%r10, %%rbx ;"
- /******************************************/
- "adox %%r14, %%rcx ;"
-
- "movq 8(%1), %%rdx; " /* A[1] */
- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
- "adox %%r15, %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
- "adox %%r10, %%r9 ;"
- "adcx %%r9, %%rax ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */
- "adox %%r8, %%r11 ;"
- "adcx %%r11, %%rbx ;"
- "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */
- "adox %%r10, %%r13 ;"
- "adcx %%r13, %%rcx ;"
- /******************************************/
- "adox %%r14, %%r15 ;"
- "adcx %%r14, %%r15 ;"
-
- "movq 16(%1), %%rdx; " /* A[2] */
- "xorl %%r10d, %%r10d ;"
- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
- "adox %%rax, %%r8 ;"
- "movq %%r8, 16(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
- "adox %%r10, %%r9 ;"
- "adcx %%r9, %%rbx ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */
- "adox %%r8, %%r11 ;"
- "adcx %%r11, %%rcx ;"
- "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */
- "adox %%r10, %%r13 ;"
- "adcx %%r13, %%r15 ;"
- /******************************************/
- "adox %%r14, %%rax ;"
- "adcx %%r14, %%rax ;"
-
- "movq 24(%1), %%rdx; " /* A[3] */
- "xorl %%r10d, %%r10d ;"
- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
- "adox %%rbx, %%r8 ;"
- "movq %%r8, 24(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
- "adox %%r10, %%r9 ;"
- "adcx %%r9, %%rcx ;"
- "movq %%rcx, 32(%0) ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */
- "adox %%r8, %%r11 ;"
- "adcx %%r11, %%r15 ;"
- "movq %%r15, 40(%0) ;"
- "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */
- "adox %%r10, %%r13 ;"
- "adcx %%r13, %%rax ;"
- "movq %%rax, 48(%0) ;"
- /******************************************/
- "adox %%r14, %%rbx ;"
- "adcx %%r14, %%rbx ;"
- "movq %%rbx, 56(%0) ;"
-
- "movq 32(%1), %%rdx; " /* C[0] */
- "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */
- "xorl %%r10d, %%r10d ;"
- "movq %%r8, 64(%0);"
- "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */
- "adox %%r10, %%r15 ;"
- "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */
- "adox %%r8, %%rax ;"
- "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */
- "adox %%r10, %%rbx ;"
- /******************************************/
- "adox %%r14, %%rcx ;"
-
- "movq 40(%1), %%rdx; " /* C[1] */
- "xorl %%r10d, %%r10d ;"
- "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */
- "adox %%r15, %%r8 ;"
- "movq %%r8, 72(%0);"
- "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */
- "adox %%r10, %%r9 ;"
- "adcx %%r9, %%rax ;"
- "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */
- "adox %%r8, %%r11 ;"
- "adcx %%r11, %%rbx ;"
- "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */
- "adox %%r10, %%r13 ;"
- "adcx %%r13, %%rcx ;"
- /******************************************/
- "adox %%r14, %%r15 ;"
- "adcx %%r14, %%r15 ;"
-
- "movq 48(%1), %%rdx; " /* C[2] */
- "xorl %%r10d, %%r10d ;"
- "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */
- "adox %%rax, %%r8 ;"
- "movq %%r8, 80(%0);"
- "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */
- "adox %%r10, %%r9 ;"
- "adcx %%r9, %%rbx ;"
- "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */
- "adox %%r8, %%r11 ;"
- "adcx %%r11, %%rcx ;"
- "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */
- "adox %%r10, %%r13 ;"
- "adcx %%r13, %%r15 ;"
- /******************************************/
- "adox %%r14, %%rax ;"
- "adcx %%r14, %%rax ;"
-
- "movq 56(%1), %%rdx; " /* C[3] */
- "xorl %%r10d, %%r10d ;"
- "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */
- "adox %%rbx, %%r8 ;"
- "movq %%r8, 88(%0);"
- "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */
- "adox %%r10, %%r9 ;"
- "adcx %%r9, %%rcx ;"
- "movq %%rcx, 96(%0) ;"
- "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */
- "adox %%r8, %%r11 ;"
- "adcx %%r11, %%r15 ;"
- "movq %%r15, 104(%0) ;"
- "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */
- "adox %%r10, %%r13 ;"
- "adcx %%r13, %%rax ;"
- "movq %%rax, 112(%0) ;"
- /******************************************/
- "adox %%r14, %%rbx ;"
- "adcx %%r14, %%rbx ;"
- "movq %%rbx, 120(%0) ;"
- :
- : "r"(c), "r"(a), "r"(b)
- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
- "%r10", "%r11", "%r13", "%r14", "%r15");
-}
-
-static void mul2_256x256_integer_bmi2(u64 *const c, const u64 *const a,
- const u64 *const b)
+/* Returns an all-ones mask when a == b and all-zeroes otherwise, in constant time */
+static __always_inline u64 eq_mask(u64 a, u64 b)
{
- asm volatile(
- "movq (%1), %%rdx; " /* A[0] */
- "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */
- "movq %%r8, (%0) ;"
- "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */
- "addq %%r10, %%r15 ;"
- "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */
- "adcq %%r8, %%rax ;"
- "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */
- "adcq %%r10, %%rbx ;"
- /******************************************/
- "adcq $0, %%rcx ;"
-
- "movq 8(%1), %%rdx; " /* A[1] */
- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
- "addq %%r15, %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%r15 ;"
-
- "addq %%r9, %%rax ;"
- "adcq %%r11, %%rbx ;"
- "adcq %%r13, %%rcx ;"
- "adcq $0, %%r15 ;"
-
- "movq 16(%1), %%rdx; " /* A[2] */
- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
- "addq %%rax, %%r8 ;"
- "movq %%r8, 16(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%rax ;"
-
- "addq %%r9, %%rbx ;"
- "adcq %%r11, %%rcx ;"
- "adcq %%r13, %%r15 ;"
- "adcq $0, %%rax ;"
-
- "movq 24(%1), %%rdx; " /* A[3] */
- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
- "addq %%rbx, %%r8 ;"
- "movq %%r8, 24(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%rbx ;"
-
- "addq %%r9, %%rcx ;"
- "movq %%rcx, 32(%0) ;"
- "adcq %%r11, %%r15 ;"
- "movq %%r15, 40(%0) ;"
- "adcq %%r13, %%rax ;"
- "movq %%rax, 48(%0) ;"
- "adcq $0, %%rbx ;"
- "movq %%rbx, 56(%0) ;"
-
- "movq 32(%1), %%rdx; " /* C[0] */
- "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */
- "movq %%r8, 64(%0) ;"
- "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */
- "addq %%r10, %%r15 ;"
- "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */
- "adcq %%r8, %%rax ;"
- "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */
- "adcq %%r10, %%rbx ;"
- /******************************************/
- "adcq $0, %%rcx ;"
-
- "movq 40(%1), %%rdx; " /* C[1] */
- "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */
- "addq %%r15, %%r8 ;"
- "movq %%r8, 72(%0) ;"
- "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%r15 ;"
-
- "addq %%r9, %%rax ;"
- "adcq %%r11, %%rbx ;"
- "adcq %%r13, %%rcx ;"
- "adcq $0, %%r15 ;"
-
- "movq 48(%1), %%rdx; " /* C[2] */
- "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */
- "addq %%rax, %%r8 ;"
- "movq %%r8, 80(%0) ;"
- "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%rax ;"
-
- "addq %%r9, %%rbx ;"
- "adcq %%r11, %%rcx ;"
- "adcq %%r13, %%r15 ;"
- "adcq $0, %%rax ;"
-
- "movq 56(%1), %%rdx; " /* C[3] */
- "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */
- "addq %%rbx, %%r8 ;"
- "movq %%r8, 88(%0) ;"
- "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%rbx ;"
-
- "addq %%r9, %%rcx ;"
- "movq %%rcx, 96(%0) ;"
- "adcq %%r11, %%r15 ;"
- "movq %%r15, 104(%0) ;"
- "adcq %%r13, %%rax ;"
- "movq %%rax, 112(%0) ;"
- "adcq $0, %%rbx ;"
- "movq %%rbx, 120(%0) ;"
- :
- : "r"(c), "r"(a), "r"(b)
- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
- "%r10", "%r11", "%r13", "%r15");
+ u64 x = a ^ b;
+ u64 minus_x = ~x + (u64)1U;
+ u64 x_or_minus_x = x | minus_x;
+ u64 xnx = x_or_minus_x >> (u32)63U;
+ return xnx - (u64)1U;
}
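
eq_mask() above is plain C and already branch-free: for any nonzero x, x | -x has bit 63 set, so the shift yields 1 and the subtraction gives 0; when a == b the shift yields 0 and the subtraction wraps to all-ones. A hypothetical usage sketch showing the branch-free selection such masks enable (ct_select_ref is illustrative, not part of this patch):

static u64 ct_select_ref(u64 a, u64 b, u64 x, u64 y)
{
	u64 m = eq_mask(a, b);       /* all-ones when a == b, else 0 */
	return (x & m) | (y & ~m);   /* x when a == b, else y, no branch */
}
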
-static void sqr2_256x256_integer_adx(u64 *const c, const u64 *const a)
+/* Returns an all-ones mask when a >= b and all-zeroes otherwise, in constant time */
+static __always_inline u64 gte_mask(u64 a, u64 b)
{
- asm volatile(
- "movq (%1), %%rdx ;" /* A[0] */
- "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */
- "xorl %%r15d, %%r15d;"
- "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */
- "adcx %%r14, %%r9 ;"
- "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */
- "adcx %%rax, %%r10 ;"
- "movq 24(%1), %%rdx ;" /* A[3] */
- "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */
- "adcx %%rcx, %%r11 ;"
- "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */
- "adcx %%rax, %%rbx ;"
- "movq 8(%1), %%rdx ;" /* A[1] */
- "adcx %%r15, %%r13 ;"
- "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */
- "movq $0, %%r14 ;"
- /******************************************/
- "adcx %%r15, %%r14 ;"
-
- "xorl %%r15d, %%r15d;"
- "adox %%rax, %%r10 ;"
- "adcx %%r8, %%r8 ;"
- "adox %%rcx, %%r11 ;"
- "adcx %%r9, %%r9 ;"
- "adox %%r15, %%rbx ;"
- "adcx %%r10, %%r10 ;"
- "adox %%r15, %%r13 ;"
- "adcx %%r11, %%r11 ;"
- "adox %%r15, %%r14 ;"
- "adcx %%rbx, %%rbx ;"
- "adcx %%r13, %%r13 ;"
- "adcx %%r14, %%r14 ;"
-
- "movq (%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */
- /*******************/
- "movq %%rax, 0(%0) ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "movq 8(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */
- "adcq %%rax, %%r9 ;"
- "movq %%r9, 16(%0) ;"
- "adcq %%rcx, %%r10 ;"
- "movq %%r10, 24(%0) ;"
- "movq 16(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */
- "adcq %%rax, %%r11 ;"
- "movq %%r11, 32(%0) ;"
- "adcq %%rcx, %%rbx ;"
- "movq %%rbx, 40(%0) ;"
- "movq 24(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */
- "adcq %%rax, %%r13 ;"
- "movq %%r13, 48(%0) ;"
- "adcq %%rcx, %%r14 ;"
- "movq %%r14, 56(%0) ;"
-
-
- "movq 32(%1), %%rdx ;" /* B[0] */
- "mulx 40(%1), %%r8, %%r14 ;" /* B[1]*B[0] */
- "xorl %%r15d, %%r15d;"
- "mulx 48(%1), %%r9, %%r10 ;" /* B[2]*B[0] */
- "adcx %%r14, %%r9 ;"
- "mulx 56(%1), %%rax, %%rcx ;" /* B[3]*B[0] */
- "adcx %%rax, %%r10 ;"
- "movq 56(%1), %%rdx ;" /* B[3] */
- "mulx 40(%1), %%r11, %%rbx ;" /* B[1]*B[3] */
- "adcx %%rcx, %%r11 ;"
- "mulx 48(%1), %%rax, %%r13 ;" /* B[2]*B[3] */
- "adcx %%rax, %%rbx ;"
- "movq 40(%1), %%rdx ;" /* B[1] */
- "adcx %%r15, %%r13 ;"
- "mulx 48(%1), %%rax, %%rcx ;" /* B[2]*B[1] */
- "movq $0, %%r14 ;"
- /******************************************/
- "adcx %%r15, %%r14 ;"
-
- "xorl %%r15d, %%r15d;"
- "adox %%rax, %%r10 ;"
- "adcx %%r8, %%r8 ;"
- "adox %%rcx, %%r11 ;"
- "adcx %%r9, %%r9 ;"
- "adox %%r15, %%rbx ;"
- "adcx %%r10, %%r10 ;"
- "adox %%r15, %%r13 ;"
- "adcx %%r11, %%r11 ;"
- "adox %%r15, %%r14 ;"
- "adcx %%rbx, %%rbx ;"
- "adcx %%r13, %%r13 ;"
- "adcx %%r14, %%r14 ;"
-
- "movq 32(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* B[0]^2 */
- /*******************/
- "movq %%rax, 64(%0) ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 72(%0) ;"
- "movq 40(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* B[1]^2 */
- "adcq %%rax, %%r9 ;"
- "movq %%r9, 80(%0) ;"
- "adcq %%rcx, %%r10 ;"
- "movq %%r10, 88(%0) ;"
- "movq 48(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* B[2]^2 */
- "adcq %%rax, %%r11 ;"
- "movq %%r11, 96(%0) ;"
- "adcq %%rcx, %%rbx ;"
- "movq %%rbx, 104(%0) ;"
- "movq 56(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* B[3]^2 */
- "adcq %%rax, %%r13 ;"
- "movq %%r13, 112(%0) ;"
- "adcq %%rcx, %%r14 ;"
- "movq %%r14, 120(%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
- "%r10", "%r11", "%r13", "%r14", "%r15");
+ u64 x = a;
+ u64 y = b;
+ u64 x_xor_y = x ^ y;
+ u64 x_sub_y = x - y;
+ u64 x_sub_y_xor_y = x_sub_y ^ y;
+ u64 q = x_xor_y | x_sub_y_xor_y;
+ u64 x_xor_q = x ^ q;
+ u64 x_xor_q_ = x_xor_q >> (u32)63U;
+ return x_xor_q_ - (u64)1U;
}
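
gte_mask() is the complementary comparison: q reconstructs the borrow of a - b from the operands' top bits, so bit 63 of a ^ q is 1 exactly when a < b, and the final subtraction turns that into an all-ones mask for a >= b. A quick illustration of the semantics (ct_min_ref is illustrative, not part of this patch):

static u64 ct_min_ref(u64 a, u64 b)
{
	u64 m = gte_mask(a, b);      /* all-ones when a >= b, else 0 */
	return (b & m) | (a & ~m);   /* min(a, b) without a branch */
}
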
-static void sqr2_256x256_integer_bmi2(u64 *const c, const u64 *const a)
+/* Computes the sum of the four-element (256-bit) f1 and the 64-bit value f2,
+ * and returns the carry (if any) */
+static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2)
{
- asm volatile(
- "movq 8(%1), %%rdx ;" /* A[1] */
- "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */
- "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */
- "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */
-
- "movq 16(%1), %%rdx ;" /* A[2] */
- "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */
- "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */
-
- "addq %%rax, %%r9 ;"
- "adcq %%rdx, %%r10 ;"
- "adcq %%rcx, %%r11 ;"
- "adcq %%r14, %%r15 ;"
- "adcq $0, %%r13 ;"
- "movq $0, %%r14 ;"
- "adcq $0, %%r14 ;"
-
- "movq (%1), %%rdx ;" /* A[0] */
- "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */
-
- "addq %%rax, %%r10 ;"
- "adcq %%rcx, %%r11 ;"
- "adcq $0, %%r15 ;"
- "adcq $0, %%r13 ;"
- "adcq $0, %%r14 ;"
-
- "shldq $1, %%r13, %%r14 ;"
- "shldq $1, %%r15, %%r13 ;"
- "shldq $1, %%r11, %%r15 ;"
- "shldq $1, %%r10, %%r11 ;"
- "shldq $1, %%r9, %%r10 ;"
- "shldq $1, %%r8, %%r9 ;"
- "shlq $1, %%r8 ;"
-
- /*******************/
- "mulx %%rdx, %%rax, %%rcx ; " /* A[0]^2 */
- /*******************/
- "movq %%rax, 0(%0) ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "movq 8(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ; " /* A[1]^2 */
- "adcq %%rax, %%r9 ;"
- "movq %%r9, 16(%0) ;"
- "adcq %%rcx, %%r10 ;"
- "movq %%r10, 24(%0) ;"
- "movq 16(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ; " /* A[2]^2 */
- "adcq %%rax, %%r11 ;"
- "movq %%r11, 32(%0) ;"
- "adcq %%rcx, %%r15 ;"
- "movq %%r15, 40(%0) ;"
- "movq 24(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ; " /* A[3]^2 */
- "adcq %%rax, %%r13 ;"
- "movq %%r13, 48(%0) ;"
- "adcq %%rcx, %%r14 ;"
- "movq %%r14, 56(%0) ;"
-
- "movq 40(%1), %%rdx ;" /* B[1] */
- "mulx 32(%1), %%r8, %%r9 ;" /* B[0]*B[1] */
- "mulx 48(%1), %%r10, %%r11 ;" /* B[2]*B[1] */
- "mulx 56(%1), %%rcx, %%r14 ;" /* B[3]*B[1] */
-
- "movq 48(%1), %%rdx ;" /* B[2] */
- "mulx 56(%1), %%r15, %%r13 ;" /* B[3]*B[2] */
- "mulx 32(%1), %%rax, %%rdx ;" /* B[0]*B[2] */
-
- "addq %%rax, %%r9 ;"
- "adcq %%rdx, %%r10 ;"
- "adcq %%rcx, %%r11 ;"
- "adcq %%r14, %%r15 ;"
- "adcq $0, %%r13 ;"
- "movq $0, %%r14 ;"
- "adcq $0, %%r14 ;"
-
- "movq 32(%1), %%rdx ;" /* B[0] */
- "mulx 56(%1), %%rax, %%rcx ;" /* B[0]*B[3] */
-
- "addq %%rax, %%r10 ;"
- "adcq %%rcx, %%r11 ;"
- "adcq $0, %%r15 ;"
- "adcq $0, %%r13 ;"
- "adcq $0, %%r14 ;"
-
- "shldq $1, %%r13, %%r14 ;"
- "shldq $1, %%r15, %%r13 ;"
- "shldq $1, %%r11, %%r15 ;"
- "shldq $1, %%r10, %%r11 ;"
- "shldq $1, %%r9, %%r10 ;"
- "shldq $1, %%r8, %%r9 ;"
- "shlq $1, %%r8 ;"
-
- /*******************/
- "mulx %%rdx, %%rax, %%rcx ; " /* B[0]^2 */
- /*******************/
- "movq %%rax, 64(%0) ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 72(%0) ;"
- "movq 40(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ; " /* B[1]^2 */
- "adcq %%rax, %%r9 ;"
- "movq %%r9, 80(%0) ;"
- "adcq %%rcx, %%r10 ;"
- "movq %%r10, 88(%0) ;"
- "movq 48(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ; " /* B[2]^2 */
- "adcq %%rax, %%r11 ;"
- "movq %%r11, 96(%0) ;"
- "adcq %%rcx, %%r15 ;"
- "movq %%r15, 104(%0) ;"
- "movq 56(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ; " /* B[3]^2 */
- "adcq %%rax, %%r13 ;"
- "movq %%r13, 112(%0) ;"
- "adcq %%rcx, %%r14 ;"
- "movq %%r14, 120(%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
- "%r11", "%r13", "%r14", "%r15");
-}
+ u64 carry_r;
-static void red_eltfp25519_2w_adx(u64 *const c, const u64 *const a)
-{
asm volatile(
- "movl $38, %%edx; " /* 2*c = 38 = 2^256 */
- "mulx 32(%1), %%r8, %%r10; " /* c*C[4] */
- "xorl %%ebx, %%ebx ;"
- "adox (%1), %%r8 ;"
- "mulx 40(%1), %%r9, %%r11; " /* c*C[5] */
- "adcx %%r10, %%r9 ;"
- "adox 8(%1), %%r9 ;"
- "mulx 48(%1), %%r10, %%rax; " /* c*C[6] */
- "adcx %%r11, %%r10 ;"
- "adox 16(%1), %%r10 ;"
- "mulx 56(%1), %%r11, %%rcx; " /* c*C[7] */
- "adcx %%rax, %%r11 ;"
- "adox 24(%1), %%r11 ;"
- /***************************************/
- "adcx %%rbx, %%rcx ;"
- "adox %%rbx, %%rcx ;"
- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */
- "adcx %%rcx, %%r8 ;"
- "adcx %%rbx, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "adcx %%rbx, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "adcx %%rbx, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%edx, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, (%0) ;"
-
- "mulx 96(%1), %%r8, %%r10; " /* c*C[4] */
- "xorl %%ebx, %%ebx ;"
- "adox 64(%1), %%r8 ;"
- "mulx 104(%1), %%r9, %%r11; " /* c*C[5] */
- "adcx %%r10, %%r9 ;"
- "adox 72(%1), %%r9 ;"
- "mulx 112(%1), %%r10, %%rax; " /* c*C[6] */
- "adcx %%r11, %%r10 ;"
- "adox 80(%1), %%r10 ;"
- "mulx 120(%1), %%r11, %%rcx; " /* c*C[7] */
- "adcx %%rax, %%r11 ;"
- "adox 88(%1), %%r11 ;"
- /****************************************/
- "adcx %%rbx, %%rcx ;"
- "adox %%rbx, %%rcx ;"
- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */
- "adcx %%rcx, %%r8 ;"
- "adcx %%rbx, %%r9 ;"
- "movq %%r9, 40(%0) ;"
- "adcx %%rbx, %%r10 ;"
- "movq %%r10, 48(%0) ;"
- "adcx %%rbx, %%r11 ;"
- "movq %%r11, 56(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%edx, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 32(%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
- "%r10", "%r11");
-}
+ /* Clear registers to propagate the carry bit */
+ " xor %%r8, %%r8;"
+ " xor %%r9, %%r9;"
+ " xor %%r10, %%r10;"
+ " xor %%r11, %%r11;"
+ " xor %1, %1;"
+
+ /* Begin addition chain */
+ " addq 0(%3), %0;"
+ " movq %0, 0(%2);"
+ " adcxq 8(%3), %%r8;"
+ " movq %%r8, 8(%2);"
+ " adcxq 16(%3), %%r9;"
+ " movq %%r9, 16(%2);"
+ " adcxq 24(%3), %%r10;"
+ " movq %%r10, 24(%2);"
+
+ /* Return the carry bit in a register */
+ " adcx %%r11, %1;"
+ : "+&r" (f2), "=&r" (carry_r)
+ : "r" (out), "r" (f1)
+ : "%r8", "%r9", "%r10", "%r11", "memory", "cc"
+ );
-static void red_eltfp25519_2w_bmi2(u64 *const c, const u64 *const a)
-{
- asm volatile(
- "movl $38, %%edx ; " /* 2*c = 38 = 2^256 */
- "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */
- "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */
- "addq %%r10, %%r9 ;"
- "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */
- "adcq %%r11, %%r10 ;"
- "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */
- "adcq %%rax, %%r11 ;"
- /***************************************/
- "adcq $0, %%rcx ;"
- "addq (%1), %%r8 ;"
- "adcq 8(%1), %%r9 ;"
- "adcq 16(%1), %%r10 ;"
- "adcq 24(%1), %%r11 ;"
- "adcq $0, %%rcx ;"
- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */
- "addq %%rcx, %%r8 ;"
- "adcq $0, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "adcq $0, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "adcq $0, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%edx, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, (%0) ;"
-
- "mulx 96(%1), %%r8, %%r10 ;" /* c*C[4] */
- "mulx 104(%1), %%r9, %%r11 ;" /* c*C[5] */
- "addq %%r10, %%r9 ;"
- "mulx 112(%1), %%r10, %%rax ;" /* c*C[6] */
- "adcq %%r11, %%r10 ;"
- "mulx 120(%1), %%r11, %%rcx ;" /* c*C[7] */
- "adcq %%rax, %%r11 ;"
- /****************************************/
- "adcq $0, %%rcx ;"
- "addq 64(%1), %%r8 ;"
- "adcq 72(%1), %%r9 ;"
- "adcq 80(%1), %%r10 ;"
- "adcq 88(%1), %%r11 ;"
- "adcq $0, %%rcx ;"
- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */
- "addq %%rcx, %%r8 ;"
- "adcq $0, %%r9 ;"
- "movq %%r9, 40(%0) ;"
- "adcq $0, %%r10 ;"
- "movq %%r10, 48(%0) ;"
- "adcq $0, %%r11 ;"
- "movq %%r11, 56(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%edx, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 32(%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
- "%r11");
+ return carry_r;
}
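
A portable rendering of what the asm above computes, chaining the carry through a 128-bit accumulator (add_scalar_ref is an illustrative sketch assuming GCC-style unsigned __int128, not the patch's code):

static u64 add_scalar_ref(u64 *out, const u64 *f1, u64 f2)
{
	unsigned __int128 acc = f2;
	int i;

	for (i = 0; i < 4; i++) {
		acc += f1[i];
		out[i] = (u64)acc;   /* low 64 bits of this limb */
		acc >>= 64;          /* carry into the next limb */
	}
	return (u64)acc;             /* carry out of the 256-bit sum */
}
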
-static void mul_256x256_integer_adx(u64 *const c, const u64 *const a,
- const u64 *const b)
+/* Computes the field addition of two field elements */
+static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
{
asm volatile(
- "movq (%1), %%rdx; " /* A[0] */
- "mulx (%2), %%r8, %%r9; " /* A[0]*B[0] */
- "xorl %%r10d, %%r10d ;"
- "movq %%r8, (%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[0]*B[1] */
- "adox %%r9, %%r10 ;"
- "movq %%r10, 8(%0) ;"
- "mulx 16(%2), %%r15, %%r13; " /* A[0]*B[2] */
- "adox %%r11, %%r15 ;"
- "mulx 24(%2), %%r14, %%rdx; " /* A[0]*B[3] */
- "adox %%r13, %%r14 ;"
- "movq $0, %%rax ;"
- /******************************************/
- "adox %%rdx, %%rax ;"
-
- "movq 8(%1), %%rdx; " /* A[1] */
- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
- "xorl %%r10d, %%r10d ;"
- "adcx 8(%0), %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
- "adox %%r9, %%r10 ;"
- "adcx %%r15, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "mulx 16(%2), %%r15, %%r13; " /* A[1]*B[2] */
- "adox %%r11, %%r15 ;"
- "adcx %%r14, %%r15 ;"
- "movq $0, %%r8 ;"
- "mulx 24(%2), %%r14, %%rdx; " /* A[1]*B[3] */
- "adox %%r13, %%r14 ;"
- "adcx %%rax, %%r14 ;"
- "movq $0, %%rax ;"
- /******************************************/
- "adox %%rdx, %%rax ;"
- "adcx %%r8, %%rax ;"
-
- "movq 16(%1), %%rdx; " /* A[2] */
- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
- "xorl %%r10d, %%r10d ;"
- "adcx 16(%0), %%r8 ;"
- "movq %%r8, 16(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
- "adox %%r9, %%r10 ;"
- "adcx %%r15, %%r10 ;"
- "movq %%r10, 24(%0) ;"
- "mulx 16(%2), %%r15, %%r13; " /* A[2]*B[2] */
- "adox %%r11, %%r15 ;"
- "adcx %%r14, %%r15 ;"
- "movq $0, %%r8 ;"
- "mulx 24(%2), %%r14, %%rdx; " /* A[2]*B[3] */
- "adox %%r13, %%r14 ;"
- "adcx %%rax, %%r14 ;"
- "movq $0, %%rax ;"
- /******************************************/
- "adox %%rdx, %%rax ;"
- "adcx %%r8, %%rax ;"
-
- "movq 24(%1), %%rdx; " /* A[3] */
- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
- "xorl %%r10d, %%r10d ;"
- "adcx 24(%0), %%r8 ;"
- "movq %%r8, 24(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
- "adox %%r9, %%r10 ;"
- "adcx %%r15, %%r10 ;"
- "movq %%r10, 32(%0) ;"
- "mulx 16(%2), %%r15, %%r13; " /* A[3]*B[2] */
- "adox %%r11, %%r15 ;"
- "adcx %%r14, %%r15 ;"
- "movq %%r15, 40(%0) ;"
- "movq $0, %%r8 ;"
- "mulx 24(%2), %%r14, %%rdx; " /* A[3]*B[3] */
- "adox %%r13, %%r14 ;"
- "adcx %%rax, %%r14 ;"
- "movq %%r14, 48(%0) ;"
- "movq $0, %%rax ;"
- /******************************************/
- "adox %%rdx, %%rax ;"
- "adcx %%r8, %%rax ;"
- "movq %%rax, 56(%0) ;"
- :
- : "r"(c), "r"(a), "r"(b)
- : "memory", "cc", "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11",
- "%r13", "%r14", "%r15");
+ /* Compute the raw addition of f1 + f2 */
+ " movq 0(%0), %%r8;"
+ " addq 0(%2), %%r8;"
+ " movq 8(%0), %%r9;"
+ " adcxq 8(%2), %%r9;"
+ " movq 16(%0), %%r10;"
+ " adcxq 16(%2), %%r10;"
+ " movq 24(%0), %%r11;"
+ " adcxq 24(%2), %%r11;"
+
+ /* Wrap the result back into the field */
+
+ /* Step 1: Compute carry*38 */
+ " mov $0, %%rax;"
+ " mov $38, %0;"
+ " cmovc %0, %%rax;"
+
+ /* Step 2: Add carry*38 to the original sum */
+ " xor %%rcx, %%rcx;"
+ " add %%rax, %%r8;"
+ " adcx %%rcx, %%r9;"
+ " movq %%r9, 8(%1);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 16(%1);"
+ " adcx %%rcx, %%r11;"
+ " movq %%r11, 24(%1);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %0, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 0(%1);"
+ : "+&r" (f2)
+ : "r" (out), "r" (f1)
+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"
+ );
}
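
The field is GF(2^255 - 19), but elements are carried around as full 256-bit values: since 2^256 == 38 (mod 2^255 - 19), a carry out of the top limb is folded back in as +38, and the second, single-limb fold can no longer carry. A portable sketch of the same two-step fold (fadd_ref is illustrative, assuming unsigned __int128):

static void fadd_ref(u64 *out, const u64 *f1, const u64 *f2)
{
	unsigned __int128 acc = 0;
	u64 carry;
	int i;

	for (i = 0; i < 4; i++) {    /* raw 256-bit addition */
		acc += (unsigned __int128)f1[i] + f2[i];
		out[i] = (u64)acc;
		acc >>= 64;
	}
	carry = (u64)acc * 38;       /* 2^256 == 38 (mod p) */
	for (i = 0; i < 4; i++) {    /* fold the carry back in */
		acc = (unsigned __int128)out[i] + carry;
		out[i] = (u64)acc;
		carry = (u64)(acc >> 64);
	}
	out[0] += carry * 38;        /* final fold cannot carry again */
}
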
-static void mul_256x256_integer_bmi2(u64 *const c, const u64 *const a,
- const u64 *const b)
+/* Computes the field subtraction of two field elements */
+static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
{
asm volatile(
- "movq (%1), %%rdx; " /* A[0] */
- "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */
- "movq %%r8, (%0) ;"
- "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */
- "addq %%r10, %%r15 ;"
- "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */
- "adcq %%r8, %%rax ;"
- "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */
- "adcq %%r10, %%rbx ;"
- /******************************************/
- "adcq $0, %%rcx ;"
-
- "movq 8(%1), %%rdx; " /* A[1] */
- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */
- "addq %%r15, %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%r15 ;"
-
- "addq %%r9, %%rax ;"
- "adcq %%r11, %%rbx ;"
- "adcq %%r13, %%rcx ;"
- "adcq $0, %%r15 ;"
-
- "movq 16(%1), %%rdx; " /* A[2] */
- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */
- "addq %%rax, %%r8 ;"
- "movq %%r8, 16(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%rax ;"
-
- "addq %%r9, %%rbx ;"
- "adcq %%r11, %%rcx ;"
- "adcq %%r13, %%r15 ;"
- "adcq $0, %%rax ;"
-
- "movq 24(%1), %%rdx; " /* A[3] */
- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */
- "addq %%rbx, %%r8 ;"
- "movq %%r8, 24(%0) ;"
- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */
- "adcq %%r10, %%r9 ;"
- "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */
- "adcq %%r8, %%r11 ;"
- "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */
- "adcq %%r10, %%r13 ;"
- /******************************************/
- "adcq $0, %%rbx ;"
-
- "addq %%r9, %%rcx ;"
- "movq %%rcx, 32(%0) ;"
- "adcq %%r11, %%r15 ;"
- "movq %%r15, 40(%0) ;"
- "adcq %%r13, %%rax ;"
- "movq %%rax, 48(%0) ;"
- "adcq $0, %%rbx ;"
- "movq %%rbx, 56(%0) ;"
- :
- : "r"(c), "r"(a), "r"(b)
- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
- "%r10", "%r11", "%r13", "%r15");
+ /* Compute the raw subtraction of f1 - f2 */
+ " movq 0(%1), %%r8;"
+ " subq 0(%2), %%r8;"
+ " movq 8(%1), %%r9;"
+ " sbbq 8(%2), %%r9;"
+ " movq 16(%1), %%r10;"
+ " sbbq 16(%2), %%r10;"
+ " movq 24(%1), %%r11;"
+ " sbbq 24(%2), %%r11;"
+
+ /* Wrap the result back into the field */
+
+ /* Step 1: Compute carry*38 */
+ " mov $0, %%rax;"
+ " mov $38, %%rcx;"
+ " cmovc %%rcx, %%rax;"
+
+ /* Step 2: Subtract carry*38 from the original difference */
+ " sub %%rax, %%r8;"
+ " sbb $0, %%r9;"
+ " sbb $0, %%r10;"
+ " sbb $0, %%r11;"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rcx, %%rax;"
+ " sub %%rax, %%r8;"
+
+ /* Store the result */
+ " movq %%r8, 0(%0);"
+ " movq %%r9, 8(%0);"
+ " movq %%r10, 16(%0);"
+ " movq %%r11, 24(%0);"
+ :
+ : "r" (out), "r" (f1), "r" (f2)
+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc"
+ );
}
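
Subtraction mirrors the addition: a borrow out of the top limb means the result wrapped modulo 2^256, and since -2^256 == -38 (mod p) the fix-up is to subtract 38. An illustrative sketch with a signed 128-bit accumulator (fsub_ref is not the patch's code; it assumes GCC's arithmetic right shift on __int128):

static void fsub_ref(u64 *out, const u64 *f1, const u64 *f2)
{
	__int128 acc = 0;
	u64 adj;
	int i;

	for (i = 0; i < 4; i++) {    /* raw 256-bit subtraction */
		acc += (__int128)f1[i] - f2[i];
		out[i] = (u64)acc;
		acc >>= 64;          /* arithmetic shift: 0 or -1 */
	}
	adj = (u64)(-acc) * 38;      /* 38 on borrow, else 0 */
	acc = 0;
	for (i = 0; i < 4; i++) {    /* fold the borrow back in */
		acc += (__int128)out[i] - (i == 0 ? adj : 0);
		out[i] = (u64)acc;
		acc >>= 64;
	}
	out[0] -= (u64)(-acc) * 38;  /* final fold cannot borrow again */
}
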
-static void sqr_256x256_integer_adx(u64 *const c, const u64 *const a)
+/* Computes a field multiplication: out <- f1 * f2
+ * Uses the 8-element buffer tmp for intermediate results */
+static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
asm volatile(
- "movq (%1), %%rdx ;" /* A[0] */
- "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */
- "xorl %%r15d, %%r15d;"
- "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */
- "adcx %%r14, %%r9 ;"
- "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */
- "adcx %%rax, %%r10 ;"
- "movq 24(%1), %%rdx ;" /* A[3] */
- "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */
- "adcx %%rcx, %%r11 ;"
- "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */
- "adcx %%rax, %%rbx ;"
- "movq 8(%1), %%rdx ;" /* A[1] */
- "adcx %%r15, %%r13 ;"
- "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */
- "movq $0, %%r14 ;"
- /******************************************/
- "adcx %%r15, %%r14 ;"
-
- "xorl %%r15d, %%r15d;"
- "adox %%rax, %%r10 ;"
- "adcx %%r8, %%r8 ;"
- "adox %%rcx, %%r11 ;"
- "adcx %%r9, %%r9 ;"
- "adox %%r15, %%rbx ;"
- "adcx %%r10, %%r10 ;"
- "adox %%r15, %%r13 ;"
- "adcx %%r11, %%r11 ;"
- "adox %%r15, %%r14 ;"
- "adcx %%rbx, %%rbx ;"
- "adcx %%r13, %%r13 ;"
- "adcx %%r14, %%r14 ;"
-
- "movq (%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */
- /*******************/
- "movq %%rax, 0(%0) ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "movq 8(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */
- "adcq %%rax, %%r9 ;"
- "movq %%r9, 16(%0) ;"
- "adcq %%rcx, %%r10 ;"
- "movq %%r10, 24(%0) ;"
- "movq 16(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */
- "adcq %%rax, %%r11 ;"
- "movq %%r11, 32(%0) ;"
- "adcq %%rcx, %%rbx ;"
- "movq %%rbx, 40(%0) ;"
- "movq 24(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */
- "adcq %%rax, %%r13 ;"
- "movq %%r13, 48(%0) ;"
- "adcq %%rcx, %%r14 ;"
- "movq %%r14, 56(%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
- "%r10", "%r11", "%r13", "%r14", "%r15");
+ /* Compute the raw multiplication: tmp <- src1 * src2 */
+
+ /* Compute src1[0] * src2 */
+ " movq 0(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ /* Compute src1[1] * src2 */
+ " movq 8(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ /* Compute src1[2] * src2 */
+ " movq 16(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ /* Compute src1[3] * src2 */
+ " movq 24(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);"
+ /* Line up pointers */
+ " mov %0, %1;"
+ " mov %2, %0;"
+
+ /* Wrap the result back into the field */
+
+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
+ " mov $38, %%rdx;"
+ " mulxq 32(%1), %%r8, %%r13;"
+ " xor %3, %3;"
+ " adoxq 0(%1), %%r8;"
+ " mulxq 40(%1), %%r9, %%rbx;"
+ " adcx %%r13, %%r9;"
+ " adoxq 8(%1), %%r9;"
+ " mulxq 48(%1), %%r10, %%r13;"
+ " adcx %%rbx, %%r10;"
+ " adoxq 16(%1), %%r10;"
+ " mulxq 56(%1), %%r11, %%rax;"
+ " adcx %%r13, %%r11;"
+ " adoxq 24(%1), %%r11;"
+ " adcx %3, %%rax;"
+ " adox %3, %%rax;"
+ " imul %%rdx, %%rax;"
+
+ /* Step 2: Fold the carry back into dst */
+ " add %%rax, %%r8;"
+ " adcx %3, %%r9;"
+ " movq %%r9, 8(%0);"
+ " adcx %3, %%r10;"
+ " movq %%r10, 16(%0);"
+ " adcx %3, %%r11;"
+ " movq %%r11, 24(%0);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rdx, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 0(%0);"
+ : "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2)
+ :
+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc"
+ );
}
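
The strategy above is a schoolbook 4x4 multiply into the 8-limb tmp buffer (the ADCX/ADOX pair lets the asm run two independent carry chains per row), then a reduction that folds the high 256 bits, scaled by 38, into the low half. A portable single-carry-chain sketch (fmul_ref is illustrative, assuming unsigned __int128):

static void fmul_ref(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
	unsigned __int128 acc;
	u64 carry;
	int i, j;

	for (i = 0; i < 8; i++)      /* tmp <- f1 * f2, full 512 bits */
		tmp[i] = 0;
	for (i = 0; i < 4; i++) {
		acc = 0;
		for (j = 0; j < 4; j++) {
			acc += (unsigned __int128)f1[i] * f2[j] + tmp[i + j];
			tmp[i + j] = (u64)acc;
			acc >>= 64;
		}
		tmp[i + 4] = (u64)acc;
	}
	acc = 0;                     /* out <- tmp_lo + 38 * tmp_hi */
	for (i = 0; i < 4; i++) {
		acc += (unsigned __int128)tmp[i + 4] * 38 + tmp[i];
		out[i] = (u64)acc;
		acc >>= 64;
	}
	carry = (u64)acc * 38;       /* residual carry is at most 38 */
	for (i = 0; i < 4; i++) {
		acc = (unsigned __int128)out[i] + carry;
		out[i] = (u64)acc;
		carry = (u64)(acc >> 64);
	}
	out[0] += carry * 38;        /* final fold cannot carry again */
}
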
-static void sqr_256x256_integer_bmi2(u64 *const c, const u64 *const a)
+/* Computes two field multiplications:
+ * out[0] <- f1[0] * f2[0]
+ * out[1] <- f1[1] * f2[1]
+ * Uses the 16-element buffer tmp for intermediate results. */
+static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
asm volatile(
- "movq 8(%1), %%rdx ;" /* A[1] */
- "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */
- "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */
- "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */
-
- "movq 16(%1), %%rdx ;" /* A[2] */
- "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */
- "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */
-
- "addq %%rax, %%r9 ;"
- "adcq %%rdx, %%r10 ;"
- "adcq %%rcx, %%r11 ;"
- "adcq %%r14, %%r15 ;"
- "adcq $0, %%r13 ;"
- "movq $0, %%r14 ;"
- "adcq $0, %%r14 ;"
-
- "movq (%1), %%rdx ;" /* A[0] */
- "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */
-
- "addq %%rax, %%r10 ;"
- "adcq %%rcx, %%r11 ;"
- "adcq $0, %%r15 ;"
- "adcq $0, %%r13 ;"
- "adcq $0, %%r14 ;"
-
- "shldq $1, %%r13, %%r14 ;"
- "shldq $1, %%r15, %%r13 ;"
- "shldq $1, %%r11, %%r15 ;"
- "shldq $1, %%r10, %%r11 ;"
- "shldq $1, %%r9, %%r10 ;"
- "shldq $1, %%r8, %%r9 ;"
- "shlq $1, %%r8 ;"
-
- /*******************/
- "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */
- /*******************/
- "movq %%rax, 0(%0) ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, 8(%0) ;"
- "movq 8(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */
- "adcq %%rax, %%r9 ;"
- "movq %%r9, 16(%0) ;"
- "adcq %%rcx, %%r10 ;"
- "movq %%r10, 24(%0) ;"
- "movq 16(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */
- "adcq %%rax, %%r11 ;"
- "movq %%r11, 32(%0) ;"
- "adcq %%rcx, %%r15 ;"
- "movq %%r15, 40(%0) ;"
- "movq 24(%1), %%rdx ;"
- "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */
- "adcq %%rax, %%r13 ;"
- "movq %%r13, 48(%0) ;"
- "adcq %%rcx, %%r14 ;"
- "movq %%r14, 56(%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
- "%r11", "%r13", "%r14", "%r15");
+ /* Compute the raw multiplication tmp[0] <- f1[0] * f2[0] */
+
+ /* Compute src1[0] * src2 */
+ " movq 0(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ /* Compute src1[1] * src2 */
+ " movq 8(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ /* Compute src1[2] * src2 */
+ " movq 16(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ /* Compute src1[3] * src2 */
+ " movq 24(%1), %%rdx;"
+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);"
+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);"
+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;"
+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);"
+
+ /* Compute the raw multiplication tmp[1] <- f1[1] * f2[1] */
+
+ /* Compute src1[0] * src2 */
+ " movq 32(%1), %%rdx;"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;"
+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;"
+ /* Compute src1[1] * src2 */
+ " movq 40(%1), %%rdx;"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ /* Compute src1[2] * src2 */
+ " movq 48(%1), %%rdx;"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;"
+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;"
+ /* Compute src1[3] * src2 */
+ " movq 56(%1), %%rdx;"
+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);"
+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);"
+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%0);" " mov $0, %%r8;"
+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%0);" " mov $0, %%rax;"
+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);"
+ /* Line up pointers */
+ " mov %0, %1;"
+ " mov %2, %0;"
+
+ /* Wrap the results back into the field */
+
+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
+ " mov $38, %%rdx;"
+ " mulxq 32(%1), %%r8, %%r13;"
+ " xor %3, %3;"
+ " adoxq 0(%1), %%r8;"
+ " mulxq 40(%1), %%r9, %%rbx;"
+ " adcx %%r13, %%r9;"
+ " adoxq 8(%1), %%r9;"
+ " mulxq 48(%1), %%r10, %%r13;"
+ " adcx %%rbx, %%r10;"
+ " adoxq 16(%1), %%r10;"
+ " mulxq 56(%1), %%r11, %%rax;"
+ " adcx %%r13, %%r11;"
+ " adoxq 24(%1), %%r11;"
+ " adcx %3, %%rax;"
+ " adox %3, %%rax;"
+ " imul %%rdx, %%rax;"
+
+ /* Step 2: Fold the carry back into dst */
+ " add %%rax, %%r8;"
+ " adcx %3, %%r9;"
+ " movq %%r9, 8(%0);"
+ " adcx %3, %%r10;"
+ " movq %%r10, 16(%0);"
+ " adcx %3, %%r11;"
+ " movq %%r11, 24(%0);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rdx, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 0(%0);"
+
+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
+ " mov $38, %%rdx;"
+ " mulxq 96(%1), %%r8, %%r13;"
+ " xor %3, %3;"
+ " adoxq 64(%1), %%r8;"
+ " mulxq 104(%1), %%r9, %%rbx;"
+ " adcx %%r13, %%r9;"
+ " adoxq 72(%1), %%r9;"
+ " mulxq 112(%1), %%r10, %%r13;"
+ " adcx %%rbx, %%r10;"
+ " adoxq 80(%1), %%r10;"
+ " mulxq 120(%1), %%r11, %%rax;"
+ " adcx %%r13, %%r11;"
+ " adoxq 88(%1), %%r11;"
+ " adcx %3, %%rax;"
+ " adox %3, %%rax;"
+ " imul %%rdx, %%rax;"
+
+ /* Step 2: Fold the carry back into dst */
+ " add %%rax, %%r8;"
+ " adcx %3, %%r9;"
+ " movq %%r9, 40(%0);"
+ " adcx %3, %%r10;"
+ " movq %%r10, 48(%0);"
+ " adcx %3, %%r11;"
+ " movq %%r11, 56(%0);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rdx, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 32(%0);"
+ : "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2)
+ :
+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc"
+ );
}
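
fmul2 is the fused form of two independent multiplications, amortizing the register setup and reduction code over both stacked elements; semantically it matches this illustrative sketch (fmul2_ref is not part of the patch):

static void fmul2_ref(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp)
{
	fmul(out,     f1,     f2,     tmp);      /* out[0] <- f1[0] * f2[0] */
	fmul(out + 4, f1 + 4, f2 + 4, tmp + 8);  /* out[1] <- f1[1] * f2[1] */
}
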
-static void red_eltfp25519_1w_adx(u64 *const c, const u64 *const a)
+/* Computes the field multiplication of the four-element f1 by the 64-bit value f2 */
+static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2)
{
- asm volatile(
- "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */
- "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */
- "xorl %%ebx, %%ebx ;"
- "adox (%1), %%r8 ;"
- "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */
- "adcx %%r10, %%r9 ;"
- "adox 8(%1), %%r9 ;"
- "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */
- "adcx %%r11, %%r10 ;"
- "adox 16(%1), %%r10 ;"
- "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */
- "adcx %%rax, %%r11 ;"
- "adox 24(%1), %%r11 ;"
- /***************************************/
- "adcx %%rbx, %%rcx ;"
- "adox %%rbx, %%rcx ;"
- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */
- "adcx %%rcx, %%r8 ;"
- "adcx %%rbx, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "adcx %%rbx, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "adcx %%rbx, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%edx, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, (%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9",
- "%r10", "%r11");
-}
+ register u64 f2_r asm("rdx") = f2;
-static void red_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a)
-{
asm volatile(
- "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */
- "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */
- "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */
- "addq %%r10, %%r9 ;"
- "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */
- "adcq %%r11, %%r10 ;"
- "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */
- "adcq %%rax, %%r11 ;"
- /***************************************/
- "adcq $0, %%rcx ;"
- "addq (%1), %%r8 ;"
- "adcq 8(%1), %%r9 ;"
- "adcq 16(%1), %%r10 ;"
- "adcq 24(%1), %%r11 ;"
- "adcq $0, %%rcx ;"
- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */
- "addq %%rcx, %%r8 ;"
- "adcq $0, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "adcq $0, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "adcq $0, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%edx, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, (%0) ;"
- :
- : "r"(c), "r"(a)
- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
- "%r11");
+ /* Compute the raw multiplication of f1*f2 */
+ " mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */
+ " mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */
+ " add %%rcx, %%r9;"
+ " mov $0, %%rcx;"
+ " mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */
+ " adcx %%rbx, %%r10;"
+ " mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */
+ " adcx %%r13, %%r11;"
+ " adcx %%rcx, %%rax;"
+
+ /* Wrap the result back into the field */
+
+ /* Step 1: Compute carry*38 */
+ " mov $38, %%rdx;"
+ " imul %%rdx, %%rax;"
+
+ /* Step 2: Fold the carry back into dst */
+ " add %%rax, %%r8;"
+ " adcx %%rcx, %%r9;"
+ " movq %%r9, 8(%1);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 16(%1);"
+ " adcx %%rcx, %%r11;"
+ " movq %%r11, 24(%1);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rdx, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 0(%1);"
+ : "+&r" (f2_r)
+ : "r" (out), "r" (f1)
+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "memory", "cc"
+ );
}
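
The single imul in Step 1 is sound only because callers pass a small f2 (the Montgomery ladder uses the curve constant 121665), so carry * 38 fits comfortably in 64 bits. A portable sketch under that assumption (fmul_scalar_ref is illustrative):

static void fmul_scalar_ref(u64 *out, const u64 *f1, u64 f2)
{
	unsigned __int128 acc = 0;
	u64 carry;
	int i;

	for (i = 0; i < 4; i++) {    /* 256-bit x 64-bit product */
		acc += (unsigned __int128)f1[i] * f2;
		out[i] = (u64)acc;
		acc >>= 64;
	}
	carry = (u64)acc * 38;       /* assumes f2 is small */
	for (i = 0; i < 4; i++) {
		acc = (unsigned __int128)out[i] + carry;
		out[i] = (u64)acc;
		carry = (u64)(acc >> 64);
	}
	out[0] += carry * 38;        /* final fold cannot carry again */
}
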
-static __always_inline void
-add_eltfp25519_1w_adx(u64 *const c, const u64 *const a, const u64 *const b)
+/* Conditionally swaps the contents of the 8-element buffers p1 and p2
+ * (two stacked field elements each) when bit == 1, in constant time */
+static inline void cswap2(u64 bit, const u64 *p1, const u64 *p2)
{
asm volatile(
- "mov $38, %%eax ;"
- "xorl %%ecx, %%ecx ;"
- "movq (%2), %%r8 ;"
- "adcx (%1), %%r8 ;"
- "movq 8(%2), %%r9 ;"
- "adcx 8(%1), %%r9 ;"
- "movq 16(%2), %%r10 ;"
- "adcx 16(%1), %%r10 ;"
- "movq 24(%2), %%r11 ;"
- "adcx 24(%1), %%r11 ;"
- "cmovc %%eax, %%ecx ;"
- "xorl %%eax, %%eax ;"
- "adcx %%rcx, %%r8 ;"
- "adcx %%rax, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "adcx %%rax, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "adcx %%rax, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $38, %%ecx ;"
- "cmovc %%ecx, %%eax ;"
- "addq %%rax, %%r8 ;"
- "movq %%r8, (%0) ;"
- :
- : "r"(c), "r"(a), "r"(b)
- : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11");
+ /* Transfer bit into the carry flag: CF = 1 iff bit == 1 */
+ " add $18446744073709551615, %0;"
+
+ /* cswap p1[0], p2[0] */
+ " movq 0(%1), %%r8;"
+ " movq 0(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 0(%1);"
+ " movq %%r9, 0(%2);"
+
+ /* cswap p1[1], p2[1] */
+ " movq 8(%1), %%r8;"
+ " movq 8(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 8(%1);"
+ " movq %%r9, 8(%2);"
+
+ /* cswap p1[2], p2[2] */
+ " movq 16(%1), %%r8;"
+ " movq 16(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 16(%1);"
+ " movq %%r9, 16(%2);"
+
+ /* cswap p1[3], p2[3] */
+ " movq 24(%1), %%r8;"
+ " movq 24(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 24(%1);"
+ " movq %%r9, 24(%2);"
+
+ /* cswap p1[4], p2[4] */
+ " movq 32(%1), %%r8;"
+ " movq 32(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 32(%1);"
+ " movq %%r9, 32(%2);"
+
+ /* cswap p1[5], p2[5] */
+ " movq 40(%1), %%r8;"
+ " movq 40(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 40(%1);"
+ " movq %%r9, 40(%2);"
+
+ /* cswap p1[6], p2[6] */
+ " movq 48(%1), %%r8;"
+ " movq 48(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 48(%1);"
+ " movq %%r9, 48(%2);"
+
+ /* cswap p1[7], p2[7] */
+ " movq 56(%1), %%r8;"
+ " movq 56(%2), %%r9;"
+ " mov %%r8, %%r10;"
+ " cmovc %%r9, %%r8;"
+ " cmovc %%r10, %%r9;"
+ " movq %%r8, 56(%1);"
+ " movq %%r9, 56(%2);"
+ : "+&r" (bit)
+ : "r" (p1), "r" (p2)
+ : "%r8", "%r9", "%r10", "memory", "cc"
+ );
}
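
The CMOV pairs above never branch on bit, so the swap leaks nothing through timing. The same operation is often written with XOR masks, which makes the constant-time property even more explicit (cswap2_ref is an illustrative equivalent, not the patch's code):

static void cswap2_ref(u64 bit, u64 *p1, u64 *p2)
{
	u64 mask = 0 - bit;          /* all-ones when bit == 1, else 0 */
	int i;

	for (i = 0; i < 8; i++) {
		u64 d = mask & (p1[i] ^ p2[i]);
		p1[i] ^= d;          /* both stores always happen, */
		p2[i] ^= d;          /* swapping only when mask is set */
	}
}
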
-static __always_inline void
-add_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a, const u64 *const b)
+/* Computes the square of a field element: out <- f * f
+ * Uses the 8-element buffer tmp for intermediate results */
+static inline void fsqr(u64 *out, const u64 *f, u64 *tmp)
{
asm volatile(
- "mov $38, %%eax ;"
- "movq (%2), %%r8 ;"
- "addq (%1), %%r8 ;"
- "movq 8(%2), %%r9 ;"
- "adcq 8(%1), %%r9 ;"
- "movq 16(%2), %%r10 ;"
- "adcq 16(%1), %%r10 ;"
- "movq 24(%2), %%r11 ;"
- "adcq 24(%1), %%r11 ;"
- "mov $0, %%ecx ;"
- "cmovc %%eax, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "adcq $0, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "adcq $0, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "adcq $0, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%eax, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, (%0) ;"
- :
- : "r"(c), "r"(a), "r"(b)
- : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11");
+ /* Compute the raw multiplication: tmp <- f * f */
+
+ /* Step 1: Compute all partial products */
+ " movq 0(%1), %%rdx;" /* f[0] */
+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
+ " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
+ " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
+ " movq 24(%1), %%rdx;" /* f[3] */
+ " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
+ " movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
+ " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
+
+ /* Step 2: Compute two parallel carry chains */
+ " xor %%r15, %%r15;"
+ " adox %%rax, %%r10;"
+ " adcx %%r8, %%r8;"
+ " adox %%rcx, %%r11;"
+ " adcx %%r9, %%r9;"
+ " adox %%r15, %%rbx;"
+ " adcx %%r10, %%r10;"
+ " adox %%r15, %%r13;"
+ " adcx %%r11, %%r11;"
+ " adox %%r15, %%r14;"
+ " adcx %%rbx, %%rbx;"
+ " adcx %%r13, %%r13;"
+ " adcx %%r14, %%r14;"
+
+ /* Step 3: Compute intermediate squares */
+ " movq 0(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
+ " movq %%rax, 0(%0);"
+ " add %%rcx, %%r8;" " movq %%r8, 8(%0);"
+ " movq 8(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
+ " adcx %%rax, %%r9;" " movq %%r9, 16(%0);"
+ " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);"
+ " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
+ " adcx %%rax, %%r11;" " movq %%r11, 32(%0);"
+ " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);"
+ " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
+ " adcx %%rax, %%r13;" " movq %%r13, 48(%0);"
+ " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);"
+
+ /* Line up pointers */
+ " mov %0, %1;"
+ " mov %2, %0;"
+
+ /* Wrap the result back into the field */
+
+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
+ " mov $38, %%rdx;"
+ " mulxq 32(%1), %%r8, %%r13;"
+ " xor %%rcx, %%rcx;"
+ " adoxq 0(%1), %%r8;"
+ " mulxq 40(%1), %%r9, %%rbx;"
+ " adcx %%r13, %%r9;"
+ " adoxq 8(%1), %%r9;"
+ " mulxq 48(%1), %%r10, %%r13;"
+ " adcx %%rbx, %%r10;"
+ " adoxq 16(%1), %%r10;"
+ " mulxq 56(%1), %%r11, %%rax;"
+ " adcx %%r13, %%r11;"
+ " adoxq 24(%1), %%r11;"
+ " adcx %%rcx, %%rax;"
+ " adox %%rcx, %%rax;"
+ " imul %%rdx, %%rax;"
+
+ /* Step 2: Fold the carry back into dst */
+ " add %%rax, %%r8;"
+ " adcx %%rcx, %%r9;"
+ " movq %%r9, 8(%0);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 16(%0);"
+ " adcx %%rcx, %%r11;"
+ " movq %%r11, 24(%0);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rdx, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 0(%0);"
+ : "+&r" (tmp), "+&r" (f), "+&r" (out)
+ :
+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc"
+ );
}
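
fsqr computes the same value as fmul(out, f, f, tmp) in roughly half the multiplications: every cross term f[i]*f[j] with i < j appears twice in the square, so Step 1 computes each once, Step 2 doubles the whole partial sum, and Step 3 adds the diagonal squares. The identity, and an illustrative semantic equivalent (fsqr_ref is not the patch's code):

/* (sum_i f[i] * 2^(64*i))^2 =
 *	sum_i f[i]^2 * 2^(128*i) + 2 * sum_{i<j} f[i]*f[j] * 2^(64*(i+j))
 */
static void fsqr_ref(u64 *out, const u64 *f, u64 *tmp)
{
	fmul(out, f, f, tmp);        /* same result, ~twice the mulx work */
}
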
-static __always_inline void
-sub_eltfp25519_1w(u64 *const c, const u64 *const a, const u64 *const b)
+/* Computes two field squarings:
+ * out[0] <- f[0] * f[0]
+ * out[1] <- f[1] * f[1]
+ * Uses the 16-element buffer tmp for intermediate results */
+static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp)
{
asm volatile(
- "mov $38, %%eax ;"
- "movq (%1), %%r8 ;"
- "subq (%2), %%r8 ;"
- "movq 8(%1), %%r9 ;"
- "sbbq 8(%2), %%r9 ;"
- "movq 16(%1), %%r10 ;"
- "sbbq 16(%2), %%r10 ;"
- "movq 24(%1), %%r11 ;"
- "sbbq 24(%2), %%r11 ;"
- "mov $0, %%ecx ;"
- "cmovc %%eax, %%ecx ;"
- "subq %%rcx, %%r8 ;"
- "sbbq $0, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "sbbq $0, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "sbbq $0, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%eax, %%ecx ;"
- "subq %%rcx, %%r8 ;"
- "movq %%r8, (%0) ;"
- :
- : "r"(c), "r"(a), "r"(b)
- : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11");
+ /* Step 1: Compute all partial products */
+ " movq 0(%1), %%rdx;" /* f[0] */
+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
+ " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
+ " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
+ " movq 24(%1), %%rdx;" /* f[3] */
+ " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
+ " movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
+ " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
+
+ /* Step 2: Compute two parallel carry chains */
+ " xor %%r15, %%r15;"
+ " adox %%rax, %%r10;"
+ " adcx %%r8, %%r8;"
+ " adox %%rcx, %%r11;"
+ " adcx %%r9, %%r9;"
+ " adox %%r15, %%rbx;"
+ " adcx %%r10, %%r10;"
+ " adox %%r15, %%r13;"
+ " adcx %%r11, %%r11;"
+ " adox %%r15, %%r14;"
+ " adcx %%rbx, %%rbx;"
+ " adcx %%r13, %%r13;"
+ " adcx %%r14, %%r14;"
+
+ /* Step 3: Compute intermediate squares */
+ " movq 0(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
+ " movq %%rax, 0(%0);"
+ " add %%rcx, %%r8;" " movq %%r8, 8(%0);"
+ " movq 8(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
+ " adcx %%rax, %%r9;" " movq %%r9, 16(%0);"
+ " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);"
+ " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
+ " adcx %%rax, %%r11;" " movq %%r11, 32(%0);"
+ " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);"
+ " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
+ " adcx %%rax, %%r13;" " movq %%r13, 48(%0);"
+ " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);"
+
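+ /* Same three steps again for the second field element f[4..7], */
+ /* squared into tmp[8..15]. */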
+ /* Step 1: Compute all partial products */
+ " movq 32(%1), %%rdx;" /* f[0] */
+ " mulxq 40(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */
+ " mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */
+ " mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */
+ " movq 56(%1), %%rdx;" /* f[3] */
+ " mulxq 40(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */
+ " mulxq 48(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */
+ " movq 40(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */
+ " mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */
+
+ /* Step 2: Compute two parallel carry chains */
+ " xor %%r15, %%r15;"
+ " adox %%rax, %%r10;"
+ " adcx %%r8, %%r8;"
+ " adox %%rcx, %%r11;"
+ " adcx %%r9, %%r9;"
+ " adox %%r15, %%rbx;"
+ " adcx %%r10, %%r10;"
+ " adox %%r15, %%r13;"
+ " adcx %%r11, %%r11;"
+ " adox %%r15, %%r14;"
+ " adcx %%rbx, %%rbx;"
+ " adcx %%r13, %%r13;"
+ " adcx %%r14, %%r14;"
+
+ /* Step 3: Compute intermediate squares */
+ " movq 32(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */
+ " movq %%rax, 64(%0);"
+ " add %%rcx, %%r8;" " movq %%r8, 72(%0);"
+ " movq 40(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */
+ " adcx %%rax, %%r9;" " movq %%r9, 80(%0);"
+ " adcx %%rcx, %%r10;" " movq %%r10, 88(%0);"
+ " movq 48(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */
+ " adcx %%rax, %%r11;" " movq %%r11, 96(%0);"
+ " adcx %%rcx, %%rbx;" " movq %%rbx, 104(%0);"
+ " movq 56(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */
+ " adcx %%rax, %%r13;" " movq %%r13, 112(%0);"
+ " adcx %%rcx, %%r14;" " movq %%r14, 120(%0);"
+
+ /* Line up pointers */
+ " mov %0, %1;"
+ " mov %2, %0;"
+
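+ /* Reduce the first square: tmp[0..7] -> out[0..3] */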
+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
+ " mov $38, %%rdx;"
+ " mulxq 32(%1), %%r8, %%r13;"
+ " xor %%rcx, %%rcx;"
+ " adoxq 0(%1), %%r8;"
+ " mulxq 40(%1), %%r9, %%rbx;"
+ " adcx %%r13, %%r9;"
+ " adoxq 8(%1), %%r9;"
+ " mulxq 48(%1), %%r10, %%r13;"
+ " adcx %%rbx, %%r10;"
+ " adoxq 16(%1), %%r10;"
+ " mulxq 56(%1), %%r11, %%rax;"
+ " adcx %%r13, %%r11;"
+ " adoxq 24(%1), %%r11;"
+ " adcx %%rcx, %%rax;"
+ " adox %%rcx, %%rax;"
+ " imul %%rdx, %%rax;"
+
+ /* Step 2: Fold the carry back into dst */
+ " add %%rax, %%r8;"
+ " adcx %%rcx, %%r9;"
+ " movq %%r9, 8(%0);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 16(%0);"
+ " adcx %%rcx, %%r11;"
+ " movq %%r11, 24(%0);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rdx, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 0(%0);"
+
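+ /* Reduce the second square: tmp[8..15] -> out[4..7] */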
+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */
+ " mov $38, %%rdx;"
+ " mulxq 96(%1), %%r8, %%r13;"
+ " xor %%rcx, %%rcx;"
+ " adoxq 64(%1), %%r8;"
+ " mulxq 104(%1), %%r9, %%rbx;"
+ " adcx %%r13, %%r9;"
+ " adoxq 72(%1), %%r9;"
+ " mulxq 112(%1), %%r10, %%r13;"
+ " adcx %%rbx, %%r10;"
+ " adoxq 80(%1), %%r10;"
+ " mulxq 120(%1), %%r11, %%rax;"
+ " adcx %%r13, %%r11;"
+ " adoxq 88(%1), %%r11;"
+ " adcx %%rcx, %%rax;"
+ " adox %%rcx, %%rax;"
+ " imul %%rdx, %%rax;"
+
+ /* Step 2: Fold the carry back into dst */
+ " add %%rax, %%r8;"
+ " adcx %%rcx, %%r9;"
+ " movq %%r9, 40(%0);"
+ " adcx %%rcx, %%r10;"
+ " movq %%r10, 48(%0);"
+ " adcx %%rcx, %%r11;"
+ " movq %%r11, 56(%0);"
+
+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */
+ " mov $0, %%rax;"
+ " cmovc %%rdx, %%rax;"
+ " add %%rax, %%r8;"
+ " movq %%r8, 32(%0);"
+ : "+&r" (tmp), "+&r" (f), "+&r" (out)
+ :
+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc"
+ );
}
-/* Multiplication by a24 = (A+2)/4 = (486662+2)/4 = 121666 */
-static __always_inline void
-mul_a24_eltfp25519_1w(u64 *const c, const u64 *const a)
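+/* One Montgomery ladder step on u-coordinates only: a differential
+ * addition of the two working points, using the base point's
+ * u-coordinate held in q, combined with a doubling of the first point. */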
+static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
{
- const u64 a24 = 121666;
- asm volatile(
- "movq %2, %%rdx ;"
- "mulx (%1), %%r8, %%r10 ;"
- "mulx 8(%1), %%r9, %%r11 ;"
- "addq %%r10, %%r9 ;"
- "mulx 16(%1), %%r10, %%rax ;"
- "adcq %%r11, %%r10 ;"
- "mulx 24(%1), %%r11, %%rcx ;"
- "adcq %%rax, %%r11 ;"
- /**************************/
- "adcq $0, %%rcx ;"
- "movl $38, %%edx ;" /* 2*c = 38 = 2^256 mod 2^255-19*/
- "imul %%rdx, %%rcx ;"
- "addq %%rcx, %%r8 ;"
- "adcq $0, %%r9 ;"
- "movq %%r9, 8(%0) ;"
- "adcq $0, %%r10 ;"
- "movq %%r10, 16(%0) ;"
- "adcq $0, %%r11 ;"
- "movq %%r11, 24(%0) ;"
- "mov $0, %%ecx ;"
- "cmovc %%edx, %%ecx ;"
- "addq %%rcx, %%r8 ;"
- "movq %%r8, (%0) ;"
- :
- : "r"(c), "r"(a), "r"(a24)
- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10",
- "%r11");
+ u64 *nq = p01_tmp1;
+ u64 *nq_p1 = p01_tmp1 + (u32)8U;
+ u64 *tmp1 = p01_tmp1 + (u32)16U;
+ u64 *x1 = q;
+ u64 *x2 = nq;
+ u64 *z2 = nq + (u32)4U;
+ u64 *z3 = nq_p1 + (u32)4U;
+ u64 *a = tmp1;
+ u64 *b = tmp1 + (u32)4U;
+ u64 *ab = tmp1;
+ u64 *dc = tmp1 + (u32)8U;
+ u64 *x3;
+ u64 *z31;
+ u64 *d0;
+ u64 *c0;
+ u64 *a1;
+ u64 *b1;
+ u64 *d;
+ u64 *c;
+ u64 *ab1;
+ u64 *dc1;
+ fadd(a, x2, z2);
+ fsub(b, x2, z2);
+ x3 = nq_p1;
+ z31 = nq_p1 + (u32)4U;
+ d0 = dc;
+ c0 = dc + (u32)4U;
+ fadd(c0, x3, z31);
+ fsub(d0, x3, z31);
+ fmul2(dc, dc, ab, tmp2);
+ fadd(x3, d0, c0);
+ fsub(z31, d0, c0);
+ a1 = tmp1;
+ b1 = tmp1 + (u32)4U;
+ d = tmp1 + (u32)8U;
+ c = tmp1 + (u32)12U;
+ ab1 = tmp1;
+ dc1 = tmp1 + (u32)8U;
+ fsqr2(dc1, ab1, tmp2);
+ fsqr2(nq_p1, nq_p1, tmp2);
+ a1[0U] = c[0U];
+ a1[1U] = c[1U];
+ a1[2U] = c[2U];
+ a1[3U] = c[3U];
+ fsub(c, d, c);
+ fmul_scalar(b1, c, (u64)121665U);
+ fadd(b1, b1, d);
+ fmul2(nq, dc1, ab1, tmp2);
+ fmul(z3, z3, x1, tmp2);
}
-static void inv_eltfp25519_1w_adx(u64 *const c, const u64 *const a)
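+/* Montgomery u-only doubling: with A = x2+z2, B = x2-z2 and E = A^2-B^2,
+ * computes x2 <- A^2*B^2 and z2 <- E*(A^2 + 121665*E). */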
+static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2)
{
- struct {
- eltfp25519_1w_buffer buffer;
- eltfp25519_1w x0, x1, x2;
- } __aligned(32) m;
- u64 *T[4];
-
- T[0] = m.x0;
- T[1] = c; /* x^(-1) */
- T[2] = m.x1;
- T[3] = m.x2;
-
- copy_eltfp25519_1w(T[1], a);
- sqrn_eltfp25519_1w_adx(T[1], 1);
- copy_eltfp25519_1w(T[2], T[1]);
- sqrn_eltfp25519_1w_adx(T[2], 2);
- mul_eltfp25519_1w_adx(T[0], a, T[2]);
- mul_eltfp25519_1w_adx(T[1], T[1], T[0]);
- copy_eltfp25519_1w(T[2], T[1]);
- sqrn_eltfp25519_1w_adx(T[2], 1);
- mul_eltfp25519_1w_adx(T[0], T[0], T[2]);
- copy_eltfp25519_1w(T[2], T[0]);
- sqrn_eltfp25519_1w_adx(T[2], 5);
- mul_eltfp25519_1w_adx(T[0], T[0], T[2]);
- copy_eltfp25519_1w(T[2], T[0]);
- sqrn_eltfp25519_1w_adx(T[2], 10);
- mul_eltfp25519_1w_adx(T[2], T[2], T[0]);
- copy_eltfp25519_1w(T[3], T[2]);
- sqrn_eltfp25519_1w_adx(T[3], 20);
- mul_eltfp25519_1w_adx(T[3], T[3], T[2]);
- sqrn_eltfp25519_1w_adx(T[3], 10);
- mul_eltfp25519_1w_adx(T[3], T[3], T[0]);
- copy_eltfp25519_1w(T[0], T[3]);
- sqrn_eltfp25519_1w_adx(T[0], 50);
- mul_eltfp25519_1w_adx(T[0], T[0], T[3]);
- copy_eltfp25519_1w(T[2], T[0]);
- sqrn_eltfp25519_1w_adx(T[2], 100);
- mul_eltfp25519_1w_adx(T[2], T[2], T[0]);
- sqrn_eltfp25519_1w_adx(T[2], 50);
- mul_eltfp25519_1w_adx(T[2], T[2], T[3]);
- sqrn_eltfp25519_1w_adx(T[2], 5);
- mul_eltfp25519_1w_adx(T[1], T[1], T[2]);
-
- memzero_explicit(&m, sizeof(m));
+ u64 *x2 = nq;
+ u64 *z2 = nq + (u32)4U;
+ u64 *a = tmp1;
+ u64 *b = tmp1 + (u32)4U;
+ u64 *d = tmp1 + (u32)8U;
+ u64 *c = tmp1 + (u32)12U;
+ u64 *ab = tmp1;
+ u64 *dc = tmp1 + (u32)8U;
+ fadd(a, x2, z2);
+ fsub(b, x2, z2);
+ fsqr2(dc, ab, tmp2);
+ a[0U] = c[0U];
+ a[1U] = c[1U];
+ a[2U] = c[2U];
+ a[3U] = c[3U];
+ fsub(c, d, c);
+ fmul_scalar(b, c, (u64)121665U);
+ fadd(b, b, d);
+ fmul2(nq, dc, ab, tmp2);
}
-static void inv_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a)
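+/* Constant-time Montgomery ladder over the clamped scalar: the initial
+ * forced iteration corresponds to bit 254 (always set after clamping),
+ * the loop scans bits 253..3, and three trailing doublings account for
+ * the three low bits (always clear after clamping). */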
+static void montgomery_ladder(u64 *out, const u8 *key, u64 *init1)
{
- struct {
- eltfp25519_1w_buffer buffer;
- eltfp25519_1w x0, x1, x2;
- } __aligned(32) m;
- u64 *T[5];
-
- T[0] = m.x0;
- T[1] = c; /* x^(-1) */
- T[2] = m.x1;
- T[3] = m.x2;
-
- copy_eltfp25519_1w(T[1], a);
- sqrn_eltfp25519_1w_bmi2(T[1], 1);
- copy_eltfp25519_1w(T[2], T[1]);
- sqrn_eltfp25519_1w_bmi2(T[2], 2);
- mul_eltfp25519_1w_bmi2(T[0], a, T[2]);
- mul_eltfp25519_1w_bmi2(T[1], T[1], T[0]);
- copy_eltfp25519_1w(T[2], T[1]);
- sqrn_eltfp25519_1w_bmi2(T[2], 1);
- mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]);
- copy_eltfp25519_1w(T[2], T[0]);
- sqrn_eltfp25519_1w_bmi2(T[2], 5);
- mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]);
- copy_eltfp25519_1w(T[2], T[0]);
- sqrn_eltfp25519_1w_bmi2(T[2], 10);
- mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]);
- copy_eltfp25519_1w(T[3], T[2]);
- sqrn_eltfp25519_1w_bmi2(T[3], 20);
- mul_eltfp25519_1w_bmi2(T[3], T[3], T[2]);
- sqrn_eltfp25519_1w_bmi2(T[3], 10);
- mul_eltfp25519_1w_bmi2(T[3], T[3], T[0]);
- copy_eltfp25519_1w(T[0], T[3]);
- sqrn_eltfp25519_1w_bmi2(T[0], 50);
- mul_eltfp25519_1w_bmi2(T[0], T[0], T[3]);
- copy_eltfp25519_1w(T[2], T[0]);
- sqrn_eltfp25519_1w_bmi2(T[2], 100);
- mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]);
- sqrn_eltfp25519_1w_bmi2(T[2], 50);
- mul_eltfp25519_1w_bmi2(T[2], T[2], T[3]);
- sqrn_eltfp25519_1w_bmi2(T[2], 5);
- mul_eltfp25519_1w_bmi2(T[1], T[1], T[2]);
-
- memzero_explicit(&m, sizeof(m));
+ u64 tmp2[16U] = { 0U };
+ u64 p01_tmp1_swap[33U] = { 0U };
+ u64 *p0 = p01_tmp1_swap;
+ u64 *p01 = p01_tmp1_swap;
+ u64 *p03 = p01;
+ u64 *p11 = p01 + (u32)8U;
+ u64 *x0;
+ u64 *z0;
+ u64 *p01_tmp1;
+ u64 *p01_tmp11;
+ u64 *nq10;
+ u64 *nq_p11;
+ u64 *swap1;
+ u64 sw0;
+ u64 *nq1;
+ u64 *tmp1;
+ memcpy(p11, init1, (u32)8U * sizeof(init1[0U]));
+ x0 = p03;
+ z0 = p03 + (u32)4U;
+ x0[0U] = (u64)1U;
+ x0[1U] = (u64)0U;
+ x0[2U] = (u64)0U;
+ x0[3U] = (u64)0U;
+ z0[0U] = (u64)0U;
+ z0[1U] = (u64)0U;
+ z0[2U] = (u64)0U;
+ z0[3U] = (u64)0U;
+ p01_tmp1 = p01_tmp1_swap;
+ p01_tmp11 = p01_tmp1_swap;
+ nq10 = p01_tmp1_swap;
+ nq_p11 = p01_tmp1_swap + (u32)8U;
+ swap1 = p01_tmp1_swap + (u32)32U;
+ cswap2((u64)1U, nq10, nq_p11);
+ point_add_and_double(init1, p01_tmp11, tmp2);
+ swap1[0U] = (u64)1U;
+ {
+ u32 i;
+ for (i = (u32)0U; i < (u32)251U; i = i + (u32)1U) {
+ u64 *p01_tmp12 = p01_tmp1_swap;
+ u64 *swap2 = p01_tmp1_swap + (u32)32U;
+ u64 *nq2 = p01_tmp12;
+ u64 *nq_p12 = p01_tmp12 + (u32)8U;
+ u64 bit = (u64)(key[((u32)253U - i) / (u32)8U] >> ((u32)253U - i) % (u32)8U & (u8)1U);
+ u64 sw = swap2[0U] ^ bit;
+ cswap2(sw, nq2, nq_p12);
+ point_add_and_double(init1, p01_tmp12, tmp2);
+ swap2[0U] = bit;
+ }
+ }
+ sw0 = swap1[0U];
+ cswap2(sw0, nq10, nq_p11);
+ nq1 = p01_tmp1;
+ tmp1 = p01_tmp1 + (u32)16U;
+ point_double(nq1, tmp1, tmp2);
+ point_double(nq1, tmp1, tmp2);
+ point_double(nq1, tmp1, tmp2);
+ memcpy(out, p0, (u32)8U * sizeof(p0[0U]));
+
+ memzero_explicit(tmp2, sizeof(tmp2));
+ memzero_explicit(p01_tmp1_swap, sizeof(p01_tmp1_swap));
}
-/* Given c, a 256-bit number, fred_eltfp25519_1w updates c
- * with a number such that 0 <= C < 2**255-19.
- */
-static __always_inline void fred_eltfp25519_1w(u64 *const c)
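+/* o <- inp^(2^n1): n1 successive field squarings */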
+static void fsquare_times(u64 *o, const u64 *inp, u64 *tmp, u32 n1)
{
- u64 tmp0 = 38, tmp1 = 19;
- asm volatile(
- "btrq $63, %3 ;" /* Put bit 255 in carry flag and clear */
- "cmovncl %k5, %k4 ;" /* c[255] ? 38 : 19 */
-
- /* Add either 19 or 38 to c */
- "addq %4, %0 ;"
- "adcq $0, %1 ;"
- "adcq $0, %2 ;"
- "adcq $0, %3 ;"
-
- /* Test for bit 255 again; only triggered on overflow modulo 2^255-19 */
- "movl $0, %k4 ;"
- "cmovnsl %k5, %k4 ;" /* c[255] ? 0 : 19 */
- "btrq $63, %3 ;" /* Clear bit 255 */
-
- /* Subtract 19 if necessary */
- "subq %4, %0 ;"
- "sbbq $0, %1 ;"
- "sbbq $0, %2 ;"
- "sbbq $0, %3 ;"
-
- : "+r"(c[0]), "+r"(c[1]), "+r"(c[2]), "+r"(c[3]), "+r"(tmp0),
- "+r"(tmp1)
- :
- : "memory", "cc");
+ u32 i;
+ fsqr(o, inp, tmp);
+ for (i = (u32)0U; i < n1 - (u32)1U; i = i + (u32)1U)
+ fsqr(o, o, tmp);
}
-static __always_inline void cswap(u8 bit, u64 *const px, u64 *const py)
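+/* Field inversion via Fermat's little theorem: o <- i^(p-2) mod p,
+ * using an addition chain for the exponent p-2 = 2^255 - 21. */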
+static void finv(u64 *o, const u64 *i, u64 *tmp)
{
- u64 temp;
- asm volatile(
- "test %9, %9 ;"
- "movq %0, %8 ;"
- "cmovnzq %4, %0 ;"
- "cmovnzq %8, %4 ;"
- "movq %1, %8 ;"
- "cmovnzq %5, %1 ;"
- "cmovnzq %8, %5 ;"
- "movq %2, %8 ;"
- "cmovnzq %6, %2 ;"
- "cmovnzq %8, %6 ;"
- "movq %3, %8 ;"
- "cmovnzq %7, %3 ;"
- "cmovnzq %8, %7 ;"
- : "+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3]),
- "+r"(py[0]), "+r"(py[1]), "+r"(py[2]), "+r"(py[3]),
- "=r"(temp)
- : "r"(bit)
- : "cc"
- );
+ u64 t1[16U] = { 0U };
+ u64 *a0 = t1;
+ u64 *b = t1 + (u32)4U;
+ u64 *c = t1 + (u32)8U;
+ u64 *t00 = t1 + (u32)12U;
+ u64 *tmp1 = tmp;
+ u64 *a;
+ u64 *t0;
+ fsquare_times(a0, i, tmp1, (u32)1U);
+ fsquare_times(t00, a0, tmp1, (u32)2U);
+ fmul(b, t00, i, tmp);
+ fmul(a0, b, a0, tmp);
+ fsquare_times(t00, a0, tmp1, (u32)1U);
+ fmul(b, t00, b, tmp);
+ fsquare_times(t00, b, tmp1, (u32)5U);
+ fmul(b, t00, b, tmp);
+ fsquare_times(t00, b, tmp1, (u32)10U);
+ fmul(c, t00, b, tmp);
+ fsquare_times(t00, c, tmp1, (u32)20U);
+ fmul(t00, t00, c, tmp);
+ fsquare_times(t00, t00, tmp1, (u32)10U);
+ fmul(b, t00, b, tmp);
+ fsquare_times(t00, b, tmp1, (u32)50U);
+ fmul(c, t00, b, tmp);
+ fsquare_times(t00, c, tmp1, (u32)100U);
+ fmul(t00, t00, c, tmp);
+ fsquare_times(t00, t00, tmp1, (u32)50U);
+ fmul(t00, t00, b, tmp);
+ fsquare_times(t00, t00, tmp1, (u32)5U);
+ a = t1;
+ t0 = t1 + (u32)12U;
+ fmul(o, t0, a, tmp);
}
-static __always_inline void cselect(u8 bit, u64 *const px, const u64 *const py)
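+/* Fully reduce f modulo p = 2^255 - 19 and store the canonical
+ * 4-limb representation in b. */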
+static void store_felem(u64 *b, u64 *f)
{
- asm volatile(
- "test %4, %4 ;"
- "cmovnzq %5, %0 ;"
- "cmovnzq %6, %1 ;"
- "cmovnzq %7, %2 ;"
- "cmovnzq %8, %3 ;"
- : "+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3])
- : "r"(bit), "rm"(py[0]), "rm"(py[1]), "rm"(py[2]), "rm"(py[3])
- : "cc"
- );
+ u64 f30 = f[3U];
+ u64 top_bit0 = f30 >> (u32)63U;
+ u64 carry0;
+ u64 f31;
+ u64 top_bit;
+ u64 carry;
+ u64 f0;
+ u64 f1;
+ u64 f2;
+ u64 f3;
+ u64 m0;
+ u64 m1;
+ u64 m2;
+ u64 m3;
+ u64 mask;
+ u64 f0_;
+ u64 f1_;
+ u64 f2_;
+ u64 f3_;
+ u64 o0;
+ u64 o1;
+ u64 o2;
+ u64 o3;
+ f[3U] = f30 & (u64)0x7fffffffffffffffU;
+ carry0 = add_scalar(f, f, (u64)19U * top_bit0);
+ f31 = f[3U];
+ top_bit = f31 >> (u32)63U;
+ f[3U] = f31 & (u64)0x7fffffffffffffffU;
+ carry = add_scalar(f, f, (u64)19U * top_bit);
+ f0 = f[0U];
+ f1 = f[1U];
+ f2 = f[2U];
+ f3 = f[3U];
+ m0 = gte_mask(f0, (u64)0xffffffffffffffedU);
+ m1 = eq_mask(f1, (u64)0xffffffffffffffffU);
+ m2 = eq_mask(f2, (u64)0xffffffffffffffffU);
+ m3 = eq_mask(f3, (u64)0x7fffffffffffffffU);
+ mask = ((m0 & m1) & m2) & m3;
+ f0_ = f0 - (mask & (u64)0xffffffffffffffedU);
+ f1_ = f1 - (mask & (u64)0xffffffffffffffffU);
+ f2_ = f2 - (mask & (u64)0xffffffffffffffffU);
+ f3_ = f3 - (mask & (u64)0x7fffffffffffffffU);
+ o0 = f0_;
+ o1 = f1_;
+ o2 = f2_;
+ o3 = f3_;
+ b[0U] = o0;
+ b[1U] = o1;
+ b[2U] = o2;
+ b[3U] = o3;
}
-static void curve25519_adx(u8 shared[CURVE25519_KEY_SIZE],
- const u8 private_key[CURVE25519_KEY_SIZE],
- const u8 session_key[CURVE25519_KEY_SIZE])
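+/* Convert the projective (X:Z) result to the affine u-coordinate
+ * X/Z and serialize it into the 32-byte output. */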
+static void encode_point(u8 *o, const u64 *i)
{
- struct {
- u64 buffer[4 * NUM_WORDS_ELTFP25519];
- u64 coordinates[4 * NUM_WORDS_ELTFP25519];
- u64 workspace[6 * NUM_WORDS_ELTFP25519];
- u8 session[CURVE25519_KEY_SIZE];
- u8 private[CURVE25519_KEY_SIZE];
- } __aligned(32) m;
-
- int i = 0, j = 0;
- u64 prev = 0;
- u64 *const X1 = (u64 *)m.session;
- u64 *const key = (u64 *)m.private;
- u64 *const Px = m.coordinates + 0;
- u64 *const Pz = m.coordinates + 4;
- u64 *const Qx = m.coordinates + 8;
- u64 *const Qz = m.coordinates + 12;
- u64 *const X2 = Qx;
- u64 *const Z2 = Qz;
- u64 *const X3 = Px;
- u64 *const Z3 = Pz;
- u64 *const X2Z2 = Qx;
- u64 *const X3Z3 = Px;
-
- u64 *const A = m.workspace + 0;
- u64 *const B = m.workspace + 4;
- u64 *const D = m.workspace + 8;
- u64 *const C = m.workspace + 12;
- u64 *const DA = m.workspace + 16;
- u64 *const CB = m.workspace + 20;
- u64 *const AB = A;
- u64 *const DC = D;
- u64 *const DACB = DA;
-
- memcpy(m.private, private_key, sizeof(m.private));
- memcpy(m.session, session_key, sizeof(m.session));
-
- curve25519_clamp_secret(m.private);
-
- /* As in the draft:
- * When receiving such an array, implementations of curve25519
- * MUST mask the most-significant bit in the final byte. This
- * is done to preserve compatibility with point formats which
- * reserve the sign bit for use in other protocols and to
- * increase resistance to implementation fingerprinting
- */
- m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1;
-
- copy_eltfp25519_1w(Px, X1);
- setzero_eltfp25519_1w(Pz);
- setzero_eltfp25519_1w(Qx);
- setzero_eltfp25519_1w(Qz);
-
- Pz[0] = 1;
- Qx[0] = 1;
-
- /* main-loop */
- prev = 0;
- j = 62;
- for (i = 3; i >= 0; --i) {
- while (j >= 0) {
- u64 bit = (key[i] >> j) & 0x1;
- u64 swap = bit ^ prev;
- prev = bit;
-
- add_eltfp25519_1w_adx(A, X2, Z2); /* A = (X2+Z2) */
- sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */
- add_eltfp25519_1w_adx(C, X3, Z3); /* C = (X3+Z3) */
- sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */
- mul_eltfp25519_2w_adx(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */
-
- cselect(swap, A, C);
- cselect(swap, B, D);
-
- sqr_eltfp25519_2w_adx(AB); /* [AA|BB] = [A^2|B^2] */
- add_eltfp25519_1w_adx(X3, DA, CB); /* X3 = (DA+CB) */
- sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */
- sqr_eltfp25519_2w_adx(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */
-
- copy_eltfp25519_1w(X2, B); /* X2 = B^2 */
- sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */
-
- mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */
- add_eltfp25519_1w_adx(B, B, X2); /* B = a24*E+B */
- mul_eltfp25519_2w_adx(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */
- mul_eltfp25519_1w_adx(Z3, Z3, X1); /* Z3 = Z3*X1 */
- --j;
- }
- j = 63;
- }
-
- inv_eltfp25519_1w_adx(A, Qz);
- mul_eltfp25519_1w_adx((u64 *)shared, Qx, A);
- fred_eltfp25519_1w((u64 *)shared);
-
- memzero_explicit(&m, sizeof(m));
+ const u64 *x = i;
+ const u64 *z = i + (u32)4U;
+ u64 tmp[4U] = { 0U };
+ u64 tmp_w[16U] = { 0U };
+ finv(tmp, z, tmp_w);
+ fmul(tmp, tmp, x, tmp_w);
+ store_felem((u64 *)o, tmp);
}
-static void curve25519_adx_base(u8 session_key[CURVE25519_KEY_SIZE],
- const u8 private_key[CURVE25519_KEY_SIZE])
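+/* Variable-base scalar multiplication: out <- u-coordinate of
+ * priv * (point with u-coordinate pub). */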
+static void curve25519_ever64(u8 *out, const u8 *priv, const u8 *pub)
{
- struct {
- u64 buffer[4 * NUM_WORDS_ELTFP25519];
- u64 coordinates[4 * NUM_WORDS_ELTFP25519];
- u64 workspace[4 * NUM_WORDS_ELTFP25519];
- u8 private[CURVE25519_KEY_SIZE];
- } __aligned(32) m;
-
- const int ite[4] = { 64, 64, 64, 63 };
- const int q = 3;
- u64 swap = 1;
-
- int i = 0, j = 0, k = 0;
- u64 *const key = (u64 *)m.private;
- u64 *const Ur1 = m.coordinates + 0;
- u64 *const Zr1 = m.coordinates + 4;
- u64 *const Ur2 = m.coordinates + 8;
- u64 *const Zr2 = m.coordinates + 12;
-
- u64 *const UZr1 = m.coordinates + 0;
- u64 *const ZUr2 = m.coordinates + 8;
-
- u64 *const A = m.workspace + 0;
- u64 *const B = m.workspace + 4;
- u64 *const C = m.workspace + 8;
- u64 *const D = m.workspace + 12;
-
- u64 *const AB = m.workspace + 0;
- u64 *const CD = m.workspace + 8;
-
- const u64 *const P = table_ladder_8k;
-
- memcpy(m.private, private_key, sizeof(m.private));
-
- curve25519_clamp_secret(m.private);
-
- setzero_eltfp25519_1w(Ur1);
- setzero_eltfp25519_1w(Zr1);
- setzero_eltfp25519_1w(Zr2);
- Ur1[0] = 1;
- Zr1[0] = 1;
- Zr2[0] = 1;
-
- /* G-S */
- Ur2[3] = 0x1eaecdeee27cab34UL;
- Ur2[2] = 0xadc7a0b9235d48e2UL;
- Ur2[1] = 0xbbf095ae14b2edf8UL;
- Ur2[0] = 0x7e94e1fec82faabdUL;
-
- /* main-loop */
- j = q;
- for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) {
- while (j < ite[i]) {
- u64 bit = (key[i] >> j) & 0x1;
- k = (64 * i + j - q);
- swap = swap ^ bit;
- cswap(swap, Ur1, Ur2);
- cswap(swap, Zr1, Zr2);
- swap = bit;
- /* Addition */
- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
- add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */
- mul_eltfp25519_1w_adx(C, &P[4 * k], B); /* C = M0-B */
- sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */
- add_eltfp25519_1w_adx(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */
- sqr_eltfp25519_2w_adx(AB); /* A = A^2 | B = B^2 */
- mul_eltfp25519_2w_adx(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */
- ++j;
+ u64 init1[8U] = { 0U };
+ u64 tmp[4U] = { 0U };
+ u64 tmp3;
+ u64 *x;
+ u64 *z;
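+ /* Load the 32-byte public key as four little-endian 64-bit limbs
+  * (a plain load suffices on little-endian x86). */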
+ {
+ u32 i;
+ for (i = (u32)0U; i < (u32)4U; i = i + (u32)1U) {
+ u64 *os = tmp;
+ const u8 *bj = pub + i * (u32)8U;
+ u64 u = *(u64 *)bj;
+ u64 r = u;
+ u64 x0 = r;
+ os[i] = x0;
}
- j = 0;
}
-
- /* Doubling */
- for (i = 0; i < q; ++i) {
- add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */
- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
- sqr_eltfp25519_2w_adx(AB); /* A = A**2 B = B**2 */
- copy_eltfp25519_1w(C, B); /* C = B */
- sub_eltfp25519_1w(B, A, B); /* B = A-B */
- mul_a24_eltfp25519_1w(D, B); /* D = my_a24*B */
- add_eltfp25519_1w_adx(D, D, C); /* D = D+C */
- mul_eltfp25519_2w_adx(UZr1, AB, CD); /* Ur1 = A*B Zr1 = Zr1*A */
- }
-
- /* Convert to affine coordinates */
- inv_eltfp25519_1w_adx(A, Zr1);
- mul_eltfp25519_1w_adx((u64 *)session_key, Ur1, A);
- fred_eltfp25519_1w((u64 *)session_key);
-
- memzero_explicit(&m, sizeof(m));
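+ /* Mask the most-significant bit of the public value, per RFC 7748 */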
+ tmp3 = tmp[3U];
+ tmp[3U] = tmp3 & (u64)0x7fffffffffffffffU;
+ x = init1;
+ z = init1 + (u32)4U;
+ z[0U] = (u64)1U;
+ z[1U] = (u64)0U;
+ z[2U] = (u64)0U;
+ z[3U] = (u64)0U;
+ x[0U] = tmp[0U];
+ x[1U] = tmp[1U];
+ x[2U] = tmp[2U];
+ x[3U] = tmp[3U];
+ montgomery_ladder(init1, priv, init1);
+ encode_point(out, init1);
}
-static void curve25519_bmi2(u8 shared[CURVE25519_KEY_SIZE],
- const u8 private_key[CURVE25519_KEY_SIZE],
- const u8 session_key[CURVE25519_KEY_SIZE])
-{
- struct {
- u64 buffer[4 * NUM_WORDS_ELTFP25519];
- u64 coordinates[4 * NUM_WORDS_ELTFP25519];
- u64 workspace[6 * NUM_WORDS_ELTFP25519];
- u8 session[CURVE25519_KEY_SIZE];
- u8 private[CURVE25519_KEY_SIZE];
- } __aligned(32) m;
-
- int i = 0, j = 0;
- u64 prev = 0;
- u64 *const X1 = (u64 *)m.session;
- u64 *const key = (u64 *)m.private;
- u64 *const Px = m.coordinates + 0;
- u64 *const Pz = m.coordinates + 4;
- u64 *const Qx = m.coordinates + 8;
- u64 *const Qz = m.coordinates + 12;
- u64 *const X2 = Qx;
- u64 *const Z2 = Qz;
- u64 *const X3 = Px;
- u64 *const Z3 = Pz;
- u64 *const X2Z2 = Qx;
- u64 *const X3Z3 = Px;
-
- u64 *const A = m.workspace + 0;
- u64 *const B = m.workspace + 4;
- u64 *const D = m.workspace + 8;
- u64 *const C = m.workspace + 12;
- u64 *const DA = m.workspace + 16;
- u64 *const CB = m.workspace + 20;
- u64 *const AB = A;
- u64 *const DC = D;
- u64 *const DACB = DA;
-
- memcpy(m.private, private_key, sizeof(m.private));
- memcpy(m.session, session_key, sizeof(m.session));
-
- curve25519_clamp_secret(m.private);
-
- /* As in the draft:
- * When receiving such an array, implementations of curve25519
- * MUST mask the most-significant bit in the final byte. This
- * is done to preserve compatibility with point formats which
- * reserve the sign bit for use in other protocols and to
- * increase resistance to implementation fingerprinting
- */
- m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1;
-
- copy_eltfp25519_1w(Px, X1);
- setzero_eltfp25519_1w(Pz);
- setzero_eltfp25519_1w(Qx);
- setzero_eltfp25519_1w(Qz);
-
- Pz[0] = 1;
- Qx[0] = 1;
-
- /* main-loop */
- prev = 0;
- j = 62;
- for (i = 3; i >= 0; --i) {
- while (j >= 0) {
- u64 bit = (key[i] >> j) & 0x1;
- u64 swap = bit ^ prev;
- prev = bit;
-
- add_eltfp25519_1w_bmi2(A, X2, Z2); /* A = (X2+Z2) */
- sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */
- add_eltfp25519_1w_bmi2(C, X3, Z3); /* C = (X3+Z3) */
- sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */
- mul_eltfp25519_2w_bmi2(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */
-
- cselect(swap, A, C);
- cselect(swap, B, D);
-
- sqr_eltfp25519_2w_bmi2(AB); /* [AA|BB] = [A^2|B^2] */
- add_eltfp25519_1w_bmi2(X3, DA, CB); /* X3 = (DA+CB) */
- sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */
- sqr_eltfp25519_2w_bmi2(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */
-
- copy_eltfp25519_1w(X2, B); /* X2 = B^2 */
- sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */
-
- mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */
- add_eltfp25519_1w_bmi2(B, B, X2); /* B = a24*E+B */
- mul_eltfp25519_2w_bmi2(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */
- mul_eltfp25519_1w_bmi2(Z3, Z3, X1); /* Z3 = Z3*X1 */
- --j;
- }
- j = 63;
- }
-
- inv_eltfp25519_1w_bmi2(A, Qz);
- mul_eltfp25519_1w_bmi2((u64 *)shared, Qx, A);
- fred_eltfp25519_1w((u64 *)shared);
+/* The constants below were generated using this sage script:
+ *
+ * #!/usr/bin/env sage
+ * import sys
+ * from sage.all import *
+ * def limbs(n):
+ * n = int(n)
+ * l = ((n >> 0) % 2^64, (n >> 64) % 2^64, (n >> 128) % 2^64, (n >> 192) % 2^64)
+ * return "0x%016xULL, 0x%016xULL, 0x%016xULL, 0x%016xULL" % l
+ * ec = EllipticCurve(GF(2^255 - 19), [0, 486662, 0, 1, 0])
+ * p_minus_s = (ec.lift_x(9) - ec.lift_x(1))[0]
+ * print("static const u64 p_minus_s[] = { %s };\n" % limbs(p_minus_s))
+ * print("static const u64 table_ladder[] = {")
+ * p = ec.lift_x(9)
+ * for i in range(252):
+ * l = (p[0] + p[2]) / (p[0] - p[2])
+ * print(("\t%s" + ("," if i != 251 else "")) % limbs(l))
+ * p = p * 2
+ * print("};")
+ *
+ */
- memzero_explicit(&m, sizeof(m));
-}
+static const u64 p_minus_s[] = { 0x816b1e0137d48290ULL, 0x440f6a51eb4d1207ULL, 0x52385f46dca2b71dULL, 0x215132111d8354cbULL };
+
+static const u64 table_ladder[] = {
+ 0xfffffffffffffff3ULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL, 0x5fffffffffffffffULL,
+ 0x6b8220f416aafe96ULL, 0x82ebeb2b4f566a34ULL, 0xd5a9a5b075a5950fULL, 0x5142b2cf4b2488f4ULL,
+ 0x6aaebc750069680cULL, 0x89cf7820a0f99c41ULL, 0x2a58d9183b56d0f4ULL, 0x4b5aca80e36011a4ULL,
+ 0x329132348c29745dULL, 0xf4a2e616e1642fd7ULL, 0x1e45bb03ff67bc34ULL, 0x306912d0f42a9b4aULL,
+ 0xff886507e6af7154ULL, 0x04f50e13dfeec82fULL, 0xaa512fe82abab5ceULL, 0x174e251a68d5f222ULL,
+ 0xcf96700d82028898ULL, 0x1743e3370a2c02c5ULL, 0x379eec98b4e86eaaULL, 0x0c59888a51e0482eULL,
+ 0xfbcbf1d699b5d189ULL, 0xacaef0d58e9fdc84ULL, 0xc1c20d06231f7614ULL, 0x2938218da274f972ULL,
+ 0xf6af49beff1d7f18ULL, 0xcc541c22387ac9c2ULL, 0x96fcc9ef4015c56bULL, 0x69c1627c690913a9ULL,
+ 0x7a86fd2f4733db0eULL, 0xfdb8c4f29e087de9ULL, 0x095e4b1a8ea2a229ULL, 0x1ad7a7c829b37a79ULL,
+ 0x342d89cad17ea0c0ULL, 0x67bedda6cced2051ULL, 0x19ca31bf2bb42f74ULL, 0x3df7b4c84980acbbULL,
+ 0xa8c6444dc80ad883ULL, 0xb91e440366e3ab85ULL, 0xc215cda00164f6d8ULL, 0x3d867c6ef247e668ULL,
+ 0xc7dd582bcc3e658cULL, 0xfd2c4748ee0e5528ULL, 0xa0fd9b95cc9f4f71ULL, 0x7529d871b0675ddfULL,
+ 0xb8f568b42d3cbd78ULL, 0x1233011b91f3da82ULL, 0x2dce6ccd4a7c3b62ULL, 0x75e7fc8e9e498603ULL,
+ 0x2f4f13f1fcd0b6ecULL, 0xf1a8ca1f29ff7a45ULL, 0xc249c1a72981e29bULL, 0x6ebe0dbb8c83b56aULL,
+ 0x7114fa8d170bb222ULL, 0x65a2dcd5bf93935fULL, 0xbdc41f68b59c979aULL, 0x2f0eef79a2ce9289ULL,
+ 0x42ecbf0c083c37ceULL, 0x2930bc09ec496322ULL, 0xf294b0c19cfeac0dULL, 0x3780aa4bedfabb80ULL,
+ 0x56c17d3e7cead929ULL, 0xe7cb4beb2e5722c5ULL, 0x0ce931732dbfe15aULL, 0x41b883c7621052f8ULL,
+ 0xdbf75ca0c3d25350ULL, 0x2936be086eb1e351ULL, 0xc936e03cb4a9b212ULL, 0x1d45bf82322225aaULL,
+ 0xe81ab1036a024cc5ULL, 0xe212201c304c9a72ULL, 0xc5d73fba6832b1fcULL, 0x20ffdb5a4d839581ULL,
+ 0xa283d367be5d0fadULL, 0x6c2b25ca8b164475ULL, 0x9d4935467caaf22eULL, 0x5166408eee85ff49ULL,
+ 0x3c67baa2fab4e361ULL, 0xb3e433c67ef35cefULL, 0x5259729241159b1cULL, 0x6a621892d5b0ab33ULL,
+ 0x20b74a387555cdcbULL, 0x532aa10e1208923fULL, 0xeaa17b7762281dd1ULL, 0x61ab3443f05c44bfULL,
+ 0x257a6c422324def8ULL, 0x131c6c1017e3cf7fULL, 0x23758739f630a257ULL, 0x295a407a01a78580ULL,
+ 0xf8c443246d5da8d9ULL, 0x19d775450c52fa5dULL, 0x2afcfc92731bf83dULL, 0x7d10c8e81b2b4700ULL,
+ 0xc8e0271f70baa20bULL, 0x993748867ca63957ULL, 0x5412efb3cb7ed4bbULL, 0x3196d36173e62975ULL,
+ 0xde5bcad141c7dffcULL, 0x47cc8cd2b395c848ULL, 0xa34cd942e11af3cbULL, 0x0256dbf2d04ecec2ULL,
+ 0x875ab7e94b0e667fULL, 0xcad4dd83c0850d10ULL, 0x47f12e8f4e72c79fULL, 0x5f1a87bb8c85b19bULL,
+ 0x7ae9d0b6437f51b8ULL, 0x12c7ce5518879065ULL, 0x2ade09fe5cf77aeeULL, 0x23a05a2f7d2c5627ULL,
+ 0x5908e128f17c169aULL, 0xf77498dd8ad0852dULL, 0x74b4c4ceab102f64ULL, 0x183abadd10139845ULL,
+ 0xb165ba8daa92aaacULL, 0xd5c5ef9599386705ULL, 0xbe2f8f0cf8fc40d1ULL, 0x2701e635ee204514ULL,
+ 0x629fa80020156514ULL, 0xf223868764a8c1ceULL, 0x5b894fff0b3f060eULL, 0x60d9944cf708a3faULL,
+ 0xaeea001a1c7a201fULL, 0xebf16a633ee2ce63ULL, 0x6f7709594c7a07e1ULL, 0x79b958150d0208cbULL,
+ 0x24b55e5301d410e7ULL, 0xe3a34edff3fdc84dULL, 0xd88768e4904032d8ULL, 0x131384427b3aaeecULL,
+ 0x8405e51286234f14ULL, 0x14dc4739adb4c529ULL, 0xb8a2b5b250634ffdULL, 0x2fe2a94ad8a7ff93ULL,
+ 0xec5c57efe843faddULL, 0x2843ce40f0bb9918ULL, 0xa4b561d6cf3d6305ULL, 0x743629bde8fb777eULL,
+ 0x343edd46bbaf738fULL, 0xed981828b101a651ULL, 0xa401760b882c797aULL, 0x1fc223e28dc88730ULL,
+ 0x48604e91fc0fba0eULL, 0xb637f78f052c6fa4ULL, 0x91ccac3d09e9239cULL, 0x23f7eed4437a687cULL,
+ 0x5173b1118d9bd800ULL, 0x29d641b63189d4a7ULL, 0xfdbf177988bbc586ULL, 0x2959894fcad81df5ULL,
+ 0xaebc8ef3b4bbc899ULL, 0x4148995ab26992b9ULL, 0x24e20b0134f92cfbULL, 0x40d158894a05dee8ULL,
+ 0x46b00b1185af76f6ULL, 0x26bac77873187a79ULL, 0x3dc0bf95ab8fff5fULL, 0x2a608bd8945524d7ULL,
+ 0x26449588bd446302ULL, 0x7c4bc21c0388439cULL, 0x8e98a4f383bd11b2ULL, 0x26218d7bc9d876b9ULL,
+ 0xe3081542997c178aULL, 0x3c2d29a86fb6606fULL, 0x5c217736fa279374ULL, 0x7dde05734afeb1faULL,
+ 0x3bf10e3906d42babULL, 0xe4f7803e1980649cULL, 0xe6053bf89595bf7aULL, 0x394faf38da245530ULL,
+ 0x7a8efb58896928f4ULL, 0xfbc778e9cc6a113cULL, 0x72670ce330af596fULL, 0x48f222a81d3d6cf7ULL,
+ 0xf01fce410d72caa7ULL, 0x5a20ecc7213b5595ULL, 0x7bc21165c1fa1483ULL, 0x07f89ae31da8a741ULL,
+ 0x05d2c2b4c6830ff9ULL, 0xd43e330fc6316293ULL, 0xa5a5590a96d3a904ULL, 0x705edb91a65333b6ULL,
+ 0x048ee15e0bb9a5f7ULL, 0x3240cfca9e0aaf5dULL, 0x8f4b71ceedc4a40bULL, 0x621c0da3de544a6dULL,
+ 0x92872836a08c4091ULL, 0xce8375b010c91445ULL, 0x8a72eb524f276394ULL, 0x2667fcfa7ec83635ULL,
+ 0x7f4c173345e8752aULL, 0x061b47feee7079a5ULL, 0x25dd9afa9f86ff34ULL, 0x3780cef5425dc89cULL,
+ 0x1a46035a513bb4e9ULL, 0x3e1ef379ac575adaULL, 0xc78c5f1c5fa24b50ULL, 0x321a967634fd9f22ULL,
+ 0x946707b8826e27faULL, 0x3dca84d64c506fd0ULL, 0xc189218075e91436ULL, 0x6d9284169b3b8484ULL,
+ 0x3a67e840383f2ddfULL, 0x33eec9a30c4f9b75ULL, 0x3ec7c86fa783ef47ULL, 0x26ec449fbac9fbc4ULL,
+ 0x5c0f38cba09b9e7dULL, 0x81168cc762a3478cULL, 0x3e23b0d306fc121cULL, 0x5a238aa0a5efdcddULL,
+ 0x1ba26121c4ea43ffULL, 0x36f8c77f7c8832b5ULL, 0x88fbea0b0adcf99aULL, 0x5ca9938ec25bebf9ULL,
+ 0xd5436a5e51fccda0ULL, 0x1dbc4797c2cd893bULL, 0x19346a65d3224a08ULL, 0x0f5034e49b9af466ULL,
+ 0xf23c3967a1e0b96eULL, 0xe58b08fa867a4d88ULL, 0xfb2fabc6a7341679ULL, 0x2a75381eb6026946ULL,
+ 0xc80a3be4c19420acULL, 0x66b1f6c681f2b6dcULL, 0x7cf7036761e93388ULL, 0x25abbbd8a660a4c4ULL,
+ 0x91ea12ba14fd5198ULL, 0x684950fc4a3cffa9ULL, 0xf826842130f5ad28ULL, 0x3ea988f75301a441ULL,
+ 0xc978109a695f8c6fULL, 0x1746eb4a0530c3f3ULL, 0x444d6d77b4459995ULL, 0x75952b8c054e5cc7ULL,
+ 0xa3703f7915f4d6aaULL, 0x66c346202f2647d8ULL, 0xd01469df811d644bULL, 0x77fea47d81a5d71fULL,
+ 0xc5e9529ef57ca381ULL, 0x6eeeb4b9ce2f881aULL, 0xb6e91a28e8009bd6ULL, 0x4b80be3e9afc3fecULL,
+ 0x7e3773c526aed2c5ULL, 0x1b4afcb453c9a49dULL, 0xa920bdd7baffb24dULL, 0x7c54699f122d400eULL,
+ 0xef46c8e14fa94bc8ULL, 0xe0b074ce2952ed5eULL, 0xbea450e1dbd885d5ULL, 0x61b68649320f712cULL,
+ 0x8a485f7309ccbdd1ULL, 0xbd06320d7d4d1a2dULL, 0x25232973322dbef4ULL, 0x445dc4758c17f770ULL,
+ 0xdb0434177cc8933cULL, 0xed6fe82175ea059fULL, 0x1efebefdc053db34ULL, 0x4adbe867c65daf99ULL,
+ 0x3acd71a2a90609dfULL, 0xe5e991856dd04050ULL, 0x1ec69b688157c23cULL, 0x697427f6885cfe4dULL,
+ 0xd7be7b9b65e1a851ULL, 0xa03d28d522c536ddULL, 0x28399d658fd2b645ULL, 0x49e5b7e17c2641e1ULL,
+ 0x6f8c3a98700457a4ULL, 0x5078f0a25ebb6778ULL, 0xd13c3ccbc382960fULL, 0x2e003258a7df84b1ULL,
+ 0x8ad1f39be6296a1cULL, 0xc1eeaa652a5fbfb2ULL, 0x33ee0673fd26f3cbULL, 0x59256173a69d2cccULL,
+ 0x41ea07aa4e18fc41ULL, 0xd9fc19527c87a51eULL, 0xbdaacb805831ca6fULL, 0x445b652dc916694fULL,
+ 0xce92a3a7f2172315ULL, 0x1edc282de11b9964ULL, 0xa1823aafe04c314aULL, 0x790a2d94437cf586ULL,
+ 0x71c447fb93f6e009ULL, 0x8922a56722845276ULL, 0xbf70903b204f5169ULL, 0x2f7a89891ba319feULL,
+ 0x02a08eb577e2140cULL, 0xed9a4ed4427bdcf4ULL, 0x5253ec44e4323cd1ULL, 0x3e88363c14e9355bULL,
+ 0xaa66c14277110b8cULL, 0x1ae0391610a23390ULL, 0x2030bd12c93fc2a2ULL, 0x3ee141579555c7abULL,
+ 0x9214de3a6d6e7d41ULL, 0x3ccdd88607f17efeULL, 0x674f1288f8e11217ULL, 0x5682250f329f93d0ULL,
+ 0x6cf00b136d2e396eULL, 0x6e4cf86f1014debfULL, 0x5930b1b5bfcc4e83ULL, 0x047069b48aba16b6ULL,
+ 0x0d4ce4ab69b20793ULL, 0xb24db91a97d0fb9eULL, 0xcdfa50f54e00d01dULL, 0x221b1085368bddb5ULL,
+ 0xe7e59468b1e3d8d2ULL, 0x53c56563bd122f93ULL, 0xeee8a903e0663f09ULL, 0x61efa662cbbe3d42ULL,
+ 0x2cf8ddddde6eab2aULL, 0x9bf80ad51435f231ULL, 0x5deadacec9f04973ULL, 0x29275b5d41d29b27ULL,
+ 0xcfde0f0895ebf14fULL, 0xb9aab96b054905a7ULL, 0xcae80dd9a1c420fdULL, 0x0a63bf2f1673bbc7ULL,
+ 0x092f6e11958fbc8cULL, 0x672a81e804822fadULL, 0xcac8351560d52517ULL, 0x6f3f7722c8f192f8ULL,
+ 0xf8ba90ccc2e894b7ULL, 0x2c7557a438ff9f0dULL, 0x894d1d855ae52359ULL, 0x68e122157b743d69ULL,
+ 0xd87e5570cfb919f3ULL, 0x3f2cdecd95798db9ULL, 0x2121154710c0a2ceULL, 0x3c66a115246dc5b2ULL,
+ 0xcbedc562294ecb72ULL, 0xba7143c36a280b16ULL, 0x9610c2efd4078b67ULL, 0x6144735d946a4b1eULL,
+ 0x536f111ed75b3350ULL, 0x0211db8c2041d81bULL, 0xf93cb1000e10413cULL, 0x149dfd3c039e8876ULL,
+ 0xd479dde46b63155bULL, 0xb66e15e93c837976ULL, 0xdafde43b1f13e038ULL, 0x5fafda1a2e4b0b35ULL,
+ 0x3600bbdf17197581ULL, 0x3972050bbe3cd2c2ULL, 0x5938906dbdd5be86ULL, 0x34fce5e43f9b860fULL,
+ 0x75a8a4cd42d14d02ULL, 0x828dabc53441df65ULL, 0x33dcabedd2e131d3ULL, 0x3ebad76fb814d25fULL,
+ 0xd4906f566f70e10fULL, 0x5d12f7aa51690f5aULL, 0x45adb16e76cefcf2ULL, 0x01f768aead232999ULL,
+ 0x2b6cc77b6248febdULL, 0x3cd30628ec3aaffdULL, 0xce1c0b80d4ef486aULL, 0x4c3bff2ea6f66c23ULL,
+ 0x3f2ec4094aeaeb5fULL, 0x61b19b286e372ca7ULL, 0x5eefa966de2a701dULL, 0x23b20565de55e3efULL,
+ 0xe301ca5279d58557ULL, 0x07b2d4ce27c2874fULL, 0xa532cd8a9dcf1d67ULL, 0x2a52fee23f2bff56ULL,
+ 0x8624efb37cd8663dULL, 0xbbc7ac20ffbd7594ULL, 0x57b85e9c82d37445ULL, 0x7b3052cb86a6ec66ULL,
+ 0x3482f0ad2525e91eULL, 0x2cb68043d28edca0ULL, 0xaf4f6d052e1b003aULL, 0x185f8c2529781b0aULL,
+ 0xaa41de5bd80ce0d6ULL, 0x9407b2416853e9d6ULL, 0x563ec36e357f4c3aULL, 0x4cc4b8dd0e297bceULL,
+ 0xa2fc1a52ffb8730eULL, 0x1811f16e67058e37ULL, 0x10f9a366cddf4ee1ULL, 0x72f4a0c4a0b9f099ULL,
+ 0x8c16c06f663f4ea7ULL, 0x693b3af74e970fbaULL, 0x2102e7f1d69ec345ULL, 0x0ba53cbc968a8089ULL,
+ 0xca3d9dc7fea15537ULL, 0x4c6824bb51536493ULL, 0xb9886314844006b1ULL, 0x40d2a72ab454cc60ULL,
+ 0x5936a1b712570975ULL, 0x91b9d648debda657ULL, 0x3344094bb64330eaULL, 0x006ba10d12ee51d0ULL,
+ 0x19228468f5de5d58ULL, 0x0eb12f4c38cc05b0ULL, 0xa1039f9dd5601990ULL, 0x4502d4ce4fff0e0bULL,
+ 0xeb2054106837c189ULL, 0xd0f6544c6dd3b93cULL, 0x40727064c416d74fULL, 0x6e15c6114b502ef0ULL,
+ 0x4df2a398cfb1a76bULL, 0x11256c7419f2f6b1ULL, 0x4a497962066e6043ULL, 0x705b3aab41355b44ULL,
+ 0x365ef536d797b1d8ULL, 0x00076bd622ddf0dbULL, 0x3bbf33b0e0575a88ULL, 0x3777aa05c8e4ca4dULL,
+ 0x392745c85578db5fULL, 0x6fda4149dbae5ae2ULL, 0xb1f0b00b8adc9867ULL, 0x09963437d36f1da3ULL,
+ 0x7e824e90a5dc3853ULL, 0xccb5f6641f135cbdULL, 0x6736d86c87ce8fccULL, 0x625f3ce26604249fULL,
+ 0xaf8ac8059502f63fULL, 0x0c05e70a2e351469ULL, 0x35292e9c764b6305ULL, 0x1a394360c7e23ac3ULL,
+ 0xd5c6d53251183264ULL, 0x62065abd43c2b74fULL, 0xb5fbf5d03b973f9bULL, 0x13a3da3661206e5eULL,
+ 0xc6bd5837725d94e5ULL, 0x18e30912205016c5ULL, 0x2088ce1570033c68ULL, 0x7fba1f495c837987ULL,
+ 0x5a8c7423f2f9079dULL, 0x1735157b34023fc5ULL, 0xe4f9b49ad2fab351ULL, 0x6691ff72c878e33cULL,
+ 0x122c2adedc5eff3eULL, 0xf8dd4bf1d8956cf4ULL, 0xeb86205d9e9e5bdaULL, 0x049b92b9d975c743ULL,
+ 0xa5379730b0f6c05aULL, 0x72a0ffacc6f3a553ULL, 0xb0032c34b20dcd6dULL, 0x470e9dbc88d5164aULL,
+ 0xb19cf10ca237c047ULL, 0xb65466711f6c81a2ULL, 0xb3321bd16dd80b43ULL, 0x48c14f600c5fbe8eULL,
+ 0x66451c264aa6c803ULL, 0xb66e3904a4fa7da6ULL, 0xd45f19b0b3128395ULL, 0x31602627c3c9bc10ULL,
+ 0x3120dc4832e4e10dULL, 0xeb20c46756c717f7ULL, 0x00f52e3f67280294ULL, 0x566d4fc14730c509ULL,
+ 0x7e3a5d40fd837206ULL, 0xc1e926dc7159547aULL, 0x216730fba68d6095ULL, 0x22e8c3843f69cea7ULL,
+ 0x33d074e8930e4b2bULL, 0xb6e4350e84d15816ULL, 0x5534c26ad6ba2365ULL, 0x7773c12f89f1f3f3ULL,
+ 0x8cba404da57962aaULL, 0x5b9897a81999ce56ULL, 0x508e862f121692fcULL, 0x3a81907fa093c291ULL,
+ 0x0dded0ff4725a510ULL, 0x10d8cc10673fc503ULL, 0x5b9d151c9f1f4e89ULL, 0x32a5c1d5cb09a44cULL,
+ 0x1e0aa442b90541fbULL, 0x5f85eb7cc1b485dbULL, 0xbee595ce8a9df2e5ULL, 0x25e496c722422236ULL,
+ 0x5edf3c46cd0fe5b9ULL, 0x34e75a7ed2a43388ULL, 0xe488de11d761e352ULL, 0x0e878a01a085545cULL,
+ 0xba493c77e021bb04ULL, 0x2b4d1843c7df899aULL, 0x9ea37a487ae80d67ULL, 0x67a9958011e41794ULL,
+ 0x4b58051a6697b065ULL, 0x47e33f7d8d6ba6d4ULL, 0xbb4da8d483ca46c1ULL, 0x68becaa181c2db0dULL,
+ 0x8d8980e90b989aa5ULL, 0xf95eb14a2c93c99bULL, 0x51c6c7c4796e73a2ULL, 0x6e228363b5efb569ULL,
+ 0xc6bbc0b02dd624c8ULL, 0x777eb47dec8170eeULL, 0x3cde15a004cfafa9ULL, 0x1dc6bc087160bf9bULL,
+ 0x2e07e043eec34002ULL, 0x18e9fc677a68dc7fULL, 0xd8da03188bd15b9aULL, 0x48fbc3bb00568253ULL,
+ 0x57547d4cfb654ce1ULL, 0xd3565b82a058e2adULL, 0xf63eaf0bbf154478ULL, 0x47531ef114dfbb18ULL,
+ 0xe1ec630a4278c587ULL, 0x5507d546ca8e83f3ULL, 0x85e135c63adc0c2bULL, 0x0aa7efa85682844eULL,
+ 0x72691ba8b3e1f615ULL, 0x32b4e9701fbe3ffaULL, 0x97b6d92e39bb7868ULL, 0x2cfe53dea02e39e8ULL,
+ 0x687392cd85cd52b0ULL, 0x27ff66c910e29831ULL, 0x97134556a9832d06ULL, 0x269bb0360a84f8a0ULL,
+ 0x706e55457643f85cULL, 0x3734a48c9b597d1bULL, 0x7aee91e8c6efa472ULL, 0x5cd6abc198a9d9e0ULL,
+ 0x0e04de06cb3ce41aULL, 0xd8c6eb893402e138ULL, 0x904659bb686e3772ULL, 0x7215c371746ba8c8ULL,
+ 0xfd12a97eeae4a2d9ULL, 0x9514b7516394f2c5ULL, 0x266fd5809208f294ULL, 0x5c847085619a26b9ULL,
+ 0x52985410fed694eaULL, 0x3c905b934a2ed254ULL, 0x10bb47692d3be467ULL, 0x063b3d2d69e5e9e1ULL,
+ 0x472726eedda57debULL, 0xefb6c4ae10f41891ULL, 0x2b1641917b307614ULL, 0x117c554fc4f45b7cULL,
+ 0xc07cf3118f9d8812ULL, 0x01dbd82050017939ULL, 0xd7e803f4171b2827ULL, 0x1015e87487d225eaULL,
+ 0xc58de3fed23acc4dULL, 0x50db91c294a7be2dULL, 0x0b94d43d1c9cf457ULL, 0x6b1640fa6e37524aULL,
+ 0x692f346c5fda0d09ULL, 0x200b1c59fa4d3151ULL, 0xb8c46f760777a296ULL, 0x4b38395f3ffdfbcfULL,
+ 0x18d25e00be54d671ULL, 0x60d50582bec8aba6ULL, 0x87ad8f263b78b982ULL, 0x50fdf64e9cda0432ULL,
+ 0x90f567aac578dcf0ULL, 0xef1e9b0ef2a3133bULL, 0x0eebba9242d9de71ULL, 0x15473c9bf03101c7ULL,
+ 0x7c77e8ae56b78095ULL, 0xb678e7666e6f078eULL, 0x2da0b9615348ba1fULL, 0x7cf931c1ff733f0bULL,
+ 0x26b357f50a0a366cULL, 0xe9708cf42b87d732ULL, 0xc13aeea5f91cb2c0ULL, 0x35d90c991143bb4cULL,
+ 0x47c1c404a9a0d9dcULL, 0x659e58451972d251ULL, 0x3875a8c473b38c31ULL, 0x1fbd9ed379561f24ULL,
+ 0x11fabc6fd41ec28dULL, 0x7ef8dfe3cd2a2dcaULL, 0x72e73b5d8c404595ULL, 0x6135fa4954b72f27ULL,
+ 0xccfc32a2de24b69cULL, 0x3f55698c1f095d88ULL, 0xbe3350ed5ac3f929ULL, 0x5e9bf806ca477eebULL,
+ 0xe9ce8fb63c309f68ULL, 0x5376f63565e1f9f4ULL, 0xd1afcfb35a6393f1ULL, 0x6632a1ede5623506ULL,
+ 0x0b7d6c390c2ded4cULL, 0x56cb3281df04cb1fULL, 0x66305a1249ecc3c7ULL, 0x5d588b60a38ca72aULL,
+ 0xa6ecbf78e8e5f42dULL, 0x86eeb44b3c8a3eecULL, 0xec219c48fbd21604ULL, 0x1aaf1af517c36731ULL,
+ 0xc306a2836769bde7ULL, 0x208280622b1e2adbULL, 0x8027f51ffbff94a6ULL, 0x76cfa1ce1124f26bULL,
+ 0x18eb00562422abb6ULL, 0xf377c4d58f8c29c3ULL, 0x4dbbc207f531561aULL, 0x0253b7f082128a27ULL,
+ 0x3d1f091cb62c17e0ULL, 0x4860e1abd64628a9ULL, 0x52d17436309d4253ULL, 0x356f97e13efae576ULL,
+ 0xd351e11aa150535bULL, 0x3e6b45bb1dd878ccULL, 0x0c776128bed92c98ULL, 0x1d34ae93032885b8ULL,
+ 0x4ba0488ca85ba4c3ULL, 0x985348c33c9ce6ceULL, 0x66124c6f97bda770ULL, 0x0f81a0290654124aULL,
+ 0x9ed09ca6569b86fdULL, 0x811009fd18af9a2dULL, 0xff08d03f93d8c20aULL, 0x52a148199faef26bULL,
+ 0x3e03f9dc2d8d1b73ULL, 0x4205801873961a70ULL, 0xc0d987f041a35970ULL, 0x07aa1f15a1c0d549ULL,
+ 0xdfd46ce08cd27224ULL, 0x6d0a024f934e4239ULL, 0x808a7a6399897b59ULL, 0x0a4556e9e13d95a2ULL,
+ 0xd21a991fe9c13045ULL, 0x9b0e8548fe7751b8ULL, 0x5da643cb4bf30035ULL, 0x77db28d63940f721ULL,
+ 0xfc5eeb614adc9011ULL, 0x5229419ae8c411ebULL, 0x9ec3e7787d1dcf74ULL, 0x340d053e216e4cb5ULL,
+ 0xcac7af39b48df2b4ULL, 0xc0faec2871a10a94ULL, 0x140a69245ca575edULL, 0x0cf1c37134273a4cULL,
+ 0xc8ee306ac224b8a5ULL, 0x57eaee7ccb4930b0ULL, 0xa1e806bdaacbe74fULL, 0x7d9a62742eeb657dULL,
+ 0x9eb6b6ef546c4830ULL, 0x885cca1fddb36e2eULL, 0xe6b9f383ef0d7105ULL, 0x58654fef9d2e0412ULL,
+ 0xa905c4ffbe0e8e26ULL, 0x942de5df9b31816eULL, 0x497d723f802e88e1ULL, 0x30684dea602f408dULL,
+ 0x21e5a278a3e6cb34ULL, 0xaefb6e6f5b151dc4ULL, 0xb30b8e049d77ca15ULL, 0x28c3c9cf53b98981ULL,
+ 0x287fb721556cdd2aULL, 0x0d317ca897022274ULL, 0x7468c7423a543258ULL, 0x4a7f11464eb5642fULL,
+ 0xa237a4774d193aa6ULL, 0xd865986ea92129a1ULL, 0x24c515ecf87c1a88ULL, 0x604003575f39f5ebULL,
+ 0x47b9f189570a9b27ULL, 0x2b98cede465e4b78ULL, 0x026df551dbb85c20ULL, 0x74fcd91047e21901ULL,
+ 0x13e2a90a23c1bfa3ULL, 0x0cb0074e478519f6ULL, 0x5ff1cbbe3af6cf44ULL, 0x67fe5438be812dbeULL,
+ 0xd13cf64fa40f05b0ULL, 0x054dfb2f32283787ULL, 0x4173915b7f0d2aeaULL, 0x482f144f1f610d4eULL,
+ 0xf6210201b47f8234ULL, 0x5d0ae1929e70b990ULL, 0xdcd7f455b049567cULL, 0x7e93d0f1f0916f01ULL,
+ 0xdd79cbf18a7db4faULL, 0xbe8391bf6f74c62fULL, 0x027145d14b8291bdULL, 0x585a73ea2cbf1705ULL,
+ 0x485ca03e928a0db2ULL, 0x10fc01a5742857e7ULL, 0x2f482edbd6d551a7ULL, 0x0f0433b5048fdb8aULL,
+ 0x60da2e8dd7dc6247ULL, 0x88b4c9d38cd4819aULL, 0x13033ac001f66697ULL, 0x273b24fe3b367d75ULL,
+ 0xc6e8f66a31b3b9d4ULL, 0x281514a494df49d5ULL, 0xd1726fdfc8b23da7ULL, 0x4b3ae7d103dee548ULL,
+ 0xc6256e19ce4b9d7eULL, 0xff5c5cf186e3c61cULL, 0xacc63ca34b8ec145ULL, 0x74621888fee66574ULL,
+ 0x956f409645290a1eULL, 0xef0bf8e3263a962eULL, 0xed6a50eb5ec2647bULL, 0x0694283a9dca7502ULL,
+ 0x769b963643a2dcd1ULL, 0x42b7c8ea09fc5353ULL, 0x4f002aee13397eabULL, 0x63005e2c19b7d63aULL,
+ 0xca6736da63023beaULL, 0x966c7f6db12a99b7ULL, 0xace09390c537c5e1ULL, 0x0b696063a1aa89eeULL,
+ 0xebb03e97288c56e5ULL, 0x432a9f9f938c8be8ULL, 0xa6a5a93d5b717f71ULL, 0x1a5fb4c3e18f9d97ULL,
+ 0x1c94e7ad1c60cdceULL, 0xee202a43fc02c4a0ULL, 0x8dafe4d867c46a20ULL, 0x0a10263c8ac27b58ULL,
+ 0xd0dea9dfe4432a4aULL, 0x856af87bbe9277c5ULL, 0xce8472acc212c71aULL, 0x6f151b6d9bbb1e91ULL,
+ 0x26776c527ceed56aULL, 0x7d211cb7fbf8faecULL, 0x37ae66a6fd4609ccULL, 0x1f81b702d2770c42ULL,
+ 0x2fb0b057eac58392ULL, 0xe1dd89fe29744e9dULL, 0xc964f8eb17beb4f8ULL, 0x29571073c9a2d41eULL,
+ 0xa948a18981c0e254ULL, 0x2df6369b65b22830ULL, 0xa33eb2d75fcfd3c6ULL, 0x078cd6ec4199a01fULL,
+ 0x4a584a41ad900d2fULL, 0x32142b78e2c74c52ULL, 0x68c4e8338431c978ULL, 0x7f69ea9008689fc2ULL,
+ 0x52f2c81e46a38265ULL, 0xfd78072d04a832fdULL, 0x8cd7d5fa25359e94ULL, 0x4de71b7454cc29d2ULL,
+ 0x42eb60ad1eda6ac9ULL, 0x0aad37dfdbc09c3aULL, 0x81004b71e33cc191ULL, 0x44e6be345122803cULL,
+ 0x03fe8388ba1920dbULL, 0xf5d57c32150db008ULL, 0x49c8c4281af60c29ULL, 0x21edb518de701aeeULL,
+ 0x7fb63e418f06dc99ULL, 0xa4460d99c166d7b8ULL, 0x24dd5248ce520a83ULL, 0x5ec3ad712b928358ULL,
+ 0x15022a5fbd17930fULL, 0xa4f64a77d82570e3ULL, 0x12bc8d6915783712ULL, 0x498194c0fc620abbULL,
+ 0x38a2d9d255686c82ULL, 0x785c6bd9193e21f0ULL, 0xe4d5c81ab24a5484ULL, 0x56307860b2e20989ULL,
+ 0x429d55f78b4d74c4ULL, 0x22f1834643350131ULL, 0x1e60c24598c71fffULL, 0x59f2f014979983efULL,
+ 0x46a47d56eb494a44ULL, 0x3e22a854d636a18eULL, 0xb346e15274491c3bULL, 0x2ceafd4e5390cde7ULL,
+ 0xba8a8538be0d6675ULL, 0x4b9074bb50818e23ULL, 0xcbdab89085d304c3ULL, 0x61a24fe0e56192c4ULL,
+ 0xcb7615e6db525bcbULL, 0xdd7d8c35a567e4caULL, 0xe6b4153acafcdd69ULL, 0x2d668e097f3c9766ULL,
+ 0xa57e7e265ce55ef0ULL, 0x5d9f4e527cd4b967ULL, 0xfbc83606492fd1e5ULL, 0x090d52beb7c3f7aeULL,
+ 0x09b9515a1e7b4d7cULL, 0x1f266a2599da44c0ULL, 0xa1c49548e2c55504ULL, 0x7ef04287126f15ccULL,
+ 0xfed1659dbd30ef15ULL, 0x8b4ab9eec4e0277bULL, 0x884d6236a5df3291ULL, 0x1fd96ea6bf5cf788ULL,
+ 0x42a161981f190d9aULL, 0x61d849507e6052c1ULL, 0x9fe113bf285a2cd5ULL, 0x7c22d676dbad85d8ULL,
+ 0x82e770ed2bfbd27dULL, 0x4c05b2ece996f5a5ULL, 0xcd40a9c2b0900150ULL, 0x5895319213d9bf64ULL,
+ 0xe7cc5d703fea2e08ULL, 0xb50c491258e2188cULL, 0xcce30baa48205bf0ULL, 0x537c659ccfa32d62ULL,
+ 0x37b6623a98cfc088ULL, 0xfe9bed1fa4d6aca4ULL, 0x04d29b8e56a8d1b0ULL, 0x725f71c40b519575ULL,
+ 0x28c7f89cd0339ce6ULL, 0x8367b14469ddc18bULL, 0x883ada83a6a1652cULL, 0x585f1974034d6c17ULL,
+ 0x89cfb266f1b19188ULL, 0xe63b4863e7c35217ULL, 0xd88c9da6b4c0526aULL, 0x3e035c9df0954635ULL,
+ 0xdd9d5412fb45de9dULL, 0xdd684532e4cff40dULL, 0x4b5c999b151d671cULL, 0x2d8c2cc811e7f690ULL,
+ 0x7f54be1d90055d40ULL, 0xa464c5df464aaf40ULL, 0x33979624f0e917beULL, 0x2c018dc527356b30ULL,
+ 0xa5415024e330b3d4ULL, 0x73ff3d96691652d3ULL, 0x94ec42c4ef9b59f1ULL, 0x0747201618d08e5aULL,
+ 0x4d6ca48aca411c53ULL, 0x66415f2fcfa66119ULL, 0x9c4dd40051e227ffULL, 0x59810bc09a02f7ebULL,
+ 0x2a7eb171b3dc101dULL, 0x441c5ab99ffef68eULL, 0x32025c9b93b359eaULL, 0x5e8ce0a71e9d112fULL,
+ 0xbfcccb92429503fdULL, 0xd271ba752f095d55ULL, 0x345ead5e972d091eULL, 0x18c8df11a83103baULL,
+ 0x90cd949a9aed0f4cULL, 0xc5d1f4cb6660e37eULL, 0xb8cac52d56c52e0bULL, 0x6e42e400c5808e0dULL,
+ 0xa3b46966eeaefd23ULL, 0x0c4f1f0be39ecdcaULL, 0x189dc8c9d683a51dULL, 0x51f27f054c09351bULL,
+ 0x4c487ccd2a320682ULL, 0x587ea95bb3df1c96ULL, 0xc8ccf79e555cb8e8ULL, 0x547dc829a206d73dULL,
+ 0xb822a6cd80c39b06ULL, 0xe96d54732000d4c6ULL, 0x28535b6f91463b4dULL, 0x228f4660e2486e1dULL,
+ 0x98799538de8d3abfULL, 0x8cd8330045ebca6eULL, 0x79952a008221e738ULL, 0x4322e1a7535cd2bbULL,
+ 0xb114c11819d1801cULL, 0x2016e4d84f3f5ec7ULL, 0xdd0e2df409260f4cULL, 0x5ec362c0ae5f7266ULL,
+ 0xc0462b18b8b2b4eeULL, 0x7cc8d950274d1afbULL, 0xf25f7105436b02d2ULL, 0x43bbf8dcbff9ccd3ULL,
+ 0xb6ad1767a039e9dfULL, 0xb0714da8f69d3583ULL, 0x5e55fa18b42931f5ULL, 0x4ed5558f33c60961ULL,
+ 0x1fe37901c647a5ddULL, 0x593ddf1f8081d357ULL, 0x0249a4fd813fd7a6ULL, 0x69acca274e9caf61ULL,
+ 0x047ba3ea330721c9ULL, 0x83423fc20e7e1ea0ULL, 0x1df4c0af01314a60ULL, 0x09a62dab89289527ULL,
+ 0xa5b325a49cc6cb00ULL, 0xe94b5dc654b56cb6ULL, 0x3be28779adc994a0ULL, 0x4296e8f8ba3a4aadULL,
+ 0x328689761e451eabULL, 0x2e4d598bff59594aULL, 0x49b96853d7a7084aULL, 0x4980a319601420a8ULL,
+ 0x9565b9e12f552c42ULL, 0x8a5318db7100fe96ULL, 0x05c90b4d43add0d7ULL, 0x538b4cd66a5d4edaULL,
+ 0xf4e94fc3e89f039fULL, 0x592c9af26f618045ULL, 0x08a36eb5fd4b9550ULL, 0x25fffaf6c2ed1419ULL,
+ 0x34434459cc79d354ULL, 0xeeecbfb4b1d5476bULL, 0xddeb34a061615d99ULL, 0x5129cecceb64b773ULL,
+ 0xee43215894993520ULL, 0x772f9c7cf14c0b3bULL, 0xd2e2fce306bedad5ULL, 0x715f42b546f06a97ULL,
+ 0x434ecdceda5b5f1aULL, 0x0da17115a49741a9ULL, 0x680bd77c73edad2eULL, 0x487c02354edd9041ULL,
+ 0xb8efeff3a70ed9c4ULL, 0x56a32aa3e857e302ULL, 0xdf3a68bd48a2a5a0ULL, 0x07f650b73176c444ULL,
+ 0xe38b9b1626e0ccb1ULL, 0x79e053c18b09fb36ULL, 0x56d90319c9f94964ULL, 0x1ca941e7ac9ff5c4ULL,
+ 0x49c4df29162fa0bbULL, 0x8488cf3282b33305ULL, 0x95dfda14cabb437dULL, 0x3391f78264d5ad86ULL,
+ 0x729ae06ae2b5095dULL, 0xd58a58d73259a946ULL, 0xe9834262d13921edULL, 0x27fedafaa54bb592ULL,
+ 0xa99dc5b829ad48bbULL, 0x5f025742499ee260ULL, 0x802c8ecd5d7513fdULL, 0x78ceb3ef3f6dd938ULL,
+ 0xc342f44f8a135d94ULL, 0x7b9edb44828cdda3ULL, 0x9436d11a0537cfe7ULL, 0x5064b164ec1ab4c8ULL,
+ 0x7020eccfd37eb2fcULL, 0x1f31ea3ed90d25fcULL, 0x1b930d7bdfa1bb34ULL, 0x5344467a48113044ULL,
+ 0x70073170f25e6dfbULL, 0xe385dc1a50114cc8ULL, 0x2348698ac8fc4f00ULL, 0x2a77a55284dd40d8ULL,
+ 0xfe06afe0c98c6ce4ULL, 0xc235df96dddfd6e4ULL, 0x1428d01e33bf1ed3ULL, 0x785768ec9300bdafULL,
+ 0x9702e57a91deb63bULL, 0x61bdb8bfe5ce8b80ULL, 0x645b426f3d1d58acULL, 0x4804a82227a557bcULL,
+ 0x8e57048ab44d2601ULL, 0x68d6501a4b3a6935ULL, 0xc39c9ec3f9e1c293ULL, 0x4172f257d4de63e2ULL,
+ 0xd368b450330c6401ULL, 0x040d3017418f2391ULL, 0x2c34bb6090b7d90dULL, 0x16f649228fdfd51fULL,
+ 0xbea6818e2b928ef5ULL, 0xe28ccf91cdc11e72ULL, 0x594aaa68e77a36cdULL, 0x313034806c7ffd0fULL,
+ 0x8a9d27ac2249bd65ULL, 0x19a3b464018e9512ULL, 0xc26ccff352b37ec7ULL, 0x056f68341d797b21ULL,
+ 0x5e79d6757efd2327ULL, 0xfabdbcb6553afe15ULL, 0xd3e7222c6eaf5a60ULL, 0x7046c76d4dae743bULL,
+ 0x660be872b18d4a55ULL, 0x19992518574e1496ULL, 0xc103053a302bdcbbULL, 0x3ed8e9800b218e8eULL,
+ 0x7b0b9239fa75e03eULL, 0xefe9fb684633c083ULL, 0x98a35fbe391a7793ULL, 0x6065510fe2d0fe34ULL,
+ 0x55cb668548abad0cULL, 0xb4584548da87e527ULL, 0x2c43ecea0107c1ddULL, 0x526028809372de35ULL,
+ 0x3415c56af9213b1fULL, 0x5bee1a4d017e98dbULL, 0x13f6b105b5cf709bULL, 0x5ff20e3482b29ab6ULL,
+ 0x0aa29c75cc2e6c90ULL, 0xfc7d73ca3a70e206ULL, 0x899fc38fc4b5c515ULL, 0x250386b124ffc207ULL,
+ 0x54ea28d5ae3d2b56ULL, 0x9913149dd6de60ceULL, 0x16694fc58f06d6c1ULL, 0x46b23975eb018fc7ULL,
+ 0x470a6a0fb4b7b4e2ULL, 0x5d92475a8f7253deULL, 0xabeee5b52fbd3adbULL, 0x7fa20801a0806968ULL,
+ 0x76f3faf19f7714d2ULL, 0xb3e840c12f4660c3ULL, 0x0fb4cd8df212744eULL, 0x4b065a251d3a2dd2ULL,
+ 0x5cebde383d77cd4aULL, 0x6adf39df882c9cb1ULL, 0xa2dd242eb09af759ULL, 0x3147c0e50e5f6422ULL,
+ 0x164ca5101d1350dbULL, 0xf8d13479c33fc962ULL, 0xe640ce4d13e5da08ULL, 0x4bdee0c45061f8baULL,
+ 0xd7c46dc1a4edb1c9ULL, 0x5514d7b6437fd98aULL, 0x58942f6bb2a1c00bULL, 0x2dffb2ab1d70710eULL,
+ 0xccdfcf2fc18b6d68ULL, 0xa8ebcba8b7806167ULL, 0x980697f95e2937e3ULL, 0x02fbba1cd0126e8cULL
+};
-static void curve25519_bmi2_base(u8 session_key[CURVE25519_KEY_SIZE],
- const u8 private_key[CURVE25519_KEY_SIZE])
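+/* Fixed-base scalar multiplication for the generator. The two ladder
+ * accumulators start at u = 1 and u = u(P - S) = p_minus_s, and each
+ * step folds in table_ladder[k] = (x + 1)/(x - 1) for 2^k * basepoint
+ * (see the sage script above). */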
+static void curve25519_ever64_base(u8 *out, const u8 *priv)
{
- struct {
- u64 buffer[4 * NUM_WORDS_ELTFP25519];
- u64 coordinates[4 * NUM_WORDS_ELTFP25519];
- u64 workspace[4 * NUM_WORDS_ELTFP25519];
- u8 private[CURVE25519_KEY_SIZE];
- } __aligned(32) m;
-
- const int ite[4] = { 64, 64, 64, 63 };
- const int q = 3;
u64 swap = 1;
-
- int i = 0, j = 0, k = 0;
- u64 *const key = (u64 *)m.private;
- u64 *const Ur1 = m.coordinates + 0;
- u64 *const Zr1 = m.coordinates + 4;
- u64 *const Ur2 = m.coordinates + 8;
- u64 *const Zr2 = m.coordinates + 12;
-
- u64 *const UZr1 = m.coordinates + 0;
- u64 *const ZUr2 = m.coordinates + 8;
-
- u64 *const A = m.workspace + 0;
- u64 *const B = m.workspace + 4;
- u64 *const C = m.workspace + 8;
- u64 *const D = m.workspace + 12;
-
- u64 *const AB = m.workspace + 0;
- u64 *const CD = m.workspace + 8;
-
- const u64 *const P = table_ladder_8k;
-
- memcpy(m.private, private_key, sizeof(m.private));
-
- curve25519_clamp_secret(m.private);
-
- setzero_eltfp25519_1w(Ur1);
- setzero_eltfp25519_1w(Zr1);
- setzero_eltfp25519_1w(Zr2);
- Ur1[0] = 1;
- Zr1[0] = 1;
- Zr2[0] = 1;
-
- /* G-S */
- Ur2[3] = 0x1eaecdeee27cab34UL;
- Ur2[2] = 0xadc7a0b9235d48e2UL;
- Ur2[1] = 0xbbf095ae14b2edf8UL;
- Ur2[0] = 0x7e94e1fec82faabdUL;
-
- /* main-loop */
- j = q;
- for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) {
- while (j < ite[i]) {
- u64 bit = (key[i] >> j) & 0x1;
- k = (64 * i + j - q);
+ int i, j, k;
+ u64 tmp[16 + 32 + 4];
+ u64 *x1 = &tmp[0];
+ u64 *z1 = &tmp[4];
+ u64 *x2 = &tmp[8];
+ u64 *z2 = &tmp[12];
+ u64 *xz1 = &tmp[0];
+ u64 *xz2 = &tmp[8];
+ u64 *a = &tmp[0 + 16];
+ u64 *b = &tmp[4 + 16];
+ u64 *c = &tmp[8 + 16];
+ u64 *ab = &tmp[0 + 16];
+ u64 *abcd = &tmp[0 + 16];
+ u64 *ef = &tmp[16 + 16];
+ u64 *efgh = &tmp[16 + 16];
+ u64 *key = &tmp[0 + 16 + 32];
+
+ memcpy(key, priv, 32);
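+ /* Clamp the scalar per RFC 7748 */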
+ ((u8 *)key)[0] &= 248;
+ ((u8 *)key)[31] = (((u8 *)key)[31] & 127) | 64;
+
+ x1[0] = 1, x1[1] = x1[2] = x1[3] = 0;
+ z1[0] = 1, z1[1] = z1[2] = z1[3] = 0;
+ z2[0] = 1, z2[1] = z2[2] = z2[3] = 0;
+ memcpy(x2, p_minus_s, sizeof(p_minus_s));
+
+ j = 3;
+ for (i = 0; i < 4; ++i) {
+ while (j < (const int[]){ 64, 64, 64, 63 }[i]) {
+ u64 bit = (key[i] >> j) & 1;
+ k = (64 * i + j - 3);
swap = swap ^ bit;
- cswap(swap, Ur1, Ur2);
- cswap(swap, Zr1, Zr2);
+ cswap2(swap, xz1, xz2);
swap = bit;
- /* Addition */
- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
- add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */
- mul_eltfp25519_1w_bmi2(C, &P[4 * k], B);/* C = M0-B */
- sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */
- add_eltfp25519_1w_bmi2(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */
- sqr_eltfp25519_2w_bmi2(AB); /* A = A^2 | B = B^2 */
- mul_eltfp25519_2w_bmi2(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */
+ fsub(b, x1, z1);
+ fadd(a, x1, z1);
+ fmul(c, &table_ladder[4 * k], b, ef);
+ fsub(b, a, c);
+ fadd(a, a, c);
+ fsqr2(ab, ab, efgh);
+ fmul2(xz1, xz2, ab, efgh);
++j;
}
j = 0;
}
- /* Doubling */
- for (i = 0; i < q; ++i) {
- add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */
- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */
- sqr_eltfp25519_2w_bmi2(AB); /* A = A**2 B = B**2 */
- copy_eltfp25519_1w(C, B); /* C = B */
- sub_eltfp25519_1w(B, A, B); /* B = A-B */
- mul_a24_eltfp25519_1w(D, B); /* D = my_a24*B */
- add_eltfp25519_1w_bmi2(D, D, C); /* D = D+C */
- mul_eltfp25519_2w_bmi2(UZr1, AB, CD); /* Ur1 = A*B Zr1 = Zr1*A */
- }
+ point_double(xz1, abcd, efgh);
+ point_double(xz1, abcd, efgh);
+ point_double(xz1, abcd, efgh);
+ encode_point(out, xz1);
- /* Convert to affine coordinates */
- inv_eltfp25519_1w_bmi2(A, Zr1);
- mul_eltfp25519_1w_bmi2((u64 *)session_key, Ur1, A);
- fred_eltfp25519_1w((u64 *)session_key);
-
- memzero_explicit(&m, sizeof(m));
+ memzero_explicit(tmp, sizeof(tmp));
}
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_bmi2_adx);
+
void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
- if (static_branch_likely(&curve25519_use_adx))
- curve25519_adx(mypublic, secret, basepoint);
- else if (static_branch_likely(&curve25519_use_bmi2))
- curve25519_bmi2(mypublic, secret, basepoint);
+ if (static_branch_likely(&curve25519_use_bmi2_adx))
+ curve25519_ever64(mypublic, secret, basepoint);
else
curve25519_generic(mypublic, secret, basepoint);
}
@@ -2355,10 +1395,8 @@ EXPORT_SYMBOL(curve25519_arch);
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
- if (static_branch_likely(&curve25519_use_adx))
- curve25519_adx_base(pub, secret);
- else if (static_branch_likely(&curve25519_use_bmi2))
- curve25519_bmi2_base(pub, secret);
+ if (static_branch_likely(&curve25519_use_bmi2_adx))
+ curve25519_ever64_base(pub, secret);
else
curve25519_generic(pub, secret, curve25519_base_point);
}
@@ -2449,12 +1487,11 @@ static struct kpp_alg curve25519_alg = {
.max_size = curve25519_max_size,
};
+
static int __init curve25519_mod_init(void)
{
- if (boot_cpu_has(X86_FEATURE_BMI2))
- static_branch_enable(&curve25519_use_bmi2);
- else if (boot_cpu_has(X86_FEATURE_ADX))
- static_branch_enable(&curve25519_use_adx);
+ if (boot_cpu_has(X86_FEATURE_BMI2) && boot_cpu_has(X86_FEATURE_ADX))
+ static_branch_enable(&curve25519_use_bmi2_adx);
else
return 0;
return IS_REACHABLE(CONFIG_CRYPTO_KPP) ?
@@ -2474,3 +1511,4 @@ module_exit(curve25519_mod_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-x86");
MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/arch/x86/entry/vdso/.gitignore b/arch/x86/entry/vdso/.gitignore
index aae8ffdd5880..37a6129d597b 100644
--- a/arch/x86/entry/vdso/.gitignore
+++ b/arch/x86/entry/vdso/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
vdsox32.lds
vdso32-syscall-syms.lds
diff --git a/arch/x86/entry/vdso/vdso32/.gitignore b/arch/x86/entry/vdso/vdso32/.gitignore
index e45fba9d0ced..5167384843b9 100644
--- a/arch/x86/entry/vdso/vdso32/.gitignore
+++ b/arch/x86/entry/vdso/vdso32/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso32.lds
diff --git a/arch/x86/entry/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
index 1e82bd43286c..84a4a73f77f7 100644
--- a/arch/x86/entry/vdso/vdso32/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define BUILD_VDSO32
-#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-#undef CONFIG_OPTIMIZE_INLINING
-#endif
-
#ifdef CONFIG_X86_64
/*
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index ea34464d6221..b19ec8282d50 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -10,5 +10,3 @@ generated-y += xen-hypercalls.h
generic-y += early_ioremap.h
generic-y += export.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 062cdecb2f24..53f246e9df5a 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -54,7 +54,7 @@ arch_set_bit(long nr, volatile unsigned long *addr)
if (__builtin_constant_p(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
: CONST_MASK_ADDR(nr, addr)
- : "iq" ((u8)CONST_MASK(nr))
+ : "iq" (CONST_MASK(nr) & 0xff)
: "memory");
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
@@ -74,7 +74,7 @@ arch_clear_bit(long nr, volatile unsigned long *addr)
if (__builtin_constant_p(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
: CONST_MASK_ADDR(nr, addr)
- : "iq" ((u8)~CONST_MASK(nr)));
+ : "iq" (CONST_MASK(nr) ^ 0xff));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
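With CONST_MASK(nr) being (1 << ((nr) & 7)) from earlier in bitops.h, the new "& 0xff" and "^ 0xff" forms encode the same one-byte immediates as the old u8 casts while keeping the operand an int-typed constant expression for the "iq" constraint. A standalone check of that equivalence (illustrative, not from the patch):

#include <assert.h>
#include <stdio.h>

typedef unsigned char u8;
#define CONST_MASK(nr) (1 << ((nr) & 7))

int main(void)
{
	for (int nr = 0; nr < 64; nr++) {
		/* arch_set_bit() immediate: old cast vs new masking */
		assert((u8)CONST_MASK(nr) == (CONST_MASK(nr) & 0xff));
		/* arch_clear_bit() immediate: old ~ plus cast vs new XOR */
		assert((u8)~CONST_MASK(nr) == (CONST_MASK(nr) ^ 0xff));
	}
	puts("byte immediates unchanged");
	return 0;
}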
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 92abc1e42bfc..29336574d0bc 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -376,6 +376,7 @@ struct hv_tsc_emulation_status {
#define HVCALL_SEND_IPI_EX 0x0015
#define HVCALL_POST_MESSAGE 0x005c
#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
@@ -405,6 +406,8 @@ enum HV_GENERIC_SET_FORMAT {
HV_GENERIC_SET_ALL,
};
+#define HV_PARTITION_ID_SELF ((u64)-1)
+
#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
#define HV_HYPERCALL_FAST_BIT BIT(16)
#define HV_HYPERCALL_VARHEAD_OFFSET 17
@@ -909,4 +912,42 @@ struct hv_tlb_flush_ex {
struct hv_partition_assist_pg {
u32 tlb_lock_count;
};
+
+union hv_msi_entry {
+ u64 as_uint64;
+ struct {
+ u32 address;
+ u32 data;
+ } __packed;
+};
+
+struct hv_interrupt_entry {
+ u32 source; /* 1 for MSI(-X) */
+ u32 reserved1;
+ union hv_msi_entry msi_entry;
+} __packed;
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target {
+ u32 vector;
+ u32 flags;
+ union {
+ u64 vp_mask;
+ struct hv_vpset vp_set;
+ };
+} __packed;
+
+/* HvRetargetDeviceInterrupt hypercall */
+struct hv_retarget_device_interrupt {
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
#endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 98959e8cd448..42a2d0d3984a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -49,13 +49,16 @@
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
+#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
+ KVM_DIRTY_LOG_INITIALLY_SET)
+
/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
-#define KVM_REQ_LOAD_CR3 KVM_ARCH_REQ(5)
+#define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
@@ -182,7 +185,10 @@ enum exit_fastpath_completion {
EXIT_FASTPATH_SKIP_EMUL_INS,
};
-#include <asm/kvm_emulate.h>
+struct x86_emulate_ctxt;
+struct x86_exception;
+enum x86_intercept;
+enum x86_intercept_stage;
#define KVM_NR_MEM_OBJS 40
@@ -297,7 +303,6 @@ union kvm_mmu_extended_role {
unsigned int cr4_pke:1;
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
- unsigned int cr4_la57:1;
unsigned int maxphyaddr:6;
};
};
@@ -382,8 +387,7 @@ struct kvm_mmu_root_info {
* current mmu mode.
*/
struct kvm_mmu {
- void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
- unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+ unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
bool prefault);
@@ -678,7 +682,7 @@ struct kvm_vcpu_arch {
/* emulate context */
- struct x86_emulate_ctxt emulate_ctxt;
+ struct x86_emulate_ctxt *emulate_ctxt;
bool emulate_regs_need_sync_to_vcpu;
bool emulate_regs_need_sync_from_vcpu;
int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
@@ -808,10 +812,6 @@ struct kvm_vcpu_arch {
int pending_ioapic_eoi;
int pending_external_vector;
- /* GPA available */
- bool gpa_available;
- gpa_t gpa_val;
-
/* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel;
@@ -890,6 +890,7 @@ enum kvm_irqchip_mode {
#define APICV_INHIBIT_REASON_NESTED 2
#define APICV_INHIBIT_REASON_IRQWIN 3
#define APICV_INHIBIT_REASON_PIT_REINJ 4
+#define APICV_INHIBIT_REASON_X2APIC 5
struct kvm_arch {
unsigned long n_used_mmu_pages;
@@ -920,6 +921,7 @@ struct kvm_arch {
atomic_t vapics_in_nmi_mode;
struct mutex apic_map_lock;
struct kvm_apic_map *apic_map;
+ bool apic_map_dirty;
bool apic_access_page_done;
unsigned long apicv_inhibit_reasons;
@@ -1052,19 +1054,14 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
}
struct kvm_x86_ops {
- int (*cpu_has_kvm_support)(void); /* __init */
- int (*disabled_by_bios)(void); /* __init */
int (*hardware_enable)(void);
void (*hardware_disable)(void);
- int (*check_processor_compatibility)(void);/* __init */
- int (*hardware_setup)(void); /* __init */
- void (*hardware_unsetup)(void); /* __exit */
+ void (*hardware_unsetup)(void);
bool (*cpu_has_accelerated_tpr)(void);
bool (*has_emulated_msr)(int index);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
- struct kvm *(*vm_alloc)(void);
- void (*vm_free)(struct kvm *);
+ unsigned int vm_size;
int (*vm_init)(struct kvm *kvm);
void (*vm_destroy)(struct kvm *kvm);
@@ -1090,7 +1087,6 @@ struct kvm_x86_ops {
void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
- void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
@@ -1153,13 +1149,8 @@ struct kvm_x86_ops {
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
int (*get_tdp_level)(struct kvm_vcpu *vcpu);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
- int (*get_lpage_level)(void);
- bool (*rdtscp_supported)(void);
- bool (*invpcid_supported)(void);
-
- void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
- void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+ void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long cr3);
bool (*has_wbinvd_exit)(void);
@@ -1171,16 +1162,12 @@ struct kvm_x86_ops {
int (*check_intercept)(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
- enum x86_intercept_stage stage);
+ enum x86_intercept_stage stage,
+ struct x86_exception *exception);
void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion *exit_fastpath);
- bool (*mpx_supported)(void);
- bool (*xsaves_supported)(void);
- bool (*umip_emulated)(void);
- bool (*pt_supported)(void);
- bool (*pku_supported)(void);
- int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+ int (*check_nested_events)(struct kvm_vcpu *vcpu);
void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
@@ -1269,6 +1256,15 @@ struct kvm_x86_ops {
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};
+struct kvm_x86_init_ops {
+ int (*cpu_has_kvm_support)(void);
+ int (*disabled_by_bios)(void);
+ int (*check_processor_compatibility)(void);
+ int (*hardware_setup)(void);
+
+ struct kvm_x86_ops *runtime_ops;
+};
+
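/*
 * Illustrative wiring only (all example_* names invented): the vendor
 * module keeps an __initdata table of the setup-only hooks split out
 * above, pointing at its runtime kvm_x86_ops instance via runtime_ops.
 */
static struct kvm_x86_ops example_x86_ops = {
	.hardware_enable = example_hardware_enable,
	/* ... runtime hooks ... */
};

static struct kvm_x86_init_ops example_init_ops __initdata = {
	.cpu_has_kvm_support = example_cpu_has_kvm_support,
	.disabled_by_bios = example_disabled_by_bios,
	.check_processor_compatibility = example_check_processor_compat,
	.hardware_setup = example_hardware_setup,
	.runtime_ops = &example_x86_ops,
};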
struct kvm_arch_async_pf {
u32 token;
gfn_t gfn;
@@ -1276,25 +1272,24 @@ struct kvm_arch_async_pf {
bool direct_map;
};
-extern struct kvm_x86_ops *kvm_x86_ops;
+extern u64 __read_mostly host_efer;
+
+extern struct kvm_x86_ops kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return kvm_x86_ops->vm_alloc();
-}
-
-static inline void kvm_arch_free_vm(struct kvm *kvm)
-{
- return kvm_x86_ops->vm_free(kvm);
+ return __vmalloc(kvm_x86_ops.vm_size,
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
}
+void kvm_arch_free_vm(struct kvm *kvm);
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
- if (kvm_x86_ops->tlb_remote_flush &&
- !kvm_x86_ops->tlb_remote_flush(kvm))
+ if (kvm_x86_ops.tlb_remote_flush &&
+ !kvm_x86_ops.tlb_remote_flush(kvm))
return 0;
else
return -ENOTSUPP;
@@ -1313,7 +1308,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
- struct kvm_memory_slot *memslot);
+ struct kvm_memory_slot *memslot,
+ int start_level);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
@@ -1379,10 +1375,11 @@ extern u64 kvm_mce_cap_supported;
*
* EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
* decode the instruction length. For use *only* by
- * kvm_x86_ops->skip_emulated_instruction() implementations.
+ * kvm_x86_ops.skip_emulated_instruction() implementations.
*
- * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
- * retry native execution under certain conditions.
+ * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
+ * retry native execution under certain conditions.
+ * Can only be set in conjunction with EMULTYPE_PF.
*
* EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
* triggered by KVM's magic "force emulation" prefix,
@@ -1395,13 +1392,18 @@ extern u64 kvm_mce_cap_supported;
* backdoor emulation, which is opt in via module param.
* VMware backdoor emulation handles select instructions
* and reinjects the #GP for all other cases.
+ *
+ * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
+ * case the CR2/GPA value passed on the stack is valid.
*/
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
-#define EMULTYPE_ALLOW_RETRY (1 << 3)
+#define EMULTYPE_ALLOW_RETRY_PF (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED (1 << 4)
#define EMULTYPE_VMWARE_GP (1 << 5)
+#define EMULTYPE_PF (1 << 6)
+
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
void *insn, int insn_len);
@@ -1414,8 +1416,6 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
-struct x86_emulate_ctxt;
-
int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
@@ -1512,8 +1512,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
-void kvm_enable_tdp(void);
-void kvm_disable_tdp(void);
+void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);
static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception)
@@ -1670,14 +1669,14 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops->vcpu_blocking)
- kvm_x86_ops->vcpu_blocking(vcpu);
+ if (kvm_x86_ops.vcpu_blocking)
+ kvm_x86_ops.vcpu_blocking(vcpu);
}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops->vcpu_unblocking)
- kvm_x86_ops->vcpu_unblocking(vcpu);
+ if (kvm_x86_ops.vcpu_unblocking)
+ kvm_x86_ops.vcpu_unblocking(vcpu);
}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
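With kvm_x86_ops now a struct instance rather than a pointer, call sites switch from "->" to "." but optional hooks are still NULL-checked, as in kvm_arch_flush_remote_tlb() above. A freestanding model of that calling pattern (types and names invented for illustration):

#include <stdio.h>

/* Invented stand-in for kvm_x86_ops with one optional hook. */
struct ops {
	int (*remote_flush)(int vm);	/* may legitimately be NULL */
};

static int fast_flush(int vm)
{
	printf("fast flush for vm %d\n", vm);
	return 0;
}

static struct ops x86_ops = { .remote_flush = fast_flush };

static int flush_remote_tlb(int vm)
{
	/* Same shape as kvm_arch_flush_remote_tlb() after the patch:
	 * a '.' dereference on the instance, hook still optional. */
	if (x86_ops.remote_flush && !x86_ops.remote_flush(vm))
		return 0;
	return -1;	/* caller falls back to the generic path */
}

int main(void)
{
	return flush_remote_tlb(1) ? 1 : 0;
}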
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index 172f9749dbb2..87bd6025d91d 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -49,8 +49,7 @@ struct kvm_page_track_notifier_node {
void kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm);
-void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
+void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
unsigned long npages);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index b538d9ddee9c..4e55370e48e8 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -213,21 +213,6 @@ static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
* So do not enforce things if the VMA is not from the current
* mm, or if we are in a kernel thread.
*/
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
-{
- if (!current->mm)
- return true;
- /*
- * Should PKRU be enforced on the access to this VMA? If
- * the VMA is from another process, then PKRU has no
- * relevance and should not be enforced.
- */
- if (current->mm != vma->vm_mm)
- return true;
-
- return false;
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index edc2c581704a..1c42ecbe75cb 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/nmi.h>
+#include <linux/msi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
@@ -242,6 +243,13 @@ bool hv_vcpu_is_preempted(int vcpu);
static inline void hv_apic_init(void) {}
#endif
+static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
+ struct msi_desc *msi_desc)
+{
+ msi_entry->address = msi_desc->msg.address_lo;
+ msi_entry->data = msi_desc->msg.data;
+}
+
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
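A userspace model of the HvRetargetDeviceInterrupt input declared in hyperv-tlfs.h earlier, with the hv_vpset variant of the target simplified down to the plain vp_mask form; the two assignments at the end mirror what hv_set_msi_entry_from_desc() copies out of the msi_desc. All field values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

union hv_msi_entry {
	uint64_t as_uint64;
	struct {
		uint32_t address;
		uint32_t data;
	} __attribute__((packed));
};

struct hv_interrupt_entry {
	uint32_t source;			/* 1 for MSI(-X) */
	uint32_t reserved1;
	union hv_msi_entry msi_entry;
} __attribute__((packed));

struct hv_device_interrupt_target {
	uint32_t vector;
	uint32_t flags;
	uint64_t vp_mask;			/* hv_vpset form omitted */
} __attribute__((packed));

struct hv_retarget_device_interrupt {
	uint64_t partition_id;
	uint64_t device_id;
	struct hv_interrupt_entry int_entry;
	uint64_t reserved2;
	struct hv_device_interrupt_target int_target;
} __attribute__((packed, aligned(8)));

int main(void)
{
	struct hv_retarget_device_interrupt in = {
		.partition_id = (uint64_t)-1,	/* HV_PARTITION_ID_SELF */
		.int_entry.source = 1,		/* MSI */
		.int_target.vector = 0x21,	/* made-up vector */
		.int_target.vp_mask = 1,	/* target VP 0 */
	};

	/* what hv_set_msi_entry_from_desc() copies from the msi_desc */
	in.int_entry.msi_entry.address = 0xfee00000u;	/* made up */
	in.int_entry.msi_entry.data = 0x4021u;		/* made up */

	printf("hypercall input is %zu bytes\n", sizeof(in));
	return 0;
}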
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index afda66a6d325..28838d790191 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -25,6 +25,7 @@
#include <asm/x86_init.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/api.h>
+#include <asm-generic/pgtable_uffd.h>
extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
@@ -313,6 +314,23 @@ static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
return native_make_pte(v & ~clear);
}
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static inline int pte_uffd_wp(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_UFFD_WP;
+}
+
+static inline pte_t pte_mkuffd_wp(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_UFFD_WP);
+}
+
+static inline pte_t pte_clear_uffd_wp(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_UFFD_WP);
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
static inline pte_t pte_mkclean(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_DIRTY);
@@ -392,6 +410,23 @@ static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
return native_make_pmd(v & ~clear);
}
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static inline int pmd_uffd_wp(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_UFFD_WP;
+}
+
+static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_UFFD_WP);
+}
+
+static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
static inline pmd_t pmd_mkold(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_ACCESSED);
@@ -1374,6 +1409,38 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
#endif
#endif
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
+}
+
+static inline int pte_swp_uffd_wp(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
+}
+
+static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
+}
+
+static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
+}
+
+static inline int pmd_swp_uffd_wp(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
+}
+
+static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2
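The pte helpers added above are plain accessors on one software bit (_PAGE_BIT_SOFTW2, i.e. bit 10), and the pmd and swap variants are identical in shape. A runnable model over a raw pteval (a sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define _PAGE_BIT_UFFD_WP 10	/* _PAGE_BIT_SOFTW2 */
#define _PAGE_UFFD_WP ((pteval_t)1 << _PAGE_BIT_UFFD_WP)

static pteval_t pte_mkuffd_wp(pteval_t pte)
{
	return pte | _PAGE_UFFD_WP;
}

static pteval_t pte_clear_uffd_wp(pteval_t pte)
{
	return pte & ~_PAGE_UFFD_WP;
}

static int pte_uffd_wp(pteval_t pte)
{
	return !!(pte & _PAGE_UFFD_WP);
}

int main(void)
{
	pteval_t pte = 0x8000000012345067ULL;	/* arbitrary pte image */

	assert(!pte_uffd_wp(pte));
	pte = pte_mkuffd_wp(pte);
	assert(pte_uffd_wp(pte));
	pte = pte_clear_uffd_wp(pte);
	assert(!pte_uffd_wp(pte));
	return 0;
}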
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 0b6c4042942a..df1373415f11 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -189,7 +189,7 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
*
* | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number
* | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
- * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
+ * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|F|SD|0| <- swp entry
*
* G (8) is aliased and used as a PROT_NONE indicator for
* !present ptes. We need to start storing swap entries above
@@ -197,9 +197,15 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
* erratum where they can be incorrectly set by hardware on
* non-present PTEs.
*
+ * Bits 1-4 are not used in non-present format and available for
+ * special use described below:
+ *
* SD (1) in swp entry is used to store soft dirty bit, which helps us
* remember soft dirty over page migration
*
+ * F (2) in swp entry is used to record when a pagetable is
+ * writeprotected by userfaultfd WP support.
+ *
* Bit 7 in swp entry should be 0 because pmd_present checks not only P,
* but also L and G.
*
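A small model of the non-present layout described above: TYPE in bits 59-63, the inverted offset in bits 9-58, SD at bit 1 and F at bit 2, with the present bit always clear. The real encoding lives in the __swp_* macros; this is only illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT	59
#define SWP_OFFSET_SHIFT 9
#define SWP_OFFSET_BITS	50
#define SWP_F		(1ULL << 2)	/* uffd-wp marker */

static uint64_t mk_swp(uint64_t type, uint64_t offset, int uffd_wp)
{
	uint64_t e = (type << SWP_TYPE_SHIFT) |
		     ((~offset & ((1ULL << SWP_OFFSET_BITS) - 1))
		      << SWP_OFFSET_SHIFT);
	return uffd_wp ? e | SWP_F : e;
}

int main(void)
{
	uint64_t e = mk_swp(3, 0x1234, 1);

	assert(e >> SWP_TYPE_SHIFT == 3);
	assert((~(e >> SWP_OFFSET_SHIFT) & ((1ULL << SWP_OFFSET_BITS) - 1))
	       == 0x1234);
	assert(e & SWP_F);			/* write protect recorded */
	assert(!(e & 1));			/* not present */
	printf("swp entry %#llx\n", (unsigned long long)e);
	return 0;
}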
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 65c2ecd730c5..b6606fe6cfdf 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -32,6 +32,7 @@
#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
+#define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
@@ -100,6 +101,14 @@
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+#define _PAGE_UFFD_WP (_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP)
+#define _PAGE_SWP_UFFD_WP _PAGE_USER
+#else
+#define _PAGE_UFFD_WP (_AT(pteval_t, 0))
+#define _PAGE_SWP_UFFD_WP (_AT(pteval_t, 0))
+#endif
+
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
@@ -118,7 +127,8 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
+ _PAGE_UFFD_WP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index bd924d6b4e68..d8f283b9a569 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -126,11 +126,17 @@ extern int __get_user_bad(void);
})
/*
- * This is a type: either unsigned long, if the argument fits into
- * that type, or otherwise unsigned long long.
+ * This is the smallest unsigned integer type that can fit a value
+ * (up to 'long long')
*/
-#define __inttype(x) \
-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+#define __inttype(x) __typeof__( \
+ __typefits(x,char, \
+ __typefits(x,short, \
+ __typefits(x,int, \
+ __typefits(x,long,0ULL)))))
+
+#define __typefits(x,type,not) \
+ __builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
/**
* get_user - Get a simple variable from user space.
@@ -198,7 +204,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
- __put_user_goto(x, ptr, "q", "", "er", label)
+ __put_user_goto(x, ptr, "q", "er", label)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
@@ -262,13 +268,13 @@ do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __put_user_goto(x, ptr, "b", "b", "iq", label); \
+ __put_user_goto(x, ptr, "b", "iq", label); \
break; \
case 2: \
- __put_user_goto(x, ptr, "w", "w", "ir", label); \
+ __put_user_goto(x, ptr, "w", "ir", label); \
break; \
case 4: \
- __put_user_goto(x, ptr, "l", "k", "ir", label); \
+ __put_user_goto(x, ptr, "l", "ir", label); \
break; \
case 8: \
__put_user_goto_u64(x, ptr, label); \
@@ -283,25 +289,27 @@ do { \
({ \
__typeof__(ptr) __ptr = (ptr); \
asm volatile("\n" \
- "1: movl %2,%%eax\n" \
- "2: movl %3,%%edx\n" \
+ "1: movl %[lowbits],%%eax\n" \
+ "2: movl %[highbits],%%edx\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
- "4: mov %4,%0\n" \
+ "4: mov %[efault],%[errout]\n" \
" xorl %%eax,%%eax\n" \
" xorl %%edx,%%edx\n" \
" jmp 3b\n" \
".previous\n" \
_ASM_EXTABLE_UA(1b, 4b) \
_ASM_EXTABLE_UA(2b, 4b) \
- : "=r" (retval), "=&A"(x) \
- : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
- "i" (-EFAULT), "0" (retval)); \
+ : [errout] "=r" (retval), \
+ [output] "=&A"(x) \
+ : [lowbits] "m" (__m(__ptr)), \
+ [highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
+ [efault] "i" (-EFAULT), "0" (retval)); \
})
#else
#define __get_user_asm_u64(x, ptr, retval) \
- __get_user_asm(x, ptr, retval, "q", "", "=r")
+ __get_user_asm(x, ptr, retval, "q", "=r")
#endif
#define __get_user_size(x, ptr, size, retval) \
@@ -310,13 +318,13 @@ do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __get_user_asm(x, ptr, retval, "b", "b", "=q"); \
+ __get_user_asm(x, ptr, retval, "b", "=q"); \
break; \
case 2: \
- __get_user_asm(x, ptr, retval, "w", "w", "=r"); \
+ __get_user_asm(x, ptr, retval, "w", "=r"); \
break; \
case 4: \
- __get_user_asm(x, ptr, retval, "l", "k", "=r"); \
+ __get_user_asm(x, ptr, retval, "l", "=r"); \
break; \
case 8: \
__get_user_asm_u64(x, ptr, retval); \
@@ -326,18 +334,20 @@ do { \
} \
} while (0)
-#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
+#define __get_user_asm(x, addr, err, itype, ltype) \
asm volatile("\n" \
- "1: mov"itype" %2,%"rtype"1\n" \
+ "1: mov"itype" %[umem],%[output]\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " xor"itype" %"rtype"1,%"rtype"1\n" \
+ "3: mov %[efault],%[errout]\n" \
+ " xor"itype" %[output],%[output]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_UA(1b, 3b) \
- : "=r" (err), ltype(x) \
- : "m" (__m(addr)), "i" (-EFAULT), "0" (err))
+ : [errout] "=r" (err), \
+ [output] ltype(x) \
+ : [umem] "m" (__m(addr)), \
+ [efault] "i" (-EFAULT), "0" (err))
#define __put_user_nocheck(x, ptr, size) \
({ \
@@ -376,10 +386,10 @@ struct __large_struct { unsigned long buf[100]; };
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
-#define __put_user_goto(x, addr, itype, rtype, ltype, label) \
+#define __put_user_goto(x, addr, itype, ltype, label) \
asm_volatile_goto("\n" \
- "1: mov"itype" %"rtype"0,%1\n" \
- _ASM_EXTABLE_UA(1b, %l2) \
+ "1: mov"itype" %0,%1\n" \
+ _ASM_EXTABLE_UA(1b, %l2) \
: : ltype(x), "m" (__m(addr)) \
: : label)
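The __inttype()/__typefits() rework at the top of this file's diff can be exercised standalone, since it relies only on the __builtin_choose_expr and __typeof__ extensions:

#include <stdio.h>

#define __typefits(x, type, not) \
	__builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)

#define __inttype(x) __typeof__(	\
	__typefits(x, char,		\
	__typefits(x, short,		\
	__typefits(x, int,		\
	__typefits(x, long, 0ULL)))))

int main(void)
{
	char c; short s; int i; long long ll;

	/* sizeof(__inttype(x)) is the smallest unsigned type fitting x */
	printf("%zu %zu %zu %zu\n",
	       sizeof(__inttype(c)),	/* 1 */
	       sizeof(__inttype(s)),	/* 2 */
	       sizeof(__inttype(i)),	/* 4 */
	       sizeof(__inttype(ll)));	/* 8 */
	return 0;
}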
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 8521af3fef27..5e090d1f03f8 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -500,6 +500,18 @@ enum vmcs_field {
VMX_EPT_EXECUTABLE_MASK)
#define VMX_EPT_MT_MASK (7ull << VMX_EPT_MT_EPTE_SHIFT)
+static inline u8 vmx_eptp_page_walk_level(u64 eptp)
+{
+ u64 encoded_level = eptp & VMX_EPTP_PWL_MASK;
+
+ if (encoded_level == VMX_EPTP_PWL_5)
+ return 5;
+
+ /* @eptp must be pre-validated by the caller. */
+ WARN_ON_ONCE(encoded_level != VMX_EPTP_PWL_4);
+ return 4;
+}
+
/* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */
#define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \
VMX_EPT_EXECUTABLE_MASK)
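vmx_eptp_page_walk_level() decodes EPTP bits 5:3, which hold the page-walk length minus one. Assuming the VMX_EPTP_PWL_* values from vmx.h (0x18 for a 4-level walk, 0x20 for a 5-level walk), the helper can be modeled directly:

#include <assert.h>
#include <stdint.h>

#define VMX_EPTP_PWL_MASK	0x38ull
#define VMX_EPTP_PWL_4		0x18ull
#define VMX_EPTP_PWL_5		0x20ull

static uint8_t eptp_page_walk_level(uint64_t eptp)
{
	/* mirrors vmx_eptp_page_walk_level(); caller pre-validates */
	return (eptp & VMX_EPTP_PWL_MASK) == VMX_EPTP_PWL_5 ? 5 : 4;
}

int main(void)
{
	uint64_t root = 0x12345000ull;	/* made-up EPT root base */

	assert(eptp_page_walk_level(root | VMX_EPTP_PWL_4) == 4);
	assert(eptp_page_walk_level(root | VMX_EPTP_PWL_5) == 5);
	return 0;
}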
diff --git a/arch/x86/kernel/.gitignore b/arch/x86/kernel/.gitignore
index 08f4fd731469..ef66569e7e22 100644
--- a/arch/x86/kernel/.gitignore
+++ b/arch/x86/kernel/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
vsyscall.lds
vsyscall_32.lds
vmlinux.lds
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bb5abfef0256..ba89cabe5fcf 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -155,6 +155,4 @@ ifeq ($(CONFIG_X86_64),y)
obj-y += vsmp_64.o
endif
-ifdef CONFIG_EFI
-obj-$(CONFIG_IMA) += ima_arch.o
-endif
+obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index caf2edccbad2..49ae4e1ac9cd 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -161,7 +161,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
/* Make sure we are running on right CPU */
- retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
+ retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
+ false);
if (retval == 0) {
/* Use the hint in CST */
percpu_entry->states[cx->index].eax = cx->address;
diff --git a/arch/x86/kernel/cpu/.gitignore b/arch/x86/kernel/cpu/.gitignore
index 667df55a4399..0bca7ef7426a 100644
--- a/arch/x86/kernel/cpu/.gitignore
+++ b/arch/x86/kernel/cpu/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
capflags.c
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index b1c469446b07..901cd1fdecd9 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -24,6 +24,13 @@
#include "trace.h"
#include "pmu.h"
+/*
+ * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
+ * aligned to sizeof(unsigned long) because it's not accessed via bitops.
+ */
+u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+EXPORT_SYMBOL_GPL(kvm_cpu_caps);
+
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
int feature_bit = 0;
@@ -45,23 +52,6 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
return ret;
}
-bool kvm_mpx_supported(void)
-{
- return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
- && kvm_x86_ops->mpx_supported());
-}
-EXPORT_SYMBOL_GPL(kvm_mpx_supported);
-
-u64 kvm_supported_xcr0(void)
-{
- u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
-
- if (!kvm_mpx_supported())
- xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
-
- return xcr0;
-}
-
#define F feature_bit
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
@@ -74,32 +64,24 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
return 0;
/* Update OSXSAVE bit */
- if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
- best->ecx &= ~F(OSXSAVE);
- if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
- best->ecx |= F(OSXSAVE);
- }
+ if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1)
+ cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
+ kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
- best->edx &= ~F(APIC);
- if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
- best->edx |= F(APIC);
+ cpuid_entry_change(best, X86_FEATURE_APIC,
+ vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
if (apic) {
- if (best->ecx & F(TSC_DEADLINE_TIMER))
+ if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
apic->lapic_timer.timer_mode_mask = 3 << 17;
else
apic->lapic_timer.timer_mode_mask = 1 << 17;
}
best = kvm_find_cpuid_entry(vcpu, 7, 0);
- if (best) {
- /* Update OSPKE bit */
- if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
- best->ecx &= ~F(OSPKE);
- if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
- best->ecx |= F(OSPKE);
- }
- }
+ if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
+ cpuid_entry_change(best, X86_FEATURE_OSPKE,
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
if (!best) {
@@ -107,14 +89,14 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
} else {
vcpu->arch.guest_supported_xcr0 =
- (best->eax | ((u64)best->edx << 32)) &
- kvm_supported_xcr0();
+ (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
vcpu->arch.guest_xstate_size = best->ebx =
xstate_required_size(vcpu->arch.xcr0, false);
}
best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
- if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+ if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
+ cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
/*
@@ -136,12 +118,10 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
- if (best) {
- if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
- best->ecx |= F(MWAIT);
- else
- best->ecx &= ~F(MWAIT);
- }
+ if (best)
+ cpuid_entry_change(best, X86_FEATURE_MWAIT,
+ vcpu->arch.ia32_misc_enable_msr &
+ MSR_IA32_MISC_ENABLE_MWAIT);
}
/* Update physical-address width */
@@ -154,10 +134,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
static int is_efer_nx(void)
{
- unsigned long long efer = 0;
-
- rdmsrl_safe(MSR_EFER, &efer);
- return efer & EFER_NX;
+ return host_efer & EFER_NX;
}
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
@@ -173,8 +150,8 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
break;
}
}
- if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
- entry->edx &= ~F(NX);
+ if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
+ cpuid_entry_clear(entry, X86_FEATURE_NX);
printk(KERN_INFO "kvm: guest NX capability removed\n");
}
}
@@ -232,7 +209,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu->arch.cpuid_nent = cpuid->nent;
cpuid_fix_nx_cap(vcpu);
kvm_apic_set_version(vcpu);
- kvm_x86_ops->cpuid_update(vcpu);
+ kvm_x86_ops.cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
out:
@@ -255,7 +232,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
- kvm_x86_ops->cpuid_update(vcpu);
+ kvm_x86_ops.cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
out:
return r;
@@ -281,15 +258,189 @@ out:
return r;
}
-static __always_inline void cpuid_mask(u32 *word, int wordnum)
+static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
- reverse_cpuid_check(wordnum);
- *word &= boot_cpu_data.x86_capability[wordnum];
+ const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
+ struct kvm_cpuid_entry2 entry;
+
+ reverse_cpuid_check(leaf);
+ kvm_cpu_caps[leaf] &= mask;
+
+ cpuid_count(cpuid.function, cpuid.index,
+ &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
+
+ kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
+}
+
+void kvm_set_cpu_caps(void)
+{
+ unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
+#ifdef CONFIG_X86_64
+ unsigned int f_gbpages = F(GBPAGES);
+ unsigned int f_lm = F(LM);
+#else
+ unsigned int f_gbpages = 0;
+ unsigned int f_lm = 0;
+#endif
+
+ BUILD_BUG_ON(sizeof(kvm_cpu_caps) >
+ sizeof(boot_cpu_data.x86_capability));
+
+ memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
+ sizeof(kvm_cpu_caps));
+
+ kvm_cpu_cap_mask(CPUID_1_ECX,
+ /*
+ * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
+ * advertised to guests via CPUID!
+ */
+ F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
+ 0 /* DS-CPL, VMX, SMX, EST */ |
+ 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
+ F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
+ F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
+ F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
+ 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+ F(F16C) | F(RDRAND)
+ );
+ /* KVM emulates x2apic in software irrespective of host support. */
+ kvm_cpu_cap_set(X86_FEATURE_X2APIC);
+
+ kvm_cpu_cap_mask(CPUID_1_EDX,
+ F(FPU) | F(VME) | F(DE) | F(PSE) |
+ F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+ F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
+ F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+ F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
+ 0 /* Reserved, DS, ACPI */ | F(MMX) |
+ F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
+ 0 /* HTT, TM, Reserved, PBE */
+ );
+
+ kvm_cpu_cap_mask(CPUID_7_0_EBX,
+ F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
+ F(BMI2) | F(ERMS) | 0 /*INVPCID*/ | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
+ F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
+ F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
+ F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
+ );
+
+ kvm_cpu_cap_mask(CPUID_7_ECX,
+ F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
+ F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
+ F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
+ F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/
+ );
+ /* Set LA57 based on hardware capability. */
+ if (cpuid_ecx(7) & F(LA57))
+ kvm_cpu_cap_set(X86_FEATURE_LA57);
+
+ kvm_cpu_cap_mask(CPUID_7_EDX,
+ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
+ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
+ F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM)
+ );
+
+ /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
+ kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
+ kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);
+
+ if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
+ kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
+
+ kvm_cpu_cap_mask(CPUID_7_1_EAX,
+ F(AVX512_BF16)
+ );
+
+ kvm_cpu_cap_mask(CPUID_D_1_EAX,
+ F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
+ );
+
+ kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
+ F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
+ F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+ F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
+ 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
+ F(TOPOEXT) | F(PERFCTR_CORE)
+ );
+
+ kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
+ F(FPU) | F(VME) | F(DE) | F(PSE) |
+ F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+ F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
+ F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+ F(PAT) | F(PSE36) | 0 /* Reserved */ |
+ f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+ F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
+ 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
+ );
+
+ if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
+ kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
+
+ kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
+ F(CLZERO) | F(XSAVEERPTR) |
+ F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+ F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON)
+ );
+
+ /*
+ * AMD has separate bits for each SPEC_CTRL bit.
+ * arch/x86/kernel/cpu/bugs.c is kind enough to
+ * record that in cpufeatures so use them.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBPB))
+ kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
+ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
+ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
+ /*
+ * The preference is to use SPEC CTRL MSR instead of the
+ * VIRT_SPEC MSR.
+ */
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+ !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
+
+ /*
+ * Hide all SVM features by default, SVM will set the cap bits for
+ * features it emulates and/or exposes for L1.
+ */
+ kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
+
+ kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
+ F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
+ F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
+ F(PMM) | F(PMM_EN)
+ );
}
+EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
-static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
- u32 index)
+struct kvm_cpuid_array {
+ struct kvm_cpuid_entry2 *entries;
+ const int maxnent;
+ int nent;
+};
+
+static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
+ u32 function, u32 index)
{
+ struct kvm_cpuid_entry2 *entry;
+
+ if (array->nent >= array->maxnent)
+ return NULL;
+
+ entry = &array->entries[array->nent++];
+
entry->function = function;
entry->index = index;
entry->flags = 0;
@@ -298,9 +449,6 @@ static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
&entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
switch (function) {
- case 2:
- entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
- break;
case 4:
case 7:
case 0xb:
@@ -316,11 +464,18 @@ static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
break;
}
+
+ return entry;
}
-static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
- u32 func, int *nent, int maxnent)
+static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
+ struct kvm_cpuid_entry2 *entry;
+
+ if (array->nent >= array->maxnent)
+ return -E2BIG;
+
+ entry = &array->entries[array->nent];
entry->function = func;
entry->index = 0;
entry->flags = 0;
@@ -328,17 +483,17 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
switch (func) {
case 0:
entry->eax = 7;
- ++*nent;
+ ++array->nent;
break;
case 1:
entry->ecx = F(MOVBE);
- ++*nent;
+ ++array->nent;
break;
case 7:
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
entry->eax = 0;
entry->ecx = F(RDPID);
- ++*nent;
+ ++array->nent;
default:
break;
}
@@ -346,223 +501,60 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
return 0;
}
-static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
+static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
- unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
- unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
- unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
- unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
- unsigned f_la57;
- unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;
-
- /* cpuid 7.0.ebx */
- const u32 kvm_cpuid_7_0_ebx_x86_features =
- F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
- F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
- F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
- F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
- F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;
-
- /* cpuid 7.0.ecx*/
- const u32 kvm_cpuid_7_0_ecx_x86_features =
- F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
- F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
- F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
- F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
-
- /* cpuid 7.0.edx*/
- const u32 kvm_cpuid_7_0_edx_x86_features =
- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
- F(MD_CLEAR);
-
- /* cpuid 7.1.eax */
- const u32 kvm_cpuid_7_1_eax_x86_features =
- F(AVX512_BF16);
-
- switch (index) {
- case 0:
- entry->eax = min(entry->eax, 1u);
- entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
- cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
- /* TSC_ADJUST is emulated */
- entry->ebx |= F(TSC_ADJUST);
-
- entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
- f_la57 = entry->ecx & F(LA57);
- cpuid_mask(&entry->ecx, CPUID_7_ECX);
- /* Set LA57 based on hardware capability. */
- entry->ecx |= f_la57;
- entry->ecx |= f_umip;
- entry->ecx |= f_pku;
- /* PKU is not yet implemented for shadow paging. */
- if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
- entry->ecx &= ~F(PKU);
-
- entry->edx &= kvm_cpuid_7_0_edx_x86_features;
- cpuid_mask(&entry->edx, CPUID_7_EDX);
- if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
- entry->edx |= F(SPEC_CTRL);
- if (boot_cpu_has(X86_FEATURE_STIBP))
- entry->edx |= F(INTEL_STIBP);
- if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- boot_cpu_has(X86_FEATURE_AMD_SSBD))
- entry->edx |= F(SPEC_CTRL_SSBD);
- /*
- * We emulate ARCH_CAPABILITIES in software even
- * if the host doesn't support it.
- */
- entry->edx |= F(ARCH_CAPABILITIES);
- break;
- case 1:
- entry->eax &= kvm_cpuid_7_1_eax_x86_features;
- entry->ebx = 0;
- entry->ecx = 0;
- entry->edx = 0;
- break;
- default:
- WARN_ON_ONCE(1);
- entry->eax = 0;
- entry->ebx = 0;
- entry->ecx = 0;
- entry->edx = 0;
- break;
- }
-}
-
-static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
- int *nent, int maxnent)
-{
- int r;
- unsigned f_nx = is_efer_nx() ? F(NX) : 0;
-#ifdef CONFIG_X86_64
- unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
- ? F(GBPAGES) : 0;
- unsigned f_lm = F(LM);
-#else
- unsigned f_gbpages = 0;
- unsigned f_lm = 0;
-#endif
- unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
- unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
- unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
-
- /* cpuid 1.edx */
- const u32 kvm_cpuid_1_edx_x86_features =
- F(FPU) | F(VME) | F(DE) | F(PSE) |
- F(TSC) | F(MSR) | F(PAE) | F(MCE) |
- F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
- F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
- F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
- 0 /* Reserved, DS, ACPI */ | F(MMX) |
- F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
- 0 /* HTT, TM, Reserved, PBE */;
- /* cpuid 0x80000001.edx */
- const u32 kvm_cpuid_8000_0001_edx_x86_features =
- F(FPU) | F(VME) | F(DE) | F(PSE) |
- F(TSC) | F(MSR) | F(PAE) | F(MCE) |
- F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
- F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
- F(PAT) | F(PSE36) | 0 /* Reserved */ |
- f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
- F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
- 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
- /* cpuid 1.ecx */
- const u32 kvm_cpuid_1_ecx_x86_features =
- /* NOTE: MONITOR (and MWAIT) are emulated as NOP,
- * but *not* advertised to guests via CPUID ! */
- F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
- 0 /* DS-CPL, VMX, SMX, EST */ |
- 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
- F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
- F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
- F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
- 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
- F(F16C) | F(RDRAND);
- /* cpuid 0x80000001.ecx */
- const u32 kvm_cpuid_8000_0001_ecx_x86_features =
- F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
- F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
- F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
- 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
- F(TOPOEXT) | F(PERFCTR_CORE);
-
- /* cpuid 0x80000008.ebx */
- const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(CLZERO) | F(XSAVEERPTR) |
- F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
- F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);
-
- /* cpuid 0xC0000001.edx */
- const u32 kvm_cpuid_C000_0001_edx_x86_features =
- F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
- F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
- F(PMM) | F(PMM_EN);
-
- /* cpuid 0xD.1.eax */
- const u32 kvm_cpuid_D_1_eax_x86_features =
- F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
+ struct kvm_cpuid_entry2 *entry;
+ int r, i, max_idx;
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
r = -E2BIG;
- if (WARN_ON(*nent >= maxnent))
+ entry = do_host_cpuid(array, function, 0);
+ if (!entry)
goto out;
- do_host_cpuid(entry, function, 0);
- ++*nent;
-
switch (function) {
case 0:
/* Limited to the highest leaf implemented in KVM. */
entry->eax = min(entry->eax, 0x1fU);
break;
case 1:
- entry->edx &= kvm_cpuid_1_edx_x86_features;
- cpuid_mask(&entry->edx, CPUID_1_EDX);
- entry->ecx &= kvm_cpuid_1_ecx_x86_features;
- cpuid_mask(&entry->ecx, CPUID_1_ECX);
- /* we support x2apic emulation even if host does not support
- * it since we emulate x2apic in software */
- entry->ecx |= F(X2APIC);
+ cpuid_entry_override(entry, CPUID_1_EDX);
+ cpuid_entry_override(entry, CPUID_1_ECX);
break;
- /* function 2 entries are STATEFUL. That is, repeated cpuid commands
- * may return different values. This forces us to get_cpu() before
- * issuing the first command, and also to emulate this annoying behavior
- * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
- case 2: {
- int t, times = entry->eax & 0xff;
-
- entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
- for (t = 1; t < times; ++t) {
- if (*nent >= maxnent)
- goto out;
-
- do_host_cpuid(&entry[t], function, 0);
- ++*nent;
- }
+ case 2:
+ /*
+ * On ancient CPUs, function 2 entries are STATEFUL. That is,
+ * CPUID(function=2, index=0) may return different results each
+ * time, with the least-significant byte in EAX enumerating the
+ * number of times software should do CPUID(2, 0).
+ *
+ * Modern CPUs, i.e. every CPU KVM has *ever* run on are less
+ * idiotic. Intel's SDM states that EAX & 0xff "will always
+ * return 01H. Software should ignore this value and not
+ * interpret it as an informational descriptor", while AMD's
+ * APM states that CPUID(2) is reserved.
+ *
+ * WARN if a frankenstein CPU that supports virtualization and
+ * a stateful CPUID.0x2 is encountered.
+ */
+ WARN_ON_ONCE((entry->eax & 0xff) > 1);
break;
- }
/* functions 4 and 0x8000001d have additional index. */
case 4:
- case 0x8000001d: {
- int i, cache_type;
-
- /* read more entries until cache_type is zero */
- for (i = 1; ; ++i) {
- if (*nent >= maxnent)
+ case 0x8000001d:
+ /*
+ * Read entries until the cache type in the previous entry is
+ * zero, i.e. indicates an invalid entry.
+ */
+ for (i = 1; entry->eax & 0x1f; ++i) {
+ entry = do_host_cpuid(array, function, i);
+ if (!entry)
goto out;
-
- cache_type = entry[i - 1].eax & 0x1f;
- if (!cache_type)
- break;
- do_host_cpuid(&entry[i], function, i);
- ++*nent;
}
break;
- }
case 6: /* Thermal management */
entry->eax = 0x4; /* allow ARAT */
entry->ebx = 0;
@@ -570,22 +562,24 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
entry->edx = 0;
break;
/* function 7 has additional index. */
- case 7: {
- int i;
-
- for (i = 0; ; ) {
- do_cpuid_7_mask(&entry[i], i);
- if (i == entry->eax)
- break;
- if (*nent >= maxnent)
+ case 7:
+ entry->eax = min(entry->eax, 1u);
+ cpuid_entry_override(entry, CPUID_7_0_EBX);
+ cpuid_entry_override(entry, CPUID_7_ECX);
+ cpuid_entry_override(entry, CPUID_7_EDX);
+
+ /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
+ if (entry->eax == 1) {
+ entry = do_host_cpuid(array, function, 1);
+ if (!entry)
goto out;
- ++i;
- do_host_cpuid(&entry[i], function, i);
- ++*nent;
+ cpuid_entry_override(entry, CPUID_7_1_EAX);
+ entry->ebx = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
}
break;
- }
case 9:
break;
case 0xa: { /* Architectural Performance Monitoring */
@@ -622,79 +616,81 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
* thus they can be handled by common code.
*/
case 0x1f:
- case 0xb: {
- int i;
-
+ case 0xb:
/*
- * We filled in entry[0] for CPUID(EAX=<function>,
- * ECX=00H) above. If its level type (ECX[15:8]) is
- * zero, then the leaf is unimplemented, and we're
- * done. Otherwise, continue to populate entries
- * until the level type (ECX[15:8]) of the previously
- * added entry is zero.
+ * Populate entries until the level type (ECX[15:8]) of the
+ * previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is
+ * the starting entry, filled by the primary do_host_cpuid().
*/
- for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
- if (*nent >= maxnent)
+ for (i = 1; entry->ecx & 0xff00; ++i) {
+ entry = do_host_cpuid(array, function, i);
+ if (!entry)
goto out;
-
- do_host_cpuid(&entry[i], function, i);
- ++*nent;
}
break;
- }
- case 0xd: {
- int idx, i;
- u64 supported = kvm_supported_xcr0();
-
- entry->eax &= supported;
- entry->ebx = xstate_required_size(supported, false);
+ case 0xd:
+ entry->eax &= supported_xcr0;
+ entry->ebx = xstate_required_size(supported_xcr0, false);
entry->ecx = entry->ebx;
- entry->edx &= supported >> 32;
- if (!supported)
+ entry->edx &= supported_xcr0 >> 32;
+ if (!supported_xcr0)
break;
- for (idx = 1, i = 1; idx < 64; ++idx) {
- u64 mask = ((u64)1 << idx);
- if (*nent >= maxnent)
+ entry = do_host_cpuid(array, function, 1);
+ if (!entry)
+ goto out;
+
+ cpuid_entry_override(entry, CPUID_D_1_EAX);
+ if (entry->eax & (F(XSAVES)|F(XSAVEC)))
+ entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
+ true);
+ else {
+ WARN_ON_ONCE(supported_xss != 0);
+ entry->ebx = 0;
+ }
+ entry->ecx &= supported_xss;
+ entry->edx &= supported_xss >> 32;
+
+ for (i = 2; i < 64; ++i) {
+ bool s_state;
+ if (supported_xcr0 & BIT_ULL(i))
+ s_state = false;
+ else if (supported_xss & BIT_ULL(i))
+ s_state = true;
+ else
+ continue;
+
+ entry = do_host_cpuid(array, function, i);
+ if (!entry)
goto out;
- do_host_cpuid(&entry[i], function, idx);
- if (idx == 1) {
- entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
- cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
- entry[i].ebx = 0;
- if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
- entry[i].ebx =
- xstate_required_size(supported,
- true);
- } else {
- if (entry[i].eax == 0 || !(supported & mask))
- continue;
- if (WARN_ON_ONCE(entry[i].ecx & 1))
- continue;
+ /*
+ * The supported check above should have filtered out
+ * invalid sub-leafs. Only valid sub-leafs should
+ * reach this point, and they should have a non-zero
+ * save state size. Furthermore, check whether the
+ * processor agrees with supported_xcr0/supported_xss
+ * on whether this is an XCR0- or IA32_XSS-managed area.
+ */
+ if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
+ --array->nent;
+ continue;
}
- entry[i].ecx = 0;
- entry[i].edx = 0;
- ++*nent;
- ++i;
+ entry->edx = 0;
}
break;
- }
/* Intel PT */
- case 0x14: {
- int t, times = entry->eax;
-
- if (!f_intel_pt)
+ case 0x14:
+ if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
+ }
- for (t = 1; t <= times; ++t) {
- if (*nent >= maxnent)
+ for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
+ if (!do_host_cpuid(array, function, i))
goto out;
- do_host_cpuid(&entry[t], function, t);
- ++*nent;
}
break;
- }
case KVM_CPUID_SIGNATURE: {
static const char signature[12] = "KVMKVMKVM\0\0";
const u32 *sigptr = (const u32 *)signature;
@@ -729,10 +725,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
entry->eax = min(entry->eax, 0x8000001f);
break;
case 0x80000001:
- entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
- cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
- entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
- cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
+ cpuid_entry_override(entry, CPUID_8000_0001_EDX);
+ cpuid_entry_override(entry, CPUID_8000_0001_ECX);
break;
case 0x80000007: /* Advanced power management */
/* invariant TSC is CPUID.80000007H:EDX[8] */
@@ -750,33 +744,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
entry->edx = 0;
- entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
- cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
- /*
- * AMD has separate bits for each SPEC_CTRL bit.
- * arch/x86/kernel/cpu/bugs.c is kind enough to
- * record that in cpufeatures so use them.
- */
- if (boot_cpu_has(X86_FEATURE_IBPB))
- entry->ebx |= F(AMD_IBPB);
- if (boot_cpu_has(X86_FEATURE_IBRS))
- entry->ebx |= F(AMD_IBRS);
- if (boot_cpu_has(X86_FEATURE_STIBP))
- entry->ebx |= F(AMD_STIBP);
- if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- boot_cpu_has(X86_FEATURE_AMD_SSBD))
- entry->ebx |= F(AMD_SSBD);
- if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
- entry->ebx |= F(AMD_SSB_NO);
- /*
- * The preference is to use SPEC CTRL MSR instead of the
- * VIRT_SPEC MSR.
- */
- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
- !boot_cpu_has(X86_FEATURE_AMD_SSBD))
- entry->ebx |= F(VIRT_SSBD);
+ cpuid_entry_override(entry, CPUID_8000_0008_EBX);
break;
}
+ case 0x8000000A:
+ if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+ entry->eax = 1; /* SVM revision 1 */
+ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
+ ASID emulation to nested SVM */
+ entry->ecx = 0; /* Reserved */
+ cpuid_entry_override(entry, CPUID_8000_000A_EDX);
+ break;
case 0x80000019:
entry->ecx = entry->edx = 0;
break;
@@ -794,8 +775,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
entry->eax = min(entry->eax, 0xC0000004);
break;
case 0xC0000001:
- entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
- cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
+ cpuid_entry_override(entry, CPUID_C000_0001_EDX);
break;
case 3: /* Processor serial number */
case 5: /* MONITOR/MWAIT */
@@ -807,8 +787,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
break;
}
- kvm_x86_ops->set_supported_cpuid(function, entry);
-
r = 0;
out:
@@ -817,26 +795,39 @@ out:
return r;
}
-static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
- int *nent, int maxnent, unsigned int type)
+static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
+ unsigned int type)
{
- if (*nent >= maxnent)
- return -E2BIG;
-
if (type == KVM_GET_EMULATED_CPUID)
- return __do_cpuid_func_emulated(entry, func, nent, maxnent);
+ return __do_cpuid_func_emulated(array, func);
- return __do_cpuid_func(entry, func, nent, maxnent);
+ return __do_cpuid_func(array, func);
}
-struct kvm_cpuid_param {
- u32 func;
- bool (*qualifier)(const struct kvm_cpuid_param *param);
-};
+#define CENTAUR_CPUID_SIGNATURE 0xC0000000
-static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
+static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
+ unsigned int type)
{
- return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
+ u32 limit;
+ int r;
+
+ if (func == CENTAUR_CPUID_SIGNATURE &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
+ return 0;
+
+ r = do_cpuid_func(array, func, type);
+ if (r)
+ return r;
+
+ limit = array->entries[array->nent - 1].eax;
+ for (func = func + 1; func <= limit; ++func) {
+ r = do_cpuid_func(array, func, type);
+ if (r)
+ break;
+ }
+
+ return r;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
@@ -870,157 +861,145 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries,
unsigned int type)
{
- struct kvm_cpuid_entry2 *cpuid_entries;
- int limit, nent = 0, r = -E2BIG, i;
- u32 func;
- static const struct kvm_cpuid_param param[] = {
- { .func = 0 },
- { .func = 0x80000000 },
- { .func = 0xC0000000, .qualifier = is_centaur_cpu },
- { .func = KVM_CPUID_SIGNATURE },
+ static const u32 funcs[] = {
+ 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
+ };
+
+ struct kvm_cpuid_array array = {
+ .nent = 0,
+ .maxnent = cpuid->nent,
};
+ int r, i;
if (cpuid->nent < 1)
- goto out;
+ return -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
cpuid->nent = KVM_MAX_CPUID_ENTRIES;
if (sanity_check_entries(entries, cpuid->nent, type))
return -EINVAL;
- r = -ENOMEM;
- cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
+ array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
cpuid->nent));
- if (!cpuid_entries)
- goto out;
-
- r = 0;
- for (i = 0; i < ARRAY_SIZE(param); i++) {
- const struct kvm_cpuid_param *ent = &param[i];
-
- if (ent->qualifier && !ent->qualifier(ent))
- continue;
-
- r = do_cpuid_func(&cpuid_entries[nent], ent->func,
- &nent, cpuid->nent, type);
-
- if (r)
- goto out_free;
-
- limit = cpuid_entries[nent - 1].eax;
- for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
- r = do_cpuid_func(&cpuid_entries[nent], func,
- &nent, cpuid->nent, type);
+ if (!array.entries)
+ return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(funcs); i++) {
+ r = get_cpuid_func(&array, funcs[i], type);
if (r)
goto out_free;
}
+ cpuid->nent = array.nent;
- r = -EFAULT;
- if (copy_to_user(entries, cpuid_entries,
- nent * sizeof(struct kvm_cpuid_entry2)))
- goto out_free;
- cpuid->nent = nent;
- r = 0;
+ if (copy_to_user(entries, array.entries,
+ array.nent * sizeof(struct kvm_cpuid_entry2)))
+ r = -EFAULT;
out_free:
- vfree(cpuid_entries);
-out:
+ vfree(array.entries);
return r;
}
-static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
-{
- struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
- struct kvm_cpuid_entry2 *ej;
- int j = i;
- int nent = vcpu->arch.cpuid_nent;
-
- e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
- /* when no next entry is found, the current entry[i] is reselected */
- do {
- j = (j + 1) % nent;
- ej = &vcpu->arch.cpuid_entries[j];
- } while (ej->function != e->function);
-
- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-
- return j;
-}
-
-/* find an entry with matching function, matching index (if needed), and that
- * should be read next (if it's stateful) */
-static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
- u32 function, u32 index)
-{
- if (e->function != function)
- return 0;
- if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
- return 0;
- if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
- !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
- return 0;
- return 1;
-}
-
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index)
{
+ struct kvm_cpuid_entry2 *e;
int i;
- struct kvm_cpuid_entry2 *best = NULL;
for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
- struct kvm_cpuid_entry2 *e;
-
e = &vcpu->arch.cpuid_entries[i];
- if (is_matching_cpuid_entry(e, function, index)) {
- if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
- move_to_next_stateful_cpuid_entry(vcpu, i);
- best = e;
- break;
- }
+
+ if (e->function == function && (e->index == index ||
+ !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+ return e;
}
- return best;
+ return NULL;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
- * If the basic or extended CPUID leaf requested is higher than the
- * maximum supported basic or extended leaf, respectively, then it is
- * out of range.
+ * Intel CPUID semantics treats any query for an out-of-range leaf as if the
+ * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
+ * returns all zeroes for any undefined leaf, whether or not the leaf is in
+ * range. Centaur/VIA follows Intel semantics.
+ *
+ * A leaf is considered out-of-range if its function is higher than the maximum
+ * supported leaf of its associated class or if its associated class does not
+ * exist.
+ *
+ * There are four primary classes to be considered, with their respective
+ * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive. A primary
+ * class exists if a guest CPUID entry for its <base> leaf exists. For a given
+ * class, CPUID.<base>.EAX contains the max supported leaf for the class.
+ *
+ * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
+ * - Hypervisor: 0x40000000 - 0x4fffffff
+ * - Extended: 0x80000000 - 0xbfffffff
+ * - Centaur: 0xc0000000 - 0xcfffffff
+ *
+ * The Hypervisor class is further subdivided into sub-classes that each act as
+ * their own independent class associated with a 0x100 byte range. E.g. if QEMU
+ * is advertising support for both Hyper-V and KVM, the resulting Hypervisor
+ * CPUID sub-classes are:
+ *
+ * - HyperV: 0x40000000 - 0x400000ff
+ * - KVM: 0x40000100 - 0x400001ff
*/
-static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
+static struct kvm_cpuid_entry2 *
+get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
- struct kvm_cpuid_entry2 *max;
+ struct kvm_cpuid_entry2 *basic, *class;
+ u32 function = *fn_ptr;
+
+ basic = kvm_find_cpuid_entry(vcpu, 0, 0);
+ if (!basic)
+ return NULL;
+
+ if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
+ is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
+ return NULL;
- max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
- return max && function <= max->eax;
+ if (function >= 0x40000000 && function <= 0x4fffffff)
+ class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
+ else if (function >= 0xc0000000)
+ class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
+ else
+ class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+
+ if (class && function <= class->eax)
+ return NULL;
+
+ /*
+ * Leaf specific adjustments are also applied when redirecting to the
+ * max basic entry, e.g. if the max basic leaf is 0xb but there is no
+ * entry for CPUID.0xb.index (see below), then the output value for EDX
+ * needs to be pulled from CPUID.0xb.1.
+ */
+ *fn_ptr = basic->eax;
+
+ /*
+ * The class does not exist or the requested function is out of range;
+ * the effective CPUID entry is the max basic leaf. Note, the index of
+ * the originally requested leaf is observed!
+ */
+ return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}
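
To make the ranges above concrete, the class-base computation can be read off the three branches in get_out_of_range_cpuid_entry(); an illustrative (non-kernel) restatement:

/* Illustrative only: map a CPUID function to the base leaf of its class. */
static u32 cpuid_class_base(u32 function)
{
	if (function >= 0x40000000 && function <= 0x4fffffff)
		return function & 0xffffff00;	/* 0x100-sized hypervisor sub-class */
	if (function >= 0xc0000000)
		return 0xc0000000;		/* Centaur */
	return function & 0x80000000;		/* basic (0) or extended (0x80000000) */
}

E.g. 0x40000104 resolves to the sub-class base 0x40000100, while 0x8000001f resolves to 0x80000000.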
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
- u32 *ecx, u32 *edx, bool check_limit)
+ u32 *ecx, u32 *edx, bool exact_only)
{
- u32 function = *eax, index = *ecx;
+ u32 orig_function = *eax, function = *eax, index = *ecx;
struct kvm_cpuid_entry2 *entry;
- struct kvm_cpuid_entry2 *max;
- bool found;
+ bool exact, used_max_basic = false;
entry = kvm_find_cpuid_entry(vcpu, function, index);
- found = entry;
- /*
- * Intel CPUID semantics treats any query for an out-of-range
- * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
- * requested. AMD CPUID semantics returns all zeroes for any
- * undefined leaf, whether or not the leaf is in range.
- */
- if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
- !cpuid_function_in_range(vcpu, function)) {
- max = kvm_find_cpuid_entry(vcpu, 0, 0);
- if (max) {
- function = max->eax;
- entry = kvm_find_cpuid_entry(vcpu, function, index);
- }
+ exact = !!entry;
+
+ if (!entry && !exact_only) {
+ entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
+ used_max_basic = !!entry;
}
+
if (entry) {
*eax = entry->eax;
*ebx = entry->ebx;
@@ -1049,8 +1028,9 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
}
}
}
- trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
- return found;
+ trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
+ used_max_basic);
+ return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
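
Note the renamed flag inverts the old check_limit sense, which is why every call site below flips its boolean argument. In short:

	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);	/* emulator-internal query: exact match only */
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);	/* guest-visible CPUID: allow out-of-range redirect */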
@@ -1063,7 +1043,7 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
eax = kvm_rax_read(vcpu);
ecx = kvm_rcx_read(vcpu);
- kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
+ kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
kvm_rax_write(vcpu, eax);
kvm_rbx_write(vcpu, ebx);
kvm_rcx_write(vcpu, ecx);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 7366c618aa04..63a70f6a3df3 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -6,8 +6,10 @@
#include <asm/cpu.h>
#include <asm/processor.h>
+extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+void kvm_set_cpu_caps(void);
+
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
-bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -23,7 +25,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
- u32 *ecx, u32 *edx, bool check_limit);
+ u32 *ecx, u32 *edx, bool exact_only);
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
@@ -64,7 +66,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
* and can't be used by KVM to query/control guest capabilities. And obviously
* the leaf being queried must have an entry in the lookup table.
*/
-static __always_inline void reverse_cpuid_check(unsigned x86_leaf)
+static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
@@ -88,24 +90,18 @@ static __always_inline u32 __feature_bit(int x86_feature)
#define feature_bit(name) __feature_bit(X86_FEATURE_##name)
-static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
- unsigned x86_leaf = x86_feature / 32;
+ unsigned int x86_leaf = x86_feature / 32;
reverse_cpuid_check(x86_leaf);
return reverse_cpuid[x86_leaf];
}
-static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+ u32 reg)
{
- struct kvm_cpuid_entry2 *entry;
- const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
-
- entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
- if (!entry)
- return NULL;
-
- switch (cpuid.reg) {
+ switch (reg) {
case CPUID_EAX:
return &entry->eax;
case CPUID_EBX:
@@ -120,9 +116,86 @@ static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsi
}
}
-static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+
+ return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ return *reg & __feature_bit(x86_feature);
+}
+
+static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ return cpuid_entry_get(entry, x86_feature);
+}
+
+static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ *reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ *reg |= __feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature,
+ bool set)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ /*
+ * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
+ * compiler into using CMOV instead of Jcc when possible.
+ */
+ if (set)
+ *reg |= __feature_bit(x86_feature);
+ else
+ *reg &= ~__feature_bit(x86_feature);
+}
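
The CMOV remark above boils down to avoiding a data-dependent branch; a fully branch-free equivalent of the same update, as a standalone sketch:

/* Sketch: branch-free conditional bit update; -(u32)set is 0 or ~0u. */
static inline u32 update_bit(u32 word, u32 mask, bool set)
{
	return (word & ~mask) | (-(u32)set & mask);
}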
+
+static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
+ enum cpuid_leafs leaf)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
+
+ BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
+ *reg = kvm_cpu_caps[leaf];
+}
+
+static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
{
- int *reg;
+ const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+ struct kvm_cpuid_entry2 *entry;
+
+ entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+ if (!entry)
+ return NULL;
+
+ return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
+{
+ u32 *reg;
reg = guest_cpuid_get_register(vcpu, x86_feature);
if (!reg)
@@ -131,21 +204,24 @@ static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_
return *reg & __feature_bit(x86_feature);
}
-static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
{
- int *reg;
+ u32 *reg;
reg = guest_cpuid_get_register(vcpu, x86_feature);
if (reg)
*reg &= ~__feature_bit(x86_feature);
}
-static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 0, 0);
- return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
+ return best &&
+ (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
+ is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
@@ -192,4 +268,39 @@ static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}
+static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
+{
+ unsigned int x86_leaf = x86_feature / 32;
+
+ reverse_cpuid_check(x86_leaf);
+ kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
+{
+ unsigned int x86_leaf = x86_feature / 32;
+
+ reverse_cpuid_check(x86_leaf);
+ kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
+}
+
+static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
+{
+ unsigned int x86_leaf = x86_feature / 32;
+
+ reverse_cpuid_check(x86_leaf);
+ return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
+}
+
+static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
+{
+ return !!kvm_cpu_cap_get(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
+{
+ if (boot_cpu_has(x86_feature))
+ kvm_cpu_cap_set(x86_feature);
+}
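
Taken together these helpers let common code build the module-wide capability mask once at setup time; a hypothetical usage sketch (the feature choices are illustrative, real call sites live in kvm_set_cpu_caps() and the vendor modules):

/* Hypothetical setup-time usage of the kvm_cpu_caps helpers. */
static void example_setup_caps(void)
{
	/* Advertise a feature only if the host boot CPU has it. */
	kvm_cpu_cap_check_and_set(X86_FEATURE_XSAVE);

	/* Force-set a feature that KVM emulates regardless of the host. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_DEADLINE_TIMER);

	/* Query the result from anywhere in KVM. */
	if (kvm_cpu_cap_has(X86_FEATURE_XSAVE))
		/* expose XSAVE-dependent state */;
}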
+
#endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index bc00642e5d3b..bddaba9c68dd 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -20,7 +20,7 @@
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
-#include <asm/kvm_emulate.h>
+#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
@@ -665,6 +665,17 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
+static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
+{
+ return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
+}
+
+static inline bool emul_is_noncanonical_address(u64 la,
+ struct x86_emulate_ctxt *ctxt)
+{
+ return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
+}
+
/*
* x86 defines three classes of vector instructions: explicitly
* aligned, explicitly unaligned, and the rest, which change behaviour
@@ -2711,10 +2722,8 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
u32 eax, ebx, ecx, edx;
eax = ecx = 0;
- ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
- return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
- && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
- && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
+ return is_guest_vendor_intel(ebx, ecx, edx);
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
@@ -2731,36 +2740,18 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
eax = 0x00000000;
ecx = 0x00000000;
- ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
+ ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
/*
- * Intel ("GenuineIntel")
- * remark: Intel CPUs only support "syscall" in 64bit
- * longmode. Also an 64bit guest with a
- * 32bit compat-app running will #UD !! While this
- * behaviour can be fixed (by emulating) into AMD
- * response - CPUs of AMD can't behave like Intel.
+ * remark: Intel CPUs only support "syscall" in 64-bit long mode. Also, a
+ * 64-bit guest running a 32-bit compat app will #UD! While this behaviour
+ * could be fixed by emulating the AMD response, AMD CPUs can't be made to
+ * behave like Intel ones.
*/
- if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
- ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
- edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
+ if (is_guest_vendor_intel(ebx, ecx, edx))
return false;
- /* AMD ("AuthenticAMD") */
- if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
- ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
- edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
- return true;
-
- /* AMD ("AMDisbetter!") */
- if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
- ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
- edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
- return true;
-
- /* Hygon ("HygonGenuine") */
- if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
- ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
- edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
+ if (is_guest_vendor_amd(ebx, ecx, edx) ||
+ is_guest_vendor_hygon(ebx, ecx, edx))
return true;
/*
@@ -3980,7 +3971,7 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt)
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
- ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
*reg_write(ctxt, VCPU_REGS_RAX) = eax;
*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
@@ -4250,7 +4241,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
eax = 0x80000008;
ecx = 0;
if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
- &edx, false))
+ &edx, true))
maxphyaddr = eax & 0xff;
else
maxphyaddr = 36;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index a86fda7a1d03..bcefa9d4e57e 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1022,7 +1022,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
- kvm_x86_ops->patch_hypercall(vcpu, instructions);
+ kvm_x86_ops.patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
@@ -1607,7 +1607,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
* hypercall generates UD from non zero cpl and real mode
* per HYPER-V spec
*/
- if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+ if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -1800,8 +1800,8 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
};
int i, nent = ARRAY_SIZE(cpuid_entries);
- if (kvm_x86_ops->nested_get_evmcs_version)
- evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+ if (kvm_x86_ops.nested_get_evmcs_version)
+ evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);
/* Skip NESTED_FEATURES if eVMCS is not supported */
if (!evmcs_ver)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b24c606ac04b..febca334c320 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -367,7 +367,7 @@ static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
struct kvm_kpit_state *ps = &pit->pit_state;
- pr_debug("load_count val is %d, channel is %d\n", val, channel);
+ pr_debug("load_count val is %u, channel is %d\n", val, channel);
/*
* The largest possible initial count is 0; this is equivalent
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 58767020de41..62558b9bdda7 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
return 0;
if (!kvm_register_is_available(vcpu, reg))
- kvm_x86_ops->cache_reg(vcpu, reg);
+ kvm_x86_ops.cache_reg(vcpu, reg);
return vcpu->arch.regs[reg];
}
@@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
might_sleep(); /* on svm */
if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
- kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
+ kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
return vcpu->arch.walk_mmu->pdptrs[index];
}
@@ -117,7 +117,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
if (tmask & vcpu->arch.cr0_guest_owned_bits)
- kvm_x86_ops->decache_cr0_guest_bits(vcpu);
+ kvm_x86_ops.decache_cr0_guest_bits(vcpu);
return vcpu->arch.cr0 & mask;
}
@@ -130,14 +130,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
if (tmask & vcpu->arch.cr4_guest_owned_bits)
- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+ kvm_x86_ops.decache_cr4_guest_bits(vcpu);
return vcpu->arch.cr4 & mask;
}
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
- kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_CR3);
+ kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
return vcpu->arch.cr3;
}
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index c06e8353efd3..43c93ffa76ed 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -221,7 +221,7 @@ struct x86_emulate_ops {
enum x86_intercept_stage stage);
bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx,
- u32 *ecx, u32 *edx, bool check_limit);
+ u32 *ecx, u32 *edx, bool exact_only);
bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
@@ -301,6 +301,7 @@ struct fastop;
typedef void (*fastop_t)(struct fastop *);
struct x86_emulate_ctxt {
+ void *vcpu;
const struct x86_emulate_ops *ops;
/* Register state before/after emulation. */
@@ -319,6 +320,10 @@ struct x86_emulate_ctxt {
bool have_exception;
struct x86_exception exception;
+ /* GPA available */
+ bool gpa_available;
+ gpa_t gpa_val;
+
/*
* decode cache
*/
@@ -329,9 +334,6 @@ struct x86_emulate_ctxt {
u8 intercept;
u8 op_bytes;
u8 ad_bytes;
- struct operand src;
- struct operand src2;
- struct operand dst;
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
fastop_t fop;
@@ -359,6 +361,11 @@ struct x86_emulate_ctxt {
u8 seg_override;
u64 d;
unsigned long _eip;
+
+ /* Here begins the usercopy section. */
+ struct operand src;
+ struct operand src2;
+ struct operand dst;
struct operand memop;
unsigned long _regs[NR_VCPU_REGS];
struct operand *memopp;
@@ -388,6 +395,34 @@ struct x86_emulate_ctxt {
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
+#define X86EMUL_CPUID_VENDOR_CentaurHauls_ebx 0x746e6543
+#define X86EMUL_CPUID_VENDOR_CentaurHauls_ecx 0x736c7561
+#define X86EMUL_CPUID_VENDOR_CentaurHauls_edx 0x48727561
+
+static inline bool is_guest_vendor_intel(u32 ebx, u32 ecx, u32 edx)
+{
+ return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
+}
+
+static inline bool is_guest_vendor_amd(u32 ebx, u32 ecx, u32 edx)
+{
+ return (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) ||
+ (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx);
+}
+
+static inline bool is_guest_vendor_hygon(u32 ebx, u32 ecx, u32 edx)
+{
+ return ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx;
+}
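
The X86EMUL_CPUID_VENDOR_* constants are just the 12-character vendor string split into little-endian u32 chunks in EBX/EDX/ECX order; a sketch of the derivation (assumes a little-endian host, which x86 is):

#include <stdint.h>
#include <string.h>

/* Sketch: pack a 12-byte CPUID.0 vendor string into the register layout. */
static void vendor_to_regs(const char id[12], uint32_t *ebx, uint32_t *edx,
			   uint32_t *ecx)
{
	memcpy(ebx, id + 0, 4);		/* "Cent" -> 0x746e6543 */
	memcpy(edx, id + 4, 4);		/* "aurH" -> 0x48727561 */
	memcpy(ecx, id + 8, 4);		/* "auls" -> 0x736c7561 */
}

vendor_to_regs("CentaurHauls", &ebx, &edx, &ecx) reproduces the three constants added above.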
+
enum x86_intercept_stage {
X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */
X86_ICPT_PRE_EXCEPT,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 7356a56e6282..ca80daf8f878 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -164,14 +164,28 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
kvfree(map);
}
-static void recalculate_apic_map(struct kvm *kvm)
+void kvm_recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
u32 max_id = 255; /* enough space for any xAPIC ID */
+ if (!kvm->arch.apic_map_dirty) {
+ /*
+ * Read kvm->arch.apic_map_dirty before
+ * kvm->arch.apic_map
+ */
+ smp_rmb();
+ return;
+ }
+
mutex_lock(&kvm->arch.apic_map_lock);
+ if (!kvm->arch.apic_map_dirty) {
+ /* Someone else has updated the map. */
+ mutex_unlock(&kvm->arch.apic_map_lock);
+ return;
+ }
kvm_for_each_vcpu(i, vcpu, kvm)
if (kvm_apic_present(vcpu))
@@ -236,6 +250,12 @@ out:
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
+ /*
+ * Write kvm->arch.apic_map before
+ * clearing kvm->arch.apic_map_dirty
+ */
+ smp_wmb();
+ kvm->arch.apic_map_dirty = false;
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
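
The new apic_map_dirty flag turns map rebuilds into double-checked work: writers merely mark the map dirty, and kvm_recalculate_apic_map() pairs the smp_rmb() on its fast path with the smp_wmb() before clearing the flag, so a reader that sees a clean flag also sees the published map. A condensed sketch of the pattern (illustrative pseudocode; update_apic_state() and rebuild_map() are placeholders):

/* writer side, e.g. an APIC ID or LDR update: */
	update_apic_state();
	kvm->arch.apic_map_dirty = true;

/* recalculation side: */
	if (!kvm->arch.apic_map_dirty) {
		smp_rmb();			/* pairs with smp_wmb() below */
		return;				/* map already current */
	}
	mutex_lock(&kvm->arch.apic_map_lock);
	if (kvm->arch.apic_map_dirty) {		/* re-check under the lock */
		rebuild_map();
		smp_wmb();			/* publish map before clearing flag */
		kvm->arch.apic_map_dirty = false;
	}
	mutex_unlock(&kvm->arch.apic_map_lock);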
@@ -257,20 +277,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
else
static_key_slow_inc(&apic_sw_disabled.key);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
kvm_lapic_set_reg(apic, APIC_ID, id << 24);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
kvm_lapic_set_reg(apic, APIC_LDR, id);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -286,7 +306,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
kvm_lapic_set_reg(apic, APIC_ID, id);
kvm_lapic_set_reg(apic, APIC_LDR, ldr);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -294,11 +314,6 @@ static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}
-static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
-{
- return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
-}
-
static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
@@ -448,7 +463,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
if (unlikely(vcpu->arch.apicv_active)) {
/* need to update RVI */
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
- kvm_x86_ops->hwapic_irr_update(vcpu,
+ kvm_x86_ops.hwapic_irr_update(vcpu,
apic_find_highest_irr(apic));
} else {
apic->irr_pending = false;
@@ -473,7 +488,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
* just set SVI.
*/
if (unlikely(vcpu->arch.apicv_active))
- kvm_x86_ops->hwapic_isr_update(vcpu, vec);
+ kvm_x86_ops.hwapic_isr_update(vcpu, vec);
else {
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -521,7 +536,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
* and must be left alone.
*/
if (unlikely(vcpu->arch.apicv_active))
- kvm_x86_ops->hwapic_isr_update(vcpu,
+ kvm_x86_ops.hwapic_isr_update(vcpu,
apic_find_highest_isr(apic));
else {
--apic->isr_count;
@@ -659,7 +674,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
if (apic->vcpu->arch.apicv_active)
- highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
+ highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1048,7 +1063,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}
- if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+ if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
@@ -1226,7 +1241,7 @@ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
-static void apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
+void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
struct kvm_lapic_irq irq;
@@ -1737,7 +1752,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
{
WARN_ON(preemptible());
WARN_ON(!apic->lapic_timer.hv_timer_in_use);
- kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+ kvm_x86_ops.cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
}
@@ -1748,13 +1763,13 @@ static bool start_hv_timer(struct kvm_lapic *apic)
bool expired;
WARN_ON(preemptible());
- if (!kvm_x86_ops->set_hv_timer)
+ if (!kvm_x86_ops.set_hv_timer)
return false;
if (!ktimer->tscdeadline)
return false;
- if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
+ if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
return false;
ktimer->hv_timer_in_use = true;
@@ -1917,7 +1932,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
- recalculate_apic_map(apic->vcpu->kvm);
+ apic->vcpu->kvm->arch.apic_map_dirty = true;
} else
ret = 1;
break;
@@ -1946,7 +1961,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_ICR:
/* No delay here, so we always clear the pending bit */
val &= ~(1 << 12);
- apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
+ kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
kvm_lapic_set_reg(apic, APIC_ICR, val);
break;
@@ -2023,6 +2038,8 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;
}
+ kvm_recalculate_apic_map(apic->vcpu->kvm);
+
return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
@@ -2171,7 +2188,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
static_key_slow_dec_deferred(&apic_hw_disabled);
} else {
static_key_slow_inc(&apic_hw_disabled.key);
- recalculate_apic_map(vcpu->kvm);
+ vcpu->kvm->arch.apic_map_dirty = true;
}
}
@@ -2179,7 +2196,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
- kvm_x86_ops->set_virtual_apic_mode(vcpu);
+ kvm_x86_ops.set_virtual_apic_mode(vcpu);
apic->base_address = apic->vcpu->arch.apic_base &
MSR_IA32_APICBASE_BASE;
@@ -2212,6 +2229,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!apic)
return;
+ vcpu->kvm->arch.apic_map_dirty = false;
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2256,13 +2274,15 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
if (vcpu->arch.apicv_active) {
- kvm_x86_ops->apicv_post_state_restore(vcpu);
- kvm_x86_ops->hwapic_irr_update(vcpu, -1);
- kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+ kvm_x86_ops.apicv_post_state_restore(vcpu);
+ kvm_x86_ops.hwapic_irr_update(vcpu, -1);
+ kvm_x86_ops.hwapic_isr_update(vcpu, -1);
}
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
+
+ kvm_recalculate_apic_map(vcpu->kvm);
}
/*
@@ -2484,17 +2504,18 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
struct kvm_lapic *apic = vcpu->arch.apic;
int r;
-
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
r = kvm_apic_state_fixup(vcpu, s, true);
- if (r)
+ if (r) {
+ kvm_recalculate_apic_map(vcpu->kvm);
return r;
+ }
memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
- recalculate_apic_map(vcpu->kvm);
+ kvm_recalculate_apic_map(vcpu->kvm);
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
@@ -2506,10 +2527,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_apic_update_apicv(vcpu);
apic->highest_isr_cache = -1;
if (vcpu->arch.apicv_active) {
- kvm_x86_ops->apicv_post_state_restore(vcpu);
- kvm_x86_ops->hwapic_irr_update(vcpu,
+ kvm_x86_ops.apicv_post_state_restore(vcpu);
+ kvm_x86_ops.hwapic_irr_update(vcpu,
apic_find_highest_irr(apic));
- kvm_x86_ops->hwapic_isr_update(vcpu,
+ kvm_x86_ops.hwapic_isr_update(vcpu,
apic_find_highest_isr(apic));
}
kvm_make_request(KVM_REQ_EVENT, vcpu);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index ec6fbfe325cf..40ed6ed22751 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -78,6 +78,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
+void kvm_recalculate_apic_map(struct kvm *kvm);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);
int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val);
int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
@@ -95,6 +96,7 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu);
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);
+void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index a647601c9e1c..8a3b1bce722a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -95,11 +95,11 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
- vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
- kvm_get_active_pcid(vcpu));
+ kvm_x86_ops.load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
+ kvm_get_active_pcid(vcpu));
}
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
@@ -170,8 +170,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned pte_access, unsigned pte_pkey,
unsigned pfec)
{
- int cpl = kvm_x86_ops->get_cpl(vcpu);
- unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+ int cpl = kvm_x86_ops.get_cpl(vcpu);
+ unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
/*
* If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 87e9ba27ada1..8071952e9cf2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -19,6 +19,7 @@
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
#include "cpuid.h"
#include <linux/kvm_host.h>
@@ -86,6 +87,8 @@ __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
*/
bool tdp_enabled = false;
+static int max_page_level __read_mostly;
+
enum {
AUDIT_PRE_PAGE_FAULT,
AUDIT_POST_PAGE_FAULT,
@@ -215,17 +218,6 @@ struct kvm_shadow_walk_iterator {
unsigned index;
};
-static const union kvm_mmu_page_role mmu_base_role_mask = {
- .cr0_wp = 1,
- .gpte_is_8_bytes = 1,
- .nxe = 1,
- .smep_andnot_wp = 1,
- .smap_andnot_wp = 1,
- .smm = 1,
- .guest_mode = 1,
- .ad_disabled = 1,
-};
-
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
(_root), (_addr)); \
@@ -313,7 +305,7 @@ kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
static inline bool kvm_available_flush_tlb_with_range(void)
{
- return kvm_x86_ops->tlb_remote_flush_with_range;
+ return kvm_x86_ops.tlb_remote_flush_with_range;
}
static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
@@ -321,8 +313,8 @@ static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
{
int ret = -ENOTSUPP;
- if (range && kvm_x86_ops->tlb_remote_flush_with_range)
- ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+ if (range && kvm_x86_ops.tlb_remote_flush_with_range)
+ ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);
if (ret)
kvm_flush_remote_tlbs(kvm);
@@ -1650,7 +1642,7 @@ static bool spte_set_dirty(u64 *sptep)
rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
/*
- * Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
+ * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
* do not bother adding back write access to pages marked
* SPTE_AD_WRPROT_ONLY_MASK.
*/
@@ -1739,8 +1731,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- if (kvm_x86_ops->enable_log_dirty_pt_masked)
- kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
+ if (kvm_x86_ops.enable_log_dirty_pt_masked)
+ kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
mask);
else
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
@@ -1755,8 +1747,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
*/
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops->write_log_dirty)
- return kvm_x86_ops->write_log_dirty(vcpu);
+ if (kvm_x86_ops.write_log_dirty)
+ return kvm_x86_ops.write_log_dirty(vcpu);
return 0;
}
@@ -3044,7 +3036,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (level > PT_PAGE_TABLE_LEVEL)
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
- spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+ spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
kvm_is_mmio_pfn(pfn));
if (host_writable)
@@ -3292,7 +3284,7 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
if (!slot)
return PT_PAGE_TABLE_LEVEL;
- max_level = min(max_level, kvm_x86_ops->get_lpage_level());
+ max_level = min(max_level, max_page_level);
for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
linfo = lpage_info_slot(gfn, slot, max_level);
if (!linfo->disallow_lpage)
@@ -3568,8 +3560,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* write-protected for dirty-logging or access tracking.
*/
if ((error_code & PFERR_WRITE_MASK) &&
- spte_can_locklessly_be_made_writable(spte))
- {
+ spte_can_locklessly_be_made_writable(spte)) {
new_spte |= PT_WRITABLE_MASK;
/*
@@ -3731,7 +3722,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
- vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+
+ /* root_cr3 is ignored for direct MMUs. */
+ vcpu->arch.mmu->root_cr3 = 0;
return 0;
}
@@ -3743,7 +3736,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
gfn_t root_gfn, root_cr3;
int i;
- root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+ root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
root_gfn = root_cr3 >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
@@ -4080,7 +4073,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
arch.gfn = gfn;
arch.direct_map = vcpu->arch.mmu->direct_map;
- arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+ arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
return kvm_setup_async_pf(vcpu, cr2_or_gpa,
kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
@@ -4252,6 +4245,14 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
context->nx = false;
}
+static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t cr3,
+ union kvm_mmu_page_role role)
+{
+ return (role.direct || cr3 == root->cr3) &&
+ VALID_PAGE(root->hpa) && page_header(root->hpa) &&
+ role.word == page_header(root->hpa)->role.word;
+}
+
/*
* Find out if a previously cached root matching the new CR3/role is available.
* The current root is also inserted into the cache.
@@ -4270,12 +4271,13 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
root.cr3 = mmu->root_cr3;
root.hpa = mmu->root_hpa;
+ if (is_root_usable(&root, new_cr3, new_role))
+ return true;
+
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
swap(root, mmu->prev_roots[i]);
- if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) &&
- page_header(root.hpa) != NULL &&
- new_role.word == page_header(root.hpa)->role.word)
+ if (is_root_usable(&root, new_cr3, new_role))
break;
}
@@ -4309,7 +4311,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
* accompanied by KVM_REQ_MMU_RELOAD, which will free
* the root set here and allocate a new one.
*/
- kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
+ kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -4508,7 +4510,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
cpuid_maxphyaddr(vcpu), context->root_level,
context->nx,
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
- is_pse(vcpu), guest_cpuid_is_amd(vcpu));
+ is_pse(vcpu),
+ guest_cpuid_is_amd_or_hygon(vcpu));
}
static void
@@ -4874,7 +4877,6 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
ext.cr4_pse = !!is_pse(vcpu);
ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
- ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
ext.valid = 1;
@@ -4907,7 +4909,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
role.base.ad_disabled = (shadow_accessed_mask == 0);
- role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+ role.base.level = kvm_x86_ops.get_tdp_level(vcpu);
role.base.direct = true;
role.base.gpte_is_8_bytes = true;
@@ -4920,7 +4922,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
union kvm_mmu_role new_role =
kvm_calc_tdp_mmu_root_page_role(vcpu, false);
- new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
@@ -4929,10 +4930,9 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
- context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
+ context->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
context->direct_map = true;
- context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
- context->get_cr3 = get_cr3;
+ context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
@@ -4992,7 +4992,6 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
union kvm_mmu_role new_role =
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
- new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
@@ -5012,14 +5011,14 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
- bool execonly)
+ bool execonly, u8 level)
{
union kvm_mmu_role role = {0};
/* SMM flag is inherited from root_mmu */
role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
- role.base.level = PT64_ROOT_4LEVEL;
+ role.base.level = level;
role.base.gpte_is_8_bytes = true;
role.base.direct = false;
role.base.ad_disabled = !accessed_dirty;
@@ -5043,17 +5042,17 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty, gpa_t new_eptp)
{
struct kvm_mmu *context = vcpu->arch.mmu;
+ u8 level = vmx_eptp_page_walk_level(new_eptp);
union kvm_mmu_role new_role =
kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
- execonly);
+ execonly, level);
__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
- new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
- context->shadow_root_level = PT64_ROOT_4LEVEL;
+ context->shadow_root_level = level;
context->nx = true;
context->ept_ad = accessed_dirty;
@@ -5062,7 +5061,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;
context->update_pte = ept_update_pte;
- context->root_level = PT64_ROOT_4LEVEL;
+ context->root_level = level;
context->direct_map = false;
context->mmu_role.as_u64 = new_role.as_u64;
@@ -5079,8 +5078,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
struct kvm_mmu *context = vcpu->arch.mmu;
kvm_init_shadow_mmu(vcpu);
- context->set_cr3 = kvm_x86_ops->set_cr3;
- context->get_cr3 = get_cr3;
+ context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
@@ -5090,12 +5088,11 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
- new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
return;
g_context->mmu_role.as_u64 = new_role.as_u64;
- g_context->get_cr3 = get_cr3;
+ g_context->get_guest_pgd = get_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;
@@ -5185,8 +5182,8 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
kvm_mmu_sync_roots(vcpu);
if (r)
goto out;
- kvm_mmu_load_cr3(vcpu);
- kvm_x86_ops->tlb_flush(vcpu, true);
+ kvm_mmu_load_pgd(vcpu);
+ kvm_x86_ops.tlb_flush(vcpu, true);
out:
return r;
}
@@ -5329,6 +5326,22 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
return spte;
}
+/*
+ * Ignore various flags when determining if a SPTE can be immediately
+ * overwritten for the current MMU.
+ * - level: explicitly checked in mmu_pte_write_new_pte(), and will never
+ * match the current MMU role, as MMU's level tracks the root level.
+ * - access: updated based on the new guest PTE
+ * - quadrant: handled by get_written_sptes()
+ * - invalid: always false (loop only walks valid shadow pages)
+ */
+static const union kvm_mmu_page_role role_ign = {
+ .level = 0xf,
+ .access = 0x7,
+ .quadrant = 0x3,
+ .invalid = 0x1,
+};
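
The mask is applied with a plain XOR-and-ignore comparison; factored out as a sketch, this is the !((sp->role.word ^ base_role) & ~role_ign.word) test used in the hunk below:

/* Sketch: two role words match if they agree on all bits outside the mask. */
static inline bool roles_compatible(u32 a, u32 b, u32 ignore_mask)
{
	return !((a ^ b) & ~ignore_mask);
}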
+
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
struct kvm_page_track_notifier_node *node)
@@ -5384,8 +5397,8 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
- !((sp->role.word ^ base_role)
- & mmu_base_role_mask.word) && rmap_can_add(vcpu))
+ !((sp->role.word ^ base_role) & ~role_ign.word) &&
+ rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (need_remote_flush(entry, *spte))
remote_flush = true;
@@ -5416,18 +5429,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
{
- int r, emulation_type = 0;
+ int r, emulation_type = EMULTYPE_PF;
bool direct = vcpu->arch.mmu->direct_map;
if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
return RET_PF_RETRY;
- /* With shadow page tables, fault_address contains a GVA or nGPA. */
- if (vcpu->arch.mmu->direct_map) {
- vcpu->arch.gpa_available = true;
- vcpu->arch.gpa_val = cr2_or_gpa;
- }
-
r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
@@ -5471,7 +5478,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
* for L1 isn't going to magically fix whatever issue caused L2 to fail.
*/
if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
- emulation_type = EMULTYPE_ALLOW_RETRY;
+ emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate:
/*
* On AMD platforms, under certain conditions insn_len may be zero on #NPF.
@@ -5481,7 +5488,7 @@ emulate:
* guest, with the exception of AMD Erratum 1096 which is unrecoverable.
*/
if (unlikely(insn && !insn_len)) {
- if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
+ if (!kvm_x86_ops.need_emulation_on_page_fault(vcpu))
return 1;
}
@@ -5516,7 +5523,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
if (VALID_PAGE(mmu->prev_roots[i].hpa))
mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
- kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+ kvm_x86_ops.tlb_flush_gva(vcpu, gva);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5541,7 +5548,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
}
if (tlb_flush)
- kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+ kvm_x86_ops.tlb_flush_gva(vcpu, gva);
++vcpu->stat.invlpg;
@@ -5553,18 +5560,25 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
-void kvm_enable_tdp(void)
+void kvm_configure_mmu(bool enable_tdp, int tdp_page_level)
{
- tdp_enabled = true;
-}
-EXPORT_SYMBOL_GPL(kvm_enable_tdp);
+ tdp_enabled = enable_tdp;
-void kvm_disable_tdp(void)
-{
- tdp_enabled = false;
+ /*
+ * max_page_level reflects the capabilities of KVM's MMU irrespective
+ * of kernel support, e.g. KVM may be capable of using 1GB pages when
+ * the kernel is not. But, KVM never creates a page size greater than
+ * what is used by the kernel for any given HVA, i.e. the kernel's
+ * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
+ */
+ if (tdp_enabled)
+ max_page_level = tdp_page_level;
+ else if (boot_cpu_has(X86_FEATURE_GBPAGES))
+ max_page_level = PT_PDPE_LEVEL;
+ else
+ max_page_level = PT_DIRECTORY_LEVEL;
}
-EXPORT_SYMBOL_GPL(kvm_disable_tdp);
-
+EXPORT_SYMBOL_GPL(kvm_configure_mmu);
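
Vendor modules now hand the MMU both their paging mode and the hardware's max TDP page level in a single call; a hypothetical call site (the NPT/1GB-page specifics are illustrative, the real sites are in the SVM/VMX setup paths):

	/* Hypothetical: NPT enabled, hardware can map up to 1GB pages. */
	kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);

With TDP disabled the level argument is irrelevant; the limit falls back to host GBPAGES support, per the comment above.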
/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
@@ -5658,7 +5672,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
* SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
* skip allocating the PDP table.
*/
- if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+ if (tdp_enabled && kvm_x86_ops.get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
return 0;
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
@@ -5860,23 +5874,17 @@ static bool slot_rmap_write_protect(struct kvm *kvm,
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+ struct kvm_memory_slot *memslot,
+ int start_level)
{
bool flush;
spin_lock(&kvm->mmu_lock);
- flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
- false);
+ flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+ start_level, PT_MAX_HUGEPAGE_LEVEL, false);
spin_unlock(&kvm->mmu_lock);
/*
- * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
- * which do tlb flush out of mmu-lock should be serialized by
- * kvm->slots_lock otherwise tlb flush would be missed.
- */
- lockdep_assert_held(&kvm->slots_lock);
-
- /*
* We can flush all the TLBs out of the mmu lock without TLB
* corruption since we just change the spte from writable to
* readonly so that we only need to care the case of changing
@@ -5888,8 +5896,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
* on PT_WRITABLE_MASK anymore.
*/
if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
- memslot->npages);
+ kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5941,6 +5948,21 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
}
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ /*
+ * All current use cases for flushing the TLBs for a specific memslot
+ * are related to dirty logging, and do the TLB flush out of mmu_lock.
+ * The interaction between the various operations on the memslot must be
+ * serialized by slots_lock to ensure the TLB flush from one operation
+ * is observed by any other operation on the same memslot.
+ */
+ lockdep_assert_held(&kvm->slots_lock);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
+}
+
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
@@ -5950,8 +5972,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
spin_unlock(&kvm->mmu_lock);
- lockdep_assert_held(&kvm->slots_lock);
-
/*
* It's also safe to flush TLBs out of mmu lock here as currently this
* function is only used for dirty logging, in which case flushing TLB
@@ -5959,8 +5979,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
* dirty_bitmap.
*/
if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
- memslot->npages);
+ kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
@@ -5974,12 +5993,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
false);
spin_unlock(&kvm->mmu_lock);
- /* see kvm_mmu_slot_remove_write_access */
- lockdep_assert_held(&kvm->slots_lock);
-
if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
- memslot->npages);
+ kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
@@ -5992,12 +6007,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
spin_unlock(&kvm->mmu_lock);
- lockdep_assert_held(&kvm->slots_lock);
-
- /* see kvm_mmu_slot_leaf_clear_dirty */
if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
- memslot->npages);
+ kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 3521e2d176f2..ddc1ec3bdacd 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -14,22 +14,18 @@
#include <linux/kvm_host.h>
#include <linux/rculist.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_page_track.h>
#include "mmu.h"
-void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
int i;
- for (i = 0; i < KVM_PAGE_TRACK_MAX; i++)
- if (!dont || free->arch.gfn_track[i] !=
- dont->arch.gfn_track[i]) {
- kvfree(free->arch.gfn_track[i]);
- free->arch.gfn_track[i] = NULL;
- }
+ for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
+ kvfree(slot->arch.gfn_track[i]);
+ slot->arch.gfn_track[i] = NULL;
+ }
}
int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
@@ -48,7 +44,7 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
return 0;
track_free:
- kvm_page_track_free_memslot(slot, NULL);
+ kvm_page_track_free_memslot(slot);
return -ENOMEM;
}
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 21a3320f166a..9bdf9b7d9a96 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -66,7 +66,7 @@
#define PT_GUEST_ACCESSED_SHIFT 8
#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
#define CMPXCHG cmpxchg64
- #define PT_MAX_FULL_LEVELS 4
+ #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
#error Invalid PTTYPE value
#endif
@@ -333,7 +333,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
walker->level = mmu->root_level;
- pte = mmu->get_cr3(vcpu);
+ pte = mmu->get_guest_pgd(vcpu);
have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
#if PTTYPE == 64
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bcc6a73d6628..a5078841bdac 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -111,7 +111,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
.config = config,
};
- attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+ attr.sample_period = get_sample_period(pmc, pmc->counter);
if (in_tx)
attr.config |= HSW_IN_TX;
@@ -158,7 +158,7 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
/* recalibrate sample period and check if it's accepted by perf core */
if (perf_event_period(pmc->perf_event,
- (-pmc->counter) & pmc_bitmask(pmc)))
+ get_sample_period(pmc, pmc->counter)))
return false;
/* reuse perf_event to serve as pmc_reprogram_counter() does*/
@@ -211,7 +211,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
ARCH_PERFMON_EVENTSEL_CMASK |
HSW_IN_TX |
HSW_IN_TX_CHECKPOINTED))) {
- config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
+ config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
event_select,
unit_mask);
if (config != PERF_COUNT_HW_MAX)
@@ -265,7 +265,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
pmc->current_config = (u64)ctrl;
pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
- kvm_x86_ops->pmu_ops->find_fixed_event(idx),
+ kvm_x86_ops.pmu_ops->find_fixed_event(idx),
!(en_field & 0x2), /* exclude user */
!(en_field & 0x1), /* exclude kernel */
pmi, false, false);
@@ -274,7 +274,7 @@ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
- struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
+ struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
if (!pmc)
return;
@@ -296,7 +296,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
int bit;
for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
- struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
+ struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);
if (unlikely(!pmc || !pmc->perf_event)) {
clear_bit(bit, pmu->reprogram_pmi);
@@ -318,7 +318,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
- return kvm_x86_ops->pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
+ return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}
bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -368,7 +368,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
if (is_vmware_backdoor_pmc(idx))
return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
- pmc = kvm_x86_ops->pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
+ pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
if (!pmc)
return 1;
@@ -384,14 +384,14 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
- return kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
- kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
+ return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
+ kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, msr);
+ struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);
if (pmc)
__set_bit(pmc->idx, pmu->pmc_in_use);
@@ -399,13 +399,13 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
- return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
+ return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr, data);
}
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
- return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
+ return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}
/* refresh PMU settings. This function generally is called when underlying
@@ -414,7 +414,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
*/
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
- kvm_x86_ops->pmu_ops->refresh(vcpu);
+ kvm_x86_ops.pmu_ops->refresh(vcpu);
}
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
@@ -422,7 +422,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
irq_work_sync(&pmu->irq_work);
- kvm_x86_ops->pmu_ops->reset(vcpu);
+ kvm_x86_ops.pmu_ops->reset(vcpu);
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
@@ -430,7 +430,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
memset(pmu, 0, sizeof(*pmu));
- kvm_x86_ops->pmu_ops->init(vcpu);
+ kvm_x86_ops.pmu_ops->init(vcpu);
init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
pmu->event_count = 0;
pmu->need_cleanup = false;
@@ -462,7 +462,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
pmu->pmc_in_use, X86_PMC_IDX_MAX);
for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
- pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, i);
+ pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
pmc_stop_counter(pmc);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 13332984b6d5..a6c78a797cb1 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -88,7 +88,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
- return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
+ return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
}
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
@@ -129,6 +129,15 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
return NULL;
}
+static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+{
+ u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
+
+ if (!sample_period)
+ sample_period = pmc_bitmask(pmc) + 1;
+ return sample_period;
+}
+
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
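The zero check in get_sample_period() matters because a counter programmed with value 0 would yield (-counter) & mask == 0, and perf rejects a zero sample period. A standalone model of the computation, where mask stands in for pmc_bitmask(pmc) (e.g. (1ULL << 48) - 1 for a 48-bit counter):

static u64 model_sample_period(u64 counter, u64 mask)
{
	u64 period = (-counter) & mask;	/* ticks until the next overflow */

	if (!period)			/* counter == 0: needs a full wrap */
		period = mask + 1;
	return period;
}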
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 216364cb65a3..851e9cc79930 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -522,10 +522,31 @@ static void recalc_intercepts(struct vcpu_svm *svm)
h = &svm->nested.hsave->control;
g = &svm->nested;
- c->intercept_cr = h->intercept_cr | g->intercept_cr;
- c->intercept_dr = h->intercept_dr | g->intercept_dr;
- c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
- c->intercept = h->intercept | g->intercept;
+ c->intercept_cr = h->intercept_cr;
+ c->intercept_dr = h->intercept_dr;
+ c->intercept_exceptions = h->intercept_exceptions;
+ c->intercept = h->intercept;
+
+ if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+ /* We only want the cr8 intercept bits of L1 */
+ c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
+ c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
+
+ /*
+ * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
+ * affect any interrupt we may want to inject; therefore,
+ * interrupt window vmexits are irrelevant to L0.
+ */
+ c->intercept &= ~(1ULL << INTERCEPT_VINTR);
+ }
+
+ /* We don't want to see VMMCALLs from a nested guest */
+ c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);
+
+ c->intercept_cr |= g->intercept_cr;
+ c->intercept_dr |= g->intercept_dr;
+ c->intercept_exceptions |= g->intercept_exceptions;
+ c->intercept |= g->intercept;
}
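Note the ordering in the rewritten recalc_intercepts(): L1's intercepts are copied first, the bits L0 does not want while running L2 (CR8 reads/writes and VINTR under HF_VINTR_MASK, plus VMMCALL) are masked out, and only then are the nested intercepts OR-ed on top, so vmcb12 can still request those exits for L1 itself. In shorthand, for the 64-bit intercept word:

/* Condensed model of the merge above; h is L1's state, g is vmcb12's. */
static u64 merge_intercepts(u64 h, u64 g, bool vintr_masked)
{
	u64 c = h;

	if (vintr_masked)			/* HF_VINTR_MASK is set */
		c &= ~(1ULL << INTERCEPT_VINTR);
	c &= ~(1ULL << INTERCEPT_VMMCALL);	/* L0 ignores L2's VMMCALLs */
	return c | g;				/* L1's requests for L2 win */
}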
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
@@ -630,6 +651,11 @@ static inline void clr_intercept(struct vcpu_svm *svm, int bit)
recalc_intercepts(svm);
}
+static inline bool is_intercept(struct vcpu_svm *svm, int bit)
+{
+ return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
+}
+
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
@@ -1209,6 +1235,7 @@ static int avic_ga_log_notifier(u32 ga_tag)
u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
+ trace_kvm_avic_ga_log(vm_id, vcpu_id);
spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
@@ -1370,6 +1397,29 @@ static void svm_hardware_teardown(void)
iopm_base = 0;
}
+static __init void svm_set_cpu_caps(void)
+{
+ kvm_set_cpu_caps();
+
+ supported_xss = 0;
+
+ /* CPUID 0x80000001 and 0x8000000A (SVM features) */
+ if (nested) {
+ kvm_cpu_cap_set(X86_FEATURE_SVM);
+
+ if (nrips)
+ kvm_cpu_cap_set(X86_FEATURE_NRIPS);
+
+ if (npt_enabled)
+ kvm_cpu_cap_set(X86_FEATURE_NPT);
+ }
+
+ /* CPUID 0x80000008 */
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
+ boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
+}
+
static __init int svm_hardware_setup(void)
{
int cpu;
@@ -1388,6 +1438,8 @@ static __init int svm_hardware_setup(void)
init_msrpm_offsets();
+ supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
+
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -1435,16 +1487,11 @@ static __init int svm_hardware_setup(void)
if (!boot_cpu_has(X86_FEATURE_NPT))
npt_enabled = false;
- if (npt_enabled && !npt) {
- printk(KERN_INFO "kvm: Nested Paging disabled\n");
+ if (npt_enabled && !npt)
npt_enabled = false;
- }
- if (npt_enabled) {
- printk(KERN_INFO "kvm: Nested Paging enabled\n");
- kvm_enable_tdp();
- } else
- kvm_disable_tdp();
+ kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);
+ pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
if (nrips) {
if (!boot_cpu_has(X86_FEATURE_NRIPS))
@@ -1480,6 +1527,8 @@ static __init int svm_hardware_setup(void)
pr_info("Virtual GIF supported\n");
}
+ svm_set_cpu_caps();
+
return 0;
err:
@@ -1939,19 +1988,6 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
kfree(region);
}
-static struct kvm *svm_vm_alloc(void)
-{
- struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm),
- GFP_KERNEL_ACCOUNT | __GFP_ZERO,
- PAGE_KERNEL);
- return &kvm_svm->kvm;
-}
-
-static void svm_vm_free(struct kvm *kvm)
-{
- vfree(to_kvm_svm(kvm));
-}
-
static void sev_vm_destroy(struct kvm *kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -2186,7 +2222,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
}
init_vmcb(svm);
- kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
+ kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
kvm_rdx_write(vcpu, eax);
if (kvm_vcpu_apicv_active(vcpu) && !init_event)
@@ -2420,14 +2456,38 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
}
}
+static inline void svm_enable_vintr(struct vcpu_svm *svm)
+{
+ struct vmcb_control_area *control;
+
+ /* The following fields are ignored when AVIC is enabled */
+ WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
+
+ /*
+ * This is just a dummy VINTR to actually cause a vmexit to happen.
+ * Actual injection of virtual interrupts happens through EVENTINJ.
+ */
+ control = &svm->vmcb->control;
+ control->int_vector = 0x0;
+ control->int_ctl &= ~V_INTR_PRIO_MASK;
+ control->int_ctl |= V_IRQ_MASK |
+ ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
+ mark_dirty(svm->vmcb, VMCB_INTR);
+}
+
static void svm_set_vintr(struct vcpu_svm *svm)
{
set_intercept(svm, INTERCEPT_VINTR);
+ if (is_intercept(svm, INTERCEPT_VINTR))
+ svm_enable_vintr(svm);
}
static void svm_clear_vintr(struct vcpu_svm *svm)
{
clr_intercept(svm, INTERCEPT_VINTR);
+
+ svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+ mark_dirty(svm->vmcb, VMCB_INTR);
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -2983,15 +3043,6 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
return pdpte;
}
-static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
- unsigned long root)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- svm->vmcb->control.nested_cr3 = __sme_set(root);
- mark_dirty(svm->vmcb, VMCB_NPT);
-}
-
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
@@ -3027,8 +3078,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
kvm_init_shadow_mmu(vcpu);
- vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
- vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
+ vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
@@ -3089,43 +3139,36 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
return vmexit;
}
-/* This function returns true if it is save to enable the irq window */
-static inline bool nested_svm_intr(struct vcpu_svm *svm)
+static void nested_svm_intr(struct vcpu_svm *svm)
{
- if (!is_guest_mode(&svm->vcpu))
- return true;
-
- if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
- return true;
-
- if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
- return false;
-
- /*
- * if vmexit was already requested (by intercepted exception
- * for instance) do not overwrite it with "external interrupt"
- * vmexit.
- */
- if (svm->nested.exit_required)
- return false;
-
svm->vmcb->control.exit_code = SVM_EXIT_INTR;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
- if (svm->nested.intercept & 1ULL) {
- /*
- * The #vmexit can't be emulated here directly because this
- * code path runs with irqs and preemption disabled. A
- * #vmexit emulation might sleep. Only signal request for
- * the #vmexit here.
- */
- svm->nested.exit_required = true;
- trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
- return false;
+ /* nested_svm_vmexit() is called afterwards from handle_exit */
+ svm->nested.exit_required = true;
+ trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
+}
+
+static bool nested_exit_on_intr(struct vcpu_svm *svm)
+{
+ return (svm->nested.intercept & 1ULL);
+}
+
+static int svm_check_nested_events(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ bool block_nested_events =
+ kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required;
+
+ if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
+ if (block_nested_events)
+ return -EBUSY;
+ nested_svm_intr(svm);
+ return 0;
}
- return true;
+ return 0;
}
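How the generic x86 code consumes this new hook is not part of the diff; roughly (a sketch mirroring the existing VMX contract, where the real loop lives in arch/x86/kvm/x86.c):

	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
		r = kvm_x86_ops.check_nested_events(vcpu);
		if (r != 0)
			return r;	/* -EBUSY: reinject first, retry later */
	}

A non-zero return means the interrupt could not be converted into a nested vmexit yet, typically because an earlier event still needs reinjection.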
/* This function returns true if it is safe to enable the nmi window */
@@ -3244,9 +3287,6 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
return NESTED_EXIT_CONTINUE;
}
-/*
- * If this function returns true, this #vmexit was already handled
- */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
@@ -3521,6 +3561,9 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
+ if ((vmcb->save.efer & EFER_SVME) == 0)
+ return false;
+
if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
return false;
@@ -3537,6 +3580,10 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
+ bool evaluate_pending_interrupts =
+ is_intercept(svm, INTERCEPT_VINTR) ||
+ is_intercept(svm, INTERCEPT_IRET);
+
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
else
@@ -3596,15 +3643,6 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
- if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
- /* We only want the cr8 intercept bits of the guest */
- clr_cr_intercept(svm, INTERCEPT_CR8_READ);
- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
- }
-
- /* We don't want to see VMMCALLs from a nested guest */
- clr_intercept(svm, INTERCEPT_VMMCALL);
-
svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
@@ -3632,7 +3670,21 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->nested.vmcb = vmcb_gpa;
+ /*
+ * If L1 had a pending IRQ/NMI before executing VMRUN,
+ * which wasn't delivered because it was disallowed (e.g.
+ * interrupts disabled), L0 needs to evaluate if this pending
+ * event should cause an exit from L2 to L1 or be delivered
+ * directly to L2.
+ *
+ * Usually this would be handled by the processor noticing an
+ * IRQ/NMI window request. However, VMRUN can unblock interrupts
+ * by implicitly setting GIF, so force L0 to perform pending event
+ * evaluation by requesting a KVM_REQ_EVENT.
+ */
enable_gif(svm);
+ if (unlikely(evaluate_pending_interrupts))
+ kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
mark_all_dirty(svm->vmcb);
}
@@ -3834,11 +3886,8 @@ static int clgi_interception(struct vcpu_svm *svm)
disable_gif(svm);
/* After a CLGI no interrupts should come */
- if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
+ if (!kvm_vcpu_apicv_active(&svm->vcpu))
svm_clear_vintr(svm);
- svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
- mark_dirty(svm->vmcb, VMCB_INTR);
- }
return ret;
}
@@ -5124,19 +5173,6 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
++vcpu->stat.nmi_injections;
}
-static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
-{
- struct vmcb_control_area *control;
-
- /* The following fields are ignored when AVIC is enabled */
- control = &svm->vmcb->control;
- control->int_vector = irq;
- control->int_ctl &= ~V_INTR_PRIO_MASK;
- control->int_ctl |= V_IRQ_MASK |
- ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
- mark_dirty(svm->vmcb, VMCB_INTR);
-}
-
static void svm_set_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -5525,18 +5561,15 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
- int ret;
if (!gif_set(svm) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
return 0;
- ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
-
- if (is_guest_mode(vcpu))
- return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
-
- return ret;
+ if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
+ return !!(svm->vcpu.arch.hflags & HF_HIF_MASK);
+ else
+ return !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
}
static void enable_irq_window(struct kvm_vcpu *vcpu)
@@ -5551,7 +5584,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
* enabled, the STGI interception will not occur. Enable the irq
* window under the assumption that the hardware will set the GIF.
*/
- if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
+ if (vgif_enabled(svm) || gif_set(svm)) {
/*
* IRQ window is not needed when AVIC is enabled,
* unless we have pending ExtINT since it cannot be injected
@@ -5560,7 +5593,6 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
*/
svm_toggle_avic_for_irq_window(vcpu, false);
svm_set_vintr(svm);
- svm_inject_irq(svm, 0x0);
}
}
@@ -5946,24 +5978,30 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
}
STACK_FRAME_NON_STANDARD(svm_vcpu_run);
-static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
+static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ bool update_guest_cr3 = true;
+ unsigned long cr3;
- svm->vmcb->save.cr3 = __sme_set(root);
- mark_dirty(svm->vmcb, VMCB_CR);
-}
-
-static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
+ cr3 = __sme_set(root);
+ if (npt_enabled) {
+ svm->vmcb->control.nested_cr3 = cr3;
+ mark_dirty(svm->vmcb, VMCB_NPT);
- svm->vmcb->control.nested_cr3 = __sme_set(root);
- mark_dirty(svm->vmcb, VMCB_NPT);
+ /* Loading L2's CR3 is handled by enter_svm_guest_mode. */
+ if (is_guest_mode(vcpu))
+ update_guest_cr3 = false;
+ else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+ cr3 = vcpu->arch.cr3;
+ else /* CR3 is already up-to-date. */
+ update_guest_cr3 = false;
+ }
- /* Also sync guest cr3 here in case we live migrate */
- svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
- mark_dirty(svm->vmcb, VMCB_CR);
+ if (update_guest_cr3) {
+ svm->vmcb->save.cr3 = cr3;
+ mark_dirty(svm->vmcb, VMCB_CR);
+ }
}
static int is_disabled(void)
@@ -6025,12 +6063,19 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
boot_cpu_has(X86_FEATURE_XSAVES);
/* Update nrips enabled cache */
- svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
+ svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
+ guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
if (!kvm_vcpu_apicv_active(vcpu))
return;
- guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
+ /*
+ * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
+ * is exposed to the guest, disable AVIC.
+ */
+ if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
+ kvm_request_apicv_update(vcpu->kvm, false,
+ APICV_INHIBIT_REASON_X2APIC);
/*
* Currently, AVIC does not work with nested virtualization.
@@ -6041,88 +6086,11 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
APICV_INHIBIT_REASON_NESTED);
}
-#define F feature_bit
-
-static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
-{
- switch (func) {
- case 0x1:
- if (avic)
- entry->ecx &= ~F(X2APIC);
- break;
- case 0x80000001:
- if (nested)
- entry->ecx |= (1 << 2); /* Set SVM bit */
- break;
- case 0x80000008:
- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
- boot_cpu_has(X86_FEATURE_AMD_SSBD))
- entry->ebx |= F(VIRT_SSBD);
- break;
- case 0x8000000A:
- entry->eax = 1; /* SVM revision 1 */
- entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
- ASID emulation to nested SVM */
- entry->ecx = 0; /* Reserved */
- entry->edx = 0; /* Per default do not support any
- additional features */
-
- /* Support next_rip if host supports it */
- if (boot_cpu_has(X86_FEATURE_NRIPS))
- entry->edx |= F(NRIPS);
-
- /* Support NPT for the guest if enabled */
- if (npt_enabled)
- entry->edx |= F(NPT);
-
- }
-}
-
-static int svm_get_lpage_level(void)
-{
- return PT_PDPE_LEVEL;
-}
-
-static bool svm_rdtscp_supported(void)
-{
- return boot_cpu_has(X86_FEATURE_RDTSCP);
-}
-
-static bool svm_invpcid_supported(void)
-{
- return false;
-}
-
-static bool svm_mpx_supported(void)
-{
- return false;
-}
-
-static bool svm_xsaves_supported(void)
-{
- return boot_cpu_has(X86_FEATURE_XSAVES);
-}
-
-static bool svm_umip_emulated(void)
-{
- return false;
-}
-
-static bool svm_pt_supported(void)
-{
- return false;
-}
-
static bool svm_has_wbinvd_exit(void)
{
return true;
}
-static bool svm_pku_supported(void)
-{
- return false;
-}
-
#define PRE_EX(exit) { .exit_code = (exit), \
.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
@@ -6189,7 +6157,8 @@ static const struct __x86_intercept {
static int svm_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
- enum x86_intercept_stage stage)
+ enum x86_intercept_stage stage,
+ struct x86_exception *exception)
{
struct vcpu_svm *svm = to_svm(vcpu);
int vmexit, ret = X86EMUL_CONTINUE;
@@ -7371,7 +7340,7 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
* TODO: The last condition latches INIT signals on the vCPU when
* the vCPU is in guest-mode and vmcb12 defines an intercept on INIT.
* To properly emulate the INIT intercept, SVM should implement
- * kvm_x86_ops->check_nested_events() and call nested_svm_vmexit()
+ * kvm_x86_ops.check_nested_events() and call nested_svm_vmexit()
* there if an INIT signal is pending.
*/
return !gif_set(svm) ||
@@ -7384,7 +7353,8 @@ static bool svm_check_apicv_inhibit_reasons(ulong bit)
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_NESTED) |
BIT(APICV_INHIBIT_REASON_IRQWIN) |
- BIT(APICV_INHIBIT_REASON_PIT_REINJ);
+ BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
+ BIT(APICV_INHIBIT_REASON_X2APIC);
return supported & BIT(bit);
}
@@ -7394,12 +7364,8 @@ static void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate)
avic_update_access_page(kvm, activate);
}
-static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
- .cpu_has_kvm_support = has_svm,
- .disabled_by_bios = is_disabled,
- .hardware_setup = svm_hardware_setup,
+static struct kvm_x86_ops svm_x86_ops __initdata = {
.hardware_unsetup = svm_hardware_teardown,
- .check_processor_compatibility = svm_check_processor_compat,
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
@@ -7409,8 +7375,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.vcpu_free = svm_free_vcpu,
.vcpu_reset = svm_vcpu_reset,
- .vm_alloc = svm_vm_alloc,
- .vm_free = svm_vm_free,
+ .vm_size = sizeof(struct kvm_svm),
.vm_init = svm_vm_init,
.vm_destroy = svm_vm_destroy,
@@ -7432,7 +7397,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
.set_cr0 = svm_set_cr0,
- .set_cr3 = svm_set_cr3,
.set_cr4 = svm_set_cr4,
.set_efer = svm_set_efer,
.get_idt = svm_get_idt,
@@ -7485,26 +7449,14 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.get_exit_info = svm_get_exit_info,
- .get_lpage_level = svm_get_lpage_level,
-
.cpuid_update = svm_cpuid_update,
- .rdtscp_supported = svm_rdtscp_supported,
- .invpcid_supported = svm_invpcid_supported,
- .mpx_supported = svm_mpx_supported,
- .xsaves_supported = svm_xsaves_supported,
- .umip_emulated = svm_umip_emulated,
- .pt_supported = svm_pt_supported,
- .pku_supported = svm_pku_supported,
-
- .set_supported_cpuid = svm_set_supported_cpuid,
-
.has_wbinvd_exit = svm_has_wbinvd_exit,
.read_l1_tsc_offset = svm_read_l1_tsc_offset,
.write_l1_tsc_offset = svm_write_l1_tsc_offset,
- .set_tdp_cr3 = set_tdp_cr3,
+ .load_mmu_pgd = svm_load_mmu_pgd,
.check_intercept = svm_check_intercept,
.handle_exit_irqoff = svm_handle_exit_irqoff,
@@ -7534,11 +7486,22 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
+
+ .check_nested_events = svm_check_nested_events,
+};
+
+static struct kvm_x86_init_ops svm_init_ops __initdata = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+ .hardware_setup = svm_hardware_setup,
+ .check_processor_compatibility = svm_check_processor_compat,
+
+ .runtime_ops = &svm_x86_ops,
};
static int __init svm_init(void)
{
- return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
+ return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
__alignof__(struct vcpu_svm), THIS_MODULE);
}
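The tail of the svm.c diff shows the new two-table split: setup-only callbacks move into kvm_x86_init_ops (tagged __initdata and discarded after boot), while the runtime table is reached through .runtime_ops and, per the kvm_x86_ops-> to kvm_x86_ops. conversions throughout this patch, copied by value into a global struct. A minimal sketch of the pattern, with illustrative names:

static struct kvm_x86_ops example_runtime_ops __initdata = {
	.hardware_enable = svm_hardware_enable,
};

static struct kvm_x86_init_ops example_init_ops __initdata = {
	.hardware_setup = svm_hardware_setup,
	.runtime_ops    = &example_runtime_ops,	/* copied into kvm_x86_ops */
};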
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index cef5a344fedb..249062f24b94 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -151,32 +151,38 @@ TRACE_EVENT(kvm_fast_mmio,
* Tracepoint for cpuid.
*/
TRACE_EVENT(kvm_cpuid,
- TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
- unsigned long rcx, unsigned long rdx, bool found),
- TP_ARGS(function, rax, rbx, rcx, rdx, found),
+ TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
+ unsigned long rbx, unsigned long rcx, unsigned long rdx,
+ bool found, bool used_max_basic),
+ TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),
TP_STRUCT__entry(
__field( unsigned int, function )
+ __field( unsigned int, index )
__field( unsigned long, rax )
__field( unsigned long, rbx )
__field( unsigned long, rcx )
__field( unsigned long, rdx )
__field( bool, found )
+ __field( bool, used_max_basic )
),
TP_fast_assign(
__entry->function = function;
+ __entry->index = index;
__entry->rax = rax;
__entry->rbx = rbx;
__entry->rcx = rcx;
__entry->rdx = rdx;
__entry->found = found;
+ __entry->used_max_basic = used_max_basic;
),
- TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s",
- __entry->function, __entry->rax,
+ TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
+ __entry->function, __entry->index, __entry->rax,
__entry->rbx, __entry->rcx, __entry->rdx,
- __entry->found ? "found" : "not found")
+ __entry->found ? "found" : "not found",
+ __entry->used_max_basic ? ", used max basic" : "")
);
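With the two added fields, an emitted line now looks like this (values illustrative):

kvm_cpuid: func 40000003 idx 0 rax 0 rbx 0 rcx 0 rdx 0, cpuid entry not found, used max basic

where the trailing ", used max basic" appears only when the requested leaf was clamped to the maximum basic leaf.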
#define AREG(x) { APIC_##x, "APIC_" #x }
@@ -240,7 +246,7 @@ TRACE_EVENT(kvm_exit,
__entry->guest_rip = kvm_rip_read(vcpu);
__entry->isa = isa;
__entry->vcpu_id = vcpu->vcpu_id;
- kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
+ kvm_x86_ops.get_exit_info(vcpu, &__entry->info1,
&__entry->info2);
),
@@ -744,14 +750,14 @@ TRACE_EVENT(kvm_emulate_insn,
),
TP_fast_assign(
- __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
- __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
- - vcpu->arch.emulate_ctxt.fetch.data;
- __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
+ __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS);
+ __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
+ - vcpu->arch.emulate_ctxt->fetch.data;
+ __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
memcpy(__entry->insn,
- vcpu->arch.emulate_ctxt.fetch.data,
+ vcpu->arch.emulate_ctxt->fetch.data,
15);
- __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
+ __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
__entry->failed = failed;
),
@@ -1367,6 +1373,24 @@ TRACE_EVENT(kvm_avic_unaccelerated_access,
__entry->vec)
);
+TRACE_EVENT(kvm_avic_ga_log,
+ TP_PROTO(u32 vmid, u32 vcpuid),
+ TP_ARGS(vmid, vcpuid),
+
+ TP_STRUCT__entry(
+ __field(u32, vmid)
+ __field(u32, vcpuid)
+ ),
+
+ TP_fast_assign(
+ __entry->vmid = vmid;
+ __entry->vcpuid = vcpuid;
+ ),
+
+ TP_printk("vmid=%u, vcpuid=%u",
+ __entry->vmid, __entry->vcpuid)
+);
+
TRACE_EVENT(kvm_hv_timer_state,
TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
TP_ARGS(vcpu_id, hv_timer_in_use),
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index f486e2606247..8903475f751e 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -101,7 +101,7 @@ static inline bool cpu_has_load_perf_global_ctrl(void)
(vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
}
-static inline bool vmx_mpx_supported(void)
+static inline bool cpu_has_vmx_mpx(void)
{
return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
@@ -146,11 +146,6 @@ static inline bool vmx_umip_emulated(void)
SECONDARY_EXEC_DESC;
}
-static inline bool vmx_pku_supported(void)
-{
- return boot_cpu_has(X86_FEATURE_PKU);
-}
-
static inline bool cpu_has_vmx_rdtscp(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -354,4 +349,22 @@ static inline bool cpu_has_vmx_intel_pt(void)
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL);
}
+/*
+ * Processor Trace can operate in one of three modes:
+ * a. system-wide: trace both host/guest and output to host buffer
+ * b. host-only: only trace host and output to host buffer
+ * c. host-guest: trace host and guest simultaneously and output to their
+ * respective buffer
+ *
+ * KVM currently only supports (a) and (c).
+ */
+static inline bool vmx_pt_mode_is_system(void)
+{
+ return pt_mode == PT_MODE_SYSTEM;
+}
+static inline bool vmx_pt_mode_is_host_guest(void)
+{
+ return pt_mode == PT_MODE_HOST_GUEST;
+}
+
#endif /* __KVM_X86_VMX_CAPS_H */
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index 6de47f2569c9..e5f7a7ebf27d 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -198,6 +198,13 @@ static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
+enum nested_evmptrld_status {
+ EVMPTRLD_DISABLED,
+ EVMPTRLD_SUCCEEDED,
+ EVMPTRLD_VMFAIL,
+ EVMPTRLD_ERROR,
+};
+
bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9750e590c89d..de232306561a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -353,9 +353,8 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->nested.msrs.ept_caps &
VMX_EPT_EXECUTE_ONLY_BIT,
nested_ept_ad_enabled(vcpu),
- nested_ept_get_cr3(vcpu));
- vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
- vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
+ nested_ept_get_eptp(vcpu));
+ vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
@@ -1910,18 +1909,18 @@ static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
* This is an equivalent of the nested hypervisor executing the vmptrld
* instruction.
*/
-static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
- bool from_launch)
+static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
+ struct kvm_vcpu *vcpu, bool from_launch)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool evmcs_gpa_changed = false;
u64 evmcs_gpa;
if (likely(!vmx->nested.enlightened_vmcs_enabled))
- return 1;
+ return EVMPTRLD_DISABLED;
if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
- return 1;
+ return EVMPTRLD_DISABLED;
if (unlikely(!vmx->nested.hv_evmcs ||
evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
@@ -1932,7 +1931,7 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
&vmx->nested.hv_evmcs_map))
- return 0;
+ return EVMPTRLD_ERROR;
vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
@@ -1961,7 +1960,7 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
(vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
nested_release_evmcs(vcpu);
- return 0;
+ return EVMPTRLD_VMFAIL;
}
vmx->nested.dirty_vmcs12 = true;
@@ -1990,21 +1989,13 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
vmx->nested.hv_evmcs->hv_clean_fields &=
~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- return 1;
+ return EVMPTRLD_SUCCEEDED;
}
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- /*
- * hv_evmcs may end up being not mapped after migration (when
- * L2 was running), map it here to make sure vmcs12 changes are
- * properly reflected.
- */
- if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
- nested_vmx_handle_enlightened_vmptrld(vcpu, false);
-
if (vmx->nested.hv_evmcs) {
copy_vmcs12_to_enlightened(vmx);
/* All fields are clean */
@@ -2475,9 +2466,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
* If L1 use EPT, then L0 needs to execute INVEPT on
* EPTP02 instead of EPTP01. Therefore, delay TLB
* flush until vmcs02->eptp is fully updated by
- * KVM_REQ_LOAD_CR3. Note that this assumes
+ * KVM_REQ_LOAD_MMU_PGD. Note that this assumes
* KVM_REQ_TLB_FLUSH is evaluated after
- * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
+ * KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest().
*/
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
@@ -2522,7 +2513,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/*
* Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
* on nested VM-Exit, which can occur without actually running L2 and
- * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
+ * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
* vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
* transition to HLT instead of running L2.
*/
@@ -2564,13 +2555,13 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
return 0;
}
-static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
+static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int maxphyaddr = cpuid_maxphyaddr(vcpu);
/* Check for memory type validity */
- switch (address & VMX_EPTP_MT_MASK) {
+ switch (new_eptp & VMX_EPTP_MT_MASK) {
case VMX_EPTP_MT_UC:
if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
return false;
@@ -2583,16 +2574,26 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
return false;
}
- /* only 4 levels page-walk length are valid */
- if (CC((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4))
+ /* Page-walk levels validity. */
+ switch (new_eptp & VMX_EPTP_PWL_MASK) {
+ case VMX_EPTP_PWL_5:
+ if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
+ return false;
+ break;
+ case VMX_EPTP_PWL_4:
+ if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
+ return false;
+ break;
+ default:
return false;
+ }
/* Reserved bits should not be set */
- if (CC(address >> maxphyaddr || ((address >> 7) & 0x1f)))
+ if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
return false;
/* AD, if set, should be supported */
- if (address & VMX_EPTP_AD_ENABLE_BIT) {
+ if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
return false;
}
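For orientation, the EPTP bits that nested_vmx_check_eptp() validates (field layout per the SDM and asm/vmx.h; the helper below is illustrative only):

/*
 * EPTP layout checked above:
 *   bits  2:0  memory type (0 = UC, 6 = WB)
 *   bits  5:3  page-walk levels minus one (3 => 4-level, 4 => 5-level)
 *   bit   6    enable accessed/dirty flags
 *   bits 11:7  reserved, hence the (new_eptp >> 7) & 0x1f check
 */
static bool eptp_is_5level(u64 eptp)
{
	return (eptp & VMX_EPTP_PWL_MASK) == VMX_EPTP_PWL_5;
}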
@@ -2641,7 +2642,7 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
return -EINVAL;
if (nested_cpu_has_ept(vmcs12) &&
- CC(!valid_ept_address(vcpu, vmcs12->ept_pointer)))
+ CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
return -EINVAL;
if (nested_cpu_has_vmfunc(vmcs12)) {
@@ -2961,7 +2962,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
/*
* Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
* which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
- * be written (by preparve_vmcs02()) before the "real" VMEnter, i.e.
+ * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
* there is no need to preserve other bits or save/restore the field.
*/
vmcs_writel(GUEST_RFLAGS, 0);
@@ -3053,6 +3054,27 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
struct page *page;
u64 hpa;
+ /*
+ * hv_evmcs may end up being not mapped after migration (when
+ * L2 was running), map it here to make sure vmcs12 changes are
+ * properly reflected.
+ */
+ if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
+ enum nested_evmptrld_status evmptrld_status =
+ nested_vmx_handle_enlightened_vmptrld(vcpu, false);
+
+ if (evmptrld_status == EVMPTRLD_VMFAIL ||
+ evmptrld_status == EVMPTRLD_ERROR) {
+ pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+ __func__);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror =
+ KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return false;
+ }
+ }
+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
@@ -3316,12 +3338,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
enum nvmx_vmentry_status status;
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
+ enum nested_evmptrld_status evmptrld_status;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!nested_vmx_handle_enlightened_vmptrld(vcpu, launch))
+ evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
+ if (evmptrld_status == EVMPTRLD_ERROR) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
+ } else if (evmptrld_status == EVMPTRLD_VMFAIL) {
+ return nested_vmx_failInvalid(vcpu);
+ }
if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
return nested_vmx_failInvalid(vcpu);
@@ -3499,7 +3527,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
}
-static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
+void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
gfn_t gfn;
@@ -3604,7 +3632,7 @@ static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
vcpu->arch.exception.payload);
}
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qual;
@@ -3680,8 +3708,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
return 0;
}
- if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
- nested_exit_on_intr(vcpu)) {
+ if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
@@ -4024,7 +4051,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
*
* If vmcs12 uses EPT, we need to execute this flush on EPTP01
* and therefore we request the TLB flush to happen only after VMCS EPTP
- * has been set by KVM_REQ_LOAD_CR3.
+ * has been set by KVM_REQ_LOAD_MMU_PGD.
*/
if (enable_vpid &&
(!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
@@ -4329,17 +4356,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (likely(!vmx->fail)) {
- /*
- * TODO: SDM says that with acknowledge interrupt on
- * exit, bit 31 of the VM-exit interrupt information
- * (valid interrupt) is always set to 1 on
- * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
- * need kvm_cpu_has_interrupt(). See the commit
- * message for details.
- */
- if (nested_exit_intr_ack_set(vcpu) &&
- exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
- kvm_cpu_has_interrupt(vcpu)) {
+ if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+ nested_exit_intr_ack_set(vcpu)) {
int irq = kvm_cpu_get_interrupt(vcpu);
WARN_ON(irq < 0);
vmcs12->vm_exit_intr_info = irq |
@@ -4383,7 +4401,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
* Decode the memory-address operand of a vmx instruction, as recorded on an
* exit caused by such an instruction (run by a guest hypervisor).
* On success, returns 0. When the operand is invalid, returns 1 and throws
- * #UD or #GP.
+ * #UD, #GP, or #SS.
*/
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
@@ -4424,7 +4442,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
- off += kvm_register_read(vcpu, index_reg)<<scaling;
+ off += kvm_register_read(vcpu, index_reg) << scaling;
vmx_get_segment(vcpu, &s, seg_reg);
/*
@@ -4517,7 +4535,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
return;
vmx = to_vmx(vcpu);
- if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
+ if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
vmx->nested.msrs.entry_ctls_high |=
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
vmx->nested.msrs.exit_ctls_high |=
@@ -4603,7 +4621,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
vmx->nested.vmcs02_initialized = false;
vmx->nested.vmxon = true;
- if (pt_mode == PT_MODE_HOST_GUEST) {
+ if (vmx_pt_mode_is_host_guest()) {
vmx->pt_desc.guest.ctl = 0;
pt_update_intercept_for_msr(vmx);
}
@@ -5235,7 +5253,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
u32 index = kvm_rcx_read(vcpu);
- u64 address;
+ u64 new_eptp;
bool accessed_dirty;
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -5248,23 +5266,23 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
- &address, index * 8, 8))
+ &new_eptp, index * 8, 8))
return 1;
- accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
+ accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
/*
* If the (L2) guest does a vmfunc to the currently
* active ept pointer, we don't have to do anything else
*/
- if (vmcs12->ept_pointer != address) {
- if (!valid_ept_address(vcpu, address))
+ if (vmcs12->ept_pointer != new_eptp) {
+ if (!nested_vmx_check_eptp(vcpu, new_eptp))
return 1;
kvm_mmu_unload(vcpu);
mmu->ept_ad = accessed_dirty;
mmu->mmu_role.base.ad_disabled = !accessed_dirty;
- vmcs12->ept_pointer = address;
+ vmcs12->ept_pointer = new_eptp;
/*
* TODO: Check what's the correct approach in case
* mmu reload fails. Currently, we just let the next
@@ -5525,8 +5543,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- if (vmx->nested.nested_run_pending)
- return false;
+ WARN_ON_ONCE(vmx->nested.nested_run_pending);
if (unlikely(vmx->fail)) {
trace_kvm_nested_vmenter_failed(
@@ -5535,19 +5552,6 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
return true;
}
- /*
- * The host physical addresses of some pages of guest memory
- * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
- * Page). The CPU may write to these pages via their host
- * physical address while L2 is running, bypassing any
- * address-translation-based dirty tracking (e.g. EPT write
- * protection).
- *
- * Mark them dirty on every exit from L2 to prevent them from
- * getting out of sync with dirty tracking.
- */
- nested_mark_vmcs12_pages_dirty(vcpu);
-
trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
vmcs_readl(EXIT_QUALIFICATION),
vmx->idt_vectoring_info,
@@ -5628,7 +5632,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_TRAP_FLAG:
- return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
+ return nested_cpu_has_mtf(vmcs12);
case EXIT_REASON_MONITOR_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
case EXIT_REASON_PAUSE_INSTRUCTION:
@@ -5905,10 +5909,12 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
/*
- * Sync eVMCS upon entry as we may not have
- * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
+ * nested_vmx_handle_enlightened_vmptrld() cannot be called
+ * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
+ * restored yet. EVMCS will be mapped from
+ * nested_get_vmcs12_pages().
*/
- vmx->nested.need_vmcs12_to_shadow_sync = true;
+ kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
} else {
return -EINVAL;
}
@@ -6130,11 +6136,13 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
/* nested EPT: emulate EPT also to L1 */
msrs->secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_EPT;
- msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
- VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
- if (cpu_has_vmx_ept_execute_only())
- msrs->ept_caps |=
- VMX_EPT_EXECUTE_ONLY_BIT;
+ msrs->ept_caps =
+ VMX_EPT_PAGE_WALK_4_BIT |
+ VMX_EPT_PAGE_WALK_5_BIT |
+ VMX_EPTP_WB_BIT |
+ VMX_EPT_INVEPT_BIT |
+ VMX_EPT_EXECUTE_ONLY_BIT;
+
msrs->ept_caps &= ept_caps;
msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
@@ -6233,7 +6241,8 @@ void nested_vmx_hardware_unsetup(void)
}
}
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
+__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
+ int (*exit_handlers[])(struct kvm_vcpu *))
{
int i;
@@ -6269,12 +6278,12 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
- kvm_x86_ops->check_nested_events = vmx_check_nested_events;
- kvm_x86_ops->get_nested_state = vmx_get_nested_state;
- kvm_x86_ops->set_nested_state = vmx_set_nested_state;
- kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
- kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
- kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
+ ops->check_nested_events = vmx_check_nested_events;
+ ops->get_nested_state = vmx_get_nested_state;
+ ops->set_nested_state = vmx_set_nested_state;
+ ops->get_vmcs12_pages = nested_get_vmcs12_pages;
+ ops->nested_enable_evmcs = nested_enable_evmcs;
+ ops->nested_get_evmcs_version = nested_get_evmcs_version;
return 0;
}
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 9aeda46f473e..ac56aefa49e3 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -19,7 +19,8 @@ enum nvmx_vmentry_status {
void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
+ int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
@@ -33,6 +34,7 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
+void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
int size);
@@ -60,7 +62,7 @@ static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
vmx->nested.hv_evmcs;
}
-static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
+static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
/* return the page table to be shadowed - in our case, EPT12 */
return get_vmcs12(vcpu)->ept_pointer;
@@ -68,7 +70,7 @@ static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
- return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
+ return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}
/*
diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
index 45eaedee2ac0..19717d0a1100 100644
--- a/arch/x86/kvm/vmx/ops.h
+++ b/arch/x86/kvm/vmx/ops.h
@@ -13,6 +13,8 @@
#define __ex(x) __kvm_handle_fault_on_reboot(x)
asmlinkage void vmread_error(unsigned long field, bool fault);
+__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
+ bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
@@ -70,15 +72,28 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
asm volatile("1: vmread %2, %1\n\t"
".byte 0x3e\n\t" /* branch taken hint */
"ja 3f\n\t"
- "mov %2, %%" _ASM_ARG1 "\n\t"
- "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t"
- "2: call vmread_error\n\t"
- "xor %k1, %k1\n\t"
+
+ /*
+ * VMREAD failed. Push '0' for @fault, push the failing
+ * @field, and bounce through the trampoline to preserve
+ * volatile registers.
+ */
+ "push $0\n\t"
+ "push %2\n\t"
+ "2:call vmread_error_trampoline\n\t"
+
+ /*
+ * Unwind the stack. Note, the trampoline zeros out the
+ * memory for @fault so that the result is '0' on error.
+ */
+ "pop %2\n\t"
+ "pop %1\n\t"
"3:\n\t"
+ /* VMREAD faulted. As above, except push '1' for @fault. */
".pushsection .fixup, \"ax\"\n\t"
- "4: mov %2, %%" _ASM_ARG1 "\n\t"
- "mov $1, %%" _ASM_ARG2 "\n\t"
+ "4: push $1\n\t"
+ "push %2\n\t"
"jmp 2b\n\t"
".popsection\n\t"
_ASM_EXTABLE(1b, 4b)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index fd21cdb10b79..7c857737b438 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -263,9 +263,15 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated)
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
+ if (pmc->perf_event)
+ perf_event_period(pmc->perf_event,
+ get_sample_period(pmc, data));
return 0;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc->counter += data - pmc_read_counter(pmc);
+ if (pmc->perf_event)
+ perf_event_period(pmc->perf_event,
+ get_sample_period(pmc, data));
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
@@ -329,7 +335,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
- if (kvm_x86_ops->pt_supported())
+ if (vmx_pt_mode_is_host_guest())
pmu->global_ovf_ctrl_mask &=
~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 81ada2ce99e7..9651ba388ba9 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -135,12 +135,12 @@ SYM_FUNC_START(__vmx_vcpu_run)
cmpb $0, %bl
/* Load guest registers. Don't clobber flags. */
- mov VCPU_RBX(%_ASM_AX), %_ASM_BX
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
mov VCPU_RDX(%_ASM_AX), %_ASM_DX
+ mov VCPU_RBX(%_ASM_AX), %_ASM_BX
+ mov VCPU_RBP(%_ASM_AX), %_ASM_BP
mov VCPU_RSI(%_ASM_AX), %_ASM_SI
mov VCPU_RDI(%_ASM_AX), %_ASM_DI
- mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
mov VCPU_R8 (%_ASM_AX), %r8
mov VCPU_R9 (%_ASM_AX), %r9
@@ -168,12 +168,12 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Save all guest registers, including RAX from the stack */
__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
- mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
+ mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
+ mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
- mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
mov %r8, VCPU_R8 (%_ASM_AX)
mov %r9, VCPU_R9 (%_ASM_AX)
@@ -197,12 +197,12 @@ SYM_FUNC_START(__vmx_vcpu_run)
* free. RSP and RAX are exempt as RSP is restored by hardware during
* VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
*/
-1: xor %ebx, %ebx
- xor %ecx, %ecx
+1: xor %ecx, %ecx
xor %edx, %edx
+ xor %ebx, %ebx
+ xor %ebp, %ebp
xor %esi, %esi
xor %edi, %edi
- xor %ebp, %ebp
#ifdef CONFIG_X86_64
xor %r8d, %r8d
xor %r9d, %r9d
@@ -234,3 +234,61 @@ SYM_FUNC_START(__vmx_vcpu_run)
2: mov $1, %eax
jmp 1b
SYM_FUNC_END(__vmx_vcpu_run)
+
+/**
+ * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
+ * @field: VMCS field encoding that failed
+ * @fault: %true if the VMREAD faulted, %false if it failed
+ *
+ * Save and restore volatile registers across a call to vmread_error(). Note,
+ * all parameters are passed on the stack.
+ */
+SYM_FUNC_START(vmread_error_trampoline)
+ push %_ASM_BP
+ mov %_ASM_SP, %_ASM_BP
+
+ push %_ASM_AX
+ push %_ASM_CX
+ push %_ASM_DX
+#ifdef CONFIG_X86_64
+ push %rdi
+ push %rsi
+ push %r8
+ push %r9
+ push %r10
+ push %r11
+#endif
+#ifdef CONFIG_X86_64
+ /* Load @field and @fault to arg1 and arg2 respectively. */
+ mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
+ mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
+#else
+ /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
+ push 3*WORD_SIZE(%ebp)
+ push 2*WORD_SIZE(%ebp)
+#endif
+
+ call vmread_error
+
+#ifndef CONFIG_X86_64
+ add $8, %esp
+#endif
+
+ /* Zero out @fault, which will be popped into the result register. */
+ _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
+
+#ifdef CONFIG_X86_64
+ pop %r11
+ pop %r10
+ pop %r9
+ pop %r8
+ pop %rsi
+ pop %rdi
+#endif
+ pop %_ASM_DX
+ pop %_ASM_CX
+ pop %_ASM_AX
+ pop %_ASM_BP
+
+ ret
+SYM_FUNC_END(vmread_error_trampoline)
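The WORD_SIZE offsets follow from the ad-hoc convention set up by the inline asm in ops.h: @fault is pushed first, then @field, CALL pushes the return address, and the prologue pushes the old frame pointer. The resulting frame, as a comment-style diagram:

/*
 * Stack relative to %_ASM_BP inside vmread_error_trampoline():
 *   +3*WORD_SIZE : @fault  (zeroed before return, popped as the result)
 *   +2*WORD_SIZE : @field
 *   +1*WORD_SIZE : return address back into the inline asm / fixup
 *   +0           : caller's %_ASM_BP
 */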
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 458e684dfbdc..91749f1254e8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -437,7 +437,6 @@ static const struct kvm_vmx_segment_field {
VMX_SEGMENT_FIELD(LDTR),
};
-u64 host_efer;
static unsigned long host_idt_base;
/*
@@ -658,53 +657,16 @@ static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr,
return ret;
}
-void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
-{
- vmcs_clear(loaded_vmcs->vmcs);
- if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
- vmcs_clear(loaded_vmcs->shadow_vmcs);
- loaded_vmcs->cpu = -1;
- loaded_vmcs->launched = 0;
-}
-
#ifdef CONFIG_KEXEC_CORE
-/*
- * This bitmap is used to indicate whether the vmclear
- * operation is enabled on all cpus. All disabled by
- * default.
- */
-static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
-
-static inline void crash_enable_local_vmclear(int cpu)
-{
- cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline void crash_disable_local_vmclear(int cpu)
-{
- cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline int crash_local_vmclear_enabled(int cpu)
-{
- return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
static void crash_vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
- if (!crash_local_vmclear_enabled(cpu))
- return;
-
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
vmcs_clear(v->vmcs);
}
-#else
-static inline void crash_enable_local_vmclear(int cpu) { }
-static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */
static void __loaded_vmcs_clear(void *arg)
@@ -716,19 +678,24 @@ static void __loaded_vmcs_clear(void *arg)
return; /* vcpu migration can race with cpu offline */
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- crash_disable_local_vmclear(cpu);
+
+ vmcs_clear(loaded_vmcs->vmcs);
+ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
+
list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
/*
- * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
- * is before setting loaded_vmcs->vcpu to -1 which is done in
- * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist
- * then adds the vmcs into percpu list before it is deleted.
+ * Ensure all writes to loaded_vmcs, including deleting it from its
+ * current percpu list, complete before setting loaded_vmcs->vcpu to
+ * -1, otherwise a different cpu can see vcpu == -1 first and add
+ * loaded_vmcs to its percpu list before it's deleted from this cpu's
+ * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
*/
smp_wmb();
- loaded_vmcs_init(loaded_vmcs);
- crash_enable_local_vmclear(cpu);
+ loaded_vmcs->cpu = -1;
+ loaded_vmcs->launched = 0;
}
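The pairing that the rewritten comments describe, side by side (writer is __loaded_vmcs_clear() on the old CPU; the matching reader is vmx_vcpu_load_vmcs() later in this file):

/*
 *   old CPU (writer)               new CPU (reader)
 *   ----------------               ----------------
 *   list_del(loaded_vmcs);         reads loaded_vmcs->cpu
 *   smp_wmb();                     smp_rmb();
 *   loaded_vmcs->cpu = -1;         list_add(loaded_vmcs, percpu list);
 */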
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
@@ -812,7 +779,7 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
if (to_vmx(vcpu)->rmode.vm86_active)
eb = ~0;
if (enable_ept)
- eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+ eb &= ~(1u << PF_VECTOR);
/* When we are running a nested L2 guest and L1 specified for it a
* certain exception bitmap, we must trap the same exceptions and pass
@@ -1063,7 +1030,7 @@ static unsigned long segment_base(u16 selector)
static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
{
- return (pt_mode == PT_MODE_HOST_GUEST) &&
+ return vmx_pt_mode_is_host_guest() &&
!(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
}
@@ -1097,7 +1064,7 @@ static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
static void pt_guest_enter(struct vcpu_vmx *vmx)
{
- if (pt_mode == PT_MODE_SYSTEM)
+ if (vmx_pt_mode_is_system())
return;
/*
@@ -1114,7 +1081,7 @@ static void pt_guest_enter(struct vcpu_vmx *vmx)
static void pt_guest_exit(struct vcpu_vmx *vmx)
{
- if (pt_mode == PT_MODE_SYSTEM)
+ if (vmx_pt_mode_is_system())
return;
if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
@@ -1347,18 +1314,17 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
if (!already_loaded) {
loaded_vmcs_clear(vmx->loaded_vmcs);
local_irq_disable();
- crash_disable_local_vmclear(cpu);
/*
- * Read loaded_vmcs->cpu should be before fetching
- * loaded_vmcs->loaded_vmcss_on_cpu_link.
- * See the comments in __loaded_vmcs_clear().
+ * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
+ * this cpu's percpu list, otherwise it may not yet be deleted
+ * from its previous cpu's percpu list. Pairs with the
+ * smp_wmb() in __loaded_vmcs_clear().
*/
smp_rmb();
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
- crash_enable_local_vmclear(cpu);
local_irq_enable();
}
@@ -1691,16 +1657,6 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
vmx_clear_hlt(vcpu);
}
-static bool vmx_rdtscp_supported(void)
-{
- return cpu_has_vmx_rdtscp();
-}
-
-static bool vmx_invpcid_supported(void)
-{
- return cpu_has_vmx_invpcid();
-}
-
/*
* Swap MSR entry in host/guest MSR entry array.
*/
@@ -1908,24 +1864,24 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
&msr_info->data);
break;
case MSR_IA32_RTIT_CTL:
- if (pt_mode != PT_MODE_HOST_GUEST)
+ if (!vmx_pt_mode_is_host_guest())
return 1;
msr_info->data = vmx->pt_desc.guest.ctl;
break;
case MSR_IA32_RTIT_STATUS:
- if (pt_mode != PT_MODE_HOST_GUEST)
+ if (!vmx_pt_mode_is_host_guest())
return 1;
msr_info->data = vmx->pt_desc.guest.status;
break;
case MSR_IA32_RTIT_CR3_MATCH:
- if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ if (!vmx_pt_mode_is_host_guest() ||
!intel_pt_validate_cap(vmx->pt_desc.caps,
PT_CAP_cr3_filtering))
return 1;
msr_info->data = vmx->pt_desc.guest.cr3_match;
break;
case MSR_IA32_RTIT_OUTPUT_BASE:
- if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ if (!vmx_pt_mode_is_host_guest() ||
(!intel_pt_validate_cap(vmx->pt_desc.caps,
PT_CAP_topa_output) &&
!intel_pt_validate_cap(vmx->pt_desc.caps,
@@ -1934,7 +1890,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmx->pt_desc.guest.output_base;
break;
case MSR_IA32_RTIT_OUTPUT_MASK:
- if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ if (!vmx_pt_mode_is_host_guest() ||
(!intel_pt_validate_cap(vmx->pt_desc.caps,
PT_CAP_topa_output) &&
!intel_pt_validate_cap(vmx->pt_desc.caps,
@@ -1944,7 +1900,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
- if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ if (!vmx_pt_mode_is_host_guest() ||
(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
PT_CAP_num_address_ranges)))
return 1;
@@ -2150,7 +2106,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
return vmx_set_vmx_msr(vcpu, msr_index, data);
case MSR_IA32_RTIT_CTL:
- if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ if (!vmx_pt_mode_is_host_guest() ||
vmx_rtit_ctl_check(vcpu, data) ||
vmx->nested.vmxon)
return 1;
@@ -2266,18 +2222,33 @@ static __init int vmx_disabled_by_bios(void)
!boot_cpu_has(X86_FEATURE_VMX);
}
-static void kvm_cpu_vmxon(u64 addr)
+static int kvm_cpu_vmxon(u64 vmxon_pointer)
{
+ u64 msr;
+
cr4_set_bits(X86_CR4_VMXE);
intel_pt_handle_vmx(1);
- asm volatile ("vmxon %0" : : "m"(addr));
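+ /*
+  * VMXON can fault, e.g. #GP if VMX is not properly enabled in
+  * MSR_IA32_FEAT_CTL; the exception table entry below routes any
+  * such fault to the "fault" label.
+  */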
+ asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ : : [vmxon_pointer] "m"(vmxon_pointer)
+ : : fault);
+ return 0;
+
+fault:
+ WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
+ rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
+ intel_pt_handle_vmx(0);
+ cr4_clear_bits(X86_CR4_VMXE);
+
+ return -EFAULT;
}
static int hardware_enable(void)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+ int r;
if (cr4_read_shadow() & X86_CR4_VMXE)
return -EBUSY;
@@ -2294,18 +2265,10 @@ static int hardware_enable(void)
INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
- /*
- * Now we can enable the vmclear operation in kdump
- * since the loaded_vmcss_on_cpu list on this cpu
- * has been initialized.
- *
- * Though the cpu is not in VMX operation now, there
- * is no problem to enable the vmclear operation
- * for the loaded_vmcss_on_cpu list is empty!
- */
- crash_enable_local_vmclear(cpu);
+ r = kvm_cpu_vmxon(phys_addr);
+ if (r)
+ return r;
- kvm_cpu_vmxon(phys_addr);
if (enable_ept)
ept_sync_global();
@@ -2617,9 +2580,12 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
if (!loaded_vmcs->vmcs)
return -ENOMEM;
+ vmcs_clear(loaded_vmcs->vmcs);
+
loaded_vmcs->shadow_vmcs = NULL;
loaded_vmcs->hv_timer_soft_disabled = false;
- loaded_vmcs_init(loaded_vmcs);
+ loaded_vmcs->cpu = -1;
+ loaded_vmcs->launched = 0;
if (cpu_has_vmx_msr_bitmap()) {
loaded_vmcs->msr_bitmap = (unsigned long *)
@@ -3001,9 +2967,8 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static int get_ept_level(struct kvm_vcpu *vcpu)
{
- /* Nested EPT currently only supports 4-level walks. */
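+ /* Derive the nested EPT walk depth (4 or 5 levels) from vmcs12's EPTP. */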
if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
- return 4;
+ return vmx_eptp_page_walk_level(nested_ept_get_eptp(vcpu));
if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
return 5;
return 4;
@@ -3023,7 +2988,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
return eptp;
}
-void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3)
{
struct kvm *kvm = vcpu->kvm;
bool update_guest_cr3 = true;
@@ -3035,7 +3000,7 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
eptp = construct_eptp(vcpu, cr3);
vmcs_write64(EPT_POINTER, eptp);
- if (kvm_x86_ops->tlb_remote_flush) {
+ if (kvm_x86_ops.tlb_remote_flush) {
spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
to_vmx(vcpu)->ept_pointer = eptp;
to_kvm_vmx(kvm)->ept_pointers_match
@@ -4040,7 +4005,7 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
- if (pt_mode == PT_MODE_SYSTEM)
+ if (vmx_pt_mode_is_system())
exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
if (!cpu_need_virtualize_apic_accesses(vcpu))
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
@@ -4095,7 +4060,7 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
}
}
- if (vmx_rdtscp_supported()) {
+ if (cpu_has_vmx_rdtscp()) {
bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
if (!rdtscp_enabled)
exec_control &= ~SECONDARY_EXEC_RDTSCP;
@@ -4110,7 +4075,7 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
}
}
- if (vmx_invpcid_supported()) {
+ if (cpu_has_vmx_invpcid()) {
/* Exposing INVPCID only when PCID is exposed */
bool invpcid_enabled =
guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
@@ -4281,7 +4246,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
if (cpu_has_vmx_encls_vmexit())
vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
- if (pt_mode == PT_MODE_HOST_GUEST) {
+ if (vmx_pt_mode_is_host_guest()) {
memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
/* Bit[6~0] are forced to 1, writes are ignored. */
vmx->pt_desc.guest.output_mask = 0x7F;
@@ -4509,8 +4474,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
- return (!to_vmx(vcpu)->nested.nested_run_pending &&
- vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return false;
+
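+ /*
+  * If L1 has asked to intercept external interrupts, an interrupt
+  * always causes a VM-exit from L2 to L1 and is thus deliverable
+  * regardless of L2's RFLAGS.IF or interruptibility state.
+  */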
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ return true;
+
+ return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
@@ -4566,7 +4536,6 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
case GP_VECTOR:
case MF_VECTOR:
return true;
- break;
}
return false;
}
@@ -5343,7 +5312,6 @@ static void vmx_enable_tdp(void)
VMX_EPT_RWX_MASK, 0ull);
ept_set_mmio_spte_mask();
- kvm_enable_tdp();
}
/*
@@ -5876,8 +5844,23 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
- if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
- return nested_vmx_reflect_vmexit(vcpu, exit_reason);
+ if (is_guest_mode(vcpu)) {
+ /*
+ * The host physical addresses of some pages of guest memory
+ * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
+ * Page). The CPU may write to these pages via their host
+ * physical address while L2 is running, bypassing any
+ * address-translation-based dirty tracking (e.g. EPT write
+ * protection).
+ *
+ * Mark them dirty on every exit from L2 to prevent them from
+ * getting out of sync with dirty tracking.
+ */
+ nested_mark_vmcs12_pages_dirty(vcpu);
+
+ if (nested_vmx_exit_reflected(vcpu, exit_reason))
+ return nested_vmx_reflect_vmexit(vcpu, exit_reason);
+ }
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
dump_vmcs();
@@ -6237,15 +6220,13 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
/* if exit due to PF check for async PF */
- if (is_page_fault(vmx->exit_intr_info))
+ if (is_page_fault(vmx->exit_intr_info)) {
vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
-
/* Handle machine checks before interrupts are enabled */
- if (is_machine_check(vmx->exit_intr_info))
+ } else if (is_machine_check(vmx->exit_intr_info)) {
kvm_machine_check();
-
/* We need to handle NMIs before interrupts are enabled */
- if (is_nmi(vmx->exit_intr_info)) {
+ } else if (is_nmi(vmx->exit_intr_info)) {
kvm_before_interrupt(&vmx->vcpu);
asm("int $2");
kvm_after_interrupt(&vmx->vcpu);
@@ -6331,11 +6312,6 @@ static bool vmx_has_emulated_msr(int index)
}
}
-static bool vmx_pt_supported(void)
-{
- return pt_mode == PT_MODE_HOST_GUEST;
-}
-
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
@@ -6581,7 +6557,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
pt_guest_enter(vmx);
- atomic_switch_perf_msrs(vmx);
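+ /* Skip the perf MSR switch when no vPMU is exposed to the guest. */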
+ if (vcpu_to_pmu(vcpu)->version)
+ atomic_switch_perf_msrs(vmx);
atomic_switch_umwait_control_msr(vmx);
if (enable_preemption_timer)
@@ -6698,20 +6675,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_complete_interrupts(vmx);
}
-static struct kvm *vmx_vm_alloc(void)
-{
- struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx),
- GFP_KERNEL_ACCOUNT | __GFP_ZERO,
- PAGE_KERNEL);
- return &kvm_vmx->kvm;
-}
-
-static void vmx_vm_free(struct kvm *kvm)
-{
- kfree(kvm->arch.hyperv.hv_pa_pg);
- vfree(to_kvm_vmx(kvm));
-}
-
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6914,17 +6877,24 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
u8 cache;
u64 ipat = 0;
- /* For VT-d and EPT combination
- * 1. MMIO: always map as UC
- * 2. EPT with VT-d:
- * a. VT-d without snooping control feature: can't guarantee the
- * result, try to trust guest.
- * b. VT-d with snooping control feature: snooping control feature of
- * VT-d engine can guarantee the cache correctness. Just set it
- * to WB to keep consistent with host. So the same as item 3.
- * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
- * consistent with host MTRR
+ /* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
+ * memory aliases with conflicting memory types and sometimes MCEs.
+ * We have to be careful about what is honored and when.
+ *
+ * For MMIO, guest CD/MTRR are ignored. The EPT memory type is set to
+ * UC. The effective memory type is UC or WC depending on guest PAT.
+ * This was historically the source of MCEs and we want to be
+ * conservative.
+ *
+ * When there is no need to deal with noncoherent DMA (e.g., no VT-d
+ * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored. The
+ * EPT memory type is set to WB. The effective memory type is forced
+ * WB.
+ *
+ * Otherwise, we trust the guest. Guest CD/MTRR/PAT are all honored. The
+ * EPT memory type is used to emulate guest CD/MTRR.
*/
+
if (is_mmio) {
cache = MTRR_TYPE_UNCACHABLE;
goto exit;
@@ -6951,15 +6921,6 @@ exit:
return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
}
-static int vmx_get_lpage_level(void)
-{
- if (enable_ept && !cpu_has_vmx_ept_1g_page())
- return PT_DIRECTORY_LEVEL;
- else
- /* For shadow and EPT supported 1GB page */
- return PT_PDPE_LEVEL;
-}
-
static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx)
{
/*
@@ -7150,10 +7111,37 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}
-static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+static __init void vmx_set_cpu_caps(void)
{
- if (func == 1 && nested)
- entry->ecx |= feature_bit(VMX);
+ kvm_set_cpu_caps();
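+ /*
+  * The *_check_and_set() calls below advertise a feature only if
+  * the host CPU supports it as well.
+  */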
+
+ /* CPUID 0x1 */
+ if (nested)
+ kvm_cpu_cap_set(X86_FEATURE_VMX);
+
+ /* CPUID 0x7 */
+ if (kvm_mpx_supported())
+ kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
+ if (cpu_has_vmx_invpcid())
+ kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);
+ if (vmx_pt_mode_is_host_guest())
+ kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
+
+ /* PKU is not yet implemented for shadow paging. */
+ if (enable_ept && boot_cpu_has(X86_FEATURE_OSPKE))
+ kvm_cpu_cap_check_and_set(X86_FEATURE_PKU);
+
+ if (vmx_umip_emulated())
+ kvm_cpu_cap_set(X86_FEATURE_UMIP);
+
+ /* CPUID 0xD.1 */
+ supported_xss = 0;
+ if (!vmx_xsaves_supported())
+ kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
+
+ /* CPUID 0x80000001 */
+ if (!cpu_has_vmx_rdtscp())
+ kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
}
static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
@@ -7197,10 +7185,10 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
- enum x86_intercept_stage stage)
+ enum x86_intercept_stage stage,
+ struct x86_exception *exception)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
switch (info->intercept) {
/*
@@ -7209,8 +7197,8 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
*/
case x86_intercept_rdtscp:
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
- ctxt->exception.vector = UD_VECTOR;
- ctxt->exception.error_code_valid = false;
+ exception->vector = UD_VECTOR;
+ exception->error_code_valid = false;
return X86EMUL_PROPAGATE_FAULT;
}
break;
@@ -7321,7 +7309,8 @@ static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
static void vmx_slot_enable_log_dirty(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
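+ /*
+  * With manual protect and KVM_DIRTY_LOG_INITIALLY_SET, the dirty
+  * bitmap starts out all set, so there is nothing to clear here.
+  */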
+ if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
+ kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
}
@@ -7504,7 +7493,7 @@ static void pi_post_block(struct kvm_vcpu *vcpu)
static void vmx_post_block(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops->set_hv_timer)
+ if (kvm_x86_ops.set_hv_timer)
kvm_lapic_switch_to_hv_timer(vcpu);
pi_post_block(vcpu);
@@ -7671,13 +7660,164 @@ static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.vmxon;
}
+static void hardware_unsetup(void)
+{
+ if (nested)
+ nested_vmx_hardware_unsetup();
+
+ free_kvm_area();
+}
+
+static bool vmx_check_apicv_inhibit_reasons(ulong bit)
+{
+ ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+ BIT(APICV_INHIBIT_REASON_HYPERV);
+
+ return supported & BIT(bit);
+}
+
+static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ .hardware_unsetup = hardware_unsetup,
+
+ .hardware_enable = hardware_enable,
+ .hardware_disable = hardware_disable,
+ .cpu_has_accelerated_tpr = report_flexpriority,
+ .has_emulated_msr = vmx_has_emulated_msr,
+
+ .vm_size = sizeof(struct kvm_vmx),
+ .vm_init = vmx_vm_init,
+
+ .vcpu_create = vmx_create_vcpu,
+ .vcpu_free = vmx_free_vcpu,
+ .vcpu_reset = vmx_vcpu_reset,
+
+ .prepare_guest_switch = vmx_prepare_switch_to_guest,
+ .vcpu_load = vmx_vcpu_load,
+ .vcpu_put = vmx_vcpu_put,
+
+ .update_bp_intercept = update_exception_bitmap,
+ .get_msr_feature = vmx_get_msr_feature,
+ .get_msr = vmx_get_msr,
+ .set_msr = vmx_set_msr,
+ .get_segment_base = vmx_get_segment_base,
+ .get_segment = vmx_get_segment,
+ .set_segment = vmx_set_segment,
+ .get_cpl = vmx_get_cpl,
+ .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+ .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
+ .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
+ .set_cr0 = vmx_set_cr0,
+ .set_cr4 = vmx_set_cr4,
+ .set_efer = vmx_set_efer,
+ .get_idt = vmx_get_idt,
+ .set_idt = vmx_set_idt,
+ .get_gdt = vmx_get_gdt,
+ .set_gdt = vmx_set_gdt,
+ .get_dr6 = vmx_get_dr6,
+ .set_dr6 = vmx_set_dr6,
+ .set_dr7 = vmx_set_dr7,
+ .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
+ .cache_reg = vmx_cache_reg,
+ .get_rflags = vmx_get_rflags,
+ .set_rflags = vmx_set_rflags,
+
+ .tlb_flush = vmx_flush_tlb,
+ .tlb_flush_gva = vmx_flush_tlb_gva,
+
+ .run = vmx_vcpu_run,
+ .handle_exit = vmx_handle_exit,
+ .skip_emulated_instruction = vmx_skip_emulated_instruction,
+ .update_emulated_instruction = vmx_update_emulated_instruction,
+ .set_interrupt_shadow = vmx_set_interrupt_shadow,
+ .get_interrupt_shadow = vmx_get_interrupt_shadow,
+ .patch_hypercall = vmx_patch_hypercall,
+ .set_irq = vmx_inject_irq,
+ .set_nmi = vmx_inject_nmi,
+ .queue_exception = vmx_queue_exception,
+ .cancel_injection = vmx_cancel_injection,
+ .interrupt_allowed = vmx_interrupt_allowed,
+ .nmi_allowed = vmx_nmi_allowed,
+ .get_nmi_mask = vmx_get_nmi_mask,
+ .set_nmi_mask = vmx_set_nmi_mask,
+ .enable_nmi_window = enable_nmi_window,
+ .enable_irq_window = enable_irq_window,
+ .update_cr8_intercept = update_cr8_intercept,
+ .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+ .apicv_post_state_restore = vmx_apicv_post_state_restore,
+ .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
+
+ .set_tss_addr = vmx_set_tss_addr,
+ .set_identity_map_addr = vmx_set_identity_map_addr,
+ .get_tdp_level = get_ept_level,
+ .get_mt_mask = vmx_get_mt_mask,
+
+ .get_exit_info = vmx_get_exit_info,
+
+ .cpuid_update = vmx_cpuid_update,
+
+ .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+ .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
+ .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+
+ .load_mmu_pgd = vmx_load_mmu_pgd,
+
+ .check_intercept = vmx_check_intercept,
+ .handle_exit_irqoff = vmx_handle_exit_irqoff,
+
+ .request_immediate_exit = vmx_request_immediate_exit,
+
+ .sched_in = vmx_sched_in,
+
+ .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
+ .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
+ .flush_log_dirty = vmx_flush_log_dirty,
+ .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+ .write_log_dirty = vmx_write_pml_buffer,
+
+ .pre_block = vmx_pre_block,
+ .post_block = vmx_post_block,
+
+ .pmu_ops = &intel_pmu_ops,
+
+ .update_pi_irte = vmx_update_pi_irte,
+
+#ifdef CONFIG_X86_64
+ .set_hv_timer = vmx_set_hv_timer,
+ .cancel_hv_timer = vmx_cancel_hv_timer,
+#endif
+
+ .setup_mce = vmx_setup_mce,
+
+ .smi_allowed = vmx_smi_allowed,
+ .pre_enter_smm = vmx_pre_enter_smm,
+ .pre_leave_smm = vmx_pre_leave_smm,
+ .enable_smi_window = enable_smi_window,
+
+ .check_nested_events = NULL,
+ .get_nested_state = NULL,
+ .set_nested_state = NULL,
+ .get_vmcs12_pages = NULL,
+ .nested_enable_evmcs = NULL,
+ .nested_get_evmcs_version = NULL,
+ .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
+ .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+};
+
static __init int hardware_setup(void)
{
unsigned long host_bndcfgs;
struct desc_ptr dt;
- int r, i;
-
- rdmsrl_safe(MSR_EFER, &host_efer);
+ int r, i, ept_lpage_level;
store_idt(&dt);
host_idt_base = dt.address;
@@ -7696,6 +7836,10 @@ static __init int hardware_setup(void)
WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
}
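+
+ /*
+  * MPX cannot be properly virtualized without VMX support for
+  * loading/clearing BNDCFGS, so hide MPX state from XCR0.
+  */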
+ if (!cpu_has_vmx_mpx())
+ supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
+ XFEATURE_MASK_BNDCSR);
+
if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
@@ -7724,19 +7868,16 @@ static __init int hardware_setup(void)
* using the APIC_ACCESS_ADDR VMCS field.
*/
if (!flexpriority_enabled)
- kvm_x86_ops->set_apic_access_page_addr = NULL;
+ vmx_x86_ops.set_apic_access_page_addr = NULL;
if (!cpu_has_vmx_tpr_shadow())
- kvm_x86_ops->update_cr8_intercept = NULL;
-
- if (enable_ept && !cpu_has_vmx_ept_2m_page())
- kvm_disable_largepages();
+ vmx_x86_ops.update_cr8_intercept = NULL;
#if IS_ENABLED(CONFIG_HYPERV)
if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
&& enable_ept) {
- kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
- kvm_x86_ops->tlb_remote_flush_with_range =
+ vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
+ vmx_x86_ops.tlb_remote_flush_with_range =
hv_remote_flush_tlb_with_range;
}
#endif
@@ -7751,7 +7892,7 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv()) {
enable_apicv = 0;
- kvm_x86_ops->sync_pir_to_irr = NULL;
+ vmx_x86_ops.sync_pir_to_irr = NULL;
}
if (cpu_has_vmx_tsc_scaling()) {
@@ -7764,8 +7905,16 @@ static __init int hardware_setup(void)
if (enable_ept)
vmx_enable_tdp();
+
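+ /* Report the largest EPT page size to the MMU: 1GB, 2MB, or 4KB only. */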
+ if (!enable_ept)
+ ept_lpage_level = 0;
+ else if (cpu_has_vmx_ept_1g_page())
+ ept_lpage_level = PT_PDPE_LEVEL;
+ else if (cpu_has_vmx_ept_2m_page())
+ ept_lpage_level = PT_DIRECTORY_LEVEL;
else
- kvm_disable_tdp();
+ ept_lpage_level = PT_PAGE_TABLE_LEVEL;
+ kvm_configure_mmu(enable_ept, ept_lpage_level);
/*
* Only enable PML when hardware supports PML feature, and both EPT
@@ -7775,10 +7924,10 @@ static __init int hardware_setup(void)
enable_pml = 0;
if (!enable_pml) {
- kvm_x86_ops->slot_enable_log_dirty = NULL;
- kvm_x86_ops->slot_disable_log_dirty = NULL;
- kvm_x86_ops->flush_log_dirty = NULL;
- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
+ vmx_x86_ops.slot_enable_log_dirty = NULL;
+ vmx_x86_ops.slot_disable_log_dirty = NULL;
+ vmx_x86_ops.flush_log_dirty = NULL;
+ vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
}
if (!cpu_has_vmx_preemption_timer())
@@ -7806,9 +7955,9 @@ static __init int hardware_setup(void)
}
if (!enable_preemption_timer) {
- kvm_x86_ops->set_hv_timer = NULL;
- kvm_x86_ops->cancel_hv_timer = NULL;
- kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
+ vmx_x86_ops.set_hv_timer = NULL;
+ vmx_x86_ops.cancel_hv_timer = NULL;
+ vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
}
kvm_set_posted_intr_wakeup_handler(wakeup_handler);
@@ -7824,185 +7973,27 @@ static __init int hardware_setup(void)
nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
vmx_capability.ept);
- r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
+ r = nested_vmx_hardware_setup(&vmx_x86_ops,
+ kvm_vmx_exit_handlers);
if (r)
return r;
}
+ vmx_set_cpu_caps();
+
r = alloc_kvm_area();
if (r)
nested_vmx_hardware_unsetup();
return r;
}
-static __exit void hardware_unsetup(void)
-{
- if (nested)
- nested_vmx_hardware_unsetup();
-
- free_kvm_area();
-}
-
-static bool vmx_check_apicv_inhibit_reasons(ulong bit)
-{
- ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
- BIT(APICV_INHIBIT_REASON_HYPERV);
-
- return supported & BIT(bit);
-}
-
-static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+static struct kvm_x86_init_ops vmx_init_ops __initdata = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
- .hardware_setup = hardware_setup,
- .hardware_unsetup = hardware_unsetup,
.check_processor_compatibility = vmx_check_processor_compat,
- .hardware_enable = hardware_enable,
- .hardware_disable = hardware_disable,
- .cpu_has_accelerated_tpr = report_flexpriority,
- .has_emulated_msr = vmx_has_emulated_msr,
-
- .vm_init = vmx_vm_init,
- .vm_alloc = vmx_vm_alloc,
- .vm_free = vmx_vm_free,
-
- .vcpu_create = vmx_create_vcpu,
- .vcpu_free = vmx_free_vcpu,
- .vcpu_reset = vmx_vcpu_reset,
-
- .prepare_guest_switch = vmx_prepare_switch_to_guest,
- .vcpu_load = vmx_vcpu_load,
- .vcpu_put = vmx_vcpu_put,
-
- .update_bp_intercept = update_exception_bitmap,
- .get_msr_feature = vmx_get_msr_feature,
- .get_msr = vmx_get_msr,
- .set_msr = vmx_set_msr,
- .get_segment_base = vmx_get_segment_base,
- .get_segment = vmx_get_segment,
- .set_segment = vmx_set_segment,
- .get_cpl = vmx_get_cpl,
- .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
- .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
- .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
- .set_cr0 = vmx_set_cr0,
- .set_cr3 = vmx_set_cr3,
- .set_cr4 = vmx_set_cr4,
- .set_efer = vmx_set_efer,
- .get_idt = vmx_get_idt,
- .set_idt = vmx_set_idt,
- .get_gdt = vmx_get_gdt,
- .set_gdt = vmx_set_gdt,
- .get_dr6 = vmx_get_dr6,
- .set_dr6 = vmx_set_dr6,
- .set_dr7 = vmx_set_dr7,
- .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
- .cache_reg = vmx_cache_reg,
- .get_rflags = vmx_get_rflags,
- .set_rflags = vmx_set_rflags,
-
- .tlb_flush = vmx_flush_tlb,
- .tlb_flush_gva = vmx_flush_tlb_gva,
-
- .run = vmx_vcpu_run,
- .handle_exit = vmx_handle_exit,
- .skip_emulated_instruction = vmx_skip_emulated_instruction,
- .update_emulated_instruction = vmx_update_emulated_instruction,
- .set_interrupt_shadow = vmx_set_interrupt_shadow,
- .get_interrupt_shadow = vmx_get_interrupt_shadow,
- .patch_hypercall = vmx_patch_hypercall,
- .set_irq = vmx_inject_irq,
- .set_nmi = vmx_inject_nmi,
- .queue_exception = vmx_queue_exception,
- .cancel_injection = vmx_cancel_injection,
- .interrupt_allowed = vmx_interrupt_allowed,
- .nmi_allowed = vmx_nmi_allowed,
- .get_nmi_mask = vmx_get_nmi_mask,
- .set_nmi_mask = vmx_set_nmi_mask,
- .enable_nmi_window = enable_nmi_window,
- .enable_irq_window = enable_irq_window,
- .update_cr8_intercept = update_cr8_intercept,
- .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
- .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
- .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
- .load_eoi_exitmap = vmx_load_eoi_exitmap,
- .apicv_post_state_restore = vmx_apicv_post_state_restore,
- .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
- .hwapic_irr_update = vmx_hwapic_irr_update,
- .hwapic_isr_update = vmx_hwapic_isr_update,
- .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
- .sync_pir_to_irr = vmx_sync_pir_to_irr,
- .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
- .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
-
- .set_tss_addr = vmx_set_tss_addr,
- .set_identity_map_addr = vmx_set_identity_map_addr,
- .get_tdp_level = get_ept_level,
- .get_mt_mask = vmx_get_mt_mask,
-
- .get_exit_info = vmx_get_exit_info,
-
- .get_lpage_level = vmx_get_lpage_level,
-
- .cpuid_update = vmx_cpuid_update,
-
- .rdtscp_supported = vmx_rdtscp_supported,
- .invpcid_supported = vmx_invpcid_supported,
-
- .set_supported_cpuid = vmx_set_supported_cpuid,
-
- .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
-
- .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
- .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
-
- .set_tdp_cr3 = vmx_set_cr3,
-
- .check_intercept = vmx_check_intercept,
- .handle_exit_irqoff = vmx_handle_exit_irqoff,
- .mpx_supported = vmx_mpx_supported,
- .xsaves_supported = vmx_xsaves_supported,
- .umip_emulated = vmx_umip_emulated,
- .pt_supported = vmx_pt_supported,
- .pku_supported = vmx_pku_supported,
-
- .request_immediate_exit = vmx_request_immediate_exit,
-
- .sched_in = vmx_sched_in,
-
- .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
- .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
- .flush_log_dirty = vmx_flush_log_dirty,
- .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
- .write_log_dirty = vmx_write_pml_buffer,
-
- .pre_block = vmx_pre_block,
- .post_block = vmx_post_block,
-
- .pmu_ops = &intel_pmu_ops,
-
- .update_pi_irte = vmx_update_pi_irte,
-
-#ifdef CONFIG_X86_64
- .set_hv_timer = vmx_set_hv_timer,
- .cancel_hv_timer = vmx_cancel_hv_timer,
-#endif
-
- .setup_mce = vmx_setup_mce,
-
- .smi_allowed = vmx_smi_allowed,
- .pre_enter_smm = vmx_pre_enter_smm,
- .pre_leave_smm = vmx_pre_leave_smm,
- .enable_smi_window = enable_smi_window,
+ .hardware_setup = hardware_setup,
- .check_nested_events = NULL,
- .get_nested_state = NULL,
- .set_nested_state = NULL,
- .get_vmcs12_pages = NULL,
- .nested_enable_evmcs = NULL,
- .nested_get_evmcs_version = NULL,
- .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
- .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+ .runtime_ops = &vmx_x86_ops,
};
static void vmx_cleanup_l1d_flush(void)
@@ -8089,7 +8080,7 @@ static int __init vmx_init(void)
}
#endif
- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
return r;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 0695ea177e22..aab9df55336e 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -12,7 +12,6 @@
#include "vmcs.h"
extern const u32 vmx_msr_index[];
-extern u64 host_efer;
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
@@ -333,9 +332,9 @@ u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@@ -450,7 +449,7 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
static inline u32 vmx_vmentry_ctrl(void)
{
u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
- if (pt_mode == PT_MODE_SYSTEM)
+ if (vmx_pt_mode_is_system())
vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
VM_ENTRY_LOAD_IA32_RTIT_CTL);
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
@@ -461,7 +460,7 @@ static inline u32 vmx_vmentry_ctrl(void)
static inline u32 vmx_vmexit_ctrl(void)
{
u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
- if (pt_mode == PT_MODE_SYSTEM)
+ if (vmx_pt_mode_is_system())
vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
VM_EXIT_CLEAR_IA32_RTIT_CTL);
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
@@ -491,7 +490,6 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
-void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
static inline struct vmcs *alloc_vmcs(bool shadow)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bf8564d73fc3..b8124b562dea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -22,6 +22,7 @@
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
@@ -81,7 +82,7 @@ u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
#define emul_to_vcpu(ctxt) \
- container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
+ ((struct kvm_vcpu *)(ctxt)->vcpu)
/* EFER defaults:
* - enable syscall per default because its emulated by KVM
@@ -109,7 +110,7 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
-struct kvm_x86_ops *kvm_x86_ops __read_mostly;
+struct kvm_x86_ops kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
static bool __read_mostly ignore_msrs = 0;
@@ -180,7 +181,17 @@ struct kvm_shared_msrs {
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;
+#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
+ | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
+ | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
+ | XFEATURE_MASK_PKRU)
+
+u64 __read_mostly host_efer;
+EXPORT_SYMBOL_GPL(host_efer);
+
static u64 __read_mostly host_xss;
+u64 __read_mostly supported_xss;
+EXPORT_SYMBOL_GPL(supported_xss);
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -226,10 +237,25 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
};
u64 __read_mostly host_xcr0;
+u64 __read_mostly supported_xcr0;
+EXPORT_SYMBOL_GPL(supported_xcr0);
struct kmem_cache *x86_fpu_cache;
EXPORT_SYMBOL_GPL(x86_fpu_cache);
+static struct kmem_cache *x86_emulator_cache;
+
+static struct kmem_cache *kvm_alloc_emulator_cache(void)
+{
+ unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
+ unsigned int size = sizeof(struct x86_emulate_ctxt);
+
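+ /*
+  * Whitelist only the fields from "src" onward for hardened
+  * usercopy; nothing before that offset is copied to/from userspace.
+  */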
+ return kmem_cache_create_usercopy("x86_emulator", size,
+ __alignof__(struct x86_emulate_ctxt),
+ SLAB_ACCOUNT, useroffset,
+ size - useroffset, NULL);
+}
+
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
@@ -350,6 +376,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
kvm_lapic_set_base(vcpu, msr_info->data);
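+ /* An APIC base change (e.g. x2APIC enable) invalidates the APIC map. */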
+ kvm_recalculate_apic_map(vcpu->kvm);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
@@ -619,7 +646,7 @@ EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
*/
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
- if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
+ if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl)
return true;
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return false;
@@ -760,7 +787,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (!is_pae(vcpu))
return 1;
- kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l)
return 1;
} else
@@ -773,7 +800,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
return 1;
- kvm_x86_ops->set_cr0(vcpu, cr0);
+ kvm_x86_ops.set_cr0(vcpu, cr0);
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
@@ -869,7 +896,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
- if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+ if (kvm_x86_ops.get_cpl(vcpu) != 0 ||
__kvm_set_xcr(vcpu, index, xcr)) {
kvm_inject_gp(vcpu, 0);
return 1;
@@ -903,10 +930,10 @@ static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
{
u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);
- if (cpuid_ecx(0x7) & feature_bit(LA57))
+ if (kvm_cpu_cap_has(X86_FEATURE_LA57))
reserved_bits &= ~X86_CR4_LA57;
- if (kvm_x86_ops->umip_emulated())
+ if (kvm_cpu_cap_has(X86_FEATURE_UMIP))
reserved_bits &= ~X86_CR4_UMIP;
return reserved_bits;
@@ -950,7 +977,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
return 1;
}
- if (kvm_x86_ops->set_cr4(vcpu, cr4))
+ if (kvm_x86_ops.set_cr4(vcpu, cr4))
return 1;
if (((cr4 ^ old_cr4) & pdptr_bits) ||
@@ -1034,7 +1061,7 @@ static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
+ kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
}
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
@@ -1045,7 +1072,7 @@ static void kvm_update_dr7(struct kvm_vcpu *vcpu)
dr7 = vcpu->arch.guest_debug_dr7;
else
dr7 = vcpu->arch.dr7;
- kvm_x86_ops->set_dr7(vcpu, dr7);
+ kvm_x86_ops.set_dr7(vcpu, dr7);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
if (dr7 & DR7_BP_EN_MASK)
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
@@ -1115,7 +1142,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
*val = vcpu->arch.dr6;
else
- *val = kvm_x86_ops->get_dr6(vcpu);
+ *val = kvm_x86_ops.get_dr6(vcpu);
break;
case 5:
/* fall through */
@@ -1350,7 +1377,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
rdmsrl_safe(msr->index, &msr->data);
break;
default:
- if (kvm_x86_ops->get_msr_feature(msr))
+ if (kvm_x86_ops.get_msr_feature(msr))
return 1;
}
return 0;
@@ -1418,7 +1445,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
- kvm_x86_ops->set_efer(vcpu, efer);
+ kvm_x86_ops.set_efer(vcpu, efer);
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
@@ -1474,7 +1501,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
msr.index = index;
msr.host_initiated = host_initiated;
- return kvm_x86_ops->set_msr(vcpu, &msr);
+ return kvm_x86_ops.set_msr(vcpu, &msr);
}
/*
@@ -1492,7 +1519,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
msr.index = index;
msr.host_initiated = host_initiated;
- ret = kvm_x86_ops->get_msr(vcpu, &msr);
+ ret = kvm_x86_ops.get_msr(vcpu, &msr);
if (!ret)
*data = msr.data;
return ret;
@@ -1561,8 +1588,12 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
((data & APIC_MODE_MASK) == APIC_DM_FIXED)) {
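+ /* Clear ICR bit 12 (the xAPIC delivery-status/busy flag, reserved in x2APIC). */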
+ data &= ~(1 << 12);
+ kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32));
kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32));
- return kvm_lapic_reg_write(vcpu->arch.apic, APIC_ICR, (u32)data);
+ kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data);
+ trace_kvm_apic_write(APIC_ICR, (u32)data);
+ return 0;
}
return 1;
@@ -1571,11 +1602,12 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
u32 msr = kvm_rcx_read(vcpu);
- u64 data = kvm_read_edx_eax(vcpu);
+ u64 data;
int ret = 0;
switch (msr) {
case APIC_BASE_MSR + (APIC_ICR >> 4):
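+ /* Only read the EDX:EAX payload for MSRs the fastpath handles. */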
+ data = kvm_read_edx_eax(vcpu);
ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
break;
default:
@@ -1876,7 +1908,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
- u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+ u64 curr_offset = kvm_x86_ops.read_l1_tsc_offset(vcpu);
vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}
@@ -1918,7 +1950,7 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
- u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+ u64 tsc_offset = kvm_x86_ops.read_l1_tsc_offset(vcpu);
return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}
@@ -1926,7 +1958,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
- vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
+ vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset);
}
static inline bool kvm_check_tsc_unstable(void)
@@ -2050,7 +2082,7 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
s64 adjustment)
{
- u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+ u64 tsc_offset = kvm_x86_ops.read_l1_tsc_offset(vcpu);
kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
}
@@ -2525,7 +2557,7 @@ static void kvmclock_sync_fn(struct work_struct *work)
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
/* McStatusWrEn enabled? */
- if (guest_cpuid_is_amd(vcpu))
+ if (guest_cpuid_is_amd_or_hygon(vcpu))
return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
return false;
@@ -2647,7 +2679,7 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
++vcpu->stat.tlb_flush;
- kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
+ kvm_x86_ops.tlb_flush(vcpu, invalidate_gpa);
}
static void record_steal_time(struct kvm_vcpu *vcpu)
@@ -2800,12 +2832,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
return 1;
/*
- * We do support PT if kvm_x86_ops->pt_supported(), but we do
- * not support IA32_XSS[bit 8]. Guests will have to use
- * RDMSR/WRMSR rather than XSAVES/XRSTORS to save/restore PT
- * MSRs.
+ * KVM supports exposing PT to the guest, but does not support
+ * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
+ * XSAVES/XRSTORS to save/restore PT MSRs.
*/
- if (data != 0)
+ if (data & ~supported_xss)
return 1;
vcpu->arch.ia32_xss = data;
break;
@@ -3079,7 +3110,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
- break;
case MSR_IA32_TSCDEADLINE:
msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
break;
@@ -3162,7 +3192,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return kvm_hv_get_msr_common(vcpu,
msr_info->index, &msr_info->data,
msr_info->host_initiated);
- break;
case MSR_IA32_BBL_CR_CTL3:
/* This legacy MSR exists but isn't fully documented in current
* silicon. It is however accessed by winxp in very narrow
@@ -3367,10 +3396,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* fringe case that is not enabled except via specific settings
* of the module parameters.
*/
- r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
+ r = kvm_x86_ops.has_emulated_msr(MSR_IA32_SMBASE);
break;
case KVM_CAP_VAPIC:
- r = !kvm_x86_ops->cpu_has_accelerated_tpr();
+ r = !kvm_x86_ops.cpu_has_accelerated_tpr();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
@@ -3397,14 +3426,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = KVM_X2APIC_API_VALID_FLAGS;
break;
case KVM_CAP_NESTED_STATE:
- r = kvm_x86_ops->get_nested_state ?
- kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
+ r = kvm_x86_ops.get_nested_state ?
+ kvm_x86_ops.get_nested_state(NULL, NULL, 0) : 0;
break;
case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
- r = kvm_x86_ops->enable_direct_tlbflush != NULL;
+ r = kvm_x86_ops.enable_direct_tlbflush != NULL;
break;
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
- r = kvm_x86_ops->nested_enable_evmcs != NULL;
+ r = kvm_x86_ops.nested_enable_evmcs != NULL;
break;
default:
break;
@@ -3466,7 +3495,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
r = 0;
break;
}
- case KVM_X86_GET_MCE_CAP_SUPPORTED: {
+ case KVM_X86_GET_MCE_CAP_SUPPORTED:
r = -EFAULT;
if (copy_to_user(argp, &kvm_mce_cap_supported,
sizeof(kvm_mce_cap_supported)))
@@ -3498,9 +3527,9 @@ long kvm_arch_dev_ioctl(struct file *filp,
case KVM_GET_MSRS:
r = msr_io(NULL, argp, do_get_msr_feature, 1);
break;
- }
default:
r = -EINVAL;
+ break;
}
out:
return r;
@@ -3520,14 +3549,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Address WBINVD may be executed by guest */
if (need_emulate_wbinvd(vcpu)) {
- if (kvm_x86_ops->has_wbinvd_exit())
+ if (kvm_x86_ops.has_wbinvd_exit())
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
smp_call_function_single(vcpu->cpu,
wbinvd_ipi, NULL, 1);
}
- kvm_x86_ops->vcpu_load(vcpu, cpu);
+ kvm_x86_ops.vcpu_load(vcpu, cpu);
/* Apply any externally detected TSC adjustments (due to suspend) */
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
@@ -3594,7 +3623,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
int idx;
if (vcpu->preempted)
- vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
+ vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
/*
* Disable page faults because we're in atomic context here.
@@ -3613,7 +3642,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_steal_time_set_preempted(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
pagefault_enable();
- kvm_x86_ops->vcpu_put(vcpu);
+ kvm_x86_ops.vcpu_put(vcpu);
vcpu->arch.last_host_tsc = rdtsc();
/*
* If userspace has set any breakpoints or watchpoints, dr6 is restored
@@ -3627,7 +3656,7 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
if (vcpu->arch.apicv_active)
- kvm_x86_ops->sync_pir_to_irr(vcpu);
+ kvm_x86_ops.sync_pir_to_irr(vcpu);
return kvm_apic_get_state(vcpu, s);
}
@@ -3735,7 +3764,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
- kvm_x86_ops->setup_mce(vcpu);
+ kvm_x86_ops.setup_mce(vcpu);
out:
return r;
}
@@ -3839,11 +3868,11 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
events->interrupt.nr = vcpu->arch.interrupt.nr;
events->interrupt.soft = 0;
- events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
+ events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending != 0;
- events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+ events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu);
events->nmi.pad = 0;
events->sipi_vector = 0; /* never valid when reporting to user space */
@@ -3910,13 +3939,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.interrupt.nr = events->interrupt.nr;
vcpu->arch.interrupt.soft = events->interrupt.soft;
if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
- kvm_x86_ops->set_interrupt_shadow(vcpu,
+ kvm_x86_ops.set_interrupt_shadow(vcpu,
events->interrupt.shadow);
vcpu->arch.nmi_injected = events->nmi.injected;
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
vcpu->arch.nmi_pending = events->nmi.pending;
- kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
+ kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked);
if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
lapic_in_kernel(vcpu))
@@ -4103,8 +4132,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
* CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
* with old userspace.
*/
- if (xstate_bv & ~kvm_supported_xcr0() ||
- mxcsr & ~mxcsr_feature_mask)
+ if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
load_xsave(vcpu, (u8 *)guest_xsave->region);
} else {
@@ -4191,9 +4219,9 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return kvm_hv_activate_synic(vcpu, cap->cap ==
KVM_CAP_HYPERV_SYNIC2);
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
- if (!kvm_x86_ops->nested_enable_evmcs)
+ if (!kvm_x86_ops.nested_enable_evmcs)
return -ENOTTY;
- r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
+ r = kvm_x86_ops.nested_enable_evmcs(vcpu, &vmcs_version);
if (!r) {
user_ptr = (void __user *)(uintptr_t)cap->args[0];
if (copy_to_user(user_ptr, &vmcs_version,
@@ -4202,10 +4230,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
}
return r;
case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
- if (!kvm_x86_ops->enable_direct_tlbflush)
+ if (!kvm_x86_ops.enable_direct_tlbflush)
return -ENOTTY;
- return kvm_x86_ops->enable_direct_tlbflush(vcpu);
+ return kvm_x86_ops.enable_direct_tlbflush(vcpu);
default:
return -EINVAL;
@@ -4508,7 +4536,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
u32 user_data_size;
r = -EINVAL;
- if (!kvm_x86_ops->get_nested_state)
+ if (!kvm_x86_ops.get_nested_state)
break;
BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
@@ -4516,7 +4544,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (get_user(user_data_size, &user_kvm_nested_state->size))
break;
- r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
+ r = kvm_x86_ops.get_nested_state(vcpu, user_kvm_nested_state,
user_data_size);
if (r < 0)
break;
@@ -4538,7 +4566,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
int idx;
r = -EINVAL;
- if (!kvm_x86_ops->set_nested_state)
+ if (!kvm_x86_ops.set_nested_state)
break;
r = -EFAULT;
@@ -4560,7 +4588,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
+ r = kvm_x86_ops.set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
@@ -4604,14 +4632,14 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
if (addr > (unsigned int)(-3 * PAGE_SIZE))
return -EINVAL;
- ret = kvm_x86_ops->set_tss_addr(kvm, addr);
+ ret = kvm_x86_ops.set_tss_addr(kvm, addr);
return ret;
}
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
u64 ident_addr)
{
- return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr);
+ return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr);
}
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
@@ -4763,77 +4791,13 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
return 0;
}
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
/*
* Flush potentially hardware-cached dirty pages to dirty_bitmap.
*/
- if (kvm_x86_ops->flush_log_dirty)
- kvm_x86_ops->flush_log_dirty(kvm);
-
- r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
- /*
- * All the TLBs can be flushed out of mmu lock, see the comments in
- * kvm_mmu_slot_remove_write_access().
- */
- lockdep_assert_held(&kvm->slots_lock);
- if (flush)
- kvm_flush_remote_tlbs(kvm);
-
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
-{
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- /*
- * Flush potentially hardware-cached dirty pages to dirty_bitmap.
- */
- if (kvm_x86_ops->flush_log_dirty)
- kvm_x86_ops->flush_log_dirty(kvm);
-
- r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
- /*
- * All the TLBs can be flushed out of mmu lock, see the comments in
- * kvm_mmu_slot_remove_write_access().
- */
- lockdep_assert_held(&kvm->slots_lock);
- if (flush)
- kvm_flush_remote_tlbs(kvm);
-
- mutex_unlock(&kvm->slots_lock);
- return r;
+ if (kvm_x86_ops.flush_log_dirty)
+ kvm_x86_ops.flush_log_dirty(kvm);
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
@@ -5186,8 +5150,8 @@ set_identity_unlock:
}
case KVM_MEMORY_ENCRYPT_OP: {
r = -ENOTTY;
- if (kvm_x86_ops->mem_enc_op)
- r = kvm_x86_ops->mem_enc_op(kvm, argp);
+ if (kvm_x86_ops.mem_enc_op)
+ r = kvm_x86_ops.mem_enc_op(kvm, argp);
break;
}
case KVM_MEMORY_ENCRYPT_REG_REGION: {
@@ -5198,8 +5162,8 @@ set_identity_unlock:
goto out;
r = -ENOTTY;
- if (kvm_x86_ops->mem_enc_reg_region)
- r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
+ if (kvm_x86_ops.mem_enc_reg_region)
+ r = kvm_x86_ops.mem_enc_reg_region(kvm, &region);
break;
}
case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
@@ -5210,8 +5174,8 @@ set_identity_unlock:
goto out;
r = -ENOTTY;
- if (kvm_x86_ops->mem_enc_unreg_region)
- r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
+ if (kvm_x86_ops.mem_enc_unreg_region)
+ r = kvm_x86_ops.mem_enc_unreg_region(kvm, &region);
break;
}
case KVM_HYPERV_EVENTFD: {
@@ -5262,28 +5226,28 @@ static void kvm_init_msr_list(void)
continue;
break;
case MSR_TSC_AUX:
- if (!kvm_x86_ops->rdtscp_supported())
+ if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
continue;
break;
case MSR_IA32_RTIT_CTL:
case MSR_IA32_RTIT_STATUS:
- if (!kvm_x86_ops->pt_supported())
+ if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
continue;
break;
case MSR_IA32_RTIT_CR3_MATCH:
- if (!kvm_x86_ops->pt_supported() ||
+ if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
!intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
continue;
break;
case MSR_IA32_RTIT_OUTPUT_BASE:
case MSR_IA32_RTIT_OUTPUT_MASK:
- if (!kvm_x86_ops->pt_supported() ||
+ if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
(!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
!intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
continue;
break;
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
- if (!kvm_x86_ops->pt_supported() ||
+ if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
continue;
@@ -5306,7 +5270,7 @@ static void kvm_init_msr_list(void)
}
for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
- if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
+ if (!kvm_x86_ops.has_emulated_msr(emulated_msrs_all[i]))
continue;
emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
@@ -5369,13 +5333,13 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
static void kvm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- kvm_x86_ops->set_segment(vcpu, var, seg);
+ kvm_x86_ops.set_segment(vcpu, var, seg);
}
void kvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- kvm_x86_ops->get_segment(vcpu, var, seg);
+ kvm_x86_ops.get_segment(vcpu, var, seg);
}
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
@@ -5395,14 +5359,14 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_FETCH_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
@@ -5410,7 +5374,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
@@ -5459,7 +5423,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
unsigned offset;
int ret;
@@ -5484,7 +5448,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
- u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
/*
* FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -5505,7 +5469,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = 0;
- if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+ if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
access |= PFERR_USER_MASK;
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
@@ -5558,7 +5522,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = PFERR_WRITE_MASK;
- if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+ if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
access |= PFERR_USER_MASK;
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
@@ -5621,7 +5585,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t *gpa, struct x86_exception *exception,
bool write)
{
- u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
+ u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
| (write ? PFERR_WRITE_MASK : 0);
/*
@@ -5740,7 +5704,7 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
int handled, ret;
bool write = ops->write;
struct kvm_mmio_fragment *frag;
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
/*
* If the exit was due to an NPF we may already have a GPA.
@@ -5749,10 +5713,9 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
* operation using rep will only have the initial GPA from where the NPF
* occurred.
*/
- if (vcpu->arch.gpa_available &&
- emulator_can_use_gpa(ctxt) &&
- (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
- gpa = vcpu->arch.gpa_val;
+ if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
+ (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
+ gpa = ctxt->gpa_val;
ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
} else {
ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
@@ -5972,11 +5935,9 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
return 0;
}
-static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
- int size, unsigned short port, void *val,
- unsigned int count)
+static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+ unsigned short port, void *val, unsigned int count)
{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int ret;
if (vcpu->arch.pio.count)
@@ -5996,20 +5957,33 @@ data_avail:
return 0;
}
-static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
- int size, unsigned short port,
- const void *val, unsigned int count)
+static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+ int size, unsigned short port, void *val,
+ unsigned int count)
{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ return emulator_pio_in(emul_to_vcpu(ctxt), size, port, val, count);
+}
+static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
+ unsigned short port, const void *val,
+ unsigned int count)
+{
memcpy(vcpu->arch.pio_data, val, size * count);
trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}
+static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
+ int size, unsigned short port,
+ const void *val, unsigned int count)
+{
+ return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
+}
+
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
- return kvm_x86_ops->get_segment_base(vcpu, seg);
+ return kvm_x86_ops.get_segment_base(vcpu, seg);
}
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
@@ -6022,7 +5996,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
- if (kvm_x86_ops->has_wbinvd_exit()) {
+ if (kvm_x86_ops.has_wbinvd_exit()) {
int cpu = get_cpu();
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
@@ -6127,27 +6101,27 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
- return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
+ return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt));
}
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
+ kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt);
}
static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
+ kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
+ kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
- kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
+ kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt);
}
static unsigned long emulator_get_cached_segment_base(
@@ -6269,13 +6243,15 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
- return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
+ return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage,
+ &ctxt->exception);
}
static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
- u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool check_limit)
+ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
+ bool exact_only)
{
- return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit);
+ return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
}
static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt)
@@ -6305,7 +6281,7 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulon
static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
- kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
+ kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked);
}
static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
@@ -6321,7 +6297,7 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
const char *smstate)
{
- return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
+ return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate);
}
static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
@@ -6383,7 +6359,7 @@ static const struct x86_emulate_ops emulate_ops = {
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
- u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
+ u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
/*
* An sti; sti sequence only disables interrupts for the first
* instruction. So, if the last instruction, be it emulated or
@@ -6394,7 +6370,7 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
if (int_shadow & mask)
mask = 0;
if (unlikely(int_shadow || mask)) {
- kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
+ kvm_x86_ops.set_interrupt_shadow(vcpu, mask);
if (!mask)
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
@@ -6402,7 +6378,7 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
{
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
if (ctxt->exception.vector == PF_VECTOR)
return kvm_propagate_fault(vcpu, &ctxt->exception);
@@ -6414,13 +6390,31 @@ static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
return false;
}
+static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt;
+
+ ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
+ if (!ctxt) {
+ pr_err("kvm: failed to allocate vcpu's emulator\n");
+ return NULL;
+ }
+
+ ctxt->vcpu = vcpu;
+ ctxt->ops = &emulate_ops;
+ vcpu->arch.emulate_ctxt = ctxt;
+
+ return ctxt;
+}
+
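The emulation context is now allocated per-vCPU from a dedicated slab cache rather than embedded in struct kvm_vcpu_arch. The x86_emulator_cache itself is created in kvm_arch_init() further down via kvm_alloc_emulator_cache(), whose definition falls outside this excerpt; a minimal sketch of such a helper, assuming a plain accounted slab with no constructor:

/* Hedged sketch only -- the real kvm_alloc_emulator_cache() is defined
 * elsewhere in this patch and may differ (e.g. usercopy whitelisting). */
static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
	return kmem_cache_create("x86_emulator",
				 sizeof(struct x86_emulate_ctxt),
				 __alignof__(struct x86_emulate_ctxt),
				 SLAB_ACCOUNT, NULL);
}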
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
int cs_db, cs_l;
- kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ ctxt->gpa_available = false;
ctxt->eflags = kvm_get_rflags(vcpu);
ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
@@ -6440,7 +6434,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
@@ -6479,7 +6473,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
kvm_queue_exception(vcpu, UD_VECTOR);
- if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
+ if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
@@ -6496,10 +6490,11 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
gpa_t gpa = cr2_or_gpa;
kvm_pfn_t pfn;
- if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false;
- if (WARN_ON_ONCE(is_guest_mode(vcpu)))
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
+ WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
return false;
if (!vcpu->arch.mmu->direct_map) {
@@ -6587,10 +6582,11 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
*/
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
- if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false;
- if (WARN_ON_ONCE(is_guest_mode(vcpu)))
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
+ WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
return false;
if (x86_page_table_writing_insn(ctxt))
@@ -6658,10 +6654,10 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
- unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+ unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
int r;
- r = kvm_x86_ops->skip_emulated_instruction(vcpu);
+ r = kvm_x86_ops.skip_emulated_instruction(vcpu);
if (unlikely(!r))
return 0;
@@ -6753,7 +6749,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len)
{
int r;
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
@@ -6843,8 +6839,19 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
}
restart:
- /* Save the faulting GPA (cr2) in the address field */
- ctxt->exception.address = cr2_or_gpa;
+ if (emulation_type & EMULTYPE_PF) {
+ /* Save the faulting GPA (cr2) in the address field */
+ ctxt->exception.address = cr2_or_gpa;
+
+ /* With shadow page tables, cr2 contains a GVA or nGPA. */
+ if (vcpu->arch.mmu->direct_map) {
+ ctxt->gpa_available = true;
+ ctxt->gpa_val = cr2_or_gpa;
+ }
+ } else {
+ /* Sanitize the address out of an abundance of paranoia. */
+ ctxt->exception.address = 0;
+ }
r = x86_emulate_insn(ctxt);
@@ -6885,7 +6892,7 @@ restart:
r = 1;
if (writeback) {
- unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+ unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
if (!ctxt->have_exception ||
@@ -6893,8 +6900,8 @@ restart:
kvm_rip_write(vcpu, ctxt->eip);
if (r && ctxt->tf)
r = kvm_vcpu_do_singlestep(vcpu);
- if (kvm_x86_ops->update_emulated_instruction)
- kvm_x86_ops->update_emulated_instruction(vcpu);
+ if (kvm_x86_ops.update_emulated_instruction)
+ kvm_x86_ops.update_emulated_instruction(vcpu);
__kvm_set_rflags(vcpu, ctxt->eflags);
}
@@ -6945,8 +6952,8 @@ static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
{
unsigned long val = kvm_rax_read(vcpu);
- int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
- size, port, &val, 1);
+ int ret = emulator_pio_out(vcpu, size, port, &val, 1);
+
if (ret)
return ret;
@@ -6982,11 +6989,10 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
/*
- * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
+ * Since vcpu->arch.pio.count == 1, let emulator_pio_in perform
* the copy and tracing.
*/
- emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
- vcpu->arch.pio.port, &val, 1);
+ emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1);
kvm_rax_write(vcpu, val);
return kvm_skip_emulated_instruction(vcpu);
@@ -7001,8 +7007,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
/* For size less than 4 we merge, else we zero extend */
val = (size < 4) ? kvm_rax_read(vcpu) : 0;
- ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
- &val, 1);
+ ret = emulator_pio_in(vcpu, size, port, &val, 1);
if (ret) {
kvm_rax_write(vcpu, val);
return ret;
@@ -7225,7 +7230,7 @@ static int kvm_is_user_mode(void)
int user_mode = 3;
if (__this_cpu_read(current_vcpu))
- user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
+ user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu));
return user_mode != 0;
}
@@ -7302,10 +7307,10 @@ static struct notifier_block pvclock_gtod_notifier = {
int kvm_arch_init(void *opaque)
{
+ struct kvm_x86_init_ops *ops = opaque;
int r;
- struct kvm_x86_ops *ops = opaque;
- if (kvm_x86_ops) {
+ if (kvm_x86_ops.hardware_enable) {
printk(KERN_ERR "kvm: already loaded the other module\n");
r = -EEXIST;
goto out;
@@ -7342,18 +7347,22 @@ int kvm_arch_init(void *opaque)
goto out;
}
+ x86_emulator_cache = kvm_alloc_emulator_cache();
+ if (!x86_emulator_cache) {
+ pr_err("kvm: failed to allocate cache for x86 emulator\n");
+ goto out_free_x86_fpu_cache;
+ }
+
shared_msrs = alloc_percpu(struct kvm_shared_msrs);
if (!shared_msrs) {
printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
- goto out_free_x86_fpu_cache;
+ goto out_free_x86_emulator_cache;
}
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
- kvm_x86_ops = ops;
-
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0,
PT_PRESENT_MASK, 0, sme_me_mask);
@@ -7361,8 +7370,10 @@ int kvm_arch_init(void *opaque)
perf_register_guest_info_callbacks(&kvm_guest_cbs);
- if (boot_cpu_has(X86_FEATURE_XSAVE))
+ if (boot_cpu_has(X86_FEATURE_XSAVE)) {
host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
+ }
kvm_lapic_init();
if (pi_inject_timer == -1)
@@ -7378,6 +7389,8 @@ int kvm_arch_init(void *opaque)
out_free_percpu:
free_percpu(shared_msrs);
+out_free_x86_emulator_cache:
+ kmem_cache_destroy(x86_emulator_cache);
out_free_x86_fpu_cache:
kmem_cache_destroy(x86_fpu_cache);
out:
@@ -7400,7 +7413,7 @@ void kvm_arch_exit(void)
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
- kvm_x86_ops = NULL;
+ kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
free_percpu(shared_msrs);
kmem_cache_destroy(x86_fpu_cache);
@@ -7538,7 +7551,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
a3 &= 0xFFFFFFFF;
}
- if (kvm_x86_ops->get_cpl(vcpu) != 0) {
+ if (kvm_x86_ops.get_cpl(vcpu) != 0) {
ret = -KVM_EPERM;
goto out;
}
@@ -7584,7 +7597,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
char instruction[3];
unsigned long rip = kvm_rip_read(vcpu);
- kvm_x86_ops->patch_hypercall(vcpu, instruction);
+ kvm_x86_ops.patch_hypercall(vcpu, instruction);
return emulator_write_emulated(ctxt, rip, instruction, 3,
&ctxt->exception);
@@ -7613,7 +7626,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
- if (!kvm_x86_ops->update_cr8_intercept)
+ if (!kvm_x86_ops.update_cr8_intercept)
return;
if (!lapic_in_kernel(vcpu))
@@ -7632,17 +7645,17 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
tpr = kvm_lapic_get_cr8(vcpu);
- kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
+ kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr);
}
-static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+static int inject_pending_event(struct kvm_vcpu *vcpu)
{
int r;
/* try to reinject previous events if any */
if (vcpu->arch.exception.injected)
- kvm_x86_ops->queue_exception(vcpu);
+ kvm_x86_ops.queue_exception(vcpu);
/*
* Do not inject an NMI or interrupt if there is a pending
* exception. Exceptions and interrupts are recognized at
@@ -7659,9 +7672,9 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
*/
else if (!vcpu->arch.exception.pending) {
if (vcpu->arch.nmi_injected)
- kvm_x86_ops->set_nmi(vcpu);
+ kvm_x86_ops.set_nmi(vcpu);
else if (vcpu->arch.interrupt.injected)
- kvm_x86_ops->set_irq(vcpu);
+ kvm_x86_ops.set_irq(vcpu);
}
/*
@@ -7670,8 +7683,8 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
* from L2 to L1 due to pending L1 events which require exit
* from L2 to L1.
*/
- if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+ if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+ r = kvm_x86_ops.check_nested_events(vcpu);
if (r != 0)
return r;
}
@@ -7708,7 +7721,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
}
}
- kvm_x86_ops->queue_exception(vcpu);
+ kvm_x86_ops.queue_exception(vcpu);
}
/* Don't consider new event if we re-injected an event */
@@ -7716,14 +7729,14 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
return 0;
if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
- kvm_x86_ops->smi_allowed(vcpu)) {
+ kvm_x86_ops.smi_allowed(vcpu)) {
vcpu->arch.smi_pending = false;
++vcpu->arch.smi_count;
enter_smm(vcpu);
- } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+ } else if (vcpu->arch.nmi_pending && kvm_x86_ops.nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
- kvm_x86_ops->set_nmi(vcpu);
+ kvm_x86_ops.set_nmi(vcpu);
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
/*
* Because interrupts can be injected asynchronously, we are
@@ -7732,15 +7745,15 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
* proposal and current concerns. Perhaps we should be setting
* KVM_REQ_EVENT only on certain events and not unconditionally?
*/
- if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
- r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+ if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+ r = kvm_x86_ops.check_nested_events(vcpu);
if (r != 0)
return r;
}
- if (kvm_x86_ops->interrupt_allowed(vcpu)) {
+ if (kvm_x86_ops.interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);
- kvm_x86_ops->set_irq(vcpu);
+ kvm_x86_ops.set_irq(vcpu);
}
}
@@ -7756,7 +7769,7 @@ static void process_nmi(struct kvm_vcpu *vcpu)
* If an NMI is already in progress, limit further NMIs to just one.
* Otherwise, allow two (and we'll inject the first one immediately).
*/
- if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
+ if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
limit = 1;
vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
@@ -7846,11 +7859,11 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7f7c, seg.limit);
put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
- kvm_x86_ops->get_gdt(vcpu, &dt);
+ kvm_x86_ops.get_gdt(vcpu, &dt);
put_smstate(u32, buf, 0x7f74, dt.address);
put_smstate(u32, buf, 0x7f70, dt.size);
- kvm_x86_ops->get_idt(vcpu, &dt);
+ kvm_x86_ops.get_idt(vcpu, &dt);
put_smstate(u32, buf, 0x7f58, dt.address);
put_smstate(u32, buf, 0x7f54, dt.size);
@@ -7900,7 +7913,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7e94, seg.limit);
put_smstate(u64, buf, 0x7e98, seg.base);
- kvm_x86_ops->get_idt(vcpu, &dt);
+ kvm_x86_ops.get_idt(vcpu, &dt);
put_smstate(u32, buf, 0x7e84, dt.size);
put_smstate(u64, buf, 0x7e88, dt.address);
@@ -7910,7 +7923,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7e74, seg.limit);
put_smstate(u64, buf, 0x7e78, seg.base);
- kvm_x86_ops->get_gdt(vcpu, &dt);
+ kvm_x86_ops.get_gdt(vcpu, &dt);
put_smstate(u32, buf, 0x7e64, dt.size);
put_smstate(u64, buf, 0x7e68, dt.address);
@@ -7940,28 +7953,28 @@ static void enter_smm(struct kvm_vcpu *vcpu)
* vCPU state (e.g. leave guest mode) after we've saved the state into
* the SMM state-save area.
*/
- kvm_x86_ops->pre_enter_smm(vcpu, buf);
+ kvm_x86_ops.pre_enter_smm(vcpu, buf);
vcpu->arch.hflags |= HF_SMM_MASK;
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
- if (kvm_x86_ops->get_nmi_mask(vcpu))
+ if (kvm_x86_ops.get_nmi_mask(vcpu))
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else
- kvm_x86_ops->set_nmi_mask(vcpu, true);
+ kvm_x86_ops.set_nmi_mask(vcpu, true);
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0x8000);
cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
- kvm_x86_ops->set_cr0(vcpu, cr0);
+ kvm_x86_ops.set_cr0(vcpu, cr0);
vcpu->arch.cr0 = cr0;
- kvm_x86_ops->set_cr4(vcpu, 0);
+ kvm_x86_ops.set_cr4(vcpu, 0);
/* Undocumented: IDT limit is set to zero on entry to SMM. */
dt.address = dt.size = 0;
- kvm_x86_ops->set_idt(vcpu, &dt);
+ kvm_x86_ops.set_idt(vcpu, &dt);
__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
@@ -7992,7 +8005,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
- kvm_x86_ops->set_efer(vcpu, 0);
+ kvm_x86_ops.set_efer(vcpu, 0);
#endif
kvm_update_cpuid(vcpu);
@@ -8030,7 +8043,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
kvm_apic_update_apicv(vcpu);
- kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
+ kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
@@ -8043,23 +8056,30 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
*/
void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
{
- if (!kvm_x86_ops->check_apicv_inhibit_reasons ||
- !kvm_x86_ops->check_apicv_inhibit_reasons(bit))
+ unsigned long old, new, expected;
+
+ if (!kvm_x86_ops.check_apicv_inhibit_reasons ||
+ !kvm_x86_ops.check_apicv_inhibit_reasons(bit))
return;
- if (activate) {
- if (!test_and_clear_bit(bit, &kvm->arch.apicv_inhibit_reasons) ||
- !kvm_apicv_activated(kvm))
- return;
- } else {
- if (test_and_set_bit(bit, &kvm->arch.apicv_inhibit_reasons) ||
- kvm_apicv_activated(kvm))
- return;
- }
+ old = READ_ONCE(kvm->arch.apicv_inhibit_reasons);
+ do {
+ expected = new = old;
+ if (activate)
+ __clear_bit(bit, &new);
+ else
+ __set_bit(bit, &new);
+ if (new == old)
+ break;
+ old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new);
+ } while (old != expected);
+
+ if (!!old == !!new)
+ return;
trace_kvm_apicv_update_request(activate, bit);
- if (kvm_x86_ops->pre_update_apicv_exec_ctrl)
- kvm_x86_ops->pre_update_apicv_exec_ctrl(kvm, activate);
+ if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
+ kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate);
kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
}
EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
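The rewritten kvm_request_apicv_update() replaces the test_and_{set,clear}_bit() pair with a lockless read-modify-write: build the new inhibit mask on a local copy, publish it with cmpxchg(), retry if another CPU raced in, and bail out early when the bit is already in the requested state. Only a 0 <-> nonzero transition of the whole mask (the !!old == !!new test) actually toggles APICv, since APICv is active only while no inhibit reason is set. The same pattern in isolation, with hypothetical names:

/* Illustrative sketch: atomically flip one bit in a shared mask and
 * report whether this caller performed the change. Names are made up. */
static bool flip_mask_bit(unsigned long *mask, unsigned int bit, bool set)
{
	unsigned long old, expected, new;

	old = READ_ONCE(*mask);
	do {
		expected = new = old;
		if (set)
			__set_bit(bit, &new);
		else
			__clear_bit(bit, &new);
		if (new == old)
			return false;	/* already in the requested state */
		old = cmpxchg(mask, expected, new);
	} while (old != expected);

	return true;
}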
@@ -8075,7 +8095,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
else {
if (vcpu->arch.apicv_active)
- kvm_x86_ops->sync_pir_to_irr(vcpu);
+ kvm_x86_ops.sync_pir_to_irr(vcpu);
if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
@@ -8095,7 +8115,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
vcpu_to_synic(vcpu)->vec_bitmap, 256);
- kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+ kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
@@ -8122,13 +8142,13 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
if (!lapic_in_kernel(vcpu))
return;
- if (!kvm_x86_ops->set_apic_access_page_addr)
+ if (!kvm_x86_ops.set_apic_access_page_addr)
return;
page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (is_error_page(page))
return;
- kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
+ kvm_x86_ops.set_apic_access_page_addr(vcpu, page_to_phys(page));
/*
* Do not pin apic access page in memory, the MMU notifier
@@ -8160,7 +8180,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
- if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
+ if (unlikely(!kvm_x86_ops.get_vmcs12_pages(vcpu))) {
r = 0;
goto out;
}
@@ -8180,8 +8200,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
- if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
- kvm_mmu_load_cr3(vcpu);
+ if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
+ kvm_mmu_load_pgd(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_vcpu_flush_tlb(vcpu, true);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
@@ -8266,7 +8286,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
- if (inject_pending_event(vcpu, req_int_win) != 0)
+ if (inject_pending_event(vcpu) != 0)
req_immediate_exit = true;
else {
/* Enable SMI/NMI/IRQ window open exits if needed.
@@ -8284,12 +8304,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* SMI.
*/
if (vcpu->arch.smi_pending && !is_smm(vcpu))
- if (!kvm_x86_ops->enable_smi_window(vcpu))
+ if (!kvm_x86_ops.enable_smi_window(vcpu))
req_immediate_exit = true;
if (vcpu->arch.nmi_pending)
- kvm_x86_ops->enable_nmi_window(vcpu);
+ kvm_x86_ops.enable_nmi_window(vcpu);
if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
- kvm_x86_ops->enable_irq_window(vcpu);
+ kvm_x86_ops.enable_irq_window(vcpu);
WARN_ON(vcpu->arch.exception.pending);
}
@@ -8306,7 +8326,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
preempt_disable();
- kvm_x86_ops->prepare_guest_switch(vcpu);
+ kvm_x86_ops.prepare_guest_switch(vcpu);
/*
* Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
@@ -8337,7 +8357,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* notified with kvm_vcpu_kick.
*/
if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
- kvm_x86_ops->sync_pir_to_irr(vcpu);
+ kvm_x86_ops.sync_pir_to_irr(vcpu);
if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
|| need_resched() || signal_pending(current)) {
@@ -8352,7 +8372,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_x86_ops->request_immediate_exit(vcpu);
+ kvm_x86_ops.request_immediate_exit(vcpu);
}
trace_kvm_entry(vcpu->vcpu_id);
@@ -8372,7 +8392,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
- kvm_x86_ops->run(vcpu);
+ kvm_x86_ops.run(vcpu);
/*
* Do this here before restoring debug registers on the host. And
@@ -8382,7 +8402,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
- kvm_x86_ops->sync_dirty_debug_regs(vcpu);
+ kvm_x86_ops.sync_dirty_debug_regs(vcpu);
kvm_update_dr0123(vcpu);
kvm_update_dr6(vcpu);
kvm_update_dr7(vcpu);
@@ -8404,7 +8424,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
- kvm_x86_ops->handle_exit_irqoff(vcpu, &exit_fastpath);
+ kvm_x86_ops.handle_exit_irqoff(vcpu, &exit_fastpath);
/*
* Consume any pending interrupts, including the possible source of
@@ -8447,12 +8467,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu);
- vcpu->arch.gpa_available = false;
- r = kvm_x86_ops->handle_exit(vcpu, exit_fastpath);
+ r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath);
return r;
cancel_injection:
- kvm_x86_ops->cancel_injection(vcpu);
+ kvm_x86_ops.cancel_injection(vcpu);
if (unlikely(vcpu->arch.apic_attention))
kvm_lapic_sync_from_vapic(vcpu);
out:
@@ -8462,13 +8481,13 @@ out:
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu) &&
- (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
+ (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- if (kvm_x86_ops->post_block)
- kvm_x86_ops->post_block(vcpu);
+ if (kvm_x86_ops.post_block)
+ kvm_x86_ops.post_block(vcpu);
if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
return 1;
@@ -8488,15 +8507,14 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
break;
default:
return -EINTR;
- break;
}
return 1;
}
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
- if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
- kvm_x86_ops->check_nested_events(vcpu, false);
+ if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events)
+ kvm_x86_ops.check_nested_events(vcpu);
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);
@@ -8652,7 +8670,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
kvm_save_current_fpu(vcpu->arch.user_fpu);
- /* PKRU is separately restored in kvm_x86_ops->run. */
+ /* PKRU is separately restored in kvm_x86_ops.run. */
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
~XFEATURE_MASK_PKRU);
@@ -8757,7 +8775,7 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
* that usually, but some badly designed PV devices (vmware
* backdoor interface) need this to work
*/
- emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
+ emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
}
regs->rax = kvm_rax_read(vcpu);
@@ -8855,10 +8873,10 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
- kvm_x86_ops->get_idt(vcpu, &dt);
+ kvm_x86_ops.get_idt(vcpu, &dt);
sregs->idt.limit = dt.size;
sregs->idt.base = dt.address;
- kvm_x86_ops->get_gdt(vcpu, &dt);
+ kvm_x86_ops.get_gdt(vcpu, &dt);
sregs->gdt.limit = dt.size;
sregs->gdt.base = dt.address;
@@ -8943,7 +8961,7 @@ out:
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
int reason, bool has_error_code, u32 error_code)
{
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
@@ -9005,10 +9023,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
- kvm_x86_ops->set_idt(vcpu, &dt);
+ kvm_x86_ops.set_idt(vcpu, &dt);
dt.size = sregs->gdt.limit;
dt.address = sregs->gdt.base;
- kvm_x86_ops->set_gdt(vcpu, &dt);
+ kvm_x86_ops.set_gdt(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
@@ -9018,16 +9036,16 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
- kvm_x86_ops->set_efer(vcpu, sregs->efer);
+ kvm_x86_ops.set_efer(vcpu, sregs->efer);
mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
- kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
+ kvm_x86_ops.set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
(X86_CR4_OSXSAVE | X86_CR4_PKE));
- kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+ kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
if (cpuid_update_needed)
kvm_update_cpuid(vcpu);
@@ -9133,7 +9151,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
*/
kvm_set_rflags(vcpu, rflags);
- kvm_x86_ops->update_bp_intercept(vcpu);
+ kvm_x86_ops.update_bp_intercept(vcpu);
r = 0;
@@ -9275,7 +9293,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct page *page;
int r;
- vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
@@ -9313,11 +9330,14 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
GFP_KERNEL_ACCOUNT))
goto fail_free_mce_banks;
+ if (!alloc_emulate_ctxt(vcpu))
+ goto free_wbinvd_dirty_mask;
+
vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.user_fpu) {
pr_err("kvm: failed to allocate userspace's fpu\n");
- goto free_wbinvd_dirty_mask;
+ goto free_emulate_ctxt;
}
vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
@@ -9342,7 +9362,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_hv_vcpu_init(vcpu);
- r = kvm_x86_ops->vcpu_create(vcpu);
+ r = kvm_x86_ops.vcpu_create(vcpu);
if (r)
goto free_guest_fpu;
@@ -9359,6 +9379,8 @@ free_guest_fpu:
kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
free_user_fpu:
kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
+free_emulate_ctxt:
+ kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
free_wbinvd_dirty_mask:
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
@@ -9393,11 +9415,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
mutex_unlock(&vcpu->mutex);
- if (!kvmclock_periodic_sync)
- return;
-
- schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
- KVMCLOCK_SYNC_PERIOD);
+ if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
+ schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+ KVMCLOCK_SYNC_PERIOD);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -9409,8 +9429,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvmclock_reset(vcpu);
- kvm_x86_ops->vcpu_free(vcpu);
+ kvm_x86_ops.vcpu_free(vcpu);
+ kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
@@ -9496,7 +9517,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.ia32_xss = 0;
- kvm_x86_ops->vcpu_reset(vcpu, init_event);
+ kvm_x86_ops.vcpu_reset(vcpu, init_event);
}
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
@@ -9521,7 +9542,7 @@ int kvm_arch_hardware_enable(void)
bool stable, backwards_tsc = false;
kvm_shared_msr_cpu_online();
- ret = kvm_x86_ops->hardware_enable();
+ ret = kvm_x86_ops.hardware_enable();
if (ret != 0)
return ret;
@@ -9603,18 +9624,29 @@ int kvm_arch_hardware_enable(void)
void kvm_arch_hardware_disable(void)
{
- kvm_x86_ops->hardware_disable();
+ kvm_x86_ops.hardware_disable();
drop_user_return_notifiers();
}
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
{
+ struct kvm_x86_init_ops *ops = opaque;
int r;
- r = kvm_x86_ops->hardware_setup();
+ rdmsrl_safe(MSR_EFER, &host_efer);
+
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ rdmsrl(MSR_IA32_XSS, host_xss);
+
+ r = ops->hardware_setup();
if (r != 0)
return r;
+ memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
+ supported_xss = 0;
+
cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
if (kvm_has_tsc_control) {
@@ -9631,28 +9663,26 @@ int kvm_arch_hardware_setup(void)
kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
}
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- rdmsrl(MSR_IA32_XSS, host_xss);
-
kvm_init_msr_list();
return 0;
}
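This hunk carries the central idea of the series: kvm_x86_ops is no longer a pointer handed over by the vendor module but a static struct filled by value from ops->runtime_ops, which is why every kvm_x86_ops-> dereference in this file becomes a plain member access. A minimal sketch of the pattern with made-up names:

/* Sketch of the static ops-table pattern (hypothetical names). The vendor
 * module provides a template once at setup; thereafter callers invoke
 * members directly instead of chasing a global pointer. */
struct backend_ops {
	int  (*hardware_enable)(void);
	void (*hardware_disable)(void);
};

static struct backend_ops active_ops;	/* zero-filled until setup */

static int backend_setup(const struct backend_ops *runtime_ops)
{
	memcpy(&active_ops, runtime_ops, sizeof(active_ops));
	return 0;
}

Note how kvm_arch_init() and kvm_arch_exit() above reuse kvm_x86_ops.hardware_enable as the "module loaded" flag now that there is no pointer left to test against NULL.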
void kvm_arch_hardware_unsetup(void)
{
- kvm_x86_ops->hardware_unsetup();
+ kvm_x86_ops.hardware_unsetup();
}
-int kvm_arch_check_processor_compat(void)
+int kvm_arch_check_processor_compat(void *opaque)
{
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ struct kvm_x86_init_ops *ops = opaque;
WARN_ON(!irqs_disabled());
if (kvm_host_cr4_reserved_bits(c) != cr4_reserved_bits)
return -EIO;
- return kvm_x86_ops->check_processor_compatibility();
+ return ops->check_processor_compatibility();
}
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
@@ -9678,9 +9708,16 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
pmu->need_cleanup = true;
kvm_make_request(KVM_REQ_PMU, vcpu);
}
- kvm_x86_ops->sched_in(vcpu, cpu);
+ kvm_x86_ops.sched_in(vcpu, cpu);
+}
+
+void kvm_arch_free_vm(struct kvm *kvm)
+{
+ kfree(kvm->arch.hyperv.hv_pa_pg);
+ vfree(kvm);
}
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
if (type)
@@ -9715,7 +9752,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm);
- return kvm_x86_ops->vm_init(kvm);
+ return kvm_x86_ops.vm_init(kvm);
}
int kvm_arch_post_init_vm(struct kvm *kvm)
@@ -9763,9 +9800,9 @@ void kvm_arch_sync_events(struct kvm *kvm)
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
int i, r;
- unsigned long hva;
+ unsigned long hva, uninitialized_var(old_npages);
struct kvm_memslots *slots = kvm_memslots(kvm);
- struct kvm_memory_slot *slot, old;
+ struct kvm_memory_slot *slot;
/* Called with kvm->slots_lock held. */
if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
@@ -9773,7 +9810,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
slot = id_to_memslot(slots, id);
if (size) {
- if (slot->npages)
+ if (slot && slot->npages)
return -EEXIST;
/*
@@ -9785,13 +9822,18 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
if (IS_ERR((void *)hva))
return PTR_ERR((void *)hva);
} else {
- if (!slot->npages)
+ if (!slot || !slot->npages)
return 0;
- hva = 0;
+ /*
+ * Stuff a non-canonical value to catch use-after-delete. This
+ * ends up being 0 on 32-bit KVM, but there's no better
+ * alternative.
+ */
+ hva = (unsigned long)(0xdeadull << 48);
+ old_npages = slot->npages;
}
- old = *slot;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_userspace_memory_region m;
@@ -9806,7 +9848,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
}
if (!size)
- vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+ vm_munmap(hva, old_npages * PAGE_SIZE);
return 0;
}
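A quick worked check on the poison value above: with 48-bit virtual addresses, x86_64 requires bits 63:47 to be copies of bit 47, so 0xdead << 48 can never be a valid hva and any stale access faults immediately. One way to express the canonicality test:

/* Illustration only: a 48-bit VA is canonical iff sign-extending from
 * bit 47 reproduces it; 0xdead000000000000 fails, 0x00007fffffffffff
 * passes. */
static bool is_canonical_48(unsigned long va)
{
	return ((long)(va << 16) >> 16) == (long)va;
}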
@@ -9833,8 +9875,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
mutex_unlock(&kvm->slots_lock);
}
- if (kvm_x86_ops->vm_destroy)
- kvm_x86_ops->vm_destroy(kvm);
+ if (kvm_x86_ops.vm_destroy)
+ kvm_x86_ops.vm_destroy(kvm);
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
kvm_free_vcpus(kvm);
@@ -9845,34 +9887,36 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_hv_destroy_vm(kvm);
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
- kvfree(free->arch.rmap[i]);
- free->arch.rmap[i] = NULL;
- }
+ kvfree(slot->arch.rmap[i]);
+ slot->arch.rmap[i] = NULL;
+
if (i == 0)
continue;
- if (!dont || free->arch.lpage_info[i - 1] !=
- dont->arch.lpage_info[i - 1]) {
- kvfree(free->arch.lpage_info[i - 1]);
- free->arch.lpage_info[i - 1] = NULL;
- }
+ kvfree(slot->arch.lpage_info[i - 1]);
+ slot->arch.lpage_info[i - 1] = NULL;
}
- kvm_page_track_free_memslot(free, dont);
+ kvm_page_track_free_memslot(slot);
}
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
int i;
+ /*
+ * Clear out the previous array pointers for the KVM_MR_MOVE case. The
+ * old arrays will be freed by __kvm_set_memory_region() if installing
+ * the new memslot is successful.
+ */
+ memset(&slot->arch, 0, sizeof(slot->arch));
+
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
unsigned long ugfn;
@@ -9903,11 +9947,9 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
ugfn = slot->userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
- * other, or if explicitly asked to, disable large page
- * support for this slot
+ * other, disable large page support for this slot.
*/
- if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
- !kvm_largepages_enabled()) {
+ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
unsigned long j;
for (j = 0; j < lpages; ++j)
@@ -9954,6 +9996,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
+ if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
+ return kvm_alloc_memslot_metadata(memslot,
+ mem->memory_size >> PAGE_SHIFT);
return 0;
}
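A worked instance of the alignment test in the hunk above: at the 2 MiB level KVM_PAGES_PER_HPAGE() is 512, so the check reduces to (base_gfn ^ ugfn) & 511. For a hypothetical slot with base_gfn 0x200 whose host mapping starts at ugfn 0x2a0, the XOR is 0xa0 and 0xa0 & 511 is nonzero, so guest and host huge-page boundaries can never coincide and large pages are disabled for the slot:

/* Hypothetical numbers for the check above. */
unsigned long base_gfn = 0x200, ugfn = 0x2a0;
bool misaligned = (base_gfn ^ ugfn) & (512 - 1);	/* 0xa0 -> true */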
@@ -9962,14 +10007,14 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
{
/* Still write protect RO slot */
if (new->flags & KVM_MEM_READONLY) {
- kvm_mmu_slot_remove_write_access(kvm, new);
+ kvm_mmu_slot_remove_write_access(kvm, new, PT_PAGE_TABLE_LEVEL);
return;
}
/*
* Call kvm_x86_ops dirty logging hooks when they are valid.
*
- * kvm_x86_ops->slot_disable_log_dirty is called when:
+ * kvm_x86_ops.slot_disable_log_dirty is called when:
*
* - KVM_MR_CREATE with dirty logging is disabled
* - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
@@ -9981,7 +10026,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* any additional overhead from PML when guest is running with dirty
* logging disabled for memory slots.
*
- * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
+ * kvm_x86_ops.slot_enable_log_dirty is called when switching new slot
* to dirty logging mode.
*
* If kvm_x86_ops dirty logging hooks are invalid, use write protect.
@@ -9997,19 +10042,32 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* See the comments in fast_page_fault().
*/
if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- if (kvm_x86_ops->slot_enable_log_dirty)
- kvm_x86_ops->slot_enable_log_dirty(kvm, new);
- else
- kvm_mmu_slot_remove_write_access(kvm, new);
+ if (kvm_x86_ops.slot_enable_log_dirty) {
+ kvm_x86_ops.slot_enable_log_dirty(kvm, new);
+ } else {
+ int level =
+ kvm_dirty_log_manual_protect_and_init_set(kvm) ?
+ PT_DIRECTORY_LEVEL : PT_PAGE_TABLE_LEVEL;
+
+ /*
+ * With initial-all-set, we don't need
+ * to write protect any small page because
+ * they're reported as dirty already. However,
+ * we still need to write-protect huge pages
+ * so that the page split can happen lazily on
+ * the first write to the huge page.
+ */
+ kvm_mmu_slot_remove_write_access(kvm, new, level);
+ }
} else {
- if (kvm_x86_ops->slot_disable_log_dirty)
- kvm_x86_ops->slot_disable_log_dirty(kvm, new);
+ if (kvm_x86_ops.slot_disable_log_dirty)
+ kvm_x86_ops.slot_disable_log_dirty(kvm, new);
}
}
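The new branch keys off kvm_dirty_log_manual_protect_and_init_set(), which is introduced elsewhere in this patch; its likely shape is simply a test of the KVM_DIRTY_LOG_INITIALLY_SET capability bit on the per-VM setting:

/* Hedged sketch of the predicate used above; the authoritative
 * definition lives in the generic KVM headers. */
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return READ_ONCE(kvm->manual_dirty_log_protect) &
	       KVM_DIRTY_LOG_INITIALLY_SET;
}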
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
@@ -10051,6 +10109,10 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
*/
if (change != KVM_MR_DELETE)
kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
+
+ /* Free the arrays associated with the old memslot. */
+ if (change == KVM_MR_MOVE)
+ kvm_arch_free_memslot(kvm, old);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -10067,8 +10129,8 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
return (is_guest_mode(vcpu) &&
- kvm_x86_ops->guest_apic_has_interrupt &&
- kvm_x86_ops->guest_apic_has_interrupt(vcpu));
+ kvm_x86_ops.guest_apic_has_interrupt &&
+ kvm_x86_ops.guest_apic_has_interrupt(vcpu));
}
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
@@ -10087,7 +10149,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
(vcpu->arch.nmi_pending &&
- kvm_x86_ops->nmi_allowed(vcpu)))
+ kvm_x86_ops.nmi_allowed(vcpu)))
return true;
if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
@@ -10120,7 +10182,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
kvm_test_request(KVM_REQ_EVENT, vcpu))
return true;
- if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
+ if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu))
return true;
return false;
@@ -10138,7 +10200,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
- return kvm_x86_ops->interrupt_allowed(vcpu);
+ return kvm_x86_ops.interrupt_allowed(vcpu);
}
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
@@ -10160,7 +10222,7 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags;
- rflags = kvm_x86_ops->get_rflags(vcpu);
+ rflags = kvm_x86_ops.get_rflags(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
rflags &= ~X86_EFLAGS_TF;
return rflags;
@@ -10172,7 +10234,7 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
rflags |= X86_EFLAGS_TF;
- kvm_x86_ops->set_rflags(vcpu, rflags);
+ kvm_x86_ops.set_rflags(vcpu, rflags);
}
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
@@ -10195,7 +10257,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
return;
if (!vcpu->arch.mmu->direct_map &&
- work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
+ work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
return;
kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
@@ -10283,7 +10345,7 @@ static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
(vcpu->arch.apf.send_user_only &&
- kvm_x86_ops->get_cpl(vcpu) == 0))
+ kvm_x86_ops.get_cpl(vcpu) == 0))
return false;
return true;
@@ -10303,7 +10365,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
* If interrupts are off we cannot even use an artificial
* halt state.
*/
- return kvm_x86_ops->interrupt_allowed(vcpu);
+ return kvm_x86_ops.interrupt_allowed(vcpu);
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
@@ -10432,7 +10494,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
irqfd->producer = prod;
- return kvm_x86_ops->update_pi_irte(irqfd->kvm,
+ return kvm_x86_ops.update_pi_irte(irqfd->kvm,
prod->irq, irqfd->gsi, 1);
}
@@ -10452,7 +10514,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
* when the irq is masked/disabled or the consumer side (KVM
* in this case) doesn't want to receive the interrupts.
*/
- ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
+ ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
if (ret)
printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
" fails: %d\n", irqfd->consumer.token, ret);
@@ -10461,7 +10523,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set)
{
- return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
+ return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set);
}
bool kvm_vector_hashing_enabled(void)
@@ -10518,4 +10580,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 3624665acee4..b968acc0516f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -5,6 +5,7 @@
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
#define KVM_DEFAULT_PLE_GAP 128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
@@ -96,7 +97,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
if (!is_long_mode(vcpu))
return false;
- kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
return cs_l;
}
@@ -149,11 +150,6 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}
-static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
-{
- return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
-}
-
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
@@ -164,12 +160,6 @@ static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}
-static inline bool emul_is_noncanonical_address(u64 la,
- struct x86_emulate_ctxt *ctxt)
-{
- return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
-}
-
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
gva_t gva, gfn_t gfn, unsigned access)
{
@@ -247,7 +237,7 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
- return is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu);
+ return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
}
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
@@ -280,13 +270,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len);
enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
-#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
- | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
- | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
- | XFEATURE_MASK_PKRU)
extern u64 host_xcr0;
+extern u64 supported_xcr0;
+extern u64 supported_xss;
-extern u64 kvm_supported_xcr0(void);
+static inline bool kvm_mpx_supported(void)
+{
+ return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+ == (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
+}
extern unsigned int min_timer_period_us;
diff --git a/arch/x86/lib/.gitignore b/arch/x86/lib/.gitignore
index 8df89f0a3fe6..8ae0f93ecbfd 100644
--- a/arch/x86/lib/.gitignore
+++ b/arch/x86/lib/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
inat-tables.c
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 629fdf13f846..a51df516b87b 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1222,7 +1222,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
return 1;
/* read, not present: */
- if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+ if (unlikely(!vma_is_accessible(vma)))
return 1;
return 0;
@@ -1310,7 +1310,7 @@ void do_user_addr_fault(struct pt_regs *regs,
struct task_struct *tsk;
struct mm_struct *mm;
vm_fault_t fault, major = 0;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
mm = tsk->mm;
@@ -1464,27 +1464,23 @@ good_area:
fault = handle_mm_fault(vma, address, flags);
major |= fault & VM_FAULT_MAJOR;
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, hw_error_code, address, SIGBUS,
+ BUS_ADRERR);
+ return;
+ }
+
/*
* If we need to retry, the mmap_sem has already been released,
* and if there is a fatal signal pending there is no guarantee
* that we made any progress. Handle this case first.
*/
- if (unlikely(fault & VM_FAULT_RETRY)) {
- /* Retry at most once */
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- if (!fatal_signal_pending(tsk))
- goto retry;
- }
-
- /* User mode? Just return to handle the fatal exception */
- if (flags & FAULT_FLAG_USER)
- return;
-
- /* Not returning to user mode? Handle exceptions or die: */
- no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
- return;
+ if (unlikely((fault & VM_FAULT_RETRY) &&
+ (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
up_read(&mm->mmap_sem);
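The early bail-out relies on fault_signal_pending(), a then-new helper from the signal headers; a hedged sketch of its shape, checking that the fault aborted with VM_FAULT_RETRY because a signal needs handling:

/* Hedged sketch of fault_signal_pending(); see linux/sched/signal.h for
 * the authoritative definition. */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}

Note the retry branch no longer clears FAULT_FLAG_ALLOW_RETRY: with FAULT_FLAG_DEFAULT the core mm uses FAULT_FLAG_TRIED to guarantee forward progress, so the arch code may simply loop.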
diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore
index b6ed3a2555cb..6c3464f46166 100644
--- a/arch/x86/realmode/rm/.gitignore
+++ b/arch/x86/realmode/rm/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
pasyms.h
realmode.lds
realmode.relocs
diff --git a/arch/x86/tools/.gitignore b/arch/x86/tools/.gitignore
index be0ed065249b..d36dc7cf9115 100644
--- a/arch/x86/tools/.gitignore
+++ b/arch/x86/tools/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
relocs
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
index 593d5f3902bd..478710384b34 100644
--- a/arch/x86/um/asm/processor.h
+++ b/arch/x86/um/asm/processor.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_PROCESSOR_H
#define __UM_PROCESSOR_H
+#include <linux/time-internal.h>
/* include faultinfo structure */
#include <sysdep/faultinfo.h>
@@ -21,12 +22,19 @@
#include <asm/user.h>
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
+static __always_inline void rep_nop(void)
{
__asm__ __volatile__("rep;nop": : :"memory");
}
-#define cpu_relax() rep_nop()
+static __always_inline void cpu_relax(void)
+{
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL)
+ time_travel_ndelay(1);
+ else
+ rep_nop();
+}
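The distinction matters to any busy-wait loop: under the UML time-travel modes there is no real CPU consuming cycles, so a bare rep;nop spin would never let simulated time (and hence the awaited event) advance. A typical caller, with a hypothetical flag variable:

/* Hypothetical spin-wait: without the time_travel_ndelay(1) above this
 * loop could spin forever in TT_MODE_INFCPU / TT_MODE_EXTERNAL. */
while (!READ_ONCE(ready_flag))
	cpu_relax();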
#define task_pt_regs(t) (&(t)->thread.regs)
diff --git a/arch/x86/um/vdso/.gitignore b/arch/x86/um/vdso/.gitignore
index f8b69d84238e..652e31d82582 100644
--- a/arch/x86/um/vdso/.gitignore
+++ b/arch/x86/um/vdso/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso.lds
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 802ee5bba66c..8fb8a50a28b4 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -53,6 +53,7 @@ static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
+void asm_cpu_bringup_and_idle(void);
static void cpu_bringup(void)
{
@@ -309,7 +310,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
* pointing just below where pt_regs would be if it were a normal
* kernel entry.
*/
- ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+ ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 1d0cee3163e4..7d1c4fcbe8f7 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -35,7 +35,11 @@ SYM_CODE_START(startup_xen)
rep __ASM_SIZE(stos)
mov %_ASM_SI, xen_start_info
- mov $init_thread_union+THREAD_SIZE, %_ASM_SP
+#ifdef CONFIG_X86_64
+ mov initial_stack(%rip), %rsp
+#else
+ mov pa(initial_stack), %esp
+#endif
#ifdef CONFIG_X86_64
/* Set up %gs.
@@ -51,9 +55,19 @@ SYM_CODE_START(startup_xen)
wrmsr
#endif
- jmp xen_start_kernel
+ call xen_start_kernel
SYM_CODE_END(startup_xen)
__FINIT
+
+#ifdef CONFIG_XEN_PV_SMP
+.pushsection .text
+SYM_CODE_START(asm_cpu_bringup_and_idle)
+ UNWIND_HINT_EMPTY
+
+ call cpu_bringup_and_idle
+SYM_CODE_END(asm_cpu_bringup_and_idle)
+.popsection
+#endif
#endif
.pushsection .text
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 32ee759a3fda..de229424b659 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -6,7 +6,7 @@ config XTENSA
select ARCH_HAS_DMA_PREP_COHERENT if MMU
select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
- select ARCH_HAS_UNCACHED_SEGMENT if MMU
+ select ARCH_HAS_DMA_SET_UNCACHED if MMU
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore
index 38177c7ebcab..615f1f741a03 100644
--- a/arch/xtensa/boot/.gitignore
+++ b/arch/xtensa/boot/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
uImage
zImage.redboot
diff --git a/arch/xtensa/boot/boot-elf/.gitignore b/arch/xtensa/boot/boot-elf/.gitignore
index 5ff8fbb8561b..7473404500cc 100644
--- a/arch/xtensa/boot/boot-elf/.gitignore
+++ b/arch/xtensa/boot/boot-elf/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
boot.lds
diff --git a/arch/xtensa/boot/lib/.gitignore b/arch/xtensa/boot/lib/.gitignore
index 1629a6167755..805a8249252a 100644
--- a/arch/xtensa/boot/lib/.gitignore
+++ b/arch/xtensa/boot/lib/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
inffast.c
inflate.c
inftrees.c
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 271917c24b7f..9718e9593564 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -1,36 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += bug.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
generic-y += extable.h
-generic-y += fb.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
generic-y += param.h
-generic-y += percpu.h
-generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += sections.h
-generic-y += topology.h
-generic-y += trace_clock.h
generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/xtensa/kernel/.gitignore b/arch/xtensa/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/xtensa/kernel/.gitignore
+++ b/arch/xtensa/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 72b6222daa0b..17c4384f8495 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -88,18 +88,12 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
/*
* Memory caching is platform-dependent in noMMU xtensa configurations.
- * The following two functions should be implemented in platform code
- * in order to enable coherent DMA memory operations when CONFIG_MMU is not
- * enabled.
+ * This function should be implemented in platform code in order to enable
+ * coherent DMA memory operations when CONFIG_MMU is not enabled.
*/
#ifdef CONFIG_MMU
-void *uncached_kernel_address(void *p)
+void *arch_dma_set_uncached(void *p, size_t size)
{
return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
-
-void *cached_kernel_address(void *p)
-{
- return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
#endif /* CONFIG_MMU */
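The xtensa conversion shows why the interface could be collapsed to one call: KSEG exposes the same physical memory through a cached window and a bypass (uncached) window, so producing an uncached alias is constant address arithmetic and the size argument goes unused here. A sketch of how a generic caller is expected to consume the renamed hook, assuming only the contract visible in this hunk (it returns the uncached alias, or an ERR_PTR if an implementation that has to remap fails):

	/* after allocating 'page' for a coherent DMA buffer: */
	arch_dma_prep_coherent(page, size);	/* write back + invalidate */
	addr = arch_dma_set_uncached(page_address(page), size);
	if (IS_ERR(addr))
		return NULL;	/* caller frees the pages */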
diff --git a/arch/xtensa/kernel/syscalls/syscallhdr.sh b/arch/xtensa/kernel/syscalls/syscallhdr.sh
index d37db641ca31..eebfb8a8ace6 100644
--- a/arch/xtensa/kernel/syscalls/syscallhdr.sh
+++ b/arch/xtensa/kernel/syscalls/syscallhdr.sh
@@ -32,5 +32,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#define __NR_syscalls\t%s\n" "${nxt}"
printf "#endif\n"
printf "\n"
- printf "#endif /* %s */" "${fileguard}"
+ printf "#endif /* %s */\n" "${fileguard}"
) > "$out"
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index bee30a77cd70..e7172bd53ced 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -43,7 +43,7 @@ void do_page_fault(struct pt_regs *regs)
int is_write, is_exec;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
code = SEGV_MAPERR;
@@ -110,7 +110,7 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -128,7 +128,6 @@ good_area:
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/* No need to up_read(&mm->mmap_sem) as we would
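FAULT_FLAG_DEFAULT and fault_signal_pending() come from this release's cross-architecture fault-path cleanup: the flag bundles ALLOW_RETRY | KILLABLE | INTERRUPTIBLE, and dropping the "flags &= ~FAULT_FLAG_ALLOW_RETRY" line means a fault may now retry more than once. The helper centralizes a test each architecture used to open-code; paraphrased from the generic header added by that series:

	static inline bool fault_signal_pending(vm_fault_t fault_flags,
						struct pt_regs *regs)
	{
		/* bail out of the fault when a retry was requested but a
		 * fatal (or, for user mode, any) signal needs handling */
		return unlikely((fault_flags & VM_FAULT_RETRY) &&
				(fatal_signal_pending(current) ||
				 (user_mode(regs) && signal_pending(current))));
	}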
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 488341628256..7b8a42c35102 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -16,7 +16,7 @@
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the virtio device @vdev has at least as many available
- * interrupt vetors as @set has queues. It will then queuery the vector
+ * interrupt vectors as @set has queues. It will then query the vector
 * corresponding to each queue for its affinity mask and build a queue mapping
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
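The mapping the fixed comment describes reduces to a short loop. A minimal sketch, close in shape to blk_mq_virtio_map_queues() itself, assuming virtio's get_vq_affinity() config callback and the generic blk_mq_map_queues() fallback:

	static int map_queues_by_affinity(struct blk_mq_queue_map *qmap,
					  struct virtio_device *vdev,
					  int first_vec)
	{
		const struct cpumask *mask;
		unsigned int queue, cpu;

		for (queue = 0; queue < qmap->nr_queues; queue++) {
			mask = vdev->config->get_vq_affinity(vdev,
							     first_vec + queue);
			if (!mask)	/* no per-vector affinity known */
				return blk_mq_map_queues(qmap);

			for_each_cpu(cpu, mask)
				qmap->mq_map[cpu] = qmap->queue_offset + queue;
		}
		return 0;
	}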
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index b4e73d5dd5c2..ef722f04f88a 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -193,6 +193,10 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
+
+ /* ZBC Commands */
+ __set_bit(ZBC_OUT, filter->write_ok);
+ __set_bit(ZBC_IN, filter->read_ok);
}
int blk_verify_command(unsigned char *cmd, fmode_t mode)
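The filter extended above is a pair of per-opcode bitmaps: ZBC_IN (REPORT ZONES) joins the read-safe set and ZBC_OUT (zone open/close/finish/reset) the write-safe set, letting unprivileged zoned-block tooling issue these through SG_IO. A simplified shape of the check blk_verify_command() performs against them (error codes elided):

	static bool cmd_allowed(struct blk_cmd_filter *filter,
				const unsigned char *cmd, fmode_t mode)
	{
		if (capable(CAP_SYS_RAWIO))	/* root may send anything */
			return true;

		if (test_bit(cmd[0], filter->read_ok))
			return true;

		/* write-class opcodes also require a writable open */
		return (mode & FMODE_WRITE) &&
		       test_bit(cmd[0], filter->write_ok);
	}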
diff --git a/certs/.gitignore b/certs/.gitignore
index f51aea4a71ec..2a2483990686 100644
--- a/certs/.gitignore
+++ b/certs/.gitignore
@@ -1,4 +1,2 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
x509_certificate_list
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 439367a8e95c..b1cd3535c525 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -821,8 +821,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
struct af_alg_tsgl *sgl;
struct af_alg_control con = {};
long copied = 0;
- bool enc = 0;
- bool init = 0;
+ bool enc = false;
+ bool init = false;
int err = 0;
if (msg->msg_controllen) {
@@ -830,13 +830,13 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
if (err)
return err;
- init = 1;
+ init = true;
switch (con.op) {
case ALG_OP_ENCRYPT:
- enc = 1;
+ enc = true;
break;
case ALG_OP_DECRYPT:
- enc = 0;
+ enc = false;
break;
default:
return -EINVAL;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 178f4cd75ef1..da1ffa4f7f8d 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -83,7 +83,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
goto unlock;
}
- ctx->more = 0;
+ ctx->more = false;
while (msg_data_left(msg)) {
int len = msg_data_left(msg);
@@ -211,7 +211,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
}
if (!result || ctx->more) {
- ctx->more = 0;
+ ctx->more = false;
err = crypto_wait_req(crypto_ahash_final(&ctx->req),
&ctx->wait);
if (err)
@@ -436,7 +436,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ctx->result = NULL;
ctx->len = len;
- ctx->more = 0;
+ ctx->more = false;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 589008146fce..149b70df2a91 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -458,7 +458,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
inst->alg.encrypt = crypto_authenc_esn_encrypt;
inst->alg.decrypt = crypto_authenc_esn_decrypt;
- inst->free = crypto_authenc_esn_free,
+ inst->free = crypto_authenc_esn_free;
err = aead_register_instance(tmpl, inst);
if (err) {
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 241ecdc5c4e0..d1fb01bbc814 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -717,7 +717,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
- const char *ccm_name;
int err;
algt = crypto_get_attr_type(tb);
@@ -729,19 +728,15 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
mask = crypto_requires_sync(algt->type, algt->mask);
- ccm_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(ccm_name))
- return PTR_ERR(ccm_name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = aead_instance_ctx(inst);
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
- ccm_name, 0, mask);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
alg = crypto_spawn_aead_alg(spawn);
@@ -749,11 +744,11 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
/* We only support 16-byte blocks. */
if (crypto_aead_alg_ivsize(alg) != 16)
- goto out_drop_alg;
+ goto err_free_inst;
/* Not a stream cipher? */
if (alg->base.cra_blocksize != 1)
- goto out_drop_alg;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
@@ -762,7 +757,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4309(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ goto err_free_inst;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -786,17 +781,11 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
inst->free = crypto_rfc4309_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto out_drop_alg;
-
-out:
+ if (err) {
+err_free_inst:
+ crypto_rfc4309_free(inst);
+ }
return err;
-
-out_drop_alg:
- crypto_drop_aead(spawn);
-out_free_inst:
- kfree(inst);
- goto out;
}
static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
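This ccm.c hunk is the first of a series below (cryptd, ctr, cts, gcm, geniv, lrw, pcrypt, rsa-pkcs1pad, xts) applying one refactor: funnel every failure through a single err_free_inst label that calls the instance's free routine, instead of hand-unwinding each resource. Two prerequisites make that safe: the instance is kzalloc()ed, so crypto_drop_*() on a never-grabbed spawn is a no-op, and crypto_grab_*() now accepts the ERR_PTR that crypto_attr_alg_name() may return, which is why the separate IS_ERR() checks disappear. The pattern in miniature, as a hypothetical template:

	static void example_free(struct aead_instance *inst)
	{
		crypto_drop_aead(aead_instance_ctx(inst));
		kfree(inst);
	}

	static int example_create(struct crypto_template *tmpl,
				  struct rtattr **tb)
	{
		struct aead_instance *inst;
		int err;

		inst = kzalloc(sizeof(*inst) +
			       sizeof(struct crypto_aead_spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;

		/* an ERR_PTR name simply propagates as err */
		err = crypto_grab_aead(aead_instance_ctx(inst),
				       aead_crypto_instance(inst),
				       crypto_attr_alg_name(tb[1]), 0, 0);
		if (err)
			goto err_free_inst;

		/* ...fill in inst->alg; any failure: goto err_free_inst... */

		inst->free = example_free;
		err = aead_register_instance(tmpl, inst);
		if (err) {
	err_free_inst:
			example_free(inst);
		}
		return err;
	}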
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index d94c75c840a5..283212262adb 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -369,7 +369,6 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
struct skcipherd_instance_ctx *ctx;
struct skcipher_instance *inst;
struct skcipher_alg *alg;
- const char *name;
u32 type;
u32 mask;
int err;
@@ -379,10 +378,6 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
cryptd_check_internal(tb, &type, &mask);
- name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(name))
- return PTR_ERR(name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -391,14 +386,14 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
ctx->queue = queue;
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
- name, type, mask);
+ crypto_attr_alg_name(tb[1]), type, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
alg = crypto_spawn_skcipher_alg(&ctx->spawn);
err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
if (err)
- goto out_drop_skcipher;
+ goto err_free_inst;
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
@@ -421,10 +416,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
err = skcipher_register_instance(tmpl, inst);
if (err) {
-out_drop_skcipher:
- crypto_drop_skcipher(&ctx->spawn);
-out_free_inst:
- kfree(inst);
+err_free_inst:
+ cryptd_skcipher_free(inst);
}
return err;
}
@@ -694,8 +687,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
err = ahash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
- crypto_drop_shash(&ctx->spawn);
- kfree(inst);
+ cryptd_hash_free(inst);
}
return err;
}
@@ -833,17 +825,12 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
struct aead_instance_ctx *ctx;
struct aead_instance *inst;
struct aead_alg *alg;
- const char *name;
u32 type = 0;
u32 mask = CRYPTO_ALG_ASYNC;
int err;
cryptd_check_internal(tb, &type, &mask);
- name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(name))
- return PTR_ERR(name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -852,14 +839,14 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
ctx->queue = queue;
err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
- name, type, mask);
+ crypto_attr_alg_name(tb[1]), type, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
if (err)
- goto out_drop_aead;
+ goto err_free_inst;
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
@@ -879,10 +866,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
err = aead_register_instance(tmpl, inst);
if (err) {
-out_drop_aead:
- crypto_drop_aead(&ctx->aead_spawn);
-out_free_inst:
- kfree(inst);
+err_free_inst:
+ cryptd_aead_free(inst);
}
return err;
}
diff --git a/crypto/ctr.c b/crypto/ctr.c
index a8feab621c6c..31ac4ae598e1 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -260,7 +260,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
struct skcipher_instance *inst;
struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
- const char *cipher_name;
u32 mask;
int err;
@@ -272,10 +271,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
return -EINVAL;
- cipher_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(cipher_name))
- return PTR_ERR(cipher_name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -287,7 +282,7 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
- cipher_name, 0, mask);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
@@ -296,20 +291,20 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
/* We only support 16-byte blocks. */
err = -EINVAL;
if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
- goto err_drop_spawn;
+ goto err_free_inst;
/* Not a stream cipher? */
if (alg->base.cra_blocksize != 1)
- goto err_drop_spawn;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
- goto err_drop_spawn;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc3686(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto err_drop_spawn;
+ goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
@@ -336,17 +331,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
inst->free = crypto_rfc3686_free;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto err_drop_spawn;
-
-out:
- return err;
-
-err_drop_spawn:
- crypto_drop_skcipher(spawn);
+ if (err) {
err_free_inst:
- kfree(inst);
- goto out;
+ crypto_rfc3686_free(inst);
+ }
+ return err;
}
static struct crypto_template crypto_ctr_tmpls[] = {
diff --git a/crypto/cts.c b/crypto/cts.c
index 48188adc8e91..5e005c4f0221 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -327,7 +327,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
struct skcipher_instance *inst;
struct crypto_attr_type *algt;
struct skcipher_alg *alg;
- const char *cipher_name;
u32 mask;
int err;
@@ -340,10 +339,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
mask = crypto_requires_sync(algt->type, algt->mask);
- cipher_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(cipher_name))
- return PTR_ERR(cipher_name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -351,7 +346,7 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
- cipher_name, 0, mask);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
@@ -359,15 +354,15 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
err = -EINVAL;
if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
- goto err_drop_spawn;
+ goto err_free_inst;
if (strncmp(alg->base.cra_name, "cbc(", 4))
- goto err_drop_spawn;
+ goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
&alg->base);
if (err)
- goto err_drop_spawn;
+ goto err_free_inst;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -391,17 +386,11 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->free = crypto_cts_free;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto err_drop_spawn;
-
-out:
- return err;
-
-err_drop_spawn:
- crypto_drop_skcipher(spawn);
+ if (err) {
err_free_inst:
- kfree(inst);
- goto out;
+ crypto_cts_free(inst);
+ }
+ return err;
}
static struct crypto_template crypto_cts_tmpl = {
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 8e5c0ac65661..0103d28c541e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -840,7 +840,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
- const char *ccm_name;
int err;
algt = crypto_get_attr_type(tb);
@@ -852,19 +851,15 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
mask = crypto_requires_sync(algt->type, algt->mask);
- ccm_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(ccm_name))
- return PTR_ERR(ccm_name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = aead_instance_ctx(inst);
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
- ccm_name, 0, mask);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
alg = crypto_spawn_aead_alg(spawn);
@@ -872,11 +867,11 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
/* Underlying IV size must be 12. */
if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
- goto out_drop_alg;
+ goto err_free_inst;
/* Not a stream cipher? */
if (alg->base.cra_blocksize != 1)
- goto out_drop_alg;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
@@ -885,7 +880,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4106(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ goto err_free_inst;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -909,17 +904,11 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
inst->free = crypto_rfc4106_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto out_drop_alg;
-
-out:
+ if (err) {
+err_free_inst:
+ crypto_rfc4106_free(inst);
+ }
return err;
-
-out_drop_alg:
- crypto_drop_aead(spawn);
-out_free_inst:
- kfree(inst);
- goto out;
}
static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1071,10 +1060,8 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
struct crypto_rfc4543_instance_ctx *ctx;
- const char *ccm_name;
int err;
algt = crypto_get_attr_type(tb);
@@ -1086,32 +1073,27 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
mask = crypto_requires_sync(algt->type, algt->mask);
- ccm_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(ccm_name))
- return PTR_ERR(ccm_name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = aead_instance_ctx(inst);
- spawn = &ctx->aead;
- err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
- ccm_name, 0, mask);
+ err = crypto_grab_aead(&ctx->aead, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
- alg = crypto_spawn_aead_alg(spawn);
+ alg = crypto_spawn_aead_alg(&ctx->aead);
err = -EINVAL;
/* Underlying IV size must be 12. */
if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
- goto out_drop_alg;
+ goto err_free_inst;
/* Not a stream cipher? */
if (alg->base.cra_blocksize != 1)
- goto out_drop_alg;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
@@ -1120,7 +1102,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4543(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ goto err_free_inst;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -1141,20 +1123,14 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
inst->alg.encrypt = crypto_rfc4543_encrypt;
inst->alg.decrypt = crypto_rfc4543_decrypt;
- inst->free = crypto_rfc4543_free,
+ inst->free = crypto_rfc4543_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto out_drop_alg;
-
-out:
+ if (err) {
+err_free_inst:
+ crypto_rfc4543_free(inst);
+ }
return err;
-
-out_drop_alg:
- crypto_drop_aead(spawn);
-out_free_inst:
- kfree(inst);
- goto out;
}
static struct crypto_template crypto_gcm_tmpls[] = {
diff --git a/crypto/geniv.c b/crypto/geniv.c
index dbcc640274cd..6a90c52d49ad 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -41,7 +41,6 @@ static void aead_geniv_free(struct aead_instance *inst)
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type, u32 mask)
{
- const char *name;
struct crypto_aead_spawn *spawn;
struct crypto_attr_type *algt;
struct aead_instance *inst;
@@ -57,10 +56,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
- name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(name))
- return ERR_CAST(name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return ERR_PTR(-ENOMEM);
@@ -71,7 +66,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
mask |= crypto_requires_sync(algt->type, algt->mask);
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
- name, type, mask);
+ crypto_attr_alg_name(tb[1]), type, mask);
if (err)
goto err_free_inst;
@@ -82,17 +77,17 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
err = -EINVAL;
if (ivsize < sizeof(u64))
- goto err_drop_alg;
+ goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"%s(%s)", tmpl->name, alg->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
+ goto err_free_inst;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -111,10 +106,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
out:
return inst;
-err_drop_alg:
- crypto_drop_aead(spawn);
err_free_inst:
- kfree(inst);
+ aead_geniv_free(inst);
inst = ERR_PTR(err);
goto out;
}
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 63c485c0d8a6..376d7ed3f1f8 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -343,15 +343,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
err = -EINVAL;
if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
- goto err_drop_spawn;
+ goto err_free_inst;
if (crypto_skcipher_alg_ivsize(alg))
- goto err_drop_spawn;
+ goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
&alg->base);
if (err)
- goto err_drop_spawn;
+ goto err_free_inst;
err = -EINVAL;
cipher_name = alg->base.cra_name;
@@ -364,20 +364,20 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
if (len < 2 || len >= sizeof(ecb_name))
- goto err_drop_spawn;
+ goto err_free_inst;
if (ecb_name[len - 1] != ')')
- goto err_drop_spawn;
+ goto err_free_inst;
ecb_name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
- goto err_drop_spawn;
+ goto err_free_inst;
}
} else
- goto err_drop_spawn;
+ goto err_free_inst;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -403,17 +403,11 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->free = free;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto err_drop_spawn;
-
-out:
- return err;
-
-err_drop_spawn:
- crypto_drop_skcipher(spawn);
+ if (err) {
err_free_inst:
- kfree(inst);
- goto out;
+ free(inst);
+ }
+ return err;
}
static struct crypto_template crypto_tmpl = {
diff --git a/crypto/md5.c b/crypto/md5.c
index 22dc60bc0437..72c0c46fb5ee 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -23,9 +23,6 @@
#include <linux/types.h>
#include <asm/byteorder.h>
-#define MD5_DIGEST_WORDS 4
-#define MD5_MESSAGE_BYTES 64
-
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 1b632139a8c1..8bddc65cd509 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -232,17 +232,12 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
- const char *name;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
- name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(name))
- return PTR_ERR(name);
-
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -252,21 +247,21 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
ctx = aead_instance_ctx(inst);
ctx->psenc = padata_alloc_shell(pencrypt);
if (!ctx->psenc)
- goto out_free_inst;
+ goto err_free_inst;
ctx->psdec = padata_alloc_shell(pdecrypt);
if (!ctx->psdec)
- goto out_free_psenc;
+ goto err_free_inst;
err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
- name, 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, 0);
if (err)
- goto out_free_psdec;
+ goto err_free_inst;
alg = crypto_spawn_aead_alg(&ctx->spawn);
err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
if (err)
- goto out_drop_aead;
+ goto err_free_inst;
inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
@@ -286,21 +281,11 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
inst->free = pcrypt_free;
err = aead_register_instance(tmpl, inst);
- if (err)
- goto out_drop_aead;
-
-out:
+ if (err) {
+err_free_inst:
+ pcrypt_free(inst);
+ }
return err;
-
-out_drop_aead:
- crypto_drop_aead(&ctx->spawn);
-out_free_psdec:
- padata_free_shell(ctx->psdec);
-out_free_psenc:
- padata_free_shell(ctx->psenc);
-out_free_inst:
- kfree(inst);
- goto out;
}
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
diff --git a/crypto/proc.c b/crypto/proc.c
index 7b91557adccb..08d8c2bc7e62 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -60,7 +60,7 @@ static int c_show(struct seq_file *m, void *p)
goto out;
}
- switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
+ switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_CIPHER:
seq_printf(m, "type : cipher\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
diff --git a/crypto/rng.c b/crypto/rng.c
index 1e21231f71c9..1490d210f1a1 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -37,12 +37,16 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
crypto_stats_get(alg);
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ crypto_alg_put(alg);
return -ENOMEM;
+ }
err = get_random_bytes_wait(buf, slen);
- if (err)
+ if (err) {
+ crypto_alg_put(alg);
goto out;
+ }
seed = buf;
}
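crypto_stats_get() takes a reference on the algorithm that crypto_stats_rng_seed() drops at the end of the function; the two early exits above returned without dropping it, leaking a reference whenever seeding material could not be obtained. With the fix applied, the function balances out roughly like this (simplified reconstruction, not a verbatim quote):

	int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed,
			     unsigned int slen)
	{
		struct crypto_alg *alg = tfm->base.__crt_alg;
		u8 *buf = NULL;
		int err;

		crypto_stats_get(alg);			/* takes a reference */
		if (!seed && slen) {
			buf = kmalloc(slen, GFP_KERNEL);
			if (!buf) {
				crypto_alg_put(alg);	/* balance it */
				return -ENOMEM;
			}
			err = get_random_bytes_wait(buf, slen);
			if (err) {
				crypto_alg_put(alg);	/* balance it */
				goto out;
			}
			seed = buf;
		}
		err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
		crypto_stats_rng_seed(alg, err);	/* drops the reference */
	out:
		kzfree(buf);
		return err;
	}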
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 176b63afec8d..d31031de51bc 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -596,14 +596,11 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- const struct rsa_asn1_template *digest_info;
struct crypto_attr_type *algt;
u32 mask;
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
- struct crypto_akcipher_spawn *spawn;
struct akcipher_alg *rsa_alg;
- const char *rsa_alg_name;
const char *hash_name;
int err;
@@ -616,60 +613,49 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
mask = crypto_requires_sync(algt->type, algt->mask);
- rsa_alg_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(rsa_alg_name))
- return PTR_ERR(rsa_alg_name);
-
- hash_name = crypto_attr_alg_name(tb[2]);
- if (IS_ERR(hash_name))
- hash_name = NULL;
-
- if (hash_name) {
- digest_info = rsa_lookup_asn1(hash_name);
- if (!digest_info)
- return -EINVAL;
- } else
- digest_info = NULL;
-
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = akcipher_instance_ctx(inst);
- spawn = &ctx->spawn;
- ctx->digest_info = digest_info;
- err = crypto_grab_akcipher(spawn, akcipher_crypto_instance(inst),
- rsa_alg_name, 0, mask);
+ err = crypto_grab_akcipher(&ctx->spawn, akcipher_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
- goto out_free_inst;
+ goto err_free_inst;
- rsa_alg = crypto_spawn_akcipher_alg(spawn);
+ rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
err = -ENAMETOOLONG;
-
- if (!hash_name) {
+ hash_name = crypto_attr_alg_name(tb[2]);
+ if (IS_ERR(hash_name)) {
if (snprintf(inst->alg.base.cra_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ goto err_free_inst;
} else {
+ ctx->digest_info = rsa_lookup_asn1(hash_name);
+ if (!ctx->digest_info) {
+ err = -EINVAL;
+ goto err_free_inst;
+ }
+
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
rsa_alg->base.cra_driver_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_drop_alg;
+ goto err_free_inst;
}
inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
@@ -691,15 +677,10 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->free = pkcs1pad_free;
err = akcipher_register_instance(tmpl, inst);
- if (err)
- goto out_drop_alg;
-
- return 0;
-
-out_drop_alg:
- crypto_drop_akcipher(spawn);
-out_free_inst:
- kfree(inst);
+ if (err) {
+err_free_inst:
+ pkcs1pad_free(inst);
+ }
return err;
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index f42f486e90e8..ba0b7702f2e9 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1514,8 +1514,8 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
return;
}
- pr_info("\ntesting speed of async %s (%s) %s\n", algo,
- get_driver_name(crypto_skcipher, tfm), e);
+ pr_info("\ntesting speed of %s %s (%s) %s\n", async ? "async" : "sync",
+ algo, get_driver_name(crypto_skcipher, tfm), e);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index ccb3d60729fc..6863f911fcee 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -91,10 +91,11 @@ struct aead_test_suite {
unsigned int einval_allowed : 1;
/*
- * Set if the algorithm intentionally ignores the last 8 bytes of the
- * AAD buffer during decryption.
+ * Set if this algorithm requires that the IV be located at the end of
+ * the AAD buffer, in addition to being given in the normal way. The
+ * behavior when the two IV copies differ is implementation-defined.
*/
- unsigned int esp_aad : 1;
+ unsigned int aad_iv : 1;
};
struct cipher_test_suite {
@@ -2167,9 +2168,10 @@ struct aead_extra_tests_ctx {
* here means the full ciphertext including the authentication tag. The
* authentication tag (and hence also the ciphertext) is assumed to be nonempty.
*/
-static void mutate_aead_message(struct aead_testvec *vec, bool esp_aad)
+static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
+ unsigned int ivsize)
{
- const unsigned int aad_tail_size = esp_aad ? 8 : 0;
+ const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) {
@@ -2207,6 +2209,9 @@ static void generate_aead_message(struct aead_request *req,
/* Generate the AAD. */
generate_random_bytes((u8 *)vec->assoc, vec->alen);
+ if (suite->aad_iv && vec->alen >= ivsize)
+ /* Avoid implementation-defined behavior. */
+ memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
if (inauthentic && prandom_u32() % 2 == 0) {
/* Generate a random ciphertext. */
@@ -2242,7 +2247,7 @@ static void generate_aead_message(struct aead_request *req,
* Mutate the authentic (ciphertext, AAD) pair to get an
* inauthentic one.
*/
- mutate_aead_message(vec, suite->esp_aad);
+ mutate_aead_message(vec, suite->aad_iv, ivsize);
}
vec->novrfy = 1;
if (suite->einval_allowed)
@@ -2507,11 +2512,11 @@ static int test_aead_extra(const char *driver,
goto out;
}
- err = test_aead_inauthentic_inputs(ctx);
+ err = test_aead_vs_generic_impl(ctx);
if (err)
goto out;
- err = test_aead_vs_generic_impl(ctx);
+ err = test_aead_inauthentic_inputs(ctx);
out:
kfree(ctx->vec.key);
kfree(ctx->vec.iv);
@@ -5229,7 +5234,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = {
____VECS(aes_gcm_rfc4106_tv_template),
.einval_allowed = 1,
- .esp_aad = 1,
+ .aad_iv = 1,
}
}
}, {
@@ -5241,7 +5246,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = {
____VECS(aes_ccm_rfc4309_tv_template),
.einval_allowed = 1,
- .esp_aad = 1,
+ .aad_iv = 1,
}
}
}, {
@@ -5252,6 +5257,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = {
____VECS(aes_gcm_rfc4543_tv_template),
.einval_allowed = 1,
+ .aad_iv = 1,
}
}
}, {
@@ -5267,7 +5273,7 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = {
____VECS(rfc7539esp_tv_template),
.einval_allowed = 1,
- .esp_aad = 1,
+ .aad_iv = 1,
}
}
}, {
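For the four templates flagged above, aad_iv encodes the ESP convention that the last ivsize bytes of the AAD duplicate req->iv; since behavior is implementation-defined when the two copies differ, the generator must keep them in sync, which is what the memcpy() added to generate_aead_message() does. The assumed layout, as a small sketch:

	/*
	 * AAD as seen by rfc4106/rfc4309/rfc4543/rfc7539esp:
	 *
	 *   | associated data (alen - ivsize bytes) | IV copy (ivsize) |
	 */
	static void sync_aad_iv(u8 *assoc, unsigned int alen,
				const u8 *iv, unsigned int ivsize)
	{
		if (alen >= ivsize)
			memcpy(assoc + alen - ivsize, iv, ivsize);
	}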
diff --git a/crypto/xts.c b/crypto/xts.c
index 29efa15f1495..dbdd8af629e6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -379,15 +379,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
err = -EINVAL;
if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
- goto err_drop_spawn;
+ goto err_free_inst;
if (crypto_skcipher_alg_ivsize(alg))
- goto err_drop_spawn;
+ goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
&alg->base);
if (err)
- goto err_drop_spawn;
+ goto err_free_inst;
err = -EINVAL;
cipher_name = alg->base.cra_name;
@@ -400,20 +400,20 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
if (len < 2 || len >= sizeof(ctx->name))
- goto err_drop_spawn;
+ goto err_free_inst;
if (ctx->name[len - 1] != ')')
- goto err_drop_spawn;
+ goto err_free_inst;
ctx->name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
- goto err_drop_spawn;
+ goto err_free_inst;
}
} else
- goto err_drop_spawn;
+ goto err_free_inst;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
@@ -437,17 +437,11 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->free = free;
err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto err_drop_spawn;
-
-out:
- return err;
-
-err_drop_spawn:
- crypto_drop_skcipher(&ctx->spawn);
+ if (err) {
err_free_inst:
- kfree(inst);
- goto out;
+ free(inst);
+ }
+ return err;
}
static struct crypto_template crypto_tmpl = {
diff --git a/drivers/Kconfig b/drivers/Kconfig
index c7396659fdbe..74920bda3fda 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -200,6 +200,8 @@ source "drivers/thunderbolt/Kconfig"
source "drivers/android/Kconfig"
+source "drivers/gpu/trace/Kconfig"
+
source "drivers/nvdimm/Kconfig"
source "drivers/dax/Kconfig"
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index e618ddfab2fd..40f6a3c33a15 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -256,6 +256,8 @@ u32
acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing);
+void acpi_ns_normalize_pathname(char *original_path);
+
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
u8 no_trailing);
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index aa71f65395d2..ee6a1b77af3f 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -468,16 +468,14 @@ char *acpi_db_get_next_token(char *string,
return (NULL);
}
- /* Remove any spaces at the beginning */
+ /* Remove any spaces at the beginning, ignore blank lines */
- if (*string == ' ') {
- while (*string && (*string == ' ')) {
- string++;
- }
+ while (*string && isspace(*string)) {
+ string++;
+ }
- if (!(*string)) {
- return (NULL);
- }
+ if (!(*string)) {
+ return (NULL);
}
switch (*string) {
@@ -570,7 +568,7 @@ char *acpi_db_get_next_token(char *string,
/* Find end of token */
- while (*string && (*string != ' ')) {
+ while (*string && !isspace(*string)) {
string++;
}
break;
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index 3eb45ea93e5e..9dfd693cda3e 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -409,6 +409,7 @@ acpi_status acpi_initialize_debugger(void)
acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
acpi_gbl_db_opt_no_ini_methods = FALSE;
+ acpi_gbl_db_opt_no_region_support = FALSE;
acpi_gbl_db_buffer = acpi_os_allocate(ACPI_DEBUG_BUFFER_SIZE);
if (!acpi_gbl_db_buffer) {
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 5e81a1ae44cf..1d4f8c81028c 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -16,6 +16,9 @@
#include "acinterp.h"
#include "acnamesp.h"
#include "acdebug.h"
+#ifdef ACPI_EXEC_APP
+#include "aecommon.h"
+#endif
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswexec")
@@ -329,6 +332,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
u32 op_class;
union acpi_parse_object *next_op;
union acpi_parse_object *first_arg;
+#ifdef ACPI_EXEC_APP
+ char *namepath;
+ union acpi_operand_object *obj_desc;
+#endif
ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
@@ -537,6 +544,32 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ds_eval_buffer_field_operands(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+#ifdef ACPI_EXEC_APP
+ /*
+ * acpi_exec support for namespace initialization file (initialize
+ * buffer_fields in this code.)
+ */
+ namepath =
+ acpi_ns_get_external_pathname(op->common.node);
+ status = ae_lookup_init_file_entry(namepath, &obj_desc);
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_ex_write_data_to_field(obj_desc,
+ op->common.node->object,
+ NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While writing to buffer field"));
+ }
+ }
+ ACPI_FREE(namepath);
+ status = AE_OK;
+#endif
break;
case AML_TYPE_CREATE_OBJECT:
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 697974e37edf..27069325b6de 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -14,7 +14,6 @@
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
-
#ifdef ACPI_ASL_COMPILER
#include "acdisasm.h"
#endif
@@ -399,7 +398,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
union acpi_parse_object *op;
acpi_object_type object_type;
acpi_status status = AE_OK;
-
#ifdef ACPI_ASL_COMPILER
u8 param_count;
#endif
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b31457ca926c..edadbe146506 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -15,6 +15,9 @@
#include "acinterp.h"
#include "acnamesp.h"
#include "acevents.h"
+#ifdef ACPI_EXEC_APP
+#include "aecommon.h"
+#endif
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswload2")
@@ -373,6 +376,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
struct acpi_namespace_node *new_node;
u32 i;
u8 region_space;
+#ifdef ACPI_EXEC_APP
+ union acpi_operand_object *obj_desc;
+ char *namepath;
+#endif
ACPI_FUNCTION_TRACE(ds_load2_end_op);
@@ -466,6 +473,11 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
* be evaluated later during the execution phase
*/
status = acpi_ds_create_buffer_field(op, walk_state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "CreateBufferField failure"));
+ goto cleanup;
+ }
break;
case AML_TYPE_NAMED_FIELD:
@@ -604,6 +616,29 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_NAME_OP:
status = acpi_ds_create_node(walk_state, node, op);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+#ifdef ACPI_EXEC_APP
+ /*
+ * acpi_exec support for namespace initialization file (initialize
+ * Name opcodes in this code.)
+ */
+ namepath = acpi_ns_get_external_pathname(node);
+ status = ae_lookup_init_file_entry(namepath, &obj_desc);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Detach any existing object, attach new object */
+
+ if (node->object) {
+ acpi_ns_detach_object(node);
+ }
+ acpi_ns_attach_object(node, obj_desc,
+ obj_desc->common.type);
+ }
+ ACPI_FREE(namepath);
+ status = AE_OK;
+#endif
break;
case AML_METHOD_OP:
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d4d26147610e..d91153f65700 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -13,9 +13,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
-/* Local Prototypes */
-static void acpi_ns_normalize_pathname(char *original_path);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -30,7 +27,6 @@ static void acpi_ns_normalize_pathname(char *original_path);
* for error and debug statements.
*
******************************************************************************/
-
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
char *name_buffer;
@@ -411,7 +407,7 @@ cleanup:
*
******************************************************************************/
-static void acpi_ns_normalize_pathname(char *original_path)
+void acpi_ns_normalize_pathname(char *original_path)
{
char *input_path = original_path;
char *new_path_buffer;
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index befdd13b403b..177ab88d95de 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -78,7 +78,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"IPMI", /* 0x07 */
"GeneralPurposeIo", /* 0x08 */
"GenericSerialBus", /* 0x09 */
- "PlatformCommChannel" /* 0x0A */
+ "PCC" /* 0x0A */
};
const char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index eee263cb7beb..c365faf4e6cd 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -452,13 +452,13 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*
* FUNCTION: acpi_ut_update_object_reference
*
- * PARAMETERS: object - Increment ref count for this object
- * and all sub-objects
+ * PARAMETERS: object - Increment or decrement the ref count for
+ * this object and all sub-objects
* action - Either REF_INCREMENT or REF_DECREMENT
*
* RETURN: Status
*
- * DESCRIPTION: Increment the object reference count
+ * DESCRIPTION: Increment or decrement the object reference count
*
* Object references are incremented when:
* 1) An object is attached to a Node (namespace object)
@@ -492,7 +492,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
}
/*
- * All sub-objects must have their reference count incremented
+ * All sub-objects must have their reference count updated
* also. Different object types have different subobjects.
*/
switch (object->common.type) {
@@ -559,6 +559,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
break;
}
}
+
next_object = NULL;
break;
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index a874dac7db5c..681c11f4af4e 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -332,7 +332,12 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
int i;
pos = string;
- end = string + size;
+
+ if (size != ACPI_UINT32_MAX) {
+ end = string + size;
+ } else {
+ end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
+ }
for (; *format; ++format) {
if (*format != '%') {
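ACPI_UINT32_MAX is how ACPICA spells an unbounded print: the wrapper below (a sketch, close to the utprint.c definition) hands exactly that size to vsnprintf(), and for it the old "end = string + size" could overflow the pointer, breaking every "pos < end" bounds check. The fix pins end to the highest representable value instead of deriving it from string.

	int vsprintf(char *string, const char *format, va_list args)
	{
		/* "no limit": size is ACPI_UINT32_MAX */
		return vsnprintf(string, ACPI_UINT32_MAX, format, args);
	}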
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a1a858ad4d18..8b2e89c20c11 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -438,13 +438,10 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
* domain info.
*/
for_each_possible_cpu(i) {
- pr = all_cpu_data[i];
- if (!pr)
- continue;
-
if (cpumask_test_cpu(i, covered_cpus))
continue;
+ pr = all_cpu_data[i];
cpc_ptr = per_cpu(cpc_desc_ptr, i);
if (!cpc_ptr) {
retval = -EFAULT;
@@ -495,44 +492,28 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
cpumask_set_cpu(j, pr->shared_cpu_map);
}
- for_each_possible_cpu(j) {
+ for_each_cpu(j, pr->shared_cpu_map) {
if (i == j)
continue;
match_pr = all_cpu_data[j];
- if (!match_pr)
- continue;
-
- match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr) {
- retval = -EFAULT;
- goto err_ret;
- }
-
- match_pdomain = &(match_cpc_ptr->domain_info);
- if (match_pdomain->domain != pdomain->domain)
- continue;
-
match_pr->shared_type = pr->shared_type;
cpumask_copy(match_pr->shared_cpu_map,
pr->shared_cpu_map);
}
}
+ goto out;
err_ret:
for_each_possible_cpu(i) {
pr = all_cpu_data[i];
- if (!pr)
- continue;
/* Assume no coordination on any error parsing domain info */
- if (retval) {
- cpumask_clear(pr->shared_cpu_map);
- cpumask_set_cpu(i, pr->shared_cpu_map);
- pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- }
+ cpumask_clear(pr->shared_cpu_map);
+ cpumask_set_cpu(i, pr->shared_cpu_map);
+ pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
}
-
+out:
free_cpumask_var(covered_cpus);
return retval;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index b64c62bfcea5..b2263ec67b43 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1321,8 +1321,8 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
*/
static const struct acpi_device_id special_pm_ids[] = {
{"PNP0C0B", }, /* Generic ACPI fan */
- {"INT1044", }, /* Fan for Tiger Lake generation */
{"INT3404", }, /* Fan */
+ {"INTC1044", }, /* Fan for Tiger Lake generation */
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 387f27ef3368..e4e8b75d39f0 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -97,8 +97,8 @@ static int dptf_power_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3407_device_ids[] = {
- {"INT1047", 0},
{"INT3407", 0},
+ {"INTC1047", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 1ec7b6900662..bc71a6a60334 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -13,10 +13,6 @@
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT1040"},
- {"INT1043"},
- {"INT1044"},
- {"INT1047"},
{"INT3400"},
{"INT3401", INT3401_DEVICE},
{"INT3402"},
@@ -28,6 +24,10 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INT3409"},
{"INT340A"},
{"INT340B"},
+ {"INTC1040"},
+ {"INTC1043"},
+ {"INTC1044"},
+ {"INTC1047"},
{""},
};
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index f92df2533e7e..ac8ad6cb82aa 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -131,6 +131,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = {
{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
{ OSC_PCI_MSI_SUPPORT, "MSI" },
+ { OSC_PCI_EDR_SUPPORT, "EDR" },
{ OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
@@ -141,6 +142,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = {
{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
{ OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
+ { OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" },
};
static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
@@ -440,6 +442,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
if (pci_msi_enabled())
support |= OSC_PCI_MSI_SUPPORT;
+ if (IS_ENABLED(CONFIG_PCIE_EDR))
+ support |= OSC_PCI_EDR_SUPPORT;
decode_osc_support(root, "OS supports", support);
status = acpi_pci_osc_support(root, support);
@@ -487,6 +491,15 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
control |= OSC_PCI_EXPRESS_AER_CONTROL;
}
+ /*
+ * Per the Downstream Port Containment Related Enhancements ECN to
+ * the PCI Firmware Spec, r3.2, sec 4.5.1, table 4-5,
+ * OSC_PCI_EXPRESS_DPC_CONTROL indicates the OS supports both DPC
+ * and EDR.
+ */
+ if (IS_ENABLED(CONFIG_PCIE_DPC) && IS_ENABLED(CONFIG_PCIE_EDR))
+ control |= OSC_PCI_EXPRESS_DPC_CONTROL;
+
requested = control;
status = acpi_pci_osc_control_set(handle, &control,
OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
@@ -916,6 +929,8 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
host_bridge->native_pme = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
host_bridge->native_ltr = 0;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_DPC_CONTROL))
+ host_bridge->native_dpc = 0;
/*
* Evaluate the "PCI Boot Configuration" _DSM Function. If it
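With the bits negotiated above, native_dpc parallels native_aer and native_pme: it stays set only when _OSC granted the OS DPC control, and is cleared when firmware keeps it (in which case errors arrive firmware-first via EDR). A sketch of the consumer-side test this enables, an assumed shape rather than a quote of the DPC driver:

	static bool dpc_is_native(struct pci_dev *pdev)
	{
		struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);

		/* firmware retained DPC: expect EDR events instead */
		return host->native_dpc;
	}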
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 532a1ae3595a..a0bd56ece3ff 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -897,13 +897,6 @@ static long __acpi_processor_get_throttling(void *data)
return pr->throttling.acpi_processor_get_throttling(pr);
}
-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
-{
- if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
- return fn(arg);
- return work_on_cpu(cpu, fn, arg);
-}
-
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
if (!pr)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index bb1ae400ec1f..4edc8a3ce40f 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1009,6 +1009,10 @@ static bool acpi_s2idle_wake(void)
if (acpi_any_fixed_event_status_set())
return true;
+ /* Check wakeups from drivers sharing the SCI. */
+ if (acpi_check_wakeup_handlers())
+ return true;
+
/*
* If the status bit is set for any enabled GPE other than the
* EC one, the wakeup is regarded as a genuine one.
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 41675d24a9bc..3d90480ce1b1 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -2,6 +2,7 @@
extern void acpi_enable_wakeup_devices(u8 sleep_state);
extern void acpi_disable_wakeup_devices(u8 sleep_state);
+extern bool acpi_check_wakeup_handlers(void);
extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 180ac4329763..0e905c3d1645 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -501,7 +501,7 @@ static const char * const table_sigs[] = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- NULL };
+ ACPI_SIG_NHLT, NULL };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 419f814d596a..b4994e50608d 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -352,6 +352,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+ {
+ .callback = video_detect_force_native,
+ .ident = "Acer Aspire 5738z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ DMI_MATCH(DMI_BOARD_NAME, "JV50"),
+ },
+ },
/*
* Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index c28244df56a5..0b2e42530adf 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -12,6 +12,15 @@
#include "internal.h"
#include "sleep.h"
+struct acpi_wakeup_handler {
+ struct list_head list_node;
+ bool (*wakeup)(void *context);
+ void *context;
+};
+
+static LIST_HEAD(acpi_wakeup_handler_head);
+static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
+
/*
* We didn't lock acpi_device_lock in the file, because it invokes oops in
* suspend/resume and isn't really required as this is called in S-state. At
@@ -90,3 +99,75 @@ int __init acpi_wakeup_device_init(void)
mutex_unlock(&acpi_device_lock);
return 0;
}
+
+/**
+ * acpi_register_wakeup_handler - Register wakeup handler
+ * @wake_irq: The IRQ through which the device may receive wakeups
+ * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup
+ * @context: Context to pass to the handler when calling it
+ *
+ * Drivers which may share an IRQ with the SCI can use this to register
+ * a handler which returns true when the device they are managing wants
+ * to trigger a wakeup.
+ */
+int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
+ void *context)
+{
+ struct acpi_wakeup_handler *handler;
+
+ /*
+ * If the device is not sharing its IRQ with the SCI, there is no
+ * need to register the handler.
+ */
+ if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
+ return 0;
+
+ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+
+ handler->wakeup = wakeup;
+ handler->context = context;
+
+ mutex_lock(&acpi_wakeup_handler_mutex);
+ list_add(&handler->list_node, &acpi_wakeup_handler_head);
+ mutex_unlock(&acpi_wakeup_handler_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
+
+/**
+ * acpi_unregister_wakeup_handler - Unregister wakeup handler
+ * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler()
+ * @context: Context passed to acpi_register_wakeup_handler()
+ */
+void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
+ void *context)
+{
+ struct acpi_wakeup_handler *handler;
+
+ mutex_lock(&acpi_wakeup_handler_mutex);
+ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+ if (handler->wakeup == wakeup && handler->context == context) {
+ list_del(&handler->list_node);
+ kfree(handler);
+ break;
+ }
+ }
+ mutex_unlock(&acpi_wakeup_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
+
+bool acpi_check_wakeup_handlers(void)
+{
+ struct acpi_wakeup_handler *handler;
+
+ /* No need to lock, nothing else is running when we're called. */
+ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+ if (handler->wakeup(handler->context))
+ return true;
+ }
+
+ return false;
+}
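A hypothetical consumer of the new pair of exports, for illustration (all my_dev_* names and the status register are invented): a driver whose wake IRQ is shared with the SCI registers a predicate at probe time so the s2idle check in sleep.c can attribute the wakeup.

	struct my_dev {
		int irq;
		void __iomem *regs;
	};

	static bool my_dev_check_wakeup(void *context)
	{
		struct my_dev *dev = context;

		/* true only if our device raised the shared interrupt */
		return readl(dev->regs + 0x10) & BIT(0);
	}

	static int my_dev_setup_wakeup(struct my_dev *dev)
	{
		/* no-op unless dev->irq is in fact the SCI's IRQ */
		return acpi_register_wakeup_handler(dev->irq,
						    my_dev_check_wakeup, dev);
	}

	static void my_dev_teardown_wakeup(struct my_dev *dev)
	{
		acpi_unregister_wakeup_handler(my_dev_check_wakeup, dev);
	}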
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index f303106b3362..9ecad74183a3 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mount.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
#include <linux/radix-tree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -48,26 +48,30 @@ static dev_t binderfs_dev;
static DEFINE_MUTEX(binderfs_minors_mutex);
static DEFINE_IDA(binderfs_minors);
-enum {
+enum binderfs_param {
Opt_max,
Opt_stats_mode,
- Opt_err
};
enum binderfs_stats_mode {
- STATS_NONE,
- STATS_GLOBAL,
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
};
-static const match_table_t tokens = {
- { Opt_max, "max=%d" },
- { Opt_stats_mode, "stats=%s" },
- { Opt_err, NULL }
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
};
-static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
+const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
{
- return inode->i_sb->s_fs_info;
+ return sb->s_fs_info;
}
bool is_binderfs_device(const struct inode *inode)
@@ -246,7 +250,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
static void binderfs_evict_inode(struct inode *inode)
{
struct binder_device *device = inode->i_private;
- struct binderfs_info *info = BINDERFS_I(inode);
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
clear_inode(inode);
@@ -264,97 +268,84 @@ static void binderfs_evict_inode(struct inode *inode)
}
}
-/**
- * binderfs_parse_mount_opts - parse binderfs mount options
- * @data: options to set (can be NULL in which case defaults are used)
- */
-static int binderfs_parse_mount_opts(char *data,
- struct binderfs_mount_opts *opts)
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
{
- char *p, *stats;
- opts->max = BINDERFS_MAX_MINOR;
- opts->stats_mode = STATS_NONE;
-
- while ((p = strsep(&data, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- int max_devices;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_max:
- if (match_int(&args[0], &max_devices) ||
- (max_devices < 0 ||
- (max_devices > BINDERFS_MAX_MINOR)))
- return -EINVAL;
-
- opts->max = max_devices;
- break;
- case Opt_stats_mode:
- if (!capable(CAP_SYS_ADMIN))
- return -EINVAL;
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
- stats = match_strdup(&args[0]);
- if (!stats)
- return -ENOMEM;
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
- if (strcmp(stats, "global") != 0) {
- kfree(stats);
- return -EINVAL;
- }
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
- opts->stats_mode = STATS_GLOBAL;
- kfree(stats);
- break;
- default:
- pr_err("Invalid mount options\n");
- return -EINVAL;
- }
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
}
return 0;
}
-static int binderfs_remount(struct super_block *sb, int *flags, char *data)
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
{
- int prev_stats_mode, ret;
- struct binderfs_info *info = sb->s_fs_info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
- prev_stats_mode = info->mount_opts.stats_mode;
- ret = binderfs_parse_mount_opts(data, &info->mount_opts);
- if (ret)
- return ret;
-
- if (prev_stats_mode != info->mount_opts.stats_mode) {
- pr_err("Binderfs stats mode cannot be changed during a remount\n");
- info->mount_opts.stats_mode = prev_stats_mode;
- return -EINVAL;
- }
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
return 0;
}
-static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct binderfs_info *info;
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
- info = root->d_sb->s_fs_info;
if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
seq_printf(seq, ",max=%d", info->mount_opts.max);
- if (info->mount_opts.stats_mode == STATS_GLOBAL)
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
seq_printf(seq, ",stats=global");
+ break;
+ }
return 0;
}
+static void binderfs_put_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+ sb->s_fs_info = NULL;
+}
+
static const struct super_operations binderfs_super_ops = {
.evict_inode = binderfs_evict_inode,
- .remount_fs = binderfs_remount,
- .show_options = binderfs_show_mount_opts,
+ .show_options = binderfs_show_options,
.statfs = simple_statfs,
+ .put_super = binderfs_put_super,
};
static inline bool is_binderfs_control_device(const struct dentry *dentry)
@@ -653,10 +644,11 @@ out:
return ret;
}
-static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
int ret;
struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
struct inode *inode = NULL;
struct binderfs_device device_info = { 0 };
const char *name;
@@ -689,16 +681,14 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
- ret = binderfs_parse_mount_opts(data, &info->mount_opts);
- if (ret)
- return ret;
-
info->root_gid = make_kgid(sb->s_user_ns, 0);
if (!gid_valid(info->root_gid))
info->root_gid = GLOBAL_ROOT_GID;
info->root_uid = make_kuid(sb->s_user_ns, 0);
if (!uid_valid(info->root_uid))
info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
inode = new_inode(sb);
if (!inode)
@@ -730,36 +720,54 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
name++;
}
- if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
return init_binder_logs(sb);
return 0;
}
-static struct dentry *binderfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, binderfs_fill_super);
+ return get_tree_nodev(fc, binderfs_fill_super);
}
-static void binderfs_kill_super(struct super_block *sb)
+static void binderfs_fs_context_free(struct fs_context *fc)
{
- struct binderfs_info *info = sb->s_fs_info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
- kill_litter_super(sb);
+ kfree(ctx);
+}
- if (info && info->ipc_ns)
- put_ipc_ns(info->ipc_ns);
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
- kfree(info);
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
}
static struct file_system_type binder_fs_type = {
- .name = "binder",
- .mount = binderfs_mount,
- .kill_sb = binderfs_kill_super,
- .fs_flags = FS_USERNS_MOUNT,
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = kill_litter_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init init_binderfs(void)
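
For reference, the shape of the new mount API that binderfs now follows is: a constant fs_parameter_spec table, fs_parse() to match and type-check each key, and a parse_param callback that only sees already-validated values. A generic, hedged sketch of that pattern (the "example" names are invented for illustration):

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_limit };

static const struct fs_parameter_spec example_fs_parameters[] = {
	fsparam_u32("limit", Opt_limit),
	{}
};

struct example_ctx {
	u32 limit;
};

static int example_parse_param(struct fs_context *fc,
			       struct fs_parameter *param)
{
	struct example_ctx *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, example_fs_parameters, param, &result);
	if (opt < 0)
		return opt; /* unknown key, bad value, etc. */

	switch (opt) {
	case Opt_limit:
		ctx->limit = result.uint_32; /* already validated as a u32 */
		break;
	}
	return 0;
}

Userspace should be unaffected: mount -t binder -o max=4096,stats=global still parses as before, but malformed values are now rejected by fs_parse() with precise invalfc() messages.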
diff --git a/drivers/atm/.gitignore b/drivers/atm/.gitignore
index fc0ae5eb05d8..ddd374e91965 100644
--- a/drivers/atm/.gitignore
+++ b/drivers/atm/.gitignore
@@ -1,4 +1,4 @@
-# Ignore generated files
+# SPDX-License-Identifier: GPL-2.0-only
fore200e_mkfirm
fore200e_pca_fw.c
pca200e.bin
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index c0da3820454b..d58278ae9e4a 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -22,8 +22,6 @@
#include "charlcd.h"
-#define LCD_MINOR 156
-
#define DEFAULT_LCD_BWIDTH 40
#define DEFAULT_LCD_HWIDTH 64
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 85965953683e..99980aa3644b 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -57,8 +57,6 @@
#include "charlcd.h"
-#define KEYPAD_MINOR 185
-
#define LCD_MAXBYTES 256 /* max burst write */
#define KEYPAD_BUFFER 64
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4086718f6876..dbec3a05590a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -27,6 +27,24 @@
#define MEMORY_CLASS_NAME "memory"
+static const char *const online_type_to_str[] = {
+ [MMOP_OFFLINE] = "offline",
+ [MMOP_ONLINE] = "online",
+ [MMOP_ONLINE_KERNEL] = "online_kernel",
+ [MMOP_ONLINE_MOVABLE] = "online_movable",
+};
+
+int memhp_online_type_from_str(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
+ if (sysfs_streq(str, online_type_to_str[i]))
+ return i;
+ }
+ return -EINVAL;
+}
+
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
static int sections_per_block;
@@ -145,45 +163,6 @@ int memory_notify(unsigned long val, void *v)
}
/*
- * The probe routines leave the pages uninitialized, just as the bootmem code
- * does. Make sure we do not access them, but instead use only information from
- * within sections.
- */
-static bool pages_correctly_probed(unsigned long start_pfn)
-{
- unsigned long section_nr = pfn_to_section_nr(start_pfn);
- unsigned long section_nr_end = section_nr + sections_per_block;
- unsigned long pfn = start_pfn;
-
- /*
- * memmap between sections is not contiguous except with
- * SPARSEMEM_VMEMMAP. We lookup the page once per section
- * and assume memmap is contiguous within each section
- */
- for (; section_nr < section_nr_end; section_nr++) {
- if (WARN_ON_ONCE(!pfn_valid(pfn)))
- return false;
-
- if (!present_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) not present\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- } else if (!valid_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- } else if (online_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) is already online\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- }
- pfn += PAGES_PER_SECTION;
- }
-
- return true;
-}
-
-/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
*/
@@ -199,9 +178,6 @@ memory_block_action(unsigned long start_section_nr, unsigned long action,
switch (action) {
case MEM_ONLINE:
- if (!pages_correctly_probed(start_pfn))
- return -EBUSY;
-
ret = online_pages(start_pfn, nr_pages, online_type, nid);
break;
case MEM_OFFLINE:
@@ -245,17 +221,14 @@ static int memory_subsys_online(struct device *dev)
return 0;
/*
- * If we are called from state_store(), online_type will be
- * set >= 0 Otherwise we were called from the device online
- * attribute and need to set the online_type.
+ * When called via device_online() without configuring the online_type,
+ * we want to default to MMOP_ONLINE.
*/
- if (mem->online_type < 0)
- mem->online_type = MMOP_ONLINE_KEEP;
+ if (mem->online_type == MMOP_OFFLINE)
+ mem->online_type = MMOP_ONLINE;
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
-
- /* clear online_type */
- mem->online_type = -1;
+ mem->online_type = MMOP_OFFLINE;
return ret;
}
@@ -267,40 +240,27 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
- /* Can't offline block with non-present sections */
- if (mem->section_count != sections_per_block)
- return -EINVAL;
-
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ const int online_type = memhp_online_type_from_str(buf);
struct memory_block *mem = to_memory_block(dev);
- int ret, online_type;
+ int ret;
+
+ if (online_type < 0)
+ return -EINVAL;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
- if (sysfs_streq(buf, "online_kernel"))
- online_type = MMOP_ONLINE_KERNEL;
- else if (sysfs_streq(buf, "online_movable"))
- online_type = MMOP_ONLINE_MOVABLE;
- else if (sysfs_streq(buf, "online"))
- online_type = MMOP_ONLINE_KEEP;
- else if (sysfs_streq(buf, "offline"))
- online_type = MMOP_OFFLINE;
- else {
- ret = -EINVAL;
- goto err;
- }
-
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
- case MMOP_ONLINE_KEEP:
+ case MMOP_ONLINE:
/* mem->online_type is protected by device_hotplug_lock */
mem->online_type = online_type;
ret = device_online(&mem->dev);
@@ -312,7 +272,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
ret = -EINVAL; /* should never happen */
}
-err:
unlock_device_hotplug();
if (ret < 0)
@@ -380,7 +339,8 @@ static ssize_t valid_zones_show(struct device *dev,
}
nid = mem->nid;
- default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
+ nr_pages);
strcat(buf, default_zone->name);
print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
@@ -418,23 +378,20 @@ static DEVICE_ATTR_RO(block_size_bytes);
static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (memhp_auto_online)
- return sprintf(buf, "online\n");
- else
- return sprintf(buf, "offline\n");
+ return sprintf(buf, "%s\n",
+ online_type_to_str[memhp_default_online_type]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- if (sysfs_streq(buf, "online"))
- memhp_auto_online = true;
- else if (sysfs_streq(buf, "offline"))
- memhp_auto_online = false;
- else
+ const int online_type = memhp_online_type_from_str(buf);
+
+ if (online_type < 0)
return -EINVAL;
+ memhp_default_online_type = online_type;
return count;
}
@@ -627,7 +584,7 @@ static int init_memory_block(struct memory_block **memory,
static int add_memory_block(unsigned long base_section_nr)
{
- int ret, section_count = 0;
+ int section_count = 0;
struct memory_block *mem;
unsigned long nr;
@@ -638,12 +595,8 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
- ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
- MEM_ONLINE);
- if (ret)
- return ret;
- mem->section_count = section_count;
- return 0;
+ return init_memory_block(&mem, base_memory_block_id(base_section_nr),
+ MEM_ONLINE);
}
static void unregister_memory(struct memory_block *memory)
@@ -679,7 +632,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
break;
- mem->section_count = sections_per_block;
}
if (ret) {
end_block_id = block_id;
@@ -688,7 +640,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- mem->section_count = 0;
unregister_memory(mem);
}
}
@@ -717,7 +668,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- mem->section_count = 0;
unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
}
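
The online_type handling is now table-driven: one string array serves state_store(), auto_online_blocks_show(), and auto_online_blocks_store(). A standalone sketch of the same lookup, with sysfs_streq() approximated by strcmp() (the kernel helper additionally tolerates a trailing newline):

#include <string.h>

enum mmop { MMOP_OFFLINE, MMOP_ONLINE, MMOP_ONLINE_KERNEL, MMOP_ONLINE_MOVABLE };

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE]        = "offline",
	[MMOP_ONLINE]         = "online",
	[MMOP_ONLINE_KERNEL]  = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

/* Mirror of memhp_online_type_from_str(); the enum value doubles as
 * the array index, which is why the table order matters. */
static int online_type_from_str(const char *str)
{
	for (int i = 0; i < 4; i++)
		if (!strcmp(str, online_type_to_str[i]))
			return i;
	return -1; /* the kernel returns -EINVAL */
}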
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 98a31bafc8a2..10d7e818e118 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -772,7 +772,7 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
* memory block could have several absent sections from start.
* skip pfn range from absent section
*/
- if (!pfn_present(pfn)) {
+ if (!pfn_in_present_section(pfn)) {
pfn = round_down(pfn + PAGES_PER_SECTION,
PAGES_PER_SECTION) - 1;
continue;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 6d1dee7051eb..fdd508a78ffd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1922,10 +1922,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(!pm_runtime_enabled(dev) &&
- dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED));
-
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1973,8 +1969,7 @@ unlock:
*/
spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
- ((pm_runtime_suspended(dev) && ret > 0) ||
- dev->power.no_pm_callbacks) &&
+ (ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
spin_unlock_irq(&dev->power.lock);
return 0;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6095b6df8a81..6d4e4497b59b 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -201,5 +201,6 @@ config DA8XX_MSTPRI
peripherals.
source "drivers/bus/fsl-mc/Kconfig"
+source "drivers/bus/mhi/Kconfig"
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 1320bcf9fa9d..05f32cd694a4 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -34,3 +34,6 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
+
+# MHI
+obj-$(CONFIG_MHI_BUS) += mhi/
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index c78d10ea641f..40526da5c6a6 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -26,6 +26,8 @@
*/
#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+static struct fsl_mc_version mc_version;
+
/**
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
* @root_mc_bus_dev: fsl-mc device representing the root DPRC
@@ -55,20 +57,6 @@ struct fsl_mc_addr_translation_range {
};
/**
- * struct mc_version
- * @major: Major version number: incremented on API compatibility changes
- * @minor: Minor version number: incremented on API additions (that are
- * backward compatible); reset when major version is incremented
- * @revision: Internal revision number: incremented on implementation changes
- * and/or bug fixes that have no impact on API
- */
-struct mc_version {
- u32 major;
- u32 minor;
- u32 revision;
-};
-
-/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the fsl-mc device to match against
* @drv: the device driver to search for matching fsl-mc object type
@@ -338,7 +326,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
*/
static int mc_get_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
- struct mc_version *mc_ver_info)
+ struct fsl_mc_version *mc_ver_info)
{
struct fsl_mc_command cmd = { 0 };
struct dpmng_rsp_get_version *rsp_params;
@@ -364,6 +352,20 @@ static int mc_get_version(struct fsl_mc_io *mc_io,
}
/**
+ * fsl_mc_get_version - function to retrieve the MC f/w version information
+ *
+ * Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
+ */
+struct fsl_mc_version *fsl_mc_get_version(void)
+{
+ if (mc_version.major)
+ return &mc_version;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_version);
+
+/**
* fsl_mc_get_root_dprc - function to traverse to the root dprc
*/
static void fsl_mc_get_root_dprc(struct device *dev,
@@ -862,7 +864,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
int container_id;
phys_addr_t mc_portal_phys_addr;
u32 mc_portal_size;
- struct mc_version mc_version;
struct resource res;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
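
Since the version is now cached in a file-scope fsl_mc_version (populated at probe via mc_get_version()), other drivers can query it through the exported accessor. A hedged sketch of a consumer; the header carrying the declaration is assumed to be include/linux/fsl/mc.h:

#include <linux/device.h>
#include <linux/fsl/mc.h>	/* assumed location of the declaration */

static void example_log_mc_version(struct device *dev)
{
	struct fsl_mc_version *ver = fsl_mc_get_version();

	if (!ver) {
		/* fsl-mc bus not probed yet; callers must cope with NULL. */
		dev_info(dev, "MC firmware version not available yet\n");
		return;
	}
	dev_info(dev, "MC firmware %u.%u.%u\n",
		 ver->major, ver->minor, ver->revision);
}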
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 8101df901830..378f5d62a991 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
}
/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fix it up to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+ struct resource *r)
+{
+ if (r->end != 0x3fff)
+ return;
+
+ if (r->start == 0xe4)
+ r->end = 0xe4 + 0x04 - 1;
+ else if (r->start == 0x2f8)
+ r->end = 0x2f8 + 0x08 - 1;
+ else
+ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+ r);
+}
+
+/*
* hisi_lpc_acpi_set_io_res - set the resources for a child
* @child: the device node to be updated the I/O resource
* @hostdev: the device node associated with host controller
@@ -418,8 +438,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
return -ENOMEM;
}
count = 0;
- list_for_each_entry(rentry, &resource_list, node)
- resources[count++] = *rentry->res;
+ list_for_each_entry(rentry, &resource_list, node) {
+ resources[count] = *rentry->res;
+ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+ count++;
+ }
acpi_dev_free_resource_list(&resource_list);
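
Concretely, the fixup rewrites the two known bogus ranges as follows (0x2f8 is the legacy COM2 UART base; the 4-byte size of the 0xe4 window is simply what the driver hardcodes):

	start 0x0e4, end 0x3fff  ->  [0x0e4, 0x0e7]   /* 0xe4  + 0x04 - 1 */
	start 0x2f8, end 0x3fff  ->  [0x2f8, 0x2ff]   /* 0x2f8 + 0x08 - 1 */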
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
new file mode 100644
index 000000000000..a8bd9bd7db7c
--- /dev/null
+++ b/drivers/bus/mhi/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+#
+
+config MHI_BUS
+ tristate "Modem Host Interface (MHI) bus"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by the host processors to control
+ and communicate with modem devices over a high speed peripheral
+ bus or shared memory.
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
new file mode 100644
index 000000000000..19e6443b72df
--- /dev/null
+++ b/drivers/bus/mhi/Makefile
@@ -0,0 +1,2 @@
+# core layer
+obj-y += core/
diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
new file mode 100644
index 000000000000..66e2700c9032
--- /dev/null
+++ b/drivers/bus/mhi/core/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MHI_BUS) := mhi.o
+
+mhi-y := init.o main.o pm.o boot.o
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
new file mode 100644
index 000000000000..ebad5eb48e5a
--- /dev/null
+++ b/drivers/bus/mhi/core/boot.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/* Setup RDDM vector table for RDDM transfer and program RXVEC */
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
+{
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 sequence_id;
+ unsigned int i;
+
+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = mhi_buf->len;
+ }
+
+ dev_dbg(dev, "BHIe programming for RDDM\n");
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
+ if (unlikely(!sequence_id))
+ sequence_id = 1;
+
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+ sequence_id);
+
+ dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+}
+
+/* Collect RDDM buffer during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 rx_status;
+ enum mhi_ee_type ee;
+ const u32 delayus = 2000;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ const u32 rddm_timeout_us = 200000;
+ int rddm_retry = rddm_timeout_us / delayus;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /*
+ * This should only execute during a kernel panic; we expect all
+ * other cores to shut down while we're collecting the RDDM buffer.
+ * After returning from this function, we expect the device to reset.
+ *
+ * Normally, we read/write pm_state only after grabbing the pm_lock;
+ * since we're in a panic, we skip taking it. Also, there is no
+ * guarantee that this state change will take effect, since we're
+ * setting it without grabbing the pm_lock.
+ */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ /* The update should take effect immediately */
+ smp_wmb();
+
+ /*
+ * Make sure the device is not already in RDDM. In case the device
+ * asserts and a kernel panic follows, the device will already be in
+ * RDDM. Do not trigger SYS ERR again; proceed with waiting for the
+ * image download to complete.
+ */
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee != MHI_EE_RDDM) {
+ dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ dev_dbg(dev, "Waiting for device to enter RDDM\n");
+ while (rddm_retry--) {
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_RDDM)
+ break;
+
+ udelay(delayus);
+ }
+
+ if (rddm_retry <= 0) {
+ /* Hardware reset so force device to enter RDDM */
+ dev_dbg(dev,
+ "Did not enter RDDM, do a host req reset\n");
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+ MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
+ return 0;
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+ dev_err(dev, "Did not complete RDDM transfer\n");
+ dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
+ dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
+
+ return -EIO;
+}
+
+/* Download RDDM image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ u32 rx_status;
+
+ if (in_panic)
+ return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status) || rx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
+
+static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
+ const struct mhi_buf *mhi_buf)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ u32 tx_status, sequence_id;
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ return -EIO;
+ }
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+
+ sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
+ sequence_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+
+static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 tx_status, val, session_id;
+ int i, ret;
+ void __iomem *base = mhi_cntrl->bhi;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ dev_dbg(dev, "Starting SBL download via BHI\n");
+ mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+ upper_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+ lower_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+ BHI_STATUS_MASK, BHI_STATUS_SHIFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ goto invalid_pm_state;
+
+ if (tx_status == BHI_STATUS_ERROR) {
+ dev_err(dev, "Image transfer failed\n");
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base,
+ error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n",
+ error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ return (!ret) ? -ETIMEDOUT : 0;
+
+invalid_pm_state:
+
+ return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ int i;
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ for (i = 0; i < image_info->entries; i++, mhi_buf++)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ size_t seg_size = mhi_cntrl->seg_len;
+ int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+ int i;
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entries */
+ img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+ GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+ for (i = 0; i < segments; i++, mhi_buf++) {
+ size_t vec_size = seg_size;
+
+ /* Vector table is the last entry */
+ if (i == segments - 1)
+ vec_size = sizeof(struct bhi_vec_entry) * i;
+
+ mhi_buf->len = vec_size;
+ mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+ &mhi_buf->dma_addr,
+ GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+ }
+
+ img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+ img_info->entries = segments;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ const struct firmware *firmware,
+ struct image_info *img_info)
+{
+ size_t remainder = firmware->size;
+ size_t to_cpy;
+ const u8 *buf = firmware->data;
+ int i = 0;
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+ while (remainder) {
+ to_cpy = min(remainder, mhi_buf->len);
+ memcpy(mhi_buf->buf, buf, to_cpy);
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = to_cpy;
+
+ buf += to_cpy;
+ remainder -= to_cpy;
+ i++;
+ bhi_vec++;
+ mhi_buf++;
+ }
+}
+
+void mhi_fw_load_worker(struct work_struct *work)
+{
+ struct mhi_controller *mhi_cntrl;
+ const struct firmware *firmware = NULL;
+ struct image_info *image_info;
+ struct device *dev;
+ const char *fw_name;
+ void *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+ int ret;
+
+ mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
+ dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_PBL(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device MHI is not in valid state\n");
+ return;
+ }
+
+ /* If the device is in pass-through mode, do a reset-to-ready state transition */
+ if (mhi_cntrl->ee == MHI_EE_PTHRU)
+ goto fw_load_ee_pthru;
+
+ fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+ if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+ !mhi_cntrl->seg_len))) {
+ dev_err(dev,
+ "No firmware image defined or !sbl_size || !seg_len\n");
+ return;
+ }
+
+ ret = request_firmware(&firmware, fw_name, dev);
+ if (ret) {
+ dev_err(dev, "Error loading firmware: %d\n", ret);
+ return;
+ }
+
+ size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+ /* SBL size provided is maximum size, not necessarily the image size */
+ if (size > firmware->size)
+ size = firmware->size;
+
+ buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+ if (!buf) {
+ release_firmware(firmware);
+ return;
+ }
+
+ /* Download SBL image */
+ memcpy(buf, firmware->data, size);
+ ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+ mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
+
+ if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
+ release_firmware(firmware);
+
+ /* Error or in EDL mode, we're done */
+ if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+ return;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+ * If we're doing fbc, populate the vector tables while the
+ * device is transitioning into the MHI READY state
+ */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
+ firmware->size);
+ if (ret)
+ goto error_alloc_fw_table;
+
+ /* Load the firmware into BHIE vec table */
+ mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+ }
+
+fw_load_ee_pthru:
+ /* Transitioning into MHI RESET->READY state */
+ ret = mhi_ready_state_transition(mhi_cntrl);
+
+ if (!mhi_cntrl->fbc_download)
+ return;
+
+ if (ret)
+ goto error_read;
+
+ /* Wait for the SBL event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_SBL ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "MHI did not enter SBL\n");
+ goto error_read;
+ }
+
+ /* Start full firmware image download */
+ image_info = mhi_cntrl->fbc_image;
+ ret = mhi_fw_load_amss(mhi_cntrl,
+ /* Vector table is the last entry */
+ &image_info->mhi_buf[image_info->entries - 1]);
+
+ release_firmware(firmware);
+
+ return;
+
+error_read:
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+
+error_alloc_fw_table:
+ release_firmware(firmware);
+}
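
A worked example of the segmented image layout that mhi_alloc_bhie_table() and mhi_firmware_copy() above build (the sizes are invented; only the arithmetic mirrors the code):

#include <stddef.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct bhi_vec_entry { unsigned long long dma_addr; unsigned long long size; };

int main(void)
{
	size_t seg_len = 512 * 1024;	/* controller-provided seg_len  */
	size_t fw_size = 1200 * 1024;	/* firmware image size          */
	/* One extra segment to hold the vector table itself */
	size_t segments = DIV_ROUND_UP(fw_size, seg_len) + 1;	/* = 4 */
	/* The last entry describes the first (segments - 1) payload buffers */
	size_t vec_size = sizeof(struct bhi_vec_entry) * (segments - 1);

	return segments == 4 && vec_size == 3 * sizeof(struct bhi_vec_entry)
		? 0 : 1;
}

Only the DMA address of that final vector-table entry is handed to mhi_fw_load_amss(); the device walks the table itself to fetch the payload segments.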
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
new file mode 100644
index 000000000000..b38359c480ea
--- /dev/null
+++ b/drivers/bus/mhi/core/init.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+ [MHI_EE_PBL] = "PBL",
+ [MHI_EE_SBL] = "SBL",
+ [MHI_EE_AMSS] = "AMSS",
+ [MHI_EE_RDDM] = "RDDM",
+ [MHI_EE_WFW] = "WFW",
+ [MHI_EE_PTHRU] = "PASS THRU",
+ [MHI_EE_EDL] = "EDL",
+ [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+ [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
+ [DEV_ST_TRANSITION_PBL] = "PBL",
+ [DEV_ST_TRANSITION_READY] = "READY",
+ [DEV_ST_TRANSITION_SBL] = "SBL",
+ [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+};
+
+const char * const mhi_state_str[MHI_STATE_MAX] = {
+ [MHI_STATE_RESET] = "RESET",
+ [MHI_STATE_READY] = "READY",
+ [MHI_STATE_M0] = "M0",
+ [MHI_STATE_M1] = "M1",
+ [MHI_STATE_M2] = "M2",
+ [MHI_STATE_M3] = "M3",
+ [MHI_STATE_M3_FAST] = "M3_FAST",
+ [MHI_STATE_BHI] = "BHI",
+ [MHI_STATE_SYS_ERR] = "SYS_ERR",
+};
+
+static const char * const mhi_pm_state_str[] = {
+ [MHI_PM_STATE_DISABLE] = "DISABLE",
+ [MHI_PM_STATE_POR] = "POR",
+ [MHI_PM_STATE_M0] = "M0",
+ [MHI_PM_STATE_M2] = "M2",
+ [MHI_PM_STATE_M3_ENTER] = "M?->M3",
+ [MHI_PM_STATE_M3] = "M3",
+ [MHI_PM_STATE_M3_EXIT] = "M3->M0",
+ [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
+ [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
+ [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+};
+
+const char *to_mhi_pm_state_str(enum mhi_pm_state state)
+{
+ int index = find_last_bit((unsigned long *)&state, 32);
+
+ if (index >= ARRAY_SIZE(mhi_pm_state_str))
+ return "Invalid State";
+
+ return mhi_pm_state_str[index];
+}
+
+/* The MHI protocol requires the transfer ring to be aligned to its length */
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring,
+ u64 len)
+{
+ ring->alloc_size = len + (len - 1);
+ ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned)
+ return -ENOMEM;
+
+ ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+
+ return 0;
+}
+
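
A worked example of the alignment arithmetic in mhi_alloc_aligned_ring() above, with illustrative values (note that the mask only yields a true alignment when len is a power of two):

	len        = 0x1000, so alloc_size = 0x1000 + 0xfff = 0x1fff bytes
	dma_handle = 0x80002340   (as returned by mhi_alloc_coherent)
	iommu_base = (0x80002340 + 0xfff) & ~0xfff = 0x80003000
	base       = pre_aligned + (0x80003000 - 0x80002340)
	           = pre_aligned + 0xcc0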
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+}
+
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ /* Setup BHI_INTVEC IRQ */
+ ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
+ mhi_intvec_threaded_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "bhi", mhi_cntrl);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
+ mhi_irq_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "mhi", mhi_event);
+ if (ret) {
+ dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ mhi_cntrl->irq[mhi_event->irq], i);
+ goto error_request;
+ }
+ }
+
+ return 0;
+
+error_request:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+
+ return ret;
+}
+
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event *mhi_event;
+ struct mhi_ring *ring;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+ kfree(mhi_ctxt);
+ mhi_cntrl->mhi_ctxt = NULL;
+}
+
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_ctxt *mhi_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ u32 tmp;
+ int ret = -ENOMEM, i;
+
+ atomic_set(&mhi_cntrl->dev_wake, 0);
+ atomic_set(&mhi_cntrl->pending_pkts, 0);
+
+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ if (!mhi_ctxt)
+ return -ENOMEM;
+
+ /* Setup channel ctxt */
+ mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan,
+ &mhi_ctxt->chan_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->chan_ctxt)
+ goto error_alloc_chan_ctxt;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ /* Skip if it is an offload channel */
+ if (mhi_chan->offload_ch)
+ continue;
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
+ tmp &= ~CHAN_CTX_BRSTMODE_MASK;
+ tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
+ tmp &= ~CHAN_CTX_POLLCFG_MASK;
+ tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ chan_ctxt->chtype = mhi_chan->type;
+ chan_ctxt->erindex = mhi_chan->er_index;
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
+ }
+
+ /* Setup event context */
+ mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings,
+ &mhi_ctxt->er_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->er_ctxt)
+ goto error_alloc_er_ctxt;
+
+ er_ctxt = mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if it is an offload event */
+ if (mhi_event->offload_ev)
+ continue;
+
+ tmp = er_ctxt->intmod;
+ tmp &= ~EV_CTX_INTMODC_MASK;
+ tmp &= ~EV_CTX_INTMODT_MASK;
+ tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
+ er_ctxt->intmod = tmp;
+
+ er_ctxt->ertype = MHI_ER_TYPE_VALID;
+ er_ctxt->msivec = mhi_event->irq;
+ mhi_event->db_cfg.db_mode = true;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_er;
+
+ /*
+ * If the read pointer equals the write pointer, then the
+ * ring is empty
+ */
+ ring->rp = ring->wp = ring->base;
+ er_ctxt->rbase = ring->iommu_base;
+ er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+ er_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &er_ctxt->wp;
+ }
+
+ /* Setup cmd context */
+ mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) *
+ NR_OF_CMD_RINGS,
+ &mhi_ctxt->cmd_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->cmd_ctxt)
+ goto error_alloc_er;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->elements = CMD_EL_PER_RING;
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_cmd;
+
+ ring->rp = ring->wp = ring->base;
+ cmd_ctxt->rbase = ring->iommu_base;
+ cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+ cmd_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &cmd_ctxt->wp;
+ }
+
+ mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+ return 0;
+
+error_alloc_cmd:
+ for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+ i = mhi_cntrl->total_ev_rings;
+ mhi_event = mhi_cntrl->mhi_event + i;
+
+error_alloc_er:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+error_alloc_er_ctxt:
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+error_alloc_chan_ctxt:
+ kfree(mhi_ctxt);
+
+ return ret;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+ u32 val;
+ int i, ret;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ void __iomem *base = mhi_cntrl->regs;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 val;
+ } reg_info[] = {
+ {
+ CCABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ CCABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ ECABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ ECABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ CRCBAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ CRCBAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
+ mhi_cntrl->total_ev_rings,
+ },
+ {
+ MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
+ mhi_cntrl->hw_ev_rings,
+ },
+ {
+ MHICTRLBASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLBASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLLIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHICTRLLIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ { 0, 0, 0 }
+ };
+
+ dev_dbg(dev, "Initializing MHI registers\n");
+
+ /* Read channel db offset */
+ ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
+ CHDBOFF_CHDBOFF_SHIFT, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read CHDBOFF register\n");
+ return -EIO;
+ }
+
+ /* Setup wake db */
+ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+ mhi_cntrl->wake_set = false;
+
+ /* Setup channel db address for each channel in tre_ring */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
+ mhi_chan->tre_ring.db_addr = base + val;
+
+ /* Read event ring db offset */
+ ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
+ ERDBOFF_ERDBOFF_SHIFT, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read ERDBOFF register\n");
+ return -EIO;
+ }
+
+ /* Setup event db address for each ev_ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->ring.db_addr = base + val;
+ }
+
+ /* Setup DB register for primary CMD rings */
+ mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
+
+ /* Write to MMIO registers */
+ for (i = 0; reg_info[i].offset; i++)
+ mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].mask, reg_info[i].shift,
+ reg_info[i].val);
+
+ return 0;
+}
+
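
The reg_info[] table above is applied with a masked read-modify-write; mhi_write_reg_field() itself is declared in the driver's internal.h (added in this series), but the core idea is just this, sketched standalone (the mask/shift values below are illustrative):

#include <stdint.h>

/* Clear the field, then OR in the new value shifted into position -
 * the operation applied for each reg_info[] entry above. */
static inline uint32_t mmio_field_prep(uint32_t reg, uint32_t mask,
				       uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* Example: an 8-bit field at bits 23:16 set to 128. */
	return mmio_field_prep(0, 0xff0000, 16, 128) == 0x800000 ? 0 : 1;
}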
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ vfree(buf_ring->base);
+
+ buf_ring->base = tre_ring->base = NULL;
+ chan_ctxt->rbase = 0;
+}
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ tre_ring->el_size = sizeof(struct mhi_tre);
+ tre_ring->len = tre_ring->el_size * tre_ring->elements;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
+ if (ret)
+ return -ENOMEM;
+
+ buf_ring->el_size = sizeof(struct mhi_buf_info);
+ buf_ring->len = buf_ring->el_size * buf_ring->elements;
+ buf_ring->base = vzalloc(buf_ring->len);
+
+ if (!buf_ring->base) {
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ return -ENOMEM;
+ }
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ chan_ctxt->rbase = tre_ring->iommu_base;
+ chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+ chan_ctxt->rlen = tre_ring->len;
+ tre_ring->ctxt_wp = &chan_ctxt->wp;
+
+ tre_ring->rp = tre_ring->wp = tre_ring->base;
+ buf_ring->rp = buf_ring->wp = buf_ring->base;
+ mhi_chan->db_cfg.db_mode = 1;
+
+ /* Update to all cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_event_config *event_cfg;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, num;
+
+ num = config->num_events;
+ mhi_cntrl->total_ev_rings = num;
+ mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Populate event ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < num; i++) {
+ event_cfg = &config->event_cfg[i];
+
+ mhi_event->er_index = i;
+ mhi_event->ring.elements = event_cfg->num_elements;
+ mhi_event->intmod = event_cfg->irq_moderation_ms;
+ mhi_event->irq = event_cfg->irq;
+
+ if (event_cfg->channel != U32_MAX) {
+ /* This event ring has a dedicated channel */
+ mhi_event->chan = event_cfg->channel;
+ if (mhi_event->chan >= mhi_cntrl->max_chan) {
+ dev_err(dev,
+ "Event Ring channel not available\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->mhi_chan =
+ &mhi_cntrl->mhi_chan[mhi_event->chan];
+ }
+
+ /* Priority is fixed to 1 for now */
+ mhi_event->priority = 1;
+
+ mhi_event->db_cfg.brstmode = event_cfg->mode;
+ if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+ goto error_ev_cfg;
+
+ if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_event->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_event->data_type = event_cfg->data_type;
+
+ switch (mhi_event->data_type) {
+ case MHI_ER_DATA:
+ mhi_event->process_event = mhi_process_data_event_ring;
+ break;
+ case MHI_ER_CTRL:
+ mhi_event->process_event = mhi_process_ctrl_ev_ring;
+ break;
+ default:
+ dev_err(dev, "Event Ring type not supported\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->hw_ring = event_cfg->hardware_event;
+ if (mhi_event->hw_ring)
+ mhi_cntrl->hw_ev_rings++;
+ else
+ mhi_cntrl->sw_ev_rings++;
+
+ mhi_event->cl_manage = event_cfg->client_managed;
+ mhi_event->offload_ev = event_cfg->offload_channel;
+ mhi_event++;
+ }
+
+ /* We need an IRQ for each event ring, plus an additional one for BHI */
+ mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;
+
+ return 0;
+
+error_ev_cfg:
+
+ kfree(mhi_cntrl->mhi_event);
+ return -EINVAL;
+}
+
+static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ struct mhi_channel_config *ch_cfg;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i;
+ u32 chan;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * The allocation of MHI channels can exceed 32KB in some scenarios,
+ * so to avoid any possible memory allocation failures, vzalloc is
+ * used here
+ */
+ mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+ sizeof(*mhi_cntrl->mhi_chan));
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
+
+ /* Populate channel configurations */
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel %d not available\n", chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+
+ mhi_chan->tre_ring.elements = ch_cfg->num_elements;
+ if (!mhi_chan->tre_ring.elements)
+ goto error_chan_cfg;
+
+ /*
+ * For some channels, the local ring length should be bigger than
+ * the transfer ring length due to internal logical channels
+ * in the device, so that the host can queue many more buffers
+ * than the transfer ring can hold. For example, RSC channels
+ * should have a larger local channel length than transfer ring
+ * length.
+ */
+ mhi_chan->buf_ring.elements = ch_cfg->local_elements;
+ if (!mhi_chan->buf_ring.elements)
+ mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
+ mhi_chan->er_index = ch_cfg->event_ring;
+ mhi_chan->dir = ch_cfg->dir;
+
+ /*
+ * For most channels, chtype is identical to the channel direction.
+ * So, if it is not defined, assign the channel direction to
+ * chtype.
+ */
+ mhi_chan->type = ch_cfg->type;
+ if (!mhi_chan->type)
+ mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+ mhi_chan->ee_mask = ch_cfg->ee_mask;
+ mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
+ mhi_chan->lpm_notify = ch_cfg->lpm_notify;
+ mhi_chan->offload_ch = ch_cfg->offload_channel;
+ mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
+ mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->auto_start = ch_cfg->auto_start;
+
+ /*
+ * If MHI host allocates buffers, then the channel direction
+ * should be DMA_FROM_DEVICE
+ */
+ if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ /*
+ * Bi-directional and directionless channels must be
+ * offload channels
+ */
+ if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+ mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ if (!mhi_chan->offload_ch) {
+ mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
+ if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
+ dev_err(dev, "Invalid Door bell mode\n");
+ goto error_chan_cfg;
+ }
+ }
+
+ if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_chan->configured = true;
+
+ if (mhi_chan->lpm_notify)
+ list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return -EINVAL;
+}
+
+static int parse_config(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ int ret;
+
+ /* Parse MHI channel configuration */
+ ret = parse_ch_cfg(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ /* Parse MHI event configuration */
+ ret = parse_ev_cfg(mhi_cntrl, config);
+ if (ret)
+ goto error_ev_cfg;
+
+ mhi_cntrl->timeout_ms = config->timeout_ms;
+ if (!mhi_cntrl->timeout_ms)
+ mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+ mhi_cntrl->bounce_buf = config->use_bounce_buf;
+ mhi_cntrl->buffer_len = config->buf_len;
+ if (!mhi_cntrl->buffer_len)
+ mhi_cntrl->buffer_len = MHI_MAX_MTU;
+
+ /* By default, host is allowed to ring DB in both M0 and M2 states */
+ mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
+ if (config->m2_no_db)
+ mhi_cntrl->db_access &= ~MHI_PM_M2;
+
+ return 0;
+
+error_ev_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_chan *mhi_chan;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_device *mhi_dev;
+ u32 soc_info;
+ int ret, i;
+
+ if (!mhi_cntrl)
+ return -EINVAL;
+
+ if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
+ return -EINVAL;
+
+ if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status)
+ return -EINVAL;
+
+ ret = parse_config(mhi_cntrl, config);
+ if (ret)
+ return -EINVAL;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
+ sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto error_alloc_cmd;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+ mutex_init(&mhi_cntrl->pm_mutex);
+ rwlock_init(&mhi_cntrl->pm_lock);
+ spin_lock_init(&mhi_cntrl->transition_lock);
+ spin_lock_init(&mhi_cntrl->wlock);
+ INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+ INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
+ init_waitqueue_head(&mhi_cntrl->state_event);
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+ spin_lock_init(&mhi_cmd->lock);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ /* Skip for offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->mhi_cntrl = mhi_cntrl;
+ spin_lock_init(&mhi_event->lock);
+ if (mhi_event->data_type == MHI_ER_CTRL)
+ tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+ (ulong)mhi_event);
+ else
+ tasklet_init(&mhi_event->task, mhi_ev_task,
+ (ulong)mhi_event);
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ mutex_init(&mhi_chan->mutex);
+ init_completion(&mhi_chan->completion);
+ rwlock_init(&mhi_chan->lock);
+ }
+
+ if (mhi_cntrl->bounce_buf) {
+ mhi_cntrl->map_single = mhi_map_single_use_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
+ } else {
+ mhi_cntrl->map_single = mhi_map_single_no_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
+ }
+
+ /* Read the MHI device info */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+ SOC_HW_VERSION_OFFS, &soc_info);
+ if (ret)
+ goto error_alloc_dev;
+
+ mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
+ SOC_HW_VERSION_FAM_NUM_SHFT;
+ mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
+ SOC_HW_VERSION_DEV_NUM_SHFT;
+ mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
+ SOC_HW_VERSION_MAJOR_VER_SHFT;
+ mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
+ SOC_HW_VERSION_MINOR_VER_SHFT;
+
+ /* Register controller with MHI bus */
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto error_alloc_dev;
+ }
+
+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
+
+ /* Init wakeup source */
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto error_add_dev;
+
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ return 0;
+
+error_add_dev:
+ put_device(&mhi_dev->dev);
+
+error_alloc_dev:
+ kfree(mhi_cntrl->mhi_cmd);
+
+error_alloc_cmd:
+ vfree(mhi_cntrl->mhi_chan);
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_register_controller);
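+
+/*
+ * Registration sketch (illustrative, not part of this driver): before
+ * calling mhi_register_controller(), a controller driver must provide the
+ * callbacks validated above and the MMIO base used to read SOC_HW_VERSION.
+ * The helper names here are hypothetical:
+ *
+ *	mhi_cntrl->cntrl_dev = &pdev->dev;
+ *	mhi_cntrl->regs = mmio_base;
+ *	mhi_cntrl->runtime_get = my_runtime_get;
+ *	mhi_cntrl->runtime_put = my_runtime_put;
+ *	mhi_cntrl->status_cb = my_status_cb;
+ *	mhi_cntrl->link_status = my_link_status;
+ *	ret = mhi_register_controller(mhi_cntrl, &my_config);
+ */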
+
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
+ unsigned int i;
+
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_event);
+
+ /* Drop the references to MHI devices created for channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ put_device(&mhi_chan->mhi_dev->dev);
+ }
+ vfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+}
+EXPORT_SYMBOL_GPL(mhi_unregister_controller);
+
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 bhie_off;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+
+ /*
+	 * Allocate the RDDM table if specified; this table is for debugging purposes
+ */
+ if (mhi_cntrl->rddm_size) {
+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size);
+
+ /*
+ * This controller supports RDDM, so we need to manually clear
+ * BHIE RX registers since POR values are undefined.
+ */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+ &bhie_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHIE offset\n");
+ goto bhie_error;
+ }
+
+ mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
+ memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
+ 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
+ 4);
+
+ if (mhi_cntrl->rddm_image)
+ mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+ }
+
+ mhi_cntrl->pre_init = true;
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return 0;
+
+bhie_error:
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
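+
+/*
+ * Note on ordering (informational): mhi_prepare_for_power_up() allocates
+ * the device context (and the optional RDDM table) and is meant to be
+ * called after mhi_register_controller() but before the controller is
+ * powered up through the MHI power management API.
+ */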
+
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+ mhi_cntrl->pre_init = false;
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
+
+static void mhi_release_device(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ /*
+ * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
+ * devices for the channels will only get created if the mhi_dev
+	 * associated with them is NULL. This scenario occurs during
+	 * controller suspend and resume.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_bus_type;
+ dev->release = mhi_release_device;
+ dev->parent = mhi_cntrl->cntrl_dev;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_wake = 0;
+
+ return mhi_dev;
+}
+
+static int mhi_driver_probe(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct device_driver *drv = dev->driver;
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ struct mhi_event *mhi_event;
+ struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+ int ret;
+
+ /* Bring device out of LPM */
+ ret = mhi_device_get_sync(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ if (ul_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+		/* For non-offload channels, xfer_cb should be provided */
+ if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+ goto exit_probe;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ if (ul_chan->auto_start) {
+ ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
+ if (ret)
+ goto exit_probe;
+ }
+ }
+
+ if (dl_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+		/* For non-offload channels, xfer_cb should be provided */
+ if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+ goto exit_probe;
+
+ mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+ /*
+ * If the channel event ring is managed by client, then
+ * status_cb must be provided so that the framework can
+ * notify pending data
+ */
+ if (mhi_event->cl_manage && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+ }
+
+ /* Call the user provided probe function */
+ ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+ if (ret)
+ goto exit_probe;
+
+ if (dl_chan && dl_chan->auto_start)
+ mhi_prepare_channel(mhi_cntrl, dl_chan);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+
+exit_probe:
+ mhi_unprepare_from_transfer(mhi_dev);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+}
+
+static int mhi_driver_remove(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ enum mhi_ch_state ch_state[] = {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_DISABLED
+ };
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Reset both channels */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ /* Wake all threads waiting for completion */
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_EV_CC_INVALID;
+ complete_all(&mhi_chan->completion);
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Set the channel state to disabled */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ ch_state[dir] = mhi_chan->ch_state;
+ mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Reset the non-offload channel */
+ if (!mhi_chan->offload_ch)
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ mhi_drv->remove(mhi_dev);
+
+ /* De-init channel if it was enabled */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
+ !mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ while (mhi_dev->dev_wake)
+ mhi_device_put(mhi_dev);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+}
+
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ driver->bus = &mhi_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_driver_probe;
+ driver->remove = mhi_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_driver_register);
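+
+/*
+ * Client driver sketch (illustrative, not part of this driver): a client
+ * binds to channels by name via the id_table and supplies the callbacks
+ * validated in mhi_driver_probe(). The identifiers below are hypothetical,
+ * and a mhi_driver_register() wrapper around __mhi_driver_register() is
+ * assumed to be provided by <linux/mhi.h>:
+ *
+ *	static const struct mhi_device_id my_id_table[] = {
+ *		{ .chan = "EXAMPLE" },
+ *		{},
+ *	};
+ *
+ *	static struct mhi_driver my_driver = {
+ *		.id_table = my_id_table,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *		.ul_xfer_cb = my_ul_cb,
+ *		.dl_xfer_cb = my_dl_cb,
+ *		.driver = { .name = "my_mhi_client" },
+ *	};
+ */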
+
+void mhi_driver_unregister(struct mhi_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_driver_unregister);
+
+static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
+ mhi_dev->chan_name);
+}
+
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->chan_name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_bus_type = {
+ .name = "mhi",
+ .dev_name = "mhi",
+ .match = mhi_match,
+ .uevent = mhi_uevent,
+};
+
+static int __init mhi_init(void)
+{
+ return bus_register(&mhi_bus_type);
+}
+
+static void __exit mhi_exit(void)
+{
+ bus_unregister(&mhi_bus_type);
+}
+
+postcore_initcall(mhi_init);
+module_exit(mhi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Host Interface");
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
new file mode 100644
index 000000000000..5deadfaa053a
--- /dev/null
+++ b/drivers/bus/mhi/core/internal.h
@@ -0,0 +1,687 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+#include <linux/mhi.h>
+
+extern struct bus_type mhi_bus_type;
+
+/* MHI MMIO register mapping */
+#define PCI_INVALID_READ(val) (val == U32_MAX)
+
+#define MHIREGLEN (0x0)
+#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
+#define MHIREGLEN_MHIREGLEN_SHIFT (0)
+
+#define MHIVER (0x8)
+#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
+#define MHIVER_MHIVER_SHIFT (0)
+
+#define MHICFG (0x10)
+#define MHICFG_NHWER_MASK (0xFF000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xFF0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xFF00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xFF)
+#define MHICFG_NCH_SHIFT (0)
+
+#define CHDBOFF (0x18)
+#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
+#define CHDBOFF_CHDBOFF_SHIFT (0)
+
+#define ERDBOFF (0x20)
+#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
+#define ERDBOFF_ERDBOFF_SHIFT (0)
+
+#define BHIOFF (0x28)
+#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
+#define BHIOFF_BHIOFF_SHIFT (0)
+
+#define BHIEOFF (0x2C)
+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
+#define BHIEOFF_BHIEOFF_SHIFT (0)
+
+#define DEBUGOFF (0x30)
+#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
+#define DEBUGOFF_DEBUGOFF_SHIFT (0)
+
+#define MHICTRL (0x38)
+#define MHICTRL_MHISTATE_MASK (0x0000FF00)
+#define MHICTRL_MHISTATE_SHIFT (8)
+#define MHICTRL_RESET_MASK (0x2)
+#define MHICTRL_RESET_SHIFT (1)
+
+#define MHISTATUS (0x48)
+#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
+#define MHISTATUS_MHISTATE_SHIFT (8)
+#define MHISTATUS_SYSERR_MASK (0x4)
+#define MHISTATUS_SYSERR_SHIFT (2)
+#define MHISTATUS_READY_MASK (0x1)
+#define MHISTATUS_READY_SHIFT (0)
+
+#define CCABAP_LOWER (0x58)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
+
+#define CCABAP_HIGHER (0x5C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
+
+#define ECABAP_LOWER (0x60)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
+
+#define ECABAP_HIGHER (0x64)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
+
+#define CRCBAP_LOWER (0x68)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
+
+#define CRCBAP_HIGHER (0x6C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
+
+#define CRDB_LOWER (0x70)
+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
+
+#define CRDB_HIGHER (0x74)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
+
+#define MHICTRLBASE_LOWER (0x80)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
+
+#define MHICTRLBASE_HIGHER (0x84)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
+
+#define MHICTRLLIMIT_LOWER (0x88)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
+
+#define MHICTRLLIMIT_HIGHER (0x8C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
+
+#define MHIDATABASE_LOWER (0x98)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
+#define MHI_SOC_RESET_REQ BIT(0)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30)
+#define BHI_ERRDBG1 (0x34)
+#define BHI_ERRDBG2 (0x38)
+#define BHI_ERRDBG3 (0x3C)
+#define BHI_SERIALNU (0x40)
+#define BHI_SBLANTIROLLVER (0x44)
+#define BHI_NUMSEG (0x48)
+#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_RSVD5 (0xC4)
+#define BHI_STATUS_MASK (0xC0000000)
+#define BHI_STATUS_SHIFT (30)
+#define BHI_STATUS_ERROR (3)
+#define BHI_STATUS_SUCCESS (2)
+#define BHI_STATUS_RESET (0)
+
+/* MHI BHIE offsets */
+#define BHIE_MSMSOCID_OFFS (0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
+#define BHIE_TXVECSIZE_OFFS (0x0034)
+#define BHIE_TXVECDB_OFFS (0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
+#define BHIE_RXVECSIZE_OFFS (0x0068)
+#define BHIE_RXVECDB_OFFS (0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
+#define SOC_HW_VERSION_OFFS (0x224)
+#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
+#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
+#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
+#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
+#define SOC_HW_VERSION_MINOR_VER_SHFT (0)
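+
+/*
+ * Worked example (informational): a SOC_HW_VERSION value of 0x60010200
+ * decodes to family number 0x6, device number 0x001, major version 0x02
+ * and minor version 0x00 using the masks and shifts above.
+ */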
+
+#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
+#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
+#define EV_CTX_INTMODC_SHIFT 8
+#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
+#define EV_CTX_INTMODT_SHIFT 16
+struct mhi_event_ctxt {
+ __u32 intmod;
+ __u32 ertype;
+ __u32 msivec;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
+#define CHAN_CTX_CHSTATE_SHIFT 0
+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
+#define CHAN_CTX_BRSTMODE_SHIFT 8
+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
+#define CHAN_CTX_POLLCFG_SHIFT 10
+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
+struct mhi_chan_ctxt {
+ __u32 chcfg;
+ __u32 chtype;
+ __u32 erindex;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+struct mhi_cmd_ctxt {
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+struct mhi_ctxt {
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ dma_addr_t er_ctxt_addr;
+ dma_addr_t chan_ctxt_addr;
+ dma_addr_t cmd_ctxt_addr;
+};
+
+struct mhi_tre {
+ u64 ptr;
+ u32 dword[2];
+};
+
+struct bhi_vec_entry {
+ u64 dma_addr;
+ u64 size;
+};
+
+enum mhi_cmd_type {
+ MHI_CMD_NOP = 1,
+ MHI_CMD_RESET_CHAN = 16,
+ MHI_CMD_STOP_CHAN = 17,
+ MHI_CMD_START_CHAN = 18,
+};
+
+/* No operation command */
+#define MHI_TRE_CMD_NOOP_PTR (0)
+#define MHI_TRE_CMD_NOOP_DWORD0 (0)
+#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)
+
+/* Channel reset command */
+#define MHI_TRE_CMD_RESET_PTR (0)
+#define MHI_TRE_CMD_RESET_DWORD0 (0)
+#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_RESET_CHAN << 16))
+
+/* Channel stop command */
+#define MHI_TRE_CMD_STOP_PTR (0)
+#define MHI_TRE_CMD_STOP_DWORD0 (0)
+#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_STOP_CHAN << 16))
+
+/* Channel start command */
+#define MHI_TRE_CMD_START_PTR (0)
+#define MHI_TRE_CMD_START_DWORD0 (0)
+#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_START_CHAN << 16))
+
+#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+
+/* Event descriptor macros */
+#define MHI_TRE_EV_PTR(ptr) (ptr)
+#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
+#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
+#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
+#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
+#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
+
+/* Transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) (ptr)
+#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
+ | (ieot << 9) | (ieob << 8) | chain)
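+
+/*
+ * Worked example (informational): MHI_TRE_DATA_DWORD1(0, 1, 0, 0), i.e. a
+ * single unchained buffer with only IEOT set, evaluates to
+ * (2 << 16) | (1 << 9) = 0x20200.
+ */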
+
+/* RSC transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
+#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
+
+enum mhi_pkt_type {
+ MHI_PKT_TYPE_INVALID = 0x0,
+ MHI_PKT_TYPE_NOOP_CMD = 0x1,
+ MHI_PKT_TYPE_TRANSFER = 0x2,
+ MHI_PKT_TYPE_COALESCING = 0x8,
+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+ MHI_PKT_TYPE_TX_EVENT = 0x22,
+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+ MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum mhi_ev_ccs {
+ MHI_EV_CC_INVALID = 0x0,
+ MHI_EV_CC_SUCCESS = 0x1,
+ MHI_EV_CC_EOT = 0x2, /* End of transfer event */
+ MHI_EV_CC_OVERFLOW = 0x3,
+ MHI_EV_CC_EOB = 0x4, /* End of block event */
+ MHI_EV_CC_OOB = 0x5, /* Out of block event */
+ MHI_EV_CC_DB_MODE = 0x6,
+ MHI_EV_CC_UNDEFINED_ERR = 0x10,
+ MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+enum mhi_ch_state {
+ MHI_CH_STATE_DISABLED = 0x0,
+ MHI_CH_STATE_ENABLED = 0x1,
+ MHI_CH_STATE_RUNNING = 0x2,
+ MHI_CH_STATE_SUSPENDED = 0x3,
+ MHI_CH_STATE_STOP = 0x4,
+ MHI_CH_STATE_ERROR = 0x5,
+};
+
+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
+ mode != MHI_DB_BRST_ENABLE)
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+ "INVALID_EE" : mhi_ee_str[ee])
+
+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
+ ee == MHI_EE_EDL)
+
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)
+
+enum dev_st_transition {
+ DEV_ST_TRANSITION_PBL,
+ DEV_ST_TRANSITION_READY,
+ DEV_ST_TRANSITION_SBL,
+ DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_MAX,
+};
+
+extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
+#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
+ "INVALID_STATE" : dev_state_tran_str[state])
+
+extern const char * const mhi_state_str[MHI_STATE_MAX];
+#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
+ !mhi_state_str[state]) ? \
+ "INVALID_STATE" : mhi_state_str[state])
+
+/* internal power states */
+enum mhi_pm_state {
+ MHI_PM_STATE_DISABLE,
+ MHI_PM_STATE_POR,
+ MHI_PM_STATE_M0,
+ MHI_PM_STATE_M2,
+ MHI_PM_STATE_M3_ENTER,
+ MHI_PM_STATE_M3,
+ MHI_PM_STATE_M3_EXIT,
+ MHI_PM_STATE_FW_DL_ERR,
+ MHI_PM_STATE_SYS_ERR_DETECT,
+ MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SHUTDOWN_PROCESS,
+ MHI_PM_STATE_LD_ERR_FATAL_DETECT,
+ MHI_PM_STATE_MAX
+};
+
+#define MHI_PM_DISABLE BIT(0)
+#define MHI_PM_POR BIT(1)
+#define MHI_PM_M0 BIT(2)
+#define MHI_PM_M2 BIT(3)
+#define MHI_PM_M3_ENTER BIT(4)
+#define MHI_PM_M3 BIT(5)
+#define MHI_PM_M3_EXIT BIT(6)
+/* firmware download failure state */
+#define MHI_PM_FW_DL_ERR BIT(7)
+#define MHI_PM_SYS_ERR_DETECT BIT(8)
+#define MHI_PM_SYS_ERR_PROCESS BIT(9)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+/* link not accessible */
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
+ mhi_cntrl->db_access)
+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_EXIT))
+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ MHI_PM_IN_ERROR_STATE(pm_state))
+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
+ (MHI_PM_M3_ENTER | MHI_PM_M3))
+
+#define NR_OF_CMD_RINGS 1
+#define CMD_EL_PER_RING 128
+#define PRIMARY_CMD_RING 0
+#define MHI_DEV_WAKE_DB 127
+#define MHI_MAX_MTU 0xffff
+
+enum mhi_er_type {
+ MHI_ER_TYPE_INVALID = 0x0,
+ MHI_ER_TYPE_VALID = 0x1,
+};
+
+struct db_cfg {
+ bool reset_req;
+ bool db_mode;
+ u32 pollcfg;
+ enum mhi_db_brst_mode brstmode;
+ dma_addr_t db_val;
+ void (*process_db)(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg, void __iomem *io_addr,
+ dma_addr_t db_val);
+};
+
+struct mhi_pm_transitions {
+ enum mhi_pm_state from_state;
+ u32 to_states;
+};
+
+struct state_transition {
+ struct list_head node;
+ enum dev_st_transition state;
+};
+
+struct mhi_ring {
+ dma_addr_t dma_handle;
+ dma_addr_t iommu_base;
+	u64 *ctxt_wp; /* points to ctxt wp */
+ void *pre_aligned;
+ void *base;
+ void *rp;
+ void *wp;
+ size_t el_size;
+ size_t len;
+ size_t elements;
+ size_t alloc_size;
+ void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+ struct mhi_ring ring;
+ spinlock_t lock;
+};
+
+struct mhi_buf_info {
+ void *v_addr;
+ void *bb_addr;
+ void *wp;
+ void *cb_buf;
+ dma_addr_t p_addr;
+ size_t len;
+ enum dma_data_direction dir;
+ bool used; /* Indicates whether the buffer is used or not */
+ bool pre_mapped; /* Already pre-mapped by client */
+};
+
+struct mhi_event {
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *mhi_chan; /* dedicated to channel */
+ u32 er_index;
+ u32 intmod;
+ u32 irq;
+ int chan; /* this event ring is dedicated to a channel (optional) */
+ u32 priority;
+ enum mhi_er_data_type data_type;
+ struct mhi_ring ring;
+ struct db_cfg db_cfg;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ int (*process_event)(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota);
+ bool hw_ring;
+ bool cl_manage;
+ bool offload_ev; /* managed by a device driver */
+};
+
+struct mhi_chan {
+ const char *name;
+ /*
+ * Important: When consuming, increment tre_ring first and when
+ * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
+	 * is guaranteed to have space so we do not need to check both rings.
+ */
+ struct mhi_ring buf_ring;
+ struct mhi_ring tre_ring;
+ u32 chan;
+ u32 er_index;
+ u32 intmod;
+ enum mhi_ch_type type;
+ enum dma_data_direction dir;
+ struct db_cfg db_cfg;
+ enum mhi_ch_ee_mask ee_mask;
+ enum mhi_ch_state ch_state;
+ enum mhi_ev_ccs ccs;
+ struct mhi_device *mhi_dev;
+ void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
+ struct mutex mutex;
+ struct completion completion;
+ rwlock_t lock;
+ struct list_head node;
+ bool lpm_notify;
+ bool configured;
+ bool offload_ch;
+ bool pre_alloc;
+ bool auto_start;
+ bool wake_capable;
+};
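+
+/*
+ * Informational: the two rings of a channel move in lock-step. On the
+ * queue path (e.g. mhi_queue_skb() in main.c) a buffer descriptor is
+ * written at buf_ring->wp and the matching TRE at tre_ring->wp, so
+ * checking tre_ring for space is sufficient, as the comment in
+ * struct mhi_chan notes.
+ */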
+
+/* Default MHI timeout */
+#define MHI_TIMEOUT_MS (1000)
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
+
+int mhi_destroy_device(struct device *dev, void *data);
+void mhi_create_devices(struct mhi_controller *mhi_cntrl);
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info, size_t alloc_size);
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info);
+
+/* Power management APIs */
+enum mhi_pm_state __must_check mhi_tryset_pm_state(
+ struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state);
+const char *to_mhi_pm_state_str(enum mhi_pm_state state);
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state);
+void mhi_pm_st_worker(struct work_struct *work);
+void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_fw_load_worker(struct work_struct *work);
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
+void mhi_ctrl_ev_task(unsigned long data);
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd);
+
+/* Register access methods */
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
+ void __iomem *db_addr, dma_addr_t db_val);
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_mode, void __iomem *db_addr,
+ dma_addr_t db_val);
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out);
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 shift, u32 *out);
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val);
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 mask, u32 shift, u32 val);
+void mhi_ring_er_db(struct mhi_event *mhi_event);
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val);
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Initialization methods */
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info);
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Memory allocation methods */
+static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ dma_addr_t *dma_handle,
+ gfp_t gfp)
+{
+ void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle,
+ gfp);
+
+ return buf;
+}
+
+static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ void *vaddr,
+ dma_addr_t dma_handle)
+{
+ dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle);
+}
+
+/* Event processing methods */
+void mhi_ctrl_ev_task(unsigned long data);
+void mhi_ev_task(unsigned long data);
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+
+/* ISR handlers */
+irqreturn_t mhi_irq_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ void *buf, void *cb, size_t buf_len, enum mhi_flags flags);
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+
+#endif /* _MHI_INT_H */
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
new file mode 100644
index 000000000000..eb4256b81406
--- /dev/null
+++ b/drivers/bus/mhi/core/main.c
@@ -0,0 +1,1529 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out)
+{
+ u32 tmp = readl(base + offset);
+
+ /* If there is any unexpected value, query the link status */
+ if (PCI_INVALID_READ(tmp) &&
+ mhi_cntrl->link_status(mhi_cntrl))
+ return -EIO;
+
+ *out = tmp;
+
+ return 0;
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 shift, u32 *out)
+{
+ u32 tmp;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ *out = (tmp & mask) >> shift;
+
+ return 0;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 mask, u32 shift, u32 val)
+{
+ int ret;
+ u32 tmp;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return;
+
+ tmp &= ~mask;
+ tmp |= (val << shift);
+ mhi_write_reg(mhi_cntrl, base, offset, tmp);
+}
+
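+/*
+ * A 64-bit doorbell value is written as two 32-bit MMIO writes: the upper
+ * word at offset 4 first, then the lower word at offset 0.
+ */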
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
+ mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
+}
+
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ if (db_cfg->db_mode) {
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+ db_cfg->db_mode = 0;
+ }
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+ ring->db_addr, *ring->ctxt_wp);
+}
+
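+/*
+ * ring->wp is a host virtual address; the device-visible doorbell value is
+ * the corresponding IOMMU address, iommu_base + (wp - base), which is the
+ * inverse of the mhi_to_virtual() translation below.
+ */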
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+ dma_addr_t db;
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_write_db(mhi_cntrl, ring->db_addr, db);
+}
+
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+ dma_addr_t db;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
+ ring->db_addr, db);
+}
+
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+ u32 exec;
+ int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+ return (ret) ? MHI_EE_MAX : exec;
+}
+
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
+{
+ u32 state;
+ int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &state);
+ return ret ? MHI_STATE_MAX : state;
+}
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
+ buf_info->v_addr, buf_info->len,
+ buf_info->dir);
+ if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
+ &buf_info->p_addr, GFP_ATOMIC);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (buf_info->dir == DMA_TO_DEVICE)
+ memcpy(buf, buf_info->v_addr, buf_info->len);
+
+ buf_info->bb_addr = buf;
+
+ return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
+ buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ if (buf_info->dir == DMA_FROM_DEVICE)
+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+ mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
+ buf_info->p_addr);
+}
+
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ int nr_el;
+
+ if (ring->wp < ring->rp) {
+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ } else {
+ nr_el = (ring->rp - ring->base) / ring->el_size;
+ nr_el += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+
+ return nr_el;
+}
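+
+/*
+ * Worked example (informational): for a ring of four elements with rp at
+ * base and wp one element past base, one element is in flight and one
+ * slot is reserved to distinguish a full ring from an empty one, so
+ * (rp - base) / el_size + (base + len - wp) / el_size - 1 = 0 + 3 - 1 = 2
+ * elements are available.
+ */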
+
+static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return (addr - ring->iommu_base) + ring->base;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+	/* Make sure the ring pointer update is visible to all cores */
+ smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+	/* Make sure the ring pointer update is visible to all cores */
+ smp_wmb();
+}
+
+int mhi_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_device *mhi_dev;
+ struct mhi_controller *mhi_cntrl;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* Only destroy virtual devices that are attached to the bus */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /*
+ * For the suspend and resume case, this function will get called
+ * without mhi_unregister_controller(). Hence, we need to drop the
+ * references to mhi_dev created for ul and dl channels. We can
+ * be sure that there will be no instances of mhi_dev left after
+ * this.
+ */
+ if (mhi_dev->ul_chan)
+ put_device(&mhi_dev->ul_chan->mhi_dev->dev);
+
+ if (mhi_dev->dl_chan)
+ put_device(&mhi_dev->dl_chan->mhi_dev->dev);
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+ mhi_dev->chan_name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+{
+ struct mhi_driver *mhi_drv;
+
+ if (!mhi_dev->dev.driver)
+ return;
+
+ mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+ if (mhi_drv->status_cb)
+ mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+
+/* Bind MHI channels to MHI devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *mhi_chan;
+ struct mhi_device *mhi_dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+ !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+ continue;
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (!mhi_dev)
+ return;
+
+ mhi_dev->dev_type = MHI_DEVICE_XFER;
+ switch (mhi_chan->dir) {
+ case DMA_TO_DEVICE:
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ break;
+ case DMA_FROM_DEVICE:
+ /* We use dl_chan as offload channels */
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ break;
+ default:
+ dev_err(dev, "Direction not supported\n");
+ put_device(&mhi_dev->dev);
+ return;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Check next channel if it matches */
+ if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+ if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+ i++;
+ mhi_chan++;
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+ }
+ }
+
+ /* Channel name is same for both UL and DL */
+ mhi_dev->chan_name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
+ mhi_dev->chan_name);
+
+ /* Init wakeup source if available */
+ if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+ }
+}
+
+irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+{
+ struct mhi_event *mhi_event = dev;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ /* Only proceed if event ring has pending events */
+ if (ev_ring->rp == dev_rp)
+ return IRQ_HANDLED;
+
+ /* For client managed event ring, notify pending data */
+ if (mhi_event->cl_manage) {
+ struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+ struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_dev)
+ mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
+ } else {
+ tasklet_schedule(&mhi_event->task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+ enum mhi_state state = MHI_STATE_MAX;
+ enum mhi_pm_state pm_state = 0;
+ enum mhi_ee_type ee = 0;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* If the device is in RDDM, don't bother processing the SYS error */
+ if (mhi_cntrl->ee == MHI_EE_RDDM) {
+ if (mhi_cntrl->ee != ee) {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+ goto exit_intvec;
+ }
+
+ if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* For fatal errors, we let controller decide next step */
+ if (MHI_IN_PBL(ee))
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+ else
+ schedule_work(&mhi_cntrl->syserr_worker);
+ }
+
+exit_intvec:
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+
+ /* Wake up events waiting for state change */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ dma_addr_t ctxt_wp;
+
+ /* Update the WP */
+ ring->wp += ring->el_size;
+ ctxt_wp = *ring->ctxt_wp + ring->el_size;
+
+ if (ring->wp >= (ring->base + ring->len)) {
+ ring->wp = ring->base;
+ ctxt_wp = ring->iommu_base;
+ }
+
+ *ring->ctxt_wp = ctxt_wp;
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* Update to all cores */
+ smp_wmb();
+}
+
+static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result;
+ unsigned long flags = 0;
+ u32 ev_code;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+ /*
+ * If it's a DB Event then we need to grab the lock
+	 * with preemption disabled and as a writer because we
+	 * have to update the db register and another thread
+	 * could be doing the same.
+ */
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_lock_irqsave(&mhi_chan->lock, flags);
+ else
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_tx_event;
+
+ switch (ev_code) {
+ case MHI_EV_CC_OVERFLOW:
+ case MHI_EV_CC_EOB:
+ case MHI_EV_CC_EOT:
+ {
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+ struct mhi_tre *local_rp, *ev_tre;
+ void *dev_rp;
+ struct mhi_buf_info *buf_info;
+ u16 xfer_len;
+
+ /* Get the TRB this event points to */
+ ev_tre = mhi_to_virtual(tre_ring, ptr);
+
+ dev_rp = ev_tre + 1;
+ if (dev_rp >= (tre_ring->base + tre_ring->len))
+ dev_rp = tre_ring->base;
+
+ result.dir = mhi_chan->dir;
+
+ local_rp = tre_ring->rp;
+ while (local_rp != dev_rp) {
+ buf_info = buf_ring->rp;
+ /* If it's the last TRE, get length from the event */
+ if (local_rp == ev_tre)
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+ else
+ xfer_len = buf_info->len;
+
+ /* Unmap if it's not pre-mapped by client */
+ if (likely(!buf_info->pre_mapped))
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ result.buf_addr = buf_info->cb_buf;
+ result.bytes_xferd = xfer_len;
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_dec(&mhi_cntrl->pending_pkts);
+
+ /*
+			 * Recycle the buffer if it is pre-allocated. If there
+			 * is an error, there is not much we can do apart from
+			 * dropping the packet.
+ */
+ if (mhi_chan->pre_alloc) {
+ if (mhi_queue_buf(mhi_chan->mhi_dev,
+ mhi_chan->dir,
+ buf_info->cb_buf,
+ buf_info->len, MHI_EOT)) {
+ dev_err(dev,
+ "Error recycling buffer for chan:%d\n",
+ mhi_chan->chan);
+ kfree(buf_info->cb_buf);
+ }
+ }
+ }
+ break;
+ } /* CC_EOT */
+ case MHI_EV_CC_OOB:
+ case MHI_EV_CC_DB_MODE:
+ {
+ unsigned long flags;
+
+ mhi_chan->db_cfg.db_mode = 1;
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+ if (tre_ring->wp != tre_ring->rp &&
+ MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ }
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+ break;
+ }
+ case MHI_EV_CC_BAD_TRE:
+ default:
+ dev_err(dev, "Unknown event 0x%x\n", ev_code);
+ break;
+ } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
+
+end_process_tx_event:
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_unlock_irqrestore(&mhi_chan->lock, flags);
+ else
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_result result;
+ int ev_code;
+ u32 cookie; /* offset to local descriptor */
+ u16 xfer_len;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ cookie = MHI_TRE_GET_EV_COOKIE(event);
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+	/* Received an out-of-bounds cookie */
+ WARN_ON(cookie >= buf_ring->len);
+
+ buf_info = buf_ring->base + cookie;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+ result.bytes_xferd = xfer_len;
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_rsc_event;
+
+ WARN_ON(!buf_info->used);
+
+ /* notify the client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /*
+	 * Note: We're arbitrarily incrementing RP even though the completion
+	 * packet we just processed might not be the one RP points to. We can
+	 * do this because the device is guaranteed to cache descriptors in
+	 * the order it receives them, so even though this completion event
+	 * is for a later descriptor, we can re-use all descriptors in
+	 * between.
+	 * Example:
+	 * Transfer Ring has descriptors: A, B, C, D
+	 * The last descriptor the host queued is D (WP) and the first
+	 * descriptor the host queued is A (RP).
+	 * The completion event we just serviced is for descriptor C.
+	 * Then we can safely re-queue descriptors to replace A, B, and C,
+	 * even though the host did not receive completions for them.
+ */
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ buf_info->used = false;
+
+end_process_rsc_event:
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *tre)
+{
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
+ struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *mhi_ring = &cmd_ring->ring;
+ struct mhi_tre *cmd_pkt;
+ struct mhi_chan *mhi_chan;
+ u32 chan;
+
+ cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+
+ chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+
+ mhi_del_ring_element(mhi_cntrl, mhi_ring);
+}
+
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 chan;
+ int count = 0;
+
+ /*
+ * This is a quick check to avoid unnecessary event processing
+	 * in case MHI is already in an error state, but it's still possible
+	 * to transition to an error state while processing events.
+ */
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ switch (type) {
+ case MHI_PKT_TYPE_BW_REQ_EVENT:
+ {
+ struct mhi_link_info *link_info;
+
+ link_info = &mhi_cntrl->mhi_link_info;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ link_info->target_link_speed =
+ MHI_TRE_GET_EV_LINKSPEED(local_rp);
+ link_info->target_link_width =
+ MHI_TRE_GET_EV_LINKWIDTH(local_rp);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_dbg(dev, "Received BW_REQ event\n");
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
+ break;
+ }
+ case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
+ {
+ enum mhi_state new_state;
+
+ new_state = MHI_TRE_GET_EV_STATE(local_rp);
+
+ dev_dbg(dev, "State change event to state: %s\n",
+ TO_MHI_STATE_STR(new_state));
+
+ switch (new_state) {
+ case MHI_STATE_M0:
+ mhi_pm_m0_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M1:
+ mhi_pm_m1_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M3:
+ mhi_pm_m3_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_SYS_ERR:
+ {
+ enum mhi_pm_state new_state;
+
+ dev_dbg(dev, "System error detected\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_cntrl->syserr_worker);
+ break;
+ }
+ default:
+ dev_err(dev, "Invalid state: %s\n",
+ TO_MHI_STATE_STR(new_state));
+ }
+
+ break;
+ }
+ case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
+ mhi_process_cmd_completion(mhi_cntrl, local_rp);
+ break;
+ case MHI_PKT_TYPE_EE_EVENT:
+ {
+ enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
+ enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
+
+ dev_dbg(dev, "Received EE event: %s\n",
+ TO_MHI_EXEC_STR(event));
+ switch (event) {
+ case MHI_EE_SBL:
+ st = DEV_ST_TRANSITION_SBL;
+ break;
+ case MHI_EE_WFW:
+ case MHI_EE_AMSS:
+ st = DEV_ST_TRANSITION_MISSION_MODE;
+ break;
+ case MHI_EE_RDDM:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = event;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ default:
+ dev_err(dev,
+ "Unhandled EE event: 0x%x\n", type);
+ }
+ if (st != DEV_ST_TRANSITION_MAX)
+ mhi_queue_state_transition(mhi_cntrl, st);
+
+ break;
+ }
+ case MHI_PKT_TYPE_TX_EVENT:
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ break;
+ default:
+ dev_err(dev, "Unhandled event type: %d\n", type);
+ break;
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ count++;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ int count = 0;
+ u32 chan;
+ struct mhi_chan *mhi_chan;
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp && event_quota > 0) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ count++;
+ }
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
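+/* Tasklet handler for data event rings */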
+void mhi_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+ /* process all pending events */
+ spin_lock_bh(&mhi_event->lock);
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ spin_unlock_bh(&mhi_event->lock);
+}
+
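+/* Tasklet handler for the control event ring */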
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ enum mhi_pm_state pm_state = 0;
+ int ret;
+
+ /*
+	 * We can check the PM state w/o a lock here because there is no way
+	 * the PM state can change from reg access valid to no access while
+	 * this thread is being executed.
+ */
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ /*
+		 * We may have a pending event but are not allowed to process
+		 * it since we are probably in a suspended state, so trigger
+		 * a resume.
+ */
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+ return;
+ }
+
+	/* Process ctrl events */
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+ /*
+	 * We received an IRQ but there were no events to process. Maybe the
+	 * device went to SYS_ERR state? Check the state to confirm.
+ */
+ if (!ret) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_cntrl->syserr_worker);
+ }
+}
+
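+/*
+ * The ring is full when advancing the write pointer by one element would
+ * make it collide with the read pointer, i.e. one slot is always left
+ * empty so that a full ring can be distinguished from an empty one.
+ */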
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ void *tmp = ring->wp + ring->el_size;
+
+ if (tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+ int ret;
+
+ /* If MHI host pre-allocates buffers then client drivers cannot queue */
+ if (mhi_chan->pre_alloc)
+ return -EINVAL;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+	/* Toggle wake to exit M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ /* Generate the TRE */
+ buf_info = buf_ring->wp;
+
+ buf_info->v_addr = skb->data;
+ buf_info->cb_buf = skb;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ goto map_error;
+
+ mhi_tre = tre_ring->wp;
+
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+map_error:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
+
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+
+ /* If MHI host pre-allocates buffers then client drivers cannot queue */
+ if (mhi_chan->pre_alloc)
+ return -EINVAL;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		dev_err(dev, "MHI is not in active state, PM state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+	/* Toggle wake to exit M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ /* Generate the TRE */
+ buf_info = buf_ring->wp;
+ WARN_ON(buf_info->used);
+ buf_info->p_addr = mhi_buf->dma_addr;
+ buf_info->pre_mapped = true;
+ buf_info->cb_buf = mhi_buf;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+
+ mhi_tre = tre_ring->wp;
+
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_dma);
+
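+/*
+ * Generate a transfer ring element (TRE) for the given buffer: map it for
+ * DMA, fill in the pointer/length/flag dwords and advance the write
+ * pointers of both the TRE and buffer rings. Ringing the channel doorbell
+ * is left to the caller.
+ */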
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_tre *mhi_tre;
+ struct mhi_buf_info *buf_info;
+ int eot, eob, chain, bei;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ buf_info = buf_ring->wp;
+ buf_info->v_addr = buf;
+ buf_info->cb_buf = cb;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = buf_len;
+
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+
+ eob = !!(flags & MHI_EOB);
+ eot = !!(flags & MHI_EOT);
+ chain = !!(flags & MHI_CHAIN);
+ bei = !!(mhi_chan->intmod);
+
+ mhi_tre = tre_ring->wp;
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ return 0;
+}
+
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring;
+ unsigned long flags;
+ int ret;
+
+ /*
+	 * This check is only a guard; it is always possible for MHI to
+	 * enter an error state while the rest of the function executes.
+	 * That is not fatal, so we do not need to hold pm_lock here.
+ */
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ tre_ring = &mhi_chan->tre_ring;
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+ if (unlikely(ret))
+ return ret;
+
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+	/* Toggle wake to exit M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		unsigned long chan_flags;
+
+		read_lock_irqsave(&mhi_chan->lock, chan_flags);
+		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		read_unlock_irqrestore(&mhi_chan->lock, chan_flags);
+	}
+
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_buf);
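+
+/*
+ * Illustrative client usage (a sketch only, not part of the driver; buf
+ * and len are placeholder variables):
+ *
+ *	ret = mhi_prepare_for_transfer(mhi_dev);
+ *	if (!ret)
+ *		ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len,
+ *				    MHI_EOT);
+ *	...
+ *	mhi_unprepare_from_transfer(mhi_dev);
+ */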
+
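+/*
+ * Queue a channel command (reset or start) on the primary command ring
+ * and ring the command doorbell if register access is currently allowed.
+ */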
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd)
+{
+ struct mhi_tre *cmd_tre = NULL;
+ struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *ring = &mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int chan = 0;
+
+ if (mhi_chan)
+ chan = mhi_chan->chan;
+
+ spin_lock_bh(&mhi_cmd->lock);
+ if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
+ spin_unlock_bh(&mhi_cmd->lock);
+ return -ENOMEM;
+ }
+
+ /* prepare the cmd tre */
+ cmd_tre = ring->wp;
+ switch (cmd) {
+ case MHI_CMD_RESET_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
+ break;
+ case MHI_CMD_START_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
+ break;
+ default:
+ dev_err(dev, "Command not supported\n");
+ break;
+ }
+
+ /* queue to hardware */
+ mhi_add_ring_element(mhi_cntrl, ring);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ spin_unlock_bh(&mhi_cmd->lock);
+
+ return 0;
+}
+
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
+
+ /* no more processing events for this channel */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ write_unlock_irq(&mhi_chan->lock);
+ mutex_unlock(&mhi_chan->mutex);
+ return;
+ }
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ reinit_completion(&mhi_chan->completion);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ goto error_invalid_state;
+ }
+
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+ if (ret)
+ goto error_invalid_state;
+
+	/* Even if the command fails, we will still reset the channel */
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
+ dev_err(dev,
+ "Failed to receive cmd completion, still resetting\n");
+
+error_invalid_state:
+ if (!mhi_chan->offload_ch) {
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+ }
+	dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
+ mutex_unlock(&mhi_chan->mutex);
+}
+
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret = 0;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_err(dev,
+ "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
+ mhi_chan->name);
+ return -ENOTCONN;
+ }
+
+ mutex_lock(&mhi_chan->mutex);
+
+	/* If the channel is not in disabled state, do not allow it to start */
+ if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
+ ret = -EIO;
+ dev_dbg(dev, "channel: %d is not in disabled state\n",
+ mhi_chan->chan);
+ goto error_init_chan;
+ }
+
+	/* For offload channels the client manages the context, so skip init */
+ if (!mhi_chan->offload_ch) {
+ ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_init_chan;
+ }
+
+ reinit_completion(&mhi_chan->completion);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ ret = -EIO;
+ goto error_pm_state;
+ }
+
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+ if (ret)
+ goto error_pm_state;
+
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+ ret = -EIO;
+ goto error_pm_state;
+ }
+
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Pre-allocate buffer for xfer ring */
+ if (mhi_chan->pre_alloc) {
+ int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+ &mhi_chan->tre_ring);
+ size_t len = mhi_cntrl->buffer_len;
+
+ while (nr_el--) {
+ void *buf;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_pre_alloc;
+ }
+
+ /* Prepare transfer descriptors */
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
+ len, MHI_EOT);
+ if (ret) {
+ kfree(buf);
+ goto error_pre_alloc;
+ }
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ read_lock_irq(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ mutex_unlock(&mhi_chan->mutex);
+
+ dev_dbg(dev, "Chan: %d successfully moved to start state\n",
+ mhi_chan->chan);
+
+ return 0;
+
+error_pm_state:
+ if (!mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+ mutex_unlock(&mhi_chan->mutex);
+
+ return ret;
+
+error_pre_alloc:
+ mutex_unlock(&mhi_chan->mutex);
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+ return ret;
+}
+
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ struct mhi_event_ctxt *er_ctxt,
+ int chan)
+
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long flags;
+
+ dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
+
+ ev_ring = &mhi_event->ring;
+
+	/* Mark all pending events related to the channel as stale */
+ spin_lock_irqsave(&mhi_event->lock, flags);
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ local_rp = ev_ring->rp;
+ while (dev_rp != local_rp) {
+ if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
+ chan == MHI_TRE_GET_EV_CHID(local_rp))
+ local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
+ MHI_PKT_TYPE_STALE_EVENT);
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+
+	dev_dbg(dev, "Finished marking events as stale\n");
+ spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
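+/*
+ * Flush every buffer still queued on the channel: unmap it unless it was
+ * pre-mapped, then either free it (pre-allocated channels) or hand it
+ * back to the client via the xfer callback with -ENOTCONN status.
+ */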
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_result result;
+
+ /* Reset any pending buffers */
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ while (tre_ring->rp != tre_ring->wp) {
+ struct mhi_buf_info *buf_info = buf_ring->rp;
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_dec(&mhi_cntrl->pending_pkts);
+
+ if (!buf_info->pre_mapped)
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+
+ if (mhi_chan->pre_alloc) {
+ kfree(buf_info->cb_buf);
+ } else {
+ result.buf_addr = buf_info->cb_buf;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+ }
+}
+
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int chan = mhi_chan->chan;
+
+ /* Nothing to reset, client doesn't queue buffers */
+ if (mhi_chan->offload_ch)
+ return;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+ mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+ mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+
+/* Move channel to start state */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ int ret, dir;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_open_chan;
+ }
+
+ return 0;
+
+error_open_chan:
+ for (--dir; dir >= 0; dir--) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
+
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ int dir;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+ if (!mhi_chan)
+ continue;
+
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
+
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
+ struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ int ret;
+
+ spin_lock_bh(&mhi_event->lock);
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
+ spin_unlock_bh(&mhi_event->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_poll);
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
new file mode 100644
index 000000000000..52690cb5c89c
--- /dev/null
+++ b/drivers/bus/mhi/core/pm.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous. Transitions like Linkdown,
+ * SYS_ERR, and shutdown can happen asynchronously at any time. This table
+ * defines the legal transitions; mhi_tryset_pm_state() below moves to a
+ * new state only if the table allows it.
+ *
+ * Priority increases as we go down. For instance, from any state in L0, a
+ * transition can be made to states in L1, L2 and L3. A notable exception to
+ * this rule is state DISABLE. From DISABLE state we can only transition to
+ * POR state. Also, while in L2 state, we cannot jump back to the earlier
+ * L1 or L0 states.
+ *
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ * POR <--> POR
+ * POR -> M0 -> M2 --> M0
+ * POR -> FW_DL_ERR
+ * FW_DL_ERR <--> FW_DL_ERR
+ * M0 <--> M0
+ * M0 -> FW_DL_ERR
+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ */
+static struct mhi_pm_transitions const dev_state_transitions[] = {
+ /* L0 States */
+ {
+ MHI_PM_DISABLE,
+ MHI_PM_POR
+ },
+ {
+ MHI_PM_POR,
+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M0,
+ MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M2,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_ENTER,
+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3,
+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_EXIT,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_FW_DL_ERR,
+ MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L1 States */
+ {
+ MHI_PM_SYS_ERR_DETECT,
+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+ {
+ MHI_PM_SHUTDOWN_PROCESS,
+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L3 States */
+ {
+ MHI_PM_LD_ERR_FATAL_DETECT,
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+ },
+};
+
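+/*
+ * PM states are one-hot encoded, so find_last_bit() on the current state
+ * yields its index into dev_state_transitions[]. The requested transition
+ * is applied only if its bit is set in to_states; otherwise the current
+ * state is returned unchanged, which is why callers must check the
+ * return value.
+ */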
+enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state)
+{
+ unsigned long cur_state = mhi_cntrl->pm_state;
+ int index = find_last_bit(&cur_state, 32);
+
+ if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
+ return cur_state;
+
+ if (unlikely(dev_state_transitions[index].from_state != cur_state))
+ return cur_state;
+
+ if (unlikely(!(dev_state_transitions[index].to_states & state)))
+ return cur_state;
+
+ mhi_cntrl->pm_state = state;
+ return mhi_cntrl->pm_state;
+}
+
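+/* Set the device MHI state via MHICTRL; RESET uses its own register field */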
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
+{
+ if (state == MHI_STATE_RESET) {
+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
+ } else {
+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_MHISTATE_MASK,
+ MHICTRL_MHISTATE_SHIFT, state);
+ }
+}
+
+/* NOP for backward compatibility, host allowed to ring DB in M2 state */
+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+}
+
+/* Handle device ready state transition */
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+ void __iomem *base = mhi_cntrl->regs;
+ struct mhi_event *mhi_event;
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 reset = 1, ready = 0;
+ int ret, i;
+
+ /* Wait for RESET to be cleared and READY bit to be set by the device */
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT, &reset) ||
+ mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, &ready) ||
+ (!reset && ready),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ /* Check if device entered error state */
+ if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device link is not accessible\n");
+ return -EIO;
+ }
+
+ /* Timeout if device did not transition to ready state */
+ if (reset || !ready) {
+ dev_err(dev, "Device Ready timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "Device in READY State\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ mhi_cntrl->dev_state = MHI_STATE_READY;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ return -EIO;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device registers not accessible\n");
+ goto error_mmio;
+ }
+
+ /* Configure MMIO registers */
+ ret = mhi_init_mmio(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Error configuring MMIO registers\n");
+ goto error_mmio;
+ }
+
+ /* Add elements to all SW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if this is an offload or HW event */
+ if (mhi_event->offload_ev || mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+ /* Update all cores */
+ smp_wmb();
+
+ /* Ring the event ring db */
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Set MHI to M0 state */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+error_mmio:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+}
+
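+/* Handle device M0 state transition */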
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state;
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M0;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_M0)) {
+ dev_err(dev, "Unable to transition to M0 state\n");
+ return -EIO;
+ }
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+
+ /* Ring all event rings and CMD ring only if we're in mission mode */
+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct mhi_cmd *mhi_cmd =
+ &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Only ring primary cmd ring if ring is not empty */
+ spin_lock_irq(&mhi_cmd->lock);
+ if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ spin_unlock_irq(&mhi_cmd->lock);
+ }
+
+ /* Ring channel DB registers */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->db_cfg.reset_req)
+ mhi_chan->db_cfg.db_mode = true;
+
+ /* Only ring DB if ring is not empty */
+ if (tre_ring->base && tre_ring->wp != tre_ring->rp)
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/*
+ * After receiving the MHI state change event from the device indicating the
+ * transition to M1 state, the host can transition the device to M2 state
+ * to keep it in a low power state.
+ */
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+ if (state == MHI_PM_M2) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+ mhi_cntrl->dev_state = MHI_STATE_M2;
+
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* If there are any pending resources, exit M2 immediately */
+ if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_cntrl->dev_wake))) {
+ dev_dbg(dev,
+ "Exiting M2, pending_pkts: %d dev_wake: %d\n",
+ atomic_read(&mhi_cntrl->pending_pkts),
+ atomic_read(&mhi_cntrl->dev_wake));
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ } else {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
+ }
+ } else {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ }
+}
+
+/* MHI M3 completion handler */
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M3;
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (state != MHI_PM_M3) {
+ dev_err(dev, "Unable to transition to M3 state\n");
+ return -EIO;
+ }
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/* Handle device Mission Mode transition */
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ dev_dbg(dev, "Processing Mission Mode transition\n");
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+ return -EIO;
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+
+ /* Force MHI to be in M0 state before continuing */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ goto error_mission_mode;
+ }
+
+ /* Add elements to all HW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev || !mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+ /* Update to all cores */
+ smp_wmb();
+
+ spin_lock_irq(&mhi_event->lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ /*
+ * The MHI devices are only created when the client device switches its
+ * Execution Environment (EE) to either SBL or AMSS states
+ */
+ mhi_create_devices(mhi_cntrl);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+/* Handle SYS_ERR and Shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state transition_state)
+{
+ enum mhi_pm_state cur_state, prev_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ to_mhi_pm_state_str(transition_state));
+
+	/* We must notify the MHI controller driver so it can clean up first */
+	if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
+		/*
+		 * If the controller supports RDDM, we do not process the
+		 * SYS_ERR state; instead we jump directly to the RDDM state.
+		 */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev,
+ "Controller supports RDDM, so skip SYS_ERR\n");
+ return;
+ }
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ prev_state = mhi_cntrl->pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+ if (cur_state == transition_state) {
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* Wake up threads waiting for state transition */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ if (cur_state != transition_state) {
+ dev_err(dev, "Failed to transition to state: %s from: %s\n",
+ to_mhi_pm_state_str(transition_state),
+ to_mhi_pm_state_str(cur_state));
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (MHI_REG_ACCESS_VALID(prev_state)) {
+ u32 in_reset = -1;
+ unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+ dev_dbg(dev, "Triggering MHI Reset in device\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* Wait for the reset bit to be cleared by the device */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &in_reset) ||
+ !in_reset, timeout);
+ if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ dev_err(dev, "Device failed to exit MHI Reset state\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /*
+		 * The device clears BHI_INTVEC as part of RESET processing,
+		 * so re-program it.
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ dev_dbg(dev,
+ "Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+ tasklet_kill(&mhi_event->task);
+ }
+
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ dev_dbg(dev, "Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+ flush_work(&mhi_cntrl->st_worker);
+ flush_work(&mhi_cntrl->fw_worker);
+
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+ /* Reset the ev rings and cmd rings */
+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ mhi_ready_state_transition(mhi_cntrl);
+ } else {
+ /* Move to disable state */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ dev_err(dev, "Error moving from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_DISABLE));
+ }
+
+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Queue a new work item and schedule work */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state)
+{
+ struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+ unsigned long flags;
+
+ if (!item)
+ return -ENOMEM;
+
+ item->state = state;
+ spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+ list_add_tail(&item->node, &mhi_cntrl->transition_list);
+ spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+ schedule_work(&mhi_cntrl->st_worker);
+
+ return 0;
+}
+
+/* SYS_ERR worker */
+void mhi_pm_sys_err_worker(struct work_struct *work)
+{
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ syserr_worker);
+
+ mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+}
+
+/* Device State Transition worker */
+void mhi_pm_st_worker(struct work_struct *work)
+{
+ struct state_transition *itr, *tmp;
+ LIST_HEAD(head);
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ st_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ spin_lock_irq(&mhi_cntrl->transition_lock);
+ list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+ spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling state transition: %s\n",
+ TO_DEV_STATE_TRANS_STR(itr->state));
+
+ switch (itr->state) {
+ case DEV_ST_TRANSITION_PBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_IN_PBL(mhi_cntrl->ee))
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ case DEV_ST_TRANSITION_SBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_SBL;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /*
+ * The MHI devices are only created when the client
+ * device switches its Execution Environment (EE) to
+ * either SBL or AMSS states
+ */
+ mhi_create_devices(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_MISSION_MODE:
+ mhi_pm_mission_mode_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_READY:
+ mhi_ready_state_transition(mhi_cntrl);
+ break;
+ default:
+ break;
+ }
+ kfree(itr);
+ }
+}
+
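+/*
+ * Force the device into M0: assert device wake, kick runtime PM if we
+ * are suspended, then wait for the M0 (or an error) state to be reached.
+ */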
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->pm_state == MHI_PM_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Assert device wake db */
+static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+ unsigned long flags;
+
+ /*
+	 * If the force flag is set, increment the wake count and ring
+	 * the wake db.
+ */
+ if (unlikely(force)) {
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ atomic_inc(&mhi_cntrl->dev_wake);
+ if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ } else {
+ /*
+ * If resources are already requested, then just increment
+ * the wake count value and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+ MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ }
+}
+
+/* De-assert device wake db */
+static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
+ bool override)
+{
+ unsigned long flags;
+
+ /*
+ * Only continue if there is a single resource, else just decrement
+ * and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+ MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
+ mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+ mhi_cntrl->wake_set = false;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_ee_type current_ee;
+ enum dev_st_transition next_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 val;
+ int ret;
+
+ dev_info(dev, "Requested to power ON\n");
+
+ if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
+ return -EINVAL;
+
+ /* Supply default wake routines if not provided by controller driver */
+ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
+ !mhi_cntrl->wake_toggle) {
+ mhi_cntrl->wake_get = mhi_assert_dev_wake;
+ mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+ mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+ if (!mhi_cntrl->pre_init) {
+ /* Setup device context */
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+ }
+
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto error_setup_irq;
+
+ /* Setup BHI offset & INTVEC */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhi = mhi_cntrl->regs + val;
+
+ /* Setup BHIE offset */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev, "Error reading BHIE offset\n");
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhie = mhi_cntrl->regs + val;
+ }
+
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->pm_state = MHI_PM_POR;
+ mhi_cntrl->ee = MHI_EE_MAX;
+ current_ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* Confirm that the device is in valid exec env */
+ if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+ dev_err(dev, "Not a valid EE for power on\n");
+ ret = -EIO;
+ goto error_bhi_offset;
+ }
+
+ /* Transition to next state */
+ next_state = MHI_IN_PBL(current_ee) ?
+ DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
+
+ if (next_state == DEV_ST_TRANSITION_PBL)
+ schedule_work(&mhi_cntrl->fw_worker);
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ dev_info(dev, "Power on setup success\n");
+
+ return 0;
+
+error_bhi_offset:
+ mhi_deinit_free_irq(mhi_cntrl);
+
+error_setup_irq:
+ if (!mhi_cntrl->pre_init)
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_async_power_up);
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* If it's not a graceful shutdown, force MHI to linkdown state */
+ if (!graceful) {
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
+ dev_dbg(dev, "Failed to move to state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ }
+ mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+ mhi_deinit_free_irq(mhi_cntrl);
+
+ if (!mhi_cntrl->pre_init) {
+ /* Free all allocated resources */
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+ int ret = mhi_async_power_up(mhi_cntrl);
+
+ if (ret)
+ return ret;
+
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Check if device is already in RDDM */
+ if (mhi_cntrl->ee == MHI_EE_RDDM)
+ return 0;
+
+ dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ /* Wait for RDDM event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_RDDM,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ ret = ret ? 0 : -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
+
+void mhi_device_get(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake++;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int ret;
+
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (!ret)
+ mhi_dev->dev_wake++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake--;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_put);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 440019655fbb..e5f5f48d69d2 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -15,15 +16,47 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
#include <dt-bindings/bus/ti-sysc.h>
+#define DIS_ISP BIT(2)
+#define DIS_IVA BIT(1)
+#define DIS_SGX BIT(0)
+
+#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
+
#define MAX_MODULE_SOFTRESET_WAIT 10000
-static const char * const reg_names[] = { "rev", "sysc", "syss", };
+enum sysc_soc {
+ SOC_UNKNOWN,
+ SOC_2420,
+ SOC_2430,
+ SOC_3430,
+ SOC_3630,
+ SOC_4430,
+ SOC_4460,
+ SOC_4470,
+ SOC_5430,
+ SOC_AM3,
+ SOC_AM4,
+ SOC_DRA7,
+};
+
+struct sysc_address {
+ unsigned long base;
+ struct list_head node;
+};
+
+struct sysc_soc_info {
+ unsigned long general_purpose:1;
+ enum sysc_soc soc;
+ struct mutex list_lock; /* disabled modules list lock */
+ struct list_head disabled_modules;
+};
enum sysc_clocks {
SYSC_FCK,
@@ -39,6 +72,8 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
+static struct sysc_soc_info *sysc_soc;
+static const char * const reg_names[] = { "rev", "sysc", "syss", };
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
"opt5", "opt6", "opt7",
@@ -70,11 +105,13 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @child_needs_resume: runtime resume needed for child on resume from suspend
* @disable_on_idle: status flag used for disabling modules with resets
* @idle_work: work structure used to perform delayed idle on a module
- * @clk_enable_quirk: module specific clock enable quirk
- * @clk_disable_quirk: module specific clock disable quirk
+ * @pre_reset_quirk: module specific pre-reset quirk
+ * @post_reset_quirk: module specific post-reset quirk
* @reset_done_quirk: module specific reset done quirk
* @module_enable_quirk: module specific enable quirk
* @module_disable_quirk: module specific disable quirk
+ * @module_unlock_quirk: module specific sysconfig unlock quirk
+ * @module_lock_quirk: module specific sysconfig lock quirk
*/
struct sysc {
struct device *dev;
@@ -97,11 +134,13 @@ struct sysc {
unsigned int needs_resume:1;
unsigned int child_needs_resume:1;
struct delayed_work idle_work;
- void (*clk_enable_quirk)(struct sysc *sysc);
- void (*clk_disable_quirk)(struct sysc *sysc);
+ void (*pre_reset_quirk)(struct sysc *sysc);
+ void (*post_reset_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
void (*module_enable_quirk)(struct sysc *sysc);
void (*module_disable_quirk)(struct sysc *sysc);
+ void (*module_unlock_quirk)(struct sysc *sysc);
+ void (*module_lock_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -624,7 +663,7 @@ static void sysc_check_one_child(struct sysc *ddata,
const char *name;
name = of_get_property(np, "ti,hwmods", NULL);
- if (name)
+ if (name && !of_device_is_compatible(np, "ti,sysc"))
dev_warn(ddata->dev, "really a child ti,hwmods property?");
sysc_check_quirk_stdout(ddata, np);
@@ -861,6 +900,22 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
+/**
+ * sysc_write_sysconfig - handle sysconfig quirks for register write
+ * @ddata: device driver data
+ * @value: register value
+ */
+static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
+{
+ if (ddata->module_unlock_quirk)
+ ddata->module_unlock_quirk(ddata);
+
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
+
+ if (ddata->module_lock_quirk)
+ ddata->module_lock_quirk(ddata);
+}
+
#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
#define SYSC_CLOCACT_ICK 2
@@ -907,7 +962,7 @@ static int sysc_enable_module(struct device *dev)
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_midle:
/* Set MIDLE mode */
@@ -926,14 +981,14 @@ set_midle:
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_autoidle:
/* Autoidle bit must be enabled separately if available */
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
reg |= 1 << regbits->autoidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
}
if (ddata->module_enable_quirk)
@@ -991,7 +1046,7 @@ static int sysc_disable_module(struct device *dev)
reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
reg |= best_mode << regbits->midle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
set_sidle:
/* Set SIDLE mode */
@@ -1014,7 +1069,7 @@ set_sidle:
if (regbits->autoidle_shift >= 0 &&
ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
reg |= 1 << regbits->autoidle_shift;
- sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ sysc_write_sysconfig(ddata, reg);
return 0;
}
@@ -1216,16 +1271,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
0),
/* Some timers on omap4 and later */
- SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff,
0),
- SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
@@ -1238,19 +1293,27 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
- SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
SYSC_QUIRK_SWSUP_SIDLE),
/* Quirks that need to be set based on detected module */
- SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
+ SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
SYSC_MODULE_QUIRK_AESS),
- SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
- SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
- SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
SYSC_MODULE_QUIRK_HDQ1W),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
@@ -1263,72 +1326,92 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_MODULE_QUIRK_I2C),
SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
SYSC_MODULE_QUIRK_I2C),
- SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
- SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
+ SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
SYSC_MODULE_QUIRK_SGX),
- SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff,
+ SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
+ SYSC_MODULE_QUIRK_RTC_UNLOCK),
+ SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
- SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff,
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
+ /* PRUSS on am3, am4 and am5 */
+ SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
+ SYSC_MODULE_QUIRK_PRUSS),
/* Watchdog on am3 and am4 */
SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
- SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
- SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0),
- SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0),
- SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
+ SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
+ SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
+ SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
0xffff00f0, 0),
- SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
- SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
- SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
- SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
- SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
- SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0),
+ SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
- SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
- SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
- SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
- SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0),
- SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 0xffff00ff, 0),
+ SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
+ SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
- SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0),
- SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0),
+ SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
+ SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
- SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0),
- SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0),
- SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0),
- SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0),
- SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0),
- SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0),
- SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0),
+ SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
- SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0),
- SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0),
- SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0),
- SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0),
- SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0),
- SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
+ SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
+ SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
- SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
+ SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
+ SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
@@ -1350,16 +1433,13 @@ static void sysc_init_early_quirks(struct sysc *ddata)
if (q->base != ddata->module_pa)
continue;
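+ /*
+ * Both the quirk entries and ddata->offsets now use -ENODEV for
+ * missing registers, so the offsets can be compared directly.
+ */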
- if (q->rev_offset >= 0 &&
- q->rev_offset != ddata->offsets[SYSC_REVISION])
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
- if (q->sysc_offset >= 0 &&
- q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
- if (q->syss_offset >= 0 &&
- q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
ddata->name = q->name;
@@ -1379,16 +1459,13 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
if (q->base && q->base != ddata->module_pa)
continue;
- if (q->rev_offset >= 0 &&
- q->rev_offset != ddata->offsets[SYSC_REVISION])
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
continue;
- if (q->sysc_offset >= 0 &&
- q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
continue;
- if (q->syss_offset >= 0 &&
- q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
continue;
if (q->revision == ddata->revision ||
@@ -1400,6 +1477,128 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * DSS needs dispc outputs disabled to reset modules. Returns mask of
+ * enabled DSS interrupts. Eventually we may be able to do this on
+ * dispc init rather than top-level DSS init.
+ */
+static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ bool disable)
+{
+ bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
+ const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
+ int manager_count;
+ bool framedonetv_irq = true;
+ u32 val, irq_mask = 0;
+
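+ /* The number of dispc managers and the TV FRAMEDONE irq depend on the SoC */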
+ switch (sysc_soc->soc) {
+ case SOC_2420 ... SOC_3630:
+ manager_count = 2;
+ framedonetv_irq = false;
+ break;
+ case SOC_4430 ... SOC_4470:
+ manager_count = 3;
+ break;
+ case SOC_5430:
+ case SOC_DRA7:
+ manager_count = 4;
+ break;
+ case SOC_AM4:
+ manager_count = 1;
+ break;
+ case SOC_UNKNOWN:
+ default:
+ return 0;
+ }
+
+ /* Remap the whole module range to be able to reset dispc outputs */
+ devm_iounmap(ddata->dev, ddata->module_va);
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ ddata->module_size);
+ if (!ddata->module_va)
+ return 0; /* remap failed, treat as no enabled outputs */
+
+ /* DISP_CONTROL */
+ val = sysc_read(ddata, dispc_offset + 0x40);
+ lcd_en = val & lcd_en_mask;
+ digit_en = val & digit_en_mask;
+ if (lcd_en)
+ irq_mask |= BIT(0); /* FRAMEDONE */
+ if (digit_en) {
+ if (framedonetv_irq)
+ irq_mask |= BIT(24); /* FRAMEDONETV */
+ else
+ irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
+ }
+ if (disable & (lcd_en | digit_en))
+ sysc_write(ddata, dispc_offset + 0x40,
+ val & ~(lcd_en_mask | digit_en_mask));
+
+ if (manager_count <= 2)
+ return irq_mask;
+
+ /* DISPC_CONTROL2 */
+ val = sysc_read(ddata, dispc_offset + 0x238);
+ lcd2_en = val & lcd_en_mask;
+ if (lcd2_en)
+ irq_mask |= BIT(22); /* FRAMEDONE2 */
+ if (disable && lcd2_en)
+ sysc_write(ddata, dispc_offset + 0x238,
+ val & ~lcd_en_mask);
+
+ if (manager_count <= 3)
+ return irq_mask;
+
+ /* DISPC_CONTROL3 */
+ val = sysc_read(ddata, dispc_offset + 0x848);
+ lcd3_en = val & lcd_en_mask;
+ if (lcd3_en)
+ irq_mask |= BIT(30); /* FRAMEDONE3 */
+ if (disable && lcd3_en)
+ sysc_write(ddata, dispc_offset + 0x848,
+ val & ~lcd_en_mask);
+
+ return irq_mask;
+}
+
+/* DSS needs child outputs disabled and SDI registers cleared for reset */
+static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
+{
+ const int dispc_offset = 0x1000;
+ int error;
+ u32 irq_mask, val;
+
+ /* Get enabled outputs */
+ irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
+ if (!irq_mask)
+ return;
+
+ /* Clear IRQSTATUS */
+ sysc_write(ddata, dispc_offset + 0x18, irq_mask);
+
+ /* Disable outputs */
+ val = sysc_quirk_dispc(ddata, dispc_offset, true);
+
+ /* Poll IRQSTATUS */
+ error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
+ val, val == irq_mask, 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
+ __func__, val, irq_mask);
+
+ if (sysc_soc->soc == SOC_3430) {
+ /* Clear DSS_SDI_CONTROL */
+ sysc_write(ddata, 0x44, 0);
+
+ /* Clear DSS_PLL_CONTROL */
+ sysc_write(ddata, 0x48, 0);
+ }
+
+ /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not done yet */
+ sysc_write(ddata, 0x40, 0);
+}
+
/* 1-wire needs module's internal clocks enabled for reset */
static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
@@ -1419,7 +1618,7 @@ static void sysc_module_enable_quirk_aess(struct sysc *ddata)
sysc_write(ddata, offset, 1);
}
-/* I2C needs extra enable bit toggling for reset */
+/* I2C needs to be disabled for reset */
static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
{
int offset;
@@ -1440,14 +1639,48 @@ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
sysc_write(ddata, offset, val);
}
-static void sysc_clk_enable_quirk_i2c(struct sysc *ddata)
+static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, false);
+}
+
+static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
sysc_clk_quirk_i2c(ddata, true);
}
-static void sysc_clk_disable_quirk_i2c(struct sysc *ddata)
+/* RTC on am3 and am4 needs to be unlocked and locked for sysconfig changes */
+static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
- sysc_clk_quirk_i2c(ddata, false);
+ u32 val, kick0_val = 0, kick1_val = 0;
+ unsigned long flags;
+ int error;
+
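+ /*
+ * The KICK0/KICK1 magic values unlock the RTC registers; any other
+ * value, like the zeros used for locking here, locks them again.
+ */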
+ if (!lock) {
+ kick0_val = 0x83e70b13;
+ kick1_val = 0x95a4f1e0;
+ }
+
+ local_irq_save(flags);
+ /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
+ error = readl_poll_timeout(ddata->module_va + 0x44, val,
+ !(val & BIT(0)), 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "rtc busy timeout\n");
+ /* Now we have ~15 microseconds to read/write various registers */
+ sysc_write(ddata, 0x6c, kick0_val);
+ sysc_write(ddata, 0x70, kick1_val);
+ local_irq_restore(flags);
+}
+
+static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, false);
+}
+
+static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, true);
}
 /* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
@@ -1483,20 +1716,30 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
+/* PRUSS needs to set MSTANDBY_INIT in order to idle properly */
+static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ reg |= SYSC_PRUSS_STANDBY_INIT;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
return;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
- ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;
return;
}
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
- ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c;
- ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c;
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
+ ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
return;
}
@@ -1504,6 +1747,16 @@ static void sysc_init_module_quirks(struct sysc *ddata)
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
ddata->module_enable_quirk = sysc_module_enable_quirk_aess;
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
+ ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
+ ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;
+
+ return;
+ }
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -1511,6 +1764,9 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
static int sysc_clockdomain_init(struct sysc *ddata)
@@ -1572,7 +1828,7 @@ static int sysc_reset(struct sysc *ddata)
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
syss_offset = ddata->offsets[SYSC_SYSSTATUS];
- if (ddata->legacy_mode || sysc_offset < 0 ||
+ if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
return 0;
@@ -1584,19 +1840,21 @@ static int sysc_reset(struct sysc *ddata)
else
syss_done = ddata->cfg.syss_mask;
- if (ddata->clk_disable_quirk)
- ddata->clk_disable_quirk(ddata);
+ if (ddata->pre_reset_quirk)
+ ddata->pre_reset_quirk(ddata);
- sysc_val = sysc_read_sysconfig(ddata);
- sysc_val |= sysc_mask;
- sysc_write(ddata, sysc_offset, sysc_val);
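+ /* Some modules have no sysconfig register; then only the reset quirks above and the syss poll below apply */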
+ if (sysc_offset >= 0) {
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+ }
if (ddata->cfg.srst_udelay)
usleep_range(ddata->cfg.srst_udelay,
ddata->cfg.srst_udelay * 2);
- if (ddata->clk_enable_quirk)
- ddata->clk_enable_quirk(ddata);
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
/* Poll on reset status */
if (syss_offset >= 0) {
@@ -2314,6 +2572,16 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};
+/*
+ * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
+ */
+static const struct sysc_capabilities sysc_pruss = {
+ .type = TI_SYSC_PRUSS,
+ .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
+ .regbits = &sysc_regbits_omap4_simple,
+ .mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
+};
+
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
@@ -2387,6 +2655,154 @@ static void ti_sysc_idle(struct work_struct *work)
pm_runtime_put_sync(ddata->dev);
}
+/*
+ * SoC model and features detection. Only needed for SoCs that need
+ * special handling for quirks; there is no need to list other SoCs.
+ */
+static const struct soc_device_attribute sysc_soc_match[] = {
+ SOC_FLAG("OMAP242*", SOC_2420),
+ SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("OMAP3[45]*", SOC_3430),
+ SOC_FLAG("OMAP3[67]*", SOC_3630),
+ SOC_FLAG("OMAP443*", SOC_4430),
+ SOC_FLAG("OMAP446*", SOC_4460),
+ SOC_FLAG("OMAP447*", SOC_4470),
+ SOC_FLAG("OMAP54*", SOC_5430),
+ SOC_FLAG("AM433", SOC_AM3),
+ SOC_FLAG("AM43*", SOC_AM4),
+ SOC_FLAG("DRA7*", SOC_DRA7),
+
+ { /* sentinel */ },
+};
+
+/*
+ * List of SoC variants with disabled features. By default we assume all
+ * devices in the device tree are available so no need to list those SoCs.
+ */
+static const struct soc_device_attribute sysc_soc_feat_match[] = {
+ /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
+ SOC_FLAG("AM3505", DIS_SGX),
+ SOC_FLAG("OMAP3525", DIS_SGX),
+ SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),
+
+ /* OMAP3630/DM3730 variants with some accelerators disabled */
+ SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
+ SOC_FLAG("DM3725", DIS_SGX),
+ SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
+ SOC_FLAG("OMAP3621", DIS_ISP),
+
+ { /* sentinel */ },
+};
+
+static int sysc_add_disabled(unsigned long base)
+{
+ struct sysc_address *disabled_module;
+
+ disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
+ if (!disabled_module)
+ return -ENOMEM;
+
+ disabled_module->base = base;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_add(&disabled_module->node, &sysc_soc->disabled_modules);
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return 0;
+}
+
+/*
+ * One time init to detect the booted SoC and disable unavailable features.
+ * Note that we initialize static data shared across all ti-sysc instances
+ * so ddata is only used for SoC type. This can be called from module_init
+ * once we no longer need to rely on platform data.
+ */
+static int sysc_init_soc(struct sysc *ddata)
+{
+ const struct soc_device_attribute *match;
+ struct ti_sysc_platform_data *pdata;
+ unsigned long features = 0;
+
+ if (sysc_soc)
+ return 0;
+
+ sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
+ if (!sysc_soc)
+ return -ENOMEM;
+
+ mutex_init(&sysc_soc->list_lock);
+ INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ sysc_soc->general_purpose = true;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->soc_type_gp)
+ sysc_soc->general_purpose = pdata->soc_type_gp();
+
+ match = soc_device_match(sysc_soc_match);
+ if (match && match->data)
+ sysc_soc->soc = (int)(uintptr_t)match->data;
+
+ match = soc_device_match(sysc_soc_feat_match);
+ if (!match)
+ return 0;
+
+ if (match->data)
+ features = (unsigned long)match->data;
+
+ /*
+ * Add disabled devices to the list based on the module base.
+ * Note that this must be done before we attempt to access the
+ * device and have module revision checks working.
+ */
+ if (features & DIS_ISP)
+ sysc_add_disabled(0x480bd400);
+ if (features & DIS_IVA)
+ sysc_add_disabled(0x5d000000);
+ if (features & DIS_SGX)
+ sysc_add_disabled(0x50000000);
+
+ return 0;
+}
+
+static void sysc_cleanup_soc(void)
+{
+ struct sysc_address *disabled_module;
+ struct list_head *pos, *tmp;
+
+ if (!sysc_soc)
+ return;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ list_del(pos);
+ kfree(disabled_module);
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
+static int sysc_check_disabled_devices(struct sysc *ddata)
+{
+ struct sysc_address *disabled_module;
+ struct list_head *pos;
+ int error = 0;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each(pos, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ if (ddata->module_pa == disabled_module->base) {
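+/*
+ * Early-boot variant: the _early arch helpers below can be called
+ * before the regular arch_get_random_* hooks are fully set up.
+ */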
+ dev_dbg(ddata->dev, "module disabled for this SoC\n");
+ error = -ENODEV;
+ break;
+ }
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return error;
+}
+
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@@ -2405,6 +2821,10 @@ static int sysc_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
+ error = sysc_init_soc(ddata);
+ if (error)
+ return error;
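+/* Secondary crngs are only instantiated for the NUMA pools, hence __maybe_unused */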
+
error = sysc_init_match(ddata);
if (error)
return error;
@@ -2435,6 +2855,10 @@ static int sysc_probe(struct platform_device *pdev)
sysc_init_early_quirks(ddata);
+ error = sysc_check_disabled_devices(ddata);
+ if (error)
+ return error;
+
error = sysc_get_clocks(ddata);
if (error)
return error;
@@ -2539,6 +2963,7 @@ static const struct of_device_id sysc_match[] = {
{ .compatible = "ti,sysc-usb-host-fs",
.data = &sysc_omap4_usb_host_fs, },
{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
+ { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
@@ -2565,6 +2990,7 @@ static void __exit sysc_exit(void)
{
bus_unregister_notifier(&platform_bus_type, &sysc_nb);
platform_driver_unregister(&sysc_driver);
+ sysc_cleanup_soc();
}
module_exit(sysc_exit);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 26956c006987..d4665fe9ccd2 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -7,28 +7,6 @@ menu "Character devices"
source "drivers/tty/Kconfig"
-config DEVMEM
- bool "/dev/mem virtual device support"
- default y
- help
- Say Y here if you want to support the /dev/mem device.
- The /dev/mem device is used to access areas of physical
- memory.
- When in doubt, say "Y".
-
-config DEVKMEM
- bool "/dev/kmem virtual device support"
- # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
- depends on !ARM64
- help
- Say Y here if you want to support the /dev/kmem device. The
- /dev/kmem device is rarely used, but can be used for certain
- kind of kernel debugging operations.
- When in doubt, say "N".
-
-source "drivers/tty/serial/Kconfig"
-source "drivers/tty/serdev/Kconfig"
-
config TTY_PRINTK
tristate "TTY driver to output user messages via printk"
depends on EXPERT && TTY
@@ -113,8 +91,6 @@ config PPDEV
If unsure, say N.
-source "drivers/tty/hvc/Kconfig"
-
config VIRTIO_CONSOLE
tristate "Virtio console"
depends on VIRTIO && TTY
@@ -220,89 +196,6 @@ config NWFLASH
source "drivers/char/hw_random/Kconfig"
-config NVRAM
- tristate "/dev/nvram support"
- depends on X86 || HAVE_ARCH_NVRAM_OPS
- default M68K || PPC
- ---help---
- If you say Y here and create a character special file /dev/nvram
- with major number 10 and minor number 144 using mknod ("man mknod"),
- you get read and write access to the non-volatile memory.
-
- /dev/nvram may be used to view settings in NVRAM or to change them
- (with some utility). It could also be used to frequently
- save a few bits of very important data that may not be lost over
- power-off and for which writing to disk is too insecure. Note
- however that most NVRAM space in a PC belongs to the BIOS and you
- should NEVER idly tamper with it. See Ralf Brown's interrupt list
- for a guide to the use of CMOS bytes by your BIOS.
-
- This memory is conventionally called "NVRAM" on PowerPC machines,
- "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
-
- To compile this driver as a module, choose M here: the
- module will be called nvram.
-
-#
-# These legacy RTC drivers just cause too many conflicts with the generic
-# RTC framework ... let's not even try to coexist any more.
-#
-if RTC_LIB=n
-
-config RTC
- tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
- depends on ALPHA
- ---help---
- If you say Y here and create a character special file /dev/rtc with
- major number 10 and minor number 135 using mknod ("man mknod"), you
- will get access to the real time clock (or hardware clock) built
- into your computer.
-
- Every PC has such a clock built in. It can be used to generate
- signals from as low as 1Hz up to 8192Hz, and can also be used
- as a 24 hour alarm. It reports status information via the file
- /proc/driver/rtc and its behaviour is set by various ioctls on
- /dev/rtc.
-
- If you run Linux on a multiprocessor machine and said Y to
- "Symmetric Multi Processing" above, you should say Y here to read
- and set the RTC in an SMP compatible fashion.
-
- If you think you have a use for such a device (such as periodic data
- sampling), then say Y here, and read <file:Documentation/admin-guide/rtc.rst>
- for details.
-
- To compile this driver as a module, choose M here: the
- module will be called rtc.
-
-config JS_RTC
- tristate "Enhanced Real Time Clock Support"
- depends on SPARC32 && PCI
- ---help---
- If you say Y here and create a character special file /dev/rtc with
- major number 10 and minor number 135 using mknod ("man mknod"), you
- will get access to the real time clock (or hardware clock) built
- into your computer.
-
- Every PC has such a clock built in. It can be used to generate
- signals from as low as 1Hz up to 8192Hz, and can also be used
- as a 24 hour alarm. It reports status information via the file
- /proc/driver/rtc and its behaviour is set by various ioctls on
- /dev/rtc.
-
- If you think you have a use for such a device (such as periodic data
- sampling), then say Y here, and read <file:Documentation/admin-guide/rtc.rst>
- for details.
-
- To compile this driver as a module, choose M here: the
- module will be called js-rtc.
-
-config EFI_RTC
- bool "EFI Real Time Clock Services"
- depends on IA64
-
-endif # RTC_LIB
-
config DTLK
tristate "Double Talk PC internal speech card support"
depends on ISA
@@ -431,6 +324,48 @@ config NSC_GPIO
pc8736x_gpio drivers. If those drivers are built as
modules, this one will be too, named nsc_gpio
+config DEVMEM
+ bool "/dev/mem virtual device support"
+ default y
+ help
+ Say Y here if you want to support the /dev/mem device.
+ The /dev/mem device is used to access areas of physical
+ memory.
+ When in doubt, say "Y".
+
+config DEVKMEM
+ bool "/dev/kmem virtual device support"
+ # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
+ depends on !ARM64
+ help
+ Say Y here if you want to support the /dev/kmem device. The
+ /dev/kmem device is rarely used, but can be used for certain
+ kind of kernel debugging operations.
+ When in doubt, say "N".
+
+config NVRAM
+ tristate "/dev/nvram support"
+ depends on X86 || HAVE_ARCH_NVRAM_OPS
+ default M68K || PPC
+ ---help---
+ If you say Y here and create a character special file /dev/nvram
+ with major number 10 and minor number 144 using mknod ("man mknod"),
+ you get read and write access to the non-volatile memory.
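+ For example: mknod /dev/nvram c 10 144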
+
+ /dev/nvram may be used to view settings in NVRAM or to change them
+ (with some utility). It could also be used to frequently
+ save a few bits of very important data that may not be lost over
+ power-off and for which writing to disk is too insecure. Note
+ however that most NVRAM space in a PC belongs to the BIOS and you
+ should NEVER idly tamper with it. See Ralf Brown's interrupt list
+ for a guide to the use of CMOS bytes by your BIOS.
+
+ This memory is conventionally called "NVRAM" on PowerPC machines,
+ "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvram.
+
config RAW_DRIVER
tristate "RAW driver (/dev/raw/rawN)"
depends on BLOCK
@@ -452,6 +387,14 @@ config MAX_RAW_DEVS
Default is 256. Increase this number in case you need lots of
raw devices.
+config DEVPORT
+ bool "/dev/port character device"
+ depends on ISA || PCI
+ default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
+
config HPET
bool "HPET - High Precision Event Timer" if (X86 || IA64)
default n
@@ -511,14 +454,6 @@ config TELCLOCK
/sys/devices/platform/telco_clock, with a number of files for
controlling the behavior of this hardware.
-config DEVPORT
- bool "/dev/port character device"
- depends on ISA || PCI
- default y
- help
- Say Y here if you want to support the /dev/port device. The /dev/port
- device is similar to /dev/mem, but for I/O ports.
-
source "drivers/s390/char/Kconfig"
source "drivers/char/xillybus/Kconfig"
@@ -539,7 +474,7 @@ endmenu
config RANDOM_TRUST_CPU
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
- depends on X86 || S390 || PPC
+ depends on ARCH_RANDOM
default n
help
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7c5ea6f9df14..ffce287ef415 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -20,9 +20,7 @@ obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
obj-$(CONFIG_DTLK) += dtlk.o
obj-$(CONFIG_APPLICOM) += applicom.o
obj-$(CONFIG_SONYPI) += sonypi.o
-obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_HPET) += hpet.o
-obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
obj-$(CONFIG_NVRAM) += nvram.o
obj-$(CONFIG_TOSHIBA) += toshiba.o
@@ -46,9 +44,6 @@ obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
-obj-$(CONFIG_JS_RTC) += js-rtc.o
-js-rtc-y = rtc.o
-
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
obj-$(CONFIG_ADI) += adi.o
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 51121a4b82c7..14b2d8034c51 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -53,7 +53,6 @@
 #define MAX_BOARD 8 /* maximum number of PC boards possible */
#define MAX_ISA_BOARD 4
#define LEN_RAM_IO 0x800
-#define AC_MINOR 157
#ifndef PCI_VENDOR_ID_APPLICOM
#define PCI_VENDOR_ID_APPLICOM 0x1389
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
deleted file mode 100644
index 4f73064d0c6f..000000000000
--- a/drivers/char/efirtc.c
+++ /dev/null
@@ -1,366 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * EFI Time Services Driver for Linux
- *
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
- *
- * Based on skeleton from the drivers/char/rtc.c driver by P. Gortmaker
- *
- * This code provides an architected & portable interface to the real time
- * clock by using EFI instead of direct bit fiddling. The functionalities are
- * quite different from the rtc.c driver. The only way to talk to the device
- * is by using ioctl(). There is a /proc interface which provides the raw
- * information.
- *
- * Please note that we have kept the API as close as possible to the
- * legacy RTC. The standard /sbin/hwclock program should work normally
- * when used to get/set the time.
- *
- * NOTES:
- * - Locking is required for safe execution of EFI calls with regards
- * to interrupts and SMP.
- *
- * TODO (December 1999):
- * - provide the API to set/get the WakeUp Alarm (different from the
- * rtc.c alarm).
- * - SMP testing
- * - Add module support
- */
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/init.h>
-#include <linux/rtc.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/efi.h>
-#include <linux/uaccess.h>
-
-
-#define EFI_RTC_VERSION "0.4"
-
-#define EFI_ISDST (EFI_TIME_ADJUST_DAYLIGHT|EFI_TIME_IN_DAYLIGHT)
-/*
- * EFI Epoch is 1/1/1998
- */
-#define EFI_RTC_EPOCH 1998
-
-static DEFINE_SPINLOCK(efi_rtc_lock);
-
-static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
-
-#define is_leap(year) \
- ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
-
-static const unsigned short int __mon_yday[2][13] =
-{
- /* Normal years. */
- { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
- /* Leap years. */
- { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
-};
-
-/*
- * returns day of the year [0-365]
- */
-static inline int
-compute_yday(efi_time_t *eft)
-{
- /* efi_time_t.month is in the [1-12] so, we need -1 */
- return __mon_yday[is_leap(eft->year)][eft->month-1]+ eft->day -1;
-}
-/*
- * returns day of the week [0-6] 0=Sunday
- *
- * Don't try to provide a year that's before 1998, please !
- */
-static int
-compute_wday(efi_time_t *eft)
-{
- int y;
- int ndays = 0;
-
- if ( eft->year < 1998 ) {
- printk(KERN_ERR "efirtc: EFI year < 1998, invalid date\n");
- return -1;
- }
-
- for(y=EFI_RTC_EPOCH; y < eft->year; y++ ) {
- ndays += 365 + (is_leap(y) ? 1 : 0);
- }
- ndays += compute_yday(eft);
-
- /*
- * 4=1/1/1998 was a Thursday
- */
- return (ndays + 4) % 7;
-}
-
-static void
-convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
-{
-
- eft->year = wtime->tm_year + 1900;
- eft->month = wtime->tm_mon + 1;
- eft->day = wtime->tm_mday;
- eft->hour = wtime->tm_hour;
- eft->minute = wtime->tm_min;
- eft->second = wtime->tm_sec;
- eft->nanosecond = 0;
- eft->daylight = wtime->tm_isdst ? EFI_ISDST: 0;
- eft->timezone = EFI_UNSPECIFIED_TIMEZONE;
-}
-
-static void
-convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
-{
- memset(wtime, 0, sizeof(*wtime));
- wtime->tm_sec = eft->second;
- wtime->tm_min = eft->minute;
- wtime->tm_hour = eft->hour;
- wtime->tm_mday = eft->day;
- wtime->tm_mon = eft->month - 1;
- wtime->tm_year = eft->year - 1900;
-
- /* day of the week [0-6], Sunday=0 */
- wtime->tm_wday = compute_wday(eft);
-
- /* day in the year [1-365]*/
- wtime->tm_yday = compute_yday(eft);
-
-
- switch (eft->daylight & EFI_ISDST) {
- case EFI_ISDST:
- wtime->tm_isdst = 1;
- break;
- case EFI_TIME_ADJUST_DAYLIGHT:
- wtime->tm_isdst = 0;
- break;
- default:
- wtime->tm_isdst = -1;
- }
-}
-
-static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
-
- efi_status_t status;
- unsigned long flags;
- efi_time_t eft;
- efi_time_cap_t cap;
- struct rtc_time wtime;
- struct rtc_wkalrm __user *ewp;
- unsigned char enabled, pending;
-
- switch (cmd) {
- case RTC_UIE_ON:
- case RTC_UIE_OFF:
- case RTC_PIE_ON:
- case RTC_PIE_OFF:
- case RTC_AIE_ON:
- case RTC_AIE_OFF:
- case RTC_ALM_SET:
- case RTC_ALM_READ:
- case RTC_IRQP_READ:
- case RTC_IRQP_SET:
- case RTC_EPOCH_READ:
- case RTC_EPOCH_SET:
- return -EINVAL;
-
- case RTC_RD_TIME:
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- status = efi.get_time(&eft, &cap);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- if (status != EFI_SUCCESS) {
- /* should never happen */
- printk(KERN_ERR "efitime: can't read time\n");
- return -EINVAL;
- }
-
- convert_from_efi_time(&eft, &wtime);
-
- return copy_to_user((void __user *)arg, &wtime,
- sizeof (struct rtc_time)) ? - EFAULT : 0;
-
- case RTC_SET_TIME:
-
- if (!capable(CAP_SYS_TIME)) return -EACCES;
-
- if (copy_from_user(&wtime, (struct rtc_time __user *)arg,
- sizeof(struct rtc_time)) )
- return -EFAULT;
-
- convert_to_efi_time(&wtime, &eft);
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- status = efi.set_time(&eft);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- return status == EFI_SUCCESS ? 0 : -EINVAL;
-
- case RTC_WKALM_SET:
-
- if (!capable(CAP_SYS_TIME)) return -EACCES;
-
- ewp = (struct rtc_wkalrm __user *)arg;
-
- if ( get_user(enabled, &ewp->enabled)
- || copy_from_user(&wtime, &ewp->time, sizeof(struct rtc_time)) )
- return -EFAULT;
-
- convert_to_efi_time(&wtime, &eft);
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
- /*
- * XXX Fixme:
- * As of EFI 0.92 with the firmware I have on my
- * machine this call does not seem to work quite
- * right
- */
- status = efi.set_wakeup_time((efi_bool_t)enabled, &eft);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- return status == EFI_SUCCESS ? 0 : -EINVAL;
-
- case RTC_WKALM_RD:
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- status = efi.get_wakeup_time((efi_bool_t *)&enabled, (efi_bool_t *)&pending, &eft);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- if (status != EFI_SUCCESS) return -EINVAL;
-
- ewp = (struct rtc_wkalrm __user *)arg;
-
- if ( put_user(enabled, &ewp->enabled)
- || put_user(pending, &ewp->pending)) return -EFAULT;
-
- convert_from_efi_time(&eft, &wtime);
-
- return copy_to_user(&ewp->time, &wtime,
- sizeof(struct rtc_time)) ? -EFAULT : 0;
- }
- return -ENOTTY;
-}
-
-/*
- * The various file operations we support.
- */
-
-static const struct file_operations efi_rtc_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = efi_rtc_ioctl,
- .llseek = no_llseek,
-};
-
-static struct miscdevice efi_rtc_dev= {
- EFI_RTC_MINOR,
- "efirtc",
- &efi_rtc_fops
-};
-
-/*
- * We export RAW EFI information to /proc/driver/efirtc
- */
-static int efi_rtc_proc_show(struct seq_file *m, void *v)
-{
- efi_time_t eft, alm;
- efi_time_cap_t cap;
- efi_bool_t enabled, pending;
- unsigned long flags;
-
- memset(&eft, 0, sizeof(eft));
- memset(&alm, 0, sizeof(alm));
- memset(&cap, 0, sizeof(cap));
-
- spin_lock_irqsave(&efi_rtc_lock, flags);
-
- efi.get_time(&eft, &cap);
- efi.get_wakeup_time(&enabled, &pending, &alm);
-
- spin_unlock_irqrestore(&efi_rtc_lock,flags);
-
- seq_printf(m,
- "Time : %u:%u:%u.%09u\n"
- "Date : %u-%u-%u\n"
- "Daylight : %u\n",
- eft.hour, eft.minute, eft.second, eft.nanosecond,
- eft.year, eft.month, eft.day,
- eft.daylight);
-
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(m, "Timezone : unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(m, "Timezone : %u\n", eft.timezone);
-
-
- seq_printf(m,
- "Alarm Time : %u:%u:%u.%09u\n"
- "Alarm Date : %u-%u-%u\n"
- "Alarm Daylight : %u\n"
- "Enabled : %s\n"
- "Pending : %s\n",
- alm.hour, alm.minute, alm.second, alm.nanosecond,
- alm.year, alm.month, alm.day,
- alm.daylight,
- enabled == 1 ? "yes" : "no",
- pending == 1 ? "yes" : "no");
-
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(m, "Timezone : unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(m, "Timezone : %u\n", alm.timezone);
-
- /*
- * now prints the capabilities
- */
- seq_printf(m,
- "Resolution : %u\n"
- "Accuracy : %u\n"
- "SetstoZero : %u\n",
- cap.resolution, cap.accuracy, cap.sets_to_zero);
-
- return 0;
-}
-static int __init
-efi_rtc_init(void)
-{
- int ret;
- struct proc_dir_entry *dir;
-
- printk(KERN_INFO "EFI Time Services Driver v%s\n", EFI_RTC_VERSION);
-
- ret = misc_register(&efi_rtc_dev);
- if (ret) {
- printk(KERN_ERR "efirtc: can't misc_register on minor=%d\n",
- EFI_RTC_MINOR);
- return ret;
- }
-
- dir = proc_create_single("driver/efirtc", 0, NULL, efi_rtc_proc_show);
- if (dir == NULL) {
- printk(KERN_ERR "efirtc: can't create /proc/driver/efirtc.\n");
- misc_deregister(&efi_rtc_dev);
- return -1;
- }
- return 0;
-}
-device_initcall(efi_rtc_init);
-
-/*
-MODULE_LICENSE("GPL");
-*/
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 914e293ba62b..9bc46da8d77a 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -244,7 +244,8 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_IMX_RNGC
tristate "Freescale i.MX RNGC Random Number Generator"
- depends on ARCH_MXC
+ depends on HAS_IOMEM && HAVE_CLK
+ depends on SOC_IMX25 || COMPILE_TEST
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -466,6 +467,13 @@ config HW_RANDOM_NPCM
If unsure, say Y.
+config HW_RANDOM_KEYSTONE
+ depends on ARCH_KEYSTONE || COMPILE_TEST
+ default HW_RANDOM
+ tristate "TI Keystone NETCP SA Hardware random number generator"
+ help
+ This option enables Keystone's hardware random generator.
+
endif # HW_RANDOM
config UML_RANDOM
@@ -482,10 +490,3 @@ config UML_RANDOM
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
-
-config HW_RANDOM_KEYSTONE
- depends on ARCH_KEYSTONE || COMPILE_TEST
- default HW_RANDOM
- tristate "TI Keystone NETCP SA Hardware random number generator"
- help
- This option enables Keystone's hardware random generator.
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 30cf00f8e9a0..9c47e431ce90 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -18,12 +18,22 @@
#include <linux/completion.h>
#include <linux/io.h>
+#define RNGC_VER_ID 0x0000
#define RNGC_COMMAND 0x0004
#define RNGC_CONTROL 0x0008
#define RNGC_STATUS 0x000C
#define RNGC_ERROR 0x0010
#define RNGC_FIFO 0x0014
+/* the fields in the ver id register */
+#define RNGC_TYPE_SHIFT 28
+#define RNGC_VER_MAJ_SHIFT 8
+
+/* the rng_type field */
+#define RNGC_TYPE_RNGB 0x1
+#define RNGC_TYPE_RNGC 0x2
+
#define RNGC_CMD_CLR_ERR 0x00000020
#define RNGC_CMD_CLR_INT 0x00000010
#define RNGC_CMD_SEED 0x00000002
@@ -31,6 +41,7 @@
#define RNGC_CTRL_MASK_ERROR 0x00000040
#define RNGC_CTRL_MASK_DONE 0x00000020
+#define RNGC_CTRL_AUTO_SEED 0x00000010
#define RNGC_STATUS_ERROR 0x00010000
#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
@@ -100,15 +111,11 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
- if (!ret) {
- imx_rngc_irq_mask_clear(rngc);
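+ /* mask and clear the interrupt whether or not the self test timed out */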
+ imx_rngc_irq_mask_clear(rngc);
+ if (!ret)
return -ETIMEDOUT;
- }
- if (rngc->err_reg != 0)
- return -EIO;
-
- return 0;
+ return rngc->err_reg ? -EIO : 0;
}
static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -165,17 +172,17 @@ static irqreturn_t imx_rngc_irq(int irq, void *priv)
static int imx_rngc_init(struct hwrng *rng)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
- u32 cmd;
+ u32 cmd, ctrl;
int ret;
/* clear error */
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+ imx_rngc_irq_unmask(rngc);
+
/* create seed, repeat while there is some statistical error */
do {
- imx_rngc_irq_unmask(rngc);
-
/* seed creation */
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
@@ -184,13 +191,42 @@ static int imx_rngc_init(struct hwrng *rng)
RNGC_TIMEOUT);
if (!ret) {
- imx_rngc_irq_mask_clear(rngc);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto err;
}
} while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
- return rngc->err_reg ? -EIO : 0;
+ if (rngc->err_reg) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /*
+ * Enable automatic seeding: the rngc creates a new seed automatically
+ * after serving 2^20 random 160-bit words
+ */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_AUTO_SEED;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+ * If initialisation was successful, we keep the interrupt
+ * unmasked until imx_rngc_cleanup is called. We mask the
+ * interrupt ourselves if we return an error.
+ */
+ return 0;
+
+err:
+ imx_rngc_irq_mask_clear(rngc);
+ return ret;
+}
+
+static void imx_rngc_cleanup(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+
+ imx_rngc_irq_mask_clear(rngc);
}
static int imx_rngc_probe(struct platform_device *pdev)
@@ -198,6 +234,8 @@ static int imx_rngc_probe(struct platform_device *pdev)
struct imx_rngc *rngc;
int ret;
int irq;
+ u32 ver_id;
+ u8 rng_type;
rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
if (!rngc)
@@ -223,6 +261,17 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ver_id = readl(rngc->base + RNGC_VER_ID);
+ rng_type = ver_id >> RNGC_TYPE_SHIFT;
+ /*
+ * This driver supports only RNGC and RNGB. (There's a different
+ * driver for RNGA.)
+ */
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
+ ret = -ENODEV;
+ goto err;
+ }
+
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
@@ -235,6 +284,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
rngc->rng.name = pdev->name;
rngc->rng.init = imx_rngc_init;
rngc->rng.read = imx_rngc_read;
+ rngc->rng.cleanup = imx_rngc_cleanup;
rngc->dev = &pdev->dev;
platform_set_drvdata(pdev, rngc);
@@ -244,18 +294,21 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
- dev_err(rngc->dev, "FSL RNGC self test failed.\n");
+ dev_err(rngc->dev, "self test failed\n");
goto err;
}
}
ret = hwrng_register(&rngc->rng);
if (ret) {
- dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
+ dev_err(&pdev->dev, "hwrng registration failed\n");
goto err;
}
- dev_info(&pdev->dev, "Freescale RNGC registered.\n");
+ dev_info(&pdev->dev,
+ "Freescale RNG%c registered (HW revision %d.%02d)\n",
+ rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
+ (ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
err:
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index e08a8887e718..a431c5cbe2be 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index a9d9f074fbd6..7d583222e8fa 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -75,7 +75,7 @@ struct vma_data {
enum mspec_page_type type; /* Type of pages allocated. */
unsigned long vm_start; /* Original (unsplit) base. */
unsigned long vm_end; /* Original (unsplit) end. */
- unsigned long maddr[0]; /* Array of MSPEC addresses. */
+ unsigned long maddr[]; /* Array of MSPEC addresses. */
};
/*
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
index 9dedfd7adc0e..f2b9fdc1f9ea 100644
--- a/drivers/char/nwbutton.h
+++ b/drivers/char/nwbutton.h
@@ -14,7 +14,6 @@
#define NUM_PRESSES_REBOOT 2 /* How many presses to activate shutdown */
#define BUTTON_DELAY 30 /* How many jiffies for sequence to end */
#define VERSION "0.3" /* Driver version number */
-#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */
/* Structure definitions: */
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index a4a0797daa19..0973c2c2b01a 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -576,7 +576,7 @@ static const struct file_operations flash_fops =
static struct miscdevice flash_miscdev =
{
- FLASH_MINOR,
+ NWFLASH_MINOR,
"nwflash",
&flash_fops
};
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 15bf585af5d3..4edb4174a1e2 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -731,8 +731,9 @@ static void monitor_card(struct timer_list *t)
}
switch (dev->mstate) {
+ case M_CARDOFF: {
unsigned char flags0;
- case M_CARDOFF:
+
DEBUGP(4, dev, "M_CARDOFF\n");
flags0 = inb(REG_FLAGS0(iobase));
if (flags0 & 0x02) {
@@ -755,6 +756,7 @@ static void monitor_card(struct timer_list *t)
dev->mdelay = T_50MSEC;
}
break;
+ }
case M_FETCH_ATR:
DEBUGP(4, dev, "M_FETCH_ATR\n");
xoutb(0x80, REG_FLAGS0(iobase));
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 2c2381a806ae..38b46c7d1737 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -355,14 +355,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct pp_struct *pp = file->private_data;
struct parport *port;
void __user *argp = (void __user *)arg;
+ struct ieee1284_info *info;
+ unsigned char reg;
+ unsigned char mask;
+ int mode;
+ s32 time32[2];
+ s64 time64[2];
+ struct timespec64 ts;
+ int ret;
/* First handle the cases that don't take arguments. */
switch (cmd) {
case PPCLAIM:
{
- struct ieee1284_info *info;
- int ret;
-
if (pp->flags & PP_CLAIMED) {
dev_dbg(&pp->pdev->dev, "you've already got it!\n");
return -EINVAL;
@@ -513,15 +518,6 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
port = pp->pdev->port;
switch (cmd) {
- struct ieee1284_info *info;
- unsigned char reg;
- unsigned char mask;
- int mode;
- s32 time32[2];
- s64 time64[2];
- struct timespec64 ts;
- int ret;
-
case PPRSTATUS:
reg = parport_read_status(port);
if (copy_to_user(argp, &reg, sizeof(reg)))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c7f9584de2c8..0d10e31fd342 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -781,27 +781,55 @@ static int __init parse_trust_cpu(char *arg)
}
early_param("random.trust_cpu", parse_trust_cpu);
-static void crng_initialize(struct crng_state *crng)
+static bool crng_init_try_arch(struct crng_state *crng)
{
int i;
- int arch_init = 1;
+ bool arch_init = true;
unsigned long rv;
- memcpy(&crng->state[0], "expand 32-byte k", 16);
- if (crng == &primary_crng)
- _extract_entropy(&input_pool, &crng->state[4],
- sizeof(__u32) * 12, 0);
- else
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv)) {
rv = random_get_entropy();
- arch_init = 0;
+ arch_init = false;
+ }
+ crng->state[i] ^= rv;
+ }
+
+ return arch_init;
+}
+
+static bool __init crng_init_try_arch_early(struct crng_state *crng)
+{
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
+
+ for (i = 4; i < 16; i++) {
+ if (!arch_get_random_seed_long_early(&rv) &&
+ !arch_get_random_long_early(&rv)) {
+ rv = random_get_entropy();
+ arch_init = false;
}
crng->state[i] ^= rv;
}
- if (trust_cpu && arch_init && crng == &primary_crng) {
+
+ return arch_init;
+}
+
+static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+{
+ memcpy(&crng->state[0], "expand 32-byte k", 16);
+ _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ crng_init_try_arch(crng);
+ crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static void __init crng_initialize_primary(struct crng_state *crng)
+{
+ memcpy(&crng->state[0], "expand 32-byte k", 16);
+ _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+ if (crng_init_try_arch_early(crng) && trust_cpu) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
@@ -822,7 +850,7 @@ static void do_numa_crng_init(struct work_struct *work)
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
spin_lock_init(&crng->lock);
- crng_initialize(crng);
+ crng_initialize_secondary(crng);
pool[i] = crng;
}
mb();
@@ -1142,14 +1170,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
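+	/* READ_ONCE()/WRITE_ONCE() annotate lockless accesses to state that may be updated concurrently */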
- delta = sample.jiffies - state->last_time;
- state->last_time = sample.jiffies;
+ delta = sample.jiffies - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, sample.jiffies);
- delta2 = delta - state->last_delta;
- state->last_delta = delta;
+ delta2 = delta - READ_ONCE(state->last_delta);
+ WRITE_ONCE(state->last_delta, delta);
- delta3 = delta2 - state->last_delta2;
- state->last_delta2 = delta2;
+ delta3 = delta2 - READ_ONCE(state->last_delta2);
+ WRITE_ONCE(state->last_delta2, delta2);
if (delta < 0)
delta = -delta;
@@ -1771,7 +1799,7 @@ static void __init init_std_data(struct entropy_store *r)
int __init rand_initialize(void)
{
init_std_data(&input_pool);
- crng_initialize(&primary_crng);
+ crng_initialize_primary(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
urandom_warning.interval = 0;
@@ -2149,11 +2177,11 @@ struct batched_entropy {
/*
* Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is as good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
* that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@@ -2166,15 +2194,6 @@ u64 get_random_u64(void)
struct batched_entropy *batch;
static void *previous;
-#if BITS_PER_LONG == 64
- if (arch_get_random_long((unsigned long *)&ret))
- return ret;
-#else
- if (arch_get_random_long((unsigned long *)&ret) &&
- arch_get_random_long((unsigned long *)&ret + 1))
- return ret;
-#endif
-
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u64);
@@ -2199,9 +2218,6 @@ u32 get_random_u32(void)
struct batched_entropy *batch;
static void *previous;
- if (arch_get_random_int(&ret))
- return ret;
-
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u32);
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
deleted file mode 100644
index 3b91184b77ae..000000000000
--- a/drivers/char/rtc.c
+++ /dev/null
@@ -1,1311 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Real Time Clock interface for Linux
- *
- * Copyright (C) 1996 Paul Gortmaker
- *
- * This driver allows use of the real time clock (built into
- * nearly all computers) from user space. It exports the /dev/rtc
- * interface supporting various ioctl() and also the
- * /proc/driver/rtc pseudo-file for status information.
- *
- * The ioctls can be used to set the interrupt behaviour and
- * generation rate from the RTC via IRQ 8. Then the /dev/rtc
- * interface can be used to make use of these timer interrupts,
- * be they interval or alarm based.
- *
- * The /dev/rtc interface will block on reads until an interrupt
- * has been received. If a RTC interrupt has already happened,
- * it will output an unsigned long and then block. The output value
- * contains the interrupt status in the low byte and the number of
- * interrupts since the last read in the remaining high bytes. The
- * /dev/rtc interface can also be used with the select(2) call.
- *
- * Based on other minimal char device drivers, like Alan's
- * watchdog, Ted's random, etc. etc.
- *
- * 1.07 Paul Gortmaker.
- * 1.08 Miquel van Smoorenburg: disallow certain things on the
- * DEC Alpha as the CMOS clock is also used for other things.
- * 1.09 Nikita Schmidt: epoch support and some Alpha cleanup.
- * 1.09a Pete Zaitcev: Sun SPARC
- * 1.09b Jeff Garzik: Modularize, init cleanup
- * 1.09c Jeff Garzik: SMP cleanup
- * 1.10 Paul Barton-Davis: add support for async I/O
- * 1.10a Andrea Arcangeli: Alpha updates
- * 1.10b Andrew Morton: SMP lock fix
- * 1.10c Cesar Barros: SMP locking fixes and cleanup
- * 1.10d Paul Gortmaker: delete paranoia check in rtc_exit
- * 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness.
- * 1.11 Takashi Iwai: Kernel access functions
- * rtc_register/rtc_unregister/rtc_control
- * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init
- * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer
- * CONFIG_HPET_EMULATE_RTC
- * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly.
- * 1.12ac Alan Cox: Allow read access to the day of week register
- * 1.12b David John: Remove calls to the BKL.
- */
-
-#define RTC_VERSION "1.12b"
-
-/*
- * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
- * interrupts disabled. Due to the index-port/data-port (0x70/0x71)
- * design of the RTC, we don't want two different things trying to
- * get to it at once. (e.g. the periodic 11 min sync from
- * kernel/time/ntp.c vs. this driver.)
- */
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/mc146818rtc.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/sched/signal.h>
-#include <linux/sysctl.h>
-#include <linux/wait.h>
-#include <linux/bcd.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-#include <linux/ratelimit.h>
-
-#include <asm/current.h>
-
-#ifdef CONFIG_X86
-#include <asm/hpet.h>
-#endif
-
-#ifdef CONFIG_SPARC32
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <asm/io.h>
-
-static unsigned long rtc_port;
-static int rtc_irq;
-#endif
-
-#ifdef CONFIG_HPET_EMULATE_RTC
-#undef RTC_IRQ
-#endif
-
-#ifdef RTC_IRQ
-static int rtc_has_irq = 1;
-#endif
-
-#ifndef CONFIG_HPET_EMULATE_RTC
-#define is_hpet_enabled() 0
-#define hpet_set_alarm_time(hrs, min, sec) 0
-#define hpet_set_periodic_freq(arg) 0
-#define hpet_mask_rtc_irq_bit(arg) 0
-#define hpet_set_rtc_irq_bit(arg) 0
-#define hpet_rtc_timer_init() do { } while (0)
-#define hpet_rtc_dropped_irq() 0
-#define hpet_register_irq_handler(h) ({ 0; })
-#define hpet_unregister_irq_handler(h) ({ 0; })
-#ifdef RTC_IRQ
-static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
-{
- return 0;
-}
-#endif
-#endif
-
-/*
- * We sponge a minor off of the misc major. No need slurping
- * up another valuable major dev number for this. If you add
- * an ioctl, make sure you don't conflict with SPARC's RTC
- * ioctls.
- */
-
-static struct fasync_struct *rtc_async_queue;
-
-static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
-
-#ifdef RTC_IRQ
-static void rtc_dropped_irq(struct timer_list *unused);
-
-static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
-#endif
-
-static ssize_t rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-
-static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
-
-#ifdef RTC_IRQ
-static __poll_t rtc_poll(struct file *file, poll_table *wait);
-#endif
-
-static void get_rtc_alm_time(struct rtc_time *alm_tm);
-#ifdef RTC_IRQ
-static void set_rtc_irq_bit_locked(unsigned char bit);
-static void mask_rtc_irq_bit_locked(unsigned char bit);
-
-static inline void set_rtc_irq_bit(unsigned char bit)
-{
- spin_lock_irq(&rtc_lock);
- set_rtc_irq_bit_locked(bit);
- spin_unlock_irq(&rtc_lock);
-}
-
-static void mask_rtc_irq_bit(unsigned char bit)
-{
- spin_lock_irq(&rtc_lock);
- mask_rtc_irq_bit_locked(bit);
- spin_unlock_irq(&rtc_lock);
-}
-#endif
-
-#ifdef CONFIG_PROC_FS
-static int rtc_proc_show(struct seq_file *seq, void *v);
-#endif
-
-/*
- * Bits in rtc_status. (6 bits of room for future expansion)
- */
-
-#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
-#define RTC_TIMER_ON 0x02 /* missed irq timer active */
-
-/*
- * rtc_status is never changed by rtc_interrupt, and ioctl/open/close are
- * protected by the rtc_lock spinlock. However, ioctl can still clear
- * RTC_TIMER_ON in rtc_status and call del_timer after the interrupt has
- * read rtc_status but before it calls mod_timer, which would then re-enable
- * the timer (but you would need awful timing before you'd trip on it).
- */
-static unsigned long rtc_status; /* bitmapped status byte. */
-static unsigned long rtc_freq; /* Current periodic IRQ rate */
-static unsigned long rtc_irq_data; /* our output to the world */
-static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
-
-/*
- * If this driver ever becomes modularised, it will be really nice
- * to make the epoch retain its value across module reload...
- */
-
-static unsigned long epoch = 1900; /* year corresponding to 0x00 */
-
-static const unsigned char days_in_mo[] =
-{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-/*
- * Returns true if a clock update is in progress
- */
-static inline unsigned char rtc_is_updating(void)
-{
- unsigned long flags;
- unsigned char uip;
-
- spin_lock_irqsave(&rtc_lock, flags);
- uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
- spin_unlock_irqrestore(&rtc_lock, flags);
- return uip;
-}
-
-#ifdef RTC_IRQ
-/*
- * A very tiny interrupt handler. It runs with interrupts disabled,
- * but there is possibility of conflicting with the set_rtc_mmss()
- * call (the rtc irq and the timer irq can easily run at the same
- * time in two different CPUs). So we need to serialize
- * accesses to the chip with the rtc_lock spinlock that each
- * architecture should implement in the timer code.
- * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.)
- */
-
-static irqreturn_t rtc_interrupt(int irq, void *dev_id)
-{
- /*
- * Can be an alarm interrupt, update complete interrupt,
- * or a periodic interrupt. We store the status in the
- * low byte and the number of interrupts received since
- * the last read in the remainder of rtc_irq_data.
- */
-
- spin_lock(&rtc_lock);
- rtc_irq_data += 0x100;
- rtc_irq_data &= ~0xff;
- if (is_hpet_enabled()) {
- /*
- * In this case it is HPET RTC interrupt handler
- * calling us, with the interrupt information
- * passed as arg1, instead of irq.
- */
- rtc_irq_data |= (unsigned long)irq & 0xF0;
- } else {
- rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0);
- }
-
- if (rtc_status & RTC_TIMER_ON)
- mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
-
- spin_unlock(&rtc_lock);
-
- wake_up_interruptible(&rtc_wait);
-
- kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
-
- return IRQ_HANDLED;
-}
-#endif
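
That packed encoding is exactly what userspace sees on each read of /dev/rtc. A hypothetical userspace sketch of decoding one value (assumes an fd already opened on /dev/rtc and an 8-byte unsigned long ABI):

	#include <stdio.h>
	#include <unistd.h>

	/* Hypothetical helper: report one event read from /dev/rtc. */
	static void print_rtc_event(int fd)
	{
		unsigned long data;

		if (read(fd, &data, sizeof(data)) != sizeof(data))
			return;
		printf("%lu interrupt(s), status byte 0x%02lx\n",
		       data >> 8, data & 0xff);
	}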
-
-/*
- * sysctl-tuning infrastructure.
- */
-static struct ctl_table rtc_table[] = {
- {
- .procname = "max-user-freq",
- .data = &rtc_max_user_freq,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- { }
-};
-
-static struct ctl_table rtc_root[] = {
- {
- .procname = "rtc",
- .mode = 0555,
- .child = rtc_table,
- },
- { }
-};
-
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .mode = 0555,
- .child = rtc_root,
- },
- { }
-};
-
-static struct ctl_table_header *sysctl_header;
-
-static int __init init_sysctl(void)
-{
- sysctl_header = register_sysctl_table(dev_root);
- return 0;
-}
-
-static void __exit cleanup_sysctl(void)
-{
- unregister_sysctl_table(sysctl_header);
-}
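
The three nested tables boil down to a single tunable exposed at /proc/sys/dev/rtc/max-user-freq (sysctl name dev.rtc.max-user-freq). A hypothetical userspace sketch that reads the current ceiling:

	#include <stdio.h>

	/* Hypothetical sketch: print the unprivileged periodic-IRQ limit. */
	int main(void)
	{
		FILE *f = fopen("/proc/sys/dev/rtc/max-user-freq", "r");
		int hz;

		if (f && fscanf(f, "%d", &hz) == 1)
			printf("max user frequency: %d Hz\n", hz);
		if (f)
			fclose(f);
		return 0;
	}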
-
-/*
- * Now all the various file operations that we export.
- */
-
-static ssize_t rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
-#ifndef RTC_IRQ
- return -EIO;
-#else
- DECLARE_WAITQUEUE(wait, current);
- unsigned long data;
- ssize_t retval;
-
- if (rtc_has_irq == 0)
- return -EIO;
-
- /*
- * Historically this function used to assume that sizeof(unsigned long)
- * is the same in userspace and kernelspace. This led to problems
- * for configurations with multiple ABIs such as the MIPS o32 and 64
- * ABIs supported on the same kernel. So now we support reads of both
- * 4 and 8 bytes and assume that's the sizeof(unsigned long) in the
- * userspace ABI.
- */
- if (count != sizeof(unsigned int) && count != sizeof(unsigned long))
- return -EINVAL;
-
- add_wait_queue(&rtc_wait, &wait);
-
- do {
- /* First make it right. Then make it fast. Putting this whole
- * block within the parentheses of a while would be too
- * confusing. And no, xchg() is not the answer. */
-
- __set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irq(&rtc_lock);
- data = rtc_irq_data;
- rtc_irq_data = 0;
- spin_unlock_irq(&rtc_lock);
-
- if (data != 0)
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto out;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- goto out;
- }
- schedule();
- } while (1);
-
- if (count == sizeof(unsigned int)) {
- retval = put_user(data,
- (unsigned int __user *)buf) ?: sizeof(int);
- } else {
- retval = put_user(data,
- (unsigned long __user *)buf) ?: sizeof(long);
- }
- if (!retval)
- retval = count;
- out:
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&rtc_wait, &wait);
-
- return retval;
-#endif
-}
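
Since rtc_read() blocks (and rtc_poll() below is wired into the fops), the header comment's promise of select(2) support follows directly. A hypothetical userspace sketch of waiting with a timeout instead of blocking forever:

	#include <sys/select.h>

	/* Hypothetical sketch: wait up to one second for an RTC interrupt. */
	static int wait_for_rtc_irq(int fd)
	{
		struct timeval tv = { .tv_sec = 1 };
		fd_set rfds;

		FD_ZERO(&rfds);
		FD_SET(fd, &rfds);
		return select(fd + 1, &rfds, NULL, NULL, &tv);	/* >0: readable */
	}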
-
-static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
-{
- struct rtc_time wtime;
-
-#ifdef RTC_IRQ
- if (rtc_has_irq == 0) {
- switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- case RTC_PIE_OFF:
- case RTC_PIE_ON:
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- case RTC_IRQP_READ:
- case RTC_IRQP_SET:
- return -EINVAL;
- }
- }
-#endif
-
- switch (cmd) {
-#ifdef RTC_IRQ
- case RTC_AIE_OFF: /* Mask alarm int. enab. bit */
- {
- mask_rtc_irq_bit(RTC_AIE);
- return 0;
- }
- case RTC_AIE_ON: /* Allow alarm interrupts. */
- {
- set_rtc_irq_bit(RTC_AIE);
- return 0;
- }
- case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
- {
- /* can be called from isr via rtc_control() */
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- mask_rtc_irq_bit_locked(RTC_PIE);
- if (rtc_status & RTC_TIMER_ON) {
- rtc_status &= ~RTC_TIMER_ON;
- del_timer(&rtc_irq_timer);
- }
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return 0;
- }
- case RTC_PIE_ON: /* Allow periodic ints */
- {
- /* can be called from isr via rtc_control() */
- unsigned long flags;
-
- /*
- * We don't really want Joe User enabling more
- * than 64Hz of interrupts on a multi-user machine.
- */
- if (!kernel && (rtc_freq > rtc_max_user_freq) &&
- (!capable(CAP_SYS_RESOURCE)))
- return -EACCES;
-
- spin_lock_irqsave(&rtc_lock, flags);
- if (!(rtc_status & RTC_TIMER_ON)) {
- mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
- 2*HZ/100);
- rtc_status |= RTC_TIMER_ON;
- }
- set_rtc_irq_bit_locked(RTC_PIE);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return 0;
- }
- case RTC_UIE_OFF: /* Mask ints from RTC updates. */
- {
- mask_rtc_irq_bit(RTC_UIE);
- return 0;
- }
- case RTC_UIE_ON: /* Allow ints for RTC updates. */
- {
- set_rtc_irq_bit(RTC_UIE);
- return 0;
- }
-#endif
- case RTC_ALM_READ: /* Read the present alarm time */
- {
- /*
- * This returns a struct rtc_time. Fields reading >= 0xc0
- * mean "don't care" or "match all". Only the tm_hour,
- * tm_min, and tm_sec values are filled in.
- */
- memset(&wtime, 0, sizeof(struct rtc_time));
- get_rtc_alm_time(&wtime);
- break;
- }
- case RTC_ALM_SET: /* Store a time into the alarm */
- {
- /*
- * This expects a struct rtc_time. Writing 0xff means
- * "don't care" or "match all". Only the tm_hour,
- * tm_min and tm_sec are used.
- */
- unsigned char hrs, min, sec;
- struct rtc_time alm_tm;
-
- if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg,
- sizeof(struct rtc_time)))
- return -EFAULT;
-
- hrs = alm_tm.tm_hour;
- min = alm_tm.tm_min;
- sec = alm_tm.tm_sec;
-
- spin_lock_irq(&rtc_lock);
- if (hpet_set_alarm_time(hrs, min, sec)) {
- /*
- * Fall through and set the alarm time in CMOS too,
- * so that RTC_ALM_READ returns the proper value.
- */
- }
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
- RTC_ALWAYS_BCD) {
- if (sec < 60)
- sec = bin2bcd(sec);
- else
- sec = 0xff;
-
- if (min < 60)
- min = bin2bcd(min);
- else
- min = 0xff;
-
- if (hrs < 24)
- hrs = bin2bcd(hrs);
- else
- hrs = 0xff;
- }
- CMOS_WRITE(hrs, RTC_HOURS_ALARM);
- CMOS_WRITE(min, RTC_MINUTES_ALARM);
- CMOS_WRITE(sec, RTC_SECONDS_ALARM);
- spin_unlock_irq(&rtc_lock);
-
- return 0;
- }
- case RTC_RD_TIME: /* Read the time/date from RTC */
- {
- memset(&wtime, 0, sizeof(struct rtc_time));
- rtc_get_rtc_time(&wtime);
- break;
- }
- case RTC_SET_TIME: /* Set the RTC */
- {
- struct rtc_time rtc_tm;
- unsigned char mon, day, hrs, min, sec, leap_yr;
- unsigned char save_control, save_freq_select;
- unsigned int yrs;
-#ifdef CONFIG_MACH_DECSTATION
- unsigned int real_yrs;
-#endif
-
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
- sizeof(struct rtc_time)))
- return -EFAULT;
-
- yrs = rtc_tm.tm_year + 1900;
- mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
- day = rtc_tm.tm_mday;
- hrs = rtc_tm.tm_hour;
- min = rtc_tm.tm_min;
- sec = rtc_tm.tm_sec;
-
- if (yrs < 1970)
- return -EINVAL;
-
- leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
- if ((mon > 12) || (day == 0))
- return -EINVAL;
-
- if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
- return -EINVAL;
-
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
- yrs -= epoch;
- if (yrs > 255) /* They are unsigned */
- return -EINVAL;
-
- spin_lock_irq(&rtc_lock);
-#ifdef CONFIG_MACH_DECSTATION
- real_yrs = yrs;
- yrs = 72;
-
- /*
- * We want to keep the year set to 73 until March
- * for non-leap years, so that Feb 29th is handled
- * correctly.
- */
- if (!leap_yr && mon < 3) {
- real_yrs--;
- yrs = 73;
- }
-#endif
- /* These limits and adjustments are independent of
- * whether the chip is in binary mode or not.
- */
- if (yrs > 169) {
- spin_unlock_irq(&rtc_lock);
- return -EINVAL;
- }
- if (yrs >= 100)
- yrs -= 100;
-
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
- || RTC_ALWAYS_BCD) {
- sec = bin2bcd(sec);
- min = bin2bcd(min);
- hrs = bin2bcd(hrs);
- day = bin2bcd(day);
- mon = bin2bcd(mon);
- yrs = bin2bcd(yrs);
- }
-
- save_control = CMOS_READ(RTC_CONTROL);
- CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
- save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
-#ifdef CONFIG_MACH_DECSTATION
- CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
-#endif
- CMOS_WRITE(yrs, RTC_YEAR);
- CMOS_WRITE(mon, RTC_MONTH);
- CMOS_WRITE(day, RTC_DAY_OF_MONTH);
- CMOS_WRITE(hrs, RTC_HOURS);
- CMOS_WRITE(min, RTC_MINUTES);
- CMOS_WRITE(sec, RTC_SECONDS);
-
- CMOS_WRITE(save_control, RTC_CONTROL);
- CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-
- spin_unlock_irq(&rtc_lock);
- return 0;
- }
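
The leap_yr expression in RTC_SET_TIME above is the full Gregorian rule. Spelled out as a standalone helper with a few worked values (illustration only):

	static int is_leap(unsigned int year)
	{
		return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
	}
	/* is_leap(1900) == 0, is_leap(2000) == 1, is_leap(2024) == 1 */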
-#ifdef RTC_IRQ
- case RTC_IRQP_READ: /* Read the periodic IRQ rate. */
- {
- return put_user(rtc_freq, (unsigned long __user *)arg);
- }
- case RTC_IRQP_SET: /* Set periodic IRQ rate. */
- {
- int tmp = 0;
- unsigned char val;
- /* can be called from isr via rtc_control() */
- unsigned long flags;
-
- /*
- * The max we can do is 8192Hz.
- */
- if ((arg < 2) || (arg > 8192))
- return -EINVAL;
- /*
- * We don't really want Joe User generating more
- * than 64Hz of interrupts on a multi-user machine.
- */
- if (!kernel && (arg > rtc_max_user_freq) &&
- !capable(CAP_SYS_RESOURCE))
- return -EACCES;
-
- while (arg > (1<<tmp))
- tmp++;
-
- /*
- * Check that the input was really a power of 2.
- */
- if (arg != (1<<tmp))
- return -EINVAL;
-
- rtc_freq = arg;
-
- spin_lock_irqsave(&rtc_lock, flags);
- if (hpet_set_periodic_freq(arg)) {
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
- }
-
- val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0;
- val |= (16 - tmp);
- CMOS_WRITE(val, RTC_FREQ_SELECT);
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
- }
-#endif
- case RTC_EPOCH_READ: /* Read the epoch. */
- {
- return put_user(epoch, (unsigned long __user *)arg);
- }
- case RTC_EPOCH_SET: /* Set the epoch. */
- {
- /*
- * There were no RTC clocks before 1900.
- */
- if (arg < 1900)
- return -EINVAL;
-
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- epoch = arg;
- return 0;
- }
- default:
- return -ENOTTY;
- }
- return copy_to_user((void __user *)arg,
- &wtime, sizeof wtime) ? -EFAULT : 0;
-}
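
Taken together, these ioctls form the classic /dev/rtc periodic-tick recipe: pick a power-of-two rate, then enable the interrupt. A hypothetical userspace sketch (fd assumed open on /dev/rtc; RTC_IRQP_SET and RTC_PIE_ON come from <linux/rtc.h>):

	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	/* Hypothetical sketch: request 64 Hz periodic interrupts. */
	static int start_periodic(int fd)
	{
		if (ioctl(fd, RTC_IRQP_SET, 64) < 0)	/* must be a power of two */
			return -1;
		return ioctl(fd, RTC_PIE_ON, 0);	/* enable periodic IRQs */
	}

Each subsequent read() then returns once per tick, with the tick count in the high bytes as described above.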
-
-static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret;
- ret = rtc_do_ioctl(cmd, arg, 0);
- return ret;
-}
-
-/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
- */
-static int rtc_open(struct inode *inode, struct file *file)
-{
- spin_lock_irq(&rtc_lock);
-
- if (rtc_status & RTC_IS_OPEN)
- goto out_busy;
-
- rtc_status |= RTC_IS_OPEN;
-
- rtc_irq_data = 0;
- spin_unlock_irq(&rtc_lock);
- return 0;
-
-out_busy:
- spin_unlock_irq(&rtc_lock);
- return -EBUSY;
-}
-
-static int rtc_fasync(int fd, struct file *filp, int on)
-{
- return fasync_helper(fd, filp, on, &rtc_async_queue);
-}
-
-static int rtc_release(struct inode *inode, struct file *file)
-{
-#ifdef RTC_IRQ
- unsigned char tmp;
-
- if (rtc_has_irq == 0)
- goto no_irq;
-
- /*
- * Turn off all interrupts once the device is no longer
- * in use, and clear the data.
- */
-
- spin_lock_irq(&rtc_lock);
- if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
- tmp = CMOS_READ(RTC_CONTROL);
- tmp &= ~RTC_PIE;
- tmp &= ~RTC_AIE;
- tmp &= ~RTC_UIE;
- CMOS_WRITE(tmp, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
- }
- if (rtc_status & RTC_TIMER_ON) {
- rtc_status &= ~RTC_TIMER_ON;
- del_timer(&rtc_irq_timer);
- }
- spin_unlock_irq(&rtc_lock);
-
-no_irq:
-#endif
-
- spin_lock_irq(&rtc_lock);
- rtc_irq_data = 0;
- rtc_status &= ~RTC_IS_OPEN;
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-#ifdef RTC_IRQ
-static __poll_t rtc_poll(struct file *file, poll_table *wait)
-{
- unsigned long l;
-
- if (rtc_has_irq == 0)
- return 0;
-
- poll_wait(file, &rtc_wait, wait);
-
- spin_lock_irq(&rtc_lock);
- l = rtc_irq_data;
- spin_unlock_irq(&rtc_lock);
-
- if (l != 0)
- return EPOLLIN | EPOLLRDNORM;
- return 0;
-}
-#endif
-
-/*
- * The various file operations we support.
- */
-
-static const struct file_operations rtc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = rtc_read,
-#ifdef RTC_IRQ
- .poll = rtc_poll,
-#endif
- .unlocked_ioctl = rtc_ioctl,
- .open = rtc_open,
- .release = rtc_release,
- .fasync = rtc_fasync,
-};
-
-static struct miscdevice rtc_dev = {
- .minor = RTC_MINOR,
- .name = "rtc",
- .fops = &rtc_fops,
-};
-
-static resource_size_t rtc_size;
-
-static struct resource * __init rtc_request_region(resource_size_t size)
-{
- struct resource *r;
-
- if (RTC_IOMAPPED)
- r = request_region(RTC_PORT(0), size, "rtc");
- else
- r = request_mem_region(RTC_PORT(0), size, "rtc");
-
- if (r)
- rtc_size = size;
-
- return r;
-}
-
-static void rtc_release_region(void)
-{
- if (RTC_IOMAPPED)
- release_region(RTC_PORT(0), rtc_size);
- else
- release_mem_region(RTC_PORT(0), rtc_size);
-}
-
-static int __init rtc_init(void)
-{
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *ent;
-#endif
-#if defined(__alpha__) || defined(__mips__)
- unsigned int year, ctrl;
- char *guess = NULL;
-#endif
-#ifdef CONFIG_SPARC32
- struct device_node *ebus_dp;
- struct platform_device *op;
-#else
- void *r;
-#ifdef RTC_IRQ
- irq_handler_t rtc_int_handler_ptr;
-#endif
-#endif
-
-#ifdef CONFIG_SPARC32
- for_each_node_by_name(ebus_dp, "ebus") {
- struct device_node *dp;
- for_each_child_of_node(ebus_dp, dp) {
- if (of_node_name_eq(dp, "rtc")) {
- op = of_find_device_by_node(dp);
- if (op) {
- rtc_port = op->resource[0].start;
- rtc_irq = op->irqs[0];
- goto found;
- }
- }
- }
- }
- rtc_has_irq = 0;
- printk(KERN_ERR "rtc_init: no PC rtc found\n");
- return -EIO;
-
-found:
- if (!rtc_irq) {
- rtc_has_irq = 0;
- goto no_irq;
- }
-
- /*
- * XXX Interrupt pin #7 in Espresso is shared between RTC and
- * PCI Slot 2 INTA# (and some INTx# in Slot 1).
- */
- if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
- (void *)&rtc_port)) {
- rtc_has_irq = 0;
- printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
- return -EIO;
- }
-no_irq:
-#else
- r = rtc_request_region(RTC_IO_EXTENT);
-
- /*
- * If we've already requested a smaller range (for example, because
- * PNPBIOS or ACPI told us how the device is configured), the request
- * above might fail because it's too big.
- *
- * If so, request just the range we actually use.
- */
- if (!r)
- r = rtc_request_region(RTC_IO_EXTENT_USED);
- if (!r) {
-#ifdef RTC_IRQ
- rtc_has_irq = 0;
-#endif
- printk(KERN_ERR "rtc: I/O resource %lx is not free.\n",
- (long)(RTC_PORT(0)));
- return -EIO;
- }
-
-#ifdef RTC_IRQ
- if (is_hpet_enabled()) {
- int err;
-
- rtc_int_handler_ptr = hpet_rtc_interrupt;
- err = hpet_register_irq_handler(rtc_interrupt);
- if (err != 0) {
- printk(KERN_WARNING "hpet_register_irq_handler failed "
- "in rtc_init().\n");
- return err;
- }
- } else {
- rtc_int_handler_ptr = rtc_interrupt;
- }
-
- if (request_irq(RTC_IRQ, rtc_int_handler_ptr, 0, "rtc", NULL)) {
- /* Yeah right, seeing as irq 8 doesn't even hit the bus. */
- rtc_has_irq = 0;
- printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
- rtc_release_region();
-
- return -EIO;
- }
- hpet_rtc_timer_init();
-
-#endif
-
-#endif /* CONFIG_SPARC32 vs. others */
-
- if (misc_register(&rtc_dev)) {
-#ifdef RTC_IRQ
- free_irq(RTC_IRQ, NULL);
- hpet_unregister_irq_handler(rtc_interrupt);
- rtc_has_irq = 0;
-#endif
- rtc_release_region();
- return -ENODEV;
- }
-
-#ifdef CONFIG_PROC_FS
- ent = proc_create_single("driver/rtc", 0, NULL, rtc_proc_show);
- if (!ent)
- printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
-#endif
-
-#if defined(__alpha__) || defined(__mips__)
- rtc_freq = HZ;
-
- /* Each operating system on an Alpha uses its own epoch.
- Let's try to guess which one we are using now. */
-
- if (rtc_is_updating() != 0)
- msleep(20);
-
- spin_lock_irq(&rtc_lock);
- year = CMOS_READ(RTC_YEAR);
- ctrl = CMOS_READ(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
-
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- year = bcd2bin(year); /* This should never happen... */
-
- if (year < 20) {
- epoch = 2000;
- guess = "SRM (post-2000)";
- } else if (year >= 20 && year < 48) {
- epoch = 1980;
- guess = "ARC console";
- } else if (year >= 48 && year < 72) {
- epoch = 1952;
- guess = "Digital UNIX";
-#if defined(__mips__)
- } else if (year >= 72 && year < 74) {
- epoch = 2000;
- guess = "Digital DECstation";
-#else
- } else if (year >= 70) {
- epoch = 1900;
- guess = "Standard PC (1900)";
-#endif
- }
- if (guess)
- printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
- guess, epoch);
-#endif
-#ifdef RTC_IRQ
- if (rtc_has_irq == 0)
- goto no_irq2;
-
- spin_lock_irq(&rtc_lock);
- rtc_freq = 1024;
- if (!hpet_set_periodic_freq(rtc_freq)) {
- /*
- * Initialize periodic frequency to CMOS reset default,
- * which is 1024Hz
- */
- CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
- RTC_FREQ_SELECT);
- }
- spin_unlock_irq(&rtc_lock);
-no_irq2:
-#endif
-
- (void) init_sysctl();
-
- printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n");
-
- return 0;
-}
-
-static void __exit rtc_exit(void)
-{
- cleanup_sysctl();
- remove_proc_entry("driver/rtc", NULL);
- misc_deregister(&rtc_dev);
-
-#ifdef CONFIG_SPARC32
- if (rtc_has_irq)
- free_irq(rtc_irq, &rtc_port);
-#else
- rtc_release_region();
-#ifdef RTC_IRQ
- if (rtc_has_irq) {
- free_irq(RTC_IRQ, NULL);
- hpet_unregister_irq_handler(hpet_rtc_interrupt);
- }
-#endif
-#endif /* CONFIG_SPARC32 */
-}
-
-module_init(rtc_init);
-module_exit(rtc_exit);
-
-#ifdef RTC_IRQ
-/*
- * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
- * (usually during an IDE disk interrupt, with IRQ unmasking off)
- * Since the interrupt handler doesn't get called, the IRQ status
- * byte doesn't get read, and the RTC stops generating interrupts.
- * A timer is set, and will call this function if/when that happens.
- * To get it out of this stalled state, we just read the status.
- * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
- * (You *really* shouldn't be trying to use a non-realtime system
- * for something that requires a steady > 1kHz signal anyway.)
- */
-
-static void rtc_dropped_irq(struct timer_list *unused)
-{
- unsigned long freq;
-
- spin_lock_irq(&rtc_lock);
-
- if (hpet_rtc_dropped_irq()) {
- spin_unlock_irq(&rtc_lock);
- return;
- }
-
- /* Just in case someone disabled the timer from behind our back... */
- if (rtc_status & RTC_TIMER_ON)
- mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
-
- rtc_irq_data += ((rtc_freq/HZ)<<8);
- rtc_irq_data &= ~0xff;
- rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */
-
- freq = rtc_freq;
-
- spin_unlock_irq(&rtc_lock);
-
- printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
- freq);
-
- /* Now we have new data */
- wake_up_interruptible(&rtc_wait);
-
- kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
-}
-#endif
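
For a feel of the numbers in rtc_dropped_irq(): assuming HZ = 100, a stall at rtc_freq = 8192 credits 8192/100 = 81 missed interrupts (integer division) into the high bytes of rtc_irq_data before the status read restarts the chip, i.e. roughly the one jiffy of lost ticks the comment above predicts.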
-
-#ifdef CONFIG_PROC_FS
-/*
- * Info exported via "/proc/driver/rtc".
- */
-
-static int rtc_proc_show(struct seq_file *seq, void *v)
-{
-#define YN(bit) ((ctrl & bit) ? "yes" : "no")
-#define NY(bit) ((ctrl & bit) ? "no" : "yes")
- struct rtc_time tm;
- unsigned char batt, ctrl;
- unsigned long freq;
-
- spin_lock_irq(&rtc_lock);
- batt = CMOS_READ(RTC_VALID) & RTC_VRT;
- ctrl = CMOS_READ(RTC_CONTROL);
- freq = rtc_freq;
- spin_unlock_irq(&rtc_lock);
-
- rtc_get_rtc_time(&tm);
-
- /*
- * There is no way to tell if the user has the RTC set for local
- * time or for Coordinated Universal Time (UTC). Probably local though.
- */
- seq_printf(seq,
- "rtc_time\t: %ptRt\n"
- "rtc_date\t: %ptRd\n"
- "rtc_epoch\t: %04lu\n",
- &tm, &tm, epoch);
-
- get_rtc_alm_time(&tm);
-
- /*
- * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will
- * match any value for that particular field. Values that are
- * greater than a valid time, but less than 0xc0 shouldn't appear.
- */
- seq_puts(seq, "alarm\t\t: ");
- if (tm.tm_hour <= 24)
- seq_printf(seq, "%02d:", tm.tm_hour);
- else
- seq_puts(seq, "**:");
-
- if (tm.tm_min <= 59)
- seq_printf(seq, "%02d:", tm.tm_min);
- else
- seq_puts(seq, "**:");
-
- if (tm.tm_sec <= 59)
- seq_printf(seq, "%02d\n", tm.tm_sec);
- else
- seq_puts(seq, "**\n");
-
- seq_printf(seq,
- "DST_enable\t: %s\n"
- "BCD\t\t: %s\n"
- "24hr\t\t: %s\n"
- "square_wave\t: %s\n"
- "alarm_IRQ\t: %s\n"
- "update_IRQ\t: %s\n"
- "periodic_IRQ\t: %s\n"
- "periodic_freq\t: %ld\n"
- "batt_status\t: %s\n",
- YN(RTC_DST_EN),
- NY(RTC_DM_BINARY),
- YN(RTC_24H),
- YN(RTC_SQWE),
- YN(RTC_AIE),
- YN(RTC_UIE),
- YN(RTC_PIE),
- freq,
- batt ? "okay" : "dead");
-
- return 0;
-#undef YN
-#undef NY
-}
-#endif
-
-static void rtc_get_rtc_time(struct rtc_time *rtc_tm)
-{
- unsigned long uip_watchdog = jiffies, flags;
- unsigned char ctrl;
-#ifdef CONFIG_MACH_DECSTATION
- unsigned int real_year;
-#endif
-
- /*
- * Read the RTC once any update in progress is done. The update
- * can take just over 2ms. We wait 20ms. There is no need to
- * poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
- * If you need to know *exactly* when a second has started, enable
- * update complete interrupts (via ioctl) and then
- * immediately read /dev/rtc which will block until you get the IRQ.
- * Once the read clears, read the RTC time (again via ioctl). Easy.
- */
-
- while (rtc_is_updating() != 0 &&
- time_before(jiffies, uip_watchdog + 2*HZ/100))
- cpu_relax();
-
- /*
- * Only the values that we read from the RTC are set. We leave
- * tm_wday, tm_yday and tm_isdst untouched. Note that while the
- * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is
- * only updated by the RTC when initially set to a non-zero value.
- */
- spin_lock_irqsave(&rtc_lock, flags);
- rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
- rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
- rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
- rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
- rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
- rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
- /* Only set from 2.6.16 onwards */
- rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK);
-
-#ifdef CONFIG_MACH_DECSTATION
- real_year = CMOS_READ(RTC_DEC_YEAR);
-#endif
- ctrl = CMOS_READ(RTC_CONTROL);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
- rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
- rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
- rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
- rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
- rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
- rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday);
- }
-
-#ifdef CONFIG_MACH_DECSTATION
- rtc_tm->tm_year += real_year - 72;
-#endif
-
- /*
- * Account for differences between how the RTC uses the values
- * and how they are defined in a struct rtc_time.
- */
- rtc_tm->tm_year += epoch - 1900;
- if (rtc_tm->tm_year <= 69)
- rtc_tm->tm_year += 100;
-
- rtc_tm->tm_mon--;
-}
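
The epoch arithmetic at the end is easiest to see with concrete values: struct rtc_time counts years from 1900, so under the ARC epoch (1980) a chip year of 45 yields tm_year = 45 + 1980 - 1900 = 125, i.e. calendar year 2025. The <= 69 wrap matters only for the default 1900 epoch, where chip year 25 becomes tm_year = 25 + 100 = 125 as well.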
-
-static void get_rtc_alm_time(struct rtc_time *alm_tm)
-{
- unsigned char ctrl;
-
- /*
- * Only the values that we read from the RTC are set. That
- * means only tm_hour, tm_min, and tm_sec.
- */
- spin_lock_irq(&rtc_lock);
- alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
- alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
- alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
- ctrl = CMOS_READ(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
-
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
- alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
- alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
- }
-}
-
-#ifdef RTC_IRQ
-/*
- * Used to disable/enable interrupts for any one of UIE, AIE, PIE.
- * Rumour has it that if you frob the interrupt enable/disable
- * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to
- * ensure you actually start getting interrupts. Probably for
- * compatibility with older/broken chipset RTC implementations.
- * We also clear out any old irq data after an ioctl() that
- * meddles with the interrupt enable/disable bits.
- */
-
-static void mask_rtc_irq_bit_locked(unsigned char bit)
-{
- unsigned char val;
-
- if (hpet_mask_rtc_irq_bit(bit))
- return;
- val = CMOS_READ(RTC_CONTROL);
- val &= ~bit;
- CMOS_WRITE(val, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- rtc_irq_data = 0;
-}
-
-static void set_rtc_irq_bit_locked(unsigned char bit)
-{
- unsigned char val;
-
- if (hpet_set_rtc_irq_bit(bit))
- return;
- val = CMOS_READ(RTC_CONTROL);
- val |= bit;
- CMOS_WRITE(val, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- rtc_irq_data = 0;
-}
-#endif
-
-MODULE_AUTHOR("Paul Gortmaker");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(RTC_MINOR);
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 98f3150e0048..aff0a8e44fff 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -61,8 +61,6 @@
#include <linux/mutex.h>
#include <linux/toshiba.h>
-#define TOSH_MINOR_DEV 181
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
MODULE_DESCRIPTION("Toshiba laptop SMM driver");
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 58073836b555..8c77e88012e9 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -514,15 +514,15 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
return 0;
- rc = __compat_only_sysfs_link_entry_to_kobj(
- &chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL);
if (rc && rc != -ENOENT)
return rc;
/* All the names from tpm-sysfs */
for (i = chip->groups[0]->attrs; *i != NULL; ++i) {
- rc = __compat_only_sysfs_link_entry_to_kobj(
- &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name);
+ rc = compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL);
if (rc) {
tpm_del_legacy_sysfs(chip);
return rc;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 4df9b40d6342..3cbaec925606 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -112,7 +112,7 @@ struct port_buffer {
unsigned int sgpages;
/* sg is used if sgpages > 0. sg must be the last field in this struct */
- struct scatterlist sg[0];
+ struct scatterlist sg[];
};
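
The sg[0] to sg[] change here swaps the old GNU zero-length-array idiom for a C99 flexible array member, which lets the compiler and bounds checkers see the trailing array for what it is. A generic sketch of how such a struct is sized at allocation time (illustrative names, assuming <linux/slab.h> and the struct_size() helper from <linux/overflow.h>):

	struct demo_buf {
		unsigned int nents;
		struct scatterlist sg[];	/* flexible array member (C99) */
	};

	/* one overflow-safe expression covers header plus trailing entries */
	struct demo_buf *buf = kmalloc(struct_size(buf, sg, nents), GFP_KERNEL);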
/*
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 3732241352ce..8b90357f2a93 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -15,7 +15,11 @@ obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
obj-$(CONFIG_HAVE_AT91_SAM9X60_PLL) += clk-sam9x60-pll.o
+obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o
+obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o
obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
+obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
new file mode 100644
index 000000000000..c44a431b6c97
--- /dev/null
+++ b/drivers/clk/at91/at91rm9200.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+struct sck {
+ char *n;
+ char *p;
+ u8 id;
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+static const struct clk_master_characteristics rm9200_mck_characteristics = {
+ .output = { .min = 0, .max = 80000000 },
+ .divisors = { 1, 2, 3, 4 },
+};
+
+static u8 rm9200_pll_out[] = { 0, 2 };
+
+static const struct clk_range rm9200_pll_outputs[] = {
+ { .min = 80000000, .max = 160000000 },
+ { .min = 150000000, .max = 180000000 },
+};
+
+static const struct clk_pll_characteristics rm9200_pll_characteristics = {
+ .input = { .min = 1000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(rm9200_pll_outputs),
+ .output = rm9200_pll_outputs,
+ .out = rm9200_pll_out,
+};
+
+static const struct sck at91rm9200_systemck[] = {
+ { .n = "udpck", .p = "usbck", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 4 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "pck3", .p = "prog3", .id = 11 },
+};
+
+static const struct pck at91rm9200_periphck[] = {
+ { .n = "pioA_clk", .id = 2 },
+ { .n = "pioB_clk", .id = 3 },
+ { .n = "pioC_clk", .id = 4 },
+ { .n = "pioD_clk", .id = 5 },
+ { .n = "usart0_clk", .id = 6 },
+ { .n = "usart1_clk", .id = 7 },
+ { .n = "usart2_clk", .id = 8 },
+ { .n = "usart3_clk", .id = 9 },
+ { .n = "mci0_clk", .id = 10 },
+ { .n = "udc_clk", .id = 11 },
+ { .n = "twi0_clk", .id = 12 },
+ { .n = "spi0_clk", .id = 13 },
+ { .n = "ssc0_clk", .id = 14 },
+ { .n = "ssc1_clk", .id = 15 },
+ { .n = "ssc2_clk", .id = 16 },
+ { .n = "tc0_clk", .id = 17 },
+ { .n = "tc1_clk", .id = 18 },
+ { .n = "tc2_clk", .id = 19 },
+ { .n = "tc3_clk", .id = 20 },
+ { .n = "tc4_clk", .id = 21 },
+ { .n = "tc5_clk", .id = 22 },
+ { .n = "ohci_clk", .id = 23 },
+ { .n = "macb0_clk", .id = 24 },
+};
+
+static void __init at91rm9200_pmc_setup(struct device_node *np)
+{
+ const char *slowxtal_name, *mainxtal_name;
+ struct pmc_data *at91rm9200_pmc;
+ u32 usb_div[] = { 1, 2, 0, 0 };
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_xtal");
+ if (i < 0)
+ return;
+
+ slowxtal_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91rm9200_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91rm9200_systemck),
+ nck(at91rm9200_periphck), 0);
+ if (!at91rm9200_pmc)
+ return;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout,
+ &rm9200_pll_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
+ &at91rm9200_pll_layout,
+ &rm9200_pll_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slowxtal_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "pllbck";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91rm9200_master_layout,
+ &rm9200_mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->chws[PMC_MCK] = hw;
+
+ hw = at91rm9200_clk_register_usb(regmap, "usbck", "pllbck", usb_div);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slowxtal_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ parent_names[3] = "pllbck";
+ for (i = 0; i < 4; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 4, i,
+ &at91rm9200_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91rm9200_systemck[i].n,
+ at91rm9200_systemck[i].p,
+ at91rm9200_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->shws[at91rm9200_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91rm9200_periphck); i++) {
+ hw = at91_clk_register_peripheral(regmap,
+ at91rm9200_periphck[i].n,
+ "masterck",
+ at91rm9200_periphck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91rm9200_pmc->phws[at91rm9200_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91rm9200_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91rm9200_pmc);
+}
+/*
+ * While the TCB can be used as the clocksource, the system timer is most likely
+ * to be used instead. However, the pinctrl driver doesn't support probe
+ * deferral properly. Once this is fixed, this can be switched to a platform
+ * driver.
+ */
+CLK_OF_DECLARE_DRIVER(at91rm9200_pmc, "atmel,at91rm9200-pmc",
+ at91rm9200_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
new file mode 100644
index 000000000000..38a7d2d2df0c
--- /dev/null
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 0, .max = 133333333 },
+ .divisors = { 1, 2, 4, 3 },
+};
+
+static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
+
+static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 745000000, .max = 800000000 },
+ { .min = 695000000, .max = 750000000 },
+ { .min = 645000000, .max = 700000000 },
+ { .min = 595000000, .max = 650000000 },
+ { .min = 545000000, .max = 600000000 },
+ { .min = 495000000, .max = 555000000 },
+ { .min = 445000000, .max = 500000000 },
+ { .min = 400000000, .max = 450000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} at91sam9g45_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+static const struct clk_pcr_layout at91sam9g45_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+static const struct pck at91sam9g45_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "pioDE_clk", .id = 5, },
+ { .n = "trng_clk", .id = 6, },
+ { .n = "usart0_clk", .id = 7, },
+ { .n = "usart1_clk", .id = 8, },
+ { .n = "usart2_clk", .id = 9, },
+ { .n = "usart3_clk", .id = 10, },
+ { .n = "mci0_clk", .id = 11, },
+ { .n = "twi0_clk", .id = 12, },
+ { .n = "twi1_clk", .id = 13, },
+ { .n = "spi0_clk", .id = 14, },
+ { .n = "spi1_clk", .id = 15, },
+ { .n = "ssc0_clk", .id = 16, },
+ { .n = "ssc1_clk", .id = 17, },
+ { .n = "tcb0_clk", .id = 18, },
+ { .n = "pwm_clk", .id = 19, },
+ { .n = "adc_clk", .id = 20, },
+ { .n = "dma0_clk", .id = 21, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "lcd_clk", .id = 23, },
+ { .n = "ac97_clk", .id = 24, },
+ { .n = "macb0_clk", .id = 25, },
+ { .n = "isi_clk", .id = 26, },
+ { .n = "udphs_clk", .id = 27, },
+ { .n = "aestdessha_clk", .id = 28, },
+ { .n = "mci1_clk", .id = 29, },
+ { .n = "vdec_clk", .id = 30, },
+};
+
+static void __init at91sam9g45_pmc_setup(struct device_node *np)
+{
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *at91sam9g45_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91sam9g45_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91sam9g45_systemck),
+ nck(at91sam9g45_periphck), 0);
+ if (!at91sam9g45_pmc)
+ return;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91rm9200_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9g45_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91sam9g45_systemck[i].n,
+ at91sam9g45_systemck[i].p,
+ at91sam9g45_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->shws[at91sam9g45_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9g45_periphck); i++) {
+ hw = at91_clk_register_peripheral(regmap,
+ at91sam9g45_periphck[i].n,
+ "masterck",
+ at91sam9g45_periphck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9g45_pmc->phws[at91sam9g45_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9g45_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91sam9g45_pmc);
+}
+/*
+ * The TCB is used as the clocksource so its clock is needed early. This means
+ * this can't be a platform driver.
+ */
+CLK_OF_DECLARE_DRIVER(at91sam9g45_pmc, "atmel,at91sam9g45-pmc",
+ at91sam9g45_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
new file mode 100644
index 000000000000..8bb39d2ba84b
--- /dev/null
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 0, .max = 133333333 },
+ .divisors = { 1, 2, 4, 3 },
+ .have_div3_pres = 1,
+};
+
+static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
+
+static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 745000000, .max = 800000000 },
+ { .min = 695000000, .max = 750000000 },
+ { .min = 645000000, .max = 700000000 },
+ { .min = 595000000, .max = 650000000 },
+ { .min = 545000000, .max = 600000000 },
+ { .min = 495000000, .max = 555000000 },
+ { .min = 445000000, .max = 500000000 },
+ { .min = 400000000, .max = 450000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static u8 pllb_out[] = { 0 };
+
+static const struct clk_range pllb_outputs[] = {
+ { .min = 30000000, .max = 100000000 },
+};
+
+static const struct clk_pll_characteristics pllb_characteristics = {
+ .input = { .min = 2000000, .max = 32000000 },
+ .num_output = ARRAY_SIZE(pllb_outputs),
+ .output = pllb_outputs,
+ .out = pllb_out,
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} at91sam9n12_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "lcdck", .p = "masterck", .id = 3 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+};
+
+static const struct clk_pcr_layout at91sam9n12_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
+struct pck {
+ char *n;
+ u8 id;
+};
+
+static const struct pck at91sam9n12_periphck[] = {
+ { .n = "pioAB_clk", .id = 2, },
+ { .n = "pioCD_clk", .id = 3, },
+ { .n = "fuse_clk", .id = 4, },
+ { .n = "usart0_clk", .id = 5, },
+ { .n = "usart1_clk", .id = 6, },
+ { .n = "usart2_clk", .id = 7, },
+ { .n = "usart3_clk", .id = 8, },
+ { .n = "twi0_clk", .id = 9, },
+ { .n = "twi1_clk", .id = 10, },
+ { .n = "mci0_clk", .id = 12, },
+ { .n = "spi0_clk", .id = 13, },
+ { .n = "spi1_clk", .id = 14, },
+ { .n = "uart0_clk", .id = 15, },
+ { .n = "uart1_clk", .id = 16, },
+ { .n = "tcb_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "lcdc_clk", .id = 25, },
+ { .n = "sha_clk", .id = 27, },
+ { .n = "ssc0_clk", .id = 28, },
+ { .n = "aes_clk", .id = 29, },
+ { .n = "trng_clk", .id = 30, },
+};
+
+static void __init at91sam9n12_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *at91sam9n12_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ at91sam9n12_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(at91sam9n12_systemck), 31, 0);
+ if (!at91sam9n12_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->chws[PMC_MAIN] = hw;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &at91rm9200_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
+ &at91rm9200_pll_layout, &pllb_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "pllbck";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->chws[PMC_MCK] = hw;
+
+ hw = at91sam9n12_clk_register_usb(regmap, "usbck", "pllbck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "pllbck";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 2; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9x5_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) {
+ hw = at91_clk_register_system(regmap, at91sam9n12_systemck[i].n,
+ at91sam9n12_systemck[i].p,
+ at91sam9n12_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->shws[at91sam9n12_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(at91sam9n12_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &at91sam9n12_pcr_layout,
+ at91sam9n12_periphck[i].n,
+ "masterck",
+ at91sam9n12_periphck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ at91sam9n12_pmc->phws[at91sam9n12_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9n12_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(at91sam9n12_pmc);
+}
+/*
+ * The TCB is used as the clocksource so its clock is needed early. This means
+ * this can't be a platform driver.
+ */
+CLK_OF_DECLARE_DRIVER(at91sam9n12_pmc, "atmel,at91sam9n12-pmc",
+ at91sam9n12_pmc_setup);
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index dfb354a5ff18..e699803986e5 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -14,27 +14,8 @@
#include "pmc.h"
-#define PMC_PLL_CTRL0 0xc
-#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0)
-#define PMC_PLL_CTRL0_ENPLL BIT(28)
-#define PMC_PLL_CTRL0_ENPLLCK BIT(29)
-#define PMC_PLL_CTRL0_ENLOCK BIT(31)
-
-#define PMC_PLL_CTRL1 0x10
-#define PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0)
-#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
-
-#define PMC_PLL_ACR 0x18
-#define PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL
-#define PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL
-#define PMC_PLL_ACR_UTMIVR BIT(12)
-#define PMC_PLL_ACR_UTMIBG BIT(13)
-#define PMC_PLL_ACR_LOOP_FILTER_MSK GENMASK(31, 24)
-
-#define PMC_PLL_UPDT 0x1c
-#define PMC_PLL_UPDT_UPDATE BIT(8)
-
-#define PMC_PLL_ISR0 0xec
+#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0)
+#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
#define PLL_DIV_MAX (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1)
#define UPLL_DIV 2
@@ -59,7 +40,7 @@ static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
{
unsigned int status;
- regmap_read(regmap, PMC_PLL_ISR0, &status);
+ regmap_read(regmap, AT91_PMC_PLL_ISR0, &status);
return !!(status & BIT(id));
}
@@ -74,12 +55,12 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
u32 val;
spin_lock_irqsave(pll->lock, flags);
- regmap_write(regmap, PMC_PLL_UPDT, pll->id);
+ regmap_write(regmap, AT91_PMC_PLL_UPDT, pll->id);
- regmap_read(regmap, PMC_PLL_CTRL0, &val);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
- regmap_read(regmap, PMC_PLL_CTRL1, &val);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
if (sam9x60_pll_ready(regmap, pll->id) &&
@@ -88,39 +69,39 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
return 0;
}
- /* Recommended value for PMC_PLL_ACR */
+ /* Recommended value for AT91_PMC_PLL_ACR */
if (pll->characteristics->upll)
- val = PMC_PLL_ACR_DEFAULT_UPLL;
+ val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
else
- val = PMC_PLL_ACR_DEFAULT_PLLA;
- regmap_write(regmap, PMC_PLL_ACR, val);
+ val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
+ regmap_write(regmap, AT91_PMC_PLL_ACR, val);
- regmap_write(regmap, PMC_PLL_CTRL1,
+ regmap_write(regmap, AT91_PMC_PLL_CTRL1,
FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul));
if (pll->characteristics->upll) {
/* Enable the UTMI internal bandgap */
- val |= PMC_PLL_ACR_UTMIBG;
- regmap_write(regmap, PMC_PLL_ACR, val);
+ val |= AT91_PMC_PLL_ACR_UTMIBG;
+ regmap_write(regmap, AT91_PMC_PLL_ACR, val);
udelay(10);
/* Enable the UTMI internal regulator */
- val |= PMC_PLL_ACR_UTMIVR;
- regmap_write(regmap, PMC_PLL_ACR, val);
+ val |= AT91_PMC_PLL_ACR_UTMIVR;
+ regmap_write(regmap, AT91_PMC_PLL_ACR, val);
udelay(10);
}
- regmap_update_bits(regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
- regmap_write(regmap, PMC_PLL_CTRL0,
- PMC_PLL_CTRL0_ENLOCK | PMC_PLL_CTRL0_ENPLL |
- PMC_PLL_CTRL0_ENPLLCK | pll->div);
+ regmap_write(regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL |
+ AT91_PMC_PLL_CTRL0_ENPLLCK | pll->div);
- regmap_update_bits(regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
while (!sam9x60_pll_ready(regmap, pll->id))
cpu_relax();
@@ -144,22 +125,24 @@ static void sam9x60_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(pll->lock, flags);
- regmap_write(pll->regmap, PMC_PLL_UPDT, pll->id);
+ regmap_write(pll->regmap, AT91_PMC_PLL_UPDT, pll->id);
- regmap_update_bits(pll->regmap, PMC_PLL_CTRL0,
- PMC_PLL_CTRL0_ENPLLCK, 0);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENPLLCK, 0);
- regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
- regmap_update_bits(pll->regmap, PMC_PLL_CTRL0, PMC_PLL_CTRL0_ENPLL, 0);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENPLL, 0);
if (pll->characteristics->upll)
- regmap_update_bits(pll->regmap, PMC_PLL_ACR,
- PMC_PLL_ACR_UTMIBG | PMC_PLL_ACR_UTMIVR, 0);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_ACR,
+ AT91_PMC_PLL_ACR_UTMIBG |
+ AT91_PMC_PLL_ACR_UTMIVR, 0);
- regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
- PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+ regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
spin_unlock_irqrestore(pll->lock, flags);
}
@@ -316,10 +299,10 @@ sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
pll->regmap = regmap;
pll->lock = lock;
- regmap_write(regmap, PMC_PLL_UPDT, id);
- regmap_read(regmap, PMC_PLL_CTRL0, &pllr);
+ regmap_write(regmap, AT91_PMC_PLL_UPDT, id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &pllr);
pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr);
- regmap_read(regmap, PMC_PLL_CTRL1, &pllr);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL1, &pllr);
pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr);
hw = &pll->hw;
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 22aede42a336..31d5c45e30d7 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -25,6 +25,7 @@ struct at91sam9x5_clk_usb {
struct clk_hw hw;
struct regmap *regmap;
u32 usbs_mask;
+ u8 num_parents;
};
#define to_at91sam9x5_clk_usb(hw) \
@@ -75,6 +76,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
tmp_parent_rate = req->rate * div;
tmp_parent_rate = clk_hw_round_rate(parent,
tmp_parent_rate);
+ if (!tmp_parent_rate)
+ continue;
+
tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
if (tmp_rate < req->rate)
tmp_diff = req->rate - tmp_rate;
@@ -107,7 +111,7 @@ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- if (index > 1)
+ if (index >= usb->num_parents)
return -EINVAL;
regmap_update_bits(usb->regmap, AT91_PMC_USB, usb->usbs_mask, index);
@@ -211,7 +215,8 @@ _at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
usb->hw.init = &init;
usb->regmap = regmap;
- usb->usbs_mask = SAM9X5_USBS_MASK;
+ usb->usbs_mask = usbs_mask;
+ usb->num_parents = num_parents;
hw = &usb->hw;
ret = clk_hw_register(NULL, &usb->hw);
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 77398aefeb6d..cc19e8fb83be 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -124,7 +124,6 @@ static const struct {
char *n;
u8 id;
struct clk_range r;
- bool pll;
} sam9x60_gck[] = {
{ .n = "flex0_gclk", .id = 5, },
{ .n = "flex1_gclk", .id = 6, },
@@ -144,11 +143,9 @@ static const struct {
{ .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, },
{ .n = "flex11_gclk", .id = 32, },
{ .n = "flex12_gclk", .id = 33, },
- { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 },
- .pll = true, },
+ { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 }, },
{ .n = "pit64b_gclk", .id = 37, },
- { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 },
- .pll = true, },
+ { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, },
{ .n = "tcb1_gclk", .id = 45, },
{ .n = "dbgu_gclk", .id = 47, },
};
@@ -237,9 +234,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[0] = "pllack";
parent_names[1] = "upllck";
- parent_names[2] = "mainck";
- parent_names[3] = "mainck";
- hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 4);
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
if (IS_ERR(hw))
goto err_free;
@@ -290,7 +286,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
sam9x60_gck[i].n,
parent_names, 6,
sam9x60_gck[i].id,
- sam9x60_gck[i].pll,
+ false,
&sam9x60_gck[i].r);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
new file mode 100644
index 000000000000..88506f909c08
--- /dev/null
+++ b/drivers/clk/at91/sama5d3.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 0, .max = 166000000 },
+ .divisors = { 1, 2, 4, 3 },
+};
+
+static u8 plla_out[] = { 0 };
+
+static u16 plla_icpll[] = { 0 };
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 400000000, .max = 1000000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 8000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+ .icpll = plla_icpll,
+ .out = plla_out,
+};
+
+static const struct clk_pcr_layout sama5d3_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(6, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} sama5d3_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "lcdck", .p = "masterck", .id = 3 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+} sama5d3_periphck[] = {
+ { .n = "dbgu_clk", .id = 2, },
+ { .n = "hsmc_clk", .id = 5, },
+ { .n = "pioA_clk", .id = 6, },
+ { .n = "pioB_clk", .id = 7, },
+ { .n = "pioC_clk", .id = 8, },
+ { .n = "pioD_clk", .id = 9, },
+ { .n = "pioE_clk", .id = 10, },
+ { .n = "usart0_clk", .id = 12, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "usart1_clk", .id = 13, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "usart2_clk", .id = 14, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "usart3_clk", .id = 15, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart0_clk", .id = 16, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "uart1_clk", .id = 17, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "twi0_clk", .id = 18, .r = { .min = 0, .max = 41500000 }, },
+ { .n = "twi1_clk", .id = 19, .r = { .min = 0, .max = 41500000 }, },
+ { .n = "twi2_clk", .id = 20, .r = { .min = 0, .max = 41500000 }, },
+ { .n = "mci0_clk", .id = 21, },
+ { .n = "mci1_clk", .id = 22, },
+ { .n = "mci2_clk", .id = 23, },
+ { .n = "spi0_clk", .id = 24, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "spi1_clk", .id = 25, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "tcb0_clk", .id = 26, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "tcb1_clk", .id = 27, .r = { .min = 0, .max = 166000000 }, },
+ { .n = "pwm_clk", .id = 28, },
+ { .n = "adc_clk", .id = 29, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "dma0_clk", .id = 30, },
+ { .n = "dma1_clk", .id = 31, },
+ { .n = "uhphs_clk", .id = 32, },
+ { .n = "udphs_clk", .id = 33, },
+ { .n = "macb0_clk", .id = 34, },
+ { .n = "macb1_clk", .id = 35, },
+ { .n = "lcdc_clk", .id = 36, },
+ { .n = "isi_clk", .id = 37, },
+ { .n = "ssc0_clk", .id = 38, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "ssc1_clk", .id = 39, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "can0_clk", .id = 40, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "can1_clk", .id = 41, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "sha_clk", .id = 42, },
+ { .n = "aes_clk", .id = 43, },
+ { .n = "tdes_clk", .id = 44, },
+ { .n = "trng_clk", .id = 45, },
+ { .n = "fuse_clk", .id = 48, },
+ { .n = "mpddr_clk", .id = 49, },
+};
+
+static void __init sama5d3_pmc_setup(struct device_node *np)
+{
+ const char *slck_name, *mainxtal_name;
+ struct pmc_data *sama5d3_pmc;
+ const char *parent_names[5];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "slow_clk");
+ if (i < 0)
+ return;
+
+ slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama5d3_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(sama5d3_systemck),
+ nck(sama5d3_periphck), 0);
+ if (!sama5d3_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0,
+ &sama5d3_pll_layout, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "plladivck";
+ parent_names[1] = "utmick";
+ hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91sam9x5_clk_register_smd(regmap, "smdclk", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "plladivck";
+ parent_names[3] = "utmick";
+ parent_names[4] = "masterck";
+ for (i = 0; i < 3; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 5, i,
+ &at91sam9x5_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama5d3_systemck[i].n,
+ sama5d3_systemck[i].p,
+ sama5d3_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->shws[sama5d3_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama5d3_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d3_pcr_layout,
+ sama5d3_periphck[i].n,
+ "masterck",
+ sama5d3_periphck[i].id,
+ &sama5d3_periphck[i].r);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama5d3_pmc->phws[sama5d3_periphck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama5d3_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(sama5d3_pmc);
+}
+/*
+ * The TCB is used as the clocksource so its clock is needed early. This means
+ * this can't be a platform driver.
+ */
+CLK_OF_DECLARE_DRIVER(sama5d3_pmc, "atmel,sama5d3-pmc", sama5d3_pmc_setup);
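
[Editor's note: a hedged consumer-side sketch, not part of the patch. Once the
new sama5d3 PMC driver above has registered its clocks and exposed them via
of_clk_add_hw_provider(), a peripheral driver consumes them through the generic
clk API; the connection name "usart" is illustrative only.]

#include <linux/clk.h>
#include <linux/platform_device.h>

static int sama5d3_uart_probe(struct platform_device *pdev)
{
	struct clk *pclk;

	/* e.g. resolves to "usart0_clk", gated per the periphck table above */
	pclk = devm_clk_get(&pdev->dev, "usart");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	return clk_prepare_enable(pclk);
}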
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 6e780c2a9e6b..3c228b018116 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
+#define SI5341_NUM_INPUTS 4
+
#define SI5341_MAX_NUM_OUTPUTS 10
#define SI5340_MAX_NUM_OUTPUTS 4
@@ -56,8 +58,8 @@ struct clk_si5341 {
struct i2c_client *i2c_client;
struct clk_si5341_synth synth[SI5341_NUM_SYNTH];
struct clk_si5341_output clk[SI5341_MAX_NUM_OUTPUTS];
- struct clk *pxtal;
- const char *pxtal_name;
+ struct clk *input_clk[SI5341_NUM_INPUTS];
+ const char *input_clk_name[SI5341_NUM_INPUTS];
const u16 *reg_output_offset;
const u16 *reg_rdiv_offset;
u64 freq_vco; /* 13500–14256 MHz */
@@ -78,10 +80,25 @@ struct clk_si5341_output_config {
#define SI5341_DEVICE_REV 0x0005
#define SI5341_STATUS 0x000C
#define SI5341_SOFT_RST 0x001C
+#define SI5341_IN_SEL 0x0021
+#define SI5341_XAXB_CFG 0x090E
+#define SI5341_IN_EN 0x0949
+#define SI5341_INX_TO_PFD_EN 0x094A
+
+/* Input selection */
+#define SI5341_IN_SEL_MASK 0x06
+#define SI5341_IN_SEL_SHIFT 1
+#define SI5341_IN_SEL_REGCTRL 0x01
+#define SI5341_INX_TO_PFD_SHIFT 4
+
+/* XTAL config bits */
+#define SI5341_XAXB_CFG_EXTCLK_EN BIT(0)
+#define SI5341_XAXB_CFG_PDNB BIT(1)
/* Input dividers (48-bit) */
#define SI5341_IN_PDIV(x) (0x0208 + ((x) * 10))
#define SI5341_IN_PSET(x) (0x020E + ((x) * 10))
+#define SI5341_PX_UPD 0x0230
/* PLL configuration */
#define SI5341_PLL_M_NUM 0x0235
@@ -120,6 +137,10 @@ struct si5341_reg_default {
u8 value;
};
+static const char * const si5341_input_clock_names[] = {
+ "in0", "in1", "in2", "xtal"
+};
+
/* Output configuration registers 0..9 are not quite logically organized */
static const u16 si5341_reg_output_offset[] = {
0x0108,
@@ -390,7 +411,112 @@ static unsigned long si5341_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)res;
}
+static int si5341_clk_get_selected_input(struct clk_si5341 *data)
+{
+ int err;
+ u32 val;
+
+ err = regmap_read(data->regmap, SI5341_IN_SEL, &val);
+ if (err < 0)
+ return err;
+
+ return (val & SI5341_IN_SEL_MASK) >> SI5341_IN_SEL_SHIFT;
+}
+
+static u8 si5341_clk_get_parent(struct clk_hw *hw)
+{
+ struct clk_si5341 *data = to_clk_si5341(hw);
+ int res = si5341_clk_get_selected_input(data);
+
+ if (res < 0)
+ return 0; /* Apparently we cannot report errors */
+
+ return res;
+}
+
+static int si5341_clk_reparent(struct clk_si5341 *data, u8 index)
+{
+ int err;
+ u8 val;
+
+ val = (index << SI5341_IN_SEL_SHIFT) & SI5341_IN_SEL_MASK;
+ /* Enable register-based input selection */
+ val |= SI5341_IN_SEL_REGCTRL;
+
+ err = regmap_update_bits(data->regmap,
+ SI5341_IN_SEL, SI5341_IN_SEL_REGCTRL | SI5341_IN_SEL_MASK, val);
+ if (err < 0)
+ return err;
+
+ if (index < 3) {
+ /* Enable input buffer for selected input */
+ err = regmap_update_bits(data->regmap,
+ SI5341_IN_EN, 0x07, BIT(index));
+ if (err < 0)
+ return err;
+
+ /* Enable the input to the phase detector */
+ err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN,
+ 0x7 << SI5341_INX_TO_PFD_SHIFT,
+ BIT(index + SI5341_INX_TO_PFD_SHIFT));
+ if (err < 0)
+ return err;
+
+ /* Power down XTAL oscillator and buffer */
+ err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG,
+ SI5341_XAXB_CFG_PDNB, 0);
+ if (err < 0)
+ return err;
+
+ /*
+ * Set the P divider to "1". There's no explanation in the
+ * datasheet of these registers, but the clockbuilder software
+ * programs a "1" when the input is being used.
+ */
+ err = regmap_write(data->regmap, SI5341_IN_PDIV(index), 1);
+ if (err < 0)
+ return err;
+
+ err = regmap_write(data->regmap, SI5341_IN_PSET(index), 1);
+ if (err < 0)
+ return err;
+
+ /* Set update PDIV bit */
+ err = regmap_write(data->regmap, SI5341_PX_UPD, BIT(index));
+ if (err < 0)
+ return err;
+ } else {
+ /* Disable all input buffers */
+ err = regmap_update_bits(data->regmap, SI5341_IN_EN, 0x07, 0);
+ if (err < 0)
+ return err;
+
+ /* Disable input to phase detector */
+ err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN,
+ 0x7 << SI5341_INX_TO_PFD_SHIFT, 0);
+ if (err < 0)
+ return err;
+
+ /* Power up XTAL oscillator and buffer */
+ err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG,
+ SI5341_XAXB_CFG_PDNB, SI5341_XAXB_CFG_PDNB);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int si5341_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_si5341 *data = to_clk_si5341(hw);
+
+ return si5341_clk_reparent(data, index);
+}
+
static const struct clk_ops si5341_clk_ops = {
+ .set_parent = si5341_clk_set_parent,
+ .get_parent = si5341_clk_get_parent,
.recalc_rate = si5341_clk_recalc_rate,
};
@@ -985,7 +1111,8 @@ static const struct regmap_range si5341_regmap_volatile_range[] = {
regmap_reg_range(0x000C, 0x0012), /* Status */
regmap_reg_range(0x001C, 0x001E), /* reset, finc/fdec */
regmap_reg_range(0x00E2, 0x00FE), /* NVM, interrupts, device ready */
- /* Update bits for synth config */
+ /* Update bits for P divider and synth config */
+ regmap_reg_range(SI5341_PX_UPD, SI5341_PX_UPD),
regmap_reg_range(SI5341_SYNTH_N_UPD(0), SI5341_SYNTH_N_UPD(0)),
regmap_reg_range(SI5341_SYNTH_N_UPD(1), SI5341_SYNTH_N_UPD(1)),
regmap_reg_range(SI5341_SYNTH_N_UPD(2), SI5341_SYNTH_N_UPD(2)),
@@ -1122,6 +1249,7 @@ static int si5341_initialize_pll(struct clk_si5341 *data)
struct device_node *np = data->i2c_client->dev.of_node;
u32 m_num = 0;
u32 m_den = 0;
+ int sel;
if (of_property_read_u32(np, "silabs,pll-m-num", &m_num)) {
dev_err(&data->i2c_client->dev,
@@ -1135,7 +1263,11 @@ static int si5341_initialize_pll(struct clk_si5341 *data)
if (!m_num || !m_den) {
dev_err(&data->i2c_client->dev,
"PLL configuration invalid, assume 14GHz\n");
- m_den = clk_get_rate(data->pxtal) / 10;
+ sel = si5341_clk_get_selected_input(data);
+ if (sel < 0)
+ return sel;
+
+ m_den = clk_get_rate(data->input_clk[sel]) / 10;
m_num = 1400000000;
}
@@ -1143,11 +1275,52 @@ static int si5341_initialize_pll(struct clk_si5341 *data)
SI5341_PLL_M_NUM, m_num, m_den);
}
+static int si5341_clk_select_active_input(struct clk_si5341 *data)
+{
+ int res;
+ int err;
+ int i;
+
+ res = si5341_clk_get_selected_input(data);
+ if (res < 0)
+ return res;
+
+ /* If the current register setting is invalid, pick the first input */
+ if (!data->input_clk[res]) {
+ dev_dbg(&data->i2c_client->dev,
+ "Input %d not connected, rerouting\n", res);
+ res = -ENODEV;
+ for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
+ if (data->input_clk[i]) {
+ res = i;
+ break;
+ }
+ }
+ if (res < 0) {
+ dev_err(&data->i2c_client->dev,
+ "No clock input available\n");
+ return res;
+ }
+ }
+
+ /* Make sure the selected clock is also enabled and routed */
+ err = si5341_clk_reparent(data, res);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(data->input_clk[res]);
+ if (err < 0)
+ return err;
+
+ return res;
+}
+
static int si5341_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct clk_si5341 *data;
struct clk_init_data init;
+ struct clk *input;
const char *root_clock_name;
const char *synth_clock_names[SI5341_NUM_SYNTH];
int err;
@@ -1161,12 +1334,16 @@ static int si5341_probe(struct i2c_client *client,
data->i2c_client = client;
- data->pxtal = devm_clk_get(&client->dev, "xtal");
- if (IS_ERR(data->pxtal)) {
- if (PTR_ERR(data->pxtal) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_err(&client->dev, "Missing xtal clock input\n");
+ for (i = 0; i < SI5341_NUM_INPUTS; ++i) {
+ input = devm_clk_get(&client->dev, si5341_input_clock_names[i]);
+ if (IS_ERR(input)) {
+ if (PTR_ERR(input) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ data->input_clk_name[i] = si5341_input_clock_names[i];
+ } else {
+ data->input_clk[i] = input;
+ data->input_clk_name[i] = __clk_get_name(input);
+ }
}
err = si5341_dt_parse_dt(client, config);
@@ -1188,9 +1365,6 @@ static int si5341_probe(struct i2c_client *client,
if (err < 0)
return err;
- /* "Activate" the xtal (usually a fixed clock) */
- clk_prepare_enable(data->pxtal);
-
if (of_property_read_bool(client->dev.of_node, "silabs,reprogram")) {
initialization_required = true;
} else {
@@ -1223,7 +1397,14 @@ static int si5341_probe(struct i2c_client *client,
ARRAY_SIZE(si5341_reg_defaults));
if (err < 0)
return err;
+ }
+
+ /* Input must be up and running at this point */
+ err = si5341_clk_select_active_input(data);
+ if (err < 0)
+ return err;
+ if (initialization_required) {
/* PLL configuration is required */
err = si5341_initialize_pll(data);
if (err < 0)
@@ -1231,9 +1412,8 @@ static int si5341_probe(struct i2c_client *client,
}
/* Register the PLL */
- data->pxtal_name = __clk_get_name(data->pxtal);
- init.parent_names = &data->pxtal_name;
- init.num_parents = 1; /* For now, only XTAL input supported */
+ init.parent_names = data->input_clk_name;
+ init.num_parents = SI5341_NUM_INPUTS;
init.ops = &si5341_clk_ops;
init.flags = 0;
data->hw.init = &init;
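
[Editor's note: with .set_parent/.get_parent wired into si5341_clk_ops above,
the PLL input is no longer fixed to the XTAL and can be rerouted at run time
through the generic clk API. A minimal, hedged sketch; the clock handles are
assumed to have been obtained elsewhere, e.g. via devm_clk_get().]

#include <linux/clk.h>

/* Reroute the Si5341 PLL from the crystal to differential input IN1. */
static int si5341_route_to_in1(struct clk *pll, struct clk *in1)
{
	/* lands in si5341_clk_set_parent() -> si5341_clk_reparent(data, 1) */
	return clk_set_parent(pll, in1);
}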
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 95adf6c6db3d..39c59f063aa0 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -488,7 +488,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);
-static unsigned long __clk_get_accuracy(struct clk_core *core)
+static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
if (!core)
return 0;
@@ -774,7 +774,7 @@ static void clk_core_rate_restore_protect(struct clk_core *core, int count)
* clk_rate_exclusive_get - get exclusivity over the clk rate control
* @clk: the clk over which the exclusity of rate control is requested
*
- * clk_rate_exlusive_get() begins a critical section during which a clock
+ * clk_rate_exclusive_get() begins a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
@@ -1517,18 +1517,12 @@ static void __clk_recalc_accuracies(struct clk_core *core)
__clk_recalc_accuracies(child);
}
-static long clk_core_get_accuracy(struct clk_core *core)
+static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
- unsigned long accuracy;
-
- clk_prepare_lock();
if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
__clk_recalc_accuracies(core);
- accuracy = __clk_get_accuracy(core);
- clk_prepare_unlock();
-
- return accuracy;
+ return clk_core_get_accuracy_no_lock(core);
}
/**
@@ -1542,10 +1536,16 @@ static long clk_core_get_accuracy(struct clk_core *core)
*/
long clk_get_accuracy(struct clk *clk)
{
+ long accuracy;
+
if (!clk)
return 0;
- return clk_core_get_accuracy(clk->core);
+ clk_prepare_lock();
+ accuracy = clk_core_get_accuracy_recalc(clk->core);
+ clk_prepare_unlock();
+
+ return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);
@@ -1599,19 +1599,12 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
__clk_recalc_rates(child, msg);
}
-static unsigned long clk_core_get_rate(struct clk_core *core)
+static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
- unsigned long rate;
-
- clk_prepare_lock();
-
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(core, 0);
- rate = clk_core_get_rate_nolock(core);
- clk_prepare_unlock();
-
- return rate;
+ return clk_core_get_rate_nolock(core);
}
/**
@@ -1624,10 +1617,16 @@ static unsigned long clk_core_get_rate(struct clk_core *core)
*/
unsigned long clk_get_rate(struct clk *clk)
{
+ unsigned long rate;
+
if (!clk)
return 0;
- return clk_core_get_rate(clk->core);
+ clk_prepare_lock();
+ rate = clk_core_get_rate_recalc(clk->core);
+ clk_prepare_unlock();
+
+ return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
@@ -2660,12 +2659,14 @@ static int clk_core_get_phase(struct clk_core *core)
{
int ret;
- clk_prepare_lock();
+ lockdep_assert_held(&prepare_lock);
+ if (!core->ops->get_phase)
+ return 0;
+
/* Always try to update cached phase if possible */
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- ret = core->phase;
- clk_prepare_unlock();
+ ret = core->ops->get_phase(core->hw);
+ if (ret >= 0)
+ core->phase = ret;
return ret;
}
@@ -2679,10 +2680,16 @@ static int clk_core_get_phase(struct clk_core *core)
*/
int clk_get_phase(struct clk *clk)
{
+ int ret;
+
if (!clk)
return 0;
- return clk_core_get_phase(clk->core);
+ clk_prepare_lock();
+ ret = clk_core_get_phase(clk->core);
+ clk_prepare_unlock();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);
@@ -2896,13 +2903,22 @@ static struct hlist_head *orphan_list[] = {
static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
+ int phase;
+
+ seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
- clk_core_get_rate(c), clk_core_get_accuracy(c),
- clk_core_get_phase(c),
- clk_core_get_scaled_duty_cycle(c, 100000));
+ clk_core_get_rate_recalc(c),
+ clk_core_get_accuracy_recalc(c));
+
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "%5d", phase);
+ else
+ seq_puts(s, "-----");
+
+ seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2939,6 +2955,7 @@ DEFINE_SHOW_ATTRIBUTE(clk_summary);
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
+ int phase;
unsigned long min_rate, max_rate;
clk_core_get_boundaries(c, &min_rate, &max_rate);
@@ -2948,11 +2965,13 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
- seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
+ seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
seq_printf(s, "\"min_rate\": %lu,", min_rate);
seq_printf(s, "\"max_rate\": %lu,", max_rate);
- seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
- seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
+ seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+ seq_printf(s, "\"phase\": %d,", phase);
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
@@ -3323,7 +3342,9 @@ static void clk_core_reparent_orphans_nolock(void)
static int __clk_core_init(struct clk_core *core)
{
int ret;
+ struct clk_core *parent;
unsigned long rate;
+ int phase;
if (!core)
return -EINVAL;
@@ -3394,7 +3415,7 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
- core->parent = __clk_init_parent(core);
+ parent = core->parent = __clk_init_parent(core);
/*
* Populate core->parent if parent has already been clk_core_init'd. If
@@ -3406,10 +3427,9 @@ static int __clk_core_init(struct clk_core *core)
* clocks and re-parent any that are children of the clock currently
* being clk_init'd.
*/
- if (core->parent) {
- hlist_add_head(&core->child_node,
- &core->parent->children);
- core->orphan = core->parent->orphan;
+ if (parent) {
+ hlist_add_head(&core->child_node, &parent->children);
+ core->orphan = parent->orphan;
} else if (!core->num_parents) {
hlist_add_head(&core->child_node, &clk_root_list);
core->orphan = false;
@@ -3427,21 +3447,24 @@ static int __clk_core_init(struct clk_core *core)
*/
if (core->ops->recalc_accuracy)
core->accuracy = core->ops->recalc_accuracy(core->hw,
- __clk_get_accuracy(core->parent));
- else if (core->parent)
- core->accuracy = core->parent->accuracy;
+ clk_core_get_accuracy_no_lock(parent));
+ else if (parent)
+ core->accuracy = parent->accuracy;
else
core->accuracy = 0;
/*
- * Set clk's phase.
+ * Set clk's phase via clk_core_get_phase(), which caches the result.
* Since a phase is by definition relative to its parent, just
* query the current clock phase, or just assume it's in phase.
*/
- if (core->ops->get_phase)
- core->phase = core->ops->get_phase(core->hw);
- else
- core->phase = 0;
+ phase = clk_core_get_phase(core);
+ if (phase < 0) {
+ ret = phase;
+ pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
+ core->name);
+ goto out;
+ }
/*
* Set clk's duty cycle.
@@ -3456,9 +3479,9 @@ static int __clk_core_init(struct clk_core *core)
*/
if (core->ops->recalc_rate)
rate = core->ops->recalc_rate(core->hw,
- clk_core_get_rate_nolock(core->parent));
- else if (core->parent)
- rate = core->parent->rate;
+ clk_core_get_rate_nolock(parent));
+ else if (parent)
+ rate = parent->rate;
else
rate = 0;
core->rate = core->req_rate = rate;
@@ -4865,8 +4888,8 @@ static int parent_ready(struct device_node *np)
*
* Return: error code or zero on success
*/
-int of_clk_detect_critical(struct device_node *np,
- int index, unsigned long *flags)
+int of_clk_detect_critical(struct device_node *np, int index,
+ unsigned long *flags)
{
struct property *prop;
const __be32 *cur;
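
[Editor's note: the shape of the clk.c refactor above, reduced to a hedged
sketch. The exported accessors (clk_get_rate(), clk_get_accuracy(),
clk_get_phase()) now take prepare_lock exactly once, while the *_recalc and
*_no_lock helpers assert the lock instead of acquiring it — so debugfs code
such as clk_summary_show_one() can batch several reads under a single lock
acquisition instead of re-locking per field.]

/* Sketch only; mirrors the pattern, not the exact kernel code. */
static void show_one_locked(struct seq_file *s, struct clk_core *c)
{
	lockdep_assert_held(&prepare_lock);

	seq_printf(s, "%s %lu %lu\n", c->name,
		   clk_core_get_rate_recalc(c),		/* no nested locking */
		   clk_core_get_accuracy_recalc(c));
}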
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 20f7c91c03d2..99773519b5a5 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -15,6 +15,7 @@
#define PCG_PREDIV_MAX 8
#define PCG_DIV_SHIFT 0
+#define PCG_CORE_DIV_WIDTH 3
#define PCG_DIV_WIDTH 6
#define PCG_DIV_MAX 64
@@ -91,7 +92,7 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
- unsigned long flags = 0;
+ unsigned long flags;
int prediv_value;
int div_value;
int ret;
@@ -126,6 +127,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
+ u32 composite_flags,
unsigned long flags)
{
struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
@@ -133,6 +135,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
struct clk_divider *div = NULL;
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
+ const struct clk_ops *divider_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -150,8 +153,16 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
div_hw = &div->hw;
div->reg = reg;
- div->shift = PCG_PREDIV_SHIFT;
- div->width = PCG_PREDIV_WIDTH;
+ if (composite_flags & IMX_COMPOSITE_CORE) {
+ div->shift = PCG_DIV_SHIFT;
+ div->width = PCG_CORE_DIV_WIDTH;
+ divider_ops = &clk_divider_ops;
+ } else {
+ div->shift = PCG_PREDIV_SHIFT;
+ div->width = PCG_PREDIV_WIDTH;
+ divider_ops = &imx8m_clk_composite_divider_ops;
+ }
+
div->lock = &imx_ccm_lock;
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
@@ -166,8 +177,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ops, div_hw,
- &imx8m_clk_composite_divider_ops,
- gate_hw, &clk_gate_ops, flags);
+ divider_ops, gate_hw, &clk_gate_ops, flags);
if (IS_ERR(hw))
goto fail;
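
[Editor's note: the new composite_flags argument is presumably paired with a
clk.h helper along these lines — a hedged reconstruction, since the header
change is not shown in this hunk. Core slices get a plain 3-bit post divider
driven by the generic clk_divider_ops, while bus/IP slices keep the
prediv+div pair handled by imx8m_clk_composite_divider_ops.]

#define IMX_COMPOSITE_CORE	BIT(0)

#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
	imx8m_clk_hw_composite_flags(name, parent_names, \
			ARRAY_SIZE(parent_names), reg, \
			IMX_COMPOSITE_CORE, \
			CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)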
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 4b17b91504ed..100ca828b052 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -55,7 +55,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
struct clk_divider *div = to_clk_divider(hw);
unsigned int divider, value;
- unsigned long flags = 0;
+ unsigned long flags;
u32 val;
divider = parent_rate / rate;
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index b569d919c645..58a67630bb6a 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -42,7 +42,7 @@ static int clk_fixup_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_fixup_mux *fixup_mux = to_clk_fixup_mux(hw);
struct clk_mux *mux = to_clk_mux(hw);
- unsigned long flags = 0;
+ unsigned long flags;
u32 val;
spin_lock_irqsave(mux->lock, flags);
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index 7d44ce814806..ce0060e8873e 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -40,7 +40,7 @@ static int clk_gate2_enable(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
- unsigned long flags = 0;
+ unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
@@ -62,7 +62,7 @@ static void clk_gate2_disable(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
- unsigned long flags = 0;
+ unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
@@ -101,7 +101,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw)
static void clk_gate2_disable_unused(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
- unsigned long flags = 0;
+ unsigned long flags;
u32 reg;
spin_lock_irqsave(gate->lock, flags);
@@ -154,7 +154,7 @@ struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
gate->hw.init = &init;
hw = &gate->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
kfree(gate);
return ERR_PTR(ret);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 4bd44d89eaaa..0f647d148abf 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -208,6 +208,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
anatop_base = base;
hws[IMX6SL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
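
[Editor's note: this one-liner and the matching of_node_put() additions in the
imx7d/imx8m* probes below all close the same device-node leak —
of_find_compatible_node() returns the node with its refcount raised, and the
reference must be dropped once the caller is done with the node. A hedged
restatement of the pattern:]

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_anatop(const char *compat)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, compat);
	base = of_iomap(np, 0);
	of_node_put(np);	/* drop the ref; the iomap stays valid */

	return base;
}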
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 0c9f7adb41ae..b2057bd42e25 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -802,6 +802,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_hw_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
hws[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0);
hws[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0);
+ hws[IMX7D_PXP_CLK] = imx_clk_hw_gate4("pxp_clk", "main_axi_root_clk", base + 0x44c0, 0);
hws[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0);
hws[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0);
hws[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_hw_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 0620d6c8c072..3710aa0dee9b 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -8,7 +8,7 @@
*/
#include <dt-bindings/clock/imx7ulp-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 2ed93fc25087..925670438f23 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -4,12 +4,10 @@
*/
#include <dt-bindings/clock/imx8mm-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -41,6 +39,8 @@ static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
static const char *imx8mm_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m",
"sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", };
+static const char * const imx8mm_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char *imx8mm_m4_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "sys_pll1_266m",
"sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", };
@@ -283,8 +283,10 @@ static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_8
static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
-static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
- "vpu_pll", "sys_pll1_80m", };
+static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", "sys_pll1_200m",
+ "audio_pll2_out", "sys_pll2_500m", "vpu_pll", "sys_pll1_80m", };
+static const char *imx8mm_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k", };
static struct clk_hw_onecell_data *clk_hw_data;
static struct clk_hw **hws;
@@ -322,6 +324,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!base))
return -ENOMEM;
@@ -414,20 +417,30 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/* Core Slice */
hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
- hws[IMX8MM_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels));
- hws[IMX8MM_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels));
- hws[IMX8MM_CLK_GPU3D_SRC] = imx_clk_hw_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels));
- hws[IMX8MM_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels));
hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MM_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- hws[IMX8MM_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
- hws[IMX8MM_CLK_GPU3D_CG] = imx_clk_hw_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28);
- hws[IMX8MM_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28);
hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- hws[IMX8MM_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- hws[IMX8MM_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
- hws[IMX8MM_CLK_GPU3D_DIV] = imx_clk_hw_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3);
- hws[IMX8MM_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3);
+
+ hws[IMX8MM_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mm_m4_sels, base + 0x8080);
+ hws[IMX8MM_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mm_vpu_sels, base + 0x8100);
+ hws[IMX8MM_CLK_GPU3D_CORE] = imx8m_clk_hw_composite_core("gpu3d_core", imx8mm_gpu3d_sels, base + 0x8180);
+ hws[IMX8MM_CLK_GPU2D_CORE] = imx8m_clk_hw_composite_core("gpu2d_core", imx8mm_gpu2d_sels, base + 0x8200);
+
+ /* For backwards compatibility */
+ hws[IMX8MM_CLK_M4_SRC] = hws[IMX8MM_CLK_M4_CORE];
+ hws[IMX8MM_CLK_M4_CG] = hws[IMX8MM_CLK_M4_CORE];
+ hws[IMX8MM_CLK_M4_DIV] = hws[IMX8MM_CLK_M4_CORE];
+ hws[IMX8MM_CLK_VPU_SRC] = hws[IMX8MM_CLK_VPU_CORE];
+ hws[IMX8MM_CLK_VPU_CG] = hws[IMX8MM_CLK_VPU_CORE];
+ hws[IMX8MM_CLK_VPU_DIV] = hws[IMX8MM_CLK_VPU_CORE];
+ hws[IMX8MM_CLK_GPU3D_SRC] = hws[IMX8MM_CLK_GPU3D_CORE];
+ hws[IMX8MM_CLK_GPU3D_CG] = hws[IMX8MM_CLK_GPU3D_CORE];
+ hws[IMX8MM_CLK_GPU3D_DIV] = hws[IMX8MM_CLK_GPU3D_CORE];
+ hws[IMX8MM_CLK_GPU2D_SRC] = hws[IMX8MM_CLK_GPU2D_CORE];
+ hws[IMX8MM_CLK_GPU2D_CG] = hws[IMX8MM_CLK_GPU2D_CORE];
+ hws[IMX8MM_CLK_GPU2D_DIV] = hws[IMX8MM_CLK_GPU2D_CORE];
+
+ /* CORE SEL */
+ hws[IMX8MM_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mm_a53_core_sels, ARRAY_SIZE(imx8mm_a53_core_sels));
/* BUS */
hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
@@ -504,6 +517,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mm_wdog_sels, base + 0xb900);
hws[IMX8MM_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980);
hws[IMX8MM_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mm_clko1_sels, base + 0xba00);
+ hws[IMX8MM_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mm_clko2_sels, base + 0xba80);
hws[IMX8MM_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00);
hws[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80);
hws[IMX8MM_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00);
@@ -564,7 +578,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
hws[IMX8MM_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
hws[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
- hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0);
+ hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", base + 0x44f0, 0);
hws[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
hws[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
hws[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
@@ -586,7 +600,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
hws[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
hws[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
- hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0);
+ hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", base + 0x4660, 0);
hws[IMX8MM_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
hws[IMX8MM_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8);
@@ -594,11 +608,14 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
hws[IMX8MM_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL);
- hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MM_CLK_A53_DIV]->clk,
- hws[IMX8MM_CLK_A53_SRC]->clk,
+ hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MM_CLK_A53_CORE]->clk,
+ hws[IMX8MM_CLK_A53_CORE]->clk,
hws[IMX8MM_ARM_PLL_OUT]->clk,
- hws[IMX8MM_SYS_PLL1_800M]->clk);
+ hws[IMX8MM_CLK_A53_DIV]->clk);
+
+ clk_hw_set_parent(hws[IMX8MM_CLK_A53_SRC], hws[IMX8MM_SYS_PLL1_800M]);
+ clk_hw_set_parent(hws[IMX8MM_CLK_A53_CORE], hws[IMX8MM_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MM_CLK_END);
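
[Editor's note: the point of routing imx_clk_hw_cpu() through the new
arm_a53_core mux (a hedged reading of the clk-cpu.c helper this builds on): a
CPU rate change can park the core on the divider path while the ARM PLL
relocks, then switch back, instead of glitching the A53 clock. Roughly:]

/* Simplified sketch; error handling omitted. */
static int a53_set_rate_sketch(struct clk *core_mux, struct clk *step_div,
			       struct clk *arm_pll, unsigned long rate)
{
	clk_set_parent(core_mux, step_div); /* run from arm_a53_div meanwhile */
	clk_set_rate(arm_pll, rate);        /* relock the ARM PLL off-line */
	return clk_set_parent(core_mux, arm_pll);
}

The same pattern is repeated for imx8mn, imx8mp and imx8mq below.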
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index c5e7316b4c66..0bc7070235bd 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -4,12 +4,10 @@
*/
#include <dt-bindings/clock/imx8mn-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -40,6 +38,8 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl
"sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
"audio_pll1_out", "sys_pll3_out", };
+static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
"sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", };
@@ -317,6 +317,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!base)) {
ret = -ENOMEM;
goto unregister_hws;
@@ -413,15 +414,21 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
/* CORE */
hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
- hws[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
- hws[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels, ARRAY_SIZE(imx8mn_gpu_shader_sels));
hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
- hws[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
-
hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- hws[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
- hws[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+
+ hws[IMX8MN_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mn_gpu_core_sels, base + 0x8180);
+ hws[IMX8MN_CLK_GPU_SHADER] = imx8m_clk_hw_composite_core("gpu_shader", imx8mn_gpu_shader_sels, base + 0x8200);
+
+ hws[IMX8MN_CLK_GPU_CORE_SRC] = hws[IMX8MN_CLK_GPU_CORE];
+ hws[IMX8MN_CLK_GPU_CORE_CG] = hws[IMX8MN_CLK_GPU_CORE];
+ hws[IMX8MN_CLK_GPU_CORE_DIV] = hws[IMX8MN_CLK_GPU_CORE];
+ hws[IMX8MN_CLK_GPU_SHADER_SRC] = hws[IMX8MN_CLK_GPU_SHADER];
+ hws[IMX8MN_CLK_GPU_SHADER_CG] = hws[IMX8MN_CLK_GPU_SHADER];
+ hws[IMX8MN_CLK_GPU_SHADER_DIV] = hws[IMX8MN_CLK_GPU_SHADER];
+
+ /* CORE SEL */
+ hws[IMX8MN_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mn_a53_core_sels, ARRAY_SIZE(imx8mn_a53_core_sels));
/* BUS */
hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
@@ -523,12 +530,13 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
hws[IMX8MN_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
hws[IMX8MN_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ hws[IMX8MN_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0);
hws[IMX8MN_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
hws[IMX8MN_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
hws[IMX8MN_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
hws[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
- hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
+ hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core", base + 0x44f0, 0);
hws[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
hws[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
hws[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
@@ -551,11 +559,14 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
- hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MN_CLK_A53_DIV]->clk,
- hws[IMX8MN_CLK_A53_SRC]->clk,
+ hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MN_CLK_A53_CORE]->clk,
+ hws[IMX8MN_CLK_A53_CORE]->clk,
hws[IMX8MN_ARM_PLL_OUT]->clk,
- hws[IMX8MN_SYS_PLL1_800M]->clk);
+ hws[IMX8MN_CLK_A53_DIV]->clk);
+
+ clk_hw_set_parent(hws[IMX8MN_CLK_A53_SRC], hws[IMX8MN_SYS_PLL1_800M]);
+ clk_hw_set_parent(hws[IMX8MN_CLK_A53_CORE], hws[IMX8MN_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MN_CLK_END);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index cf192907b7dc..41469e2cc3de 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -4,13 +4,13 @@
*/
#include <dt-bindings/clock/imx8mp-clock.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include "clk.h"
@@ -34,6 +34,8 @@ static const char * const imx8mp_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl
"sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
"audio_pll1_out", "sys_pll3_out", };
+static const char * const imx8mp_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char * const imx8mp_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m",
"vpu_pll_out", "sys_pll1_800m", "audio_pll1_out",
"video_pll1_out", "sys_pll3_out", };
@@ -342,7 +344,7 @@ static const char * const imx8mp_hdmi_fdcc_tst_sels[] = {"osc_24m", "sys_pll1_26
"sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
"audio_pll2_out", "video_pll1_out", };
-static const char * const imx8mp_hdmi_27m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+static const char * const imx8mp_hdmi_24m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
"sys_pll3_out", "audio_pll1_out", "video_pll1_out",
"audio_pll2_out", "sys_pll1_133m", };
@@ -434,6 +436,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
anatop_base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!anatop_base))
return -ENOMEM;
@@ -553,6 +556,9 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3);
hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3);
+ /* CORE SEL */
+ hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels));
+
hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
@@ -631,7 +637,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_IPP_DO_CLKO1] = imx8m_clk_hw_composite("ipp_do_clko1", imx8mp_ipp_do_clko1_sels, ccm_base + 0xba00);
hws[IMX8MP_CLK_IPP_DO_CLKO2] = imx8m_clk_hw_composite("ipp_do_clko2", imx8mp_ipp_do_clko2_sels, ccm_base + 0xba80);
hws[IMX8MP_CLK_HDMI_FDCC_TST] = imx8m_clk_hw_composite("hdmi_fdcc_tst", imx8mp_hdmi_fdcc_tst_sels, ccm_base + 0xbb00);
- hws[IMX8MP_CLK_HDMI_27M] = imx8m_clk_hw_composite("hdmi_27m", imx8mp_hdmi_27m_sels, ccm_base + 0xbb80);
+ hws[IMX8MP_CLK_HDMI_24M] = imx8m_clk_hw_composite("hdmi_24m", imx8mp_hdmi_24m_sels, ccm_base + 0xbb80);
hws[IMX8MP_CLK_HDMI_REF_266M] = imx8m_clk_hw_composite("hdmi_ref_266m", imx8mp_hdmi_ref_266m_sels, ccm_base + 0xbc00);
hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80);
hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00);
@@ -671,6 +677,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", ccm_base + 0x4180, 0);
hws[IMX8MP_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", ccm_base + 0x4190, 0);
hws[IMX8MP_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", ccm_base + 0x41a0, 0);
+ hws[IMX8MP_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", ccm_base + 0x4220, 0);
hws[IMX8MP_CLK_PCIE_ROOT] = imx_clk_hw_gate4("pcie_root_clk", "pcie_aux", ccm_base + 0x4250, 0);
hws[IMX8MP_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", ccm_base + 0x4280, 0);
hws[IMX8MP_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", ccm_base + 0x4290, 0);
@@ -722,11 +729,14 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0);
hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0);
- hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MP_CLK_A53_DIV]->clk,
- hws[IMX8MP_CLK_A53_SRC]->clk,
+ hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MP_CLK_A53_CORE]->clk,
+ hws[IMX8MP_CLK_A53_CORE]->clk,
hws[IMX8MP_ARM_PLL_OUT]->clk,
- hws[IMX8MP_SYS_PLL1_800M]->clk);
+ hws[IMX8MP_CLK_A53_DIV]->clk);
+
+ clk_hw_set_parent(hws[IMX8MP_CLK_A53_SRC], hws[IMX8MP_SYS_PLL1_800M]);
+ clk_hw_set_parent(hws[IMX8MP_CLK_A53_CORE], hws[IMX8MP_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MP_CLK_END);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 4c0edca1a6d0..fdc68db68de5 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -5,7 +5,7 @@
*/
#include <dt-bindings/clock/imx8mq-clock.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -41,6 +41,8 @@ static const char * const video2_pll_out_sels[] = {"video2_pll1_ref_sel", };
static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
"sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll_out", };
+static const char * const imx8mq_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", };
+
static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m",
"sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll_out", };
@@ -305,6 +307,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
if (WARN_ON(!base))
return -ENOMEM;
@@ -403,22 +406,29 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
/* CORE */
hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
- hws[IMX8MQ_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels));
- hws[IMX8MQ_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
- hws[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
- hws[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
-
hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
- hws[IMX8MQ_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- hws[IMX8MQ_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
- hws[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
- hws[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
-
hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
- hws[IMX8MQ_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- hws[IMX8MQ_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
- hws[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
- hws[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+
+ hws[IMX8MQ_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mq_arm_m4_sels, base + 0x8080);
+ hws[IMX8MQ_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mq_vpu_sels, base + 0x8100);
+ hws[IMX8MQ_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mq_gpu_core_sels, base + 0x8180);
+ hws[IMX8MQ_CLK_GPU_SHADER] = imx8m_clk_hw_composite("gpu_shader", imx8mq_gpu_shader_sels, base + 0x8200);
+ /* For backwards compatibility */
+ hws[IMX8MQ_CLK_M4_SRC] = hws[IMX8MQ_CLK_M4_CORE];
+ hws[IMX8MQ_CLK_M4_CG] = hws[IMX8MQ_CLK_M4_CORE];
+ hws[IMX8MQ_CLK_M4_DIV] = hws[IMX8MQ_CLK_M4_CORE];
+ hws[IMX8MQ_CLK_VPU_SRC] = hws[IMX8MQ_CLK_VPU_CORE];
+ hws[IMX8MQ_CLK_VPU_CG] = hws[IMX8MQ_CLK_VPU_CORE];
+ hws[IMX8MQ_CLK_VPU_DIV] = hws[IMX8MQ_CLK_VPU_CORE];
+ hws[IMX8MQ_CLK_GPU_CORE_SRC] = hws[IMX8MQ_CLK_GPU_CORE];
+ hws[IMX8MQ_CLK_GPU_CORE_CG] = hws[IMX8MQ_CLK_GPU_CORE];
+ hws[IMX8MQ_CLK_GPU_CORE_DIV] = hws[IMX8MQ_CLK_GPU_CORE];
+ hws[IMX8MQ_CLK_GPU_SHADER_SRC] = hws[IMX8MQ_CLK_GPU_SHADER];
+ hws[IMX8MQ_CLK_GPU_SHADER_CG] = hws[IMX8MQ_CLK_GPU_SHADER];
+ hws[IMX8MQ_CLK_GPU_SHADER_DIV] = hws[IMX8MQ_CLK_GPU_SHADER];
+
+ /* CORE SEL */
+ hws[IMX8MQ_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mq_a53_core_sels, ARRAY_SIZE(imx8mq_a53_core_sels));
/* BUS */
hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
@@ -567,7 +577,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
hws[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
hws[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_hw_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
- hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
+ hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core", base + 0x4570, 0);
hws[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_hw_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
hws[IMX8MQ_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
hws[IMX8MQ_CLK_DISP_AXI_ROOT] = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
@@ -583,11 +593,14 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc_25m", 1, 8);
hws[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
- hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div",
- hws[IMX8MQ_CLK_A53_DIV]->clk,
- hws[IMX8MQ_CLK_A53_SRC]->clk,
+ hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
+ hws[IMX8MQ_CLK_A53_CORE]->clk,
+ hws[IMX8MQ_CLK_A53_CORE]->clk,
hws[IMX8MQ_ARM_PLL_OUT]->clk,
- hws[IMX8MQ_SYS1_PLL_800M]->clk);
+ hws[IMX8MQ_CLK_A53_DIV]->clk);
+
+ clk_hw_set_parent(hws[IMX8MQ_CLK_A53_SRC], hws[IMX8MQ_SYS1_PLL_800M]);
+ clk_hw_set_parent(hws[IMX8MQ_CLK_A53_CORE], hws[IMX8MQ_ARM_PLL_OUT]);
imx_check_clk_hws(hws, IMX8MQ_CLK_END);
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index de93ce73101b..78e1f7641aaa 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -98,26 +98,45 @@ static unsigned long clk_pfdv2_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_pfdv2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pfdv2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 tmp = *prate;
+ unsigned long parent_rates[] = {
+ 480000000,
+ 528000000,
+ req->best_parent_rate
+ };
+ unsigned long best_rate = -1UL, rate = req->rate;
+ unsigned long best_parent_rate = req->best_parent_rate;
+ u64 tmp;
u8 frac;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(parent_rates); i++) {
+ tmp = parent_rates[i];
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+
+ tmp = parent_rates[i];
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ if (abs(tmp - req->rate) < abs(best_rate - req->rate)) {
+ best_rate = tmp;
+ best_parent_rate = parent_rates[i];
+ }
+ }
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
- frac = tmp;
-
- if (frac < 12)
- frac = 12;
- else if (frac > 35)
- frac = 35;
-
- tmp = *prate;
- tmp *= 18;
- do_div(tmp, frac);
+ req->best_parent_rate = best_parent_rate;
+ req->rate = best_rate;
- return tmp;
+ return 0;
}
static int clk_pfdv2_is_enabled(struct clk_hw *hw)
@@ -139,6 +158,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val;
u8 frac;
+ if (!rate)
+ return -EINVAL;
+
+ /* PFD can NOT change rate without gating */
+ WARN_ON(clk_pfdv2_is_enabled(hw));
+
tmp = tmp * 18 + rate / 2;
do_div(tmp, rate);
frac = tmp;
@@ -161,7 +186,7 @@ static const struct clk_ops clk_pfdv2_ops = {
.enable = clk_pfdv2_enable,
.disable = clk_pfdv2_disable,
.recalc_rate = clk_pfdv2_recalc_rate,
- .round_rate = clk_pfdv2_round_rate,
+ .determine_rate = clk_pfdv2_determine_rate,
.set_rate = clk_pfdv2_set_rate,
.is_enabled = clk_pfdv2_is_enabled,
};
@@ -189,7 +214,7 @@ struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
init.ops = &clk_pfdv2_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.flags = CLK_SET_RATE_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
pfd->hw.init = &init;
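For reference, the new clk_pfdv2_determine_rate() walks three candidate parents (the fixed 480 MHz and 528 MHz PLL taps plus the current best_parent_rate) through the PFD relation rate = parent * 18 / frac, with frac clamped to [12, 35], and keeps the closest result. A standalone user-space distillation of that search (helper name and target value are ours, for illustration only):

	#include <stdio.h>

	/* PFD output = parent * 18 / frac, frac clamped to [12, 35] */
	static unsigned long pfd_rate(unsigned long parent, unsigned long target)
	{
		unsigned long long frac =
			((unsigned long long)parent * 18 + target / 2) / target;

		if (frac < 12)
			frac = 12;
		else if (frac > 35)
			frac = 35;

		return (unsigned long)((unsigned long long)parent * 18 / frac);
	}

	int main(void)
	{
		const unsigned long parents[] = { 480000000, 528000000 };
		unsigned long target = 392727272, best = 0;
		unsigned int i;

		for (i = 0; i < 2; i++) {
			unsigned long r = pfd_rate(parents[i], target);
			unsigned long dr = r > target ? r - target : target - r;
			unsigned long db = best > target ? best - target : target - best;

			if (!best || dr < db)
				best = r;
		}
		printf("best: %lu Hz\n", best); /* 392727272, from 480 MHz / 22 */
		return 0;
	}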
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 5b0519a81a7a..a83bbbee77d9 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -55,8 +55,10 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
};
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
+ PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384),
PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+ PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};
@@ -408,6 +410,8 @@ struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
default:
pr_err("%s: Unknown pll type for pll clk %s\n",
__func__, name);
+ kfree(pll);
+ return ERR_PTR(-EINVAL);
};
pll->base = base;
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index f51a800c268c..a49450431855 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -54,7 +54,7 @@ static inline int clk_pllv4_wait_lock(struct clk_pllv4 *pll)
csr, csr & PLL_VLD, 0, LOCK_TIMEOUT_US);
}
-static int clk_pllv4_is_enabled(struct clk_hw *hw)
+static int clk_pllv4_is_prepared(struct clk_hw *hw)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
@@ -175,7 +175,7 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static int clk_pllv4_enable(struct clk_hw *hw)
+static int clk_pllv4_prepare(struct clk_hw *hw)
{
u32 val;
struct clk_pllv4 *pll = to_clk_pllv4(hw);
@@ -187,7 +187,7 @@ static int clk_pllv4_enable(struct clk_hw *hw)
return clk_pllv4_wait_lock(pll);
}
-static void clk_pllv4_disable(struct clk_hw *hw)
+static void clk_pllv4_unprepare(struct clk_hw *hw)
{
u32 val;
struct clk_pllv4 *pll = to_clk_pllv4(hw);
@@ -201,9 +201,9 @@ static const struct clk_ops clk_pllv4_ops = {
.recalc_rate = clk_pllv4_recalc_rate,
.round_rate = clk_pllv4_round_rate,
.set_rate = clk_pllv4_set_rate,
- .enable = clk_pllv4_enable,
- .disable = clk_pllv4_disable,
- .is_enabled = clk_pllv4_is_enabled,
+ .prepare = clk_pllv4_prepare,
+ .unprepare = clk_pllv4_unprepare,
+ .is_prepared = clk_pllv4_is_prepared,
};
struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
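Side note on the enable-to-prepare move above: clk_pllv4_wait_lock() polls with a timeout, which is only safe in sleepable context; .prepare/.unprepare run there, whereas .enable/.disable may be called under a spinlock. A minimal consumer-side sketch (handle name is hypothetical):

	#include <linux/clk.h>

	static int example_power_on(struct clk *pll_clk)
	{
		/* clk_prepare() may sleep, so the PLL lock poll happens here */
		return clk_prepare_enable(pll_clk);
	}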
diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
index acd1b9002be6..d4a2be16d132 100644
--- a/drivers/clk/imx/clk-sscg-pll.c
+++ b/drivers/clk/imx/clk-sscg-pll.c
@@ -195,10 +195,10 @@ static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
uint64_t ref)
{
- int ret = -EINVAL;
+ int ret;
if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
- return ret;
+ return -EINVAL;
temp_setup->vco1 = ref;
@@ -254,10 +254,10 @@ static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
uint64_t ref)
{
- int ret = -EINVAL;
+ int ret;
if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
- return ret;
+ return -EINVAL;
temp_setup->ref = ref;
@@ -428,7 +428,7 @@ static int __clk_sscg_pll_determine_rate(struct clk_hw *hw,
struct clk_sscg_pll_setup *setup = &pll->setup;
struct clk_hw *parent_hw = NULL;
int bypass_parent_index;
- int ret = -EINVAL;
+ int ret;
req->max_rate = max;
req->min_rate = min;
@@ -467,10 +467,10 @@ static int clk_sscg_pll_determine_rate(struct clk_hw *hw,
uint64_t rate = req->rate;
uint64_t min = req->min_rate;
uint64_t max = req->max_rate;
- int ret = -EINVAL;
+ int ret;
if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
- return ret;
+ return -EINVAL;
ret = __clk_sscg_pll_determine_rate(hw, req, req->rate, req->rate,
rate, PLL_BYPASS2);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index b05213b91dcf..f074dd8ec42e 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -477,20 +477,29 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
+#define IMX_COMPOSITE_CORE BIT(0)
+
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
int num_parents,
void __iomem *reg,
+ u32 composite_flags,
unsigned long flags);
+#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
+ imx8m_clk_hw_composite_flags(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, \
+ IMX_COMPOSITE_CORE, \
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+
#define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \
flags) \
to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \
- num_parents, reg, flags))
+ num_parents, reg, 0, flags))
#define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \
imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, \
+ ARRAY_SIZE(parent_names), reg, 0, \
flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
#define __imx8m_clk_composite(name, parent_names, reg, flags) \
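Usage sketch for the new core helper (parent list, register offset, and clock index are hypothetical; the real call sites live in the i.MX8M SoC clock drivers):

	static void example_register_core(struct clk_hw **hws, void __iomem *base)
	{
		/* Hypothetical parent list for an A53 core slice */
		static const char * const a53_sels[] = {
			"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys1_pll_800m",
		};

		/* IMX_COMPOSITE_CORE selects the core-clock slice layout */
		hws[IMX8MQ_CLK_A53_DIV] =
			imx8m_clk_hw_composite_core("arm_a53_div", a53_sels,
						    base + 0x8000);
	}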
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 956dd653a43d..c051ecba5cf8 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -432,8 +432,10 @@ static void __init jz4770_cgu_init(struct device_node *np)
cgu = ingenic_cgu_new(jz4770_cgu_clocks,
ARRAY_SIZE(jz4770_cgu_clocks), np);
- if (!cgu)
+ if (!cgu) {
pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
retval = ingenic_cgu_register_clocks(cgu);
if (retval)
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index ea905ff72bf0..c758f1643067 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -9,14 +9,16 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4780-cgu.h>
#include "cgu.h"
#include "pm.h"
/* CGU register offsets */
#define CGU_REG_CLOCKCONTROL 0x00
-#define CGU_REG_PLLCONTROL 0x0c
+#define CGU_REG_LCR 0x04
#define CGU_REG_APLL 0x10
#define CGU_REG_MPLL 0x14
#define CGU_REG_EPLL 0x18
@@ -46,8 +48,8 @@
#define CGU_REG_CLOCKSTATUS 0xd4
/* bits within the OPCR register */
-#define OPCR_SPENDN0 (1 << 7)
-#define OPCR_SPENDN1 (1 << 6)
+#define OPCR_SPENDN0 BIT(7)
+#define OPCR_SPENDN1 BIT(6)
/* bits within the USBPCR register */
#define USBPCR_USB_MODE BIT(31)
@@ -88,6 +90,13 @@
#define USBVBFIL_IDDIGFIL_MASK (0xffff << USBVBFIL_IDDIGFIL_SHIFT)
#define USBVBFIL_USBVBFIL_MASK (0xffff)
+/* bits within the LCR register */
+#define LCR_PD_SCPU BIT(31)
+#define LCR_SCPUS BIT(27)
+
+/* bits within the CLKGR1 register */
+#define CLKGR1_CORE1 BIT(15)
+
static struct ingenic_cgu *cgu;
static u8 jz4780_otg_phy_get_parent(struct clk_hw *hw)
@@ -205,6 +214,42 @@ static const struct clk_ops jz4780_otg_phy_ops = {
.set_rate = jz4780_otg_phy_set_rate,
};
+static int jz4780_core1_enable(struct clk_hw *hw)
+{
+ struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ struct ingenic_cgu *cgu = ingenic_clk->cgu;
+ const unsigned int timeout = 5000;
+ unsigned long flags;
+ int retval;
+ u32 lcr, clkgr1;
+
+ spin_lock_irqsave(&cgu->lock, flags);
+
+ lcr = readl(cgu->base + CGU_REG_LCR);
+ lcr &= ~LCR_PD_SCPU;
+ writel(lcr, cgu->base + CGU_REG_LCR);
+
+ clkgr1 = readl(cgu->base + CGU_REG_CLKGR1);
+ clkgr1 &= ~CLKGR1_CORE1;
+ writel(clkgr1, cgu->base + CGU_REG_CLKGR1);
+
+ spin_unlock_irqrestore(&cgu->lock, flags);
+
+ /* wait for the CPU to be powered up */
+ retval = readl_poll_timeout(cgu->base + CGU_REG_LCR, lcr,
+ !(lcr & LCR_SCPUS), 10, timeout);
+ if (retval == -ETIMEDOUT) {
+ pr_err("%s: Wait for power up core1 timeout\n", __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+static const struct clk_ops jz4780_core1_ops = {
+ .enable = jz4780_core1_enable,
+};
+
static const s8 pll_od_encoding[16] = {
0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
@@ -699,9 +744,9 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
},
[JZ4780_CLK_CORE1] = {
- "core1", CGU_CLK_GATE,
+ "core1", CGU_CLK_CUSTOM,
.parents = { JZ4780_CLK_CPU, -1, -1, -1 },
- .gate = { CGU_REG_CLKGR1, 15 },
+ .custom = { &jz4780_core1_ops },
},
};
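The core1 enable path above relies on readl_poll_timeout(); a minimal standalone sketch of the same poll-until-clear pattern (the register pointer and bit stand in for CGU_REG_LCR and LCR_SCPUS):

	#include <linux/bits.h>
	#include <linux/iopoll.h>

	/* Wait up to 5 ms, polling every 10 us, for the power-down
	 * status bit to clear after the request bit was released. */
	static int example_wait_powered_up(void __iomem *lcr_reg)
	{
		u32 lcr;

		return readl_poll_timeout(lcr_reg, lcr, !(lcr & BIT(27)),
					  10, 5000);
	}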
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index ad7daa494fd4..153a954b0d2f 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -189,7 +189,7 @@ static long ingenic_tcu_round_rate(struct clk_hw *hw, unsigned long req_rate,
u8 prescale;
if (req_rate > rate)
- return -EINVAL;
+ return rate;
prescale = ingenic_tcu_get_prescale(rate, req_rate);
@@ -317,10 +317,17 @@ static const struct ingenic_soc_info jz4770_soc_info = {
.has_tcu_clk = false,
};
+static const struct ingenic_soc_info x1000_soc_info = {
+ .num_channels = 8,
+ .has_ost = false, /* X1000 has an OST, but it does not belong to the TCU */
+ .has_tcu_clk = false,
+};
+
static const struct of_device_id ingenic_tcu_of_match[] __initconst = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4770_soc_info, },
+ { .compatible = "ingenic,x1000-tcu", .data = &x1000_soc_info, },
{ /* sentinel */ }
};
@@ -471,3 +478,4 @@ static void __init ingenic_tcu_init(struct device_node *np)
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init);
CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-tcu", ingenic_tcu_init);
CLK_OF_DECLARE_DRIVER(jz4770_cgu, "ingenic,jz4770-tcu", ingenic_tcu_init);
+CLK_OF_DECLARE_DRIVER(x1000_cgu, "ingenic,x1000-tcu", ingenic_tcu_init);
diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig
index 38aeefb1e808..ab613f28b502 100644
--- a/drivers/clk/keystone/Kconfig
+++ b/drivers/clk/keystone/Kconfig
@@ -26,3 +26,11 @@ config TI_SCI_CLK_PROBE_FROM_FW
This is mostly only useful for debugging purposes, and will
increase the boot time of the device. If you want the clocks probed
from firmware, say Y. Otherwise, say N.
+
+config TI_SYSCON_CLK
+ tristate "Syscon based clock driver for K2/K3 SoCs"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ default ARCH_KEYSTONE || ARCH_K3
+ help
+ This adds clock driver support for syscon-based gate
+ clocks on TI's K2 and K3 SoCs.
diff --git a/drivers/clk/keystone/Makefile b/drivers/clk/keystone/Makefile
index d044de6f965c..0e426e648f7c 100644
--- a/drivers/clk/keystone/Makefile
+++ b/drivers/clk/keystone/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_COMMON_CLK_KEYSTONE) += pll.o gate.o
obj-$(CONFIG_TI_SCI_CLK) += sci-clk.o
+obj-$(CONFIG_TI_SYSCON_CLK) += syscon-clk.o
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
new file mode 100644
index 000000000000..8d7dbea3bd30
--- /dev/null
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct ti_syscon_gate_clk_priv {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 reg;
+ u32 idx;
+};
+
+struct ti_syscon_gate_clk_data {
+ char *name;
+ u32 offset;
+ u32 bit_idx;
+};
+
+static struct
+ti_syscon_gate_clk_priv *to_ti_syscon_gate_clk_priv(struct clk_hw *hw)
+{
+ return container_of(hw, struct ti_syscon_gate_clk_priv, hw);
+}
+
+static int ti_syscon_gate_clk_enable(struct clk_hw *hw)
+{
+ struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw);
+
+ return regmap_write_bits(priv->regmap, priv->reg, priv->idx,
+ priv->idx);
+}
+
+static void ti_syscon_gate_clk_disable(struct clk_hw *hw)
+{
+ struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw);
+
+ regmap_write_bits(priv->regmap, priv->reg, priv->idx, 0);
+}
+
+static int ti_syscon_gate_clk_is_enabled(struct clk_hw *hw)
+{
+ unsigned int val;
+ struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw);
+
+ regmap_read(priv->regmap, priv->reg, &val);
+
+ return !!(val & priv->idx);
+}
+
+static const struct clk_ops ti_syscon_gate_clk_ops = {
+ .enable = ti_syscon_gate_clk_enable,
+ .disable = ti_syscon_gate_clk_disable,
+ .is_enabled = ti_syscon_gate_clk_is_enabled,
+};
+
+static struct clk_hw
+*ti_syscon_gate_clk_register(struct device *dev, struct regmap *regmap,
+ const struct ti_syscon_gate_clk_data *data)
+{
+ struct ti_syscon_gate_clk_priv *priv;
+ struct clk_init_data init;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = data->name;
+ init.ops = &ti_syscon_gate_clk_ops;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ init.flags = 0;
+
+ priv->regmap = regmap;
+ priv->reg = data->offset;
+ priv->idx = BIT(data->bit_idx);
+ priv->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &priv->hw;
+}
+
+static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
+{
+ const struct ti_syscon_gate_clk_data *data, *p;
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int num_clks, i;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(regmap)) {
+ if (PTR_ERR(regmap) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(dev, "failed to find parent regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ num_clks = 0;
+ for (p = data; p->name; p++)
+ num_clks++;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ hw_data->num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ hw_data->hws[i] = ti_syscon_gate_clk_register(dev, regmap,
+ &data[i]);
+ if (IS_ERR(hw_data->hws[i]))
+ dev_warn(dev, "failed to register %s\n",
+ data[i].name);
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ hw_data);
+}
+
+#define TI_SYSCON_CLK_GATE(_name, _offset, _bit_idx) \
+ { \
+ .name = _name, \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ }
+
+static const struct ti_syscon_gate_clk_data am654_clk_data[] = {
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk0", 0x0, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk1", 0x4, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk2", 0x8, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk3", 0xc, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk4", 0x10, 0),
+ TI_SYSCON_CLK_GATE("ehrpwm_tbclk5", 0x14, 0),
+ { /* Sentinel */ },
+};
+
+static const struct of_device_id ti_syscon_gate_clk_ids[] = {
+ {
+ .compatible = "ti,am654-ehrpwm-tbclk",
+ .data = &am654_clk_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_syscon_gate_clk_ids);
+
+static struct platform_driver ti_syscon_gate_clk_driver = {
+ .probe = ti_syscon_gate_clk_probe,
+ .driver = {
+ .name = "ti-syscon-gate-clk",
+ .of_match_table = ti_syscon_gate_clk_ids,
+ },
+};
+module_platform_driver(ti_syscon_gate_clk_driver);
+
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_DESCRIPTION("Syscon-backed gate-clock driver");
+MODULE_LICENSE("GPL");
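The tbclks registered above behave like ordinary gates toward consumers; a hedged consumer-side sketch (the connection id "tbclk" is hypothetical and would come from the consumer's DT clock-names property):

	#include <linux/clk.h>

	static int example_enable_tbclk(struct device *dev)
	{
		struct clk *tbclk;

		tbclk = devm_clk_get(dev, "tbclk");
		if (IS_ERR(tbclk))
			return PTR_ERR(tbclk);

		/* Flips the syscon bit via ti_syscon_gate_clk_enable() */
		return clk_prepare_enable(tbclk);
	}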
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index d2760a021301..fad616cac01e 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -3862,6 +3862,111 @@ static struct clk_regmap g12a_ts = {
},
};
+/* SPICC SCLK source clock */
+
+static const struct clk_parent_data spicc_sclk_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_clk81.hw },
+ { .hw = &g12a_fclk_div4.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div5.hw },
+ { .hw = &g12a_fclk_div7.hw },
+};
+
+static struct clk_regmap g12a_spicc0_sclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .mask = 7,
+ .shift = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc0_sclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = spicc_sclk_parent_data,
+ .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ },
+};
+
+static struct clk_regmap g12a_spicc0_sclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .shift = 0,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc0_sclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc0_sclk_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_spicc0_sclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .bit_idx = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc0_sclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc0_sclk_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_spicc1_sclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .mask = 7,
+ .shift = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc1_sclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = spicc_sclk_parent_data,
+ .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ },
+};
+
+static struct clk_regmap g12a_spicc1_sclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .shift = 16,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc1_sclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc1_sclk_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_spicc1_sclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SPICC_CLK_CNTL,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "spicc1_sclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_spicc1_sclk_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
#define MESON_GATE(_name, _reg, _bit) \
MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
@@ -4159,6 +4264,12 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw,
[CLKID_TS_DIV] = &g12a_ts_div.hw,
[CLKID_TS] = &g12a_ts.hw,
+ [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw,
+ [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw,
+ [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw,
+ [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
+ [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
+ [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4408,6 +4519,12 @@ static struct clk_hw_onecell_data g12b_hw_onecell_data = {
[CLKID_CPUB_CLK_AXI] = &g12b_cpub_clk_axi.hw,
[CLKID_CPUB_CLK_TRACE_SEL] = &g12b_cpub_clk_trace_sel.hw,
[CLKID_CPUB_CLK_TRACE] = &g12b_cpub_clk_trace.hw,
+ [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw,
+ [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw,
+ [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw,
+ [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
+ [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
+ [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4642,6 +4759,12 @@ static struct clk_hw_onecell_data sm1_hw_onecell_data = {
[CLKID_CPU1_CLK] = &sm1_cpu1_clk.hw,
[CLKID_CPU2_CLK] = &sm1_cpu2_clk.hw,
[CLKID_CPU3_CLK] = &sm1_cpu3_clk.hw,
+ [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw,
+ [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw,
+ [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw,
+ [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
+ [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
+ [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4877,6 +5000,12 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&sm1_cpu1_clk,
&sm1_cpu2_clk,
&sm1_cpu3_clk,
+ &g12a_spicc0_sclk_sel,
+ &g12a_spicc0_sclk_div,
+ &g12a_spicc0_sclk,
+ &g12a_spicc1_sclk_sel,
+ &g12a_spicc1_sclk_div,
+ &g12a_spicc1_sclk,
};
static const struct reg_sequence g12a_init_regs[] = {
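Each SPICC SCLK added above is a mux -> divider -> gate chain; CLK_SET_RATE_PARENT on the gate and divider lets a rate request on the leaf walk up to the divider while stopping at the mux, which does not carry the flag. A consumer-side sketch (handle and rate are hypothetical):

	#include <linux/clk.h>

	/* Hypothetical: the SPICC driver picks its bitrate source rate */
	static int example_set_sclk(struct clk *spicc0_sclk)
	{
		/*
		 * Propagates from the gate to spicc0_sclk_div; the divider
		 * then picks the best 6-bit divide from whichever parent
		 * the mux currently selects.
		 */
		return clk_set_rate(spicc0_sclk, 80000000);
	}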
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 9df4068aced1..a8852556836e 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -255,8 +255,12 @@
#define CLKID_DSU_CLK_DYN1 249
#define CLKID_DSU_CLK_DYN 250
#define CLKID_DSU_CLK_FINAL 251
+#define CLKID_SPICC0_SCLK_SEL 256
+#define CLKID_SPICC0_SCLK_DIV 257
+#define CLKID_SPICC1_SCLK_SEL 259
+#define CLKID_SPICC1_SCLK_DIV 260
-#define NR_CLKS 256
+#define NR_CLKS 262
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/g12a-clkc.h>
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 1f9c056e684c..5fd6a574f8c3 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -2613,19 +2613,12 @@ static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23);
static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24);
static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25);
static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26);
+static MESON_GATE(gxl_acodec, HHI_GCLK_MPEG0, 28);
static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30);
static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2);
static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3);
static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6);
-static MESON_GATE(gxbb_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_GATE(gxbb_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_GATE(gxbb_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_GATE(gxbb_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_GATE(gxbb_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_GATE(gxbb_adc, HHI_GCLK_MPEG1, 13);
static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14);
static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15);
static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16);
@@ -2680,6 +2673,16 @@ static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2);
static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3);
static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
+/* AIU gates */
+static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu.hw);
+static MESON_PCLK(gxbb_iec958, HHI_GCLK_MPEG1, 7, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_i2s_out, HHI_GCLK_MPEG1, 8, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_amclk, HHI_GCLK_MPEG1, 9, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_aififo2, HHI_GCLK_MPEG1, 10, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_mixer, HHI_GCLK_MPEG1, 11, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12, &gxbb_aiu_glue.hw);
+static MESON_PCLK(gxbb_adc, HHI_GCLK_MPEG1, 13, &gxbb_aiu_glue.hw);
+
/* Array of all clocks provided by this provider */
static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
@@ -3100,6 +3103,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_HDMI_SEL] = &gxbb_hdmi_sel.hw,
[CLKID_HDMI_DIV] = &gxbb_hdmi_div.hw,
[CLKID_HDMI] = &gxbb_hdmi.hw,
+ [CLKID_ACODEC] = &gxl_acodec.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -3491,6 +3495,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = {
&gxl_hdmi_pll_od,
&gxl_hdmi_pll_od2,
&gxl_hdmi_pll_dco,
+ &gxl_acodec,
};
static const struct meson_eeclkc_data gxbb_clkc_data = {
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index b53584fe66cf..1ee8cb7e2f5a 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -188,7 +188,7 @@
#define CLKID_HDMI_SEL 203
#define CLKID_HDMI_DIV 204
-#define NR_CLKS 206
+#define NR_CLKS 207
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 9fd31f23b2a9..34a70c4b4899 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -2605,14 +2605,6 @@ static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30);
static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2);
static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3);
static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6);
-static MESON_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14);
static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15);
static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16);
@@ -2659,6 +2651,19 @@ static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25);
static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26);
static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31);
+/* AIU gates */
+#define MESON_AIU_GLUE_GATE(_name, _reg, _bit) \
+ MESON_PCLK(_name, _reg, _bit, &meson8b_aiu_glue.hw)
+
+static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6, &meson8b_aiu.hw);
+static MESON_AIU_GLUE_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
+static MESON_AIU_GLUE_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
+static MESON_AIU_GLUE_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
+static MESON_AIU_GLUE_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
+static MESON_AIU_GLUE_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
+static MESON_AIU_GLUE_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
+static MESON_AIU_GLUE_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
+
/* Always On (AO) domain gates */
static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0);
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index acc141adf087..14dc8a8a9d08 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -8,7 +8,7 @@ obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
-obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o
+obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o
obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index d2cd36c54474..7a351ec65564 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -441,7 +441,7 @@ const struct clk_ops mmp_clk_mix_ops = {
struct clk *mmp_clk_register_mix(struct device *dev,
const char *name,
- const char **parent_names,
+ const char * const *parent_names,
u8 num_parents,
unsigned long flags,
struct mmp_clk_mix_config *config,
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 6e71591e63a0..52dc8b43acd9 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -48,17 +49,39 @@
#define APMU_SDH1 0x58
#define APMU_SDH2 0xe8
#define APMU_SDH3 0xec
+#define APMU_SDH4 0x15c
#define APMU_USB 0x5c
#define APMU_DISP0 0x4c
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
+#define APBC_THERMAL0 0x90
+#define APBC_THERMAL1 0x98
+#define APBC_THERMAL2 0x9c
+#define APBC_THERMAL3 0xa0
#define APMU_USBHSIC0 0xf8
#define APMU_USBHSIC1 0xfc
-#define MPMU_UART_PLL 0x14
+#define APMU_GPU 0xcc
+
+#define MPMU_FCCR 0x8
+#define MPMU_POSR 0x10
+#define MPMU_UART_PLL 0x14
+#define MPMU_PLL2_CR 0x34
+/* MMP3 specific below */
+#define MPMU_PLL3_CR 0x50
+#define MPMU_PLL3_CTRL1 0x58
+#define MPMU_PLL1_CTRL 0x5c
+#define MPMU_PLL_DIFF_CTRL 0x68
+#define MPMU_PLL2_CTRL1 0x414
+
+enum mmp2_clk_model {
+ CLK_MODEL_MMP2,
+ CLK_MODEL_MMP3,
+};
struct mmp2_clk_unit {
struct mmp_clk_unit unit;
+ enum mmp2_clk_model model;
void __iomem *mpmu_base;
void __iomem *apmu_base;
void __iomem *apbc_base;
@@ -67,11 +90,22 @@ struct mmp2_clk_unit {
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
{MMP2_CLK_CLK32, "clk32", NULL, 0, 32768},
{MMP2_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000},
- {MMP2_CLK_PLL1, "pll1", NULL, 0, 800000000},
- {MMP2_CLK_PLL2, "pll2", NULL, 0, 960000000},
{MMP2_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000},
};
+static struct mmp_param_pll_clk pll_clks[] = {
+ {MMP2_CLK_PLL1, "pll1", 797330000, MPMU_FCCR, 0x4000, MPMU_POSR, 0},
+ {MMP2_CLK_PLL2, "pll2", 0, MPMU_PLL2_CR, 0x0300, MPMU_PLL2_CR, 10},
+};
+
+static struct mmp_param_pll_clk mmp3_pll_clks[] = {
+ {MMP2_CLK_PLL2, "pll1", 797330000, MPMU_FCCR, 0x4000, MPMU_POSR, 0, 26000000, MPMU_PLL1_CTRL, 25},
+ {MMP2_CLK_PLL2, "pll2", 0, MPMU_PLL2_CR, 0x0300, MPMU_PLL2_CR, 10, 26000000, MPMU_PLL2_CTRL1, 25},
+ {MMP3_CLK_PLL1_P, "pll1_p", 0, MPMU_PLL_DIFF_CTRL, 0x0010, 0, 0, 797330000, MPMU_PLL_DIFF_CTRL, 0},
+ {MMP3_CLK_PLL2_P, "pll2_p", 0, MPMU_PLL_DIFF_CTRL, 0x0100, MPMU_PLL2_CR, 10, 26000000, MPMU_PLL_DIFF_CTRL, 5},
+ {MMP3_CLK_PLL3, "pll3", 0, MPMU_PLL3_CR, 0x0300, MPMU_PLL3_CR, 10, 26000000, MPMU_PLL3_CTRL1, 25},
+};
+
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
{MMP2_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
{MMP2_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
@@ -113,6 +147,16 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
ARRAY_SIZE(fixed_rate_clks));
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ mmp_register_pll_clks(unit, mmp3_pll_clks,
+ pxa_unit->mpmu_base,
+ ARRAY_SIZE(mmp3_pll_clks));
+ } else {
+ mmp_register_pll_clks(unit, pll_clks,
+ pxa_unit->mpmu_base,
+ ARRAY_SIZE(pll_clks));
+ }
+
mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
ARRAY_SIZE(fixed_factor_clks));
@@ -127,16 +171,16 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
static DEFINE_SPINLOCK(uart0_lock);
static DEFINE_SPINLOCK(uart1_lock);
static DEFINE_SPINLOCK(uart2_lock);
-static const char *uart_parent_names[] = {"uart_pll", "vctcxo"};
+static const char * const uart_parent_names[] = {"uart_pll", "vctcxo"};
static DEFINE_SPINLOCK(ssp0_lock);
static DEFINE_SPINLOCK(ssp1_lock);
static DEFINE_SPINLOCK(ssp2_lock);
static DEFINE_SPINLOCK(ssp3_lock);
-static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
+static const char * const ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
+static const char * const timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);
@@ -176,6 +220,13 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = {
{MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock},
{MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock},
{MMP2_CLK_TIMER, "timer_clk", "timer_mux", CLK_SET_RATE_PARENT, APBC_TIMER, 0x7, 0x3, 0x0, 0, &timer_lock},
+ {MMP2_CLK_THERMAL0, "thermal0_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL0, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+};
+
+static struct mmp_param_gate_clk mmp3_apbc_gate_clks[] = {
+ {MMP3_CLK_THERMAL1, "thermal1_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL1, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+ {MMP3_CLK_THERMAL2, "thermal2_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL2, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+ {MMP3_CLK_THERMAL3, "thermal3_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL3, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
};
static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
@@ -187,10 +238,15 @@ static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
ARRAY_SIZE(apbc_gate_clks));
+
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ mmp_register_gate_clks(unit, mmp3_apbc_gate_clks, pxa_unit->apbc_base,
+ ARRAY_SIZE(mmp3_apbc_gate_clks));
+ }
}
static DEFINE_SPINLOCK(sdh_lock);
-static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
+static const char * const sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
static struct mmp_clk_mix_config sdh_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32),
};
@@ -201,11 +257,20 @@ static DEFINE_SPINLOCK(usbhsic1_lock);
static DEFINE_SPINLOCK(disp0_lock);
static DEFINE_SPINLOCK(disp1_lock);
-static const char *disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};
+static const char * const disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};
static DEFINE_SPINLOCK(ccic0_lock);
static DEFINE_SPINLOCK(ccic1_lock);
-static const char *ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
+static const char * const ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
+
+static DEFINE_SPINLOCK(gpu_lock);
+static const char * const mmp2_gpu_gc_parent_names[] = {"pll1_2", "pll1_3", "pll2_2", "pll2_3", "pll2", "usb_pll"};
+static u32 mmp2_gpu_gc_parent_table[] = { 0x0000, 0x0040, 0x0080, 0x00c0, 0x1000, 0x1040 };
+static const char * const mmp2_gpu_bus_parent_names[] = {"pll1_4", "pll2", "pll2_2", "usb_pll"};
+static u32 mmp2_gpu_bus_parent_table[] = { 0x0000, 0x0020, 0x0030, 0x4020 };
+static const char * const mmp3_gpu_bus_parent_names[] = {"pll1_4", "pll1_6", "pll1_2", "pll2_2"};
+static const char * const mmp3_gpu_gc_parent_names[] = {"pll1", "pll2", "pll1_p", "pll2_p"};
+
static struct mmp_clk_mix_config ccic0_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
};
@@ -218,6 +283,15 @@ static struct mmp_param_mux_clk apmu_mux_clks[] = {
{MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
};
+static struct mmp_param_mux_clk mmp3_apmu_mux_clks[] = {
+ {0, "gpu_bus_mux", mmp3_gpu_bus_parent_names, ARRAY_SIZE(mmp3_gpu_bus_parent_names),
+ CLK_SET_RATE_PARENT, APMU_GPU, 4, 2, 0, &gpu_lock},
+ {0, "gpu_3d_mux", mmp3_gpu_gc_parent_names, ARRAY_SIZE(mmp3_gpu_gc_parent_names),
+ CLK_SET_RATE_PARENT, APMU_GPU, 6, 2, 0, &gpu_lock},
+ {0, "gpu_2d_mux", mmp3_gpu_gc_parent_names, ARRAY_SIZE(mmp3_gpu_gc_parent_names),
+ CLK_SET_RATE_PARENT, APMU_GPU, 12, 2, 0, &gpu_lock},
+};
+
static struct mmp_param_div_clk apmu_div_clks[] = {
{0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock},
{0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock},
@@ -226,6 +300,11 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
{0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock},
};
+static struct mmp_param_div_clk mmp3_apmu_div_clks[] = {
+ {0, "gpu_3d_div", "gpu_3d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 24, 4, 0, &gpu_lock},
+ {0, "gpu_2d_div", "gpu_2d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 28, 4, 0, &gpu_lock},
+};
+
static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
{MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock},
@@ -235,8 +314,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
- {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock},
+ {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock},
{MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
@@ -246,6 +325,17 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
+ {MMP2_CLK_GPU_BUS, "gpu_bus_clk", "gpu_bus_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0xa, 0xa, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+};
+
+static struct mmp_param_gate_clk mmp2_apmu_gate_clks[] = {
+ {MMP2_CLK_GPU_3D, "gpu_3d_clk", "gpu_3d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0x5, 0x5, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+};
+
+static struct mmp_param_gate_clk mmp3_apmu_gate_clks[] = {
+ {MMP3_CLK_SDH4, "sdh4_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH4, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP3_CLK_GPU_3D, "gpu_3d_clk", "gpu_3d_div", CLK_SET_RATE_PARENT, APMU_GPU, 0x5, 0x5, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+ {MMP3_CLK_GPU_2D, "gpu_2d_clk", "gpu_2d_div", CLK_SET_RATE_PARENT, APMU_GPU, 0x1c0000, 0x1c0000, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
};
static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
@@ -281,6 +371,34 @@ static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
ARRAY_SIZE(apmu_gate_clks));
+
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ mmp_register_mux_clks(unit, mmp3_apmu_mux_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp3_apmu_mux_clks));
+
+ mmp_register_div_clks(unit, mmp3_apmu_div_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp3_apmu_div_clks));
+
+ mmp_register_gate_clks(unit, mmp3_apmu_gate_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp3_apmu_gate_clks));
+ } else {
+ clk_register_mux_table(NULL, "gpu_3d_mux", mmp2_gpu_gc_parent_names,
+ ARRAY_SIZE(mmp2_gpu_gc_parent_names),
+ CLK_SET_RATE_PARENT,
+ pxa_unit->apmu_base + APMU_GPU,
+ 0, 0x10c0, 0,
+ mmp2_gpu_gc_parent_table, &gpu_lock);
+
+ clk_register_mux_table(NULL, "gpu_bus_mux", mmp2_gpu_bus_parent_names,
+ ARRAY_SIZE(mmp2_gpu_bus_parent_names),
+ CLK_SET_RATE_PARENT,
+ pxa_unit->apmu_base + APMU_GPU,
+ 0, 0x4030, 0,
+ mmp2_gpu_bus_parent_table, &gpu_lock);
+
+ mmp_register_gate_clks(unit, mmp2_apmu_gate_clks, pxa_unit->apmu_base,
+ ARRAY_SIZE(mmp2_apmu_gate_clks));
+ }
}
static void mmp2_clk_reset_init(struct device_node *np,
@@ -313,6 +431,11 @@ static void __init mmp2_clk_init(struct device_node *np)
if (!pxa_unit)
return;
+ if (of_device_is_compatible(np, "marvell,mmp3-clock"))
+ pxa_unit->model = CLK_MODEL_MMP3;
+ else
+ pxa_unit->model = CLK_MODEL_MMP2;
+
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
@@ -352,3 +475,4 @@ free_memory:
}
CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
+CLK_OF_DECLARE(mmp3_clk, "marvell,mmp3-clock", mmp2_clk_init);
diff --git a/drivers/clk/mmp/clk-pll.c b/drivers/clk/mmp/clk-pll.c
new file mode 100644
index 000000000000..7077be293871
--- /dev/null
+++ b/drivers/clk/mmp/clk-pll.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MMP PLL clock rate calculation
+ *
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include "clk.h"
+
+#define to_clk_mmp_pll(hw) container_of(hw, struct mmp_clk_pll, hw)
+
+struct mmp_clk_pll {
+ struct clk_hw hw;
+ unsigned long default_rate;
+ void __iomem *enable_reg;
+ u32 enable;
+ void __iomem *reg;
+ u8 shift;
+
+ unsigned long input_rate;
+ void __iomem *postdiv_reg;
+ u8 postdiv_shift;
+};
+
+static int mmp_clk_pll_is_enabled(struct clk_hw *hw)
+{
+ struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->enable_reg);
+ if ((val & pll->enable) == pll->enable)
+ return 1;
+
+ /* Some PLLs, if not software controlled, output a default clock. */
+ if (pll->default_rate > 0)
+ return 1;
+
+ return 0;
+}
+
+static unsigned long mmp_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
+ u32 fbdiv, refdiv, postdiv;
+ u64 rate;
+ u32 val;
+
+ val = readl_relaxed(pll->enable_reg);
+ if ((val & pll->enable) != pll->enable)
+ return pll->default_rate;
+
+ if (pll->reg) {
+ val = readl_relaxed(pll->reg);
+ fbdiv = (val >> pll->shift) & 0x1ff;
+ refdiv = (val >> (pll->shift + 9)) & 0x1f;
+ } else {
+ fbdiv = 2;
+ refdiv = 1;
+ }
+
+ if (pll->postdiv_reg) {
+ /* MMP3 clock rate calculation */
+ static const u8 postdivs[] = {2, 3, 4, 5, 6, 8, 10, 12, 16};
+
+ val = readl_relaxed(pll->postdiv_reg);
+ postdiv = (val >> pll->postdiv_shift) & 0x7;
+
+ rate = pll->input_rate;
+ rate *= 2 * fbdiv;
+ do_div(rate, refdiv);
+ do_div(rate, postdivs[postdiv]);
+ } else {
+ /* MMP2 clock rate calculation */
+ if (refdiv == 3) {
+ rate = 19200000;
+ } else if (refdiv == 4) {
+ rate = 26000000;
+ } else {
+ pr_err("bad refdiv: %d (0x%08x)\n", refdiv, val);
+ return 0;
+ }
+
+ rate *= fbdiv + 2;
+ do_div(rate, refdiv + 2);
+ }
+
+ return (unsigned long)rate;
+}
+
+static const struct clk_ops mmp_clk_pll_ops = {
+ .is_enabled = mmp_clk_pll_is_enabled,
+ .recalc_rate = mmp_clk_pll_recalc_rate,
+};
+
+struct clk *mmp_clk_register_pll(char *name,
+ unsigned long default_rate,
+ void __iomem *enable_reg, u32 enable,
+ void __iomem *reg, u8 shift,
+ unsigned long input_rate,
+ void __iomem *postdiv_reg, u8 postdiv_shift)
+{
+ struct mmp_clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &mmp_clk_pll_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+
+ pll->default_rate = default_rate;
+ pll->enable_reg = enable_reg;
+ pll->enable = enable;
+ pll->reg = reg;
+ pll->shift = shift;
+
+ pll->input_rate = input_rate;
+ pll->postdiv_reg = postdiv_reg;
+ pll->postdiv_shift = postdiv_shift;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
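For concreteness, the MMP2 branch above computes rate = ref * (fbdiv + 2) / (refdiv + 2), where the refdiv field encodes the reference (3 -> 19.2 MHz, 4 -> 26 MHz). A worked check against pll1's ~797.33 MHz default rate (the field values below are assumed, not read from hardware):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long ref = 26000000;	/* refdiv == 4 */
		unsigned int fbdiv = 182, refdiv = 4;	/* assumed values */

		/* 26 MHz * 184 / 6 = 797333333 Hz, ~pll1's default rate */
		printf("%llu Hz\n", ref * (fbdiv + 2) / (refdiv + 2));
		return 0;
	}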
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
index ca7d37e2c7be..317123641d1e 100644
--- a/drivers/clk/mmp/clk.c
+++ b/drivers/clk/mmp/clk.c
@@ -176,6 +176,37 @@ void mmp_register_div_clks(struct mmp_clk_unit *unit,
}
}
+void mmp_register_pll_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_pll_clk *clks,
+ void __iomem *base, int size)
+{
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ void __iomem *reg = NULL;
+ void __iomem *postdiv_reg = NULL;
+
+ if (clks[i].offset)
+ reg = base + clks[i].offset;
+
+ if (clks[i].postdiv_offset)
+ postdiv_reg = base + clks[i].postdiv_offset;
+
+ clk = mmp_clk_register_pll(clks[i].name,
+ clks[i].default_rate,
+ base + clks[i].enable_offset,
+ clks[i].enable,
+ reg, clks[i].shift,
+ clks[i].input_rate,
+ postdiv_reg,
+ clks[i].postdiv_shift);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ if (clks[i].id)
+ unit->clk_table[clks[i].id] = clk;
+ }
+}
+
void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
struct clk *clk)
{
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index 70bb73257647..971b4d6d992f 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -97,7 +97,7 @@ struct mmp_clk_mix {
extern const struct clk_ops mmp_clk_mix_ops;
extern struct clk *mmp_clk_register_mix(struct device *dev,
const char *name,
- const char **parent_names,
+ const char * const *parent_names,
u8 num_parents,
unsigned long flags,
struct mmp_clk_mix_config *config,
@@ -124,9 +124,6 @@ extern struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
u32 val_disable, unsigned int gate_flags,
spinlock_t *lock);
-
-extern struct clk *mmp_clk_register_pll2(const char *name,
- const char *parent_name, unsigned long flags);
extern struct clk *mmp_clk_register_apbc(const char *name,
const char *parent_name, void __iomem *base,
unsigned int delay, unsigned int apbc_flags, spinlock_t *lock);
@@ -196,7 +193,7 @@ void mmp_register_gate_clks(struct mmp_clk_unit *unit,
struct mmp_param_mux_clk {
unsigned int id;
char *name;
- const char **parent_name;
+ const char * const *parent_name;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -224,6 +221,30 @@ void mmp_register_div_clks(struct mmp_clk_unit *unit,
struct mmp_param_div_clk *clks,
void __iomem *base, int size);
+struct mmp_param_pll_clk {
+ unsigned int id;
+ char *name;
+ unsigned long default_rate;
+ unsigned long enable_offset;
+ u32 enable;
+ unsigned long offset;
+ u8 shift;
+ /* MMP3 specific: */
+ unsigned long input_rate;
+ unsigned long postdiv_offset;
+ unsigned long postdiv_shift;
+};
+void mmp_register_pll_clks(struct mmp_clk_unit *unit,
+ struct mmp_param_pll_clk *clks,
+ void __iomem *base, int size);
+
+extern struct clk *mmp_clk_register_pll(char *name,
+ unsigned long default_rate,
+ void __iomem *enable_reg, u32 enable,
+ void __iomem *reg, u8 shift,
+ unsigned long input_rate,
+ void __iomem *postdiv_reg, u8 postdiv_shift);
+
#define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc) \
{ \
.width_div = (w_d), \
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 15cdcdc9b3b8..11ec6f466467 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -280,6 +280,15 @@ config SC_GPUCC_7180
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SC_MSS_7180
+ tristate "SC7180 Modem Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the Modem Subsystem clock controller on Qualcomm
+ Technologies, Inc. SC7180 devices.
+ Say Y if you want to use the Modem branch clocks of the Modem
+ subsystem clock controller to reset the MSS subsystem.
+
config SC_VIDEOCC_7180
tristate "SC7180 Video Clock Controller"
select SC_GCC_7180
@@ -366,6 +375,13 @@ config SM_GCC_8150
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8250
+ tristate "SM8250 Global Clock Controller"
+ help
+ Support for the global clock controller on SM8250 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on SPMI || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 656a87e629d4..691efbf7e81f 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
+obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 7c2936da9b14..9b2dfa08acb2 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -52,6 +52,7 @@
#define PLL_CONFIG_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1])
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
+#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
@@ -116,6 +117,22 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x40,
[PLL_OFF_CAL_VAL] = 0x44,
},
+ [CLK_ALPHA_PLL_TYPE_LUCID] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_CAL_L_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_USER_CTL_U] = 0x10,
+ [PLL_OFF_USER_CTL_U1] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x20,
+ [PLL_OFF_TEST_CTL] = 0x24,
+ [PLL_OFF_TEST_CTL_U] = 0x28,
+ [PLL_OFF_TEST_CTL_U1] = 0x2c,
+ [PLL_OFF_STATUS] = 0x30,
+ [PLL_OFF_OPMODE] = 0x38,
+ [PLL_OFF_ALPHA_VAL] = 0x40,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -134,15 +151,14 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define PLL_HUAYRA_N_MASK 0xff
#define PLL_HUAYRA_ALPHA_WIDTH 16
-#define FABIA_OPMODE_STANDBY 0x0
-#define FABIA_OPMODE_RUN 0x1
-
-#define FABIA_PLL_OUT_MASK 0x7
-#define FABIA_PLL_RATE_MARGIN 500
+#define PLL_STANDBY 0x0
+#define PLL_RUN 0x1
+#define PLL_OUT_MASK 0x7
+#define PLL_RATE_MARGIN 500
-#define TRION_PLL_STANDBY 0x0
-#define TRION_PLL_RUN 0x1
-#define TRION_PLL_OUT_MASK 0x7
+/* LUCID PLL specific settings and offsets */
+#define LUCID_PLL_CAL_VAL 0x44
+#define LUCID_PCAL_DONE BIT(26)
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
@@ -544,7 +560,8 @@ static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
vco = alpha_pll_find_vco(pll, rate);
if (pll->vco_table && !vco) {
- pr_err("alpha pll not in a valid vco range\n");
+ pr_err("%s: alpha pll not in a valid vco range\n",
+ clk_hw_get_name(hw));
return -EINVAL;
}
@@ -722,7 +739,7 @@ static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
*/
if (clk_alpha_pll_is_enabled(hw)) {
if (cur_alpha != a) {
- pr_err("clock needs to be gated %s\n",
+ pr_err("%s: clock needs to be gated\n",
clk_hw_get_name(hw));
return -EBUSY;
}
@@ -765,7 +782,7 @@ static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
if (ret)
return 0;
- return ((opmode_regval & TRION_PLL_RUN) && (mode_regval & PLL_OUTCTRL));
+ return ((opmode_regval & PLL_RUN) && (mode_regval & PLL_OUTCTRL));
}
static int clk_trion_pll_is_enabled(struct clk_hw *hw)
@@ -795,7 +812,7 @@ static int clk_trion_pll_enable(struct clk_hw *hw)
}
/* Set operation mode to RUN */
- regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_RUN);
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
ret = wait_for_pll_enable_lock(pll);
if (ret)
@@ -803,7 +820,7 @@ static int clk_trion_pll_enable(struct clk_hw *hw)
/* Enable the PLL outputs */
ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
- TRION_PLL_OUT_MASK, TRION_PLL_OUT_MASK);
+ PLL_OUT_MASK, PLL_OUT_MASK);
if (ret)
return ret;
@@ -836,12 +853,12 @@ static void clk_trion_pll_disable(struct clk_hw *hw)
/* Disable the PLL outputs */
ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
- TRION_PLL_OUT_MASK, 0);
+ PLL_OUT_MASK, 0);
if (ret)
return;
/* Place the PLL mode in STANDBY */
- regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_STANDBY);
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
}
@@ -849,33 +866,12 @@ static unsigned long
clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- struct regmap *regmap = pll->clkr.regmap;
- u32 l, frac;
- u64 prate = parent_rate;
-
- regmap_read(regmap, PLL_L_VAL(pll), &l);
- regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
-
- return alpha_pll_calc_rate(prate, l, frac, ALPHA_REG_16BIT_WIDTH);
-}
-
-static long clk_trion_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- unsigned long min_freq, max_freq;
- u32 l;
- u64 a;
-
- rate = alpha_pll_round_rate(rate, *prate,
- &l, &a, ALPHA_REG_16BIT_WIDTH);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ u32 l, frac, alpha_width = pll_alpha_width(pll);
- min_freq = pll->vco_table[0].min_freq;
- max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+ regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
- return clamp(rate, min_freq, max_freq);
+ return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
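For reference, alpha_pll_calc_rate() effectively computes rate = parent * (l + frac / 2^alpha_width), so the recalc above now honors the per-type alpha width instead of hardcoding 16 bits. A worked example with assumed register values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long prate = 19200000;	/* XO reference */
		unsigned int l = 62;			/* integer multiplier */
		unsigned int frac = 0x8000;		/* 0.5 in 16-bit alpha */

		/* 19.2 MHz * 62 + (19.2 MHz * 0.5) = 1200000000 Hz */
		printf("%llu Hz\n", prate * l + ((prate * frac) >> 16));
		return 0;
	}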
const struct clk_ops clk_alpha_pll_fixed_ops = {
@@ -921,7 +917,7 @@ const struct clk_ops clk_trion_fixed_pll_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_trion_pll_round_rate,
+ .round_rate = clk_alpha_pll_round_rate,
};
EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops);
@@ -1088,14 +1084,14 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
return ret;
/* Skip If PLL is already running */
- if ((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL))
+ if ((opmode_val & PLL_RUN) && (val & PLL_OUTCTRL))
return 0;
ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
if (ret)
return ret;
- ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ ret = regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
if (ret)
return ret;
@@ -1104,7 +1100,7 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
if (ret)
return ret;
- ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN);
+ ret = regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
if (ret)
return ret;
@@ -1113,7 +1109,7 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw)
return ret;
ret = regmap_update_bits(regmap, PLL_USER_CTL(pll),
- FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+ PLL_OUT_MASK, PLL_OUT_MASK);
if (ret)
return ret;
@@ -1143,13 +1139,12 @@ static void alpha_pll_fabia_disable(struct clk_hw *hw)
return;
/* Disable main outputs */
- ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK,
- 0);
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, 0);
if (ret)
return;
/* Place the PLL in STANDBY */
- regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY);
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
}
static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
@@ -1170,7 +1165,7 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
- unsigned long rrate;
+ unsigned long rrate, max = rate + PLL_RATE_MARGIN;
rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
@@ -1178,8 +1173,9 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
* Due to limited number of bits for fractional rate programming, the
* rounded up rate could be marginally higher than the requested rate.
*/
- if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
- pr_err("Call set rate on the PLL with rounded rates!\n");
+ if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n",
+ clk_hw_get_name(hw), rrate, rate, max);
return -EINVAL;
}
@@ -1196,6 +1192,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
struct clk_hw *parent_hw;
unsigned long cal_freq, rrate;
u32 cal_l, val, alpha_width = pll_alpha_width(pll);
+ const char *name = clk_hw_get_name(hw);
u64 a;
int ret;
@@ -1210,7 +1207,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
if (!vco) {
- pr_err("alpha pll: not in a valid vco range\n");
+ pr_err("%s: alpha pll not in a valid vco range\n", name);
return -EINVAL;
}
@@ -1227,7 +1224,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
* Due to a limited number of bits for fractional rate programming, the
* rounded up rate could be marginally higher than the requested rate.
*/
- if (rrate > (cal_freq + FABIA_PLL_RATE_MARGIN) || rrate < cal_freq)
+ if (rrate > (cal_freq + PLL_RATE_MARGIN) || rrate < cal_freq)
return -EINVAL;
/* Setup PLL for calibration frequency */
@@ -1236,7 +1233,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
/* Bringup the PLL at calibration frequency */
ret = clk_alpha_pll_enable(hw);
if (ret) {
- pr_err("alpha pll calibration failed\n");
+ pr_err("%s: alpha pll calibration failed\n", name);
return ret;
}
@@ -1394,3 +1391,175 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
+
+/**
+ * clk_lucid_pll_configure - configure the lucid pll
+ *
+ * @pll: clk alpha pll
+ * @regmap: register map
+ * @config: configuration to apply for pll
+ */
+void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ if (config->l)
+ regmap_write(regmap, PLL_L_VAL(pll), config->l);
+
+ regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL);
+
+ if (config->alpha)
+ regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+
+ if (config->config_ctl_hi_val)
+ regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+
+ if (config->config_ctl_hi1_val)
+ regmap_write(regmap, PLL_CONFIG_CTL_U1(pll),
+ config->config_ctl_hi1_val);
+
+ if (config->user_ctl_val)
+ regmap_write(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+
+ if (config->user_ctl_hi_val)
+ regmap_write(regmap, PLL_USER_CTL_U(pll),
+ config->user_ctl_hi_val);
+
+ if (config->user_ctl_hi1_val)
+ regmap_write(regmap, PLL_USER_CTL_U1(pll),
+ config->user_ctl_hi1_val);
+
+ if (config->test_ctl_val)
+ regmap_write(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+
+ if (config->test_ctl_hi_val)
+ regmap_write(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+
+ if (config->test_ctl_hi1_val)
+ regmap_write(regmap, PLL_TEST_CTL_U1(pll),
+ config->test_ctl_hi1_val);
+
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
+ PLL_UPDATE_BYPASS);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+
+ /* Set operation mode to STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Take the PLL out of reset; it then idles in STANDBY */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+}
+EXPORT_SYMBOL_GPL(clk_lucid_pll_configure);
+
+/*
+ * The Lucid PLL requires a power-on self-calibration which happens when the
+ * PLL comes out of reset. Calibrate in case it has not already completed.
+ */
+static int alpha_pll_lucid_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 regval;
+ int ret;
+
+ /* Return early if calibration is not needed. */
+ regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &regval);
+ if (regval & LUCID_PCAL_DONE)
+ return 0;
+
+ /* On/off to calibrate */
+ ret = clk_trion_pll_enable(hw);
+ if (!ret)
+ clk_trion_pll_disable(hw);
+
+ return ret;
+}
+
+static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ unsigned long rrate;
+ u32 regval, l, alpha_width = pll_alpha_width(pll);
+ u64 a;
+ int ret;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+
+ /*
+ * Due to a limited number of bits for fractional rate programming, the
+ * rounded up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Call set rate on the PLL with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ /* Latch the PLL input */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_UPDATE, PLL_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for 2 reference cycles before checking the ACK bit. */
+ udelay(1);
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &regval);
+ if (!(regval & ALPHA_PLL_ACK_LATCH)) {
+ pr_err("Lucid PLL latch failed. Output may be unstable!\n");
+ return -EINVAL;
+ }
+
+ /* Return the latch input to 0 */
+ ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
+ PLL_UPDATE, 0);
+ if (ret)
+ return ret;
+
+ if (clk_hw_is_enabled(hw)) {
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for PLL output to stabilize */
+ udelay(100);
+ return 0;
+}
+
+const struct clk_ops clk_alpha_pll_lucid_ops = {
+ .prepare = alpha_pll_lucid_prepare,
+ .enable = clk_trion_pll_enable,
+ .disable = clk_trion_pll_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
+
+const struct clk_ops clk_alpha_pll_fixed_lucid_ops = {
+ .enable = clk_trion_pll_enable,
+ .disable = clk_trion_pll_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = clk_trion_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
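Taken together, the Lucid additions mirror the existing Fabia/Trion split: full ops for rate-settable PLLs, fixed ops for PLLs whose rate is locked down by firmware, and postdiv ops that reuse the Fabia postdiv callbacks. A minimal sketch of how a clock-controller driver consumes them, modeled on the gcc-sm8250 usage further down in this series; the offset and config values here are hypothetical:

static struct clk_alpha_pll example_pll = {
	.offset = 0x0,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_pll",
		.parent_data = &(const struct clk_parent_data){
			.fw_name = "bi_tcxo",
		},
		.num_parents = 1,
		/* rate-settable, so take the full Lucid ops */
		.ops = &clk_alpha_pll_lucid_ops,
	},
};

/* hypothetical values; real ones come from the SoC clock plan */
static const struct alpha_pll_config example_pll_config = {
	.l = 0x1f,
	.config_ctl_val = 0x20485699,
};

/* in the provider's probe(), before registering the clock: */
clk_lucid_pll_configure(&example_pll, regmap, &example_pll_config);

clk_lucid_pll_configure() leaves the PLL in STANDBY; the first prepare then runs the power-on self-calibration only if the PCAL_DONE status bit is not already set. Note also the latch handshake in alpha_pll_lucid_set_rate(): at the 19.2 MHz reference these SoCs use, the "2 reference cycles" the hardware needs before the ACK is valid come to roughly 104 ns, so the udelay(1) ahead of the ALPHA_PLL_ACK_LATCH check is a comfortable upper bound.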
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index fbc1f67c7a26..704674a153b6 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -14,6 +14,7 @@ enum {
CLK_ALPHA_PLL_TYPE_BRAMMO,
CLK_ALPHA_PLL_TYPE_FABIA,
CLK_ALPHA_PLL_TYPE_TRION,
+ CLK_ALPHA_PLL_TYPE_LUCID,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -30,6 +31,7 @@ enum {
PLL_OFF_CONFIG_CTL_U1,
PLL_OFF_TEST_CTL,
PLL_OFF_TEST_CTL_U,
+ PLL_OFF_TEST_CTL_U1,
PLL_OFF_STATUS,
PLL_OFF_OPMODE,
PLL_OFF_FRAC,
@@ -94,10 +96,13 @@ struct alpha_pll_config {
u32 alpha_hi;
u32 config_ctl_val;
u32 config_ctl_hi_val;
+ u32 config_ctl_hi1_val;
u32 user_ctl_val;
u32 user_ctl_hi_val;
+ u32 user_ctl_hi1_val;
u32 test_ctl_val;
u32 test_ctl_hi_val;
+ u32 test_ctl_hi1_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
@@ -123,10 +128,17 @@ extern const struct clk_ops clk_alpha_pll_fabia_ops;
extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_fabia_ops;
+extern const struct clk_ops clk_alpha_pll_lucid_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_lucid_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+
extern const struct clk_ops clk_trion_fixed_pll_ops;
extern const struct clk_ops clk_trion_pll_postdiv_ops;
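The PLL_RATE_MARGIN checks guarding both the Fabia and Lucid set_rate paths exist because the fractional (alpha) field has finite width, so alpha_pll_round_rate() rounds the fraction up. A worked example with hypothetical numbers, assuming a 16-bit alpha field and a 19.2 MHz reference:

/*
 * request = 700000000 Hz, parent = 19200000 Hz, alpha_width = 16
 *
 *   700000000 / 19200000 = 36.45833...            => l = 36
 *   0.45833... * 2^16    = 30037.33, rounded up   => a = 30038
 *   rrate = 19200000 * 36 + ((19200000 * 30038) >> 16)
 *         = 691200000 + 8800195 = 700000195 Hz
 */

The rounded rate overshoots the request by about 195 Hz. The margin tolerates small overshoot of this kind, while anything larger (or any undershoot) is rejected so that callers learn to pass rates obtained from round_rate().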
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index 9e3110a71f12..f71d228fd6bd 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -543,10 +543,45 @@ static const struct rpm_clk_desc rpm_clk_apq8064 = {
.num_clks = ARRAY_SIZE(apq8064_clks),
};
+/* ipq806x */
+DEFINE_CLK_RPM(ipq806x, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(ipq806x, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(ipq806x, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(ipq806x, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM(ipq806x, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(ipq806x, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(ipq806x, nss_fabric_0_clk, nss_fabric_0_a_clk, QCOM_RPM_NSS_FABRIC_0_CLK);
+DEFINE_CLK_RPM(ipq806x, nss_fabric_1_clk, nss_fabric_1_a_clk, QCOM_RPM_NSS_FABRIC_1_CLK);
+
+static struct clk_rpm *ipq806x_clks[] = {
+ [RPM_APPS_FABRIC_CLK] = &ipq806x_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &ipq806x_afab_a_clk,
+ [RPM_CFPB_CLK] = &ipq806x_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &ipq806x_cfpb_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &ipq806x_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &ipq806x_daytona_a_clk,
+ [RPM_EBI1_CLK] = &ipq806x_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &ipq806x_ebi1_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &ipq806x_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &ipq806x_sfab_a_clk,
+ [RPM_SFPB_CLK] = &ipq806x_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &ipq806x_sfpb_a_clk,
+ [RPM_NSS_FABRIC_0_CLK] = &ipq806x_nss_fabric_0_clk,
+ [RPM_NSS_FABRIC_0_A_CLK] = &ipq806x_nss_fabric_0_a_clk,
+ [RPM_NSS_FABRIC_1_CLK] = &ipq806x_nss_fabric_1_clk,
+ [RPM_NSS_FABRIC_1_A_CLK] = &ipq806x_nss_fabric_1_a_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_ipq806x = {
+ .clks = ipq806x_clks,
+ .num_clks = ARRAY_SIZE(ipq806x_clks),
+};
+
static const struct of_device_id rpm_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
+ { .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
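Nothing in the new ipq806x table changes how the clocks are consumed: once a board's devicetree carries the qcom,rpmcc-ipq806x compatible, drivers use the ordinary clk API. A hypothetical consumer fragment, with the clock-names entry invented for illustration:

static int example_probe_get_nss_clk(struct platform_device *pdev)
{
	struct clk *nss0;

	/* "nss_fabric_0" is a made-up clock-names binding */
	nss0 = devm_clk_get(&pdev->dev, "nss_fabric_0");
	if (IS_ERR(nss0))
		return PTR_ERR(nss0);

	return clk_prepare_enable(nss0);
}

The paired *_a_clk entries are the active-set votes; a driver picks whichever handle matches the context in which its vote should apply.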
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 98a118c1e244..e2c669b08aff 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
!= (c->aggr_state & BIT(state));
}
+static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state,
+ struct tcs_cmd *cmd, bool wait)
+{
+ if (wait)
+ return rpmh_write(c->dev, state, cmd, 1);
+
+ return rpmh_write_async(c->dev, state, cmd, 1);
+}
+
static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
{
struct tcs_cmd cmd = { 0 };
u32 cmd_state, on_val;
enum rpmh_state state = RPMH_SLEEP_STATE;
int ret;
+ bool wait;
cmd.addr = c->res_addr;
cmd_state = c->aggr_state;
@@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
if (cmd_state & BIT(state))
cmd.data = on_val;
- ret = rpmh_write_async(c->dev, state, &cmd, 1);
+ wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
+ ret = clk_rpmh_send(c, state, &cmd, wait);
if (ret) {
dev_err(c->dev, "set %s state of %s failed: (%d)\n",
!state ? "sleep" :
@@ -216,7 +227,7 @@ static int clk_rpmh_prepare(struct clk_hw *hw)
mutex_unlock(&rpmh_clk_lock);
return ret;
-};
+}
static void clk_rpmh_unprepare(struct clk_hw *hw)
{
@@ -248,38 +259,33 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
{
struct tcs_cmd cmd = { 0 };
u32 cmd_state;
- int ret;
+ int ret = 0;
mutex_lock(&rpmh_clk_lock);
-
- cmd_state = 0;
if (enable) {
cmd_state = 1;
if (c->aggr_state)
cmd_state = c->aggr_state;
+ } else {
+ cmd_state = 0;
}
- if (c->last_sent_aggr_state == cmd_state) {
- mutex_unlock(&rpmh_clk_lock);
- return 0;
- }
+ if (c->last_sent_aggr_state != cmd_state) {
+ cmd.addr = c->res_addr;
+ cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
- cmd.addr = c->res_addr;
- cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
-
- ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
- if (ret) {
- dev_err(c->dev, "set active state of %s failed: (%d)\n",
- c->res_name, ret);
- mutex_unlock(&rpmh_clk_lock);
- return ret;
+ ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
+ if (ret) {
+ dev_err(c->dev, "set active state of %s failed: (%d)\n",
+ c->res_name, ret);
+ } else {
+ c->last_sent_aggr_state = cmd_state;
+ }
}
- c->last_sent_aggr_state = cmd_state;
-
mutex_unlock(&rpmh_clk_lock);
- return 0;
+ return ret;
}
static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
@@ -287,14 +293,14 @@ static int clk_rpmh_bcm_prepare(struct clk_hw *hw)
struct clk_rpmh *c = to_clk_rpmh(hw);
return clk_rpmh_bcm_send_cmd(c, true);
-};
+}
static void clk_rpmh_bcm_unprepare(struct clk_hw *hw)
{
struct clk_rpmh *c = to_clk_rpmh(hw);
clk_rpmh_bcm_send_cmd(c, false);
-};
+}
static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
@@ -310,7 +316,7 @@ static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
clk_rpmh_bcm_send_cmd(c, true);
return 0;
-};
+}
static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
@@ -404,6 +410,28 @@ static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
.num_clks = ARRAY_SIZE(sc7180_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sm8250, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 2);
+
+static struct clk_hw *sm8250_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sm8250_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sm8250_ln_bb_clk1_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8250 = {
+ .clks = sm8250_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8250_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -490,6 +518,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
+ { .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
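The new clk_rpmh_send() wrapper encodes a single policy: only an enable aimed at the active set has to block, because the caller is about to use the clock; disables and sleep/wake-set updates may be fired off asynchronously. A sketch of that decision, illustrative rather than part of the patch:

static bool rpmh_clk_must_wait(enum rpmh_state state, u32 cmd_state)
{
	/*
	 * cmd_state != 0 means "turn the clock on", and
	 * RPMH_ACTIVE_ONLY_STATE is the set that takes effect right away.
	 * Every other combination is applied at the next sleep/wake
	 * transition, so rpmh_write_async() is sufficient there.
	 */
	return cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
}

This is exactly the expression assigned to `wait` in clk_rpmh_send_aggregate_command(), and the `enable` argument plays the same role in the call from clk_rpmh_bcm_send_cmd().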
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 0bbfef9fa6de..52f63ad787ba 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -525,6 +525,55 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
.num_clks = ARRAY_SIZE(msm8974_clks),
};
+
+/* msm8976 */
+DEFINE_CLK_SMD_RPM(msm8976, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8976, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8976, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk,
+ QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8976, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8976, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM_QDSS(msm8976, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, div_clk2, div_clk2_a, 12);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8976, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8976, bb_clk2_pin, bb_clk2_a_pin, 2);
+
+static struct clk_smd_rpm *msm8976_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8976_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8976_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8976_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8976_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8976_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8976_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8976_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8976_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8976_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8976_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8976_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8976_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &msm8976_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8976_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8976_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8976_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8976_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8976_bb_clk2_a_pin,
+ [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8976_mmssnoc_ahb_clk,
+ [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8976_mmssnoc_ahb_a_clk,
+ [RPM_SMD_DIV_CLK2] = &msm8976_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &msm8976_div_clk2_a,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
+ .clks = msm8976_clks,
+ .num_clks = ARRAY_SIZE(msm8976_clks),
+};
+
/* msm8996 */
DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
@@ -720,6 +769,7 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
+ { .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 },
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
{ .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b0eee0903807..a8456e09c44d 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -1224,6 +1224,8 @@ static struct clk_rcg prng_src = {
.parent_map = gcc_pxo_pll8_map,
},
.clkr = {
+ .enable_reg = 0x2e80,
+ .enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "prng_src",
.parent_names = gcc_pxo_pll8,
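The two fields added here are what the shared clk_regmap enable helpers key off; without them prng_src had no enable bit wired up at all. Conceptually, the enable path reduces to the following sketch (ignoring the enable_is_inverted case that the real clk_enable_regmap() also handles):

static int enable_regmap_sketch(struct clk_regmap *rclk)
{
	/* for prng_src above: set bit 11 of register 0x2e80 */
	return regmap_update_bits(rclk->regmap, rclk->enable_reg,
				  rclk->enable_mask, rclk->enable_mask);
}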
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 7f59fb8da033..6a51b5b5fc19 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -2165,6 +2165,71 @@ static struct clk_branch gcc_video_xo_clk = {
},
};
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mfab_axis_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mfab_axis_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_nav_axi_clk = {
+ .halt_reg = 0x8a00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_nav_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
+ .halt_reg = 0x8a154,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_memnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x77004,
.pd = {
@@ -2336,6 +2401,11 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GPLL7] = &gpll7.clkr,
[GPLL4] = &gpll4.clkr,
[GPLL1] = &gpll1.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_NAV_AXI_CLK] = &gcc_mss_nav_axi_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
};
static const struct qcom_reset_map gcc_sc7180_resets[] = {
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index 20877214acff..ef98fdc51755 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -21,6 +21,7 @@
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_BI_TCXO,
@@ -3171,6 +3172,18 @@ static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
},
};
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_sec_clkref_clk = {
.halt_reg = 0x8c028,
.halt_check = BRANCH_HALT,
@@ -3218,6 +3231,18 @@ static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
},
};
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
/*
* Clock ON depends on external parent 'config noc', so we can't poll
* the halt bit; just delay. Also mark as critical for video boot.
@@ -3292,6 +3317,24 @@ static struct clk_branch gcc_video_xo_clk = {
},
};
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
@@ -3480,10 +3523,12 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
[GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
[GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
[GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
[GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
[GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
[GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
[GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
[GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
@@ -3527,6 +3572,11 @@ static const struct qcom_reset_map gcc_sm8150_resets[] = {
[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
};
+static struct gdsc *gcc_sm8150_gdscs[] = {
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+};
+
static const struct regmap_config gcc_sm8150_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3541,6 +3591,8 @@ static const struct qcom_cc_desc gcc_sm8150_desc = {
.num_clks = ARRAY_SIZE(gcc_sm8150_clocks),
.resets = gcc_sm8150_resets,
.num_resets = ARRAY_SIZE(gcc_sm8150_resets),
+ .gdscs = gcc_sm8150_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8150_gdscs),
};
static const struct of_device_id gcc_sm8150_match_table[] = {
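Both new pipe clocks use BRANCH_HALT_SKIP because they are clocked by the USB PHY rather than a GCC-internal source, so their halt bits cannot be polled reliably; this is the same reasoning as the 'config noc' comment above. For reference, the halt_check modes this file relies on (paraphrased from their definitions in clk-branch.h):

/*
 * BRANCH_HALT        poll the halt bit until the branch is really off/on
 * BRANCH_HALT_VOTED  enable is a shared vote; only poll when enabling,
 *                    since another voter may legitimately keep it running
 * BRANCH_HALT_SKIP   no reliable halt bit (e.g. an externally clocked
 *                    PHY pipe clock); do not poll at all
 * BRANCH_HALT_DELAY  no bit to check at all; just delay briefly
 */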
diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
new file mode 100644
index 000000000000..6cb6617b8d88
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8250.c
@@ -0,0 +1,3690 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm8250.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x1c000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL9_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "aud_ref_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b038,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d038,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x17730,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x17860,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e270,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e4d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e600,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
+ F(105495, P_BI_TCXO, 2, 1, 91),
+ { }
+};
+
+static struct clk_rcg2 gcc_tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
+ .cmd_rcgr = 0x75024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = {
+ .cmd_rcgr = 0x7506c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
+ .cmd_rcgr = 0x750a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = {
+ .cmd_rcgr = 0x75084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7706c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0x10020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x10038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0x10064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x48028,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0xf050,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0x10050,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
+ .halt_reg = 0x9000c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_card_axi_clk = {
+ .halt_reg = 0x750cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x750cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x750cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0x10080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x10080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1007c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = {
+ .halt_reg = 0x8d058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_en = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_axi_clk = {
+ .halt_reg = 0x73008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x73008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_cfg_ahb_clk = {
+ .halt_reg = 0x73004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x73004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_dma_clk = {
+ .halt_reg = 0x4d00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_dma_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_refgen_clk = {
+ .halt_reg = 0x6f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie1_phy_refgen_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_phy_refgen_clk = {
+ .halt_reg = 0x6f034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie2_phy_refgen_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0x6024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0x602c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mdm_clkref_en = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_mdm_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_wifi_clkref_en = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_wifi_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_wigig_clkref_en = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_wigig_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1713c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1726c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1739c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x175fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1772c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x1785c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23138,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1826c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1839c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x185fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x23278,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x23270,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e26c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e39c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e5fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_tsif_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_1x_clkref_en = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_1x_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ahb_clk = {
+ .halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_axi_clk = {
+ .halt_reg = 0x75010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x75010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_ice_core_clk = {
+ .halt_reg = 0x75064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x75064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x75064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_phy_aux_clk = {
+ .halt_reg = 0x7509c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7509c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_reg = 0x75020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x75020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_reg = 0x750b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x750b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_unipro_core_clk = {
+ .halt_reg = 0x7505c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7505c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_card_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x7709c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7709c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7709c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77020,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770b8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7705c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7705c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0x10010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x10010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xf05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_en = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0x10054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_2_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "pcie_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x10004,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc = {
+ .gdscr = 0x7d06c,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sm8250_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
+ [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
+ [GCC_NPU_BWMON_CFG_AHB_CLK] = &gcc_npu_bwmon_cfg_ahb_clk.clkr,
+ [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
+ [GCC_PCIE2_PHY_REFGEN_CLK] = &gcc_pcie2_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_MDM_CLKREF_EN] = &gcc_pcie_mdm_clkref_en.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PCIE_WIFI_CLKREF_EN] = &gcc_pcie_wifi_clkref_en.clkr,
+ [GCC_PCIE_WIGIG_CLKREF_EN] = &gcc_pcie_wigig_clkref_en.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_1X_CLKREF_EN] = &gcc_ufs_1x_clkref_en.clkr,
+ [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr,
+ [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr,
+ [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr,
+ [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
+ [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_card_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_EN] = &gcc_usb3_sec_clkref_en.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL9] = &gpll9.clkr,
+};
+
+static struct gdsc *gcc_sm8250_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_2_GDSC] = &pcie_2_gdsc,
+ [UFS_CARD_GDSC] = &ufs_card_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB30_SEC_GDSC] = &usb30_sec_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sm8250_resets[] = {
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_NPU_BWMON_BCR] = { 0x73000 },
+ [GCC_NPU_BCR] = { 0x4d000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e000 },
+ [GCC_PCIE_2_BCR] = { 0x6000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x1f014 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x1f020 },
+ [GCC_PCIE_2_PHY_BCR] = { 0x1f01c },
+ [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0x1f028 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_CARD_BCR] = { 0x75000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB30_SEC_BCR] = { 0x10000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0xb024, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0xb028, 2 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+};
+
+static const struct regmap_config gcc_sm8250_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9c100,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8250_desc = {
+ .config = &gcc_sm8250_regmap_config,
+ .clks = gcc_sm8250_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8250_clocks),
+ .resets = gcc_sm8250_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8250_resets),
+ .gdscs = gcc_sm8250_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8250_gdscs),
+};
+
+static const struct of_device_id gcc_sm8250_match_table[] = {
+ { .compatible = "qcom,gcc-sm8250" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8250_match_table);
+
+static int gcc_sm8250_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8250_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Disable the GPLL0 active input to NPU and GPU
+ * via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+ /*
+ * Keep the clocks always-ON
+ * GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, GCC_DISP_AHB_CLK,
+ * GCC_CPUSS_DVM_BUS_CLK, GCC_GPU_CFG_AHB_CLK,
+ * GCC_SYS_NOC_CPUSS_AHB_CLK
+ */
+ regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x4818c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x52000, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sm8250_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8250_driver = {
+ .probe = gcc_sm8250_probe,
+ .driver = {
+ .name = "gcc-sm8250",
+ .of_match_table = gcc_sm8250_match_table,
+ },
+};
+
+static int __init gcc_sm8250_init(void)
+{
+ return platform_driver_register(&gcc_sm8250_driver);
+}
+subsys_initcall(gcc_sm8250_init);
+
+static void __exit gcc_sm8250_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8250_driver);
+}
+module_exit(gcc_sm8250_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8250 Driver");
+MODULE_LICENSE("GPL v2");
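For context, a minimal sketch of how a leaf driver might consume the clocks this controller registers; the device wiring, clock-names, and target rate below are illustrative assumptions, not part of this patch, and only the standard clk API calls are taken as given.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Illustrative only: assumes a consumer DT node along the lines of
 *   clocks = <&gcc GCC_SDCC2_APPS_CLK>, <&gcc GCC_SDCC2_AHB_CLK>;
 *   clock-names = "core", "iface";
 */
static int example_sdhci_clk_setup(struct device *dev)
{
	struct clk *core;
	int ret;

	core = devm_clk_get(dev, "core");	/* GCC_SDCC2_APPS_CLK */
	if (IS_ERR(core))
		return PTR_ERR(core);

	/* The RCG rounds the request to a rate from its frequency table */
	ret = clk_set_rate(core, 202000000);
	if (ret)
		return ret;

	return clk_prepare_enable(core);
}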
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index a96c0b945de2..7b656b6aeced 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -170,8 +170,45 @@ static struct gdsc cx_gdsc = {
.flags = VOTABLE,
};
+/*
+ * On SC7180 the GPU GX domain is *almost* entirely controlled by the GMU
+ * running in the CX domain so the CPU doesn't need to know anything about the
+ * GX domain EXCEPT....
+ *
+ * Hardware constraints dictate that the GX be powered down before the CX. If
+ * the GMU crashes it could leave the GX on. In order to successfully bring
+ * back the device, the CPU needs to disable the GX headswitch. Since there is
+ * no sane way to reach in and touch that register from deep inside the GPU
+ * driver, we set up the infrastructure needed to guarantee that the GX is off
+ * in this super special case. We do this by defining a GX gdsc with a dummy
+ * enable function and a "default" disable function.
+ *
+ * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
+ * driver. During power up, nothing happens from the CPU side (the GMU powers
+ * the GX up normally), but during power down this ensures that the GX domain
+ * is *really* off - a semi-standard way of doing what we need.
+ */
+static int gx_gdsc_enable(struct generic_pm_domain *domain)
+{
+ /* Do nothing but give genpd the impression that we were successful */
+ return 0;
+}
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gx_gdsc",
+ .power_on = gx_gdsc_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
static struct gdsc *gpu_cc_sc7180_gdscs[] = {
[CX_GDSC] = &cx_gdsc,
+ [GX_GDSC] = &gx_gdsc,
};
static struct clk_regmap *gpu_cc_sc7180_clocks[] = {
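The comment in this hunk points at genpd_dev_pm_attach_by_name() as the consumer-side hook; a hedged sketch of how a GPU driver might use the resulting virtual device to force GX off after a GMU crash. The "gx" domain name, the function, and the error handling are assumptions for illustration, not taken from this patch.

#include <linux/err.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_gpu_force_gx_off(struct device *dev)
{
	struct device *gxpd;

	/* "gx" is an assumed power-domain name in the consumer's DT node */
	gxpd = dev_pm_domain_attach_by_name(dev, "gx");
	if (IS_ERR_OR_NULL(gxpd))
		return gxpd ? PTR_ERR(gxpd) : -ENODEV;

	/* Enable hits the dummy above; the paired disable really gates GX */
	pm_runtime_get_sync(gxpd);
	pm_runtime_put_sync(gxpd);

	dev_pm_domain_detach(gxpd, true);
	return 0;
}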
diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c
new file mode 100644
index 000000000000..673fa1a4f734
--- /dev/null
+++ b/drivers/clk/qcom/mss-sc7180.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,mss-sc7180.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+
+static struct clk_branch mss_axi_nav_clk = {
+ .halt_reg = 0x20bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mss_axi_nav_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "gcc_mss_nav_axi",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mss_axi_crypto_clk = {
+ .halt_reg = 0x20cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mss_axi_crypto_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "gcc_mss_mfab_axis",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct regmap_config mss_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = 0x41aa0cc,
+};
+
+static struct clk_regmap *mss_sc7180_clocks[] = {
+ [MSS_AXI_CRYPTO_CLK] = &mss_axi_crypto_clk.clkr,
+ [MSS_AXI_NAV_CLK] = &mss_axi_nav_clk.clkr,
+};
+
+static const struct qcom_cc_desc mss_sc7180_desc = {
+ .config = &mss_regmap_config,
+ .clks = mss_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(mss_sc7180_clocks),
+};
+
+static int mss_sc7180_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "cfg_ahb");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ ret = qcom_cc_probe(pdev, &mss_sc7180_desc);
+ if (ret < 0)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int mss_sc7180_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mss_sc7180_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id mss_sc7180_match_table[] = {
+ { .compatible = "qcom,sc7180-mss" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mss_sc7180_match_table);
+
+static struct platform_driver mss_sc7180_driver = {
+ .probe = mss_sc7180_probe,
+ .remove = mss_sc7180_remove,
+ .driver = {
+ .name = "sc7180-mss",
+ .of_match_table = mss_sc7180_match_table,
+ .pm = &mss_sc7180_pm_ops,
+ },
+};
+
+static int __init mss_sc7180_init(void)
+{
+ return platform_driver_register(&mss_sc7180_driver);
+}
+subsys_initcall(mss_sc7180_init);
+
+static void __exit mss_sc7180_exit(void)
+{
+ platform_driver_unregister(&mss_sc7180_driver);
+}
+module_exit(mss_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI MSS SC7180 Driver");
+MODULE_LICENSE("GPL v2");
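Because the driver above wires pm_clk_suspend()/pm_clk_resume() into its runtime-PM ops, the "cfg_ahb" clock simply tracks the device's runtime-PM state. A hedged sketch of what a register-access path then looks like; the helper itself is illustrative, only the pm_runtime/pm_clk coupling is from the patch.

#include <linux/pm_runtime.h>

static int example_clocked_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume -> pm_clk_resume(): cfg_ahb on */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... register accesses through the now-clocked interface ... */

	pm_runtime_put(dev);		/* idle -> pm_clk_suspend(): cfg_ahb off */
	return 0;
}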
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 250d8165167a..ac2dd92ce2ef 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -20,7 +20,7 @@ config CLK_RENESAS
select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
select CLK_R8A7792 if ARCH_R8A7792
select CLK_R8A7794 if ARCH_R8A7794
- select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 || ARCH_R8A7795
+ select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951
select CLK_R8A77960 if ARCH_R8A77960
select CLK_R8A77961 if ARCH_R8A77961
select CLK_R8A77965 if ARCH_R8A77965
@@ -161,6 +161,7 @@ config CLK_RCAR_GEN3_CPG
config CLK_RCAR_USB2_CLOCK_SEL
bool "Renesas R-Car USB2 clock selector support"
depends on ARCH_RENESAS || COMPILE_TEST
+ select RESET_CONTROLLER
help
This is a driver for R-Car USB2 clock selector
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index fbc8c75f4314..ff5b3020cb03 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -44,6 +44,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -70,6 +71,12 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A7795_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A7795_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A7795_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -242,6 +249,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("can-fd", 914, R8A7795_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A7795_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A7795_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A7795_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A7795_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A7795_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A7795_CLK_CP),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index e8420d3ada94..e8d466dbc7f9 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -46,6 +46,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -72,6 +73,12 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A7796_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -105,6 +112,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x26c),
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cr", R8A7796_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A7796_CLK_CPEX, CLK_EXTAL, 2, 1),
@@ -132,6 +140,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A7796_CLK_CR),
DEF_MOD("cmt3", 300, R8A7796_CLK_R),
DEF_MOD("cmt2", 301, R8A7796_CLK_R),
DEF_MOD("cmt1", 302, R8A7796_CLK_R),
@@ -215,6 +224,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("can-fd", 914, R8A7796_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A7796_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A7796_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A7796_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A7796_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A7796_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A7796_CLK_CP),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index b3af4da2ca74..7a05a2fc1cc6 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -43,6 +43,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -68,6 +69,12 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A77965_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A77965_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A77965_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -99,7 +106,8 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, CLK_SDSRC, 0x268),
DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, CLK_SDSRC, 0x26c),
- DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cr", R8A77965_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77965_CLK_CPEX, CLK_EXTAL, 2, 1),
@@ -127,6 +135,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
+ DEF_MOD("sceg-pub", 229, R8A77965_CLK_CR),
DEF_MOD("cmt3", 300, R8A77965_CLK_R),
DEF_MOD("cmt2", 301, R8A77965_CLK_R),
@@ -215,6 +224,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A77965_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A77965_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A77965_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A77965_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A77965_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A77965_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A77965_CLK_CP),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index ceabf55c21c2..8eda2e3e2480 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -105,6 +105,7 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, CLK_SDSRC, 0x026c),
DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cr", R8A77990_CLK_CR, CLK_PLL1D2, 2, 1),
DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77990_CLK_CPEX, CLK_EXTAL, 4, 1),
@@ -135,6 +136,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("sys-dmac2", 217, R8A77990_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A77990_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77990_CLK_S3D1),
+ DEF_MOD("sceg-pub", 229, R8A77990_CLK_CR),
DEF_MOD("cmt3", 300, R8A77990_CLK_R),
DEF_MOD("cmt2", 301, R8A77990_CLK_R),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 962bb337f2e7..056ebf3e70e2 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -91,6 +91,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_FIXED("s3d4", R8A77995_CLK_S3D4, CLK_S3, 4, 1),
DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cr", R8A77995_CLK_CR, CLK_PLL1D2, 2, 1),
DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A77995_CLK_CPEX, CLK_EXTAL, 4, 1),
@@ -122,6 +123,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("sys-dmac2", 217, R8A77995_CLK_S3D1),
DEF_MOD("sys-dmac1", 218, R8A77995_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77995_CLK_S3D1),
+ DEF_MOD("sceg-pub", 229, R8A77995_CLK_CR),
DEF_MOD("cmt3", 300, R8A77995_CLK_R),
DEF_MOD("cmt2", 301, R8A77995_CLK_R),
DEF_MOD("cmt1", 302, R8A77995_CLK_R),
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index b97f5f9326cf..d4c02986c34e 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#define USB20_CLKSET0 0x00
@@ -26,9 +27,16 @@
#define CLKSET0_PRIVATE BIT(0)
#define CLKSET0_EXTAL_ONLY (CLKSET0_INTCLK_EN | CLKSET0_PRIVATE)
+static const struct clk_bulk_data rcar_usb2_clocks[] = {
+ { .id = "ehci_ohci", },
+ { .id = "hs-usb-if", },
+};
+
struct usb2_clock_sel_priv {
void __iomem *base;
struct clk_hw hw;
+ struct clk_bulk_data clks[ARRAY_SIZE(rcar_usb2_clocks)];
+ struct reset_control *rsts;
bool extal;
bool xtal;
};
@@ -53,14 +61,32 @@ static void usb2_clock_sel_disable_extal_only(struct usb2_clock_sel_priv *priv)
static int usb2_clock_sel_enable(struct clk_hw *hw)
{
- usb2_clock_sel_enable_extal_only(to_priv(hw));
+ struct usb2_clock_sel_priv *priv = to_priv(hw);
+ int ret;
+
+ ret = reset_control_deassert(priv->rsts);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clks), priv->clks);
+ if (ret) {
+ reset_control_assert(priv->rsts);
+ return ret;
+ }
+
+ usb2_clock_sel_enable_extal_only(priv);
return 0;
}
static void usb2_clock_sel_disable(struct clk_hw *hw)
{
- usb2_clock_sel_disable_extal_only(to_priv(hw));
+ struct usb2_clock_sel_priv *priv = to_priv(hw);
+
+ usb2_clock_sel_disable_extal_only(priv);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clks), priv->clks);
+ reset_control_assert(priv->rsts);
}
/*
@@ -119,6 +145,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
struct usb2_clock_sel_priv *priv;
struct clk *clk;
struct clk_init_data init;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -128,6 +155,15 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ memcpy(priv->clks, rcar_usb2_clocks, sizeof(priv->clks));
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clks), priv->clks);
+ if (ret < 0)
+ return ret;
+
+ priv->rsts = devm_reset_control_array_get(dev, true, false);
+ if (IS_ERR(priv->rsts))
+ return PTR_ERR(priv->rsts);
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 4abe7ff31f53..975454a3dd72 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -51,9 +51,9 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
u16 degrees;
u32 delay_num = 0;
- /* See the comment for rockchip_mmc_set_phase below */
+ /* Constant signal, no measurable phase shift */
if (!rate)
- return -EINVAL;
+ return 0;
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index dad31308c071..1949ae7851b2 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -356,10 +356,6 @@ struct samsung_clk_provider * __init samsung_cmu_register_one(
}
ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
- if (!ctx) {
- panic("%s: unable to allocate ctx\n", __func__);
- return ctx;
- }
if (cmu->pll_clks)
samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index 54a464fa63e0..8be4722f6064 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -65,54 +65,49 @@ static const struct clk_ops dbgclk_ops = {
.get_parent = socfpga_gate_get_parent,
};
-struct clk *s10_register_gate(const char *name, const char *parent_name,
- const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *regbase, unsigned long gate_reg,
- unsigned long gate_idx, unsigned long div_reg,
- unsigned long div_offset, u8 div_width,
- unsigned long bypass_reg, u8 bypass_shift,
- u8 fixed_div)
+struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __iomem *regbase)
{
struct clk *clk;
struct socfpga_gate_clk *socfpga_clk;
struct clk_init_data init;
+ const char * const *parent_names = clks->parent_names;
+ const char *parent_name = clks->parent_name;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (!socfpga_clk)
return NULL;
- socfpga_clk->hw.reg = regbase + gate_reg;
- socfpga_clk->hw.bit_idx = gate_idx;
+ socfpga_clk->hw.reg = regbase + clks->gate_reg;
+ socfpga_clk->hw.bit_idx = clks->gate_idx;
gateclk_ops.enable = clk_gate_ops.enable;
gateclk_ops.disable = clk_gate_ops.disable;
- socfpga_clk->fixed_div = fixed_div;
+ socfpga_clk->fixed_div = clks->fixed_div;
- if (div_reg)
- socfpga_clk->div_reg = regbase + div_reg;
+ if (clks->div_reg)
+ socfpga_clk->div_reg = regbase + clks->div_reg;
else
socfpga_clk->div_reg = NULL;
- socfpga_clk->width = div_width;
- socfpga_clk->shift = div_offset;
+ socfpga_clk->width = clks->div_width;
+ socfpga_clk->shift = clks->div_offset;
- if (bypass_reg)
- socfpga_clk->bypass_reg = regbase + bypass_reg;
+ if (clks->bypass_reg)
+ socfpga_clk->bypass_reg = regbase + clks->bypass_reg;
else
socfpga_clk->bypass_reg = NULL;
- socfpga_clk->bypass_shift = bypass_shift;
+ socfpga_clk->bypass_shift = clks->bypass_shift;
- if (streq(name, "cs_pdbg_clk"))
+ if (streq(clks->name, "cs_pdbg_clk"))
init.ops = &dbgclk_ops;
else
init.ops = &gateclk_ops;
- init.name = name;
- init.flags = flags;
+ init.name = clks->name;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names ? parent_names : &parent_name;
socfpga_clk->hw.hw.init = &init;
@@ -121,6 +116,5 @@ struct clk *s10_register_gate(const char *name, const char *parent_name,
kfree(socfpga_clk);
return NULL;
}
-
return clk;
}
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 1a191eeeebba..dd6d4056e9de 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -73,26 +73,27 @@ static const struct clk_ops peri_cnt_clk_ops = {
.get_parent = clk_periclk_get_parent,
};
-struct clk *s10_register_periph(const char *name, const char *parent_name,
- const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *reg, unsigned long offset)
+struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
+ void __iomem *reg)
{
struct clk *clk;
struct socfpga_periph_clk *periph_clk;
struct clk_init_data init;
+ const char *name = clks->name;
+ const char *parent_name = clks->parent_name;
+ const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
return NULL;
- periph_clk->hw.reg = reg + offset;
+ periph_clk->hw.reg = reg + clks->offset;
init.name = name;
init.ops = &peri_c_clk_ops;
- init.flags = flags;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names ? parent_names : &parent_name;
periph_clk->hw.hw.init = &init;
@@ -105,38 +106,37 @@ struct clk *s10_register_periph(const char *name, const char *parent_name,
return clk;
}
-struct clk *s10_register_cnt_periph(const char *name, const char *parent_name,
- const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *regbase, unsigned long offset,
- u8 fixed_divider, unsigned long bypass_reg,
- unsigned long bypass_shift)
+struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks,
+ void __iomem *regbase)
{
struct clk *clk;
struct socfpga_periph_clk *periph_clk;
struct clk_init_data init;
+ const char *name = clks->name;
+ const char *parent_name = clks->parent_name;
+ const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
return NULL;
- if (offset)
- periph_clk->hw.reg = regbase + offset;
+ if (clks->offset)
+ periph_clk->hw.reg = regbase + clks->offset;
else
periph_clk->hw.reg = NULL;
- if (bypass_reg)
- periph_clk->bypass_reg = regbase + bypass_reg;
+ if (clks->bypass_reg)
+ periph_clk->bypass_reg = regbase + clks->bypass_reg;
else
periph_clk->bypass_reg = NULL;
- periph_clk->bypass_shift = bypass_shift;
- periph_clk->fixed_div = fixed_divider;
+ periph_clk->bypass_shift = clks->bypass_shift;
+ periph_clk->fixed_div = clks->fixed_divider;
init.name = name;
init.ops = &peri_cnt_clk_ops;
- init.flags = flags;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names ? parent_names : &parent_name;
periph_clk->hw.hw.init = &init;
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 4705eb544f01..a301bb22f36c 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
/* read VCO1 reg for numerator and denominator */
reg = readl(socfpgaclk->hw.reg);
refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
- vco_freq = (unsigned long long)parent_rate / refdiv;
+
+ vco_freq = parent_rate;
+ do_div(vco_freq, refdiv);
/* Read mdiv and fdiv from the fdbck register */
reg = readl(socfpgaclk->hw.reg + 0x4);
@@ -108,19 +110,20 @@ static struct clk_ops clk_boot_ops = {
.prepare = clk_pll_prepare,
};
-struct clk *s10_register_pll(const char *name, const char * const *parent_names,
- u8 num_parents, unsigned long flags,
- void __iomem *reg, unsigned long offset)
+struct clk *s10_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg)
{
struct clk *clk;
struct socfpga_pll *pll_clk;
struct clk_init_data init;
+ const char *name = clks->name;
+ const char * const *parent_names = clks->parent_names;
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (WARN_ON(!pll_clk))
return NULL;
- pll_clk->hw.reg = reg + offset;
+ pll_clk->hw.reg = reg + clks->offset;
if (streq(name, SOCFPGA_BOOT_CLK))
init.ops = &clk_boot_ops;
@@ -128,9 +131,9 @@ struct clk *s10_register_pll(const char *name, const char * const *parent_names,
init.ops = &clk_pll_ops;
init.name = name;
- init.flags = flags;
+ init.flags = clks->flags;
- init.num_parents = num_parents;
+ init.num_parents = clks->num_parents;
init.parent_names = parent_names;
pll_clk->hw.hw.init = &init;
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 993f3a73c71e..dea7c6c7d269 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -177,9 +177,7 @@ static int s10_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_periph(clks[i].name, clks[i].parent_name,
- clks[i].parent_names, clks[i].num_parents,
- clks[i].flags, base, clks[i].offset);
+ clk = s10_register_periph(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
@@ -198,14 +196,7 @@ static int s10_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *cl
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_cnt_periph(clks[i].name, clks[i].parent_name,
- clks[i].parent_names,
- clks[i].num_parents,
- clks[i].flags, base,
- clks[i].offset,
- clks[i].fixed_divider,
- clks[i].bypass_reg,
- clks[i].bypass_shift);
+ clk = s10_register_cnt_periph(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
@@ -225,16 +216,7 @@ static int s10_clk_register_gate(const struct stratix10_gate_clock *clks,
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_gate(clks[i].name, clks[i].parent_name,
- clks[i].parent_names,
- clks[i].num_parents,
- clks[i].flags, base,
- clks[i].gate_reg,
- clks[i].gate_idx, clks[i].div_reg,
- clks[i].div_offset, clks[i].div_width,
- clks[i].bypass_reg,
- clks[i].bypass_shift,
- clks[i].fixed_div);
+ clk = s10_register_gate(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
@@ -254,10 +236,7 @@ static int s10_clk_register_pll(const struct stratix10_pll_clock *clks,
int i;
for (i = 0; i < nums; i++) {
- clk = s10_register_pll(clks[i].name, clks[i].parent_names,
- clks[i].num_parents,
- clks[i].flags, base,
- clks[i].offset);
+ clk = s10_register_pll(&clks[i], base);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index e8e121907952..fcabef42249c 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -60,21 +60,12 @@ struct stratix10_gate_clock {
u8 fixed_div;
};
-struct clk *s10_register_pll(const char *, const char *const *, u8,
- unsigned long, void __iomem *, unsigned long);
-
-struct clk *s10_register_periph(const char *, const char *,
- const char * const *, u8, unsigned long,
- void __iomem *, unsigned long);
-struct clk *s10_register_cnt_periph(const char *, const char *,
- const char * const *, u8,
- unsigned long, void __iomem *,
- unsigned long, u8, unsigned long,
- unsigned long);
-struct clk *s10_register_gate(const char *, const char *,
- const char * const *, u8,
- unsigned long, void __iomem *,
- unsigned long, unsigned long,
- unsigned long, unsigned long, u8,
- unsigned long, u8, u8);
+struct clk *s10_register_pll(const struct stratix10_pll_clock *,
+ void __iomem *);
+struct clk *s10_register_periph(const struct stratix10_perip_c_clock *,
+ void __iomem *);
+struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *,
+ void __iomem *);
+struct clk *s10_register_gate(const struct stratix10_gate_clock *,
+ void __iomem *);
#endif /* __STRATIX10_CLK_H */
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 3c219af25100..e18c80fbe804 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -13,4 +13,12 @@ config SPRD_SC9860_CLK
tristate "Support for the Spreadtrum SC9860 clocks"
depends on (ARM64 && ARCH_SPRD) || COMPILE_TEST
default ARM64 && ARCH_SPRD
+
+config SPRD_SC9863A_CLK
+ tristate "Support for the Spreadtrum SC9863A clocks"
+ depends on (ARM64 && ARCH_SPRD) || COMPILE_TEST
+ default ARM64 && ARCH_SPRD
+ help
+ Support for the global clock controller on sc9863a devices.
+	  Say Y if you want to use peripheral devices on the sc9863a SoC.
endif
diff --git a/drivers/clk/sprd/Makefile b/drivers/clk/sprd/Makefile
index d4c00788d53c..41d90e0d7863 100644
--- a/drivers/clk/sprd/Makefile
+++ b/drivers/clk/sprd/Makefile
@@ -10,3 +10,4 @@ clk-sprd-y += pll.o
## SoC support
obj-$(CONFIG_SPRD_SC9860_CLK) += sc9860-clk.o
+obj-$(CONFIG_SPRD_SC9863A_CLK) += sc9863a-clk.o
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index c0af4779892b..d620bbbcdfc8 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -40,7 +40,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
const struct sprd_clk_desc *desc)
{
void __iomem *base;
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct regmap *regmap;
if (of_find_property(node, "sprd,syscon", NULL)) {
@@ -49,6 +50,13 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
pr_err("%s: failed to get syscon regmap\n", __func__);
return PTR_ERR(regmap);
}
+ } else if (of_device_is_compatible(of_get_parent(dev->of_node),
+ "syscon")) {
+ regmap = device_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap from its parent.\n");
+ return PTR_ERR(regmap);
+ }
} else {
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
diff --git a/drivers/clk/sprd/composite.h b/drivers/clk/sprd/composite.h
index 04ab3f587ee2..adbabbe596b7 100644
--- a/drivers/clk/sprd/composite.h
+++ b/drivers/clk/sprd/composite.h
@@ -18,26 +18,43 @@ struct sprd_comp {
struct sprd_clk_common common;
};
-#define SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, _table, \
- _mshift, _mwidth, _dshift, _dwidth, _flags) \
+#define SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags, _fn) \
struct sprd_comp _struct = { \
.mux = _SPRD_MUX_CLK(_mshift, _mwidth, _table), \
.div = _SPRD_DIV_CLK(_dshift, _dwidth), \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT_PARENTS(_name, \
- _parent, \
- &sprd_comp_ops, \
- _flags), \
+ .hw.init = _fn(_name, _parent, \
+ &sprd_comp_ops, _flags), \
} \
}
-#define SPRD_COMP_CLK(_struct, _name, _parent, _reg, _mshift, \
- _mwidth, _dshift, _dwidth, _flags) \
- SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, \
- NULL, _mshift, _mwidth, \
- _dshift, _dwidth, _flags)
+#define SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, _flags) \
+ SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags, CLK_HW_INIT_PARENTS)
+
+#define SPRD_COMP_CLK(_struct, _name, _parent, _reg, _mshift, \
+ _mwidth, _dshift, _dwidth, _flags) \
+ SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, NULL, \
+ _mshift, _mwidth, _dshift, _dwidth, _flags)
+
+#define SPRD_COMP_CLK_DATA_TABLE(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, \
+ _dwidth, _flags) \
+ SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags, CLK_HW_INIT_PARENTS_DATA)
+
+#define SPRD_COMP_CLK_DATA(_struct, _name, _parent, _reg, _mshift, \
+ _mwidth, _dshift, _dwidth, _flags) \
+ SPRD_COMP_CLK_DATA_TABLE(_struct, _name, _parent, _reg, NULL, \
+ _mshift, _mwidth, _dshift, _dwidth, \
+ _flags)
static inline struct sprd_comp *hw_to_sprd_comp(const struct clk_hw *hw)
{
diff --git a/drivers/clk/sprd/div.h b/drivers/clk/sprd/div.h
index 87510e3d0e14..6acfe6b179fc 100644
--- a/drivers/clk/sprd/div.h
+++ b/drivers/clk/sprd/div.h
@@ -35,20 +35,28 @@ struct sprd_div {
struct sprd_clk_common common;
};
-#define SPRD_DIV_CLK(_struct, _name, _parent, _reg, \
- _shift, _width, _flags) \
+#define SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags, _fn) \
struct sprd_div _struct = { \
.div = _SPRD_DIV_CLK(_shift, _width), \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &sprd_div_ops, \
- _flags), \
+ .hw.init = _fn(_name, _parent, \
+ &sprd_div_ops, _flags), \
} \
}
+#define SPRD_DIV_CLK(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags) \
+ SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags, CLK_HW_INIT)
+
+#define SPRD_DIV_CLK_HW(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags) \
+ SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _shift, _width, _flags, CLK_HW_INIT_HW)
+
static inline struct sprd_div *hw_to_sprd_div(const struct clk_hw *hw)
{
struct sprd_clk_common *common = hw_to_sprd_clk_common(hw);
diff --git a/drivers/clk/sprd/gate.c b/drivers/clk/sprd/gate.c
index f59d1936b412..574cfc116bbc 100644
--- a/drivers/clk/sprd/gate.c
+++ b/drivers/clk/sprd/gate.c
@@ -79,6 +79,17 @@ static int sprd_sc_gate_enable(struct clk_hw *hw)
return 0;
}
+
+static int sprd_pll_sc_gate_prepare(struct clk_hw *hw)
+{
+ struct sprd_gate *sg = hw_to_sprd_gate(hw);
+
+	clk_sc_gate_toggle(sg, true);
+	/* Give the PLL behind this gate time to settle before it is used */
+	udelay(sg->udelay);
+
+ return 0;
+}
+
static int sprd_gate_is_enabled(struct clk_hw *hw)
{
struct sprd_gate *sg = hw_to_sprd_gate(hw);
@@ -109,3 +120,9 @@ const struct clk_ops sprd_sc_gate_ops = {
};
EXPORT_SYMBOL_GPL(sprd_sc_gate_ops);
+const struct clk_ops sprd_pll_sc_gate_ops = {
+ .unprepare = sprd_sc_gate_disable,
+ .prepare = sprd_pll_sc_gate_prepare,
+ .is_enabled = sprd_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(sprd_pll_sc_gate_ops);
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index dc352ea55e1f..b55817869367 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -14,37 +14,136 @@ struct sprd_gate {
u32 enable_mask;
u16 flags;
u16 sc_offset;
+ u16 udelay;
struct sprd_clk_common common;
};
-#define SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
- _enable_mask, _flags, _gate_flags, _ops) \
+#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, _fn) \
struct sprd_gate _struct = { \
.enable_mask = _enable_mask, \
.sc_offset = _sc_offset, \
.flags = _gate_flags, \
+ .udelay = _udelay, \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- _ops, \
- _flags), \
+ .hw.init = _fn(_name, _parent, \
+ _ops, _flags), \
} \
}
+#define SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops) \
+ SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, CLK_HW_INIT)
+
+#define SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
+ _enable_mask, _flags, _gate_flags, _ops) \
+ SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, 0, _ops)
+
+#define SPRD_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \
+ _enable_mask, _flags, _gate_flags) \
+ SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
+ _enable_mask, _flags, _gate_flags, \
+ &sprd_sc_gate_ops)
+
#define SPRD_GATE_CLK(_struct, _name, _parent, _reg, \
_enable_mask, _flags, _gate_flags) \
SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, 0, \
_enable_mask, _flags, _gate_flags, \
&sprd_gate_ops)
-#define SPRD_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \
- _enable_mask, _flags, _gate_flags) \
- SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \
+#define SPRD_PLL_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \
_enable_mask, _flags, _gate_flags, \
- &sprd_sc_gate_ops)
+ _udelay) \
+ SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, \
+ &sprd_pll_sc_gate_ops)
+
+
+#define SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, \
+ _flags, _gate_flags, \
+ _udelay, _ops) \
+ SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, \
+ CLK_HW_INIT_HW)
+
+#define SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _ops) \
+ SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, \
+ _flags, _gate_flags, 0, _ops)
+
+#define SPRD_SC_GATE_CLK_HW(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags) \
+ SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, &sprd_sc_gate_ops)
+
+#define SPRD_GATE_CLK_HW(_struct, _name, _parent, _reg, \
+ _enable_mask, _flags, _gate_flags) \
+ SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, 0, \
+ _enable_mask, _flags, _gate_flags, \
+ &sprd_gate_ops)
+
+#define SPRD_PLL_SC_GATE_CLK_HW(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay) \
+ SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, \
+ _flags, _gate_flags, _udelay, \
+ &sprd_pll_sc_gate_ops)
+
+#define SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \
+ _reg, _sc_offset, \
+ _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops) \
+ SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay, _ops, \
+ CLK_HW_INIT_FW_NAME)
+
+#define SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _ops) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \
+ _reg, _sc_offset, \
+ _enable_mask, _flags, \
+ _gate_flags, 0, _ops)
+
+#define SPRD_SC_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, &sprd_sc_gate_ops)
+
+#define SPRD_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \
+ _enable_mask, _flags, _gate_flags) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, 0, \
+ _enable_mask, _flags, _gate_flags, \
+ &sprd_gate_ops)
+
+#define SPRD_PLL_SC_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \
+ _sc_offset, _enable_mask, _flags, \
+ _gate_flags, _udelay) \
+ SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \
+ _reg, _sc_offset, \
+ _enable_mask, _flags, \
+ _gate_flags, _udelay, \
+ &sprd_pll_sc_gate_ops)
static inline struct sprd_gate *hw_to_sprd_gate(const struct clk_hw *hw)
{
@@ -55,5 +154,6 @@ static inline struct sprd_gate *hw_to_sprd_gate(const struct clk_hw *hw)
extern const struct clk_ops sprd_gate_ops;
extern const struct clk_ops sprd_sc_gate_ops;
+extern const struct clk_ops sprd_pll_sc_gate_ops;
#endif /* _SPRD_GATE_H_ */
diff --git a/drivers/clk/sprd/mux.h b/drivers/clk/sprd/mux.h
index 892e4191cc7f..f3cc31dae06f 100644
--- a/drivers/clk/sprd/mux.h
+++ b/drivers/clk/sprd/mux.h
@@ -36,26 +36,40 @@ struct sprd_mux {
.table = _table, \
}
-#define SPRD_MUX_CLK_TABLE(_struct, _name, _parents, _table, \
- _reg, _shift, _width, \
- _flags) \
+#define SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags, _fn) \
struct sprd_mux _struct = { \
.mux = _SPRD_MUX_CLK(_shift, _width, _table), \
.common = { \
.regmap = NULL, \
.reg = _reg, \
- .hw.init = CLK_HW_INIT_PARENTS(_name, \
- _parents, \
- &sprd_mux_ops, \
- _flags), \
+ .hw.init = _fn(_name, _parents, \
+ &sprd_mux_ops, _flags), \
} \
}
+#define SPRD_MUX_CLK_TABLE(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags) \
+ SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags, \
+ CLK_HW_INIT_PARENTS)
+
#define SPRD_MUX_CLK(_struct, _name, _parents, _reg, \
_shift, _width, _flags) \
SPRD_MUX_CLK_TABLE(_struct, _name, _parents, NULL, \
_reg, _shift, _width, _flags)
+#define SPRD_MUX_CLK_DATA_TABLE(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags) \
+ SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \
+ _reg, _shift, _width, _flags, \
+ CLK_HW_INIT_PARENTS_DATA)
+
+#define SPRD_MUX_CLK_DATA(_struct, _name, _parents, _reg, \
+ _shift, _width, _flags) \
+ SPRD_MUX_CLK_DATA_TABLE(_struct, _name, _parents, NULL, \
+ _reg, _shift, _width, _flags)
+
static inline struct sprd_mux *hw_to_sprd_mux(const struct clk_hw *hw)
{
struct sprd_clk_common *common = hw_to_sprd_clk_common(hw);
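The *_DATA variants added above only swap which CLK_HW_INIT_* initializer is passed as _fn, so a mux can describe its parents with struct clk_parent_data (DT fw_name entries or direct clk_hw pointers) instead of global name strings. A hedged usage sketch; the clock name and register fields are made up, and the twpll_* hws are the fixed-factor clocks declared later in this series:

/* Hypothetical mux built on the new macro; parents mix fw_name and clk_hw */
static const struct clk_parent_data example_parents[] = {
	{ .fw_name = "ext-26m" },
	{ .hw = &twpll_48m.hw },
	{ .hw = &twpll_51m2.hw },
};
static SPRD_MUX_CLK_DATA(example_clk, "example-clk", example_parents,
			 0x30, 0, 2, CLK_SET_RATE_PARENT);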
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 640270f51aa5..15791484388f 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -87,11 +87,12 @@ static u32 pll_get_ibias(u64 rate, const u64 *table)
{
u32 i, num = table[0];
- for (i = 1; i < num + 1; i++)
- if (rate <= table[i])
+ /* table[0] indicates the number of items in this table */
+ for (i = 0; i < num; i++)
+ if (rate <= table[i + 1])
break;
- return (i == num + 1) ? num : i;
+ return i == num ? num - 1 : i;
}
static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
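To make the fix above concrete, here is a standalone restatement of the corrected helper with a worked trace against the 4-entry itable defined later in this series, {4, 1000000000, 1200000000, 1400000000, 1600000000}:

/*
 * Mirror of the corrected pll_get_ibias(), for illustration only:
 *   rate = 1100000000 -> i = 1 (first threshold >= rate is table[2])
 *   rate = 1700000000 -> no threshold matches; clamp to num - 1 = 3
 * The old code effectively returned 1-based indices and could return
 * num, one past the last valid ibias index, when nothing matched.
 */
static u32 example_get_ibias(u64 rate, const u64 *table)
{
	u32 i, num = table[0];	/* table[0] holds the entry count */

	for (i = 0; i < num; i++)
		if (rate <= table[i + 1])
			break;

	return i == num ? num - 1 : i;
}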
diff --git a/drivers/clk/sprd/pll.h b/drivers/clk/sprd/pll.h
index e95f11e91ffe..6558f50d0296 100644
--- a/drivers/clk/sprd/pll.h
+++ b/drivers/clk/sprd/pll.h
@@ -61,27 +61,33 @@ struct sprd_pll {
struct sprd_clk_common common;
};
+#define SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, \
+ _regs_num, _itable, _factors, \
+ _udelay, _k1, _k2, _fflag, \
+ _fvco, _fn) \
+ struct sprd_pll _struct = { \
+ .regs_num = _regs_num, \
+ .itable = _itable, \
+ .factors = _factors, \
+ .udelay = _udelay, \
+ .k1 = _k1, \
+ .k2 = _k2, \
+ .fflag = _fflag, \
+ .fvco = _fvco, \
+ .common = { \
+ .regmap = NULL, \
+ .reg = _reg, \
+ .hw.init = _fn(_name, _parent, \
+ &sprd_pll_ops, 0),\
+ }, \
+ }
+
#define SPRD_PLL_WITH_ITABLE_K_FVCO(_struct, _name, _parent, _reg, \
_regs_num, _itable, _factors, \
_udelay, _k1, _k2, _fflag, _fvco) \
- struct sprd_pll _struct = { \
- .regs_num = _regs_num, \
- .itable = _itable, \
- .factors = _factors, \
- .udelay = _udelay, \
- .k1 = _k1, \
- .k2 = _k2, \
- .fflag = _fflag, \
- .fvco = _fvco, \
- .common = { \
- .regmap = NULL, \
- .reg = _reg, \
- .hw.init = CLK_HW_INIT(_name, \
- _parent, \
- &sprd_pll_ops, \
- 0), \
- }, \
- }
+ SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco, CLK_HW_INIT)
#define SPRD_PLL_WITH_ITABLE_K(_struct, _name, _parent, _reg, \
_regs_num, _itable, _factors, \
@@ -96,6 +102,19 @@ struct sprd_pll {
_regs_num, _itable, _factors, \
_udelay, 1000, 1000, 0, 0)
+#define SPRD_PLL_FW_NAME(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco) \
+ SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco, CLK_HW_INIT_FW_NAME)
+
+#define SPRD_PLL_HW(_struct, _name, _parent, _reg, _regs_num, _itable, \
+ _factors, _udelay, _k1, _k2, _fflag, _fvco) \
+ SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \
+ _itable, _factors, _udelay, _k1, _k2, \
+ _fflag, _fvco, CLK_HW_INIT_HW)
+
static inline struct sprd_pll *hw_to_sprd_pll(struct clk_hw *hw)
{
struct sprd_clk_common *common = hw_to_sprd_clk_common(hw);
diff --git a/drivers/clk/sprd/sc9863a-clk.c b/drivers/clk/sprd/sc9863a-clk.c
new file mode 100644
index 000000000000..a0631f7756cf
--- /dev/null
+++ b/drivers/clk/sprd/sc9863a-clk.c
@@ -0,0 +1,1772 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unisoc SC9863A clock driver
+ *
+ * Copyright (C) 2019 Unisoc, Inc.
+ * Author: Chunyan Zhang <chunyan.zhang@unisoc.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/sprd,sc9863a-clk.h>
+
+#include "common.h"
+#include "composite.h"
+#include "div.h"
+#include "gate.h"
+#include "mux.h"
+#include "pll.h"
+
+/* mpll*_gate clocks control cpu cores, they were enabled by default */
+SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc,
+ 0x1000, BIT(0), 0, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x1e8,
+ 0x1000, BIT(0), 0, 0, 240);
+
+static struct sprd_clk_common *sc9863a_pmu_gate_clks[] = {
+ /* address base is 0x402b0000 */
+ &mpll0_gate.common,
+ &dpll0_gate.common,
+ &lpll_gate.common,
+ &gpll_gate.common,
+ &dpll1_gate.common,
+ &mpll1_gate.common,
+ &mpll2_gate.common,
+ &isppll_gate.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_pmu_gate_hws = {
+ .hws = {
+ [CLK_MPLL0_GATE] = &mpll0_gate.common.hw,
+ [CLK_DPLL0_GATE] = &dpll0_gate.common.hw,
+ [CLK_LPLL_GATE] = &lpll_gate.common.hw,
+ [CLK_GPLL_GATE] = &gpll_gate.common.hw,
+ [CLK_DPLL1_GATE] = &dpll1_gate.common.hw,
+ [CLK_MPLL1_GATE] = &mpll1_gate.common.hw,
+ [CLK_MPLL2_GATE] = &mpll2_gate.common.hw,
+ [CLK_ISPPLL_GATE] = &isppll_gate.common.hw,
+ },
+ .num = CLK_PMU_APB_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_pmu_gate_desc = {
+ .clk_clks = sc9863a_pmu_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_pmu_gate_clks),
+ .hw_clks = &sc9863a_pmu_gate_hws,
+};
+
+static const u64 itable[5] = {4, 1000000000, 1200000000,
+ 1400000000, 1600000000};
+
+static const struct clk_bit_field f_twpll[PLL_FACT_MAX] = {
+ { .shift = 95, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 1, .width = 1 }, /* mod_en */
+ { .shift = 2, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 3, .width = 3 }, /* ibias */
+ { .shift = 8, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 0, .width = 0 }, /* postdiv */
+};
+static SPRD_PLL_FW_NAME(twpll, "twpll", "ext-26m", 0x4, 3, itable,
+ f_twpll, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(twpll_768m, "twpll-768m", &twpll.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_384m, "twpll-384m", &twpll.common.hw, 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_192m, "twpll-192m", &twpll.common.hw, 8, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_96m, "twpll-96m", &twpll.common.hw, 16, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_48m, "twpll-48m", &twpll.common.hw, 32, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_24m, "twpll-24m", &twpll.common.hw, 64, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_12m, "twpll-12m", &twpll.common.hw, 128, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_512m, "twpll-512m", &twpll.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_256m, "twpll-256m", &twpll.common.hw, 6, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_128m, "twpll-128m", &twpll.common.hw, 12, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_64m, "twpll-64m", &twpll.common.hw, 24, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_307m2, "twpll-307m2", &twpll.common.hw, 5, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_219m4, "twpll-219m4", &twpll.common.hw, 7, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_170m6, "twpll-170m6", &twpll.common.hw, 9, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_153m6, "twpll-153m6", &twpll.common.hw, 10, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_76m8, "twpll-76m8", &twpll.common.hw, 20, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_51m2, "twpll-51m2", &twpll.common.hw, 30, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_38m4, "twpll-38m4", &twpll.common.hw, 40, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_19m2, "twpll-19m2", &twpll.common.hw, 80, 1, 0);
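As a sanity check on the fixed-factor block above: the name suffixes encode the resulting rates, all derived from TWPLL's implied 1536 MHz parent (twpll-768m is the divide-by-2 tap), e.g.

	twpll-307m2 = 1536 MHz / 5  = 307.2 MHz
	twpll-51m2  = 1536 MHz / 30 = 51.2 MHz
	twpll-19m2  = 1536 MHz / 80 = 19.2 MHz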
+
+static const struct clk_bit_field f_lpll[PLL_FACT_MAX] = {
+ { .shift = 95, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 1, .width = 1 }, /* mod_en */
+ { .shift = 2, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 6, .width = 2 }, /* ibias */
+ { .shift = 8, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 0, .width = 0 }, /* postdiv */
+};
+static SPRD_PLL_HW(lpll, "lpll", &lpll_gate.common.hw, 0x20, 3, itable,
+ f_lpll, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(lpll_409m6, "lpll-409m6", &lpll.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_245m76, "lpll-245m76", &lpll.common.hw, 5, 1, 0);
+
+static const struct clk_bit_field f_gpll[PLL_FACT_MAX] = {
+ { .shift = 95, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 1, .width = 1 }, /* mod_en */
+ { .shift = 2, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 6, .width = 2 }, /* ibias */
+ { .shift = 8, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 80, .width = 1 }, /* postdiv */
+};
+static SPRD_PLL_HW(gpll, "gpll", &gpll_gate.common.hw, 0x38, 3, itable,
+ f_gpll, 240, 1000, 1000, 1, 400000000);
+
+static SPRD_PLL_HW(isppll, "isppll", &isppll_gate.common.hw, 0x50, 3, itable,
+ f_gpll, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(isppll_468m, "isppll-468m", &isppll.common.hw, 2, 1, 0);
+
+static struct sprd_clk_common *sc9863a_pll_clks[] = {
+ /* address base is 0x40353000 */
+ &twpll.common,
+ &lpll.common,
+ &gpll.common,
+ &isppll.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_pll_hws = {
+ .hws = {
+ [CLK_TWPLL] = &twpll.common.hw,
+ [CLK_TWPLL_768M] = &twpll_768m.hw,
+ [CLK_TWPLL_384M] = &twpll_384m.hw,
+ [CLK_TWPLL_192M] = &twpll_192m.hw,
+ [CLK_TWPLL_96M] = &twpll_96m.hw,
+ [CLK_TWPLL_48M] = &twpll_48m.hw,
+ [CLK_TWPLL_24M] = &twpll_24m.hw,
+ [CLK_TWPLL_12M] = &twpll_12m.hw,
+ [CLK_TWPLL_512M] = &twpll_512m.hw,
+ [CLK_TWPLL_256M] = &twpll_256m.hw,
+ [CLK_TWPLL_128M] = &twpll_128m.hw,
+ [CLK_TWPLL_64M] = &twpll_64m.hw,
+ [CLK_TWPLL_307M2] = &twpll_307m2.hw,
+ [CLK_TWPLL_219M4] = &twpll_219m4.hw,
+ [CLK_TWPLL_170M6] = &twpll_170m6.hw,
+ [CLK_TWPLL_153M6] = &twpll_153m6.hw,
+ [CLK_TWPLL_76M8] = &twpll_76m8.hw,
+ [CLK_TWPLL_51M2] = &twpll_51m2.hw,
+ [CLK_TWPLL_38M4] = &twpll_38m4.hw,
+ [CLK_TWPLL_19M2] = &twpll_19m2.hw,
+ [CLK_LPLL] = &lpll.common.hw,
+ [CLK_LPLL_409M6] = &lpll_409m6.hw,
+ [CLK_LPLL_245M76] = &lpll_245m76.hw,
+ [CLK_GPLL] = &gpll.common.hw,
+ [CLK_ISPPLL] = &isppll.common.hw,
+ [CLK_ISPPLL_468M] = &isppll_468m.hw,
+ },
+ .num = CLK_ANLG_PHY_G1_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_pll_desc = {
+ .clk_clks = sc9863a_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_pll_clks),
+ .hw_clks = &sc9863a_pll_hws,
+};
+
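+/*
+ * ibias table format (shared with the itable used above): the first
+ * entry is the number of thresholds that follow, the rest are
+ * ascending Fvco upper bounds in Hz; the index of the first bound at
+ * or above the target rate presumably selects the ibias field value.
+ */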
+static const u64 itable_mpll[6] = {5, 1000000000, 1200000000, 1400000000,
+ 1600000000, 1800000000};
+static SPRD_PLL_HW(mpll0, "mpll0", &mpll0_gate.common.hw, 0x0, 3, itable_mpll,
+ f_gpll, 240, 1000, 1000, 1, 1000000000);
+static SPRD_PLL_HW(mpll1, "mpll1", &mpll1_gate.common.hw, 0x18, 3, itable_mpll,
+ f_gpll, 240, 1000, 1000, 1, 1000000000);
+static SPRD_PLL_HW(mpll2, "mpll2", &mpll2_gate.common.hw, 0x30, 3, itable_mpll,
+ f_gpll, 240, 1000, 1000, 1, 1000000000);
+static CLK_FIXED_FACTOR_HW(mpll2_675m, "mpll2-675m", &mpll2.common.hw, 2, 1, 0);
+
+static struct sprd_clk_common *sc9863a_mpll_clks[] = {
+ /* address base is 0x40359000 */
+ &mpll0.common,
+ &mpll1.common,
+ &mpll2.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_mpll_hws = {
+ .hws = {
+ [CLK_MPLL0] = &mpll0.common.hw,
+ [CLK_MPLL1] = &mpll1.common.hw,
+ [CLK_MPLL2] = &mpll2.common.hw,
+ [CLK_MPLL2_675M] = &mpll2_675m.hw,
+ },
+ .num = CLK_ANLG_PHY_G4_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_mpll_desc = {
+ .clk_clks = sc9863a_mpll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_mpll_clks),
+ .hw_clks = &sc9863a_mpll_hws,
+};
+
+static SPRD_SC_GATE_CLK_FW_NAME(audio_gate, "audio-gate", "ext-26m",
+ 0x4, 0x1000, BIT(8), 0, 0);
+
+static SPRD_PLL_FW_NAME(rpll, "rpll", "ext-26m", 0x10,
+ 3, itable, f_lpll, 240, 1000, 1000, 0, 0);
+
+static CLK_FIXED_FACTOR_HW(rpll_390m, "rpll-390m", &rpll.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(rpll_260m, "rpll-260m", &rpll.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(rpll_195m, "rpll-195m", &rpll.common.hw, 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(rpll_26m, "rpll-26m", &rpll.common.hw, 30, 1, 0);
+
+static struct sprd_clk_common *sc9863a_rpll_clks[] = {
+ /* address base is 0x4035c000 */
+ &audio_gate.common,
+ &rpll.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_rpll_hws = {
+ .hws = {
+ [CLK_AUDIO_GATE] = &audio_gate.common.hw,
+ [CLK_RPLL] = &rpll.common.hw,
+ [CLK_RPLL_390M] = &rpll_390m.hw,
+ [CLK_RPLL_260M] = &rpll_260m.hw,
+ [CLK_RPLL_195M] = &rpll_195m.hw,
+ [CLK_RPLL_26M] = &rpll_26m.hw,
+ },
+ .num = CLK_ANLG_PHY_G5_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_rpll_desc = {
+ .clk_clks = sc9863a_rpll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_rpll_clks),
+ .hw_clks = &sc9863a_rpll_hws,
+};
+
+static const u64 itable_dpll[5] = {4, 1211000000, 1320000000, 1570000000,
+ 1866000000};
+static SPRD_PLL_HW(dpll0, "dpll0", &dpll0_gate.common.hw, 0x0, 3, itable_dpll,
+ f_lpll, 240, 1000, 1000, 0, 0);
+static SPRD_PLL_HW(dpll1, "dpll1", &dpll1_gate.common.hw, 0x18, 3, itable_dpll,
+ f_lpll, 240, 1000, 1000, 0, 0);
+
+static CLK_FIXED_FACTOR_HW(dpll0_933m, "dpll0-933m", &dpll0.common.hw, 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll0_622m3, "dpll0-622m3", &dpll0.common.hw, 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_400m, "dpll1-400m", &dpll0.common.hw, 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_266m7, "dpll1-266m7", &dpll0.common.hw, 6, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_123m1, "dpll1-123m1", &dpll0.common.hw, 13, 1, 0);
+static CLK_FIXED_FACTOR_HW(dpll1_50m, "dpll1-50m", &dpll0.common.hw, 32, 1, 0);
+
+static struct sprd_clk_common *sc9863a_dpll_clks[] = {
+ /* address base is 0x40363000 */
+ &dpll0.common,
+ &dpll1.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_dpll_hws = {
+ .hws = {
+ [CLK_DPLL0] = &dpll0.common.hw,
+ [CLK_DPLL1] = &dpll1.common.hw,
+ [CLK_DPLL0_933M] = &dpll0_933m.hw,
+ [CLK_DPLL0_622M3] = &dpll0_622m3.hw,
+ [CLK_DPLL0_400M] = &dpll1_400m.hw,
+ [CLK_DPLL0_266M7] = &dpll1_266m7.hw,
+ [CLK_DPLL0_123M1] = &dpll1_123m1.hw,
+ [CLK_DPLL0_50M] = &dpll1_50m.hw,
+ },
+ .num = CLK_ANLG_PHY_G7_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_dpll_desc = {
+ .clk_clks = sc9863a_dpll_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_dpll_clks),
+ .hw_clks = &sc9863a_dpll_hws,
+};
+
+static CLK_FIXED_FACTOR_FW_NAME(clk_6m5, "clk-6m5", "ext-26m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_4m3, "clk-4m3", "ext-26m", 6, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_2m, "clk-2m", "ext-26m", 13, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_250k, "clk-250k", "ext-26m", 104, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_25m, "rco-25m", "rco-100m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_4m, "rco-4m", "rco-100m", 25, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_2m, "rco-2m", "rco-100m", 50, 1, 0);
+
+#define SC9863A_MUX_FLAG \
+ (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT)
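+/* Muxes re-read their rate from hardware and never reparent on set_rate. */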
+
+static CLK_FIXED_FACTOR_FW_NAME(clk_13m, "clk-13m", "ext-26m", 2, 1, 0);
+static const struct clk_parent_data emc_clk_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &twpll.common.hw },
+};
+static SPRD_MUX_CLK_DATA(emc_clk, "emc-clk", emc_clk_parents, 0x220,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aon_apb_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(aon_apb, "aon-apb", aon_apb_parents, 0x224,
+ 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data adi_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_38m4.hw },
+ { .hw = &twpll_51m2.hw },
+};
+static SPRD_MUX_CLK_DATA(adi_clk, "adi-clk", adi_parents, 0x228,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aux_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rpll_26m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_COMP_CLK_DATA(aux0_clk, "aux0-clk", aux_parents, 0x22c,
+ 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux1_clk, "aux1-clk", aux_parents, 0x230,
+ 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux2_clk, "aux2-clk", aux_parents, 0x234,
+ 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(probe_clk, "probe-clk", aux_parents, 0x238,
+ 0, 5, 8, 4, 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rpll_26m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_MUX_CLK_DATA(pwm0_clk, "pwm0-clk", pwm_parents, 0x23c,
+ 0, 2, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm1_clk, "pwm1-clk", pwm_parents, 0x240,
+ 0, 2, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm2_clk, "pwm2-clk", pwm_parents, 0x244,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aon_thm_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &clk_250k.hw },
+};
+static SPRD_MUX_CLK_DATA(aon_thm_clk, "aon-thm-clk", aon_thm_parents, 0x25c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data audif_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_38m4.hw },
+ { .hw = &twpll_51m2.hw },
+};
+static SPRD_MUX_CLK_DATA(audif_clk, "audif-clk", audif_parents, 0x264,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data cpu_dap_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(cpu_dap_clk, "cpu-dap-clk", cpu_dap_parents, 0x26c,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data cpu_ts_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(cpu_ts_clk, "cpu-ts-clk", cpu_ts_parents, 0x274,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data djtag_tck_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(djtag_tck_clk, "djtag-tck-clk", djtag_tck_parents, 0x28c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data emc_ref_parents[] = {
+ { .hw = &clk_6m5.hw },
+ { .hw = &clk_13m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(emc_ref_clk, "emc-ref-clk", emc_ref_parents, 0x29c,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data cssys_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &mpll2_675m.hw },
+};
+static SPRD_COMP_CLK_DATA(cssys_clk, "cssys-clk", cssys_parents, 0x2a0,
+ 0, 4, 8, 2, 0);
+
+static const struct clk_parent_data aon_pmu_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-4m" },
+};
+static SPRD_MUX_CLK_DATA(aon_pmu_clk, "aon-pmu-clk", aon_pmu_parents, 0x2a8,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data pmu_26m_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(pmu_26m_clk, "26m-pmu-clk", pmu_26m_parents, 0x2ac,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data aon_tmr_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(aon_tmr_clk, "aon-tmr-clk", aon_tmr_parents, 0x2b0,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data power_cpu_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(power_cpu_clk, "power-cpu-clk", power_cpu_parents, 0x2c4,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data ap_axi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_axi, "ap-axi", ap_axi_parents, 0x2c8,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data sdio_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &rpll_390m.hw },
+ { .hw = &dpll1_400m.hw },
+ { .hw = &lpll_409m6.hw },
+};
+static SPRD_MUX_CLK_DATA(sdio0_2x, "sdio0-2x", sdio_parents, 0x2cc,
+ 0, 3, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio1_2x, "sdio1-2x", sdio_parents, 0x2d4,
+ 0, 3, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio2_2x, "sdio2-2x", sdio_parents, 0x2dc,
+ 0, 3, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(emmc_2x, "emmc-2x", sdio_parents, 0x2e4,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data dpu_parents[] = {
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(dpu_clk, "dpu", dpu_parents, 0x2f4,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data dpu_dpi_parents[] = {
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(dpu_dpi, "dpu-dpi", dpu_dpi_parents, 0x2f8,
+ 0, 2, 8, 4, 0);
+
+static const struct clk_parent_data otg_ref_parents[] = {
+ { .hw = &twpll_12m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(otg_ref_clk, "otg-ref-clk", otg_ref_parents, 0x308,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data sdphy_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_MUX_CLK_DATA(sdphy_apb_clk, "sdphy-apb-clk", sdphy_apb_parents, 0x330,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data alg_io_apb_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_MUX_CLK_DATA(alg_io_apb_clk, "alg-io-apb-clk", alg_io_apb_parents, 0x33c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data gpu_parents[] = {
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &gpll.common.hw },
+};
+static SPRD_COMP_CLK_DATA(gpu_core, "gpu-core", gpu_parents, 0x344,
+ 0, 3, 8, 2, 0);
+static SPRD_COMP_CLK_DATA(gpu_soc, "gpu-soc", gpu_parents, 0x348,
+ 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data mm_emc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_emc, "mm-emc", mm_emc_parents, 0x350,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data mm_ahb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_ahb, "mm-ahb", mm_ahb_parents, 0x354,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data bpc_clk_parents[] = {
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &dpll0_622m3.hw },
+};
+static SPRD_MUX_CLK_DATA(bpc_clk, "bpc-clk", bpc_clk_parents, 0x358,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data dcam_if_parents[] = {
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(dcam_if_clk, "dcam-if-clk", dcam_if_parents, 0x35c,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data isp_parents[] = {
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(isp_clk, "isp-clk", isp_parents, 0x360,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data jpg_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+};
+static SPRD_MUX_CLK_DATA(jpg_clk, "jpg-clk", jpg_parents, 0x364,
+ 0, 2, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(cpp_clk, "cpp-clk", jpg_parents, 0x368,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data sensor_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(sensor0_clk, "sensor0-clk", sensor_parents, 0x36c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor1_clk, "sensor1-clk", sensor_parents, 0x370,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor2_clk, "sensor2-clk", sensor_parents, 0x374,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data mm_vemc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_vemc, "mm-vemc", mm_vemc_parents, 0x378,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static SPRD_MUX_CLK_DATA(mm_vahb, "mm-vahb", mm_ahb_parents, 0x37c,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data vsp_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(clk_vsp, "vsp-clk", vsp_parents, 0x380,
+ 0, 3, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data core_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_512m.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &lpll.common.hw },
+ { .hw = &dpll0.common.hw },
+ { .hw = &mpll2.common.hw },
+ { .hw = &mpll0.common.hw },
+ { .hw = &mpll1.common.hw },
+};
+static SPRD_COMP_CLK_DATA(core0_clk, "core0-clk", core_parents, 0xa20,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core1_clk, "core1-clk", core_parents, 0xa24,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core2_clk, "core2-clk", core_parents, 0xa28,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core3_clk, "core3-clk", core_parents, 0xa2c,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core4_clk, "core4-clk", core_parents, 0xa30,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core5_clk, "core5-clk", core_parents, 0xa34,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core6_clk, "core6-clk", core_parents, 0xa38,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(core7_clk, "core7-clk", core_parents, 0xa3c,
+ 0, 3, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(scu_clk, "scu-clk", core_parents, 0xa40,
+ 0, 3, 8, 3, 0);
+
+static SPRD_DIV_CLK_HW(ace_clk, "ace-clk", &scu_clk.common.hw, 0xa44,
+ 8, 3, 0);
+static SPRD_DIV_CLK_HW(axi_periph_clk, "axi-periph-clk", &scu_clk.common.hw, 0xa48,
+ 8, 3, 0);
+static SPRD_DIV_CLK_HW(axi_acp_clk, "axi-acp-clk", &scu_clk.common.hw, 0xa4c,
+ 8, 3, 0);
+
+static const struct clk_parent_data atb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &mpll2.common.hw },
+};
+static SPRD_COMP_CLK_DATA(atb_clk, "atb-clk", atb_parents, 0xa50,
+ 0, 2, 8, 3, 0);
+static SPRD_DIV_CLK_HW(debug_apb_clk, "debug-apb-clk", &atb_clk.common.hw, 0xa54,
+ 8, 3, 0);
+
+static const struct clk_parent_data gic_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_COMP_CLK_DATA(gic_clk, "gic-clk", gic_parents, 0xa58,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(periph_clk, "periph-clk", gic_parents, 0xa5c,
+ 0, 2, 8, 3, 0);
+
+static struct sprd_clk_common *sc9863a_aon_clks[] = {
+ /* address base is 0x402d0000 */
+ &emc_clk.common,
+ &aon_apb.common,
+ &adi_clk.common,
+ &aux0_clk.common,
+ &aux1_clk.common,
+ &aux2_clk.common,
+ &probe_clk.common,
+ &pwm0_clk.common,
+ &pwm1_clk.common,
+ &pwm2_clk.common,
+ &aon_thm_clk.common,
+ &audif_clk.common,
+ &cpu_dap_clk.common,
+ &cpu_ts_clk.common,
+ &djtag_tck_clk.common,
+ &emc_ref_clk.common,
+ &cssys_clk.common,
+ &aon_pmu_clk.common,
+ &pmu_26m_clk.common,
+ &aon_tmr_clk.common,
+ &power_cpu_clk.common,
+ &ap_axi.common,
+ &sdio0_2x.common,
+ &sdio1_2x.common,
+ &sdio2_2x.common,
+ &emmc_2x.common,
+ &dpu_clk.common,
+ &dpu_dpi.common,
+ &otg_ref_clk.common,
+ &sdphy_apb_clk.common,
+ &alg_io_apb_clk.common,
+ &gpu_core.common,
+ &gpu_soc.common,
+ &mm_emc.common,
+ &mm_ahb.common,
+ &bpc_clk.common,
+ &dcam_if_clk.common,
+ &isp_clk.common,
+ &jpg_clk.common,
+ &cpp_clk.common,
+ &sensor0_clk.common,
+ &sensor1_clk.common,
+ &sensor2_clk.common,
+ &mm_vemc.common,
+ &mm_vahb.common,
+ &clk_vsp.common,
+ &core0_clk.common,
+ &core1_clk.common,
+ &core2_clk.common,
+ &core3_clk.common,
+ &core4_clk.common,
+ &core5_clk.common,
+ &core6_clk.common,
+ &core7_clk.common,
+ &scu_clk.common,
+ &ace_clk.common,
+ &axi_periph_clk.common,
+ &axi_acp_clk.common,
+ &atb_clk.common,
+ &debug_apb_clk.common,
+ &gic_clk.common,
+ &periph_clk.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_aon_clk_hws = {
+ .hws = {
+ [CLK_13M] = &clk_13m.hw,
+ [CLK_6M5] = &clk_6m5.hw,
+ [CLK_4M3] = &clk_4m3.hw,
+ [CLK_2M] = &clk_2m.hw,
+ [CLK_250K] = &clk_250k.hw,
+ [CLK_RCO_25M] = &rco_25m.hw,
+ [CLK_RCO_4M] = &rco_4m.hw,
+ [CLK_RCO_2M] = &rco_2m.hw,
+ [CLK_EMC] = &emc_clk.common.hw,
+ [CLK_AON_APB] = &aon_apb.common.hw,
+ [CLK_ADI] = &adi_clk.common.hw,
+ [CLK_AUX0] = &aux0_clk.common.hw,
+ [CLK_AUX1] = &aux1_clk.common.hw,
+ [CLK_AUX2] = &aux2_clk.common.hw,
+ [CLK_PROBE] = &probe_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_AON_THM] = &aon_thm_clk.common.hw,
+ [CLK_AUDIF] = &audif_clk.common.hw,
+ [CLK_CPU_DAP] = &cpu_dap_clk.common.hw,
+ [CLK_CPU_TS] = &cpu_ts_clk.common.hw,
+ [CLK_DJTAG_TCK] = &djtag_tck_clk.common.hw,
+ [CLK_EMC_REF] = &emc_ref_clk.common.hw,
+ [CLK_CSSYS] = &cssys_clk.common.hw,
+ [CLK_AON_PMU] = &aon_pmu_clk.common.hw,
+ [CLK_PMU_26M] = &pmu_26m_clk.common.hw,
+ [CLK_AON_TMR] = &aon_tmr_clk.common.hw,
+ [CLK_POWER_CPU] = &power_cpu_clk.common.hw,
+ [CLK_AP_AXI] = &ap_axi.common.hw,
+ [CLK_SDIO0_2X] = &sdio0_2x.common.hw,
+ [CLK_SDIO1_2X] = &sdio1_2x.common.hw,
+ [CLK_SDIO2_2X] = &sdio2_2x.common.hw,
+ [CLK_EMMC_2X] = &emmc_2x.common.hw,
+ [CLK_DPU] = &dpu_clk.common.hw,
+ [CLK_DPU_DPI] = &dpu_dpi.common.hw,
+ [CLK_OTG_REF] = &otg_ref_clk.common.hw,
+ [CLK_SDPHY_APB] = &sdphy_apb_clk.common.hw,
+ [CLK_ALG_IO_APB] = &alg_io_apb_clk.common.hw,
+ [CLK_GPU_CORE] = &gpu_core.common.hw,
+ [CLK_GPU_SOC] = &gpu_soc.common.hw,
+ [CLK_MM_EMC] = &mm_emc.common.hw,
+ [CLK_MM_AHB] = &mm_ahb.common.hw,
+ [CLK_BPC] = &bpc_clk.common.hw,
+ [CLK_DCAM_IF] = &dcam_if_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_CPP] = &cpp_clk.common.hw,
+ [CLK_SENSOR0] = &sensor0_clk.common.hw,
+ [CLK_SENSOR1] = &sensor1_clk.common.hw,
+ [CLK_SENSOR2] = &sensor2_clk.common.hw,
+ [CLK_MM_VEMC] = &mm_vemc.common.hw,
+ [CLK_MM_VAHB] = &mm_vahb.common.hw,
+ [CLK_VSP] = &clk_vsp.common.hw,
+ [CLK_CORE0] = &core0_clk.common.hw,
+ [CLK_CORE1] = &core1_clk.common.hw,
+ [CLK_CORE2] = &core2_clk.common.hw,
+ [CLK_CORE3] = &core3_clk.common.hw,
+ [CLK_CORE4] = &core4_clk.common.hw,
+ [CLK_CORE5] = &core5_clk.common.hw,
+ [CLK_CORE6] = &core6_clk.common.hw,
+ [CLK_CORE7] = &core7_clk.common.hw,
+ [CLK_SCU] = &scu_clk.common.hw,
+ [CLK_ACE] = &ace_clk.common.hw,
+ [CLK_AXI_PERIPH] = &axi_periph_clk.common.hw,
+ [CLK_AXI_ACP] = &axi_acp_clk.common.hw,
+ [CLK_ATB] = &atb_clk.common.hw,
+ [CLK_DEBUG_APB] = &debug_apb_clk.common.hw,
+ [CLK_GIC] = &gic_clk.common.hw,
+ [CLK_PERIPH] = &periph_clk.common.hw,
+ },
+ .num = CLK_AON_CLK_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_aon_clk_desc = {
+ .clk_clks = sc9863a_aon_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_aon_clks),
+ .hw_clks = &sc9863a_aon_clk_hws,
+};
+
+static const struct clk_parent_data ap_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_apb, "ap-apb", ap_apb_parents, 0x20,
+ 0, 2, SC9863A_MUX_FLAG);
+
+static const struct clk_parent_data ap_ce_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_ce, "ap-ce", ap_ce_parents, 0x24,
+ 0, 1, 8, 3, 0);
+
+static const struct clk_parent_data nandc_ecc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+};
+static SPRD_COMP_CLK_DATA(nandc_ecc, "nandc-ecc", nandc_ecc_parents, 0x28,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data nandc_26m_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(nandc_26m, "nandc-26m", nandc_26m_parents, 0x2c,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(emmc_32k, "emmc-32k", nandc_26m_parents, 0x30,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio0_32k, "sdio0-32k", nandc_26m_parents, 0x34,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio1_32k, "sdio1-32k", nandc_26m_parents, 0x38,
+ 0, 1, SC9863A_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio2_32k, "sdio2-32k", nandc_26m_parents, 0x3c,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static SPRD_GATE_CLK_HW(otg_utmi, "otg-utmi", &aon_apb.common.hw, 0x40,
+ BIT(16), 0, 0);
+
+static const struct clk_parent_data ap_uart_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_uart0, "ap-uart0", ap_uart_parents, 0x44,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart1, "ap-uart1", ap_uart_parents, 0x48,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart2, "ap-uart2", ap_uart_parents, 0x4c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart3, "ap-uart3", ap_uart_parents, 0x50,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart4, "ap-uart4", ap_uart_parents, 0x54,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data i2c_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_i2c0, "ap-i2c0", i2c_parents, 0x58,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c1, "ap-i2c1", i2c_parents, 0x5c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c2, "ap-i2c2", i2c_parents, 0x60,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c3, "ap-i2c3", i2c_parents, 0x64,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c4, "ap-i2c4", i2c_parents, 0x68,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c5, "ap-i2c5", i2c_parents, 0x6c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c6, "ap-i2c6", i2c_parents, 0x70,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_spi0, "ap-spi0", spi_parents, 0x74,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi1, "ap-spi1", spi_parents, 0x78,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi2, "ap-spi2", spi_parents, 0x7c,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi3, "ap-spi3", spi_parents, 0x80,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data iis_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_iis0, "ap-iis0", iis_parents, 0x84,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis1, "ap-iis1", iis_parents, 0x88,
+ 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis2, "ap-iis2", iis_parents, 0x8c,
+ 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data sim0_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(sim0, "sim0", sim0_parents, 0x90,
+ 0, 3, 8, 3, 0);
+
+static const struct clk_parent_data sim0_32k_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(sim0_32k, "sim0-32k", sim0_32k_parents, 0x94,
+ 0, 1, SC9863A_MUX_FLAG);
+
+static struct sprd_clk_common *sc9863a_ap_clks[] = {
+ /* address base is 0x21500000 */
+ &ap_apb.common,
+ &ap_ce.common,
+ &nandc_ecc.common,
+ &nandc_26m.common,
+ &emmc_32k.common,
+ &sdio0_32k.common,
+ &sdio1_32k.common,
+ &sdio2_32k.common,
+ &otg_utmi.common,
+ &ap_uart0.common,
+ &ap_uart1.common,
+ &ap_uart2.common,
+ &ap_uart3.common,
+ &ap_uart4.common,
+ &ap_i2c0.common,
+ &ap_i2c1.common,
+ &ap_i2c2.common,
+ &ap_i2c3.common,
+ &ap_i2c4.common,
+ &ap_i2c5.common,
+ &ap_i2c6.common,
+ &ap_spi0.common,
+ &ap_spi1.common,
+ &ap_spi2.common,
+ &ap_spi3.common,
+ &ap_iis0.common,
+ &ap_iis1.common,
+ &ap_iis2.common,
+ &sim0.common,
+ &sim0_32k.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_ap_clk_hws = {
+ .hws = {
+ [CLK_AP_APB] = &ap_apb.common.hw,
+ [CLK_AP_CE] = &ap_ce.common.hw,
+ [CLK_NANDC_ECC] = &nandc_ecc.common.hw,
+ [CLK_NANDC_26M] = &nandc_26m.common.hw,
+ [CLK_EMMC_32K] = &emmc_32k.common.hw,
+ [CLK_SDIO0_32K] = &sdio0_32k.common.hw,
+ [CLK_SDIO1_32K] = &sdio1_32k.common.hw,
+ [CLK_SDIO2_32K] = &sdio2_32k.common.hw,
+ [CLK_OTG_UTMI] = &otg_utmi.common.hw,
+ [CLK_AP_UART0] = &ap_uart0.common.hw,
+ [CLK_AP_UART1] = &ap_uart1.common.hw,
+ [CLK_AP_UART2] = &ap_uart2.common.hw,
+ [CLK_AP_UART3] = &ap_uart3.common.hw,
+ [CLK_AP_UART4] = &ap_uart4.common.hw,
+ [CLK_AP_I2C0] = &ap_i2c0.common.hw,
+ [CLK_AP_I2C1] = &ap_i2c1.common.hw,
+ [CLK_AP_I2C2] = &ap_i2c2.common.hw,
+ [CLK_AP_I2C3] = &ap_i2c3.common.hw,
+ [CLK_AP_I2C4] = &ap_i2c4.common.hw,
+ [CLK_AP_I2C5] = &ap_i2c5.common.hw,
+ [CLK_AP_I2C6] = &ap_i2c6.common.hw,
+ [CLK_AP_SPI0] = &ap_spi0.common.hw,
+ [CLK_AP_SPI1] = &ap_spi1.common.hw,
+ [CLK_AP_SPI2] = &ap_spi2.common.hw,
+ [CLK_AP_SPI3] = &ap_spi3.common.hw,
+ [CLK_AP_IIS0] = &ap_iis0.common.hw,
+ [CLK_AP_IIS1] = &ap_iis1.common.hw,
+ [CLK_AP_IIS2] = &ap_iis2.common.hw,
+ [CLK_SIM0] = &sim0.common.hw,
+ [CLK_SIM0_32K] = &sim0_32k.common.hw,
+ },
+ .num = CLK_AP_CLK_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_ap_clk_desc = {
+ .clk_clks = sc9863a_ap_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_ap_clks),
+ .hw_clks = &sc9863a_ap_clk_hws,
+};
+
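+/*
+ * ap ahb gate clocks.  The SC ("set/clear") gate variants take a
+ * 0x1000 sc_offset; the enable bit is presumably written through
+ * dedicated set/clear registers at reg + offset and reg + 2 * offset
+ * instead of by read-modify-write.
+ */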
+static SPRD_SC_GATE_CLK_HW(otg_eb, "otg-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dma_eb, "dma-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ce_eb, "ce-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(nandc_eb, "nandc-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio0_eb, "sdio0-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio1_eb, "sdio1-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio2_eb, "sdio2-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(emmc_eb, "emmc-eb", &ap_axi.common.hw, 0x0, 0x1000,
+ BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(emmc_32k_eb, "emmc-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio0_32k_eb, "sdio0-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio1_32k_eb, "sdio1-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(29), 0, 0);
+static SPRD_SC_GATE_CLK_HW(sdio2_32k_eb, "sdio2-32k-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(30), 0, 0);
+static SPRD_SC_GATE_CLK_HW(nandc_26m_eb, "nandc-26m-eb", &ap_axi.common.hw, 0x0,
+ 0x1000, BIT(31), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dma_eb2, "dma-eb2", &ap_axi.common.hw, 0x18,
+ 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ce_eb2, "ce-eb2", &ap_axi.common.hw, 0x18,
+ 0x1000, BIT(1), 0, 0);
+
+static struct sprd_clk_common *sc9863a_apahb_gate_clks[] = {
+ /* address base is 0x20e00000 */
+ &otg_eb.common,
+ &dma_eb.common,
+ &ce_eb.common,
+ &nandc_eb.common,
+ &sdio0_eb.common,
+ &sdio1_eb.common,
+ &sdio2_eb.common,
+ &emmc_eb.common,
+ &emmc_32k_eb.common,
+ &sdio0_32k_eb.common,
+ &sdio1_32k_eb.common,
+ &sdio2_32k_eb.common,
+ &nandc_26m_eb.common,
+ &dma_eb2.common,
+ &ce_eb2.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_apahb_gate_hws = {
+ .hws = {
+ [CLK_OTG_EB] = &otg_eb.common.hw,
+ [CLK_DMA_EB] = &dma_eb.common.hw,
+ [CLK_CE_EB] = &ce_eb.common.hw,
+ [CLK_NANDC_EB] = &nandc_eb.common.hw,
+ [CLK_SDIO0_EB] = &sdio0_eb.common.hw,
+ [CLK_SDIO1_EB] = &sdio1_eb.common.hw,
+ [CLK_SDIO2_EB] = &sdio2_eb.common.hw,
+ [CLK_EMMC_EB] = &emmc_eb.common.hw,
+ [CLK_EMMC_32K_EB] = &emmc_32k_eb.common.hw,
+ [CLK_SDIO0_32K_EB] = &sdio0_32k_eb.common.hw,
+ [CLK_SDIO1_32K_EB] = &sdio1_32k_eb.common.hw,
+ [CLK_SDIO2_32K_EB] = &sdio2_32k_eb.common.hw,
+ [CLK_NANDC_26M_EB] = &nandc_26m_eb.common.hw,
+ [CLK_DMA_EB2] = &dma_eb2.common.hw,
+ [CLK_CE_EB2] = &ce_eb2.common.hw,
+ },
+ .num = CLK_AP_AHB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_apahb_gate_desc = {
+ .clk_clks = sc9863a_apahb_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_apahb_gate_clks),
+ .hw_clks = &sc9863a_apahb_gate_hws,
+};
+
+/* aon gate clocks */
+static SPRD_SC_GATE_CLK_HW(gpio_eb, "gpio-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pwm0_eb, "pwm0-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pwm1_eb, "pwm1-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(pwm2_eb, "pwm2-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pwm3_eb, "pwm3-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(kpd_eb, "kpd-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_syst_eb, "aon-syst-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_syst_eb, "ap-syst-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aon_tmr_eb, "aon-tmr-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(efuse_eb, "efuse-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(13), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(eic_eb, "eic-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(intc_eb, "intc-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(adi_eb, "adi-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(audif_eb, "audif-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aud_eb, "aud-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_HW(vbc_eb, "vbc-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_HW(pin_eb, "pin-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_wdg_eb, "ap-wdg-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(24), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_eb, "mm-eb", &aon_apb.common.hw, 0x0,
+ 0x1000, BIT(25), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aon_apb_ckg_eb, "aon-apb-ckg-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(26), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_ts0_eb, "ca53-ts0-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(28), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_ts1_eb, "ca53-ts1-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(29), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_dap_eb, "ca53-dap-eb", &aon_apb.common.hw,
+ 0x0, 0x1000, BIT(30), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(pmu_eb, "pmu-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(thm_eb, "thm-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aux0_eb, "aux0-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aux1_eb, "aux1-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aux2_eb, "aux2-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(probe_eb, "probe-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(emc_ref_eb, "emc-ref-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_wdg_eb, "ca53-wdg-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr1_eb, "ap-tmr1-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr2_eb, "ap-tmr2-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(disp_emc_eb, "disp-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(zip_emc_eb, "zip-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gsp_emc_eb, "gsp-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_vsp_eb, "mm-vsp-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mdar_eb, "mdar-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_HW(rtc4m0_cal_eb, "rtc4m0-cal-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_HW(rtc4m1_cal_eb, "rtc4m1-cal-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_HW(djtag_eb, "djtag-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(20), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mbox_eb, "mbox-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(21), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_dma_eb, "aon-dma-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(22), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_apb_def_eb, "aon-apb-def-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ca5_ts0_eb, "ca5-ts0-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dbg_eb, "dbg-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dbg_emc_eb, "dbg-emc-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(29), 0, 0);
+static SPRD_SC_GATE_CLK_HW(cross_trig_eb, "cross-trig-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(30), 0, 0);
+static SPRD_SC_GATE_CLK_HW(serdes_dphy_eb, "serdes-dphy-eb", &aon_apb.common.hw,
+ 0x4, 0x1000, BIT(31), 0, 0);
+static SPRD_SC_GATE_CLK_HW(arch_rtc_eb, "arch-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(kpd_rtc_eb, "kpd-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_syst_rtc_eb, "aon-syst-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_syst_rtc_eb, "ap-syst-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(aon_tmr_rtc_eb, "aon-tmr-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr0_rtc_eb, "ap-tmr0-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(eic_rtc_eb, "eic-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(eic_rtcdv5_eb, "eic-rtcdv5-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_wdg_rtc_eb, "ap-wdg-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ca53_wdg_rtc_eb, "ca53-wdg-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(thm_rtc_eb, "thm-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(athma_rtc_eb, "athma-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gthma_rtc_eb, "gthma-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_HW(athma_rtc_a_eb, "athma-rtc-a-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gthma_rtc_a_eb, "gthma-rtc-a-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr1_rtc_eb, "ap-tmr1-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(15), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_tmr2_rtc_eb, "ap-tmr2-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(16), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dxco_lc_rtc_eb, "dxco-lc-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_HW(bb_cal_rtc_eb, "bb-cal-rtc-eb", &aon_apb.common.hw,
+ 0x10, 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_HW(gpu_eb, "gpu-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(disp_eb, "disp-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_emc_eb, "mm-emc-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(power_cpu_eb, "power-cpu-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(hw_i2c_eb, "hw-i2c-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_vsp_emc_eb, "mm-vsp-emc-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(vsp_eb, "vsp-eb", &aon_apb.common.hw, 0x50,
+ 0x1000, BIT(16), 0, 0);
+static SPRD_SC_GATE_CLK_HW(cssys_eb, "cssys-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dmc_eb, "dmc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(rosc_eb, "rosc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(s_d_cfg_eb, "s-d-cfg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(s_d_ref_eb, "s-d-ref-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(b_dma_eb, "b-dma-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(anlg_eb, "anlg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(anlg_apb_eb, "anlg-apb-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_HW(bsmtmr_eb, "bsmtmr-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_HW(ap_axi_eb, "ap-axi-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc0_eb, "ap-intc0-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc1_eb, "ap-intc1-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc2_eb, "ap-intc2-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc3_eb, "ap-intc3-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(19), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc4_eb, "ap-intc4-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(ap_intc5_eb, "ap-intc5-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(21), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_HW(scc_eb, "scc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(22), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dphy_cfg_eb, "dphy-cfg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(23), 0, 0);
+static SPRD_SC_GATE_CLK_HW(dphy_ref_eb, "dphy-ref-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(24), 0, 0);
+static SPRD_SC_GATE_CLK_HW(cphy_cfg_eb, "cphy-cfg-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_HW(otg_ref_eb, "otg-ref-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_HW(serdes_eb, "serdes-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_HW(aon_ap_emc_eb, "aon-ap-emc-eb", &aon_apb.common.hw, 0xb0,
+ 0x1000, BIT(28), 0, 0);
+static struct sprd_clk_common *sc9863a_aonapb_gate_clks[] = {
+ /* address base is 0x402e0000 */
+ &gpio_eb.common,
+ &pwm0_eb.common,
+ &pwm1_eb.common,
+ &pwm2_eb.common,
+ &pwm3_eb.common,
+ &kpd_eb.common,
+ &aon_syst_eb.common,
+ &ap_syst_eb.common,
+ &aon_tmr_eb.common,
+ &efuse_eb.common,
+ &eic_eb.common,
+ &intc_eb.common,
+ &adi_eb.common,
+ &audif_eb.common,
+ &aud_eb.common,
+ &vbc_eb.common,
+ &pin_eb.common,
+ &ap_wdg_eb.common,
+ &mm_eb.common,
+ &aon_apb_ckg_eb.common,
+ &ca53_ts0_eb.common,
+ &ca53_ts1_eb.common,
+ &ca53_dap_eb.common,
+ &pmu_eb.common,
+ &thm_eb.common,
+ &aux0_eb.common,
+ &aux1_eb.common,
+ &aux2_eb.common,
+ &probe_eb.common,
+ &emc_ref_eb.common,
+ &ca53_wdg_eb.common,
+ &ap_tmr1_eb.common,
+ &ap_tmr2_eb.common,
+ &disp_emc_eb.common,
+ &zip_emc_eb.common,
+ &gsp_emc_eb.common,
+ &mm_vsp_eb.common,
+ &mdar_eb.common,
+ &rtc4m0_cal_eb.common,
+ &rtc4m1_cal_eb.common,
+ &djtag_eb.common,
+ &mbox_eb.common,
+ &aon_dma_eb.common,
+ &aon_apb_def_eb.common,
+ &ca5_ts0_eb.common,
+ &dbg_eb.common,
+ &dbg_emc_eb.common,
+ &cross_trig_eb.common,
+ &serdes_dphy_eb.common,
+ &arch_rtc_eb.common,
+ &kpd_rtc_eb.common,
+ &aon_syst_rtc_eb.common,
+ &ap_syst_rtc_eb.common,
+ &aon_tmr_rtc_eb.common,
+ &ap_tmr0_rtc_eb.common,
+ &eic_rtc_eb.common,
+ &eic_rtcdv5_eb.common,
+ &ap_wdg_rtc_eb.common,
+ &ca53_wdg_rtc_eb.common,
+ &thm_rtc_eb.common,
+ &athma_rtc_eb.common,
+ &gthma_rtc_eb.common,
+ &athma_rtc_a_eb.common,
+ &gthma_rtc_a_eb.common,
+ &ap_tmr1_rtc_eb.common,
+ &ap_tmr2_rtc_eb.common,
+ &dxco_lc_rtc_eb.common,
+ &bb_cal_rtc_eb.common,
+ &gpu_eb.common,
+ &disp_eb.common,
+ &mm_emc_eb.common,
+ &power_cpu_eb.common,
+ &hw_i2c_eb.common,
+ &mm_vsp_emc_eb.common,
+ &vsp_eb.common,
+ &cssys_eb.common,
+ &dmc_eb.common,
+ &rosc_eb.common,
+ &s_d_cfg_eb.common,
+ &s_d_ref_eb.common,
+ &b_dma_eb.common,
+ &anlg_eb.common,
+ &anlg_apb_eb.common,
+ &bsmtmr_eb.common,
+ &ap_axi_eb.common,
+ &ap_intc0_eb.common,
+ &ap_intc1_eb.common,
+ &ap_intc2_eb.common,
+ &ap_intc3_eb.common,
+ &ap_intc4_eb.common,
+ &ap_intc5_eb.common,
+ &scc_eb.common,
+ &dphy_cfg_eb.common,
+ &dphy_ref_eb.common,
+ &cphy_cfg_eb.common,
+ &otg_ref_eb.common,
+ &serdes_eb.common,
+ &aon_ap_emc_eb.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_aonapb_gate_hws = {
+ .hws = {
+ [CLK_GPIO_EB] = &gpio_eb.common.hw,
+ [CLK_PWM0_EB] = &pwm0_eb.common.hw,
+ [CLK_PWM1_EB] = &pwm1_eb.common.hw,
+ [CLK_PWM2_EB] = &pwm2_eb.common.hw,
+ [CLK_PWM3_EB] = &pwm3_eb.common.hw,
+ [CLK_KPD_EB] = &kpd_eb.common.hw,
+ [CLK_AON_SYST_EB] = &aon_syst_eb.common.hw,
+ [CLK_AP_SYST_EB] = &ap_syst_eb.common.hw,
+ [CLK_AON_TMR_EB] = &aon_tmr_eb.common.hw,
+ [CLK_EFUSE_EB] = &efuse_eb.common.hw,
+ [CLK_EIC_EB] = &eic_eb.common.hw,
+ [CLK_INTC_EB] = &intc_eb.common.hw,
+ [CLK_ADI_EB] = &adi_eb.common.hw,
+ [CLK_AUDIF_EB] = &audif_eb.common.hw,
+ [CLK_AUD_EB] = &aud_eb.common.hw,
+ [CLK_VBC_EB] = &vbc_eb.common.hw,
+ [CLK_PIN_EB] = &pin_eb.common.hw,
+ [CLK_AP_WDG_EB] = &ap_wdg_eb.common.hw,
+ [CLK_MM_EB] = &mm_eb.common.hw,
+ [CLK_AON_APB_CKG_EB] = &aon_apb_ckg_eb.common.hw,
+ [CLK_CA53_TS0_EB] = &ca53_ts0_eb.common.hw,
+ [CLK_CA53_TS1_EB] = &ca53_ts1_eb.common.hw,
+ [CLK_CS53_DAP_EB] = &ca53_dap_eb.common.hw,
+ [CLK_PMU_EB] = &pmu_eb.common.hw,
+ [CLK_THM_EB] = &thm_eb.common.hw,
+ [CLK_AUX0_EB] = &aux0_eb.common.hw,
+ [CLK_AUX1_EB] = &aux1_eb.common.hw,
+ [CLK_AUX2_EB] = &aux2_eb.common.hw,
+ [CLK_PROBE_EB] = &probe_eb.common.hw,
+ [CLK_EMC_REF_EB] = &emc_ref_eb.common.hw,
+ [CLK_CA53_WDG_EB] = &ca53_wdg_eb.common.hw,
+ [CLK_AP_TMR1_EB] = &ap_tmr1_eb.common.hw,
+ [CLK_AP_TMR2_EB] = &ap_tmr2_eb.common.hw,
+ [CLK_DISP_EMC_EB] = &disp_emc_eb.common.hw,
+ [CLK_ZIP_EMC_EB] = &zip_emc_eb.common.hw,
+ [CLK_GSP_EMC_EB] = &gsp_emc_eb.common.hw,
+ [CLK_MM_VSP_EB] = &mm_vsp_eb.common.hw,
+ [CLK_MDAR_EB] = &mdar_eb.common.hw,
+ [CLK_RTC4M0_CAL_EB] = &rtc4m0_cal_eb.common.hw,
+ [CLK_RTC4M1_CAL_EB] = &rtc4m1_cal_eb.common.hw,
+ [CLK_DJTAG_EB] = &djtag_eb.common.hw,
+ [CLK_MBOX_EB] = &mbox_eb.common.hw,
+ [CLK_AON_DMA_EB] = &aon_dma_eb.common.hw,
+ [CLK_AON_APB_DEF_EB] = &aon_apb_def_eb.common.hw,
+ [CLK_CA5_TS0_EB] = &ca5_ts0_eb.common.hw,
+ [CLK_DBG_EB] = &dbg_eb.common.hw,
+ [CLK_DBG_EMC_EB] = &dbg_emc_eb.common.hw,
+ [CLK_CROSS_TRIG_EB] = &cross_trig_eb.common.hw,
+ [CLK_SERDES_DPHY_EB] = &serdes_dphy_eb.common.hw,
+ [CLK_ARCH_RTC_EB] = &arch_rtc_eb.common.hw,
+ [CLK_KPD_RTC_EB] = &kpd_rtc_eb.common.hw,
+ [CLK_AON_SYST_RTC_EB] = &aon_syst_rtc_eb.common.hw,
+ [CLK_AP_SYST_RTC_EB] = &ap_syst_rtc_eb.common.hw,
+ [CLK_AON_TMR_RTC_EB] = &aon_tmr_rtc_eb.common.hw,
+ [CLK_AP_TMR0_RTC_EB] = &ap_tmr0_rtc_eb.common.hw,
+ [CLK_EIC_RTC_EB] = &eic_rtc_eb.common.hw,
+ [CLK_EIC_RTCDV5_EB] = &eic_rtcdv5_eb.common.hw,
+ [CLK_AP_WDG_RTC_EB] = &ap_wdg_rtc_eb.common.hw,
+ [CLK_CA53_WDG_RTC_EB] = &ca53_wdg_rtc_eb.common.hw,
+ [CLK_THM_RTC_EB] = &thm_rtc_eb.common.hw,
+ [CLK_ATHMA_RTC_EB] = &athma_rtc_eb.common.hw,
+ [CLK_GTHMA_RTC_EB] = &gthma_rtc_eb.common.hw,
+ [CLK_ATHMA_RTC_A_EB] = &athma_rtc_a_eb.common.hw,
+ [CLK_GTHMA_RTC_A_EB] = &gthma_rtc_a_eb.common.hw,
+ [CLK_AP_TMR1_RTC_EB] = &ap_tmr1_rtc_eb.common.hw,
+ [CLK_AP_TMR2_RTC_EB] = &ap_tmr2_rtc_eb.common.hw,
+ [CLK_DXCO_LC_RTC_EB] = &dxco_lc_rtc_eb.common.hw,
+ [CLK_BB_CAL_RTC_EB] = &bb_cal_rtc_eb.common.hw,
+ [CLK_GNU_EB] = &gpu_eb.common.hw,
+ [CLK_DISP_EB] = &disp_eb.common.hw,
+ [CLK_MM_EMC_EB] = &mm_emc_eb.common.hw,
+ [CLK_POWER_CPU_EB] = &power_cpu_eb.common.hw,
+ [CLK_HW_I2C_EB] = &hw_i2c_eb.common.hw,
+ [CLK_MM_VSP_EMC_EB] = &mm_vsp_emc_eb.common.hw,
+ [CLK_VSP_EB] = &vsp_eb.common.hw,
+ [CLK_CSSYS_EB] = &cssys_eb.common.hw,
+ [CLK_DMC_EB] = &dmc_eb.common.hw,
+ [CLK_ROSC_EB] = &rosc_eb.common.hw,
+ [CLK_S_D_CFG_EB] = &s_d_cfg_eb.common.hw,
+ [CLK_S_D_REF_EB] = &s_d_ref_eb.common.hw,
+ [CLK_B_DMA_EB] = &b_dma_eb.common.hw,
+ [CLK_ANLG_EB] = &anlg_eb.common.hw,
+ [CLK_ANLG_APB_EB] = &anlg_apb_eb.common.hw,
+ [CLK_BSMTMR_EB] = &bsmtmr_eb.common.hw,
+ [CLK_AP_AXI_EB] = &ap_axi_eb.common.hw,
+ [CLK_AP_INTC0_EB] = &ap_intc0_eb.common.hw,
+ [CLK_AP_INTC1_EB] = &ap_intc1_eb.common.hw,
+ [CLK_AP_INTC2_EB] = &ap_intc2_eb.common.hw,
+ [CLK_AP_INTC3_EB] = &ap_intc3_eb.common.hw,
+ [CLK_AP_INTC4_EB] = &ap_intc4_eb.common.hw,
+ [CLK_AP_INTC5_EB] = &ap_intc5_eb.common.hw,
+ [CLK_SCC_EB] = &scc_eb.common.hw,
+ [CLK_DPHY_CFG_EB] = &dphy_cfg_eb.common.hw,
+ [CLK_DPHY_REF_EB] = &dphy_ref_eb.common.hw,
+ [CLK_CPHY_CFG_EB] = &cphy_cfg_eb.common.hw,
+ [CLK_OTG_REF_EB] = &otg_ref_eb.common.hw,
+ [CLK_SERDES_EB] = &serdes_eb.common.hw,
+ [CLK_AON_AP_EMC_EB] = &aon_ap_emc_eb.common.hw,
+ },
+ .num = CLK_AON_APB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_aonapb_gate_desc = {
+ .clk_clks = sc9863a_aonapb_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_aonapb_gate_clks),
+ .hw_clks = &sc9863a_aonapb_gate_hws,
+};
+
+/* mm gate clocks */
+static SPRD_SC_GATE_CLK_HW(mahb_ckg_eb, "mahb-ckg-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mdcam_eb, "mdcam-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(misp_eb, "misp-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mahbcsi_eb, "mahbcsi-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mcsi_s_eb, "mcsi-s-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mcsi_t_eb, "mcsi-t-eb", &mm_ahb.common.hw, 0x0, 0x1000,
+ BIT(5), 0, 0);
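+/* Plain (non-SC) gates presumably toggle their bit by read-modify-write. */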
+static SPRD_GATE_CLK_HW(dcam_axi_eb, "dcam-axi-eb", &mm_ahb.common.hw, 0x8,
+ BIT(0), 0, 0);
+static SPRD_GATE_CLK_HW(isp_axi_eb, "isp-axi-eb", &mm_ahb.common.hw, 0x8,
+ BIT(1), 0, 0);
+static SPRD_GATE_CLK_HW(mcsi_eb, "mcsi-eb", &mm_ahb.common.hw, 0x8,
+ BIT(2), 0, 0);
+static SPRD_GATE_CLK_HW(mcsi_s_ckg_eb, "mcsi-s-ckg-eb", &mm_ahb.common.hw, 0x8,
+ BIT(3), 0, 0);
+static SPRD_GATE_CLK_HW(mcsi_t_ckg_eb, "mcsi-t-ckg-eb", &mm_ahb.common.hw, 0x8,
+ BIT(4), 0, 0);
+static SPRD_GATE_CLK_HW(sensor0_eb, "sensor0-eb", &mm_ahb.common.hw, 0x8,
+ BIT(5), 0, 0);
+static SPRD_GATE_CLK_HW(sensor1_eb, "sensor1-eb", &mm_ahb.common.hw, 0x8,
+ BIT(6), 0, 0);
+static SPRD_GATE_CLK_HW(sensor2_eb, "sensor2-eb", &mm_ahb.common.hw, 0x8,
+ BIT(7), 0, 0);
+static SPRD_GATE_CLK_HW(mcphy_cfg_eb, "mcphy-cfg-eb", &mm_ahb.common.hw, 0x8,
+ BIT(8), 0, 0);
+
+static struct sprd_clk_common *sc9863a_mm_gate_clks[] = {
+ /* address base is 0x60800000 */
+ &mahb_ckg_eb.common,
+ &mdcam_eb.common,
+ &misp_eb.common,
+ &mahbcsi_eb.common,
+ &mcsi_s_eb.common,
+ &mcsi_t_eb.common,
+ &dcam_axi_eb.common,
+ &isp_axi_eb.common,
+ &mcsi_eb.common,
+ &mcsi_s_ckg_eb.common,
+ &mcsi_t_ckg_eb.common,
+ &sensor0_eb.common,
+ &sensor1_eb.common,
+ &sensor2_eb.common,
+ &mcphy_cfg_eb.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_mm_gate_hws = {
+ .hws = {
+ [CLK_MAHB_CKG_EB] = &mahb_ckg_eb.common.hw,
+ [CLK_MDCAM_EB] = &mdcam_eb.common.hw,
+ [CLK_MISP_EB] = &misp_eb.common.hw,
+ [CLK_MAHBCSI_EB] = &mahbcsi_eb.common.hw,
+ [CLK_MCSI_S_EB] = &mcsi_s_eb.common.hw,
+ [CLK_MCSI_T_EB] = &mcsi_t_eb.common.hw,
+ [CLK_DCAM_AXI_EB] = &dcam_axi_eb.common.hw,
+ [CLK_ISP_AXI_EB] = &isp_axi_eb.common.hw,
+ [CLK_MCSI_EB] = &mcsi_eb.common.hw,
+ [CLK_MCSI_S_CKG_EB] = &mcsi_s_ckg_eb.common.hw,
+ [CLK_MCSI_T_CKG_EB] = &mcsi_t_ckg_eb.common.hw,
+ [CLK_SENSOR0_EB] = &sensor0_eb.common.hw,
+ [CLK_SENSOR1_EB] = &sensor1_eb.common.hw,
+ [CLK_SENSOR2_EB] = &sensor2_eb.common.hw,
+ [CLK_MCPHY_CFG_EB] = &mcphy_cfg_eb.common.hw,
+ },
+ .num = CLK_MM_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_mm_gate_desc = {
+ .clk_clks = sc9863a_mm_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_mm_gate_clks),
+ .hw_clks = &sc9863a_mm_gate_hws,
+};
+
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis1_eb, "iis1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis2_eb, "iis2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi0_eb, "spi0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi1_eb, "spi1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi2_eb, "spi2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c0_eb, "i2c0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c1_eb, "i2c1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c2_eb, "i2c2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c3_eb, "i2c3-eb", "ext-26m", 0x0,
+ 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c4_eb, "i2c4-eb", "ext-26m", 0x0,
+ 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart0_eb, "uart0-eb", "ext-26m", 0x0,
+ 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart1_eb, "uart1-eb", "ext-26m", 0x0,
+ 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart2_eb, "uart2-eb", "ext-26m", 0x0,
+ 0x1000, BIT(15), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart3_eb, "uart3-eb", "ext-26m", 0x0,
+ 0x1000, BIT(16), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart4_eb, "uart4-eb", "ext-26m", 0x0,
+ 0x1000, BIT(17), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_32k_eb, "sim0_32k-eb", "ext-26m", 0x0,
+ 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi3_eb, "spi3-eb", "ext-26m", 0x0,
+ 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c5_eb, "i2c5-eb", "ext-26m", 0x0,
+ 0x1000, BIT(20), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c6_eb, "i2c6-eb", "ext-26m", 0x0,
+ 0x1000, BIT(21), 0, 0);
+
+static struct sprd_clk_common *sc9863a_apapb_gate[] = {
+ /* address base is 0x71300000 */
+ &sim0_eb.common,
+ &iis0_eb.common,
+ &iis1_eb.common,
+ &iis2_eb.common,
+ &spi0_eb.common,
+ &spi1_eb.common,
+ &spi2_eb.common,
+ &i2c0_eb.common,
+ &i2c1_eb.common,
+ &i2c2_eb.common,
+ &i2c3_eb.common,
+ &i2c4_eb.common,
+ &uart0_eb.common,
+ &uart1_eb.common,
+ &uart2_eb.common,
+ &uart3_eb.common,
+ &uart4_eb.common,
+ &sim0_32k_eb.common,
+ &spi3_eb.common,
+ &i2c5_eb.common,
+ &i2c6_eb.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_apapb_gate_hws = {
+ .hws = {
+ [CLK_SIM0_EB] = &sim0_eb.common.hw,
+ [CLK_IIS0_EB] = &iis0_eb.common.hw,
+ [CLK_IIS1_EB] = &iis1_eb.common.hw,
+ [CLK_IIS2_EB] = &iis2_eb.common.hw,
+ [CLK_SPI0_EB] = &spi0_eb.common.hw,
+ [CLK_SPI1_EB] = &spi1_eb.common.hw,
+ [CLK_SPI2_EB] = &spi2_eb.common.hw,
+ [CLK_I2C0_EB] = &i2c0_eb.common.hw,
+ [CLK_I2C1_EB] = &i2c1_eb.common.hw,
+ [CLK_I2C2_EB] = &i2c2_eb.common.hw,
+ [CLK_I2C3_EB] = &i2c3_eb.common.hw,
+ [CLK_I2C4_EB] = &i2c4_eb.common.hw,
+ [CLK_UART0_EB] = &uart0_eb.common.hw,
+ [CLK_UART1_EB] = &uart1_eb.common.hw,
+ [CLK_UART2_EB] = &uart2_eb.common.hw,
+ [CLK_UART3_EB] = &uart3_eb.common.hw,
+ [CLK_UART4_EB] = &uart4_eb.common.hw,
+ [CLK_SIM0_32K_EB] = &sim0_32k_eb.common.hw,
+ [CLK_SPI3_EB] = &spi3_eb.common.hw,
+ [CLK_I2C5_EB] = &i2c5_eb.common.hw,
+ [CLK_I2C6_EB] = &i2c6_eb.common.hw,
+ },
+ .num = CLK_AP_APB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_apapb_gate_desc = {
+ .clk_clks = sc9863a_apapb_gate,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_apapb_gate),
+ .hw_clks = &sc9863a_apapb_gate_hws,
+};
+
+static const struct of_device_id sprd_sc9863a_clk_ids[] = {
+ { .compatible = "sprd,sc9863a-ap-clk", /* 0x21500000 */
+ .data = &sc9863a_ap_clk_desc },
+ { .compatible = "sprd,sc9863a-pmu-gate", /* 0x402b0000 */
+ .data = &sc9863a_pmu_gate_desc },
+ { .compatible = "sprd,sc9863a-pll", /* 0x40353000 */
+ .data = &sc9863a_pll_desc },
+ { .compatible = "sprd,sc9863a-mpll", /* 0x40359000 */
+ .data = &sc9863a_mpll_desc },
+ { .compatible = "sprd,sc9863a-rpll", /* 0x4035c000 */
+ .data = &sc9863a_rpll_desc },
+ { .compatible = "sprd,sc9863a-dpll", /* 0x40363000 */
+ .data = &sc9863a_dpll_desc },
+ { .compatible = "sprd,sc9863a-aon-clk", /* 0x402d0000 */
+ .data = &sc9863a_aon_clk_desc },
+ { .compatible = "sprd,sc9863a-apahb-gate", /* 0x20e00000 */
+ .data = &sc9863a_apahb_gate_desc },
+ { .compatible = "sprd,sc9863a-aonapb-gate", /* 0x402e0000 */
+ .data = &sc9863a_aonapb_gate_desc },
+ { .compatible = "sprd,sc9863a-mm-gate", /* 0x60800000 */
+ .data = &sc9863a_mm_gate_desc },
+ { .compatible = "sprd,sc9863a-apapb-gate", /* 0x71300000 */
+ .data = &sc9863a_apapb_gate_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sprd_sc9863a_clk_ids);
+
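+/*
+ * A single probe serves every block above: look up the descriptor for
+ * the matched compatible, initialize the regmap for that register
+ * range, then register the hw clocks through the common sprd helper.
+ */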
+static int sc9863a_clk_probe(struct platform_device *pdev)
+{
+ const struct sprd_clk_desc *desc;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -ENODEV;
+
+ ret = sprd_clk_regmap_init(pdev, desc);
+ if (ret)
+ return ret;
+
+ return sprd_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static struct platform_driver sc9863a_clk_driver = {
+ .probe = sc9863a_clk_probe,
+ .driver = {
+ .name = "sc9863a-clk",
+ .of_match_table = sprd_sc9863a_clk_ids,
+ },
+};
+module_platform_driver(sc9863a_clk_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC9863A Clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 116e6f826d04..54d1f96f4b68 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -55,10 +55,6 @@
/* All the DRAM gates are exported */
-/* Some more module clocks are exported */
-
-#define CLK_MBUS 112
-
/* And the DSI and GPU module clock is exported */
#define CLK_NUMBER (CLK_GPU + 1)
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index d9668493c3f9..524f33275bc7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -50,8 +50,10 @@ static SUNXI_CCU_M(mixer1_div_a83_clk, "mixer1-div", "pll-de", 0x0c, 4, 4,
CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(rot_div_a83_clk, "rot-div", "pll-de", 0x0c, 0x0c, 4,
+ CLK_SET_RATE_PARENT);
-static struct ccu_common *sun50i_h6_de3_clks[] = {
+static struct ccu_common *sun8i_a83t_de2_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
&wb_clk.common,
@@ -60,16 +62,16 @@ static struct ccu_common *sun50i_h6_de3_clks[] = {
&bus_mixer1_clk.common,
&bus_wb_clk.common,
- &mixer0_div_clk.common,
- &mixer1_div_clk.common,
- &wb_div_clk.common,
+ &mixer0_div_a83_clk.common,
+ &mixer1_div_a83_clk.common,
+ &wb_div_a83_clk.common,
&bus_rot_clk.common,
&rot_clk.common,
- &rot_div_clk.common,
+ &rot_div_a83_clk.common,
};
-static struct ccu_common *sun8i_a83t_de2_clks[] = {
+static struct ccu_common *sun8i_h3_de2_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
&wb_clk.common,
@@ -78,34 +80,38 @@ static struct ccu_common *sun8i_a83t_de2_clks[] = {
&bus_mixer1_clk.common,
&bus_wb_clk.common,
- &mixer0_div_a83_clk.common,
- &mixer1_div_a83_clk.common,
- &wb_div_a83_clk.common,
+ &mixer0_div_clk.common,
+ &mixer1_div_clk.common,
+ &wb_div_clk.common,
};
-static struct ccu_common *sun8i_h3_de2_clks[] = {
+static struct ccu_common *sun8i_v3s_de2_clks[] = {
&mixer0_clk.common,
- &mixer1_clk.common,
&wb_clk.common,
&bus_mixer0_clk.common,
- &bus_mixer1_clk.common,
&bus_wb_clk.common,
&mixer0_div_clk.common,
- &mixer1_div_clk.common,
&wb_div_clk.common,
};
-static struct ccu_common *sun8i_v3s_de2_clks[] = {
+static struct ccu_common *sun50i_a64_de2_clks[] = {
&mixer0_clk.common,
+ &mixer1_clk.common,
&wb_clk.common,
&bus_mixer0_clk.common,
+ &bus_mixer1_clk.common,
&bus_wb_clk.common,
&mixer0_div_clk.common,
+ &mixer1_div_clk.common,
&wb_div_clk.common,
+
+ &bus_rot_clk.common,
+ &rot_clk.common,
+ &rot_div_clk.common,
};
static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
@@ -113,16 +119,19 @@ static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
[CLK_MIXER0] = &mixer0_clk.common.hw,
[CLK_MIXER1] = &mixer1_clk.common.hw,
[CLK_WB] = &wb_clk.common.hw,
+ [CLK_ROT] = &rot_clk.common.hw,
[CLK_BUS_MIXER0] = &bus_mixer0_clk.common.hw,
[CLK_BUS_MIXER1] = &bus_mixer1_clk.common.hw,
[CLK_BUS_WB] = &bus_wb_clk.common.hw,
+ [CLK_BUS_ROT] = &bus_rot_clk.common.hw,
[CLK_MIXER0_DIV] = &mixer0_div_a83_clk.common.hw,
[CLK_MIXER1_DIV] = &mixer1_div_a83_clk.common.hw,
[CLK_WB_DIV] = &wb_div_a83_clk.common.hw,
+ [CLK_ROT_DIV] = &rot_div_a83_clk.common.hw,
},
- .num = CLK_NUMBER_WITHOUT_ROT,
+ .num = CLK_NUMBER_WITH_ROT,
};
static struct clk_hw_onecell_data sun8i_h3_de2_hw_clks = {
@@ -156,7 +165,7 @@ static struct clk_hw_onecell_data sun8i_v3s_de2_hw_clks = {
.num = CLK_NUMBER_WITHOUT_ROT,
};
-static struct clk_hw_onecell_data sun50i_h6_de3_hw_clks = {
+static struct clk_hw_onecell_data sun50i_a64_de2_hw_clks = {
.hws = {
[CLK_MIXER0] = &mixer0_clk.common.hw,
[CLK_MIXER1] = &mixer1_clk.common.hw,
@@ -179,9 +188,19 @@ static struct clk_hw_onecell_data sun50i_h6_de3_hw_clks = {
static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
/*
- * For A83T, H3 and R40, mixer1 reset line is shared with wb, so
- * only RST_WB is exported here.
- * For V3s there's just no mixer1, so it also shares this struct.
+ * Mixer1 reset line is shared with wb, so only RST_WB is
+ * exported here.
+ */
+ [RST_WB] = { 0x08, BIT(2) },
+ [RST_ROT] = { 0x08, BIT(3) },
+};
+
+static struct ccu_reset_map sun8i_h3_de2_resets[] = {
+ [RST_MIXER0] = { 0x08, BIT(0) },
+ /*
+ * Mixer1 reset line is shared with wb, so only RST_WB is
+ * exported here.
+ * V3s doesn't have mixer1, so it also shares this struct.
*/
[RST_WB] = { 0x08, BIT(2) },
};
@@ -190,13 +209,13 @@ static struct ccu_reset_map sun50i_a64_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
+ [RST_ROT] = { 0x08, BIT(3) },
};
-static struct ccu_reset_map sun50i_h6_de3_resets[] = {
+static struct ccu_reset_map sun50i_h5_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
- [RST_ROT] = { 0x08, BIT(3) },
};
static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
@@ -215,28 +234,18 @@ static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = {
.hw_clks = &sun8i_h3_de2_hw_clks,
- .resets = sun8i_a83t_de2_resets,
- .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
+ .resets = sun8i_h3_de2_resets,
+ .num_resets = ARRAY_SIZE(sun8i_h3_de2_resets),
};
-static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
- .ccu_clks = sun8i_h3_de2_clks,
- .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = {
+ .ccu_clks = sun50i_a64_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
- .hw_clks = &sun8i_h3_de2_hw_clks,
+ .hw_clks = &sun50i_a64_de2_hw_clks,
- .resets = sun50i_a64_de2_resets,
- .num_resets = ARRAY_SIZE(sun50i_a64_de2_resets),
-};
-
-static const struct sunxi_ccu_desc sun50i_h6_de3_clk_desc = {
- .ccu_clks = sun50i_h6_de3_clks,
- .num_ccu_clks = ARRAY_SIZE(sun50i_h6_de3_clks),
-
- .hw_clks = &sun50i_h6_de3_hw_clks,
-
- .resets = sun50i_h6_de3_resets,
- .num_resets = ARRAY_SIZE(sun50i_h6_de3_resets),
+ .resets = sun8i_a83t_de2_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
};
static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
@@ -249,6 +258,26 @@ static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
+ .ccu_clks = sun50i_a64_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks),
+
+ .hw_clks = &sun50i_a64_de2_hw_clks,
+
+ .resets = sun50i_a64_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_a64_de2_resets),
+};
+
+static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
+ .ccu_clks = sun8i_h3_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks),
+
+ .hw_clks = &sun8i_h3_de2_hw_clks,
+
+ .resets = sun50i_h5_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
+};
+
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -338,6 +367,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.data = &sun8i_h3_de2_clk_desc,
},
{
+ .compatible = "allwinner,sun8i-r40-de2-clk",
+ .data = &sun8i_r40_de2_clk_desc,
+ },
+ {
.compatible = "allwinner,sun8i-v3s-de2-clk",
.data = &sun8i_v3s_de2_clk_desc,
},
@@ -347,11 +380,11 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
},
{
.compatible = "allwinner,sun50i-h5-de2-clk",
- .data = &sun50i_a64_de2_clk_desc,
+ .data = &sun50i_h5_de2_clk_desc,
},
{
.compatible = "allwinner,sun50i-h6-de3-clk",
- .data = &sun50i_h6_de3_clk_desc,
+ .data = &sun50i_h5_de2_clk_desc,
},
{ }
};
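For orientation while reading the reset-map shuffling above: each ccu_reset_map
entry is just a register offset plus a bit mask, the sunxi-ng reset lines are
active low, and mixer1 and wb are wired to the same line, which is why only
RST_WB is exported. A rough sketch of the assert path under those assumptions
(illustrative, not the ccu_reset implementation verbatim):

#include <linux/io.h>
#include <linux/types.h>

struct demo_reset_map {
	u32 reg;	/* offset into the CCU, e.g. 0x08 */
	u32 bit;	/* e.g. BIT(2) for RST_WB */
};

/* Asserting holds the block in reset by clearing its active-low bit. */
static void demo_reset_assert(void __iomem *base,
			      const struct demo_reset_map *map)
{
	u32 val = readl(base + map->reg);

	writel(val & ~map->bit, base + map->reg);
}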
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index df966ca06788..1f7c30f87ece 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -12,7 +12,6 @@ obj-y += clk-sdmmc-mux.o
obj-y += clk-super.o
obj-y += clk-tegra-audio.o
obj-y += clk-tegra-periph.o
-obj-y += clk-tegra-pmc.o
obj-y += clk-tegra-fixed.o
obj-y += clk-tegra-super-gen4.o
obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index c4faebd32760..ff7da2d3e94d 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -32,7 +32,6 @@ enum clk_id {
tegra_clk_audio4,
tegra_clk_audio4_2x,
tegra_clk_audio4_mux,
- tegra_clk_blink,
tegra_clk_bsea,
tegra_clk_bsev,
tegra_clk_cclk_g,
@@ -44,14 +43,9 @@ enum clk_id {
tegra_clk_clk72Mhz,
tegra_clk_clk72Mhz_8,
tegra_clk_clk_m,
- tegra_clk_clk_m_div2,
- tegra_clk_clk_m_div4,
- tegra_clk_clk_out_1,
- tegra_clk_clk_out_1_mux,
- tegra_clk_clk_out_2,
- tegra_clk_clk_out_2_mux,
- tegra_clk_clk_out_3,
- tegra_clk_clk_out_3_mux,
+ tegra_clk_osc,
+ tegra_clk_osc_div2,
+ tegra_clk_osc_div4,
tegra_clk_cml0,
tegra_clk_cml1,
tegra_clk_csi,
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index 7c6c8abfcde6..77c22cef5014 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -46,7 +46,28 @@ int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
return -EINVAL;
}
+ dt_clk = tegra_lookup_dt_id(tegra_clk_osc, clks);
+ if (!dt_clk)
+ return 0;
+
osc = clk_register_fixed_rate(NULL, "osc", NULL, 0, *osc_freq);
+ *dt_clk = osc;
+
+ /* osc_div2 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_osc_div2, clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "osc_div2", "osc",
+ 0, 1, 2);
+ *dt_clk = clk;
+ }
+
+ /* osc_div4 */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_osc_div4, clks);
+ if (dt_clk) {
+ clk = clk_register_fixed_factor(NULL, "osc_div4", "osc",
+ 0, 1, 4);
+ *dt_clk = clk;
+ }
dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, clks);
if (!dt_clk)
@@ -84,22 +105,6 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
*dt_clk = clk;
}
-
- /* clk_m_div2 */
- dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div2, tegra_clks);
- if (dt_clk) {
- clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
- CLK_SET_RATE_PARENT, 1, 2);
- *dt_clk = clk;
- }
-
- /* clk_m_div4 */
- dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div4, tegra_clks);
- if (dt_clk) {
- clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
- CLK_SET_RATE_PARENT, 1, 4);
- *dt_clk = clk;
- }
}
void tegra_clk_osc_resume(void __iomem *clk_base)
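The osc_div2/osc_div4 registrations above use the fixed-factor clock type, whose
rate is simply parent_rate * mult / div with no hardware access, so
clk_register_fixed_factor(NULL, "osc_div2", "osc", 0, 1, 2) always reports half
the oscillator rate. A small sanity-check sketch of that relationship (the helper
name is made up):

#include <linux/clk.h>
#include <linux/kernel.h>

/*
 * A fixed-factor clock computes rate = parent_rate * mult / div, so
 * the "osc_div2" registered above always reads back as osc / 2.
 */
static void demo_check_osc_div2(struct clk *osc, struct clk *osc_div2)
{
	WARN_ON(clk_get_rate(osc_div2) != clk_get_rate(osc) / 2);
}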
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
deleted file mode 100644
index bec3e008335f..000000000000
--- a/drivers/clk/tegra/clk-tegra-pmc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
- */
-
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/clk/tegra.h>
-
-#include "clk.h"
-#include "clk-id.h"
-
-#define PMC_CLK_OUT_CNTRL 0x1a8
-#define PMC_DPD_PADS_ORIDE 0x1c
-#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
-#define PMC_CTRL 0
-#define PMC_CTRL_BLINK_ENB 7
-#define PMC_BLINK_TIMER 0x40
-
-struct pmc_clk_init_data {
- char *mux_name;
- char *gate_name;
- const char **parents;
- int num_parents;
- int mux_id;
- int gate_id;
- char *dev_name;
- u8 mux_shift;
- u8 gate_shift;
-};
-
-#define PMC_CLK(_num, _mux_shift, _gate_shift)\
- {\
- .mux_name = "clk_out_" #_num "_mux",\
- .gate_name = "clk_out_" #_num,\
- .parents = clk_out ##_num ##_parents,\
- .num_parents = ARRAY_SIZE(clk_out ##_num ##_parents),\
- .mux_id = tegra_clk_clk_out_ ##_num ##_mux,\
- .gate_id = tegra_clk_clk_out_ ##_num,\
- .dev_name = "extern" #_num,\
- .mux_shift = _mux_shift,\
- .gate_shift = _gate_shift,\
- }
-
-static DEFINE_SPINLOCK(clk_out_lock);
-
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1",
-};
-
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2",
-};
-
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3",
-};
-
-static struct pmc_clk_init_data pmc_clks[] = {
- PMC_CLK(1, 6, 2),
- PMC_CLK(2, 14, 10),
- PMC_CLK(3, 22, 18),
-};
-
-void __init tegra_pmc_clk_init(void __iomem *pmc_base,
- struct tegra_clk *tegra_clks)
-{
- struct clk *clk;
- struct clk **dt_clk;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmc_clks); i++) {
- struct pmc_clk_init_data *data;
-
- data = pmc_clks + i;
-
- dt_clk = tegra_lookup_dt_id(data->mux_id, tegra_clks);
- if (!dt_clk)
- continue;
-
- clk = clk_register_mux(NULL, data->mux_name, data->parents,
- data->num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
- pmc_base + PMC_CLK_OUT_CNTRL, data->mux_shift,
- 3, 0, &clk_out_lock);
- *dt_clk = clk;
-
-
- dt_clk = tegra_lookup_dt_id(data->gate_id, tegra_clks);
- if (!dt_clk)
- continue;
-
- clk = clk_register_gate(NULL, data->gate_name, data->mux_name,
- CLK_SET_RATE_PARENT,
- pmc_base + PMC_CLK_OUT_CNTRL,
- data->gate_shift, 0, &clk_out_lock);
- *dt_clk = clk;
- clk_register_clkdev(clk, data->dev_name, data->gate_name);
- }
-
- /* blink */
- writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
- clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
- pmc_base + PMC_DPD_PADS_ORIDE,
- PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
-
- dt_clk = tegra_lookup_dt_id(tegra_clk_blink, tegra_clks);
- if (!dt_clk)
- return;
-
- clk = clk_register_gate(NULL, "blink", "blink_override", 0,
- pmc_base + PMC_CTRL,
- PMC_CTRL_BLINK_ENB, 0, NULL);
- clk_register_clkdev(clk, "blink", NULL);
- *dt_clk = clk;
-}
-
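For reference while reading the deletion above: the PMC_CLK() macro built its
initializers by token pasting, so PMC_CLK(1, 6, 2) expanded (reconstructed by
hand from the macro, so treat it as illustrative) to:

{
	.mux_name    = "clk_out_1_mux",
	.gate_name   = "clk_out_1",
	.parents     = clk_out1_parents,
	.num_parents = ARRAY_SIZE(clk_out1_parents),
	.mux_id      = tegra_clk_clk_out_1_mux,
	.gate_id     = tegra_clk_clk_out_1,
	.dev_name    = "extern1",
	.mux_shift   = 6,
	.gate_shift  = 2,
},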
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 4efcaaf51b3a..bc9e47a4cb60 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -735,8 +735,9 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_fuse_burn] = { .dt_id = TEGRA114_CLK_FUSE_BURN, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA114_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA114_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA114_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA114_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA114_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA114_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA114_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA114_CLK_PLL_REF, .present = true },
[tegra_clk_pll_c] = { .dt_id = TEGRA114_CLK_PLL_C, .present = true },
[tegra_clk_pll_c_out1] = { .dt_id = TEGRA114_CLK_PLL_C_OUT1, .present = true },
@@ -778,10 +779,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3] = { .dt_id = TEGRA114_CLK_AUDIO3, .present = true },
[tegra_clk_audio4] = { .dt_id = TEGRA114_CLK_AUDIO4, .present = true },
[tegra_clk_spdif] = { .dt_id = TEGRA114_CLK_SPDIF, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA114_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA114_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA114_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA114_CLK_BLINK, .present = true },
[tegra_clk_xusb_host_src] = { .dt_id = TEGRA114_CLK_XUSB_HOST_SRC, .present = true },
[tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA114_CLK_XUSB_FALCON_SRC, .present = true },
[tegra_clk_xusb_fs_src] = { .dt_id = TEGRA114_CLK_XUSB_FS_SRC, .present = true },
@@ -803,9 +800,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_mux] = { .dt_id = TEGRA114_CLK_AUDIO3_MUX, .present = true },
[tegra_clk_audio4_mux] = { .dt_id = TEGRA114_CLK_AUDIO4_MUX, .present = true },
[tegra_clk_spdif_mux] = { .dt_id = TEGRA114_CLK_SPDIF_MUX, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_dsia_mux] = { .dt_id = TEGRA114_CLK_DSIA_MUX, .present = true },
[tegra_clk_dsib_mux] = { .dt_id = TEGRA114_CLK_DSIB_MUX, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA114_CLK_CEC, .present = true },
@@ -815,8 +809,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "clk_m", .dt_id = TEGRA114_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA114_CLK_PLL_REF },
{ .con_id = "clk_32k", .dt_id = TEGRA114_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA114_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA114_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA114_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA114_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA114_CLK_OSC_DIV4 },
{ .con_id = "pll_c", .dt_id = TEGRA114_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA114_CLK_PLL_C_OUT1 },
{ .con_id = "pll_c2", .dt_id = TEGRA114_CLK_PLL_C2 },
@@ -863,10 +858,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio3_2x", .dt_id = TEGRA114_CLK_AUDIO3_2X },
{ .con_id = "audio4_2x", .dt_id = TEGRA114_CLK_AUDIO4_2X },
{ .con_id = "spdif_2x", .dt_id = TEGRA114_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA114_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA114_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA114_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA114_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA114_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA114_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA114_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA114_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA114_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA114_CLK_SCLK },
@@ -900,17 +894,6 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
/* clk_32k */
clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
clks[TEGRA114_CLK_CLK_32K] = clk;
-
- /* clk_m_div2 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
- CLK_SET_RATE_PARENT, 1, 2);
- clks[TEGRA114_CLK_CLK_M_DIV2] = clk;
-
- /* clk_m_div4 */
- clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
- CLK_SET_RATE_PARENT, 1, 4);
- clks[TEGRA114_CLK_CLK_M_DIV4] = clk;
-
}
static void __init tegra114_pll_init(void __iomem *clk_base,
@@ -1153,11 +1136,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_UARTB, TEGRA114_CLK_PLL_P, 408000000, 0 },
{ TEGRA114_CLK_UARTC, TEGRA114_CLK_PLL_P, 408000000, 0 },
{ TEGRA114_CLK_UARTD, TEGRA114_CLK_PLL_P, 408000000, 0 },
- { TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA114_CLK_EXTERN1, TEGRA114_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA114_CLK_CLK_OUT_1_MUX, TEGRA114_CLK_EXTERN1, 0, 1 },
- { TEGRA114_CLK_CLK_OUT_1, TEGRA114_CLK_CLK_MAX, 0, 1 },
+ { TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA114_CLK_I2S0, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA114_CLK_I2S1, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA114_CLK_I2S2, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 },
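In the init-table hunk above, the last column is the enable state, so flipping
it from 1 to 0 keeps the parent/rate setup for PLL_A and PLL_A_OUT0 but stops
force-enabling them at boot. The row layout, as declared in the Tegra clk
driver's header (shown for orientation):

struct tegra_clk_init_table {
	unsigned int	clk_id;		/* clock to configure */
	unsigned int	parent_id;	/* parent to select; CLK_MAX leaves it unchanged */
	unsigned long	rate;		/* rate to set, 0 to skip */
	int		state;		/* non-zero: enable the clock at init */
};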
@@ -1359,7 +1339,6 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
tegra114_audio_plls,
ARRAY_SIZE(tegra114_audio_plls), 24000000);
- tegra_pmc_clk_init(pmc_base, tegra114_clks);
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index b3110d5b5a6c..64e229ddf2a5 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -860,8 +860,9 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_fuse_burn] = { .dt_id = TEGRA124_CLK_FUSE_BURN, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA124_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA124_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA124_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA124_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA124_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA124_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA124_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA124_CLK_PLL_REF, .present = true },
[tegra_clk_pll_c] = { .dt_id = TEGRA124_CLK_PLL_C, .present = true },
[tegra_clk_pll_c_out1] = { .dt_id = TEGRA124_CLK_PLL_C_OUT1, .present = true },
@@ -902,10 +903,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3] = { .dt_id = TEGRA124_CLK_AUDIO3, .present = true },
[tegra_clk_audio4] = { .dt_id = TEGRA124_CLK_AUDIO4, .present = true },
[tegra_clk_spdif] = { .dt_id = TEGRA124_CLK_SPDIF, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA124_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA124_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA124_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA124_CLK_BLINK, .present = true },
[tegra_clk_xusb_host_src] = { .dt_id = TEGRA124_CLK_XUSB_HOST_SRC, .present = true },
[tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA124_CLK_XUSB_FALCON_SRC, .present = true },
[tegra_clk_xusb_fs_src] = { .dt_id = TEGRA124_CLK_XUSB_FS_SRC, .present = true },
@@ -931,9 +928,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_mux] = { .dt_id = TEGRA124_CLK_AUDIO3_MUX, .present = true },
[tegra_clk_audio4_mux] = { .dt_id = TEGRA124_CLK_AUDIO4_MUX, .present = true },
[tegra_clk_spdif_mux] = { .dt_id = TEGRA124_CLK_SPDIF_MUX, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA124_CLK_CEC, .present = true },
};
@@ -941,8 +935,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "clk_m", .dt_id = TEGRA124_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA124_CLK_PLL_REF },
{ .con_id = "clk_32k", .dt_id = TEGRA124_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA124_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA124_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA124_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA124_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA124_CLK_OSC_DIV4 },
{ .con_id = "pll_c", .dt_id = TEGRA124_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA124_CLK_PLL_C_OUT1 },
{ .con_id = "pll_c2", .dt_id = TEGRA124_CLK_PLL_C2 },
@@ -988,10 +983,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio3_2x", .dt_id = TEGRA124_CLK_AUDIO3_2X },
{ .con_id = "audio4_2x", .dt_id = TEGRA124_CLK_AUDIO4_2X },
{ .con_id = "spdif_2x", .dt_id = TEGRA124_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA124_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA124_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA124_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA124_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA124_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA124_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA124_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA124_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA124_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA124_CLK_SCLK },
@@ -1298,11 +1292,8 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0 },
{ TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0 },
{ TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0 },
- { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA124_CLK_EXTERN1, TEGRA124_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA124_CLK_CLK_OUT_1_MUX, TEGRA124_CLK_EXTERN1, 0, 1 },
- { TEGRA124_CLK_CLK_OUT_1, TEGRA124_CLK_CLK_MAX, 0, 1 },
+ { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
@@ -1457,11 +1448,9 @@ static void __init tegra132_clock_apply_init_table(void)
* tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
* @np: struct device_node * of the DT node for the SoC CAR IP block
*
- * Register most of the clocks controlled by the CAR IP block, along
- * with a few clocks controlled by the PMC IP block. Everything in
- * this function should be common to Tegra124 and Tegra132. XXX The
- * PMC clock initialization should probably be moved to PMC-specific
- * driver code. No return value.
+ * Register most of the clocks controlled by the CAR IP block.
+ * Everything in this function should be common to Tegra124 and Tegra132.
+ * No return value.
*/
static void __init tegra124_132_clock_init_pre(struct device_node *np)
{
@@ -1504,7 +1493,6 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
tegra124_audio_plls,
ARRAY_SIZE(tegra124_audio_plls), 24576000);
- tegra_pmc_clk_init(pmc_base, tegra124_clks);
/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
plld_base = readl(clk_base + PLLD_BASE);
@@ -1516,11 +1504,11 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
* tegra124_132_clock_init_post - clock initialization postamble for T124/T132
* @np: struct device_node * of the DT node for the SoC CAR IP block
*
- * Register most of the along with a few clocks controlled by the PMC
- * IP block. Everything in this function should be common to Tegra124
+ * Register most of the clocks controlled by the CAR IP block.
+ * Everything in this function should be common to Tegra124
* and Tegra132. This function must be called after
- * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will
- * not be set. No return value.
+ * tegra124_132_clock_init_pre(), otherwise clk_base will not be set.
+ * No return value.
*/
static void __init tegra124_132_clock_init_post(struct device_node *np)
{
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index fff5cba87637..085feb04e913 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -458,7 +458,6 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "cdev1", .dt_id = TEGRA20_CLK_CDEV1 },
{ .con_id = "cdev2", .dt_id = TEGRA20_CLK_CDEV2 },
{ .con_id = "clk_32k", .dt_id = TEGRA20_CLK_CLK_32K },
- { .con_id = "blink", .dt_id = TEGRA20_CLK_BLINK },
{ .con_id = "clk_m", .dt_id = TEGRA20_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA20_CLK_PLL_REF },
{ .dev_id = "tegra20-i2s.0", .dt_id = TEGRA20_CLK_I2S1 },
@@ -537,7 +536,6 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
[tegra_clk_csi] = { .dt_id = TEGRA20_CLK_CSI, .present = true },
[tegra_clk_isp] = { .dt_id = TEGRA20_CLK_ISP, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA20_CLK_CLK_32K, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA20_CLK_BLINK, .present = true },
[tegra_clk_hclk] = { .dt_id = TEGRA20_CLK_HCLK, .present = true },
[tegra_clk_pclk] = { .dt_id = TEGRA20_CLK_PCLK, .present = true },
[tegra_clk_pll_p_out1] = { .dt_id = TEGRA20_CLK_PLL_P_OUT1, .present = true },
@@ -1031,10 +1029,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_UARTC, TEGRA20_CLK_PLL_P, 0, 0 },
{ TEGRA20_CLK_UARTD, TEGRA20_CLK_PLL_P, 0, 0 },
{ TEGRA20_CLK_UARTE, TEGRA20_CLK_PLL_P, 0, 0 },
- { TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 1 },
- { TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA20_CLK_CDEV1, TEGRA20_CLK_CLK_MAX, 0, 1 },
- { TEGRA20_CLK_BLINK, TEGRA20_CLK_CLK_MAX, 32768, 1 },
+ { TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 0 },
+ { TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA20_CLK_I2S1, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA20_CLK_I2S2, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA20_CLK_SDMMC1, TEGRA20_CLK_PLL_P, 48000000, 0 },
@@ -1146,7 +1142,6 @@ static void __init tegra20_clock_init(struct device_node *np)
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra20_clks, NULL);
tegra20_periph_clk_init();
tegra20_audio_clk_init();
- tegra_pmc_clk_init(pmc_base, tegra20_clks);
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX);
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 762cd186f714..defe3b7ebfa4 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -2371,8 +2371,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_fuse_burn] = { .dt_id = TEGRA210_CLK_FUSE_BURN, .present = true },
[tegra_clk_clk_32k] = { .dt_id = TEGRA210_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA210_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA210_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA210_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA210_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA210_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA210_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA210_CLK_PLL_REF, .present = true },
[tegra_clk_pll_c] = { .dt_id = TEGRA210_CLK_PLL_C, .present = true },
[tegra_clk_pll_c_out1] = { .dt_id = TEGRA210_CLK_PLL_C_OUT1, .present = true },
@@ -2417,10 +2418,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3] = { .dt_id = TEGRA210_CLK_AUDIO3, .present = true },
[tegra_clk_audio4] = { .dt_id = TEGRA210_CLK_AUDIO4, .present = true },
[tegra_clk_spdif] = { .dt_id = TEGRA210_CLK_SPDIF, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA210_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA210_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA210_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA210_CLK_BLINK, .present = true },
[tegra_clk_xusb_gate] = { .dt_id = TEGRA210_CLK_XUSB_GATE, .present = true },
[tegra_clk_xusb_host_src_8] = { .dt_id = TEGRA210_CLK_XUSB_HOST_SRC, .present = true },
[tegra_clk_xusb_falcon_src_8] = { .dt_id = TEGRA210_CLK_XUSB_FALCON_SRC, .present = true },
@@ -2452,9 +2449,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_mux] = { .dt_id = TEGRA210_CLK_AUDIO3_MUX, .present = true },
[tegra_clk_audio4_mux] = { .dt_id = TEGRA210_CLK_AUDIO4_MUX, .present = true },
[tegra_clk_spdif_mux] = { .dt_id = TEGRA210_CLK_SPDIF_MUX, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_maud] = { .dt_id = TEGRA210_CLK_MAUD, .present = true },
[tegra_clk_mipibif] = { .dt_id = TEGRA210_CLK_MIPIBIF, .present = true },
[tegra_clk_qspi] = { .dt_id = TEGRA210_CLK_QSPI, .present = true },
@@ -2497,8 +2491,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "clk_m", .dt_id = TEGRA210_CLK_CLK_M },
{ .con_id = "pll_ref", .dt_id = TEGRA210_CLK_PLL_REF },
{ .con_id = "clk_32k", .dt_id = TEGRA210_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA210_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA210_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA210_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA210_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA210_CLK_OSC_DIV4 },
{ .con_id = "pll_c", .dt_id = TEGRA210_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA210_CLK_PLL_C_OUT1 },
{ .con_id = "pll_c2", .dt_id = TEGRA210_CLK_PLL_C2 },
@@ -2540,10 +2535,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio4", .dt_id = TEGRA210_CLK_AUDIO4 },
{ .con_id = "spdif", .dt_id = TEGRA210_CLK_SPDIF },
{ .con_id = "spdif_2x", .dt_id = TEGRA210_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA210_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA210_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA210_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA210_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA210_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA210_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA210_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA210_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA210_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA210_CLK_SCLK },
@@ -2999,7 +2993,7 @@ static const char * const la_parents[] = {
};
static struct tegra_clk_periph tegra210_la =
- TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, 0);
+ TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, NULL);
static __init void tegra210_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
@@ -3448,11 +3442,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_UARTB, TEGRA210_CLK_PLL_P, 408000000, 0 },
{ TEGRA210_CLK_UARTC, TEGRA210_CLK_PLL_P, 408000000, 0 },
{ TEGRA210_CLK_UARTD, TEGRA210_CLK_PLL_P, 408000000, 0 },
- { TEGRA210_CLK_PLL_A, TEGRA210_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA210_CLK_PLL_A_OUT0, TEGRA210_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA210_CLK_EXTERN1, TEGRA210_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA210_CLK_CLK_OUT_1_MUX, TEGRA210_CLK_EXTERN1, 0, 1 },
- { TEGRA210_CLK_CLK_OUT_1, TEGRA210_CLK_CLK_MAX, 0, 1 },
+ { TEGRA210_CLK_PLL_A, TEGRA210_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA210_CLK_PLL_A_OUT0, TEGRA210_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA210_CLK_I2S0, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA210_CLK_I2S1, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA210_CLK_I2S2, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
@@ -3693,7 +3684,6 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
tegra210_audio_plls,
ARRAY_SIZE(tegra210_audio_plls), 24576000);
- tegra_pmc_clk_init(pmc_base, tegra210_clks);
/* For Tegra210, PLLD is the only source for DSIA & DSIB */
value = readl(clk_base + PLLD_BASE);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index b20891489e11..3255f82e61b5 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -569,10 +569,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "audio3_2x", .dt_id = TEGRA30_CLK_AUDIO3_2X },
{ .con_id = "audio4_2x", .dt_id = TEGRA30_CLK_AUDIO4_2X },
{ .con_id = "spdif_2x", .dt_id = TEGRA30_CLK_SPDIF_2X },
- { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA30_CLK_EXTERN1 },
- { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA30_CLK_EXTERN2 },
- { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA30_CLK_EXTERN3 },
- { .con_id = "blink", .dt_id = TEGRA30_CLK_BLINK },
+ { .con_id = "extern1", .dt_id = TEGRA30_CLK_EXTERN1 },
+ { .con_id = "extern2", .dt_id = TEGRA30_CLK_EXTERN2 },
+ { .con_id = "extern3", .dt_id = TEGRA30_CLK_EXTERN3 },
{ .con_id = "cclk_g", .dt_id = TEGRA30_CLK_CCLK_G },
{ .con_id = "cclk_lp", .dt_id = TEGRA30_CLK_CCLK_LP },
{ .con_id = "sclk", .dt_id = TEGRA30_CLK_SCLK },
@@ -581,8 +580,9 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "twd", .dt_id = TEGRA30_CLK_TWD },
{ .con_id = "emc", .dt_id = TEGRA30_CLK_EMC },
{ .con_id = "clk_32k", .dt_id = TEGRA30_CLK_CLK_32K },
- { .con_id = "clk_m_div2", .dt_id = TEGRA30_CLK_CLK_M_DIV2 },
- { .con_id = "clk_m_div4", .dt_id = TEGRA30_CLK_CLK_M_DIV4 },
+ { .con_id = "osc", .dt_id = TEGRA30_CLK_OSC },
+ { .con_id = "osc_div2", .dt_id = TEGRA30_CLK_OSC_DIV2 },
+ { .con_id = "osc_div4", .dt_id = TEGRA30_CLK_OSC_DIV4 },
{ .con_id = "cml0", .dt_id = TEGRA30_CLK_CML0 },
{ .con_id = "cml1", .dt_id = TEGRA30_CLK_CML1 },
{ .con_id = "clk_m", .dt_id = TEGRA30_CLK_CLK_M },
@@ -683,8 +683,9 @@ static struct tegra_devclk devclks[] __initdata = {
static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_clk_32k] = { .dt_id = TEGRA30_CLK_CLK_32K, .present = true },
[tegra_clk_clk_m] = { .dt_id = TEGRA30_CLK_CLK_M, .present = true },
- [tegra_clk_clk_m_div2] = { .dt_id = TEGRA30_CLK_CLK_M_DIV2, .present = true },
- [tegra_clk_clk_m_div4] = { .dt_id = TEGRA30_CLK_CLK_M_DIV4, .present = true },
+ [tegra_clk_osc] = { .dt_id = TEGRA30_CLK_OSC, .present = true },
+ [tegra_clk_osc_div2] = { .dt_id = TEGRA30_CLK_OSC_DIV2, .present = true },
+ [tegra_clk_osc_div4] = { .dt_id = TEGRA30_CLK_OSC_DIV4, .present = true },
[tegra_clk_pll_ref] = { .dt_id = TEGRA30_CLK_PLL_REF, .present = true },
[tegra_clk_spdif_in_sync] = { .dt_id = TEGRA30_CLK_SPDIF_IN_SYNC, .present = true },
[tegra_clk_i2s0_sync] = { .dt_id = TEGRA30_CLK_I2S0_SYNC, .present = true },
@@ -711,13 +712,6 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_audio3_2x] = { .dt_id = TEGRA30_CLK_AUDIO3_2X, .present = true },
[tegra_clk_audio4_2x] = { .dt_id = TEGRA30_CLK_AUDIO4_2X, .present = true },
[tegra_clk_spdif_2x] = { .dt_id = TEGRA30_CLK_SPDIF_2X, .present = true },
- [tegra_clk_clk_out_1] = { .dt_id = TEGRA30_CLK_CLK_OUT_1, .present = true },
- [tegra_clk_clk_out_2] = { .dt_id = TEGRA30_CLK_CLK_OUT_2, .present = true },
- [tegra_clk_clk_out_3] = { .dt_id = TEGRA30_CLK_CLK_OUT_3, .present = true },
- [tegra_clk_blink] = { .dt_id = TEGRA30_CLK_BLINK, .present = true },
- [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_1_MUX, .present = true },
- [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_2_MUX, .present = true },
- [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_hclk] = { .dt_id = TEGRA30_CLK_HCLK, .present = true },
[tegra_clk_pclk] = { .dt_id = TEGRA30_CLK_PCLK, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA30_CLK_I2S0, .present = true },
@@ -1227,12 +1221,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_UARTC, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTD, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTE, TEGRA30_CLK_PLL_P, 408000000, 0 },
- { TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 1 },
- { TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 1 },
- { TEGRA30_CLK_EXTERN1, TEGRA30_CLK_PLL_A_OUT0, 0, 1 },
- { TEGRA30_CLK_CLK_OUT_1_MUX, TEGRA30_CLK_EXTERN1, 0, 0 },
- { TEGRA30_CLK_CLK_OUT_1, TEGRA30_CLK_CLK_MAX, 0, 1 },
- { TEGRA30_CLK_BLINK, TEGRA30_CLK_CLK_MAX, 0, 1 },
+ { TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 0 },
+ { TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 0 },
{ TEGRA30_CLK_I2S0, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA30_CLK_I2S1, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA30_CLK_I2S2, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 },
@@ -1362,7 +1352,6 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
tegra30_audio_plls,
ARRAY_SIZE(tegra30_audio_plls), 24000000);
- tegra_pmc_clk_init(pmc_base, tegra30_clks);
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 416a6b09f6a3..2c9a68302e02 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -854,7 +854,6 @@ void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *pll_params);
-void tegra_pmc_clk_init(void __iomem *pmc_base, struct tegra_clk *tegra_clks);
void tegra_fixed_clk_init(struct tegra_clk *tegra_clks);
int tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
unsigned long *input_freqs, unsigned int num,
diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c
index 087cfa75ac24..4f8bd34ec1a5 100644
--- a/drivers/clk/ti/clk-814x.c
+++ b/drivers/clk/ti/clk-814x.c
@@ -25,7 +25,6 @@ static const struct omap_clkctrl_reg_data dm814_alwon_clkctrl_regs[] __initconst
{ DM814_WD_TIMER_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
{ DM814_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
{ DM814_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
- { DM814_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
{ DM814_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "mpu_ck" },
{ DM814_RTC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
{ DM814_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
@@ -39,9 +38,15 @@ static const struct omap_clkctrl_reg_data dm814_alwon_clkctrl_regs[] __initconst
{ 0 },
};
+static const struct
+omap_clkctrl_reg_data dm814_alwon_ethernet_clkctrl_regs[] __initconst = {
+ { 0, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
+ { 0 },
+};
+
const struct omap_clkctrl_data dm814_clkctrl_data[] __initconst = {
{ 0x48180500, dm814_default_clkctrl_regs },
{ 0x48181400, dm814_alwon_clkctrl_regs },
+ { 0x481815d4, dm814_alwon_ethernet_clkctrl_regs },
{ 0 },
};
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index fe686f77787f..692be2fd9261 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -34,18 +34,6 @@
#define INTEGRATOR_AP_PCI_25_33_MHZ BIT(8)
/**
- * enum icst_control_type - the type of ICST control register
- */
-enum icst_control_type {
- ICST_VERSATILE, /* The standard type, all control bits available */
- ICST_INTEGRATOR_AP_CM, /* Only 8 bits of VDW available */
- ICST_INTEGRATOR_AP_SYS, /* Only 8 bits of VDW available */
- ICST_INTEGRATOR_AP_PCI, /* Odd bit pattern storage */
- ICST_INTEGRATOR_CP_CM_CORE, /* Only 8 bits of VDW and 3 bits of OD */
- ICST_INTEGRATOR_CP_CM_MEM, /* Only 8 bits of VDW and 3 bits of OD */
-};
-
-/**
* struct clk_icst - ICST VCO clock wrapper
* @hw: corresponding clock hardware entry
* @vcoreg: VCO register address
@@ -344,12 +332,12 @@ static const struct clk_ops icst_ops = {
.set_rate = icst_set_rate,
};
-static struct clk *icst_clk_setup(struct device *dev,
- const struct clk_icst_desc *desc,
- const char *name,
- const char *parent_name,
- struct regmap *map,
- enum icst_control_type ctype)
+struct clk *icst_clk_setup(struct device *dev,
+ const struct clk_icst_desc *desc,
+ const char *name,
+ const char *parent_name,
+ struct regmap *map,
+ enum icst_control_type ctype)
{
struct clk *clk;
struct clk_icst *icst;
@@ -386,6 +374,7 @@ static struct clk *icst_clk_setup(struct device *dev,
return clk;
}
+EXPORT_SYMBOL_GPL(icst_clk_setup);
struct clk *icst_clk_register(struct device *dev,
const struct clk_icst_desc *desc,
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index e36ca1a20e90..1a119ef11066 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -1,4 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
+struct regmap;
+
+/**
+ * enum icst_control_type - the type of ICST control register
+ */
+enum icst_control_type {
+ ICST_VERSATILE, /* The standard type, all control bits available */
+ ICST_INTEGRATOR_AP_CM, /* Only 8 bits of VDW available */
+ ICST_INTEGRATOR_AP_SYS, /* Only 8 bits of VDW available */
+ ICST_INTEGRATOR_AP_PCI, /* Odd bit pattern storage */
+ ICST_INTEGRATOR_CP_CM_CORE, /* Only 8 bits of VDW and 3 bits of OD */
+ ICST_INTEGRATOR_CP_CM_MEM, /* Only 8 bits of VDW and 3 bits of OD */
+ ICST_INTEGRATOR_IM_PD1, /* Like the Versatile, all control bits */
+};
+
/**
* struct clk_icst_desc - descriptor for the ICST VCO
* @params: ICST parameters
@@ -17,3 +32,10 @@ struct clk *icst_clk_register(struct device *dev,
const char *name,
const char *parent_name,
void __iomem *base);
+
+struct clk *icst_clk_setup(struct device *dev,
+ const struct clk_icst_desc *desc,
+ const char *name,
+ const char *parent_name,
+ struct regmap *map,
+ enum icst_control_type ctype);
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 1991f15a5db9..b05da8516d4c 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -7,7 +7,11 @@
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/clk-integrator.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "icst.h"
#include "clk-icst.h"
@@ -175,3 +179,78 @@ void integrator_impd1_clk_exit(unsigned int id)
kfree(imc->pclkname);
}
EXPORT_SYMBOL_GPL(integrator_impd1_clk_exit);
+
+static int integrator_impd1_clk_spawn(struct device *dev,
+ struct device_node *parent,
+ struct device_node *np)
+{
+ struct regmap *map;
+ struct clk *clk = ERR_PTR(-EINVAL);
+ const char *name = np->name;
+ const char *parent_name;
+ const struct clk_icst_desc *desc;
+ int ret;
+
+ map = syscon_node_to_regmap(parent);
+ if (IS_ERR(map)) {
+ pr_err("no regmap for syscon IM-PD1 ICST clock parent\n");
+ return PTR_ERR(map);
+ }
+
+ if (of_device_is_compatible(np, "arm,impd1-vco1")) {
+ desc = &impd1_icst1_desc;
+ } else if (of_device_is_compatible(np, "arm,impd1-vco2")) {
+ desc = &impd1_icst2_desc;
+ } else {
+ dev_err(dev, "not a clock node %s\n", name);
+ return -ENODEV;
+ }
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ clk = icst_clk_setup(NULL, desc, name, parent_name, map,
+ ICST_INTEGRATOR_IM_PD1);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ ret = 0;
+ } else {
+ dev_err(dev, "error setting up IM-PD1 ICST clock\n");
+ ret = PTR_ERR(clk);
+ }
+
+ return ret;
+}
+
+static int integrator_impd1_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int ret = 0;
+
+ for_each_available_child_of_node(np, child) {
+ ret = integrator_impd1_clk_spawn(dev, np, child);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id impd1_syscon_match[] = {
+ { .compatible = "arm,im-pd1-syscon", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, impd1_syscon_match);
+
+static struct platform_driver impd1_clk_driver = {
+ .driver = {
+ .name = "impd1-clk",
+ .of_match_table = impd1_syscon_match,
+ },
+ .probe = integrator_impd1_clk_probe,
+};
+builtin_platform_driver(impd1_clk_driver);
+
+MODULE_AUTHOR("Linus Walleij <linusw@kernel.org>");
+MODULE_DESCRIPTION("Arm IM-PD1 module clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
index 7ad4a8b008c2..1a86a4e7e344 100644
--- a/drivers/clocksource/timer-vf-pit.c
+++ b/drivers/clocksource/timer-vf-pit.c
@@ -129,7 +129,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "VF pit timer", &clockevent_pit);
+ "VF pit timer", &clockevent_pit));
clockevent_pit.cpumask = cpumask_of(0);
clockevent_pit.irq = irq;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index bff5295016ae..c3e6bd59e920 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -37,10 +37,12 @@ config CPU_FREQ_STAT
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if BIG_LITTLE
+ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
help
This option sets which CPUFreq governor shall be loaded at
- startup. If in doubt, select 'performance'.
+ startup. If in doubt, use the default setting.
config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
bool "performance"
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 62502d0e4c33..bc58a0809d11 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -8,6 +8,8 @@ config X86_INTEL_PSTATE
depends on X86
select ACPI_PROCESSOR if ACPI
select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
+ select CPU_FREQ_GOV_PERFORMANCE
+ select CPU_FREQ_GOV_SCHEDUTIL if SMP
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 56f4bc0d209e..8646eb197cd9 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -902,6 +902,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
+ struct cpufreq_policy *policy;
unsigned int cpu;
cpumask_t mask;
@@ -916,12 +917,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
chip->restore = false;
for_each_cpu(cpu, &mask) {
int index;
- struct cpufreq_policy policy;
- cpufreq_get_policy(&policy, cpu);
- index = cpufreq_table_find_index_c(&policy, policy.cur);
- powernv_cpufreq_target_index(&policy, index);
- cpumask_andnot(&mask, &mask, policy.cpus);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ index = cpufreq_table_find_index_c(policy, policy->cur);
+ powernv_cpufreq_target_index(policy, index);
+ cpumask_andnot(&mask, &mask, policy->cpus);
+ cpufreq_cpu_put(policy);
}
out:
put_online_cpus();
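The hunk above replaces a stack copy of the policy with the refcounted per-CPU
object, so every successful cpufreq_cpu_get() must be paired with a
cpufreq_cpu_put(). The canonical shape of that pattern (the helper name is made
up):

#include <linux/cpufreq.h>

/* Refcounted access to a CPU's policy: pair cpufreq_cpu_get() with _put(). */
static unsigned int demo_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur;

	if (!policy)
		return 0;	/* CPU has no policy, e.g. it is offline */

	cur = policy->cur;
	cpufreq_cpu_put(policy);

	return cur;
}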
@@ -1080,6 +1083,12 @@ free_and_return:
static inline void clean_chip_info(void)
{
+ int i;
+
+ /* flush any pending work items */
+ if (chips)
+ for (i = 0; i < nr_chips; i++)
+ cancel_work_sync(&chips[i].throttle);
kfree(chips);
}
@@ -1108,9 +1117,6 @@ static int __init powernv_cpufreq_init(void)
if (rc)
goto out;
- register_reboot_notifier(&powernv_cpufreq_reboot_nb);
- opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
-
if (powernv_pstate_info.wof_enabled)
powernv_cpufreq_driver.boost_enabled = true;
else
@@ -1119,15 +1125,17 @@ static int __init powernv_cpufreq_init(void)
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
pr_info("Failed to register the cpufreq driver (%d)\n", rc);
- goto cleanup_notifiers;
+ goto cleanup;
}
if (powernv_pstate_info.wof_enabled)
cpufreq_enable_boost_support();
+ register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+ opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
+
return 0;
-cleanup_notifiers:
- unregister_all_notifiers();
+cleanup:
clean_chip_info();
out:
pr_info("Platform driver disabled. System does not support PState control\n");
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 62272ecfa771..99a2d72ac02b 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -86,3 +86,11 @@ config ARM_MVEBU_V7_CPUIDLE
depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
+
+config ARM_TEGRA_CPUIDLE
+ bool "CPU Idle Driver for NVIDIA Tegra SoCs"
+ depends on ARCH_TEGRA && !ARM64
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
+ select ARM_CPU_SUSPEND
+ help
+ Select this to enable cpuidle for NVIDIA Tegra20/30/114/124 SoCs.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cc8c769d7fa9..55a464f6a78b 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o
cpuidle_psci-y := cpuidle-psci.o
cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
+obj-$(CONFIG_ARM_TEGRA_CPUIDLE) += cpuidle-tegra.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
new file mode 100644
index 000000000000..313b0290e97b
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU idle driver for Tegra CPUs
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2011 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ * Gary King <gking@nvidia.com>
+ *
+ * Rework for 3.3 by Peter De Schrijver <pdeschrijver@nvidia.com>
+ *
+ * Tegra20/124 driver unification by Dmitry Osipenko <digetx@gmail.com>
+ */
+
+#define pr_fmt(fmt) "tegra-cpuidle: " fmt
+
+#include <linux/atomic.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/clk/tegra.h>
+#include <linux/firmware/trusted_foundations.h>
+
+#include <soc/tegra/cpuidle.h>
+#include <soc/tegra/flowctrl.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/irq.h>
+#include <soc/tegra/pm.h>
+#include <soc/tegra/pmc.h>
+
+#include <asm/cpuidle.h>
+#include <asm/firmware.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+enum tegra_state {
+ TEGRA_C1,
+ TEGRA_C7,
+ TEGRA_CC6,
+ TEGRA_STATE_COUNT,
+};
+
+static atomic_t tegra_idle_barrier;
+static atomic_t tegra_abort_flag;
+
+static inline bool tegra_cpuidle_using_firmware(void)
+{
+ return firmware_ops->prepare_idle && firmware_ops->do_idle;
+}
+
+static void tegra_cpuidle_report_cpus_state(void)
+{
+ unsigned long cpu, lcpu, csr;
+
+ for_each_cpu(lcpu, cpu_possible_mask) {
+ cpu = cpu_logical_map(lcpu);
+ csr = flowctrl_read_cpu_csr(cpu);
+
+ pr_err("cpu%lu: online=%d flowctrl_csr=0x%08lx\n",
+ cpu, cpu_online(lcpu), csr);
+ }
+}
+
+static int tegra_cpuidle_wait_for_secondary_cpus_parking(void)
+{
+ unsigned int retries = 3;
+
+ while (retries--) {
+ unsigned int delay_us = 10;
+ unsigned int timeout_us = 500 * 1000 / delay_us;
+
+ /*
+ * The primary CPU0 core must wait for the secondaries to
+ * shut down in order to power off the CPU cluster safely.
+ * The timeout value depends on the current CPU frequency:
+ * it takes about 40-150us on average and over 1000us in
+ * a worst-case scenario.
+ */
+ do {
+ if (tegra_cpu_rail_off_ready())
+ return 0;
+
+ udelay(delay_us);
+
+ } while (timeout_us--);
+
+ pr_err("secondary CPU taking too long to park\n");
+
+ tegra_cpuidle_report_cpus_state();
+ }
+
+ pr_err("timed out waiting secondaries to park\n");
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_cpuidle_unpark_secondary_cpus(void)
+{
+ unsigned int cpu, lcpu;
+
+ for_each_cpu(lcpu, cpu_online_mask) {
+ cpu = cpu_logical_map(lcpu);
+
+ if (cpu > 0) {
+ tegra_enable_cpu_clock(cpu);
+ tegra_cpu_out_of_reset(cpu);
+ flowctrl_write_cpu_halt(cpu, 0);
+ }
+ }
+}
+
+static int tegra_cpuidle_cc6_enter(unsigned int cpu)
+{
+ int ret;
+
+ if (cpu > 0) {
+ ret = cpu_suspend(cpu, tegra_pm_park_secondary_cpu);
+ } else {
+ ret = tegra_cpuidle_wait_for_secondary_cpus_parking();
+ if (!ret)
+ ret = tegra_pm_enter_lp2();
+
+ tegra_cpuidle_unpark_secondary_cpus();
+ }
+
+ return ret;
+}
+
+static int tegra_cpuidle_c7_enter(void)
+{
+ int err;
+
+ if (tegra_cpuidle_using_firmware()) {
+ err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
+ if (err)
+ return err;
+
+ return call_firmware_op(do_idle, 0);
+ }
+
+ return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
+}
+
+static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
+{
+ if (tegra_pending_sgi()) {
+ /*
+ * CPU got local interrupt that will be lost after GIC's
+ * shutdown because GIC driver doesn't save/restore the
+ * pending SGI state across CPU cluster PM. Abort and retry
+ * next time.
+ */
+ atomic_set(&tegra_abort_flag, 1);
+ }
+
+ cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier);
+
+ if (atomic_read(&tegra_abort_flag)) {
+ cpuidle_coupled_parallel_barrier(dev, &tegra_idle_barrier);
+ atomic_set(&tegra_abort_flag, 0);
+ return -EINTR;
+ }
+
+ return 0;
+}
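+
+/*
+ * Note on the handshake above: the first barrier guarantees that every
+ * coupled CPU observes tegra_abort_flag before any CPU acts on it, and
+ * the second barrier guarantees that every CPU has finished testing the
+ * flag before one of them clears it for the next attempt.
+ */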
+
+static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ int index, unsigned int cpu)
+{
+ int ret;
+
+ /*
+ * CC6 is the "CPU cluster power-off" state. To enter it, the
+ * secondary CPU cores are first parked into offline mode; the last
+ * CPU then cleans the remaining dirty cache lines out to DRAM and
+ * triggers the Flow Controller logic that turns off the cluster's
+ * power domain (which includes the CPU cores, GIC and L2 cache).
+ */
+ if (index == TEGRA_CC6) {
+ ret = tegra_cpuidle_coupled_barrier(dev);
+ if (ret)
+ return ret;
+ }
+
+ local_fiq_disable();
+ tegra_pm_set_cpu_in_lp2();
+ cpu_pm_enter();
+
+ switch (index) {
+ case TEGRA_C7:
+ ret = tegra_cpuidle_c7_enter();
+ break;
+
+ case TEGRA_CC6:
+ ret = tegra_cpuidle_cc6_enter(cpu);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ cpu_pm_exit();
+ tegra_pm_clear_cpu_in_lp2();
+ local_fiq_enable();
+
+ return ret;
+}
+
+static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
+{
+ /*
+ * On Tegra30, CPU0 can't be power-gated separately from the secondary
+ * cores because gating it powers down the whole CPU cluster.
+ */
+ if (cpu > 0 || index != TEGRA_C7 || tegra_get_chip_id() != TEGRA30)
+ return index;
+
+ /* demote CPU0 to C1 if secondaries are online or PM-sleep is off */
+ if (!IS_ENABLED(CONFIG_PM_SLEEP) || num_online_cpus() > 1)
+ index = TEGRA_C1;
+ else
+ index = TEGRA_CC6;
+
+ return index;
+}
+
+static int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned int cpu = cpu_logical_map(dev->cpu);
+ int err;
+
+ index = tegra_cpuidle_adjust_state_index(index, cpu);
+ if (dev->states_usage[index].disable)
+ return -1;
+
+ if (index == TEGRA_C1)
+ err = arm_cpuidle_simple_enter(dev, drv, index);
+ else
+ err = tegra_cpuidle_state_enter(dev, index, cpu);
+
+ if (err && (err != -EINTR || index != TEGRA_CC6))
+ pr_err_once("failed to enter state %d err: %d\n", index, err);
+
+ return err ? -1 : index;
+}
+
+static void tegra114_enter_s2idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ tegra_cpuidle_enter(dev, drv, index);
+}
+
+/*
+ * Previous versions of the Tegra CPUIDLE driver used different, "legacy"
+ * names for the idle states, while this driver uses the new terminology.
+ *
+ * Mapping of the old terms into the new ones:
+ *
+ * Old | New
+ * ---------
+ * LP3 | C1 (CPU core clock gating)
+ * LP2 | C7 (CPU core power gating)
+ * LP2 | CC6 (CPU cluster power gating)
+ *
+ * Note that the older CPUIDLE driver versions didn't explicitly
+ * differentiate the two LP2 states, either because they shared the
+ * same code path or because CC6 wasn't supported.
+ */
+static struct cpuidle_driver tegra_idle_driver = {
+ .name = "tegra_idle",
+ .states = {
+ [TEGRA_C1] = ARM_CPUIDLE_WFI_STATE_PWR(600),
+ [TEGRA_C7] = {
+ .enter = tegra_cpuidle_enter,
+ .exit_latency = 2000,
+ .target_residency = 2200,
+ .power_usage = 100,
+ .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .name = "C7",
+ .desc = "CPU core powered off",
+ },
+ [TEGRA_CC6] = {
+ .enter = tegra_cpuidle_enter,
+ .exit_latency = 5000,
+ .target_residency = 10000,
+ .power_usage = 0,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_COUPLED,
+ .name = "CC6",
+ .desc = "CPU cluster powered off",
+ },
+ },
+ .state_count = TEGRA_STATE_COUNT,
+ .safe_state_index = TEGRA_C1,
+};
+
+static inline void tegra_cpuidle_disable_state(enum tegra_state state)
+{
+ cpuidle_driver_state_disabled(&tegra_idle_driver, state, true);
+}
+
+/*
+ * Tegra20 HW appears to have a bug such that PCIe device interrupts, whether
+ * they are legacy IRQs or MSI, are lost when CC6 is enabled. To work around
+ * this, simply disable CC6 if the PCI driver and DT node are both enabled.
+ */
+void tegra_cpuidle_pcie_irqs_in_use(void)
+{
+ struct cpuidle_state *state_cc6 = &tegra_idle_driver.states[TEGRA_CC6];
+
+ if ((state_cc6->flags & CPUIDLE_FLAG_UNUSABLE) ||
+ tegra_get_chip_id() != TEGRA20)
+ return;
+
+ pr_info("disabling CC6 state, since PCIe IRQs are in use\n");
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+}
+
+static void tegra_cpuidle_setup_tegra114_c7_state(void)
+{
+ struct cpuidle_state *s = &tegra_idle_driver.states[TEGRA_C7];
+
+ s->enter_s2idle = tegra114_enter_s2idle;
+ s->target_residency = 1000;
+ s->exit_latency = 500;
+}
+
+static int tegra_cpuidle_probe(struct platform_device *pdev)
+{
+ /* LP2 could be disabled in device-tree */
+ if (tegra_pmc_get_suspend_mode() < TEGRA_SUSPEND_LP2)
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+
+ /*
+ * The required suspend-resume functionality, which is provided by
+ * the Tegra arch core and the PMC driver, is unavailable if the
+ * PM-sleep option is disabled.
+ */
+ if (!IS_ENABLED(CONFIG_PM_SLEEP)) {
+ if (!tegra_cpuidle_using_firmware())
+ tegra_cpuidle_disable_state(TEGRA_C7);
+
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+ }
+
+ /*
+ * Generic WFI state (also known as C1 or LP3) and the coupled CPU
+ * cluster power-off (CC6 or LP2) states are common to all Tegra SoCs.
+ */
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+ /* Tegra20 isn't capable of powering off individual CPU cores */
+ tegra_cpuidle_disable_state(TEGRA_C7);
+ break;
+
+ case TEGRA30:
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+ break;
+
+ case TEGRA114:
+ case TEGRA124:
+ tegra_cpuidle_setup_tegra114_c7_state();
+
+ /* coupled CC6 (LP2) state isn't implemented yet */
+ tegra_cpuidle_disable_state(TEGRA_CC6);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
+}
+
+static struct platform_driver tegra_cpuidle_driver = {
+ .probe = tegra_cpuidle_probe,
+ .driver = {
+ .name = "tegra-cpuidle",
+ },
+};
+builtin_platform_driver(tegra_cpuidle_driver);
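The abort path in tegra_cpuidle_coupled_barrier() above relies on two rendezvous points, which is easy to misread. Below is a minimal sketch of the same two-phase protocol; pending_wake_event() is a hypothetical stand-in for tegra_pending_sgi():

#include <linux/atomic.h>
#include <linux/cpuidle.h>
#include <linux/errno.h>

static atomic_t barrier_var;
static atomic_t abort_flag;

/* Hypothetical stand-in for tegra_pending_sgi(). */
static bool pending_wake_event(void)
{
	return false;
}

static int coupled_barrier_sketch(struct cpuidle_device *dev)
{
	/* Any CPU that sees a pending wake event raises the abort flag. */
	if (pending_wake_event())
		atomic_set(&abort_flag, 1);

	/* First rendezvous: the flag is now visible to every CPU. */
	cpuidle_coupled_parallel_barrier(dev, &barrier_var);

	if (atomic_read(&abort_flag)) {
		/*
		 * Second rendezvous: no CPU may clear the flag before all
		 * CPUs have observed it set, otherwise a late reader could
		 * miss the abort and enter the cluster-off state alone.
		 */
		cpuidle_coupled_parallel_barrier(dev, &barrier_var);
		atomic_set(&abort_flag, 0);
		return -EINTR;
	}

	return 0;
}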
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index c2767ed54dfe..2c887e4d005a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -233,20 +233,6 @@ config CRYPTO_CRC32_S390
It is available with IBM z13 or later.
-config CRYPTO_DEV_MARVELL_CESA
- tristate "Marvell's Cryptographic Engine driver"
- depends on PLAT_ORION || ARCH_MVEBU
- select CRYPTO_LIB_AES
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select SRAM
- help
- This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on MVEBU and ORION
- platforms.
- This driver supports CPU offload through DMA transfers.
-
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
@@ -606,6 +592,7 @@ config CRYPTO_DEV_MXS_DCP
source "drivers/crypto/qat/Kconfig"
source "drivers/crypto/cavium/cpt/Kconfig"
source "drivers/crypto/cavium/nitrox/Kconfig"
+source "drivers/crypto/marvell/Kconfig"
config CRYPTO_DEV_CAVIUM_ZIP
tristate "Cavium ZIP driver"
@@ -685,6 +672,29 @@ choice
endchoice
+config CRYPTO_DEV_QCE_SW_MAX_LEN
+ int "Default maximum request size to use software for AES"
+ depends on CRYPTO_DEV_QCE && CRYPTO_DEV_QCE_SKCIPHER
+ default 512
+ help
+ This sets the default maximum request size below which AES requests
+ are performed in software instead of on the crypto engine. It can be
+ changed by setting the aes_sw_max_len module parameter.
+
+ Small blocks are processed faster in software than in hardware.
+ For the 256-bit ciphers, software is 2-3 times faster than the
+ engine at 256 bytes, 30% faster at 512 bytes, and about even at
+ 768 bytes. With 128-bit keys, the break-even point is around
+ 1024 bytes.
+
+ The default is set a little lower, at 512 bytes, to balance the
+ CPU cost. The minimum recommended setting is 16 bytes (one AES
+ block), since AES-GCM will fail if it is set lower. Setting this
+ to zero sends all requests to the hardware.
+
+ Note that 192-bit keys are not supported by the hardware and are
+ always processed by the software fallback, while all DES requests
+ are handled by the hardware.
+
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -731,6 +741,18 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+config CRYPTO_DEV_ZYNQMP_AES
+ tristate "Support for Xilinx ZynqMP AES hw accelerator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_AES
+ select CRYPTO_ENGINE
+ select CRYPTO_AEAD
+ help
+ The Xilinx ZynqMP SoC has an AES-GCM engine used for symmetric-key
+ encryption and decryption. This driver interfaces with that hardware
+ accelerator. Select this if you want to use the ZynqMP module for
+ AES algorithms.
+
config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
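The CRYPTO_DEV_QCE_SW_MAX_LEN help text above describes a size threshold for routing AES requests to a software fallback. Roughly how such a threshold is consumed is sketched below; CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN and aes_sw_max_len come from the help text, while qce_fallback_needed() and its callers are illustrative, not the actual qce code:

#include <linux/module.h>
#include <crypto/aes.h>

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);

/* Decide whether a request should go to the software fallback. */
static bool qce_fallback_needed(unsigned int cryptlen, unsigned int keylen)
{
	/* 192-bit keys are unsupported by the engine: always fall back. */
	if (keylen == AES_KEYSIZE_192)
		return true;

	/* Small requests are cheaper on the CPU than on the engine. */
	return cryptlen <= aes_sw_max_len;
}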
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 40229d499476..944ed7226e37 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
+obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
@@ -47,5 +47,6 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index f72346a44e69..3e4e4bbda34c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -565,10 +565,8 @@ static int sun8i_ce_probe(struct platform_device *pdev)
/* Get Non Secure IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(ce->dev, "Cannot get CryptoEngine Non-secure IRQ\n");
+ if (irq < 0)
return irq;
- }
ce->reset = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(ce->reset)) {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 8f8404c84a4d..0e9eac397e1b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -214,7 +214,7 @@ struct sun8i_cipher_tfm_ctx {
* this template
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
- * @stat_fb: total of all data len done on this template
+ * @stat_fb: number of requests that fell back to software
*/
struct sun8i_ce_alg_template {
u32 type;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index b5f855f3de10..29c44f279112 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -186,7 +186,7 @@ struct sun8i_cipher_tfm_ctx {
* this template
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
- * @stat_fb: total of all data len done on this template
+ * @stat_fb: number of requests that fell back to software
*/
struct sun8i_ss_alg_template {
u32 type;
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 1d3355913b40..e8e8281e027d 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -176,7 +176,8 @@ static int atmel_i2c_wakeup(struct i2c_client *client)
* device is idle, asleep or during waking up. Don't check for error
* when waking up the device.
*/
- i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+ i2c_transfer_buffer_flags(client, i2c_priv->wake_token,
+ i2c_priv->wake_token_sz, I2C_M_IGNORE_NAK);
/*
* Wait to wake the device. Typical execution times for ecdh and genkey
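The switch to i2c_transfer_buffer_flags() with I2C_M_IGNORE_NAK matters here because a sleeping ATECC device NAKs the wake token, and a plain i2c_master_send() reports that NAK as a failure. On adapters that support protocol mangling, the new call expands to roughly this explicit message (a sketch using the client and i2c_priv variables from the hunk above):

struct i2c_msg msg = {
	.addr	= client->addr,
	.flags	= I2C_M_IGNORE_NAK,	/* NAK from a sleeping device is expected */
	.len	= i2c_priv->wake_token_sz,
	.buf	= i2c_priv->wake_token,
};

/* One write transfer; the adapter ignores the NAK instead of erroring out. */
ret = i2c_transfer(client->adapter, &msg, 1);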
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index cd7504101acd..2b304fc78059 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -366,88 +366,88 @@ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
ipriv = filp->private_data;
out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Number of SPUs.........%u\n",
ipriv->spu.num_spu);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Current sessions.......%u\n",
atomic_read(&ipriv->session_count));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Session count..........%u\n",
atomic_read(&ipriv->stream_count));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher setkey..........%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher Ops.............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
spu_alg_name(alg, mode), op_cnt);
}
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Hash Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
aead_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of req data......%llu\n",
(u64)atomic64_read(&ipriv->bytes_out));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of resp data.....%llu\n",
(u64)atomic64_read(&ipriv->bytes_in));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox full...........%u\n",
atomic_read(&ipriv->mb_no_spc));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox send failures..%u\n",
atomic_read(&ipriv->mb_send_fail));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Check ICV errors.......%u\n",
atomic_read(&ipriv->bad_icv));
if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
@@ -455,7 +455,7 @@ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
SPU_OFIFO_CTRL);
fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
"SPU %d output FIFO high water.....%u\n",
i, fifo_len);
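The snprintf()-to-scnprintf() conversion above fixes a classic bounds bug: snprintf() returns the length that *would* have been written, so on truncation out_offset can run past out_count and the next size argument underflows. scnprintf() returns the number of bytes actually stored. A minimal illustration:

char buf[8];
int off = 0;

/* snprintf() returns 10 here even though only 7 bytes (plus NUL) fit. */
off += snprintf(buf + off, sizeof(buf) - off, "0123456789");
/* off == 10: sizeof(buf) - off now wraps to a huge size_t value. */

/* scnprintf() returns what was actually written, so off stays bounded. */
off = scnprintf(buf, sizeof(buf), "0123456789");	/* off == 7 */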
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index fac5b2e26610..a62f228be6da 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -13,6 +13,7 @@ config CRYPTO_DEV_FSL_CAAM
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
select CRYPTO_DEV_FSL_CAAM_COMMON
+ imply FSL_MC_BUS
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -33,6 +34,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
menuconfig CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
+ select CRYPTO_ENGINE
default y
help
Enables the driver module for Job Rings which are part of
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ef1a65f4fc92..b7bb7c30adeb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -56,6 +56,7 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
+#include <crypto/engine.h>
/*
* crypto alg
@@ -101,6 +102,7 @@ struct caam_skcipher_alg {
* per-session context
*/
struct caam_ctx {
+ struct crypto_engine_ctx enginectx;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
@@ -114,6 +116,14 @@ struct caam_ctx {
unsigned int authsize;
};
+struct caam_skcipher_req_ctx {
+ struct skcipher_edesc *edesc;
+};
+
+struct caam_aead_req_ctx {
+ struct aead_edesc *edesc;
+};
+
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -858,6 +868,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
* @mapped_src_nents: number of segments in input h/w link table
* @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: set if the request was queued via the crypto-engine backlog
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -868,6 +879,7 @@ struct aead_edesc {
int mapped_src_nents;
int mapped_dst_nents;
int sec4_sg_bytes;
+ bool bklog;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
u32 hw_desc[];
@@ -881,6 +893,7 @@ struct aead_edesc {
* @mapped_dst_nents: number of segments in output h/w link table
* @iv_dma: dma address of iv for checking continuity and link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: set if the request was queued via the crypto-engine backlog
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -893,9 +906,10 @@ struct skcipher_edesc {
int mapped_dst_nents;
dma_addr_t iv_dma;
int sec4_sg_bytes;
+ bool bklog;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
- u32 hw_desc[0];
+ u32 hw_desc[];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
@@ -941,37 +955,18 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct aead_request *req = context;
- struct aead_edesc *edesc;
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
-
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- aead_unmap(jrdev, edesc, req);
-
- kfree(edesc);
-
- aead_request_complete(req, ecode);
-}
-
-static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
struct aead_request *req = context;
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct aead_edesc *edesc;
int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+ edesc = rctx->edesc;
if (err)
ecode = caam_jr_strstatus(jrdev, err);
@@ -980,61 +975,30 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
kfree(edesc);
- aead_request_complete(req, ecode);
-}
-
-static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
-{
- struct skcipher_request *req = context;
- struct skcipher_edesc *edesc;
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- int ivsize = crypto_skcipher_ivsize(skcipher);
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
-
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- skcipher_unmap(jrdev, edesc, req);
-
/*
- * The crypto API expects us to set the IV (req->iv) to the last
- * ciphertext block (CBC mode) or last counter (CTR mode).
- * This is used e.g. by the CTS mode.
+ * If the backlog flag is not set, the request is completed
+ * directly by CAAM rather than by the crypto engine.
*/
- if (ivsize && !ecode) {
- memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
- ivsize);
- print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
- }
-
- caam_dump_sg("dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-
- kfree(edesc);
-
- skcipher_request_complete(req, ecode);
+ if (!edesc->bklog)
+ aead_request_complete(req, ecode);
+ else
+ crypto_finalize_aead_request(jrp->engine, req, ecode);
}
-static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
struct skcipher_request *req = context;
struct skcipher_edesc *edesc;
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
int ivsize = crypto_skcipher_ivsize(skcipher);
int ecode = 0;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
+ edesc = rctx->edesc;
if (err)
ecode = caam_jr_strstatus(jrdev, err);
@@ -1060,7 +1024,14 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
kfree(edesc);
- skcipher_request_complete(req, ecode);
+ /*
+ * If the backlog flag is not set, the request is completed
+ * directly by CAAM rather than by the crypto engine.
+ */
+ if (!edesc->bklog)
+ skcipher_request_complete(req, ecode);
+ else
+ crypto_finalize_skcipher_request(jrp->engine, req, ecode);
}
/*
@@ -1306,6 +1277,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
@@ -1406,6 +1378,9 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
+
+ rctx->edesc = edesc;
+
*all_contig_ptr = !(mapped_src_nents > 1);
sec4_sg_index = 0;
@@ -1436,41 +1411,34 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return edesc;
}
-static int gcm_encrypt(struct aead_request *req)
+static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
{
- struct aead_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- bool all_contig;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor */
- init_gcm_job(req, edesc, all_contig, true);
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+ struct aead_edesc *edesc = rctx->edesc;
+ u32 *desc = edesc->hw_desc;
+ int ret;
- print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
+ /*
+ * Only backlog requests are sent to the crypto engine; the others
+ * can be handled by CAAM directly when it has room, especially since
+ * the JR has up to 1024 entries (versus the crypto engine's 10).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
aead_unmap(jrdev, edesc, req);
- kfree(edesc);
+ kfree(rctx->edesc);
}
return ret;
}
-static int chachapoly_encrypt(struct aead_request *req)
+static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -1478,180 +1446,130 @@ static int chachapoly_encrypt(struct aead_request *req)
struct device *jrdev = ctx->jrdev;
bool all_contig;
u32 *desc;
- int ret;
edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
- true);
+ encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
desc = edesc->hw_desc;
- init_chachapoly_job(req, edesc, all_contig, true);
+ init_chachapoly_job(req, edesc, all_contig, encrypt);
print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
+ return aead_enqueue_req(jrdev, req);
}
-static int chachapoly_decrypt(struct aead_request *req)
+static int chachapoly_encrypt(struct aead_request *req)
{
- struct aead_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- bool all_contig;
- u32 *desc;
- int ret;
-
- edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
- false);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- desc = edesc->hw_desc;
-
- init_chachapoly_job(req, edesc, all_contig, false);
- print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
- 1);
-
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
+ return chachapoly_crypt(req, true);
}
-static int ipsec_gcm_encrypt(struct aead_request *req)
+static int chachapoly_decrypt(struct aead_request *req)
{
- return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
+ return chachapoly_crypt(req, false);
}
-static int aead_encrypt(struct aead_request *req)
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
- u32 *desc;
- int ret = 0;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
- &all_contig, true);
+ &all_contig, encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor */
- init_authenc_job(req, edesc, all_contig, true);
+ init_authenc_job(req, edesc, all_contig, encrypt);
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
+ return aead_enqueue_req(jrdev, req);
+}
- return ret;
+static int aead_encrypt(struct aead_request *req)
+{
+ return aead_crypt(req, true);
}
-static int gcm_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
- struct aead_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- bool all_contig;
- u32 *desc;
- int ret = 0;
+ return aead_crypt(req, false);
+}
- /* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
+static int aead_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = aead_request_cast(areq);
+ struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+ u32 *desc = rctx->edesc->hw_desc;
+ int ret;
- /* Create and submit job descriptor*/
- init_gcm_job(req, edesc, all_contig, false);
+ rctx->edesc->bklog = true;
- print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
+ ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
+ if (ret != -EINPROGRESS) {
+ aead_unmap(ctx->jrdev, rctx->edesc, req);
+ kfree(rctx->edesc);
} else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
+ ret = 0;
}
return ret;
}
-static int ipsec_gcm_decrypt(struct aead_request *req)
-{
- return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
-}
-
-static int aead_decrypt(struct aead_request *req)
+static inline int gcm_crypt(struct aead_request *req, bool encrypt)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool all_contig;
- u32 *desc;
- int ret = 0;
-
- caam_dump_sg("dec src@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1);
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
- &all_contig, false);
+ edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
+ encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /* Create and submit job descriptor*/
- init_authenc_job(req, edesc, all_contig, false);
+ /* Create and submit job descriptor */
+ init_gcm_job(req, edesc, all_contig, encrypt);
print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- aead_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
+ return aead_enqueue_req(jrdev, req);
+}
- return ret;
+static int gcm_encrypt(struct aead_request *req)
+{
+ return gcm_crypt(req, true);
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+ return gcm_crypt(req, false);
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
}
/*
@@ -1662,6 +1580,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1760,6 +1679,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
+ rctx->edesc = edesc;
/* Make sure IV is located in a DMAable area */
if (ivsize) {
@@ -1815,49 +1735,35 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return edesc;
}
-static int skcipher_encrypt(struct skcipher_request *req)
+static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
- struct skcipher_edesc *edesc;
- struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- int ret = 0;
-
- if (!req->cryptlen)
- return 0;
-
- /* allocate extended descriptor */
- edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor*/
- init_skcipher_job(req, edesc, true);
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+ u32 *desc = rctx->edesc->hw_desc;
+ int ret;
- print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
+ rctx->edesc->bklog = true;
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
+ ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
+ if (ret != -EINPROGRESS) {
+ skcipher_unmap(ctx->jrdev, rctx->edesc, req);
+ kfree(rctx->edesc);
} else {
- skcipher_unmap(jrdev, edesc, req);
- kfree(edesc);
+ ret = 0;
}
return ret;
}
-static int skcipher_decrypt(struct skcipher_request *req)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
u32 *desc;
int ret = 0;
@@ -1870,17 +1776,25 @@ static int skcipher_decrypt(struct skcipher_request *req)
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_skcipher_job(req, edesc, false);
- desc = edesc->hw_desc;
+ init_skcipher_job(req, edesc, encrypt);
print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
+ desc = edesc->hw_desc;
+ /*
+ * Only backlog requests are sent to the crypto engine; the others
+ * can be handled by CAAM directly when it has room, especially since
+ * the JR has up to 1024 entries (versus the crypto engine's 10).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
+
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
@@ -1888,6 +1802,16 @@ static int skcipher_decrypt(struct skcipher_request *req)
return ret;
}
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ return skcipher_crypt(req, true);
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ return skcipher_crypt(req, false);
+}
+
static struct caam_skcipher_alg driver_algs[] = {
{
.skcipher = {
@@ -3391,6 +3315,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
{
dma_addr_t dma_addr;
struct caam_drv_private *priv;
+ const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
+ sh_desc_enc);
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3406,7 +3332,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
- sh_desc_enc_dma),
+ sh_desc_enc_dma) -
+ sh_desc_enc_offset,
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
@@ -3416,8 +3343,10 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_dec);
- ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+ sh_desc_dec) -
+ sh_desc_enc_offset;
+ ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
+ sh_desc_enc_offset;
/* copy descriptor header template value */
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
@@ -3431,6 +3360,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+
+ ctx->enginectx.op.do_one_request = skcipher_do_one_req;
return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
false);
@@ -3443,13 +3377,18 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
+
+ ctx->enginectx.op.do_one_request = aead_do_one_req;
+
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
- offsetof(struct caam_ctx, sh_desc_enc_dma),
+ offsetof(struct caam_ctx, sh_desc_enc_dma) -
+ offsetof(struct caam_ctx, sh_desc_enc),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
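The recurring pattern this patch introduces in caamalg.c, condensed into one place: requests flagged CRYPTO_TFM_REQ_MAY_BACKLOG are routed through the crypto engine (which can queue them), everything else goes straight to the job ring, and the bklog flag picks the matching completion call. Shown here for the skcipher case, using the names from the hunks above:

/* Submission: route backlog-capable requests through the engine. */
if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
	ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine, req);
else
	ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);

/* Engine worker: mark the request so completion knows its origin. */
rctx->edesc->bklog = true;
ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);

/* Completion: finalize through whichever path submitted the request. */
if (!edesc->bklog)
	skcipher_request_complete(req, ecode);
else
	crypto_finalize_skcipher_request(jrp->engine, req, ecode);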
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index aa9ccca67045..d6c58184bb57 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1379,6 +1379,9 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
+ u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT;
+ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_CHACHA20);
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
@@ -1417,14 +1420,15 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
LDST_OFFSET_SHIFT));
/* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
- OP_ALG_ENCRYPT);
+ if (is_chacha20)
+ options |= OP_ALG_AS_FINALIZE;
+ append_operation(desc, options);
/* Perform operation */
skcipher_append_src_dst(desc);
/* Store IV */
- if (ivsize)
+ if (!is_chacha20 && ivsize)
append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | (ctx1_iv_off <<
LDST_OFFSET_SHIFT));
@@ -1451,6 +1455,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
+ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_CHACHA20);
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
@@ -1499,7 +1505,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
skcipher_append_src_dst(desc);
/* Store IV */
- if (ivsize)
+ if (!is_chacha20 && ivsize)
append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | (ctx1_iv_off <<
LDST_OFFSET_SHIFT));
@@ -1518,7 +1524,13 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
*/
void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
- __be64 sector_size = cpu_to_be64(512);
+ /*
+ * Set sector size to a big value, practically disabling
+ * sector size segmentation in xts implementation. We cannot
+ * take full advantage of this HW feature with existing
+ * crypto API / dm-crypt SW architecture.
+ */
+ __be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
@@ -1571,7 +1583,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
*/
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
- __be64 sector_size = cpu_to_be64(512);
+ /*
+ * Set sector size to a big value, practically disabling
+ * sector size segmentation in xts implementation. We cannot
+ * take full advantage of this HW feature with existing
+ * crypto API / dm-crypt SW architecture.
+ */
+ __be64 sector_size = cpu_to_be64(BIT(15));
u32 *key_jump_cmd;
init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
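For scale on the BIT(15) choice above: the hardware advances the XTS tweak at each sector boundary, and BIT(15) = 32768 bytes is well beyond any single request dm-crypt submits with its 512-byte or 4096-byte sectors, so in practice no request ever crosses a boundary and segmentation never triggers. A one-line illustration of the arithmetic:

/* 2^15-byte HW sectors: 32768 = 64 * 512 = 8 * 4096, larger than any request. */
__be64 sector_size = cpu_to_be64(BIT(15));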
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 4a29e0ef9d63..27e36bdf6163 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -783,7 +783,7 @@ struct aead_edesc {
unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
- struct qm_sg_entry sgt[0];
+ struct qm_sg_entry sgt[];
};
/*
@@ -803,7 +803,7 @@ struct skcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
- struct qm_sg_entry sgt[0];
+ struct qm_sg_entry sgt[];
};
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 706736776b47..f29cb7bd7dd3 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -114,7 +114,7 @@ struct aead_edesc {
dma_addr_t qm_sg_dma;
unsigned int assoclen;
dma_addr_t assoclen_dma;
- struct dpaa2_sg_entry sgt[0];
+ struct dpaa2_sg_entry sgt[];
};
/*
@@ -132,7 +132,7 @@ struct skcipher_edesc {
dma_addr_t iv_dma;
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
- struct dpaa2_sg_entry sgt[0];
+ struct dpaa2_sg_entry sgt[];
};
/*
@@ -146,7 +146,7 @@ struct ahash_edesc {
dma_addr_t qm_sg_dma;
int src_nents;
int qm_sg_bytes;
- struct dpaa2_sg_entry sgt[0];
+ struct dpaa2_sg_entry sgt[];
};
/**
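The sgt[0] -> sgt[] conversions in these headers replace GNU zero-length arrays with C99 flexible array members, which lets the compiler and fortify checks catch misuse; the allocation pattern is unchanged. A self-contained sketch (the struct names are illustrative; struct_size() is the real helper from <linux/overflow.h>):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct ent {
	u64 addr;
	u32 len;
};

struct edesc {
	int nents;
	struct ent sgt[];	/* flexible array member, must be last */
};

static struct edesc *edesc_alloc(int nents, gfp_t flags)
{
	struct edesc *edesc;

	/* One allocation covers the header plus nents trailing entries. */
	edesc = kzalloc(struct_size(edesc, sgt, nents), flags);
	if (edesc)
		edesc->nents = nents;

	return edesc;
}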
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 8d9143407fc5..943bc0296267 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -65,6 +65,7 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
+#include <crypto/engine.h>
#define CAAM_CRA_PRIORITY 3000
@@ -86,6 +87,7 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
+ struct crypto_engine_ctx enginectx;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -111,9 +113,12 @@ struct caam_hash_state {
int buflen;
int next_buflen;
u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
- int (*update)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req) ____cacheline_aligned;
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
+ struct ahash_edesc *edesc;
+ void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
+ void *context);
};
struct caam_export_state {
@@ -395,7 +400,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
init_completion(&result.completion);
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
- if (!ret) {
+ if (ret == -EINPROGRESS) {
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
@@ -521,6 +526,7 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: set if the request was queued via the crypto-engine backlog
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* @sec4_sg: h/w link table
*/
@@ -528,8 +534,9 @@ struct ahash_edesc {
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
+ bool bklog;
u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
- struct sec4_sg_entry sec4_sg[0];
+ struct sec4_sg_entry sec4_sg[];
};
static inline void ahash_unmap(struct device *dev,
@@ -565,10 +572,11 @@ static inline void ahash_unmap_ctx(struct device *dev,
ahash_unmap(dev, edesc, req, dst_len);
}
-static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
+ void *context, enum dma_data_direction dir)
{
struct ahash_request *req = context;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
@@ -578,11 +586,12 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ edesc = state->edesc;
+
if (err)
ecode = caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
@@ -590,81 +599,33 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ /*
+ * If the backlog flag is not set, the request is completed
+ * directly by CAAM rather than by the crypto engine.
+ */
+ if (!edesc->bklog)
+ req->base.complete(&req->base, ecode);
+ else
+ crypto_finalize_hash_request(jrp->engine, req, ecode);
}
-static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ahash_request *req = context;
- struct ahash_edesc *edesc;
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
- int digestsize = crypto_ahash_digestsize(ahash);
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- kfree(edesc);
-
- scatterwalk_map_and_copy(state->buf, req->src,
- req->nbytes - state->next_buflen,
- state->next_buflen, 0);
- state->buflen = state->next_buflen;
-
- print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
- state->buflen, 1);
-
- print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-
- req->base.complete(&req->base, ecode);
+ ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct ahash_request *req = context;
- struct ahash_edesc *edesc;
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- int digestsize = crypto_ahash_digestsize(ahash);
- struct caam_hash_state *state = ahash_request_ctx(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- int ecode = 0;
-
- dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
- if (err)
- ecode = caam_jr_strstatus(jrdev, err);
-
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
- memcpy(req->result, state->caam_ctx, digestsize);
- kfree(edesc);
-
- print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
-
- req->base.complete(&req->base, ecode);
+ ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}
-static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
+ void *context, enum dma_data_direction dir)
{
struct ahash_request *req = context;
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -674,11 +635,11 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+ edesc = state->edesc;
if (err)
ecode = caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
kfree(edesc);
scatterwalk_map_and_copy(state->buf, req->src,
@@ -698,18 +659,42 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
digestsize, 1);
- req->base.complete(&req->base, ecode);
+ /*
+ * If the backlog flag is not set, the request is completed
+ * directly by CAAM rather than by the crypto engine.
+ */
+ if (!edesc->bklog)
+ req->base.complete(&req->base, ecode);
+ else
+ crypto_finalize_hash_request(jrp->engine, req, ecode);
+
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}
/*
* Allocate an enhanced descriptor, which contains the hardware descriptor
* and space for hardware scatter table containing sg_num entries.
*/
-static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
int sg_num, u32 *sh_desc,
- dma_addr_t sh_desc_dma,
- gfp_t flags)
+ dma_addr_t sh_desc_dma)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
@@ -719,6 +704,8 @@ static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
return NULL;
}
+ state->edesc = edesc;
+
init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
HDR_SHARE_DEFER | HDR_REVERSE);
@@ -761,6 +748,62 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
return 0;
}
+static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = state->edesc->hw_desc;
+ int ret;
+
+ state->edesc->bklog = true;
+
+ ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
+
+ if (ret != -EINPROGRESS) {
+ ahash_unmap(jrdev, state->edesc, req, 0);
+ kfree(state->edesc);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int ahash_enqueue_req(struct device *jrdev,
+ void (*cbk)(struct device *jrdev, u32 *desc,
+ u32 err, void *context),
+ struct ahash_request *req,
+ int dst_len, enum dma_data_direction dir)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->edesc;
+ u32 *desc = edesc->hw_desc;
+ int ret;
+
+ state->ahash_op_done = cbk;
+
+ /*
+ * Only backlog requests are sent to the crypto engine; the others
+ * can be handled by CAAM directly when it has room, especially since
+ * the JR has up to 1024 entries (versus the crypto engine's 10).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, cbk, req);
+
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
+ ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
@@ -768,8 +811,6 @@ static int ahash_update_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int *buflen = &state->buflen;
int *next_buflen = &state->next_buflen;
@@ -823,8 +864,8 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
- ctx->sh_desc_update_dma, flags);
+ edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
+ ctx->sh_desc_update_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -870,11 +911,8 @@ static int ahash_update_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
- if (ret)
- goto unmap_ctx;
-
- ret = -EINPROGRESS;
+ ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
+ ctx->ctx_len, DMA_BIDIRECTIONAL);
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
@@ -898,8 +936,6 @@ static int ahash_final_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes;
@@ -911,8 +947,8 @@ static int ahash_final_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
- ctx->sh_desc_fin_dma, flags);
+ edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
+ ctx->sh_desc_fin_dma);
if (!edesc)
return -ENOMEM;
@@ -947,11 +983,8 @@ static int ahash_final_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (ret)
- goto unmap_ctx;
-
- return -EINPROGRESS;
+ return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
+ digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
@@ -964,8 +997,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
int buflen = state->buflen;
u32 *desc;
int sec4_sg_src_index;
@@ -994,9 +1025,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
- flags);
+ edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1027,11 +1057,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (ret)
- goto unmap_ctx;
-
- return -EINPROGRESS;
+ return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
+ digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
@@ -1044,8 +1071,6 @@ static int ahash_digest(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
int src_nents, mapped_nents;
@@ -1072,9 +1097,8 @@ static int ahash_digest(struct ahash_request *req)
}
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
- ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
- flags);
+ edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1103,15 +1127,8 @@ static int ahash_digest(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
-
- return ret;
+ return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
+ DMA_FROM_DEVICE);
}
/* submit ahash final if it is the first job descriptor */
@@ -1121,8 +1138,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int buflen = state->buflen;
u32 *desc;
@@ -1131,8 +1146,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int ret;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
- ctx->sh_desc_digest_dma, flags);
+ edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
+ ctx->sh_desc_digest_dma);
if (!edesc)
return -ENOMEM;
@@ -1157,20 +1172,12 @@ static int ahash_final_no_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
-
- return ret;
+ return ahash_enqueue_req(jrdev, ahash_done, req,
+ digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
return -ENOMEM;
-
}
/* submit ahash update if it is the first job descriptor after update */
@@ -1180,8 +1187,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int *buflen = &state->buflen;
int *next_buflen = &state->next_buflen;
@@ -1234,10 +1239,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, pad_nents,
+ edesc = ahash_edesc_alloc(req, pad_nents,
ctx->sh_desc_update_first,
- ctx->sh_desc_update_first_dma,
- flags);
+ ctx->sh_desc_update_first_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1273,11 +1277,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
- if (ret)
- goto unmap_ctx;
-
- ret = -EINPROGRESS;
+ ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
+ ctx->ctx_len, DMA_TO_DEVICE);
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY))
+ return ret;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
@@ -1305,8 +1308,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
int buflen = state->buflen;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
@@ -1336,9 +1337,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
- flags);
+ edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1368,15 +1368,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
-
- return ret;
+ return ahash_enqueue_req(jrdev, ahash_done, req,
+ digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
@@ -1391,8 +1384,6 @@ static int ahash_update_first(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->buf;
int *buflen = &state->buflen;
int *next_buflen = &state->next_buflen;
@@ -1440,11 +1431,10 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+ edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
mapped_nents : 0,
ctx->sh_desc_update_first,
- ctx->sh_desc_update_first_dma,
- flags);
+ ctx->sh_desc_update_first_dma);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -1467,11 +1457,10 @@ static int ahash_update_first(struct ahash_request *req)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
- if (ret)
- goto unmap_ctx;
-
- ret = -EINPROGRESS;
+ ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
+ ctx->ctx_len, DMA_TO_DEVICE);
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY))
+ return ret;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
@@ -1774,6 +1763,8 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
+ sh_desc_update);
dma_addr_t dma_addr;
struct caam_drv_private *priv;
@@ -1826,7 +1817,8 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
}
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
- offsetof(struct caam_hash_ctx, key),
+ offsetof(struct caam_hash_ctx, key) -
+ sh_desc_update_offset,
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
@@ -1844,11 +1836,16 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->sh_desc_update_dma = dma_addr;
ctx->sh_desc_update_first_dma = dma_addr +
offsetof(struct caam_hash_ctx,
- sh_desc_update_first);
+ sh_desc_update_first) -
+ sh_desc_update_offset;
ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
- sh_desc_fin);
+ sh_desc_fin) -
+ sh_desc_update_offset;
ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
- sh_desc_digest);
+ sh_desc_digest) -
+ sh_desc_update_offset;
+
+ ctx->enginectx.op.do_one_request = ahash_do_one_req;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
@@ -1865,7 +1862,8 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
- offsetof(struct caam_hash_ctx, key),
+ offsetof(struct caam_hash_ctx, key) -
+ offsetof(struct caam_hash_ctx, sh_desc_update),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (ctx->key_dir != DMA_NONE)
dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 6619c512ef1a..4fcae37a2e33 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -117,76 +117,69 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
struct akcipher_request *req = context;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct rsa_edesc *edesc;
int ecode = 0;
if (err)
ecode = caam_jr_strstatus(dev, err);
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+ edesc = req_ctx->edesc;
rsa_pub_unmap(dev, edesc, req);
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, ecode);
-}
-
-static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
- void *context)
-{
- struct akcipher_request *req = context;
- struct rsa_edesc *edesc;
- int ecode = 0;
-
- if (err)
- ecode = caam_jr_strstatus(dev, err);
-
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
-
- rsa_priv_f1_unmap(dev, edesc, req);
- rsa_io_unmap(dev, edesc, req);
- kfree(edesc);
-
- akcipher_request_complete(req, ecode);
-}
-
-static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
- void *context)
-{
- struct akcipher_request *req = context;
- struct rsa_edesc *edesc;
- int ecode = 0;
-
- if (err)
- ecode = caam_jr_strstatus(dev, err);
-
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
-
- rsa_priv_f2_unmap(dev, edesc, req);
- rsa_io_unmap(dev, edesc, req);
- kfree(edesc);
-
- akcipher_request_complete(req, ecode);
+ /*
+ * If the backlog flag is not set, the completion of the request is
+ * done by CAAM, not by the crypto engine.
+ */
+ if (!edesc->bklog)
+ akcipher_request_complete(req, ecode);
+ else
+ crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}
-static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
- void *context)
+static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
{
struct akcipher_request *req = context;
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc;
int ecode = 0;
if (err)
ecode = caam_jr_strstatus(dev, err);
- edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+ edesc = req_ctx->edesc;
+
+ switch (key->priv_form) {
+ case FORM1:
+ rsa_priv_f1_unmap(dev, edesc, req);
+ break;
+ case FORM2:
+ rsa_priv_f2_unmap(dev, edesc, req);
+ break;
+ case FORM3:
+ rsa_priv_f3_unmap(dev, edesc, req);
+ }
- rsa_priv_f3_unmap(dev, edesc, req);
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, ecode);
+ /*
+ * If the backlog flag is not set, the completion of the request is
+ * done by CAAM, not by the crypto engine.
+ */
+ if (!edesc->bklog)
+ akcipher_request_complete(req, ecode);
+ else
+ crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}
/**
@@ -334,6 +327,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ req_ctx->edesc = edesc;
+
if (!sec4_sg_bytes)
return edesc;
@@ -364,6 +359,33 @@ src_fail:
return ERR_PTR(-ENOMEM);
}
+static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct akcipher_request *req = container_of(areq,
+ struct akcipher_request,
+ base);
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *jrdev = ctx->dev;
+ u32 *desc = req_ctx->edesc->hw_desc;
+ int ret;
+
+ req_ctx->edesc->bklog = true;
+
+ ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
+
+ if (ret != -EINPROGRESS) {
+ rsa_pub_unmap(jrdev, req_ctx->edesc, req);
+ rsa_io_unmap(jrdev, req_ctx->edesc, req);
+ kfree(req_ctx->edesc);
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
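Design note on the 0 return above: crypto-engine treats a zero return from its ->do_one_request() hook as a successful hand-off, with the real result reported later from the done callback via crypto_finalize_akcipher_request() (see rsa_pub_done()/rsa_priv_f_done() above). Hence -EINPROGRESS from caam_jr_enqueue() is collapsed to 0 here, while any other value means the job never reached the ring and the descriptor is unmapped and freed on the spot.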
static int set_rsa_pub_pdb(struct akcipher_request *req,
struct rsa_edesc *edesc)
{
@@ -627,6 +649,53 @@ unmap_p:
return -ENOMEM;
}
+static int akcipher_enqueue_req(struct device *jrdev,
+ void (*cbk)(struct device *jrdev, u32 *desc,
+ u32 err, void *context),
+ struct akcipher_request *req)
+{
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct rsa_edesc *edesc = req_ctx->edesc;
+ u32 *desc = edesc->hw_desc;
+ int ret;
+
+ req_ctx->akcipher_op_done = cbk;
+ /*
+ * Only the backlog request are sent to crypto-engine since the others
+ * can be handled by CAAM, if free, especially since JR has up to 1024
+ * entries (more than the 10 entries from crypto-engine).
+ */
+ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
+ req);
+ else
+ ret = caam_jr_enqueue(jrdev, desc, cbk, req);
+
+ if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
+ switch (key->priv_form) {
+ case FORM1:
+ rsa_priv_f1_unmap(jrdev, edesc, req);
+ break;
+ case FORM2:
+ rsa_priv_f2_unmap(jrdev, edesc, req);
+ break;
+ case FORM3:
+ rsa_priv_f3_unmap(jrdev, edesc, req);
+ break;
+ default:
+ rsa_pub_unmap(jrdev, edesc, req);
+ }
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
static int caam_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
@@ -658,11 +727,7 @@ static int caam_rsa_enc(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_pub_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -691,11 +756,7 @@ static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_priv_f1_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -724,11 +785,7 @@ static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_priv_f2_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -757,11 +814,7 @@ static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
/* Initialize Job Descriptor */
init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
- ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
- if (!ret)
- return -EINPROGRESS;
-
- rsa_priv_f3_unmap(jrdev, edesc, req);
+ return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
init_fail:
rsa_io_unmap(jrdev, edesc, req);
@@ -1054,6 +1107,8 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return -ENOMEM;
}
+ ctx->enginectx.op.do_one_request = akcipher_do_one_req;
+
return 0;
}
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index c68fb4c03ee6..cc889a525e2f 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -12,6 +12,7 @@
#define _PKC_DESC_H_
#include "compat.h"
#include "pdb.h"
+#include <crypto/engine.h>
/**
* caam_priv_key_form - CAAM RSA private key representation
@@ -87,11 +88,13 @@ struct caam_rsa_key {
/**
* caam_rsa_ctx - per session context.
+ * @enginectx : crypto engine context
* @key : RSA key in DMA zone
* @dev : device structure
* @padding_dma : dma address of padding, for adding it to the input
*/
struct caam_rsa_ctx {
+ struct crypto_engine_ctx enginectx;
struct caam_rsa_key key;
struct device *dev;
dma_addr_t padding_dma;
@@ -103,11 +106,16 @@ struct caam_rsa_ctx {
* @src : input scatterlist (stripped of leading zeros)
* @fixup_src : input scatterlist (that might be stripped of leading zeros)
* @fixup_src_len : length of the fixup_src input scatterlist
+ * @edesc : s/w-extended rsa descriptor
+ * @akcipher_op_done : callback used when operation is done
*/
struct caam_rsa_req_ctx {
struct scatterlist src[2];
struct scatterlist *fixup_src;
unsigned int fixup_src_len;
+ struct rsa_edesc *edesc;
+ void (*akcipher_op_done)(struct device *jrdev, u32 *desc, u32 err,
+ void *context);
};
/**
@@ -117,6 +125,7 @@ struct caam_rsa_req_ctx {
* @mapped_src_nents: number of segments in input h/w link table
* @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes : length of h/w link table
+ * @bklog : flag set when the request is backlogged to the crypto-engine
* @sec4_sg_dma : dma address of h/w link table
* @sec4_sg : pointer to h/w link table
* @pdb : specific RSA Protocol Data Block (PDB)
@@ -128,6 +137,7 @@ struct rsa_edesc {
int mapped_src_nents;
int mapped_dst_nents;
int sec4_sg_bytes;
+ bool bklog;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
union {
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index e8baacaabe07..77d048dfe5d0 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -7,35 +7,12 @@
*
* Based on caamalg.c crypto API driver.
*
- * relationship between job descriptors to shared descriptors:
- *
- * --------------- --------------
- * | JobDesc #0 |-------------------->| ShareDesc |
- * | *(buffer 0) | |------------->| (generate) |
- * --------------- | | (move) |
- * | | (store) |
- * --------------- | --------------
- * | JobDesc #1 |------|
- * | *(buffer 1) |
- * ---------------
- *
- * A job desc looks like this:
- *
- * ---------------------
- * | Header |
- * | ShareDesc Pointer |
- * | SEQ_OUT_PTR |
- * | (output buffer) |
- * ---------------------
- *
- * The SharedDesc never changes, and each job descriptor points to one of two
- * buffers for each device, from which the data will be copied into the
- * requested destination
*/
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
+#include <linux/kfifo.h>
#include "compat.h"
@@ -45,278 +22,205 @@
#include "jr.h"
#include "error.h"
+#define CAAM_RNG_MAX_FIFO_STORE_SIZE 16
+
/*
- * Maximum buffer size: maximum number of random, cache-aligned bytes that
- * will be generated and moved to seq out ptr (extlen not allowed)
+ * Length of the descriptors used; see caam_init_desc()
*/
-#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
- L1_CACHE_BYTES)
-
-/* length of descriptors */
-#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2)
-#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
-
-/* Buffer, its dma address and lock */
-struct buf_data {
- u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
- dma_addr_t addr;
- struct completion filled;
- u32 hw_desc[DESC_JOB_O_LEN];
-#define BUF_NOT_EMPTY 0
-#define BUF_EMPTY 1
-#define BUF_PENDING 2 /* Empty, but with job pending --don't submit another */
- atomic_t empty;
-};
+#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + \
+ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
/* rng per-device context */
struct caam_rng_ctx {
+ struct hwrng rng;
struct device *jrdev;
- dma_addr_t sh_desc_dma;
- u32 sh_desc[DESC_RNG_LEN];
- unsigned int cur_buf_idx;
- int current_buf;
- struct buf_data bufs[2];
+ struct device *ctrldev;
+ void *desc_async;
+ void *desc_sync;
+ struct work_struct worker;
+ struct kfifo fifo;
};
-static struct caam_rng_ctx *rng_ctx;
-
-/*
- * Variable used to avoid double free of resources in case
- * algorithm registration was unsuccessful
- */
-static bool init_done;
-
-static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
-{
- if (bd->addr)
- dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
- DMA_FROM_DEVICE);
-}
+struct caam_rng_job_ctx {
+ struct completion *done;
+ int *err;
+};
-static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
- struct device *jrdev = ctx->jrdev;
-
- if (ctx->sh_desc_dma)
- dma_unmap_single(jrdev, ctx->sh_desc_dma,
- desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
- rng_unmap_buf(jrdev, &ctx->bufs[0]);
- rng_unmap_buf(jrdev, &ctx->bufs[1]);
+ return (struct caam_rng_ctx *)r->priv;
}
-static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct buf_data *bd;
-
- bd = container_of(desc, struct buf_data, hw_desc[0]);
+ struct caam_rng_job_ctx *jctx = context;
if (err)
- caam_jr_strstatus(jrdev, err);
+ *jctx->err = caam_jr_strstatus(jrdev, err);
- atomic_set(&bd->empty, BUF_NOT_EMPTY);
- complete(&bd->filled);
-
- /* Buffer refilled, invalidate cache */
- dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
-
- print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- bd->buf, RN_BUF_SIZE, 1);
+ complete(jctx->done);
}
-static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
- struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
- struct device *jrdev = ctx->jrdev;
- u32 *desc = bd->hw_desc;
- int err;
-
- dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
- init_completion(&bd->filled);
- err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
- if (err)
- complete(&bd->filled); /* don't wait on failed job*/
- else
- atomic_inc(&bd->empty); /* note if pending */
-
- return err;
+ init_job_desc(desc, 0); /* + 1 cmd_sz */
+ /* Generate random bytes: + 1 cmd_sz */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
+ OP_ALG_PR_ON);
+ /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
+ append_fifo_store(desc, dst_dma,
+ CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);
+
+ print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, desc, desc_bytes(desc), 1);
+
+ return desc;
}
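The "+ 1 cmd_sz" annotations tie the descriptor built here back to CAAM_RNG_DESC_LEN above: three commands plus one pointer. A compile-time cross-check one could add, as a sketch:

    /* header + operation + fifo store (command words) + store pointer */
    BUILD_BUG_ON(CAAM_RNG_DESC_LEN != 3 * CAAM_CMD_SZ + CAAM_PTR_SZ_MAX);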
-static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+static int caam_rng_read_one(struct device *jrdev,
+ void *dst, int len,
+ void *desc,
+ struct completion *done)
{
- struct caam_rng_ctx *ctx = rng_ctx;
- struct buf_data *bd = &ctx->bufs[ctx->current_buf];
- int next_buf_idx, copied_idx;
- int err;
-
- if (atomic_read(&bd->empty)) {
- /* try to submit job if there wasn't one */
- if (atomic_read(&bd->empty) == BUF_EMPTY) {
- err = submit_job(ctx, 1);
- /* if can't submit job, can't even wait */
- if (err)
- return 0;
- }
- /* no immediate data, so exit if not waiting */
- if (!wait)
- return 0;
-
- /* waiting for pending job */
- if (atomic_read(&bd->empty))
- wait_for_completion(&bd->filled);
+ dma_addr_t dst_dma;
+ int err, ret = 0;
+ struct caam_rng_job_ctx jctx = {
+ .done = done,
+ .err = &ret,
+ };
+
+ len = CAAM_RNG_MAX_FIFO_STORE_SIZE;
+
+ dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+ dev_err(jrdev, "unable to map destination memory\n");
+ return -ENOMEM;
}
- next_buf_idx = ctx->cur_buf_idx + max;
- dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
- __func__, ctx->current_buf, ctx->cur_buf_idx);
-
- /* if enough data in current buffer */
- if (next_buf_idx < RN_BUF_SIZE) {
- memcpy(data, bd->buf + ctx->cur_buf_idx, max);
- ctx->cur_buf_idx = next_buf_idx;
- return max;
+ init_completion(done);
+ err = caam_jr_enqueue(jrdev,
+ caam_init_desc(desc, dst_dma),
+ caam_rng_done, &jctx);
+ if (err == -EINPROGRESS) {
+ wait_for_completion(done);
+ err = 0;
}
- /* else, copy what's left... */
- copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
- memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
- ctx->cur_buf_idx = 0;
- atomic_set(&bd->empty, BUF_EMPTY);
-
- /* ...refill... */
- submit_job(ctx, 1);
+ dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);
- /* and use next buffer */
- ctx->current_buf = !ctx->current_buf;
- dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
-
- /* since there already is some data read, don't wait */
- return copied_idx + caam_read(rng, data + copied_idx,
- max - copied_idx, false);
+ return err ?: (ret ?: len);
}
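The chained conditionals on the return line compress three outcomes into one expression; spelled out, the equivalent logic is:

    if (err)        /* enqueue failed, or the job never completed cleanly */
            return err;
    if (ret)        /* CAAM reported a job error via caam_rng_done() */
            return ret;
    return len;     /* success: number of bytes stored into dst */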
-static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
- struct device *jrdev = ctx->jrdev;
- u32 *desc = ctx->sh_desc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Generate random bytes */
- append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
-
- /* Store bytes */
- append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
+ struct scatterlist sg[1];
+ struct completion done;
+ int len, nents;
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
+ CAAM_RNG_MAX_FIFO_STORE_SIZE);
+ if (!nents)
+ return;
- ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
+ len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
+ sg[0].length,
+ ctx->desc_async,
+ &done);
+ if (len < 0)
+ return;
- print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
+ kfifo_dma_in_finish(&ctx->fifo, len);
+}
- return 0;
+static void caam_rng_worker(struct work_struct *work)
+{
+ struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
+ worker);
+ caam_rng_fill_async(ctx);
}
-static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
- struct device *jrdev = ctx->jrdev;
- struct buf_data *bd = &ctx->bufs[buf_id];
- u32 *desc = bd->hw_desc;
- int sh_len = desc_len(ctx->sh_desc);
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
+ int out;
- init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
+ if (wait) {
+ struct completion done;
- bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, bd->addr)) {
- dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ return caam_rng_read_one(ctx->jrdev, dst, max,
+ ctx->desc_sync, &done);
}
- append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
-
- print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
+ out = kfifo_out(&ctx->fifo, dst, max);
+ if (kfifo_is_empty(&ctx->fifo))
+ schedule_work(&ctx->worker);
- return 0;
+ return out;
}
static void caam_cleanup(struct hwrng *rng)
{
- int i;
- struct buf_data *bd;
-
- for (i = 0; i < 2; i++) {
- bd = &rng_ctx->bufs[i];
- if (atomic_read(&bd->empty) == BUF_PENDING)
- wait_for_completion(&bd->filled);
- }
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
- rng_unmap_ctx(rng_ctx);
+ flush_work(&ctx->worker);
+ caam_jr_free(ctx->jrdev);
+ kfifo_free(&ctx->fifo);
}
-static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init(struct hwrng *rng)
{
- struct buf_data *bd = &ctx->bufs[buf_id];
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
int err;
- err = rng_create_job_desc(ctx, buf_id);
- if (err)
- return err;
-
- atomic_set(&bd->empty, BUF_EMPTY);
- submit_job(ctx, buf_id == ctx->current_buf);
- wait_for_completion(&bd->filled);
+ ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
+ GFP_DMA | GFP_KERNEL);
+ if (!ctx->desc_sync)
+ return -ENOMEM;
- return 0;
-}
+ ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
+ GFP_DMA | GFP_KERNEL);
+ if (!ctx->desc_async)
+ return -ENOMEM;
-static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
-{
- int err;
+ if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
+ GFP_DMA | GFP_KERNEL))
+ return -ENOMEM;
- ctx->jrdev = jrdev;
+ INIT_WORK(&ctx->worker, caam_rng_worker);
- err = rng_create_sh_desc(ctx);
- if (err)
+ ctx->jrdev = caam_jr_alloc();
+ err = PTR_ERR_OR_ZERO(ctx->jrdev);
+ if (err) {
+ kfifo_free(&ctx->fifo);
+ pr_err("Job Ring Device allocation for transform failed\n");
return err;
+ }
- ctx->current_buf = 0;
- ctx->cur_buf_idx = 0;
+ /*
+ * Fill async buffer to have early randomness data for
+ * hw_random
+ */
+ caam_rng_fill_async(ctx);
- err = caam_init_buf(ctx, 0);
- if (err)
- return err;
-
- return caam_init_buf(ctx, 1);
+ return 0;
}
-static struct hwrng caam_rng = {
- .name = "rng-caam",
- .cleanup = caam_cleanup,
- .read = caam_read,
-};
+int caam_rng_init(struct device *ctrldev);
-void caam_rng_exit(void)
+void caam_rng_exit(struct device *ctrldev)
{
- if (!init_done)
- return;
-
- caam_jr_free(rng_ctx->jrdev);
- hwrng_unregister(&caam_rng);
- kfree(rng_ctx);
+ devres_release_group(ctrldev, caam_rng_init);
}
int caam_rng_init(struct device *ctrldev)
{
- struct device *dev;
+ struct caam_rng_ctx *ctx;
u32 rng_inst;
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- int err;
- init_done = false;
+ int ret;
/* Check for an instantiated RNG before registration */
if (priv->era < 10)
@@ -328,31 +232,30 @@ int caam_rng_init(struct device *ctrldev)
if (!rng_inst)
return 0;
- dev = caam_jr_alloc();
- if (IS_ERR(dev)) {
- pr_err("Job Ring Device allocation for transform failed\n");
- return PTR_ERR(dev);
- }
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
- if (!rng_ctx) {
- err = -ENOMEM;
- goto free_caam_alloc;
- }
- err = caam_init_rng(rng_ctx, dev);
- if (err)
- goto free_rng_ctx;
+ if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
+ return -ENOMEM;
- dev_info(dev, "registering rng-caam\n");
+ ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
- err = hwrng_register(&caam_rng);
- if (!err) {
- init_done = true;
- return err;
+ ctx->ctrldev = ctrldev;
+
+ ctx->rng.name = "rng-caam";
+ ctx->rng.init = caam_init;
+ ctx->rng.cleanup = caam_cleanup;
+ ctx->rng.read = caam_read;
+ ctx->rng.priv = (unsigned long)ctx;
+ ctx->rng.quality = 1024;
+
+ dev_info(ctrldev, "registering rng-caam\n");
+
+ ret = devm_hwrng_register(ctrldev, &ctx->rng);
+ if (ret) {
+ caam_rng_exit(ctrldev);
+ return ret;
}
-free_rng_ctx:
- kfree(rng_ctx);
-free_caam_alloc:
- caam_jr_free(dev);
- return err;
+ devres_close_group(ctrldev, caam_rng_init);
+ return 0;
}
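caam_rng_init() and caam_rng_exit() lean on the devres group idiom: every devm_* allocation made between open and close is keyed to caam_rng_init and can be released in one call. A minimal sketch of the pattern, with generic names rather than this driver's code:

    if (!devres_open_group(dev, key, GFP_KERNEL))   /* key: any unique address */
            return -ENOMEM;
    /* ... devm_kzalloc(), devm_hwrng_register(), etc. join the group ... */
    devres_close_group(dev, key);

    /* teardown releases the whole group at once: */
    devres_release_group(dev, key);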
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 7139366da016..4fcdd262e581 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
#include "compat.h"
#include "regs.h"
@@ -36,7 +37,8 @@ static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
init_job_desc(desc, 0);
op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
- (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
+ OP_ALG_PR_ON;
/* INIT RNG in non-test mode */
append_operation(desc, op_flags);
@@ -196,7 +198,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
u32 *desc, status;
int sh_idx, ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
if (!desc)
return -ENOMEM;
@@ -273,17 +275,30 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
if (!desc)
return -ENOMEM;
for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ const u32 rdsta_if = RDSTA_IF0 << sh_idx;
+ const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
+ const u32 rdsta_mask = rdsta_if | rdsta_pr;
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
*/
- if ((1 << sh_idx) & state_handle_mask)
- continue;
+ if (rdsta_if & state_handle_mask) {
+ if (rdsta_pr & state_handle_mask)
+ continue;
+
+ dev_info(ctrldev,
+ "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
+ sh_idx);
+
+ ret = deinstantiate_rng(ctrldev, rdsta_if);
+ if (ret)
+ break;
+ }
/* Create the descriptor for instantiating RNG State Handle */
build_instantiation_desc(desc, sh_idx, gen_sk);
@@ -303,9 +318,9 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
if (ret)
break;
- rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
- !(rdsta_val & (1 << sh_idx))) {
+ (rdsta_val & rdsta_mask) != rdsta_mask) {
ret = -EAGAIN;
break;
}
@@ -341,8 +356,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
r4tst = &ctrl->r4tst[0];
- /* put RNG4 into program mode */
- clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
+ /*
+ * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
+ * properly invalidate the entropy in the entropy register and
+ * force re-generation.
+ */
+ clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);
/*
* Performance-wise, it does not make sense to
@@ -372,7 +391,8 @@ start_rng:
* select raw sampling in both entropy shifter
* and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
+ RTMCTL_SAMP_MODE_RAW_ES_SC);
}
static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
@@ -559,6 +579,26 @@ static void caam_remove_debugfs(void *root)
}
#endif
+#ifdef CONFIG_FSL_MC_BUS
+static bool check_version(struct fsl_mc_version *mc_version, u32 major,
+ u32 minor, u32 revision)
+{
+ if (mc_version->major > major)
+ return true;
+
+ if (mc_version->major == major) {
+ if (mc_version->minor > minor)
+ return true;
+
+ if (mc_version->minor == minor &&
+ mc_version->revision > revision)
+ return true;
+ }
+
+ return false;
+}
+#endif
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -577,6 +617,7 @@ static int caam_probe(struct platform_device *pdev)
u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
+ bool pr_support = false;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -662,6 +703,21 @@ static int caam_probe(struct platform_device *pdev)
/* Get the IRQ of the controller (for security violations only) */
ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
+ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+ ctrlpriv->mc_en = !!np;
+ of_node_put(np);
+
+#ifdef CONFIG_FSL_MC_BUS
+ if (ctrlpriv->mc_en) {
+ struct fsl_mc_version *mc_version;
+
+ mc_version = fsl_mc_get_version();
+ if (mc_version)
+ pr_support = check_version(mc_version, 10, 20, 0);
+ else
+ return -EPROBE_DEFER;
+ }
+#endif
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
@@ -669,10 +725,6 @@ static int caam_probe(struct platform_device *pdev)
* In case of SoCs with Management Complex, MC f/w performs
* the configuration.
*/
- np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
- ctrlpriv->mc_en = !!np;
- of_node_put(np);
-
if (!ctrlpriv->mc_en)
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
@@ -779,7 +831,7 @@ static int caam_probe(struct platform_device *pdev)
* already instantiated, do RNG instantiation
* In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!ctrlpriv->mc_en && rng_vid >= 4) {
+ if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
@@ -789,11 +841,11 @@ static int caam_probe(struct platform_device *pdev)
* to regenerate these keys before the next POR.
*/
gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
- ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+ ctrlpriv->rng4_sh_init &= RDSTA_MASK;
do {
int inst_handles =
rd_reg32(&ctrl->r4tst[0].rdsta) &
- RDSTA_IFMASK;
+ RDSTA_MASK;
/*
* If either SH were instantiated by somebody else
* (e.g. u-boot) then it is assumed that the entropy
@@ -833,7 +885,7 @@ static int caam_probe(struct platform_device *pdev)
* Set handles init'ed by this module as the complement of the
* already initialized ones
*/
- ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
/* Enable RDB bit so that RNG works faster */
clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 4b6854bf896a..e796d3cb9be8 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1254,6 +1254,8 @@
#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_PR_ON BIT(1)
+
#define OP_ALG_DIR_SHIFT 0
#define OP_ALG_DIR_MASK 1
#define OP_ALG_DECRYPT 0
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index c7c10c90464b..402d6a362e8c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -11,6 +11,7 @@
#define INTERN_H
#include "ctrl.h"
+#include <crypto/engine.h>
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
@@ -46,6 +47,7 @@ struct caam_drv_private_jr {
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
int irq; /* One per queue */
+ bool hwrng;
/* Number of scatterlist crypt transforms active on the JobR */
atomic_t tfm_count ____cacheline_aligned;
@@ -60,6 +62,7 @@ struct caam_drv_private_jr {
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
void *outring; /* Base of output ring, DMA-safe */
+ struct crypto_engine *engine;
};
/*
@@ -161,7 +164,7 @@ static inline void caam_pkc_exit(void)
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
int caam_rng_init(struct device *dev);
-void caam_rng_exit(void);
+void caam_rng_exit(struct device *dev);
#else
@@ -170,9 +173,7 @@ static inline int caam_rng_init(struct device *dev)
return 0;
}
-static inline void caam_rng_exit(void)
-{
-}
+static inline void caam_rng_exit(struct device *dev) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index fc97cde27059..4af22e7ceb4f 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -27,7 +27,8 @@ static struct jr_driver_data driver_data;
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
-static void register_algs(struct device *dev)
+static void register_algs(struct caam_drv_private_jr *jrpriv,
+ struct device *dev)
{
mutex_lock(&algs_lock);
@@ -37,7 +38,7 @@ static void register_algs(struct device *dev)
caam_algapi_init(dev);
caam_algapi_hash_init(dev);
caam_pkc_init(dev);
- caam_rng_init(dev);
+ jrpriv->hwrng = !caam_rng_init(dev);
caam_qi_algapi_init(dev);
algs_unlock:
@@ -53,7 +54,6 @@ static void unregister_algs(void)
caam_qi_algapi_exit();
- caam_rng_exit();
caam_pkc_exit();
caam_algapi_hash_exit();
caam_algapi_exit();
@@ -62,6 +62,15 @@ algs_unlock:
mutex_unlock(&algs_lock);
}
+static void caam_jr_crypto_engine_exit(void *data)
+{
+ struct device *jrdev = data;
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+
+ /* Free the resources of crypto-engine */
+ crypto_engine_exit(jrpriv->engine);
+}
+
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@@ -126,6 +135,9 @@ static int caam_jr_remove(struct platform_device *pdev)
jrdev = &pdev->dev;
jrpriv = dev_get_drvdata(jrdev);
+ if (jrpriv->hwrng)
+ caam_rng_exit(jrdev->parent);
+
/*
* Return EBUSY if job ring already allocated.
*/
@@ -324,8 +336,8 @@ void caam_jr_free(struct device *rdev)
EXPORT_SYMBOL(caam_jr_free);
/**
- * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
- * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
+ * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
* descriptor.
* @dev: device of the job ring to be used. This device should have
* been assigned previously by caam_jr_register().
@@ -377,7 +389,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
- return -EBUSY;
+ return -ENOSPC;
}
head_entry = &jrp->entinfo[head];
@@ -414,7 +426,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
spin_unlock_bh(&jrp->inplock);
- return 0;
+ return -EINPROGRESS;
}
EXPORT_SYMBOL(caam_jr_enqueue);
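With this change, success is signalled by -EINPROGRESS instead of 0, and a full ring returns -ENOSPC instead of -EBUSY, freeing -EBUSY to mean "backlogged by crypto-engine" — which is exactly what the (ret != -EINPROGRESS) && (ret != -EBUSY) checks elsewhere in this patch rely on. Callers now follow this pattern (sketch; the cleanup step varies per call site):

    ret = caam_jr_enqueue(jrdev, desc, done_cb, req);
    if (ret != -EINPROGRESS) {
            /* submission failed: undo DMA mappings, free the edesc */
            return ret;
    }
    return -EINPROGRESS;    /* result arrives later through done_cb */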
@@ -505,7 +517,7 @@ static int caam_jr_probe(struct platform_device *pdev)
int error;
jrdev = &pdev->dev;
- jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
+ jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
if (!jrpriv)
return -ENOMEM;
@@ -538,6 +550,25 @@ static int caam_jr_probe(struct platform_device *pdev)
return error;
}
+ /* Initialize crypto engine */
+ jrpriv->engine = crypto_engine_alloc_init(jrdev, false);
+ if (!jrpriv->engine) {
+ dev_err(jrdev, "Could not init crypto-engine\n");
+ return -ENOMEM;
+ }
+
+ error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
+ jrdev);
+ if (error)
+ return error;
+
+ /* Start crypto engine */
+ error = crypto_engine_start(jrpriv->engine);
+ if (error) {
+ dev_err(jrdev, "Could not start crypto-engine\n");
+ return error;
+ }
+
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
if (!jrpriv->irq) {
@@ -562,7 +593,7 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
- register_algs(jrdev->parent);
+ register_algs(jrpriv, jrdev->parent);
return 0;
}
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 5a851ddc48fb..b0e8a4939b4f 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -108,7 +108,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
init_completion(&result.completion);
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
- if (!ret) {
+ if (ret == -EINPROGRESS) {
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index dacf2fa4aa8e..b390b935db6d 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -4,7 +4,7 @@
* Queue Interface backend functionality
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017, 2019 NXP
+ * Copyright 2016-2017, 2019-2020 NXP
*/
#include <linux/cpumask.h>
@@ -124,8 +124,10 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
do {
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
- if (likely(!ret))
+ if (likely(!ret)) {
+ refcount_inc(&req->drv_ctx->refcnt);
return 0;
+ }
if (ret != -EBUSY)
break;
@@ -148,11 +150,6 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
fd = &msg->ern.fd;
- if (qm_fd_get_format(fd) != qm_fd_compound) {
- dev_err(qidev, "Non-compound FD from CAAM\n");
- return;
- }
-
drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
@@ -160,6 +157,13 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
return;
}
+ refcount_dec(&drv_req->drv_ctx->refcnt);
+
+ if (qm_fd_get_format(fd) != qm_fd_compound) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return;
+ }
+
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
@@ -287,9 +291,10 @@ empty_fq:
return ret;
}
-static int empty_caam_fq(struct qman_fq *fq)
+static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
{
int ret;
+ int retries = 10;
struct qm_mcr_queryfq_np np;
/* Wait until the older CAAM FQ gets empty */
@@ -304,11 +309,18 @@ static int empty_caam_fq(struct qman_fq *fq)
msleep(20);
} while (1);
- /*
- * Give extra time for pending jobs from this FQ in holding tanks
- * to get processed
- */
- msleep(20);
+ /* Wait until pending jobs from this FQ are processed by CAAM */
+ do {
+ if (refcount_read(&drv_ctx->refcnt) == 1)
+ break;
+
+ msleep(20);
+ } while (--retries);
+
+ if (!retries)
+ dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
+ refcount_read(&drv_ctx->refcnt), fq->fqid);
+
return 0;
}
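Taken together, the refcount edits in this file define the frame life cycle that empty_caam_fq() now polls on (summary of the hunks in this patch):

    caam_drv_ctx_init()      refcount_set(&drv_ctx->refcnt, 1)    /* baseline */
    caam_qi_enqueue()        refcount_inc() per frame accepted by qman_enqueue()
    caam_rsp_fq_dqrr_cb()    refcount_dec() per response dequeued
    caam_fq_ern_cb()         refcount_dec() per rejected (ERN) frame

The FQ is fully drained exactly when the counter reads 1 again, which is the condition the retry loop above tests before giving up after 10 tries.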
@@ -340,7 +352,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = new_fq;
/* Empty and remove the older FQ */
- ret = empty_caam_fq(old_fq);
+ ret = empty_caam_fq(old_fq, drv_ctx);
if (ret) {
dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
@@ -453,6 +465,9 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
return ERR_PTR(-ENOMEM);
}
+ /* init reference counter used to track references to request FQ */
+ refcount_set(&drv_ctx->refcnt, 1);
+
drv_ctx->qidev = qidev;
return drv_ctx;
}
@@ -571,6 +586,16 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_stop;
fd = &dqrr->fd;
+
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
+ if (unlikely(!drv_req)) {
+ dev_err(qidev,
+ "Can't find original request for caam response\n");
+ return qman_cb_dqrr_consume;
+ }
+
+ refcount_dec(&drv_req->drv_ctx->refcnt);
+
status = be32_to_cpu(fd->status);
if (unlikely(status)) {
u32 ssrc = status & JRSTA_SSRC_MASK;
@@ -588,13 +613,6 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
return qman_cb_dqrr_consume;
}
- drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
- if (unlikely(!drv_req)) {
- dev_err(qidev,
- "Can't find original request for caam response\n");
- return qman_cb_dqrr_consume;
- }
-
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 848958951f68..5894f16f8fe3 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -3,7 +3,7 @@
* Public definitions for the CAAM/QI (Queue Interface) backend.
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2017, 2020 NXP
*/
#ifndef __QI_H__
@@ -52,6 +52,7 @@ enum optype {
* @context_a: shared descriptor dma address
* @req_fq: to-CAAM request frame queue
* @rsp_fq: from-CAAM response frame queue
+ * @refcnt: reference counter incremented for each frame enqueued in to-CAAM FQ
* @cpu: cpu on which to receive CAAM response
* @op_type: operation type
* @qidev: device pointer for CAAM/QI backend
@@ -62,6 +63,7 @@ struct caam_drv_ctx {
dma_addr_t context_a;
struct qman_fq *req_fq;
struct qman_fq *rsp_fq;
+ refcount_t refcnt;
int cpu;
enum optype op_type;
struct device *qidev;
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 05127b70527d..0f810bc13b2b 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -487,7 +487,8 @@ struct rngtst {
/* RNG4 TRNG test registers */
struct rng4tst {
-#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_ACC BIT(5) /* TRNG access mode */
+#define RTMCTL_PRGM BIT(16) /* 1 -> program mode, 0 -> run mode */
#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
both entropy shifter and
statistical checker */
@@ -523,9 +524,11 @@ struct rng4tst {
u32 rsvd1[40];
#define RDSTA_SKVT 0x80000000
#define RDSTA_SKVN 0x40000000
+#define RDSTA_PR0 BIT(4)
+#define RDSTA_PR1 BIT(5)
#define RDSTA_IF0 0x00000001
#define RDSTA_IF1 0x00000002
-#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
+#define RDSTA_MASK (RDSTA_PR1 | RDSTA_PR0 | RDSTA_IF1 | RDSTA_IF0)
u32 rdsta;
u32 rsvd2[15];
};
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index c4632d84c9a1..e91be9b8b083 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -71,7 +71,7 @@ struct ucode {
char version[VERSION_LEN - 1];
__be32 code_size;
u8 raz[12];
- u64 code[0];
+ u64 code[];
};
/**
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index e95e7aa5dbf1..ae7b44599914 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -215,6 +215,9 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
+
+ if (sp->clear_psp_master_device)
+ sp->clear_psp_master_device(sp);
}
void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e467860f797d..896f190b9a50 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -283,11 +283,11 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}
-static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
/*
@@ -331,12 +331,12 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
return ret;
}
-static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
int rc;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
if (sev->state == SEV_STATE_UNINIT) {
@@ -348,7 +348,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}
-static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
@@ -356,7 +356,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
void *blob = NULL;
int ret;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
@@ -539,7 +539,7 @@ fw_err:
return ret;
}
-static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
@@ -547,7 +547,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
void *pek_blob, *oca_blob;
int ret;
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
@@ -698,7 +698,7 @@ static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
return ret;
}
-static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pdh_cert_export input;
@@ -708,7 +708,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
/* If platform is not in INIT state then transition it to INIT. */
if (sev->state != SEV_STATE_INIT) {
- if (!capable(CAP_SYS_ADMIN))
+ if (!writable)
return -EPERM;
ret = __sev_platform_init_locked(&argp->error);
@@ -801,6 +801,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
void __user *argp = (void __user *)arg;
struct sev_issue_cmd input;
int ret = -EFAULT;
+ bool writable = file->f_mode & FMODE_WRITE;
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
@@ -819,25 +820,25 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
switch (input.cmd) {
case SEV_FACTORY_RESET:
- ret = sev_ioctl_do_reset(&input);
+ ret = sev_ioctl_do_reset(&input, writable);
break;
case SEV_PLATFORM_STATUS:
ret = sev_ioctl_do_platform_status(&input);
break;
case SEV_PEK_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
break;
case SEV_PDH_GEN:
- ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
break;
case SEV_PEK_CSR:
- ret = sev_ioctl_do_pek_csr(&input);
+ ret = sev_ioctl_do_pek_csr(&input, writable);
break;
case SEV_PEK_CERT_IMPORT:
- ret = sev_ioctl_do_pek_import(&input);
+ ret = sev_ioctl_do_pek_import(&input, writable);
break;
case SEV_PDH_CERT_EXPORT:
- ret = sev_ioctl_do_pdh_export(&input);
+ ret = sev_ioctl_do_pdh_export(&input, writable);
break;
case SEV_GET_ID:
pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
@@ -896,9 +897,9 @@ EXPORT_SYMBOL_GPL(sev_guest_df_flush);
static void sev_exit(struct kref *ref)
{
- struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
-
misc_deregister(&misc_dev->misc);
+ kfree(misc_dev);
+ misc_dev = NULL;
}
static int sev_misc_init(struct sev_device *sev)
@@ -916,7 +917,7 @@ static int sev_misc_init(struct sev_device *sev)
if (!misc_dev) {
struct miscdevice *misc;
- misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
if (!misc_dev)
return -ENOMEM;
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 423594608ad1..f913f1494af9 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -90,6 +90,7 @@ struct sp_device {
/* get and set master device */
struct sp_device*(*get_psp_master_device)(void);
void (*set_psp_master_device)(struct sp_device *);
+ void (*clear_psp_master_device)(struct sp_device *);
bool irq_registered;
bool use_tasklet;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 56c1f61c0f84..cb6cb47053f4 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -146,6 +146,14 @@ static struct sp_device *psp_get_master(void)
return sp_dev_master;
}
+static void psp_clear_master(struct sp_device *sp)
+{
+ if (sp == sp_dev_master) {
+ sp_dev_master = NULL;
+ dev_dbg(sp->dev, "Cleared sp_dev_master\n");
+ }
+}
+
static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct sp_device *sp;
@@ -206,6 +214,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
sp->set_psp_master_device = psp_set_master;
sp->get_psp_master_device = psp_get_master;
+ sp->clear_psp_master_device = psp_clear_master;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 2fc0e0da790b..1cf51edbc4b9 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -6,8 +6,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
-#include <crypto/internal/des.h>
+#include <crypto/gcm.h>
#include <linux/rtnetlink.h>
+#include <crypto/internal/des.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
@@ -26,7 +27,7 @@
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
struct cc_aead_handle {
- cc_sram_addr_t sram_workspace_addr;
+ u32 sram_workspace_addr;
struct list_head aead_list;
};
@@ -60,11 +61,6 @@ struct cc_aead_ctx {
enum drv_hash_mode auth_mode;
};
-static inline bool valid_assoclen(struct aead_request *req)
-{
- return ((req->assoclen == 16) || (req->assoclen == 20));
-}
-
static void cc_aead_exit(struct crypto_aead *tfm)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -417,7 +413,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
dma_addr_t key_dma_addr = 0;
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+ u32 larval_addr;
struct cc_crypto_req cc_req = {};
unsigned int blocksize;
unsigned int digestsize;
@@ -448,8 +444,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
if (!key)
return -ENOMEM;
- key_dma_addr = dma_map_single(dev, (void *)key, keylen,
- DMA_TO_DEVICE);
+ key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
@@ -460,6 +455,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
/* Load hash initial state */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hashmode);
+ larval_addr = cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode);
set_din_sram(&desc[idx], larval_addr, digestsize);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
@@ -796,7 +793,7 @@ static void cc_proc_authen_desc(struct aead_request *areq,
* assoc. + iv + data -compact in one table
* if assoclen is ZERO only IV perform
*/
- cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ u32 mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
if (areq_ctx->is_single_pass) {
@@ -1170,7 +1167,7 @@ static void cc_mlli_to_sram(struct aead_request *req,
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
!req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
- (unsigned int)ctx->drvdata->mlli_sram_addr,
+ ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
/* Copy MLLI table host-to-sram */
hw_desc_init(&desc[*seq_size]);
@@ -1222,7 +1219,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
- /**
+ /*
* Single-pass flow
*/
cc_set_hmac_desc(req, desc, seq_size);
@@ -1234,7 +1231,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
return;
}
- /**
+ /*
* Double-pass flow
* Fallback for unsupported single-pass modes,
* i.e. using assoc. data of non-word-multiple
@@ -1275,7 +1272,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
- /**
+ /*
* Single-pass flow
*/
cc_set_xcbc_desc(req, desc, seq_size);
@@ -1286,7 +1283,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
return;
}
- /**
+ /*
* Double-pass flow
* Fallback for unsupported single-pass modes,
* i.e. using assoc. data of non-word-multiple
@@ -1611,7 +1608,6 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1799,12 +1795,6 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int cipher_flow_mode;
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- cipher_flow_mode = AES_and_HASH;
- } else { /* Encrypt */
- cipher_flow_mode = AES_to_HASH_and_DOUT;
- }
-
//in RFC4543 no data to encrypt. just copy data from src to dest.
if (req_ctx->plaintext_authenticate_only) {
cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
@@ -1816,6 +1806,12 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
return 0;
}
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_and_HASH;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ }
+
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
@@ -1870,8 +1866,7 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req_ctx->assoclen +
- GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
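
This is the RFC4543 (GMAC) branch, where everything is authenticated and nothing is encrypted: len(A) covers assoclen plus cryptlen and len(C) is zero. With this patch the rfc4543 entry points keep the 8-byte IV inside assoclen (cc_proc_rfc4_gcm() below no longer subtracts it), so the old explicit `+ GCM_BLOCK_RFC4_IV_SIZE` term would now count the IV twice. Worked example, assuming req->assoclen = 16 and cryptlen = 64: len_a = (16 + 64) * 8 = 640 bits, len_c = 0.
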
@@ -1891,7 +1886,6 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1921,8 +1915,8 @@ static int cc_proc_aead(struct aead_request *req,
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_aead_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_aead_complete;
+ cc_req.user_arg = req;
/* Setup request context */
areq_ctx->gen_ctx.op_type = direct;
@@ -1989,7 +1983,6 @@ static int cc_proc_aead(struct aead_request *req,
/* Load MLLI tables to SRAM if necessary */
cc_mlli_to_sram(req, desc, &seq_len);
- /*TODO: move seq len by reference */
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
case DRV_HASH_SHA256:
@@ -2034,9 +2027,6 @@ static int cc_aead_encrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
- areq_ctx->is_gcm4543 = false;
-
- areq_ctx->plaintext_authenticate_only = false;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2050,22 +2040,17 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
/* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
- areq_ctx->is_gcm4543 = true;
+ areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
cc_proc_rfc4309_ccm(req);
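
crypto_ipsec_check_assoclen() is the generic replacement for the driver-private valid_assoclen() helper removed earlier in this patch, and is what the new #include <crypto/gcm.h> pulls in. Its logic, paraphrased from include/crypto/gcm.h:

	static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
	{
		switch (assoclen) {
		case 16:	/* SPI + 32-bit seq number + 8-byte IV */
		case 20:	/* SPI + 64-bit extended seq number + 8-byte IV */
			return 0;
		}

		return -EINVAL;
	}
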
@@ -2086,9 +2071,6 @@ static int cc_aead_decrypt(struct aead_request *req)
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
- areq_ctx->is_gcm4543 = false;
-
- areq_ctx->plaintext_authenticate_only = false;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2099,24 +2081,19 @@ static int cc_aead_decrypt(struct aead_request *req)
static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
+ areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
- areq_ctx->is_gcm4543 = true;
cc_proc_rfc4309_ccm(req);
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
@@ -2216,28 +2193,20 @@ static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_encrypt() above. */
-
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
- areq_ctx->plaintext_authenticate_only = false;
+ areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2248,17 +2217,12 @@ out:
static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_encrypt() above. */
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
@@ -2270,7 +2234,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
areq_ctx->assoclen = req->assoclen;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2281,28 +2244,20 @@ out:
static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_decrypt() above. */
-
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->assoclen = req->assoclen;
- areq_ctx->plaintext_authenticate_only = false;
+ areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2313,17 +2268,12 @@ out:
static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to cc_aead_decrypt() above. */
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc = -EINVAL;
+ int rc;
- if (!valid_assoclen(req)) {
- dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+ rc = crypto_ipsec_check_assoclen(req->assoclen);
+ if (rc)
goto out;
- }
memset(areq_ctx, 0, sizeof(*areq_ctx));
@@ -2335,7 +2285,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
areq_ctx->assoclen = req->assoclen;
cc_proc_rfc4_gcm(req);
- areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2614,7 +2563,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
struct cc_crypto_alg *t_alg;
struct aead_alg *alg;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
@@ -2628,6 +2577,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_blocksize = tmpl->blocksize;
alg->init = cc_aead_init;
alg->exit = cc_aead_exit;
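
The switch to devm_kzalloc() for the per-algorithm descriptors ties their lifetime to the underlying struct device, which is why the error path and cc_aead_free() below lose their kfree() calls. A sketch of the contract being relied on:

	/* devm-managed: released automatically when dev is unbound */
	t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);

	/* plain allocation: every exit path must kfree() it by hand */
	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
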
@@ -2643,19 +2593,12 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
int cc_aead_free(struct cc_drvdata *drvdata)
{
struct cc_crypto_alg *t_alg, *n;
- struct cc_aead_handle *aead_handle =
- (struct cc_aead_handle *)drvdata->aead_handle;
-
- if (aead_handle) {
- /* Remove registered algs */
- list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
- entry) {
- crypto_unregister_aead(&t_alg->aead_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
- }
- kfree(aead_handle);
- drvdata->aead_handle = NULL;
+ struct cc_aead_handle *aead_handle = drvdata->aead_handle;
+
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+ crypto_unregister_aead(&t_alg->aead_alg);
+ list_del(&t_alg->entry);
}
return 0;
@@ -2669,7 +2612,7 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
int alg;
struct device *dev = drvdata_to_dev(drvdata);
- aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
+ aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
if (!aead_handle) {
rc = -ENOMEM;
goto fail0;
@@ -2682,7 +2625,6 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
MAX_HMAC_DIGEST_SIZE);
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
- dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail1;
}
@@ -2705,18 +2647,16 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->aead_alg.base.cra_driver_name);
- goto fail2;
- } else {
- list_add_tail(&t_alg->entry, &aead_handle->aead_list);
- dev_dbg(dev, "Registered %s\n",
- t_alg->aead_alg.base.cra_driver_name);
+ goto fail1;
}
+
+ list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->aead_alg.base.cra_driver_name);
}
return 0;
-fail2:
- kfree(t_alg);
fail1:
cc_aead_free(drvdata);
fail0:
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index f12169b57f9d..b69591550730 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -66,7 +66,7 @@ struct aead_req_ctx {
/* used to prevent cache coherence problem */
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /* store orig iv */
- u32 assoclen; /* internal assoclen */
+ u32 assoclen; /* size of AAD buffer to authenticate */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
@@ -79,7 +79,6 @@ struct aead_req_ctx {
dma_addr_t gcm_iv_inc2_dma_addr;
dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
- bool is_gcm4543;
u8 *icv_virt_addr; /* Virt. address of ICV */
struct async_gen_req_ctx gen_ctx;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index a72586eccd81..b2bd093e7013 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -13,16 +13,6 @@
#include "cc_hash.h"
#include "cc_aead.h"
-enum dma_buffer_type {
- DMA_NULL_TYPE = -1,
- DMA_SGL_TYPE = 1,
- DMA_BUFF_TYPE = 2,
-};
-
-struct buff_mgr_handle {
- struct dma_pool *mlli_buffs_pool;
-};
-
union buffer_array_entry {
struct scatterlist *sgl;
dma_addr_t buffer_dma;
@@ -34,7 +24,6 @@ struct buffer_array {
unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
- enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
@@ -64,11 +53,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
enum cc_sg_cpy_direct dir)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = areq_ctx->assoclen + req->cryptlen;
-
- if (areq_ctx->is_gcm4543)
- skip += crypto_aead_ivsize(tfm);
+ u32 skip = req->assoclen + req->cryptlen;
cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
(skip - areq_ctx->req_authsize), skip, dir);
@@ -77,9 +62,13 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
/**
* cc_get_sgl_nents() - Get scatterlist number of entries.
*
+ * @dev: Device object
* @sg_list: SG list
* @nbytes: [IN] Total SGL data bytes.
* @lbytes: [OUT] Returns the amount of bytes at the last entry
+ *
+ * Return:
+ * Number of entries in the scatterlist
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
@@ -87,6 +76,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
{
unsigned int nents = 0;
+ *lbytes = 0;
+
while (nbytes && sg_list) {
nents++;
/* get the number of bytes in the last entry */
@@ -95,6 +86,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
nbytes : sg_list->length;
sg_list = sg_next(sg_list);
}
+
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
}
@@ -103,11 +95,13 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
* cc_copy_sg_portion() - Copy scatter list data,
* from to_skip to end, to dest and vice versa
*
- * @dest:
- * @sg:
- * @to_skip:
- * @end:
- * @direct:
+ * @dev: Device object
+ * @dest: Buffer to copy to/from
+ * @sg: SG list
+ * @to_skip: Number of bytes to skip before copying
+ * @end: Offset of last byte to copy
+ * @direct: Transfer direction (true == from SG list to buffer, false == from
+ * buffer to SG list)
*/
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
@@ -115,7 +109,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 nents;
nents = sg_nents_for_len(sg, end);
- sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+ sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -204,21 +198,15 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
goto build_mlli_exit;
}
/* Point to start of MLLI */
- mlli_p = (u32 *)mlli_params->mlli_virt_addr;
+ mlli_p = mlli_params->mlli_virt_addr;
/* go over all SG's and link it to one MLLI table */
for (i = 0; i < sg_data->num_of_buffers; i++) {
union buffer_array_entry *entry = &sg_data->entry[i];
u32 tot_len = sg_data->total_data_len[i];
u32 offset = sg_data->offset[i];
- if (sg_data->type[i] == DMA_SGL_TYPE)
- rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
- offset, &total_nents,
- &mlli_p);
- else /*DMA_BUFF_TYPE*/
- rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
- tot_len, &total_nents,
- &mlli_p);
+ rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
+ &total_nents, &mlli_p);
if (rc)
return rc;
@@ -244,27 +232,6 @@ build_mlli_exit:
return rc;
}
-static void cc_add_buffer_entry(struct device *dev,
- struct buffer_array *sgl_data,
- dma_addr_t buffer_dma, unsigned int buffer_len,
- bool is_last_entry, u32 *mlli_nents)
-{
- unsigned int index = sgl_data->num_of_buffers;
-
- dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
- index, &buffer_dma, buffer_len, is_last_entry);
- sgl_data->nents[index] = 1;
- sgl_data->entry[index].buffer_dma = buffer_dma;
- sgl_data->offset[index] = 0;
- sgl_data->total_data_len[index] = buffer_len;
- sgl_data->type[index] = DMA_BUFF_TYPE;
- sgl_data->is_last[index] = is_last_entry;
- sgl_data->mlli_nents[index] = mlli_nents;
- if (sgl_data->mlli_nents[index])
- *sgl_data->mlli_nents[index] = 0;
- sgl_data->num_of_buffers++;
-}
-
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
unsigned int nents, struct scatterlist *sgl,
unsigned int data_len, unsigned int data_offset,
@@ -278,7 +245,6 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
sgl_data->entry[index].sgl = sgl;
sgl_data->offset[index] = data_offset;
sgl_data->total_data_len[index] = data_len;
- sgl_data->type[index] = DMA_SGL_TYPE;
sgl_data->is_last[index] = is_last_table;
sgl_data->mlli_nents[index] = mlli_nents;
if (sgl_data->mlli_nents[index])
@@ -290,37 +256,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
- if (sg_is_last(sg)) {
- /* One entry only case -set to DLLI */
- if (dma_map_sg(dev, sg, 1, direction) != 1) {
- dev_err(dev, "dma_map_sg() single buffer failed\n");
- return -ENOMEM;
- }
- dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
- &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
- sg->offset, sg->length);
- *lbytes = nbytes;
- *nents = 1;
- *mapped_nents = 1;
- } else { /*sg_is_last*/
- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
- if (*nents > max_sg_nents) {
- *nents = 0;
- dev_err(dev, "Too many fragments. current %d max %d\n",
- *nents, max_sg_nents);
- return -ENOMEM;
- }
- /* In case of mmu the number of mapped nents might
- * be changed from the original sgl nents
- */
- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (*mapped_nents == 0) {
- *nents = 0;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
+ int ret = 0;
+
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+ if (*nents > max_sg_nents) {
+ *nents = 0;
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ *nents, max_sg_nents);
+ return -ENOMEM;
+ }
+
+ ret = dma_map_sg(dev, sg, *nents, direction);
+ if (dma_mapping_error(dev, ret)) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+ return -ENOMEM;
}
+ *mapped_nents = ret;
+
return 0;
}
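
The rewritten cc_map_sg() drops the sg_is_last() special case and takes one path for single- and multi-entry lists. For reference, the DMA-API contract for scatterlists: dma_map_sg() returns the number of mapped entries (possibly fewer than requested if an IOMMU coalesces segments) and 0 on failure, and dma_unmap_sg() must be passed the nents originally given to dma_map_sg(), not the returned count. A conventional sketch:

	int count;

	count = dma_map_sg(dev, sg, nents, dir);
	if (!count)			/* dma_map_sg() signals failure with 0 */
		return -ENOMEM;

	/* ... program the HW with 'count' mapped segments ... */

	dma_unmap_sg(dev, sg, nents, dir);	/* original nents, not count */
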
@@ -411,7 +365,6 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
{
struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
u32 dummy = 0;
@@ -424,10 +377,9 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
/* Map IV buffer */
if (ivsize) {
- dump_byte_array("iv", (u8 *)info, ivsize);
+ dump_byte_array("iv", info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
- dma_map_single(dev, (void *)info,
- ivsize, DMA_BIDIRECTIONAL);
+ dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
@@ -476,7 +428,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
}
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
if (rc)
goto cipher_exit;
@@ -555,11 +507,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
+ DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -614,18 +567,6 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
- // TODO: what about CTR?? ask Ron
- if (do_chain && areq_ctx->plaintext_authenticate_only) {
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
- unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
- /* Chain to given list */
- cc_add_buffer_entry(dev, sg_data,
- (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
- iv_size_to_authenc, is_last,
- &areq_ctx->assoc.mlli_nents);
- areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
- }
chain_iv_exit:
return rc;
@@ -639,13 +580,8 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
int mapped_nents = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
- if (areq_ctx->is_gcm4543)
- size_of_assoc += crypto_aead_ivsize(tfm);
-
if (!sg_data) {
rc = -EINVAL;
goto chain_assoc_exit;
@@ -661,7 +597,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+ mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
if (mapped_nents < 0)
return mapped_nents;
@@ -854,16 +790,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int size_for_map = req->assoclen + req->cryptlen;
u32 sg_index = 0;
- bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = areq_ctx->assoclen;
+ u32 size_to_skip = req->assoclen;
struct scatterlist *sgl;
- if (is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
-
offset = size_to_skip;
if (!sg_data)
@@ -872,16 +803,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_sgl = req->src;
areq_ctx->dst_sgl = req->dst;
- if (is_gcm4543)
- size_for_map += crypto_aead_ivsize(tfm);
-
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
&src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
- while (sg_index <= size_to_skip) {
+ while (src_mapped_nents && (sg_index <= size_to_skip)) {
src_mapped_nents--;
offset -= areq_ctx->src_sgl->length;
sgl = sg_next(areq_ctx->src_sgl);
@@ -901,14 +829,15 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = areq_ctx->assoclen + req->cryptlen;
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
- authsize : 0;
- if (is_gcm4543)
- size_for_map += crypto_aead_ivsize(tfm);
+ size_for_map = req->assoclen + req->cryptlen;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_for_map += authsize;
+ else
+ size_for_map -= authsize;
rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
- &areq_ctx->dst.nents,
+ &areq_ctx->dst.mapped_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
if (rc)
@@ -921,7 +850,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
offset = size_to_skip;
//check where the data starts
- while (sg_index <= size_to_skip) {
+ while (dst_mapped_nents && sg_index <= size_to_skip) {
dst_mapped_nents--;
offset -= areq_ctx->dst_sgl->length;
sgl = sg_next(areq_ctx->dst_sgl);
@@ -1012,14 +941,11 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
unsigned int authsize = areq_ctx->req_authsize;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
int rc = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- bool is_gcm4543 = areq_ctx->is_gcm4543;
dma_addr_t dma_addr;
u32 mapped_nents = 0;
u32 dummy = 0; /*used for the assoc data fragments */
- u32 size_to_map = 0;
+ u32 size_to_map;
gfp_t flags = cc_gfp_flags(&req->base);
mlli_params->curr_pool = NULL;
@@ -1116,14 +1042,15 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + areq_ctx->assoclen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_map = req->cryptlen + req->assoclen;
+ /* If we do in-place encryption, we also need the auth tag */
+ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
+ (req->src == req->dst)) {
size_to_map += authsize;
+ }
- if (is_gcm4543)
- size_to_map += crypto_aead_ivsize(tfm);
rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
- &areq_ctx->src.nents,
+ &areq_ctx->src.mapped_nents,
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
@@ -1183,7 +1110,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
*/
if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
if (rc)
goto aead_map_failure;
@@ -1211,7 +1138,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1229,7 +1155,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
return 0;
}
- /*TODO: copy data in case that buffer is enough for operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
@@ -1258,7 +1183,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
/*build mlli */
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
@@ -1296,7 +1221,6 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
unsigned int update_data_len;
u32 total_in_len = nbytes + *curr_buff_cnt;
struct buffer_array sg_data;
- struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
int rc = 0;
u32 dummy = 0;
@@ -1371,7 +1295,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
}
if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
- mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ mlli_params->curr_pool = drvdata->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
@@ -1438,39 +1362,22 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
- struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
- buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
- if (!buff_mgr_handle)
- return -ENOMEM;
-
- drvdata->buff_mgr_handle = buff_mgr_handle;
-
- buff_mgr_handle->mlli_buffs_pool =
+ drvdata->mlli_buffs_pool =
dma_pool_create("dx_single_mlli_tables", dev,
MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
LLI_ENTRY_BYTE_SIZE,
MLLI_TABLE_MIN_ALIGNMENT, 0);
- if (!buff_mgr_handle->mlli_buffs_pool)
- goto error;
+ if (!drvdata->mlli_buffs_pool)
+ return -ENOMEM;
return 0;
-
-error:
- cc_buffer_mgr_fini(drvdata);
- return -ENOMEM;
}
int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
- struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
-
- if (buff_mgr_handle) {
- dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
- kfree(drvdata->buff_mgr_handle);
- drvdata->buff_mgr_handle = NULL;
- }
+ dma_pool_destroy(drvdata->mlli_buffs_pool);
return 0;
}
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index af434872c6ff..653441b6542e 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -24,14 +24,15 @@ enum cc_sg_cpy_direct {
};
struct cc_mlli {
- cc_sram_addr_t sram_addr;
+ u32 sram_addr;
+ unsigned int mapped_nents;
unsigned int nents; //sg nents
unsigned int mlli_nents; //mlli nents might be different than the above
};
struct mlli_params {
struct dma_pool *curr_pool;
- u8 *mlli_virt_addr;
+ void *mlli_virt_addr;
dma_addr_t mlli_dma_addr;
u32 mlli_len;
};
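
The new mapped_nents field records the entry count handed to dma_map_sg() at map time, so cc_unmap_aead_request() (above) can mirror the mapping exactly instead of recomputing sg_nents() over the full list — the two can disagree when only a prefix of the caller's scatterlist was mapped. Sketch of the pairing:

	/* map time */
	rc = cc_map_sg(dev, req->src, size, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.mapped_nents, max_ents, &dummy,
		       &mapped_nents);

	/* unmap time: hand the same count back to the DMA API */
	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
		     DMA_BIDIRECTIONAL);
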
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 7d6252d892d7..a84335328f37 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -20,10 +20,6 @@
#define template_skcipher template_u.skcipher
-struct cc_cipher_handle {
- struct list_head alg_list;
-};
-
struct cc_user_key_info {
u8 *key;
dma_addr_t key_dma_addr;
@@ -184,7 +180,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
ctx_p->user.key);
/* Map key buffer */
- ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+ ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key,
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
@@ -284,7 +280,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
ctx_p, crypto_tfm_alg_name(tfm), keylen);
- dump_byte_array("key", (u8 *)key, keylen);
+ dump_byte_array("key", key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
@@ -387,7 +383,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
ctx_p, crypto_tfm_alg_name(tfm), keylen);
- dump_byte_array("key", (u8 *)key, keylen);
+ dump_byte_array("key", key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
@@ -533,14 +529,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
- unsigned int du_size = nbytes;
-
- struct cc_crypto_alg *cc_alg =
- container_of(tfm->__crt_alg, struct cc_crypto_alg,
- skcipher_alg.base);
-
- if (cc_alg->data_unit)
- du_size = cc_alg->data_unit;
switch (cipher_mode) {
case DRV_CIPHER_ECB:
@@ -753,7 +741,7 @@ static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
&req_ctx->mlli_params.mlli_dma_addr,
req_ctx->mlli_params.mlli_len,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ ctx_p->drvdata->mlli_sram_addr);
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI,
req_ctx->mlli_params.mlli_dma_addr,
@@ -801,16 +789,16 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm,
req_ctx->in_mlli_nents, NS_BIT);
if (req_ctx->out_nents == 0) {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
- (unsigned int)ctx_p->drvdata->mlli_sram_addr,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ ctx_p->drvdata->mlli_sram_addr,
+ ctx_p->drvdata->mlli_sram_addr);
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
(!last_desc ? 0 : 1));
} else {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
- (unsigned int)ctx_p->drvdata->mlli_sram_addr,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr +
+ ctx_p->drvdata->mlli_sram_addr,
+ ctx_p->drvdata->mlli_sram_addr +
(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
set_dout_mlli(&desc[*seq_size],
(ctx_p->drvdata->mlli_sram_addr +
@@ -871,7 +859,6 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_0: Init and sanity checks */
- /* TODO: check data length according to mode */
if (validate_data_size(ctx_p, nbytes)) {
dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
rc = -EINVAL;
@@ -893,8 +880,8 @@ static int cc_cipher_process(struct skcipher_request *req,
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_cipher_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_cipher_complete;
+ cc_req.user_arg = req;
/* Setup CPP operation details */
if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
@@ -1228,6 +1215,10 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
+ /* See https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg40576.html
+ * for the reason why this differs from the generic
+ * implementation.
+ */
.name = "xts(aes)",
.driver_name = "xts-aes-ccree",
.blocksize = 1,
@@ -1423,7 +1414,7 @@ static const struct cc_alg_template skcipher_algs[] = {
{
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1576,7 +1567,7 @@ static const struct cc_alg_template skcipher_algs[] = {
{
.name = "ctr(sm4)",
.driver_name = "ctr-sm4-ccree",
- .blocksize = SM4_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1634,7 +1625,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
struct cc_crypto_alg *t_alg;
struct skcipher_alg *alg;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
@@ -1665,36 +1656,23 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
int cc_cipher_free(struct cc_drvdata *drvdata)
{
struct cc_crypto_alg *t_alg, *n;
- struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
-
- if (cipher_handle) {
- /* Remove registered algs */
- list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
- entry) {
- crypto_unregister_skcipher(&t_alg->skcipher_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
- }
- kfree(cipher_handle);
- drvdata->cipher_handle = NULL;
+
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &drvdata->alg_list, entry) {
+ crypto_unregister_skcipher(&t_alg->skcipher_alg);
+ list_del(&t_alg->entry);
}
return 0;
}
int cc_cipher_alloc(struct cc_drvdata *drvdata)
{
- struct cc_cipher_handle *cipher_handle;
struct cc_crypto_alg *t_alg;
struct device *dev = drvdata_to_dev(drvdata);
int rc = -ENOMEM;
int alg;
- cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
- if (!cipher_handle)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&cipher_handle->alg_list);
- drvdata->cipher_handle = cipher_handle;
+ INIT_LIST_HEAD(&drvdata->alg_list);
/* Linux crypto */
dev_dbg(dev, "Number of algorithms = %zu\n",
@@ -1723,14 +1701,12 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->skcipher_alg.base.cra_driver_name);
- kfree(t_alg);
goto fail0;
- } else {
- list_add_tail(&t_alg->entry,
- &cipher_handle->alg_list);
- dev_dbg(dev, "Registered %s\n",
- t_alg->skcipher_alg.base.cra_driver_name);
}
+
+ list_add_tail(&t_alg->entry, &drvdata->alg_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
}
return 0;
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 566999738698..c454afce7781 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -8,10 +8,6 @@
#include "cc_crypto_ctx.h"
#include "cc_debugfs.h"
-struct cc_debugfs_ctx {
- struct dentry *dir;
-};
-
#define CC_DEBUG_REG(_X) { \
.name = __stringify(_X),\
.offset = CC_REG(_X) \
@@ -67,13 +63,8 @@ void __exit cc_debugfs_global_fini(void)
int cc_debugfs_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- struct cc_debugfs_ctx *ctx;
struct debugfs_regset32 *regset, *verset;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
return -ENOMEM;
@@ -81,16 +72,18 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
regset->regs = debug_regs;
regset->nregs = ARRAY_SIZE(debug_regs);
regset->base = drvdata->cc_base;
+ regset->dev = dev;
- ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+ drvdata->dir = debugfs_create_dir(drvdata->plat_dev->name,
+ cc_debugfs_dir);
- debugfs_create_regset32("regs", 0400, ctx->dir, regset);
- debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
+ debugfs_create_regset32("regs", 0400, drvdata->dir, regset);
+ debugfs_create_bool("coherent", 0400, drvdata->dir, &drvdata->coherent);
verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
/* Failing here is not important enough to fail the module load */
if (!verset)
- goto out;
+ return 0;
if (drvdata->hw_rev <= CC_HW_REV_712) {
ver_sig_regs[0].offset = drvdata->sig_offset;
@@ -102,17 +95,13 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
verset->nregs = ARRAY_SIZE(pid_cid_regs);
}
verset->base = drvdata->cc_base;
+ verset->dev = dev;
- debugfs_create_regset32("version", 0400, ctx->dir, verset);
-
-out:
- drvdata->debugfs = ctx;
+ debugfs_create_regset32("version", 0400, drvdata->dir, verset);
return 0;
}
void cc_debugfs_fini(struct cc_drvdata *drvdata)
{
- struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
-
- debugfs_remove_recursive(ctx->dir);
+ debugfs_remove_recursive(drvdata->dir);
}
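
Setting regset->dev opts these dumps into the debugfs core's runtime PM handling: since "debugfs: regset32: Add Runtime PM support", debugfs_show_regset32() resumes the device around the register reads when ->dev is set, roughly:

	if (regset->dev)
		pm_runtime_get_sync(regset->dev);

	/* ... read regset->nregs registers out of regset->base ... */

	if (regset->dev)
		pm_runtime_put(regset->dev);

Without it, reading the "regs" or "version" files could touch a runtime-suspended CryptoCell, which matters now that the driver is converted to runtime PM below.
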
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 532bc95a8373..2d50991b9a17 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -14,6 +14,8 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include "cc_driver.h"
#include "cc_request_mgr.h"
@@ -134,7 +136,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
/* if driver suspended return, probably shared interrupt */
- if (cc_pm_is_dev_suspended(dev))
+ if (pm_runtime_suspended(dev))
return IRQ_NONE;
/* read the interrupt status */
@@ -269,7 +271,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
- const struct of_device_id *dev_id;
struct clk *clk;
int irq;
int rc = 0;
@@ -278,11 +279,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
if (!new_drvdata)
return -ENOMEM;
- dev_id = of_match_node(arm_ccree_dev_of_match, np);
- if (!dev_id)
- return -ENODEV;
-
- hw_rev = (struct cc_hw_data *)dev_id->data;
+ hw_rev = of_device_get_match_data(dev);
new_drvdata->hw_rev_name = hw_rev->name;
new_drvdata->hw_rev = hw_rev->rev;
new_drvdata->std_bodies = hw_rev->std_bodies;
@@ -302,22 +299,12 @@ static int init_cc_resources(struct platform_device *plat_dev)
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk))
- switch (PTR_ERR(clk)) {
- /* Clock is optional so this might be fine */
- case -ENOENT:
- break;
-
- /* Clock not available, let's try again soon */
- case -EPROBE_DEFER:
- return -EPROBE_DEFER;
-
- default:
- dev_err(dev, "Error getting clock: %ld\n",
- PTR_ERR(clk));
- return PTR_ERR(clk);
- }
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "Error getting clock: %pe\n", clk);
+ return PTR_ERR(clk);
+ }
new_drvdata->clk = clk;
new_drvdata->coherent = of_dma_is_coherent(np);
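
devm_clk_get_optional() returns NULL, not an error, when the node names no clock, and the clk API treats a NULL clock as a no-op; that is what lets the cc_clk_on()/cc_clk_off() wrappers be deleted further down. The contract in miniature:

	clk = devm_clk_get_optional(dev, NULL);	/* NULL if DT names no clock */
	if (IS_ERR(clk))			/* only real errors remain */
		return PTR_ERR(clk);

	clk_prepare_enable(clk);	/* NULL clk: succeeds, does nothing */
	clk_disable_unprepare(clk);	/* likewise a no-op for NULL */

(The %pe specifier in the dev_err() above prints the embedded errno symbolically, e.g. -EPROBE_DEFER.)
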
@@ -344,13 +331,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
init_completion(&new_drvdata->hw_queue_avail);
- if (!plat_dev->dev.dma_mask)
- plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
while (dma_mask > 0x7fffffffUL) {
- if (dma_supported(&plat_dev->dev, dma_mask)) {
- rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
+ if (dma_supported(dev, dma_mask)) {
+ rc = dma_set_coherent_mask(dev, dma_mask);
if (!rc)
break;
}
@@ -362,7 +349,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
- rc = cc_clk_on(new_drvdata);
+ rc = clk_prepare_enable(new_drvdata->clk);
if (rc) {
dev_err(dev, "Failed to enable clock");
return rc;
@@ -370,7 +357,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->sec_disabled = cc_sec_disable;
- /* wait for Crytpcell reset completion */
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
+ goto post_pm_err;
+ }
+
+ /* Wait for Cryptocell reset completion */
if (!cc_wait_for_reset_completion(new_drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
}
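
Probe now marks the device active, enables runtime PM with autosuspend, and holds a reference for the rest of init (balanced by the pm_runtime_put() before the success return below). The consumer side of this pattern — the actual call sites live in cc_pm.c/cc_request_mgr.c, outside this excerpt — looks roughly like:

	rc = pm_runtime_get_sync(dev);		/* resume the HW if needed */
	if (rc < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return rc;
	}

	/* ... push descriptors to the engine ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* idle after CC_SUSPEND_TIMEOUT */
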
@@ -382,7 +379,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
val, hw_rev->sig);
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
sig_cidr = val;
hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
@@ -393,7 +390,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
val, hw_rev->pidr_0124);
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
hw_rev_pidr = val;
@@ -402,7 +399,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
val, hw_rev->cidr_0123);
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
sig_cidr = val;
@@ -421,7 +418,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
default:
dev_err(dev, "Unsupported engines configuration.\n");
rc = -EINVAL;
- goto post_clk_err;
+ goto post_pm_err;
}
/* Check security disable state */
@@ -447,14 +444,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata);
if (rc) {
dev_err(dev, "Could not register to interrupt %d\n", irq);
- goto post_clk_err;
+ goto post_pm_err;
}
dev_dbg(dev, "Registered to IRQ: %d\n", irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
dev_err(dev, "init_cc_regs failed\n");
- goto post_clk_err;
+ goto post_pm_err;
}
rc = cc_debugfs_init(new_drvdata);
@@ -477,15 +474,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->mlli_sram_addr =
cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
- dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
- goto post_sram_mgr_err;
+ goto post_fips_init_err;
}
rc = cc_req_mgr_init(new_drvdata);
if (rc) {
dev_err(dev, "cc_req_mgr_init failed\n");
- goto post_sram_mgr_err;
+ goto post_fips_init_err;
}
rc = cc_buffer_mgr_init(new_drvdata);
@@ -494,12 +490,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_req_mgr_err;
}
- rc = cc_pm_init(new_drvdata);
- if (rc) {
- dev_err(dev, "cc_pm_init failed\n");
- goto post_buf_mgr_err;
- }
-
/* Allocate crypto algs */
rc = cc_cipher_alloc(new_drvdata);
if (rc) {
@@ -520,15 +510,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_hash_err;
}
- /* All set, we can allow autosuspend */
- cc_pm_go(new_drvdata);
-
/* If we got here and FIPS mode is enabled
* it means all FIPS test passed, so let TEE
* know we're good.
*/
cc_set_ree_fips_status(new_drvdata, true);
+ pm_runtime_put(dev);
return 0;
post_hash_err:
@@ -539,16 +527,17 @@ post_buf_mgr_err:
cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
cc_req_mgr_fini(new_drvdata);
-post_sram_mgr_err:
- cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
cc_fips_fini(new_drvdata);
post_debugfs_err:
cc_debugfs_fini(new_drvdata);
post_regs_err:
fini_cc_regs(new_drvdata);
-post_clk_err:
- cc_clk_off(new_drvdata);
+post_pm_err:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ clk_disable_unprepare(new_drvdata->clk);
return rc;
}
@@ -560,36 +549,22 @@ void fini_cc_regs(struct cc_drvdata *drvdata)
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
+ struct device *dev = &plat_dev->dev;
struct cc_drvdata *drvdata =
(struct cc_drvdata *)platform_get_drvdata(plat_dev);
cc_aead_free(drvdata);
cc_hash_free(drvdata);
cc_cipher_free(drvdata);
- cc_pm_fini(drvdata);
cc_buffer_mgr_fini(drvdata);
cc_req_mgr_fini(drvdata);
- cc_sram_mgr_fini(drvdata);
cc_fips_fini(drvdata);
cc_debugfs_fini(drvdata);
fini_cc_regs(drvdata);
- cc_clk_off(drvdata);
-}
-
-int cc_clk_on(struct cc_drvdata *drvdata)
-{
- struct clk *clk = drvdata->clk;
- int rc;
-
- if (IS_ERR(clk))
- /* Not all devices have a clock associated with CCREE */
- return 0;
-
- rc = clk_prepare_enable(clk);
- if (rc)
- return rc;
-
- return 0;
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ clk_disable_unprepare(drvdata->clk);
}
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
@@ -600,17 +575,6 @@ unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
return HASH_LEN_SIZE_630;
}
-void cc_clk_off(struct cc_drvdata *drvdata)
-{
- struct clk *clk = drvdata->clk;
-
- if (IS_ERR(clk))
- /* Not all devices have a clock associated with CCREE */
- return;
-
- clk_disable_unprepare(clk);
-}
-
static int ccree_probe(struct platform_device *plat_dev)
{
int rc;
@@ -653,7 +617,6 @@ static struct platform_driver ccree_driver = {
static int __init ccree_init(void)
{
- cc_hash_global_init();
cc_debugfs_global_init();
return platform_driver_register(&ccree_driver);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index c227718ba992..d938886390d2 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -26,7 +26,6 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
-/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
@@ -71,9 +70,7 @@ enum cc_std_body {
#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
-#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE)
#define CC_CPP_AES_ABORT_MASK ( \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
@@ -139,15 +136,15 @@ struct cc_drvdata {
int irq;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
- cc_sram_addr_t mlli_sram_addr;
- void *buff_mgr_handle;
- void *cipher_handle;
+ u32 mlli_sram_addr;
+ struct dma_pool *mlli_buffs_pool;
+ struct list_head alg_list;
void *hash_handle;
void *aead_handle;
void *request_mgr_handle;
void *fips_handle;
- void *sram_mgr_handle;
- void *debugfs;
+ u32 sram_free_offset; /* offset to non-allocated area in SRAM */
+ struct dentry *dir; /* for debugfs */
struct clk *clk;
bool coherent;
char *hw_rev_name;
@@ -158,7 +155,6 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
- bool pm_on;
};
struct cc_crypto_alg {
@@ -212,8 +208,6 @@ static inline void dump_byte_array(const char *name, const u8 *the_array,
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
-int cc_clk_on(struct cc_drvdata *drvdata);
-void cc_clk_off(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 912e5ce5079d..d5310783af15 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -20,8 +20,8 @@
#define CC_SM3_HASH_LEN_SIZE 8
struct cc_hash_handle {
- cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
- cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+ u32 digest_len_sram_addr; /* const value in SRAM*/
+ u32 larval_digest_sram_addr; /* const value in SRAM */
struct list_head hash_list;
};
@@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = {
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static u64 cc_sha384_init[] = {
- SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
- SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static u64 cc_sha512_init[] = {
- SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
- SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+/*
+ * Due to the way the HW works, every double word in the SHA384 and SHA512
+ * larval hashes must be stored in hi/lo order
+ */
+#define hilo(x) upper_32_bits(x), lower_32_bits(x)
+static const u32 cc_sha384_init[] = {
+ hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
+ hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
+static const u32 cc_sha512_init[] = {
+ hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
+ hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
+
static const u32 cc_sm3_init[] = {
SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
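
The hilo() macro above makes the hi/lo word ordering explicit at build time. Worked expansion for the first SHA-512 word, using SHA512_H0 == 0x6a09e667f3bcc908ULL:

	hilo(SHA512_H0)
	/* -> upper_32_bits(0x6a09e667f3bcc908ULL), lower_32_bits(0x6a09e667f3bcc908ULL) */
	/* -> 0x6a09e667, 0xf3bcc908   (high word first, as the HW expects) */

So the u32 arrays carry each 64-bit initial value high word first regardless of host endianness, whereas the previous u64 arrays left the in-memory word order to the CPU.
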
@@ -342,7 +349,6 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
/* Get final MAC result */
hw_desc_init(&desc[idx]);
set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
- /* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -422,8 +428,7 @@ static int cc_hash_digest(struct ahash_request *req)
bool is_hmac = ctx->is_hmac;
struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- cc_sram_addr_t larval_digest_addr =
- cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+ u32 larval_digest_addr;
int idx = 0;
int rc = 0;
gfp_t flags = cc_gfp_flags(&req->base);
@@ -465,6 +470,8 @@ static int cc_hash_digest(struct ahash_request *req)
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
} else {
+ larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
+ ctx->hash_mode);
set_din_sram(&desc[idx], larval_digest_addr,
ctx->inter_digestsize);
}
@@ -726,7 +733,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
int digestsize = 0;
int i, idx = 0, rc = 0;
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- cc_sram_addr_t larval_addr;
+ u32 larval_addr;
struct device *dev;
ctx = crypto_ahash_ctx(ahash);
@@ -752,7 +759,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
return -ENOMEM;
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+ dma_map_single(dev, ctx->key_params.key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
@@ -1067,8 +1074,8 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
ctx->key_params.keylen = 0;
ctx->digest_buff_dma_addr =
- dma_map_single(dev, (void *)ctx->digest_buff,
- sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
@@ -1079,7 +1086,7 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
&ctx->digest_buff_dma_addr);
ctx->opad_tmp_keys_dma_addr =
- dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+ dma_map_single(dev, ctx->opad_tmp_keys_buff,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
@@ -1196,8 +1203,8 @@ static int cc_mac_update(struct ahash_request *req)
idx++;
/* Setup request structure */
- cc_req.user_cb = (void *)cc_update_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
if (rc != -EINPROGRESS && rc != -EBUSY) {
@@ -1254,8 +1261,8 @@ static int cc_mac_final(struct ahash_request *req)
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_hash_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
if (state->xcbc_count && rem_cnt == 0) {
/* Load key for ECB decryption */
@@ -1311,7 +1318,6 @@ static int cc_mac_final(struct ahash_request *req)
/* Get final MAC result */
hw_desc_init(&desc[idx]);
- /* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
digestsize, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -1369,8 +1375,8 @@ static int cc_mac_finup(struct ahash_request *req)
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_hash_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
@@ -1393,7 +1399,6 @@ static int cc_mac_finup(struct ahash_request *req)
/* Get final MAC result */
hw_desc_init(&desc[idx]);
- /* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
digestsize, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -1448,8 +1453,8 @@ static int cc_mac_digest(struct ahash_request *req)
}
/* Setup request structure */
- cc_req.user_cb = (void *)cc_digest_complete;
- cc_req.user_arg = (void *)req;
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
@@ -1820,7 +1825,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
struct crypto_alg *alg;
struct ahash_alg *halg;
- t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
+ t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
if (!t_crypto_alg)
return ERR_PTR(-ENOMEM);
@@ -1857,104 +1862,85 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
return t_crypto_alg;
}
+static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
+ unsigned int size, u32 *sram_buff_ofs)
+{
+ struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ unsigned int larval_seq_len = 0;
+ int rc;
+
+ cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ return rc;
+
+ *sram_buff_ofs += size;
+ return 0;
+}
+
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
- cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
- unsigned int larval_seq_len = 0;
- struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
int rc = 0;
/* Copy-to-sram digest-len */
- cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
- ARRAY_SIZE(cc_digest_len_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
+ sizeof(cc_digest_len_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_digest_len_init);
- larval_seq_len = 0;
-
if (large_sha_supported) {
/* Copy-to-sram digest-len for sha384/512 */
- cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
- ARRAY_SIZE(cc_digest_len_sha512_init),
- larval_seq, &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
+ sizeof(cc_digest_len_sha512_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
-
- sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
- larval_seq_len = 0;
}
/* The initial digests offset */
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
/* Copy-to-sram initial SHA* digests */
- cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
- larval_seq, &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_md5_init);
- larval_seq_len = 0;
- cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sha1_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha1_init);
- larval_seq_len = 0;
- cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sha224_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha224_init);
- larval_seq_len = 0;
- cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sha256_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
+ &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha256_init);
- larval_seq_len = 0;
if (sm3_supported) {
- cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
- ARRAY_SIZE(cc_sm3_init), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sm3_init,
+ sizeof(cc_sm3_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sm3_init);
- larval_seq_len = 0;
}
if (large_sha_supported) {
- cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha384_init,
+ sizeof(cc_sha384_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(cc_sha384_init);
- larval_seq_len = 0;
- cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
- &larval_seq_len);
- rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ rc = cc_init_copy_sram(drvdata, cc_sha512_init,
+ sizeof(cc_sha512_init), &sram_buff_ofs);
if (rc)
goto init_digest_const_err;
}
@@ -1963,38 +1949,16 @@ init_digest_const_err:
return rc;
}
-static void __init cc_swap_dwords(u32 *buf, unsigned long size)
-{
- int i;
- u32 tmp;
-
- for (i = 0; i < size; i += 2) {
- tmp = buf[i];
- buf[i] = buf[i + 1];
- buf[i + 1] = tmp;
- }
-}
-
-/*
- * Due to the way the HW works we need to swap every
- * double word in the SHA384 and SHA512 larval hashes
- */
-void __init cc_hash_global_init(void)
-{
- cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
- cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
-}
-
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle;
- cc_sram_addr_t sram_buff;
+ u32 sram_buff;
u32 sram_size_to_alloc;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
int alg;
- hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
+ hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
if (!hash_handle)
return -ENOMEM;
@@ -2016,7 +1980,6 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
- dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail;
}
@@ -2056,12 +2019,10 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
- kfree(t_alg);
goto fail;
- } else {
- list_add_tail(&t_alg->entry,
- &hash_handle->hash_list);
}
+
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
if (hw_mode == DRV_CIPHER_XCBC_MAC ||
hw_mode == DRV_CIPHER_CMAC)
@@ -2081,18 +2042,16 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
- kfree(t_alg);
goto fail;
- } else {
- list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
+
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
}
return 0;
fail:
- kfree(drvdata->hash_handle);
- drvdata->hash_handle = NULL;
+ cc_hash_free(drvdata);
return rc;
}
@@ -2101,17 +2060,12 @@ int cc_hash_free(struct cc_drvdata *drvdata)
struct cc_hash_alg *t_hash_alg, *hash_n;
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
- if (hash_handle) {
- list_for_each_entry_safe(t_hash_alg, hash_n,
- &hash_handle->hash_list, entry) {
- crypto_unregister_ahash(&t_hash_alg->ahash_alg);
- list_del(&t_hash_alg->entry);
- kfree(t_hash_alg);
- }
-
- kfree(hash_handle);
- drvdata->hash_handle = NULL;
+ list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
+ entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
}
+
return 0;
}
@@ -2272,22 +2226,23 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
}
}
-/*!
- * Gets the address of the initial digest in SRAM
+/**
+ * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
* according to the given hash mode
*
- * \param drvdata
- * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
*
- * \return u32 The address of the initial digest in SRAM
+ * Return:
+ * The address of the initial digest in SRAM
*/
-cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+u32 cc_larval_digest_addr(void *drvdata, u32 mode)
{
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
- cc_sram_addr_t addr;
+ u32 addr;
switch (mode) {
case DRV_HASH_NULL:
@@ -2339,12 +2294,11 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
return hash_handle->larval_digest_sram_addr;
}
-cc_sram_addr_t
-cc_digest_len_addr(void *drvdata, u32 mode)
+u32 cc_digest_len_addr(void *drvdata, u32 mode)
{
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
- cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+ u32 digest_len_addr = hash_handle->digest_len_sram_addr;
switch (mode) {
case DRV_HASH_SHA1:
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 0d6dc61484d7..3d0f2179e07e 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -80,30 +80,27 @@ int cc_hash_alloc(struct cc_drvdata *drvdata);
int cc_init_hash_sram(struct cc_drvdata *drvdata);
int cc_hash_free(struct cc_drvdata *drvdata);
-/*!
- * Gets the initial digest length
+/**
+ * cc_digest_len_addr() - Gets the address of the initial digest length in SRAM
*
- * \param drvdata
- * \param mode The Hash mode. Supported modes:
- * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
- * \return u32 returns the address of the initial digest length in SRAM
+ * Return:
+ * Returns the address of the initial digest length in SRAM
*/
-cc_sram_addr_t
-cc_digest_len_addr(void *drvdata, u32 mode);
+u32 cc_digest_len_addr(void *drvdata, u32 mode);
-/*!
- * Gets the address of the initial digest in SRAM
+/**
+ * cc_larval_digest_addr() - Gets the address of the initial digest in SRAM
* according to the given hash mode
*
- * \param drvdata
- * \param mode The Hash mode. Supported modes:
- * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
*
- * \return u32 The address of the initial digest in SRAM
+ * Return:
+ * The address of the initial digest in SRAM
*/
-cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
-
-void cc_hash_global_init(void);
+u32 cc_larval_digest_addr(void *drvdata, u32 mode);
#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 9f4db9956e91..15df58c66911 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -17,46 +17,43 @@
/* Define max. available slots in HW queue */
#define HW_QUEUE_SLOTS_MAX 15
-#define CC_REG_LOW(word, name) \
- (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
-
-#define CC_REG_HIGH(word, name) \
- (CC_REG_LOW(word, name) + \
- CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
-
-#define CC_GENMASK(word, name) \
- GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
-
-#define WORD0_VALUE CC_GENMASK(0, VALUE)
-#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
-#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
-#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
-#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
-#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
-#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
-#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
-#define WORD2_VALUE CC_GENMASK(2, VALUE)
-#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
-#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
-#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE)
-#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT)
-#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT)
-#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
-#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
-#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
-#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY)
-#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
-#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
-#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
-#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2)
-#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO)
-#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE)
-#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0)
-#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
-#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
-#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
-#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
-#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
+#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
+#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
+#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))
+
+#define CC_HWQ_GENMASK(word, field) \
+ CC_GENMASK(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## field)
+
+#define WORD0_VALUE CC_HWQ_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_HWQ_GENMASK(0, CPP_CIPHER_MODE)
+#define WORD1_DIN_CONST_VALUE CC_HWQ_GENMASK(1, DIN_CONST_VALUE)
+#define WORD1_DIN_DMA_MODE CC_HWQ_GENMASK(1, DIN_DMA_MODE)
+#define WORD1_DIN_SIZE CC_HWQ_GENMASK(1, DIN_SIZE)
+#define WORD1_NOT_LAST CC_HWQ_GENMASK(1, NOT_LAST)
+#define WORD1_NS_BIT CC_HWQ_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_HWQ_GENMASK(1, LOCK_QUEUE)
+#define WORD2_VALUE CC_HWQ_GENMASK(2, VALUE)
+#define WORD3_DOUT_DMA_MODE CC_HWQ_GENMASK(3, DOUT_DMA_MODE)
+#define WORD3_DOUT_LAST_IND CC_HWQ_GENMASK(3, DOUT_LAST_IND)
+#define WORD3_DOUT_SIZE CC_HWQ_GENMASK(3, DOUT_SIZE)
+#define WORD3_HASH_XOR_BIT CC_HWQ_GENMASK(3, HASH_XOR_BIT)
+#define WORD3_NS_BIT CC_HWQ_GENMASK(3, NS_BIT)
+#define WORD3_QUEUE_LAST_IND CC_HWQ_GENMASK(3, QUEUE_LAST_IND)
+#define WORD4_ACK_NEEDED CC_HWQ_GENMASK(4, ACK_NEEDED)
+#define WORD4_AES_SEL_N_HASH CC_HWQ_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_AES_XOR_CRYPTO_KEY CC_HWQ_GENMASK(4, AES_XOR_CRYPTO_KEY)
+#define WORD4_BYTES_SWAP CC_HWQ_GENMASK(4, BYTES_SWAP)
+#define WORD4_CIPHER_CONF0 CC_HWQ_GENMASK(4, CIPHER_CONF0)
+#define WORD4_CIPHER_CONF1 CC_HWQ_GENMASK(4, CIPHER_CONF1)
+#define WORD4_CIPHER_CONF2 CC_HWQ_GENMASK(4, CIPHER_CONF2)
+#define WORD4_CIPHER_DO CC_HWQ_GENMASK(4, CIPHER_DO)
+#define WORD4_CIPHER_MODE CC_HWQ_GENMASK(4, CIPHER_MODE)
+#define WORD4_CMAC_SIZE0 CC_HWQ_GENMASK(4, CMAC_SIZE0)
+#define WORD4_DATA_FLOW_MODE CC_HWQ_GENMASK(4, DATA_FLOW_MODE)
+#define WORD4_KEY_SIZE CC_HWQ_GENMASK(4, KEY_SIZE)
+#define WORD4_SETUP_OPERATION CC_HWQ_GENMASK(4, SETUP_OPERATION)
+#define WORD5_DIN_ADDR_HIGH CC_HWQ_GENMASK(5, DIN_ADDR_HIGH)
+#define WORD5_DOUT_ADDR_HIGH CC_HWQ_GENMASK(5, DOUT_ADDR_HIGH)
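The rework splits generic mask construction (CC_GENMASK) from the HW-queue name
pasting (CC_HWQ_GENMASK), so the generic half can be reused for other register
families. A worked expansion, with shift/size values picked purely for
illustration (the real values live in the generated register headers):

/*
 * Assume, for illustration only:
 *   #define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 2
 *   #define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE  24
 *
 * Then:
 *   WORD1_DIN_SIZE
 *     -> CC_HWQ_GENMASK(1, DIN_SIZE)
 *     -> CC_GENMASK(CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE)
 *     -> GENMASK(2 + 24 - 1, 2)
 *     -> GENMASK(25, 2)		/* bits 25..2 set */
 */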
/******************************************************************************
* TYPE DEFINITIONS
@@ -207,31 +204,32 @@ enum cc_hash_cipher_pad {
/* Descriptor packing macros */
/*****************************/
-/*
- * Init a HW descriptor struct
- * @pdesc: pointer HW descriptor struct
+/**
+ * hw_desc_init() - Init a HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void hw_desc_init(struct cc_hw_desc *pdesc)
{
memset(pdesc, 0, sizeof(struct cc_hw_desc));
}
-/*
- * Indicates the end of current HW descriptors flow and release the HW engines.
+/**
+ * set_queue_last_ind_bit() - Indicate the end of the current HW descriptor
+ * flow and release the HW engines.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
{
pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
}
-/*
- * Set the DIN field of a HW descriptors
+/**
+ * set_din_type() - Set the DIN field of a HW descriptor
*
- * @pdesc: pointer HW descriptor struct
- * @dma_mode: dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
- * @addr: dinAdr DIN address
+ * @pdesc: Pointer to HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DIN address
* @size: Data size in bytes
* @axi_sec: AXI secure bit
*/
@@ -239,20 +237,20 @@ static inline void set_din_type(struct cc_hw_desc *pdesc,
enum cc_dma_mode dma_mode, dma_addr_t addr,
u32 size, enum cc_axi_sec axi_sec)
{
- pdesc->word[0] = (u32)addr;
+ pdesc->word[0] = lower_32_bits(addr);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
+ pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, upper_32_bits(addr));
#endif
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
FIELD_PREP(WORD1_DIN_SIZE, size) |
FIELD_PREP(WORD1_NS_BIT, axi_sec);
}
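Replacing the open-coded casts with lower_32_bits()/upper_32_bits() makes the
64-bit address split explicit and drops the lossy (u16) truncation of the high
word. Illustration only, using an arbitrary address:

/*
 * dma_addr_t addr = 0x0000000123456789ULL;
 *
 * lower_32_bits(addr) == 0x23456789  -> word[0]
 * upper_32_bits(addr) == 0x00000001  -> WORD5_DIN_ADDR_HIGH field
 *
 * The old (u16)(addr >> 32) silently dropped anything above bit 47;
 * upper_32_bits() keeps the full high word and FIELD_PREP masks it
 * down to the field width.
 */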
-/*
- * Set the DIN field of a HW descriptors to NO DMA mode.
+/**
+ * set_din_no_dma() - Set the DIN field of a HW descriptor to NO DMA mode.
* Used for NOP descriptor, register patches and other special modes.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DIN address
* @size: Data size in bytes
*/
@@ -262,14 +260,11 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
}
-/*
- * Setup the special CPP descriptor
+/**
+ * set_cpp_crypto_key() - Setup the special CPP descriptor
*
- * @pdesc: pointer HW descriptor struct
- * @alg: cipher used (AES / SM4)
- * @mode: mode used (CTR or CBC)
- * @slot: slot number
- * @ksize: key size
+ * @pdesc: Pointer to HW descriptor struct
+ * @slot: Slot number
*/
static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
{
@@ -281,27 +276,26 @@ static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
}
-/*
- * Set the DIN field of a HW descriptors to SRAM mode.
+/**
+ * set_din_sram() - Set the DIN field of a HW descriptor to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
- * adaptor will enforce alignment check.
+ * the adaptor will enforce alignment checks.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DIN address
- * @size Data size in bytes
+ * @size: Data size in bytes
*/
-static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
- u32 size)
+static inline void set_din_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
{
- pdesc->word[0] = (u32)addr;
+ pdesc->word[0] = addr;
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
}
-/*
- * Set the DIN field of a HW descriptors to CONST mode
+/**
+ * set_din_const() - Set the DIN field of a HW descriptor to CONST mode
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @val: DIN const value
* @size: Data size in bytes
*/
@@ -313,20 +307,20 @@ static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
FIELD_PREP(WORD1_DIN_SIZE, size);
}
-/*
- * Set the DIN not last input data indicator
+/**
+ * set_din_not_last_indication() - Set the DIN not last input data indicator
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
{
pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
}
-/*
- * Set the DOUT field of a HW descriptors
+/**
+ * set_dout_type() - Set the DOUT field of a HW descriptor
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
* @addr: DOUT address
* @size: Data size in bytes
@@ -336,24 +330,24 @@ static inline void set_dout_type(struct cc_hw_desc *pdesc,
enum cc_dma_mode dma_mode, dma_addr_t addr,
u32 size, enum cc_axi_sec axi_sec)
{
- pdesc->word[2] = (u32)addr;
+ pdesc->word[2] = lower_32_bits(addr);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
+ pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, upper_32_bits(addr));
#endif
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
FIELD_PREP(WORD3_DOUT_SIZE, size) |
FIELD_PREP(WORD3_NS_BIT, axi_sec);
}
-/*
- * Set the DOUT field of a HW descriptors to DLLI type
+/**
+ * set_dout_dlli() - Set the DOUT field of a HW descriptor to DLLI type
* The LAST INDICATION is provided by the user
*
- * @pdesc pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
- * @last_ind: The last indication bit
* @axi_sec: AXI secure bit
+ * @last_ind: The last indication bit
*/
static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
u32 size, enum cc_axi_sec axi_sec,
@@ -363,29 +357,28 @@ static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
}
-/*
- * Set the DOUT field of a HW descriptors to DLLI type
+/**
+ * set_dout_mlli() - Set the DOUT field of a HW descriptor to MLLI type
* The LAST INDICATION is provided by the user
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
- * @last_ind: The last indication bit
* @axi_sec: AXI secure bit
+ * @last_ind: The last indication bit
*/
-static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
- u32 size, enum cc_axi_sec axi_sec,
- bool last_ind)
+static inline void set_dout_mlli(struct cc_hw_desc *pdesc, u32 addr, u32 size,
+ enum cc_axi_sec axi_sec, bool last_ind)
{
set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
}
-/*
- * Set the DOUT field of a HW descriptors to NO DMA mode.
+/**
+ * set_dout_no_dma() - Set the DOUT field of a HW descriptor to NO DMA mode.
* Used for NOP descriptor, register patches and other special modes.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
* @write_enable: Enables a write operation to a register
@@ -398,54 +391,55 @@ static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
}
-/*
- * Set the word for the XOR operation.
+/**
+ * set_xor_val() - Set the word for the XOR operation.
*
- * @pdesc: pointer HW descriptor struct
- * @val: xor data value
+ * @pdesc: Pointer to HW descriptor struct
+ * @val: XOR data value
*/
static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
{
pdesc->word[2] = val;
}
-/*
- * Sets the XOR indicator bit in the descriptor
+/**
+ * set_xor_active() - Set the XOR indicator bit in the descriptor
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_xor_active(struct cc_hw_desc *pdesc)
{
pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
}
-/*
- * Select the AES engine instead of HASH engine when setting up combined mode
- * with AES XCBC MAC
+/**
+ * set_aes_not_hash_mode() - Select the AES engine instead of HASH engine when
+ * setting up combined mode with AES XCBC MAC
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
{
pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
}
-/*
- * Set aes xor crypto key, this in some secenrios select SM3 engine
+/**
+ * set_aes_xor_crypto_key() - Set the AES XOR crypto key flag, which in some
+ * scenarios selects the SM3 engine
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc)
{
pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1);
}
-/*
- * Set the DOUT field of a HW descriptors to SRAM mode
+/**
+ * set_dout_sram() - Set the DOUT field of a HW descriptor to SRAM mode
* Note: No need to check SRAM alignment since host requests do not use SRAM and
- * adaptor will enforce alignment check.
+ * the adaptor will enforce alignment checks.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @addr: DOUT address
* @size: Data size in bytes
*/
@@ -456,32 +450,34 @@ static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
FIELD_PREP(WORD3_DOUT_SIZE, size);
}
-/*
- * Sets the data unit size for XEX mode in data_out_addr[15:0]
+/**
+ * set_xex_data_unit_size() - Set the data unit size for XEX mode in
+ * data_out_addr[15:0]
*
- * @pdesc: pDesc pointer HW descriptor struct
- * @size: data unit size for XEX mode
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Data unit size for XEX mode
*/
static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
{
pdesc->word[2] = size;
}
-/*
- * Set the number of rounds for Multi2 in data_out_addr[15:0]
+/**
+ * set_multi2_num_rounds() - Set the number of rounds for Multi2 in
+ * data_out_addr[15:0]
*
- * @pdesc: pointer HW descriptor struct
- * @num: number of rounds for Multi2
+ * @pdesc: Pointer to HW descriptor struct
+ * @num: Number of rounds for Multi2
*/
static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
{
pdesc->word[2] = num;
}
-/*
- * Set the flow mode.
+/**
+ * set_flow_mode() - Set the flow mode.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_flow_mode(struct cc_hw_desc *pdesc,
@@ -490,22 +486,22 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
}
-/*
- * Set the cipher mode.
+/**
+ * set_cipher_mode() - Set the cipher mode.
*
- * @pdesc: pointer HW descriptor struct
- * @mode: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
-/*
- * Set the cipher mode for hash algorithms.
+/**
+ * set_hash_cipher_mode() - Set the cipher mode for hash algorithms.
*
- * @pdesc: pointer HW descriptor struct
- * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
 * @hash_mode: Specifies which hash is being handled
*/
static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
@@ -517,10 +513,10 @@ static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
set_aes_xor_crypto_key(pdesc);
}
-/*
- * Set the cipher configuration fields.
+/**
+ * set_cipher_config0() - Set the cipher configuration fields.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
@@ -528,11 +524,11 @@ static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
-/*
- * Set the cipher configuration fields.
+/**
+ * set_cipher_config1() - Set the cipher configuration fields.
*
- * @pdesc: pointer HW descriptor struct
- * @config: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @config: Padding mode
*/
static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
enum cc_hash_conf_pad config)
@@ -540,10 +536,10 @@ static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
}
-/*
- * Set HW key configuration fields.
+/**
+ * set_hw_crypto_key() - Set HW key configuration fields.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
 * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
*/
static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
@@ -555,64 +551,64 @@ static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
(hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
}
-/*
- * Set byte order of all setup-finalize descriptors.
+/**
+ * set_bytes_swap() - Set byte order of all setup-finalize descriptors.
*
- * @pdesc: pointer HW descriptor struct
- * @config: Any one of the modes defined in [CC7x-DESC]
+ * @pdesc: Pointer to HW descriptor struct
+ * @config: True to enable byte swapping
*/
static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
{
pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
}
-/*
- * Set CMAC_SIZE0 mode.
+/**
+ * set_cmac_size0_mode() - Set CMAC_SIZE0 mode.
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
*/
static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
}
-/*
- * Set key size descriptor field.
+/**
+ * set_key_size() - Set key size descriptor field.
*
- * @pdesc: pointer HW descriptor struct
- * @size: key size in bytes (NOT size code)
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
*/
static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
{
pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
}
-/*
- * Set AES key size.
+/**
+ * set_key_size_aes() - Set AES key size.
*
- * @pdesc: pointer HW descriptor struct
- * @size: key size in bytes (NOT size code)
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
*/
static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
{
set_key_size(pdesc, ((size >> 3) - 2));
}
-/*
- * Set DES key size.
+/**
+ * set_key_size_des() - Set DES key size.
*
- * @pdesc: pointer HW descriptor struct
- * @size: key size in bytes (NOT size code)
+ * @pdesc: Pointer to HW descriptor struct
+ * @size: Key size in bytes (NOT size code)
*/
static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
{
set_key_size(pdesc, ((size >> 3) - 1));
}
-/*
- * Set the descriptor setup mode
+/**
+ * set_setup_mode() - Set the descriptor setup mode
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @mode: Any one of the setup modes defined in [CC7x-DESC]
*/
static inline void set_setup_mode(struct cc_hw_desc *pdesc,
@@ -621,10 +617,10 @@ static inline void set_setup_mode(struct cc_hw_desc *pdesc,
pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
}
-/*
- * Set the descriptor cipher DO
+/**
+ * set_cipher_do() - Set the descriptor cipher DO
*
- * @pdesc: pointer HW descriptor struct
+ * @pdesc: Pointer to HW descriptor struct
* @config: Any one of the cipher do defined in [CC7x-DESC]
*/
static inline void set_cipher_do(struct cc_hw_desc *pdesc,
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 24c368b866f6..d39e1664fc7e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -15,29 +15,25 @@
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
-const struct dev_pm_ops ccree_pm = {
- SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
-};
-
-int cc_pm_suspend(struct device *dev)
+static int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
- cc_clk_off(drvdata);
+ clk_disable_unprepare(drvdata->clk);
return 0;
}
-int cc_pm_resume(struct device *dev)
+static int cc_pm_resume(struct device *dev)
{
int rc;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
/* Enables the device source clk */
- rc = cc_clk_on(drvdata);
+ rc = clk_prepare_enable(drvdata->clk);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
@@ -62,53 +58,19 @@ int cc_pm_resume(struct device *dev)
return 0;
}
+const struct dev_pm_ops ccree_pm = {
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
+
int cc_pm_get(struct device *dev)
{
- int rc = 0;
- struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-
- if (drvdata->pm_on)
- rc = pm_runtime_get_sync(dev);
+ int rc = pm_runtime_get_sync(dev);
return (rc == 1 ? 0 : rc);
}
void cc_pm_put_suspend(struct device *dev)
{
- struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-
- if (drvdata->pm_on) {
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
-}
-
-bool cc_pm_is_dev_suspended(struct device *dev)
-{
- /* check device state using runtime api */
- return pm_runtime_suspended(dev);
-}
-
-int cc_pm_init(struct cc_drvdata *drvdata)
-{
- struct device *dev = drvdata_to_dev(drvdata);
-
- /* must be before the enabling to avoid redundant suspending */
- pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(dev);
- /* set us as active - note we won't do PM ops until cc_pm_go()! */
- return pm_runtime_set_active(dev);
-}
-
-/* enable the PM module*/
-void cc_pm_go(struct cc_drvdata *drvdata)
-{
- pm_runtime_enable(drvdata_to_dev(drvdata));
- drvdata->pm_on = true;
-}
-
-void cc_pm_fini(struct cc_drvdata *drvdata)
-{
- pm_runtime_disable(drvdata_to_dev(drvdata));
- drvdata->pm_on = false;
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
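With cc_pm_init()/cc_pm_go()/cc_pm_fini() gone, the generic runtime-PM core
drives suspend/resume through ccree_pm, and callers only bracket hardware
access with the two remaining helpers. A hedged sketch of a caller
(cc_do_hw_work is hypothetical):

/* Hypothetical caller, for illustration only: */
static int cc_do_hw_work(struct device *dev)
{
	int rc = cc_pm_get(dev);	/* resume the device if suspended */

	if (rc)
		return rc;

	/* ... program descriptors, touch registers ... */

	cc_pm_put_suspend(dev);		/* mark busy, arm autosuspend */
	return 0;
}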
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 80a18e11cae4..50cac33de118 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -15,26 +15,11 @@
extern const struct dev_pm_ops ccree_pm;
-int cc_pm_init(struct cc_drvdata *drvdata);
-void cc_pm_go(struct cc_drvdata *drvdata);
-void cc_pm_fini(struct cc_drvdata *drvdata);
-int cc_pm_suspend(struct device *dev);
-int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
void cc_pm_put_suspend(struct device *dev);
-bool cc_pm_is_dev_suspended(struct device *dev);
#else
-static inline int cc_pm_init(struct cc_drvdata *drvdata)
-{
- return 0;
-}
-
-static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
-
-static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
-
static inline int cc_pm_get(struct device *dev)
{
return 0;
@@ -42,12 +27,6 @@ static inline int cc_pm_get(struct device *dev)
static inline void cc_pm_put_suspend(struct device *dev) {}
-static inline bool cc_pm_is_dev_suspended(struct device *dev)
-{
- /* if PM not supported device is never suspend */
- return false;
-}
-
#endif
#endif /*__POWER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 9d61e6f12478..1d7649ecf44e 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -206,12 +206,13 @@ static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
}
}
-/*!
- * Completion will take place if and only if user requested completion
- * by cc_send_sync_request().
+/**
+ * request_mgr_complete() - Completion will take place if and only if user
+ * requested completion by cc_send_sync_request().
*
- * \param dev
- * \param dx_compl_h The completion event to signal
+ * @dev: Device pointer
+ * @dx_compl_h: The completion event to signal
+ * @dummy: Unused error code
*/
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
int dummy)
@@ -264,15 +265,15 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
return -ENOSPC;
}
-/*!
- * Enqueue caller request to crypto hardware.
+/**
+ * cc_do_send_request() - Enqueue caller request to crypto hardware.
* Need to be called with HW lock held and PM running
*
- * \param drvdata
- * \param cc_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param add_comp If "true": add an artificial dout DMA to mark completion
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @add_comp: If "true": add an artificial dout DMA to mark completion
*
*/
static void cc_do_send_request(struct cc_drvdata *drvdata,
@@ -295,7 +296,6 @@ static void cc_do_send_request(struct cc_drvdata *drvdata,
req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
(MAX_REQUEST_QUEUE_SIZE - 1);
- /* TODO: Use circ_buf.h ? */
dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
@@ -377,7 +377,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
rc = cc_queues_status(drvdata, mgr, bli->len);
if (rc) {
/*
- * There is still not room in the FIFO for
+ * There is still no room in the FIFO for
* this request. Bail out. We'll return here
* on the next completion irq.
*/
@@ -476,10 +476,6 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
break;
spin_unlock_bh(&mgr->hw_lock);
- if (rc != -EAGAIN) {
- cc_pm_put_suspend(dev);
- return rc;
- }
wait_for_completion_interruptible(&drvdata->hw_queue_avail);
reinit_completion(&drvdata->hw_queue_avail);
}
@@ -490,16 +486,18 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
return 0;
}
-/*!
- * Enqueue caller request to crypto hardware during init process.
- * assume this function is not called in middle of a flow,
+/**
+ * send_request_init() - Enqueue caller request to crypto hardware during init
+ * process.
+ * Assume this function is not called in the middle of a flow,
* since we set QUEUE_LAST_IND flag in the last descriptor.
*
- * \param drvdata
- * \param desc The crypto sequence
- * \param len The crypto sequence length
+ * @drvdata: Associated device driver context
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
*
- * \return int Returns "0" upon success
+ * Return:
+ * Returns "0" upon success
*/
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
unsigned int len)
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index ff7746aaaf35..ae25ca843dce 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -12,18 +12,17 @@
int cc_req_mgr_init(struct cc_drvdata *drvdata);
-/*!
- * Enqueue caller request to crypto hardware.
+/**
+ * cc_send_request() - Enqueue caller request to crypto hardware.
*
- * \param drvdata
- * \param cc_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller
- * If "false": this function adds a dummy descriptor completion
- * and waits upon completion signal.
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @req: Asynchronous crypto request
*
- * \return int Returns -EINPROGRESS or error
+ * Return:
+ * Returns -EINPROGRESS or error
*/
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
struct cc_hw_desc *desc, unsigned int len,
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index 62c885e6e791..37a95856361f 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -5,88 +5,61 @@
#include "cc_sram_mgr.h"
/**
- * struct cc_sram_ctx -Internal RAM context manager
- * @sram_free_offset: the offset to the non-allocated area
- */
-struct cc_sram_ctx {
- cc_sram_addr_t sram_free_offset;
-};
-
-/**
- * cc_sram_mgr_fini() - Cleanup SRAM pool.
- *
- * @drvdata: Associated device driver context
- */
-void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
-{
- /* Nothing needed */
-}
-
-/**
* cc_sram_mgr_init() - Initializes SRAM pool.
* The pool starts right at the beginning of SRAM.
* Returns zero for success, negative value otherwise.
*
* @drvdata: Associated device driver context
+ *
+ * Return:
+ * 0 for success, negative error code for failure.
*/
int cc_sram_mgr_init(struct cc_drvdata *drvdata)
{
- struct cc_sram_ctx *ctx;
- dma_addr_t start = 0;
+ u32 start = 0;
struct device *dev = drvdata_to_dev(drvdata);
if (drvdata->hw_rev < CC_HW_REV_712) {
/* Pool starts after ROM bytes */
- start = (dma_addr_t)cc_ioread(drvdata,
- CC_REG(HOST_SEP_SRAM_THRESHOLD));
-
+ start = cc_ioread(drvdata, CC_REG(HOST_SEP_SRAM_THRESHOLD));
if ((start & 0x3) != 0) {
- dev_err(dev, "Invalid SRAM offset %pad\n", &start);
+ dev_err(dev, "Invalid SRAM offset 0x%x\n", start);
return -EINVAL;
}
}
- /* Allocate "this" context */
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-
- if (!ctx)
- return -ENOMEM;
-
- ctx->sram_free_offset = start;
- drvdata->sram_mgr_handle = ctx;
-
+ drvdata->sram_free_offset = start;
return 0;
}
-/*!
- * Allocated buffer from SRAM pool.
- * Note: Caller is responsible to free the LAST allocated buffer.
- * This function does not taking care of any fragmentation may occur
- * by the order of calls to alloc/free.
+/**
+ * cc_sram_alloc() - Allocate buffer from SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ * @size: The requested number of bytes to allocate
*
- * \param drvdata
- * \param size The requested bytes to allocate
+ * Return:
+ * Address offset in SRAM or NULL_SRAM_ADDR for failure.
*/
-cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
+u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
{
- struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
- cc_sram_addr_t p;
+ u32 p;
if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
size);
return NULL_SRAM_ADDR;
}
- if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
- dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
- size, smgr_ctx->sram_free_offset);
+ if (size > (CC_CC_SRAM_SIZE - drvdata->sram_free_offset)) {
+ dev_err(dev, "Not enough space to allocate %u B (at offset %u)\n",
+ size, drvdata->sram_free_offset);
return NULL_SRAM_ADDR;
}
- p = smgr_ctx->sram_free_offset;
- smgr_ctx->sram_free_offset += size;
- dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
+ p = drvdata->sram_free_offset;
+ drvdata->sram_free_offset += size;
+ dev_dbg(dev, "Allocated %u B @ %u\n", size, p);
return p;
}
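The SRAM pool is now a plain bump allocator tracked directly in
drvdata->sram_free_offset: allocations are word-aligned, strictly sequential,
and never freed individually. Illustration only:

/*
 * u32 a = cc_sram_alloc(drvdata, 64);	// returns the current offset
 * u32 b = cc_sram_alloc(drvdata, 32);	// returns a + 64
 *
 * Both return NULL_SRAM_ADDR ((u32)-1) if the size is not a multiple
 * of 4 or the pool (CC_CC_SRAM_SIZE) is exhausted.
 */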
@@ -97,13 +70,12 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
*
* @src: A pointer to array of words to set as consts.
* @dst: The target SRAM buffer to set into
- * @nelements: The number of words in "src" array
+ * @nelement: The number of words in "src" array
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
- unsigned int nelement, struct cc_hw_desc *seq,
- unsigned int *seq_len)
+void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
+ struct cc_hw_desc *seq, unsigned int *seq_len)
{
u32 i;
unsigned int idx = *seq_len;
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index 1d14de9ee8c3..1c965ef83002 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -10,42 +10,30 @@
struct cc_drvdata;
-/**
- * Address (offset) within CC internal SRAM
- */
-
-typedef u64 cc_sram_addr_t;
-
-#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
+#define NULL_SRAM_ADDR ((u32)-1)
-/*!
- * Initializes SRAM pool.
+/**
+ * cc_sram_mgr_init() - Initializes SRAM pool.
* The first X bytes of SRAM are reserved for ROM usage, hence, pool
* starts right after X bytes.
*
- * \param drvdata
+ * @drvdata: Associated device driver context
*
- * \return int Zero for success, negative value otherwise.
+ * Return:
+ * Zero for success, negative value otherwise.
*/
int cc_sram_mgr_init(struct cc_drvdata *drvdata);
-/*!
- * Uninits SRAM pool.
+/**
+ * cc_sram_alloc() - Allocate buffer from SRAM pool.
*
- * \param drvdata
- */
-void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
-
-/*!
- * Allocated buffer from SRAM pool.
- * Note: Caller is responsible to free the LAST allocated buffer.
- * This function does not taking care of any fragmentation may occur
- * by the order of calls to alloc/free.
+ * @drvdata: Associated device driver context
+ * @size: The requested bytes to allocate
*
- * \param drvdata
- * \param size The requested bytes to allocate
+ * Return:
+ * Address offset in SRAM or NULL_SRAM_ADDR for failure.
*/
-cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
/**
* cc_set_sram_desc() - Create const descriptors sequence to
@@ -54,12 +42,11 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
*
* @src: A pointer to array of words to set as consts.
* @dst: The target SRAM buffer to set into
- * @nelements: The number of words in "src" array
+ * @nelement: The number of words in "src" array
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
- unsigned int nelement, struct cc_hw_desc *seq,
- unsigned int *seq_len);
+void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement,
+ struct cc_hw_desc *seq, unsigned int *seq_len);
#endif /*__CC_SRAM_MGR_H__*/
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b4b9b22125d1..c29b80dd30d8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -715,6 +715,52 @@ static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
return err;
}
+
+static inline int get_qidxs(struct crypto_async_request *req,
+ unsigned int *txqidx, unsigned int *rxqidx)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ int ret = 0;
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ {
+ struct aead_request *aead_req =
+ container_of(req, struct aead_request, base);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ {
+ struct skcipher_request *sk_req =
+ container_of(req, struct skcipher_request, base);
+ struct chcr_skcipher_req_ctx *reqctx =
+ skcipher_request_ctx(sk_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ case CRYPTO_ALG_TYPE_AHASH:
+ {
+ struct ahash_request *ahash_req =
+ container_of(req, struct ahash_request, base);
+ struct chcr_ahash_req_ctx *reqctx =
+ ahash_request_ctx(ahash_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ /* should never get here */
+ BUG();
+ break;
+ }
+ return ret;
+}
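get_qidxs() recovers the per-request queue indices from the type-specific
request context, dispatching on the algorithm type bits in cra_flags. A hedged
sketch of how a caller might use it (mirroring create_wreq below; areq stands
in for any async crypto request):

/* Illustration only: */
unsigned int txqidx = 0, rxqidx = 0;

if (get_qidxs(&areq->base, &txqidx, &rxqidx))
	return -EINVAL;		/* unknown algorithm type */
/* txqidx/rxqidx now select the TX/RX queues chosen at request setup */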
+
static inline void create_wreq(struct chcr_context *ctx,
struct chcr_wr *chcr_req,
struct crypto_async_request *req,
@@ -725,7 +771,15 @@ static inline void create_wreq(struct chcr_context *ctx,
unsigned int lcb)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+ unsigned int tx_channel_id, rx_channel_id;
+ unsigned int txqidx = 0, rxqidx = 0;
+ unsigned int qid, fid;
+
+ get_qidxs(req, &txqidx, &rxqidx);
+ qid = u_ctx->lldi.rxq_ids[rxqidx];
+ fid = u_ctx->lldi.rxq_ids[0];
+ tx_channel_id = txqidx / ctx->txq_perchan;
+ rx_channel_id = rxqidx / ctx->rxq_perchan;
chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -734,15 +788,12 @@ static inline void create_wreq(struct chcr_context *ctx,
chcr_req->wreq.len16_pkd =
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
- chcr_req->wreq.rx_chid_to_rx_q_id =
- FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
- !!lcb, ctx->tx_qidx);
+ chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
+ !!lcb, txqidx);
- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
- qid);
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
- ((sizeof(chcr_req->wreq)) >> 4)));
-
+ ((sizeof(chcr_req->wreq)) >> 4)));
chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
sizeof(chcr_req->key_ctx) + sc_len);
@@ -758,7 +809,8 @@ static inline void create_wreq(struct chcr_context *ctx,
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
- struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
@@ -771,7 +823,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
unsigned int kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- struct adapter *adap = padap(c_ctx(tfm)->dev);
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
@@ -791,7 +844,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1086,8 +1139,12 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
AES_BLOCK_SIZE));
- else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv, 1);
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
+ if (!reqctx->partial_req)
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
+ else
+ ret = chcr_update_tweak(req, iv, 1);
+ }
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
/*Already updated for Decrypt*/
if (!reqctx->op)
@@ -1102,12 +1159,13 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
unsigned char *input, int err)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_context *ctx = c_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
- struct cipher_wr_param wrparam;
+ struct cipher_wr_param wrparam;
struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
@@ -1152,7 +1210,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
if (get_cryptoalg_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
- wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
+ wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
wrparam.req = req;
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
@@ -1162,14 +1220,24 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
reqctx->last_req_len = bytes;
reqctx->processed += bytes;
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
return 0;
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
return err;
@@ -1188,6 +1256,7 @@ static int process_cipher(struct skcipher_request *req,
int bytes, err = -EINVAL;
reqctx->processed = 0;
+ reqctx->partial_req = 0;
if (!req->iv)
goto error;
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
@@ -1278,6 +1347,7 @@ static int process_cipher(struct skcipher_request *req,
}
reqctx->processed = bytes;
reqctx->last_req_len = bytes;
+ reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
return 0;
unmap:
@@ -1289,31 +1359,43 @@ error:
static int chcr_aes_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
- int err, isfull = 0;
+ int err;
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_context *ctx = c_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
err = chcr_inc_wrcount(dev);
if (err)
return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- c_ctx(tfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
err = -ENOSPC;
goto error;
- }
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
&skb, CHCR_ENCRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP) {
+ reqctx->partial_req = 1;
+ wait_for_completion(&ctx->cbc_aes_aio_done);
+ }
+ return -EINPROGRESS;
error:
chcr_dec_wrcount(dev);
return err;
@@ -1322,44 +1404,45 @@ error:
static int chcr_aes_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
- int err, isfull = 0;
+ int err;
+ struct chcr_context *ctx = c_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
err = chcr_inc_wrcount(dev);
if (err)
return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- c_ctx(tfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
return -ENOSPC;
- }
-
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
&skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
}
-
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx = NULL;
- unsigned int id;
- int txq_perchan, txq_idx, ntxq;
- int err = 0, rxq_perchan, rxq_idx;
+ int txq_perchan, ntxq;
+ int err = 0, rxq_perchan;
- id = smp_processor_id();
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
- err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
@@ -1367,23 +1450,10 @@ static int chcr_device_init(struct chcr_context *ctx)
ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
- spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->tx_chan_id = ctx->dev->tx_channel_id;
- ctx->dev->tx_channel_id =
- (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
- spin_unlock(&ctx->dev->lock_chcr_dev);
- rxq_idx = ctx->tx_chan_id * rxq_perchan;
- rxq_idx += id % rxq_perchan;
- txq_idx = ctx->tx_chan_id * txq_perchan;
- txq_idx += id % txq_perchan;
- ctx->rx_qidx = rxq_idx;
- ctx->tx_qidx = txq_idx;
- /* Channel Id used by SGE to forward packet to Host.
- * Same value should be used in cpl_fw6_pld RSS_CH field
- * by FW. Driver programs PCI channel ID to be used in fw
- * at the time of queue allocation with value "pi->tx_chan"
- */
- ctx->pci_chan_id = txq_idx / txq_perchan;
+ ctx->ntxq = ntxq;
+ ctx->nrxq = u_ctx->lldi.nrxq;
+ ctx->rxq_perchan = rxq_perchan;
+ ctx->txq_perchan = txq_perchan;
}
out:
return err;
@@ -1401,7 +1471,7 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
-
+ init_completion(&ctx->cbc_aes_aio_done);
crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
return chcr_device_init(ctx);
@@ -1485,9 +1555,10 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
+ struct chcr_context *ctx = h_ctx(tfm);
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_wr *chcr_req;
struct ulptx_sgl *ulptx;
unsigned int nents = 0, transhdr_len;
@@ -1496,6 +1567,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
GFP_ATOMIC;
struct adapter *adap = padap(h_ctx(tfm)->dev);
int error = 0;
+ unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
@@ -1513,7 +1585,8 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
+
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1576,16 +1649,22 @@ static int chcr_ahash_update(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
- int error, isfull = 0;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
if (nbytes + req_ctx->reqlen >= bs) {
remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1603,12 +1682,10 @@ static int chcr_ahash_update(struct ahash_request *req)
 * inflight count for dev guarantees that lldi and padap are valid
*/
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
error = -ENOSPC;
goto err;
- }
}
chcr_init_hctx_per_wr(req_ctx);
@@ -1650,10 +1727,9 @@ static int chcr_ahash_update(struct ahash_request *req)
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
-
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
@@ -1678,16 +1754,22 @@ static int chcr_ahash_final(struct ahash_request *req)
struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct hash_wr_param params;
struct sk_buff *skb;
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
int error = -EINVAL;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
error = chcr_inc_wrcount(dev);
if (error)
return -ENXIO;
chcr_init_hctx_per_wr(req_ctx);
- u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
@@ -1727,7 +1809,7 @@ static int chcr_ahash_final(struct ahash_request *req)
}
req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
return -EINPROGRESS;
err:
@@ -1740,25 +1822,29 @@ static int chcr_ahash_finup(struct ahash_request *req)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_dev *dev = h_ctx(rtfm)->dev;
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
- int error, isfull = 0;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
error = chcr_inc_wrcount(dev);
if (error)
return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
error = -ENOSPC;
goto err;
- }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
@@ -1816,10 +1902,9 @@ static int chcr_ahash_finup(struct ahash_request *req)
req_ctx->reqlen = 0;
req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
-
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
@@ -1832,11 +1917,18 @@ static int chcr_ahash_digest(struct ahash_request *req)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct chcr_dev *dev = h_ctx(rtfm)->dev;
- struct uld_ctx *u_ctx = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
- int error, isfull = 0;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
@@ -1844,14 +1936,11 @@ static int chcr_ahash_digest(struct ahash_request *req)
if (error)
return -ENXIO;
- u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
error = -ENOSPC;
goto err;
- }
}
chcr_init_hctx_per_wr(req_ctx);
@@ -1907,9 +1996,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
}
req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
@@ -1922,14 +2011,20 @@ static int chcr_ahash_continue(struct ahash_request *req)
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = NULL;
+ struct chcr_context *ctx = h_ctx(rtfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(h_ctx(rtfm));
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
if (is_hmac(crypto_ahash_tfm(rtfm))) {
@@ -1969,7 +2064,7 @@ static int chcr_ahash_continue(struct ahash_request *req)
}
hctx_wr->processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
return 0;
err:
@@ -2315,7 +2410,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
int size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
@@ -2331,7 +2427,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(a_ctx(tfm)->dev);
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
if (req->cryptlen == 0)
return NULL;
@@ -2351,7 +2448,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
CHCR_SRC_SG_SIZE, 0);
dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
@@ -2383,7 +2480,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
null ? 0 : 1 + IV,
@@ -2471,8 +2568,9 @@ int chcr_aead_dma_map(struct device *dev,
else
reqctx->b0_dma = 0;
if (req->src == req->dst) {
- error = dma_map_sg(dev, req->src, sg_nents(req->src),
- DMA_BIDIRECTIONAL);
+ error = dma_map_sg(dev, req->src,
+ sg_nents_for_len(req->src, dst_size),
+ DMA_BIDIRECTIONAL);
if (!error)
goto err;
} else {
@@ -2558,13 +2656,14 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
unsigned int authsize = crypto_aead_authsize(tfm);
struct chcr_context *ctx = a_ctx(tfm);
u32 temp;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
temp = req->assoclen + req->cryptlen +
(reqctx->op ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_cipher_src_ent(struct skcipher_request *req,
@@ -2599,14 +2698,14 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
reqctx->dst_ofst);
reqctx->dstsg = dsgl_walk.last_sg;
reqctx->dst_ofst = dsgl_walk.last_sg_len;
-
- dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
@@ -2804,10 +2903,12 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = a_ctx(tfm)->tx_chan_id;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2828,9 +2929,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
auth_offset = 0;
}
-
- sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, 1);
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
sec_cpl->pldlen =
htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
/* For CCM there will always be b0. So AAD start will always be 1 */
@@ -2973,7 +3072,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
int size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
@@ -2986,7 +3086,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(a_ctx(tfm)->dev);
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
assoclen = req->assoclen - 8;
@@ -3028,7 +3129,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
//Offset of tag from end
temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- a_ctx(tfm)->tx_chan_id, 2, 1);
+ rx_channel_id, 2, 1);
chcr_req->sec_cpl.pldlen =
htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
@@ -3576,9 +3677,9 @@ static int chcr_aead_op(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct uld_ctx *u_ctx;
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
- int isfull = 0;
struct chcr_dev *cdev;
cdev = a_ctx(tfm)->dev;
@@ -3594,18 +3695,15 @@ static int chcr_aead_op(struct aead_request *req,
return chcr_aead_fallback(req, reqctx->op);
}
- u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
chcr_dec_wrcount(cdev);
return -ENOSPC;
- }
}
/* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
if (IS_ERR_OR_NULL(skb)) {
chcr_dec_wrcount(cdev);
@@ -3613,15 +3711,22 @@ static int chcr_aead_op(struct aead_request *req,
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
chcr_send_wr(skb);
- return isfull ? -EBUSY : -EINPROGRESS;
+ return -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
reqctx->verify = VERIFY_HW;
reqctx->op = CHCR_ENCRYPT_OP;
@@ -3643,9 +3748,16 @@ static int chcr_aead_encrypt(struct aead_request *req)
static int chcr_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int size;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
if (aeadctx->mayverify == VERIFY_SW) {
size = crypto_aead_maxauthsize(tfm);
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index dfb53e746e51..ffd4ec0c7374 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -195,6 +195,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it to the device list */
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
return ERR_PTR(-EOPNOTSUPP);
@@ -287,6 +288,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
case CXGB4_STATE_DETACH:
chcr_detach_device(u_ctx);
+ if (!atomic_read(&drv_data.dev_count))
+ stop_crypto();
break;
case CXGB4_STATE_START_RECOVERY:
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index b5b371b8d343..2c09672e00a4 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -43,7 +43,8 @@
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
-#define DRV_VERSION "1.0.0.0"
+#define DRV_VERSION "1.0.0.0-ko"
+#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
@@ -67,7 +68,7 @@ struct _key_ctx {
__be32 ctx_hdr;
u8 salt[MAX_SALT];
__be64 iv_to_auth;
- unsigned char key[0];
+ unsigned char key[];
};
#define KEYCTX_TX_WR_IV_S 55
@@ -147,7 +148,6 @@ struct chcr_dev {
int wqretry;
struct delayed_work detach_work;
struct completion detach_comp;
- unsigned char tx_channel_id;
};
struct uld_ctx {
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 6db2df8c8a05..542bebae001f 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -187,6 +187,8 @@ struct chcr_aead_reqctx {
unsigned int op;
u16 imm;
u16 verify;
+ u16 txqidx;
+ u16 rxqidx;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
u8 *scratch_pad;
};
@@ -250,10 +252,11 @@ struct __crypto_ctx {
struct chcr_context {
struct chcr_dev *dev;
- unsigned char tx_qidx;
- unsigned char rx_qidx;
- unsigned char tx_chan_id;
- unsigned char pci_chan_id;
+ unsigned char rxq_perchan;
+ unsigned char txq_perchan;
+ unsigned int ntxq;
+ unsigned int nrxq;
+ struct completion cbc_aes_aio_done;
struct __crypto_ctx crypto_ctx[0];
};
@@ -279,6 +282,8 @@ struct chcr_ahash_req_ctx {
u8 *skbfr;
/* SKB which is being sent to the hardware for processing */
u64 data_len; /* Data len till time */
+ u16 txqidx;
+ u16 rxqidx;
u8 reqlen;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -290,12 +295,15 @@ struct chcr_skcipher_req_ctx {
struct scatterlist *dstsg;
unsigned int processed;
unsigned int last_req_len;
+ unsigned int partial_req;
struct scatterlist *srcsg;
unsigned int src_ofst;
unsigned int dst_ofst;
unsigned int op;
u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ u16 txqidx;
+ u16 rxqidx;
};
struct chcr_alg_template {
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index e1651adb9d06..dccef3a2908b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1110,10 +1110,10 @@ new_buf:
pg_size = page_size(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
- merge = 1;
+ merge = true;
goto copy;
}
- merge = 0;
+ merge = false;
if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
MAX_SKB_FRAGS))
goto new_buf;
@@ -1428,6 +1428,8 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
+ struct net_device *dev = csk->egress_dev;
+ struct adapter *adap = netdev2adap(dev);
struct tcp_sock *tp = tcp_sk(sk);
unsigned long avail;
int buffers_freed;
@@ -1585,6 +1587,7 @@ skip_copy:
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
} else {
+ atomic_inc(&adap->chcr_stats.tls_pdu_rx);
tp->copied_seq += hws->rcvpld;
}
chtls_free_skb(sk, skb);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index a038de90b2ea..2110d0893bc7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -174,9 +174,16 @@ static inline void chtls_dev_release(struct kref *kref)
{
struct tls_toe_device *dev;
struct chtls_dev *cdev;
+ struct adapter *adap;
dev = container_of(kref, struct tls_toe_device, kref);
cdev = to_chtls_dev(dev);
+
+ /* Reset tls rx/tx stats */
+ adap = pci_get_drvdata(cdev->pdev);
+ atomic_set(&adap->chcr_stats.tls_pdu_tx, 0);
+ atomic_set(&adap->chcr_stats.tls_pdu_rx, 0);
+
chtls_free_uld(cdev);
}
@@ -229,8 +236,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info)
struct chtls_dev *cdev;
int i, j;
- cdev = kzalloc(sizeof(*cdev) + info->nports *
- (sizeof(struct net_device *)), GFP_KERNEL);
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
goto out;
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 8851161f722f..095850d01dcc 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -40,6 +40,7 @@ config CRYPTO_DEV_HISI_QM
tristate
depends on ARM64 || COMPILE_TEST
depends on PCI && PCI_MSI
+ depends on UACCE || UACCE=n
help
HiSilicon accelerator engines use a common queue management
interface. Specific engine drivers may use this module.
@@ -49,6 +50,7 @@ config CRYPTO_DEV_HISI_ZIP
depends on PCI && PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
+ depends on UACCE || UACCE=n
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon ZIP Driver
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index ddf13ea9862a..03d512ec6336 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -46,7 +46,6 @@ struct hpre_debug {
struct hpre {
struct hisi_qm qm;
- struct list_head list;
struct hpre_debug debug;
u32 num_vfs;
unsigned long status;
@@ -76,7 +75,7 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hpre *hpre_find_device(int node);
+struct hisi_qp *hpre_create_qp(void);
int hpre_algs_register(void);
void hpre_algs_unregister(void);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 5d400d69e8e4..65425250b2e9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -147,26 +147,18 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
static struct hisi_qp *hpre_get_qp_and_start(void)
{
struct hisi_qp *qp;
- struct hpre *hpre;
int ret;
- /* find the proper hpre device, which is near the current CPU core */
- hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
- if (!hpre) {
- pr_err("Can not find proper hpre device!\n");
- return ERR_PTR(-ENODEV);
- }
-
- qp = hisi_qm_create_qp(&hpre->qm, 0);
- if (IS_ERR(qp)) {
- pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ qp = hpre_create_qp();
+ if (!qp) {
+ pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
}
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
- hisi_qm_release_qp(qp);
- pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ hisi_qm_free_qps(&qp, 1);
+ pci_err(qp->qm->pdev, "Can not start qp!\n");
return ERR_PTR(-EINVAL);
}
@@ -338,7 +330,7 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
if (is_clear_all) {
idr_destroy(&ctx->req_idr);
kfree(ctx->req_list);
- hisi_qm_release_qp(ctx->qp);
+ hisi_qm_free_qps(&ctx->qp, 1);
}
ctx->crt_g2_mode = false;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 401747de67a8..88be53bf4a38 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -82,8 +82,7 @@
#define HPRE_VIA_MSI_DSM 1
-static LIST_HEAD(hpre_list);
-static DEFINE_MUTEX(hpre_list_lock);
+static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -196,43 +195,17 @@ static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444);
MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)");
-static inline void hpre_add_to_list(struct hpre *hpre)
+struct hisi_qp *hpre_create_qp(void)
{
- mutex_lock(&hpre_list_lock);
- list_add_tail(&hpre->list, &hpre_list);
- mutex_unlock(&hpre_list_lock);
-}
-
-static inline void hpre_remove_from_list(struct hpre *hpre)
-{
- mutex_lock(&hpre_list_lock);
- list_del(&hpre->list);
- mutex_unlock(&hpre_list_lock);
-}
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp = NULL;
+ int ret;
-struct hpre *hpre_find_device(int node)
-{
- struct hpre *hpre, *ret = NULL;
- int min_distance = INT_MAX;
- struct device *dev;
- int dev_node = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = &hpre->qm.pdev->dev;
-#ifdef CONFIG_NUMA
- dev_node = dev->numa_node;
- if (dev_node < 0)
- dev_node = 0;
-#endif
- if (node_distance(dev_node, node) < min_distance) {
- ret = hpre;
- min_distance = node_distance(dev_node, node);
- }
- }
- mutex_unlock(&hpre_list_lock);
+ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+ if (!ret)
+ return qp;
- return ret;
+ return NULL;
}
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
@@ -349,18 +322,14 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hpre_hw_error_disable(struct hpre *hpre)
+static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
-
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
}
-static void hpre_hw_error_enable(struct hpre *hpre)
+static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
-
/* enable hpre hw error interrupts */
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
@@ -713,13 +682,39 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
return 0;
}
-static void hpre_hw_err_init(struct hpre *hpre)
+static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &qm->pdev->dev;
+
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
+
+ writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+}
+
+static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}
+static const struct hisi_qm_err_ini hpre_err_ini = {
+ .hw_err_enable = hpre_hw_error_enable,
+ .hw_err_disable = hpre_hw_error_disable,
+ .get_dev_hw_err_status = hpre_get_hw_err_status,
+ .log_dev_hw_err = hpre_log_hw_error,
+ .err_info = {
+ .ce = QM_BASE_CE,
+ .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
+ .fe = 0,
+ .msi = QM_DB_RANDOM_INVALID,
+ }
+};
+
static int hpre_pf_probe_init(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
@@ -731,7 +726,8 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
- hpre_hw_err_init(hpre);
+ qm->err_ini = &hpre_err_ini;
+ hisi_qm_dev_err_init(qm);
return 0;
}
@@ -776,22 +772,21 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
dev_warn(&pdev->dev, "init debugfs fail!\n");
- hpre_add_to_list(hpre);
+ hisi_qm_add_to_list(qm, &hpre_devices);
ret = hpre_algs_register();
if (ret < 0) {
- hpre_remove_from_list(hpre);
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
return 0;
err_with_qm_start:
+ hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm);
err_with_err_init:
- if (pdev->is_physfn)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@@ -907,7 +902,7 @@ static void hpre_remove(struct pci_dev *pdev)
int ret;
hpre_algs_unregister();
- hpre_remove_from_list(hpre);
+ hisi_qm_del_from_list(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) {
ret = hpre_sriov_disable(pdev);
if (ret) {
@@ -922,69 +917,13 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_debugfs_exit(hpre);
hisi_qm_stop(qm);
- if (qm->fun_type == QM_HW_PF)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
-static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts)
-{
- const struct hpre_hw_error *err = hpre_hw_errors;
- struct device *dev = &hpre->qm.pdev->dev;
-
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
-}
-
-static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre)
-{
- u32 err_sts;
-
- /* read err sts */
- err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS);
- if (err_sts) {
- hpre_log_hw_error(hpre, err_sts);
-
- /* clear error interrupts */
- writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT);
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, hpre_ret;
-
- /* log qm error */
- qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
-
- /* log hpre error */
- hpre_ret = hpre_hw_error_handle(hpre);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- hpre_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hpre_process_hw_error(pdev);
-}
static const struct pci_error_handlers hpre_err_handler = {
- .error_detected = hpre_error_detected,
+ .error_detected = hisi_qm_dev_err_detected,
};
static struct pci_driver hpre_pci_driver = {
@@ -1013,6 +952,7 @@ static int __init hpre_init(void)
{
int ret;
+ hisi_qm_init_list(&hpre_devices);
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index b57da5ef8b5b..f795fb557630 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -9,6 +9,9 @@
#include <linux/log2.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/uacce.h>
+#include <linux/uaccess.h>
+#include <uapi/misc/uacce/hisi_qm.h>
#include "qm.h"
/* eq/aeq irq enable */
@@ -269,6 +272,12 @@ struct qm_doorbell {
__le16 priority;
};
+struct hisi_qm_resource {
+ struct hisi_qm *qm;
+ int distance;
+ struct list_head list;
+};
+
struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
@@ -277,6 +286,7 @@ struct hisi_qm_hw_ops {
int (*debug_init)(struct hisi_qm *qm);
void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
u32 msi);
+ void (*hw_error_uninit)(struct hisi_qm *qm);
pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm);
};
@@ -465,9 +475,14 @@ static void qm_cq_head_update(struct hisi_qp *qp)
static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
- struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+ if (qp->event_cb) {
+ qp->event_cb(qp);
+ return;
+ }
if (qp->req_cb) {
+ struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+
while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
dma_rmb();
qp->req_cb(qp, qp->sqe + qm->sqe_size *
@@ -485,17 +500,9 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
}
}
-static void qm_qp_work_func(struct work_struct *work)
+static void qm_work_process(struct work_struct *work)
{
- struct hisi_qp *qp;
-
- qp = container_of(work, struct hisi_qp, work);
- qm_poll_qp(qp, qp->qm);
-}
-
-static irqreturn_t qm_irq_handler(int irq, void *data)
-{
- struct hisi_qm *qm = data;
+ struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
struct hisi_qp *qp;
int eqe_num = 0;
@@ -504,7 +511,7 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
eqe_num++;
qp = qm_to_hisi_qp(qm, eqe);
if (qp)
- queue_work(qp->wq, &qp->work);
+ qm_poll_qp(qp, qm);
if (qm->status.eq_head == QM_Q_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -522,6 +529,17 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
}
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+}
+
+static irqreturn_t do_qm_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = (struct hisi_qm *)data;
+
+	/* use the workqueue created by the QM's device driver, if any */
+ if (qm->wq)
+ queue_work(qm->wq, &qm->work);
+ else
+ schedule_work(&qm->work);
return IRQ_HANDLED;
}
@@ -531,7 +549,7 @@ static irqreturn_t qm_irq(int irq, void *data)
struct hisi_qm *qm = data;
if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
- return qm_irq_handler(irq, data);
+ return do_qm_irq(irq, data);
dev_err(&qm->pdev->dev, "invalid int source\n");
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
@@ -1011,43 +1029,45 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
+static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
+{
+ writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+}
+
static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
- const struct hisi_qm_hw_error *err = qm_hw_error;
+ const struct hisi_qm_hw_error *err;
struct device *dev = &qm->pdev->dev;
u32 reg_val, type, vf_num;
+ int i;
- while (err->msg) {
- if (err->int_msk & error_status) {
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
-
- if (error_status & QM_DB_TIMEOUT) {
- reg_val = readl(qm->io_base +
- QM_ABNORMAL_INF01);
- type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
- QM_DB_TIMEOUT_TYPE_SHIFT;
- vf_num = reg_val & QM_DB_TIMEOUT_VF;
- dev_err(dev, "qm %s doorbell timeout in function %u\n",
- qm_db_timeout[type], vf_num);
- }
-
- if (error_status & QM_OF_FIFO_OF) {
- reg_val = readl(qm->io_base +
- QM_ABNORMAL_INF00);
- type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
- QM_FIFO_OVERFLOW_TYPE_SHIFT;
- vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
-
- if (type < ARRAY_SIZE(qm_fifo_overflow))
- dev_err(dev, "qm %s fifo overflow in function %u\n",
- qm_fifo_overflow[type],
- vf_num);
- else
- dev_err(dev, "unknown error type\n");
- }
+ for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
+ err = &qm_hw_error[i];
+ if (!(err->int_msk & error_status))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & QM_DB_TIMEOUT) {
+ reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
+ type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
+ QM_DB_TIMEOUT_TYPE_SHIFT;
+ vf_num = reg_val & QM_DB_TIMEOUT_VF;
+ dev_err(dev, "qm %s doorbell timeout in function %u\n",
+ qm_db_timeout[type], vf_num);
+ } else if (err->int_msk & QM_OF_FIFO_OF) {
+ reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
+ type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
+ QM_FIFO_OVERFLOW_TYPE_SHIFT;
+ vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
+
+ if (type < ARRAY_SIZE(qm_fifo_overflow))
+ dev_err(dev, "qm %s fifo overflow in function %u\n",
+ qm_fifo_overflow[type], vf_num);
+ else
+ dev_err(dev, "unknown error type\n");
}
- err++;
}
}
@@ -1082,6 +1102,7 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.qm_db = qm_db_v2,
.get_irq_num = qm_get_irq_num_v2,
.hw_error_init = qm_hw_error_init_v2,
+ .hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
};
@@ -1147,20 +1168,9 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
qp->qp_id = qp_id;
qp->alg_type = alg_type;
- INIT_WORK(&qp->work, qm_qp_work_func);
- qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI |
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0);
- if (!qp->wq) {
- ret = -EFAULT;
- goto err_free_qp_mem;
- }
return qp;
-err_free_qp_mem:
- if (qm->use_dma_api)
- dma_free_coherent(dev, qp->qdma.size, qp->qdma.va,
- qp->qdma.dma);
err_clear_bit:
write_lock(&qm->qps_lock);
qm->qp_array[qp_id] = NULL;
@@ -1269,7 +1279,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
* @qp: The qp we want to start to run.
* @arg: Accelerator specific argument.
*
- * After this function, qp can receive request from user. Return qp_id if
+ * After this function, the qp can receive requests from user. Return 0 if
 * successful, or -EBUSY if failed.
*/
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
@@ -1314,7 +1324,7 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
dev_dbg(dev, "queue %d started\n", qp_id);
- return qp_id;
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
@@ -1395,6 +1405,214 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
}
}
+static void qm_qp_event_notifier(struct hisi_qp *qp)
+{
+ wake_up_interruptible(&qp->uacce_q->wait);
+}
+
+static int hisi_qm_get_available_instances(struct uacce_device *uacce)
+{
+ int i, ret;
+ struct hisi_qm *qm = uacce->priv;
+
+ read_lock(&qm->qps_lock);
+ for (i = 0, ret = 0; i < qm->qp_num; i++)
+ if (!qm->qp_array[i])
+ ret++;
+ read_unlock(&qm->qps_lock);
+
+ return ret;
+}
+
+static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
+ unsigned long arg,
+ struct uacce_queue *q)
+{
+ struct hisi_qm *qm = uacce->priv;
+ struct hisi_qp *qp;
+ u8 alg_type = 0;
+
+ qp = hisi_qm_create_qp(qm, alg_type);
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
+
+ q->priv = qp;
+ q->uacce = uacce;
+ qp->uacce_q = q;
+ qp->event_cb = qm_qp_event_notifier;
+ qp->pasid = arg;
+
+ return 0;
+}
+
+static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
+{
+ struct hisi_qp *qp = q->priv;
+
+ hisi_qm_cache_wb(qp->qm);
+ hisi_qm_release_qp(qp);
+}
+
+/* map sq/cq/doorbell to user space */
+static int hisi_qm_uacce_mmap(struct uacce_queue *q,
+ struct vm_area_struct *vma,
+ struct uacce_qfile_region *qfr)
+{
+ struct hisi_qp *qp = q->priv;
+ struct hisi_qm *qm = qp->qm;
+ size_t sz = vma->vm_end - vma->vm_start;
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned long vm_pgoff;
+ int ret;
+
+ switch (qfr->type) {
+ case UACCE_QFRT_MMIO:
+ if (qm->ver == QM_HW_V2) {
+ if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
+ QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
+ return -EINVAL;
+ } else {
+ if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ qm->phys_base >> PAGE_SHIFT,
+ sz, pgprot_noncached(vma->vm_page_prot));
+ case UACCE_QFRT_DUS:
+ if (sz != qp->qdma.size)
+ return -EINVAL;
+
+ /*
+	 * dma_mmap_coherent() requires vm_pgoff to be 0;
+	 * restore vm_pgoff to its initial value afterwards for mmap()
+ */
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
+ ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
+ qp->qdma.dma, sz);
+ vma->vm_pgoff = vm_pgoff;
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
+{
+ struct hisi_qp *qp = q->priv;
+
+ return hisi_qm_start_qp(qp, qp->pasid);
+}
+
+static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
+{
+ hisi_qm_stop_qp(q->priv);
+}
+
+static int qm_set_sqctype(struct uacce_queue *q, u16 type)
+{
+ struct hisi_qm *qm = q->uacce->priv;
+ struct hisi_qp *qp = q->priv;
+
+ write_lock(&qm->qps_lock);
+ qp->alg_type = type;
+ write_unlock(&qm->qps_lock);
+
+ return 0;
+}
+
+static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
+ unsigned long arg)
+{
+ struct hisi_qp *qp = q->priv;
+ struct hisi_qp_ctx qp_ctx;
+
+ if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
+ if (copy_from_user(&qp_ctx, (void __user *)arg,
+ sizeof(struct hisi_qp_ctx)))
+ return -EFAULT;
+
+ if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ return -EINVAL;
+
+ qm_set_sqctype(q, qp_ctx.qc_type);
+ qp_ctx.id = qp->qp_id;
+
+ if (copy_to_user((void __user *)arg, &qp_ctx,
+ sizeof(struct hisi_qp_ctx)))
+ return -EFAULT;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct uacce_ops uacce_qm_ops = {
+ .get_available_instances = hisi_qm_get_available_instances,
+ .get_queue = hisi_qm_uacce_get_queue,
+ .put_queue = hisi_qm_uacce_put_queue,
+ .start_queue = hisi_qm_uacce_start_queue,
+ .stop_queue = hisi_qm_uacce_stop_queue,
+ .mmap = hisi_qm_uacce_mmap,
+ .ioctl = hisi_qm_uacce_ioctl,
+};
+
+static int qm_alloc_uacce(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct uacce_device *uacce;
+ unsigned long mmio_page_nr;
+ unsigned long dus_page_nr;
+ struct uacce_interface interface = {
+ .flags = UACCE_DEV_SVA,
+ .ops = &uacce_qm_ops,
+ };
+
+ strncpy(interface.name, pdev->driver->name, sizeof(interface.name));
+
+ uacce = uacce_alloc(&pdev->dev, &interface);
+ if (IS_ERR(uacce))
+ return PTR_ERR(uacce);
+
+ if (uacce->flags & UACCE_DEV_SVA) {
+ qm->use_sva = true;
+ } else {
+ /* only consider sva case */
+ uacce_remove(uacce);
+ qm->uacce = NULL;
+ return -EINVAL;
+ }
+
+ uacce->is_vf = pdev->is_virtfn;
+ uacce->priv = qm;
+ uacce->algs = qm->algs;
+
+ if (qm->ver == QM_HW_V1) {
+ mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ uacce->api_ver = HISI_QM_API_VER_BASE;
+ } else {
+ mmio_page_nr = QM_DOORBELL_PAGE_NR +
+ QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ }
+
+ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
+ sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
+
+ uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
+ uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
+
+ qm->uacce = uacce;
+
+ return 0;
+}
+
/**
 * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
 * @qm: The qm whose free qp count is wanted.
@@ -1437,10 +1655,14 @@ int hisi_qm_init(struct hisi_qm *qm)
return -EINVAL;
}
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret);
+
ret = pci_enable_device_mem(pdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable device mem!\n");
- return ret;
+ goto err_remove_uacce;
}
ret = pci_request_mem_regions(pdev, qm->dev_name);
@@ -1449,8 +1671,9 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_disable_pcidev;
}
- qm->io_base = ioremap(pci_resource_start(pdev, PCI_BAR_2),
- pci_resource_len(qm->pdev, PCI_BAR_2));
+ qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
+ qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
+ qm->io_base = ioremap(qm->phys_base, qm->phys_size);
if (!qm->io_base) {
ret = -EIO;
goto err_release_mem_regions;
@@ -1479,6 +1702,7 @@ int hisi_qm_init(struct hisi_qm *qm)
qm->qp_in_used = 0;
mutex_init(&qm->mailbox_lock);
rwlock_init(&qm->qps_lock);
+ INIT_WORK(&qm->work, qm_work_process);
dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
qm->use_dma_api ? "dma api" : "iommu api");
@@ -1493,6 +1717,9 @@ err_release_mem_regions:
pci_release_mem_regions(pdev);
err_disable_pcidev:
pci_disable_device(pdev);
+err_remove_uacce:
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
return ret;
}
@@ -1509,6 +1736,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+
if (qm->use_dma_api && qm->qdma.va) {
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
@@ -1856,43 +2086,30 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
-/**
- * hisi_qm_hw_error_init() - Configure qm hardware error report method.
- * @qm: The qm which we want to configure.
- * @ce: Bit mask of correctable error configure.
- * @nfe: Bit mask of non-fatal error configure.
- * @fe: Bit mask of fatal error configure.
- * @msi: Bit mask of error reported by message signal interrupt.
- *
- * Hardware errors of qm can be reported either by RAS interrupts which will
- * be handled by UEFI and then PCIe AER or by device MSI. User can configure
- * each error to use either of above two methods. For RAS interrupts, we can
- * configure an error as one of correctable error, non-fatal error or
- * fatal error.
- *
- * Bits indicating errors can be configured to ce, nfe, fe and msi to enable
- * related report methods. Error report will be masked if related error bit
- * does not configure.
- */
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init(struct hisi_qm *qm)
{
+ const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
+
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
- qm->ops->hw_error_init(qm, ce, nfe, fe, msi);
+ qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe,
+ err_info->fe, err_info->msi);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
-/**
- * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors.
- * @qm: The qm which has non-fatal hardware errors.
- *
- * Accelerators use this function to handle qm non-fatal hardware errors.
- */
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
+static void qm_hw_error_uninit(struct hisi_qm *qm)
+{
+ if (!qm->ops->hw_error_uninit) {
+ dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
+ return;
+ }
+
+ qm->ops->hw_error_uninit(qm);
+}
+
+static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
@@ -1901,7 +2118,6 @@ pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
return qm->ops->hw_error_handle(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle);
/**
* hisi_qm_get_hw_version() - Get hardware version of a qm.
@@ -1922,6 +2138,229 @@ enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
+/**
+ * hisi_qm_dev_err_init() - Initialize device error configuration.
+ * @qm: The qm for which we want to do error initialization.
+ *
+ * Initialize QM and device error related configuration.
+ */
+void hisi_qm_dev_err_init(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_init(qm);
+
+ if (!qm->err_ini->hw_err_enable) {
+ dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
+ return;
+ }
+ qm->err_ini->hw_err_enable(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
+
+/**
+ * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
+ * @qm: The qm for which we want to do error uninitialization.
+ *
+ * Uninitialize QM and device error-related configuration.
+ */
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_uninit(qm);
+
+ if (!qm->err_ini->hw_err_disable) {
+ dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
+ return;
+ }
+ qm->err_ini->hw_err_disable(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
+
+/**
+ * hisi_qm_free_qps() - free multiple queue pairs.
+ * @qps: The queue pairs to be freed.
+ * @qp_num: The number of queue pairs.
+ */
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
+{
+ int i;
+
+ if (!qps || qp_num <= 0)
+ return;
+
+ for (i = qp_num - 1; i >= 0; i--)
+ hisi_qm_release_qp(qps[i]);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
+
+static void free_list(struct list_head *head)
+{
+ struct hisi_qm_resource *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
+static int hisi_qm_sort_devices(int node, struct list_head *head,
+ struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm_resource *res, *tmp;
+ struct hisi_qm *qm;
+ struct list_head *n;
+ struct device *dev;
+ int dev_node = 0;
+
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = &qm->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ dev_node = dev_to_node(dev);
+ if (dev_node < 0)
+ dev_node = 0;
+ }
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->qm = qm;
+ res->distance = node_distance(dev_node, node);
+ n = head;
+ list_for_each_entry(tmp, head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
+ }
+
+ return 0;
+}
+
+/**
+ * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
+ * @qm_list: The list of all available devices.
+ * @qp_num: The number of queue pairs to be created.
+ * @alg_type: The algorithm type.
+ * @node: The NUMA node.
+ * @qps: The queue pairs to be created.
+ *
+ * This function sorts all available devices by NUMA distance, then tries to
+ * create all the queue pairs on a single device; if no device can satisfy
+ * the request, an error is returned.
+ */
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps)
+{
+ struct hisi_qm_resource *tmp;
+ int ret = -ENODEV;
+ LIST_HEAD(head);
+ int i;
+
+ if (!qps || !qm_list || qp_num <= 0)
+ return -EINVAL;
+
+ mutex_lock(&qm_list->lock);
+ if (hisi_qm_sort_devices(node, &head, qm_list)) {
+ mutex_unlock(&qm_list->lock);
+ goto err;
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ for (i = 0; i < qp_num; i++) {
+ qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
+ if (IS_ERR(qps[i])) {
+ hisi_qm_free_qps(qps, i);
+ break;
+ }
+ }
+
+ if (i == qp_num) {
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&qm_list->lock);
+ if (ret)
+ pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ node, alg_type, qp_num);
+
+err:
+ free_list(&head);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
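A minimal caller-side sketch of the allocation helper above (hypothetical code; the "example_devices" list and the two-QP request are illustrative, not part of this patch):

	/*
	 * Hypothetical caller of hisi_qm_alloc_qps_node(): request two
	 * queue pairs of alg_type 0 as close as possible to the caller's
	 * NUMA node. Each qp would then be started with hisi_qm_start_qp()
	 * and the whole set released again with hisi_qm_free_qps().
	 */
	static int example_alloc_qps(struct hisi_qm_list *example_devices,
				     struct hisi_qp *qps[2])
	{
		int node = cpu_to_node(smp_processor_id());

		return hisi_qm_alloc_qps_node(example_devices, 2, 0, node, qps);
	}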
+
+static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ if (!qm->err_ini->get_dev_hw_err_status) {
+ dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* get device hardware error status */
+ err_sts = qm->err_ini->get_dev_hw_err_status(qm);
+ if (err_sts) {
+ if (!qm->err_ini->log_dev_hw_err) {
+ dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ qm->err_ini->log_dev_hw_err(qm, err_sts);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, dev_ret;
+
+ /* log qm error */
+ qm_ret = qm_hw_error_handle(qm);
+
+ /* log device error */
+ dev_ret = qm_dev_err_handle(qm);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * hisi_qm_dev_err_detected() - Get device and qm error status, then log it.
+ * @pdev: The PCI device which needs to report the error.
+ * @state: The connectivity between the CPU and the device.
+ *
+ * We register this function with the PCIe AER handlers; it reports the device
+ * or qm hardware error status when an error occurs.
+ */
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return qm_process_dev_error(pdev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 078b8f1f1b77..ec5b6f48db6c 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -77,6 +77,9 @@
#define HISI_ACC_SGL_SGE_NR_MAX 255
+/* page number for queue file region */
+#define QM_DOORBELL_PAGE_NR 1
+
enum qp_state {
QP_STOP,
};
@@ -125,6 +128,28 @@ struct hisi_qm_status {
unsigned long flags;
};
+struct hisi_qm;
+
+struct hisi_qm_err_info {
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+ u32 msi;
+};
+
+struct hisi_qm_err_ini {
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
+ struct hisi_qm_err_info err_info;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+};
+
struct hisi_qm {
enum qm_hw_ver ver;
enum qm_fun_type fun_type;
@@ -136,6 +161,7 @@ struct hisi_qm {
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ struct list_head list;
struct qm_dma qdma;
struct qm_sqc *sqc;
@@ -148,6 +174,7 @@ struct hisi_qm {
dma_addr_t aeqe_dma;
struct hisi_qm_status status;
+ const struct hisi_qm_err_ini *err_ini;
rwlock_t qps_lock;
unsigned long *qp_bitmap;
@@ -162,7 +189,15 @@ struct hisi_qm {
u32 error_mask;
u32 msi_mask;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+
+ const char *algs;
bool use_dma_api;
+ bool use_sva;
+ resource_size_t phys_base;
+ resource_size_t phys_size;
+ struct uacce_device *uacce;
};
struct hisi_qp_status {
@@ -192,12 +227,35 @@ struct hisi_qp {
struct hisi_qp_ops *hw_ops;
void *qp_ctx;
void (*req_cb)(struct hisi_qp *qp, void *data);
- struct work_struct work;
- struct workqueue_struct *wq;
+ void (*event_cb)(struct hisi_qp *qp);
struct hisi_qm *qm;
+ u16 pasid;
+ struct uacce_queue *uacce_q;
};
+static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
+{
+ INIT_LIST_HEAD(&qm_list->list);
+ mutex_init(&qm_list->lock);
+}
+
+static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
@@ -211,11 +269,12 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
@@ -227,4 +286,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
struct hisi_acc_sgl_pool *pool);
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps);
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 13e2d8d7be94..3598fa17beb2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -11,6 +11,8 @@
/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
+ u8 *pbuf;
+ dma_addr_t pbuf_dma;
u8 *c_ivin;
dma_addr_t c_ivin_dma;
u8 *out_mac;
@@ -23,6 +25,8 @@ struct sec_cipher_req {
dma_addr_t c_in_dma;
struct hisi_acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
@@ -48,6 +52,7 @@ struct sec_req {
/* Status of the SEC request */
bool fake_busy;
+ bool use_pbuf;
};
/**
@@ -114,6 +119,7 @@ struct sec_ctx {
struct sec_qp_ctx *qp_ctx;
struct sec_dev *sec;
const struct sec_req_op *req_op;
+ struct hisi_qp **qps;
/* Half queues for encipher, and half for decipher */
u32 hlf_q_num;
@@ -128,6 +134,7 @@ struct sec_ctx {
atomic_t dec_qcyclic;
enum sec_alg_type alg_type;
+ bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
};
@@ -162,14 +169,15 @@ struct sec_debug {
struct sec_dev {
struct hisi_qm qm;
- struct list_head list;
struct sec_debug debug;
u32 ctx_q_num;
+ bool iommu_used;
u32 num_vfs;
unsigned long status;
};
-struct sec_dev *sec_find_device(int node);
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(void);
void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index a2cfcc9ccd94..7f1c6a31b82f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -46,7 +46,21 @@
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
+#define SEC_MAX_AAD_LEN 65535
#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+
+#define SEC_PBUF_SZ 512
+#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
+#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
+#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
+ SEC_MAX_MAC_LEN * 2)
+#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
+#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
+ SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
+ SEC_PBUF_LEFT_SZ)
+
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
@@ -110,12 +124,12 @@ static void sec_free_req_id(struct sec_req *req)
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+static int sec_aead_verify(struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
size_t authsize = crypto_aead_authsize(tfm);
+ u8 *mac_out = req->aead_req.out_mac;
u8 *mac = mac_out + SEC_MAX_MAC_LEN;
struct scatterlist *sgl = aead_req->src;
size_t sz;
@@ -163,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
}
if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
- err = sec_aead_verify(req, qp_ctx);
+ err = sec_aead_verify(req);
atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
@@ -245,6 +259,50 @@ static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
res->out_mac, res->out_mac_dma);
}
+static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->pbuf)
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf, res->pbuf_dma);
+}
+
+/*
+ * To improve performance, a pre-allocated pbuffer is used for
+ * small packets (< 512 bytes) when the IOMMU is in use, avoiding
+ * a DMA mapping and IOMMU translation per request.
+ */
+static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int pbuf_page_offset;
+ int i, j, k;
+
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ &res->pbuf_dma, GFP_KERNEL);
+ if (!res->pbuf)
+ return -ENOMEM;
+
+ /*
+ * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
+ * out_mac, laid out as <SEC_PBUF|SEC_IV|SEC_MAC>.
+ * Every page holds SEC_PBUF_NUM (six) such packages, and the
+ * sec_qp_ctx needs QM_Q_DEPTH of them in total, so
+ * SEC_PBUF_PAGE_NUM full pages plus a partial page of
+ * SEC_PBUF_LEFT_SZ make up SEC_TOTAL_PBUF_SZ.
+ */
+ for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ pbuf_page_offset = PAGE_SIZE * i;
+ for (j = 0; j < SEC_PBUF_NUM; j++) {
+ k = i * SEC_PBUF_NUM + j;
+ if (k == QM_Q_DEPTH)
+ break;
+ res[k].pbuf = res->pbuf +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ res[k].pbuf_dma = res->pbuf_dma +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ }
+ }
+ return 0;
+}
+
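For reference, the pbuf sizing works out as follows, assuming PAGE_SIZE 4096, QM_Q_DEPTH 1024 and SEC_IV_SIZE 24 (values defined in other driver headers; treat them as assumptions here). A standalone check of the arithmetic:

#include <stdio.h>

/* Assumed values; in the driver they come from other headers. */
#define PAGE_SIZE       4096
#define QM_Q_DEPTH      1024
#define SEC_IV_SIZE     24
#define SEC_MAX_MAC_LEN 64

#define SEC_PBUF_SZ       512
#define SEC_PBUF_PKG      (SEC_PBUF_SZ + SEC_IV_SIZE + SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM      (PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ  (SEC_PBUF_PKG * (QM_Q_DEPTH - \
                           SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + SEC_PBUF_LEFT_SZ)

int main(void)
{
        printf("package size   : %d bytes\n", SEC_PBUF_PKG);      /* 664 */
        printf("pkgs per page  : %d\n", SEC_PBUF_NUM);            /* 6 */
        printf("full pages     : %d\n", SEC_PBUF_PAGE_NUM);       /* 170 */
        printf("leftover bytes : %d\n", SEC_PBUF_LEFT_SZ);        /* 2656 */
        printf("total          : %d bytes\n", SEC_TOTAL_PBUF_SZ); /* 698976 */
        return 0;
}

Under these assumptions, 170 full pages plus a partial page holding the last 4 packages cover all 1024 queue-depth slots, which is why the allocation loop runs one page past SEC_PBUF_PAGE_NUM and breaks at k == QM_Q_DEPTH.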
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
@@ -259,11 +317,18 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
if (ctx->alg_type == SEC_AEAD) {
ret = sec_alloc_mac_resource(dev, res);
if (ret)
- goto get_fail;
+ goto alloc_fail;
+ }
+ if (ctx->pbuf_supported) {
+ ret = sec_alloc_pbuf_resource(dev, res);
+ if (ret) {
+ dev_err(dev, "fail to alloc pbuf dma resource!\n");
+ goto alloc_fail;
+ }
}
return 0;
-get_fail:
+alloc_fail:
sec_free_civ_resource(dev, res);
return ret;
@@ -276,6 +341,8 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
sec_free_civ_resource(dev, qp_ctx->res);
+ if (ctx->pbuf_supported)
+ sec_free_pbuf_resource(dev, qp_ctx->res);
if (ctx->alg_type == SEC_AEAD)
sec_free_mac_resource(dev, qp_ctx->res);
}
@@ -288,11 +355,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
struct hisi_qp *qp;
int ret = -ENOMEM;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
qp->req_type = 0;
qp->qp_ctx = qp_ctx;
qp->req_cb = sec_req_cb;
@@ -335,7 +399,6 @@ err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
- hisi_qm_release_qp(qp);
return ret;
}
@@ -352,7 +415,6 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx,
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
idr_destroy(&qp_ctx->req_idr);
- hisi_qm_release_qp(qp_ctx->qp);
}
static int sec_ctx_base_init(struct sec_ctx *ctx)
@@ -360,14 +422,18 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
struct sec_dev *sec;
int i, ret;
- sec = sec_find_device(cpu_to_node(smp_processor_id()));
- if (!sec) {
- pr_err("Can not find proper Hisilicon SEC device!\n");
+ ctx->qps = sec_create_qps();
+ if (!ctx->qps) {
+ pr_err("Can not create sec qps!\n");
return -ENODEV;
}
+
+ sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
ctx->hlf_q_num = sec->ctx_q_num >> 1;
+ ctx->pbuf_supported = ctx->sec->iommu_used;
+
/* Half of queue depth is taken as fake requests limit in the queue. */
ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
@@ -386,6 +452,7 @@ err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
return ret;
}
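sec_ctx_base_init now recovers its sec_dev from the first allocated queue pair with container_of on the embedded hisi_qm, instead of the removed sec_find_device() lookup. A self-contained illustration of the container_of idiom (types and values are invented):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct qm  { int id; };
struct dev { char name[16]; struct qm qm; };

int main(void)
{
        struct dev d = { "sec0", { 7 } };
        struct qm *q = &d.qm;   /* all a caller usually holds */

        /* Recover the enclosing device from the embedded member. */
        struct dev *owner = container_of(q, struct dev, qm);

        printf("%s owns qm %d\n", owner->name, owner->qm.id);
        return 0;
}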
@@ -397,6 +464,7 @@ static void sec_ctx_base_uninit(struct sec_ctx *ctx)
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
kfree(ctx->qp_ctx);
}
@@ -447,7 +515,6 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
- ctx = crypto_skcipher_ctx(tfm);
ctx->alg_type = SEC_SKCIPHER;
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
@@ -591,11 +658,94 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int sec_cipher_map(struct device *dev, struct sec_req *req,
+static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
+
+ if (ctx->alg_type == SEC_AEAD)
+ copy_size = aead_req->cryptlen + aead_req->assoclen;
+ else
+ copy_size = c_req->c_len;
+
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
+
+ if (unlikely(pbuf_length != copy_size)) {
+ dev_err(dev, "copy src data to pbuf error!\n");
+ return -EINVAL;
+ }
+
+ c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
+
+ if (!c_req->c_in_dma) {
+ dev_err(dev, "fail to set pbuffer address!\n");
+ return -ENOMEM;
+ }
+
+ c_req->c_out_dma = c_req->c_in_dma;
+
+ return 0;
+}
+
+static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *dst)
+{
+ struct aead_request *aead_req = req->aead_req.aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
+
+ if (ctx->alg_type == SEC_AEAD)
+ copy_size = c_req->c_len + aead_req->assoclen;
+ else
+ copy_size = c_req->c_len;
+
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
+
+ if (unlikely(pbuf_length != copy_size))
+ dev_err(dev, "copy pbuf data to dst error!\n");
+
+}
+
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_alg_res *res = &qp_ctx->res[req->req_id];
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int ret;
+
+ if (req->use_pbuf) {
+ ret = sec_cipher_pbuf_map(ctx, req, src);
+ c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
+ c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
+ if (ctx->alg_type == SEC_AEAD) {
+ a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
+ a_req->out_mac_dma = res->pbuf_dma +
+ SEC_PBUF_MAC_OFFSET;
+ }
+
+ return ret;
+ }
+ c_req->c_ivin = res->c_ivin;
+ c_req->c_ivin_dma = res->c_ivin_dma;
+ if (ctx->alg_type == SEC_AEAD) {
+ a_req->out_mac = res->out_mac;
+ a_req->out_mac_dma = res->out_mac_dma;
+ }
c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
qp_ctx->c_in_pool,
@@ -626,29 +776,34 @@ static int sec_cipher_map(struct device *dev, struct sec_req *req,
return 0;
}
-static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
- if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ if (req->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
- hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ }
}
static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sq = req->c_req.sk_req;
- return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
+ return sec_cipher_map(ctx, req, sq->src, sq->dst);
}
static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_req *c_req = &req->c_req;
- struct skcipher_request *sk_req = c_req->sk_req;
+ struct skcipher_request *sq = req->c_req.sk_req;
- sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+ sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}
static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
@@ -759,16 +914,14 @@ static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aq = req->aead_req.aead_req;
- return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+ return sec_cipher_map(ctx, req, aq->src, aq->dst);
}
static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct device *dev = SEC_CTX_DEV(ctx);
- struct sec_cipher_req *cq = &req->c_req;
struct aead_request *aq = req->aead_req.aead_req;
- sec_cipher_unmap(dev, cq, aq->src, aq->dst);
+ sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}
static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
@@ -801,9 +954,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
- u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+ struct sec_cipher_req *c_req = &req->c_req;
- memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -818,8 +971,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
memset(sec_sqe, 0, sizeof(struct sec_sqe));
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr =
- cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
@@ -836,7 +988,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
sec_sqe->type_cipher_auth = bd_type | cipher;
- sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ if (req->use_pbuf)
+ sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
+ else
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
if (c_req->c_in_dma != c_req->c_out_dma)
de = 0x1 << SEC_DE_OFFSET;
@@ -844,7 +999,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->sds_sa_type = (de | scene | sa_type);
/* Just set DST address type */
- da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ if (req->use_pbuf)
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ else
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
sec_sqe->sdm_addr_type |= da_type;
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
@@ -904,9 +1062,9 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+ struct sec_cipher_req *c_req = &req->c_req;
- memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+ memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
@@ -939,8 +1097,7 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
- sec_sqe->type2.mac_addr =
- cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+ sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -964,6 +1121,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -979,7 +1137,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
struct scatterlist *sgl = a_req->dst;
sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- qp_ctx->res[req->req_id].out_mac,
+ aead_req->out_mac,
authsize, a_req->cryptlen +
a_req->assoclen);
@@ -1031,6 +1189,7 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
+ struct sec_cipher_req *c_req = &req->c_req;
int ret;
ret = sec_request_init(ctx, req);
@@ -1057,12 +1216,10 @@ err_send_req:
/* As failing, restore the IV from user */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
if (ctx->alg_type == SEC_SKCIPHER)
- memcpy(req->c_req.sk_req->iv,
- req->qp_ctx->res[req->req_id].c_ivin,
+ memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
ctx->c_ctx.ivsize);
else
- memcpy(req->aead_req.aead_req->iv,
- req->qp_ctx->res[req->req_id].c_ivin,
+ memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
ctx->c_ctx.ivsize);
}
@@ -1208,6 +1365,12 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
sreq->c_req.c_len = sk_req->cryptlen;
+
+ if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+ sreq->use_pbuf = true;
+ else
+ sreq->use_pbuf = false;
+
if (c_alg == SEC_CALG_3DES) {
if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
dev_err(dev, "skcipher 3des input length error!\n");
@@ -1321,11 +1484,18 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
- if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+ if (unlikely(!req->src || !req->dst || !req->cryptlen ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
return -EINVAL;
}
+ if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
+ SEC_PBUF_SZ)
+ sreq->use_pbuf = true;
+ else
+ sreq->use_pbuf = false;
+
/* Support AES only */
if (unlikely(c_alg != SEC_CALG_AES)) {
dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
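Both param-check paths above make the same decision: use the pre-allocated pbuf only when the whole copied payload fits in SEC_PBUF_SZ, and reject AEAD requests whose AAD exceeds SEC_MAX_AAD_LEN. A hedged, standalone restatement of that logic (function names are invented):

#include <stdbool.h>
#include <stdio.h>

#define SEC_PBUF_SZ     512
#define SEC_MAX_AAD_LEN 65535

/* skcipher path: only the ciphertext length is copied. */
static bool skcipher_use_pbuf(bool pbuf_supported, unsigned int cryptlen)
{
        return pbuf_supported && cryptlen <= SEC_PBUF_SZ;
}

/* AEAD path: ciphertext plus associated data must fit. */
static bool aead_use_pbuf(bool pbuf_supported, unsigned int cryptlen,
                          unsigned int assoclen)
{
        if (assoclen > SEC_MAX_AAD_LEN)
                return false;   /* such a request is rejected outright */
        return pbuf_supported && cryptlen + assoclen <= SEC_PBUF_SZ;
}

int main(void)
{
        printf("%d\n", skcipher_use_pbuf(true, 256));   /* 1 */
        printf("%d\n", aead_use_pbuf(true, 400, 200));  /* 0: 600 > 512 */
        return 0;
}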
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2bbaf1e2dae7..1f54ebe164b6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -89,8 +90,7 @@ struct sec_hw_error {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
-static LIST_HEAD(sec_list);
-static DEFINE_MUTEX(sec_list_lock);
+static struct hisi_qm_list sec_devices;
static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
@@ -105,37 +105,6 @@ static const struct sec_hw_error sec_hw_errors[] = {
{ /* sentinel */ }
};
-struct sec_dev *sec_find_device(int node)
-{
-#define SEC_NUMA_MAX_DISTANCE 100
- int min_distance = SEC_NUMA_MAX_DISTANCE;
- int dev_node = 0, free_qp_num = 0;
- struct sec_dev *sec, *ret = NULL;
- struct hisi_qm *qm;
- struct device *dev;
-
- mutex_lock(&sec_list_lock);
- list_for_each_entry(sec, &sec_list, list) {
- qm = &sec->qm;
- dev = &qm->pdev->dev;
-#ifdef CONFIG_NUMA
- dev_node = dev->numa_node;
- if (dev_node < 0)
- dev_node = 0;
-#endif
- if (node_distance(dev_node, node) < min_distance) {
- free_qp_num = hisi_qm_get_free_qp_num(qm);
- if (free_qp_num >= sec->ctx_q_num) {
- ret = sec;
- min_distance = node_distance(dev_node, node);
- }
- }
- }
- mutex_unlock(&sec_list_lock);
-
- return ret;
-}
-
static const char * const sec_dbg_file_name[] = {
[SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
@@ -238,6 +207,32 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
+{
+ hisi_qm_free_qps(qps, qp_num);
+ kfree(qps);
+}
+
+struct hisi_qp **sec_create_qps(void)
+{
+ int node = cpu_to_node(smp_processor_id());
+ u32 ctx_num = ctx_q_num;
+ struct hisi_qp **qps;
+ int ret;
+
+ qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
+ if (!qps)
+ return NULL;
+
+ ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
+ if (!ret)
+ return qps;
+
+ kfree(qps);
+ return NULL;
+}
+
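sec_create_qps requests ctx_q_num queue pairs on the calling CPU's NUMA node; the nearest-device-with-capacity policy that sec_find_device() used to implement now lives behind hisi_qm_alloc_qps_node() in the common QM code. A toy model of that selection policy, with node distance simplified to |a - b| (the real kernel uses node_distance(); all data here is invented):

#include <stdio.h>

struct model_dev { int node; int free_qps; };

/* Pick the device nearest to `node` that still has `need` free QPs. */
static int pick_device(const struct model_dev *devs, int n, int node, int need)
{
        int best = -1, best_dist = 1 << 30;

        for (int i = 0; i < n; i++) {
                int dist = devs[i].node > node ? devs[i].node - node
                                               : node - devs[i].node;

                if (devs[i].free_qps >= need && dist < best_dist) {
                        best = i;
                        best_dist = dist;
                }
        }
        return best;
}

int main(void)
{
        struct model_dev devs[] = { { 0, 2 }, { 1, 64 }, { 2, 64 } };

        /* The node-0 device lacks capacity, so the node-1 device wins. */
        printf("picked dev %d\n", pick_device(devs, 3, 0, 24));
        return 0;
}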
static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
@@ -245,20 +240,6 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
-static inline void sec_add_to_list(struct sec_dev *sec)
-{
- mutex_lock(&sec_list_lock);
- list_add_tail(&sec->list, &sec_list);
- mutex_unlock(&sec_list_lock);
-}
-
-static inline void sec_remove_from_list(struct sec_dev *sec)
-{
- mutex_lock(&sec_list_lock);
- list_del(&sec->list);
- mutex_unlock(&sec_list_lock);
-}
-
static u8 sec_get_endian(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
@@ -384,9 +365,8 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void sec_hw_error_enable(struct sec_dev *sec)
+static void sec_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
u32 val;
if (qm->ver == QM_HW_V1) {
@@ -414,9 +394,8 @@ static void sec_hw_error_enable(struct sec_dev *sec)
writel(val, qm->io_base + SEC_CONTROL_REG);
}
-static void sec_hw_error_disable(struct sec_dev *sec)
+static void sec_hw_error_disable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
u32 val;
val = readl(qm->io_base + SEC_CONTROL_REG);
@@ -435,27 +414,6 @@ static void sec_hw_error_disable(struct sec_dev *sec)
writel(val, qm->io_base + SEC_CONTROL_REG);
}
-static void sec_hw_error_init(struct sec_dev *sec)
-{
- if (sec->qm.fun_type == QM_HW_VF)
- return;
-
- hisi_qm_hw_error_init(&sec->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
- | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- sec_hw_error_enable(sec);
-}
-
-static void sec_hw_error_uninit(struct sec_dev *sec)
-{
- if (sec->qm.fun_type == QM_HW_VF)
- return;
-
- sec_hw_error_disable(sec);
- writel(GENMASK(12, 0), sec->qm.io_base + SEC_QM_ABNORMAL_INT_MASK);
-}
-
static u32 sec_current_qm_read(struct sec_debug_file *file)
{
struct hisi_qm *qm = file->qm;
@@ -695,6 +653,51 @@ static void sec_debugfs_exit(struct sec_dev *sec)
debugfs_remove_recursive(sec->qm.debug.debug_root);
}
+static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+{
+ const struct sec_hw_error *errs = sec_hw_errors;
+ struct device *dev = &qm->pdev->dev;
+ u32 err_val;
+
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
+
+ if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
+ err_val = readl(qm->io_base +
+ SEC_CORE_SRAM_ECC_ERR_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ dev_err(dev, "multi ecc sram addr=0x%x\n",
+ SEC_ECC_ADDR(err_val));
+ }
+ }
+ errs++;
+ }
+
+ writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+}
+
+static u32 sec_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + SEC_CORE_INT_STATUS);
+}
+
+static const struct hisi_qm_err_ini sec_err_ini = {
+ .hw_err_enable = sec_hw_error_enable,
+ .hw_err_disable = sec_hw_error_disable,
+ .get_dev_hw_err_status = sec_get_hw_err_status,
+ .log_dev_hw_err = sec_log_hw_error,
+ .err_info = {
+ .ce = QM_BASE_CE,
+ .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT,
+ .fe = 0,
+ .msi = QM_DB_RANDOM_INVALID,
+ }
+};
+
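sec_err_ini hands the QM core a table of callbacks plus static error masks, so the shared hisi_qm_dev_err_* paths can drive device-specific handling without knowing about SEC registers. A stripped-down user-space sketch of the same ops-table idiom (all names hypothetical):

#include <stdio.h>

struct err_ops {
        unsigned int (*get_status)(void *dev);
        void (*log_and_clear)(void *dev, unsigned int sts);
};

struct fake_dev {
        unsigned int status_reg;
        const struct err_ops *ops;
};

static unsigned int fake_get_status(void *dev)
{
        return ((struct fake_dev *)dev)->status_reg;
}

static void fake_log_and_clear(void *dev, unsigned int sts)
{
        printf("device error, status=0x%x\n", sts);
        ((struct fake_dev *)dev)->status_reg = 0;  /* model of a W1C clear */
}

static const struct err_ops fake_ops = {
        .get_status    = fake_get_status,
        .log_and_clear = fake_log_and_clear,
};

/* Generic core path: works for any device that fills in err_ops. */
static int core_handle_error(struct fake_dev *dev)
{
        unsigned int sts = dev->ops->get_status(dev);

        if (!sts)
                return 0;       /* recovered */
        dev->ops->log_and_clear(dev, sts);
        return 1;               /* needs reset */
}

int main(void)
{
        struct fake_dev d = { .status_reg = 0x2, .ops = &fake_ops };

        printf("need reset: %d\n", core_handle_error(&d));
        return 0;
}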
static int sec_pf_probe_init(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
@@ -713,11 +716,13 @@ static int sec_pf_probe_init(struct sec_dev *sec)
return -EINVAL;
}
+ qm->err_ini = &sec_err_ini;
+
ret = sec_set_user_domain_and_cache(sec);
if (ret)
return ret;
- sec_hw_error_init(sec);
+ hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
return 0;
@@ -750,12 +755,30 @@ static void sec_qm_uninit(struct hisi_qm *qm)
static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
{
+ int ret;
+
+ /*
+ * WQ_HIGHPRI: SEC requests must be handled with low latency,
+ * so a high-priority workqueue is needed.
+ * WQ_UNBOUND: SEC work is likely to be long-running and CPU
+ * intensive, so it should not be pinned to one CPU.
+ */
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI |
+ WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
+ if (!qm->wq) {
+ pci_err(qm->pdev, "fail to alloc workqueue\n");
+ return -ENOMEM;
+ }
+
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = SEC_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
- return sec_pf_probe_init(sec);
+ ret = sec_pf_probe_init(sec);
+ if (ret)
+ goto err_probe_uninit;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -768,18 +791,43 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
} else if (qm->ver == QM_HW_V2) {
/* v2 starts to support get vft by mailbox */
- return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_probe_uninit;
}
} else {
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_probe_uninit;
}
return 0;
+err_probe_uninit:
+ destroy_workqueue(qm->wq);
+ return ret;
}
-static void sec_probe_uninit(struct sec_dev *sec)
+static void sec_probe_uninit(struct hisi_qm *qm)
{
- sec_hw_error_uninit(sec);
+ hisi_qm_dev_err_uninit(qm);
+
+ destroy_workqueue(qm->wq);
+}
+
+static void sec_iommu_used_check(struct sec_dev *sec)
+{
+ struct iommu_domain *domain;
+ struct device *dev = &sec->qm.pdev->dev;
+
+ domain = iommu_get_domain_for_dev(dev);
+
+ /* Check if iommu is used */
+ sec->iommu_used = false;
+ if (domain) {
+ if (domain->type & __IOMMU_DOMAIN_PAGING)
+ sec->iommu_used = true;
+ dev_info(dev, "SMMU Opened, the iommu type = %u\n",
+ domain->type);
+ }
}
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -795,6 +843,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, sec);
sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
qm = &sec->qm;
@@ -820,7 +869,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- sec_add_to_list(sec);
+ hisi_qm_add_to_list(qm, &sec_devices);
ret = sec_register_to_crypto();
if (ret < 0) {
@@ -831,12 +880,12 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_remove_from_list:
- sec_remove_from_list(sec);
+ hisi_qm_del_from_list(qm, &sec_devices);
sec_debugfs_exit(sec);
hisi_qm_stop(qm);
err_probe_uninit:
- sec_probe_uninit(sec);
+ sec_probe_uninit(qm);
err_qm_uninit:
sec_qm_uninit(qm);
@@ -955,7 +1004,7 @@ static void sec_remove(struct pci_dev *pdev)
sec_unregister_from_crypto();
- sec_remove_from_list(sec);
+ hisi_qm_del_from_list(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && sec->num_vfs)
(void)sec_sriov_disable(pdev);
@@ -967,89 +1016,13 @@ static void sec_remove(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF)
sec_debug_regs_clear(qm);
- sec_probe_uninit(sec);
+ sec_probe_uninit(qm);
sec_qm_uninit(qm);
}
-static void sec_log_hw_error(struct sec_dev *sec, u32 err_sts)
-{
- const struct sec_hw_error *errs = sec_hw_errors;
- struct device *dev = &sec->qm.pdev->dev;
- u32 err_val;
-
- while (errs->msg) {
- if (errs->int_msk & err_sts) {
- dev_err(dev, "%s [error status=0x%x] found\n",
- errs->msg, errs->int_msk);
-
- if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
- err_val = readl(sec->qm.io_base +
- SEC_CORE_SRAM_ECC_ERR_INFO);
- dev_err(dev, "multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
- dev_err(dev, "multi ecc sram addr=0x%x\n",
- SEC_ECC_ADDR(err_val));
- }
- }
- errs++;
- }
-}
-
-static pci_ers_result_t sec_hw_error_handle(struct sec_dev *sec)
-{
- u32 err_sts;
-
- /* read err sts */
- err_sts = readl(sec->qm.io_base + SEC_CORE_INT_STATUS);
- if (err_sts) {
- sec_log_hw_error(sec, err_sts);
-
- /* clear error interrupts */
- writel(err_sts, sec->qm.io_base + SEC_CORE_INT_SOURCE);
-
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t sec_process_hw_error(struct pci_dev *pdev)
-{
- struct sec_dev *sec = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, sec_ret;
-
- if (!sec) {
- pci_err(pdev, "Can't recover error during device init\n");
- return PCI_ERS_RESULT_NONE;
- }
-
- /* log qm error */
- qm_ret = hisi_qm_hw_error_handle(&sec->qm);
-
- /* log sec error */
- sec_ret = sec_hw_error_handle(sec);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- sec_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t sec_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return sec_process_hw_error(pdev);
-}
-
static const struct pci_error_handlers sec_err_handler = {
- .error_detected = sec_error_detected,
+ .error_detected = hisi_qm_dev_err_detected,
};
static struct pci_driver sec_pci_driver = {
@@ -1078,6 +1051,7 @@ static int __init sec_init(void)
{
int ret;
+ hisi_qm_init_list(&sec_devices);
sec_register_debugfs();
ret = pci_register_driver(&sec_pci_driver);
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index bc1db26598bb..82dc6f867171 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -68,7 +68,7 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-struct hisi_zip *find_zip_device(int node);
+int zip_create_qps(struct hisi_qp **qps, int ctx_num);
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 9815d5e3ccd0..369ec3220574 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -132,29 +132,25 @@ static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
sqe->dest_addr_h = upper_32_bits(d_addr);
}
-static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx,
- int alg_type, int req_type)
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
+ int alg_type, int req_type)
{
- struct hisi_qp *qp;
+ struct device *dev = &qp->qm->pdev->dev;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp->req_type = req_type;
+ qp->alg_type = alg_type;
qp->qp_ctx = ctx;
- ctx->qp = qp;
ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0)
- goto err_release_qp;
+ if (ret < 0) {
+ dev_err(dev, "start qp failed!\n");
+ return ret;
+ }
- return 0;
+ ctx->qp = qp;
-err_release_qp:
- hisi_qm_release_qp(qp);
- return ret;
+ return 0;
}
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
@@ -165,34 +161,34 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
{
+ struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip *hisi_zip;
- struct hisi_qm *qm;
int ret, i, j;
- /* find the proper zip device */
- hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
- if (!hisi_zip) {
- pr_err("Failed to find a proper ZIP device!\n");
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
+ if (ret) {
+ pr_err("Can not create zip qps!\n");
return -ENODEV;
}
- qm = &hisi_zip->qm;
+
+ hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i,
- req_type);
- if (ret)
- goto err;
+ ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
+ req_type);
+ if (ret) {
+ for (j = i - 1; j >= 0; j--)
+ hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
+
+ hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
+ return ret;
+ }
hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
}
return 0;
-err:
- for (j = i - 1; j >= 0; j--)
- hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]);
-
- return ret;
}
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index e1bab1a91333..fcc85d2dbd07 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
+#include <linux/uacce.h>
#include "zip.h"
#define PCI_DEVICE_ID_ZIP_PF 0xa250
@@ -60,13 +61,17 @@
#define HZIP_CORE_DEBUG_DECOMP_5 0x309000
#define HZIP_CORE_INT_SOURCE 0x3010A0
-#define HZIP_CORE_INT_MASK 0x3010A4
+#define HZIP_CORE_INT_MASK_REG 0x3010A4
#define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
-#define SRAM_ECC_ERR_NUM_SHIFT 16
-#define SRAM_ECC_ERR_ADDR_SHIFT 24
-#define HZIP_CORE_INT_DISABLE 0x000007FF
+#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
+#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
+#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
+#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
+#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
+#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0)
#define HZIP_COMP_CORE_NUM 2
#define HZIP_DECOMP_CORE_NUM 6
#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
@@ -83,77 +88,7 @@
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-static LIST_HEAD(hisi_zip_list);
-static DEFINE_MUTEX(hisi_zip_list_lock);
-
-struct hisi_zip_resource {
- struct hisi_zip *hzip;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_zip_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_zip *find_zip_device(int node)
-{
- struct hisi_zip_resource *res, *tmp;
- struct hisi_zip *ret = NULL;
- struct hisi_zip *hisi_zip;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_zip_list_lock);
-
- if (IS_ENABLED(CONFIG_NUMA)) {
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_zip->qm.pdev->dev;
- res->hzip = hisi_zip;
- res->distance = node_distance(dev_to_node(dev), node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
- ret = tmp->hzip;
- break;
- }
- }
-
- free_list(&head);
- } else {
- ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
- }
-
- mutex_unlock(&hisi_zip_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_zip_list_lock);
- return NULL;
-}
+static struct hisi_qm_list zip_devices;
struct hisi_zip_hw_error {
u32 int_msk;
@@ -297,9 +232,6 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
-static int uacce_mode;
-module_param(uacce_mode, int, 0);
-
static u32 vfs_num;
module_param(vfs_num, uint, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)");
@@ -311,18 +243,11 @@ static const struct pci_device_id hisi_zip_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
+int zip_create_qps(struct hisi_qp **qps, int qp_num)
{
- mutex_lock(&hisi_zip_list_lock);
- list_add_tail(&hisi_zip->list, &hisi_zip_list);
- mutex_unlock(&hisi_zip_list_lock);
-}
+ int node = cpu_to_node(smp_processor_id());
-static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
-{
- mutex_lock(&hisi_zip_list_lock);
- list_del(&hisi_zip->list);
- mutex_unlock(&hisi_zip_list_lock);
+ return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
@@ -353,8 +278,14 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
- writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
+
+ if (hisi_zip->qm.use_sva) {
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
+ } else {
+ writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
+ }
/* let's open all compression/decompression cores */
writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
@@ -366,27 +297,32 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
}
-static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
+static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
if (qm->ver == QM_HW_V1) {
- writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
+ writel(HZIP_CORE_INT_MASK_ALL,
+ qm->io_base + HZIP_CORE_INT_MASK_REG);
dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
return;
}
- if (state) {
- /* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
- HZIP_CORE_INT_SOURCE);
- /* enable ZIP hw error interrupts */
- writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
- } else {
- /* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_DISABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
- }
+ /* clear ZIP hw error source if having */
+ writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ /* configure error type */
+ writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+ /* enable ZIP hw error interrupts */
+ writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+}
+
+static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
+{
+ /* disable ZIP hw error interrupts */
+ writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -638,14 +574,53 @@ static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
hisi_zip_debug_regs_clear(hisi_zip);
}
-static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
+static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+{
+ const struct hisi_zip_hw_error *err = zip_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 err_val;
+
+ while (err->msg) {
+ if (err->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
+ err_val = readl(qm->io_base +
+ HZIP_CORE_SRAM_ECC_ERR_INFO);
+ dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
+ ((err_val >>
+ HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
+ dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
+ (err_val >>
+ HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
+ }
+ }
+ err++;
+ }
+
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
- hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_zip_hw_error_set_state(hisi_zip, true);
+ return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
+static const struct hisi_qm_err_ini hisi_zip_err_ini = {
+ .hw_err_enable = hisi_zip_hw_error_enable,
+ .hw_err_disable = hisi_zip_hw_error_disable,
+ .get_dev_hw_err_status = hisi_zip_get_hw_err_status,
+ .log_dev_hw_err = hisi_zip_log_hw_error,
+ .err_info = {
+ .ce = QM_BASE_CE,
+ .nfe = QM_BASE_NFE |
+ QM_ACC_WB_NOT_READY_TIMEOUT,
+ .fe = 0,
+ .msi = QM_DB_RANDOM_INVALID,
+ }
+};
+
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
struct hisi_qm *qm = &hisi_zip->qm;
@@ -671,8 +646,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return -EINVAL;
}
+ qm->err_ini = &hisi_zip_err_ini;
+
hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_zip_hw_error_init(hisi_zip);
+ hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(hisi_zip);
return 0;
@@ -791,27 +768,15 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hisi_zip);
qm = &hisi_zip->qm;
+ qm->use_dma_api = true;
qm->pdev = pdev;
qm->ver = rev_id;
+ qm->algs = "zlib\ngzip";
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
QM_HW_VF;
- switch (uacce_mode) {
- case 0:
- qm->use_dma_api = true;
- break;
- case 1:
- qm->use_dma_api = false;
- break;
- case 2:
- qm->use_dma_api = true;
- break;
- default:
- return -EINVAL;
- }
-
ret = hisi_qm_init(qm);
if (ret) {
dev_err(&pdev->dev, "Failed to init qm!\n");
@@ -849,7 +814,13 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
- hisi_zip_add_to_list(hisi_zip);
+ hisi_qm_add_to_list(qm, &zip_devices);
+
+ if (qm->uacce) {
+ ret = uacce_register(qm->uacce);
+ if (ret)
+ goto err_qm_uninit;
+ }
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
ret = hisi_zip_sriov_enable(pdev, vfs_num);
@@ -860,7 +831,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_remove_from_list:
- hisi_zip_remove_from_list(hisi_zip);
+ hisi_qm_del_from_list(qm, &zip_devices);
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
err_qm_uninit:
@@ -887,92 +858,13 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm);
- if (qm->fun_type == QM_HW_PF)
- hisi_zip_hw_error_set_state(hisi_zip, false);
-
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_zip_remove_from_list(hisi_zip);
-}
-
-static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts)
-{
- const struct hisi_zip_hw_error *err = zip_hw_error;
- struct device *dev = &hisi_zip->qm.pdev->dev;
- u32 err_val;
-
- while (err->msg) {
- if (err->int_msk & err_sts) {
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
-
- if (HZIP_CORE_INT_STATUS_M_ECC & err->int_msk) {
- err_val = readl(hisi_zip->qm.io_base +
- HZIP_CORE_SRAM_ECC_ERR_INFO);
- dev_warn(dev, "hisi-zip multi ecc sram num=0x%x\n",
- ((err_val >> SRAM_ECC_ERR_NUM_SHIFT) &
- 0xFF));
- dev_warn(dev, "hisi-zip multi ecc sram addr=0x%x\n",
- (err_val >> SRAM_ECC_ERR_ADDR_SHIFT));
- }
- }
- err++;
- }
-}
-
-static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip)
-{
- u32 err_sts;
-
- /* read err sts */
- err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS);
-
- if (err_sts) {
- hisi_zip_log_hw_error(hisi_zip, err_sts);
- /* clear error interrupts */
- writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE);
-
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- pci_ers_result_t qm_ret, zip_ret;
-
- if (!hisi_zip) {
- dev_err(dev,
- "Can't recover ZIP-error occurred during device init\n");
- return PCI_ERS_RESULT_NONE;
- }
-
- qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm);
-
- zip_ret = hisi_zip_hw_error_handle(hisi_zip);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- zip_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_zip_process_hw_error(pdev);
+ hisi_qm_del_from_list(qm, &zip_devices);
}
static const struct pci_error_handlers hisi_zip_err_handler = {
- .error_detected = hisi_zip_error_detected,
+ .error_detected = hisi_qm_dev_err_detected,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -1002,6 +894,7 @@ static int __init hisi_zip_init(void)
{
int ret;
+ hisi_qm_init_list(&zip_devices);
hisi_zip_register_debugfs();
ret = pci_register_driver(&hisi_zip_pci_driver);
@@ -1010,12 +903,10 @@ static int __init hisi_zip_init(void)
goto err_pci;
}
- if (uacce_mode == 0 || uacce_mode == 2) {
- ret = hisi_zip_register_to_crypto();
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_crypto;
- }
+ ret = hisi_zip_register_to_crypto();
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_crypto;
}
return 0;
@@ -1030,8 +921,7 @@ err_pci:
static void __exit hisi_zip_exit(void)
{
- if (uacce_mode == 0 || uacce_mode == 2)
- hisi_zip_unregister_from_crypto();
+ hisi_zip_unregister_from_crypto();
pci_unregister_driver(&hisi_zip_pci_driver);
hisi_zip_unregister_debugfs();
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 25d5227f74a1..0e25fc3087f3 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -103,7 +103,7 @@ struct img_hash_request_ctx {
struct ahash_request fallback_req;
/* Zero length buffer must remain last member of struct */
- u8 buffer[0] __aligned(sizeof(u32));
+ u8 buffer[] __aligned(sizeof(u32));
};
struct img_hash_ctx {
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
new file mode 100644
index 000000000000..13063384f958
--- /dev/null
+++ b/drivers/crypto/marvell/Kconfig
@@ -0,0 +1,37 @@
+#
+# Marvell crypto drivers configuration
+#
+
+config CRYPTO_DEV_MARVELL
+ tristate
+
+config CRYPTO_DEV_MARVELL_CESA
+ tristate "Marvell's Cryptographic Engine driver"
+ depends on PLAT_ORION || ARCH_MVEBU
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select SRAM
+ select CRYPTO_DEV_MARVELL
+ help
+ This driver allows you to utilize the Cryptographic Engines and
+ Security Accelerator (CESA) which can be found on MVEBU and ORION
+ platforms.
+ This driver supports CPU offload through DMA transfers.
+
+config CRYPTO_DEV_OCTEONTX_CPT
+ tristate "Support for Marvell OcteonTX CPT driver"
+ depends on ARCH_THUNDER || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ depends on CRYPTO_LIB_AES
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
+ select CRYPTO_DEV_MARVELL
+ help
+ This driver allows you to utilize the Marvell Cryptographic
+ Accelerator Unit (CPT) found in the OcteonTX series of processors.
+
+ To compile this driver as a module, choose M here:
+ the modules will be called octeontx-cpt and octeontx-cptvf.
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
index b27cab65e696..6c6a1519b0f1 100644
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -1,3 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
-marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx/
diff --git a/drivers/crypto/marvell/cesa/Makefile b/drivers/crypto/marvell/cesa/Makefile
new file mode 100644
index 000000000000..b27cab65e696
--- /dev/null
+++ b/drivers/crypto/marvell/cesa/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
+marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 8a5f0b0bdf77..8a5f0b0bdf77 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index f1ed3b85c0d2..e8632d5f343f 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -436,7 +436,7 @@ struct mv_cesa_dev {
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
* @chain: list of the current tdma descriptors being processed
- * by this engine.
+ * by this engine.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
@@ -467,7 +467,7 @@ struct mv_cesa_engine {
* @step: launch the crypto operation on the next chunk
* @cleanup: cleanup the crypto request (release associated data)
* @complete: complete the request, i.e copy result or context from sram when
- * needed.
+ * needed.
*/
struct mv_cesa_req_ops {
int (*process)(struct crypto_async_request *req, u32 status);
@@ -734,6 +734,7 @@ static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
for (i = 0; i < cesa_dev->caps->nengines; i++) {
struct mv_cesa_engine *engine = cesa_dev->engines + i;
u32 load = atomic_read(&engine->load);
+
if (load < min_load) {
min_load = load;
selected = engine;
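For context, mv_cesa_select_engine() walks the engine array and picks the one whose atomic load counter is lowest. A standalone model of that selection loop using C11 atomics (engine counts and values are invented; the real driver also accounts the request's weight to the winner's counter elsewhere):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

struct engine { atomic_uint load; };

static struct engine *select_engine(struct engine *engines, int n)
{
        unsigned int min_load = UINT_MAX;
        struct engine *selected = NULL;

        for (int i = 0; i < n; i++) {
                unsigned int load = atomic_load(&engines[i].load);

                if (load < min_load) {
                        min_load = load;
                        selected = &engines[i];
                }
        }
        return selected;
}

int main(void)
{
        struct engine e[2];

        atomic_init(&e[0].load, 3);
        atomic_init(&e[1].load, 1);
        printf("selected engine %d\n", (int)(select_engine(e, 2) - e)); /* 1 */
        return 0;
}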
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index c24f34a48cef..f133c2ccb5ae 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -106,8 +106,8 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
- BUG_ON(readl(engine->regs + CESA_SA_CMD) &
- CESA_SA_CMD_EN_CESA_SA_ACCL0);
+ WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -178,6 +178,7 @@ static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
{
struct skcipher_request *skreq = skcipher_request_cast(req);
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
@@ -336,7 +337,8 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
do {
struct mv_cesa_op_ctx *op;
- op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
+ op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
+ flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
@@ -365,9 +367,10 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
} while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
- ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
- CESA_SA_DATA_SRAM_OFFSET,
- CESA_TDMA_SRC_IN_SRAM, flags);
+ ret = mv_cesa_dma_add_result_op(&basereq->chain,
+ CESA_SA_CFG_SRAM_OFFSET,
+ CESA_SA_DATA_SRAM_OFFSET,
+ CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/cesa/hash.c
index a2b35fb0fb89..b971284332b6 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -141,9 +141,11 @@ static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
if (creq->algo_le) {
__le64 bits = cpu_to_le64(creq->len << 3);
+
memcpy(buf + padlen, &bits, sizeof(bits));
} else {
__be64 bits = cpu_to_be64(creq->len << 3);
+
memcpy(buf + padlen, &bits, sizeof(bits));
}
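The branch above encodes the message bit-length at the end of the pad block in little-endian form for MD5-style algorithms and big-endian form for SHA-style ones. A portable sketch of that encoding (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/*
 * Store the 64-bit message bit count at the end of the pad block:
 * little-endian for MD5-style hashes, big-endian for SHA-style ones.
 */
static void put_len(uint8_t tail[8], uint64_t nbytes, int le)
{
        uint64_t bits = nbytes << 3;

        for (int i = 0; i < 8; i++) {
                int shift = le ? 8 * i : 8 * (7 - i);

                tail[i] = (uint8_t)(bits >> shift);
        }
}

int main(void)
{
        uint8_t le_tail[8], be_tail[8];

        put_len(le_tail, 3, 1); /* 3 bytes -> 24 bits -> 0x18 */
        put_len(be_tail, 3, 0);
        printf("le: %02x .. %02x\n", le_tail[0], le_tail[7]); /* 18 .. 00 */
        printf("be: %02x .. %02x\n", be_tail[0], be_tail[7]); /* 00 .. 18 */
        return 0;
}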
@@ -168,7 +170,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
for (i = 0; i < digsize / 4; i++)
- writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+ writel_relaxed(creq->state[i],
+ engine->regs + CESA_IVDIG(i));
}
if (creq->cache_ptr)
@@ -245,8 +248,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
- BUG_ON(readl(engine->regs + CESA_SA_CMD) &
- CESA_SA_CMD_EN_CESA_SA_ACCL0);
+ WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -329,11 +332,12 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
- (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+ (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
+ CESA_TDMA_RESULT) {
__le32 *data = NULL;
/*
- * Result is already in the correct endianess when the SA is
+ * Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
@@ -347,9 +351,9 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
CESA_IVDIG(i));
if (creq->last_req) {
/*
- * Hardware's MD5 digest is in little endian format, but
- * SHA in big endian format
- */
+ * Hardware's MD5 digest is in little endian format, but
+ * SHA in big endian format
+ */
if (creq->algo_le) {
__le32 *result = (void *)ahashreq->result;
@@ -439,7 +443,8 @@ static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
bool cached = false;
- if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
+ if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
+ !creq->last_req) {
cached = true;
if (!req->nbytes)
@@ -648,7 +653,8 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (!mv_cesa_ahash_req_iter_next_op(&iter))
break;
- op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
+ op = mv_cesa_dma_add_frag(&basereq->chain,
+ &creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
@@ -920,7 +926,7 @@ struct ahash_alg mv_md5_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -990,7 +996,7 @@ struct ahash_alg mv_sha1_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1063,7 +1069,7 @@ struct ahash_alg mv_sha256_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
.cra_init = mv_cesa_ahash_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1297,7 +1303,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1367,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
@@ -1437,6 +1443,6 @@ struct ahash_alg mv_ahmac_sha256_alg = {
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
.cra_init = mv_cesa_ahmac_cra_init,
.cra_module = THIS_MODULE,
- }
+ }
}
};
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 45939d53e8d6..b81ee276fe0e 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -50,8 +50,8 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
engine->regs + CESA_SA_CFG);
writel_relaxed(dreq->chain.first->cur_dma,
engine->regs + CESA_TDMA_NEXT_ADDR);
- BUG_ON(readl(engine->regs + CESA_SA_CMD) &
- CESA_SA_CMD_EN_CESA_SA_ACCL0);
+ WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -175,8 +175,10 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
break;
}
- /* Save the last request in error to engine->req, so that the core
- * knows which request was fautly */
+ /*
+ * Save the last request in error to engine->req, so that the core
+ * knows which request was faulty.
+ */
if (res) {
spin_lock_bh(&engine->lock);
engine->req = req;
diff --git a/drivers/crypto/marvell/octeontx/Makefile b/drivers/crypto/marvell/octeontx/Makefile
new file mode 100644
index 000000000000..5e956fe1a85b
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx-cpt.o octeontx-cptvf.o
+
+octeontx-cpt-objs := otx_cptpf_main.o otx_cptpf_mbox.o otx_cptpf_ucode.o
+octeontx-cptvf-objs := otx_cptvf_main.o otx_cptvf_mbox.o otx_cptvf_reqmgr.o \
+ otx_cptvf_algs.o
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_common.h b/drivers/crypto/marvell/octeontx/otx_cpt_common.h
new file mode 100644
index 000000000000..ca704a7a265f
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_common.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPT_COMMON_H
+#define __OTX_CPT_COMMON_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#define OTX_CPT_MAX_MBOX_DATA_STR_SIZE 64
+
+enum otx_cptpf_type {
+ OTX_CPT_AE = 2,
+ OTX_CPT_SE = 3,
+ BAD_OTX_CPTPF_TYPE,
+};
+
+enum otx_cptvf_type {
+ OTX_CPT_AE_TYPES = 1,
+ OTX_CPT_SE_TYPES = 2,
+ BAD_OTX_CPTVF_TYPE,
+};
+
+/* VF-PF message opcodes */
+enum otx_cpt_mbox_opcode {
+ OTX_CPT_MSG_VF_UP = 1,
+ OTX_CPT_MSG_VF_DOWN,
+ OTX_CPT_MSG_READY,
+ OTX_CPT_MSG_QLEN,
+ OTX_CPT_MSG_QBIND_GRP,
+ OTX_CPT_MSG_VQ_PRIORITY,
+ OTX_CPT_MSG_PF_TYPE,
+ OTX_CPT_MSG_ACK,
+ OTX_CPT_MSG_NACK
+};
+
+/* OcteonTX CPT mailbox structure */
+struct otx_cpt_mbox {
+ u64 msg; /* Message type MBOX[0] */
+ u64 data; /* Data MBOX[1] */
+};
+
+#endif /* __OTX_CPT_COMMON_H */
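The mailbox is just two 64-bit words: MBOX[0] carries the opcode and MBOX[1] the payload. A hedged sketch of composing and dispatching one message (the enum values mirror the header; the handler and its behaviour are invented, since the real driver moves these words through BAR-mapped registers and interrupts):

#include <stdint.h>
#include <stdio.h>

enum otx_cpt_mbox_opcode {
        OTX_CPT_MSG_VF_UP = 1,
        OTX_CPT_MSG_VF_DOWN,
        OTX_CPT_MSG_READY,
        OTX_CPT_MSG_QLEN,
};

struct otx_cpt_mbox {
        uint64_t msg;   /* Message type, MBOX[0] */
        uint64_t data;  /* Payload, MBOX[1] */
};

/* Invented PF-side dispatcher, for illustration only. */
static void pf_handle(const struct otx_cpt_mbox *mbx)
{
        switch (mbx->msg) {
        case OTX_CPT_MSG_QLEN:
                printf("VF reports queue length %llu\n",
                       (unsigned long long)mbx->data);
                break;
        default:
                printf("opcode %llu\n", (unsigned long long)mbx->msg);
        }
}

int main(void)
{
        struct otx_cpt_mbox mbx = { .msg = OTX_CPT_MSG_QLEN, .data = 1024 };

        pf_handle(&mbx);
        return 0;
}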
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
new file mode 100644
index 000000000000..b8bdb9f134f3
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -0,0 +1,824 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPT_HW_TYPES_H
+#define __OTX_CPT_HW_TYPES_H
+
+#include <linux/types.h>
+
+/* Device IDs */
+#define OTX_CPT_PCI_PF_DEVICE_ID 0xa040
+#define OTX_CPT_PCI_VF_DEVICE_ID 0xa041
+
+#define OTX_CPT_PCI_PF_SUBSYS_ID 0xa340
+#define OTX_CPT_PCI_VF_SUBSYS_ID 0xa341
+
+/* Configuration and status registers are in BAR0 on OcteonTX platform */
+#define OTX_CPT_PF_PCI_CFG_BAR 0
+#define OTX_CPT_VF_PCI_CFG_BAR 0
+
+#define OTX_CPT_BAR_E_CPTX_VFX_BAR0_OFFSET(a, b) \
+ (0x000020000000ll + 0x1000000000ll * (a) + 0x100000ll * (b))
+#define OTX_CPT_BAR_E_CPTX_VFX_BAR0_SIZE 0x400000
+
+/* Mailbox interrupts offset */
+#define OTX_CPT_PF_MBOX_INT 3
+#define OTX_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
+/* Number of MSIX supported in PF */
+#define OTX_CPT_PF_MSIX_VECTORS 4
+/* Maximum supported microcode groups */
+#define OTX_CPT_MAX_ENGINE_GROUPS 8
+
+/* CPT instruction size in bytes */
+#define OTX_CPT_INST_SIZE 64
+/* CPT queue next chunk pointer size in bytes */
+#define OTX_CPT_NEXT_CHUNK_PTR_SIZE 8
+
+/* OcteonTX CPT VF MSIX vectors and their offsets */
+#define OTX_CPT_VF_MSIX_VECTORS 2
+#define OTX_CPT_VF_INTR_MBOX_MASK BIT(0)
+#define OTX_CPT_VF_INTR_DOVF_MASK BIT(1)
+#define OTX_CPT_VF_INTR_IRDE_MASK BIT(2)
+#define OTX_CPT_VF_INTR_NWRP_MASK BIT(3)
+#define OTX_CPT_VF_INTR_SERR_MASK BIT(4)
+
+/* OcteonTX CPT PF registers */
+#define OTX_CPT_PF_CONSTANTS (0x0ll)
+#define OTX_CPT_PF_RESET (0x100ll)
+#define OTX_CPT_PF_DIAG (0x120ll)
+#define OTX_CPT_PF_BIST_STATUS (0x160ll)
+#define OTX_CPT_PF_ECC0_CTL (0x200ll)
+#define OTX_CPT_PF_ECC0_FLIP (0x210ll)
+#define OTX_CPT_PF_ECC0_INT (0x220ll)
+#define OTX_CPT_PF_ECC0_INT_W1S (0x230ll)
+#define OTX_CPT_PF_ECC0_ENA_W1S (0x240ll)
+#define OTX_CPT_PF_ECC0_ENA_W1C (0x250ll)
+#define OTX_CPT_PF_MBOX_INTX(b) (0x400ll | (u64)(b) << 3)
+#define OTX_CPT_PF_MBOX_INT_W1SX(b) (0x420ll | (u64)(b) << 3)
+#define OTX_CPT_PF_MBOX_ENA_W1CX(b) (0x440ll | (u64)(b) << 3)
+#define OTX_CPT_PF_MBOX_ENA_W1SX(b) (0x460ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXEC_INT (0x500ll)
+#define OTX_CPT_PF_EXEC_INT_W1S (0x520ll)
+#define OTX_CPT_PF_EXEC_ENA_W1C (0x540ll)
+#define OTX_CPT_PF_EXEC_ENA_W1S (0x560ll)
+#define OTX_CPT_PF_GX_EN(b) (0x600ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXEC_INFO (0x700ll)
+#define OTX_CPT_PF_EXEC_BUSY (0x800ll)
+#define OTX_CPT_PF_EXEC_INFO0 (0x900ll)
+#define OTX_CPT_PF_EXEC_INFO1 (0x910ll)
+#define OTX_CPT_PF_INST_REQ_PC (0x10000ll)
+#define OTX_CPT_PF_INST_LATENCY_PC (0x10020ll)
+#define OTX_CPT_PF_RD_REQ_PC (0x10040ll)
+#define OTX_CPT_PF_RD_LATENCY_PC (0x10060ll)
+#define OTX_CPT_PF_RD_UC_PC (0x10080ll)
+#define OTX_CPT_PF_ACTIVE_CYCLES_PC (0x10100ll)
+#define OTX_CPT_PF_EXE_CTL (0x4000000ll)
+#define OTX_CPT_PF_EXE_STATUS (0x4000008ll)
+#define OTX_CPT_PF_EXE_CLK (0x4000010ll)
+#define OTX_CPT_PF_EXE_DBG_CTL (0x4000018ll)
+#define OTX_CPT_PF_EXE_DBG_DATA (0x4000020ll)
+#define OTX_CPT_PF_EXE_BIST_STATUS (0x4000028ll)
+#define OTX_CPT_PF_EXE_REQ_TIMER (0x4000030ll)
+#define OTX_CPT_PF_EXE_MEM_CTL (0x4000038ll)
+#define OTX_CPT_PF_EXE_PERF_CTL (0x4001000ll)
+#define OTX_CPT_PF_EXE_DBG_CNTX(b) (0x4001100ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXE_PERF_EVENT_CNT (0x4001180ll)
+#define OTX_CPT_PF_EXE_EPCI_INBX_CNT(b) (0x4001200ll | (u64)(b) << 3)
+#define OTX_CPT_PF_EXE_EPCI_OUTBX_CNT(b) (0x4001240ll | (u64)(b) << 3)
+#define OTX_CPT_PF_ENGX_UCODE_BASE(b) (0x4002000ll | (u64)(b) << 3)
+#define OTX_CPT_PF_QX_CTL(b) (0x8000000ll | (u64)(b) << 20)
+#define OTX_CPT_PF_QX_GMCTL(b) (0x8000020ll | (u64)(b) << 20)
+#define OTX_CPT_PF_QX_CTL2(b) (0x8000100ll | (u64)(b) << 20)
+#define OTX_CPT_PF_VFX_MBOXX(b, c) (0x8001000ll | (u64)(b) << 20 | \
+ (u64)(c) << 8)
+
+/* OcteonTX CPT VF registers */
+#define OTX_CPT_VQX_CTL(b) (0x100ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_SADDR(b) (0x200ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_WAIT(b) (0x400ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_INPROG(b) (0x410ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE(b) (0x420ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_ACK(b) (0x440ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_INT_W1S(b) (0x460ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_INT_W1C(b) (0x468ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_ENA_W1S(b) (0x470ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DONE_ENA_W1C(b) (0x478ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_INT(b) (0x500ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_INT_W1S(b) (0x508ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_ENA_W1S(b) (0x510ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_MISC_ENA_W1C(b) (0x518ll | (u64)(b) << 20)
+#define OTX_CPT_VQX_DOORBELL(b) (0x600ll | (u64)(b) << 20)
+#define OTX_CPT_VFX_PF_MBOXX(b, c) (0x1000ll | ((b) << 20) | ((c) << 3))
+
+/*
+ * Enumeration otx_cpt_ucode_error_code_e
+ *
+ * Enumerates ucode errors
+ */
+enum otx_cpt_ucode_error_code_e {
+ CPT_NO_UCODE_ERROR = 0x00,
+ ERR_OPCODE_UNSUPPORTED = 0x01,
+
+ /* Scatter gather */
+ ERR_SCATTER_GATHER_WRITE_LENGTH = 0x02,
+ ERR_SCATTER_GATHER_LIST = 0x03,
+ ERR_SCATTER_GATHER_NOT_SUPPORTED = 0x04,
+
+};
+
+/*
+ * Enumeration otx_cpt_comp_e
+ *
+ * CPT OcteonTX Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum otx_cpt_comp_e {
+ CPT_COMP_E_NOTDONE = 0x00,
+ CPT_COMP_E_GOOD = 0x01,
+ CPT_COMP_E_FAULT = 0x02,
+ CPT_COMP_E_SWERR = 0x03,
+ CPT_COMP_E_HWERR = 0x04,
+ CPT_COMP_E_LAST_ENTRY = 0x05
+};
+
+/*
+ * Enumeration otx_cpt_vf_int_vec_e
+ *
+ * CPT OcteonTX VF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx_cpt_vf_int_vec_e {
+ CPT_VF_INT_VEC_E_MISC = 0x00,
+ CPT_VF_INT_VEC_E_DONE = 0x01
+};
+
+/*
+ * Structure cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout. Instructions are
+ * stored in memory as little-endian unless CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_inst_s_s
+ * Word 0
+ * doneint:1 Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE] will be
+ * incremented, and based on the rules described there an interrupt may
+ * occur.
+ * Word 1
+ * res_addr [127: 64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Word 2
+ * grp:10 [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to use when
+ * CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP() must map
+ * [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * tt:2 [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use when CPT
+ * submits work to SSO.
+ * tag:32 [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when CPT
+ * submits work to SSO.
+ * Word 3
+ * wq_ptr [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ * work-queue entry that CPT submits work to SSO after all context,
+ * output data, and result write operations are visible to other
+ * CNXXXX units and the cores. Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ * Internal:
+ * Bits <63:49>, <2:0> are ignored by hardware, treated as always 0x0.
+ * Word 4
+ * ei0; [319:256] Engine instruction word 0. Passed to the AE/SE.
+ * Word 5
+ * ei1; [383:320] Engine instruction word 1. Passed to the AE/SE.
+ * Word 6
+ * ei2; [447:384] Engine instruction word 2. Passed to the AE/SE.
+ * Word 7
+ * ei3; [511:448] Engine instruction word 3. Passed to the AE/SE.
+ *
+ */
+union otx_cpt_inst_s {
+ u64 u[8];
+
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_17_63:47;
+ u64 doneint:1;
+ u64 reserved_0_15:16;
+#else /* Word 0 - Little Endian */
+ u64 reserved_0_15:16;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+#endif /* Word 0 - End */
+ u64 res_addr;
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ u64 reserved_172_191:20;
+ u64 grp:10;
+ u64 tt:2;
+ u64 tag:32;
+#else /* Word 2 - Little Endian */
+ u64 tag:32;
+ u64 tt:2;
+ u64 grp:10;
+ u64 reserved_172_191:20;
+#endif /* Word 2 - End */
+ u64 wq_ptr;
+ u64 ei0;
+ u64 ei1;
+ u64 ei2;
+ u64 ei3;
+ } s;
+};
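
As a hedged illustration of the word layout documented above (a hypothetical helper, not part of this patch; it assumes the usual kernel string helpers are available), preparing an instruction that requests both a completion interrupt and a result write could look like:

static void example_fill_inst(union otx_cpt_inst_s *inst, u64 res_iova,
			      u64 ei0, u64 ei1, u64 ei2, u64 ei3)
{
	memset(inst, 0, sizeof(*inst));
	inst->s.doneint = 1;		/* bump CPT()_VQ()_DONE[DONE] when done */
	inst->s.res_addr = res_iova;	/* CPT_RES_S IOVA, must be 16-byte aligned */
	inst->s.wq_ptr = 0;		/* no SSO work submission */
	inst->s.ei0 = ei0;		/* engine words, passed to the AE/SE */
	inst->s.ei1 = ei1;
	inst->s.ei2 = ei2;
	inst->s.ei3 = ei3;
}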
+
+/*
+ * Structure cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and
+ * each instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_res_s_s
+ * Word 0
+ * doneint:1 [16:16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ * compcode:8 [7:0] Indicates completion/error status of the CPT coprocessor
+ * for the associated instruction, as enumerated by CPT_COMP_E.
+ * Core software may write the memory location containing [COMPCODE] to
+ * 0x0 before ringing the doorbell, and then poll for completion by
+ * checking for a nonzero value.
+ * Once the core observes a nonzero [COMPCODE] value in this case, the CPT
+ * coprocessor will have also completed L2/DRAM write operations.
+ * Word 1
+ * reserved
+ *
+ */
+union otx_cpt_res_s {
+ u64 u[2];
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_17_63:47;
+ u64 doneint:1;
+ u64 reserved_8_15:8;
+ u64 compcode:8;
+#else /* Word 0 - Little Endian */
+ u64 compcode:8;
+ u64 reserved_8_15:8;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+#endif /* Word 0 - End */
+ u64 reserved_64_127;
+ } s;
+};
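
A minimal polling sketch for the completion scheme described above (illustrative only; it assumes software zeroed the word holding [COMPCODE] before ringing the doorbell, as the comment suggests):

static int example_wait_result(union otx_cpt_res_s *res)
{
	union otx_cpt_res_s r;

	/* [COMPCODE] was written to 0x0 before the doorbell was rung */
	do {
		r.u[0] = READ_ONCE(res->u[0]);
		cpu_relax();
	} while (r.s.compcode == CPT_COMP_E_NOTDONE);

	/* a nonzero [COMPCODE] also implies L2/DRAM writes are visible */
	return r.s.compcode == CPT_COMP_E_GOOD ? 0 : -EIO;
}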
+
+/*
+ * Register (NCB) otx_cpt#_pf_bist_status
+ *
+ * CPT PF Control Bist Status Register
+ * This register has the BIST status of memories. Each bit is the BIST result
+ * of an individual memory (per bit, 0 = pass and 1 = fail).
+ * otx_cptx_pf_bist_status_s
+ * Word0
+ * bstatus [29:0](RO/H) BIST status. One bit per memory, enumerated by
+ * CPT_RAMS_E.
+ */
+union otx_cptx_pf_bist_status {
+ u64 u;
+ struct otx_cptx_pf_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_30_63:34;
+ u64 bstatus:30;
+#else /* Word 0 - Little Endian */
+ u64 bstatus:30;
+ u64 reserved_30_63:34;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_pf_constants
+ *
+ * CPT PF Constants Register
+ * This register contains implementation-related parameters of CPT in CNXXXX.
+ * otx_cptx_pf_constants_s
+ * Word 0
+ * reserved_40_63:24 [63:40] Reserved.
+ * epcis:8 [39:32](RO) Number of EPCI buses.
+ * grps:8 [31:24](RO) Number of engine groups implemented.
+ * ae:8 [23:16](RO/H) Number of AEs. In CNXXXX, for CPT0 returns 0x0,
+ * for CPT1 returns 0x18, or less if there are fuse-disables.
+ * se:8 [15:8](RO/H) Number of SEs. In CNXXXX, for CPT0 returns 0x30,
+ * or less if there are fuse-disables, for CPT1 returns 0x0.
+ * vq:8 [7:0](RO) Number of VQs.
+ */
+union otx_cptx_pf_constants {
+ u64 u;
+ struct otx_cptx_pf_constants_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_40_63:24;
+ u64 epcis:8;
+ u64 grps:8;
+ u64 ae:8;
+ u64 se:8;
+ u64 vq:8;
+#else /* Word 0 - Little Endian */
+ u64 vq:8;
+ u64 se:8;
+ u64 ae:8;
+ u64 grps:8;
+ u64 epcis:8;
+ u64 reserved_40_63:24;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_pf_exe_bist_status
+ *
+ * CPT PF Engine Bist Status Register
+ * This register has the BIST status of each engine. Each bit is the
+ * BIST result of an individual engine (per bit, 0 = pass and 1 = fail).
+ * otx_cptx_pf_exe_bist_status_s
+ * Word0
+ * reserved_48_63:16 [63:48] reserved
+ * bstatus:48 [47:0](RO/H) BIST status. One bit per engine.
+ *
+ */
+union otx_cptx_pf_exe_bist_status {
+ u64 u;
+ struct otx_cptx_pf_exe_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_48_63:16;
+ u64 bstatus:48;
+#else /* Word 0 - Little Endian */
+ u64 bstatus:48;
+ u64 reserved_48_63:16;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_pf_q#_ctl
+ *
+ * CPT Queue Control Register
+ * This register configures queues. This register should be changed only
+ * when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
+ * otx_cptx_pf_qx_ctl_s
+ * Word0
+ * reserved_60_63:4 [63:60] reserved.
+ * aura:12; [59:48](R/W) Guest-aura for returning this queue's
+ * instruction-chunk buffers to FPA. Only used when [INST_FREE] is set.
+ * For the FPA to not discard the request, FPA_PF_MAP() must map
+ * [AURA] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * reserved_45_47:3 [47:45] reserved.
+ * size:13 [44:32](R/W) Command-buffer size, in number of 64-bit words per
+ * command buffer segment. Must be 8*n + 1, where n is the number of
+ * instructions per buffer segment.
+ * reserved_11_31:21 [31:11] Reserved.
+ * cont_err:1 [10:10](R/W) Continue on error.
+ * 0 = When CPT()_VQ()_MISC_INT[NWRP], CPT()_VQ()_MISC_INT[IRDE] or
+ * CPT()_VQ()_MISC_INT[DOVF] are set by hardware or software via
+ * CPT()_VQ()_MISC_INT_W1S, then CPT()_VQ()_CTL[ENA] is cleared. Due to
+ * pipelining, additional instructions may have been processed between the
+ * instruction causing the error and the next instruction in the disabled
+ * queue (the instruction at CPT()_VQ()_SADDR).
+ * 1 = Ignore errors and continue processing instructions.
+ * For diagnostic use only.
+ * inst_free:1 [9:9](R/W) Instruction FPA free. When set, when CPT reaches the
+ * end of an instruction chunk, that chunk will be freed to the FPA.
+ * inst_be:1 [8:8](R/W) Instruction big-endian control. When set, instructions,
+ * instruction next chunk pointers, and result structures are stored in
+ * big-endian format in memory.
+ * iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
+ * 0 = The hardware issues NCB transient load (LDT) towards the cache,
+ * which, if the line hits and is dirty, will cause the line to be
+ * written back before being replaced.
+ * 1 = The hardware issues NCB LDWB read-and-invalidate command towards
+ * the cache when fetching the last word of instructions; as a result the
+ * line will not be written back when replaced. This improves
+ * performance, but software must not read the instructions after they are
+ * posted to the hardware. Reads that do not consume the last word of a
+ * cache line always use LDI.
+ * reserved_4_6:3 [6:4] Reserved.
+ * grp:3; [3:1](R/W) Engine group.
+ * pri:1; [0:0](R/W) Queue priority.
+ * 1 = This queue has higher priority. Round-robin between higher
+ * priority queues.
+ * 0 = This queue has lower priority. Round-robin between lower
+ * priority queues.
+ */
+union otx_cptx_pf_qx_ctl {
+ u64 u;
+ struct otx_cptx_pf_qx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_60_63:4;
+ u64 aura:12;
+ u64 reserved_45_47:3;
+ u64 size:13;
+ u64 reserved_11_31:21;
+ u64 cont_err:1;
+ u64 inst_free:1;
+ u64 inst_be:1;
+ u64 iqb_ldwb:1;
+ u64 reserved_4_6:3;
+ u64 grp:3;
+ u64 pri:1;
+#else /* Word 0 - Little Endian */
+ u64 pri:1;
+ u64 grp:3;
+ u64 reserved_4_6:3;
+ u64 iqb_ldwb:1;
+ u64 inst_be:1;
+ u64 inst_free:1;
+ u64 cont_err:1;
+ u64 reserved_11_31:21;
+ u64 size:13;
+ u64 reserved_45_47:3;
+ u64 aura:12;
+ u64 reserved_60_63:4;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_saddr
+ *
+ * CPT Queue Starting Buffer Address Registers
+ * These registers set the instruction buffer starting address.
+ * otx_cptx_vqx_saddr_s
+ * Word0
+ * reserved_49_63:15 [63:49] Reserved.
+ * ptr:43 [48:6](R/W/H) Instruction buffer IOVA <48:6> (64-byte aligned).
+ * When written, it is the initial buffer starting address; when read,
+ * it is the next read pointer to be requested from L2C. The PTR field
+ * is overwritten with the next pointer each time that the command buffer
+ * segment is exhausted. New commands will then be read from the newly
+ * specified command buffer pointer.
+ * reserved_0_5:6 [5:0] Reserved.
+ *
+ */
+union otx_cptx_vqx_saddr {
+ u64 u;
+ struct otx_cptx_vqx_saddr_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_49_63:15;
+ u64 ptr:43;
+ u64 reserved_0_5:6;
+#else /* Word 0 - Little Endian */
+ u64 reserved_0_5:6;
+ u64 ptr:43;
+ u64 reserved_49_63:15;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_misc_ena_w1s
+ *
+ * CPT Queue Misc Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ * otx_cptx_vqx_misc_ena_w1s_s
+ * Word0
+ * reserved_5_63:59 [63:5] Reserved.
+ * swerr:1 [4:4](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[SWERR].
+ * nwrp:1 [3:3](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[NWRP].
+ * irde:1 [2:2](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[IRDE].
+ * dovf:1 [1:1](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[DOVF].
+ * mbox:1 [0:0](R/W1S/H) Reads or sets enable for
+ * CPT(0..1)_VQ(0..63)_MISC_INT[MBOX].
+ *
+ */
+union otx_cptx_vqx_misc_ena_w1s {
+ u64 u;
+ struct otx_cptx_vqx_misc_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_5_63:59;
+ u64 swerr:1;
+ u64 nwrp:1;
+ u64 irde:1;
+ u64 dovf:1;
+ u64 mbox:1;
+#else /* Word 0 - Little Endian */
+ u64 mbox:1;
+ u64 dovf:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 swerr:1;
+ u64 reserved_5_63:59;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_doorbell
+ *
+ * CPT Queue Doorbell Registers
+ * Doorbells for the CPT instruction queues.
+ * otx_cptx_vqx_doorbell_s
+ * Word0
+ * reserved_20_63:44 [63:20] Reserved.
+ * dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
+ * to the CPT instruction doorbell count. Readback value is the
+ * current number of pending doorbell requests. If the counter overflows,
+ * CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
+ * zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
+ * then write a value of 2^20 minus the read [DBELL_CNT], then write one
+ * to CPT()_VQ()_MISC_INT_W1C[DBELL_DOVF] and
+ * CPT()_VQ()_MISC_INT_ENA_W1S[DBELL_DOVF]. Must be a multiple of 8.
+ * All CPT instructions are 8 words and require a doorbell count that
+ * is a multiple of 8.
+ */
+union otx_cptx_vqx_doorbell {
+ u64 u;
+ struct otx_cptx_vqx_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_20_63:44;
+ u64 dbell_cnt:20;
+#else /* Word 0 - Little Endian */
+ u64 dbell_cnt:20;
+ u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+ } s;
+};
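
For illustration (a hypothetical helper, not part of this patch; reg_base and queue indexing follow this driver's register macros), ringing the doorbell for a batch of instructions would be:

static void example_ring_doorbell(void __iomem *reg_base, int queue,
				  u32 num_insts)
{
	/* each CPT instruction is 8 words, so the count is num_insts * 8 */
	writeq(num_insts * 8ull, reg_base + OTX_CPT_VQX_DOORBELL(queue));
}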
+
+/*
+ * Register (NCB) otx_cpt#_vq#_inprog
+ *
+ * CPT Queue In Progress Count Registers
+ * These registers contain the per-queue instruction in flight registers.
+ * otx_cptx_vqx_inprog_s
+ * Word0
+ * reserved_8_63:56 [63:8] Reserved.
+ * inflight:8 [7:0](RO/H) Inflight count. Counts the number of instructions
+ * for the VF for which CPT is fetching, executing or responding to
+ * instructions. However, this does not include any interrupts that are
+ * awaiting software handling (CPT()_VQ()_DONE[DONE] != 0x0).
+ * A queue may not be reconfigured until:
+ * 1. CPT()_VQ()_CTL[ENA] is cleared by software.
+ * 2. [INFLIGHT] is polled until it equals zero.
+ */
+union otx_cptx_vqx_inprog {
+ u64 u;
+ struct otx_cptx_vqx_inprog_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_8_63:56;
+ u64 inflight:8;
+#else /* Word 0 - Little Endian */
+ u64 inflight:8;
+ u64 reserved_8_63:56;
+#endif /* Word 0 - End */
+ } s;
+};
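
A sketch of the two-step reconfiguration rule stated above (hypothetical helper; a production version would pick its own timeout policy):

static int example_quiesce_queue(void __iomem *reg_base, int queue)
{
	union otx_cptx_vqx_inprog inprog;
	int timeout = 100;
	u64 ctl;

	/* 1. clear CPT()_VQ()_CTL[ENA] (bit 0) */
	ctl = readq(reg_base + OTX_CPT_VQX_CTL(queue));
	writeq(ctl & ~1ull, reg_base + OTX_CPT_VQX_CTL(queue));

	/* 2. poll [INFLIGHT] until it equals zero */
	do {
		inprog.u = readq(reg_base + OTX_CPT_VQX_INPROG(queue));
		if (!inprog.s.inflight)
			return 0;
		usleep_range(100, 200);
	} while (--timeout);

	return -EBUSY;
}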
+
+/*
+ * Register (NCB) otx_cpt#_vq#_misc_int
+ *
+ * CPT Queue Misc Interrupt Register
+ * These registers contain the per-queue miscellaneous interrupts.
+ * otx_cptx_vqx_misc_int_s
+ * Word 0
+ * reserved_5_63:59 [63:5] Reserved.
+ * swerr:1 [4:4](R/W1C/H) Software error from engines.
+ * nwrp:1 [3:3](R/W1C/H) NCB result write response error.
+ * irde:1 [2:2](R/W1C/H) Instruction NCB read response error.
+ * dovf:1 [1:1](R/W1C/H) Doorbell overflow.
+ * mbox:1 [0:0](R/W1C/H) PF to VF mailbox interrupt. Set when
+ * CPT()_VF()_PF_MBOX(0) is written.
+ *
+ */
+union otx_cptx_vqx_misc_int {
+ u64 u;
+ struct otx_cptx_vqx_misc_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_5_63:59;
+ u64 swerr:1;
+ u64 nwrp:1;
+ u64 irde:1;
+ u64 dovf:1;
+ u64 mbox:1;
+#else /* Word 0 - Little Endian */
+ u64 mbox:1;
+ u64 dovf:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 swerr:1;
+ u64 reserved_5_63:59;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done_ack
+ *
+ * CPT Queue Done Count Ack Registers
+ * This register is written by software to acknowledge interrupts.
+ * otx_cptx_vqx_done_ack_s
+ * Word0
+ * reserved_20_63:44 [63:20] Reserved.
+ * done_ack:20 [19:0](R/W/H) Number of decrements to CPT()_VQ()_DONE[DONE].
+ * Reads CPT()_VQ()_DONE[DONE]. Written by software to acknowledge
+ * interrupts. If CPT()_VQ()_DONE[DONE] is still nonzero the interrupt
+ * will be re-sent if the conditions described in CPT()_VQ()_DONE[DONE]
+ * are satisfied.
+ *
+ */
+union otx_cptx_vqx_done_ack {
+ u64 u;
+ struct otx_cptx_vqx_done_ack_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_20_63:44;
+ u64 done_ack:20;
+#else /* Word 0 - Little Endian */
+ u64 done_ack:20;
+ u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done
+ *
+ * CPT Queue Done Count Registers
+ * These registers contain the per-queue instruction done count.
+ * cptx_vqx_done_s
+ * Word0
+ * reserved_20_63:44 [63:20] Reserved.
+ * done:20 [19:0](R/W/H) Done count. When CPT_INST_S[DONEINT] is set and
+ * that instruction completes, CPT()_VQ()_DONE[DONE] is incremented.
+ * Writes to this field are for diagnostic use only;
+ * instead software writes CPT()_VQ()_DONE_ACK with the number of
+ * decrements for this field.
+ * Interrupts are sent as follows:
+ * * When CPT()_VQ()_DONE[DONE] = 0, then no results are pending, the
+ * interrupt coalescing timer is held to zero, and an interrupt is not
+ * sent.
+ * * When CPT()_VQ()_DONE[DONE] != 0, then the interrupt coalescing timer
+ * counts. If the counter is >= CPT()_VQ()_DONE_WAIT[TIME_WAIT]*1024, or
+ * CPT()_VQ()_DONE[DONE] >= CPT()_VQ()_DONE_WAIT[NUM_WAIT], i.e. enough
+ * time has passed or enough results have arrived, then the interrupt is
+ * sent.
+ * * When CPT()_VQ()_DONE_ACK is written (or CPT()_VQ()_DONE is written
+ * but this is not typical), the interrupt coalescing timer restarts.
+ * Note that after decrementing, this interrupt equation is recomputed;
+ * for example if CPT()_VQ()_DONE[DONE] >= CPT()_VQ()_DONE_WAIT[NUM_WAIT]
+ * and because the timer is zero, the interrupt will be resent immediately.
+ * (This covers the race case between software acknowledging an interrupt
+ * and a result returning.)
+ * * When CPT()_VQ()_DONE_ENA_W1S[DONE] = 0, interrupts are not sent,
+ * but the counting described above still occurs.
+ * Since CPT instructions complete out-of-order, if software is using
+ * completion interrupts, the suggested scheme is to request a DONEINT on
+ * each request, and when an interrupt arrives perform a "greedy" scan for
+ * completions; even if a later command is acknowledged first this will
+ * not result in missing a completion.
+ * Software is responsible for making sure [DONE] does not overflow;
+ * for example by ensuring there are not more than 2^20-1 instructions in
+ * flight that may request interrupts.
+ *
+ */
+union otx_cptx_vqx_done {
+ u64 u;
+ struct otx_cptx_vqx_done_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_20_63:44;
+ u64 done:20;
+#else /* Word 0 - Little Endian */
+ u64 done:20;
+ u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+ } s;
+};
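
A hedged sketch of the suggested interrupt-handling scheme above (hypothetical helper names; example_process_completions() stands in for a driver's own greedy scan of pending result structures):

static void example_done_irq(void __iomem *reg_base, int queue)
{
	union otx_cptx_vqx_done done;

	done.u = readq(reg_base + OTX_CPT_VQX_DONE(queue));
	if (!done.s.done)
		return;

	/* greedy scan, since completions may finish out-of-order */
	example_process_completions();	/* placeholder, not a real symbol */

	/* the ack restarts the coalescing timer and recomputes the
	 * interrupt condition, covering the ack/completion race
	 */
	writeq(done.s.done, reg_base + OTX_CPT_VQX_DONE_ACK(queue));
}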
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done_wait
+ *
+ * CPT Queue Done Interrupt Coalescing Wait Registers
+ * Specifies the per queue interrupt coalescing settings.
+ * cptx_vqx_done_wait_s
+ * Word0
+ * reserved_48_63:16 [63:48] Reserved.
+ * time_wait:16; [47:32](R/W) Time hold-off. When CPT()_VQ()_DONE[DONE] = 0
+ * or CPT()_VQ()_DONE_ACK is written, a timer is cleared. When the timer
+ * reaches [TIME_WAIT]*1024, interrupt coalescing ends;
+ * see CPT()_VQ()_DONE[DONE]. If 0x0, time coalescing is disabled.
+ * reserved_20_31:12 [31:20] Reserved.
+ * num_wait:20 [19:0](R/W) Number of messages hold-off.
+ * When CPT()_VQ()_DONE[DONE] >= [NUM_WAIT], interrupt coalescing ends;
+ * see CPT()_VQ()_DONE[DONE]. If 0x0, same behavior as 0x1.
+ *
+ */
+union otx_cptx_vqx_done_wait {
+ u64 u;
+ struct otx_cptx_vqx_done_wait_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_48_63:16;
+ u64 time_wait:16;
+ u64 reserved_20_31:12;
+ u64 num_wait:20;
+#else /* Word 0 - Little Endian */
+ u64 num_wait:20;
+ u64 reserved_20_31:12;
+ u64 time_wait:16;
+ u64 reserved_48_63:16;
+#endif /* Word 0 - End */
+ } s;
+};
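
Programming the two coalescing knobs described above might look like the following (the values are arbitrary examples, not recommendations from this patch):

static void example_set_coalescing(void __iomem *reg_base, int queue)
{
	union otx_cptx_vqx_done_wait done_wait;

	done_wait.u = 0;
	done_wait.s.time_wait = 10;	/* end coalescing after 10 * 1024 cycles */
	done_wait.s.num_wait = 64;	/* or once 64 results are pending */
	writeq(done_wait.u, reg_base + OTX_CPT_VQX_DONE_WAIT(queue));
}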
+
+/*
+ * Register (NCB) otx_cpt#_vq#_done_ena_w1s
+ *
+ * CPT Queue Done Interrupt Enable Set Registers
+ * Writing 1 to these registers enables the DONEINT interrupt for the queue.
+ * cptx_vqx_done_ena_w1s_s
+ * Word0
+ * reserved_1_63:63 [63:1] Reserved.
+ * done:1 [0:0](R/W1S/H) Writing 1 enables DONEINT for this queue.
+ * Writing 0 has no effect. Reads return the enable bit.
+ */
+union otx_cptx_vqx_done_ena_w1s {
+ u64 u;
+ struct otx_cptx_vqx_done_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_1_63:63;
+ u64 done:1;
+#else /* Word 0 - Little Endian */
+ u64 done:1;
+ u64 reserved_1_63:63;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Register (NCB) otx_cpt#_vq#_ctl
+ *
+ * CPT VF Queue Control Registers
+ * This register configures queues. This register should be changed (other than
+ * clearing [ENA]) only when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
+ * cptx_vqx_ctl_s
+ * Word0
+ * reserved_1_63:63 [63:1] Reserved.
+ * ena:1 [0:0](R/W/H) Enables the logical instruction queue.
+ * See also CPT()_PF_Q()_CTL[CONT_ERR] and CPT()_VQ()_INPROG[INFLIGHT].
+ * 1 = Queue is enabled.
+ * 0 = Queue is disabled.
+ */
+union otx_cptx_vqx_ctl {
+ u64 u;
+ struct otx_cptx_vqx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ u64 reserved_1_63:63;
+ u64 ena:1;
+#else /* Word 0 - Little Endian */
+ u64 ena:1;
+ u64 reserved_1_63:63;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/*
+ * Error Address/Error Codes
+ *
+ * In the event of a severe error, microcode writes an 8-byte Error Code
+ * value (ECODE) to host memory at the Rptr address specified by the host
+ * system (in the 64-byte request).
+ *
+ * Word0
+ * [63:56](R) 8-bit completion code
+ * [55:48](R) Number of the core that reported the severe error
+ * [47:0] Lower 6 bytes of M-Inst word2. Used to assist in uniquely
+ * identifying which specific instruction caused the error. This assumes
+ * that each instruction has a unique result location (RPTR), at least
+ * for a given period of time.
+ */
+union otx_cpt_error_code {
+ u64 u;
+ struct otx_cpt_error_code_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t ccode:8;
+ uint64_t coreid:8;
+ uint64_t rptr6:48;
+#else /* Word 0 - Little Endian */
+ uint64_t rptr6:48;
+ uint64_t coreid:8;
+ uint64_t ccode:8;
+#endif /* Word 0 - End */
+ } s;
+};
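
Decoding an ECODE word per the layout above (illustrative only; the helper and its message format are assumptions, not part of this driver):

static void example_decode_ecode(struct device *dev, u64 raw)
{
	union otx_cpt_error_code ecode;

	ecode.u = raw;
	dev_err(dev, "ucode error: ccode 0x%x core %u rptr6 0x%llx\n",
		(u32)ecode.s.ccode, (u32)ecode.s.coreid,
		(u64)ecode.s.rptr6);
}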
+
+#endif /*__OTX_CPT_HW_TYPES_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf.h b/drivers/crypto/marvell/octeontx/otx_cptpf.h
new file mode 100644
index 000000000000..73cd0a9bc563
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTPF_H
+#define __OTX_CPTPF_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include "otx_cptpf_ucode.h"
+
+/*
+ * OcteonTX CPT device structure
+ */
+struct otx_cpt_device {
+ void __iomem *reg_base; /* Register start address */
+ struct pci_dev *pdev; /* Pci device handle */
+ struct otx_cpt_eng_grps eng_grps;/* Engine groups information */
+ struct list_head list;
+ u8 pf_type; /* PF type SE or AE */
+ u8 max_vfs; /* Maximum number of VFs supported by the CPT */
+ u8 vfs_enabled; /* Number of enabled VFs */
+};
+
+void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx);
+void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt);
+
+#endif /* __OTX_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
new file mode 100644
index 000000000000..200fb3303db0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx_cpt_common.h"
+#include "otx_cptpf.h"
+
+#define DRV_NAME "octeontx-cpt"
+#define DRV_VERSION "1.0"
+
+static void otx_cpt_disable_mbox_interrupts(struct otx_cpt_device *cpt)
+{
+ /* Disable mbox(0) interrupts for all VFs */
+ writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1CX(0));
+}
+
+static void otx_cpt_enable_mbox_interrupts(struct otx_cpt_device *cpt)
+{
+ /* Enable mbox(0) interrupts for all VFs */
+ writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1SX(0));
+}
+
+static irqreturn_t otx_cpt_mbx0_intr_handler(int __always_unused irq,
+ void *cpt)
+{
+ otx_cpt_mbox_intr_handler(cpt, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void otx_cpt_reset(struct otx_cpt_device *cpt)
+{
+ writeq(1, cpt->reg_base + OTX_CPT_PF_RESET);
+}
+
+static void otx_cpt_find_max_enabled_cores(struct otx_cpt_device *cpt)
+{
+ union otx_cptx_pf_constants pf_cnsts = {0};
+
+ pf_cnsts.u = readq(cpt->reg_base + OTX_CPT_PF_CONSTANTS);
+ cpt->eng_grps.avail.max_se_cnt = pf_cnsts.s.se;
+ cpt->eng_grps.avail.max_ae_cnt = pf_cnsts.s.ae;
+}
+
+static u32 otx_cpt_check_bist_status(struct otx_cpt_device *cpt)
+{
+ union otx_cptx_pf_bist_status bist_sts = {0};
+
+ bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_BIST_STATUS);
+ return bist_sts.u;
+}
+
+static u64 otx_cpt_check_exe_bist_status(struct otx_cpt_device *cpt)
+{
+ union otx_cptx_pf_exe_bist_status bist_sts = {0};
+
+ bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_EXE_BIST_STATUS);
+ return bist_sts.u;
+}
+
+static int otx_cpt_device_init(struct otx_cpt_device *cpt)
+{
+ struct device *dev = &cpt->pdev->dev;
+ u16 sdevid;
+ u64 bist;
+
+ /* Reset the PF when probed first */
+ otx_cpt_reset(cpt);
+ mdelay(100);
+
+ pci_read_config_word(cpt->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ /* Check BIST status */
+ bist = (u64)otx_cpt_check_bist_status(cpt);
+ if (bist) {
+ dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
+ return -ENODEV;
+ }
+
+ bist = otx_cpt_check_exe_bist_status(cpt);
+ if (bist) {
+ dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
+ return -ENODEV;
+ }
+
+ /* Get max enabled cores */
+ otx_cpt_find_max_enabled_cores(cpt);
+
+ if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
+ (cpt->eng_grps.avail.max_se_cnt == 0)) {
+ cpt->pf_type = OTX_CPT_AE;
+ } else if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
+ (cpt->eng_grps.avail.max_ae_cnt == 0)) {
+ cpt->pf_type = OTX_CPT_SE;
+ }
+
+ /* Get max VQs/VFs supported by the device */
+ cpt->max_vfs = pci_sriov_get_totalvfs(cpt->pdev);
+
+ /* Disable all cores */
+ otx_cpt_disable_all_cores(cpt);
+
+ return 0;
+}
+
+static int otx_cpt_register_interrupts(struct otx_cpt_device *cpt)
+{
+ struct device *dev = &cpt->pdev->dev;
+ u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
+ u32 num_vec = OTX_CPT_PF_MSIX_VECTORS;
+ int ret;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(cpt->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&cpt->pdev->dev,
+ "Request for #%d msix vectors failed\n",
+ num_vec);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handlers */
+ ret = request_irq(pci_irq_vector(cpt->pdev,
+ OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
+ otx_cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
+ if (ret) {
+ dev_err(dev, "Request irq failed\n");
+ pci_free_irq_vectors(cpt->pdev);
+ return ret;
+ }
+ /* Enable mailbox interrupt */
+ otx_cpt_enable_mbox_interrupts(cpt);
+ return 0;
+}
+
+static void otx_cpt_unregister_interrupts(struct otx_cpt_device *cpt)
+{
+ u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
+
+ otx_cpt_disable_mbox_interrupts(cpt);
+ free_irq(pci_irq_vector(cpt->pdev,
+ OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
+ cpt);
+ pci_free_irq_vectors(cpt->pdev);
+}
+
+
+static int otx_cpt_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (numvfs > cpt->max_vfs)
+ numvfs = cpt->max_vfs;
+
+ if (numvfs > 0) {
+ ret = otx_cpt_try_create_default_eng_grps(cpt->pdev,
+ &cpt->eng_grps,
+ cpt->pf_type);
+ if (ret)
+ return ret;
+
+ cpt->vfs_enabled = numvfs;
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret) {
+ cpt->vfs_enabled = 0;
+ return ret;
+ }
+ otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, true);
+ try_module_get(THIS_MODULE);
+ ret = numvfs;
+ } else {
+ pci_disable_sriov(pdev);
+ otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, false);
+ module_put(THIS_MODULE);
+ cpt->vfs_enabled = 0;
+ }
+ dev_notice(&cpt->pdev->dev, "VFs enabled: %d\n", ret);
+
+ return ret;
+}
+
+static int otx_cpt_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct otx_cpt_device *cpt;
+ int err;
+
+ cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
+ if (!cpt)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, cpt);
+ cpt->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_clear_drvdata;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ /* MAP PF's configuration registers */
+ cpt->reg_base = pci_iomap(pdev, OTX_CPT_PF_PCI_CFG_BAR, 0);
+ if (!cpt->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* CPT device HW initialization */
+ err = otx_cpt_device_init(cpt);
+ if (err)
+ goto err_unmap_region;
+
+ /* Register interrupts */
+ err = otx_cpt_register_interrupts(cpt);
+ if (err)
+ goto err_unmap_region;
+
+ /* Initialize engine groups */
+ err = otx_cpt_init_eng_grps(pdev, &cpt->eng_grps, cpt->pf_type);
+ if (err)
+ goto err_unregister_interrupts;
+
+ return 0;
+
+err_unregister_interrupts:
+ otx_cpt_unregister_interrupts(cpt);
+err_unmap_region:
+ pci_iounmap(pdev, cpt->reg_base);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void otx_cpt_remove(struct pci_dev *pdev)
+{
+ struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
+
+ if (!cpt)
+ return;
+
+ /* Disable VFs */
+ pci_disable_sriov(pdev);
+ /* Cleanup engine groups */
+ otx_cpt_cleanup_eng_grps(pdev, &cpt->eng_grps);
+ /* Disable CPT PF interrupts */
+ otx_cpt_unregister_interrupts(cpt);
+ /* Disengage SE and AE cores from all groups */
+ otx_cpt_disable_all_cores(cpt);
+ pci_iounmap(pdev, cpt->reg_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx_cpt_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX_CPT_PCI_PF_DEVICE_ID) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx_cpt_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = otx_cpt_id_table,
+ .probe = otx_cpt_probe,
+ .remove = otx_cpt_remove,
+ .sriov_configure = otx_cpt_sriov_configure
+};
+
+module_pci_driver(otx_cpt_pci_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX CPT Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx_cpt_id_table);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
new file mode 100644
index 000000000000..a6774232e9a3
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx_cpt_common.h"
+#include "otx_cptpf.h"
+
+static char *get_mbox_opcode_str(int msg_opcode)
+{
+ char *str = "Unknown";
+
+ switch (msg_opcode) {
+ case OTX_CPT_MSG_VF_UP:
+ str = "UP";
+ break;
+
+ case OTX_CPT_MSG_VF_DOWN:
+ str = "DOWN";
+ break;
+
+ case OTX_CPT_MSG_READY:
+ str = "READY";
+ break;
+
+ case OTX_CPT_MSG_QLEN:
+ str = "QLEN";
+ break;
+
+ case OTX_CPT_MSG_QBIND_GRP:
+ str = "QBIND_GRP";
+ break;
+
+ case OTX_CPT_MSG_VQ_PRIORITY:
+ str = "VQ_PRIORITY";
+ break;
+
+ case OTX_CPT_MSG_PF_TYPE:
+ str = "PF_TYPE";
+ break;
+
+ case OTX_CPT_MSG_ACK:
+ str = "ACK";
+ break;
+
+ case OTX_CPT_MSG_NACK:
+ str = "NACK";
+ break;
+ }
+
+ return str;
+}
+
+static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
+{
+ char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];
+
+ hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
+ raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
+ if (vf_id >= 0)
+ pr_debug("MBOX opcode %s received from VF%d raw_data %s",
+ get_mbox_opcode_str(mbox_msg->msg), vf_id,
+ raw_data_str);
+ else
+ pr_debug("MBOX opcode %s received from PF raw_data %s",
+ get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
+}
+
+static void otx_cpt_send_msg_to_vf(struct otx_cpt_device *cpt, int vf,
+ struct otx_cpt_mbox *mbx)
+{
+ /* Writing mbox(0) causes interrupt */
+ writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
+ writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
+}
+
+/*
+ * ACKs VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void otx_cpt_mbox_send_ack(struct otx_cpt_device *cpt, int vf,
+ struct otx_cpt_mbox *mbx)
+{
+ mbx->data = 0ull;
+ mbx->msg = OTX_CPT_MSG_ACK;
+ otx_cpt_send_msg_to_vf(cpt, vf, mbx);
+}
+
+/* NACKs a VF's mailbox message to indicate the PF cannot complete the action */
+static void otx_cptpf_mbox_send_nack(struct otx_cpt_device *cpt, int vf,
+ struct otx_cpt_mbox *mbx)
+{
+ mbx->data = 0ull;
+ mbx->msg = OTX_CPT_MSG_NACK;
+ otx_cpt_send_msg_to_vf(cpt, vf, mbx);
+}
+
+static void otx_cpt_clear_mbox_intr(struct otx_cpt_device *cpt, u32 vf)
+{
+ /* W1C for the VF */
+ writeq(1ull << vf, cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
+}
+
+/*
+ * Configure QLEN/Chunk sizes for VF
+ */
+static void otx_cpt_cfg_qlen_for_vf(struct otx_cpt_device *cpt, int vf,
+ u32 size)
+{
+ union otx_cptx_pf_qx_ctl pf_qx_ctl;
+
+ pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+ pf_qx_ctl.s.size = size;
+ pf_qx_ctl.s.cont_err = true;
+ writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+}
+
+/*
+ * Configure VQ priority
+ */
+static void otx_cpt_cfg_vq_priority(struct otx_cpt_device *cpt, int vf, u32 pri)
+{
+ union otx_cptx_pf_qx_ctl pf_qx_ctl;
+
+ pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+ pf_qx_ctl.s.pri = pri;
+ writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
+}
+
+static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
+{
+ struct device *dev = &cpt->pdev->dev;
+ struct otx_cpt_eng_grp_info *eng_grp;
+ union otx_cptx_pf_qx_ctl pf_qx_ctl;
+ struct otx_cpt_ucode *ucode;
+
+ if (q >= cpt->max_vfs) {
+ dev_err(dev, "Requested queue %d is > than maximum avail %d",
+ q, cpt->max_vfs);
+ return -EINVAL;
+ }
+
+ if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
+ dev_err(dev, "Requested group %d is > than maximum avail %d",
+ grp, OTX_CPT_MAX_ENGINE_GROUPS);
+ return -EINVAL;
+ }
+
+ eng_grp = &cpt->eng_grps.grp[grp];
+ if (!eng_grp->is_enabled) {
+ dev_err(dev, "Requested engine group %d is disabled", grp);
+ return -EINVAL;
+ }
+
+ pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
+ pf_qx_ctl.s.grp = grp;
+ writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+
+ if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_SE_TYPES))
+ return OTX_CPT_SE_TYPES;
+ else if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_AE_TYPES))
+ return OTX_CPT_AE_TYPES;
+ else
+ return BAD_OTX_CPTVF_TYPE;
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
+{
+ int vftype = 0;
+ struct otx_cpt_mbox mbx = {};
+ struct device *dev = &cpt->pdev->dev;
+ /*
+ * MBOX[0] contains msg
+ * MBOX[1] contains data
+ */
+ mbx.msg = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
+ mbx.data = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
+
+ dump_mbox_msg(&mbx, vf);
+
+ switch (mbx.msg) {
+ case OTX_CPT_MSG_VF_UP:
+ mbx.msg = OTX_CPT_MSG_VF_UP;
+ mbx.data = cpt->vfs_enabled;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_READY:
+ mbx.msg = OTX_CPT_MSG_READY;
+ mbx.data = vf;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_VF_DOWN:
+ /* First msg in VF teardown sequence */
+ otx_cpt_mbox_send_ack(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_QLEN:
+ otx_cpt_cfg_qlen_for_vf(cpt, vf, mbx.data);
+ otx_cpt_mbox_send_ack(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_QBIND_GRP:
+ vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
+ if ((vftype != OTX_CPT_AE_TYPES) &&
+ (vftype != OTX_CPT_SE_TYPES)) {
+ dev_err(dev, "VF%d binding to eng group %llu failed",
+ vf, mbx.data);
+ otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
+ } else {
+ mbx.msg = OTX_CPT_MSG_QBIND_GRP;
+ mbx.data = vftype;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ }
+ break;
+ case OTX_CPT_MSG_PF_TYPE:
+ mbx.msg = OTX_CPT_MSG_PF_TYPE;
+ mbx.data = cpt->pf_type;
+ otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
+ break;
+ case OTX_CPT_MSG_VQ_PRIORITY:
+ otx_cpt_cfg_vq_priority(cpt, vf, mbx.data);
+ otx_cpt_mbox_send_ack(cpt, vf, &mbx);
+ break;
+ default:
+ dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
+ vf, mbx.msg);
+ break;
+ }
+}
+
+void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx)
+{
+ u64 intr;
+ u8 vf;
+
+ intr = readq(cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
+ pr_debug("PF interrupt mbox%d mask 0x%llx\n", mbx, intr);
+ for (vf = 0; vf < cpt->max_vfs; vf++) {
+ if (intr & (1ULL << vf)) {
+ otx_cpt_handle_mbox_intr(cpt, vf);
+ otx_cpt_clear_mbox_intr(cpt, vf);
+ }
+ }
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
new file mode 100644
index 000000000000..d04baa319592
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -0,0 +1,1686 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include "otx_cpt_common.h"
+#include "otx_cptpf_ucode.h"
+#include "otx_cptpf.h"
+
+#define CSR_DELAY 30
+/* Tar archive defines */
+#define TAR_MAGIC "ustar"
+#define TAR_MAGIC_LEN 6
+#define TAR_BLOCK_LEN 512
+#define REGTYPE '0'
+#define AREGTYPE '\0'
+
+/* tar header as defined in POSIX 1003.1-1990. */
+struct tar_hdr_t {
+ char name[100];
+ char mode[8];
+ char uid[8];
+ char gid[8];
+ char size[12];
+ char mtime[12];
+ char chksum[8];
+ char typeflag;
+ char linkname[100];
+ char magic[6];
+ char version[2];
+ char uname[32];
+ char gname[32];
+ char devmajor[8];
+ char devminor[8];
+ char prefix[155];
+};
+
+struct tar_blk_t {
+ union {
+ struct tar_hdr_t hdr;
+ char block[TAR_BLOCK_LEN];
+ };
+};
+
+struct tar_arch_info_t {
+ struct list_head ucodes;
+ const struct firmware *fw;
+};
+
+static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ struct otx_cpt_bitmap bmap = { {0} };
+ bool found = false;
+ int i;
+
+ if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
+ dev_err(dev, "unsupported number of engines %d on octeontx",
+ eng_grp->g->engs_num);
+ return bmap;
+ }
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (eng_grp->engs[i].type) {
+ bitmap_or(bmap.bits, bmap.bits,
+ eng_grp->engs[i].bmap,
+ eng_grp->g->engs_num);
+ bmap.size = eng_grp->g->engs_num;
+ found = true;
+ }
+ }
+
+ if (!found)
+ dev_err(dev, "No engines reserved for engine group %d",
+ eng_grp->idx);
+ return bmap;
+}
+
+static int is_eng_type(int val, int eng_type)
+{
+ return val & (1 << eng_type);
+}
+
+static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
+ int eng_type)
+{
+ return is_eng_type(eng_grps->eng_types_supported, eng_type);
+}
+
+static void set_ucode_filename(struct otx_cpt_ucode *ucode,
+ const char *filename)
+{
+ strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
+}
+
+static char *get_eng_type_str(int eng_type)
+{
+ char *str = "unknown";
+
+ switch (eng_type) {
+ case OTX_CPT_SE_TYPES:
+ str = "SE";
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static char *get_ucode_type_str(int ucode_type)
+{
+ char *str = "unknown";
+
+ switch (ucode_type) {
+ case (1 << OTX_CPT_SE_TYPES):
+ str = "SE";
+ break;
+
+ case (1 << OTX_CPT_AE_TYPES):
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
+{
+ char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
+ u32 i, val = 0;
+ u8 nn;
+
+ strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ for (i = 0; i < strlen(tmp_ver_str); i++)
+ tmp_ver_str[i] = tolower(tmp_ver_str[i]);
+
+ nn = ucode_hdr->ver_num.nn;
+ if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
+ nn == OTX_CPT_SE_UC_TYPE3))
+ val |= 1 << OTX_CPT_SE_TYPES;
+ if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
+ nn == OTX_CPT_AE_UC_TYPE)
+ val |= 1 << OTX_CPT_AE_TYPES;
+
+ *ucode_type = val;
+
+ if (!val)
+ return -EINVAL;
+ if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
+ is_eng_type(val, OTX_CPT_SE_TYPES))
+ return -EINVAL;
+ return 0;
+}
+
+static int is_mem_zero(const char *ptr, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (ptr[i])
+ return 0;
+ }
+ return 1;
+}
+
+static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
+{
+ struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
+ dma_addr_t dma_addr;
+ struct otx_cpt_bitmap bmap;
+ int i;
+
+ bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ if (eng_grp->mirror.is_ena)
+ dma_addr =
+ eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
+ else
+ dma_addr = eng_grp->ucode[0].align_dma;
+
+ /*
+ * Set UCODE_BASE only for the cores which are not used;
+ * other cores should already have a valid UCODE_BASE set
+ */
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ if (!eng_grp->g->eng_ref_cnt[i])
+ writeq((u64) dma_addr, cpt->reg_base +
+ OTX_CPT_PF_ENGX_UCODE_BASE(i));
+ return 0;
+}
+
+static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
+ struct otx_cpt_bitmap bmap = { {0} };
+ int timeout = 10;
+ int i, busy;
+ u64 reg;
+
+ bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Detach the cores from group */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (reg & (1ull << i)) {
+ eng_grp->g->eng_ref_cnt[i]--;
+ reg &= ~(1ull << i);
+ }
+ }
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ if (reg & (1ull << i)) {
+ busy = 1;
+ break;
+ }
+ } while (busy);
+
+ /* Disable the cores only if they are not used anymore */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ if (!eng_grp->g->eng_ref_cnt[i])
+ reg &= ~(1ull << i);
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+
+ return 0;
+}
+
+static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
+ struct otx_cpt_bitmap bmap;
+ u64 reg;
+ int i;
+
+ bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Attach the cores to the group */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (!(reg & (1ull << i))) {
+ eng_grp->g->eng_ref_cnt[i]++;
+ reg |= 1ull << i;
+ }
+ }
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
+
+ /* Enable the cores */
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+ for_each_set_bit(i, bmap.bits, bmap.size)
+ reg |= 1ull << i;
+ writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+
+ return 0;
+}
+
+static int process_tar_file(struct device *dev,
+ struct tar_arch_info_t *tar_arch, char *filename,
+ const u8 *data, u32 size)
+{
+ struct tar_ucode_info_t *tar_info;
+ struct otx_cpt_ucode_hdr *ucode_hdr;
+ int ucode_type, ucode_size;
+
+ /*
+ * If the size is less than the microcode header size, don't report
+ * an error, because it might not be a microcode file; just process
+ * the next file from the archive
+ */
+ if (size < sizeof(struct otx_cpt_ucode_hdr))
+ return 0;
+
+ ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
+ /*
+ * If the microcode version can't be found, don't report an error,
+ * because it might not be a microcode file; just process the next file
+ */
+ if (get_ucode_type(ucode_hdr, &ucode_type))
+ return 0;
+
+ ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode_size || (size < round_up(ucode_size, 16) +
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
+ dev_err(dev, "Ucode %s invalid size", filename);
+ return -EINVAL;
+ }
+
+ tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
+ if (!tar_info)
+ return -ENOMEM;
+
+ tar_info->ucode_ptr = data;
+ set_ucode_filename(&tar_info->ucode, filename);
+ memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
+ OTX_CPT_UCODE_VER_STR_SZ);
+ tar_info->ucode.ver_num = ucode_hdr->ver_num;
+ tar_info->ucode.type = ucode_type;
+ tar_info->ucode.size = ucode_size;
+ list_add_tail(&tar_info->list, &tar_arch->ucodes);
+
+ return 0;
+}
+
+static void release_tar_archive(struct tar_arch_info_t *tar_arch)
+{
+ struct tar_ucode_info_t *curr, *temp;
+
+ if (!tar_arch)
+ return;
+
+ list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
+ list_del(&curr->list);
+ kfree(curr);
+ }
+
+ if (tar_arch->fw)
+ release_firmware(tar_arch->fw);
+ kfree(tar_arch);
+}
+
+static struct tar_ucode_info_t *get_uc_from_tar_archive(
+ struct tar_arch_info_t *tar_arch,
+ int ucode_type)
+{
+ struct tar_ucode_info_t *curr, *uc_found = NULL;
+
+ list_for_each_entry(curr, &tar_arch->ucodes, list) {
+ if (!is_eng_type(curr->ucode.type, ucode_type))
+ continue;
+
+ if (!uc_found) {
+ uc_found = curr;
+ continue;
+ }
+
+ switch (ucode_type) {
+ case OTX_CPT_AE_TYPES:
+ break;
+
+ case OTX_CPT_SE_TYPES:
+ if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
+ (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
+ && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
+ uc_found = curr;
+ break;
+ }
+ }
+
+ return uc_found;
+}
+
+static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
+ char *tar_filename)
+{
+ struct tar_ucode_info_t *curr;
+
+ pr_debug("Tar archive filename %s", tar_filename);
+ pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data,
+ tar_arch->fw->size);
+ list_for_each_entry(curr, &tar_arch->ucodes, list) {
+ pr_debug("Ucode filename %s", curr->ucode.filename);
+ pr_debug("Ucode version string %s", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d",
+ curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
+ curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
+ pr_debug("Ucode type (%d) %s", curr->ucode.type,
+ get_ucode_type_str(curr->ucode.type));
+ pr_debug("Ucode size %d", curr->ucode.size);
+ pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
+ }
+}
+
+static struct tar_arch_info_t *load_tar_archive(struct device *dev,
+ char *tar_filename)
+{
+ struct tar_arch_info_t *tar_arch = NULL;
+ struct tar_blk_t *tar_blk;
+ unsigned int cur_size;
+ size_t tar_offs = 0;
+ size_t tar_size;
+ int ret;
+
+ tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
+ if (!tar_arch)
+ return NULL;
+
+ INIT_LIST_HEAD(&tar_arch->ucodes);
+
+ /* Load tar archive */
+ ret = request_firmware(&tar_arch->fw, tar_filename, dev);
+ if (ret)
+ goto release_tar_arch;
+
+ if (tar_arch->fw->size < TAR_BLOCK_LEN) {
+ dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ goto release_tar_arch;
+ }
+
+ tar_size = tar_arch->fw->size;
+ tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
+ if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
+ dev_err(dev, "Unsupported format of tar archive %s",
+ tar_filename);
+ goto release_tar_arch;
+ }
+
+ while (1) {
+ /* Read current file size */
+ ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
+ if (ret)
+ goto release_tar_arch;
+
+ if (tar_offs + cur_size > tar_size ||
+ tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
+ dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ goto release_tar_arch;
+ }
+
+ tar_offs += TAR_BLOCK_LEN;
+ if (tar_blk->hdr.typeflag == REGTYPE ||
+ tar_blk->hdr.typeflag == AREGTYPE) {
+ ret = process_tar_file(dev, tar_arch,
+ tar_blk->hdr.name,
+ &tar_arch->fw->data[tar_offs],
+ cur_size);
+ if (ret)
+ goto release_tar_arch;
+ }
+
+ tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
+ if (cur_size % TAR_BLOCK_LEN)
+ tar_offs += TAR_BLOCK_LEN;
+
+ /* Check for the end of the archive */
+ if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
+ dev_err(dev, "Invalid tar archive %s ", tar_filename);
+ goto release_tar_arch;
+ }
+
+ if (is_mem_zero(&tar_arch->fw->data[tar_offs],
+ 2*TAR_BLOCK_LEN))
+ break;
+
+ /* Read next block from tar archive */
+ tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
+ }
+
+ print_tar_dbg_info(tar_arch, tar_filename);
+ return tar_arch;
+release_tar_arch:
+ release_tar_archive(tar_arch);
+ return NULL;
+}
+
+static struct otx_cpt_engs_rsvd *find_engines_by_type(
+ struct otx_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ int i;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ if (eng_grp->engs[i].type == eng_type)
+ return &eng_grp->engs[i];
+ }
+ return NULL;
+}
+
+int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
+{
+ return is_eng_type(ucode->type, eng_type);
+}
+EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
+
+int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ struct otx_cpt_engs_rsvd *engs;
+
+ engs = find_engines_by_type(eng_grp, eng_type);
+
+ return (engs != NULL ? 1 : 0);
+}
+EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
+
+static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
+ char *buf, int size)
+{
+ if (eng_grp->mirror.is_ena) {
+ scnprintf(buf, size, "%s (shared with engine_group%d)",
+ eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
+ eng_grp->mirror.idx);
+ } else {
+ scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
+ }
+}
+
+static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
+ char *buf, int size, int idx)
+{
+ struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
+ struct otx_cpt_engs_rsvd *engs;
+ int len, i;
+
+ buf[0] = '\0';
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (idx != -1 && idx != i)
+ continue;
+
+ if (eng_grp->mirror.is_ena)
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ if (i > 0 && idx == -1) {
+ len = strlen(buf);
+ scnprintf(buf+len, size-len, ", ");
+ }
+
+ len = strlen(buf);
+ scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
+ engs->count + mirrored_engs->count : engs->count,
+ get_eng_type_str(engs->type));
+ if (mirrored_engs) {
+ len = strlen(buf);
+ scnprintf(buf+len, size-len,
+ "(%d shared with engine_group%d) ",
+ engs->count <= 0 ? engs->count +
+ mirrored_engs->count : mirrored_engs->count,
+ eng_grp->mirror.idx);
+ }
+ }
+}
+
+static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
+{
+ pr_debug("Ucode info");
+ pr_debug("Ucode version string %s", ucode->ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn,
+ ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
+ pr_debug("Ucode type %s", get_ucode_type_str(ucode->type));
+ pr_debug("Ucode size %d", ucode->size);
+ pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va);
+ pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
+}
+
+static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
+ struct device *dev, char *buf, int size)
+{
+ struct otx_cpt_bitmap bmap;
+ u32 mask[2];
+
+ bmap = get_cores_bmap(dev, eng_grp);
+ if (!bmap.size) {
+ scnprintf(buf, size, "unknown");
+ return;
+ }
+ bitmap_to_arr32(mask, bmap.bits, bmap.size);
+ scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
+}
+
+
+static void print_dbg_info(struct device *dev,
+ struct otx_cpt_eng_grps *eng_grps)
+{
+ char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
+ struct otx_cpt_eng_grp_info *mirrored_grp;
+ char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
+ struct otx_cpt_eng_grp_info *grp;
+ struct otx_cpt_engs_rsvd *engs;
+ u32 mask[4];
+ int i, j;
+
+ pr_debug("Engine groups global info");
+ pr_debug("max SE %d, max AE %d",
+ eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
+ pr_debug("free SE %d", eng_grps->avail.se_cnt);
+ pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ pr_debug("engine_group%d, state %s", i, grp->is_enabled ?
+ "enabled" : "disabled");
+ if (grp->is_enabled) {
+ mirrored_grp = &eng_grps->grp[grp->mirror.idx];
+ pr_debug("Ucode0 filename %s, version %s",
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].filename :
+ grp->ucode[0].filename,
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].ver_str :
+ grp->ucode[0].ver_str);
+ }
+
+ for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
+ engs = &grp->engs[j];
+ if (engs->type) {
+ print_engs_info(grp, engs_info,
+ 2*OTX_CPT_UCODE_NAME_LENGTH, j);
+ pr_debug("Slot%d: %s", j, engs_info);
+ bitmap_to_arr32(mask, engs->bmap,
+ eng_grps->engs_num);
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ mask[3], mask[2], mask[1], mask[0]);
+ } else
+ pr_debug("Slot%d not used", j);
+ }
+ if (grp->is_enabled) {
+ cpt_print_engines_mask(grp, dev, engs_mask,
+ OTX_CPT_UCODE_NAME_LENGTH);
+ pr_debug("Cmask: %s", engs_mask);
+ }
+ }
+}
+
+static int update_engines_avail_count(struct device *dev,
+ struct otx_cpt_engs_available *avail,
+ struct otx_cpt_engs_rsvd *engs, int val)
+{
+ switch (engs->type) {
+ case OTX_CPT_SE_TYPES:
+ avail->se_cnt += val;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ avail->ae_cnt += val;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int update_engines_offset(struct device *dev,
+ struct otx_cpt_engs_available *avail,
+ struct otx_cpt_engs_rsvd *engs)
+{
+ switch (engs->type) {
+ case OTX_CPT_SE_TYPES:
+ engs->offset = 0;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ engs->offset = avail->max_se_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type)
+ continue;
+
+ if (grp->engs[i].count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail,
+ &grp->engs[i],
+ grp->engs[i].count);
+ if (ret)
+ return ret;
+ }
+
+ grp->engs[i].type = 0;
+ grp->engs[i].count = 0;
+ grp->engs[i].offset = 0;
+ grp->engs[i].ucode = NULL;
+ bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
+ }
+
+ return 0;
+}
+
+static int do_reserve_engines(struct device *dev,
+ struct otx_cpt_eng_grp_info *grp,
+ struct otx_cpt_engines *req_engs)
+{
+ struct otx_cpt_engs_rsvd *engs = NULL;
+ int i, ret;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type) {
+ engs = &grp->engs[i];
+ break;
+ }
+ }
+
+ if (!engs)
+ return -ENOMEM;
+
+ engs->type = req_engs->type;
+ engs->count = req_engs->count;
+
+ ret = update_engines_offset(dev, &grp->g->avail, engs);
+ if (ret)
+ return ret;
+
+ if (engs->count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail, engs,
+ -engs->count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int check_engines_availability(struct device *dev,
+ struct otx_cpt_eng_grp_info *grp,
+ struct otx_cpt_engines *req_eng)
+{
+ int avail_cnt = 0;
+
+ switch (req_eng->type) {
+ case OTX_CPT_SE_TYPES:
+ avail_cnt = grp->g->avail.se_cnt;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ avail_cnt = grp->g->avail.ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", req_eng->type);
+ return -EINVAL;
+ }
+
+ if (avail_cnt < req_eng->count) {
+		dev_err(dev,
+			"Error: available %s engines %d less than requested %d",
+			get_eng_type_str(req_eng->type),
+			avail_cnt, req_eng->count);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
+ struct otx_cpt_engines *req_engs, int req_cnt)
+{
+ int i, ret;
+
+	/* Validate that the requested number of engines is available */
+ for (i = 0; i < req_cnt; i++) {
+ ret = check_engines_availability(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Reserve requested engines for this engine group */
+ for (i = 0; i < req_cnt; i++) {
+ ret = do_reserve_engines(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static ssize_t eng_grp_info_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
+ char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
+ char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
+ struct otx_cpt_eng_grp_info *eng_grp;
+ int ret;
+
+ eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
+ mutex_lock(&eng_grp->g->lock);
+
+ print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
+ print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
+ cpt_print_engines_mask(eng_grp, dev, engs_mask,
+ OTX_CPT_UCODE_NAME_LENGTH);
+ ret = scnprintf(buf, PAGE_SIZE,
+ "Microcode : %s\nEngines: %s\nEngines mask: %s\n",
+ ucode_info, engs_info, engs_mask);
+
+ mutex_unlock(&eng_grp->g->lock);
+ return ret;
+}
+
+static int create_sysfs_eng_grps_info(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ int ret;
+
+ eng_grp->info_attr.show = eng_grp_info_show;
+ eng_grp->info_attr.store = NULL;
+ eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
+ eng_grp->info_attr.attr.mode = 0440;
+ sysfs_attr_init(&eng_grp->info_attr.attr);
+ ret = device_create_file(dev, &eng_grp->info_attr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
+{
+ if (ucode->va) {
+ dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
+ ucode->va, ucode->dma);
+ ucode->va = NULL;
+ ucode->align_va = NULL;
+ ucode->dma = 0;
+ ucode->align_dma = 0;
+ ucode->size = 0;
+ }
+
+ memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
+ memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
+ set_ucode_filename(ucode, "");
+ ucode->type = 0;
+}
+
+static int copy_ucode_to_dma_mem(struct device *dev,
+ struct otx_cpt_ucode *ucode,
+ const u8 *ucode_data)
+{
+ u32 i;
+
+ /* Allocate DMAable space */
+ ucode->va = dma_alloc_coherent(dev, ucode->size +
+ OTX_CPT_UCODE_ALIGNMENT,
+ &ucode->dma, GFP_KERNEL);
+ if (!ucode->va) {
+ dev_err(dev, "Unable to allocate space for microcode");
+ return -ENOMEM;
+ }
+ ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
+ ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);
+
+ memcpy((void *) ucode->align_va, (void *) ucode_data +
+ sizeof(struct otx_cpt_ucode_hdr), ucode->size);
+
+ /* Byte swap 64-bit */
+ for (i = 0; i < (ucode->size / 8); i++)
+ ((u64 *)ucode->align_va)[i] =
+ cpu_to_be64(((u64 *)ucode->align_va)[i]);
+ /* Ucode needs 16-bit swap */
+ for (i = 0; i < (ucode->size / 2); i++)
+ ((u16 *)ucode->align_va)[i] =
+ cpu_to_be16(((u16 *)ucode->align_va)[i]);
+ return 0;
+}
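+
+/*
+ * Net effect of the two swaps in copy_ucode_to_dma_mem() on a little-endian
+ * host (informative; cpu_to_be64/cpu_to_be16 are no-ops on big-endian): the
+ * 64-bit swap reverses all eight bytes of each doubleword, and the following
+ * 16-bit swap restores the byte order inside each halfword, so halfwords
+ * h0 h1 h2 h3 end up as h3 h2 h1 h0 with the byte order within each halfword
+ * preserved.
+ */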
+
+static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
+ const char *ucode_filename)
+{
+ struct otx_cpt_ucode_hdr *ucode_hdr;
+ const struct firmware *fw;
+ int ret;
+
+ set_ucode_filename(ucode, ucode_filename);
+ ret = request_firmware(&fw, ucode->filename, dev);
+ if (ret)
+ return ret;
+
+ ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
+ memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ ucode->ver_num = ucode_hdr->ver_num;
+ ucode->size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
+ dev_err(dev, "Ucode %s invalid size", ucode_filename);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = get_ucode_type(ucode_hdr, &ucode->type);
+ if (ret) {
+ dev_err(dev, "Microcode %s unknown type 0x%x", ucode->filename,
+ ucode->type);
+ goto release_fw;
+ }
+
+ ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
+ if (ret)
+ goto release_fw;
+
+ print_ucode_dbg_info(ucode);
+release_fw:
+ release_firmware(fw);
+ return ret;
+}
+
+static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int ret;
+
+ ret = cpt_set_ucode_base(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ ret = cpt_attach_and_enable_cores(eng_grp, obj);
+ return ret;
+}
+
+static int disable_eng_grp(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int i, ret;
+
+ ret = cpt_detach_and_disable_cores(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Unload ucode used by this engine group */
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ eng_grp->engs[i].ucode = &eng_grp->ucode[0];
+ }
+
+ ret = cpt_set_ucode_base(eng_grp, obj);
+
+ return ret;
+}
+
+static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
+ struct otx_cpt_eng_grp_info *src_grp)
+{
+ /* Setup fields for engine group which is mirrored */
+ src_grp->mirror.is_ena = false;
+ src_grp->mirror.idx = 0;
+ src_grp->mirror.ref_count++;
+
+ /* Setup fields for mirroring engine group */
+ dst_grp->mirror.is_ena = true;
+ dst_grp->mirror.idx = src_grp->idx;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
+{
+ struct otx_cpt_eng_grp_info *src_grp;
+
+ if (!dst_grp->mirror.is_ena)
+ return;
+
+ src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
+
+ src_grp->mirror.ref_count--;
+ dst_grp->mirror.is_ena = false;
+ dst_grp->mirror.idx = 0;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
+ struct otx_cpt_engines *engs, int engs_cnt)
+{
+ struct otx_cpt_engs_rsvd *mirrored_engs;
+ int i;
+
+ for (i = 0; i < engs_cnt; i++) {
+ mirrored_engs = find_engines_by_type(mirrored_eng_grp,
+ engs[i].type);
+ if (!mirrored_engs)
+ continue;
+
+		/*
+		 * If the mirrored group has this type of engines attached
+		 * then there are 3 possible scenarios:
+		 * 1) mirrored_engs.count == engs[i].count: all engines from
+		 * the mirrored engine group will be shared with this engine
+		 * group
+		 * 2) mirrored_engs.count > engs[i].count: only a subset of
+		 * the engines from the mirrored engine group will be shared
+		 * with this engine group
+		 * 3) mirrored_engs.count < engs[i].count: all engines from
+		 * the mirrored engine group will be shared with this group
+		 * and additional engines will be reserved for exclusive use
+		 * by this engine group
+		 */
+ engs[i].count -= mirrored_engs->count;
+ }
+}
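+
+/*
+ * Worked example for update_requested_engs() (informative): if the mirrored
+ * group holds 10 SE engines and this group requests 12, engs[i].count
+ * becomes 2, i.e. 10 engines are shared and 2 extra engines are reserved
+ * exclusively (scenario 3).  A request for 8 would leave count at -2, which
+ * eng_grp_update_masks() later uses to share only a subset of the mirrored
+ * bitmap (scenario 2).
+ */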
+
+static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
+ struct otx_cpt_eng_grp_info *grp)
+{
+ struct otx_cpt_eng_grps *eng_grps = grp->g;
+ int i;
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ continue;
+ if (eng_grps->grp[i].ucode[0].type)
+ continue;
+ if (grp->idx == i)
+ continue;
+ if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
+ grp->ucode[0].ver_str,
+ OTX_CPT_UCODE_VER_STR_SZ))
+ return &eng_grps->grp[i];
+ }
+
+ return NULL;
+}
+
+static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
+ struct otx_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ return &eng_grps->grp[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_update_masks(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
+ struct otx_cpt_bitmap tmp_bmap = { {0} };
+ int i, j, cnt, max_cnt;
+ int bit;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (engs->count <= 0)
+ continue;
+
+ switch (engs->type) {
+ case OTX_CPT_SE_TYPES:
+ max_cnt = eng_grp->g->avail.max_se_cnt;
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d", engs->type);
+ return -EINVAL;
+ }
+
+ cnt = engs->count;
+ WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
+ bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
+ for (j = engs->offset; j < engs->offset + max_cnt; j++) {
+ if (!eng_grp->g->eng_ref_cnt[j]) {
+ bitmap_set(tmp_bmap.bits, j, 1);
+ cnt--;
+ if (!cnt)
+ break;
+ }
+ }
+
+ if (cnt)
+ return -ENOSPC;
+
+ bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
+ }
+
+ if (!eng_grp->mirror.is_ena)
+ return 0;
+
+ for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ WARN_ON(!mirrored_engs && engs->count <= 0);
+ if (!mirrored_engs)
+ continue;
+
+ bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ if (engs->count < 0) {
+ bit = find_first_bit(mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ bitmap_clear(tmp_bmap.bits, bit, -engs->count);
+ }
+ bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
+ eng_grp->g->engs_num);
+ }
+ return 0;
+}
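+
+/*
+ * Engine numbering assumed by eng_grp_update_masks() (informative): SE
+ * engines occupy bits [0, max_se_cnt) and AE engines occupy bits
+ * [max_se_cnt, max_se_cnt + max_ae_cnt) of the per-group bitmaps, which is
+ * exactly the offset assignment made in update_engines_offset().
+ */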
+
+static int delete_engine_group(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp)
+{
+ int i, ret;
+
+ if (!eng_grp->is_enabled)
+ return -EINVAL;
+
+ if (eng_grp->mirror.ref_count) {
+ dev_err(dev, "Can't delete engine_group%d as it is used by:",
+ eng_grp->idx);
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (eng_grp->g->grp[i].mirror.is_ena &&
+ eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
+ dev_err(dev, "engine_group%d", i);
+ }
+ return -EINVAL;
+ }
+
+ /* Removing engine group mirroring if enabled */
+ remove_eng_grp_mirroring(eng_grp);
+
+ /* Disable engine group */
+ ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
+ if (ret)
+ return ret;
+
+ /* Release all engines held by this engine group */
+ ret = release_engines(dev, eng_grp);
+ if (ret)
+ return ret;
+
+ device_remove_file(dev, &eng_grp->info_attr);
+ eng_grp->is_enabled = false;
+
+ return 0;
+}
+
+static int validate_1_ucode_scenario(struct device *dev,
+ struct otx_cpt_eng_grp_info *eng_grp,
+ struct otx_cpt_engines *engs, int engs_cnt)
+{
+ int i;
+
+ /* Verify that ucode loaded supports requested engine types */
+ for (i = 0; i < engs_cnt; i++) {
+ if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
+ engs[i].type)) {
+ dev_err(dev,
+ "Microcode %s does not support %s engines",
+ eng_grp->ucode[0].filename,
+ get_eng_type_str(engs[i].type));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
+{
+ struct otx_cpt_ucode *ucode;
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+ WARN_ON(!eng_grp->engs[0].type);
+ eng_grp->engs[0].ucode = ucode;
+}
+
+static int create_engine_group(struct device *dev,
+ struct otx_cpt_eng_grps *eng_grps,
+ struct otx_cpt_engines *engs, int engs_cnt,
+ void *ucode_data[], int ucodes_cnt,
+ bool use_uc_from_tar_arch)
+{
+ struct otx_cpt_eng_grp_info *mirrored_eng_grp;
+ struct tar_ucode_info_t *tar_info;
+ struct otx_cpt_eng_grp_info *eng_grp;
+ int i, ret = 0;
+
+ if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
+ return -EINVAL;
+
+ /* Validate if requested engine types are supported by this device */
+ for (i = 0; i < engs_cnt; i++)
+ if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
+ dev_err(dev, "Device does not support %s engines",
+ get_eng_type_str(engs[i].type));
+ return -EPERM;
+ }
+
+ /* Find engine group which is not used */
+ eng_grp = find_unused_eng_grp(eng_grps);
+ if (!eng_grp) {
+ dev_err(dev, "Error all engine groups are being used");
+ return -ENOSPC;
+ }
+
+ /* Load ucode */
+ for (i = 0; i < ucodes_cnt; i++) {
+ if (use_uc_from_tar_arch) {
+ tar_info = (struct tar_ucode_info_t *) ucode_data[i];
+ eng_grp->ucode[i] = tar_info->ucode;
+ ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
+ tar_info->ucode_ptr);
+ } else
+ ret = ucode_load(dev, &eng_grp->ucode[i],
+ (char *) ucode_data[i]);
+ if (ret)
+ goto err_ucode_unload;
+ }
+
+ /* Validate scenario where 1 ucode is used */
+ ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
+ if (ret)
+ goto err_ucode_unload;
+
+ /* Check if this group mirrors another existing engine group */
+ mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
+ if (mirrored_eng_grp) {
+ /* Setup mirroring */
+ setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
+
+ /*
+ * Update count of requested engines because some
+ * of them might be shared with mirrored group
+ */
+ update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
+ }
+
+ /* Reserve engines */
+ ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
+ if (ret)
+ goto err_ucode_unload;
+
+ /* Update ucode pointers used by engines */
+ update_ucode_ptrs(eng_grp);
+
+ /* Update engine masks used by this group */
+ ret = eng_grp_update_masks(dev, eng_grp);
+ if (ret)
+ goto err_release_engs;
+
+ /* Create sysfs entry for engine group info */
+ ret = create_sysfs_eng_grps_info(dev, eng_grp);
+ if (ret)
+ goto err_release_engs;
+
+ /* Enable engine group */
+ ret = enable_eng_grp(eng_grp, eng_grps->obj);
+ if (ret)
+ goto err_release_engs;
+
+ /*
+ * If this engine group mirrors another engine group
+ * then we need to unload ucode as we will use ucode
+ * from mirrored engine group
+ */
+ if (eng_grp->mirror.is_ena)
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ eng_grp->is_enabled = true;
+ if (eng_grp->mirror.is_ena)
+ dev_info(dev,
+ "Engine_group%d: reuse microcode %s from group %d",
+ eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
+ mirrored_eng_grp->idx);
+ else
+ dev_info(dev, "Engine_group%d: microcode loaded %s",
+ eng_grp->idx, eng_grp->ucode[0].ver_str);
+
+ return 0;
+
+err_release_engs:
+ release_engines(dev, eng_grp);
+err_ucode_unload:
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ return ret;
+}
+
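+/*
+ * Format accepted by the ucode_load sysfs entry, as parsed below
+ * (informative summary):
+ *
+ *   "se:<count>;<ucode filename>"	- create a group with SE engines
+ *   "ae:<count>;<ucode filename>"	- create a group with AE engines
+ *   "engine_group<N>:null"		- delete engine group N
+ *
+ * Engine-count tokens must precede the filename and deletion cannot be
+ * combined with creation.  E.g. writing "se:16;ucode-se.out" would create
+ * an SE group with 16 engines ("ucode-se.out" is only an example name).
+ */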
+static ssize_t ucode_load_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
+ char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
+ char *start, *val, *err_msg, *tmp;
+ struct otx_cpt_eng_grps *eng_grps;
+ int grp_idx = 0, ret = -EINVAL;
+ bool has_se, has_ie, has_ae;
+ int del_grp_idx = -1;
+ int ucode_idx = 0;
+
+ if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
+ return -EINVAL;
+
+ eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
+ err_msg = "Invalid engine group format";
+ strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
+ start = tmp_buf;
+
+ has_se = has_ie = has_ae = false;
+
+ for (;;) {
+ val = strsep(&start, ";");
+ if (!val)
+ break;
+ val = strim(val);
+ if (!*val)
+ continue;
+
+ if (!strncasecmp(val, "engine_group", 12)) {
+ if (del_grp_idx != -1)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 13)
+ goto err_print;
+ if (kstrtoint((tmp + 12), 10, &del_grp_idx))
+ goto err_print;
+ val = strim(val);
+ if (strncasecmp(val, "null", 4))
+ goto err_print;
+ if (strlen(val) != 4)
+ goto err_print;
+ } else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
+ if (has_se || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX_CPT_SE_TYPES;
+ has_se = true;
+ } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
+ if (has_ae || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX_CPT_AE_TYPES;
+ has_ae = true;
+ } else {
+ if (ucode_idx > 1)
+ goto err_print;
+ if (!strlen(val))
+ goto err_print;
+ if (strnstr(val, " ", strlen(val)))
+ goto err_print;
+ ucode_filename[ucode_idx++] = val;
+ }
+ }
+
+ /* Validate input parameters */
+ if (del_grp_idx == -1) {
+ if (!(grp_idx && ucode_idx))
+ goto err_print;
+
+ if (ucode_idx > 1 && grp_idx < 2)
+ goto err_print;
+
+ if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
+			err_msg = "Error: max engine types per group exceeded";
+ goto err_print;
+ }
+
+ } else {
+ if (del_grp_idx < 0 ||
+ del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
+ dev_err(dev, "Invalid engine group index %d",
+ del_grp_idx);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if (!eng_grps->grp[del_grp_idx].is_enabled) {
+ dev_err(dev, "Error engine_group%d is not configured",
+ del_grp_idx);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if (grp_idx || ucode_idx)
+ goto err_print;
+ }
+
+ mutex_lock(&eng_grps->lock);
+
+ if (eng_grps->is_rdonly) {
+ dev_err(dev, "Disable VFs before modifying engine groups\n");
+ ret = -EACCES;
+ goto err_unlock;
+ }
+
+ if (del_grp_idx == -1)
+ /* create engine group */
+ ret = create_engine_group(dev, eng_grps, engs, grp_idx,
+ (void **) ucode_filename,
+ ucode_idx, false);
+ else
+ /* delete engine group */
+ ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
+ if (ret)
+ goto err_unlock;
+
+ print_dbg_info(dev, eng_grps);
+err_unlock:
+ mutex_unlock(&eng_grps->lock);
+ return ret ? ret : count;
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+
+ return ret;
+}
+
+int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps,
+ int pf_type)
+{
+ struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 };
+ struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct tar_arch_info_t *tar_arch = NULL;
+ char *tar_filename;
+ int i, ret = 0;
+
+ mutex_lock(&eng_grps->lock);
+
+ /*
+	 * We don't create an engine group for kernel crypto if an attempt to
+	 * create it was already made (i.e. when the user enabled VFs for the
+	 * first time)
+ */
+ if (eng_grps->is_first_try)
+ goto unlock_mutex;
+ eng_grps->is_first_try = true;
+
+ /* We create group for kcrypto only if no groups are configured */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].is_enabled)
+ goto unlock_mutex;
+
+ switch (pf_type) {
+ case OTX_CPT_AE:
+ case OTX_CPT_SE:
+ tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ tar_arch = load_tar_archive(&pdev->dev, tar_filename);
+ if (!tar_arch)
+ goto unlock_mutex;
+
+ /*
+ * If device supports SE engines and there is SE microcode in tar
+ * archive try to create engine group with SE engines for kernel
+ * crypto functionality (symmetric crypto)
+ */
+ tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
+ if (tar_info[0] &&
+ dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {
+
+ engs[0].type = OTX_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) tar_info, 1, true);
+ if (ret)
+ goto release_tar_arch;
+ }
+ /*
+ * If device supports AE engines and there is AE microcode in tar
+ * archive try to create engine group with AE engines for asymmetric
+ * crypto functionality.
+ */
+ tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
+ if (tar_info[0] &&
+ dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {
+
+ engs[0].type = OTX_CPT_AE_TYPES;
+ engs[0].count = eng_grps->avail.max_ae_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) tar_info, 1, true);
+ if (ret)
+ goto release_tar_arch;
+ }
+
+ print_dbg_info(&pdev->dev, eng_grps);
+release_tar_arch:
+ release_tar_archive(tar_arch);
+unlock_mutex:
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+}
+
+void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
+ bool is_rdonly)
+{
+ mutex_lock(&eng_grps->lock);
+
+ eng_grps->is_rdonly = is_rdonly;
+
+ mutex_unlock(&eng_grps->lock);
+}
+
+void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
+{
+ int grp, timeout = 100;
+ u64 reg;
+
+ /* Disengage the cores from groups */
+ for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
+ writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
+ udelay(CSR_DELAY);
+ }
+
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
+ while (reg) {
+ udelay(CSR_DELAY);
+ reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
+		if (!timeout--) {
+ dev_warn(&cpt->pdev->dev, "Cores still busy");
+ break;
+ }
+ }
+
+ /* Disable the cores */
+ writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
+}
+
+void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps)
+{
+ struct otx_cpt_eng_grp_info *grp;
+ int i, j;
+
+ mutex_lock(&eng_grps->lock);
+ if (eng_grps->is_ucode_load_created) {
+ device_remove_file(&pdev->dev,
+ &eng_grps->ucode_load_attr);
+ eng_grps->is_ucode_load_created = false;
+ }
+
+ /* First delete all mirroring engine groups */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].mirror.is_ena)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Delete remaining engine groups */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Release memory */
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
+ kfree(grp->engs[j].bmap);
+ grp->engs[j].bmap = NULL;
+ }
+ }
+
+ mutex_unlock(&eng_grps->lock);
+}
+
+int otx_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps, int pf_type)
+{
+ struct otx_cpt_eng_grp_info *grp;
+ int i, j, ret = 0;
+
+ mutex_init(&eng_grps->lock);
+ eng_grps->obj = pci_get_drvdata(pdev);
+ eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
+ eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
+
+ eng_grps->engs_num = eng_grps->avail.max_se_cnt +
+ eng_grps->avail.max_ae_cnt;
+ if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
+		dev_err(&pdev->dev,
+			"Number of engines %d greater than max supported %d",
+			eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ grp->g = eng_grps;
+ grp->idx = i;
+
+ snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
+ "engine_group%d", i);
+ for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
+ grp->engs[j].bmap =
+ kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
+ sizeof(long), GFP_KERNEL);
+ if (!grp->engs[j].bmap) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+ }
+
+ switch (pf_type) {
+ case OTX_CPT_SE:
+ /* OcteonTX 83XX SE CPT PF has only SE engines attached */
+ eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
+ break;
+
+ case OTX_CPT_AE:
+ /* OcteonTX 83XX AE CPT PF has only AE engines attached */
+ eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ eng_grps->ucode_load_attr.show = NULL;
+ eng_grps->ucode_load_attr.store = ucode_load_store;
+ eng_grps->ucode_load_attr.attr.name = "ucode_load";
+ eng_grps->ucode_load_attr.attr.mode = 0220;
+ sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
+ ret = device_create_file(&pdev->dev,
+ &eng_grps->ucode_load_attr);
+ if (ret)
+ goto err;
+ eng_grps->is_ucode_load_created = true;
+
+ print_dbg_info(&pdev->dev, eng_grps);
+ return ret;
+err:
+ otx_cpt_cleanup_eng_grps(pdev, eng_grps);
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
new file mode 100644
index 000000000000..14f02b60d0c2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTPF_UCODE_H
+#define __OTX_CPTPF_UCODE_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include "otx_cpt_hw_types.h"
+
+/* CPT ucode name maximum length */
+#define OTX_CPT_UCODE_NAME_LENGTH 64
+/*
+ * On the OcteonTX 83xx platform, only one type of engine is allowed to be
+ * attached to an engine group.
+ */
+#define OTX_CPT_MAX_ETYPES_PER_GRP 1
+
+/* Default tar archive file names */
+#define OTX_CPT_UCODE_TAR_FILE_NAME "cpt8x-mc.tar"
+
+/* CPT ucode alignment */
+#define OTX_CPT_UCODE_ALIGNMENT 128
+
+/* CPT ucode signature size */
+#define OTX_CPT_UCODE_SIGN_LEN 256
+
+/* Microcode version string length */
+#define OTX_CPT_UCODE_VER_STR_SZ 44
+
+/* Maximum number of supported engines/cores on OcteonTX 83XX platform */
+#define OTX_CPT_MAX_ENGINES 64
+
+#define OTX_CPT_ENGS_BITMASK_LEN (OTX_CPT_MAX_ENGINES/(BITS_PER_BYTE * \
+ sizeof(unsigned long)))
+
+/* Microcode types */
+enum otx_cpt_ucode_type {
+ OTX_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
+ OTX_CPT_SE_UC_TYPE1 = 20, /* SE-MAIN - combination of 21 and 22 */
+ OTX_CPT_SE_UC_TYPE2 = 21, /* Fast Path IPSec + AirCrypto */
+ OTX_CPT_SE_UC_TYPE3 = 22, /*
+ * Hash + HMAC + FlexiCrypto + RNG + Full
+ * Feature IPSec + AirCrypto + Kasumi
+ */
+};
+
+struct otx_cpt_bitmap {
+ unsigned long bits[OTX_CPT_ENGS_BITMASK_LEN];
+ int size;
+};
+
+struct otx_cpt_engines {
+ int type;
+ int count;
+};
+
+/* Microcode version number */
+struct otx_cpt_ucode_ver_num {
+ u8 nn;
+ u8 xx;
+ u8 yy;
+ u8 zz;
+};
+
+struct otx_cpt_ucode_hdr {
+ struct otx_cpt_ucode_ver_num ver_num;
+ u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];
+ u32 code_length;
+ u32 padding[3];
+};
+
+struct otx_cpt_ucode {
+ u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];/*
+ * ucode version in readable format
+ */
+ struct otx_cpt_ucode_ver_num ver_num;/* ucode version number */
+ char filename[OTX_CPT_UCODE_NAME_LENGTH]; /* ucode filename */
+ dma_addr_t dma; /* phys address of ucode image */
+ dma_addr_t align_dma; /* aligned phys address of ucode image */
+ void *va; /* virt address of ucode image */
+ void *align_va; /* aligned virt address of ucode image */
+ u32 size; /* ucode image size */
+ int type; /* ucode image type SE or AE */
+};
+
+struct tar_ucode_info_t {
+ struct list_head list;
+ struct otx_cpt_ucode ucode;/* microcode information */
+ const u8 *ucode_ptr; /* pointer to microcode in tar archive */
+};
+
+/* Maximum and current number of engines available for all engine groups */
+struct otx_cpt_engs_available {
+ int max_se_cnt;
+ int max_ae_cnt;
+ int se_cnt;
+ int ae_cnt;
+};
+
+/* Engines reserved to an engine group */
+struct otx_cpt_engs_rsvd {
+ int type; /* engine type */
+ int count; /* number of engines attached */
+ int offset; /* constant offset of engine type in the bitmap */
+ unsigned long *bmap; /* attached engines bitmap */
+ struct otx_cpt_ucode *ucode; /* ucode used by these engines */
+};
+
+struct otx_cpt_mirror_info {
+ int is_ena; /*
+ * is mirroring enabled, it is set only for engine
+ * group which mirrors another engine group
+ */
+ int idx; /*
+ * index of engine group which is mirrored by this
+ * group, set only for engine group which mirrors
+ * another group
+ */
+ int ref_count; /*
+ * number of times this engine group is mirrored by
+ * other groups, this is set only for engine group
+ * which is mirrored by other group(s)
+ */
+};
+
+struct otx_cpt_eng_grp_info {
+ struct otx_cpt_eng_grps *g; /* pointer to engine_groups structure */
+ struct device_attribute info_attr; /* group info entry attr */
+ /* engines attached */
+ struct otx_cpt_engs_rsvd engs[OTX_CPT_MAX_ETYPES_PER_GRP];
+ /* Microcode information */
+ struct otx_cpt_ucode ucode[OTX_CPT_MAX_ETYPES_PER_GRP];
+ /* sysfs info entry name */
+ char sysfs_info_name[OTX_CPT_UCODE_NAME_LENGTH];
+ /* engine group mirroring information */
+ struct otx_cpt_mirror_info mirror;
+ int idx; /* engine group index */
+ bool is_enabled; /*
+ * is engine group enabled, engine group is enabled
+ * when it has engines attached and ucode loaded
+ */
+};
+
+struct otx_cpt_eng_grps {
+ struct otx_cpt_eng_grp_info grp[OTX_CPT_MAX_ENGINE_GROUPS];
+ struct device_attribute ucode_load_attr;/* ucode load attr */
+ struct otx_cpt_engs_available avail;
+ struct mutex lock;
+ void *obj;
+ int engs_num; /* total number of engines supported */
+ int eng_types_supported; /* engine types supported SE, AE */
+ u8 eng_ref_cnt[OTX_CPT_MAX_ENGINES];/* engines reference count */
+ bool is_ucode_load_created; /* is ucode_load sysfs entry created */
+ bool is_first_try; /* is this first try to create kcrypto engine grp */
+	bool is_rdonly; /* true when engine groups configuration is read-only */
+};
+
+int otx_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps, int pf_type);
+void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps);
+int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
+ struct otx_cpt_eng_grps *eng_grps,
+ int pf_type);
+void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
+ bool is_rdonly);
+int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type);
+int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
+ int eng_type);
+
+#endif /* __OTX_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf.h b/drivers/crypto/marvell/octeontx/otx_cptvf.h
new file mode 100644
index 000000000000..dd02f21659af
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTVF_H
+#define __OTX_CPTVF_H
+
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include "otx_cpt_common.h"
+#include "otx_cptvf_reqmgr.h"
+
+/* Flags to indicate the features supported */
+#define OTX_CPT_FLAG_DEVICE_READY BIT(1)
+#define otx_cpt_device_ready(cpt) ((cpt)->flags & OTX_CPT_FLAG_DEVICE_READY)
+/* Default command queue length */
+#define OTX_CPT_CMD_QLEN (4*2046)
+#define OTX_CPT_CMD_QCHUNK_SIZE 1023
+#define OTX_CPT_NUM_QS_PER_VF 1
+
+struct otx_cpt_cmd_chunk {
+ u8 *head;
+ dma_addr_t dma_addr;
+ u32 size; /* Chunk size, max OTX_CPT_INST_CHUNK_MAX_SIZE */
+ struct list_head nextchunk;
+};
+
+struct otx_cpt_cmd_queue {
+ u32 idx; /* Command queue host write idx */
+ u32 num_chunks; /* Number of command chunks */
+ struct otx_cpt_cmd_chunk *qhead;/*
+ * Command queue head, instructions
+ * are inserted here
+ */
+ struct otx_cpt_cmd_chunk *base;
+ struct list_head chead;
+};
+
+struct otx_cpt_cmd_qinfo {
+ u32 qchunksize; /* Command queue chunk size */
+ struct otx_cpt_cmd_queue queue[OTX_CPT_NUM_QS_PER_VF];
+};
+
+struct otx_cpt_pending_qinfo {
+ u32 num_queues; /* Number of queues supported */
+ struct otx_cpt_pending_queue queue[OTX_CPT_NUM_QS_PER_VF];
+};
+
+#define for_each_pending_queue(qinfo, q, i) \
+ for (i = 0, q = &qinfo->queue[i]; i < qinfo->num_queues; i++, \
+ q = &qinfo->queue[i])
+
+struct otx_cptvf_wqe {
+ struct tasklet_struct twork;
+ struct otx_cptvf *cptvf;
+};
+
+struct otx_cptvf_wqe_info {
+ struct otx_cptvf_wqe vq_wqe[OTX_CPT_NUM_QS_PER_VF];
+};
+
+struct otx_cptvf {
+ u16 flags; /* Flags to hold device status bits */
+ u8 vfid; /* Device Index 0...OTX_CPT_MAX_VF_NUM */
+ u8 num_vfs; /* Number of enabled VFs */
+ u8 vftype; /* VF type of SE_TYPE(2) or AE_TYPE(1) */
+ u8 vfgrp; /* VF group (0 - 8) */
+ u8 node; /* Operating node: Bits (46:44) in BAR0 address */
+	u8 priority;	/*
+			 * VF priority ring: 1 - high priority round
+			 * robin ring; 0 - low priority round robin ring
+			 */
+ struct pci_dev *pdev; /* Pci device handle */
+ void __iomem *reg_base; /* Register start address */
+ void *wqe_info; /* BH worker info */
+ /* MSI-X */
+ cpumask_var_t affinity_mask[OTX_CPT_VF_MSIX_VECTORS];
+ /* Command and Pending queues */
+ u32 qsize;
+ u32 num_queues;
+ struct otx_cpt_cmd_qinfo cqinfo; /* Command queue information */
+ struct otx_cpt_pending_qinfo pqinfo; /* Pending queue information */
+ /* VF-PF mailbox communication */
+ bool pf_acked;
+ bool pf_nacked;
+};
+
+int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf);
+int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf);
+int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group);
+int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf);
+int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf);
+int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf);
+void otx_cptvf_handle_mbox_intr(struct otx_cptvf *cptvf);
+void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val);
+
+#endif /* __OTX_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
new file mode 100644
index 000000000000..946fb62949b2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -0,0 +1,1744 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/cryptd.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+#include <linux/rtnetlink.h>
+#include <linux/sort.h>
+#include <linux/module.h>
+#include "otx_cptvf.h"
+#include "otx_cptvf_algs.h"
+#include "otx_cptvf_reqmgr.h"
+
+#define CPT_MAX_VF_NUM 64
+/* Size of salt in AES GCM mode */
+#define AES_GCM_SALT_SIZE 4
+/* Size of IV in AES GCM mode */
+#define AES_GCM_IV_SIZE 8
+/* Size of ICV (Integrity Check Value) in AES GCM mode */
+#define AES_GCM_ICV_SIZE 16
+/* Offset of IV in AES GCM mode */
+#define AES_GCM_IV_OFFSET 8
+#define CONTROL_WORD_LEN 8
+#define KEY2_OFFSET 48
+#define DMA_MODE_FLAG(dma_mode) \
+ (((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
+
+/* Truncated SHA digest size */
+#define SHA1_TRUNC_DIGEST_SIZE 12
+#define SHA256_TRUNC_DIGEST_SIZE 16
+#define SHA384_TRUNC_DIGEST_SIZE 24
+#define SHA512_TRUNC_DIGEST_SIZE 32
+
+static DEFINE_MUTEX(mutex);
+static int is_crypto_registered;
+
+struct cpt_device_desc {
+ enum otx_cptpf_type pf_type;
+ struct pci_dev *dev;
+ int num_queues;
+};
+
+struct cpt_device_table {
+ atomic_t count;
+ struct cpt_device_desc desc[CPT_MAX_VF_NUM];
+};
+
+static struct cpt_device_table se_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static struct cpt_device_table ae_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+{
+ int count, ret = 0;
+
+ count = atomic_read(&se_devices.count);
+ if (count < 1)
+ return -ENODEV;
+
+ *cpu_num = get_cpu();
+
+ if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
+ /*
+ * On OcteonTX platform there is one CPT instruction queue bound
+ * to each VF. We get maximum performance if one CPT queue
+ * is available for each cpu otherwise CPT queues need to be
+ * shared between cpus.
+ */
+ if (*cpu_num >= count)
+ *cpu_num %= count;
+ *pdev = se_devices.desc[*cpu_num].dev;
+ } else {
+ pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
+ ret = -EINVAL;
+ }
+ put_cpu();
+
+ return ret;
+}
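+
+/*
+ * Example of the cpu-to-device mapping above (informative): with 4 SE VFs
+ * registered, CPUs 0-3 use devices 0-3 directly, while CPU 6 wraps around
+ * to device 2 via the modulo, so queues are shared only when there are more
+ * CPUs than CPT VFs.
+ */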
+
+static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
+{
+ struct otx_cpt_req_ctx *rctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+
+ req = container_of(cpt_req->areq, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ if (memcmp(rctx->fctx.hmac.s.hmac_calc,
+ rctx->fctx.hmac.s.hmac_recv,
+ crypto_aead_authsize(tfm)) != 0)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
+{
+ struct otx_cpt_info_buffer *cpt_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct otx_cpt_req_info *cpt_req;
+ struct pci_dev *pdev;
+
+ cpt_req = cpt_info->req;
+ if (!status) {
+		/*
+		 * When the selected cipher is NULL we need to manually
+		 * verify whether the calculated hmac value matches the
+		 * received hmac value
+		 */
+ if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
+ !cpt_req->is_enc)
+ status = validate_hmac_cipher_null(cpt_req);
+ }
+ if (cpt_info) {
+ pdev = cpt_info->pdev;
+ do_request_cleanup(pdev, cpt_info);
+ }
+ if (areq)
+ areq->complete(areq, status);
+}
+
+static void output_iv_copyback(struct crypto_async_request *areq)
+{
+ struct otx_cpt_req_info *req_info;
+ struct skcipher_request *sreq;
+ struct crypto_skcipher *stfm;
+ struct otx_cpt_req_ctx *rctx;
+ struct otx_cpt_enc_ctx *ctx;
+ u32 start, ivsize;
+
+ sreq = container_of(areq, struct skcipher_request, base);
+ stfm = crypto_skcipher_reqtfm(sreq);
+ ctx = crypto_skcipher_ctx(stfm);
+ if (ctx->cipher_type == OTX_CPT_AES_CBC ||
+ ctx->cipher_type == OTX_CPT_DES3_CBC) {
+ rctx = skcipher_request_ctx(sreq);
+ req_info = &rctx->cpt_req;
+ ivsize = crypto_skcipher_ivsize(stfm);
+ start = sreq->cryptlen - ivsize;
+
+ if (req_info->is_enc) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
+ ivsize, 0);
+ } else {
+ if (sreq->src != sreq->dst) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->src,
+ start, ivsize, 0);
+ } else {
+ memcpy(sreq->iv, req_info->iv_out, ivsize);
+ kfree(req_info->iv_out);
+ }
+ }
+ }
+}
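+
+/*
+ * Why output_iv_copyback() exists (informative): for the CBC modes the next
+ * request's IV must be the last ciphertext block of this request.  On
+ * encryption that block sits at the tail of ->dst; on in-place decryption
+ * it would already have been overwritten, which is why create_ctx_hdr()
+ * saved it to req_info->iv_out beforehand.
+ */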
+
+static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
+{
+ struct otx_cpt_info_buffer *cpt_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct pci_dev *pdev;
+
+ if (areq) {
+ if (!status)
+ output_iv_copyback(areq);
+ if (cpt_info) {
+ pdev = cpt_info->pdev;
+ do_request_cleanup(pdev, cpt_info);
+ }
+ areq->complete(areq, status);
+ }
+}
+
+static inline void update_input_data(struct otx_cpt_req_info *req_info,
+ struct scatterlist *inp_sg,
+ u32 nbytes, u32 *argcnt)
+{
+ req_info->req.dlen += nbytes;
+
+ while (nbytes) {
+ u32 len = min(nbytes, inp_sg->length);
+ u8 *ptr = sg_virt(inp_sg);
+
+ req_info->in[*argcnt].vptr = (void *)ptr;
+ req_info->in[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ inp_sg = sg_next(inp_sg);
+ }
+}
+
+static inline void update_output_data(struct otx_cpt_req_info *req_info,
+ struct scatterlist *outp_sg,
+ u32 offset, u32 nbytes, u32 *argcnt)
+{
+ req_info->rlen += nbytes;
+
+ while (nbytes) {
+ u32 len = min(nbytes, outp_sg->length - offset);
+ u8 *ptr = sg_virt(outp_sg);
+
+ req_info->out[*argcnt].vptr = (void *) (ptr + offset);
+ req_info->out[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ offset = 0;
+ outp_sg = sg_next(outp_sg);
+ }
+}
+
+static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
+ int ivsize = crypto_skcipher_ivsize(stfm);
+ u32 start = req->cryptlen - ivsize;
+ u64 *ctrl_flags = NULL;
+ gfp_t flags;
+
+ flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
+ req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
+
+ req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
+ ctx->cipher_type == OTX_CPT_DES3_CBC) &&
+ req->src == req->dst) {
+ req_info->iv_out = kmalloc(ivsize, flags);
+ if (!req_info->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_info->iv_out, req->src,
+ start, ivsize, 0);
+ }
+ }
+ /* Encryption data length */
+ req_info->req.param1 = req->cryptlen;
+ /* Authentication data length */
+ req_info->req.param2 = 0;
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
+
+ if (ctx->cipher_type == OTX_CPT_AES_XTS)
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
+ else
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
+
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
+
+ ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
+ *ctrl_flags = cpu_to_be64(*ctrl_flags);
+
+ /*
+ * Storing Packet Data Information in offset
+ * Control Word First 8 bytes
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
+
+ ++(*argcnt);
+
+ return 0;
+}
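+
+/*
+ * Input gather list produced by create_ctx_hdr() above (informative):
+ * in[0] holds the 8-byte control word, in[1] the otx_cpt_fc_ctx carrying
+ * the key and IV, and create_input_list() then appends the
+ * plaintext/ciphertext scatterlist entries after these two fixed slots.
+ */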
+
+static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
+ u32 enc_iv_len)
+{
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+ int ret;
+
+ ret = create_ctx_hdr(req, enc, &argcnt);
+ if (ret)
+ return ret;
+
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
+ req_info->incnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_output_list(struct skcipher_request *req,
+ u32 enc_iv_len)
+{
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+
+	/*
+	 * OUTPUT Buffer Processing
+	 * AES encryption/decryption output would be received in the
+	 * following format:
+	 *
+	 * |---------- IV ----------|---- ENCRYPTED/DECRYPTED DATA ----|
+	 * |   16 bytes (AES CBC)   |     request enc/dec data len     |
+	 */
+ update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
+ req_info->outcnt = argcnt;
+}
+
+static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ /* Validate that request doesn't exceed maximum CPT supported size */
+ if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
+ return -E2BIG;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.flags = 0;
+
+ status = create_input_list(req, enc, enc_iv_len);
+ if (status)
+ return status;
+ create_output_list(req, enc_iv_len);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->callback = (void *)otx_cpt_skcipher_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = OTX_CPT_ENC_DEC_REQ;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+ req_info->ctrl.s.grp = 0;
+
+	/*
+	 * We perform an asynchronous send and once the request is completed
+	 * the driver will notify us through the registered callback
+	 * functions
+	 */
+ status = otx_cpt_do_request(pdev, req_info, cpu_num);
+
+ return status;
+}
+
+static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, true);
+}
+
+static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, false);
+}
+
+static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u8 *key2 = key + (keylen / 2);
+ const u8 *key1 = key;
+ int ret;
+
+ ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ if (ret)
+ return ret;
+ ctx->key_len = keylen;
+ memcpy(ctx->enc_key, key1, keylen / 2);
+ memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+ ctx->cipher_type = OTX_CPT_AES_XTS;
+ switch (ctx->key_len) {
+ case 2 * AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case 2 * AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
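+
+/*
+ * XTS key layout used above (informative): the caller supplies key1
+ * concatenated with key2; key1 is stored at the start of ctx->enc_key and
+ * key2 at KEY2_OFFSET (48), so a 32-byte AES-128-XTS key places key1 at
+ * bytes 0-15 and key2 at bytes 48-63 of the context key buffer.
+ */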
+
+static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return 0;
+}
+
+static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return 0;
+}
+
+static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
+}
+
+static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
+}
+
+static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
+}
+
+static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
+}
+
+static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
+}
+
+static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
+{
+ struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+ /*
+ * Additional memory for skcipher_request is
+ * allocated since the cryptd daemon uses
+ * this memory for request_ctx information
+ */
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
+
+ return 0;
+}
+
+static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
+	/*
+	 * When the selected cipher is NULL we use the HMAC opcode instead of
+	 * the FLEXICRYPTO opcode, therefore we don't need the HASH algorithms
+	 * for calculating ipad and opad
+	 */
+ if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
+ switch (ctx->mac_type) {
+ case OTX_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+ }
+ }
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));
+
+ return 0;
+}
+
+static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
+}
+
+static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
+}
+
+static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
+}
+
+static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
+}
+
+static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
+}
+
+static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
+}
+
+static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
+}
+
+static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
+}
+
+static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
+}
+
+static void otx_cpt_aead_exit(struct crypto_aead *tfm)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+ if (ctx->hashalg)
+ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+}
+
+/*
+ * This validates the Integrity Check Value (i.e. the authentication tag)
+ * length
+ */
+static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (ctx->mac_type) {
+ case OTX_CPT_SHA1:
+ if (authsize != SHA1_DIGEST_SIZE &&
+ authsize != SHA1_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA1_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_SHA256:
+ if (authsize != SHA256_DIGEST_SIZE &&
+ authsize != SHA256_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA256_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_SHA384:
+ if (authsize != SHA384_DIGEST_SIZE &&
+ authsize != SHA384_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA384_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_SHA512:
+ if (authsize != SHA512_DIGEST_SIZE &&
+ authsize != SHA512_TRUNC_DIGEST_SIZE)
+ return -EINVAL;
+
+ if (authsize == SHA512_TRUNC_DIGEST_SIZE)
+ ctx->is_trunc_hmac = true;
+ break;
+
+ case OTX_CPT_MAC_NULL:
+ if (ctx->cipher_type == OTX_CPT_AES_GCM) {
+ if (authsize != AES_GCM_ICV_SIZE)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tfm->authsize = authsize;
+ return 0;
+}
+
+static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+ struct otx_cpt_sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return NULL;
+
+ sdesc->shash.tfm = alg;
+
+ return sdesc;
+}
+
+static inline void swap_data32(void *buf, u32 len)
+{
+ u32 *store = (u32 *) buf;
+ int i = 0;
+
+ for (i = 0 ; i < len/sizeof(u32); i++, store++)
+ *store = cpu_to_be32(*store);
+}
+
+static inline void swap_data64(void *buf, u32 len)
+{
+ u64 *store = (u64 *) buf;
+ int i = 0;
+
+ for (i = 0 ; i < len/sizeof(u64); i++, store++)
+ *store = cpu_to_be64(*store);
+}
+
+static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+{
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+ struct sha1_state *sha1;
+
+ switch (mac_type) {
+ case OTX_CPT_SHA1:
+ sha1 = (struct sha1_state *) in_pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+ memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX_CPT_SHA256:
+ sha256 = (struct sha256_state *) in_pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+ memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX_CPT_SHA384:
+ case OTX_CPT_SHA512:
+ sha512 = (struct sha512_state *) in_pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+ memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
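+/*
+ * Standard HMAC precompute, done in software: hash one block of
+ * (key XOR ipad) and one block of (key XOR opad), then export the two
+ * partial states into the transform context so the engine can resume
+ * from them instead of rehashing the key on every request.
+ */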
+static int aead_hmac_init(struct crypto_aead *cipher)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+ int authkeylen = ctx->auth_key_len;
+ u8 *ipad = NULL, *opad = NULL;
+ int ret = 0, icount = 0;
+
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc)
+ return -ENOMEM;
+
+ ctx->ipad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ctx->opad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ipad = kzalloc(state_size, GFP_KERNEL);
+ if (!ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ opad = kzalloc(state_size, GFP_KERNEL);
+ if (!opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+ authkeylen, ipad);
+ if (ret)
+ goto calc_fail;
+
+ authkeylen = ds;
+ } else {
+ memcpy(ipad, ctx->key, authkeylen);
+ }
+
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
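+ /* XOR in the standard RFC 2104 inner/outer pad bytes */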
+ for (icount = 0; icount < bs; icount++) {
+ ipad[icount] ^= 0x36;
+ opad[icount] ^= 0x5c;
+ }
+
+ /*
+ * The partial hashes of IPAD and OPAD are computed in software
+ * and their exported states handed to the hardware
+ */
+
+ /* IPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, ipad);
+ ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ if (ret)
+ goto calc_fail;
+
+ /* OPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, opad);
+ ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+ if (ret)
+ goto calc_fail;
+
+ kfree(ipad);
+ kfree(opad);
+
+ return 0;
+
+calc_fail:
+ kfree(ctx->ipad);
+ ctx->ipad = NULL;
+ kfree(ctx->opad);
+ ctx->opad = NULL;
+ kfree(ipad);
+ kfree(opad);
+ kfree(ctx->sdesc);
+ ctx->sdesc = NULL;
+
+ return ret;
+}
+
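+/*
+ * authenc() keys arrive as an rtattr-framed blob:
+ *   struct rtattr | crypto_authenc_key_param (enckeylen, be32) |
+ *   authentication key | encryption key
+ * enckeylen selects the AES key size; whatever precedes the cipher key
+ * in the payload is the HMAC key.
+ */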
+static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ int enckeylen = 0, authkeylen = 0;
+ struct rtattr *rta = (void *)key;
+ int status = -EINVAL;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < enckeylen)
+ goto badkey;
+
+ if (keylen > OTX_CPT_MAX_KEY_SIZE)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+ memcpy(ctx->key, key, keylen);
+
+ switch (enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ goto badkey;
+ }
+
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = authkeylen;
+
+ status = aead_hmac_init(cipher);
+ if (status)
+ goto badkey;
+
+ return 0;
+badkey:
+ return status;
+}
+
+static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta = (void *)key;
+ int enckeylen = 0;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (enckeylen != 0)
+ goto badkey;
+
+ if (keylen > OTX_CPT_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = keylen;
+ return 0;
+badkey:
+ return -EINVAL;
+}
+
+static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+
+ /*
+ * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
+ * and salt (4 bytes)
+ */
+ switch (keylen) {
+ case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX_CPT_AES_128_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_128;
+ break;
+ case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX_CPT_AES_192_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ break;
+ case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX_CPT_AES_256_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_256;
+ break;
+ default:
+ /* Invalid key and salt length */
+ return -EINVAL;
+ }
+
+ /* Store encryption key and salt */
+ memcpy(ctx->key, key, keylen);
+
+ return 0;
+}
+
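+/*
+ * Build the per-request header: the 8-byte offset control word and the
+ * flexi-crypto context (cipher key, IV or salt, precomputed ipad/opad)
+ * are prepended to the gather list ahead of the payload itself.
+ */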
+static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
+ int mac_len = crypto_aead_authsize(tfm);
+ int ds;
+
+ rctx->ctrl_word.e.enc_data_offset = req->assoclen;
+
+ switch (ctx->cipher_type) {
+ case OTX_CPT_AES_CBC:
+ fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
+ ctx->enc_key_len);
+ /* Copy IV to context */
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
+
+ ds = crypto_shash_digestsize(ctx->hashalg);
+ if (ctx->mac_type == OTX_CPT_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+ if (ctx->ipad)
+ memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
+ if (ctx->opad)
+ memcpy(fctx->hmac.e.opad, ctx->opad, ds);
+ break;
+
+ case OTX_CPT_AES_GCM:
+ fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
+ /* Copy salt to context */
+ memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
+ AES_GCM_SALT_SIZE);
+
+ rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
+ break;
+
+ default:
+ /* Unknown cipher type */
+ return -EINVAL;
+ }
+ rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags);
+
+ req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
+ req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
+ req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ req_info->req.param1 = req->cryptlen;
+ req_info->req.param2 = req->cryptlen + req->assoclen;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ req_info->req.param1 = req->cryptlen - mac_len;
+ req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
+ }
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
+ fctx->enc.enc_ctrl.e.mac_len = mac_len;
+ fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);
+
+ /*
+ * The offset control word is passed as the first 8 bytes of
+ * input data, ahead of the flexi-crypto context
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
+ ++(*argcnt);
+
+ return 0;
+}
+
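+/*
+ * Header for the authentication-only (NULL cipher) path: no
+ * flexi-crypto context is sent, just the HMAC opcode with the raw
+ * authentication key as the first gather entry.
+ */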
+static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
+ u32 enc)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+
+ req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
+ req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
+ req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
+ DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
+ req_info->is_trunc_hmac = ctx->is_trunc_hmac;
+
+ req_info->req.opcode.s.minor = 0;
+ req_info->req.param1 = ctx->auth_key_len;
+ req_info->req.param2 = ctx->mac_type << 8;
+
+ /* Add authentication key */
+ req_info->in[*argcnt].vptr = ctx->key;
+ req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
+ req_info->req.dlen += round_up(ctx->auth_key_len, 8);
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen = req->cryptlen + req->assoclen;
+ u32 status, argcnt = 0;
+
+ status = create_aead_ctx_hdr(req, enc, &argcnt);
+ if (status)
+ return status;
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->incnt = argcnt;
+
+ return 0;
+}
+
+static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
+ u32 mac_len)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0, outputlen = 0;
+
+ if (enc)
+ outputlen = req->cryptlen + req->assoclen + mac_len;
+ else
+ outputlen = req->cryptlen + req->assoclen - mac_len;
+
+ update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
+ req_info->outcnt = argcnt;
+
+ return 0;
+}
+
+static inline u32 create_aead_null_input_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen, argcnt = 0;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ create_hmac_ctx_hdr(req, &argcnt, enc);
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->incnt = argcnt;
+
+ return 0;
+}
+
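+/*
+ * NULL-cipher output handling: on encryption the engine scatters the
+ * computed tag straight after the payload; on decryption the received
+ * tag is pulled into hmac_recv and the engine writes its own result to
+ * hmac_calc, so the completion path can compare the two.
+ */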
+static inline u32 create_aead_null_output_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct scatterlist *dst;
+ u8 *ptr = NULL;
+ int argcnt = 0, status, offset;
+ u32 inputlen;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ /*
+ * If source and destination are different
+ * then copy payload to destination
+ */
+ if (req->src != req->dst) {
+
+ ptr = kmalloc(inputlen, (req_info->areq->flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ptr) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error;
+ }
+ status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error;
+ }
+ kfree(ptr);
+ ptr = NULL;
+ }
+
+ if (enc) {
+ /*
+ * In an encryption scenario the hmac needs
+ * to be appended after the payload
+ */
+ dst = req->dst;
+ offset = inputlen;
+ while (offset >= dst->length) {
+ offset -= dst->length;
+ dst = sg_next(dst);
+ if (!dst) {
+ status = -ENOENT;
+ goto error;
+ }
+ }
+
+ update_output_data(req_info, dst, offset, mac_len, &argcnt);
+ } else {
+ /*
+ * In a decryption scenario the hmac calculated over the
+ * received payload needs to be compared with the hmac
+ * received in the request
+ */
+ status = sg_copy_buffer(req->src, sg_nents(req->src),
+ rctx->fctx.hmac.s.hmac_recv, mac_len,
+ inputlen, true);
+ if (status != mac_len) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
+ req_info->out[argcnt].size = mac_len;
+ argcnt++;
+ }
+
+ req_info->outcnt = argcnt;
+ return 0;
+error:
+ kfree(ptr);
+ return status;
+}
+
+static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
+{
+ struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct pci_dev *pdev;
+ u32 status, cpu_num;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.flags = 0;
+
+ req_info->callback = otx_cpt_aead_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = reg_type;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+
+ switch (reg_type) {
+ case OTX_CPT_AEAD_ENC_DEC_REQ:
+ status = create_aead_input_list(req, enc);
+ if (status)
+ return status;
+ status = create_aead_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
+ status = create_aead_null_input_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ status = create_aead_null_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate that request doesn't exceed maximum CPT supported size */
+ if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
+ req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
+ return -E2BIG;
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->ctrl.s.grp = 0;
+
+ status = otx_cpt_do_request(pdev, req_info, cpu_num);
+ /*
+ * The send is asynchronous; once the request completes the
+ * driver notifies the caller through the registered callback
+ */
+ return status;
+}
+
+static int otx_cpt_aead_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
+}
+
+static int otx_cpt_aead_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
+}
+
+static int otx_cpt_aead_null_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
+}
+
+static int otx_cpt_aead_null_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
+}
+
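+/*
+ * cra_priority 4001 outbids the generic software implementations, so
+ * once these algorithms are registered the crypto API prefers the
+ * hardware-backed versions.
+ */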
+static struct skcipher_alg otx_cpt_skciphers[] = { {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cpt_xts_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_xts_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cpt_cbc_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_cbc_aes_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cpt_ecb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = 0,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_ecb_aes_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "cpt_cfb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx_cpt_skcipher_cfb_aes_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cpt_cbc_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = otx_cpt_skcipher_cbc_des3_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cpt_ecb_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx_cpt_enc_dec_init,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .setkey = otx_cpt_skcipher_ecb_des3_setkey,
+ .encrypt = otx_cpt_skcipher_encrypt,
+ .decrypt = otx_cpt_skcipher_decrypt,
+} };
+
+static struct aead_alg otx_cpt_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha1_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha256_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha384_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_cbc_aes_sha512_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha1_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha1_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha256_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha256_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha384_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha384_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha512_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_ecb_null_sha512_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_null_encrypt,
+ .decrypt = otx_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "cpt_rfc4106_gcm_aes",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx_cpt_aead_gcm_aes_init,
+ .exit = otx_cpt_aead_exit,
+ .setkey = otx_cpt_aead_gcm_aes_setkey,
+ .setauthsize = otx_cpt_aead_set_authsize,
+ .encrypt = otx_cpt_aead_encrypt,
+ .decrypt = otx_cpt_aead_decrypt,
+ .ivsize = AES_GCM_IV_SIZE,
+ .maxauthsize = AES_GCM_ICV_SIZE,
+} };
+
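+/*
+ * A registered algorithm holds exactly one reference on itself, so any
+ * cra_refcnt above 1 means a live transform still uses it and the
+ * algorithms must not be unregistered yet.
+ */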
+static inline bool is_any_alg_used(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
+ if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
+ return true;
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
+ if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
+ return true;
+ return false;
+}
+
+static inline int cpt_register_algs(void)
+{
+ int i, err = 0;
+
+ if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
+ otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx_cpt_skciphers,
+ ARRAY_SIZE(otx_cpt_skciphers));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
+ otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
+ if (err) {
+ crypto_unregister_skciphers(otx_cpt_skciphers,
+ ARRAY_SIZE(otx_cpt_skciphers));
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void cpt_unregister_algs(void)
+{
+ crypto_unregister_skciphers(otx_cpt_skciphers,
+ ARRAY_SIZE(otx_cpt_skciphers));
+ crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
+}
+
+static int compare_func(const void *lptr, const void *rptr)
+{
+ struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
+ struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
+
+ if (ldesc->dev->devfn < rdesc->dev->devfn)
+ return -1;
+ if (ldesc->dev->devfn > rdesc->dev->devfn)
+ return 1;
+ return 0;
+}
+
+static void swap_func(void *lptr, void *rptr, int size)
+{
+ struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
+ struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
+ struct cpt_device_desc desc;
+
+ desc = *ldesc;
+ *ldesc = *rdesc;
+ *rdesc = desc;
+}
+
+int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptpf_type pf_type,
+ enum otx_cptvf_type engine_type,
+ int num_queues, int num_devices)
+{
+ int ret = 0;
+ int count;
+
+ mutex_lock(&mutex);
+ switch (engine_type) {
+ case OTX_CPT_SE_TYPES:
+ count = atomic_read(&se_devices.count);
+ if (count >= CPT_MAX_VF_NUM) {
+ dev_err(&pdev->dev, "No space to add a new device");
+ ret = -ENOSPC;
+ goto err;
+ }
+ se_devices.desc[count].pf_type = pf_type;
+ se_devices.desc[count].num_queues = num_queues;
+ se_devices.desc[count++].dev = pdev;
+ atomic_inc(&se_devices.count);
+
+ if (atomic_read(&se_devices.count) == num_devices &&
+ !is_crypto_registered) {
+ if (cpt_register_algs()) {
+ dev_err(&pdev->dev,
+ "Error in registering crypto algorithms\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ try_module_get(mod);
+ is_crypto_registered = true;
+ }
+ sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+ break;
+
+ case OTX_CPT_AE_TYPES:
+ count = atomic_read(&ae_devices.count);
+ if (count >= CPT_MAX_VF_NUM) {
+ dev_err(&pdev->dev, "No space to a add new device");
+ ret = -ENOSPC;
+ goto err;
+ }
+ ae_devices.desc[count].pf_type = pf_type;
+ ae_devices.desc[count].num_queues = num_queues;
+ ae_devices.desc[count++].dev = pdev;
+ atomic_inc(&ae_devices.count);
+ sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
+ ret = BAD_OTX_CPTVF_TYPE;
+ }
+err:
+ mutex_unlock(&mutex);
+ return ret;
+}
+
+void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptvf_type engine_type)
+{
+ struct cpt_device_table *dev_tbl;
+ bool dev_found = false;
+ int i, j, count;
+
+ mutex_lock(&mutex);
+
+ dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
+ count = atomic_read(&dev_tbl->count);
+ for (i = 0; i < count; i++) {
+ if (pdev == dev_tbl->desc[i].dev) {
+ for (j = i; j < count - 1; j++)
+ dev_tbl->desc[j] = dev_tbl->desc[j + 1];
+ dev_found = true;
+ break;
+ }
+ }
+
+ if (!dev_found) {
+ dev_err(&pdev->dev, "%s device not found", __func__);
+ goto exit;
+ }
+
+ if (engine_type != OTX_CPT_AE_TYPES) {
+ if (atomic_dec_and_test(&se_devices.count) &&
+ !is_any_alg_used()) {
+ cpt_unregister_algs();
+ module_put(mod);
+ is_crypto_registered = false;
+ }
+ } else
+ atomic_dec(&ae_devices.count);
+exit:
+ mutex_unlock(&mutex);
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
new file mode 100644
index 000000000000..67cc0025f5d5
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPT_ALGS_H
+#define __OTX_CPT_ALGS_H
+
+#include <crypto/hash.h>
+#include "otx_cpt_common.h"
+
+#define OTX_CPT_MAX_ENC_KEY_SIZE 32
+#define OTX_CPT_MAX_HASH_KEY_SIZE 64
+#define OTX_CPT_MAX_KEY_SIZE (OTX_CPT_MAX_ENC_KEY_SIZE + \
+ OTX_CPT_MAX_HASH_KEY_SIZE)
+enum otx_cpt_request_type {
+ OTX_CPT_ENC_DEC_REQ = 0x1,
+ OTX_CPT_AEAD_ENC_DEC_REQ = 0x2,
+ OTX_CPT_AEAD_ENC_DEC_NULL_REQ = 0x3,
+ OTX_CPT_PASSTHROUGH_REQ = 0x4
+};
+
+enum otx_cpt_major_opcodes {
+ OTX_CPT_MAJOR_OP_MISC = 0x01,
+ OTX_CPT_MAJOR_OP_FC = 0x33,
+ OTX_CPT_MAJOR_OP_HMAC = 0x35,
+};
+
+enum otx_cpt_req_type {
+ OTX_CPT_AE_CORE_REQ,
+ OTX_CPT_SE_CORE_REQ
+};
+
+enum otx_cpt_cipher_type {
+ OTX_CPT_CIPHER_NULL = 0x0,
+ OTX_CPT_DES3_CBC = 0x1,
+ OTX_CPT_DES3_ECB = 0x2,
+ OTX_CPT_AES_CBC = 0x3,
+ OTX_CPT_AES_ECB = 0x4,
+ OTX_CPT_AES_CFB = 0x5,
+ OTX_CPT_AES_CTR = 0x6,
+ OTX_CPT_AES_GCM = 0x7,
+ OTX_CPT_AES_XTS = 0x8
+};
+
+enum otx_cpt_mac_type {
+ OTX_CPT_MAC_NULL = 0x0,
+ OTX_CPT_MD5 = 0x1,
+ OTX_CPT_SHA1 = 0x2,
+ OTX_CPT_SHA224 = 0x3,
+ OTX_CPT_SHA256 = 0x4,
+ OTX_CPT_SHA384 = 0x5,
+ OTX_CPT_SHA512 = 0x6,
+ OTX_CPT_GMAC = 0x7
+};
+
+enum otx_cpt_aes_key_len {
+ OTX_CPT_AES_128_BIT = 0x1,
+ OTX_CPT_AES_192_BIT = 0x2,
+ OTX_CPT_AES_256_BIT = 0x3
+};
+
+union otx_cpt_encr_ctrl {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 enc_cipher:4;
+ u64 reserved1:1;
+ u64 aes_key:2;
+ u64 iv_source:1;
+ u64 mac_type:4;
+ u64 reserved2:3;
+ u64 auth_input_type:1;
+ u64 mac_len:8;
+ u64 reserved3:8;
+ u64 encr_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 encr_offset:16;
+ u64 reserved3:8;
+ u64 mac_len:8;
+ u64 auth_input_type:1;
+ u64 reserved2:3;
+ u64 mac_type:4;
+ u64 iv_source:1;
+ u64 aes_key:2;
+ u64 reserved1:1;
+ u64 enc_cipher:4;
+#endif
+ } e;
+};
+
+struct otx_cpt_cipher {
+ const char *name;
+ u8 value;
+};
+
+struct otx_cpt_enc_context {
+ union otx_cpt_encr_ctrl enc_ctrl;
+ u8 encr_key[32];
+ u8 encr_iv[16];
+};
+
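+/*
+ * The HMAC half of the flexi-crypto context is a union: the 'e' view
+ * carries the precomputed ipad/opad state for ordinary AEAD requests,
+ * while the 's' view is reused on NULL-cipher decryption to hold the
+ * received tag alongside the tag the engine computes.
+ */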
+union otx_cpt_fchmac_ctx {
+ struct {
+ u8 ipad[64];
+ u8 opad[64];
+ } e;
+ struct {
+ u8 hmac_calc[64]; /* HMAC calculated */
+ u8 hmac_recv[64]; /* HMAC received */
+ } s;
+};
+
+struct otx_cpt_fc_ctx {
+ struct otx_cpt_enc_context enc;
+ union otx_cpt_fchmac_ctx hmac;
+};
+
+struct otx_cpt_enc_ctx {
+ u32 key_len;
+ u8 enc_key[OTX_CPT_MAX_KEY_SIZE];
+ u8 cipher_type;
+ u8 key_type;
+};
+
+struct otx_cpt_des3_ctx {
+ u32 key_len;
+ u8 des3_key[OTX_CPT_MAX_KEY_SIZE];
+};
+
+union otx_cpt_offset_ctrl_word {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved:32;
+ u64 enc_data_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 enc_data_offset:16;
+ u64 reserved:32;
+#endif
+ } e;
+};
+
+struct otx_cpt_req_ctx {
+ struct otx_cpt_req_info cpt_req;
+ union otx_cpt_offset_ctrl_word ctrl_word;
+ struct otx_cpt_fc_ctx fctx;
+};
+
+struct otx_cpt_sdesc {
+ struct shash_desc shash;
+};
+
+struct otx_cpt_aead_ctx {
+ u8 key[OTX_CPT_MAX_KEY_SIZE];
+ struct crypto_shash *hashalg;
+ struct otx_cpt_sdesc *sdesc;
+ u8 *ipad;
+ u8 *opad;
+ u32 enc_key_len;
+ u32 auth_key_len;
+ u8 cipher_type;
+ u8 mac_type;
+ u8 key_type;
+ u8 is_trunc_hmac;
+};
+int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptpf_type pf_type,
+ enum otx_cptvf_type engine_type,
+ int num_queues, int num_devices);
+void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
+ enum otx_cptvf_type engine_type);
+void otx_cpt_callback(int status, void *arg, void *req);
+
+#endif /* __OTX_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
new file mode 100644
index 000000000000..a91860b5dc77
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include "otx_cptvf.h"
+#include "otx_cptvf_algs.h"
+#include "otx_cptvf_reqmgr.h"
+
+#define DRV_NAME "octeontx-cptvf"
+#define DRV_VERSION "1.0"
+
+static void vq_work_handler(unsigned long data)
+{
+ struct otx_cptvf_wqe_info *cwqe_info =
+ (struct otx_cptvf_wqe_info *) data;
+
+ otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
+}
+
+static int init_worker_threads(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx_cptvf_wqe_info *cwqe_info;
+ int i;
+
+ cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
+ if (!cwqe_info)
+ return -ENOMEM;
+
+ if (cptvf->num_queues) {
+ dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
+ cptvf->num_queues);
+ }
+
+ for (i = 0; i < cptvf->num_queues; i++) {
+ tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
+ (u64)cwqe_info);
+ cwqe_info->vq_wqe[i].cptvf = cptvf;
+ }
+ cptvf->wqe_info = cwqe_info;
+
+ return 0;
+}
+
+static void cleanup_worker_threads(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx_cptvf_wqe_info *cwqe_info;
+ int i;
+
+ cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
+ if (!cwqe_info)
+ return;
+
+ if (cptvf->num_queues) {
+ dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
+ cptvf->num_queues);
+ }
+
+ for (i = 0; i < cptvf->num_queues; i++)
+ tasklet_kill(&cwqe_info->vq_wqe[i].twork);
+
+ kzfree(cwqe_info);
+ cptvf->wqe_info = NULL;
+}
+
+static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
+{
+ struct otx_cpt_pending_queue *queue;
+ int i;
+
+ for_each_pending_queue(pqinfo, queue, i) {
+ if (!queue->head)
+ continue;
+
+ /* free single queue */
+ kzfree((queue->head));
+ queue->front = 0;
+ queue->rear = 0;
+ queue->qlen = 0;
+ }
+ pqinfo->num_queues = 0;
+}
+
+static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
+ u32 num_queues)
+{
+ struct otx_cpt_pending_queue *queue = NULL;
+ size_t size;
+ int ret;
+ u32 i;
+
+ pqinfo->num_queues = num_queues;
+ size = (qlen * sizeof(struct otx_cpt_pending_entry));
+
+ for_each_pending_queue(pqinfo, queue, i) {
+ queue->head = kzalloc((size), GFP_KERNEL);
+ if (!queue->head) {
+ ret = -ENOMEM;
+ goto pending_qfail;
+ }
+
+ queue->pending_count = 0;
+ queue->front = 0;
+ queue->rear = 0;
+ queue->qlen = qlen;
+
+ /* init queue spin lock */
+ spin_lock_init(&queue->lock);
+ }
+ return 0;
+
+pending_qfail:
+ free_pending_queues(pqinfo);
+
+ return ret;
+}
+
+static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
+ u32 num_queues)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ int ret;
+
+ if (!num_queues)
+ return 0;
+
+ ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
+ num_queues);
+ return ret;
+ }
+ return 0;
+}
+
+static void cleanup_pending_queues(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+
+ if (!cptvf->num_queues)
+ return;
+
+ dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
+ cptvf->num_queues);
+ free_pending_queues(&cptvf->pqinfo);
+}
+
+static void free_command_queues(struct otx_cptvf *cptvf,
+ struct otx_cpt_cmd_qinfo *cqinfo)
+{
+ struct otx_cpt_cmd_queue *queue = NULL;
+ struct otx_cpt_cmd_chunk *chunk = NULL;
+ struct pci_dev *pdev = cptvf->pdev;
+ int i;
+
+ /* clean up for each queue */
+ for (i = 0; i < cptvf->num_queues; i++) {
+ queue = &cqinfo->queue[i];
+
+ while (!list_empty(&cqinfo->queue[i].chead)) {
+ chunk = list_first_entry(&cqinfo->queue[i].chead,
+ struct otx_cpt_cmd_chunk, nextchunk);
+
+ dma_free_coherent(&pdev->dev, chunk->size,
+ chunk->head,
+ chunk->dma_addr);
+ chunk->head = NULL;
+ chunk->dma_addr = 0;
+ list_del(&chunk->nextchunk);
+ kzfree(chunk);
+ }
+ queue->num_chunks = 0;
+ queue->idx = 0;
+
+ }
+}
+
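+/*
+ * Command queues are built from DMA-coherent chunks: the 8 bytes past
+ * the end of each chunk hold the DMA address of the next one, and the
+ * last chunk points back at the first, giving the engine a circular
+ * instruction ring per queue.
+ */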
+static int alloc_command_queues(struct otx_cptvf *cptvf,
+ struct otx_cpt_cmd_qinfo *cqinfo,
+ u32 qlen)
+{
+ struct otx_cpt_cmd_chunk *curr, *first, *last;
+ struct otx_cpt_cmd_queue *queue = NULL;
+ struct pci_dev *pdev = cptvf->pdev;
+ size_t q_size, c_size, rem_q_size;
+ u32 qcsize_bytes;
+ int i;
+
+
+ /* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
+ cptvf->qsize = min(qlen, cqinfo->qchunksize) *
+ OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
+ /* Qsize in bytes to create space for alignment */
+ q_size = qlen * OTX_CPT_INST_SIZE;
+
+ qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;
+
+ /* per queue initialization */
+ for (i = 0; i < cptvf->num_queues; i++) {
+ c_size = 0;
+ rem_q_size = q_size;
+ first = NULL;
+ last = NULL;
+
+ queue = &cqinfo->queue[i];
+ INIT_LIST_HEAD(&queue->chead);
+ do {
+ curr = kzalloc(sizeof(*curr), GFP_KERNEL);
+ if (!curr)
+ goto cmd_qfail;
+
+ c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
+ rem_q_size;
+ curr->head = dma_alloc_coherent(&pdev->dev,
+ c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
+ &curr->dma_addr, GFP_KERNEL);
+ if (!curr->head) {
+ dev_err(&pdev->dev,
+ "Command Q (%d) chunk (%d) allocation failed\n",
+ i, queue->num_chunks);
+ goto free_curr;
+ }
+ curr->size = c_size;
+
+ if (queue->num_chunks == 0) {
+ first = curr;
+ queue->base = first;
+ }
+ list_add_tail(&curr->nextchunk,
+ &cqinfo->queue[i].chead);
+
+ queue->num_chunks++;
+ rem_q_size -= c_size;
+ if (last)
+ *((u64 *)(&last->head[last->size])) =
+ (u64)curr->dma_addr;
+
+ last = curr;
+ } while (rem_q_size);
+
+ /*
+ * Make the queue circular, tie back last chunk entry to head
+ */
+ curr = first;
+ *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
+ queue->qhead = curr;
+ }
+ return 0;
+free_curr:
+ kfree(curr);
+cmd_qfail:
+ free_command_queues(cptvf, cqinfo);
+ return -ENOMEM;
+}
+
+static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ int ret;
+
+ /* setup command queues */
+ ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
+ cptvf->num_queues);
+ return ret;
+ }
+ return ret;
+}
+
+static void cleanup_command_queues(struct otx_cptvf *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+
+ if (!cptvf->num_queues)
+ return;
+
+ dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
+ cptvf->num_queues);
+ free_command_queues(cptvf, &cptvf->cqinfo);
+}
+
+static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
+{
+ cleanup_worker_threads(cptvf);
+ cleanup_pending_queues(cptvf);
+ cleanup_command_queues(cptvf);
+}
+
+static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ u32 max_dev_queues = 0;
+ int ret;
+
+ max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
+ /* Clamp to the number of queues supported per VF */
+ num_queues = min_t(u32, num_queues, max_dev_queues);
+ cptvf->num_queues = num_queues;
+
+ ret = init_command_queues(cptvf, qlen);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
+ num_queues);
+ return ret;
+ }
+
+ ret = init_pending_queues(cptvf, qlen, num_queues);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
+ num_queues);
+ goto setup_pqfail;
+ }
+
+ /* Create worker threads for BH processing */
+ ret = init_worker_threads(cptvf);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to setup worker threads\n");
+ goto init_work_fail;
+ }
+ return 0;
+
+init_work_fail:
+ cleanup_worker_threads(cptvf);
+ cleanup_pending_queues(cptvf);
+
+setup_pqfail:
+ cleanup_command_queues(cptvf);
+
+ return ret;
+}
+
+static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
+{
+ irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
+ free_cpumask_var(cptvf->affinity_mask[vec]);
+}
+
+static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
+{
+ union otx_cptx_vqx_ctl vqx_ctl;
+
+ vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
+ vqx_ctl.s.ena = val;
+ writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
+}
+
+void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
+{
+ union otx_cptx_vqx_doorbell vqx_dbell;
+
+ vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
+ vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
+ writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
+}
+
+static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
+{
+ union otx_cptx_vqx_inprog vqx_inprg;
+
+ vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
+ vqx_inprg.s.inflight = val;
+ writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
+}
+
+static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ vqx_dwait.s.num_wait = val;
+ writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+}
+
+static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ return vqx_dwait.s.num_wait;
+}
+
+static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ vqx_dwait.s.time_wait = time;
+ writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+}
+
+
+static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done_wait vqx_dwait;
+
+ vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
+ return vqx_dwait.s.time_wait;
+}
+
+static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;
+
+ vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+ /* Enable SWERR interrupts for the requested VF */
+ vqx_misc_ena.s.swerr = 1;
+ writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+}
+
+static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;
+
+ vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+ /* Enable MBOX interrupt for the requested VF */
+ vqx_misc_ena.s.mbox = 1;
+ writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
+}
+
+static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done_ena_w1s vqx_done_ena;
+
+ vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
+ /* Enable DONE interrupt for the requested VF */
+ vqx_done_ena.s.done = 1;
+ writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
+}
+
+static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.dovf = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.irde = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.nwrp = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.mbox = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_misc_int vqx_misc_int;
+
+ vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+ /* W1C for the VF */
+ vqx_misc_int.s.swerr = 1;
+ writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
+{
+ return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
+}
+
+static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
+ void *arg)
+{
+ struct otx_cptvf *cptvf = arg;
+ struct pci_dev *pdev = cptvf->pdev;
+ u64 intr;
+
+ intr = cptvf_read_vf_misc_intr_status(cptvf);
+ /* Check for MISC interrupt types */
+ if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
+ dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ otx_cptvf_handle_mbox_intr(cptvf);
+ cptvf_clear_mbox_intr(cptvf);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
+ cptvf_clear_dovf_intr(cptvf);
+ /* Clear doorbell count */
+ otx_cptvf_write_vq_doorbell(cptvf, 0);
+ dev_err(&pdev->dev,
+ "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
+ cptvf_clear_irde_intr(cptvf);
+ dev_err(&pdev->dev,
+ "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
+ cptvf_clear_nwrp_intr(cptvf);
+ dev_err(&pdev->dev,
+ "NCB response write error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
+ cptvf_clear_swerr_intr(cptvf);
+ dev_err(&pdev->dev,
+ "Software error interrupt 0x%llx on CPT VF %d\n",
+ intr, cptvf->vfid);
+ } else {
+ dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
+ cptvf->vfid);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
+ int qno)
+{
+ struct otx_cptvf_wqe_info *nwqe_info;
+
+ if (unlikely(qno >= cptvf->num_queues))
+ return NULL;
+ nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
+
+ return &nwqe_info->vq_wqe[qno];
+}
+
+static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
+{
+ union otx_cptx_vqx_done vqx_done;
+
+ vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
+ return vqx_done.s.done;
+}
+
+static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
+ u32 ackcnt)
+{
+ union otx_cptx_vqx_done_ack vqx_dack_cnt;
+
+ vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
+ vqx_dack_cnt.s.done_ack = ackcnt;
+ writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
+}
+
+static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
+ void *cptvf_dev)
+{
+ struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
+ struct pci_dev *pdev = cptvf->pdev;
+ /* Read the number of completions */
+ u32 intr = cptvf_read_vq_done_count(cptvf);
+
+ if (intr) {
+ struct otx_cptvf_wqe *wqe;
+
+ /*
+ * Acknowledge the number of scheduled completions for
+ * processing
+ */
+ cptvf_write_vq_done_ack(cptvf, intr);
+ wqe = get_cptvf_vq_wqe(cptvf, 0);
+ if (unlikely(!wqe)) {
+ dev_err(&pdev->dev, "No work to schedule for VF (%d)",
+ cptvf->vfid);
+ return IRQ_NONE;
+ }
+ tasklet_hi_schedule(&wqe->twork);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
+ GFP_KERNEL)) {
+ dev_err(&pdev->dev,
+ "Allocation failed for affinity_mask for VF %d",
+ cptvf->vfid);
+ return;
+ }
+
+ cpu = cptvf->vfid % num_online_cpus();
+ cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
+ cptvf->affinity_mask[vec]);
+ irq_set_affinity_hint(pci_irq_vector(pdev, vec),
+ cptvf->affinity_mask[vec]);
+}
+
+static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
+{
+ union otx_cptx_vqx_saddr vqx_saddr;
+
+ vqx_saddr.u = val;
+ writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
+}
+
+static void cptvf_device_init(struct otx_cptvf *cptvf)
+{
+ u64 base_addr = 0;
+
+ /* Disable the VQ */
+ cptvf_write_vq_ctl(cptvf, 0);
+ /* Reset the doorbell */
+ otx_cptvf_write_vq_doorbell(cptvf, 0);
+ /* Clear inflight */
+ cptvf_write_vq_inprog(cptvf, 0);
+ /* Write VQ SADDR */
+ base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
+ cptvf_write_vq_saddr(cptvf, base_addr);
+ /* Configure timerhold / coalescence */
+ cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
+ cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
+ /* Enable the VQ */
+ cptvf_write_vq_ctl(cptvf, 1);
+ /* Flag the VF ready */
+ cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
+}
+
+static ssize_t vf_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ char *msg;
+
+ switch (cptvf->vftype) {
+ case OTX_CPT_AE_TYPES:
+ msg = "AE";
+ break;
+
+ case OTX_CPT_SE_TYPES:
+ msg = "SE";
+ break;
+
+ default:
+ msg = "Invalid";
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
+}
+
+static ssize_t vf_engine_group_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
+}
+
+static ssize_t vf_engine_group_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val < 0)
+ return -EINVAL;
+
+ if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
+ dev_err(dev, "Engine group >= than max available groups %d",
+ OTX_CPT_MAX_ENGINE_GROUPS);
+ return -EINVAL;
+ }
+
+ ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t vf_coalesc_time_wait_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ cptvf_read_vq_done_timewait(cptvf));
+}
+
+static ssize_t vf_coalesc_num_wait_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ cptvf_read_vq_done_numwait(cptvf));
+}
+
+static ssize_t vf_coalesc_time_wait_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
+ val > OTX_CPT_COALESC_MAX_TIME_WAIT)
+ return -EINVAL;
+
+ cptvf_write_vq_done_timewait(cptvf, val);
+ return count;
+}
+
+static ssize_t vf_coalesc_num_wait_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx_cptvf *cptvf = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
+ val > OTX_CPT_COALESC_MAX_NUM_WAIT)
+ return -EINVAL;
+
+ cptvf_write_vq_done_numwait(cptvf, val);
+ return count;
+}
+
+static DEVICE_ATTR_RO(vf_type);
+static DEVICE_ATTR_RW(vf_engine_group);
+static DEVICE_ATTR_RW(vf_coalesc_time_wait);
+static DEVICE_ATTR_RW(vf_coalesc_num_wait);
+
+static struct attribute *otx_cptvf_attrs[] = {
+ &dev_attr_vf_type.attr,
+ &dev_attr_vf_engine_group.attr,
+ &dev_attr_vf_coalesc_time_wait.attr,
+ &dev_attr_vf_coalesc_num_wait.attr,
+ NULL
+};
+
+static const struct attribute_group otx_cptvf_sysfs_group = {
+ .attrs = otx_cptvf_attrs,
+};
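+
+/*
+ * Example use from userspace (paths assume the standard PCI sysfs
+ * layout; <BDF> is this VF's bus/device/function):
+ *   cat /sys/bus/pci/devices/<BDF>/vf_type
+ *   echo 1 > /sys/bus/pci/devices/<BDF>/vf_engine_group
+ *   echo <num> > /sys/bus/pci/devices/<BDF>/vf_coalesc_num_wait
+ */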
+
+static int otx_cptvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct otx_cptvf *cptvf;
+ int err;
+
+ cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
+ if (!cptvf)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, cptvf);
+ cptvf->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto disable_device;
+ }
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto release_regions;
+ }
+
+ /* MAP PF's configuration registers */
+ cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
+ if (!cptvf->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto release_regions;
+ }
+
+ cptvf->node = dev_to_node(&pdev->dev);
+ err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
+ OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "Request for #%d msix vectors failed\n",
+ OTX_CPT_VF_MSIX_VECTORS);
+ goto unmap_region;
+ }
+
+ err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
+ cptvf_misc_intr_handler, 0, "CPT VF misc intr",
+ cptvf);
+ if (err) {
+ dev_err(dev, "Failed to request misc irq");
+ goto free_vectors;
+ }
+
+ /* Enable mailbox interrupt */
+ cptvf_enable_mbox_interrupts(cptvf);
+ cptvf_enable_swerr_interrupts(cptvf);
+
+ /* Check CPT PF status; get the chip ID / device ID from the PF if ready */
+ err = otx_cptvf_check_pf_ready(cptvf);
+ if (err)
+ goto free_misc_irq;
+
+ /* CPT VF software resources initialization */
+ cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
+ err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
+ if (err) {
+ dev_err(dev, "cptvf_sw_init() failed");
+ goto free_misc_irq;
+ }
+ /* Convey VQ LEN to PF */
+ err = otx_cptvf_send_vq_size_msg(cptvf);
+ if (err)
+ goto sw_cleanup;
+
+ /* CPT VF device initialization */
+ cptvf_device_init(cptvf);
+ /* Send msg to PF to assign current Q to required group */
+ err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
+ if (err)
+ goto sw_cleanup;
+
+ cptvf->priority = 1;
+ err = otx_cptvf_send_vf_priority_msg(cptvf);
+ if (err)
+ goto sw_cleanup;
+
+ err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
+ cptvf_done_intr_handler, 0, "CPT VF done intr",
+ cptvf);
+ if (err) {
+ dev_err(dev, "Failed to request done irq\n");
+ goto sw_cleanup;
+ }
+
+ /* Enable done interrupt */
+ cptvf_enable_done_interrupts(cptvf);
+
+ /* Set irq affinity masks */
+ cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+ cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+
+ err = otx_cptvf_send_vf_up(cptvf);
+ if (err)
+ goto free_irq_affinity;
+
+ /* Initialize algorithms and set ops */
+ err = otx_cpt_crypto_init(pdev, THIS_MODULE,
+ cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
+ cptvf->vftype, 1, cptvf->num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to register crypto algs\n");
+ goto free_irq_affinity;
+ }
+
+ err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
+ if (err) {
+ dev_err(dev, "Creating sysfs entries failed\n");
+ goto crypto_exit;
+ }
+
+ return 0;
+
+crypto_exit:
+ otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
+free_irq_affinity:
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
+sw_cleanup:
+ cptvf_sw_cleanup(cptvf);
+free_misc_irq:
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+free_vectors:
+ pci_free_irq_vectors(cptvf->pdev);
+unmap_region:
+ pci_iounmap(pdev, cptvf->reg_base);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void otx_cptvf_remove(struct pci_dev *pdev)
+{
+ struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
+
+ if (!cptvf) {
+ dev_err(&pdev->dev, "Invalid CPT-VF device\n");
+ return;
+ }
+
+ /* Convey DOWN to PF */
+ if (otx_cptvf_send_vf_down(cptvf)) {
+ dev_err(&pdev->dev, "PF not responding to DOWN msg");
+ } else {
+ sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
+ otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+ cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
+ free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+ cptvf_sw_cleanup(cptvf);
+ pci_free_irq_vectors(cptvf->pdev);
+ pci_iounmap(pdev, cptvf->reg_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+/* Supported devices */
+static const struct pci_device_id otx_cptvf_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx_cptvf_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = otx_cptvf_id_table,
+ .probe = otx_cptvf_probe,
+ .remove = otx_cptvf_remove,
+};
+
+module_pci_driver(otx_cptvf_pci_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
new file mode 100644
index 000000000000..5663787c7a62
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include "otx_cptvf.h"
+
+#define CPT_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+static char *get_mbox_opcode_str(int msg_opcode)
+{
+ char *str = "Unknown";
+
+ switch (msg_opcode) {
+ case OTX_CPT_MSG_VF_UP:
+ str = "UP";
+ break;
+
+ case OTX_CPT_MSG_VF_DOWN:
+ str = "DOWN";
+ break;
+
+ case OTX_CPT_MSG_READY:
+ str = "READY";
+ break;
+
+ case OTX_CPT_MSG_QLEN:
+ str = "QLEN";
+ break;
+
+ case OTX_CPT_MSG_QBIND_GRP:
+ str = "QBIND_GRP";
+ break;
+
+ case OTX_CPT_MSG_VQ_PRIORITY:
+ str = "VQ_PRIORITY";
+ break;
+
+ case OTX_CPT_MSG_PF_TYPE:
+ str = "PF_TYPE";
+ break;
+
+ case OTX_CPT_MSG_ACK:
+ str = "ACK";
+ break;
+
+ case OTX_CPT_MSG_NACK:
+ str = "NACK";
+ break;
+ }
+ return str;
+}
+
+static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
+{
+ char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];
+
+ hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
+ raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
+ if (vf_id >= 0)
+ pr_debug("MBOX msg %s received from VF%d raw_data %s",
+ get_mbox_opcode_str(mbox_msg->msg), vf_id,
+ raw_data_str);
+ else
+ pr_debug("MBOX msg %s received from PF raw_data %s",
+ get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
+}
+
+static void cptvf_send_msg_to_pf(struct otx_cptvf *cptvf,
+ struct otx_cpt_mbox *mbx)
+{
+ /* Writing mbox(1) causes interrupt */
+ writeq(mbx->msg, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
+ writeq(mbx->data, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
+}
+
+/* Interrupt handler to handle mailbox messages from PF */
+void otx_cptvf_handle_mbox_intr(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+
+ /*
+ * MBOX[0] contains msg
+ * MBOX[1] contains data
+ */
+ mbx.msg = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
+ mbx.data = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
+
+ dump_mbox_msg(&mbx, -1);
+
+ switch (mbx.msg) {
+ case OTX_CPT_MSG_VF_UP:
+ cptvf->pf_acked = true;
+ cptvf->num_vfs = mbx.data;
+ break;
+ case OTX_CPT_MSG_READY:
+ cptvf->pf_acked = true;
+ cptvf->vfid = mbx.data;
+ dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid);
+ break;
+ case OTX_CPT_MSG_QBIND_GRP:
+ cptvf->pf_acked = true;
+ cptvf->vftype = mbx.data;
+ dev_dbg(&cptvf->pdev->dev, "VF %d type %s group %d\n",
+ cptvf->vfid,
+ ((mbx.data == OTX_CPT_SE_TYPES) ? "SE" : "AE"),
+ cptvf->vfgrp);
+ break;
+ case OTX_CPT_MSG_ACK:
+ cptvf->pf_acked = true;
+ break;
+ case OTX_CPT_MSG_NACK:
+ cptvf->pf_nacked = true;
+ break;
+ default:
+ dev_err(&cptvf->pdev->dev, "Invalid msg from PF, msg 0x%llx\n",
+ mbx.msg);
+ break;
+ }
+}
+
+static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
+ struct otx_cpt_mbox *mbx)
+{
+ int timeout = CPT_MBOX_MSG_TIMEOUT;
+ int sleep = 10;
+
+ cptvf->pf_acked = false;
+ cptvf->pf_nacked = false;
+ cptvf_send_msg_to_pf(cptvf, mbx);
+ /* Wait for the message to be acked, timeout 2 sec */
+ while (!cptvf->pf_acked) {
+ if (cptvf->pf_nacked)
+ return -EINVAL;
+ msleep(sleep);
+ if (cptvf->pf_acked)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ dev_err(&cptvf->pdev->dev,
+ "PF didn't ack to mbox msg %llx from VF%u\n",
+ mbx->msg, cptvf->vfid);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Checks whether the VF can communicate with the PF
+ * and also gets the VF ID assigned to it by the PF.
+ */
+int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_READY;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate VQs size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF.
+ * Must be ACKed.
+ */
+int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_QLEN;
+ mbx.data = cptvf->qsize;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate the VF's group requirement to PF and get the VQ bound to that group
+ */
+int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_QBIND_GRP;
+ /* Convey group of the VF */
+ mbx.data = group;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+ if (ret)
+ return ret;
+ cptvf->vfgrp = group;
+
+ return 0;
+}
+
+/*
+ * Communicate this VF's queue priority to the PF. Must be ACKed.
+ */
+int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
+ /* Convey priority of the VF's queue */
+ mbx.data = cptvf->priority;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate to PF that VF is UP and running
+ */
+int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_VF_UP;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
+
+/*
+ * Communicate to PF that VF is going DOWN
+ */
+int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_mbox mbx = {};
+ int ret;
+
+ mbx.msg = OTX_CPT_MSG_VF_DOWN;
+ ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
+
+ return ret;
+}
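
Taken together, the routines above implement a simple synchronous request/ack
protocol over two 64-bit mailbox registers: the VF writes the opcode to
MBOX(0) and the payload to MBOX(1) (the second write raises the interrupt to
the PF), then polls pf_acked/pf_nacked, which the misc interrupt handler sets
when the PF's reply lands in the same registers. A condensed sketch of one
round trip, using only names defined above:

    /* sender side, condensed from cptvf_send_msg_to_pf_timeout() */
    cptvf->pf_acked = false;
    writeq(mbx.msg,  cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
    writeq(mbx.data, cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1)); /* kicks PF */
    /* ... msleep() loop until pf_acked, pf_nacked or the 2 s timeout ... */

    /* receiver side, condensed from otx_cptvf_handle_mbox_intr() */
    mbx.msg  = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 0));
    mbx.data = readq(cptvf->reg_base + OTX_CPT_VFX_PF_MBOXX(0, 1));
    if (mbx.msg == OTX_CPT_MSG_ACK)
        cptvf->pf_acked = true;
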
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
new file mode 100644
index 000000000000..df839b880354
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx_cptvf.h"
+#include "otx_cptvf_algs.h"
+
+/* Completion code size and initial value */
+#define COMPLETION_CODE_SIZE 8
+#define COMPLETION_CODE_INIT 0
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+
+/* Default timeout when waiting for free pending entry in us */
+#define CPT_PENTRY_TIMEOUT 1000
+#define CPT_PENTRY_STEP 50
+
+/* Default threshold for stopping and resuming sender requests */
+#define CPT_IQ_STOP_MARGIN 128
+#define CPT_IQ_RESUME_MARGIN 512
+
+#define CPT_DMA_ALIGN 128
+
+void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req)
+{
+ int i;
+
+ pr_debug("Gather list size %d\n", req->incnt);
+ for (i = 0; i < req->incnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->in[i].size, req->in[i].vptr,
+ (void *) req->in[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n",
+ req->in[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->in[i].vptr, req->in[i].size, false);
+ }
+
+ pr_debug("Scatter list size %d\n", req->outcnt);
+ for (i = 0; i < req->outcnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->out[i].size, req->out[i].vptr,
+ (void *) req->out[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->out[i].vptr, req->out[i].size, false);
+ }
+}
+
+static inline struct otx_cpt_pending_entry *get_free_pending_entry(
+ struct otx_cpt_pending_queue *q,
+ int qlen)
+{
+ struct otx_cpt_pending_entry *ent = NULL;
+
+ ent = &q->head[q->rear];
+ if (unlikely(ent->busy))
+ return NULL;
+
+ q->rear++;
+ if (unlikely(q->rear == qlen))
+ q->rear = 0;
+
+ return ent;
+}
+
+static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
+{
+ if (WARN_ON(inc > length))
+ inc = length;
+
+ index += inc;
+ if (unlikely(index >= length))
+ index -= length;
+
+ return index;
+}
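
Because callers guarantee inc <= length, modulo_inc() needs at most one
subtraction to wrap, avoiding an integer division on the completion hot path:
with length = 8, modulo_inc(6, 8, 3) computes 6 + 3 = 9, then 9 - 8 = 1,
which matches (6 + 3) % 8.
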
+
+static inline void free_pentry(struct otx_cpt_pending_entry *pentry)
+{
+ pentry->completion_addr = NULL;
+ pentry->info = NULL;
+ pentry->callback = NULL;
+ pentry->areq = NULL;
+ pentry->resume_sender = false;
+ pentry->busy = false;
+}
+
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx_cpt_sglist_component *sg_ptr = NULL;
+ int ret = 0, i, j;
+ int components;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (likely(list[i].vptr)) {
+ list[i].dma_addr = dma_map_single(&pdev->dev,
+ list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev,
+ list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ ret = -EIO;
+ goto sg_cleanup;
+ }
+ }
+ }
+
+ components = buf_count / 4;
+ sg_ptr = (struct otx_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
+ sg_ptr++;
+ }
+ components = buf_count % 4;
+
+ switch (components) {
+ case 3:
+ sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ /* Fall through */
+ case 2:
+ sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ /* Fall through */
+ case 1:
+ sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return ret;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[i].dma_addr,
+ list[i].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return ret;
+}
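
Each otx_cpt_sglist_component packs four length/pointer pairs, so a list of
buf_count buffers occupies DIV_ROUND_UP(buf_count, 4) components: the loop
fills the complete groups of four and the switch fills the 1-3 leftover
slots. For example, buf_count = 6 yields one full component for buffers 0-3
plus a second component with only len0/ptr0 and len1/ptr1 populated; the
unused slots stay zero because the caller passes a cleared buffer.
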
+
+static inline int setup_sgio_list(struct pci_dev *pdev,
+ struct otx_cpt_info_buffer **pinfo,
+ struct otx_cpt_req_info *req, gfp_t gfp)
+{
+ u32 dlen, align_dlen, info_len, rlen;
+ struct otx_cpt_info_buffer *info;
+ u16 g_sz_bytes, s_sz_bytes;
+ int align = CPT_DMA_ALIGN;
+ u32 total_mem_len;
+
+ if (unlikely(req->incnt > OTX_CPT_MAX_SG_IN_CNT ||
+ req->outcnt > OTX_CPT_MAX_SG_OUT_CNT)) {
+ dev_err(&pdev->dev, "Error too many sg components\n");
+ return -EINVAL;
+ }
+
+ g_sz_bytes = ((req->incnt + 3) / 4) *
+ sizeof(struct otx_cpt_sglist_component);
+ s_sz_bytes = ((req->outcnt + 3) / 4) *
+ sizeof(struct otx_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ rlen = ALIGN(sizeof(union otx_cpt_res_s), align);
+ total_mem_len = align_dlen + info_len + rlen + COMPLETION_CODE_SIZE;
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ *pinfo = info;
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
+ ((u16 *)info->in_buffer)[0] = req->outcnt;
+ ((u16 *)info->in_buffer)[1] = req->incnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->incnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ return -EFAULT;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->outcnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ return -EFAULT;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, (void *)info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ return -EIO;
+ }
+ /*
+ * Get buffer for union otx_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = (u64 *)(info->in_buffer + align_dlen);
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ /* Create and initialize RPTR */
+ info->out_buffer = (u8 *)info->completion_addr + rlen;
+ info->rptr_baddr = info->comp_baddr + rlen;
+
+ *((u64 *) info->out_buffer) = ~((u64) COMPLETION_CODE_INIT);
+
+ return 0;
+}
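
setup_sgio_list() carves a single kzalloc'd region into four areas; a sketch
of the layout implied by the offsets computed above:

    /*
     * +-------------------------------+ <- info            (info_len =
     * | struct otx_cpt_info_buffer    |    ALIGN(sizeof(*info), 128))
     * +-------------------------------+ <- in_buffer       (dptr_baddr)
     * | SG hdr + gather + scatter     |    dlen, padded to align_dlen
     * +-------------------------------+ <- completion_addr (comp_baddr)
     * | union otx_cpt_res_s           |    rlen
     * +-------------------------------+ <- out_buffer      (rptr_baddr)
     * | completion code word          |    COMPLETION_CODE_SIZE
     * +-------------------------------+
     *
     * Only the dma_len bytes starting at in_buffer are DMA mapped;
     * comp_baddr and rptr_baddr are offsets into that one mapping.
     */
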
+
+
+static void cpt_fill_inst(union otx_cpt_inst_s *inst,
+ struct otx_cpt_info_buffer *info,
+ struct otx_cpt_iq_cmd *cmd)
+{
+ inst->u[0] = 0x0;
+ inst->s.doneint = true;
+ inst->s.res_addr = (u64)info->comp_baddr;
+ inst->u[2] = 0x0;
+ inst->s.wq_ptr = 0;
+ inst->s.ei0 = cmd->cmd.u64;
+ inst->s.ei1 = cmd->dptr;
+ inst->s.ei2 = cmd->rptr;
+ inst->s.ei3 = cmd->cptr.u64;
+}
+
+/*
+ * On the OcteonTX platform the db_count parameter is the doorbell count,
+ * i.e. the number of newly enqueued instructions CPT is informed about:
+ * 0 - one CPT instruction is enqueued but CPT is not informed
+ * 1 - one CPT instruction is enqueued and CPT is informed
+ */
+static void cpt_send_cmd(union otx_cpt_inst_s *cptinst, struct otx_cptvf *cptvf)
+{
+ struct otx_cpt_cmd_qinfo *qinfo = &cptvf->cqinfo;
+ struct otx_cpt_cmd_queue *queue;
+ struct otx_cpt_cmd_chunk *curr;
+ u8 *ent;
+
+ queue = &qinfo->queue[0];
+ /*
+ * cpt_send_cmd is currently called only from critical section
+ * therefore no locking is required for accessing instruction queue
+ */
+ ent = &queue->qhead->head[queue->idx * OTX_CPT_INST_SIZE];
+ memcpy(ent, (void *) cptinst, OTX_CPT_INST_SIZE);
+
+ if (++queue->idx >= queue->qhead->size / 64) {
+ curr = queue->qhead;
+
+ if (list_is_last(&curr->nextchunk, &queue->chead))
+ queue->qhead = queue->base;
+ else
+ queue->qhead = list_next_entry(queue->qhead, nextchunk);
+ queue->idx = 0;
+ }
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+ otx_cptvf_write_vq_doorbell(cptvf, 1);
+}
+
+static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
+ struct otx_cpt_pending_queue *pqueue,
+ struct otx_cptvf *cptvf)
+{
+ struct otx_cptvf_request *cpt_req = &req->req;
+ struct otx_cpt_pending_entry *pentry = NULL;
+ union otx_cpt_ctrl_info *ctrl = &req->ctrl;
+ struct otx_cpt_info_buffer *info = NULL;
+ union otx_cpt_res_s *result = NULL;
+ struct otx_cpt_iq_cmd iq_cmd;
+ union otx_cpt_inst_s cptinst;
+ int retry, ret = 0;
+ u8 resume_sender;
+ gfp_t gfp;
+
+ gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+ ret = setup_sgio_list(pdev, &info, req, gfp);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev, "Setting up SG list failed");
+ goto request_cleanup;
+ }
+ cpt_req->dlen = info->dlen;
+
+ result = (union otx_cpt_res_s *) info->completion_addr;
+ result->s.compcode = COMPLETION_CODE_INIT;
+
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
+ while (unlikely(!pentry) && retry--) {
+ spin_unlock_bh(&pqueue->lock);
+ udelay(CPT_PENTRY_STEP);
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ }
+
+ if (unlikely(!pentry)) {
+ ret = -ENOSPC;
+ spin_unlock_bh(&pqueue->lock);
+ goto request_cleanup;
+ }
+
+ /*
+ * Check if we are close to filling in entire pending queue,
+ * if so then tell the sender to stop/sleep by returning -EBUSY
+ * We do it only for context which can sleep (GFP_KERNEL)
+ */
+ if (gfp == GFP_KERNEL &&
+ pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
+ pentry->resume_sender = true;
+ } else
+ pentry->resume_sender = false;
+ resume_sender = pentry->resume_sender;
+ pqueue->pending_count++;
+
+ pentry->completion_addr = info->completion_addr;
+ pentry->info = info;
+ pentry->callback = req->callback;
+ pentry->areq = req->areq;
+ pentry->busy = true;
+ info->pentry = pentry;
+ info->time_in = jiffies;
+ info->req = req;
+
+ /* Fill in the command */
+ iq_cmd.cmd.u64 = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
+ iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
+ iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
+ iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ iq_cmd.cmd.u64 = cpu_to_be64(iq_cmd.cmd.u64);
+ iq_cmd.dptr = info->dptr_baddr;
+ iq_cmd.rptr = info->rptr_baddr;
+ iq_cmd.cptr.u64 = 0;
+ iq_cmd.cptr.s.grp = ctrl->s.grp;
+
+ /* Fill in the CPT_INST_S type command for HW interpretation */
+ cpt_fill_inst(&cptinst, info, &iq_cmd);
+
+ /* Print debug info if enabled */
+ otx_cpt_dump_sg_list(pdev, req);
+ pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX_CPT_INST_SIZE);
+ print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX_CPT_INST_SIZE, false);
+ pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
+ print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
+ cpt_req->dlen, false);
+
+ /* Send CPT command */
+ cpt_send_cmd(&cptinst, cptvf);
+
+ /*
+ * We allocate and prepare pending queue entry in critical section
+ * together with submitting CPT instruction to CPT instruction queue
+ * to make sure that order of CPT requests is the same in both
+ * pending and instruction queues
+ */
+ spin_unlock_bh(&pqueue->lock);
+
+ ret = resume_sender ? -EBUSY : -EINPROGRESS;
+ return ret;
+
+request_cleanup:
+ do_request_cleanup(pdev, info);
+ return ret;
+}
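
Note the return convention: a queued request never yields 0. -EINPROGRESS
means the request was accepted, while -EBUSY means it was accepted but the
pending queue is close to full and a sleepable (GFP_KERNEL) sender should
pause until its callback is invoked with -EINPROGRESS by
process_pending_queue(). A hedged sketch of what a caller is expected to do
with these values:

    ret = otx_cpt_do_request(pdev, req, cpu_num);
    switch (ret) {
    case -EINPROGRESS:  /* queued; completion arrives via callback */
        break;
    case -EBUSY:        /* queued, but hold further submissions until
                         * the callback reports -EINPROGRESS */
        break;
    default:            /* submission failed with ret */
        break;
    }
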
+
+int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
+ int cpu_num)
+{
+ struct otx_cptvf *cptvf = pci_get_drvdata(pdev);
+
+ if (!otx_cpt_device_ready(cptvf)) {
+ dev_err(&pdev->dev, "CPT Device is not ready");
+ return -ENODEV;
+ }
+
+ if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) {
+ dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
+ cptvf->vfid);
+ return -EINVAL;
+ } else if ((cptvf->vftype == OTX_CPT_AE_TYPES) &&
+ (req->ctrl.s.se_req)) {
+ dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
+ cptvf->vfid);
+ return -EINVAL;
+ }
+
+ return process_request(pdev, req, &cptvf->pqinfo.queue[0], cptvf);
+}
+
+static int cpt_process_ccode(struct pci_dev *pdev,
+ union otx_cpt_res_s *cpt_status,
+ struct otx_cpt_info_buffer *cpt_info,
+ struct otx_cpt_req_info *req, u32 *res_code)
+{
+ u8 ccode = cpt_status->s.compcode;
+ union otx_cpt_error_code ecode;
+
+ ecode.u = be64_to_cpu(*((u64 *) cpt_info->out_buffer));
+ switch (ccode) {
+ case CPT_COMP_E_FAULT:
+ dev_err(&pdev->dev,
+ "Request failed with DMA fault\n");
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+
+ case CPT_COMP_E_SWERR:
+ dev_err(&pdev->dev,
+ "Request failed with software error code %d\n",
+ ecode.s.ccode);
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+
+ case CPT_COMP_E_HWERR:
+ dev_err(&pdev->dev,
+ "Request failed with hardware error\n");
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+
+ case COMPLETION_CODE_INIT:
+ /* check for timeout */
+ if (time_after_eq(jiffies, cpt_info->time_in +
+ OTX_CPT_COMMAND_TIMEOUT * HZ)) {
+ dev_warn(&pdev->dev, "Request timed out 0x%p\n", req);
+ } else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
+ cpt_info->time_in = jiffies;
+ cpt_info->extra_time++;
+ }
+ return 1;
+
+ case CPT_COMP_E_GOOD:
+ /* Check microcode completion code */
+ if (ecode.s.ccode) {
+ /*
+ * If requested hmac is truncated and ucode returns
+ * s/g write length error then we report success
+ * because ucode writes as many bytes of calculated
+ * hmac as available in gather buffer and reports
+ * s/g write length error if number of bytes in gather
+ * buffer is less than full hmac size.
+ */
+ if (req->is_trunc_hmac &&
+ ecode.s.ccode == ERR_SCATTER_GATHER_WRITE_LENGTH) {
+ *res_code = 0;
+ break;
+ }
+
+ dev_err(&pdev->dev,
+ "Request failed with software error code 0x%x\n",
+ ecode.s.ccode);
+ otx_cpt_dump_sg_list(pdev, req);
+ break;
+ }
+
+ /* Request has been processed with success */
+ *res_code = 0;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Request returned invalid status\n");
+ break;
+ }
+
+ return 0;
+}
+
+static inline void process_pending_queue(struct pci_dev *pdev,
+ struct otx_cpt_pending_queue *pqueue)
+{
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct otx_cpt_pending_entry *resume_pentry = NULL;
+ struct otx_cpt_pending_entry *pentry = NULL;
+ struct otx_cpt_info_buffer *cpt_info = NULL;
+ union otx_cpt_res_s *cpt_status = NULL;
+ struct otx_cpt_req_info *req = NULL;
+ struct crypto_async_request *areq;
+ u32 res_code, resume_index;
+
+ while (1) {
+ spin_lock_bh(&pqueue->lock);
+ pentry = &pqueue->head[pqueue->front];
+
+ if (WARN_ON(!pentry)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ res_code = -EINVAL;
+ if (unlikely(!pentry->busy)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ if (unlikely(!pentry->callback)) {
+ dev_err(&pdev->dev, "Callback NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_info = pentry->info;
+ if (unlikely(!cpt_info)) {
+ dev_err(&pdev->dev, "Pending entry post arg NULL\n");
+ goto process_pentry;
+ }
+
+ req = cpt_info->req;
+ if (unlikely(!req)) {
+ dev_err(&pdev->dev, "Request NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_status = (union otx_cpt_res_s *) pentry->completion_addr;
+ if (unlikely(!cpt_status)) {
+ dev_err(&pdev->dev, "Completion address NULL\n");
+ goto process_pentry;
+ }
+
+ if (cpt_process_ccode(pdev, cpt_status, cpt_info, req,
+ &res_code)) {
+ spin_unlock_bh(&pqueue->lock);
+ return;
+ }
+ cpt_info->pdev = pdev;
+
+process_pentry:
+ /*
+ * Check if we should inform sending side to resume
+ * We do it CPT_IQ_RESUME_MARGIN elements in advance before
+ * pending queue becomes empty
+ */
+ resume_index = modulo_inc(pqueue->front, pqueue->qlen,
+ CPT_IQ_RESUME_MARGIN);
+ resume_pentry = &pqueue->head[resume_index];
+ if (resume_pentry &&
+ resume_pentry->resume_sender) {
+ resume_pentry->resume_sender = false;
+ callback = resume_pentry->callback;
+ areq = resume_pentry->areq;
+
+ if (callback) {
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * EINPROGRESS is an indication for sending
+ * side that it can resume sending requests
+ */
+ callback(-EINPROGRESS, areq, cpt_info);
+ spin_lock_bh(&pqueue->lock);
+ }
+ }
+
+ callback = pentry->callback;
+ areq = pentry->areq;
+ free_pentry(pentry);
+
+ pqueue->pending_count--;
+ pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * Call callback after current pending entry has been
+ * processed, we don't do it if the callback pointer is
+ * invalid.
+ */
+ if (callback)
+ callback(res_code, areq, cpt_info);
+ }
+}
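
With the default margins this gives hysteresis around a nearly full queue:
senders are flagged to stop once fewer than CPT_IQ_STOP_MARGIN (128) free
entries remain, and the sender that received -EBUSY is notified again once
the consumer comes within CPT_IQ_RESUME_MARGIN (512) entries of its flagged
request, so it is woken while the queue is still draining rather than only
when it runs empty. For example, with a qlen of 2048, senders pause above
1920 pending entries.
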
+
+void otx_cpt_post_process(struct otx_cptvf_wqe *wqe)
+{
+ process_pending_queue(wqe->cptvf->pdev, &wqe->cptvf->pqinfo.queue[0]);
+}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
new file mode 100644
index 000000000000..a4c9ff730b13
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTX CPT driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OTX_CPTVF_REQUEST_MANAGER_H
+#define __OTX_CPTVF_REQUEST_MANAGER_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/pci.h>
+#include "otx_cpt_hw_types.h"
+
+/*
+ * Maximum total number of SG buffers is 100; we divide them equally
+ * between input and output
+ */
+#define OTX_CPT_MAX_SG_IN_CNT 50
+#define OTX_CPT_MAX_SG_OUT_CNT 50
+
+/* DMA mode direct or SG */
+#define OTX_CPT_DMA_DIRECT_DIRECT 0
+#define OTX_CPT_DMA_GATHER_SCATTER 1
+
+/* Context source CPTR or DPTR */
+#define OTX_CPT_FROM_CPTR 0
+#define OTX_CPT_FROM_DPTR 1
+
+/* CPT instruction queue alignment */
+#define OTX_CPT_INST_Q_ALIGNMENT 128
+#define OTX_CPT_MAX_REQ_SIZE 65535
+
+/* Default command timeout in seconds */
+#define OTX_CPT_COMMAND_TIMEOUT 4
+#define OTX_CPT_TIMER_HOLD 0x03F
+#define OTX_CPT_COUNT_HOLD 32
+#define OTX_CPT_TIME_IN_RESET_COUNT 5
+
+/* Minimum and maximum values for interrupt coalescing */
+#define OTX_CPT_COALESC_MIN_TIME_WAIT 0x0
+#define OTX_CPT_COALESC_MAX_TIME_WAIT ((1<<16)-1)
+#define OTX_CPT_COALESC_MIN_NUM_WAIT 0x0
+#define OTX_CPT_COALESC_MAX_NUM_WAIT ((1<<20)-1)
+
+union otx_cpt_opcode_info {
+ u16 flags;
+ struct {
+ u8 major;
+ u8 minor;
+ } s;
+};
+
+struct otx_cptvf_request {
+ u32 param1;
+ u32 param2;
+ u16 dlen;
+ union otx_cpt_opcode_info opcode;
+};
+
+struct otx_cpt_buf_ptr {
+ u8 *vptr;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+union otx_cpt_ctrl_info {
+ u32 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved0:26;
+ u32 grp:3; /* Group bits */
+ u32 dma_mode:2; /* DMA mode */
+ u32 se_req:1; /* To SE core */
+#else
+ u32 se_req:1; /* To SE core */
+ u32 dma_mode:2; /* DMA mode */
+ u32 grp:3; /* Group bits */
+ u32 reserved0:26;
+#endif
+ } s;
+};
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+union otx_cpt_iq_cmd_word0 {
+ u64 u64;
+ struct {
+ u16 opcode;
+ u16 param1;
+ u16 param2;
+ u16 dlen;
+ } s;
+};
+
+union otx_cpt_iq_cmd_word3 {
+ u64 u64;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 grp:3;
+ u64 cptr:61;
+#else
+ u64 cptr:61;
+ u64 grp:3;
+#endif
+ } s;
+};
+
+struct otx_cpt_iq_cmd {
+ union otx_cpt_iq_cmd_word0 cmd;
+ u64 dptr;
+ u64 rptr;
+ union otx_cpt_iq_cmd_word3 cptr;
+};
+
+struct otx_cpt_sglist_component {
+ union {
+ u64 len;
+ struct {
+ u16 len0;
+ u16 len1;
+ u16 len2;
+ u16 len3;
+ } s;
+ } u;
+ u64 ptr0;
+ u64 ptr1;
+ u64 ptr2;
+ u64 ptr3;
+};
+
+struct otx_cpt_pending_entry {
+ u64 *completion_addr; /* Completion address */
+ struct otx_cpt_info_buffer *info;
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ u8 resume_sender; /* Notify sender to resume sending requests */
+ u8 busy; /* Entry status (free/busy) */
+};
+
+struct otx_cpt_pending_queue {
+ struct otx_cpt_pending_entry *head; /* Head of the queue */
+ u32 front; /* Process work from here */
+ u32 rear; /* Append new work here */
+ u32 pending_count; /* Pending requests count */
+ u32 qlen; /* Queue length */
+ spinlock_t lock; /* Queue lock */
+};
+
+struct otx_cpt_req_info {
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ struct otx_cptvf_request req;/* Request information (core specific) */
+ union otx_cpt_ctrl_info ctrl;/* User control information */
+ struct otx_cpt_buf_ptr in[OTX_CPT_MAX_SG_IN_CNT];
+ struct otx_cpt_buf_ptr out[OTX_CPT_MAX_SG_OUT_CNT];
+ u8 *iv_out; /* IV to send back */
+ u16 rlen; /* Output length */
+ u8 incnt; /* Number of input buffers */
+ u8 outcnt; /* Number of output buffers */
+ u8 req_type; /* Type of request */
+ u8 is_enc; /* Is a request an encryption request */
+ u8 is_trunc_hmac;/* Is truncated hmac used */
+};
+
+struct otx_cpt_info_buffer {
+ struct otx_cpt_pending_entry *pentry;
+ struct otx_cpt_req_info *req;
+ struct pci_dev *pdev;
+ u64 *completion_addr;
+ u8 *out_buffer;
+ u8 *in_buffer;
+ dma_addr_t dptr_baddr;
+ dma_addr_t rptr_baddr;
+ dma_addr_t comp_baddr;
+ unsigned long time_in;
+ u32 dlen;
+ u32 dma_len;
+ u8 extra_time;
+};
+
+static inline void do_request_cleanup(struct pci_dev *pdev,
+ struct otx_cpt_info_buffer *info)
+{
+ struct otx_cpt_req_info *req;
+ int i;
+
+ if (unlikely(!info))
+ return;
+
+ if (info->dptr_baddr)
+ dma_unmap_single(&pdev->dev, info->dptr_baddr,
+ info->dma_len, DMA_BIDIRECTIONAL);
+
+ if (info->req) {
+ req = info->req;
+ for (i = 0; i < req->outcnt; i++) {
+ if (req->out[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->out[i].dma_addr,
+ req->out[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ for (i = 0; i < req->incnt; i++) {
+ if (req->in[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->in[i].dma_addr,
+ req->in[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ kzfree(info);
+}
+
+struct otx_cptvf_wqe;
+void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req);
+void otx_cpt_post_process(struct otx_cptvf_wqe *wqe);
+int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
+ int cpu_num);
+
+#endif /* __OTX_CPTVF_REQUEST_MANAGER_H */
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 9e9f48bb7f85..bd6309e57ab8 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -107,7 +107,7 @@ struct mtk_sha_ctx {
u8 id;
u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));
- struct mtk_sha_hmac_ctx base[0];
+ struct mtk_sha_hmac_ctx base[];
};
struct mtk_sha_drv {
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 435ac1c83df9..d84530293036 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -20,6 +20,7 @@
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
@@ -611,49 +612,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
- const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *out_buf = sdcp->coh->sha_out_buf;
- uint8_t *src_buf;
-
struct scatterlist *src;
- unsigned int i, len, clen;
+ unsigned int i, len, clen, oft = 0;
int ret;
int fin = rctx->fini;
if (fin)
rctx->fini = 0;
- for_each_sg(req->src, src, nents, i) {
- src_buf = sg_virt(src);
- len = sg_dma_len(src);
-
- do {
- if (actx->fill + len > DCP_BUF_SZ)
- clen = DCP_BUF_SZ - actx->fill;
- else
- clen = len;
-
- memcpy(in_buf + actx->fill, src_buf, clen);
- len -= clen;
- src_buf += clen;
- actx->fill += clen;
+ src = req->src;
+ len = req->nbytes;
- /*
- * If we filled the buffer and still have some
- * more data, submit the buffer.
- */
- if (len && actx->fill == DCP_BUF_SZ) {
- ret = mxs_dcp_run_sha(req);
- if (ret)
- return ret;
- actx->fill = 0;
- rctx->init = 0;
- }
- } while (len);
+ while (len) {
+ if (actx->fill + len > DCP_BUF_SZ)
+ clen = DCP_BUF_SZ - actx->fill;
+ else
+ clen = len;
+
+ scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
+ 0);
+
+ len -= clen;
+ oft += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer and still have some
+ * more data, submit the buffer.
+ */
+ if (len && actx->fill == DCP_BUF_SZ) {
+ ret = mxs_dcp_run_sha(req);
+ if (ret)
+ return ret;
+ actx->fill = 0;
+ rctx->init = 0;
+ }
}
if (fin) {
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 91c54289124a..c6233173c612 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -37,7 +37,7 @@ struct max_sync_cop {
u32 fc;
u32 mode;
u32 triplets;
- struct msc_triplet trip[0];
+ struct msc_triplet trip[];
} __packed;
struct alg_props {
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 4f915a4ef5b0..e4072cd38585 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -159,7 +159,7 @@ struct omap_sham_reqctx {
int sg_len;
unsigned int total; /* total request */
- u8 buffer[0] OMAP_ALIGNED;
+ u8 buffer[] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
@@ -176,7 +176,7 @@ struct omap_sham_ctx {
/* fallback stuff */
struct crypto_shash *fallback;
- struct omap_sham_hmac_ctx base[0];
+ struct omap_sham_hmac_ctx base[];
};
#define OMAP_SHAM_QUEUE_LENGTH 10
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 833bb1d3a11b..e14d3dd291f0 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -97,7 +97,7 @@ struct qat_alg_cd {
struct icp_qat_hw_cipher_algo_blk cipher;
struct icp_qat_hw_auth_algo_blk hash;
} qat_enc_cd;
- struct qat_dec { /* Decrytp content desc */
+ struct qat_dec { /* Decrypt content desc */
struct icp_qat_hw_auth_algo_blk hash;
struct icp_qat_hw_cipher_algo_blk cipher;
} qat_dec_cd;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 3852d31ce0a4..fb504cee0305 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -250,8 +250,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
INIT_LIST_HEAD(&accel_dev->crypto_list);
- strlcpy(key, ADF_NUM_CY, sizeof(key));
- if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+ if (adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val))
return -EFAULT;
if (kstrtoul(val, 0, &num_inst))
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 629e7f34dc09..5006e74c40cd 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -15,8 +15,6 @@
#include "regs-v5.h"
#include "sha.h"
-#define QCE_SECTOR_SIZE 512
-
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
return readl(qce->base + offset);
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 282d4317470d..9f989cba0f1b 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -12,6 +12,9 @@
#include <crypto/hash.h>
#include <crypto/internal/skcipher.h>
+/* XTS data unit size */
+#define QCE_SECTOR_SIZE 512
+
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 7da893dc00e7..46db5bf366b4 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -48,9 +48,10 @@ void qce_dma_release(struct qce_dma_data *dma)
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
- int max_ents)
+ unsigned int max_len)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+ unsigned int new_len;
while (sg) {
if (!sg_page(sg))
@@ -61,13 +62,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg && max_ents) {
- sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
- new_sgl->offset);
+ while (new_sgl && sg && max_len) {
+ new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
+ sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
- max_ents--;
+ max_len -= new_len;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index ed25a0d9829e..786402169360 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -43,6 +43,6 @@ void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
- int max_ents);
+ unsigned int max_len);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 4217b745f124..9412433f3b21 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
@@ -12,6 +13,13 @@
#include "cipher.h"
+static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
+module_param(aes_sw_max_len, uint, 0644);
+MODULE_PARM_DESC(aes_sw_max_len,
+ "Only use hardware for AES requests larger than this "
+ "[0=always use hardware; anything <16 breaks AES-GCM; default="
+ __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");
+
static LIST_HEAD(skcipher_algs);
static void qce_skcipher_done(void *data)
@@ -97,13 +105,14 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
+ QCE_RESULT_BUF_SZ);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -165,15 +174,10 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
+ memcpy(ctx->enc_key, key, keylen);
break;
- default:
- goto fallback;
}
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
- return 0;
-fallback:
ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
@@ -223,8 +227,14 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
- keylen != AES_KEYSIZE_256) {
+ /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
+ * is not a multiple of it; pass such requests to the fallback
+ */
+ if (IS_AES(rctx->flags) &&
+ (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+ req->cryptlen <= aes_sw_max_len) ||
+ (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
+ req->cryptlen % QCE_SECTOR_SIZE))) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index d66e20a2f54c..2a16800d2579 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -369,7 +369,7 @@ struct s5p_hash_reqctx {
bool error;
u32 bufcnt;
- u8 buffer[0];
+ u8 buffer[];
};
/**
diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore
index af4a7ce4738d..7aa71d83f739 100644
--- a/drivers/crypto/vmx/.gitignore
+++ b/drivers/crypto/vmx/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
aesp8-ppc.S
ghashp8-ppc.S
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
new file mode 100644
index 000000000000..534e32daf76a
--- /dev/null
+++ b/drivers/crypto/xilinx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
new file mode 100644
index 000000000000..09f7f468eef8
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP AES Driver.
+ * Copyright (c) 2020 Xilinx Inc.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_DMA_BIT_MASK 32U
+
+#define ZYNQMP_AES_KEY_SIZE AES_KEYSIZE_256
+#define ZYNQMP_AES_AUTH_SIZE 16U
+#define ZYNQMP_KEY_SRC_SEL_KEY_LEN 1U
+#define ZYNQMP_AES_BLK_SIZE 1U
+#define ZYNQMP_AES_MIN_INPUT_BLK_SIZE 4U
+#define ZYNQMP_AES_WORD_LEN 4U
+
+#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
+#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
+#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
+
+enum zynqmp_aead_op {
+ ZYNQMP_AES_DECRYPT = 0,
+ ZYNQMP_AES_ENCRYPT
+};
+
+enum zynqmp_aead_keysrc {
+ ZYNQMP_AES_KUP_KEY = 0,
+ ZYNQMP_AES_DEV_KEY,
+ ZYNQMP_AES_PUF_KEY
+};
+
+struct zynqmp_aead_drv_ctx {
+ union {
+ struct aead_alg aead;
+ } alg;
+ struct device *dev;
+ struct crypto_engine *engine;
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+struct zynqmp_aead_hw_req {
+ u64 src;
+ u64 iv;
+ u64 key;
+ u64 dst;
+ u64 size;
+ u64 op;
+ u64 keysrc;
+};
+
+struct zynqmp_aead_tfm_ctx {
+ struct crypto_engine_ctx engine_ctx;
+ struct device *dev;
+ u8 key[ZYNQMP_AES_KEY_SIZE];
+ u8 *iv;
+ u32 keylen;
+ u32 authsize;
+ enum zynqmp_aead_keysrc keysrc;
+ struct crypto_aead *fbk_cipher;
+};
+
+struct zynqmp_aead_req_ctx {
+ enum zynqmp_aead_op op;
+};
+
+static int zynqmp_aes_aead_cipher(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct device *dev = tfm_ctx->dev;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct zynqmp_aead_hw_req *hwreq;
+ dma_addr_t dma_addr_data, dma_addr_hw_req;
+ unsigned int data_size;
+ unsigned int status;
+ size_t dma_size;
+ char *kbuf;
+ int err;
+
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ if (!drv_ctx->eemi_ops->aes)
+ return -ENOTSUPP;
+
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
+ dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ + GCM_AES_IV_SIZE;
+ else
+ dma_size = req->cryptlen + GCM_AES_IV_SIZE;
+
+ kbuf = dma_alloc_coherent(dev, dma_size, &dma_addr_data, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ hwreq = dma_alloc_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
+ &dma_addr_hw_req, GFP_KERNEL);
+ if (!hwreq) {
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ return -ENOMEM;
+ }
+
+ data_size = req->cryptlen;
+ scatterwalk_map_and_copy(kbuf, req->src, 0, req->cryptlen, 0);
+ memcpy(kbuf + data_size, req->iv, GCM_AES_IV_SIZE);
+
+ hwreq->src = dma_addr_data;
+ hwreq->dst = dma_addr_data;
+ hwreq->iv = hwreq->src + data_size;
+ hwreq->keysrc = tfm_ctx->keysrc;
+ hwreq->op = rq_ctx->op;
+
+ if (hwreq->op == ZYNQMP_AES_ENCRYPT)
+ hwreq->size = data_size;
+ else
+ hwreq->size = data_size - ZYNQMP_AES_AUTH_SIZE;
+
+ if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY) {
+ memcpy(kbuf + data_size + GCM_AES_IV_SIZE,
+ tfm_ctx->key, ZYNQMP_AES_KEY_SIZE);
+
+ hwreq->key = hwreq->src + data_size + GCM_AES_IV_SIZE;
+ } else {
+ hwreq->key = 0;
+ }
+
+ drv_ctx->eemi_ops->aes(dma_addr_hw_req, &status);
+
+ if (status) {
+ switch (status) {
+ case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
+ dev_err(dev, "ERROR: Gcm Tag mismatch\n");
+ break;
+ case ZYNQMP_AES_WRONG_KEY_SRC_ERR:
+ dev_err(dev, "ERROR: Wrong KeySrc, enable secure mode\n");
+ break;
+ case ZYNQMP_AES_PUF_NOT_PROGRAMMED:
+ dev_err(dev, "ERROR: PUF is not registered\n");
+ break;
+ default:
+ dev_err(dev, "ERROR: Unknown error\n");
+ break;
+ }
+ err = -status;
+ } else {
+ if (hwreq->op == ZYNQMP_AES_ENCRYPT)
+ data_size = data_size + ZYNQMP_AES_AUTH_SIZE;
+ else
+ data_size = data_size - ZYNQMP_AES_AUTH_SIZE;
+
+ sg_copy_from_buffer(req->dst, sg_nents(req->dst),
+ kbuf, data_size);
+ err = 0;
+ }
+
+ if (kbuf) {
+ memzero_explicit(kbuf, dma_size);
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ }
+ if (hwreq) {
+ memzero_explicit(hwreq, sizeof(struct zynqmp_aead_hw_req));
+ dma_free_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
+ hwreq, dma_addr_hw_req);
+ }
+ return err;
+}
+
+static int zynqmp_fallback_check(struct zynqmp_aead_tfm_ctx *tfm_ctx,
+ struct aead_request *req)
+{
+ int need_fallback = 0;
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ if (tfm_ctx->authsize != ZYNQMP_AES_AUTH_SIZE)
+ need_fallback = 1;
+
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY &&
+ tfm_ctx->keylen != ZYNQMP_AES_KEY_SIZE) {
+ need_fallback = 1;
+ }
+ if (req->assoclen != 0 ||
+ req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE) {
+ need_fallback = 1;
+ }
+ if ((req->cryptlen % ZYNQMP_AES_WORD_LEN) != 0)
+ need_fallback = 1;
+
+ if (rq_ctx->op == ZYNQMP_AES_DECRYPT &&
+ req->cryptlen <= ZYNQMP_AES_AUTH_SIZE) {
+ need_fallback = 1;
+ }
+ return need_fallback;
+}
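
Put differently, the hardware path is taken only for plain gcm(aes) with a
16-byte tag, no associated data, a cryptlen that is at least 4 bytes and
word-aligned, a full 256-bit key whenever a user-supplied (KUP) key is
selected, and, on decryption, a cryptlen larger than the tag; everything
else, notably any request carrying AAD, is handed to the fallback cipher.
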
+
+static int zynqmp_handle_aes_req(struct crypto_engine *engine,
+ void *req)
+{
+ struct aead_request *areq =
+ container_of(req, struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(areq);
+ struct aead_request *subreq = aead_request_ctx(areq);
+ int need_fallback;
+ int err;
+
+ need_fallback = zynqmp_fallback_check(tfm_ctx, areq);
+
+ if (need_fallback) {
+ aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
+
+ aead_request_set_callback(subreq, areq->base.flags,
+ NULL, NULL);
+ aead_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ aead_request_set_ad(subreq, areq->assoclen);
+ if (rq_ctx->op == ZYNQMP_AES_ENCRYPT)
+ err = crypto_aead_encrypt(subreq);
+ else
+ err = crypto_aead_decrypt(subreq);
+ } else {
+ err = zynqmp_aes_aead_cipher(areq);
+ }
+
+ crypto_finalize_aead_request(engine, areq, err);
+ return 0;
+}
+
+static int zynqmp_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ unsigned char keysrc;
+
+ if (keylen == ZYNQMP_KEY_SRC_SEL_KEY_LEN) {
+ keysrc = *key;
+ if (keysrc == ZYNQMP_AES_KUP_KEY ||
+ keysrc == ZYNQMP_AES_DEV_KEY ||
+ keysrc == ZYNQMP_AES_PUF_KEY) {
+ tfm_ctx->keysrc = (enum zynqmp_aead_keysrc)keysrc;
+ } else {
+ tfm_ctx->keylen = keylen;
+ }
+ } else {
+ tfm_ctx->keylen = keylen;
+ if (keylen == ZYNQMP_AES_KEY_SIZE) {
+ tfm_ctx->keysrc = ZYNQMP_AES_KUP_KEY;
+ memcpy(tfm_ctx->key, key, keylen);
+ }
+ }
+
+ tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+}
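
A one-byte "key" is thus a key-source selector rather than key material; a
minimal, illustrative sketch of an in-kernel caller picking the device-fused
key (note that the trailing fallback setkey above receives the same one-byte
length, so the value returned to the caller is the fallback's verdict on it):

    u8 keysrc = ZYNQMP_AES_DEV_KEY;	/* or ZYNQMP_AES_PUF_KEY */

    err = crypto_aead_setkey(aead, &keysrc, ZYNQMP_KEY_SRC_SEL_KEY_LEN);
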
+
+static int zynqmp_aes_aead_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+
+ tfm_ctx->authsize = authsize;
+ return crypto_aead_setauthsize(tfm_ctx->fbk_cipher, authsize);
+}
+
+static int zynqmp_aes_aead_encrypt(struct aead_request *req)
+{
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ rq_ctx->op = ZYNQMP_AES_ENCRYPT;
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_aes_aead_decrypt(struct aead_request *req)
+{
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ rq_ctx->op = ZYNQMP_AES_DECRYPT;
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_aes_aead_init(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ tfm_ctx->dev = drv_ctx->dev;
+
+ tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req;
+ tfm_ctx->engine_ctx.op.prepare_request = NULL;
+ tfm_ctx->engine_ctx.op.unprepare_request = NULL;
+
+ tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name,
+ 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(tfm_ctx->fbk_cipher)) {
+ pr_err("%s() Error: failed to allocate fallback for %s\n",
+ __func__, drv_ctx->alg.aead.base.cra_name);
+ return PTR_ERR(tfm_ctx->fbk_cipher);
+ }
+
+ crypto_aead_set_reqsize(aead,
+ max(sizeof(struct zynqmp_aead_req_ctx),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(tfm_ctx->fbk_cipher)));
+ return 0;
+}
+
+static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct zynqmp_aead_tfm_ctx *tfm_ctx =
+ (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+
+ if (tfm_ctx->fbk_cipher) {
+ crypto_free_aead(tfm_ctx->fbk_cipher);
+ tfm_ctx->fbk_cipher = NULL;
+ }
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_aead_tfm_ctx));
+}
+
+static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
+ .alg.aead = {
+ .setkey = zynqmp_aes_aead_setkey,
+ .setauthsize = zynqmp_aes_aead_setauthsize,
+ .encrypt = zynqmp_aes_aead_encrypt,
+ .decrypt = zynqmp_aes_aead_decrypt,
+ .init = zynqmp_aes_aead_init,
+ .exit = zynqmp_aes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = ZYNQMP_AES_AUTH_SIZE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "xilinx-zynqmp-aes-gcm",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = ZYNQMP_AES_BLK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+static int zynqmp_aes_aead_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int err;
+
+ /* ZynqMP AES driver supports only one instance */
+ if (!aes_drv_ctx.dev)
+ aes_drv_ctx.dev = dev;
+ else
+ return -ENODEV;
+
+ aes_drv_ctx.eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(aes_drv_ctx.eemi_ops)) {
+ dev_err(dev, "Failed to get ZynqMP EEMI interface\n");
+ return PTR_ERR(aes_drv_ctx.eemi_ops);
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ if (err < 0) {
+ dev_err(dev, "No usable DMA configuration\n");
+ return err;
+ }
+
+ aes_drv_ctx.engine = crypto_engine_alloc_init(dev, 1);
+ if (!aes_drv_ctx.engine) {
+ dev_err(dev, "Cannot alloc AES engine\n");
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ err = crypto_engine_start(aes_drv_ctx.engine);
+ if (err) {
+ dev_err(dev, "Cannot start AES engine\n");
+ goto err_engine;
+ }
+
+ err = crypto_register_aead(&aes_drv_ctx.alg.aead);
+ if (err < 0) {
+ dev_err(dev, "Failed to register AEAD alg.\n");
+ goto err_engine;
+ }
+ return 0;
+
+err_engine:
+ if (aes_drv_ctx.engine)
+ crypto_engine_exit(aes_drv_ctx.engine);
+
+ return err;
+}
+
+static int zynqmp_aes_aead_remove(struct platform_device *pdev)
+{
+ crypto_engine_exit(aes_drv_ctx.engine);
+ crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_aes_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
+
+static struct platform_driver zynqmp_aes_driver = {
+ .probe = zynqmp_aes_aead_probe,
+ .remove = zynqmp_aes_aead_remove,
+ .driver = {
+ .name = "zynqmp-aes",
+ .of_match_table = zynqmp_aes_dt_ids,
+ },
+};
+
+module_platform_driver(zynqmp_aes_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 0613bb7770f5..ef73b678419c 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -6,7 +6,7 @@ config SYNC_FILE
default n
select DMA_SHARED_BUFFER
---help---
- The Sync File Framework adds explicit syncronization via
+ The Sync File Framework adds explicit synchronization via
userspace. It enables send/receive 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
@@ -39,6 +39,16 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
+config DMABUF_MOVE_NOTIFY
+ bool "Move notify between drivers (EXPERIMENTAL)"
+ default n
+ help
+ Don't pin buffers if the dynamic DMA-buf interface is available on
+ both the exporter as well as the importer. This fixes a security
+ problem where userspace is able to pin unrestricted amounts of
+ memory through DMA-buf. But marked experimental because we don't
+ yet have a consistent execution context and memory management
+ between drivers.
+
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index c343c7c10b4c..ccc9eda1bc28 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -525,7 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
}
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- exp_info->ops->dynamic_mapping))
+ (exp_info->ops->pin || exp_info->ops->unpin)))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
if (!try_module_get(exp_info->owner))
@@ -652,7 +655,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* calls attach() of dma_buf_ops to allow device-specific attach functionality
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
- * @dynamic_mapping: [in] calling convention for map/unmap
+ * @importer_ops: [in] importer operations for the attachment
+ * @importer_priv: [in] importer private pointer for the attachment
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -668,7 +672,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
*/
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
- bool dynamic_mapping)
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv)
{
struct dma_buf_attachment *attach;
int ret;
@@ -676,13 +681,17 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(importer_ops && !importer_ops->move_notify))
+ return ERR_PTR(-EINVAL);
+
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
return ERR_PTR(-ENOMEM);
attach->dev = dev;
attach->dmabuf = dmabuf;
- attach->dynamic_mapping = dynamic_mapping;
+ attach->importer_ops = importer_ops;
+ attach->importer_priv = importer_priv;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
@@ -701,15 +710,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_unlock;
+ }
sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
- goto err_unlock;
+ goto err_unpin;
}
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -723,6 +736,10 @@ err_attach:
kfree(attach);
return ERR_PTR(ret);
+err_unpin:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_buf_unpin(attach);
+
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -743,7 +760,7 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
{
- return dma_buf_dynamic_attach(dmabuf, dev, false);
+ return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
@@ -766,8 +783,10 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dma_buf_unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
+ }
}
dma_resv_lock(dmabuf->resv, NULL);
@@ -781,6 +800,44 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
+ * dma_buf_pin - Lock down the DMA-buf
+ *
+ * @attach: [in] attachment which should be pinned
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+ int ret = 0;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->pin)
+ ret = dmabuf->ops->pin(attach);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_pin);
+
+/**
+ * dma_buf_unpin - Remove lock from DMA-buf
+ *
+ * @attach: [in] attachment which should be unpinned
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->unpin)
+ dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unpin);
+
+/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
* mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
* dma_buf_ops.
@@ -799,6 +856,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ int r;
might_sleep();
@@ -820,13 +878,23 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ r = dma_buf_pin(attach);
+ if (r)
+ return ERR_PTR(r);
+ }
+ }
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
+
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;
attach->dir = direction;
@@ -865,10 +933,34 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+ if (dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
/**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf: [in] buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+ struct dma_buf_attachment *attach;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (attach->importer_ops)
+ attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+
+/**
* DOC: cpu access
*
* There are multiple reasons for supporting CPU access to a dma buffer object:
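To make the new dynamic-attach API concrete, a minimal importer sketch follows; it is not part of the patch, and every my_* name is illustrative. The only rules the code above enforces are that an importer passing ops must supply move_notify, and that move_notify is invoked with the buffer's dma_resv lock held:

	/* Minimal importer sketch (illustrative, not from this patch).
	 * move_notify runs with dmabuf->resv held, so it should only
	 * invalidate cached state; remapping happens lazily on next use. */
	static void my_move_notify(struct dma_buf_attachment *attach)
	{
		bool *mapping_stale = attach->importer_priv;

		*mapping_stale = true;	/* re-map on next dma_buf_map_attachment() */
	}

	static const struct dma_buf_attach_ops my_importer_ops = {
		.move_notify = my_move_notify,
	};

	/* attach dynamically; importer_priv is handed back in move_notify */
	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, &stale);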
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5142da401db3..092483644315 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -616,8 +616,8 @@ config TXX9_DMAC
integrated in chips such as the Toshiba TX4927/38/39.
config TEGRA20_APB_DMA
- bool "NVIDIA Tegra20 APB DMA support"
- depends on ARCH_TEGRA
+ tristate "NVIDIA Tegra20 APB DMA support"
+ depends on ARCH_TEGRA || COMPILE_TEST
select DMA_ENGINE
help
Support for the NVIDIA Tegra20 APB DMA controller driver. The
@@ -658,6 +658,17 @@ config UNIPHIER_MDMAC
UniPhier platform. This DMA controller is used as the external
DMA engine of the SD/eMMC controllers of the LD4, Pro4, sLD8 SoCs.
+config UNIPHIER_XDMAC
+ tristate "UniPhier XDMAC support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the XDMAC (external DMA controller) on the
+ UniPhier platform. This DMA controller can transfer data from
+ memory to memory, memory to peripheral and peripheral to memory.
+
config XGENE_DMA
tristate "APM X-Gene DMA support"
depends on ARCH_XGENE || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 1d908394fbea..e60f81331d4c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
+obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 672c73b4a2d4..73a20780744b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -146,17 +146,8 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
"scanned %u descriptors on freelist\n", i);
/* no more descriptor available in initial pool: create one more */
- if (!ret) {
- ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
- if (ret) {
- spin_lock_irqsave(&atchan->lock, flags);
- atchan->descs_allocated++;
- spin_unlock_irqrestore(&atchan->lock, flags);
- } else {
- dev_err(chan2dev(&atchan->chan_common),
- "not enough descriptors available\n");
- }
- }
+ if (!ret)
+ ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
return ret;
}
@@ -435,17 +426,19 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
* atc_chain_complete - finish work for one transaction chain
* @atchan: channel we work on
* @desc: descriptor at the head of the chain we want do complete
- *
- * Called with atchan->lock held and bh disabled */
+ */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+ unsigned long flags;
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
+ spin_lock_irqsave(&atchan->lock, flags);
+
/* mark the descriptor as complete for non cyclic cases only */
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
@@ -462,16 +455,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
dma_descriptor_unmap(txd);
/* for cyclic transfers,
* no need to replay callback function while stopping */
- if (!atc_chan_is_cyclic(atchan)) {
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
+ if (!atc_chan_is_cyclic(atchan))
dmaengine_desc_get_callback_invoke(txd, NULL);
- }
dma_run_dependencies(txd);
}
@@ -489,9 +479,12 @@ static void atc_complete_all(struct at_dma_chan *atchan)
{
struct at_desc *desc, *_desc;
LIST_HEAD(list);
+ unsigned long flags;
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
+ spin_lock_irqsave(&atchan->lock, flags);
+
/*
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
@@ -503,6 +496,8 @@ static void atc_complete_all(struct at_dma_chan *atchan)
/* empty queue list by moving descriptors (if any) to active_list */
list_splice_init(&atchan->queue, &atchan->active_list);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
}
@@ -510,38 +505,44 @@ static void atc_complete_all(struct at_dma_chan *atchan)
/**
* atc_advance_work - at the end of a transaction, move forward
* @atchan: channel where the transaction ended
- *
- * Called with atchan->lock held and bh disabled
*/
static void atc_advance_work(struct at_dma_chan *atchan)
{
+ unsigned long flags;
+ int ret;
+
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
- if (atc_chan_is_enabled(atchan))
+ spin_lock_irqsave(&atchan->lock, flags);
+ ret = atc_chan_is_enabled(atchan);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+ if (ret)
return;
if (list_empty(&atchan->active_list) ||
- list_is_singular(&atchan->active_list)) {
- atc_complete_all(atchan);
- } else {
- atc_chain_complete(atchan, atc_first_active(atchan));
- /* advance work */
- atc_dostart(atchan, atc_first_active(atchan));
- }
+ list_is_singular(&atchan->active_list))
+ return atc_complete_all(atchan);
+
+ atc_chain_complete(atchan, atc_first_active(atchan));
+
+ /* advance work */
+ spin_lock_irqsave(&atchan->lock, flags);
+ atc_dostart(atchan, atc_first_active(atchan));
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
* atc_handle_error - handle errors reported by DMA controller
* @atchan: channel where error occurs
- *
- * Called with atchan->lock held and bh disabled
*/
static void atc_handle_error(struct at_dma_chan *atchan)
{
struct at_desc *bad_desc;
struct at_desc *child;
+ unsigned long flags;
+ spin_lock_irqsave(&atchan->lock, flags);
/*
* The descriptor currently at the head of the active list is
* broken. Since we don't have any way to report errors, we'll
@@ -573,6 +574,8 @@ static void atc_handle_error(struct at_dma_chan *atchan)
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
atc_dump_lli(atchan, &child->lli);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
/* Pretend the descriptor completed successfully */
atc_chain_complete(atchan, bad_desc);
}
@@ -580,8 +583,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
/**
* atc_handle_cyclic - at the end of a period, run callback function
* @atchan: channel used for cyclic operations
- *
- * Called with atchan->lock held and bh disabled
*/
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
@@ -600,17 +601,14 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
- unsigned long flags;
- spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
- atc_handle_error(atchan);
- else if (atc_chan_is_cyclic(atchan))
- atc_handle_cyclic(atchan);
- else
- atc_advance_work(atchan);
+ return atc_handle_error(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
+ if (atc_chan_is_cyclic(atchan))
+ return atc_handle_cyclic(atchan);
+
+ atc_advance_work(atchan);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -940,7 +938,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;
}
- vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+ vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
if (!vaddr) {
dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
__func__);
@@ -998,7 +996,7 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
return NULL;
}
- vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+ vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
if (!vaddr) {
dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
__func__);
@@ -1446,6 +1444,8 @@ static int atc_terminate_all(struct dma_chan *chan)
list_splice_init(&atchan->queue, &list);
list_splice_init(&atchan->active_list, &list);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
@@ -1454,8 +1454,6 @@ static int atc_terminate_all(struct dma_chan *chan)
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
- spin_unlock_irqrestore(&atchan->lock, flags);
-
return 0;
}
@@ -1516,7 +1514,6 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
@@ -1524,15 +1521,12 @@ static void atc_issue_pending(struct dma_chan *chan)
if (atc_chan_is_cyclic(atchan))
return;
- spin_lock_irqsave(&atchan->lock, flags);
atc_advance_work(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
- * @client: current client requesting the channel be ready for requests
*
* return - the number of allocated descriptors
*/
@@ -1542,10 +1536,8 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc;
struct at_dma_slave *atslave;
- unsigned long flags;
int i;
u32 cfg;
- LIST_HEAD(tmp_list);
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -1555,6 +1547,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
return -EIO;
}
+ if (!list_empty(&atchan->free_list)) {
+ dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
+ return -EIO;
+ }
+
cfg = ATC_DEFAULT_CFG;
atslave = chan->private;
@@ -1570,11 +1567,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
cfg = atslave->cfg;
}
- /* have we already been set up?
- * reconfigure channel but no need to reallocate descriptors */
- if (!list_empty(&atchan->free_list))
- return atchan->descs_allocated;
-
/* Allocate initial pool of descriptors */
for (i = 0; i < init_nr_desc_per_channel; i++) {
desc = atc_alloc_descriptor(chan, GFP_KERNEL);
@@ -1583,23 +1575,18 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
"Only %d initial descriptors\n", i);
break;
}
- list_add_tail(&desc->desc_node, &tmp_list);
+ list_add_tail(&desc->desc_node, &atchan->free_list);
}
- spin_lock_irqsave(&atchan->lock, flags);
- atchan->descs_allocated = i;
- list_splice(&tmp_list, &atchan->free_list);
dma_cookie_init(chan);
- spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */
channel_writel(atchan, CFG, cfg);
dev_dbg(chan2dev(chan),
- "alloc_chan_resources: allocated %d descriptors\n",
- atchan->descs_allocated);
+ "alloc_chan_resources: allocated %d descriptors\n", i);
- return atchan->descs_allocated;
+ return i;
}
/**
@@ -1613,9 +1600,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
struct at_desc *desc, *_desc;
LIST_HEAD(list);
- dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
- atchan->descs_allocated);
-
/* ASSERT: channel is idle */
BUG_ON(!list_empty(&atchan->active_list));
BUG_ON(!list_empty(&atchan->queue));
@@ -1628,7 +1612,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
}
list_splice_init(&atchan->free_list, &list);
- atchan->descs_allocated = 0;
atchan->status = 0;
/*
@@ -1671,7 +1654,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
+ atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
if (!atslave)
return NULL;
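The common thread in the at_hdmac rework above is that locking moves down into the helpers and atchan->lock is always dropped before a client callback runs, since a callback may resubmit work and retake the lock. Condensed, the shape is (names from the driver, logic simplified):

	/* list surgery under the channel lock ... */
	spin_lock_irqsave(&atchan->lock, flags);
	list_move(&desc->desc_node, &atchan->free_list);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* ... callbacks strictly outside of it */
	dmaengine_desc_get_callback_invoke(txd, NULL);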
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index fe8a5853ec49..397692e937b3 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -243,7 +243,6 @@ enum atc_status {
* @active_list: list of descriptors dmaengine is being running on
* @queue: list of descriptors ready to be submitted to engine
* @free_list: list of descriptors usable by the channel
- * @descs_allocated: records the actual size of the descriptor pool
*/
struct at_dma_chan {
struct dma_chan chan_common;
@@ -264,7 +263,6 @@ struct at_dma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
- unsigned int descs_allocated;
};
#define channel_readl(atchan, name) \
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index f71c9f77d405..bb0eaf38b594 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1543,9 +1543,6 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
struct at_xdmac_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&atchan->lock, flags);
/*
* If channel is enabled, do nothing, advance_work will be triggered
@@ -1559,8 +1556,6 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
if (!desc->active_xfer)
at_xdmac_start_xfer(atchan, desc);
}
-
- spin_unlock_irqrestore(&atchan->lock, flags);
}
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1596,7 +1591,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
- spin_lock_bh(&atchan->lock);
+ spin_lock_irq(&atchan->lock);
/* Channel must be disabled first as it's not done automatically */
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
@@ -1607,7 +1602,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
struct at_xdmac_desc,
xfer_node);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irq(&atchan->lock);
/* Print bad descriptor's details if needed */
dev_dbg(chan2dev(&atchan->chan),
@@ -1640,31 +1635,31 @@ static void at_xdmac_tasklet(unsigned long data)
if (atchan->irq_status & error_mask)
at_xdmac_handle_error(atchan);
- spin_lock(&atchan->lock);
+ spin_lock_irq(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
if (!desc->active_xfer) {
dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
- spin_unlock(&atchan->lock);
+ spin_unlock_irq(&atchan->lock);
return;
}
txd = &desc->tx_dma_desc;
at_xdmac_remove_xfer(atchan, desc);
- spin_unlock(&atchan->lock);
+ spin_unlock_irq(&atchan->lock);
- if (!at_xdmac_chan_is_cyclic(atchan)) {
- dma_cookie_complete(txd);
- if (txd->flags & DMA_PREP_INTERRUPT)
- dmaengine_desc_get_callback_invoke(txd, NULL);
- }
+ dma_cookie_complete(txd);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
dma_run_dependencies(txd);
+ spin_lock_irq(&atchan->lock);
at_xdmac_advance_work(atchan);
+ spin_unlock_irq(&atchan->lock);
}
}
@@ -1725,11 +1720,15 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
static void at_xdmac_issue_pending(struct dma_chan *chan)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ unsigned long flags;
dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
- if (!at_xdmac_chan_is_cyclic(atchan))
+ if (!at_xdmac_chan_is_cyclic(atchan)) {
+ spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_advance_work(atchan);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+ }
return;
}
@@ -1822,26 +1821,21 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac_desc *desc;
int i;
- unsigned long flags;
-
- spin_lock_irqsave(&atchan->lock, flags);
if (at_xdmac_chan_is_enabled(atchan)) {
dev_err(chan2dev(chan),
"can't allocate channel resources (channel enabled)\n");
- i = -EIO;
- goto spin_unlock;
+ return -EIO;
}
if (!list_empty(&atchan->free_descs_list)) {
dev_err(chan2dev(chan),
"can't allocate channel resources (channel not free from a previous use)\n");
- i = -EIO;
- goto spin_unlock;
+ return -EIO;
}
for (i = 0; i < init_nr_desc_per_channel; i++) {
- desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
+ desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
if (!desc) {
dev_warn(chan2dev(chan),
"only %d descriptors have been allocated\n", i);
@@ -1854,8 +1848,6 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
-spin_unlock:
- spin_unlock_irqrestore(&atchan->lock, flags);
return i;
}
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 275e90fa829d..64239da02e74 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -120,7 +120,7 @@ struct sba_request {
struct brcm_message msg;
struct dma_async_tx_descriptor tx;
/* SBA commands */
- struct brcm_sba_command cmds[0];
+ struct brcm_sba_command cmds[];
};
enum sba_version {
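This hunk, like the matching ioat, sa11x0 and sprd hunks below, converts a zero-length array to a C99 flexible array member. sizeof() excludes the trailing array either way, so the usual allocation idiom is unchanged; a sketch using the struct_size() helper from <linux/overflow.h>:

	/* space for the header plus nreq trailing SBA commands */
	req = kzalloc(struct_size(req, cmds, nreq), GFP_KERNEL);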
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 17909fd1820f..4830ba658ce1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -58,6 +58,87 @@ static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+
+static void dmaengine_debug_register(struct dma_device *dma_dev)
+{
+ dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
+ rootdir);
+ if (IS_ERR(dma_dev->dbg_dev_root))
+ dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
+{
+ debugfs_remove_recursive(dma_dev->dbg_dev_root);
+ dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_dbg_summary_show(struct seq_file *s,
+ struct dma_device *dma_dev)
+{
+ struct dma_chan *chan;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count) {
+ seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+ chan->dbg_client_name ?: "in-use");
+
+ if (chan->router)
+ seq_printf(s, " (via router: %s)\n",
+ dev_name(chan->router->dev));
+ else
+ seq_puts(s, "\n");
+ }
+ }
+}
+
+static int dmaengine_summary_show(struct seq_file *s, void *data)
+{
+ struct dma_device *dma_dev = NULL;
+
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry(dma_dev, &dma_device_list, global_node) {
+ seq_printf(s, "dma%d (%s): number of channels: %u\n",
+ dma_dev->dev_id, dev_name(dma_dev->dev),
+ dma_dev->chancnt);
+
+ if (dma_dev->dbg_summary_show)
+ dma_dev->dbg_summary_show(s, dma_dev);
+ else
+ dmaengine_dbg_summary_show(s, dma_dev);
+
+ if (!list_is_last(&dma_dev->global_node, &dma_device_list))
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
+
+static void __init dmaengine_debugfs_init(void)
+{
+ rootdir = debugfs_create_dir("dmaengine", NULL);
+
+ /* /sys/kernel/debug/dmaengine/summary */
+ debugfs_create_file("summary", 0444, rootdir, NULL,
+ &dmaengine_summary_fops);
+}
+#else
+static inline void dmaengine_debugfs_init(void) { }
+static inline void dmaengine_debug_register(struct dma_device *dma_dev) { }
+
+static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
+#endif /* CONFIG_DEBUG_FS */
+
/* --- sysfs implementation --- */
#define DMA_SLAVE_NAME "slave"
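A driver can replace the generic per-channel listing produced by dmaengine_dbg_summary_show() by setting dma_device->dbg_summary_show before registration. A hypothetical hook (the foo_* names are illustrative) could look like:

	static void foo_dbg_summary_show(struct seq_file *s,
					 struct dma_device *dd)
	{
		struct dma_chan *chan;

		list_for_each_entry(chan, &dd->channels, device_node)
			seq_printf(s, " %-13s| clients: %d\n",
				   dma_chan_name(chan), chan->client_count);
	}

	/* before dma_async_device_register(dd): */
	dd->dbg_summary_show = foo_dbg_summary_show;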
@@ -760,6 +841,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
return chan ? chan : ERR_PTR(-EPROBE_DEFER);
found:
+#ifdef CONFIG_DEBUG_FS
+ chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
+ name);
+#endif
+
chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
if (!chan->name)
return chan;
@@ -837,6 +923,11 @@ void dma_release_channel(struct dma_chan *chan)
chan->name = NULL;
chan->slave = NULL;
}
+
+#ifdef CONFIG_DEBUG_FS
+ kfree(chan->dbg_client_name);
+ chan->dbg_client_name = NULL;
+#endif
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -1196,6 +1287,8 @@ int dma_async_device_register(struct dma_device *device)
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
+ dmaengine_debug_register(device);
+
return 0;
err_out:
@@ -1229,6 +1322,8 @@ void dma_async_device_unregister(struct dma_device *device)
{
struct dma_chan *chan, *n;
+ dmaengine_debug_unregister(device);
+
list_for_each_entry_safe(chan, n, &device->channels, device_node)
__dma_async_device_channel_unregister(device, chan);
@@ -1559,6 +1654,11 @@ static int __init dma_bus_init(void)
if (err)
return err;
- return class_register(&dma_devclass);
+
+ err = class_register(&dma_devclass);
+ if (!err)
+ dmaengine_debugfs_init();
+
+ return err;
}
arch_initcall(dma_bus_init);
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index e8a320c9e57c..1bfbd64b1371 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -182,4 +182,20 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev) {
+ return dma_dev->dbg_dev_root;
+}
+#else
+struct dentry;
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
#endif
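dmaengine_get_debugfs_root() gives drivers under drivers/dma a place to park device-specific debugfs files beneath the per-device directory created at registration. A hypothetical use (foo_* names are illustrative):

	/* after dma_async_device_register(dd) has succeeded */
	debugfs_create_file("foo_state", 0444,
			    dmaengine_get_debugfs_root(dd), foo,
			    &foo_state_fops);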
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index c70a7965f140..4ec909e0b810 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -790,6 +790,20 @@ static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
return 0;
}
+static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_priv *priv;
+ struct device *dev;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ dpaa2_dpdmai_dpio_unbind(priv);
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
@@ -805,6 +819,7 @@ static struct fsl_mc_driver dpaa2_qdma_driver = {
},
.probe = dpaa2_qdma_probe,
.remove = dpaa2_qdma_remove,
+ .shutdown = dpaa2_qdma_shutdown,
.match_id_table = dpaa2_qdma_id_table
};
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index f8d22115154a..878662aaa1c2 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -160,6 +160,27 @@ int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
}
/**
+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_destroy);
+
+/**
* dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
index 6d785093da8e..b13b9bf0c003 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
@@ -18,6 +18,7 @@
#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E)
#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E)
+#define DPDMAI_CMDID_DESTROY DPDMAI_CMDID_FORMAT(0x900)
#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002)
#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003)
@@ -160,6 +161,7 @@ struct dpdmai_rx_queue_attr {
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token);
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
const struct dpdmai_cfg *cfg, u16 *token);
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 989b7a25ca61..ff49847e37a8 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -74,12 +74,10 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_device *idxd;
struct idxd_wq *wq;
struct device *dev;
- struct idxd_cdev *idxd_cdev;
wq = inode_wq(inode);
idxd = wq->idxd;
dev = &idxd->pdev->dev;
- idxd_cdev = &wq->idxd_cdev;
dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
@@ -139,6 +137,8 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
dev_dbg(&pdev->dev, "%s called\n", __func__);
rc = check_vma(wq, vma, __func__);
+ if (rc < 0)
+ return rc;
vma->vm_flags |= VM_DONTCOPY;
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ada69e722f84..f6f49f0f6fae 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -584,11 +584,11 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
struct idxd_group *group = &idxd->groups[i];
if (group->tc_a == -1)
- group->grpcfg.flags.tc_a = 0;
+ group->tc_a = group->grpcfg.flags.tc_a = 0;
else
group->grpcfg.flags.tc_a = group->tc_a;
if (group->tc_b == -1)
- group->grpcfg.flags.tc_b = 1;
+ group->tc_b = group->grpcfg.flags.tc_b = 1;
else
group->grpcfg.flags.tc_b = group->tc_b;
group->grpcfg.flags.use_token_limit = group->use_token_limit;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6ca6e520a2fa..3999827970ab 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -419,7 +419,7 @@ static ssize_t engine_group_id_store(struct device *dev,
struct idxd_device *idxd = engine->idxd;
long id;
int rc;
- struct idxd_group *prevg, *group;
+ struct idxd_group *prevg;
rc = kstrtol(buf, 10, &id);
if (rc < 0)
@@ -439,7 +439,6 @@ static ssize_t engine_group_id_store(struct device *dev,
return count;
}
- group = &idxd->groups[id];
prevg = engine->group;
if (prevg)
@@ -513,9 +512,6 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->token_limit == 0)
- return -EPERM;
-
if (val > idxd->max_tokens)
return -EINVAL;
@@ -561,8 +557,6 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->token_limit == 0)
- return -EPERM;
if (val < 4 * group->num_engines ||
val > group->tokens_reserved + idxd->nr_tokens)
return -EINVAL;
@@ -1180,6 +1174,16 @@ static ssize_t op_cap_show(struct device *dev,
}
static DEVICE_ATTR_RO(op_cap);
+static ssize_t gen_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
+}
+static DEVICE_ATTR_RO(gen_cap);
+
static ssize_t configurable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1317,6 +1321,7 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_max_batch_size.attr,
&dev_attr_max_transfer_size.attr,
&dev_attr_op_cap.attr,
+ &dev_attr_gen_cap.attr,
&dev_attr_configurable.attr,
&dev_attr_clients.attr,
&dev_attr_state.attr,
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index be61c32a876f..0be385587c4c 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -102,7 +102,7 @@ struct ioat_dca_priv {
int max_requesters;
int requester_count;
u8 tag_map[IOAT_TAG_MAP_LEN];
- struct ioat_dca_slot req_slots[0];
+ struct ioat_dca_slot req_slots[];
};
static int ioat_dca_dev_managed(struct dca_provider *dca,
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fbabd2e88a18..4db000d5f01c 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4303,7 +4303,7 @@ static ssize_t devices_show(struct device_driver *dev, char *buf)
for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
if (ppc440spe_adma_devices[i] == -1)
continue;
- size += snprintf(buf + size, PAGE_SIZE - size,
+ size += scnprintf(buf + size, PAGE_SIZE - size,
"PPC440SP(E)-ADMA.%d: %s\n", i,
ppc_adma_errors[ppc440spe_adma_devices[i]]);
}
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index afb68055ed1b..0fa7f14a65a1 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -78,7 +78,7 @@ struct sa11x0_dma_desc {
bool cyclic;
unsigned sglen;
- struct sa11x0_dma_sg sg[0];
+ struct sa11x0_dma_sg sg[];
};
struct sa11x0_dma_phy;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index f06016d38a05..59b36ab5d684 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1219,7 +1219,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
sg_len = buf_len / period_len;
if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
dev_err(chan->device->dev,
- "chan%u: sg length %d exceds limit %d",
+ "chan%u: sg length %d exceeds limit %d",
rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
return NULL;
}
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index c51de498b5b4..2deeaab078a4 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -709,7 +709,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
BUG_ON(!schan->desc_num);
if (sg_len > SHDMA_MAX_SG_LEN) {
- dev_err(schan->dev, "sg length %d exceds limit %d",
+ dev_err(schan->dev, "sg length %d exceeds limit %d",
sg_len, SHDMA_MAX_SG_LEN);
return NULL;
}
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 9a31a315dbef..0ef5ca81ba4d 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -212,7 +212,7 @@ struct sprd_dma_dev {
struct clk *ashb_clk;
int irq;
u32 total_chns;
- struct sprd_dma_chn channels[0];
+ struct sprd_dma_chn channels[];
};
static void sprd_dma_free_desc(struct virt_dma_desc *vd);
@@ -486,6 +486,28 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
return 0;
}
+static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 reg, val, req_id;
+
+ if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+ return;
+
+ /* The DMA request id always starts from 0. */
+ req_id = schan->dev_id - 1;
+
+ if (req_id < 32) {
+ reg = SPRD_DMA_GLB_REQ_PEND0_EN;
+ val = BIT(req_id);
+ } else {
+ reg = SPRD_DMA_GLB_REQ_PEND1_EN;
+ val = BIT(req_id - 32);
+ }
+
+ sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
+}
+
static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
struct sprd_dma_desc *sdesc)
{
@@ -532,6 +554,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
*/
sprd_dma_set_chn_config(schan, schan->cur_desc);
sprd_dma_set_uid(schan);
+ sprd_dma_set_pending(schan, true);
sprd_dma_enable_chn(schan);
if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
@@ -543,6 +566,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
sprd_dma_stop_and_disable(schan);
+ sprd_dma_set_pending(schan, false);
sprd_dma_unset_uid(schan);
sprd_dma_clear_int(schan);
schan->cur_desc = NULL;
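As a worked example of the pending math in sprd_dma_set_pending() above, a hypothetical dev_id of 36 gives req_id = 35, which falls in the second enable register:

	req_id = 36 - 1;			/* 35 */
	reg = SPRD_DMA_GLB_REQ_PEND1_EN;	/* since req_id >= 32 */
	val = BIT(35 - 32);			/* BIT(3) */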
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 5989b0893521..0ddbaa4b4f0b 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -207,7 +208,6 @@ struct stm32_dma_device {
struct dma_device ddev;
void __iomem *base;
struct clk *clk;
- struct reset_control *rst;
bool mem2mem;
struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};
@@ -422,29 +422,19 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
- unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- u32 dma_scr, id;
+ u32 dma_scr, id, reg;
id = chan->id;
- dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ reg = STM32_DMA_SCR(id);
+ dma_scr = stm32_dma_read(dmadev, reg);
if (dma_scr & STM32_DMA_SCR_EN) {
dma_scr &= ~STM32_DMA_SCR_EN;
- stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);
-
- do {
- dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
- dma_scr &= STM32_DMA_SCR_EN;
- if (!dma_scr)
- break;
-
- if (time_after_eq(jiffies, timeout)) {
- dev_err(chan2dev(chan), "%s: timeout!\n",
- __func__);
- return -EBUSY;
- }
- cond_resched();
- } while (1);
+ stm32_dma_write(dmadev, reg, dma_scr);
+
+ return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
+ dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
+ 10, 1000000);
}
return 0;
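The readl_relaxed_poll_timeout_atomic() call above replaces the open-coded 5-second jiffies loop. Roughly (a sketch of the helper's contract, not the exact <linux/iopoll.h> expansion), it re-reads the register every delay_us microseconds and fails with -ETIMEDOUT once timeout_us has elapsed:

	/* sketch of readl_relaxed_poll_timeout_atomic(addr, val, cond,
	 * delay_us, timeout_us) */
	for (elapsed = 0; ; elapsed += delay_us) {
		val = readl_relaxed(addr);
		if (cond)
			return 0;
		if (elapsed >= timeout_us)
			return -ETIMEDOUT; /* after one final re-read/check */
		udelay(delay_us);
	}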
@@ -488,8 +478,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (chan->busy) {
- stm32_dma_stop(chan);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vdesc);
+ if (chan->busy)
+ stm32_dma_stop(chan);
chan->desc = NULL;
}
@@ -545,6 +537,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
if (!vdesc)
return;
+ list_del(&vdesc->node);
+
chan->desc = to_stm32_dma_desc(vdesc);
chan->next_sg = 0;
}
@@ -555,6 +549,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
sg_req = &chan->desc->sg_req[chan->next_sg];
reg = &sg_req->chan_reg;
+ reg->dma_scr &= ~STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
@@ -622,7 +617,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
} else {
chan->busy = false;
if (chan->next_sg == chan->desc->num_sgs) {
- list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL;
}
@@ -1275,6 +1269,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
struct dma_device *dd;
const struct of_device_id *match;
struct resource *res;
+ struct reset_control *rst;
int i, ret;
match = of_match_device(stm32_dma_of_match, &pdev->dev);
@@ -1296,8 +1291,10 @@ static int stm32_dma_probe(struct platform_device *pdev)
dmadev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dmadev->clk)) {
- dev_err(&pdev->dev, "Error: Missing controller clock\n");
- return PTR_ERR(dmadev->clk);
+ ret = PTR_ERR(dmadev->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get clock\n");
+ return ret;
}
ret = clk_prepare_enable(dmadev->clk);
@@ -1309,13 +1306,19 @@ static int stm32_dma_probe(struct platform_device *pdev)
dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
"st,mem2mem");
- dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(dmadev->rst)) {
- reset_control_assert(dmadev->rst);
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto clk_free;
+ } else {
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(dmadev->rst);
+ reset_control_deassert(rst);
}
+ dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+
dma_cap_set(DMA_SLAVE, dd->cap_mask);
dma_cap_set(DMA_PRIVATE, dd->cap_mask);
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
@@ -1336,7 +1339,9 @@ static int stm32_dma_probe(struct platform_device *pdev)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
dd->max_burst = STM32_DMA_MAX_BURST;
+ dd->descriptor_reuse = true;
dd->dev = &pdev->dev;
INIT_LIST_HEAD(&dd->channels);
@@ -1427,7 +1432,39 @@ static int stm32_dma_runtime_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dma_suspend(struct device *dev)
+{
+ struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+ int id, ret, scr;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
+ scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ if (scr & STM32_DMA_SCR_EN) {
+ dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+ }
+
+ pm_runtime_put_sync(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int stm32_dma_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
static const struct dev_pm_ops stm32_dma_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
stm32_dma_runtime_resume, NULL)
};
@@ -1438,10 +1475,11 @@ static struct platform_driver stm32_dma_driver = {
.of_match_table = stm32_dma_of_match,
.pm = &stm32_dma_pm_ops,
},
+ .probe = stm32_dma_probe,
};
static int __init stm32_dma_init(void)
{
- return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
+ return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);
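The switch from platform_driver_probe() to platform_driver_register() is what makes the new -EPROBE_DEFER paths above effective: platform_driver_probe() is one-shot and never retries, whereas a registered driver's probe is re-run once the missing clock or reset controller appears. A sketch of the pattern this enables:

	/* a deferrable resource in probe; the driver core re-probes later */
	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst) && PTR_ERR(rst) == -EPROBE_DEFER)
		return -EPROBE_DEFER;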
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 3c89bd39e096..12f7637e13a1 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -35,12 +35,14 @@ struct stm32_dmamux {
struct stm32_dmamux_data {
struct dma_router dmarouter;
struct clk *clk;
- struct reset_control *rst;
void __iomem *iomem;
u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
spinlock_t lock; /* Protects register access */
unsigned long *dma_inuse; /* Used DMA channel */
+ u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to back up CCR registers
+ * in suspend
+ */
u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
* [0] holds number of DMA Masters.
* To be kept at the very end of this structure
@@ -179,6 +181,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
struct stm32_dmamux_data *stm32_dmamux;
struct resource *res;
void __iomem *iomem;
+ struct reset_control *rst;
int i, count, ret;
u32 dma_req;
@@ -251,16 +254,26 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(stm32_dmamux->clk)) {
ret = PTR_ERR(stm32_dmamux->clk);
- if (ret == -EPROBE_DEFER)
- dev_info(&pdev->dev, "Missing controller clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Missing clock controller\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(stm32_dmamux->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
return ret;
}
- stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(stm32_dmamux->rst)) {
- reset_control_assert(stm32_dmamux->rst);
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk;
+ } else {
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(stm32_dmamux->rst);
+ reset_control_deassert(rst);
}
stm32_dmamux->iomem = iomem;
@@ -271,14 +284,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- if (!IS_ERR(stm32_dmamux->clk)) {
- ret = clk_prepare_enable(stm32_dmamux->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
- return ret;
- }
- }
-
pm_runtime_get_noresume(&pdev->dev);
/* Reset the dmamux */
@@ -287,8 +292,17 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- return of_dma_router_register(node, stm32_dmamux_route_allocate,
+ ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
+ if (ret)
+ goto err_clk;
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(stm32_dmamux->clk);
+
+ return ret;
}
#ifdef CONFIG_PM
@@ -318,7 +332,54 @@ static int stm32_dmamux_runtime_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dmamux_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+ int i, ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < stm32_dmamux->dma_requests; i++)
+ stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
+ STM32_DMAMUX_CCR(i));
+
+ pm_runtime_put_sync(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int stm32_dmamux_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+ int i, ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < stm32_dmamux->dma_requests; i++)
+ stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
+ stm32_dmamux->ccr[i]);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops stm32_dmamux_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
stm32_dmamux_runtime_resume, NULL)
};
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 5838311cf990..5469563703d1 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -273,7 +273,6 @@ struct stm32_mdma_device {
void __iomem *base;
struct clk *clk;
int irq;
- struct reset_control *rst;
u32 nr_channels;
u32 nr_requests;
u32 nr_ahb_addr_masks;
@@ -1127,6 +1126,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
return;
}
+ list_del(&vdesc->node);
+
chan->desc = to_stm32_mdma_desc(vdesc);
hwdesc = chan->desc->node[0].hwdesc;
chan->curr_hwdesc = 0;
@@ -1242,8 +1243,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
LIST_HEAD(head);
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (chan->busy) {
- stm32_mdma_stop(chan);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vdesc);
+ if (chan->busy)
+ stm32_mdma_stop(chan);
chan->desc = NULL;
}
vchan_get_all_descriptors(&chan->vchan, &head);
@@ -1331,7 +1334,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
{
- list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = NULL;
chan->busy = false;
@@ -1532,6 +1534,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
struct dma_device *dd;
struct device_node *of_node;
struct resource *res;
+ struct reset_control *rst;
u32 nr_channels, nr_requests;
int i, count, ret;
@@ -1579,8 +1582,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
dmadev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dmadev->clk)) {
ret = PTR_ERR(dmadev->clk);
- if (ret == -EPROBE_DEFER)
- dev_info(&pdev->dev, "Missing controller clock\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Missing clock controller\n");
return ret;
}
@@ -1590,11 +1593,15 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
- dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (!IS_ERR(dmadev->rst)) {
- reset_control_assert(dmadev->rst);
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk;
+ } else {
+ reset_control_assert(rst);
udelay(2);
- reset_control_deassert(dmadev->rst);
+ reset_control_deassert(rst);
}
dd = &dmadev->ddev;
@@ -1614,6 +1621,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
dd->device_resume = stm32_mdma_resume;
dd->device_terminate_all = stm32_mdma_terminate_all;
dd->device_synchronize = stm32_mdma_synchronize;
+ dd->descriptor_reuse = true;
+
dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
@@ -1637,25 +1646,27 @@ static int stm32_mdma_probe(struct platform_device *pdev)
}
dmadev->irq = platform_get_irq(pdev, 0);
- if (dmadev->irq < 0)
- return dmadev->irq;
+ if (dmadev->irq < 0) {
+ ret = dmadev->irq;
+ goto err_clk;
+ }
ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
0, dev_name(&pdev->dev), dmadev);
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ\n");
- return ret;
+ goto err_clk;
}
ret = dmaenginem_async_device_register(dd);
if (ret)
- return ret;
+ goto err_clk;
ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
if (ret < 0) {
dev_err(&pdev->dev,
"STM32 MDMA DMA OF registration failed %d\n", ret);
- goto err_unregister;
+ goto err_clk;
}
platform_set_drvdata(pdev, dmadev);
@@ -1668,7 +1679,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return 0;
-err_unregister:
+err_clk:
+ clk_disable_unprepare(dmadev->clk);
+
return ret;
}
@@ -1697,7 +1710,40 @@ static int stm32_mdma_runtime_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int stm32_mdma_pm_suspend(struct device *dev)
+{
+ struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+ u32 ccr, id;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ for (id = 0; id < dmadev->nr_channels; id++) {
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+ if (ccr & STM32_MDMA_CCR_EN) {
+ dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+ }
+
+ pm_runtime_put_sync(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int stm32_mdma_pm_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
static const struct dev_pm_ops stm32_mdma_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
stm32_mdma_runtime_resume, NULL)
};
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index bbc2bda3b902..e7ff09a5031d 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -697,11 +697,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
dest = sconfig->dst_addr;
endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
- SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
} else {
src = sconfig->src_addr;
dest = buf;
endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
}
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 4a750e29bfb5..265303a396ca 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/wait.h>
#include "dmaengine.h"
@@ -59,7 +60,7 @@
#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
#define TEGRA_APBDMA_CHAN_CSRE 0x00C
-#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31)
+#define TEGRA_APBDMA_CHAN_CSRE_PAUSE BIT(31)
/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
@@ -120,21 +121,21 @@ struct tegra_dma;
* @support_separate_wcount_reg: Support separate word count register.
*/
struct tegra_dma_chip_data {
- int nr_channels;
- int channel_reg_size;
- int max_dma_count;
+ unsigned int nr_channels;
+ unsigned int channel_reg_size;
+ unsigned int max_dma_count;
bool support_channel_pause;
bool support_separate_wcount_reg;
};
/* DMA channel registers */
struct tegra_dma_channel_regs {
- unsigned long csr;
- unsigned long ahb_ptr;
- unsigned long apb_ptr;
- unsigned long ahb_seq;
- unsigned long apb_seq;
- unsigned long wcount;
+ u32 csr;
+ u32 ahb_ptr;
+ u32 apb_ptr;
+ u32 ahb_seq;
+ u32 apb_seq;
+ u32 wcount;
};
/*
@@ -168,7 +169,7 @@ struct tegra_dma_desc {
struct list_head node;
struct list_head tx_list;
struct list_head cb_node;
- int cb_count;
+ unsigned int cb_count;
};
struct tegra_dma_channel;
@@ -181,8 +182,7 @@ struct tegra_dma_channel {
struct dma_chan dma_chan;
char name[12];
bool config_init;
- int id;
- int irq;
+ unsigned int id;
void __iomem *chan_addr;
spinlock_t lock;
bool busy;
@@ -202,7 +202,9 @@ struct tegra_dma_channel {
/* Channel-slave specific configuration */
unsigned int slave_id;
struct dma_slave_config dma_sconfig;
- struct tegra_dma_channel_regs channel_reg;
+ struct tegra_dma_channel_regs channel_reg;
+
+ struct wait_queue_head wq;
};
/* tegra_dma: Tegra DMA specific information */
@@ -222,9 +224,6 @@ struct tegra_dma {
*/
u32 global_pause_count;
- /* Some register need to be cache before suspend */
- u32 reg_gen;
-
/* Last member of the structure */
struct tegra_dma_channel channels[0];
};
@@ -240,7 +239,7 @@ static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
}
static inline void tdc_write(struct tegra_dma_channel *tdc,
- u32 reg, u32 val)
+ u32 reg, u32 val)
{
writel(val, tdc->chan_addr + reg);
}
@@ -255,8 +254,8 @@ static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
return container_of(dc, struct tegra_dma_channel, dma_chan);
}
-static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
- struct dma_async_tx_descriptor *td)
+static inline struct tegra_dma_desc *
+txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
{
return container_of(td, struct tegra_dma_desc, txd);
}
@@ -267,12 +266,9 @@ static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
}
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
-static int tegra_dma_runtime_suspend(struct device *dev);
-static int tegra_dma_runtime_resume(struct device *dev);
/* Get DMA desc from free list, if not there then allocate it. */
-static struct tegra_dma_desc *tegra_dma_desc_get(
- struct tegra_dma_channel *tdc)
+static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
{
struct tegra_dma_desc *dma_desc;
unsigned long flags;
@@ -299,11 +295,12 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
dma_desc->txd.tx_submit = tegra_dma_tx_submit;
dma_desc->txd.flags = 0;
+
return dma_desc;
}
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
- struct tegra_dma_desc *dma_desc)
+ struct tegra_dma_desc *dma_desc)
{
unsigned long flags;
@@ -314,29 +311,29 @@ static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
spin_unlock_irqrestore(&tdc->lock, flags);
}
-static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
- struct tegra_dma_channel *tdc)
+static struct tegra_dma_sg_req *
+tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
{
- struct tegra_dma_sg_req *sg_req = NULL;
+ struct tegra_dma_sg_req *sg_req;
unsigned long flags;
spin_lock_irqsave(&tdc->lock, flags);
if (!list_empty(&tdc->free_sg_req)) {
- sg_req = list_first_entry(&tdc->free_sg_req,
- typeof(*sg_req), node);
+ sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
+ node);
list_del(&sg_req->node);
spin_unlock_irqrestore(&tdc->lock, flags);
return sg_req;
}
spin_unlock_irqrestore(&tdc->lock, flags);
- sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);
+ sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);
return sg_req;
}
static int tegra_dma_slave_config(struct dma_chan *dc,
- struct dma_slave_config *sconfig)
+ struct dma_slave_config *sconfig)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
@@ -353,11 +350,12 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
tdc->slave_id = sconfig->slave_id;
}
tdc->config_init = true;
+
return 0;
}
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
- bool wait_for_burst_complete)
+ bool wait_for_burst_complete)
{
struct tegra_dma *tdma = tdc->tdma;
@@ -392,13 +390,13 @@ out:
}
static void tegra_dma_pause(struct tegra_dma_channel *tdc,
- bool wait_for_burst_complete)
+ bool wait_for_burst_complete)
{
struct tegra_dma *tdma = tdc->tdma;
if (tdma->chip_data->support_channel_pause) {
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
- TEGRA_APBDMA_CHAN_CSRE_PAUSE);
+ TEGRA_APBDMA_CHAN_CSRE_PAUSE);
if (wait_for_burst_complete)
udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
} else {
@@ -410,17 +408,15 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
struct tegra_dma *tdma = tdc->tdma;
- if (tdma->chip_data->support_channel_pause) {
+ if (tdma->chip_data->support_channel_pause)
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
- } else {
+ else
tegra_dma_global_resume(tdc);
- }
}
static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
- u32 csr;
- u32 status;
+ u32 csr, status;
/* Disable interrupts */
csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
@@ -441,7 +437,7 @@ static void tegra_dma_stop(struct tegra_dma_channel *tdc)
}
static void tegra_dma_start(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *sg_req)
+ struct tegra_dma_sg_req *sg_req)
{
struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
@@ -455,11 +451,11 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc,
/* Start DMA */
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
- ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
+ ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *nsg_req)
+ struct tegra_dma_sg_req *nsg_req)
{
unsigned long status;
@@ -493,9 +489,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
if (tdc->tdma->chip_data->support_separate_wcount_reg)
tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
- nsg_req->ch_regs.wcount);
+ nsg_req->ch_regs.wcount);
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
- nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
+ nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
nsg_req->words_xferred = 0;
@@ -506,11 +502,7 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
struct tegra_dma_sg_req *sg_req;
- if (list_empty(&tdc->pending_sg_req))
- return;
-
- sg_req = list_first_entry(&tdc->pending_sg_req,
- typeof(*sg_req), node);
+ sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
tegra_dma_start(tdc, sg_req);
sg_req->configured = true;
sg_req->words_xferred = 0;
@@ -519,34 +511,32 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
- struct tegra_dma_sg_req *hsgreq;
- struct tegra_dma_sg_req *hnsgreq;
-
- if (list_empty(&tdc->pending_sg_req))
- return;
+ struct tegra_dma_sg_req *hsgreq, *hnsgreq;
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
- hnsgreq = list_first_entry(&hsgreq->node,
- typeof(*hnsgreq), node);
+ hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
+ node);
tegra_dma_configure_for_next(tdc, hnsgreq);
}
}
-static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *sg_req, unsigned long status)
+static inline unsigned int
+get_current_xferred_count(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req,
+ unsigned long status)
{
return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
while (!list_empty(&tdc->pending_sg_req)) {
- sgreq = list_first_entry(&tdc->pending_sg_req,
- typeof(*sgreq), node);
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
+ node);
list_move_tail(&sgreq->node, &tdc->free_sg_req);
if (sgreq->last_sg) {
dma_desc = sgreq->dma_desc;
@@ -556,7 +546,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
/* Add to cb list if it is not already there. */
if (!dma_desc->cb_count)
list_add_tail(&dma_desc->cb_node,
- &tdc->cb_desc);
+ &tdc->cb_desc);
dma_desc->cb_count++;
}
}
@@ -564,15 +554,9 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
- struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
+ bool to_terminate)
{
- struct tegra_dma_sg_req *hsgreq = NULL;
-
- if (list_empty(&tdc->pending_sg_req)) {
- dev_err(tdc2dev(tdc), "DMA is running without req\n");
- tegra_dma_stop(tdc);
- return false;
- }
+ struct tegra_dma_sg_req *hsgreq;
/*
* Check that the head request on the list is in flight.
@@ -582,7 +566,8 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!hsgreq->configured) {
tegra_dma_stop(tdc);
- dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
+ pm_runtime_put(tdc->tdma->dev);
+ dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
tegra_dma_abort_all(tdc);
return false;
}
@@ -590,14 +575,15 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
/* Configure next request */
if (!to_terminate)
tdc_configure_next_head_desc(tdc);
+
return true;
}
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
- bool to_terminate)
+ bool to_terminate)
{
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
tdc->busy = false;
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
@@ -616,17 +602,22 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
list_add_tail(&sgreq->node, &tdc->free_sg_req);
/* Do not start DMA if it is going to be terminated */
- if (to_terminate || list_empty(&tdc->pending_sg_req))
+ if (to_terminate)
+ return;
+
+ if (list_empty(&tdc->pending_sg_req)) {
+ pm_runtime_put(tdc->tdma->dev);
return;
+ }
tdc_start_head_req(tdc);
}
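
The pm_runtime_put() added here is one half of a new pairing: tegra_dma_issue_pending() below takes a runtime-PM reference when it starts the first queued request, and the reference is dropped when the pending list drains, on transfer error, or in terminate_all. A condensed sketch of that lifetime rule, with hypothetical helper names (the real driver also marks runtime PM irq-safe, so the get may happen under a spinlock):

#include <linux/pm_runtime.h>

static int chan_power_up(struct device *dev)	/* hypothetical */
{
	int err = pm_runtime_get_sync(dev);	/* +1 when the first request starts */

	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return err;
	}
	return 0;
}

static void chan_power_down(struct device *dev)	/* hypothetical */
{
	/* queue drained, transfer aborted, or channel terminated */
	pm_runtime_put(dev);			/* -1: allow runtime suspend */
}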
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
- bool to_terminate)
+ bool to_terminate)
{
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
bool st;
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
@@ -647,7 +638,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
list_move_tail(&sgreq->node, &tdc->pending_sg_req);
sgreq->configured = false;
- st = handle_continuous_head_request(tdc, sgreq, to_terminate);
+ st = handle_continuous_head_request(tdc, to_terminate);
if (!st)
dma_desc->dma_status = DMA_ERROR;
}
@@ -658,13 +649,13 @@ static void tegra_dma_tasklet(unsigned long data)
struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
struct dmaengine_desc_callback cb;
struct tegra_dma_desc *dma_desc;
+ unsigned int cb_count;
unsigned long flags;
- int cb_count;
spin_lock_irqsave(&tdc->lock, flags);
while (!list_empty(&tdc->cb_desc)) {
- dma_desc = list_first_entry(&tdc->cb_desc,
- typeof(*dma_desc), cb_node);
+ dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
+ cb_node);
list_del(&dma_desc->cb_node);
dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
@@ -682,10 +673,9 @@ static void tegra_dma_tasklet(unsigned long data)
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
struct tegra_dma_channel *tdc = dev_id;
- unsigned long status;
- unsigned long flags;
+ u32 status;
- spin_lock_irqsave(&tdc->lock, flags);
+ spin_lock(&tdc->lock);
trace_tegra_dma_isr(&tdc->dma_chan, irq);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
@@ -693,13 +683,15 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
tdc->isr_handler(tdc, false);
tasklet_schedule(&tdc->tasklet);
- spin_unlock_irqrestore(&tdc->lock, flags);
+ wake_up_all(&tdc->wq);
+ spin_unlock(&tdc->lock);
return IRQ_HANDLED;
}
- spin_unlock_irqrestore(&tdc->lock, flags);
- dev_info(tdc2dev(tdc),
- "Interrupt already served status 0x%08lx\n", status);
+ spin_unlock(&tdc->lock);
+ dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
+ status);
+
return IRQ_NONE;
}
@@ -715,6 +707,7 @@ static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
cookie = dma_cookie_assign(&dma_desc->txd);
list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
spin_unlock_irqrestore(&tdc->lock, flags);
+
return cookie;
}
@@ -722,6 +715,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned long flags;
+ int err;
spin_lock_irqsave(&tdc->lock, flags);
if (list_empty(&tdc->pending_sg_req)) {
@@ -729,6 +723,12 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end;
}
if (!tdc->busy) {
+ err = pm_runtime_get_sync(tdc->tdma->dev);
+ if (err < 0) {
+ dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
+ goto end;
+ }
+
tdc_start_head_req(tdc);
/* Continuous single mode: Configure next req */
@@ -748,11 +748,10 @@ end:
static int tegra_dma_terminate_all(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma_sg_req *sgreq;
struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sgreq;
unsigned long flags;
- unsigned long status;
- unsigned long wcount;
+ u32 status, wcount;
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
@@ -778,30 +777,60 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
tegra_dma_stop(tdc);
if (!list_empty(&tdc->pending_sg_req) && was_busy) {
- sgreq = list_first_entry(&tdc->pending_sg_req,
- typeof(*sgreq), node);
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
+ node);
sgreq->dma_desc->bytes_transferred +=
get_current_xferred_count(tdc, sgreq, wcount);
}
tegra_dma_resume(tdc);
+ pm_runtime_put(tdc->tdma->dev);
+ wake_up_all(&tdc->wq);
+
skip_dma_stop:
tegra_dma_abort_all(tdc);
while (!list_empty(&tdc->cb_desc)) {
- dma_desc = list_first_entry(&tdc->cb_desc,
- typeof(*dma_desc), cb_node);
+ dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
+ cb_node);
list_del(&dma_desc->cb_node);
dma_desc->cb_count = 0;
}
spin_unlock_irqrestore(&tdc->lock, flags);
+
return 0;
}
+static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
+{
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
+}
+
+static void tegra_dma_synchronize(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ /*
+	 * The CPU that handles the interrupt could be busy in an
+	 * uninterruptible state; in that case a sibling CPU should
+	 * wait until the interrupt has been handled.
+ */
+ wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
+
+ tasklet_kill(&tdc->tasklet);
+}
+
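
The new device_synchronize hook pairs with the wake_up_all() added to tegra_dma_isr() above: a terminating CPU sleeps until the ISR has acknowledged the EOC interrupt, then flushes the completion tasklet. A generic sketch of that handshake with hypothetical names; the real code re-reads the hardware status under the channel lock rather than using a bare flag:

#include <linux/wait.h>
#include <linux/interrupt.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* hypothetical */
static bool demo_irq_done;

static irqreturn_t demo_isr(int irq, void *data)	/* hypothetical */
{
	/* ... acknowledge the hardware interrupt ... */
	demo_irq_done = true;
	wake_up_all(&demo_wq);		/* release waiters in demo_synchronize() */
	return IRQ_HANDLED;
}

static void demo_synchronize(struct tasklet_struct *t)	/* hypothetical */
{
	wait_event(demo_wq, demo_irq_done);	/* sleep until the ISR has run */
	tasklet_kill(t);		/* then flush deferred completion work */
}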
static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
struct tegra_dma_sg_req *sg_req)
{
- unsigned long status, wcount = 0;
+ u32 status, wcount = 0;
if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
return 0;
@@ -858,7 +887,8 @@ static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
}
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc;
@@ -905,11 +935,12 @@ found:
trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
spin_unlock_irqrestore(&tdc->lock, flags);
+
return ret;
}
-static inline int get_bus_width(struct tegra_dma_channel *tdc,
- enum dma_slave_buswidth slave_bw)
+static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
+ enum dma_slave_buswidth slave_bw)
{
switch (slave_bw) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -922,16 +953,17 @@ static inline int get_bus_width(struct tegra_dma_channel *tdc,
return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
default:
dev_warn(tdc2dev(tdc),
- "slave bw is not supported, using 32bits\n");
+ "slave bw is not supported, using 32bits\n");
return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
}
}
-static inline int get_burst_size(struct tegra_dma_channel *tdc,
- u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
+static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
+ u32 burst_size,
+ enum dma_slave_buswidth slave_bw,
+ u32 len)
{
- int burst_byte;
- int burst_ahb_width;
+ unsigned int burst_byte, burst_ahb_width;
/*
* burst_size from client is in terms of the bus_width.
@@ -958,9 +990,12 @@ static inline int get_burst_size(struct tegra_dma_channel *tdc,
}
static int get_transfer_param(struct tegra_dma_channel *tdc,
- enum dma_transfer_direction direction, unsigned long *apb_addr,
- unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
- enum dma_slave_buswidth *slave_bw)
+ enum dma_transfer_direction direction,
+ u32 *apb_addr,
+ u32 *apb_seq,
+ u32 *csr,
+ unsigned int *burst_size,
+ enum dma_slave_buswidth *slave_bw)
{
switch (direction) {
case DMA_MEM_TO_DEV:
@@ -981,13 +1016,15 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
default:
dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
- return -EINVAL;
+ break;
}
+
return -EINVAL;
}
static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
- struct tegra_dma_channel_regs *ch_regs, u32 len)
+ struct tegra_dma_channel_regs *ch_regs,
+ u32 len)
{
u32 len_field = (len - 4) & 0xFFFC;
@@ -997,20 +1034,23 @@ static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
ch_regs->csr |= len_field;
}
-static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
- struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_slave_sg(struct dma_chan *dc,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags,
+ void *context)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_sg_req *sg_req = NULL;
+ u32 csr, ahb_seq, apb_ptr, apb_seq;
+ enum dma_slave_buswidth slave_bw;
struct tegra_dma_desc *dma_desc;
- unsigned int i;
- struct scatterlist *sg;
- unsigned long csr, ahb_seq, apb_ptr, apb_seq;
struct list_head req_list;
- struct tegra_dma_sg_req *sg_req = NULL;
- u32 burst_size;
- enum dma_slave_buswidth slave_bw;
+ struct scatterlist *sg;
+ unsigned int burst_size;
+ unsigned int i;
if (!tdc->config_init) {
dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
@@ -1022,7 +1062,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
}
if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
- &burst_size, &slave_bw) < 0)
+ &burst_size, &slave_bw) < 0)
return NULL;
INIT_LIST_HEAD(&req_list);
@@ -1068,7 +1108,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
len = sg_dma_len(sg);
if ((len & 3) || (mem & 3) ||
- (len > tdc->tdma->chip_data->max_dma_count)) {
+ len > tdc->tdma->chip_data->max_dma_count) {
dev_err(tdc2dev(tdc),
"DMA length/memory address is not supported\n");
tegra_dma_desc_put(tdc, dma_desc);
@@ -1120,20 +1160,21 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
return &dma_desc->txd;
}
-static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
- struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
+ size_t buf_len,
+ size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma_desc *dma_desc = NULL;
struct tegra_dma_sg_req *sg_req = NULL;
- unsigned long csr, ahb_seq, apb_ptr, apb_seq;
- int len;
- size_t remain_len;
- dma_addr_t mem = buf_addr;
- u32 burst_size;
+ u32 csr, ahb_seq, apb_ptr, apb_seq;
enum dma_slave_buswidth slave_bw;
+ struct tegra_dma_desc *dma_desc;
+ dma_addr_t mem = buf_addr;
+ unsigned int burst_size;
+ size_t len, remain_len;
if (!buf_len || !period_len) {
dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
@@ -1167,13 +1208,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
len = period_len;
if ((len & 3) || (buf_addr & 3) ||
- (len > tdc->tdma->chip_data->max_dma_count)) {
+ len > tdc->tdma->chip_data->max_dma_count) {
dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
return NULL;
}
if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
- &burst_size, &slave_bw) < 0)
+ &burst_size, &slave_bw) < 0)
return NULL;
ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
@@ -1259,15 +1300,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma *tdma = tdc->tdma;
- int ret;
dma_cookie_init(&tdc->dma_chan);
- tdc->config_init = false;
-
- ret = pm_runtime_get_sync(tdma->dev);
- if (ret < 0)
- return ret;
return 0;
}
@@ -1275,33 +1309,29 @@ static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
- struct tegra_dma *tdma = tdc->tdma;
struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req;
struct list_head dma_desc_list;
struct list_head sg_req_list;
- unsigned long flags;
INIT_LIST_HEAD(&dma_desc_list);
INIT_LIST_HEAD(&sg_req_list);
dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
- if (tdc->busy)
- tegra_dma_terminate_all(dc);
+ tegra_dma_terminate_all(dc);
+ tasklet_kill(&tdc->tasklet);
- spin_lock_irqsave(&tdc->lock, flags);
list_splice_init(&tdc->pending_sg_req, &sg_req_list);
list_splice_init(&tdc->free_sg_req, &sg_req_list);
list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
INIT_LIST_HEAD(&tdc->cb_desc);
tdc->config_init = false;
tdc->isr_handler = NULL;
- spin_unlock_irqrestore(&tdc->lock, flags);
while (!list_empty(&dma_desc_list)) {
- dma_desc = list_first_entry(&dma_desc_list,
- typeof(*dma_desc), node);
+ dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
+ node);
list_del(&dma_desc->node);
kfree(dma_desc);
}
@@ -1311,7 +1341,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_del(&sg_req->node);
kfree(sg_req);
}
- pm_runtime_put(tdma->dev);
tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}
@@ -1320,8 +1349,8 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct tegra_dma *tdma = ofdma->of_dma_data;
- struct dma_chan *chan;
struct tegra_dma_channel *tdc;
+ struct dma_chan *chan;
if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
@@ -1374,23 +1403,48 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
.support_separate_wcount_reg = true,
};
+static int tegra_dma_init_hw(struct tegra_dma *tdma)
+{
+ int err;
+
+ err = reset_control_assert(tdma->rst);
+ if (err) {
+ dev_err(tdma->dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_enable(tdma->dma_clk);
+ if (err) {
+ dev_err(tdma->dev, "failed to enable clk: %d\n", err);
+ return err;
+ }
+
+ /* reset DMA controller */
+ udelay(2);
+ reset_control_deassert(tdma->rst);
+
+ /* enable global DMA registers */
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+ tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
+ tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);
+
+ clk_disable(tdma->dma_clk);
+
+ return 0;
+}
+
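
Splitting the clock handling is deliberate: clk_prepare() may sleep, so it is done once at probe, while the runtime-PM callbacks (made irq-safe below) only use the atomic clk_enable()/clk_disable() pair. A sketch of that split under those assumptions, with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct demo_priv {			/* hypothetical driver data */
	struct clk *clk;
};

static int demo_probe_clk(struct device *dev, struct demo_priv *p)
{
	int err = clk_prepare(p->clk);	/* may sleep: do it once, at probe */

	if (err)
		return err;

	pm_runtime_irq_safe(dev);	/* PM callbacks may now run atomically */
	pm_runtime_enable(dev);
	return 0;
}

static int demo_runtime_suspend(struct device *dev)
{
	struct demo_priv *p = dev_get_drvdata(dev);

	clk_disable(p->clk);		/* atomic half of the prepare/enable pair */
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	struct demo_priv *p = dev_get_drvdata(dev);

	return clk_enable(p->clk);	/* clock was prepared at probe time */
}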
static int tegra_dma_probe(struct platform_device *pdev)
{
- struct resource *res;
+ const struct tegra_dma_chip_data *cdata;
struct tegra_dma *tdma;
+ unsigned int i;
+ size_t size;
int ret;
- int i;
- const struct tegra_dma_chip_data *cdata;
cdata = of_device_get_match_data(&pdev->dev);
- if (!cdata) {
- dev_err(&pdev->dev, "Error: No device match data found\n");
- return -ENODEV;
- }
+ size = struct_size(tdma, channels, cdata->nr_channels);
- tdma = devm_kzalloc(&pdev->dev,
- struct_size(tdma, channels, cdata->nr_channels),
- GFP_KERNEL);
+ tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!tdma)
return -ENOMEM;
@@ -1398,8 +1452,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma->chip_data = cdata;
platform_set_drvdata(pdev, tdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
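
devm_platform_ioremap_resource(), used for several conversions in this series, is shorthand for the two-step lookup-and-map it replaces; the two forms below are equivalent for MEM resource 0 (illustration only):

#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *demo_map(struct platform_device *pdev)	/* hypothetical */
{
	struct resource *res;
	void __iomem *base;

	/* old, open-coded form */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* new shorthand, identical semantics */
	base = devm_platform_ioremap_resource(pdev, 0);

	return base;	/* IS_ERR() on failure in both cases */
}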
@@ -1417,64 +1470,55 @@ static int tegra_dma_probe(struct platform_device *pdev)
spin_lock_init(&tdma->global_lock);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
- ret = tegra_dma_runtime_resume(&pdev->dev);
- else
- ret = pm_runtime_get_sync(&pdev->dev);
-
- if (ret < 0) {
- pm_runtime_disable(&pdev->dev);
+ ret = clk_prepare(tdma->dma_clk);
+ if (ret)
return ret;
- }
-
- /* Reset DMA controller */
- reset_control_assert(tdma->rst);
- udelay(2);
- reset_control_deassert(tdma->rst);
- /* Enable global DMA registers */
- tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
- tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
- tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+ ret = tegra_dma_init_hw(tdma);
+ if (ret)
+ goto err_clk_unprepare;
- pm_runtime_put(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
+ int irq;
tdc->chan_addr = tdma->base_addr +
TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
(i * cdata->channel_reg_size);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- ret = -EINVAL;
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
- goto err_irq;
+ goto err_pm_disable;
}
- tdc->irq = res->start;
+
snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
- ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
+ ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
+ tdc->name, tdc);
if (ret) {
dev_err(&pdev->dev,
"request_irq failed with err %d channel %d\n",
ret, i);
- goto err_irq;
+ goto err_pm_disable;
}
tdc->dma_chan.device = &tdma->dma_dev;
dma_cookie_init(&tdc->dma_chan);
list_add_tail(&tdc->dma_chan.device_node,
- &tdma->dma_dev.channels);
+ &tdma->dma_dev.channels);
tdc->tdma = tdma;
tdc->id = i;
tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
- (unsigned long)tdc);
+ (unsigned long)tdc);
spin_lock_init(&tdc->lock);
+ init_waitqueue_head(&tdc->wq);
INIT_LIST_HEAD(&tdc->pending_sg_req);
INIT_LIST_HEAD(&tdc->free_sg_req);
@@ -1506,6 +1550,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
tdma->dma_dev.device_config = tegra_dma_slave_config;
tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
+ tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
@@ -1513,7 +1558,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"Tegra20 APB DMA driver registration failed %d\n", ret);
- goto err_irq;
+ goto err_pm_disable;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -1524,118 +1569,92 @@ static int tegra_dma_probe(struct platform_device *pdev)
goto err_unregister_dma_dev;
}
- dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n",
- cdata->nr_channels);
+ dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
+ cdata->nr_channels);
+
return 0;
err_unregister_dma_dev:
dma_async_device_unregister(&tdma->dma_dev);
-err_irq:
- while (--i >= 0) {
- struct tegra_dma_channel *tdc = &tdma->channels[i];
-
- free_irq(tdc->irq, tdc);
- tasklet_kill(&tdc->tasklet);
- }
+err_pm_disable:
pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- tegra_dma_runtime_suspend(&pdev->dev);
+
+err_clk_unprepare:
+ clk_unprepare(tdma->dma_clk);
+
return ret;
}
static int tegra_dma_remove(struct platform_device *pdev)
{
struct tegra_dma *tdma = platform_get_drvdata(pdev);
- int i;
- struct tegra_dma_channel *tdc;
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
-
- for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
- tdc = &tdma->channels[i];
- free_irq(tdc->irq, tdc);
- tasklet_kill(&tdc->tasklet);
- }
-
pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- tegra_dma_runtime_suspend(&pdev->dev);
+ clk_unprepare(tdma->dma_clk);
return 0;
}
-static int tegra_dma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
- int i;
-
- tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
- for (i = 0; i < tdma->chip_data->nr_channels; i++) {
- struct tegra_dma_channel *tdc = &tdma->channels[i];
- struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
-
- /* Only save the state of DMA channels that are in use */
- if (!tdc->config_init)
- continue;
-
- ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
- ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
- ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
- ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
- ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
- if (tdma->chip_data->support_separate_wcount_reg)
- ch_reg->wcount = tdc_read(tdc,
- TEGRA_APBDMA_CHAN_WCOUNT);
- }
- clk_disable_unprepare(tdma->dma_clk);
+ clk_disable(tdma->dma_clk);
return 0;
}
-static int tegra_dma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
- int i, ret;
- ret = clk_prepare_enable(tdma->dma_clk);
- if (ret < 0) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
- return ret;
- }
+ return clk_enable(tdma->dma_clk);
+}
- tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
- tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
- tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+ bool busy;
for (i = 0; i < tdma->chip_data->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
- struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
-
- /* Only restore the state of DMA channels that are in use */
- if (!tdc->config_init)
- continue;
-
- if (tdma->chip_data->support_separate_wcount_reg)
- tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
- ch_reg->wcount);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
- tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
- (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
+
+ tasklet_kill(&tdc->tasklet);
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ busy = tdc->busy;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ if (busy) {
+ dev_err(tdma->dev, "channel %u busy\n", i);
+ return -EBUSY;
+ }
}
- return 0;
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra_dma_init_hw(tdma);
+ if (err)
+ return err;
+
+ return pm_runtime_force_resume(dev);
}
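
The system-sleep hooks now refuse to suspend while a channel is busy and re-run the hardware init on resume before handing control back to pm_runtime_force_resume(). The resulting ordering contract, sketched with hypothetical names:

#include <linux/pm_runtime.h>

int demo_any_channel_busy(struct device *dev);	/* hypothetical helper */
int demo_reinit_hw(struct device *dev);		/* hypothetical helper */

static int demo_sys_suspend(struct device *dev)
{
	if (demo_any_channel_busy(dev))
		return -EBUSY;			/* abort system suspend */

	return pm_runtime_force_suspend(dev);	/* gates the clock off if needed */
}

static int demo_sys_resume(struct device *dev)
{
	int err = demo_reinit_hw(dev);		/* reset + reprogram registers */

	if (err)
		return err;

	return pm_runtime_force_resume(dev);	/* restore runtime-PM state */
}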
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
};
static const struct of_device_id tegra_dma_of_match[] = {
@@ -1668,7 +1687,6 @@ static struct platform_driver tegra_dmac_driver = {
module_platform_driver(tegra_dmac_driver);
-MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 6e1268552f74..c4ce5dfb149b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -164,7 +164,7 @@ struct tegra_adma {
const struct tegra_adma_chip_data *cdata;
/* Last member of the structure */
- struct tegra_adma_chan channels[0];
+ struct tegra_adma_chan channels[];
};
static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
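
The channels[0] to channels[] conversions in this patch set replace the old GNU zero-length-array idiom with a C99 flexible array member, which bounds-checking tooling understands. A minimal allocation sketch using generic names, not from any of these drivers:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct demo_dev {		/* hypothetical */
	int nr_chans;
	struct demo_chan {
		int id;
	} channels[];		/* C99 flexible array member, not [0] */
};

static struct demo_dev *demo_alloc(int nr)
{
	/* struct_size() = sizeof(struct demo_dev) + nr * sizeof(channels[0]),
	 * with overflow checking folded in. */
	struct demo_dev *d = kzalloc(struct_size(d, channels, nr), GFP_KERNEL);

	if (d)
		d->nr_chans = nr;
	return d;
}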
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index f255056696ee..4ba8fa5d9c36 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -133,7 +133,6 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct device_node *dma_node;
struct ti_am335x_xbar_data *xbar;
- struct resource *res;
void __iomem *iomem;
int i, ret;
@@ -173,8 +172,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
xbar->xbar_events = TI_AM335X_XBAR_LINES;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
@@ -323,7 +321,6 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
struct device_node *dma_node;
struct ti_dra7_xbar_data *xbar;
struct property *prop;
- struct resource *res;
u32 safe_val;
int sz;
void __iomem *iomem;
@@ -403,8 +400,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
kfree(rsv_events);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 03a7f647f7b2..c4a5c170c1f9 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1275,6 +1275,81 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
+static struct dma_async_tx_descriptor *
+edma_prep_dma_interleaved(struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long tx_flags)
+{
+ struct device *dev = chan->device->dev;
+ struct edma_chan *echan = to_edma_chan(chan);
+ struct edmacc_param *param;
+ struct edma_desc *edesc;
+ size_t src_icg, dst_icg;
+ int src_bidx, dst_bidx;
+
+ /* Slave mode is not supported */
+ if (is_slave_direction(xt->dir))
+ return NULL;
+
+ if (xt->frame_size != 1 || xt->numf == 0)
+ return NULL;
+
+ if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
+ return NULL;
+
+ src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+ if (src_icg) {
+ src_bidx = src_icg + xt->sgl[0].size;
+ } else if (xt->src_inc) {
+ src_bidx = xt->sgl[0].size;
+ } else {
+ dev_err(dev, "%s: SRC constant addressing is not supported\n",
+ __func__);
+ return NULL;
+ }
+
+ dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+ if (dst_icg) {
+ dst_bidx = dst_icg + xt->sgl[0].size;
+ } else if (xt->dst_inc) {
+ dst_bidx = xt->sgl[0].size;
+ } else {
+ dev_err(dev, "%s: DST constant addressing is not supported\n",
+ __func__);
+ return NULL;
+ }
+
+ if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
+ return NULL;
+
+ edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
+ if (!edesc)
+ return NULL;
+
+ edesc->direction = DMA_MEM_TO_MEM;
+ edesc->echan = echan;
+ edesc->pset_nr = 1;
+
+ param = &edesc->pset[0].param;
+
+ param->src = xt->src_start;
+ param->dst = xt->dst_start;
+ param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
+ param->ccnt = 1;
+ param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+ param->src_dst_cidx = 0;
+
+ param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+ param->opt |= ITCCHEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ param->opt |= TCINTEN;
+ else
+ edesc->polled = true;
+
+ return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
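
The new callback maps a single-chunk dma_interleaved_template onto one PaRAM set: numf becomes the B count, sgl[0].size the A count, and the inter-chunk gaps fold into the BIDX indexes. A hedged sketch of what a mem-to-mem client request could look like; demo_2d_copy and its parameters are hypothetical, and the template is freed after prep on the assumption that the driver copies it into the PaRAM set, as the code above does:

#include <linux/dmaengine.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Copy @numf rows of @row bytes, skipping @gap bytes between rows on both
 * sides; submission and completion handling are elided. */
static int demo_2d_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
			size_t row, size_t gap, size_t numf)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = numf;		/* number of frames (rows) */
	xt->frame_size = 1;		/* one chunk per frame, as required above */
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->sgl[0].size = row;		/* bytes per chunk */
	xt->sgl[0].src_icg = gap;	/* gap to the next source chunk */
	xt->sgl[0].dst_icg = gap;	/* gap to the next destination chunk */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);

	return tx ? 0 : -EINVAL;
}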
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
@@ -1917,7 +1992,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
"Legacy memcpy is enabled, things might not work\n");
dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+ s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
s_ddev->directions = BIT(DMA_MEM_TO_MEM);
}
@@ -1953,8 +2030,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
dma_cap_zero(m_ddev->cap_mask);
dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+ m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
m_ddev->device_free_chan_resources = edma_free_chan_resources;
m_ddev->device_issue_pending = edma_issue_pending;
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 4d7561a1b3e3..64c8955e0cf1 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -32,6 +32,7 @@ struct k3_udma_glue_common {
bool epib;
u32 psdata_size;
u32 swdata_size;
+ u32 atype;
};
struct k3_udma_glue_tx_channel {
@@ -121,6 +122,15 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
return -ENOENT;
thread_id = dma_spec.args[0];
+ if (dma_spec.args_count == 2) {
+ if (dma_spec.args[1] > 2) {
+ dev_err(common->dev, "Invalid channel atype: %u\n",
+ dma_spec.args[1]);
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+ common->atype = dma_spec.args[1];
+ }
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
ret = -EINVAL;
@@ -202,7 +212,8 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
req.index = tx_chn->udma_tchan_id;
if (tx_chn->tx_pause_on_err)
@@ -216,6 +227,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
req.tx_supr_tdpkt = 1;
req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+ req.tx_atype = tx_chn->common.atype;
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
@@ -502,7 +514,8 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
req.index = rx_chn->udma_rchan_id;
@@ -519,6 +532,7 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
req.flowid_cnt = rx_chn->flow_num;
}
req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ req.rx_atype = rx_chn->common.atype;
ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
if (ret)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 0536866a58ce..a9c0251adf1a 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -149,6 +149,7 @@ struct udma_dev {
struct udma_chan *channels;
u32 psil_base;
+ u32 atype;
};
struct udma_desc {
@@ -192,6 +193,7 @@ struct udma_chan_config {
u32 hdesc_size; /* Size of a packet descriptor in packet mode */
bool notdpkt; /* Suppress sending TDC packet */
int remote_thread_id;
+ u32 atype;
u32 src_thread;
u32 dst_thread;
enum psil_endpoint_type ep_type;
@@ -1569,7 +1571,8 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
#define TISCI_RCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
@@ -1579,7 +1582,8 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
@@ -1601,6 +1605,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_tx.txcq_qnum = tc_ring;
+ req_tx.tx_atype = ud->atype;
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret) {
@@ -1614,6 +1619,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
req_rx.rxcq_qnum = tc_ring;
req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+ req_rx.rx_atype = ud->atype;
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret)
@@ -1649,6 +1655,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
req_tx.tx_supr_tdpkt = uc->config.notdpkt;
req_tx.tx_fetch_size = fetch_size >> 2;
req_tx.txcq_qnum = tc_ring;
+ req_tx.tx_atype = uc->config.atype;
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
@@ -1685,6 +1692,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
req_rx.rx_fetch_size = fetch_size >> 2;
req_rx.rxcq_qnum = rx_ring;
req_rx.rx_chan_type = mode;
+ req_rx.rx_atype = uc->config.atype;
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
if (ret) {
@@ -3063,13 +3071,18 @@ static void udma_free_chan_resources(struct dma_chan *chan)
static struct platform_driver udma_driver;
+struct udma_filter_param {
+ int remote_thread_id;
+ u32 atype;
+};
+
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
{
struct udma_chan_config *ucc;
struct psil_endpoint_config *ep_config;
+ struct udma_filter_param *filter_param;
struct udma_chan *uc;
struct udma_dev *ud;
- u32 *args;
if (chan->device->dev->driver != &udma_driver.driver)
return false;
@@ -3077,9 +3090,16 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
uc = to_udma_chan(chan);
ucc = &uc->config;
ud = uc->ud;
- args = param;
+ filter_param = param;
+
+ if (filter_param->atype > 2) {
+ dev_err(ud->dev, "Invalid channel atype: %u\n",
+ filter_param->atype);
+ return false;
+ }
- ucc->remote_thread_id = args[0];
+ ucc->remote_thread_id = filter_param->remote_thread_id;
+ ucc->atype = filter_param->atype;
if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
ucc->dir = DMA_MEM_TO_DEV;
@@ -3092,6 +3112,7 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->remote_thread_id);
ucc->dir = DMA_MEM_TO_MEM;
ucc->remote_thread_id = -1;
+ ucc->atype = 0;
return false;
}
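
Passing a typed struct udma_filter_param instead of a bare u32 array makes the xlate-to-filter handoff self-describing and lets the filter validate each field. A generic sketch of the filter-param pattern, with hypothetical names:

#include <linux/dmaengine.h>

struct demo_filter_param {		/* hypothetical */
	int remote_thread_id;
	u32 atype;
};

static bool demo_filter_fn(struct dma_chan *chan, void *param)
{
	struct demo_filter_param *p = param;	/* typed, not a u32 array */

	if (p->atype > 2)		/* validate before claiming the channel */
		return false;
	/* ... copy p->remote_thread_id / p->atype into the channel config ... */
	return true;
}

/* Caller side: build the param on the stack and hand it to the core. */
static struct dma_chan *demo_request(dma_cap_mask_t *mask)
{
	struct demo_filter_param p = { .remote_thread_id = 0x4000, .atype = 0 };

	return dma_request_channel(*mask, demo_filter_fn, &p);
}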
@@ -3130,13 +3151,20 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
{
struct udma_dev *ud = ofdma->of_dma_data;
dma_cap_mask_t mask = ud->ddev.cap_mask;
+ struct udma_filter_param filter_param;
struct dma_chan *chan;
- if (dma_spec->args_count != 1)
+ if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
return NULL;
- chan = __dma_request_channel(&mask, udma_dma_filter_fn,
- &dma_spec->args[0], ofdma->of_node);
+ filter_param.remote_thread_id = dma_spec->args[0];
+ if (dma_spec->args_count == 2)
+ filter_param.atype = dma_spec->args[1];
+ else
+ filter_param.atype = 0;
+
+ chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "failed to get channel in %s\n", __func__);
return ERR_PTR(-EINVAL);
@@ -3473,6 +3501,66 @@ static int udma_setup_rx_flush(struct udma_dev *ud)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+static void udma_dbg_summary_show_chan(struct seq_file *s,
+ struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_chan_config *ucc = &uc->config;
+
+ seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+ chan->dbg_client_name ?: "in-use");
+ seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
+ ucc->src_thread, ucc->dst_thread);
+ break;
+ case DMA_DEV_TO_MEM:
+ seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
+ ucc->src_thread, ucc->dst_thread);
+ break;
+ case DMA_MEM_TO_DEV:
+ seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
+ ucc->src_thread, ucc->dst_thread);
+ break;
+ default:
+ seq_printf(s, ")\n");
+ return;
+ }
+
+ if (ucc->ep_type == PSIL_EP_NATIVE) {
+ seq_printf(s, "PSI-L Native");
+ if (ucc->metadata_size) {
+ seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
+ if (ucc->psd_size)
+ seq_printf(s, " PSDsize:%u", ucc->psd_size);
+ seq_printf(s, " ]");
+ }
+ } else {
+ seq_printf(s, "PDMA");
+ if (ucc->enable_acc32 || ucc->enable_burst)
+ seq_printf(s, "[%s%s ]",
+ ucc->enable_acc32 ? " ACC32" : "",
+ ucc->enable_burst ? " BURST" : "");
+ }
+
+ seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
+}
+
+static void udma_dbg_summary_show(struct seq_file *s,
+ struct dma_device *dma_dev)
+{
+ struct dma_chan *chan;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count)
+ udma_dbg_summary_show_chan(s, chan);
+ }
+}
+#endif /* CONFIG_DEBUG_FS */
+
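
dbg_summary_show is a dmaengine debugfs hook: when a driver sets it, the core's per-device summary file calls it instead of the default channel dump. A minimal sketch of such a callback, assuming the same CONFIG_DEBUG_FS guard and seq_file interface used above:

#include <linux/dmaengine.h>
#include <linux/seq_file.h>

static void demo_dbg_summary_show(struct seq_file *s,	/* hypothetical */
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (!chan->client_count)
			continue;	/* only report channels in use */
		/* dbg_client_name exists only under CONFIG_DEBUG_FS */
		seq_printf(s, " %-13s| %s\n", dma_chan_name(chan),
			   chan->dbg_client_name ?: "in-use");
	}
}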
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -3519,6 +3607,12 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+ if (!ret && ud->atype > 2) {
+ dev_err(dev, "Invalid atype: %u\n", ud->atype);
+ return -EINVAL;
+ }
+
ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
@@ -3553,6 +3647,9 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.device_resume = udma_resume;
ud->ddev.device_terminate_all = udma_terminate_all;
ud->ddev.device_synchronize = udma_synchronize;
+#ifdef CONFIG_DEBUG_FS
+ ud->ddev.dbg_summary_show = udma_dbg_summary_show;
+#endif
ud->ddev.device_free_chan_resources = udma_free_chan_resources;
ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index a014ab96e673..918301e17552 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -124,7 +124,7 @@ struct omap_desc {
uint32_t csdp; /* CSDP value */
unsigned sglen;
- struct omap_sg sg[0];
+ struct omap_sg sg[];
};
enum {
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index 21b8f1131d55..618839df0748 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -68,7 +68,7 @@ struct uniphier_mdmac_device {
struct dma_device ddev;
struct clk *clk;
void __iomem *reg_base;
- struct uniphier_mdmac_chan channels[0];
+ struct uniphier_mdmac_chan channels[];
};
static struct uniphier_mdmac_chan *
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
new file mode 100644
index 000000000000..7b2f8a8c2d31
--- /dev/null
+++ b/drivers/dma/uniphier-xdmac.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * External DMA controller driver for UniPhier SoCs
+ * Copyright 2019 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define XDMAC_CH_WIDTH 0x100
+
+#define XDMAC_TFA 0x08
+#define XDMAC_TFA_MCNT_MASK GENMASK(23, 16)
+#define XDMAC_TFA_MASK GENMASK(5, 0)
+#define XDMAC_SADM 0x10
+#define XDMAC_SADM_STW_MASK GENMASK(25, 24)
+#define XDMAC_SADM_SAM BIT(4)
+#define XDMAC_SADM_SAM_FIXED XDMAC_SADM_SAM
+#define XDMAC_SADM_SAM_INC 0
+#define XDMAC_DADM 0x14
+#define XDMAC_DADM_DTW_MASK XDMAC_SADM_STW_MASK
+#define XDMAC_DADM_DAM XDMAC_SADM_SAM
+#define XDMAC_DADM_DAM_FIXED XDMAC_SADM_SAM_FIXED
+#define XDMAC_DADM_DAM_INC XDMAC_SADM_SAM_INC
+#define XDMAC_EXSAD 0x18
+#define XDMAC_EXDAD 0x1c
+#define XDMAC_SAD 0x20
+#define XDMAC_DAD 0x24
+#define XDMAC_ITS 0x28
+#define XDMAC_ITS_MASK GENMASK(25, 0)
+#define XDMAC_TNUM 0x2c
+#define XDMAC_TNUM_MASK GENMASK(15, 0)
+#define XDMAC_TSS 0x30
+#define XDMAC_TSS_REQ BIT(0)
+#define XDMAC_IEN 0x34
+#define XDMAC_IEN_ERRIEN BIT(1)
+#define XDMAC_IEN_ENDIEN BIT(0)
+#define XDMAC_STAT 0x40
+#define XDMAC_STAT_TENF BIT(0)
+#define XDMAC_IR 0x44
+#define XDMAC_IR_ERRF BIT(1)
+#define XDMAC_IR_ENDF BIT(0)
+#define XDMAC_ID 0x48
+#define XDMAC_ID_ERRIDF BIT(1)
+#define XDMAC_ID_ENDIDF BIT(0)
+
+#define XDMAC_MAX_CHANS 16
+#define XDMAC_INTERVAL_CLKS 20
+#define XDMAC_MAX_WORDS XDMAC_TNUM_MASK
+
+/* mask off the low bits to keep the maximum transfer size aligned */
+#define XDMAC_MAX_WORD_SIZE (XDMAC_ITS_MASK & ~GENMASK(3, 0))
+
+#define UNIPHIER_XDMAC_BUSWIDTHS \
+ (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct uniphier_xdmac_desc_node {
+ dma_addr_t src;
+ dma_addr_t dst;
+ u32 burst_size;
+ u32 nr_burst;
+};
+
+struct uniphier_xdmac_desc {
+ struct virt_dma_desc vd;
+
+ unsigned int nr_node;
+ unsigned int cur_node;
+ enum dma_transfer_direction dir;
+ struct uniphier_xdmac_desc_node nodes[];
+};
+
+struct uniphier_xdmac_chan {
+ struct virt_dma_chan vc;
+ struct uniphier_xdmac_device *xdev;
+ struct uniphier_xdmac_desc *xd;
+ void __iomem *reg_ch_base;
+ struct dma_slave_config sconfig;
+ int id;
+ unsigned int req_factor;
+};
+
+struct uniphier_xdmac_device {
+ struct dma_device ddev;
+ void __iomem *reg_base;
+ int nr_chans;
+ struct uniphier_xdmac_chan channels[];
+};
+
+static struct uniphier_xdmac_chan *
+to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct uniphier_xdmac_chan, vc);
+}
+
+static struct uniphier_xdmac_desc *
+to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct uniphier_xdmac_desc, vd);
+}
+
+/* xc->vc.lock must be held by caller */
+static struct uniphier_xdmac_desc *
+uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&xc->vc);
+ if (!vd)
+ return NULL;
+
+ list_del(&vd->node);
+
+ return to_uniphier_xdmac_desc(vd);
+}
+
+/* xc->vc.lock must be held by caller */
+static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
+ struct uniphier_xdmac_desc *xd)
+{
+ u32 src_mode, src_addr, src_width;
+ u32 dst_mode, dst_addr, dst_width;
+ u32 val, its, tnum;
+ enum dma_slave_buswidth buswidth;
+
+ src_addr = xd->nodes[xd->cur_node].src;
+ dst_addr = xd->nodes[xd->cur_node].dst;
+ its = xd->nodes[xd->cur_node].burst_size;
+ tnum = xd->nodes[xd->cur_node].nr_burst;
+
+ /*
+	 * The width of the MEM side must be 4 or 8 bytes; it does not
+	 * affect the DEV side width or the transfer size.
+ */
+ if (xd->dir == DMA_DEV_TO_MEM) {
+ src_mode = XDMAC_SADM_SAM_FIXED;
+ buswidth = xc->sconfig.src_addr_width;
+ } else {
+ src_mode = XDMAC_SADM_SAM_INC;
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ }
+ src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));
+
+ if (xd->dir == DMA_MEM_TO_DEV) {
+ dst_mode = XDMAC_DADM_DAM_FIXED;
+ buswidth = xc->sconfig.dst_addr_width;
+ } else {
+ dst_mode = XDMAC_DADM_DAM_INC;
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ }
+ dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));
+
+ /* setup transfer factor */
+ val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
+ val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
+ writel(val, xc->reg_ch_base + XDMAC_TFA);
+
+ /* setup the channel */
+ writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
+ writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);
+
+ writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
+ writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);
+
+ src_mode |= src_width;
+ dst_mode |= dst_width;
+ writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
+ writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);
+
+ writel(its, xc->reg_ch_base + XDMAC_ITS);
+ writel(tnum, xc->reg_ch_base + XDMAC_TNUM);
+
+ /* enable interrupt */
+ writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
+ xc->reg_ch_base + XDMAC_IEN);
+
+ /* start XDMAC */
+ val = readl(xc->reg_ch_base + XDMAC_TSS);
+ val |= XDMAC_TSS_REQ;
+ writel(val, xc->reg_ch_base + XDMAC_TSS);
+}
+
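
The register setup above relies on the bitfield helpers: GENMASK() names a field, FIELD_PREP() shifts a value into it, and __ffs(buswidth) converts a byte width into the log2 encoding the hardware expects. A tiny self-contained illustration using a made-up register layout:

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define DEMO_WIDTH_MASK	GENMASK(25, 24)	/* hypothetical 2-bit field */
#define DEMO_COUNT_MASK	GENMASK(23, 16)	/* hypothetical 8-bit field */

static u32 demo_pack(unsigned int buswidth_bytes, unsigned int count)
{
	u32 val = 0;

	/* 1/2/4/8 bytes encode as 0/1/2/3: log2 via __ffs() */
	val |= FIELD_PREP(DEMO_WIDTH_MASK, __ffs(buswidth_bytes));
	val |= FIELD_PREP(DEMO_COUNT_MASK, count);

	return val;	/* FIELD_GET(DEMO_WIDTH_MASK, val) reverses this */
}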
+/* xc->vc.lock must be held by caller */
+static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
+{
+ u32 val;
+
+ /* disable interrupt */
+ val = readl(xc->reg_ch_base + XDMAC_IEN);
+ val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
+ writel(val, xc->reg_ch_base + XDMAC_IEN);
+
+ /* stop XDMAC */
+ val = readl(xc->reg_ch_base + XDMAC_TSS);
+ val &= ~XDMAC_TSS_REQ;
+ writel(0, xc->reg_ch_base + XDMAC_TSS);
+
+ /* wait until transfer is stopped */
+ return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
+ !(val & XDMAC_STAT_TENF), 100, 1000);
+}
+
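
readl_poll_timeout(addr, val, cond, sleep_us, timeout_us), as used above, re-reads a register until the condition holds or the timeout expires, returning 0 or -ETIMEDOUT. It behaves roughly like the open-coded loop below; this is a simplified sketch, not the exact helper:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ktime.h>

/* Approximation of readl_poll_timeout(reg, val, !(val & busy_bit), 100, 1000) */
static int demo_wait_idle(void __iomem *reg, u32 busy_bit)	/* hypothetical */
{
	ktime_t timeout = ktime_add_us(ktime_get(), 1000);
	u32 val;

	for (;;) {
		val = readl(reg);
		if (!(val & busy_bit))
			return 0;
		if (ktime_after(ktime_get(), timeout))
			return (readl(reg) & busy_bit) ? -ETIMEDOUT : 0;
		usleep_range(50, 100);	/* nonzero sleep_us variants may sleep */
	}
}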
+/* xc->vc.lock must be held by caller */
+static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
+{
+ struct uniphier_xdmac_desc *xd;
+
+ xd = uniphier_xdmac_next_desc(xc);
+ if (xd)
+ uniphier_xdmac_chan_start(xc, xd);
+
+	/* set the desc on the channel even if xd is NULL */
+ xc->xd = xd;
+}
+
+static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
+{
+ u32 stat;
+ int ret;
+
+ spin_lock(&xc->vc.lock);
+
+ stat = readl(xc->reg_ch_base + XDMAC_ID);
+
+ if (stat & XDMAC_ID_ERRIDF) {
+ ret = uniphier_xdmac_chan_stop(xc);
+ if (ret)
+			dev_err(xc->xdev->ddev.dev,
+				"DMA transfer error; channel failed to stop\n");
+ else
+ dev_err(xc->xdev->ddev.dev,
+ "DMA transfer error\n");
+
+ } else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
+ xc->xd->cur_node++;
+ if (xc->xd->cur_node >= xc->xd->nr_node) {
+ vchan_cookie_complete(&xc->xd->vd);
+ uniphier_xdmac_start(xc);
+ } else {
+ uniphier_xdmac_chan_start(xc, xc->xd);
+ }
+ }
+
+ /* write bits to clear */
+ writel(stat, xc->reg_ch_base + XDMAC_IR);
+
+ spin_unlock(&xc->vc.lock);
+}
+
+static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
+{
+ struct uniphier_xdmac_device *xdev = dev_id;
+ int i;
+
+ for (i = 0; i < xdev->nr_chans; i++)
+ uniphier_xdmac_chan_irq(&xdev->channels[i]);
+
+ return IRQ_HANDLED;
+}
+
+static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static struct dma_async_tx_descriptor *
+uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_desc *xd;
+ unsigned int nr;
+ size_t burst_size, tlen;
+ int i;
+
+ if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
+ return NULL;
+
+ nr = 1 + len / XDMAC_MAX_WORD_SIZE;
+
+ xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
+ if (!xd)
+ return NULL;
+
+ for (i = 0; i < nr; i++) {
+ burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
+ xd->nodes[i].src = src;
+ xd->nodes[i].dst = dst;
+ xd->nodes[i].burst_size = burst_size;
+ xd->nodes[i].nr_burst = len / burst_size;
+ tlen = rounddown(len, burst_size);
+ src += tlen;
+ dst += tlen;
+ len -= tlen;
+ }
+
+ xd->dir = DMA_MEM_TO_MEM;
+ xd->nr_node = nr;
+ xd->cur_node = 0;
+
+ return vchan_tx_prep(vc, &xd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+ struct uniphier_xdmac_desc *xd;
+ struct scatterlist *sg;
+ enum dma_slave_buswidth buswidth;
+ u32 maxburst;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ buswidth = xc->sconfig.src_addr_width;
+ maxburst = xc->sconfig.src_maxburst;
+ } else {
+ buswidth = xc->sconfig.dst_addr_width;
+ maxburst = xc->sconfig.dst_maxburst;
+ }
+
+ if (!maxburst)
+ maxburst = 1;
+ if (maxburst > xc->xdev->ddev.max_burst) {
+ dev_err(xc->xdev->ddev.dev,
+ "Exceed maximum number of burst words\n");
+ return NULL;
+ }
+
+ xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
+ if (!xd)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
+ ? xc->sconfig.src_addr : sg_dma_address(sg);
+ xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
+ ? xc->sconfig.dst_addr : sg_dma_address(sg);
+ xd->nodes[i].burst_size = maxburst * buswidth;
+ xd->nodes[i].nr_burst =
+ sg_dma_len(sg) / xd->nodes[i].burst_size;
+
+ /*
+		 * Currently a transfer whose size is not a multiple of the
+		 * unit size (the number of burst words * bus-width) is not
+		 * allowed, because the driver has no way to transfer the
+		 * residue. To transfer an arbitrary size, 'src_maxburst' or
+		 * 'dst_maxburst' of dma_slave_config must be set to 1.
+ */
+ if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
+ dev_err(xc->xdev->ddev.dev,
+ "Unaligned transfer size: %d", sg_dma_len(sg));
+ kfree(xd);
+ return NULL;
+ }
+
+ if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
+ dev_err(xc->xdev->ddev.dev,
+ "Exceed maximum transfer size");
+ kfree(xd);
+ return NULL;
+ }
+ }
+
+ xd->dir = direction;
+ xd->nr_node = sg_len;
+ xd->cur_node = 0;
+
+ return vchan_tx_prep(vc, &xd->vd, flags);
+}
+
+static int uniphier_xdmac_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+
+ memcpy(&xc->sconfig, config, sizeof(*config));
+
+ return 0;
+}
+
+static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+ unsigned long flags;
+ int ret = 0;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (xc->xd) {
+ vchan_terminate_vdesc(&xc->xd->vd);
+ xc->xd = NULL;
+ ret = uniphier_xdmac_chan_stop(xc);
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return ret;
+}
+
+static void uniphier_xdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !xc->xd)
+ uniphier_xdmac_start(xc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_uniphier_xdmac_desc(vd));
+}
+
+static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
+ int ch)
+{
+ struct uniphier_xdmac_chan *xc = &xdev->channels[ch];
+
+ xc->xdev = xdev;
+ xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
+ xc->vc.desc_free = uniphier_xdmac_desc_free;
+
+ vchan_init(&xc->vc, &xdev->ddev);
+}
+
+static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
+ int chan_id = dma_spec->args[0];
+
+ if (chan_id >= xdev->nr_chans)
+ return NULL;
+
+ xdev->channels[chan_id].id = chan_id;
+ xdev->channels[chan_id].req_factor = dma_spec->args[1];
+
+ return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
+}
+
+static int uniphier_xdmac_probe(struct platform_device *pdev)
+{
+ struct uniphier_xdmac_device *xdev;
+ struct device *dev = &pdev->dev;
+ struct dma_device *ddev;
+ int irq;
+ int nr_chans;
+ int i, ret;
+
+ if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
+ return -EINVAL;
+ if (nr_chans > XDMAC_MAX_CHANS)
+ nr_chans = XDMAC_MAX_CHANS;
+
+ xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->nr_chans = nr_chans;
+ xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xdev->reg_base))
+ return PTR_ERR(xdev->reg_base);
+
+ ddev = &xdev->ddev;
+ ddev->dev = dev;
+ dma_cap_zero(ddev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_MEM_TO_MEM);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ ddev->max_burst = XDMAC_MAX_WORDS;
+ ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
+ ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
+ ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
+ ddev->device_config = uniphier_xdmac_slave_config;
+ ddev->device_terminate_all = uniphier_xdmac_terminate_all;
+ ddev->device_synchronize = uniphier_xdmac_synchronize;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = uniphier_xdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++)
+ uniphier_xdmac_chan_init(xdev, i);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
+ IRQF_SHARED, "xdmac", xdev);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(dev, "Failed to register XDMA device\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(dev->of_node,
+ of_dma_uniphier_xlate, xdev);
+ if (ret) {
+ dev_err(dev, "Failed to register XDMA controller\n");
+ goto out_unregister_dmac;
+ }
+
+ platform_set_drvdata(pdev, xdev);
+
+ dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
+ nr_chans);
+
+ return 0;
+
+out_unregister_dmac:
+ dma_async_device_unregister(ddev);
+
+ return ret;
+}
+
+static int uniphier_xdmac_remove(struct platform_device *pdev)
+{
+ struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
+ struct dma_device *ddev = &xdev->ddev;
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+	 * ->device_free_chan_resources() hook. However, each channel might
+	 * still be holding one descriptor that was in flight at that moment.
+	 * Terminate it to make sure the hardware is no longer running, then
+	 * free the channel resources once again to avoid a memory leak.
+ */
+ list_for_each_entry(chan, &ddev->channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ uniphier_xdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(ddev);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_xdmac_match[] = {
+ { .compatible = "socionext,uniphier-xdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
+
+static struct platform_driver uniphier_xdmac_driver = {
+ .probe = uniphier_xdmac_probe,
+ .remove = uniphier_xdmac_remove,
+ .driver = {
+ .name = "uniphier-xdmac",
+ .of_match_table = uniphier_xdmac_match,
+ },
+};
+module_platform_driver(uniphier_xdmac_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier external DMA controller driver");
+MODULE_LICENSE("GPL v2");
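For context, a minimal consumer sketch of the xlate above: a peripheral driver resolves a channel through the two-cell "dmas" specifier (channel index plus request factor) and then drives it through the generic dmaengine API. The FIFO address and binding values below are assumptions for illustration, and error handling is trimmed; only the dmaengine calls are real.

#include <linux/dmaengine.h>

/* Hypothetical consumer; assumes a "dmas = <&xdmac 2 4>;" style binding. */
static int example_start_rx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = 0x54006000,			/* assumed peripheral FIFO */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");	/* resolved via of_dma_uniphier_xlate() */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);	/* lands in uniphier_xdmac_slave_config() */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* triggers uniphier_xdmac_issue_pending() */

	return 0;
}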
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a9c5d5cc9f2b..aecd5a35a296 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -125,7 +125,9 @@
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
/* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
+#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
+#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -468,6 +470,7 @@ struct xilinx_dma_config {
struct clk **tx_clk, struct clk **txs_clk,
struct clk **rx_clk, struct clk **rxs_clk);
irqreturn_t (*irq_handler)(int irq, void *data);
+ const int max_channels;
};
/**
@@ -485,16 +488,15 @@ struct xilinx_dma_config {
* @txs_clk: DMA mm2s stream clock
* @rx_clk: DMA s2mm clock
* @rxs_clk: DMA s2mm stream clock
- * @nr_channels: Number of channels DMA device supports
- * @chan_id: DMA channel identifier
+ * @s2mm_chan_id: DMA s2mm channel identifier
+ * @mm2s_chan_id: DMA mm2s channel identifier
* @max_buffer_len: Max buffer length
- * @s2mm_index: S2MM channel index
*/
struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
- struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
+ struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -504,10 +506,9 @@ struct xilinx_dma_device {
struct clk *txs_clk;
struct clk *rx_clk;
struct clk *rxs_clk;
- u32 nr_channels;
- u32 chan_id;
+ u32 s2mm_chan_id;
+ u32 mm2s_chan_id;
u32 max_buffer_len;
- u32 s2mm_index;
};
/* Macros */
@@ -1745,7 +1746,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
return IRQ_NONE;
if (chan->direction == DMA_DEV_TO_MEM)
- chan_offset = chan->xdev->s2mm_index;
+ chan_offset = chan->xdev->dma_config->max_channels / 2;
chan_offset = chan_offset + (chan_id - 1);
chan = chan->xdev->chan[chan_offset];
@@ -2404,16 +2405,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
u32 reg;
int err;
- if (chan->cyclic)
- xilinx_dma_chan_reset(chan);
-
- err = chan->stop_transfer(chan);
- if (err) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
+ if (!chan->cyclic) {
+ err = chan->stop_transfer(chan);
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan,
+ XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
}
+ xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
chan->idle = true;
@@ -2730,12 +2732,11 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
*
* @xdev: Driver specific device structure
* @node: Device node
- * @chan_id: DMA Channel id
*
* Return: '0' on success and failure value on error
*/
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
- struct device_node *node, int chan_id)
+ struct device_node *node)
{
struct xilinx_dma_chan *chan;
bool has_dre = false;
@@ -2787,8 +2788,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
chan->direction = DMA_MEM_TO_DEV;
- chan->id = chan_id;
- chan->tdest = chan_id;
+ chan->id = xdev->mm2s_chan_id++;
+ chan->tdest = chan->id;
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2804,9 +2805,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
of_device_is_compatible(node,
"xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
- chan->id = chan_id;
- xdev->s2mm_index = xdev->nr_channels;
- chan->tdest = chan_id - xdev->nr_channels;
+ chan->id = xdev->s2mm_chan_id++;
+ chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
chan->has_vflip = of_property_read_bool(node,
"xlnx,enable-vert-flip");
if (chan->has_vflip) {
@@ -2908,9 +2908,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
dev_warn(xdev->dev, "missing dma-channels property\n");
for (i = 0; i < nr_channels; i++)
- xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
-
- xdev->nr_channels += nr_channels;
+ xilinx_dma_chan_probe(xdev, node);
return 0;
}
@@ -2928,7 +2926,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_dma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
+ if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
@@ -2938,23 +2936,27 @@ static const struct xilinx_dma_config axidma_config = {
.dmatype = XDMA_TYPE_AXIDMA,
.clk_init = axidma_clk_init,
.irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};
static const struct xilinx_dma_config aximcdma_config = {
.dmatype = XDMA_TYPE_AXIMCDMA,
.clk_init = axidma_clk_init,
.irq_handler = xilinx_mcdma_irq_handler,
+ .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
};
static const struct xilinx_dma_config axicdma_config = {
.dmatype = XDMA_TYPE_CDMA,
.clk_init = axicdma_clk_init,
.irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
};
static const struct xilinx_dma_config axivdma_config = {
.dmatype = XDMA_TYPE_VDMA,
.clk_init = axivdma_clk_init,
.irq_handler = xilinx_dma_irq_handler,
+ .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};
static const struct of_device_id xilinx_dma_of_ids[] = {
@@ -3011,6 +3013,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+ xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
@@ -3104,7 +3107,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- for (i = 0; i < xdev->nr_channels; i++)
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xdev->chan[i]->num_frms = num_frames;
}
@@ -3134,7 +3137,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
disable_clks:
xdma_disable_allclks(xdev);
error:
- for (i = 0; i < xdev->nr_channels; i++)
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
@@ -3156,7 +3159,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&xdev->common);
- for (i = 0; i < xdev->nr_channels; i++)
+ for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
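The net effect of the id rework above is a fixed split of the channel space: mm2s channels are numbered up from 0, s2mm channels up from max_channels / 2, and tdest becomes the index within each half. A small illustrative helper (not part of the patch) capturing the arithmetic:

static inline int xilinx_example_tdest(int chan_id, int max_channels)
{
	/* mm2s ids occupy [0, max/2), s2mm ids occupy [max/2, max) */
	if (chan_id < max_channels / 2)
		return chan_id;				/* mm2s: tdest == id */
	return chan_id - max_channels / 2;		/* s2mm: index past mm2s ids */
}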
diff --git a/drivers/eisa/.gitignore b/drivers/eisa/.gitignore
index 4b335c0aedb0..7d0a2ad5abe2 100644
--- a/drivers/eisa/.gitignore
+++ b/drivers/eisa/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
devlist.h
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 34b7afffac28..525345367260 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -443,9 +443,40 @@ static int axp288_extcon_probe(struct platform_device *pdev)
/* Start charger cable type detection */
axp288_extcon_enable(info);
+ device_init_wakeup(dev, true);
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+}
+
+static int __maybe_unused axp288_extcon_suspend(struct device *dev)
+{
+ struct axp288_extcon_info *info = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(info->irq[VBUS_RISING_IRQ]);
+
return 0;
}
+static int __maybe_unused axp288_extcon_resume(struct device *dev)
+{
+ struct axp288_extcon_info *info = dev_get_drvdata(dev);
+
+ /*
+	 * Wake up when a charger is connected so that charger-type
+	 * detection can run and generate an extcon event, which makes the
+ * axp288 charger driver set the input current limit.
+ */
+ if (device_may_wakeup(dev))
+ disable_irq_wake(info->irq[VBUS_RISING_IRQ]);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(axp288_extcon_pm_ops, axp288_extcon_suspend,
+ axp288_extcon_resume);
+
static const struct platform_device_id axp288_extcon_table[] = {
{ .name = "axp288_extcon" },
{},
@@ -457,6 +488,7 @@ static struct platform_driver axp288_extcon_driver = {
.id_table = axp288_extcon_table,
.driver = {
.name = "axp288_extcon",
+ .pm = &axp288_extcon_pm_ops,
},
};
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index edc5016f46f1..cea58d0cb457 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -205,14 +205,18 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
GPIOD_IN);
- if (IS_ERR(palmas_usb->id_gpiod)) {
+ if (PTR_ERR(palmas_usb->id_gpiod) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(palmas_usb->id_gpiod)) {
dev_err(&pdev->dev, "failed to get id gpio\n");
return PTR_ERR(palmas_usb->id_gpiod);
}
palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
GPIOD_IN);
- if (IS_ERR(palmas_usb->vbus_gpiod)) {
+ if (PTR_ERR(palmas_usb->vbus_gpiod) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(palmas_usb->vbus_gpiod)) {
dev_err(&pdev->dev, "failed to get vbus gpio\n");
return PTR_ERR(palmas_usb->vbus_gpiod);
}
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e055893fd5c3..2dfbfec572f9 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1406,6 +1406,7 @@ const char *extcon_get_edev_name(struct extcon_dev *edev)
{
return !edev ? NULL : edev->name;
}
+EXPORT_SYMBOL_GPL(extcon_get_edev_name);
static int __init extcon_class_init(void)
{
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ea869addc89b..8007d4aa76dc 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -206,7 +206,7 @@ config FW_CFG_SYSFS_CMDLINE
config INTEL_STRATIX10_SERVICE
tristate "Intel Stratix10 Service Layer"
- depends on ARCH_STRATIX10 && HAVE_ARM_SMCCC
+ depends on (ARCH_STRATIX10 || ARCH_AGILEX) && HAVE_ARM_SMCCC
default n
help
Intel Stratix10 service layer runs at privileged exception level,
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 5f298f00a82e..6694d0d908d6 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
+obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
+scmi-transport-y = mailbox.o shmem.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index df35358ff324..5ac06469b01c 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -33,8 +33,8 @@ enum scmi_common_cmd {
/**
* struct scmi_msg_resp_prot_version - Response for a message
*
- * @major_version: Major version of the ABI that firmware supports
* @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
*
* In general, ABI version changes follow the rule that minor version increments
* are backward compatible. Major revision changes in ABI may not be
@@ -47,6 +47,19 @@ struct scmi_msg_resp_prot_version {
__le16 major_version;
};
+#define MSG_ID_MASK GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
+#define MSG_TYPE_MASK GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND 0
+#define MSG_TYPE_DELAYED_RESP 2
+#define MSG_TYPE_NOTIFICATION 3
+#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
+#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
@@ -68,6 +81,33 @@ struct scmi_msg_hdr {
};
/**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ * protocol id and sequence id.
+ *
+ * Return: 32-bit packed message header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+ return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+ FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+ FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
+
+/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+ hdr->id = MSG_XTRACT_ID(msg_hdr);
+ hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
+/**
* struct scmi_msg - Message(Tx/Rx) structure
*
* @buf: Buffer pointer
@@ -88,7 +128,7 @@ struct scmi_msg {
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
* @done: command message transmit completion event
- * @async: pointer to delayed response message received event completion
+ * @async_done: pointer to delayed response message received event completion
*/
struct scmi_xfer {
int transfer_id;
@@ -113,3 +153,74 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
u8 *prot_imp);
int scmi_base_protocol_init(struct scmi_handle *h);
+
+/* SCMI Transport */
+/**
+ * struct scmi_chan_info - Structure representing a SCMI channel information
+ *
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ * channel
+ * @handle: Pointer to SCMI entity handle
+ * @transport_info: Transport layer related information
+ */
+struct scmi_chan_info {
+ struct device *dev;
+ struct scmi_handle *handle;
+ void *transport_info;
+};
+
+/**
+ * struct scmi_transport_ops - Structure representing a SCMI transport ops
+ *
+ * @chan_available: Callback to check if channel is available or not
+ * @chan_setup: Callback to allocate and setup a channel
+ * @chan_free: Callback to free a channel
+ * @send_message: Callback to send a message
+ * @mark_txdone: Callback to mark tx as done
+ * @fetch_response: Callback to fetch response
+ * @poll_done: Callback to poll transfer status
+ */
+struct scmi_transport_ops {
+ bool (*chan_available)(struct device *dev, int idx);
+ int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx);
+ int (*chan_free)(int id, void *p, void *data);
+ int (*send_message)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
+ void (*fetch_response)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
+};
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @ops: Pointer to the transport specific ops structure
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msg: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct scmi_desc {
+ struct scmi_transport_ops *ops;
+ int max_rx_timeout_ms;
+ int max_msg;
+ int max_msg_size;
+};
+
+extern const struct scmi_desc scmi_mailbox_desc;
+
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
+
+/* shmem related declarations */
+struct scmi_shared_mem;
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
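The masks moved into common.h encode a 28-bit message header. A worked example of the same packing with plain shifts (standalone sketch; the values are arbitrary, with 0x13 being the perf protocol id):

#include <stdint.h>

static uint32_t pack_example(void)
{
	/* id = 0x07, protocol = 0x13, token = 0x2a -> 0x00a84c07 */
	return 0x07u | (0x13u << 10) | (0x2au << 18);
}

static void unpack_example(uint32_t hdr, uint8_t *id, uint8_t *prot,
			   uint16_t *tok)
{
	*id   = hdr & 0xff;		/* MSG_ID_MASK,          GENMASK(7, 0)   */
	*prot = (hdr >> 10) & 0xff;	/* MSG_PROTOCOL_ID_MASK, GENMASK(17, 10) */
	*tok  = (hdr >> 18) & 0x3ff;	/* MSG_TOKEN_ID_MASK,    GENMASK(27, 18) */
}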
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 2c96f6b5a7d8..dbec767222e9 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -19,12 +19,10 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
-#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include "common.h"
@@ -32,19 +30,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
-#define MSG_ID_MASK GENMASK(7, 0)
-#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
-#define MSG_TYPE_MASK GENMASK(9, 8)
-#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
-#define MSG_TYPE_COMMAND 0
-#define MSG_TYPE_DELAYED_RESP 2
-#define MSG_TYPE_NOTIFICATION 3
-#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
-#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
-#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
-#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
-#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
-
enum scmi_error_codes {
SCMI_SUCCESS = 0, /* Success */
SCMI_ERR_SUPPORT = -1, /* Not supported */
@@ -83,45 +68,13 @@ struct scmi_xfers_info {
};
/**
- * struct scmi_desc - Description of SoC integration
- *
- * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
- * @max_msg: Maximum number of messages that can be pending
- * simultaneously in the system
- * @max_msg_size: Maximum size of data per message that can be handled.
- */
-struct scmi_desc {
- int max_rx_timeout_ms;
- int max_msg;
- int max_msg_size;
-};
-
-/**
- * struct scmi_chan_info - Structure representing a SCMI channel information
- *
- * @cl: Mailbox Client
- * @chan: Transmit/Receive mailbox channel
- * @payload: Transmit/Receive mailbox channel payload area
- * @dev: Reference to device in the SCMI hierarchy corresponding to this
- * channel
- * @handle: Pointer to SCMI entity handle
- */
-struct scmi_chan_info {
- struct mbox_client cl;
- struct mbox_chan *chan;
- void __iomem *payload;
- struct device *dev;
- struct scmi_handle *handle;
-};
-
-/**
* struct scmi_info - Structure representing a SCMI instance
*
* @dev: Device pointer
* @desc: SoC description for this instance
- * @handle: Instance of SCMI handle to send to clients
* @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification.
+ * @handle: Instance of SCMI handle to send to clients
* @tx_minfo: Universal Transmit Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
@@ -143,27 +96,8 @@ struct scmi_info {
int users;
};
-#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
-/*
- * SCMI specification requires all parameters, message headers, return
- * arguments or any protocol data to be expressed in little endian
- * format only.
- */
-struct scmi_shared_mem {
- __le32 reserved;
- __le32 channel_status;
-#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
-#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
- __le32 reserved1[2];
- __le32 flags;
-#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
- __le32 length;
- __le32 msg_header;
- u8 msg_payload[0];
-};
-
static const int scmi_linux_errmap[] = {
/* better than switch case as long as return value is continuous */
0, /* SCMI_SUCCESS */
@@ -199,77 +133,6 @@ static inline void scmi_dump_header_dbg(struct device *dev,
hdr->id, hdr->seq, hdr->protocol_id);
}
-static void scmi_fetch_response(struct scmi_xfer *xfer,
- struct scmi_shared_mem __iomem *mem)
-{
- xfer->hdr.status = ioread32(mem->msg_payload);
- /* Skip the length of header and status in payload area i.e 8 bytes */
- xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
-
- /* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
-}
-
-/**
- * pack_scmi_header() - packs and returns 32-bit header
- *
- * @hdr: pointer to header containing all the information on message id,
- * protocol id and sequence id.
- *
- * Return: 32-bit packed message header to be sent to the platform.
- */
-static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
-{
- return FIELD_PREP(MSG_ID_MASK, hdr->id) |
- FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
- FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
-}
-
-/**
- * unpack_scmi_header() - unpacks and records message and protocol id
- *
- * @msg_hdr: 32-bit packed message header sent from the platform
- * @hdr: pointer to header to fetch message and protocol id.
- */
-static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
-{
- hdr->id = MSG_XTRACT_ID(msg_hdr);
- hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
-}
-
-/**
- * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * This function prepares the shared memory which contains the header and the
- * payload.
- */
-static void scmi_tx_prepare(struct mbox_client *cl, void *m)
-{
- struct scmi_xfer *t = m;
- struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
- /*
- * Ideally channel must be free by now unless OS timeout last
- * request and platform continued to process the same, wait
- * until it releases the shared memory, otherwise we may endup
- * overwriting its response with new message payload or vice-versa
- */
- spin_until_cond(ioread32(&mem->channel_status) &
- SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
- /* Mark channel busy + clear error */
- iowrite32(0x0, &mem->channel_status);
- iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
- &mem->flags);
- iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
- iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
- if (t->tx.buf)
- memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
-}
-
/**
* scmi_xfer_get() - Allocate one message
*
@@ -338,10 +201,10 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
}
/**
- * scmi_rx_callback() - mailbox client callback for receive messages
+ * scmi_rx_callback() - callback for receiving messages
*
- * @cl: client pointer
- * @m: mailbox message
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
@@ -349,21 +212,14 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
-static void scmi_rx_callback(struct mbox_client *cl, void *m)
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
- u8 msg_type;
- u32 msg_hdr;
- u16 xfer_id;
- struct scmi_xfer *xfer;
- struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
- struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
- msg_hdr = ioread32(&mem->msg_header);
- msg_type = MSG_XTRACT_TYPE(msg_hdr);
- xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+ struct device *dev = cinfo->dev;
+ struct scmi_xfer *xfer;
if (msg_type == MSG_TYPE_NOTIFICATION)
return; /* Notifications not yet supported */
@@ -378,7 +234,7 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
scmi_dump_header_dbg(dev, &xfer->hdr);
- scmi_fetch_response(xfer, mem);
+ info->desc->ops->fetch_response(cinfo, xfer);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -403,28 +259,15 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
__scmi_xfer_put(&info->tx_minfo, xfer);
}
-static bool
-scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
-{
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
- u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
-
- if (xfer->hdr.seq != xfer_id)
- return false;
-
- return ioread32(&mem->channel_status) &
- (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
- SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
-}
-
#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
-static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
+static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
{
- ktime_t __cur = ktime_get();
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
+ return info->desc->ops->poll_done(cinfo, xfer) ||
+ ktime_after(ktime_get(), stop);
}
/**
@@ -453,29 +296,26 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
- ret = mbox_send_message(cinfo->chan, xfer);
+ ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
- dev_dbg(dev, "mbox send fail %d\n", ret);
+ dev_dbg(dev, "Failed to send message %d\n", ret);
return ret;
}
- /* mbox_send_message returns non-negative value on success, so reset */
- ret = 0;
-
if (xfer->hdr.poll_completion) {
ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
if (ktime_before(ktime_get(), stop))
- scmi_fetch_response(xfer, cinfo->payload);
+ info->desc->ops->fetch_response(cinfo, xfer);
else
ret = -ETIMEDOUT;
} else {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout)) {
- dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
+ dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
@@ -484,13 +324,8 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
- /*
- * NOTE: we might prefer not to need the mailbox ticker to manage the
- * transfer queueing since the protocol layer queues things by itself.
- * Unfortunately, we have to kick the mailbox framework after we have
- * received our message.
- */
- mbox_client_txdone(cinfo->chan, ret);
+ if (info->desc->ops->mark_txdone)
+ info->desc->ops->mark_txdone(cinfo, ret);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -731,23 +566,12 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
-static int scmi_mailbox_check(struct device_node *np, int idx)
-{
- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
- idx, NULL);
-}
-
-static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
- int prot_id, bool tx)
+static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+ int prot_id, bool tx)
{
int ret, idx;
- struct resource res;
- resource_size_t size;
- struct device_node *shmem, *np = dev->of_node;
struct scmi_chan_info *cinfo;
- struct mbox_client *cl;
struct idr *idr;
- const char *desc = tx ? "Tx" : "Rx";
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
@@ -758,7 +582,7 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
if (cinfo)
return 0;
- if (scmi_mailbox_check(np, idx)) {
+ if (!info->desc->ops->chan_available(dev, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
@@ -771,36 +595,9 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
cinfo->dev = dev;
- cl = &cinfo->cl;
- cl->dev = dev;
- cl->rx_callback = scmi_rx_callback;
- cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
- cl->tx_block = false;
- cl->knows_txdone = tx;
-
- shmem = of_parse_phandle(np, "shmem", idx);
- ret = of_address_to_resource(shmem, 0, &res);
- of_node_put(shmem);
- if (ret) {
- dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
- return ret;
- }
-
- size = resource_size(&res);
- cinfo->payload = devm_ioremap(info->dev, res.start, size);
- if (!cinfo->payload) {
- dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
- return -EADDRNOTAVAIL;
- }
-
- cinfo->chan = mbox_request_channel(cl, idx);
- if (IS_ERR(cinfo->chan)) {
- ret = PTR_ERR(cinfo->chan);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to request SCMI %s mailbox\n",
- desc);
+ ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
+ if (ret)
return ret;
- }
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
@@ -814,12 +611,12 @@ idr_alloc:
}
static inline int
-scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
- int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
+ int ret = scmi_chan_setup(info, dev, prot_id, true);
if (!ret) /* Rx is optional, hence no error check */
- scmi_mbox_chan_setup(info, dev, prot_id, false);
+ scmi_chan_setup(info, dev, prot_id, false);
return ret;
}
@@ -837,7 +634,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
return;
}
- if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
+ if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
return;
@@ -890,12 +687,6 @@ static int scmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
- /* Only mailbox method supported, check for the presence of one */
- if (scmi_mailbox_check(np, 0)) {
- dev_err(dev, "no mailbox found in %pOF\n", np);
- return -EINVAL;
- }
-
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
@@ -920,7 +711,7 @@ static int scmi_probe(struct platform_device *pdev)
handle->dev = info->dev;
handle->version = &info->version;
- ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
@@ -955,19 +746,9 @@ static int scmi_probe(struct platform_device *pdev)
return 0;
}
-static int scmi_mbox_free_channel(int id, void *p, void *data)
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
- struct scmi_chan_info *cinfo = p;
- struct idr *idr = data;
-
- if (!IS_ERR_OR_NULL(cinfo->chan)) {
- mbox_free_channel(cinfo->chan);
- cinfo->chan = NULL;
- }
-
idr_remove(idr, id);
-
- return 0;
}
static int scmi_remove(struct platform_device *pdev)
@@ -987,11 +768,11 @@ static int scmi_remove(struct platform_device *pdev)
return ret;
/* Safe to free channels since no more users */
- ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->tx_idr);
idr = &info->rx_idr;
- ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
idr_destroy(&info->rx_idr);
return ret;
@@ -1043,15 +824,9 @@ static struct attribute *versions_attrs[] = {
};
ATTRIBUTE_GROUPS(versions);
-static const struct scmi_desc scmi_generic_desc = {
- .max_rx_timeout_ms = 30, /* We may increase this if required */
- .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
-};
-
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
- { .compatible = "arm,scmi", .data = &scmi_generic_desc },
+ { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
{ /* Sentinel */ },
};
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
new file mode 100644
index 000000000000..73077bbc4ad9
--- /dev/null
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Mailbox Transport
+ * driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_mailbox - Structure representing a SCMI mailbox transport
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox channel
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ */
+struct scmi_mailbox {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+};
+
+#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+
+static void tx_prepare(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ shmem_tx_prepare(smbox->shmem, m);
+}
+
+static void rx_callback(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
+}
+
+static bool mailbox_chan_available(struct device *dev, int idx)
+{
+ return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", idx, NULL);
+}
+
+static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ const char *desc = tx ? "Tx" : "Rx";
+ struct device *cdev = cinfo->dev;
+ struct scmi_mailbox *smbox;
+ struct device_node *shmem;
+ int ret, idx = tx ? 0 : 1;
+ struct mbox_client *cl;
+ resource_size_t size;
+ struct resource res;
+
+ smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
+ if (!smbox)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
+ return ret;
+ }
+
+ size = resource_size(&res);
+ smbox->shmem = devm_ioremap(dev, res.start, size);
+ if (!smbox->shmem) {
+ dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
+ return -EADDRNOTAVAIL;
+ }
+
+ cl = &smbox->cl;
+ cl->dev = cdev;
+ cl->tx_prepare = tx ? tx_prepare : NULL;
+ cl->rx_callback = rx_callback;
+ cl->tx_block = false;
+ cl->knows_txdone = tx;
+
+	smbox->chan = mbox_request_channel(cl, idx);
+	if (IS_ERR(smbox->chan)) {
+		ret = PTR_ERR(smbox->chan);
+		if (ret != -EPROBE_DEFER)
+			dev_err(cdev, "failed to request SCMI %s mailbox\n",
+				desc);
+ return ret;
+ }
+
+ cinfo->transport_info = smbox;
+ smbox->cinfo = cinfo;
+
+ return 0;
+}
+
+static int mailbox_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ if (!IS_ERR(smbox->chan)) {
+ mbox_free_channel(smbox->chan);
+ cinfo->transport_info = NULL;
+ smbox->chan = NULL;
+ smbox->cinfo = NULL;
+ }
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int mailbox_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+ int ret;
+
+ ret = mbox_send_message(smbox->chan, xfer);
+
+ /* mbox_send_message returns non-negative value on success, so reset */
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(smbox->chan, ret);
+}
+
+static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_response(smbox->shmem, xfer);
+}
+
+static bool
+mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ return shmem_poll_done(smbox->shmem, xfer);
+}
+
+static struct scmi_transport_ops scmi_mailbox_ops = {
+ .chan_available = mailbox_chan_available,
+ .chan_setup = mailbox_chan_setup,
+ .chan_free = mailbox_chan_free,
+ .send_message = mailbox_send_message,
+ .mark_txdone = mailbox_mark_txdone,
+ .fetch_response = mailbox_fetch_response,
+ .poll_done = mailbox_poll_done,
+};
+
+const struct scmi_desc scmi_mailbox_desc = {
+ .ops = &scmi_mailbox_ops,
+ .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
+ .max_msg_size = 128,
+};
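With the ops split above, adding another transport no longer touches driver.c: it only fills in a scmi_transport_ops table and exports a scmi_desc to hang off a new compatible. A hypothetical skeleton follows; every "dummy_" symbol is invented for illustration, and mark_txdone is deliberately left out since scmi_do_xfer() only calls it when the transport provides one.

static bool dummy_chan_available(struct device *dev, int idx)
{
	return true;			/* e.g. probe a doorbell/conduit here */
}

static int dummy_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			    bool tx)
{
	return 0;			/* map shmem, set cinfo->transport_info */
}

static int dummy_chan_free(int id, void *p, void *data)
{
	scmi_free_channel(p, data, id);
	return 0;
}

static int dummy_send_message(struct scmi_chan_info *cinfo,
			      struct scmi_xfer *xfer)
{
	return 0;			/* ring doorbell / trap to firmware */
}

static void dummy_fetch_response(struct scmi_chan_info *cinfo,
				 struct scmi_xfer *xfer)
{
}

static bool dummy_poll_done(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer)
{
	return true;
}

static struct scmi_transport_ops scmi_dummy_ops = {
	.chan_available	= dummy_chan_available,
	.chan_setup	= dummy_chan_setup,
	.chan_free	= dummy_chan_free,
	.send_message	= dummy_send_message,
	.fetch_response	= dummy_fetch_response,
	.poll_done	= dummy_poll_done,
};

const struct scmi_desc scmi_dummy_desc = {
	.ops = &scmi_dummy_ops,
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
};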
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index ec81e6f7e7a4..34f3a917dd8d 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -89,7 +89,7 @@ struct scmi_msg_resp_perf_describe_levels {
__le32 power;
__le16 transition_latency_us;
__le16 reserved;
- } opp[0];
+ } opp[];
};
struct scmi_perf_get_fc_info {
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
new file mode 100644
index 000000000000..e1e816e0018c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using the shared memory structure.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/processor.h>
+#include <linux/types.h>
+
+#include "common.h"
+
+/*
+ * SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian
+ * format only.
+ */
+struct scmi_shared_mem {
+ __le32 reserved;
+ __le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
+ __le32 reserved1[2];
+ __le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
+ __le32 length;
+ __le32 msg_header;
+ u8 msg_payload[];
+};
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ /*
+	 * Ideally the channel must be free by now. But if the OS timed out
+	 * the last request while the platform continued to process it, wait
+	 * until it releases the shared memory; otherwise we may end up
+	 * overwriting its response with a new message payload or vice versa.
+ */
+ spin_until_cond(ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ /* Mark channel busy + clear error */
+ iowrite32(0x0, &shmem->channel_status);
+ iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+ &shmem->flags);
+ iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
+ iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
+ if (xfer->tx.buf)
+ memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+{
+ return ioread32(&shmem->msg_header);
+}
+
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ xfer->hdr.status = ioread32(shmem->msg_payload);
+	/* Skip the length of the header and status in the shmem area, i.e. 8 bytes */
+ xfer->rx.len = min_t(size_t, xfer->rx.len,
+ ioread32(&shmem->length) - 8);
+
+	/* Take a copy into the rx buffer */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+}
+
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ u16 xfer_id;
+
+ xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
+
+ if (xfer->hdr.seq != xfer_id)
+ return false;
+
+ return ioread32(&shmem->channel_status) &
+ (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
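The structure above fixes the layout of the shared memory area; the "- 8" in shmem_fetch_response() is the 4-byte msg_header plus the 4-byte status word that precede the returned data. A build-time sketch of the resulting offsets (assuming the __le32 fields are 4 bytes each, as defined above):

#include <stddef.h>

_Static_assert(offsetof(struct scmi_shared_mem, channel_status) == 0x04, "");
_Static_assert(offsetof(struct scmi_shared_mem, flags)          == 0x10, "");
_Static_assert(offsetof(struct scmi_shared_mem, length)         == 0x14, "");
_Static_assert(offsetof(struct scmi_shared_mem, msg_header)     == 0x18, "");
_Static_assert(offsetof(struct scmi_shared_mem, msg_payload)    == 0x1c, "");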
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index a80c331c3a6e..d0dee37ad522 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -262,12 +262,12 @@ struct scpi_drvinfo {
struct scpi_shared_mem {
__le32 command;
__le32 status;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct legacy_scpi_shared_mem {
__le32 status;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct scp_capabilities {
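The payload[0] to payload[] changes here (and in perf.c above) convert one-element-style arrays to C99 flexible array members; sizeof the struct now excludes the payload, so any allocation must size it explicitly. A minimal sketch assuming the kernel's struct_size() helper from <linux/overflow.h> — illustrative only, since in this driver the structures actually overlay device memory rather than being allocated:

	struct legacy_scpi_shared_mem *msg;

	/* space for the fixed header plus payload_len trailing bytes */
	msg = kzalloc(struct_size(msg, payload, payload_len), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;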
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 1d2e5b85d7ca..116707a075f3 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -12,7 +12,7 @@ config IMX_DSP
config IMX_SCU
bool "IMX SCU Protocol driver"
- depends on IMX_MBOX
+ depends on IMX_MBOX || COMPILE_TEST
help
The System Controller Firmware (SCFW) is a low-level system function
which runs on a dedicated Cortex-M core to provide power, clock, and
@@ -24,6 +24,6 @@ config IMX_SCU
config IMX_SCU_PD
bool "IMX SCU Power Domain driver"
- depends on IMX_SCU
+ depends on IMX_SCU || COMPILE_TEST
help
The System Controller Firmware (SCFW) based power domain driver.
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index af3ae0087de4..fb5523aa16ee 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -93,7 +93,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
{ "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
- { "mu_b", IMX_SC_R_MU_13B, 1, true, 13 },
+ { "mu_b", IMX_SC_R_MU_5B, 9, true, 5 },
/* CONN SS */
{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
@@ -109,6 +109,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
{ "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
{ "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
+ { "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 },
{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
{ "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
@@ -116,7 +117,13 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
{ "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
{ "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
+ { "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 },
{ "sai", IMX_SC_R_SAI_0, 3, true, 0 },
+ { "sai3", IMX_SC_R_SAI_3, 1, false, 0 },
+ { "sai4", IMX_SC_R_SAI_4, 1, false, 0 },
+ { "sai5", IMX_SC_R_SAI_5, 1, false, 0 },
+ { "sai6", IMX_SC_R_SAI_6, 1, false, 0 },
+ { "sai7", IMX_SC_R_SAI_7, 1, false, 0 },
{ "amix", IMX_SC_R_AMIX, 1, false, 0 },
{ "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
{ "dsp", IMX_SC_R_DSP, 1, false, 0 },
@@ -158,6 +165,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+
+ /* CM40 SS */
+ { "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 },
+ { "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
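Each row above expands into one power domain per resource; for postfix rows the driver appends an index starting at start_from, so the widened "mu_b" row now covers IMX_SC_R_MU_5B..IMX_SC_R_MU_13B as mu_b5..mu_b13. An illustrative sketch of that expansion (field names assumed to match scu-pd.c, loop body elided):

	char name[32];
	u32 i;

	for (i = 0; i < range->num; i++) {
		if (range->postfix)
			snprintf(name, sizeof(name), "%s%i",
				 range->name, range->start_from + i);
		else
			snprintf(name, sizeof(name), "%s", range->name);
		/* one genpd is registered for resource range->rsrc + i */
	}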
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 1d5b4d74f96d..2854b56f6e0b 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -44,6 +44,8 @@ static const struct meson_sm_chip gxbb_chip = {
CMD(SM_EFUSE_WRITE, 0x82000031),
CMD(SM_EFUSE_USER_MAX, 0x82000033),
CMD(SM_GET_CHIP_ID, 0x82000044),
+ CMD(SM_A1_PWRC_SET, 0x82000093),
+ CMD(SM_A1_PWRC_GET, 0x82000095),
{ /* sentinel */ },
},
};
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 7ffb42b0775e..d5f0769f3761 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -966,6 +966,7 @@ EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
static const struct of_device_id stratix10_svc_drv_match[] = {
{.compatible = "intel,stratix10-svc"},
+ {.compatible = "intel,agilex-svc"},
{},
};
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
index a887731f50d6..1c8ba1f47c7c 100644
--- a/drivers/firmware/tegra/Kconfig
+++ b/drivers/firmware/tegra/Kconfig
@@ -7,7 +7,7 @@ config TEGRA_IVC
help
IVC (Inter-VM Communication) protocol is part of the IPC
(Inter Processor Communication) framework on Tegra. It maintains the
- data and the different commuication channels in SysRAM or RAM and
+ data and the different communication channels in SysRAM or RAM and
 keeps the content in synchronization between the host CPU and remote
processors.
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index bd33bbf70daf..9a9bd190888e 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -6,6 +6,8 @@ menu "Zynq MPSoC Firmware Drivers"
config ZYNQMP_FIRMWARE
bool "Enable Xilinx Zynq MPSoC firmware interface"
+ depends on ARCH_ZYNQMP
+ default y if ARCH_ZYNQMP
select MFD_CORE
help
Firmware interface driver is used by different
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index f8b03dcfd2f7..41b65164a367 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -709,6 +709,30 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
qos, ack, NULL);
}
+/**
+ * zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using
+ * AES-GCM core.
+ * @address: Address of the AesParams structure.
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
+ lower_32_bits(address),
+ 0, 0, ret_payload);
+ *out = ret_payload[1];
+
+ return ret;
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
.get_chipid = zynqmp_pm_get_chipid,
@@ -732,6 +756,7 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.set_requirement = zynqmp_pm_set_requirement,
.fpga_load = zynqmp_pm_fpga_load,
.fpga_get_status = zynqmp_pm_fpga_get_status,
+ .aes = zynqmp_pm_aes_engine,
};
/**
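A hedged caller sketch for the new op: it is reached through the EEMI ops table, and the address passed down must be the DMA address of a firmware-defined AesParams structure whose layout is not part of this patch. The error-status interpretation below is an assumption:

	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	u32 status;
	int ret;

	if (IS_ERR(eemi_ops) || !eemi_ops->aes)
		return -ENODEV;

	ret = eemi_ops->aes(dma_addr, &status);	/* dma_addr: DMA address of AesParams */
	if (!ret && status)
		ret = -EIO;			/* assumed: non-zero status is a failure */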
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b8013cf90064..1b96169d84f7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -394,13 +394,13 @@ config GPIO_MVEBU
config GPIO_MXC
def_bool y
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
config GPIO_MXS
def_bool y
- depends on ARCH_MXS
+ depends on ARCH_MXS || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -1399,6 +1399,13 @@ config GPIO_MLXBF
help
Say Y here if you want GPIO support on Mellanox BlueField SoC.
+config GPIO_MLXBF2
+ tristate "Mellanox BlueField 2 SoC GPIO"
+ depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
+ select GPIO_GENERIC
+ help
+ Say Y here if you want GPIO support on Mellanox BlueField 2 SoC.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on X86 || COMPILE_TEST
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0b571264ddbc..b2cfc21a97f3 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
+obj-$(CONFIG_GPIO_MLXBF2) += gpio-mlxbf2.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 05e3f99ae59c..fcfc1a1f1a5c 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -603,6 +603,49 @@ static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
.resume_noirq = brcmstb_gpio_resume,
};
+static void brcmstb_gpio_set_names(struct device *dev,
+ struct brcmstb_gpio_bank *bank)
+{
+ struct device_node *np = dev->of_node;
+ const char **names;
+ int nstrings, base;
+ unsigned int i;
+
+ base = bank->id * MAX_GPIO_PER_BANK;
+
+ nstrings = of_property_count_strings(np, "gpio-line-names");
+ if (nstrings <= base)
+ /* Line names not present */
+ return;
+
+ names = devm_kcalloc(dev, MAX_GPIO_PER_BANK, sizeof(*names),
+ GFP_KERNEL);
+ if (!names)
+ return;
+
+ /*
+	 * Make sure not to index beyond the number of descriptors
+	 * of the GPIO device.
+ */
+ for (i = 0; i < bank->width; i++) {
+ const char *name;
+ int ret;
+
+ ret = of_property_read_string_index(np, "gpio-line-names",
+ base + i, &name);
+ if (ret) {
+ if (ret != -ENODATA)
+ dev_err(dev, "unable to name line %d: %d\n",
+ base + i, ret);
+ break;
+ }
+ if (*name)
+ names[i] = name;
+ }
+
+ bank->gc.names = names;
+}
+
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -726,6 +769,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
+ brcmstb_gpio_set_names(dev, bank);
err = gpiochip_add_data(gc, bank);
if (err) {
dev_err(dev, "Could not add gpiochip for bank %d\n",
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index e0b025689625..085b874db2a9 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -259,11 +259,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.of_gpio_n_cells = 2;
chips->chip.parent = dev;
chips->chip.of_node = dev->of_node;
-
- if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- chips->chip.request = gpiochip_generic_request;
- chips->chip.free = gpiochip_generic_free;
- }
+ chips->chip.request = gpiochip_generic_request;
+ chips->chip.free = gpiochip_generic_free;
#endif
spin_lock_init(&chips->lock);
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index bb287f35cf40..8c9757774010 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -569,6 +569,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
const struct sprd_eic_variant_data *pdata;
struct gpio_irq_chip *irq;
struct sprd_eic *sprd_eic;
+ struct resource *res;
int ret, i;
pdata = of_device_get_match_data(&pdev->dev);
@@ -595,9 +596,13 @@ static int sprd_eic_probe(struct platform_device *pdev)
* have one bank EIC, thus base[1] and base[2] can be
* optional.
*/
- sprd_eic->base[i] = devm_platform_ioremap_resource(pdev, i);
- if (IS_ERR(sprd_eic->base[i]))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
continue;
+
+ sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sprd_eic->base[i]))
+ return PTR_ERR(sprd_eic->base[i]);
}
sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type];
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
new file mode 100644
index 000000000000..7b7085050219
--- /dev/null
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/resource.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+/*
+ * There are 3 YU GPIO blocks:
+ * gpio[0]: HOST_GPIO0->HOST_GPIO31
+ * gpio[1]: HOST_GPIO32->HOST_GPIO63
+ * gpio[2]: HOST_GPIO64->HOST_GPIO69
+ */
+#define MLXBF2_GPIO_MAX_PINS_PER_BLOCK 32
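+
+/*
+ * Illustrative only (unused sketch): map a global HOST_GPIO number to
+ * the (block, bit) pair implied by the split above.
+ */
+static inline void mlxbf2_host_gpio_to_block(unsigned int gpio,
+					     unsigned int *block,
+					     unsigned int *bit)
+{
+	*block = gpio / MLXBF2_GPIO_MAX_PINS_PER_BLOCK;	/* yu gpio[0..2] */
+	*bit = gpio % MLXBF2_GPIO_MAX_PINS_PER_BLOCK;	/* bit in DATAIN etc. */
+}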
+
+/*
+ * arm_gpio_lock register:
+ * bit[31] lock status: active if set
+ * bit[15:0] set lock
+ * The lock is enabled only if 0xd42f is written to this field
+ */
+#define YU_ARM_GPIO_LOCK_ADDR 0x2801088
+#define YU_ARM_GPIO_LOCK_SIZE 0x8
+#define YU_LOCK_ACTIVE_BIT(val) ((val) >> 31)
+#define YU_ARM_GPIO_LOCK_ACQUIRE 0xd42f
+#define YU_ARM_GPIO_LOCK_RELEASE 0x0
+
+/*
+ * gpio[x] block registers and their offset
+ */
+#define YU_GPIO_DATAIN 0x04
+#define YU_GPIO_MODE1 0x08
+#define YU_GPIO_MODE0 0x0c
+#define YU_GPIO_DATASET 0x14
+#define YU_GPIO_DATACLEAR 0x18
+#define YU_GPIO_MODE1_CLEAR 0x50
+#define YU_GPIO_MODE0_SET 0x54
+#define YU_GPIO_MODE0_CLEAR 0x58
+
+#ifdef CONFIG_PM
+struct mlxbf2_gpio_context_save_regs {
+ u32 gpio_mode0;
+ u32 gpio_mode1;
+};
+#endif
+
+/* BlueField-2 gpio block context structure. */
+struct mlxbf2_gpio_context {
+ struct gpio_chip gc;
+
+ /* YU GPIO blocks address */
+ void __iomem *gpio_io;
+
+#ifdef CONFIG_PM
+ struct mlxbf2_gpio_context_save_regs *csave_regs;
+#endif
+};
+
+/* BlueField-2 gpio shared structure. */
+struct mlxbf2_gpio_param {
+ void __iomem *io;
+ struct resource *res;
+ struct mutex *lock;
+};
+
+static struct resource yu_arm_gpio_lock_res = {
+ .start = YU_ARM_GPIO_LOCK_ADDR,
+ .end = YU_ARM_GPIO_LOCK_ADDR + YU_ARM_GPIO_LOCK_SIZE - 1,
+ .name = "YU_ARM_GPIO_LOCK",
+};
+
+static DEFINE_MUTEX(yu_arm_gpio_lock_mutex);
+
+static struct mlxbf2_gpio_param yu_arm_gpio_lock_param = {
+ .res = &yu_arm_gpio_lock_res,
+ .lock = &yu_arm_gpio_lock_mutex,
+};
+
+/* Request memory region and map yu_arm_gpio_lock resource */
+static int mlxbf2_gpio_get_lock_res(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ resource_size_t size;
+ int ret = 0;
+
+ mutex_lock(yu_arm_gpio_lock_param.lock);
+
+ /* Check if the memory map already exists */
+ if (yu_arm_gpio_lock_param.io)
+ goto exit;
+
+ res = yu_arm_gpio_lock_param.res;
+ size = resource_size(res);
+
+ if (!devm_request_mem_region(dev, res->start, size, res->name)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+	yu_arm_gpio_lock_param.io = devm_ioremap(dev, res->start, size);
+	/* devm_ioremap() returns NULL on failure, not an ERR_PTR */
+	if (!yu_arm_gpio_lock_param.io)
+		ret = -ENOMEM;
+
+exit:
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
+
+ return ret;
+}
+
+/*
+ * Acquire the YU arm_gpio_lock to be able to change the direction
+ * mode. If the lock_active bit is already set, return an error.
+ */
+static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
+{
+ u32 arm_gpio_lock_val;
+
+ spin_lock(&gs->gc.bgpio_lock);
+ mutex_lock(yu_arm_gpio_lock_param.lock);
+
+ arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
+
+ /*
+ * When lock active bit[31] is set, ModeX is write enabled
+ */
+ if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
+ spin_unlock(&gs->gc.bgpio_lock);
+ return -EINVAL;
+ }
+
+ writel(YU_ARM_GPIO_LOCK_ACQUIRE, yu_arm_gpio_lock_param.io);
+
+ return 0;
+}
+
+/*
+ * Release the YU arm_gpio_lock after changing the direction mode.
+ */
+static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
+{
+ writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
+ spin_unlock(&gs->gc.bgpio_lock);
+}
+
+/*
+ * mode0 and mode1 are both locked by the gpio_lock field.
+ *
+ * Together, mode0 and mode1 define the gpio mode, depending also
+ * on Reg_DataOut.
+ *
+ * {mode1,mode0}:{Reg_DataOut=0,Reg_DataOut=1}->{DataOut=0,DataOut=1}
+ *
+ * {0,0}:Reg_DataOut{0,1}->{Z,Z} Input PAD
+ * {0,1}:Reg_DataOut{0,1}->{0,1} Full drive Output PAD
+ * {1,0}:Reg_DataOut{0,1}->{0,Z} 0-set PAD to low, 1-float
+ * {1,1}:Reg_DataOut{0,1}->{Z,1} 0-float, 1-set PAD to high
+ */
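+
+/*
+ * For example, direction_output programs {mode1,mode0} = {0,1} (clear
+ * MODE1, then set MODE0) for a full-drive pad, while direction_input
+ * clears both; the dedicated _SET/_CLEAR registers make each step a
+ * single store with no read-modify-write.
+ */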
+
+/*
+ * Set input direction:
+ * {mode1,mode0} = {0,0}
+ */
+static int mlxbf2_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip);
+ int ret;
+
+	/*
+	 * Acquire the arm_gpio_lock so that the ModeX registers can be
+	 * written; if another owner holds it, propagate the error.
+	 */
+ ret = mlxbf2_gpio_lock_acquire(gs);
+ if (ret < 0)
+ return ret;
+
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_CLEAR);
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR);
+
+ mlxbf2_gpio_lock_release(gs);
+
+ return ret;
+}
+
+/*
+ * Set output direction:
+ * {mode1,mode0} = {0,1}
+ */
+static int mlxbf2_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset,
+ int value)
+{
+ struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip);
+ int ret = 0;
+
+	/*
+	 * Acquire the arm_gpio_lock so that the ModeX registers can be
+	 * written; if another owner holds it, propagate the error.
+	 */
+ ret = mlxbf2_gpio_lock_acquire(gs);
+ if (ret < 0)
+ return ret;
+
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR);
+ writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_SET);
+
+ mlxbf2_gpio_lock_release(gs);
+
+ return ret;
+}
+
+/* BlueField-2 GPIO driver initialization routine. */
+static int
+mlxbf2_gpio_probe(struct platform_device *pdev)
+{
+ struct mlxbf2_gpio_context *gs;
+ struct device *dev = &pdev->dev;
+ struct gpio_chip *gc;
+ struct resource *res;
+ unsigned int npins;
+ int ret;
+
+ gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
+ if (!gs)
+ return -ENOMEM;
+
+ /* YU GPIO block address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ gs->gpio_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (!gs->gpio_io)
+ return -ENOMEM;
+
+ ret = mlxbf2_gpio_get_lock_res(pdev);
+ if (ret) {
+ dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n");
+ return ret;
+ }
+
+#ifdef CONFIG_PM
+	/* Allocate storage for the ModeX registers saved across suspend. */
+	gs->csave_regs = devm_kzalloc(dev, sizeof(*gs->csave_regs),
+				      GFP_KERNEL);
+	if (!gs->csave_regs)
+		return -ENOMEM;
+#endif
+
+ if (device_property_read_u32(dev, "npins", &npins))
+ npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK;
+
+ gc = &gs->gc;
+
+ ret = bgpio_init(gc, dev, 4,
+ gs->gpio_io + YU_GPIO_DATAIN,
+ gs->gpio_io + YU_GPIO_DATASET,
+ gs->gpio_io + YU_GPIO_DATACLEAR,
+ NULL,
+ NULL,
+			 0);
+	if (ret) {
+		dev_err(dev, "bgpio_init() failed\n");
+		return ret;
+	}
+
+ gc->direction_input = mlxbf2_gpio_direction_input;
+ gc->direction_output = mlxbf2_gpio_direction_output;
+ gc->ngpio = npins;
+ gc->owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, gs);
+
+ ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ if (ret) {
+ dev_err(dev, "Failed adding memory mapped gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mlxbf2_gpio_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev);
+
+ gs->csave_regs->gpio_mode0 = readl(gs->gpio_io +
+ YU_GPIO_MODE0);
+ gs->csave_regs->gpio_mode1 = readl(gs->gpio_io +
+ YU_GPIO_MODE1);
+
+ return 0;
+}
+
+static int mlxbf2_gpio_resume(struct platform_device *pdev)
+{
+ struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev);
+
+ writel(gs->csave_regs->gpio_mode0, gs->gpio_io +
+ YU_GPIO_MODE0);
+ writel(gs->csave_regs->gpio_mode1, gs->gpio_io +
+ YU_GPIO_MODE1);
+
+ return 0;
+}
+#endif
+
+static const struct acpi_device_id mlxbf2_gpio_acpi_match[] = {
+ { "MLNXBF22", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf2_gpio_acpi_match);
+
+static struct platform_driver mlxbf2_gpio_driver = {
+ .driver = {
+ .name = "mlxbf2_gpio",
+ .acpi_match_table = ACPI_PTR(mlxbf2_gpio_acpi_match),
+ },
+ .probe = mlxbf2_gpio_probe,
+#ifdef CONFIG_PM
+ .suspend = mlxbf2_gpio_suspend,
+ .resume = mlxbf2_gpio_resume,
+#endif
+};
+
+module_platform_driver(mlxbf2_gpio_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField-2 GPIO Driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f729e3e9e983..b778f33cc6af 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -389,12 +389,10 @@ static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
return GPIO_LINE_DIRECTION_IN;
}
-static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
- gc->set(gc, gpio, val);
-
spin_lock_irqsave(&gc->bgpio_lock, flags);
gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
@@ -405,7 +403,21 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
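+
+/*
+ * Controllers flagged BGPIOF_NO_SET_ON_INPUT ignore data register writes
+ * while a line is still an input, so for them the direction must be
+ * flipped before the value is written; all others keep the glitch-free
+ * value-first ordering.
+ */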
+static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
+{
+ bgpio_dir_out(gc, gpio, val);
+ gc->set(gc, gpio, val);
+ return 0;
+}
+
+static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
+{
+ gc->set(gc, gpio, val);
+ bgpio_dir_out(gc, gpio, val);
return 0;
}
@@ -538,7 +550,10 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
if (dirout || dirin) {
gc->reg_dir_out = dirout;
gc->reg_dir_in = dirin;
- gc->direction_output = bgpio_dir_out;
+ if (flags & BGPIOF_NO_SET_ON_INPUT)
+ gc->direction_output = bgpio_dir_out_dir_first;
+ else
+ gc->direction_output = bgpio_dir_out_val_first;
gc->direction_input = bgpio_dir_in;
gc->get_direction = bgpio_get_dir;
} else {
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 7d343bea784a..3eb94f3740d1 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -171,7 +171,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
/* Change the value unless we're actively driving the line. */
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
- !test_bit(FLAG_IS_OUT, &desc->flags))
+ !test_bit(FLAG_IS_OUT, &desc->flags))
__gpio_mockup_set(chip, offset, value);
out:
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index b992321bb852..82fb20dca53a 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -227,8 +227,8 @@ mediatek_gpio_bank_probe(struct device *dev,
ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
- ret = bgpio_init(&rg->chip, dev, 4,
- dat, set, ctrl, diro, NULL, 0);
+ ret = bgpio_init(&rg->chip, dev, 4, dat, set, ctrl, diro, NULL,
+ BGPIOF_NO_SET_ON_INPUT);
if (ret) {
dev_err(dev, "bgpio_init() failed\n");
return ret;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d2b999c7987f..3c9f4fb3d5a2 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1247,7 +1247,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
* pins.
*/
for (i = 0; i < 4; i++) {
- int irq = platform_get_irq(pdev, i);
+ int irq = platform_get_irq_optional(pdev, i);
if (irq < 0)
continue;
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index c77d474185f3..64278a4756f0 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -485,11 +485,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_bgio;
- if (of_property_read_bool(np, "gpio-ranges")) {
- port->gc.request = gpiochip_generic_request;
- port->gc.free = gpiochip_generic_free;
- }
-
+ port->gc.request = gpiochip_generic_request;
+ port->gc.free = gpiochip_generic_free;
port->gc.to_irq = mxc_gpio_to_irq;
port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
pdev->id * 32;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 3bd8adaeed9e..b8e2ecc3eade 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1102,23 +1102,13 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
void __iomem *base = bank->base;
- u32 mask, nowake;
+ u32 nowake;
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
- /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
- mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
- mask &= ~bank->context.risingdetect;
- bank->saved_datain |= mask;
-
- /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
- mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
- mask &= ~bank->context.fallingdetect;
- bank->saved_datain &= ~mask;
-
if (!may_lose_context)
goto update_gpio_context_count;
@@ -1237,26 +1227,35 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb,
{
struct gpio_bank *bank;
unsigned long flags;
+ int ret = NOTIFY_OK;
+ u32 isr, mask;
bank = container_of(nb, struct gpio_bank, nb);
raw_spin_lock_irqsave(&bank->lock, flags);
+ if (bank->is_suspended)
+ goto out_unlock;
+
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
- if (bank->is_suspended)
+ mask = omap_get_gpio_irqbank_mask(bank);
+ isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
+ if (isr) {
+ ret = NOTIFY_BAD;
break;
+ }
omap_gpio_idle(bank, true);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
- if (bank->is_suspended)
- break;
omap_gpio_unidle(bank);
break;
}
+
+out_unlock:
raw_spin_unlock_irqrestore(&bank->lock, flags);
- return NOTIFY_OK;
+ return ret;
}
static const struct omap_gpio_reg_offs omap2_gpio_regs = {
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 5df7782e348f..e241fb884c12 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -298,11 +298,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(pl061->base);
raw_spin_lock_init(&pl061->lock);
- if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- pl061->gc.request = gpiochip_generic_request;
- pl061->gc.free = gpiochip_generic_free;
- }
-
+ pl061->gc.request = gpiochip_generic_request;
+ pl061->gc.free = gpiochip_generic_free;
pl061->gc.base = -1;
pl061->gc.get_direction = pl061_get_direction;
pl061->gc.direction_input = pl061_direction_input;
@@ -326,10 +323,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
writeb(0, pl061->base + GPIOIE); /* disable irqs */
irq = adev->irq[0];
- if (irq < 0) {
- dev_err(&adev->dev, "invalid IRQ\n");
- return -ENODEV;
- }
+ if (!irq)
+ dev_warn(&adev->dev, "IRQ support disabled\n");
pl061->parent_irq = irq;
girq = &pl061->gc.irq;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 9888b62f37af..1361270ecf8c 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -361,11 +361,8 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
-
- if (pxa_gpio_has_pinctrl()) {
- pchip->chip.request = gpiochip_generic_request;
- pchip->chip.free = gpiochip_generic_free;
- }
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
@@ -652,8 +649,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pchip->irqdomain)
return -ENOMEM;
- irq0 = platform_get_irq_byname(pdev, "gpio0");
- irq1 = platform_get_irq_byname(pdev, "gpio1");
+ irq0 = platform_get_irq_byname_optional(pdev, "gpio0");
+ irq1 = platform_get_irq_byname_optional(pdev, "gpio1");
irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
|| (irq_mux <= 0))
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index f800b250971c..7284473c9fe3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -116,7 +116,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
spin_lock_irqsave(&p->lock, flags);
- /* Configure postive or negative logic in POSNEG */
+ /* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
/* Configure edge or level trigger in EDGLEVEL */
@@ -228,7 +228,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
spin_lock_irqsave(&p->lock, flags);
- /* Configure postive logic in POSNEG */
+ /* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
/* Select "General Input/Output Mode" in IOINTSEL */
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 311f66757b92..26e1fe092304 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -15,7 +15,7 @@ struct gpio_siox_ddata {
u8 setdata[1];
u8 getdata[3];
- spinlock_t irqlock;
+ raw_spinlock_t irqlock;
u32 irq_enable;
u32 irq_status;
u32 irq_type[20];
@@ -44,7 +44,7 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
mutex_lock(&ddata->lock);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock_irq(&ddata->irqlock);
for (offset = 0; offset < 12; ++offset) {
unsigned int bitpos = 11 - offset;
@@ -66,7 +66,7 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
trigger = ddata->irq_status & ddata->irq_enable;
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock_irq(&ddata->irqlock);
ddata->getdata[0] = buf[0];
ddata->getdata[1] = buf[1];
@@ -84,9 +84,9 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
* handler of the irq chip. But it doesn't, so we have
* to clean the irq_status here.
*/
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock_irq(&ddata->irqlock);
ddata->irq_status &= ~(1 << offset);
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock_irq(&ddata->irqlock);
handle_nested_irq(irq);
}
@@ -101,9 +101,9 @@ static void gpio_siox_irq_ack(struct irq_data *d)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_status &= ~(1 << d->hwirq);
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
}
static void gpio_siox_irq_mask(struct irq_data *d)
@@ -112,9 +112,9 @@ static void gpio_siox_irq_mask(struct irq_data *d)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_enable &= ~(1 << d->hwirq);
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
}
static void gpio_siox_irq_unmask(struct irq_data *d)
@@ -123,9 +123,9 @@ static void gpio_siox_irq_unmask(struct irq_data *d)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_enable |= 1 << d->hwirq;
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
}
static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
@@ -134,9 +134,9 @@ static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
struct gpio_siox_ddata *ddata =
container_of(ic, struct gpio_siox_ddata, ichip);
- spin_lock_irq(&ddata->irqlock);
+ raw_spin_lock(&ddata->irqlock);
ddata->irq_type[d->hwirq] = type;
- spin_unlock_irq(&ddata->irqlock);
+ raw_spin_unlock(&ddata->irqlock);
return 0;
}
@@ -222,7 +222,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
dev_set_drvdata(dev, ddata);
mutex_init(&ddata->lock);
- spin_lock_init(&ddata->irqlock);
+ raw_spin_lock_init(&ddata->irqlock);
ddata->gchip.base = -1;
ddata->gchip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index de241263d4be..79b553dc39a3 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -58,11 +58,20 @@ struct tegra_gpio_port {
unsigned int pins;
};
+struct tegra186_pin_range {
+ unsigned int offset;
+ const char *group;
+};
+
struct tegra_gpio_soc {
const struct tegra_gpio_port *ports;
unsigned int num_ports;
const char *name;
unsigned int instance;
+
+ const struct tegra186_pin_range *pin_ranges;
+ unsigned int num_pin_ranges;
+ const char *pinmux;
};
struct tegra_gpio {
@@ -254,6 +263,50 @@ static int tegra186_gpio_set_config(struct gpio_chip *chip,
return 0;
}
+static int tegra186_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ struct pinctrl_dev *pctldev;
+ struct device_node *np;
+ unsigned int i, j;
+ int err;
+
+ if (!gpio->soc->pinmux || gpio->soc->num_pin_ranges == 0)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, gpio->soc->pinmux);
+ if (!np)
+ return -ENODEV;
+
+ pctldev = of_pinctrl_get(np);
+ of_node_put(np);
+ if (!pctldev)
+ return -EPROBE_DEFER;
+
+ for (i = 0; i < gpio->soc->num_pin_ranges; i++) {
+ unsigned int pin = gpio->soc->pin_ranges[i].offset, port;
+ const char *group = gpio->soc->pin_ranges[i].group;
+
+ port = pin / 8;
+ pin = pin % 8;
+
+ if (port >= gpio->soc->num_ports) {
+ dev_warn(chip->parent, "invalid port %u for %s\n",
+ port, group);
+ continue;
+ }
+
+ for (j = 0; j < port; j++)
+ pin += gpio->soc->ports[j].pins;
+
+ err = gpiochip_add_pingroup_range(chip, pctldev, pin, group);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec,
u32 *flags)
@@ -578,12 +631,15 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.label = gpio->soc->name;
gpio->gpio.parent = &pdev->dev;
+ gpio->gpio.request = gpiochip_generic_request;
+ gpio->gpio.free = gpiochip_generic_free;
gpio->gpio.get_direction = tegra186_gpio_get_direction;
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get,
gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
+ gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
gpio->gpio.base = -1;
@@ -783,11 +839,19 @@ static const struct tegra_gpio_port tegra194_main_ports[] = {
TEGRA194_MAIN_GPIO_PORT(GG, 0, 0, 2)
};
+static const struct tegra186_pin_range tegra194_main_pin_ranges[] = {
+ { TEGRA194_MAIN_GPIO(GG, 0), "pex_l5_clkreq_n_pgg0" },
+ { TEGRA194_MAIN_GPIO(GG, 1), "pex_l5_rst_n_pgg1" },
+};
+
static const struct tegra_gpio_soc tegra194_main_soc = {
.num_ports = ARRAY_SIZE(tegra194_main_ports),
.ports = tegra194_main_ports,
.name = "tegra194-gpio",
.instance = 0,
+ .num_pin_ranges = ARRAY_SIZE(tegra194_main_pin_ranges),
+ .pin_ranges = tegra194_main_pin_ranges,
+ .pinmux = "nvidia,tegra194-pinmux",
};
#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 7ec97499b7f7..f99f3c10bed0 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -30,7 +30,7 @@ struct uniphier_gpio_priv {
struct irq_domain *domain;
void __iomem *regs;
spinlock_t lock;
- u32 saved_vals[0];
+ u32 saved_vals[];
};
static unsigned int uniphier_gpio_bank_to_reg(unsigned int bank)
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 74913f2e5697..1cbce5990855 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -57,16 +57,19 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
- int value;
+ unsigned int value;
regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value);
- return !!(value && WCD_PIN_MASK(pin));
+ return !!(value & WCD_PIN_MASK(pin));
}
static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
{
- wcd_gpio_direction_output(chip, pin, val);
+ struct wcd_gpio_data *data = gpiochip_get_data(chip);
+
+ regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin), val ? WCD_PIN_MASK(pin) : 0);
}
static int wcd_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 98cbaf0e415e..64bfb722756a 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -226,13 +226,11 @@ static int zx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- raw_spin_lock_init(&chip->lock);
- if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- chip->gc.request = gpiochip_generic_request;
- chip->gc.free = gpiochip_generic_free;
- }
-
id = of_alias_get_id(dev->of_node, "gpio");
+
+ raw_spin_lock_init(&chip->lock);
+ chip->gc.request = gpiochip_generic_request;
+ chip->gc.free = gpiochip_generic_free;
chip->gc.direction_input = zx_direction_input;
chip->gc.direction_output = zx_direction_output;
chip->gc.get = zx_get_value;
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 72b6001c56ef..5c91c4365da1 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -478,3 +478,49 @@ void devm_gpio_free(struct device *dev, unsigned int gpio)
&gpio));
}
EXPORT_SYMBOL_GPL(devm_gpio_free);
+
+static void devm_gpio_chip_release(struct device *dev, void *res)
+{
+ struct gpio_chip *gc = *(struct gpio_chip **)res;
+
+ gpiochip_remove(gc);
+}
+
+/**
+ * devm_gpiochip_add_data() - Resource managed gpiochip_add_data()
+ * @dev: pointer to the device that gpio_chip belongs to.
+ * @gc: the GPIO chip to register
+ * @data: driver-private data associated with this chip
+ *
+ * Context: potentially before irqs will work
+ *
+ * The gpio chip will automatically be released when the device is unbound.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * gc->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
+ */
+int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc,
+ void *data)
+{
+ struct gpio_chip **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = gpiochip_add_data(gc, data);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = gc;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index c6d30f73df07..ccc449df3792 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -605,6 +605,39 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
}
/**
+ * of_gpiochip_add_hog - Add all hogs in a hog device node
+ * @chip: gpio chip to act on
+ * @hog: device node describing the hogs
+ *
+ * Returns an error if it fails, otherwise 0 on success.
+ */
+static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog)
+{
+ enum gpiod_flags dflags;
+ struct gpio_desc *desc;
+ unsigned long lflags;
+ const char *name;
+ unsigned int i;
+ int ret;
+
+ for (i = 0;; i++) {
+ desc = of_parse_own_gpio(hog, chip, i, &name, &lflags, &dflags);
+ if (IS_ERR(desc))
+ break;
+
+ ret = gpiod_hog(desc, name, lflags, dflags);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_OF_DYNAMIC
+ desc->hog = hog;
+#endif
+ }
+
+ return 0;
+}
+
+/**
* of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
* @chip: gpio chip to act on
*
@@ -614,35 +647,109 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*/
static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
{
- struct gpio_desc *desc = NULL;
struct device_node *np;
- const char *name;
- unsigned long lflags;
- enum gpiod_flags dflags;
- unsigned int i;
int ret;
for_each_available_child_of_node(chip->of_node, np) {
if (!of_property_read_bool(np, "gpio-hog"))
continue;
- for (i = 0;; i++) {
- desc = of_parse_own_gpio(np, chip, i, &name, &lflags,
- &dflags);
- if (IS_ERR(desc))
- break;
-
- ret = gpiod_hog(desc, name, lflags, dflags);
- if (ret < 0) {
- of_node_put(np);
- return ret;
- }
+ ret = of_gpiochip_add_hog(chip, np);
+ if (ret < 0) {
+ of_node_put(np);
+ return ret;
}
+
+ of_node_set_flag(np, OF_POPULATED);
}
return 0;
}
+#ifdef CONFIG_OF_DYNAMIC
+/**
+ * of_gpiochip_remove_hog - Remove all hogs in a hog device node
+ * @chip: gpio chip to act on
+ * @hog: device node describing the hogs
+ */
+static void of_gpiochip_remove_hog(struct gpio_chip *chip,
+ struct device_node *hog)
+{
+ struct gpio_desc *descs = chip->gpiodev->descs;
+ unsigned int i;
+
+ for (i = 0; i < chip->ngpio; i++) {
+ if (test_bit(FLAG_IS_HOGGED, &descs[i].flags) &&
+ descs[i].hog == hog)
+ gpiochip_free_own_desc(&descs[i]);
+ }
+}
+
+static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+{
+ return chip->gpiodev->dev.of_node == data;
+}
+
+static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+{
+ return gpiochip_find(np, of_gpiochip_match_node);
+}
+
+static int of_gpio_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct gpio_chip *chip;
+ int ret;
+
+ /*
+ * This only supports adding and removing complete gpio-hog nodes.
+ * Modifying an existing gpio-hog node is not supported (except for
+ * changing its "status" property, which is treated the same as
+ * addition/removal).
+ */
+ switch (of_reconfig_get_state_change(action, arg)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ if (!of_property_read_bool(rd->dn, "gpio-hog"))
+ return NOTIFY_OK; /* not for us */
+
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
+ chip = of_find_gpiochip_by_node(rd->dn->parent);
+ if (chip == NULL)
+ return NOTIFY_OK; /* not for us */
+
+ ret = of_gpiochip_add_hog(chip, rd->dn);
+ if (ret < 0) {
+ pr_err("%s: failed to add hogs for %pOF\n", __func__,
+ rd->dn);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
+ return notifier_from_errno(ret);
+ }
+ break;
+
+ case OF_RECONFIG_CHANGE_REMOVE:
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK; /* already depopulated */
+
+ chip = of_find_gpiochip_by_node(rd->dn->parent);
+ if (chip == NULL)
+ return NOTIFY_OK; /* not for us */
+
+ of_gpiochip_remove_hog(chip, rd->dn);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+struct notifier_block gpio_of_notifier = {
+ .notifier_call = of_gpio_notify,
+};
+#endif /* CONFIG_OF_DYNAMIC */
+
/**
* of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 9768831b1fe2..ed26664f1537 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -35,4 +35,6 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
}
#endif /* CONFIG_OF_GPIO */
+extern struct notifier_block gpio_of_notifier;
+
#endif /* GPIOLIB_OF_H */
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 00fb91feba70..40f2d7f69be2 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -81,14 +81,14 @@ LIST_HEAD(gpio_devices);
static DEFINE_MUTEX(gpio_machine_hogs_mutex);
static LIST_HEAD(gpio_machine_hogs);
-static void gpiochip_free_hogs(struct gpio_chip *chip);
-static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+static void gpiochip_free_hogs(struct gpio_chip *gc);
+static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
-static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
-static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
-static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
+static void gpiochip_irqchip_remove(struct gpio_chip *gc);
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gc);
+static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc);
+static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc);
static bool gpiolib_initialized;
@@ -132,23 +132,24 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
/**
* gpiochip_get_desc - get the GPIO descriptor corresponding to the given
* hardware number for this chip
- * @chip: GPIO chip
+ * @gc: GPIO chip
* @hwnum: hardware number of the GPIO for this chip
*
* Returns:
- * A pointer to the GPIO descriptor or %ERR_PTR(-EINVAL) if no GPIO exists
+ * A pointer to the GPIO descriptor or ``ERR_PTR(-EINVAL)`` if no GPIO exists
* in the given chip for the specified hardware number.
*/
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
unsigned int hwnum)
{
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
return &gdev->descs[hwnum];
}
+EXPORT_SYMBOL_GPL(gpiochip_get_desc);
/**
* desc_to_gpio - convert a GPIO descriptor to the integer namespace
@@ -213,11 +214,11 @@ static int gpiochip_find_base(int ngpio)
*/
int gpiod_get_direction(struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
unsigned offset;
int ret;
- chip = gpiod_to_chip(desc);
+ gc = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
/*
@@ -228,10 +229,10 @@ int gpiod_get_direction(struct gpio_desc *desc)
test_bit(FLAG_IS_OUT, &desc->flags))
return 0;
- if (!chip->get_direction)
+ if (!gc->get_direction)
return -ENOTSUPP;
- ret = chip->get_direction(chip, offset);
+ ret = gc->get_direction(gc, offset);
if (ret < 0)
return ret;
@@ -301,6 +302,9 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
struct gpio_device *gdev;
unsigned long flags;
+ if (!name)
+ return NULL;
+
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list) {
@@ -309,7 +313,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
for (i = 0; i != gdev->ngpio; ++i) {
struct gpio_desc *desc = &gdev->descs[i];
- if (!desc->name || !name)
+ if (!desc->name)
continue;
if (!strcmp(desc->name, name)) {
@@ -356,16 +360,16 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
return 0;
}
-static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip)
+static unsigned long *gpiochip_allocate_mask(struct gpio_chip *gc)
{
unsigned long *p;
- p = bitmap_alloc(chip->ngpio, GFP_KERNEL);
+ p = bitmap_alloc(gc->ngpio, GFP_KERNEL);
if (!p)
return NULL;
/* Assume by default all GPIOs are valid */
- bitmap_fill(p, chip->ngpio);
+ bitmap_fill(p, gc->ngpio);
return p;
}
@@ -392,10 +396,10 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
return 0;
}
-static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip)
+static void gpiochip_free_valid_mask(struct gpio_chip *gc)
{
- bitmap_free(gpiochip->valid_mask);
- gpiochip->valid_mask = NULL;
+ bitmap_free(gc->valid_mask);
+ gc->valid_mask = NULL;
}
static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
@@ -406,13 +410,13 @@ static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
return 0;
}
-bool gpiochip_line_is_valid(const struct gpio_chip *gpiochip,
+bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
/* No mask means all valid */
- if (likely(!gpiochip->valid_mask))
+ if (likely(!gc->valid_mask))
return true;
- return test_bit(offset, gpiochip->valid_mask);
+ return test_bit(offset, gc->valid_mask);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
@@ -546,6 +550,9 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (ret)
return ret;
}
+
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_CONFIG, desc);
}
return 0;
}
@@ -787,8 +794,6 @@ out_free_lh:
* @irq: the interrupt that trigger in response to events on this GPIO
* @wait: wait queue that handles blocking reads of events
* @events: KFIFO for the GPIO events
- * @read_lock: mutex lock to protect reads from colliding with adding
- * new events to the FIFO
* @timestamp: cache for the timestamp storing it between hardirq
* and IRQ thread, used to bring the timestamp close to the actual
* event
@@ -801,7 +806,6 @@ struct lineevent_state {
int irq;
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
- struct mutex read_lock;
u64 timestamp;
};
@@ -817,7 +821,7 @@ static __poll_t lineevent_poll(struct file *filep,
poll_wait(filep, &le->wait, wait);
- if (!kfifo_is_empty(&le->events))
+ if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
events = EPOLLIN | EPOLLRDNORM;
return events;
@@ -830,43 +834,52 @@ static ssize_t lineevent_read(struct file *filep,
loff_t *f_ps)
{
struct lineevent_state *le = filep->private_data;
- unsigned int copied;
+ struct gpioevent_data ge;
+ ssize_t bytes_read = 0;
int ret;
- if (count < sizeof(struct gpioevent_data))
+ if (count < sizeof(ge))
return -EINVAL;
do {
+ spin_lock(&le->wait.lock);
if (kfifo_is_empty(&le->events)) {
- if (filep->f_flags & O_NONBLOCK)
+ if (bytes_read) {
+ spin_unlock(&le->wait.lock);
+ return bytes_read;
+ }
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock(&le->wait.lock);
return -EAGAIN;
+ }
- ret = wait_event_interruptible(le->wait,
+ ret = wait_event_interruptible_locked(le->wait,
!kfifo_is_empty(&le->events));
- if (ret)
+ if (ret) {
+ spin_unlock(&le->wait.lock);
return ret;
+ }
}
- if (mutex_lock_interruptible(&le->read_lock))
- return -ERESTARTSYS;
- ret = kfifo_to_user(&le->events, buf, count, &copied);
- mutex_unlock(&le->read_lock);
-
- if (ret)
- return ret;
-
- /*
- * If we couldn't read anything from the fifo (a different
- * thread might have been faster) we either return -EAGAIN if
- * the file descriptor is non-blocking, otherwise we go back to
- * sleep and wait for more data to arrive.
- */
- if (copied == 0 && (filep->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ ret = kfifo_out(&le->events, &ge, 1);
+ spin_unlock(&le->wait.lock);
+ if (ret != 1) {
+ /*
+ * This should never happen - we were holding the lock
+ * from the moment we learned the fifo is no longer
+ * empty until now.
+ */
+ ret = -EIO;
+ break;
+ }
- } while (copied == 0);
+ if (copy_to_user(buf + bytes_read, &ge, sizeof(ge)))
+ return -EFAULT;
+ bytes_read += sizeof(ge);
+ } while (count >= bytes_read + sizeof(ge));
- return copied;
+ return bytes_read;
}
static int lineevent_release(struct inode *inode, struct file *filep)
@@ -945,7 +958,7 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
* we didn't get the timestamp from lineevent_irq_handler().
*/
if (!le->timestamp)
- ge.timestamp = ktime_get_real_ns();
+ ge.timestamp = ktime_get_ns();
else
ge.timestamp = le->timestamp;
@@ -968,9 +981,12 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
return IRQ_NONE;
}
- ret = kfifo_put(&le->events, ge);
+ ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
+ 1, &le->wait.lock);
if (ret)
wake_up_poll(&le->wait, EPOLLIN);
+ else
+ pr_debug_ratelimited("event FIFO is full - event dropped\n");
return IRQ_HANDLED;
}
@@ -983,7 +999,7 @@ static irqreturn_t lineevent_irq_handler(int irq, void *p)
* Just store the timestamp in hardirq context so we get it as
* close in time as possible to the actual event.
*/
- le->timestamp = ktime_get_real_ns();
+ le->timestamp = ktime_get_ns();
return IRQ_WAKE_THREAD;
}
@@ -1083,7 +1099,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
INIT_KFIFO(le->events);
init_waitqueue_head(&le->wait);
- mutex_init(&le->read_lock);
/* Request a thread to read the events */
ret = request_threaded_irq(le->irq,
@@ -1139,17 +1154,82 @@ out_free_le:
return ret;
}
+static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
+ struct gpioline_info *info)
+{
+ struct gpio_chip *gc = desc->gdev->chip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (desc->name) {
+ strncpy(info->name, desc->name, sizeof(info->name));
+ info->name[sizeof(info->name) - 1] = '\0';
+ } else {
+ info->name[0] = '\0';
+ }
+
+ if (desc->label) {
+ strncpy(info->consumer, desc->label, sizeof(info->consumer));
+ info->consumer[sizeof(info->consumer) - 1] = '\0';
+ } else {
+ info->consumer[0] = '\0';
+ }
+
+ /*
+	 * Userspace only needs to know that the kernel is using this GPIO so
+ * it can't use it.
+ */
+ info->flags = 0;
+ if (test_bit(FLAG_REQUESTED, &desc->flags) ||
+ test_bit(FLAG_IS_HOGGED, &desc->flags) ||
+ test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
+ test_bit(FLAG_EXPORT, &desc->flags) ||
+ test_bit(FLAG_SYSFS, &desc->flags) ||
+ !pinctrl_gpio_can_use_line(gc->base + info->line_offset))
+ info->flags |= GPIOLINE_FLAG_KERNEL;
+ if (test_bit(FLAG_IS_OUT, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_IS_OUT;
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ info->flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+ GPIOLINE_FLAG_IS_OUT);
+ if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ info->flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+ GPIOLINE_FLAG_IS_OUT);
+ if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
+ if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
+ if (test_bit(FLAG_PULL_UP, &desc->flags))
+ info->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+}
+
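+/*
+ * Per-open-file chardev state: watched_lines selects which descriptors'
+ * line-info change events get queued onto this reader's event kfifo.
+ */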
+struct gpio_chardev_data {
+ struct gpio_device *gdev;
+ wait_queue_head_t wait;
+ DECLARE_KFIFO(events, struct gpioline_info_changed, 32);
+ struct notifier_block lineinfo_changed_nb;
+ unsigned long *watched_lines;
+};
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct gpio_device *gdev = filp->private_data;
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chardev_data *priv = filp->private_data;
+ struct gpio_device *gdev = priv->gdev;
+ struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
+ struct gpio_desc *desc;
+ __u32 offset;
/* We fail any subsequent ioctl():s when the chip is gone */
- if (!chip)
+ if (!gc)
return -ENODEV;
/* Fill in the struct and pass to userspace */
@@ -1168,68 +1248,40 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
struct gpioline_info lineinfo;
- struct gpio_desc *desc;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- desc = gpiochip_get_desc(chip, lineinfo.line_offset);
+ desc = gpiochip_get_desc(gc, lineinfo.line_offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
- if (desc->name) {
- strncpy(lineinfo.name, desc->name,
- sizeof(lineinfo.name));
- lineinfo.name[sizeof(lineinfo.name)-1] = '\0';
- } else {
- lineinfo.name[0] = '\0';
- }
- if (desc->label) {
- strncpy(lineinfo.consumer, desc->label,
- sizeof(lineinfo.consumer));
- lineinfo.consumer[sizeof(lineinfo.consumer)-1] = '\0';
- } else {
- lineinfo.consumer[0] = '\0';
- }
-
- /*
- * Userspace only need to know that the kernel is using
- * this GPIO so it can't use it.
- */
- lineinfo.flags = 0;
- if (test_bit(FLAG_REQUESTED, &desc->flags) ||
- test_bit(FLAG_IS_HOGGED, &desc->flags) ||
- test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
- test_bit(FLAG_EXPORT, &desc->flags) ||
- test_bit(FLAG_SYSFS, &desc->flags) ||
- !pinctrl_gpio_can_use_line(chip->base + lineinfo.line_offset))
- lineinfo.flags |= GPIOLINE_FLAG_KERNEL;
- if (test_bit(FLAG_IS_OUT, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_IS_OUT;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
- GPIOLINE_FLAG_IS_OUT);
- if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
- GPIOLINE_FLAG_IS_OUT);
- if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_BIAS_DISABLE;
- if (test_bit(FLAG_PULL_DOWN, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
+ gpio_desc_to_lineinfo(desc, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
+
+ if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL)
+ set_bit(gpio_chip_hwgpio(desc), priv->watched_lines);
+
return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
+ if (copy_from_user(&offset, ip, sizeof(offset)))
+ return -EFAULT;
+
+ desc = gpiochip_get_desc(gc, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ clear_bit(gpio_chip_hwgpio(desc), priv->watched_lines);
+ return 0;
}
return -EINVAL;
}
@@ -1242,6 +1294,101 @@ static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
}
#endif
+static struct gpio_chardev_data *
+to_gpio_chardev_data(struct notifier_block *nb)
+{
+ return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
+}
+
+static int lineinfo_changed_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct gpio_chardev_data *priv = to_gpio_chardev_data(nb);
+ struct gpioline_info_changed chg;
+ struct gpio_desc *desc = data;
+ int ret;
+
+ if (!test_bit(gpio_chip_hwgpio(desc), priv->watched_lines))
+ return NOTIFY_DONE;
+
+ memset(&chg, 0, sizeof(chg));
+ chg.info.line_offset = gpio_chip_hwgpio(desc);
+ chg.event_type = action;
+ chg.timestamp = ktime_get_ns();
+ gpio_desc_to_lineinfo(desc, &chg.info);
+
+ ret = kfifo_in_spinlocked(&priv->events, &chg, 1, &priv->wait.lock);
+ if (ret)
+ wake_up_poll(&priv->wait, EPOLLIN);
+ else
+ pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
+
+ return NOTIFY_OK;
+}
+
+static __poll_t lineinfo_watch_poll(struct file *filep,
+ struct poll_table_struct *pollt)
+{
+ struct gpio_chardev_data *priv = filep->private_data;
+ __poll_t events = 0;
+
+ poll_wait(filep, &priv->wait, pollt);
+
+ if (!kfifo_is_empty_spinlocked_noirqsave(&priv->events,
+ &priv->wait.lock))
+ events = EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static ssize_t lineinfo_watch_read(struct file *filep, char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct gpio_chardev_data *priv = filep->private_data;
+ struct gpioline_info_changed event;
+ ssize_t bytes_read = 0;
+ int ret;
+
+ if (count < sizeof(event))
+ return -EINVAL;
+
+ do {
+ spin_lock(&priv->wait.lock);
+ if (kfifo_is_empty(&priv->events)) {
+ if (bytes_read) {
+ spin_unlock(&priv->wait.lock);
+ return bytes_read;
+ }
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock(&priv->wait.lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_locked(priv->wait,
+ !kfifo_is_empty(&priv->events));
+ if (ret) {
+ spin_unlock(&priv->wait.lock);
+ return ret;
+ }
+ }
+
+ ret = kfifo_out(&priv->events, &event, 1);
+ spin_unlock(&priv->wait.lock);
+ if (ret != 1) {
+			/* We should never get here. See lineevent_read(). */
+			ret = -EIO;
+			break;
+ }
+
+ if (copy_to_user(buf + bytes_read, &event, sizeof(event)))
+ return -EFAULT;
+ bytes_read += sizeof(event);
+ } while (count >= bytes_read + sizeof(event));
+
+ return bytes_read;
+}
+
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
@@ -1252,14 +1399,48 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp)
{
struct gpio_device *gdev = container_of(inode->i_cdev,
struct gpio_device, chrdev);
+ struct gpio_chardev_data *priv;
+ int ret = -ENOMEM;
/* Fail on open if the backing gpiochip is gone */
if (!gdev->chip)
return -ENODEV;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
+ if (!priv->watched_lines)
+ goto out_free_priv;
+
+ init_waitqueue_head(&priv->wait);
+ INIT_KFIFO(priv->events);
+ priv->gdev = gdev;
+
+ priv->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
+ ret = atomic_notifier_chain_register(&gdev->notifier,
+ &priv->lineinfo_changed_nb);
+ if (ret)
+ goto out_free_bitmap;
+
get_device(&gdev->dev);
- filp->private_data = gdev;
+ filp->private_data = priv;
- return nonseekable_open(inode, filp);
+ ret = nonseekable_open(inode, filp);
+ if (ret)
+ goto out_unregister_notifier;
+
+ return ret;
+
+out_unregister_notifier:
+ atomic_notifier_chain_unregister(&gdev->notifier,
+ &priv->lineinfo_changed_nb);
+out_free_bitmap:
+ bitmap_free(priv->watched_lines);
+out_free_priv:
+ kfree(priv);
+ return ret;
}
/**
@@ -1270,17 +1451,23 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp)
*/
static int gpio_chrdev_release(struct inode *inode, struct file *filp)
{
- struct gpio_device *gdev = container_of(inode->i_cdev,
- struct gpio_device, chrdev);
+ struct gpio_chardev_data *priv = filp->private_data;
+ struct gpio_device *gdev = priv->gdev;
+ bitmap_free(priv->watched_lines);
+ atomic_notifier_chain_unregister(&gdev->notifier,
+ &priv->lineinfo_changed_nb);
put_device(&gdev->dev);
+ kfree(priv);
+
return 0;
}
-
static const struct file_operations gpio_fileops = {
.release = gpio_chrdev_release,
.open = gpio_chrdev_open,
+ .poll = lineinfo_watch_poll,
+ .read = lineinfo_watch_read,
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = gpio_ioctl,
@@ -1332,12 +1519,12 @@ err_remove_device:
return ret;
}
-static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog)
+static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
{
struct gpio_desc *desc;
int rv;
- desc = gpiochip_get_desc(chip, hog->chip_hwnum);
+ desc = gpiochip_get_desc(gc, hog->chip_hwnum);
if (IS_ERR(desc)) {
pr_err("%s: unable to get GPIO desc: %ld\n",
__func__, PTR_ERR(desc));
@@ -1350,18 +1537,18 @@ static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog)
rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags);
if (rv)
pr_err("%s: unable to hog GPIO line (%s:%u): %d\n",
- __func__, chip->label, hog->chip_hwnum, rv);
+ __func__, gc->label, hog->chip_hwnum, rv);
}
-static void machine_gpiochip_add(struct gpio_chip *chip)
+static void machine_gpiochip_add(struct gpio_chip *gc)
{
struct gpiod_hog *hog;
mutex_lock(&gpio_machine_hogs_mutex);
list_for_each_entry(hog, &gpio_machine_hogs, list) {
- if (!strcmp(chip->label, hog->chip_label))
- gpiochip_machine_hog(chip, hog);
+ if (!strcmp(gc->label, hog->chip_label))
+ gpiochip_machine_hog(gc, hog);
}
mutex_unlock(&gpio_machine_hogs_mutex);
@@ -1380,14 +1567,14 @@ static void gpiochip_setup_devs(void)
}
}
-int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
unsigned long flags;
int ret = 0;
unsigned i;
- int base = chip->base;
+ int base = gc->base;
struct gpio_device *gdev;
/*
@@ -1398,19 +1585,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (!gdev)
return -ENOMEM;
gdev->dev.bus = &gpio_bus_type;
- gdev->chip = chip;
- chip->gpiodev = gdev;
- if (chip->parent) {
- gdev->dev.parent = chip->parent;
- gdev->dev.of_node = chip->parent->of_node;
+ gdev->chip = gc;
+ gc->gpiodev = gdev;
+ if (gc->parent) {
+ gdev->dev.parent = gc->parent;
+ gdev->dev.of_node = gc->parent->of_node;
}
#ifdef CONFIG_OF_GPIO
/* If the gpiochip has an assigned OF node this takes precedence */
- if (chip->of_node)
- gdev->dev.of_node = chip->of_node;
+ if (gc->of_node)
+ gdev->dev.of_node = gc->of_node;
else
- chip->of_node = gdev->dev.of_node;
+ gc->of_node = gdev->dev.of_node;
#endif
gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
@@ -1421,37 +1608,37 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
- if (chip->parent && chip->parent->driver)
- gdev->owner = chip->parent->driver->owner;
- else if (chip->owner)
+ if (gc->parent && gc->parent->driver)
+ gdev->owner = gc->parent->driver->owner;
+ else if (gc->owner)
/* TODO: remove chip->owner */
- gdev->owner = chip->owner;
+ gdev->owner = gc->owner;
else
gdev->owner = THIS_MODULE;
- gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
+ gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
ret = -ENOMEM;
goto err_free_ida;
}
- if (chip->ngpio == 0) {
- chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
+ if (gc->ngpio == 0) {
+ chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
ret = -EINVAL;
goto err_free_descs;
}
- if (chip->ngpio > FASTPATH_NGPIO)
- chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n",
- chip->ngpio, FASTPATH_NGPIO);
+ if (gc->ngpio > FASTPATH_NGPIO)
+ chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
+ gc->ngpio, FASTPATH_NGPIO);
- gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
+ gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
ret = -ENOMEM;
goto err_free_descs;
}
- gdev->ngpio = chip->ngpio;
+ gdev->ngpio = gc->ngpio;
gdev->data = data;
spin_lock_irqsave(&gpio_lock, flags);
@@ -1464,7 +1651,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
* of the sysfs interface anyways.
*/
if (base < 0) {
- base = gpiochip_find_base(chip->ngpio);
+ base = gpiochip_find_base(gc->ngpio);
if (base < 0) {
ret = base;
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1476,7 +1663,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
* see if anyone makes use of this, else drop this and assign
* a poison instead.
*/
- chip->base = base;
+ gc->base = base;
}
gdev->base = base;
@@ -1486,60 +1673,62 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_label;
}
- for (i = 0; i < chip->ngpio; i++)
+ for (i = 0; i < gc->ngpio; i++)
gdev->descs[i].gdev = gdev;
spin_unlock_irqrestore(&gpio_lock, flags);
+ ATOMIC_INIT_NOTIFIER_HEAD(&gdev->notifier);
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- ret = gpiochip_set_desc_names(chip);
+ ret = gpiochip_set_desc_names(gc);
if (ret)
goto err_remove_from_list;
- ret = gpiochip_alloc_valid_mask(chip);
+ ret = gpiochip_alloc_valid_mask(gc);
if (ret)
goto err_remove_from_list;
- ret = of_gpiochip_add(chip);
+ ret = of_gpiochip_add(gc);
if (ret)
goto err_free_gpiochip_mask;
- ret = gpiochip_init_valid_mask(chip);
+ ret = gpiochip_init_valid_mask(gc);
if (ret)
goto err_remove_of_chip;
- for (i = 0; i < chip->ngpio; i++) {
+ for (i = 0; i < gc->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
- if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+ if (gc->get_direction && gpiochip_line_is_valid(gc, i)) {
assign_bit(FLAG_IS_OUT,
- &desc->flags, !chip->get_direction(chip, i));
+ &desc->flags, !gc->get_direction(gc, i));
} else {
assign_bit(FLAG_IS_OUT,
- &desc->flags, !chip->direction_input);
+ &desc->flags, !gc->direction_input);
}
}
- ret = gpiochip_add_pin_ranges(chip);
+ ret = gpiochip_add_pin_ranges(gc);
if (ret)
goto err_remove_of_chip;
- acpi_gpiochip_add(chip);
+ acpi_gpiochip_add(gc);
- machine_gpiochip_add(chip);
+ machine_gpiochip_add(gc);
- ret = gpiochip_irqchip_init_valid_mask(chip);
+ ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
goto err_remove_acpi_chip;
- ret = gpiochip_irqchip_init_hw(chip);
+ ret = gpiochip_irqchip_init_hw(gc);
if (ret)
goto err_remove_acpi_chip;
- ret = gpiochip_add_irqchip(chip, lock_key, request_key);
+ ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
goto err_remove_irqchip_mask;
@@ -1559,17 +1748,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
return 0;
err_remove_irqchip:
- gpiochip_irqchip_remove(chip);
+ gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
- gpiochip_irqchip_free_valid_mask(chip);
+ gpiochip_irqchip_free_valid_mask(gc);
err_remove_acpi_chip:
- acpi_gpiochip_remove(chip);
+ acpi_gpiochip_remove(gc);
err_remove_of_chip:
- gpiochip_free_hogs(chip);
- of_gpiochip_remove(chip);
+ gpiochip_free_hogs(gc);
+ of_gpiochip_remove(gc);
err_free_gpiochip_mask:
- gpiochip_remove_pin_ranges(chip);
- gpiochip_free_valid_mask(chip);
+ gpiochip_remove_pin_ranges(gc);
+ gpiochip_free_valid_mask(gc);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
@@ -1584,7 +1773,7 @@ err_free_gdev:
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
- chip->label ? : "generic", ret);
+ gc->label ? : "generic", ret);
kfree(gdev);
return ret;
}
@@ -1592,41 +1781,39 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
/**
* gpiochip_get_data() - get per-subdriver data for the chip
- * @chip: GPIO chip
+ * @gc: GPIO chip
*
* Returns:
* The per-subdriver data for the chip.
*/
-void *gpiochip_get_data(struct gpio_chip *chip)
+void *gpiochip_get_data(struct gpio_chip *gc)
{
- return chip->gpiodev->data;
+ return gc->gpiodev->data;
}
EXPORT_SYMBOL_GPL(gpiochip_get_data);
/**
* gpiochip_remove() - unregister a gpio_chip
- * @chip: the chip to unregister
+ * @gc: the chip to unregister
*
* A gpio_chip with any GPIOs still requested may not be removed.
*/
-void gpiochip_remove(struct gpio_chip *chip)
+void gpiochip_remove(struct gpio_chip *gc)
{
- struct gpio_device *gdev = chip->gpiodev;
- struct gpio_desc *desc;
+ struct gpio_device *gdev = gc->gpiodev;
unsigned long flags;
- unsigned i;
- bool requested = false;
+ unsigned int i;
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
- gpiochip_free_hogs(chip);
+ gpiochip_free_hogs(gc);
/* Numb the device, cancelling all outstanding operations */
gdev->chip = NULL;
- gpiochip_irqchip_remove(chip);
- acpi_gpiochip_remove(chip);
- of_gpiochip_remove(chip);
- gpiochip_remove_pin_ranges(chip);
- gpiochip_free_valid_mask(chip);
+ gpiochip_irqchip_remove(gc);
+ acpi_gpiochip_remove(gc);
+ of_gpiochip_remove(gc);
+ gpiochip_remove_pin_ranges(gc);
+ gpiochip_free_valid_mask(gc);
/*
* We accept no more calls into the driver from this point, so
* NULL the driver data pointer
@@ -1635,13 +1822,12 @@ void gpiochip_remove(struct gpio_chip *chip)
spin_lock_irqsave(&gpio_lock, flags);
for (i = 0; i < gdev->ngpio; i++) {
- desc = &gdev->descs[i];
- if (test_bit(FLAG_REQUESTED, &desc->flags))
- requested = true;
+ if (gpiochip_is_requested(gc, i))
+ break;
}
spin_unlock_irqrestore(&gpio_lock, flags);
- if (requested)
+ if (i != gdev->ngpio)
dev_crit(&gdev->dev,
"REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -1656,52 +1842,6 @@ void gpiochip_remove(struct gpio_chip *chip)
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
-static void devm_gpio_chip_release(struct device *dev, void *res)
-{
- struct gpio_chip *chip = *(struct gpio_chip **)res;
-
- gpiochip_remove(chip);
-}
-
-/**
- * devm_gpiochip_add_data() - Resource managed gpiochip_add_data()
- * @dev: pointer to the device that gpio_chip belongs to.
- * @chip: the chip to register, with chip->base initialized
- * @data: driver-private data associated with this chip
- *
- * Context: potentially before irqs will work
- *
- * The gpio chip automatically be released when the device is unbound.
- *
- * Returns:
- * A negative errno if the chip can't be registered, such as because the
- * chip->base is invalid or already associated with a different chip.
- * Otherwise it returns zero as a success code.
- */
-int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
- void *data)
-{
- struct gpio_chip **ptr;
- int ret;
-
- ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = gpiochip_add_data(chip, data);
- if (ret < 0) {
- devres_free(ptr);
- return ret;
- }
-
- *ptr = chip;
- devres_add(dev, ptr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
-
/**
* gpiochip_find() - iterator for locating a specific gpio_chip
* @data: data to pass to match function
@@ -1714,31 +1854,31 @@ EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
* more gpio_chips.
*/
struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *chip,
+ int (*match)(struct gpio_chip *gc,
void *data))
{
struct gpio_device *gdev;
- struct gpio_chip *chip = NULL;
+ struct gpio_chip *gc = NULL;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
list_for_each_entry(gdev, &gpio_devices, list)
if (gdev->chip && match(gdev->chip, data)) {
- chip = gdev->chip;
+ gc = gdev->chip;
break;
}
spin_unlock_irqrestore(&gpio_lock, flags);
- return chip;
+ return gc;
}
EXPORT_SYMBOL_GPL(gpiochip_find);
-static int gpiochip_match_name(struct gpio_chip *chip, void *data)
+static int gpiochip_match_name(struct gpio_chip *gc, void *data)
{
const char *name = data;
- return !strcmp(chip->label, name);
+ return !strcmp(gc->label, name);
}
static struct gpio_chip *find_chip_by_name(const char *name)
@@ -1778,21 +1918,21 @@ static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
return 0;
}
-static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
+static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
{
- bitmap_free(gpiochip->irq.valid_mask);
- gpiochip->irq.valid_mask = NULL;
+ bitmap_free(gc->irq.valid_mask);
+ gc->irq.valid_mask = NULL;
}
-bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
unsigned int offset)
{
- if (!gpiochip_line_is_valid(gpiochip, offset))
+ if (!gpiochip_line_is_valid(gc, offset))
return false;
/* No mask means all valid */
- if (likely(!gpiochip->irq.valid_mask))
+ if (likely(!gc->irq.valid_mask))
return true;
- return test_bit(offset, gpiochip->irq.valid_mask);
+ return test_bit(offset, gc->irq.valid_mask);
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
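A chip can thin out which of its lines may be used as IRQs by populating irq.valid_mask before registration; a sketch, assuming a hypothetical chip whose first two lines cannot raise interrupts:

static void foo_init_irq_valid_mask(struct gpio_chip *gc,
				    unsigned long *valid_mask,
				    unsigned int ngpios)
{
	bitmap_fill(valid_mask, ngpios);
	clear_bit(0, valid_mask);	/* lines 0 and 1 have no IRQ */
	clear_bit(1, valid_mask);
}

	/* assigned before gpiochip_add_data(): */
	gc->irq.init_valid_mask = foo_init_irq_valid_mask;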
@@ -1844,16 +1984,16 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
/**
* gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
- * @gpiochip: the gpiochip to set the irqchip nested handler to
+ * @gc: the gpiochip to set the irqchip nested handler to
* @irqchip: the irqchip to nest to the gpiochip
* @parent_irq: the irq number corresponding to the parent IRQ for this
* nested irqchip
*/
-void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
+void gpiochip_set_nested_irqchip(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int parent_irq)
{
- gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, NULL);
+ gpiochip_set_cascaded_irqchip(gc, parent_irq, NULL);
}
EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
@@ -2030,7 +2170,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
return ret;
}
-static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *chip,
+static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc,
unsigned int offset)
{
return offset;
@@ -2090,7 +2230,7 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
return !!gc->irq.parent_domain;
}
-void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type)
{
@@ -2100,7 +2240,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
if (!fwspec)
return NULL;
- fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->fwnode = gc->irq.parent_domain->fwnode;
fwspec->param_count = 2;
fwspec->param[0] = parent_hwirq;
fwspec->param[1] = parent_type;
@@ -2109,7 +2249,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
}
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
-void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type)
{
@@ -2119,7 +2259,7 @@ void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
if (!fwspec)
return NULL;
- fwspec->fwnode = chip->irq.parent_domain->fwnode;
+ fwspec->fwnode = gc->irq.parent_domain->fwnode;
fwspec->param_count = 4;
fwspec->param[0] = 0;
fwspec->param[1] = parent_hwirq;
@@ -2157,28 +2297,28 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct gpio_chip *chip = d->host_data;
+ struct gpio_chip *gc = d->host_data;
int ret = 0;
- if (!gpiochip_irqchip_irq_valid(chip, hwirq))
+ if (!gpiochip_irqchip_irq_valid(gc, hwirq))
return -ENXIO;
- irq_set_chip_data(irq, chip);
+ irq_set_chip_data(irq, gc);
/*
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
- irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
- irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
+ irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
+ irq_set_chip_and_handler(irq, gc->irq.chip, gc->irq.handler);
/* Chips that use nested thread handlers have them marked */
- if (chip->irq.threaded)
+ if (gc->irq.threaded)
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
- if (chip->irq.num_parents == 1)
- ret = irq_set_parent(irq, chip->irq.parents[0]);
- else if (chip->irq.map)
- ret = irq_set_parent(irq, chip->irq.map[hwirq]);
+ if (gc->irq.num_parents == 1)
+ ret = irq_set_parent(irq, gc->irq.parents[0]);
+ else if (gc->irq.map)
+ ret = irq_set_parent(irq, gc->irq.map[hwirq]);
if (ret < 0)
return ret;
@@ -2187,8 +2327,8 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
* No set-up of the hardware will happen if IRQ_TYPE_NONE
* is passed as default type.
*/
- if (chip->irq.default_type != IRQ_TYPE_NONE)
- irq_set_irq_type(irq, chip->irq.default_type);
+ if (gc->irq.default_type != IRQ_TYPE_NONE)
+ irq_set_irq_type(irq, gc->irq.default_type);
return 0;
}
@@ -2196,9 +2336,9 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_map);
void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
- struct gpio_chip *chip = d->host_data;
+ struct gpio_chip *gc = d->host_data;
- if (chip->irq.threaded)
+ if (gc->irq.threaded)
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
@@ -2230,9 +2370,9 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
int gpiochip_irq_domain_activate(struct irq_domain *domain,
struct irq_data *data, bool reserve)
{
- struct gpio_chip *chip = domain->host_data;
+ struct gpio_chip *gc = domain->host_data;
- return gpiochip_lock_as_irq(chip, data->hwirq);
+ return gpiochip_lock_as_irq(gc, data->hwirq);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate);
@@ -2248,17 +2388,17 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate);
void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *data)
{
- struct gpio_chip *chip = domain->host_data;
+ struct gpio_chip *gc = domain->host_data;
- return gpiochip_unlock_as_irq(chip, data->hwirq);
+ return gpiochip_unlock_as_irq(gc, data->hwirq);
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
-static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
+static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset)
{
- struct irq_domain *domain = chip->irq.domain;
+ struct irq_domain *domain = gc->irq.domain;
- if (!gpiochip_irqchip_irq_valid(chip, offset))
+ if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -2267,7 +2407,7 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
spec.fwnode = domain->fwnode;
spec.param_count = 2;
- spec.param[0] = chip->irq.child_offset_to_irq(chip, offset);
+ spec.param[0] = gc->irq.child_offset_to_irq(gc, offset);
spec.param[1] = IRQ_TYPE_NONE;
return irq_create_fwspec_mapping(&spec);
@@ -2279,32 +2419,32 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
static int gpiochip_irq_reqres(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- return gpiochip_reqres_irq(chip, d->hwirq);
+ return gpiochip_reqres_irq(gc, d->hwirq);
}
static void gpiochip_irq_relres(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- gpiochip_relres_irq(chip, d->hwirq);
+ gpiochip_relres_irq(gc, d->hwirq);
}
static void gpiochip_irq_enable(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- gpiochip_enable_irq(chip, d->hwirq);
- if (chip->irq.irq_enable)
- chip->irq.irq_enable(d);
+ gpiochip_enable_irq(gc, d->hwirq);
+ if (gc->irq.irq_enable)
+ gc->irq.irq_enable(d);
else
- chip->irq.chip->irq_unmask(d);
+ gc->irq.chip->irq_unmask(d);
}
static void gpiochip_irq_disable(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
/*
* Since we override .irq_disable() we need to mimic the
@@ -2313,23 +2453,23 @@ static void gpiochip_irq_disable(struct irq_data *d)
* behaviour of mask_irq() which calls .irq_mask() if
* it exists.
*/
- if (chip->irq.irq_disable)
- chip->irq.irq_disable(d);
- else if (chip->irq.chip->irq_mask)
- chip->irq.chip->irq_mask(d);
- gpiochip_disable_irq(chip, d->hwirq);
+ if (gc->irq.irq_disable)
+ gc->irq.irq_disable(d);
+ else if (gc->irq.chip->irq_mask)
+ gc->irq.chip->irq_mask(d);
+ gpiochip_disable_irq(gc, d->hwirq);
}
-static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip)
+static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
{
- struct irq_chip *irqchip = gpiochip->irq.chip;
+ struct irq_chip *irqchip = gc->irq.chip;
if (!irqchip->irq_request_resources &&
!irqchip->irq_release_resources) {
irqchip->irq_request_resources = gpiochip_irq_reqres;
irqchip->irq_release_resources = gpiochip_irq_relres;
}
- if (WARN_ON(gpiochip->irq.irq_enable))
+ if (WARN_ON(gc->irq.irq_enable))
return;
/* Check if the irqchip already has this hook... */
if (irqchip->irq_enable == gpiochip_irq_enable) {
@@ -2337,27 +2477,27 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip)
* ...and if so, give a gentle warning that this is bad
* practice.
*/
- chip_info(gpiochip,
+ chip_info(gc,
"detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
return;
}
- gpiochip->irq.irq_enable = irqchip->irq_enable;
- gpiochip->irq.irq_disable = irqchip->irq_disable;
+ gc->irq.irq_enable = irqchip->irq_enable;
+ gc->irq.irq_disable = irqchip->irq_disable;
irqchip->irq_enable = gpiochip_irq_enable;
irqchip->irq_disable = gpiochip_irq_disable;
}
/**
* gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
- * @gpiochip: the GPIO chip to add the IRQ chip to
+ * @gc: the GPIO chip to add the IRQ chip to
* @lock_key: lockdep class for IRQ lock
* @request_key: lockdep class for IRQ request
*/
-static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+static int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
- struct irq_chip *irqchip = gpiochip->irq.chip;
+ struct irq_chip *irqchip = gc->irq.chip;
const struct irq_domain_ops *ops = NULL;
struct device_node *np;
unsigned int type;
@@ -2366,13 +2506,13 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
if (!irqchip)
return 0;
- if (gpiochip->irq.parent_handler && gpiochip->can_sleep) {
- chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n");
+ if (gc->irq.parent_handler && gc->can_sleep) {
+ chip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n");
return -EINVAL;
}
- np = gpiochip->gpiodev->dev.of_node;
- type = gpiochip->irq.default_type;
+ np = gc->gpiodev->dev.of_node;
+ type = gc->irq.default_type;
/*
* Specifying a default trigger is a terrible idea if DT or ACPI is
@@ -2383,74 +2523,74 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
"%s: Ignoring %u default trigger\n", np->full_name, type))
type = IRQ_TYPE_NONE;
- if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+ if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
+ acpi_handle_warn(ACPI_HANDLE(gc->parent),
"Ignoring %u default trigger\n", type);
type = IRQ_TYPE_NONE;
}
- gpiochip->to_irq = gpiochip_to_irq;
- gpiochip->irq.default_type = type;
- gpiochip->irq.lock_key = lock_key;
- gpiochip->irq.request_key = request_key;
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.default_type = type;
+ gc->irq.lock_key = lock_key;
+ gc->irq.request_key = request_key;
/* If a parent irqdomain is provided, let's build a hierarchy */
- if (gpiochip_hierarchy_is_hierarchical(gpiochip)) {
- int ret = gpiochip_hierarchy_add_domain(gpiochip);
+ if (gpiochip_hierarchy_is_hierarchical(gc)) {
+ int ret = gpiochip_hierarchy_add_domain(gc);
if (ret)
return ret;
} else {
/* Some drivers provide custom irqdomain ops */
- if (gpiochip->irq.domain_ops)
- ops = gpiochip->irq.domain_ops;
+ if (gc->irq.domain_ops)
+ ops = gc->irq.domain_ops;
if (!ops)
ops = &gpiochip_domain_ops;
- gpiochip->irq.domain = irq_domain_add_simple(np,
- gpiochip->ngpio,
- gpiochip->irq.first,
- ops, gpiochip);
- if (!gpiochip->irq.domain)
+ gc->irq.domain = irq_domain_add_simple(np,
+ gc->ngpio,
+ gc->irq.first,
+ ops, gc);
+ if (!gc->irq.domain)
return -EINVAL;
}
- if (gpiochip->irq.parent_handler) {
- void *data = gpiochip->irq.parent_handler_data ?: gpiochip;
+ if (gc->irq.parent_handler) {
+ void *data = gc->irq.parent_handler_data ?: gc;
- for (i = 0; i < gpiochip->irq.num_parents; i++) {
+ for (i = 0; i < gc->irq.num_parents; i++) {
/*
* The parent IRQ chip is already using the chip_data
* for this IRQ chip, so our callbacks simply use the
* handler_data.
*/
- irq_set_chained_handler_and_data(gpiochip->irq.parents[i],
- gpiochip->irq.parent_handler,
+ irq_set_chained_handler_and_data(gc->irq.parents[i],
+ gc->irq.parent_handler,
data);
}
}
- gpiochip_set_irq_hooks(gpiochip);
+ gpiochip_set_irq_hooks(gc);
- acpi_gpiochip_request_interrupts(gpiochip);
+ acpi_gpiochip_request_interrupts(gc);
return 0;
}
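gpiochip_add_irqchip() is reached from gpiochip_add_data() when the driver has filled in gc->irq beforehand; a sketch of that registration path for a chained setup (the irqchip, handler and priv names are illustrative):

	struct gpio_irq_chip *girq = &gc->irq;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	girq->chip = &foo_irq_chip;
	girq->parent_handler = foo_irq_handler;	/* chained, so no can_sleep */
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	return devm_gpiochip_add_data(dev, gc, priv);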
/**
* gpiochip_irqchip_remove() - removes an irqchip added to a gpiochip
- * @gpiochip: the gpiochip to remove the irqchip from
+ * @gc: the gpiochip to remove the irqchip from
*
* This is called only from gpiochip_remove()
*/
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
+static void gpiochip_irqchip_remove(struct gpio_chip *gc)
{
- struct irq_chip *irqchip = gpiochip->irq.chip;
+ struct irq_chip *irqchip = gc->irq.chip;
unsigned int offset;
- acpi_gpiochip_free_interrupts(gpiochip);
+ acpi_gpiochip_free_interrupts(gc);
- if (irqchip && gpiochip->irq.parent_handler) {
- struct gpio_irq_chip *irq = &gpiochip->irq;
+ if (irqchip && gc->irq.parent_handler) {
+ struct gpio_irq_chip *irq = &gc->irq;
unsigned int i;
for (i = 0; i < irq->num_parents; i++)
@@ -2459,18 +2599,18 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
/* Remove all IRQ mappings and delete the domain */
- if (gpiochip->irq.domain) {
+ if (gc->irq.domain) {
unsigned int irq;
- for (offset = 0; offset < gpiochip->ngpio; offset++) {
- if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
+ for (offset = 0; offset < gc->ngpio; offset++) {
+ if (!gpiochip_irqchip_irq_valid(gc, offset))
continue;
- irq = irq_find_mapping(gpiochip->irq.domain, offset);
+ irq = irq_find_mapping(gc->irq.domain, offset);
irq_dispose_mapping(irq);
}
- irq_domain_remove(gpiochip->irq.domain);
+ irq_domain_remove(gc->irq.domain);
}
if (irqchip) {
@@ -2479,20 +2619,20 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
irqchip->irq_release_resources = NULL;
}
if (irqchip->irq_enable == gpiochip_irq_enable) {
- irqchip->irq_enable = gpiochip->irq.irq_enable;
- irqchip->irq_disable = gpiochip->irq.irq_disable;
+ irqchip->irq_enable = gc->irq.irq_enable;
+ irqchip->irq_disable = gc->irq.irq_disable;
}
}
- gpiochip->irq.irq_enable = NULL;
- gpiochip->irq.irq_disable = NULL;
- gpiochip->irq.chip = NULL;
+ gc->irq.irq_enable = NULL;
+ gc->irq.irq_disable = NULL;
+ gc->irq.chip = NULL;
- gpiochip_irqchip_free_valid_mask(gpiochip);
+ gpiochip_irqchip_free_valid_mask(gc);
}
/**
* gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
- * @gpiochip: the gpiochip to add the irqchip to
+ * @gc: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
* allocate gpiochip irqs from
@@ -2517,7 +2657,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* the pins on the gpiochip can generate a unique IRQ. Everything else
* needs to be open coded.
*/
-int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
@@ -2528,23 +2668,23 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
{
struct device_node *of_node;
- if (!gpiochip || !irqchip)
+ if (!gc || !irqchip)
return -EINVAL;
- if (!gpiochip->parent) {
+ if (!gc->parent) {
pr_err("missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
- gpiochip->irq.threaded = threaded;
- of_node = gpiochip->parent->of_node;
+ gc->irq.threaded = threaded;
+ of_node = gc->parent->of_node;
#ifdef CONFIG_OF_GPIO
/*
* If the gpiochip has an assigned OF node this takes precedence
- * FIXME: get rid of this and use gpiochip->parent->of_node
+ * FIXME: get rid of this and use gc->parent->of_node
* everywhere
*/
- if (gpiochip->of_node)
- of_node = gpiochip->of_node;
+ if (gc->of_node)
+ of_node = gc->of_node;
#endif
/*
* Specifying a default trigger is a terrible idea if DT or ACPI is
@@ -2554,29 +2694,29 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
if (WARN(of_node && type != IRQ_TYPE_NONE,
"%pOF: Ignoring %d default trigger\n", of_node, type))
type = IRQ_TYPE_NONE;
- if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+ if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
+ acpi_handle_warn(ACPI_HANDLE(gc->parent),
"Ignoring %d default trigger\n", type);
type = IRQ_TYPE_NONE;
}
- gpiochip->irq.chip = irqchip;
- gpiochip->irq.handler = handler;
- gpiochip->irq.default_type = type;
- gpiochip->to_irq = gpiochip_to_irq;
- gpiochip->irq.lock_key = lock_key;
- gpiochip->irq.request_key = request_key;
- gpiochip->irq.domain = irq_domain_add_simple(of_node,
- gpiochip->ngpio, first_irq,
- &gpiochip_domain_ops, gpiochip);
- if (!gpiochip->irq.domain) {
- gpiochip->irq.chip = NULL;
+ gc->irq.chip = irqchip;
+ gc->irq.handler = handler;
+ gc->irq.default_type = type;
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.lock_key = lock_key;
+ gc->irq.request_key = request_key;
+ gc->irq.domain = irq_domain_add_simple(of_node,
+ gc->ngpio, first_irq,
+ &gpiochip_domain_ops, gc);
+ if (!gc->irq.domain) {
+ gc->irq.chip = NULL;
return -EINVAL;
}
- gpiochip_set_irq_hooks(gpiochip);
+ gpiochip_set_irq_hooks(gc);
- acpi_gpiochip_request_interrupts(gpiochip);
+ acpi_gpiochip_request_interrupts(gc);
return 0;
}
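For drivers not yet converted to the gc->irq template, the legacy entry point wrapped by this function is the gpiochip_irqchip_add() macro, typically paired with gpiochip_set_chained_irqchip(); a hedged sketch with illustrative names:

	ret = gpiochip_irqchip_add(gc, &foo_irq_chip, 0,
				   handle_edge_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	gpiochip_set_chained_irqchip(gc, &foo_irq_chip, parent_irq,
				     foo_irq_handler);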
@@ -2584,60 +2724,65 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
-static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+static inline int gpiochip_add_irqchip(struct gpio_chip *gc,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
return 0;
}
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+static void gpiochip_irqchip_remove(struct gpio_chip *gc) {}
-static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
+static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
{
return 0;
}
-static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
+static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
{
return 0;
}
-static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
+static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
{ }
#endif /* CONFIG_GPIOLIB_IRQCHIP */
/**
* gpiochip_generic_request() - request the gpio function for a pin
- * @chip: the gpiochip owning the GPIO
+ * @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to request for GPIO function
*/
-int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_gpio_request(chip->gpiodev->base + offset);
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return 0;
+#endif
+
+ return pinctrl_gpio_request(gc->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
/**
* gpiochip_generic_free() - free the gpio function from a pin
- * @chip: the gpiochip to request the gpio function for
+ * @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to free from GPIO function
*/
-void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
{
- pinctrl_gpio_free(chip->gpiodev->base + offset);
+ pinctrl_gpio_free(gc->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
/**
* gpiochip_generic_config() - apply configuration for a pin
- * @chip: the gpiochip owning the GPIO
+ * @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to apply the configuration to
* @config: the configuration to be applied
*/
-int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset,
unsigned long config)
{
- return pinctrl_gpio_set_config(chip->gpiodev->base + offset, config);
+ return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_config);
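Drivers whose pins are muxed by a pin controller normally just point their request/free/set_config hooks at these helpers; the pin-range check added to gpiochip_generic_request() above makes that safe even when no ranges were registered:

	gc->request = gpiochip_generic_request;
	gc->free = gpiochip_generic_free;
	gc->set_config = gpiochip_generic_config;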
@@ -2645,7 +2790,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config);
/**
* gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
- * @chip: the gpiochip to add the range for
+ * @gc: the gpiochip to add the range for
* @pctldev: the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_group: name of the pin group inside the pin controller
@@ -2655,24 +2800,24 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config);
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
struct gpio_pin_range *pin_range;
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
if (!pin_range) {
- chip_err(chip, "failed to allocate pin ranges\n");
+ chip_err(gc, "failed to allocate pin ranges\n");
return -ENOMEM;
}
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
- pin_range->range.gc = chip;
- pin_range->range.name = chip->label;
+ pin_range->range.gc = gc;
+ pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->pctldev = pctldev;
@@ -2686,7 +2831,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
pinctrl_add_gpio_range(pctldev, &pin_range->range);
- chip_dbg(chip, "created GPIO range %d->%d ==> %s PINGRP %s\n",
+ chip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n",
gpio_offset, gpio_offset + pin_range->range.npins - 1,
pinctrl_dev_get_devname(pctldev), pin_group);
@@ -2698,7 +2843,7 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
* gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
- * @chip: the gpiochip to add the range for
+ * @gc: the gpiochip to add the range for
* @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
@@ -2713,24 +2858,24 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
struct gpio_pin_range *pin_range;
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
if (!pin_range) {
- chip_err(chip, "failed to allocate pin ranges\n");
+ chip_err(gc, "failed to allocate pin ranges\n");
return -ENOMEM;
}
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
- pin_range->range.gc = chip;
- pin_range->range.name = chip->label;
+ pin_range->range.gc = gc;
+ pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
pin_range->range.npins = npins;
@@ -2738,11 +2883,11 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
&pin_range->range);
if (IS_ERR(pin_range->pctldev)) {
ret = PTR_ERR(pin_range->pctldev);
- chip_err(chip, "could not create pin range\n");
+ chip_err(gc, "could not create pin range\n");
kfree(pin_range);
return ret;
}
- chip_dbg(chip, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
+ chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
gpio_offset, gpio_offset + npins - 1,
pinctl_name,
pin_offset, pin_offset + npins - 1);
@@ -2755,12 +2900,12 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
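A sketch of registering such a mapping, assuming a controller whose GPIOs 0..15 sit on pins 32..47 of a pinctrl device whose dev_name() is "foo-pinctrl":

	ret = gpiochip_add_pin_range(gc, "foo-pinctrl", 0, 32, 16);
	if (ret)
		return ret;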
/**
* gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings
- * @chip: the chip to remove all the mappings for
+ * @gc: the chip to remove all the mappings for
*/
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+void gpiochip_remove_pin_ranges(struct gpio_chip *gc)
{
struct gpio_pin_range *pin_range, *tmp;
- struct gpio_device *gdev = chip->gpiodev;
+ struct gpio_device *gdev = gc->gpiodev;
list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) {
list_del(&pin_range->node);
@@ -2779,7 +2924,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
*/
static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
{
- struct gpio_chip *chip = desc->gdev->chip;
+ struct gpio_chip *gc = desc->gdev->chip;
int ret;
unsigned long flags;
unsigned offset;
@@ -2805,12 +2950,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
goto done;
}
- if (chip->request) {
- /* chip->request may sleep */
+ if (gc->request) {
+ /* gc->request may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
offset = gpio_chip_hwgpio(desc);
- if (gpiochip_line_is_valid(chip, offset))
- ret = chip->request(chip, offset);
+ if (gpiochip_line_is_valid(gc, offset))
+ ret = gc->request(gc, offset);
else
ret = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
@@ -2822,14 +2967,16 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
goto done;
}
}
- if (chip->get_direction) {
- /* chip->get_direction may sleep */
+ if (gc->get_direction) {
+ /* gc->get_direction may sleep */
spin_unlock_irqrestore(&gpio_lock, flags);
gpiod_get_direction(desc);
spin_lock_irqsave(&gpio_lock, flags);
}
done:
spin_unlock_irqrestore(&gpio_lock, flags);
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
return ret;
}
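The atomic_notifier_call_chain() added above broadcasts line state changes on the new per-device notifier head (its in-tree consumer is the GPIO character device, which turns these into line-info change events). The shape of a listener, sketched with illustrative names against the gpiolib-internal head:

static int foo_line_changed(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct gpio_desc *desc = data;

	if (action == GPIOLINE_CHANGED_REQUESTED)
		pr_debug("GPIO %d requested\n", desc_to_gpio(desc));

	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_line_changed,
};

	/* gdev is gpiolib-internal; registration shown for illustration */
	atomic_notifier_chain_register(&gdev->notifier, &foo_nb);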
@@ -2897,7 +3044,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
{
bool ret = false;
unsigned long flags;
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
might_sleep();
@@ -2905,12 +3052,12 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
spin_lock_irqsave(&gpio_lock, flags);
- chip = desc->gdev->chip;
- if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
- if (chip->free) {
+ gc = desc->gdev->chip;
+ if (gc && test_bit(FLAG_REQUESTED, &desc->flags)) {
+ if (gc->free) {
spin_unlock_irqrestore(&gpio_lock, flags);
- might_sleep_if(chip->can_sleep);
- chip->free(chip, gpio_chip_hwgpio(desc));
+ might_sleep_if(gc->can_sleep);
+ gc->free(gc, gpio_chip_hwgpio(desc));
spin_lock_irqsave(&gpio_lock, flags);
}
kfree_const(desc->label);
@@ -2923,10 +3070,16 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
clear_bit(FLAG_PULL_DOWN, &desc->flags);
clear_bit(FLAG_BIAS_DISABLE, &desc->flags);
clear_bit(FLAG_IS_HOGGED, &desc->flags);
+#ifdef CONFIG_OF_DYNAMIC
+ desc->hog = NULL;
+#endif
ret = true;
}
spin_unlock_irqrestore(&gpio_lock, flags);
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_RELEASED, desc);
+
return ret;
}
@@ -2942,7 +3095,7 @@ void gpiod_free(struct gpio_desc *desc)
/**
* gpiochip_is_requested - return string iff signal was requested
- * @chip: controller managing the signal
+ * @gc: controller managing the signal
* @offset: of signal within controller's 0..(ngpio - 1) range
*
* Returns NULL if the GPIO is not currently requested, else a string.
@@ -2953,14 +3106,16 @@ void gpiod_free(struct gpio_desc *desc)
* help with diagnostics, and knowing that the signal is used as a GPIO
* can help avoid accidentally multiplexing it to another controller.
*/
-const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
+const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset)
{
struct gpio_desc *desc;
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return NULL;
- desc = &chip->gpiodev->descs[offset];
+ desc = gpiochip_get_desc(gc, offset);
+ if (IS_ERR(desc))
+ return NULL;
if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
return NULL;
@@ -2970,7 +3125,7 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
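With the bounds-checked descriptor lookup above, a debug pass over a chip's lines can safely report which consumers hold them; a minimal sketch:

	unsigned int i;

	for (i = 0; i < gc->ngpio; i++) {
		const char *label = gpiochip_is_requested(gc, i);

		if (label)
			chip_dbg(gc, "line %u held by \"%s\"\n", i, label);
	}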
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
- * @chip: GPIO chip
+ * @gc: GPIO chip
* @hwnum: hardware number of the GPIO for which to request the descriptor
* @label: label for the GPIO
* @lflags: lookup flags for this GPIO or 0 if default, this can be used to
@@ -2989,17 +3144,17 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
* code on failure.
*/
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags)
{
- struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
+ struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);
int ret;
if (IS_ERR(desc)) {
- chip_err(chip, "failed to get GPIO descriptor\n");
+ chip_err(gc, "failed to get GPIO descriptor\n");
return desc;
}
@@ -3009,7 +3164,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
- chip_err(chip, "setup of own GPIO %s failed\n", label);
+ chip_err(gc, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
return ERR_PTR(ret);
}
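A sketch of a driver hogging one of its own lines through this helper, assuming line 4 is wired as an active-high reset that must be driven high at probe:

	struct gpio_desc *desc;

	desc = gpiochip_request_own_desc(gc, 4, "reset",
					 GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
	if (IS_ERR(desc))
		return PTR_ERR(desc);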
@@ -3051,9 +3206,9 @@ static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
return gc->set_config(gc, offset, config);
}
-static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
- enum pin_config_param mode)
+static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
{
+ struct gpio_chip *gc = desc->gdev->chip;
unsigned long config;
unsigned arg;
@@ -3068,10 +3223,10 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
}
config = PIN_CONF_PACKED(mode, arg);
- return gpio_do_set_config(gc, offset, config);
+ return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
}
-static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
+static int gpio_set_bias(struct gpio_desc *desc)
{
int bias = 0;
int ret = 0;
@@ -3084,7 +3239,7 @@ static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
bias = PIN_CONFIG_BIAS_PULL_DOWN;
if (bias) {
- ret = gpio_set_config(chip, gpio_chip_hwgpio(desc), bias);
+ ret = gpio_set_config(desc, bias);
if (ret != -ENOTSUPP)
return ret;
}
@@ -3102,18 +3257,18 @@ static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int ret = 0;
VALIDATE_DESC(desc);
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
/*
* It is legal to have no .get() and .direction_input() specified if
* the chip is output-only, but you can't specify .direction_input()
* and not support the .get() operation; that doesn't make sense.
*/
- if (!chip->get && chip->direction_input) {
+ if (!gc->get && gc->direction_input) {
gpiod_warn(desc,
"%s: missing get() but have direction_input()\n",
__func__);
@@ -3126,10 +3281,10 @@ int gpiod_direction_input(struct gpio_desc *desc)
* direction (if .get_direction() is supported) else we silently
* assume we are in input mode after this.
*/
- if (chip->direction_input) {
- ret = chip->direction_input(chip, gpio_chip_hwgpio(desc));
- } else if (chip->get_direction &&
- (chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) {
+ if (gc->direction_input) {
+ ret = gc->direction_input(gc, gpio_chip_hwgpio(desc));
+ } else if (gc->get_direction &&
+ (gc->get_direction(gc, gpio_chip_hwgpio(desc)) != 1)) {
gpiod_warn(desc,
"%s: missing direction_input() operation and line is output\n",
__func__);
@@ -3137,7 +3292,7 @@ int gpiod_direction_input(struct gpio_desc *desc)
}
if (ret == 0) {
clear_bit(FLAG_IS_OUT, &desc->flags);
- ret = gpio_set_bias(chip, desc);
+ ret = gpio_set_bias(desc);
}
trace_gpio_direction(desc_to_gpio(desc), 1, ret);
@@ -3221,7 +3376,6 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- struct gpio_chip *gc;
int ret;
VALIDATE_DESC(desc);
@@ -3239,11 +3393,9 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
return -EIO;
}
- gc = desc->gdev->chip;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
- ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_DRAIN);
+ ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN);
if (!ret)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
@@ -3253,8 +3405,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
}
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_SOURCE);
+ ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
@@ -3263,12 +3414,11 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
goto set_output_flag;
}
} else {
- gpio_set_config(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_PUSH_PULL);
+ gpio_set_config(desc, PIN_CONFIG_DRIVE_PUSH_PULL);
}
set_output_value:
- ret = gpio_set_bias(gc, desc);
+ ret = gpio_set_bias(desc);
if (ret)
return ret;
return gpiod_direction_output_raw_commit(desc, value);
@@ -3287,6 +3437,26 @@ set_output_flag:
EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
+ * gpiod_set_config - sets @config for a GPIO
+ * @desc: descriptor of the GPIO for which to set the configuration
+ * @config: Same packed config format as generic pinconf
+ *
+ * Returns:
+ * 0 on success, %-ENOTSUPP if the controller doesn't support setting the
+ * configuration.
+ */
+int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
+{
+ struct gpio_chip *gc;
+
+ VALIDATE_DESC(desc);
+ gc = desc->gdev->chip;
+
+ return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+}
+EXPORT_SYMBOL_GPL(gpiod_set_config);
+
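The new export takes the same packed format as generic pinconf, which is what lets gpiod_set_debounce() below shrink to a thin wrapper; an equivalent direct call, sketched for a 10 ms debounce:

	unsigned long config;

	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 10000);
	/* same effect as gpiod_set_debounce(desc, 10000) */
	ret = gpiod_set_config(desc, config);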
+/**
* gpiod_set_debounce - sets @debounce time for a GPIO
* @desc: descriptor of the GPIO for which to set debounce time
* @debounce: debounce time in microseconds
@@ -3297,14 +3467,10 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
*/
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
- struct gpio_chip *chip;
- unsigned long config;
-
- VALIDATE_DESC(desc);
- chip = desc->gdev->chip;
+ unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config);
+ return gpiod_set_config(desc, config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -3318,7 +3484,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce);
*/
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
unsigned long packed;
int gpio;
int rc;
@@ -3331,14 +3497,14 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
- chip = desc->gdev->chip;
- if (!chip->set_config)
+ gc = desc->gdev->chip;
+ if (!gc->set_config)
return 0;
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = gpio_do_set_config(chip, gpio, packed);
+ rc = gpio_do_set_config(gc, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
@@ -3397,28 +3563,28 @@ EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int offset;
int value;
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
- value = chip->get ? chip->get(chip, offset) : -EIO;
+ value = gc->get ? gc->get(gc, offset) : -EIO;
value = value < 0 ? value : !!value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
-static int gpio_chip_get_multiple(struct gpio_chip *chip,
+static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- if (chip->get_multiple) {
- return chip->get_multiple(chip, mask, bits);
- } else if (chip->get) {
+ if (gc->get_multiple) {
+ return gc->get_multiple(gc, mask, bits);
+ } else if (gc->get) {
int i, value;
- for_each_set_bit(i, mask, chip->ngpio) {
- value = chip->get(chip, i);
+ for_each_set_bit(i, mask, gc->ngpio) {
+ value = gc->get(gc, i);
if (value < 0)
return value;
__assign_bit(i, bits, value);
@@ -3466,26 +3632,26 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
}
while (i < array_size) {
- struct gpio_chip *chip = desc_array[i]->gdev->chip;
+ struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
int first, j, ret;
- if (likely(chip->ngpio <= FASTPATH_NGPIO)) {
+ if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
} else {
- mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio),
+ mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio),
sizeof(*mask),
can_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!mask)
return -ENOMEM;
}
- bits = mask + BITS_TO_LONGS(chip->ngpio);
- bitmap_zero(mask, chip->ngpio);
+ bits = mask + BITS_TO_LONGS(gc->ngpio);
+ bitmap_zero(mask, gc->ngpio);
if (!can_sleep)
- WARN_ON(chip->can_sleep);
+ WARN_ON(gc->can_sleep);
/* collect all inputs belonging to the same chip */
first = i;
@@ -3500,9 +3666,9 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
i = find_next_zero_bit(array_info->get_mask,
array_size, i);
} while ((i < array_size) &&
- (desc_array[i]->gdev->chip == chip));
+ (desc_array[i]->gdev->chip == gc));
- ret = gpio_chip_get_multiple(chip, mask, bits);
+ ret = gpio_chip_get_multiple(gc, mask, bits);
if (ret) {
if (mask != fastpath)
kfree(mask);
@@ -3640,13 +3806,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0;
- struct gpio_chip *chip = desc->gdev->chip;
+ struct gpio_chip *gc = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
- ret = chip->direction_input(chip, offset);
+ ret = gc->direction_input(gc, offset);
} else {
- ret = chip->direction_output(chip, offset, 0);
+ ret = gc->direction_output(gc, offset, 0);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
}
@@ -3665,15 +3831,15 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0;
- struct gpio_chip *chip = desc->gdev->chip;
+ struct gpio_chip *gc = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
- ret = chip->direction_output(chip, offset, 1);
+ ret = gc->direction_output(gc, offset, 1);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- ret = chip->direction_input(chip, offset);
+ ret = gc->direction_input(gc, offset);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
@@ -3684,33 +3850,34 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- chip->set(chip, gpio_chip_hwgpio(desc), value);
+ gc->set(gc, gpio_chip_hwgpio(desc), value);
}
/*
* set multiple outputs on the same chip;
* use the chip's set_multiple function if available;
* otherwise set the outputs sequentially;
+ * @gc: the GPIO chip we operate on
* @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
* defines which outputs are to be changed
* @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
* defines the values the outputs specified by mask are to be set to
*/
-static void gpio_chip_set_multiple(struct gpio_chip *chip,
+static void gpio_chip_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- if (chip->set_multiple) {
- chip->set_multiple(chip, mask, bits);
+ if (gc->set_multiple) {
+ gc->set_multiple(gc, mask, bits);
} else {
unsigned int i;
/* set outputs if the corresponding mask bit is set */
- for_each_set_bit(i, mask, chip->ngpio)
- chip->set(chip, i, test_bit(i, bits));
+ for_each_set_bit(i, mask, gc->ngpio)
+ gc->set(gc, i, test_bit(i, bits));
}
}
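From the consumer side this fan-out is reached through the bitmap-based array setters; a sketch driving the first three descriptors of an already-acquired array high in one call (descs is assumed to come from gpiod_get_array()):

	DECLARE_BITMAP(values, 3);

	bitmap_fill(values, 3);		/* all three lines high */
	gpiod_set_array_value(3, descs->desc, descs->info, values);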
@@ -3749,26 +3916,26 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
}
while (i < array_size) {
- struct gpio_chip *chip = desc_array[i]->gdev->chip;
+ struct gpio_chip *gc = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
unsigned long *mask, *bits;
int count = 0;
- if (likely(chip->ngpio <= FASTPATH_NGPIO)) {
+ if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
mask = fastpath;
} else {
- mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio),
+ mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio),
sizeof(*mask),
can_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!mask)
return -ENOMEM;
}
- bits = mask + BITS_TO_LONGS(chip->ngpio);
- bitmap_zero(mask, chip->ngpio);
+ bits = mask + BITS_TO_LONGS(gc->ngpio);
+ bitmap_zero(mask, gc->ngpio);
if (!can_sleep)
- WARN_ON(chip->can_sleep);
+ WARN_ON(gc->can_sleep);
do {
struct gpio_desc *desc = desc_array[i];
@@ -3804,10 +3971,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
i = find_next_zero_bit(array_info->set_mask,
array_size, i);
} while ((i < array_size) &&
- (desc_array[i]->gdev->chip == chip));
+ (desc_array[i]->gdev->chip == gc));
/* push collected bits to outputs */
if (count != 0)
- gpio_chip_set_multiple(chip, mask, bits);
+ gpio_chip_set_multiple(gc, mask, bits);
if (mask != fastpath)
kfree(mask);
@@ -3969,7 +4136,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
*/
int gpiod_to_irq(const struct gpio_desc *desc)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int offset;
/*
@@ -3980,10 +4147,10 @@ int gpiod_to_irq(const struct gpio_desc *desc)
if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
return -EINVAL;
- chip = desc->gdev->chip;
+ gc = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
- if (chip->to_irq) {
- int retirq = chip->to_irq(chip, offset);
+ if (gc->to_irq) {
+ int retirq = gc->to_irq(gc, offset);
/* Zero means NO_IRQ */
if (!retirq)
@@ -3997,17 +4164,17 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
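The usual consumer path through this helper, sketched with illustrative names and error handling trimmed to the essentials:

	struct gpio_desc *desc;
	int irq;

	desc = gpiod_get(dev, "wakeup", GPIOD_IN);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	irq = gpiod_to_irq(desc);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, foo_isr, IRQF_TRIGGER_RISING,
			       "foo-wakeup", priv);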
/**
* gpiochip_lock_as_irq() - lock a GPIO to be used as IRQ
- * @chip: the chip the GPIO to lock belongs to
+ * @gc: the chip the GPIO to lock belongs to
* @offset: the offset of the GPIO to lock as IRQ
*
* This is used directly by GPIO drivers that want to lock down
* a certain GPIO line to be used for IRQs.
*/
-int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
- desc = gpiochip_get_desc(chip, offset);
+ desc = gpiochip_get_desc(gc, offset);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -4015,18 +4182,18 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
* If it's fast: flush the direction setting if something changed
* behind our back
*/
- if (!chip->can_sleep && chip->get_direction) {
+ if (!gc->can_sleep && gc->get_direction) {
int dir = gpiod_get_direction(desc);
if (dir < 0) {
- chip_err(chip, "%s: cannot get GPIO direction\n",
+ chip_err(gc, "%s: cannot get GPIO direction\n",
__func__);
return dir;
}
}
if (test_bit(FLAG_IS_OUT, &desc->flags)) {
- chip_err(chip,
+ chip_err(gc,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
@@ -4049,17 +4216,17 @@ EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
/**
* gpiochip_unlock_as_irq() - unlock a GPIO used as IRQ
- * @chip: the chip the GPIO to lock belongs to
+ * @gc: the chip the GPIO to lock belongs to
* @offset: the offset of the GPIO to lock as IRQ
*
* This is used directly by GPIO drivers that want to indicate
* that a certain GPIO is no longer used exclusively for IRQ.
*/
-void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
- desc = gpiochip_get_desc(chip, offset);
+ desc = gpiochip_get_desc(gc, offset);
if (IS_ERR(desc))
return;
@@ -4072,9 +4239,9 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
-void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset)
{
- struct gpio_desc *desc = gpiochip_get_desc(chip, offset);
+ struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
!WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags)))
@@ -4082,9 +4249,9 @@ void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_disable_irq);
-void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
{
- struct gpio_desc *desc = gpiochip_get_desc(chip, offset);
+ struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
!WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
@@ -4094,63 +4261,63 @@ void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_enable_irq);
-bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
-int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset)
+int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset)
{
int ret;
- if (!try_module_get(chip->gpiodev->owner))
+ if (!try_module_get(gc->gpiodev->owner))
return -ENODEV;
- ret = gpiochip_lock_as_irq(chip, offset);
+ ret = gpiochip_lock_as_irq(gc, offset);
if (ret) {
- chip_err(chip, "unable to lock HW IRQ %u for IRQ\n", offset);
- module_put(chip->gpiodev->owner);
+ chip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset);
+ module_put(gc->gpiodev->owner);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_reqres_irq);
-void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset)
+void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset)
{
- gpiochip_unlock_as_irq(chip, offset);
- module_put(chip->gpiodev->owner);
+ gpiochip_unlock_as_irq(gc, offset);
+ module_put(gc->gpiodev->owner);
}
EXPORT_SYMBOL_GPL(gpiochip_relres_irq);
-bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_DRAIN, &chip->gpiodev->descs[offset].flags);
+ return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
-bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_SOURCE, &chip->gpiodev->descs[offset].flags);
+ return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
-bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
+bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset)
{
- if (offset >= chip->ngpio)
+ if (offset >= gc->ngpio)
return false;
- return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags);
+ return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -4388,7 +4555,7 @@ EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
*/
void gpiod_add_hogs(struct gpiod_hog *hogs)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
struct gpiod_hog *hog;
mutex_lock(&gpio_machine_hogs_mutex);
@@ -4400,9 +4567,9 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
* The chip may have been registered earlier, so check if it
* exists and, if so, try to hog the line now.
*/
- chip = find_chip_by_name(hog->chip_label);
- if (chip)
- gpiochip_machine_hog(chip, hog);
+ gc = find_chip_by_name(hog->chip_label);
+ if (gc)
+ gpiochip_machine_hog(gc, hog);
}
mutex_unlock(&gpio_machine_hogs_mutex);
@@ -4452,7 +4619,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return desc;
for (p = &table->table[0]; p->chip_label; p++) {
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
/* idx must always match exactly */
if (p->idx != idx)
@@ -4462,9 +4629,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
continue;
- chip = find_chip_by_name(p->chip_label);
+ gc = find_chip_by_name(p->chip_label);
- if (!chip) {
+ if (!gc) {
/*
* As the lookup table indicates a chip with
* p->chip_label should exist, assume it may
@@ -4477,15 +4644,15 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return ERR_PTR(-EPROBE_DEFER);
}
- if (chip->ngpio <= p->chip_hwnum) {
+ if (gc->ngpio <= p->chip_hwnum) {
dev_err(dev,
"requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
- idx, p->chip_hwnum, chip->ngpio - 1,
- chip->label);
+ idx, p->chip_hwnum, gc->ngpio - 1,
+ gc->label);
return ERR_PTR(-EINVAL);
}
- desc = gpiochip_get_desc(chip, p->chip_hwnum);
+ desc = gpiochip_get_desc(gc, p->chip_hwnum);
*flags = p->flags;
return desc;
@@ -4880,20 +5047,20 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags)
{
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
struct gpio_desc *local_desc;
int hwnum;
int ret;
- chip = gpiod_to_chip(desc);
+ gc = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
- local_desc = gpiochip_request_own_desc(chip, hwnum, name,
+ local_desc = gpiochip_request_own_desc(gc, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
- name, chip->label, hwnum, ret);
+ name, gc->label, hwnum, ret);
return ret;
}
@@ -4911,15 +5078,15 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/**
* gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hog
- * @chip: gpio chip to act on
+ * @gc: gpio chip to act on
*/
-static void gpiochip_free_hogs(struct gpio_chip *chip)
+static void gpiochip_free_hogs(struct gpio_chip *gc)
{
int id;
- for (id = 0; id < chip->ngpio; id++) {
- if (test_bit(FLAG_IS_HOGGED, &chip->gpiodev->descs[id].flags))
- gpiochip_free_own_desc(&chip->gpiodev->descs[id]);
+ for (id = 0; id < gc->ngpio; id++) {
+ if (test_bit(FLAG_IS_HOGGED, &gc->gpiodev->descs[id].flags))
+ gpiochip_free_own_desc(&gc->gpiodev->descs[id]);
}
}
@@ -4942,7 +5109,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
struct gpio_desc *desc;
struct gpio_descs *descs;
struct gpio_array *array_info = NULL;
- struct gpio_chip *chip;
+ struct gpio_chip *gc;
int count, bitmap_size;
count = gpiod_count(dev, con_id);
@@ -4962,7 +5129,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->desc[descs->ndescs] = desc;
- chip = gpiod_to_chip(desc);
+ gc = gpiod_to_chip(desc);
/*
* If pin hardware number of array member 0 is also 0, select
* its chip as a candidate for the fast bitmap processing path.
@@ -4970,8 +5137,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
struct gpio_descs *array;
- bitmap_size = BITS_TO_LONGS(chip->ngpio > count ?
- chip->ngpio : count);
+ bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
+ gc->ngpio : count);
array = kzalloc(struct_size(descs, desc, count) +
struct_size(array_info, invert_mask,
@@ -4994,7 +5161,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->desc = descs->desc;
array_info->size = count;
- array_info->chip = chip;
+ array_info->chip = gc;
bitmap_set(array_info->get_mask, descs->ndescs,
count - descs->ndescs);
bitmap_set(array_info->set_mask, descs->ndescs,
@@ -5002,7 +5169,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->info = array_info;
}
/* Unmark array members which don't belong to the 'fast' chip */
- if (array_info && array_info->chip != chip) {
+ if (array_info && array_info->chip != gc) {
__clear_bit(descs->ndescs, array_info->get_mask);
__clear_bit(descs->ndescs, array_info->set_mask);
}
@@ -5027,8 +5194,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
}
} else if (array_info) {
/* Exclude open drain or open source from fast output */
- if (gpiochip_line_is_open_drain(chip, descs->ndescs) ||
- gpiochip_line_is_open_source(chip, descs->ndescs))
+ if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
+ gpiochip_line_is_open_source(gc, descs->ndescs))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require inversion */
@@ -5116,10 +5283,15 @@ static int __init gpiolib_dev_init(void)
if (ret < 0) {
pr_err("gpiolib: failed to allocate char dev region\n");
bus_unregister(&gpio_bus_type);
- } else {
- gpiolib_initialized = true;
- gpiochip_setup_devs();
+ return ret;
}
+
+ gpiolib_initialized = true;
+ gpiochip_setup_devs();
+
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier));
+
return ret;
}
core_initcall(gpiolib_dev_init);
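gpio_of_notifier itself lives in gpiolib-of.c and is not part of this hunk; the general shape of such an OF reconfig listener, sketched from the notifier API:

static int foo_of_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;

	switch (of_reconfig_get_state_change(action, rd)) {
	case OF_RECONFIG_CHANGE_ADD:
		/* node/property added: hog any new gpio-hog nodes */
		break;
	case OF_RECONFIG_CHANGE_REMOVE:
		/* node/property removed: release the matching hog */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}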
@@ -5129,7 +5301,7 @@ core_initcall(gpiolib_dev_init);
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
unsigned i;
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chip *gc = gdev->chip;
unsigned gpio = gdev->base;
struct gpio_desc *gdesc = &gdev->descs[0];
bool is_out;
@@ -5152,7 +5324,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s",
gpio, gdesc->name ? gdesc->name : "", gdesc->label,
is_out ? "out" : "in ",
- chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ",
+ gc->get ? (gc->get(gc, i) ? "hi" : "lo") : "? ",
is_irq ? "IRQ " : "",
active_low ? "ACTIVE LOW" : "");
seq_printf(s, "\n");
@@ -5204,10 +5376,10 @@ static void gpiolib_seq_stop(struct seq_file *s, void *v)
static int gpiolib_seq_show(struct seq_file *s, void *v)
{
struct gpio_device *gdev = v;
- struct gpio_chip *chip = gdev->chip;
+ struct gpio_chip *gc = gdev->chip;
struct device *parent;
- if (!chip) {
+ if (!gc) {
seq_printf(s, "%s%s: (dangling chip)", (char *)s->private,
dev_name(&gdev->dev));
return 0;
@@ -5216,19 +5388,19 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private,
dev_name(&gdev->dev),
gdev->base, gdev->base + gdev->ngpio - 1);
- parent = chip->parent;
+ parent = gc->parent;
if (parent)
seq_printf(s, ", parent: %s/%s",
parent->bus ? parent->bus->name : "no-bus",
dev_name(parent));
- if (chip->label)
- seq_printf(s, ", %s", chip->label);
- if (chip->can_sleep)
+ if (gc->label)
+ seq_printf(s, ", %s", gc->label);
+ if (gc->can_sleep)
seq_printf(s, ", can sleep");
seq_printf(s, ":\n");
- if (chip->dbg_show)
- chip->dbg_show(s, chip);
+ if (gc->dbg_show)
+ gc->dbg_show(s, gc);
else
gpiolib_dbg_show(s, gdev);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 3e0aab2945d8..853ce681b4a4 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -56,6 +56,7 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
+ struct atomic_notifier_head notifier;
#ifdef CONFIG_PINCTRL
/*
@@ -119,6 +120,9 @@ struct gpio_desc {
const char *label;
/* Name of the GPIO */
const char *name;
+#ifdef CONFIG_OF_DYNAMIC
+ struct device_node *hog;
+#endif
};
int gpiod_request(struct gpio_desc *desc, const char *label);
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index f17d01f076c7..835c88318cec 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
+obj-$(CONFIG_TRACE_GPU_MEM) += trace/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d0aa6cff2e02..43594978958e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,9 +54,6 @@ config DRM_DEBUG_MM
If in doubt, say "N".
-config DRM_EXPORT_FOR_TESTS
- bool
-
config DRM_DEBUG_SELFTEST
tristate "kselftests for DRM"
depends on DRM
@@ -389,6 +386,8 @@ source "drivers/gpu/drm/aspeed/Kconfig"
source "drivers/gpu/drm/mcde/Kconfig"
+source "drivers/gpu/drm/tidss/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
@@ -468,6 +467,9 @@ config DRM_SAVAGE
endif # DRM_LEGACY
+config DRM_EXPORT_FOR_TESTS
+ bool
+
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
config DRM_PANEL_ORIENTATION_QUIRKS
tristate
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6493088a0fdd..7f72ef5e7811 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
+drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
+ drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
@@ -122,3 +123,4 @@ obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
+obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 13340f353ea8..216d932a7831 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MIT
menu "ACP (Audio CoProcessor) Configuration"
+ depends on DRM_AMDGPU
config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da3bcff61b97..2992a49ad4a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -579,6 +579,7 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+ void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
/* check if the asic needs a full reset or if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
/* initialize doorbell layout for specific asic */
@@ -969,6 +970,7 @@ struct amdgpu_device {
int pstate;
/* enable runtime pm on the device */
bool runpm;
+ bool in_runpm;
bool pm_sysfs_en;
bool ucode_sysfs_en;
@@ -992,6 +994,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
@@ -1174,9 +1178,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287620ea..abfbe89e805e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
static const unsigned int compute_vmid_bitmap = 0xFF00;
@@ -126,7 +127,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* this is going to have a few of the MSBs set that we need to
* clear
*/
- bitmap_complement(gpu_resources.queue_bitmap,
+ bitmap_complement(gpu_resources.cp_queue_bitmap,
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
@@ -137,7 +138,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
- clear_bit(i, gpu_resources.queue_bitmap);
+ clear_bit(i, gpu_resources.cp_queue_bitmap);
amdgpu_doorbell_get_kfd_info(adev,
&gpu_resources.doorbell_physical_address,
@@ -178,18 +179,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev);
+ kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev);
+ r = kgd2kfd_resume(adev->kfd.dev, run_pm);
return r;
}
@@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9)
+ void **cpu_ptr, bool cp_mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
- if (mqd_gfx9)
- bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+ if (cp_mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
@@ -402,7 +403,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
if (amdgpu_sriov_vf(adev))
mem_info->mem_clk_max = adev->clock.default_mclk / 100;
- else if (adev->powerplay.pp_funcs) {
+ else if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@@ -427,7 +428,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
/* the sclk is in quantas of 10kHz */
if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100;
- else if (adev->powerplay.pp_funcs)
+ else if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;
@@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
metadata_size, &metadata_flags);
if (flags) {
*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+ : KFD_IOC_ALLOC_MEM_FLAGS_GTT;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
}
out_put:
@@ -525,6 +527,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
return adev->gmc.xgmi.hive_id;
}
+
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->unique_id;
+}
+
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -647,13 +657,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- uint32_t flush_type = 0;
+ const uint32_t flush_type = 0;
bool all_hub = false;
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20)
- flush_type = 2;
-
if (adev->family == AMDGPU_FAMILY_AI)
all_hub = true;
@@ -677,6 +683,11 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ return 0;
+}
+
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
@@ -713,11 +724,11 @@ void kgd2kfd_exit(void)
{
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 47b0f2957d1f..13feb313e9b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
struct amdkfd_process_info {
/* List head of all VMs that belong to a KFD process */
@@ -122,8 +123,8 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -171,6 +172,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
@@ -240,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config);
+
/* KGD2KFD callbacks */
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
@@ -249,8 +254,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd);
-int kgd2kfd_resume(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4bcc175a149d..6529caca88fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
- /* fall through */
+ fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index a7b17c8deb00..4ec6d0c03201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -42,38 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-#if 0
-/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
- * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
- * changes commented out related code, doing the same here for now but
- * need to sync with Ken et al
- */
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-#endif
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -805,7 +773,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 8f052e98a3c6..0b7e78748540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19a10db93d68..ccd635b812b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -41,31 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 8562afe5b761..df841c2ac5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -48,28 +48,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -736,7 +714,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 63d3e6683dfe..aedf67d57449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa8ac9d19a7a..9dff792c9290 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -29,6 +29,7 @@
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
+#include <uapi/linux/kfd_ioctl.h>
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -276,6 +277,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
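+
+/* Remove the KFD eviction fence from a page-table/page-directory BO's
+ * reservation object before the BO is released. */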
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ struct amdgpu_bo *root = bo;
+ struct amdgpu_vm_bo_base *vm_bo;
+ struct amdgpu_vm *vm;
+ struct amdkfd_process_info *info;
+ struct amdgpu_amdkfd_fence *ef;
+ int ret;
+
+ /* we can always get vm_bo from the root PD bo */
+ while (root->parent)
+ root = root->parent;
+
+ vm_bo = root->vm_bo;
+ if (!vm_bo)
+ return 0;
+
+ vm = vm_bo->vm;
+ if (!vm)
+ return 0;
+
+ info = vm->process_info;
+ if (!info || !info->eviction_fence)
+ return 0;
+
+ ef = container_of(dma_fence_get(&info->eviction_fence->base),
+ struct amdgpu_amdkfd_fence, base);
+
+ BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
+ ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
+ dma_resv_unlock(bo->tbo.base.resv);
+
+ dma_fence_put(&ef->base);
+ return ret;
+}
+
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
@@ -364,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
uint32_t mapping_flags;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev)
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -847,9 +884,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
vm_list_node) {
struct amdgpu_bo *pd = peer_vm->root.base.bo;
- ret = amdgpu_sync_resv(NULL,
- sync, pd->tbo.base.resv,
- AMDGPU_FENCE_OWNER_KFD, false);
+ ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_FENCE_OWNER_KFD);
if (ret)
return ret;
}
@@ -1044,6 +1081,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
list_del(&vm->vm_list_node);
mutex_unlock(&process_info->lock);
+ vm->process_info = NULL;
+
/* Release per-process resources when last compute VM is destroyed */
if (!process_info->n_vms) {
WARN_ON(!list_empty(&process_info->kfd_bo_list));
@@ -1122,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
/*
* Check on which domain to allocate BO
*/
- if (flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
- alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+ alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- } else if (flags & ALLOC_MEM_FLAGS_GTT) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
- } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
alloc_flags = 0;
if (!offset || !*offset)
return -EINVAL;
user_addr = untagged_addr(*offset);
- } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
- ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
bo_type = ttm_bo_type_sg;
@@ -1160,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
}
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+ (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
/* Workaround for AQL queue wraparound bug. Map the same
* memory twice. That means we only actually allocate half
@@ -1642,10 +1681,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
+
(*mem)->alloc_flags =
((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
- ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
+ | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -2204,3 +2245,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
kfree(mem);
return 0;
}
+
+/* Returns GPU-specific tiling mode information */
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ /* Those values are not set from GFX9 onwards */
+ config->num_banks = adev->gfx.config.num_banks;
+ config->num_ranks = adev->gfx.config.num_ranks;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 50dff69a0f6e..b1172d93c99c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = adev->pdev->rom;
+ size_t romlen = adev->pdev->romlen;
+ void __iomem *bios;
adev->bios = NULL;
- bios = pci_platform_rom(adev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- adev->bios = kzalloc(size, GFP_KERNEL);
- if (adev->bios == NULL)
+ adev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!adev->bios)
return false;
- memcpy_fromio(adev->bios, bios, size);
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
- return false;
- }
+ memcpy_fromio(adev->bios, bios, romlen);
+ iounmap(bios);
- adev->bios_size = size;
+ if (!check_atom_bios(adev->bios, romlen))
+ goto free_bios;
+
+ adev->bios_size = romlen;
return true;
+free_bios:
+ kfree(adev->bios);
+ return false;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a62cbc8199de..f355d9a752d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1461,6 +1461,20 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
return MODE_OK;
}
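+
+/* Late-register hook: register the connector's DP AUX channel once its
+ * kernel device exists. */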
+static int
+amdgpu_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ int r = 0;
+
+ if (amdgpu_connector->ddc_bus->has_aux) {
+ amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+ r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
.get_modes = amdgpu_connector_dp_get_modes,
.mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1475,6 +1489,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1485,6 +1500,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
void
@@ -1931,7 +1947,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_connector_register(connector);
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a52a084158b1..af91627b19b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold) {
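+ /* Also leave BOs alone while they have active DMA-buf attachments. */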
+ if (p->bytes_moved < p->bytes_moved_threshold &&
+ (!bo->tbo.base.dma_buf ||
+ list_empty(&bo->tbo.base.dma_buf->attachments))) {
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
@@ -651,16 +654,19 @@ out:
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
int r;
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
- amdgpu_bo_explicit_sync(bo));
-
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
@@ -1202,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1211,7 +1216,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
if (r)
goto error_unlock;
@@ -1255,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42f29ea..6ed36a2c5f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
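+
+/* Map a DRM scheduler software priority onto a hardware compute pipe priority. */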
+static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+{
+ switch (prio) {
+ case DRM_SCHED_PRIORITY_HIGH_HW:
+ case DRM_SCHED_PRIORITY_KERNEL:
+ return AMDGPU_GFX_PIPE_PRIO_HIGH;
+ default:
+ return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ }
+}
+
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
+ enum gfx_pipe_priority hw_prio;
enum drm_sched_priority priority;
int r;
@@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
switch (hw_ip) {
- case AMDGPU_HW_IP_GFX:
- sched = &adev->gfx.gfx_ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- scheds = adev->gfx.compute_sched;
- num_scheds = adev->gfx.num_compute_sched;
- break;
- case AMDGPU_HW_IP_DMA:
- scheds = adev->sdma.sdma_sched;
- num_scheds = adev->sdma.num_sdma_sched;
- break;
- case AMDGPU_HW_IP_UVD:
- sched = &adev->uvd.inst[0].ring.sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- sched = &adev->vce.ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- sched = &adev->uvd.inst[0].ring_enc[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- scheds = adev->vcn.vcn_dec_sched;
- num_scheds = adev->vcn.num_vcn_dec_sched;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- scheds = adev->vcn.vcn_enc_sched;
- num_scheds = adev->vcn.num_vcn_enc_sched;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- scheds = adev->jpeg.jpeg_sched;
- num_scheds = adev->jpeg.num_jpeg_sched;
- break;
+ case AMDGPU_HW_IP_GFX:
+ sched = &adev->gfx.gfx_ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ break;
+ case AMDGPU_HW_IP_DMA:
+ scheds = adev->sdma.sdma_sched;
+ num_scheds = adev->sdma.num_sdma_sched;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ sched = &adev->uvd.inst[0].ring.sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ sched = &adev->vce.ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ sched = &adev->uvd.inst[0].ring_enc[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
+ adev->vcn.num_vcn_dec_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
+ adev->vcn.num_vcn_enc_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ scheds = adev->jpeg.jpeg_sched;
+ num_scheds = adev->jpeg.num_jpeg_sched;
+ break;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *aentity,
+ int hw_ip,
+ enum drm_sched_priority priority)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ enum gfx_pipe_priority hw_prio;
+ struct drm_gpu_scheduler **scheds = NULL;
+ unsigned num_scheds;
+
+ /* set sw priority */
+ drm_sched_entity_set_priority(&aentity->entity, priority);
+
+ /* set hw priority */
+ if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ drm_sched_entity_modify_sched(&aentity->entity, scheds,
+ num_scheds);
+ }
+}
+
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
@@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
- struct drm_sched_entity *entity;
-
if (!ctx->entities[i][j])
continue;
- entity = &ctx->entities[i][j]->entity;
- drm_sched_entity_set_priority(entity, ctx_prio);
+ amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+ i, ctx_prio);
}
}
}
@@ -628,20 +666,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
mutex_destroy(&mgr->lock);
}
+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+ int num_compute_sched_normal = 0;
+ int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+ int i;
+
+ /* Use one drm sched array, gfx.compute_sched, to store both the high- and
+ * normal-priority drm compute schedulers */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ if (!adev->gfx.compute_ring[i].has_high_prio)
+ adev->gfx.compute_sched[num_compute_sched_normal++] =
+ &adev->gfx.compute_ring[i].sched;
+ else
+ adev->gfx.compute_sched[num_compute_sched_high--] =
+ &adev->gfx.compute_ring[i].sched;
+ }
+
+ /* compute rings only have two priorities for now */
+ i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+ i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+ /* When compute has no high-priority rings, fall back to the
+ * normal-priority sched array */
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+ } else {
+ adev->gfx.compute_prio_sched[i] =
+ &adev->gfx.compute_sched[num_compute_sched_high - 1];
+ adev->gfx.num_compute_sched[i] =
+ adev->gfx.num_compute_rings - num_compute_sched_normal;
+ }
+}
+
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
int i, j;
+ amdgpu_ctx_init_compute_sched(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
adev->gfx.num_gfx_sched++;
}
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
- adev->gfx.num_compute_sched++;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
adev->sdma.num_sdma_sched++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 337d7cdce8e9..c0f9a651dc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -31,6 +31,9 @@
#include <drm/drm_debugfs.h>
#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
/**
* amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -176,7 +179,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- WREG32(*pos >> 2, value);
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
}
if (r) {
result = r;
@@ -840,6 +843,55 @@ err:
return result;
}
+/**
+ * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit zero to disable or a 32-bit non-zero to enable
+ */
+static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return r;
+ }
+
+ amdgpu_gfx_off_ctrl(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return result;
+}
+
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -888,6 +940,11 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
+ .owner = THIS_MODULE,
+ .write = amdgpu_debugfs_gfxoff_write,
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -897,6 +954,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_sensors_fops,
&amdgpu_debugfs_wave_fops,
&amdgpu_debugfs_gpr_fops,
+ &amdgpu_debugfs_gfxoff_fops,
};
static const char *debugfs_regs_names[] = {
@@ -908,6 +966,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_sensors",
"amdgpu_wave",
"amdgpu_gpr",
+ "amdgpu_gfxoff",
};
/**
@@ -934,18 +993,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1211,11 +1258,47 @@ failure:
return 0;
}
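+
+/* debugfs hook: force SCLK to a single value by clamping the soft min/max
+ * frequency range on SMU-based parts. */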
+static int amdgpu_debugfs_sclk_set(void *data, u64 val)
+{
+ int ret = 0;
+ uint32_t max_freq, min_freq;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+ if (ret || val > max_freq || val < min_freq)
+ return -EINVAL;
+ ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+ } else {
+ return 0;
+ }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
amdgpu_debugfs_ib_preempt, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+ amdgpu_debugfs_sclk_set, "%llu\n");
+
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
+ int r, i;
+
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
adev->ddev->primary->debugfs_root, adev,
@@ -1225,24 +1308,78 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return -EIO;
}
+ adev->smu.debugfs_sclk =
+ debugfs_create_file("amdgpu_force_sclk", 0200,
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_sclk_set);
+ if (!(adev->smu.debugfs_sclk)) {
+ DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
+ return -EIO;
+ }
+
+ /* Register debugfs entries for amdgpu_ttm */
+ r = amdgpu_ttm_debugfs_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init debugfs\n");
+ return r;
+ }
+
+ r = amdgpu_debugfs_pm_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
+ return r;
+ }
+
+ if (amdgpu_debugfs_sa_init(adev)) {
+ dev_err(adev->dev, "failed to register debugfs file for SA\n");
+ }
+
+ if (amdgpu_debugfs_fence_init(adev))
+ dev_err(adev->dev, "fence debugfs file creation failed\n");
+
+ r = amdgpu_debugfs_gem_init(adev);
+ if (r)
+ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_regs_init(adev);
+ if (r)
+ DRM_ERROR("registering register debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r)
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_device_has_dc_support(adev)) {
+ if (dtn_debugfs_init(adev))
+ DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
+ }
+#endif
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring)
+ continue;
+
+ if (amdgpu_debugfs_ring_init(adev, ring)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
+ }
+
+ amdgpu_ras_debugfs_create_all(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
-{
- debugfs_remove(adev->debugfs_preempt);
-}
-
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index f289d28ad6b2..de12d1101526 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,9 +32,8 @@ struct amdgpu_debugfs {
};
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev);
+void amdgpu_debugfs_fini(struct amdgpu_device *adev);
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
const struct drm_info_list *files,
unsigned nfiles);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b8975857d60d..faa3e7102156 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write)
{
- uint64_t last;
unsigned long flags;
+ uint32_t hi = ~0;
+ uint64_t last;
+
+
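+ /* Copy as much as possible through the CPU-visible VRAM aperture first. */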
+#ifdef CONFIG_64BIT
+ last = min(pos + size, adev->gmc.visible_vram_size);
+ if (last > pos) {
+ void __iomem *addr = adev->mman.aper_base_kaddr + pos;
+ size_t count = last - pos;
+
+ if (write) {
+ memcpy_toio(addr, buf, count);
+ mb();
+ amdgpu_asic_flush_hdp(adev, NULL);
+ } else {
+ amdgpu_asic_invalidate_hdp(adev, NULL);
+ mb();
+ memcpy_fromio(buf, addr, count);
+ }
+
+ if (count == size)
+ return;
+
+ pos += count;
+ buf += count / 4;
+ size -= count;
+ }
+#endif
+
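+ /* Access the remainder indirectly through the MM_INDEX/MM_DATA registers. */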
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ for (last = pos + size; pos < last; pos += 4) {
+ uint32_t tmp = pos >> 31;
- last = size - 4;
- for (last += pos; pos <= last; pos += 4) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
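+ /* Only rewrite MM_INDEX_HI when the upper address bits actually change. */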
+ if (tmp != hi) {
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ hi = tmp;
+ }
if (write)
WREG32_NO_KIQ(mmMM_DATA, *buf++);
else
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
/*
@@ -275,6 +306,26 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
BUG();
}
+static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
+ if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+ writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
+}
+
/**
* amdgpu_mm_wreg - write to a memory mapped IO register
*
@@ -288,8 +339,6 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
adev->last_mm_index = v;
}
@@ -297,20 +346,26 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
return amdgpu_kiq_wreg(adev, reg, v);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
- writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
+/*
+ * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC
+ * path if the register is in the RLC-controlled range
+ *
+ * This function is invoked only for debugfs register access.
+ */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
+{
+ if (amdgpu_sriov_fullaccess(adev) &&
+ adev->gfx.rlc.funcs &&
+ adev->gfx.rlc.funcs->is_rlcg_access_range) {
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
+ if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
}
+
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}
/**
@@ -1136,7 +1191,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -2344,15 +2399,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
+ }
}
}
-
adev->ip_blocks[i].status.hw = false;
}
@@ -2686,6 +2742,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (adev->asic_reset_res)
goto fail;
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
} else {
task_barrier_full(&hive->tb);
@@ -2800,7 +2859,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
- adev->usec_timeout *= 2;
+ adev->usec_timeout *= 10;
adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
@@ -3088,22 +3147,6 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- r = amdgpu_debugfs_gem_init(adev);
- if (r)
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_regs_init(adev);
- if (r)
- DRM_ERROR("registering register debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_firmware_init(adev);
- if (r)
- DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_init(adev);
- if (r)
- DRM_ERROR("Creating debugfs files failed (%d).\n", r);
-
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -3177,6 +3220,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+ /* make sure the IB test has finished before entering exclusive mode
+ * to avoid preempting the IB test
+ */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -3219,13 +3268,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- amdgpu_debugfs_regs_cleanup(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- amdgpu_debugfs_preempt_cleanup(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
amdgpu_discovery_fini(adev);
}
@@ -3309,7 +3356,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
- amdgpu_amdkfd_suspend(adev);
+ amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_ras_suspend(adev);
@@ -3393,7 +3440,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
}
}
- r = amdgpu_amdkfd_resume(adev);
+ r = amdgpu_amdkfd_resume(adev, !fbcon);
if (r)
return r;
@@ -3866,8 +3913,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
- if (!r && amdgpu_ras_intr_triggered())
+ if (!r && amdgpu_ras_intr_triggered()) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev->mmhub.funcs &&
+ tmp_adev->mmhub.funcs->reset_ras_error_count)
+ tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+ }
+
amdgpu_ras_intr_cleared();
+ }
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (need_full_reset) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index f95092741c38..27d8ae19a7a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -307,7 +307,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
- DRM_INFO("set register base offset for %s\n",
+ DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6d520a3eec40..84cee27cd7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -99,7 +99,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(crtc)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -219,7 +219,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
if (!adev->enable_virtual_display)
work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+ amdgpu_get_vblank_counter_kms(crtc);
/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -924,3 +924,15 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
return AMDGPU_CRTC_IRQ_NONE;
}
}
+
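+/* drm_crtc_helper-style wrapper around the pipe-based scanout position query. */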
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index a59cd47aa6c1..ffeb20f11c07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -223,6 +223,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
}
/**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ /* pin buffer into GTT */
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ amdgpu_bo_unpin(bo);
+}
+
+/**
* amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
* @dir: DMA direction
@@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct sg_table *sgt;
long r;
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return ERR_PTR(r);
+ if (!bo->pin_count) {
+ /* move buffer into GTT */
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return ERR_PTR(r);
+
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ AMDGPU_GEM_DOMAIN_GTT)) {
+ return ERR_PTR(-EBUSY);
+ }
sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
if (IS_ERR(sgt))
@@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
- amdgpu_bo_unpin(bo);
}
/**
@@ -327,9 +364,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .dynamic_mapping = true,
.attach = amdgpu_dma_buf_attach,
.detach = amdgpu_dma_buf_detach,
+ .pin = amdgpu_dma_buf_pin,
+ .unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
.unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
@@ -413,6 +451,73 @@ error:
}
/**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_placement placement = {};
+ struct amdgpu_vm_bo_base *bo_base;
+ int r;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ return;
+
+ r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
+ return;
+ }
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+ if (ticket) {
+ /* When we get an error here it means that somebody
+ * else is holding the VM lock and updating page tables,
+ * so we can just continue here.
+ */
+ r = dma_resv_lock(resv, ticket);
+ if (r)
+ continue;
+
+ } else {
+ /* TODO: This is more problematic; we actually need
+ * to allow page table updates without holding the
+ * lock.
+ */
+ if (!dma_resv_trylock(resv))
+ continue;
+ }
+
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (!r)
+ r = amdgpu_vm_handle_moved(adev, vm);
+
+ if (r && r != -EBUSY)
+ DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+ r);
+
+ dma_resv_unlock(resv);
+ }
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .move_notify = amdgpu_dma_buf_move_notify
+};
+
+/**
* amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
* @dev: DRM device
* @dma_buf: Shared DMA buffer
@@ -444,7 +549,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
if (IS_ERR(obj))
return obj;
- attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
+ &amdgpu_dma_buf_attach_ops, obj);
if (IS_ERR(attach)) {
drm_gem_object_put(obj);
return ERR_CAST(attach);
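
With pin/unpin on the export side and move_notify on the import side, both halves of the dynamic DMA-buf protocol are now in place: an unpinned buffer may migrate, and every importer is told to drop its cached mapping when that happens. A minimal sketch of a generic dynamic importer, assuming hypothetical names for everything except the core API:

    static void example_move_notify(struct dma_buf_attachment *attach)
    {
            /* mappings built from this attachment are now stale; invalidate
             * them and rebuild lazily on next use, as the amdgpu callback
             * above does for its VM page tables */
    }

    static const struct dma_buf_attach_ops example_attach_ops = {
            .move_notify = example_move_notify,
    };

    /* the last argument comes back as attach->importer_priv */
    attach = dma_buf_dynamic_attach(dma_buf, dev, &example_attach_ops, obj);
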
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index a2e8c3dfb4f1..ba1bb95a3cf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -1171,3 +1171,20 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate)
+{
+ int ret = 0;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_set_df_cstate(smu, cstate);
+ else if (pp_funcs &&
+ pp_funcs->set_df_cstate)
+ ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+
+ return ret;
+}
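
A usage sketch for the new wrapper. The DF_CSTATE_* values are assumed here to be the pp_df_cstate enumerators from the powerplay interface; verify the names against kgd_pp_interface.h before relying on them:

    /* keep the data fabric out of deep C-states around latency-sensitive
     * work, then allow them again (enumerator names assumed) */
    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW);
    /* ... XGMI/RAS-sensitive sequence ... */
    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
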
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 902ca6c00cca..936d85aa0fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -448,6 +448,8 @@ struct amdgpu_pm {
/* powerplay feature */
uint32_t pp_feature;
+ /* Used for I2C access to various EEPROMs on relevant ASICs */
+ struct i2c_adapter smu_i2c;
};
#define R600_SSTU_DFLT 0
@@ -533,4 +535,7 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 42f4febe24c6..8ea86ffdea0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1021,6 +1021,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct drm_device *dev;
+ struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
int ret, retry = 0;
bool supports_atomic = false;
@@ -1090,6 +1091,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ amdgpu_driver_load_kms(dev, ent->driver_data);
+
retry_init:
ret = drm_dev_register(dev, ent->driver_data);
if (ret == -EAGAIN && ++retry <= 3) {
@@ -1100,6 +1103,11 @@ retry_init:
} else if (ret)
goto err_pci;
+ adev = dev->dev_private;
+ ret = amdgpu_debugfs_init(adev);
+ if (ret)
+ DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
+
return 0;
err_pci:
@@ -1119,9 +1127,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
#endif
DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
- drm_dev_put(dev);
+ amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ drm_dev_put(dev);
}
static void
@@ -1220,11 +1229,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
}
+ adev->in_runpm = true;
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
ret = amdgpu_device_suspend(drm_dev, false);
+ if (ret)
+ return ret;
+
if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
@@ -1278,6 +1291,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ adev->in_runpm = false;
return 0;
}
@@ -1285,24 +1299,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_dev->dev_private;
- struct drm_crtc *crtc;
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ int ret = 1;
if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
+ if (amdgpu_device_has_dc_support(adev)) {
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(drm_dev);
+
+ drm_for_each_crtc(crtc, drm_dev) {
+ if (crtc->state->active) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ drm_modeset_unlock_all(drm_dev);
+
+ } else {
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+ ret = -EBUSY;
+ break;
+ }
}
+
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+ mutex_unlock(&drm_dev->mode_config.mutex);
}
+ if (ret == -EBUSY)
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- return 1;
+ return ret;
}
long amdgpu_drm_ioctl(struct file *filp,
@@ -1377,32 +1422,15 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-static bool
-amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE,
- .load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
- .unload = amdgpu_driver_unload_kms,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = amdgpu_get_crtc_scanout_position,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free,
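
The reworked runtime-idle callback keeps the runtime-PM contract: a positive return value tells the PM core not to suspend synchronously, while mark_last_busy plus autosuspend re-arm the inactivity timer. A stripped-down sketch of that contract, independent of the display checks above:

    static int example_runtime_idle(struct device *dev)
    {
            /* refuse an immediate suspend; let the autosuspend timer fire
             * once the device has stayed idle long enough */
            pm_runtime_mark_last_busy(dev);
            pm_runtime_autosuspend(dev);
            return 1;
    }
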
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 2672dc64a310..9ae7b61f696a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -336,15 +336,12 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
&amdgpu_fb_helper_funcs);
- ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
- AMDGPUFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
if (ret) {
kfree(rfbdev);
return ret;
}
- drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
drm_helper_disable_unused_functions(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3c01252b1e0e..7531527067df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -503,9 +503,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- if (amdgpu_debugfs_fence_init(adev))
- dev_err(adev->dev, "fence debugfs file creation failed\n");
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 0f960b498792..6b9c9193cdfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
return adev->gfx.mec.num_mec > 1;
}
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue)
+{
+ /* Policy: queue 0 of each pipe is the high-priority compute queue */
+ return (queue == 0);
+}
+
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
@@ -477,7 +485,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
@@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_gfx_process_ras_data_cb,
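
A sketch of how the new queue-priority policy would be consumed by a ring setup loop; the loop itself is illustrative, only the helper and the AMDGPU_GFX_PIPE_PRIO_* enumerators (added in the gfx.h hunk below) come from this patch:

    int queue, prio;

    for (queue = 0; queue < adev->gfx.mec.num_queue_per_pipe; ++queue) {
            prio = amdgpu_gfx_is_high_priority_compute_queue(adev, queue) ?
                    AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
            /* ... program the queue's MQD with prio ... */
    }
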
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ca17ffb01301..5825692d07e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -41,6 +41,15 @@
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+enum gfx_pipe_priority {
+ AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
+ AMDGPU_GFX_PIPE_PRIO_HIGH,
+ AMDGPU_GFX_PIPE_PRIO_MAX
+};
+
+#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
+#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+
struct amdgpu_mec {
struct amdgpu_bo *hpd_eop_obj;
u64 hpd_eop_gpu_addr;
@@ -151,6 +160,8 @@ struct amdgpu_gfx_config {
unsigned num_gpus;
unsigned multi_gpu_tile_size;
unsigned mc_arb_ramcfg;
+ unsigned num_banks;
+ unsigned num_ranks;
unsigned gb_addr_config;
unsigned num_rbs;
unsigned gs_vgt_table_depth;
@@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs {
u32 queue, u32 vmid);
int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
+ void (*reset_ras_error_count) (struct amdgpu_device *adev);
};
struct sq_work {
@@ -278,8 +290,9 @@ struct amdgpu_gfx {
uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
- uint32_t num_compute_sched;
+ uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 60655834d649..ccbd7acfc4cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -48,7 +48,6 @@
* produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
*/
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/**
* amdgpu_ib_get - request an IB (Indirect Buffer)
@@ -295,9 +294,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
}
adev->ib_pool_ready = true;
- if (amdgpu_debugfs_sa_init(adev)) {
- dev_err(adev->dev, "failed to register debugfs file for SA\n");
- }
+
return 0;
}
@@ -421,7 +418,7 @@ static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
#endif
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d42be880a236..4981e443a884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
drm_sched_job_cleanup(s_job);
- amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
int r;
if (!f)
@@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 60591dbc2097..fd1dc3236eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
goto done_free;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
-
if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -170,10 +167,17 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+ (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */
+ adev->runpm = true;
+ else if (amdgpu_device_supports_baco(dev) &&
+ (amdgpu_runtime_pm != 0) &&
+ (adev->asic_type >= CHIP_TOPAZ) &&
+ (adev->asic_type != CHIP_VEGA10) &&
+ (adev->asic_type != CHIP_VEGA20) &&
+ (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
adev->runpm = true;
else if (amdgpu_device_supports_baco(dev) &&
- (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+ (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 on CI */
adev->runpm = true;
/* Call ACPI methods: require modeset init
@@ -1110,14 +1114,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
/**
* amdgpu_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int vpos, hpos, stat;
u32 count;
@@ -1177,14 +1182,15 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to enable vblank interrupt for
+ * @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
@@ -1194,13 +1200,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to disable vblank interrupt for
+ * @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
index 676c48c02d77..ead3dc572ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
};
if (!adev->mmhub.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1cd78940cf82..e89fb35fec71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index eb9975f4decb..37ba07e2feb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -612,6 +612,11 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
/* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev);
void amdgpu_fbdev_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 7d5c3a9de9ea..6201a5f4b4fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "pcie_bif_err_count",
- .debugfs_name = "pcie_bif_err_inject",
};
if (!adev->nbio.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e3f16b49e970..c687f5415b3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,6 +31,7 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
@@ -925,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+ if (bo->tbo.base.import_attach)
+ dma_buf_pin(bo->tbo.base.import_attach);
+
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1008,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
amdgpu_bo_subtract_pin_size(bo);
+ if (bo->tbo.base.import_attach)
+ dma_buf_unpin(bo->tbo.base.import_attach);
+
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
@@ -1274,6 +1281,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ bo->mem.mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
+
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
@@ -1307,6 +1318,12 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_unreserve_memory_limit(abo);
+ /* We only remove the fence if the resv has been individualized. */
+ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
+ && bo->base.resv != &bo->base._resv);
+ if (bo->base.resv == &bo->base._resv)
+ amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+
if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
@@ -1403,30 +1420,52 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
}
/**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
*
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
* @owner: fence owner
* @intr: Whether the wait is interruptible
*
+ * Extract the fences from the reservation object and wait for them to finish.
+ *
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
+ amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
-
return r;
}
/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER, owner, intr);
+}
+
+/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
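
The split gives callers a way to wait on an arbitrary reservation object with an explicit synchronization mode instead of always using the BO's own resv. A short usage sketch (the surrounding context is assumed):

    /* wait, interruptibly, for every fence in resv that was not emitted
     * by `owner`; mirrors what amdgpu_bo_sync_wait() now does internally */
    r = amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
                                 AMDGPU_SYNC_NE_OWNER, owner, true);
    if (r)
            return r;
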
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 36dec51d1ef1..5e39ecd8cc28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
@@ -316,6 +319,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
#endif
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
bool amdgpu_bo_support_uswc(u64 bo_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b03b1eb7ba04..f197f1be0969 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -41,8 +41,6 @@
#include "hwmgr.h"
#define WIDTH_4K 3840
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
@@ -94,6 +92,9 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
if (adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
+
+ if (is_support_sw_smu(adev))
+ smu_set_ac_dc(&adev->smu);
}
}
@@ -3398,11 +3399,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file unique_id\n");
return ret;
}
- ret = amdgpu_debugfs_pm_init(adev);
- if (ret) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- return ret;
- }
if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU)) {
@@ -3669,7 +3665,7 @@ static const struct drm_info_list amdgpu_pm_info_list[] = {
};
#endif
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 3da1da277805..5db0ef86e84c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -43,4 +43,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 146f96661b6b..be50867ea644 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,6 +24,7 @@
*/
#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -38,6 +39,42 @@
static void psp_set_funcs(struct amdgpu_device *adev);
+static int psp_sysfs_init(struct amdgpu_device *adev);
+static void psp_sysfs_fini(struct amdgpu_device *adev);
+
+/*
+ * Because DF Cstate management is centralized in the PMFW, the
+ * firmware loading sequence is updated as below:
+ * - Load KDB
+ * - Load SYS_DRV
+ * - Load tOS
+ * - Load PMFW
+ * - Setup TMR
+ * - Load other non-psp fw
+ * - Load ASD
+ * - Load XGMI/RAS/HDCP/DTM TA if any
+ *
+ * This new sequence is required for
+ * - Arcturus
+ * - Navi12 and onwards
+ */
+static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ psp->pmfw_centralized_cstate_management = false;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ if ((adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type >= CHIP_NAVI12))
+ psp->pmfw_centralized_cstate_management = true;
+}
+
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -75,6 +112,8 @@ static int psp_early_init(void *handle)
psp->adev = adev;
+ psp_check_pmfw_centralized_cstate_management(psp);
+
return 0;
}
@@ -101,6 +140,13 @@ static int psp_sw_init(void *handle)
return ret;
}
+ if (adev->asic_type == CHIP_NAVI10) {
+ ret = psp_sysfs_init(adev);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -117,6 +163,10 @@ static int psp_sw_fini(void *handle)
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
}
+
+ if (adev->asic_type == CHIP_NAVI10)
+ psp_sysfs_fini(adev);
+
return 0;
}
@@ -150,6 +200,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
int ret;
int index;
int timeout = 2000;
+ bool ras_intr = false;
mutex_lock(&psp->mutex);
@@ -174,7 +225,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
* because gpu reset thread triggered and lock resource should
* be released for psp resume sequence.
*/
- if (amdgpu_ras_intr_triggered())
+ ras_intr = amdgpu_ras_intr_triggered();
+ if (ras_intr)
break;
msleep(1);
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
@@ -187,7 +239,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if (psp->cmd_buf_mem->resp.status || !timeout) {
+ if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
@@ -558,7 +610,7 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
-static int psp_xgmi_terminate(struct psp_context *psp)
+int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
@@ -579,7 +631,7 @@ static int psp_xgmi_terminate(struct psp_context *psp)
return 0;
}
-static int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -1081,7 +1133,7 @@ static int psp_hw_start(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
int ret;
- if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
+ if (!amdgpu_sriov_vf(adev)) {
if (psp->kdb_bin_size &&
(psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp);
@@ -1116,10 +1168,17 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
+ /*
+ * For ASICs with DF Cstate management centralized in the
+ * PMFW, TMR setup must be performed after the PMFW is
+ * loaded and before any other non-PSP firmware is loaded.
+ */
+ if (!psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
}
return 0;
@@ -1316,9 +1375,10 @@ static int psp_np_fw_load(struct psp_context *psp)
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
- if (psp->autoload_supported) {
+ if (psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- if (!ucode->fw)
+ if (!ucode->fw || amdgpu_sriov_vf(adev))
goto out;
ret = psp_execute_np_fw_load(psp, ucode);
@@ -1326,6 +1386,14 @@ static int psp_np_fw_load(struct psp_context *psp)
return ret;
}
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
+ }
+
out:
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
@@ -1333,7 +1401,9 @@ out:
continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- (psp_smu_reload_quirk(psp) || psp->autoload_supported))
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
continue;
if (amdgpu_sriov_vf(adev) &&
@@ -1444,16 +1514,6 @@ skip_memalloc:
return ret;
}
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- ret = psp_xgmi_initialize(psp);
- /* Warn on XGMI session initialize failure
- * instead of stopping driver initialization
- */
- if (ret)
- dev_err(psp->adev->dev,
- "XGMI: Failed to initialize XGMI session\n");
- }
-
if (psp->adev->psp.ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
@@ -1518,10 +1578,6 @@ static int psp_hw_fini(void *handle)
void *tmr_buf;
void **pptr;
- if (adev->gmc.xgmi.num_physical_nodes > 1 &&
- psp->xgmi_context.initialized == 1)
- psp_xgmi_terminate(psp);
-
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
psp_dtm_terminate(psp);
@@ -1777,6 +1833,97 @@ static int psp_set_powergating_state(void *handle,
return 0;
}
+static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t fw_ver;
+ int ret;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
+ mutex_unlock(&adev->psp.mutex);
+
+ if (ret) {
+ DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+}
+
+static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int ret;
+ char fw_name[100];
+ const struct firmware *usbc_pd_fw;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
+ ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
+ if (ret)
+ goto fail;
+
+ /* We need contiguous physical memory for the FW so the PSP can access it */
+ cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
+
+ ret = dma_mapping_error(adev->dev, dma_addr);
+ if (ret)
+ goto rel_buf;
+
+ memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
+
+ /*
+ * x86 specific workaround.
+ * Without it the buffer is invisible to the PSP.
+ *
+ * TODO Remove once PSP starts snooping CPU cache
+ */
+#ifdef CONFIG_X86
+ clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
+#endif
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
+ mutex_unlock(&adev->psp.mutex);
+
+rel_buf:
+ dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
+ release_firmware(usbc_pd_fw);
+
+fail:
+ if (ret) {
+ DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
+ psp_usbc_pd_fw_sysfs_read,
+ psp_usbc_pd_fw_sysfs_write);
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
@@ -1795,6 +1942,21 @@ const struct amd_ip_funcs psp_ip_funcs = {
.set_powergating_state = psp_set_powergating_state,
};
+static int psp_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
+
+ if (ret)
+ DRM_ERROR("Failed to create USBC PD FW control file!");
+
+ return ret;
+}
+
+static void psp_sysfs_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
+}
+
static const struct amdgpu_psp_funcs psp_funcs = {
.check_fw_loading_status = psp_check_fw_loading_status,
};
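
Taken together, the attribute pair gives root a simple flash path for the USB-C power-delivery controller firmware on Navi10: reading the usbc_pd_fw attribute under the device's sysfs directory reports the running firmware version in hex, while writing a file name loads that image from the "amdgpu/" firmware directory into a DMA-coherent buffer and hands its bus address to the PSP. The attribute is deliberately S_IRUGO | S_IWUSR, so only root can trigger a flash.
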
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 611021514c52..297435c0c7c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -114,6 +114,8 @@ struct psp_funcs
int (*mem_training)(struct psp_context *psp, uint32_t ops);
uint32_t (*ring_get_wptr)(struct psp_context *psp);
void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
+ int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
+ int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -264,6 +266,8 @@ struct psp_context
atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported;
+ /* flag to mark whether df cstate management centralized to PMFW */
+ bool pmfw_centralized_cstate_management;
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
@@ -349,6 +353,14 @@ struct amdgpu_psp_funcs {
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+#define psp_load_usbc_pd_fw(psp, dma_addr) \
+ ((psp)->funcs->load_usbc_pd_fw ? \
+ (psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
+
+#define psp_read_usbc_pd_fw(psp, fw_ver) \
+ ((psp)->funcs->read_usbc_pd_fw ? \
+ (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -362,6 +374,8 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size);
+int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index cef94e2169fe..3c32a94d2424 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -31,6 +31,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
@@ -280,6 +281,11 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
struct ras_debug_if data;
int ret = 0;
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+ return size;
+ }
+
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
if (ret)
return -EINVAL;
@@ -393,6 +399,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
.head = obj->head,
};
+ if (amdgpu_ras_intr_triggered())
+ return snprintf(buf, PAGE_SIZE,
+ "Query currently inaccessible\n");
+
if (amdgpu_ras_error_query(obj->adev, &info))
return -EINVAL;
@@ -720,6 +730,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
if (adev->nbio.funcs->query_ras_error_count)
adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+ break;
default:
break;
}
@@ -742,20 +755,6 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
-uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
-{
- uint32_t df_inst_id;
-
- if ((!adev->df.funcs) ||
- (!adev->df.funcs->get_df_inst_id) ||
- (!adev->df.funcs->get_dram_base_addr))
- return addr;
-
- df_inst_id = adev->df.funcs->get_df_inst_id(adev);
-
- return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
-}
-
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -775,8 +774,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
/* Calculate XGMI relative offset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- block_info.address = get_xgmi_relative_phy_addr(adev,
- block_info.address);
+ block_info.address =
+ amdgpu_xgmi_get_relative_phy_addr(adev,
+ block_info.address);
}
switch (info->head.block) {
@@ -1122,6 +1122,32 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
&amdgpu_ras_debugfs_ops);
}
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_fs_if fs_info;
+
+ /*
+ * this is not called in the resume path, so there is no need
+ * to check suspend and gpu reset status
+ */
+ if (!con)
+ return;
+
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ list_for_each_entry(obj, &con->head, node) {
+ if (amdgpu_ras_is_supported(adev, obj->head.block) &&
+ (obj->attr_inuse == 1)) {
+ sprintf(fs_info.debugfs_name, "%s_err_inject",
+ ras_block_str(obj->head.block));
+ fs_info.head = obj->head;
+ amdgpu_ras_debugfs_create(adev, &fs_info);
+ }
+ }
+}
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
@@ -1154,7 +1180,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
amdgpu_ras_sysfs_create_feature_node(adev);
- amdgpu_ras_debugfs_create_ctrl_node(adev);
return 0;
}
@@ -1319,6 +1344,33 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
}
/* ih end */
+/* traverse all IPs except NBIO to query their error counters */
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ /*
+ * The PCIE_BIF block has its own ISR, wired to the RAS
+ * controller interrupt; its specific RAS counter query is
+ * done in that ISR, so skip the block in this common
+ * sync-flood ISR path.
+ */
+ if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
+ continue;
+
+ amdgpu_ras_error_query(adev, &info);
+ }
+}
+
/* recovery begin */
/* return 0 on success.
@@ -1373,6 +1425,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ /*
+ * Query and print non zero error counter per IP block for
+ * awareness before recovering GPU.
+ */
+ amdgpu_ras_log_on_err_counter(ras->adev);
+
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
atomic_set(&ras->in_recovery, 0);
@@ -1713,18 +1771,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*hw_supported = 0;
*supported = 0;
- if (amdgpu_sriov_vf(adev) ||
+ if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
(adev->asic_type != CHIP_VEGA20 &&
adev->asic_type != CHIP_ARCTURUS))
return;
- if (adev->is_atom_fw &&
- (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
- amdgpu_atomfirmware_sram_ecc_supported(adev)))
- *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ DRM_INFO("HBM ECC is active.\n");
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("HBM ECC is not presented.\n");
+
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ DRM_INFO("SRAM ECC is active.\n");
+ *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("SRAM ECC is not presented.\n");
+
+ /* hw_supported needs to be aligned with RAS block mask. */
+ *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
*supported = amdgpu_ras_enable == 0 ?
- 0 : *hw_supported & amdgpu_ras_mask;
+ 0 : *hw_supported & amdgpu_ras_mask;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -1825,8 +1895,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
goto interrupt;
}
- amdgpu_ras_debugfs_create(adev, fs_info);
-
r = amdgpu_ras_sysfs_create(adev, fs_info);
if (r)
goto sysfs;
@@ -1835,7 +1903,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
cleanup:
amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
@@ -1852,7 +1919,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
return;
amdgpu_ras_sysfs_remove(adev, ras_block);
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
amdgpu_ras_feature_enable(adev, ras_block, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index a5fe29a9373e..55c3eceb390d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head);
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2a8e04895595..c0096097bbcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -25,10 +25,11 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include <linux/bits.h>
-#include "smu_v11_0_i2c.h"
+#include "atom.h"
-#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
-#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -55,6 +56,45 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
+static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+ if (!i2c_addr || !atom_ctx)
+ return false;
+
+ if (strnstr(atom_ctx->vbios_version,
+ "D342",
+ sizeof(atom_ctx->vbios_version)))
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342;
+ else
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
+
+ return true;
+}
+
+static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ if (!i2c_addr)
+ return false;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
+ break;
+
+ case CHIP_ARCTURUS:
+ return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
unsigned char *buff)
{
@@ -83,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
unsigned char *buff)
{
int ret = 0;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
struct i2c_msg msg = {
.addr = 0,
.flags = 0,
@@ -96,15 +137,13 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
msg.addr = control->i2c_address;
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1)
DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
return ret;
}
-
-
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
{
int i;
@@ -212,32 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff,
};
- mutex_init(&control->tbl_mutex);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
- ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
- break;
-
- case CHIP_ARCTURUS:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
- ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
- break;
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.smu_i2c.algo)
+ return -ENOENT;
- default:
- return 0;
- }
+ if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
+ return -EINVAL;
- if (ret) {
- DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
- return ret;
- }
+ mutex_init(&control->tbl_mutex);
msg.addr = control->i2c_address;
-
/* Read/Create table header from EEPROM address 0 */
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1) {
DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
return ret;
@@ -263,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return ret == 1 ? 0 : -EIO;
}
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
- break;
- case CHIP_ARCTURUS:
- smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
- break;
-
- default:
- return;
- }
-}
-
static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *record,
unsigned char *buff)
@@ -436,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
control->next_addr += EEPROM_TABLE_RECORD_SIZE;
}
- ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
+ ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num);
if (ret < 1) {
DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ca78f812d436..7e8647a05df7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header {
struct amdgpu_ras_eeprom_control {
struct amdgpu_ras_eeprom_table_header tbl_hdr;
- struct i2c_adapter eeprom_accessor;
uint32_t next_addr;
unsigned int num_recs;
struct mutex tbl_mutex;
@@ -79,7 +78,6 @@ struct eeprom_table_record {
}__attribute__((__packed__));
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e5c83e164d82..a7e1d0425ed0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -48,9 +48,6 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -154,76 +151,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
- * amdgpu_ring_priority_put - restore a ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Release a request for executing at @priority
- */
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- int i;
-
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
- return;
-
- /* no need to restore if the job is already at the lowest priority */
- if (priority == DRM_SCHED_PRIORITY_NORMAL)
- return;
-
- mutex_lock(&ring->priority_mutex);
- /* something higher prio is executing, no need to decay */
- if (ring->priority > priority)
- goto out_unlock;
-
- /* decay priority to the next level with a job available */
- for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
- if (i == DRM_SCHED_PRIORITY_NORMAL
- || atomic_read(&ring->num_jobs[i])) {
- ring->priority = i;
- ring->funcs->set_priority(ring, i);
- break;
- }
- }
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
- * amdgpu_ring_priority_get - change the ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Request a ring's priority to be raised to @priority (refcounted).
- */
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
- return;
-
- mutex_lock(&ring->priority_mutex);
- if (priority <= ring->priority)
- goto out_unlock;
-
- ring->priority = priority;
- ring->funcs->set_priority(ring, priority);
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
@@ -334,10 +261,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
- if (amdgpu_debugfs_ring_init(adev, ring)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
- }
-
return 0;
}
@@ -351,12 +274,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->sched.ready = false;
/* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
+ ring->sched.ready = false;
+
amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
@@ -367,8 +291,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
&ring->gpu_addr,
(void **)&ring->ring);
- amdgpu_debugfs_ring_fini(ring);
-
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
@@ -485,8 +407,8 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
#endif
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev->ddev->primary;
@@ -507,13 +429,6 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
return 0;
}
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
-{
-#if defined(CONFIG_DEBUG_FS)
- debugfs_remove(ring->ent);
-#endif
-}
-
/**
* amdgpu_ring_test_helper - tests ring and set sched readiness status
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 930316e60155..9a443013d70d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -167,9 +167,6 @@ struct amdgpu_ring_funcs {
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
- /* priority functions */
- void (*set_priority) (struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -222,6 +219,7 @@ struct amdgpu_ring {
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
+ bool has_high_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
@@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -328,4 +322,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index d3d4707f2168..60bb3e8b3118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -126,6 +126,9 @@ struct amdgpu_rlc_funcs {
void (*stop)(struct amdgpu_device *adev);
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
+ void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+ void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+ bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
struct amdgpu_rlc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index a2ee30b16212..250a309e4dee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
uint32_t index = 0;
int r;
- if (vmid == 0 || !amdgpu_mcbp)
+ /* don't enable OS preemption on SDMA under SRIOV */
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
return 0;
r = amdgpu_sdma_get_index_from_ring(ring, &index);
@@ -92,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
struct ras_fs_if fs_info = {
.sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
};
if (!ih_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 485335267d78..4b352206354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs {
void (*ras_fini)(struct amdgpu_device *adev);
int (*query_ras_error_count)(struct amdgpu_device *adev,
uint32_t instance, void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_sdma {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a09b6b9c27d1..b86392253696 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @explicit_sync: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
*
* Sync to the fence
*/
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner, bool explicit_sync)
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner)
{
struct dma_resv_list *flist;
struct dma_fence *f;
- void *fence_owner;
unsigned i;
int r = 0;
@@ -229,30 +228,46 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
for (i = 0; i < flist->shared_count; ++i) {
+ void *fence_owner;
+
f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv));
+
+ fence_owner = amdgpu_sync_get_owner(f);
+
+ /* Always sync to moves, no matter what */
+ if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
+ r = amdgpu_sync_fence(sync, f, false);
+ if (r)
+ break;
+ }
+
/* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise.
*/
- fence_owner = amdgpu_sync_get_owner(f);
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
- if (amdgpu_sync_same_dev(adev, f)) {
- /* VM updates only sync with moves but not with user
- * command submissions or KFD evictions fences
- */
- if (owner == AMDGPU_FENCE_OWNER_VM &&
- fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ /* Ignore fences depending on the sync mode */
+ switch (mode) {
+ case AMDGPU_SYNC_ALWAYS:
+ break;
+
+ case AMDGPU_SYNC_NE_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner == owner)
continue;
+ break;
- /* Ignore fence from the same owner and explicit one as
- * long as it isn't undefined.
- */
- if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
- (fence_owner == owner || explicit_sync))
+ case AMDGPU_SYNC_EQ_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner != owner)
continue;
+ break;
+
+ case AMDGPU_SYNC_EXPLICIT:
+ continue;
}
r = amdgpu_sync_fence(sync, f, false);
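The boolean explicit_sync parameter is gone; callers now pick one of four sync modes, and the switch above decides per shared fence whether it joins the sync object. Below is a standalone sketch (illustrative only, not driver code) of that decision; same_dev and the owner cookies stand in for amdgpu_sync_same_dev() and amdgpu_sync_get_owner(), and the unconditional sync to move fences shown above is outside this sketch:

#include <stdbool.h>
#include <stdio.h>

enum sync_mode { SYNC_ALWAYS, SYNC_NE_OWNER, SYNC_EQ_OWNER, SYNC_EXPLICIT };

/* Return true if a shared fence must be added to the sync object. */
static bool must_sync(enum sync_mode mode, bool same_dev,
		      const void *fence_owner, const void *owner)
{
	switch (mode) {
	case SYNC_ALWAYS:	/* never skip anything */
		return true;
	case SYNC_NE_OWNER:	/* skip fences from the same owner */
		return !(same_dev && fence_owner == owner);
	case SYNC_EQ_OWNER:	/* skip fences from other owners */
		return !(same_dev && fence_owner != owner);
	case SYNC_EXPLICIT:	/* rely on explicitly added fences only */
		return false;
	}
	return true;
}

int main(void)
{
	int a, b;	/* dummy owner cookies */

	printf("%d\n", must_sync(SYNC_NE_OWNER, true, &a, &a));	/* 0 */
	printf("%d\n", must_sync(SYNC_EQ_OWNER, true, &a, &b));	/* 0 */
	printf("%d\n", must_sync(SYNC_ALWAYS,   true, &a, &a));	/* 1 */
	return 0;
}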
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index d62c2b81d92b..cfbe5788b8b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -31,6 +31,13 @@ struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
+enum amdgpu_sync_mode {
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_SYNC_EQ_OWNER,
+ AMDGPU_SYNC_EXPLICIT
+};
+
/*
* Container for fences used to sync command submissions.
*/
@@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
bool explicit);
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner,
- bool explicit_sync);
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c6e9885c071f..6309ff72bd78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -60,20 +60,14 @@
#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
+#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
+
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window,
struct amdgpu_ring *ring,
uint64_t *addr);
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
-static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of
* memory request.
@@ -776,7 +770,6 @@ struct amdgpu_ttm_tt {
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
(1 << 0), /* HMM_PFN_VALID */
(1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
};
static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
@@ -857,7 +850,7 @@ retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
- r = hmm_range_fault(range, 0);
+ r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
if (unlikely(r <= 0)) {
/*
@@ -1034,7 +1027,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
- if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+ if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
@@ -1042,7 +1035,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
if (r)
goto gart_bind_fail;
- /* Patch mtype of the second part BO */
+ /* The memory type of the first page defaults to UC. Now
+ * modify the memory type to NC from the second page of
+ * the BO onward.
+ */
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
@@ -1596,7 +1592,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
- uint32_t bytes = 4 - (pos & 3);
+ uint64_t bytes = 4 - (pos & 3);
uint32_t shift = (pos & 3) * 8;
uint32_t mask = 0xffffffff << shift;
@@ -1605,20 +1601,28 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
bytes = len;
}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
- if (!write || mask != 0xffffffff)
- value = RREG32_NO_KIQ(mmMM_DATA);
- if (write) {
- value &= ~mask;
- value |= (*(uint32_t *)buf << shift) & mask;
- WREG32_NO_KIQ(mmMM_DATA, value);
- }
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- if (!write) {
- value = (value & mask) >> shift;
- memcpy(buf, &value, bytes);
+ if (mask != 0xffffffff) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
+ if (!write || mask != 0xffffffff)
+ value = RREG32_NO_KIQ(mmMM_DATA);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ }
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if (!write) {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+ } else {
+ bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
+ bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
+ bytes, write);
}
ret += bytes;
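Aligned dword runs are now handed to amdgpu_device_vram_access() in bulk; only accesses that straddle a dword keep the MM_INDEX/MM_DATA read-modify-write. A userspace model of that retained partial-dword path (reg32_read/reg32_write stand in for the indirect MMIO window; assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t vram_model[4] = { 0x11111111, 0x22222222, 0, 0 };

static uint32_t reg32_read(uint64_t aligned_pos)
{
	return vram_model[aligned_pos / 4];
}

static void reg32_write(uint64_t aligned_pos, uint32_t v)
{
	vram_model[aligned_pos / 4] = v;
}

static void access_bytes(uint64_t pos, uint8_t *buf, size_t len, int write)
{
	while (len) {
		uint64_t aligned = pos & ~(uint64_t)3;
		unsigned int bytes = 4 - (pos & 3);
		unsigned int shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffffu << shift;
		uint32_t value;

		if (len < bytes) {
			mask &= 0xffffffffu >> ((bytes - len) * 8);
			bytes = len;
		}

		value = reg32_read(aligned);	/* MM_INDEX/MM_DATA read */
		if (write) {
			uint32_t in = 0;

			memcpy(&in, buf, bytes);
			value = (value & ~mask) | ((in << shift) & mask);
			reg32_write(aligned, value);
		} else {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}
		buf += bytes;
		pos += bytes;
		len -= bytes;
	}
}

int main(void)
{
	uint8_t b[3] = { 0xaa, 0xbb, 0xcc };

	access_bytes(1, b, 3, 1);		/* 3-byte write at offset 1 */
	printf("0x%08x\n", vram_model[0]);	/* 0xccbbaa11 */
	return 0;
}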
@@ -1638,7 +1642,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
@@ -1836,9 +1839,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* The reserved vram for memory training must be pinned to the specified
* place on the VRAM, so reserve it early.
*/
- r = amdgpu_ttm_training_reserve_vram_init(adev);
- if (r)
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+ }
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
@@ -1911,12 +1916,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- /* Register debugfs entries for amdgpu_ttm */
- r = amdgpu_ttm_debugfs_init(adev);
- if (r) {
- DRM_ERROR("Failed to init debugfs\n");
- return r;
- }
return 0;
}
@@ -1938,7 +1937,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
- amdgpu_ttm_debugfs_fini(adev);
amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
@@ -2113,8 +2111,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
}
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2198,7 +2196,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2279,7 +2278,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
- int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -2287,27 +2285,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO;
+ size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
while (size) {
- unsigned long flags;
- uint32_t value;
-
- if (*pos >= adev->gmc.mc_vram_size)
- return result;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
+ uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ amdgpu_device_vram_access(adev, *pos, value, bytes, false);
+ if (copy_to_user(buf, value, bytes))
+ return -EFAULT;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
+ result += bytes;
+ buf += bytes;
+ *pos += bytes;
+ size -= bytes;
}
return result;
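The read loop no longer issues one register pair per dword; it clamps the request to the end of VRAM once, then services it through a bounded stack buffer. A model of that pattern (memcpy stands in for both the device access and copy_to_user):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DW_READ 128

static void vram_read_model(const uint8_t *vram, size_t vram_size,
			    uint64_t pos, uint8_t *out, size_t size)
{
	if (pos >= vram_size)
		return;
	if (size > vram_size - pos)
		size = vram_size - pos;			/* clamp to VRAM end */

	while (size) {
		uint8_t chunk[MAX_DW_READ * 4];
		size_t bytes = size < sizeof(chunk) ? size : sizeof(chunk);

		memcpy(chunk, vram + pos, bytes);	/* device access */
		memcpy(out, chunk, bytes);		/* copy_to_user stand-in */
		out += bytes;
		pos += bytes;
		size -= bytes;
	}
}

int main(void)
{
	uint8_t vram[2048], out[1024];

	memset(vram, 0xab, sizeof(vram));
	vram_read_model(vram, sizeof(vram), 1536, out, 1024); /* clamped to 512 */
	printf("0x%02x\n", out[0]);			       /* 0xab */
	return 0;
}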
@@ -2544,7 +2534,7 @@ static const struct {
#endif
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned count;
@@ -2579,13 +2569,3 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return 0;
#endif
}
-
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
- debugfs_remove(adev->mman.debugfs_entries[i]);
-#endif
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0dddedc06ae3..bd05bbb4878d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -133,4 +133,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f4d40855147b..9dd51f0d2c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_umc_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a92f3b18e657..5fd32ad1c575 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free;
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f96464e2c157..a41272fbcba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
@@ -656,15 +654,10 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = NULL;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index daaf909d009a..f0128f745bd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -270,6 +270,9 @@ struct amdgpu_virt {
#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
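A truth-table sketch of the new predicate (bit values here are illustrative; the real macros test flags in adev->virt.caps):

#include <stdbool.h>
#include <stdio.h>

#define CAPS_IS_VF	0x1	/* hypothetical stand-ins for the caps bits */
#define CAPS_RUNTIME	0x2

static bool sriov_vf(unsigned caps)      { return caps & CAPS_IS_VF; }
static bool sriov_runtime(unsigned caps) { return caps & CAPS_RUNTIME; }

/* full access: a VF that is not under runtime host arbitration */
static bool sriov_fullaccess(unsigned caps)
{
	return sriov_vf(caps) && !sriov_runtime(caps);
}

int main(void)
{
	printf("%d\n", sriov_fullaccess(CAPS_IS_VF));			/* 1 */
	printf("%d\n", sriov_fullaccess(CAPS_IS_VF | CAPS_RUNTIME));	/* 0 */
	printf("%d\n", sriov_fullaccess(0));				/* 0 */
	return 0;
}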
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d16231d6a790..6d9252a27916 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -120,23 +120,17 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
{
- unsigned shift = 0xff;
-
switch (level) {
case AMDGPU_VM_PDB2:
case AMDGPU_VM_PDB1:
case AMDGPU_VM_PDB0:
- shift = 9 * (AMDGPU_VM_PDB0 - level) +
+ return 9 * (AMDGPU_VM_PDB0 - level) +
adev->vm_manager.block_size;
- break;
case AMDGPU_VM_PTB:
- shift = 0;
- break;
+ return 0;
default:
- dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ return ~0;
}
-
- return shift;
}
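With the dev_err() removed, the function is pure arithmetic: each directory level above PDB0 adds 9 bits of address space on top of the configurable block size. An illustrative userspace model (a block size of 9 is an assumption, not taken from this patch):

#include <stdio.h>

enum { PDB2, PDB1, PDB0, PTB };	/* mirrors AMDGPU_VM_PDB2..AMDGPU_VM_PTB */

static unsigned int level_shift(int level, unsigned int block_size)
{
	switch (level) {
	case PDB2:
	case PDB1:
	case PDB0:
		return 9 * (PDB0 - level) + block_size;
	case PTB:
		return 0;
	default:
		return ~0u;	/* unsupported level */
	}
}

int main(void)
{
	/* With block_size = 9: PDB2 -> 27, PDB1 -> 18, PDB0 -> 9, PTB -> 0 */
	for (int l = PDB2; l <= PTB; l++)
		printf("level %d: shift %u\n", l, level_shift(l, 9));
	return 0;
}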
/**
@@ -235,19 +229,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
}
-
-/**
- * amdgpu_vm_bo_relocated - vm_bo is reloacted
- *
- * @vm_bo: vm_bo which is relocated
- *
- * State for PDs/PTs which needs to update their parent PD.
- */
-static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
-{
- list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
-}
-
/**
* amdgpu_vm_bo_moved - vm_bo is moved
*
@@ -291,6 +272,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_relocated - vm_bo is relocated
+ *
+ * @vm_bo: vm_bo which is relocated
+ *
+ * State for PDs/PTs which needs to update their parent PD.
+ * For the root PD, just move to idle state.
+ */
+static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
+{
+ if (vm_bo->bo->parent)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+ else
+ amdgpu_vm_bo_idle(vm_bo);
+}
+
+/**
* amdgpu_vm_bo_done - vm_bo is done
*
* @vm_bo: vm_bo which is now done
@@ -588,8 +585,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- /* One for TTM and one for the CS job */
- entry->tv.num_shared = 2;
+ /* Two for VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 4;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -697,10 +694,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_moved(bo_base);
} else {
vm->update_funcs->map_table(bo);
- if (bo->parent)
- amdgpu_vm_bo_relocated(bo_base);
- else
- amdgpu_vm_bo_idle(bo_base);
+ amdgpu_vm_bo_relocated(bo_base);
}
}
@@ -803,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1086,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct dma_fence *fence = NULL;
bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
+ bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
int r;
+ if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
@@ -1299,7 +1297,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1448,21 +1446,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- /* make sure that the page tables covering the address range are
- * actually allocated
- */
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
- params->direct);
- if (r)
- return r;
-
- pt = cursor.entry->base.bo;
-
- /* The root level can't be a huge page */
- if (cursor.level == adev->vm_manager.root_level) {
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ /* make sure that the page tables covering the
+ * address range are actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm,
+ &cursor, params->direct);
+ if (r)
+ return r;
}
shift = amdgpu_vm_level_shift(adev, cursor.level);
@@ -1480,25 +1471,38 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* smaller than the address shift. Go to the next
* child entry and try again.
*/
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
- } else if (frag >= parent_shift &&
- cursor.level - 1 != adev->vm_manager.root_level) {
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (frag >= parent_shift) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again
- * unless one level up is the root level.
+ * shift we should go up one level and check it again.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
- return -ENOENT;
+ return -EINVAL;
continue;
}
+ pt = cursor.entry->base.bo;
+ if (!pt) {
+ /* We need all PDs and PTs for mapping something, */
+ if (flags & AMDGPU_PTE_VALID)
+ return -ENOENT;
+
+ /* but unmapping something can happen at a higher
+ * level.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+
+ pt = cursor.entry->base.bo;
+ shift = parent_shift;
+ }
+
/* Looks good so far, calculate parameters for the update */
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (uint64_t)(mask + 1) << shift;
+ entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
@@ -1506,6 +1510,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift;
+ /* This can happen when we set higher level PDs to
+ * silent to stop fault floods.
+ */
+ nptes = max(nptes, 1u);
amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr,
flags | AMDGPU_PTE_FRAG(frag));
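A toy model (not driver code) of the resolve-or-climb rule above: mapping needs the full PD/PT chain down to the cursor, while unmapping a missing table can be handled one level up:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct entry { struct entry *parent; bool has_bo; };

static int resolve_pt(struct entry *cur, bool mapping, struct entry **out)
{
	while (!cur->has_bo) {
		if (mapping)
			return -ENOENT;	/* mapping needs the full chain */
		if (!cur->parent)
			return -EINVAL;	/* fell off the root */
		cur = cur->parent;	/* handle the unmap one level up */
	}
	*out = cur;
	return 0;
}

int main(void)
{
	struct entry root = { NULL, true };
	struct entry pt = { &root, false };	/* PT not allocated */
	struct entry *got;

	printf("map: %d\n", resolve_pt(&pt, true, &got));	/* -ENOENT */
	printf("unmap: %d\n", resolve_pt(&pt, false, &got));	/* 0 */
	return 0;
}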
@@ -1550,7 +1558,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* @adev: amdgpu_device pointer
* @vm: requested vm
* @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
@@ -1565,14 +1573,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool direct,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
- void *owner = AMDGPU_FENCE_OWNER_VM;
+ enum amdgpu_sync_mode sync_mode;
int r;
memset(&params, 0, sizeof(params));
@@ -1581,9 +1589,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.direct = direct;
params.pages_addr = pages_addr;
- /* sync to everything except eviction fences on unmapping */
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping. Sync to moving fences before mapping.
+ */
if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_KFD;
+ sync_mode = AMDGPU_SYNC_EQ_OWNER;
+ else
+ sync_mode = AMDGPU_SYNC_EXPLICIT;
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
@@ -1591,7 +1603,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock;
}
- r = vm->update_funcs->prepare(&params, owner, exclusive);
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ struct amdgpu_bo *root = vm->root.base.bo;
+
+ if (!dma_fence_is_signaled(vm->last_direct))
+ amdgpu_bo_fence(root, vm->last_direct, true);
+ }
+
+ r = vm->update_funcs->prepare(&params, resv, sync_mode);
if (r)
goto error_unlock;
@@ -1610,7 +1629,7 @@ error_unlock:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
@@ -1626,7 +1645,7 @@ error_unlock:
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
@@ -1696,13 +1715,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
- } else if (flags & AMDGPU_PTE_VALID) {
+ } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
start, last, flags, addr,
dma_addr, fence);
if (r)
@@ -1741,7 +1760,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
- struct dma_fence *exclusive, **last_update;
+ struct dma_fence **last_update;
+ struct dma_resv *resv;
uint64_t flags;
struct amdgpu_device *bo_adev = adev;
int r;
@@ -1749,7 +1769,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (clear || !bo) {
mem = NULL;
nodes = NULL;
- exclusive = NULL;
+ resv = vm->root.base.bo->tbo.base.resv;
} else {
struct ttm_dma_tt *ttm;
@@ -1759,7 +1779,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = bo->tbo.moving;
+ resv = bo->tbo.base.resv;
}
if (bo) {
@@ -1769,7 +1789,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
flags = 0x0;
}
- if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+ if (clear || (bo && bo->tbo.base.resv ==
+ vm->root.base.bo->tbo.base.resv))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1783,7 +1804,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
mapping, flags, bo_adev, nodes,
last_update);
if (r)
@@ -1978,6 +1999,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
@@ -1992,7 +2014,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
mapping->start, mapping->last,
init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2563,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
- !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+ if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -2741,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0)
return timeout;
- timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
- if (timeout <= 0)
- return timeout;
-
- return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+ return dma_fence_wait_timeout(vm->last_direct, true, timeout);
}
/**
@@ -2818,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
vm->last_direct = dma_fence_get_stub();
- vm->last_delayed = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2873,7 +2889,6 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_direct);
- dma_fence_put(vm->last_delayed);
drm_sched_entity_destroy(&vm->delayed);
error_free_direct:
@@ -3076,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_wait(vm->last_direct, false);
dma_fence_put(vm->last_direct);
- dma_fence_wait(vm->last_delayed, false);
- dma_fence_put(vm->last_delayed);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3188,6 +3201,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ long timeout = msecs_to_jiffies(2000);
int r;
switch (args->in.op) {
@@ -3199,6 +3213,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
+ if (amdgpu_sriov_runtime(adev))
+ timeout = 8 * timeout;
+
+ /* Wait for the VM to go idle to make sure the VMID set in SPM_VMID
+ * is not referenced anymore.
+ */
+ r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
+ if (r)
+ return r;
+
+ r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
+ if (r < 0)
+ return r;
+
+ amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b4640ab38c95..06fe30e1492d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo *bo);
- int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
- struct dma_fence *exclusive);
+ int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -276,7 +276,6 @@ struct amdgpu_vm {
/* Last submission to the scheduler entities */
struct dma_fence *last_direct;
- struct dma_fence *last_delayed;
unsigned int pasid;
/* dedicated to vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 73fec7a0ced5..e38516304070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
* Returns:
* Negative errno, 0 for success.
*/
-static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
- struct dma_fence *exclusive)
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- int r;
-
- /* Wait for any BO move to be completed */
- if (exclusive) {
- r = dma_fence_wait(exclusive, true);
- if (unlikely(r))
- return r;
- }
-
- /* Don't wait for submissions during page fault */
- if (p->direct)
+ if (!resv)
return 0;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
}
/**
@@ -86,6 +74,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
unsigned int i;
uint64_t value;
+ int r;
+
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r)
+ return r;
+ }
pe += (unsigned long)amdgpu_bo_kptr(bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 19b7f80758f1..cf96c335b258 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- void *owner, struct dma_fence *exclusive)
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- struct amdgpu_bo *root = p->vm->root.base.bo;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
p->num_dw_left = ndw;
- /* Wait for moves to be completed */
- r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
- if (r)
- return r;
-
- /* Don't wait for any submissions during page fault handling */
- if (p->direct)
+ if (!resv)
return 0;
- return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
+ return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
}
/**
@@ -111,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- tmp = dma_fence_get(f);
- if (p->direct)
+ if (p->direct) {
+ tmp = dma_fence_get(f);
swap(p->vm->last_direct, tmp);
- else
- swap(p->vm->last_delayed, tmp);
- dma_fence_put(tmp);
+ dma_fence_put(tmp);
+ } else {
+ dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+ }
if (fence && !p->direct)
swap(*fence, f);
@@ -147,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -174,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
@@ -208,6 +202,11 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t *pte;
int r;
+ /* Wait for PD/PT moves to be completed */
+ r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
+ if (r)
+ return r;
+
do {
ndw = p->num_dw_left;
ndw -= p->job->ibs->length_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a97af422575a..95b3327168ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -26,7 +26,12 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
+#include "soc15.h"
#include "df/df_3_6_offset.h"
+#include "xgmi/xgmi_4_0_0_smn.h"
+#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "wafl/wafl2_4_0_0_smn.h"
+#include "wafl/wafl2_4_0_0_sh_mask.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
+static const int xgmi_pcs_err_status_reg_vg20[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int wafl_pcs_err_status_reg_vg20[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int xgmi_pcs_err_status_reg_arct[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
+};
+
+/* same as vg20 */
+static const int wafl_pcs_err_status_reg_arct[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
+ {"XGMI PCS DataLossErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI PCS TrainingErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI PCS CRCErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI PCS BERExceededErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI PCS TxMetaDataErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"XGMI PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI PCS DataParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI PCS DeskewErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
+static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
+ {"WAFL PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
+ {"WAFL PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
+ {"WAFL PCS CRCErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
+ {"WAFL PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
+ {"WAFL PCS TxMetaDataErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"WAFL PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"WAFL PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
+ {"WAFL PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"WAFL PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"WAFL PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"WAFL PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
+ {"WAFL PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"WAFL PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"WAFL PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"WAFL PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"WAFL PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"WAFL PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"WAFL PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
return &hive->device_list;
@@ -365,6 +473,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
return 0;
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ ret = psp_xgmi_initialize(&adev->psp);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to initialize xgmi session\n");
+ return ret;
+ }
+
ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
if (ret) {
dev_err(adev->dev,
@@ -451,16 +566,16 @@ exit:
return ret;
}
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
struct amdgpu_hive_info *hive;
if (!adev->gmc.xgmi.supported)
- return;
+ return -EINVAL;
hive = amdgpu_get_xgmi_hive(adev, 1);
if (!hive)
- return;
+ return -EINVAL;
if (!(hive->number_devices--)) {
amdgpu_xgmi_sysfs_destroy(adev, hive);
@@ -471,6 +586,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
+
+ return psp_xgmi_terminate(&adev->psp);
}
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
@@ -481,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "xgmi_wafl_err_count",
- .debugfs_name = "xgmi_wafl_err_inject",
};
if (!adev->gmc.xgmi.supported ||
@@ -521,3 +637,129 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
kfree(ras_if);
}
}
+
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ uint32_t df_inst_id;
+ uint64_t dram_base_addr = 0;
+ const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
+
+ if ((!df_funcs) ||
+ (!df_funcs->get_df_inst_id) ||
+ (!df_funcs->get_dram_base_addr)) {
+ dev_warn(adev->dev,
+ "XGMI: relative phy_addr algorithm is not supported\n");
+ return addr;
+ }
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
+ dev_warn(adev->dev,
+ "failed to disable DF-Cstate, DF register may not be accessible\n");
+ return addr;
+ }
+
+ df_inst_id = df_funcs->get_df_inst_id(adev);
+ dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+
+ return addr + dram_base_addr;
+}
+
+static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
+ uint32_t value,
+ uint32_t *ue_count,
+ uint32_t *ce_count,
+ bool is_xgmi_pcs)
+{
+ int i;
+ int ue_cnt;
+
+ if (is_xgmi_pcs) {
+ /* query xgmi pcs error status,
+ * only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ xgmi_pcs_ras_fields[i].pcs_err_mask) >>
+ xgmi_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ xgmi_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ } else {
+ /* query wafl pcs error status,
+ * only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ wafl_pcs_ras_fields[i].pcs_err_mask) >>
+ wafl_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ wafl_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ int i;
+ uint32_t data;
+ uint32_t ue_cnt = 0, ce_cnt = 0;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ case CHIP_VEGA20:
+ default:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ }
+
+ err_data->ue_count += ue_cnt;
+ err_data->ce_count += ce_cnt;
+
+ return 0;
+}
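Both query paths reduce to the same per-entry mask/shift extraction over a field table. A standalone model (masks here are illustrative; the driver derives them via SOC15_REG_FIELD):

#include <stdint.h>
#include <stdio.h>

struct pcs_field { const char *name; uint32_t mask; uint32_t shift; };

static const struct pcs_field fields[] = {
	{ "DataLossErr", 0x00000001, 0 },	/* illustrative masks only */
	{ "TrainingErr", 0x00000002, 1 },
	{ "CRCErr",      0x0000001c, 2 },
};

static uint32_t count_ue(uint32_t status)
{
	uint32_t ue = 0;

	for (unsigned int i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t cnt = (status & fields[i].mask) >> fields[i].shift;

		if (cnt) {
			printf("%s detected\n", fields[i].name);
			ue += cnt;
		}
	}
	return ue;
}

int main(void)
{
	printf("ue_count = %u\n", count_ue(0x0000000d)); /* bits 0, 2, 3 set */
	return 0;
}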
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 74011fbc2251..4a92067fe595 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -37,15 +37,25 @@ struct amdgpu_hive_info {
struct task_barrier tb;
};
+struct amdgpu_pcs_ras_field {
+ const char *err_name;
+ uint32_t pcs_err_mask;
+ uint32_t pcs_err_shift;
+};
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index dd30f4e61a8c..cae426c7c086 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 5000)) {
- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > 10000)) {
+ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
ctx->abort = true;
}
} else {
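A userspace model of this loop watchdog, with CLOCK_MONOTONIC standing in for jiffies and the budget raised to 10 seconds as in the change above:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool loop_watchdog(const struct timespec *last_jump, long budget_ms)
{
	struct timespec now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed_ms = (now.tv_sec - last_jump->tv_sec) * 1000 +
		     (now.tv_nsec - last_jump->tv_nsec) / 1000000;
	if (elapsed_ms > budget_ms) {
		fprintf(stderr, "stuck in loop for more than %lds, aborting\n",
			budget_ms / 1000);
		return true;	/* abort interpretation */
	}
	return false;
}

int main(void)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	printf("%d\n", loop_watchdog(&start, 10000));	/* 0: within budget */
	return 0;
}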
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index ea702a64f807..9b74cfdba7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -186,16 +186,10 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
- int ret;
-
amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
- amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
- ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
- if (!ret)
- amdgpu_connector->ddc_bus->has_aux = true;
-
- WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+ drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
+ amdgpu_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 40d2ac723dd6..2512e7ebfedf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2494,6 +2494,10 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2685,6 +2689,7 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.prepare = dce_v10_0_crtc_prepare,
.commit = dce_v10_0_crtc_commit,
.disable = dce_v10_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 898ef72d423c..0dde22db9848 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2573,6 +2573,10 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2793,6 +2797,7 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.prepare = dce_v11_0_crtc_prepare,
.commit = dce_v11_0_crtc_commit,
.disable = dce_v11_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index db15a112becc..84219534bd38 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2388,6 +2388,10 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v6_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2575,6 +2579,7 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
.disable = dce_v6_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f06c9022c1fd..3a640702d7d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2395,6 +2395,10 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2593,6 +2597,7 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.prepare = dce_v8_0_crtc_prepare,
.commit = dce_v8_0_crtc_commit,
.disable = dce_v8_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4f94863332c..13e12be667fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -123,6 +123,10 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_virtual_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -218,6 +222,7 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
.prepare = dce_virtual_crtc_prepare,
.commit = dce_virtual_crtc_commit,
.disable = dce_virtual_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
@@ -609,7 +614,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_register(connector);
/* link them */
drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 02702597ddeb..c8f2aa1db13b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -35,6 +35,8 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
+#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
@@ -222,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+}
+
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
/* Pending on emulation bring up */
@@ -500,29 +545,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
long r;
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
return r;
- }
-
- WREG32(scratch, 0xCAFEDEAD);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ if (r)
goto err1;
- }
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
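+ /* Build a WRITE_DATA packet that lands 0xDEADBEEF in the write-back
+ * slot in system memory; the CPU reads the slot back once the fence
+ * signals, replacing the scratch-register read the old test used. */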
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
@@ -530,15 +574,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
- tmp = RREG32(scratch);
+ tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF)
r = 0;
else
@@ -547,8 +589,7 @@ err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
-
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1016,6 +1057,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1783,11 +1828,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
return 0;
}
@@ -1895,6 +1940,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
} else {
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v10_0_init_csb(adev);
+ return 0;
+ }
+
adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
@@ -2395,7 +2445,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].sched.ready = false;
}
- WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
@@ -3168,12 +3218,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
- DRM_ERROR("kfq enable failed\n");
- kiq_ring->sched.ready = false;
- }
- return r;
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3216,6 +3261,22 @@ done:
return r;
}
+static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
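+ /* Promote queues flagged as high-priority compute to the maximum HQD
+ * pipe/queue priority in the MQD, and cache the decision on the ring. */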
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3341,6 +3402,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a compute queue/ring */
+ gfx_v10_0_compute_mqd_set_priority(ring, mqd);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -3790,7 +3854,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3930,9 +3994,8 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
@@ -4220,6 +4283,45 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
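+ /* Read-modify-write RLC_SPM_MC_CNTL so that only the SPM VMID field
+ * (the VMID the streaming performance monitor uses for its memory
+ * traffic) changes. */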
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
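+ /* Linear scan of the RLCG table: resolve each entry to an absolute
+ * register offset and compare it against the one being written. */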
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
+}
+
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
.set_safe_mode = gfx_v10_0_set_safe_mode,
@@ -4230,7 +4332,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.resume = gfx_v10_0_rlc_resume,
.stop = gfx_v10_0_rlc_stop,
.reset = gfx_v10_0_rlc_reset,
- .start = gfx_v10_0_rlc_start
+ .start = gfx_v10_0_rlc_start,
+ .update_spm_vmid = gfx_v10_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v10_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
static int gfx_v10_0_set_powergating_state(void *handle,
@@ -4419,15 +4524,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v10_0_ring_emit_de_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
amdgpu_ring_write(ring, header);
@@ -4574,9 +4679,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
{
uint32_t dw2 = 0;
- if (amdgpu_mcbp)
+ if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
gfx_v10_0_ring_emit_ce_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
gfx_v10_0_ring_emit_tmz(ring, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8f20a5dd44fe..733d398c61cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig, tmp, tmp2;
@@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
.resume = gfx_v7_0_rlc_resume,
.stop = gfx_v7_0_rlc_stop,
.reset = gfx_v7_0_rlc_reset,
- .start = gfx_v7_0_rlc_start
+ .start = gfx_v7_0_rlc_start,
+ .update_spm_vmid = gfx_v7_0_update_spm_vmid
};
static int gfx_v7_0_early_init(void *handle)
@@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fa245973de12..fc32586ef80b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
@@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
return r;
}
+static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
/* defaults */
mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
- mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
- mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
@@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
+ /* set static priority for a queue/ring */
+ gfx_v8_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
.set_safe_mode = gfx_v8_0_set_safe_mode,
@@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.resume = gfx_v8_0_rlc_resume,
.stop = gfx_v8_0_rlc_stop,
.reset = gfx_v8_0_rlc_reset,
- .start = gfx_v8_0_rlc_start
+ .start = gfx_v8_0_rlc_start,
+ .update_spm_vmid = gfx_v8_0_update_spm_vmid
};
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v8_0_ring_emit_de_meta(ring);
}
@@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
-static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v8_0_hqd_set_priority(adev, ring, acquire);
- gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v8_0_ring_set_priority_compute,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 889154a78c4a..37c8231f1407 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
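+/* Registers that must be written through the RLC when RLCG register
+ * protection is active; gfx_v9_0_is_rlcg_access_range() consults this list. */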
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
+ {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
+ {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
+};
+
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
@@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
+void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
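+ /* GRBM_GFX_CNTL/INDEX writes are mirrored into scratch_reg2/3 for the
+ * RLC and then issued directly over MMIO; every other offset goes
+ * through the scratch-register handshake with the RLC below. */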
+ if (offset == grbm_cntl || offset == grbm_idx) {
+ if (offset == grbm_cntl)
+ writel(v, scratch_reg2);
+ else if (offset == grbm_idx)
+ writel(v, scratch_reg3);
+
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+ }
+
+}
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -738,9 +796,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
@@ -1106,10 +1164,11 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
- if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ if ((adev->asic_type != CHIP_ARCTURUS) &&
+ ((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
- (adev->gfx.pfp_feature_version < 46))
+ (adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->asic_type) {
@@ -1846,6 +1905,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
break;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1916,7 +1979,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1928,7 +1991,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1992,7 +2055,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_0_ras_error_inject,
- .query_ras_error_count = &gfx_v9_0_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
@@ -2003,7 +2067,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_4_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -3309,6 +3374,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
+static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3445,6 +3526,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a queue/ring */
+ gfx_v9_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -3963,6 +4048,63 @@ static int gfx_v9_0_soft_reset(void *handle)
return 0;
}
+static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
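+ /* COPY_DATA with source-select 9 appears to snapshot the 64-bit GPU
+ * timestamp (hence the two zero address dwords) into the write-back
+ * buffer, letting the KIQ read the clock when direct MMIO access is
+ * restricted under SRIOV. */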
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+ amdgpu_ring_write(ring, 9 | /* src: register*/
+ (5 << 8) | /* dst: memory */
+ (1 << 16) | /* count sel */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU-reset case, because doing so may
+ * block the gpu_recover() routine forever: this virt_kiq_rreg path
+ * is triggered from TTM, and ttm_bo_lock_delayed_workqueue() never
+ * returns if we keep waiting here, which makes gpu_recover() hang.
+ *
+ * Also don't keep waiting when called from IRQ context.
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
+ (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1] << 32ULL;
+
+failed_kiq_read:
+ pr_err("failed to read gpu clock\n");
+ return ~0;
+}
+
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -3970,16 +4112,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
- uint32_t tmp, lsb, msb, i = 0;
- do {
- if (i != 0)
- udelay(1);
- tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
- msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- i++;
- } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
- clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
@@ -4053,6 +4186,101 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
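+/* Arcturus variant of the VGPR-init shader used by the EDC GPR workaround;
+ * the long unrolled prologue (the 0xd3d940xx opcodes) appears to initialize
+ * all 256 accumulation VGPRs before the clear loop at the end. */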
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+ 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+ 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+ 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+ 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+ 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+ 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+ 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+ 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+ 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+ 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+ 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+ 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+ 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+ 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+ 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+ 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+ 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+ 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+ 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+ 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+ 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+ 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+ 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+ 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+ 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+ 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+ 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+ 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+ 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+ 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+ 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+ 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+ 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+ 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+ 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+ 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+ 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+ 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+ 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+ 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+ 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+ 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+ 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+ 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+ 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+ 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+ 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+ 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+ 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+ 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+ 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+ 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+ 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+ 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
+ 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
+ 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
+ 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
+ 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
+ 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
+ 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
+ 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
+ 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
+ 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
+ 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
+ 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
+ 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
+ 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
+ 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
+ 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
+ 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
+ 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
+ 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
+ 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
+ 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
+ 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
+ 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
+ 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
+ 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
+ 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
+ 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
+ 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
+ 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
+ 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
+ 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
+ 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
+ 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
+ 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
+ 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
+ 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
+ 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
+ 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
+ 0xbf84fff8, 0xbf810000,
+};
+
/* When below register arrays changed, please update gpr_reg_size,
and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds,
to cover all gfx9 ASICs */
@@ -4073,6 +4301,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
static const struct soc15_reg_entry sgpr1_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
@@ -4141,7 +4386,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
- { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -4204,7 +4448,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se;
int sgpr_work_group_size = 5;
- int gpr_reg_size = compute_dim_x / 16 + 6;
+ int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
+ int vgpr_init_shader_size;
+ const u32 *vgpr_init_shader_ptr;
+ const struct soc15_reg_entry *vgpr_init_regs_ptr;
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
@@ -4214,6 +4461,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
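+ /* Arcturus adds an accumulation VGPR file on top of the architectural
+ * VGPRs, so it needs its own init shader and register list; select
+ * the per-ASIC variants here. */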
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
+ vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
+ } else {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
+ vgpr_init_regs_ptr = vgpr_init_regs;
+ }
+
total_size =
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
@@ -4222,7 +4479,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
- total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+ total_size += ALIGN(vgpr_init_shader_size, 256);
sgpr_offset = total_size;
total_size += sizeof(sgpr_init_compute_shader);
@@ -4235,8 +4492,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* load the compute shaders */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
- ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+ for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
+ ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
@@ -4248,9 +4505,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write the register state for the compute dispatch */
for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
@@ -4262,7 +4519,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4342,18 +4599,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- switch (adev->asic_type)
- {
- case CHIP_VEGA20:
- gfx_v9_0_clear_ras_edc_counter(adev);
- break;
- case CHIP_ARCTURUS:
- gfx_v9_4_clear_ras_edc_counter(adev);
- break;
- default:
- break;
- }
-
fail:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -4401,6 +4646,10 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
+ if (adev->gfx.funcs &&
+ adev->gfx.funcs->reset_ras_error_count)
+ adev->gfx.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gfx_ras_late_init(adev);
if (r)
return r;
@@ -4705,6 +4954,47 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v9_0_check_rlcg_range(adev, offset,
+ (void *)rlcg_access_gc_9_0,
+ ARRAY_SIZE(rlcg_access_gc_9_0));
+}
+
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
.set_safe_mode = gfx_v9_0_set_safe_mode,
@@ -4716,7 +5006,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.resume = gfx_v9_0_rlc_resume,
.stop = gfx_v9_0_rlc_stop,
.reset = gfx_v9_0_rlc_reset,
- .start = gfx_v9_0_rlc_start
+ .start = gfx_v9_0_rlc_start,
+ .update_spm_vmid = gfx_v9_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v9_0_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4919,7 +5212,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v9_0_ring_emit_de_meta(ring);
}
@@ -5044,105 +5337,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
return wptr;
}
-static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
-static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v9_0_hqd_set_priority(adev, ring, acquire);
- gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -6322,10 +6516,13 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
return 0;
}
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
/* read back registers to clear the counters */
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
@@ -6513,7 +6710,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v9_0_ring_set_priority_compute,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index f099f13d7f1e..cceb46faf212 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -893,10 +893,13 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 2e3f6f755ad4..1ffecc5c0f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
+
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index b70c7b483c24..cc866c367939 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Disable AGP. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
-
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
-
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * The new L1 policy blocks the SRIOV guest from writing
+ * these registers; they are programmed on the host side,
+ * so skip programming them here.
+ */
+ /* Disable AGP. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ + adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+ }
/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
@@ -135,6 +142,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These regs are not accessible for VF, PF will program these in SRIOV */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -256,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
gfxhub_v2_0_init_gart_aperture_regs(adev);
gfxhub_v2_0_init_system_aperture_regs(adev);
@@ -298,9 +297,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
- /* Setup L2 cache */
- WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index cc0c273a86f9..8606f877478f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -476,13 +476,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
- u32 j, inv_req, tmp;
+ u32 j, inv_req, inv_req2, tmp;
struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= adev->num_vmhubs);
hub = &adev->vmhub[vmhub];
- inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
+ inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ } else {
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ inv_req2 = 0;
+ }
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
@@ -521,21 +534,27 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
+ do {
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
- /*
- * Issue a dummy read to wait for the ACK register to be cleared
- * to avoid a false ACK due to the new fast GRBM interface.
- */
- if (vmhub == AMDGPU_GFXHUB_0)
- RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
+ /*
+ * Issue a dummy read to wait for the ACK register to
+ * be cleared to avoid a false ACK due to the new fast
+ * GRBM interface.
+ */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- if (tmp & (1 << vmid))
- break;
- udelay(1);
- }
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+ if (tmp & (1 << vmid))
+ break;
+ udelay(1);
+ }
+
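+ /* Run a second pass when a follow-up flush is queued in inv_req2
+ * (the Vega20 XGMI workaround); otherwise inv_req becomes zero and
+ * the loop ends. */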
+ inv_req = inv_req2;
+ inv_req2 = 0;
+ } while (inv_req);
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
if (use_semaphore)
@@ -577,9 +596,26 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
return -EIO;
if (ring->sched.ready) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20);
+ /* 2 dwords flush + 8 dwords fence */
+ unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
+
+ if (vega20_xgmi_wa)
+ ndw += kiq->pmf->invalidate_tlbs_size;
+
spin_lock(&adev->gfx.kiq.ring_lock);
/* 2 dwords flush + 8 dwords fence */
- amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ amdgpu_ring_alloc(ring, ndw);
+ if (vega20_xgmi_wa)
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
@@ -886,32 +922,25 @@ static int gmc_v9_0_late_init(void *handle)
if (r)
return r;
/* Check if ecc is available */
- if (!amdgpu_sriov_vf(adev)) {
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- r = amdgpu_atomfirmware_mem_ecc_supported(adev);
- if (!r) {
- DRM_INFO("ECC is not present.\n");
- if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
- adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else {
- DRM_INFO("ECC is active.\n");
- }
-
- r = amdgpu_atomfirmware_sram_ecc_supported(adev);
- if (!r) {
- DRM_INFO("SRAM ECC is not present.\n");
- } else {
- DRM_INFO("SRAM ECC is active.\n");
- }
- break;
- default:
- break;
- }
+ if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+ r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("ECC is not present.\n");
+ if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else
+ DRM_INFO("ECC is active.\n");
+
+ r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+ if (!r)
+ DRM_INFO("SRAM ECC is not present.\n");
+ else
+ DRM_INFO("SRAM ECC is active.\n");
}
+ if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 49a3a56ec017..396c2a624de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index bde189680521..fb3f228458e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * The new L1 policy blocks the SRIOV guest from writing
+ * these regs; they are programmed on the host instead,
+ * so skip programming them here.
+ */
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+ }
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
@@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
mmhub_v2_0_init_gart_aperture_regs(adev);
mmhub_v2_0_init_system_aperture_regs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index a5281df8d84f..0d413fabd015 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1596,7 +1596,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
new file mode 100644
index 000000000000..1b5086c7d4e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V2_0_H__
+#define __MMSCH_V2_0_H__
+
+// addressBlock: uvd0_mmsch_dec
+// base address: 0x1e000
+#define mmMMSCH_UCODE_ADDR 0x0000
+#define mmMMSCH_UCODE_ADDR_BASE_IDX 0
+#define mmMMSCH_UCODE_DATA 0x0001
+#define mmMMSCH_UCODE_DATA_BASE_IDX 0
+#define mmMMSCH_SRAM_ADDR 0x0002
+#define mmMMSCH_SRAM_ADDR_BASE_IDX 0
+#define mmMMSCH_SRAM_DATA 0x0003
+#define mmMMSCH_SRAM_DATA_BASE_IDX 0
+#define mmMMSCH_VF_SRAM_OFFSET 0x0004
+#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_DB_SRAM_OFFSET 0x0005
+#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTX_SRAM_OFFSET 0x0006
+#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTL 0x0007
+#define mmMMSCH_CTL_BASE_IDX 0
+#define mmMMSCH_INTR 0x0008
+#define mmMMSCH_INTR_BASE_IDX 0
+#define mmMMSCH_INTR_ACK 0x0009
+#define mmMMSCH_INTR_ACK_BASE_IDX 0
+#define mmMMSCH_INTR_STATUS 0x000a
+#define mmMMSCH_INTR_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f
+#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010
+#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_SIZE 0x0011
+#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0 0x0014
+#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015
+#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1 0x0016
+#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017
+#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0
+#define mmMMSCH_CNTL 0x001c
+#define mmMMSCH_CNTL_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET0 0x001d
+#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE0 0x001e
+#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET1 0x001f
+#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE1 0x0020
+#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0
+#define mmMMSCH_PDEBUG_STATUS 0x0021
+#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EPC 0x0024
+#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025
+#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0
+#define mmMMSCH_PROC_STATE1 0x0026
+#define mmMMSCH_PROC_STATE1_BASE_IDX 0
+#define mmMMSCH_LAST_MC_ADDR 0x0027
+#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028
+#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029
+#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0
+#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a
+#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmMMSCH_SCRATCH_0 0x002b
+#define mmMMSCH_SCRATCH_0_BASE_IDX 0
+#define mmMMSCH_SCRATCH_1 0x002c
+#define mmMMSCH_SCRATCH_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f
+#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_0 0x0033
+#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_0 0x0034
+#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_0 0x0035
+#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038
+#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_1 0x003c
+#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_1 0x003d
+#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_1 0x003e
+#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT 0x003f
+#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0
+#define mmMMSCH_SCRATCH_2 0x0040
+#define mmMMSCH_SCRATCH_2_BASE_IDX 0
+#define mmMMSCH_SCRATCH_3 0x0041
+#define mmMMSCH_SCRATCH_3_BASE_IDX 0
+#define mmMMSCH_SCRATCH_4 0x0042
+#define mmMMSCH_SCRATCH_4_BASE_IDX 0
+#define mmMMSCH_SCRATCH_5 0x0043
+#define mmMMSCH_SCRATCH_5_BASE_IDX 0
+#define mmMMSCH_SCRATCH_6 0x0044
+#define mmMMSCH_SCRATCH_6_BASE_IDX 0
+#define mmMMSCH_SCRATCH_7 0x0045
+#define mmMMSCH_SCRATCH_7_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046
+#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047
+#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048
+#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049
+#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0
+#define mmMMSCH_NACK_STATUS 0x004a
+#define mmMMSCH_NACK_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX0_DATA 0x004b
+#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX1_DATA 0x004c
+#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053
+#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056
+#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_2 0x005a
+#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_2 0x005b
+#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_2 0x005c
+#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060
+#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061
+#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_0 0x0062
+#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_1 0x0063
+#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_2 0x0064
+#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0
+
+#define MMSCH_VERSION_MAJOR 2
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+enum mmsch_v2_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v2_0_init_header {
+ uint32_t version;
+ uint32_t header_size;
+ uint32_t vcn_init_status;
+ uint32_t vcn_table_offset;
+ uint32_t vcn_table_size;
+};
+
+struct mmsch_v2_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_direct_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v2_0_cmd_direct_read_modify_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v2_0_cmd_direct_polling {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v2_0_cmd_end {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v2_0_cmd_indirect_write {
+ struct mmsch_v2_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t value)
+{
+ direct_wt->cmd_header.reg_offset = reg_offset;
+ direct_wt->reg_value = value;
+ memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t data)
+{
+ direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
+ direct_rd_mod_wt->mask_value = mask;
+ direct_rd_mod_wt->write_data = data;
+ memcpy((void *)init_table, direct_rd_mod_wt,
+ sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t wait)
+{
+ direct_poll->cmd_header.reg_offset = reg_offset;
+ direct_poll->mask_value = mask;
+ direct_poll->wait_value = wait;
+ memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling));
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
+ init_table, (reg), \
+ (mask), (data)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \
+ mmsch_v2_0_insert_direct_wt(&direct_wt, \
+ init_table, (reg), \
+ (value)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ mmsch_v2_0_insert_direct_poll(&direct_poll, \
+ init_table, (reg), \
+ (mask), (wait)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+}
+
+#endif
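Worth noting for users of this header: the INSERT_* macros reference init_table, table_size, and the command structs as free variables, so the caller must declare exactly those names. A minimal sketch assuming this header is included, with a placeholder register offset:

static uint32_t build_demo_table(uint32_t *table)
{
	uint32_t *init_table = table;
	uint32_t table_size = 0;
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;

	/* each call packs one command and advances both cursors in dwords */
	MMSCH_V2_0_INSERT_DIRECT_WT(0x1234, 0x00000001); /* placeholder reg */

	return table_size; /* dwords written so far */
}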
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index cf557a428298..e08245a446fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -32,6 +32,7 @@
#include "soc15_common.h"
#include "navi10_ih.h"
+#define MAX_REARM_RETRY 10
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -284,6 +285,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * navi10_ih_irq_rearm - rearm IRQ if the doorbell write was lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring whose doorbell may need to be re-written
+ */
+static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* navi10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
@@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ navi10_ih_irq_rearm(adev, ih);
} else
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
}
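A sketch of the bounded retry shape used by navi10_ih_irq_rearm(), with stand-in accessors (read_rptr and write_doorbell are assumptions): rewrite the doorbell while the hardware read pointer still disagrees, but never loop more than MAX_REARM_RETRY times so a wedged register cannot hang the interrupt path.

#include <stdint.h>

#define MAX_REARM_RETRY 10

static void rearm_doorbell(uint32_t (*read_rptr)(void),
			   void (*write_doorbell)(uint32_t),
			   uint32_t rptr, uint32_t ring_size)
{
	uint32_t i;

	for (i = 0; i < MAX_REARM_RETRY; i++) {
		uint32_t v = read_rptr();

		/* in-range but stale: the doorbell write was dropped */
		if (v < ring_size && v != rptr)
			write_doorbell(rptr);
		else
			break; /* hardware caught up (or rptr invalid) */
	}
}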
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 65eb378fa035..149d386590df 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -318,6 +318,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
{
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+ struct ras_err_data err_data = {0, 0, 0, NULL};
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -332,7 +333,19 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
* clear error status after ras_controller_intr according to
* hw team and count ue number for query
*/
- nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+ nbio_v7_4_query_ras_error_count(adev, &err_data);
+
+ /* accumulate the error counts and print them for awareness */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+
+ if (err_data.ce_count)
+ DRM_INFO("%ld correctable errors detected in %s block\n",
+ obj->err_data.ce_count, adev->nbio.ras_if->name);
+
+ if (err_data.ue_count)
+ DRM_INFO("%ld uncorrectable errors detected in %s block\n",
+ obj->err_data.ue_count, adev->nbio.ras_if->name);
DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2d1bebdf1603..033cbbca2072 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 36b65797434e..a44fd6060d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -31,6 +31,9 @@
#define GFX_CMD_RESERVED_MASK 0x7FF00000
#define GFX_CMD_RESPONSE_MASK 0x80000000
+/* USBC PD FW version retrieval command */
+#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000
+
/* TEE Gfx Command IDs for the register interface.
* Command ID must be between 0x00010000 and 0x000F0000.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0829188c1a5c..0afd610a1263 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v11_0.h"
@@ -65,6 +66,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+/* For large FW files the time to complete can be very long */
+#define USBC_PD_POLLING_LIMIT_S 240
+
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -420,7 +424,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- psp_v11_0_reroute_ih(psp);
+ if (!amdgpu_sriov_vf(adev))
+ psp_v11_0_reroute_ih(psp);
ring = &psp->km_ring;
@@ -864,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
if (ret)
return -EINVAL;
+ /* If err_event_athub occurred, the error injection was successful;
+ * however, the return status from the TA is no longer reliable */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
return ras_cmd->ras_status;
}
@@ -1108,6 +1118,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
+static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t reg_status;
+ int ret, i = 0;
+
+ /* Write lower 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up the interrupt so PSP can pick up the lower address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ /* Write upper 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up the interrupt so PSP can pick up the upper address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);
+
+ /* FW load can take a very long time */
+ do {
+ msleep(1000);
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if (reg_status & 0x80000000)
+ goto done;
+
+ } while (++i < USBC_PD_POLLING_LIMIT_S);
+
+ return -ETIME;
+done:
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (!ret)
+ *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
+
+ return ret;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -1132,6 +1218,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.mem_training = psp_v11_0_memory_training,
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
+ .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
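How callers reach the two new hooks is not shown in this patch; a plausible sketch, assuming the generic PSP layer dispatches through psp->funcs (these wrapper names are hypothetical, not the actual amdgpu_psp.c API):

static int psp_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
{
	if (!psp->funcs || !psp->funcs->load_usbc_pd_fw)
		return -EOPNOTSUPP;

	return psp->funcs->load_usbc_pd_fw(psp, dma_addr);
}

static int psp_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
{
	if (!psp->funcs || !psp->funcs->read_usbc_pd_fw)
		return -EOPNOTSUPP;

	return psp->funcs->read_usbc_pd_fw(psp, fw_ver);
}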
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e55884d204bd..5f3a5ee2a3f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -677,7 +677,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
- * sdma_v4_0_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
@@ -977,7 +977,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
@@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle)
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- int i;
- /* read back edc counter registers to clear the counters */
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- for (i = 0; i < adev->sdma.num_instances; i++)
- RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
- }
+ if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
return adev->sdma.funcs->ras_late_init(adev, &ih_info);
@@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
};
+static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i;
+
+ /* read back edc counter registers to clear the counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+ }
+}
+
static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
.ras_late_init = amdgpu_sdma_ras_late_init,
.ras_fini = amdgpu_sdma_ras_fini,
.query_ras_error_count = sdma_v4_0_query_ras_error_count,
+ .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 67b9830b7c7e..ebfd2cdf4e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -746,11 +746,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
sdma_v5_0_enable(adev, true);
}
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
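The switch to amdgpu_ring_test_helper() removes the hand-rolled ready-flag handling; a sketch of what such a helper folds together, assuming it marks the ring not-ready on failure (the real helper lives in amdgpu_ring.c and also logs the error):

static int ring_test_helper_sketch(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);

	/* a failed self-test leaves the ring unschedulable */
	ring->sched.ready = !r;

	return r;
}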
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 4cb4c891120b..0860e85a2d35 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3439,7 +3439,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0xC3) ||
(adev->pdev->device == 0x6664) ||
(adev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index c902f26cf50d..9bffbab35041 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -46,8 +46,7 @@
#define I2C_NO_STOP 1
#define I2C_RESTART 2
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
-#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor)
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
{
@@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_lock(i2c)) {
DRM_ERROR("Failed to lock the bus from SMU");
@@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_unlock(i2c)) {
DRM_ERROR("Failed to unlock the bus from SMU");
@@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
int i, ret;
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!control->bus_locked) {
DRM_ERROR("I2C bus unlocked, stopping transaction!");
@@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
res = i2c_add_adapter(control);
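The new to_amdgpu_device() works because the adapter is embedded in struct amdgpu_device; a standalone, userspace demonstration of the same container_of() arithmetic (the struct names here are made up):

#include <stddef.h>
#include <stdio.h>

/* userspace re-creation of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_adapter { int nr; };
struct fake_device { int id; struct fake_adapter smu_i2c; };

int main(void)
{
	struct fake_device dev = { .id = 7 };
	struct fake_adapter *adap = &dev.smu_i2c;

	/* recover the enclosing device from the embedded adapter */
	struct fake_device *back =
		container_of(adap, struct fake_device, smu_i2c);

	printf("id = %d\n", back->id); /* prints: id = 7 */
	return 0;
}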
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d8945c31b622..a40499d51c93 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -852,6 +852,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
/* change this when we implement soft reset */
return true;
}
+
+static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+ return;
+ /* read back the HDP RAS counter to reset it to 0 */
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
@@ -1019,6 +1028,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_config_memsize = &soc15_get_config_memsize,
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
+ .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1264,6 +1274,10 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
+ if (adev->asic_funcs &&
+ adev->asic_funcs->reset_hdp_ras_error_count)
+ adev->asic_funcs->reset_hdp_ras_error_count(adev);
+
if (adev->nbio.funcs->ras_late_init)
r = adev->nbio.funcs->ras_late_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index d0fb7a67c1a3..b03f950c486c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -42,6 +42,13 @@ struct soc15_reg_golden {
u32 or_mask;
};
+struct soc15_reg_rlcg {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+};
+
struct soc15_reg_entry {
uint32_t hwip;
uint32_t inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 19e870c79896..c893c645a4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -70,10 +70,9 @@
} \
} while (0)
-#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
#define WREG32_RLC(reg, value) \
do { \
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
@@ -98,7 +97,7 @@
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 793bf70e64b1..14d346321a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
&(err_data->ue_count));
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
@@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
}
- /* skip error address process if -ENOMEM */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
@@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
}
err_rec = &err_data->err_addr[err_data->err_addr_cnt];
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
umc_inst);
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
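A hedged sketch of the bracket both hunks above add, factored as a helper (with_df_cstate_disallowed is hypothetical): on Arcturus, the UMC counters are only walked with DF C-states disallowed, and the previous state is restored afterwards, with set-state failures demoted to warnings.

static void with_df_cstate_disallowed(struct amdgpu_device *adev,
				      void (*body)(struct amdgpu_device *))
{
	if (adev->asic_type == CHIP_ARCTURUS &&
	    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		DRM_WARN("Failed to disable DF-Cstate\n");

	body(adev); /* walk UMC instances/channels here */

	if (adev->asic_type == CHIP_ARCTURUS &&
	    amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		DRM_WARN("Failed to enable DF-Cstate\n");
}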
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index b7f17342bbf0..ec8091a661df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
+#include "mmsch_v2_0.h"
#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
@@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
-
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers
*
@@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->vcn.num_vcn_inst = 1;
- adev->vcn.num_enc_rings = 2;
+ if (amdgpu_sriov_vf(adev))
+ adev->vcn.num_enc_rings = 1;
+ else
+ adev->vcn.num_enc_rings = 2;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
@@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ else
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
@@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle)
adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
+ if (amdgpu_sriov_vf(adev))
+ vcn_v2_0_start_sriov(adev);
+
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* UVD disable CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* enable UVD CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
/* Before power off, this indicator has to be turned on */
data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
@@ -1215,6 +1246,9 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (enable) {
/* wait for STATUS to clear */
if (!vcn_v2_0_is_idle(handle))
@@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 4);
if (r)
@@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle,
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->vcn.cur_state)
return 0;
@@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle,
return ret;
}
+static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v2_0_init_header *header;
+ uint32_t size;
+ int i;
+
+ header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
+ size = header->header_size + header->vcn_table_size;
+
+ /* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+ * of memory descriptor location
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+ adev->vcn.inst->ring_dec.wptr = 0;
+ adev->vcn.inst->ring_dec.wptr_old = 0;
+ vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ adev->vcn.inst->ring_enc[i].wptr = 0;
+ adev->vcn.inst->ring_enc[i].wptr_old = 0;
+ vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
+ }
+
+ /* 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop = 1000;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(10);
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ DRM_ERROR("failed to init MMSCH, " \
+ "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
+{
+ int r;
+ uint32_t tmp;
+ struct amdgpu_ring *ring;
+ uint32_t offset, size;
+ uint32_t table_size = 0;
+ struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
+ struct mmsch_v2_0_cmd_end end = { {0} };
+ struct mmsch_v2_0_init_header *header;
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ uint8_t i = 0;
+
+ header = (struct mmsch_v2_0_init_header *)init_table;
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
+ header->version = MMSCH_VERSION;
+ header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
+
+ header->vcn_table_offset = header->header_size;
+
+ init_table += header->vcn_table_offset;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ 0xFFFFFFFF, 0x00000004);
+
+ /* mc resume */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ tmp = AMDGPU_UCODE_ID_VCN;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[tmp].tmr_mc_addr_lo);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[tmp].tmr_mc_addr_hi);
+ offset = 0;
+ } else {
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr));
+ offset = size;
+ }
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ size);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+ ring = &adev->vcn.inst->ring_enc[r];
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ ring->ring_size / 4);
+ }
+
+ ring = &adev->vcn.inst->ring_dec;
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+ /* force RBC into idle state */
+ tmp = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* add end packet */
+ tmp = sizeof(struct mmsch_v2_0_cmd_end);
+ memcpy((void *)init_table, &end, tmp);
+ table_size += (tmp / 4);
+ header->vcn_table_size = table_size;
+
+ }
+ return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
+}
+
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
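A compact sketch of the mailbox handshake vcn_v2_0_start_mmsch() performs after the table is uploaded (the register accessor is a stand-in): the host writes 0x10000001 to MMSCH_VF_MAILBOX_HOST, then polls MMSCH_VF_MAILBOX_RESP until the 0x10000002 bits appear or the budget runs out.

#include <errno.h>
#include <stdint.h>

static int wait_for_mmsch_resp(uint32_t (*read_resp)(void))
{
	int loop = 1000;

	do {
		uint32_t data = read_resp();

		if ((data & 0x10000002) == 0x10000002)
			return 0; /* MMSCH consumed the table */
		/* udelay(10) between polls in the real code */
	} while (--loop);

	return -EBUSY;
}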
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 678253d81154..c6363f5ad564 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v2_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
} else {
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ u32 harvest;
+ int i;
+
+ adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->vcn.harvest_config |= 1 << i;
+ }
+
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else
+ adev->vcn.num_vcn_inst = 1;
+
adev->vcn.num_enc_rings = 2;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 3f0300e53727..0ec5f25adf56 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -127,6 +127,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
return PTR_ERR(process);
if (kfd_is_locked()) {
+ dev_dbg(kfd_device, "kfd is locked! process %d unreferenced\n",
+ process->pasid);
kfd_unref_process(process);
return -EAGAIN;
}
@@ -1167,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
if (!dev)
return -EINVAL;
- dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+ amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
args->gb_addr_config = config.gb_addr_config;
args->num_banks = config.num_banks;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 2a9e40131735..d5386f15c4a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -648,6 +648,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->get_hive_id)
kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
+ if (kfd->kfd2kgd->get_unique_id)
+ kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);
+
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
@@ -710,7 +713,7 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
@@ -731,7 +734,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
kfd->dqm->ops.pre_reset(kfd->dqm);
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
kfd_signal_reset_event(kfd);
return 0;
@@ -765,21 +768,23 @@ bool kfd_is_locked(void)
return (atomic_read(&kfd_locked) > 0);
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
if (!kfd->init_complete)
return;
- /* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_locked) == 1)
- kfd_suspend_all_processes();
+ /* for runtime suspend, skip locking kfd */
+ if (!run_pm) {
+ /* For first KFD device suspend all the KFD processes */
+ if (atomic_inc_return(&kfd_locked) == 1)
+ kfd_suspend_all_processes();
+ }
kfd->dqm->ops.stop(kfd->dqm);
-
kfd_iommu_suspend(kfd);
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count;
@@ -790,10 +795,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_locked);
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
- ret = kfd_resume_all_processes();
+ /* for runtime resume, skip unlocking kfd */
+ if (!run_pm) {
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+ }
return ret;
}
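A minimal model of the kfd_locked accounting after this change, assuming a single global counter (C11 atomics stand in for the kernel's atomic_t): only non-runtime transitions touch the lock, so runtime PM can power the device down without freezing user processes.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int kfd_locked;

static void model_suspend(bool run_pm)
{
	/* runtime suspend skips the process freeze entirely */
	if (!run_pm && atomic_fetch_add(&kfd_locked, 1) + 1 == 1) {
		/* first device down: kfd_suspend_all_processes() */
	}
}

static void model_resume(bool run_pm)
{
	if (!run_pm && atomic_fetch_sub(&kfd_locked, 1) - 1 == 0) {
		/* last device up: kfd_resume_all_processes() */
	}
}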
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80d22bf702e8..77ea0f0cb163 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
return true;
return false;
}
-unsigned int get_queues_num(struct device_queue_manager *dqm)
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
- return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+ return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES);
}
@@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_xgmi_sdma_engines;
}
+static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
+{
+ return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+}
+
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return dqm->dev->device_info->num_sdma_engines
@@ -132,6 +137,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
qpd->sh_mem_bases);
}
+void increment_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count++;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count++;
+}
+
+void decrement_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count--;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count--;
+}
+
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
struct kfd_dev *dev = qpd->dqm->dev;
@@ -281,8 +302,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
int retval;
- print_queue(q);
-
dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
@@ -359,12 +378,7 @@ add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
- dqm->queue_count++;
-
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
+ increment_queue_count(dqm, q->properties.type);
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -446,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
- if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- } else {
+ else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
return -EINVAL;
@@ -494,7 +506,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
}
qpd->queue_count--;
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
return retval;
}
@@ -563,13 +575,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
/*
* check active state vs. the previous state and modify
* counter accordingly. map_queues_cpsch uses the
- * dqm->queue_count to determine whether a new runlist must be
+ * dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
if (q->properties.is_active && !prev_active)
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
else if (!q->properties.is_active && prev_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
@@ -618,7 +630,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -662,7 +674,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
@@ -731,7 +743,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -786,7 +798,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,16 +911,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->next_pipe_to_allocate = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->active_cp_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
}
@@ -924,7 +935,7 @@ static void uninitialize(struct device_queue_manager *dqm)
{
int i;
- WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+ WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -966,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (dqm->sdma_bitmap == 0)
+ if (dqm->sdma_bitmap == 0) {
+ pr_err("No more SDMA queue to allocate\n");
return -ENOMEM;
+ }
+
bit = __ffs64(dqm->sdma_bitmap);
dqm->sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
@@ -976,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
get_num_sdma_engines(dqm);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (dqm->xgmi_sdma_bitmap == 0)
+ if (dqm->xgmi_sdma_bitmap == 0) {
+ pr_err("No more XGMI SDMA queue to allocate\n");
return -ENOMEM;
+ }
bit = __ffs64(dqm->xgmi_sdma_bitmap);
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
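/*
 * Illustrative aside, not part of the patch: a standalone userspace model
 * of the 64-bit free-queue bitmap used by allocate_sdma_queue() above,
 * where __builtin_ctzll() stands in for the kernel's __ffs64(). The queue
 * count of 8 is an arbitrary example.
 */
#include <stdio.h>
#include <stdint.h>

static int alloc_queue_bit(uint64_t *bitmap)
{
	if (*bitmap == 0)
		return -1;			/* no free queue: the -ENOMEM path above */
	int bit = __builtin_ctzll(*bitmap);	/* index of the lowest set bit */
	*bitmap &= ~(1ULL << bit);		/* mark the queue as taken */
	return bit;
}

int main(void)
{
	uint64_t sdma_bitmap = ~0ULL >> (64 - 8);	/* 8 free SDMA queues */
	int i;

	for (i = 0; i < 3; i++)
		printf("allocated sdma_id %d\n", alloc_queue_bit(&sdma_bitmap));
	return 0;					/* prints 0, 1, 2 */
}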
@@ -1029,7 +1045,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
/ dqm->dev->shared_resources.num_pipe_per_mec;
- if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+ if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
continue;
/* only acquire queues from the first MEC */
@@ -1064,9 +1080,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->processes_count = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->processes_count = 0;
+ dqm->active_cp_queue_count = 0;
+
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
- dqm->queue_count++;
+ increment_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
dqm_lock(dqm);
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1238,13 +1254,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
-
if (q->properties.is_active) {
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
+
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
@@ -1298,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int unmap_sdma_queues(struct device_queue_manager *dqm)
-{
- int i, retval = 0;
-
- for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
- dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
- retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
- if (retval)
- return retval;
- }
- return retval;
-}
-
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
@@ -1319,7 +1317,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
if (!dqm->sched_running)
return 0;
- if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+ if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0;
if (dqm->active_runlist)
return 0;
@@ -1349,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;
- pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
- dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
-
- if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
- unmap_sdma_queues(dqm);
-
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
if (retval)
@@ -1427,18 +1419,15 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
@@ -1648,7 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1656,16 +1645,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
dqm->total_queue_count--;
}
@@ -1742,14 +1728,13 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- (dev->device_info->num_sdma_engines +
- dev->device_info->num_xgmi_sdma_engines) *
+ get_num_all_sdma_engines(dqm) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
- (void *)&(mem_obj->cpu_ptr), true);
+ (void *)&(mem_obj->cpu_ptr), false);
return retval;
}
@@ -1979,7 +1964,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
if (!test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
continue;
r = dqm->dev->kfd2kgd->hqd_dump(
@@ -1995,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
- get_num_xgmi_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 871d3b628d2d..50d919f814e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,9 +180,8 @@ struct device_queue_manager {
struct list_head queues;
unsigned int saved_flags;
unsigned int processes_count;
- unsigned int queue_count;
- unsigned int sdma_queue_count;
- unsigned int xgmi_sdma_queue_count;
+ unsigned int active_queue_count;
+ unsigned int active_cp_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
@@ -219,7 +218,7 @@ void device_queue_manager_init_v10_navi10(
struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 1f8365575b12..15476fca8fa6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd,
if (p->signal_mapped_size &&
p->signal_event_count == p->signal_mapped_size / 8) {
if (!p->signal_event_limit_reached) {
- pr_warn("Signal event wasn't created because limit was reached\n");
+ pr_debug("Signal event wasn't created because limit was reached\n");
p->signal_event_limit_reached = true;
}
return -ENOSPC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index bb77b8890e77..78714f9a8b11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
{
/*
* node id couldn't be 0 - the three MSB bits of
- * aperture shoudn't be 0
+ * aperture shouldn't be 0
*/
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 436b7f518979..48cda3073b70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
int retval;
struct kfd_mem_obj *mqd_mem_obj = NULL;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
+ /* For V9 only, due to a HW bug, the control stack of a user mode
+ * compute queue needs to be allocated just behind the page boundary
+ * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
+ * the first page of the buffer serves the regular MQD buffer
+ * purpose and the remainder holds the control stack. Although the two
+ * parts are in the same buffer object, they need different memory
+ * types: the MQD part needs UC (uncached) as usual, while the control
+ * stack needs NC (non-coherent), unlike the UC type used when the
+ * control stack is allocated in user space.
+ *
+ * Because of all this, we use the gtt allocation function instead
+ * of the sub-allocation function for this enlarged MQD buffer. Moreover,
+ * in order to achieve two memory types in a single buffer object, we
+ * pass the special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
+ * the amdgpu memory functions to do so.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
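/*
 * Illustrative aside, not part of the patch: a minimal sketch of the
 * enlarged MQD buffer layout described above, assuming a 4 KiB page and an
 * example control stack size. The first page holds the MQD (mapped UC); the
 * control stack starts just behind the page boundary (mapped NC). All names
 * here are invented for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define EXAMPLE_PAGE_SIZE 4096u

int main(void)
{
	size_t ctl_stack_size = 8192;			/* example size */
	size_t mqd_offset = 0;				/* first page: MQD, UC */
	size_t ctl_offset = EXAMPLE_PAGE_SIZE;		/* next page: stack, NC */
	size_t total = EXAMPLE_PAGE_SIZE + ctl_stack_size;

	printf("alloc %zu bytes: mqd@%zu, ctl_stack@%zu\n",
	       total, mqd_offset, ctl_offset);
	return 0;
}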
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index dc406e6dee23..efdb75e7677b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -47,9 +47,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
struct kfd_dev *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
- queue_count = pm->dqm->queue_count;
- compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
- pm->dqm->xgmi_sdma_queue_count;
+ queue_count = pm->dqm->active_queue_count;
+ compute_queue_count = pm->dqm->active_cp_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -62,7 +61,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_queues_num(pm->dqm)) {
+ compute_queue_count > get_cp_queues_num(pm->dqm)) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
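/*
 * Illustrative aside, not part of the patch: the over-subscription rule
 * above restated as a standalone predicate, with made-up limits (8 processes
 * per quantum, 24 CP queues). The runlist is over-subscribed when either the
 * active processes exceed the per-quantum limit or the active CP queues
 * exceed the device's CP queue capacity.
 */
#include <stdio.h>
#include <stdbool.h>

static bool over_subscribed(unsigned int processes, unsigned int cp_queues,
			    unsigned int max_proc_per_quantum,
			    unsigned int num_cp_queues)
{
	return processes > max_proc_per_quantum || cp_queues > num_cp_queues;
}

int main(void)
{
	printf("%d\n", over_subscribed(9, 16, 8, 24));	/* 1: too many processes */
	printf("%d\n", over_subscribed(4, 30, 8, 24));	/* 1: too many queues */
	printf("%d\n", over_subscribed(4, 16, 8, 24));	/* 0: fits in one quantum */
	return 0;
}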
@@ -141,7 +140,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n",
- pm->dqm->processes_count, pm->dqm->queue_count);
+ pm->dqm->processes_count, pm->dqm->active_queue_count);
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6af1b5881f43..4a3049841086 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -41,6 +41,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
+#include <linux/swap.h>
#include "amd_shared.h"
@@ -293,6 +294,9 @@ struct kfd_dev {
/* xGMI */
uint64_t hive_id;
+
+ /* UUID */
+ uint64_t unique_id;
bool pci_atomic_requested;
@@ -502,6 +506,9 @@ struct queue {
struct kfd_process *process;
struct kfd_dev *device;
void *gws;
+
+ /* procfs */
+ struct kobject kobj;
};
/*
@@ -646,6 +653,7 @@ struct kfd_process_device {
* function.
*/
bool already_dequeued;
+ bool runtime_inuse;
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
@@ -729,6 +737,7 @@ struct kfd_process {
/* Kobj for our procfs */
struct kobject *kobj;
+ struct kobject *kobj_queues;
struct attribute attr_pasid;
};
@@ -835,6 +844,8 @@ extern struct device *kfd_device;
/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
+int kfd_procfs_add_queue(struct queue *q);
+void kfd_procfs_del_queue(struct queue *q);
/* Topology */
int kfd_topology_init(void);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 25b90f70aecd..fe0cd49d4ea7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
+#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
@@ -132,6 +133,88 @@ void kfd_procfs_shutdown(void)
}
}
+static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct queue *q = container_of(kobj, struct queue, kobj);
+
+ if (!strcmp(attr->name, "size"))
+ return snprintf(buffer, PAGE_SIZE, "%llu",
+ q->properties.queue_size);
+ else if (!strcmp(attr->name, "type"))
+ return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
+ else if (!strcmp(attr->name, "gpuid"))
+ return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
+ else
+ pr_err("Invalid attribute");
+
+ return 0;
+}
+
+static struct attribute attr_queue_size = {
+ .name = "size",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_type = {
+ .name = "type",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_gpuid = {
+ .name = "gpuid",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute *procfs_queue_attrs[] = {
+ &attr_queue_size,
+ &attr_queue_type,
+ &attr_queue_gpuid,
+ NULL
+};
+
+static const struct sysfs_ops procfs_queue_ops = {
+ .show = kfd_procfs_queue_show,
+};
+
+static struct kobj_type procfs_queue_type = {
+ .sysfs_ops = &procfs_queue_ops,
+ .default_attrs = procfs_queue_attrs,
+};
+
+int kfd_procfs_add_queue(struct queue *q)
+{
+ struct kfd_process *proc;
+ int ret;
+
+ if (!q || !q->process)
+ return -EINVAL;
+ proc = q->process;
+
+ /* Create proc/<pid>/queues/<queue id> folder */
+ if (!proc->kobj_queues)
+ return -EFAULT;
+ ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
+ proc->kobj_queues, "%u", q->properties.queue_id);
+ if (ret < 0) {
+ pr_warn("Creating proc/<pid>/queues/%u failed",
+ q->properties.queue_id);
+ kobject_put(&q->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+void kfd_procfs_del_queue(struct queue *q)
+{
+ if (!q)
+ return;
+
+ kobject_del(&q->kobj);
+ kobject_put(&q->kobj);
+}
+
int kfd_process_create_wq(void)
{
if (!kfd_process_wq)
@@ -244,10 +327,10 @@ err_alloc_mem:
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
- ALLOC_MEM_FLAGS_WRITABLE |
- ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+ KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+ KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
+ KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -323,6 +406,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (ret)
pr_warn("Creating pasid for pid %d failed",
(int)process->lead_thread->pid);
+
+ process->kobj_queues = kobject_create_and_add("queues",
+ process->kobj);
+ if (!process->kobj_queues)
+ pr_warn("Creating KFD proc/queues folder failed");
}
out:
if (!IS_ERR(process))
@@ -440,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfree(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
+ /*
+ * Before destroying the pdd, make sure to report availability
+ * for auto suspend.
+ */
+ if (pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
+ pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
+ pdd->runtime_inuse = false;
+ }
+
kfree(pdd);
}
}
@@ -457,6 +555,9 @@ static void kfd_process_wq_release(struct work_struct *work)
/* Remove the procfs files */
if (p->kobj) {
sysfs_remove_file(p->kobj, &p->attr_pasid);
+ kobject_del(p->kobj_queues);
+ kobject_put(p->kobj_queues);
+ p->kobj_queues = NULL;
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -540,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
+ /* Signal the eviction fence after user mode queues are
+ * destroyed. This allows any BOs to be freed without
+ * triggering pointless evictions or waiting for fences.
+ */
+ dma_fence_signal(p->ef);
mutex_unlock(&p->mutex);
@@ -591,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
+ | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -754,6 +861,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
+ pdd->runtime_inuse = false;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@@ -843,15 +951,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
+ /*
+ * Signal the runtime-pm system to auto resume, and prevent
+ * further runtime suspends from the time the device pdd is
+ * created until the pdd is destroyed.
+ */
+ if (!pdd->runtime_inuse) {
+ err = pm_runtime_get_sync(dev->ddev->dev);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
err = kfd_iommu_bind_process_to_device(pdd);
if (err)
- return ERR_PTR(err);
+ goto out;
err = kfd_process_device_init_vm(pdd, NULL);
if (err)
- return ERR_PTR(err);
+ goto out;
+
+ /*
+ * Make sure the runtime usage counter is incremented just once
+ * per pdd.
+ */
+ pdd->runtime_inuse = true;
return pdd;
+
+out:
+ /* balance runpm reference count and exit with error */
+ if (!pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(dev->ddev->dev);
+ pm_runtime_put_autosuspend(dev->ddev->dev);
+ }
+
+ return ERR_PTR(err);
}
struct kfd_process_device *kfd_get_first_process_device_data(
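/*
 * Illustrative aside, not part of the patch: the runtime-PM balancing
 * pattern used by kfd_bind_process_to_device() above, distilled into a
 * sketch. The pm_runtime_* calls are the standard kernel APIs; "dev",
 * "in_use" and the setup step are placeholders. The reference is taken at
 * most once per pdd and dropped exactly once, either on a failed bind or
 * when the pdd is destroyed.
 */
#include <linux/pm_runtime.h>
#include <linux/types.h>

static int bind_with_runtime_pm(struct device *dev, bool *in_use)
{
	bool got_ref = false;
	int err;

	if (!*in_use) {
		err = pm_runtime_get_sync(dev);	/* auto resume, hold a ref */
		if (err < 0)
			return err;
		got_ref = true;
	}

	err = 0;	/* ... device setup that may fail goes here ... */
	if (err) {
		if (got_ref) {			/* balance the ref on failure */
			pm_runtime_mark_last_busy(dev);
			pm_runtime_put_autosuspend(dev);
		}
		return err;
	}

	*in_use = true;		/* ref now held until the pdd is destroyed */
	return 0;
}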
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 31fcd1b51f00..084c35f55d59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,23 +241,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
- >= get_num_sdma_queues(dev->dqm)) ||
- (type == KFD_QUEUE_TYPE_SDMA_XGMI &&
- dev->dqm->xgmi_sdma_queue_count
- >= get_num_xgmi_sdma_queues(dev->dqm))) {
- pr_debug("Over-subscription is not allowed for SDMA.\n");
- retval = -EPERM;
- goto err_create_queue;
- }
-
+ /* SDMA queues are always allocated statically no matter
+ * which scheduler mode is used. We also do not need to
+ * check whether an SDMA queue can be allocated here, because
+ * allocate_sdma_queue() in create_queue() has the
+ * corresponding check logic.
+ */
retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -266,7 +261,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((dev->dqm->sched_policy ==
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
- (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+ (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
@@ -278,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -299,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -322,12 +316,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (q) {
pr_debug("PQM done creating queue\n");
+ kfd_procfs_add_queue(q);
print_queue_properties(&q->properties);
}
return retval;
err_create_queue:
+ uninit_queue(q);
+ if (kq)
+ kernel_queue_uninit(kq, false);
kfree(pqn);
err_allocate_pqn:
/* check if queues list is empty unregister process from device */
@@ -378,6 +376,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q) {
+ kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 203c823d65f1..aa0bfa78a667 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -490,6 +490,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, "num_cp_queues",
dev->node_props.num_cp_queues);
+ sysfs_show_64bit_prop(buffer, "unique_id",
+ dev->node_props.unique_id);
if (dev->gpu) {
log_max_watch_addr =
@@ -1318,7 +1320,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
- dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
+ dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
+ dev->node_props.unique_id = gpu->unique_id;
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 74e9b1682af8..46eeecaf1b68 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -54,6 +54,7 @@
struct kfd_node_properties {
uint64_t hive_id;
+ uint64_t unique_id;
uint32_t cpu_cores_count;
uint32_t simd_count;
uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6240259b3a93..d3674d805a0a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -98,6 +98,9 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -129,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
-
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
@@ -383,8 +383,8 @@ static void dm_pflip_high_irq(void *interrupt_params)
* of pageflip completion, so last_flip_vblank is the forbidden count
* for queueing new pageflips if vsync + VRR is enabled.
*/
- amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
- amdgpu_crtc->crtc_id);
+ amdgpu_crtc->last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -407,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling is done here after end of front-porch in
* vrr mode, as vblank timestamping will give valid results
@@ -458,8 +459,9 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling at start of front-porch is only possible
* in non-vrr mode, as only there vblank timestamping will give
@@ -522,7 +524,7 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state),
acrtc_state->active_planes);
@@ -813,10 +815,20 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
- memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
- fw_inst_const_size);
+ /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+ * amdgpu_ucode_init_single_fw will load the dmub firmware's
+ * fw_inst_const part to cw0; otherwise, the firmware backdoor
+ * load is done here in dm_dmub_hw_init.
+ */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
adev->bios_size);
@@ -835,6 +847,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb_base = adev->gmc.fb_start;
hw_params.fb_offset = adev->gmc.aper_base;
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
if (dmcu)
hw_params.psp_version = dmcu->psp_version;
@@ -897,7 +913,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.chip_family = adev->family;
- init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
init_data.asic_id.vram_width = adev->gmc.vram_width;
@@ -972,7 +988,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) {
- adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
@@ -1003,11 +1019,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
-#if defined(CONFIG_DEBUG_FS)
- if (dtn_debugfs_init(adev))
- DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
-#endif
-
DRM_DEBUG_DRIVER("KMS initialized.\n");
return 0;
@@ -1091,9 +1102,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA20:
case CHIP_NAVI10:
case CHIP_NAVI14:
- case CHIP_NAVI12:
case CHIP_RENOIR:
return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
@@ -1204,22 +1217,21 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
- AMDGPU_UCODE_ID_DMCUB;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
- adev->dm.dmcub_fw_version);
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
dmub_srv = adev->dm.dmub_srv;
@@ -1839,8 +1851,63 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ u32 max_cll, min_cll, max, min, q, r;
+ struct amdgpu_dm_backlight_caps *caps;
+ struct amdgpu_display_manager *dm;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ static const u8 pre_computed_values[] = {
+ 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
+ 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
+
+ if (!aconnector || !aconnector->dc_link)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = conn_base->dev->dev_private;
+ dm = &adev->dm;
+ caps = &dm->backlight_caps;
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+ max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
+
+ if (caps->ext_caps->bits.oled == 1 ||
+ caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+ caps->aux_support = true;
+
+ /* From the specification (CTA-861-G), the maximum luminance is
+ * calculated as:
+ * Luminance = 50*2**(CV/32)
+ * where CV is a one-byte value.
+ * Evaluating this expression directly would need floating-point
+ * precision; to avoid that, we exploit the fact that CV is divided
+ * by a constant. By Euclid's division algorithm, CV can be written
+ * as CV = 32*q + r. Substituting this into the luminance expression
+ * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute
+ * 50*2**(r/32) for r in 0..31. The table was generated with the
+ * following Ruby line:
+ * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
+ * and the results are stored in pre_computed_values.
+ */
+ q = max_cll >> 5;
+ r = max_cll % 32;
+ max = (1 << q) * pre_computed_values[r];
+
+ // min luminance: maxLum * (CV/255)^2 / 100
+ q = DIV_ROUND_CLOSEST(min_cll, 255);
+ min = max * DIV_ROUND_CLOSEST((q * q), 100);
+
+ caps->aux_max_input_signal = max;
+ caps->aux_min_input_signal = min;
+}
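/*
 * Illustrative aside, not part of the patch: a standalone check of the
 * integer max-luminance computation above. For an example max_cll of 140,
 * the exact formula 50*2^(140/32) gives ~1037.5 nits, while the q/r
 * decomposition gives (1 << 4) * pre_computed_values[12] = 16 * 65 = 1040
 * nits. Compile with -lm.
 */
#include <stdio.h>
#include <math.h>

static const unsigned char pre_computed_values[] = {
	50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
	71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

int main(void)
{
	unsigned int max_cll = 140;		/* example CV byte from the sink */
	unsigned int q = max_cll >> 5;		/* CV = 32*q + r */
	unsigned int r = max_cll % 32;
	unsigned int max = (1 << q) * pre_computed_values[r];

	printf("integer: %u nits, exact: %.1f nits\n",
	       max, 50.0 * pow(2.0, max_cll / 32.0));
	return 0;				/* 1040 vs ~1037.5 */
}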
+
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
@@ -1953,7 +2020,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
aconnector->edid);
}
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
-
+ update_connector_ext_caps(aconnector);
} else {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
@@ -2169,10 +2236,10 @@ static void handle_hpd_rx_irq(void *param)
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
- if (adev->dm.hdcp_workqueue)
- hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
- }
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
@@ -2567,6 +2634,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -2581,9 +2649,11 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
if (caps.caps_valid) {
+ dm->backlight_caps.caps_valid = true;
+ if (caps.aux_support)
+ return;
dm->backlight_caps.min_input_signal = caps.min_input_signal;
dm->backlight_caps.max_input_signal = caps.max_input_signal;
- dm->backlight_caps.caps_valid = true;
} else {
dm->backlight_caps.min_input_signal =
AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
@@ -2591,40 +2661,95 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
}
#else
+ if (dm->backlight_caps.aux_support)
+ return;
+
dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
+static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
+{
+ bool rc;
+
+ if (!link)
+ return 1;
+
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+
+ return rc ? 0 : 1;
+}
+
+static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ const uint32_t user_brightness)
+{
+ u32 min, max, conversion_pace;
+ u32 brightness = user_brightness;
+
+ if (!caps)
+ goto out;
+
+ if (!caps->aux_support) {
+ max = caps->max_input_signal;
+ min = caps->min_input_signal;
+ /*
+ * The brightness input is in the range 0-255.
+ * It needs to be rescaled to lie between the
+ * requested min and max input signal, and it
+ * also needs to be scaled up by 0x101 to
+ * match the DC interface, which has a range
+ * of 0 to 0xffff.
+ */
+ conversion_pace = 0x101;
+ brightness =
+ user_brightness
+ * conversion_pace
+ * (max - min)
+ / AMDGPU_MAX_BL_LEVEL
+ + min * conversion_pace;
+ } else {
+ /* TODO
+ * We are doing a linear interpolation here, which is OK but
+ * does not provide the optimal result. We probably want
+ * something close to the Perceptual Quantizer (PQ) curve.
+ */
+ max = caps->aux_max_input_signal;
+ min = caps->aux_min_input_signal;
+
+ brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
+ + user_brightness * max;
+ // Multiply the value by 1000 since we use millinits
+ brightness *= 1000;
+ brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+ }
+
+out:
+ return brightness;
+}
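/*
 * Illustrative aside, not part of the patch: worked numbers for the non-AUX
 * rescaling above, assuming AMDGPU_MAX_BL_LEVEL == 255 and the default ACPI
 * caps of min 12 / max 255. Full user brightness maps to 255 * 0x101 =
 * 0xffff only when min == 0 and max == 255; a non-zero min floors the
 * result inside the panel's usable window.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_BL_LEVEL 255u	/* stands in for AMDGPU_MAX_BL_LEVEL */

static uint32_t rescale(uint32_t user, uint32_t min, uint32_t max)
{
	return user * 0x101 * (max - min) / MAX_BL_LEVEL + min * 0x101;
}

int main(void)
{
	printf("0x%04x\n", rescale(255, 0, 255));	/* 0xffff: full DC range */
	printf("0x%04x\n", rescale(0, 12, 255));	/* 0x0c0c: floor at min */
	printf("0x%04x\n", rescale(128, 12, 255));	/* mid-scale example */
	return 0;
}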
+
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
- uint32_t brightness = bd->props.brightness;
+ struct dc_link *link = NULL;
+ u32 brightness;
+ bool rc;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
- /*
- * The brightness input is in the range 0-255
- * It needs to be rescaled to be between the
- * requested min and max input signal
- *
- * It also needs to be scaled up by 0x101 to
- * match the DC interface which has a range of
- * 0 to 0xffff
- */
- brightness =
- brightness
- * 0x101
- * (caps.max_input_signal - caps.min_input_signal)
- / AMDGPU_MAX_BL_LEVEL
- + caps.min_input_signal * 0x101;
-
- if (dc_link_set_backlight_level(dm->backlight_link,
- brightness, 0))
- return 0;
- else
- return 1;
+
+ link = (struct dc_link *)dm->backlight_link;
+
+ brightness = convert_brightness(&caps, bd->props.brightness);
+ // Change brightness based on AUX property
+ if (caps.aux_support)
+ return set_backlight_via_aux(link, brightness);
+
+ rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+
+ return rc ? 0 : 1;
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -2909,6 +3034,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ /* No userspace support. */
+ dm->dc->debug.disable_tri_buf = true;
+
return 0;
fail:
kfree(aencoder);
@@ -4200,9 +4328,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dmcu *dmcu = core_dc->res_pool->dmcu;
stream->psr_version = dmcu->dmcu_version.psr_version;
- mod_build_vsc_infopacket(stream,
- &stream->vsc_infopacket,
- &stream->use_vsc_sdp_for_colorimetry);
+
+ //
+ // Decide whether the stream supports VSC SDP colorimetry
+ // before building the VSC info packet.
+ //
+ stream->use_vsc_sdp_for_colorimetry = false;
+ if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ stream->use_vsc_sdp_for_colorimetry =
+ aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+ } else {
+ if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ stream->use_vsc_sdp_for_colorimetry = true;
+ }
+ }
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
}
}
finish:
@@ -4352,8 +4493,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
@@ -4574,6 +4717,19 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
return &new_state->base;
}
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.reset = amdgpu_dm_connector_funcs_reset,
.detect = amdgpu_dm_connector_detect,
@@ -4583,6 +4739,7 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
.early_unregister = amdgpu_dm_connector_unregister
};
@@ -4959,7 +5116,8 @@ static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
.disable = dm_crtc_helper_disable,
.atomic_check = dm_crtc_helper_atomic_check,
- .mode_fixup = dm_crtc_helper_mode_fixup
+ .mode_fixup = dm_crtc_helper_mode_fixup,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
@@ -5922,13 +6080,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
- drm_connector_register(&aconnector->base);
-#if defined(CONFIG_DEBUG_FS)
- connector_debugfs_init(aconnector);
- aconnector->debugfs_dpcd_address = 0;
- aconnector->debugfs_dpcd_size = 0;
-#endif
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
@@ -6414,7 +6565,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
- bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -6460,9 +6610,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
- if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
- swizzle = false;
-
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -6563,7 +6710,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
- last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
}
else {
/* For variable refresh rate mode only:
@@ -6592,7 +6739,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(target_vblank -
- amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
usleep_range(1000, 1100);
}
@@ -6671,8 +6818,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active &&
- swizzle) {
+ !acrtc_state->stream->link->psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 7ea9acb0358d..5cab3e65d992 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -90,15 +90,41 @@ struct dm_comressor_info {
};
/**
- * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
- * @min_input_signal: minimum possible input in range 0-255
- * @max_input_signal: maximum possible input in range 0-255
- * @caps_valid: true if these values are from the ACPI interface
+ * struct amdgpu_dm_backlight_caps - Information about backlight
+ *
+ * Describe the backlight support for ACPI or eDP AUX.
*/
struct amdgpu_dm_backlight_caps {
+ /**
+ * @ext_caps: Keep the data struct with all the information about the
+ * display support for HDR.
+ */
+ union dpcd_sink_ext_caps *ext_caps;
+ /**
+ * @aux_min_input_signal: Min brightness value supported by the display
+ */
+ u32 aux_min_input_signal;
+ /**
+ * @aux_max_input_signal: Max brightness value supported by the display
+ * in nits.
+ */
+ u32 aux_max_input_signal;
+ /**
+ * @min_input_signal: minimum possible input in range 0-255.
+ */
int min_input_signal;
+ /**
+ * @max_input_signal: maximum possible input in range 0-255.
+ */
int max_input_signal;
+ /**
+ * @caps_valid: true if these values are from the ACPI interface.
+ */
bool caps_valid;
+ /**
+ * @aux_support: Describes if the display supports AUX backlight.
+ */
+ bool aux_support;
};
/**
@@ -457,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector);
+
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f81d3439ee8c..0461fecd68db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,6 +32,19 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
+#include "dmub/inc/dmub_srv.h"
+
+struct dmub_debugfs_trace_header {
+ uint32_t entry_count;
+ uint32_t reserved[3];
+};
+
+struct dmub_debugfs_trace_entry {
+ uint32_t trace_code;
+ uint32_t tick_count;
+ uint32_t param0;
+ uint32_t param1;
+};
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -675,6 +688,73 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return bytes_from_user;
}
+/**
+ * Returns the DMCUB tracebuffer contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
+ */
+static int dmub_tracebuffer_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_debugfs_trace_entry *entries;
+ uint8_t *tbuf_base;
+ uint32_t tbuf_size, max_entries, num_entries, i;
+
+ if (!fb_info)
+ return 0;
+
+ tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
+ if (!tbuf_base)
+ return 0;
+
+ tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
+ sizeof(struct dmub_debugfs_trace_entry);
+
+ num_entries =
+ ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+
+ num_entries = min(num_entries, max_entries);
+
+ entries = (struct dmub_debugfs_trace_entry *)
+ (tbuf_base + sizeof(struct dmub_debugfs_trace_header));
+
+ for (i = 0; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the DMCUB firmware state contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
+ */
+static int dmub_fw_state_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ uint8_t *state_base;
+ uint32_t state_size;
+
+ if (!fb_info)
+ return 0;
+
+ state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
+ if (!state_base)
+ return 0;
+
+ state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
+
+ return seq_write(m, state_base, state_size);
+}
+
/*
* Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -880,6 +960,8 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r;
}
+DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
+DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range);
@@ -1008,6 +1090,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ connector->debugfs_dpcd_address = 0;
+ connector->debugfs_dpcd_size = 0;
+
}
/*
@@ -1188,5 +1273,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
+ adev, &dmub_tracebuffer_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
+ adev, &dmub_fw_state_fops);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 0acd3409dd6c..5b70ed3cdb88 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -28,6 +28,13 @@
#include "amdgpu_dm.h"
#include "dm_helpers.h"
#include <drm/drm_hdcp.h>
+#include "hdcp_psp.h"
+
+/*
+ * If the SRM version being loaded is less than or equal to the
+ * currently loaded SRM, psp will return 0xFFFF as the version
+ */
+#define PSP_SRM_VERSION_MAX 0xFFFF
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
@@ -67,6 +74,59 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
+{
+
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return NULL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return NULL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
+ *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
+
+
+ return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
+}
+
+static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+{
+
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return -EINVAL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
+ hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+ hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
+ return -EINVAL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
+ return 0;
+}
+
static void process_output(struct hdcp_workqueue *hdcp_work)
{
struct mod_hdcp_output output = hdcp_work->output;
@@ -88,6 +148,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}
+static void link_lock(struct hdcp_workqueue *work, bool lock)
+{
+
+ int i = 0;
+
+ for (i = 0; i < work->max_link; i++) {
+ if (lock)
+ mutex_lock(&work[i].mutex);
+ else
+ mutex_unlock(&work[i].mutex);
+ }
+}
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index,
struct amdgpu_dm_connector *aconnector,
@@ -112,6 +184,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
if (enable_encryption) {
+ /* Explicitly set the saved SRM, as the sysfs call will come after we have
+ * already enabled hdcp (S3 resume case)
+ */
+ if (hdcp_work->srm_size > 0)
+ psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+ &hdcp_work->srm_version);
+
display->adjust.disable = 0;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -301,8 +380,9 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
}
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
-
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -332,26 +412,170 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dig_be = config->link_enc_inst;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->dp.mst_supported = config->mst_supported;
display->adjust.disable = 1;
link->adjust.auth_delay = 2;
hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+
+/* NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
+ * will automatically split it into one or two calls depending on the size.
+ *
+ * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
+ *
+ * The kernel can only send PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
+ * srm_data_write can be called multiple times.
+ *
+ * The sysfs interface doesn't tell us the total size we will get, so we send partial SRMs to psp
+ * and only the last call sends the full SRM. PSP will fail on every call before the last.
+ *
+ * This means we don't know if the SRM is good until the last call. Because of this limitation we
+ * cannot throw errors early, as that would stop the kernel from writing to sysfs.
+ *
+ * Example 1:
+ * Good SRM size = 5096
+ * first call to write 4096 -> PSP fails
+ * Second call to write 1000 -> PSP passes -> SRM is set
+ *
+ * Example 2:
+ * Bad SRM size = 4096
+ * first call to write 4096 -> PSP fails (this is the same as above, but we don't know if this
+ * is the last call)
+ *
+ * Solution?:
+ * 1: Parse the SRM? -> It is signed, so we don't know where it ends
+ * 2: We can have another sysfs file that passes the size before calling set. -> simpler solution
+ * below
+ *
+ * Easy Solution:
+ * Always call get after set to verify that set was successful.
+ * +----------------------+
+ * | Why it works: |
+ * +----------------------+
+ * PSP will only update its SRM if the one it holds is older than the one we are trying to load.
+ * Always do set first, then get.
+ * -if we try to "1. SET" an older version, PSP will reject it and we can "2. GET" the newer
+ * version and save it
+ *
+ * -if we try to "1. SET" a newer version, PSP will accept it and we can "2. GET" the
+ * same (newer) version back and save it
+ *
+ * -if we try to "1. SET" a newer version and PSP rejects it, that means the format is
+ * incorrect/corrupted and we should correct our SRM by getting it from PSP
+ */
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint32_t srm_version = 0;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+ link_lock(work, true);
+
+ memcpy(work->srm_temp + pos, buffer, count);
+
+ if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
+ DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
+ memcpy(work->srm, work->srm_temp, pos + count);
+ work->srm_size = pos + count;
+ work->srm_version = srm_version;
+ }
+
+ link_lock(work, false);
+
+ return count;
+}
+
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint8_t *srm = NULL;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ size_t ret = count;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+
+ link_lock(work, true);
+
+ srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
+
+ if (!srm) {
+  link_lock(work, false);
+  return -EINVAL;
+ }
+
+ if (pos >= srm_size) {
+  ret = 0;
+  goto ret;
+ }
+
+ if (srm_size - pos < count) {
+ memcpy(buffer, srm + pos, srm_size - pos);
+ ret = srm_size - pos;
+ goto ret;
+ }
+
+ memcpy(buffer, srm + pos, count);
+
+ret:
+ link_lock(work, false);
+ return ret;
+}
+
+/* From the HDCP spec (5. Renewability) the SRM needs to be stored in non-volatile memory.
+ *
+ * For example,
+ * if application "A" sets the SRM (ver 2) and we reboot/suspend, and later application "B"
+ * needs to use HDCP, the version in PSP should still be SRM (ver 2). So the SRM should be
+ * persistent across boots/reboots/suspend/resume/shutdown.
+ *
+ * Currently, when the system goes down (suspend/shutdown) the SRM is cleared from PSP, so for
+ * HDCP we need to make the SRM persistent ourselves.
+ *
+ * -PSP owns the checking of the SRM but has no way to store it in non-volatile memory.
+ * -The kernel cannot write to the filesystem.
+ * -So we need usermode to do this for us, which is why an interface for usermode is needed.
+ *
+ * Usermode can read/write to/from PSP using the sysfs interface.
+ * For example:
+ * to save the SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ */
+static const struct bin_attribute data_attr = {
+ .attr = {.name = "hdcp_srm", .mode = 0664},
+ .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
+ .write = srm_data_write,
+ .read = srm_data_read,
+};
+
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
{
int max_caps = dc->caps.max_links;
- struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL);
+ struct hdcp_workqueue *hdcp_work;
int i = 0;
+ hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
if (hdcp_work == NULL)
+ return NULL;
+
+ hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+
+ if (hdcp_work->srm == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+
+ if (hdcp_work->srm_temp == NULL)
goto fail_alloc_context;
hdcp_work->max_link = max_caps;
for (i = 0; i < max_caps; i++) {
-
mutex_init(&hdcp_work[i].mutex);
INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
@@ -360,7 +584,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
@@ -371,9 +595,17 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
cp_psp->funcs.update_stream_config = update_config;
cp_psp->handle = hdcp_work;
+ /* File created at /sys/class/drm/card0/device/hdcp_srm */
+ hdcp_work[0].attr = data_attr;
+
+ if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
+ DRM_WARN("Failed to create device file hdcp_srm");
+
return hdcp_work;
fail_alloc_context:
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
return NULL;
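
For illustration, a minimal userspace sketch of the set-then-get verification flow described in the srm_data_write() comment above. This is not part of the patch: "srm.bin" is a placeholder name, and the short-write loop reflects sysfs passing at most PAGE_SIZE per write() call.

/* Hypothetical userspace helper: load an SRM blob into PSP through the
 * sysfs node created above, then read it back to verify PSP accepted it
 * -- the "always GET after SET" rule.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SRM_NODE "/sys/class/drm/card0/device/hdcp_srm"
#define MAX_SRM  5120	/* PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE */

int main(void)
{
	unsigned char out[MAX_SRM], back[MAX_SRM];
	ssize_t n, m = 0, w, r;
	int fd;

	fd = open("srm.bin", O_RDONLY);	/* placeholder input file */
	if (fd < 0)
		return 1;
	n = read(fd, out, sizeof(out));
	close(fd);
	if (n <= 0)
		return 1;

	fd = open(SRM_NODE, O_WRONLY);
	if (fd < 0)
		return 1;
	for (ssize_t off = 0; off < n; off += w) {
		/* sysfs accepts at most PAGE_SIZE per write() call */
		w = write(fd, out + off, n - off);
		if (w <= 0)
			return 1;
	}
	close(fd);

	fd = open(SRM_NODE, O_RDONLY);
	if (fd < 0)
		return 1;
	while ((r = read(fd, back + m, sizeof(back) - m)) > 0)
		m += r;
	close(fd);

	/* If PSP rejected the blob (older version or corrupt), readback differs. */
	if (m != n || memcmp(out, back, (size_t)n) != 0)
		fprintf(stderr, "SRM not applied; PSP kept a different version\n");
	return 0;
}
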
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 6abde86bce4a..5159b3a5e5b0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -30,6 +30,7 @@
#include "hdcp.h"
#include "dc.h"
#include "dm_cp_psp.h"
+#include "amdgpu.h"
struct mod_hdcp;
struct mod_hdcp_link;
@@ -52,6 +53,12 @@ struct hdcp_workqueue {
enum mod_hdcp_encryption_status encryption_status;
uint8_t max_link;
+
+ uint8_t *srm;
+ uint8_t *srm_temp;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ struct bin_attribute attr;
};
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
@@ -64,6 +71,6 @@ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_destroy(struct hdcp_workqueue *work);
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 318b474ff20e..c20fb08c450b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return false;
+ DRM_ERROR("Failed to find connector for link!");
+ return false;
}
if (boot) {
@@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return;
+ DRM_ERROR("Failed to find connector for link!");
+ return;
}
DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c(
bool result;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- BUG_ON("Failed to found connector for link!");
+ BUG_ON("Failed to find connector for link!");
return true;
}
@@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid(
/* We don't need the original edid anymore */
kfree(edid);
+ /* connector->display_info will be parsed from EDID and saved
+ * into drm_connector->display_info from edid by call stack
+ * below:
+ * drm_parse_ycbcr420_deep_color_info
+ * drm_parse_hdmi_forum_vsdb
+ * drm_parse_cea_ext
+ * drm_add_display_info
+ * drm_connector_update_edid_property
+ *
+ * drm_connector->display_info will be used by amdgpu_dm funcs,
+ * like fill_stream_properties_from_drm_display_mode
+ */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
edid_status = dm_helpers_parse_edid_caps(
ctx,
&sink->dc_edid,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index da73161043d5..e8208df420d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -154,15 +154,18 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+ int r;
+
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
- amdgpu_dm_connector->debugfs_dpcd_address = 0;
- amdgpu_dm_connector->debugfs_dpcd_size = 0;
#endif
- return drm_dp_mst_connector_late_register(connector, port);
+ return r;
}
static void
@@ -204,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
dsc_caps, NULL,
- &dc_sink->sink_dsc_caps.dsc_dec_caps))
+ &dc_sink->dsc_caps.dsc_dec_caps))
return false;
return true;
@@ -259,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!validate_dsc_caps_on_connector(aconnector))
- memset(&aconnector->dc_sink->sink_dsc_caps,
- 0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+ memset(&aconnector->dc_sink->dsc_caps,
+ 0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
}
}
@@ -437,9 +440,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
- struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -455,39 +455,22 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
}
drm_connector_unregister(connector);
- if (adev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
drm_connector_put(connector);
}
-static void dm_dp_mst_register_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- if (adev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
- else
- DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
-
- drm_connector_register(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.destroy_connector = dm_dp_destroy_mst_connector,
- .register_connector = dm_dp_mst_register_connector
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
aconnector->dm_dp_aux.aux.name = "dmdc";
- aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
- drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
@@ -548,7 +531,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
- &params[i].sink->sink_dsc_caps.dsc_dec_caps,
+ &params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
0,
params[i].timing,
@@ -569,7 +552,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
- &param.sink->sink_dsc_caps.dsc_dec_caps,
+ &param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
(int) kbps, param.timing, &dsc_config);
@@ -766,14 +749,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
params[count].port = aconnector->port;
- params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+ params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
dsc_policy.min_target_bpp,
dsc_policy.max_target_bpp,
- &stream->sink->sink_dsc_caps.dsc_dec_caps,
+ &stream->sink->dsc_caps.dsc_dec_caps,
&stream->timing, &params[count].bw_range))
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
@@ -855,7 +838,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (!aconnector || !aconnector->dc_sink)
continue;
- if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+ if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
continue;
if (computed_streams[i])
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 2f1c9584ac32..37fa7b48250e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
case OBJECT_TYPE_CONNECTOR:
case OBJECT_TYPE_GENERIC:
/* Both Generic and Connector Object ID
@@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
default:
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index c4ba6e84db65..8edc2506d49e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -221,8 +221,8 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev;
- if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
- BREAK_TO_DEBUGGER();
+ BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
+
switch (crev) {
case 6:
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7388c987c595..204d7942a6e5 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -53,25 +53,18 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
+ case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
-#endif
-
case DCN_VERSION_2_0:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
case DCN_VERSION_2_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
- case DCE_VERSION_12_0:
- case DCE_VERSION_12_1:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
+#endif
default:
/* Unsupported DCE */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 5d081c42e81b..2c6db379afae 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[0].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[4], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[1].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[5], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[2].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[6], bw_int_to_fixed(1000)));
- if (ctx->dc->caps.max_slave_planes) {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ calcs_output->stutter_entry_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[0], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[1], bw_int_to_fixed(1000)));
- } else {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[7], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[8], bw_int_to_fixed(1000)));
- }
- calcs_output->stutter_entry_wm_ns[5].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1a37550731de..3960a8db94cb 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -703,11 +703,24 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
}
-unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
+unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
- /* for dali & pollock, the highest voltage level we want is 0 */
- if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
- return 0;
+ /* for low power RV2 variants, the highest voltage level we want is 0 */
+ if (ASICREV_IS_RAVEN2(hw_internal_rev))
+ switch (pci_revision_id) {
+ case PRID_DALI_DE:
+ case PRID_DALI_DF:
+ case PRID_DALI_E3:
+ case PRID_DALI_E4:
+ case PRID_POLLOCK_94:
+ case PRID_POLLOCK_95:
+ case PRID_POLLOCK_E9:
+ case PRID_POLLOCK_EA:
+ case PRID_POLLOCK_EB:
+ return 0;
+ default:
+ break;
+ }
/* we are ok with all levels */
return 4;
@@ -1277,7 +1290,9 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev))
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
+ dc->ctx->asic_id.hw_internal_rev,
+ dc->ctx->asic_id.pci_revision_id))
return true;
else
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a78e5c74c79c..8ec2dfe45d40 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -63,6 +63,25 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, total_plane_count;
+
+ total_plane_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_status stream_status = context->stream_status[i];
+
+  /* Sum up plane_count for all streams (active and virtual). */
+ total_plane_count += stream_status.plane_count;
+ }
+
+ return total_plane_count;
+}
+
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
struct dc_link *edp_link = get_edp_link(dc);
@@ -134,13 +153,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
- if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
- ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
- /* TEMP: this check has to come before ASICREV_IS_RENOIR */
- /* which also incorrectly returns true for Dali/Pollock*/
- rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
- break;
- }
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 49ce46b543ea..55d09adbf0d9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
- if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
- }
}
}
@@ -158,6 +157,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
+ bool p_state_change_support;
+ int total_plane_count;
if (dc->work_arounds.skip_clock_update)
return;
@@ -213,9 +214,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000);
}
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
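
The p-state gate added above can be summarized as a tiny predicate. A sketch only; the rationale in the comment (no active planes means nothing fetches from memory for display) is the assumption behind forcing support on at zero planes:

/* Sketch: mirrors the gate in dcn2_update_clocks() above. */
#include <stdbool.h>

static bool allow_p_state_change(bool requested, int total_plane_count)
{
	/* With zero active planes nothing fetches from memory for display,
	 * so a memory p-state switch cannot underflow any pipe. */
	return requested || total_plane_count == 0;
}
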
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 9ef3f7b91a1d..ab267ddd4abe 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -46,6 +46,7 @@
/* Constants */
#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
+#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */
/* Macros */
@@ -405,7 +406,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
}
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{
int i, num_valid_sets;
@@ -465,16 +466,15 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
- struct pp_smu_wm_range_sets ranges = {0};
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
if (!debug->disable_pplib_wm_range) {
- build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+ build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
- pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges);
}
}
@@ -504,7 +504,7 @@ static struct clk_mgr_funcs dcn21_funcs = {
.notify_wm_ranges = rn_notify_wm_ranges
};
-struct clk_bw_params rn_bw_params = {
+static struct clk_bw_params rn_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
@@ -544,7 +544,7 @@ struct clk_bw_params rn_bw_params = {
};
-struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -581,7 +581,7 @@ struct wm_table ddr4_wm_table = {
}
};
-struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -721,6 +721,13 @@ void rn_clk_mgr_construct(
} else {
struct clk_log_info log_info = {0};
+ clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
+
+ /* SMU versions 55.51.0 and newer no longer have the issue that
+ * required limiting the minimum dispclk */
+ if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
+ debug->min_disp_clk_khz = 0;
+
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
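
SMU_VER_55_51_0 (0x373300) packs major.minor.patch one byte each; a standalone decode sketch, with the byte layout inferred from the 0x373300 == 55.51.0 mapping in the define above:

#include <stdint.h>
#include <stdio.h>

/* Decode a packed SMU version: 0x373300 -> 55.51.0 (0x37=55, 0x33=51). */
static void decode_smu_ver(uint32_t ver)
{
	printf("%u.%u.%u\n", (ver >> 16) & 0xff, (ver >> 8) & 0xff, ver & 0xff);
}

int main(void)
{
	decode_smu_ver(0x373300);	/* prints 55.51.0 */
	return 0;
}
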
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04441dbcba76..2ffb22177df9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -701,7 +701,7 @@ static bool dc_construct(struct dc *dc,
dc_ctx->created_bios = true;
}
-
+ dc->vendor_signature = init_params->vendor_signature;
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
@@ -761,6 +761,28 @@ static bool disable_all_writeback_pipes_for_stream(
return true;
}
+void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+{
+ int i = 0;
+
+ /* Check whether the interdependent_update_lock hook is implemented; the else path takes care of the DCE110 case */
+ if (dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, lock);
+ else {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Copied conditions that were previously in dce110_apply_ctx_for_surface
+ if (stream == pipe_ctx->stream) {
+ if (!pipe_ctx->top_pipe &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -786,11 +808,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (dc->hwss.apply_ctx_for_surface)
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
}
- if (dc->hwss.program_front_end_for_ctx)
- dc->hwss.program_front_end_for_ctx(dc, dangling_context);
}
current_ctx = dc->current_state;
@@ -1210,16 +1241,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed)
continue;
-
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
}
+ }
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1238,19 +1272,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
- if (dc->hwss.program_front_end_for_ctx)
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context);
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
/*
* enable stereo
@@ -1318,18 +1360,12 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
-bool dc_is_hw_initialized(struct dc *dc)
-{
- struct dc_bios *dcb = dc->ctx->dc_bios;
- return dcb->funcs->is_accelerated_mode(dcb);
-}
-
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
struct dc_state *context = dc->current_state;
- if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
+ if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
return true;
post_surface_trace(dc);
@@ -1341,9 +1377,11 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ dc->hwss.optimize_bandwidth(dc, context);
+
dc->optimized_required = false;
+ dc->wm_optimized_required = false;
- dc->hwss.optimize_bandwidth(dc, context);
return true;
}
@@ -1734,14 +1772,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->wb_update)
su_flags->bits.wb_update = 1;
+
+ if (stream_update->dsc_config)
+ su_flags->bits.dsc_changed = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
-
- if (stream_update->dsc_config)
- overall_type = UPDATE_TYPE_FULL;
}
for (i = 0 ; i < surface_count; i++) {
@@ -1776,8 +1815,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL) {
- if (stream_update)
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
@@ -1790,7 +1832,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
- }
+ } else if (dc->wm_optimized_required)
+ dc->optimized_required = true;
}
return type;
@@ -1829,6 +1872,8 @@ static void copy_surface_update_to_plane(
surface->time.index++;
if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
surface->time.index = 0;
+
+ surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
}
if (srf_update->scaling_info) {
@@ -2093,18 +2138,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
- if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
- dp_update_dsc_config(pipe_ctx);
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
- }
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
- if (stream_update->dpms_off) {
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (stream_update->dsc_config)
+ dp_update_dsc_config(pipe_ctx);
+ if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/
@@ -2118,8 +2159,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
core_link_enable_stream(dc->current_state, pipe_ctx);
}
-
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -2175,6 +2214,32 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream &&
+ pipe_ctx->stream == stream) {
+ top_pipe_to_program = pipe_ctx;
+ }
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ else
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+
// Stream updates
if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -2189,6 +2254,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+
+ dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -2224,8 +2295,6 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream == stream) {
struct dc_stream_status *stream_status = NULL;
- top_pipe_to_program = pipe_ctx;
-
if (!pipe_ctx->plane_state)
continue;
@@ -2270,12 +2339,6 @@ static void commit_planes_for_stream(struct dc *dc,
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
- /* Lock the top pipe while updating plane addrs, since freesync requires
- * plane addr update event triggers to be synchronized.
- * top_pipe_to_program is expected to never be NULL
- */
- dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2317,9 +2380,30 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
+ }
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
- }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
// Fire manual trigger only when bottom plane is flipped
for (j = 0; j < dc->res_pool->pipe_count; j++) {
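
The dc.c changes repeat one ordering throughout: lock, program the front end, unlock, then run the deferred work. A standalone sketch of that ordering; the struct below is a stand-in for dc->hwss, and only the call sequence is the point:

#include <stdio.h>

struct hwss_stub {
	void (*interdependent_update_lock)(int lock);
	void (*program_front_end_for_ctx)(void);
	void (*post_unlock_program_front_end)(void);
};

static void lock_fn(int lock)    { printf("lock=%d\n", lock); }
static void program_fn(void)     { printf("program front end\n"); }
static void post_unlock_fn(void) { printf("post-unlock work\n"); }

int main(void)
{
	struct hwss_stub hwss = { lock_fn, program_fn, post_unlock_fn };

	hwss.interdependent_update_lock(1);	/* lock all pipes              */
	hwss.program_front_end_for_ctx();	/* program while locked        */
	hwss.interdependent_update_lock(0);	/* release                     */
	hwss.post_unlock_program_front_end();	/* work that must run unlocked */
	return 0;
}
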
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a09119c10d7c..67cfff1586e9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,7 +45,7 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger)
@@ -585,20 +585,23 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
}
-static bool detect_dp(
- struct dc_link *link,
- struct display_sink_capability *sink_caps,
- bool *converter_disable_audio,
- struct audio_support *audio_support,
- enum dc_detect_reason reason)
+static bool detect_dp(struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ bool *converter_disable_audio,
+ struct audio_support *audio_support,
+ enum dc_detect_reason reason)
{
bool boot = false;
+
sink_caps->signal = link_detect_sink(link, reason);
sink_caps->transaction_type =
get_ddc_transaction_type(sink_caps->signal);
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ dpcd_set_source_specific_data(link);
+
if (!detect_dp_sink_caps(link))
return false;
@@ -606,9 +609,8 @@ static bool detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
- dal_ddc_service_set_transaction_type(
- link->ddc,
- sink_caps->transaction_type);
+ dal_ddc_service_set_transaction_type(link->ddc,
+ sink_caps->transaction_type);
/*
* This call will initiate MST topology discovery. Which
@@ -637,13 +639,10 @@ static bool detect_dp(
if (reason == DETECT_REASON_BOOT)
boot = true;
- dm_helpers_dp_update_branch_info(
- link->ctx,
- link);
+ dm_helpers_dp_update_branch_info(link->ctx, link);
- if (!dm_helpers_dp_mst_start_top_mgr(
- link->ctx,
- link, boot)) {
+ if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
+ link, boot)) {
/* MST not supported */
link->type = dc_connection_single;
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
@@ -651,7 +650,7 @@ static bool detect_dp(
}
if (link->type != dc_connection_mst_branch &&
- is_dp_active_dongle(link)) {
+ is_dp_active_dongle(link)) {
/* DP active dongles */
link->type = dc_connection_active_dongle;
if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
@@ -662,14 +661,15 @@ static bool detect_dp(
return true;
}
- if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER)
*converter_disable_audio = true;
}
} else {
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
- sink_caps,
- audio_support);
+ sink_caps,
+ audio_support);
}
return true;
@@ -769,8 +769,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink)
+ link->local_sink) {
+
+ // need to re-write OUI and brightness in resume case
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ dpcd_set_source_specific_data(link);
+ dc_link_set_default_brightness_aux(link); //TODO: use cached
+ }
+
return true;
+ }
if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
@@ -818,6 +826,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_EDP: {
+ read_current_link_settings_on_detect(link);
+
+ dpcd_set_source_specific_data(link);
+
detect_edp_sink_caps(link);
read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -958,10 +970,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
}
+ if (link->local_sink->edid_caps.panel_patch.disable_fec)
+ link->ctx->dc->debug.disable_fec = true;
+
// Check if edid is the same
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
@@ -1480,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
}
}
-static enum dc_status enable_link_dp(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
+static enum dc_status enable_link_dp(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
@@ -1492,6 +1509,7 @@ static enum dc_status enable_link_dp(
bool fec_enable;
int i;
bool apply_seamless_boot_optimization = false;
+ uint32_t bl_oled_enable_delay = 50; // in ms
// check for seamless boot
for (i = 0; i < state->stream_count; i++) {
@@ -1513,31 +1531,45 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
if (state->clk_mgr && !apply_seamless_boot_optimization)
- state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+
+ // during mode switch we do DP_SET_POWER off then on, and OUI is lost
+ dpcd_set_source_specific_data(link);
skip_video_pattern = true;
if (link_settings.link_rate == LINK_RATE_LOW)
- skip_video_pattern = false;
-
- if (perform_link_training_with_retries(
- &link_settings,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal)) {
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(&link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal)) {
link->cur_link_settings = link_settings;
status = DC_OK;
- }
- else
+ } else {
status = DC_FAIL_DP_LINK_TRAINING;
+ }
- if (link->preferred_training_settings.fec_enable != NULL)
+ if (link->preferred_training_settings.fec_enable)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
dp_set_fec_enable(link, fec_enable);
+
+ // during mode set we do DP_SET_POWER off then on, aux writes are lost
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+ dc_link_set_default_brightness_aux(link); // TODO: use cached if known
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ dc_link_backlight_enable_aux(link, true);
+ }
+
return status;
}
@@ -1733,8 +1765,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1752,8 +1783,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1765,8 +1795,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1786,8 +1815,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1805,8 +1833,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1818,8 +1845,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1837,8 +1863,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1849,8 +1874,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1861,10 +1885,14 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set retimer failed");
}
static void write_i2c_default_retimer_setting(
@@ -1889,8 +1917,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1901,8 +1928,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0B to 0xDA or 0xD8 */
buffer[0] = 0x0B;
@@ -1913,8 +1939,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1925,8 +1950,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0C to 0x1D or 0x91 */
buffer[0] = 0x0C;
@@ -1937,8 +1961,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1949,8 +1972,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
if (is_vga_mode) {
@@ -1965,8 +1987,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1977,8 +1998,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1989,9 +2009,13 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set default retimer failed");
}
static void write_i2c_redriver_setting(
@@ -2020,8 +2044,7 @@ static void write_i2c_redriver_setting(
slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ DC_LOG_DEBUG("Set redriver failed");
}
static void disable_link(struct dc_link *link, enum signal_type signal)
@@ -2400,8 +2423,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if ((psr != NULL) && link->psr_feature_enabled)
- psr->funcs->set_psr_enable(psr, allow_active);
+ if (psr != NULL && link->psr_feature_enabled)
+ psr->funcs->psr_enable(psr, allow_active);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
@@ -2417,7 +2440,7 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmub_psr *psr = dc->res_pool->psr;
if (psr != NULL && link->psr_feature_enabled)
- psr->funcs->get_psr_state(psr_state);
+ psr->funcs->psr_get_state(psr, psr_state);
else if (dmcu != NULL && link->psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
@@ -2589,7 +2612,7 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0;
if (psr)
- link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+ link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else
link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
@@ -2922,10 +2945,13 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
memset(&config, 0, sizeof(config));
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ /* stream_enc_inst */
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ config.mst_supported = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
}
@@ -3061,6 +3087,9 @@ void core_link_enable_stream(
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
+ if (stream->sink_patches.delay_ignore_msa > 0)
+ msleep(stream->sink_patches.delay_ignore_msa);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_HDCP)
@@ -3373,7 +3402,7 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
- if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
/* Account for FEC overhead.
* We have to do it based on caps,
* and not based on FEC being set ready,
@@ -3417,3 +3446,11 @@ void dc_link_overwrite_extended_receiver_cap(
dp_overwrite_extended_receiver_cap(link);
}
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ return (dc_is_dp_signal(link->connector_signal) &&
+ link->link_enc->features.fec_supported &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+}
+
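
dc_link_bandwidth_kbps() now accounts for FEC overhead only behind a combined gate; a sketch of the predicate composition (assumption: debug.disable_fec is the flag populated from the sink's EDID panel patch during detection, as in the dc_link_detect_helper hunk above):

/* Sketch: FEC overhead is counted only when the link supports FEC and the
 * per-sink debug override has not disabled it. */
#include <stdbool.h>

static bool fec_overhead_counted(bool fec_supported, bool debug_disable_fec)
{
	return fec_supported && !debug_disable_fec;
}
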
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index a49c10d5df26..256889eed93e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -126,22 +126,16 @@ struct aux_payloads {
struct vector payloads;
};
-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+static bool dal_ddc_i2c_payloads_create(
+ struct dc_context *ctx,
+ struct i2c_payloads *payloads,
+ uint32_t count)
{
- struct i2c_payloads *payloads;
-
- payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
-
- if (!payloads)
- return NULL;
-
if (dal_vector_construct(
&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
- return payloads;
-
- kfree(payloads);
- return NULL;
+ return true;
+ return false;
}
static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
@@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
return p->payloads.count;
}
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
{
- if (!p || !*p)
+ if (!p)
return;
- dal_vector_destruct(&(*p)->payloads);
- kfree(*p);
- *p = NULL;
+ dal_vector_destruct(&p->payloads);
}
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
@@ -524,9 +516,13 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
+
if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
return false;
+ if (!payloads_num)
+ return false;
+
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
@@ -557,23 +553,25 @@ bool dal_ddc_service_query_ddc_data(
ret = dal_ddc_submit_aux_command(ddc, &payload);
}
} else {
- struct i2c_payloads *payloads =
- dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+ struct i2c_command command = {0};
+ struct i2c_payloads payloads;
- struct i2c_command command = {
- .payloads = dal_ddc_i2c_payloads_get(payloads),
- .number_of_payloads = 0,
- .engine = DDC_I2C_COMMAND_ENGINE,
- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+ if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ return false;
+
+ command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.number_of_payloads = 0;
+ command.engine = DDC_I2C_COMMAND_ENGINE;
+ command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
dal_ddc_i2c_payloads_add(
- payloads, address, write_size, write_buf, true);
+ &payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add(
- payloads, address, read_size, read_buf, false);
+ &payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(payloads);
+ dal_ddc_i2c_payloads_get_count(&payloads);
ret = dm_helpers_submit_i2c(
ddc->ctx,
@@ -686,6 +684,10 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
uint8_t write_buffer[2] = {0};
/*Lower than 340 Scramble bit from SCDC caps*/
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &sink_version, sizeof(sink_version));
if (sink_version == 1) {
@@ -715,6 +717,10 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
uint8_t tmds_config = 0;
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
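
The reworked DDC helpers now take caller-owned storage rather than heap-allocating the wrapper. A usage sketch mirroring the query path above (a fragment, using the types and helpers as reworked in this patch; submission and teardown of the vector are the only allocations left):

/* Construct on the caller's stack, add write+read payloads, submit, destruct. */
struct i2c_payloads payloads;
struct i2c_command command = {0};

if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
	return false;				/* vector construct failed */

dal_ddc_i2c_payloads_add(&payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add(&payloads, address, read_size, read_buf, false);

command.payloads = dal_ddc_i2c_payloads_get(&payloads);
command.number_of_payloads = dal_ddc_i2c_payloads_get_count(&payloads);

/* ... submit via dm_helpers_submit_i2c(ddc->ctx, ddc->link, &command) ... */

dal_ddc_i2c_payloads_destroy(&payloads);	/* destructs the vector only; no kfree */
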
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index fd9e69634c50..7cbb1efb4f68 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -945,6 +945,17 @@ static enum link_training_result perform_channel_equalization_sequence(
}
#define TRAINING_AUX_RD_INTERVAL 100 //us
+static void start_clock_recovery_pattern_early(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
+ __func__);
+ dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+ dp_set_hw_lane_settings(link, lt_settings, offset);
+ udelay(400);
+}
+
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings,
@@ -962,7 +973,8 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0;
retry_count = 0;
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
/* najeeb - The Synaptics MST hub can put the LT into an
 * infinite loop by switching the VS
@@ -1434,6 +1446,13 @@ enum link_training_result dc_link_dp_perform_link_training(
&link->preferred_training_settings,
&lt_settings);
+ /* Configure lttpr mode */
+ if (!link->is_lttpr_mode_transparent)
+ configure_lttpr_mode(link);
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1445,8 +1464,6 @@ enum link_training_result dc_link_dp_perform_link_training(
dp_set_fec_ready(link, fec_enable);
if (!link->is_lttpr_mode_transparent) {
- /* Configure lttpr mode */
- configure_lttpr_mode(link);
/* 2. perform link training (set link training done
* to false is done as well)
@@ -1654,6 +1671,8 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_set_panel_mode(link, panel_mode);
/* Attempt to train with given link training settings */
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
/* Set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1892,6 +1911,16 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal);
+ dp_cs_id = get_clock_source_id(link);
+
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+ cur_link_setting = initial_link_settings;
+
/* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training.
@@ -1902,15 +1931,6 @@ bool dp_verify_link_cap(
dp_disable_link_phy(link, link->connector_signal);
}
- dp_cs_id = get_clock_source_id(link);
-
- /* link training starts with the maximum common settings
- * supported by both sink and ASIC.
- */
- initial_link_settings = get_common_supported_link_settings(
- *known_limit_link_setting,
- max_link_cap);
- cur_link_setting = initial_link_settings;
do {
skip_video_pattern = true;
@@ -2654,9 +2674,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
break;
}
- test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+ if (dpcd_test_params.bits.CLR_FORMAT == 0)
+ test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+ else
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
dc_link_dp_set_test_pattern(
link,
@@ -3165,6 +3188,23 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false;
}
+/* Read additional sink caps defined in the source-specific DPCD area.
+ * This function currently only reads from the SinkCapability address
+ * (DP_SOURCE_SINK_CAP).
+ */
+static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data;
+
+ if (!link)
+ return false;
+
+ if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
+ return false;
+
+ link->dpcd_sink_ext_caps.raw = dpcd_data;
+ return true;
+}
+
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -3448,6 +3488,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
}
+ if (!dpcd_read_sink_ext_caps(link))
+ link->dpcd_sink_ext_caps.raw = 0;
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -3600,6 +3643,8 @@ void detect_edp_sink_caps(struct dc_link *link)
}
}
link->verified_link_cap = link->reported_link_cap;
+
+ dc_link_set_default_brightness_aux(link);
}
void dc_link_dp_enable_hpd(const struct dc_link *link)
@@ -3691,7 +3736,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *odm_pipe;
enum controller_dp_color_space controller_color_space;
int opp_cnt = 1;
- int count;
+ int offset = 0;
+ int dpg_width = width;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3713,33 +3759,30 @@ static void set_crtc_test_pattern(struct dc_link *link,
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
+ dpg_width = width / opp_cnt;
+ offset = dpg_width;
- width /= opp_cnt;
+ opp->funcs->opp_set_disp_pattern_generator(opp,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
controller_test_pattern,
controller_color_space,
color_depth,
NULL,
- width,
- height);
- }
- opp->funcs->opp_set_disp_pattern_generator(opp,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- width,
- height);
- /* wait for dpg to blank pixel data with test pattern */
- for (count = 0; count < 1000; count++) {
- if (opp->funcs->dpg_is_blanked(opp))
- break;
- udelay(100);
+ dpg_width,
+ height,
+ offset);
+ offset += dpg_width; /* advance by one ODM slice per pipe */
}
}
}
@@ -3757,11 +3800,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
else if (opp->funcs->opp_set_disp_pattern_generator) {
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ int dpg_width = width;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
- width /= opp_cnt;
+ dpg_width = width / opp_cnt;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
@@ -3771,16 +3815,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
opp->funcs->opp_set_disp_pattern_generator(opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
}
break;
@@ -3958,6 +4004,11 @@ bool dc_link_dp_set_test_pattern(
default:
break;
}
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
/* update MSA to requested color space */
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream->timing,
@@ -3965,9 +4016,27 @@ bool dc_link_dp_set_test_pattern(
pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+ else
+ pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+ resource_build_info_frame(pipe_ctx);
+ link->dc->hwss.update_info_frame(pipe_ctx);
+ }
+
/* CRTC Patterns */
set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
-
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -4097,8 +4166,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
struct link_encoder *link_enc = link->link_enc;
uint8_t fec_config = 0;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_ready &&
@@ -4133,8 +4201,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
{
struct link_encoder *link_enc = link->link_enc;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_enable &&
@@ -4157,3 +4224,148 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
+void dpcd_set_source_specific_data(struct dc_link *link)
+{
+ const uint32_t post_oui_delay = 30; // 30ms
+
+ if (!link->dc->vendor_signature.is_valid) {
+ struct dpcd_amd_signature amd_signature;
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+ amd_signature.device_id_byte1 =
+ (uint8_t)(link->ctx->asic_id.chip_id);
+ amd_signature.device_id_byte2 =
+ (uint8_t)(link->ctx->asic_id.chip_id >> 8);
+ memset(&amd_signature.zero, 0, 4);
+ amd_signature.dce_version =
+ (uint8_t)(link->ctx->dce_version);
+ amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+
+ } else {
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ link->dc->vendor_signature.data.raw,
+ sizeof(link->dc->vendor_signature.data.raw));
+ }
+
+ // Sink may need to configure internals based on vendor, so allow some
+ // time before proceeding with possibly vendor specific transactions
+ msleep(post_oui_delay);
+}
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ uint8_t backlight_control = isHDR ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ // OLEDs have no PWM, they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
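Note: the set path above writes the 32-bit level through a pointer cast onto the 4-byte struct, which yields LSB-first bytes on a little-endian host. An endian-independent sketch of the same packing, under the assumption that the DPCD register wants the level LSB-first (which is what the cast produces on x86):

	#include <stdint.h>

	/* Pack level (millinits) plus transition time (ms) into the 6-byte
	 * DPCD layout used by dpcd_source_backlight_set, LSB first. */
	static void pack_backlight_set(uint8_t out[6], uint32_t millinits,
				       uint16_t transition_ms)
	{
		out[0] = millinits & 0xff;
		out[1] = (millinits >> 8) & 0xff;
		out[2] = (millinits >> 16) & 0xff;
		out[3] = (millinits >> 24) & 0xff;
		out[4] = transition_ms & 0xff;
		out[5] = (transition_ms >> 8) & 0xff;
	}

The shift form is layout-independent; the driver's cast is shorter but bakes in host endianness.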
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ union dpcd_source_backlight_get dpcd_backlight_get;
+
+ memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ dpcd_backlight_get.raw,
+ sizeof(union dpcd_source_backlight_get)))
+ return false;
+
+ *backlight_millinits_avg =
+ dpcd_backlight_get.bytes.backlight_millinits_avg;
+ *backlight_millinits_peak =
+ dpcd_backlight_get.bytes.backlight_millinits_peak;
+
+ /* On non-supported panels dpcd_read usually succeeds with 0 returned */
+ if (*backlight_millinits_avg == 0 ||
+ *backlight_millinits_avg > *backlight_millinits_peak)
+ return false;
+
+ return true;
+}
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
+{
+ uint8_t backlight_enable = enable ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
+ &backlight_enable, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+// We read the default from 0x320 because we expect the BIOS to have written
+// it there; the regular get-backlight-nits path reads the panel-set value at 0x326.
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
+{
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *) backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+
+ return true;
+}
+
+bool dc_link_set_default_brightness_aux(struct dc_link *link)
+{
+ uint32_t default_backlight;
+
+ if (link &&
+ (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ if (!dc_link_read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+ // If below 5 nits or above 5000 nits, it is likely a bad readback
+ if (default_backlight < 5000 || default_backlight > 5000000)
+ default_backlight = 150000; // fall back to 150 nits
+
+ return dc_link_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index ddb855045767..51e0ee6e7695 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -153,18 +153,19 @@ bool edp_receiver_ready_T9(struct dc_link *link)
unsigned char edpRev = 0;
enum dc_status result = DC_OK;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- do {
- sinkstatus = 1;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 0)
- break;
- if (result != DC_OK)
- break;
- udelay(100); //MAx T9
- } while (++tries < 50);
+
+ /* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); // Max T9
+ } while (++tries < 50);
+ }
if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
@@ -183,21 +184,22 @@ bool edp_receiver_ready_T7(struct dc_link *link)
unsigned long long time_taken_in_ns = 0;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (result == DC_OK && edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- enter_timestamp = dm_get_timestamp(link->ctx);
- do {
- sinkstatus = 0;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 1)
- break;
- if (result != DC_OK)
- break;
- udelay(25);
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
- } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
+
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ /* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ enter_timestamp = dm_get_timestamp(link->ctx);
+ do {
+ sinkstatus = 0;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 1)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50 ms
+ }
if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
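Note on the two rewrites above: both T7 and T9 now gate the DP_SINK_STATUS polling on a successful DPCD-rev read and eDP >= 1.2 instead of early-returning, bounding the wait by retry count (T9) or elapsed time (T7). A user-space sketch of the time-bounded form; clock_gettime stands in for dm_get_timestamp, and the 50 ms budget matches the T7 maximum used above:

	#include <stdbool.h>
	#include <stdint.h>
	#include <time.h>

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/* Poll read_status() every poll_us microseconds until it reports ready
	 * or budget_ns elapses; returns whether the sink became ready. */
	static bool poll_until_ready(bool (*read_status)(void),
				     unsigned int poll_us, uint64_t budget_ns)
	{
		uint64_t start = now_ns();

		do {
			if (read_status())
				return true;
			struct timespec d = { 0, (long)poll_us * 1000 };
			nanosleep(&d, NULL);
		} while (now_ns() - start < budget_ns);
		return false;
	}

For T7 the budget would be 50 * 1000000 ns with a 25 us poll, mirroring the loop above.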
@@ -429,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -533,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
DC_LOG_DSC(" ");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a0eb9e533a61..75c7ce4c7581 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,12 +46,12 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
+#include "dce120/dce120_resource.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h"
-#endif
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
-#include "dce120/dce120_resource.h"
+#endif
#define DC_LOGGER_INIT(logger)
@@ -532,6 +532,51 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+int get_num_odm_splits(struct pipe_ctx *pipe)
+{
+ int odm_split_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ while (next_pipe) {
+ odm_split_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_split_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_split_count;
+}
+
+static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+{
+ *split_count = get_num_odm_splits(pipe_ctx);
+ *split_idx = 0;
+ if (*split_count == 0) {
+ /*Check for mpc split*/
+ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_idx)++;
+ (*split_count)++;
+ split_pipe = split_pipe->top_pipe;
+ }
+ split_pipe = pipe_ctx->bottom_pipe;
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_count)++;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ } else {
+ /*Get odm split index*/
+ struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+
+ while (split_pipe) {
+ (*split_idx)++;
+ split_pipe = split_pipe->prev_odm_pipe;
+ }
+ }
+}
+
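Note: get_num_odm_splits() counts peers in both directions of the doubly linked ODM chain, so every pipe in a chain reports the same split count; calculate_split_count_and_index() then derives the pipe's own index by counting only prev links. The walk, reduced to a toy (struct name hypothetical):

	struct pipe {
		struct pipe *next_odm;
		struct pipe *prev_odm;
	};

	/* Number of ODM splits = peers in both directions (0 means unsplit). */
	static int num_odm_splits(const struct pipe *p)
	{
		int n = 0;
		const struct pipe *it;

		for (it = p->next_odm; it; it = it->next_odm)
			n++;
		for (it = p->prev_odm; it; it = it->prev_odm)
			n++;
		return n;
	}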
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -541,16 +586,16 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ int split_count = 0;
+ int split_idx = 0;
bool orthogonal_rotation, flip_y_start, flip_x_start;
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pri_split = false;
- sec_split = false;
+ split_count = 0;
+ split_idx = 0;
}
/* The actual clip is an intersection between stream
@@ -609,23 +654,32 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport.height = clip.height * surf_src.height / dest.height;
/* Handle split */
- if (pri_split || sec_split) {
+ if (split_count) {
+ /* Extra pixels from the division remainder go to the pipes whose index
+ * is greater than the extra-pixel-index-minus-one (epimo), defined here as:
+ */
+ int epimo = 0;
+
if (orthogonal_rotation) {
- if (flip_y_start != pri_split)
- data->viewport.height /= 2;
- else {
- data->viewport.y += data->viewport.height / 2;
- /* Ceil offset pipe */
- data->viewport.height = (data->viewport.height + 1) / 2;
- }
+ if (flip_y_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.height % (split_count + 1);
+
+ data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.y += split_idx - epimo - 1;
+ data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
} else {
- if (flip_x_start != pri_split)
- data->viewport.width /= 2;
- else {
- data->viewport.x += data->viewport.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- }
+ if (flip_x_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.width % (split_count + 1);
+
+ data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.x += split_idx - epimo - 1;
+ data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
}
}
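Note on the hunk above: the viewport split now handles any number of pipes, not just two. Every pipe gets width / (split_count + 1) pixels, and pipes with index above epimo absorb one remainder pixel each, which keeps the segments contiguous and exactly covering the total. A self-contained check of that property (plain C, outside the driver):

	#include <assert.h>
	#include <stdio.h>

	/* Segment [x, x+w) for pipe 'idx' out of 'splits'+1 pipes covering 'total'. */
	static void split_segment(int total, int splits, int idx, int *x, int *w)
	{
		int n = splits + 1;
		int epimo = splits - total % n;	/* extra-pixel index minus one */

		*x = (total / n) * idx + (idx > epimo ? idx - epimo - 1 : 0);
		*w = total / n + (idx > epimo ? 1 : 0);
	}

	int main(void)
	{
		int total = 1931, splits = 3;	/* arbitrary odd width, 4 pipes */
		int end = 0;

		for (int i = 0; i <= splits; i++) {
			int x, w;

			split_segment(total, splits, i, &x, &w);
			assert(x == end);	/* contiguous with the previous pipe */
			end = x + w;
			printf("pipe %d: x=%d w=%d\n", i, x, w);
		}
		assert(end == total);		/* full coverage, no pixel lost */
		return 0;
	}

With total = 1931 and 4 pipes this yields widths 482, 483, 483, 483: the three remainder pixels land on the trailing pipes, as the epimo comment describes.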
@@ -644,58 +698,58 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_clip = plane_state->clip_rect;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
- bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-
- pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+ bool pri_split_tb = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ bool sec_split_tb = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ int split_count = 0;
+ int split_idx = 0;
+
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
+ data->recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
- pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
- - stream->src.x) * stream->dst.width
+ data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
/ stream->src.width;
- pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
- stream->dst.width / stream->src.width;
- if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
- stream->dst.x + stream->dst.width)
- pipe_ctx->plane_res.scl_data.recout.width =
- stream->dst.x + stream->dst.width
- - pipe_ctx->plane_res.scl_data.recout.x;
+ data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
+ if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
+ data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
- pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+ data->recout.y = stream->dst.y;
if (stream->src.y < surf_clip.y)
- pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
- - stream->src.y) * stream->dst.height
+ data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
/ stream->src.height;
- pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
- stream->dst.height / stream->src.height;
- if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
- stream->dst.y + stream->dst.height)
- pipe_ctx->plane_res.scl_data.recout.height =
- stream->dst.y + stream->dst.height
- - pipe_ctx->plane_res.scl_data.recout.y;
+ data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
+ if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
+ data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
/* Handle h & v split, handle rotation using viewport */
- if (sec_split && top_bottom_split) {
- pipe_ctx->plane_res.scl_data.recout.y +=
- pipe_ctx->plane_res.scl_data.recout.height / 2;
+ if (sec_split_tb) {
+ data->recout.y += data->recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height =
- (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
- } else if (pri_split && top_bottom_split)
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else if (sec_split) {
- pipe_ctx->plane_res.scl_data.recout.x +=
- pipe_ctx->plane_res.scl_data.recout.width / 2;
- /* Ceil offset pipe */
- pipe_ctx->plane_res.scl_data.recout.width =
- (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- } else if (pri_split)
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ data->recout.height = (data->recout.height + 1) / 2;
+ } else if (pri_split_tb)
+ data->recout.height /= 2;
+ else if (split_count) {
+ /* Extra pixels from the division remainder go to the pipes whose index
+ * is greater than the extra-pixel-index-minus-one (epimo), defined here as:
+ */
+ int epimo = split_count - data->recout.width % (split_count + 1);
+
+ /* No recout offset due to ODM */
+ if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
+ data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->recout.x += split_idx - epimo - 1;
+ }
+ data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+ }
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -832,12 +886,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
+ int odm_idx = 0;
/*
* Need to calculate the scan direction for viewport to make adjustments
@@ -869,6 +925,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width);
+ /* Modified recout_skip_h calculation because ODM has no recout offset */
+ while (odm_pipe) {
+ odm_idx++;
+ odm_pipe = odm_pipe->prev_odm_pipe;
+ }
+ if (odm_idx)
+ recout_skip_h += odm_idx * data->recout.width;
+
recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
src.y * plane_state->dst_rect.height / src.height
@@ -1021,6 +1085,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
store_h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
timing->v_border_top + timing->v_border_bottom;
+ if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
+ pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -2034,7 +2100,7 @@ enum dc_status resource_map_pool_resources(
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
- context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst;
context->stream_status[i].audio_inst =
pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
@@ -2108,10 +2174,10 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
- if (dc->res_pool->funcs->get_default_swizzle_mode &&
+ if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
- result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+ result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state);
if (result != DC_OK)
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index a96d8de9380e..64cf24a9ab08 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
+
+ if (pa_config->is_hvm_enabled == 0)
+ dc->debug.nv12_iflip_vm_wa = false;
}
return num_vmids;
@@ -62,7 +65,7 @@ int dc_get_vmid_use_vector(struct dc *dc)
int i;
int in_use = 0;
- for (i = 0; i < dc->vm_helper->num_vmid; i++)
+ for (i = 0; i < MAX_HUBP; i++)
in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0]
| dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1];
return in_use;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8ff25b5dd2f6..d3ceb39e428e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.69"
+#define DC_VER "3.2.76"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -126,6 +126,7 @@ struct dc_bug_wa {
bool no_connect_phy_config;
bool dedcn20_305_wa;
bool skip_clock_update;
+ bool lt_early_cr_pattern;
};
struct dc_dcc_surface_param {
@@ -229,6 +230,7 @@ struct dc_config {
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
+ bool psr_on_dmub;
};
enum visual_confirm {
@@ -388,6 +390,7 @@ struct dc_debug_options {
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
+ bool disable_mem_low_power;
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
@@ -453,6 +456,7 @@ struct dc_phy_addr_space_config {
} gart_config;
bool valid;
+ bool is_hvm_enabled;
uint64_t page_table_default_page_addr;
};
@@ -518,6 +522,7 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ bool wm_optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
int optimize_seamless_boot_streams;
@@ -526,6 +531,7 @@ struct dc {
struct compressor *fbc_compressor;
struct dc_debug_data debug_data;
+ struct dpcd_vendor_signature vendor_signature;
const char *build_id;
struct vm_helper *vm_helper;
@@ -565,12 +571,14 @@ struct dc_init_data {
struct dc_reg_helper_state *dmub_offload;
struct dc_config flags;
- uint32_t log_mask;
+ uint64_t log_mask;
+
/**
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
+ struct dpcd_vendor_signature vendor_signature;
};
struct dc_callback_init {
@@ -682,7 +690,6 @@ struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
struct fixed31_32 hdr_multiplier;
- bool initialized; /*remove after diag fix*/
union dc_3dlut_state state;
struct dc_context *ctx;
};
@@ -865,6 +872,7 @@ struct dc_flip_addrs {
unsigned int flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
+ bool triplebuffer_flips;
};
bool dc_post_update_surfaces_to_stream(
@@ -979,6 +987,20 @@ struct dpcd_caps {
};
+union dpcd_sink_ext_caps {
+ struct {
+ /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
+ * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
+ */
+ uint8_t sdr_aux_backlight_control : 1;
+ uint8_t hdr_aux_backlight_control : 1;
+ uint8_t reserved_1 : 2;
+ uint8_t oled : 1;
+ uint8_t reserved : 3;
+ } bits;
+ uint8_t raw;
+};
+
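Note: the new union mirrors the usual DPCD idiom — read one byte into .raw, inspect it through .bits. A standalone demonstration with a hypothetical readback value (bit-field order is compiler-defined; the kernel relies on its known ABI):

	#include <stdint.h>
	#include <stdio.h>

	union sink_ext_caps {
		struct {
			uint8_t sdr_aux_backlight_control : 1;
			uint8_t hdr_aux_backlight_control : 1;
			uint8_t reserved_1 : 2;
			uint8_t oled : 1;
			uint8_t reserved : 3;
		} bits;
		uint8_t raw;
	};

	int main(void)
	{
		union sink_ext_caps caps = { .raw = 0x12 };	/* hypothetical readback */

		/* 0x12 = bits 1 and 4 set: HDR AUX control plus an OLED panel */
		printf("hdr_aux=%u oled=%u\n",
		       caps.bits.hdr_aux_backlight_control, caps.bits.oled);
		return 0;
	}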
#include "dc_link.h"
/*******************************************************************************
@@ -1004,6 +1026,11 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_fec_caps {
+ bool is_rx_fec_supported;
+ bool is_topology_fec_supported;
+};
+
/*
* The sink structure contains EDID and other display device properties
*/
@@ -1017,7 +1044,10 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
- struct dc_sink_dsc_caps sink_dsc_caps;
+ struct dc_sink_dsc_caps dsc_caps;
+ struct dc_sink_fec_caps fec_caps;
+
+ bool is_vsc_sdp_colorimetry_supported;
/* private to DC core */
struct dc_link *link;
@@ -1075,7 +1105,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc);
-bool dc_is_hw_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index dfe4472c9e40..bb2730e9521e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -432,6 +432,54 @@ struct dp_sink_hw_fw_revision {
uint8_t ieee_fw_rev[2];
};
+struct dpcd_vendor_signature {
+ bool is_valid;
+
+ union dpcd_ieee_vendor_signature {
+ struct {
+ uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
+ uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
+ uint8_t ieee_hw_rev;
+ uint8_t ieee_fw_rev[2];
+ };
+ uint8_t raw[12];
+ } data;
+};
+
+struct dpcd_amd_signature {
+ uint8_t AMD_IEEE_TxSignature_byte1;
+ uint8_t AMD_IEEE_TxSignature_byte2;
+ uint8_t AMD_IEEE_TxSignature_byte3;
+ uint8_t device_id_byte1;
+ uint8_t device_id_byte2;
+ uint8_t zero[4];
+ uint8_t dce_version;
+ uint8_t dal_version_byte1;
+ uint8_t dal_version_byte2;
+};
+
+struct dpcd_source_backlight_set {
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ } backlight_level_millinits;
+
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ } backlight_transition_time_ms;
+};
+
+union dpcd_source_backlight_get {
+ struct {
+ uint32_t backlight_millinits_peak; /* 326h */
+ uint32_t backlight_millinits_avg; /* 32Ah */
+ } bytes;
+ uint8_t raw[8];
+};
+
/*DPCD register of DP receiver capability field bits-*/
union edp_configuration_cap {
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d25603128394..00ff5e98278c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -26,6 +26,7 @@
#ifndef DC_LINK_H_
#define DC_LINK_H_
+#include "dc.h"
#include "dc_types.h"
#include "grph_object_defs.h"
@@ -128,6 +129,7 @@ struct dc_link {
enum edp_revision edp_revision;
bool psr_feature_enabled;
bool psr_allow_active;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
/* MST record stream using this link */
struct link_flags {
@@ -178,6 +180,21 @@ bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
+/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits,
+ uint32_t *backlight_millinits_peak);
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
+
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
+bool dc_link_set_default_brightness_aux(struct dc_link *link);
+
int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
@@ -316,4 +333,7 @@ bool dc_submit_i2c_oem(
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
+
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 92096de79dec..a5c7ef47b8d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -118,6 +118,7 @@ union stream_update_flags {
uint32_t dpms_off:1;
uint32_t gamut_remap:1;
uint32_t wb_update:1;
+ uint32_t dsc_changed : 1;
} bits;
uint32_t raw;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index e59532d98cb4..0d210104ba0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -229,7 +229,9 @@ struct dc_panel_patch {
unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off;
unsigned int extra_t7_ms;
- unsigned int manage_secondary_link;
+ unsigned int skip_scdc_overwrite;
+ unsigned int delay_ignore_msa;
+ unsigned int disable_fec;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fdf3d8f87eee..fbfcff700971 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 68c4049cbc2a..743042d5905a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
retry_on_defer = true;
- /* fall through */
+ fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 30d953acd016..f0cebe721bcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -378,6 +378,11 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
+ struct dc_context *ctx = dmcu->ctx;
+ unsigned int i;
+ // 5 4 3 2 1 0
+ // F E D C B A - bit 0 is A, bit 5 is F
+ unsigned int tx_interrupt_mask = 0;
PERF_TRACE();
/* Definition of DC_DMCU_SCRATCH
@@ -387,6 +392,15 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
*/
dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
+ for (i = 0; i < ctx->dc->link_count; i++) {
+ if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) {
+ if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A &&
+ ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) {
+ tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter;
+ }
+ }
+ }
+
switch (dmcu->dmcu_state) {
case DMCU_UNLOADED:
status = false;
@@ -401,6 +415,8 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
/* Set backlight ramping stepsize */
REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
+ REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask);
+
/* Set command to initialize microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_INIT_DMCU);
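Note: the mask handed to the DMCU above is one bit per UNIPHY transmitter, bit 0 for A through bit 5 for F, set only for USB-C capable PHYs. A standalone sketch, assuming (as the "bit 0 is A" comment implies) that the transmitter enum starts at 0:

	enum transmitter { TX_UNIPHY_A, TX_UNIPHY_B, TX_UNIPHY_C,
			   TX_UNIPHY_D, TX_UNIPHY_E, TX_UNIPHY_F };

	/* Set one bit per USB-C capable PHY: bit 0 is A ... bit 5 is F. */
	static unsigned int build_tx_mask(const enum transmitter *phys, int n)
	{
		unsigned int mask = 0;

		for (int i = 0; i < n; i++)
			if (phys[i] >= TX_UNIPHY_A && phys[i] <= TX_UNIPHY_F)
				mask |= 1u << phys[i];
		return mask;
	}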
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 066188ba7949..24adec407972 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -267,6 +267,9 @@ static void set_speed(
uint32_t xtal_ref_div = 0;
uint32_t prescale = 0;
+ if (speed == 0)
+ return;
+
REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
if (xtal_ref_div == 0)
@@ -274,17 +277,15 @@ static void set_speed(
prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
- if (speed) {
- if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
- REG_UPDATE_N(SPEED, 3,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
- else
- REG_UPDATE_N(SPEED, 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
- }
+ if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+ REG_UPDATE_N(SPEED, 3,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+ else
+ REG_UPDATE_N(SPEED, 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
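Note on the hunk above: set_speed() now bails out for speed == 0 before the prescale division instead of wrapping the register writes in an if (speed) block, so the division can never fault. The arithmetic in isolation (hypothetical helper; the xtal_ref_div fallback of 2 is assumed from the truncated hunk):

	#include <stdint.h>

	/* I2C clock prescale: (ref_khz * 2 / xtal_ref_div) / speed_khz.
	 * Mirrors the guards above: speed 0 is rejected up front,
	 * xtal_ref_div 0 falls back to the hardware default of 2. */
	static uint32_t i2c_prescale(uint32_t ref_khz, uint32_t xtal_ref_div,
				     uint32_t speed_khz)
	{
		if (speed_khz == 0)
			return 0;	/* caller skips programming, as in set_speed() */
		if (xtal_ref_div == 0)
			xtal_ref_div = 2;
		return (ref_khz * 2 / xtal_ref_div) / speed_khz;
	}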
static bool setup_engine(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 8aa937f496c4..51481e922eb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
sign = 1;
floating = 1;
- /* fall through */
+ fallthrough;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
index 48862bebf29e..7311f312369f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -22,1004 +22,1330 @@
* Authors: AMD
*
*/
-
#include "transform.h"
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 16
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_16p[18] = {
- 4096, 0,
- 3840, 256,
- 3584, 512,
- 3328, 768,
- 3072, 1024,
- 2816, 1280,
- 2560, 1536,
- 2304, 1792,
- 2048, 2048
+ 0x1000, 0x0000,
+ 0x0FF0, 0x0010,
+ 0x0FB0, 0x0050,
+ 0x0F34, 0x00CC,
+ 0x0E68, 0x0198,
+ 0x0D44, 0x02BC,
+ 0x0BC4, 0x043C,
+ 0x09FC, 0x0604,
+ 0x0800, 0x0800
};
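Note on the regenerated tables: each tap is now written as a 14-bit two's-complement s1.12 value, so 0x1000 is +1.0 and 0x3FDC is a small negative lobe; the old decimal entries encoded exactly the same numbers (16348 == 0x3FDC) but hid the sign. A small decoder for eyeballing the tables (plain C):

	#include <stdint.h>
	#include <stdio.h>

	/* s1.12 in 14 bits: sign-extend from bit 13, then scale by 2^-12. */
	static double coef_s1_12(uint16_t raw)
	{
		int v = raw & 0x3fff;

		if (v & 0x2000)
			v -= 0x4000;
		return v / 4096.0;
	}

	int main(void)
	{
		/* one row of filter_3tap_16p_upscale below */
		printf("%+.4f %+.4f %+.4f\n",
		       coef_s1_12(0x06AC), coef_s1_12(0x0978), coef_s1_12(0x3FDC));
		return 0;
	}

This prints roughly +0.4170 +0.5918 -0.0088; every row of a table sums to about 1.0 (unity DC gain), and the negative entries are the Lanczos side lobes the decimal dump obscured.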
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_upscale[27] = {
- 2048, 2048, 0,
- 1708, 2424, 16348,
- 1372, 2796, 16308,
- 1056, 3148, 16272,
- 768, 3464, 16244,
- 512, 3728, 16236,
- 296, 3928, 16252,
- 124, 4052, 16296,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0000, 0x1000, 0x0000
};
-static const uint16_t filter_3tap_16p_117[27] = {
- 2048, 2048, 0,
- 1824, 2276, 16376,
- 1600, 2496, 16380,
- 1376, 2700, 16,
- 1156, 2880, 52,
- 948, 3032, 108,
- 756, 3144, 192,
- 580, 3212, 296,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_116[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_16p_150[27] = {
- 2048, 2048, 0,
- 1872, 2184, 36,
- 1692, 2308, 88,
- 1516, 2420, 156,
- 1340, 2516, 236,
- 1168, 2592, 328,
- 1004, 2648, 440,
- 844, 2684, 560,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_149[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0660, 0x098C, 0x0014,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_183[27] = {
- 2048, 2048, 0,
- 1892, 2104, 92,
- 1744, 2152, 196,
- 1592, 2196, 300,
- 1448, 2232, 412,
- 1304, 2256, 528,
- 1168, 2276, 648,
- 1032, 2288, 772,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x0754, 0x0880, 0x002C,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x036C, 0x0A40, 0x0254,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_upscale[36] = {
- 0, 4096, 0, 0,
- 16240, 4056, 180, 16380,
- 16136, 3952, 404, 16364,
- 16072, 3780, 664, 16344,
- 16040, 3556, 952, 16312,
- 16036, 3284, 1268, 16272,
- 16052, 2980, 1604, 16224,
- 16084, 2648, 1952, 16176,
- 16128, 2304, 2304, 16128
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
};
-static const uint16_t filter_4tap_16p_117[36] = {
- 428, 3236, 428, 0,
- 276, 3232, 604, 16364,
- 148, 3184, 800, 16340,
- 44, 3104, 1016, 16312,
- 16344, 2984, 1244, 16284,
- 16284, 2832, 1488, 16256,
- 16244, 2648, 1732, 16236,
- 16220, 2440, 1976, 16220,
- 16212, 2216, 2216, 16212
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_116[36] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
};
-static const uint16_t filter_4tap_16p_150[36] = {
- 696, 2700, 696, 0,
- 560, 2700, 848, 16364,
- 436, 2676, 1008, 16348,
- 328, 2628, 1180, 16336,
- 232, 2556, 1356, 16328,
- 152, 2460, 1536, 16328,
- 84, 2344, 1716, 16332,
- 28, 2208, 1888, 16348,
- 16376, 2052, 2052, 16376
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_149[36] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_183[36] = {
- 940, 2208, 940, 0,
- 832, 2200, 1052, 4,
- 728, 2180, 1164, 16,
- 628, 2148, 1280, 36,
- 536, 2100, 1392, 60,
- 448, 2044, 1504, 92,
- 368, 1976, 1612, 132,
- 296, 1900, 1716, 176,
- 232, 1812, 1812, 232
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
};
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 64
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_64p[66] = {
- 4096, 0,
- 4032, 64,
- 3968, 128,
- 3904, 192,
- 3840, 256,
- 3776, 320,
- 3712, 384,
- 3648, 448,
- 3584, 512,
- 3520, 576,
- 3456, 640,
- 3392, 704,
- 3328, 768,
- 3264, 832,
- 3200, 896,
- 3136, 960,
- 3072, 1024,
- 3008, 1088,
- 2944, 1152,
- 2880, 1216,
- 2816, 1280,
- 2752, 1344,
- 2688, 1408,
- 2624, 1472,
- 2560, 1536,
- 2496, 1600,
- 2432, 1664,
- 2368, 1728,
- 2304, 1792,
- 2240, 1856,
- 2176, 1920,
- 2112, 1984,
- 2048, 2048 };
+ 0x1000, 0x0000,
+ 0x1000, 0x0000,
+ 0x0FFC, 0x0004,
+ 0x0FF8, 0x0008,
+ 0x0FF0, 0x0010,
+ 0x0FE4, 0x001C,
+ 0x0FD8, 0x0028,
+ 0x0FC4, 0x003C,
+ 0x0FB0, 0x0050,
+ 0x0F98, 0x0068,
+ 0x0F7C, 0x0084,
+ 0x0F58, 0x00A8,
+ 0x0F34, 0x00CC,
+ 0x0F08, 0x00F8,
+ 0x0ED8, 0x0128,
+ 0x0EA4, 0x015C,
+ 0x0E68, 0x0198,
+ 0x0E28, 0x01D8,
+ 0x0DE4, 0x021C,
+ 0x0D98, 0x0268,
+ 0x0D44, 0x02BC,
+ 0x0CEC, 0x0314,
+ 0x0C90, 0x0370,
+ 0x0C2C, 0x03D4,
+ 0x0BC4, 0x043C,
+ 0x0B58, 0x04A8,
+ 0x0AE8, 0x0518,
+ 0x0A74, 0x058C,
+ 0x09FC, 0x0604,
+ 0x0980, 0x0680,
+ 0x0900, 0x0700,
+ 0x0880, 0x0780,
+ 0x0800, 0x0800
+};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_upscale[99] = {
- 2048, 2048, 0,
- 1960, 2140, 16376,
- 1876, 2236, 16364,
- 1792, 2328, 16356,
- 1708, 2424, 16348,
- 1620, 2516, 16336,
- 1540, 2612, 16328,
- 1456, 2704, 16316,
- 1372, 2796, 16308,
- 1292, 2884, 16296,
- 1212, 2976, 16288,
- 1136, 3060, 16280,
- 1056, 3148, 16272,
- 984, 3228, 16264,
- 908, 3312, 16256,
- 836, 3388, 16248,
- 768, 3464, 16244,
- 700, 3536, 16240,
- 636, 3604, 16236,
- 572, 3668, 16236,
- 512, 3728, 16236,
- 456, 3784, 16236,
- 400, 3836, 16240,
- 348, 3884, 16244,
- 296, 3928, 16252,
- 252, 3964, 16260,
- 204, 4000, 16268,
- 164, 4028, 16284,
- 124, 4052, 16296,
- 88, 4072, 16316,
- 56, 4084, 16336,
- 24, 4092, 16356,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x07A8, 0x0860, 0x3FF8,
+ 0x0754, 0x08BC, 0x3FF0,
+ 0x0700, 0x0918, 0x3FE8,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x0654, 0x09D8, 0x3FD4,
+ 0x0604, 0x0A34, 0x3FC8,
+ 0x05B0, 0x0A90, 0x3FC0,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x050C, 0x0B48, 0x3FAC,
+ 0x04BC, 0x0BA0, 0x3FA4,
+ 0x0470, 0x0BF4, 0x3F9C,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x03D8, 0x0C9C, 0x3F8C,
+ 0x038C, 0x0CF0, 0x3F84,
+ 0x0344, 0x0D40, 0x3F7C,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x02BC, 0x0DD0, 0x3F74,
+ 0x027C, 0x0E14, 0x3F70,
+ 0x023C, 0x0E54, 0x3F70,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x01C8, 0x0EC8, 0x3F70,
+ 0x0190, 0x0EFC, 0x3F74,
+ 0x015C, 0x0F2C, 0x3F78,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x00FC, 0x0F7C, 0x3F88,
+ 0x00CC, 0x0FA4, 0x3F90,
+ 0x00A4, 0x0FC0, 0x3F9C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0058, 0x0FE8, 0x3FC0,
+ 0x0038, 0x0FF4, 0x3FD4,
+ 0x0018, 0x1000, 0x3FE8,
+ 0x0000, 0x1000, 0x0000
};
-static const uint16_t filter_3tap_64p_117[99] = {
- 2048, 2048, 0,
- 1992, 2104, 16380,
- 1936, 2160, 16380,
- 1880, 2220, 16376,
- 1824, 2276, 16376,
- 1768, 2332, 16376,
- 1712, 2388, 16376,
- 1656, 2444, 16376,
- 1600, 2496, 16380,
- 1544, 2548, 0,
- 1488, 2600, 4,
- 1432, 2652, 8,
- 1376, 2700, 16,
- 1320, 2748, 20,
- 1264, 2796, 32,
- 1212, 2840, 40,
- 1156, 2880, 52,
- 1104, 2920, 64,
- 1052, 2960, 80,
- 1000, 2996, 92,
- 948, 3032, 108,
- 900, 3060, 128,
- 852, 3092, 148,
- 804, 3120, 168,
- 756, 3144, 192,
- 712, 3164, 216,
- 668, 3184, 240,
- 624, 3200, 268,
- 580, 3212, 296,
- 540, 3220, 328,
- 500, 3228, 360,
- 464, 3232, 392,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_116[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07C0, 0x0844, 0x3FFC,
+ 0x0780, 0x0888, 0x3FF8,
+ 0x0740, 0x08D0, 0x3FF0,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x06C0, 0x0958, 0x3FE8,
+ 0x0684, 0x0998, 0x3FE4,
+ 0x0644, 0x09DC, 0x3FE0,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x05C4, 0x0A5C, 0x3FE0,
+ 0x0588, 0x0A9C, 0x3FDC,
+ 0x0548, 0x0ADC, 0x3FDC,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x04CC, 0x0B54, 0x3FE0,
+ 0x0490, 0x0B8C, 0x3FE4,
+ 0x0458, 0x0BC0, 0x3FE8,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x03E0, 0x0C28, 0x3FF8,
+ 0x03A8, 0x0C58, 0x0000,
+ 0x0374, 0x0C88, 0x0004,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0308, 0x0CD8, 0x0020,
+ 0x02D8, 0x0CFC, 0x002C,
+ 0x02A0, 0x0D20, 0x0040,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x0244, 0x0D58, 0x0064,
+ 0x0214, 0x0D70, 0x007C,
+ 0x01E8, 0x0D84, 0x0094,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0198, 0x0DA0, 0x00C8,
+ 0x0170, 0x0DAC, 0x00E4,
+ 0x014C, 0x0DB0, 0x0104,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_64p_150[99] = {
- 2048, 2048, 0,
- 2004, 2080, 8,
- 1960, 2116, 16,
- 1916, 2148, 28,
- 1872, 2184, 36,
- 1824, 2216, 48,
- 1780, 2248, 60,
- 1736, 2280, 76,
- 1692, 2308, 88,
- 1648, 2336, 104,
- 1604, 2368, 120,
- 1560, 2392, 136,
- 1516, 2420, 156,
- 1472, 2444, 172,
- 1428, 2472, 192,
- 1384, 2492, 212,
- 1340, 2516, 236,
- 1296, 2536, 256,
- 1252, 2556, 280,
- 1212, 2576, 304,
- 1168, 2592, 328,
- 1124, 2608, 356,
- 1084, 2624, 384,
- 1044, 2636, 412,
- 1004, 2648, 440,
- 964, 2660, 468,
- 924, 2668, 500,
- 884, 2676, 528,
- 844, 2684, 560,
- 808, 2688, 596,
- 768, 2692, 628,
- 732, 2696, 664,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_149[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07CC, 0x0834, 0x0000,
+ 0x0798, 0x0868, 0x0000,
+ 0x0764, 0x089C, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0700, 0x08FC, 0x0004,
+ 0x06CC, 0x092C, 0x0008,
+ 0x0698, 0x095C, 0x000C,
+ 0x0660, 0x098C, 0x0014,
+ 0x062C, 0x09B8, 0x001C,
+ 0x05FC, 0x09E4, 0x0020,
+ 0x05C4, 0x0A10, 0x002C,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x055C, 0x0A64, 0x0040,
+ 0x0528, 0x0A8C, 0x004C,
+ 0x04F8, 0x0AB0, 0x0058,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0490, 0x0AF8, 0x0078,
+ 0x0460, 0x0B18, 0x0088,
+ 0x0430, 0x0B38, 0x0098,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x03D0, 0x0B6C, 0x00C4,
+ 0x03A0, 0x0B88, 0x00D8,
+ 0x0374, 0x0B9C, 0x00F0,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x0318, 0x0BC4, 0x0124,
+ 0x02EC, 0x0BD4, 0x0140,
+ 0x02C4, 0x0BE0, 0x015C,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0274, 0x0BF4, 0x0198,
+ 0x024C, 0x0BFC, 0x01B8,
+ 0x0228, 0x0BFC, 0x01DC,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_183[99] = {
- 2048, 2048, 0,
- 2008, 2060, 20,
- 1968, 2076, 44,
- 1932, 2088, 68,
- 1892, 2104, 92,
- 1856, 2116, 120,
- 1816, 2128, 144,
- 1780, 2140, 168,
- 1744, 2152, 196,
- 1704, 2164, 220,
- 1668, 2176, 248,
- 1632, 2188, 272,
- 1592, 2196, 300,
- 1556, 2204, 328,
- 1520, 2216, 356,
- 1484, 2224, 384,
- 1448, 2232, 412,
- 1412, 2240, 440,
- 1376, 2244, 468,
- 1340, 2252, 496,
- 1304, 2256, 528,
- 1272, 2264, 556,
- 1236, 2268, 584,
- 1200, 2272, 616,
- 1168, 2276, 648,
- 1132, 2280, 676,
- 1100, 2284, 708,
- 1064, 2288, 740,
- 1032, 2288, 772,
- 996, 2292, 800,
- 964, 2292, 832,
- 932, 2292, 868,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x07D4, 0x0824, 0x0008,
+ 0x07AC, 0x0840, 0x0014,
+ 0x0780, 0x0860, 0x0020,
+ 0x0754, 0x0880, 0x002C,
+ 0x0728, 0x089C, 0x003C,
+ 0x0700, 0x08B8, 0x0048,
+ 0x06D4, 0x08D4, 0x0058,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x067C, 0x090C, 0x0078,
+ 0x0650, 0x0924, 0x008C,
+ 0x0628, 0x093C, 0x009C,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x05D0, 0x096C, 0x00C4,
+ 0x05A8, 0x0980, 0x00D8,
+ 0x0578, 0x0998, 0x00F0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x0528, 0x09BC, 0x011C,
+ 0x04FC, 0x09D0, 0x0134,
+ 0x04D4, 0x09E0, 0x014C,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0480, 0x09FC, 0x0184,
+ 0x045C, 0x0A08, 0x019C,
+ 0x0434, 0x0A14, 0x01B8,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x03E0, 0x0A2C, 0x01F4,
+ 0x03B8, 0x0A34, 0x0214,
+ 0x0394, 0x0A38, 0x0234,
+ 0x036C, 0x0A40, 0x0254,
+ 0x0348, 0x0A44, 0x0274,
+ 0x0324, 0x0A48, 0x0294,
+ 0x0300, 0x0A48, 0x02B8,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_upscale[132] = {
- 0, 4096, 0, 0,
- 16344, 4092, 40, 0,
- 16308, 4084, 84, 16380,
- 16272, 4072, 132, 16380,
- 16240, 4056, 180, 16380,
- 16212, 4036, 232, 16376,
- 16184, 4012, 288, 16372,
- 16160, 3984, 344, 16368,
- 16136, 3952, 404, 16364,
- 16116, 3916, 464, 16360,
- 16100, 3872, 528, 16356,
- 16084, 3828, 596, 16348,
- 16072, 3780, 664, 16344,
- 16060, 3728, 732, 16336,
- 16052, 3676, 804, 16328,
- 16044, 3616, 876, 16320,
- 16040, 3556, 952, 16312,
- 16036, 3492, 1028, 16300,
- 16032, 3424, 1108, 16292,
- 16032, 3356, 1188, 16280,
- 16036, 3284, 1268, 16272,
- 16036, 3212, 1352, 16260,
- 16040, 3136, 1436, 16248,
- 16044, 3056, 1520, 16236,
- 16052, 2980, 1604, 16224,
- 16060, 2896, 1688, 16212,
- 16064, 2816, 1776, 16200,
- 16076, 2732, 1864, 16188,
- 16084, 2648, 1952, 16176,
- 16092, 2564, 2040, 16164,
- 16104, 2476, 2128, 16152,
- 16116, 2388, 2216, 16140,
- 16128, 2304, 2304, 16128 };
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3FDC, 0x0FFC, 0x0028, 0x0000,
+ 0x3FB4, 0x0FF8, 0x0054, 0x0000,
+ 0x3F94, 0x0FE8, 0x0084, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F58, 0x0FC4, 0x00E8, 0x3FFC,
+ 0x3F3C, 0x0FAC, 0x0120, 0x3FF8,
+ 0x3F24, 0x0F90, 0x0158, 0x3FF4,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3EF8, 0x0F4C, 0x01D0, 0x3FEC,
+ 0x3EE8, 0x0F20, 0x0210, 0x3FE8,
+ 0x3ED8, 0x0EF4, 0x0254, 0x3FE0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EC0, 0x0E90, 0x02DC, 0x3FD4,
+ 0x3EB8, 0x0E58, 0x0324, 0x3FCC,
+ 0x3EB0, 0x0E20, 0x036C, 0x3FC4,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA8, 0x0DA4, 0x0404, 0x3FB0,
+ 0x3EA4, 0x0D60, 0x0454, 0x3FA8,
+ 0x3EA4, 0x0D1C, 0x04A4, 0x3F9C,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EA8, 0x0C88, 0x0548, 0x3F88,
+ 0x3EAC, 0x0C3C, 0x059C, 0x3F7C,
+ 0x3EB0, 0x0BF0, 0x05F0, 0x3F70,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3EBC, 0x0B54, 0x0698, 0x3F58,
+ 0x3EC4, 0x0B00, 0x06F0, 0x3F4C,
+ 0x3ECC, 0x0AAC, 0x0748, 0x3F40,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3EE0, 0x0A04, 0x07F8, 0x3F24,
+ 0x3EEC, 0x09AC, 0x0850, 0x3F18,
+ 0x3EF8, 0x0954, 0x08A8, 0x3F0C,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
+};
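Each array stores num_phases/2 + 1 = 33 phases of num_taps coefficients (33 x 4 = 132 entries here; the remaining phases are presumably recovered by mirroring), and every phase appears to sum to 0x1000, i.e. unity gain in 1.12. A quick sanity-check sketch under those assumptions, reusing the sign-extension convention from above:

#include <assert.h>
#include <stdint.h>

/* Sign-extend an assumed 14-bit two's-complement coefficient. */
static int32_t coef_sext(uint16_t c)
{
	return (c & 0x2000) ? (int32_t)c - 0x4000 : c;
}

/* Verify unity gain (0x1000 in 1.12) for all 33 stored phases. */
static void check_filter_gain(const uint16_t *coefs, int taps)
{
	for (int phase = 0; phase < 33; phase++) {
		int32_t sum = 0;

		for (int t = 0; t < taps; t++)
			sum += coef_sext(coefs[phase * taps + t]);
		assert(sum == 0x1000);
	}
}

/* e.g. check_filter_gain(filter_4tap_64p_upscale, 4); */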
-static const uint16_t filter_4tap_64p_117[132] = {
- 420, 3248, 420, 0,
- 380, 3248, 464, 16380,
- 344, 3248, 508, 16372,
- 308, 3248, 552, 16368,
- 272, 3240, 596, 16364,
- 236, 3236, 644, 16356,
- 204, 3224, 692, 16352,
- 172, 3212, 744, 16344,
- 144, 3196, 796, 16340,
- 116, 3180, 848, 16332,
- 88, 3160, 900, 16324,
- 60, 3136, 956, 16320,
- 36, 3112, 1012, 16312,
- 16, 3084, 1068, 16304,
- 16380, 3056, 1124, 16296,
- 16360, 3024, 1184, 16292,
- 16340, 2992, 1244, 16284,
- 16324, 2956, 1304, 16276,
- 16308, 2920, 1364, 16268,
- 16292, 2880, 1424, 16264,
- 16280, 2836, 1484, 16256,
- 16268, 2792, 1548, 16252,
- 16256, 2748, 1608, 16244,
- 16248, 2700, 1668, 16240,
- 16240, 2652, 1732, 16232,
- 16232, 2604, 1792, 16228,
- 16228, 2552, 1856, 16224,
- 16220, 2500, 1916, 16220,
- 16216, 2444, 1980, 16216,
- 16216, 2388, 2040, 16216,
- 16212, 2332, 2100, 16212,
- 16212, 2276, 2160, 16212,
- 16212, 2220, 2220, 16212 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_116[132] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x017C, 0x0CB8, 0x01D0, 0x3FFC,
+ 0x0158, 0x0CB8, 0x01F8, 0x3FF8,
+ 0x0130, 0x0CB4, 0x0228, 0x3FF4,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x00EC, 0x0CA8, 0x0284, 0x3FE8,
+ 0x00CC, 0x0C9C, 0x02B4, 0x3FE4,
+ 0x00AC, 0x0C90, 0x02E8, 0x3FDC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0070, 0x0C70, 0x0350, 0x3FD0,
+ 0x0058, 0x0C5C, 0x0384, 0x3FC8,
+ 0x003C, 0x0C48, 0x03BC, 0x3FC0,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x0010, 0x0C10, 0x042C, 0x3FB4,
+ 0x3FFC, 0x0BF4, 0x0464, 0x3FAC,
+ 0x3FE8, 0x0BD4, 0x04A0, 0x3FA4,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3FC4, 0x0B8C, 0x0518, 0x3F98,
+ 0x3FB4, 0x0B68, 0x0554, 0x3F90,
+ 0x3FA8, 0x0B40, 0x0590, 0x3F88,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F90, 0x0AEC, 0x0608, 0x3F7C,
+ 0x3F84, 0x0ABC, 0x0648, 0x3F78,
+ 0x3F7C, 0x0A90, 0x0684, 0x3F70,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F6C, 0x0A2C, 0x0700, 0x3F68,
+ 0x3F64, 0x09F8, 0x0740, 0x3F64,
+ 0x3F60, 0x09C4, 0x077C, 0x3F60,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F58, 0x0958, 0x07F8, 0x3F58,
+ 0x3F58, 0x091C, 0x0834, 0x3F58,
+ 0x3F54, 0x08E4, 0x0870, 0x3F58,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
+};
-static const uint16_t filter_4tap_64p_150[132] = {
- 696, 2700, 696, 0,
- 660, 2704, 732, 16380,
- 628, 2704, 768, 16376,
- 596, 2704, 804, 16372,
- 564, 2700, 844, 16364,
- 532, 2696, 884, 16360,
- 500, 2692, 924, 16356,
- 472, 2684, 964, 16352,
- 440, 2676, 1004, 16352,
- 412, 2668, 1044, 16348,
- 384, 2656, 1088, 16344,
- 360, 2644, 1128, 16340,
- 332, 2632, 1172, 16336,
- 308, 2616, 1216, 16336,
- 284, 2600, 1260, 16332,
- 260, 2580, 1304, 16332,
- 236, 2560, 1348, 16328,
- 216, 2540, 1392, 16328,
- 196, 2516, 1436, 16328,
- 176, 2492, 1480, 16324,
- 156, 2468, 1524, 16324,
- 136, 2440, 1568, 16328,
- 120, 2412, 1612, 16328,
- 104, 2384, 1656, 16328,
- 88, 2352, 1700, 16332,
- 72, 2324, 1744, 16332,
- 60, 2288, 1788, 16336,
- 48, 2256, 1828, 16340,
- 36, 2220, 1872, 16344,
- 24, 2184, 1912, 16352,
- 12, 2148, 1952, 16356,
- 4, 2112, 1996, 16364,
- 16380, 2072, 2036, 16372 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_149[132] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0294, 0x0A94, 0x02DC, 0x3FFC,
+ 0x0274, 0x0A94, 0x0300, 0x3FF8,
+ 0x0250, 0x0A94, 0x0328, 0x3FF4,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x0214, 0x0A8C, 0x0374, 0x3FEC,
+ 0x01F0, 0x0A88, 0x03A0, 0x3FE8,
+ 0x01D4, 0x0A80, 0x03C8, 0x3FE4,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0198, 0x0A70, 0x041C, 0x3FDC,
+ 0x0180, 0x0A64, 0x0444, 0x3FD8,
+ 0x0164, 0x0A54, 0x0470, 0x3FD8,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x0130, 0x0A38, 0x04C8, 0x3FD0,
+ 0x0118, 0x0A24, 0x04F4, 0x3FD0,
+ 0x0100, 0x0A14, 0x0520, 0x3FCC,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x00D4, 0x09E8, 0x057C, 0x3FC8,
+ 0x00C0, 0x09D0, 0x05A8, 0x3FC8,
+ 0x00AC, 0x09B8, 0x05D4, 0x3FC8,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0084, 0x0984, 0x0630, 0x3FC8,
+ 0x0074, 0x0964, 0x065C, 0x3FCC,
+ 0x0064, 0x0948, 0x0688, 0x3FCC,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x0044, 0x0908, 0x06E0, 0x3FD4,
+ 0x0038, 0x08E8, 0x070C, 0x3FD4,
+ 0x002C, 0x08C4, 0x0738, 0x3FD8,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x0014, 0x087C, 0x078C, 0x3FE4,
+ 0x0008, 0x0858, 0x07B4, 0x3FEC,
+ 0x0000, 0x0830, 0x07DC, 0x3FF4,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
+};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_183[132] = {
- 944, 2204, 944, 0,
- 916, 2204, 972, 0,
- 888, 2200, 996, 0,
- 860, 2200, 1024, 4,
- 832, 2196, 1052, 4,
- 808, 2192, 1080, 8,
- 780, 2188, 1108, 12,
- 756, 2180, 1140, 12,
- 728, 2176, 1168, 16,
- 704, 2168, 1196, 20,
- 680, 2160, 1224, 24,
- 656, 2152, 1252, 28,
- 632, 2144, 1280, 36,
- 608, 2132, 1308, 40,
- 584, 2120, 1336, 48,
- 560, 2112, 1364, 52,
- 536, 2096, 1392, 60,
- 516, 2084, 1420, 68,
- 492, 2072, 1448, 76,
- 472, 2056, 1476, 84,
- 452, 2040, 1504, 92,
- 428, 2024, 1532, 100,
- 408, 2008, 1560, 112,
- 392, 1992, 1584, 120,
- 372, 1972, 1612, 132,
- 352, 1956, 1636, 144,
- 336, 1936, 1664, 156,
- 316, 1916, 1688, 168,
- 300, 1896, 1712, 180,
- 284, 1876, 1736, 192,
- 268, 1852, 1760, 208,
- 252, 1832, 1784, 220,
- 236, 1808, 1808, 236 };
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0394, 0x08A0, 0x03CC, 0x0000,
+ 0x037C, 0x089C, 0x03E8, 0x0000,
+ 0x0360, 0x089C, 0x0400, 0x0004,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x032C, 0x0894, 0x0438, 0x0008,
+ 0x0310, 0x0890, 0x0454, 0x000C,
+ 0x02F8, 0x0888, 0x0474, 0x000C,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x02C4, 0x087C, 0x04AC, 0x0014,
+ 0x02AC, 0x0874, 0x04C8, 0x0018,
+ 0x0290, 0x086C, 0x04E4, 0x0020,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x0264, 0x0858, 0x051C, 0x0028,
+ 0x024C, 0x084C, 0x0538, 0x0030,
+ 0x0234, 0x0844, 0x0554, 0x0034,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x0208, 0x0828, 0x058C, 0x0044,
+ 0x01F0, 0x081C, 0x05A8, 0x004C,
+ 0x01DC, 0x080C, 0x05C4, 0x0054,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x01B4, 0x07EC, 0x05FC, 0x0064,
+ 0x019C, 0x07DC, 0x0618, 0x0070,
+ 0x018C, 0x07CC, 0x0630, 0x0078,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0164, 0x07A8, 0x0664, 0x0090,
+ 0x0150, 0x0794, 0x0680, 0x009C,
+ 0x0140, 0x0780, 0x0698, 0x00A8,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x0120, 0x0758, 0x06C8, 0x00C0,
+ 0x0110, 0x0740, 0x06E0, 0x00D0,
+ 0x0100, 0x072C, 0x06F8, 0x00DC,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_upscale[165] = {
- 15936, 2496, 2496, 15936, 0,
- 15948, 2404, 2580, 15924, 0,
- 15960, 2312, 2664, 15912, 4,
- 15976, 2220, 2748, 15904, 8,
- 15992, 2128, 2832, 15896, 12,
- 16004, 2036, 2912, 15888, 16,
- 16020, 1944, 2992, 15880, 20,
- 16036, 1852, 3068, 15876, 20,
- 16056, 1760, 3140, 15876, 24,
- 16072, 1668, 3216, 15872, 28,
- 16088, 1580, 3284, 15872, 32,
- 16104, 1492, 3352, 15876, 32,
- 16120, 1404, 3420, 15876, 36,
- 16140, 1316, 3480, 15884, 40,
- 16156, 1228, 3540, 15892, 40,
- 16172, 1144, 3600, 15900, 40,
- 16188, 1060, 3652, 15908, 44,
- 16204, 980, 3704, 15924, 44,
- 16220, 900, 3756, 15936, 44,
- 16236, 824, 3800, 15956, 44,
- 16248, 744, 3844, 15972, 44,
- 16264, 672, 3884, 15996, 44,
- 16276, 600, 3920, 16020, 44,
- 16292, 528, 3952, 16044, 40,
- 16304, 460, 3980, 16072, 40,
- 16316, 396, 4008, 16104, 36,
- 16328, 332, 4032, 16136, 32,
- 16336, 272, 4048, 16172, 28,
- 16348, 212, 4064, 16208, 24,
- 16356, 156, 4080, 16248, 16,
- 16368, 100, 4088, 16292, 12,
- 16376, 48, 4092, 16336, 4,
- 0, 0, 4096, 0, 0 };
+ 0x3E40, 0x09C0, 0x09C0, 0x3E40, 0x0000,
+ 0x3E50, 0x0964, 0x0A18, 0x3E34, 0x0000,
+ 0x3E5C, 0x0908, 0x0A6C, 0x3E2C, 0x0004,
+ 0x3E6C, 0x08AC, 0x0AC0, 0x3E20, 0x0008,
+ 0x3E78, 0x0850, 0x0B14, 0x3E18, 0x000C,
+ 0x3E88, 0x07F4, 0x0B60, 0x3E14, 0x0010,
+ 0x3E98, 0x0798, 0x0BB0, 0x3E0C, 0x0014,
+ 0x3EA8, 0x073C, 0x0C00, 0x3E08, 0x0014,
+ 0x3EB8, 0x06E4, 0x0C48, 0x3E04, 0x0018,
+ 0x3ECC, 0x0684, 0x0C90, 0x3E04, 0x001C,
+ 0x3EDC, 0x062C, 0x0CD4, 0x3E04, 0x0020,
+ 0x3EEC, 0x05D4, 0x0D1C, 0x3E04, 0x0020,
+ 0x3EFC, 0x057C, 0x0D5C, 0x3E08, 0x0024,
+ 0x3F0C, 0x0524, 0x0D98, 0x3E10, 0x0028,
+ 0x3F20, 0x04CC, 0x0DD8, 0x3E14, 0x0028,
+ 0x3F30, 0x0478, 0x0E14, 0x3E1C, 0x0028,
+ 0x3F40, 0x0424, 0x0E48, 0x3E28, 0x002C,
+ 0x3F50, 0x03D4, 0x0E7C, 0x3E34, 0x002C,
+ 0x3F60, 0x0384, 0x0EAC, 0x3E44, 0x002C,
+ 0x3F6C, 0x0338, 0x0EDC, 0x3E54, 0x002C,
+ 0x3F7C, 0x02E8, 0x0F08, 0x3E68, 0x002C,
+ 0x3F8C, 0x02A0, 0x0F2C, 0x3E7C, 0x002C,
+ 0x3F98, 0x0258, 0x0F50, 0x3E94, 0x002C,
+ 0x3FA4, 0x0210, 0x0F74, 0x3EB0, 0x0028,
+ 0x3FB0, 0x01CC, 0x0F90, 0x3ECC, 0x0028,
+ 0x3FC0, 0x018C, 0x0FA8, 0x3EE8, 0x0024,
+ 0x3FC8, 0x014C, 0x0FC0, 0x3F0C, 0x0020,
+ 0x3FD4, 0x0110, 0x0FD4, 0x3F2C, 0x001C,
+ 0x3FE0, 0x00D4, 0x0FE0, 0x3F54, 0x0018,
+ 0x3FE8, 0x009C, 0x0FF0, 0x3F7C, 0x0010,
+ 0x3FF0, 0x0064, 0x0FFC, 0x3FA4, 0x000C,
+ 0x3FFC, 0x0030, 0x0FFC, 0x3FD4, 0x0004,
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000
+};
-static const uint16_t filter_5tap_64p_117[165] = {
- 16056, 2372, 2372, 16056, 0,
- 16052, 2312, 2432, 16060, 0,
- 16052, 2252, 2488, 16064, 0,
- 16052, 2188, 2548, 16072, 0,
- 16052, 2124, 2600, 16076, 0,
- 16052, 2064, 2656, 16088, 0,
- 16052, 2000, 2708, 16096, 0,
- 16056, 1932, 2760, 16108, 0,
- 16060, 1868, 2808, 16120, 0,
- 16064, 1804, 2856, 16132, 0,
- 16068, 1740, 2904, 16148, 16380,
- 16076, 1676, 2948, 16164, 16380,
- 16080, 1612, 2992, 16180, 16376,
- 16088, 1544, 3032, 16200, 16372,
- 16096, 1480, 3072, 16220, 16372,
- 16104, 1420, 3108, 16244, 16368,
- 16112, 1356, 3144, 16268, 16364,
- 16120, 1292, 3180, 16292, 16360,
- 16128, 1232, 3212, 16320, 16356,
- 16136, 1168, 3240, 16344, 16352,
- 16144, 1108, 3268, 16376, 16344,
- 16156, 1048, 3292, 20, 16340,
- 16164, 988, 3316, 52, 16332,
- 16172, 932, 3336, 88, 16328,
- 16184, 872, 3356, 124, 16320,
- 16192, 816, 3372, 160, 16316,
- 16204, 760, 3388, 196, 16308,
- 16212, 708, 3400, 236, 16300,
- 16220, 656, 3412, 276, 16292,
- 16232, 604, 3420, 320, 16284,
- 16240, 552, 3424, 364, 16276,
- 16248, 504, 3428, 408, 16268,
- 16256, 456, 3428, 456, 16256 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_116[165] = {
+ 0x3EDC, 0x0924, 0x0924, 0x3EDC, 0x0000,
+ 0x3ED8, 0x08EC, 0x095C, 0x3EE0, 0x0000,
+ 0x3ED4, 0x08B0, 0x0994, 0x3EE8, 0x0000,
+ 0x3ED0, 0x0878, 0x09C8, 0x3EF0, 0x0000,
+ 0x3ED0, 0x083C, 0x09FC, 0x3EF8, 0x0000,
+ 0x3ED0, 0x0800, 0x0A2C, 0x3F04, 0x0000,
+ 0x3ED0, 0x07C4, 0x0A5C, 0x3F10, 0x0000,
+ 0x3ED0, 0x0788, 0x0A8C, 0x3F1C, 0x0000,
+ 0x3ED0, 0x074C, 0x0AC0, 0x3F28, 0x3FFC,
+ 0x3ED4, 0x0710, 0x0AE8, 0x3F38, 0x3FFC,
+ 0x3ED8, 0x06D0, 0x0B18, 0x3F48, 0x3FF8,
+ 0x3EDC, 0x0694, 0x0B3C, 0x3F5C, 0x3FF8,
+ 0x3EE0, 0x0658, 0x0B68, 0x3F6C, 0x3FF4,
+ 0x3EE4, 0x061C, 0x0B90, 0x3F80, 0x3FF0,
+ 0x3EEC, 0x05DC, 0x0BB4, 0x3F98, 0x3FEC,
+ 0x3EF0, 0x05A0, 0x0BD8, 0x3FB0, 0x3FE8,
+ 0x3EF8, 0x0564, 0x0BF8, 0x3FC8, 0x3FE4,
+ 0x3EFC, 0x0528, 0x0C1C, 0x3FE0, 0x3FE0,
+ 0x3F04, 0x04EC, 0x0C38, 0x3FFC, 0x3FDC,
+ 0x3F0C, 0x04B4, 0x0C54, 0x0014, 0x3FD8,
+ 0x3F14, 0x047C, 0x0C70, 0x0030, 0x3FD0,
+ 0x3F1C, 0x0440, 0x0C88, 0x0050, 0x3FCC,
+ 0x3F24, 0x0408, 0x0CA0, 0x0070, 0x3FC4,
+ 0x3F2C, 0x03D0, 0x0CB0, 0x0094, 0x3FC0,
+ 0x3F34, 0x0398, 0x0CC4, 0x00B8, 0x3FB8,
+ 0x3F3C, 0x0364, 0x0CD4, 0x00DC, 0x3FB0,
+ 0x3F48, 0x032C, 0x0CE0, 0x0100, 0x3FAC,
+ 0x3F50, 0x02F8, 0x0CEC, 0x0128, 0x3FA4,
+ 0x3F58, 0x02C4, 0x0CF8, 0x0150, 0x3F9C,
+ 0x3F60, 0x0290, 0x0D00, 0x017C, 0x3F94,
+ 0x3F68, 0x0260, 0x0D04, 0x01A8, 0x3F8C,
+ 0x3F74, 0x0230, 0x0D04, 0x01D4, 0x3F84,
+ 0x3F7C, 0x0200, 0x0D08, 0x0200, 0x3F7C
+};
-static const uint16_t filter_5tap_64p_150[165] = {
- 16368, 2064, 2064, 16368, 0,
- 16352, 2028, 2100, 16380, 16380,
- 16340, 1996, 2132, 12, 16376,
- 16328, 1960, 2168, 24, 16376,
- 16316, 1924, 2204, 44, 16372,
- 16308, 1888, 2236, 60, 16368,
- 16296, 1848, 2268, 76, 16364,
- 16288, 1812, 2300, 96, 16360,
- 16280, 1772, 2328, 116, 16356,
- 16272, 1736, 2360, 136, 16352,
- 16268, 1696, 2388, 160, 16348,
- 16260, 1656, 2416, 180, 16344,
- 16256, 1616, 2440, 204, 16340,
- 16248, 1576, 2464, 228, 16336,
- 16244, 1536, 2492, 252, 16332,
- 16240, 1496, 2512, 276, 16324,
- 16240, 1456, 2536, 304, 16320,
- 16236, 1416, 2556, 332, 16316,
- 16232, 1376, 2576, 360, 16312,
- 16232, 1336, 2592, 388, 16308,
- 16232, 1296, 2612, 416, 16300,
- 16232, 1256, 2628, 448, 16296,
- 16232, 1216, 2640, 480, 16292,
- 16232, 1172, 2652, 512, 16288,
- 16232, 1132, 2664, 544, 16284,
- 16232, 1092, 2676, 576, 16280,
- 16236, 1056, 2684, 608, 16272,
- 16236, 1016, 2692, 644, 16268,
- 16240, 976, 2700, 680, 16264,
- 16240, 936, 2704, 712, 16260,
- 16244, 900, 2708, 748, 16256,
- 16248, 860, 2708, 788, 16252,
- 16248, 824, 2708, 824, 16248 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_149[165] = {
+ 0x3FF4, 0x080C, 0x080C, 0x3FF4, 0x0000,
+ 0x3FE8, 0x07E8, 0x0830, 0x0000, 0x0000,
+ 0x3FDC, 0x07C8, 0x0850, 0x0010, 0x3FFC,
+ 0x3FD0, 0x07A4, 0x0878, 0x001C, 0x3FF8,
+ 0x3FC4, 0x0780, 0x0898, 0x0030, 0x3FF4,
+ 0x3FB8, 0x075C, 0x08B8, 0x0040, 0x3FF4,
+ 0x3FB0, 0x0738, 0x08D8, 0x0050, 0x3FF0,
+ 0x3FA8, 0x0710, 0x08F8, 0x0064, 0x3FEC,
+ 0x3FA0, 0x06EC, 0x0914, 0x0078, 0x3FE8,
+ 0x3F98, 0x06C4, 0x0934, 0x008C, 0x3FE4,
+ 0x3F90, 0x06A0, 0x094C, 0x00A4, 0x3FE0,
+ 0x3F8C, 0x0678, 0x0968, 0x00B8, 0x3FDC,
+ 0x3F84, 0x0650, 0x0984, 0x00D0, 0x3FD8,
+ 0x3F80, 0x0628, 0x099C, 0x00E8, 0x3FD4,
+ 0x3F7C, 0x0600, 0x09B8, 0x0100, 0x3FCC,
+ 0x3F78, 0x05D8, 0x09D0, 0x0118, 0x3FC8,
+ 0x3F74, 0x05B0, 0x09E4, 0x0134, 0x3FC4,
+ 0x3F70, 0x0588, 0x09F8, 0x0150, 0x3FC0,
+ 0x3F70, 0x0560, 0x0A08, 0x016C, 0x3FBC,
+ 0x3F6C, 0x0538, 0x0A20, 0x0188, 0x3FB4,
+ 0x3F6C, 0x0510, 0x0A30, 0x01A4, 0x3FB0,
+ 0x3F6C, 0x04E8, 0x0A3C, 0x01C4, 0x3FAC,
+ 0x3F6C, 0x04C0, 0x0A48, 0x01E4, 0x3FA8,
+ 0x3F6C, 0x0498, 0x0A58, 0x0200, 0x3FA4,
+ 0x3F6C, 0x0470, 0x0A60, 0x0224, 0x3FA0,
+ 0x3F6C, 0x0448, 0x0A70, 0x0244, 0x3F98,
+ 0x3F70, 0x0420, 0x0A78, 0x0264, 0x3F94,
+ 0x3F70, 0x03F8, 0x0A80, 0x0288, 0x3F90,
+ 0x3F74, 0x03D4, 0x0A84, 0x02A8, 0x3F8C,
+ 0x3F74, 0x03AC, 0x0A8C, 0x02CC, 0x3F88,
+ 0x3F78, 0x0384, 0x0A90, 0x02F0, 0x3F84,
+ 0x3F7C, 0x0360, 0x0A90, 0x0314, 0x3F80,
+ 0x3F7C, 0x033C, 0x0A90, 0x033C, 0x3F7C
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_183[165] = {
- 228, 1816, 1816, 228, 0,
- 216, 1792, 1836, 248, 16380,
- 200, 1772, 1860, 264, 16376,
- 184, 1748, 1884, 280, 16376,
- 168, 1728, 1904, 300, 16372,
- 156, 1704, 1928, 316, 16368,
- 144, 1680, 1948, 336, 16364,
- 128, 1656, 1968, 356, 16364,
- 116, 1632, 1988, 376, 16360,
- 104, 1604, 2008, 396, 16356,
- 96, 1580, 2024, 416, 16356,
- 84, 1556, 2044, 440, 16352,
- 72, 1528, 2060, 460, 16348,
- 64, 1504, 2076, 484, 16348,
- 52, 1476, 2092, 504, 16344,
- 44, 1448, 2104, 528, 16344,
- 36, 1424, 2120, 552, 16340,
- 28, 1396, 2132, 576, 16340,
- 20, 1368, 2144, 600, 16340,
- 12, 1340, 2156, 624, 16336,
- 4, 1312, 2168, 652, 16336,
- 0, 1284, 2180, 676, 16336,
- 16376, 1256, 2188, 700, 16332,
- 16372, 1228, 2196, 728, 16332,
- 16368, 1200, 2204, 752, 16332,
- 16364, 1172, 2212, 780, 16332,
- 16356, 1144, 2216, 808, 16332,
- 16352, 1116, 2220, 836, 16332,
- 16352, 1084, 2224, 860, 16332,
- 16348, 1056, 2228, 888, 16336,
- 16344, 1028, 2232, 916, 16336,
- 16340, 1000, 2232, 944, 16336,
- 16340, 972, 2232, 972, 16340 };
+ 0x0168, 0x069C, 0x0698, 0x0164, 0x0000,
+ 0x0154, 0x068C, 0x06AC, 0x0174, 0x0000,
+ 0x0144, 0x0674, 0x06C0, 0x0188, 0x0000,
+ 0x0138, 0x0664, 0x06D0, 0x0198, 0x3FFC,
+ 0x0128, 0x0654, 0x06E0, 0x01A8, 0x3FFC,
+ 0x0118, 0x0640, 0x06F0, 0x01BC, 0x3FFC,
+ 0x010C, 0x0630, 0x0700, 0x01CC, 0x3FF8,
+ 0x00FC, 0x061C, 0x0710, 0x01E0, 0x3FF8,
+ 0x00F0, 0x060C, 0x071C, 0x01F0, 0x3FF8,
+ 0x00E4, 0x05F4, 0x072C, 0x0204, 0x3FF8,
+ 0x00D8, 0x05E4, 0x0738, 0x0218, 0x3FF4,
+ 0x00CC, 0x05D0, 0x0744, 0x022C, 0x3FF4,
+ 0x00C0, 0x05B8, 0x0754, 0x0240, 0x3FF4,
+ 0x00B4, 0x05A4, 0x0760, 0x0254, 0x3FF4,
+ 0x00A8, 0x0590, 0x076C, 0x0268, 0x3FF4,
+ 0x009C, 0x057C, 0x0778, 0x027C, 0x3FF4,
+ 0x0094, 0x0564, 0x0780, 0x0294, 0x3FF4,
+ 0x0088, 0x0550, 0x0788, 0x02A8, 0x3FF8,
+ 0x0080, 0x0538, 0x0794, 0x02BC, 0x3FF8,
+ 0x0074, 0x0524, 0x079C, 0x02D4, 0x3FF8,
+ 0x006C, 0x0510, 0x07A4, 0x02E8, 0x3FF8,
+ 0x0064, 0x04F4, 0x07AC, 0x0300, 0x3FFC,
+ 0x005C, 0x04E4, 0x07B0, 0x0314, 0x3FFC,
+ 0x0054, 0x04C8, 0x07B8, 0x032C, 0x0000,
+ 0x004C, 0x04B4, 0x07C0, 0x0340, 0x0000,
+ 0x0044, 0x04A0, 0x07C4, 0x0358, 0x0000,
+ 0x003C, 0x0488, 0x07C8, 0x0370, 0x0004,
+ 0x0038, 0x0470, 0x07CC, 0x0384, 0x0008,
+ 0x0030, 0x045C, 0x07D0, 0x039C, 0x0008,
+ 0x002C, 0x0444, 0x07D0, 0x03B4, 0x000C,
+ 0x0024, 0x042C, 0x07D4, 0x03CC, 0x0010,
+ 0x0020, 0x0414, 0x07D4, 0x03E0, 0x0018,
+ 0x001C, 0x03FC, 0x07D4, 0x03F8, 0x001C
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_upscale[198] = {
- 0, 0, 4092, 0, 0, 0,
- 12, 16332, 4092, 52, 16368, 0,
- 24, 16280, 4088, 108, 16356, 0,
- 36, 16236, 4080, 168, 16340, 0,
- 44, 16188, 4064, 228, 16324, 0,
- 56, 16148, 4052, 292, 16308, 0,
- 64, 16108, 4032, 356, 16292, 4,
- 72, 16072, 4008, 424, 16276, 4,
- 80, 16036, 3980, 492, 16256, 4,
- 88, 16004, 3952, 564, 16240, 8,
- 96, 15972, 3920, 636, 16220, 8,
- 100, 15944, 3884, 712, 16204, 12,
- 108, 15916, 3844, 788, 16184, 16,
- 112, 15896, 3800, 864, 16164, 20,
- 116, 15872, 3756, 944, 16144, 20,
- 120, 15852, 3708, 1024, 16124, 24,
- 120, 15836, 3656, 1108, 16104, 28,
- 124, 15824, 3600, 1192, 16084, 32,
- 124, 15808, 3544, 1276, 16064, 36,
- 124, 15800, 3484, 1360, 16044, 40,
- 128, 15792, 3420, 1448, 16024, 44,
- 128, 15784, 3352, 1536, 16004, 48,
- 124, 15780, 3288, 1624, 15988, 52,
- 124, 15776, 3216, 1712, 15968, 56,
- 124, 15776, 3144, 1800, 15948, 64,
- 120, 15776, 3068, 1888, 15932, 68,
- 120, 15780, 2992, 1976, 15912, 72,
- 116, 15784, 2916, 2064, 15896, 76,
- 112, 15792, 2836, 2152, 15880, 80,
- 108, 15796, 2752, 2244, 15868, 84,
- 104, 15804, 2672, 2328, 15852, 88,
- 104, 15816, 2588, 2416, 15840, 92,
- 100, 15828, 2504, 2504, 15828, 100 };
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000,
+ 0x000C, 0x3FD0, 0x0FFC, 0x0034, 0x3FF4, 0x0000,
+ 0x0018, 0x3F9C, 0x0FF8, 0x006C, 0x3FE8, 0x0000,
+ 0x0024, 0x3F6C, 0x0FF0, 0x00A8, 0x3FD8, 0x0000,
+ 0x002C, 0x3F44, 0x0FE4, 0x00E4, 0x3FC8, 0x0000,
+ 0x0038, 0x3F18, 0x0FD4, 0x0124, 0x3FB8, 0x0000,
+ 0x0040, 0x3EF0, 0x0FC0, 0x0164, 0x3FA8, 0x0004,
+ 0x0048, 0x3EC8, 0x0FAC, 0x01A8, 0x3F98, 0x0004,
+ 0x0050, 0x3EA8, 0x0F94, 0x01EC, 0x3F84, 0x0004,
+ 0x0058, 0x3E84, 0x0F74, 0x0234, 0x3F74, 0x0008,
+ 0x0060, 0x3E68, 0x0F54, 0x027C, 0x3F60, 0x0008,
+ 0x0064, 0x3E4C, 0x0F30, 0x02C8, 0x3F4C, 0x000C,
+ 0x006C, 0x3E30, 0x0F04, 0x0314, 0x3F3C, 0x0010,
+ 0x0070, 0x3E18, 0x0EDC, 0x0360, 0x3F28, 0x0014,
+ 0x0074, 0x3E04, 0x0EB0, 0x03B0, 0x3F14, 0x0014,
+ 0x0078, 0x3DF0, 0x0E80, 0x0400, 0x3F00, 0x0018,
+ 0x0078, 0x3DE0, 0x0E4C, 0x0454, 0x3EEC, 0x001C,
+ 0x007C, 0x3DD0, 0x0E14, 0x04A8, 0x3ED8, 0x0020,
+ 0x007C, 0x3DC4, 0x0DDC, 0x04FC, 0x3EC4, 0x0024,
+ 0x007C, 0x3DBC, 0x0DA0, 0x0550, 0x3EB0, 0x0028,
+ 0x0080, 0x3DB4, 0x0D5C, 0x05A8, 0x3E9C, 0x002C,
+ 0x0080, 0x3DAC, 0x0D1C, 0x0600, 0x3E88, 0x0030,
+ 0x007C, 0x3DA8, 0x0CDC, 0x0658, 0x3E74, 0x0034,
+ 0x007C, 0x3DA4, 0x0C94, 0x06B0, 0x3E64, 0x0038,
+ 0x007C, 0x3DA4, 0x0C48, 0x0708, 0x3E50, 0x0040,
+ 0x0078, 0x3DA4, 0x0C00, 0x0760, 0x3E40, 0x0044,
+ 0x0078, 0x3DA8, 0x0BB4, 0x07B8, 0x3E2C, 0x0048,
+ 0x0074, 0x3DAC, 0x0B68, 0x0810, 0x3E1C, 0x004C,
+ 0x0070, 0x3DB4, 0x0B18, 0x0868, 0x3E0C, 0x0050,
+ 0x006C, 0x3DBC, 0x0AC4, 0x08C4, 0x3DFC, 0x0054,
+ 0x0068, 0x3DC4, 0x0A74, 0x0918, 0x3DF0, 0x0058,
+ 0x0068, 0x3DCC, 0x0A20, 0x0970, 0x3DE0, 0x005C,
+ 0x0064, 0x3DD4, 0x09C8, 0x09C8, 0x3DD4, 0x0064
+};
-static const uint16_t filter_6tap_64p_117[198] = {
- 16168, 476, 3568, 476, 16168, 0,
- 16180, 428, 3564, 528, 16156, 0,
- 16192, 376, 3556, 584, 16144, 4,
- 16204, 328, 3548, 636, 16128, 4,
- 16216, 280, 3540, 692, 16116, 8,
- 16228, 232, 3524, 748, 16104, 12,
- 16240, 188, 3512, 808, 16092, 12,
- 16252, 148, 3492, 864, 16080, 16,
- 16264, 104, 3472, 924, 16068, 16,
- 16276, 64, 3452, 984, 16056, 20,
- 16284, 28, 3428, 1044, 16048, 24,
- 16296, 16376, 3400, 1108, 16036, 24,
- 16304, 16340, 3372, 1168, 16024, 28,
- 16316, 16304, 3340, 1232, 16016, 32,
- 16324, 16272, 3308, 1296, 16004, 32,
- 16332, 16244, 3272, 1360, 15996, 36,
- 16344, 16212, 3236, 1424, 15988, 36,
- 16352, 16188, 3200, 1488, 15980, 40,
- 16360, 16160, 3160, 1552, 15972, 40,
- 16368, 16136, 3116, 1616, 15964, 40,
- 16372, 16112, 3072, 1680, 15956, 44,
- 16380, 16092, 3028, 1744, 15952, 44,
- 0, 16072, 2980, 1808, 15948, 44,
- 8, 16052, 2932, 1872, 15944, 48,
- 12, 16036, 2880, 1936, 15940, 48,
- 16, 16020, 2828, 2000, 15936, 48,
- 20, 16008, 2776, 2064, 15936, 48,
- 24, 15996, 2724, 2128, 15936, 48,
- 28, 15984, 2668, 2192, 15936, 48,
- 32, 15972, 2612, 2252, 15940, 44,
- 36, 15964, 2552, 2316, 15940, 44,
- 40, 15956, 2496, 2376, 15944, 44,
- 40, 15952, 2436, 2436, 15952, 40 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_116[198] = {
+ 0x3F0C, 0x0240, 0x0D68, 0x0240, 0x3F0C, 0x0000,
+ 0x3F18, 0x0210, 0x0D64, 0x0274, 0x3F00, 0x0000,
+ 0x3F24, 0x01E0, 0x0D58, 0x02A8, 0x3EF8, 0x0004,
+ 0x3F2C, 0x01B0, 0x0D58, 0x02DC, 0x3EEC, 0x0004,
+ 0x3F38, 0x0180, 0x0D50, 0x0310, 0x3EE0, 0x0008,
+ 0x3F44, 0x0154, 0x0D40, 0x0348, 0x3ED8, 0x0008,
+ 0x3F50, 0x0128, 0x0D34, 0x037C, 0x3ECC, 0x000C,
+ 0x3F5C, 0x00FC, 0x0D20, 0x03B4, 0x3EC4, 0x0010,
+ 0x3F64, 0x00D4, 0x0D14, 0x03EC, 0x3EB8, 0x0010,
+ 0x3F70, 0x00AC, 0x0CFC, 0x0424, 0x3EB0, 0x0014,
+ 0x3F78, 0x0084, 0x0CE8, 0x0460, 0x3EA8, 0x0014,
+ 0x3F84, 0x0060, 0x0CCC, 0x0498, 0x3EA0, 0x0018,
+ 0x3F90, 0x003C, 0x0CB4, 0x04D0, 0x3E98, 0x0018,
+ 0x3F98, 0x0018, 0x0C9C, 0x050C, 0x3E90, 0x0018,
+ 0x3FA0, 0x3FFC, 0x0C78, 0x0548, 0x3E88, 0x001C,
+ 0x3FAC, 0x3FDC, 0x0C54, 0x0584, 0x3E84, 0x001C,
+ 0x3FB4, 0x3FBC, 0x0C3C, 0x05BC, 0x3E7C, 0x001C,
+ 0x3FBC, 0x3FA0, 0x0C14, 0x05F8, 0x3E78, 0x0020,
+ 0x3FC4, 0x3F84, 0x0BF0, 0x0634, 0x3E74, 0x0020,
+ 0x3FCC, 0x3F68, 0x0BCC, 0x0670, 0x3E70, 0x0020,
+ 0x3FD4, 0x3F50, 0x0BA4, 0x06AC, 0x3E6C, 0x0020,
+ 0x3FDC, 0x3F38, 0x0B78, 0x06E8, 0x3E6C, 0x0020,
+ 0x3FE0, 0x3F24, 0x0B50, 0x0724, 0x3E68, 0x0020,
+ 0x3FE8, 0x3F0C, 0x0B24, 0x0760, 0x3E68, 0x0020,
+ 0x3FF0, 0x3EFC, 0x0AF4, 0x0798, 0x3E68, 0x0020,
+ 0x3FF4, 0x3EE8, 0x0AC8, 0x07D4, 0x3E68, 0x0020,
+ 0x3FFC, 0x3ED8, 0x0A94, 0x0810, 0x3E6C, 0x001C,
+ 0x0000, 0x3EC8, 0x0A64, 0x0848, 0x3E70, 0x001C,
+ 0x0000, 0x3EB8, 0x0A38, 0x0880, 0x3E74, 0x001C,
+ 0x0004, 0x3EAC, 0x0A04, 0x08BC, 0x3E78, 0x0018,
+ 0x0008, 0x3EA4, 0x09D0, 0x08F4, 0x3E7C, 0x0014,
+ 0x000C, 0x3E98, 0x0998, 0x092C, 0x3E84, 0x0014,
+ 0x0010, 0x3E90, 0x0964, 0x0960, 0x3E8C, 0x0010
+};
-static const uint16_t filter_6tap_64p_150[198] = {
- 16148, 920, 2724, 920, 16148, 0,
- 16152, 880, 2724, 956, 16148, 0,
- 16152, 844, 2720, 996, 16144, 0,
- 16156, 804, 2716, 1032, 16144, 0,
- 16156, 768, 2712, 1072, 16144, 0,
- 16160, 732, 2708, 1112, 16144, 16380,
- 16164, 696, 2700, 1152, 16144, 16380,
- 16168, 660, 2692, 1192, 16148, 16380,
- 16172, 628, 2684, 1232, 16148, 16380,
- 16176, 592, 2672, 1272, 16152, 16376,
- 16180, 560, 2660, 1312, 16152, 16376,
- 16184, 524, 2648, 1348, 16156, 16376,
- 16192, 492, 2632, 1388, 16160, 16372,
- 16196, 460, 2616, 1428, 16164, 16372,
- 16200, 432, 2600, 1468, 16168, 16368,
- 16204, 400, 2584, 1508, 16176, 16364,
- 16212, 368, 2564, 1548, 16180, 16364,
- 16216, 340, 2544, 1588, 16188, 16360,
- 16220, 312, 2524, 1628, 16196, 16356,
- 16228, 284, 2504, 1668, 16204, 16356,
- 16232, 256, 2480, 1704, 16212, 16352,
- 16240, 232, 2456, 1744, 16224, 16348,
- 16244, 204, 2432, 1780, 16232, 16344,
- 16248, 180, 2408, 1820, 16244, 16340,
- 16256, 156, 2380, 1856, 16256, 16336,
- 16260, 132, 2352, 1896, 16268, 16332,
- 16268, 108, 2324, 1932, 16280, 16328,
- 16272, 88, 2296, 1968, 16292, 16324,
- 16276, 64, 2268, 2004, 16308, 16320,
- 16284, 44, 2236, 2036, 16324, 16312,
- 16288, 24, 2204, 2072, 16340, 16308,
- 16292, 8, 2172, 2108, 16356, 16304,
- 16300, 16372, 2140, 2140, 16372, 16300 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_149[198] = {
+ 0x3F14, 0x0394, 0x0AB0, 0x0394, 0x3F14, 0x0000,
+ 0x3F18, 0x036C, 0x0AB0, 0x03B8, 0x3F14, 0x0000,
+ 0x3F18, 0x0348, 0x0AAC, 0x03E0, 0x3F14, 0x0000,
+ 0x3F1C, 0x0320, 0x0AAC, 0x0408, 0x3F10, 0x0000,
+ 0x3F20, 0x02FC, 0x0AA8, 0x042C, 0x3F10, 0x0000,
+ 0x3F24, 0x02D8, 0x0AA0, 0x0454, 0x3F10, 0x0000,
+ 0x3F28, 0x02B4, 0x0A98, 0x047C, 0x3F10, 0x0000,
+ 0x3F28, 0x0290, 0x0A90, 0x04A4, 0x3F14, 0x0000,
+ 0x3F30, 0x026C, 0x0A84, 0x04CC, 0x3F14, 0x0000,
+ 0x3F34, 0x024C, 0x0A7C, 0x04F4, 0x3F14, 0x3FFC,
+ 0x3F38, 0x0228, 0x0A70, 0x051C, 0x3F18, 0x3FFC,
+ 0x3F3C, 0x0208, 0x0A64, 0x0544, 0x3F1C, 0x3FF8,
+ 0x3F40, 0x01E8, 0x0A54, 0x056C, 0x3F20, 0x3FF8,
+ 0x3F44, 0x01C8, 0x0A48, 0x0594, 0x3F24, 0x3FF4,
+ 0x3F4C, 0x01A8, 0x0A34, 0x05BC, 0x3F28, 0x3FF4,
+ 0x3F50, 0x0188, 0x0A28, 0x05E4, 0x3F2C, 0x3FF0,
+ 0x3F54, 0x016C, 0x0A10, 0x060C, 0x3F34, 0x3FF0,
+ 0x3F5C, 0x014C, 0x09FC, 0x0634, 0x3F3C, 0x3FEC,
+ 0x3F60, 0x0130, 0x09EC, 0x065C, 0x3F40, 0x3FE8,
+ 0x3F68, 0x0114, 0x09D0, 0x0684, 0x3F48, 0x3FE8,
+ 0x3F6C, 0x00F8, 0x09B8, 0x06AC, 0x3F54, 0x3FE4,
+ 0x3F74, 0x00E0, 0x09A0, 0x06D0, 0x3F5C, 0x3FE0,
+ 0x3F78, 0x00C4, 0x098C, 0x06F8, 0x3F64, 0x3FDC,
+ 0x3F7C, 0x00AC, 0x0970, 0x0720, 0x3F70, 0x3FD8,
+ 0x3F84, 0x0094, 0x0954, 0x0744, 0x3F7C, 0x3FD4,
+ 0x3F88, 0x007C, 0x093C, 0x0768, 0x3F88, 0x3FD0,
+ 0x3F90, 0x0064, 0x091C, 0x0790, 0x3F94, 0x3FCC,
+ 0x3F94, 0x0050, 0x08FC, 0x07B4, 0x3FA4, 0x3FC8,
+ 0x3F98, 0x003C, 0x08E0, 0x07D8, 0x3FB0, 0x3FC4,
+ 0x3FA0, 0x0024, 0x08C0, 0x07FC, 0x3FC0, 0x3FC0,
+ 0x3FA4, 0x0014, 0x08A4, 0x081C, 0x3FD0, 0x3FB8,
+ 0x3FAC, 0x0000, 0x0880, 0x0840, 0x3FE0, 0x3FB4,
+ 0x3FB0, 0x3FF0, 0x0860, 0x0860, 0x3FF0, 0x3FB0
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_183[198] = {
- 16296, 1032, 2196, 1032, 16296, 0,
- 16292, 1004, 2200, 1060, 16304, 16380,
- 16288, 976, 2200, 1088, 16308, 16380,
- 16284, 952, 2196, 1116, 16312, 16376,
- 16284, 924, 2196, 1144, 16320, 16376,
- 16280, 900, 2192, 1172, 16324, 16372,
- 16276, 872, 2192, 1200, 16332, 16368,
- 16276, 848, 2188, 1228, 16340, 16368,
- 16272, 820, 2180, 1256, 16348, 16364,
- 16272, 796, 2176, 1280, 16356, 16360,
- 16268, 768, 2168, 1308, 16364, 16360,
- 16268, 744, 2164, 1336, 16372, 16356,
- 16268, 716, 2156, 1364, 16380, 16352,
- 16264, 692, 2148, 1392, 4, 16352,
- 16264, 668, 2136, 1420, 16, 16348,
- 16264, 644, 2128, 1448, 28, 16344,
- 16264, 620, 2116, 1472, 36, 16340,
- 16264, 596, 2108, 1500, 48, 16340,
- 16268, 572, 2096, 1524, 60, 16336,
- 16268, 548, 2080, 1552, 72, 16332,
- 16268, 524, 2068, 1576, 88, 16328,
- 16268, 504, 2056, 1604, 100, 16324,
- 16272, 480, 2040, 1628, 112, 16324,
- 16272, 456, 2024, 1652, 128, 16320,
- 16272, 436, 2008, 1680, 144, 16316,
- 16276, 416, 1992, 1704, 156, 16312,
- 16276, 392, 1976, 1724, 172, 16308,
- 16280, 372, 1956, 1748, 188, 16308,
- 16280, 352, 1940, 1772, 204, 16304,
- 16284, 332, 1920, 1796, 224, 16300,
- 16288, 312, 1900, 1816, 240, 16296,
- 16288, 296, 1880, 1840, 256, 16296,
- 16292, 276, 1860, 1860, 276, 16292 };
+ 0x002C, 0x0420, 0x076C, 0x041C, 0x002C, 0x0000,
+ 0x0028, 0x040C, 0x0768, 0x0430, 0x0034, 0x0000,
+ 0x0020, 0x03F8, 0x0768, 0x0448, 0x003C, 0x3FFC,
+ 0x0018, 0x03E4, 0x0768, 0x045C, 0x0044, 0x3FFC,
+ 0x0014, 0x03D0, 0x0768, 0x0470, 0x004C, 0x3FF8,
+ 0x000C, 0x03BC, 0x0764, 0x0484, 0x0058, 0x3FF8,
+ 0x0008, 0x03A4, 0x0764, 0x049C, 0x0060, 0x3FF4,
+ 0x0004, 0x0390, 0x0760, 0x04B0, 0x0068, 0x3FF4,
+ 0x0000, 0x037C, 0x0760, 0x04C4, 0x0070, 0x3FF0,
+ 0x3FFC, 0x0364, 0x075C, 0x04D8, 0x007C, 0x3FF0,
+ 0x3FF8, 0x0350, 0x0758, 0x04F0, 0x0084, 0x3FEC,
+ 0x3FF4, 0x033C, 0x0750, 0x0504, 0x0090, 0x3FEC,
+ 0x3FF0, 0x0328, 0x074C, 0x0518, 0x009C, 0x3FE8,
+ 0x3FEC, 0x0314, 0x0744, 0x052C, 0x00A8, 0x3FE8,
+ 0x3FE8, 0x0304, 0x0740, 0x0540, 0x00B0, 0x3FE4,
+ 0x3FE4, 0x02EC, 0x073C, 0x0554, 0x00BC, 0x3FE4,
+ 0x3FE0, 0x02DC, 0x0734, 0x0568, 0x00C8, 0x3FE0,
+ 0x3FE0, 0x02C4, 0x072C, 0x057C, 0x00D4, 0x3FE0,
+ 0x3FDC, 0x02B4, 0x0724, 0x058C, 0x00E4, 0x3FDC,
+ 0x3FDC, 0x02A0, 0x0718, 0x05A0, 0x00F0, 0x3FDC,
+ 0x3FD8, 0x028C, 0x0714, 0x05B4, 0x00FC, 0x3FD8,
+ 0x3FD8, 0x0278, 0x0704, 0x05C8, 0x010C, 0x3FD8,
+ 0x3FD4, 0x0264, 0x0700, 0x05D8, 0x0118, 0x3FD8,
+ 0x3FD4, 0x0254, 0x06F0, 0x05EC, 0x0128, 0x3FD4,
+ 0x3FD0, 0x0244, 0x06E8, 0x05FC, 0x0134, 0x3FD4,
+ 0x3FD0, 0x0230, 0x06DC, 0x060C, 0x0144, 0x3FD4,
+ 0x3FD0, 0x021C, 0x06D0, 0x0620, 0x0154, 0x3FD0,
+ 0x3FD0, 0x0208, 0x06C4, 0x0630, 0x0164, 0x3FD0,
+ 0x3FD0, 0x01F8, 0x06B8, 0x0640, 0x0170, 0x3FD0,
+ 0x3FCC, 0x01E8, 0x06AC, 0x0650, 0x0180, 0x3FD0,
+ 0x3FCC, 0x01D8, 0x069C, 0x0660, 0x0190, 0x3FD0,
+ 0x3FCC, 0x01C4, 0x068C, 0x0670, 0x01A4, 0x3FD0,
+ 0x3FCC, 0x01B8, 0x0680, 0x067C, 0x01B4, 0x3FCC
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_upscale[231] = {
- 176, 15760, 2488, 2488, 15760, 176, 0,
- 172, 15772, 2404, 2572, 15752, 180, 16380,
- 168, 15784, 2324, 2656, 15740, 184, 16380,
- 164, 15800, 2240, 2736, 15732, 188, 16376,
- 160, 15812, 2152, 2816, 15728, 192, 16376,
- 152, 15828, 2068, 2896, 15724, 192, 16376,
- 148, 15848, 1984, 2972, 15720, 196, 16372,
- 140, 15864, 1896, 3048, 15720, 196, 16372,
- 136, 15884, 1812, 3124, 15720, 196, 16368,
- 128, 15900, 1724, 3196, 15720, 196, 16368,
- 120, 15920, 1640, 3268, 15724, 196, 16368,
- 116, 15940, 1552, 3336, 15732, 196, 16364,
- 108, 15964, 1468, 3400, 15740, 196, 16364,
- 104, 15984, 1384, 3464, 15748, 192, 16364,
- 96, 16004, 1300, 3524, 15760, 188, 16364,
- 88, 16028, 1216, 3584, 15776, 184, 16364,
- 84, 16048, 1132, 3640, 15792, 180, 16360,
- 76, 16072, 1048, 3692, 15812, 176, 16360,
- 68, 16092, 968, 3744, 15832, 168, 16360,
- 64, 16116, 888, 3788, 15856, 160, 16360,
- 56, 16140, 812, 3832, 15884, 152, 16360,
- 52, 16160, 732, 3876, 15912, 144, 16360,
- 44, 16184, 656, 3912, 15944, 136, 16364,
- 40, 16204, 584, 3944, 15976, 124, 16364,
- 32, 16228, 512, 3976, 16012, 116, 16364,
- 28, 16248, 440, 4004, 16048, 104, 16364,
- 24, 16268, 372, 4028, 16092, 88, 16368,
- 20, 16288, 304, 4048, 16132, 76, 16368,
- 12, 16308, 240, 4064, 16180, 60, 16372,
- 8, 16328, 176, 4076, 16228, 48, 16372,
- 4, 16348, 112, 4088, 16276, 32, 16376,
- 0, 16364, 56, 4092, 16328, 16, 16380,
- 0, 0, 0, 4096, 0, 0, 0 };
+ 0x00B0, 0x3D98, 0x09BC, 0x09B8, 0x3D94, 0x00B0, 0x0000,
+ 0x00AC, 0x3DA0, 0x0968, 0x0A10, 0x3D88, 0x00B4, 0x0000,
+ 0x00A8, 0x3DAC, 0x0914, 0x0A60, 0x3D80, 0x00B8, 0x0000,
+ 0x00A4, 0x3DB8, 0x08C0, 0x0AB4, 0x3D78, 0x00BC, 0x3FFC,
+ 0x00A0, 0x3DC8, 0x0868, 0x0B00, 0x3D74, 0x00C0, 0x3FFC,
+ 0x0098, 0x3DD8, 0x0818, 0x0B54, 0x3D6C, 0x00C0, 0x3FF8,
+ 0x0094, 0x3DE8, 0x07C0, 0x0B9C, 0x3D6C, 0x00C4, 0x3FF8,
+ 0x008C, 0x3DFC, 0x0768, 0x0BEC, 0x3D68, 0x00C4, 0x3FF8,
+ 0x0088, 0x3E0C, 0x0714, 0x0C38, 0x3D68, 0x00C4, 0x3FF4,
+ 0x0080, 0x3E20, 0x06BC, 0x0C80, 0x3D6C, 0x00C4, 0x3FF4,
+ 0x0078, 0x3E34, 0x0668, 0x0CC4, 0x3D70, 0x00C4, 0x3FF4,
+ 0x0074, 0x3E48, 0x0610, 0x0D08, 0x3D78, 0x00C4, 0x3FF0,
+ 0x006C, 0x3E5C, 0x05BC, 0x0D48, 0x3D80, 0x00C4, 0x3FF0,
+ 0x0068, 0x3E74, 0x0568, 0x0D84, 0x3D88, 0x00C0, 0x3FF0,
+ 0x0060, 0x3E88, 0x0514, 0x0DC8, 0x3D94, 0x00BC, 0x3FEC,
+ 0x0058, 0x3E9C, 0x04C0, 0x0E04, 0x3DA4, 0x00B8, 0x3FEC,
+ 0x0054, 0x3EB4, 0x046C, 0x0E38, 0x3DB4, 0x00B4, 0x3FEC,
+ 0x004C, 0x3ECC, 0x0418, 0x0E6C, 0x3DC8, 0x00B0, 0x3FEC,
+ 0x0044, 0x3EE0, 0x03C8, 0x0EA4, 0x3DDC, 0x00A8, 0x3FEC,
+ 0x0040, 0x3EF8, 0x0378, 0x0ED0, 0x3DF4, 0x00A0, 0x3FEC,
+ 0x0038, 0x3F0C, 0x032C, 0x0EFC, 0x3E10, 0x0098, 0x3FEC,
+ 0x0034, 0x3F24, 0x02DC, 0x0F24, 0x3E2C, 0x0090, 0x3FEC,
+ 0x002C, 0x3F38, 0x0294, 0x0F4C, 0x3E48, 0x0088, 0x3FEC,
+ 0x0028, 0x3F50, 0x0248, 0x0F68, 0x3E6C, 0x007C, 0x3FF0,
+ 0x0020, 0x3F64, 0x0200, 0x0F88, 0x3E90, 0x0074, 0x3FF0,
+ 0x001C, 0x3F7C, 0x01B8, 0x0FA4, 0x3EB4, 0x0068, 0x3FF0,
+ 0x0018, 0x3F90, 0x0174, 0x0FBC, 0x3EDC, 0x0058, 0x3FF4,
+ 0x0014, 0x3FA4, 0x0130, 0x0FD0, 0x3F08, 0x004C, 0x3FF4,
+ 0x000C, 0x3FB8, 0x00F0, 0x0FE4, 0x3F34, 0x003C, 0x3FF8,
+ 0x0008, 0x3FCC, 0x00B0, 0x0FF0, 0x3F64, 0x0030, 0x3FF8,
+ 0x0004, 0x3FDC, 0x0070, 0x0FFC, 0x3F98, 0x0020, 0x3FFC,
+ 0x0000, 0x3FF0, 0x0038, 0x0FFC, 0x3FCC, 0x0010, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000
+};
-static const uint16_t filter_7tap_64p_117[231] = {
- 92, 15868, 2464, 2464, 15868, 92, 0,
- 96, 15864, 2404, 2528, 15876, 88, 0,
- 100, 15860, 2344, 2584, 15884, 84, 0,
- 104, 15856, 2280, 2644, 15892, 76, 0,
- 108, 15852, 2216, 2700, 15904, 72, 0,
- 108, 15852, 2152, 2756, 15916, 64, 0,
- 112, 15852, 2088, 2812, 15932, 60, 0,
- 112, 15852, 2024, 2864, 15948, 52, 0,
- 112, 15856, 1960, 2916, 15964, 44, 0,
- 116, 15860, 1892, 2964, 15984, 36, 0,
- 116, 15864, 1828, 3016, 16004, 24, 4,
- 116, 15868, 1760, 3060, 16024, 16, 4,
- 116, 15876, 1696, 3108, 16048, 8, 8,
- 116, 15884, 1628, 3152, 16072, 16380, 8,
- 112, 15892, 1564, 3192, 16100, 16372, 8,
- 112, 15900, 1496, 3232, 16124, 16360, 12,
- 112, 15908, 1428, 3268, 16156, 16348, 12,
- 108, 15920, 1364, 3304, 16188, 16336, 16,
- 108, 15928, 1300, 3340, 16220, 16324, 20,
- 104, 15940, 1232, 3372, 16252, 16312, 20,
- 104, 15952, 1168, 3400, 16288, 16300, 24,
- 100, 15964, 1104, 3428, 16328, 16284, 28,
- 96, 15980, 1040, 3452, 16364, 16272, 28,
- 96, 15992, 976, 3476, 20, 16256, 32,
- 92, 16004, 916, 3496, 64, 16244, 36,
- 88, 16020, 856, 3516, 108, 16228, 40,
- 84, 16032, 792, 3532, 152, 16216, 44,
- 80, 16048, 732, 3544, 200, 16200, 48,
- 80, 16064, 676, 3556, 248, 16184, 48,
- 76, 16080, 616, 3564, 296, 16168, 52,
- 72, 16092, 560, 3568, 344, 16156, 56,
- 68, 16108, 504, 3572, 396, 16140, 60,
- 64, 16124, 452, 3576, 452, 16124, 64 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_116[231] = {
+ 0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000,
+ 0x0024, 0x3E4C, 0x0954, 0x09C0, 0x3E64, 0x0018, 0x0000,
+ 0x002C, 0x3E44, 0x091C, 0x09F4, 0x3E70, 0x0010, 0x0000,
+ 0x0030, 0x3E3C, 0x08E8, 0x0A24, 0x3E80, 0x0008, 0x0000,
+ 0x0034, 0x3E34, 0x08AC, 0x0A5C, 0x3E90, 0x0000, 0x0000,
+ 0x003C, 0x3E30, 0x0870, 0x0A84, 0x3EA0, 0x3FFC, 0x0004,
+ 0x0040, 0x3E28, 0x0838, 0x0AB4, 0x3EB4, 0x3FF4, 0x0004,
+ 0x0044, 0x3E24, 0x07FC, 0x0AE4, 0x3EC8, 0x3FEC, 0x0004,
+ 0x0048, 0x3E24, 0x07C4, 0x0B08, 0x3EDC, 0x3FE4, 0x0008,
+ 0x0048, 0x3E20, 0x0788, 0x0B3C, 0x3EF4, 0x3FD8, 0x0008,
+ 0x004C, 0x3E20, 0x074C, 0x0B60, 0x3F0C, 0x3FD0, 0x000C,
+ 0x0050, 0x3E20, 0x0710, 0x0B8C, 0x3F24, 0x3FC4, 0x000C,
+ 0x0050, 0x3E20, 0x06D4, 0x0BB0, 0x3F40, 0x3FBC, 0x0010,
+ 0x0054, 0x3E24, 0x0698, 0x0BD4, 0x3F5C, 0x3FB0, 0x0010,
+ 0x0054, 0x3E24, 0x065C, 0x0BFC, 0x3F78, 0x3FA4, 0x0014,
+ 0x0054, 0x3E28, 0x0624, 0x0C1C, 0x3F98, 0x3F98, 0x0014,
+ 0x0058, 0x3E2C, 0x05E4, 0x0C3C, 0x3FB8, 0x3F8C, 0x0018,
+ 0x0058, 0x3E34, 0x05A8, 0x0C58, 0x3FD8, 0x3F80, 0x001C,
+ 0x0058, 0x3E38, 0x0570, 0x0C78, 0x3FF8, 0x3F74, 0x001C,
+ 0x0058, 0x3E40, 0x0534, 0x0C94, 0x0018, 0x3F68, 0x0020,
+ 0x0058, 0x3E48, 0x04F4, 0x0CAC, 0x0040, 0x3F5C, 0x0024,
+ 0x0058, 0x3E50, 0x04BC, 0x0CC4, 0x0064, 0x3F50, 0x0024,
+ 0x0054, 0x3E58, 0x0484, 0x0CD8, 0x008C, 0x3F44, 0x0028,
+ 0x0054, 0x3E60, 0x0448, 0x0CEC, 0x00B4, 0x3F38, 0x002C,
+ 0x0054, 0x3E68, 0x0410, 0x0CFC, 0x00E0, 0x3F28, 0x0030,
+ 0x0054, 0x3E74, 0x03D4, 0x0D0C, 0x010C, 0x3F1C, 0x0030,
+ 0x0050, 0x3E7C, 0x03A0, 0x0D18, 0x0138, 0x3F10, 0x0034,
+ 0x0050, 0x3E88, 0x0364, 0x0D24, 0x0164, 0x3F04, 0x0038,
+ 0x004C, 0x3E94, 0x0330, 0x0D30, 0x0194, 0x3EF4, 0x0038,
+ 0x004C, 0x3EA0, 0x02F8, 0x0D34, 0x01C4, 0x3EE8, 0x003C,
+ 0x0048, 0x3EAC, 0x02C0, 0x0D3C, 0x01F4, 0x3EDC, 0x0040,
+ 0x0048, 0x3EB8, 0x0290, 0x0D3C, 0x0224, 0x3ED0, 0x0040,
+ 0x0044, 0x3EC4, 0x0258, 0x0D40, 0x0258, 0x3EC4, 0x0044
+};
-static const uint16_t filter_7tap_64p_150[231] = {
- 16224, 16380, 2208, 2208, 16380, 16224, 0,
- 16232, 16360, 2172, 2236, 16, 16216, 0,
- 16236, 16340, 2140, 2268, 40, 16212, 0,
- 16244, 16324, 2104, 2296, 60, 16204, 4,
- 16252, 16304, 2072, 2324, 84, 16196, 4,
- 16256, 16288, 2036, 2352, 108, 16192, 4,
- 16264, 16268, 2000, 2380, 132, 16184, 8,
- 16272, 16252, 1960, 2408, 160, 16176, 8,
- 16276, 16240, 1924, 2432, 184, 16172, 8,
- 16284, 16224, 1888, 2456, 212, 16164, 8,
- 16288, 16212, 1848, 2480, 240, 16160, 12,
- 16296, 16196, 1812, 2500, 268, 16152, 12,
- 16300, 16184, 1772, 2524, 296, 16144, 12,
- 16308, 16172, 1736, 2544, 324, 16140, 12,
- 16312, 16164, 1696, 2564, 356, 16136, 12,
- 16320, 16152, 1656, 2584, 388, 16128, 12,
- 16324, 16144, 1616, 2600, 416, 16124, 12,
- 16328, 16136, 1576, 2616, 448, 16116, 12,
- 16332, 16128, 1536, 2632, 480, 16112, 12,
- 16340, 16120, 1496, 2648, 516, 16108, 12,
- 16344, 16112, 1456, 2660, 548, 16104, 12,
- 16348, 16104, 1416, 2672, 580, 16100, 12,
- 16352, 16100, 1376, 2684, 616, 16096, 12,
- 16356, 16096, 1336, 2696, 652, 16092, 12,
- 16360, 16092, 1296, 2704, 688, 16088, 12,
- 16364, 16088, 1256, 2712, 720, 16084, 12,
- 16368, 16084, 1220, 2720, 760, 16084, 8,
- 16368, 16080, 1180, 2724, 796, 16080, 8,
- 16372, 16080, 1140, 2732, 832, 16080, 8,
- 16376, 16076, 1100, 2732, 868, 16076, 4,
- 16380, 16076, 1060, 2736, 908, 16076, 4,
- 16380, 16076, 1020, 2740, 944, 16076, 0,
- 0, 16076, 984, 2740, 984, 16076, 0 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_149[231] = {
+ 0x3F68, 0x3FEC, 0x08A8, 0x08AC, 0x3FF0, 0x3F68, 0x0000,
+ 0x3F70, 0x3FDC, 0x0888, 0x08CC, 0x0000, 0x3F60, 0x0000,
+ 0x3F74, 0x3FC8, 0x0868, 0x08F0, 0x0014, 0x3F58, 0x0000,
+ 0x3F7C, 0x3FB4, 0x0844, 0x0908, 0x002C, 0x3F54, 0x0004,
+ 0x3F84, 0x3FA4, 0x0820, 0x0924, 0x0044, 0x3F4C, 0x0004,
+ 0x3F88, 0x3F90, 0x0800, 0x0944, 0x005C, 0x3F44, 0x0004,
+ 0x3F90, 0x3F80, 0x07D8, 0x095C, 0x0074, 0x3F40, 0x0008,
+ 0x3F98, 0x3F70, 0x07B0, 0x097C, 0x008C, 0x3F38, 0x0008,
+ 0x3F9C, 0x3F60, 0x0790, 0x0994, 0x00A8, 0x3F30, 0x0008,
+ 0x3FA4, 0x3F54, 0x0764, 0x09B0, 0x00C4, 0x3F28, 0x0008,
+ 0x3FA8, 0x3F48, 0x0740, 0x09C4, 0x00DC, 0x3F24, 0x000C,
+ 0x3FB0, 0x3F38, 0x0718, 0x09DC, 0x00FC, 0x3F1C, 0x000C,
+ 0x3FB4, 0x3F2C, 0x06F0, 0x09F4, 0x0118, 0x3F18, 0x000C,
+ 0x3FBC, 0x3F24, 0x06C8, 0x0A08, 0x0134, 0x3F10, 0x000C,
+ 0x3FC0, 0x3F18, 0x06A0, 0x0A1C, 0x0154, 0x3F08, 0x0010,
+ 0x3FC8, 0x3F10, 0x0678, 0x0A2C, 0x0170, 0x3F04, 0x0010,
+ 0x3FCC, 0x3F04, 0x0650, 0x0A40, 0x0190, 0x3F00, 0x0010,
+ 0x3FD0, 0x3EFC, 0x0628, 0x0A54, 0x01B0, 0x3EF8, 0x0010,
+ 0x3FD4, 0x3EF4, 0x0600, 0x0A64, 0x01D0, 0x3EF4, 0x0010,
+ 0x3FDC, 0x3EEC, 0x05D8, 0x0A6C, 0x01F4, 0x3EF0, 0x0010,
+ 0x3FE0, 0x3EE8, 0x05B0, 0x0A7C, 0x0214, 0x3EE8, 0x0010,
+ 0x3FE4, 0x3EE0, 0x0588, 0x0A88, 0x0238, 0x3EE4, 0x0010,
+ 0x3FE8, 0x3EDC, 0x055C, 0x0A98, 0x0258, 0x3EE0, 0x0010,
+ 0x3FEC, 0x3ED8, 0x0534, 0x0AA0, 0x027C, 0x3EDC, 0x0010,
+ 0x3FF0, 0x3ED4, 0x050C, 0x0AAC, 0x02A0, 0x3ED8, 0x000C,
+ 0x3FF4, 0x3ED0, 0x04E4, 0x0AB4, 0x02C4, 0x3ED4, 0x000C,
+ 0x3FF4, 0x3ECC, 0x04C0, 0x0ABC, 0x02E8, 0x3ED0, 0x000C,
+ 0x3FF8, 0x3ECC, 0x0494, 0x0AC0, 0x030C, 0x3ED0, 0x000C,
+ 0x3FFC, 0x3EC8, 0x046C, 0x0AC8, 0x0334, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x0444, 0x0AC8, 0x0358, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x041C, 0x0ACC, 0x0380, 0x3EC8, 0x0008,
+ 0x0000, 0x3EC8, 0x03F4, 0x0AD0, 0x03A8, 0x3EC8, 0x0004,
+ 0x0004, 0x3EC8, 0x03CC, 0x0AD0, 0x03CC, 0x3EC8, 0x0004
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_183[231] = {
- 16216, 324, 1884, 1884, 324, 16216, 0,
- 16220, 304, 1864, 1904, 344, 16216, 0,
- 16224, 284, 1844, 1924, 364, 16216, 0,
- 16224, 264, 1824, 1944, 384, 16212, 16380,
- 16228, 248, 1804, 1960, 408, 16212, 16380,
- 16228, 228, 1784, 1976, 428, 16208, 16380,
- 16232, 212, 1760, 1996, 452, 16208, 16380,
- 16236, 192, 1740, 2012, 472, 16208, 16376,
- 16240, 176, 1716, 2028, 496, 16208, 16376,
- 16240, 160, 1696, 2040, 516, 16208, 16376,
- 16244, 144, 1672, 2056, 540, 16208, 16376,
- 16248, 128, 1648, 2068, 564, 16208, 16372,
- 16252, 112, 1624, 2084, 588, 16208, 16372,
- 16256, 96, 1600, 2096, 612, 16208, 16368,
- 16256, 84, 1576, 2108, 636, 16208, 16368,
- 16260, 68, 1552, 2120, 660, 16208, 16368,
- 16264, 56, 1524, 2132, 684, 16212, 16364,
- 16268, 40, 1500, 2140, 712, 16212, 16364,
- 16272, 28, 1476, 2152, 736, 16216, 16360,
- 16276, 16, 1448, 2160, 760, 16216, 16356,
- 16280, 4, 1424, 2168, 788, 16220, 16356,
- 16284, 16376, 1396, 2176, 812, 16224, 16352,
- 16288, 16368, 1372, 2184, 840, 16224, 16352,
- 16292, 16356, 1344, 2188, 864, 16228, 16348,
- 16292, 16344, 1320, 2196, 892, 16232, 16344,
- 16296, 16336, 1292, 2200, 916, 16236, 16344,
- 16300, 16324, 1264, 2204, 944, 16240, 16340,
- 16304, 16316, 1240, 2208, 972, 16248, 16336,
- 16308, 16308, 1212, 2212, 996, 16252, 16332,
- 16312, 16300, 1184, 2216, 1024, 16256, 16332,
- 16316, 16292, 1160, 2216, 1052, 16264, 16328,
- 16316, 16284, 1132, 2216, 1076, 16268, 16324,
- 16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+ 0x3FA4, 0x01E8, 0x0674, 0x0674, 0x01E8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01D4, 0x0668, 0x0684, 0x01F8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01C4, 0x0658, 0x0690, 0x0208, 0x3FA8, 0x0000,
+ 0x3FA0, 0x01B4, 0x064C, 0x06A0, 0x021C, 0x3FA8, 0x3FFC,
+ 0x3FA0, 0x01A4, 0x063C, 0x06AC, 0x022C, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0194, 0x0630, 0x06B4, 0x0240, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0184, 0x0620, 0x06C4, 0x0250, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0174, 0x0614, 0x06CC, 0x0264, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0164, 0x0604, 0x06D8, 0x0278, 0x3FB4, 0x3FF4,
+ 0x3FA0, 0x0154, 0x05F4, 0x06E4, 0x0288, 0x3FB8, 0x3FF4,
+ 0x3FA0, 0x0148, 0x05E4, 0x06EC, 0x029C, 0x3FBC, 0x3FF0,
+ 0x3FA0, 0x0138, 0x05D4, 0x06F4, 0x02B0, 0x3FC0, 0x3FF0,
+ 0x3FA0, 0x0128, 0x05C4, 0x0704, 0x02C4, 0x3FC0, 0x3FEC,
+ 0x3FA0, 0x011C, 0x05B4, 0x0708, 0x02D8, 0x3FC4, 0x3FEC,
+ 0x3FA4, 0x010C, 0x05A4, 0x0714, 0x02E8, 0x3FC8, 0x3FE8,
+ 0x3FA4, 0x0100, 0x0590, 0x0718, 0x02FC, 0x3FD0, 0x3FE8,
+ 0x3FA4, 0x00F0, 0x0580, 0x0724, 0x0310, 0x3FD4, 0x3FE4,
+ 0x3FA4, 0x00E4, 0x056C, 0x072C, 0x0324, 0x3FD8, 0x3FE4,
+ 0x3FA8, 0x00D8, 0x055C, 0x0730, 0x0338, 0x3FDC, 0x3FE0,
+ 0x3FA8, 0x00CC, 0x0548, 0x0738, 0x034C, 0x3FE4, 0x3FDC,
+ 0x3FA8, 0x00BC, 0x0538, 0x0740, 0x0360, 0x3FE8, 0x3FDC,
+ 0x3FAC, 0x00B0, 0x0528, 0x0744, 0x0374, 0x3FEC, 0x3FD8,
+ 0x3FAC, 0x00A4, 0x0514, 0x0748, 0x0388, 0x3FF4, 0x3FD8,
+ 0x3FB0, 0x0098, 0x0500, 0x074C, 0x039C, 0x3FFC, 0x3FD4,
+ 0x3FB0, 0x0090, 0x04EC, 0x0750, 0x03B0, 0x0000, 0x3FD4,
+ 0x3FB0, 0x0084, 0x04DC, 0x0758, 0x03C4, 0x0004, 0x3FD0,
+ 0x3FB4, 0x0078, 0x04CC, 0x0758, 0x03D8, 0x000C, 0x3FCC,
+ 0x3FB4, 0x006C, 0x04B8, 0x075C, 0x03EC, 0x0014, 0x3FCC,
+ 0x3FB8, 0x0064, 0x04A0, 0x0760, 0x0400, 0x001C, 0x3FC8,
+ 0x3FB8, 0x0058, 0x0490, 0x0760, 0x0414, 0x0024, 0x3FC8,
+ 0x3FBC, 0x0050, 0x047C, 0x0760, 0x0428, 0x002C, 0x3FC4,
+ 0x3FBC, 0x0048, 0x0464, 0x0764, 0x043C, 0x0034, 0x3FC4,
+ 0x3FC0, 0x003C, 0x0454, 0x0764, 0x0450, 0x003C, 0x3FC0
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_upscale[264] = {
- 0, 0, 0, 4096, 0, 0, 0, 0,
- 16376, 20, 16328, 4092, 56, 16364, 4, 0,
- 16372, 36, 16272, 4088, 116, 16340, 12, 0,
- 16364, 56, 16220, 4080, 180, 16320, 20, 0,
- 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
- 16356, 92, 16124, 4048, 312, 16276, 32, 16380,
- 16352, 108, 16080, 4032, 380, 16252, 40, 16380,
- 16344, 124, 16036, 4008, 452, 16228, 48, 16380,
- 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
- 16340, 152, 15956, 3952, 600, 16180, 64, 16376,
- 16336, 164, 15920, 3920, 672, 16156, 76, 16376,
- 16332, 176, 15888, 3884, 752, 16132, 84, 16376,
- 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
- 16328, 200, 15828, 3800, 908, 16080, 100, 16372,
- 16324, 208, 15804, 3756, 992, 16056, 108, 16372,
- 16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
- 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
- 16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
- 16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
- 16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
- 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
- 16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
- 16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
- 16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
- 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
- 16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
- 16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
- 16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
- 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
- 16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
- 16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
- 16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
- 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3FFC, 0x0014, 0x3FC8, 0x1000, 0x0038, 0x3FEC, 0x0004, 0x0000,
+ 0x3FF4, 0x0024, 0x3F94, 0x0FFC, 0x0074, 0x3FD8, 0x000C, 0x0000,
+ 0x3FF0, 0x0038, 0x3F60, 0x0FEC, 0x00B4, 0x3FC4, 0x0014, 0x0000,
+ 0x3FEC, 0x004C, 0x3F2C, 0x0FE4, 0x00F4, 0x3FAC, 0x0018, 0x0000,
+ 0x3FE4, 0x005C, 0x3F00, 0x0FD4, 0x0138, 0x3F94, 0x0020, 0x0000,
+ 0x3FE0, 0x006C, 0x3ED0, 0x0FC4, 0x017C, 0x3F7C, 0x0028, 0x0000,
+ 0x3FDC, 0x007C, 0x3EA8, 0x0FA4, 0x01C4, 0x3F68, 0x0030, 0x0000,
+ 0x3FD8, 0x0088, 0x3E80, 0x0F90, 0x020C, 0x3F50, 0x0038, 0x3FFC,
+ 0x3FD4, 0x0098, 0x3E58, 0x0F70, 0x0258, 0x3F38, 0x0040, 0x3FFC,
+ 0x3FD0, 0x00A4, 0x3E34, 0x0F54, 0x02A0, 0x3F1C, 0x004C, 0x3FFC,
+ 0x3FD0, 0x00B0, 0x3E14, 0x0F28, 0x02F0, 0x3F04, 0x0054, 0x3FFC,
+ 0x3FCC, 0x00BC, 0x3DF4, 0x0F08, 0x033C, 0x3EEC, 0x005C, 0x3FF8,
+ 0x3FC8, 0x00C8, 0x3DD8, 0x0EDC, 0x038C, 0x3ED4, 0x0064, 0x3FF8,
+ 0x3FC8, 0x00D0, 0x3DC0, 0x0EAC, 0x03E0, 0x3EBC, 0x006C, 0x3FF4,
+ 0x3FC4, 0x00D8, 0x3DA8, 0x0E7C, 0x0430, 0x3EA4, 0x0078, 0x3FF4,
+ 0x3FC4, 0x00E0, 0x3D94, 0x0E48, 0x0484, 0x3E8C, 0x0080, 0x3FF0,
+ 0x3FC4, 0x00E8, 0x3D80, 0x0E10, 0x04D8, 0x3E74, 0x0088, 0x3FF0,
+ 0x3FC4, 0x00F0, 0x3D70, 0x0DD8, 0x052C, 0x3E5C, 0x0090, 0x3FEC,
+ 0x3FC0, 0x00F4, 0x3D60, 0x0DA0, 0x0584, 0x3E44, 0x0098, 0x3FEC,
+ 0x3FC0, 0x00F8, 0x3D54, 0x0D68, 0x05D8, 0x3E2C, 0x00A0, 0x3FE8,
+ 0x3FC0, 0x00FC, 0x3D48, 0x0D20, 0x0630, 0x3E18, 0x00AC, 0x3FE8,
+ 0x3FC0, 0x0100, 0x3D40, 0x0CE0, 0x0688, 0x3E00, 0x00B4, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D3C, 0x0C98, 0x06DC, 0x3DEC, 0x00BC, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D38, 0x0C58, 0x0734, 0x3DD8, 0x00C0, 0x3FE0,
+ 0x3FC4, 0x0104, 0x3D38, 0x0C0C, 0x078C, 0x3DC4, 0x00C8, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0BC4, 0x07E4, 0x3DB0, 0x00D0, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0B78, 0x083C, 0x3DA0, 0x00D8, 0x3FD8,
+ 0x3FC8, 0x0100, 0x3D3C, 0x0B28, 0x0890, 0x3D90, 0x00DC, 0x3FD8,
+ 0x3FC8, 0x00FC, 0x3D40, 0x0ADC, 0x08E8, 0x3D80, 0x00E4, 0x3FD4,
+ 0x3FCC, 0x00FC, 0x3D48, 0x0A84, 0x093C, 0x3D74, 0x00E8, 0x3FD4,
+ 0x3FCC, 0x00F8, 0x3D50, 0x0A38, 0x0990, 0x3D64, 0x00F0, 0x3FD0,
+ 0x3FD0, 0x00F4, 0x3D58, 0x09E0, 0x09E4, 0x3D5C, 0x00F4, 0x3FD0
+};
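The header ratios suggest how a caller would pick a table for a given scale factor. A hypothetical selection helper, for illustration only (it uses a floating-point ratio that real kernel code would avoid, and the cutoff behavior beyond 1.83332 is an assumption; only the table names are taken from the patch):

/* Pick a 4-tap table by input/output ratio (<= 1.0 means upscaling). */
static const uint16_t *get_4tap_64p_filter(double ratio)
{
	if (ratio <= 1.0)
		return filter_4tap_64p_upscale;
	if (ratio <= 1.16666)
		return filter_4tap_64p_116;
	if (ratio <= 1.49999)
		return filter_4tap_64p_149;
	return filter_4tap_64p_183;	/* assumed to also cover > 1.83332 */
}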
-static const uint16_t filter_8tap_64p_117[264] = {
- 116, 16100, 428, 3564, 428, 16100, 116, 0,
- 112, 16116, 376, 3564, 484, 16084, 120, 16380,
- 104, 16136, 324, 3560, 540, 16064, 124, 16380,
- 100, 16152, 272, 3556, 600, 16048, 128, 16380,
- 96, 16168, 220, 3548, 656, 16032, 136, 16376,
- 88, 16188, 172, 3540, 716, 16016, 140, 16376,
- 84, 16204, 124, 3528, 780, 16000, 144, 16376,
- 80, 16220, 76, 3512, 840, 15984, 148, 16372,
- 76, 16236, 32, 3496, 904, 15968, 152, 16372,
- 68, 16252, 16376, 3480, 968, 15952, 156, 16372,
- 64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
- 60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
- 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
- 48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
- 44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
- 40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
- 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
- 32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
- 28, 4, 16052, 3208, 1564, 15836, 176, 16364,
- 24, 16, 16028, 3168, 1632, 15828, 176, 16364,
- 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
- 16, 40, 15976, 3080, 1768, 15812, 176, 16364,
- 12, 52, 15952, 3036, 1836, 15808, 176, 16364,
- 8, 64, 15932, 2988, 1904, 15800, 176, 16364,
- 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
- 4, 84, 15892, 2888, 2040, 15796, 172, 16364,
- 0, 96, 15876, 2836, 2104, 15792, 168, 16364,
- 16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
- 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
- 16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
- 16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
- 16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
- 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_116[264] = {
+ 0x0080, 0x3E90, 0x0268, 0x0D14, 0x0264, 0x3E90, 0x0080, 0x0000,
+ 0x007C, 0x3E9C, 0x0238, 0x0D14, 0x0298, 0x3E84, 0x0080, 0x0000,
+ 0x0078, 0x3EAC, 0x0200, 0x0D10, 0x02D0, 0x3E78, 0x0084, 0x0000,
+ 0x0078, 0x3EB8, 0x01D0, 0x0D0C, 0x0304, 0x3E6C, 0x0084, 0x0000,
+ 0x0074, 0x3EC8, 0x01A0, 0x0D00, 0x033C, 0x3E60, 0x0088, 0x0000,
+ 0x0070, 0x3ED4, 0x0170, 0x0D00, 0x0374, 0x3E54, 0x0088, 0x3FFC,
+ 0x006C, 0x3EE4, 0x0140, 0x0CF8, 0x03AC, 0x3E48, 0x0088, 0x3FFC,
+ 0x006C, 0x3EF0, 0x0114, 0x0CE8, 0x03E4, 0x3E3C, 0x008C, 0x3FFC,
+ 0x0068, 0x3F00, 0x00E8, 0x0CD8, 0x041C, 0x3E34, 0x008C, 0x3FFC,
+ 0x0064, 0x3F10, 0x00BC, 0x0CCC, 0x0454, 0x3E28, 0x008C, 0x3FFC,
+ 0x0060, 0x3F1C, 0x0090, 0x0CBC, 0x0490, 0x3E20, 0x008C, 0x3FFC,
+ 0x005C, 0x3F2C, 0x0068, 0x0CA4, 0x04CC, 0x3E18, 0x008C, 0x3FFC,
+ 0x0058, 0x3F38, 0x0040, 0x0C94, 0x0504, 0x3E10, 0x008C, 0x3FFC,
+ 0x0054, 0x3F48, 0x001C, 0x0C7C, 0x0540, 0x3E08, 0x0088, 0x3FFC,
+ 0x0050, 0x3F54, 0x3FF8, 0x0C60, 0x057C, 0x3E04, 0x0088, 0x3FFC,
+ 0x004C, 0x3F64, 0x3FD4, 0x0C44, 0x05B8, 0x3DFC, 0x0088, 0x3FFC,
+ 0x0048, 0x3F70, 0x3FB4, 0x0C28, 0x05F4, 0x3DF8, 0x0084, 0x3FFC,
+ 0x0044, 0x3F80, 0x3F90, 0x0C0C, 0x0630, 0x3DF4, 0x0080, 0x3FFC,
+ 0x0040, 0x3F8C, 0x3F70, 0x0BE8, 0x066C, 0x3DF4, 0x0080, 0x3FFC,
+ 0x003C, 0x3F9C, 0x3F50, 0x0BC8, 0x06A8, 0x3DF0, 0x007C, 0x3FFC,
+ 0x0038, 0x3FA8, 0x3F34, 0x0BA0, 0x06E4, 0x3DF0, 0x0078, 0x0000,
+ 0x0034, 0x3FB4, 0x3F18, 0x0B80, 0x071C, 0x3DF0, 0x0074, 0x0000,
+ 0x0030, 0x3FC0, 0x3EFC, 0x0B5C, 0x0758, 0x3DF0, 0x0070, 0x0000,
+ 0x002C, 0x3FCC, 0x3EE4, 0x0B34, 0x0794, 0x3DF4, 0x0068, 0x0000,
+ 0x002C, 0x3FDC, 0x3ECC, 0x0B08, 0x07CC, 0x3DF4, 0x0064, 0x0000,
+ 0x0028, 0x3FE4, 0x3EB4, 0x0AE0, 0x0808, 0x3DF8, 0x0060, 0x0000,
+ 0x0024, 0x3FF0, 0x3EA0, 0x0AB0, 0x0840, 0x3E00, 0x0058, 0x0004,
+ 0x0020, 0x3FFC, 0x3E90, 0x0A84, 0x0878, 0x3E04, 0x0050, 0x0004,
+ 0x001C, 0x0004, 0x3E7C, 0x0A54, 0x08B0, 0x3E0C, 0x004C, 0x0008,
+ 0x0018, 0x000C, 0x3E68, 0x0A28, 0x08E8, 0x3E18, 0x0044, 0x0008,
+ 0x0018, 0x0018, 0x3E54, 0x09F4, 0x0920, 0x3E20, 0x003C, 0x000C,
+ 0x0014, 0x0020, 0x3E48, 0x09C0, 0x0954, 0x3E2C, 0x0034, 0x0010,
+ 0x0010, 0x002C, 0x3E3C, 0x098C, 0x0988, 0x3E38, 0x002C, 0x0010
+};
-static const uint16_t filter_8tap_64p_150[264] = {
- 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
- 0, 16020, 992, 2756, 1068, 16024, 16376, 0,
- 4, 16020, 952, 2752, 1108, 16024, 16372, 0,
- 8, 16020, 916, 2748, 1148, 16028, 16368, 0,
- 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
- 16, 16020, 840, 2740, 1224, 16036, 16356, 4,
- 20, 16024, 800, 2732, 1264, 16040, 16352, 4,
- 20, 16024, 764, 2724, 1304, 16044, 16348, 8,
- 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
- 28, 16028, 692, 2704, 1380, 16056, 16336, 12,
- 28, 16032, 656, 2696, 1420, 16064, 16328, 12,
- 32, 16036, 620, 2684, 1460, 16072, 16324, 12,
- 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
- 36, 16044, 548, 2656, 1536, 16088, 16308, 16,
- 36, 16048, 516, 2640, 1576, 16096, 16304, 20,
- 40, 16052, 480, 2624, 1612, 16108, 16296, 20,
- 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
- 44, 16064, 416, 2588, 1692, 16132, 16280, 24,
- 44, 16068, 384, 2568, 1728, 16144, 16276, 24,
- 44, 16076, 352, 2548, 1764, 16156, 16268, 28,
- 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
- 44, 16088, 292, 2508, 1840, 16184, 16252, 28,
- 44, 16096, 264, 2484, 1876, 16200, 16244, 32,
- 48, 16100, 232, 2460, 1912, 16216, 16236, 32,
- 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
- 48, 16116, 176, 2412, 1980, 16248, 16220, 36,
- 48, 16124, 152, 2384, 2016, 16264, 16216, 36,
- 44, 16128, 124, 2356, 2052, 16284, 16208, 36,
- 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
- 44, 16144, 72, 2300, 2116, 16324, 16192, 40,
- 44, 16152, 48, 2272, 2148, 16344, 16184, 40,
- 44, 16160, 24, 2244, 2180, 16364, 16176, 40,
- 44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_149[264] = {
+ 0x0008, 0x3E8C, 0x03F8, 0x0AE8, 0x03F8, 0x3E8C, 0x0008, 0x0000,
+ 0x000C, 0x3E8C, 0x03D0, 0x0AE8, 0x0420, 0x3E90, 0x0000, 0x0000,
+ 0x000C, 0x3E8C, 0x03AC, 0x0AE8, 0x0444, 0x3E90, 0x0000, 0x0000,
+ 0x0010, 0x3E90, 0x0384, 0x0AE0, 0x046C, 0x3E94, 0x3FFC, 0x0000,
+ 0x0014, 0x3E90, 0x035C, 0x0ADC, 0x0494, 0x3E94, 0x3FF8, 0x0004,
+ 0x0018, 0x3E90, 0x0334, 0x0AD8, 0x04BC, 0x3E98, 0x3FF4, 0x0004,
+ 0x001C, 0x3E94, 0x0310, 0x0AD0, 0x04E4, 0x3E9C, 0x3FEC, 0x0004,
+ 0x0020, 0x3E98, 0x02E8, 0x0AC4, 0x050C, 0x3EA0, 0x3FE8, 0x0008,
+ 0x0020, 0x3E98, 0x02C4, 0x0AC0, 0x0534, 0x3EA4, 0x3FE4, 0x0008,
+ 0x0024, 0x3E9C, 0x02A0, 0x0AB4, 0x055C, 0x3EAC, 0x3FDC, 0x0008,
+ 0x0024, 0x3EA0, 0x027C, 0x0AA8, 0x0584, 0x3EB0, 0x3FD8, 0x000C,
+ 0x0028, 0x3EA4, 0x0258, 0x0A9C, 0x05AC, 0x3EB8, 0x3FD0, 0x000C,
+ 0x0028, 0x3EA8, 0x0234, 0x0A90, 0x05D4, 0x3EC0, 0x3FC8, 0x0010,
+ 0x002C, 0x3EAC, 0x0210, 0x0A80, 0x05FC, 0x3EC8, 0x3FC4, 0x0010,
+ 0x002C, 0x3EB4, 0x01F0, 0x0A70, 0x0624, 0x3ED0, 0x3FBC, 0x0010,
+ 0x002C, 0x3EB8, 0x01CC, 0x0A60, 0x064C, 0x3EDC, 0x3FB4, 0x0014,
+ 0x0030, 0x3EBC, 0x01A8, 0x0A50, 0x0674, 0x3EE4, 0x3FB0, 0x0014,
+ 0x0030, 0x3EC4, 0x0188, 0x0A38, 0x069C, 0x3EF0, 0x3FA8, 0x0018,
+ 0x0030, 0x3ECC, 0x0168, 0x0A28, 0x06C0, 0x3EFC, 0x3FA0, 0x0018,
+ 0x0030, 0x3ED0, 0x0148, 0x0A14, 0x06E8, 0x3F08, 0x3F98, 0x001C,
+ 0x0030, 0x3ED8, 0x012C, 0x0A00, 0x070C, 0x3F14, 0x3F90, 0x001C,
+ 0x0034, 0x3EE0, 0x0108, 0x09E4, 0x0734, 0x3F24, 0x3F8C, 0x001C,
+ 0x0034, 0x3EE4, 0x00EC, 0x09CC, 0x0758, 0x3F34, 0x3F84, 0x0020,
+ 0x0034, 0x3EEC, 0x00D0, 0x09B8, 0x077C, 0x3F40, 0x3F7C, 0x0020,
+ 0x0034, 0x3EF4, 0x00B4, 0x0998, 0x07A4, 0x3F50, 0x3F74, 0x0024,
+ 0x0030, 0x3EFC, 0x0098, 0x0980, 0x07C8, 0x3F64, 0x3F6C, 0x0024,
+ 0x0030, 0x3F04, 0x0080, 0x0968, 0x07E8, 0x3F74, 0x3F64, 0x0024,
+ 0x0030, 0x3F0C, 0x0060, 0x094C, 0x080C, 0x3F88, 0x3F5C, 0x0028,
+ 0x0030, 0x3F14, 0x0048, 0x0930, 0x0830, 0x3F98, 0x3F54, 0x0028,
+ 0x0030, 0x3F1C, 0x0030, 0x0914, 0x0850, 0x3FAC, 0x3F4C, 0x0028,
+ 0x0030, 0x3F24, 0x0018, 0x08F0, 0x0874, 0x3FC0, 0x3F44, 0x002C,
+ 0x002C, 0x3F2C, 0x0000, 0x08D4, 0x0894, 0x3FD8, 0x3F3C, 0x002C,
+ 0x002C, 0x3F34, 0x3FEC, 0x08B4, 0x08B4, 0x3FEC, 0x3F34, 0x002C
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_183[264] = {
- 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
- 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
- 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
- 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
- 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
- 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
- 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
- 16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
- 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
- 16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
- 16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
- 16308, 16188, 868, 2200, 1448, 0, 16220, 4,
- 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
- 16316, 16180, 816, 2184, 1500, 28, 16212, 4,
- 16320, 16176, 792, 2172, 1524, 40, 16208, 4,
- 16324, 16172, 764, 2164, 1548, 56, 16204, 0,
- 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
- 16328, 16168, 712, 2144, 1596, 88, 16196, 0,
- 16332, 16164, 688, 2132, 1620, 100, 16192, 0,
- 16336, 16164, 664, 2120, 1644, 120, 16192, 0,
- 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
- 16344, 16160, 616, 2096, 1688, 152, 16184, 0,
- 16344, 16160, 592, 2080, 1712, 168, 16180, 0,
- 16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
- 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
- 16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
- 16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
- 16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
- 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
- 16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
- 16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
- 16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
- 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+ 0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000,
+ 0x3F88, 0x003C, 0x0468, 0x076C, 0x0490, 0x0054, 0x3F84, 0x0000,
+ 0x3F8C, 0x0034, 0x0454, 0x0768, 0x04A4, 0x005C, 0x3F84, 0x0000,
+ 0x3F8C, 0x0028, 0x0444, 0x076C, 0x04B4, 0x0068, 0x3F80, 0x0000,
+ 0x3F90, 0x0020, 0x042C, 0x0768, 0x04C8, 0x0074, 0x3F80, 0x0000,
+ 0x3F90, 0x0018, 0x041C, 0x0764, 0x04DC, 0x0080, 0x3F7C, 0x0000,
+ 0x3F94, 0x0010, 0x0408, 0x075C, 0x04F0, 0x008C, 0x3F7C, 0x0000,
+ 0x3F94, 0x0004, 0x03F8, 0x0760, 0x0500, 0x0098, 0x3F7C, 0x3FFC,
+ 0x3F98, 0x0000, 0x03E0, 0x075C, 0x0514, 0x00A4, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF8, 0x03CC, 0x0754, 0x0528, 0x00B0, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF0, 0x03B8, 0x0754, 0x0538, 0x00BC, 0x3F78, 0x3FFC,
+ 0x3FA0, 0x3FE8, 0x03A4, 0x0750, 0x054C, 0x00CC, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FE0, 0x0390, 0x074C, 0x055C, 0x00D8, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FDC, 0x037C, 0x0744, 0x0570, 0x00E4, 0x3F74, 0x3FF8,
+ 0x3FA8, 0x3FD4, 0x0368, 0x0740, 0x0580, 0x00F4, 0x3F74, 0x3FF4,
+ 0x3FA8, 0x3FCC, 0x0354, 0x073C, 0x0590, 0x0104, 0x3F74, 0x3FF4,
+ 0x3FAC, 0x3FC8, 0x0340, 0x0730, 0x05A4, 0x0110, 0x3F74, 0x3FF4,
+ 0x3FB0, 0x3FC0, 0x0330, 0x0728, 0x05B4, 0x0120, 0x3F74, 0x3FF0,
+ 0x3FB0, 0x3FBC, 0x031C, 0x0724, 0x05C4, 0x0130, 0x3F70, 0x3FF0,
+ 0x3FB4, 0x3FB4, 0x0308, 0x0720, 0x05D4, 0x013C, 0x3F70, 0x3FF0,
+ 0x3FB8, 0x3FB0, 0x02F4, 0x0714, 0x05E4, 0x014C, 0x3F74, 0x3FEC,
+ 0x3FB8, 0x3FAC, 0x02E0, 0x0708, 0x05F8, 0x015C, 0x3F74, 0x3FEC,
+ 0x3FBC, 0x3FA8, 0x02CC, 0x0704, 0x0604, 0x016C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3FA0, 0x02BC, 0x06F8, 0x0614, 0x017C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3F9C, 0x02A8, 0x06F4, 0x0624, 0x018C, 0x3F74, 0x3FE4,
+ 0x3FC4, 0x3F98, 0x0294, 0x06E8, 0x0634, 0x019C, 0x3F74, 0x3FE4,
+ 0x3FC8, 0x3F94, 0x0284, 0x06D8, 0x0644, 0x01AC, 0x3F78, 0x3FE0,
+ 0x3FC8, 0x3F90, 0x0270, 0x06D4, 0x0650, 0x01BC, 0x3F78, 0x3FE0,
+ 0x3FCC, 0x3F8C, 0x025C, 0x06C8, 0x0660, 0x01D0, 0x3F78, 0x3FDC,
+ 0x3FCC, 0x3F8C, 0x024C, 0x06B8, 0x066C, 0x01E0, 0x3F7C, 0x3FDC,
+ 0x3FD0, 0x3F88, 0x0238, 0x06B0, 0x067C, 0x01F0, 0x3F7C, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0228, 0x069C, 0x0688, 0x0204, 0x3F80, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
+};
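For reference, the hex tables above store each tap as a 14-bit two's-complement value in the 1.12 fixed-point output format noted in the header comments (the old decimal tables wrapped at 16384 for the same reason, e.g. 16380 == -4). A minimal standalone sketch of decoding one phase, assuming that 14-bit width; coef_to_double and the demo main() are illustrative, not driver code:

#include <stdio.h>

/* Illustrative helper (not driver code): decode a 14-bit two's-complement
 * coefficient in 1.12 fixed point, i.e. 12 fractional bits. */
static double coef_to_double(unsigned int c)
{
	int v = (int)(c & 0x3FFF);
	if (v & 0x2000)		/* sign bit of the 14-bit field */
		v -= 0x4000;
	return (double)v / 4096.0;
}

int main(void)
{
	/* phase 0 of filter_8tap_64p_183 above */
	static const unsigned int phase0[8] = {
		0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000
	};
	double sum = 0.0;
	int i;

	for (i = 0; i < 8; i++)
		sum += coef_to_double(phase0[i]);
	printf("tap sum = %f\n", sum);	/* 1.000000: taps preserve DC gain */
	return 0;
}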
const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_16p_117;
+ return filter_3tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_16p_150;
+ return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
@@ -1029,9 +1355,9 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_64p_117;
+ return filter_3tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_64p_150;
+ return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
@@ -1041,9 +1367,9 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_16p_117;
+ return filter_4tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_16p_150;
+ return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
@@ -1053,9 +1379,9 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_64p_117;
+ return filter_4tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_64p_150;
+ return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
@@ -1065,9 +1391,9 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_5tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_5tap_64p_117;
+ return filter_5tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_5tap_64p_150;
+ return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
@@ -1077,9 +1403,9 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_6tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_6tap_64p_117;
+ return filter_6tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_6tap_64p_150;
+ return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
@@ -1089,9 +1415,9 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_7tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_7tap_64p_117;
+ return filter_7tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_7tap_64p_150;
+ return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
@@ -1101,9 +1427,9 @@ const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_8tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_8tap_64p_117;
+ return filter_8tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_8tap_64p_150;
+ return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
}
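All of the selector functions above share the same ratio thresholds; a worked illustration with example ratios (the design points come from the scale_ratio header comments on each table):

/*
 * Worked example of the selection above (ratios are illustrative):
 *   ratio = 1.25 -> 1.0 <= ratio < 4/3 -> *_116 table (~1.167 design point)
 *   ratio = 1.50 -> 4/3 <= ratio < 5/3 -> *_149 table (~1.500 design point)
 *   ratio = 2.00 -> ratio >= 5/3       -> *_183 table (~1.833 design point)
 * Any ratio below 1.0 selects the upscale table.
 */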
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
new file mode 100644
index 000000000000..bb0e1b80ec3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 225955ec6d39..bc109d4fc6e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -27,25 +27,55 @@
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../../dmub/inc/dmub_srv.h"
-#include "dmub_fw_state.h"
+#include "../../dmub/inc/dmub_gpint_cmd.h"
#include "core_types.h"
-#include "ipp.h"
#define MAX_PIPES 6
/**
* Get PSR state from firmware.
*/
-static void dmub_get_psr_state(uint32_t *psr_state)
+static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
{
- // Not yet implemented
- // Trigger GPINT interrupt from firmware
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+
+ // Send gpint command and wait for ack
+ dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+ dmub_srv_get_gpint_response(srv, psr_state);
+}
+
+/**
+ * Set PSR version.
+ */
+static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ cmd.psr_set_version.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
+
+ if (stream->psr_version == 0x0) // Unsupported
+ return false;
+ else if (stream->psr_version == 0x1)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
+ else if (stream->psr_version == 0x2)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
+
+ cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
}
/**
* Enable/Disable PSR.
*/
-static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -67,13 +97,13 @@ static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
/**
* Set PSR level.
*/
-static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
{
union dmub_rb_cmd cmd;
uint32_t psr_state = 0;
struct dc_context *dc = dmub->ctx;
- dmub_get_psr_state(&psr_state);
+ dmub_psr_get_state(dmub, &psr_state);
if (psr_state == 0)
return;
@@ -91,7 +121,7 @@ static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
/**
* Set up PSR by programming PHY registers and sending PSR HW context values to firmware.
*/
-static bool dmub_setup_psr(struct dmub_psr *dmub,
+static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -101,21 +131,22 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
= &cmd.psr_copy_settings.psr_copy_settings_data;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
- for (int i = 0; i < MAX_PIPES; i++) {
- if (res_ctx &&
- res_ctx->pipe_ctx[i].stream &&
- res_ctx->pipe_ctx[i].stream->link &&
- res_ctx->pipe_ctx[i].stream->link == link &&
- res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
pipe_ctx = &res_ctx->pipe_ctx[i];
break;
}
}
- if (!pipe_ctx ||
- !&pipe_ctx->plane_res ||
- !&pipe_ctx->stream_res)
+ if (!pipe_ctx)
+ return false;
+
+ // First, set the psr version
+ if (!dmub_psr_set_version(dmub, pipe_ctx->stream))
return false;
// Program DP DPHY fast training registers
@@ -138,10 +169,6 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
- if (pipe_ctx->plane_res.hubp)
- copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
- else
- copy_settings_data->hubp_inst = 0;
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -157,18 +184,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
// Misc
copy_settings_data->psr_level = psr_context->psr_level.u32all;
- copy_settings_data->hyst_frames = psr_context->timehyst_frames;
- copy_settings_data->hyst_lines = psr_context->hyst_lines;
- copy_settings_data->phy_type = psr_context->phyType;
- copy_settings_data->aux_repeat = psr_context->aux_repeats;
- copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
- copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
+ copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
- copy_settings_data->smu_phy_id = psr_context->smuPhyId;
- copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
- copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
- copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -178,10 +196,10 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
}
static const struct dmub_psr_funcs psr_funcs = {
- .set_psr_enable = dmub_set_psr_enable,
- .setup_psr = dmub_setup_psr,
- .get_psr_state = dmub_get_psr_state,
- .set_psr_level = dmub_set_psr_level,
+ .psr_copy_settings = dmub_psr_copy_settings,
+ .psr_enable = dmub_psr_enable,
+ .psr_get_state = dmub_psr_get_state,
+ .psr_set_level = dmub_psr_set_level,
};
/**
@@ -215,6 +233,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
*/
void dmub_psr_destroy(struct dmub_psr **dmub)
{
- kfree(dmub);
+ kfree(*dmub);
*dmub = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 229958de3035..f404fecd6410 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -27,6 +27,7 @@
#define _DMUB_PSR_H_
#include "os_types.h"
+#include "dc_link.h"
struct dmub_psr {
struct dc_context *ctx;
@@ -34,14 +35,14 @@ struct dmub_psr {
};
struct dmub_psr_funcs {
- void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
- bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
- void (*get_psr_state)(uint32_t *psr_state);
- void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+ bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+ void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+ void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
+ void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
};
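With the rename, every hook now takes the dmub_psr instance as its first argument. A hedged call-site sketch; the dc->res_pool->psr location is an assumption for illustration and is not shown in this patch:

/* Sketch only: reading PSR state through the renamed hooks. */
uint32_t psr_state = 0;
struct dmub_psr *psr = dc->res_pool->psr;	/* assumed location */

if (psr)
	psr->funcs->psr_get_state(psr, &psr_state);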
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
void dmub_psr_destroy(struct dmub_psr **dmub);
-#endif /* _DCE_DMUB_H_ */
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5b689273ff44..0976e378659f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -71,6 +71,8 @@
#define PANEL_POWER_UP_TIMEOUT 300
#define PANEL_POWER_DOWN_TIMEOUT 500
#define HPD_CHECK_INTERVAL 10
+#define OLED_POST_T7_DELAY 100
+#define OLED_PRE_T11_DELAY 150
#define CTX \
hws->ctx
@@ -696,8 +698,10 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
}
/*todo: cloned in stream enc, fix*/
-static bool is_panel_backlight_on(struct dce_hwseq *hws)
+bool dce110_is_panel_backlight_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t value;
REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
@@ -705,11 +709,12 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
return value;
}
-static bool is_panel_powered_on(struct dce_hwseq *hws)
+bool dce110_is_panel_powered_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
@@ -816,7 +821,7 @@ void dce110_edp_power_control(
return;
}
- if (power_up != is_panel_powered_on(hwseq)) {
+ if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -896,7 +901,7 @@ void dce110_edp_backlight_control(
return;
}
- if (enable && is_panel_backlight_on(hws)) {
+ if (enable && hws->funcs.is_panel_backlight_on(link)) {
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
@@ -936,9 +941,21 @@ void dce110_edp_backlight_control(
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link);
link_transmitter_control(ctx->dc_bios, &cntl);
+
+ if (enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_POST_T7_DELAY);
+
+ if (link->dpcd_sink_ext_caps.bits.oled ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
+ dc_link_backlight_enable_aux(link, enable);
+
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
edp_receiver_ready_T9(link);
+
+ if (!enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_PRE_T11_DELAY);
}
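For OLED eDP panels, the new delays bracket the transmitter call and the AUX backlight write; a summary of the resulting ordering, with the times taken from the defines added above:

/*
 * Sequence above for an OLED eDP panel (summary, not new code):
 *   enable:  wait T7 -> transmitter BACKLIGHT_ON -> msleep(100)
 *            -> dc_link_backlight_enable_aux(link, true)
 *   disable: transmitter BACKLIGHT_OFF -> dc_link_backlight_enable_aux(link, false)
 *            -> wait T9 -> msleep(150)
 */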
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -2576,17 +2593,6 @@ static void dce110_apply_ctx_for_surface(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (stream == pipe_ctx->stream) {
- if (!pipe_ctx->top_pipe &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream != stream)
continue;
@@ -2607,20 +2613,16 @@ static void dce110_apply_ctx_for_surface(
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if ((stream == pipe_ctx->stream) &&
- (!pipe_ctx->top_pipe) &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- }
-
if (dc->fbc_compressor)
enable_fbc(dc, context);
}
+static void dce110_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+}
+
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
@@ -2722,6 +2724,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dce110_post_unlock_program_front_end,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
@@ -2736,6 +2739,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
+ .interdependent_update_lock = NULL,
.prepare_bandwidth = dce110_prepare_bandwidth,
.optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr,
@@ -2763,6 +2767,8 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 26a9c14a58b1..34be166e8ff0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,5 +85,9 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
+bool dce110_is_panel_backlight_on(struct dc_link *link);
+
+bool dce110_is_panel_powered_on(struct dc_link *link);
+
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index bbd6e01b3eca..47a39eb9400b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -316,6 +316,7 @@ bool cm_helper_translate_curve_to_hw_format(
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
+ struct pwl_result_data *rgb_minus_1;
int32_t region_start, region_end;
int32_t i;
@@ -465,9 +466,20 @@ bool cm_helper_translate_curve_to_hw_format(
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
+ rgb_minus_1 = rgb;
i = 1;
while (i != hw_points + 1) {
+
+ if (i >= hw_points - 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
+ }
+
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -482,6 +494,7 @@ bool cm_helper_translate_curve_to_hw_format(
}
++rgb_plus_1;
+ rgb_minus_1 = rgb;
++rgb;
++i;
}
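The new rgb_minus_1 bookkeeping exists so the last hardware LUT points can be forced monotonic: if the next point would step backwards, it is pushed forward using the previous segment's delta. A distilled sketch of that rule on plain doubles, assuming nothing beyond what the hunk above shows:

/* Illustrative only: the clamp rule applied per channel above. */
static double clamp_last_points(double cur, double next, double prev_delta)
{
	/* a decreasing step near the end of the LUT would yield a
	 * negative delta; extend with the previous slope instead */
	return (next < cur) ? cur + prev_delta : next;
}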
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 446ba0a7a4b3..deccab0228d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -128,8 +128,8 @@ bool hubbub1_verify_allow_pstate_change_high(
* pstate takes around 100us on linux. It is currently unknown
* why it takes that long on linux.
*/
- static unsigned int pstate_wait_timeout_us = 200;
- static unsigned int pstate_wait_expected_timeout_us = 40;
+ const unsigned int pstate_wait_timeout_us = 200;
+ const unsigned int pstate_wait_expected_timeout_us = 40;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
@@ -147,8 +147,9 @@ bool hubbub1_verify_allow_pstate_change_high(
forced_pstate_allow = false;
}
- /* RV2:
- * dchubbubdebugind, at: 0xB
+ /* The following table only applies to DCN1 and DCN2;
+ * for newer DCNs, consult with the HW IP team to read the RTL.
+ * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
* description
* 0: Pipe0 Plane0 Allow Pstate Change
* 1: Pipe0 Plane1 Allow Pstate Change
@@ -181,64 +182,6 @@ bool hubbub1_verify_allow_pstate_change_high(
* 28: WB0 Allow Pstate Change
* 29: WB1 Allow Pstate Change
* 30: Arbiter's allow_pstate_change
- * 31: SOC pstate change request"
- */
- /*DCN2.x:
- HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
- 0: Pipe0 Plane0 Allow P-state Change
- 1: Pipe0 Plane1 Allow P-state Change
- 2: Pipe0 Cursor0 Allow P-state Change
- 3: Pipe0 Cursor1 Allow P-state Change
- 4: Pipe1 Plane0 Allow P-state Change
- 5: Pipe1 Plane1 Allow P-state Change
- 6: Pipe1 Cursor0 Allow P-state Change
- 7: Pipe1 Cursor1 Allow P-state Change
- 8: Pipe2 Plane0 Allow P-state Change
- 9: Pipe2 Plane1 Allow P-state Change
- 10: Pipe2 Cursor0 Allow P-state Change
- 11: Pipe2 Cursor1 Allow P-state Change
- 12: Pipe3 Plane0 Allow P-state Change
- 13: Pipe3 Plane1 Allow P-state Change
- 14: Pipe3 Cursor0 Allow P-state Change
- 15: Pipe3 Cursor1 Allow P-state Change
- 16: Pipe4 Plane0 Allow P-state Change
- 17: Pipe4 Plane1 Allow P-state Change
- 18: Pipe4 Cursor0 Allow P-state Change
- 19: Pipe4 Cursor1 Allow P-state Change
- 20: Pipe5 Plane0 Allow P-state Change
- 21: Pipe5 Plane1 Allow P-state Change
- 22: Pipe5 Cursor0 Allow P-state Change
- 23: Pipe5 Cursor1 Allow P-state Change
- 24: Pipe6 Plane0 Allow P-state Change
- 25: Pipe6 Plane1 Allow P-state Change
- 26: Pipe6 Cursor0 Allow P-state Change
- 27: Pipe6 Cursor1 Allow P-state Change
- 28: WB0 Allow P-state Change
- 29: WB1 Allow P-state Change
- 30: Arbiter`s Allow P-state Change
- 31: SOC P-state Change request
- */
- /* RV1:
- * dchubbubdebugind, at: 0x7
- * description "3-0: Pipe0 cursor0 QOS
- * 7-4: Pipe1 cursor0 QOS
- * 11-8: Pipe2 cursor0 QOS
- * 15-12: Pipe3 cursor0 QOS
- * 16: Pipe0 Plane0 Allow Pstate Change
- * 17: Pipe1 Plane0 Allow Pstate Change
- * 18: Pipe2 Plane0 Allow Pstate Change
- * 19: Pipe3 Plane0 Allow Pstate Change
- * 20: Pipe0 Plane1 Allow Pstate Change
- * 21: Pipe1 Plane1 Allow Pstate Change
- * 22: Pipe2 Plane1 Allow Pstate Change
- * 23: Pipe3 Plane1 Allow Pstate Change
- * 24: Pipe0 cursor0 Allow Pstate Change
- * 25: Pipe1 cursor0 Allow Pstate Change
- * 26: Pipe2 cursor0 Allow Pstate Change
- * 27: Pipe3 cursor0 Allow Pstate Change
- * 28: WB0 Allow Pstate Change
- * 29: WB1 Allow Pstate Change
- * 30: Arbiter's allow_pstate_change
* 31: SOC pstate change request
*/
@@ -300,7 +243,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -308,6 +251,7 @@ void hubbub1_program_urgent_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -321,7 +265,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
@@ -331,7 +276,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -344,7 +290,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
@@ -354,7 +301,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -367,7 +315,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
@@ -377,7 +326,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -390,7 +340,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
@@ -400,10 +351,13 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -411,6 +365,7 @@ void hubbub1_program_stutter_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -425,7 +380,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -439,7 +396,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -454,7 +413,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -468,7 +429,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -483,7 +446,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -497,7 +462,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -512,7 +479,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -526,11 +495,14 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+ return wm_pending;
}
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -538,6 +510,7 @@ void hubbub1_program_pstate_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
@@ -552,7 +525,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -567,7 +542,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -582,7 +559,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -597,23 +576,33 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
@@ -627,6 +616,7 @@ void hubbub1_program_watermarks(
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
+ return wm_pending;
}
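Every per-watermark hunk above repeats one pattern, which the new bool return surfaces to callers; a minimal sketch of that pattern (program_one_wm is a hypothetical distillation, not a driver function):

/* Hypothetical distillation of each watermark hunk above. */
static bool program_one_wm(uint32_t *cached_ns, uint32_t new_ns, bool safe_to_lower)
{
	if (safe_to_lower || new_ns > *cached_ns) {
		*cached_ns = new_ns;
		/* ...REG_SET of the calculated value goes here... */
		return false;		/* programmed now */
	}
	return new_ns < *cached_ns;	/* lower value deferred: pending */
}

dcn10_prepare_bandwidth further down then stores the aggregated result in dc->wm_optimized_required.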
void hubbub1_update_dchub(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index af57751253de..343a537172c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -308,7 +308,7 @@ bool hubbub1_verify_allow_pstate_change_high(
void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -331,17 +331,17 @@ void hubbub1_construct(struct hubbub *hubbub,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask);
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1008ac8a0f2a..9cc3314966bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -48,8 +48,8 @@
#include "dc_link_dp.h"
#include "dccg.h"
#include "clk_mgr.h"
-
-
+#include "link_hwss.h"
+#include "dpcd_defs.h"
#include "dsc.h"
#define DC_LOGGER_INIT(logger)
@@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
-static void dcn10_lock_all_pipes(struct dc *dc,
+void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
{
@@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
+
/*
* Only lock the top pipe's tg to prevent redundant
* (un)locking. Also skip if pipe is disabled.
@@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc,
continue;
if (lock)
- tg->funcs->lock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
else
- tg->funcs->unlock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
}
@@ -900,6 +901,10 @@ static void dcn10_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
@@ -1263,7 +1268,8 @@ void dcn10_init_hw(struct dc *dc)
}
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
return;
}
@@ -1317,6 +1323,31 @@ void dcn10_init_hw(struct dc *dc)
if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /*
+ * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
+ * which needs to read dpcd info with the help of aconnector.
+ * If aconnector (dc->links[i]->priv) is NULL, then dpcd status
+ * cannot be read.
+ */
+ if (dc->links[i]->priv) {
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -1325,6 +1356,9 @@ void dcn10_init_hw(struct dc *dc)
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
hws->funcs.init_pipes(dc, dc->current_state);
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
for (i = 0; i < res_pool->audio_count; i++) {
@@ -1355,8 +1389,8 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
-
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -1576,7 +1610,7 @@ void dcn10_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (dc->debug.sanity_checks)
@@ -2090,6 +2124,10 @@ void dcn10_get_hdr_visual_confirm_color(
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
+ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ /* FreeSync 2 ARGB2101010 - set border color to pink */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
}
break;
case PIXEL_FORMAT_FP16:
@@ -2512,12 +2550,17 @@ void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
uint32_t underflow_check_delay_us;
- bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
dcn10_find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
+ // Clear pipe_ctx update flags
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe_ctx->update_flags.raw = 0;
+ }
+
if (!top_pipe_to_program)
return;
@@ -2531,11 +2574,6 @@ void dcn10_apply_ctx_for_surface(
if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, true);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
-
if (underflow_check_delay_us != 0xFFFFFFFF)
udelay(underflow_check_delay_us);
@@ -2552,18 +2590,6 @@ void dcn10_apply_ctx_for_surface(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
- }
if ((!pipe_ctx->plane_state ||
pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
@@ -2571,7 +2597,7 @@ void dcn10_apply_ctx_for_surface(
old_pipe_ctx->stream_res.tg == tg) {
hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ pipe_ctx->update_flags.bits.disable = 1;
DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
@@ -2597,21 +2623,35 @@ void dcn10_apply_ctx_for_surface(
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
}
+}
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, false);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
- if (num_planes == 0)
- false_optc_underflow_wa(dc, stream, tg);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream) {
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (context->stream_status[i].plane_count == 0)
+ false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
+ }
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i]) {
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
dc->hwss.optimize_bandwidth(dc, context);
break;
}
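Taken together, the locking moves out of apply_ctx_for_surface and the cleanup lands in the new hook; a sketch of the intended call order, inferred from the hooks this patch adds rather than quoted from it:

/*
 * Inferred sequencing after this split:
 *   hwss.interdependent_update_lock(dc, context, true);   // lock all pipes
 *   hwss.apply_ctx_for_surface(dc, stream, num_planes, context);
 *       // marks torn-down pipes with update_flags.bits.disable
 *   hwss.interdependent_update_lock(dc, context, false);  // unlock
 *   hwss.post_unlock_program_front_end(dc, context);
 *       // underflow WA, disable_plane on marked pipes, optimize_bandwidth
 */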
@@ -2656,7 +2696,7 @@ void dcn10_prepare_bandwidth(
false);
}
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -2693,6 +2733,7 @@ void dcn10_optimize_bandwidth(
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+
dcn10_stereo_hw_frame_pack_wa(dc, context);
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
@@ -2884,6 +2925,7 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
bool flip_pending;
+ struct dc *dc;
if (plane_state == NULL)
return;
+ dc = plane_state->ctx->dc;
@@ -2901,6 +2943,19 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
}
+
+ if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
+ struct dce_hwseq *hwseq = dc->hwseq;
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ unsigned int cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
+ }
+ }
}
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 4d20f6586bb5..16a50e05ffbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -70,11 +70,18 @@ void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_lock_all_pipes(
+ struct dc *dc,
+ struct dc_state *context,
+ bool lock);
void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn10_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index e7e5352ec424..dd02d3983695 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -32,6 +32,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.init_hw = dcn10_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -49,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
.set_drr = dcn10_set_drr,
@@ -85,6 +87,8 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 1a37c90e9d43..d3617d6785a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -782,6 +782,11 @@ bool dcn10_link_encoder_validate_output_with_stream(
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
bool is_valid;
+ // If SCDC (340-600 MHz) is disabled, fall back to the HDMI 1.4 timing limit
+ if (stream->sink->edid_caps.panel_patch.skip_scdc_overwrite &&
+ enc10->base.features.max_hdmi_pixel_clock > 300000)
+ enc10->base.features.max_hdmi_pixel_clock = 300000;
+
switch (stream->signal) {
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_DVI_DUAL_LINK:
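The added check above caps the link encoder's HDMI pixel clock at 300 MHz when a sink's EDID quirk asks to skip SCDC, since SCDC is what enables the 340-600 MHz TMDS range. A standalone sketch of the clamp, with values in kHz as in the driver (the helper name is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* HDMI 1.4 pixel clock ceiling, in kHz. */
enum { HDMI14_MAX_PIXEL_CLOCK_KHZ = 300000 };

static unsigned clamp_hdmi_pixel_clock(unsigned max_khz, bool skip_scdc)
{
	/* Without SCDC the 340-600 MHz range is unusable: clamp down. */
	if (skip_scdc && max_khz > HDMI14_MAX_PIXEL_CLOCK_KHZ)
		return HDMI14_MAX_PIXEL_CLOCK_KHZ;
	return max_khz;
}

int main(void)
{
	printf("%u\n", clamp_hdmi_pixel_clock(600000, true));  /* 300000 */
	printf("%u\n", clamp_hdmi_pixel_clock(600000, false)); /* 600000 */
	return 0;
}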
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index eb13589b9a81..762109174fb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -62,11 +62,11 @@
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
- SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LE_DCN10_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
LE_DCN_COMMON_REG_LIST(id)
struct dcn10_link_enc_aux_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a9a43b397db9..63acb8ff7462 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc,
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
- int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc,
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
- vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
- if (vertical_line_start < 0)
- v_fp2 = -vertical_line_start;
+ if (optc1->vstartup_start > asic_blank_end)
+ v_fp2 = optc1->vstartup_start - asic_blank_end;
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
@@ -1195,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc,
REG_UPDATE_3(OTG_STEREO_CONTROL,
OTG_STEREO_EN, stereo_en,
OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
- OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+ OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
if (flags->PROGRAM_POLARITY)
REG_UPDATE(OTG_STEREO_CONTROL,
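The optc1_set_vtg_params() rewrite above replaces the signed vertical_line_start intermediate with a direct unsigned difference. The two forms are not identical at the boundary: the old code yielded vstartup_start - asic_blank_end - 1 and only kicked in once VSTARTUP exceeded blank end by two lines, so this reads as an off-by-one fix rather than a pure refactor. A small sketch comparing both (assuming plain uint32_t inputs):

#include <stdint.h>
#include <stdio.h>

/* Old form: signed intermediate, negated when negative. */
static uint32_t v_fp2_old(uint32_t asic_blank_end, uint32_t vstartup_start)
{
	int32_t vertical_line_start =
		(int32_t)asic_blank_end - (int32_t)vstartup_start + 1;

	return vertical_line_start < 0 ? (uint32_t)-vertical_line_start : 0;
}

/* New form: direct difference, taken only when VSTARTUP is past blank end. */
static uint32_t v_fp2_new(uint32_t asic_blank_end, uint32_t vstartup_start)
{
	return vstartup_start > asic_blank_end ?
		vstartup_start - asic_blank_end : 0;
}

int main(void)
{
	/* blank_end=100: old gives 0 at vstartup=101, new gives 1. */
	printf("old=%u new=%u\n", v_fp2_old(100, 101), v_fp2_new(100, 101));
	/* further in: old=4, new=5 */
	printf("old=%u new=%u\n", v_fp2_old(100, 105), v_fp2_new(100, 105));
	return 0;
}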
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 3b71898e859e..261bdc3a8218 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -570,7 +570,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -598,7 +598,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1233,7 +1233,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
return DC_OK;
}
-static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -1295,7 +1295,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
- .get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 376c4264d295..7eba9333c328 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1667,5 +1667,6 @@ void dcn10_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 50bffbfdd394..62cc2651e00c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 13e057d7ee93..42bba7c9548b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes(
}
}
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
-
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps)
-{
- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
- return false;
-
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
- /* TODO: add lb check */
-
- /* No support for programming ratio of 8, drop to 7.99999.. */
- if (scl_data->ratios.horz.value == (8ll << 32))
- scl_data->ratios.horz.value--;
- if (scl_data->ratios.vert.value == (8ll << 32))
- scl_data->ratios.vert.value--;
- if (scl_data->ratios.horz_c.value == (8ll << 32))
- scl_data->ratios.horz_c.value--;
- if (scl_data->ratios.vert_c.value == (8ll << 32))
- scl_data->ratios.vert_c.value--;
-
- /* Set default taps if none are provided */
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
- scl_data->taps.h_taps = 8;
- else
- scl_data->taps.h_taps = 4;
- } else
- scl_data->taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
- scl_data->taps.v_taps = 8;
- else
- scl_data->taps.v_taps = 4;
- } else
- scl_data->taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
- scl_data->taps.v_taps_c = 4;
- else
- scl_data->taps.v_taps_c = 2;
- } else
- scl_data->taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
- scl_data->taps.h_taps_c = 4;
- else
- scl_data->taps.h_taps_c = 2;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- scl_data->taps.h_taps_c = in_taps->h_taps_c;
-
- if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
- scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
- scl_data->taps.v_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.horz_c))
- scl_data->taps.h_taps_c = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert_c))
- scl_data->taps.v_taps_c = 1;
- }
-
- return true;
-}
-
void oppn20_dummy_program_regamma_pwl(
struct dpp *dpp,
const struct pwl_params *params,
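The removed dpp2_get_optimal_number_of_taps() above duplicated the common tap-selection policy, presumably so a shared implementation can serve instead. The policy visible in the removed body: default to 8 taps when the scaling ratio exceeds 4, 4 taps otherwise, and collapse identity ratios to 1 tap unless always_scale is set. A simplified sketch using a plain double ratio in place of the driver's fixed-point type (helper name illustrative):

#include <stdio.h>

static int default_taps(double ratio, int requested)
{
	if (ratio == 1.0)
		return 1;                /* identity: bypass filtering */
	if (requested != 0)
		return requested;        /* caller forced a tap count */
	return ratio > 4.0 ? 8 : 4;     /* heavy downscale needs more taps */
}

int main(void)
{
	printf("%d %d %d\n",
	       default_taps(1.0, 0),    /* 1 */
	       default_taps(2.5, 0),    /* 4 */
	       default_taps(6.0, 0));   /* 8 */
	return 0;
}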
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 6bdfee20b6a7..1b1ae9ce2799 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
@@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons
reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
- reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
}
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
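The DSC change above moves the ich_reset_at_eol computation into dsc_prepare_config() and extends it: the ICH reset mask is now also raised when the stream is split across pipes via ODM, not only when a single pipe carries multiple horizontal slices. A sketch of the resulting rule:

#include <stdbool.h>
#include <stdio.h>

/* 0xF enables the end-of-line ICH reset for all four slices. */
static unsigned ich_reset_at_eol(bool is_odm, int num_slices_h)
{
	return (is_odm || num_slices_h > 1) ? 0xF : 0x0;
}

int main(void)
{
	printf("0x%X\n", ich_reset_at_eol(false, 1)); /* 0x0 */
	printf("0x%X\n", ich_reset_at_eol(true, 1));  /* 0xF, the new case */
	printf("0x%X\n", ich_reset_at_eol(false, 4)); /* 0xF, as before */
	return 0;
}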
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 9235f7d29454..c0b21d7450d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -562,19 +562,23 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
}
}
-static void hubbub2_program_watermarks(
+static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* There's a special case when going from p-state support to p-state unsupported
@@ -592,6 +596,7 @@ static void hubbub2_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ return wm_pending;
}
static const struct hubbub_funcs hubbub2_funcs = {
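hubbub2_program_watermarks() above now reports whether any watermark could not be lowered because safe_to_lower was false; the prepare_bandwidth callers store that in dc->wm_optimized_required so a later pass knows a lowering is still outstanding. A self-contained sketch of the per-register pattern (names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* A watermark may always be raised immediately, but is lowered only
 * when safe_to_lower is set; otherwise report the lowering as pending. */
static bool program_one_wm(unsigned *hw, unsigned wanted, bool safe_to_lower)
{
	if (safe_to_lower || wanted > *hw) {
		*hw = wanted;        /* program the register */
		return false;        /* nothing left to do */
	}
	return wanted < *hw;         /* a lowering is still pending */
}

int main(void)
{
	unsigned hw = 100;
	bool pending = false;

	pending |= program_one_wm(&hw, 120, false); /* raise: applied */
	pending |= program_one_wm(&hw, 80, false);  /* lower: deferred */
	printf("hw=%u pending=%d\n", hw, pending);  /* hw=120 pending=1 */

	pending = program_one_wm(&hw, 80, true);    /* now safe to lower */
	printf("hw=%u pending=%d\n", hw, pending);  /* hw=80 pending=0 */
	return 0;
}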
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index a444fed94184..233318260da4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -307,7 +307,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
if (num_opps == 2) {
bottom_opp->funcs->opp_set_disp_pattern_generator(
@@ -317,7 +318,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
}
hws->funcs.wait_for_blank_complete(opp);
@@ -645,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
+ dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -974,7 +979,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
@@ -985,7 +991,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
}
if (!blank)
@@ -1088,29 +1095,6 @@ void dcn20_enable_plane(
// }
}
-
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock)
-{
- if (lock) {
- pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- } else {
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe->stream_res.tg);
- }
-}
-
void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1121,7 +1105,7 @@ void dcn20_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (pipe->plane_state != NULL)
@@ -1536,48 +1520,32 @@ static void dcn20_program_pipe(
}
}
-static bool does_pipe_need_lock(struct pipe_ctx *pipe)
-{
- if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->update_flags.raw)
- return true;
- if (pipe->bottom_pipe)
- return does_pipe_need_lock(pipe->bottom_pipe);
-
- return false;
-}
-
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
struct dce_hwseq *hws = dc->hwseq;
- bool pipe_locked[MAX_PIPES] = {false};
DC_LOGGER_INIT(dc->ctx->logger);
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
- context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
- dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL &&
+ !dc->debug.disable_tri_buf) {
+ /* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+ }
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (!context->res_ctx.pipe_ctx[i].top_pipe &&
- does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
- pipe_locked[i] = true;
- }
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1615,17 +1583,17 @@ void dcn20_program_front_end_for_ctx(
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
+}
- /* Unlock all locked pipes */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (pipe_locked[i]) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ struct dce_hwseq *hwseq = dc->hwseq;
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
- }
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -1651,11 +1619,26 @@ void dcn20_program_front_end_for_ctx(
}
/* WA to apply WM setting*/
- if (dc->hwseq->wa.DEGVIDCN21)
+ if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-}
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
@@ -1668,7 +1651,7 @@ void dcn20_prepare_bandwidth(
false);
/* program dchubbub watermarks */
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2052,6 +2035,10 @@ static void dcn20_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
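The dcn20_hwseq.c hunks above drop the per-pipe lock bookkeeping from dcn20_program_front_end_for_ctx() and split the unlocked tail (pipe disables, waits, watermark WAs) into dcn20_post_unlock_program_front_end(). Together with the new interdependent_update_lock hook this implies a lock-program-unlock-post sequence driven by the caller. A minimal sketch of that control flow under that assumption (the struct and names are illustrative, not the real hw_sequencer_funcs):

#include <stdbool.h>
#include <stdio.h>

struct hwss {
	void (*lock_all)(bool lock);
	void (*program_front_end)(void);
	void (*post_unlock_program_front_end)(void);
};

static void apply_update(const struct hwss *f)
{
	f->lock_all(true);                  /* interdependent_update_lock */
	f->program_front_end();             /* program_front_end_for_ctx */
	f->lock_all(false);                 /* release all pipe locks */
	f->post_unlock_program_front_end(); /* disables, waits, WM WAs */
}

static void lock_all(bool l) { printf("lock_all(%d)\n", l); }
static void prog(void)       { printf("program_front_end\n"); }
static void post(void)       { printf("post_unlock\n"); }

int main(void)
{
	struct hwss f = { lock_all, prog, post };

	apply_update(&f);
	return 0;
}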
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 02c9be5ebd47..63ce763f148e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -35,6 +35,9 @@ bool dcn20_set_shaper_3dlut(
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context);
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
@@ -58,10 +61,6 @@ void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 5e640f17d3d4..1e73357eda34 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -33,6 +33,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -50,7 +51,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -96,6 +97,8 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
index 3fccd5eeecbb..7bcee5894d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -36,26 +36,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SR(reg_name)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SRI2(reg_name, block, id)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
-
#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 023cc71fad0f..138321e151eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height)
+ int height,
+ int offset)
{
struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
enum test_pattern_color_format bit_depth;
@@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator(
DPG_ACTIVE_WIDTH, width,
DPG_ACTIVE_HEIGHT, height);
+ /* set DPG offset */
+ REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+ DPG_X_OFFSET, offset,
+ DPG_SEGMENT_WIDTH, 0);
+
switch (test_pattern) {
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 4093bec172c1..64c5b429c79a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -36,6 +36,7 @@
#define OPP_DPG_REG_LIST(id) \
SRI(DPG_CONTROL, DPG, id), \
SRI(DPG_DIMENSIONS, DPG, id), \
+ SRI(DPG_OFFSET_SEGMENT, DPG, id), \
SRI(DPG_COLOUR_B_CB, DPG, id), \
SRI(DPG_COLOUR_G_Y, DPG, id), \
SRI(DPG_COLOUR_R_CR, DPG, id), \
@@ -53,6 +54,7 @@
uint32_t FMT_422_CONTROL; \
uint32_t DPG_CONTROL; \
uint32_t DPG_DIMENSIONS; \
+ uint32_t DPG_OFFSET_SEGMENT; \
uint32_t DPG_COLOUR_B_CB; \
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
@@ -68,6 +70,8 @@
OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \
@@ -97,6 +101,8 @@
type DPG_HRES; \
type DPG_ACTIVE_WIDTH; \
type DPG_ACTIVE_HEIGHT; \
+ type DPG_X_OFFSET; \
+ type DPG_SEGMENT_WIDTH; \
type DPG_COLOUR0_R_CR; \
type DPG_COLOUR1_R_CR; \
type DPG_COLOUR0_B_CB; \
@@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e310d67c399a..a67395208991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
@@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
@@ -1039,7 +1041,7 @@ static const struct resource_caps res_cap_nv14 = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -1058,7 +1060,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1254,6 +1256,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1668,7 +1671,7 @@ static void acquire_dsc(struct resource_context *res_ctx,
}
}
-static void release_dsc(struct resource_context *res_ctx,
+void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc)
{
@@ -1728,7 +1731,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream_res.dsc)
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
+ dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}
@@ -1972,22 +1975,6 @@ void dcn20_populate_dml_writeback_from_context(
}
-static int get_num_odm_heads(struct pipe_ctx *pipe)
-{
- int odm_head_count = 0;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
- while (next_pipe) {
- odm_head_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
- odm_head_count++;
- pipe = pipe->prev_odm_pipe;
- }
- return odm_head_count ? odm_head_count + 1 : 0;
-}
-
int dcn20_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
@@ -2067,8 +2054,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
- case 2:
+ switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
+ case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
default:
@@ -2076,9 +2063,14 @@ int dcn20_populate_dml_pipes_from_context(
}
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
- == res_ctx->pipe_ctx[i].plane_state)
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
- else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
+ == res_ctx->pipe_ctx[i].plane_state) {
+ struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe;
+
+ while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+ == res_ctx->pipe_ctx[i].plane_state)
+ first_pipe = first_pipe->top_pipe;
+ pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ } else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
while (first_pipe->prev_odm_pipe)
@@ -2163,16 +2155,20 @@ int dcn20_populate_dml_pipes_from_context(
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
- * Use max cursor settings for calculations to minimize
+ * For a graphics plane the cursor count is 1 and for NV12 it is 0, avoiding extra
* bw calculations due to cursor on/off
*/
- pipes[pipe_cnt].pipe.src.num_cursors = 2;
+ if (res_ctx->pipe_ctx[i].plane_state &&
+ res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pipes[pipe_cnt].pipe.src.num_cursors = 0;
+ else
+ pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
+
pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
- pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
- pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
if (!res_ctx->pipe_ctx[i].plane_state) {
+ pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
@@ -2198,19 +2194,21 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
- pipes[pipe_cnt].pipe.src.is_hsplit = 0;
- pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
+
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) {
+ pipes[pipe_cnt].pipe.src.viewport_width /= 2;
+ pipes[pipe_cnt].pipe.dest.recout_width /= 2;
+ }
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
- pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
- && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
- || (res_ctx->pipe_ctx[i].top_pipe
- && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
+ pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
+ || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
+ || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
@@ -2235,18 +2233,22 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
- pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
- if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
- } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
+ pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
+ pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
+ else {
+ struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ split_pipe = res_ctx->pipe_ctx[i].top_pipe;
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->top_pipe;
+ }
}
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
@@ -2413,6 +2415,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+ stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -2499,7 +2502,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}
-void dcn20_merge_pipes_for_validate(
+static void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context)
{
@@ -2524,7 +2527,7 @@ void dcn20_merge_pipes_for_validate(
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
if (odm_pipe->stream_res.dsc)
- release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+ dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2562,41 +2565,29 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split)
+ bool *split,
+ bool *merge)
{
int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
bool force_split = false;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
- /* Single display loop, exits if there is more than one display */
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- bool exit_loop = false;
- if (!pipe->stream || pipe->top_pipe)
- continue;
-
- if (dc->debug.force_single_disp_pipe_split) {
- if (!force_split)
- force_split = true;
- else {
- force_split = false;
- exit_loop = true;
- }
- }
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
- if (avoid_split)
- avoid_split = false;
- else {
- avoid_split = true;
- exit_loop = true;
- }
- }
- if (exit_loop)
- break;
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
}
- /* TODO: fix dc bugs and remove this split threshold thing */
- if (context->stream_count > dc->res_pool->pipe_count / 2)
+ if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
@@ -2619,11 +2610,12 @@ int dcn20_validate_apply_pipe_split_flags(
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
split[i] = true;
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
@@ -2636,10 +2628,44 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
+ /*Already split odm pipe tree, don't try to split again*/
+ split[i] = false;
+ split[pipe->prev_odm_pipe->pipe_idx] = false;
+ } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
+ && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
+ split[i] = false;
+ split[pipe->top_pipe->pipe_idx] = false;
+ } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
+ if (split[i] == false) {
+ /*Exiting mpc/odm combine*/
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ ASSERT(0); /*should not actually happen yet*/
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else
+ merge[pipe->top_pipe->pipe_idx] = true;
+ } else {
+ /*Transition from mpc combine to odm combine or vice versa*/
+ ASSERT(0); /*should not actually happen yet*/
+ split[i] = true;
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ split[pipe->prev_odm_pipe->pipe_idx] = true;
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else {
+ split[pipe->top_pipe->pipe_idx] = true;
+ merge[pipe->top_pipe->pipe_idx] = true;
+ }
+ }
+ }
+
/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
@@ -2681,7 +2707,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
@@ -2901,6 +2927,9 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
/*
* An artifact of dml pipe split/odm is that pipes get merged back together for
* calculation. Therefore we need to only extract for first pipe in ascending index order
@@ -3138,7 +3167,7 @@ static struct dc_cap_funcs cap_funcs = {
};
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -3164,7 +3193,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
@@ -3886,6 +3915,15 @@ static bool dcn20_resource_construct(
dcn20_hw_sequencer_construct(dc);
+ // If Navi12, set the PG function pointer to NULL. It's not that
+ // PG isn't supported for Navi12, it's that we don't want to
+ // program the registers because that will cause more power
+ // to be consumed. We could have created dcn20_init_hw to get
+ // the same effect by checking ASIC rev, but there was a
+ // request at some point to not check ASIC rev on hw sequencer.
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ dc->hwseq->funcs.enable_power_gating_plane = NULL;
+
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
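Among the dcn20_resource.c changes above, hsplit_grp is now taken from the first pipe of an MPC split chain rather than the immediate top_pipe, which matters once a plane can be split more than two ways. A minimal linked-pipe sketch of the walk (types are illustrative):

#include <stdio.h>

struct pipe {
	int          idx;
	const void  *plane;    /* plane shared by split pipes */
	struct pipe *top_pipe; /* toward the head of the MPC chain */
};

/* Walk up while the pipe above still carries the same plane. */
static int hsplit_group(struct pipe *p)
{
	struct pipe *first = p;

	while (first->top_pipe && first->top_pipe->plane == p->plane)
		first = first->top_pipe;
	return first->idx;
}

int main(void)
{
	int plane;
	struct pipe p0 = { 0, &plane, NULL };
	struct pipe p1 = { 1, &plane, &p0 };
	struct pipe p2 = { 2, &plane, &p1 };

	printf("%d\n", hsplit_group(&p2)); /* 0: head of the 3-way split */
	return 0;
}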
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index f5893840b79b..9d5bff9455fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
-void dcn20_merge_pipes_for_validate(
- struct dc *dc,
- struct dc_state *context);
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split);
+ bool *split,
+ bool *merge);
+void dcn20_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc);
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
@@ -159,7 +160,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
void dcn20_patch_bounding_box(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 9b70a1e7b962..99a7ef6ab878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -616,5 +616,6 @@ void dcn20_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
index 02fafb013fc6..f1ef46e8da5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -34,13 +34,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
#define DCN20_VMID_REG_LIST(id)\
SRI(CNTL, DCN_VM_CONTEXT, id),\
SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index f546260c15b7..5e2d14b897af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -141,7 +141,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
return NUM_VMID;
}
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -149,6 +149,7 @@ void hubbub21_program_urgent_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -163,7 +164,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -172,7 +174,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -180,14 +184,18 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
+
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -201,7 +209,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -210,7 +219,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -218,7 +229,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
@@ -226,7 +239,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -240,7 +254,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -249,7 +264,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -257,7 +274,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
@@ -265,7 +284,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -279,7 +299,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -288,7 +309,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -296,7 +319,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
@@ -304,10 +329,13 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -315,6 +343,7 @@ void hubbub21_program_stutter_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -330,7 +359,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -345,7 +376,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -361,7 +394,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -376,7 +411,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -392,7 +429,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -407,7 +446,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -423,7 +464,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -438,10 +481,14 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -450,6 +497,8 @@ void hubbub21_program_pstate_watermarks(
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
+
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
@@ -464,7 +513,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -480,7 +531,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -496,7 +549,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -512,20 +567,30 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
- hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
@@ -549,6 +614,8 @@ void hubbub21_program_watermarks(
DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
}
void hubbub21_wm_read_state(struct hubbub *hubbub,
@@ -635,6 +702,7 @@ static const struct hubbub_funcs hubbub21_funcs = {
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
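
The void-to-bool conversion above follows one pattern throughout: a watermark register is written only when it is safe to do so (safe_to_lower, or the new value raises the watermark), and a value that would have to be lowered while safe_to_lower is false is reported as pending so the caller can reprogram once lowering becomes safe. A minimal standalone model of that pattern, using hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* hw_watermark stands in for the watermark register: raising is always
 * allowed, lowering is deferred until the caller says it is safe, and a
 * deferred lowering is reported back as pending. */
static unsigned int hw_watermark;

static bool program_watermark(unsigned int new_wm, bool safe_to_lower)
{
	bool wm_pending = false;

	if (safe_to_lower || new_wm > hw_watermark) {
		hw_watermark = new_wm;           /* REG_SET in the real code */
		printf("programmed watermark = %u\n", new_wm);
	} else if (new_wm < hw_watermark) {
		wm_pending = true;               /* must wait for safe_to_lower */
	}

	return wm_pending;
}

int main(void)
{
	hw_watermark = 100;
	program_watermark(120, false);       /* raise: programmed immediately */
	if (program_watermark(80, false))    /* lower: deferred, pending */
		program_watermark(80, true); /* safe now: programmed */
	return 0;
}
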
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index c4840dfb1fa5..ef3ef28509ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -113,22 +113,22 @@
void dcn21_dchvm_init(struct hubbub *hubbub);
int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index cf09b9335728..d285ba622d61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -79,32 +79,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- uint32_t cur_value;
+ uint32_t refcyc_per_vm_group_vblank;
+ uint32_t refcyc_per_vm_req_vblank;
+ uint32_t refcyc_per_vm_group_flip;
+ uint32_t refcyc_per_vm_req_flip;
+ const uint32_t uninitialized_hw_default = 0;
- REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
+ REG_GET(VBLANK_PARAMETERS_5,
+ REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank);
+
+ if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank)
REG_SET(VBLANK_PARAMETERS_5, 0,
REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
REG_GET(VBLANK_PARAMETERS_6,
- REFCYC_PER_VM_REQ_VBLANK,
- &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
+ REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
+
+ if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank)
REG_SET(VBLANK_PARAMETERS_6, 0,
REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
- REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
+ REG_GET(FLIP_PARAMETERS_3,
+ REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
+
+ if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
+ refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip)
REG_SET(FLIP_PARAMETERS_3, 0,
REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
- REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
+ REG_GET(FLIP_PARAMETERS_4,
+ REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
+
+ if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
+ refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip)
REG_SET(FLIP_PARAMETERS_4, 0,
REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
REG_SET(FLIP_PARAMETERS_5, 0,
REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
+
REG_SET(FLIP_PARAMETERS_6, 0,
REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
}
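
The hunk above also makes the hostvm deadline guard explicit: a deadline register is programmed only when it still holds the hardware's uninitialized default (0) or currently holds a larger deadline, i.e. the value is only ever initialized or lowered. A hedged sketch of that guard, with invented stand-ins for the REG_GET/REG_SET pair:

#include <stdint.h>
#include <stdio.h>

/* Program a deadline register only if it is still at the uninitialized
 * HW default (0) or the new deadline is lower; never raise it. */
static void maybe_lower_deadline(uint32_t *reg, uint32_t new_deadline)
{
	const uint32_t uninitialized_hw_default = 0;

	if (*reg == uninitialized_hw_default || *reg > new_deadline)
		*reg = new_deadline;
}

int main(void)
{
	uint32_t refcyc_per_vm_group_vblank = 0; /* fresh out of reset */

	maybe_lower_deadline(&refcyc_per_vm_group_vblank, 500); /* 0: set to 500 */
	maybe_lower_deadline(&refcyc_per_vm_group_vblank, 700); /* higher: kept at 500 */
	maybe_lower_deadline(&refcyc_per_vm_group_vblank, 300); /* lower: set to 300 */
	printf("%u\n", refcyc_per_vm_group_vblank);             /* prints 300 */
	return 0;
}
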
@@ -325,13 +340,9 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
- // The format of default addr is 48:12 of the 48 bit addr
- mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 081ad8e43d58..ada65b1a7eb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state(
true);
}
+/* If the user hotplugs an HDMI monitor while the monitor is off,
+ * the OS will do a mode set (with output timing) but keep the output off.
+ * In this case DAL asks the vbios to power up the PLL in the PHY.
+ * If the user then unplugs the monitor (while the monitor is still off),
+ * or the system attempts to enter modern standby (where we disable the PLL),
+ * the PHY will hang on the next mode set attempt.
+ * If a PLL enable is followed by a PLL disable (without executing a lane
+ * enable/disable), RDPCS_PHY_DP_MPLLB_STATE remains 1, which indicates
+ * that the PLL disable attempt did not actually go through.
+ * As a workaround, insert a PHY lane enable/disable before the PLL disable.
+ */
+void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx->stream->dpms_off)
+ return;
+
+ pipe_ctx->stream->dpms_off = false;
+ core_link_enable_stream(context, pipe_ctx);
+ core_link_disable_stream(pipe_ctx);
+ pipe_ctx->stream->dpms_off = true;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index 182736096123..26bf24d3b59f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state(
const struct dc *dc,
struct dc_state *context);
+void dcn21_PLAT_58856_wa(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index fddbd59bf4f9..b9ff9767e08f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -51,7 +52,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -104,6 +105,8 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -127,6 +130,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn20_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+ .PLAT_58856_wa = dcn21_PLAT_58856_wa,
};
void dcn21_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 33d0a176841a..51b5910cd05f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -84,7 +84,7 @@
#include "dcn21_resource.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -156,17 +156,18 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.clock_limits = {
{
.state = 0,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 440.0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 400.00,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -174,10 +175,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 1,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 618.0,
+ .dcfclk_mhz = 464.52,
+ .fabricclk_mhz = 800.0,
+ .dispclk_mhz = 654.55,
+ .dppclk_mhz = 626.09,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -185,32 +186,65 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 2,
- .dcfclk_mhz = 608.0,
- .fabricclk_mhz = 1066.0,
- .dispclk_mhz = 888.0,
- .dppclk_mhz = 888.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 514.29,
+ .fabricclk_mhz = 933.0,
+ .dispclk_mhz = 757.89,
+ .dppclk_mhz = 685.71,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 287.67,
- .dram_speed_mts = 2133.0,
+ .dram_speed_mts = 1866.0,
},
{
.state = 3,
- .dcfclk_mhz = 676.0,
- .fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 576.00,
+ .fabricclk_mhz = 1067.0,
+ .dispclk_mhz = 847.06,
+ .dppclk_mhz = 757.89,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 318.334,
- .dram_speed_mts = 4266.0,
+ .dram_speed_mts = 2134.0,
},
{
.state = 4,
- .dcfclk_mhz = 810.0,
+ .dcfclk_mhz = 626.09,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 900.00,
+ .dppclk_mhz = 847.06,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 953.0,
+ .dscclk_mhz = 489.0,
+ .dram_speed_mts = 2400.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 685.71,
+ .fabricclk_mhz = 1333.0,
+ .dispclk_mhz = 1028.57,
+ .dppclk_mhz = 960.00,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 278.0,
+ .dscclk_mhz = 287.67,
+ .dram_speed_mts = 2666.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 757.89,
+ .fabricclk_mhz = 1467.0,
+ .dispclk_mhz = 1107.69,
+ .dppclk_mhz = 1028.57,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 715.0,
+ .dscclk_mhz = 318.334,
+ .dram_speed_mts = 3200.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
- .dppclk_mhz = 1285.0,
+ .dppclk_mhz = 1285.00,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
@@ -218,8 +252,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
/*Extra state, no dispclk ramping*/
{
- .state = 5,
- .dcfclk_mhz = 810.0,
+ .state = 8,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
@@ -266,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
- .num_states = 5
+ .num_states = 9
};
#ifndef MAX
@@ -821,11 +855,12 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
+ .min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
@@ -841,7 +876,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -962,6 +997,9 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
@@ -1335,26 +1373,78 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
{
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
- int i;
+ unsigned int i, j, k;
+ int closest_clk_lvl;
+
+ // diags does not retrieve proper values from SMU
+ // cap states to 5 and make state 5 the max state
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) || IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn2_1_soc.num_states = 5;
+
+ dcn2_1_soc.clock_limits[5].state = 5;
+ dcn2_1_soc.clock_limits[5].dcfclk_mhz = 810.0;
+ dcn2_1_soc.clock_limits[5].fabricclk_mhz = 1600.0;
+ dcn2_1_soc.clock_limits[5].dispclk_mhz = 1395.0;
+ dcn2_1_soc.clock_limits[5].dppclk_mhz = 1285.0;
+ dcn2_1_soc.clock_limits[5].phyclk_mhz = 1325.0;
+ dcn2_1_soc.clock_limits[5].socclk_mhz = 953.0;
+ dcn2_1_soc.clock_limits[5].dscclk_mhz = 489.0;
+ dcn2_1_soc.clock_limits[5].dram_speed_mts = 4266.0;
+ } else {
+ dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
+ dcn2_1_soc.num_chans = bw_params->num_channels;
+
+ /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
+ dcn2_1_soc.clock_limits[0].state = 0;
+ dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
+ dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
+ dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
+ dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
+
+ /*
+ * Other levels: find the closest DCN clocks that fit the given clock
+ * limit, using dcfclk as the indicator.
+ */
- dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
- dcn2_1_soc.num_chans = bw_params->num_channels;
+ closest_clk_lvl = -1;
+ /* index currently being filled */
+ k = 1;
+ for (i = 1; i < clk_table->num_entries; i++) {
+ /* loop backwards, skip duplicate state, +1 because SMU has precision issue */
+ for (j = dcn2_1_soc.num_states - 2; j >= k; j--) {
+ if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
- for (i = 0; i < clk_table->num_entries; i++) {
+ /* if we found a level that fits, use the DCN clocks from it; if not, go to the next clock limit */
+ if (closest_clk_lvl != -1) {
+ dcn2_1_soc.clock_limits[k].state = i;
+ dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ k++;
+ }
+ }
- dcn2_1_soc.clock_limits[i].state = i;
- dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+ /* duplicate last level */
+ dcn2_1_soc.clock_limits[k] = dcn2_1_soc.clock_limits[k - 1];
+ dcn2_1_soc.clock_limits[k].state = k;
+ dcn2_1_soc.num_states = k + 1;
}
- dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
- dcn2_1_soc.num_states = i;
- // diags does not retrieve proper values from SMU, do not update DML instance for diags
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
/* Temporary Place holder until we can get them from fuse */
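
The reworked update_bw_bounding_box above builds the SOC states from the SMU clock table: entry 0 becomes the Vmin state (lowest DCN clocks, with dcfclk/fclk/memclk from fuse), each further SMU entry is matched backwards against the static table for the closest state whose dcfclk fits, from which the DCN-side clocks are copied, and the last level is then duplicated as the no-dispclk-ramping state. A condensed, self-contained sketch of the matching loop (values rounded from the table above; only dcfclk/dispclk are modeled):

#include <stdio.h>

struct state { unsigned int dcfclk_mhz; unsigned int dispclk_mhz; };

int main(void)
{
	/* stand-in for dcn2_1_soc.clock_limits (rounded) */
	struct state soc[9] = {
		{400, 600}, {465, 655}, {514, 758}, {576, 847},
		{626, 900}, {686, 1029}, {758, 1108}, {847, 1395}, {847, 1395},
	};
	/* stand-in for the SMU clock table; entry 0 is Vmin in the real code */
	unsigned int smu_dcfclk[4] = {400, 560, 700, 847};
	unsigned int num_soc_states = 9, i, k = 1;
	int j, closest = -1;

	for (i = 1; i < 4; i++) {
		/* loop backwards, skip the duplicate last state, stop at
		 * the first static dcfclk that fits under the SMU limit */
		for (j = (int)num_soc_states - 2; j >= (int)k; j--) {
			if (soc[j].dcfclk_mhz <= smu_dcfclk[i]) {
				closest = j;
				break;
			}
		}
		if (closest != -1) {
			/* dcfclk from the SMU, DCN clocks from the match */
			soc[k].dcfclk_mhz = smu_dcfclk[i];
			soc[k].dispclk_mhz = soc[closest].dispclk_mhz;
			k++;
		}
	}
	soc[k] = soc[k - 1]; /* duplicate last level */

	for (i = 0; i <= k; i++)
		printf("state %u: dcfclk %u dispclk %u\n",
		       i, soc[i].dcfclk_mhz, soc[i].dispclk_mhz);
	return 0;
}
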
@@ -1476,6 +1566,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN21 = true;
+ hws->wa.disallow_self_refresh_during_multi_plane_transition = true;
}
return hws;
}
@@ -1499,6 +1590,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1639,6 +1731,19 @@ static int dcn21_populate_dml_pipes_from_context(
return pipe_cnt;
}
+enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ enum dc_status result = DC_OK;
+
+ if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
+ plane_state->dcc.enable = 1;
+ /* align to our worst case block width */
+ plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
+ }
+ result = dcn20_patch_unknown_plane_state(plane_state);
+ return result;
+}
+
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
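
The meta_pitch computation in dcn21_patch_unknown_plane_state above uses the standard integer round-up idiom, ((x + n - 1) / n) * n, to align the pitch to the worst-case 1024-wide DCC block. A small illustration:

#include <stdio.h>

/* Round x up to the next multiple of n (the meta_pitch idiom above). */
static unsigned int round_up_to(unsigned int x, unsigned int n)
{
	return ((x + n - 1) / n) * n;
}

int main(void)
{
	/* e.g. a 1920-wide plane gets a 2048 meta pitch */
	printf("%u\n", round_up_to(1920, 1024)); /* prints 2048 */
	return 0;
}
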
@@ -1648,7 +1753,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = update_bw_bounding_box
@@ -1695,6 +1800,7 @@ static bool dcn21_resource_construct(
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1758,9 +1864,15 @@ static bool dcn21_resource_construct(
goto create_fail;
}
- // Leave as NULL to not affect current dmcu psr programming sequence
- // Will be uncommented when functionality is confirmed to be working
- pool->base.psr = NULL;
+ if (dc->debug.disable_dmcu) {
+ pool->base.psr = dmub_psr_create(ctx);
+
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 626d22d437f4..968c46dfb506 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -32,6 +32,7 @@ struct cp_psp_stream_config {
uint8_t otg_inst;
uint8_t link_enc_inst;
uint8_t stream_enc_inst;
+ uint8_t mst_supported;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 485a9c62ec58..5bbbafacc720 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2614,6 +2614,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
+ }
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 658f81e757e9..dfd3be452766 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,7 +25,7 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
-#define MAX_CLOCK_LIMIT_STATES 8
+#define MAX_CLOCK_LIMIT_STATES 9
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
@@ -61,12 +61,15 @@ struct _vcs_dpi_voltage_scaling_st {
double dram_speed_mts;
double fabricclk_mhz;
double dispclk_mhz;
+ double dram_bw_per_chan_gbps;
double phyclk_mhz;
double dppclk_mhz;
double dtbclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
+ struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
double urgent_latency_us;
@@ -109,8 +112,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
- unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ double min_dcfclk;
bool do_urgent_latency_adjustment;
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
@@ -189,7 +191,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int min_vblank_lines;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
- unsigned int dcfclk_cstate_latency;
+ double dcfclk_cstate_latency;
unsigned int dppclk_delay_scl;
unsigned int dppclk_delay_scl_lb_only;
unsigned int dppclk_delay_cnvc_formatter;
@@ -202,6 +204,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int LineBufferFixedBpp;
unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
unsigned int bug_forcing_LC_req_same_size_fixed;
+ unsigned int number_of_cursors;
};
struct _vcs_dpi_display_xfc_params_st {
@@ -325,7 +328,6 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
- unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index b3c96d9b472f..6b525c52124c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -266,8 +266,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
- mode_lib->vba.MinVoltageLevel = 0;
- mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
mode_lib->vba.DoUrgentLatencyAdjustment =
soc->do_urgent_latency_adjustment;
@@ -379,7 +377,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
- mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -396,11 +393,11 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_y_c;
mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
- mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height;
- mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width;
+ mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_y;
+ mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_y;
mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
- mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_c;
- mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_c;
+ mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_c;
+ mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_c;
mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c;
mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 2875efd85467..5d82fc5a7ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -389,7 +389,6 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
- bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
@@ -842,8 +841,6 @@ struct vba_vars_st {
double DCCRateChroma[DC__NUM_DPP__MAX];
double PHYCLKD18PerState[DC__VOLTAGE_STATES + 1];
- int MinVoltageLevel;
- int MaxVoltageLevel;
bool WritebackSupportInterleaveAndUsingWholeBufferForASingleStream;
bool NumberOfHDMIFRLSupport;
@@ -880,7 +877,6 @@ struct vba_vars_st {
double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
- bool UseMinimumRequiredDCFCLK;
double WritebackDelayTime[DC__NUM_DPP__MAX];
unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index d2d36d48caaa..f252af1947c3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -47,9 +47,9 @@
#include "dce120/hw_factory_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_factory_dcn10.h"
-#endif
#include "dcn20/hw_factory_dcn20.h"
#include "dcn21/hw_factory_dcn21.h"
+#endif
#include "diagnostics/hw_factory_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 5d396657a1ee..04e2c0f74cb0 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -45,9 +45,9 @@
#include "dce120/hw_translate_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
-#endif
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
+#endif
#include "diagnostics/hw_translate_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f285b76888fb..d523fc9547e7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -124,7 +124,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream);
- enum dc_status (*get_default_swizzle_mode)(
+ enum dc_status (*patch_unknown_plane_state)(
struct dc_plane_state *plane_state);
struct stream_encoder *(*find_first_free_match_stream_enc_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 8b1f0ce6c2a7..e94e5fbf2aa2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -78,6 +78,8 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+void dpcd_set_source_specific_data(struct dc_link *link);
+
void dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index ac530c057ddd..ce65678c03b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -27,6 +27,7 @@
#define __DAL_CLK_MGR_H__
#include "dc.h"
+#include "dm_pp_smu.h"
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
@@ -193,6 +194,7 @@ struct clk_mgr {
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
+ struct pp_smu_wm_range_sets ranges;
};
/* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 862952c0286a..9311d0de377f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -296,6 +296,10 @@ int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context);
+
#endif //__DAL_CLK_MGR_INTERNAL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 05ee5295d2c1..336c80a18175 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -27,11 +27,12 @@
#define __DAL_DCCG_H__
#include "dc_types.h"
+#include "hw_shared.h"
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
-
+ int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index c0dc1d0f5cae..f5dd0cc73c63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -134,7 +134,7 @@ struct hubbub_funcs {
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz);
- void (*program_watermarks)(
+ bool (*program_watermarks)(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c59740084ebc..7c2a3328b208 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -39,6 +39,7 @@ struct dsc_config {
uint32_t pic_height;
enum dc_pixel_encoding pixel_encoding;
enum dc_color_depth color_depth; /* Bits per component */
+ bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 459f95f52486..f30ab4916242 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -25,16 +25,15 @@
#ifndef __DC_DWBC_H__
#define __DC_DWBC_H__
+#include "dal_types.h"
#include "dc_hw_types.h"
-
#define DWB_SW_V2 1
#define DWB_MCIF_BUF_COUNT 4
/* forward declaration of mcif_wb struct */
struct mcif_wb;
-enum dce_version;
enum dwb_sw_version {
dwb_ver_1_0 = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index fb748f082c56..c2b392a533b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -68,6 +68,7 @@ struct encoder_feature_support {
unsigned int max_hdmi_pixel_clock;
bool hdmi_ycbcr420_supported;
bool dp_ycbcr420_supported;
+ bool fec_supported;
};
union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7575564b2265..2717352eb697 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -310,7 +310,8 @@ struct opp_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 351b387ad606..ac6523c0828e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -103,6 +103,7 @@ struct stream_encoder {
struct dc_context *ctx;
struct dc_bios *bp;
enum engine_id id;
+ uint32_t stream_enc_inst;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 209118f9f193..d4c1fb242c63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -66,6 +66,8 @@ struct hw_sequencer_funcs {
int num_planes, struct dc_state *context);
void (*program_front_end_for_ctx)(struct dc *dc,
struct dc_state *context);
+ void (*post_unlock_program_front_end)(struct dc *dc,
+ struct dc_state *context);
void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
void (*update_dchub)(struct dce_hwseq *hws,
@@ -78,10 +80,10 @@ struct hw_sequencer_funcs {
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
/* Pipe Lock Related */
- void (*pipe_control_lock_global)(struct dc *dc,
- struct pipe_ctx *pipe, bool lock);
void (*pipe_control_lock)(struct dc *dc,
struct pipe_ctx *pipe, bool lock);
+ void (*interdependent_update_lock)(struct dc *dc,
+ struct dc_state *context, bool lock);
void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
bool flip_immediate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index ecf566378ccd..52a26e6be066 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -40,10 +40,13 @@ struct dce_hwseq_wa {
bool false_optc_underflow;
bool DEGVIDCN10_254;
bool DEGVIDCN21;
+ bool disallow_self_refresh_during_multi_plane_transition;
};
struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
+ bool disallow_self_refresh_during_multi_plane_transition_applied;
+ unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
};
struct pipe_ctx;
@@ -97,6 +100,8 @@ struct hwseq_private_funcs {
struct dc *dc);
void (*edp_backlight_control)(struct dc_link *link,
bool enable);
+ bool (*is_panel_backlight_on)(struct dc_link *link);
+ bool (*is_panel_powered_on)(struct dc_link *link);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
@@ -140,6 +145,8 @@ struct hwseq_private_funcs {
const struct dc_plane_state *plane_state);
bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
+ void (*PLAT_58856_wa)(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5ae8ada154ef..ca4c36c0c9bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -179,4 +179,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+
+int get_num_odm_splits(struct pipe_ctx *pipe);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index cd9532b4f14d..10b5fa9d2588 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -50,6 +50,7 @@ enum dmub_cmd_type {
DMUB_CMD__REG_REG_WAIT = 4,
DMUB_CMD__PLAT_54186_WA = 5,
DMUB_CMD__PSR = 64,
+ DMUB_CMD__ABM = 66,
DMUB_CMD__VBIOS = 128,
};
@@ -216,7 +217,6 @@ struct dmub_rb_cmd_dpphy_init {
struct dmub_cmd_psr_copy_settings_data {
uint16_t psr_level;
- uint8_t hubp_inst;
uint8_t dpp_inst;
uint8_t mpcc_inst;
uint8_t opp_inst;
@@ -225,17 +225,8 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t digbe_inst;
uint8_t dpphy_inst;
uint8_t aux_inst;
- uint8_t hyst_frames;
- uint8_t hyst_lines;
- uint8_t phy_num;
- uint8_t phy_type;
- uint8_t aux_repeat;
uint8_t smu_optimizations_en;
- uint8_t skip_wait_for_pll_lock;
uint8_t frame_delay;
- uint8_t smu_phy_id;
- uint8_t num_of_controllers;
- uint8_t link_rate;
uint8_t frame_cap_ind;
};
@@ -257,13 +248,59 @@ struct dmub_rb_cmd_psr_enable {
struct dmub_cmd_header header;
};
-struct dmub_cmd_psr_setup_data {
+struct dmub_cmd_psr_set_version_data {
enum psr_version version; // PSR version 1 or 2
};
-struct dmub_rb_cmd_psr_setup {
+struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_header header;
- struct dmub_cmd_psr_setup_data psr_setup_data;
+ struct dmub_cmd_psr_set_version_data psr_set_version_data;
+};
+
+struct dmub_cmd_abm_set_pipe_data {
+ uint32_t ramping_boundary;
+ uint32_t otg_inst;
+};
+
+struct dmub_rb_cmd_abm_set_pipe {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data;
+};
+
+struct dmub_cmd_abm_set_backlight_data {
+ uint32_t frame_ramp;
+};
+
+struct dmub_rb_cmd_abm_set_backlight {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data;
+};
+
+struct dmub_cmd_abm_set_level_data {
+ uint32_t level;
+};
+
+struct dmub_rb_cmd_abm_set_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_level_data abm_set_level_data;
+};
+
+struct dmub_cmd_abm_set_ambient_level_data {
+ uint32_t ambient_lux;
+};
+
+struct dmub_rb_cmd_abm_set_ambient_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data;
+};
+
+struct dmub_cmd_abm_set_pwm_frac_data {
+ uint32_t fractional_pwm;
+};
+
+struct dmub_rb_cmd_abm_set_pwm_frac {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
};
union dmub_rb_cmd {
@@ -277,11 +314,16 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
struct dmub_rb_cmd_dpphy_init dpphy_init;
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
- struct dmub_rb_cmd_psr_enable psr_enable;
+ struct dmub_rb_cmd_psr_set_version psr_set_version;
struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+ struct dmub_rb_cmd_psr_enable psr_enable;
struct dmub_rb_cmd_psr_set_level psr_set_level;
struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
- struct dmub_rb_cmd_psr_setup psr_setup;
+ struct dmub_rb_cmd_abm_set_pipe abm_set_pipe;
+ struct dmub_rb_cmd_abm_set_backlight abm_set_backlight;
+ struct dmub_rb_cmd_abm_set_level abm_set_level;
+ struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
+ struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 7b69eb37f762..d37535d21928 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,7 +32,7 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_SET_VERSION = 0,
DMUB_CMD__PSR_COPY_SETTINGS = 1,
DMUB_CMD__PSR_ENABLE = 2,
DMUB_CMD__PSR_DISABLE = 3,
@@ -42,7 +42,16 @@ enum dmub_cmd_psr_type {
enum psr_version {
PSR_VERSION_1 = 0x10, // PSR Version 1
PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
- PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+ PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+enum dmub_cmd_abm_type {
+ DMUB_CMD__ABM_INIT_CONFIG = 0,
+ DMUB_CMD__ABM_SET_PIPE = 1,
+ DMUB_CMD__ABM_SET_BACKLIGHT = 2,
+ DMUB_CMD__ABM_SET_LEVEL = 3,
+ DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4,
+ DMUB_CMD__ABM_SET_PWM_FRAC = 5,
};
#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
new file mode 100644
index 000000000000..652d6fc061b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_GPINT_CMD_H_
+#define _DMUB_GPINT_CMD_H_
+
+#include "dmub_types.h"
+
+/**
+ * The register format for sending a command via the GPINT.
+ */
+union dmub_gpint_data_register {
+ struct {
+ uint32_t param : 16;
+ uint32_t command_code : 12;
+ uint32_t status : 4;
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * The shifts and masks below may alternatively be used to format and read
+ * the command register bits.
+ */
+
+#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF
+#define DMUB_GPINT_DATA_PARAM_SHIFT 0
+
+#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF
+#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
+
+#define DMUB_GPINT_DATA_STATUS_MASK 0xF
+#define DMUB_GPINT_DATA_STATUS_SHIFT 28
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_gpint_command {
+ DMUB_GPINT__INVALID_COMMAND = 0,
+ DMUB_GPINT__GET_FW_VERSION = 1,
+ DMUB_GPINT__STOP_FW = 2,
+ DMUB_GPINT__GET_PSR_STATE = 7,
+};
+
+/**
+ * Command responses.
+ */
+
+#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD
+
+#endif /* _DMUB_GPINT_CMD_H_ */
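
The GPINT data register packs a 16-bit parameter, a 12-bit command code, and a 4-bit status into one 32-bit word; the shift/mask macros are the open-coded form of the same layout. A self-contained check that the two views agree (note that C bitfield ordering is implementation-defined, which is why the macros exist):

#include <stdint.h>
#include <stdio.h>

union dmub_gpint_data_register { /* copied from dmub_gpint_cmd.h above */
	struct {
		uint32_t param : 16;
		uint32_t command_code : 12;
		uint32_t status : 4;
	} bits;
	uint32_t all;
};

#define DMUB_GPINT_DATA_PARAM_SHIFT        0
#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
#define DMUB_GPINT_DATA_STATUS_SHIFT       28

int main(void)
{
	union dmub_gpint_data_register reg;
	uint32_t manual;

	reg.bits.param = 0;
	reg.bits.command_code = 1; /* DMUB_GPINT__GET_FW_VERSION */
	reg.bits.status = 1;       /* set pending; FW clears it on ACK */

	manual = (0u << DMUB_GPINT_DATA_PARAM_SHIFT) |
		 (1u << DMUB_GPINT_DATA_COMMAND_CODE_SHIFT) |
		 (1u << DMUB_GPINT_DATA_STATUS_SHIFT);

	/* on the usual little-endian ABI both print 0x10010000 */
	printf("bitfield=0x%08x manual=0x%08x\n", reg.all, manual);
	return 0;
}
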
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index f8917594036a..c2671f2616c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -66,6 +66,7 @@
#include "dmub_types.h"
#include "dmub_cmd.h"
+#include "dmub_gpint_cmd.h"
#include "dmub_rb.h"
#if defined(__cplusplus)
@@ -103,7 +104,7 @@ enum dmub_window_id {
DMUB_WINDOW_4_MAILBOX,
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
- DMUB_WINDOW_7_RESERVED,
+ DMUB_WINDOW_7_SCRATCH_MEM,
DMUB_WINDOW_TOTAL,
};
@@ -262,6 +263,14 @@ struct dmub_srv_hw_funcs {
bool (*is_phy_init)(struct dmub_srv *dmub);
bool (*is_auto_load_done)(struct dmub_srv *dmub);
+
+ void (*set_gpint)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ bool (*is_gpint_acked)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
};
/**
@@ -307,6 +316,7 @@ struct dmub_srv {
enum dmub_asic asic;
void *user_ctx;
bool is_virtual;
+ struct dmub_fb scratch_mem_fb;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -516,6 +526,45 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us);
+/**
+ * dmub_srv_send_gpint_command() - Sends a GPINT based command.
+ * @dmub: the dmub service
+ * @command_code: the command code to send
+ * @param: the command parameter to send
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Sends a command via the general purpose interrupt (GPINT).
+ * Waits for the number of microseconds specified by timeout_us
+ * for the command ACK before returning.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for ACK timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us);
+
+/**
+ * dmub_srv_get_gpint_response() - Queries the GPINT response.
+ * @dmub: the dmub service
+ * @response: the response for the last GPINT
+ *
+ * Returns the response code for the last GPINT interrupt.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index b2ca8e0dbac9..63bb9e2c81de 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -60,6 +60,12 @@ static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
{
uint32_t tmp;
+ if (dmub->fb_base || dmub->fb_offset) {
+ *fb_base = dmub->fb_base;
+ *fb_offset = dmub->fb_offset;
+ return;
+ }
+
REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
*fb_base = (uint64_t)tmp << 24;
@@ -77,11 +83,52 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn20_reset(struct dmub_srv *dmub)
{
+ union dmub_gpint_data_register cmd;
+ const uint32_t timeout = 30;
+ uint32_t in_reset, scratch, i;
+
+ REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &in_reset);
+
+ if (in_reset == 0) {
+ cmd.bits.status = 1;
+ cmd.bits.command_code = DMUB_GPINT__STOP_FW;
+ cmd.bits.param = 0;
+
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /**
+ * Timeout covers both the ACK and the wait
+ * for remaining work to finish.
+ *
+ * This is mostly bound by the PHY disable sequence.
+ * Each register check will be greater than 1us, so
+ * don't bother using udelay.
+ */
+
+ for (i = 0; i < timeout; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ break;
+ }
+
+ for (i = 0; i < timeout; ++i) {
+ scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ break;
+ }
+
+ /* Clear the GPINT command manually so we don't reset again. */
+ cmd.all = 0;
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /* Force the reset in case we timed out; the DMCUB is likely hung. */
+ }
+
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ REG_WRITE(DMCUB_SCRATCH0, 0);
}
void dmub_dcn20_reset_release(struct dmub_srv *dmub)
@@ -217,3 +264,25 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
return supported;
}
+
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
+}
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ union dmub_gpint_data_register test;
+
+ reg.bits.status = 0;
+ test.all = REG_READ(DMCUB_GPINT_DATAIN1);
+
+ return test.all == reg.all;
+}
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_SCRATCH7);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 04b0fa13153d..7f046c73927e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -91,6 +91,7 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH13) \
DMUB_SR(DMCUB_SCRATCH14) \
DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(DMCUB_GPINT_DATAIN1) \
DMUB_SR(CC_DC_PIPE_DIS) \
DMUB_SR(MMHUBBUB_SOFT_RESET) \
DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
@@ -183,4 +184,12 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 85a518bf8a76..ce32cc7933c4 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -52,8 +52,11 @@
/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (1024)
+/* Default scratch mem size. */
+#define DMUB_SCRATCH_MEM_SIZE (256)
+
/* Number of windows in use. */
-#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
#define DMUB_CW0_BASE (0x60000000)
@@ -126,6 +129,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
funcs->is_hw_init = dmub_dcn20_is_hw_init;
+ funcs->set_gpint = dmub_dcn20_set_gpint;
+ funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
+ funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
if (asic == DMUB_ASIC_DCN21) {
dmub->regs = &dmub_srv_dcn21_regs;
@@ -208,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@@ -253,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
fw_state->base = dmub_align(trace_buff->top, 256);
fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
- out->fb_size = dmub_align(fw_state->top, 4096);
+ scratch_mem->base = dmub_align(fw_state->top, 256);
+ scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+
+ out->fb_size = dmub_align(scratch_mem->top, 4096);
return DMUB_STATUS_OK;
}
@@ -331,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
struct dmub_rb_init_params rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
@@ -367,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->hw_funcs.reset(dmub);
if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
- fw_state_fb) {
+ fw_state_fb && scratch_mem_fb) {
cw2.offset.quad_part = data_fb->gpu_addr;
cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
cw2.region.top = cw2.region.base + data_fb->size;
@@ -393,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->scratch_mem_fb = *scratch_mem_fb;
+
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
&cw5, &cw6);
@@ -522,3 +536,50 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us)
+{
+ union dmub_gpint_data_register reg;
+ uint32_t i;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.set_gpint)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_gpint_acked)
+ return DMUB_STATUS_INVALID;
+
+ reg.bits.status = 1;
+ reg.bits.command_code = command_code;
+ reg.bits.param = param;
+
+ dmub->hw_funcs.set_gpint(dmub, reg);
+
+ for (i = 0; i < timeout_us; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
+ return DMUB_STATUS_OK;
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response)
+{
+ *response = 0;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.get_gpint_response)
+ return DMUB_STATUS_INVALID;
+
+ *response = dmub->hw_funcs.get_gpint_response(dmub);
+
+ return DMUB_STATUS_OK;
+}
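
The two helpers above are designed to be used as a pair: dmub_srv_send_gpint_command() raises the interrupt and polls for the firmware ACK, after which dmub_srv_get_gpint_response() reads back the 32-bit reply. A minimal caller sketch, assuming the DMUB_GPINT__GET_FW_VERSION command code exists in dmub_cmd.h and using an arbitrary 100-iteration timeout:

/*
 * Illustrative only, not part of this patch. DMUB_GPINT__GET_FW_VERSION
 * is assumed to exist in enum dmub_gpint_command; the timeout of 100
 * poll iterations is an arbitrary choice for the sketch.
 */
static enum dmub_status dmub_query_fw_version(struct dmub_srv *dmub,
					      uint32_t *version)
{
	enum dmub_status status;

	/* Set the GPINT data register and poll until the firmware acks. */
	status = dmub_srv_send_gpint_command(dmub,
					     DMUB_GPINT__GET_FW_VERSION,
					     0, 100);
	if (status != DMUB_STATUS_OK)
		return status;

	/* The ack means the response register now holds the reply. */
	return dmub_srv_get_gpint_response(dmub, version);
}

Note that the poll loop in dmub_srv_send_gpint_command() is a tight spin over timeout_us iterations with no delay between register reads, so the timeout parameter is an iteration count rather than a guaranteed wall-clock bound.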
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index a2903985b9e8..8a87d0ed90ae 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,31 +134,27 @@
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
-#define RAVEN2_15D8_REV_94 0x94
-#define RAVEN2_15D8_REV_95 0x95
-#define RAVEN2_15D8_REV_E3 0xE3
-#define RAVEN2_15D8_REV_E4 0xE4
-#define RAVEN2_15D8_REV_E9 0xE9
-#define RAVEN2_15D8_REV_EA 0xEA
-#define RAVEN2_15D8_REV_EB 0xEB
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#endif
+#define PRID_DALI_DE 0xDE
+#define PRID_DALI_DF 0xDF
+#define PRID_DALI_E3 0xE3
+#define PRID_DALI_E4 0xE4
+
+#define PRID_POLLOCK_94 0x94
+#define PRID_POLLOCK_95 0x95
+#define PRID_POLLOCK_E9 0xE9
+#define PRID_POLLOCK_EA 0xEA
+#define PRID_POLLOCK_EB 0xEB
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
#ifndef ASICREV_IS_RAVEN2
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0))
#endif
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
- || (eChipRev == RAVEN2_15D8_REV_E4))
-#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
- || eChipRev == RAVEN2_15D8_REV_95 \
- || eChipRev == RAVEN2_15D8_REV_E9 \
- || eChipRev == RAVEN2_15D8_REV_EA \
- || eChipRev == RAVEN2_15D8_REV_EB)
#define FAMILY_RV 142 /* DCN 1*/
@@ -177,7 +173,7 @@ enum {
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
#define RENOIR_A0 0x91
#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
-#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
+#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 2c90d1b46c8b..3d29646c7cb4 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -149,4 +149,12 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
+#define DP_SOURCE_TABLE_REVISION 0x310
+#define DP_SOURCE_PAYLOAD_SIZE 0x311
+#define DP_SOURCE_SINK_CAP 0x317
+#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
+#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
+#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
+#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
+
#endif /* __DAL_DPCD_DEFS_H__ */
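
The new defines live in the DPCD source-device-specific range (0x300-0x3FF) and give names to the source backlight and capability registers. As a hedged sketch of how one of these registers might be written over AUX, assuming the core_link_write_dpcd() helper and its (link, address, data, size) signature from the DC core:

/*
 * Sketch only: write an 8-bit value to the source backlight level
 * register. core_link_write_dpcd() and DC_OK are assumptions taken
 * from the DC core; real callers would also gate this on sink and
 * source capability.
 */
static bool set_source_backlight_level(struct dc_link *link, uint8_t level)
{
	return core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
				    &level, sizeof(level)) == DC_OK;
}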
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 89a709267019..d66f9d8eefb4 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -124,36 +124,37 @@ enum dc_log_type {
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
(1 << LOG_DETECTION_EDID_PARSER))
-#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
- (1 << LOG_WARNING) | \
- (1 << LOG_EVENT_MODE_SET) | \
- (1 << LOG_EVENT_DETECTION) | \
- (1 << LOG_EVENT_LINK_TRAINING) | \
- (1 << LOG_EVENT_LINK_LOSS) | \
- (1 << LOG_EVENT_UNDERFLOW) | \
- (1 << LOG_RESOURCE) | \
- (1 << LOG_FEATURE_OVERRIDE) | \
- (1 << LOG_DETECTION_EDID_PARSER) | \
- (1 << LOG_DC) | \
- (1 << LOG_HW_HOTPLUG) | \
- (1 << LOG_HW_SET_MODE) | \
- (1 << LOG_HW_RESUME_S3) | \
- (1 << LOG_HW_HPD_IRQ) | \
- (1 << LOG_SYNC) | \
- (1 << LOG_BANDWIDTH_VALIDATION) | \
- (1 << LOG_MST) | \
- (1 << LOG_DETECTION_DP_CAPS) | \
- (1 << LOG_BACKLIGHT)) | \
- (1 << LOG_I2C_AUX) | \
- (1 << LOG_IF_TRACE) | \
- (1 << LOG_DTN) /* | \
- (1 << LOG_DEBUG) | \
- (1 << LOG_BIOS) | \
- (1 << LOG_SURFACE) | \
- (1 << LOG_SCALER) | \
- (1 << LOG_DML) | \
- (1 << LOG_HW_LINK_TRAINING) | \
- (1 << LOG_HW_AUDIO)| \
- (1 << LOG_BANDWIDTH_CALCS)*/
+#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \
+ (1ULL << LOG_WARNING) | \
+ (1ULL << LOG_EVENT_MODE_SET) | \
+ (1ULL << LOG_EVENT_DETECTION) | \
+ (1ULL << LOG_EVENT_LINK_TRAINING) | \
+ (1ULL << LOG_EVENT_LINK_LOSS) | \
+ (1ULL << LOG_EVENT_UNDERFLOW) | \
+ (1ULL << LOG_RESOURCE) | \
+ (1ULL << LOG_FEATURE_OVERRIDE) | \
+ (1ULL << LOG_DETECTION_EDID_PARSER) | \
+ (1ULL << LOG_DC) | \
+ (1ULL << LOG_HW_HOTPLUG) | \
+ (1ULL << LOG_HW_SET_MODE) | \
+ (1ULL << LOG_HW_RESUME_S3) | \
+ (1ULL << LOG_HW_HPD_IRQ) | \
+ (1ULL << LOG_SYNC) | \
+ (1ULL << LOG_BANDWIDTH_VALIDATION) | \
+ (1ULL << LOG_MST) | \
+ (1ULL << LOG_DETECTION_DP_CAPS) | \
+ (1ULL << LOG_BACKLIGHT)) | \
+ (1ULL << LOG_I2C_AUX) | \
+ (1ULL << LOG_IF_TRACE) | \
+ (1ULL << LOG_HDMI_FRL) | \
+ (1ULL << LOG_DTN) /* | \
+ (1ULL << LOG_DEBUG) | \
+ (1ULL << LOG_BIOS) | \
+ (1ULL << LOG_SURFACE) | \
+ (1ULL << LOG_SCALER) | \
+ (1ULL << LOG_DML) | \
+ (1ULL << LOG_HW_LINK_TRAINING) | \
+ (1ULL << LOG_HW_AUDIO)| \
+ (1ULL << LOG_BANDWIDTH_CALCS)*/
#endif /* __DAL_LOGGER_TYPES_H__ */
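
Beyond adding LOG_HDMI_FRL, the switch from 1 << to 1ULL << is a correctness guard: once enum dc_log_type grows past 32 entries, shifting a plain int by the ordinal is undefined behavior in C and the high mask bits are silently lost. A standalone illustration, using a hypothetical log type at bit 33:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical log type with ordinal 33, i.e. beyond bit 31.
	 * The old expression (1 << 33) would be undefined behavior,
	 * since the literal 1 is a 32-bit int; 1ULL makes the shift
	 * well-defined and preserves the bit in a 64-bit mask. */
	uint64_t mask = 1ULL << 33;

	printf("mask = %#llx\n", (unsigned long long)mask);
	return 0;
}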
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b9992ebf77a6..4e542826cd26 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -524,12 +524,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
infopacket->sb[6] |= 0x04;
/* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000);
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
- infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
//FreeSync HDR
@@ -747,10 +747,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
- /* Rounded to the nearest Hz */
- nominal_field_rate_in_uhz = 1000000ULL *
- div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
-
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
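
With the up-front rounding of the nominal field rate removed here, rounding now happens where the values are actually consumed: the +500000 bias in the PB7/PB8 packing above converts micro-Hertz to the nearest whole Hertz instead of truncating. For example, a 59.94 Hz panel reporting min_refresh_in_uhz = 59940000 now packs as (59940000 + 500000) / 1000000 = 60, where the old plain division reported 59.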
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 8aa528e874c4..e9fbd94f8635 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -61,7 +61,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
is_auth_needed &&
- !hdcp->connection.link.adjust.hdcp1.disable;
+ !hdcp->connection.link.adjust.hdcp1.disable &&
+ !hdcp->connection.is_hdcp1_revoked;
}
static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
@@ -72,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -103,8 +104,6 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- /* update topology event if hdcp is not desired */
- status = mod_hdcp_add_display_topology(hdcp);
} else if (is_in_hdcp1_states(hdcp)) {
status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
} else if (is_in_hdcp1_dp_states(hdcp)) {
@@ -115,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
} else if (is_in_hdcp2_dp_states(hdcp)) {
status = mod_hdcp_hdcp2_dp_execution(hdcp,
event_ctx, &input->hdcp2);
+ } else {
+ event_ctx->unexpected_event = 1;
+ goto out;
}
out:
return status;
@@ -191,14 +193,7 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
mod_hdcp_hdcp1_destroy_session(hdcp);
}
- if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -212,25 +207,12 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
goto out;
}
}
- if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else if (is_in_cp_not_desired_state(hdcp)) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -337,16 +319,20 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* add display to connection */
- hdcp->connection.link = *link;
- *display_container = *display;
-
/* reset retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+ status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
/* request authentication */
if (current_state(hdcp) != HDCP_INITIALIZED)
set_state_id(hdcp, output, HDCP_INITIALIZED);
@@ -379,17 +365,20 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* remove display */
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
-
/* clear retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
- /* request authentication for remaining displays*/
- if (get_active_display_count(hdcp) > 0)
+ /* remove display */
+ status = mod_hdcp_remove_display_from_topology(hdcp, index);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+ memset(display, 0, sizeof(struct mod_hdcp_display));
+
+ /* request authentication when connection is not reset */
+ if (current_state(hdcp) != HDCP_UNINITIALIZED)
callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
output);
out:
@@ -496,10 +485,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
- mode = MOD_HDCP_MODE_DP;
- break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
- mode = MOD_HDCP_MODE_DP_MST;
+ mode = MOD_HDCP_MODE_DP;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index af78e4f1be68..60ff1a0028ac 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -41,7 +41,6 @@ enum mod_hdcp_trans_input_result {
struct mod_hdcp_transition_input_hdcp1 {
uint8_t bksv_read;
uint8_t bksv_validation;
- uint8_t add_topology;
uint8_t create_session;
uint8_t an_write;
uint8_t aksv_write;
@@ -71,7 +70,6 @@ struct mod_hdcp_transition_input_hdcp1 {
struct mod_hdcp_transition_input_hdcp2 {
uint8_t hdcp2version_read;
uint8_t hdcp2_capable_check;
- uint8_t add_topology;
uint8_t create_session;
uint8_t ake_init_prepare;
uint8_t ake_init_write;
@@ -167,9 +165,9 @@ struct mod_hdcp_auth_counters {
/* contains values per connection */
struct mod_hdcp_connection {
struct mod_hdcp_link link;
- struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
uint8_t is_repeater;
uint8_t is_km_stored;
+ uint8_t is_hdcp1_revoked;
uint8_t is_hdcp2_revoked;
struct mod_hdcp_trace trace;
uint8_t hdcp1_retry_count;
@@ -202,6 +200,8 @@ struct mod_hdcp {
struct mod_hdcp_config config;
/* per connection */
struct mod_hdcp_connection connection;
+ /* per display */

+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
/* per authentication attempt */
struct mod_hdcp_authentication auth;
/* per state in an authentication */
@@ -327,10 +327,10 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* TODO: add adjustment log */
/* psp functions */
-enum mod_hdcp_status mod_hdcp_add_display_topology(
- struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_remove_display_topology(
- struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -392,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
- hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP);
}
static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
+ hdcp->connection.link.dp.mst_supported);
}
static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
@@ -503,11 +503,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
- return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,35 +510,24 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
- uint8_t added_count = 0;
- uint8_t i;
-
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_active(&hdcp->connection.displays[i]))
- added_count++;
- return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
- uint8_t added_count = 0;
+ uint8_t active_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i]))
- added_count++;
- return added_count;
+ if (is_display_active(&hdcp->displays[i]))
+ active_count++;
+ return active_count;
}
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -556,9 +540,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (hdcp->connection.displays[i].index == index &&
- is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (hdcp->displays[i].index == index &&
+ is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -571,8 +555,8 @@ static inline struct mod_hdcp_display *get_empty_display_container(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (!is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (!is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37670db64855..f244b72e74e0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -168,10 +168,6 @@ static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 76edcbe51f71..f3711914364e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -46,8 +46,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
break;
case H1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -173,8 +172,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
break;
case D1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -210,7 +208,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->rx_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -231,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
(!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
}
if (conn->is_repeater) {
set_watchdog_in_ms(hdcp, 5000, output);
@@ -290,7 +292,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ksvlist_vp_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 55246711700b..549c113abcf7 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -34,7 +34,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
if (is_dp_hdcp(hdcp))
is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
else
- is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) &&
+ is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
return is_ready ? MOD_HDCP_STATUS_SUCCESS :
@@ -67,7 +67,7 @@ static inline enum mod_hdcp_status check_reauthentication_request(
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
else
- ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ?
+ ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
return ret;
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -259,6 +259,7 @@ static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
+
if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
&input->hdcp2version_read, &status,
hdcp, "hdcp2version_read"))
@@ -281,10 +282,7 @@ static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 8cae3e3aacd5..e738c7ae66ec 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -47,8 +47,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
}
break;
case H2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
@@ -389,8 +388,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
}
break;
case D2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index ff9d54812e62..bb5130f4228d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id {
MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
@@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
@@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
@@ -405,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+ hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
hdcp->auth.msg.hdcp2.ake_cert+1,
sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
@@ -423,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+ hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
hdcp->auth.msg.hdcp2.ake_h_prime+1,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
@@ -441,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+ hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
hdcp->auth.msg.hdcp2.ake_pairing_info+1,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
@@ -459,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+ hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
hdcp->auth.msg.hdcp2.lc_l_prime+1,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
@@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
{
- enum mod_hdcp_status status;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
- status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
- hdcp->auth.msg.hdcp2.rx_id_list+1,
- sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+ uint32_t device_count = 0;
+ uint32_t rx_id_list_size = 0;
+ uint32_t bytes_read = 0;
+ hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list+1,
+ HDCP_MAX_AUX_TRANSACTION_SIZE);
+ if (status == MOD_HDCP_STATUS_SUCCESS) {
+ bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE;
+ device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+ (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+ rx_id_list_size = MIN((21 + 5 * device_count),
+ (sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1));
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
+ hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read,
+ (rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE);
+ }
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
hdcp->auth.msg.hdcp2.rx_id_list,
@@ -495,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
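
The second DPCD read at 0x69340 exists because a DP receiver-ID list can exceed a single AUX transaction. Working through the sizing above, and assuming HDCP_MAX_AUX_TRANSACTION_SIZE is 16 as defined in this module: a repeater reporting device_count = 4 has rx_id_list_size = 21 + 5 * 4 = 41 bytes including the message-ID byte. The first read fetches 16 payload bytes, and the follow-up read of (41 - 1) / 16 * 16 = 32 bytes (rounded down to whole transactions) covers the remaining 24 payload bytes, over-reading into the slack that the MIN() cap guarantees the buffer can hold.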
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 724ebcee9a19..44956f9ba178 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -90,10 +90,14 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index ff91373ebada..d3192b9d0c3d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -37,10 +37,11 @@
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
HDCP_LOG_ERR(hdcp, \
- "[Link %d] WARNING %s IN STATE %s", \
+ "[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \
hdcp->config.index, \
mod_hdcp_status_to_str(status), \
- mod_hdcp_state_id_to_str(hdcp->state.id))
+ mod_hdcp_state_id_to_str(hdcp->state.id), \
+ hdcp->state.stay_count)
#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 1.4 enabled on display %d", \
@@ -49,6 +50,15 @@
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 2.2 enabled on display %d", \
hdcp->config.index, displayIndex)
+#define HDCP_HDCP1_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 2.2 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+
/* state machine logs */
#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
HDCP_LOG_FSM(hdcp, \
@@ -102,6 +112,9 @@
sizeof(hdcp->auth.msg.hdcp1.bksv)); \
HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
+ sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
sizeof(hdcp->auth.msg.hdcp1.an)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 7911dc157d5a..836e47954938 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -44,84 +44,78 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,
in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg3_desc.msg_size = 0;
}
-enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
-{
-
- struct psp_context *psp = hdcp->config.psp.handle;
- struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
- uint8_t i;
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index)
+ {
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display =
+ get_active_display_at_index(hdcp, index);
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (is_display_added(&(hdcp->connection.displays[i]))) {
+ if (!display || !is_display_active(display))
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
- display = &hdcp->connection.displays[i];
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+ }
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
- HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
- }
- }
-
- return MOD_HDCP_STATUS_SUCCESS;
-}
-
-enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
struct mod_hdcp_link *link = &hdcp->connection.link;
- uint8_t i;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
- display = &hdcp->connection.displays[i];
-
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
-
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
- dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
- dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
- dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
- dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
- TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
-
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
-
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
- display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
- }
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ if (is_dp_hdcp(hdcp))
+ dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
+
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
}
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -129,7 +123,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.hdcp_initialized) {
@@ -164,6 +158,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -177,6 +172,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP1_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -210,6 +213,10 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
hdcp->connection.is_repeater = 0;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
@@ -221,7 +228,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -245,6 +252,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -264,10 +272,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+ hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -281,14 +298,13 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].adjust.disable)
continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
- hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -296,8 +312,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return MOD_HDCP_STATUS_SUCCESS;
@@ -344,7 +360,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
@@ -385,6 +401,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -398,6 +415,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP2_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -473,9 +498,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
}
- return MOD_HDCP_STATUS_FAILURE;
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -630,20 +658,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
- msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
-
- hdcp2_message_init(hdcp, msg_in);
-
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -695,6 +718,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
}
@@ -717,10 +743,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].adjust.disable)
continue;
- hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
@@ -729,8 +754,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
break;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 82a5e997d573..1a663dbbf810 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -117,6 +117,8 @@ struct ta_dtm_shared_memory {
int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
uint64_t fence_mc_addr);
+enum { PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE = 5120 };
+
enum ta_hdcp_command {
TA_HDCP_COMMAND__INITIALIZE,
TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
@@ -134,7 +136,10 @@ enum ta_hdcp_command {
TA_HDCP_COMMAND__UNUSED_3,
TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
- TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
+ TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP_DESTROY_ALL_SESSIONS,
+ TA_HDCP_COMMAND__HDCP_SET_SRM,
+ TA_HDCP_COMMAND__HDCP_GET_SRM
};
enum ta_hdcp2_msg_id {
@@ -235,7 +240,8 @@ enum ta_hdcp_authentication_status {
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
- TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED = 0x0A
};
enum ta_hdcp2_msg_authentication_status {
@@ -253,7 +259,8 @@ enum ta_hdcp2_msg_authentication_status {
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
- TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED
};
enum ta_hdcp_content_type {
@@ -415,6 +422,22 @@ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
uint32_t display_handle;
};
+struct ta_hdcp_cmd_set_srm_input {
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_set_srm_output {
+ uint8_t valid_signature;
+ uint32_t srm_version;
+};
+
+struct ta_hdcp_cmd_get_srm_output {
+ uint32_t srm_version;
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
/**********************************************************/
/* Common input structure for HDCP callbacks */
union ta_hdcp_cmd_input {
@@ -432,6 +455,7 @@ union ta_hdcp_cmd_input {
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
hdcp2_prepare_process_authentication_message_v2;
struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_set_srm_input hdcp_set_srm;
};
/* Common output structure for HDCP callbacks */
@@ -444,6 +468,8 @@ union ta_hdcp_cmd_output {
struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
hdcp2_prepare_process_authentication_message_v2;
+ struct ta_hdcp_cmd_set_srm_output hdcp_set_srm;
+ struct ta_hdcp_cmd_get_srm_output hdcp_get_srm;
};
/**********************************************************/
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f2a0e1a064da..eae9309cfb24 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -56,8 +56,10 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
@@ -100,6 +102,7 @@ enum mod_hdcp_status {
struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_supported;
+ uint8_t mst_supported;
};
struct mod_hdcp_hdmi {
@@ -108,14 +111,12 @@ struct mod_hdcp_hdmi {
enum mod_hdcp_operation_mode {
MOD_HDCP_MODE_OFF,
MOD_HDCP_MODE_DEFAULT,
- MOD_HDCP_MODE_DP,
- MOD_HDCP_MODE_DP_MST
+ MOD_HDCP_MODE_DP
};
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
@@ -155,7 +156,8 @@ struct mod_hdcp_display_adjustment {
struct mod_hdcp_link_adjustment_hdcp1 {
uint8_t disable : 1;
uint8_t postpone_encryption : 1;
- uint8_t reserved : 6;
+ uint8_t min_auth_retries_wa : 1;
+ uint8_t reserved : 5;
};
enum mod_hdcp_force_hdcp_type {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 42cbeffac640..13c57ff2abdc 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -34,8 +34,7 @@ struct dc_info_packet;
struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry);
+ struct dc_info_packet *info_packet);
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 6a8a056424b8..cff3ab15fc0c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -130,8 +130,7 @@ enum ColorimetryYCCDP {
};
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry)
+ struct dc_info_packet *info_packet)
{
unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
@@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
unsigned int colorimetryFormat = 0;
bool stereo3dSupport = false;
- /* Initialize first, later if infopacket is valid determine if VSC SDP
- * should be used to signal colorimetry format and pixel encoding.
- */
- *use_vsc_sdp_for_colorimetry = false;
-
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
vsc_packet_revision = vsc_packet_rev1;
stereo3dSupport = true;
@@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
if (stream->psr_version != 0)
vsc_packet_revision = vsc_packet_rev2;
- /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
- if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+ /* Update to revision 5 for extended colorimetry support */
+ if (stream->use_vsc_sdp_for_colorimetry)
vsc_packet_revision = vsc_packet_rev5;
/* VSC packet not needed based on the features
@@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
- /* If we are using VSC SDP revision 05h, use this to signal for
- * colorimetry format and pixel encoding. HW should later be
- * programmed to set MSA MISC1 bit 6 to indicate ignore
- * colorimetry format and pixel encoding in the MSA.
- */
- *use_vsc_sdp_for_colorimetry = true;
-
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
* Data Bytes DB 18~16
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index f0a153704f6e..00f132f8ad55 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -40,14 +40,18 @@ struct core_vmid {
static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
- core_vmid->num_vmids_available--;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
+ core_vmid->num_vmids_available--;
+ }
}
static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = 0;
- core_vmid->num_vmids_available++;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = 0;
+ core_vmid->num_vmids_available++;
+ }
}
static void evict_vmids(struct core_vmid *core_vmid)
@@ -57,7 +61,7 @@ static void evict_vmids(struct core_vmid *core_vmid)
// At this point any positions with value 0 are unused vmids, evict them
for (i = 1; i < core_vmid->num_vmid; i++) {
- if (ord & (1u << i))
+ if (!(ord & (1u << i)))
clear_entry_from_vmid_table(core_vmid, i);
}
}
@@ -91,7 +95,7 @@ static int get_next_available_vmid(struct core_vmid *core_vmid)
uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
{
struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
- unsigned int vmid = 0;
+ int vmid = 0;
// Physical address gets vmid 0
if (ptb == 0)
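
The evict_vmids() fix above inverts the bit test: ord is a bitmask with a bit set for each VMID still referenced by hardware, so a table entry must be cleared when its bit is *not* set, matching the comment about value-0 positions. A standalone toy check of the corrected logic (hypothetical values, not driver code):

#include <assert.h>
#include <stdint.h>

#define MAX_VMID 16

/* Toy model of the fixed eviction loop: ord has a bit set per VMID
 * still in use; every other entry is evicted (table entry cleared).
 * VMID 0 is skipped, mirroring its reservation for physical addressing. */
static void evict(uint32_t ord, uint64_t ptb[MAX_VMID])
{
	for (int i = 1; i < MAX_VMID; i++)
		if (!(ord & (1u << i)))
			ptb[i] = 0;
}

int main(void)
{
	uint64_t ptb[MAX_VMID] = { [1] = 0xAA, [2] = 0xBB, [3] = 0xCC };

	evict(1u << 2, ptb); /* only VMID 2 still referenced */

	assert(ptb[1] == 0 && ptb[3] == 0); /* evicted */
	assert(ptb[2] == 0xBB);             /* kept */
	return 0;
}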
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..82b6cc25205e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SH_MASK_HEADER
+#define _wafl2_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
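
Each field in these generated headers is described by a SHIFT/MASK pair. A small standalone example of decoding the PCS error status word with them; the register value used is made up, not real hardware output:

#include <stdint.h>
#include <stdio.h>

#define BERAccumulator__SHIFT 0x18
#define BERAccumulator_MASK   0xFF000000L
#define DataLossErr_MASK      0x00000001L

int main(void)
{
        uint32_t status = 0x2A000001; /* example value, not from hardware */

        /* Field extraction: mask first, then shift down. */
        uint32_t ber = (status & BERAccumulator_MASK) >> BERAccumulator__SHIFT;

        printf("data loss: %d, BER accumulator: %u\n",
               !!(status & DataLossErr_MASK), (unsigned int)ber);
        return 0;
}
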
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
new file mode 100644
index 000000000000..4a51a90c611a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SMN_HEADER
+#define _wafl2_4_0_0_SMN_HEADER
+
+#define smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS 0x11cf0210
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..f37712f05b03
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SH_MASK_HEADER
+#define _xgmi_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX16_PCS_ERROR_STATUS
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
new file mode 100644
index 000000000000..6ccbac4ce87e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SMN_HEADER
+#define _xgmi_4_0_0_SMN_HEADER
+
+#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS 0x11af0210
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a607b1034962..a3c238c39ef5 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
uint32_t num_queue_per_pipe;
/* Bit n == 1 means Queue n is available for KFD */
- DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
+ DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
/* SDMA doorbell assignments (SOC15 and later chips only). Only
* specific doorbells are routed to each SDMA engine. Others
@@ -151,6 +151,7 @@ struct kgd2kfd_shared_resources {
/* Minor device number of the render node */
int drm_render_minor;
+
};
struct tile_config {
@@ -166,27 +167,6 @@ struct tile_config {
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
-/*
- * Allocation flag domains
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
-#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
-#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
-
-/*
- * Allocation flags attributes/access options.
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
-#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
-#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
-#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
-#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
-#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
-
/**
* struct kfd2kgd_calls
*
@@ -222,8 +202,6 @@ struct tile_config {
* @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
* Only used for no cp scheduling mode
*
- * @get_tile_config: Returns GPU-specific tiling mode information
- *
* @set_vm_context_page_table_base: Program page table base for a VMID
*
* @invalidate_tlbs: Invalidate TLBs for a specific PASID
@@ -236,6 +214,8 @@ struct tile_config {
*
* @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
*
+ * @get_unique_id: Returns the unique id of the current device
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -307,12 +287,11 @@ struct kfd2kgd_calls {
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
- int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
-
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
+ uint64_t (*get_unique_id)(struct kgd_dev *kgd);
};
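
The rename to cp_queue_bitmap separates compute-pipe (CP) queues from the SDMA doorbell assignments that follow in the struct. A sketch of a consumer, assuming the stock kernel bitmap helpers; this particular function is not part of the patch:

#include <linux/bitmap.h>

/* Hypothetical consumer: how many CP queues are exposed to KFD. */
static unsigned int count_kfd_cp_queues(const struct kgd2kfd_shared_resources *res)
{
        return bitmap_weight(res->cp_queue_bitmap, KGD_MAX_QUEUES);
}
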
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 96e81c7bc266..e8b27fab6aa1 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -23,15 +23,12 @@
#include <linux/firmware.h>
#include <linux/pci.h>
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
-#include "amd_pcie.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
@@ -121,20 +118,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
if (enabled) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
} else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
}
@@ -195,21 +192,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
return -EINVAL;
if (if_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, if_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
}
if (smu_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, smu_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
}
@@ -218,17 +207,19 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max, bool lock_needed)
{
int ret = 0;
- if (min < 0 && max < 0)
- return -EINVAL;
-
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -251,7 +242,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -259,7 +250,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -335,12 +326,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
- ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
- param);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &param);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+ param, &param);
if (ret)
return ret;
@@ -542,7 +529,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_id | ((argument & 0xFFFF) << 16));
+ table_id | ((argument & 0xFFFF) << 16),
+ NULL);
if (ret)
return ret;
@@ -900,6 +888,7 @@ static int smu_sw_init(void *handle)
mutex_init(&smu->sensor_lock);
mutex_init(&smu->metrics_lock);
+ mutex_init(&smu->message_lock);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -943,6 +932,13 @@ static int smu_sw_init(void *handle)
return ret;
}
+ if (adev->smu.ppt_funcs->i2c_eeprom_init) {
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -952,6 +948,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
+ if (adev->smu.ppt_funcs->i2c_eeprom_fini)
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
kfree(smu->irq_source);
smu->irq_source = NULL;
@@ -1113,12 +1112,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
+
/* smu_dump_pptable(smu); */
if (!amdgpu_sriov_vf(adev)) {
- ret = smu_set_driver_table_location(smu);
- if (ret)
- return ret;
-
/*
* Copy pptable bo in the vram to smc with SMU MSGs such as
* SetDriverDramAddr and TransferTableDram2Smu.
@@ -1155,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
}
}
}
+
+ if (smu->ppt_funcs->set_power_source) {
+ /*
+ * For Navi1X, manually switch it to AC mode as PMFW
+ * may boot it with DC mode.
+ */
+ if (adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+ if (ret) {
+ pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
+ return ret;
+ }
+ }
}
if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_notify_display_change(smu);
@@ -1454,29 +1468,84 @@ int smu_reset(struct smu_context *smu)
return ret;
}
+static int smu_disable_dpm(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
+ int ret = 0;
+ bool use_baco = !smu->is_apu &&
+ ((adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version.\n");
+ return ret;
+ }
+
+ /*
+ * Disable all enabled SMU features.
+ * This should eventually be handled in SMU FW. As a backup,
+ * the driver can keep issuing this call until the corresponding
+ * sequence in SMU FW is operational.
+ */
+ ret = smu_system_features_control(smu, false);
+ if (ret) {
+ pr_err("Failed to disable smu features.\n");
+ return ret;
+ }
+
+ /*
+ * Arcturus does not have BACO bit in disable feature mask.
+ * Enablement of BACO bit on Arcturus should be skipped.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ if (use_baco && (smu_version > 0x360e00))
+ return 0;
+ }
+
+ /* For baco, need to leave BACO feature enabled */
+ if (use_baco) {
+ /*
+ * Check whether SMU_FEATURE_BACO_BIT is supported through
+ * 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)'.
+ *
+ * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' cannot be
+ * used for the check here: it would always return false, since
+ * the 'smu_system_features_control(smu, false)' call just issued
+ * above disabled all SMU features.
+ */
+ if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+ if (ret) {
+ pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int smu_suspend(void *handle)
{
- int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = false;
+ int ret;
- if (!smu->pm_enabled)
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if(!smu->is_apu)
- baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
-
- ret = smu_system_features_control(smu, false);
- if (ret)
- return ret;
+ if (!smu->pm_enabled)
+ return 0;
- if (baco_feature_is_enabled) {
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
- if (ret) {
- pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_disable_dpm(smu);
+ if (ret)
return ret;
- }
}
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
@@ -1942,7 +2011,7 @@ int smu_set_mp1_state(struct smu_context *smu,
return 0;
}
- ret = smu_send_smc_msg(smu, msg);
+ ret = smu_send_smc_msg(smu, msg, NULL);
if (ret)
pr_err("[PrepareMp1] Failed!\n");
@@ -2018,6 +2087,29 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
return 0;
}
+int smu_set_ac_dc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ /* controlled by firmware */
+ if (smu->dc_controlled_by_gpio)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->set_power_source) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+ if (ret)
+ pr_err("Failed to switch to %s mode!\n",
+ smu->adev->pm.ac_power ? "AC" : "DC");
+ }
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
@@ -2620,12 +2712,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
return ret;
}
-
-int smu_send_smc_msg(struct smu_context *smu,
- enum smu_message_type msg)
-{
- int ret;
-
- ret = smu_send_smc_msg_with_param(smu, msg, 0);
- return ret;
-}
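
The change repeated throughout this file replaces the old send-then-read pair with a single smu_send_smc_msg()/smu_send_smc_msg_with_param() call whose last argument optionally receives the SMU response. A hedged before/after sketch, wrapped in a hypothetical helper:

/* Hypothetical helper illustrating the API change in this patch. */
static int get_smu_version(struct smu_context *smu, uint32_t *smu_version)
{
        /* Old (removed): send, then read the response in a second call,
         * with nothing preventing another message in between:
         *
         *      ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
         *      if (ret)
         *              return ret;
         *      ret = smu_read_smc_arg(smu, smu_version);
         */

        /* New: one call; the response is read back under message_lock.
         * Pass NULL when the message carries no payload. */
        return smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
}
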
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 14ba6aa876e2..c6d3bef15320 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -42,7 +41,7 @@
#include <linux/pci.h>
#include "amdgpu_ras.h"
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -127,6 +126,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(WaflTest, PPSMC_MSG_WaflTest),
MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -373,13 +373,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[%s] failed to get dpm levels!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[%s] number of clk levels is invalid!\n", __func__);
return -EINVAL;
@@ -389,12 +389,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[%s] failed to get dpm freq by index!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[%s] clk value is invalid!\n", __func__);
return -EINVAL;
@@ -552,13 +552,13 @@ static int arcturus_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
if (ret) {
pr_err("RunAfllBtc failed!\n");
return ret;
}
- return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -743,7 +743,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -758,7 +759,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -773,7 +775,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -1288,12 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1497,7 +1499,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
pr_err("Fail to set workload type %d\n", workload_type);
return ret;
@@ -2187,7 +2190,7 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &arcturus_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
res = i2c_add_adapter(control);
if (res)
@@ -2214,6 +2217,27 @@ static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
+static int arcturus_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_DFCstateControl is supported by 54.15.0 and onwards */
+ if (smu_version < 0x360F00) {
+ pr_err("DFCstateControl is only supported by PMFW 54.15.0 and onwards\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2277,7 +2301,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2307,6 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
+ .set_df_cstate = arcturus_set_df_cstate,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
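
arcturus_set_df_cstate() above shows the usual guard for a message newer than some firmware: read the PMFW version first and refuse the send if it is too old. The same pattern as a standalone sketch; send_if_supported() is a hypothetical name, the calls and the 0x360F00 (54.15.0) threshold follow the patch:

/* Hedged sketch: reject a message when the loaded PMFW predates it. */
static int send_if_supported(struct smu_context *smu, uint32_t state)
{
        uint32_t ver;
        int ret = smu_get_smc_version(smu, NULL, &ver);

        if (ret)
                return ret;
        if (ver < 0x360F00)
                return -EINVAL; /* DFCstateControl unsupported */

        return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
                                           state, NULL);
}
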
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index bf04cfefb283..7740488999df 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
switch (sources) {
default:
pr_err("Unknown throttling event sources.");
- /* fall through */
+ fallthrough;
case 0:
protection = false;
/* src is unused */
@@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change(
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
- /* fall through */
+ fallthrough;
case PP_PCIEGen2:
if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
+ fallthrough;
#endif
- /* fall through */
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
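
These hunks replace /* fall through */ comments with the fallthrough pseudo-keyword, which -Wimplicit-fallthrough can actually verify. A self-contained sketch of the idiom, with a local fallback definition standing in for the kernel's compiler_attributes.h one:

/* Sketch only: in the kernel, 'fallthrough' is provided centrally. */
#ifndef fallthrough
#define fallthrough __attribute__((__fallthrough__))
#endif

static int pick_pcie_gen(int requested, int current_gen)
{
        int gen;

        switch (requested) {
        case 3:
                if (current_gen == 3) {
                        gen = 3;
                        break;
                }
                fallthrough;    /* degrade to the next lower speed */
        case 2:
                gen = 2;
                break;
        default:
                gen = current_gen;
                break;
        }
        return gen;
}
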
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 92a65e3daff4..f29f95be1e56 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3382,7 +3382,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -3390,7 +3390,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3b3ec5666051..08b6ba39a6d7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -487,15 +487,16 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
+ bool use_baco = (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ (adev->in_runpm && amdgpu_asic_supports_baco(adev));
ret = vega20_init_sclk_threshold(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to init sclk threshold!",
return ret);
- if (adev->in_gpu_reset &&
- (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
-
+ if (use_baco) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
pr_err("Failed to apply vega20 baco workaround!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 97b6714e83e6..ae2c318dd6fa 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -362,6 +362,7 @@ struct smu_context
struct mutex mutex;
struct mutex sensor_lock;
struct mutex metrics_lock;
+ struct mutex message_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
@@ -371,6 +372,9 @@ struct smu_context
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
void *od_settings;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_sclk;
+#endif
uint32_t pstate_sclk;
uint32_t pstate_mclk;
@@ -404,6 +408,7 @@ struct smu_context
uint32_t smc_if_version;
bool uploading_custom_pp_table;
+ bool dc_controlled_by_gpio;
};
struct i2c_adapter;
@@ -514,8 +519,7 @@ struct pptable_funcs {
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg_with_param)(struct smu_context *smu,
- enum smu_message_type msg, uint32_t param);
- int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+ enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
@@ -567,6 +571,7 @@ struct pptable_funcs {
int (*override_pcie_parameters)(struct smu_context *smu);
uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
+ int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
};
int smu_load_microcode(struct smu_context *smu);
@@ -707,7 +712,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool lock_needed);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -715,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
+int smu_set_ac_dc(struct smu_context *smu);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index e3291259b249..f736d773f9d6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -110,7 +110,11 @@
//Others
#define PPSMC_MSG_SetMemoryChannelEnable 0x39
-#define PPSMC_Message_Count 0x3A
+//OOB
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A
+
+#define PPSMC_MSG_DFCstateControl 0x3B
+#define PPSMC_Message_Count 0x3C
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index 822cd8b5bf90..cea65093b6ad 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -37,7 +37,7 @@
#define PP_ASSERT_WITH_CODE(cond, msg, code) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
code; \
} \
} while (0)
@@ -45,7 +45,7 @@
#define PP_ASSERT(cond, msg) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
} \
} while (0)
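
Moving PP_ASSERT*() to pr_warn_ratelimited() keeps an assertion in a hot path from flooding dmesg. A crude userspace approximation of the idea, one message per five-second window; the kernel helper additionally tracks bursts and per-callsite state, so treat this as a sketch only:

#include <stdio.h>
#include <time.h>

/* Rough stand-in for pr_warn_ratelimited(): at most one message per
 * five-second window per expansion site. */
#define warn_ratelimited(fmt, ...)                                      \
        do {                                                            \
                static time_t last;                                     \
                time_t now = time(NULL);                                \
                if (now - last >= 5) {                                  \
                        last = now;                                     \
                        fprintf(stderr, fmt, ##__VA_ARGS__);            \
                }                                                       \
        } while (0)

int main(void)
{
        for (int i = 0; i < 1000000; i++)
                warn_ratelimited("assert failed (iteration %d)\n", i);
        return 0;
}
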
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index ac0120e384be..4b2da98afcd2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -701,7 +701,8 @@ typedef struct {
// APCC Settings
uint16_t PccThresholdLow;
uint16_t PccThresholdHigh;
- uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+ uint32_t MGpuFanBoostLimitRpm;
+ uint32_t PaddingAPCC[5];
// Temperature Dependent Vmin
uint16_t VDDGFX_TVmin; //Celcius
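
MGpuFanBoostLimitRpm is carved out of PaddingAPCC[6] above, which keeps the firmware ABI intact because the new field plus the shrunken pad occupy exactly the old bytes. A generic illustration with hypothetical struct names and a compile-time check:

#include <stdint.h>
#include <assert.h>

struct if_old { uint32_t padding_apcc[6]; };
struct if_new { uint32_t mgpu_fan_boost_limit_rpm; uint32_t padding_apcc[5]; };

/* Repurposing a reserved slot must not change the size, or the offsets
 * of any member that follows it in the shared interface. */
static_assert(sizeof(struct if_old) == sizeof(struct if_new),
              "repurposed padding must keep the ABI layout");
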
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index d5314d12628a..674e426ed59b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -28,8 +28,9 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x33
-#define SMU11_DRIVER_IF_VERSION_NV14 0x34
+#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_NV12 0x33
+#define SMU11_DRIVER_IF_VERSION_NV14 0x36
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
-
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
@@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
int smu_v11_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index d79e54b5ebf6..7fbebc1979cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping {
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg);
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
-
int smu_v12_0_wait_for_response(struct smu_context *smu);
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v12_0_check_fw_status(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index aed4d6e60907..9c60b38ab53a 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
@@ -31,7 +30,6 @@
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
-#include "soc15_common.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
@@ -349,7 +347,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
| FEATURE_MASK(FEATURE_FW_CTF_BIT)
@@ -393,6 +390,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+ if (smu->dc_controlled_by_gpio)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
/* only for navi10 A0 */
@@ -527,6 +527,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
+ if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
@@ -661,14 +664,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -686,14 +689,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
if (ret)
return ret;
}
@@ -970,7 +973,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return size;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return size;
break;
@@ -1042,7 +1045,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
int ret = 0;
uint32_t max_freq = 0;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
@@ -1066,7 +1069,8 @@ static int navi10_display_config_changed(struct smu_context *smu)
smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
if (ret)
return ret;
}
@@ -1093,7 +1097,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -1119,7 +1123,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -1391,7 +1395,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
if (workload_type < 0)
return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type, NULL);
return ret;
}
@@ -1456,7 +1460,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -1678,10 +1683,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu)
return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
}
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1746,10 +1751,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1859,12 +1864,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1904,7 +1908,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
if (ret)
return ret;
@@ -1950,13 +1955,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetVoltageByDpm,
- param);
+ param,
+ &value);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, &value);
*voltage = (uint16_t)value;
return 0;
@@ -2213,7 +2218,7 @@ static int navi10_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
if (ret)
pr_err("RunBtc failed!\n");
@@ -2225,9 +2230,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
int result = 0;
if (!enable)
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
else
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
return result;
}
@@ -2336,7 +2341,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2370,6 +2374,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_pptable_power_limit = navi10_get_pptable_power_limit,
.run_btc = navi10_run_btc,
.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
+ .set_power_source = smu_v11_0_set_power_source,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
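
navi10 now reports FEATURE_ACDC_BIT only when the powerplay table flags hardware DC control, with smu_set_ac_dc() handling the switch by message otherwise. The gating reduced to a hypothetical standalone helper; the field and macro names come from the patch:

/* Hedged sketch of the gating added here: hand AC/DC switching to the
 * PMFW only if the platform wires DC detection to a GPIO. */
static void navi10_gate_acdc(struct smu_context *smu, uint64_t *feature_mask)
{
        if (smu->dc_controlled_by_gpio)
                *feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
        /* otherwise smu_set_ac_dc() performs the switch via messages */
}
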
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 3ad0f4aa3aa3..7bf52ecba01d 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -24,7 +24,6 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
#include "smu_v12_0.h"
@@ -342,14 +341,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -367,14 +366,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -423,7 +422,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -456,7 +455,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -622,22 +621,24 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
soft_max_level == 0 ? min_freq :
- soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
soft_min_level == 2 ? max_freq :
- soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
+ NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -645,10 +646,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -672,14 +673,19 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0) {
- pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
+ /*
+ * TODO: If a case ever needs to switch to the powersave/default
+ * power mode, consider entering WORKLOAD_COMPUTE/WORKLOAD_CUSTOM
+ * for power saving.
+ */
+ pr_err_once("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
return -EINVAL;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
- pr_err("Fail to set workload type %d\n", workload_type);
+ pr_err_once("Fail to set workload type %d\n", workload_type);
return ret;
}
@@ -697,7 +703,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
@@ -705,7 +711,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -910,7 +916,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.powergate_vcn = smu_v12_0_powergate_vcn,
.powergate_jpeg = smu_v12_0_powergate_jpeg,
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
.init_smc_tables = smu_v12_0_init_smc_tables,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 7bd200ffcda8..40c35bcc5a0a 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -79,12 +79,13 @@
#define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param ? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0)
+
+static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg) {
+ return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
+}
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
#define smu_alloc_dpm_context(smu) \
((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
#define smu_init_display_count(smu, count) \
@@ -210,4 +211,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
#define smu_disable_umc_cdr_12gbps_workaround(smu) \
((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+#define smu_set_power_source(smu, power_src) \
+ ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+
#endif
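
smu_send_smc_msg() turns from an exported function into a static inline wrapper over the send-with-param macro, which keeps real type checking for the zero-parameter case. Its shape, restated with explanatory comments:

/* The wrapper simply forwards with param == 0; read_arg may be NULL
 * when the message returns no payload. */
static inline int smu_send_smc_msg(struct smu_context *smu,
                                   enum smu_message_type msg,
                                   uint32_t *read_arg)
{
        return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
}
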
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c9e5ce135fd4..d19e1d0d56c0 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,7 +26,6 @@
#define SMU_11_0_PARTIAL_PPTABLE
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
@@ -64,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -92,7 +91,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -101,11 +101,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v11_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -115,10 +116,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v11_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
smu_get_message_name(smu, msg), index, param, ret);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v11_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
@@ -262,6 +274,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
case CHIP_NAVI10:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
+ case CHIP_NAVI12:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ break;
case CHIP_NAVI14:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
@@ -671,12 +686,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
- address_high);
+ address_high,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
- address_low);
+ address_low,
+ NULL);
if (ret)
return ret;
@@ -685,15 +702,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_low = (uint32_t)lower_32_bits(address);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
- address_high);
+ address_high, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
- address_low);
+ address_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
- (uint32_t)memory_pool->size);
+ (uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
@@ -757,7 +774,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
int ret;
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk);
+ SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
@@ -784,11 +801,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
@@ -802,11 +821,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
- upper_32_bits(tool_table->mc_address));
+ upper_32_bits(tool_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
- lower_32_bits(tool_table->mc_address));
+ lower_32_bits(tool_table->mc_address),
+ NULL);
}
return ret;
@@ -819,7 +840,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
if (!smu->pm_enabled)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
@@ -837,12 +858,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
- feature_mask[1]);
+ feature_mask[1], NULL);
if (ret)
goto failed;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
- feature_mask[0]);
+ feature_mask[0], NULL);
if (ret)
goto failed;
@@ -862,17 +883,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return -EINVAL;
if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -894,7 +909,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
+ SMU_MSG_DisableAllSmuFeatures), NULL);
if (ret)
return ret;
@@ -923,7 +938,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
@@ -947,30 +962,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
- if (ret)
- return ret;
-
if (*clock != 0)
return 0;
/* if DC limit is zero, return AC limit */
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
-
- return ret;
+ return 0;
}
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
@@ -1106,7 +1115,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
pr_err("[%s] Set power limit Failed!\n", __func__);
return ret;
@@ -1136,11 +1145,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (asic_clk_id << 16));
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &freq);
+ (asic_clk_id << 16), &freq);
if (ret)
return ret;
}
@@ -1375,9 +1380,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
@@ -1515,10 +1520,18 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ NULL);
return ret;
}
+static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
+}
+
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
@@ -1552,6 +1565,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
break;
}
+ } else if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == 0xfe)
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
}
return 0;
@@ -1591,6 +1607,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
if (ret)
return ret;
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ 0xfe,
+ irq_src);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -1628,14 +1650,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
+ ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
- return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1704,12 +1726,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+ ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
goto out;
@@ -1777,19 +1799,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
param = (clk_id & 0xffff) << 16;
if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
@@ -1811,7 +1827,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -1819,7 +1835,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -1939,3 +1955,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
return ret;
}
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source,
+ NULL);
+}
+
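
The v11 implementation above (and the v12 one below) now holds smu->message_lock across the pre-check, the send, and the optional argument read, so a concurrent sender can no longer overwrite the response/argument registers between a message completing and its reply being read. The shape of the new critical section, condensed (a sketch; it assumes message_lock is initialized during SMU context setup):

	mutex_lock(&smu->message_lock);
	ret = smu_v11_0_wait_for_response(smu);	/* pre-check SMU state */
	if (ret)
		goto out;
	/* ... write param, send message, wait for response ... */
	if (read_arg)
		ret = smu_v11_0_read_arg(smu, read_arg);
out:
	mutex_unlock(&smu->message_lock);
	return ret;
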
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 518e6597bf2d..169ebdad87b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -50,7 +49,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -78,7 +77,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -87,11 +87,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v12_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -101,10 +102,21 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v12_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
index, ret, param);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v12_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
@@ -163,9 +175,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
}
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
@@ -174,9 +186,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL);
}
int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
@@ -185,9 +197,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
else
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
}
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -196,7 +208,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
return 0;
return smu_v12_0_send_msg_with_param(smu,
- SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+ SMU_MSG_SetGfxCGPG,
+ enable ? 1 : 0,
+ NULL);
}
int smu_v12_0_read_sensor(struct smu_context *smu,
@@ -262,10 +276,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0, timeout = 500;
if (enable) {
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
/* confirm gfx is back to "on" state, timeout is 0.5 second */
while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
@@ -331,17 +345,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu,
if (!feature_mask || num < 2)
return -EINVAL;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -388,14 +396,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
if (ret) {
pr_err("Attempt to get max GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, max);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -419,14 +424,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
if (ret) {
pr_err("Attempt to get min GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, min);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -450,7 +452,7 @@ failed:
}
int smu_v12_0_mode2_reset(struct smu_context *smu){
- return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -461,39 +463,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
if (ret)
return ret;
break;
case SMU_FCLK:
case SMU_MCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_VCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
if (ret)
return ret;
break;
@@ -512,11 +514,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 49e5ef3e3876..16aa171971d3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -33,6 +33,8 @@
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"
+#include "smu_v11_0_i2c.h"
+
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
struct vega20_smumgr *priv;
unsigned long tools_size = 0x19000;
int ret = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
struct cgs_firmware_info info = {0};
@@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
+ ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
+
return 0;
err4:
@@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
@@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
}
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 4ad8d6c14ee5..49ff3756bd9f 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -587,7 +586,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu)
static int vega20_run_btc_afll(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
}
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -670,13 +669,13 @@ vega20_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
return -EINVAL;
@@ -687,12 +686,12 @@ vega20_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[GetDpmFreqByIndex] clk value is invalid!");
return -EINVAL;
@@ -1200,7 +1199,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -1215,7 +1215,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -1230,7 +1231,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -1245,7 +1247,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_FCLK << 16) | (freq & 0xffff));
+ (PPCLK_FCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s fclk !\n",
max ? "max" : "min");
@@ -1260,7 +1263,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
if (!max) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+ (PPCLK_DCEFCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set hard min dcefclk !\n");
return ret;
@@ -1421,7 +1425,9 @@ static int vega20_force_clk_levels(struct smu_context *smu,
}
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ SMU_MSG_SetMinLinkDpmByIndex,
+ soft_min_level,
+ NULL);
if (ret)
pr_err("Failed to set min link dpm level!\n");
@@ -1477,13 +1483,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetAVFSVoltageByDpm,
- ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+ voltage);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, voltage);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1956,8 +1962,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type,
+ NULL);
return ret;
}
@@ -2029,7 +2037,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2047,7 +2056,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu)
if (!smu->smu_dpm.dpm_context)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
ret = vega20_set_uclk_to_highest_dpm_level(smu,
&dpm_table->mem_table);
if (ret)
@@ -2074,7 +2083,8 @@ static int vega20_display_config_changed(struct smu_context *smu)
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_send_smc_msg_with_param(smu,
SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
}
return ret;
@@ -2247,7 +2257,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -2262,7 +2273,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2853,8 +2865,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
- (uint32_t)pptable->FanTargetTemperature);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetFanTemperatureTarget,
+ (uint32_t)pptable->FanTargetTemperature,
+ NULL);
return ret;
}
@@ -2864,15 +2878,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu,
{
int ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed);
if (ret) {
pr_err("Attempt to get current RPM from SMC Failed!\n");
return ret;
}
- smu_read_smc_arg(smu, speed);
-
return 0;
}
@@ -3137,7 +3149,7 @@ static int vega20_set_df_cstate(struct smu_context *smu,
return -EINVAL;
}
- return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
static int vega20_update_pcie_parameters(struct smu_context *smu,
@@ -3155,7 +3167,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
}
return ret;
@@ -3229,7 +3242,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
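
The vega20 call sites all follow the same mechanical conversion: the separate smu_read_smc_arg() step folds into the send call, so the reply is read under the message lock. Before/after shape, taken from the GetDpmFreqByIndex path above (sketch only):

	/* before: two steps, racy against concurrent senders */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  (clk_id << 16 | i));
	if (!ret)
		smu_read_smc_arg(smu, &clk);

	/* after: one call, reply returned atomically */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  (clk_id << 16 | i), &clk);
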
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 8ae1e1f97a73..be7c29cec318 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -9,7 +9,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
@@ -138,24 +137,9 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
~ARCPGU_CTRL_ENABLE_MASK);
}
-static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
- }
-}
-
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
.mode_valid = arc_pgu_crtc_mode_valid,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
- .atomic_begin = arc_pgu_crtc_atomic_begin,
.atomic_enable = arc_pgu_crtc_atomic_enable,
.atomic_disable = arc_pgu_crtc_atomic_disable,
};
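
arc_pgu_crtc_atomic_begin() existed only to complete the pending vblank event by hand. As of this series the atomic helpers can do that for drivers without vblank interrupts: when crtc_state->no_vblank is set, drm_atomic_helper_fake_vblank() sends the event during commit. Simplified sketch of the helper-side mechanism the driver now relies on (not verbatim kernel code):

	/* per CRTC, during commit_tail processing */
	if (new_crtc_state->no_vblank && new_crtc_state->event) {
		spin_lock_irq(&dev->event_lock);
		drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
		new_crtc_state->event = NULL;
		spin_unlock_irq(&dev->event_lock);
	}
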
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 8fd7094beece..52839934f2fb 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -40,7 +40,7 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
return ret;
/* Link drm_bridge to encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
drm_encoder_cleanup(encoder);
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ac8a78bfda03..f2dc371bd8e5 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -129,18 +129,12 @@ int armada_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fbh, 1);
+ ret = drm_fb_helper_init(dev, fbh);
if (ret) {
DRM_ERROR("failed to initialize drm fb helper\n");
goto err_fb_helper;
}
- ret = drm_fb_helper_single_add_all_connectors(fbh);
- if (ret) {
- DRM_ERROR("failed to add fb connectors\n");
- goto err_fb_setup;
- }
-
ret = drm_fb_helper_initial_config(fbh, 32);
if (ret) {
DRM_ERROR("failed to set initial config\n");
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index f5d8780776ae..656d591b154b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -121,6 +121,7 @@ struct ast_private {
unsigned int next_index;
} cursor;
+ struct drm_encoder encoder;
struct drm_plane primary_plane;
struct drm_plane cursor_plane;
@@ -238,13 +239,8 @@ struct ast_crtc {
u8 offset_x, offset_y;
};
-struct ast_encoder {
- struct drm_encoder base;
-};
-
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
#define to_ast_connector(x) container_of(x, struct ast_connector, base)
-#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
struct ast_vbios_stdtable {
u8 misc;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b79f484e9bd2..18a0a4ce00f6 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -388,31 +388,9 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGBA8888 */
-
- struct ast_private *ast = dev->dev_private;
- unsigned long fbsize, fbpages, max_fbpages;
-
- /* To support double buffering, a framebuffer may not
- * consume more than half of the available VRAM.
- */
- max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
- return MODE_MEM;
-
- return MODE_OK;
-}
-
static const struct drm_mode_config_funcs ast_mode_funcs = {
.fb_create = drm_gem_fb_create,
- .mode_valid = ast_mode_config_mode_valid,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
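
ast (and bochs, further below) drop their hand-rolled mode_valid checks in favor of the shared VRAM helper, which performs an equivalent test: reject any mode whose worst-case framebuffer would not fit in video memory (the removed ast version reserved half of VRAM for double-buffering). Wiring sketch, using names from this patch:

	static const struct drm_mode_config_funcs example_mode_funcs = {
		.fb_create	= drm_gem_fb_create,
		/* rejects modes too large for the available VRAM */
		.mode_valid	= drm_vram_helper_mode_valid,
		.atomic_check	= drm_atomic_helper_check,
		.atomic_commit	= drm_atomic_helper_commit,
	};
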
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 34608f0499eb..cdd6c46d6557 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -40,6 +40,7 @@
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
@@ -833,8 +834,6 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct ast_vbios_mode_info *vbios_mode_info;
struct drm_display_mode *adjusted_mode;
- crtc->state->no_vblank = true;
-
ast_state = to_ast_crtc_state(crtc->state);
format = ast_state->format;
@@ -959,28 +958,18 @@ err_kfree:
* Encoder
*/
-static void ast_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_funcs ast_enc_funcs = {
- .destroy = ast_encoder_destroy,
-};
-
static int ast_encoder_init(struct drm_device *dev)
{
- struct ast_encoder *ast_encoder;
+ struct ast_private *ast = dev->dev_private;
+ struct drm_encoder *encoder = &ast->encoder;
+ int ret;
- ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
- if (!ast_encoder)
- return -ENOMEM;
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ if (ret)
+ return ret;
- drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ encoder->possible_crtcs = 1;
- ast_encoder->base.possible_crtcs = 1;
return 0;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 121b62682d80..e2019fe97fff 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -114,7 +114,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
}
if (bridge) {
- ret = drm_bridge_attach(&output->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&output->encoder, bridge, NULL, 0);
if (!ret)
return 0;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 10460878414e..addb0568c1af 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -23,7 +23,6 @@ static void bochs_unload(struct drm_device *dev)
bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
- bochs_hw_fini(dev);
kfree(bochs);
dev->dev_private = NULL;
}
@@ -69,6 +68,7 @@ static struct drm_driver bochs_driver = {
.major = 1,
.minor = 0,
DRM_GEM_VRAM_DRIVER,
+ .release = bochs_unload,
};
/* ---------------------------------------------------------------------- */
@@ -148,9 +148,9 @@ static void bochs_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- drm_dev_unregister(dev);
- bochs_unload(dev);
+ bochs_hw_fini(dev);
drm_dev_put(dev);
}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a4fc4e6aee39..dce4672e3fc8 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -4,6 +4,7 @@
#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include "bochs.h"
@@ -192,6 +193,8 @@ void bochs_hw_fini(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
+ /* TODO: shut down existing vram mappings */
+
if (bochs->mmio)
iounmap(bochs->mmio);
if (bochs->ioports)
@@ -205,6 +208,11 @@ void bochs_hw_fini(struct drm_device *dev)
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
bochs->xres = mode->hdisplay;
bochs->yres = mode->vdisplay;
bochs->bpp = 32;
@@ -230,11 +238,18 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+
+ drm_dev_exit(idx);
}
void bochs_hw_setformat(struct bochs_device *bochs,
const struct drm_format_info *format)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
(format->format >> 0) & 0xff,
(format->format >> 8) & 0xff,
@@ -254,13 +269,18 @@ void bochs_hw_setformat(struct bochs_device *bochs,
__func__, format->format);
break;
}
+
+ drm_dev_exit(idx);
}
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, int stride, u64 addr)
{
unsigned long offset;
- unsigned int vx, vy, vwidth;
+ unsigned int vx, vy, vwidth, idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
bochs->stride = stride;
offset = (unsigned long)addr +
@@ -275,4 +295,6 @@ void bochs_hw_setbase(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+
+ drm_dev_exit(idx);
}
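
With bochs_pci_remove() switched to drm_dev_unplug() above, every hardware-access path in bochs_hw.c is now bracketed with drm_dev_enter()/drm_dev_exit(), turning register I/O into a no-op once the PCI device is gone. The pattern in isolation (function name illustrative):

	void example_hw_access(struct bochs_device *bochs)
	{
		int idx;

		if (!drm_dev_enter(bochs->dev, &idx))
			return;	/* device unplugged: skip all MMIO */

		/* ... dispi/MMIO register writes ... */

		drm_dev_exit(idx);
	}
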
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 3f0006c2470d..8066d7d370d5 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "bochs.h"
@@ -57,16 +56,8 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct bochs_device *bochs = pipe->crtc.dev->dev_private;
- struct drm_crtc *crtc = &pipe->crtc;
bochs_plane_update(bochs, pipe->plane.state);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
@@ -92,32 +83,11 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
return count;
}
-static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct bochs_device *bochs =
- container_of(connector, struct bochs_device, connector);
- unsigned long size = mode->hdisplay * mode->vdisplay * 4;
-
- /*
- * Make sure we can fit two framebuffers into video memory.
- * This allows up to 1600x1200 with 16 MB (default size).
- * If you want more try this:
- * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
- */
- if (size * 2 > bochs->fb_size)
- return MODE_BAD;
-
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
- .mode_valid = bochs_connector_mode_valid,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -157,6 +127,7 @@ bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_config_funcs bochs_mode_funcs = {
.fb_create = bochs_gem_fb_create,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -192,6 +163,9 @@ int bochs_kms_init(struct bochs_device *bochs)
void bochs_kms_fini(struct bochs_device *bochs)
{
+ if (!bochs->dev->mode_config.num_connector)
+ return;
+
drm_atomic_helper_shutdown(bochs->dev);
drm_mode_config_cleanup(bochs->dev);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 0b9ca5862455..aaed2347ace9 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,13 +27,16 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
-config DRM_DUMB_VGA_DAC
- tristate "Dumb VGA DAC Bridge support"
+config DRM_DISPLAY_CONNECTOR
+ tristate "Display connector support"
depends on OF
- select DRM_KMS_HELPER
help
- Support for non-programmable RGB to VGA DAC bridges, such as ADI
- ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
+ Driver for display connectors with support for DDC and hot-plug
+ detection. Most display controllers handle display connectors
+ internally and don't need this driver, but the DRM subsystem is
+ moving towards separating connector handling from display controllers
+ on ARM-based platforms. Saying Y here when this driver is not needed
+ will not cause any issue.
config DRM_LVDS_CODEC
tristate "Transparent LVDS encoders and decoders support"
@@ -72,6 +75,17 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_PARADE_PS8640
+ tristate "Parade PS8640 MIPI DSI to eDP Converter"
+ depends on OF
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ help
+ Choose this option if you have a PS8640 for display.
+ The PS8640 is a high-performance and low-power
+ MIPI DSI to eDP converter.
+
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
@@ -87,6 +101,7 @@ config DRM_SII902X
select DRM_KMS_HELPER
select REGMAP_I2C
select I2C_MUX
+ select SND_SOC_HDMI_CODEC if SND_SOC
---help---
Silicon Image sii902x bridge chip driver.
@@ -98,6 +113,14 @@ config DRM_SII9234
It is an I2C driver, that detects connection of MHL bridge
and starts encapsulation of HDMI signal.
+config DRM_SIMPLE_BRIDGE
+ tristate "Simple DRM bridge support"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Support for non-programmable DRM bridges, such as ADI ADV7123, TI
+ THS8134 and THS8135 or passive resistor ladder DACs.
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -122,6 +145,16 @@ config DRM_TOSHIBA_TC358767
---help---
Toshiba TC358767 eDP bridge chip driver.
+config DRM_TOSHIBA_TC358768
+ tristate "Toshiba TC358768 MIPI DSI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
@@ -139,6 +172,14 @@ config DRM_TI_SN65DSI86
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver
+config DRM_TI_TPD12S015
+ tristate "TI TPD12S015 HDMI level shifter and ESD protection"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Texas Instruments TPD12S015 HDMI level shifter and ESD protection
+ driver.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cd16ce830270..6fb062b5b0f0 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,19 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
-obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
+obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
+obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 8a56ff81f4fb..47d4eb9e845d 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -4,8 +4,9 @@ config DRM_I2C_ADV7511
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
+ select DRM_MIPI_DSI
help
- Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+ Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
config DRM_I2C_ADV7511_AUDIO
bool "ADV7511 HDMI Audio driver"
@@ -15,16 +16,8 @@ config DRM_I2C_ADV7511_AUDIO
Support the ADV7511 HDMI Audio interface. This is used in
conjunction with the AV7511 HDMI driver.
-config DRM_I2C_ADV7533
- bool "ADV7533 encoder"
- depends on DRM_I2C_ADV7511
- select DRM_MIPI_DSI
- default y
- help
- Support for the Analog Devices ADV7533 DSI to HDMI encoder.
-
config DRM_I2C_ADV7511_CEC
- bool "ADV7511/33 HDMI CEC driver"
+ bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
select CEC_CORE
default y
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index b46ebeb35fd4..d8ceb534b51f 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-adv7511-y := adv7511_drv.o
+adv7511-y := adv7511_drv.o adv7533.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
-adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 52b2adfdc877..a9bb734366ae 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -320,6 +320,7 @@ struct adv7511_video_config {
enum adv7511_type {
ADV7511,
ADV7533,
+ ADV7535,
};
#define ADV7511_MAX_ADDRS 3
@@ -393,7 +394,6 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
}
#endif
-#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
@@ -402,44 +402,6 @@ int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
-#else
-static inline void adv7533_dsi_power_on(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_dsi_power_off(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_mode_set(struct adv7511 *adv,
- const struct drm_display_mode *mode)
-{
-}
-
-static inline int adv7533_patch_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_attach_dsi(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline void adv7533_detach_dsi(struct adv7511 *adv)
-{
-}
-
-static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
-{
- return -ENODEV;
-}
-#endif
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 9e13e466e72c..87b58c1acff4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -367,7 +367,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_on(adv7511);
adv7511->powered = true;
}
@@ -387,7 +387,7 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
static void adv7511_power_off(struct adv7511 *adv7511)
{
__adv7511_power_off(adv7511);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_off(adv7511);
adv7511->powered = false;
}
@@ -761,7 +761,7 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_mode_set(adv7511, adj_mode);
drm_mode_copy(&adv7511->curr_mode, adj_mode);
@@ -847,11 +847,17 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
adv7511_mode_set(adv, mode, adj_mode);
}
-static int adv7511_bridge_attach(struct drm_bridge *bridge)
+static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -874,7 +880,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
&adv7511_connector_helper_funcs);
drm_connector_attach_encoder(&adv->connector, bridge->encoder);
- if (adv->type == ADV7533)
+ if (adv->type == ADV7533 || adv->type == ADV7535)
ret = adv7533_attach_dsi(adv);
if (adv->i2c_main->irq)
@@ -952,7 +958,7 @@ static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
struct i2c_client *i2c = to_i2c_client(dev);
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
reg -= ADV7533_REG_CEC_OFFSET;
switch (reg) {
@@ -994,7 +1000,7 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
goto err;
}
- if (adv->type == ADV7533) {
+ if (adv->type == ADV7533 || adv->type == ADV7535) {
ret = adv7533_patch_cec_registers(adv);
if (ret)
goto err;
@@ -1242,7 +1248,7 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
@@ -1266,9 +1272,8 @@ static const struct i2c_device_id adv7511_i2c_ids[] = {
{ "adv7511", ADV7511 },
{ "adv7511w", ADV7511 },
{ "adv7513", ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ "adv7533", ADV7533 },
-#endif
+ { "adv7535", ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
@@ -1277,9 +1282,8 @@ static const struct of_device_id adv7511_of_ids[] = {
{ .compatible = "adi,adv7511", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7513", .data = (void *)ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ .compatible = "adi,adv7533", .data = (void *)ADV7533 },
-#endif
+ { .compatible = "adi,adv7535", .data = (void *)ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(of, adv7511_of_ids);
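
Every bridge driver in this series gains the same two-argument attach signature: the caller passes attach flags, and bridges that still create their own drm_connector must refuse DRM_BRIDGE_ATTACH_NO_CONNECTOR until they are converted to the connector-optional model. Minimal sketch of the contract (function name illustrative):

	static int example_bridge_attach(struct drm_bridge *bridge,
					 enum drm_bridge_attach_flags flags)
	{
		if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
			DRM_ERROR("Fix bridge driver to make connector optional!");
			return -EINVAL;
		}

		if (!bridge->encoder) {
			DRM_ERROR("Parent encoder object not found");
			return -ENODEV;
		}

		/* ... create the connector and attach it to bridge->encoder ... */
		return 0;
	}
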
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 2dfa2fd2a23b..2bc6e4f85171 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -519,11 +519,17 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx6345_bridge_attach(struct drm_bridge *bridge)
+static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -711,16 +717,20 @@ static int anx6345_i2c_probe(struct i2c_client *client,
DRM_DEBUG("No panel found\n");
/* 1.2V digital core power regulator */
- anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12-supply");
+ anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
if (IS_ERR(anx6345->dvdd12)) {
- DRM_ERROR("dvdd12-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd12) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd12 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd12));
return PTR_ERR(anx6345->dvdd12);
}
/* 2.5V digital core power regulator */
- anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25-supply");
+ anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
if (IS_ERR(anx6345->dvdd25)) {
- DRM_ERROR("dvdd25-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd25) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd25 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd25));
return PTR_ERR(anx6345->dvdd25);
}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 41867be03751..0d5a5ad0c9ee 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -722,10 +722,9 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+ SP_DP_MAIN_LINK_BW_SET_REG,
+ anx78xx->dpcd[DP_MAX_LINK_RATE]);
if (err)
return err;
@@ -887,11 +886,17 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6effe532f820..9ded2cef57dd 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1216,13 +1216,19 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -1289,19 +1295,21 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
return conn_state->crtc;
}
-static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Don't touch the panel if we're coming back from PSR */
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
@@ -1366,20 +1374,22 @@ out_dp_clk_pre:
return ret;
}
-static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Not a full enable, just disable PSR and continue */
if (old_crtc_state && old_crtc_state->self_refresh_active) {
ret = analogix_dp_disable_psr(dp);
@@ -1440,18 +1450,20 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state)
goto out;
@@ -1463,20 +1475,21 @@ out:
analogix_dp_bridge_disable(bridge);
}
-static
-void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state || !new_crtc_state->self_refresh_active)
return;
@@ -1563,6 +1576,9 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable,
.atomic_enable = analogix_dp_bridge_atomic_enable,
.atomic_disable = analogix_dp_bridge_atomic_disable,
@@ -1588,7 +1604,7 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev,
bridge->driver_private = dp;
bridge->funcs = &analogix_dp_bridge_funcs;
- ret = drm_bridge_attach(dp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach drm bridge\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index b7c97f060241..69c3892caee5 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -644,7 +644,8 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
return 0;
}
-static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
+static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -656,7 +657,8 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ flags);
}
static enum drm_mode_status
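
cdns-dsi is purely a pass-through with respect to the new attach flags: it hands them on unchanged so the next bridge in the chain decides whether a connector may be created. A sketch of that forwarding idiom, with illustrative names (my_bridge, to_my_bridge) not taken from the patch:

static int my_passthrough_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct my_bridge *priv = to_my_bridge(bridge);

	/* Forward the flags unchanged down the chain. */
	return drm_bridge_attach(bridge->encoder, priv->next_bridge,
				 bridge, flags);
}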
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
new file mode 100644
index 000000000000..4d278573cdb9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+
+struct display_connector {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+};
+
+static inline struct display_connector *
+to_display_connector(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct display_connector, bridge);
+}
+
+static int display_connector_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static enum drm_connector_status
+display_connector_detect(struct drm_bridge *bridge)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ if (conn->hpd_gpio) {
+ if (gpiod_get_value_cansleep(conn->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ if (conn->bridge.ddc && drm_probe_ddc(conn->bridge.ddc))
+ return connector_status_connected;
+
+ switch (conn->bridge.type) {
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ /*
+ * For DVI and HDMI connectors a DDC probe failure indicates
+ * that no cable is connected.
+ */
+ return connector_status_disconnected;
+
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_VGA:
+ default:
+ /*
+		 * Composite and S-Video connectors have no detection means
+		 * other than the HPD GPIO. For VGA connectors, even if we have
+ * an I2C bus, we can't assume that the cable is disconnected
+ * if drm_probe_ddc fails, as some cables don't wire the DDC
+ * pins.
+ */
+ return connector_status_unknown;
+ }
+}
+
+static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ return drm_get_edid(connector, conn->bridge.ddc);
+}
+
+static const struct drm_bridge_funcs display_connector_bridge_funcs = {
+ .attach = display_connector_attach,
+ .detect = display_connector_detect,
+ .get_edid = display_connector_get_edid,
+};
+
+static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
+{
+ struct display_connector *conn = arg;
+ struct drm_bridge *bridge = &conn->bridge;
+
+ drm_bridge_hpd_notify(bridge, display_connector_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int display_connector_probe(struct platform_device *pdev)
+{
+ struct display_connector *conn;
+ unsigned int type;
+ const char *label;
+ int ret;
+
+ conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, conn);
+
+ type = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ /* Get the exact connector type. */
+ switch (type) {
+ case DRM_MODE_CONNECTOR_DVII: {
+ bool analog, digital;
+
+ analog = of_property_read_bool(pdev->dev.of_node, "analog");
+ digital = of_property_read_bool(pdev->dev.of_node, "digital");
+ if (analog && !digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVIA;
+ } else if (!analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVID;
+ } else if (analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVII;
+ } else {
+ dev_err(&pdev->dev, "DVI connector with no type\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case DRM_MODE_CONNECTOR_HDMIA: {
+ const char *hdmi_type;
+
+ ret = of_property_read_string(pdev->dev.of_node, "type",
+ &hdmi_type);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "HDMI connector with no type\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(hdmi_type, "a") || !strcmp(hdmi_type, "c") ||
+ !strcmp(hdmi_type, "d") || !strcmp(hdmi_type, "e")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ } else if (!strcmp(hdmi_type, "b")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIB;
+ } else {
+ dev_err(&pdev->dev,
+ "Unsupported HDMI connector type '%s'\n",
+ hdmi_type);
+ return -EINVAL;
+ }
+
+ break;
+ }
+
+ default:
+ conn->bridge.type = type;
+ break;
+ }
+
+ /* All the supported connector types support interlaced modes. */
+ conn->bridge.interlace_allowed = true;
+
+ /* Get the optional connector label. */
+ of_property_read_string(pdev->dev.of_node, "label", &label);
+
+ /*
+ * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * edge interrupts, register an interrupt handler.
+ */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA) {
+ conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
+ GPIOD_IN);
+ if (IS_ERR(conn->hpd_gpio)) {
+ if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to retrieve HPD GPIO\n");
+ return PTR_ERR(conn->hpd_gpio);
+ }
+
+ conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
+ } else {
+ conn->hpd_irq = -EINVAL;
+ }
+
+ if (conn->hpd_irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, conn->hpd_irq,
+ NULL, display_connector_hpd_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "HPD", conn);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "Failed to request HPD edge interrupt, falling back to polling\n");
+ conn->hpd_irq = -EINVAL;
+ }
+ }
+
+ /* Retrieve the DDC I2C adapter for DVI, HDMI and VGA connectors. */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_VGA) {
+ struct device_node *phandle;
+
+ phandle = of_parse_phandle(pdev->dev.of_node, "ddc-i2c-bus", 0);
+ if (phandle) {
+ conn->bridge.ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!conn->bridge.ddc)
+ return -EPROBE_DEFER;
+ } else {
+ dev_dbg(&pdev->dev,
+ "No I2C bus specified, disabling EDID readout\n");
+ }
+ }
+
+ conn->bridge.funcs = &display_connector_bridge_funcs;
+ conn->bridge.of_node = pdev->dev.of_node;
+
+ if (conn->bridge.ddc)
+ conn->bridge.ops |= DRM_BRIDGE_OP_EDID
+ | DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_gpio)
+ conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_irq >= 0)
+ conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
+ dev_dbg(&pdev->dev,
+ "Found %s display connector '%s' %s DDC bus and %s HPD GPIO (ops 0x%x)\n",
+ drm_get_connector_type_name(conn->bridge.type),
+ label ? label : "<unlabelled>",
+ conn->bridge.ddc ? "with" : "without",
+ conn->hpd_gpio ? "with" : "without",
+ conn->bridge.ops);
+
+ drm_bridge_add(&conn->bridge);
+
+ return 0;
+}
+
+static int display_connector_remove(struct platform_device *pdev)
+{
+ struct display_connector *conn = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&conn->bridge);
+
+ if (!IS_ERR(conn->bridge.ddc))
+ i2c_put_adapter(conn->bridge.ddc);
+
+ return 0;
+}
+
+static const struct of_device_id display_connector_match[] = {
+ {
+ .compatible = "composite-video-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_Composite,
+ }, {
+ .compatible = "dvi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DVII,
+ }, {
+ .compatible = "hdmi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_HDMIA,
+ }, {
+ .compatible = "svideo-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_SVIDEO,
+ }, {
+ .compatible = "vga-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_VGA,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, display_connector_match);
+
+static struct platform_driver display_connector_driver = {
+ .probe = display_connector_probe,
+ .remove = display_connector_remove,
+ .driver = {
+ .name = "display-connector",
+ .of_match_table = display_connector_match,
+ },
+};
+module_platform_driver(display_connector_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Display connector driver");
+MODULE_LICENSE("GPL");
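
display-connector is only usable from a connector-less chain: its attach callback fails unless DRM_BRIDGE_ATTACH_NO_CONNECTOR is set, and it exposes detect/get_edid through bridge ops instead of creating a drm_connector itself. A hedged sketch of the consumer side, assuming the drm_bridge_connector helper merged in the same series (my_output_init is an illustrative name):

static int my_output_init(struct drm_device *drm,
			  struct drm_encoder *encoder,
			  struct drm_bridge *chain)
{
	struct drm_connector *connector;
	int ret;

	/* Ask every bridge in the chain not to create a connector. */
	ret = drm_bridge_attach(encoder, chain, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* Build one connector backed by the chain's bridge ops. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}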
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
deleted file mode 100644
index cc33dc411b9e..000000000000
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015-2016 Free Electrons
- * Copyright (C) 2015-2016 NextThing Co
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- */
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-
-struct dumb_vga {
- struct drm_bridge bridge;
- struct drm_connector connector;
-
- struct i2c_adapter *ddc;
- struct regulator *vdd;
-};
-
-static inline struct dumb_vga *
-drm_bridge_to_dumb_vga(struct drm_bridge *bridge)
-{
- return container_of(bridge, struct dumb_vga, bridge);
-}
-
-static inline struct dumb_vga *
-drm_connector_to_dumb_vga(struct drm_connector *connector)
-{
- return container_of(connector, struct dumb_vga, connector);
-}
-
-static int dumb_vga_get_modes(struct drm_connector *connector)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
- struct edid *edid;
- int ret;
-
- if (!vga->ddc)
- goto fallback;
-
- edid = drm_get_edid(connector, vga->ddc);
- if (!edid) {
- DRM_INFO("EDID readout failed, falling back to standard modes\n");
- goto fallback;
- }
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- return ret;
-
-fallback:
- /*
- * In case we cannot retrieve the EDIDs (broken or missing i2c
- * bus), fallback on the XGA standards
- */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anyone can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
-}
-
-static const struct drm_connector_helper_funcs dumb_vga_con_helper_funcs = {
- .get_modes = dumb_vga_get_modes,
-};
-
-static enum drm_connector_status
-dumb_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
-
- /*
- * Even if we have an I2C bus, we can't assume that the cable
- * is disconnected if drm_probe_ddc fails. Some cables don't
- * wire the DDC pins, or the I2C bus might not be working at
- * all.
- */
- if (vga->ddc && drm_probe_ddc(vga->ddc))
- return connector_status_connected;
-
- return connector_status_unknown;
-}
-
-static const struct drm_connector_funcs dumb_vga_con_funcs = {
- .detect = dumb_vga_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int dumb_vga_attach(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret;
-
- if (!bridge->encoder) {
- DRM_ERROR("Missing encoder\n");
- return -ENODEV;
- }
-
- drm_connector_helper_add(&vga->connector,
- &dumb_vga_con_helper_funcs);
- ret = drm_connector_init_with_ddc(bridge->dev, &vga->connector,
- &dumb_vga_con_funcs,
- DRM_MODE_CONNECTOR_VGA,
- vga->ddc);
- if (ret) {
- DRM_ERROR("Failed to initialize connector\n");
- return ret;
- }
-
- drm_connector_attach_encoder(&vga->connector,
- bridge->encoder);
-
- return 0;
-}
-
-static void dumb_vga_enable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret = 0;
-
- if (vga->vdd)
- ret = regulator_enable(vga->vdd);
-
- if (ret)
- DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
-}
-
-static void dumb_vga_disable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
-
- if (vga->vdd)
- regulator_disable(vga->vdd);
-}
-
-static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
- .attach = dumb_vga_attach,
- .enable = dumb_vga_enable,
- .disable = dumb_vga_disable,
-};
-
-static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
-{
- struct device_node *phandle, *remote;
- struct i2c_adapter *ddc;
-
- remote = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!remote)
- return ERR_PTR(-EINVAL);
-
- phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- of_node_put(remote);
- if (!phandle)
- return ERR_PTR(-ENODEV);
-
- ddc = of_get_i2c_adapter_by_node(phandle);
- of_node_put(phandle);
- if (!ddc)
- return ERR_PTR(-EPROBE_DEFER);
-
- return ddc;
-}
-
-static int dumb_vga_probe(struct platform_device *pdev)
-{
- struct dumb_vga *vga;
-
- vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
- if (!vga)
- return -ENOMEM;
- platform_set_drvdata(pdev, vga);
-
- vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
- if (IS_ERR(vga->vdd)) {
- int ret = PTR_ERR(vga->vdd);
- if (ret == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- vga->vdd = NULL;
- dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
- }
-
- vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
- if (IS_ERR(vga->ddc)) {
- if (PTR_ERR(vga->ddc) == -ENODEV) {
- dev_dbg(&pdev->dev,
- "No i2c bus specified. Disabling EDID readout\n");
- vga->ddc = NULL;
- } else {
- dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
- return PTR_ERR(vga->ddc);
- }
- }
-
- vga->bridge.funcs = &dumb_vga_bridge_funcs;
- vga->bridge.of_node = pdev->dev.of_node;
- vga->bridge.timings = of_device_get_match_data(&pdev->dev);
-
- drm_bridge_add(&vga->bridge);
-
- return 0;
-}
-
-static int dumb_vga_remove(struct platform_device *pdev)
-{
- struct dumb_vga *vga = platform_get_drvdata(pdev);
-
- drm_bridge_remove(&vga->bridge);
-
- if (vga->ddc)
- i2c_put_adapter(vga->ddc);
-
- return 0;
-}
-
-/*
- * We assume the ADV7123 DAC is the "default" for historical reasons
- * Information taken from the ADV7123 datasheet, revision D.
- * NOTE: the ADV7123EP seems to have other timings and need a new timings
- * set if used.
- */
-static const struct drm_bridge_timings default_dac_timings = {
- /* Timing specifications, datasheet page 7 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- .setup_time_ps = 500,
- .hold_time_ps = 1500,
-};
-
-/*
- * Information taken from the THS8134, THS8134A, THS8134B datasheet named
- * "SLVS205D", dated May 1990, revised March 2000.
- */
-static const struct drm_bridge_timings ti_ths8134_dac_timings = {
- /* From timing diagram, datasheet page 9 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 12 */
- .setup_time_ps = 3000,
- /* I guess this means latched input */
- .hold_time_ps = 0,
-};
-
-/*
- * Information taken from the THS8135 datasheet named "SLAS343B", dated
- * May 2001, revised April 2013.
- */
-static const struct drm_bridge_timings ti_ths8135_dac_timings = {
- /* From timing diagram, datasheet page 14 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 16 */
- .setup_time_ps = 2000,
- .hold_time_ps = 500,
-};
-
-static const struct of_device_id dumb_vga_match[] = {
- {
- .compatible = "dumb-vga-dac",
- .data = NULL,
- },
- {
- .compatible = "adi,adv7123",
- .data = &default_dac_timings,
- },
- {
- .compatible = "ti,ths8135",
- .data = &ti_ths8135_dac_timings,
- },
- {
- .compatible = "ti,ths8134",
- .data = &ti_ths8134_dac_timings,
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, dumb_vga_match);
-
-static struct platform_driver dumb_vga_driver = {
- .probe = dumb_vga_probe,
- .remove = dumb_vga_remove,
- .driver = {
- .name = "dumb-vga-dac",
- .of_match_table = dumb_vga_match,
- },
-};
-module_platform_driver(dumb_vga_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("Dumb VGA DAC bridge driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 5f04cc11227e..24fb1befdfa2 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -21,19 +21,23 @@ struct lvds_codec {
u32 connector_type;
};
-static int lvds_codec_attach(struct drm_bridge *bridge)
+static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ return container_of(bridge, struct lvds_codec, bridge);
+}
+
+static int lvds_codec_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
- bridge);
+ bridge, flags);
}
static void lvds_codec_enable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
@@ -41,14 +45,13 @@ static void lvds_codec_enable(struct drm_bridge *bridge)
static void lvds_codec_disable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
}
-static struct drm_bridge_funcs funcs = {
+static const struct drm_bridge_funcs funcs = {
.attach = lvds_codec_attach,
.enable = lvds_codec_enable,
.disable = lvds_codec_disable,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index e8a49f6146c6..6200f12a37e6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -206,13 +206,19 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
+static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct drm_connector *connector = &ge_b850v3_lvds_ptr->connector;
struct i2c_client *stdp4028_i2c
= ge_b850v3_lvds_ptr->stdp4028_i2c;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 57ff01339559..438e566ce0a4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -236,11 +236,17 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index f66777e24968..8461ee8304ba 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -53,12 +53,16 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int panel_bridge_attach(struct drm_bridge *bridge)
+static int panel_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
return -ENODEV;
@@ -120,6 +124,14 @@ static void panel_bridge_post_disable(struct drm_bridge *bridge)
drm_panel_unprepare(panel_bridge->panel);
}
+static int panel_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ return drm_panel_get_modes(panel_bridge->panel, connector);
+}
+
static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.attach = panel_bridge_attach,
.detach = panel_bridge_detach,
@@ -127,6 +139,11 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.enable = panel_bridge_enable,
.disable = panel_bridge_disable,
.post_disable = panel_bridge_post_disable,
+ .get_modes = panel_bridge_get_modes,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
};
/**
@@ -151,7 +168,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* known type. Calling this function with a panel whose connector type is
* DRM_MODE_CONNECTOR_Unknown will return NULL.
*
- * See devm_drm_panel_bridge_add() for an automatically manged version of this
+ * See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
@@ -196,6 +213,8 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
#ifdef CONFIG_OF
panel_bridge->bridge.of_node = panel->dev->of_node;
#endif
+ panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
+ panel_bridge->bridge.type = connector_type;
drm_bridge_add(&panel_bridge->bridge);
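
With .get_modes, the bus-format propagation helper and the atomic state hooks in place, the panel bridge can terminate a connector-less chain. A minimal sketch of an encoder driver using it under that model (my_encoder_add_panel is an illustrative name):

static int my_encoder_add_panel(struct device *dev,
				struct drm_encoder *encoder,
				struct drm_panel *panel)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_panel_bridge_add(dev, panel);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* The new .get_modes hook lets the panel bridge accept
	 * DRM_BRIDGE_ATTACH_NO_CONNECTOR. */
	return drm_bridge_attach(encoder, bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}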
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 10c47c008b40..d789ea2a7fb9 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -476,11 +476,17 @@ static const struct drm_connector_funcs ps8622_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ps8622_attach(struct drm_bridge *bridge)
+static int ps8622_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
new file mode 100644
index 000000000000..d3a53442d449
--- /dev/null
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define PAGE2_GPIO_H 0xa7
+#define PS_GPIO9 BIT(1)
+#define PAGE2_I2C_BYPASS 0xea
+#define I2C_BYPASS_EN 0xd0
+#define PAGE2_MCS_EN 0xf3
+#define MCS_EN BIT(0)
+#define PAGE3_SET_ADD 0xfe
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+#define DP_NUM_LANES 4
+
+/*
+ * PS8640 uses multiple addresses:
+ * page[0]: for DP control
+ * page[1]: for VIDEO Bridge
+ * page[2]: for control top
+ * page[3]: for DSI Link Control1
+ * page[4]: for MIPI Phy
+ * page[5]: for VPLL
+ * page[6]: for DSI Link Control2
+ * page[7]: for SPI ROM mapping
+ */
+enum page_addr_offset {
+ PAGE0_DP_CNTL = 0,
+ PAGE1_VDO_BDG,
+ PAGE2_TOP_CNTL,
+ PAGE3_DSI_CNTL1,
+ PAGE4_MIPI_PHY,
+ PAGE5_VPLL,
+ PAGE6_DSI_CNTL2,
+ PAGE7_SPI_CNTL,
+ MAX_DEVS
+};
+
+enum ps8640_vdo_control {
+ DISABLE = VDO_DIS,
+ ENABLE = VDO_EN,
+};
+
+struct ps8640 {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+ struct mipi_dsi_device *dsi;
+ struct i2c_client *page[MAX_DEVS];
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_powerdown;
+};
+
+static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
+{
+ return container_of(e, struct ps8640, bridge);
+}
+
+static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
+ const enum ps8640_vdo_control ctrl)
+{
+ struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+ u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
+ sizeof(vdo_ctrl_buf),
+ vdo_ctrl_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
+ unsigned long timeout;
+ int ret, status;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0) {
+ DRM_ERROR("cannot enable regulators %d\n", ret);
+ return;
+ }
+
+ gpiod_set_value(ps_bridge->gpio_powerdown, 0);
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ usleep_range(2000, 2500);
+ gpiod_set_value(ps_bridge->gpio_reset, 0);
+
+ /*
+ * Wait for the ps8640 embedded MCU to be ready
+ * First wait 200ms and then check the MCU ready flag every 20ms
+ */
+ msleep(200);
+
+ timeout = jiffies + msecs_to_jiffies(200) + 1;
+
+ while (time_is_after_jiffies(timeout)) {
+ status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
+ goto err_regulators_disable;
+ }
+ if ((status & PS_GPIO9) == PS_GPIO9)
+ break;
+
+ msleep(20);
+ }
+
+ msleep(50);
+
+ /*
+ * The Manufacturer Command Set (MCS) is a device dependent interface
+ * intended for factory programming of the display module default
+ * parameters. Once the display module is configured, the MCS shall be
+ * disabled by the manufacturer. Once disabled, all MCS commands are
+ * ignored by the display interface.
+ */
+ status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
+ goto err_regulators_disable;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
+ status & ~MCS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+ if (ret) {
+ DRM_ERROR("failed to enable VDO: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+	/* Switch to access the eDP panel's EDID through I2C */
+ ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
+ I2C_BYPASS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ return;
+
+err_regulators_disable:
+ regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int ret;
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+ if (ret < 0)
+ DRM_ERROR("failed to disable VDO: %d\n", ret);
+
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ gpiod_set_value(ps_bridge->gpio_powerdown, 1);
+ ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0)
+ DRM_ERROR("cannot disable regulators %d\n", ret);
+}
+
+static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct device *dev = &ps_bridge->page[0]->dev;
+ struct device_node *in_ep, *dsi_node;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+ const struct mipi_dsi_device_info info = { .type = "ps8640",
+ .channel = 0,
+ .node = NULL,
+ };
+ /* port@0 is ps8640 dsi input port */
+ in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ if (!in_ep)
+ return -ENODEV;
+
+ dsi_node = of_graph_get_remote_port_parent(in_ep);
+ of_node_put(in_ep);
+ if (!dsi_node)
+ return -ENODEV;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ of_node_put(dsi_node);
+ if (!host)
+ return -ENODEV;
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ return ret;
+ }
+
+ ps_bridge->dsi = dsi;
+
+ dsi->host = host;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = DP_NUM_LANES;
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ goto err_dsi_attach;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ &ps_bridge->bridge, flags);
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8640_bridge_funcs = {
+ .attach = ps8640_bridge_attach,
+ .post_disable = ps8640_post_disable,
+ .pre_enable = ps8640_pre_enable,
+};
+
+static int ps8640_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct ps8640 *ps_bridge;
+ struct drm_panel *panel;
+ int ret;
+ u32 i;
+
+ ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
+ if (!ps_bridge)
+ return -ENOMEM;
+
+ /* port@1 is ps8640 output port */
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+ if (ret < 0)
+ return ret;
+ if (!panel)
+ return -ENODEV;
+
+ panel->connector_type = DRM_MODE_CONNECTOR_eDP;
+
+ ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ps_bridge->panel_bridge))
+ return PTR_ERR(ps_bridge->panel_bridge);
+
+ ps_bridge->supplies[0].supply = "vdd33";
+ ps_bridge->supplies[1].supply = "vdd12";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret)
+ return ret;
+
+ ps_bridge->gpio_powerdown = devm_gpiod_get(&client->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_powerdown))
+ return PTR_ERR(ps_bridge->gpio_powerdown);
+
+ /*
+ * Assert the reset to avoid the bridge being initialized prematurely
+ */
+ ps_bridge->gpio_reset = devm_gpiod_get(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_reset))
+ return PTR_ERR(ps_bridge->gpio_reset);
+
+ ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
+ ps_bridge->bridge.of_node = dev->of_node;
+
+ ps_bridge->page[PAGE0_DP_CNTL] = client;
+
+ for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
+ ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ client->addr + i);
+ if (IS_ERR(ps_bridge->page[i])) {
+ dev_err(dev, "failed i2c dummy device, address %02x\n",
+ client->addr + i);
+ return PTR_ERR(ps_bridge->page[i]);
+ }
+ }
+
+ i2c_set_clientdata(client, ps_bridge);
+
+ drm_bridge_add(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static int ps8640_remove(struct i2c_client *client)
+{
+ struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ps8640_match[] = {
+ { .compatible = "parade,ps8640" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ps8640_match);
+
+static struct i2c_driver ps8640_driver = {
+ .probe_new = ps8640_probe,
+ .remove = ps8640_remove,
+ .driver = {
+ .name = "ps8640",
+ .of_match_table = ps8640_match,
+ },
+};
+module_i2c_driver(ps8640_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_AUTHOR("CK Hu <ck.hu@mediatek.com>");
+MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
+MODULE_DESCRIPTION("PARADE ps8640 DSI-eDP converter driver");
+MODULE_LICENSE("GPL v2");
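
ps8640_pre_enable bounds its wait for the embedded MCU with a jiffies deadline rather than a fixed retry count. The same idiom factored out as a sketch (poll_flag_ready is an illustrative helper, not part of the driver):

static int poll_flag_ready(struct i2c_client *client, u8 reg, u8 mask,
			   unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	int status;

	while (time_is_after_jiffies(timeout)) {
		status = i2c_smbus_read_byte_data(client, reg);
		if (status < 0)
			return status;	/* I2C error */
		if ((status & mask) == mask)
			return 0;	/* flag set, device ready */
		msleep(20);
	}

	return -ETIMEDOUT;
}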
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index b70e8c5cf2e1..6dad025f8da7 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -399,12 +399,18 @@ out:
mutex_unlock(&sii902x->mutex);
}
-static int sii902x_bridge_attach(struct drm_bridge *bridge)
+static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
drm_connector_helper_add(&sii902x->connector,
&sii902x_connector_helper_funcs);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 4c0eef406eb1..92acd336aa89 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2202,7 +2202,8 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
return container_of(bridge, struct sii8620, bridge);
}
-static int sii8620_attach(struct drm_bridge *bridge)
+static int sii8620_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
new file mode 100644
index 000000000000..a2dca7a3ef03
--- /dev/null
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015-2016 Free Electrons
+ * Copyright (C) 2015-2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+struct simple_bridge_info {
+ const struct drm_bridge_timings *timings;
+ unsigned int connector_type;
+};
+
+struct simple_bridge {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ const struct simple_bridge_info *info;
+
+ struct i2c_adapter *ddc;
+ struct regulator *vdd;
+ struct gpio_desc *enable;
+};
+
+static inline struct simple_bridge *
+drm_bridge_to_simple_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct simple_bridge, bridge);
+}
+
+static inline struct simple_bridge *
+drm_connector_to_simple_bridge(struct drm_connector *connector)
+{
+ return container_of(connector, struct simple_bridge, connector);
+}
+
+static int simple_bridge_get_modes(struct drm_connector *connector)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+ struct edid *edid;
+ int ret;
+
+ if (!sbridge->ddc)
+ goto fallback;
+
+ edid = drm_get_edid(connector, sbridge->ddc);
+ if (!edid) {
+ DRM_INFO("EDID readout failed, falling back to standard modes\n");
+ goto fallback;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
+
+fallback:
+ /*
+	 * In case we cannot retrieve the EDIDs (broken or missing i2c
+	 * bus), fall back on the XGA standards
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anyone can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs simple_bridge_con_helper_funcs = {
+ .get_modes = simple_bridge_get_modes,
+};
+
+static enum drm_connector_status
+simple_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+
+ /*
+ * Even if we have an I2C bus, we can't assume that the cable
+ * is disconnected if drm_probe_ddc fails. Some cables don't
+ * wire the DDC pins, or the I2C bus might not be working at
+ * all.
+ */
+ if (sbridge->ddc && drm_probe_ddc(sbridge->ddc))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs simple_bridge_con_funcs = {
+ .detect = simple_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int simple_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&sbridge->connector,
+ &simple_bridge_con_helper_funcs);
+ ret = drm_connector_init_with_ddc(bridge->dev, &sbridge->connector,
+ &simple_bridge_con_funcs,
+ sbridge->info->connector_type,
+ sbridge->ddc);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_attach_encoder(&sbridge->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static void simple_bridge_enable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (sbridge->vdd) {
+ ret = regulator_enable(sbridge->vdd);
+ if (ret)
+ DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+ }
+
+ gpiod_set_value_cansleep(sbridge->enable, 1);
+}
+
+static void simple_bridge_disable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+
+ gpiod_set_value_cansleep(sbridge->enable, 0);
+
+ if (sbridge->vdd)
+ regulator_disable(sbridge->vdd);
+}
+
+static const struct drm_bridge_funcs simple_bridge_bridge_funcs = {
+ .attach = simple_bridge_attach,
+ .enable = simple_bridge_enable,
+ .disable = simple_bridge_disable,
+};
+
+static struct i2c_adapter *simple_bridge_retrieve_ddc(struct device *dev)
+{
+ struct device_node *phandle, *remote;
+ struct i2c_adapter *ddc;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return ERR_PTR(-EINVAL);
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
+static int simple_bridge_probe(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge;
+
+ sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
+ if (!sbridge)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, sbridge);
+
+ sbridge->info = of_device_get_match_data(&pdev->dev);
+
+ sbridge->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(sbridge->vdd)) {
+ int ret = PTR_ERR(sbridge->vdd);
+ if (ret == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ sbridge->vdd = NULL;
+ dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+ }
+
+ sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sbridge->enable)) {
+ if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n");
+ return PTR_ERR(sbridge->enable);
+ }
+
+ sbridge->ddc = simple_bridge_retrieve_ddc(&pdev->dev);
+ if (IS_ERR(sbridge->ddc)) {
+ if (PTR_ERR(sbridge->ddc) == -ENODEV) {
+ dev_dbg(&pdev->dev,
+ "No i2c bus specified. Disabling EDID readout\n");
+ sbridge->ddc = NULL;
+ } else {
+ dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
+ return PTR_ERR(sbridge->ddc);
+ }
+ }
+
+ sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
+ sbridge->bridge.of_node = pdev->dev.of_node;
+ sbridge->bridge.timings = sbridge->info->timings;
+
+ drm_bridge_add(&sbridge->bridge);
+
+ return 0;
+}
+
+static int simple_bridge_remove(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&sbridge->bridge);
+
+ if (sbridge->ddc)
+ i2c_put_adapter(sbridge->ddc);
+
+ return 0;
+}
+
+/*
+ * We assume the ADV7123 DAC is the "default" for historical reasons.
+ * Information taken from the ADV7123 datasheet, revision D.
+ * NOTE: the ADV7123EP seems to have other timings and would need a new
+ * set of timings if used.
+ */
+static const struct drm_bridge_timings default_bridge_timings = {
+ /* Timing specifications, datasheet page 7 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ .setup_time_ps = 500,
+ .hold_time_ps = 1500,
+};
+
+/*
+ * Information taken from the THS8134, THS8134A, THS8134B datasheet named
+ * "SLVS205D", dated May 1990, revised March 2000.
+ */
+static const struct drm_bridge_timings ti_ths8134_bridge_timings = {
+ /* From timing diagram, datasheet page 9 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 12 */
+ .setup_time_ps = 3000,
+ /* I guess this means latched input */
+ .hold_time_ps = 0,
+};
+
+/*
+ * Information taken from the THS8135 datasheet named "SLAS343B", dated
+ * May 2001, revised April 2013.
+ */
+static const struct drm_bridge_timings ti_ths8135_bridge_timings = {
+ /* From timing diagram, datasheet page 14 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 16 */
+ .setup_time_ps = 2000,
+ .hold_time_ps = 500,
+};
+
+static const struct of_device_id simple_bridge_match[] = {
+ {
+ .compatible = "dumb-vga-dac",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "adi,adv7123",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &default_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,opa362",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_Composite,
+ },
+ }, {
+ .compatible = "ti,ths8135",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8135_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,ths8134",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8134_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, simple_bridge_match);
+
+static struct platform_driver simple_bridge_driver = {
+ .probe = simple_bridge_probe,
+ .remove = simple_bridge_remove,
+ .driver = {
+ .name = "simple-bridge",
+ .of_match_table = simple_bridge_match,
+ },
+};
+module_platform_driver(simple_bridge_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Simple DRM bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 24965e53d351..383b1073d7de 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1820,13 +1820,32 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
unsigned int vdisplay, hdisplay;
- vmode->mtmdsclock = vmode->mpixelclock = mode->clock * 1000;
+ vmode->mpixelclock = mode->clock * 1000;
dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
+ vmode->mtmdsclock = vmode->mpixelclock;
+
+ if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (hdmi_bus_fmt_color_depth(
+ hdmi->hdmi_data.enc_out_bus_format)) {
+ case 16:
+ vmode->mtmdsclock = vmode->mpixelclock * 2;
+ break;
+ case 12:
+ vmode->mtmdsclock = vmode->mpixelclock * 3 / 2;
+ break;
+ case 10:
+ vmode->mtmdsclock = vmode->mpixelclock * 5 / 4;
+ break;
+ }
+ }
+
if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
vmode->mtmdsclock /= 2;
+ dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock);
+
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
(dw_hdmi_support_scdc(hdmi) &&
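
The scaling in the hunk above follows directly from the serialised bits per pixel: 16, 12 and 10 bpc carry 2x, 1.5x and 1.25x the data of an 8 bpc stream, while 4:2:0 halves the pixel rate on the link (and dw-hdmi deliberately skips the scaling for YUV 4:2:2, which packs its deeper components into an 8 bpc-equivalent TMDS clock). A worked sketch (tmds_clock_khz is an illustrative helper, not dw-hdmi code):

static unsigned int tmds_clock_khz(unsigned int pixelclock_khz,
				   unsigned int bpc, bool yuv420)
{
	unsigned int tmds = pixelclock_khz;

	switch (bpc) {
	case 16: tmds *= 2; break;		/* 48-bit pixels */
	case 12: tmds = tmds * 3 / 2; break;	/* 36-bit pixels */
	case 10: tmds = tmds * 5 / 4; break;	/* 30-bit pixels */
	}

	if (yuv420)
		tmds /= 2;

	/* e.g. 148500 kHz (1080p60) at 12 bpc -> 222750 kHz */
	return tmds;
}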
@@ -2084,11 +2103,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
- /* TOFIX: Get input format from plat data or fallback to RGB888 */
if (hdmi->plat_data->input_bus_format)
hdmi->hdmi_data.enc_in_bus_format =
hdmi->plat_data->input_bus_format;
- else
+ else if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
/* TOFIX: Get input encoding from plat data or fallback to none */
@@ -2098,8 +2116,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
else
hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
- /* TOFIX: Default to RGB888 output format */
- hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
+ hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
@@ -2377,7 +2395,279 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs =
.atomic_check = dw_hdmi_connector_atomic_check,
};
-static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
+/*
+ * Possible output formats :
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48,
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_YUV16_1X48,
+ * - MEDIA_BUS_FMT_RGB161616_1X48,
+ * - MEDIA_BUS_FMT_UYVY12_1X24,
+ * - MEDIA_BUS_FMT_YUV12_1X36,
+ * - MEDIA_BUS_FMT_RGB121212_1X36,
+ * - MEDIA_BUS_FMT_UYVY10_1X20,
+ * - MEDIA_BUS_FMT_YUV10_1X30,
+ * - MEDIA_BUS_FMT_RGB101010_1X30,
+ * - MEDIA_BUS_FMT_UYVY8_1X16,
+ * - MEDIA_BUS_FMT_YUV8_1X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
+ */
+
+/* Can return a maximum of 11 possible output formats for a mode/connector */
+#define MAX_OUTPUT_SEL_FORMATS 11
+
+static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_display_info *info = &conn->display_info;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ u8 max_bpc = conn_state->max_requested_bpc;
+ bool is_hdmi2_sink = info->hdmi.scdc.supported ||
+ (info->color_formats & DRM_COLOR_FORMAT_YCRCB420);
+ u32 *output_fmts;
+ unsigned int i = 0;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts),
+ GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+	/* If dw-hdmi is the only bridge, avoid negotiating with ourselves */
+ if (list_is_singular(&bridge->encoder->bridge_chain)) {
+ *num_output_fmts = 1;
+ output_fmts[0] = MEDIA_BUS_FMT_FIXED;
+
+ return output_fmts;
+ }
+
+ /*
+	 * If the current mode enforces 4:2:0, force the output bus format
+	 * to 4:2:0 and do not add the YUV422/444/RGB formats
+ */
+ if (conn->ycbcr_420_allowed &&
+ (drm_mode_is_420_only(info, mode) ||
+ (is_hdmi2_sink && drm_mode_is_420_also(info, mode)))) {
+
+ /* Order bus formats from 16bit to 8bit if supported */
+ if (max_bpc >= 16 && info->bpc == 16 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY16_0_5X48;
+
+ if (max_bpc >= 12 && info->bpc >= 12 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY12_0_5X36;
+
+ if (max_bpc >= 10 && info->bpc >= 10 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
+
+ /* Default 8bit fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+ }
+
+ /*
+ * Order bus formats from 16bit to 8bit and from YUV422 to RGB
+ * if supported. In any case the default RGB888 format is added
+ */
+
+ if (max_bpc >= 16 && info->bpc == 16) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ }
+
+ if (max_bpc >= 12 && info->bpc >= 12) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ }
+
+ if (max_bpc >= 10 && info->bpc >= 10) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ }
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+
+ /* Default 8bit RGB fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+}
+
+/*
+ * Possible input formats :
+ * - MEDIA_BUS_FMT_RGB888_1X24
+ * - MEDIA_BUS_FMT_YUV8_1X24
+ * - MEDIA_BUS_FMT_UYVY8_1X16
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24
+ * - MEDIA_BUS_FMT_RGB101010_1X30
+ * - MEDIA_BUS_FMT_YUV10_1X30
+ * - MEDIA_BUS_FMT_UYVY10_1X20
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30
+ * - MEDIA_BUS_FMT_RGB121212_1X36
+ * - MEDIA_BUS_FMT_YUV12_1X36
+ * - MEDIA_BUS_FMT_UYVY12_1X24
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36
+ * - MEDIA_BUS_FMT_RGB161616_1X48
+ * - MEDIA_BUS_FMT_YUV16_1X48
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48
+ */
+
+/* Can return a maximum of 3 possible input formats for an output format */
+#define MAX_INPUT_SEL_FORMATS 3
+
+static u32 *dw_hdmi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+ unsigned int i = 0;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ switch (output_fmt) {
+ /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
+ case MEDIA_BUS_FMT_FIXED:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ /* 8bit */
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+
+ /* 10bit */
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ break;
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+
+ /* 12bit */
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ break;
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+
+ /* 16bit */
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ break;
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ break;
+
+ /* YUV 4:2:0 */
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ input_fmts[i++] = output_fmt;
+ break;
+ }
+
+ *num_input_fmts = i;
+
+ if (*num_input_fmts == 0) {
+ kfree(input_fmts);
+ input_fmts = NULL;
+ }
+
+ return input_fmts;
+}
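For reference, the convention these two hooks rely on: each list is ordered by preference (first entry preferred), and the atomic helpers query output formats on the connector side first, then walk back towards the encoder asking each bridge for matching input formats. A minimal sketch of the input-side hook for a fully transparent bridge, with an illustrative function name, might look like:

	static u32 *passthru_get_input_bus_fmts(struct drm_bridge *bridge,
						struct drm_bridge_state *bridge_state,
						struct drm_crtc_state *crtc_state,
						struct drm_connector_state *conn_state,
						u32 output_fmt,
						unsigned int *num_input_fmts)
	{
		u32 *input_fmts;

		/* Transparent bridge: the input format mirrors the output. */
		input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
		if (!input_fmts)
			return NULL;

		input_fmts[0] = output_fmt;
		*num_input_fmts = 1;

		return input_fmts;
	}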
+
+static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ hdmi->hdmi_data.enc_out_bus_format =
+ bridge_state->output_bus_cfg.format;
+
+ hdmi->hdmi_data.enc_in_bus_format =
+ bridge_state->input_bus_cfg.format;
+
+ dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n",
+ bridge_state->input_bus_cfg.format,
+ bridge_state->output_bus_cfg.format);
+
+ return 0;
+}
+
+static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
@@ -2385,6 +2675,11 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
struct cec_connector_info conn_info;
struct cec_notifier *notifier;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2395,6 +2690,14 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ /*
+ * drm_connector_attach_max_bpc_property() requires the
+ * connector to have a state.
+ */
+ drm_atomic_helper_connector_reset(connector);
+
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
drm_object_attach_property(&connector->base,
connector->dev->mode_config.hdr_output_metadata_property, 0);
@@ -2479,8 +2782,14 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
}
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.attach = dw_hdmi_bridge_attach,
.detach = dw_hdmi_bridge_detach,
+ .atomic_check = dw_hdmi_bridge_atomic_check,
+ .atomic_get_output_bus_fmts = dw_hdmi_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = dw_hdmi_bridge_atomic_get_input_bus_fmts,
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
@@ -2949,6 +3258,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
+ if (hdmi->version >= 0x200a)
+ hdmi->connector.ycbcr_420_allowed =
+ hdmi->plat_data->ycbcr_420_allowed;
+ else
+ hdmi->connector.ycbcr_420_allowed = false;
+
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
@@ -3082,7 +3397,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
if (IS_ERR(hdmi))
return hdmi;
- ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
if (ret) {
dw_hdmi_remove(hdmi);
DRM_ERROR("Failed to initialize bridge with drm\n");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b18351b6760a..5ef0f154aa7b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -824,7 +824,8 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
* This needs to be fixed in the drm_bridge framework and the API
* needs to be updated to manage our own call chains...
*/
- dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (dsi->panel_bridge->funcs->post_disable)
+ dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
if (phy_ops->power_off)
phy_ops->power_off(dsi->plat_data->priv_data);
@@ -935,7 +936,8 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return mode_status;
}
-static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
+static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -948,7 +950,8 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ flags);
}
static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
@@ -1119,7 +1122,7 @@ int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
{
int ret;
- ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 96207fcfde19..5ac1430fab04 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -349,12 +349,18 @@ static void tc358764_enable(struct drm_bridge *bridge)
dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
-static int tc358764_attach(struct drm_bridge *bridge)
+static int tc358764_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(drm, &ctx->connector,
&tc358764_connector_funcs,
@@ -369,7 +375,6 @@ static int tc358764_attach(struct drm_bridge *bridge)
drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
drm_panel_attach(ctx->panel, &ctx->connector);
ctx->connector.funcs->reset(&ctx->connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, &ctx->connector);
drm_connector_register(&ctx->connector);
return 0;
@@ -378,10 +383,8 @@ static int tc358764_attach(struct drm_bridge *bridge)
static void tc358764_detach(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- struct drm_device *drm = bridge->dev;
drm_connector_unregister(&ctx->connector);
- drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
drm_panel_detach(ctx->panel);
ctx->panel = NULL;
drm_connector_put(&ctx->connector);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index fbdb42d4e772..e4c0ea03ae3a 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -31,6 +31,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/* Registers */
@@ -1403,13 +1404,19 @@ static const struct drm_connector_funcs tc_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tc_bridge_attach(struct drm_bridge *bridge)
+static int tc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct tc_data *tc = bridge_to_tc(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
/* Create DP/eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
new file mode 100644
index 000000000000..1b39e8d37834
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* Global (16-bit addressable) */
+#define TC358768_CHIPID 0x0000
+#define TC358768_SYSCTL 0x0002
+#define TC358768_CONFCTL 0x0004
+#define TC358768_VSDLY 0x0006
+#define TC358768_DATAFMT 0x0008
+#define TC358768_GPIOEN 0x000E
+#define TC358768_GPIODIR 0x0010
+#define TC358768_GPIOIN 0x0012
+#define TC358768_GPIOOUT 0x0014
+#define TC358768_PLLCTL0 0x0016
+#define TC358768_PLLCTL1 0x0018
+#define TC358768_CMDBYTE 0x0022
+#define TC358768_PP_MISC 0x0032
+#define TC358768_DSITX_DT 0x0050
+#define TC358768_FIFOSTATUS 0x00F8
+
+/* Debug (16-bit addressable) */
+#define TC358768_VBUFCTRL 0x00E0
+#define TC358768_DBG_WIDTH 0x00E2
+#define TC358768_DBG_VBLANK 0x00E4
+#define TC358768_DBG_DATA 0x00E8
+
+/* TX PHY (32-bit addressable) */
+#define TC358768_CLW_DPHYCONTTX 0x0100
+#define TC358768_D0W_DPHYCONTTX 0x0104
+#define TC358768_D1W_DPHYCONTTX 0x0108
+#define TC358768_D2W_DPHYCONTTX 0x010C
+#define TC358768_D3W_DPHYCONTTX 0x0110
+#define TC358768_CLW_CNTRL 0x0140
+#define TC358768_D0W_CNTRL 0x0144
+#define TC358768_D1W_CNTRL 0x0148
+#define TC358768_D2W_CNTRL 0x014C
+#define TC358768_D3W_CNTRL 0x0150
+
+/* TX PPI (32-bit addressable) */
+#define TC358768_STARTCNTRL 0x0204
+#define TC358768_DSITXSTATUS 0x0208
+#define TC358768_LINEINITCNT 0x0210
+#define TC358768_LPTXTIMECNT 0x0214
+#define TC358768_TCLK_HEADERCNT 0x0218
+#define TC358768_TCLK_TRAILCNT 0x021C
+#define TC358768_THS_HEADERCNT 0x0220
+#define TC358768_TWAKEUP 0x0224
+#define TC358768_TCLK_POSTCNT 0x0228
+#define TC358768_THS_TRAILCNT 0x022C
+#define TC358768_HSTXVREGCNT 0x0230
+#define TC358768_HSTXVREGEN 0x0234
+#define TC358768_TXOPTIONCNTRL 0x0238
+#define TC358768_BTACNTRL1 0x023C
+
+/* TX CTRL (32-bit addressable) */
+#define TC358768_DSI_CONTROL 0x040C
+#define TC358768_DSI_STATUS 0x0410
+#define TC358768_DSI_INT 0x0414
+#define TC358768_DSI_INT_ENA 0x0418
+#define TC358768_DSICMD_RDFIFO 0x0430
+#define TC358768_DSI_ACKERR 0x0434
+#define TC358768_DSI_ACKERR_INTENA 0x0438
+#define TC358768_DSI_ACKERR_HALT 0x043c
+#define TC358768_DSI_RXERR 0x0440
+#define TC358768_DSI_RXERR_INTENA 0x0444
+#define TC358768_DSI_RXERR_HALT 0x0448
+#define TC358768_DSI_ERR 0x044C
+#define TC358768_DSI_ERR_INTENA 0x0450
+#define TC358768_DSI_ERR_HALT 0x0454
+#define TC358768_DSI_CONFW 0x0500
+#define TC358768_DSI_LPCMD 0x0500
+#define TC358768_DSI_RESET 0x0504
+#define TC358768_DSI_INT_CLR 0x050C
+#define TC358768_DSI_START 0x0518
+
+/* DSITX CTRL (16-bit addressable) */
+#define TC358768_DSICMD_TX 0x0600
+#define TC358768_DSICMD_TYPE 0x0602
+#define TC358768_DSICMD_WC 0x0604
+#define TC358768_DSICMD_WD0 0x0610
+#define TC358768_DSICMD_WD1 0x0612
+#define TC358768_DSICMD_WD2 0x0614
+#define TC358768_DSICMD_WD3 0x0616
+#define TC358768_DSI_EVENT 0x0620
+#define TC358768_DSI_VSW 0x0622
+#define TC358768_DSI_VBPR 0x0624
+#define TC358768_DSI_VACT 0x0626
+#define TC358768_DSI_HSW 0x0628
+#define TC358768_DSI_HBPR 0x062A
+#define TC358768_DSI_HACT 0x062C
+
+/* TC358768_DSI_CONTROL (0x040C) register */
+#define TC358768_DSI_CONTROL_DIS_MODE BIT(15)
+#define TC358768_DSI_CONTROL_TXMD BIT(7)
+#define TC358768_DSI_CONTROL_HSCKMD BIT(5)
+#define TC358768_DSI_CONTROL_EOTDIS BIT(0)
+
+/* TC358768_DSI_CONFW (0x0500) register */
+#define TC358768_DSI_CONFW_MODE_SET (5 << 29)
+#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
+#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+
+static const char * const tc358768_supplies[] = {
+ "vddc", "vddmipi", "vddio"
+};
+
+struct tc358768_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct tc358768_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(tc358768_supplies)];
+ struct clk *refclk;
+ int enabled;
+ int error;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct tc358768_dsi_output output;
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 fbd; /* PLL feedback divider */
+ u32 prd; /* PLL input divider */
+ u32 frs; /* PLL frequency range for HSCK (post divider) */
+
+ u32 dsiclk; /* pll_clk / 2 */
+};
+
+static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
+ *host)
+{
+ return container_of(host, struct tc358768_priv, dsi_host);
+}
+
+static inline struct tc358768_priv *bridge_to_tc358768(struct drm_bridge
+ *bridge)
+{
+ return container_of(bridge, struct tc358768_priv, bridge);
+}
+
+static int tc358768_clear_error(struct tc358768_priv *priv)
+{
+ int ret = priv->error;
+
+ priv->error = 0;
+ return ret;
+}
+
+static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+ /* 16-bit register? */
+ if (reg < 0x100 || reg >= 0x600)
+ count = 1;
+
+ priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+}
+
+static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+ /* 16-bit register? */
+ if (reg < 0x100 || reg >= 0x600) {
+ *val = 0;
+ count = 1;
+ }
+
+ priv->error = regmap_bulk_read(priv->regmap, reg, val, count);
+}
+
+static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ u32 val)
+{
+ u32 tmp, orig;
+
+ tc358768_read(priv, reg, &orig);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ if (tmp != orig)
+ tc358768_write(priv, reg, tmp);
+}
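Two conventions in the three helpers above are worth spelling out: registers below 0x100 and at 0x600 or above are 16 bits wide, while the 0x100-0x5FF window holds 32-bit registers accessed as two 16-bit regmap words; and the first regmap failure is latched in priv->error, so callers can batch accesses and check once. A hedged usage sketch:

	/* Errors accumulate; only the first failure is reported. */
	tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd); /* 16-bit */
	tc358768_write(priv, TC358768_STARTCNTRL, 1);              /* 32-bit */
	ret = tc358768_clear_error(priv);
	if (ret)
		dev_err(priv->dev, "register setup failed: %d\n", ret);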
+
+static int tc358768_sw_reset(struct tc358768_priv *priv)
+{
+ /* Assert Reset */
+ tc358768_write(priv, TC358768_SYSCTL, 1);
+ /* Release Reset, Exit Sleep */
+ tc358768_write(priv, TC358768_SYSCTL, 0);
+
+ return tc358768_clear_error(priv);
+}
+
+static void tc358768_hw_enable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (priv->enabled)
+ return;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+ if (priv->reset_gpio)
+ usleep_range(200, 300);
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * DEASSERT (value = 0) the reset_gpio to enable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+
+ /* wait for encoder clocks to stabilize */
+ usleep_range(1000, 2000);
+
+ priv->enabled = true;
+}
+
+static void tc358768_hw_disable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (!priv->enabled)
+ return;
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * ASSERT (value = 1) the reset_gpio to disable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ priv->enabled = false;
+}
+
+static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
+{
+ return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->pd_lines);
+}
+
+static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
+{
+ return (u32)div_u64((u64)pclk * priv->pd_lines, priv->dsi_lanes);
+}
+
+static int tc358768_calc_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode,
+ bool verify_only)
+{
+ const u32 frs_limits[] = {
+ 1000000000,
+ 500000000,
+ 250000000,
+ 125000000,
+ 62500000
+ };
+ unsigned long refclk;
+ u32 prd, target_pll, i, max_pll, min_pll;
+ u32 frs, best_diff, best_pll, best_prd, best_fbd;
+
+ target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);
+
+ /* pll_clk = RefClk * [(FBD + 1) / (PRD + 1)] * [1 / (2^FRS)] */
+
+ for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
+ if (target_pll >= frs_limits[i])
+ break;
+
+ if (i == ARRAY_SIZE(frs_limits) || i == 0)
+ return -EINVAL;
+
+ frs = i - 1;
+ max_pll = frs_limits[i - 1];
+ min_pll = frs_limits[i];
+
+ refclk = clk_get_rate(priv->refclk);
+
+ best_diff = UINT_MAX;
+ best_pll = 0;
+ best_prd = 0;
+ best_fbd = 0;
+
+ for (prd = 0; prd < 16; ++prd) {
+ u32 divisor = (prd + 1) * (1 << frs);
+ u32 fbd;
+
+ for (fbd = 0; fbd < 512; ++fbd) {
+ u32 pll, diff;
+
+ pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+
+ if (pll >= max_pll || pll < min_pll)
+ continue;
+
+ diff = max(pll, target_pll) - min(pll, target_pll);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_pll = pll;
+ best_prd = prd;
+ best_fbd = fbd;
+
+ if (best_diff == 0)
+ goto found;
+ }
+ }
+ }
+
+ if (best_diff == UINT_MAX) {
+ dev_err(priv->dev, "could not find suitable PLL setup\n");
+ return -EINVAL;
+ }
+
+found:
+ if (verify_only)
+ return 0;
+
+ priv->fbd = best_fbd;
+ priv->prd = best_prd;
+ priv->frs = frs;
+ priv->dsiclk = best_pll / 2;
+
+ return 0;
+}
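A worked example of the search above, with illustrative numbers: a 19.2 MHz reference clock, a 65 MHz pixel clock, RGB888 input (24 parallel data lines) and 4 DSI lanes.

	/*
	 * target_pll = 65 MHz * 24 / 4 = 390 MHz
	 * 390 MHz lies in the 250..500 MHz band, so FRS = 1 (post divide by 2).
	 * PRD = 7, FBD = 324 then gives
	 *   pll_clk = 19.2 MHz * (324 + 1) / (7 + 1) / 2^1 = 390 MHz (exact),
	 * i.e. dsiclk = 195 MHz and a DSI byte clock of 48.75 MHz.
	 */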
+
+static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes(%u)\n",
+ dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * tc358768 supports both Video and Pulse mode, but the driver only
+ * implements Video (event) mode currently
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * tc358768 supports RGB888, RGB666, RGB666_PACKED and RGB565, but only
+ * RGB888 is verified.
+ */
+ if (dev->format != MIPI_DSI_FMT_RGB888) {
+ dev_warn(priv->dev, "Only MIPI_DSI_FMT_RGB888 tested!\n");
+ return -ENOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel,
+ &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
+
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int tc358768_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct mipi_dsi_packet packet;
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return -ENODEV;
+ }
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (msg->tx_len > 8) {
+ dev_warn(priv->dev, "Maximum 8 byte MIPI tx is supported\n");
+ return -ENOTSUPP;
+ }
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret)
+ return ret;
+
+ if (mipi_dsi_packet_format_is_short(msg->type)) {
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x10 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, 0);
+ tc358768_write(priv, TC358768_DSICMD_WD0,
+ (packet.header[2] << 8) | packet.header[1]);
+ } else {
+ int i;
+
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x40 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, packet.payload_length);
+ for (i = 0; i < packet.payload_length; i += 2) {
+ u16 val = packet.payload[i];
+
+ if (i + 1 < packet.payload_length)
+ val |= packet.payload[i + 1] << 8;
+
+ tc358768_write(priv, TC358768_DSICMD_WD0 + i, val);
+ }
+ }
+
+ /* start transfer */
+ tc358768_write(priv, TC358768_DSICMD_TX, 1);
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+ else
+ ret = packet.size;
+
+ return ret;
+}
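For context, a panel driver attached to this host reaches the hook above through the standard DSI helpers; a one-parameter DCS write, for example, becomes a short packet whose two data bytes travel in the packet header (values below are illustrative):

	u8 brightness = 0x80;

	/* Lands in tc358768_dsi_host_transfer() as a short packet. */
	mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
			   &brightness, 1);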
+
+static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
+ .attach = tc358768_dsi_host_attach,
+ .detach = tc358768_dsi_host_detach,
+ .transfer = tc358768_dsi_host_transfer,
+};
+
+static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
+ dev_err(priv->dev, "needs atomic updates support\n");
+ return -ENOTSUPP;
+ }
+
+ return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+tc358768_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (tc358768_calc_pll(priv, mode, true))
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static void tc358768_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ /* set FrmStop */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(15), BIT(15));
+
+ /* wait at least for one frame */
+ msleep(50);
+
+ /* clear PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), 0);
+
+ /* set RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(14), BIT(14));
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+}
+
+static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ tc358768_hw_disable(priv);
+}
+
+static int tc358768_setup_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u32 fbd, prd, frs;
+ int ret;
+
+ ret = tc358768_calc_pll(priv, mode, false);
+ if (ret) {
+ dev_err(priv->dev, "PLL calculation failed: %d\n", ret);
+ return ret;
+ }
+
+ fbd = priv->fbd;
+ prd = priv->prd;
+ frs = priv->frs;
+
+ dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ clk_get_rate(priv->refclk), fbd, prd, frs);
+ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
+ priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+ mode->clock * 1000);
+
+ /* PRD[15:12] FBD[8:0] */
+ tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(1) | BIT(0));
+
+ /* wait for lock */
+ usleep_range(1000, 2000);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] PLL_CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(4) | BIT(1) | BIT(0));
+
+ return tc358768_clear_error(priv);
+}
+
+#define TC358768_PRECISION 1000
+static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
+{
+ return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
+}
+
+static u32 tc358768_to_ns(u32 nsk)
+{
+ return (nsk / TC358768_PRECISION);
+}
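The *_nsk quantities used below are fixed-point periods in units of ns / TC358768_PRECISION. To illustrate with the 48.75 MHz DSI byte clock from the PLL example above, dsibclk_nsk = 10^12 / 48750000 = 20512, so the 50 ns LPTXTIMECNT computation works out as:

	/* (50 * 1000 + 20512) / 20512 = 3, minus 1 -> LPTXTIMECNT = 2 */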
+
+static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u32 val, val2, lptxcnt, hact, data_type;
+ const struct drm_display_mode *mode;
+ u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+ u32 dsiclk, dsibclk;
+ int ret, i;
+
+ tc358768_hw_enable(priv);
+
+ ret = tc358768_sw_reset(priv);
+ if (ret) {
+ dev_err(priv->dev, "Software reset failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+ ret = tc358768_setup_pll(priv, mode);
+ if (ret) {
+ dev_err(priv->dev, "PLL setup failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ dsiclk = priv->dsiclk;
+ dsibclk = dsiclk / 4;
+
+ /* Data Format Control Register */
+ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= (0x3 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= (0x4 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= (0x4 << 4) | BIT(3);
+ hact = mode->hdisplay * 18 / 8;
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ val |= (0x5 << 4);
+ hact = mode->hdisplay * 2;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ dev_err(priv->dev, "Invalid data format (%u)\n",
+ dsi_dev->format);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ /* VSDly[9:0] */
+ tc358768_write(priv, TC358768_VSDLY, 1);
+
+ tc358768_write(priv, TC358768_DATAFMT, val);
+ tc358768_write(priv, TC358768_DSITX_DT, data_type);
+
+ /* Enable D-PHY (HiZ->LP11) */
+ tc358768_write(priv, TC358768_CLW_CNTRL, 0x0000);
+ /* Enable lanes */
+ for (i = 0; i < dsi_dev->lanes; i++)
+ tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+ /* DSI Timings */
+ dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+ dsibclk);
+ dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+ ui_nsk = dsiclk_nsk / 2;
+ phy_delay_nsk = dsibclk_nsk + 2 * dsiclk_nsk;
+ dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+ dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+ dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+ dev_dbg(priv->dev, "phy_delay_nsk: %u\n", phy_delay_nsk);
+
+ /* LP11 > 100us for D-PHY Rx Init */
+ val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+ dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+ /* LPTimeCnt > 50ns */
+ val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
+ lptxcnt = val;
+ dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+ val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+ /* TCLK_PREPARE + TCLK_ZERO > 300ns */
+ val2 = tc358768_ns_to_cnt(300 + tc358768_to_ns(3 * ui_nsk),
+ dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk - dsibclk_nsk)) << 8;
+ dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+ /* TCLK_TRAIL > 60ns + 3*UI */
+ val = 60 + tc358768_to_ns(3 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+ val = 50 + tc358768_to_ns(4 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ /* THS_ZERO > 145ns + 10*UI */
+ val2 = tc358768_ns_to_cnt(145 - tc358768_to_ns(ui_nsk), dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk)) << 8;
+ dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+ /* TWAKEUP > 1ms in lptxcnt steps */
+ val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
+ val = val / (lptxcnt + 1) - 1;
+ dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TWAKEUP, val);
+
+ /* TCLK_POSTCNT > 60ns + 52*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+ dsibclk_nsk) - 3;
+ dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+ /* THS_TRAIL > 60ns + 4*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(15 * ui_nsk),
+ dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+ val = BIT(0);
+ for (i = 0; i < dsi_dev->lanes; i++)
+ val |= BIT(i + 1);
+ tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+ val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+ dsibclk_nsk) - 2;
+ val |= val2 << 16;
+ dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+ /* START[0] */
+ tc358768_write(priv, TC358768_STARTCNTRL, 1);
+
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+ mode->vtotal - mode->vsync_start);
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
+ /* vact */
+ tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+
+ /* (hsw + hbp) * byteclk * ndl / pclk */
+ val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+ ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+ mode->clock * 1000);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+ /* hbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_HBPR, 0);
+ /* hact (bytes) */
+ tc358768_write(priv, TC358768_DSI_HACT, hact);
+
+ /* VSYNC polarity */
+ if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
+ /* HSYNC polarity */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
+
+ /* Start DSI Tx */
+ tc358768_write(priv, TC358768_DSI_START, 0x1);
+
+ /* Configure DSI_Control register */
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_TXMD | TC358768_DSI_CONTROL_HSCKMD |
+ 0x3 << 1 | TC358768_DSI_CONTROL_EOTDIS;
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= (dsi_dev->lanes - 1) << 1;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
+ val |= TC358768_DSI_CONTROL_TXMD;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ val |= TC358768_DSI_CONTROL_HSCKMD;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
+ val |= TC358768_DSI_CONTROL_EOTDIS;
+
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_DIS_MODE; /* DSI mode */
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
+
+static void tc358768_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return;
+ }
+
+ /* clear FrmStop and RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, 0x3 << 14, 0);
+
+ /* set PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
+
+static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ .attach = tc358768_bridge_attach,
+ .mode_valid = tc358768_bridge_mode_valid,
+ .pre_enable = tc358768_bridge_pre_enable,
+ .enable = tc358768_bridge_enable,
+ .disable = tc358768_bridge_disable,
+ .post_disable = tc358768_bridge_post_disable,
+};
+
+static const struct drm_bridge_timings default_tc358768_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static bool tc358768_is_reserved_reg(unsigned int reg)
+{
+ switch (reg) {
+ case 0x114 ... 0x13f:
+ case 0x200:
+ case 0x20c:
+ case 0x400 ... 0x408:
+ case 0x41c ... 0x42f:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tc358768_writeable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_CHIPID:
+ case TC358768_FIFOSTATUS:
+ case TC358768_DSITXSTATUS ... (TC358768_DSITXSTATUS + 2):
+ case TC358768_DSI_CONTROL ... (TC358768_DSI_INT_ENA + 2):
+ case TC358768_DSICMD_RDFIFO ... (TC358768_DSI_ERR_HALT + 2):
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool tc358768_readable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_STARTCNTRL:
+ case TC358768_DSI_CONFW ... (TC358768_DSI_CONFW + 2):
+ case TC358768_DSI_INT_CLR ... (TC358768_DSI_INT_CLR + 2):
+ case TC358768_DSI_START ... (TC358768_DSI_START + 2):
+ case TC358768_DBG_DATA:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config tc358768_regmap_config = {
+ .name = "tc358768",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = TC358768_DSI_HACT,
+ .cache_type = REGCACHE_NONE,
+ .writeable_reg = tc358768_writeable_reg,
+ .readable_reg = tc358768_readable_reg,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static const struct i2c_device_id tc358768_i2c_ids[] = {
+ { "tc358768", 0 },
+ { "tc358778", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
+
+static const struct of_device_id tc358768_of_ids[] = {
+ { .compatible = "toshiba,tc358768", },
+ { .compatible = "toshiba,tc358778", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358768_of_ids);
+
+static int tc358768_get_regulators(struct tc358768_priv *priv)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(priv->supplies); ++i)
+ priv->supplies[i].supply = tc358768_supplies[i];
+
+ ret = devm_regulator_bulk_get(priv->dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "failed to get regulators: %d\n", ret);
+
+ return ret;
+}
+
+static int tc358768_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tc358768_priv *priv;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->dev = dev;
+
+ ret = tc358768_get_regulators(priv);
+ if (ret)
+ return ret;
+
+ priv->refclk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(priv->refclk))
+ return PTR_ERR(priv->refclk);
+
+ /*
+ * RESX is active low; to keep the tc358768 disabled (in reset)
+ * initially, the GPIO line must be LOW. This is the ASSERTED state of
+ * GPIO_ACTIVE_LOW (GPIOD_OUT_HIGH == ASSERTED).
+ */
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ priv->regmap = devm_regmap_init_i2c(client, &tc358768_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "Failed to init regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &tc358768_dsi_host_ops;
+
+ priv->bridge.funcs = &tc358768_bridge_funcs;
+ priv->bridge.timings = &default_tc358768_timings;
+ priv->bridge.of_node = np;
+
+ i2c_set_clientdata(client, priv);
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static int tc358768_i2c_remove(struct i2c_client *client)
+{
+ struct tc358768_priv *priv = i2c_get_clientdata(client);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+
+ return 0;
+}
+
+static struct i2c_driver tc358768_driver = {
+ .driver = {
+ .name = "tc358768",
+ .of_match_table = tc358768_of_ids,
+ },
+ .id_table = tc358768_i2c_ids,
+ .probe = tc358768_i2c_probe,
+ .remove = tc358768_i2c_remove,
+};
+module_i2c_driver(tc358768_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("TC358768AXBG/TC358778XBG DSI bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 3d74129b2995..97d8129760e9 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -42,11 +42,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
return container_of(bridge, struct thc63_dev, bridge);
}
-static int thc63_attach(struct drm_bridge *bridge)
+static int thc63_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
+ return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9a2dd986afa5..6ad688b320ae 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -51,6 +51,7 @@
#define SN_ENH_FRAME_REG 0x5A
#define VSTREAM_ENABLE BIT(3)
#define SN_DATA_FORMAT_REG 0x5B
+#define BPP_18_RGB BIT(0)
#define SN_HPD_DISABLE_REG 0x5C
#define HPD_DISABLE BIT(0)
#define SN_AUX_WDATA_REG(x) (0x64 + (x))
@@ -100,6 +101,7 @@ struct ti_sn_bridge {
struct drm_panel *panel;
struct gpio_desc *enable_gpio;
struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM];
+ int dp_lanes;
};
static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
@@ -264,7 +266,8 @@ static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
pdata->supplies);
}
-static int ti_sn_bridge_attach(struct drm_bridge *bridge)
+static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
int ret, val;
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
@@ -275,6 +278,11 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
.node = NULL,
};
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init(bridge->dev, &pdata->connector,
&ti_sn_bridge_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
@@ -312,7 +320,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
goto err_dsi_host;
}
- /* TODO: setting to 4 lanes always for now */
+ /* TODO: setting to 4 MIPI lanes always for now */
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
@@ -417,6 +425,32 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
REFCLK_FREQ(i));
}
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn_bridge *pdata)
+{
+ unsigned int bit_rate_mhz, clk_freq_mhz;
+ unsigned int val;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ /* set DSIA clk frequency */
+ bit_rate_mhz = (mode->clock / 1000) *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+
+ /* for each increment in val, frequency increases by 5MHz */
+ val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
+ (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
+ regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+}
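A worked example for the function above, assuming MIN_DSI_CLK_FREQ_MHZ is 40 (defined earlier in this driver) and an illustrative 148.5 MHz pixel clock with RGB888 over 4 lanes:

	/*
	 * bit_rate_mhz = (148500 / 1000) * 24 = 3552  (integer division)
	 * clk_freq_mhz = 3552 / (4 * 2) = 444         (DDR clock per lane)
	 * val = 40 / 5 + (444 - 40) / 5 = 8 + 80 = 88 -> 88 * 5 = 440 MHz,
	 * i.e. the register programs the closest 5 MHz step below the target.
	 */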
+
+static unsigned int ti_sn_bridge_get_bpp(struct ti_sn_bridge *pdata)
+{
+ if (pdata->connector.display_info.bpc <= 6)
+ return 18;
+ else
+ return 24;
+}
+
/**
* LUT index corresponds to register value and
* LUT values correspond to the DP data rates supported
@@ -426,32 +460,106 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
{
- unsigned int bit_rate_mhz, clk_freq_mhz, dp_rate_mhz;
- unsigned int val, i;
+ unsigned int bit_rate_khz, dp_rate_mhz;
+ unsigned int i;
struct drm_display_mode *mode =
&pdata->bridge.encoder->crtc->state->adjusted_mode;
- /* set DSIA clk frequency */
- bit_rate_mhz = (mode->clock / 1000) *
- mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
- clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+ /* Calculate minimum bit rate based on our pixel clock. */
+ bit_rate_khz = mode->clock * ti_sn_bridge_get_bpp(pdata);
- /* for each increment in val, frequency increases by 5MHz */
- val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
- (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
- regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+ /* Calculate minimum DP data rate, taking 80% as per DP spec */
+ dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
+ 1000 * pdata->dp_lanes * DP_CLK_FUDGE_DEN);
- /* set DP data rate */
- dp_rate_mhz = ((bit_rate_mhz / pdata->dsi->lanes) * DP_CLK_FUDGE_NUM) /
- DP_CLK_FUDGE_DEN;
- for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
+ for (i = 1; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
break;
- regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
- DP_DATARATE_MASK, DP_DATARATE(i));
+ return i;
+}
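To make the fudge factor concrete, assuming DP_CLK_FUDGE_NUM/DP_CLK_FUDGE_DEN are 10/8 as defined earlier in this driver (a 1.25x margin for the 80% link utilization noted above), take an illustrative 148.5 MHz, 24 bpp panel on 2 DP lanes:

	/*
	 * bit_rate_khz = 148500 * 24 = 3564000
	 * dp_rate_mhz  = DIV_ROUND_UP(3564000 * 10, 1000 * 2 * 8) = 2228
	 * The first LUT entry above 2228 is 2430 (index 3), so link
	 * training starts at 2.43 GHz and escalates on failure.
	 */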
+
+static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
+ bool rate_valid[])
+{
+ unsigned int rate_per_200khz;
+ unsigned int rate_mhz;
+ u8 dpcd_val;
+ int ret;
+ int i, j;
+
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_EDP_DPCD_REV, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read eDP rev (%d), assuming 1.1\n", ret);
+ dpcd_val = DP_EDP_11;
+ }
+
+ if (dpcd_val >= DP_EDP_14) {
+ /* eDP 1.4 devices must provide a custom table */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ ret = drm_dp_dpcd_read(&pdata->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+
+ if (ret != sizeof(sink_rates)) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read supported rate table (%d)\n", ret);
+
+ /* By zeroing we'll fall back to DP_MAX_LINK_RATE. */
+ memset(sink_rates, 0, sizeof(sink_rates));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ rate_per_200khz = le16_to_cpu(sink_rates[i]);
+
+ if (!rate_per_200khz)
+ break;
+
+ rate_mhz = rate_per_200khz * 200 / 1000;
+ for (j = 0;
+ j < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ j++) {
+ if (ti_sn_bridge_dp_rate_lut[j] == rate_mhz)
+ rate_valid[j] = true;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); i++) {
+ if (rate_valid[i])
+ return;
+ }
+ DRM_DEV_ERROR(pdata->dev,
+ "No matching eDP rates in table; falling back\n");
+ }
+
+ /* On older versions best we can do is use DP_MAX_LINK_RATE */
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LINK_RATE, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read max rate (%d); assuming 5.4 GHz\n",
+ ret);
+ dpcd_val = DP_LINK_BW_5_4;
+ }
+
+ switch (dpcd_val) {
+ default:
+ DRM_DEV_ERROR(pdata->dev,
+ "Unexpected max rate (%#x); assuming 5.4 GHz\n",
+ (int)dpcd_val);
+ /* fall through */
+ case DP_LINK_BW_5_4:
+ rate_valid[7] = 1;
+ /* fall through */
+ case DP_LINK_BW_2_7:
+ rate_valid[4] = 1;
+ /* fall through */
+ case DP_LINK_BW_1_62:
+ rate_valid[1] = 1;
+ break;
+ }
}
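On the eDP 1.4 path above, DP_SUPPORTED_LINK_RATES entries are little-endian multiples of 200 kHz. An illustrative conversion:

	/*
	 * A sink advertising 2.7 GHz stores 13500;
	 * 13500 * 200 / 1000 = 2700 MHz, matching ti_sn_bridge_dp_rate_lut[4].
	 */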
static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
@@ -493,24 +601,30 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
usleep_range(10000, 10500); /* 10ms delay recommended by spec */
}
-static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+static unsigned int ti_sn_get_max_lanes(struct ti_sn_bridge *pdata)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
- unsigned int val;
+ u8 data;
int ret;
- /* DSI_A lane config */
- val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
- regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
- CHA_DSI_LANES_MASK, val);
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LANE_COUNT, &data);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read lane count (%d); assuming 4\n", ret);
+ return 4;
+ }
- /* DP lane config */
- val = DP_NUM_LANES(pdata->dsi->lanes - 1);
- regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
- val);
+ return data & DP_LANE_COUNT_MASK;
+}
- /* set dsi/dp clk frequency value */
- ti_sn_bridge_set_dsi_dp_rate(pdata);
+static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
+ const char **last_err_str)
+{
+ unsigned int val;
+ int ret;
+
+ /* set dp clk frequency value */
+ regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
+ DP_DATARATE_MASK, DP_DATARATE(dp_rate_idx));
/* enable DP PLL */
regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);
@@ -519,10 +633,62 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
val & DPPLL_SRC_DP_PLL_LOCK, 1000,
50 * 1000);
if (ret) {
- DRM_ERROR("DP_PLL_LOCK polling failed (%d)\n", ret);
- return;
+ *last_err_str = "DP_PLL_LOCK polling failed";
+ goto exit;
+ }
+
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ *last_err_str = "Training complete polling failed";
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ *last_err_str = "Link training failed, link is off";
+ ret = -EIO;
}
+exit:
+ /* Disable the PLL if we failed */
+ if (ret)
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
+
+ return ret;
+}
+
+static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ bool rate_valid[ARRAY_SIZE(ti_sn_bridge_dp_rate_lut)] = { };
+ const char *last_err_str = "No supported DP rate";
+ int dp_rate_idx;
+ unsigned int val;
+ int ret = -EINVAL;
+
+ /*
+ * Run with the maximum number of lanes that the DP sink supports.
+ *
+ * Depending on the use case, we might want to revisit this later because:
+ * - It's plausible that someone may have run fewer lines to the
+ * sink than the sink actually supports, assuming that the lines
+ * will just be driven at a higher rate.
+ * - The DP spec seems to indicate that it's more important to minimize
+ * the number of lanes than the link rate.
+ *
+ * If we do revisit, it would be important to measure the power impact.
+ */
+ pdata->dp_lanes = ti_sn_get_max_lanes(pdata);
+
+ /* DSI_A lane config */
+ val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+ regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
+ CHA_DSI_LANES_MASK, val);
+
+ /* set dsi clk frequency value */
+ ti_sn_bridge_set_dsi_rate(pdata);
+
/**
* The SN65DSI86 only supports ASSR Display Authentication method and
* this method is enabled by default. An eDP panel must support this
@@ -532,17 +698,30 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
- /* Semi auto link training mode */
- regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
- ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
- val == ML_TX_MAIN_LINK_OFF ||
- val == ML_TX_NORMAL_MODE, 1000,
- 500 * 1000);
+ /* Set the DP output format (18 bpp or 24 bpp) */
+ val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
+ regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
+
+ /* DP lane config */
+ val = DP_NUM_LANES(min(pdata->dp_lanes, 3));
+ regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
+ val);
+
+ ti_sn_bridge_read_valid_rates(pdata, rate_valid);
+
+ /* Train until we run out of rates */
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata);
+ dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ dp_rate_idx++) {
+ if (!rate_valid[dp_rate_idx])
+ continue;
+
+ ret = ti_sn_link_training(pdata, dp_rate_idx, &last_err_str);
+ if (!ret)
+ break;
+ }
if (ret) {
- DRM_ERROR("Training complete polling failed (%d)\n", ret);
- return;
- } else if (val == ML_TX_MAIN_LINK_OFF) {
- DRM_ERROR("Link training failed, link is off\n");
+ DRM_DEV_ERROR(pdata->dev, "%s (%d)\n", last_err_str, ret);
return;
}
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index f195a4732e0b..e3eb6364c0f7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -4,14 +4,12 @@
* Author: Jyri Sarha <jsarha@ti.com>
*/
-#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -24,16 +22,13 @@
struct tfp410 {
struct drm_bridge bridge;
struct drm_connector connector;
- unsigned int connector_type;
u32 bus_format;
- struct i2c_adapter *ddc;
- struct gpio_desc *hpd;
- int hpd_irq;
struct delayed_work hpd_work;
struct gpio_desc *powerdown;
struct drm_bridge_timings timings;
+ struct drm_bridge *next_bridge;
struct device *dev;
};
@@ -56,13 +51,18 @@ static int tfp410_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!dvi->ddc)
- goto fallback;
+ edid = drm_bridge_get_edid(dvi->next_bridge, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ if (edid != ERR_PTR(-ENOTSUPP))
+ DRM_INFO("EDID read failed. Fallback to standard modes\n");
- edid = drm_get_edid(connector, dvi->ddc);
- if (!edid) {
- DRM_INFO("EDID read failed. Fallback to standard modes\n");
- goto fallback;
+ /*
+ * No EDID, fall back to the XGA standard modes and prefer a mode
+ * pretty much anything can handle.
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return ret;
}
drm_connector_update_edid_property(connector, edid);
@@ -72,15 +72,6 @@ static int tfp410_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
-
-fallback:
- /* No EDID, fallback on the XGA standard modes */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anything can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
}
static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
@@ -92,21 +83,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- if (dvi->hpd) {
- if (gpiod_get_value_cansleep(dvi->hpd))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- if (dvi->ddc) {
- if (drm_probe_ddc(dvi->ddc))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- return connector_status_unknown;
+ return drm_bridge_detect(dvi->next_bridge);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -118,27 +95,60 @@ static const struct drm_connector_funcs tfp410_con_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tfp410_attach(struct drm_bridge *bridge)
+static void tfp410_hpd_work_func(struct work_struct *work)
+{
+ struct tfp410 *dvi;
+
+ dvi = container_of(work, struct tfp410, hpd_work.work);
+
+ if (dvi->bridge.dev)
+ drm_helper_hpd_irq_event(dvi->bridge.dev);
+}
+
+static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
+{
+ struct tfp410 *dvi = arg;
+
+ mod_delayed_work(system_wq, &dvi->hpd_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+}
+
+static int tfp410_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
+ ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
dev_err(dvi->dev, "Missing encoder\n");
return -ENODEV;
}
- if (dvi->hpd_irq >= 0)
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
else
dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
+ drm_bridge_hpd_enable(dvi->next_bridge, tfp410_hpd_callback,
+ dvi);
+ }
+
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
&tfp410_con_funcs,
- dvi->connector_type,
- dvi->ddc);
+ dvi->next_bridge->type,
+ dvi->next_bridge->ddc);
if (ret) {
dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
ret);
@@ -148,12 +158,21 @@ static int tfp410_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector,
- bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
return 0;
}
+static void tfp410_detach(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ if (dvi->connector.dev && dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_disable(dvi->next_bridge);
+ cancel_delayed_work_sync(&dvi->hpd_work);
+ }
+}
+
static void tfp410_enable(struct drm_bridge *bridge)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
@@ -168,31 +187,25 @@ static void tfp410_disable(struct drm_bridge *bridge)
gpiod_set_value_cansleep(dvi->powerdown, 1);
}
-static const struct drm_bridge_funcs tfp410_bridge_funcs = {
- .attach = tfp410_attach,
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-};
-
-static void tfp410_hpd_work_func(struct work_struct *work)
+static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
- struct tfp410 *dvi;
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
- dvi = container_of(work, struct tfp410, hpd_work.work);
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
- if (dvi->bridge.dev)
- drm_helper_hpd_irq_event(dvi->bridge.dev);
+ return MODE_OK;
}
-static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
-{
- struct tfp410 *dvi = arg;
-
- mod_delayed_work(system_wq, &dvi->hpd_work,
- msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
-
- return IRQ_HANDLED;
-}
+static const struct drm_bridge_funcs tfp410_bridge_funcs = {
+ .attach = tfp410_attach,
+ .detach = tfp410_detach,
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
+ .mode_valid = tfp410_mode_valid,
+};
static const struct drm_bridge_timings tfp410_default_timings = {
.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
@@ -271,51 +284,9 @@ static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
return 0;
}
-static int tfp410_get_connector_properties(struct tfp410 *dvi)
-{
- struct device_node *connector_node, *ddc_phandle;
- int ret = 0;
-
- /* port@1 is the connector node */
- connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
- if (!connector_node)
- return -ENODEV;
-
- if (of_device_is_compatible(connector_node, "hdmi-connector"))
- dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- else
- dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
- "hpd", 0, GPIOD_IN, "hpd");
- if (IS_ERR(dvi->hpd)) {
- ret = PTR_ERR(dvi->hpd);
- dvi->hpd = NULL;
- if (ret == -ENOENT)
- ret = 0;
- else
- goto fail;
- }
-
- ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
- if (!ddc_phandle)
- goto fail;
-
- dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
- if (dvi->ddc)
- dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
- else
- ret = -EPROBE_DEFER;
-
- of_node_put(ddc_phandle);
-
-fail:
- of_node_put(connector_node);
- return ret;
-}
-
static int tfp410_init(struct device *dev, bool i2c)
{
+ struct device_node *node;
struct tfp410 *dvi;
int ret;
@@ -327,21 +298,31 @@ static int tfp410_init(struct device *dev, bool i2c)
dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
if (!dvi)
return -ENOMEM;
+
+ dvi->dev = dev;
dev_set_drvdata(dev, dvi);
dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
- dvi->dev = dev;
+ dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
ret = tfp410_parse_timings(dvi, i2c);
if (ret)
- goto fail;
+ return ret;
- ret = tfp410_get_connector_properties(dvi);
- if (ret)
- goto fail;
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ dvi->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+ if (!dvi->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the powerdown GPIO. */
dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);
if (IS_ERR(dvi->powerdown)) {
@@ -349,48 +330,18 @@ static int tfp410_init(struct device *dev, bool i2c)
return PTR_ERR(dvi->powerdown);
}
- if (dvi->hpd)
- dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
- else
- dvi->hpd_irq = -ENXIO;
-
- if (dvi->hpd_irq >= 0) {
- INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
-
- ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
- NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi-hpd", dvi);
- if (ret) {
- DRM_ERROR("failed to register hpd interrupt\n");
- goto fail;
- }
- }
-
+ /* Register the DRM bridge. */
drm_bridge_add(&dvi->bridge);
return 0;
-fail:
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
- return ret;
}
static int tfp410_fini(struct device *dev)
{
struct tfp410 *dvi = dev_get_drvdata(dev);
- if (dvi->hpd_irq >= 0)
- cancel_delayed_work_sync(&dvi->hpd_work);
-
drm_bridge_remove(&dvi->bridge);
- if (dvi->ddc)
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
new file mode 100644
index 000000000000..514cbf0eac75
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPD12S015 HDMI ESD protection & level shifter chip driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific encoder-opa362 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+
+struct tpd12s015_device {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *ct_cp_hpd_gpio;
+ struct gpio_desc *ls_oe_gpio;
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+
+ struct drm_bridge *next_bridge;
+};
+
+static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tpd12s015_device, bridge);
+}
+
+static int tpd12s015_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ bridge, flags);
+ if (ret < 0)
+ return ret;
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 1);
+
+ /* The DC-DC converter needs at most 300us to get to 90% of 5V. */
+ usleep_range(300, 1000);
+
+ return 0;
+}
+
+static void tpd12s015_detach(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 0);
+}
+
+static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ if (gpiod_get_value_cansleep(tpd->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 1);
+}
+
+static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 0);
+}
+
+static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
+ .attach = tpd12s015_attach,
+ .detach = tpd12s015_detach,
+ .detect = tpd12s015_detect,
+ .hpd_enable = tpd12s015_hpd_enable,
+ .hpd_disable = tpd12s015_hpd_disable,
+};
+
+static irqreturn_t tpd12s015_hpd_isr(int irq, void *data)
+{
+ struct tpd12s015_device *tpd = data;
+ struct drm_bridge *bridge = &tpd->bridge;
+
+ drm_bridge_hpd_notify(bridge, tpd12s015_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int tpd12s015_probe(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd;
+ struct device_node *node;
+ struct gpio_desc *gpio;
+ int ret;
+
+ tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
+ if (!tpd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tpd);
+
+ tpd->bridge.funcs = &tpd12s015_bridge_funcs;
+ tpd->bridge.of_node = pdev->dev.of_node;
+ tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
+
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ tpd->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+
+ if (!tpd->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the control and HPD GPIOs. */
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ct_cp_hpd_gpio = gpio;
+
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ls_oe_gpio = gpio;
+
+ gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2, GPIOD_IN);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->hpd_gpio = gpio;
+
+ /* Register the IRQ if the HPD GPIO is IRQ-capable. */
+ tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio);
+ if (tpd->hpd_irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL,
+ tpd12s015_hpd_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "tpd12s015 hpd", tpd);
+ if (ret)
+ return ret;
+
+ tpd->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ }
+
+ /* Register the DRM bridge. */
+ drm_bridge_add(&tpd->bridge);
+
+ return 0;
+}
+
+static int __exit tpd12s015_remove(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&tpd->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id tpd12s015_of_match[] = {
+ { .compatible = "ti,tpd12s015", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+
+static struct platform_driver tpd12s015_driver = {
+ .probe = tpd12s015_probe,
+ .remove = __exit_p(tpd12s015_remove),
+ .driver = {
+ .name = "tpd12s015",
+ .of_match_table = tpd12s015_of_match,
+ },
+};
+
+module_platform_driver(tpd12s015_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TPD12S015 HDMI level shifter and ESD protection driver");
+MODULE_LICENSE("GPL");
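For context, a display driver consuming a chain that ends in this bridge is expected to pass DRM_BRIDGE_ATTACH_NO_CONNECTOR when attaching the first bridge, since tpd12s015_attach() rejects any other configuration. A minimal, hypothetical sketch (the example_* name is illustrative, not part of this series):

static int example_pipe_init(struct drm_encoder *encoder,
			     struct drm_bridge *first_bridge)
{
	/*
	 * Suppress connector creation in every bridge of the chain; the
	 * display driver then builds the connector itself, e.g. with the
	 * drm_bridge_connector helper introduced later in this series.
	 */
	return drm_bridge_attach(encoder, first_bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}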
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 248c9f765c45..d2ff63ce8eaf 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -38,7 +38,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
#define DRIVER_NAME "cirrus"
#define DRIVER_DESC "qemu cirrus vga"
@@ -152,9 +151,13 @@ static int cirrus_pitch(struct drm_framebuffer *fb)
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
+ int idx;
u32 addr;
u8 tmp;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return;
+
addr = offset >> 2;
wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
@@ -169,6 +172,8 @@ static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
tmp &= 0x7f;
tmp |= (addr >> 12) & 0x80;
wreg_crt(cirrus, 0x1d, tmp);
+
+ drm_dev_exit(idx);
}
static int cirrus_mode_set(struct cirrus_device *cirrus,
@@ -177,9 +182,12 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
{
int hsyncstart, hsyncend, htotal, hdispend;
int vtotal, vdispend;
- int tmp;
+ int tmp, idx;
int sr07 = 0, hdr = 0;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return -1;
+
htotal = mode->htotal / 8;
hsyncend = mode->hsync_end / 8;
hsyncstart = mode->hsync_start / 8;
@@ -265,6 +273,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
hdr = 0xc5;
break;
default:
+ drm_dev_exit(idx);
return -1;
}
@@ -293,6 +302,8 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
outb(0x20, 0x3c0);
+
+ drm_dev_exit(idx);
return 0;
}
@@ -301,10 +312,16 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
{
struct cirrus_device *cirrus = fb->dev->dev_private;
void *vmap;
+ int idx, ret;
+
+ ret = -ENODEV;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ goto out;
+ ret = -ENOMEM;
vmap = drm_gem_shmem_vmap(fb->obj[0]);
if (!vmap)
- return -ENOMEM;
+ goto out_dev_exit;
if (cirrus->cpp == fb->format->cpp[0])
drm_fb_memcpy_dstclip(cirrus->vram,
@@ -324,7 +341,12 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
WARN_ON_ONCE("cpp mismatch");
drm_gem_shmem_vunmap(fb->obj[0], vmap);
- return 0;
+ ret = 0;
+
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
+ return ret;
}
static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
@@ -434,13 +456,6 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
@@ -510,6 +525,14 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
+static void cirrus_release(struct drm_device *dev)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+
+ drm_mode_config_cleanup(dev);
+ kfree(cirrus);
+}
+
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
@@ -523,6 +546,7 @@ static struct drm_driver cirrus_driver = {
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ .release = cirrus_release,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -606,12 +630,11 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct cirrus_device *cirrus = dev->dev_private;
- drm_dev_unregister(dev);
- drm_mode_config_cleanup(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
iounmap(cirrus->mmio);
iounmap(cirrus->vram);
drm_dev_put(dev);
- kfree(cirrus);
pci_release_regions(pdev);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index d33691512a8e..9ccfbf213d72 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -1018,6 +1019,122 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_get_bridge_state - get bridge state
+ * @state: global atomic state object
+ * @bridge: bridge to get state object for
+ *
+ * This function returns the bridge state for the given bridge, allocating it
+ * if needed. It will also grab the relevant bridge lock to make sure that the
+ * state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted.
+ */
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
+ if (IS_ERR(obj_state))
+ return ERR_CAST(obj_state);
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_bridge_state);
+
+/**
+ * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the old bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
+
+/**
+ * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the new bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
+
+/**
+ * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
+ * @state: atomic state
+ * @encoder: DRM encoder
+ *
+ * This function adds all bridges attached to @encoder. This is needed to add
+ * bridge states to @state and make them available when
+ * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
+ * &drm_bridge_funcs.atomic_enable(), &drm_bridge_funcs.atomic_disable() and
+ * &drm_bridge_funcs.atomic_post_disable() are called.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_state *bridge_state;
+ struct drm_bridge *bridge;
+
+ if (!encoder)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
+ encoder->base.id, encoder->name, state);
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ /* Skip bridges that don't implement the atomic state hooks. */
+ if (!bridge->funcs->atomic_duplicate_state)
+ continue;
+
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ if (IS_ERR(bridge_state))
+ return PTR_ERR(bridge_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
+
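As a usage sketch, a bridge driver can look up its own state through these accessors, for instance from its enable hook. The example_* names below are hypothetical, and the sketch assumes the bridge implements the atomic state hooks so a state object exists:

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_bridge_state *old_bridge_state)
{
	struct drm_atomic_state *state = old_bridge_state->base.state;
	struct drm_bridge_state *new_state;

	new_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!new_state))
		return;

	/* Program the hardware for the negotiated input bus format. */
	example_hw_set_input_format(bridge, new_state->input_bus_cfg.format);
}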
+/**
* drm_atomic_add_affected_connectors - add connectors for CRTC
* @state: atomic state
* @crtc: DRM CRTC
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4511c2e07bb9..85d163f16801 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -437,12 +437,12 @@ mode_fixup(struct drm_atomic_state *state)
funcs = encoder->helper_private;
bridge = drm_bridge_chain_get_first_bridge(encoder);
- ret = drm_bridge_chain_mode_fixup(bridge,
- &new_crtc_state->mode,
- &new_crtc_state->adjusted_mode);
- if (!ret) {
- DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
- return -EINVAL;
+ ret = drm_atomic_bridge_chain_check(bridge,
+ new_crtc_state,
+ new_conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
+ return ret;
}
if (funcs && funcs->atomic_check) {
@@ -583,6 +583,7 @@ mode_valid(struct drm_atomic_state *state)
* &drm_crtc_state.connectors_changed is set when a connector is added or
* removed from the CRTC. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
+ * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
@@ -649,6 +650,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return -EINVAL;
}
+
+ new_crtc_state->no_vblank = !drm_dev_has_vblank(dev);
}
ret = handle_conflicting_encoders(state, false);
@@ -730,6 +736,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return ret;
}
+ /*
+ * Iterate over all connectors again, and add all affected bridges to
+ * the state.
+ */
+ for_each_oldnew_connector_in_state(state, connector,
+ old_connector_state,
+ new_connector_state, i) {
+ struct drm_encoder *encoder;
+
+ encoder = old_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+
+ encoder = new_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+ }
+
ret = mode_valid(state);
if (ret)
return ret;
@@ -2215,7 +2241,9 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* when a job is queued, and any change to the pipeline that does not touch the
* connector is leading to timeouts when calling
* drm_atomic_helper_wait_for_vblanks() or
- * drm_atomic_helper_wait_for_flip_done().
+ * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
+ * connectors, this function can also fake VBLANK events for CRTCs without
+ * VBLANK interrupt.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -3508,3 +3536,44 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
+ * the input end of a bridge
+ * @bridge: bridge control structure
+ * @bridge_state: new bridge state
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ * @output_fmt: tested output bus format
+ * @num_input_fmts: will contain the size of the returned array
+ *
+ * This helper is a pluggable implementation of the
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
+ * modify the bus configuration between their input and their output. It
+ * returns an array of input formats with a single element set to @output_fmt.
+ *
+ * RETURNS:
+ * a valid format array of size @num_input_fmts, or NULL if the allocation
+ * failed
+ */
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
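Combined with the default state helpers added in drm_atomic_state_helper.c below, this lets a transparent bridge opt into bus format negotiation without driver-specific code. A hypothetical hookup (example_attach is illustrative):

static const struct drm_bridge_funcs example_passthrough_funcs = {
	.attach = example_attach,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	/* Report on the input whatever format was requested on the output. */
	.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
};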
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 7cf3cf936547..8fce6a115dfe 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -551,3 +552,104 @@ void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj
memcpy(state, obj->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
+
+/**
+ * __drm_atomic_helper_bridge_duplicate_state() - Copy atomic bridge state
+ * @bridge: bridge object
+ * @state: atomic bridge state
+ *
+ * Copies atomic state from a bridge's current state and resets inferred values.
+ * This is useful for drivers that subclass the bridge state.
+ */
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ __drm_atomic_helper_private_obj_duplicate_state(&bridge->base,
+ &state->base);
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_duplicate_state() - Duplicate a bridge state object
+ * @bridge: bridge object
+ *
+ * Allocates a new bridge state and initializes it with the current bridge
+ * state values. This helper is meant to be used as a bridge
+ * &drm_bridge_funcs.atomic_duplicate_state hook for bridges that don't
+ * subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *new;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (new)
+ __drm_atomic_helper_bridge_duplicate_state(bridge, new);
+
+ return new;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_destroy_state() - Destroy a bridge state object
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to destroy
+ *
+ * Destroys a bridge state previously created by
+ * &drm_atomic_helper_bridge_reset() or
+ * &drm_atomic_helper_bridge_duplicate_state(). This helper is meant to be
+ * used as a bridge &drm_bridge_funcs.atomic_destroy_state hook for bridges
+ * that don't subclass the bridge state.
+ */
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_destroy_state);
+
+/**
+ * __drm_atomic_helper_bridge_reset() - Initialize a bridge state to its
+ * default
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to initialize
+ *
+ * Initializes the bridge state to default values. This is meant to be called
+ * by the bridge &drm_bridge_funcs.atomic_reset hook for bridges that subclass
+ * the bridge state.
+ */
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ memset(state, 0, sizeof(*state));
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset);
+
+/**
+ * drm_atomic_helper_bridge_reset() - Allocate and initialize a bridge state
+ * to its default
+ * @bridge: the bridge this state refers to
+ *
+ * Allocates the bridge state and initializes it to default values. This helper
+ * is meant to be used as a bridge &drm_bridge_funcs.atomic_reset hook for
+ * bridges that don't subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *bridge_state;
+
+ bridge_state = kzalloc(sizeof(*bridge_state), GFP_KERNEL);
+ if (!bridge_state)
+ return ERR_PTR(-ENOMEM);
+
+ __drm_atomic_helper_bridge_reset(bridge, bridge_state);
+ return bridge_state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_reset);
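For drivers that need private data in the bridge state, the double-underscore variants above support subclassing. A hypothetical sketch (the example_* names and the test_pattern field are illustrative):

struct example_bridge_state {
	struct drm_bridge_state base;
	u32 test_pattern;	/* driver-private field */
};

static struct drm_bridge_state *
example_atomic_duplicate_state(struct drm_bridge *bridge)
{
	struct example_bridge_state *cur, *new;

	cur = container_of(drm_priv_to_bridge_state(bridge->base.state),
			   struct example_bridge_state, base);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	__drm_atomic_helper_bridge_duplicate_state(bridge, &new->base);
	new->test_pattern = cur->test_pattern;	/* copy private fields */

	return &new->base;
}

The matching destroy hook can stay drm_atomic_helper_bridge_destroy_state(): the base state is the first member, so freeing it frees the whole subclassed object.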
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index cc9acd986c68..531b876d0ed8 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -153,11 +153,6 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
return -ENOMEM;
}
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, fpriv->master);
- if (ret)
- goto out_err;
- }
fpriv->is_master = 1;
fpriv->authenticated = 1;
@@ -332,9 +327,6 @@ static void drm_master_destroy(struct kref *kref)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c2cf0c90fa26..afdec8e5fc68 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
@@ -38,26 +39,56 @@
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge::
+ * either connected to it directly, or through a chain of bridges::
*
- * encoder ---> bridge B ---> bridge A
+ * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
*
- * Here, the output of the encoder feeds to bridge B, and that furthers feeds to
- * bridge A.
+ * Here, the output of the encoder feeds to bridge A, and that further feeds to
+ * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
+ * Chaining multiple bridges to the output of a bridge, or the same bridge to
+ * the output of different bridges, is not supported.
*
- * The driver using the bridge is responsible to make the associations between
- * the encoder and bridges. Once these links are made, the bridges will
- * participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in &drm_bridge_funcs.
+ * Display drivers are responsible for linking encoders with the first bridge
+ * in the chains. This is done by acquiring the appropriate bridge with
+ * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
+ * panel with drm_panel_bridge_add_typed() (or the managed version
+ * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
+ * attached to the encoder with a call to drm_bridge_attach().
*
- * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
+ * Bridges are responsible for linking themselves with the next bridge in the
+ * chain, if any. This is done the same way as for encoders, with the call to
+ * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
+ *
+ * Once these links are created, the bridges can participate along with encoder
+ * functions to perform mode validation and fixup (through
+ * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
+ * setting (through drm_bridge_chain_mode_set()), enable (through
+ * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
+ * and disable (through drm_atomic_bridge_chain_disable() and
+ * drm_atomic_bridge_chain_post_disable()). Those functions call the
+ * corresponding operations provided in &drm_bridge_funcs in sequence for all
+ * bridges in the chain.
+ *
+ * For display drivers that use the atomic helpers
+ * drm_atomic_helper_check_modeset(),
+ * drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
+ * commit check and commit tail handlers, or through the higher-level
+ * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
+ * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
+ * requires no intervention from the driver. For other drivers, the relevant
+ * DRM bridge chain functions shall be called manually.
+ *
+ * Bridges also participate in implementing the &drm_connector at the end of
+ * the bridge chain. Display drivers may use the drm_bridge_connector_init()
+ * helper to create the &drm_connector, or implement it manually on top of the
+ * connector-related operations exposed by the bridge (see the overview
+ * documentation of bridge operations for more details).
+ *
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
* CRTCs, encoders or connectors and hence are not visible to userspace. They
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
- *
- * Bridges can also be chained up using the &drm_bridge.chain_node field.
- *
- * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
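A hypothetical sketch of the linking described above, for a display driver whose output port leads either to a panel or to the first bridge of a chain (names are illustrative, an OF graph layout is assumed):

static int example_attach_first_bridge(struct drm_device *drm,
				       struct drm_encoder *encoder)
{
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	/* port@1 leads either to a panel or to the first bridge. */
	ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 1, 0,
					  &panel, &bridge);
	if (ret)
		return ret;	/* may be -EPROBE_DEFER */

	if (panel) {
		/* Wrap the panel in a bridge to get a uniform chain. */
		bridge = drm_panel_bridge_add_typed(panel,
						    DRM_MODE_CONNECTOR_DPI);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);
	}

	return drm_bridge_attach(encoder, bridge, NULL, 0);
}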
static DEFINE_MUTEX(bridge_lock);
@@ -70,6 +101,8 @@ static LIST_HEAD(bridge_list);
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ mutex_init(&bridge->hpd_mutex);
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
@@ -86,15 +119,43 @@ void drm_bridge_remove(struct drm_bridge *bridge)
mutex_lock(&bridge_lock);
list_del_init(&bridge->list);
mutex_unlock(&bridge_lock);
+
+ mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);
+static struct drm_private_state *
+drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
+{
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_duplicate_state(bridge);
+ return state ? &state->base : NULL;
+}
+
+static void
+drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
+ struct drm_private_state *s)
+{
+ struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+
+ bridge->funcs->atomic_destroy_state(bridge, state);
+}
+
+static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
+ .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
+ .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
+};
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
* @encoder: DRM encoder
* @bridge: bridge to attach
* @previous: previous bridge in the chain (optional)
+ * @flags: DRM_BRIDGE_ATTACH_* flags
*
* Called by a kms driver to link the bridge to an encoder's chain. The previous
* argument specifies the previous bridge in the chain. If NULL, the bridge is
@@ -112,7 +173,8 @@ EXPORT_SYMBOL(drm_bridge_remove);
* Zero on success, error code on failure
*/
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
- struct drm_bridge *previous)
+ struct drm_bridge *previous,
+ enum drm_bridge_attach_flags flags)
{
int ret;
@@ -134,16 +196,36 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge);
- if (ret < 0) {
- list_del(&bridge->chain_node);
- bridge->dev = NULL;
- bridge->encoder = NULL;
- return ret;
+ ret = bridge->funcs->attach(bridge, flags);
+ if (ret < 0)
+ goto err_reset_bridge;
+ }
+
+ if (bridge->funcs->atomic_reset) {
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_reset(bridge);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ goto err_detach_bridge;
}
+
+ drm_atomic_private_obj_init(bridge->dev, &bridge->base,
+ &state->base,
+ &drm_bridge_priv_state_funcs);
}
return 0;
+
+err_detach_bridge:
+ if (bridge->funcs->detach)
+ bridge->funcs->detach(bridge);
+
+err_reset_bridge:
+ bridge->dev = NULL;
+ bridge->encoder = NULL;
+ list_del(&bridge->chain_node);
+ return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
@@ -155,6 +237,9 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
+ if (bridge->funcs->atomic_reset)
+ drm_atomic_private_obj_fini(&bridge->base);
+
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
@@ -163,14 +248,92 @@ void drm_bridge_detach(struct drm_bridge *bridge)
}
/**
- * DOC: bridge callbacks
+ * DOC: bridge operations
+ *
+ * Bridge drivers expose operations through the &drm_bridge_funcs structure.
+ * The DRM internals (atomic and CRTC helpers) use the helpers defined in
+ * drm_bridge.c to call bridge operations. Those operations are divided in
+ * three big categories to support different parts of the bridge usage.
+ *
+ * - The encoder-related operations support control of the bridges in the
+ * chain, and are roughly counterparts to the &drm_encoder_helper_funcs
+ * operations. They are used by the legacy CRTC and the atomic modeset
+ * helpers to perform mode validation, fixup and setting, and enable and
+ * disable the bridge automatically.
+ *
+ * The enable and disable operations are split in
+ * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
+ * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
+ * finer-grained control.
+ *
+ * Bridge drivers may implement the legacy version of those operations, or
+ * the atomic version (prefixed with atomic\_), in which case they shall also
+ * implement the atomic state bookkeeping operations
+ * (&drm_bridge_funcs.atomic_duplicate_state,
+ * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
+ * Mixing atomic and non-atomic versions of the operations is not supported.
+ *
+ * - The bus format negotiation operations
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
+ * negotiate the formats transmitted between bridges in the chain when
+ * multiple formats are supported. Negotiation for formats is performed
+ * transparently for display drivers by the atomic modeset helpers. Only
+ * atomic versions of those operations exist, bridge drivers that need to
+ * implement them shall thus also implement the atomic version of the
+ * encoder-related operations. This feature is not supported by the legacy
+ * CRTC helpers.
+ *
+ * - The connector-related operations support implementing a &drm_connector
+ * based on a chain of bridges. DRM bridges traditionally create a
+ * &drm_connector for bridges meant to be used at the end of the chain. This
+ * puts additional burden on bridge drivers, especially for bridges that may
+ * be used in the middle of a chain or at the end of it. Furthermore, it
+ * requires all operations of the &drm_connector to be handled by a single
+ * bridge, which doesn't always match the hardware architecture.
+ *
+ * To simplify bridge drivers and make the connector implementation more
+ * flexible, a new model allows bridges to unconditionally skip creation of
+ * &drm_connector and instead expose &drm_bridge_funcs operations to support
+ * an externally-implemented &drm_connector. Those operations are
+ * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
+ * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
+ * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
+ * implemented, display drivers shall create a &drm_connector instance for
+ * each chain of bridges, and implement those connector instances based on
+ * the bridge connector operations.
*
- * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
- * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific &drm_bridge_funcs op for all the bridges
- * during encoder configuration.
+ * Bridge drivers shall implement the connector-related operations for all
+ * the features that the bridge hardware supports. For instance, if a bridge
+ * supports reading EDID, the &drm_bridge_funcs.get_edid operation shall be
+ * implemented. This however doesn't mean that the DDC lines are wired to the
+ * bridge on a particular platform, as they could also be connected to an I2C
+ * controller of the SoC. Support for the connector-related operations on the
+ * running platform is reported through the &drm_bridge.ops flags. Bridge
+ * drivers shall detect which operations they can support on the platform
+ * (usually this information is provided by ACPI or DT), and set the
+ * &drm_bridge.ops flags for all supported operations. A flag shall only be
+ * set if the corresponding &drm_bridge_funcs operation is implemented, but
+ * an implemented operation doesn't necessarily imply that the corresponding
+ * flag will be set. Display drivers shall use the &drm_bridge.ops flags to
+ * decide which bridge to delegate a connector operation to. This mechanism
+ * allows providing a single static const &drm_bridge_funcs instance in
+ * bridge drivers, improving security by storing function pointers in
+ * read-only memory.
*
- * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
+ * In order to ease transition, bridge drivers may support both the old and
+ * new models by making connector creation optional and implementing the
+ * connector-related bridge operations. Connector creation is then controlled
+ * by the flags argument to the drm_bridge_attach() function. Display drivers
+ * that support the new model and create connectors themselves shall set the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
+ * connector creation. For intermediate bridges in the chain, the flag shall
+ * be passed to the drm_bridge_attach() call for the downstream bridge.
+ * Bridge drivers that implement the new model only shall return an error
+ * from their &drm_bridge_funcs.attach handler when the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
+ * should use the new model, and convert the bridge drivers they use if
+ * needed, in order to gradually transition to the new model.
*/
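A hypothetical probe-time sketch of the ops reporting described above (the example_bridge structure and its fields are illustrative):

static void example_bridge_set_ops(struct example_bridge *ebridge)
{
	ebridge->bridge.funcs = &example_bridge_funcs;
	ebridge->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
	ebridge->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_MODES;

	/* The DDC lines may be wired to an SoC I2C controller instead. */
	if (ebridge->ddc)
		ebridge->bridge.ops |= DRM_BRIDGE_OP_EDID;

	/* Report HPD only when the interrupt line is actually wired. */
	if (ebridge->hpd_irq >= 0)
		ebridge->bridge.ops |= DRM_BRIDGE_OP_HPD;
}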
/**
@@ -409,10 +572,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_disable)
- iter->funcs->atomic_disable(iter, old_state);
- else if (iter->funcs->disable)
+ if (iter->funcs->atomic_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_disable(iter, old_bridge_state);
+ } else if (iter->funcs->disable) {
iter->funcs->disable(iter);
+ }
if (iter == bridge)
break;
@@ -443,10 +615,20 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_post_disable)
- bridge->funcs->atomic_post_disable(bridge, old_state);
- else if (bridge->funcs->post_disable)
+ if (bridge->funcs->atomic_post_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_post_disable(bridge,
+ old_bridge_state);
+ } else if (bridge->funcs->post_disable) {
bridge->funcs->post_disable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
@@ -475,10 +657,19 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_pre_enable)
- iter->funcs->atomic_pre_enable(iter, old_state);
- else if (iter->funcs->pre_enable)
+ if (iter->funcs->atomic_pre_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_pre_enable(iter, old_bridge_state);
+ } else if (iter->funcs->pre_enable) {
iter->funcs->pre_enable(iter);
+ }
if (iter == bridge)
break;
@@ -508,14 +699,498 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_enable)
- bridge->funcs->atomic_enable(bridge, old_state);
- else if (bridge->funcs->enable)
+ if (bridge->funcs->atomic_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_enable(bridge, old_bridge_state);
+ } else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
+static int drm_atomic_bridge_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ if (bridge->funcs->atomic_check) {
+ struct drm_bridge_state *bridge_state;
+ int ret;
+
+ bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ bridge);
+ if (WARN_ON(!bridge_state))
+ return -EINVAL;
+
+ ret = bridge->funcs->atomic_check(bridge, bridge_state,
+ crtc_state, conn_state);
+ if (ret)
+ return ret;
+ } else if (bridge->funcs->mode_fixup) {
+ if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
+ &crtc_state->adjusted_mode))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
+ struct drm_bridge *cur_bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 out_bus_fmt)
+{
+ struct drm_bridge_state *cur_state;
+ unsigned int num_in_bus_fmts, i;
+ struct drm_bridge *prev_bridge;
+ u32 *in_bus_fmts;
+ int ret;
+
+ prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
+ cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ cur_bridge);
+
+ /*
+ * If bus format negotiation is not supported by this bridge, let's
+ * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
+ * hope that it can handle this situation gracefully (by providing
+ * appropriate default values).
+ */
+ if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
+ if (cur_bridge != first_bridge) {
+ ret = select_bus_fmt_recursive(first_bridge,
+ prev_bridge, crtc_state,
+ conn_state,
+ MEDIA_BUS_FMT_FIXED);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Driver does not implement the atomic state hooks, but that's
+ * fine, as long as it does not access the bridge state.
+ */
+ if (cur_state) {
+ cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ return 0;
+ }
+
+ /*
+ * If the driver implements ->atomic_get_input_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!cur_state))
+ return -EINVAL;
+
+ in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
+ cur_state,
+ crtc_state,
+ conn_state,
+ out_bus_fmt,
+ &num_in_bus_fmts);
+ if (!num_in_bus_fmts)
+ return -ENOTSUPP;
+ else if (!in_bus_fmts)
+ return -ENOMEM;
+
+ if (first_bridge == cur_bridge) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[0];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ kfree(in_bus_fmts);
+ return 0;
+ }
+
+ for (i = 0; i < num_in_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
+ crtc_state, conn_state,
+ in_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ if (!ret) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[i];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ kfree(in_bus_fmts);
+ return ret;
+}
+
+/*
+ * This function is called by &drm_atomic_bridge_chain_check() just before
+ * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
+ * It performs bus format negotiation between bridge elements. The negotiation
+ * happens in reverse order, starting from the last element in the chain up to
+ * @bridge.
+ *
+ * Negotiation starts by retrieving supported output bus formats on the last
+ * bridge element and testing them one by one. The test is recursive, meaning
+ * that for each tested output format, the whole chain will be walked backward,
+ * and each element will have to choose an input bus format that can be
+ * transcoded to the requested output format. When a bridge element does not
+ * support transcoding into a specific output format, -ENOTSUPP is returned and
+ * the next bridge element will have to try a different format. If none of the
+ * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
+ *
+ * This implementation is relying on
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
+ * input/output formats.
+ *
+ * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
+ * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
+ * tries a single format: &drm_connector.display_info.bus_formats[0] if
+ * available, MEDIA_BUS_FMT_FIXED otherwise.
+ *
+ * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
+ * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
+ * bridge element that lacks this hook and asks the previous element in the
+ * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
+ * to do in that case (fail if they want to enforce bus format negotiation, or
+ * provide a reasonable default if they need to support pipelines where not
+ * all elements support bus format negotiation).
+ */
+static int
+drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_bridge_state *last_bridge_state;
+ unsigned int i, num_out_bus_fmts;
+ struct drm_bridge *last_bridge;
+ u32 *out_bus_fmts;
+ int ret = 0;
+
+ last_bridge = list_last_entry(&encoder->bridge_chain,
+ struct drm_bridge, chain_node);
+ last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ last_bridge);
+
+ if (last_bridge->funcs->atomic_get_output_bus_fmts) {
+ const struct drm_bridge_funcs *funcs = last_bridge->funcs;
+
+ /*
+ * If the driver implements ->atomic_get_output_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!last_bridge_state))
+ return -EINVAL;
+
+ out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
+ last_bridge_state,
+ crtc_state,
+ conn_state,
+ &num_out_bus_fmts);
+ if (!num_out_bus_fmts)
+ return -ENOTSUPP;
+ else if (!out_bus_fmts)
+ return -ENOMEM;
+ } else {
+ num_out_bus_fmts = 1;
+ out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
+ if (!out_bus_fmts)
+ return -ENOMEM;
+
+ if (conn->display_info.num_bus_formats &&
+ conn->display_info.bus_formats)
+ out_bus_fmts[0] = conn->display_info.bus_formats[0];
+ else
+ out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ }
+
+ for (i = 0; i < num_out_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
+ conn_state, out_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ kfree(out_bus_fmts);
+
+ return ret;
+}
+
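On the bridge side, a hypothetical converter (parallel RGB in, LVDS out) would take part in this negotiation as sketched below; returning NULL with *num_input_fmts set to 0 makes the recursion above move on to the next candidate output format:

static u32 *
example_get_input_bus_fmts(struct drm_bridge *bridge,
			   struct drm_bridge_state *bridge_state,
			   struct drm_crtc_state *crtc_state,
			   struct drm_connector_state *conn_state,
			   u32 output_fmt,
			   unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	/* Reject output formats this bridge cannot produce. */
	if (output_fmt != MEDIA_BUS_FMT_RGB888_1X7X4_SPWG &&
	    output_fmt != MEDIA_BUS_FMT_FIXED)
		return NULL;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
	*num_input_fmts = 1;

	return input_fmts;
}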
+static void
+drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
+ struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_state *bridge_state, *next_bridge_state;
+ struct drm_bridge *next_bridge;
+ u32 output_flags = 0;
+
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+
+ /* No bridge state attached to this bridge => nothing to propagate. */
+ if (!bridge_state)
+ return;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+
+ /*
+ * Let's try to apply the most common case here, that is, propagate
+ * display_info flags for the last bridge, and propagate the input
+ * flags of the next bridge element to the output end of the current
+ * bridge when the bridge is not the last one.
+ * There are exceptions to this rule, like when signal inversion is
+ * happening at the board level, but that's something drivers can deal
+ * with from their &drm_bridge_funcs.atomic_check() implementation by
+ * simply overriding the flags value we've set here.
+ */
+ if (!next_bridge) {
+ output_flags = conn->display_info.bus_flags;
+ } else {
+ next_bridge_state = drm_atomic_get_new_bridge_state(state,
+ next_bridge);
+ /*
+ * No bridge state attached to the next bridge, just leave the
+ * flags to 0.
+ */
+ if (next_bridge_state)
+ output_flags = next_bridge_state->input_bus_cfg.flags;
+ }
+
+ bridge_state->output_bus_cfg.flags = output_flags;
+
+ /*
+ * Propagate the output flags to the input end of the bridge. Again, it's
+ * not necessarily what all bridges want, but that's what most of them
+ * do, and by doing that by default we avoid forcing drivers to
+ * duplicate the "dummy propagation" logic.
+ */
+ bridge_state->input_bus_cfg.flags = output_flags;
+}
+
+/**
+ * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
+ * @bridge: bridge control structure
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ *
+ * First triggers a bus format negotiation, then calls the
+ * &drm_bridge_funcs.atomic_check() (falling back on
+ * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
+ * starting from the last bridge and moving to the first. These are called
+ * before &drm_encoder_helper_funcs.atomic_check().
+ *
+ * RETURNS:
+ * 0 on success, a negative error code on failure
+ */
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+ int ret;
+
+ if (!bridge)
+ return 0;
+
+ ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ /*
+ * Bus flags are propagated by default. If a bridge needs to
+ * tweak the input bus flags for any reason, it should happen
+ * in its &drm_bridge_funcs.atomic_check() implementation such
+ * that preceding bridges in the chain can propagate the new
+ * bus flags.
+ */
+ drm_atomic_bridge_propagate_bus_flags(iter, conn,
+ crtc_state->state);
+
+ ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ if (iter == bridge)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
+
+/**
+ * drm_bridge_detect - check if anything is attached to the bridge output
+ * @bridge: bridge control structure
+ *
+ * If the bridge supports output detection, as reported by the
+ * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
+ * bridge and return the connection status. Otherwise return
+ * connector_status_unknown.
+ *
+ * RETURNS:
+ * The detection status on success, or connector_status_unknown if the bridge
+ * doesn't support output detection.
+ */
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
+ return connector_status_unknown;
+
+ return bridge->funcs->detect(bridge);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_detect);
+
+/**
+ * drm_bridge_get_modes - fill all modes currently valid for the sink into @connector
+ * @bridge: bridge control structure
+ * @connector: the connector to fill with modes
+ *
+ * If the bridge supports output modes retrieval, as reported by the
+ * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
+ * fill the connector with all valid modes and return the number of modes
+ * added. Otherwise return 0.
+ *
+ * RETURNS:
+ * The number of modes added to the connector.
+ */
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
+ return 0;
+
+ return bridge->funcs->get_modes(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
+
+/**
+ * drm_bridge_get_edid - get the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
+ * get the EDID and return it. Otherwise return ERR_PTR(-ENOTSUPP).
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or an error pointer otherwise.
+ */
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+ return ERR_PTR(-ENOTSUPP);
+
+ return bridge->funcs->get_edid(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
+
+/**
+ * drm_bridge_hpd_enable - enable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ * @cb: hot-plug detection callback
+ * @data: data to be passed to the hot-plug detection callback
+ *
+ * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
+ * and @data as hot plug notification callback. From now on the @cb will be
+ * called with @data when an output status change is detected by the bridge,
+ * until hot plug notification gets disabled with drm_bridge_hpd_disable().
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ *
+ * Only one hot plug detection callback can be registered at a time; it is an
+ * error to call this function when hot plug detection is already enabled for
+ * the bridge.
+ */
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+ void (*cb)(void *data,
+ enum drm_connector_status status),
+ void *data)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+
+ if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
+ goto unlock;
+
+ bridge->hpd_cb = cb;
+ bridge->hpd_data = data;
+
+ if (bridge->funcs->hpd_enable)
+ bridge->funcs->hpd_enable(bridge);
+
+unlock:
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
+
+/**
+ * drm_bridge_hpd_disable - disable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ *
+ * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
+ * plug detection callback previously registered with drm_bridge_hpd_enable().
+ * Once this function returns the callback will not be called by the bridge
+ * when an output status change occurs.
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ */
+void drm_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->funcs->hpd_disable)
+ bridge->funcs->hpd_disable(bridge);
+
+ bridge->hpd_cb = NULL;
+ bridge->hpd_data = NULL;
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
+
+/**
+ * drm_bridge_hpd_notify - notify hot plug detection events
+ * @bridge: bridge control structure
+ * @status: output connection status
+ *
+ * Bridge drivers shall call this function to report hot plug events when they
+ * detect a change in the output status, when hot plug detection has been
+ * enabled by drm_bridge_hpd_enable().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
+{
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->hpd_cb)
+ bridge->hpd_cb(bridge->hpd_data, status);
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
+
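A hypothetical bridge-side sketch: a driver backing DRM_BRIDGE_OP_HPD typically arms its interrupt from &drm_bridge_funcs.hpd_enable and reports changes with drm_bridge_hpd_notify(). The sketch assumes a threaded interrupt requested with IRQ_NOAUTOEN; the example_* names are illustrative:

static void example_hpd_enable(struct drm_bridge *bridge)
{
	struct example_bridge *ebridge = to_example_bridge(bridge);

	enable_irq(ebridge->hpd_irq);
}

static void example_hpd_disable(struct drm_bridge *bridge)
{
	struct example_bridge *ebridge = to_example_bridge(bridge);

	disable_irq(ebridge->hpd_irq);
}

static irqreturn_t example_hpd_isr(int irq, void *data)
{
	struct example_bridge *ebridge = data;

	/* Thread context, so the sleeping notifier is safe to call. */
	drm_bridge_hpd_notify(&ebridge->bridge,
			      example_detect(&ebridge->bridge));

	return IRQ_HANDLED;
}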
#ifdef CONFIG_OF
/**
* of_drm_find_bridge - find the bridge corresponding to the device node in
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
new file mode 100644
index 000000000000..c6994fe673f3
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The DRM bridge connector helper object provides a DRM connector
+ * implementation that wraps a chain of &struct drm_bridge. The connector
+ * operations are fully implemented based on the operations of the bridges in
+ * the chain, and don't require any intervention from the display controller
+ * driver at runtime.
+ *
+ * To use the helper, display controller drivers create a bridge connector with
+ * a call to drm_bridge_connector_init(). This associates the newly created
+ * connector with the chain of bridges passed to the function and registers it
+ * with the DRM device. At that point the connector becomes fully usable, no
+ * further operation is needed.
+ *
+ * The DRM bridge connector operations are implemented based on the operations
+ * provided by the bridges in the chain. Each connector operation is delegated
+ * to the bridge closest to the connector (at the end of the chain) that
+ * provides the relevant functionality.
+ *
+ * To make use of this helper, all bridges in the chain shall report bridge
+ * operation flags (&drm_bridge->ops) and bridge output type
+ * (&drm_bridge->type), as well as the DRM_BRIDGE_ATTACH_NO_CONNECTOR attach
+ * flag (none of the bridges shall create a DRM connector directly).
+ */
+
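A minimal usage sketch (hypothetical names), tying this helper to the DRM_BRIDGE_ATTACH_NO_CONNECTOR flag discussed in drm_bridge.c above:

static int example_connector_init(struct drm_device *drm,
				  struct drm_encoder *encoder)
{
	struct drm_connector *connector;

	/* All bridges were attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}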
+/**
+ * struct drm_bridge_connector - A connector backed by a chain of bridges
+ */
+struct drm_bridge_connector {
+ /**
+ * @base: The base DRM connector
+ */
+ struct drm_connector base;
+ /**
+ * @encoder:
+ *
+ * The encoder at the start of the bridges chain.
+ */
+ struct drm_encoder *encoder;
+ /**
+ * @bridge_edid:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * EDID read support, if any (see &DRM_BRIDGE_OP_EDID).
+ */
+ struct drm_bridge *bridge_edid;
+ /**
+ * @bridge_hpd:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * hot-plug detection notification, if any (see &DRM_BRIDGE_OP_HPD).
+ */
+ struct drm_bridge *bridge_hpd;
+ /**
+ * @bridge_detect:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector detection, if any (see &DRM_BRIDGE_OP_DETECT).
+ */
+ struct drm_bridge *bridge_detect;
+ /**
+ * @bridge_modes:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector modes detection, if any (see &DRM_BRIDGE_OP_MODES).
+ */
+ struct drm_bridge *bridge_modes;
+};
+
+#define to_drm_bridge_connector(x) \
+ container_of(x, struct drm_bridge_connector, base)
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Hot-Plug Handling
+ */
+
+static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /* Notify all bridges in the pipeline of hotplug events. */
+ drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ if (bridge->funcs->hpd_notify)
+ bridge->funcs->hpd_notify(bridge, status);
+ }
+}
+
+static void drm_bridge_connector_hpd_cb(void *cb_data,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *drm_bridge_connector = cb_data;
+ struct drm_connector *connector = &drm_bridge_connector->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+
+ mutex_lock(&dev->mode_config.mutex);
+ old_status = connector->status;
+ connector->status = status;
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (old_status == status)
+ return;
+
+ drm_bridge_connector_hpd_notify(connector, status);
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+/**
+ * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
+ * @connector: The DRM bridge connector
+ *
+ * This function enables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their resume handler.
+ */
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
+ bridge_connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
+
+/**
+ * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
+ * connector
+ * @connector: The DRM bridge connector
+ *
+ * This function disables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their suspend handler.
+ */
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_disable(hpd);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Functions
+ */
+
+static enum drm_connector_status
+drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *detect = bridge_connector->bridge_detect;
+ enum drm_connector_status status;
+
+ if (detect) {
+ status = detect->funcs->detect(detect);
+
+ drm_bridge_connector_hpd_notify(connector, status);
+ } else {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
+ status = connector_status_connected;
+ break;
+ default:
+ status = connector_status_unknown;
+ break;
+ }
+ }
+
+ return status;
+}
+
+static void drm_bridge_connector_destroy(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hpd) {
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ drm_bridge_hpd_disable(hpd);
+ }
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(bridge_connector);
+}
+
+static const struct drm_connector_funcs drm_bridge_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = drm_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_bridge_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Helper Functions
+ */
+
+static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ enum drm_connector_status status;
+ struct edid *edid;
+ int n;
+
+ status = drm_bridge_connector_detect(connector, false);
+ if (status != connector_status_connected)
+ goto no_edid;
+
+ edid = bridge->funcs->get_edid(bridge, connector);
+ if (!edid || !drm_edid_is_valid(edid)) {
+ kfree(edid);
+ goto no_edid;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+ return n;
+
+no_edid:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
+static int drm_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /*
+ * If the display exposes an EDID, parse it in the normal way to build
+ * the table of supported modes.
+ */
+ bridge = bridge_connector->bridge_edid;
+ if (bridge)
+ return drm_bridge_connector_get_modes_edid(connector, bridge);
+
+ /*
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
+ */
+ bridge = bridge_connector->bridge_modes;
+ if (bridge)
+ return bridge->funcs->get_modes(bridge, connector);
+
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
+ .get_modes = drm_bridge_connector_get_modes,
+ /* No need for .mode_valid(), the bridges are checked by the core. */
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Initialisation
+ */
+
+/**
+ * drm_bridge_connector_init - Initialise a connector for a chain of bridges
+ * @drm: the DRM device
+ * @encoder: the encoder where the bridge chain starts
+ *
+ * Allocate, initialise and register a &drm_bridge_connector with the @drm
+ * device. The connector is associated with a chain of bridges that starts at
+ * the @encoder. All bridges in the chain shall report bridge operation flags
+ * (&drm_bridge->ops) and bridge output type (&drm_bridge->type), and none of
+ * them may create a DRM connector directly.
+ *
+ * Returns a pointer to the new connector on success, or an error pointer
+ * (see ERR_PTR()) encoding a negative errno on failure.
+ */
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_connector *bridge_connector;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc = NULL;
+ struct drm_bridge *bridge;
+ int connector_type;
+
+ bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
+ if (!bridge_connector)
+ return ERR_PTR(-ENOMEM);
+
+ bridge_connector->encoder = encoder;
+
+ /*
+ * TODO: Handle doublescan_allowed, stereo_allowed and
+ * ycbcr_420_allowed.
+ */
+ connector = &bridge_connector->base;
+ connector->interlace_allowed = true;
+
+ /*
+ * Initialise connector status handling. First locate the bridges in
+ * the pipeline closest to the connector that support HPD and output
+ * detection. Then initialise the connector polling mode, using HPD if
+ * available and falling back to polling if supported. If neither HPD
+ * nor output detection is available, we don't support hotplug
+ * detection at all.
+ */
+ connector_type = DRM_MODE_CONNECTOR_Unknown;
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->interlace_allowed)
+ connector->interlace_allowed = false;
+
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ bridge_connector->bridge_edid = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ bridge_connector->bridge_hpd = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ bridge_connector->bridge_detect = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ bridge_connector->bridge_modes = bridge;
+
+ if (!drm_bridge_get_next_bridge(bridge))
+ connector_type = bridge->type;
+
+ if (bridge->ddc)
+ ddc = bridge->ddc;
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
+ kfree(bridge_connector);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drm_connector_init_with_ddc(drm, connector, &drm_bridge_connector_funcs,
+ connector_type, ddc);
+ drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
+
+ if (bridge_connector->bridge_hpd)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (bridge_connector->bridge_detect)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return connector;
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
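Putting the helper to use from a display controller driver amounts to attaching the chain without connector creation and then wrapping it. A rough sketch, assuming the attach-flags variant of drm_bridge_attach() from this same series and an already initialised encoder; mydrv_output_init is a hypothetical name and error unwinding is abbreviated:

	#include <linux/err.h>

	#include <drm/drm_bridge.h>
	#include <drm/drm_bridge_connector.h>
	#include <drm/drm_connector.h>

	static int mydrv_output_init(struct drm_device *drm,
				     struct drm_encoder *encoder,
				     struct drm_bridge *bridge)
	{
		struct drm_connector *connector;
		int ret;

		/* No bridge in the chain may create its own connector. */
		ret = drm_bridge_attach(encoder, bridge, NULL,
					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
		if (ret)
			return ret;

		/* One connector wrapping the whole chain. */
		connector = drm_bridge_connector_init(drm, encoder);
		if (IS_ERR(connector))
			return PTR_ERR(connector);

		return drm_connector_attach_encoder(connector, encoder);
	}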
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8ce9d73fab4f..dcabf5698333 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -134,7 +134,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
shift, add);
}
-/**
+/*
* Core function to create a range of memory available for mapping by a
* non-root process.
*
@@ -149,7 +149,6 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
{
struct drm_local_map *map;
struct drm_map_list *list;
- drm_dma_handle_t *dmah;
unsigned long user_token;
int ret;
@@ -324,14 +323,14 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size);
- if (!dmah) {
+ map->handle = dma_alloc_coherent(&dev->pdev->dev,
+ map->size,
+ &map->offset,
+ GFP_KERNEL);
+ if (!map->handle) {
kfree(map);
return -ENOMEM;
}
- map->handle = dmah->vaddr;
- map->offset = (unsigned long)dmah->busaddr;
- kfree(dmah);
break;
default:
kfree(map);
@@ -399,7 +398,7 @@ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_legacy_findmap);
-/**
+/*
* Ioctl to specify a range of memory that is available for mapping by a
* non-root process.
*
@@ -500,7 +499,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
*
@@ -513,7 +512,6 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
- drm_dma_handle_t dmah;
int found = 0;
struct drm_master *master;
@@ -554,10 +552,10 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -661,7 +659,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Cleanup after an error on one of the addbufs() functions.
*
* \param dev DRM device.
@@ -696,7 +694,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
}
#if IS_ENABLED(CONFIG_AGP)
-/**
+/*
* Add AGP buffers for DMA transfers.
*
* \param dev struct drm_device to which the buffers are to be added.
@@ -1232,7 +1230,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
return 0;
}
-/**
+/*
* Add buffers for DMA transfers (ioctl).
*
* \param inode device inode.
@@ -1273,7 +1271,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Get information about the buffer mappings.
*
 * This was originally meant for debugging purposes, or by a sophisticated
@@ -1364,7 +1362,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
-/**
+/*
* Specifies a low and high water mark for buffer allocation
*
* \param inode device inode.
@@ -1413,7 +1411,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unreserve the buffers in list, previously reserved using drmDMA.
*
* \param inode device inode.
@@ -1465,7 +1463,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Maps all of the DMA buffers into client-virtual space (ioctl).
*
* \param inode device inode.
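The _DRM_CONSISTENT hunks above swap the legacy drm_pci_alloc()/__drm_legacy_pci_free() wrappers for the bare DMA-mapping API. The two calls pair up as follows; a minimal sketch, with pdev and size standing in for the driver's actual device and buffer length:

	dma_addr_t bus_addr;
	void *vaddr;

	/* Coherent allocation: vaddr for the CPU, bus_addr for the device. */
	vaddr = dma_alloc_coherent(&pdev->dev, size, &bus_addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... program bus_addr into the hardware, touch vaddr from the CPU ... */

	/* Free with exactly the size and addresses the allocation returned. */
	dma_free_coherent(&pdev->dev, size, vaddr, bus_addr);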
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index b031b45aa8ef..6b0c6ef8b9b3 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright 2018 Noralf Trønnes
*/
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 3035584f6dc7..7443114bd713 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -1095,15 +1095,17 @@ out:
}
/**
- * drm_client_modeset_commit_force() - Force commit CRTC configuration
+ * drm_client_modeset_commit_locked() - Force commit CRTC configuration
* @client: DRM client
*
- * Commit modeset configuration to crtcs without checking if there is a DRM master.
+ * Commit modeset configuration to crtcs without checking if there is a DRM
+ * master. The assumption is that the caller already holds an internal DRM
+ * master reference acquired with drm_master_internal_acquire().
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_modeset_commit_force(struct drm_client_dev *client)
+int drm_client_modeset_commit_locked(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
@@ -1117,7 +1119,7 @@ int drm_client_modeset_commit_force(struct drm_client_dev *client)
return ret;
}
-EXPORT_SYMBOL(drm_client_modeset_commit_force);
+EXPORT_SYMBOL(drm_client_modeset_commit_locked);
/**
* drm_client_modeset_commit() - Commit CRTC configuration
@@ -1136,7 +1138,7 @@ int drm_client_modeset_commit(struct drm_client_dev *client)
if (!drm_master_internal_acquire(dev))
return -EBUSY;
- ret = drm_client_modeset_commit_force(client);
+ ret = drm_client_modeset_commit_locked(client);
drm_master_internal_release(dev);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2166000ed057..644f0ad10671 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -112,6 +112,21 @@ void drm_connector_ida_destroy(void)
}
/**
+ * drm_get_connector_type_name - return a string for connector type
+ * @type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * Returns: the name of the connector type, or NULL if the type is not valid.
+ */
+const char *drm_get_connector_type_name(unsigned int type)
+{
+ if (type < ARRAY_SIZE(drm_connector_enum_list))
+ return drm_connector_enum_list[type].name;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_get_connector_type_name);
+
+/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
 * @connector: connector to query
*
@@ -140,6 +155,13 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
connector->force = mode->force;
}
+ if (mode->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) {
+ DRM_INFO("cmdline forces connector %s panel_orientation to %d\n",
+ connector->name, mode->panel_orientation);
+ drm_connector_set_panel_orientation(connector,
+ mode->panel_orientation);
+ }
+
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
connector->name, mode->name,
mode->xres, mode->yres,
@@ -1139,7 +1161,8 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* coordinates, so if userspace rotates the picture to adjust for
* the orientation it must also apply the same transformation to the
* touchscreen input coordinates. This property is initialized by calling
- * drm_connector_init_panel_orientation_property().
+ * drm_connector_set_panel_orientation() or
+ * drm_connector_set_panel_orientation_with_quirk()
*
* scaling mode:
* This property defines how a non-native mode is upscaled to the native
@@ -2046,38 +2069,41 @@ void drm_connector_set_vrr_capable_property(
EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
/**
- * drm_connector_init_panel_orientation_property -
- * initialize the connecters panel_orientation property
- * @connector: connector for which to init the panel-orientation property.
- * @width: width in pixels of the panel, used for panel quirk detection
- * @height: height in pixels of the panel, used for panel quirk detection
+ * drm_connector_set_panel_orientation - sets the connector's panel_orientation
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ *
+ * This function sets the connector's panel_orientation and attaches
+ * a "panel orientation" property to the connector.
*
- * This function should only be called for built-in panels, after setting
- * connector->display_info.panel_orientation first (if known).
+ * Calling this function on a connector where the panel_orientation has
+ * already been set is a no-op (e.g. the orientation has been overridden with
+ * a kernel commandline option).
*
- * This function will check for platform specific (e.g. DMI based) quirks
- * overriding display_info.panel_orientation first, then if panel_orientation
- * is not DRM_MODE_PANEL_ORIENTATION_UNKNOWN it will attach the
- * "panel orientation" property to the connector.
+ * It is allowed to call this function with a panel_orientation of
+ * DRM_MODE_PANEL_ORIENTATION_UNKNOWN, in which case it is a no-op.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_connector_init_panel_orientation_property(
- struct drm_connector *connector, int width, int height)
+int drm_connector_set_panel_orientation(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation)
{
struct drm_device *dev = connector->dev;
struct drm_display_info *info = &connector->display_info;
struct drm_property *prop;
- int orientation_quirk;
- orientation_quirk = drm_get_panel_orientation_quirk(width, height);
- if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- info->panel_orientation = orientation_quirk;
+ /* Already set? */
+ if (info->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return 0;
- if (info->panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ /* Don't attach the property if the orientation is unknown */
+ if (panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return 0;
+ info->panel_orientation = panel_orientation;
+
prop = dev->mode_config.panel_orientation_property;
if (!prop) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
@@ -2094,7 +2120,37 @@ int drm_connector_init_panel_orientation_property(
info->panel_orientation);
return 0;
}
-EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
+EXPORT_SYMBOL(drm_connector_set_panel_orientation);
+
+/**
+ * drm_connector_set_panel_orientation_with_quirk -
+ * set the connector's panel_orientation after checking for quirks
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ * @width: width in pixels of the panel, used for panel quirk detection
+ * @height: height in pixels of the panel, used for panel quirk detection
+ *
+ * Like drm_connector_set_panel_orientation(), but with a check for platform
+ * specific (e.g. DMI based) quirks overriding the passed in panel_orientation.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_set_panel_orientation_with_quirk(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation,
+ int width, int height)
+{
+ int orientation_quirk;
+
+ orientation_quirk = drm_get_panel_orientation_quirk(width, height);
+ if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ panel_orientation = orientation_quirk;
+
+ return drm_connector_set_panel_orientation(connector,
+ panel_orientation);
+}
+EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
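A typical consumer of the new split is a built-in panel path: pass whatever orientation the hardware or firmware reported, and let the DMI quirk table override it when the panel is known to be mounted rotated. A sketch, with connector and the native mode coming from the driver and the orientation value illustrative:

	/* Quirks are matched on the panel's native resolution. */
	ret = drm_connector_set_panel_orientation_with_quirk(connector,
			DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
			mode->hdisplay, mode->vdisplay);
	if (ret)
		return ret;

If no quirk matches and the passed-in orientation is UNKNOWN, the call is a no-op and returns 0, so drivers can invoke it unconditionally.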
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 1f802d8e5681..c99be950bf17 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -47,7 +47,7 @@ struct drm_ctx_list {
/** \name Context bitmap support */
/*@{*/
-/**
+/*
* Free a handle from the context bitmap.
*
* \param dev DRM device.
@@ -68,7 +68,7 @@ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* Context bitmap allocation.
*
* \param dev DRM device.
@@ -88,7 +88,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
return ret;
}
-/**
+/*
* Context bitmap initialization.
*
* \param dev DRM device.
@@ -104,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
idr_init(&dev->ctx_idr);
}
-/**
+/*
* Context bitmap cleanup.
*
* \param dev DRM device.
@@ -163,7 +163,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
/** \name Per Context SAREA Support */
/*@{*/
-/**
+/*
* Get per-context SAREA.
*
* \param inode device inode.
@@ -211,7 +211,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Set per-context SAREA.
*
* \param inode device inode.
@@ -263,7 +263,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
/** \name The actual DRM context handling routines */
/*@{*/
-/**
+/*
* Switch context.
*
* \param dev DRM device.
@@ -290,7 +290,7 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
return 0;
}
-/**
+/*
* Complete context switch.
*
* \param dev DRM device.
@@ -318,7 +318,7 @@ static int drm_context_switch_complete(struct drm_device *dev,
return 0;
}
-/**
+/*
* Reserve contexts.
*
* \param inode device inode.
@@ -351,7 +351,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Add context.
*
* \param inode device inode.
@@ -404,7 +404,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Get context.
*
* \param inode device inode.
@@ -428,7 +428,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Switch context.
*
* \param inode device inode.
@@ -452,7 +452,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
-/**
+/*
* New context.
*
* \param inode device inode.
@@ -478,7 +478,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove context.
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 93a4eec429e8..a4d36aca45ea 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -244,10 +244,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
/* Disable unused encoders */
if (encoder->crtc == NULL)
drm_encoder_disable(encoder);
- /* Disable encoders whose CRTC is about to change */
- if (encoder_funcs->get_crtc &&
- encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- drm_encoder_disable(encoder);
}
}
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c7d5e4c21423..16f2413403aa 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -216,6 +216,8 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index eab0f2687cd6..4e673d318503 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -182,8 +182,7 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
+ if (features && !drm_core_check_all_features(dev, features))
continue;
tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
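drm_core_check_all_features() returns true only when every bit of the requested feature mask is supported, which is exactly what the open-coded comparison checked. A sketch of the call, with an illustrative mask and a hypothetical follow-up:

	u32 need = DRIVER_MODESET | DRIVER_GEM;

	/* True only if the driver advertises both features. */
	if (drm_core_check_all_features(dev, need))
		setup_kms_debugfs(dev);	/* hypothetical */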
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index e22b812c4b80..5d67a41f7c3a 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -372,7 +372,7 @@ void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
- debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
&drm_crtc_crc_control_fops);
debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
&drm_crtc_crc_data_fops);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index e45b07890c5a..a7add55a85b4 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -42,10 +42,10 @@
#include "drm_legacy.h"
/**
- * Initialize the DMA data.
+ * drm_legacy_dma_setup() - Initialize the DMA data.
*
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
+ * @dev: DRM device.
+ * Return: zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
@@ -71,9 +71,9 @@ int drm_legacy_dma_setup(struct drm_device *dev)
}
/**
- * Cleanup the DMA resources.
+ * drm_legacy_dma_takedown() - Cleanup the DMA resources.
*
- * \param dev DRM device.
+ * @dev: DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
@@ -120,10 +120,10 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
}
/**
- * Free a buffer.
+ * drm_legacy_free_buffer() - Free a buffer.
*
- * \param dev DRM device.
- * \param buf buffer to free.
+ * @dev: DRM device.
+ * @buf: buffer to free.
*
* Resets the fields of \p buf.
*/
@@ -139,9 +139,10 @@ void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
}
/**
- * Reclaim the buffers.
+ * drm_legacy_reclaim_buffers() - Reclaim the buffers.
*
- * \param file_priv DRM file private.
+ * @dev: DRM device.
+ * @file_priv: DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a5364b5192b8..c6fbe6e6bc9d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -362,6 +362,65 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
+ * drm_dp_send_real_edid_checksum() - send back real edid checksum value
+ * @aux: DisplayPort AUX channel
+ * @real_edid_checksum: real edid checksum for the last block
+ *
+ * Returns:
+ * True on success
+ */
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum)
+{
+ u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
+
+ if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+ auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
+
+ if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_TEST_REQUEST);
+ return false;
+ }
+ link_edid_read &= DP_TEST_LINK_EDID_READ;
+
+ if (!auto_test_req || !link_edid_read) {
+ DRM_DEBUG_KMS("Source DUT does not support TEST_EDID_READ\n");
+ return false;
+ }
+
+ if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+
+ /* send back checksum for the last edid extension block data */
+ if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
+ &real_edid_checksum, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_EDID_CHECKSUM);
+ return false;
+ }
+
+ test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
+ if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_RESPONSE);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
+
+/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -470,8 +529,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
- bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT;
+ bool branch_device = drm_dp_is_branch(dpcd);
seq_printf(m, "\tDP branch device present: %s\n",
branch_device ? "yes" : "no");
@@ -1222,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+struct edid_quirk {
+ u8 mfg_id[2];
+ u8 prod_id[2];
+ u32 quirks;
+};
+
+#define MFG(first, second) { (first), (second) }
+#define PROD_ID(first, second) { (first), (second) }
+
+/*
+ * Some devices have unreliable OUIDs where they don't set the device ID
+ * correctly, and as a result we need to use the EDID for finding additional
+ * DP quirks in such cases.
+ */
+static const struct edid_quirk edid_quirk_list[] = {
+ /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
+ * only supports DPCD backlight controls
+ */
+ { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ /*
+ * Some Dell CML 2020 systems have panels that support both AUX and PWM
+ * backlight control, while others only support AUX backlight control. All
+ * said panels start up in AUX mode by default, and we don't have any
+ * support for disabling HDR mode on these panels which would be
+ * required to switch to PWM backlight control mode (plus, I'm not
+ * even sure we want PWM backlight controls over DPCD backlight
+ * controls anyway...). Until we have a better way of detecting these,
+ * force DPCD backlight mode on all of them.
+ */
+ { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+};
+
+#undef MFG
+#undef PROD_ID
+
+/**
+ * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
+ * DP-specific quirks
+ * @edid: The EDID to check
+ *
+ * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
+ * of manufacturers don't seem to like following standards and neglect to fill
+ * the dev-ID in, making it impossible to only use OUIDs for determining
+ * quirks in some cases. This function can be used to check the EDID and look
+ * up any additional DP quirks. The bits returned by this function correspond
+ * to the quirk bits in &drm_dp_quirk.
+ *
+ * Returns: a bitmask of quirks, if any. The driver can check this using
+ * drm_dp_has_quirk().
+ */
+u32 drm_dp_get_edid_quirks(const struct edid *edid)
+{
+ const struct edid_quirk *quirk;
+ u32 quirks = 0;
+ int i;
+
+ if (!edid)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+ if (memcmp(quirk->mfg_id, edid->mfg_id,
+ sizeof(edid->mfg_id)) == 0 &&
+ memcmp(quirk->prod_id, edid->prod_code,
+ sizeof(edid->prod_code)) == 0)
+ quirks |= quirk->quirks;
+ }
+
+ DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
+ (int)sizeof(edid->mfg_id), edid->mfg_id,
+ (int)sizeof(edid->prod_code), edid->prod_code, quirks);
+
+ return quirks;
+}
+EXPORT_SYMBOL(drm_dp_get_edid_quirks);
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
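drm_dp_get_edid_quirks() complements the OUI-based descriptor quirks: a driver reads the descriptor once, computes the EDID quirk mask once, and feeds both to drm_dp_has_quirk(), whose signature grows an edid_quirks argument elsewhere in this series. A sketch, assuming aux, dpcd and edid are already in hand:

	struct drm_dp_desc desc;
	u32 edid_quirks;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)))
		return;

	edid_quirks = drm_dp_get_edid_quirks(edid);

	if (drm_dp_has_quirk(&desc, edid_quirks, DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
		/* Panel must be driven with DPCD (AUX) backlight control. */
	}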
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ed0fea2ac322..70c4b7afed12 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -736,6 +736,10 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+ if (crc4 != msg->chunk[msg->curchunk_len - 1])
+ print_hex_dump(KERN_DEBUG, "wrong crc",
+ DUMP_PREFIX_NONE, 16, 1,
+ msg->chunk, msg->curchunk_len, false);
/* copy chunk into bigger msg */
memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
msg->curlen += msg->curchunk_len - 1;
@@ -1035,7 +1039,8 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
}
}
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1045,17 +1050,14 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_write.num_bytes = num_bytes;
req.u.dpcd_write.bytes = bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_LINK_ADDRESS;
drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
@@ -1067,7 +1069,8 @@ static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
return 0;
}
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+ int port_num)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1078,10 +1081,11 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por
return 0;
}
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
- u8 vcpi, uint16_t pbn,
- u8 number_sdp_streams,
- u8 *sdp_stream_sink)
+static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
+ int port_num,
+ u8 vcpi, uint16_t pbn,
+ u8 number_sdp_streams,
+ u8 *sdp_stream_sink)
{
struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req));
@@ -1094,11 +1098,10 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
number_sdp_streams);
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
-static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
- int port_num, bool power_up)
+static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+ int port_num, bool power_up)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1110,7 +1113,6 @@ static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
req.u.port_num.port_number = port_num;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
@@ -2061,7 +2063,7 @@ ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
* sideband messaging as drm_dp_dpcd_write() does for local
* devices via actual AUX CH.
*
- * Return: 0 on success, negative error code on failure.
+ * Return: number of bytes written on success, negative error code on failure.
*/
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size)
@@ -2073,29 +2075,27 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
offset, size, buffer);
}
-static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
- int ret;
+ int ret = 0;
memcpy(mstb->guid, guid, 16);
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(
- mstb->mgr,
- mstb->port_parent,
- DP_GUID,
- 16,
- mstb->guid);
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
+ mstb->port_parent,
+ DP_GUID, 16, mstb->guid);
} else {
-
- ret = drm_dp_dpcd_write(
- mstb->mgr->aux,
- DP_GUID,
- mstb->guid,
- 16);
+ ret = drm_dp_dpcd_write(mstb->mgr->aux,
+ DP_GUID, mstb->guid, 16);
}
}
+
+ if (ret < 16 && ret > 0)
+ return -EPROTO;
+
+ return ret == 16 ? 0 : ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2178,7 +2178,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
drm_connector_set_tile_property(port->connector);
}
- mgr->cbs->register_connector(port->connector);
+ drm_connector_register(port->connector);
return;
error:
@@ -2641,7 +2641,8 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
return false;
}
-static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -2650,8 +2651,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_read.dpcd_address = offset;
req.u.dpcd_read.num_bytes = num_bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
@@ -2877,7 +2876,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_link_address_ack_reply *reply;
struct drm_dp_mst_port *port, *tmp;
- int i, len, ret, port_mask = 0;
+ int i, ret, port_mask = 0;
bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2885,7 +2884,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_link_address(txmsg);
+ build_link_address(txmsg);
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2906,7 +2905,15 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, reply->guid);
+ ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+ if (ret) {
+ char buf[64];
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
+ DRM_ERROR("GUID check on %s failed: %d\n",
+ buf, ret);
+ goto out;
+ }
for (i = 0; i < reply->nports; i++) {
port_mask |= BIT(reply->ports[i].port_number);
@@ -2947,14 +2954,14 @@ void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return;
txmsg->dst = mstb;
- len = build_clear_payload_id_table(txmsg);
+ build_clear_payload_id_table(txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2972,7 +2979,6 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
- int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2980,7 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_enum_path_resources(txmsg, port->port_num);
+ build_enum_path_resources(txmsg, port->port_num);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3073,7 +3079,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- int len, ret, port_num;
+ int ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
@@ -3098,9 +3104,9 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
sinks[i] = i;
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port_num,
- id,
- pbn, port->num_sdp_streams, sinks);
+ build_allocate_payload(txmsg, port_num,
+ id,
+ pbn, port->num_sdp_streams, sinks);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3129,7 +3135,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
@@ -3142,7 +3148,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
}
txmsg->dst = port->parent;
- len = build_power_updown_phy(txmsg, port->port_num, power_up);
+ build_power_updown_phy(txmsg, port->port_num, power_up);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
@@ -3364,7 +3370,6 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret = 0;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3379,7 +3384,7 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_read(txmsg, port->port_num, offset, size);
+ build_dpcd_read(txmsg, port->port_num, offset, size);
txmsg->dst = port->parent;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3417,7 +3422,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3432,18 +3436,15 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+ build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0) {
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- ret = -EIO;
- else
- ret = 0;
- }
+ if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ ret = -EIO;
+
kfree(txmsg);
fail_put:
drm_dp_mst_topology_put_mstb(mstb);
@@ -3504,9 +3505,9 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
- int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
+ mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -3514,6 +3515,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
+ struct drm_dp_payload reset_pay;
+
WARN_ON(mgr->mst_primary);
/* get dpcd info */
@@ -3543,17 +3546,15 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0)
goto out_unlock;
- }
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
queue_work(system_long_wq, &mgr->work);
@@ -3565,27 +3566,19 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- mutex_lock(&mgr->payload_lock);
- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ memset(mgr->payloads, 0,
+ mgr->max_payloads * sizeof(mgr->payloads[0]));
+ memset(mgr->proposed_vcpis, 0,
+ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
-
- if (vcpi) {
- vcpi->vcpi = 0;
- vcpi->num_slots = 0;
- }
- mgr->proposed_vcpis[i] = NULL;
- }
mgr->vcpi_mask = 0;
- mutex_unlock(&mgr->payload_lock);
-
mgr->payload_id_table_cleared = false;
}
out_unlock:
mutex_unlock(&mgr->lock);
+ mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return ret;
@@ -3682,7 +3675,12 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ if (ret) {
+ DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
+ goto out_fail;
+ }
/*
* For the final step of resuming the topology, we need to bring the
@@ -3709,7 +3707,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
- int replylen, origlen, curreply;
+ int replylen, curreply;
int ret;
struct drm_dp_sideband_msg_rx *msg;
int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
@@ -3729,7 +3727,6 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
- origlen = replylen;
replylen -= len;
curreply = len;
while (replylen > 0) {
@@ -4625,15 +4622,34 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
int ret;
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+ if (ret) {
+ seq_printf(m, "dpcd read failed\n");
+ goto out;
+ }
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret) {
+ seq_printf(m, "faux/mst read failed\n");
+ goto out;
+ }
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret) {
+ seq_printf(m, "mst ctrl read failed\n");
+ goto out;
+ }
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret) {
+ seq_printf(m, "branch oui read failed\n");
+ goto out;
+ }
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
+
for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
@@ -4642,6 +4658,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
}
+out:
mutex_unlock(&mgr->lock);
}
@@ -4657,11 +4674,23 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
+static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+{
+ if (!port->connector)
+ return;
+
+ if (port->mgr->cbs->destroy_connector) {
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ } else {
+ drm_connector_unregister(port->connector);
+ drm_connector_put(port->connector);
+ }
+}
+
static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- if (port->connector)
- port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ drm_dp_destroy_connector(port);
drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
@@ -5529,7 +5558,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, 0,
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
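Several helpers in these hunks gain real error reporting: the build_*() sideband encoders become void (they could never fail), drm_dp_mst_dpcd_write() now returns the number of bytes written rather than 0, and drm_dp_check_mstb_guid() returns a negative errno, mapping a short DPCD write to -EPROTO. Callers of the write path are expected to distinguish outright failure from a short transfer; a sketch of that convention:

	ssize_t ret;

	ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size);
	if (ret < 0)
		return ret;	/* the sideband transaction itself failed */
	if (ret < size)
		return -EPROTO;	/* short write: treat as a protocol error */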
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7c18a980cd4b..7b1a628d1f6e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -946,7 +946,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
struct drm_driver *driver = dev->driver;
int ret;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
ret = drm_minor_register(dev, DRM_MINOR_RENDER);
if (ret)
@@ -986,7 +987,8 @@ err_minors:
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_register);
@@ -1079,17 +1081,14 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- mutex_lock(&drm_global_mutex);
minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor)) {
- err = PTR_ERR(minor);
- goto out_unlock;
- }
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
new_fops = fops_get(minor->dev->driver->fops);
if (!new_fops) {
err = -ENODEV;
- goto out_release;
+ goto out;
}
replace_fops(filp, new_fops);
@@ -1098,10 +1097,9 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
else
err = 0;
-out_release:
+out:
drm_minor_release(minor);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
+
return err;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 805fb004c8eb..116451101426 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1590,11 +1590,22 @@ static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
{
int i;
- u8 csum = 0;
- for (i = 0; i < EDID_LENGTH; i++)
+ u8 csum = 0, crc = 0;
+
+ for (i = 0; i < EDID_LENGTH - 1; i++)
csum += raw_edid[i];
- return csum;
+ crc = 0x100 - csum;
+
+ return crc;
+}
+
+static bool drm_edid_block_checksum_diff(const u8 *raw_edid, u8 real_checksum)
+{
+ return raw_edid[EDID_LENGTH - 1] != real_checksum;
}
static bool drm_edid_is_zero(const u8 *in_edid, int length)
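The checksum rework leans on the EDID invariant that all 128 bytes of a block sum to 0 modulo 256: drm_edid_block_checksum() now sums only bytes 0..126 and returns 0x100 minus that sum, i.e. the value the final byte must hold, and drm_edid_block_checksum_diff() compares it with the byte actually present. Worked numbers, as a sketch:

	/*
	 * If bytes 0..126 of a block sum to 0x37, the block is valid only
	 * when its checksum byte is 0x100 - 0x37 = 0xc9, because then
	 * (0x37 + 0xc9) % 0x100 == 0.
	 */
	u8 sum = 0x37;			/* sum of bytes 0..126, say */
	u8 expected = 0x100 - sum;	/* 0xc9 */

This is also the value connector_bad_edid() stores in real_edid_checksum for the last extension block, so it can later be reported back over DP with drm_dp_send_real_edid_checksum().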
@@ -1652,7 +1663,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
}
csum = drm_edid_block_checksum(raw_edid);
- if (csum) {
+ if (drm_edid_block_checksum_diff(raw_edid, csum)) {
if (edid_corrupt)
*edid_corrupt = true;
@@ -1793,6 +1804,11 @@ static void connector_bad_edid(struct drm_connector *connector,
u8 *edid, int num_blocks)
{
int i;
+ u8 num_of_ext = edid[0x7e];
+
+ /* Calculate real checksum for the last edid extension block data */
+ connector->real_edid_checksum =
+ drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
@@ -2196,15 +2212,29 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_find_dmt);
+static bool is_display_descriptor(const u8 d[18], u8 tag)
+{
+ return d[0] == 0x00 && d[1] == 0x00 &&
+ d[2] == 0x00 && d[3] == tag;
+}
+
+static bool is_detailed_timing_descriptor(const u8 d[18])
+{
+ return d[0] != 0x00 || d[1] != 0x00;
+}
+
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
- int i, n = 0;
+ int i, n;
u8 d = ext[0x02];
u8 *det_base = ext + d;
+ if (d < 4 || d > 127)
+ return;
+
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
@@ -2254,9 +2284,12 @@ static void
is_rb(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE)
- if (r[15] & 0x10)
- *(bool *)data = true;
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[15] & 0x10)
+ *(bool *)data = true;
}
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
@@ -2276,7 +2309,11 @@ static void
find_gtf2(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[10] == 0x02)
*(u8 **)data = r;
}
@@ -2815,13 +2852,13 @@ do_inferred_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct detailed_data_monitor_range *range = &data->data.range;
- if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
return;
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->edid,
timing);
-
+
if (!version_greater(closure->edid, 1, 1))
return; /* GTF not defined yet */
@@ -2894,10 +2931,11 @@ static void
do_established_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_EST_TIMINGS)
- closure->modes += drm_est3_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_EST_TIMINGS))
+ return;
+
+ closure->modes += drm_est3_modes(closure->connector, timing);
}
/**
@@ -2946,19 +2984,19 @@ do_standard_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_connector *connector = closure->connector;
struct edid *edid = closure->edid;
+ int i;
- if (data->type == EDID_DETAIL_STD_MODES) {
- int i;
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_STD_MODES))
+ return;
- std = &data->data.timings[i];
- newmode = drm_mode_std(connector, edid, std);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- closure->modes++;
- }
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std = &data->data.timings[i];
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
}
}
}
@@ -3053,15 +3091,16 @@ static void
do_cvt_mode(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_CVT_3BYTE)
- closure->modes += drm_cvt_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_CVT_3BYTE))
+ return;
+
+ closure->modes += drm_cvt_modes(closure->connector, timing);
}
static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
-{
+{
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
@@ -3083,27 +3122,28 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
struct detailed_mode_closure *closure = c;
struct drm_display_mode *newmode;
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(closure->connector->dev,
- closure->edid, timing,
- closure->quirks);
- if (!newmode)
- return;
+ if (!is_detailed_timing_descriptor((const u8 *)timing))
+ return;
- if (closure->preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
- /*
- * Detailed modes are limited to 10kHz pixel clock resolution,
- * so fix up anything that looks like CEA/HDMI mode, but the clock
- * is just slightly off.
- */
- fixup_detailed_cea_mode_clock(newmode);
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(closure->connector, newmode);
- closure->modes++;
- closure->preferred = false;
- }
+ /*
+ * Detailed modes are limited to 10kHz pixel clock resolution,
+ * so fix up anything that looks like CEA/HDMI mode, but the clock
+ * is just slightly off.
+ */
+ fixup_detailed_cea_mode_clock(newmode);
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = false;
}
/*
@@ -3953,6 +3993,13 @@ cea_db_tag(const u8 *db)
static int
cea_revision(const u8 *cea)
{
+ /*
+ * FIXME is this correct for the DispID variant?
+ * The DispID spec doesn't really specify whether
+ * this is the revision of the CEA extension or
+ * the DispID CEA data block. And the only value
+ * given as an example is 0.
+ */
return cea[1];
}
@@ -3977,6 +4024,10 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
* no non-DTD data.
*/
if (cea[0] == DATA_BLOCK_CTA) {
+ /*
+ * for_each_displayid_db() has already verified
+ * that these stay within expected bounds.
+ */
*start = 3;
*end = *start + cea[2];
} else if (cea[0] == CEA_EXT) {
@@ -4282,8 +4333,10 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
static void
monitor_name(struct detailed_timing *t, void *data)
{
- if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
- *(u8 **)data = t->data.other_data.data.str.str;
+ if (!is_display_descriptor((const u8 *)t, EDID_DETAIL_MONITOR_NAME))
+ return;
+
+ *(u8 **)data = t->data.other_data.data.str.str;
}
static int get_monitor_name(struct edid *edid, char name[13])
@@ -4316,7 +4369,7 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
{
int name_length;
char buf[13];
-
+
if (bufsize <= 0)
return;
@@ -4381,6 +4434,7 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
if (cea_revision(cea) >= 3) {
int i, start, end;
+ int sad_count;
if (cea_db_offsets(cea, &start, &end)) {
start = 0;
@@ -4392,8 +4446,6 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
- int sad_count;
-
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = min(dbl / 3, 15 - total_sad_count);
@@ -4594,6 +4646,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*
* Parse the CEA extension according to CEA-861-B.
*
+ * Drivers that have added the modes parsed from EDID to drm_display_info
+ * should use &drm_display_info.is_hdmi instead of calling this function.
+ *
* Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
@@ -4828,6 +4883,8 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
struct drm_display_info *info = &connector->display_info;
u8 len = cea_db_payload_len(db);
+ info->is_hdmi = true;
+
if (len >= 6)
info->dvi_dual = db[6] & 1;
if (len >= 7)
@@ -4880,6 +4937,47 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
+static
+void get_monitor_range(struct detailed_timing *timing,
+ void *info_monitor_range)
+{
+ struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ const struct detailed_non_pixel *data = &timing->data.other_data;
+ const struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ /*
+ * Check for the range-limits-only flag. If flag == 1, no
+ * additional timing information is provided. Default GTF,
+ * GTF secondary curve and CVT are not supported.
+ */
+ if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
+ return;
+
+ monitor_range->min_vfreq = range->min_vfreq;
+ monitor_range->max_vfreq = range->max_vfreq;
+}
+
+static
+void drm_get_monitor_range(struct drm_connector *connector,
+ const struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ if (!version_greater(edid, 1, 1))
+ return;
+
+ drm_for_each_detailed_block((u8 *)edid, get_monitor_range,
+ &info->monitor_range);
+
+ DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
+ info->monitor_range.min_vfreq,
+ info->monitor_range.max_vfreq);
+}
+
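
Once drm_add_display_info() has run (see the hunk further down), the parsed range is available to drivers. A minimal, hypothetical consumer; the foo_* name and the 10 Hz threshold are assumptions, not part of this patch:

    static bool foo_connector_vrr_capable(struct drm_connector *connector)
    {
            const struct drm_monitor_range_info *range =
                    &connector->display_info.monitor_range;

            /* Both bounds stay zero when the EDID had no range descriptor. */
            if (!range->min_vfreq || !range->max_vfreq)
                    return false;

            /* Require a usable window; the threshold is illustrative. */
            return range->max_vfreq - range->min_vfreq >= 10;
    }
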
/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
* all of the values which would have been set from EDID
*/
@@ -4896,11 +4994,13 @@ drm_reset_display_info(struct drm_connector *connector)
info->cea_rev = 0;
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->is_hdmi = false;
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
info->non_desktop = 0;
+ memset(&info->monitor_range, 0, sizeof(info->monitor_range));
}
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
@@ -4916,6 +5016,8 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ drm_get_monitor_range(connector, edid);
+
DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
if (edid->revision < 3)
@@ -5396,14 +5498,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
{
enum hdmi_picture_aspect picture_aspect;
u8 vic, hdmi_vic;
- int err;
if (!frame || !mode)
return -EINVAL;
- err = hdmi_avi_infoframe_init(frame);
- if (err < 0)
- return err;
+ hdmi_avi_infoframe_init(frame);
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4c7cbce7bae7..a9771de4d17e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -250,17 +250,7 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
- /*
- * TODO:
- * We should bail out here if there is a master by dropping _force.
- * Currently these igt tests fail if we do that:
- * - kms_fbcon_fbt@psr
- * - kms_fbcon_fbt@psr-suspend
- *
- * So first these tests need to be fixed so they drop master or don't
- * have an fd open.
- */
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit(&fb_helper->client);
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -294,7 +284,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
continue;
mutex_lock(&helper->lock);
- ret = drm_client_modeset_commit_force(&helper->client);
+ ret = drm_client_modeset_commit_locked(&helper->client);
if (ret)
error = true;
mutex_unlock(&helper->lock);
@@ -460,7 +450,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
- * @max_conn_count: max connector count (not used)
*
* This allocates the structures for the fbdev helper with the given limits.
* Note that this won't yet touch the hardware (through the driver interfaces)
@@ -473,8 +462,7 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* Zero if everything went ok, nonzero otherwise.
*/
int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- int max_conn_count)
+ struct drm_fb_helper *fb_helper)
{
int ret;
@@ -1357,7 +1345,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
pan_set(fb_helper, var->xoffset, var->yoffset);
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
@@ -2135,7 +2123,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, 0);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 92d16724f949..eb009d3ab48f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -48,9 +48,45 @@
#include "drm_internal.h"
#include "drm_legacy.h"
+#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#include <uapi/asm/mman.h>
+#include <drm/drm_vma_manager.h>
+#endif
+
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+bool drm_dev_needs_global_mutex(struct drm_device *dev)
+{
+ /*
+ * Legacy drivers rely on all kinds of BKL locking semantics, don't
+ * bother. They also still need BKL locking for their ioctls, so better
+ * safe than sorry.
+ */
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ return true;
+
+ /*
+ * The deprecated ->load callback must be called after the driver is
+ * already registered. This means such drivers rely on the BKL to make
+ * sure an open can't proceed until the driver is actually fully set up.
+ * Similar hilarity holds for the unload callback.
+ */
+ if (dev->driver->load || dev->driver->unload)
+ return true;
+
+ /*
+ * Drivers with the lastclose callback assume that it's synchronized
+ * against concurrent opens, which again needs the BKL. The proper fix
+ * is to use the drm_client infrastructure with proper locking for each
+ * client.
+ */
+ if (dev->driver->lastclose)
+ return true;
+
+ return false;
+}
+
/**
* DOC: file operations
*
@@ -220,7 +256,7 @@ void drm_file_free(struct drm_file *file)
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
(long)old_encode_dev(file->minor->kdev->devt),
- dev->open_count);
+ atomic_read(&dev->open_count));
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
dev->driver->preclose)
@@ -379,7 +415,10 @@ int drm_open(struct inode *inode, struct file *filp)
return PTR_ERR(minor);
dev = minor->dev;
- if (!dev->open_count++)
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
+
+ if (!atomic_fetch_inc(&dev->open_count))
need_setup = 1;
/* share address_space across all char-devs of a single device */
@@ -395,10 +434,16 @@ int drm_open(struct inode *inode, struct file *filp)
goto err_undo;
}
}
+
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
+
return 0;
err_undo:
- dev->open_count--;
+ atomic_dec(&dev->open_count);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
return retcode;
}
@@ -438,16 +483,18 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_minor *minor = file_priv->minor;
struct drm_device *dev = minor->dev;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
- DRM_DEBUG("open_count = %d\n", dev->open_count);
+ DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
- if (!--dev->open_count)
+ if (atomic_dec_and_test(&dev->open_count))
drm_lastclose(dev);
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
@@ -456,6 +503,40 @@ int drm_release(struct inode *inode, struct file *filp)
EXPORT_SYMBOL(drm_release);
/**
+ * drm_release_noglobal - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
+ *
+ * This function may be used by drivers as their &file_operations.release
+ * method. It frees any resources associated with the open file, calling the
+ * &drm_driver.postclose driver callback, prior to taking the
+ * drm_global_mutex. If this is the last open file for the DRM device, it then
+ * proceeds to call the &drm_driver.lastclose driver callback.
+ *
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
+ */
+int drm_release_noglobal(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
+
+ drm_close_helper(filp);
+
+ if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
+ drm_lastclose(dev);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ drm_minor_release(minor);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_release_noglobal);
+
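
A sketch of how a non-legacy, GEM-based driver might adopt the new release method; foo_fops is a placeholder and the exact set of handlers varies per driver:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <drm/drm_file.h>
    #include <drm/drm_gem.h>
    #include <drm/drm_ioctl.h>

    static const struct file_operations foo_fops = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release_noglobal, /* skips drm_global_mutex */
            .unlocked_ioctl = drm_ioctl,
            .poll           = drm_poll,
            .read           = drm_read,
            .mmap           = drm_gem_mmap,
            .llseek         = noop_llseek,
    };
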
+/**
* drm_read - read method for DRM file
* @filp: file pointer
* @buffer: userspace destination pointer for the read
@@ -796,3 +877,139 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
+
+#ifdef CONFIG_MMU
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * drm_addr_inflate() attempts to construct an aligned area by inflating
+ * the area size and skipping the unaligned start of the area.
+ * adapted from shmem_get_unmapped_area()
+ */
+static unsigned long drm_addr_inflate(unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags,
+ unsigned long huge_size)
+{
+ unsigned long offset, inflated_len;
+ unsigned long inflated_addr;
+ unsigned long inflated_offset;
+
+ offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
+ if (offset && offset + len < 2 * huge_size)
+ return addr;
+ if ((addr & (huge_size - 1)) == offset)
+ return addr;
+
+ inflated_len = len + huge_size - PAGE_SIZE;
+ if (inflated_len > TASK_SIZE)
+ return addr;
+ if (inflated_len < len)
+ return addr;
+
+ inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
+ 0, flags);
+ if (IS_ERR_VALUE(inflated_addr))
+ return addr;
+ if (inflated_addr & ~PAGE_MASK)
+ return addr;
+
+ inflated_offset = inflated_addr & (huge_size - 1);
+ inflated_addr += offset - inflated_offset;
+ if (inflated_offset > offset)
+ inflated_addr += huge_size;
+
+ if (inflated_addr > TASK_SIZE - len)
+ return addr;
+
+ return inflated_addr;
+}
+
+/**
+ * drm_get_unmapped_area() - Get an unused user-space virtual memory area
+ * suitable for huge page table entries.
+ * @file: The struct file representing the address space being mmap()'d.
+ * @uaddr: Start address suggested by user-space.
+ * @len: Length of the area.
+ * @pgoff: The page offset into the address space.
+ * @flags: mmap flags
+ * @mgr: The address space manager used by the drm driver. This argument can
+ * probably be removed at some point when all drivers use the same
+ * address space manager.
+ *
+ * This function attempts to find an unused user-space virtual memory area
+ * that can accommodate the size we want to map, and that is properly
+ * aligned to facilitate huge page table entries matching actual
+ * huge pages or huge page aligned memory in buffer objects. Buffer objects
+ * are assumed to start at huge page boundary pfns (io memory) or be
+ * populated by huge pages aligned to the start of the buffer object
+ * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
+ *
+ * Return: aligned user-space address.
+ */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ unsigned long addr;
+ unsigned long inflated_addr;
+ struct drm_vma_offset_node *node;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /*
+ * @pgoff is the file page-offset the huge page boundaries of
+ * which typically align to physical address huge page boundaries.
+ * That's not true for DRM, however, where physical address huge
+ * page boundaries instead are aligned with the offset from
+ * buffer object start. So adjust @pgoff to be the offset from
+ * buffer object start.
+ */
+ drm_vma_offset_lock_lookup(mgr);
+ node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
+ if (node)
+ pgoff -= node->vm_node.start;
+ drm_vma_offset_unlock_lookup(mgr);
+
+ addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ if (addr & ~PAGE_MASK)
+ return addr;
+ if (addr > TASK_SIZE - len)
+ return addr;
+
+ if (len < HPAGE_PMD_SIZE)
+ return addr;
+ if (flags & MAP_FIXED)
+ return addr;
+ /*
+ * Our priority is to support MAP_SHARED mapped hugely;
+ * and support MAP_PRIVATE mapped hugely too, until it is COWed.
+ * But if caller specified an address hint, respect that as before.
+ */
+ if (uaddr)
+ return addr;
+
+ inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
+ HPAGE_PMD_SIZE);
+
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
+ len >= HPAGE_PUD_SIZE)
+ inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
+ flags, HPAGE_PUD_SIZE);
+ return inflated_addr;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr)
+{
+ return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
+#endif /* CONFIG_MMU */
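
A driver that keeps a drm_vma_offset_manager could opt in with a thin wrapper; a sketch under that assumption, with foo_* identifiers standing in for driver-private state:

    #include <linux/kernel.h>
    #include <drm/drm_device.h>
    #include <drm/drm_file.h>
    #include <drm/drm_vma_manager.h>

    struct foo_device {
            struct drm_device base;
            struct drm_vma_offset_manager vma_manager;
    };

    static unsigned long foo_get_unmapped_area(struct file *filp,
                                               unsigned long uaddr,
                                               unsigned long len,
                                               unsigned long pgoff,
                                               unsigned long flags)
    {
            struct drm_file *file_priv = filp->private_data;
            struct foo_device *foo =
                    container_of(file_priv->minor->dev, struct foo_device, base);

            /* Delegates hole search and huge-page alignment to the helper. */
            return drm_get_unmapped_area(filp, uaddr, len, pgoff, flags,
                                         &foo->vma_manager);
    }

The wrapper would then be assigned to the driver's &file_operations.get_unmapped_area.
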
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 0897cb9aeaff..3b818f2b2392 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright (C) 2016 Noralf Trønnes
*
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57564318ceea..57ac94ce9b9e 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -31,6 +31,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
@@ -548,7 +549,128 @@ int drm_mode_getfb(struct drm_device *dev,
out:
drm_framebuffer_put(fb);
+ return ret;
+}
+
+/**
+ * drm_mode_getfb2 - get extended FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ /* For multi-plane framebuffers, we require the driver to place the
+ * GEM objects directly in the drm_framebuffer. For single-plane
+ * framebuffers, we can fall back to create_handle.
+ */
+ if (!fb->obj[0] &&
+ (fb->format->num_planes > 1 || !fb->funcs->create_handle)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->pixel_format = fb->format->format;
+
+ r->flags = 0;
+ if (dev->mode_config.allow_fb_modifiers)
+ r->flags |= DRM_MODE_FB_MODIFIERS;
+
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ r->handles[i] = 0;
+ r->pitches[i] = 0;
+ r->offsets[i] = 0;
+ r->modifier[i] = 0;
+ }
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ r->pitches[i] = fb->pitches[i];
+ r->offsets[i] = fb->offsets[i];
+ if (dev->mode_config.allow_fb_modifiers)
+ r->modifier[i] = fb->modifier;
+ }
+
+ /* GET_FB2() is an unprivileged ioctl so we must not return a
+ * buffer-handle to non master/root processes! To match GET_FB()
+ * just return invalid handles (0) for non masters/root
+ * rather than making GET_FB2() privileged.
+ */
+ if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
+ ret = 0;
+ goto out;
+ }
+ for (i = 0; i < fb->format->num_planes; i++) {
+ int j;
+
+ /* If we reuse the same object for multiple planes, also
+ * return the same handle.
+ */
+ for (j = 0; j < i; j++) {
+ if (fb->obj[i] == fb->obj[j]) {
+ r->handles[i] = r->handles[j];
+ break;
+ }
+ }
+
+ if (r->handles[i])
+ continue;
+
+ if (fb->obj[i]) {
+ ret = drm_gem_handle_create(file_priv, fb->obj[i],
+ &r->handles[i]);
+ } else {
+ WARN_ON(i > 0);
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handles[i]);
+ }
+
+ if (ret != 0)
+ goto out;
+ }
+
+out:
+ if (ret != 0) {
+ /* Delete any previously-created handles on failure. */
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ int j;
+
+ if (r->handles[i])
+ drm_gem_handle_delete(file_priv, r->handles[i]);
+
+ /* Zero out any handles identical to the one we just
+ * deleted.
+ */
+ for (j = i + 1; j < ARRAY_SIZE(r->handles); j++) {
+ if (r->handles[j] == r->handles[i])
+ r->handles[j] = 0;
+ }
+ }
+ }
+
+ drm_framebuffer_put(fb);
return ret;
}
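
From userspace, the new ioctl can be exercised roughly as below; a sketch against libdrm's drmIoctl(), assuming the installed uapi headers export DRM_IOCTL_MODE_GETFB2, with error handling trimmed:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <xf86drm.h>

    static int dump_fb2(int fd, uint32_t fb_id)
    {
            struct drm_mode_fb_cmd2 info;

            memset(&info, 0, sizeof(info));
            info.fb_id = fb_id;

            if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB2, &info))
                    return -1;

            /* Non-master/non-root callers get zeroed handles; the layout
             * fields are valid either way. */
            printf("%ux%u fourcc 0x%08x pitch[0]=%u modifier[0]=0x%llx\n",
                   info.width, info.height, info.pixel_format,
                   info.pitches[0], (unsigned long long)info.modifier[0]);
            return 0;
    }
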
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a9e4a610445a..37627d06fb06 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -218,7 +218,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
bool final = false;
- if (WARN_ON(obj->handle_count == 0))
+ if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
return;
/*
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index a4863326061a..92a11bb42365 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1141,3 +1141,64 @@ void drm_vram_helper_release_mm(struct drm_device *dev)
dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);
+
+/*
+ * Mode-config helpers
+ */
+
+static enum drm_mode_status
+drm_vram_helper_mode_valid_internal(struct drm_device *dev,
+ const struct drm_display_mode *mode,
+ unsigned long max_bpp)
+{
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ unsigned long fbsize, fbpages, max_fbpages;
+
+ if (WARN_ON(!dev->vram_mm))
+ return MODE_BAD;
+
+ max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
+
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+/**
+ * drm_vram_helper_mode_valid - Tests if a display mode's
+ * framebuffer fits into the available video memory.
+ * @dev: the DRM device
+ * @mode: the mode to test
+ *
+ * This function tests if enough video memory is available for using the
+ * specified display mode. Atomic modesetting requires importing the
+ * designated framebuffer into video memory before evicting the active
+ * one. Hence, any framebuffer may consume at most half of the available
+ * VRAM. Display modes that require a larger framebuffer cannot be used,
+ * even if the CRTC does support them. Each framebuffer is assumed to
+ * have 32-bit color depth.
+ *
+ * Note:
+ * The function can only test if the display mode is supported in
+ * general. If there are too many framebuffers pinned to video memory,
+ * a display mode may still not be usable in practice. The color depth of
+ * 32-bit fits all current use cases. A more flexible test can be added
+ * when necessary.
+ *
+ * Returns:
+ * MODE_OK if the display mode is supported, or an error code of type
+ * enum drm_mode_status otherwise.
+ */
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+
+ return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
+}
+EXPORT_SYMBOL(drm_vram_helper_mode_valid);
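
Wiring this up in a driver is then a one-liner; a sketch with placeholder foo_* names, assuming the connector's helper vtable is where the driver filters modes:

    #include <drm/drm_gem_vram_helper.h>
    #include <drm/drm_modeset_helper_vtables.h>

    static enum drm_mode_status
    foo_connector_mode_valid(struct drm_connector *connector,
                             struct drm_display_mode *mode)
    {
            /* Rejects modes whose XRGB8888 framebuffer exceeds half of VRAM. */
            return drm_vram_helper_mode_valid(connector->dev, mode);
    }

    static const struct drm_connector_helper_funcs foo_connector_helpers = {
            .mode_valid = foo_connector_mode_valid,
    };
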
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 9191633a3c43..7f386adcf872 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -23,14 +23,6 @@
#include "drm_internal.h"
-static struct hdcp_srm {
- u32 revoked_ksv_cnt;
- u8 *revoked_ksv_list;
-
- /* Mutex to protect above struct member */
- struct mutex mutex;
-} *srm_data;
-
static inline void drm_hdcp_print_ksv(const u8 *ksv)
{
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
@@ -60,11 +52,11 @@ static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
return ksv_count;
}
-static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
+static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
u32 vrls_length)
{
- u32 parsed_bytes = 0, ksv_count = 0;
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
+ u32 parsed_bytes = 0, ksv_count = 0;
do {
vrl_ksv_cnt = *buf;
@@ -74,10 +66,10 @@ static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
vrl_ksv_cnt);
- memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
+ memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
+ buf, vrl_ksv_sz);
ksv_count += vrl_ksv_cnt;
- revoked_ksv_list += vrl_ksv_sz;
buf += vrl_ksv_sz;
parsed_bytes += (vrl_ksv_sz + 1);
@@ -91,7 +83,8 @@ static inline u32 get_vrl_length(const u8 *buf)
return drm_hdcp_be24_to_cpu(buf);
}
-static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
@@ -131,29 +124,28 @@ static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
- if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
+ if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
vrl_length) != ksv_count) {
- srm_data->revoked_ksv_cnt = 0;
- kfree(srm_data->revoked_ksv_list);
+ *revoked_ksv_cnt = 0;
+ kfree(*revoked_ksv_list);
return -EINVAL;
}
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
-static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
@@ -195,13 +187,11 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
@@ -210,10 +200,10 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
- memcpy(srm_data->revoked_ksv_list, buf, ksv_sz);
+ memcpy(*revoked_ksv_list, buf, ksv_sz);
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
static inline bool is_srm_version_hdcp1(const u8 *buf)
@@ -226,22 +216,27 @@ static inline bool is_srm_version_hdcp2(const u8 *buf)
return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
}
-static void drm_hdcp_srm_update(const u8 *buf, size_t count)
+static int drm_hdcp_srm_update(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
if (count < sizeof(struct hdcp_srm_header))
- return;
+ return -EINVAL;
if (is_srm_version_hdcp1(buf))
- drm_hdcp_parse_hdcp1_srm(buf, count);
+ return drm_hdcp_parse_hdcp1_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
else if (is_srm_version_hdcp2(buf))
- drm_hdcp_parse_hdcp2_srm(buf, count);
+ return drm_hdcp_parse_hdcp2_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
+ else
+ return -EINVAL;
}
-static void drm_hdcp_request_srm(struct drm_device *drm_dev)
+static int drm_hdcp_request_srm(struct drm_device *drm_dev,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
char fw_name[36] = "display_hdcp_srm.bin";
const struct firmware *fw;
-
int ret;
ret = request_firmware_direct(&fw, (const char *)fw_name,
@@ -250,10 +245,12 @@ static void drm_hdcp_request_srm(struct drm_device *drm_dev)
goto exit;
if (fw->size && fw->data)
- drm_hdcp_srm_update(fw->data, fw->size);
+ ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
+ revoked_ksv_cnt);
exit:
release_firmware(fw);
+ return ret;
}
/**
@@ -279,71 +276,34 @@ exit:
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
*
* Returns:
- * TRUE on any of the KSV is revoked, else FALSE.
+ * Count of the revoked KSVs, or a negative error code in case of failure.
*/
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
- u32 ksv_count)
+int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
+ u32 ksv_count)
{
- u32 rev_ksv_cnt, cnt, i, j;
- u8 *rev_ksv_list;
-
- if (!srm_data)
- return false;
-
- mutex_lock(&srm_data->mutex);
- drm_hdcp_request_srm(drm_dev);
-
- rev_ksv_cnt = srm_data->revoked_ksv_cnt;
- rev_ksv_list = srm_data->revoked_ksv_list;
-
- /* If the Revoked ksv list is empty */
- if (!rev_ksv_cnt || !rev_ksv_list) {
- mutex_unlock(&srm_data->mutex);
- return false;
- }
-
- for (cnt = 0; cnt < ksv_count; cnt++) {
- rev_ksv_list = srm_data->revoked_ksv_list;
- for (i = 0; i < rev_ksv_cnt; i++) {
- for (j = 0; j < DRM_HDCP_KSV_LEN; j++)
- if (ksvs[j] != rev_ksv_list[j]) {
- break;
- } else if (j == (DRM_HDCP_KSV_LEN - 1)) {
- DRM_DEBUG("Revoked KSV is ");
- drm_hdcp_print_ksv(ksvs);
- mutex_unlock(&srm_data->mutex);
- return true;
- }
- /* Move the offset to next KSV in the revoked list */
- rev_ksv_list += DRM_HDCP_KSV_LEN;
- }
-
- /* Iterate to next ksv_offset */
- ksvs += DRM_HDCP_KSV_LEN;
- }
- mutex_unlock(&srm_data->mutex);
- return false;
+ u32 revoked_ksv_cnt = 0, i, j;
+ u8 *revoked_ksv_list = NULL;
+ int ret = 0;
+
+ ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
+ &revoked_ksv_cnt);
+
+ /* revoked_ksv_cnt stays zero when the SRM request above failed */
+ for (i = 0; i < revoked_ksv_cnt; i++)
+ for (j = 0; j < ksv_count; j++)
+ if (!memcmp(&ksvs[j * DRM_HDCP_KSV_LEN],
+ &revoked_ksv_list[i * DRM_HDCP_KSV_LEN],
+ DRM_HDCP_KSV_LEN)) {
+ DRM_DEBUG("Revoked KSV is ");
+ drm_hdcp_print_ksv(&ksvs[j * DRM_HDCP_KSV_LEN]);
+ ret++;
+ }
+
+ kfree(revoked_ksv_list);
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
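
With the global SRM state gone and the return value now a count, a caller's check changes shape. A hedged sketch of a driver-side HDCP 1.x topology check under the new convention; foo_* is a placeholder:

    #include <linux/errno.h>
    #include <drm/drm_hdcp.h>

    static int foo_hdcp1_check_topology(struct drm_device *dev,
                                        u8 *ksv_fifo, u32 num_downstream)
    {
            int ret;

            ret = drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream);
            if (ret < 0)
                    return ret;     /* SRM fetch or parse failed */
            if (ret > 0)
                    return -EPERM;  /* ret KSVs in the topology are revoked */

            return 0;
    }
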
-int drm_setup_hdcp_srm(struct class *drm_class)
-{
- srm_data = kzalloc(sizeof(*srm_data), GFP_KERNEL);
- if (!srm_data)
- return -ENOMEM;
- mutex_init(&srm_data->mutex);
-
- return 0;
-}
-
-void drm_teardown_hdcp_srm(struct class *drm_class)
-{
- if (srm_data) {
- kfree(srm_data->revoked_ksv_list);
- kfree(srm_data);
- }
-}
-
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 6937bf923f05..5714a78365ac 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -41,6 +41,7 @@ struct drm_printer;
/* drm_file.c */
extern struct mutex drm_global_mutex;
+bool drm_dev_needs_global_mutex(struct drm_device *dev);
struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
@@ -235,7 +236,3 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
int drm_framebuffer_debugfs_init(struct drm_minor *minor);
-
-/* drm_hdcp.c */
-int drm_setup_hdcp_srm(struct class *drm_class);
-void drm_teardown_hdcp_srm(struct class *drm_class);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 5afb39688b55..9e41972c4bbc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -671,6 +671,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB2, drm_mode_getfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03bce566a8c3..588be45abd7a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -111,10 +111,6 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (irq == 0)
return -EINVAL;
- /* Driver must have been initialized */
- if (!dev->dev_private)
- return -EINVAL;
-
if (dev->irq_enabled)
return -EBUSY;
dev->irq_enabled = true;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 2c79e8199e3c..f16eefbf2829 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -46,7 +46,7 @@
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-/**
+/*
* Take the heavyweight lock.
*
* \param lock lock pointer.
@@ -93,7 +93,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
*
@@ -150,7 +150,7 @@ static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* Lock ioctl.
*
* \param inode device inode.
@@ -243,7 +243,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unlock ioctl.
*
* \param inode device inode.
@@ -275,7 +275,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
return 0;
}
-/**
+/*
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
@@ -287,7 +287,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
-
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 16bff1be4b8a..558baf989f5a 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -24,7 +24,6 @@
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -238,6 +237,23 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(mipi_dbi_buf_copy);
+static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
+ unsigned int xs, unsigned int xe,
+ unsigned int ys, unsigned int ye)
+{
+ struct mipi_dbi *dbi = &dbidev->dbi;
+
+ xs += dbidev->left_offset;
+ xe += dbidev->left_offset;
+ ys += dbidev->top_offset;
+ ye += dbidev->top_offset;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, (xs >> 8) & 0xff,
+ xs & 0xff, (xe >> 8) & 0xff, xe & 0xff);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, (ys >> 8) & 0xff,
+ ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
+}
+
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
@@ -271,12 +287,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
tr = cma_obj->vaddr;
}
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
- (rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
- ((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
- (rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
- ((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
+ mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
+ rect->y2 - 1);
ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
width * height * 2);
@@ -299,18 +311,10 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
mipi_dbi_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
@@ -366,10 +370,7 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
memset(dbidev->tx_buf, 0, len);
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
- ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
- ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
+ mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)dbidev->tx_buf, len);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2a6e34663146..bc6e208949e8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -45,6 +45,7 @@
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
+#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
@@ -366,6 +367,11 @@ next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
enum drm_mm_insert_mode mode)
{
+ /* Searching is slow; check if we ran out of time/patience */
+ cond_resched();
+ if (fatal_signal_pending(current))
+ return NULL;
+
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -399,10 +405,10 @@ next_hole(struct drm_mm *mm,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
u64 adj_start, adj_end;
+ u64 end;
end = node->start + node->size;
if (unlikely(end <= node->start))
@@ -557,7 +563,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
return 0;
}
- return -ENOSPC;
+ return signal_pending(current) ? -ERESTARTSYS : -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
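
Because the hole search can now bail on a fatal signal, allocation paths should expect -ERESTARTSYS in addition to -ENOSPC. A sketch of a tolerant caller; foo_evict_and_retry() is a hypothetical eviction fallback, stubbed out here:

    #include <linux/errno.h>
    #include <drm/drm_mm.h>

    static int foo_evict_and_retry(struct drm_mm *mm, struct drm_mm_node *node,
                                   u64 size)
    {
            return -ENOSPC; /* hypothetical eviction path */
    }

    static int foo_alloc(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
    {
            int ret;

            ret = drm_mm_insert_node(mm, node, size);
            if (ret == -ERESTARTSYS)
                    return ret;     /* let the syscall restart machinery run */
            if (ret == -ENOSPC)
                    ret = foo_evict_and_retry(mm, node, size);

            return ret;
    }
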
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f2e43d341980..81aa21561982 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -51,8 +51,6 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
- unsigned long addr;
- size_t sz;
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -68,47 +66,17 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
dmah->size = size;
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
&dmah->busaddr,
- GFP_KERNEL | __GFP_COMP);
+ GFP_KERNEL);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page((void *)addr));
- }
-
return dmah;
}
-
EXPORT_SYMBOL(drm_pci_alloc);
-/*
- * Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- unsigned long addr;
- size_t sz;
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page((void *)addr));
- }
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- }
-}
-
/**
* drm_pci_free - Free a PCI consistent memory block
* @dev: DRM device
@@ -119,7 +87,8 @@ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- __drm_legacy_pci_free(dev, dmah);
+ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dmah->busaddr);
kfree(dmah);
}
@@ -197,6 +166,18 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return drm_pci_irq_by_busid(dev, p);
}
+void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (dev->agp) {
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_legacy_agp_clear(dev);
+ kfree(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
+#ifdef CONFIG_DRM_LEGACY
+
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@@ -211,33 +192,9 @@ static void drm_pci_agp_init(struct drm_device *dev)
}
}
-void drm_pci_agp_destroy(struct drm_device *dev)
-{
- if (dev->agp) {
- arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_legacy_agp_clear(dev);
- kfree(dev->agp);
- dev->agp = NULL;
- }
-}
-
-/**
- * drm_get_pci_dev - Register a PCI device with the DRM subsystem
- * @pdev: PCI device
- * @ent: entry from the PCI ID table that matches @pdev
- * @driver: DRM device driver
- *
- * Attempt to gets inter module "drm" information. If we are first
- * then register the character device and inter module information.
- * Try and register, if we fail to register, backout previous work.
- *
- * NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your &drm_driver.load callback.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
+static int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -280,9 +237,6 @@ err_free:
drm_dev_put(dev);
return ret;
}
-EXPORT_SYMBOL(drm_get_pci_dev);
-
-#ifdef CONFIG_DRM_LEGACY
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d5c386154246..ca520028b2cb 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -99,6 +99,9 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
+ if (request->size > SIZE_MAX - PAGE_SIZE)
+ return -EINVAL;
+
if (dev->sg)
return -EINVAL;
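
The new bound presumably protects the page-count rounding further down in this function; as an illustrative-only sketch of the failure mode it closes off:

    /*
     * Kernel context assumed. With size within PAGE_SIZE of SIZE_MAX,
     * the round-up below would wrap and yield a tiny page count for a
     * huge request, which the new -EINVAL check now rejects up front.
     */
    static unsigned long foo_pages_for(size_t size)
    {
            return (size + PAGE_SIZE - 1) / PAGE_SIZE;
    }
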
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 15fb516ae2d8..74946690aba4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -26,12 +26,51 @@
* entity. Some flexibility for code reuse is provided through a separately
* allocated &drm_connector object and supporting optional &drm_bridge
* encoder drivers.
+ *
+ * Many drivers require only a very simple encoder that fulfills the minimum
+ * requirements of the display pipeline and does not add additional
+ * functionality. The function drm_simple_encoder_init() provides an
+ * implementation of such an encoder.
*/
-static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
.destroy = drm_encoder_cleanup,
};
+/**
+ * drm_simple_encoder_init - Initialize a preallocated encoder with
+ * basic functionality.
+ * @dev: drm device
+ * @encoder: the encoder to initialize
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initializes a preallocated encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * The encoder will be cleaned up automatically as part of the mode-setting
+ * cleanup.
+ *
+ * The caller of drm_simple_encoder_init() is responsible for freeing
+ * the encoder's memory after the encoder has been cleaned up. At the
+ * moment this only works reliably if the encoder data structure is
+ * stored in the device structure. Free the encoder's memory as part of
+ * the device release function.
+ *
+ * FIXME: Later improvements to DRM's resource management may allow for
+ * an automated kfree() of the encoder's memory.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_simple_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ int encoder_type)
+{
+ return drm_encoder_init(dev, encoder,
+ &drm_simple_encoder_funcs_cleanup,
+ encoder_type, NULL);
+}
+EXPORT_SYMBOL(drm_simple_encoder_init);
+
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -229,7 +268,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
struct drm_bridge *bridge)
{
- return drm_bridge_attach(&pipe->encoder, bridge, NULL);
+ return drm_bridge_attach(&pipe->encoder, bridge, NULL, 0);
}
EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
@@ -288,8 +327,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret || !connector)
return ret;
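
Typical adoption outside the simple-pipe path might look like the sketch below, with the encoder embedded in a device structure that outlives modesetting, as the kernel-doc above requires; the foo_* names and the DAC encoder type are placeholders:

    #include <drm/drm_simple_kms_helper.h>

    struct foo_device {
            struct drm_device drm;
            struct drm_encoder encoder;
    };

    static int foo_encoder_init(struct foo_device *foo, u32 crtc_mask)
    {
            int ret;

            ret = drm_simple_encoder_init(&foo->drm, &foo->encoder,
                                          DRM_MODE_ENCODER_DAC);
            if (ret)
                    return ret;

            foo->encoder.possible_crtcs = crtc_mask;

            return 0;
    }
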
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 669c93fe2500..42d46414f767 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -43,27 +43,66 @@
* - Signal a syncobj (set a trivially signaled fence)
* - Wait for a syncobj's fence to appear and be signaled
*
+ * The syncobj userspace API also provides operations to manipulate a syncobj
+ * in terms of a timeline of struct &dma_fence_chain rather than a single
+ * struct &dma_fence, through the following operations:
+ *
+ * - Signal a given point on the timeline
+ * - Wait for a given point to appear and/or be signaled
+ * - Import and export from/to a given point of a timeline
+ *
* At it's core, a syncobj is simply a wrapper around a pointer to a struct
* &dma_fence which may be NULL.
* When a syncobj is first created, its pointer is either NULL or a pointer
* to an already signaled fence depending on whether the
* &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
* &DRM_IOCTL_SYNCOBJ_CREATE.
- * When GPU work which signals a syncobj is enqueued in a DRM driver,
- * the syncobj fence is replaced with a fence which will be signaled by the
- * completion of that work.
- * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
- * driver retrieves syncobj's current fence at the time the work is enqueued
- * waits on that fence before submitting the work to hardware.
- * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
- * All manipulation of the syncobjs's fence happens in terms of the current
- * fence at the time the ioctl is called by userspace regardless of whether
- * that operation is an immediate host-side operation (signal or reset) or
- * or an operation which is enqueued in some driver queue.
- * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
- * manipulate a syncobj from the host by resetting its pointer to NULL or
+ *
+ * If the syncobj is considered as a binary (its state is either signaled or
+ * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
+ * the syncobj, the syncobj's fence is replaced with a fence which will be
+ * signaled by the completion of that work.
+ * If the syncobj is considered as a timeline primitive, when GPU work is
+ * enqueued in a DRM driver to signal a given point of the syncobj, a new
+ * struct &dma_fence_chain is created, pointing to the DRM driver's fence and
+ * also to the previous fence that was in the syncobj. The new struct
+ * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
+ * completion of the DRM driver's work and also of any work associated with
+ * the fence previously in the syncobj.
+ *
+ * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
+ * driver retrieves the syncobj's fence at the time the work is enqueued and
+ * waits on that fence before submitting the work to hardware. That fence is
+ * either:
+ *
+ * - The syncobj's current fence if the syncobj is considered as a binary
+ * primitive.
+ * - The struct &dma_fence associated with a given point if the syncobj is
+ * considered as a timeline primitive.
+ *
+ * If the syncobj's fence is NULL or not present in the syncobj's timeline,
+ * the enqueue operation is expected to fail.
+ *
+ * With a binary syncobj, all manipulation of the syncobj's fence happens in
+ * terms of the current fence at the time the ioctl is called by userspace,
+ * regardless of whether that operation is an immediate host-side operation
+ * (signal or reset) or an operation which is enqueued in some driver
+ * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
+ * to manipulate a syncobj from the host by resetting its pointer to NULL or
* setting its pointer to a fence which is already signaled.
*
+ * With a timeline syncobj, all manipulation of the syncobj's fence happens in
+ * terms of a u64 value referring to a point in the timeline. See
+ * dma_fence_chain_find_seqno() to see how a given point is found in the
+ * timeline.
+ *
+ * Note that applications should be careful to always use the timeline set of
+ * ioctl() when dealing with a syncobj considered as a timeline. Using the
+ * binary set of ioctl() with a timeline syncobj could result in incorrect
+ * synchronization. The use of binary syncobjs is still supported through the
+ * timeline set of ioctl() by using a point value of 0; this reproduces the
+ * behavior of the binary set of ioctl() (for example, replacing the
+ * syncobj's fence when signaling).
+ *
*
* Host-side wait on syncobjs
* --------------------------
@@ -87,6 +126,16 @@
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
+ * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
+ * handles as well as an array of u64 points and does a host-side wait on all
+ * of the syncobj fences at the given points simultaneously.
+ *
+ * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
+ * fence to materialize on the timeline without waiting for the fence to be
+ * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
+ * requirement is inherited from the wait-before-signal behavior required by
+ * the Vulkan timeline semaphore API.
+ *
*
* Import/export of syncobjs
* -------------------------
@@ -120,6 +169,18 @@
* Because sync files are immutable, resetting or signaling the syncobj
* will not affect any sync files whose fences have been imported into the
* syncobj.
+ *
+ *
+ * Import/export of timeline points in timeline syncobjs
+ * -----------------------------------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
+ * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
+ * into another syncobj.
+ *
+ * Note that if you want to transfer a struct &dma_fence_chain from a given
+ * point on a timeline syncobj from/into a binary syncobj, you can use the
+ * point 0 to mean take/replace the fence in the syncobj.
*/
#include <linux/anon_inodes.h>
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index dd2bc85f43cc..939f0032aab1 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -85,7 +85,6 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
- drm_setup_hdcp_srm(drm_class);
return 0;
}
@@ -98,7 +97,6 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
- drm_teardown_hdcp_srm(drm_class);
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
@@ -230,7 +228,7 @@ static ssize_t modes_show(struct device *device,
mutex_lock(&connector->dev->mode_config.mutex);
list_for_each_entry(mode, &connector->modes, head) {
- written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+ written += scnprintf(buf + written, PAGE_SIZE - written, "%s\n",
mode->name);
}
mutex_unlock(&connector->dev->mode_config.mutex);
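
The switch is more than cosmetic: snprintf() returns the length that would have been written, so a running offset can exceed PAGE_SIZE and make the next size argument wrap. A minimal kernel-side sketch of the difference:

    static void foo_demo(void)
    {
            char buf[8];
            int n;

            n = snprintf(buf, sizeof(buf), "0123456789");
            /* n == 10: the would-be length, larger than the buffer */

            n = scnprintf(buf, sizeof(buf), "0123456789");
            /* n == 7: bytes actually stored, leaving room for the NUL */
            (void)n;
    }
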
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 1659b13b178c..da7b0b0c1090 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -69,6 +70,12 @@
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
+ *
+ * Drivers for hardware without support for vertical-blanking interrupts
+ * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * automatically generate fake vblank events as part of the display update.
+ * This functionality can also be controlled by the driver by enabling and
+ * disabling struct drm_crtc_state.no_vblank.
*/
/* Retry timestamp calculation up to 3 times to satisfy
@@ -137,10 +144,9 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
- }
-
- if (dev->driver->get_vblank_counter)
+ } else if (dev->driver->get_vblank_counter) {
return dev->driver->get_vblank_counter(dev, pipe);
+ }
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -332,7 +338,8 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) &&
+ !crtc->funcs->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -354,13 +361,11 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (WARN_ON(!crtc))
return;
- if (crtc->funcs->disable_vblank) {
+ if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
- return;
- }
+ } else {
+ dev->driver->disable_vblank(dev, pipe);
}
-
- dev->driver->disable_vblank(dev, pipe);
}
/*
@@ -480,19 +485,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
- /* Driver specific high-precision vblank timestamping supported? */
- if (dev->driver->get_vblank_timestamp)
- DRM_INFO("Driver supports precise vblank timestamp query.\n");
- else
- DRM_INFO("No driver support for vblank timestamp query.\n");
-
- /* Must have precise timestamping for reliable vblank instant disable */
- if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
- dev->vblank_disable_immediate = false;
- DRM_INFO("Setting vblank_disable_immediate to false because "
- "get_vblank_timestamp == NULL\n");
- }
-
return 0;
err:
@@ -502,6 +494,28 @@ err:
EXPORT_SYMBOL(drm_vblank_init);
/**
+ * drm_dev_has_vblank - test if vblanking has been initialized for
+ * a device
+ * @dev: the device
+ *
+ * Drivers may call this function to test if vblank support is
+ * initialized for a device. For most hardware this means that vblanking
+ * can also be enabled.
+ *
+ * Atomic helpers use this function to initialize
+ * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset().
+ *
+ * Returns:
+ * True if vblanking has been initialized for the given device, false
+ * otherwise.
+ */
+bool drm_dev_has_vblank(const struct drm_device *dev)
+{
+ return dev->num_crtcs != 0;
+}
+EXPORT_SYMBOL(drm_dev_has_vblank);
+
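
A hedged sketch of the kind of branch this enables in drivers and helpers; the fake-event fallback follows the vblank DOC comment earlier in this patch, and foo_* is a placeholder:

    #include <drm/drm_vblank.h>

    static void foo_crtc_enable_vblank(struct drm_crtc *crtc,
                                       struct drm_crtc_state *state)
    {
            if (drm_dev_has_vblank(crtc->dev))
                    drm_crtc_vblank_on(crtc);
            else
                    state->no_vblank = true; /* atomic helpers fake the events */
    }
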
+/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
@@ -523,9 +537,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
*
* Calculate and store various constants which are later needed by vblank and
* swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from CRTC's true
- * scanout timing, so they take things like panel scaling or other adjustments
- * into account.
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). They are derived from
+ * CRTC's true scanout timing, so they take things like panel scaling or
+ * other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -576,9 +590,9 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
- * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank
+ * timestamp helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to time which should receive the timestamp
@@ -586,11 +600,12 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
* if flag is set.
+ * @get_scanout_position:
+ * Callback function to retrieve the scanout position. See
+ * &struct drm_crtc_helper_funcs.get_scanout_position.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be directly
- * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver
- * if &drm_driver.get_scanout_position is implemented.
+ * timings and current video scanout position of a CRTC.
*
* The current implementation only handles standard video modes. For double scan
* and interlaced modes the driver is supposed to adjust the hardware mode
@@ -606,34 +621,30 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe,
- int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq)
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq,
+ drm_vblank_get_scanout_position_func get_scanout_position)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
- struct drm_crtc *crtc;
const struct drm_display_mode *mode;
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int vpos, hpos, i;
int delta_ns, duration_ns;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return false;
-
- crtc = drm_crtc_from_index(dev, pipe);
-
- if (pipe >= dev->num_crtcs || !crtc) {
+ if (pipe >= dev->num_crtcs) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return false;
}
/* Scanout position query not supported? Should not happen. */
- if (!dev->driver->get_scanout_position) {
- DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ if (!get_scanout_position) {
+ DRM_ERROR("Called from CRTC w/o get_scanout_position()!?\n");
return false;
}
@@ -648,7 +659,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
if (mode->crtc_clock == 0) {
DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
-
return false;
}
@@ -664,11 +674,10 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, pipe,
- in_vblank_irq,
- &vpos, &hpos,
- &stime, &etime,
- mode);
+ vbl_status = get_scanout_position(crtc, in_vblank_irq,
+ &vpos, &hpos,
+ &stime, &etime,
+ mode);
/* Return as no-op if scanout query unsupported or failed. */
if (!vbl_status) {
@@ -720,7 +729,49 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
return true;
}
-EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp
+ * helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return, contains the actual maximum error of the timestamp
+ * @vblank_time: Pointer to time which should receive the timestamp
+ * @in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply workarounds for GPU-specific vblank IRQ quirks
+ * if this flag is set.
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be directly
+ * used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms
+ * driver if &drm_crtc_helper_funcs.get_scanout_position is implemented.
+ *
+ * The current implementation only handles standard video modes. For double scan
+ * and interlaced modes the driver is supposed to adjust the hardware mode
+ * (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
+ * match the scanout position reported.
+ *
+ * Note that atomic drivers must call drm_calc_timestamping_constants() before
+ * enabling a CRTC. The atomic helpers already take care of that in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * Returns:
+ * True on success, and false on failure, i.e. when no accurate
+ * timestamp could be acquired.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ crtc->helper_private->get_scanout_position);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
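(Conversion sketch, not part of this patch: with the per-CRTC callbacks
introduced here, a KMS driver wires up vblank support roughly as below;
all foo_* names are placeholders.)

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		/* driver-specific hardware scanout-position query */
		.get_scanout_position = foo_crtc_get_scanout_position,
	};

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		/* the helper turns scanout positions into precise timestamps */
		.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
		.enable_vblank = foo_crtc_enable_vblank,
		.disable_vblank = foo_crtc_disable_vblank,
	};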
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
@@ -747,15 +798,19 @@ static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
/* Query driver if possible and precision timestamping enabled. */
-	if (dev->driver->get_vblank_timestamp && (max_error > 0))
-		ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
-							tvblank, in_vblank_irq);
+	if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0)
+		ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
+							tvblank, in_vblank_irq);
/* GPU high precision timestamp query unsupported or failed.
* Return current monotonic/gettimeofday timestamp as best estimate.
@@ -977,9 +1032,11 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
 		if (crtc->funcs->enable_vblank)
 			return crtc->funcs->enable_vblank(crtc);
+	} else if (dev->driver->enable_vblank) {
+		return dev->driver->enable_vblank(dev, pipe);
 	}
 
-	return dev->driver->enable_vblank(dev, pipe);
+	return -EINVAL;
 }
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
@@ -1738,6 +1795,8 @@ done:
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ bool high_prec = false;
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
@@ -1760,8 +1819,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq, now,
- dev->driver->get_vblank_timestamp != NULL);
+ if (crtc && crtc->funcs->get_vblank_timestamp)
+ high_prec = true;
+
+ trace_drm_vblank_event(pipe, seq, now, high_prec);
}
/**
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 52e87e4869a5..aa88911bbc06 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -102,7 +102,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
return tmp;
}
-/**
+/*
* \c fault method for AGP virtual memory.
*
* \param vma virtual memory area.
@@ -192,7 +192,7 @@ static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
}
#endif
-/**
+/*
* \c nopage method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -225,7 +225,7 @@ static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c close method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -269,8 +269,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
}
if (!found_maps) {
- drm_dma_handle_t dmah;
-
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
@@ -284,10 +282,10 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -296,7 +294,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* \c fault method for DMA virtual memory.
*
* \param address access address.
@@ -331,7 +329,7 @@ static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c fault method for scatter-gather virtual memory.
*
* \param address access address.
@@ -437,7 +435,7 @@ static void drm_vm_close_locked(struct drm_device *dev,
}
}
-/**
+/*
* \c close method for all virtual memory types.
*
* \param vma virtual memory area.
@@ -455,7 +453,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
@@ -515,7 +513,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 32d9fac587f9..76d38561c910 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -12,6 +12,7 @@
#include "common.xml.h"
#include "state.xml.h"
+#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
@@ -233,6 +234,8 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 link_target, flush = 0;
+ bool has_blt = !!(gpu->identity.minor_features5 &
+ chipMinorFeatures5_BLT_ENGINE);
lockdep_assert_held(&gpu->lock);
@@ -248,16 +251,38 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
if (flush) {
unsigned int dwords = 7;
+ if (has_blt)
+ dwords += 10;
+
link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
- if (gpu->exec_state == ETNA_PIPE_3D)
- CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
- VIVS_TS_FLUSH_CACHE_FLUSH);
+ if (gpu->exec_state == ETNA_PIPE_3D) {
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ }
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
CMD_END(buffer);
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
@@ -323,6 +348,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
bool switch_mmu_context = gpu->mmu_context != mmu_context;
unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
+ bool has_blt = !!(gpu->identity.minor_features5 &
+ chipMinorFeatures5_BLT_ENGINE);
lockdep_assert_held(&gpu->lock);
@@ -433,6 +460,15 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
* 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
return_dwords = 7;
+
+ /*
+ * When the BLT engine is present we need 6 more dwords in the return
+ * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
+ * but we don't need the normal TS flush state.
+ */
+ if (has_blt)
+ return_dwords += 6;
+
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
@@ -447,11 +483,25 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
VIVS_GL_FLUSH_CACHE_DEPTH |
VIVS_GL_FLUSH_CACHE_COLOR);
- CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
- VIVS_TS_FLUSH_CACHE_FLUSH);
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
}
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+ if (has_blt) {
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+ CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+ }
+
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6b43c1c94e8f..a8685b2e1803 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -551,6 +551,7 @@ static int etnaviv_bind(struct device *dev)
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
+ priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
if (IS_ERR(priv->cmdbuf_suballoc)) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index efc656efeb0f..4d8dc9236e5f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -35,6 +35,7 @@ struct etnaviv_drm_private {
int num_gpus;
struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+ gfp_t shm_gfp_mask;
struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
struct etnaviv_iommu_global *mmu_global;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 6adea180d629..dc9ef302f517 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -602,6 +602,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
+ struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_gem_object *obj = NULL;
int ret;
@@ -624,8 +625,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
* above new_inode() why this is required _and_ expected if you're
* going to pin these pages.
*/
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
etnaviv_gem_obj_add(dev, obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 6b68fe16041b..98e60df882b6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -105,7 +105,7 @@ struct etnaviv_gem_submit {
unsigned int nr_pmrs;
struct etnaviv_perfmon_request *pmrs;
unsigned int nr_bos;
- struct etnaviv_gem_submit_bo bos[0];
+ struct etnaviv_gem_submit_bo bos[];
/* No new members here, the previous one is variable-length! */
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 799ec20b267d..a31eeff2b297 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -333,9 +333,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.revision = etnaviv_field(chipIdentity,
VIVS_HI_CHIP_IDENTITY_REVISION);
} else {
+ u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
+ gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+ gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
+ gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
/*
* !!!! HACK ALERT !!!!
@@ -350,7 +354,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
/* Another special case */
if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
- u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
if (chipDate == 0x20080814 && chipTime == 0x12051100) {
@@ -373,6 +376,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.model = chipModel_GC3000;
gpu->identity.revision &= 0xffff;
}
+
+ if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
+ gpu->identity.eco_id = 1;
+
+ if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
+ gpu->identity.eco_id = 1;
}
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@@ -506,7 +515,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* read idle register. */
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
- /* try reseting again if FE it not idle */
+ /* try resetting again if FE is not idle */
if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
dev_dbg(gpu->dev, "FE is not idle\n");
continue;
@@ -772,6 +781,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
+ /*
+ * If the GPU is part of a system with DMA addressing limitations,
+ * request pages for our SHM backend buffers from the DMA32 zone to
+ * hopefully avoid performance killing SWIOTLB bounce buffering.
+ */
+ if (dma_addressing_limited(gpu->dev))
+ priv->shm_gfp_mask |= GFP_DMA32;
+
/* Create buffer: */
ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
PAGE_SIZE);
@@ -851,6 +868,13 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
verify_dma(gpu, &debug);
+ seq_puts(m, "\tidentity\n");
+ seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
+ seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
+ seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
+ seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
+ seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
+
seq_puts(m, "\tfeatures\n");
seq_printf(m, "\t major_features: 0x%08x\n",
gpu->identity.features);
@@ -930,6 +954,20 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
seq_puts(m, "\t FP is not idle\n");
if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
seq_puts(m, "\t TS is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
+ seq_puts(m, "\t BL is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
+ seq_puts(m, "\t ASYNCFE is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
+ seq_puts(m, "\t MC is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
+ seq_puts(m, "\t PPA is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
+ seq_puts(m, "\t WD is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
+ seq_puts(m, "\t NN is not idle\n");
+ if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
+ seq_puts(m, "\t TP is not idle\n");
if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
seq_puts(m, "\t AXI low power mode\n");
@@ -1805,11 +1843,15 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
if (atomic_read(&gpu->sched.hw_rq_count))
return -EBUSY;
- /* Check whether the hardware (except FE) is idle */
- mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
+ /* Check whether the hardware (except FE and MC) is idle */
+ mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
+ VIVS_HI_IDLE_STATE_MC);
idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
- if (idle != mask)
+ if (idle != mask) {
+ dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
+ idle);
return -EBUSY;
+ }
return etnaviv_gpu_hw_suspend(gpu);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 97bb48042b4d..8ea48697d132 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -15,11 +15,11 @@ struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;
struct etnaviv_chip_identity {
- /* Chip model. */
u32 model;
-
- /* Revision value.*/
u32 revision;
+ u32 product_id;
+ u32 customer_id;
+ u32 eco_id;
/* Supported feature fields. */
u32 features;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 39b463db76c9..167971a09be7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -7,8 +7,42 @@
static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
{
+ .model = 0x400,
+ .revision = 0x4652,
+ .product_id = 0x70001,
+ .customer_id = 0x100,
+ .eco_id = 0,
+ .stream_count = 4,
+ .register_max = 64,
+ .thread_count = 128,
+ .shader_core_count = 1,
+ .vertex_cache_size = 8,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 256,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 8,
+ .features = 0xa0e9e004,
+ .minor_features0 = 0xe1299fff,
+ .minor_features1 = 0xbe13b219,
+ .minor_features2 = 0xce110010,
+ .minor_features3 = 0x8000001,
+ .minor_features4 = 0x20102,
+ .minor_features5 = 0x120000,
+ .minor_features6 = 0x0,
+ .minor_features7 = 0x0,
+ .minor_features8 = 0x0,
+ .minor_features9 = 0x0,
+ .minor_features10 = 0x0,
+ .minor_features11 = 0x0,
+ },
+ {
.model = 0x7000,
.revision = 0x6214,
+ .product_id = ~0U,
+ .customer_id = ~0U,
+ .eco_id = ~0U,
.stream_count = 16,
.register_max = 64,
.thread_count = 1024,
@@ -43,7 +77,13 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
if (etnaviv_chip_identities[i].model == ident->model &&
- etnaviv_chip_identities[i].revision == ident->revision) {
+ etnaviv_chip_identities[i].revision == ident->revision &&
+ (etnaviv_chip_identities[i].product_id == ident->product_id ||
+ etnaviv_chip_identities[i].product_id == ~0U) &&
+ (etnaviv_chip_identities[i].customer_id == ident->customer_id ||
+ etnaviv_chip_identities[i].customer_id == ~0U) &&
+ (etnaviv_chip_identities[i].eco_id == ident->eco_id ||
+ etnaviv_chip_identities[i].eco_id == ~0U)) {
memcpy(ident, &etnaviv_chip_identities[i],
sizeof(*ident));
return true;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 8adbf2861bff..e6795bafcbb9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -32,6 +32,7 @@ struct etnaviv_pm_domain {
};
struct etnaviv_pm_domain_meta {
+ unsigned int feature;
const struct etnaviv_pm_domain *domains;
u32 nr_domains;
};
@@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = {
static const struct etnaviv_pm_domain_meta doms_meta[] = {
{
+ .feature = chipFeatures_PIPE_3D,
.nr_domains = ARRAY_SIZE(doms_3d),
.domains = &doms_3d[0]
},
{
+ .feature = chipFeatures_PIPE_2D,
.nr_domains = ARRAY_SIZE(doms_2d),
.domains = &doms_2d[0]
},
{
+ .feature = chipFeatures_PIPE_VG,
.nr_domains = ARRAY_SIZE(doms_vg),
.domains = &doms_vg[0]
}
};
+static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
+{
+ unsigned int num = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (gpu->identity.features & meta->feature)
+ num += meta->nr_domains;
+ }
+
+ return num;
+}
+
+static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
+ unsigned int index)
+{
+ const struct etnaviv_pm_domain *domain = NULL;
+ unsigned int offset = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+ if (!(gpu->identity.features & meta->feature))
+ continue;
+
+		if (index - offset >= meta->nr_domains) {
+			offset += meta->nr_domains;
+			continue;
+		}
+
+		domain = meta->domains + (index - offset);
+		break;
+	}
+
+ return domain;
+}
+
int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_domain *domain)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
+ const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
- if (domain->iter >= meta->nr_domains)
+ if (domain->iter >= nr_domains)
return -EINVAL;
- dom = meta->domains + domain->iter;
+ dom = pm_domain(gpu, domain->iter);
+ if (!dom)
+ return -EINVAL;
domain->id = domain->iter;
domain->nr_signals = dom->nr_signals;
strncpy(domain->name, dom->name, sizeof(domain->name));
domain->iter++;
- if (domain->iter == meta->nr_domains)
+ if (domain->iter == nr_domains)
domain->iter = 0xff;
return 0;
@@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
struct drm_etnaviv_pm_signal *signal)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
+ const unsigned int nr_domains = num_pm_domains(gpu);
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;
- if (signal->domain >= meta->nr_domains)
+ if (signal->domain >= nr_domains)
return -EINVAL;
- dom = meta->domains + signal->domain;
+ dom = pm_domain(gpu, signal->domain);
+ if (!dom)
+ return -EINVAL;
if (signal->iter >= dom->nr_signals)
return -EINVAL;
diff --git a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h
index daae55995def..0e8bcf9dcc93 100644
--- a/drivers/gpu/drm/etnaviv/state_blt.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h
@@ -46,6 +46,8 @@ DEALINGS IN THE SOFTWARE.
/* This is a cut-down version of the state_blt.xml.h file */
+#define VIVS_BLT_SET_COMMAND 0x000140ac
+
#define VIVS_BLT_ENABLE 0x000140b8
#define VIVS_BLT_ENABLE_ENABLE 0x00000001
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 41d8da2b6f4f..deaaa99fa654 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
-- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
-- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
-- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
-- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
-- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
-- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
-- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
-- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
-
-Copyright (C) 2012-2018 by the following authors:
+- state.xml ( 26666 bytes, from 2019-12-20 21:20:35)
+- common.xml ( 35468 bytes, from 2018-02-10 13:09:26)
+- common_3d.xml ( 15058 bytes, from 2019-12-28 20:02:03)
+- state_hi.xml ( 30552 bytes, from 2019-12-28 20:02:48)
+- copyright.xml ( 1597 bytes, from 2018-02-10 13:09:26)
+- state_2d.xml ( 51552 bytes, from 2018-02-10 13:09:26)
+- state_3d.xml ( 83098 bytes, from 2019-12-28 20:02:03)
+- state_blt.xml ( 14252 bytes, from 2019-10-20 19:59:15)
+- state_vg.xml ( 5975 bytes, from 2018-02-10 13:09:26)
+
+Copyright (C) 2012-2019 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@@ -48,6 +48,9 @@ DEALINGS IN THE SOFTWARE.
#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001
#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002
#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003
+#define MMU_EXCEPTION_OUT_OF_BOUND 0x00000004
+#define MMU_EXCEPTION_READ_SECURITY_VIOLATION 0x00000005
+#define MMU_EXCEPTION_WRITE_SECURITY_VIOLATION 0x00000006
#define VIVS_HI 0x00000000
#define VIVS_HI_CLOCK_CONTROL 0x00000000
@@ -81,6 +84,13 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_HI_IDLE_STATE_IM 0x00000200
#define VIVS_HI_IDLE_STATE_FP 0x00000400
#define VIVS_HI_IDLE_STATE_TS 0x00000800
+#define VIVS_HI_IDLE_STATE_BL 0x00001000
+#define VIVS_HI_IDLE_STATE_ASYNCFE 0x00002000
+#define VIVS_HI_IDLE_STATE_MC 0x00004000
+#define VIVS_HI_IDLE_STATE_PPA 0x00008000
+#define VIVS_HI_IDLE_STATE_WD 0x00010000
+#define VIVS_HI_IDLE_STATE_NN 0x00020000
+#define VIVS_HI_IDLE_STATE_TP 0x00040000
#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000
#define VIVS_HI_AXI_CONFIG 0x00000008
@@ -140,6 +150,8 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_HI_CHIP_TIME 0x0000002c
+#define VIVS_HI_CHIP_CUSTOMER_ID 0x00000030
+
#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034
#define VIVS_HI_CACHE_CONTROL 0x00000038
@@ -237,6 +249,8 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_HI_BLT_INTR 0x000000d4
+#define VIVS_HI_CHIP_ECO_ID 0x000000e8
+
#define VIVS_HI_AUXBIT 0x000000ec
#define VIVS_PM 0x00000000
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 1eed3327999f..f2d87a7445c7 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -140,7 +140,7 @@ static void decon_ctx_remove(struct decon_context *ctx)
static u32 decon_calc_clkdiv(struct decon_context *ctx,
const struct drm_display_mode *mode)
{
- unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ unsigned long ideal_clk = mode->clock;
u32 clkdiv;
/* Find the clock divider value that gets us closest to ideal_clk */
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4785885c0f4f..d23d3502ca91 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -106,7 +106,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
- ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
+ ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge,
+ 0);
if (ret) {
DRM_DEV_ERROR(dp->dev,
"Failed to attach bridge to drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ba0f868b2477..57defeb44522 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -270,7 +270,7 @@ static int exynos_drm_bind(struct device *dev)
struct drm_encoder *encoder;
struct drm_device *drm;
unsigned int clone_mask;
- int cnt, ret;
+ int ret;
drm = drm_dev_alloc(&exynos_drm_driver, dev);
if (IS_ERR(drm))
@@ -293,10 +293,9 @@ static int exynos_drm_bind(struct device *dev)
exynos_drm_mode_config_init(drm);
/* setup possible_clones. */
- cnt = 0;
clone_mask = 0;
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
- clone_mask |= (1 << (cnt++));
+ clone_mask |= drm_encoder_mask(encoder);
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
encoder->possible_clones = clone_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a85365c56d4d..e080aa92338c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1514,7 +1514,6 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return 0;
connector->funcs->reset(connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, connector);
drm_connector_register(connector);
return 0;
}
@@ -1540,7 +1539,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
out_bridge = of_drm_find_bridge(device->dev.of_node);
if (out_bridge) {
- drm_bridge_attach(encoder, out_bridge, NULL);
+ drm_bridge_attach(encoder, out_bridge, NULL, 0);
dsi->out_bridge = out_bridge;
list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
@@ -1717,7 +1716,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (dsi->in_bridge_node) {
in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
if (in_bridge)
- drm_bridge_attach(encoder, in_bridge, NULL);
+ drm_bridge_attach(encoder, in_bridge, NULL, 0);
}
return mipi_dsi_host_register(&dsi->dsi_host);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 647a1fd1d815..e6ceaf36fb04 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -200,21 +200,13 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"failed to initialize drm fb helper.\n");
goto err_init;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to register drm_fb_helper_connector.\n");
- goto err_setup;
-
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index f141916eade6..1a7c828fc41d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -960,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
- ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+ ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0);
if (ret)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 9598ee3cc4d2..cff344367f81 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -151,5 +151,5 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
return fsl_dcu_attach_panel(fsl_dev, panel);
}
- return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
+ return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL, 0);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 1ed854f498b7..686385a66167 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -977,6 +977,9 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1459076d1980..1d8f67e4795a 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -513,14 +513,10 @@ int psb_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index a1f9ce9465a5..0e6facf21e33 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -227,7 +227,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
*/
- struct child_device_config devices[0];
+ struct child_device_config devices[];
};
struct bdb_lvds_options {
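(Sketch, not part of this patch: the count formula from the comment on
struct bdb_general_definitions, expressed as a hypothetical helper.)

	/* Number of child devices that fit in a definitions block. */
	static unsigned int
	bdb_child_count(const struct bdb_general_definitions *defs,
			size_t block_size)
	{
		return (block_size - sizeof(*defs)) /
		       sizeof(struct child_device_config);
	}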
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6956c8e7501c..2411eb9827b8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -363,7 +363,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
drm_irq_install(dev, dev->pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- dev->driver->get_vblank_counter = psb_get_vblank_counter;
psb_modeset_init(dev);
psb_fbdev_init(dev);
@@ -507,9 +506,6 @@ static struct drm_driver driver = {
.irq_postinstall = psb_irq_postinstall,
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
- .enable_vblank = psb_enable_vblank,
- .disable_vblank = psb_disable_vblank,
- .get_vblank_counter = psb_get_vblank_counter,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 3d4ef3071d45..956926341316 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -681,15 +681,15 @@ extern void psb_irq_turn_off_dpst(struct drm_device *dev);
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
+extern int psb_enable_vblank(struct drm_crtc *crtc);
+extern void psb_disable_vblank(struct drm_crtc *crtc);
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
-extern u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+extern u32 psb_get_vblank_counter(struct drm_crtc *crtc);
/* framebuffer.c */
extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index fed3b563e62e..531c5485be17 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -433,6 +433,9 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 91f90016dba9..15eb3770d817 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -506,8 +506,10 @@ int psb_irq_disable_dpst(struct drm_device *dev)
/*
* It is used to enable VBLANK interrupt
*/
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int psb_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t reg_val = 0;
@@ -545,8 +547,10 @@ int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
/*
* It is used to disable VBLANK interrupt
*/
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void psb_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -618,8 +622,10 @@ void mdfld_disable_te(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 psb_get_vblank_counter(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
uint32_t high_frame = PIPEAFRAMEHIGH;
uint32_t low_frame = PIPEAFRAMEPIXEL;
uint32_t pipeconf_reg = PIPEACONF;
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 58fd502e3b9d..4f73998848d1 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -12,6 +12,7 @@
#ifndef _PSB_IRQ_H_
#define _PSB_IRQ_H_
+struct drm_crtc;
struct drm_device;
bool sysirq_init(struct drm_device *dev);
@@ -26,9 +27,9 @@ int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
void psb_irq_turn_on_dpst(struct drm_device *dev);
void psb_irq_turn_off_dpst(struct drm_device *dev);
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int psb_enable_vblank(struct drm_crtc *crtc);
+void psb_disable_vblank(struct drm_crtc *crtc);
+u32 psb_get_vblank_counter(struct drm_crtc *crtc);
int mdfld_enable_te(struct drm_device *dev, int pipe);
void mdfld_disable_te(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 7fa7d4933f60..55b46a7150a5 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -40,6 +40,7 @@ struct hibmc_dislay_pll_config {
};
static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
+ {640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ},
{800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
{1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
{1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
@@ -47,6 +48,8 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
{1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
{1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ},
+ {1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
{1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
@@ -80,6 +83,9 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (!crtc_state->enable)
+ return 0;
+
if (state->crtc_x + state->crtc_w >
crtc_state->adjusted_mode.hdisplay ||
state->crtc_y + state->crtc_h >
@@ -184,6 +190,20 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
return plane;
}
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ unsigned int reg;
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
+ reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, dpms);
+ reg &= ~HIBMC_CRT_DISP_CTL_TIMING_MASK;
+ if (dpms == HIBMC_CRT_DPMS_ON)
+ reg |= HIBMC_CRT_DISP_CTL_TIMING(1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -200,6 +220,7 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
reg |= HIBMC_CURR_GATE_DISPLAY(1);
hibmc_set_current_gate(priv, reg);
drm_crtc_vblank_on(crtc);
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_ON);
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -208,6 +229,7 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
@@ -221,6 +243,25 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
hibmc_set_current_gate(priv, reg);
}
+static enum drm_mode_status
+hibmc_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int i = 0;
+ int vrefresh = drm_mode_vrefresh(mode);
+
+ if (vrefresh < 59 || vrefresh > 61)
+ return MODE_NOCLOCK;
+
+ for (i = 0; i < ARRAY_SIZE(hibmc_pll_table); i++) {
+ if (hibmc_pll_table[i].hdisplay == mode->hdisplay &&
+ hibmc_pll_table[i].vdisplay == mode->vdisplay)
+ return MODE_OK;
+ }
+
+ return MODE_BAD;
+}
+
static unsigned int format_pll_reg(void)
{
unsigned int pllreg = 0;
@@ -435,6 +476,42 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
}
+static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ void __iomem *mmio = priv->mmio;
+ u16 *r, *g, *b;
+ unsigned int reg;
+ int i;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ unsigned int offset = i << 2;
+ u8 red = *r++ >> 8;
+ u8 green = *g++ >> 8;
+ u8 blue = *b++ >> 8;
+ u32 rgb = (red << 16) | (green << 8) | blue;
+
+ writel(rgb, mmio + HIBMC_CRT_PALETTE + offset);
+ }
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg |= HIBMC_FIELD(HIBMC_CTL_DISP_CTL_GAMMA, 1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static int hibmc_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ hibmc_crtc_load_lut(crtc);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
@@ -444,6 +521,7 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = hibmc_crtc_enable_vblank,
.disable_vblank = hibmc_crtc_disable_vblank,
+ .gamma_set = hibmc_crtc_gamma_set,
};
static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
@@ -452,6 +530,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
.atomic_flush = hibmc_crtc_atomic_flush,
.atomic_enable = hibmc_crtc_atomic_enable,
.atomic_disable = hibmc_crtc_atomic_disable,
+ .mode_valid = hibmc_crtc_mode_valid,
};
int hibmc_de_init(struct hibmc_drm_private *priv)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 4a8a4cfb4b75..222356a4f9a8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -91,11 +91,11 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
priv->dev->mode_config.min_width = 0;
priv->dev->mode_config.min_height = 0;
priv->dev->mode_config.max_width = 1920;
- priv->dev->mode_config.max_height = 1440;
+ priv->dev->mode_config.max_height = 1200;
priv->dev->mode_config.fb_base = priv->fb_base;
priv->dev->mode_config.preferred_depth = 24;
- priv->dev->mode_config.prefer_shadow = 0;
+ priv->dev->mode_config.prefer_shadow = 1;
priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -327,6 +327,11 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
struct drm_device *dev;
int ret;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+ "hibmcdrmfb");
+ if (ret)
+ return ret;
+
dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
if (IS_ERR(dev)) {
DRM_ERROR("failed to allocate drm_device\n");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
index b63a1ee15ceb..17b30c393b10 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -68,6 +68,12 @@
#define HIBMC_CRT_DISP_CTL 0x80200
+#define HIBMC_CRT_DISP_CTL_DPMS(x) ((x) << 30)
+#define HIBMC_CRT_DISP_CTL_DPMS_MASK 0xc0000000
+
+#define HIBMC_CRT_DPMS_ON 0
+#define HIBMC_CRT_DPMS_OFF 3
+
#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25)
#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000
@@ -85,6 +91,9 @@
#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8)
#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100
+#define HIBMC_CTL_DISP_CTL_GAMMA(x) ((x) << 3)
+#define HIBMC_CTL_DISP_CTL_GAMMA_MASK 0x08
+
#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2)
#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4
@@ -170,6 +179,7 @@
#define CRT_PLL1_HS_74MHZ 0x23941dc2
#define CRT_PLL1_HS_80MHZ 0x23941001
#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2
+#define CRT_PLL1_HS_106MHZ 0x237C1641
#define CRT_PLL1_HS_108MHZ 0x23b41b01
#define CRT_PLL1_HS_162MHZ 0x23480681
#define CRT_PLL1_HS_148MHZ 0x23541dc2
@@ -182,10 +192,13 @@
#define CRT_PLL2_HS_78MHZ 0x50E147AE
#define CRT_PLL2_HS_74MHZ 0x602B6AE7
#define CRT_PLL2_HS_80MHZ 0x70000000
+#define CRT_PLL2_HS_106MHZ 0x0075c28f
#define CRT_PLL2_HS_108MHZ 0x80000000
#define CRT_PLL2_HS_162MHZ 0xA0000000
#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD
#define CRT_PLL2_HS_193MHZ 0xC0872B02
+#define HIBMC_CRT_PALETTE 0x80C00
+
#define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 6d98fdc06f6c..678ac2ef2a93 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -11,8 +11,10 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
@@ -20,7 +22,14 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
- return drm_add_modes_noedid(connector, 800, 600);
+ int count;
+
+ count = drm_add_modes_noedid(connector,
+ connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return count;
}
static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 50b988fdd5cc..99397ac3b363 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -54,6 +54,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index bdcf9c6ae9e9..f31068d74b18 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -777,7 +777,7 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
int ret;
/* associate the bridge to dsi encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach external bridge\n");
return ret;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a63790d32d75..c3332209f27a 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1356,10 +1356,16 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
-static int tda998x_bridge_attach(struct drm_bridge *bridge)
+static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
return tda998x_connector_init(priv, bridge->dev);
}
@@ -2022,7 +2028,7 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
if (ret)
goto err_encoder;
- ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL, 0);
if (ret)
goto err_bridge;
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
index d9a77f3b59b2..81972dce1aff 100644
--- a/drivers/gpu/drm/i915/.gitignore
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.hdrtest
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 907c4471f591..9afa5c4a6bf0 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -42,16 +42,9 @@ config DRM_I915
If "M" is selected, the module will be called i915.
-config DRM_I915_ALPHA_SUPPORT
- bool "Enable alpha quality support for new Intel hardware by default"
- depends on DRM_I915
- help
- This option is deprecated. Use DRM_I915_FORCE_PROBE option instead.
-
config DRM_I915_FORCE_PROBE
string "Force probe driver for selected new Intel hardware"
depends on DRM_I915
- default "*" if DRM_I915_ALPHA_SUPPORT
help
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index c280b6ae38eb..0bfd276c19fe 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -20,6 +20,9 @@ config DRM_I915_HEARTBEAT_INTERVAL
check the health of the GPU and undertake regular house-keeping of
internal driver state.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
May be 0 to disable heartbeats and therefore disable automatic GPU
hang detection.
@@ -33,11 +36,18 @@ config DRM_I915_PREEMPT_TIMEOUT
expires, the HW will be reset to allow the more important context
to execute.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
May be 0 to disable the timeout.
-config DRM_I915_SPIN_REQUEST
- int "Busywait for request completion (us)"
- default 5 # microseconds
+ The compiled-in default may be overridden at driver probe time on
+ certain platforms and certain engines; the effective value is
+ reflected in the sysfs control.
+
+config DRM_I915_MAX_REQUEST_BUSYWAIT
+ int "Busywait for request completion limit (ns)"
+ default 8000 # nanoseconds
help
Before sleeping waiting for a request (GPU operation) to complete,
we may spend some time polling for its completion. As the IRQ may
@@ -45,6 +55,9 @@ config DRM_I915_SPIN_REQUEST
check if the request will complete in the time it would have taken
us to enable the interrupt.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/max_busywait_duration_ns
+
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
@@ -60,6 +73,9 @@ config DRM_I915_STOP_TIMEOUT
that the reset itself may take longer and so be more disruptive to
interactive or low latency workloads.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/stop_timeout_ms
+
config DRM_I915_TIMESLICE_DURATION
int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
default 1 # milliseconds
@@ -73,4 +89,7 @@ config DRM_I915_TIMESLICE_DURATION
is scheduled for execution for the timeslice duration, before
switching to the next context.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/timeslice_duration_ms
+
May be 0 to disable timeslicing.
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a1f2411aa21b..9f887a86e555 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -46,15 +46,16 @@ i915-y += i915_drv.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
- intel_csr.o \
intel_device_info.o \
+ intel_dram.o \
intel_memory_region.o \
intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
intel_uncore.o \
- intel_wakeref.o
+ intel_wakeref.o \
+ vlv_suspend.o
# core library code
i915-y += \
@@ -66,7 +67,11 @@ i915-y += \
i915_user_extensions.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
+i915-$(CONFIG_DEBUG_FS) += \
+ i915_debugfs.o \
+ i915_debugfs_params.o \
+ display/intel_display_debugfs.o \
+ display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
@@ -75,9 +80,12 @@ gt-y += \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
gt/gen6_ppgtt.o \
+ gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
+ gt/intel_context_param.o \
+ gt/intel_context_sseu.o \
gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
@@ -102,7 +110,8 @@ gt-y += \
gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
- gt/intel_workarounds.o
+ gt/intel_workarounds.o \
+ gt/sysfs_engines.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -179,6 +188,7 @@ i915-y += \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
+ display/intel_csr.o \
display/intel_display.o \
display/intel_display_power.o \
display/intel_dpio_phy.o \
@@ -187,6 +197,7 @@ i915-y += \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
+ display/intel_global_state.o \
display/intel_hdcp.o \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index f8e882101396..17cee6f80d8b 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -39,14 +39,14 @@
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
- return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
static inline int payload_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
- return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
}
@@ -55,7 +55,7 @@ static void wait_for_header_credits(struct drm_i915_private *dev_priv,
{
if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
MAX_HEADER_CREDIT, 100))
- DRM_ERROR("DSI header credits not released\n");
+ drm_err(&dev_priv->drm, "DSI header credits not released\n");
}
static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
@@ -63,7 +63,7 @@ static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
{
if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
MAX_PLOAD_CREDIT, 100))
- DRM_ERROR("DSI payload credits not released\n");
+ drm_err(&dev_priv->drm, "DSI payload credits not released\n");
}
static enum transcoder dsi_port_to_transcoder(enum port port)
@@ -97,7 +97,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
dsi->channel = 0;
ret = mipi_dsi_dcs_nop(dsi);
if (ret < 0)
- DRM_ERROR("error sending DCS NOP command\n");
+ drm_err(&dev_priv->drm,
+ "error sending DCS NOP command\n");
}
/* wait for header credits to be released */
@@ -109,9 +110,9 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+ if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
LPTX_IN_PROGRESS), 20))
- DRM_ERROR("LPTX bit not cleared\n");
+ drm_err(&dev_priv->drm, "LPTX bit not cleared\n");
}
}
@@ -129,14 +130,15 @@ static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
free_credits = payload_credits_available(dev_priv, dsi_trans);
if (free_credits < 1) {
- DRM_ERROR("Payload credit not available\n");
+ drm_err(&dev_priv->drm,
+ "Payload credit not available\n");
return false;
}
for (j = 0; j < min_t(u32, len - i, 4); j++)
tmp |= *data++ << 8 * j;
- I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp);
}
return true;
@@ -154,11 +156,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
/* check if header credit available */
free_credits = header_credits_available(dev_priv, dsi_trans);
if (free_credits < 1) {
- DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ drm_err(&dev_priv->drm,
+ "send pkt header failed, not enough hdr credits\n");
return -1;
}
- tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
if (pkt.payload)
tmp |= PAYLOAD_PRESENT;
@@ -175,7 +178,7 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
- I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
return 0;
}
@@ -212,53 +215,55 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programming
*/
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
}
@@ -270,7 +275,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 dss_ctl1;
- dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -286,20 +291,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
- DRM_ERROR("DL buffer depth exceed max value\n");
+ drm_err(&dev_priv->drm,
+ "DL buffer depth exceed max value\n");
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- I915_WRITE(DSS_CTL2, dss_ctl2);
+ intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- I915_WRITE(DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -330,15 +336,15 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
- esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+ intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port));
}
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
- esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+ intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
}
}
@@ -348,7 +354,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- WARN_ON(intel_dsi->io_wakeref[port]);
+ drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]);
intel_dsi->io_wakeref[port] =
intel_display_power_get(dev_priv,
port == PORT_A ?
@@ -365,9 +371,9 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
tmp |= COMBO_PHY_MODE_DSI;
- I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
}
get_dsi_io_power_domains(dev_priv, intel_dsi);
@@ -394,40 +400,46 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0);
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
+ tmp);
}
}
@@ -442,12 +454,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* clear common keeper enable bit */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
}
/*
@@ -456,19 +468,19 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* as part of lane phy sequence configuration
*/
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_CL_DW5(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
tmp |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
}
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
}
/* Program swing and de-emphasis */
@@ -476,12 +488,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Set training enable to trigger update */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
}
}
@@ -493,14 +505,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
tmp |= DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), tmp);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
- if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+ if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
500))
- DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+ drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n",
+ port_name(port));
}
}
@@ -516,28 +529,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
tmp &= ~MASTER_INIT_TIMER_MASK;
tmp |= intel_dsi->init_count;
- I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
}
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
/* shadow register inside display core */
- I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
}
/* Program DPHY data lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
- intel_dsi->dphy_data_lane_reg);
+ intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
/* shadow register inside display core */
- I915_WRITE(DSI_DATA_TIMING_PARAM(port),
- intel_dsi->dphy_data_lane_reg);
+ intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
}
/*
@@ -549,25 +564,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (IS_GEN(dev_priv, 11)) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp = intel_de_read(dev_priv,
+ DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+ intel_de_write(dev_priv,
+ DPHY_TA_TIMING_PARAM(port),
+ tmp);
/* shadow register inside display core */
- tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp = intel_de_read(dev_priv,
+ DSI_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ intel_de_write(dev_priv,
+ DSI_TA_TIMING_PARAM(port), tmp);
}
}
}
if (IS_ELKHARTLAKE(dev_priv)) {
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_DPHY_CHKN(phy));
+ tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
- I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
+ intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
}
}
}
@@ -579,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -595,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
@@ -613,14 +633,14 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
if (INTEL_GEN(dev_priv) >= 12)
@@ -628,11 +648,11 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
else
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(ICL_DPCLKA_CFGCR0);
+ intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void
@@ -649,7 +669,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
if (intel_dsi->eotp_pkt)
tmp &= ~EOTP_DISABLED;
@@ -726,16 +746,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
}
}
- I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
}
/* enable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp |= PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
/* configure stream splitting */
@@ -746,7 +768,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* select data lane width */
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~DDI_PORT_WIDTH_MASK;
tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
@@ -772,15 +794,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* enable DDI buffer */
tmp |= TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
- LINK_READY), 2500))
- DRM_ERROR("DSI link not ready\n");
+ if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ LINK_READY), 2500))
+ drm_err(&dev_priv->drm, "DSI link not ready\n");
}
}
@@ -836,17 +858,18 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* minimum hactive as per bspec: 256 pixels */
if (adjusted_mode->crtc_hdisplay < 256)
- DRM_ERROR("hactive is less then 256 pixels\n");
+ drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n");
/* if RGB666 format, then hactive must be multiple of 4 pixels */
if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
- DRM_ERROR("hactive pixels are not multiple of 4\n");
+ drm_err(&dev_priv->drm,
+ "hactive pixels are not multiple of 4\n");
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(HTOTAL(dsi_trans),
- (hactive - 1) | ((htotal - 1) << 16));
+ intel_de_write(dev_priv, HTOTAL(dsi_trans),
+ (hactive - 1) | ((htotal - 1) << 16));
}
/* TRANS_HSYNC register to be programmed only for video mode */
@@ -855,11 +878,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
/* BSPEC: hsync size should be at least 16 pixels */
if (hsync_size < 16)
- DRM_ERROR("hsync size < 16 pixels\n");
+ drm_err(&dev_priv->drm,
+ "hsync size < 16 pixels\n");
}
if (hback_porch < 16)
- DRM_ERROR("hback porch < 16 pixels\n");
+ drm_err(&dev_priv->drm, "hback porch < 16 pixels\n");
if (intel_dsi->dual_link) {
hsync_start /= 2;
@@ -868,8 +892,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(HSYNC(dsi_trans),
- (hsync_start - 1) | ((hsync_end - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(dsi_trans),
+ (hsync_start - 1) | ((hsync_end - 1) << 16));
}
}
@@ -882,21 +906,21 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- I915_WRITE(VTOTAL(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, VTOTAL(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
- DRM_ERROR("Invalid vsync_end value\n");
+ drm_err(&dev_priv->drm, "Invalid vsync_end value\n");
if (vsync_start < vactive)
- DRM_ERROR("vsync_start less than vactive\n");
+ drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
/* program TRANS_VSYNC register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
}
/*
@@ -907,15 +931,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
*/
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+ intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
if (INTEL_GEN(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VBLANK(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
}
}
}
@@ -930,14 +954,15 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
tmp |= PIPECONF_ENABLE;
- I915_WRITE(PIPECONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 10))
- DRM_ERROR("DSI transcoder not enabled\n");
+ drm_err(&dev_priv->drm,
+ "DSI transcoder not enabled\n");
}
}
@@ -968,26 +993,26 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
- I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
- I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- tmp = I915_READ(DSI_TA_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
tmp &= ~TA_TIMEOUT_VALUE_MASK;
tmp |= TA_TIMEOUT_VALUE(ta_timeout);
- I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
}
}
@@ -1041,14 +1066,15 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
* FIXME: This uses the number of DW's currently in the payload
* receive queue. This is probably not what we want here.
*/
- tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans));
tmp &= NUMBER_RX_PLOAD_DW_MASK;
/* multiply "Number Rx Payload DW" by 4 to get max value */
tmp = tmp * 4;
dsi = intel_dsi->dsi_hosts[port]->device;
ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
if (ret < 0)
- DRM_ERROR("error setting max return pkt size%d\n", tmp);
+ drm_err(&dev_priv->drm,
+ "error setting max return pkt size%d\n", tmp);
}
/* panel power on related mipi dsi vbt sequences */
@@ -1077,8 +1103,6 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
/* step3b */
gen11_dsi_map_pll(encoder, pipe_config);
@@ -1092,13 +1116,24 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+}
+
+static void gen11_dsi_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ WARN_ON(crtc_state->has_pch_encoder);
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
/* step7: enable backlight */
- intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_panel_enable_backlight(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+
+ intel_crtc_vblank_on(crtc_state);
}
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
@@ -1113,14 +1148,15 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
tmp &= ~PIPECONF_ENABLE;
- I915_WRITE(PIPECONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 50))
- DRM_ERROR("DSI trancoder not disabled\n");
+ drm_err(&dev_priv->drm,
+ "DSI trancoder not disabled\n");
}
}
@@ -1147,32 +1183,34 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans));
tmp |= LINK_ENTER_ULPS;
tmp &= ~LINK_ULPS_TYPE_LP11;
- I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
- LINK_IN_ULPS),
+ if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+ LINK_IN_ULPS),
10))
- DRM_ERROR("DSI link not in ULPS\n");
+ drm_err(&dev_priv->drm, "DSI link not in ULPS\n");
}
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp &= ~PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
}
}
@@ -1186,15 +1224,16 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
tmp &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), tmp);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
- if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+ if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
8))
- DRM_ERROR("DDI port:%c buffer not idle\n",
- port_name(port));
+ drm_err(&dev_priv->drm,
+ "DDI port:%c buffer not idle\n",
+ port_name(port));
}
gen11_dsi_gate_clocks(encoder);
}
@@ -1219,9 +1258,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
tmp &= ~COMBO_PHY_MODE_DSI;
- I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
}
}
@@ -1311,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsc_get_config(encoder, pipe_config);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pipe_config->port_clock =
- cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+ pipe_config->port_clock = intel_dpll_get_freq(i915,
+ pipe_config->shared_dpll);
pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
@@ -1357,11 +1396,13 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
return ret;
/* DSI specific sanity checks on the common code */
- WARN_ON(vdsc_cfg->vbr_enable);
- WARN_ON(vdsc_cfg->simple_422);
- WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
- WARN_ON(vdsc_cfg->slice_height < 8);
- WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_height % vdsc_cfg->slice_height);
ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
if (ret)
@@ -1443,7 +1484,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
*pipe = PIPE_A;
@@ -1458,11 +1499,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
*pipe = PIPE_D;
break;
default:
- DRM_ERROR("Invalid PIPE input\n");
+ drm_err(&dev_priv->drm, "Invalid PIPE input\n");
goto out;
}
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
ret = tmp & PIPECONF_ENABLE;
}
out:
@@ -1582,7 +1623,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
*/
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
+ prepare_cnt);
prepare_cnt = ICL_PREPARE_CNT_MAX;
}
@@ -1590,28 +1632,33 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
ths_prepare_ns, tlpx_ns);
if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
/* trail cnt in escape clocks*/
trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
+ trail_cnt);
trail_cnt = ICL_TRAIL_CNT_MAX;
}
/* tclk pre count in escape clocks */
tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
}
/* tclk post count in escape clocks */
tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_post_cnt out of range (%d)\n",
+ tclk_post_cnt);
tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
}
@@ -1619,14 +1666,17 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
ths_prepare_ns, tlpx_ns);
if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
+ hs_zero_cnt);
hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
}
/* hs exit zero cnt in escape clocks */
exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "exit_zero_cnt out of range (%d)\n",
+ exit_zero_cnt);
exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
}
@@ -1668,9 +1718,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- intel_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(&connector->base,
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
@@ -1708,6 +1757,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->enable = gen11_dsi_enable;
encoder->disable = gen11_dsi_disable;
encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
@@ -1738,7 +1788,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
- DRM_ERROR("DSI fixed mode info missing\n");
+ drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
goto err;
}
@@ -1764,7 +1814,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- DRM_DEBUG_KMS("no device found\n");
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
goto err;
}
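
Most of the churn in icl_dsi.c above is one mechanical conversion: the
implicit-dev_priv I915_READ()/I915_WRITE() macros (and DRM_ERROR()) give way
to intel_de_read()/intel_de_write() and drm_err(), which take the device
explicitly. A self-contained sketch of the recurring read-modify-write shape,
with the i915 types and the register reduced to local stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    struct drm_i915_private { uint32_t regs[16]; };     /* stand-in */

    static uint32_t intel_de_read(struct drm_i915_private *i915,
                                  unsigned int reg)
    {
            return i915->regs[reg];
    }

    static void intel_de_write(struct drm_i915_private *i915,
                               unsigned int reg, uint32_t val)
    {
            i915->regs[reg] = val;
    }

    #define DEMO_REG    3               /* placeholder, not an i915 register */
    #define DEMO_ENABLE (1u << 0)

    int main(void)
    {
            struct drm_i915_private i915 = { .regs = { [DEMO_REG] = 0x100 } };
            uint32_t tmp;

            /*
             * the sequence seen throughout the diff; was:
             * tmp = I915_READ(DEMO_REG); ... I915_WRITE(DEMO_REG, tmp);
             */
            tmp = intel_de_read(&i915, DEMO_REG);
            tmp |= DEMO_ENABLE;
            intel_de_write(&i915, DEMO_REG, tmp);

            printf("reg = 0x%x\n", intel_de_read(&i915, DEMO_REG));
            return 0;
    }
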
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 3456d33feb46..e21fb14d5e07 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_acpi.h"
+#include "intel_display_types.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
@@ -156,3 +157,91 @@ void intel_register_dsm_handler(void)
void intel_unregister_dsm_handler(void)
{
}
+
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT 0
+#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT 8
+#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT 12
+#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT (1 << 16)
+#define ACPI_DEPENDS_ON_VGA (1 << 17)
+#define ACPI_PIPE_ID_SHIFT 18
+#define ACPI_PIPE_ID_MASK (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME (1ULL << 31)
+
+static u32 acpi_display_type(struct intel_connector *connector)
+{
+ u32 display_type;
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ display_type = ACPI_DISPLAY_TYPE_VGA;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ case DRM_MODE_CONNECTOR_TV:
+ display_type = ACPI_DISPLAY_TYPE_TV;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ default:
+ MISSING_CASE(connector->base.connector_type);
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ }
+
+ return display_type;
+}
+
+void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *drm_dev = &dev_priv->drm;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 display_index[16] = {};
+
+ /* Populate the ACPI IDs for all connectors for a given drm_device */
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
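
A worked instance of the _DOD id composition above: acpi_display_type()
supplies the type field and a per-type running index fills the low nibble.
The sketch below assumes a hypothetical second internal digital panel (the
connector mix is an assumption; the macro values are copied from the hunk):

    #include <stdio.h>

    #define ACPI_DISPLAY_INDEX_SHIFT           0
    #define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)

    int main(void)
    {
            unsigned int device_id = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;

            /* second connector of this type: display_index[type] was 1 */
            device_id |= 1u << ACPI_DISPLAY_INDEX_SHIFT;

            printf("acpi_device_id = 0x%04x\n", device_id); /* 0x0401 */
            return 0;
    }
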
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 1c576b3fb712..e8b068661d22 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -6,12 +6,17 @@
#ifndef __INTEL_ACPI_H__
#define __INTEL_ACPI_H__
+struct drm_i915_private;
+
#ifdef CONFIG_ACPI
void intel_register_dsm_handler(void);
void intel_unregister_dsm_handler(void);
+void intel_acpi_device_id_update(struct drm_i915_private *i915);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
+static inline
+void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c362eecdd414..d043057d2fa0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -35,7 +35,9 @@
#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -64,8 +66,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
- DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
+ drm_dbg_atomic(&dev_priv->drm,
+ "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -101,8 +104,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return 0;
}
- DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
+ drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -178,6 +181,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
/**
* intel_connector_needs_modeset - check if connector needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ * @connector: the connector
*/
bool
intel_connector_needs_modeset(struct intel_atomic_state *state,
@@ -314,7 +319,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
}
}
- if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+ if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ "Cannot find scaler for %s:%d\n", name, idx))
return;
/* set scaler mode */
@@ -357,8 +363,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
mode = SKL_PS_SCALER_MODE_DYN;
}
- DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
+ drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
}
@@ -409,8 +415,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
/* fail if required scalers > available scalers */
if (num_scalers_need > intel_crtc->num_scalers){
- DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
- num_scalers_need, intel_crtc->num_scalers);
+ drm_dbg_kms(&dev_priv->drm,
+ "Too many scaling requests %d > %d\n",
+ num_scalers_need, intel_crtc->num_scalers);
return -EINVAL;
}
@@ -455,8 +462,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
- DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
- plane->base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to add [PLANE:%d] to drm_state\n",
+ plane->base.id);
return PTR_ERR(state);
}
}
@@ -465,7 +473,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
- if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_plane->pipe != intel_crtc->pipe))
continue;
plane_state = intel_atomic_get_new_plane_state(intel_state,
@@ -494,18 +503,28 @@ intel_atomic_state_alloc(struct drm_device *dev)
return &state->base;
}
+void intel_atomic_state_free(struct drm_atomic_state *_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+
+ drm_atomic_state_default_release(&state->base);
+ kfree(state->global_objs);
+
+ i915_sw_fence_fini(&state->commit_ready);
+
+ kfree(state);
+}
+
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
+
drm_atomic_state_default_clear(&state->base);
+ intel_atomic_clear_global_state(state);
+
state->dpll_set = state->modeset = false;
state->global_state_changed = false;
state->active_pipes = 0;
- memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
- memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
- memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
- memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
- state->cdclk.pipe = INVALID_PIPE;
}
struct intel_crtc_state *
@@ -520,7 +539,7 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
-int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -539,7 +558,7 @@ int intel_atomic_lock_global_state(struct intel_atomic_state *state)
return 0;
}
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 74c749dbfb4f..11146292b06f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -45,6 +45,7 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc,
void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_free(struct drm_atomic_state *state);
void intel_atomic_state_clear(struct drm_atomic_state *state);
struct intel_crtc_state *
@@ -55,8 +56,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state);
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state);
#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 3e97af682b1b..457b258683d3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -37,6 +37,7 @@
#include "i915_trace.h"
#include "intel_atomic_plane.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"
@@ -132,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane,
kfree(plane_state);
}
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h),
+ dst_w * dst_h);
+}
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
+ unsigned int pixel_rate;
if (!plane_state->uapi.visible)
return 0;
+ pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
cpp = fb->format->cpp[0];
/*
@@ -152,45 +175,67 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
if (fb->format->is_yuv && fb->format->num_planes > 1)
cpp *= 4;
- return cpp * crtc_state->pixel_rate;
+ return pixel_rate * cpp;
}
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane)
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
- struct intel_crtc_state *crtc_state;
+ const struct intel_cdclk_state *cdclk_state;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
if (!plane_state->uapi.visible || !plane->min_cdclk)
- return false;
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(new_crtc_state, plane_state);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ /*
+ * No need to check against the cdclk state if
+ * the min cdclk for the plane doesn't increase.
+ *
+ * I.e. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back-and-forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ old_crtc_state->min_cdclk[plane->id])
+ return 0;
- crtc_state->min_cdclk[plane->id] =
- plane->min_cdclk(crtc_state, plane_state);
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
/*
- * Does the cdclk need to be bumbed up?
+ * No need to recalculate the cdclk state if
+ * the min cdclk for the pipe doesn't increase.
*
- * Note: we obviously need to be called before the new
- * cdclk frequency is calculated so state->cdclk.logical
- * hasn't been populated yet. Hence we look at the old
- * cdclk state under dev_priv->cdclk.logical. This is
- * safe as long we hold at least one crtc mutex (which
- * must be true since we have crtc_state).
+ * I.e. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back-and-forth
+ * display blinking due to constant cdclk changes.
*/
- if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id],
- dev_priv->cdclk.logical.cdclk);
- return true;
- }
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ cdclk_state->min_cdclk[crtc->pipe])
+ return 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ new_crtc_state->min_cdclk[plane->id],
+ crtc->base.base.id, crtc->base.name,
+ cdclk_state->min_cdclk[crtc->pipe]);
+ *need_cdclk_calc = true;
- return false;
+ return 0;
}
static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
@@ -225,12 +270,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *new_plane_state)
{
struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- const struct drm_framebuffer *fb;
+ const struct drm_framebuffer *fb = new_plane_state->hw.fb;
int ret;
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
- fb = new_plane_state->hw.fb;
-
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
@@ -292,6 +334,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
new_plane_state->uapi.visible = false;
if (!crtc)
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 5cedafdddb55..a6bbf42bae1f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -18,6 +18,9 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
@@ -46,7 +49,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state);
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane);
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
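
The new intel_plane_pixel_rate() above scales the crtc pixel rate by the
source-to-destination area ratio, with the destination clamped so that only
downscaling raises the effective rate. A self-contained arithmetic sketch,
with the kernel helpers DIV_ROUND_UP_ULL()/mul_u32_u32() reduced to plain C
and illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int plane_pixel_rate(unsigned int crtc_pixel_rate,
                                         unsigned int src_w, unsigned int src_h,
                                         unsigned int dst_w, unsigned int dst_h)
    {
            uint64_t num, den;

            /* downscaling limits the maximum pixel rate */
            dst_w = dst_w < src_w ? dst_w : src_w;
            dst_h = dst_h < src_h ? dst_h : src_h;

            num = (uint64_t)crtc_pixel_rate * src_w * src_h;
            den = (uint64_t)dst_w * dst_h;
            return (num + den - 1) / den;   /* DIV_ROUND_UP_ULL */
    }

    int main(void)
    {
            /* 4K plane downscaled to 1080p at a 148500 kHz crtc clock: 4x */
            printf("%u kHz\n", plane_pixel_rate(148500, 3840, 2160,
                                                1920, 1080)); /* 594000 */
            return 0;
    }
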
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index b18040793d9e..62f234f641de 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -30,6 +30,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -148,6 +149,10 @@ static const struct {
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+ { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 },
+ { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 },
+ { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 },
+ { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 },
};
/* HDMI N/CTS table */
@@ -233,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -242,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
+ if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ i = ARRAY_SIZE(hdmi_audio_clock);
+
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
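
The hunks above extend the HDMI audio pixel clock table with the 2x/4x
entries (296.703/297 and 593.407/594 MHz) while forcing pre-gen12 platforms
to the fallback for anything above 148.5 MHz. A reduced sketch of that
lookup, with the table entries as local stand-ins for the AUD_CONFIG values:

    #include <stddef.h>
    #include <stdio.h>

    static const struct { int clock; const char *cfg; } hdmi_audio_clock[] = {
            { 148352, "PIXEL_CLOCK_HDMI_148352" },
            { 148500, "PIXEL_CLOCK_HDMI_148500" },
            { 296703, "PIXEL_CLOCK_HDMI_296703" },
            { 297000, "PIXEL_CLOCK_HDMI_297000" },
            { 593407, "PIXEL_CLOCK_HDMI_593407" },
            { 594000, "PIXEL_CLOCK_HDMI_594000" },
    };

    static const char *lookup(int gen, int crtc_clock)
    {
            size_t i, n = sizeof(hdmi_audio_clock) / sizeof(hdmi_audio_clock[0]);

            for (i = 0; i < n; i++)
                    if (crtc_clock == hdmi_audio_clock[i].clock)
                            break;

            /* pre-gen12 hardware cannot use the new high-clock entries */
            if (gen < 12 && crtc_clock > 148500)
                    i = n;

            return i == n ? "fallback to defaults" : hdmi_audio_clock[i].cfg;
    }

    int main(void)
    {
            printf("gen11 @ 594000: %s\n", lookup(11, 594000));
            printf("gen12 @ 594000: %s\n", lookup(12, 594000));
            return 0;
    }
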
@@ -291,18 +300,18 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
u32 tmp;
int i;
- tmp = I915_READ(reg_eldv);
+ tmp = intel_de_read(dev_priv, reg_eldv);
tmp &= bits_eldv;
if (!tmp)
return false;
- tmp = I915_READ(reg_elda);
+ tmp = intel_de_read(dev_priv, reg_elda);
tmp &= ~bits_elda;
- I915_WRITE(reg_elda, tmp);
+ intel_de_write(dev_priv, reg_elda, tmp);
for (i = 0; i < drm_eld_size(eld) / 4; i++)
- if (I915_READ(reg_edid) != *((const u32 *)eld + i))
+ if (intel_de_read(dev_priv, reg_edid) != *((const u32 *)eld + i))
return false;
return true;
@@ -315,18 +324,18 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 eldv, tmp;
- DRM_DEBUG_KMS("Disable audio codec\n");
+ drm_dbg_kms(&dev_priv->drm, "Disable audio codec\n");
- tmp = I915_READ(G4X_AUD_VID_DID);
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
eldv = G4X_ELDV_DEVCTG;
/* Invalidate ELD */
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp &= ~eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
}
static void g4x_audio_codec_enable(struct intel_encoder *encoder,
@@ -340,9 +349,10 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm, "Enable audio codec, %u bytes ELD\n",
+ drm_eld_size(eld));
- tmp = I915_READ(G4X_AUD_VID_DID);
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
@@ -354,19 +364,20 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
G4X_HDMIW_HDMIEDID))
return;
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
len = (tmp >> 9) & 0x1f; /* ELD buffer size */
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
len = min(drm_eld_size(eld) / 4, len);
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ drm_dbg(&dev_priv->drm, "ELD size %d\n", len);
for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
+ intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID,
+ *((const u32 *)eld + i));
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp |= eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
}
static void
@@ -384,11 +395,12 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
- DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+ drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m,
+ nm->n);
else
- DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+ drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n");
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -400,9 +412,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
@@ -413,7 +425,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -429,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -437,25 +449,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
- DRM_DEBUG_KMS("using N %d\n", n);
+ drm_dbg_kms(&dev_priv->drm, "using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
- DRM_DEBUG_KMS("using automatic N\n");
+ drm_dbg_kms(&dev_priv->drm, "using automatic N\n");
}
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -476,26 +488,26 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n",
+ transcoder_name(cpu_transcoder));
mutex_lock(&dev_priv->av_mutex);
/* Disable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
/* Invalidate ELD */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
mutex_unlock(&dev_priv->av_mutex);
}
@@ -511,16 +523,17 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
- transcoder_name(cpu_transcoder), drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm,
+ "Enable audio codec on transcoder %s, %u bytes ELD\n",
+ transcoder_name(cpu_transcoder), drm_eld_size(eld));
mutex_lock(&dev_priv->av_mutex);
/* Enable audio presence detect, invalidate ELD */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
@@ -530,19 +543,20 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
*/
/* Reset ELD write address */
- tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
+ intel_de_write(dev_priv, HSW_AUD_EDID_DATA(cpu_transcoder),
+ *((const u32 *)eld + i));
/* ELD valid */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_ELD_VALID(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
@@ -561,11 +575,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe));
- if (WARN_ON(port == PORT_A))
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
if (HAS_PCH_IBX(dev_priv)) {
@@ -580,21 +595,21 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
}
/* Disable timestamps */
- tmp = I915_READ(aud_config);
+ tmp = intel_de_read(dev_priv, aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(aud_config, tmp);
+ intel_de_write(dev_priv, aud_config, tmp);
eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp &= ~eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
}
static void ilk_audio_codec_enable(struct intel_encoder *encoder,
@@ -611,11 +626,12 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe), drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm,
+ "Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(eld));
- if (WARN_ON(port == PORT_A))
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
/*
@@ -646,27 +662,28 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp &= ~eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
/* Reset ELD write address */
- tmp = I915_READ(aud_cntl_st);
+ tmp = intel_de_read(dev_priv, aud_cntl_st);
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(aud_cntl_st, tmp);
+ intel_de_write(dev_priv, aud_cntl_st, tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
+ intel_de_write(dev_priv, hdmiw_hdmiedid,
+ *((const u32 *)eld + i));
/* ELD valid */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp |= eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
/* Enable timestamps */
- tmp = I915_READ(aud_config);
+ tmp = intel_de_read(dev_priv, aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
@@ -674,7 +691,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(crtc_state);
- I915_WRITE(aud_config, tmp);
+ intel_de_write(dev_priv, aud_config, tmp);
}
/**
@@ -701,14 +718,15 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
/* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
- DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
- DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- connector->name,
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg(&dev_priv->drm, "ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ encoder->base.base.id,
+ encoder->base.name);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -800,37 +818,61 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
+static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ struct intel_cdclk_state *cdclk_state;
+ int ret;
+
+ /* need to hold at least one crtc lock for the global state */
+ ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx);
+ if (ret)
+ return ret;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ cdclk_state->force_min_cdclk_changed = true;
+ cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+
+ return drm_atomic_commit(&state->base);
+}
+
static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
+ struct intel_crtc *crtc;
int ret;
+ crtc = intel_get_first_crtc(dev_priv);
+ if (!crtc)
+ return;
+
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(&dev_priv->drm);
- if (WARN_ON(!state))
+ if (drm_WARN_ON(&dev_priv->drm, !state))
return;
state->acquire_ctx = &ctx;
retry:
- to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
- to_intel_atomic_state(state)->cdclk.force_min_cdclk =
- enable ? 2 * 96000 : 0;
-
- /* Protects dev_priv->cdclk.force_min_cdclk */
- ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
- if (!ret)
- ret = drm_atomic_commit(state);
-
+ ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc,
+ enable);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
- WARN_ON(ret);
+ drm_WARN_ON(&dev_priv->drm, ret);
drm_atomic_state_put(state);
@@ -850,9 +892,11 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
if (dev_priv->audio_power_refcount++ == 0) {
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
- I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
- DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ intel_de_write(dev_priv, AUD_FREQ_CNTRL,
+ dev_priv->audio_freq_cntrl);
+ drm_dbg_kms(&dev_priv->drm,
+ "restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->audio_freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -860,9 +904,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
glk_force_audio_cdclk(dev_priv, true);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE(AUD_PIN_BUF_CTL,
- (I915_READ(AUD_PIN_BUF_CTL) |
- AUD_PIN_BUF_ENABLE));
+ intel_de_write(dev_priv, AUD_PIN_BUF_CTL,
+ (intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE));
}
return ret;
@@ -897,15 +940,15 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
*/
- tmp = I915_READ(HSW_AUD_CHICKENBIT);
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
- I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
if (enable) {
- tmp = I915_READ(HSW_AUD_CHICKENBIT);
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
- I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
}
@@ -917,7 +960,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
return -ENODEV;
return dev_priv->cdclk.hw.cdclk;
@@ -940,7 +983,8 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
- if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
return NULL;
encoder = dev_priv->av_enc_map[pipe];
@@ -992,7 +1036,8 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
if (!encoder || !encoder->base.crtc) {
- DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
err = -ENODEV;
goto unlock;
}
@@ -1023,7 +1068,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
- DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
mutex_unlock(&dev_priv->av_mutex);
return ret;
}
@@ -1057,10 +1103,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
- if (WARN_ON(acomp->base.ops || acomp->base.dev))
+ if (drm_WARN_ON(&dev_priv->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
- if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !device_link_add(hda_kdev, i915_kdev,
+ DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(&dev_priv->drm);
@@ -1119,15 +1167,18 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
- DRM_ERROR("failed to add audio component (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
- dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
- DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
+ AUD_FREQ_CNTRL);
+ drm_dbg_kms(&dev_priv->drm,
+ "init value of AUD_FREQ_CNTRL of 0x%x\n",
+ dev_priv->audio_freq_cntrl);
}
dev_priv->audio_component_registered = true;
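
The intel_audio.c hunks above are one mechanical conversion applied many times: raw I915_READ()/I915_WRITE() accessors become intel_de_read()/intel_de_write() calls that carry the device pointer explicitly, and the global DRM_DEBUG_*/DRM_ERROR macros become their device-aware drm_dbg_kms()/drm_dbg()/drm_err() counterparts. A minimal sketch of the resulting read-modify-write idiom, assuming only the signatures visible in the hunks above (the register and bit parameters are placeholders, not part of this patch):

	/* Illustrative only: "reg" and "bit" stand in for a real register/bit. */
	static void example_rmw_set(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 bit)
	{
		u32 tmp;

		tmp = intel_de_read(dev_priv, reg);	/* was I915_READ(reg) */
		tmp |= bit;
		intel_de_write(dev_priv, reg, tmp);	/* was I915_WRITE(reg, tmp) */

		drm_dbg_kms(&dev_priv->drm,		/* was DRM_DEBUG_KMS() */
			    "set 0x%08x\n", bit);
	}

Threading dev_priv through every access is what lets the logging and WARN macros (drm_WARN_ON() and friends, as converted above) attribute their output to the right device on multi-GPU systems.
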
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index ef4017a1baba..839124647202 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
@@ -228,17 +227,20 @@ parse_panel_options(struct drm_i915_private *dev_priv,
ret = intel_opregion_get_panel_type(dev_priv);
if (ret >= 0) {
- WARN_ON(ret > 0xf);
+ drm_WARN_ON(&dev_priv->drm, ret > 0xf);
panel_type = ret;
- DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+ drm_dbg_kms(&dev_priv->drm, "Panel type: %d (OpRegion)\n",
+ panel_type);
} else {
if (lvds_options->panel_type > 0xf) {
- DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
- lvds_options->panel_type);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
return;
}
panel_type = lvds_options->panel_type;
- DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+ drm_dbg_kms(&dev_priv->drm, "Panel type: %d (VBT)\n",
+ panel_type);
}
dev_priv->vbt.panel_type = panel_type;
@@ -253,15 +255,17 @@ parse_panel_options(struct drm_i915_private *dev_priv,
switch (drrs_mode) {
case 0:
dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
- DRM_DEBUG_KMS("DRRS supported mode is static\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS supported mode is static\n");
break;
case 2:
dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
- DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported mode is seamless\n");
break;
default:
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS not supported (VBT input)\n");
break;
}
}
@@ -298,7 +302,8 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found panel mode in BIOS VBT legacy lfp table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -309,8 +314,9 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
- DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
- dev_priv->vbt.bios_lvds_val);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT initial LVDS value %x\n",
+ dev_priv->vbt.bios_lvds_val);
}
}
}
@@ -329,20 +335,22 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
return;
if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
- DRM_ERROR("GDTD size %u is too small.\n",
- generic_dtd->gdtd_size);
+ drm_err(&dev_priv->drm, "GDTD size %u is too small.\n",
+ generic_dtd->gdtd_size);
return;
} else if (generic_dtd->gdtd_size !=
sizeof(struct generic_dtd_entry)) {
- DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+ drm_err(&dev_priv->drm, "Unexpected GDTD size %u\n",
+ generic_dtd->gdtd_size);
/* DTD has unknown fields, but keep going */
}
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
if (dev_priv->vbt.panel_type >= num_dtd) {
- DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
- dev_priv->vbt.panel_type, num_dtd);
+ drm_err(&dev_priv->drm,
+ "Panel type %d not found in table of %d DTD's\n",
+ dev_priv->vbt.panel_type, num_dtd);
return;
}
@@ -385,7 +393,8 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found panel mode in BIOS VBT generic dtd table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
@@ -422,8 +431,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return;
if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
- DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
- backlight_data->entry_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported backlight data entry size %u\n",
+ backlight_data->entry_size);
return;
}
@@ -431,8 +441,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!dev_priv->vbt.backlight.present) {
- DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
- entry->type);
+ drm_dbg_kms(&dev_priv->drm,
+ "PWM backlight not present in VBT (type %u)\n",
+ entry->type);
return;
}
@@ -449,13 +460,14 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
- DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
- "active %s, min brightness %u, level %u, controller %u\n",
- dev_priv->vbt.backlight.pwm_freq_hz,
- dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- dev_priv->vbt.backlight.min_brightness,
- backlight_data->level[panel_type],
- dev_priv->vbt.backlight.controller);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u, controller %u\n",
+ dev_priv->vbt.backlight.pwm_freq_hz,
+ dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+ dev_priv->vbt.backlight.min_brightness,
+ backlight_data->level[panel_type],
+ dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
@@ -469,7 +481,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
index = i915_modparams.vbt_sdvo_panel_type;
if (index == -2) {
- DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
}
@@ -495,7 +508,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
}
@@ -540,13 +554,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
} else {
dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- dev_priv->vbt.int_tv_support,
- dev_priv->vbt.int_crt_support,
- dev_priv->vbt.lvds_use_ssc,
- dev_priv->vbt.lvds_ssc_freq,
- dev_priv->vbt.display_clock_mode,
- dev_priv->vbt.fdi_rx_polarity_inverted);
+ drm_dbg_kms(&dev_priv->drm,
+ "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+ dev_priv->vbt.int_tv_support,
+ dev_priv->vbt.int_crt_support,
+ dev_priv->vbt.lvds_use_ssc,
+ dev_priv->vbt.lvds_ssc_freq,
+ dev_priv->vbt.display_clock_mode,
+ dev_priv->vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -568,7 +583,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
* accurate and doesn't have to be, as long as it's not too strict.
*/
if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
- DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
+ drm_dbg_kms(&dev_priv->drm, "Skipping SDVO device mapping\n");
return;
}
@@ -586,14 +601,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
if (child->dvo_port != DEVICE_PORT_DVOB &&
child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Incorrect SDVO port. Skip it\n");
continue;
}
- DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
- " %s port\n",
- child->slave_addr,
- (child->dvo_port == DEVICE_PORT_DVOB) ?
- "SDVOB" : "SDVOC");
+ drm_dbg_kms(&dev_priv->drm,
+ "the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ child->slave_addr,
+ (child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
@@ -602,28 +619,30 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
mapping->ddc_pin = child->ddc_pin;
mapping->i2c_pin = child->i2c_pin;
mapping->initialized = 1;
- DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
- mapping->dvo_port,
- mapping->slave_addr,
- mapping->dvo_wiring,
- mapping->ddc_pin,
- mapping->i2c_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ mapping->dvo_port, mapping->slave_addr,
+ mapping->dvo_wiring, mapping->ddc_pin,
+ mapping->i2c_pin);
} else {
- DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
- "two SDVO device.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
}
if (child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
- " is a SDVO device with multiple inputs.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
}
count++;
}
if (!count) {
/* No SDVO device info is found */
- DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No SDVO device info is found in VBT\n");
}
}
@@ -664,7 +683,8 @@ parse_driver_features(struct drm_i915_private *dev_priv,
}
if (bdb->version < 228) {
- DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ drm_dbg_kms(&dev_priv->drm, "DRRS State Enabled:%d\n",
+ driver->drrs_enabled);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
* This is because, VBT is configured in such a way that
@@ -688,7 +708,7 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
if (bdb->version < 228)
return;
- power = find_section(bdb, BDB_LVDS_POWER);
+ power = find_section(bdb, BDB_LFP_POWER);
if (!power)
return;
@@ -742,8 +762,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
- edp_link_params->rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
break;
}
@@ -758,8 +779,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.lanes = 4;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
- edp_link_params->lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP lane count value %u\n",
+ edp_link_params->lanes);
break;
}
@@ -777,8 +799,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
- edp_link_params->preemphasis);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP pre-emphasis value %u\n",
+ edp_link_params->preemphasis);
break;
}
@@ -796,8 +819,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
- edp_link_params->vswing);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP voltage swing value %u\n",
+ edp_link_params->vswing);
break;
}
@@ -824,7 +848,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
psr = find_section(bdb, BDB_PSR);
if (!psr) {
- DRM_DEBUG_KMS("No PSR BDB found.\n");
+ drm_dbg_kms(&dev_priv->drm, "No PSR BDB found.\n");
return;
}
@@ -851,8 +875,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
- psr_table->lines_to_wait);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown PSR lines to wait %u\n",
+ psr_table->lines_to_wait);
break;
}
@@ -874,8 +899,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
- DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
- psr_table->tp1_wakeup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
@@ -893,8 +919,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
- DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
- psr_table->tp2_tp3_wakeup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
@@ -1000,12 +1027,12 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
*/
start = find_section(bdb, BDB_MIPI_CONFIG);
if (!start) {
- DRM_DEBUG_KMS("No MIPI config BDB found");
+ drm_dbg_kms(&dev_priv->drm, "No MIPI config BDB found");
return;
}
- DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
- panel_type);
+ drm_dbg(&dev_priv->drm, "Found MIPI Config block, panel index = %d\n",
+ panel_type);
/*
* get hold of the correct configuration block and pps data as per
@@ -1220,7 +1247,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
- if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !data || dev_priv->vbt.dsi.seq_version != 1))
return 0;
/* index = 1 to skip sequence byte */
@@ -1273,7 +1301,8 @@ static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
if (!len)
return;
- DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
@@ -1308,18 +1337,21 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
- DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No MIPI Sequence found, parsing complete\n");
return;
}
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 4) {
- DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
- sequence->version);
+ drm_err(&dev_priv->drm,
+ "Unable to parse MIPI Sequence Block v%u\n",
+ sequence->version);
return;
}
- DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+ drm_dbg(&dev_priv->drm, "Found MIPI sequence block v%u\n",
+ sequence->version);
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
if (!seq_data)
@@ -1336,13 +1368,15 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
break;
if (seq_id >= MIPI_SEQ_MAX) {
- DRM_ERROR("Unknown sequence %u\n", seq_id);
+ drm_err(&dev_priv->drm, "Unknown sequence %u\n",
+ seq_id);
goto err;
}
/* Log about presence of sequences we won't run. */
if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
- DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported sequence %u\n", seq_id);
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
@@ -1351,7 +1385,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
else
index = goto_next_sequence(data, index, seq_size);
if (!index) {
- DRM_ERROR("Invalid sequence %u\n", seq_id);
+ drm_err(&dev_priv->drm, "Invalid sequence %u\n",
+ seq_id);
goto err;
}
}
@@ -1362,7 +1397,7 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
fixup_mipi_sequences(dev_priv);
- DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
+ drm_dbg(&dev_priv->drm, "MIPI related VBT parsing complete\n");
return;
err:
@@ -1387,13 +1422,15 @@ parse_compression_parameters(struct drm_i915_private *i915,
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
- DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: unsupported compression param entry size\n");
return;
}
block_size = get_blocksize(params);
if (block_size < sizeof(*params)) {
- DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: expected 16 compression param entries\n");
return;
}
}
@@ -1405,12 +1442,14 @@ parse_compression_parameters(struct drm_i915_private *i915,
continue;
if (!params) {
- DRM_DEBUG_KMS("VBT: compression params not available\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: compression params not available\n");
continue;
}
if (child->compression_method_cps) {
- DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: CPS compression not supported\n");
continue;
}
@@ -1458,10 +1497,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
if (p != PORT_NONE) {
- DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
- "disabling port %c DVI/HDMI support\n",
- port_name(port), info->alternate_ddc_pin,
- port_name(p), port_name(p));
+ drm_dbg_kms(&dev_priv->drm,
+ "port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(port), info->alternate_ddc_pin,
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1509,10 +1549,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
if (p != PORT_NONE) {
- DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
- "disabling port %c DP support\n",
- port_name(port), info->alternate_aux_channel,
- port_name(p), port_name(p));
+ drm_dbg_kms(&dev_priv->drm,
+ "port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(port), info->alternate_aux_channel,
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1572,8 +1613,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
return ddc_pin_map[vbt_pin];
- DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
- vbt_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
return 0;
}
@@ -1624,8 +1666,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info = &dev_priv->vbt.ddi_port_info[port];
if (info->child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
return;
}
@@ -1636,8 +1679,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
- DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
- is_hdmi ? "/HDMI" : "");
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
is_dvi = false;
is_hdmi = false;
}
@@ -1653,11 +1697,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
- port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
- HAS_LSPCON(dev_priv) && child->lspcon,
- info->supports_typec_usb, info->supports_tbt,
- devdata->dsc != NULL);
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+ HAS_LSPCON(dev_priv) && child->lspcon,
+ info->supports_typec_usb, info->supports_tbt,
+ devdata->dsc != NULL);
if (is_dvi) {
u8 ddc_pin;
@@ -1667,9 +1712,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info->alternate_ddc_pin = ddc_pin;
sanitize_ddc_pin(dev_priv, port);
} else {
- DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
- "sticking to defaults\n",
- port_name(port), ddc_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), ddc_pin);
}
}
@@ -1682,9 +1728,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
u8 hdmi_level_shift = child->hdmi_level_shifter_value;
- DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
- port_name(port),
- hdmi_level_shift);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI level shift for port %c: %d\n",
+ port_name(port),
+ hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
info->hdmi_level_shift_set = true;
}
@@ -1708,19 +1755,22 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
}
if (max_tmds_clock)
- DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
- port_name(port), max_tmds_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI max TMDS clock for port %c: %d kHz\n",
+ port_name(port), max_tmds_clock);
info->max_tmds_clock = max_tmds_clock;
}
/* Parse the I_boost config for SKL and above */
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
- DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
- port_name(port), info->dp_boost_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT (e)DP boost level for port %c: %d\n",
+ port_name(port), info->dp_boost_level);
info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
- DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
- port_name(port), info->hdmi_boost_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI boost level for port %c: %d\n",
+ port_name(port), info->hdmi_boost_level);
}
/* DP max link rate for CNL+ */
@@ -1740,8 +1790,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info->dp_max_link_rate = 162000;
break;
}
- DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
- port_name(port), info->dp_max_link_rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT DP max link rate for port %c: %d\n",
+ port_name(port), info->dp_max_link_rate);
}
info->child = child;
@@ -1775,19 +1826,21 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) {
- DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No general definition block is found, no devices defined.\n");
return;
}
block_size = get_blocksize(defs);
if (block_size < sizeof(*defs)) {
- DRM_DEBUG_KMS("General definitions block too small (%u)\n",
- block_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "General definitions block too small (%u)\n",
+ block_size);
return;
}
bus_pin = defs->crt_ddc_gmbus_pin;
- DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ drm_dbg_kms(&dev_priv->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
dev_priv->vbt.crt_ddc_pin = bus_pin;
@@ -1806,19 +1859,22 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
- DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
- bdb->version, expected_size);
+ drm_dbg(&dev_priv->drm,
+ "Expected child device config size for VBT version %u not known; assuming %u\n",
+ bdb->version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
- DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, bdb->version);
+ drm_err(&dev_priv->drm,
+ "Unexpected child device config size %u (expected %u for VBT version %u)\n",
+ defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- DRM_DEBUG_KMS("Child device config size %u is too small.\n",
- defs->child_dev_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Child device config size %u is too small.\n",
+ defs->child_dev_size);
return;
}
@@ -1830,8 +1886,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!child->device_type)
continue;
- DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
- child->device_type);
+ drm_dbg_kms(&dev_priv->drm,
+ "Found VBT child device with type 0x%x\n",
+ child->device_type);
devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
if (!devdata)
@@ -1849,7 +1906,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
}
if (list_empty(&dev_priv->vbt.display_devices))
- DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no child dev is parsed from VBT\n");
}
/* Common defaults which may be overridden by VBT. */
@@ -1882,7 +1940,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev_priv));
- DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+ drm_dbg_kms(&dev_priv->drm, "Set default to SSC at %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
}
/* Defaults to initialize only if there is no VBT. */
@@ -1992,13 +2051,14 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
goto err_unmap_oprom;
if (sizeof(struct vbt_header) > size) {
- DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ drm_dbg(&dev_priv->drm, "VBT header incomplete\n");
goto err_unmap_oprom;
}
vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
if (vbt_size > size) {
- DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ drm_dbg(&dev_priv->drm,
+ "VBT incomplete (vbt_size overflows)\n");
goto err_unmap_oprom;
}
@@ -2041,7 +2101,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
- DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping VBT init due to disabled display.\n");
return;
}
@@ -2055,13 +2116,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
vbt = oprom_vbt;
- DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
+ drm_dbg_kms(&dev_priv->drm, "Found valid VBT in PCI ROM\n");
}
bdb = get_bdb_header(vbt);
- DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, bdb->version);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT signature \"%.*s\", BDB version %d\n",
+ (int)sizeof(vbt->signature), vbt->signature, bdb->version);
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
@@ -2086,7 +2148,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
out:
if (!vbt) {
- DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+ drm_info(&dev_priv->drm,
+ "Failed to find VBIOS tables (VBT)\n");
init_vbt_missing_defaults(dev_priv);
}
@@ -2238,13 +2301,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
const struct ddi_vbt_port_info *port_info =
&dev_priv->vbt.ddi_port_info[port];
- return port_info->supports_dp ||
- port_info->supports_dvi ||
- port_info->supports_hdmi;
+ return port_info->child;
}
/* FIXME maybe deal with port A as well? */
- if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ if (drm_WARN_ON(&dev_priv->drm,
+ port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
@@ -2373,8 +2435,9 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
} else if (dvo_port == DVO_PORT_MIPIB ||
dvo_port == DVO_PORT_MIPIC ||
dvo_port == DVO_PORT_MIPID) {
- DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
- port_name(dvo_port - DVO_PORT_MIPIA));
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unsupported DSI port %c\n",
+ port_name(dvo_port - DVO_PORT_MIPIA));
}
}
@@ -2493,7 +2556,7 @@ intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
const struct child_device_config *child =
i915->vbt.ddi_port_info[port].child;
- if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
+ if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEN9_LP(i915)))
return false;
return child && child->hpd_invert;
@@ -2526,8 +2589,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
if (!info->alternate_aux_channel) {
aux_ch = (enum aux_ch)port;
- DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "using AUX %c for port %c (platform default)\n",
+ aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
@@ -2559,8 +2623,78 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
break;
}
- DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "using AUX %c for port %c (VBT)\n",
+ aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
+
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock;
+}
+
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct ddi_vbt_port_info *info =
+ &i915->vbt.ddi_port_info[encoder->port];
+
+ return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
+}
+
+int intel_bios_dp_boost_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].dp_boost_level;
+}
+
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].hdmi_boost_level;
+}
+
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate;
+}
+
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin;
+}
+
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_dvi;
+}
+
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_hdmi;
+}
+
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_dp;
+}
+
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915,
+ enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_typec_usb;
+}
+
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_tbt;
+}
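
The accessors added at the tail of intel_bios.c exist so that encoder code can stop dereferencing dev_priv->vbt.ddi_port_info[] directly; the header hunk below exports them. A hedged sketch of a caller, assuming only the helpers introduced here (example_platform_limit() is a hypothetical function used purely for illustration):

	/* Illustrative caller; example_platform_limit() is hypothetical. */
	static int example_max_tmds_clock(struct intel_encoder *encoder)
	{
		int vbt_limit = intel_bios_max_tmds_clock(encoder);
		int hw_limit = example_platform_limit(encoder);

		/* 0 means the VBT specified no limit for this port. */
		if (vbt_limit <= 0)
			return hw_limit;

		return min(vbt_limit, hw_limit);
	}

Note the asymmetry in return conventions: intel_bios_hdmi_level_shift() signals "unset" with -1, while the other integer accessors return the raw VBT value (0 when absent), so each caller must know which sentinel applies.
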
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index d6a0c29d37ac..e29e79faa01b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,8 +32,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
@@ -247,5 +245,16 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
+int intel_bios_dp_boost_level(struct intel_encoder *encoder);
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder);
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index b228671d5a5d..58b264bc318d 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -122,7 +122,8 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
@@ -132,9 +133,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
- i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
- sp->t_rcd, sp->t_rc);
+ drm_dbg_kms(&dev_priv->drm,
+ "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+ i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+ sp->t_rcd, sp->t_rc);
}
return 0;
@@ -187,7 +189,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
ret = icl_get_qgv_points(dev_priv, &qi);
if (ret) {
- DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
num_channels = qi.num_channels;
@@ -228,8 +231,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * 9 / 10); /* 90% */
- DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
- i, j, bi->num_planes, bi->deratedbw[j]);
+ drm_dbg_kms(&dev_priv->drm,
+ "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
}
if (bi->num_planes == 1)
@@ -374,10 +378,9 @@ static struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_private_state *bw_state;
+ struct intel_global_state *bw_state;
- bw_state = drm_atomic_get_private_obj_state(&state->base,
- &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
@@ -392,7 +395,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int data_rate, max_data_rate;
unsigned int num_active_planes;
struct intel_crtc *crtc;
- int i;
+ int i, ret;
/* FIXME earlier gens need some checks too */
if (INTEL_GEN(dev_priv) < 11)
@@ -424,15 +427,20 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
bw_state->data_rate[crtc->pipe] = new_data_rate;
bw_state->num_active_planes[crtc->pipe] = new_active_planes;
- DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
}
if (!bw_state)
return 0;
+ ret = intel_atomic_lock_global_state(&bw_state->base);
+ if (ret)
+ return ret;
+
data_rate = intel_bw_data_rate(dev_priv, bw_state);
num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
@@ -441,15 +449,17 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
data_rate = DIV_ROUND_UP(data_rate, 1000);
if (data_rate > max_data_rate) {
- DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
- data_rate, max_data_rate, num_active_planes);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
+ data_rate, max_data_rate, num_active_planes);
return -EINVAL;
}
return 0;
}
-static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
+static struct intel_global_state *
+intel_bw_duplicate_state(struct intel_global_obj *obj)
{
struct intel_bw_state *state;
@@ -457,18 +467,16 @@ static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj
if (!state)
return NULL;
- __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-
return &state->base;
}
-static void intel_bw_destroy_state(struct drm_private_obj *obj,
- struct drm_private_state *state)
+static void intel_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
{
kfree(state);
}
-static const struct drm_private_state_funcs intel_bw_funcs = {
+static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_duplicate_state = intel_bw_duplicate_state,
.atomic_destroy_state = intel_bw_destroy_state,
};
@@ -481,13 +489,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
- &state->base, &intel_bw_funcs);
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
+ &state->base, &intel_bw_funcs);
return 0;
}
-
-void intel_bw_cleanup(struct drm_i915_private *dev_priv)
-{
- drm_atomic_private_obj_fini(&dev_priv->bw_obj);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 20b9ad241802..a8aa7624c5aa 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -9,13 +9,14 @@
#include <drm/drm_atomic.h>
#include "intel_display.h"
+#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_bw_state {
- struct drm_private_state base;
+ struct intel_global_state base;
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
@@ -25,7 +26,6 @@ struct intel_bw_state {
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
-void intel_bw_cleanup(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
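
intel_bw.c and intel_bw.h are converted here from the generic drm_private_obj machinery to the driver's own intel_global_state tracking: the state struct embeds intel_global_state, the duplicate/destroy hooks change type, and registration goes through intel_atomic_global_obj_init(). A condensed sketch of that shape, using only the functions visible in the hunks above (teardown is assumed to be handled centrally by the global-state code, which would explain why intel_bw_cleanup() is deleted outright rather than converted):

	static const struct intel_global_state_funcs example_bw_funcs = {
		.atomic_duplicate_state = intel_bw_duplicate_state,
		.atomic_destroy_state = intel_bw_destroy_state,
	};

	int example_bw_init(struct drm_i915_private *dev_priv)
	{
		struct intel_bw_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		/* No drm_atomic_private_obj_init()/_fini() pairing anymore. */
		intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
					     &state->base, &example_bw_funcs);
		return 0;
	}

Consumers must now take intel_atomic_lock_global_state(&bw_state->base) before reading the aggregated numbers, as the intel_bw_atomic_check() hunk shows; the same pattern appears in the audio hunk earlier, where glk_force_audio_cdclk_commit() grabs a CRTC lock and then the global-state lock before committing a forced minimum cdclk.
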
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0ce5926006ca..979a0241fdcb 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -55,43 +55,43 @@
*/
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
}
static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
}
static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
}
static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
}
static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 400000;
+ cdclk_config->cdclk = 400000;
}
static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
}
static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 hpllcc = 0;
@@ -102,7 +102,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
* FIXME is this the right way to detect 852GM/852GMV?
*/
if (pdev->revision == 0x1) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
@@ -116,24 +116,24 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
case GC_CLOCK_133_200:
case GC_CLOCK_133_200_2:
case GC_CLOCK_100_200:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
case GC_CLOCK_166_250:
- cdclk_state->cdclk = 250000;
+ cdclk_config->cdclk = 250000;
break;
case GC_CLOCK_100_133:
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
break;
case GC_CLOCK_133_266:
case GC_CLOCK_133_266_2:
case GC_CLOCK_166_266:
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
break;
}
}
static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -141,23 +141,23 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
pci_read_config_word(pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_320_MHZ:
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
break;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
- cdclk_state->cdclk = 190000;
+ cdclk_config->cdclk = 190000;
break;
}
}
static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -165,17 +165,17 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
pci_read_config_word(pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_320_MHZ:
- cdclk_state->cdclk = 320000;
+ cdclk_config->cdclk = 320000;
break;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
}
}
@@ -237,20 +237,21 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
- tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
- HPLLVCO_MOBILE : HPLLVCO);
+ tmp = intel_de_read(dev_priv,
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
- DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+ drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
+ tmp);
else
- DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+ drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);
return vco;
}
static void g33_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
@@ -261,7 +262,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -270,7 +271,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 3200000:
div_table = div_3200;
break;
@@ -287,18 +288,19 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
goto fail;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
- div_table[cdclk_sel]);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
return;
fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 190476;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 190476;
}
static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -307,31 +309,32 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
break;
case GC_DISPLAY_CLOCK_333_MHZ_PNV:
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
break;
case GC_DISPLAY_CLOCK_444_MHZ_PNV:
- cdclk_state->cdclk = 444444;
+ cdclk_config->cdclk = 444444;
break;
case GC_DISPLAY_CLOCK_200_MHZ_PNV:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
default:
- DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ drm_err(&dev_priv->drm,
+ "Unknown pnv display core clock 0x%04x\n", gcfgc);
/* fall through */
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
break;
case GC_DISPLAY_CLOCK_167_MHZ_PNV:
- cdclk_state->cdclk = 166667;
+ cdclk_config->cdclk = 166667;
break;
}
}
static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const u8 div_3200[] = { 16, 10, 8 };
@@ -341,7 +344,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -350,7 +353,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 3200000:
div_table = div_3200;
break;
@@ -364,62 +367,64 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
goto fail;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
- div_table[cdclk_sel]);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
return;
fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 200000;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 200000;
}
static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
cdclk_sel = (tmp >> 12) & 0x1;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 2666667:
case 4000000:
case 5333333:
- cdclk_state->cdclk = cdclk_sel ? 333333 : 222222;
+ cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
break;
case 3200000:
- cdclk_state->cdclk = cdclk_sel ? 320000 : 228571;
+ cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
break;
default:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 222222;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 222222;
break;
}
}
static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
- cdclk_state->cdclk = 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
else if (IS_HSW_ULT(dev_priv))
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
else
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
}
static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
@@ -462,17 +467,17 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
}
static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val;
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
- cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL,
- cdclk_state->vco);
+ cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
+ cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ CCK_DISPLAY_CLOCK_CONTROL,
+ cdclk_config->vco);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -480,10 +485,10 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (IS_VALLEYVIEW(dev_priv))
- cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
DSPFREQGUAR_SHIFT;
else
- cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
DSPFREQGUAR_SHIFT_CHV;
}
@@ -510,25 +515,26 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
* WA - write default credits before re-programming
* FIXME: should we also set the resend bit here?
*/
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- default_credits);
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | default_credits);
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- credits | PFI_CREDIT_RESEND);
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);
/*
* FIXME is this guaranteed to clear
* immediately or should we poll for it?
*/
- WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- u32 val, cmd = cdclk_state->voltage_level;
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
@@ -563,7 +569,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
if (cdclk == 400000) {
@@ -581,7 +588,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
/* adjust self-refresh exit latency value */
@@ -611,11 +619,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- u32 val, cmd = cdclk_state->voltage_level;
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
@@ -645,7 +653,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
vlv_punit_put(dev_priv);
@@ -685,68 +694,70 @@ static u8 bdw_calc_voltage_level(int cdclk)
}
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
- cdclk_state->cdclk = 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_54O_BDW)
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
else
- cdclk_state->cdclk = 675000;
+ cdclk_config->cdclk = 675000;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- bdw_calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ bdw_calc_voltage_level(cdclk_config->cdclk);
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
+ int cdclk = cdclk_config->cdclk;
u32 val;
int ret;
- if (WARN((I915_READ(LCPLL_CTL) &
- (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
- LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
- LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
- LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
- "trying to change cdclk frequency with cdclk not enabled\n"))
+ if (drm_WARN(&dev_priv->drm,
+ (intel_de_read(dev_priv, LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
return;
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
- DRM_ERROR("failed to inform pcode about cdclk change\n");
+ drm_err(&dev_priv->drm,
+ "failed to inform pcode about cdclk change\n");
return;
}
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
/*
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(I915_READ(LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 100))
- DRM_ERROR("Switching to FCLK failed\n");
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CLK_FREQ_MASK;
switch (cdclk) {
@@ -767,20 +778,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
break;
}
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
- I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+ intel_de_write(dev_priv, CDCLK_FREQ,
+ DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
intel_update_cdclk(dev_priv);
}
@@ -821,26 +833,27 @@ static u8 skl_calc_voltage_level(int cdclk)
}
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val;
- cdclk_state->ref = 24000;
- cdclk_state->vco = 0;
+ cdclk_config->ref = 24000;
+ cdclk_config->vco = 0;
- val = I915_READ(LCPLL1_CTL);
+ val = intel_de_read(dev_priv, LCPLL1_CTL);
if ((val & LCPLL_PLL_ENABLE) == 0)
return;
- if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
return;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
- if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
- DPLL_CTRL1_SSC(SKL_DPLL0) |
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
return;
switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
@@ -848,11 +861,11 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
- cdclk_state->vco = 8100000;
+ cdclk_config->vco = 8100000;
break;
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
- cdclk_state->vco = 8640000;
+ cdclk_config->vco = 8640000;
break;
default:
MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -861,32 +874,32 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
}
static void skl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 cdctl;
- skl_dpll0_update(dev_priv, cdclk_state);
+ skl_dpll0_update(dev_priv, cdclk_config);
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+ cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;
- if (cdclk_state->vco == 0)
+ if (cdclk_config->vco == 0)
goto out;
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
- if (cdclk_state->vco == 8640000) {
+ if (cdclk_config->vco == 8640000) {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
- cdclk_state->cdclk = 432000;
+ cdclk_config->cdclk = 432000;
break;
case CDCLK_FREQ_337_308:
- cdclk_state->cdclk = 308571;
+ cdclk_config->cdclk = 308571;
break;
case CDCLK_FREQ_540:
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
break;
case CDCLK_FREQ_675_617:
- cdclk_state->cdclk = 617143;
+ cdclk_config->cdclk = 617143;
break;
default:
MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -895,16 +908,16 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
} else {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
break;
case CDCLK_FREQ_337_308:
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
break;
case CDCLK_FREQ_540:
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
break;
case CDCLK_FREQ_675_617:
- cdclk_state->cdclk = 675000;
+ cdclk_config->cdclk = 675000;
break;
default:
MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -917,8 +930,8 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- skl_calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ skl_calc_voltage_level(cdclk_config->cdclk);
}
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@@ -942,7 +955,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
u32 val;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* We always enable DPLL0 with the lowest link rate possible, but still
@@ -953,7 +966,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
* rate later on, with the constraint of choosing a frequency that
* works with vco.
*/
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -965,13 +978,14 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
+ intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, LCPLL1_CTL,
+ intel_de_read(dev_priv, LCPLL1_CTL) | LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
- DRM_ERROR("DPLL0 not locked\n");
+ drm_err(&dev_priv->drm, "DPLL0 not locked\n");
dev_priv->cdclk.hw.vco = vco;
@@ -981,19 +995,20 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, LCPLL1_CTL,
+ intel_de_read(dev_priv, LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
- DRM_ERROR("Couldn't disable DPLL0\n");
+ drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
}
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
int ret;
@@ -1005,23 +1020,25 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
* use the corresponding VCO freq as that always leads to using the
* minimum 308MHz CDCLK.
*/
- WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ IS_SKYLAKE(dev_priv) && vco == 8640000);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (%d)\n", ret);
return;
}
/* Choose frequency for this cdclk */
switch (cdclk) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 308571:
case 337500:
@@ -1044,38 +1061,38 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
- cdclk_ctl = I915_READ(CDCLK_CTL);
+ cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
if (dev_priv->cdclk.hw.vco != vco) {
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
}
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
- POSTING_READ(CDCLK_CTL);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
if (dev_priv->cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
- POSTING_READ(CDCLK_CTL);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
/* inform PCU of the change */
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
intel_update_cdclk(dev_priv);
}
@@ -1089,11 +1106,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* There is a SWF18 scratchpad register defined which is set by the
* pre-os and which OS drivers can use to check the status
*/
- if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+ if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk.hw.vco == 0 ||
@@ -1106,7 +1123,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* decimal part is programmed wrong from BIOS where pre-os does not
* enable display. Verify the same as well.
*/
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
if (cdctl == expected)
@@ -1114,7 +1131,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
return;
sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
dev_priv->cdclk.hw.cdclk = 0;
@@ -1122,9 +1139,9 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-static void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state;
+ struct intel_cdclk_config cdclk_config;
skl_sanitize_cdclk(dev_priv);
@@ -1140,26 +1157,26 @@ static void skl_init_cdclk(struct drm_i915_private *dev_priv)
return;
}
- cdclk_state = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
- if (cdclk_state.vco == 0)
- cdclk_state.vco = 8100000;
- cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
- cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+ if (cdclk_config.vco == 0)
+ cdclk_config.vco = 8100000;
+ cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
-static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
@@ -1223,8 +1240,9 @@ static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
- WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1,
+ "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1241,8 +1259,8 @@ static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
table[i].cdclk == cdclk)
return dev_priv->cdclk.hw.ref * table[i].ratio;
- WARN(1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1283,56 +1301,68 @@ static u8 ehl_calc_voltage_level(int cdclk)
return 0;
}
+static u8 tgl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 3;
+ else if (cdclk > 326400)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
+
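The new tgl_calc_voltage_level() buckets cdclk (in kHz) into four voltage levels at the 312000/326400/556800 boundaries, with each boundary value itself staying in the lower bucket. A standalone check of that mapping, restating the thresholds above outside the kernel:

#include <assert.h>

/* same thresholds as tgl_calc_voltage_level() above, cdclk in kHz */
static unsigned char tgl_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 3;
	else if (cdclk > 326400)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

int main(void)
{
	assert(tgl_voltage_level(312000) == 0);	/* boundary stays in the lower bucket */
	assert(tgl_voltage_level(326400) == 1);
	assert(tgl_voltage_level(556800) == 2);
	assert(tgl_voltage_level(652800) == 3);
	return 0;
}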
static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
- cdclk_state->ref = 24000;
+ if (intel_de_read(dev_priv, SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+ cdclk_config->ref = 24000;
else
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
}
static void icl_readout_refclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
+ u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
switch (dssm) {
default:
MISSING_CASE(dssm);
/* fall through */
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
- cdclk_state->ref = 24000;
+ cdclk_config->ref = 24000;
break;
case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
break;
case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
- cdclk_state->ref = 38400;
+ cdclk_config->ref = 38400;
break;
}
}
static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val, ratio;
if (INTEL_GEN(dev_priv) >= 11)
- icl_readout_refclk(dev_priv, cdclk_state);
+ icl_readout_refclk(dev_priv, cdclk_config);
else if (IS_CANNONLAKE(dev_priv))
- cnl_readout_refclk(dev_priv, cdclk_state);
+ cnl_readout_refclk(dev_priv, cdclk_config);
else
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
- val = I915_READ(BXT_DE_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
(val & BXT_DE_PLL_LOCK) == 0) {
/*
* CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
* setting it to zero is a way to signal that.
*/
- cdclk_state->vco = 0;
+ cdclk_config->vco = 0;
return;
}
@@ -1343,47 +1373,49 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 10)
ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
else
- ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+ ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- cdclk_state->vco = ratio * cdclk_state->ref;
+ cdclk_config->vco = ratio * cdclk_config->ref;
}
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 divider;
int div;
- bxt_de_pll_readout(dev_priv, cdclk_state);
+ bxt_de_pll_readout(dev_priv, cdclk_config);
if (INTEL_GEN(dev_priv) >= 12)
- cdclk_state->bypass = cdclk_state->ref / 2;
+ cdclk_config->bypass = cdclk_config->ref / 2;
else if (INTEL_GEN(dev_priv) >= 11)
- cdclk_state->bypass = 50000;
+ cdclk_config->bypass = 50000;
else
- cdclk_state->bypass = cdclk_state->ref;
+ cdclk_config->bypass = cdclk_config->ref;
- if (cdclk_state->vco == 0) {
- cdclk_state->cdclk = cdclk_state->bypass;
+ if (cdclk_config->vco == 0) {
+ cdclk_config->cdclk = cdclk_config->bypass;
goto out;
}
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+ divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 8;
break;
default:
@@ -1391,25 +1423,25 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
return;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
out:
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
if (intel_de_wait_for_clear(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL unlock\n");
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
@@ -1419,17 +1451,17 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
u32 val;
- val = I915_READ(BXT_DE_PLL_CTL);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_CTL);
val &= ~BXT_DE_PLL_RATIO_MASK;
val |= BXT_DE_PLL_RATIO(ratio);
- I915_WRITE(BXT_DE_PLL_CTL, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_CTL, val);
- I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
if (intel_de_wait_for_set(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL lock\n");
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
@@ -1438,13 +1470,14 @@ static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(BXT_DE_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
val &= ~BXT_DE_PLL_PLL_ENABLE;
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
- DRM_ERROR("timeout waiting for CDCLK PLL unlock\n");
+ if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+ drm_err(&dev_priv->drm,
+ "timeout waiting for CDCLK PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
@@ -1455,14 +1488,15 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
u32 val;
val = CNL_CDCLK_PLL_RATIO(ratio);
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
val |= BXT_DE_PLL_PLL_ENABLE;
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
- DRM_ERROR("timeout waiting for CDCLK PLL lock\n");
+ if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+ drm_err(&dev_priv->drm,
+ "timeout waiting for CDCLK PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
@@ -1488,11 +1522,11 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
}
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
u32 val, divider;
int ret;
@@ -1512,30 +1546,34 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
0x80000000, 150, 2);
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
- ret, cdclk);
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
case 3:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 8:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
@@ -1566,14 +1604,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
*/
if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
+ intel_de_write(dev_priv, CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
if (INTEL_GEN(dev_priv) >= 10) {
ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
} else {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -1583,13 +1621,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
*/
ret = sandybridge_pcode_write_timeout(dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level,
+ cdclk_config->voltage_level,
150, 2);
}
if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
+ drm_err(&dev_priv->drm,
+ "PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
@@ -1600,7 +1639,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+ dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
@@ -1609,7 +1648,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
int cdclk, vco;
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
@@ -1621,7 +1660,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
* set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
* so sanitize this register.
*/
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
/*
* Let's ignore the pipe field, since BIOS could have configured the
* dividers either synced to an active pipe or asynchronously
@@ -1672,7 +1711,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
return;
sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
dev_priv->cdclk.hw.cdclk = 0;
@@ -1681,9 +1720,9 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state;
+ struct intel_cdclk_config cdclk_config;
bxt_sanitize_cdclk(dev_priv);
@@ -1691,35 +1730,35 @@ static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
dev_priv->cdclk.hw.vco != 0)
return;
- cdclk_state = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->cdclk.hw;
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change once VBT has the changes for BXT.
*/
- cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
- cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
- cdclk_state.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
+ cdclk_config.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
/**
- * intel_cdclk_init - Initialize CDCLK
+ * intel_cdclk_init_hw - Initialize CDCLK hardware
* @i915: i915 device
*
* Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
@@ -1727,39 +1766,41 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
*/
-void intel_cdclk_init(struct drm_i915_private *i915)
+void intel_cdclk_init_hw(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
- bxt_init_cdclk(i915);
+ bxt_cdclk_init_hw(i915);
else if (IS_GEN9_BC(i915))
- skl_init_cdclk(i915);
+ skl_cdclk_init_hw(i915);
}
/**
- * intel_cdclk_uninit - Uninitialize CDCLK
+ * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
* @i915: i915 device
*
* Uninitialize CDCLK. This is done only during the display core
* uninitialization sequence.
*/
-void intel_cdclk_uninit(struct drm_i915_private *i915)
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
- bxt_uninit_cdclk(i915);
+ bxt_cdclk_uninit_hw(i915);
else if (IS_GEN9_BC(i915))
- skl_uninit_cdclk(i915);
+ skl_cdclk_uninit_hw(i915);
}
/**
- * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
+ * configurations requires a modeset on all pipes
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states require pipes to be off during reprogramming, false if not.
+ * True if changing between the two CDCLK configurations
+ * requires all pipes to be off, false if not.
*/
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
return a->cdclk != b->cdclk ||
a->vco != b->vco ||
@@ -1767,17 +1808,19 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
}
/**
- * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
- * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
+ * configurations requires only a cd2x divider update
+ * @dev_priv: i915 device
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states require just a cd2x divider update, false if not.
+ * True if changing between the two CDCLK configurations
+ * can be done with just a cd2x divider update, false if not.
*/
-static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -1789,117 +1832,138 @@ static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
}
/**
- * intel_cdclk_changed - Determine if two CDCLK states are different
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_changed - Determine if two CDCLK configurations are different
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states don't match, false if they do.
+ * True if the CDCLK configurations don't match, false if they do.
*/
-static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
}
-/**
- * intel_cdclk_swap_state - make atomic CDCLK configuration effective
- * @state: atomic state
- *
- * This is the CDCLK version of drm_atomic_helper_swap_state() since the
- * helper does not handle driver-specific global state.
- *
- * Similarly to the atomic helpers this function does a complete swap,
- * i.e. it also puts the old state into @state. This is used by the commit
- * code to determine how CDCLK has changed (for instance did it increase or
- * decrease).
- */
-void intel_cdclk_swap_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- swap(state->cdclk.logical, dev_priv->cdclk.logical);
- swap(state->cdclk.actual, dev_priv->cdclk.actual);
-}
-
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context)
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+ const char *context)
{
DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
- context, cdclk_state->cdclk, cdclk_state->vco,
- cdclk_state->ref, cdclk_state->bypass,
- cdclk_state->voltage_level);
+ context, cdclk_config->cdclk, cdclk_config->vco,
+ cdclk_config->ref, cdclk_config->bypass,
+ cdclk_config->voltage_level);
}
/**
- * intel_set_cdclk - Push the CDCLK state to the hardware
+ * intel_set_cdclk - Push the CDCLK configuration to the hardware
* @dev_priv: i915 device
- * @cdclk_state: new CDCLK state
+ * @cdclk_config: new CDCLK configuration
* @pipe: pipe with which to synchronize the update
*
* Program the hardware based on the passed in CDCLK state,
* if necessary.
*/
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
+ struct intel_encoder *encoder;
+
+ if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
return;
- if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk))
return;
- intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
+ intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
- dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
+ /*
+ * Lock aux/gmbus while we change cdclk in case those
+ * functions use cdclk. Not all platforms/ports do,
+ * but we'll lock them all for simplicity.
+ */
+ mutex_lock(&dev_priv->gmbus_mutex);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
+ &dev_priv->gmbus_mutex);
+ }
+
+ dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
- if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
- "cdclk state doesn't match!\n")) {
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
- intel_dump_cdclk_state(cdclk_state, "[sw state]");
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->aux.hw_mutex);
+ }
+ mutex_unlock(&dev_priv->gmbus_mutex);
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+ "cdclk state doesn't match!\n")) {
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]");
+ intel_dump_cdclk_config(cdclk_config, "[sw state]");
}
}
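
The block above takes every DP AUX hw_mutex nested under gmbus_mutex. mutex_lock_nest_lock() is the lockdep annotation that makes this legal: it records that the outer lock serializes acquisition of the whole group, so taking several locks of the same lock class back to back is not reported as a deadlock. A reduced sketch of the pattern, with hypothetical locks rather than the i915 code:

#include <linux/mutex.h>

static DEFINE_MUTEX(outer);
static struct mutex inner[4];	/* e.g. one per DP AUX channel; mutex_init()ed elsewhere */

static void lock_group(void)
{
	int i;

	mutex_lock(&outer);
	for (i = 0; i < 4; i++)
		/* nest annotation: lockdep knows "outer" guards the group */
		mutex_lock_nest_lock(&inner[i], &outer);
}

static void unlock_group(void)
{
	int i;

	for (i = 0; i < 4; i++)
		mutex_unlock(&inner[i]);
	mutex_unlock(&outer);
}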
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
*
- * Program the hardware before updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware before updating the HW plane state based on the
+ * new CDCLK state, if necessary.
*/
void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe)
+intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
- if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
- intel_set_cdclk(dev_priv, new_state, pipe);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe == INVALID_PIPE ||
+ old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
}
/**
* intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
*
- * Program the hardware after updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware after updating the HW plane state based on the
+ * new CDCLK state, if necessary.
*/
void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe)
+intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
- if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
- intel_set_cdclk(dev_priv, new_state, pipe);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe != INVALID_PIPE &&
+ old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
}
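
Taken together, the pre/post hooks implement one rule: when the update synchronizes to a pipe, a cdclk increase is programmed before the plane update and a decrease after it, so cdclk never falls below what the still-committed configuration needs. A condensed restatement with hypothetical stubs, not the driver code:

static void set_cdclk_khz(int khz)
{
	(void)khz;	/* program the hardware (stub) */
}

static void update_planes(void)
{
	/* commit the new plane state (stub) */
}

static void commit_with_cdclk_change(int old_khz, int new_khz, int have_pipe)
{
	if (!have_pipe || old_khz <= new_khz)
		set_cdclk_khz(new_khz);	/* pre-plane: raise, or no pipe to sync with */

	update_planes();

	if (have_pipe && old_khz > new_khz)
		set_cdclk_khz(new_khz);	/* post-plane: lower */
}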
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2017,25 +2081,24 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
if (min_cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->max_cdclk_freq);
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+ min_cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
enum pipe pipe;
- memcpy(state->min_cdclk, dev_priv->min_cdclk,
- sizeof(state->min_cdclk));
-
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
@@ -2043,19 +2106,19 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
if (min_cdclk < 0)
return min_cdclk;
- if (state->min_cdclk[i] == min_cdclk)
+ if (cdclk_state->min_cdclk[i] == min_cdclk)
continue;
- state->min_cdclk[i] = min_cdclk;
+ cdclk_state->min_cdclk[i] = min_cdclk;
- ret = intel_atomic_lock_global_state(state);
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
return ret;
}
- min_cdclk = state->cdclk.force_min_cdclk;
+ min_cdclk = cdclk_state->force_min_cdclk;
for_each_pipe(dev_priv, pipe)
- min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
return min_cdclk;
}
@@ -2073,8 +2136,9 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* future platforms this code will need to be
* adjusted.
*/
-static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
@@ -2082,9 +2146,6 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
int i;
enum pipe pipe;
- memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
- sizeof(state->min_voltage_level));
-
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
@@ -2093,57 +2154,58 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
else
min_voltage_level = 0;
- if (state->min_voltage_level[i] == min_voltage_level)
+ if (cdclk_state->min_voltage_level[i] == min_voltage_level)
continue;
- state->min_voltage_level[i] = min_voltage_level;
+ cdclk_state->min_voltage_level[i] = min_voltage_level;
- ret = intel_atomic_lock_global_state(state);
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
return ret;
}
min_voltage_level = 0;
for_each_pipe(dev_priv, pipe)
- min_voltage_level = max(state->min_voltage_level[pipe],
+ min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
min_voltage_level);
return min_voltage_level;
}
-static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!state->active_pipes) {
- cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk, cdclk;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
@@ -2153,31 +2215,32 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
*/
cdclk = bdw_calc_cdclk(min_cdclk);
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!state->active_pipes) {
- cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk);
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
bdw_calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int skl_dpll0_vco(struct intel_atomic_state *state)
+static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int vco, i;
- vco = state->cdclk.logical.vco;
+ vco = cdclk_state->logical.vco;
if (!vco)
vco = dev_priv->skl_preferred_vco_freq;
@@ -2206,15 +2269,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
return vco;
}
-static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk, cdclk, vco;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
- vco = skl_dpll0_vco(state);
+ vco = skl_dpll0_vco(cdclk_state);
/*
* FIXME should also account for plane ratio
@@ -2222,57 +2285,58 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
*/
cdclk = skl_calc_cdclk(min_cdclk, vco);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!state->active_pipes) {
- cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
+ if (!cdclk_state->active_pipes) {
+ cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco);
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
skl_calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, min_voltage_level, cdclk, vco;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
- min_voltage_level = bxt_compute_min_voltage_level(state);
+ min_voltage_level = bxt_compute_min_voltage_level(cdclk_state);
if (min_voltage_level < 0)
return min_voltage_level;
cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
max_t(int, min_voltage_level,
dev_priv->display.calc_voltage_level(cdclk));
- if (!state->active_pipes) {
- cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
dev_priv->display.calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
@@ -2317,7 +2381,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
return 0;
}
-static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk;
@@ -2326,54 +2390,113 @@ static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
* check that the required minimum frequency doesn't exceed
* the actual cdclk frequency.
*/
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
return 0;
}
+static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return NULL;
+
+ cdclk_state->force_min_cdclk_changed = false;
+ cdclk_state->pipe = INVALID_PIPE;
+
+ return &cdclk_state->base;
+}
+
+static void intel_cdclk_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_cdclk_funcs = {
+ .atomic_duplicate_state = intel_cdclk_duplicate_state,
+ .atomic_destroy_state = intel_cdclk_destroy_state,
+};
+
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
+ if (IS_ERR(cdclk_state))
+ return ERR_CAST(cdclk_state);
+
+ return to_intel_cdclk_state(cdclk_state);
+}
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
+ &cdclk_state->base, &intel_cdclk_funcs);
+
+ return 0;
+}
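
The two hooks above follow the usual atomic-state lifecycle: duplication copies the whole struct byte-for-byte (kmemdup()) and then resets the fields that must not leak from one commit into the next, while destruction is a plain kfree(). A minimal userspace sketch of the same idea, assuming a hypothetical state struct (this mirrors the pattern, not the i915 API):

```c
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct demo_state {
	int cdclk;          /* persists across duplication */
	bool force_changed; /* transient: reset on duplicate */
	int pipe;           /* transient: reset on duplicate */
};

static struct demo_state *demo_duplicate_state(const struct demo_state *old)
{
	struct demo_state *new = malloc(sizeof(*new));

	if (!new)
		return NULL;

	memcpy(new, old, sizeof(*new)); /* kmemdup() equivalent */
	new->force_changed = false;     /* don't carry per-commit flags over */
	new->pipe = -1;                 /* INVALID_PIPE stand-in */

	return new;
}

static void demo_destroy_state(struct demo_state *state)
{
	free(state);
}
```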
+
int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
enum pipe pipe;
int ret;
- ret = dev_priv->display.modeset_calc_cdclk(state);
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
+
+ ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state);
if (ret)
return ret;
- /*
- * Writes to dev_priv->cdclk.{actual,logical} must be protected
- * by holding all the crtc mutexes even if we don't end up
- * touching the hardware
- */
- if (intel_cdclk_changed(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ if (intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
/*
* Also serialize commits across all crtcs
* if the actual hw needs to be poked.
*/
- ret = intel_atomic_serialize_global_state(state);
+ ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- } else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &state->cdclk.logical)) {
- ret = intel_atomic_lock_global_state(state);
+ } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
+ intel_cdclk_changed(&old_cdclk_state->logical,
+ &new_cdclk_state->logical)) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
return ret;
} else {
return 0;
}
- if (is_power_of_2(state->active_pipes) &&
- intel_cdclk_needs_cd2x_update(dev_priv,
- &dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ if (is_power_of_2(new_cdclk_state->active_pipes) &&
+ intel_cdclk_can_cd2x_update(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- pipe = ilog2(state->active_pipes);
+ pipe = ilog2(new_cdclk_state->active_pipes);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -2387,28 +2510,32 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
}
if (pipe != INVALID_PIPE) {
- state->cdclk.pipe = pipe;
+ new_cdclk_state->pipe = pipe;
- DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
- pipe_name(pipe));
- } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
/* All pipes must be switched off while we change the cdclk. */
ret = intel_modeset_all_pipes(state);
if (ret)
return ret;
- state->cdclk.pipe = INVALID_PIPE;
+ new_cdclk_state->pipe = INVALID_PIPE;
- DRM_DEBUG_KMS("Modeset required for cdclk change\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset required for cdclk change\n");
}
- DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- state->cdclk.logical.cdclk,
- state->cdclk.actual.cdclk);
- DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- state->cdclk.logical.voltage_level,
- state->cdclk.actual.voltage_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ new_cdclk_state->logical.cdclk,
+ new_cdclk_state->actual.cdclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "New voltage level calculated to be logical %u, actual %u\n",
+ new_cdclk_state->logical.voltage_level,
+ new_cdclk_state->actual.voltage_level);
return 0;
}
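
The cd2x fast path above only applies when exactly one pipe is active, which the code detects by treating active_pipes as a bitmask: a power of two means a single bit is set, and ilog2() then recovers that pipe's index. A self-contained sketch of the same test, using a compiler builtin in place of the kernel helpers:

```c
#include <assert.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int single_active_pipe(unsigned int active_pipes)
{
	if (!is_power_of_2(active_pipes))
		return -1;                   /* zero or multiple pipes */

	return __builtin_ctz(active_pipes);  /* ilog2() for a power of two */
}

int main(void)
{
	assert(single_active_pipe(0x4) == 2);  /* only pipe C active */
	assert(single_active_pipe(0x5) == -1); /* pipes A and C active */
	return 0;
}
```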
@@ -2453,11 +2580,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->max_cdclk_freq = 528000;
} else if (IS_GEN9_BC(dev_priv)) {
- u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
vco = dev_priv->skl_preferred_vco_freq;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* Use the lower (vco 8640) cdclk values as a
@@ -2485,7 +2612,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
* How can we know if extra cooling is
* available? PCI ID, VTB, something else?
*/
- if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
else if (IS_BDW_ULX(dev_priv))
dev_priv->max_cdclk_freq = 450000;
@@ -2504,11 +2631,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
- DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
- dev_priv->max_cdclk_freq);
+ drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
+ dev_priv->max_cdclk_freq);
- DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
- dev_priv->max_dotclk_freq);
+ drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
+ dev_priv->max_dotclk_freq);
}
/**
@@ -2528,8 +2655,8 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
* generate GMBus clock. This will vary with the cdclk freq.
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+ intel_de_write(dev_priv, GMBUSFREQ_VLV,
+ DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
}
static int cnp_rawclk(struct drm_i915_private *dev_priv)
@@ -2537,7 +2664,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
u32 rawclk;
int divider, fraction;
- if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+ if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
/* 24 MHz */
divider = 24000;
fraction = 0;
@@ -2557,13 +2684,13 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
- I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk);
return divider + fraction;
}
static int pch_rawclk(struct drm_i915_private *dev_priv)
{
- return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+ return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
static int vlv_hrawclk(struct drm_i915_private *dev_priv)
@@ -2578,7 +2705,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
u32 clkcfg;
/* hrawclock is 1/4 the FSB frequency */
- clkcfg = I915_READ(CLKCFG);
+ clkcfg = intel_de_read(dev_priv, CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100000;
@@ -2600,27 +2727,29 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
}
/**
- * intel_update_rawclk - Determine the current RAWCLK frequency
+ * intel_read_rawclk - Determine the current RAWCLK frequency
* @dev_priv: i915 device
*
* Determine the current RAWCLK frequency. RAWCLK is a fixed
 * frequency clock so this needs to be done only once.
*/
-void intel_update_rawclk(struct drm_i915_private *dev_priv)
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
{
+ u32 freq;
+
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
+ freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->rawclk_freq = pch_rawclk(dev_priv);
+ freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
+ freq = vlv_hrawclk(dev_priv);
else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
+ freq = g4x_hrawclk(dev_priv);
else
/* no rawclk on other platforms, or no need to know it */
- return;
+ return 0;
- DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+ return freq;
}
/**
@@ -2629,7 +2758,12 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ELKHARTLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
@@ -2709,8 +2843,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
else if (IS_I845G(dev_priv))
dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
else { /* 830 */
- WARN(!IS_I830(dev_priv),
- "Unknown platform. Assuming 133 MHz CDCLK\n");
+ drm_WARN(&dev_priv->drm, !IS_I830(dev_priv),
+ "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cf71394cc79c..5731806e4cee 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,11 +8,12 @@
#include <linux/types.h>
+#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
-struct intel_cdclk_state;
struct intel_crtc_state;
struct intel_cdclk_vals {
@@ -22,28 +23,62 @@ struct intel_cdclk_vals {
u8 ratio;
};
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+ * Actual configuration of cdclk; it can differ from the
+ * logical configuration only when all crtcs are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+ bool force_min_cdclk_changed;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+};
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_cdclk_init(struct drm_i915_private *i915);
-void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_cdclk_init_hw(struct drm_i915_private *i915);
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-void intel_cdclk_swap_state(struct intel_atomic_state *state);
-void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe);
-void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context);
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b);
+void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
+void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+ const char *context);
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+
+#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define intel_atomic_get_old_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+#define intel_atomic_get_new_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv);
+
#endif /* __INTEL_CDCLK_H__ */
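
The to_intel_cdclk_state() macro above is the standard container_of() upcast: generic code passes around a pointer to the embedded intel_global_state base, and the macro recovers the enclosing intel_cdclk_state from it. A freestanding sketch of the same trick, with hypothetical struct names:

```c
#include <stddef.h>
#include <assert.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state { int serial; };

struct cdclk_like_state {
	struct base_state base; /* must be embedded, not pointed to */
	int cdclk;
};

int main(void)
{
	struct cdclk_like_state s = { .base = { 1 }, .cdclk = 540000 };
	struct base_state *b = &s.base; /* what generic code sees */

	assert(container_of(b, struct cdclk_like_state, base) == &s);
	return 0;
}
```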
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 3980e8b50c28..c1cce93a1c25 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -157,23 +157,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
if (INTEL_GEN(dev_priv) >= 7) {
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
+ postoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
+ postoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
+ postoff[2]);
}
}
@@ -185,22 +191,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+ coeff[2] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+ coeff[5] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+ coeff[8] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
@@ -297,14 +309,16 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv));
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_identity,
ilk_csc_off_zero);
}
- I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+ intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
@@ -330,51 +344,74 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_postoff_limited_range);
}
- I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+ intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
-/*
- * Set up the pipe CSC unit on CherryView.
- */
-static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_ctm *ctm = blob->data;
enum pipe pipe = crtc->pipe;
+ u16 coeffs[9];
+ int i;
- if (crtc_state->hw.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
- u16 coeffs[9] = {};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff =
- ((1ULL << 63) - 1) & ctm->matrix[i];
-
- /* Round coefficient. */
- abs_coeff += 1 << (32 - 13);
- /* Clamp to hardware limits. */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
-
- /* Write coefficients in S3.12 format. */
- if (ctm->matrix[i] & (1ULL << 63))
- coeffs[i] = 1 << 15;
- coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
- coeffs[i] |= (abs_coeff >> 20) & 0xfff;
- }
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* Write coefficients in S3.12 format. */
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] |= 1 << 15;
- I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
- I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
- I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
- I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
- I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
}
- I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
+}
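
The loop above converts DRM's sign-magnitude S31.32 CTM coefficients into the CGM unit's S3.12 format: strip the sign bit, round by adding half of the least significant retained fraction bit (1 << 19, since 20 of the 32 fraction bits are dropped), clamp the magnitude below 8.0, then reassemble sign, three integer bits, and twelve fraction bits into 16 bits. A standalone sketch of one coefficient's conversion:

```c
#include <stdint.h>
#include <assert.h>

/* Convert one sign-magnitude S31.32 coefficient to S3.12. */
static uint16_t ctm_to_s3_12(uint64_t coeff)
{
	uint64_t abs_coeff = coeff & ~(1ULL << 63); /* drop sign bit */
	uint16_t out = 0;

	abs_coeff += 1ULL << 19;              /* round: half of dropped fraction */
	if (abs_coeff > (8ULL << 32) - 1)     /* clamp magnitude below 8.0 */
		abs_coeff = (8ULL << 32) - 1;

	if (coeff & (1ULL << 63))
		out |= 1 << 15;               /* sign */
	out |= ((abs_coeff >> 32) & 7) << 12; /* 3 integer bits */
	out |= (abs_coeff >> 20) & 0xfff;     /* 12 fraction bits */

	return out;
}

int main(void)
{
	/* 1.0 in S31.32 is 1 << 32; expect 0x1000, i.e. 1.0 in S3.12. */
	assert(ctm_to_s3_12(1ULL << 32) == 0x1000);
	return 0;
}
```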
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, int bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
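
intel_color_lut_pack() is the inverse of drm_color_lut_extract(): it widens an N-bit hardware value back to the 16-bit range the LUT property uses by left-shifting into the top bits. Since the low bits stay zero, a full-scale 10-bit value of 0x3ff comes back as 0xffc0 rather than 0xffff, so the round trip is slightly lossy. A quick standalone check of the same logic:

```c
#include <assert.h>
#include <stdint.h>

static uint32_t lut_pack(uint32_t val, int bit_precision)
{
	uint32_t max = 0xffff >> (16 - bit_precision);

	if (val > max)
		val = max;
	if (bit_precision < 16)
		val <<= 16 - bit_precision;

	return val;
}

int main(void)
{
	assert(lut_pack(0x3ff, 10) == 0xffc0); /* full-scale 10-bit -> 16-bit */
	assert(lut_pack(0xff, 8) == 0xff00);   /* full-scale 8-bit */
	return 0;
}
```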
+
+static u32 i9xx_lut_8(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 8) << 16 |
+ drm_color_lut_extract(color->green, 8) << 8 |
+ drm_color_lut_extract(color->blue, 8);
+}
+
+static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
+{
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
}
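
i9xx_lut_8() narrows each 16-bit channel to 8 bits and packs R/G/B into one register word as red << 16 | green << 8 | blue; i9xx_lut_8_pack() is the matching read-back. A standalone sketch of the pack direction, assuming drm_color_lut_extract()'s round-then-shift-then-clamp behaviour:

```c
#include <stdint.h>
#include <assert.h>

/* Narrow a 16-bit channel to 'bits' with rounding, drm_color_lut_extract()-style. */
static uint32_t lut_extract(uint32_t val, int bits)
{
	uint32_t max = 0xffff >> (16 - bits);

	val += 1u << (16 - bits - 1); /* round to nearest */
	val >>= 16 - bits;

	return val > max ? max : val;
}

static uint32_t lut_8_word(uint16_t r, uint16_t g, uint16_t b)
{
	return lut_extract(r, 8) << 16 | lut_extract(g, 8) << 8 | lut_extract(b, 8);
}

int main(void)
{
	assert(lut_8_word(0xffff, 0x8000, 0x0000) == 0xff8000);
	return 0;
}
```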
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
@@ -393,49 +430,34 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
(color->blue >> 8);
}
-static u32 ilk_lut_10(const struct drm_color_lut *color)
+static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
- return drm_color_lut_extract(color->red, 10) << 20 |
- drm_color_lut_extract(color->green, 10) << 10 |
- drm_color_lut_extract(color->blue, 10);
+ entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, ldw);
+ entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
}
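
The i965 "10.6" interpolated palette stores each 16-bit channel split across a pair of dwords: the even ("low") dword carries the low 8 bits and the odd ("high") dword the high 8 bits, so the pack helper reassembles entry = high << 8 | low per channel. A tiny sketch for one channel, assuming an 8-bit field at a fixed bit offset (the real per-channel masks differ):

```c
#include <assert.h>
#include <stdint.h>

/* Rebuild a 16-bit channel from the 8-bit fields of two palette dwords. */
static uint16_t lut_10p6_channel(uint32_t ldw, uint32_t udw, int shift)
{
	uint8_t lo = (ldw >> shift) & 0xff;
	uint8_t hi = (udw >> shift) & 0xff;

	return (uint16_t)hi << 8 | lo;
}

int main(void)
{
	/* field at bits 23:16 of each dword: high byte 0x12, low byte 0x34 */
	assert(lut_10p6_channel(0x340000, 0x120000, 16) == 0x1234);
	return 0;
}
```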
-/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
- const struct drm_property_blob *blob)
+static u16 i965_lut_11p6_max_pack(u32 val)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int i;
-
- if (HAS_GMCH(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, pipe);
- }
-
- if (blob) {
- const struct drm_color_lut *lut = blob->data;
-
- for (i = 0; i < 256; i++) {
- u32 word =
- (drm_color_lut_extract(lut[i].red, 8) << 16) |
- (drm_color_lut_extract(lut[i].green, 8) << 8) |
- drm_color_lut_extract(lut[i].blue, 8);
+ /* PIPEGCMAX is 11.6, clamp to 10.6 */
+ return clamp_val(val, 0, 0xffff);
+}
- if (HAS_GMCH(dev_priv))
- I915_WRITE(PALETTE(pipe, i), word);
- else
- I915_WRITE(LGC_PALETTE(pipe, i), word);
- }
- }
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
-static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
@@ -445,10 +467,10 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val;
- val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- I915_WRITE(PIPECONF(pipe), val);
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
}
static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
@@ -458,10 +480,10 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val;
- val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- I915_WRITE(PIPECONF(pipe), val);
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
ilk_load_csc_matrix(crtc_state);
}
@@ -471,7 +493,8 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
ilk_load_csc_matrix(crtc_state);
}
@@ -492,9 +515,10 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
if (crtc_state->csc_enable)
val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
- I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
+ intel_de_write(dev_priv, SKL_BOTTOM_COLOR(pipe), val);
- I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
if (INTEL_GEN(dev_priv) >= 11)
icl_load_csc_matrix(crtc_state);
@@ -502,6 +526,35 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
+static void i9xx_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
+ i9xx_load_lut_8(crtc, gamma_lut);
+}
+
static void i965_load_lut_10p6(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -511,28 +564,52 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- I915_WRITE(PALETTE(pipe, 2 * i + 0),
- i965_lut_10p6_ldw(&lut[i]));
- I915_WRITE(PALETTE(pipe, 2 * i + 1),
- i965_lut_10p6_udw(&lut[i]));
+ intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
}
- I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
- I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
- I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
}
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ i9xx_load_lut_8(crtc, gamma_lut);
else
i965_load_lut_10p6(crtc, gamma_lut);
}
+static void ilk_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
static void ilk_load_lut_10(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -542,7 +619,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++)
- I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+ intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -551,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
else
ilk_load_lut_10(crtc, gamma_lut);
}
@@ -584,15 +662,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc,
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
+ intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
 * Reset the index, otherwise it prevents the legacy palette from being
 * written properly.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
/* On BDW+ the index auto increment mode actually works */
@@ -606,22 +685,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
- PAL_PREC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
/* We discard half the user entries in split gamma mode */
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
 * Reset the index, otherwise it prevents the legacy palette from being
 * written properly.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
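
Both ivb_load_lut_10() and bdw_load_lut_10() resample the user-supplied LUT onto the hardware's table size with the index map i * (lut_size - 1) / (hw_lut_size - 1): slot 0 maps to entry 0, the last hardware slot maps to the last user entry, and intermediate slots round down to the nearest user entry. A small sketch of the mapping:

```c
#include <assert.h>

/* Map hardware LUT slot i onto a user LUT of lut_size entries. */
static int resample_index(int i, int hw_lut_size, int lut_size)
{
	return i * (lut_size - 1) / (hw_lut_size - 1);
}

int main(void)
{
	/* 1024-entry user LUT onto a 512-entry hardware table. */
	assert(resample_index(0, 512, 1024) == 0);      /* first -> first */
	assert(resample_index(511, 512, 1024) == 1023); /* last -> last */
	assert(resample_index(256, 512, 1024) == 512);  /* midpoint */
	return 0;
}
```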
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
@@ -659,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -682,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -703,17 +783,17 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
- u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
/*
@@ -729,12 +809,13 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
@@ -742,26 +823,26 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- u32 i;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
/*
* When setting the auto-increment bit, the hardware seems to
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
u32 v = (i << 16) / (lut_size - 1);
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
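
glk_load_degamma_lut_linear() synthesizes an identity curve directly in the fixed-point format the PRE_CSC unit consumes, where 1.0 is 1 << 16: entry i gets (i << 16) / (lut_size - 1), so the table runs linearly from 0 up to exactly 1.0, and the trailing extended entries are pinned at 1.0 to clamp over-range input. A standalone sketch of the ramp (the LUT size here is illustrative, not the platform's):

```c
#include <assert.h>
#include <stdint.h>

#define LUT_SIZE 33 /* illustrative; the real size is platform-dependent */

static void linear_ramp(uint32_t out[LUT_SIZE])
{
	int i;

	for (i = 0; i < LUT_SIZE; i++)
		out[i] = ((uint32_t)i << 16) / (LUT_SIZE - 1);
}

int main(void)
{
	uint32_t lut[LUT_SIZE];

	linear_ramp(lut);
	assert(lut[0] == 0);                  /* 0.0 */
	assert(lut[LUT_SIZE - 1] == 1 << 16); /* exactly 1.0 */
	return 0;
}
```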
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -783,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
glk_load_degamma_lut_linear(crtc_state);
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
@@ -827,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Super Fine segment (let's call it seg1)...
@@ -860,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Fine segment (let's call it seg2)...
@@ -919,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
@@ -945,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
return drm_color_lut_extract(color->red, 14);
}
+static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
+}
+
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -954,10 +1042,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
- chv_cgm_degamma_ldw(&lut[i]));
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
- chv_cgm_degamma_udw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
+ chv_cgm_degamma_ldw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
+ chv_cgm_degamma_udw(&lut[i]));
}
}
@@ -981,31 +1069,34 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
- chv_cgm_gamma_ldw(&lut[i]));
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
- chv_cgm_gamma_udw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
+ chv_cgm_gamma_ldw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
+ chv_cgm_gamma_udw(&lut[i]));
}
}
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *ctm = crtc_state->hw.ctm;
- cherryview_load_csc_matrix(crtc_state);
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+ chv_load_cgm_csc(crtc, ctm);
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts(crtc_state);
- return;
- }
-
- if (degamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
chv_load_cgm_degamma(crtc, degamma_lut);
- if (gamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
chv_load_cgm_gamma(crtc, gamma_lut);
+ else
+ i965_load_luts(crtc_state);
+
+ intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+ crtc_state->cgm_mode);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1167,7 +1258,8 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
/* C8 relies on its palette being stored in the legacy LUT */
if (crtc_state->c8_planes) {
- DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
}
@@ -1630,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
return true;
}
-/* convert hw value with given bit_precision to lut property val */
-static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
-}
-
-static struct drm_property_blob *
-i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
+ int i;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
@@ -1659,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- if (HAS_GMCH(dev_priv))
- val = I915_READ(PALETTE(pipe, i));
- else
- val = I915_READ(LGC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_RED_MASK, val), 8);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_GREEN_MASK, val), 8);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_BLUE_MASK, val), 8);
+ u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
}
return blob;
@@ -1680,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
}
-static struct drm_property_blob *
-i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val1, val2;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1703,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
- val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
-
- blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_RED_MASK, val1);
- blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
- blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
+ u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i965_lut_10p6_pack(&lut[i], ldw, udw);
}
- blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 0)));
- blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 1)));
- blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 2)));
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
return blob;
}
static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
}
-static struct drm_property_blob *
-chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1755,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
-
- val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
+
+ chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
return blob;
@@ -1774,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
+ crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
else
i965_read_luts(crtc_state);
}
-static struct drm_property_blob *
-ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
+static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1797,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = I915_READ(PREC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
+
+ ilk_lut_10_pack(&lut[i], val);
}
return blob;
@@ -1815,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
@@ -1822,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
}
-static struct drm_property_blob *
-glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int hw_lut_size = ivb_lut_10_size(prec_index);
+ int i, hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * hw_lut_size,
@@ -1844,36 +1917,33 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
- PAL_PREC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
- val = I915_READ(PREC_PAL_DATA(pipe));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
}
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
return blob;
}
static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 44bbc7e74fc3..9ff05ec12115 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -48,7 +48,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(ICL_PORT_COMP_DW3(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -81,26 +81,27 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- val = I915_READ(ICL_PORT_COMP_DW1(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
- I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
if ((val & mask) != expected_val) {
- DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
- "current %08x mask %08x expected %08x\n",
- phy_name(phy),
- reg.reg, val, mask, expected_val);
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ phy_name(phy),
+ reg.reg, val, mask, expected_val);
return false;
}
@@ -127,8 +128,8 @@ static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
{
- return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
- (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+ return !(intel_de_read(dev_priv, CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+ (intel_de_read(dev_priv, CNL_PORT_COMP_DW0) & COMP_INIT);
}
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
@@ -151,20 +152,20 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(CHICKEN_MISC_2);
+ val = intel_de_read(dev_priv, CHICKEN_MISC_2);
val &= ~CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
cnl_set_procmon_ref_values(dev_priv, PHY_A);
- val = I915_READ(CNL_PORT_COMP_DW0);
+ val = intel_de_read(dev_priv, CNL_PORT_COMP_DW0);
val |= COMP_INIT;
- I915_WRITE(CNL_PORT_COMP_DW0, val);
+ intel_de_write(dev_priv, CNL_PORT_COMP_DW0, val);
- val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
}
static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
@@ -172,11 +173,12 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
u32 val;
if (!cnl_combo_phy_verify_state(dev_priv))
- DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+ drm_warn(&dev_priv->drm,
+ "Combo PHY HW state changed unexpectedly.\n");
- val = I915_READ(CHICKEN_MISC_2);
+ val = intel_de_read(dev_priv, CHICKEN_MISC_2);
val |= CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_2, val);
}
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
@@ -184,27 +186,65 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
{
/* The PHY C added by EHL has no PHY_MISC register */
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
- return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
- return !(I915_READ(ICL_PHY_MISC(phy)) &
+ return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+ (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+}
+
+static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
+{
+ bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A);
+ bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D);
+ bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+ /*
+ * VBT's 'dvo port' field for child devices references the DDI, not
+ * the PHY. So if combo PHY A is wired up to drive an external
+ * display, we should see a child device present on PORT_D and
+ * nothing on PORT_A and no DSI.
+ */
+ if (ddi_d_present && !ddi_a_present && !dsi_present)
+ return true;
+
+ /*
+ * If we encounter a VBT that claims to have an external display on
+ * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
+ * in the log and let the internal display win.
+ */
+ if (ddi_d_present)
+ drm_err(&i915->drm,
+ "VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
+
+ return false;
}
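/*
 * Editorial aside — not part of this patch. The ehl_vbt_ddi_d_present()
 * rule added above reduces to a pure predicate of three VBT flags, with
 * the conflicting case resolved in favor of the internal display. A
 * standalone model with a tiny truth-table check; no i915 types used.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ddi_d_wins(bool ddi_a, bool ddi_d, bool dsi)
{
	/* external display on DDI-D only: mux PHY A to DDI-D */
	return ddi_d && !ddi_a && !dsi;
}

int main(void)
{
	/* conflicting VBT (DDI-D plus internal): internal display wins */
	printf("%d %d\n", ddi_d_wins(false, true, false),
	       ddi_d_wins(true, true, false));
	return 0;
}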
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
bool ret;
+ u32 expected_val = 0;
if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (phy == PHY_A)
+ if (phy == PHY_A) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ expected_val = ICL_PHY_MISC_MUX_DDID;
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy),
+ ICL_PHY_MISC_MUX_DDID,
+ expected_val);
+ }
+ }
+
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
@@ -219,7 +259,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
u32 val;
if (is_dsi) {
- WARN_ON(lane_reversal);
+ drm_WARN_ON(&dev_priv->drm, lane_reversal);
switch (lane_count) {
case 1:
@@ -257,36 +297,10 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = I915_READ(ICL_PORT_CL_DW10(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
- I915_WRITE(ICL_PORT_CL_DW10(phy), val);
-}
-
-static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
-{
- bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
- bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
- bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
-
- /*
- * VBT's 'dvo port' field for child devices references the DDI, not
- * the PHY. So if combo PHY A is wired up to drive an external
- * display, we should see a child device present on PORT_D and
- * nothing on PORT_A and no DSI.
- */
- if (ddi_d_present && !ddi_a_present && !dsi_present)
- return val | ICL_PHY_MISC_MUX_DDID;
-
- /*
- * If we encounter a VBT that claims to have an external display on
- * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
- * in the log and let the internal display win.
- */
- if (ddi_d_present)
- DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
-
- return val & ~ICL_PHY_MISC_MUX_DDID;
+ intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -297,8 +311,9 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
u32 val;
if (icl_combo_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
- phy_name(phy));
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c already enabled, won't reprogram it.\n",
+ phy_name(phy));
continue;
}
@@ -318,28 +333,33 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
* based on whether our VBT indicates the presence of any
* "internal" child devices.
*/
- val = I915_READ(ICL_PHY_MISC(phy));
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
- val = ehl_combo_phy_a_mux(dev_priv, val);
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) {
+ val &= ~ICL_PHY_MISC_MUX_DDID;
+
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ val |= ICL_PHY_MISC_MUX_DDID;
+ }
+
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(phy), val);
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
cnl_set_procmon_ref_values(dev_priv, phy);
if (phy == PHY_A) {
- val = I915_READ(ICL_PORT_COMP_DW8(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
- I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
}
- val = I915_READ(ICL_PORT_COMP_DW0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
- val = I915_READ(ICL_PORT_CL_DW5(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
}
}
@@ -352,7 +372,8 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy))
- DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
/*
@@ -363,14 +384,14 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
goto skip_phy_misc;
- val = I915_READ(ICL_PHY_MISC(phy));
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(phy), val);
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
- val = I915_READ(ICL_PORT_COMP_DW0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
val &= ~COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
}
}
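/*
 * Editorial aside — not part of this patch. A hedged sketch of the
 * mechanical conversion applied throughout this file: I915_READ()/
 * I915_WRITE(), which leaned on an implicit dev_priv in scope, become
 * intel_de_read()/intel_de_write() with the device passed explicitly.
 * Modeled here with a plain struct; the register map and helper names
 * are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct dev { uint32_t regs[16]; };

static uint32_t de_read(struct dev *d, unsigned int r)
{
	return d->regs[r];
}

static void de_write(struct dev *d, unsigned int r, uint32_t v)
{
	d->regs[r] = v;
}

/* read-modify-write, the dominant pattern in the hunks above */
static void de_rmw(struct dev *d, unsigned int r, uint32_t clear, uint32_t set)
{
	uint32_t val = de_read(d, r);

	val &= ~clear;
	val |= set;
	de_write(d, r, val);
}

int main(void)
{
	struct dev d = { .regs = { [2] = 0xffff0000 } };

	de_rmw(&d, 2, 0x00ff0000, 0x1);	/* clear one field, set a bit */
	printf("%08x\n", (unsigned)de_read(&d, 2));
	return 0;
}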
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 1133c4e97bb4..903e49659f56 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -153,7 +153,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
enum pipe pipe = 0;
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
return encoder->get_hw_state(encoder, &pipe);
}
@@ -162,7 +162,8 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(dev,
+ !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index f976b800b245..78f9b6cde810 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -32,7 +32,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -75,7 +74,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(adpa_reg);
+ val = intel_de_read(dev_priv, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -112,7 +111,7 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
- tmp = I915_READ(crt->adpa_reg);
+ tmp = intel_de_read(dev_priv, crt->adpa_reg);
if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -184,7 +183,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
- I915_WRITE(BCLRPAT(crtc->pipe), 0);
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -201,7 +200,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
break;
}
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
}
static void intel_disable_crt(struct intel_encoder *encoder,
@@ -230,7 +229,7 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -258,7 +257,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
- WARN_ON(!old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -269,7 +268,7 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -282,7 +281,7 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -299,7 +298,13 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+
+ intel_enable_pipe(crtc_state);
+
+ lpt_pch_enable(crtc_state);
+
+ intel_crtc_vblank_on(crtc_state);
intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
@@ -414,7 +419,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
- DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LPT only supports 24bpp\n");
return -EINVAL;
}
@@ -442,34 +448,37 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = false;
- save_adpa = adpa = I915_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
if (intel_de_wait_for_clear(dev_priv,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(crt->adpa_reg, save_adpa);
- POSTING_READ(crt->adpa_reg);
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+ drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+ adpa, ret);
return ret;
}
@@ -498,27 +507,30 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*/
reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
- save_adpa = adpa = I915_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- I915_WRITE(crt->adpa_reg, save_adpa);
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
if (reenable_hpd)
intel_hpd_enable(dev_priv, crt->base.hpd_pin);
@@ -558,15 +570,16 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* wait for FORCE_DETECT to go off */
if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_DETECT to go off");
}
- stat = I915_READ(PORT_HOTPLUG_STAT);
+ stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT);
if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
ret = true;
/* clear the interrupt we just generated, if any */
- I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+ intel_de_write(dev_priv, PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
@@ -629,13 +642,16 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
* have to check the EDID input spec of the attached device.
*/
if (!is_digital) {
- DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via DDC:0x50 [EDID]\n");
ret = true;
} else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
} else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
kfree(edid);
@@ -660,7 +676,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
u8 st00;
enum drm_connector_status status;
- DRM_DEBUG_KMS("starting load-detect on CRT\n");
+ drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
bclrpat_reg = BCLRPAT(pipe);
vtotal_reg = VTOTAL(pipe);
@@ -706,7 +722,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = I915_READ(vsync_reg);
+ u32 vsync = intel_de_read(dev_priv, vsync_reg);
u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
@@ -801,9 +817,9 @@ intel_crt_detect(struct drm_connector *connector,
int status, ret;
struct intel_load_detect_pipe tmp;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, connector->name,
- force);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name,
+ force);
if (i915_modparams.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
@@ -824,11 +840,13 @@ intel_crt_detect(struct drm_connector *connector,
* only trust an assertion that the monitor is connected.
*/
if (intel_crt_detect_hotplug(connector)) {
- DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via hotplug\n");
status = connector_status_connected;
goto out;
} else
- DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via hotplug\n");
}
if (intel_crt_detect_ddc(connector)) {
@@ -918,13 +936,13 @@ void intel_crt_reset(struct drm_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(crt->adpa_reg, adpa);
- POSTING_READ(crt->adpa_reg);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
- DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
+ drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = true;
}
@@ -969,7 +987,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
adpa_reg = ADPA;
- adpa = I915_READ(adpa_reg);
+ adpa = intel_de_read(dev_priv, adpa_reg);
if ((adpa & ADPA_DAC_ENABLE) == 0) {
/*
* On some machines (some IVB at least) CRT can be
@@ -979,11 +997,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
* take. So the only way to tell is attempt to enable
* it and see what happens.
*/
- I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
- ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
- if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+ intel_de_write(dev_priv, adpa_reg,
+ adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0)
return;
- I915_WRITE(adpa_reg, adpa);
+ intel_de_write(dev_priv, adpa_reg, adpa);
}
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
@@ -1027,6 +1045,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
if (HAS_DDI(dev_priv)) {
@@ -1057,14 +1078,6 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev_priv))
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-
- /*
- * Configure the automatic hotplug detection stuff
- */
- crt->force_hotplug_required = false;
-
/*
* TODO: find a proper way to discover whether we need to set the
* polarity and link reversal bits or not, instead of relying on the
@@ -1074,7 +1087,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
+ dev_priv->fdi_rx_config = intel_de_read(dev_priv,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
}
intel_crt_reset(&crt->base.base);
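/*
 * Editorial aside — not part of this patch. The polled-mode hunk above
 * now decides at init time between interrupt-driven hotplug and
 * periodic connect polling. A minimal model of that selection; the
 * enum values stand in for the DRM_CONNECTOR_POLL_* flags.
 */
#include <stdio.h>

enum poll_mode { POLL_HPD, POLL_CONNECT };

static enum poll_mode crt_poll_mode(int has_hotplug, int spurious_quirk)
{
	/* real HPD only when supported and not quirked off */
	return (has_hotplug && !spurious_quirk) ? POLL_HPD : POLL_CONNECT;
}

int main(void)
{
	printf("%d %d\n", crt_poll_mode(1, 0), crt_poll_mode(1, 1));
	return 0;
}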
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 09870a31b4f0..3112572cfb7d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_csr.h"
+#include "intel_de.h"
/**
* DOC: csr support for dmc
@@ -39,8 +40,8 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
@@ -276,11 +277,11 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
- val = I915_READ(DC_STATE_DEBUG);
+ val = intel_de_read(dev_priv, DC_STATE_DEBUG);
if ((val & mask) != mask) {
val |= mask;
- I915_WRITE(DC_STATE_DEBUG, val);
- POSTING_READ(DC_STATE_DEBUG);
+ intel_de_write(dev_priv, DC_STATE_DEBUG, val);
+ intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
}
@@ -298,12 +299,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
u32 i, fw_size;
if (!HAS_CSR(dev_priv)) {
- DRM_ERROR("No CSR support available for this platform\n");
+ drm_err(&dev_priv->drm,
+ "No CSR support available for this platform\n");
return;
}
if (!dev_priv->csr.dmc_payload) {
- DRM_ERROR("Tried to program CSR with empty payload\n");
+ drm_err(&dev_priv->drm,
+ "Tried to program CSR with empty payload\n");
return;
}
@@ -313,13 +316,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
preempt_disable();
for (i = 0; i < fw_size; i++)
- I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);
+ intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
+ payload[i]);
preempt_enable();
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
- I915_WRITE(dev_priv->csr.mmioaddr[i],
- dev_priv->csr.mmiodata[i]);
+ intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
+ dev_priv->csr.mmiodata[i]);
}
dev_priv->csr.dc_state = 0;
@@ -607,7 +611,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- WARN_ON(dev_priv->csr.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
dev_priv->csr.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
@@ -636,16 +640,16 @@ static void csr_load_work_fn(struct work_struct *work)
intel_csr_load_program(dev_priv);
intel_csr_runtime_pm_put(dev_priv);
- DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->csr.fw_path,
- CSR_VERSION_MAJOR(csr->version),
+ drm_info(&dev_priv->drm,
+ "Finished loading DMC firmware %s (v%u.%u)\n",
+ dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
- dev_notice(dev_priv->drm.dev,
+ drm_notice(&dev_priv->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
csr->fw_path);
- dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+ drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -712,7 +716,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (i915_modparams.dmc_firmware_path) {
if (strlen(i915_modparams.dmc_firmware_path) == 0) {
csr->fw_path = NULL;
- DRM_INFO("Disabling CSR firmware and runtime PM\n");
+ drm_info(&dev_priv->drm,
+ "Disabling CSR firmware and runtime PM\n");
return;
}
@@ -722,11 +727,12 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
}
if (csr->fw_path == NULL) {
- DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No known CSR firmware for platform, disabling runtime PM\n");
return;
}
- DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+ drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
schedule_work(&dev_priv->csr.work);
}
@@ -783,7 +789,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_csr_ucode_suspend(dev_priv);
- WARN_ON(dev_priv->csr.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
kfree(dev_priv->csr.dmc_payload);
}
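/*
 * Editorial aside — not part of this patch. A hedged sketch of the
 * logging conversion in this file: DRM_ERROR()/DRM_INFO() print with no
 * device context, while drm_err()/drm_info() take the drm device so
 * logs stay attributable on multi-GPU systems. Modeled with fprintf;
 * the macro shape is an assumption, not the drm_print API.
 */
#include <stdio.h>

struct drm_device { const char *name; };

#define drm_err_sketch(drm, fmt, ...) \
	fprintf(stderr, "%s: *ERROR* " fmt, (drm)->name, ##__VA_ARGS__)

int main(void)
{
	struct drm_device drm = { .name = "card0" };

	drm_err_sketch(&drm, "Tried to program CSR with empty payload\n");
	return 0;
}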
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/display/intel_csr.h
index 03c64f8af7ab..03c64f8af7ab 100644
--- a/drivers/gpu/drm/i915/intel_csr.h
+++ b/drivers/gpu/drm/i915/display/intel_csr.h
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d9a61f341070..73d0f4648c06 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -568,6 +568,20 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x64, 0x30, 0x00, 0x0F }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x64, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_11_6;
@@ -622,6 +636,34 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
{ 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
};
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -818,7 +860,7 @@ bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
@@ -839,7 +881,7 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
@@ -860,7 +902,7 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
@@ -901,15 +943,42 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return icl_combo_phy_ddi_translations_dp_hbr2;
}
-static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
{
- struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
+ if (type == INTEL_OUTPUT_DP && rate > 270000) {
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
+ return ehl_combo_phy_ddi_translations_hbr2_hbr3;
+ }
+
+ return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
+ if (type != INTEL_OUTPUT_DP) {
+ return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_combo_phy_ddi_translations_dp_hbr2;
+ }
+
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+}
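/*
 * Editorial aside — not part of this patch. The new EHL/TGL selectors
 * above key the buffer-translation table on output type and link rate,
 * with 270000 (2.7 GHz symbol rate, i.e. HBR) as the boundary: anything
 * faster takes the HBR2/HBR3 table. A minimal model; table contents
 * are elided and the names are illustrative.
 */
#include <stdio.h>

enum out { OUT_DP, OUT_HDMI };

static const char *pick_table(enum out type, int rate_khz)
{
	if (type == OUT_DP && rate_khz > 270000)
		return "hbr2_hbr3";
	return "base";
}

int main(void)
{
	/* HBR2 (540000) vs RBR (162000) */
	printf("%s %s\n", pick_table(OUT_DP, 540000),
	       pick_table(OUT_DP, 162000));
	return 0;
}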
+
+static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries, level, default_entry;
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
@@ -937,19 +1006,18 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
default_entry = 6;
} else {
- WARN(1, "ddi translation table missing\n");
+ drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
return 0;
}
- if (WARN_ON_ONCE(n_entries == 0))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
return 0;
- if (port_info->hdmi_level_shift_set)
- level = port_info->hdmi_level_shift;
- else
+ level = intel_bios_hdmi_level_shift(encoder);
+ if (level < 0)
level = default_entry;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -980,15 +1048,14 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) &&
- dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ if (IS_GEN9_BC(dev_priv) && intel_bios_dp_boost_level(encoder))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- I915_WRITE(DDI_BUF_TRANS_LO(port, i),
- ddi_translations[i].trans1 | iboost_bit);
- I915_WRITE(DDI_BUF_TRANS_HI(port, i),
- ddi_translations[i].trans2);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ ddi_translations[i].trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ ddi_translations[i].trans2);
}
}
@@ -1008,21 +1075,20 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) &&
- dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+ if (IS_GEN9_BC(dev_priv) && intel_bios_hdmi_boost_level(encoder))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
- ddi_translations[level].trans1 | iboost_bit);
- I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
- ddi_translations[level].trans2);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ ddi_translations[level].trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ ddi_translations[level].trans2);
}
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -1033,7 +1099,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
for (i = 0; i < 16; i++) {
udelay(1);
- if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
return;
}
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
@@ -1124,70 +1190,64 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
- I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
- FDI_RX_PWRDN_LANE0_VAL(2) |
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
udelay(220);
/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
- I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
- WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+ drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
- I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 |
- DP_TP_CTL_ENABLE);
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
* DDI E does not support port reversal, the functionality is
* achieved on the PCH side in FDI_RX_CTL, so no need to set the
* port reversal bit */
- I915_WRITE(DDI_BUF_CTL(PORT_E),
- DDI_BUF_CTL_ENABLE |
- ((crtc_state->fdi_lanes - 1) << 1) |
- DDI_BUF_TRANS_SELECT(i / 2));
- POSTING_READ(DDI_BUF_CTL(PORT_E));
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
udelay(600);
/* Program PCH FDI Receiver TU */
- I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
/* Wait for FDI receiver lane calibration */
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(FDI_RX_MISC(PIPE_A));
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
- POSTING_READ(FDI_RX_MISC(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
udelay(5);
- temp = I915_READ(DP_TP_STATUS(PORT_E));
+ temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
break;
@@ -1203,37 +1263,34 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
}
rx_ctl_val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
- POSTING_READ(DDI_BUF_CTL(PORT_E));
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(PORT_E), temp);
- POSTING_READ(DP_TP_CTL(PORT_E));
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(FDI_RX_MISC(PIPE_A));
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
- POSTING_READ(FDI_RX_MISC(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
}
/* Enable normal pixel sending for FDI */
- I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
}
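/*
 * Editorial aside — not part of this patch. The training loop above
 * walks the FDI buffer-translation table trying each voltage/emphasis
 * entry twice, which is what the "i / 2" indexing encodes. A standalone
 * model of that retry shape; the entry count and success condition are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define N_ENTRIES 10

static bool try_entry(int entry, int attempt)
{
	return entry == 3 && attempt == 1;	/* pretend entry 3 trains on retry */
}

int main(void)
{
	for (int i = 0; i < N_ENTRIES * 2; i++) {
		if (try_entry(i / 2, i % 2)) {
			printf("trained on step %d (entry %d)\n", i, i / 2);
			break;
		}
	}
	return 0;
}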
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1260,175 +1317,18 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
}
if (num_encoders != 1)
- WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
+ drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
+ num_encoders,
+ pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
}
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int refclk;
- int n, p, r;
- u32 wrpll;
-
- wrpll = I915_READ(reg);
- switch (wrpll & WRPLL_REF_MASK) {
- case WRPLL_REF_SPECIAL_HSW:
- /*
- * muxed-SSC for BDW.
- * non-SSC for non-ULT HSW. Check FUSE_STRAP3
- * for the non-SSC reference frequency.
- */
- if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- refclk = 24;
- else
- refclk = 135;
- break;
- }
- /* fall through */
- case WRPLL_REF_PCH_SSC:
- /*
- * We could calculate spread here, but our checking
- * code only cares about 5% accuracy, and spread is a max of
- * 0.5% downspread.
- */
- refclk = 135;
- break;
- case WRPLL_REF_LCPLL:
- refclk = 2700;
- break;
- default:
- MISSING_CASE(wrpll);
- return 0;
- }
-
- r = wrpll & WRPLL_DIVIDER_REF_MASK;
- p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
- n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
-
- /* Convert to KHz, p & r have a fixed point portion */
- return (refclk * n * 100) / (p * r);
-}
-
-static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq;
-
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
-
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR2_PDIV_1:
- p0 = 1;
- break;
- case DPLL_CFGCR2_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR2_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR2_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR2_KDIV_5:
- p2 = 5;
- break;
- case DPLL_CFGCR2_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR2_KDIV_3:
- p2 = 3;
- break;
- case DPLL_CFGCR2_KDIV_1:
- p2 = 1;
- break;
- }
-
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
- * 24 * 1000;
-
- dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
- * 24 * 1000) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq, ref_clock;
-
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR1_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR1_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR1_PDIV_5:
- p0 = 5;
- break;
- case DPLL_CFGCR1_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR1_KDIV_1:
- p2 = 1;
- break;
- case DPLL_CFGCR1_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR1_KDIV_3:
- p2 = 3;
- break;
- }
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
- * ref_clock;
-
- dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
- u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
switch (val) {
case DDI_CLK_SEL_NONE:
@@ -1447,77 +1347,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *pll_state)
-{
- u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
- u64 tmp;
-
- ref_clock = dev_priv->cdclk.hw.ref;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
- m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
- DKL_PLL_BIAS_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
- } else {
- m2_frac = 0;
- }
- } else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
- MG_PLL_DIV0_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
- } else {
- m2_frac = 0;
- }
- }
-
- switch (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
- div1 = 2;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
- div1 = 3;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
- div1 = 5;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
- div1 = 7;
- break;
- default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
- return 0;
- }
-
- div2 = (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
-
- /* div2 value of 0 is same as 1 means no div */
- if (div2 == 0)
- div2 = 1;
-
- /*
- * Adjust the original formula to delay the division by 2^22 in order to
- * minimize possible rounding errors.
- */
- tmp = (u64)m1 * m2_int * ref_clock +
- (((u64)m1 * m2_frac * ref_clock) >> 22);
- tmp = div_u64(tmp, 5 * div1 * div2);
-
- return tmp;
-}
-
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1543,214 +1372,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
-static void icl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- int link_clock;
-
- if (intel_phy_is_combo(dev_priv, phy)) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
- pipe_config->shared_dpll);
-
- if (pll_id == DPLL_ID_ICL_TBTPLL)
- link_clock = icl_calc_tbt_pll_link(dev_priv, port);
- else
- link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void cnl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
-
- switch (link_clock) {
- case DPLL_CFGCR0_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2700:
- link_clock = 270000;
- break;
- case DPLL_CFGCR0_LINK_RATE_3240:
- link_clock = 324000;
- break;
- case DPLL_CFGCR0_LINK_RATE_4050:
- link_clock = 405000;
- break;
- default:
- WARN(1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- /*
- * ctrl1 register is already shifted for each pll, just use 0 to get
- * the internal shift for each field
- */
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
- link_clock = skl_calc_wrpll_link(pll_state);
- } else {
- link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
-
- switch (link_clock) {
- case DPLL_CTRL1_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CTRL1_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CTRL1_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CTRL1_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CTRL1_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CTRL1_LINK_RATE_2700:
- link_clock = 270000;
- break;
- default:
- WARN(1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 val, pll;
-
- val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
- switch (val & PORT_CLK_SEL_MASK) {
- case PORT_CLK_SEL_LCPLL_810:
- link_clock = 81000;
- break;
- case PORT_CLK_SEL_LCPLL_1350:
- link_clock = 135000;
- break;
- case PORT_CLK_SEL_LCPLL_2700:
- link_clock = 270000;
- break;
- case PORT_CLK_SEL_WRPLL1:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
- break;
- case PORT_CLK_SEL_WRPLL2:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
- break;
- case PORT_CLK_SEL_SPLL:
- pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK;
- if (pll == SPLL_FREQ_810MHz)
- link_clock = 81000;
- else if (pll == SPLL_FREQ_1350MHz)
- link_clock = 135000;
- else if (pll == SPLL_FREQ_2700MHz)
- link_clock = 270000;
- else {
- WARN(1, "bad spll freq\n");
- return;
- }
- break;
- default:
- WARN(1, "bad port clock sel\n");
- return;
- }
-
- pipe_config->port_clock = link_clock * 2;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
-
- clock.m1 = 2;
- clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
-
- return chv_calc_dpll_params(100000, &clock);
-}
-
-static void bxt_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->port_clock =
- bxt_calc_pll_link(&pipe_config->dpll_hw_state);
-
- ddi_dotclock_get(pipe_config);
-}
-
static void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_ddi_clock_get(encoder, pipe_config);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
+ DPLL_ID_ICL_TBTPLL)
+ pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
+ encoder->port);
+ else
+ pipe_config->port_clock =
+ intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll);
+
+ ddi_dotclock_get(pipe_config);
}
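/*
 * Editorial aside — not part of this patch. The per-platform clock
 * readouts deleted above all encoded the same shape of PLL arithmetic,
 * now consolidated behind intel_dpll_get_freq(): DCO frequency is the
 * integer feedback part times the reference plus a 15-bit fractional
 * part, and the link clock divides that by p0*p1*p2*5. A hedged worked
 * example; the sample numbers are illustrative, not a real register
 * snapshot.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_link_khz(uint32_t dco_int, uint32_t dco_frac,
			      uint32_t ref_khz, uint32_t p0, uint32_t p1,
			      uint32_t p2)
{
	uint64_t dco = (uint64_t)dco_int * ref_khz +
		       ((uint64_t)dco_frac * ref_khz) / 0x8000;

	return (uint32_t)(dco / (p0 * p1 * p2 * 5));
}

int main(void)
{
	/* e.g. 24 MHz reference, divider choice p0=2 p1=1 p2=1 */
	printf("%u kHz\n",
	       (unsigned)calc_link_khz(225, 0x4000, 24000, 2, 1, 1));
	return 0;
}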
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -1764,7 +1401,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -1787,8 +1424,8 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
temp |= DP_MSA_MISC_COLOR_CEA_RGB;
@@ -1810,7 +1447,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
}
/*
@@ -1904,7 +1541,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- WARN_ON(master == INVALID_TRANSCODER);
+ drm_WARN_ON(&dev_priv->drm,
+ master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
@@ -1925,7 +1563,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
/*
@@ -1942,7 +1580,7 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
temp &= ~TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1952,16 +1590,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
- val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
val &= ~TRANS_DDI_FUNC_ENABLE;
if (INTEL_GEN(dev_priv) >= 12) {
- if (!intel_dp_mst_is_master_trans(crtc_state))
- val &= ~TGL_TRANS_DDI_PORT_MASK;
+ if (!intel_dp_mst_is_master_trans(crtc_state)) {
+ val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ }
} else {
- val &= ~TRANS_DDI_PORT_MASK;
+ val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -1983,20 +1623,21 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
- if (WARN_ON(!wakeref))
+ if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+ if (drm_WARN_ON(dev,
+ !intel_encoder->get_hw_state(intel_encoder, &pipe))) {
ret = -EIO;
goto out;
}
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe));
if (enable)
tmp |= TRANS_DDI_HDCP_SIGNALLING;
else
tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
- I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp);
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
@@ -2006,7 +1647,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum transcoder cpu_transcoder;
@@ -2030,7 +1671,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
else
cpu_transcoder = (enum transcoder) pipe;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
@@ -2083,12 +1724,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -2128,7 +1770,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -2162,7 +1805,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
out:
if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
- tmp = I915_READ(BXT_PHY_CTL(port));
+ tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
@@ -2221,7 +1864,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* happen since fake-MST encoders don't set their get_power_domains()
* hook.
*/
- if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
dig_port = enc_to_dig_port(encoder);
@@ -2254,11 +1898,13 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_PORT(port));
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_PORT(port));
else
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
}
}
@@ -2269,11 +1915,13 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_DISABLED);
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_DISABLED);
else
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
}
}
@@ -2282,13 +1930,13 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
{
u32 tmp;
- tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
@@ -2300,9 +1948,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
- iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+ iboost = intel_bios_hdmi_boost_level(encoder);
else
- iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+ iboost = intel_bios_dp_boost_level(encoder);
if (iboost == 0) {
const struct ddi_buf_trans *ddi_translations;
@@ -2315,9 +1963,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
else
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
iboost = ddi_translations[level].i_boost;
@@ -2350,9 +1998,9 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
else
ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
bxt_ddi_phy_set_signal_level(dev_priv, port,
@@ -2372,12 +2020,15 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, encoder->type,
+ tgl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
} else if (INTEL_GEN(dev_priv) == 11) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (IS_ELKHARTLAKE(dev_priv))
+ ehl_get_combo_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
+ else if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
@@ -2399,9 +2050,10 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
}
- if (WARN_ON(n_entries < 1))
+ if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
n_entries = 1;
- if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
return index_to_dp_signal_levels[n_entries - 1] &
@@ -2444,52 +2096,52 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
else
ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~SCALING_MODE_SEL_MASK;
val |= SCALING_MODE_SEL(2);
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(CNL_PORT_TX_DW2_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Rcomp scalar is fixed as 0x98 for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW5 */
/* All DW5 values are fixed for every table entry */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~RTERM_SELECT_MASK;
val |= RTERM_SELECT(6);
val |= TAP3_DISABLE;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW7 */
- val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
}
static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2515,12 +2167,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port));
if (type != INTEL_OUTPUT_HDMI)
val |= COMMON_KEEPER_EN;
else
val &= ~COMMON_KEEPER_EN;
- I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val);
/* 2. Program loadgen select */
/*
@@ -2530,33 +2182,33 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~TX_TRAINING_EN;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
cnl_ddi_vswing_program(encoder, level, type);
/* 6. Set training enable to trigger update */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val |= TX_TRAINING_EN;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
@@ -2567,8 +2219,15 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
u32 n_entries, val;
int ln;
- ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
- &n_entries);
+ if (INTEL_GEN(dev_priv) >= 12)
+ ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
+ else if (IS_ELKHARTLAKE(dev_priv))
+ ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
+ else
+ ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
if (!ddi_translations)
return;
@@ -2578,41 +2237,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
}
/* Set PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2641,12 +2300,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
if (type == INTEL_OUTPUT_HDMI)
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -2656,33 +2315,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(ICL_PORT_CL_DW5(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
/* 6. Set training enable to trigger update */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
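The loadgen-select rule in step 2 of the vswing sequences above reduces to a small table; a worked example under assumed link parameters:

	/*
	 * rate <= 600000 kHz, width == 4: LOADGEN_SELECT on lanes 1-3
	 * rate <= 600000 kHz, width <  4: LOADGEN_SELECT on lanes 1-2
	 * rate >  600000 kHz:             no lane sets LOADGEN_SELECT
	 */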
@@ -2706,33 +2365,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2740,9 +2399,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2750,7 +2409,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2761,17 +2420,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2779,9 +2438,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val);
- val = I915_READ(MG_TX2_DCC(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2789,18 +2448,22 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
+ val = intel_de_read(dev_priv,
+ MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ val);
- val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
+ val = intel_de_read(dev_priv,
+ MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ val);
}
}
@@ -2846,24 +2509,25 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
for (ln = 0; ln < 2; ln++) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, ln));
- I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+ intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
/* All the registers are RMW */
- val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
- I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val);
- val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
- I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val);
- val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
- I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
}
}
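Each Dekel update above is an open-coded read-modify-write behind a per-lane bank select. With the intel_de_rmw() helper added in intel_de.h below, every triple collapses to one call; a sketch of the equivalent loop, assuming intel_de_rmw() performs a plain read-modify-write of the selected bank:

	for (ln = 0; ln < 2; ln++) {
		/* select the per-lane register bank first */
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, ln));
		intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
		intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port),
			     dpcnt_mask, dpcnt_val);
		intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port),
			     dpcnt_mask, dpcnt_val);
		intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
			     DKL_TX_DP20BITMODE, 0);
	}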
@@ -2963,10 +2627,11 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ drm_WARN_ON(&dev_priv->drm,
+ (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
if (intel_phy_is_combo(dev_priv, phy)) {
/*
@@ -2981,14 +2646,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
*/
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(ICL_DPCLKA_CFGCR0);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
}
val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
@@ -2997,13 +2662,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
@@ -3012,7 +2677,7 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
enum port port;
u32 val;
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_port_masked(port, port_mask) {
enum phy phy = intel_port_to_phy(dev_priv, port);
bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
@@ -3025,13 +2690,13 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
* Punt on the case now where clock is gated, but it would
* be needed by the port. Something else is really broken then.
*/
- if (WARN_ON(ddi_clk_needed))
+ if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
continue;
DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
phy_name(phy));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
}
}
@@ -3057,7 +2722,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (WARN_ON(is_mst))
+ if (drm_WARN_ON(&dev_priv->drm, is_mst))
return;
}
@@ -3076,7 +2741,8 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (other_encoder == encoder)
continue;
- if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ port_mask & BIT(other_encoder->port)))
return;
}
/*
@@ -3098,52 +2764,54 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (WARN_ON(!pll))
+ if (drm_WARN_ON(&dev_priv->drm, !pll))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy))
- I915_WRITE(DDI_CLK_SEL(port),
- icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ icl_pll_to_ddi_clk_sel(encoder, crtc_state));
else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
/*
* MG does not exist but the programming is required
* to ungate DDIC and DDID
*/
- I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ DDI_CLK_SEL_MG);
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
- val = I915_READ(DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
/*
* Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
* This step and the step before must be done with separate
* register writes.
*/
- val = I915_READ(DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- I915_WRITE(DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
} else if (IS_GEN9_BC(dev_priv)) {
/* DDI -> PLL mapping */
- val = I915_READ(DPLL_CTRL2);
+ val = intel_de_read(dev_priv, DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- I915_WRITE(DPLL_CTRL2, val);
+ intel_de_write(dev_priv, DPLL_CTRL2, val);
} else if (INTEL_GEN(dev_priv) < 9) {
- I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(dev_priv, PORT_CLK_SEL(port),
+ hsw_pll_to_ddi_pll_sel(pll));
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
@@ -3155,15 +2823,17 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy) ||
(IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
- I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
- I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
- DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+ intel_de_write(dev_priv, DPCLKA_CFGCR0,
+ intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port));
} else if (IS_GEN9_BC(dev_priv)) {
- I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
- DPLL_CTRL2_DDI_CLK_OFF(port));
+ intel_de_write(dev_priv, DPLL_CTRL2,
+ intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port));
} else if (INTEL_GEN(dev_priv) < 9) {
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(dev_priv, PORT_CLK_SEL(port),
+ PORT_CLK_SEL_NONE);
}
}
@@ -3180,13 +2850,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
return;
if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
- ln0 = I915_READ(DKL_DP_MODE(tc_port));
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
- ln1 = I915_READ(DKL_DP_MODE(tc_port));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x0));
+ ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x1));
+ ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
} else {
- ln0 = I915_READ(MG_DP_MODE(0, tc_port));
- ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+ ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
+ ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
}
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
@@ -3198,7 +2870,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
switch (pin_assignment) {
case 0x0:
- WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dig_port->tc_mode != TC_PORT_LEGACY);
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -3243,13 +2916,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
}
if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
- I915_WRITE(DKL_DP_MODE(tc_port), ln0);
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
- I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x0));
+ intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x1));
+ intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1);
} else {
- I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
- I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+ intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
+ intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
}
}
@@ -3274,9 +2949,9 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
return;
intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
@@ -3294,90 +2969,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
return;
intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
-}
-
-static void
-tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
- u32 val;
-
- if (!cstate->dc3co_exitline)
- return;
-
- val = I915_READ(EXITLINE(cstate->cpu_transcoder));
- val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
- I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void
-tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
- u32 val, exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
-
- if (!cstate->dc3co_exitline)
- return;
-
- exit_scanlines = cstate->dc3co_exitline;
- exit_scanlines <<= EXITLINE_SHIFT;
- val = I915_READ(EXITLINE(cstate->cpu_transcoder));
- val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
- val |= exit_scanlines;
- val |= EXITLINE_ENABLE;
- I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *cstate)
-{
- u32 exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
- u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
-
- cstate->dc3co_exitline = 0;
-
- if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
- return;
-
- /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
- if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
- encoder->port != PORT_A)
- return;
-
- if (!cstate->has_psr2 || !cstate->hw.active)
- return;
-
- /*
- * DC3CO Exit time 200us B.Spec 49196
- * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
- */
- exit_scanlines =
- intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
-
- if (WARN_ON(exit_scanlines > crtc_vdisplay))
- return;
-
- cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
- DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
-}
-
-static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
-{
- u32 val;
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (INTEL_GEN(dev_priv) < 12)
- return;
-
- val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
-
- if (val & EXITLINE_ENABLE)
- crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
}
static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
@@ -3392,7 +2987,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
int level = intel_ddi_dp_level(intel_dp);
enum transcoder transcoder = crtc_state->cpu_transcoder;
- tgl_set_psr2_transcoder_exitline(crtc_state);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3534,9 +3128,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
int level = intel_ddi_dp_level(intel_dp);
if (INTEL_GEN(dev_priv) < 11)
- WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ drm_WARN_ON(&dev_priv->drm,
+ is_mst && (port == PORT_A || port == PORT_E));
else
- WARN_ON(is_mst && port == PORT_A);
+ drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3607,8 +3202,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
/* For MST, the MSA is set after the Virtual Channel is allocated,
 * from the MST encoder's pre_enable callback.
 */
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_ddi_set_dp_msa(crtc_state, conn_state);
+
+ intel_dp_set_m_n(crtc_state, M1_N1);
+ }
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -3618,8 +3216,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- int level = intel_ddi_hdmi_level(dev_priv, port);
+ int level = intel_ddi_hdmi_level(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -3673,7 +3270,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
* the DP link parameters
*/
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(encoder, crtc_state);
@@ -3706,20 +3303,20 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
bool wait = false;
u32 val;
- val = I915_READ(DDI_BUF_CTL(port));
+ val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
wait = true;
}
if (intel_crtc_has_dp_encoder(crtc_state)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
}
/* Disable FEC in DP Sink */
@@ -3751,9 +3348,13 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 val;
- val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~TGL_TRANS_DDI_PORT_MASK;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ val);
}
} else {
if (!is_mst)
@@ -3779,7 +3380,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
- tgl_clear_psr2_transcoder_exitline(old_crtc_state);
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3816,7 +3416,8 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
transcoder_name(old_crtc_state->cpu_transcoder));
- I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
}
static void intel_ddi_post_disable(struct intel_encoder *encoder,
@@ -3890,25 +3491,25 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
- val = I915_READ(FDI_RX_MISC(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_PCDCLK;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
static void intel_enable_ddi_dp(struct intel_encoder *encoder,
@@ -3944,9 +3545,9 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- WARN_ON(INTEL_GEN(dev_priv) < 9);
+ drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) < 9);
- if (WARN_ON(port < PORT_A || port > PORT_E))
+ if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
return CHICKEN_TRANS(trans[port]);
@@ -3964,8 +3565,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
+ "scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -3978,7 +3580,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3987,8 +3589,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
udelay(1);
@@ -3999,15 +3601,15 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
/* In HDMI/DVI mode, the port width and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port),
- dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port),
+ dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
@@ -4017,6 +3619,12 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_enable_pipe(crtc_state);
+
+ intel_crtc_vblank_on(crtc_state);
+
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
else
@@ -4096,43 +3704,11 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_hdcp *hdcp = &connector->hdcp;
- bool content_protection_type_changed =
- (conn_state->hdcp_content_type != hdcp->content_type &&
- conn_state->content_protection !=
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
- /*
- * During the HDCP encryption session if Type change is requested,
- * disable the HDCP and reenable it with new TYPE value.
- */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
- content_protection_type_changed)
- intel_hdcp_disable(connector);
-
- /*
- * Mark the hdcp state as DESIRED after the hdcp disable of type
- * change procedure.
- */
- if (content_protection_type_changed) {
- mutex_lock(&hdcp->mutex);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- mutex_unlock(&hdcp->mutex);
- }
-
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED ||
- content_protection_type_changed)
- intel_hdcp_enable(connector,
- crtc_state->cpu_transcoder,
- (u8)conn_state->hdcp_content_type);
+ intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
}
static void
@@ -4197,20 +3773,20 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
- dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+ dp_tp_ctl = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
- ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
- I915_WRITE(DDI_BUF_CTL(port),
- ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port),
+ ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
wait = true;
}
dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -4225,12 +3801,12 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
- POSTING_READ(DDI_BUF_CTL(port));
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
udelay(600);
}
@@ -4244,14 +3820,16 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
return false;
- return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (INTEL_GEN(dev_priv) >= 12 && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
+ else if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 3;
else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
@@ -4268,12 +3846,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
- if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_dsc_get_config(encoder, pipe_config);
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4342,7 +3920,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
pipe_config->fec_enable =
- I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+ intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
encoder->base.base.id, encoder->base.name,
@@ -4365,9 +3943,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (encoder->type == INTEL_OUTPUT_EDP)
- tgl_dc3co_exitline_get_config(pipe_config);
-
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -4449,7 +4024,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
} else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
- tgl_dc3co_exitline_compute_config(encoder, pipe_config);
}
if (ret)
@@ -4470,6 +4044,112 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
return 0;
}
+static bool mode_equal(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2)
+{
+ return drm_mode_match(mode1, mode2,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode1->clock == mode2->clock; /* we want an exact match */
+}
+
+static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+ const struct intel_link_m_n *m_n_2)
+{
+ return m_n_1->tu == m_n_2->tu &&
+ m_n_1->gmch_m == m_n_2->gmch_m &&
+ m_n_1->gmch_n == m_n_2->gmch_n &&
+ m_n_1->link_m == m_n_2->link_m &&
+ m_n_1->link_n == m_n_2->link_n;
+}
+
+static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+ const struct intel_crtc_state *crtc_state2)
+{
+ return crtc_state1->hw.active && crtc_state2->hw.active &&
+ crtc_state1->output_types == crtc_state2->output_types &&
+ crtc_state1->output_format == crtc_state2->output_format &&
+ crtc_state1->lane_count == crtc_state2->lane_count &&
+ crtc_state1->port_clock == crtc_state2->port_clock &&
+ mode_equal(&crtc_state1->hw.adjusted_mode,
+ &crtc_state2->hw.adjusted_mode) &&
+ m_n_equal(&crtc_state1->dp_m_n, &crtc_state2->dp_m_n);
+}
+
+static u8
+intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
+ int tile_group_id)
+{
+ struct drm_connector *connector;
+ const struct drm_connector_state *conn_state;
+ struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(ref_crtc_state->uapi.state);
+ u8 transcoders = 0;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
+ return 0;
+
+ for_each_new_connector_in_state(&state->base, connector, conn_state, i) {
+ struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *crtc_state;
+
+ if (!crtc)
+ continue;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_group_id)
+ continue;
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ if (!crtcs_port_sync_compatible(ref_crtc_state, crtc_state))
+ continue;
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector = conn_state->connector;
+ u8 port_sync_transcoders = 0;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]",
+ encoder->base.base.id, encoder->base.name,
+ crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+
+ if (connector->has_tile)
+ port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
+ connector->tile_group->id);
+
+ /*
+ * EDP transcoders cannot be enslaved;
+ * always make them a master when present.
+ */
+ if (port_sync_transcoders & BIT(TRANSCODER_EDP))
+ crtc_state->master_transcoder = TRANSCODER_EDP;
+ else
+ crtc_state->master_transcoder = ffs(port_sync_transcoders) - 1;
+
+ if (crtc_state->master_transcoder == crtc_state->cpu_transcoder) {
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->sync_mode_slaves_mask =
+ port_sync_transcoders & ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ return 0;
+}
+
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
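A worked example of the master pick in intel_ddi_compute_config_late() above, assuming a two-tile DP sink driven by transcoders A and B:

	/*
	 * port_sync_transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) = 0x3;
	 * ffs(0x3) - 1 == TRANSCODER_A, so A is picked as master.
	 * The CRTC whose cpu_transcoder is A then resets master_transcoder
	 * to INVALID_TRANSCODER and records BIT(TRANSCODER_B) in
	 * sync_mode_slaves_mask; the CRTC on B keeps A as its master.
	 */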
@@ -4569,7 +4249,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
return 0;
@@ -4636,7 +4317,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
/*
* Unpowered type-c dongles can take some time to boot and be
@@ -4716,7 +4398,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4739,15 +4421,14 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
- struct ddi_vbt_port_info *port_info =
- &dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
- init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
- init_dp = port_info->supports_dp;
+ init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) ||
+ intel_bios_port_supports_hdmi(dev_priv, port);
+ init_dp = intel_bios_port_supports_dp(dev_priv, port);
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
@@ -4779,6 +4460,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
+ encoder->compute_config_late = intel_ddi_compute_config_late;
encoder->enable = intel_enable_ddi;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
@@ -4797,10 +4479,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+ DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
else
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+ DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
@@ -4808,8 +4492,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (intel_phy_is_tc(dev_priv, phy)) {
- bool is_legacy = !port_info->supports_typec_usb &&
- !port_info->supports_tbt;
+ bool is_legacy =
+ !intel_bios_port_supports_typec_usb(dev_priv, port) &&
+ !intel_bios_port_supports_tbt(dev_priv, port);
intel_tc_port_init(intel_dig_port, is_legacy);
@@ -4817,7 +4502,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->update_complete = intel_ddi_update_complete;
}
- WARN_ON(port > PORT_I);
+ drm_WARN_ON(&dev_priv->drm, port > PORT_I);
intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
port - PORT_A;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 167c6579d972..55fd72b901fe 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__
-#include <drm/i915_drm.h>
-
#include "intel_display.h"
struct drm_connector_state;
@@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *state);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
new file mode 100644
index 000000000000..00da10bf35f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DE_H__
+#define __INTEL_DE_H__
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_uncore.h"
+
+static inline u32
+intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ intel_uncore_posting_read(&i915->uncore, reg);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline u32
+intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read_fw(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write(&i915->uncore, reg, val);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline void
+intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(&i915->uncore, reg, val);
+}
+
+static inline void
+intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+{
+ intel_uncore_rmw(&i915->uncore, reg, clear, set);
+}
+
+static inline int
+intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
+{
+ return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+}
+
+static inline int
+intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+}
+
+static inline int
+intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+}
+
+#endif /* __INTEL_DE_H__ */
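Taken together, these helpers also subsume the open-coded set-and-poll sequences earlier in the patch; an illustrative usage sketch (not part of the commit) mirroring the intel_ddi_enable_fec() hunk above:

	/* set the FEC enable bit, then poll until the status goes live */
	intel_de_rmw(dev_priv, intel_dp->regs.dp_tp_ctl,
		     0, DP_TP_CTL_FEC_ENABLE);
	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
				  DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
		drm_err(&dev_priv->drm,
			"Timed out waiting for FEC enable to go live\n");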
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index aa453953908b..346846609f45 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
@@ -203,9 +202,9 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
val = vlv_cck_read(dev_priv, reg);
divider = val & CCK_FREQUENCY_VALUES;
- WARN((val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
+ drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
@@ -235,7 +234,8 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);
- DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
+ drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
+ dev_priv->czclk_freq);
}
static inline u32 /* units of 100MHz */
@@ -518,13 +518,11 @@ static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) |
- DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) &
- ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
/* Wa_2006604312:icl */
@@ -533,11 +531,11 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
else
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}
static bool
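As an aside, the two workaround helpers just converted (skl_wa_827() and icl_wa_scalerclkgating()) still spell out the read/modify/write by hand; the intel_de_rmw() wrapper added in intel_de.h could express the same operation more compactly. A hypothetical equivalent, not part of this patch:

static void example_skl_wa_827(struct drm_i915_private *dev_priv,
			       enum pipe pipe, bool enable)
{
	u32 bits = DUPS1_GATING_DIS | DUPS2_GATING_DIS;

	/* intel_de_rmw(i915, reg, clear, set): clear bits first, then set. */
	if (enable)
		intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, bits);
	else
		intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), bits, 0);
}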
@@ -883,7 +881,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
return calculated_clock->p > best_clock->p;
}
- if (WARN_ON_ONCE(!target_freq))
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
@@ -1049,9 +1047,9 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
else
line_mask = DSL_LINEMASK_GEN3;
- line1 = I915_READ(reg) & line_mask;
+ line1 = intel_de_read(dev_priv, reg) & line_mask;
msleep(5);
- line2 = I915_READ(reg) & line_mask;
+ line2 = intel_de_read(dev_priv, reg) & line_mask;
return line1 != line2;
}
@@ -1063,8 +1061,9 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
/* Wait for the display line to settle/start moving */
if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- DRM_ERROR("pipe %c scanline %s wait timed out\n",
- pipe_name(pipe), onoff(state));
+ drm_err(&dev_priv->drm,
+ "pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), onoff(state));
}
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
@@ -1090,7 +1089,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
/* Wait for the Pipe State to go off */
if (intel_de_wait_for_clear(dev_priv, reg,
I965_PIPECONF_ACTIVE, 100))
- WARN(1, "pipe_off wait timed out\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
@@ -1103,7 +1103,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(DPLL(pipe));
+ val = intel_de_read(dev_priv, DPLL(pipe));
cur_state = !!(val & DPLL_VCO_ENABLE);
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
@@ -1139,10 +1139,11 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
* so pipe->transcoder cast is fine here.
*/
enum transcoder cpu_transcoder = (enum transcoder)pipe;
- u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
- u32 val = I915_READ(FDI_TX_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
cur_state = !!(val & FDI_TX_ENABLE);
}
I915_STATE_WARN(cur_state != state,
@@ -1158,7 +1159,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(FDI_RX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
@@ -1180,7 +1181,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
if (HAS_DDI(dev_priv))
return;
- val = I915_READ(FDI_TX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
@@ -1190,7 +1191,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(FDI_RX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_PLL_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
@@ -1204,14 +1205,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
- port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
@@ -1238,13 +1239,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 port_sel;
pp_reg = PP_CONTROL(0);
- port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ drm_WARN_ON(&dev_priv->drm,
+ port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
- val = I915_READ(pp_reg);
+ val = intel_de_read(dev_priv, pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
@@ -1268,7 +1270,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
- u32 val = I915_READ(PIPECONF(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
@@ -1318,7 +1320,7 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
u32 val;
bool enabled;
- val = I915_READ(PCH_TRANSCONF(pipe));
+ val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
enabled = !!(val & TRANS_ENABLE);
I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
@@ -1392,12 +1394,12 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- DRM_ERROR("DPLL %d failed to lock\n", pipe);
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
static void vlv_enable_pll(struct intel_crtc *crtc,
@@ -1414,8 +1416,9 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
_vlv_enable_pll(crtc, pipe_config);
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
@@ -1442,11 +1445,11 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
udelay(1);
/* Enable PLL */
- I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- DRM_ERROR("PLL %d failed to lock\n", pipe);
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
static void chv_enable_pll(struct intel_crtc *crtc,
@@ -1470,19 +1473,23 @@ static void chv_enable_pll(struct intel_crtc *crtc,
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
- I915_WRITE(CBR4_VLV, 0);
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
} else {
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
}
@@ -1513,29 +1520,29 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
- I915_WRITE(reg, dpll);
+ intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, reg, dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE(DPLL_MD(crtc->pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+ crtc_state->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
- I915_WRITE(reg, dpll);
+ intel_de_write(dev_priv, reg, dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, dpll);
+ intel_de_posting_read(dev_priv, reg);
udelay(150); /* wait for warmup */
}
}
@@ -1553,8 +1560,8 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
- I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1569,8 +1576,8 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1586,8 +1593,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
vlv_dpio_get(dev_priv);
@@ -1626,9 +1633,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
- WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dport->base.base.base.id, dport->base.base.name,
- I915_READ(dpll_reg) & port_mask, expected_mask);
+ drm_WARN(&dev_priv->drm, 1,
+ "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
+ intel_de_read(dev_priv, dpll_reg) & port_mask,
+ expected_mask);
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -1648,7 +1657,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_CPT(dev_priv)) {
reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
/*
* Workaround: Set the timing override bit
* before enabling the pch transcoder.
@@ -1657,12 +1666,12 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
reg = PCH_TRANSCONF(pipe);
- val = I915_READ(reg);
- pipeconf_val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, reg);
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -1692,9 +1701,10 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_PROGRESSIVE;
}
- I915_WRITE(reg, val | TRANS_ENABLE);
+ intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
- DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
+ pipe_name(pipe));
}
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1706,16 +1716,16 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
- val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
PIPECONF_INTERLACED_ILK)
@@ -1723,10 +1733,10 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
else
val |= TRANS_PROGRESSIVE;
- I915_WRITE(LPT_TRANSCONF, val);
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- DRM_ERROR("Failed to enable PCH transcoder\n");
+ drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1743,19 +1753,20 @@ static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_ENABLE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -1763,18 +1774,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(LPT_TRANSCONF);
+ val = intel_de_read(dev_priv, LPT_TRANSCONF);
val &= ~TRANS_ENABLE;
- I915_WRITE(LPT_TRANSCONF, val);
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- DRM_ERROR("Failed to disable PCH transcoder\n");
+ drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
@@ -1807,7 +1818,7 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
return 0; /* Gen2 doesn't have a hardware frame counter */
}
-static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1825,7 +1836,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
assert_vblank_disabled(&crtc->base);
}
-static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1834,7 +1845,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
i915_reg_t reg;
u32 val;
- DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
assert_planes_disabled(crtc);
@@ -1862,15 +1873,15 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
trace_intel_pipe_enable(crtc);
reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
/* we keep both pipes enabled on 830 */
- WARN_ON(!IS_I830(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- I915_WRITE(reg, val | PIPECONF_ENABLE);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -1892,7 +1903,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg;
u32 val;
- DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
/*
* Make sure planes won't keep trying to pump pixels to us,
@@ -1903,7 +1914,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
trace_intel_pipe_disable(crtc);
reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1918,7 +1929,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
if ((val & PIPECONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -2211,11 +2222,11 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
/* Note that the w/a also requires 64 PTE of padding following the
@@ -2386,7 +2397,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int cpp = fb->format->cpp[color_plane];
- WARN_ON(new_offset > old_offset);
+ drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
@@ -2537,8 +2548,9 @@ static int intel_fb_offset_to_xy(int *x, int *y,
alignment = 0;
if (alignment != 0 && fb->offsets[color_plane] % alignment) {
- DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
- fb->offsets[color_plane], color_plane);
+ drm_dbg_kms(&dev_priv->drm,
+ "Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
}
@@ -2548,9 +2560,10 @@ static int intel_fb_offset_to_xy(int *x, int *y,
/* Catch potential overflows early */
if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
fb->offsets[color_plane])) {
- DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
- fb->offsets[color_plane], fb->pitches[color_plane],
- color_plane);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
return -ERANGE;
}
@@ -2706,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
/*
* We assume the primary plane for pipe A has
- * the highest stride limits of them all.
+ * the highest stride limits of them all;
+ * if pipe A is disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return 0;
@@ -3034,8 +3048,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
ret = intel_fb_offset_to_xy(&x, &y, fb, i);
if (ret) {
- DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
+ drm_dbg_kms(&dev_priv->drm,
+ "bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
return ret;
}
@@ -3054,8 +3069,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
*/
if (i == 0 && i915_gem_object_is_tiled(obj) &&
(x + width) * cpp > fb->pitches[i]) {
- DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
+ drm_dbg_kms(&dev_priv->drm,
+ "bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
return -EINVAL;
}
@@ -3111,8 +3127,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
}
if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
- DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), obj->base.size);
+ drm_dbg_kms(&dev_priv->drm,
+ "fb too big for bo (need %llu bytes, have %zu bytes)\n",
+ mul_u32_u32(max_size, tile_size), obj->base.size);
return -EINVAL;
}
@@ -3143,7 +3160,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- WARN_ON(is_ccs_modifier(fb->modifier));
+ drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
drm_rect_translate(&plane_state->uapi.src,
@@ -3184,7 +3201,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
- WARN_ON(i >= ARRAY_SIZE(info->plane));
+ drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
info->plane[i].offset = offset;
info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
tile_width * cpp);
@@ -3377,6 +3394,67 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
}
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 base, size;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base,
+ I915_GTT_MIN_ALIGNMENT);
+ size = round_up(plane_config->base + plane_config->size,
+ I915_GTT_MIN_ALIGNMENT);
+ size -= base;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (size * 2 > i915->stolen_usable_size)
+ return NULL;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ if (IS_ERR(obj))
+ return NULL;
+
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride =
+ plane_config->fb->base.pitches[0] |
+ plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ goto err_obj;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ !i915_vma_is_map_and_fenceable(vma))
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return NULL;
+}
+
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
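The rounding at the top of initial_plane_vma() widens the range so the stolen-memory object covers the whole BIOS-programmed framebuffer even when plane_config->base is not aligned. A worked example, assuming I915_GTT_MIN_ALIGNMENT is the 4 KiB GTT page size (the input values are illustrative only):

/* Illustrative: plane_config->base = 0x1100, plane_config->size = 0x2000 */
u32 base = round_down(0x1100, 0x1000);         /* 0x1000 */
u32 size = round_up(0x1100 + 0x2000, 0x1000);  /* 0x3100 rounds up to 0x4000 */
size -= base;                                  /* 0x3000, covering 0x1000..0x4000 */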
@@ -3385,22 +3463,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
- u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
- u32 size_aligned = round_up(plane_config->base + plane_config->size,
- PAGE_SIZE);
- struct drm_i915_gem_object *obj;
- bool ret = false;
-
- size_aligned -= base_aligned;
-
- if (plane_config->size == 0)
- return false;
-
- /* If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features. */
- if (size_aligned * 2 > dev_priv->stolen_usable_size)
- return false;
+ struct i915_vma *vma;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -3408,30 +3471,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
case I915_FORMAT_MOD_Y_TILED:
break;
default:
- DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
- fb->modifier);
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
return false;
}
- obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- base_aligned,
- base_aligned,
- size_aligned);
- if (IS_ERR(obj))
+ vma = initial_plane_vma(dev_priv, plane_config);
+ if (!vma)
return false;
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- break;
- case I915_TILING_X:
- case I915_TILING_Y:
- obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto out;
- }
-
mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
@@ -3439,17 +3488,18 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
- if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
- DRM_DEBUG_KMS("intel fb init failed\n");
- goto out;
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ vma->obj, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_vma;
}
+ plane_config->vma = vma;
+ return true;
- DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
- ret = true;
-out:
- i915_gem_object_put(obj);
- return ret;
+err_vma:
+ i915_vma_put(vma);
+ return false;
}
static void
@@ -3493,9 +3543,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
- plane->base.base.id, plane->base.name,
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+ plane->base.base.id, plane->base.name,
+ crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
@@ -3547,17 +3598,17 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_state *intel_state =
to_intel_plane_state(plane_state);
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
if (!plane_config->fb)
return;
if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
fb = &plane_config->fb->base;
+ vma = plane_config->vma;
goto valid_fb;
}
- kfree(plane_config->fb);
-
/*
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
@@ -3577,7 +3628,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = state->hw.fb;
- drm_framebuffer_get(fb);
+ vma = state->vma;
goto valid_fb;
}
}
@@ -3600,21 +3651,11 @@ valid_fb:
intel_state->color_plane[0].stride =
intel_fb_pitch(fb, 0, intel_state->hw.rotation);
- intel_state->vma =
- intel_pin_and_fence_fb_obj(fb,
- &intel_state->view,
- intel_plane_uses_fence(intel_state),
- &intel_state->flags);
- if (IS_ERR(intel_state->vma)) {
- DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
- intel_crtc->pipe, PTR_ERR(intel_state->vma));
-
- intel_state->vma = NULL;
- drm_framebuffer_put(fb);
- return;
- }
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+ __i915_vma_pin(vma);
+ intel_state->vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
+ if (vma->fence)
+ intel_state->flags |= PLANE_HAS_FENCE;
plane_state->src_x = 0;
plane_state->src_y = 0;
@@ -3633,9 +3674,13 @@ valid_fb:
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
+ drm_framebuffer_get(fb);
+
plane_state->crtc = &intel_crtc->base;
intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&to_intel_frontbuffer(fb)->bits);
}
@@ -3798,15 +3843,16 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
max_height = skl_max_plane_height();
if (w > max_width || h > max_height) {
- DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
return -EINVAL;
}
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
/*
@@ -3829,7 +3875,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
while ((x + w) * cpp > plane_state->color_plane[0].stride) {
if (offset == 0) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
@@ -3854,7 +3901,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (x != plane_state->color_plane[aux_plane].x ||
y != plane_state->color_plane[aux_plane].y) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
}
@@ -3875,6 +3923,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int uv_plane = 1;
@@ -3892,8 +3941,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
- DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
+ drm_dbg_kms(&i915->drm,
+ "CbCr source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
return -EINVAL;
}
@@ -3922,7 +3972,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
if (x != plane_state->color_plane[ccs_plane].x ||
y != plane_state->color_plane[ccs_plane].y) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
}
@@ -4331,7 +4382,8 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->color_plane[0].stride);
if (INTEL_GEN(dev_priv) < 4) {
/*
@@ -4339,21 +4391,26 @@ static void i9xx_update_plane(struct intel_plane *plane,
* generator but let's assume we still need to
* program whatever is there.
*/
- I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(DSPSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(PRIMSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ (y << 16) | x);
}
/*
@@ -4361,15 +4418,13 @@ static void i9xx_update_plane(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
else
- I915_WRITE_FW(DSPADDR(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -4396,11 +4451,11 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
else
- I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -4425,7 +4480,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- val = I915_READ(DSPCNTR(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
ret = val & DISPLAY_PLANE_ENABLE;
@@ -4444,10 +4499,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
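Note the shape of the skl_detach_scaler() conversion above: the *_fw accessors skip the forcewake and locking bookkeeping of the plain intel_de_read()/intel_de_write() wrappers (hence the "read the warnings" notes in intel_de.h), so the caller serializes raw MMIO itself by holding uncore.lock around the writes. Restated as a sketch with a hypothetical function name:

static void example_detach_scaler(struct drm_i915_private *dev_priv,
				  enum pipe pipe, int id)
{
	unsigned long irqflags;

	/* Serialize raw MMIO ourselves: *_fw bypasses the uncore lock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), 0);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}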
@@ -4791,7 +4851,7 @@ __intel_display_resume(struct drm_device *dev,
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- WARN_ON(ret == -EDEADLK);
+ drm_WARN_ON(dev, ret == -EDEADLK);
return ret;
}
@@ -4819,7 +4879,8 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
- DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset potentially stuck, unbreaking through wedging\n");
intel_gt_set_wedged(&dev_priv->gt);
}
@@ -4843,13 +4904,15 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
state = drm_atomic_helper_duplicate_state(dev, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- DRM_ERROR("Duplicating state failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ ret);
return;
}
ret = drm_atomic_helper_disable_all(dev, ctx);
if (ret) {
- DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
drm_atomic_state_put(state);
return;
}
@@ -4878,7 +4941,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
/* for testing only restore the display */
ret = __intel_display_resume(dev, state, ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
@@ -4895,7 +4959,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
ret = __intel_display_resume(dev, state, ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
intel_hpd_init(dev_priv);
}
@@ -4915,7 +4980,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = I915_READ(PIPE_CHICKEN(pipe));
+ tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
/*
* Display WA #1153: icl
@@ -4930,7 +4995,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* across pipe
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
- I915_WRITE(PIPE_CHICKEN(pipe), tmp);
+ intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
@@ -4959,8 +5024,9 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state
/* Enable Transcoder Port Sync */
trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
- trans_ddi_func_ctl2_val);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+ trans_ddi_func_ctl2_val);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -4973,7 +5039,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
@@ -4981,10 +5047,10 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
@@ -4992,16 +5058,16 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
- I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(1000);
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
- FDI_FE_ERRC_ENABLE);
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -5020,81 +5086,83 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
- I915_READ(reg);
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
- DRM_DEBUG_KMS("FDI train 1 done.\n");
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- DRM_ERROR("FDI train 1 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- DRM_ERROR("FDI train 2 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
- DRM_DEBUG_KMS("FDI train done\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}
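Each training stage above follows the same poll-and-acknowledge shape: read FDI_RX_IIR until the lock bit appears, then write the bit back to acknowledge it. A hypothetical condensation of that loop (the delays and retry counts differ between the ILK, SNB and IVB trainers, so this is a sketch rather than shared code):

static bool example_fdi_wait_lock(struct drm_i915_private *dev_priv,
				  i915_reg_t reg, u32 lock_bit)
{
	int tries;

	for (tries = 0; tries < 5; tries++) {
		u32 temp = intel_de_read(dev_priv, reg);

		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
		if (temp & lock_bit) {
			/* Acknowledge the interrupt by writing the bit back. */
			intel_de_write(dev_priv, reg, temp | lock_bit);
			return true;
		}
		udelay(50);
	}
	return false;
}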
@@ -5118,17 +5186,17 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -5136,13 +5204,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5150,28 +5218,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
break;
}
udelay(50);
@@ -5180,11 +5250,11 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- DRM_ERROR("FDI train 1 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
if (IS_GEN(dev_priv, 6)) {
@@ -5192,10 +5262,10 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -5203,28 +5273,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
break;
}
udelay(50);
@@ -5233,9 +5305,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- DRM_ERROR("FDI train 2 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
- DRM_DEBUG_KMS("FDI train done.\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
@@ -5251,111 +5323,117 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
- DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
/* Try each vswing and preemphasis setting twice before moving on */
for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
/* disable first in case we need to retry */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_TX_ENABLE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp &= ~FDI_RX_ENABLE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(1); /* should be 0.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK ||
- (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
- i);
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
break;
}
udelay(1); /* should be 0.5us */
}
if (i == 4) {
- DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
continue;
}
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK ||
- (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
- i);
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
goto train_done;
}
udelay(2); /* should be 1.5us */
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
}
train_done:
- DRM_DEBUG_KMS("FDI train done.\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -5368,29 +5446,29 @@ static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- temp = I915_READ(reg);
- I915_WRITE(reg, temp | FDI_PCDCLK);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
}
@@ -5405,23 +5483,23 @@ static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_PCDCLK);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
/* Disable CPU FDI TX PLL */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
/* Wait for the clocks to turn off. */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
@@ -5434,32 +5512,33 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
/* disable CPU FDI tx and PCH FDI rx */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev_priv))
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5469,10 +5548,10 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
@@ -5505,7 +5584,7 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
- I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
mutex_lock(&dev_priv->sb_lock);
@@ -5552,17 +5631,14 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
}
/* This should not happen with any sane values */
- WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
- ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
- ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
-
- DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- clock,
- auxdiv,
- divsel,
- phasedir,
- phaseinc);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ clock, auxdiv, divsel, phasedir, phaseinc);
mutex_lock(&dev_priv->sb_lock);
@@ -5592,7 +5668,7 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
/* Wait for initialization time */
udelay(24);
- I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
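
The same hunk also shows the second conversion running through this patch: WARN_ON()/DRM_DEBUG_KMS()/DRM_ERROR() become drm_WARN_ON()/drm_dbg_kms()/drm_err(), which take a struct drm_device and prefix their output with the device name, so a message can be attributed to the right GPU on multi-device systems. A condensed sketch of both forms (hypothetical *_sketch name; macros and format string as used above):

static void iclkip_report_sketch(struct drm_i915_private *dev_priv,
				 int clock, u32 auxdiv, u32 divsel,
				 u32 phasedir, u32 phaseinc)
{
	/* sanity check, now attributed to the right device */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);

	/* debug output, prefixed with the drm device name */
	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);
}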
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
@@ -5603,7 +5679,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
u32 desired_divisor;
u32 temp;
- if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
mutex_lock(&dev_priv->sb_lock);
@@ -5639,41 +5715,46 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
- I915_READ(HTOTAL(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
- I915_READ(HBLANK(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
- I915_READ(HSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
- I915_READ(VTOTAL(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
- I915_READ(VBLANK(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
- I915_READ(VSYNC(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- I915_READ(VSYNCSHIFT(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
u32 temp;
- temp = I915_READ(SOUTH_CHICKEN1);
+ temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ FDI_RX_ENABLE);
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- POSTING_READ(SOUTH_CHICKEN1);
+ drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ enable ? "en" : "dis");
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
@@ -5723,8 +5804,9 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
num_encoders++;
}
- WARN(num_encoders != 1, "%d encoders for pipe %c\n",
- num_encoders, pipe_name(crtc->pipe));
+ drm_WARN(encoder->base.dev, num_encoders != 1,
+ "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(crtc->pipe));
return encoder;
}
@@ -5753,8 +5835,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
/* Write the TU size bits before fdi link training, so that error
* detection works. */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc, crtc_state);
@@ -5764,7 +5846,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
- temp = I915_READ(PCH_DPLL_SEL);
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (crtc_state->shared_dpll ==
@@ -5772,7 +5854,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
temp |= sel;
else
temp &= ~sel;
- I915_WRITE(PCH_DPLL_SEL, temp);
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
/* XXX: pch pll's can be enabled any time before we enable the PCH
@@ -5795,11 +5877,11 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
@@ -5812,17 +5894,16 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
port = intel_get_crtc_new_encoder(state, crtc_state)->port;
- WARN_ON(port < PORT_B || port > PORT_D);
+ drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
temp |= TRANS_DP_PORT_SEL(port);
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
}
ilk_enable_pch_transcoder(crtc_state);
}
-static void lpt_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5844,11 +5925,13 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
- temp = I915_READ(dslreg);
+ temp = intel_de_read(dev_priv, dslreg);
udelay(500);
- if (wait_for(I915_READ(dslreg) != temp, 5)) {
- if (wait_for(I915_READ(dslreg) != temp, 5))
- DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
+ drm_err(&dev_priv->drm,
+ "mode set failed: pipe %c stuck\n",
+ pipe_name(pipe));
}
}
@@ -5963,7 +6046,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -5982,10 +6066,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
- DRM_DEBUG_KMS("scaler_user index %u.%u: "
- "Staged freeing scaler id %d scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, *scaler_id,
- scaler_state->scaler_users);
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, *scaler_id,
+ scaler_state->scaler_users);
*scaler_id = -1;
}
return 0;
@@ -5993,7 +6078,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Planar YUV: src dimensions not met\n");
return -EINVAL;
}
@@ -6006,18 +6092,20 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
(INTEL_GEN(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
- DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
- "size is out of scaler range\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h,
+ dst_w, dst_h);
return -EINVAL;
}
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- DRM_DEBUG_KMS("scaler_user index %u.%u: "
- "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
- scaler_state->scaler_users);
+ drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
return 0;
}
@@ -6036,7 +6124,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
bool need_scaler = false;
- if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ state->pch_pfit.enabled)
need_scaler = true;
return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
@@ -6088,9 +6177,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* check colorkey */
if (plane_state->ckey.flags) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
- intel_plane->base.base.id,
- intel_plane->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
return -EINVAL;
}
@@ -6128,9 +6218,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
break;
/* fall through */
default:
- DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->format->format);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->format->format);
return -EINVAL;
}
@@ -6157,9 +6248,11 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
+ unsigned long irqflags;
int id;
- if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
return;
pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
@@ -6172,14 +6265,21 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
- I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
- PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ crtc_state->pch_pfit.pos);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ crtc_state->pch_pfit.size);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
}
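
This hunk is more than a rename: the scaler programming now uses intel_de_write_fw(), the raw variant that skips the per-access forcewake and locking overhead, so the caller batches the whole burst of writes under the uncore spinlock itself. A condensed sketch of that pattern, under the same assumptions as the code above (hypothetical *_sketch name):

static void skl_ps_win_sketch(struct drm_i915_private *dev_priv,
			      enum pipe pipe, int id, u32 pos, u32 size)
{
	unsigned long irqflags;

	/* serialize the whole burst once instead of per register */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), pos);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), size);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}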
@@ -6195,12 +6295,15 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
- PF_PIPE_SEL_IVB(pipe));
+ intel_de_write(dev_priv, PF_CTL(pipe),
+ PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
else
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
+ intel_de_write(dev_priv, PF_CTL(pipe),
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe),
+ crtc_state->pch_pfit.pos);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe),
+ crtc_state->pch_pfit.size);
}
}
@@ -6218,25 +6321,26 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
- WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+ drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
+ drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
* so we need to just enable it and continue on.
*/
} else {
- I915_WRITE(IPS_CTL, IPS_ENABLE);
+ intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
/* The bit only becomes 1 in the next vblank, so this wait here
* is essentially intel_wait_for_vblank. If we don't have this
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
- DRM_ERROR("Timed out waiting for IPS enable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for IPS enable\n");
}
}
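
The IPS paths above also lean on intel_de_wait_for_set()/intel_de_wait_for_clear(), which poll a register until the given bits are set or clear and return 0 on success or a negative errno on timeout; the last argument is the timeout in milliseconds. A simplified sketch of both directions (hypothetical *_sketch name; the real code above splits the Broadwell pcode path from the direct-MMIO path, which this sketch collapses):

static int ips_toggle_sketch(struct drm_i915_private *dev_priv, bool enable)
{
	if (enable) {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* the bit only latches at the next vblank, hence the poll */
		return intel_de_wait_for_set(dev_priv, IPS_CTL,
					     IPS_ENABLE, 50);
	}

	intel_de_write(dev_priv, IPS_CTL, 0);
	/* BSpec says 42ms; the code above uses 100ms to avoid flaky timeouts */
	return intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100);
}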
@@ -6250,17 +6354,19 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
return;
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ drm_WARN_ON(dev,
+ sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
- DRM_ERROR("Timed out waiting for IPS disable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for IPS disable\n");
} else {
- I915_WRITE(IPS_CTL, 0);
- POSTING_READ(IPS_CTL);
+ intel_de_write(dev_priv, IPS_CTL, 0);
+ intel_de_posting_read(dev_priv, IPS_CTL);
}
/* We need to wait for a vblank before we can disable the plane. */
@@ -6382,13 +6488,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(state, primary);
enum pipe pipe = crtc->pipe;
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
@@ -6399,8 +6502,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
hsw_enable_ips(new_crtc_state);
- if (new_primary_state)
- intel_fbc_post_update(crtc);
+ intel_fbc_post_update(state, crtc);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
@@ -6415,20 +6517,16 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(state, primary);
enum pipe pipe = crtc->pipe;
if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
- if (new_primary_state &&
- intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+ if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
/* Display WA 827 */
@@ -6770,7 +6868,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
/*
@@ -6863,7 +6961,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
enum pipe pipe, bool apply)
{
- u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+ u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
if (apply)
@@ -6871,7 +6969,7 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
else
val &= ~mask;
- I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}
static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
@@ -6890,7 +6988,17 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
val |= MBUS_DBOX_B_CREDIT(8);
}
- I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
+ intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
+}
+
+static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+ HSW_LINETIME(crtc_state->linetime) |
+ HSW_IPS_LINETIME(crtc_state->ips_linetime));
}
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
@@ -6900,10 +7008,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
val |= HSW_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
static void hsw_crtc_enable(struct intel_atomic_state *state,
@@ -6916,7 +7024,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
bool psl_clkgate_wa;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
intel_encoders_pre_pll_enable(state, crtc);
@@ -6926,9 +7034,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_enable(state, crtc);
- if (intel_crtc_has_dp_encoder(new_crtc_state))
- intel_dp_set_m_n(new_crtc_state, M1_N1);
-
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(new_crtc_state);
@@ -6939,8 +7044,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(cpu_transcoder))
- I915_WRITE(PIPE_MULT(cpu_transcoder),
- new_crtc_state->pixel_multiplier - 1);
+ intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ new_crtc_state->pixel_multiplier - 1);
if (new_crtc_state->has_pch_encoder)
intel_cpu_transcoder_set_m_n(new_crtc_state,
@@ -6977,6 +7082,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) < 9)
intel_disable_primary_plane(new_crtc_state);
+ hsw_set_linetime_wm(new_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
@@ -6989,15 +7096,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 11)
icl_pipe_mbus_enable(crtc);
- /* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(new_crtc_state);
-
- if (new_crtc_state->has_pch_encoder)
- lpt_pch_enable(state, new_crtc_state);
-
- intel_crtc_vblank_on(new_crtc_state);
-
intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
@@ -7023,9 +7121,9 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (old_crtc_state->pch_pfit.enabled) {
- I915_WRITE(PF_CTL(pipe), 0);
- I915_WRITE(PF_WIN_POS(pipe), 0);
- I915_WRITE(PF_WIN_SZ(pipe), 0);
+ intel_de_write(dev_priv, PF_CTL(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
}
@@ -7067,16 +7165,16 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_PORT_SEL_MASK);
temp |= TRANS_DP_PORT_SEL_NONE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
/* disable DPLL_SEL */
- temp = I915_READ(PCH_DPLL_SEL);
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- I915_WRITE(PCH_DPLL_SEL, temp);
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
ilk_fdi_pll_disable(crtc);
@@ -7109,15 +7207,17 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
* The panel fitter should only be adjusted whilst the pipe is disabled,
* according to register description and PRM.
*/
- WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
- I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
- I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
+ intel_de_write(dev_priv, PFIT_PGM_RATIOS,
+ crtc_state->gmch_pfit.pgm_ratios);
+ intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
- I915_WRITE(BCLRPAT(crtc->pipe), 0);
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
@@ -7304,7 +7404,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
if (intel_crtc_has_dp_encoder(new_crtc_state))
@@ -7314,8 +7414,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
- I915_WRITE(CHV_CANVAS(pipe), 0);
+ intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+ intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
}
i9xx_set_pipeconf(new_crtc_state);
@@ -7356,8 +7456,10 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
+ intel_de_write(dev_priv, FP0(crtc->pipe),
+ crtc_state->dpll_hw_state.fp0);
+ intel_de_write(dev_priv, FP1(crtc->pipe),
+ crtc_state->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct intel_atomic_state *state,
@@ -7368,7 +7470,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
i9xx_set_pll_dividers(new_crtc_state);
@@ -7418,9 +7520,9 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
- I915_READ(PFIT_CONTROL));
- I915_WRITE(PFIT_CONTROL, 0);
+ drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
+ intel_de_read(dev_priv, PFIT_CONTROL));
+ intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
static void i9xx_crtc_disable(struct intel_atomic_state *state,
@@ -7477,6 +7579,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(dev_priv->bw_obj.state);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(dev_priv->cdclk.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
@@ -7500,8 +7604,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
- DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.base.id, crtc->base.name);
return;
}
@@ -7511,19 +7616,21 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
ret = drm_atomic_add_affected_connectors(state, &crtc->base);
- WARN_ON(IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
- DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.base.id, crtc->base.name);
crtc->active = false;
crtc->base.enabled = false;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ drm_WARN_ON(&dev_priv->drm,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
crtc_state->uapi.active = false;
crtc_state->uapi.connector_mask = 0;
crtc_state->uapi.encoder_mask = 0;
@@ -7543,8 +7650,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
crtc->enabled_power_domains = 0;
dev_priv->active_pipes &= ~BIT(pipe);
- dev_priv->min_cdclk[pipe] = 0;
- dev_priv->min_voltage_level[pipe] = 0;
+ cdclk_state->min_cdclk[pipe] = 0;
+ cdclk_state->min_voltage_level[pipe] = 0;
+ cdclk_state->active_pipes &= ~BIT(pipe);
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
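
Beyond the accessor conversion, this hunk moves the per-pipe cdclk bookkeeping (min_cdclk, min_voltage_level, and the active-pipe mask) out of loose fields on dev_priv and into the cdclk global atomic state object, so the forced-disable path has to look that object up and clear its entries explicitly. A condensed sketch of just that bookkeeping (hypothetical *_sketch name; field names as above):

static void crtc_cdclk_reset_sketch(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);

	/* this pipe no longer constrains cdclk or voltage level */
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);
}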
@@ -7563,7 +7671,8 @@ int intel_display_suspend(struct drm_device *dev)
state = drm_atomic_helper_suspend(dev);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
- DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
else
dev_priv->modeset_restore_state = state;
return ret;
@@ -7583,13 +7692,13 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.base.id,
- connector->base.name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
I915_STATE_WARN(!crtc_state,
"connector enabled without attached crtc\n");
@@ -7632,18 +7741,21 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
- DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
- DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
- pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
return -EINVAL;
} else {
return 0;
@@ -7668,15 +7780,17 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
return 0;
case PIPE_C:
if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
@@ -7687,7 +7801,8 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
return -EINVAL;
}
return 0;
@@ -7701,6 +7816,7 @@ static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;
@@ -7713,7 +7829,7 @@ retry:
* Hence the bw of each lane in terms of the mode signal
* is:
*/
- link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
fdi_dotclock = adjusted_mode->crtc_clock;
@@ -7731,8 +7847,9 @@ retry:
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
- DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
needs_recompute = true;
pipe_config->bw_constrained = true;
@@ -7774,15 +7891,17 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
+static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->uapi.crtc->dev);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
+ crtc_state->ips_enabled = false;
+
if (!hsw_crtc_state_ips_capable(crtc_state))
- return false;
+ return 0;
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -7791,18 +7910,27 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
* completely disable it.
*/
if (crtc_state->crc_enabled)
- return false;
+ return 0;
/* IPS should be fine as long as at least one plane is enabled. */
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
- return false;
+ return 0;
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) &&
- crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
- return false;
+ if (IS_BROADWELL(dev_priv)) {
+ const struct intel_cdclk_state *cdclk_state;
- return true;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ return 0;
+ }
+
+ crtc_state->ips_enabled = true;
+
+ return 0;
}
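
Note the calling-convention change here: hsw_compute_ips_config() used to return bool, but fetching the cdclk global state through intel_atomic_get_cdclk_state() can fail with an ERR_PTR, so the function now returns 0/-errno and stores its answer in crtc_state->ips_enabled instead. A condensed sketch of the error-propagation shape (hypothetical *_sketch name; the 95%-of-cdclk limit is the Broadwell rule quoted above, applied here unconditionally for brevity):

static int ips_config_sketch(struct intel_crtc_state *crtc_state,
			     struct intel_atomic_state *state)
{
	const struct intel_cdclk_state *cdclk_state;

	crtc_state->ips_enabled = false;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);	/* propagate, don't guess */

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	crtc_state->ips_enabled =
		crtc_state->pixel_rate <= cdclk_state->logical.cdclk * 95 / 100;

	return 0;
}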
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
@@ -7884,9 +8012,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
if (adjusted_mode->crtc_clock > clock_limit) {
- DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
- adjusted_mode->crtc_clock, clock_limit,
- yesno(pipe_config->double_wide));
+ drm_dbg_kms(&dev_priv->drm,
+ "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ adjusted_mode->crtc_clock, clock_limit,
+ yesno(pipe_config->double_wide));
return -EINVAL;
}
@@ -7898,7 +8027,8 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* for output conversion from RGB->YCBCR. So if CTM is already
* applied we can't support YCBCR420 output.
*/
- DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR420 and CTM together are not possible\n");
return -EINVAL;
}
@@ -7910,13 +8040,15 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
*/
if (pipe_config->pipe_src_w & 1) {
if (pipe_config->double_wide) {
- DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Odd pipe source width not supported with double wide pipe\n");
return -EINVAL;
}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev_priv)) {
- DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Odd pipe source width not supported with dual link LVDS\n");
return -EINVAL;
}
}
@@ -7997,13 +8129,15 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
* indicates as much.
*/
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+ bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+ PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
- enableddisabled(bios_lvds_use_ssc),
- enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ drm_dbg_kms(&dev_priv->drm,
+ "SSC %s by BIOS, overriding VBT which says %s\n",
+ enableddisabled(bios_lvds_use_ssc),
+ enableddisabled(dev_priv->vbt.lvds_use_ssc));
dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
@@ -8090,10 +8224,11 @@ static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
- I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
- I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+ intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+ intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+ intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
@@ -8119,33 +8254,42 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
enum transcoder transcoder = crtc_state->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 5) {
- I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
- I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
- I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
+ m_n->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
+ m_n->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
+ m_n->link_n);
/*
* M2_N2 registers are set only if DRRS is supported
* (to make sure the registers are not unnecessarily accessed).
*/
if (m2_n2 && crtc_state->has_drrs &&
transcoder_has_m2_n2(dev_priv, transcoder)) {
- I915_WRITE(PIPE_DATA_M2(transcoder),
- TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
- I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
- I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
- I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
+ TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
+ m2_n2->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
+ m2_n2->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
+ m2_n2->link_n);
}
} else {
- I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
- I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
- I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
}
}
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (m_n == M1_N1) {
dp_m_n = &crtc_state->dp_m_n;
@@ -8158,7 +8302,7 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
*/
dp_m_n = &crtc_state->dp_m2_n2;
} else {
- DRM_ERROR("Unsupported divider value\n");
+ drm_err(&i915->drm, "Unsupported divider value\n");
return;
}
@@ -8212,9 +8356,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
u32 coreclk, reg_val;
/* Enable Refclk */
- I915_WRITE(DPLL(pipe),
- pipe_config->dpll_hw_state.dpll &
- ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8314,8 +8457,8 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
int vco;
/* Enable Refclk and SSC */
- I915_WRITE(DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8614,27 +8757,22 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
}
if (INTEL_GEN(dev_priv) > 3)
- I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
-
- I915_WRITE(HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -8642,7 +8780,8 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
* bits. */
if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, VTOTAL(pipe),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
}
@@ -8655,9 +8794,8 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
- I915_WRITE(PIPESRC(pipe),
- ((crtc_state->pipe_src_w - 1) << 16) |
- (crtc_state->pipe_src_h - 1));
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -8670,9 +8808,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
else
- return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -8683,33 +8821,33 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 tmp;
- tmp = I915_READ(HTOTAL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = I915_READ(HBLANK(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hblank_start =
(tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_hblank_end =
((tmp >> 16) & 0xffff) + 1;
}
- tmp = I915_READ(HSYNC(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
- tmp = I915_READ(VTOTAL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = I915_READ(VBLANK(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vblank_start =
(tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vblank_end =
((tmp >> 16) & 0xffff) + 1;
}
- tmp = I915_READ(VSYNC(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
@@ -8727,7 +8865,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
- tmp = I915_READ(PIPESRC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
@@ -8768,7 +8906,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -8815,8 +8953,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
- POSTING_READ(PIPECONF(crtc->pipe));
+ intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
+ intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -8833,7 +8971,9 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &intel_limits_i8xx_lvds;
@@ -8846,7 +8986,8 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8868,7 +9009,9 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
if (intel_is_dual_link_lvds(dev_priv))
@@ -8888,7 +9031,8 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8911,7 +9055,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &pnv_limits_lvds;
@@ -8922,7 +9068,8 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8945,7 +9092,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &intel_limits_i9xx_lvds;
@@ -8956,7 +9105,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8970,6 +9120,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_chv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8977,7 +9128,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8991,6 +9142,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_vlv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8998,7 +9150,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -9025,7 +9177,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
if (!i9xx_has_pfit(dev_priv))
return;
- tmp = I915_READ(PFIT_CONTROL);
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
@@ -9039,7 +9191,8 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
}
pipe_config->gmch_pfit.control = tmp;
- pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+ pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
+ PFIT_PGM_RATIOS);
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9087,11 +9240,11 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
return;
}
@@ -9099,7 +9252,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(DSPCNTR(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
@@ -9120,34 +9273,37 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->format = drm_format_info(fourcc);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = I915_READ(DSPOFFSET(i9xx_plane));
- base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
- offset = I915_READ(DSPTILEOFF(i9xx_plane));
+ offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i9xx_plane));
else
- offset = I915_READ(DSPLINOFF(i9xx_plane));
- base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ offset = intel_de_read(dev_priv,
+ DSPLINOFF(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else {
- base = I915_READ(DSPADDR(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
}
plane_config->base = base;
- val = I915_READ(PIPESRC(pipe));
+ val = intel_de_read(dev_priv, PIPESRC(pipe));
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;
- val = I915_READ(DSPSTRIDE(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
plane_config->fb = intel_fb;
}
@@ -9192,11 +9348,12 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
if (tmp & PIPEMISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
- WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
@@ -9214,7 +9371,7 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 tmp;
- tmp = I915_READ(DSPCNTR(i9xx_plane));
+ tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
if (tmp & DISPPLANE_GAMMA_ENABLE)
crtc_state->gamma_enable = true;
@@ -9245,7 +9402,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = false;
- tmp = I915_READ(PIPECONF(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
goto out;
@@ -9274,7 +9431,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
PIPECONF_GAMMA_MODE_SHIFT;
if (IS_CHERRYVIEW(dev_priv))
- pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+ pipe_config->cgm_mode = intel_de_read(dev_priv,
+ CGM_PIPE_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
@@ -9292,14 +9450,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
else
- tmp = I915_READ(DPLL_MD(crtc->pipe));
+ tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- tmp = I915_READ(DPLL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
@@ -9309,10 +9467,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
- pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+ pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
+ DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
- pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+ pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
+ FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
+ FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
@@ -9381,8 +9542,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- u32 temp = I915_READ(PCH_DPLL(i));
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
continue;
@@ -9394,15 +9555,16 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
- has_panel, has_lvds, has_ck505, using_ssc_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- val = I915_READ(PCH_DREF_CONTROL);
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
/* As we must carefully and slowly disable/enable each source in turn,
* compute the final state we want first and check if we need to
@@ -9454,14 +9616,14 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on panel\n");
+ drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else
val &= ~DREF_SSC1_ENABLE;
/* Get SSC going before enabling the outputs */
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -9469,30 +9631,31 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on eDP\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
} else {
- DRM_DEBUG_KMS("Disabling CPU source output\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Turn off CPU output */
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
if (!using_ssc_source) {
- DRM_DEBUG_KMS("Disabling SSC source\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
@@ -9501,8 +9664,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Turn off SSC1 */
val &= ~DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
}
}
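[annotation] The ilk_init_pch_refclk() sequence above leans on one fixed idiom at every step: write the control register, read it back with intel_de_posting_read() to flush the posted MMIO write, then udelay(200) so the hardware has settled before the next source is touched. A stand-alone sketch of that idiom, with a fake register file and nanosleep standing in for udelay; the names mirror the driver but the register offset and bit are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static volatile uint32_t fake_regs[64];     /* stand-in MMIO aperture */

    static void reg_write(unsigned int reg, uint32_t val)
    {
            fake_regs[reg / 4] = val;
    }

    /* The read-back forces the (normally posted) MMIO write to complete
     * before the settle delay starts; the value itself is discarded. */
    static void posting_read(unsigned int reg)
    {
            (void)fake_regs[reg / 4];
    }

    static void udelay(unsigned int us)
    {
            struct timespec ts = { 0, (long)us * 1000L };
            nanosleep(&ts, NULL);
    }

    int main(void)
    {
            unsigned int PCH_DREF_CONTROL = 0x40;   /* made-up offset */
            uint32_t val = 0;

            val |= 1u << 10;                /* pretend DREF_SSC1_ENABLE */
            reg_write(PCH_DREF_CONTROL, val);
            posting_read(PCH_DREF_CONTROL);
            udelay(200);                    /* settle, as in the hunk */

            printf("reg = 0x%x\n", fake_regs[PCH_DREF_CONTROL / 4]);
            return 0;
    }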
@@ -9514,21 +9677,21 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
- if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- DRM_ERROR("FDI mPHY reset assert timeout\n");
+ drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
- tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
- if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
@@ -9617,10 +9780,11 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
{
u32 reg, tmp;
- if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
- with_fdi, "LP PCH doesn't have FDI\n"))
+ if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9714,10 +9878,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
u32 tmp;
int idx = BEND_IDX(steps);
- if (WARN_ON(steps % 5 != 0))
+ if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
return;
- if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
mutex_lock(&dev_priv->sb_lock);
@@ -9740,8 +9904,8 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 ctl = I915_READ(SPLL_CTL);
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -9760,8 +9924,8 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 ctl = I915_READ(WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -9810,17 +9974,17 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
dev_priv->pch_ssc_use = 0;
if (spll_uses_pch_ssc(dev_priv)) {
- DRM_DEBUG_KMS("SPLL using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
@@ -9885,8 +10049,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* This would end up with an odd purple hue over
* the entire display. Make sure we don't do it.
*/
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
@@ -9898,8 +10062,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(PIPECONF(pipe), val);
- POSTING_READ(PIPECONF(pipe));
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
@@ -9921,8 +10085,8 @@ static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
- I915_WRITE(PIPECONF(cpu_transcoder), val);
- POSTING_READ(PIPECONF(cpu_transcoder));
+ intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
@@ -9965,7 +10129,10 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
BIT(PLANE_CURSOR))) == 0)
val |= PIPEMISC_HDR_MODE_PRECISION;
- I915_WRITE(PIPEMISC(crtc->pipe), val);
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+
+ intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
@@ -9973,7 +10140,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
case PIPEMISC_DITHER_6_BPC:
@@ -10126,8 +10293,9 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
refclk = dev_priv->vbt.lvds_ssc_freq;
}
@@ -10149,15 +10317,17 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
ilk_compute_dpll(crtc, crtc_state, NULL);
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
- DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
@@ -10171,12 +10341,12 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
- m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
- m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
- m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+ m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
+ m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
+ m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
- m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+ m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
+ m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
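[annotation] intel_pch_transcoder_get_m_n() above reads PCH_TRANS_DATA_M1 twice because one 32-bit word packs two things: the transfer-unit size in its top bits and the data M value below them. A sketch of the split; the TU field layout (bits 30:25 holding TU size minus one) is restated from i915_reg.h and worth checking against the headers:

    #include <stdint.h>
    #include <stdio.h>

    #define TU_SIZE_SHIFT   25
    #define TU_SIZE_MASK    (0x3fu << TU_SIZE_SHIFT)    /* bits 30:25 */

    int main(void)
    {
            /* pretend PCH_TRANS_DATA_M1 readout: TU size 64, data M 0x123456 */
            uint32_t data_m1 = ((64u - 1) << TU_SIZE_SHIFT) | 0x123456u;

            uint32_t gmch_m = data_m1 & ~TU_SIZE_MASK;  /* data M value */
            uint32_t tu = ((data_m1 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

            printf("gmch_m=0x%x tu=%u\n", gmch_m, tu);  /* 0x123456, 64 */
            return 0;
    }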
@@ -10189,30 +10359,38 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
if (INTEL_GEN(dev_priv) >= 5) {
- m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
- m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
- m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+ m_n->link_m = intel_de_read(dev_priv,
+ PIPE_LINK_M1(transcoder));
+ m_n->link_n = intel_de_read(dev_priv,
+ PIPE_LINK_N1(transcoder));
+ m_n->gmch_m = intel_de_read(dev_priv,
+ PIPE_DATA_M1(transcoder))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
- m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+ m_n->gmch_n = intel_de_read(dev_priv,
+ PIPE_DATA_N1(transcoder));
+ m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
- m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
- m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
- m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
+ m2_n2->link_m = intel_de_read(dev_priv,
+ PIPE_LINK_M2(transcoder));
+ m2_n2->link_n = intel_de_read(dev_priv,
+ PIPE_LINK_N2(transcoder));
+ m2_n2->gmch_m = intel_de_read(dev_priv,
+ PIPE_DATA_M2(transcoder))
& ~TU_SIZE_MASK;
- m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
- m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+ m2_n2->gmch_n = intel_de_read(dev_priv,
+ PIPE_DATA_N2(transcoder));
+ m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
} else {
- m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
- m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
- m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+ m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
+ m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
+ m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
- m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+ m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
+ m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
}
@@ -10247,12 +10425,14 @@ static void skl_get_pfit_config(struct intel_crtc *crtc,
/* find scaler attached to this pipe */
for (i = 0; i < crtc->num_scalers; i++) {
- ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
+ ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
id = i;
pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
- pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+ pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+ SKL_PS_WIN_POS(crtc->pipe, i));
+ pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+ SKL_PS_WIN_SZ(crtc->pipe, i));
scaler_state->scalers[i].in_use = true;
break;
}
@@ -10284,11 +10464,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
return;
}
@@ -10296,7 +10476,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(PLANE_CTL(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
if (INTEL_GEN(dev_priv) >= 11)
pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
@@ -10304,7 +10484,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & PLANE_CTL_FORMAT_MASK;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
+ alpha = intel_de_read(dev_priv,
+ PLANE_COLOR_CTL(pipe, plane_id));
alpha &= PLANE_COLOR_ALPHA_MASK;
} else {
alpha = val & PLANE_CTL_ALPHA_MASK;
@@ -10368,16 +10549,16 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
val & PLANE_CTL_FLIP_HORIZONTAL)
plane_config->rotation |= DRM_MODE_REFLECT_X;
- base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+ base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
- offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
+ offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
- val = I915_READ(PLANE_SIZE(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
fb->height = ((val >> 16) & 0xffff) + 1;
fb->width = ((val >> 0) & 0xffff) + 1;
- val = I915_READ(PLANE_STRIDE(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
@@ -10385,10 +10566,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
plane_config->fb = intel_fb;
return;
@@ -10404,19 +10586,21 @@ static void ilk_get_pfit_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
- tmp = I915_READ(PF_CTL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
if (tmp & PF_ENABLE) {
pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
- pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+ pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+ PF_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+ PF_WIN_SZ(crtc->pipe));
/* We currently do not free assignements of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now. */
if (IS_GEN(dev_priv, 7)) {
- WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
- PF_PIPE_SEL_IVB(crtc->pipe));
+ drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
+ PF_PIPE_SEL_IVB(crtc->pipe));
}
}
}
@@ -10441,7 +10625,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
- tmp = I915_READ(PIPECONF(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
goto out;
@@ -10478,18 +10662,19 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
- pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
- if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+ if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
pipe_config->has_pch_encoder = true;
- tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
@@ -10502,7 +10687,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
*/
pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
- tmp = I915_READ(PCH_DPLL_SEL);
+ tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
pll_id = DPLL_ID_PCH_PLL_B;
else
@@ -10513,8 +10698,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_shared_dpll_by_id(dev_priv, pll_id);
pll = pipe_config->shared_dpll;
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
@@ -10552,8 +10737,9 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
}
@@ -10567,10 +10753,10 @@ static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
enum intel_dpll_id id;
u32 temp;
- temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10585,24 +10771,25 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
u32 temp;
if (intel_phy_is_combo(dev_priv, phy)) {
- temp = I915_READ(ICL_DPCLKA_CFGCR0) &
+ temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
} else if (intel_phy_is_tc(dev_priv, phy)) {
- u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
if (clk_sel == DDI_CLK_SEL_MG) {
id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
port));
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
} else {
- WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+ drm_WARN_ON(&dev_priv->drm,
+ clk_sel < DDI_CLK_SEL_TBT_162);
id = DPLL_ID_ICL_TBTPLL;
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
}
} else {
- WARN(1, "Invalid port %x\n", port);
+ drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
return;
}
@@ -10629,7 +10816,7 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
id = DPLL_ID_SKL_DPLL2;
break;
default:
- DRM_ERROR("Incorrect port type\n");
+ drm_err(&dev_priv->drm, "Incorrect port type\n");
return;
}
@@ -10642,10 +10829,10 @@ static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
enum intel_dpll_id id;
u32 temp;
- temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+ temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
id = temp >> (port * 3 + 1);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10655,7 +10842,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
- u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+ u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
switch (ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
@@ -10723,7 +10910,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
bool force_thru = false;
enum pipe trans_pipe;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(panel_transcoder));
if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
@@ -10738,8 +10926,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to transcoder %s\n",
- transcoder_name(panel_transcoder));
+ drm_WARN(dev, 1,
+ "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
force_thru = true;
@@ -10767,11 +10956,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
/*
* Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
*/
- WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
- enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+ drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10780,7 +10969,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
- tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
}
@@ -10805,7 +10994,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10825,11 +11014,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
break;
/* XXX: this works for video mode only */
- tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+ tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
if (!(tmp & DPI_ENABLE))
continue;
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
continue;
@@ -10853,7 +11042,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
port = (cpu_transcoder == TRANSCODER_DSI_A) ?
PORT_A : PORT_B;
} else {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 12)
port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
else
@@ -10873,7 +11063,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
pll = pipe_config->shared_dpll;
if (pll) {
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+ drm_WARN_ON(&dev_priv->drm,
+ !pll->info->funcs->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
}
@@ -10883,10 +11074,10 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
* the PCH transcoder is on.
*/
if (INTEL_GEN(dev_priv) < 9 &&
- (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
- tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
@@ -10899,7 +11090,8 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr
{
u32 trans_port_sync, master_select;
- trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+ trans_port_sync = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder));
if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -10943,8 +11135,9 @@ static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
intel_display_power_put(dev_priv, power_domain, trans_wakeref);
}
- WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
}
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
@@ -10955,6 +11148,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain power_domain;
u64 power_domain_mask;
bool active;
+ u32 tmp;
pipe_config->master_transcoder = INVALID_TRANSCODER;
@@ -10974,7 +11168,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_GEN9_LP(dev_priv) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config,
&power_domain_mask, wakerefs)) {
- WARN_ON(active);
+ drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -10990,7 +11184,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_src_size(crtc, pipe_config);
if (IS_HASWELL(dev_priv)) {
- u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ u32 tmp = intel_de_read(dev_priv,
+ PIPECONF(pipe_config->cpu_transcoder));
if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
@@ -11013,12 +11208,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
}
- pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+ pipe_config->gamma_mode = intel_de_read(dev_priv,
+ GAMMA_MODE(crtc->pipe));
- pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
if (INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+ tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
pipe_config->gamma_enable = true;
@@ -11031,8 +11228,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
+ tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+ pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ pipe_config->ips_linetime =
+ REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
+
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wf) {
@@ -11047,7 +11250,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (hsw_crtc_supports_ips(crtc)) {
if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
+ pipe_config->ips_enabled = intel_de_read(dev_priv,
+ IPS_CTL) & IPS_ENABLE;
else {
/*
* We cannot readout IPS state on broadwell, set to
@@ -11061,7 +11265,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
- I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ intel_de_read(dev_priv,
+ PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
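[annotation] The readout added to hsw_get_pipe_config() a few hunks up fills pipe_config->linetime and ips_linetime from WM_LINETIME via REG_FIELD_GET(), which derives the shift from the mask itself so a single per-field #define serves both readout and programming, and pairs with the atomic-check computation later in this patch so the state checker can compare software and hardware values. A stand-alone sketch of that extraction; the exact HSW_LINETIME_MASK / HSW_IPS_LINETIME_MASK bit ranges are restated from memory and should be checked against i915_reg.h:

    #include <stdint.h>
    #include <stdio.h>

    /* mask & -mask isolates the mask's lowest set bit, i.e. 1 << shift */
    #define FIELD_GET(mask, val)  (((val) & (mask)) / ((mask) & -(mask)))

    #define HSW_LINETIME_MASK       0x000001ffu     /* bits 8:0 */
    #define HSW_IPS_LINETIME_MASK   0x01ff0000u     /* bits 24:16 */

    int main(void)
    {
            uint32_t wm_linetime = (119u << 16) | 119u;  /* pretend readout */

            printf("linetime=%u ips_linetime=%u\n",
                   (unsigned)FIELD_GET(HSW_LINETIME_MASK, wm_linetime),
                   (unsigned)FIELD_GET(HSW_IPS_LINETIME_MASK, wm_linetime));
            return 0;
    }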
@@ -11150,7 +11355,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
plane_state, 0);
if (src_x != 0 || src_y != 0) {
- DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
return -EINVAL;
}
@@ -11181,10 +11387,11 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
return -EINVAL;
}
@@ -11255,6 +11462,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -11267,14 +11475,15 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
- DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
case 256:
@@ -11283,8 +11492,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
case 2048:
break;
default:
- DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
- fb->pitches[0]);
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
return -EINVAL;
}
@@ -11322,17 +11531,17 @@ static void i845_update_cursor(struct intel_plane *plane,
if (plane->cursor.base != base ||
plane->cursor.size != size ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(PIPE_A), 0);
- I915_WRITE_FW(CURBASE(PIPE_A), base);
- I915_WRITE_FW(CURSIZE, size);
- I915_WRITE_FW(CURPOS(PIPE_A), pos);
- I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE, size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
plane->cursor.base = base;
plane->cursor.size = size;
plane->cursor.cntl = cntl;
} else {
- I915_WRITE_FW(CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -11357,7 +11566,7 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
@@ -11483,20 +11692,22 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
- DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -11512,7 +11723,8 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -11573,17 +11785,18 @@ static void i9xx_update_cursor(struct intel_plane *plane,
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
if (HAS_CUR_FBC(dev_priv))
- I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
- I915_WRITE_FW(CURCNTR(pipe), cntl);
- I915_WRITE_FW(CURPOS(pipe), pos);
- I915_WRITE_FW(CURBASE(pipe), base);
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
plane->cursor.base = base;
plane->cursor.size = fbc_ctl;
plane->cursor.cntl = cntl;
} else {
- I915_WRITE_FW(CURPOS(pipe), pos);
- I915_WRITE_FW(CURBASE(pipe), base);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -11614,7 +11827,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- val = I915_READ(CURCNTR(plane->pipe));
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
ret = val & MCURSOR_MODE;
@@ -11700,13 +11913,13 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_crtc_state *crtc_state;
int ret, i = -1;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.id, encoder->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
old->restore_state = NULL;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
/*
* Algorithm gets a little messy:
@@ -11753,7 +11966,8 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
* If we didn't find an unused CRTC, don't use any.
*/
if (!crtc) {
- DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no pipe available for load-detect\n");
ret = -ENODEV;
goto fail;
}
@@ -11804,13 +12018,16 @@ found:
if (!ret)
ret = drm_atomic_add_affected_planes(restore_state, crtc);
if (ret) {
- DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to create a copy of old state to restore: %i\n",
+ ret);
goto fail;
}
ret = drm_atomic_commit(state);
if (ret) {
- DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to set mode on load-detect pipe\n");
goto fail;
}
@@ -11843,20 +12060,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_atomic_state *state = old->restore_state;
int ret;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.id, encoder->name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
if (!state)
return;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret)
- DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Couldn't release load detect pipe: %i\n", ret);
drm_atomic_state_put(state);
}
@@ -11921,8 +12140,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7 : 14;
break;
default:
- DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
- "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
@@ -11931,7 +12151,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
+ u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
+ LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -12142,7 +12363,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
was_visible = old_plane_state->uapi.visible;
visible = plane_state->uapi.visible;
- if (!was_crtc_enabled && WARN_ON(was_visible))
+ if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
was_visible = false;
/*
@@ -12168,11 +12389,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- crtc->base.base.id, crtc->base.name,
- plane->base.base.id, plane->base.name,
- was_visible, visible,
- turn_off, turn_on, mode_changed);
+ drm_dbg_atomic(&dev_priv->drm,
+ "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ was_visible, visible,
+ turn_off, turn_on, mode_changed);
if (turn_on) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -12349,8 +12571,9 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
}
if (!linked_state) {
- DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
- hweight8(crtc_state->nv12_planes));
+ drm_dbg_kms(&dev_priv->drm,
+ "Need %d free Y planes for planar YUV\n",
+ hweight8(crtc_state->nv12_planes));
return -EINVAL;
}
@@ -12361,7 +12584,8 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->planar_linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
- DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+ drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
+ linked->base.name, plane->base.name);
/* Copy parameters to slave plane */
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
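[annotation] For the planar-YUV path above: on gen11 a planar framebuffer needs a second hardware plane to fetch the Y data, so icl_check_nv12_planes() picks a free plane, links the two plane states in both directions, and marks the borrowed plane active and in need of reprogramming. A stand-alone sketch of that bookkeeping; the structs are trimmed to the fields the hunk touches and the field names are analogues, not the driver's:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    struct plane_state {
            int id;
            struct plane_state *linked;     /* planar_linked_plane analogue */
            bool is_y_plane;                /* planar_slave analogue */
    };

    struct crtc_state {
            uint32_t active_planes;
            uint32_t update_planes;
    };

    static void link_y_plane(struct crtc_state *crtc,
                             struct plane_state *uv, struct plane_state *y)
    {
            /* link both ways so either side can find its partner */
            uv->linked = y;
            y->linked = uv;
            y->is_y_plane = true;

            /* the borrowed Y plane must be enabled and reprogrammed too */
            crtc->active_planes |= BIT(y->id);
            crtc->update_planes |= BIT(y->id);
    }

    int main(void)
    {
            struct crtc_state crtc = { 0 };
            struct plane_state uv = { .id = 0 }, y = { .id = 5 };

            link_y_plane(&crtc, &uv, &y);
            printf("active=0x%x update=0x%x\n",
                   crtc.active_planes, crtc.update_planes);
            return 0;
    }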
@@ -12398,120 +12622,74 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static bool
-intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
+static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc)
- continue;
- if (connector->has_tile &&
- connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
- return true;
- }
+ if (!crtc_state->hw.enable)
+ return 0;
- return false;
+ return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ adjusted_mode->crtc_clock);
}
-static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_cdclk_state *cdclk_state)
{
- crtc_state->master_transcoder = INVALID_TRANSCODER;
- crtc_state->sync_mode_slaves_mask = 0;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ cdclk_state->logical.cdclk);
}
-static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
- struct intel_crtc_state *crtc_state,
- int num_tiled_conns)
+static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct drm_connector *master_connector;
- struct drm_connector_list_iter conn_iter;
- struct drm_crtc *master_crtc = NULL;
- struct drm_crtc_state *master_crtc_state;
- struct intel_crtc_state *master_pipe_config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ u16 linetime_wm;
- if (INTEL_GEN(dev_priv) < 11)
+ if (!crtc_state->hw.enable)
return 0;
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
- return 0;
+ linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
+ crtc_state->pixel_rate);
- /*
- * In case of tiled displays there could be one or more slaves but there is
- * only one master. Lets make the CRTC used by the connector corresponding
- * to the last horizonal and last vertical tile a master/genlock CRTC.
- * All the other CRTCs corresponding to other tiles of the same Tile group
- * are the slave CRTCs and hold a pointer to their genlock CRTC.
- * If all tiles not present do not make master slave assignments.
- */
- if (!connector->has_tile ||
- crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
- crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
- num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- reset_port_sync_mode_state(crtc_state);
- return 0;
- }
- /* Last Horizontal and last vertical tile connector is a master
- * Master's crtc state is already populated in slave for port sync
- */
- if (connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
- return 0;
+ /* Display WA #1135: BXT:ALL GLK:ALL */
+ if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
+ linetime_wm /= 2;
- /* Loop through all connectors and configure the Slave crtc_state
- * to point to the correct master.
- */
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(master_connector, &conn_iter) {
- struct drm_connector_state *master_conn_state = NULL;
+ return linetime_wm;
+}
- if (!(master_connector->has_tile &&
- master_connector->tile_group->id == connector->tile_group->id))
- continue;
- if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
- master_connector->tile_v_loc != master_connector->num_v_tile - 1)
- continue;
+static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_cdclk_state *cdclk_state;
- master_conn_state = drm_atomic_get_connector_state(&state->base,
- master_connector);
- if (IS_ERR(master_conn_state)) {
- drm_connector_list_iter_end(&conn_iter);
- return PTR_ERR(master_conn_state);
- }
- if (master_conn_state->crtc) {
- master_crtc = master_conn_state->crtc;
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (INTEL_GEN(dev_priv) >= 9)
+ crtc_state->linetime = skl_linetime_wm(crtc_state);
+ else
+ crtc_state->linetime = hsw_linetime_wm(crtc_state);
- if (!master_crtc) {
- DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
- crtc->base.id);
- return -EINVAL;
- }
+ if (!hsw_crtc_supports_ips(crtc))
+ return 0;
- master_crtc_state = drm_atomic_get_crtc_state(&state->base,
- master_crtc);
- if (IS_ERR(master_crtc_state))
- return PTR_ERR(master_crtc_state);
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- master_pipe_config = to_intel_crtc_state(master_crtc_state);
- crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
- master_pipe_config->sync_mode_slaves_mask |=
- BIT(crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
- transcoder_name(crtc_state->master_transcoder),
- crtc->base.id,
- master_pipe_config->sync_mode_slaves_mask);
+ crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
+ cdclk_state);
return 0;
}
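[annotation] The new helpers above, which replace the removed port-sync master bookkeeping in this region, compute the line-time watermark fields in eighths of a microsecond: crtc_clock and cdclk are in kHz, so htotal * 1000 / clock gives the line period in microseconds and the extra factor of 8 scales it to the register's 0.125 us units. The gen9+ variant divides by pixel_rate, rounds up instead of to nearest, and halves the result for display WA #1135 when IPC is enabled on BXT/GLK; the IPS variant divides by the logical cdclk instead of the pixel clock. A quick stand-alone check of the arithmetic with a common 1080p timing:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            unsigned int crtc_htotal = 2200;    /* 1920x1080@60 CEA timing */
            unsigned int crtc_clock = 148500;   /* pixel clock, kHz */

            unsigned int linetime =
                    DIV_ROUND_CLOSEST(crtc_htotal * 1000 * 8, crtc_clock);

            /* 17600000 / 148500 ~= 118.5 -> 119, i.e. ~14.9 us per line */
            printf("linetime = %u eighths of a microsecond\n", linetime);
            return 0;
    }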
@@ -12531,7 +12709,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(crtc_state->shared_dpll)) {
+ !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
@@ -12551,17 +12729,18 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- ret = 0;
if (dev_priv->display.compute_pipe_wm) {
ret = dev_priv->display.compute_pipe_wm(crtc_state);
if (ret) {
- DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Target pipe watermarks are invalid\n");
return ret;
}
}
if (dev_priv->display.compute_intermediate_wm) {
- if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !dev_priv->display.compute_pipe_wm))
return 0;
/*
@@ -12571,23 +12750,39 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
*/
ret = dev_priv->display.compute_intermediate_wm(crtc_state);
if (ret) {
- DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No valid intermediate pipe watermarks are possible\n");
return ret;
}
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed || crtc_state->update_pipe)
+ if (mode_changed || crtc_state->update_pipe) {
ret = skl_update_scaler_crtc(crtc_state);
- if (!ret)
- ret = intel_atomic_setup_scalers(dev_priv, crtc,
- crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ if (ret)
+ return ret;
}
- if (HAS_IPS(dev_priv))
- crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
+ if (HAS_IPS(dev_priv)) {
+ ret = hsw_compute_ips_config(crtc_state);
+ if (ret)
+ return ret;
+ }
- return ret;
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ ret = hsw_compute_linetime_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ }
+
+ return 0;
}
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
@@ -12620,6 +12815,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
struct intel_crtc_state *pipe_config)
{
struct drm_connector *connector = conn_state->connector;
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -12641,11 +12837,13 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
}
if (bpp < pipe_config->pipe_bpp) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
- "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
- connector->base.id, connector->name,
- bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+ "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ connector->base.id, connector->name,
+ bpp, 3 * info->bpc,
+ 3 * conn_state->max_requested_bpc,
+ pipe_config->pipe_bpp);
pipe_config->pipe_bpp = bpp;
}
@@ -12705,10 +12903,13 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
- DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->gmch_m, m_n->gmch_n,
- m_n->link_m, m_n->link_n, m_n->tu);
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+
+ drm_dbg_kms(&i915->drm,
+ "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
}
static void
@@ -12784,27 +12985,31 @@ static const char *output_formats(enum intel_output_format format)
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_format_name_buf format_name;
if (!fb) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- yesno(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ yesno(plane_state->uapi.visible));
return;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->format->format, &format_name),
- yesno(plane_state->uapi.visible));
- DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->format->format, &format_name),
+ yesno(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id);
if (plane_state->uapi.visible)
- DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
+ drm_dbg_kms(&i915->drm,
+ "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
@@ -12818,22 +13023,24 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
char buf[64];
int i;
- DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
- crtc->base.base.id, crtc->base.name,
- yesno(pipe_config->hw.enable), context);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
+ crtc->base.base.id, crtc->base.name,
+ yesno(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
- yesno(pipe_config->hw.active),
- buf, pipe_config->output_types,
- output_formats(pipe_config->output_format));
+ drm_dbg_kms(&dev_priv->drm,
+ "active: %s, output_types: %s (0x%x), output format: %s\n",
+ yesno(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ output_formats(pipe_config->output_format));
- DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
@@ -12849,13 +13056,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
&pipe_config->dp_m2_n2);
}
- DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+ drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
+ pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
@@ -12866,50 +13075,59 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
- DRM_DEBUG_KMS("requested mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
- DRM_DEBUG_KMS("adjusted mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
- pipe_config->port_clock,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
- pipe_config->pixel_rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
+ pipe_config->port_clock,
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+ pipe_config->pixel_rate);
+
+ drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
if (INTEL_GEN(dev_priv) >= 9)
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ drm_dbg_kms(&dev_priv->drm,
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
if (HAS_GMCH(dev_priv))
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
+ drm_dbg_kms(&dev_priv->drm,
+ "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
else
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- enableddisabled(pipe_config->pch_pfit.enabled),
- yesno(pipe_config->pch_pfit.force_thru));
+ drm_dbg_kms(&dev_priv->drm,
+ "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled),
+ yesno(pipe_config->pch_pfit.force_thru));
- DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide);
+ drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(dev_priv))
- DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
else
- DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
- DRM_DEBUG_KMS("MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
+ drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
dump_planes:
if (!state)
@@ -12957,24 +13175,21 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
encoder = to_intel_encoder(connector_state->best_encoder);
- WARN_ON(!connector_state->crtc);
+ drm_WARN_ON(dev, !connector_state->crtc);
switch (encoder->type) {
- unsigned int port_mask;
case INTEL_OUTPUT_DDI:
- if (WARN_ON(!HAS_DDI(to_i915(dev))))
+ if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
break;
/* else, fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
- port_mask = 1 << encoder->port;
-
/* the same port mustn't appear more than once */
- if (used_ports & port_mask)
+ if (used_ports & BIT(encoder->port))
ret = false;
- used_ports |= port_mask;
+ used_ports |= BIT(encoder->port);
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
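/*
 * Sketch of the duplicate-port test above: used_ports collects one bit
 * per port via BIT() (the (1UL << n) macro from <linux/bits.h>), so a
 * port claimed by two encoders trips the mask check. The helper itself
 * is hypothetical.
 */
static bool ports_are_unique(const enum port *ports, int n)
{
	u32 used = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (used & BIT(ports[i]))
			return false;	/* same port appears twice */
		used |= BIT(ports[i]);
	}
	return true;
}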
@@ -13055,15 +13270,6 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
- /*
- * Save the slave bitmask which gets filled for master crtc state during
- * slave atomic check call. For all other CRTCs reset the port sync variables
- * crtc_state->master_transcoder needs to be set to INVALID
- */
- reset_port_sync_mode_state(saved_state);
- if (intel_atomic_is_master_connector(crtc_state))
- saved_state->sync_mode_slaves_mask =
- crtc_state->sync_mode_slaves_mask;
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
@@ -13078,11 +13284,10 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_atomic_state *state = pipe_config->uapi.state;
- struct intel_encoder *encoder;
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int base_bpp, ret;
- int i, tile_group_id = -1, num_tiled_conns = 0;
+ int base_bpp, ret, i;
bool retry = true;
pipe_config->cpu_transcoder =
@@ -13121,13 +13326,15 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
&pipe_config->pipe_src_h);
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
if (connector_state->crtc != crtc)
continue;
- encoder = to_intel_encoder(connector_state->best_encoder);
-
if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
- DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ drm_dbg_kms(&i915->drm,
+ "rejecting invalid cloning configuration\n");
return -EINVAL;
}
@@ -13152,47 +13359,24 @@ encoder_retry:
drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
- /* Get tile_group_id of tiled connector */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc == crtc &&
- connector->has_tile) {
- tile_group_id = connector->tile_group->id;
- break;
- }
- }
-
- /* Get total number of tiled connectors in state that belong to
- * this tile group.
- */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector->has_tile &&
- connector->tile_group->id == tile_group_id)
- num_tiled_conns++;
- }
-
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
if (connector_state->crtc != crtc)
continue;
- ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
- num_tiled_conns);
- if (ret) {
- DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
- ret);
- return ret;
- }
-
- encoder = to_intel_encoder(connector_state->best_encoder);
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
if (ret < 0) {
if (ret != -EDEADLK)
- DRM_DEBUG_KMS("Encoder config failure: %d\n",
- ret);
+ drm_dbg_kms(&i915->drm,
+ "Encoder config failure: %d\n",
+ ret);
return ret;
}
}
@@ -13207,15 +13391,16 @@ encoder_retry:
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- DRM_DEBUG_KMS("CRTC fixup failed\n");
+ drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
return ret;
}
if (ret == RETRY) {
- if (WARN(!retry, "loop in pipe configuration computation\n"))
+ if (drm_WARN(&i915->drm, !retry,
+ "loop in pipe configuration computation\n"))
return -EINVAL;
- DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+ drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
retry = false;
goto encoder_retry;
}
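/*
 * The RETRY handling above permits exactly one bandwidth-constrained
 * recompute; a second RETRY is treated as a loop bug. A hypothetical
 * skeleton of that control flow, with do_compute() standing in for the
 * real computation:
 */
static int compute_with_one_retry(struct intel_crtc_state *crtc_state)
{
	bool retry = true;
	int ret;

again:
	ret = do_compute(crtc_state);
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;
		retry = false;
		goto again;
	}
	return ret;
}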
@@ -13226,8 +13411,9 @@ encoder_retry:
*/
pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
!pipe_config->dither_force_disable;
- DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
- base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&i915->drm,
+ "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+ base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/*
* Make drm_calc_timestamping_constants in
@@ -13238,6 +13424,35 @@ encoder_retry:
return 0;
}
+static int
+intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
+{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector,
+ conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base ||
+ !encoder->compute_config_late)
+ continue;
+
+ ret = encoder->compute_config_late(encoder, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
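/*
 * compute_config_late() gives encoders a second, optional pass that
 * runs only after every encoder's compute_config() has settled the
 * CRTC state. A hypothetical encoder hook with the signature the loop
 * above expects:
 */
static int fake_encoder_compute_config_late(struct intel_encoder *encoder,
					    struct intel_crtc_state *crtc_state,
					    struct drm_connector_state *conn_state)
{
	/* derive anything that depends on the final adjusted mode here */
	return 0;
}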
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
@@ -13316,16 +13531,17 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
- DRM_DEBUG_KMS("expected:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s infoframe\n", name);
+ drm_dbg_kms(&dev_priv->drm, "expected:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- DRM_DEBUG_KMS("found:\n");
+ drm_dbg_kms(&dev_priv->drm, "found:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
- DRM_ERROR("mismatch in %s infoframe\n", name);
- DRM_ERROR("expected:\n");
+ drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
+ drm_err(&dev_priv->drm, "expected:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- DRM_ERROR("found:\n");
+ drm_err(&dev_priv->drm, "found:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
@@ -13334,6 +13550,7 @@ static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct va_format vaf;
va_list args;
@@ -13342,11 +13559,12 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
vaf.va = &args;
if (fastset)
- DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
else
- DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
va_end(args);
}
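/*
 * pipe_config_mismatch() relies on the kernel's struct va_format /
 * %pV idiom: a wrapper forwards its printf-style arguments to a
 * printk-family call as one conversion. Minimal self-contained sketch:
 */
static void __printf(1, 2) mismatch_log(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;
	pr_debug("mismatch: %pV\n", &vaf);
	va_end(args);
}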
@@ -13382,7 +13600,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
- DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "initial modeset and fastboot not set\n");
ret = false;
}
@@ -13584,7 +13803,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
- PIPE_CONF_CHECK_I(dc3co_exitline);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -13644,10 +13862,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
+ PIPE_CONF_CHECK_I(linetime);
+ PIPE_CONF_CHECK_I(ips_linetime);
+
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
-
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -13703,7 +13923,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
- PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
PIPE_CONF_CHECK_I(dsc.compression_enable);
@@ -13737,9 +13957,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
- WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- fdi_dotclock, dotclock);
+ drm_WARN(&dev_priv->drm,
+ !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
}
}
@@ -13750,12 +13971,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
- struct skl_ddb_allocation ddb;
struct skl_pipe_wm wm;
} *hw;
- struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ u8 hw_enabled_slices;
const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -13771,14 +13991,14 @@ static void verify_wm_state(struct intel_crtc *crtc,
skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
- skl_ddb_get_hw_state(dev_priv, &hw->ddb);
- sw_ddb = &dev_priv->wm.skl_hw.ddb;
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
- hw->ddb.enabled_slices != sw_ddb->enabled_slices)
- DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
- sw_ddb->enabled_slices,
- hw->ddb.enabled_slices);
+ hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
+ drm_err(&dev_priv->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ dev_priv->enabled_dbuf_slices_mask,
+ hw_enabled_slices);
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
@@ -13793,26 +14013,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
&sw_plane_wm->wm[level]))
continue;
- DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1, level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1, level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
}
if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
&sw_plane_wm->trans_wm)) {
- DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1,
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1,
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
}
/* DDB */
@@ -13820,10 +14042,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
+ drm_err(&dev_priv->drm,
+ "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
@@ -13845,26 +14068,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
&sw_plane_wm->wm[level]))
continue;
- DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
}
if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
&sw_plane_wm->trans_wm)) {
- DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe),
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe),
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
}
/* DDB */
@@ -13872,10 +14097,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe),
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
+ drm_err(&dev_priv->drm,
+ "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
@@ -13919,9 +14145,9 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat
bool enabled = false, found = false;
enum pipe pipe;
- DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ encoder->base.name);
for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
new_conn_state, i) {
@@ -13973,7 +14199,8 @@ verify_crtc_state(struct intel_crtc *crtc,
intel_crtc_state_reset(old_crtc_state, crtc);
old_crtc_state->uapi.state = state;
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+ crtc->base.name);
active = dev_priv->display.get_pipe_config(crtc, pipe_config);
@@ -14048,7 +14275,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- DRM_DEBUG_KMS("%s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
@@ -14075,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -14108,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
}
}
@@ -14135,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
int i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++)
- verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(dev_priv,
+ &dev_priv->dpll.shared_dplls[i],
+ NULL, NULL);
}
static void
@@ -14280,35 +14509,35 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_modeset_checks(struct intel_atomic_state *state)
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- int ret, i;
+ int i;
- /* keep the current setting */
- if (!state->cdclk.force_min_cdclk_changed)
- state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.active)
+ active_pipes |= BIT(crtc->pipe);
+ else
+ active_pipes &= ~BIT(crtc->pipe);
+ }
- state->modeset = true;
- state->active_pipes = dev_priv->active_pipes;
- state->cdclk.logical = dev_priv->cdclk.logical;
- state->cdclk.actual = dev_priv->cdclk.actual;
+ return active_pipes;
+}
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (new_crtc_state->hw.active)
- state->active_pipes |= BIT(crtc->pipe);
- else
- state->active_pipes &= ~BIT(crtc->pipe);
+static int intel_modeset_checks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
- if (old_crtc_state->hw.active != new_crtc_state->hw.active)
- state->active_pipe_changes |= BIT(crtc->pipe);
- }
+ state->modeset = true;
+ state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
+
+ state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
if (state->active_pipe_changes) {
- ret = intel_atomic_lock_global_state(state);
+ ret = _intel_atomic_lock_global_state(state);
if (ret)
return ret;
}
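/*
 * active_pipe_changes now falls out of a single XOR: bits that differ
 * between the old and new masks are exactly the pipes whose active
 * state flipped, e.g. 0b0011 ^ 0b0110 = 0b0101 (pipes A and C).
 * Trivial sketch:
 */
static u8 pipes_that_changed(u8 old_pipes, u8 new_pipes)
{
	return old_pipes ^ new_pipes;
}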
@@ -14399,7 +14628,7 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
}
static int intel_atomic_check_planes(struct intel_atomic_state *state,
- bool *need_modeset)
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -14415,8 +14644,9 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
ret = intel_plane_atomic_check(state, plane);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
- plane->base.base.id, plane->base.name);
+ drm_dbg_atomic(&dev_priv->drm,
+ "[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
return ret;
}
}
@@ -14453,8 +14683,11 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
* affected planes are part of the state. We can now
* compute the minimum cdclk for each plane.
*/
- for_each_new_intel_plane_in_state(state, plane, plane_state, i)
- *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -14467,9 +14700,11 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret = intel_crtc_atomic_check(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_atomic(&i915->drm,
+ "[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
return ret;
}
}
@@ -14494,76 +14729,6 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
return false;
}
-static int
-intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- int ret = 0;
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
-
- if (!connector->has_tile ||
- connector->tile_group->id != tile_grp_id)
- continue;
- conn_state = drm_atomic_get_connector_state(&state->base,
- connector);
- if (IS_ERR(conn_state)) {
- ret = PTR_ERR(conn_state);
- break;
- }
-
- if (!conn_state->crtc)
- continue;
-
- crtc_state = drm_atomic_get_crtc_state(&state->base,
- conn_state->crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- break;
- }
- crtc_state->mode_changed = true;
- ret = drm_atomic_add_affected_connectors(&state->base,
- conn_state->crtc);
- if (ret)
- break;
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return ret;
-}
-
-static int
-intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_connector *connector;
- struct drm_connector_state *old_conn_state, *new_conn_state;
- int i, ret;
-
- if (INTEL_GEN(dev_priv) < 11)
- return 0;
-
- /* Is tiled, mark all other tiled CRTCs as needing a modeset */
- for_each_oldnew_connector_in_state(&state->base, connector,
- old_conn_state, new_conn_state, i) {
- if (!connector->has_tile)
- continue;
- if (!intel_connector_needs_modeset(state, connector))
- continue;
-
- ret = intel_modeset_all_tiles(state, connector->tile_group->id);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -14575,6 +14740,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_cdclk_state *new_cdclk_state;
struct intel_crtc *crtc;
int ret, i;
bool any_ms = false;
@@ -14582,8 +14748,8 @@ static int intel_atomic_check(struct drm_device *dev,
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->hw.mode.private_flags !=
- old_crtc_state->hw.mode.private_flags)
+ if (new_crtc_state->uapi.mode.private_flags !=
+ old_crtc_state->uapi.mode.private_flags)
new_crtc_state->uapi.mode_changed = true;
}
@@ -14591,21 +14757,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /**
- * This check adds all the connectors in current state that belong to
- * the same tile group to a full modeset.
- * This function directly sets the mode_changed to true and we also call
- * drm_atomic_add_affected_connectors(). Hence we are not explicitly
- * calling drm_atomic_helper_check_modeset() after this.
- *
- * Fixme: Handle some corner cases where one of the
- * tiled connectors gets disconnected and tile info is lost but since it
- * was previously synced to other conn, we need to add that to the modeset.
- */
- ret = intel_atomic_check_tiled_conns(state);
- if (ret)
- goto fail;
-
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state)) {
@@ -14615,18 +14766,26 @@ static int intel_atomic_check(struct drm_device *dev,
continue;
}
- if (!new_crtc_state->uapi.enable) {
- intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
- continue;
- }
-
ret = intel_crtc_prepare_cleared_state(new_crtc_state);
if (ret)
goto fail;
+ if (!new_crtc_state->hw.enable)
+ continue;
+
ret = intel_modeset_pipe_config(new_crtc_state);
if (ret)
goto fail;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
+ continue;
+
+ ret = intel_modeset_pipe_config_late(new_crtc_state);
+ if (ret)
+ goto fail;
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -14656,8 +14815,10 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (is_trans_port_sync_mode(new_crtc_state)) {
- u8 trans = new_crtc_state->sync_mode_slaves_mask |
- BIT(new_crtc_state->master_transcoder);
+ u8 trans = new_crtc_state->sync_mode_slaves_mask;
+
+ if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ trans |= BIT(new_crtc_state->master_transcoder);
if (intel_cpu_transcoders_need_modeset(state, trans)) {
new_crtc_state->uapi.mode_changed = true;
@@ -14680,7 +14841,8 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (any_ms && !check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
}
@@ -14689,18 +14851,32 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- any_ms |= state->cdclk.force_min_cdclk_changed;
-
ret = intel_atomic_check_planes(state, &any_ms);
if (ret)
goto fail;
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+ any_ms = true;
+
+ /*
+ * distrust_bios_wm will force a full dbuf recomputation
+ * but the hardware state will only get updated accordingly
+ * if state->modeset==true. Hence distrust_bios_wm==true &&
+ * state->modeset==false is an invalid combination which
+ * would cause the hardware and software dbuf state to get
+ * out of sync. We must prevent that.
+ *
+ * FIXME clean up this mess and introduce better
+ * state tracking for dbuf.
+ */
+ if (dev_priv->wm.distrust_bios_wm)
+ any_ms = true;
+
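/*
 * A hypothetical assertion (not in the patch) that captures the
 * invariant spelled out in the comment above: a forced dbuf recompute
 * must imply a modeset, or software and hardware dbuf state diverge.
 */
static void assert_dbuf_recompute_sane(struct drm_i915_private *dev_priv,
				       struct intel_atomic_state *state)
{
	WARN_ON(dev_priv->wm.distrust_bios_wm && !state->modeset);
}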
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
- } else {
- state->cdclk.logical = dev_priv->cdclk.logical;
}
ret = intel_atomic_check_crtcs(state);
@@ -14806,6 +14982,18 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
ilk_pfit_disable(old_crtc_state);
}
+ /*
+ * The register is supposedly single buffered so perhaps
+ * not 100% correct to do this here. But SKL+ calculates
+ * this based on the adjusted pixel rate, so pfit changes do
+ * affect it and so it must be updated for fastsets.
+ * HSW/BDW only really need this here for fastboot, after
+ * that the value should not change without a full modeset.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_set_linetime_wm(new_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@@ -14848,9 +15036,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
- struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state,
- to_intel_plane(crtc->base.primary));
if (modeset) {
intel_crtc_update_active_timings(new_crtc_state);
@@ -14873,8 +15058,8 @@ static void intel_update_crtc(struct intel_crtc *crtc,
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
intel_fbc_disable(crtc);
- else if (new_plane_state)
- intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+ else
+ intel_fbc_enable(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -14904,7 +15089,8 @@ static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *ne
struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
enum transcoder slave_transcoder;
- WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+ drm_WARN_ON(&dev_priv->drm,
+ !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
return intel_get_crtc_for_pipe(dev_priv,
@@ -15021,7 +15207,7 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
if (conn_state->crtc == &crtc->base)
break;
}
- intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
+ intel_dp = intel_attached_dp(to_intel_connector(conn));
intel_dp_stop_link_train(intel_dp);
}
@@ -15036,15 +15222,12 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state,
- to_intel_plane(crtc->base.primary));
bool modeset = needs_modeset(new_crtc_state);
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
intel_fbc_disable(crtc);
- else if (new_plane_state)
- intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+ else
+ intel_fbc_enable(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -15068,18 +15251,20 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
struct intel_crtc_state *new_slave_crtc_state =
intel_atomic_get_new_crtc_state(state, slave_crtc);
struct intel_crtc_state *old_slave_crtc_state =
intel_atomic_get_old_crtc_state(state, slave_crtc);
- WARN_ON(!slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
+ drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
- DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
- crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
- slave_crtc->base.name);
+ drm_dbg_kms(&i915->drm,
+ "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+ crtc->base.base.id, crtc->base.name,
+ slave_crtc->base.base.id, slave_crtc->base.name);
/* Enable seq for slave with DP_TP_CTL left Idle until the
* master is ready
@@ -15109,35 +15294,53 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
state);
}
+static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 required_slices = state->enabled_dbuf_slices_mask;
+ u8 slices_union = hw_enabled_slices | required_slices;
+
+ /* If a second DBuf slice is required, enable it here */
+ if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, slices_union);
+}
+
+static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 required_slices = state->enabled_dbuf_slices_mask;
+
+ /* If the second DBuf slice is no longer required, disable it */
+ if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, required_slices);
+}
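/*
 * The pre/post pair above is a make-before-break transition: keep the
 * union of old and new slices powered across the commit, then trim to
 * exactly the new set. Generic sketch of the pattern (apply() is a
 * stand-in for icl_dbuf_slices_update()):
 */
static void transition_resource_mask(u8 current_mask, u8 target_mask,
				     void (*apply)(u8 mask))
{
	apply(current_mask | target_mask);	/* superset while updating */
	/* ... reprogram pipes and planes here ... */
	apply(target_mask);			/* exact set afterwards */
}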
+
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
u8 update_pipes = 0, modeset_pipes = 0;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
if (!new_crtc_state->hw.active)
continue;
/* ignore allocations for CRTCs that have been turned off. */
if (!needs_modeset(new_crtc_state)) {
- entries[i] = old_crtc_state->wm.skl.ddb;
- update_pipes |= BIT(crtc->pipe);
+ entries[pipe] = old_crtc_state->wm.skl.ddb;
+ update_pipes |= BIT(pipe);
} else {
- modeset_pipes |= BIT(crtc->pipe);
+ modeset_pipes |= BIT(pipe);
}
}
- /* If 2nd DBuf slice required, enable it here */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
-
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
@@ -15156,10 +15359,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i))
+ entries, I915_MAX_PIPES, pipe))
continue;
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
intel_update_crtc(crtc, state, old_crtc_state,
@@ -15193,10 +15396,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
is_trans_port_sync_slave(new_crtc_state))
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
if (is_trans_port_sync_mode(new_crtc_state)) {
@@ -15228,20 +15431,17 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
- WARN_ON(modeset_pipes);
+ drm_WARN_ON(&dev_priv->drm, modeset_pipes);
- /* If 2nd DBuf slice is no more required disable it */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
}
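/*
 * skl_commit_modeset_enables() orders pipe updates so that no pipe's
 * new ddb allocation overlaps an allocation still programmed in
 * hardware. Simplified fixed-point sketch, with ddb_overlaps() and
 * commit_pipe() as hypothetical stand-ins:
 */
static void commit_in_ddb_order(u8 pending_pipes)
{
	while (pending_pipes) {
		u8 progress = 0;
		int pipe;

		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
			if (!(pending_pipes & BIT(pipe)))
				continue;
			if (ddb_overlaps(pipe, pending_pipes & ~BIT(pipe)))
				continue;	/* defer until neighbours move */
			commit_pipe(pipe);
			progress |= BIT(pipe);
		}
		pending_pipes &= ~progress;
		if (WARN_ON(!progress))
			break;	/* no safe order: bail instead of spinning */
	}
}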
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15338,10 +15538,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
- intel_set_cdclk_pre_plane_update(dev_priv,
- &state->cdclk.actual,
- &dev_priv->cdclk.actual,
- state->cdclk.pipe);
+ intel_set_cdclk_pre_plane_update(state);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -15371,16 +15568,17 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_encoders_update_prepare(state);
+ /* Enable all the new slices we might need */
+ if (state->modeset)
+ icl_dbuf_slice_pre_update(state);
+
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
- intel_set_cdclk_post_plane_update(dev_priv,
- &state->cdclk.actual,
- &dev_priv->cdclk.actual,
- state->cdclk.pipe);
+ intel_set_cdclk_post_plane_update(state);
}
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -15427,6 +15625,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
dev_priv->display.optimize_watermarks(state, crtc);
}
+ /* Disable all the slices we no longer need */
+ if (state->modeset)
+ icl_dbuf_slice_post_update(state);
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -15570,7 +15772,8 @@ static int intel_atomic_commit(struct drm_device *dev,
ret = intel_atomic_prepare_commit(state);
if (ret) {
- DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ drm_dbg_atomic(&dev_priv->drm,
+ "Preparing state failed with %i\n", ret);
i915_sw_fence_commit(&state->commit_ready);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -15579,6 +15782,8 @@ static int intel_atomic_commit(struct drm_device *dev,
ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
if (!ret)
ret = drm_atomic_helper_swap_state(&state->base, true);
+ if (!ret)
+ intel_atomic_swap_global_state(state);
if (ret) {
i915_sw_fence_commit(&state->commit_ready);
@@ -15594,14 +15799,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (state->global_state_changed) {
assert_global_state_locked(dev_priv);
- memcpy(dev_priv->min_cdclk, state->min_cdclk,
- sizeof(state->min_cdclk));
- memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
- sizeof(state->min_voltage_level));
dev_priv->active_pipes = state->active_pipes;
- dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
-
- intel_cdclk_swap_state(state);
}
drm_atomic_state_get(&state->base);
@@ -15729,7 +15927,7 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
- * @plane: drm plane to prepare for
+ * @_plane: drm plane to prepare for
* @_new_plane_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
@@ -15740,23 +15938,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
* Returns 0 on success, negative error code on failure.
*/
int
-intel_prepare_plane_fb(struct drm_plane *plane,
+intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
int ret;
if (old_obj) {
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(intel_state,
- to_intel_crtc(plane->state->crtc));
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ to_intel_crtc(old_plane_state->hw.crtc));
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -15770,7 +15970,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* can safely continue.
*/
if (needs_modeset(crtc_state)) {
- ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv, NULL,
false, 0,
GFP_KERNEL);
@@ -15780,7 +15980,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
if (new_plane_state->uapi.fence) { /* explicit fencing */
- ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
@@ -15807,12 +16007,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (!new_plane_state->uapi.fence) { /* implicit fencing */
struct dma_fence *fence;
- ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
obj->base.resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
- return ret;
+ goto unpin_fb;
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
@@ -15833,12 +16033,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- if (!intel_state->rps_interactive) {
+ if (!state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, true);
- intel_state->rps_interactive = true;
+ state->rps_interactive = true;
}
return 0;
+
+unpin_fb:
+ intel_plane_unpin_fb(new_plane_state);
+
+ return ret;
}
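/*
 * The new unpin_fb label follows the kernel's goto-unwind shape:
 * acquire in order, and on a late failure release in reverse order.
 * Minimal illustration with hypothetical acquire/release steps:
 */
static int acquire_both(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	return 0;

err_release_a:
	release_a();
	return ret;
}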
/**
@@ -15854,13 +16059,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
{
struct intel_plane_state *old_plane_state =
to_intel_plane_state(_old_plane_state);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(old_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+
+ if (!obj)
+ return;
- if (intel_state->rps_interactive) {
+ if (state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, false);
- intel_state->rps_interactive = false;
+ state->rps_interactive = false;
}
/* Should only be called after a successful intel_prepare_plane_fb()! */
@@ -16029,6 +16238,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
new_plane_state->uapi.crtc_w = crtc_w;
new_plane_state->uapi.crtc_h = crtc_h;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
old_plane_state, new_plane_state);
if (ret)
@@ -16113,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -16194,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- possible_crtcs = BIT(pipe);
-
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -16247,7 +16455,6 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- unsigned int possible_crtcs;
struct intel_plane *cursor;
int ret, zpos;
@@ -16280,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- possible_crtcs, &intel_cursor_plane_funcs,
+ 0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -16328,6 +16533,7 @@ static const struct drm_crtc_funcs bdw_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = bdw_enable_vblank,
.disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs ilk_crtc_funcs = {
@@ -16336,6 +16542,7 @@ static const struct drm_crtc_funcs ilk_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = ilk_enable_vblank,
.disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs g4x_crtc_funcs = {
@@ -16344,6 +16551,7 @@ static const struct drm_crtc_funcs g4x_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i965_crtc_funcs = {
@@ -16352,6 +16560,7 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
@@ -16360,6 +16569,7 @@ static const struct drm_crtc_funcs i915gm_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915gm_enable_vblank,
.disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -16368,6 +16578,7 @@ static const struct drm_crtc_funcs i915_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
@@ -16376,6 +16587,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
/* no hw vblank counter */
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static struct intel_crtc *intel_crtc_alloc(void)
@@ -16405,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc)
kfree(crtc);
}
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ plane->pipe);
+
+ plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+ }
+}
+
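/*
 * drm_crtc_mask() is BIT(drm_crtc_index(crtc)), so the helper above
 * can only run once every CRTC exists; plane creation below therefore
 * passes 0 for possible_crtcs and relies on this late fixup. Sketch:
 */
static u32 plane_crtc_mask(const struct drm_crtc *crtc)
{
	return drm_crtc_mask(crtc);	/* == BIT(drm_crtc_index(crtc)) */
}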
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary, *cursor;
@@ -16483,7 +16707,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_color_init(crtc);
- WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@@ -16543,10 +16769,10 @@ static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
if (!IS_MOBILE(dev_priv))
return false;
- if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
@@ -16561,11 +16787,11 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
- I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -16591,10 +16817,10 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
pps_num = 1;
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = I915_READ(PP_CONTROL(pps_idx));
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- I915_WRITE(PP_CONTROL(pps_idx), val);
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
}
}
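/*
 * The I915_READ()/I915_WRITE() conversions in this patch follow one
 * read-modify-write template; a hypothetical helper showing the shape
 * (intel_de_read()/intel_de_write() are the display-engine MMIO
 * accessors that take dev_priv explicitly):
 */
static void de_rmw_sketch(struct drm_i915_private *dev_priv,
			  i915_reg_t reg, u32 clear, u32 set)
{
	u32 val = intel_de_read(dev_priv, reg);

	val = (val & ~clear) | set;
	intel_de_write(dev_priv, reg, val);
}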
@@ -16674,14 +16900,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* On SKL pre-D0 the strap isn't connected, so we assume
* it's there.
*/
- found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_GEN9_BC(dev_priv))
intel_ddi_init(dev_priv, PORT_A);
/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
* register */
- found = I915_READ(SFUSE_STRAP);
+ found = intel_de_read(dev_priv, SFUSE_STRAP);
if (found & SFUSE_STRAP_DDIB_DETECTED)
intel_ddi_init(dev_priv, PORT_B);
@@ -16714,25 +16940,25 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (ilk_has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
- if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+ if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
if (!found)
intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
- if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+ if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
}
- if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+ if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
- if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+ if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
- if (I915_READ(PCH_DP_C) & DP_DETECTED)
+ if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
- if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
@@ -16757,16 +16983,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
*/
has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
- if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
- if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
if (IS_CHERRYVIEW(dev_priv)) {
@@ -16775,9 +17001,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
@@ -16793,11 +17019,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_crt_init(dev_priv);
- if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOB\n");
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
if (!found && IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOB\n");
intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
}
@@ -16807,22 +17034,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOC\n");
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
}
- if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
if (IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOC\n");
intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
}
if (IS_G4X(dev_priv))
intel_dp_init(dev_priv, DP_C, PORT_C);
}
- if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
+ if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
intel_dp_init(dev_priv, DP_D, PORT_D);
if (SUPPORTS_TV(dev_priv))
@@ -16864,9 +17092,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
unsigned int *handle)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
if (obj->userptr.mm) {
- DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+ drm_dbg(&i915->drm,
+ "attempting to use a userptr for a framebuffer, denied\n");
return -EINVAL;
}
@@ -16920,14 +17150,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (tiling != I915_TILING_NONE &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode doesn't match fb modifier\n");
goto err;
}
} else {
if (tiling == I915_TILING_X) {
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
} else if (tiling == I915_TILING_Y) {
- DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No Y tiling for legacy addfb\n");
goto err;
}
}
@@ -16937,10 +17169,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0])) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name),
- mode_cmd->modifier[0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->modifier[0]);
goto err;
}
@@ -16950,17 +17183,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (INTEL_GEN(dev_priv) < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
goto err;
}
max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > max_stride) {
- DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
- "tiled" : "linear",
- mode_cmd->pitches[0], max_stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s pitch (%u) must be at most %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+ "tiled" : "linear",
+ mode_cmd->pitches[0], max_stride);
goto err;
}
@@ -16969,15 +17204,17 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* the fb pitch and fence stride match.
*/
if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
goto err;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0) {
- DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
- mode_cmd->offsets[0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
goto err;
}
@@ -16987,14 +17224,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
u32 stride_alignment;
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
- DRM_DEBUG_KMS("bad plane %d handle\n", i);
+ drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
+ i);
goto err;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
if (fb->pitches[i] & (stride_alignment - 1)) {
- DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
- i, fb->pitches[i], stride_alignment);
+ drm_dbg_kms(&dev_priv->drm,
+ "plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
goto err;
}
@@ -17002,9 +17241,10 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
- DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
- i,
- fb->pitches[i], ccs_aux_stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
goto err;
}
}
@@ -17018,7 +17258,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
+ drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
goto err;
}
@@ -17048,17 +17288,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
-static void intel_atomic_state_free(struct drm_atomic_state *state)
-{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-
- drm_atomic_state_default_release(state);
-
- i915_sw_fence_fini(&intel_state->commit_ready);
-
- kfree(state);
-}
-
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -17290,9 +17519,36 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->cdclk.obj.state);
+
intel_update_cdclk(i915);
- intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
- i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
+ intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+}
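
The cdclk bookkeeping used here has moved from loose fields in dev_priv into an atomic "global object", reached by downcasting i915->cdclk.obj.state. A minimal sketch of such a downcast helper, assuming the usual container_of() layout with the common state embedded as a member named base (the member name is an assumption, not quoted from this patch):

	static inline struct intel_cdclk_state *
	to_intel_cdclk_state(struct intel_global_state *obj_state)
	{
		/* recover the wrapper struct from its embedded base */
		return container_of(obj_state, struct intel_cdclk_state, base);
	}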
+
+static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, state->dev) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
}
/*
@@ -17305,9 +17561,8 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-static void sanitize_watermarks(struct drm_device *dev)
+static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
struct intel_crtc *crtc;
@@ -17320,26 +17575,17 @@ static void sanitize_watermarks(struct drm_device *dev)
if (!dev_priv->display.optimize_watermarks)
return;
- /*
- * We need to hold connection_mutex before calling duplicate_state so
- * that the connector loop is protected.
- */
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- } else if (WARN_ON(ret)) {
- goto fail;
- }
-
- state = drm_atomic_helper_duplicate_state(dev, &ctx);
- if (WARN_ON(IS_ERR(state)))
- goto fail;
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
intel_state = to_intel_atomic_state(state);
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
@@ -17348,22 +17594,13 @@ retry:
if (!HAS_GMCH(dev_priv))
intel_state->skip_intermediate_wm = true;
- ret = intel_atomic_check(dev, state);
- if (ret) {
- /*
- * If we fail here, it means that the hardware appears to be
- * programmed in a way that shouldn't be possible, given our
- * understanding of watermark requirements. This might mean a
- * mistake in the hardware readout code or a mistake in the
- * watermark calculations for a given platform. Raise a WARN
- * so that this is noticeable.
- *
- * If this actually happens, we'll have to just leave the
- * BIOS-programmed watermarks untouched and hope for the best.
- */
- WARN(true, "Could not determine valid watermarks for inherited state\n");
- goto put_state;
- }
+ ret = sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
@@ -17373,9 +17610,29 @@ retry:
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
-put_state:
- drm_atomic_state_put(state);
fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
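
The rework replaces the old lock-everything-first approach with the standard DRM acquire-context pattern: locks are taken lazily as object states are pulled into the atomic state, and a -EDEADLK result clears the state, backs off, and retries. A minimal sketch of the idiom, with build_and_check() standing in for the state construction done above (an illustrative name, not a real function):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	state->acquire_ctx = &ctx;
	ret = build_and_check(state);		/* may return -EDEADLK */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);	/* drop half-built state */
		drm_modeset_backoff(&ctx);	/* release locks and wait */
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);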
@@ -17384,7 +17641,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
if (IS_GEN(dev_priv, 5)) {
u32 fdi_pll_clk =
- I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+ intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
@@ -17393,7 +17650,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
return;
}
- DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+ drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}
static int intel_initial_commit(struct drm_device *dev)
@@ -17476,6 +17733,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
struct drm_mode_config *mode_config = &i915->drm.mode_config;
drm_mode_config_init(&i915->drm);
+ INIT_LIST_HEAD(&i915->global_obj_list);
mode_config->min_width = 0;
mode_config->min_height = 0;
@@ -17517,11 +17775,31 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
}
}
-int intel_modeset_init(struct drm_i915_private *i915)
+static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+{
+ intel_atomic_global_obj_cleanup(i915);
+ drm_mode_config_cleanup(&i915->drm);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+
+ if (plane_config->vma)
+ i915_vma_put(plane_config->vma);
+}
+
+/* part #1: call before irq install */
+int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
- struct drm_device *dev = &i915->drm;
- enum pipe pipe;
- struct intel_crtc *crtc;
int ret;
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
@@ -17530,6 +17808,10 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_mode_config_init(i915);
+ ret = intel_cdclk_init(i915);
+ if (ret)
+ return ret;
+
ret = intel_bw_init(i915);
if (ret)
return ret;
@@ -17542,26 +17824,38 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_fbc_init(i915);
+ return 0;
+}
+
+/* part #2: call after irq install */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
+
intel_init_pm(i915);
intel_panel_sanitize_ssc(i915);
intel_gmbus_setup(i915);
- DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_NUM_PIPES(i915),
- INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+ drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
for_each_pipe(i915, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret) {
- drm_mode_config_cleanup(dev);
+ intel_mode_config_cleanup(i915);
return ret;
}
}
}
+ intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
intel_update_fdi_pll_freq(i915);
@@ -17601,6 +17895,8 @@ int intel_modeset_init(struct drm_i915_private *i915)
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
}
/*
@@ -17609,7 +17905,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(i915))
- sanitize_watermarks(dev);
+ sanitize_watermarks(i915);
/*
* Force all active planes to recompute their states. So that on
@@ -17619,7 +17915,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
*/
ret = intel_initial_commit(dev);
if (ret)
- DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+ drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
return 0;
}
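
Display bring-up is now split around interrupt installation. A sketch of the intended ordering in the probe path (the caller is outside this diff; intel_irq_install() is the driver's existing irq setup entry point):

	ret = intel_modeset_init_noirq(i915);	/* part #1 */
	if (ret)
		return ret;

	intel_irq_install(i915);		/* irqs go live here */

	ret = intel_modeset_init(i915);		/* part #2 */
	if (ret)
		return ret;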
@@ -17638,10 +17934,12 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 dpll, fp;
int i;
- WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
+ drm_WARN_ON(&dev_priv->drm,
+ i9xx_calc_dpll_params(48000, &clock) != 25154);
- DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
- pipe_name(pipe), clock.vco, clock.dot);
+ drm_dbg_kms(&dev_priv->drm,
+ "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
+ pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
dpll = DPLL_DVO_2X_MODE |
@@ -17651,27 +17949,27 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- I915_WRITE(FP0(pipe), fp);
- I915_WRITE(FP1(pipe), fp);
+ intel_de_write(dev_priv, FP0(pipe), fp);
+ intel_de_write(dev_priv, FP1(pipe), fp);
- I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
- I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
- I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
- I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
- I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
- I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
- I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
+ intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
+ intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
- I915_WRITE(DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -17679,17 +17977,18 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
*
* So write it again.
*/
- I915_WRITE(DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* We do this three times for luck */
for (i = 0; i < 3 ; i++) {
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
- I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
- POSTING_READ(PIPECONF(pipe));
+ intel_de_write(dev_priv, PIPECONF(pipe),
+ PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
@@ -17698,22 +17997,30 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
- pipe_name(pipe));
-
- WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
- WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
-
- I915_WRITE(PIPECONF(pipe), 0);
- POSTING_READ(PIPECONF(pipe));
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
+ pipe_name(pipe));
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
+
+ intel_de_write(dev_priv, PIPECONF(pipe), 0);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
- I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void
@@ -17736,8 +18043,9 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
if (pipe == crtc->pipe)
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
- plane->base.base.id, plane->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
@@ -17787,18 +18095,18 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
if (transcoder_is_dsi(cpu_transcoder))
return;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
val |= HSW_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
} else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~PIPECONF_FRAME_START_DELAY_MASK;
val |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
if (!crtc_state->has_pch_encoder)
@@ -17808,19 +18116,19 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
} else {
enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -17852,9 +18160,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
* gamma and CSC to match how we program our planes.
*/
if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE |
- SKL_BOTTOM_COLOR_CSC_ENABLE);
+ intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
+ SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
}
/* Adjust the state of the output pipe according to whether we
@@ -17926,16 +18233,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+ pipe_name(crtc->pipe));
has_active_crtc = false;
}
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ encoder->base.name);
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
@@ -17943,9 +18252,10 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (crtc_state) {
struct drm_encoder *best_encoder;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
/* avoid oopsing in case the hooks consult best_encoder */
best_encoder = connector->base.state->best_encoder;
@@ -17998,9 +18308,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
intel_set_plane_visible(crtc_state, plane_state, visible);
- DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
- plane->base.base.id, plane->base.name,
- enableddisabled(visible), pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ enableddisabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -18014,14 +18325,14 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(dev_priv->cdclk.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- int i;
-
- dev_priv->active_pipes = 0;
+ u8 active_pipes = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -18038,41 +18349,19 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = crtc_state->hw.active;
if (crtc_state->hw.active)
- dev_priv->active_pipes |= BIT(crtc->pipe);
+ active_pipes |= BIT(crtc->pipe);
- DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
- crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc_state->hw.active));
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ enableddisabled(crtc_state->hw.active));
}
- readout_plane_state(dev_priv);
-
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
- pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
- &pll->state.hw_state);
-
- if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(dev_priv,
- POWER_DOMAIN_DPLL_DC_OFF);
- }
-
- pll->state.crtc_mask = 0;
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (crtc_state->hw.active &&
- crtc_state->shared_dpll == pll)
- pll->state.crtc_mask |= 1 << crtc->pipe;
- }
- pll->active_mask = pll->state.crtc_mask;
+ readout_plane_state(dev_priv);
- DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->info->name, pll->state.crtc_mask, pll->on);
- }
+ intel_dpll_readout_hw_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
pipe = 0;
@@ -18089,10 +18378,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
- DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- enableddisabled(encoder->base.crtc),
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
+ pipe_name(pipe));
}
drm_connector_list_iter_begin(dev, &conn_iter);
@@ -18103,7 +18393,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.dpms = DRM_MODE_DPMS_ON;
- encoder = connector->encoder;
+ encoder = intel_attached_encoder(connector);
connector->base.encoder = &encoder->base;
crtc = to_intel_crtc(encoder->base.crtc);
@@ -18124,9 +18414,10 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id, connector->base.name,
- enableddisabled(connector->base.encoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
@@ -18190,19 +18481,20 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
}
if (crtc_state->hw.active) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
+ if (drm_WARN_ON(dev, min_cdclk < 0))
min_cdclk = 0;
}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
- dev_priv->min_voltage_level[crtc->pipe] =
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
intel_bw_crtc_update(bw_state, crtc_state);
@@ -18241,53 +18533,55 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
* Also known as Wa_14010480278.
*/
if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
- DARBF_GATING_DIS);
+ intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
+ intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
if (IS_HASWELL(dev_priv)) {
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ intel_de_write(dev_priv, CHICKEN_PAR1_1,
+ intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
}
}
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = I915_READ(hdmi_reg);
+ u32 val = intel_de_read(dev_priv, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- I915_WRITE(hdmi_reg, val);
+ intel_de_write(dev_priv, hdmi_reg, val);
}
static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t dp_reg)
{
- u32 val = I915_READ(dp_reg);
+ u32 val = intel_de_read(dev_priv, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for DP %c\n",
+ port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- I915_WRITE(dp_reg, val);
+ intel_de_write(dev_priv, dp_reg, val);
}
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
@@ -18324,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
- int i;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
@@ -18377,18 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_update_connector_atomic_state(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- if (!pll->on || pll->active_mask)
- continue;
-
- DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
- pll->info->name);
-
- pll->info->funcs->disable(dev_priv, pll);
- pll->on = false;
- }
+ intel_dpll_sanitize_state(dev_priv);
if (IS_G4X(dev_priv)) {
g4x_wm_get_hw_state(dev_priv);
@@ -18408,7 +18690,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
u64 put_domains;
put_domains = modeset_get_crtc_power_domains(crtc_state);
- if (WARN_ON(put_domains))
+ if (drm_WARN_ON(dev, put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -18444,7 +18726,8 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
}
@@ -18467,21 +18750,19 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
+/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
flush_workqueue(i915->flip_wq);
flush_workqueue(i915->modeset_wq);
flush_work(&i915->atomic_helper.free_work);
- WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
-
- /*
- * Interrupts and polling as the first thing to avoid creating havoc.
- * Too much stuff here (turning of connectors, ...) would
- * experience fancy races otherwise.
- */
- intel_irq_uninstall(i915);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
+}
+/* part #2: call after irq uninstall */
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
+{
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -18507,14 +18788,12 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
intel_hdcp_component_fini(i915);
- drm_mode_config_cleanup(&i915->drm);
+ intel_mode_config_cleanup(i915);
intel_overlay_cleanup(i915);
intel_gmbus_teardown(i915);
- intel_bw_cleanup(i915);
-
destroy_workqueue(i915->flip_wq);
destroy_workqueue(i915->modeset_wq);
@@ -18523,6 +18802,15 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+static bool
+has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+{
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return HAS_TRANSCODER_EDP(dev_priv);
+ else
+ return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
+}
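
This relies on the per-pipe transcoders aliasing the pipe enum (TRANSCODER_A == PIPE_A and so on), so pipe_mask doubles as a transcoder presence mask and only the EDP transcoder needs an explicit feature check. Illustratively, on a part with pipe_mask = BIT(PIPE_A) | BIT(PIPE_B):

	has_transcoder(dev_priv, TRANSCODER_B);	  /* true */
	has_transcoder(dev_priv, TRANSCODER_C);	  /* false */
	has_transcoder(dev_priv, TRANSCODER_EDP); /* HAS_TRANSCODER_EDP() */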
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -18589,7 +18877,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
+ error->power_well_driver = intel_de_read(dev_priv,
+ HSW_PWR_WELL_CTL2);
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
@@ -18598,33 +18887,39 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
if (!error->pipe[i].power_domain_on)
continue;
- error->cursor[i].control = I915_READ(CURCNTR(i));
- error->cursor[i].position = I915_READ(CURPOS(i));
- error->cursor[i].base = I915_READ(CURBASE(i));
+ error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
+ error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
+ error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
- error->plane[i].control = I915_READ(DSPCNTR(i));
- error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
+ error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
if (INTEL_GEN(dev_priv) <= 3) {
- error->plane[i].size = I915_READ(DSPSIZE(i));
- error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].size = intel_de_read(dev_priv,
+ DSPSIZE(i));
+ error->plane[i].pos = intel_de_read(dev_priv,
+ DSPPOS(i));
}
if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
- error->plane[i].addr = I915_READ(DSPADDR(i));
+ error->plane[i].addr = intel_de_read(dev_priv,
+ DSPADDR(i));
if (INTEL_GEN(dev_priv) >= 4) {
- error->plane[i].surface = I915_READ(DSPSURF(i));
- error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ error->plane[i].surface = intel_de_read(dev_priv,
+ DSPSURF(i));
+ error->plane[i].tile_offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i));
}
- error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
if (HAS_GMCH(dev_priv))
- error->pipe[i].stat = I915_READ(PIPESTAT(i));
+ error->pipe[i].stat = intel_de_read(dev_priv,
+ PIPESTAT(i));
}
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ if (!has_transcoder(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
@@ -18636,13 +18931,20 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->transcoder[i].cpu_transcoder = cpu_transcoder;
- error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
- error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ error->transcoder[i].conf = intel_de_read(dev_priv,
+ PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = intel_de_read(dev_priv,
+ HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = intel_de_read(dev_priv,
+ HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = intel_de_read(dev_priv,
+ HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = intel_de_read(dev_priv,
+ VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = intel_de_read(dev_priv,
+ VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = intel_de_read(dev_priv,
+ VSYNC(cpu_transcoder));
}
return error;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 028aab728514..adb1225a3480 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -26,7 +26,6 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
-#include <drm/i915_drm.h>
enum link_m_n_set;
struct dpll;
@@ -40,12 +39,15 @@ struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_ggtt_view;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
@@ -54,7 +56,6 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
-struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -312,10 +313,11 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
+ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
+ for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
+ for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
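
for_each_pipe() now iterates every possible pipe index and filters on the platform's pipe_mask instead of assuming the first INTEL_NUM_PIPES() indices all exist, so a fused-off middle pipe no longer shifts the iteration. Illustratively, with pipe_mask = BIT(PIPE_A) | BIT(PIPE_C):

	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
	/* logs pipes A and C; the old count-based loop would
	 * have visited A and B */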
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -469,6 +471,8 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
@@ -486,6 +490,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state);
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -495,6 +500,7 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
@@ -520,6 +526,7 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
@@ -608,8 +615,10 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init_noirq(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
void intel_modeset_driver_remove(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
new file mode 100644
index 000000000000..1e6eb7f2f72d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -0,0 +1,2134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_debugfs.h"
+#include "intel_csr.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_fbc.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sideband.h"
+
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+ return to_i915(node->minor->dev);
+}
+
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->fb_tracking.flip_bits);
+
+ return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&fbc->lock);
+
+ if (intel_fbc_is_active(dev_priv))
+ seq_puts(m, "FBC enabled\n");
+ else
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+ if (intel_fbc_is_active(dev_priv)) {
+ u32 mask;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
+ else if (IS_G4X(dev_priv))
+ mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ else
+ mask = intel_de_read(dev_priv, FBC_STATUS) &
+ (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
+
+ seq_printf(m, "Compressing: %s\n", yesno(mask));
+ }
+
+ mutex_unlock(&fbc->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_fbc_false_color_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ *val = dev_priv->fbc.false_color;
+
+ return 0;
+}
+
+static int i915_fbc_false_color_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ u32 reg;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ mutex_lock(&dev_priv->fbc.lock);
+
+ reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
+ dev_priv->fbc.false_color = val;
+
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL,
+ val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
+
+ mutex_unlock(&dev_priv->fbc.lock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
+ i915_fbc_false_color_get, i915_fbc_false_color_set,
+ "%llu\n");
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915_modparams.enable_ips));
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ bool sr_enabled = false;
+
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ /* no global SR status; inspect per-plane WM */;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
+
+ return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ return 0;
+}
+
+static int i915_vbt(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->vbt)
+ seq_write(m, opregion->vbt, opregion->vbt_size);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_framebuffer *fbdev_fb = NULL;
+ struct drm_framebuffer *drm_fb;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
+ fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.format->depth,
+ fbdev_fb->base.format->cpp[0] * 8,
+ fbdev_fb->base.modifier,
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
+ seq_putc(m, '\n');
+ }
+#endif
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, dev) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.format->depth,
+ fb->base.format->cpp[0] * 8,
+ fb->base.modifier,
+ drm_framebuffer_read_refcount(&fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
+ seq_putc(m, '\n');
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ u8 val;
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+ int ret;
+
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
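
DEFINE_SHOW_ATTRIBUTE() wraps a single _show() callback in seq_file boilerplate; roughly, it generates (a simplified sketch of the macro from <linux/seq_file.h>):

	static int i915_psr_sink_status_open(struct inode *inode,
					     struct file *file)
	{
		return single_open(file, i915_psr_sink_status_show,
				   inode->i_private);
	}

together with a file_operations built on single_open()'s matching read, llseek and release helpers.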
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, status_val;
+ const char *status = "unknown";
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_psr *psr = &dev_priv->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+ if (psr->dp)
+ seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ enableddisabled(enabled), val);
+ psr_source_status(dev_priv, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+ /*
+ * SKL+ Perf counter is reset to 0 every time a DC state is entered
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+ /*
+ * Reading all 3 registers beforehand to minimize crossing a
+ * frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
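
Each PSR2_SU_STATUS register packs the selective-update block counts of three consecutive frames, which is why the read loop above steps by three and the print loop reselects the per-frame field with a mask and shift. A worked decode for a single frame under that packing:

	/* frame 4 lives in su_frames_val[1], since 4 / 3 == 1 */
	u32 su_blocks = (su_frames_val[4 / 3] & PSR2_SU_STATUS_MASK(4)) >>
			PSR2_SU_STATUS_SHIFT(4);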
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ intel_wakeref_t wakeref;
+ int ret;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ ret = intel_psr_debug_set(dev_priv, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ *val = READ_ONCE(dev_priv->psr.debug);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->desc->name,
+ power_well->count);
+
+ for_each_power_domain(power_domain, power_well->desc->domains)
+ seq_printf(m, " %-23s %d\n",
+ intel_display_power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ return 0;
+}
+
+static int i915_dmc_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ struct intel_csr *csr;
+ i915_reg_t dc5_reg, dc6_reg = {};
+
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
+
+ csr = &dev_priv->csr;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
+ seq_printf(m, "path: %s\n", csr->fw_path);
+
+ if (!csr->dmc_payload)
+ goto out;
+
+ seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
+ CSR_VERSION_MINOR(csr->version));
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ /*
+ * NOTE: DMC_DEBUG3 is a general purpose reg.
+ * According to B.Specs:49196 the DMC f/w reuses the DC5/6 counter
+ * reg for DC3CO debugging and validation,
+ * but the TGL DMC f/w uses the DMC_DEBUG3 reg for the DC3CO counter.
+ */
+ seq_printf(m, "DC3CO count: %d\n",
+ intel_de_read(dev_priv, DMC_DEBUG3));
+ } else {
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT;
+ if (!IS_GEN9_LP(dev_priv))
+ dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ }
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n",
+ intel_de_read(dev_priv, dc5_reg));
+ if (dc6_reg.reg)
+ seq_printf(m, "DC5 -> DC6 count: %d\n",
+ intel_de_read(dev_priv, dc6_reg));
+
+out:
+ seq_printf(m, "program base: 0x%08x\n",
+ intel_de_read(dev_priv, CSR_PROGRAM(0)));
+ seq_printf(m, "ssp base: 0x%08x\n",
+ intel_de_read(dev_priv, CSR_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+ const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ seq_putc(m, '\t');
+
+ seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_encoder_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+ const struct drm_display_mode *mode = panel->fixed_mode;
+
+ seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool hdcp_cap, hdcp2_cap;
+
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+ seq_puts(m, "\n");
+}
+
+static void intel_dp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+
+ seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
+ if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ intel_panel_info(m, &intel_connector->panel);
+
+ drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+ &intel_dp->aux);
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_dp_mst_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp_mst_encoder *intel_mst =
+ enc_to_mst(intel_encoder);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+ intel_connector->port);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_lvds_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
+
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(connector->status));
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
+
+ if (!encoder)
+ return;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ intel_dp_mst_info(m, intel_connector);
+ else
+ intel_dp_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
+ intel_hdmi_info(m, intel_connector);
+ break;
+ default:
+ break;
+ }
+
+ seq_printf(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static const char *plane_type(enum drm_plane_type type)
+{
+ switch (type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ return "OVL";
+ case DRM_PLANE_TYPE_PRIMARY:
+ return "PRI";
+ case DRM_PLANE_TYPE_CURSOR:
+ return "CUR";
+ /*
+ * Deliberately omitting default: to generate compiler warnings
+ * when a new drm_plane_type gets added.
+ */
+ }
+
+ return "unknown";
+}
+
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
+{
+ /*
+ * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
+ * but print them all so that any misuse is visible.
+ */
+ snprintf(buf, bufsize,
+ "%s%s%s%s%s%s(0x%08x)",
+ (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
+ (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
+ (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
+ (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
+ (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
+ (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
+ rotation);
+}
+
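+/*
+ * Print the plane state as requested by userspace (uapi): framebuffer,
+ * source/destination rectangles and rotation.
+ */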
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_format_name_buf format_name;
+ struct drm_rect src, dst;
+ char rot_str[48];
+
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
+
+ if (fb)
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
+
+ seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+ fb ? fb->width : 0, fb ? fb->height : 0,
+ DRM_RECT_FP_ARG(&src),
+ DRM_RECT_ARG(&dst),
+ rot_str);
+}
+
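+/*
+ * Print the plane state programmed into the hardware; nothing is printed
+ * when the plane has no hw framebuffer attached.
+ */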
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_format_name_buf format_name;
+ char rot_str[48];
+
+ if (!fb)
+ return;
+
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, format_name.str,
+ fb->width, fb->height,
+ yesno(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
+ }
+}
+
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
+ int i;
+
+ /* Not all platforms have a scaler */
+ if (num_scalers) {
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+ num_scalers,
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id);
+
+ for (i = 0; i < num_scalers; i++) {
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
+
+ seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
+ i, yesno(sc->in_use), sc->mode);
+ }
+ seq_puts(m, "\n");
+ } else {
+ seq_puts(m, "\tNo scalers available on this platform\n");
+ }
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->uapi.enable),
+ yesno(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ if (crtc_state->hw.enable) {
+ seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->hw.active),
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+ }
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
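+/*
+ * Top level display state dump: takes a runtime PM wakeref and all modeset
+ * locks, then walks every CRTC and every connector.
+ */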
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "CRTC info\n");
+ seq_printf(m, "---------\n");
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
+
+ seq_printf(m, "\n");
+ seq_printf(m, "Connector info\n");
+ seq_printf(m, "--------------\n");
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
+ intel_connector_info(m, connector);
+ drm_connector_list_iter_end(&conn_iter);
+
+ drm_modeset_unlock_all(dev);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
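+/* Dump the software-tracked state of all shared DPLLs. */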
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ int i;
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ dev_priv->dpll.ref_clks.nssc,
+ dev_priv->dpll.ref_clks.ssc);
+
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+ pll->info->id);
+ seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+ pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->state.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
+ seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
+ seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
+ pll->state.hw_state.mg_refclkin_ctl);
+ seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_coreclkctl1);
+ seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_hsclkctl);
+ seq_printf(m, " mg_pll_div0: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div0);
+ seq_printf(m, " mg_pll_div1: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div1);
+ seq_printf(m, " mg_pll_lf: 0x%08x\n",
+ pll->state.hw_state.mg_pll_lf);
+ seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+ pll->state.hw_state.mg_pll_frac_lock);
+ seq_printf(m, " mg_pll_ssc: 0x%08x\n",
+ pll->state.hw_state.mg_pll_ssc);
+ seq_printf(m, " mg_pll_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_bias);
+ seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ yesno(dev_priv->ipc_enabled));
+ return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (!HAS_IPC(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
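+/*
+ * Parse a boolean from userspace to enable/disable IPC; watermarks only
+ * settle after the next commit.
+ */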
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ if (!dev_priv->ipc_enabled && enable)
+ drm_info(&dev_priv->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ }
+
+ return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_ipc_status_write
+};
+
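+/* Dump the per-plane DDB (data buffer) allocations; gen9+ only. */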
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct skl_ddb_entry *entry;
+ struct intel_crtc *crtc;
+
+ if (INTEL_GEN(dev_priv) < 9)
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
+ entry->start, entry->end,
+ skl_ddb_entry_size(entry));
+ }
+
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+ entry->end, skl_ddb_entry_size(entry));
+ }
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
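+/*
+ * Print the DRRS state for one CRTC: whether DRRS is supported, the busy
+ * frontbuffer bits, the current refresh rate type and the vrefresh.
+ */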
+static void drrs_status_per_crtc(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_drrs *drrs = &dev_priv->drrs;
+ int vrefresh = 0;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->state->crtc != &intel_crtc->base)
+ continue;
+
+ seq_printf(m, "%s:\n", connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ seq_puts(m, "\n");
+
+ if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
+ struct intel_panel *panel;
+
+ mutex_lock(&drrs->mutex);
+ /* DRRS Supported */
+ seq_puts(m, "\tDRRS Supported: Yes\n");
+
+ /* disable_drrs() will make drrs->dp NULL */
+ if (!drrs->dp) {
+ seq_puts(m, "Idleness DRRS: Disabled\n");
+ if (dev_priv->psr.enabled)
+ seq_puts(m,
+ "\tAs PSR is enabled, DRRS is not enabled\n");
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+
+ panel = &drrs->dp->attached_connector->panel;
+ seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+ drrs->busy_frontbuffer_bits);
+
+ seq_puts(m, "\n\t\t");
+ if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+ seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+ vrefresh = panel->fixed_mode->vrefresh;
+ } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+ seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+ vrefresh = panel->downclock_mode->vrefresh;
+ } else {
+ seq_printf(m, "DRRS_State: Unknown(%d)\n",
+ drrs->refresh_rate_type);
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+ seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+ seq_puts(m, "\n\t\t");
+ mutex_unlock(&drrs->mutex);
+ } else {
+ /* DRRS not supported. Print the VBT parameter */
+ seq_puts(m, "\tDRRS Supported: No");
+ }
+ seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *intel_crtc;
+ int active_crtc_cnt = 0;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_crtc(dev, intel_crtc) {
+ if (intel_crtc->base.state->active) {
+ active_crtc_cnt++;
+ seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+ drrs_status_per_crtc(m, dev, intel_crtc);
+ }
+ }
+ drm_modeset_unlock_all(dev);
+
+ if (!active_crtc_cnt)
+ seq_puts(m, "No active crtc found\n");
+
+ return 0;
+}
+
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ intel_encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ intel_dig_port = enc_to_dig_port(intel_encoder);
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
+ drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
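+/*
+ * DP compliance test control: writing an explicit '1' marks the test as
+ * active on connected DP sinks, any other value clears it.
+ */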
+static ssize_t i915_displayport_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+ int val = 0;
+
+ dev = ((struct seq_file *)file->private_data)->private;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
+
+ drm_dbg(&to_i915(dev)->drm,
+ "Copied %d bytes from user\n", (unsigned int)len);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ break;
+ drm_dbg(&to_i915(dev)->drm,
+ "Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ intel_dp->compliance.test_active = true;
+ else
+ intel_dp->compliance.test_active = false;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_active)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_displayport_test_active_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_EDID_READ)
+ seq_printf(m, "%lx",
+ intel_dp->compliance.test_data.edid);
+ else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_VIDEO_PATTERN) {
+ seq_printf(m, "hdisplay: %d\n",
+ intel_dp->compliance.test_data.hdisplay);
+ seq_printf(m, "vdisplay: %d\n",
+ intel_dp->compliance.test_data.vdisplay);
+ seq_printf(m, "bpc: %u\n",
+ intel_dp->compliance.test_data.bpc);
+ }
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ int level;
+ int num_levels;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9/vlv/chv
+ */
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
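+/*
+ * Common write handler for the watermark latency files: expects exactly
+ * num_levels space-separated latency values, e.g.
+ * "echo 4 4 8 8 8 8 8 8 > i915_pri_wm_latency" on a platform with eight
+ * watermark levels.
+ */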
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ u16 new[8] = { 0 };
+ int num_levels;
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(dev);
+
+ return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+
+ /* Synchronize with everything first in case there's been an HPD
+ * storm, but we haven't finished handling it in the kernel yet
+ */
+ intel_synchronize_irq(dev_priv);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->hotplug.hotplug_work);
+
+ seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+ seq_printf(m, "Detected: %s\n",
+ yesno(delayed_work_pending(&hotplug->reenable_work)));
+
+ return 0;
+}
+
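+/*
+ * Accepts either "reset" to restore the default threshold, or an integer
+ * setting a new HPD storm detection threshold (0 disables detection).
+ */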
+static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ unsigned int new_threshold;
+ int i;
+ char *newline;
+ char tmp[16];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ if (strcmp(tmp, "reset") == 0)
+ new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+ return -EINVAL;
+
+ if (new_threshold > 0)
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting HPD storm detection threshold to %d\n",
+ new_threshold);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_storm_threshold = new_threshold;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+}
+
+static const struct file_operations i915_hpd_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_storm_ctl_write
+};
+
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
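+/*
+ * Accepts either "reset" to restore the platform default (enabled unless
+ * the platform supports DP MST), or a boolean forcing the short HPD storm
+ * detection state.
+ */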
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
+
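+/*
+ * Manually enable (non-zero) or disable (zero) eDP DRRS on every active
+ * CRTC that supports it.
+ */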
+static int i915_drrs_ctl_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ if (INTEL_GEN(dev_priv) < 7)
+ return -ENODEV;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct drm_connector_list_iter conn_iter;
+ struct intel_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->hw.active ||
+ !crtc_state->has_drrs)
+ goto out;
+
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
+
+ if (!(crtc_state->uapi.connector_mask &
+ drm_connector_mask(connector)))
+ continue;
+
+ encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ drm_dbg(&dev_priv->drm,
+ "Manually %sabling DRRS. %llu\n",
+ val ? "en" : "dis", val);
+
+ intel_dp = enc_to_intel_dp(encoder);
+ if (val)
+ intel_edp_drrs_enable(intel_dp,
+ crtc_state);
+ else
+ intel_edp_drrs_disable(intel_dp,
+ crtc_state);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
+
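+/*
+ * Writing a true value re-arms FIFO underrun reporting on all CRTCs and
+ * resets the FBC underrun state, waiting for pending commits first.
+ */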
+static ssize_t
+i915_fifo_underrun_reset_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct drm_i915_private *dev_priv = filp->private_data;
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = &dev_priv->drm;
+ int ret;
+ bool reset;
+
+ ret = kstrtobool_from_user(ubuf, cnt, &reset);
+ if (ret)
+ return ret;
+
+ if (!reset)
+ return cnt;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ struct drm_crtc_commit *commit;
+ struct intel_crtc_state *crtc_state;
+
+ ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(intel_crtc->base.state);
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (!ret)
+ ret = wait_for_completion_interruptible(&commit->flip_done);
+ }
+
+ if (!ret && crtc_state->hw.active) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Re-arming FIFO underruns on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+
+ intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_fbc_reset_underrun(dev_priv);
+ if (ret)
+ return ret;
+
+ return cnt;
+}
+
+static const struct file_operations i915_fifo_underrun_reset_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = i915_fifo_underrun_reset_write,
+ .llseek = default_llseek,
+};
+
+static const struct drm_info_list intel_display_debugfs_list[] = {
+ {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_ips_status", i915_ips_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_vbt", i915_vbt, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_power_domain_info", i915_power_domain_info, 0},
+ {"i915_dmc_info", i915_dmc_info, 0},
+ {"i915_display_info", i915_display_info, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
+ {"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
+};
+
+static const struct {
+ const char *name;
+ const struct file_operations *fops;
+} intel_display_debugfs_files[] = {
+ {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
+ {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+ {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+ {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+ {"i915_fbc_false_color", &i915_fbc_false_color_fops},
+ {"i915_dp_test_data", &i915_displayport_test_data_fops},
+ {"i915_dp_test_type", &i915_displayport_test_type_fops},
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
+ {"i915_ipc_status", &i915_ipc_status_fops},
+ {"i915_drrs_ctl", &i915_drrs_ctl_fops},
+ {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
+};
+
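+/**
+ * intel_display_debugfs_register - register display-related debugfs files
+ * @i915: i915 device instance
+ *
+ * Creates the display debugfs files under the DRM minor's debugfs root:
+ * the writable control files listed in intel_display_debugfs_files and
+ * the read-only nodes in intel_display_debugfs_list.
+ *
+ * Returns 0 on success, negative error codes on error.
+ */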
+int intel_display_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
+ debugfs_create_file(intel_display_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ to_i915(minor->dev),
+ intel_display_debugfs_files[i].fops);
+ }
+
+ return drm_debugfs_create_files(intel_display_debugfs_list,
+ ARRAY_SIZE(intel_display_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->backlight_off_delay);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_panel);
+
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ /* Bail out early if the connector does not support HDCP */
+ if (!intel_connector->hdcp.shim)
+ return -EINVAL;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name,
+ connector->base.id);
+ intel_hdcp_info(m, intel_connector);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ struct drm_modeset_acquire_ctx ctx;
+ struct intel_crtc_state *crtc_state = NULL;
+ int ret = 0;
+ bool try_again = false;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+ do {
+ try_again = false;
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &ctx);
+ if (ret) {
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
+ break;
+ }
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ break;
+ }
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret) {
+ try_again = true;
+ continue;
+ }
+ break;
+ } else if (ret) {
+ break;
+ }
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "DSC_Enabled: %s\n",
+ yesno(crtc_state->dsc.compression_enable));
+ seq_printf(m, "DSC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "Force_DSC_Enable: %s\n",
+ yesno(intel_dp->force_dsc_en));
+ if (!intel_dp_is_edp(intel_dp))
+ seq_printf(m, "FEC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+ } while (try_again);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
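+/* Parse a boolean from userspace and store it as the force-DSC flag. */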
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ bool dsc_enable = false;
+ int ret;
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (len == 0)
+ return 0;
+
+ drm_dbg(&i915->drm,
+ "Copied %zu bytes from user to force DSC\n", len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
+ (dsc_enable) ? "true" : "false");
+ intel_dp->force_dsc_en = dsc_enable;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fec_support_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fec_support_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fec_support_write
+};
+
+/**
+ * intel_connector_debugfs_add - add i915 specific connector debugfs files
+ * @connector: pointer to a registered drm_connector
+ *
+ * Cleanup will be done by drm_connector_unregister() through a call to
+ * drm_debugfs_connector_remove().
+ *
+ * Returns 0 on success, negative error codes on error.
+ */
+int intel_connector_debugfs_add(struct drm_connector *connector)
+{
+ struct dentry *root = connector->debugfs_entry;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+ /* The connector must have been registered beforehand. */
+ if (!root)
+ return -ENODEV;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ connector, &i915_hdcp_sink_capability_fops);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+ debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+ connector, &i915_dsc_fec_support_fops);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
new file mode 100644
index 000000000000..a3bea1ce04c2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_H__
+#define __INTEL_DISPLAY_DEBUGFS_H__
+
+struct drm_connector;
+struct drm_i915_private;
+
+#ifdef CONFIG_DEBUG_FS
+int intel_display_debugfs_register(struct drm_i915_private *i915);
+int intel_connector_debugfs_add(struct drm_connector *connector);
+#else
+static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+#endif
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 46c40db992dd..246e406bb385 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -15,6 +15,7 @@
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
+#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -159,7 +160,7 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
power_well->desc->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
@@ -167,7 +168,7 @@ static void intel_power_well_enable(struct drm_i915_private *dev_priv,
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
power_well->hw_enabled = false;
power_well->desc->ops->disable(dev_priv, power_well);
}
@@ -182,8 +183,9 @@ static void intel_power_well_get(struct drm_i915_private *dev_priv,
static void intel_power_well_put(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN(!power_well->count, "Use count on power well %s is already zero",
- power_well->desc->name);
+ drm_WARN(&dev_priv->drm, !power_well->count,
+ "Use count on power well %s is already zero",
+ power_well->desc->name);
if (!--power_well->count)
intel_power_well_disable(dev_priv, power_well);
@@ -289,11 +291,11 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
if (intel_de_wait_for_set(dev_priv, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
- DRM_DEBUG_KMS("%s power well enable timeout\n",
- power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ power_well->desc->name);
/* An AUX timeout is expected if the TBT DP tunnel is down. */
- WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+ drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
}
}
@@ -304,11 +306,11 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
u32 ret;
- ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
- ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+ ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
if (regs->kvmr.reg)
- ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
- ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
+ ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
return ret;
}
@@ -330,23 +332,25 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(I915_READ(regs->driver) &
+ wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
(reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
if (disabled)
return;
- DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- power_well->desc->name,
- !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+ drm_dbg_kms(&dev_priv->drm,
+ "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ power_well->desc->name,
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -372,17 +376,18 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well);
/* Display WA #1178: cnl */
if (IS_CANNONLAKE(dev_priv) &&
pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
- val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
+ val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
+ intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
}
if (wait_fuses)
@@ -403,8 +408,9 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_power_well_pre_disable(dev_priv,
power_well->desc->hsw.irq_pipe_mask);
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -419,14 +425,16 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
if (INTEL_GEN(dev_priv) < 12) {
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val | ICL_LANE_ENABLE_AUX);
}
hsw_wait_for_power_well_enable(dev_priv, power_well);
@@ -434,9 +442,9 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
!intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
}
}
@@ -449,13 +457,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val & ~ICL_LANE_ENABLE_AUX);
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -485,7 +495,7 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
int refs = hweight64(power_well->desc->domains &
async_put_domains_mask(&dev_priv->power_domains));
- WARN_ON(refs > power_well->count);
+ drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
return refs;
}
@@ -515,7 +525,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
continue;
dig_port = enc_to_dig_port(encoder);
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
continue;
if (dig_port->aux_ch != aux_ch) {
@@ -526,10 +536,10 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
break;
}
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- WARN_ON(!intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}
#else
@@ -552,11 +562,11 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
icl_tc_port_assert_ref_held(dev_priv, power_well);
- val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
if (power_well->desc->hsw.is_tc_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
- I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+ intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
@@ -564,11 +574,13 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
DKL_CMN_UC_DW27_UC_HEALTH, 1))
- DRM_WARN("Timeout waiting TC uC health\n");
+ drm_warn(&dev_priv->drm,
+ "Timeout waiting TC uC health\n");
}
}
@@ -596,7 +608,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
HSW_PWR_WELL_CTL_STATE(pw_idx);
u32 val;
- val = I915_READ(regs->driver);
+ val = intel_de_read(dev_priv, regs->driver);
/*
* On GEN9 big core due to a DMC bug the driver's request bits for PW1
@@ -606,22 +618,26 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
*/
if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= I915_READ(regs->bios);
+ val |= intel_de_read(dev_priv, regs->bios);
return (val & mask) == mask;
}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
/*
* TODO: check for the following to verify the conditions to enter DC9
@@ -634,10 +650,12 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
/*
* TODO: check for the following to verify DC9 state was indeed
@@ -655,7 +673,7 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
int rereads = 0;
u32 v;
- I915_WRITE(DC_STATE_EN, state);
+ intel_de_write(dev_priv, DC_STATE_EN, state);
/* It has been observed that disabling the dc6 state sometimes
* doesn't stick and dmc keeps returning old value. Make sure
@@ -663,10 +681,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
* we are confident that state is exactly what we want.
*/
do {
- v = I915_READ(DC_STATE_EN);
+ v = intel_de_read(dev_priv, DC_STATE_EN);
if (v != state) {
- I915_WRITE(DC_STATE_EN, state);
+ intel_de_write(dev_priv, DC_STATE_EN, state);
rewrites++;
rereads = 0;
} else if (rereads++ > 5) {
@@ -676,13 +694,15 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
} while (rewrites < 100);
if (v != state)
- DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
- state, v);
+ drm_err(&dev_priv->drm,
+ "Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
/* Most of the times we need one retry, avoid spam */
if (rewrites > 1)
- DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
- state, rewrites);
+ drm_dbg_kms(&dev_priv->drm,
+ "Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
}
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
@@ -708,10 +728,11 @@ static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+ val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
- dev_priv->csr.dc_state, val);
+ drm_dbg_kms(&dev_priv->drm,
+ "Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->csr.dc_state, val);
dev_priv->csr.dc_state = val;
}
@@ -743,18 +764,19 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
u32 val;
u32 mask;
- if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
- val = I915_READ(DC_STATE_EN);
+ val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
- val & mask, state);
+ drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+ val & mask, state);
/* Check if DMC is ignoring our DC state requests */
if ((val & mask) != dev_priv->csr.dc_state)
- DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->csr.dc_state, val & mask);
+ drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
val &= ~mask;
val |= state;
@@ -791,7 +813,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_KMS("Enabling DC3CO\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
@@ -799,10 +821,10 @@ static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Disabling DC3CO\n");
- val = I915_READ(DC_STATE_EN);
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
+ val = intel_de_read(dev_priv, DC_STATE_EN);
val &= ~DC_STATE_DC3CO_STATUS;
- I915_WRITE(DC_STATE_EN, val);
+ intel_de_write(dev_priv, DC_STATE_EN, val);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* Delay of 200us DC3CO Exit time B.Spec 49196
@@ -814,7 +836,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
- DRM_DEBUG_KMS("Enabling DC9\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
/*
* Power sequencer reset is not needed on
* platforms with South Display Engine on PCH,
@@ -829,7 +851,7 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc9(dev_priv);
- DRM_DEBUG_KMS("Disabling DC9\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -838,10 +860,13 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
- WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ !intel_de_read(dev_priv, CSR_PROGRAM(0)),
+ "CSR program storage start is NULL\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
+ "CSR SSP Base Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
+ "CSR HTP Not fine\n");
}
static struct i915_power_well *
@@ -861,7 +886,9 @@ lookup_power_well(struct drm_i915_private *dev_priv,
* the first power well and hope the WARN gets reported so we can fix
* our driver.
*/
- WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+ drm_WARN(&dev_priv->drm, 1,
+ "Power well %d not defined for this platform\n",
+ power_well_id);
return &dev_priv->power_domains.power_wells[0];
}
@@ -884,7 +911,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
- if (WARN_ON(!power_well))
+ if (drm_WARN_ON(&dev_priv->drm, !power_well))
goto unlock;
state = sanitize_target_dc_state(dev_priv, state);
@@ -912,13 +939,22 @@ unlock:
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
- bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
- SKL_DISP_PW_2);
+ enum i915_power_well_id high_pg;
- WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (INTEL_GEN(dev_priv) >= 12)
+ high_pg = TGL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
assert_csr_loaded(dev_priv);
@@ -928,22 +964,25 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
- DRM_DEBUG_KMS("Enabling DC5\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
/* Wa Display #1183: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
assert_csr_loaded(dev_priv);
}
@@ -952,12 +991,12 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
- DRM_DEBUG_KMS("Enabling DC6\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
/* Wa Display #1183: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -968,15 +1007,15 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = I915_READ(regs->bios);
+ u32 bios_req = intel_de_read(dev_priv, regs->bios);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- u32 drv_req = I915_READ(regs->driver);
+ u32 drv_req = intel_de_read(dev_priv, regs->driver);
if (!(drv_req & mask))
- I915_WRITE(regs->driver, drv_req | mask);
- I915_WRITE(regs->bios, bios_req & ~mask);
+ intel_de_write(dev_priv, regs->driver, drv_req | mask);
+ intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
}
}
@@ -1022,22 +1061,25 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
- (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+ return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
- u32 tmp = I915_READ(DBUF_CTL);
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
- WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
- (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
- "Unexpected DBuf power power state (0x%08x)\n", tmp);
+ drm_WARN(&dev_priv->drm,
+ hw_enabled_dbuf_slices != enabled_dbuf_slices,
+ "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+ hw_enabled_dbuf_slices,
+ enabled_dbuf_slices);
}
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = {};
+ struct intel_cdclk_config cdclk_config = {};
if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
@@ -1046,9 +1088,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+ dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
- WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ &cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1108,9 +1152,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
- if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
@@ -1124,8 +1168,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1163,9 +1207,10 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
#undef COND
@@ -1204,8 +1249,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1214,7 +1259,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl != state);
vlv_punit_put(dev_priv);
@@ -1231,21 +1276,22 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- I915_WRITE(CBR1_VLV, 0);
-
- WARN_ON(dev_priv->rawclk_freq == 0);
+ intel_de_write(dev_priv, MI_ARB_VLV,
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ intel_de_write(dev_priv, CBR1_VLV, 0);
- I915_WRITE(RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+ drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ 1000));
}
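Assuming rawclk_freq is tracked in kHz (as it is elsewhere in i915), the value written to RAWCLK_FREQ_VLV is simply the raw clock in MHz; a quick worked example with a hypothetical clock:

u32 rawclk_khz = 200000;				/* 200 MHz raw clock */
u32 reg_val = DIV_ROUND_CLOSEST(rawclk_khz, 1000);	/* = 200 */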
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
@@ -1262,13 +1308,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
for_each_pipe(dev_priv, pipe) {
- u32 val = I915_READ(DPLL(pipe));
+ u32 val = intel_de_read(dev_priv, DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
+ intel_de_write(dev_priv, DPLL(pipe), val);
}
vlv_init_display_clock_gating(dev_priv);
@@ -1348,7 +1394,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1360,7 +1407,8 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
vlv_set_power_well(dev_priv, power_well, false);
}
@@ -1422,7 +1470,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
*/
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
if (BITS_SET(phy_control,
@@ -1467,9 +1515,10 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
*/
if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
phy_status_mask, phy_status, 10))
- DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
+ drm_err(&dev_priv->drm,
+ "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET
@@ -1481,8 +1530,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
u32 tmp;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
pipe = PIPE_A;
@@ -1499,7 +1549,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
/* Poll for phypwrgood signal */
if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
+ drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
+ phy);
vlv_dpio_get(dev_priv);
@@ -1527,10 +1578,12 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_put(dev_priv);
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
}
@@ -1540,8 +1593,9 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
phy = DPIO_PHY0;
@@ -1553,12 +1607,14 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
}
dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
- DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
dev_priv->chv_phy_assert[phy] = true;
@@ -1621,11 +1677,13 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- WARN(actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
+ drm_WARN(&dev_priv->drm, actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN),
+ !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN),
+ !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
}
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
@@ -1646,10 +1704,12 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1677,10 +1737,12 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1703,7 +1765,8 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
/*
@@ -1711,7 +1774,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
vlv_punit_put(dev_priv);
@@ -1742,9 +1805,10 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
#undef COND
@@ -1752,6 +1816,13 @@ out:
vlv_punit_put(dev_priv);
}
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+}
+
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1981,12 +2052,13 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
power_domains = &dev_priv->power_domains;
- WARN(!power_domains->domain_use_count[domain],
- "Use count on domain %s is already zero\n",
- name);
- WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
- "Async disabling of domain %s is pending\n",
- name);
+ drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ name);
+ drm_WARN(&dev_priv->drm,
+ async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ "Async disabling of domain %s is pending\n",
+ name);
power_domains->domain_use_count[domain]--;
@@ -2131,7 +2203,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
goto out_verify;
}
- WARN_ON(power_domains->domain_use_count[domain] != 1);
+ drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
@@ -2206,7 +2278,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
verify_async_put_domains_state(power_domains);
- WARN_ON(power_domains->async_put_wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -2674,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- TGL_PW_2_POWER_DOMAINS | \
+ TGL_PW_3_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -2734,7 +2806,7 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
};
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
+ .sync_hw = chv_pipe_power_well_sync_hw,
.enable = chv_pipe_power_well_enable,
.disable = chv_pipe_power_well_disable,
.is_enabled = chv_pipe_power_well_enabled,
@@ -3870,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = TGL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4204,11 +4276,13 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 4) {
- DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
- enable_dc, max_dc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
requested_dc = max_dc;
} else {
- DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+ drm_err(&dev_priv->drm,
+ "Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
@@ -4227,7 +4301,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
break;
}
- DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+ drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
@@ -4371,16 +4445,16 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
{
u32 val, status;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
udelay(10);
- status = I915_READ(reg) & DBUF_POWER_STATE;
+ status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
if ((enable && !status) || (!enable && status)) {
- DRM_ERROR("DBus power %s timeout!\n",
- enable ? "enable" : "disable");
+ drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
return false;
}
return true;
@@ -4388,80 +4462,60 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+ icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 11)
- return 1;
- return 2;
+ icl_dbuf_slices_update(dev_priv, 0);
}
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- bool ret;
+ int i;
+ int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
- if (req_slices > intel_dbuf_max_slices(dev_priv)) {
- DRM_ERROR("Invalid number of dbuf slices requested\n");
- return;
- }
+ drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
+ "Invalid number of dbuf slices requested\n");
- if (req_slices == hw_enabled_slices || req_slices == 0)
- return;
+ DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
- if (req_slices > hw_enabled_slices)
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
- else
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+ /*
+ * This might run in parallel to gen9_dc_off_power_well_enable
+ * being called from intel_dp_detect, for instance, and trip an
+ * assertion through a race: gen9_assert_dbuf_enabled might
+ * preempt us after the registers have been updated but before
+ * dev_priv has.
+ */
+ mutex_lock(&power_domains->lock);
- if (ret)
- dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+ for (i = 0; i < max_slices; i++) {
+ intel_dbuf_slice_set(dev_priv,
+ DBUF_CTL_S(i),
+ (req_slices & BIT(i)) != 0);
+ }
+
+ dev_priv->enabled_dbuf_slices_mask = req_slices;
+
+ mutex_unlock(&power_domains->lock);
}
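Callers now hand icl_dbuf_slices_update() an explicit bitmask of the slices they want powered; for example (a sketch, the slice choice is hypothetical):

/* power up both slices on a two-slice platform */
icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));

/* power everything down, as gen9_dbuf_disable() does above */
icl_dbuf_slices_update(dev_priv, 0);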
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
- else
- /*
- * FIXME: for now pretend that we only have 1 slice, see
- * intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ skl_ddb_get_hw_state(dev_priv);
+ /*
+ * Just power up at least one slice; we will figure out later
+ * which slices we have and which we need.
+ */
+ icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
+ BIT(DBUF_S1));
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power disable timeout!\n");
- else
- /*
- * FIXME: for now pretend that the first slice is always
- * enabled, see intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ icl_dbuf_slices_update(dev_priv, 0);
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -4472,19 +4526,21 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
MBUS_ABOX_B_CREDIT_MASK |
MBUS_ABOX_BW_CREDIT_MASK;
-
- val = I915_READ(MBUS_ABOX_CTL);
- val &= ~mask;
- val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
- I915_WRITE(MBUS_ABOX_CTL, val);
+
+ intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
+ intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
+ }
}
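intel_de_rmw(), used for the ABOX programming above, is a read-modify-write helper; judging from the call sites in this patch its semantics are roughly the following (a sketch, not the actual implementation):

static u32 intel_de_rmw_sketch(struct drm_i915_private *i915,
			       i915_reg_t reg, u32 clear, u32 set)
{
	u32 old = intel_de_read(i915, reg);

	intel_de_write(i915, reg, (old & ~clear) | set);
	return old;
}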
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
- u32 val = I915_READ(LCPLL_CTL);
+ u32 val = intel_de_read(dev_priv, LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -4493,13 +4549,13 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
*/
if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
+ drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
+ drm_err(&dev_priv->drm, "LCPLL is disabled\n");
if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
- DRM_ERROR("LCPLL not using non-SSC reference\n");
+ drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -4511,26 +4567,26 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
"Display power well on\n");
- I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
"SPLL enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
"WRPLL1 enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
"WRPLL2 enabled\n");
- I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+ I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
"Panel power on\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev_priv))
- I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
- I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
- I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
"PCH GTC enabled\n");
/*
@@ -4545,9 +4601,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv))
- return I915_READ(D_COMP_HSW);
+ return intel_de_read(dev_priv, D_COMP_HSW);
else
- return I915_READ(D_COMP_BDW);
+ return intel_de_read(dev_priv, D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
@@ -4555,10 +4611,11 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
if (IS_HASWELL(dev_priv)) {
if (sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_D_COMP, val))
- DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to write to D_COMP\n");
} else {
- I915_WRITE(D_COMP_BDW, val);
- POSTING_READ(D_COMP_BDW);
+ intel_de_write(dev_priv, D_COMP_BDW, val);
+ intel_de_posting_read(dev_priv, D_COMP_BDW);
}
}
@@ -4577,25 +4634,25 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
assert_can_disable_lcpll(dev_priv);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us(I915_READ(LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
- DRM_ERROR("Switching to FCLK failed\n");
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
- DRM_ERROR("LCPLL still locked\n");
+ drm_err(&dev_priv->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
@@ -4604,13 +4661,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((hsw_read_dcomp(dev_priv) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
- DRM_ERROR("D_COMP RCOMP still in progress\n");
+ drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val |= LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
@@ -4622,7 +4679,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
@@ -4636,8 +4693,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
}
val = hsw_read_dcomp(dev_priv);
@@ -4645,27 +4702,28 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
- DRM_ERROR("LCPLL not locked yet\n");
+ drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us((I915_READ(LCPLL_CTL) &
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
+ drm_err(&dev_priv->drm,
+ "Switching back to LCPLL failed\n");
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
}
/*
@@ -4695,12 +4753,12 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Enabling package C8+\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
lpt_disable_clkout_dp(dev_priv);
@@ -4711,15 +4769,15 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Disabling package C8+\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
}
@@ -4737,14 +4795,14 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (enable)
val |= reset_bits;
else
val &= ~reset_bits;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -4769,7 +4827,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -4786,7 +4844,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
@@ -4830,7 +4888,7 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -4847,7 +4905,7 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -4889,7 +4947,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
@@ -4911,7 +4969,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -4970,25 +5028,23 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
break;
if (table[i].page_mask == 0) {
- DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
- I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
- I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ drm_dbg(&dev_priv->drm,
+ "Unknown memory configuration; disabling address buddy logic.\n");
+ intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+ intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
} else {
- u32 val;
-
- I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
- I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+ intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
+ table[i].page_mask);
+ intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
+ table[i].page_mask);
/* Wa_22010178259:tgl */
- val = I915_READ(BW_BUDDY1_CTL);
- val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
- val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
- I915_WRITE(BW_BUDDY1_CTL, val);
-
- val = I915_READ(BW_BUDDY2_CTL);
- val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
- val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
- I915_WRITE(BW_BUDDY2_CTL, val);
+ intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
+ intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
}
}
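REG_FIELD_PREP() shifts a value into the register field selected by a mask; assuming BW_BUDDY_TLB_REQ_TIMER_MASK covers bits 23:16, REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8) evaluates to 0x8 << 16. Conceptually (a simplified sketch of the bitfield helper):

#define FIELD_PREP_SKETCH(mask, val) \
	(((u32)(val) << __builtin_ctz(mask)) & (mask))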
@@ -5016,7 +5072,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
/* 4. Enable CDCLK. */
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
/* 5. Enable DBUF. */
icl_dbuf_enable(dev_priv);
@@ -5045,7 +5101,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
icl_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -5090,7 +5146,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* current lane status.
*/
if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- u32 status = I915_READ(DPLL(PIPE_A));
+ u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
@@ -5121,7 +5177,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
}
if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- u32 status = I915_READ(DPIO_PHY_STATUS);
+ u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -5142,10 +5198,10 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
dev_priv->chv_phy_assert[DPIO_PHY1] = true;
}
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
+ /* Defer applying the initial phy_control until the power well is enabled */
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
@@ -5158,10 +5214,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
/* If the display might be already active skip this */
if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
return;
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
+ drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
disp2d->desc->ops->enable(dev_priv, disp2d);
@@ -5189,8 +5245,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
- WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
- "VED not power gated\n");
+ drm_WARN(&dev_priv->drm,
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
}
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
@@ -5201,9 +5258,9 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{}
};
- WARN(!pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
- "ISP not power gated\n");
+ drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
}
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
@@ -5230,9 +5287,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
- /* Must happen before power domain init on VLV/CHV */
- intel_update_rawclk(i915);
-
if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
@@ -5336,7 +5390,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
@@ -5418,7 +5472,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
@@ -5436,13 +5490,13 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
- DRM_DEBUG_DRIVER("%-25s %d\n",
- power_well->desc->name, power_well->count);
+ drm_dbg(&i915->drm, "%-25s %d\n",
+ power_well->desc->name, power_well->count);
for_each_power_domain(domain, power_well->desc->domains)
- DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg(&i915->drm, " %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
}
@@ -5475,19 +5529,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
enabled = power_well->desc->ops->is_enabled(i915, power_well);
if ((power_well->count || power_well->desc->always_on) !=
enabled)
- DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
- power_well->desc->name,
- power_well->count, enabled);
+ drm_err(&i915->drm,
+ "power well %s state mismatch (refcount %d/enabled %d)",
+ power_well->desc->name,
+ power_well->count, enabled);
domains_count = 0;
for_each_power_domain(domain, power_well->desc->domains)
domains_count += power_domains->domain_use_count[domain];
if (power_well->count != domains_count) {
- DRM_ERROR("power well %s refcount/domain refcount mismatch "
- "(refcount %d/domains refcount %d)\n",
- power_well->desc->name, power_well->count,
- domains_count);
+ drm_err(&i915->drm,
+ "power well %s refcount/domain refcount mismatch "
+ "(refcount %d/domains refcount %d)\n",
+ power_well->desc->name, power_well->count,
+ domains_count);
dump_domain_info = true;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2608a65af7fa..da64a5edae7a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -100,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ TGL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
@@ -307,6 +308,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
}
#endif
+enum dbuf_slice {
+ DBUF_S1,
+ DBUF_S2,
+};
+
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
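For reference, a typical use of this helper looks like the following (the domain is chosen for illustration):

intel_wakeref_t wakeref;

with_intel_display_power(i915, POWER_DOMAIN_AUX_A, wakeref) {
	/* registers in the AUX A domain may be accessed here; the
	 * reference is dropped asynchronously when the block exits */
}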
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 888ea8a170d1..5e00e611f077 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -39,13 +39,14 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/i915_drm.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
#include "i915_drv.h"
+#include "intel_de.h"
struct drm_printer;
+struct __intel_global_objs_state;
/*
* Display related stuff
@@ -139,6 +140,9 @@ struct intel_encoder {
int (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
+ int (*compute_config_late)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
void (*update_prepare)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
@@ -214,6 +218,9 @@ struct intel_panel {
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
+ /* DPCD backlight */
+ u8 pwmgen_bit_count;
+
struct backlight_device *device;
/* Connector and platform specific backlight functions */
@@ -458,25 +465,8 @@ struct intel_atomic_state {
intel_wakeref_t wakeref;
- struct {
- /*
- * Logical state of cdclk (used for all scaling, watermark,
- * etc. calculations and checks). This is computed as if all
- * enabled crtcs were active.
- */
- struct intel_cdclk_state logical;
-
- /*
- * Actual state of cdclk, can be different from the logical
- * state only when all crtc's are DPMS off.
- */
- struct intel_cdclk_state actual;
-
- int force_min_cdclk;
- bool force_min_cdclk_changed;
- /* pipe to which cd2x update is synchronized */
- enum pipe pipe;
- } cdclk;
+ struct __intel_global_objs_state *global_objs;
+ int num_global_objs;
bool dpll_set, modeset;
@@ -491,10 +481,6 @@ struct intel_atomic_state {
u8 active_pipe_changes;
u8 active_pipes;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -508,14 +494,11 @@ struct intel_atomic_state {
/*
* active_pipes
- * min_cdclk[]
- * min_voltage_level[]
- * cdclk.*
*/
bool global_state_changed;
- /* Gen9+ only */
- struct skl_ddb_values wm_results;
+ /* Number of enabled DBuf slices */
+ u8 enabled_dbuf_slices_mask;
struct i915_sw_fence commit_ready;
@@ -611,6 +594,7 @@ struct intel_plane_state {
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
+ struct i915_vma *vma;
unsigned int tiling;
int size;
u32 base;
@@ -657,15 +641,30 @@ struct intel_crtc_scaler_state {
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+struct intel_wm_level {
+ bool enable;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
+};
+
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- u32 linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};
+struct skl_wm_level {
+ u16 min_ddb_alloc;
+ u16 plane_res_b;
+ u8 plane_res_l;
+ bool plane_en;
+ bool ignore_lines;
+};
+
struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
@@ -675,7 +674,6 @@ struct skl_plane_wm {
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
- u32 linetime;
};
enum vlv_wm_level {
@@ -1046,6 +1044,10 @@ struct intel_crtc_state {
struct drm_dsc_config config;
} dsc;
+ /* HSW+ linetime watermarks */
+ u16 linetime;
+ u16 ips_linetime;
+
/* Forward Error correction State */
bool fec_enable;
@@ -1059,6 +1061,32 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
};
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ int skipped;
+ enum intel_pipe_crc_source source;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1102,6 +1130,10 @@ struct intel_crtc {
/* per pipe DSB related info */
struct intel_dsb dsb;
+
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc;
+#endif
};
struct intel_plane {
@@ -1181,8 +1213,6 @@ struct intel_hdmi {
};
struct intel_dp_mst_encoder;
-#define DP_MAX_DOWNSTREAM_PORTS 0x10
-
/*
* enum link_m_n_set:
* When platform provides two set of M_N registers for dp, we can
@@ -1250,6 +1280,7 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
+ u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
@@ -1422,8 +1453,17 @@ vlv_pipe_to_channel(enum pipe pipe)
}
static inline struct intel_crtc *
+intel_get_first_crtc(struct drm_i915_private *dev_priv)
+{
+ return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0));
+}
+
+static inline struct intel_crtc *
intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ /* pipe_to_crtc_mapping may have holes on a 3-display-pipe system */
+ drm_WARN_ON(&dev_priv->drm,
+ !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe)));
return dev_priv->pipe_to_crtc_mapping[pipe];
}
@@ -1469,7 +1509,7 @@ enc_to_dig_port(struct intel_encoder *encoder)
}
static inline struct intel_digital_port *
-conn_to_dig_port(struct intel_connector *connector)
+intel_attached_dig_port(struct intel_connector *connector)
{
return enc_to_dig_port(intel_attached_encoder(connector));
}
@@ -1486,6 +1526,11 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
return &enc_to_dig_port(encoder)->dp;
}
+static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
+{
+ return enc_to_intel_dp(intel_attached_encoder(connector));
+}
+
static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
{
switch (encoder->type) {
@@ -1608,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_DP_MST) |
(1 << INTEL_OUTPUT_EDP));
}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(&dev_priv->drm, pipe);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_wait_one_vblank(&crtc->base);
}
+
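The rewritten helper goes through intel_get_crtc_for_pipe() instead of passing the pipe index straight to drm_wait_one_vblank(); on parts where the pipe-to-crtc mapping has holes the drm crtc index need not match the pipe number, so waiting on the crtc picks the correct vblank. A call site sketch (pipe chosen for illustration):

/* waits for one vblank on whichever crtc drives pipe B */
intel_wait_for_vblank(dev_priv, PIPE_B);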
static inline void
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 208457005a11..804b1d966f66 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,7 +40,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -49,6 +48,7 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -146,11 +146,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
-{
- return enc_to_intel_dp(intel_attached_encoder(connector));
-}
-
static void intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -272,7 +267,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
/* Low voltage SKUs are limited to max of 5.4G */
if (voltage == VOLTAGE_INFO_0_85V)
@@ -323,14 +318,14 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
162000, 270000
};
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[dig_port->base.port];
const int *source_rates;
- int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
+ int size, max_rate = 0, vbt_max_rate;
/* This should only be done once */
- WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->source_rates || intel_dp->num_source_rates);
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
@@ -354,6 +349,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(g4x_rates);
}
+ vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
if (max_rate && vbt_max_rate)
max_rate = min(max_rate, vbt_max_rate);
else if (vbt_max_rate)
@@ -519,12 +515,13 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
*/
bits_per_pixel = (link_clock * lane_count * 8) /
intel_dp_mode_to_fec_clock(mode_clock);
- DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+ drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
mode_hdisplay;
- DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+ drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
+ max_bpp_small_joiner_ram);
/*
* Greatest allowed DSC BPP = MIN (output BPP from available Link BW
@@ -534,8 +531,8 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
/* Error out if the max bpp is less than smallest allowed valid bpp */
if (bits_per_pixel < valid_dsc_bpp[0]) {
- DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
- bits_per_pixel, valid_dsc_bpp[0]);
+ drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
return 0;
}
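A rough worked example of the limit computed above (all numbers assumed, purely illustrative): with link_clock = 270000, lane_count = 4 and a FEC-adjusted mode clock of 300000,

u32 bits_per_pixel = (270000 * 4 * 8) / 300000;	/* = 28 */

which is then capped against the small-joiner RAM limit before the valid-bpp check.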
@@ -760,20 +757,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
- if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name))
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
- DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
@@ -783,7 +782,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
else
DP |= DP_PIPE_SEL(pipe);
- pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
/*
* The DPLL for the pipe must be enabled for this to work.
@@ -795,8 +794,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
- DRM_ERROR("Failed to force on pll for pipe %c!\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
return;
}
}
@@ -807,14 +807,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
- I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
- I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
if (!pll_enabled) {
vlv_force_pll_off(dev_priv, pipe);
@@ -837,13 +837,16 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe !=
+ intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->pps_pipe);
} else {
- WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps_pipe != INVALID_PIPE);
if (intel_dp->active_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->active_pipe);
@@ -866,10 +869,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -880,16 +883,17 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (WARN_ON(pipe == INVALID_PIPE))
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
pipe = PIPE_A;
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
- DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe),
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe),
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -913,7 +917,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps_reset)
return backlight_controller;
@@ -935,13 +939,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(PP_STATUS(pipe)) & PP_ON;
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
@@ -958,7 +962,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe;
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel != PANEL_PORT_SELECT_VLV(port))
@@ -997,16 +1001,18 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return;
}
- DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
- pipe_name(intel_dp->pps_pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -1016,8 +1022,10 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
return;
/*
@@ -1033,7 +1041,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@@ -1119,12 +1128,13 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pp_ctrl_reg = PP_CONTROL(pipe);
pp_div_reg = PP_DIVISOR(pipe);
- pp_div = I915_READ(pp_div_reg);
+ pp_div = intel_de_read(dev_priv, pp_div_reg);
pp_div &= PP_REFERENCE_DIVIDER_MASK;
/* 0x1F write to PP_DIV_REG sets max cycle delay */
- I915_WRITE(pp_div_reg, pp_div | 0x1F);
- I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
+ intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
+ intel_de_write(dev_priv, pp_ctrl_reg,
+ PANEL_UNLOCK_REGS);
msleep(intel_dp->panel_power_cycle_delay);
}
}
@@ -1142,7 +1152,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
intel_dp->pps_pipe == INVALID_PIPE)
return false;
- return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
@@ -1155,7 +1165,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
intel_dp->pps_pipe == INVALID_PIPE)
return false;
- return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
static void
@@ -1167,10 +1177,11 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- WARN(1, "eDP powered off while attempting aux channel communication.\n");
- DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
- I915_READ(_pp_stat_reg(intel_dp)),
- I915_READ(_pp_ctrl_reg(intel_dp)));
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
}
}
@@ -1191,8 +1202,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (!done)
- DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
- intel_dp->aux.name, timeout_ms, status);
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
@@ -1209,13 +1221,14 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based on the hrawclk, and the AUX clock should run
* at 2 MHz. So take the hrawclk value (in kHz), divide by 2000 and use that
*/
*/
- return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
if (index)
return 0;
@@ -1226,9 +1239,10 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
+ freq = dev_priv->cdclk.hw.cdclk;
else
- return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
}
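Both dividers target an AUX clock of roughly 2 MHz: the source frequency, in kHz, is divided by 2000 with rounding. With a hypothetical 24,000 kHz rawclk:

    DIV_ROUND_CLOSEST(24000, 2000) == 12    /* 24 MHz / 12 = 2 MHz */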
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -1378,8 +1392,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
const u32 status = intel_uncore_read(uncore, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
- WARN(1, "dp_aux_ch not started status 0x%08x\n",
- status);
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
intel_dp->aux_busy_last_status = status;
}
@@ -1388,7 +1403,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
/* Only 5 data registers! */
- if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
@@ -1440,7 +1455,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -EBUSY;
goto out;
}
@@ -1450,7 +1466,8 @@ done:
* Timeouts occur when the sink is not connected
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -EIO;
goto out;
}
@@ -1458,7 +1475,8 @@ done:
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -ETIMEDOUT;
goto out;
}
@@ -1473,8 +1491,9 @@ done:
* drm layer takes care for the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
- DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
- recv_bytes);
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
ret = -EBUSY;
goto out;
}
@@ -1742,7 +1761,8 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
+ aux_ch_name(dig_port->aux_ch),
port_name(encoder->port));
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
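The new name format encodes both the AUX channel and the port, which makes cross-wired setups visible in logs. For example (hypothetical wiring), AUX_CH_B routed through PORT_B produces the name:

    "AUX B/port B"    /* was "DPDDC-B" with the old format */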
@@ -1918,8 +1938,9 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
/* Get bpp from vbt only for panels that don't have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
- DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp.bpp);
bpp = dev_priv->vbt.edp.bpp;
}
}
@@ -2115,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
- DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No DSC support for less than 8bpc\n");
return -EINVAL;
}
@@ -2150,7 +2172,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
- DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Compressed BPP/Slice Count not supported\n");
return -EINVAL;
}
pipe_config->dsc.compressed_bpp = min_t(u16,
@@ -2167,26 +2190,28 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (pipe_config->dsc.slice_count > 1) {
pipe_config->dsc.dsc_split = true;
} else {
- DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
}
}
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
- DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
- "Compressed BPP = %d\n",
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
return ret;
}
pipe_config->dsc.compression_enable = true;
- DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = %d Slice Count = %d\n",
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp,
- pipe_config->dsc.slice_count);
+ drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
return 0;
}
@@ -2214,7 +2239,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->max_link_rate);
/* No common link rates between source and sink */
- WARN_ON(common_len <= 0);
+ drm_WARN_ON(encoder->base.dev, common_len <= 0);
limits.min_clock = 0;
limits.max_clock = common_len - 1;
@@ -2373,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
@@ -2515,7 +2540,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
@@ -2539,12 +2564,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
trans_dp |= TRANS_DP_ENH_FRAMING;
else
trans_dp &= ~TRANS_DP_ENH_FRAMING;
- I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
+ intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
@@ -2590,18 +2615,20 @@ static void wait_panel_status(struct intel_dp *intel_dp,
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
- mask, value,
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
mask, value, 5000))
- DRM_ERROR("Panel status timeout: status %08x control %08x\n",
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
- DRM_DEBUG_KMS("Wait complete\n");
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
static void wait_panel_on(struct intel_dp *intel_dp)
@@ -2660,9 +2687,9 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- control = I915_READ(_pp_ctrl_reg(intel_dp));
- if (WARN_ON(!HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
}
@@ -2696,9 +2723,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
- DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2709,17 +2736,19 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
- DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2759,14 +2788,14 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->want_panel_vdd);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2774,12 +2803,13 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2851,14 +2881,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
- if (WARN(edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@@ -2868,24 +2898,24 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
pp |= PANEL_POWER_ON;
if (!IS_GEN(dev_priv, 5))
pp |= PANEL_POWER_RESET;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
if (IS_GEN(dev_priv, 5)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
}
@@ -2913,11 +2943,12 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2929,8 +2960,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->want_panel_vdd = false;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
wait_panel_off(intel_dp);
intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2971,8 +3002,8 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
}
@@ -3007,8 +3038,8 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
intel_dp->last_backlight_off = jiffies;
@@ -3059,7 +3090,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
+ bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
@@ -3070,7 +3101,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
- bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -3089,8 +3120,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
- DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
- pipe_config->port_clock);
+ drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ pipe_config->port_clock);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
@@ -3099,8 +3130,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(500);
/*
@@ -3114,8 +3145,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
intel_dp->DP |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(200);
}
@@ -3129,12 +3160,12 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
- DRM_DEBUG_KMS("disabling eDP PLL\n");
+ drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
intel_dp->DP &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(200);
}
@@ -3149,7 +3180,7 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
* FIXME should really check all downstream ports...
*/
return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
- intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ drm_dp_is_branch(intel_dp->dpcd) &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
@@ -3214,7 +3245,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
enum pipe p;
for_each_pipe(dev_priv, p) {
- u32 val = I915_READ(TRANS_DP_CTL(p));
+ u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
*pipe = p;
@@ -3222,7 +3253,8 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
}
}
- DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ port_name(port));
/* must initialize pipe to something for the asserts */
*pipe = PIPE_A;
@@ -3237,7 +3269,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
bool ret;
u32 val;
- val = I915_READ(dp_reg);
+ val = intel_de_read(dev_priv, dp_reg);
ret = val & DP_PORT_EN;
@@ -3289,12 +3321,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
else
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- tmp = I915_READ(intel_dp->output_reg);
+ tmp = intel_de_read(dev_priv, intel_dp->output_reg);
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ u32 trans_dp = intel_de_read(dev_priv,
+ TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3328,7 +3361,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dp_get_m_n(crtc, pipe_config);
if (port == PORT_A) {
- if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -3353,8 +3386,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
}
@@ -3447,11 +3481,12 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
if (dp_train_pat & train_pat_mask)
- DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
- dp_train_pat & train_pat_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DP training pattern TPS%d\n",
+ dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
+ u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3477,7 +3512,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3494,7 +3529,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
- DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
@@ -3513,7 +3549,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2;
break;
}
@@ -3539,8 +3576,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
if (old_crtc_state->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_encoder *encoder,
@@ -3550,11 +3587,11 @@ static void intel_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- u32 dp_reg = I915_READ(intel_dp->output_reg);
+ u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
- if (WARN_ON(dp_reg & DP_PORT_EN))
+ if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
with_pps_lock(intel_dp, wakeref) {
@@ -3583,8 +3620,8 @@ static void intel_enable_dp(struct intel_encoder *encoder,
intel_dp_stop_link_train(intel_dp);
if (pipe_config->has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(pipe));
+ drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
+ pipe_name(pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -3625,9 +3662,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
edp_panel_vdd_off_sync(intel_dp);
@@ -3641,11 +3678,12 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- I915_WRITE(pp_on_reg, 0);
- POSTING_READ(pp_on_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
intel_dp->pps_pipe = INVALID_PIPE;
}
@@ -3660,17 +3698,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
- DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@@ -3686,7 +3725,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
if (intel_dp->pps_pipe != INVALID_PIPE &&
intel_dp->pps_pipe != crtc->pipe) {
@@ -3712,9 +3751,10 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
- DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+ encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -4140,18 +4180,22 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
}
if (mask)
- DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
-
- DRM_DEBUG_KMS("Using vswing level %d\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
- DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
+ drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "");
intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void
@@ -4164,8 +4208,8 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -4178,10 +4222,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -4195,7 +4239,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DP idle patterns\n");
}
static void
@@ -4208,10 +4253,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 DP = intel_dp->DP;
- if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, intel_dp->output_reg) &
+ DP_PORT_EN) == 0))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -4221,12 +4268,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
DP &= ~DP_LINK_TRAIN_MASK;
DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -4245,12 +4292,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
DP &= ~DP_PORT_EN;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -4370,7 +4417,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
/* this function is meant to be called only once */
- WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -4390,8 +4437,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
- DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
- intel_dp->edp_dpcd);
+ drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ (int)sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
@@ -4466,7 +4514,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* we don't care about reading it here or in intel_edp_init_dpcd().
*/
if (!intel_dp_is_edp(intel_dp) &&
- !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
+ !drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -5125,7 +5174,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+ drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
return 0;
@@ -5195,7 +5244,8 @@ intel_dp_hotplug(struct intel_encoder *encoder,
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
/*
* Keeping it consistent with intel_ddi_hotplug() and
@@ -5281,7 +5331,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
- DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
drm_kms_helper_hotplug_event(&dev_priv->drm);
}
@@ -5370,7 +5421,7 @@ static bool ibx_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
@@ -5393,7 +5444,7 @@ static bool cpt_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool spt_digital_port_connected(struct intel_encoder *encoder)
@@ -5412,7 +5463,7 @@ static bool spt_digital_port_connected(struct intel_encoder *encoder)
return cpt_digital_port_connected(encoder);
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
@@ -5435,7 +5486,7 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
@@ -5458,7 +5509,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
@@ -5466,7 +5517,7 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
else
return ibx_digital_port_connected(encoder);
}
@@ -5476,7 +5527,7 @@ static bool snb_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(encoder);
}
@@ -5486,7 +5537,7 @@ static bool ivb_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
else
return cpt_digital_port_connected(encoder);
}
@@ -5496,7 +5547,7 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(encoder);
}
@@ -5521,16 +5572,16 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
enum phy phy)
{
if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
- return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+ return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
- return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
+ return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}
static bool icp_digital_port_connected(struct intel_encoder *encoder)
@@ -5631,6 +5682,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = drm_detect_monitor_audio(edid);
drm_dp_cec_set_edid(&intel_dp->aux, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -5643,6 +5695,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = NULL;
intel_dp->has_audio = false;
+ intel_dp->edid_quirks = 0;
}
static int
@@ -5656,9 +5709,10 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@@ -5673,9 +5727,10 @@ intel_dp_detect(struct drm_connector *connector,
memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
if (intel_dp->is_mst) {
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
+ drm_dbg_kms(&dev_priv->drm,
+ "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -5763,8 +5818,8 @@ intel_dp_force(struct drm_connector *connector)
intel_aux_power_domain(dig_port);
intel_wakeref_t wakeref;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
intel_dp_unset_edid(intel_dp);
if (connector->status != connector_status_connected)
@@ -5815,7 +5870,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- i915_debugfs_connector_add(connector);
+ intel_connector_debugfs_add(connector);
DRM_DEBUG_KMS("registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);
@@ -6396,6 +6451,7 @@ static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
bool is_repeater, u8 content_type)
{
+ int ret;
struct hdcp2_dp_errata_stream_type stream_type_msg;
if (is_repeater)
@@ -6411,8 +6467,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
- return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
sizeof(stream_type_msg));
+
+ return ret < 0 ? ret : 0;
+
}
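intel_dp_hdcp2_write_msg() presumably returns the number of bytes written on success, like the DPCD helpers it wraps, while the HDCP core expects 0 for success. The new tail collapses any non-negative byte count accordingly:

    ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
                                   sizeof(stream_type_msg));
    return ret < 0 ? ret : 0;    /* negative errno, or 0 on success */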
static
@@ -6492,7 +6551,8 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
- DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
edp_panel_vdd_schedule_off(intel_dp);
@@ -6519,7 +6579,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
- intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
if (lspcon->active)
lspcon_resume(lspcon);
@@ -6545,6 +6605,140 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
}
}
+static int intel_modeset_tile_group(struct intel_atomic_state *state,
+ int tile_group_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_group_id)
+ continue;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ crtc = to_intel_crtc(conn_state->crtc);
+
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ if (transcoders == 0)
+ return 0;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->hw.enable)
+ continue;
+
+ if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
+ continue;
+
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ transcoders &= ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+
+ return 0;
+}
+
+static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(&state->base, connector);
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc;
+ u8 transcoders;
+
+ crtc = to_intel_crtc(old_conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!old_crtc_state->hw.active)
+ return 0;
+
+ transcoders = old_crtc_state->sync_mode_slaves_mask;
+ if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ transcoders |= BIT(old_crtc_state->master_transcoder);
+
+ return intel_modeset_affected_transcoders(state,
+ transcoders);
+}
+
+static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(conn->dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(conn, &state->base);
+ if (ret)
+ return ret;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, conn))
+ return 0;
+
+ if (conn->has_tile) {
+ ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (ret)
+ return ret;
+ }
+
+ return intel_modeset_synced_crtcs(state, conn);
+}
+
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -6561,7 +6755,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.detect_ctx = intel_dp_detect,
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .atomic_check = intel_digital_connector_atomic_check,
+ .atomic_check = intel_dp_connector_atomic_check,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -6697,10 +6891,10 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
/* Ensure PPS is unlocked */
if (!HAS_DDI(dev_priv))
- I915_WRITE(regs.pp_ctrl, pp_ctl);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- pp_on = I915_READ(regs.pp_on);
- pp_off = I915_READ(regs.pp_off);
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
/* Pull timing values out of registers */
seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
@@ -6711,7 +6905,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
- pp_div = I915_READ(regs.pp_div);
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
@@ -6768,8 +6962,9 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
- DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
}
/* T11_T12 delay is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
@@ -6811,12 +7006,15 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay,
+ intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay,
+ intel_dp->backlight_off_delay);
/*
* We override the HW backlight delays to 1 because we do manual waits
@@ -6841,7 +7039,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp_on, pp_off, port_sel = 0;
- int div = dev_priv->rawclk_freq / 1000;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
@@ -6865,14 +7063,16 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (force_disable_vdd) {
u32 pp = ilk_get_pp_control(intel_dp);
- WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
if (pp & EDP_FORCE_VDD)
- DRM_DEBUG_KMS("VDD already on, disabling first\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
pp &= ~EDP_FORCE_VDD;
- I915_WRITE(regs.pp_ctrl, pp);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
}
pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
@@ -6903,31 +7103,31 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
pp_on |= port_sel;
- I915_WRITE(regs.pp_on, pp_on);
- I915_WRITE(regs.pp_off, pp_off);
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
if (i915_mmio_reg_valid(regs.pp_div)) {
- I915_WRITE(regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
} else {
u32 pp_ctl;
- pp_ctl = I915_READ(regs.pp_ctrl);
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- I915_WRITE(regs.pp_ctrl, pp_ctl);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
}
- DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- I915_READ(regs.pp_on),
- I915_READ(regs.pp_off),
- i915_mmio_reg_valid(regs.pp_div) ?
- I915_READ(regs.pp_div) :
- (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
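The reference-divider math can be sanity-checked by hand. With a hypothetical 24,000 kHz rawclk:

    int div = 24000 / 1000;            /* = 24 */
    int ref = (100 * div) / 2 - 1;     /* = 1199, the PP_REFERENCE_DIVIDER field */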
static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6964,22 +7164,24 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
- DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Refresh rate should be positive non-zero.\n");
return;
}
if (intel_dp == NULL) {
- DRM_DEBUG_KMS("DRRS not supported.\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
return;
}
if (!intel_crtc) {
- DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS: intel_crtc not initialized\n");
return;
}
if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
- DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+ drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
return;
}
@@ -6988,13 +7190,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
index = DRRS_LOW_RR;
if (index == dev_priv->drrs.refresh_rate_type) {
- DRM_DEBUG_KMS(
- "DRRS requested for previously set RR...ignoring\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS requested for previously set RR...ignoring\n");
return;
}
if (!crtc_state->hw.active) {
- DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "eDP encoder disabled. CRTC not Active\n");
return;
}
@@ -7008,13 +7211,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
break;
case DRRS_MAX_RR:
default:
- DRM_ERROR("Unsupported refreshrate type\n");
+ drm_err(&dev_priv->drm,
+ "Unsupported refreshrate type\n");
}
} else if (INTEL_GEN(dev_priv) > 6) {
i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (index > DRRS_HIGH_RR) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
@@ -7026,12 +7230,13 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
else
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
dev_priv->drrs.refresh_rate_type = index;
- DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+ drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
+ refresh_rate);
}
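On gen7+ the seamless switch is a single PIPECONF bit, so the read-modify-write above reduces to this sketch (VLV/CHV use the _VLV variant of the bit):

    u32 val = intel_de_read(dev_priv, reg);
    if (index > DRRS_HIGH_RR)
            val |= PIPECONF_EDP_RR_MODE_SWITCH;     /* switch to the downclocked mode */
    else
            val &= ~PIPECONF_EDP_RR_MODE_SWITCH;    /* back to the high refresh rate */
    intel_de_write(dev_priv, reg, val);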
/**
@@ -7047,18 +7252,19 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!crtc_state->has_drrs) {
- DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+ drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
return;
}
if (dev_priv->psr.enabled) {
- DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR enabled. Not enabling DRRS.\n");
return;
}
mutex_lock(&dev_priv->drrs.mutex);
if (dev_priv->drrs.dp) {
- DRM_DEBUG_KMS("DRRS already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
goto unlock;
}
@@ -7284,25 +7490,28 @@ intel_dp_drrs_init(struct intel_connector *connector,
mutex_init(&dev_priv->drrs.mutex);
if (INTEL_GEN(dev_priv) <= 6) {
- DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported for Gen7 and above\n");
return NULL;
}
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
- DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
+ drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
return NULL;
}
downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
if (!downclock_mode) {
- DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Downclock mode is not found. DRRS not supported\n");
return NULL;
}
dev_priv->drrs.type = dev_priv->vbt.drrs_type;
dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
- DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
@@ -7331,8 +7540,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- DRM_INFO("LVDS was detected, not registering eDP\n");
+ drm_WARN_ON(dev,
+ !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ drm_info(&dev_priv->drm,
+ "LVDS was detected, not registering eDP\n");
return false;
}
@@ -7348,7 +7559,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
- DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ drm_info(&dev_priv->drm,
+ "failed to retrieve link info, disabling eDP\n");
goto out_vdd_off;
}
@@ -7356,8 +7568,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
+ drm_connector_update_edid_property(connector, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7393,17 +7605,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
- DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "using pipe %c for initial backlight setup\n",
+ pipe_name(pipe));
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
- if (fixed_mode)
- drm_connector_init_panel_orientation_property(
- connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+ if (fixed_mode) {
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ dev_priv->vbt.orientation,
+ fixed_mode->hdisplay, fixed_mode->vdisplay);
+ }
return true;
@@ -7459,10 +7674,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
INIT_WORK(&intel_connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
- if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7472,7 +7687,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
- intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
if (intel_dp_is_port_edp(dev_priv, port)) {
@@ -7480,7 +7695,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- WARN_ON(intel_phy_is_tc(dev_priv, phy));
+ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -7498,14 +7713,16 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp_is_edp(intel_dp) &&
- port != PORT_B && port != PORT_C))
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
return false;
- DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
- type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7518,6 +7735,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_dp_aux_init(intel_dp);
@@ -7543,7 +7761,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
}
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -7551,8 +7770,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* generated on the port when a cable is not attached.
*/
if (IS_G45(dev_priv)) {
- u32 temp = I915_READ(PEG_BAND_GAP_DATA);
- I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
}
return true;
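
Every hunk in this file follows the same two mechanical conversions: the implicit-context I915_READ()/I915_WRITE() accessors become the explicit intel_de_read()/intel_de_write() display-engine helpers, and the global DRM_DEBUG_KMS()/DRM_ERROR() macros become the device-aware drm_dbg_kms()/drm_err() so messages identify which GPU they came from. An illustrative before/after fragment (the register and flag are chosen only for the example):

	/* before: global macros, no device context in the log output */
	u32 pp = I915_READ(regs.pp_ctrl);
	DRM_DEBUG_KMS("PP_CONTROL: %#x\n", pp);
	I915_WRITE(regs.pp_ctrl, pp | EDP_FORCE_VDD);

	/* after: explicit device, so multi-GPU logs stay attributable */
	u32 pp = intel_de_read(dev_priv, regs.pp_ctrl);
	drm_dbg_kms(&dev_priv->drm, "PP_CONTROL: %#x\n", pp);
	intel_de_write(dev_priv, regs.pp_ctrl, pp | EDP_FORCE_VDD);
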
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 3da166054788..0c7be8ed1423 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
enum pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7c653f8c307f..3e706bb850a8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -57,10 +57,27 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
*/
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
u8 read_val[2] = { 0x0 };
+ u8 mode_reg;
u16 level = 0;
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &mode_reg) != 1) {
+ DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ return 0;
+ }
+
+ /*
+ * If we're not in DPCD control mode yet, the programmed brightness
+ * value is meaningless and we should assume max brightness
+ */
+ if ((mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) !=
+ DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)
+ return connector->panel.backlight.max;
+
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
@@ -82,7 +99,7 @@ static void
intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -110,62 +127,29 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
- u8 pn, pn_min, pn_max;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+ int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
- /* Find desired value of (F x P)
- * Note that, if F x P is out of supported range, the maximum value or
- * minimum value will applied automatically. So no need to check that.
- */
freq = dev_priv->vbt.backlight.pwm_freq_hz;
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
if (!freq) {
DRM_DEBUG_KMS("Use panel default backlight frequency\n");
return false;
}
fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
- /* Use highest possible value of Pn for more granularity of brightness
- * adjustment while satifying the conditions below.
- * - Pn is in the range of Pn_min and Pn_max
- * - F is in the range of 1 and 255
- * - FxP is within 25% of desired value.
- * Note: 25% is arbitrary value and may need some tweak.
- */
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
- return false;
- }
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
- return false;
- }
- pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
- pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-
+ /* Ensure frequency is within 25% of desired value */
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
- if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
- DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
- return false;
- }
-
- for (pn = pn_max; pn >= pn_min; pn--) {
- f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
- fxp_actual = f << pn;
- if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
- break;
- }
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
+ DRM_DEBUG_KMS("Actual frequency out of range\n");
return false;
}
+
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
@@ -178,7 +162,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -197,6 +182,12 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT,
+ panel->backlight.pwmgen_bit_count) < 0)
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+
break;
/* Do nothing when it is already DPCD mode */
@@ -216,8 +207,9 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
}
+ intel_dp_aux_set_backlight(conn_state,
+ connector->panel.backlight.level);
set_aux_backlight_enable(intel_dp, true);
- intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
}
static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -226,20 +218,91 @@ static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old
false);
}
+static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
+ u32 max_backlight = 0;
+ int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
+ u8 pn, pn_min, pn_max;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, &pn) == 1) {
+ pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ max_backlight = (1 << pn) - 1;
+ }
+
+ /* Find desired value of (F x P)
+ * Note that, if F x P is out of supported range, the maximum value or
+ * minimum value will be applied automatically. So no need to check that.
+ */
+ freq = i915->vbt.backlight.pwm_freq_hz;
+ DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ if (!freq) {
+ DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ return max_backlight;
+ }
+
+ fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+
+ /* Use highest possible value of Pn for more granularity of brightness
+ * adjustment while satisfying the conditions below.
+ * - Pn is in the range of Pn_min and Pn_max
+ * - F is in the range of 1 and 255
+ * - FxP is within 25% of desired value.
+ * Note: 25% is an arbitrary value and may need some tweaking.
+ */
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ return max_backlight;
+ }
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ return max_backlight;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
+ fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
+ if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
+ DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ return max_backlight;
+ }
+
+ for (pn = pn_max; pn >= pn_min; pn--) {
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
+ if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
+ break;
+ }
+
+ DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ return max_backlight;
+ }
+ panel->backlight.pwmgen_bit_count = pn;
+
+ max_backlight = (1 << pn) - 1;
+
+ return max_backlight;
+}
+
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct intel_panel *panel = &connector->panel;
- if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
- panel->backlight.max = 0xFFFF;
- else
- panel->backlight.max = 0xFF;
+ panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+ if (!panel->backlight.max)
+ return -ENODEV;
panel->backlight.min = 0;
panel->backlight.level = intel_dp_aux_get_backlight(connector);
-
panel->backlight.enabled = panel->backlight.level != 0;
return 0;
@@ -248,7 +311,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
@@ -265,15 +328,31 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
- struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (i915_modparams.enable_dpcd_backlight == 0 ||
- (i915_modparams.enable_dpcd_backlight == -1 &&
- dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
+ !intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
- if (!intel_dp_aux_display_control_capable(intel_connector))
+ /*
+ * There are a lot of machines that don't properly advertise the
+ * backlight control interface in their VBIOS, :\
+ */
+ if (dev_priv->vbt.backlight.type !=
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
+ !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
+ DRM_DEV_INFO(dev->dev,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
+ }
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
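
The 25% window and the descending pn loop above are easier to see with concrete numbers. Below is a stand-alone sketch in plain C; the 200 Hz VBT frequency and the pn caps 4..10 are made-up example inputs, and 27 MHz is the eDP frequency base that the KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ) term expands to:

	#include <stdio.h>

	static int div_round_closest(int a, int b) { return (a + b / 2) / b; }

	/* Sketch of the pwmgen selection in intel_dp_aux_calc_max_backlight():
	 * pick the highest pn (most brightness levels) whose F*P lands
	 * within 25% of the desired F*P, with F limited to 1..255. */
	static int pick_pwmgen_bits(int freq_hz, int pn_min, int pn_max)
	{
		int fxp = div_round_closest(27000 * 1000, freq_hz);
		int fxp_min = div_round_closest(fxp * 3, 4);	/* -25% */
		int fxp_max = div_round_closest(fxp * 5, 4);	/* +25% */
		int pn;

		for (pn = pn_max; pn >= pn_min; pn--) {
			int f = div_round_closest(fxp, 1 << pn);

			if (f < 1) f = 1;
			if (f > 255) f = 255;
			if (fxp_min <= (f << pn) && (f << pn) <= fxp_max)
				return pn;
		}
		return -1;
	}

	int main(void)
	{
		/* 200 Hz PWM, panel caps pn in [4, 10]: fxp = 135000,
		 * pn = 10 gives f = 132 and F*P = 135168 (within 25%),
		 * so max_backlight = (1 << 10) - 1 = 1023 levels. */
		printf("pn = %d\n", pick_pwmgen_bits(200, 4, 10));
		return 0;
	}
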
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2a1130dd1ad0..a7defb37ab00 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -130,6 +130,7 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
@@ -143,9 +144,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
&link_bw, &rate_select);
if (link_bw)
- DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw);
+ drm_dbg_kms(&i915->drm,
+ "Using LINK_BW_SET value %02x\n", link_bw);
else
- DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select);
+ drm_dbg_kms(&i915->drm,
+ "Using LINK_RATE_SET value %02x\n", rate_select);
/* Write the link configuration data */
link_config[0] = link_bw;
@@ -169,7 +172,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
- DRM_ERROR("failed to enable link training\n");
+ drm_err(&i915->drm, "failed to enable link training\n");
return false;
}
@@ -193,22 +196,23 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("failed to get link status\n");
+ drm_err(&i915->drm, "failed to get link status\n");
return false;
}
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
- DRM_DEBUG_KMS("clock recovery OK\n");
+ drm_dbg_kms(&i915->drm, "clock recovery OK\n");
return true;
}
if (voltage_tries == 5) {
- DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+ drm_dbg_kms(&i915->drm,
+ "Same voltage tried 5 times\n");
return false;
}
if (max_vswing_reached) {
- DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+ drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
return false;
}
@@ -217,7 +221,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
- DRM_ERROR("failed to update link training\n");
+ drm_err(&i915->drm,
+ "failed to update link training\n");
return false;
}
@@ -231,7 +236,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
max_vswing_reached = true;
}
- DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ drm_err(&i915->drm,
+ "Failed clock recovery %d times, giving up!\n", max_cr_tries);
return false;
}
@@ -256,9 +262,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return DP_TRAINING_PATTERN_4;
} else if (intel_dp->link_rate == 810000) {
if (!source_tps4)
- DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "8.1 Gbps link rate without source HBR3/TPS4 support\n");
if (!sink_tps4)
- DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "8.1 Gbps link rate without sink TPS4 support\n");
}
/*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
@@ -271,9 +279,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return DP_TRAINING_PATTERN_3;
} else if (intel_dp->link_rate >= 540000) {
if (!source_tps3)
- DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
- DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
return DP_TRAINING_PATTERN_2;
@@ -282,6 +292,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int tries;
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
@@ -295,7 +306,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
training_pattern)) {
- DRM_ERROR("failed to start channel equalization\n");
+ drm_err(&i915->drm, "failed to start channel equalization\n");
return false;
}
@@ -303,7 +314,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("failed to get link status\n");
+ drm_err(&i915->drm,
+ "failed to get link status\n");
break;
}
@@ -311,23 +323,25 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp_dump_link_status(link_status);
- DRM_DEBUG_KMS("Clock recovery check failed, cannot "
- "continue channel equalization\n");
+ drm_dbg_kms(&i915->drm,
+ "Clock recovery check failed, cannot "
+ "continue channel equalization\n");
break;
}
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
- DRM_DEBUG_KMS("Channel EQ done. DP Training "
- "successful\n");
+ drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
+ "successful\n");
break;
}
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
- DRM_ERROR("failed to update link training\n");
+ drm_err(&i915->drm,
+ "failed to update link training\n");
break;
}
}
@@ -335,7 +349,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Try 5 times, else fail and try at lower BW */
if (tries == 5) {
intel_dp_dump_link_status(link_status);
- DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
+ drm_dbg_kms(&i915->drm,
+ "Channel equalization failed 5 times\n");
}
intel_dp_set_idle_link_train(intel_dp);
@@ -362,17 +377,19 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (!intel_dp_link_training_channel_equalization(intel_dp))
goto failure_handling;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
return;
failure_handling:
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
if (!intel_dp_get_link_train_fallback_values(intel_dp,
intel_dp->link_rate,
intel_dp->lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index cba68c5a80fa..44f3fd251ca1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
void *port = connector->port;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -352,8 +352,9 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
- WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
- !intel_dp_mst_is_master_trans(old_crtc_state));
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+ !intel_dp_mst_is_master_trans(old_crtc_state));
intel_crtc_vblank_off(old_crtc_state);
@@ -361,9 +362,12 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
- val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+ val);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
@@ -437,8 +441,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
- WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
- !intel_dp_mst_is_master_trans(pipe_config));
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+ !intel_dp_mst_is_master_trans(pipe_config));
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@@ -459,8 +464,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = I915_READ(intel_dp->regs.dp_tp_status);
- I915_WRITE(intel_dp->regs.dp_tp_status, temp);
+ temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@@ -475,6 +480,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_pipe_clock(pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
+
+ intel_dp_set_m_n(pipe_config, M1_N1);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -486,6 +493,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+
+ intel_enable_pipe(pipe_config);
+
+ intel_crtc_vblank_on(pipe_config);
+
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
@@ -535,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
+static int
+intel_dp_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = drm_dp_mst_connector_late_register(connector,
+ intel_connector->port);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_connector_register(connector);
+ if (ret < 0)
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+
+ return ret;
+}
+
+static void
+intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_connector_unregister(connector);
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+}
+
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
- .early_unregister = intel_connector_unregister,
+ .late_register = intel_dp_mst_connector_late_register,
+ .early_unregister = intel_dp_mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -632,9 +674,9 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
- if (connector->encoder && connector->base.state->crtc) {
+ if (intel_attached_encoder(connector) && connector->base.state->crtc) {
enum pipe pipe;
- if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+ if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
return false;
return true;
}
@@ -706,36 +748,8 @@ err:
return NULL;
}
-static void intel_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- if (dev_priv->fbdev)
- drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_register(connector);
-}
-
-static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
- drm_connector_unregister(connector);
-
- if (dev_priv->fbdev)
- drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
- .register_connector = intel_dp_register_mst_connector,
- .destroy_connector = intel_dp_destroy_mst_connector,
};
static struct intel_dp_mst_encoder *
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 6fb1f7a7364e..399a7edb4568 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -259,7 +259,8 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
}
}
- WARN(1, "PHY not found for PORT %c", port_name(port));
+ drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c",
+ port_name(port));
*phy = DPIO_PHY0;
*ch = DPIO_CH0;
}
@@ -278,33 +279,34 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
val &= ~SCALE_DCOMP_METHOD;
if (enable)
val |= SCALE_DCOMP_METHOD;
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+ drm_err(&dev_priv->drm,
+ "Disabled scaling while ouniqetrangenmethod was set");
- I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
val &= ~DE_EMPHASIS;
val |= deemphasis << DEEMPH_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
}
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
@@ -314,20 +316,20 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
phy_info = bxt_get_phy_info(dev_priv, phy);
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
+ if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
return false;
- if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
- phy);
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but power hasn't settled\n", phy);
return false;
}
- if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
- phy);
+ if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but still in reset\n", phy);
return false;
}
@@ -337,7 +339,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+ u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
@@ -347,7 +349,8 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
{
if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
GRC_DONE, 10))
- DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+ drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n",
+ phy);
}
static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
@@ -364,18 +367,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
- "won't reprogram it\n", phy);
+ drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
return;
}
- DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
- "force reprogramming it\n", phy);
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
}
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
val |= phy_info->pwron_mask;
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -390,29 +394,30 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
PHY_RESERVED | PHY_POWER_GOOD,
PHY_POWER_GOOD,
1))
- DRM_ERROR("timeout during PHY%d power on\n", phy);
+ drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
+ phy);
/* Program PLL Rcomp code offset */
- val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
- val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
val &= ~IREF1RC_OFFSET_MASK;
val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
/* Program power gating */
- val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
SUS_CLK_CONFIG;
- I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
if (phy_info->dual_channel) {
- val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
}
if (phy_info->rcomp_phy != -1) {
@@ -430,19 +435,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
- I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+ intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
- val = I915_READ(BXT_PORT_REF_DW8(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
val |= GRC_DIS | GRC_RDY_OVRD;
- I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -452,13 +457,13 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
phy_info = bxt_get_phy_info(dev_priv, phy);
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
val &= ~phy_info->pwron_mask;
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -496,7 +501,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
va_list args;
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if ((val & mask) == expected)
return true;
@@ -504,7 +509,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
vaf.fmt = reg_fmt;
vaf.va = &args;
- DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
@@ -599,7 +604,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
/*
* Note that on CHV this flag is called UPAR, but has
@@ -609,7 +615,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
if (lane_lat_optim_mask & BIT(lane))
val |= LATENCY_OPTIM;
- I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ val);
}
}
@@ -627,7 +634,8 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
mask = 0;
for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
if (val & LATENCY_OPTIM)
mask |= BIT(lane);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c75e34d87111..2d47f1f756a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -45,6 +45,22 @@
* commit phase.
*/
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*update_ref_clks)(struct drm_i915_private *i915);
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state);
+};
+
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_state *shared_dpll)
@@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -88,7 +104,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->shared_dplls[id];
+ return &dev_priv->dpll.shared_dplls[id];
}
/**
@@ -103,11 +119,14 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- if (WARN_ON(pll < dev_priv->shared_dplls||
- pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+ long pll_idx = pll - dev_priv->dpll.shared_dplls;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ pll_idx < 0 ||
+ pll_idx >= dev_priv->dpll.num_shared_dpll))
return -1;
- return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+ return pll_idx;
}
/* For ILK+ */
@@ -118,7 +137,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+ if (drm_WARN(&dev_priv->drm, !pll,
+ "asserting DPLL %s with no DPLL\n", onoff(state)))
return;
cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
@@ -140,19 +160,19 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (WARN_ON(pll == NULL))
+ if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
- WARN_ON(!pll->state.crtc_mask);
+ mutex_lock(&dev_priv->dpll.lock);
+ drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
if (!pll->active_mask) {
- DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
- WARN_ON(pll->on);
+ drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
+ drm_WARN_ON(&dev_priv->drm, pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
pll->info->funcs->prepare(dev_priv, pll);
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -169,35 +189,36 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
unsigned int old_mask;
- if (WARN_ON(pll == NULL))
+ if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
old_mask = pll->active_mask;
- if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
- WARN_ON(pll->active_mask & crtc_mask))
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
+ drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
goto out;
pll->active_mask |= crtc_mask;
- DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "enable %s (active %x, on? %d) for crtc %d\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
if (old_mask) {
- WARN_ON(!pll->on);
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
assert_shared_dpll_enabled(dev_priv, pll);
goto out;
}
- WARN_ON(pll->on);
+ drm_WARN_ON(&dev_priv->drm, pll->on);
- DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
pll->info->funcs->enable(dev_priv, pll);
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -220,27 +241,28 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll_lock);
- if (WARN_ON(!(pll->active_mask & crtc_mask)))
+ mutex_lock(&dev_priv->dpll.lock);
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
goto out;
- DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "disable %s (active %x, on? %d) for crtc %d\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
assert_shared_dpll_enabled(dev_priv, pll);
- WARN_ON(!pll->on);
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
pll->active_mask &= ~crtc_mask;
if (pll->active_mask)
goto out;
- DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
pll->info->funcs->disable(dev_priv, pll);
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static struct intel_shared_dpll *
@@ -256,10 +278,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+ drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].crtc_mask == 0) {
@@ -271,20 +293,21 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
if (memcmp(pll_state,
&shared_dpll[i].hw_state,
sizeof(*pll_state)) == 0) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name,
- shared_dpll[i].crtc_mask,
- pll->active_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name,
+ shared_dpll[i].crtc_mask,
+ pll->active_mask);
return pll;
}
}
/* Ok no matching timings, maybe there's a free one? */
if (unused_pll) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- unused_pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ unused_pll->info->name);
return unused_pll;
}
@@ -297,6 +320,7 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_shared_dpll_state *shared_dpll;
const enum intel_dpll_id id = pll->info->id;
@@ -305,8 +329,8 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
if (shared_dpll[id].crtc_mask == 0)
shared_dpll[id].hw_state = *pll_state;
- DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
- pipe_name(crtc->pipe));
+ drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
+ pipe_name(crtc->pipe));
shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
@@ -357,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->shared_dplls[i];
+ &dev_priv->dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -378,10 +402,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(PCH_DPLL(id));
+ val = intel_de_read(dev_priv, PCH_DPLL(id));
hw_state->dpll = val;
- hw_state->fp0 = I915_READ(PCH_FP0(id));
- hw_state->fp1 = I915_READ(PCH_FP1(id));
+ hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
+ hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -393,8 +417,8 @@ static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
- I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
+ intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
+ intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -404,7 +428,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- val = I915_READ(PCH_DREF_CONTROL);
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
@@ -418,10 +442,10 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(id));
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -429,8 +453,8 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
- POSTING_READ(PCH_DPLL(id));
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(200);
}
@@ -439,8 +463,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(PCH_DPLL(id), 0);
- POSTING_READ(PCH_DPLL(id));
+ intel_de_write(dev_priv, PCH_DPLL(id), 0);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(200);
}
@@ -457,11 +481,12 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
- DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name);
} else {
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
@@ -484,12 +509,13 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
@@ -499,21 +525,34 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
.get_hw_state = ibx_pch_dpll_get_hw_state,
};
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+ { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
+};
+
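+/*
+ * pch_pll_mgr above is the first instance of the new struct
+ * intel_dpll_mgr vtable: a NULL-terminated dpll_info table plus
+ * get/put callbacks, so platform differences live in data rather
+ * than in scattered conditionals. A hypothetical sketch of how
+ * init-time code could pick a manager and populate
+ * dpll.shared_dplls from its table follows; the function names are
+ * illustrative, not the driver's actual init path.
+ *
+ *	static const struct intel_dpll_mgr *
+ *	pick_dpll_mgr(struct drm_i915_private *i915)
+ *	{
+ *		if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
+ *			return &pch_pll_mgr;
+ *		return NULL;	// other platforms: their own mgr
+ *	}
+ *
+ *	static void sketch_dpll_init(struct drm_i915_private *i915)
+ *	{
+ *		const struct intel_dpll_mgr *mgr = pick_dpll_mgr(i915);
+ *		const struct dpll_info *info = mgr ? mgr->dpll_info : NULL;
+ *		int i;
+ *
+ *		// dpll_info tables end with an empty sentinel entry
+ *		for (i = 0; info && info[i].name; i++)
+ *			i915->dpll.shared_dplls[i].info = &info[i];
+ *		i915->dpll.num_shared_dpll = i;
+ *	}
+ */
+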
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
- POSTING_READ(WRPLL_CTL(id));
+ intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
udelay(20);
}
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
- POSTING_READ(SPLL_CTL);
+ intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
udelay(20);
}
@@ -523,9 +562,9 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(WRPLL_CTL(id));
- I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL(id));
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
+ intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/*
* Try to set up the PCH reference clock once all DPLLs
@@ -541,9 +580,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(SPLL_CTL);
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
+ val = intel_de_read(dev_priv, SPLL_CTL);
+ intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
/*
* Try to set up the PCH reference clock once all DPLLs
@@ -566,7 +605,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(WRPLL_CTL(id));
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
hw_state->wrpll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -586,7 +625,7 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(SPLL_CTL);
+ val = intel_de_read(dev_priv, SPLL_CTL);
hw_state->spll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -811,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static struct intel_shared_dpll *
-hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -839,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
return pll;
}
+static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ int refclk;
+ int n, p, r;
+ u32 wrpll = pll->state.hw_state.wrpll;
+
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ refclk = dev_priv->dpll.ref_clks.nssc;
+ break;
+ }
+ /* fall through */
+ case WRPLL_REF_PCH_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = dev_priv->dpll.ref_clks.ssc;
+ break;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700000;
+ break;
+ default:
+ MISSING_CASE(wrpll);
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to kHz; p and r have a fixed-point portion */
+ return (refclk * n / 10) / (p * r) * 2;
+}
+
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
+hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
@@ -858,7 +936,8 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_LCPLL_2700;
break;
default:
- DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+ drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
+ clock);
return NULL;
}
@@ -870,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
+static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->info->id) {
+ case DPLL_ID_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case DPLL_ID_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_ID_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+ break;
+ }
+
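+ /* The port clock is twice the link_clock value decoded above */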
+ return link_clock * 2;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ return NULL;
+
+ crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
+ SPLL_REF_MUXED_SSC;
+
+ return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SPLL));
+}
+
+static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
+ case SPLL_FREQ_810MHz:
+ link_clock = 81000;
+ break;
+ case SPLL_FREQ_1350MHz:
+ link_clock = 135000;
+ break;
+ case SPLL_FREQ_2700MHz:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad spll freq\n");
+ break;
+ }
+
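+ /* As for the LCPLL, the port clock is twice the decoded link_clock */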
+ return link_clock * 2;
+}
+
static bool hsw_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -881,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(state, crtc);
- } else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(crtc_state);
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return false;
-
- crtc_state->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
-
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SPLL));
- } else {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ pll = hsw_ddi_wrpll_get_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ pll = hsw_ddi_lcpll_get_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ pll = hsw_ddi_spll_get_dpll(state, crtc);
+ else
return false;
- }
if (!pll)
return false;
@@ -910,23 +1043,35 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 135000;
+ /* Non-SSC is only used on non-ULT HSW. */
+ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ i915->dpll.ref_clks.nssc = 24000;
+ else
+ i915->dpll.ref_clks.nssc = 135000;
+}
+
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- hw_state->wrpll, hw_state->spll);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
}
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
+ .get_freq = hsw_ddi_wrpll_get_freq,
};
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
+ .get_freq = hsw_ddi_spll_get_freq,
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
@@ -950,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
+ .get_freq = hsw_ddi_lcpll_get_freq,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
+ { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
+ { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
+ { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+ { },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = hsw_update_dpll_ref_clks,
+ .dump_hw_state = hsw_dump_hw_state,
};
struct skl_dpll_regs {
@@ -989,15 +1153,15 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
DPLL_CTRL1_SSC(id) |
DPLL_CTRL1_LINK_RATE_MASK(id));
val |= pll->state.hw_state.ctrl1 << (id * 6);
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
+ intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1008,17 +1172,17 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
skl_ddi_pll_write_ctrl1(dev_priv, pll);
- I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
- I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
- POSTING_READ(regs[id].cfgcr1);
- POSTING_READ(regs[id].cfgcr2);
+ intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+ intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr1);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- I915_WRITE(regs[id].ctl,
- I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
- DRM_ERROR("DPLL %d not locked\n", id);
+ drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
@@ -1034,9 +1198,9 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- I915_WRITE(regs[id].ctl,
- I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
- POSTING_READ(regs[id].ctl);
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, regs[id].ctl);
}
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
@@ -1061,17 +1225,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(regs[id].ctl);
+ val = intel_de_read(dev_priv, regs[id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
goto out;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CTRL1_HDMI_MODE(id)) {
- hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
- hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
+ hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
+ hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
}
ret = true;
@@ -1099,11 +1263,11 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
/* DPLL0 is always enabled since it drives CDCLK */
- val = I915_READ(regs[id].ctl);
- if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+ val = intel_de_read(dev_priv, regs[id].ctl);
+ if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
goto out;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
ret = true;
@@ -1222,6 +1386,7 @@ struct skl_wrpll_params {
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
u64 afe_clock,
+ int ref_clock,
u64 central_freq,
u32 p0, u32 p1, u32 p2)
{
@@ -1281,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
* Intermediate values are in Hz.
* Divide by MHz to match bspec
*/
- params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
params->dco_fraction =
- div_u64((div_u64(dco_freq, 24) -
+ div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
@@ -1354,14 +1520,15 @@ skip_remaining_dividers:
*/
p0 = p1 = p2 = 0;
skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
- skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
- p0, p1, p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
+ ctx.central_freq, p0, p1, p2);
return true;
}
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1374,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->dpll.ref_clks.nssc,
&wrpll_params))
return false;
@@ -1396,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ int ref_clock = i915->dpll.ref_clks.nssc;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
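+ /*
+ * DCO frequency = (integer + fraction / 2^15) * ref_clock; the
+ * 15-bit fraction field sits at bits 23:9 of cfgcr1.
+ */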
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ ref_clock / 0x8000;
+
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
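+ /* DCO / (p0 * p1 * p2) gives the AFE clock, which is 5x the port clock */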
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1436,25 +1662,62 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch ((pll->state.hw_state.ctrl1 &
+ DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
+ case DPLL_CTRL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
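+ /* The port clock is twice the link_clock value decoded above */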
+ return link_clock * 2;
+}
+
static bool skl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
bool bret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not get HDMI pll dividers.\n");
return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not set DP dpll HW state.\n");
return false;
}
} else {
@@ -1482,10 +1745,29 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * The ctrl1 register value is already shifted down for each PLL, so
+ * use index 0 to get the internal shift for each field.
+ */
+ if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return skl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: "
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
hw_state->ctrl1,
hw_state->cfgcr1,
@@ -1496,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
};
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = skl_update_dpll_ref_clks,
+ .dump_hw_state = skl_dump_hw_state,
};
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1515,113 +1815,114 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_REF_SEL;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_POWER_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
- if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
- DRM_ERROR("Power state not set for PLL:%d\n", port);
+ drm_err(&dev_priv->drm,
+ "Power state not set for PLL:%d\n", port);
}
/* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->state.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->state.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->state.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->state.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->state.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->state.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->state.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->state.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->state.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->state.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
200))
- DRM_ERROR("PLL %d not locked\n", port);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
temp |= DCC_DELAY_RANGE_2;
- I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
}
/*
* While we write to the group register to program all lanes at once, we
* can read back only the lane registers, and we pick lanes 0/1 for that.
*/
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->state.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1630,19 +1931,20 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
u32 temp;
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_POWER_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
- if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
- DRM_ERROR("Power state not reset for PLL:%d\n", port);
+ if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ drm_err(&dev_priv->drm,
+ "Power state not reset for PLL:%d\n", port);
}
}
@@ -1666,40 +1968,40 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+ hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+ hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+ hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+ hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+ hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+ hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+ hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+ hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+ hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -1708,11 +2010,14 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
- DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
- hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
+ hw_state->pcsdw12 = intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
+ drm_dbg(&dev_priv->drm,
+ "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+ hw_state->pcsdw12,
+ intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
@@ -1751,6 +2056,7 @@ static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct dpll best_clock;
@@ -1760,9 +2066,9 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
* i9xx_crtc_compute_clock
*/
if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
- DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- crtc_state->port_clock,
- pipe_name(crtc->pipe));
+ drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
+ crtc_state->port_clock,
+ pipe_name(crtc->pipe));
return false;
}
@@ -1799,6 +2105,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
const struct bxt_clk_div *clk_div)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
int clock = crtc_state->port_clock;
int vco = clk_div->vco;
@@ -1824,7 +2131,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
gain_ctl = 1;
targ_cnt = 9;
} else {
- DRM_ERROR("Invalid VCO\n");
+ drm_err(&i915->drm, "Invalid VCO\n");
return false;
}
@@ -1885,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ struct dpll clock;
+
+ clock.m1 = 2;
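+ /* m2 is a 22.22 fixed-point value: integer bits from pll0, fraction bits from pll2 */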
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
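+ /* chv_calc_dpll_params() recomputes vco and dot clock from these dividers */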
+ return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+}
+
static bool bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1907,8 +2231,8 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
id = (enum intel_dpll_id) encoder->port;
pll = intel_get_shared_dpll_by_id(dev_priv, id);
- DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name, pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->info->name);
intel_reference_shared_dpll(state, crtc,
pll, &crtc_state->dpll_hw_state);
@@ -1918,89 +2242,37 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 100000;
+ i915->dpll.ref_clks.nssc = 100000;
+ /* DSI non-SSC ref 19.2MHz */
+}
+
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- hw_state->ebb0,
- hw_state->ebb4,
- hw_state->pll0,
- hw_state->pll1,
- hw_state->pll2,
- hw_state->pll3,
- hw_state->pll6,
- hw_state->pll8,
- hw_state->pll9,
- hw_state->pll10,
- hw_state->pcsdw12);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0,
+ hw_state->ebb4,
+ hw_state->pll0,
+ hw_state->pll1,
+ hw_state->pll2,
+ hw_state->pll3,
+ hw_state->pll6,
+ hw_state->pll8,
+ hw_state->pll9,
+ hw_state->pll10,
+ hw_state->pcsdw12);
}
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
-};
-
-struct intel_dpll_mgr {
- const struct dpll_info *dpll_info;
-
- bool (*get_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*put_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*update_active_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*dump_hw_state)(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *hw_state);
-};
-
-static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
- { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr pch_pll_mgr = {
- .dpll_info = pch_plls,
- .get_dplls = ibx_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = ibx_dump_hw_state,
-};
-
-static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
- { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
- { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
- { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
- { },
-};
-
-static const struct intel_dpll_mgr hsw_pll_mgr = {
- .dpll_info = hsw_plls,
- .get_dplls = hsw_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = hsw_dump_hw_state,
-};
-
-static const struct dpll_info skl_plls[] = {
- { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr skl_pll_mgr = {
- .dpll_info = skl_plls,
- .get_dplls = skl_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = skl_dump_hw_state,
+ .get_freq = bxt_ddi_pll_get_freq,
};
static const struct dpll_info bxt_plls[] = {
@@ -2014,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
.get_dplls = bxt_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = bxt_update_dpll_ref_clks,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -2024,32 +2297,32 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
u32 val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val |= PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
PLL_POWER_STATE, 5))
- DRM_ERROR("PLL %d Power not enabled\n", id);
+ drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
/*
* 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
* select DP mode, and set DP link rate.
*/
val = pll->state.hw_state.cfgcr0;
- I915_WRITE(CNL_DPLL_CFGCR0(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR0(id));
+ intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
/* 3. Configure DPLL_CFGCR1 */
/* Avoid touching CFGCR1 if HDMI mode is not enabled */
if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
val = pll->state.hw_state.cfgcr1;
- I915_WRITE(CNL_DPLL_CFGCR1(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR1(id));
+ intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
}
/*
@@ -2062,13 +2335,13 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val |= PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
- DRM_ERROR("PLL %d not locked\n", id);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
/*
* 8. If the frequency will result in a change to the voltage
@@ -2106,13 +2379,13 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val &= ~PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
- DRM_ERROR("PLL %d locked\n", id);
+ drm_err(&dev_priv->drm, "PLL %d locked\n", id);
/*
* 5. If the frequency will result in a change to the voltage
@@ -2124,14 +2397,14 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val &= ~PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
PLL_POWER_STATE, 5))
- DRM_ERROR("PLL %d Power not disabled\n", id);
+ drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2150,16 +2423,17 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
if (!(val & PLL_ENABLE))
goto out;
- val = I915_READ(CNL_DPLL_CFGCR0(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
hw_state->cfgcr0 = val;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CFGCR0_HDMI_MODE) {
- hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ CNL_DPLL_CFGCR1(id));
}
ret = true;
@@ -2256,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_fraction = dco & 0x7fff;
}
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
-{
- int ref_clock = dev_priv->cdclk.hw.ref;
-
- /*
- * For ICL+, the spec states: if reference frequency is 38.4,
- * use 19.2 because the DPLL automatically divides that by 2.
- */
- if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
- ref_clock = 19200;
-
- return ref_clock;
-}
-
static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *wrpll_params)
+__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params,
+ int ref_clock)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
- u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2308,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
return false;
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
pdiv, qdiv, kdiv);
return true;
}
+static bool
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ i915->dpll.ref_clks.nssc);
+}
+
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0, cfgcr1;
@@ -2344,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll,
+ int ref_clock)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
+ break;
+ }
+
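+ /* As on SKL: DCO frequency = (integer + fraction / 2^15) * ref_clock */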
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+ if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
+}
+
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2389,30 +2717,72 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
+ case DPLL_CFGCR0_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_3240:
+ link_clock = 324000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_4050:
+ link_clock = 405000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
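+ /* As on earlier platforms, the port clock is twice the decoded link_clock */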
+ return link_clock * 2;
+}
+
static bool cnl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
bool bret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not get HDMI pll dividers.\n");
return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not set DP dpll HW state.\n");
return false;
}
} else {
- DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
- crtc_state->output_types);
+ drm_dbg_kms(&i915->drm,
+ "Skip DPLL setup for output_types 0x%x\n",
+ crtc_state->output_types);
return false;
}
@@ -2422,7 +2792,7 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_SKL_DPLL1) |
BIT(DPLL_ID_SKL_DPLL0));
if (!pll) {
- DRM_DEBUG_KMS("No PLL selected\n");
+ drm_dbg_kms(&i915->drm, "No PLL selected\n");
return false;
}
@@ -2434,19 +2804,35 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
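+ /* HDMI mode uses the WRPLL dividers; otherwise cfgcr0 encodes a fixed DP link rate */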
+ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
+ return cnl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return cnl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC reference */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: "
- "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
- hw_state->cfgcr0,
- hw_state->cfgcr1);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
+ "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
+ hw_state->cfgcr0,
+ hw_state->cfgcr1);
}
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
.enable = cnl_ddi_pll_enable,
.disable = cnl_ddi_pll_disable,
.get_hw_state = cnl_ddi_pll_get_hw_state,
+ .get_freq = cnl_ddi_pll_get_freq,
};
static const struct dpll_info cnl_plls[] = {
@@ -2460,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
.get_dplls = cnl_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = cnl_update_dpll_ref_clks,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2555,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->cdclk.hw.ref == 24000 ?
+ dev_priv->dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2578,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) >= 12) {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2591,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2608,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * The PLL outputs multiple frequencies at the same time; the
+ * selection is made at the DDI clock mux level.
+ */
+ drm_WARN_ON(&i915->drm, 1);
+
+ return 0;
+}
+
+static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+{
+ int ref_clock = i915->dpll.ref_clks.nssc;
+
+ /*
+ * For ICL+, the spec states: if reference frequency is 38.4,
+ * use 19.2 because the DPLL automatically divides that by 2.
+ */
+ if (ref_clock == 38400)
+ ref_clock = 19200;
+
+ return ref_clock;
+}
+
+static bool
+icl_calc_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ icl_wrpll_ref_clock(i915));
+}
+
+static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll,
+ icl_wrpll_ref_clock(i915));
+}
+
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *pll_state)
@@ -2622,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
+ ret = icl_calc_wrpll(crtc_state, &pll_params);
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
@@ -2745,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->cdclk.hw.ref;
+ int refclk_khz = dev_priv->dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2761,7 +3191,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state, is_dkl)) {
- DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to find divisors for clock %d\n", clock);
return false;
}
@@ -2774,8 +3205,9 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
if (m2div_int > 255) {
- DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
- clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to find mdiv for clock %d\n",
+ clock);
return false;
}
}
@@ -2944,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
+ u64 tmp;
+
+ ref_clock = dev_priv->dpll.ref_clks.nssc;
+
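+ /* Gen12+ Type-C ports use the DKL PHY register layout, gen11 the MG PHY one */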
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
+
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+ div1 = 2;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+ div1 = 3;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+ div1 = 5;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+ div1 = 7;
+ break;
+ default:
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ return 0;
+ }
+
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
+ /* A div2 field value of 0 means the same as 1, i.e. no division */
+ if (div2 == 0)
+ div2 = 1;
+
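+ /*
+ * Port clock = ref * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2).
+ */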
+ /*
+ * Adjust the original formula to delay the division by 2^22 in order to
+ * minimize possible rounding errors.
+ */
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
+ tmp = div_u64(tmp, 5 * div1 * div2);
+
+ return tmp;
+}
+
/**
* icl_set_active_port_dpll - select the active port DPLL for a given CRTC
* @crtc_state: state for the CRTC to select the DPLL for
@@ -2996,7 +3500,8 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
unsigned long dpll_mask;
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate combo PHY PLL state.\n");
return false;
}
@@ -3013,8 +3518,9 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
&port_dpll->hw_state,
dpll_mask);
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
- encoder->base.base.id, encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "No combo PHY PLL found for [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name);
return false;
}
@@ -3038,7 +3544,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate TBT PLL state.\n");
return false;
}
@@ -3046,7 +3553,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+ drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
return false;
}
intel_reference_shared_dpll(state, crtc,
@@ -3055,7 +3562,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate MG PHY PLL state.\n");
goto err_unreference_tbt_pll;
}
@@ -3065,7 +3573,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(dpll_id));
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No MG PHY PLL found\n");
+ drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
}
intel_reference_shared_dpll(state, crtc,
@@ -3140,37 +3648,39 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
if (!(val & PLL_ENABLE))
goto out;
- hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+ MG_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_clktop2_hsclkctl =
- I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
- hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
- hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
- hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
- hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+ hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
+ MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
- hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->cdclk.hw.ref == 38400) {
+ if (dev_priv->dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3202,7 +3712,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
if (!(val & PLL_ENABLE))
goto out;
@@ -3210,13 +3720,15 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
* All registers read here have the same HIP_INDEX_REG even though
* they are on different building blocks
*/
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
- hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+ DKL_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_hsclkctl =
- I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
@@ -3224,32 +3736,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
- hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
- hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
- hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
@@ -3274,20 +3786,26 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
if (!(val & PLL_ENABLE))
goto out;
if (INTEL_GEN(dev_priv) >= 12) {
- hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR1(id));
} else {
if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(4));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(4));
} else {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(id));
}
}
@@ -3338,9 +3856,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
}
}
- I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
- I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
- POSTING_READ(cfgcr1_reg);
+ intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
+ intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
+ intel_de_posting_read(dev_priv, cfgcr1_reg);
}
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
@@ -3356,41 +3874,42 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
+ intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
- val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
- I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
- I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
- I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
- I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
- I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
+ intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+ intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+ intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+ intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
+ hw_state->mg_pll_frac_lock);
+ intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = I915_READ(MG_PLL_BIAS(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
val &= ~hw_state->mg_pll_bias_mask;
val |= hw_state->mg_pll_bias;
- I915_WRITE(MG_PLL_BIAS(tc_port), val);
+ intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
- val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
val |= hw_state->mg_pll_tdc_coldst_bias;
- I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
- POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
static void dkl_pll_write(struct drm_i915_private *dev_priv,
@@ -3404,62 +3923,63 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
* All registers programmed here have the same HIP_INDEX_REG even
* though they are on different building blocks
*/
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
/* All the registers are RMW */
- val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+ intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
- val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
- val = I915_READ(DKL_PLL_DIV0(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
val |= hw_state->mg_pll_div0;
- I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
- val = I915_READ(DKL_PLL_DIV1(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
val |= hw_state->mg_pll_div1;
- I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
- val = I915_READ(DKL_PLL_SSC(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
val |= hw_state->mg_pll_ssc;
- I915_WRITE(DKL_PLL_SSC(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
- val = I915_READ(DKL_PLL_BIAS(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
val |= hw_state->mg_pll_bias;
- I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
- val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
val |= hw_state->mg_pll_tdc_coldst_bias;
- I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
- POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
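
The dkl_pll_write() conversion above keeps the driver's read-modify-write idiom intact: select the Dekel PHY register bank once via HIP_INDEX_REG, then for each register read the live value with intel_de_read(), clear only the fields owned by the cached PLL state, OR in the new bits, and write the result back with intel_de_write(). A minimal sketch of one such step; dkl_rmw_example is an illustrative name, not an i915 symbol, while the accessors are the ones introduced by this patch:

/* Sketch only; dkl_rmw_example is illustrative, not a real i915 function. */
static void dkl_rmw_example(struct drm_i915_private *i915, i915_reg_t reg,
			    u32 mask, u32 bits)
{
	u32 val;

	val = intel_de_read(i915, reg);	/* read the live register value */
	val &= ~mask;			/* clear only the fields we own */
	val |= bits;			/* merge in the cached PLL state */
	intel_de_write(i915, reg, val);	/* write the merged value back */
}

Each DKL_* write in the hunk above is exactly this shape, with the mask taken from the register's field definitions.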
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
@@ -3468,16 +3988,17 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val |= PLL_POWER_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
+ pll->info->id);
}
static void icl_pll_enable(struct drm_i915_private *dev_priv,
@@ -3486,13 +4007,13 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val |= PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/* Timeout is actually 600us. */
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
- DRM_ERROR("PLL %d not locked\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
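
After the conversion, icl_pll_power_enable() and icl_pll_enable() both reduce to the same set-and-poll shape: set the control bit through intel_de_write(), poll the status bit with intel_de_wait_for_set(), and report failure through drm_err(). A condensed sketch of that shape, reusing the PLL_ENABLE/PLL_LOCK bits from the hunks above (not a literal copy of either function):

/* Sketch of the set-and-poll idiom above; not a literal copy. */
static void example_pll_set_and_poll(struct drm_i915_private *i915,
				     i915_reg_t enable_reg)
{
	u32 val;

	val = intel_de_read(i915, enable_reg);
	val |= PLL_ENABLE;
	intel_de_write(i915, enable_reg, val);

	/* The hardware lock timeout is 600us; 1ms is the poll granularity. */
	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
		drm_err(&i915->drm, "example PLL did not lock\n");
}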
static void combo_pll_enable(struct drm_i915_private *dev_priv,
@@ -3584,26 +4105,27 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* nothing here.
*/
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val &= ~PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/* Timeout is actually 1us. */
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
- DRM_ERROR("PLL %d locked\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val &= ~PLL_POWER_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
+ pll->info->id);
}
static void combo_pll_disable(struct drm_i915_private *dev_priv,
@@ -3639,44 +4161,54 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
icl_pll_disable(dev_priv, pll, enable_reg);
}
+static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
- "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
- "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
- "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
- "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
- "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
- hw_state->cfgcr0, hw_state->cfgcr1,
- hw_state->mg_refclkin_ctl,
- hw_state->mg_clktop2_coreclkctl1,
- hw_state->mg_clktop2_hsclkctl,
- hw_state->mg_pll_div0,
- hw_state->mg_pll_div1,
- hw_state->mg_pll_lf,
- hw_state->mg_pll_frac_lock,
- hw_state->mg_pll_ssc,
- hw_state->mg_pll_bias,
- hw_state->mg_pll_tdc_coldst_bias);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+ "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+ "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
}
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
+ .get_freq = icl_ddi_combo_pll_get_freq,
};
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
+ .get_freq = icl_ddi_tbt_pll_get_freq,
};
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info icl_plls[] = {
@@ -3695,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3709,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3716,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info tgl_plls[] = {
@@ -3736,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3770,22 +4306,22 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->num_shared_dpll = 0;
+ dev_priv->dpll.num_shared_dpll = 0;
return;
}
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
- WARN_ON(i != dpll_info[i].id);
- dev_priv->shared_dplls[i].info = &dpll_info[i];
+ drm_WARN_ON(dev, i != dpll_info[i].id);
+ dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll_mgr = dpll_mgr;
- dev_priv->num_shared_dpll = i;
- mutex_init(&dev_priv->dpll_lock);
+ dev_priv->dpll.mgr = dpll_mgr;
+ dev_priv->dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll.lock);
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+ BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
/**
@@ -3812,9 +4348,9 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
- if (WARN_ON(!dpll_mgr))
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return false;
return dpll_mgr->get_dplls(state, crtc, encoder);
@@ -3835,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -3864,35 +4400,114 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
- if (WARN_ON(!dpll_mgr))
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
dpll_mgr->update_active_dpll(state, crtc, encoder);
}
/**
+ * intel_dpll_get_freq - calculate the DPLL's output frequency
+ * @i915: i915 device
+ * @pll: DPLL for which to calculate the output frequency
+ *
+ * Return the output frequency corresponding to @pll's current state.
+ */
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+ return 0;
+
+ return pll->info->funcs->get_freq(i915, pll);
+}
+
+static void readout_dpll_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_crtc *crtc;
+
+ pll->on = pll->info->funcs->get_hw_state(i915, pll,
+ &pll->state.hw_state);
+
+ if (IS_ELKHARTLAKE(i915) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
+ pll->state.crtc_mask = 0;
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
+ pll->state.crtc_mask |= 1 << crtc->pipe;
+ }
+ pll->active_mask = pll->state.crtc_mask;
+
+ drm_dbg_kms(&i915->drm,
+ "%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->info->name, pll->state.crtc_mask, pll->on);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
+ i915->dpll.mgr->update_ref_clks(i915);
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+static void sanitize_dpll_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (!pll->on || pll->active_mask)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "%s enabled but not in use, disabling\n",
+ pll->info->name);
+
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+}
+
+void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+/**
* intel_dpll_dump_hw_state - write hw_state to dmesg
* @dev_priv: i915 drm device
* @hw_state: hw state to be written to the log
*
- * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll_mgr) {
- dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->dpll.mgr) {
+ dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
}
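
The new intel_dpll_get_freq() entry point added in this file dispatches to the per-PLL ->get_freq hook and returns 0 (with a drm_WARN_ON) when a platform's funcs table does not provide one. A hedged usage sketch; the caller below is illustrative and not part of this patch, though crtc_state->shared_dpll is the same field the readout code above uses:

/* Illustrative caller of intel_dpll_get_freq(); not from the patch above. */
static int example_port_clock(struct drm_i915_private *i915,
			      const struct intel_crtc_state *crtc_state)
{
	const struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (!pll)
		return 0;

	/* 0 means "unknown": the PLL's funcs table lacks ->get_freq. */
	return intel_dpll_get_freq(i915, pll);
}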
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2a104c64291d..5d9a2bc371e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs {
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
+
+ /**
+ * @get_freq:
+ *
+ * Hook for calculating the pll's output frequency based on its
+ * current state.
+ */
+ int (*get_freq)(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
};
/**
@@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
+void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index ada006a690df..d7a6bf2277df 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -40,7 +40,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+ return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
}
static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
@@ -50,16 +50,16 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl |= DSB_ENABLE;
- I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
- POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
return true;
}
@@ -70,16 +70,16 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl &= ~DSB_ENABLE;
- I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
- POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
return true;
}
@@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc)
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Gem object creation failed\n");
+ drm_err(&i915->drm, "Gem object creation failed\n");
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
- DRM_ERROR("Vma creation failed\n");
+ drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
goto out;
}
buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
- DRM_ERROR("Command buffer creation failed\n");
+ drm_err(&i915->drm, "Command buffer creation failed\n");
goto out;
}
@@ -165,7 +165,7 @@ void intel_dsb_put(struct intel_dsb *dsb)
if (!HAS_DSB(i915))
return;
- if (WARN_ON(dsb->refcount == 0))
+ if (drm_WARN_ON(&i915->drm, dsb->refcount == 0))
return;
if (--dsb->refcount == 0) {
@@ -198,12 +198,12 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
u32 reg_val;
if (!buf) {
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
return;
}
- if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -272,12 +272,12 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
u32 *buf = dsb->cmd_buf;
if (!buf) {
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
return;
}
- if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -310,10 +310,12 @@ void intel_dsb_commit(struct intel_dsb *dsb)
goto reset;
if (is_dsb_busy(dsb)) {
- DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+ intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma));
tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
if (tail > dsb->free_pos * 4)
@@ -321,14 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb)
(tail - dsb->free_pos * 4));
if (is_dsb_busy(dsb)) {
- DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
- I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+ drm_dbg_kms(&dev_priv->drm,
+ "DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma) + tail);
if (wait_for(!is_dsb_busy(dsb), 1)) {
- DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSB workload completion.\n");
goto reset;
}
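
intel_dsb_commit() above programs DSB_HEAD with the buffer's GGTT offset and then writes a tail pointer rounded up to a cacheline boundary (tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES)); the gap between the last queued command and the aligned tail is cleared, per the partially visible memset in the hunk's context lines. A small sketch of that alignment step, with the 64-byte cacheline size as an assumption:

/* Sketch of the tail alignment in intel_dsb_commit(); 64 is assumed. */
#define EXAMPLE_CACHELINE_BYTES 64

static u32 example_dsb_tail(u32 *cmd_buf, u32 free_pos)
{
	u32 used = free_pos * 4; /* free_pos counts dwords, tail is in bytes */
	u32 tail = ALIGN(used, EXAMPLE_CACHELINE_BYTES);

	/* Zero the gap so the engine never executes stale buffer contents. */
	if (tail > used)
		memset(&cmd_buf[free_pos], 0, tail - used);

	return tail;
}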
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index c87838843d0b..b53c50372918 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -45,7 +45,7 @@
static u32 dcs_get_backlight(struct intel_connector *connector)
{
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi_device;
u8 data = 0;
@@ -160,13 +160,13 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
struct intel_panel *panel = &intel_connector->panel;
if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
return -ENODEV;
- if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+ if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
panel->backlight.setup = dcs_setup_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 04f953ba8f00..574dcfec9577 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,7 +36,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <video/mipi_display.h>
@@ -136,7 +135,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
flags = *data++;
type = *data++;
@@ -158,7 +157,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ port_name(port));
goto out;
}
@@ -182,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+ drm_dbg(&dev_priv->drm,
+ "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -194,7 +195,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+ drm_dbg(&dev_priv->drm,
+ "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
@@ -212,9 +214,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
u32 delay = *((const u32 *) data);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
usleep_range(delay, delay + 10);
data += 4;
@@ -231,7 +234,8 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
u8 port;
if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
- DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+ drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
+ gpio_index);
return;
}
@@ -244,10 +248,11 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
- DRM_DEBUG_KMS("SC gpio not supported\n");
+ drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
return;
} else {
- DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
return;
}
}
@@ -291,13 +296,15 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
- gpio_index);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid gpio index %u for GPIO N\n",
+ gpio_index);
return;
}
@@ -332,8 +339,9 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
GPIOD_OUT_HIGH);
if (IS_ERR_OR_NULL(gpio_desc)) {
- DRM_ERROR("GPIO index %u request failed (%ld)\n",
- gpio_index, PTR_ERR(gpio_desc));
+ drm_err(&dev_priv->drm,
+ "GPIO index %u request failed (%ld)\n",
+ gpio_index, PTR_ERR(gpio_desc));
return;
}
@@ -346,7 +354,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
static void icl_exec_gpio(struct drm_i915_private *dev_priv,
u8 gpio_source, u8 gpio_index, bool value)
{
- DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+ drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
@@ -356,7 +364,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (dev_priv->vbt.dsi.seq_version >= 3)
gpio_index = *data++;
@@ -494,13 +502,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping SPI element execution\n");
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -516,9 +527,10 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+ drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
#else
- DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+ drm_err(&i915->drm,
+ "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
return data + 15;
@@ -570,17 +582,18 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
if (!data)
return;
- WARN_ON(*data != seq_id);
+ drm_WARN_ON(&dev_priv->drm, *data != seq_id);
- DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
- seq_id, sequence_name(seq_id));
+ drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
data++;
@@ -612,18 +625,21 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- DRM_ERROR("Inconsistent operation size\n");
+ drm_err(&dev_priv->drm,
+ "Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
- operation_byte);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- DRM_ERROR("Unsupported MIPI operation byte %u\n",
- operation_byte);
+ drm_err(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
return;
}
}
@@ -658,40 +674,54 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
- DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
- DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
- DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- DRM_DEBUG_KMS("Video mode format %s\n",
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
- "burst" : "<unknown>");
- DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
- DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
- DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
- DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
- DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
+ drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
+ intel_dsi->pixel_overlap);
+ drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
+ drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_dbg_kms(&i915->drm, "Video mode format %s\n",
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+ "burst" : "<unknown>");
+ drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
+ intel_dsi->burst_mode_ratio);
+ drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_dbg_kms(&i915->drm, "Eot %s\n",
+ enableddisabled(intel_dsi->eotp_pkt));
+ drm_dbg_kms(&i915->drm, "Clockstop %s\n",
+ enableddisabled(!intel_dsi->clock_stop));
+ drm_dbg_kms(&i915->drm, "Mode %s\n",
+ intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- DRM_DEBUG_KMS("Dual link: NONE\n");
- DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
- DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
- DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
- DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
- DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
- DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
- DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
- DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
- DRM_DEBUG_KMS("BTA %s\n",
- enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
+ drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
+ intel_dsi->lp_rx_timeout);
+ drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
+ intel_dsi->turn_arnd_val);
+ drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
+ intel_dsi->hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
+ intel_dsi->clk_lp_to_hs_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
+ intel_dsi->clk_hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "BTA %s\n",
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
@@ -704,7 +734,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -763,7 +793,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- DRM_ERROR("Burst mode freq is less than computed\n");
+ drm_err(&dev_priv->drm,
+ "Burst mode freq is less than computed\n");
return false;
}
@@ -773,7 +804,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
} else {
- DRM_ERROR("Burst mode target is not set\n");
+ drm_err(&dev_priv->drm,
+ "Burst mode target is not set\n");
return false;
}
} else
@@ -856,17 +888,20 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+ drm_err(&dev_priv->drm,
+ "Failed to register pwm0 pinmux mapping\n");
pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- DRM_ERROR("Failed to set pinmux to PWM\n");
+ drm_err(&dev_priv->drm,
+ "Failed to set pinmux to PWM\n");
}
if (want_panel_gpio) {
intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- DRM_ERROR("Failed to own gpio for panel control\n");
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
}
@@ -875,7 +910,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
intel_dsi->gpio_backlight =
gpiod_get(dev->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- DRM_ERROR("Failed to own gpio for backlight control\n");
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 86a337c9d85d..341d5ce8b062 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -44,6 +43,7 @@
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
+#define INTEL_DVO_CHIP_LVDS_NO_FIXED 5
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
@@ -101,13 +101,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.dev_ops = &ch7017_ops,
},
{
- .type = INTEL_DVO_CHIP_TMDS,
+ .type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
.name = "ns2501",
.dvo_reg = DVOB,
.dvo_srcdim_reg = DVOB_SRCDIM,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
- }
+ },
};
struct intel_dvo {
@@ -137,7 +137,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
u32 tmp;
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
if (!(tmp & DVO_ENABLE))
return false;
@@ -152,7 +152,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
*pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
@@ -168,7 +168,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -190,11 +190,11 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = I915_READ(dvo_reg);
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
}
static void intel_enable_dvo(struct intel_encoder *encoder,
@@ -204,14 +204,14 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = I915_READ(dvo_reg);
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- I915_WRITE(dvo_reg, temp | DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -286,7 +286,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
/* Save the data order, since I don't know what it should be set to. */
- dvo_val = I915_READ(dvo_reg) &
+ dvo_val = intel_de_read(dev_priv, dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
@@ -301,11 +301,10 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
- I915_WRITE(dvo_srcdim_reg,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
- (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ intel_de_write(dev_priv, dvo_srcdim_reg,
+ (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
/*I915_WRITE(DVOB, dvo_val);*/
- I915_WRITE(dvo_reg, dvo_val);
+ intel_de_write(dev_priv, dvo_reg, dvo_val);
}
static enum drm_connector_status
@@ -481,15 +480,16 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
* initialize the device.
*/
for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = I915_READ(DPLL(pipe));
- I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
+ dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe),
+ dpll[pipe] | DPLL_DVO_2X_MODE);
}
dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(DPLL(pipe), dpll[pipe]);
+ intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -507,17 +507,21 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->port = port;
intel_encoder->pipe_mask = ~0;
- switch (dvo->type) {
- case INTEL_DVO_CHIP_TMDS:
+ if (dvo->type != INTEL_DVO_CHIP_LVDS)
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
+
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
case INTEL_DVO_CHIP_LVDS:
- intel_encoder->cloneable = 0;
drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a1048ece541e..2e5d835a9eaa 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,15 +41,12 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-static inline bool fbc_supported(struct drm_i915_private *dev_priv)
-{
- return HAS_FBC(dev_priv);
-}
-
/*
* On some platforms where the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -97,12 +94,12 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 fbc_ctl;
/* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
@@ -132,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG(i), 0);
+ intel_de_write(dev_priv, FBC_TAG(i), 0);
if (IS_GEN(dev_priv, 4)) {
u32 fbc_ctl2;
@@ -142,12 +139,13 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fence_id >= 0)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
+ intel_de_write(dev_priv, FBC_FENCE_OFF,
+ params->crtc.fence_y_offset);
}
/* enable it... */
- fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev_priv))
@@ -155,12 +153,12 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
if (params->fence_id >= 0)
fbc_ctl |= params->fence_id;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
}
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+ return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
@@ -176,13 +174,14 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
- I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, DPFC_FENCE_YOFF,
+ params->crtc.fence_y_offset);
} else {
- I915_WRITE(DPFC_FENCE_YOFF, 0);
+ intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
}
/* enable it... */
- I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
@@ -190,23 +189,27 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
}
}
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
}
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
- I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
- POSTING_READ(MSG_FBC_REND_STATE);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ trace_intel_fbc_nuke(fbc->crtc);
+
+ intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
@@ -237,22 +240,22 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 5))
dpfc_ctl |= params->fence_id;
if (IS_GEN(dev_priv, 6)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE |
- params->fence_id);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET,
- params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fence_id);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+ params->crtc.fence_y_offset);
}
} else {
if (IS_GEN(dev_priv, 6)) {
- I915_WRITE(SNB_DPFC_CTL_SA, 0);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
}
- I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
+ params->crtc.fence_y_offset);
/* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
}
@@ -262,16 +265,16 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
}
}
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
@@ -282,14 +285,14 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
/* Display WA #0529: skl, kbl, bxt. */
if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
- u32 val = I915_READ(CHICKEN_MISC_4);
+ u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
if (params->gen9_wa_cfb_stride)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
- I915_WRITE(CHICKEN_MISC_4, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_4, val);
}
dpfc_ctl = 0;
@@ -314,13 +317,13 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE |
- params->fence_id);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
- } else {
- I915_WRITE(SNB_DPFC_CTL_SA,0);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fence_id);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+ params->crtc.fence_y_offset);
+ } else if (dev_priv->ggtt.num_fences) {
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
if (dev_priv->fbc.false_color)
@@ -328,21 +331,20 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
+ intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
+ intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
- I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
- HSW_FBCQ_DIS);
+ intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
+ intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
}
if (INTEL_GEN(dev_priv) >= 11)
/* Wa_1409120013:icl,ehl,tgl */
- I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
}
@@ -361,6 +363,8 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ trace_intel_fbc_activate(fbc->crtc);
+
fbc->active = true;
fbc->activated = true;
@@ -378,6 +382,8 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ trace_intel_fbc_deactivate(fbc->crtc);
+
fbc->active = false;
if (INTEL_GEN(dev_priv) >= 5)
@@ -407,7 +413,7 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
@@ -471,7 +477,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
- WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
+ drm_WARN_ON(&dev_priv->drm,
+ drm_mm_node_allocated(&fbc->compressed_fb));
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
@@ -485,9 +492,11 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->threshold = ret;
if (INTEL_GEN(dev_priv) >= 5)
- I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
+ fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
- I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(dev_priv, DPFC_CB_BASE,
+ fbc->compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -500,16 +509,16 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->compressed_llb = compressed_llb;
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_fb.start,
- U32_MAX));
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_llb->start,
- U32_MAX));
- I915_WRITE(FBC_CFB_BASE,
- dev_priv->dsm.start + fbc->compressed_fb.start);
- I915_WRITE(FBC_LL_BASE,
- dev_priv->dsm.start + compressed_llb->start);
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
+ intel_de_write(dev_priv, FBC_CFB_BASE,
+ dev_priv->dsm.start + fbc->compressed_fb.start);
+ intel_de_write(dev_priv, FBC_LL_BASE,
+ dev_priv->dsm.start + compressed_llb->start);
}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
@@ -530,20 +539,22 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (drm_mm_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+ if (!drm_mm_node_allocated(&fbc->compressed_fb))
+ return;
if (fbc->compressed_llb) {
i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
kfree(fbc->compressed_llb);
}
+
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
}
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
@@ -555,7 +566,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
/* This should have been caught earlier. */
- if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
return false;
/* Below are the additional FBC restrictions. */
@@ -663,8 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
- WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
- !plane_state->vma->fence);
+ drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->vma->fence);
if (plane_state->flags & PLANE_HAS_FENCE &&
plane_state->vma->fence)
@@ -681,12 +692,37 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
fbc->compressed_fb.size * fbc->threshold;
}
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (intel_vgpu_active(dev_priv)) {
+ fbc->no_fbc_reason = "VGPU is active";
+ return false;
+ }
+
+ if (!i915_modparams.enable_fbc) {
+ fbc->no_fbc_reason = "disabled per module param or by default";
+ return false;
+ }
+
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
+ return true;
+}
+
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!intel_fbc_can_enable(dev_priv))
+ return false;
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -785,28 +821,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- if (intel_vgpu_active(dev_priv)) {
- fbc->no_fbc_reason = "VGPU is active";
- return false;
- }
-
- if (!i915_modparams.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param or by default";
- return false;
- }
-
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
- }
-
- return true;
-}
-
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
@@ -867,16 +881,20 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
return true;
}
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
const char *reason = "update pending";
bool need_vblank_wait = false;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return need_vblank_wait;
mutex_lock(&fbc->lock);
@@ -926,9 +944,9 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_crtc *crtc = fbc->crtc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->crtc);
- WARN_ON(fbc->active);
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
+ drm_WARN_ON(&dev_priv->drm, fbc->active);
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
@@ -942,7 +960,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
if (fbc->crtc != crtc)
return;
@@ -967,12 +985,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
-void intel_fbc_post_update(struct intel_crtc *crtc)
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return;
mutex_lock(&fbc->lock);
@@ -994,7 +1016,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
@@ -1015,7 +1037,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
@@ -1099,24 +1121,26 @@ out:
/**
* intel_fbc_enable: tries to enable FBC on the CRTC
* @crtc: the CRTC
- * @crtc_state: corresponding &drm_crtc_state for @crtc
- * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
+ * @state: corresponding &intel_atomic_state for @crtc
*
* This function checks if the given CRTC was chosen for FBC, then enables it if
* possible. Notice that it doesn't activate FBC. It is valid to call
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
-void intel_fbc_enable(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return;
mutex_lock(&fbc->lock);
@@ -1129,7 +1153,7 @@ void intel_fbc_enable(struct intel_crtc *crtc,
__intel_fbc_disable(dev_priv);
}
- WARN_ON(fbc->active);
+ drm_WARN_ON(&dev_priv->drm, fbc->active);
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
@@ -1139,14 +1163,14 @@ void intel_fbc_enable(struct intel_crtc *crtc,
if (intel_fbc_alloc_cfb(dev_priv,
intel_fbc_calculate_cfb_size(dev_priv, cache),
- fb->format->cpp[0])) {
+ plane_state->hw.fb->format->cpp[0])) {
cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
- fb->modifier != I915_FORMAT_MOD_X_TILED)
+ plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
cache->gen9_wa_cfb_stride =
DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
else
@@ -1169,9 +1193,10 @@ out:
void intel_fbc_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc)
return;
mutex_lock(&fbc->lock);
@@ -1190,12 +1215,12 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
if (fbc->crtc) {
- WARN_ON(fbc->crtc->active);
+ drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&fbc->lock);
@@ -1267,7 +1292,7 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
/* There's no guarantee that underrun_detected won't be set to true
@@ -1348,7 +1373,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
/* This value was pulled out of someone's hat */
if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+ intel_de_write(dev_priv, FBC_CONTROL,
+ 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index c8a5e5098687..6dc1edefe81b 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,14 +19,13 @@ struct intel_plane_state;
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
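A hypothetical caller sketch, not part of this patch: with the new prototypes above, the commit path hands over only the atomic state and the CRTC, and the FBC code digs the CRTC/plane states out of @state itself. example_commit_crtc() and the vblank wait are illustrative.

static void example_commit_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* pre_update returns true when a vblank wait is needed */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/* ... program the plane and pipe here ... */

	intel_fbc_post_update(state, crtc);
}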
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1e98e432c9fa..3bc804212a99 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -40,7 +40,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -191,7 +190,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
- if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
+ if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
@@ -410,9 +409,9 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc->state->active)
continue;
- WARN(!crtc->primary->state->fb,
- "re-used BIOS config but lost an fb on crtc %d\n",
- crtc->base.id);
+ drm_WARN(dev, !crtc->primary->state->fb,
+ "re-used BIOS config but lost an fb on crtc %d\n",
+ crtc->base.id);
}
@@ -439,7 +438,8 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
+ if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) ||
+ !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -452,7 +452,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
- ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper);
if (ret) {
kfree(ifbdev);
return ret;
@@ -461,8 +461,6 @@ int intel_fbdev_init(struct drm_device *dev)
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
- drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-
return 0;
}
@@ -569,7 +567,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
- WARN_ON(state != FBINFO_STATE_RUNNING);
+ drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
if (!console_trylock()) {
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 6c83b350525d..813a4f7033e1 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -95,15 +95,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
lockdep_assert_held(&dev_priv->irq_lock);
- if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
- I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -118,11 +118,13 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg,
+ enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
} else {
- if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
+ drm_err(&dev_priv->drm, "pipe %c underrun\n",
+ pipe_name(pipe));
}
}
@@ -143,18 +145,18 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
- POSTING_READ(GEN7_ERR_INT);
+ intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_posting_read(dev_priv, GEN7_ERR_INT);
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
}
static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -163,7 +165,8 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_write(dev_priv, GEN7_ERR_INT,
+ ERR_INT_FIFO_UNDERRUN(pipe));
if (!ivb_can_enable_err_int(dev))
return;
@@ -173,9 +176,10 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
if (old &&
- I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
+ intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ drm_err(&dev_priv->drm,
+ "uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
}
}
}
@@ -209,19 +213,20 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
- u32 serr_int = I915_READ(SERR_INT);
+ u32 serr_int = intel_de_read(dev_priv, SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
- I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
- POSTING_READ(SERR_INT);
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+ intel_de_posting_read(dev_priv, SERR_INT);
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -231,8 +236,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
- I915_WRITE(SERR_INT,
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
if (!cpt_can_enable_serr_int(dev))
return;
@@ -241,10 +246,11 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
} else {
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- if (old && I915_READ(SERR_INT) &
+ if (old && intel_de_read(dev_priv, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm,
+ "uncleared pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
}
}
@@ -378,8 +384,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("CPU pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
@@ -400,8 +406,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("PCH transcoder %c FIFO underrun\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+ pipe_name(pch_transcoder));
}
}
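The changes in this file follow the same mechanical pattern as the rest of the series: register macros with an implicit dev_priv become explicit display-engine helpers, and bare WARN/DRM_ERROR become device-aware variants. A minimal sketch of the before/after shape, illustrative only (reg and val are placeholders):

static void example_mmio_style(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, u32 val)
{
	/* Old style, dev_priv implicit in the macros:
	 *   I915_WRITE(reg, val);
	 *   val = I915_READ(reg);
	 *   POSTING_READ(reg);
	 *   DRM_ERROR("oops\n");
	 */

	/* New style, the device is passed explicitly: */
	intel_de_write(dev_priv, reg, val);
	val = intel_de_read(dev_priv, reg);
	intel_de_posting_read(dev_priv, reg);
	drm_err(&dev_priv->drm, "oops\n");
}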
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
new file mode 100644
index 000000000000..a0cc894c3868
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_global_state.h"
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ obj->state = state;
+ obj->funcs = funcs;
+ list_add_tail(&obj->head, &dev_priv->global_obj_list);
+}
+
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+{
+ struct intel_global_obj *obj, *next;
+
+ list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
+ list_del(&obj->head);
+ obj->funcs->atomic_destroy_state(obj, obj->state);
+ }
+}
+
+static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
+static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
+ struct drm_modeset_lock *lock)
+{
+ struct drm_modeset_lock *l;
+
+ list_for_each_entry(l, &ctx->locked, head) {
+ if (lock == l)
+ return true;
+ }
+
+ return false;
+}
+
+static void assert_global_state_read_locked(struct intel_atomic_state *state)
+{
+ struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (modeset_lock_is_held(ctx, &crtc->base.mutex))
+ return;
+ }
+
+ WARN(1, "Global state not read locked\n");
+}
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int index, num_objs, i;
+ size_t size;
+ struct __intel_global_objs_state *arr;
+ struct intel_global_state *obj_state;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].state;
+
+ assert_global_state_read_locked(state);
+
+ num_objs = state->num_global_objs + 1;
+ size = sizeof(*state->global_objs) * num_objs;
+ arr = krealloc(state->global_objs, size, GFP_KERNEL);
+ if (!arr)
+ return ERR_PTR(-ENOMEM);
+
+ state->global_objs = arr;
+ index = state->num_global_objs;
+ memset(&state->global_objs[index], 0, sizeof(*state->global_objs));
+
+ obj_state = obj->funcs->atomic_duplicate_state(obj);
+ if (!obj_state)
+ return ERR_PTR(-ENOMEM);
+
+ obj_state->changed = false;
+
+ state->global_objs[index].state = obj_state;
+ state->global_objs[index].old_state = obj->state;
+ state->global_objs[index].new_state = obj_state;
+ state->global_objs[index].ptr = obj;
+ obj_state->state = state;
+
+ state->num_global_objs = num_objs;
+
+ DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
+ obj, obj_state, state);
+
+ return obj_state;
+}
+
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].old_state;
+
+ return NULL;
+}
+
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].new_state;
+
+ return NULL;
+}
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *old_obj_state, *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
+ new_obj_state, i) {
+ WARN_ON(obj->state != old_obj_state);
+
+ /*
+ * If the new state wasn't modified (and properly
+ * locked for write access) we throw it away.
+ */
+ if (!new_obj_state->changed)
+ continue;
+
+ assert_global_state_write_locked(dev_priv);
+
+ old_obj_state->state = state;
+ new_obj_state->state = NULL;
+
+ state->global_objs[i].state = old_obj_state;
+ obj->state = new_obj_state;
+ }
+}
+
+void intel_atomic_clear_global_state(struct intel_atomic_state *state)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++) {
+ struct intel_global_obj *obj = state->global_objs[i].ptr;
+
+ obj->funcs->atomic_destroy_state(obj,
+ state->global_objs[i].state);
+ state->global_objs[i].ptr = NULL;
+ state->global_objs[i].state = NULL;
+ state->global_objs[i].old_state = NULL;
+ state->global_objs[i].new_state = NULL;
+ }
+ state->num_global_objs = 0;
+}
+
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
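A hedged usage sketch for the helpers above (not part of this patch; example_atomic_check() is invented): a consumer fetches its global state from the atomic state, then takes either the write lock or the serialization dependency.

static int example_atomic_check(struct intel_atomic_state *state,
				struct intel_global_obj *obj)
{
	struct intel_global_state *obj_state;

	obj_state = intel_atomic_get_global_obj_state(state, obj);
	if (IS_ERR(obj_state))
		return PTR_ERR(obj_state);

	/*
	 * Modifying the state requires every CRTC lock (see
	 * assert_global_state_write_locked() above); a commit that only
	 * depends on the current value would instead call
	 * intel_atomic_serialize_global_state(obj_state).
	 */
	return intel_atomic_lock_global_state(obj_state);
}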
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
new file mode 100644
index 000000000000..e6163a469029
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GLOBAL_STATE_H__
+#define __INTEL_GLOBAL_STATE_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_global_obj;
+struct intel_global_state;
+
+struct intel_global_state_funcs {
+ struct intel_global_state *(*atomic_duplicate_state)(struct intel_global_obj *obj);
+ void (*atomic_destroy_state)(struct intel_global_obj *obj,
+ struct intel_global_state *state);
+};
+
+struct intel_global_obj {
+ struct list_head head;
+ struct intel_global_state *state;
+ const struct intel_global_state_funcs *funcs;
+};
+
+#define intel_for_each_global_obj(obj, dev_priv) \
+ list_for_each_entry(obj, &(dev_priv)->global_obj_list, head)
+
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+struct intel_global_state {
+ struct intel_atomic_state *state;
+ bool changed;
+};
+
+struct __intel_global_objs_state {
+ struct intel_global_obj *ptr;
+ struct intel_global_state *state, *old_state, *new_state;
+};
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs);
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state);
+void intel_atomic_clear_global_state(struct intel_atomic_state *state);
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+
+#endif
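A hypothetical consumer of the new object, sketched under the funcs contract above (struct intel_example_state and friends are invented for illustration; real users subclass intel_global_state by embedding it as the first member, exactly as here):

struct intel_example_state {
	struct intel_global_state base;
	unsigned int budget;
};

static struct intel_global_state *
intel_example_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_example_state *state;

	/* base is the first member, so copying the full subclass works */
	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_example_destroy_state(struct intel_global_obj *obj,
					struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_example_funcs = {
	.atomic_duplicate_state = intel_example_duplicate_state,
	.atomic_destroy_state = intel_example_destroy_state,
};

At driver init the object would then be registered with intel_atomic_global_obj_init(dev_priv, &obj, &state->base, &intel_example_funcs), and torn down for all objects at once by intel_atomic_global_obj_cleanup().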
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 3d4d19ac1d14..1fd3a5a6296b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -143,8 +142,8 @@ to_intel_gmbus(struct i2c_adapter *i2c)
void
intel_gmbus_reset(struct drm_i915_private *dev_priv)
{
- I915_WRITE(GMBUS0, 0);
- I915_WRITE(GMBUS4, 0);
+ intel_de_write(dev_priv, GMBUS0, 0);
+ intel_de_write(dev_priv, GMBUS4, 0);
}
static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -153,12 +152,12 @@ static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
u32 val;
/* When using bit banging for I2C, this bit needs to be set to 1 */
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
if (!enable)
val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -166,12 +165,12 @@ static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
if (!enable)
val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -179,12 +178,12 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(GEN9_CLKGATE_DIS_4);
+ val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4);
if (!enable)
val |= BXT_GMBUS_GATING_DIS;
else
val &= ~BXT_GMBUS_GATING_DIS;
- I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+ intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val);
}
static u32 get_reserved(struct intel_gmbus *bus)
@@ -337,14 +336,16 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
irq_en = 0;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- I915_WRITE_FW(GMBUS4, irq_en);
+ intel_de_write_fw(dev_priv, GMBUS4, irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ 2);
if (ret)
- ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
+ ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ 50);
- I915_WRITE_FW(GMBUS4, 0);
+ intel_de_write_fw(dev_priv, GMBUS4, 0);
remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
@@ -366,13 +367,13 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
irq_enable = GMBUS_IDLE_EN;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- I915_WRITE_FW(GMBUS4, irq_enable);
+ intel_de_write_fw(dev_priv, GMBUS4, irq_enable);
ret = intel_wait_for_register_fw(&dev_priv->uncore,
GMBUS2, GMBUS_ACTIVE, 0,
10);
- I915_WRITE_FW(GMBUS4, 0);
+ intel_de_write_fw(dev_priv, GMBUS4, 0);
remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
return ret;
@@ -404,15 +405,12 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
len++;
}
size = len % 256 + 256;
- I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+ intel_de_write_fw(dev_priv, GMBUS0,
+ gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
}
- I915_WRITE_FW(GMBUS1,
- gmbus1_index |
- GMBUS_CYCLE_WAIT |
- (size << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS1,
+ gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
@@ -421,7 +419,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- val = I915_READ_FW(GMBUS3);
+ val = intel_de_read_fw(dev_priv, GMBUS3);
do {
if (extra_byte_added && len == 1)
break;
@@ -432,7 +430,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (burst_read && len == size - 4)
/* Reset the override bit */
- I915_WRITE_FW(GMBUS0, gmbus0_reg);
+ intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg);
}
return 0;
@@ -489,12 +487,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}
- I915_WRITE_FW(GMBUS3, val);
- I915_WRITE_FW(GMBUS1,
- gmbus1_index | GMBUS_CYCLE_WAIT |
- (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS3, val);
+ intel_de_write_fw(dev_priv, GMBUS1,
+ gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -503,7 +498,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- I915_WRITE_FW(GMBUS3, val);
+ intel_de_write_fw(dev_priv, GMBUS3, val);
ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
@@ -568,7 +563,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- I915_WRITE_FW(GMBUS5, gmbus5);
+ intel_de_write_fw(dev_priv, GMBUS5, gmbus5);
if (msgs[1].flags & I2C_M_RD)
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
@@ -578,7 +573,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- I915_WRITE_FW(GMBUS5, 0);
+ intel_de_write_fw(dev_priv, GMBUS5, 0);
return ret;
}
@@ -601,7 +596,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
pch_gmbus_clock_gating(dev_priv, false);
retry:
- I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0);
+ intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
@@ -629,18 +624,19 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
- I915_WRITE_FW(GMBUS0, 0);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
ret = ret ?: i;
goto out;
@@ -660,8 +656,9 @@ clear_err:
*/
ret = -ENXIO;
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out after NAK\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
@@ -669,13 +666,13 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
- I915_WRITE_FW(GMBUS1, 0);
- I915_WRITE_FW(GMBUS0, 0);
+ intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT);
+ intel_de_write_fw(dev_priv, GMBUS1, 0);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
- DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
- adapter->name, msgs[i].addr,
- (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
/*
* Passive adapters sometimes NAK the first probe. Retry the first
@@ -684,17 +681,19 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
goto retry;
}
goto out;
timeout:
- DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- bus->adapter.name, bus->reg0 & 0xff);
- I915_WRITE_FW(GMBUS0, 0);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -908,7 +907,8 @@ err:
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_gmbus_is_valid_pin(dev_priv, pin)))
return NULL;
return &dev_priv->gmbus[pin].adapter;
@@ -929,9 +929,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
- DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
- force_bit ? "en" : "dis", adapter->name,
- bus->force_bit);
+ drm_dbg_kms(&dev_priv->drm,
+ "%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
mutex_unlock(&dev_priv->gmbus_mutex);
}
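Since intel_gmbus_get_adapter() now returns NULL under a device-aware warning, a hedged consumer sketch (example_read_edid_byte() is invented; 0x50 is the standard DDC/EDID slave address):

static int example_read_edid_byte(struct drm_i915_private *dev_priv,
				  unsigned int pin, u8 offset, u8 *val)
{
	struct i2c_adapter *adapter =
		intel_gmbus_get_adapter(dev_priv, pin);
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	if (!adapter)
		return -ENXIO;

	/* i2c_transfer() returns the number of messages completed */
	return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}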
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 0fdbd39f6641..ee0f27ea2810 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -43,6 +43,7 @@ static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
@@ -54,7 +55,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Bksv is invalid\n");
+ drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
return -ENODEV;
}
@@ -64,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
@@ -85,8 +86,8 @@ bool intel_hdcp_capable(struct intel_connector *connector)
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
@@ -112,7 +113,8 @@ static inline
bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
- return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ return intel_de_read(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
@@ -120,7 +122,8 @@ static inline
bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
- return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ return intel_de_read(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS;
}
@@ -184,9 +187,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
- I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
- I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
- HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
@@ -194,7 +197,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
int ret;
u32 val;
- val = I915_READ(HDCP_KEY_STATUS);
+ val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
return 0;
@@ -203,7 +206,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
* out of reset. So if Key is not already loaded, it's an error state.
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+ if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
return -ENXIO;
/*
@@ -217,12 +220,13 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
- DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to initiate HDCP key load (%d)\n",
+ ret);
return ret;
}
} else {
- I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
}
/* Wait for the keys to load (500us) */
@@ -235,7 +239,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
return -ENXIO;
/* Send Aksv over to PCH display for use in authentication */
- I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
return 0;
}
@@ -243,9 +247,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
/* Returns updated SHA-1 index */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
- I915_WRITE(HDCP_SHA_TEXT, sha_text);
+ intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
return 0;
@@ -270,7 +274,8 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
return HDCP_TRANSD_REP_PRESENT |
HDCP_TRANSD_SHA1_M0;
default:
- DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+ drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
+ cpu_transcoder);
return -EINVAL;
}
}
@@ -287,7 +292,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- DRM_ERROR("Unknown port %d\n", port);
+ drm_err(&dev_priv->drm, "Unknown port %d\n", port);
return -EINVAL;
}
}
@@ -297,21 +302,19 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
- struct drm_i915_private *dev_priv;
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
-
/* Process V' values from the receiver */
for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
if (ret)
return ret;
- I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
+ intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
}
/*
@@ -328,7 +331,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_text = 0;
sha_leftovers = 0;
rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
@@ -345,7 +348,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
/* Programming guide writes this every 64 bytes */
sha_idx += sizeof(sha_text);
if (!(sha_idx % 64))
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
/* Store the leftover bytes from the ksv in sha_text */
sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
@@ -377,7 +381,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
*/
if (sha_leftovers == 0) {
/* Write 16 bits of text, 16 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
ret = intel_write_sha_text(dev_priv,
bstatus[0] << 8 | bstatus[1]);
if (ret < 0)
@@ -385,14 +390,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 16 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
@@ -400,7 +407,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
} else if (sha_leftovers == 1) {
/* Write 24 bits of text, 8 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
/* Only 24-bits of data, must be in the LSB */
sha_text = (sha_text & 0xffffff00) >> 8;
@@ -410,14 +418,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 24 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
@@ -425,7 +435,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
} else if (sha_leftovers == 2) {
/* Write 32 bits of text */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
@@ -433,7 +444,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 64 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
for (i = 0; i < 2; i++) {
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
@@ -442,7 +454,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
}
} else if (sha_leftovers == 3) {
/* Write 32 bits of text */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0] << 24;
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
@@ -450,32 +463,35 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 8 bits of text, 24 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, bstatus[1]);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 8 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
} else {
- DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
- sha_leftovers);
+ drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
+ sha_leftovers);
return -EINVAL;
}
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
ret = intel_write_sha_text(dev_priv, 0);
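For reference, the four leftover branches above are exhaustive: the KSV text is DRM_HDCP_KSV_LEN (5) bytes per downstream device and the SHA unit consumes 32-bit words, so the residue after the KSV loop is always (5 * num_downstream) % 4, i.e. one of 0..3. A small illustrative helper, not part of this patch:

static unsigned int example_sha_leftovers(u8 num_downstream)
{
	/* 5 bytes of KSV per device, hashed in 4-byte words */
	return (num_downstream * DRM_HDCP_KSV_LEN) % 4;
}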
@@ -495,14 +511,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
return ret;
/* Tell the HW we're done with the hash and wait for it to ACK */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_COMPLETE_HASH);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
- if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
- DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
+ if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+ drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
return -ENXIO;
}
@@ -513,15 +530,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
- struct drm_device *dev = connector->base.dev;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -531,7 +549,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -544,13 +562,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
if (num_downstream == 0) {
- DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Repeater with zero downstream devices\n");
return -EINVAL;
}
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo) {
- DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
+ drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
return -ENOMEM;
}
@@ -558,8 +577,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (ret)
goto err;
- if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
- DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
+ num_downstream)) {
+ drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
}
@@ -577,12 +597,13 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
}
if (i == tries) {
- DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "V Prime validation failed.(%d)\n", ret);
goto err;
}
- DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
- num_downstream);
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
ret = 0;
err:
kfree(ksv_fifo);
@@ -592,13 +613,12 @@ err:
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
- struct drm_i915_private *dev_priv;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
- enum port port;
+ enum port port = intel_dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
union {
@@ -615,10 +635,6 @@ static int intel_hdcp_auth(struct intel_connector *connector)
} ri;
bool repeater_present, hdcp_capable;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
-
- port = intel_dig_port->base.port;
-
/*
* Detects whether the display is HDCP capable. Although we check for
* valid Bksv below, the HDCP over DP spec requires that we check
@@ -630,28 +646,32 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_DEBUG_KMS("Panel is not HDCP capable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel is not HDCP capable\n");
return -EINVAL;
}
}
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
- get_random_u32());
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
- HDCP_CONF_CAPTURE_AN);
+ intel_de_write(dev_priv,
+ HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
- DRM_ERROR("Timed out waiting for An\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
- an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
+ an.reg[0] = intel_de_read(dev_priv,
+ HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = intel_de_read(dev_priv,
+ HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@@ -664,33 +684,34 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret < 0)
return ret;
- if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
- DRM_ERROR("BKSV is revoked\n");
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+ drm_err(&dev_priv->drm, "BKSV is revoked\n");
return -EPERM;
}
- I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
- I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
+ intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
+ bksv.reg[0]);
+ intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
+ bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
- I915_WRITE(HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
- port));
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
- HDCP_CONF_AUTH_AND_ENC);
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Timed out waiting for R0 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -716,19 +737,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
- I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+ intel_de_write(dev_priv,
+ HDCP_RPRIME(dev_priv, cpu_transcoder, port),
+ ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ drm_dbg_kms(&dev_priv->drm,
+ "Timed out waiting for Ri prime match (%x)\n",
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
+ cpu_transcoder, port)));
return -ETIMEDOUT;
}
@@ -737,7 +760,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Timed out waiting for encryption\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -749,52 +772,53 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
- DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
return 0;
}
static int _intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
+ connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
- DRM_ERROR("Failed to disable HDCP signalling\n");
+ drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
return ret;
}
- DRM_DEBUG_KMS("HDCP is disabled\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
return 0;
}
static int _intel_hdcp_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
int i, ret, tries = 3;
- DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
+ connector->base.name, connector->base.base.id);
if (!hdcp_key_loadable(dev_priv)) {
- DRM_ERROR("HDCP key Load is not possible\n");
+ drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
return -ENXIO;
}
@@ -805,7 +829,8 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
intel_hdcp_clear_keys(dev_priv);
}
if (ret) {
- DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
+ ret);
return ret;
}
@@ -817,13 +842,14 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
return 0;
}
- DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
/* Ensuring HDCP encryption and signalling are stopped. */
_intel_hdcp_disable(connector);
}
- DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
@@ -836,9 +862,9 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -853,11 +879,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
- DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
- connector->base.name, connector->base.base.id,
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "%s:%d HDCP link stopped encryption,%x\n",
+ connector->base.name, connector->base.base.id,
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -872,12 +899,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
ret = _intel_hdcp_disable(connector);
if (ret) {
- DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -885,7 +913,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_enable(connector);
if (ret) {
- DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -901,9 +929,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
- struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
/*
@@ -916,13 +944,13 @@ static void intel_hdcp_prop_work(struct work_struct *work)
hdcp->value);
mutex_unlock(&hdcp->mutex);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
}
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
- /* PORT E doesn't have HDCP, and PORT F is disabled */
- return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
+ return INTEL_INFO(dev_priv)->display.has_hdcp &&
+ (INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
}
static int
@@ -944,7 +972,8 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
if (ret)
- DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -974,7 +1003,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
- DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -998,7 +1028,7 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
if (ret < 0)
- DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1023,7 +1053,8 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
if (ret < 0)
- DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1048,7 +1079,8 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
if (ret < 0)
- DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1073,7 +1105,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
if (ret < 0)
- DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1097,7 +1130,8 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
if (ret < 0)
- DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1126,7 +1160,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
rep_topology,
rep_send_ack);
if (ret < 0)
- DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Verify rep topology failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1151,7 +1186,7 @@ hdcp2_verify_mprime(struct intel_connector *connector,
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
if (ret < 0)
- DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1174,7 +1209,8 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
if (ret < 0)
- DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1209,9 +1245,9 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_ake_init ake_init;
struct hdcp2_ake_send_cert send_cert;
@@ -1242,15 +1278,16 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
return ret;
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
- DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
+ drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
}
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
- if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.send_cert.cert_rx.receiver_id,
1)) {
- DRM_ERROR("Receiver ID is revoked\n");
+ drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
return -EPERM;
}
@@ -1297,7 +1334,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
static int hdcp2_locality_check(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_lc_init lc_init;
@@ -1333,7 +1370,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp2_ske_send_eks send_eks;
int ret;
@@ -1353,7 +1390,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1404,9 +1441,9 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_rep_send_receiverid_list recvid_list;
struct hdcp2_rep_send_ack rep_ack;
@@ -1425,7 +1462,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
- DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
return -EINVAL;
}
@@ -1433,17 +1470,24 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
+ if (!hdcp->hdcp2_encrypted && seq_num_v) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non zero Seq_num_v at first RecvId_List msg\n");
+ return -EINVAL;
+ }
+
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
- DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
return -EINVAL;
}
device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
- if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.recvid_list.receiver_ids,
device_cnt)) {
- DRM_ERROR("Revoked receiver ID(s) is in list\n");
+ drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
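/*
 * Aside (not part of the patch): the seq_num_v rules the hunk above
 * enforces, pulled out as a standalone sketch. The first RecvId_List
 * message of a session must carry seq_num_v == 0, and any decrease in
 * a later message means the 24-bit counter rolled over, so the driver
 * must reauthenticate. "first_msg" stands in for the
 * !hdcp->hdcp2_encrypted test used above.
 */
static int check_seq_num_v(bool first_msg, u32 prev_seq_num_v, u32 seq_num_v)
{
	if (first_msg && seq_num_v)
		return -EINVAL;	/* non-zero seq_num_v on the first message */

	if (seq_num_v < prev_seq_num_v)
		return -EINVAL;	/* rollover: force reauthentication */

	return 0;
}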
@@ -1475,26 +1519,28 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
int ret;
ret = hdcp2_authentication_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_locality_check(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Locality Check failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_session_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
return ret;
}
@@ -1509,7 +1555,8 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
if (hdcp->is_repeater) {
ret = hdcp2_authenticate_repeater(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Repeater Auth Failed. Err: %d\n", ret);
return ret;
}
}
@@ -1524,31 +1571,32 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
- DRM_ERROR("Failed to enable HDCP signalling. %d\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to enable HDCP signalling. %d\n",
+ ret);
return ret;
}
}
- if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
- I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
- I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
- port)) |
- CTL_LINK_ENCRYPTION_REQ);
+ intel_de_write(dev_priv,
+ HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
}
ret = intel_de_wait_for_set(dev_priv,
@@ -1562,19 +1610,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS));
+ drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
- I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
- ~CTL_LINK_ENCRYPTION_REQ);
+ intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1582,13 +1629,14 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
- DRM_DEBUG_KMS("Disable Encryption Timedout");
+ drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
- DRM_ERROR("Failed to disable HDCP signalling. %d\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP signalling. %d\n",
+ ret);
return ret;
}
}
@@ -1598,6 +1646,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret, i, tries = 3;
for (i = 0; i < tries; i++) {
@@ -1606,10 +1655,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
break;
/* Clearing the mei hdcp session */
- DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
- i + 1, tries, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
if (i != tries) {
@@ -1620,9 +1669,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
ret = hdcp2_enable_encryption(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Encryption Enable Failed.(%d)\n", ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
}
@@ -1631,23 +1681,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
- DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
- hdcp->content_type, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
return ret;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
hdcp->hdcp2_encrypted = true;
return 0;
@@ -1655,15 +1706,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
@@ -1673,10 +1725,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -1690,10 +1742,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
- DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
- I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
- port)));
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "HDCP2.2 link stopped the encryption, %x\n",
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -1713,25 +1766,29 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
- DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP2.2 Downstream topology change\n");
ret = hdcp2_authenticate_repeater_topology(connector);
if (!ret) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&hdcp->prop_work);
goto out;
}
- DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Repeater topology auth failed.(%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
} else {
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP2.2 link failed, retrying auth\n",
+ connector->base.name, connector->base.base.id);
}
ret = _intel_hdcp2_disable(connector);
if (ret) {
- DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id, ret);
+ drm_err(&dev_priv->drm,
+ "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id, ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -1739,9 +1796,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
ret = _intel_hdcp2_enable(connector);
if (ret) {
- DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -1772,7 +1830,7 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
{
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
- DRM_DEBUG("I915 HDCP comp bind\n");
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
dev_priv->hdcp_master->mei_dev = mei_kdev;
@@ -1786,7 +1844,7 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
{
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
- DRM_DEBUG("I915 HDCP comp unbind\n");
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_master = NULL;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1830,7 +1888,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi =
- intel_get_mei_fw_ddi_index(connector->encoder->port);
+ intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
else
/*
* As per ME FW API expectation, for GEN 12+, fw_ddi is filled
@@ -1854,7 +1912,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
- DRM_ERROR("Out of Memory\n");
+ drm_err(&dev_priv->drm, "Out of Memory\n");
return -ENOMEM;
}
@@ -1881,14 +1939,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
return;
mutex_lock(&dev_priv->hdcp_comp_mutex);
- WARN_ON(dev_priv->hdcp_comp_added);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
dev_priv->hdcp_comp_added = true;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+ ret);
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_comp_added = false;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1899,12 +1958,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
static void intel_hdcp2_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
- DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
}
@@ -1954,7 +2014,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
return -ENOENT;
mutex_lock(&hdcp->mutex);
- WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_WARN_ON(&dev_priv->drm,
+ hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
if (INTEL_GEN(dev_priv) >= 12) {
@@ -2014,6 +2075,46 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
+ /*
+ * If a content-type change is requested during an active HDCP
+ * encryption session, disable HDCP and re-enable it with the new
+ * type value.
+ */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+ * Mark the hdcp state as DESIRED after disabling hdcp as part of
+ * the type-change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_enable(connector,
+ crtc_state->cpu_transcoder,
+ (u8)conn_state->hdcp_content_type);
+}
+
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->hdcp_comp_mutex);
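/*
 * Summary sketch (not from the patch): nearly every hunk in
 * intel_hdcp.c above is the same mechanical conversion. Register
 * access moves from the implicit-device I915_READ()/I915_WRITE()
 * macros to the explicit intel_de_read()/intel_de_write() helpers,
 * and logging moves from DRM_ERROR()/DRM_DEBUG_KMS() to
 * drm_err()/drm_dbg_kms(), which tag each message with the drm_device
 * it came from. The helper below is illustrative only.
 */
static void example_rmw_and_log(struct drm_i915_private *dev_priv,
				i915_reg_t reg, u32 set_bits)
{
	/* old: val = I915_READ(reg); I915_WRITE(reg, val | set_bits); */
	u32 val = intel_de_read(dev_priv, reg);

	intel_de_write(dev_priv, reg, val | set_bits);

	/* old: DRM_DEBUG_KMS("set %#x\n", set_bits); */
	drm_dbg_kms(&dev_priv->drm, "set %#x\n", set_bits);
}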
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index f3c3272e712a..7c12ad609b1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,12 +8,12 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
struct intel_hdcp_shim;
enum port;
enum transcoder;
@@ -26,6 +26,9 @@ int intel_hdcp_init(struct intel_connector *connector,
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
bool intel_hdcp2_capable(struct intel_connector *connector);
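/*
 * Illustrative caller for the newly exported intel_hdcp_update_pipe()
 * (a sketch, not part of the patch): an encoder's atomic update hook
 * forwards its state so HDCP can be torn down and re-enabled when the
 * connector's content-protection or content-type property changes.
 * The hook name here is hypothetical.
 */
static void example_encoder_update_pipe(struct intel_encoder *encoder,
					const struct intel_crtc_state *crtc_state,
					const struct drm_connector_state *conn_state)
{
	/* ... other encoder fastset/update work ... */
	intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
}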
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 93ac0f296852..39930232b253 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -36,7 +36,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
#include "i915_debugfs.h"
@@ -45,6 +44,7 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -72,17 +72,19 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
- "HDMI port enabled, expecting disabled\n");
+ drm_WARN(dev,
+ intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
}
static void
assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
- WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
- TRANS_DDI_FUNC_ENABLE,
- "HDMI transcoder function enabled, expecting disabled\n");
+ drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+ TRANS_DDI_FUNC_ENABLE,
+ "HDMI transcoder function enabled, expecting disabled\n");
}
struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
@@ -215,32 +217,33 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(VIDEO_DIP_CTL, val);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(VIDEO_DIP_DATA, *data);
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(VIDEO_DIP_DATA, 0);
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(VIDEO_DIP_CTL, val);
- POSTING_READ(VIDEO_DIP_CTL);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
}
static void g4x_read_infoframe(struct intel_encoder *encoder,
@@ -252,22 +255,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(VIDEO_DIP_CTL);
+ val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(VIDEO_DIP_CTL, val);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(VIDEO_DIP_DATA);
+ *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
}
static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -288,32 +291,34 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void ibx_read_infoframe(struct intel_encoder *encoder,
@@ -326,15 +331,15 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
@@ -343,7 +348,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -365,10 +370,11 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
@@ -378,22 +384,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void cpt_read_infoframe(struct intel_encoder *encoder,
@@ -406,15 +413,15 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
@@ -422,7 +429,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -441,32 +448,35 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void vlv_read_infoframe(struct intel_encoder *encoder,
@@ -479,15 +489,16 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv,
+ VLV_TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
@@ -495,7 +506,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -519,28 +530,30 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
int data_size;
int i;
- u32 val = I915_READ(ctl_reg);
+ u32 val = intel_de_read(dev_priv, ctl_reg);
data_size = hsw_dip_data_size(dev_priv, type);
- WARN_ON(len > data_size);
+ drm_WARN_ON(&dev_priv->drm, len > data_size);
val &= ~hsw_infoframe_enable(type);
- I915_WRITE(ctl_reg, val);
+ intel_de_write(dev_priv, ctl_reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2), *data);
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < data_size; i += 4)
- I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2), 0);
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ 0);
val |= hsw_infoframe_enable(type);
- I915_WRITE(ctl_reg, val);
- POSTING_READ(ctl_reg);
+ intel_de_write(dev_priv, ctl_reg, val);
+ intel_de_posting_read(dev_priv, ctl_reg);
}
static void hsw_read_infoframe(struct intel_encoder *encoder,
@@ -553,18 +566,19 @@ static void hsw_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(cpu_transcoder));
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2));
+ *data++ = intel_de_read(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
}
static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
u32 mask;
mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -655,12 +669,12 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(type)) == 0)
return;
- if (WARN_ON(frame->any.type != type))
+ if (drm_WARN_ON(encoder->base.dev, frame->any.type != type))
return;
/* see comment above for the reason for this offset */
len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
- if (WARN_ON(len < 0))
+ if (drm_WARN_ON(encoder->base.dev, len < 0))
return;
/* Insert the 'hole' (see big comment above) at position 3 */
@@ -734,8 +748,8 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
/* nonsense combination */
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
drm_hdmi_avi_infoframe_quant_range(frame, connector,
@@ -753,7 +767,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
/* TODO: handle pixel repetition for YCBCR420 outputs */
ret = hdmi_avi_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -774,13 +788,13 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
frame->sdi = HDMI_SPD_SDI_PC;
ret = hdmi_spd_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -806,11 +820,11 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
&crtc_state->hw.adjusted_mode);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
ret = hdmi_vendor_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -844,7 +858,7 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
}
ret = hdmi_drm_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(&dev_priv->drm, ret))
return false;
return true;
@@ -859,7 +873,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -885,8 +899,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
@@ -904,8 +918,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -982,7 +996,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
- I915_WRITE(reg, crtc_state->infoframes.gcp);
+ intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
return true;
}
@@ -1007,7 +1021,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
else
return;
- crtc_state->infoframes.gcp = I915_READ(reg);
+ crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
}
static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
@@ -1042,7 +1056,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1056,15 +1070,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- WARN(val & VIDEO_DIP_ENABLE,
- "DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
@@ -1077,8 +1091,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1100,7 +1114,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1113,8 +1127,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
@@ -1126,8 +1140,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1149,7 +1163,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1163,15 +1177,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- WARN(val & VIDEO_DIP_ENABLE,
- "DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
@@ -1184,8 +1198,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1205,7 +1219,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
assert_hdmi_transcoder_func_disabled(dev_priv,
crtc_state->cpu_transcoder);
@@ -1216,16 +1230,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_DRM_GLK);
if (!enable) {
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1260,10 +1274,9 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
unsigned int offset, void *buffer, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
u8 start = offset & 0xff;
@@ -1290,10 +1303,9 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
unsigned int offset, void *buffer, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
u8 *write_buf;
@@ -1325,10 +1337,9 @@ static
int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
@@ -1447,7 +1458,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_crtc *crtc = connector->base.state->crtc;
struct intel_crtc *intel_crtc = container_of(crtc,
struct intel_crtc, base);
@@ -1455,7 +1466,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
int ret;
for (;;) {
- scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+ scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe));
if (scanline > 100 && scanline < 200)
break;
usleep_range(25, 50);
@@ -1507,8 +1518,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_connector *connector =
intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
@@ -1523,14 +1533,13 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
- I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+ intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
return false;
}
return true;
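/*
 * Sketch of the Ri' poll used in the hunk above (illustrative helper,
 * not part of the patch): write the sink's Ri' into HDCP_RPRIME, then
 * give the hardware about a millisecond to signal a match while
 * encryption stays up. wait_for() returns 0 when the condition became
 * true within the timeout.
 */
static bool example_ri_prime_matches(struct drm_i915_private *i915,
				     enum transcoder cpu_transcoder,
				     enum port port, u32 ri_reg)
{
	intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri_reg);

	return !wait_for(intel_de_read(i915,
				       HDCP_STATUS(i915, cpu_transcoder, port)) &
			 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1);
}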
@@ -1767,8 +1776,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
- I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -1802,7 +1811,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- tmp = I915_READ(intel_hdmi->hdmi_reg);
+ tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -1863,7 +1872,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- WARN_ON(!pipe_config->has_hdmi_sink);
+ drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
@@ -1878,14 +1887,14 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
temp |= HDMI_AUDIO_ENABLE;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
if (pipe_config->has_audio)
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
@@ -1900,7 +1909,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
@@ -1910,10 +1919,10 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to toggle enable bit off and on
@@ -1924,17 +1933,18 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 24 &&
pipe_config->pixel_multiplier > 1) {
- I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+ temp & ~SDVO_ENABLE);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround: the register needs to be written twice, as the
* first write may otherwise be masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
if (pipe_config->has_audio)
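
The write-twice-plus-posting-read sequences above are a recurring idiom: the posting read flushes the preceding write to hardware, and repeating the write covers the documented case of the first write being masked. A hypothetical helper capturing the idiom (the driver deliberately open-codes it at each site):

	static void write_hdmi_reg_masked_wa(struct drm_i915_private *dev_priv,
					     i915_reg_t reg, u32 val)
	{
		/* The first write may be silently masked by the hardware. */
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		/* Repeat so the value is guaranteed to stick. */
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
	}
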
@@ -1952,7 +1962,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
@@ -1969,27 +1979,25 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 24) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
- I915_READ(TRANS_CHICKEN1(pipe)) |
- TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
}
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- I915_WRITE(TRANS_CHICKEN1(pipe),
- I915_READ(TRANS_CHICKEN1(pipe)) &
- ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
if (pipe_config->has_audio)
@@ -2014,11 +2022,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -2039,14 +2047,14 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* HW workaround: the register needs to be written twice, as the
* first write may otherwise be masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -2090,9 +2098,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[encoder->port];
- int max_tmds_clock;
+ int max_tmds_clock, vbt_max_tmds_clock;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
max_tmds_clock = 594000;
@@ -2103,15 +2109,23 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
else
max_tmds_clock = 165000;
- if (info->max_tmds_clock)
- max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+ vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ if (vbt_max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
return max_tmds_clock;
}
+static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi,
+ const struct drm_connector_state *conn_state)
+{
+ return hdmi->has_hdmi_sink &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
- bool force_dvi)
+ bool has_hdmi_sink)
{
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
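
With intel_has_hdmi_sink() in place, callers no longer thread a separate force_dvi flag around; they derive a single has_hdmi_sink bool from connector state and pass it down. A minimal caller sketch using only the signatures visible in this patch (the wrapper name is invented for illustration):

	static enum drm_mode_status
	check_port_clock(struct intel_hdmi *hdmi,
			 const struct drm_connector_state *conn_state, int clock)
	{
		/* "Forced DVI" now simply reads as "no HDMI sink". */
		bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, conn_state);

		return hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
	}
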
@@ -2127,7 +2141,7 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
if (info->max_tmds_clock)
max_tmds_clock = min(max_tmds_clock,
info->max_tmds_clock);
- else if (!hdmi->has_hdmi_sink || force_dvi)
+ else if (!has_hdmi_sink)
max_tmds_clock = min(max_tmds_clock, 165000);
}
@@ -2137,13 +2151,14 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
- bool force_dvi)
+ bool has_hdmi_sink)
{
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
- if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
+ if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits,
+ has_hdmi_sink))
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
@@ -2165,16 +2180,13 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
- int clock;
+ int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- bool force_dvi =
- READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- clock = mode->clock;
-
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
@@ -2188,18 +2200,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock /= 2;
/* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
+ status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
- if (hdmi->has_hdmi_sink && !force_dvi) {
+ if (has_hdmi_sink) {
/* if we can't do 8bpc we may still be able to do 12bpc */
if (status != MODE_OK && !HAS_GMCH(dev_priv))
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
- true, force_dvi);
+ true, has_hdmi_sink);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
- true, force_dvi);
+ true, has_hdmi_sink);
}
if (status != MODE_OK)
return status;
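
The 3/2 and 5/4 factors above come from TMDS clocking: the link clock scales linearly with bits per component relative to the 8 bpc baseline, so 12 bpc needs 1.5x and 10 bpc needs 1.25x the 8 bpc clock. Assuming intel_hdmi_port_clock() is the straightforward scaling those factors imply, its body would be:

	static int intel_hdmi_port_clock(int clock, int bpc)
	{
		/* 8 bpc is the baseline; deep color stretches the TMDS clock. */
		return clock * bpc / 8;
	}
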
@@ -2263,14 +2275,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
}
}
- /* Display WA #1139: glk */
- if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- adjusted_mode->htotal > 5460)
- return false;
-
- /* Display Wa_1405510057:icl */
+ /* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
+ bpc == 10 && IS_GEN(dev_priv, 11) &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -2315,7 +2322,7 @@ static int intel_hdmi_port_clock(int clock, int bpc)
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- int clock, bool force_dvi)
+ int clock)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int bpc;
@@ -2324,7 +2331,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
if (hdmi_deep_color_possible(crtc_state, bpc) &&
hdmi_port_clock_valid(intel_hdmi,
intel_hdmi_port_clock(clock, bpc),
- true, force_dvi) == MODE_OK)
+ true, crtc_state->has_hdmi_sink) == MODE_OK)
return bpc;
}
@@ -2332,8 +2339,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
}
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- bool force_dvi)
+ struct intel_crtc_state *crtc_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@@ -2347,8 +2353,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
clock /= 2;
- bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
- clock, force_dvi);
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock);
crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
@@ -2364,7 +2369,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
bpc, crtc_state->pipe_bpp);
if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
- false, force_dvi) != MODE_OK) {
+ false, crtc_state->has_hdmi_sink) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
crtc_state->port_clock);
return -EINVAL;
@@ -2412,14 +2417,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_hdmi,
+ conn_state);
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
@@ -2448,7 +2453,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
- ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+ ret = intel_hdmi_compute_clock(encoder, pipe_config);
if (ret)
return ret;
@@ -2808,7 +2813,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- i915_debugfs_connector_add(connector);
+ intel_connector_debugfs_add(connector);
intel_hdmi_create_i2c_symlink(connector);
@@ -3002,7 +3007,7 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
else if (intel_phy_is_tc(dev_priv, phy))
return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
- WARN(1, "Unknown port:%c\n", port_name(port));
+ drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
@@ -3052,17 +3057,17 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
u8 ddc_pin;
- if (info->alternate_ddc_pin) {
+ ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ if (ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
- info->alternate_ddc_pin, port_name(port));
- return info->alternate_ddc_pin;
+ ddc_pin, port_name(port));
+ return ddc_pin;
}
if (HAS_PCH_MCC(dev_priv))
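
Direct peeks at dev_priv->vbt.ddi_port_info[] are being replaced with intel_bios_*() accessors keyed on the encoder, keeping knowledge of the parsed VBT layout inside intel_bios.c. A sketch of the accessor shape, under the assumption that it simply forwards to the per-port info:

	int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
	{
		struct drm_i915_private *i915 = to_i915(encoder->base.dev);

		/* 0 means the VBT did not specify a limit. */
		return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock;
	}
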
@@ -3139,16 +3144,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
- if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+ if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
- if (WARN(intel_dig_port->max_lanes < 4,
- "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return;
- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder);
ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
drm_connector_init_with_ddc(dev, connector,
@@ -3165,6 +3170,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -3188,8 +3194,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* generated on the port when a cable is not attached.
*/
if (IS_G45(dev_priv)) {
- u32 temp = I915_READ(PEG_BAND_GAP_DATA);
- I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
}
cec_fill_conn_info_from_drm(&conn_info, connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index d3659d0b408b..8ff1f76a63df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -9,8 +9,6 @@
#include <linux/hdmi.h>
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 99d3a3c7989e..a091442efba4 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -23,8 +23,6 @@
#include <linux/kernel.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -89,29 +87,16 @@
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port)
{
- switch (port) {
- case PORT_A:
- return HPD_PORT_A;
- case PORT_B:
- return HPD_PORT_B;
- case PORT_C:
- return HPD_PORT_C;
- case PORT_D:
- return HPD_PORT_D;
- case PORT_E:
- return HPD_PORT_E;
- case PORT_F:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return HPD_PORT_E;
- return HPD_PORT_F;
- case PORT_G:
- return HPD_PORT_G;
- case PORT_H:
- return HPD_PORT_H;
- case PORT_I:
- return HPD_PORT_I;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ switch (phy) {
+ case PHY_F:
+ return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
+ case PHY_A ... PHY_E:
+ case PHY_G ... PHY_I:
+ return HPD_PORT_A + phy - PHY_A;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
return HPD_NONE;
}
}
@@ -120,6 +105,20 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
#define HPD_RETRY_DELAY 1000
+static enum hpd_pin
+intel_connector_hpd_pin(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ /*
+ * MST connectors get their encoder attached dynamically
+ * so we need to make sure we have an encoder here. But since
+ * MST encoders have their hpd_pin set to HPD_NONE we don't
+ * have to special case them beyond that.
+ */
+ return encoder ? encoder->hpd_pin : HPD_NONE;
+}
+
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
* @dev_priv: private driver data pointer
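
The rewritten intel_hpd_pin_default() above relies on PHY_A..PHY_I and HPD_PORT_A..HPD_PORT_I being declared as contiguous enumerator ranges, so most pins fall out of plain arithmetic and only PHY_F needs special casing. The core trick in isolation (helper name invented for illustration):

	static enum hpd_pin phy_to_hpd_pin(enum phy phy)
	{
		/* Only valid while both enum ranges stay contiguous and aligned. */
		return HPD_PORT_A + phy - PHY_A;
	}
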
@@ -171,10 +170,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+ drm_dbg_kms(&dev_priv->drm,
+ "Received HPD interrupt on PIN %d - cnt: %d\n",
+ pin,
hpd->stats[pin].count);
}
@@ -185,37 +187,32 @@ static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- enum hpd_pin pin;
+ struct intel_connector *connector;
bool hpd_disabled = false;
lockdep_assert_held(&dev_priv->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->polled != DRM_CONNECTOR_POLL_HPD)
- continue;
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
- intel_connector = to_intel_connector(connector);
- intel_encoder = intel_connector->encoder;
- if (!intel_encoder)
+ if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
continue;
- pin = intel_encoder->hpd_pin;
+ pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- DRM_INFO("HPD interrupt storm detected on connector %s: "
+ drm_info(&dev_priv->drm,
+ "HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
- connector->name);
+ connector->base.name);
dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT
- | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
drm_connector_list_iter_end(&conn_iter);
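
The for_each_intel_connector_iter() conversions in this file all follow the standard DRM connector-list iteration protocol, where begin/end bracket the walk so the list can safely change underneath it. The general shape, assuming a struct drm_device *dev in scope:

	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Inspect or update connector->base.polled here. */
	}
	drm_connector_list_iter_end(&conn_iter);
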
@@ -234,40 +231,38 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
intel_wakeref_t wakeref;
enum hpd_pin pin;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_hpd_pin(pin) {
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE ||
+ dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
continue;
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- /* Don't check MST ports, they don't have pins */
- if (!intel_connector->mst_port &&
- intel_connector->encoder->hpd_pin == pin) {
- if (connector->polled != intel_connector->polled)
- DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- connector->name);
- connector->polled = intel_connector->polled;
- if (!connector->polled)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (connector->base.polled != connector->polled)
+ drm_dbg(&dev_priv->drm,
+ "Reenabling HPD on connector %s\n",
+ connector->base.name);
+ connector->base.polled = connector->polled;
}
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_hpd_pin(pin) {
+ if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ }
+
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
+
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
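
The reenable work keeps the driver's usual bracketing: acquire a runtime-PM wakeref before touching hardware, and hold irq_lock across the hotplug bookkeeping. Skeleton of that bracketing, with the actual work elided:

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	spin_lock_irq(&dev_priv->irq_lock);

	/* ... mutate dev_priv->hotplug state, reprogram HPD interrupts ... */

	spin_unlock_irq(&dev_priv->irq_lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
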
@@ -281,7 +276,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->base.status;
connector->base.status =
@@ -290,11 +285,12 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
if (old_status == connector->base.status)
return INTEL_HOTPLUG_UNCHANGED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.base.id,
- connector->base.name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->base.status));
+ drm_dbg_kms(&to_i915(dev)->drm,
+ "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.base.id,
+ connector->base.name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->base.status));
return INTEL_HOTPLUG_CHANGED;
}
@@ -361,16 +357,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
container_of(work, struct drm_i915_private,
hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
u32 changed = 0, retry = 0;
u32 hpd_event_bits;
u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
+ drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
@@ -385,21 +379,25 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
u32 hpd_bit;
- intel_connector = to_intel_connector(connector);
- if (!intel_connector->encoder)
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
continue;
- intel_encoder = intel_connector->encoder;
- hpd_bit = BIT(intel_encoder->hpd_pin);
+
+ hpd_bit = BIT(pin);
if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
- DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- connector->name, intel_encoder->hpd_pin);
+ struct intel_encoder *encoder =
+ intel_attached_encoder(connector);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s (pin %i) received hotplug event.\n",
+ connector->base.name, pin);
- switch (intel_encoder->hotplug(intel_encoder,
- intel_connector,
- hpd_event_bits & hpd_bit)) {
+ switch (encoder->hotplug(encoder, connector,
+ hpd_event_bits & hpd_bit)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:
@@ -481,9 +479,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
- encoder->base.base.id, encoder->base.name,
- long_hpd ? "long" : "short");
+ drm_dbg(&dev_priv->drm,
+ "digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
+ long_hpd ? "long" : "short");
queue_dig = true;
if (long_hpd) {
@@ -509,8 +508,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH(dev_priv),
- "Received HPD interrupt on pin %d although disabled\n", pin);
+ drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ "Received HPD interrupt on pin %d although disabled\n",
+ pin);
continue;
}
@@ -601,8 +601,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
container_of(work, struct drm_i915_private,
hotplug.poll_init_work);
struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
bool enabled;
mutex_lock(&dev->mode_config.mutex);
@@ -610,23 +610,18 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
- connector->polled = intel_connector->polled;
-
- /* MST has a dynamic intel_connector->encoder and it's reprobing
- * is all handled by the MST helpers. */
- if (intel_connector->mst_port)
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
+
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
- intel_connector->encoder->hpd_pin > HPD_NONE) {
- connector->polled = enabled ?
- DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT :
- DRM_CONNECTOR_POLL_HPD;
- }
+ connector->base.polled = connector->polled;
+
+ if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
}
drm_connector_list_iter_end(&conn_iter);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 087b5f57b321..1e6b4fda2900 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 0b67f7887cd0..ad5cc13037ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -71,6 +71,7 @@
#include <drm/intel_lpe_audio.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_lpe_audio.h"
#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
@@ -126,7 +127,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(pdata);
if (IS_ERR(platdev)) {
- DRM_ERROR("Failed to allocate LPE audio platform device\n");
+ drm_err(&dev_priv->drm,
+ "Failed to allocate LPE audio platform device\n");
return platdev;
}
@@ -166,7 +168,7 @@ static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
int irq = dev_priv->lpe_audio.irq;
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
&lpe_audio_irqchip,
handle_simple_irq,
@@ -189,7 +191,8 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
+ drm_info(&dev_priv->drm,
+ "HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
@@ -202,18 +205,19 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
dev_priv->lpe_audio.irq = irq_alloc_desc(0);
if (dev_priv->lpe_audio.irq < 0) {
- DRM_ERROR("Failed to allocate IRQ desc: %d\n",
+ drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
dev_priv->lpe_audio.irq);
ret = dev_priv->lpe_audio.irq;
goto err;
}
- DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
ret = lpe_audio_irq_init(dev_priv);
if (ret) {
- DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to initialize irqchip for lpe audio: %d\n",
ret);
goto err_free_irq;
}
@@ -222,7 +226,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
if (IS_ERR(dev_priv->lpe_audio.platdev)) {
ret = PTR_ERR(dev_priv->lpe_audio.platdev);
- DRM_ERROR("Failed to create lpe audio platform device: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to create lpe audio platform device: %d\n",
ret);
goto err_free_irq;
}
@@ -230,7 +235,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
/* enable chicken bit; at least this is required for Dell Wyse 3040
* with DP outputs (but only sometimes, for some reason!)
*/
- I915_WRITE(VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE);
+ intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG,
+ VLV_CHICKEN_BIT_DBG_ENABLE);
return 0;
err_free_irq:
@@ -257,8 +263,8 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
ret = generic_handle_irq(dev_priv->lpe_audio.irq);
if (ret)
- DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
- ret);
+ drm_err_ratelimited(&dev_priv->drm,
+ "error handling LPE audio irq: %d\n", ret);
}
/**
@@ -276,7 +282,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
if (lpe_audio_detect(dev_priv)) {
ret = lpe_audio_setup(dev_priv);
if (ret < 0)
- DRM_ERROR("failed to setup LPE Audio bridge\n");
+ drm_err(&dev_priv->drm,
+ "failed to setup LPE Audio bridge\n");
}
return ret;
}
@@ -334,7 +341,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
- audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port));
+ audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port));
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
@@ -343,8 +350,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = dp_output;
/* Unmute the amp for both DP and HDMI */
- I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
- audio_enable & ~VLV_AMP_MUTE);
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable & ~VLV_AMP_MUTE);
} else {
memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
ppdata->pipe = -1;
@@ -352,8 +359,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = false;
/* Mute the amp for both DP and HDMI */
- I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
- audio_enable | VLV_AMP_MUTE);
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable | VLV_AMP_MUTE);
}
if (pdata->notify_audio_lpe)
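
The logging churn in this file swaps the global DRM_ERROR()/DRM_DEBUG() macros for drm_err()/drm_dbg() variants that take a struct drm_device, so each message is prefixed with the device it came from; on multi-GPU systems the old form was ambiguous. A hypothetical helper showing the converted style:

	static void report_lpe_audio_error(struct drm_i915_private *i915, int ret)
	{
		/* The device argument makes the log line attributable to this GPU. */
		drm_err(&i915->drm, "failed to setup LPE Audio bridge: %d\n", ret);
	}
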
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 10696bb99dcf..9a067effcfa0 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,7 +37,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -85,7 +84,7 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(lvds_reg);
+ val = intel_de_read(dev_priv, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -125,7 +124,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = I915_READ(lvds_encoder->reg);
+ tmp = intel_de_read(dev_priv, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -143,7 +142,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_GEN(dev_priv) < 4) {
- tmp = I915_READ(PFIT_CONTROL);
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
@@ -156,18 +155,18 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
{
u32 val;
- pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(dev_priv, PP_CONTROL(0)) & PANEL_POWER_RESET;
- val = I915_READ(PP_ON_DELAYS(0));
+ val = intel_de_read(dev_priv, PP_ON_DELAYS(0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = I915_READ(PP_OFF_DELAYS(0));
+ val = intel_de_read(dev_priv, PP_OFF_DELAYS(0));
pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = I915_READ(PP_DIVISOR(0));
+ val = intel_de_read(dev_priv, PP_DIVISOR(0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -182,8 +181,9 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
- DRM_DEBUG_KMS("Panel power timings uninitialized, "
- "setting defaults\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel power timings uninitialized, "
+ "setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
pps->t1_t2 = 40 * 10;
pps->t5 = 200 * 10;
@@ -192,10 +192,10 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->tx = 200 * 10;
}
- DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
- "divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ "divider %d port %d powerdown_on_reset %d\n",
+ pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+ pps->divider, pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -203,25 +203,21 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(PP_CONTROL(0));
- WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+ val = intel_de_read(dev_priv, PP_CONTROL(0));
+ drm_WARN_ON(&dev_priv->drm,
+ (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- I915_WRITE(PP_CONTROL(0), val);
+ intel_de_write(dev_priv, PP_CONTROL(0), val);
- I915_WRITE(PP_ON_DELAYS(0),
- REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
- REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ intel_de_write(dev_priv, PP_ON_DELAYS(0),
+ 	       REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ 	       REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ 	       REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
- I915_WRITE(PP_OFF_DELAYS(0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
- REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ intel_de_write(dev_priv, PP_OFF_DELAYS(0),
+ 	       REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ 	       REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
- I915_WRITE(PP_DIVISOR(0),
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
- DIV_ROUND_UP(pps->t4, 1000) + 1));
+ intel_de_write(dev_priv, PP_DIVISOR(0),
+ 	       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ 	       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ 			      DIV_ROUND_UP(pps->t4, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -299,7 +295,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(lvds_encoder->reg, temp);
+ intel_de_write(dev_priv, lvds_encoder->reg, temp);
}
/*
@@ -313,13 +309,16 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
- I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
- POSTING_READ(lvds_encoder->reg);
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
- DRM_ERROR("timed out waiting for panel to power on\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
}
@@ -331,12 +330,15 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power off\n");
- I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_encoder->reg);
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
static void gmch_disable_lvds(struct intel_encoder *encoder,
@@ -398,7 +400,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
- DRM_ERROR("Can't support LVDS on pipe A\n");
+ drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -408,8 +410,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
lvds_bpp = 6*3;
if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
@@ -791,7 +794,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = I915_READ(lvds_encoder->reg);
+ val = intel_de_read(dev_priv, lvds_encoder->reg);
if (HAS_PCH_CPT(dev_priv))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
@@ -827,13 +830,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- WARN(!dev_priv->vbt.int_lvds_support,
- "Useless DMI match. Internal LVDS support disabled by VBT\n");
+ drm_WARN(dev, !dev_priv->vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
if (!dev_priv->vbt.int_lvds_support) {
- DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Internal LVDS support disabled by VBT\n");
return;
}
@@ -842,7 +846,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
else
lvds_reg = LVDS;
- lvds = I915_READ(lvds_reg);
+ lvds = intel_de_read(dev_priv, lvds_reg);
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
@@ -852,10 +856,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT\n");
return;
}
- DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT, but enabled anyway\n");
}
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
@@ -969,7 +975,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
*/
fixed_mode = intel_encoder_current_mode(intel_encoder);
if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
@@ -985,8 +991,8 @@ out:
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
- lvds_encoder->is_dual_link ? "dual" : "single");
+ drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -995,7 +1001,7 @@ out:
failed:
mutex_unlock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
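
intel_enable_lvds()/intel_disable_lvds() above pair a PP_CONTROL write with intel_de_wait_for_set()/intel_de_wait_for_clear() on PP_STATUS: request the panel power change, then poll until the power sequencer reports it. A condensed sketch of both paths in one hypothetical helper, using only the registers and timeouts visible in the hunks:

	static int lvds_panel_set_power(struct drm_i915_private *dev_priv, bool on)
	{
		u32 val = intel_de_read(dev_priv, PP_CONTROL(0));

		intel_de_write(dev_priv, PP_CONTROL(0),
			       on ? val | PANEL_POWER_ON : val & ~PANEL_POWER_ON);

		/* Poll PP_STATUS until the sequencer reflects the request. */
		if (on)
			return intel_de_wait_for_set(dev_priv, PP_STATUS(0),
						     PP_ON, 5000);

		return intel_de_wait_for_clear(dev_priv, PP_STATUS(0),
					       PP_ON, 1000);
	}
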
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index e59b4992ba1b..cc6b00959586 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,11 +30,10 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_panel.h"
#include "i915_drv.h"
+#include "intel_acpi.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
@@ -242,29 +241,6 @@ struct opregion_asle_ext {
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
-/*
- * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
- * Attached to the Display Adapter).
- */
-#define ACPI_DISPLAY_INDEX_SHIFT 0
-#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
-#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
-#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
-#define ACPI_DISPLAY_TYPE_SHIFT 8
-#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
-#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
-#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
-#define ACPI_DISPLAY_TYPE_TV (2 << 8)
-#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
-#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
-#define ACPI_VENDOR_SPECIFIC_SHIFT 12
-#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
-#define ACPI_BIOS_CAN_DETECT (1 << 16)
-#define ACPI_DEPENDS_ON_VGA (1 << 17)
-#define ACPI_PIPE_ID_SHIFT 18
-#define ACPI_PIPE_ID_MASK (7 << 18)
-#define ACPI_DEVICE_ID_SCHEME (1 << 31)
-
#define MAX_DSLP 1500
static int swsci(struct drm_i915_private *dev_priv,
@@ -311,7 +287,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* The spec tells us to do this, but we are the only user... */
scic = swsci->scic;
if (scic & SWSCI_SCIC_INDICATOR) {
- DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+ drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n");
return -EBUSY;
}
@@ -335,7 +311,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
- DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+ drm_dbg(&dev_priv->drm, "SWSCI request timed out\n");
return -ETIMEDOUT;
}
@@ -344,7 +320,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Note: scic == 0 is an error! */
if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
- DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+ drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic);
return -EIO;
}
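
The SWSCI poll above leans on i915's wait_for(COND, timeout_ms) helper, which re-evaluates the condition with backoff until it holds or the millisecond timeout expires, returning non-zero on timeout. Typical shape, with register and bit names invented for illustration:

	/* SOME_STATUS_REG and SOME_BUSY_BIT are placeholders, not real i915 names. */
	if (wait_for((intel_de_read(i915, SOME_STATUS_REG) & SOME_BUSY_BIT) == 0, 100))
		return -ETIMEDOUT;
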
@@ -403,8 +379,9 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
- WARN_ONCE(1, "unsupported intel_encoder type %d\n",
- intel_encoder->type);
+ drm_WARN_ONCE(&dev_priv->drm, 1,
+ "unsupported intel_encoder type %d\n",
+ intel_encoder->type);
return -EINVAL;
}
@@ -448,10 +425,11 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
struct opregion_asle *asle = dev_priv->opregion.asle;
struct drm_device *dev = &dev_priv->drm;
- DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+ drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
if (acpi_video_get_backlight_type() == acpi_backlight_native) {
- DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "opregion backlight request ignored\n");
return 0;
}
@@ -468,7 +446,8 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
* Update backlight on all connectors that support backlight (usually
* only one).
*/
- DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+ drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
+ bclp);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
@@ -485,13 +464,13 @@ static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
- DRM_DEBUG_DRIVER("Illum is not supported\n");
+ drm_dbg(&dev_priv->drm, "Illum is not supported\n");
return ASLC_ALS_ILLUM_FAILED;
}
static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
{
- DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+ drm_dbg(&dev_priv->drm, "PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
@@ -499,30 +478,36 @@ static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
- DRM_DEBUG_DRIVER("Pfit is not supported\n");
+ drm_dbg(&dev_priv->drm, "Pfit is not supported\n");
return ASLC_PFIT_FAILED;
}
static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
{
- DRM_DEBUG_DRIVER("SROT is not supported\n");
+ drm_dbg(&dev_priv->drm, "SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
{
if (!iuer)
- DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (nothing)\n");
if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (rotation lock)\n");
if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume down)\n");
if (iuer & ASLE_IUER_VOLUME_UP_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume up)\n");
if (iuer & ASLE_IUER_WINDOWS_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (windows)\n");
if (iuer & ASLE_IUER_POWER_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (power)\n");
return ASLC_BUTTON_ARRAY_FAILED;
}
@@ -530,9 +515,11 @@ static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
- DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (clamshell)\n");
else
- DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (slate)\n");
return ASLC_CONVERTIBLE_FAILED;
}
@@ -540,16 +527,17 @@ static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
- DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+ drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n");
else
- DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+ drm_dbg(&dev_priv->drm,
+ "Docking is not supported (undocked)\n");
return ASLC_DOCKING_FAILED;
}
static u32 asle_isct_state(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_DRIVER("ISCT is not supported\n");
+ drm_dbg(&dev_priv->drm, "ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
}
@@ -569,8 +557,8 @@ static void asle_work(struct work_struct *work)
aslc_req = asle->aslc;
if (!(aslc_req & ASLC_REQ_MSK)) {
- DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
- aslc_req);
+ drm_dbg(&dev_priv->drm,
+ "No request on ASLC interrupt 0x%08x\n", aslc_req);
return;
}
@@ -662,54 +650,12 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static u32 acpi_display_type(struct intel_connector *connector)
-{
- u32 display_type;
-
- switch (connector->base.connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_DVIA:
- display_type = ACPI_DISPLAY_TYPE_VGA;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_9PinDIN:
- case DRM_MODE_CONNECTOR_TV:
- display_type = ACPI_DISPLAY_TYPE_TV;
- break;
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- case DRM_MODE_CONNECTOR_eDP:
- case DRM_MODE_CONNECTOR_DSI:
- display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
- break;
- case DRM_MODE_CONNECTOR_Unknown:
- case DRM_MODE_CONNECTOR_VIRTUAL:
- display_type = ACPI_DISPLAY_TYPE_OTHER;
- break;
- default:
- MISSING_CASE(connector->base.connector_type);
- display_type = ACPI_DISPLAY_TYPE_OTHER;
- break;
- }
-
- return display_type;
-}
-
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
- int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -721,29 +667,22 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
+ intel_acpi_device_id_update(dev_priv);
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- u32 device_id, type;
-
- device_id = acpi_display_type(connector);
-
- /* Use display type specific display index. */
- type = (device_id & ACPI_DISPLAY_TYPE_MASK)
- >> ACPI_DISPLAY_TYPE_SHIFT;
- device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
-
- connector->acpi_device_id = device_id;
if (i < max_outputs)
- set_did(opregion, i, device_id);
+ set_did(opregion, i, connector->acpi_device_id);
i++;
}
drm_connector_list_iter_end(&conn_iter);
- DRM_DEBUG_KMS("%d outputs detected\n", i);
+ drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i);
if (i > max_outputs)
- DRM_ERROR("More than %d outputs in connector list\n",
- max_outputs);
+ drm_err(&dev_priv->drm,
+ "More than %d outputs in connector list\n",
+ max_outputs);
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
@@ -823,7 +762,9 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
if (requested_callbacks) {
u32 req = opregion->swsci_sbcb_sub_functions;
if ((req & tmp) != req)
- DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+ drm_dbg(&dev_priv->drm,
+ "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n",
+ req, tmp);
/* XXX: for now, trust the requested callbacks */
/* opregion->swsci_sbcb_sub_functions &= tmp; */
} else {
@@ -831,9 +772,10 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
}
}
- DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
- opregion->swsci_gbda_sub_functions,
- opregion->swsci_sbcb_sub_functions);
+ drm_dbg(&dev_priv->drm,
+ "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+ opregion->swsci_gbda_sub_functions,
+ opregion->swsci_sbcb_sub_functions);
}
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -867,15 +809,17 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
if (ret) {
- DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
- name, ret);
+ drm_err(&dev_priv->drm,
+ "Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
return ret;
}
if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (opregion->vbt_firmware) {
- DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT firmware \"%s\"\n", name);
opregion->vbt = opregion->vbt_firmware;
opregion->vbt_size = fw->size;
ret = 0;
@@ -883,7 +827,8 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
ret = -ENOMEM;
}
} else {
- DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
+ drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
+ name);
ret = -EINVAL;
}
@@ -910,9 +855,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
pci_read_config_dword(pdev, ASLS, &asls);
- DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+ drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n",
+ asls);
if (asls == 0) {
- DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -925,21 +871,21 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
memcpy(buf, base, sizeof(buf));
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ drm_dbg(&dev_priv->drm, "opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
opregion->header = base;
opregion->lid_state = base + ACPI_CLID;
- DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
- opregion->header->over.major,
- opregion->header->over.minor,
- opregion->header->over.revision);
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
+ opregion->header->over.major,
+ opregion->header->over.minor,
+ opregion->header->over.revision);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
/*
* Indicate we handle monitor hotplug events ourselves so we do
@@ -951,20 +897,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
}
if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG_DRIVER("SWSCI supported\n");
+ drm_dbg(&dev_priv->drm, "SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
swsci_setup(dev_priv);
}
if (mboxes & MBOX_ASLE) {
- DRM_DEBUG_DRIVER("ASLE supported\n");
+ drm_dbg(&dev_priv->drm, "ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}
if (mboxes & MBOX_ASLE_EXT)
- DRM_DEBUG_DRIVER("ASLE extension supported\n");
+ drm_dbg(&dev_priv->drm, "ASLE extension supported\n");
if (intel_load_vbt_firmware(dev_priv) == 0)
goto out;
@@ -984,7 +930,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
*/
if (opregion->header->over.major > 2 ||
opregion->header->over.minor >= 1) {
- WARN_ON(rvda < OPREGION_SIZE);
+ drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE);
rvda += asls;
}
@@ -995,12 +941,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
goto out;
} else {
- DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (RVDA)\n");
memunmap(opregion->rvda);
opregion->rvda = NULL;
}
@@ -1018,11 +966,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
vbt_size -= OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
} else {
- DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
}
out:
@@ -1058,20 +1008,22 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret) {
- DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get panel details from OpRegion (%d)\n",
+ ret);
return ret;
}
ret = (panel_details >> 8) & 0xff;
if (ret > 0x10) {
- DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid OpRegion panel type 0x%x\n", ret);
return -EINVAL;
}
/* fall back to VBT panel type? */
if (ret == 0x0) {
- DRM_DEBUG_KMS("No panel type in OpRegion\n");
+ drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n");
return -ENODEV;
}
@@ -1081,7 +1033,8 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* via a quirk list :(
*/
if (!dmi_check_system(intel_use_opregion_panel_type)) {
- DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
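The opregion hunks above keep the existing VBT lookup order while converting the logging: try the separately mapped RVDA pointer first (OpRegion 2.1+), then fall back to Mailbox #4 inside the OpRegion itself. A condensed sketch of that order (validation and size clamping simplified; not the patch's code):

static const void *example_find_vbt(struct intel_opregion *opregion,
				    const void *base, size_t *size)
{
	/* Preferred: VBT via the RVDA pointer, mapped separately. */
	if (opregion->rvda &&
	    intel_bios_is_valid_vbt(opregion->rvda, opregion->asle->rvds)) {
		*size = opregion->asle->rvds;
		return opregion->rvda;
	}

	/* Fallback: VBT embedded in Mailbox #4 of the OpRegion. */
	*size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
	return base + OPREGION_VBT_OFFSET;
}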
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e40c3a0e2cd7..481187223101 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,7 +27,6 @@
*/
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
#include "gt/intel_ring.h"
@@ -204,9 +203,10 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- I915_WRITE(DSPCLK_GATE_D, 0);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, 0);
else
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+ intel_de_write(dev_priv, DSPCLK_GATE_D,
+ OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
pci_bus_read_config_byte(pdev->bus,
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs;
- WARN_ON(overlay->active);
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -315,15 +315,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
- WARN_ON(!overlay->active);
+ drm_WARN_ON(&dev_priv->drm, !overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
/* check for underruns */
- tmp = I915_READ(DOVSTA);
+ tmp = intel_de_read(dev_priv, DOVSTA);
if (tmp & (1 << 17))
- DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -456,7 +456,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -759,7 +759,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -857,7 +858,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -891,7 +893,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL);
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
@@ -899,12 +901,12 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
*/
if (INTEL_GEN(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */
- ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
if (pfit_control & VERT_AUTO_SCALE)
- ratio = I915_READ(PFIT_AUTO_RATIOS);
+ ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS);
else
- ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
ratio >>= PFIT_VERT_SCALE_SHIFT;
}
@@ -1066,7 +1068,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1090,7 +1092,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
@@ -1225,7 +1228,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1239,12 +1242,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->saturation = overlay->saturation;
if (!IS_GEN(dev_priv, 2)) {
- attrs->gamma0 = I915_READ(OGAMC0);
- attrs->gamma1 = I915_READ(OGAMC1);
- attrs->gamma2 = I915_READ(OGAMC2);
- attrs->gamma3 = I915_READ(OGAMC3);
- attrs->gamma4 = I915_READ(OGAMC4);
- attrs->gamma5 = I915_READ(OGAMC5);
+ attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
+ attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
+ attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
+ attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
+ attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
+ attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
}
} else {
if (attrs->brightness < -128 || attrs->brightness > 127)
@@ -1274,12 +1277,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_unlock;
- I915_WRITE(OGAMC0, attrs->gamma0);
- I915_WRITE(OGAMC1, attrs->gamma1);
- I915_WRITE(OGAMC2, attrs->gamma2);
- I915_WRITE(OGAMC3, attrs->gamma3);
- I915_WRITE(OGAMC4, attrs->gamma4);
- I915_WRITE(OGAMC5, attrs->gamma5);
+ intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
+ intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
+ intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
+ intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
+ intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
+ intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
}
}
overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
@@ -1369,7 +1372,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
- DRM_INFO("Initialized overlay support.\n");
+ drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
out_free:
@@ -1389,7 +1392,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already.
*/
- WARN_ON(overlay->active);
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
i915_gem_object_put(overlay->reg_bo);
i915_active_fini(&overlay->last_flip);
@@ -1419,8 +1422,8 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
if (error == NULL)
return NULL;
- error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(GEN2_ISR);
+ error->dovsta = intel_de_read(dev_priv, DOVSTA);
+ error->isr = intel_de_read(dev_priv, GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
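The intel_overlay.c conversion above is mechanical: every register access and every log/WARN call gains an explicit device argument instead of relying on an implicit dev_priv picked up from scope. A minimal sketch of the pattern on a hypothetical helper (illustrative only, not part of the patch):

/* Hypothetical helper showing the converted register-access style. */
static void example_toggle_bit(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, u32 bit, bool enable)
{
	/* was: u32 val = I915_READ(reg); */
	u32 val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= bit;
	else
		val &= ~bit;

	/* was: I915_WRITE(reg, val); DRM_DEBUG_KMS(...); */
	intel_de_write(dev_priv, reg, val);
	drm_dbg_kms(&dev_priv->drm, "bit %s\n", enable ? "set" : "cleared");
}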
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 7b3ec6eb3382..276f43870802 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -96,8 +96,9 @@ intel_panel_edid_downclock_mode(struct intel_connector *connector,
if (!downclock_mode)
return NULL;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(downclock_mode);
return downclock_mode;
@@ -122,8 +123,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
if (!fixed_mode)
return NULL;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
@@ -138,8 +140,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using first mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
@@ -162,8 +165,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using mode from VBT: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
info->width_mm = fixed_mode->width_mm;
@@ -423,15 +426,15 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
default:
- WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+ drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
+ fitting_mode);
return;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
- pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- PFIT_FILTER_FUZZY);
+ pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -520,7 +523,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
if (i915_modparams.invert_brightness < 0)
return val;
@@ -537,14 +540,14 @@ static u32 lpt_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 pch_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 i9xx_get_backlight(struct intel_connector *connector)
@@ -553,7 +556,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
u32 val;
- val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_GEN(dev_priv) < 4)
val >>= 1;
@@ -569,10 +572,10 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
- return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 vlv_get_backlight(struct intel_connector *connector)
@@ -588,7 +591,8 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
+ return intel_de_read(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
static u32 pwm_get_backlight(struct intel_connector *connector)
@@ -605,8 +609,8 @@ static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+ u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -615,8 +619,8 @@ static void pch_set_backlight(const struct drm_connector_state *conn_state, u32
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -626,7 +630,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
@@ -643,8 +647,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
- tmp = I915_READ(BLC_PWM_CTL) & ~mask;
- I915_WRITE(BLC_PWM_CTL, tmp | level);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
+ intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -654,8 +658,8 @@ static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
- tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -664,7 +668,8 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -709,7 +714,7 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
mutex_lock(&dev_priv->backlight_lock);
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -742,14 +747,16 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
- tmp = I915_READ(BLC_PWM_CPU_CTL2);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n");
- I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu backlight was enabled, disabling\n");
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ tmp & ~BLM_PWM_ENABLE);
}
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -760,11 +767,11 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BLC_PWM_CPU_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -779,8 +786,8 @@ static void i965_disable_backlight(const struct drm_connector_state *old_conn_st
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -792,8 +799,9 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ tmp & ~BLM_PWM_ENABLE);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -805,14 +813,15 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
val &= ~UTIL_PIN_ENABLE;
- I915_WRITE(UTIL_PIN_CTL, val);
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
}
}
@@ -825,9 +834,10 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
}
static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -857,7 +867,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
* another client is not activated.
*/
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ drm_dbg(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
return;
}
@@ -879,31 +890,31 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, schicken;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- DRM_DEBUG_KMS("pch backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (HAS_PCH_LPT(dev_priv)) {
- schicken = I915_READ(SOUTH_CHICKEN2);
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
if (panel->backlight.alternate_pwm_increment)
schicken |= LPT_PWM_GRANULARITY;
else
schicken &= ~LPT_PWM_GRANULARITY;
- I915_WRITE(SOUTH_CHICKEN2, schicken);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
} else {
- schicken = I915_READ(SOUTH_CHICKEN1);
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
if (panel->backlight.alternate_pwm_increment)
schicken |= SPT_PWM_GRANULARITY;
else
schicken &= ~SPT_PWM_GRANULARITY;
- I915_WRITE(SOUTH_CHICKEN1, schicken);
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
}
pch_ctl2 = panel->backlight.max << 16;
- I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
@@ -913,9 +924,10 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (HAS_PCH_LPT(dev_priv))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
- POSTING_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -930,41 +942,42 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("cpu backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
cpu_ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- DRM_DEBUG_KMS("pch backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
- POSTING_READ(BLC_PWM_CPU_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
pch_ctl2 = panel->backlight.max << 16;
- I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
- POSTING_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -975,10 +988,10 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- DRM_DEBUG_KMS("backlight already enabled\n");
- I915_WRITE(BLC_PWM_CTL, 0);
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ intel_de_write(dev_priv, BLC_PWM_CTL, 0);
}
freq = panel->backlight.max;
@@ -991,8 +1004,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
- I915_WRITE(BLC_PWM_CTL, ctl);
- POSTING_READ(BLC_PWM_CTL);
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1003,7 +1016,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* that has backlight.
*/
if (IS_GEN(dev_priv, 2))
- I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1015,11 +1028,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
- ctl2 = I915_READ(BLC_PWM_CTL2);
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(BLC_PWM_CTL2, ctl2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
}
freq = panel->backlight.max;
@@ -1027,16 +1040,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
freq /= 0xff;
ctl = freq << 16;
- I915_WRITE(BLC_PWM_CTL, ctl);
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- I915_WRITE(BLC_PWM_CTL2, ctl2);
- POSTING_READ(BLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
@@ -1050,15 +1063,15 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
ctl = panel->backlight.max << 16;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1066,9 +1079,10 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
- POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1082,30 +1096,34 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- DRM_DEBUG_KMS("util pin already enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "util pin already enabled\n");
val &= ~UTIL_PIN_ENABLE;
- I915_WRITE(UTIL_PIN_CTL, val);
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
}
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
- I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
- UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+ intel_de_write(dev_priv, UTIL_PIN_CTL,
+ val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
}
- I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.max);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1113,10 +1131,12 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1127,16 +1147,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
}
- I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.max);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1144,10 +1167,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1194,7 +1219,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
if (!panel->backlight.present)
return;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
mutex_lock(&dev_priv->backlight_lock);
@@ -1219,7 +1244,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_unlock(&dev_priv->backlight_lock);
- DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -1237,7 +1262,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
mutex_lock(&dev_priv->backlight_lock);
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -1380,7 +1405,8 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz);
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz);
}
/*
@@ -1441,7 +1467,8 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz * 128);
}
/*
@@ -1458,7 +1485,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_PINEVIEW(dev_priv))
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
clock = KHz(dev_priv->cdclk.hw.cdclk);
@@ -1476,7 +1503,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_G4X(dev_priv))
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
clock = KHz(dev_priv->cdclk.hw.cdclk);
@@ -1493,14 +1520,14 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int mul, clock;
- if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
if (IS_CHERRYVIEW(dev_priv))
clock = KHz(19200);
else
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
mul = 128;
}
@@ -1515,22 +1542,26 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u32 pwm;
if (!panel->backlight.hz_to_pwm) {
- DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion not supported\n");
return 0;
}
if (pwm_freq_hz) {
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
- pwm_freq_hz);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT defined backlight frequency %u Hz\n",
+ pwm_freq_hz);
} else {
pwm_freq_hz = 200;
- DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
- pwm_freq_hz);
+ drm_dbg_kms(&dev_priv->drm,
+ "default backlight frequency %u Hz\n",
+ pwm_freq_hz);
}
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
- DRM_DEBUG_KMS("backlight frequency conversion failed\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion failed\n");
return 0;
}
@@ -1546,7 +1577,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
int min;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1557,8 +1588,9 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
*/
min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
if (min != dev_priv->vbt.backlight.min_brightness) {
- DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
- dev_priv->vbt.backlight.min_brightness, min);
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping VBT min backlight %d/255 to %d/255\n",
+ dev_priv->vbt.backlight.min_brightness, min);
}
/* vbt value is a coefficient in range [0..255] */
@@ -1573,18 +1605,18 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
bool alt, cpu_mode;
if (HAS_PCH_LPT(dev_priv))
- alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
- alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
panel->backlight.alternate_pwm_increment = alt;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1608,13 +1640,16 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.max);
if (cpu_mode) {
- DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, panel->backlight.level);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ cpu_ctl2 & ~BLM_PWM_ENABLE);
}
return 0;
@@ -1626,10 +1661,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
if (!panel->backlight.max)
@@ -1645,7 +1680,7 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
@@ -1658,7 +1693,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
@@ -1697,11 +1732,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
- ctl2 = I915_READ(BLC_PWM_CTL2);
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
@@ -1731,13 +1766,13 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
@@ -1767,18 +1802,20 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.controller = dev_priv->vbt.backlight.controller;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
- I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1812,11 +1849,13 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
*/
panel->backlight.controller = 0;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
- I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1843,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
+ u32 level, ns;
int retval;
/* Get the right PWM chip for DSI backlight according to VBT */
@@ -1855,7 +1895,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
}
if (IS_ERR(panel->backlight.pwm)) {
- DRM_ERROR("Failed to get the %s PWM chip\n", desc);
+ drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+ desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1866,23 +1907,27 @@ static int pwm_setup_backlight(struct intel_connector *connector,
*/
pwm_apply_args(panel->backlight.pwm);
- retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
- CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.min = 0; /* 0% */
+ panel->backlight.max = 100; /* 100% */
+ level = intel_panel_compute_brightness(connector, 100);
+ ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+ retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
- DRM_ERROR("Failed to configure the pwm chip\n");
+ drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
pwm_put(panel->backlight.pwm);
panel->backlight.pwm = NULL;
return retval;
}
- panel->backlight.min = 0; /* 0% */
- panel->backlight.max = 100; /* 100% */
- panel->backlight.level = DIV_ROUND_UP(
- pwm_get_duty_cycle(panel->backlight.pwm) * 100,
- CRC_PMIC_PWM_PERIOD_NS);
+ level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.level =
+ intel_panel_compute_brightness(connector, level);
panel->backlight.enabled = panel->backlight.level != 0;
- DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
+ drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+ desc);
return 0;
}
@@ -1913,15 +1958,17 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
if (!dev_priv->vbt.backlight.present) {
if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
- DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT, but present per quirk\n");
} else {
- DRM_DEBUG_KMS("no backlight present per VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT\n");
return 0;
}
}
/* ensure intel_panel has been initialized first */
- if (WARN_ON(!panel->backlight.setup))
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
return -ENODEV;
/* set level and max in panel struct */
@@ -1930,17 +1977,19 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
- DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
- connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to setup backlight for connector %s\n",
+ connector->name);
return ret;
}
panel->backlight.present = true;
- DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->name,
- enableddisabled(panel->backlight.enabled),
- panel->backlight.level, panel->backlight.max);
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->name,
+ enableddisabled(panel->backlight.enabled),
+ panel->backlight.level, panel->backlight.max);
return 0;
}
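The pwm_setup_backlight() hunk above routes both directions of the percent <-> PWM duty-cycle conversion through intel_panel_compute_brightness(), so the invert_brightness quirk now applies to the initial 100% programming as well as to the level read back from the PWM chip. A sketch of the two bare conversions, with a stand-in for the period constant:

/* Stand-ins for illustration; the driver uses CRC_PMIC_PWM_PERIOD_NS. */
#include <linux/kernel.h>	/* DIV_ROUND_UP() */

#define EXAMPLE_PWM_PERIOD_NS	21333

static u32 example_level_to_duty_ns(u32 level)	/* level in [0, 100] */
{
	return DIV_ROUND_UP(level * EXAMPLE_PWM_PERIOD_NS, 100);
}

static u32 example_duty_ns_to_level(u32 duty_ns)
{
	return DIV_ROUND_UP(duty_ns * 100, EXAMPLE_PWM_PERIOD_NS);
}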
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 520408e83681..a9a5df2fee4d 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -110,8 +110,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- WARN(1, "nonexisting DP port %c\n",
- port_name(dig_port->base.port));
+ drm_WARN(dev, 1, "nonexisting DP port %c\n",
+ port_name(dig_port->base.port));
break;
}
break;
@@ -172,7 +172,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -188,7 +188,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- I915_WRITE(PORT_DFT2_G4X, tmp);
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
}
return 0;
@@ -237,7 +237,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
switch (pipe) {
case PIPE_A:
@@ -254,7 +254,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- I915_WRITE(PORT_DFT2_G4X, tmp);
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -328,7 +328,8 @@ put_state:
drm_atomic_state_put(state);
unlock:
- WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ drm_WARN(&dev_priv->drm, ret,
+ "Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
@@ -440,15 +441,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
+void intel_crtc_crc_init(struct intel_crtc *crtc)
{
- enum pipe pipe;
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
- for_each_pipe(dev_priv, pipe) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
- spin_lock_init(&pipe_crc->lock);
- }
+ spin_lock_init(&pipe_crc->lock);
}
static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
@@ -570,7 +567,7 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
@@ -586,7 +583,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
intel_wakeref_t wakeref;
@@ -595,14 +593,15 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -615,8 +614,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
goto out;
pipe_crc->source = source;
- I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
if (!source) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -638,7 +637,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
u32 val = 0;
if (!crtc->crc.opened)
@@ -650,22 +649,22 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
/* Swallow CRCs until we stop generating them. */
spin_lock_irq(&pipe_crc->lock);
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), 0);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index db258a756fc6..43012b189415 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
+void intel_crtc_crc_init(struct intel_crtc *crtc);
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name, size_t *values_cnt);
@@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {}
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
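Net effect of the intel_pipe_crc changes: CRC bookkeeping moves from a per-device array indexed by pipe into each CRTC, and the initializer is renamed to match. A hedged sketch of the resulting layout (fields taken from the accesses above, full structs elided):

/* Assumed shape after the change -- illustrative, not the complete structs. */
struct intel_pipe_crc {
	spinlock_t lock;
	int skipped;
	enum intel_pipe_crc_source source;
};

struct intel_crtc {
	/* ... */
	struct intel_pipe_crc pipe_crc;	/* was dev_priv->pipe_crc[pipe] */
};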
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 83025052c965..fd9b146e3aba 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -59,11 +59,28 @@
* get called by the frontbuffer tracking code. Note that because of locking
* issues the self-refresh re-enable code is done from a work queue, which
* must be correctly synchronized/cancelled when shutting down the pipe."
+ *
+ * DC3CO (DC3 clock off)
+ *
+ * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
+ * the clock off automatically while in the PSR2 idle state.
+ * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
+ * entry/exit allows the HW to enter a low-power state even when page flipping
+ * periodically (for instance in a 30fps video playback scenario).
+ *
+ * Every time a flip occurs, PSR2 leaves deep sleep (if it was there), DC3CO
+ * is enabled, and tgl_dc3co_disable_work is scheduled to run after 6 frames.
+ * If no other flip occurs and that work executes, DC3CO is disabled and PSR2
+ * is configured to enter deep sleep again, with the cycle restarting on the
+ * next flip.
+ * Front buffer modifications deliberately do not trigger DC3CO activation,
+ * as that would add a lot of complexity while most modern systems use only
+ * page flips.
*/
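/*
 * Illustration only (not part of this patch): the flip-driven arm/disarm
 * cycle described above, with a hypothetical helper and an assumed
 * psr.dc3co_work field. A page flip enables DC3CO and (re)arms a delayed
 * work that drops back to deep sleep after roughly 6 idle frames.
 */
static void example_psr2_flip_notify(struct drm_i915_private *i915)
{
	unsigned int delay_ms = 6 * 1000 / 30;	/* ~6 frames at 30fps */

	tgl_psr2_enable_dc3co(i915);	/* hypothetical helper */
	mod_delayed_work(system_wq, &i915->psr.dc3co_work,	/* assumed field */
			 msecs_to_jiffies(delay_ms));
}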
-static bool psr_global_enabled(u32 debug)
+static bool psr_global_enabled(struct drm_i915_private *i915)
{
- switch (debug & I915_PSR_DEBUG_MODE_MASK) {
+ switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
return i915_modparams.enable_psr;
case I915_PSR_DEBUG_DISABLE:
@@ -77,8 +94,8 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Cannot enable DSC and PSR2 simultaneously */
- WARN_ON(crtc_state->dsc.compression_enable &&
- crtc_state->has_psr2);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable &&
+ crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
@@ -114,10 +131,10 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
EDP_PSR_PRE_ENTRY(trans_shift);
/* Warning: it is masking/setting reserved bits too */
- val = I915_READ(imr_reg);
+ val = intel_de_read(dev_priv, imr_reg);
val &= ~EDP_PSR_TRANS_MASK(trans_shift);
val |= ~mask;
- I915_WRITE(imr_reg, val);
+ intel_de_write(dev_priv, imr_reg, val);
}
static void psr_event_print(u32 val, bool psr2_enabled)
@@ -174,20 +191,24 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
dev_priv->psr.last_exit = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 9) {
- u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ PSR_EVENT(cpu_transcoder));
bool psr2_enabled = dev_priv->psr.psr2_enabled;
- I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
+ val);
psr_event_print(val, psr2_enabled);
}
}
@@ -195,7 +216,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
u32 val;
- DRM_WARN("[transcoder %s] PSR aux error\n",
+ drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
dev_priv->psr.irq_aux_error = true;
@@ -208,9 +229,9 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
* again, so we don't care about unmasking the interrupt
* or clearing irq_aux_error.
*/
- val = I915_READ(imr_reg);
+ val = intel_de_read(dev_priv, imr_reg);
val |= EDP_PSR_ERROR(trans_shift);
- I915_WRITE(imr_reg, val);
+ intel_de_write(dev_priv, imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
@@ -270,7 +291,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
if (dev_priv->psr.dp) {
- DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+ drm_warn(&dev_priv->drm,
+ "More than one eDP panel found, PSR support should be extended\n");
return;
}
@@ -279,16 +301,18 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp->psr_dpcd[0])
return;
- DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
- intel_dp->psr_dpcd[0]);
+ drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+ if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -316,8 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* GTC first.
*/
dev_priv->psr.sink_psr2_support = y_req && alpm;
- DRM_DEBUG_KMS("PSR2 %ssupported\n",
- dev_priv->psr.sink_psr2_support ? "" : "not ");
+ drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
@@ -380,8 +404,9 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
- intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+ intel_de_write(dev_priv,
+ EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+ intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -391,7 +416,8 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
+ intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+ aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -454,22 +480,30 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
return val;
}
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 max_sleep_time = 0x1f;
- u32 val = EDP_PSR_ENABLE;
+ int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
* off-by-one issue that HW has in some cases.
*/
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- /* sink_sync_latency of 8 means source has to wait for more than 8
- * frames, we'll go with 9 frames for now
- */
+ idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+ idle_frames = 0xf;
+
+ return idle_frames;
+}
+
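For reference, the clamping in the new psr_compute_idle_frames() helper can be exercised stand-alone; a small sketch with hypothetical inputs (the 0xf cap corresponds to the 4-bit idle-frame register field guarded by the drm_WARN_ON above):

	#include <stdio.h>

	static int compute_idle_frames(int vbt_idle_frames, int sink_sync_latency)
	{
		int idle_frames = vbt_idle_frames > 6 ? vbt_idle_frames : 6;

		if (idle_frames < sink_sync_latency + 1)
			idle_frames = sink_sync_latency + 1;
		if (idle_frames > 0xf)		/* field is only 4 bits wide */
			idle_frames = 0xf;
		return idle_frames;
	}

	int main(void)
	{
		/* VBT asks for 0, sink latency 8: max(6, 0, 8 + 1) = 9 frames */
		printf("%d\n", compute_idle_frames(0, 8));
		/* a sink latency of 20 would overflow the field; clamps to 15 */
		printf("%d\n", compute_idle_frames(0, 20));
		return 0;
	}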
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
@@ -483,9 +517,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -493,13 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- /* Let's use 6 as the minimum to cover all known cases including the
- * off-by-one issue that HW has in some cases.
- */
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -521,9 +549,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
* recommends keeping this bit unset while PSR2 is enabled.
*/
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+ intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}
static bool
@@ -552,10 +580,10 @@ static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
val &= ~EDP_PSR2_IDLE_FRAME_MASK;
val |= idle_frames;
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}
static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -566,29 +594,22 @@ static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
- int idle_frames;
+ struct intel_dp *intel_dp = dev_priv->psr.dp;
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- /*
- * Restore PSR2 idle frame let's use 6 as the minimum to cover all known
- * cases including the off-by-one issue that HW has in some cases.
- */
- idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- psr2_program_idle_frames(dev_priv, idle_frames);
+ psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
}
-static void tgl_dc5_idle_thread(struct work_struct *work)
+static void tgl_dc3co_disable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.idle_work.work);
+ container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
mutex_lock(&dev_priv->psr.lock);
/* If delayed work is pending, it is not idle */
- if (delayed_work_pending(&dev_priv->psr.idle_work))
+ if (delayed_work_pending(&dev_priv->psr.dc3co_work))
goto unlock;
- DRM_DEBUG_KMS("DC5/6 idle thread\n");
tgl_psr2_disable_dc3co(dev_priv);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -599,11 +620,41 @@ static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.dc3co_enabled)
return;
- cancel_delayed_work(&dev_priv->psr.idle_work);
+ cancel_delayed_work(&dev_priv->psr.dc3co_work);
/* Before PSR2 exit, disallow DC3CO */
tgl_psr2_disable_dc3co(dev_priv);
}
+static void
+tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 exit_scanlines;
+
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+ /* BSpec: 49196 DC3CO only works with pipe A and DDI A. */
+ if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
+ dig_port->base.port != PORT_A)
+ return;
+
+ /*
+ * DC3CO exit time is 200us per BSpec 49196.
+ * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
+
+ if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+ return;
+
+ crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+}
+
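The EXITLINE arithmetic above works out as follows for a hypothetical 1080p timing (htotal 2200, pixel clock 148500 kHz, so roughly 14.8us per line); a stand-alone sketch of the rounding, not the driver helper itself:

	#include <stdio.h>

	/* ceil(usecs / line time), with line time = htotal / pixel clock */
	static unsigned int usecs_to_scanlines(unsigned long long htotal,
					       unsigned long long pixel_clock_khz,
					       unsigned long long usecs)
	{
		return (unsigned int)((usecs * pixel_clock_khz + htotal * 1000 - 1) /
				      (htotal * 1000));
	}

	int main(void)
	{
		/* 200us exit time -> ceil(13.5) = 14 scanlines, + 1 guard line */
		unsigned int exit_scanlines = usecs_to_scanlines(2200, 148500, 200) + 1;

		/* exitline = vdisplay - exit scanlines = 1080 - 15 = 1065 */
		printf("dc3co_exitline = %u\n", 1080 - exit_scanlines);
		return 0;
	}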
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -616,8 +667,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
- DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
- transcoder_name(crtc_state->cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
return false;
}
@@ -627,7 +679,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable) {
- DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
@@ -646,15 +699,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
- DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
- crtc_hdisplay, crtc_vdisplay,
- psr_max_h, psr_max_v);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
return false;
}
if (crtc_state->pipe_bpp > max_bpp) {
- DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
- crtc_state->pipe_bpp, max_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, pipe bpp %d > max supported %d\n",
+ crtc_state->pipe_bpp, max_bpp);
return false;
}
@@ -665,16 +720,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* x granularity.
*/
if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
- DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
- crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+ crtc_hdisplay, dev_priv->psr.su_x_granularity);
return false;
}
if (crtc_state->crc_enabled) {
- DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
return false;
}
+ tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
}
@@ -700,31 +758,36 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* hardcoded to PORT_A
*/
if (dig_port->base.port != PORT_A) {
- DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Port not supported\n");
return;
}
if (dev_priv->psr.sink_not_reliable) {
- DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink implementation is not reliable\n");
return;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Interlaced mode enabled\n");
return;
}
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
if (psr_setup_time < 0) {
- DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
- intel_dp->psr_dpcd[1]);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
return;
}
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
- psr_setup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
return;
}
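To make the vblank budget in this check concrete: with the same hypothetical 1080p timing (vtotal 1125, vdisplay 1080, ~14.8us per line), a sketch of the comparison with an assumed 330us sink setup time:

	#include <stdio.h>

	int main(void)
	{
		unsigned int vblank_lines = 1125 - 1080 - 1;	/* 44 scanlines */
		/* 330us of setup: ceil(330000ns / 14814ns) = 23 scanlines */
		unsigned int setup_lines = (330 * 1000 + 14814 - 1) / 14814;

		printf("%s\n", setup_lines > vblank_lines ?
		       "setup time too long" : "setup time fits in vblank");
		return 0;
	}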
@@ -737,10 +800,12 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
- WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
- WARN_ON(dev_priv->psr.active);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
/* psr1 and psr2 are mutually exclusive. */
@@ -768,11 +833,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
- u32 chicken = I915_READ(reg);
+ u32 chicken = intel_de_read(dev_priv, reg);
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT;
- I915_WRITE(reg, chicken);
+ intel_de_write(dev_priv, reg, chicken);
}
/*
@@ -789,9 +854,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+ mask);
psr_irq_control(dev_priv);
+
+ if (crtc_state->dc3co_exitline) {
+ u32 val;
+
+ /*
+ * TODO: if future platforms support DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
+ val &= ~EXITLINE_MASK;
+ val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
+ val |= EXITLINE_ENABLE;
+ intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
+ }
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -800,14 +880,16 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
struct intel_dp *intel_dp = dev_priv->psr.dp;
u32 val;
- WARN_ON(dev_priv->psr.enabled);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+ /* DC5/DC6 requires at least 6 idle frames */
+ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
+ dev_priv->psr.dc3co_exit_delay = val;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -818,20 +900,22 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
* to avoid any rendering problems.
*/
if (INTEL_GEN(dev_priv) >= 12) {
- val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv,
+ TRANS_PSR_IIR(dev_priv->psr.transcoder));
val &= EDP_PSR_ERROR(0);
} else {
- val = I915_READ(EDP_PSR_IIR);
+ val = intel_de_read(dev_priv, EDP_PSR_IIR);
val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
}
if (val) {
dev_priv->psr.sink_not_reliable = true;
- DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR interruption error set, not enabling PSR\n");
return;
}
- DRM_DEBUG_KMS("Enabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
intel_psr_setup_vsc(intel_dp, crtc_state);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
@@ -860,12 +944,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
if (!crtc_state->has_psr)
return;
- WARN_ON(dev_priv->drrs.dp);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
- if (!psr_global_enabled(dev_priv->psr.debug)) {
- DRM_DEBUG_KMS("PSR disabled by flag\n");
+ if (!psr_global_enabled(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
goto unlock;
}
@@ -881,27 +965,33 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.active) {
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- WARN_ON(val & EDP_PSR2_ENABLE);
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- WARN_ON(val & EDP_PSR_ENABLE);
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
}
if (dev_priv->psr.psr2_enabled) {
tgl_disallow_dc3co_on_psr2_exit(dev_priv);
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- WARN_ON(!(val & EDP_PSR2_ENABLE));
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
val &= ~EDP_PSR2_ENABLE;
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- WARN_ON(!(val & EDP_PSR_ENABLE));
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
val &= ~EDP_PSR_ENABLE;
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@@ -917,8 +1007,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (!dev_priv->psr.enabled)
return;
- DRM_DEBUG_KMS("Disabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
intel_psr_exit(dev_priv);
@@ -933,7 +1023,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Wait till PSR is idle */
if (intel_de_wait_for_clear(dev_priv, psr_status,
psr_status_mask, 2000))
- DRM_ERROR("Timed out waiting PSR idle state\n");
+ drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -959,7 +1049,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
- if (WARN_ON(!CAN_PSR(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
return;
mutex_lock(&dev_priv->psr.lock);
@@ -968,7 +1058,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
- cancel_delayed_work_sync(&dev_priv->psr.idle_work);
+ cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -983,7 +1073,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* but it makes more sense to write to the currently active
* pipe.
*/
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+ intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
else
/*
* A write to CURSURFLIVE does not cause HW tracking to exit PSR
@@ -1015,7 +1105,7 @@ void intel_psr_update(struct intel_dp *intel_dp,
mutex_lock(&dev_priv->psr.lock);
- enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+ enable = crtc_state->has_psr && psr_global_enabled(dev_priv);
psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
@@ -1103,7 +1193,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&dev_priv->psr.lock);
@@ -1167,7 +1258,7 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
- DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
+ drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
@@ -1279,14 +1370,12 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
* When we completely rely on PSR2 S/W tracking in the future,
* intel_psr_flush() will also invalidate and flush PSR for the
* ORIGIN_FLIP event, so tgl_dc3co_flush() will need to be changed
- * accrodingly in future.
+ * accordingly in future.
*/
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
- u32 delay;
-
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.dc3co_enabled)
@@ -1304,10 +1393,8 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
goto unlock;
tgl_psr2_enable_dc3co(dev_priv);
- /* DC5/DC6 required idle frames = 6 */
- delay = 6 * dev_priv->psr.dc3co_exit_delay;
- mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
- usecs_to_jiffies(delay));
+ mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
+ dev_priv->psr.dc3co_exit_delay);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -1391,7 +1478,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
- INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
+ INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
mutex_init(&dev_priv->psr.lock);
}
@@ -1427,14 +1514,15 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
if (r != 1) {
- DRM_ERROR("Error reading ALPM status\n");
+ drm_err(&dev_priv->drm, "Error reading ALPM status\n");
return;
}
if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "ALPM lock timeout error, disabling PSR\n");
/* Clearing error */
drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
@@ -1450,14 +1538,15 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
if (r != 1) {
- DRM_ERROR("Error reading DP_PSR_ESI\n");
+ drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
return;
}
if (val & DP_PSR_CAPS_CHANGE) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Sink PSR capability changed, disabling PSR\n");
/* Clearing it */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
@@ -1482,7 +1571,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
- DRM_ERROR("Error reading PSR status or error status\n");
+ drm_err(&dev_priv->drm,
+ "Error reading PSR status or error status\n");
goto exit;
}
@@ -1492,17 +1582,22 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
}
if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
- DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR RFB storage error, disabling PSR\n");
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR VSC SDP uncorrectable error, disabling PSR\n");
if (error_status & DP_PSR_LINK_CRC_ERROR)
- DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR Link CRC error, disabling PSR\n");
if (error_status & ~errors)
- DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
- error_status & ~errors);
+ drm_err(&dev_priv->drm,
+ "PSR_ERROR_STATUS unhandled errors %x\n",
+ error_status & ~errors);
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
@@ -1542,7 +1637,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
return;
intel_connector = to_intel_connector(connector);
- dig_port = enc_to_dig_port(intel_connector->encoder);
+ dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
if (dev_priv->psr.dp != &dig_port->dp)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 399b1542509f..46beb155d835 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -14,7 +14,7 @@
static void quirk_ssc_force_disable(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
- DRM_INFO("applying lvds SSC disable quirk\n");
+ drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
}
/*
@@ -24,14 +24,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915)
static void quirk_invert_brightness(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
- DRM_INFO("applying inverted panel brightness quirk\n");
+ drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
- DRM_INFO("applying backlight present quirk\n");
+ drm_info(&i915->drm, "applying backlight present quirk\n");
}
/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
@@ -40,7 +40,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915)
static void quirk_increase_t12_delay(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INCREASE_T12_DELAY;
- DRM_INFO("Applying T12 delay quirk\n");
+ drm_info(&i915->drm, "Applying T12 delay quirk\n");
}
/*
@@ -50,7 +50,7 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915)
static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
- DRM_INFO("Applying Increase DDI Disabled quirk\n");
+ drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}
struct intel_quirk {
@@ -82,6 +82,16 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
DMI_MATCH(DMI_PRODUCT_NAME, ""),
},
},
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "Thundersoft TST178 tablet",
+ /* DMI strings are too generic, also match on BIOS date */
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
+ },
+ },
{ } /* terminating entry */
},
.hook = quirk_invert_brightness,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index e8819fd21e03..637d8fe2f8c2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -34,7 +34,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -217,23 +216,23 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
int i;
if (HAS_PCH_SPLIT(dev_priv)) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- POSTING_READ(intel_sdvo->sdvo_reg);
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
/*
* HW workaround: need to write this twice for an issue
* that may result in the first write getting masked.
*/
if (HAS_PCH_IBX(dev_priv)) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- POSTING_READ(intel_sdvo->sdvo_reg);
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
}
return;
}
if (intel_sdvo->port == PORT_B)
- cval = I915_READ(GEN3_SDVOC);
+ cval = intel_de_read(dev_priv, GEN3_SDVOC);
else
- bval = I915_READ(GEN3_SDVOB);
+ bval = intel_de_read(dev_priv, GEN3_SDVOB);
/*
* Write the registers twice for luck. Sometimes,
@@ -241,11 +240,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* The BIOS does this too. Yay, magic
*/
for (i = 0; i < 2; i++) {
- I915_WRITE(GEN3_SDVOB, bval);
- POSTING_READ(GEN3_SDVOB);
+ intel_de_write(dev_priv, GEN3_SDVOB, bval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOB);
- I915_WRITE(GEN3_SDVOC, cval);
- POSTING_READ(GEN3_SDVOC);
+ intel_de_write(dev_priv, GEN3_SDVOC, cval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOC);
}
}
@@ -414,12 +413,10 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
{
const char *cmd_name;
int i, pos = 0;
-#define BUF_LEN 256
- char buffer[BUF_LEN];
+ char buffer[64];
#define BUF_PRINT(args...) \
- pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
-
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
for (i = 0; i < args_len; i++) {
BUF_PRINT("%02X ", ((u8 *)args)[i]);
@@ -433,9 +430,9 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
BUF_PRINT("(%s)", cmd_name);
else
BUF_PRINT("(%02X)", cmd);
- BUG_ON(pos >= BUF_LEN - 1);
+
+ WARN_ON(pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
-#undef BUF_LEN
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
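The BUF_PRINT idiom above relies on snprintf() returning the length it would have written; once the buffer is full, pos keeps growing past sizeof(buffer), which is why the size argument is clamped and the final position is sanity-checked. A stand-alone sketch of the same pattern (illustrative, not the driver macro):

	#include <stdio.h>

	int main(void)
	{
		char buffer[64];
		int pos = 0, i;

		for (i = 0; i < 40; i++) {
			/* clamp the offset so buffer + off never walks off the end */
			int off = pos < (int)sizeof(buffer) ? pos : (int)sizeof(buffer);

			pos += snprintf(buffer + off, sizeof(buffer) - off, "%02X ", i);
		}

		if (pos >= (int)sizeof(buffer) - 1)
			fprintf(stderr, "truncated: needed %d bytes\n", pos + 1);
		printf("%s\n", buffer);
		return 0;
	}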
@@ -540,8 +537,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i, pos = 0;
-#define BUF_LEN 256
- char buffer[BUF_LEN];
+ char buffer[64];
buffer[0] = '\0';
@@ -581,7 +577,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
}
#define BUF_PRINT(args...) \
- pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
cmd_status = sdvo_cmd_status(status);
if (cmd_status)
@@ -600,9 +596,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
goto log_fail;
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- BUG_ON(pos >= BUF_LEN - 1);
+
+ WARN_ON(pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
-#undef BUF_LEN
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
return true;
@@ -1267,6 +1263,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
pipe_config->clock_set = true;
}
+static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
+ const struct drm_connector_state *conn_state)
+{
+ return sdvo->has_hdmi_monitor &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1322,12 +1325,15 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
- if (intel_sdvo_state->base.force_audio != HDMI_AUDIO_OFF_DVI)
- pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
- if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON ||
- (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO && intel_sdvo->has_hdmi_audio))
- pipe_config->has_audio = true;
+ if (pipe_config->has_hdmi_sink) {
+ if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio = intel_sdvo->has_hdmi_audio;
+ else
+ pipe_config->has_audio =
+ intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
+ }
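The reworked audio policy above reads as a small decision table: no HDMI sink means no audio, AUTO follows the sink's audio capability, and otherwise the user's forced value wins. A stand-alone sketch under those assumptions (enum values are illustrative, mirroring the HDMI_AUDIO_* semantics):

	#include <stdio.h>

	enum force_audio { AUDIO_OFF_DVI = -2, AUDIO_OFF = -1,
			   AUDIO_AUTO = 0, AUDIO_ON = 1 };

	static int compute_has_audio(int has_hdmi_sink, int sink_has_audio,
				     enum force_audio force)
	{
		if (!has_hdmi_sink)
			return 0;	/* DVI-style sink: never carry audio */
		if (force == AUDIO_AUTO)
			return sink_has_audio;
		return force == AUDIO_ON;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       compute_has_audio(1, 1, AUDIO_AUTO),	/* 1 */
		       compute_has_audio(1, 0, AUDIO_ON),	/* 1: forced on */
		       compute_has_audio(0, 1, AUDIO_ON));	/* 0: no HDMI sink */
		return 0;
	}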
if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
@@ -1470,7 +1476,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
- DRM_INFO("Setting output timings on %s failed\n",
+ drm_info(&dev_priv->drm,
+ "Setting output timings on %s failed\n",
SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
@@ -1494,12 +1501,14 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
- DRM_INFO("Setting input timings on %s failed\n",
+ drm_info(&dev_priv->drm,
+ "Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
switch (crtc_state->pixel_multiplier) {
default:
- WARN(1, "unknown pixel multiplier specified\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "unknown pixel multiplier specified\n");
/* fall through */
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1518,7 +1527,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
if (intel_sdvo->port == PORT_B)
sdvox &= SDVOB_PRESERVE_MASK;
else
@@ -1564,7 +1573,7 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(sdvo_reg);
+ val = intel_de_read(dev_priv, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -1607,7 +1616,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
@@ -1615,7 +1624,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* Some sdvo encoders are not spec compliant and don't
* implement the mandatory get_timings function.
*/
- DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+ drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
@@ -1667,9 +1676,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
- WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
- "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
- pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+ drm_WARN(dev,
+ encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -1734,7 +1744,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_OFF);
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1791,7 +1801,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
int i;
bool success;
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1806,8 +1816,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
* a given if the status is a success, we succeeded.
*/
if (success && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
}
if (0)
@@ -2219,8 +2230,8 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2709,6 +2720,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_encoder->hotplug = intel_sdvo_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
@@ -3229,9 +3241,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
enum port port)
{
if (HAS_PCH_SPLIT(dev_priv))
- WARN_ON(port != PORT_B);
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B);
else
- WARN_ON(port != PORT_B && port != PORT_C);
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C);
}
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -3269,8 +3281,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- DRM_DEBUG_KMS("No SDVO device found on %s\n",
- SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
}
@@ -3293,8 +3306,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
- SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
/* Output_setup can leave behind connectors! */
goto err_output;
}
@@ -3331,7 +3345,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
&intel_sdvo->pixel_clock_max))
goto err_output;
- DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index a66f224aa17d..72065e4360d5 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index fca77ec1e0dd..deda351719db 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -37,10 +37,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
@@ -104,7 +104,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (min <= 0 || max <= 0)
goto irq_disable;
- if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
+ if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
/*
@@ -113,8 +113,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
* re-entry as well.
*/
if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
- DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
- psr_status);
+ drm_err(&dev_priv->drm,
+ "PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
local_irq_disable();
@@ -135,8 +136,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
break;
if (!timeout) {
- DRM_ERROR("Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
break;
}
@@ -204,7 +206,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
* event outside of the critical section - the spinlock might spin for a
* while ... */
if (new_crtc_state->uapi.event) {
- WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
drm_crtc_arm_vblank_event(&crtc->base,
@@ -221,17 +224,20 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
- DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
- pipe_name(pipe), crtc->debug.start_vbl_count,
- end_vbl_count,
- ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
- crtc->debug.min_vbl, crtc->debug.max_vbl,
- crtc->debug.scanline_start, scanline_end);
+ drm_err(&dev_priv->drm,
+ "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+ pipe_name(pipe), crtc->debug.start_vbl_count,
+ end_vbl_count,
+ ktime_us_delta(end_vbl_time,
+ crtc->debug.start_vbl_time),
+ crtc->debug.min_vbl, crtc->debug.max_vbl,
+ crtc->debug.scanline_start, scanline_end);
}
#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
VBLANK_EVASION_TIME_US)
- DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ drm_warn(&dev_priv->drm,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
pipe_name(pipe),
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
VBLANK_EVASION_TIME_US);
@@ -278,6 +284,16 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
+
+ /*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
@@ -291,26 +307,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
drm_rect_init(src, src_x << 16, src_y << 16,
src_w << 16, src_h << 16);
- if (!fb->format->is_yuv)
- return 0;
-
- /* YUV specific checks */
- if (!rotated) {
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
- } else {
- hsub = vsub = max(fb->format->hsub, fb->format->vsub);
}
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
+
if (src_x % hsub || src_w % hsub) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_x, src_w, hsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, yesno(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_y, src_h, vsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, yesno(rotated));
return -EINVAL;
}
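The subsampling rule above, as a stand-alone sketch for a hypothetical NV12 source (hsub = vsub = 2): an x offset of 5 is rejected because it falls mid-macropixel, while 4 passes.

	#include <stdio.h>

	static int check_src(unsigned int src_x, unsigned int src_w,
			     unsigned int src_y, unsigned int src_h,
			     unsigned int hsub, unsigned int vsub, int rotated)
	{
		if (rotated)	/* rotation swaps axes, so use the larger factor */
			hsub = vsub = (hsub > vsub ? hsub : vsub);

		if (src_x % hsub || src_w % hsub)
			return -1;	/* x/w not on a macropixel boundary */
		if (src_y % vsub || src_h % vsub)
			return -1;	/* y/h not on a macropixel boundary */
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", check_src(5, 1920, 0, 1080, 2, 2, 0),	/* -1 */
		       check_src(4, 1920, 0, 1080, 2, 2, 0));		/*  0 */
		return 0;
	}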
@@ -349,9 +365,8 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int pixel_rate = crtc_state->pixel_rate;
- unsigned int src_w, src_h, dst_w, dst_h;
unsigned int num, den;
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
skl_plane_ratio(crtc_state, plane_state, &num, &den);
@@ -359,17 +374,7 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
den *= 2;
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
-
- /* Downscaling limits the maximum pixel rate */
- dst_w = min(src_w, dst_w);
- dst_h = min(src_h, dst_h);
-
- return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
- mul_u32_u32(den, dst_w * dst_h));
+ return DIV_ROUND_UP(pixel_rate * num, den);
}
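With the plane pixel rate now folding the downscale factor in, the minimum cdclk reduces to a single division; a worked sketch with hypothetical numbers (148500 kHz plane rate, 1:1 plane ratio, and gen10+ doubling the denominator for 2 pixels per clock):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int pixel_rate = 148500;	/* kHz, downscale included */
		unsigned int num = 1, den = 1;

		den *= 2;	/* gen10+/GLK pipes process 2 pixels per clock */
		printf("min cdclk = %u kHz\n", DIV_ROUND_UP(pixel_rate * num, den));
		return 0;	/* prints 74250 kHz for these inputs */
	}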
static unsigned int
@@ -434,14 +439,16 @@ skl_program_scaler(struct intel_plane *plane,
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
}
- I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
- I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ (crtc_x << 16) | crtc_y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ (crtc_w << 16) | crtc_h);
}
/* Preoffset values for YUV to RGB Conversion */
@@ -547,28 +554,37 @@ icl_program_input_csc(struct intel_plane *plane,
else
csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
- GOFF(csc[1]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
- GOFF(csc[4]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
- GOFF(csc[7]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
-
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
- PREOFF_YUV_TO_RGB_HI);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+ BOFF(csc[2]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+ ROFF(csc[3]) | GOFF(csc[4]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+ BOFF(csc[5]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+ ROFF(csc[6]) | GOFF(csc[7]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+ BOFF(csc[8]));
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ 0);
else
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
- PREOFF_YUV_TO_RGB_LO);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
}
static void
@@ -623,44 +639,49 @@ skl_program_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ (src_h << 16) | src_w);
if (INTEL_GEN(dev_priv) < 12)
aux_dist |= aux_stride;
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
if (icl_is_hdr_plane(dev_priv, plane_id))
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+ plane_state->cus_ctl);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ (y << 16) | x);
if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
+ intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
if (plane_state->scaler_id >= 0)
skl_program_scaler(plane, crtc_state, plane_state);
@@ -693,12 +714,12 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (icl_is_hdr_plane(dev_priv, plane_id))
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
skl_write_plane_wm(plane, crtc_state);
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -718,7 +739,7 @@ skl_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
*pipe = plane->pipe;
@@ -774,23 +795,36 @@ chv_update_csc(const struct intel_plane_state *plane_state)
if (!fb->format->is_yuv)
return;
- I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
- I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
- I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
- I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
- I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
- I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8]));
-
- I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
- I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
- I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
-
- I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ intel_de_write_fw(dev_priv, SPCSCC01(plane_id),
+ SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
+ intel_de_write_fw(dev_priv, SPCSCC23(plane_id),
+ SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
+ intel_de_write_fw(dev_priv, SPCSCC45(plane_id),
+ SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
+ intel_de_write_fw(dev_priv, SPCSCC67(plane_id),
+ SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
+ intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+
+ intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id),
+ SPCSC_IMAX(1023) | SPCSC_IMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+ intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+
+ intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
#define SIN_0 0
@@ -829,10 +863,10 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
}
/* FIXME these registers are single buffered :( */
- I915_WRITE_FW(SPCLRC0(pipe, plane_id),
- SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
- I915_WRITE_FW(SPCLRC1(pipe, plane_id),
- SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
+ intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id),
+ SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
+ intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id),
+ SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
static void
@@ -1019,10 +1053,8 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
- gamma[i] << 16 |
- gamma[i] << 8 |
- gamma[i]);
+ intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
static void
@@ -1055,32 +1087,37 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+ (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
- I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+ intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id),
+ key->max_value);
}
- I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
- I915_WRITE_FW(SPSURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
vlv_update_gamma(plane_state);
@@ -1099,8 +1136,8 @@ vlv_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
- I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
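A pattern that recurs in every update/disable path in this file: the _fw helpers skip per-access forcewake and lock handling, so each register burst is bracketed by an explicit spin_lock_irqsave(&dev_priv->uncore.lock, ...) pair, and the surface register is always the last write because it is the one that arms the hardware update. A condensed sketch of that discipline (illustrative only):

    /* Sketch of the locking/arming discipline used by the paths above. */
    static void sprite_update_sketch(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum plane_id plane_id,
                                     u32 ctl, u32 surf)
    {
            unsigned long irqflags;

            spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
            /* ... program stride/position/size/offset registers here ... */
            intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), ctl);
            /* The surface write latches everything; it must come last. */
            intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), surf);
            spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
    }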
@@ -1120,7 +1157,7 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+ ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
@@ -1424,19 +1461,17 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
 /* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
- I915_WRITE_FW(SPRGAMC(pipe, i),
- gamma[i] << 20 |
- gamma[i] << 10 |
- gamma[i]);
-
- I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
- I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
- I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]);
i++;
- I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
- I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
- I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]);
i++;
}
@@ -1476,25 +1511,27 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
if (key->flags) {
- I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value);
}
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPROFFSET(pipe), (y << 16) | x);
} else {
- I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), (y << 16) | x);
}
/*
@@ -1502,9 +1539,9 @@ ivb_update_plane(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(SPRCTL(pipe), sprctl);
- I915_WRITE_FW(SPRSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_update_gamma(plane_state);
@@ -1521,11 +1558,11 @@ ivb_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPRCTL(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
/* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), 0);
- I915_WRITE_FW(SPRSURF(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1544,7 +1581,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+ ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
@@ -1710,10 +1747,8 @@ static void g4x_update_gamma(const struct intel_plane_state *plane_state)
 /* FIXME these registers are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
- gamma[i] << 16 |
- gamma[i] << 8 |
- gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
static void ilk_sprite_linear_gamma(u16 gamma[17])
@@ -1741,14 +1776,12 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state)
 /* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
- I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
- gamma[i] << 20 |
- gamma[i] << 10 |
- gamma[i]);
-
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
i++;
}
@@ -1788,28 +1821,30 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
if (key->flags) {
- I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
- I915_WRITE_FW(DVSSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
g4x_update_gamma(plane_state);
@@ -1829,10 +1864,10 @@ g4x_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DVSCNTR(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
/* Disable the scaler */
- I915_WRITE_FW(DVSSCALE(pipe), 0);
- I915_WRITE_FW(DVSSURF(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1851,7 +1886,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+ ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
@@ -1999,7 +2034,8 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
if (IS_CHERRYVIEW(dev_priv) &&
rotation & DRM_MODE_ROTATE_180 &&
rotation & DRM_MODE_REFLECT_X) {
- DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
@@ -2040,6 +2076,18 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -2054,21 +2102,24 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
is_ccs_modifier(fb->modifier)) {
- DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
- rotation);
+ drm_dbg_kms(&dev_priv->drm,
+ "RC support only with 0/180 degree rotation (%x)\n",
+ rotation);
return -EINVAL;
}
if (rotation & DRM_MODE_REFLECT_X &&
fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with linear surface formats\n");
return -EINVAL;
}
if (drm_rotation_90_or_270(rotation)) {
if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
- DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
@@ -2091,9 +2142,10 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
return -EINVAL;
default:
break;
@@ -2109,7 +2161,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
- DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /* Wa_1606054188:tgl */
+ if (IS_TIGERLAKE(dev_priv) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
return -EINVAL;
}
@@ -2136,10 +2198,11 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
(crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
- DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
- crtc_x + crtc_w < 4 ? "end" : "start",
- crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
- 4, pipe_src_w - 4);
+ drm_dbg_kms(&dev_priv->drm,
+ "requested plane X %s position %d invalid (valid range %d-%d)\n",
+ crtc_x + crtc_w < 4 ? "end" : "start",
+ crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+ 4, pipe_src_w - 4);
return -ERANGE;
}
@@ -2968,7 +3031,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@@ -3023,10 +3085,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -3077,7 +3137,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
unsigned int supported_rotations;
const u64 *modifiers;
const u32 *formats;
@@ -3162,10 +3221,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 7773169b7331..9b850c11aa78 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -61,7 +61,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
lane_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
- WARN_ON(lane_mask == 0xffffffff);
+ drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -76,7 +76,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
pin_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
- WARN_ON(pin_mask == 0xffffffff);
+ drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -120,7 +120,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
struct intel_uncore *uncore = &i915->uncore;
u32 val;
- WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+ drm_WARN_ON(&i915->drm,
+ lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
@@ -181,8 +182,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, nothing connected\n",
+ dig_port->tc_port_name);
return mask;
}
@@ -195,7 +197,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
- if (!WARN_ON(hweight32(mask) > 1))
+ if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
@@ -210,8 +212,9 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
return false;
}
@@ -228,8 +231,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
- dig_port->tc_port_name,
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+ dig_port->tc_port_name,
enableddisabled(enable));
return false;
@@ -243,8 +247,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
- DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY complete clear timed out\n",
+ dig_port->tc_port_name);
return true;
}
@@ -258,8 +263,9 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assume safe mode\n",
+ dig_port->tc_port_name);
return true;
}
@@ -409,16 +415,17 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- WARN_ON(intel_display_power_is_enabled(i915,
- intel_aux_power_domain(dig_port)));
+ drm_WARN_ON(&i915->drm,
+ intel_display_power_is_enabled(i915,
+ intel_aux_power_domain(dig_port)));
icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
- DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(old_tc_mode),
- tc_port_mode_name(dig_port->tc_mode));
+ drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(old_tc_mode),
+ tc_port_mode_name(dig_port->tc_mode));
}
static void
@@ -503,7 +510,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
- WARN_ON(dig_port->tc_lock_wakeref);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;
}
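The WARN_ON → drm_WARN_ON conversions in this file follow the same motivation as the logging changes: the drm_* variants prefix the splat with the driver and device name, which disambiguates warnings on multi-GPU systems. Conceptually they reduce to something like this (a sketch; the real macros are in drm_print.h):

    #define drm_WARN(drm, condition, format, arg...)                      \
            WARN(condition, "%s %s: " format,                             \
                 dev_driver_string((drm)->dev), dev_name((drm)->dev), ##arg)

    #define drm_WARN_ON(drm, x)                                           \
            drm_WARN((drm), (x), "%s", "WARN_ON(" __stringify(x) ")")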
@@ -550,7 +557,7 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(i915, port);
- if (WARN_ON(tc_port == PORT_TC_NONE))
+ if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
return;
snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index c75e0ceecee6..d2e3a3a323e9 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,7 +33,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -907,7 +906,7 @@ static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 tmp = I915_READ(TV_CTL);
+ u32 tmp = intel_de_read(dev_priv, TV_CTL);
*pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
@@ -926,7 +925,8 @@ intel_enable_tv(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv,
to_intel_crtc(pipe_config->uapi.crtc)->pipe);
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
}
static void
@@ -937,7 +937,8 @@ intel_disable_tv(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
@@ -1095,11 +1096,11 @@ intel_tv_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
- tv_ctl = I915_READ(TV_CTL);
- hctl1 = I915_READ(TV_H_CTL_1);
- hctl3 = I915_READ(TV_H_CTL_3);
- vctl1 = I915_READ(TV_V_CTL_1);
- vctl2 = I915_READ(TV_V_CTL_2);
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ hctl1 = intel_de_read(dev_priv, TV_H_CTL_1);
+ hctl3 = intel_de_read(dev_priv, TV_H_CTL_3);
+ vctl1 = intel_de_read(dev_priv, TV_V_CTL_1);
+ vctl2 = intel_de_read(dev_priv, TV_V_CTL_2);
tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
@@ -1134,17 +1135,17 @@ intel_tv_get_config(struct intel_encoder *encoder,
break;
}
- tmp = I915_READ(TV_WIN_POS);
+ tmp = intel_de_read(dev_priv, TV_WIN_POS);
xpos = tmp >> 16;
ypos = tmp & 0xffff;
- tmp = I915_READ(TV_WIN_SIZE);
+ tmp = intel_de_read(dev_priv, TV_WIN_SIZE);
xsize = tmp >> 16;
ysize = tmp & 0xffff;
intel_tv_mode_to_mode(&mode, &tv_mode);
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(&mode);
intel_tv_scale_mode_horiz(&mode, hdisplay,
@@ -1200,7 +1201,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+ drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
pipe_config->port_clock = tv_mode->clock;
@@ -1215,7 +1216,8 @@ intel_tv_compute_config(struct intel_encoder *encoder,
extra = adjusted_mode->crtc_vdisplay - vdisplay;
if (extra < 0) {
- DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No vertical scaling for >1024 pixel wide modes\n");
return -EINVAL;
}
@@ -1248,7 +1250,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
tv_conn_state->bypass_vfilter = false;
}
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(adjusted_mode);
/*
@@ -1380,16 +1382,16 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
- I915_WRITE(TV_H_CTL_1, hctl1);
- I915_WRITE(TV_H_CTL_2, hctl2);
- I915_WRITE(TV_H_CTL_3, hctl3);
- I915_WRITE(TV_V_CTL_1, vctl1);
- I915_WRITE(TV_V_CTL_2, vctl2);
- I915_WRITE(TV_V_CTL_3, vctl3);
- I915_WRITE(TV_V_CTL_4, vctl4);
- I915_WRITE(TV_V_CTL_5, vctl5);
- I915_WRITE(TV_V_CTL_6, vctl6);
- I915_WRITE(TV_V_CTL_7, vctl7);
+ intel_de_write(dev_priv, TV_H_CTL_1, hctl1);
+ intel_de_write(dev_priv, TV_H_CTL_2, hctl2);
+ intel_de_write(dev_priv, TV_H_CTL_3, hctl3);
+ intel_de_write(dev_priv, TV_V_CTL_1, vctl1);
+ intel_de_write(dev_priv, TV_V_CTL_2, vctl2);
+ intel_de_write(dev_priv, TV_V_CTL_3, vctl3);
+ intel_de_write(dev_priv, TV_V_CTL_4, vctl4);
+ intel_de_write(dev_priv, TV_V_CTL_5, vctl5);
+ intel_de_write(dev_priv, TV_V_CTL_6, vctl6);
+ intel_de_write(dev_priv, TV_V_CTL_7, vctl7);
}
static void set_color_conversion(struct drm_i915_private *dev_priv,
@@ -1398,18 +1400,18 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
if (!color_conversion)
return;
- I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
- color_conversion->gy);
- I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
- color_conversion->ay);
- I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
- color_conversion->gu);
- I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
- color_conversion->au);
- I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
- color_conversion->gv);
- I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
- color_conversion->av);
+ intel_de_write(dev_priv, TV_CSC_Y,
+ (color_conversion->ry << 16) | color_conversion->gy);
+ intel_de_write(dev_priv, TV_CSC_Y2,
+ (color_conversion->by << 16) | color_conversion->ay);
+ intel_de_write(dev_priv, TV_CSC_U,
+ (color_conversion->ru << 16) | color_conversion->gu);
+ intel_de_write(dev_priv, TV_CSC_U2,
+ (color_conversion->bu << 16) | color_conversion->au);
+ intel_de_write(dev_priv, TV_CSC_V,
+ (color_conversion->rv << 16) | color_conversion->gv);
+ intel_de_write(dev_priv, TV_CSC_V2,
+ (color_conversion->bv << 16) | color_conversion->av);
}
static void intel_tv_pre_enable(struct intel_encoder *encoder,
@@ -1434,7 +1436,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
- tv_ctl = I915_READ(TV_CTL);
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
switch (intel_tv->type) {
@@ -1511,21 +1513,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
- I915_WRITE(TV_SC_CTL_1, scctl1);
- I915_WRITE(TV_SC_CTL_2, scctl2);
- I915_WRITE(TV_SC_CTL_3, scctl3);
+ intel_de_write(dev_priv, TV_SC_CTL_1, scctl1);
+ intel_de_write(dev_priv, TV_SC_CTL_2, scctl2);
+ intel_de_write(dev_priv, TV_SC_CTL_3, scctl3);
set_color_conversion(dev_priv, color_conversion);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
else
- I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
if (video_levels)
- I915_WRITE(TV_CLR_LEVEL,
- ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
- (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+ intel_de_write(dev_priv, TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
@@ -1533,7 +1534,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
tv_filter_ctl = TV_AUTO_SCALE;
if (tv_conn_state->bypass_vfilter)
tv_filter_ctl |= TV_V_FILTER_BYPASS;
- I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl);
+ intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl);
xsize = tv_mode->hblank_start - tv_mode->hblank_end;
ysize = intel_tv_mode_vdisplay(tv_mode);
@@ -1544,20 +1545,25 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
conn_state->tv.margins.right);
ysize -= (tv_conn_state->margins.top +
tv_conn_state->margins.bottom);
- I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
- I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+ intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos);
+ intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize);
j = 0;
for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_H_LUMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_H_CHROMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_V_LUMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
- I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
- I915_WRITE(TV_CTL, tv_ctl);
+ intel_de_write(dev_priv, TV_V_CHROMA(i),
+ tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_DAC,
+ intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE);
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
}
static int
@@ -1581,8 +1587,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
spin_unlock_irq(&dev_priv->irq_lock);
}
- save_tv_dac = tv_dac = I915_READ(TV_DAC);
- save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+ save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC);
+ save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL);
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
@@ -1608,15 +1614,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
- I915_WRITE(TV_CTL, tv_ctl);
- I915_WRITE(TV_DAC, tv_dac);
- POSTING_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
+ intel_de_write(dev_priv, TV_DAC, tv_dac);
+ intel_de_posting_read(dev_priv, TV_DAC);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
- tv_dac = I915_READ(TV_DAC);
- DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ tv_dac = intel_de_read(dev_priv, TV_DAC);
+ drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
/*
* A B C
* 0 1 1 Composite
@@ -1624,22 +1630,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n");
type = -1;
}
- I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- I915_WRITE(TV_CTL, save_tv_ctl);
- POSTING_READ(TV_CTL);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ intel_de_write(dev_priv, TV_CTL, save_tv_ctl);
+ intel_de_posting_read(dev_priv, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -1794,7 +1803,7 @@ intel_tv_get_modes(struct drm_connector *connector)
*/
intel_tv_mode_to_mode(mode, tv_mode);
if (count == 0) {
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(mode);
}
intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
@@ -1870,11 +1879,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
int i, initial_mode = 0;
struct drm_connector_state *state;
- if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
if (!intel_bios_is_tv_present(dev_priv)) {
- DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n");
return;
}
@@ -1882,15 +1891,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* Sanity check the TV output by checking to see if the
* DAC register holds a value
*/
- save_tv_dac = I915_READ(TV_DAC);
+ save_tv_dac = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
- tv_dac_on = I915_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- tv_dac_off = I915_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac);
/*
* If the register does not hold the state change enable
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 4d0c23b29248..05c7cbe32eb4 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -111,7 +111,7 @@ enum bdb_block_id {
BDB_LVDS_LFP_DATA_PTRS = 41,
BDB_LVDS_LFP_DATA = 42,
BDB_LVDS_BACKLIGHT = 43,
- BDB_LVDS_POWER = 44,
+ BDB_LFP_POWER = 44,
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
BDB_COMPRESSION_PARAMETERS = 56,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 9e6aaa302e40..95ad87d4ccb3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -6,8 +6,6 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -374,7 +372,7 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
return false;
/* There's no pipe A DSC engine on ICL */
- WARN_ON(crtc->pipe == PIPE_A);
+ drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A);
return true;
}
@@ -518,119 +516,149 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
- DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_0,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_1 registers */
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
- DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_1,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_2 registers */
pps_val = 0;
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
- DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_2,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_3 registers */
pps_val = 0;
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
- DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_3,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_4 registers */
pps_val = 0;
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
- DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_4,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_5 registers */
pps_val = 0;
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
- DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_5,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_6 registers */
@@ -639,80 +667,100 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
- DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_6,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_7 registers */
pps_val = 0;
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
- DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_7,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_8 registers */
pps_val = 0;
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
- DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_8,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_9 registers */
pps_val = 0;
pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
- DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_9,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_10 registers */
@@ -721,20 +769,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
- DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_10, pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
}
/* Populate Picture parameter set 16 */
@@ -744,20 +797,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
vdsc_cfg->slice_width) |
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
- DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_16, pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
}
/* Populate the RC_BUF_THRESH registers */
@@ -766,42 +824,50 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
rc_buf_thresh_dword[i / 4] |=
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
- DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
- I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
- I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
- I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(DSCC_RC_BUF_THRESH_0,
- rc_buf_thresh_dword[0]);
- I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
- rc_buf_thresh_dword[1]);
- I915_WRITE(DSCC_RC_BUF_THRESH_1,
- rc_buf_thresh_dword[2]);
- I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
}
} else {
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
- rc_buf_thresh_dword[0]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
- rc_buf_thresh_dword[1]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
- rc_buf_thresh_dword[2]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
- rc_buf_thresh_dword[0]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
- rc_buf_thresh_dword[1]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
- rc_buf_thresh_dword[2]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
}
}
@@ -815,78 +881,94 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
RC_MAX_QP_SHIFT) |
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
- DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
- rc_range_params_dword[0]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
- rc_range_params_dword[1]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
- rc_range_params_dword[2]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
- rc_range_params_dword[3]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
- rc_range_params_dword[4]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
- rc_range_params_dword[5]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
- rc_range_params_dword[6]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
- rc_range_params_dword[0]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
- rc_range_params_dword[1]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
- rc_range_params_dword[2]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
- rc_range_params_dword[3]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
- rc_range_params_dword[4]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
- rc_range_params_dword[5]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
- rc_range_params_dword[6]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
}
} else {
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
- rc_range_params_dword[0]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
- rc_range_params_dword[1]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
- rc_range_params_dword[2]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
- rc_range_params_dword[3]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
- rc_range_params_dword[4]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
- rc_range_params_dword[5]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
- rc_range_params_dword[6]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
- rc_range_params_dword[0]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
- rc_range_params_dword[1]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
- rc_range_params_dword[2]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
- rc_range_params_dword[3]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
- rc_range_params_dword[4]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
- rc_range_params_dword[5]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
- rc_range_params_dword[6]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
}
}
}
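Every PPS value in intel_dsc_pps_configure() fans out to up to four destinations (DSCA/DSCC on the transcoder path, ICL_DSC0/ICL_DSC1 on the pipe path), and the same if/else ladder repeats per parameter set. The repetition invites a helper along these lines (hypothetical sketch; the parameterized PPS register macros shown here do not exist in this version of the code):

    /* Hypothetical fan-out helper for the repeated PPS programming above. */
    static void dsc_pps_write(struct drm_i915_private *dev_priv,
                              const struct intel_crtc_state *crtc_state,
                              enum pipe pipe, int pps, u32 pps_val)
    {
            if (!is_pipe_dsc(crtc_state)) {
                    intel_de_write(dev_priv, DSCA_PPS(pps), pps_val);
                    if (crtc_state->dsc.dsc_split)
                            intel_de_write(dev_priv, DSCC_PPS(pps), pps_val);
            } else {
                    intel_de_write(dev_priv, ICL_DSC0_PPS(pipe, pps), pps_val);
                    if (crtc_state->dsc.dsc_split)
                            intel_de_write(dev_priv, ICL_DSC1_PPS(pipe, pps),
                                           pps_val);
            }
    }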
@@ -912,11 +994,11 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
return;
if (!is_pipe_dsc(crtc_state)) {
- dss_ctl1 = I915_READ(DSS_CTL1);
- dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
} else {
- dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
- dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+ dss_ctl1 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+ dss_ctl2 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL2(pipe));
}
crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
@@ -930,9 +1012,10 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
/* PPS1 */
if (!is_pipe_dsc(crtc_state))
- val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+ val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
else
- val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+ val = intel_de_read(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
vdsc_cfg->bits_per_pixel = val;
crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
out:
@@ -1013,8 +1096,8 @@ void intel_dsc_enable(struct intel_encoder *encoder,
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
- I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
- I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
}
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
@@ -1035,17 +1118,17 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
}
- dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ dss_ctl1_val = intel_de_read(dev_priv, dss_ctl1_reg);
if (dss_ctl1_val & JOINER_ENABLE)
dss_ctl1_val &= ~JOINER_ENABLE;
- I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
- dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ dss_ctl2_val = intel_de_read(dev_priv, dss_ctl2_reg);
if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
RIGHT_BRANCH_VDSC_ENABLE);
- I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
/* Disable Power wells for VDSC/joining */
intel_display_power_put_unchecked(dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 2ff7293986d4..be333699c515 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -9,6 +9,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_vga.h"
static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
@@ -36,16 +37,17 @@ void intel_vga_disable(struct drm_i915_private *dev_priv)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
+ intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE);
+ intel_de_posting_read(dev_priv, vga_reg);
}
void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
{
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Something enabled VGA plane, disabling it\n");
intel_vga_disable(dev_priv);
}
}
@@ -98,7 +100,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
u16 gmch_ctrl;
if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
- DRM_ERROR("failed to read control word\n");
+ drm_err(&i915->drm, "failed to read control word\n");
return -EIO;
}
@@ -111,7 +113,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
- DRM_ERROR("failed to write control word\n");
+ drm_err(&i915->drm, "failed to write control word\n");
return -EIO;
}
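The same idea drives the logging changes: DRM_ERROR()/DRM_DEBUG_KMS() print without identifying the device, while drm_err()/drm_dbg_kms() take a struct drm_device pointer and prefix each message with the device name, which matters on multi-GPU systems. A usage sketch (the function name is hypothetical; the macros are the real drm_print.h ones):

static void log_example(struct drm_i915_private *i915)
{
	/* device-qualified error, replaces DRM_ERROR() */
	drm_err(&i915->drm, "failed to read control word\n");

	/* device-qualified KMS debug, replaces DRM_DEBUG_KMS() */
	drm_dbg_kms(&i915->drm, "pipe %c\n", pipe_name(PIPE_A));
}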
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index daf4fc3dab6f..f4c362dc6e15 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -85,7 +85,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
mask, 100))
- DRM_ERROR("DPI FIFOs are not empty\n");
+ drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
}
static void write_data(struct drm_i915_private *dev_priv,
@@ -100,7 +100,7 @@ static void write_data(struct drm_i915_private *dev_priv,
for (j = 0; j < min_t(u32, len - i, 4); j++)
val |= *data++ << 8 * j;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -111,7 +111,7 @@ static void read_data(struct drm_i915_private *dev_priv,
u32 i, j;
for (i = 0; i < len; i += 4) {
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
for (j = 0; j < min_t(u32, len - i, 4); j++)
*data++ = val >> 8 * j;
@@ -154,29 +154,34 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
if (packet.payload_length) {
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
data_mask, 50))
- DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
packet.payload_length);
}
if (msg->rx_len) {
- I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+ GEN_READ_DATA_AVAIL);
}
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 50)) {
- DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP CTRL FIFO !full\n");
}
- I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+ intel_de_write(dev_priv, ctrl_reg,
+ header[2] << 16 | header[1] << 8 | header[0]);
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
data_mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
}
@@ -223,17 +228,19 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
cmd |= DPI_LP_MODE;
/* clear bit */
- I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
- if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
- DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd);
+ if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
+ drm_dbg_kms(&dev_priv->drm,
+ "Same special packet %02x twice in a row.\n", cmd);
- I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+ intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
- DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+ drm_err(&dev_priv->drm,
+ "Video mode command 0x%08x send failed.\n", cmd);
return 0;
}
@@ -265,7 +272,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (fixed_mode) {
@@ -328,36 +335,37 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
- I915_WRITE(MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE);
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | GLK_MIPIIO_ENABLE);
}
/* Put the IO into reset */
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
tmp &= ~GLK_LP_WAKE;
else
tmp |= GLK_LP_WAKE;
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
- DRM_ERROR("MIPIO port is powergated\n");
+ drm_err(&dev_priv->drm, "MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |=
- !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
+ !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
}
return cold_boot;
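glk_dsi_enable_io() above is a chain of read-modify-write updates on MIPI_CTRL. A sketch of a helper that collapses that recurring pattern, assuming only the intel_de_read()/intel_de_write() accessors introduced by this series (the i915 tree has a similar intel_de_rmw(); this body is illustrative rather than the upstream definition):

static u32 de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
		  u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);
	val = (old & ~clear) | set;
	if (val != old)		/* skip the write when nothing changes */
		intel_de_write(i915, reg, val);

	return old;
}

With it, the IO-reset step above would read de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0);.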
@@ -374,48 +382,49 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not ON\n");
+ drm_err(&dev_priv->drm, "PHY is not ON\n");
}
/* Get IO out of reset */
- val = I915_READ(MIPI_CTRL(PORT_A));
- I915_WRITE(MIPI_CTRL(PORT_A), val | GLK_MIPIIO_RESET_RELEASED);
+ val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ val | GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= DEVICE_READY;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_ULPS_NOT_ACTIVE, 20))
- DRM_ERROR("ULPS not active\n");
+ drm_err(&dev_priv->drm, "ULPS not active\n");
/* Exit ULPS */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
/* Enter Normal Mode */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
- val = I915_READ(MIPI_CTRL(port));
+ val = intel_de_read(dev_priv, MIPI_CTRL(port));
val &= ~GLK_LP_WAKE;
- I915_WRITE(MIPI_CTRL(port), val);
+ intel_de_write(dev_priv, MIPI_CTRL(port), val);
}
}
@@ -423,14 +432,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_DATA_LANE_STOP_STATE, 20))
- DRM_ERROR("Date lane not in STOP state\n");
+ drm_err(&dev_priv->drm,
+ "Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT, 20))
- DRM_ERROR("D-PHY not entering LP-11 state\n");
+ drm_err(&dev_priv->drm,
+ "D-PHY not entering LP-11 state\n");
}
}
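The intel_de_wait_for_set()/intel_de_wait_for_clear() calls used throughout these hunks poll a register until a mask reaches the wanted state or a millisecond timeout expires, returning 0 on success. A simplified stand-in for kernel process context, assuming jiffies-based timing (the upstream helper is more elaborate and goes through the uncore wait infrastructure):

static int wait_for_register(struct drm_i915_private *i915,
			     i915_reg_t reg, u32 mask, u32 value,
			     unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if ((intel_de_read(i915, reg) & mask) == value)
			return 0;
		if (time_after(jiffies, deadline))
			break;
		usleep_range(10, 50);	/* don't hammer the bus */
	}

	/* one final sample in case we slept past the deadline */
	return (intel_de_read(i915, reg) & mask) == value ? 0 : -ETIMEDOUT;
}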
@@ -441,23 +452,24 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(BXT_MIPI_PORT_CTRL(port));
- I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ val | LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
/* Clear ULPS and set device ready */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
usleep_range(2000, 2500);
val |= DEVICE_READY;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
}
}
@@ -468,7 +480,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
@@ -481,21 +493,25 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_ENTER);
usleep_range(2500, 3000);
/* Enable MIPI PHY transparent latch
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- val = I915_READ(MIPI_PORT_CTRL(PORT_A));
- I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
+ val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_EXIT);
usleep_range(2500, 3000);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY);
usleep_range(2500, 3000);
}
}
@@ -521,24 +537,25 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Enter ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
}
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not turning OFF\n");
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
- DRM_ERROR("MIPI IO Port is not powergated\n");
+ drm_err(&dev_priv->drm,
+ "MIPI IO Port is not powergated\n");
}
}
@@ -550,22 +567,22 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
u32 tmp;
/* Put the IO into reset */
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not turning OFF\n");
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
}
/* Clear MIPI mode */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~GLK_MIPIIO_ENABLE;
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
}
@@ -581,23 +598,23 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_EXIT);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
/*
@@ -607,14 +624,14 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
- DRM_ERROR("DSI LP not going Low\n");
+ drm_err(&dev_priv->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- val = I915_READ(port_ctrl);
- I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
}
@@ -631,18 +648,20 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
u32 temp;
if (IS_GEN9_LP(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports) {
- temp = I915_READ(MIPI_CTRL(port));
+ temp = intel_de_read(dev_priv,
+ MIPI_CTRL(port));
temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
BXT_PIXEL_OVERLAP_CNT_SHIFT;
- I915_WRITE(MIPI_CTRL(port), temp);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp);
}
} else {
- temp = I915_READ(VLV_CHICKEN_3);
+ temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
- I915_WRITE(VLV_CHICKEN_3, temp);
+ intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
}
}
@@ -651,7 +670,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
- temp = I915_READ(port_ctrl);
+ temp = intel_de_read(dev_priv, port_ctrl);
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
@@ -671,8 +690,8 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
temp |= DITHERING_ENABLE;
/* assert ip_tg_enable signal */
- I915_WRITE(port_ctrl, temp | DPI_ENABLE);
- POSTING_READ(port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -689,9 +708,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
u32 temp;
/* de-assert ip_tg_enable signal */
- temp = I915_READ(port_ctrl);
- I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
- POSTING_READ(port_ctrl);
+ temp = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
}
}
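intel_dsi_port_enable() and intel_dsi_port_disable() both end by toggling DPI_ENABLE and flushing it with a posting read so the hardware observes the change before the caller moves on. The shared shape, as a sketch (the helper name is hypothetical):

static void dpi_set_enable(struct drm_i915_private *i915,
			   i915_reg_t port_ctrl, bool enable)
{
	u32 tmp = intel_de_read(i915, port_ctrl);

	if (enable)
		tmp |= DPI_ENABLE;	/* assert ip_tg_enable */
	else
		tmp &= ~DPI_ENABLE;	/* de-assert ip_tg_enable */

	intel_de_write(i915, port_ctrl, tmp);
	intel_de_posting_read(i915, port_ctrl);	/* flush before proceeding */
}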
@@ -753,7 +772,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
u32 val;
bool glk_cold_boot = false;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -771,22 +790,22 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
- val | MIPIO_RST_CTRL);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val | MIPIO_RST_CTRL);
/* Power up DSI regulator */
- I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
/* Disable DPOunit clock gating, can stall pipe */
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val |= DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -820,7 +839,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* recommendation, port should be enabled before plane & pipe */
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_de_write(dev_priv,
+ MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
} else {
@@ -838,6 +858,15 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
+static void bxt_dsi_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_crtc_vblank_on(crtc_state);
+}
+
/*
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
@@ -886,7 +915,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (IS_GEN9_LP(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -917,13 +946,14 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv)) {
/* Power down DSI regulator to save power */
- I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+ HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
- val & ~MIPIO_RST_CTRL);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val & ~MIPIO_RST_CTRL);
}
if (IS_GEN9_LP(dev_priv)) {
@@ -933,9 +963,9 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
vlv_dsi_pll_disable(encoder);
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
/* Assert reset */
@@ -960,7 +990,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum port port;
bool active = false;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
@@ -979,7 +1009,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
+ bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -988,26 +1018,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
- u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ u32 tmp = intel_de_read(dev_priv,
+ MIPI_DSI_FUNC_PRG(port));
enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
}
if (!enabled)
continue;
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
continue;
if (IS_GEN9_LP(dev_priv)) {
- u32 tmp = I915_READ(MIPI_CTRL(port));
+ u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (WARN_ON(tmp > PIPE_C))
+ if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
continue;
*pipe = tmp;
@@ -1051,11 +1082,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
* encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
- fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
@@ -1067,21 +1098,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
- I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
- I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
- I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VTOTAL(port));
hactive = adjusted_mode->crtc_hdisplay;
- hfp = I915_READ(MIPI_HFP_COUNT(port));
+ hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
/*
* Meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes
*/
- hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
- hbp = I915_READ(MIPI_HBP_COUNT(port));
+ hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
/* horizontal values are in terms of high speed byte clock */
hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
@@ -1098,9 +1132,9 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
/* vertical values are in terms of lines */
- vfp = I915_READ(MIPI_VFP_COUNT(port));
- vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
- vbp = I915_READ(MIPI_VBP_COUNT(port));
+ vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
+ vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+ vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
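The horizontal counts read back above (hfp, hsync, hbp) are programmed in high-speed byte-clock units, so the read-out converts them back to pixels. The two conversions, following the definitions this file uses elsewhere (burst_mode_ratio is percent-scaled, hence the factor of 100; treat these as a sketch of the in-file helpers):

/* pixels -> DSI high-speed byte clocks */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
		       u16 burst_mode_ratio)
{
	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
					 8 * 100), lane_count);
}

/* DSI high-speed byte clocks -> pixels, used by the read-out above */
static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
				   u16 burst_mode_ratio)
{
	return DIV_ROUND_UP(clk_hs * lane_count * 8 * 100,
			    bpp * burst_mode_ratio);
}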
@@ -1191,7 +1225,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
@@ -1268,26 +1302,29 @@ static void set_dsi_timings(struct drm_encoder *encoder,
* vactive, as they are calculated per channel basis,
* whereas these values should be based on resolution.
*/
- I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
- adjusted_mode->crtc_hdisplay);
- I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
- adjusted_mode->crtc_vdisplay);
- I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
- adjusted_mode->crtc_vtotal);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+ adjusted_mode->crtc_hdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+ adjusted_mode->crtc_vdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+ adjusted_mode->crtc_vtotal);
}
- I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
- I915_WRITE(MIPI_HFP_COUNT(port), hfp);
+ intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+ hactive);
+ intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
/* meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes */
- I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
- I915_WRITE(MIPI_HBP_COUNT(port), hbp);
+ intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+ hsync);
+ intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
/* vertical values are in terms of lines */
- I915_WRITE(MIPI_VFP_COUNT(port), vfp);
- I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
- I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+ intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
+ intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+ vsync);
+ intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
}
}
@@ -1322,7 +1359,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
u32 val, tmp;
u16 mode_hdisplay;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
@@ -1338,35 +1375,35 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp |
- ESCAPE_CLOCK_DIVIDER_1);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
- I915_WRITE(MIPI_CTRL(port), tmp |
- READ_REQUEST_PRIORITY_HIGH);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEN9_LP(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
tmp |= BXT_PIPE_SELECT(pipe);
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
/* XXX: why here, why like this? handling in irq handler?! */
- I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
- I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
+ intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
- I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+ intel_dsi->dphy_reg);
- I915_WRITE(MIPI_DPI_RESOLUTION(port),
- adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
- mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+ adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
set_dsi_timings(encoder, adjusted_mode);
@@ -1393,7 +1430,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
@@ -1414,28 +1451,24 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
- txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
- intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
- txbyteclkhs(adjusted_mode->crtc_vtotal *
- adjusted_mode->crtc_htotal,
- bpp, intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
}
- I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
- intel_dsi->turn_arnd_val);
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
- intel_dsi->rst_timer_val);
+ intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+ intel_dsi->lp_rx_timeout);
+ intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_dsi->turn_arnd_val);
+ intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+ intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(port),
- txclkesc(intel_dsi->escape_clk_div, 100));
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ txclkesc(intel_dsi->escape_clk_div, 100));
if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
/*
@@ -1444,24 +1477,25 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* getting used. So write the other port
* if not in dual link mode.
*/
- I915_WRITE(MIPI_INIT_COUNT(port ==
- PORT_A ? PORT_C : PORT_A),
- intel_dsi->init_count);
+ intel_de_write(dev_priv,
+ MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+ intel_dsi->init_count);
}
/* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
/* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
- intel_dsi->hs_to_lp_count);
+ intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock.
* the number of byte clocks occupied in one low power clock.
@@ -1469,14 +1503,15 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
* ) / 105.???
*/
- I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+ intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+ intel_dsi->lp_byte_clk);
if (IS_GEMINILAKE(dev_priv)) {
- I915_WRITE(MIPI_TLPX_TIME_COUNT(port),
- intel_dsi->lp_byte_clk);
+ intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+ intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
- I915_WRITE(MIPI_CLK_LANE_TIMING(port),
- intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+ intel_dsi->dphy_reg);
}
/* the bw essential for transmitting 16 long packets containing
@@ -1484,21 +1519,18 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+ intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+ intel_dsi->bw_timer);
- I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
- intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
- intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+ intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
/* Some panels might have resolution which is not a
* multiple of 64 like 1366 x 768. Enable RANDOM
* resolution support for such panels by default */
- I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
- intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format |
- IP_TG_CONFIG |
- RANDOM_DPI_DISPLAY_RESOLUTION);
+ intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port),
+ intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format | IP_TG_CONFIG | RANDOM_DPI_DISPLAY_RESOLUTION);
}
}
@@ -1514,19 +1546,19 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
if (IS_GEN9_LP(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
- val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
val &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
}
}
@@ -1559,59 +1591,6 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static enum drm_panel_orientation
-vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_encoder *encoder = connector->encoder;
- enum intel_display_power_domain power_domain;
- enum drm_panel_orientation orientation;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- intel_wakeref_t wakeref;
- enum pipe pipe;
- u32 val;
-
- if (!encoder->get_hw_state(encoder, &pipe))
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- plane = to_intel_plane(crtc->base.primary);
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- val = I915_READ(DSPCNTR(plane->i9xx_plane));
-
- if (!(val & DISPLAY_PLANE_ENABLE))
- orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- else if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- else
- orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return orientation;
-}
-
-static enum drm_panel_orientation
-vlv_dsi_get_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum drm_panel_orientation orientation;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- orientation = vlv_dsi_get_hw_panel_orientation(connector);
- if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- return orientation;
- }
-
- return intel_dsi_get_panel_orientation(connector);
-}
-
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1628,10 +1607,9 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- vlv_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(
+ drm_connector_set_panel_orientation_with_quirk(
&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
@@ -1703,7 +1681,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+ drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
@@ -1723,7 +1702,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1733,7 +1713,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1742,7 +1723,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
+ drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
@@ -1817,7 +1799,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1849,6 +1831,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
+ if (IS_GEN9_LP(dev_priv))
+ intel_encoder->enable = bxt_dsi_enable;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1894,18 +1878,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- DRM_DEBUG_KMS("no device found\n");
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(intel_encoder);
if (current_mode) {
- DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n",
- intel_dsi->pclk, current_mode->clock);
+ drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- DRM_DEBUG_KMS("Using GOP pclk\n");
+ drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -1933,7 +1917,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
+ drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 6b89e67b120f..d0a514301575 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -64,7 +64,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- DRM_ERROR("DSI CLK Out of Range\n");
+ drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
@@ -126,7 +126,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
if (ret) {
- DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+ drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -138,8 +138,8 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
- config->dsi_pll.div, config->dsi_pll.ctrl);
+ drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ config->dsi_pll.div, config->dsi_pll.ctrl);
return 0;
}
@@ -149,7 +149,7 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
@@ -169,12 +169,12 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DSI_PLL_LOCK, 20)) {
vlv_cck_put(dev_priv);
- DRM_ERROR("DSI PLL lock failed\n");
+ drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
return;
}
vlv_cck_put(dev_priv);
- DRM_DEBUG_KMS("DSI PLL locked\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
@@ -182,7 +182,7 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
@@ -201,7 +201,7 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -215,15 +215,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = I915_READ(BXT_DSI_PLL_CTL);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
if (IS_GEMINILAKE(dev_priv)) {
if (!(val & BXT_DSIA_16X_MASK)) {
- DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -236,11 +238,11 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
val &= ~BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
/*
* PLL lock should deassert within 200us.
@@ -248,7 +250,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
*/
if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for PLL lock deassertion\n");
}
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
@@ -263,7 +266,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
int i;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -292,7 +295,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- DRM_ERROR("wrong P1 divisor\n");
+ drm_err(&dev_priv->drm, "wrong P1 divisor\n");
return 0;
}
@@ -302,7 +305,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- DRM_ERROR("wrong m_seed programmed\n");
+ drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -325,7 +328,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
@@ -333,7 +336,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
- DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
+ drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -343,11 +346,10 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- temp = I915_READ(MIPI_CTRL(port));
+ temp = intel_de_read(dev_priv, MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(port), temp |
- intel_dsi->escape_clk_div <<
- ESCAPE_CLOCK_DIVIDER_SHIFT);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
static void glk_dsi_program_esc_clock(struct drm_device *dev,
@@ -393,8 +395,10 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
else
txesc2_div = 10;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
@@ -412,7 +416,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -448,7 +452,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
@@ -478,10 +482,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
+ drm_err(&dev_priv->drm,
+ "Cant get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- DRM_DEBUG_KMS("DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -507,11 +512,11 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* Configure PLL values */
- I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- POSTING_READ(BXT_DSI_PLL_CTL);
+ intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
if (IS_BROXTON(dev_priv)) {
@@ -522,18 +527,19 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
}
/* Enable DSI PLL */
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
val |= BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSI PLL to lock\n");
return;
}
- DRM_DEBUG_KMS("DSI PLL locked\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -544,20 +550,20 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
/* Clear old configurations */
if (IS_BROXTON(dev_priv)) {
- tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
- I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
- tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
}
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
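glk_dsi_program_esc_clock() above picks two cascaded dividers, each capped at 10, so that the divided clock lands at or below the 20 MHz escape-clock limit, and then encodes each divider as (1 << (div - 1)) masked to its field. A sketch of the selection under those assumptions (the helper name and exact rounding are illustrative, not the upstream code):

static void pick_txesc_dividers(u32 src_clk_khz, u32 *txesc1, u32 *txesc2)
{
	/* total division needed for a <= 20 MHz escape clock */
	u32 div = DIV_ROUND_UP(src_clk_khz, 20000);

	*txesc1 = min_t(u32, DIV_ROUND_UP(div, 10), 10);
	*txesc2 = min_t(u32, DIV_ROUND_UP(div, *txesc1), 10);
}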
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 81366aa4812b..0598e5382a1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -217,7 +217,7 @@ static void clear_pages_worker(struct work_struct *work)
0);
out_request:
if (unlikely(err)) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err = 0;
}
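The clear_pages_worker() fix swaps i915_request_skip() for i915_request_set_error_once(), whose point is that only the first error recorded on a request's fence sticks, even with concurrent setters. The semantic, sketched as a plain cmpxchg loop (simplified relative to the real helper, which also filters fatal errors and already-completed requests):

static void set_error_once(int *error, int new_error)
{
	int old = READ_ONCE(*error);

	do {
		if (old)	/* an earlier error already recorded */
			return;
	} while (!try_cmpxchg(error, &old, new_error));
}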
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 151a1e8ae36a..68326ad3b2e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,14 +67,11 @@
#include <linux/log2.h>
#include <linux/nospec.h>
-#include <drm/i915_drm.h>
-
#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
+#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
-#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
#include "i915_gem_context.h"
@@ -245,7 +242,6 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count)
if (!e->engines[count])
continue;
- RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
@@ -258,7 +254,51 @@ static void free_engines(struct i915_gem_engines *e)
static void free_engines_rcu(struct rcu_head *rcu)
{
- free_engines(container_of(rcu, struct i915_gem_engines, rcu));
+ struct i915_gem_engines *engines =
+ container_of(rcu, struct i915_gem_engines, rcu);
+
+ i915_sw_fence_fini(&engines->fence);
+ free_engines(engines);
+}
+
+static int __i915_sw_fence_call
+engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_gem_engines *engines =
+ container_of(fence, typeof(*engines), fence);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (!list_empty(&engines->link)) {
+ struct i915_gem_context *ctx = engines->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->stale.lock, flags);
+ list_del(&engines->link);
+ spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ }
+ i915_gem_context_put(engines->ctx);
+ break;
+
+ case FENCE_FREE:
+ init_rcu_head(&engines->rcu);
+ call_rcu(&engines->rcu, free_engines_rcu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct i915_gem_engines *alloc_engines(unsigned int count)
+{
+ struct i915_gem_engines *e;
+
+ e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ i915_sw_fence_init(&e->fence, engines_notify);
+ return e;
}
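alloc_engines() arms each engines array with the engines_notify() callback above, which fires twice: FENCE_COMPLETE once every awaited fence has signalled (unlink from ctx->stale and drop the context reference), and FENCE_FREE when the last reference goes away (defer the actual kfree past RCU readers). The skeleton of that two-phase lifecycle, with hypothetical names:

struct example_obj {
	struct i915_sw_fence fence;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_obj, rcu));
}

static int __i915_sw_fence_call
example_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct example_obj *obj = container_of(fence, typeof(*obj), fence);

	switch (state) {
	case FENCE_COMPLETE:
		/* all awaited dependencies have signalled */
		break;
	case FENCE_FREE:
		/* last reference dropped: free only after RCU readers */
		init_rcu_head(&obj->rcu);
		call_rcu(&obj->rcu, example_free_rcu);
		break;
	}

	return NOTIFY_DONE;
}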
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
@@ -268,11 +308,10 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
struct i915_gem_engines *e;
enum intel_engine_id id;
- e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+ e = alloc_engines(I915_NUM_ENGINES);
if (!e)
return ERR_PTR(-ENOMEM);
- init_rcu_head(&e->rcu);
for_each_engine(engine, gt, id) {
struct intel_context *ce;
@@ -306,7 +345,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
- free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
@@ -421,7 +459,7 @@ static struct intel_engine_cs *__active_engine(struct i915_request *rq)
}
engine = NULL;
- if (i915_request_is_active(rq) && !rq->fence.error)
+ if (i915_request_is_active(rq) && rq->fence.error != -EIO)
engine = rq->engine;
spin_unlock_irq(&locked->active.lock);
@@ -452,7 +490,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
return engine;
}
-static void kill_context(struct i915_gem_context *ctx)
+static void kill_engines(struct i915_gem_engines *engines)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -464,7 +502,7 @@ static void kill_context(struct i915_gem_context *ctx)
* However, we only care about pending requests, so only include
* engines on which there are incomplete requests.
*/
- for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+ for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
if (intel_context_set_banned(ce))
@@ -486,8 +524,82 @@ static void kill_context(struct i915_gem_context *ctx)
* the context from the GPU, we have to resort to a full
* reset. We hope the collateral damage is worth it.
*/
- __reset_context(ctx, engine);
+ __reset_context(engines->ctx, engine);
+ }
+}
+
+static void kill_stale_engines(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *pos, *next;
+
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
+ if (!i915_sw_fence_await(&pos->fence)) {
+ list_del_init(&pos->link);
+ continue;
+ }
+
+ spin_unlock_irq(&ctx->stale.lock);
+
+ kill_engines(pos);
+
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
+ list_safe_reset_next(pos, next, link);
+ list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
+
+ i915_sw_fence_complete(&pos->fence);
}
+ spin_unlock_irq(&ctx->stale.lock);
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+ kill_stale_engines(ctx);
+}
+
+static void engines_idle_release(struct i915_gem_context *ctx,
+ struct i915_gem_engines *engines)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&engines->link);
+
+ engines->ctx = i915_gem_context_get(ctx);
+
+ for_each_gem_engine(ce, engines, it) {
+ struct dma_fence *fence;
+ int err = 0;
+
+ /* serialises with execbuf */
+ set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+ if (!intel_context_pin_if_active(ce))
+ continue;
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ err = i915_sw_fence_await_dma_fence(&engines->fence,
+ fence, 0,
+ GFP_KERNEL);
+ dma_fence_put(fence);
+ }
+ intel_context_unpin(ce);
+ if (err < 0)
+ goto kill;
+ }
+
+ spin_lock_irq(&ctx->stale.lock);
+ if (!i915_gem_context_is_closed(ctx))
+ list_add_tail(&engines->link, &ctx->stale.engines);
+ spin_unlock_irq(&ctx->stale.lock);
+
+kill:
+ if (list_empty(&engines->link)) /* raced, already closed */
+ kill_engines(engines);
+
+ i915_sw_fence_commit(&engines->fence);
}
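kill_stale_engines() above drops ctx->stale.lock while resetting, so it first pins each engines object with i915_sw_fence_await(), which raises the fence's pending count and keeps it from signalling (and being freed) until the matching i915_sw_fence_complete(). The pin-while-unlocked shape, isolated as a sketch (the wrapper name is hypothetical):

static void kill_one_unlocked(struct i915_gem_context *ctx,
			      struct i915_gem_engines *pos)
{
	if (!i915_sw_fence_await(&pos->fence))
		return;	/* already signalled, nothing pending to kill */

	spin_unlock_irq(&ctx->stale.lock);
	kill_engines(pos);	/* safe: the await pins the object */
	spin_lock_irq(&ctx->stale.lock);

	i915_sw_fence_complete(&pos->fence);
}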
static void set_closed_name(struct i915_gem_context *ctx)
@@ -511,11 +623,16 @@ static void context_close(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
+ /* Flush any concurrent set_engines() */
+ mutex_lock(&ctx->engines_mutex);
+ engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
i915_gem_context_set_closed(ctx);
- set_closed_name(ctx);
+ mutex_unlock(&ctx->engines_mutex);
mutex_lock(&ctx->mutex);
+ set_closed_name(ctx);
+
vm = i915_gem_context_vm(ctx);
if (vm)
i915_vm_close(vm);
@@ -604,6 +721,9 @@ __create_context(struct drm_i915_private *i915)
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
mutex_init(&ctx->engines_mutex);
e = default_engines(ctx);
if (IS_ERR(e)) {
@@ -637,23 +757,30 @@ err_free:
return ERR_PTR(err);
}
-static void
+static int
context_apply_all(struct i915_gem_context *ctx,
- void (*fn)(struct intel_context *ce, void *data),
+ int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
- fn(ce, data);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ err = fn(ce, data);
+ if (err)
+ break;
+ }
i915_gem_context_unlock_engines(ctx);
+
+ return err;
}
-static void __apply_ppgtt(struct intel_context *ce, void *vm)
+static int __apply_ppgtt(struct intel_context *ce, void *vm)
{
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
+ return 0;
}
static struct i915_address_space *
@@ -691,9 +818,10 @@ static void __set_timeline(struct intel_timeline **dst,
intel_timeline_put(old);
}
-static void __apply_timeline(struct intel_context *ce, void *timeline)
+static int __apply_timeline(struct intel_context *ce, void *timeline)
{
__set_timeline(&ce->timeline, timeline);
+ return 0;
}
static void __assign_timeline(struct i915_gem_context *ctx,
@@ -724,8 +852,8 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt)) {
- DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
- PTR_ERR(ppgtt));
+ drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
+ PTR_ERR(ppgtt));
context_close(ctx);
return ERR_CAST(ppgtt);
}
@@ -767,20 +895,15 @@ static void init_contexts(struct i915_gem_contexts *gc)
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
init_contexts(&i915->gem.contexts);
- DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(i915)->has_logical_contexts ?
- "logical" : "fake");
+ drm_dbg(&i915->drm, "%s context support initialized\n",
+ DRIVER_CAPS(i915)->has_logical_contexts ?
+ "logical" : "fake");
}
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
flush_work(&i915->gem.contexts.free_work);
-}
-
-static int vm_idr_cleanup(int id, void *p, void *data)
-{
- i915_vm_put(p);
- return 0;
+ rcu_barrier(); /* and flush the leftover RCU frees */
}
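
The rcu_barrier() added above pairs with the call_rcu() frees of stale engine sets: unlike synchronize_rcu(), it waits for already-queued RCU callbacks to finish running. A hedged sketch of the idiom (hypothetical struct, kernel style):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { struct rcu_head rcu; };

static void foo_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
        call_rcu(&f->rcu, foo_free_rcu);        /* deferred free */
}

static void foo_driver_release(void)
{
        /*
         * synchronize_rcu() only waits out readers; rcu_barrier() waits
         * for every previously queued call_rcu() callback to have run,
         * so no foo_free_rcu() can fire after the driver is gone.
         */
        rcu_barrier();
}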
static int gem_context_register(struct i915_gem_context *ctx,
@@ -820,8 +943,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
- mutex_init(&file_priv->vm_idr_lock);
- idr_init_base(&file_priv->vm_idr, 1);
+ /* 0 reserved for invalid/unassigned ppgtt */
+ xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx)) {
@@ -839,9 +962,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
err_ctx:
context_close(ctx);
err:
- idr_destroy(&file_priv->vm_idr);
+ xa_destroy(&file_priv->vm_xa);
xa_destroy(&file_priv->context_xa);
- mutex_destroy(&file_priv->vm_idr_lock);
return err;
}
@@ -849,6 +971,7 @@ void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx;
@@ -856,9 +979,9 @@ void i915_gem_context_close(struct drm_file *file)
context_close(ctx);
xa_destroy(&file_priv->context_xa);
- idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
- idr_destroy(&file_priv->vm_idr);
- mutex_destroy(&file_priv->vm_idr_lock);
+ xa_for_each(&file_priv->vm_xa, idx, vm)
+ i915_vm_put(vm);
+ xa_destroy(&file_priv->vm_xa);
contexts_flush_free(&i915->gem.contexts);
}
@@ -870,6 +993,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_vm_control *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_ppgtt *ppgtt;
+ u32 id;
int err;
if (!HAS_FULL_PPGTT(i915))
@@ -892,23 +1016,15 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
goto err_put;
}
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
+ xa_limit_32b, GFP_KERNEL);
if (err)
goto err_put;
- err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
- if (err < 0)
- goto err_unlock;
-
- GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
-
- mutex_unlock(&file_priv->vm_idr_lock);
-
- args->vm_id = err;
+ GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+ args->vm_id = id;
return 0;
-err_unlock:
- mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_vm_put(&ppgtt->vm);
return err;
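
For context on the conversion: xa_alloc() both allocates the id and stores the entry under the XArray's internal lock, and XA_FLAGS_ALLOC1 keeps id 0 reserved, which is why the old vm_idr_lock/idr_alloc pairing and the explicit !id check can go away. A minimal sketch of the same pattern (hypothetical array, kernel idiom):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(vm_ids);   /* ids start at 1; 0 = no vm */

static int vm_id_alloc(void *vm, u32 *id)
{
        /* internally locked, so no extra mutex is required */
        return xa_alloc(&vm_ids, id, vm, xa_limit_32b, GFP_KERNEL);
}

static void *vm_id_free(u32 id)
{
        return xa_erase(&vm_ids, id);   /* returns the old entry, or NULL */
}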
@@ -920,8 +1036,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_vm_control *args = data;
struct i915_address_space *vm;
- int err;
- u32 id;
if (args->flags)
return -EINVAL;
@@ -929,17 +1043,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
if (args->extensions)
return -EINVAL;
- id = args->vm_id;
- if (!id)
- return -ENOENT;
-
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (err)
- return err;
-
- vm = idr_remove(&file_priv->vm_idr, id);
-
- mutex_unlock(&file_priv->vm_idr_lock);
+ vm = xa_erase(&file_priv->vm_xa, args->vm_id);
if (!vm)
return -ENOENT;
@@ -965,6 +1069,30 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ if (unlikely(!engines))
+ break;
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
+
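
__context_engines_await() is the classic acquire-then-recheck loop: take the RCU-protected pointer, acquire a reference (here a sw_fence await), then verify the pointer has not been replaced, backing off and retrying otherwise. A standalone sketch of the same loop using C11 atomics and a plain refcount in place of RCU and the fence (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct set { atomic_int refs; };        /* 0 means the set is dying */

static bool try_get(struct set *s)
{
        int old = atomic_load(&s->refs);

        do {
                if (old == 0)           /* like a completed sw_fence */
                        return false;
        } while (!atomic_compare_exchange_weak(&s->refs, &old, old + 1));
        return true;
}

static struct set *await(_Atomic(struct set *) *slot)
{
        struct set *s;

        for (;;) {
                s = atomic_load(slot);
                if (!s)
                        return NULL;    /* owner closed */
                if (!try_get(s))
                        continue;       /* dying set, reload the slot */
                if (s == atomic_load(slot))
                        return s;       /* ref held and still current */
                atomic_fetch_sub(&s->refs, 1); /* replaced meanwhile; retry */
        }
}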
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
@@ -975,6 +1103,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
{
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
@@ -991,7 +1120,13 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ if (!e) {
+ i915_active_release(&cb->base);
+ return -ENOENT;
+ }
+
+ for_each_gem_engine(ce, e, it) {
struct i915_request *rq;
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
@@ -1022,7 +1157,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
@@ -1037,7 +1172,8 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct drm_i915_gem_context_param *args)
{
struct i915_address_space *vm;
- int ret;
+ int err;
+ u32 id;
if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
@@ -1045,27 +1181,22 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
rcu_read_lock();
vm = context_get_vm_rcu(ctx);
rcu_read_unlock();
+ if (!vm)
+ return -ENODEV;
- ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (ret)
+ err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+ if (err)
goto err_put;
- ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
- GEM_BUG_ON(!ret);
- if (ret < 0)
- goto err_unlock;
-
i915_vm_open(vm);
+ GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+ args->value = id;
args->size = 0;
- args->value = ret;
- ret = 0;
-err_unlock:
- mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_vm_put(vm);
- return ret;
+ return err;
}
static void set_ppgtt_barrier(void *data)
@@ -1167,7 +1298,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
return -ENOENT;
rcu_read_lock();
- vm = idr_find(&file_priv->vm_idr, args->value);
+ vm = xa_load(&file_priv->vm_xa, args->value);
if (vm && !kref_get_unless_zero(&vm->ref))
vm = NULL;
rcu_read_unlock();
@@ -1213,87 +1344,61 @@ out:
return err;
}
-static int gen8_emit_rpcs_config(struct i915_request *rq,
- struct intel_context *ce,
- struct intel_sseu sseu)
+static int __apply_ringsize(struct intel_context *ce, void *sz)
{
- u64 offset;
- u32 *cs;
+ return intel_context_set_ring_size(ce, (unsigned long)sz);
+}
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+static int set_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
+
+ if (args->size)
+ return -EINVAL;
- offset = i915_ggtt_offset(ce->state) +
- LRC_STATE_PN * PAGE_SIZE +
- CTX_R_PWR_CLK_STATE * 4;
+ if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
+ return -EINVAL;
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+ if (args->value < I915_GTT_PAGE_SIZE)
+ return -EINVAL;
- intel_ring_advance(rq, cs);
+ if (args->value > 128 * I915_GTT_PAGE_SIZE)
+ return -EINVAL;
- return 0;
+ return context_apply_all(ctx,
+ __apply_ringsize,
+ __intel_context_ring_size(args->value));
}
-static int
-gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
+static int __get_ringsize(struct intel_context *ce, void *arg)
{
- struct i915_request *rq;
- int ret;
-
- lockdep_assert_held(&ce->pin_mutex);
-
- /*
- * If the context is not idle, we have to submit an ordered request to
- * modify its context image via the kernel context (writing to our own
- * image, or into the registers directly, does not stick). Pristine
- * and idle contexts will be configured on pinning.
- */
- if (!intel_context_pin_if_active(ce))
- return 0;
+ long sz;
- rq = intel_engine_create_kernel_request(ce->engine);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- goto out_unpin;
- }
-
- /* Serialise with the remote context */
- ret = intel_context_prepare_remote_request(ce, rq);
- if (ret == 0)
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
+ sz = intel_context_get_ring_size(ce);
+ GEM_BUG_ON(sz > INT_MAX);
- i915_request_add(rq);
-out_unpin:
- intel_context_unpin(ce);
- return ret;
+ return sz; /* stop on first engine */
}
-static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
+static int get_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- int ret;
+ int sz;
- GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
-
- ret = intel_context_lock_pinned(ce);
- if (ret)
- return ret;
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
- /* Nothing to do if unmodified. */
- if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
- goto unlock;
+ if (args->size)
+ return -EINVAL;
- ret = gen8_modify_rpcs(ce, sseu);
- if (!ret)
- ce->sseu = sseu;
+ sz = context_apply_all(ctx, __get_ringsize, NULL);
+ if (sz < 0)
+ return sz;
-unlock:
- intel_context_unlock_pinned(ce);
- return ret;
+ args->value = sz;
+ return 0;
}
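
Note the contract being exploited here: context_apply_all() stops on the first non-zero return, so __get_ringsize() deliberately returns a positive value as data rather than an error, and get_ringsize() only treats negative returns as failures. Sketched generically (hypothetical items, not the driver's types):

/* Walk items until the callback returns non-zero; negative means error,
 * positive lets a callback smuggle a result back to the caller. */
static int apply_all(int (*fn)(int item, void *data), void *data,
                     const int *items, int n)
{
        int ret = 0;

        for (int i = 0; i < n; i++) {
                ret = fn(items[i], data);
                if (ret)
                        break;
        }
        return ret;
}

static int first_item(int item, void *unused)
{
        (void)unused;
        return item;    /* stop on first item, like __get_ringsize() */
}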
static int
@@ -1460,6 +1565,7 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
struct i915_context_engines_load_balance __user *ext =
container_of_user(base, typeof(*ext), base);
const struct set_engines *set = data;
+ struct drm_i915_private *i915 = set->ctx->i915;
struct intel_engine_cs *stack[16];
struct intel_engine_cs **siblings;
struct intel_context *ce;
@@ -1467,24 +1573,25 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
unsigned int n;
int err;
- if (!HAS_EXECLISTS(set->ctx->i915))
+ if (!HAS_EXECLISTS(i915))
return -ENODEV;
- if (USES_GUC_SUBMISSION(set->ctx->i915))
+ if (intel_uc_uses_guc_submission(&i915->gt.uc))
 return -ENODEV; /* not implemented yet */
if (get_user(idx, &ext->engine_index))
return -EFAULT;
if (idx >= set->engines->num_engines) {
- DRM_DEBUG("Invalid placement value, %d >= %d\n",
- idx, set->engines->num_engines);
+ drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+ idx, set->engines->num_engines);
return -EINVAL;
}
idx = array_index_nospec(idx, set->engines->num_engines);
if (set->engines->engines[idx]) {
- DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
+ drm_dbg(&i915->drm,
+ "Invalid placement[%d], already occupied\n", idx);
return -EEXIST;
}
@@ -1516,12 +1623,13 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
goto out_siblings;
}
- siblings[n] = intel_engine_lookup_user(set->ctx->i915,
+ siblings[n] = intel_engine_lookup_user(i915,
ci.engine_class,
ci.engine_instance);
if (!siblings[n]) {
- DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
err = -EINVAL;
goto out_siblings;
}
@@ -1554,6 +1662,7 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
struct i915_context_engines_bond __user *ext =
container_of_user(base, typeof(*ext), base);
const struct set_engines *set = data;
+ struct drm_i915_private *i915 = set->ctx->i915;
struct i915_engine_class_instance ci;
struct intel_engine_cs *virtual;
struct intel_engine_cs *master;
@@ -1564,14 +1673,15 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
return -EFAULT;
if (idx >= set->engines->num_engines) {
- DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
- idx, set->engines->num_engines);
+ drm_dbg(&i915->drm,
+ "Invalid index for virtual engine: %d >= %d\n",
+ idx, set->engines->num_engines);
return -EINVAL;
}
idx = array_index_nospec(idx, set->engines->num_engines);
if (!set->engines->engines[idx]) {
- DRM_DEBUG("Invalid engine at %d\n", idx);
+ drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
return -EINVAL;
}
virtual = set->engines->engines[idx]->engine;
@@ -1589,11 +1699,12 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
if (copy_from_user(&ci, &ext->master, sizeof(ci)))
return -EFAULT;
- master = intel_engine_lookup_user(set->ctx->i915,
+ master = intel_engine_lookup_user(i915,
ci.engine_class, ci.engine_instance);
if (!master) {
- DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
- ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Unrecognised master engine: { class:%u, instance:%u }\n",
+ ci.engine_class, ci.engine_instance);
return -EINVAL;
}
@@ -1606,12 +1717,13 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
return -EFAULT;
- bond = intel_engine_lookup_user(set->ctx->i915,
+ bond = intel_engine_lookup_user(i915,
ci.engine_class,
ci.engine_instance);
if (!bond) {
- DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+ n, ci.engine_class, ci.engine_instance);
return -EINVAL;
}
@@ -1640,6 +1752,7 @@ static int
set_engines(struct i915_gem_context *ctx,
const struct drm_i915_gem_context_param *args)
{
+ struct drm_i915_private *i915 = ctx->i915;
struct i915_context_param_engines __user *user =
u64_to_user_ptr(args->value);
struct set_engines set = { .ctx = ctx };
@@ -1661,8 +1774,8 @@ set_engines(struct i915_gem_context *ctx,
BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
if (args->size < sizeof(*user) ||
!IS_ALIGNED(args->size, sizeof(*user->engines))) {
- DRM_DEBUG("Invalid size for engine array: %d\n",
- args->size);
+ drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
+ args->size);
return -EINVAL;
}
@@ -1671,13 +1784,10 @@ set_engines(struct i915_gem_context *ctx,
* first 64 engines defined here.
*/
num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
-
- set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
- GFP_KERNEL);
+ set.engines = alloc_engines(num_engines);
if (!set.engines)
return -ENOMEM;
- init_rcu_head(&set.engines->rcu);
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
@@ -1698,8 +1808,9 @@ set_engines(struct i915_gem_context *ctx,
ci.engine_class,
ci.engine_instance);
if (!engine) {
- DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Invalid engine[%d]: { class:%d, instance:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
__free_engines(set.engines, n);
return -ENOENT;
}
@@ -1729,6 +1840,11 @@ set_engines(struct i915_gem_context *ctx,
replace:
mutex_lock(&ctx->engines_mutex);
+ if (i915_gem_context_is_closed(ctx)) {
+ mutex_unlock(&ctx->engines_mutex);
+ free_engines(set.engines);
+ return -ENOENT;
+ }
if (args->size)
i915_gem_context_set_user_engines(ctx);
else
@@ -1736,7 +1852,8 @@ replace:
set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
mutex_unlock(&ctx->engines_mutex);
- call_rcu(&set.engines->rcu, free_engines_rcu);
+ /* Keep track of old engine sets for kill_context() */
+ engines_idle_release(ctx, set.engines);
return 0;
}
@@ -1747,11 +1864,10 @@ __copy_engines(struct i915_gem_engines *e)
struct i915_gem_engines *copy;
unsigned int n;
- copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ copy = alloc_engines(e->num_engines);
if (!copy)
return ERR_PTR(-ENOMEM);
- init_rcu_head(&copy->rcu);
for (n = 0; n < e->num_engines; n++) {
if (e->engines[n])
copy->engines[n] = intel_context_get(e->engines[n]);
@@ -1852,17 +1968,19 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
-static void __apply_priority(struct intel_context *ce, void *arg)
+static int __apply_priority(struct intel_context *ce, void *arg)
{
struct i915_gem_context *ctx = arg;
if (!intel_engine_has_semaphores(ce->engine))
- return;
+ return 0;
if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
intel_context_set_use_semaphores(ce);
else
intel_context_clear_use_semaphores(ce);
+
+ return 0;
}
static int set_priority(struct i915_gem_context *ctx,
@@ -1955,6 +2073,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = set_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1983,6 +2105,18 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
+static int copy_ring_size(struct intel_context *dst,
+ struct intel_context *src)
+{
+ long sz;
+
+ sz = intel_context_get_ring_size(src);
+ if (sz < 0)
+ return sz;
+
+ return intel_context_set_ring_size(dst, sz);
+}
+
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
@@ -1991,11 +2125,10 @@ static int clone_engines(struct i915_gem_context *dst,
bool user_engines;
unsigned long n;
- clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
- init_rcu_head(&clone->rcu);
for (n = 0; n < e->num_engines; n++) {
struct intel_engine_cs *engine;
@@ -2025,6 +2158,12 @@ static int clone_engines(struct i915_gem_context *dst,
}
intel_context_set_gem(clone->engines[n], dst);
+
+ /* Copy across the preferred ringsize */
+ if (copy_ring_size(clone->engines[n], e->engines[n])) {
+ __free_engines(clone, n + 1);
+ goto err_unlock;
+ }
}
clone->num_engines = n;
@@ -2032,8 +2171,7 @@ static int clone_engines(struct i915_gem_context *dst,
i915_gem_context_unlock_engines(src);
/* Serialised by constructor */
- free_engines(__context_engines_static(dst));
- RCU_INIT_POINTER(dst->engines, clone);
+ engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
if (user_engines)
i915_gem_context_set_user_engines(dst);
else
@@ -2213,8 +2351,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
ext_data.fpriv = file->driver_priv;
if (client_is_banned(ext_data.fpriv)) {
- DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm, task_pid_nr(current));
+ drm_dbg(&i915->drm,
+ "client %s[%d] banned from creating ctx\n",
+ current->comm, task_pid_nr(current));
return -EIO;
}
@@ -2236,7 +2375,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_ctx;
args->ctx_id = id;
- DRM_DEBUG("HW context %d created\n", args->ctx_id);
+ drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
return 0;
@@ -2386,6 +2525,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_persistent(ctx);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = get_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2459,6 +2602,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
const struct i915_gem_engines *e = it->engines;
struct intel_context *ctx;
+ if (unlikely(!e))
+ return NULL;
+
do {
if (it->idx >= e->num_engines)
return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3ae61a355d87..f1d884d304bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -192,12 +192,16 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
- struct intel_context *ce = ERR_PTR(-EINVAL);
+ struct intel_context *ce;
rcu_read_lock(); {
struct i915_gem_engines *e = rcu_dereference(ctx->engines);
- if (likely(idx < e->num_engines && e->engines[idx]))
+ if (unlikely(!e)) /* context was closed! */
+ ce = ERR_PTR(-ENOENT);
+ else if (likely(idx < e->num_engines && e->engines[idx]))
ce = intel_context_get(e->engines[idx]);
+ else
+ ce = ERR_PTR(-EINVAL);
} rcu_read_unlock();
return ce;
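
The rework above distinguishes "context closed" (-ENOENT) from "bad index" (-EINVAL) using the usual ERR_PTR() convention, where one pointer slot carries either a valid object or an encoded errno. A hedged sketch (struct obj is hypothetical):

#include <linux/err.h>

struct obj;

static struct obj *lookup(struct obj **table, unsigned int count,
                          unsigned int idx)
{
        if (!table)
                return ERR_PTR(-ENOENT);        /* table torn down: closed */
        if (idx >= count || !table[idx])
                return ERR_PTR(-EINVAL);        /* bad index */
        return table[idx];
}

/* caller: obj = lookup(...); if (IS_ERR(obj)) return PTR_ERR(obj); */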
@@ -207,7 +211,6 @@ static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
struct i915_gem_engines *engines)
{
- GEM_BUG_ON(!engines);
it->engines = engines;
it->idx = 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 017ca803ab47..28760bd03265 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -20,6 +20,7 @@
#include "gt/intel_context_types.h"
#include "i915_scheduler.h"
+#include "i915_sw_fence.h"
struct pid;
@@ -30,7 +31,12 @@ struct intel_timeline;
struct intel_ring;
struct i915_gem_engines {
- struct rcu_head rcu;
+ union {
+ struct list_head link;
+ struct rcu_head rcu;
+ };
+ struct i915_sw_fence fence;
+ struct i915_gem_context *ctx;
unsigned int num_engines;
struct intel_context *engines[];
};
@@ -173,6 +179,11 @@ struct i915_gem_context {
* context in messages.
*/
char name[TASK_COMM_LEN + 8];
+
+ struct {
+ spinlock_t lock;
+ struct list_head engines;
+ } stale;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 372b57ca0efc..7db5a793739d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -48,7 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
src = sg_next(src);
}
- if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ if (!dma_map_sg_attrs(attachment->dev,
+ st->sgl, st->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
ret = -ENOMEM;
goto err_free_sg;
}
@@ -71,7 +73,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ dma_unmap_sg_attrs(attachment->dev,
+ sg->sgl, sg->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sg);
kfree(sg);
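
DMA_ATTR_SKIP_CPU_SYNC skips the cache maintenance normally done at map/unmap time, which is safe here because dma-buf users are expected to bracket CPU access explicitly. A hedged sketch of the pairing (hypothetical wrappers):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_for_device(struct device *dev, struct sg_table *st,
                          enum dma_data_direction dir)
{
        if (!dma_map_sg_attrs(dev, st->sgl, st->nents, dir,
                              DMA_ATTR_SKIP_CPU_SYNC))
                return -ENOMEM;
        return 0;
}

static void before_cpu_access(struct device *dev, struct sg_table *st,
                              enum dma_data_direction dir)
{
        /* needed only because the map above skipped the CPU sync */
        dma_sync_sg_for_cpu(dev, st->sgl, st->nents, dir);
}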
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7643a30ba4cd..36d069504836 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -10,7 +10,6 @@
#include <linux/uaccess.h>
#include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
@@ -28,6 +27,19 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
+struct eb_vma {
+ struct i915_vma *vma;
+ unsigned int flags;
+
+ /** This vma's place in the execbuf reservation list */
+ struct drm_i915_gem_exec_object2 *exec;
+ struct list_head bind_link;
+ struct list_head reloc_link;
+
+ struct hlist_node node;
+ u32 handle;
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -35,17 +47,15 @@ enum {
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
-#define __EXEC_OBJECT_HAS_REF BIT(31)
-#define __EXEC_OBJECT_HAS_PIN BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_HAS_PIN BIT(31)
+#define __EXEC_OBJECT_HAS_FENCE BIT(30)
+#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
+#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
-#define __EXEC_VALIDATED BIT(30)
-#define __EXEC_INTERNAL_FLAGS (~0u << 30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 31)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
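
With __EXEC_OBJECT_HAS_REF gone, the internal flags repack into bits 31..28 and the mask shifts accordingly. The invariant the new mask encodes can be cross-checked at compile time; a sketch with standalone names (not the driver's):

#include <linux/bits.h>
#include <linux/build_bug.h>

#define SK_HAS_PIN      BIT(31)
#define SK_HAS_FENCE    BIT(30)
#define SK_NEEDS_MAP    BIT(29)
#define SK_NEEDS_BIAS   BIT(28)

/* (~0u << 28) must cover exactly the four internal bits above */
static_assert((~0u << 28) ==
              (SK_HAS_PIN | SK_HAS_FENCE | SK_NEEDS_MAP | SK_NEEDS_BIAS));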
@@ -220,15 +230,14 @@ struct i915_execbuffer {
struct drm_file *file; /** per-file lookup tables and limits */
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
- struct i915_vma **vma;
- unsigned int *flags;
+ struct eb_vma *vma;
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
struct i915_request *request; /** our request to build */
- struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct eb_vma *batch; /** identity of the batch obj/vma */
struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -276,8 +285,6 @@ struct i915_execbuffer {
struct hlist_head *buckets; /** ht for relocation handles */
};
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
+ struct eb_vma *ev)
{
- unsigned int exec_flags = *vma->exec_flags;
+ struct i915_vma *vma = ev->vma;
u64 pin_flags;
if (vma->node.size)
@@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags = entry->offset & PIN_OFFSET_MASK;
pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
return false;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma);
return false;
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- return !eb_vma_misplaced(entry, vma, exec_flags);
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
+ return !eb_vma_misplaced(entry, vma, ev->flags);
}
static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
@@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
}
static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
+eb_unreserve_vma(struct eb_vma *ev)
{
- if (!(*flags & __EXEC_OBJECT_HAS_PIN))
+ if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
return;
- __eb_unreserve_vma(vma, *flags);
- *flags &= ~__EXEC_OBJECT_RESERVED;
+ __eb_unreserve_vma(ev->vma, ev->flags);
+ ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
static int
@@ -442,13 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
} else {
entry->pad_to_size = 0;
}
-
- if (unlikely(vma->exec_flags)) {
- DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
- entry->handle, (int)(entry - eb->exec));
- return -EINVAL;
- }
-
/*
* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
@@ -471,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb,
return 0;
}
-static int
+static void
eb_add_vma(struct i915_execbuffer *eb,
unsigned int i, unsigned batch_idx,
struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- int err;
+ struct eb_vma *ev = &eb->vma[i];
GEM_BUG_ON(i915_vma_is_closed(vma));
- if (!(eb->args->flags & __EXEC_VALIDATED)) {
- err = eb_validate_vma(eb, entry, vma);
- if (unlikely(err))
- return err;
- }
+ ev->vma = i915_vma_get(vma);
+ ev->exec = entry;
+ ev->flags = entry->flags;
if (eb->lut_size > 0) {
- vma->exec_handle = entry->handle;
- hlist_add_head(&vma->exec_node,
+ ev->handle = entry->handle;
+ hlist_add_head(&ev->node,
&eb->buckets[hash_32(entry->handle,
eb->lut_size)]);
}
if (entry->relocation_count)
- list_add_tail(&vma->reloc_link, &eb->relocs);
-
- /*
- * Stash a pointer from the vma to execobj, so we can query its flags,
- * size, alignment etc as provided by the user. Also we stash a pointer
- * to the vma inside the execobj so that we can use a direct lookup
- * to find the right target VMA when doing relocations.
- */
- eb->vma[i] = vma;
- eb->flags[i] = entry->flags;
- vma->exec_flags = &eb->flags[i];
+ list_add_tail(&ev->reloc_link, &eb->relocs);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -518,30 +506,23 @@ eb_add_vma(struct i915_execbuffer *eb,
*/
if (i == batch_idx) {
if (entry->relocation_count &&
- !(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ !(ev->flags & EXEC_OBJECT_PINNED))
+ ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+ ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- eb->batch = vma;
+ eb->batch = ev;
}
- err = 0;
- if (eb_pin_vma(eb, entry, vma)) {
+ if (eb_pin_vma(eb, entry, ev)) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
- eb_unreserve_vma(vma, vma->exec_flags);
-
- list_add_tail(&vma->exec_link, &eb->unbound);
- if (drm_mm_node_allocated(&vma->node))
- err = i915_vma_unbind(vma);
- if (unlikely(err))
- vma->exec_flags = NULL;
+ eb_unreserve_vma(ev);
+ list_add_tail(&ev->bind_link, &eb->unbound);
}
- return err;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -562,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
}
static int eb_reserve_vma(const struct i915_execbuffer *eb,
- struct i915_vma *vma)
+ struct eb_vma *ev,
+ u64 pin_flags)
{
- struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- unsigned int exec_flags = *vma->exec_flags;
- u64 pin_flags;
+ struct drm_i915_gem_exec_object2 *entry = ev->exec;
+ unsigned int exec_flags = ev->flags;
+ struct i915_vma *vma = ev->vma;
int err;
- pin_flags = PIN_USER | PIN_NONBLOCK;
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
pin_flags |= PIN_GLOBAL;
@@ -583,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
pin_flags |= PIN_MAPPABLE;
- if (exec_flags & EXEC_OBJECT_PINNED) {
+ if (exec_flags & EXEC_OBJECT_PINNED)
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
- } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ if (drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(entry, vma, ev->flags)) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
}
err = i915_vma_pin(vma,
@@ -612,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+ ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
}
@@ -621,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
static int eb_reserve(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
+ unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
struct list_head last;
- struct i915_vma *vma;
+ struct eb_vma *ev;
unsigned int i, pass;
- int err;
+ int err = 0;
/*
* Attempt to pin all of the buffers into the GTT.
@@ -640,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb)
* room for the earlier objects *unless* we need to defragment.
*/
+ if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+ return -EINTR;
+
pass = 0;
- err = 0;
do {
- list_for_each_entry(vma, &eb->unbound, exec_link) {
- err = eb_reserve_vma(eb, vma);
+ list_for_each_entry(ev, &eb->unbound, bind_link) {
+ err = eb_reserve_vma(eb, ev, pin_flags);
if (err)
break;
}
- if (err != -ENOSPC)
- return err;
+ if (!(err == -ENOSPC || err == -EAGAIN))
+ break;
/* Resort *all* the objects into priority order */
INIT_LIST_HEAD(&eb->unbound);
INIT_LIST_HEAD(&last);
for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ unsigned int flags;
+ ev = &eb->vma[i];
+ flags = ev->flags;
if (flags & EXEC_OBJECT_PINNED &&
flags & __EXEC_OBJECT_HAS_PIN)
continue;
- eb_unreserve_vma(vma, &eb->flags[i]);
+ eb_unreserve_vma(ev);
if (flags & EXEC_OBJECT_PINNED)
/* Pinned must have their slot */
- list_add(&vma->exec_link, &eb->unbound);
+ list_add(&ev->bind_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
 /* Mappable objects require the lowest 256MiB (aperture) */
- list_add_tail(&vma->exec_link, &eb->unbound);
+ list_add_tail(&ev->bind_link, &eb->unbound);
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
/* Prioritise 4GiB region for restricted bo */
- list_add(&vma->exec_link, &last);
+ list_add(&ev->bind_link, &last);
else
- list_add_tail(&vma->exec_link, &last);
+ list_add_tail(&ev->bind_link, &last);
}
list_splice_tail(&last, &eb->unbound);
+ if (err == -EAGAIN) {
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ flush_workqueue(eb->i915->mm.userptr_wq);
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ continue;
+ }
+
switch (pass++) {
case 0:
break;
@@ -688,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
err = i915_gem_evict_vm(eb->context->vm);
mutex_unlock(&eb->context->vm->mutex);
if (err)
- return err;
+ goto unlock;
break;
default:
- return -ENOSPC;
+ err = -ENOSPC;
+ goto unlock;
}
+
+ pin_flags = PIN_USER;
} while (1);
+
+unlock:
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ return err;
}
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
@@ -731,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
+ return -ENOENT;
+
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
batch = eb_batch_index(eb);
- mutex_lock(&eb->gem_context->mutex);
- if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
- err = -ENOENT;
- goto err_ctx;
- }
-
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -786,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
i915_gem_object_unlock(obj);
add_vma:
- err = eb_add_vma(eb, i, batch, vma);
+ err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err))
goto err_vma;
- GEM_BUG_ON(vma != eb->vma[i]);
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
- eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
+ eb_add_vma(eb, i, batch, vma);
}
- mutex_unlock(&eb->gem_context->mutex);
-
- eb->args->flags |= __EXEC_VALIDATED;
- return eb_reserve(eb);
+ return 0;
err_obj:
i915_gem_object_put(obj);
err_vma:
- eb->vma[i] = NULL;
-err_ctx:
- mutex_unlock(&eb->gem_context->mutex);
+ eb->vma[i].vma = NULL;
return err;
}
-static struct i915_vma *
+static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
if (eb->lut_size < 0) {
if (handle >= -eb->lut_size)
return NULL;
- return eb->vma[handle];
+ return &eb->vma[handle];
} else {
struct hlist_head *head;
- struct i915_vma *vma;
+ struct eb_vma *ev;
head = &eb->buckets[hash_32(handle, eb->lut_size)];
- hlist_for_each_entry(vma, head, exec_node) {
- if (vma->exec_handle == handle)
- return vma;
+ hlist_for_each_entry(ev, head, node) {
+ if (ev->handle == handle)
+ return ev;
}
return NULL;
}
@@ -836,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
unsigned int i;
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
- unsigned int flags = eb->flags[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
if (!vma)
break;
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- vma->exec_flags = NULL;
- eb->vma[i] = NULL;
+ eb->vma[i].vma = NULL;
- if (flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, flags);
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __eb_unreserve_vma(vma, ev->flags);
- if (flags & __EXEC_OBJECT_HAS_REF)
- i915_vma_put(vma);
+ i915_vma_put(vma);
}
}
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
- eb_release_vmas(eb);
- if (eb->lut_size > 0)
- memset(eb->buckets, 0,
- sizeof(struct hlist_head) << eb->lut_size);
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
@@ -1197,7 +1179,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1328,10 +1310,11 @@ out:
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+ struct eb_vma *ev,
const struct drm_i915_gem_relocation_entry *reloc)
{
- struct i915_vma *target;
+ struct drm_i915_private *i915 = eb->i915;
+ struct eb_vma *target;
int err;
 /* we already hold a reference to all valid objects */
@@ -1341,7 +1324,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
- DRM_DEBUG("reloc with multiple write domains: "
+ drm_dbg(&i915->drm, "reloc with multiple write domains: "
"target %d offset %d "
"read %08x write %08x",
reloc->target_handle,
@@ -1352,7 +1335,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
- DRM_DEBUG("reloc with read/write non-GPU domains: "
+ drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
"target %d offset %d "
"read %08x write %08x",
reloc->target_handle,
@@ -1363,7 +1346,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (reloc->write_domain) {
- *target->exec_flags |= EXEC_OBJECT_WRITE;
+ target->flags |= EXEC_OBJECT_WRITE;
/*
* Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1373,7 +1356,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
- err = i915_vma_bind(target, target->obj->cache_level,
+ err = i915_vma_bind(target->vma,
+ target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
@@ -1386,21 +1370,21 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset >
- vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
- DRM_DEBUG("Relocation beyond object bounds: "
+ ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
+ drm_dbg(&i915->drm, "Relocation beyond object bounds: "
"target %d offset %d size %d.\n",
reloc->target_handle,
(int)reloc->offset,
- (int)vma->size);
+ (int)ev->vma->size);
return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
- DRM_DEBUG("Relocation not 4-byte aligned: "
+ drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
"target %d offset %d.\n",
reloc->target_handle,
(int)reloc->offset);
@@ -1415,18 +1399,18 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* do relocations we are already stalling, disable the user's opt
* out of our synchronisation.
*/
- *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+ ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(vma, reloc, eb, target);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
-static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *urelocs;
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+ const struct drm_i915_gem_exec_object2 *entry = ev->exec;
unsigned int remain;
urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1456,9 +1440,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* we would try to acquire the struct mutex again. Obviously
* this is bad and so lockdep complains vehemently.
*/
- pagefault_disable();
- copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
- pagefault_enable();
+ copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
@@ -1466,7 +1448,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
remain -= count;
do {
- u64 offset = eb_relocate_entry(eb, vma, r);
+ u64 offset = eb_relocate_entry(eb, ev, r);
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
@@ -1508,281 +1490,34 @@ out:
return remain;
}
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
-{
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- struct drm_i915_gem_relocation_entry *relocs =
- u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- unsigned int i;
- int err;
-
- for (i = 0; i < entry->relocation_count; i++) {
- u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
-
- if ((s64)offset < 0) {
- err = (int)offset;
- goto err;
- }
- }
- err = 0;
-err:
- reloc_cache_reset(&eb->reloc_cache);
- return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
- const char __user *addr, *end;
- unsigned long size;
- char __maybe_unused c;
-
- size = entry->relocation_count;
- if (size == 0)
- return 0;
-
- if (size > N_RELOC(ULONG_MAX))
- return -EINVAL;
-
- addr = u64_to_user_ptr(entry->relocs_ptr);
- size *= sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(addr, size))
- return -EFAULT;
-
- end = addr + size;
- for (; addr < end; addr += PAGE_SIZE) {
- int err = __get_user(c, addr);
- if (err)
- return err;
- }
- return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
+static int eb_relocate(struct i915_execbuffer *eb)
{
- struct drm_i915_gem_relocation_entry *relocs;
- const unsigned int count = eb->buffer_count;
- unsigned int i;
int err;
- for (i = 0; i < count; i++) {
- const unsigned int nreloc = eb->exec[i].relocation_count;
- struct drm_i915_gem_relocation_entry __user *urelocs;
- unsigned long size;
- unsigned long copied;
-
- if (nreloc == 0)
- continue;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- goto err;
-
- urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
- size = nreloc * sizeof(*relocs);
-
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
- if (!relocs) {
- err = -ENOMEM;
- goto err;
- }
-
- /* copy_from_user is limited to < 4GiB */
- copied = 0;
- do {
- unsigned int len =
- min_t(u64, BIT_ULL(31), size - copied);
-
- if (__copy_from_user((char *)relocs + copied,
- (char __user *)urelocs + copied,
- len))
- goto end;
-
- copied += len;
- } while (copied < size);
-
- /*
- * As we do not update the known relocation offsets after
- * relocating (due to the complexities in lock handling),
- * we need to mark them as invalid now so that we force the
- * relocation processing next time. Just in case the target
- * object is evicted and then rebound into its old
- * presumed_offset before the next execbuffer - if that
- * happened we would make the mistake of assuming that the
- * relocations were valid.
- */
- if (!user_access_begin(urelocs, size))
- goto end;
-
- for (copied = 0; copied < nreloc; copied++)
- unsafe_put_user(-1,
- &urelocs[copied].presumed_offset,
- end_user);
- user_access_end();
-
- eb->exec[i].relocs_ptr = (uintptr_t)relocs;
- }
-
- return 0;
-
-end_user:
- user_access_end();
-end:
- kvfree(relocs);
- err = -EFAULT;
-err:
- while (i--) {
- relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
- if (eb->exec[i].relocation_count)
- kvfree(relocs);
- }
- return err;
-}
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- if (unlikely(i915_modparams.prefault_disable))
- return 0;
-
- for (i = 0; i < count; i++) {
- int err;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
- struct drm_device *dev = &eb->i915->drm;
- bool have_copy = false;
- struct i915_vma *vma;
- int err = 0;
-
-repeat:
- if (signal_pending(current)) {
- err = -ERESTARTSYS;
- goto out;
- }
-
- /* We may process another execbuffer during the unlock... */
- eb_reset_vmas(eb);
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * We take 3 passes through the slowpath.
- *
- * 1 - we try to just prefault all the user relocation entries and
- * then attempt to reuse the atomic pagefault disabled fast path again.
- *
- * 2 - we copy the user entries to a local buffer here outside of the
- * lock and allow ourselves to wait upon any rendering before
- * relocations
- *
- * 3 - we already have a local copy of the relocation entries, but
- * were interrupted (EAGAIN) whilst waiting for the objects, try again.
- */
- if (!err) {
- err = eb_prefault_relocations(eb);
- } else if (!have_copy) {
- err = eb_copy_relocations(eb);
- have_copy = err == 0;
- } else {
- cond_resched();
- err = 0;
- }
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* A frequent cause of EAGAIN is currently unavailable client pages */
- flush_workqueue(eb->i915->mm.userptr_wq);
-
- err = i915_mutex_lock_interruptible(dev);
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* reacquire the objects */
+ mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
+ mutex_unlock(&eb->gem_context->mutex);
if (err)
- goto err;
-
- GEM_BUG_ON(!eb->batch);
-
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (!have_copy) {
- pagefault_disable();
- err = eb_relocate_vma(eb, vma);
- pagefault_enable();
- if (err)
- goto repeat;
- } else {
- err = eb_relocate_vma_slow(eb, vma);
- if (err)
- goto err;
- }
- }
-
- /*
- * Leave the user relocations as they are; this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- if (err == -EAGAIN)
- goto repeat;
-
-out:
- if (have_copy) {
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry =
- &eb->exec[i];
- struct drm_i915_gem_relocation_entry *relocs;
-
- if (!entry->relocation_count)
- continue;
+ return err;
- relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- kvfree(relocs);
- }
+ if (!list_empty(&eb->unbound)) {
+ err = eb_reserve(eb);
+ if (err)
+ return err;
}
- return err;
-}
-
-static int eb_relocate(struct i915_execbuffer *eb)
-{
- if (eb_lookup_vmas(eb))
- goto slow;
-
/* The objects are in their final locations, apply the relocations. */
if (eb->args->flags & __EXEC_HAS_RELOC) {
- struct i915_vma *vma;
+ struct eb_vma *ev;
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (eb_relocate_vma(eb, vma))
- goto slow;
+ list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ err = eb_relocate_vma(eb, ev);
+ if (err)
+ return err;
}
}
return 0;
-
-slow:
- return eb_relocate_slow(eb);
}
static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1795,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
- if (!err)
- continue;
-
- GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
-
if (err == -EDEADLK) {
GEM_BUG_ON(i == 0);
do {
int j = i - 1;
- ww_mutex_unlock(&eb->vma[j]->resv->lock);
+ ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
- swap(eb->flags[i], eb->flags[j]);
swap(eb->vma[i], eb->vma[j]);
- eb->vma[i]->exec_flags = &eb->flags[i];
} while (--i);
- GEM_BUG_ON(vma != eb->vma[0]);
- vma->exec_flags = &eb->flags[0];
err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
&acquire);
@@ -1826,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_done(&acquire);
while (i--) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+ unsigned int flags = ev->flags;
struct drm_i915_gem_object *obj = vma->obj;
assert_vma_held(vma);
@@ -1871,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
i915_vma_unlock(vma);
__eb_unreserve_vma(vma, flags);
- vma->exec_flags = NULL;
+ i915_vma_put(vma);
- if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
- i915_vma_put(vma);
+ ev->vma = NULL;
}
ww_acquire_fini(&acquire);
@@ -1888,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
return 0;
err_skip:
- i915_request_skip(eb->request, err);
+ i915_request_set_error_once(eb->request, err);
return err;
}
@@ -1922,7 +1649,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
int i;
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
- DRM_DEBUG("sol reset is gen7/rcs only\n");
+ drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2009,7 +1736,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
- err = i915_active_acquire(&eb->batch->active);
+ err = i915_active_acquire(&eb->batch->vma->active);
if (err)
goto err_free;
@@ -2026,7 +1753,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
- pw->batch = eb->batch;
+ pw->batch = eb->batch->vma;
pw->batch_offset = eb->batch_start_offset;
pw->batch_length = eb->batch_len;
pw->shadow = shadow;
@@ -2068,7 +1795,7 @@ err_trampoline:
err_shadow:
i915_active_release(&shadow->active);
err_batch:
- i915_active_release(&eb->batch->active);
+ i915_active_release(&eb->batch->vma->active);
err_free:
kfree(pw);
return err;
@@ -2076,6 +1803,7 @@ err_free:
static int eb_parse(struct i915_execbuffer *eb)
{
+ struct drm_i915_private *i915 = eb->i915;
struct intel_engine_pool_node *pool;
struct i915_vma *shadow, *trampoline;
unsigned int len;
@@ -2091,7 +1819,8 @@ static int eb_parse(struct i915_execbuffer *eb)
* post-scan tampering
*/
if (!eb->context->vm->has_read_only) {
- DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ drm_dbg(&i915->drm,
+ "Cannot prevent post-scan tampering without RO capable vm\n");
return -EINVAL;
}
} else {
@@ -2129,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb)
if (err)
goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(shadow);
- eb->flags[eb->buffer_count] =
- __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- shadow->exec_flags = &eb->flags[eb->buffer_count];
- eb->buffer_count++;
+ eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
+ eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batch = &eb->vma[eb->buffer_count++];
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = shadow;
shadow->private = pool;
return 0;
@@ -2164,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
-static int eb_submit(struct i915_execbuffer *eb)
+static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
@@ -2191,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb)
}
err = eb->engine->emit_bb_start(eb->request,
- eb->batch->node.start +
+ batch->node.start +
eb->batch_start_offset,
eb->batch_len,
eb->batch_flags);
@@ -2326,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
intel_context_timeline_unlock(tl);
if (rq) {
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- i915_request_put(rq);
- err = -EINTR;
- goto err_exit;
- }
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ long timeout;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (nonblock)
+ timeout = 0;
+
+ timeout = i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ timeout);
i915_request_put(rq);
+
+ if (timeout < 0) {
+ err = nonblock ? -EWOULDBLOCK : timeout;
+ goto err_exit;
+ }
}
eb->engine = ce->engine;
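
The O_NONBLOCK handling above follows the usual pattern: a nonblocking caller gets a zero timeout (a pure poll of the ring-full condition), and a timed-out wait is reported as -EWOULDBLOCK rather than as the raw wait result. Sketched with a hypothetical wait_for_done():

#include <errno.h>
#include <limits.h>
#include <stdbool.h>

#define SK_MAX_TIMEOUT  LONG_MAX        /* stand-in for MAX_SCHEDULE_TIMEOUT */

extern long wait_for_done(void *obj, long timeout);     /* <0 on timeout/error */

static long wait_maybe_nonblock(void *obj, bool nonblock)
{
        long timeout = nonblock ? 0 : SK_MAX_TIMEOUT;

        timeout = wait_for_done(obj, timeout);
        if (timeout < 0)
                return nonblock ? -EWOULDBLOCK : timeout;
        return 0;
}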
@@ -2372,8 +2105,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
if (user_ring_id != I915_EXEC_BSD &&
(args->flags & I915_EXEC_BSD_MASK)) {
- DRM_DEBUG("execbuf with non bsd ring but with invalid "
- "bsd dispatch flags: %d\n", (int)(args->flags));
+ drm_dbg(&i915->drm,
+ "execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
return -1;
}
@@ -2387,8 +2121,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
bsd_idx >>= I915_EXEC_BSD_SHIFT;
bsd_idx--;
} else {
- DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
- bsd_idx);
+ drm_dbg(&i915->drm,
+ "execbuf with unknown bsd ring: %u\n",
+ bsd_idx);
return -1;
}
@@ -2396,7 +2131,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
- DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+ drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
+ user_ring_id);
return -1;
}
@@ -2556,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb,
}
}
+static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (rq == end || !i915_request_retire(rq))
+ break;
+}
+
+static void eb_request_add(struct i915_execbuffer *eb)
+{
+ struct i915_request *rq = eb->request;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
+ struct i915_request *prev;
+
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+
+ prev = __i915_request_commit(rq);
+
+ /* Check that the context wasn't destroyed before submission */
+ if (likely(!intel_context_is_closed(eb->context))) {
+ attr = eb->gem_context->sched;
+
+ /*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI-boosting a semaphore
+ * far into the future over more useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+ } else {
+ /* Serialise with context_close via the add_to_timeline */
+ i915_request_set_error_once(rq, -ENOENT);
+ __i915_request_skip(rq);
+ }
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /* Try to clean up the client's timeline after submitting the request */
+ if (prev)
+ retire_requests(tl, prev);
+
+ mutex_unlock(&tl->mutex);
+}
+
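
The two boosts applied by eb_request_add() can be read as a single priority function: never PI-boost past a semaphore wait, and let brand-new request flows (no signalers) jump the bulk queue, FQ_CODEL-style. A sketch with hypothetical fields and flags:

#include <stdbool.h>

#define PRIO_NOSEMAPHORE        (1 << 0)
#define PRIO_WAIT               (1 << 1)

struct req_hint { bool used_semaphores; bool has_signalers; };

static int effective_priority(const struct req_hint *rq, int base)
{
        int prio = base;

        if (!rq->used_semaphores)       /* don't boost past semaphore waits */
                prio |= PRIO_NOSEMAPHORE;
        if (!rq->has_signalers)         /* new flow: interactive clients first */
                prio |= PRIO_WAIT;
        return prio;
}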
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
@@ -2568,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
+ struct i915_vma *batch;
int out_fence_fd = -1;
int err;
@@ -2582,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
- eb.vma[0] = NULL;
- eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+ eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2652,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_context;
- err = i915_mutex_lock_interruptible(dev);
- if (err)
- goto err_engine;
-
err = eb_relocate(&eb);
if (err) {
/*
@@ -2669,20 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
- DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
+ drm_dbg(&i915->drm,
+ "Attempting to use self-modifying batch buffer\n");
err = -EINVAL;
goto err_vma;
}
- if (eb.batch_start_offset > eb.batch->size ||
- eb.batch_len > eb.batch->size - eb.batch_start_offset) {
- DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+
+ if (range_overflows_t(u64,
+ eb.batch_start_offset, eb.batch_len,
+ eb.batch->vma->size)) {
+ drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
err = -EINVAL;
goto err_vma;
}
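Editor's note: range_overflows_t(u64, start, len, size) used above is, to a first approximation, an overflow-safe form of `start + len > size`; the naive sum can wrap and slip past the old bounds check. A minimal sketch of the equivalent test (my reconstruction, not the i915_utils.h macro verbatim):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool range_overflows_u64(uint64_t start, uint64_t len, uint64_t size)
    {
        /* equivalent to start + len > size, but immune to wraparound */
        return start > size || len > size - start;
    }

    int main(void)
    {
        /* naive start + len wraps to 0 here and would pass the check */
        printf("%d\n", range_overflows_u64(1, UINT64_MAX, 4096));   /* 1 */
        printf("%d\n", range_overflows_u64(0, 4096, 4096));         /* 0 */
        return 0;
    }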
if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
+ eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
err = eb_parse(&eb);
if (err)
@@ -2692,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
+ batch = eb.batch->vma;
if (eb.batch_flags & I915_DISPATCH_SECURE) {
struct i915_vma *vma;
@@ -2705,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* fitting due to fragmentation.
* So this is actually safe.
*/
- vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vma;
+ goto err_parse;
}
- eb.batch = vma;
+ batch = vma;
}
/* All GPU relocation batches must be submitted prior to the user rq */
@@ -2758,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
- eb.request->batch = eb.batch;
- if (eb.batch->private)
- intel_engine_pool_mark_active(eb.batch->private, eb.request);
+ eb.request->batch = batch;
+ if (batch->private)
+ intel_engine_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
- err = eb_submit(&eb);
+ err = eb_submit(&eb, batch);
err_request:
add_to_client(eb.request, file);
i915_request_get(eb.request);
- i915_request_add(eb.request);
+ eb_request_add(&eb);
if (fences)
signal_fence_array(&eb, fences);
@@ -2786,16 +2589,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
- i915_vma_unpin(eb.batch);
- if (eb.batch->private)
- intel_engine_pool_put(eb.batch->private);
+ i915_vma_unpin(batch);
+err_parse:
+ if (batch->private)
+ intel_engine_pool_put(batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
- mutex_unlock(&dev->struct_mutex);
-err_engine:
eb_unpin_engine(&eb);
err_context:
i915_gem_context_put(eb.gem_context);
@@ -2813,9 +2615,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
+ return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}
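Editor's note: the new eb_element_size() pairs each user exec entry with one driver-side eb_vma slot in a single allocation; eb.vma then simply aliases the tail of the same buffer, as the pointer setup earlier in this patch shows. A standalone sketch of that layout, with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>

    struct exec_object { unsigned long long handle, offset; };
    struct eb_vma { void *vma; unsigned int flags; };

    static size_t eb_element_size(void)
    {
        return sizeof(struct exec_object) + sizeof(struct eb_vma);
    }

    int main(void)
    {
        unsigned int count = 4;
        struct exec_object *exec;
        struct eb_vma *vma;

        /* kvmalloc_array(count + 1, eb_element_size(), ...) analogue */
        exec = calloc(count + 1, eb_element_size());
        if (!exec)
            return 1;

        /* eb.vma aliases the tail of the same allocation */
        vma = (struct eb_vma *)(exec + count + 1);
        vma[0].vma = NULL;

        printf("exec at %p, eb_vma array at %p\n", (void *)exec, (void *)vma);
        free(exec);
        return 0;
    }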
static bool check_buffer_count(size_t count)
@@ -2839,6 +2639,7 @@ int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -2848,7 +2649,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
int err;
if (!check_buffer_count(count)) {
- DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+ drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2873,8 +2674,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
+ drm_dbg(&i915->drm,
+ "Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
kvfree(exec_list);
kvfree(exec2_list);
return -ENOMEM;
@@ -2883,8 +2685,8 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * count);
if (err) {
- DRM_DEBUG("copy %d exec entries failed %d\n",
- args->buffer_count, err);
+ drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
+ args->buffer_count, err);
kvfree(exec_list);
kvfree(exec2_list);
return -EFAULT;
@@ -2931,6 +2733,7 @@ int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
struct drm_syncobj **fences = NULL;
@@ -2938,7 +2741,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
int err;
if (!check_buffer_count(count)) {
- DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+ drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2950,14 +2753,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
- count);
+ drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
+ count);
return -ENOMEM;
}
if (copy_from_user(exec2_list,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * count)) {
- DRM_DEBUG("copy %zd exec entries failed\n", count);
+ drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
kvfree(exec2_list);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 9cfb0e41ff06..cbbff81aa0af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -8,8 +8,6 @@
#include <linux/slab.h>
#include <linux/swiotlb.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0b6a442108de..b39c24dae64e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -613,8 +613,7 @@ __assign_mmap_offset(struct drm_file *file,
if (!obj)
return -ENOENT;
- if (mmap_type == I915_MMAP_TYPE_GTT &&
- i915_gem_object_never_bind_ggtt(obj)) {
+ if (i915_gem_object_never_mmap(obj)) {
err = -ENODEV;
goto out;
}
@@ -776,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
struct file *file;
rcu_read_lock();
- file = i915->gem.mmap_singleton;
+ file = READ_ONCE(i915->gem.mmap_singleton);
if (file && !get_file_rcu(file))
file = NULL;
rcu_read_unlock();
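Editor's note: the READ_ONCE() above forces the racy load of mmap_singleton to happen exactly once, so the compiler cannot reload the pointer between the NULL test and get_file_rcu(). A loose userspace analogue using a C11 relaxed atomic load (the kernel macro is a volatile access, not an atomic, so this is only a model):

    #include <stdatomic.h>
    #include <stdio.h>

    struct file { int refcount; };

    static _Atomic(struct file *) singleton;

    static struct file *get_singleton(void)
    {
        /* one explicit load; no chance of the pointer being re-read */
        return atomic_load_explicit(&singleton, memory_order_relaxed);
    }

    int main(void)
    {
        static struct file f = { .refcount = 1 };

        atomic_store_explicit(&singleton, &f, memory_order_relaxed);
        printf("singleton: %p\n", (void *)get_singleton());
        return 0;
    }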
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 9c86f2dea947..2faa481cc18f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -11,8 +11,6 @@
#include <drm/drm_file.h>
#include <drm/drm_device.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
@@ -194,9 +192,9 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
}
static inline bool
-i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 70809d8897cd..e00792158f13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -186,7 +186,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
0);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
@@ -196,6 +196,17 @@ out_unpin:
return err;
}
+/* Wa_1209644611:icl,ehl */
+static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
+{
+ u32 height = size >> PAGE_SHIFT;
+
+ if (!IS_GEN(i915, 11))
+ return false;
+
+ return height % 4 == 3 && height <= 8;
+}
+
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *src,
struct i915_vma *dst)
@@ -237,7 +248,8 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (INTEL_GEN(i915) >= 9) {
+ if (INTEL_GEN(i915) >= 9 &&
+ !wa_1209644611_applies(i915, size)) {
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
*cmd++ = 0;
@@ -385,7 +397,7 @@ out_unlock:
drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
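Editor's note: the shape of the new workaround check is easy to sanity-test in isolation: on a gen11 part the fast-copy path is skipped when the transfer height in pages is 3 or 7 (height % 4 == 3 and height <= 8). A standalone version, assuming 4K pages:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4K pages */

    static bool wa_1209644611_applies(uint32_t size)
    {
        uint32_t height = size >> PAGE_SHIFT;

        return height % 4 == 3 && height <= 8;
    }

    int main(void)
    {
        for (uint32_t pages = 1; pages <= 9; pages++)
            printf("%u pages -> %s\n", pages,
                   wa_1209644611_applies(pages << PAGE_SHIFT) ?
                   "slow path" : "fast copy");
        return 0;
    }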
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c2174da35bb0..a0b10bcd8d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -34,7 +34,7 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
-#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_NO_MMAP BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 54aca5c9101e..24f4cadea114 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -83,10 +83,12 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
- DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ drm_dbg(&i915->drm,
+ "Attempting to obtain a purgeable object\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b07bb40edd5a..698e22420dc5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -194,10 +194,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages)) {
+ if (!IS_ERR_OR_NULL(pages))
i915_gem_shmem_ops.put_pages(obj, pages);
- i915_gem_object_release_memory_region(obj);
- }
+
+ i915_gem_object_release_memory_region(obj);
+
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index c8264eb036bf..3d215164dd5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -85,7 +85,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ drm_WARN_ON(&i915->drm,
+ i915_gem_object_set_to_gtt_domain(obj, false));
i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index a2a980d9d241..5d5d7eef3f43 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -148,7 +148,8 @@ rebuild_st:
last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works. */
- WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+ drm_WARN_ON(&i915->drm,
+ (gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
if (sg) { /* loop terminated early; short sg table */
sg_page_sizes |= sg->length;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 59b387ade49c..03e5eb4c99d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
-#include <drm/i915_drm.h>
#include "i915_trace.h"
@@ -401,19 +400,22 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
i915->mm.shrinker.seeks = DEFAULT_SEEKS;
i915->mm.shrinker.batch = 4096;
- WARN_ON(register_shrinker(&i915->mm.shrinker));
+ drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
+ drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
- WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ drm_WARN_ON(&i915->drm,
+ register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
- WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
- WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+ drm_WARN_ON(&i915->drm,
+ unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ drm_WARN_ON(&i915->drm,
+ unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&i915->mm.shrinker);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 451f3078d60d..5557dfa83a7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,7 @@
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_vgpu.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -110,8 +111,11 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
- DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
- DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
+ drm_dbg(&i915->drm,
+ "GTT within stolen memory at %pR\n",
+ &ggtt_res);
+ drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
+ dsm);
}
}
@@ -142,8 +146,9 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
* range. Apparently this works.
*/
if (!r && !IS_GEN(i915, 3)) {
- DRM_ERROR("conflict detected with stolen region: %pR\n",
- dsm);
+ drm_err(&i915->drm,
+ "conflict detected with stolen region: %pR\n",
+ dsm);
return -EBUSY;
}
@@ -171,8 +176,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
ELK_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
- IS_GM45(i915) ? "CTG" : "ELK", reg_val);
+ drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
+ IS_GM45(i915) ? "CTG" : "ELK", reg_val);
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
return;
@@ -181,14 +186,16 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
- reg_val);
+ drm_WARN(&i915->drm, IS_GEN(i915, 5),
+ "ILK stolen reserved found? 0x%08x\n",
+ reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
return;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
- WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
+ drm_WARN_ON(&i915->drm,
+ (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
*size = stolen_top - *base;
}
@@ -200,7 +207,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -234,7 +241,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -262,7 +269,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -289,7 +296,7 @@ static void chv_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -323,7 +330,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -342,7 +349,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
{
u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
@@ -453,8 +460,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
* it likely means we failed to read the registers correctly.
*/
if (!reserved_base) {
- DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
- &reserved_base, &reserved_size);
+ drm_err(&i915->drm,
+ "inconsistent reservation %pa + %pa; ignoring\n",
+ &reserved_base, &reserved_size);
reserved_base = stolen_top;
reserved_size = 0;
}
@@ -463,8 +471,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
- DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
- &i915->dsm_reserved, &i915->dsm);
+ drm_err(&i915->drm,
+ "Stolen reserved area %pR outside stolen memory %pR\n",
+ &i915->dsm_reserved, &i915->dsm);
return 0;
}
@@ -472,9 +481,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&i915->dsm) >> 10,
- ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
+ drm_dbg(&i915->drm,
+ "Memory reserved for graphics device: %lluK, usable: %lluK\n",
+ (u64)resource_size(&i915->dsm) >> 10,
+ ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
i915->stolen_usable_size =
resource_size(&i915->dsm) - reserved_total;
@@ -677,26 +687,24 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
- resource_size_t gtt_offset,
resource_size_t size)
{
struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
- struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
- struct i915_vma *vma;
int ret;
if (!drm_mm_initialized(&i915->mm.stolen))
return ERR_PTR(-ENODEV);
- DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
- &stolen_offset, &gtt_offset, &size);
+ drm_dbg(&i915->drm,
+ "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
+ &stolen_offset, &size);
/* KISS and expect everything to be page-aligned */
- if (WARN_ON(size == 0) ||
- WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
- WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
+ if (GEM_WARN_ON(size == 0) ||
+ GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+ GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -709,68 +717,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
if (ret) {
- DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
- kfree(stolen);
- return ERR_PTR(ret);
+ obj = ERR_PTR(ret);
+ goto err_free;
}
obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj)) {
- DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(i915, stolen);
- kfree(stolen);
- return obj;
- }
-
- /* Some objects just need physical mem from stolen space */
- if (gtt_offset == I915_GTT_OFFSET_NONE)
- return obj;
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- vma = i915_vma_instance(obj, &ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_pages;
- }
-
- /* To simplify the initialisation sequence between KMS and GTT,
- * we allow construction of the stolen object prior to
- * setting up the GTT space. The actual reservation will occur
- * later.
- */
- mutex_lock(&ggtt->vm.mutex);
- ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- size, gtt_offset, obj->cache_level,
- 0);
- if (ret) {
- DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
- mutex_unlock(&ggtt->vm.mutex);
- goto err_pages;
- }
-
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
- GEM_BUG_ON(vma->pages);
- vma->pages = obj->mm.pages;
- atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
-
- set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
- __i915_vma_set_map_and_fenceable(vma);
-
- list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
- mutex_unlock(&ggtt->vm.mutex);
-
- GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
- atomic_inc(&obj->bind_count);
+ if (IS_ERR(obj))
+ goto err_stolen;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
return obj;
-err_pages:
- i915_gem_object_unpin_pages(obj);
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(ret);
+err_stolen:
+ i915_gem_stolen_remove_node(i915, stolen);
+err_free:
+ kfree(stolen);
+ return obj;
}
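Editor's note: the rewritten tail of this function is the standard kernel unwind idiom: a single success path, with failures jumping to labels that release resources in reverse order of acquisition. A toy standalone shape of it (all names and steps invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    static void *create(int fail_reserve)
    {
        void *node, *obj = NULL;

        node = malloc(64);          /* step 1: bookkeeping node */
        if (!node)
            return NULL;

        if (fail_reserve)           /* step 2: reserve space */
            goto err_free;

        obj = malloc(128);          /* step 3: the object */
        if (!obj)
            goto err_reserve;

        free(node); /* in the driver the object takes ownership instead */
        return obj;

    err_reserve:
        /* undo step 2 here */
    err_free:
        free(node);
        return NULL;    /* the driver returns an ERR_PTR() here */
    }

    int main(void)
    {
        printf("%p\n", create(1));  /* NULL: unwound via err_free */
        return 0;
    }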
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index c1040627fbf3..e15c0adad8af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -28,7 +28,6 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
resource_size_t stolen_offset,
- resource_size_t gtt_offset,
resource_size_t size);
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 6c7825a2dc2a..37f77aee1212 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -6,7 +6,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 580319b7bf1a..7ffd7afeb7a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -10,8 +10,6 @@
#include <linux/swap.h>
#include <linux/sched/mm.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -704,7 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
- I915_GEM_OBJECT_NO_GGTT |
+ I915_GEM_OBJECT_NO_MMAP |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
@@ -770,6 +768,23 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
+ /*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ *
+ * Aside from our own locals (for which we have no excuse!):
+ * - sg_table embeds unsigned int for num_pages
+ * - get_user_pages*() mixed ints with longs
+ */
+
+ if (args->user_size >> PAGE_SHIFT > INT_MAX)
+ return -E2BIG;
+
+ if (overflows_type(args->user_size, obj->base.size))
+ return -E2BIG;
+
if (!args->user_size)
return -EINVAL;
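Editor's note: the two guards above encode the comment's warning: the page count must fit a signed 32-bit int, and the user size must fit the object-size field. A standalone sketch of the same checks (overflows_type() modelled as a plain range comparison; error numbers are the usual Linux values):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4K pages */

    static int check_user_size(uint64_t user_size, uint64_t obj_size_max)
    {
        if (user_size >> PAGE_SHIFT > INT_MAX)
            return -7;                      /* -E2BIG */
        if (user_size > obj_size_max)       /* overflows_type() analogue */
            return -7;
        if (!user_size)
            return -22;                     /* -EINVAL */
        return 0;
    }

    int main(void)
    {
        /* 2^43 bytes is 2^31 pages: one more than INT_MAX allows */
        printf("%d\n", check_user_size(1ull << 43, UINT64_MAX));    /* -7 */
        printf("%d\n", check_user_size(1ull << 20, UINT64_MAX));    /* 0 */
        return 0;
    }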
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 9311250d7d6f..2d0fd50c5312 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1208,107 +1208,6 @@ static int igt_write_huge(struct i915_gem_context *ctx,
return err;
}
-static int igt_ppgtt_exhaust_huge(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
- static unsigned int pages[ARRAY_SIZE(page_sizes)];
- struct drm_i915_gem_object *obj;
- unsigned int size_mask;
- unsigned int page_mask;
- int n, i;
- int err = -ENODEV;
-
- if (supported == I915_GTT_PAGE_SIZE_4K)
- return 0;
-
- /*
- * Sanity check creating objects with a varying mix of page sizes --
- * ensuring that our writes lands in the right place.
- */
-
- n = 0;
- for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
- pages[n++] = BIT(i);
-
- for (size_mask = 2; size_mask < BIT(n); size_mask++) {
- unsigned int size = 0;
-
- for (i = 0; i < n; i++) {
- if (size_mask & BIT(i))
- size |= pages[i];
- }
-
- /*
- * For our page mask we want to enumerate all the page-size
- * combinations which will fit into our chosen object size.
- */
- for (page_mask = 2; page_mask <= size_mask; page_mask++) {
- unsigned int page_sizes = 0;
-
- for (i = 0; i < n; i++) {
- if (page_mask & BIT(i))
- page_sizes |= pages[i];
- }
-
- /*
- * Ensure that we can actually fill the given object
- * with our chosen page mask.
- */
- if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
- continue;
-
- obj = huge_pages_object(i915, size, page_sizes);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_device;
- }
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- i915_gem_object_put(obj);
-
- if (err == -ENOMEM) {
- pr_info("unable to get pages, size=%u, pages=%u\n",
- size, page_sizes);
- err = 0;
- break;
- }
-
- pr_err("pin_pages failed, size=%u, pages=%u\n",
- size_mask, page_mask);
-
- goto out_device;
- }
-
- /* Force the page-size for the gtt insertion */
- obj->mm.page_sizes.sg = page_sizes;
-
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("exhaust write-huge failed with size=%u\n",
- size);
- goto out_unpin;
- }
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj);
- i915_gem_object_put(obj);
- }
- }
-
- goto out_device;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
- i915_gem_object_put(obj);
-out_device:
- mkwrite_device_info(i915)->page_sizes = supported;
-
- return err;
-}
-
typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
@@ -1900,7 +1799,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_shrink_thp),
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
- SUBTEST(igt_ppgtt_exhaust_huge),
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7fc46861a54d..54b86cf7f5d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1004,7 +1004,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
@@ -1465,9 +1465,12 @@ out_file:
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
- struct drm_mm_node *node =
- __drm_mm_interval_first(&vm->mm,
- offset, offset + sizeof(u32) - 1);
+ struct drm_mm_node *node;
+
+ mutex_lock(&vm->mutex);
+ node = __drm_mm_interval_first(&vm->mm,
+ offset, offset + sizeof(u32) - 1);
+ mutex_unlock(&vm->mutex);
if (!node || node->start > offset)
return 0;
@@ -1492,6 +1495,10 @@ static int write_to_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+ err = check_scratch(ctx_vm(ctx), offset);
+ if (err)
+ return err;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1528,10 +1535,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto out_vm;
- err = check_scratch(vm, offset);
- if (err)
- goto err_unpin;
-
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
@@ -1556,7 +1559,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1575,64 +1578,95 @@ static int read_from_scratch(struct i915_gem_context *ctx,
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
- const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
struct i915_vma *vma;
+ unsigned int flags;
u32 *cmd;
int err;
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+ err = check_scratch(ctx_vm(ctx), offset);
+ if (err)
+ return err;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto out;
- }
-
- memset(cmd, POISON_INUSE, PAGE_SIZE);
if (INTEL_GEN(i915) >= 8) {
+ const u32 GPR0 = engine->mmio_base + 0x600;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_vm;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto out_vm;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
+ *cmd++ = GPR0;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
+ *cmd++ = GPR0;
*cmd++ = result;
*cmd++ = 0;
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ flags = 0;
} else {
+ const u32 reg = engine->mmio_base + 0x420;
+
+ /* hsw: register access (even to 3DPRIM!) is protected */
+ vm = i915_vm_get(&engine->gt->ggtt->vm);
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_vm;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_vm;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
+ *cmd++ = reg;
*cmd++ = offset;
- *cmd++ = MI_STORE_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
- *cmd++ = result;
- }
- *cmd = MI_BATCH_BUFFER_END;
+ *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ *cmd++ = reg;
+ *cmd++ = vma->node.start + result;
+ *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(engine->gt);
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_vm;
+ flags = I915_DISPATCH_SECURE;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
- if (err)
- goto out_vm;
-
- err = check_scratch(vm, offset);
- if (err)
- goto err_unpin;
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -1640,7 +1674,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
if (err)
goto err_request;
@@ -1674,7 +1708,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1686,6 +1720,39 @@ out:
return err;
}
+static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
+{
+ struct i915_address_space *vm;
+ struct page *page;
+ u32 *vaddr;
+ int err = 0;
+
+ vm = ctx_vm(ctx);
+ if (!vm)
+ return -ENODEV;
+
+ page = vm->scratch[0].base.page;
+ if (!page) {
+ pr_err("No scratch page!\n");
+ return -EINVAL;
+ }
+
+ vaddr = kmap(page);
+ if (!vaddr) {
+ pr_err("No (mappable) scratch page!\n");
+ return -EINVAL;
+ }
+
+ memcpy(out, vaddr, sizeof(*out));
+ if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
+ pr_err("Inconsistent initial state of scratch page!\n");
+ err = -EINVAL;
+ }
+ kunmap(page);
+
+ return err;
+}
+
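Editor's note: check_scratch_page() above leans on memchr_inv(), which returns NULL only when every byte of the range matches the given value. A plain-C model of that uniformity test (the kernel version is word-optimised):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static const void *memchr_inv_model(const void *p, int c, size_t n)
    {
        const unsigned char *s = p;

        for (size_t i = 0; i < n; i++)
            if (s[i] != (unsigned char)c)
                return s + i;   /* first byte breaking uniformity */
        return NULL;
    }

    int main(void)
    {
        unsigned char page[4096];

        memset(page, 0, sizeof(page));
        printf("uniform: %s\n",
               memchr_inv_model(page, page[0], sizeof(page)) ? "no" : "yes");

        page[100] = 0xc5;   /* a stray write breaks isolation */
        printf("uniform: %s\n",
               memchr_inv_model(page, page[0], sizeof(page)) ? "no" : "yes");
        return 0;
    }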
static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -1696,6 +1763,7 @@ static int igt_vm_isolation(void *arg)
I915_RND_STATE(prng);
struct file *file;
u64 vm_total;
+ u32 expected;
int err;
if (INTEL_GEN(i915) < 7)
@@ -1730,9 +1798,17 @@ static int igt_vm_isolation(void *arg)
if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
goto out_file;
+ /* Read the initial state of the scratch page */
+ err = check_scratch_page(ctx_a, &expected);
+ if (err)
+ goto out_file;
+
+ err = check_scratch_page(ctx_b, &expected);
+ if (err)
+ goto out_file;
+
vm_total = ctx_vm(ctx_a)->total;
GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
- vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
num_engines = 0;
@@ -1743,14 +1819,18 @@ static int igt_vm_isolation(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
+ /* Not all engines have their own GPR! */
+ if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
+ continue;
+
while (!__igt_timeout(end_time, NULL)) {
u32 value = 0xc5c5c5c5;
u64 offset;
- div64_u64_rem(i915_prandom_u64_state(&prng),
- vm_total, &offset);
- offset = round_down(offset, alignof_dword);
- offset += I915_GTT_PAGE_SIZE;
+ /* Leave enough space at offset 0 for the batch */
+ offset = igt_random_offset(&prng,
+ I915_GTT_PAGE_SIZE, vm_total,
+ sizeof(u32), alignof_dword);
err = write_to_scratch(ctx_a, engine,
offset, 0xdeadbeef);
@@ -1760,7 +1840,7 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_file;
- if (value) {
+ if (value != expected) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
engine->name, value,
upper_32_bits(offset),
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..31549ad83fa6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -225,27 +226,32 @@ static int igt_fill_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT,
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- /*
- * If we have a tiny shared address space, like for the GGTT
- * then we can't be too greedy.
- */
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size + 1;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
+ phys_sz = min(phys_sz, sz);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
@@ -276,13 +282,14 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -293,6 +300,8 @@ static int igt_fill_blt_thread(void *arg)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
@@ -319,6 +328,7 @@ static int igt_copy_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -334,23 +344,32 @@ static int igt_copy_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT,
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size + 1;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
+ phys_sz = min(phys_sz, sz);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
@@ -397,13 +416,14 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -416,6 +436,8 @@ static int igt_copy_blt_thread(void *arg)
i915_gem_object_put(src);
i915_gem_object_put(dst);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 6718da20f35d..772d8cba7da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -159,7 +159,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 384143aa7776..e7e3c620f542 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,9 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
i915_gem_context_set_persistence(ctx);
mutex_init(&ctx->engines_mutex);
@@ -37,7 +40,7 @@ mock_context(struct drm_i915_private *i915,
if (name) {
struct i915_ppgtt *ppgtt;
- strncpy(ctx->name, name, sizeof(ctx->name));
+ strncpy(ctx->name, name, sizeof(ctx->name) - 1);
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
@@ -83,6 +86,8 @@ live_context(struct drm_i915_private *i915, struct file *file)
if (IS_ERR(ctx))
return ctx;
+ i915_gem_context_set_no_error_capture(ctx);
+
err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
if (err < 0)
goto err_ctx;
@@ -105,6 +110,7 @@ kernel_context(struct drm_i915_private *i915)
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_persistence(ctx);
+ i915_gem_context_set_no_error_capture(ctx);
return ctx;
}
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
new file mode 100644
index 000000000000..de595b66a746
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_gpu_commands.h"
+
+#define MAX_URB_ENTRIES 64
+#define STATE_SIZE (4 * 1024)
+#define GT3_INLINE_DATA_DELAYS 0x1E00
+#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
+
+struct cb_kernel {
+ const void *data;
+ u32 size;
+};
+
+#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
+
+#include "ivb_clear_kernel.c"
+static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
+
+#include "hsw_clear_kernel.c"
+static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
+
+struct batch_chunk {
+ struct i915_vma *vma;
+ u32 offset;
+ u32 *start;
+ u32 *end;
+ u32 max_items;
+};
+
+struct batch_vals {
+ u32 max_primitives;
+ u32 max_urb_entries;
+ u32 cmd_size;
+ u32 state_size;
+ u32 state_start;
+ u32 batch_size;
+ u32 surface_height;
+ u32 surface_width;
+ u32 scratch_size;
+ u32 max_size;
+};
+
+static void
+batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
+{
+ if (IS_HASWELL(i915)) {
+ bv->max_primitives = 280;
+ bv->max_urb_entries = MAX_URB_ENTRIES;
+ bv->surface_height = 16 * 16;
+ bv->surface_width = 32 * 2 * 16;
+ } else {
+ bv->max_primitives = 128;
+ bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ bv->surface_height = 16 * 8;
+ bv->surface_width = 32 * 16;
+ }
+ bv->cmd_size = bv->max_primitives * 4096;
+ bv->state_size = STATE_SIZE;
+ bv->state_start = bv->cmd_size;
+ bv->batch_size = bv->cmd_size + bv->state_size;
+ bv->scratch_size = bv->surface_height * bv->surface_width;
+ bv->max_size = bv->batch_size + bv->scratch_size;
+}
+
+static void batch_init(struct batch_chunk *bc,
+ struct i915_vma *vma,
+ u32 *start, u32 offset, u32 max_bytes)
+{
+ bc->vma = vma;
+ bc->offset = offset;
+ bc->start = start + bc->offset / sizeof(*bc->start);
+ bc->end = bc->start;
+ bc->max_items = max_bytes / sizeof(*bc->start);
+}
+
+static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
+{
+ return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
+}
+
+static u32 batch_addr(const struct batch_chunk *bc)
+{
+ return bc->vma->node.start;
+}
+
+static void batch_add(struct batch_chunk *bc, const u32 d)
+{
+ GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
+ *bc->end++ = d;
+}
+
+static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
+{
+ u32 *map;
+
+ if (align) {
+ u32 *end = PTR_ALIGN(bc->end, align);
+
+ memset32(bc->end, 0, end - bc->end);
+ bc->end = end;
+ }
+
+ map = bc->end;
+ bc->end += items;
+
+ return map;
+}
+
+static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
+{
+ GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
+ return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
+}
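Editor's note: the batch_chunk helpers form a small bump allocator over the mapped vma: batch_alloc_items() zero-pads up to the requested byte alignment and advances the write cursor. A standalone model (forcing a 32-byte-aligned buffer so the output is deterministic):

    #include <stdint.h>
    #include <stdio.h>

    struct chunk { uint32_t *start, *end; };

    static uint32_t *alloc_items(struct chunk *c, uint32_t align, uint32_t items)
    {
        if (align)  /* pad with zero dwords up to an `align`-byte boundary */
            while ((uintptr_t)c->end & (align - 1))
                *c->end++ = 0;

        uint32_t *map = c->end;
        c->end += items;
        return map;
    }

    int main(void)
    {
        static _Alignas(32) uint32_t buf[64];
        struct chunk c = { buf, buf };

        alloc_items(&c, 0, 3);                  /* three dwords */
        uint32_t *cs = alloc_items(&c, 32, 8);  /* 32-byte aligned */

        printf("offset %zu\n", (size_t)((char *)cs - (char *)buf)); /* 32 */
        return 0;
    }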
+
+static u32
+gen7_fill_surface_state(struct batch_chunk *state,
+ const u32 dst_offset,
+ const struct batch_vals *bv)
+{
+ u32 surface_h = bv->surface_height;
+ u32 surface_w = bv->surface_width;
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+#define SURFACE_2D 1
+#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define RENDER_CACHE_READ_WRITE 1
+
+ *cs++ = SURFACE_2D << 29 |
+ (SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
+ (RENDER_CACHE_READ_WRITE << 8);
+
+ *cs++ = batch_addr(state) + dst_offset;
+
+ *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
+ *cs++ = surface_w;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+#define SHADER_CHANNELS(r, g, b, a) \
+ (((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
+ *cs++ = SHADER_CHANNELS(4, 5, 6, 7);
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_binding_table(struct batch_chunk *state,
+ const struct batch_vals *bv)
+{
+ u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = surface_start - state->offset;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_kernel_data(struct batch_chunk *state,
+ const u32 *data,
+ const u32 size)
+{
+ return batch_offset(state,
+ memcpy(batch_alloc_bytes(state, 64, size),
+ data, size));
+}
+
+static u32
+gen7_fill_interface_descriptor(struct batch_chunk *state,
+ const struct batch_vals *bv,
+ const struct cb_kernel *kernel,
+ unsigned int count)
+{
+ u32 kernel_offset =
+ gen7_fill_kernel_data(state, kernel->data, kernel->size);
+ u32 binding_table = gen7_fill_binding_table(state, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8 * count);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = kernel_offset;
+ *cs++ = (1 << 7) | (1 << 13);
+ *cs++ = 0;
+ *cs++ = (binding_table - state->offset) | 1;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* idds 1 - 63 are dummies */
+ memset32(cs, 0x00, (count - 1) * 8);
+ batch_advance(state, cs + (count - 1) * 8);
+
+ return offset;
+}
+
+static void
+gen7_emit_state_base_address(struct batch_chunk *batch,
+ u32 surface_state_base)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 12);
+
+ *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ /* general */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* surface */
+ *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+ /* dynamic */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* indirect */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* instruction */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+
+ /* general/dynamic/indirect/instruction access bounds */
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_vfe_state(struct batch_chunk *batch,
+ const struct batch_vals *bv,
+ u32 urb_size, u32 curbe_size,
+ u32 mode)
+{
+ u32 urb_entries = bv->max_urb_entries;
+ u32 threads = bv->max_primitives - 1;
+ u32 *cs = batch_alloc_items(batch, 32, 8);
+
+ *cs++ = MEDIA_VFE_STATE | (8 - 2);
+
+ /* scratch buffer */
+ *cs++ = 0;
+
+ /* number of threads & urb entries for GPGPU vs Media Mode */
+ *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+
+ *cs++ = 0;
+
+ /* urb entry size & curbe size in 256 bits unit */
+ *cs++ = urb_size << 16 | curbe_size;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
+ const u32 interface_descriptor,
+ unsigned int count)
+{
+ u32 *cs = batch_alloc_items(batch, 8, 4);
+
+ *cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
+ *cs++ = 0;
+ *cs++ = count * 8 * sizeof(*cs);
+
+ /*
+ * interface descriptor address - it is relative to the dynamic
+ * state base address
+ */
+ *cs++ = interface_descriptor;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_media_object(struct batch_chunk *batch,
+ unsigned int media_object_index)
+{
+ unsigned int x_offset = (media_object_index % 16) * 64;
+ unsigned int y_offset = (media_object_index / 16) * 16;
+ unsigned int inline_data_size;
+ unsigned int media_batch_size;
+ unsigned int i;
+ u32 *cs;
+
+ inline_data_size = 112 * 8;
+ media_batch_size = inline_data_size + 6;
+
+ cs = batch_alloc_items(batch, 8, media_batch_size);
+
+ *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+
+ /* interface descriptor offset */
+ *cs++ = 0;
+
+ /* without indirect data */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* inline */
+ *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = 0;
+ *cs++ = GT3_INLINE_DATA_DELAYS;
+ for (i = 3; i < inline_data_size; i++)
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 5);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void emit_batch(struct i915_vma * const vma,
+ u32 *start,
+ const struct batch_vals *bv)
+{
+ struct drm_i915_private *i915 = vma->vm->i915;
+ unsigned int desc_count = 64;
+ const u32 urb_size = 112;
+ struct batch_chunk cmds, state;
+ u32 interface_descriptor;
+ unsigned int i;
+
+ batch_init(&cmds, vma, start, 0, bv->cmd_size);
+ batch_init(&state, vma, start, bv->state_start, bv->state_size);
+
+ interface_descriptor =
+ gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+ gen7_emit_pipeline_flush(&cmds);
+ batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+ batch_add(&cmds, MI_NOOP);
+ gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_flush(&cmds);
+
+ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+
+ gen7_emit_interface_descriptor_load(&cmds,
+ interface_descriptor,
+ desc_count);
+
+ for (i = 0; i < bv->max_primitives; i++)
+ gen7_emit_media_object(&cmds, i);
+
+ batch_add(&cmds, MI_BATCH_BUFFER_END);
+}
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ struct batch_vals bv;
+ u32 *batch;
+
+ batch_get_defaults(engine->i915, &bv);
+ if (!vma)
+ return bv.max_size;
+
+ GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+
+ batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
new file mode 100644
index 000000000000..bb100748e2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __GEN7_RENDERCLEAR_H__
+#define __GEN7_RENDERCLEAR_H__
+
+struct intel_engine_cs;
+struct i915_vma;
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma);
+
+#endif /* __GEN7_RENDERCLEAR_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 4d1de2d97d5c..94e746af8926 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -8,6 +8,7 @@
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
@@ -25,6 +26,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
return pde;
}
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
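Editor's note: a worked example of the encoding above: an uncached, read-only mapping of page 0x1000 sets the present bit, clears RW and ORs in the uncached PPAT pattern. The bit positions below follow the gen8 definitions as I read them in the i915 headers; treat them as assumptions.

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PRESENT   (1ull << 0)
    #define _PAGE_RW        (1ull << 1)
    #define PPAT_UNCACHED   ((1ull << 3) | (1ull << 4)) /* PWT | PCD (assumed) */

    int main(void)
    {
        uint64_t addr = 0x1000, pte;

        pte = addr | _PAGE_PRESENT | _PAGE_RW;
        pte &= ~_PAGE_RW;       /* PTE_READ_ONLY path */
        pte |= PPAT_UNCACHED;   /* I915_CACHE_NONE path */

        printf("pte = %#llx\n", (unsigned long long)pte);   /* 0x1019 */
        return 0;
    }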
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *i915 = ppgtt->vm.i915;
@@ -706,6 +731,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
+ ppgtt->vm.pte_encode = gen8_pte_encode;
+
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
new file mode 100644
index 000000000000..b47f9d4a0848
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
+ */
+
+static const u32 hsw_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x00000160,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 57e8a051ddc2..aea992e46c42 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -51,6 +51,11 @@ int intel_context_alloc_state(struct intel_context *ce)
return -EINTR;
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_is_banned(ce)) {
+ err = -EIO;
+ goto unlock;
+ }
+
err = ce->ops->alloc(ce);
if (unlikely(err))
goto unlock;
@@ -92,6 +97,8 @@ int __intel_context_do_pin(struct intel_context *ce)
{
int err;
+ GEM_BUG_ON(intel_context_is_closed(ce));
+
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
@@ -116,7 +123,8 @@ int __intel_context_do_pin(struct intel_context *ce)
if (unlikely(err))
goto err_active;
- CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+ CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
+ i915_ggtt_offset(ce->ring->vma),
ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
@@ -219,7 +227,9 @@ static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
- CE_TRACE(ce, "retire\n");
+ CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
set_bit(CONTEXT_VALID_BIT, &ce->flags);
if (ce->state)
@@ -280,6 +290,8 @@ intel_context_init(struct intel_context *ce,
ce->sseu = engine->sseu;
ce->ring = __intel_context_ring_size(SZ_4K);
+ ewma_runtime_init(&ce->runtime.avg);
+
ce->vm = i915_vm_get(engine->gt->vm);
INIT_LIST_HEAD(&ce->signal_link);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 30bd248827d8..07be021882cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include "i915_active.h"
+#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
@@ -35,6 +36,9 @@ int intel_context_alloc_state(struct intel_context *ce);
void intel_context_free(struct intel_context *ce);
+int intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu);
+
/**
* intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
* @ce: the context
@@ -169,6 +173,11 @@ static inline bool intel_context_is_barrier(const struct intel_context *ce)
return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}
+static inline bool intel_context_is_closed(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -224,4 +233,20 @@ intel_context_clear_nopreempt(struct intel_context *ce)
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
+static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
+{
+ const u32 period =
+ RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+ return READ_ONCE(ce->runtime.total) * period;
+}
+
+static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+{
+ const u32 period =
+ RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+ return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
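The two accessors above convert hardware tick counters into nanoseconds. The averaged path uses mul_u32_u32() so that the full 64-bit product of two 32-bit values is kept; a plain 32-bit multiply could silently truncate for long-running contexts. A minimal userspace sketch of that conversion, with an assumed (not real) timestamp period:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Widen both operands before multiplying, as the kernel's
	 * mul_u32_u32() does, so the full 64-bit product is kept. */
	static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
	{
		return (uint64_t)a * b;
	}

	int main(void)
	{
		uint32_t ticks = 3000000000u; /* ~3e9 CS timestamp ticks */
		uint32_t period_ns = 83;      /* hypothetical tick period */

		/* A plain 32-bit multiply would truncate this product. */
		printf("runtime: %" PRIu64 " ns\n",
		       mul_u32_u32(ticks, period_ns));
		return 0;
	}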
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
new file mode 100644
index 000000000000..65dcd090245d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_active.h"
+#include "intel_context.h"
+#include "intel_context_param.h"
+#include "intel_ring.h"
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz)
+{
+ int err;
+
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ err = i915_active_wait(&ce->active);
+ if (err < 0)
+ goto unlock;
+
+ if (intel_context_is_pinned(ce)) {
+ err = -EBUSY; /* In active use, come back later! */
+ goto unlock;
+ }
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ struct intel_ring *ring;
+
+ /* Replace the existing ringbuffer */
+ ring = intel_engine_create_ring(ce->engine, sz);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto unlock;
+ }
+
+ intel_ring_put(ce->ring);
+ ce->ring = ring;
+
+ /* Context image will be updated on next pin */
+ } else {
+ ce->ring = __intel_context_ring_size(sz);
+ }
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return err;
+}
+
+long intel_context_get_ring_size(struct intel_context *ce)
+{
+ long sz = (unsigned long)READ_ONCE(ce->ring);
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ sz = ce->ring->size;
+ intel_context_unlock_pinned(ce);
+ }
+
+ return sz;
+}
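A note on intel_context_get_ring_size() above: until CONTEXT_ALLOC_BIT is set, ce->ring holds no real pointer; __intel_context_ring_size() stashes the requested size in the pointer field itself, to be decoded when the ring is finally allocated. A standalone sketch of that pointer-encoding idea (the tag scheme below is illustrative, not the driver's exact encoding):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Tag the low bit so an encoded size can never be mistaken
	 * for a real (aligned) pointer. */
	static void *encode_size(unsigned long sz)
	{
		return (void *)((sz << 1) | 1);
	}

	static int is_encoded(const void *p)
	{
		return (uintptr_t)p & 1;
	}

	static unsigned long decode_size(const void *p)
	{
		return (uintptr_t)p >> 1;
	}

	int main(void)
	{
		void *ring = encode_size(4096);

		assert(is_encoded(ring));
		printf("pending ring size: %lu\n", decode_size(ring));
		return 0;
	}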
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
new file mode 100644
index 000000000000..f053d8633fe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_CONTEXT_PARAM_H
+#define INTEL_CONTEXT_PARAM_H
+
+struct intel_context;
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz);
+long intel_context_get_ring_size(struct intel_context *ce);
+
+#endif /* INTEL_CONTEXT_PARAM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
new file mode 100644
index 000000000000..57a30956c922
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_ring.h"
+#include "intel_sseu.h"
+
+static int gen8_emit_rpcs_config(struct i915_request *rq,
+ const struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ u64 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) +
+ LRC_STATE_PN * PAGE_SIZE +
+ CTX_R_PWR_CLK_STATE * 4;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
+{
+ struct i915_request *rq;
+ int ret;
+
+ lockdep_assert_held(&ce->pin_mutex);
+
+ /*
+ * If the context is not idle, we have to submit an ordered request to
+ * modify its context image via the kernel context (writing to our own
+ * image, or into the registers directly, does not stick). Pristine
+ * and idle contexts will be configured on pinning.
+ */
+ if (!intel_context_pin_if_active(ce))
+ return 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ /* Serialise with the remote context */
+ ret = intel_context_prepare_remote_request(ce, rq);
+ if (ret == 0)
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
+
+ i915_request_add(rq);
+out_unpin:
+ intel_context_unpin(ce);
+ return ret;
+}
+
+int
+intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ int ret;
+
+ GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+
+ ret = intel_context_lock_pinned(ce);
+ if (ret)
+ return ret;
+
+ /* Nothing to do if unmodified. */
+ if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
+ goto unlock;
+
+ ret = gen8_modify_rpcs(ce, sseu);
+ if (!ret)
+ ce->sseu = sseu;
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return ret;
+}
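gen8_emit_rpcs_config() above follows the driver's usual emission pattern: reserve a fixed number of dwords with intel_ring_begin(), write the command and its operands, then intel_ring_advance(). A self-contained sketch of that reserve/write/advance flow; the ring layout and opcode value are illustrative placeholders, not the hardware encoding:

	#include <stdint.h>
	#include <stdio.h>

	#define MI_STORE_DWORD_IMM	0x20000000u /* hypothetical opcode */

	struct ring {
		uint32_t buf[64];
		unsigned int tail;
	};

	/* Reserve n dwords; a real driver would wrap or wait for space. */
	static uint32_t *ring_begin(struct ring *r, unsigned int n)
	{
		uint32_t *cs = &r->buf[r->tail];

		r->tail += n;
		return cs;
	}

	int main(void)
	{
		struct ring r = { .tail = 0 };
		uint64_t offset = 0x12345000ull; /* illustrative offset */
		uint32_t *cs = ring_begin(&r, 4);

		*cs++ = MI_STORE_DWORD_IMM;
		*cs++ = (uint32_t)offset;	  /* lower_32_bits(offset) */
		*cs++ = (uint32_t)(offset >> 32); /* upper_32_bits(offset) */
		*cs++ = 0xdeadbeef;		  /* payload dword */

		printf("emitted %u dwords\n", r.tail);
		return 0;
	}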
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index ca1420fb8b53..07cb83a0d017 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -7,6 +7,7 @@
#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__
+#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -19,6 +20,8 @@
#define CONTEXT_REDZONE POISON_INUSE
+DECLARE_EWMA(runtime, 3, 8);
+
struct i915_gem_context;
struct i915_vma;
struct intel_context;
@@ -42,8 +45,8 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -59,15 +62,25 @@ struct intel_context {
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
#define CONTEXT_VALID_BIT 2
-#define CONTEXT_USE_SEMAPHORES 3
-#define CONTEXT_BANNED 4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
-#define CONTEXT_NOPREEMPT 6
+#define CONTEXT_CLOSED_BIT 3
+#define CONTEXT_USE_SEMAPHORES 4
+#define CONTEXT_BANNED 5
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
+#define CONTEXT_NOPREEMPT 7
u32 *lrc_reg_state;
u64 lrc_desc;
u32 tag; /* cookie passed to HW to track this context on submission */
+ /* Time on GPU as tracked by the hw. */
+ struct {
+ struct ewma_runtime avg;
+ u64 total;
+ u32 last;
+ I915_SELFTEST_DECLARE(u32 num_underflow);
+ I915_SELFTEST_DECLARE(u32 max_underflow);
+ } runtime;
+
unsigned int active_count; /* protected by timeline->mutex */
atomic_t pin_count;
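DECLARE_EWMA(runtime, 3, 8) above instantiates the kernel's fixed-point exponentially weighted moving average: 3 fractional bits of precision, each new sample weighted by 1/8. A userspace sketch of the same arithmetic, modelled on <linux/average.h> (an illustrative reimplementation, not the kernel macro):

	#include <stdio.h>

	#define EWMA_PRECISION	3	/* fractional bits kept internally */
	#define EWMA_WEIGHT	8	/* each new sample contributes 1/8 */

	struct ewma_runtime {
		unsigned long internal;
	};

	static void ewma_runtime_add(struct ewma_runtime *e, unsigned long val)
	{
		unsigned long i = e->internal;

		/* avg = (avg * (W - 1) + sample) / W, in fixed point */
		e->internal = i ? (i * (EWMA_WEIGHT - 1) +
				   (val << EWMA_PRECISION)) / EWMA_WEIGHT
				: val << EWMA_PRECISION;
	}

	static unsigned long ewma_runtime_read(const struct ewma_runtime *e)
	{
		return e->internal >> EWMA_PRECISION;
	}

	int main(void)
	{
		struct ewma_runtime avg = { 0 };
		unsigned long samples[] = { 100, 100, 900, 100, 100 };
		unsigned int n;

		for (n = 0; n < sizeof(samples) / sizeof(samples[0]); n++) {
			ewma_runtime_add(&avg, samples[n]);
			printf("avg after %lu: %lu\n", samples[n],
			       ewma_runtime_read(&avg));
		}
		return 0;
	}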
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 5df003061e44..b469de0dd9b6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -107,7 +107,20 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
- return *READ_ONCE(execlists->active);
+ struct i915_request * const *cur, * const *old, *active;
+
+ cur = READ_ONCE(execlists->active);
+ smp_rmb(); /* pairs with overwrite protection in process_csb() */
+ do {
+ old = cur;
+
+ active = READ_ONCE(*cur);
+ cur = READ_ONCE(execlists->active);
+
+ smp_rmb(); /* and complete the seqlock retry */
+ } while (unlikely(cur != old));
+
+ return active;
}
static inline void
@@ -192,6 +205,8 @@ void intel_engines_free(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
+int intel_engine_resume(struct intel_engine_cs *engine);
+
int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
@@ -303,26 +318,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine);
u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
- if (!execlists->preempt_hang.inject_hang)
- return false;
-
- complete(&execlists->preempt_hang.completion);
- return true;
-}
-
-#else
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
- return false;
-}
-
-#endif
-
void intel_engine_init_active(struct intel_engine_cs *engine,
unsigned int subclass);
#define ENGINE_PHYSICAL 0
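The new execlists_active() above is in effect a pointer-based seqlock read: it re-reads execlists->active until two consecutive reads agree, so a concurrent process_csb() repointing the array cannot hand back a stale slot. A standalone C11 sketch of the same retry pattern, with acquire loads standing in for READ_ONCE()/smp_rmb(); the types and setup are illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	struct request { int seqno; };

	struct execlists {
		_Atomic(struct request *) slots[2];
		/* Writer fills a slot, then repoints ->active at it. */
		_Atomic(_Atomic(struct request *) *) active;
	};

	static struct request *execlists_active(struct execlists *el)
	{
		_Atomic(struct request *) *cur, *old;
		struct request *rq;

		cur = atomic_load_explicit(&el->active, memory_order_acquire);
		do {
			old = cur;

			rq = atomic_load_explicit(cur, memory_order_acquire);
			cur = atomic_load_explicit(&el->active,
						   memory_order_acquire);
		} while (cur != old); /* writer moved ->active: retry */

		return rq;
	}

	int main(void)
	{
		static struct request rq = { .seqno = 1 };
		struct execlists el;

		atomic_init(&el.slots[0], &rq);
		atomic_init(&el.slots[1], NULL);
		atomic_init(&el.active, &el.slots[0]);

		printf("active seqno: %d\n", execlists_active(&el)->seqno);
		return 0;
	}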
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 06ff7695fa29..3aa8a652c16d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -35,6 +35,7 @@
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
+#include "intel_gt_pm.h"
#include "intel_lrc.h"
#include "intel_reset.h"
#include "intel_ring.h"
@@ -199,10 +200,10 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
* out in the wash.
*/
cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
- DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
- INTEL_GEN(gt->i915),
- cxt_size * 64,
- cxt_size - 1);
+ drm_dbg(&gt->i915->drm,
+ "gen%d CXT_SIZE = %d bytes [0x%08x]\n",
+ INTEL_GEN(gt->i915), cxt_size * 64,
+ cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
@@ -274,6 +275,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
@@ -300,11 +302,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
- engine->i915 = gt->i915;
+ engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
@@ -312,6 +314,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.heartbeat_interval_ms =
CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.max_busywait_duration_ns =
+ CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
engine->props.preempt_timeout_ms =
CONFIG_DRM_I915_PREEMPT_TIMEOUT;
engine->props.stop_timeout_ms =
@@ -319,11 +323,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
+ /* Override to uninterruptible for OpenCL workloads. */
+ if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+ engine->props.preempt_timeout_ms = 0;
+
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
+ DRIVER_CAPS(i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -339,7 +347,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- gt->i915->engine[id] = engine;
+ i915->engine[id] = engine;
return 0;
}
@@ -392,8 +400,24 @@ void intel_engines_release(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /*
+ * Before we release the resources held by engine, we must be certain
+ * that the HW is no longer accessing them -- having the GPU scribble
+ * to or read from a page being used for something else causes no end
+ * of fun.
+ *
+ * The GPU should be reset by this point, but assume the worst just
+ * in case we aborted before completely initialising the engines.
+ */
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
+
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
+ intel_wakeref_wait_for_idle(&engine->wakeref);
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+
if (!engine->release)
continue;
@@ -432,9 +456,9 @@ int intel_engines_init_mmio(struct intel_gt *gt)
unsigned int i;
int err;
- WARN_ON(engine_mask == 0);
- WARN_ON(engine_mask &
- GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+ drm_WARN_ON(&i915->drm, engine_mask == 0);
+ drm_WARN_ON(&i915->drm, engine_mask &
+ GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_probe_failure(i915))
return -ENODEV;
@@ -455,7 +479,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != engine_mask))
+ if (drm_WARN_ON(&i915->drm, mask != engine_mask))
device_info->engine_mask = mask;
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
@@ -510,7 +534,6 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
{
unsigned int flags;
- flags = PIN_GLOBAL;
if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
@@ -523,11 +546,11 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
* above the mappable region (even though we never
* actually map it).
*/
- flags |= PIN_MAPPABLE;
+ flags = PIN_MAPPABLE;
else
- flags |= PIN_HIGH;
+ flags = PIN_HIGH;
- return i915_vma_pin(vma, 0, 0, flags);
+ return i915_ggtt_pin(vma, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -546,7 +569,8 @@ static int init_status_page(struct intel_engine_cs *engine)
*/
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate status page\n");
+ drm_err(&engine->i915->drm,
+ "Failed to allocate status page\n");
return PTR_ERR(obj);
}
@@ -614,15 +638,15 @@ static int engine_setup_common(struct intel_engine_cs *engine)
struct measure_breadcrumb {
struct i915_request rq;
- struct intel_timeline timeline;
struct intel_ring ring;
u32 cs[1024];
};
-static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
+static int measure_breadcrumb_dw(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
struct measure_breadcrumb *frame;
- int dw = -ENOMEM;
+ int dw;
GEM_BUG_ON(!engine->gt->scratch);
@@ -630,39 +654,27 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
if (!frame)
return -ENOMEM;
- if (intel_timeline_init(&frame->timeline,
- engine->gt,
- engine->status_page.vma))
- goto out_frame;
-
- mutex_lock(&frame->timeline.mutex);
+ frame->rq.i915 = engine->i915;
+ frame->rq.engine = engine;
+ frame->rq.context = ce;
+ rcu_assign_pointer(frame->rq.timeline, ce->timeline);
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
intel_ring_update_space(&frame->ring);
-
- frame->rq.i915 = engine->i915;
- frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
- rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
-
- dw = intel_timeline_pin(&frame->timeline);
- if (dw < 0)
- goto out_timeline;
+ mutex_lock(&ce->timeline->mutex);
spin_lock_irq(&engine->active.lock);
+
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+
spin_unlock_irq(&engine->active.lock);
+ mutex_unlock(&ce->timeline->mutex);
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
- intel_timeline_unpin(&frame->timeline);
-
-out_timeline:
- mutex_unlock(&frame->timeline.mutex);
- intel_timeline_fini(&frame->timeline);
-out_frame:
kfree(frame);
return dw;
}
@@ -737,12 +749,6 @@ static int engine_init_common(struct intel_engine_cs *engine)
engine->set_default_submission(engine);
- ret = measure_breadcrumb_dw(engine);
- if (ret < 0)
- return ret;
-
- engine->emit_fini_breadcrumb_dw = ret;
-
/*
* We may need to do things with the shrinker which
* require us to immediately switch back to the default
@@ -755,9 +761,18 @@ static int engine_init_common(struct intel_engine_cs *engine)
if (IS_ERR(ce))
return PTR_ERR(ce);
+ ret = measure_breadcrumb_dw(ce);
+ if (ret < 0)
+ goto err_context;
+
+ engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
return 0;
+
+err_context:
+ intel_context_put(ce);
+ return ret;
}
int intel_engines_init(struct intel_gt *gt)
@@ -824,6 +839,20 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_wa_list_free(&engine->whitelist);
}
+/**
+ * intel_engine_resume - re-initializes the HW state of the engine
+ * @engine: Engine to resume.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_resume(struct intel_engine_cs *engine)
+{
+ intel_engine_apply_workarounds(engine);
+ intel_engine_apply_whitelist(engine);
+
+ return engine->resume(engine);
+}
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -982,6 +1011,12 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ if (INTEL_GEN(i915) >= 12) {
+ instdone->slice_common_extra[0] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
+ instdone->slice_common_extra[1] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
+ }
for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
@@ -1276,8 +1311,14 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n",
+ drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
+ drm_printf(m, "\tRING_ESR: 0x%08x\n",
+ ENGINE_READ(engine, RING_ESR));
+ drm_printf(m, "\tRING_EMR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EMR));
+ drm_printf(m, "\tRING_EIR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EIR));
}
addr = intel_engine_get_active_head(engine);
@@ -1342,25 +1383,27 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
execlists_active_lock_bh(execlists);
rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
- char hdr[80];
+ char hdr[160];
int len;
- len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ",
+ (int)(port - execlists->active));
if (!i915_request_signaled(rq)) {
struct intel_timeline *tl = get_timeline(rq);
- len += snprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq));
+ len += scnprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
if (tl)
intel_timeline_put(tl);
}
- snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
@@ -1657,6 +1700,23 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* we only care about the snapshot of this moment.
*/
lockdep_assert_held(&engine->active.lock);
+
+ rcu_read_lock();
+ request = execlists_active(&engine->execlists);
+ if (request) {
+ struct intel_timeline *tl = request->context->timeline;
+
+ list_for_each_entry_from_reverse(request, &tl->requests, link) {
+ if (i915_request_completed(request))
+ break;
+
+ active = request;
+ }
+ }
+ rcu_read_unlock();
+ if (active)
+ return active;
+
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 6c6fd185457c..dd825718e4e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -180,7 +180,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- int err = 0;
+ int err;
if (!intel_engine_has_preemption(engine))
return -ENODEV;
@@ -188,8 +188,10 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- if (mutex_lock_interruptible(&ce->timeline->mutex))
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
goto out_rpm;
+ }
intel_context_enter(ce);
rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
@@ -204,6 +206,8 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
__i915_request_commit(rq);
__i915_request_queue(rq, &attr);
+ GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+ err = 0;
out_unlock:
mutex_unlock(&ce->timeline->mutex);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ea90ab3e396e..b6cf284e3a2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -112,7 +112,7 @@ __queue_and_release_pm(struct i915_request *rq,
{
struct intel_gt_timelines *timelines = &engine->gt->timelines;
- ENGINE_TRACE(engine, "\n");
+ ENGINE_TRACE(engine, "parking\n");
/*
* We have to serialise all potential retirement paths with our
@@ -249,7 +249,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (!switch_to_kernel_context(engine))
return -EBUSY;
- ENGINE_TRACE(engine, "\n");
+ ENGINE_TRACE(engine, "parked\n");
call_idle_barriers(engine); /* cleanup after wedging */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 92be41a6903c..80cdde712842 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -75,6 +75,7 @@ struct intel_instdone {
u32 instdone;
/* The following exist only in the RCS engine */
u32 slice_common;
+ u32 slice_common_extra[2];
u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
@@ -126,7 +127,6 @@ DECLARE_EWMA(_engine_latency, 6, 4)
struct st_preempt_hang {
struct completion completion;
unsigned int count;
- bool inject_hang;
};
/**
@@ -157,6 +157,16 @@ struct intel_engine_execlists {
struct i915_priolist default_priolist;
/**
+ * @error_interrupt: CS Master EIR
+ *
+ * The CS generates an interrupt when it detects an error. We capture
+ * the first error interrupt, record the EIR and schedule the tasklet.
+ * In the tasklet, we process the pending CS events to ensure we have
+ * the guilty request, and then reset the engine.
+ */
+ u32 error_interrupt;
+
+ /**
* @no_priolist: priority lists disabled
*/
bool no_priolist;
@@ -537,6 +547,7 @@ struct intel_engine_cs {
struct {
unsigned long heartbeat_interval_ms;
+ unsigned long max_busywait_duration_ns;
unsigned long preempt_timeout_ms;
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 9e7f12bef828..848decee9066 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -278,7 +278,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
}
}
- if (WARN(errors, "Invalid UABI engine mapping found"))
+ if (drm_WARN(&i915->drm, errors,
+ "Invalid UABI engine mapping found"))
i915->uabi_engines = RB_ROOT;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 531d501be01f..aed498a0d032 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,6 +8,8 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
+#include <drm/i915_drm.h>
+
#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -104,27 +106,17 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
}
-static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- /*
- * Don't bother messing with faults pre GEN6 as we have little
- * documentation supporting that it's a good idea.
- */
- if (INTEL_GEN(i915) < 6)
- return;
+ struct i915_vma *vma;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ i915_vma_wait_for_bind(vma);
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
ggtt->invalidate(ggtt);
-}
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
-{
- ggtt_suspend_mappings(&i915->ggtt);
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -167,6 +159,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_gtt_chipset_flush();
}
+static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ return addr | _PAGE_PRESENT;
+}
+
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -182,7 +181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
ggtt->invalidate(ggtt);
}
@@ -195,7 +194,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
dma_addr_t addr;
/*
@@ -350,31 +349,6 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
-struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
- struct clear_range *arg = _arg;
-
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
-{
- struct clear_range arg = { vm, start, length };
-
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -462,7 +436,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
u64 size;
int ret;
- if (!USES_GUC(ggtt->vm.i915))
+ if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
@@ -472,7 +446,8 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
PIN_NOEVICT);
if (ret)
- DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Failed to reserve top of GGTT for GuC\n");
return ret;
}
@@ -544,8 +519,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg_kms(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
@@ -879,8 +855,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->vm.clear_range != nop_clear_range)
- ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
ggtt->invalidate = gen8_ggtt_invalidate;
@@ -890,7 +866,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
- ggtt->vm.pte_encode = gen8_pte_encode;
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
setup_private_pat(ggtt->vm.gt->uncore);
@@ -1180,7 +1156,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
-static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
bool flush = false;
@@ -1188,8 +1164,6 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
intel_gt_check_and_clear_faults(ggtt->vm.gt);
- mutex_lock(&ggtt->vm.mutex);
-
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
@@ -1216,19 +1190,10 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
atomic_set(&ggtt->vm.open, open);
ggtt->invalidate(ggtt);
- mutex_unlock(&ggtt->vm.mutex);
-
if (flush)
wbinvd_on_all_cpus();
-}
-
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
-{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
- ggtt_restore_mappings(ggtt);
- if (INTEL_GEN(i915) >= 8)
+ if (INTEL_GEN(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
}
@@ -1267,6 +1232,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_rotation_info_size(rot_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
@@ -1296,8 +1262,9 @@ err_sg_alloc:
kfree(st);
err_st_alloc:
- DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+ drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width,
+ rot_info->plane[0].height, size);
return ERR_PTR(ret);
}
@@ -1349,6 +1316,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_remapped_info_size(rem_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
@@ -1380,8 +1348,9 @@ err_sg_alloc:
kfree(st);
err_st_alloc:
- DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+ drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width,
+ rem_info->plane[0].height, size);
return ERR_PTR(ret);
}
@@ -1479,8 +1448,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
- DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
+ drm_err(&vma->vm->i915->drm,
+ "Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 51b8718513bc..f04214a54f75 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -292,10 +292,21 @@
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
-#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define STATE_BASE_ADDRESS \
+ ((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
+#define BASE_ADDRESS_MODIFY REG_BIT(0)
+#define PIPELINE_SELECT \
+ ((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
+#define PIPELINE_SELECT_MEDIA REG_BIT(0)
+#define GFX_OP_3DSTATE_VF_STATISTICS \
+ ((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
+#define MEDIA_VFE_STATE \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
+#define MEDIA_OBJECT \
+ ((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
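All of the render command opcodes above share one dword layout: bits 31:29 select the client (0x3 for the GFX pipe), with pipeline, opcode and sub-opcode fields packed below. A small sketch that rebuilds one of the new macros from those fields (the field names are descriptive, not official):

	#include <stdint.h>
	#include <stdio.h>

	/* Bits 31:29 = client (0x3), 28:27 = pipeline, 26:24 = opcode,
	 * 23:16 = sub-opcode; the low bits carry length or flags. */
	static uint32_t gfx_cmd(uint32_t pipeline, uint32_t opcode,
				uint32_t subop)
	{
		return (0x3u << 29) | (pipeline << 27) | (opcode << 24) |
		       (subop << 16);
	}

	int main(void)
	{
		/* Rebuild MEDIA_INTERFACE_DESCRIPTOR_LOAD from its fields. */
		printf("0x%08x\n", gfx_cmd(0x2, 0x0, 0x2)); /* 0x70020000 */
		return 0;
	}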
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index da2b6e2ae692..d09f7596cb98 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -198,16 +198,16 @@ static void gen6_check_faults(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ?
- "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ drm_dbg(&engine->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
}
@@ -239,18 +239,17 @@ static void gen8_check_faults(struct intel_gt *gt)
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
@@ -345,7 +344,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
goto err_unref;
}
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ ret = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (ret)
goto err_unref;
@@ -455,6 +454,11 @@ err_rq:
if (!rq)
continue;
+ if (rq->fence.error) {
+ err = -EIO;
+ goto out;
+ }
+
GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
state = rq->context->state;
if (!state)
@@ -538,6 +542,10 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
err = -EIO;
}
+ /* Flush and restore the kernel context for safety */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
+ err = -EIO;
+
return err;
}
@@ -584,7 +592,9 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_engines;
- intel_uc_init(&gt->uc);
+ err = intel_uc_init(&gt->uc);
+ if (err)
+ goto err_engines;
err = intel_gt_resume(gt);
if (err)
@@ -634,6 +644,13 @@ void intel_gt_driver_remove(struct intel_gt *gt)
void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_rps_driver_unregister(&gt->rps);
+
+ /*
+ * The device is being unregistered to prevent any new users; cancel
+ * all in-flight requests so that we can quickly unbind the active
+ * resources.
+ */
+ intel_gt_set_wedged(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -650,6 +667,9 @@ void intel_gt_driver_release(struct intel_gt *gt)
void intel_gt_driver_late_release(struct intel_gt *gt)
{
+ /* We need to wait for inflight RCU frees to release their grip */
+ rcu_barrier();
+
intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 1dac441cb8f4..4fac043750aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -14,7 +14,7 @@ struct drm_i915_private;
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
- GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
##__VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f796bdf1ed30..f0e7fd95165a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,21 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
bool tasklet = false;
+ if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+ u32 eir;
+
+ eir = ENGINE_READ(engine, RING_EIR);
+ ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+ /* Disable the error interrupt until after the reset */
+ if (likely(eir)) {
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, eir);
+ WRITE_ONCE(engine->execlists.error_interrupt, eir);
+ tasklet = true;
+ }
+ }
+
if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
tasklet = true;
@@ -210,7 +225,10 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
- const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+ const u32 irqs =
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT;
struct intel_uncore *uncore = gt->uncore;
const u32 dmask = irqs << 16 | irqs;
const u32 smask = irqs << 16;
@@ -279,66 +297,56 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ GT_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir);
}
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
void __iomem * const regs = gt->uncore->regs;
+ u32 iir;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
- if (likely(gt_iir[0]))
- raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
- if (likely(gt_iir[1]))
- raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(gt_iir[2]))
- raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
- if (likely(gt_iir[3]))
- raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
- }
-}
-
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
-{
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
- gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
- gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+ iir >> GEN8_RCS_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+ iir >> GEN8_BCS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(0), iir);
+ }
}
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
- gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
- gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ iir >> GEN8_VCS0_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ iir >> GEN8_VCS1_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(1), iir);
+ }
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
- gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ iir >> GEN8_VECS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(3), iir);
+ }
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
- guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(iir)) {
+ gen6_rps_irq_handler(&gt->rps, iir);
+ guc_irq_handler(&gt->uc.guc, iir >> 16);
+ raw_reg_write(regs, GEN8_GT_IIR(2), iir);
+ }
}
}
@@ -354,25 +362,18 @@ void gen8_gt_irq_reset(struct intel_gt *gt)
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
- struct intel_uncore *uncore = gt->uncore;
-
/* These are interrupts we'll toggle with the ring mask register */
- u32 gt_interrupts[] = {
- (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
+ const u32 irqs =
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT;
+ const u32 gt_interrupts[] = {
+ irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
+ irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
0,
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ irqs << GEN8_VECS_IRQ_SHIFT,
};
+ struct intel_uncore *uncore = gt->uncore;
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index 8f37593712c9..886c5cf408a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -36,9 +36,8 @@ void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl);
void gen8_gt_irq_reset(struct intel_gt *gt);
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
void gen8_gt_irq_postinstall(struct intel_gt *gt);
#endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index d1c2f034296a..8b653c0f5e5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -216,7 +216,7 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
- err = engine->resume(engine);
+ err = intel_engine_resume(engine);
intel_engine_pm_put(engine);
if (err) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 16acdc5d6734..2a72cce63fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -171,7 +171,9 @@ void __i915_vm_close(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
- mutex_lock(&vm->mutex);
+ if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
+ return;
+
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -186,6 +188,7 @@ void __i915_vm_close(struct i915_address_space *vm)
i915_gem_object_put(obj);
}
GEM_BUG_ON(!list_empty(&vm->bound_list));
+
mutex_unlock(&vm->mutex);
}
@@ -299,6 +302,25 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}
+static void poison_scratch_page(struct page *page, unsigned long size)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ do {
+ void *vaddr;
+
+ vaddr = kmap(page);
+ memset(vaddr, POISON_FREE, PAGE_SIZE);
+ kunmap(page);
+
+ page = pfn_to_page(page_to_pfn(page) + 1);
+ size -= PAGE_SIZE;
+ } while (size);
+}
+
int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
unsigned long size;
@@ -331,6 +353,17 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
goto skip;
+ /*
+ * Use a non-zero scratch page for debugging.
+ *
+ * We want a value that should be reasonably obvious
+ * to spot in the error state, while also causing a GPU hang
+ * if executed. We prefer using a clear page in production, so
+ * should it ever be accidentally used, the effect should be
+ * fairly benign.
+ */
+ poison_scratch_page(page, size);
+
addr = dma_map_page_attrs(vm->dma,
page, 0, size,
PCI_DMA_BIDIRECTIONAL,
@@ -448,36 +481,12 @@ void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
HSW_GTT_CACHE_EN,
can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
- WARN_ON_ONCE(can_use_gtt_cache &&
- intel_uncore_read(uncore,
- HSW_GTT_CACHE_EN) == 0);
+ drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
}
}
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
-
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 7da7681c20b1..b3116fe8d180 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -429,8 +429,7 @@ static inline void
i915_vm_close(struct i915_address_space *vm)
{
GEM_BUG_ON(!atomic_read(&vm->open));
- if (atomic_dec_and_test(&vm->open))
- __i915_vm_close(vm);
+ __i915_vm_close(vm);
i915_vm_put(vm);
}
@@ -512,12 +511,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
-
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags);
+void i915_ggtt_suspend(struct i915_ggtt *ggtt);
+void i915_ggtt_resume(struct i915_ggtt *ggtt);
int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index ceb785b75c25..e3f637b3650e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -50,6 +50,9 @@ static bool get_ia_constants(struct intel_llc *llc,
struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct intel_rps *rps = &llc_to_gt(llc)->rps;
+ if (!HAS_LLC(i915) || IS_DGFX(i915))
+ return false;
+
if (rps->max_freq <= rps->min_freq)
return false;
@@ -147,8 +150,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
void intel_llc_enable(struct intel_llc *llc)
{
- if (HAS_LLC(llc_to_gt(llc)->i915))
- gen6_update_ring_freq(llc);
+ gen6_update_ring_freq(llc);
}
void intel_llc_disable(struct intel_llc *llc)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 31455eceeb0c..683014e7bc51 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -176,8 +176,6 @@
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-#define WA_TAIL_DWORDS 2
-#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
struct virtual_engine {
struct intel_engine_cs base;
@@ -247,7 +245,7 @@ static void mark_eio(struct i915_request *rq)
GEM_BUG_ON(i915_request_signaled(rq));
- dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -295,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority;
+ return READ_ONCE(rq->sched.attr.priority);
}
static int effective_prio(const struct i915_request *rq)
@@ -1006,7 +1004,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
- rq->engine = owner;
+ WRITE_ONCE(rq->engine, owner);
owner->submit_request(rq);
active = NULL;
}
@@ -1197,6 +1195,48 @@ static void reset_active(struct i915_request *rq,
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
+{
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ ce->runtime.num_underflow += dt < 0;
+ ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
+}
+
+static void intel_context_update_runtime(struct intel_context *ce)
+{
+ u32 old;
+ s32 dt;
+
+ if (intel_context_is_barrier(ce))
+ return;
+
+ old = ce->runtime.last;
+ ce->runtime.last = intel_context_get_runtime(ce);
+ dt = ce->runtime.last - old;
+
+ if (unlikely(dt <= 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, ce->runtime.last, dt);
+ st_update_runtime_underflow(ce, dt);
+ return;
+ }
+
+ ewma_runtime_add(&ce->runtime.avg, dt);
+ ce->runtime.total += dt;
+}
+
static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
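The delta computation in intel_context_update_runtime() above leans on unsigned wraparound: CTX_TIMESTAMP is a free-running u32, so last - old stays correct modulo 2^32 across a wrap, while the signed view of the result exposes genuinely bogus (negative) deltas. A tiny demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t old = 0xfffffff0u;  /* counter just before wrapping */
		uint32_t last = 0x00000010u; /* counter just after wrapping */
		int32_t dt = last - old;     /* unsigned math, signed view */

		/* 0x10 - 0xfffffff0 wraps to 0x20: a delta of 32 ticks. */
		printf("dt = %d ticks\n", dt);
		return 0;
	}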
@@ -1211,12 +1251,12 @@ __execlists_schedule_in(struct i915_request *rq)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
execlists_check_context(ce, engine);
+ ce->lrc_desc &= ~GENMASK_ULL(47, 37);
if (ce->tag) {
/* Use a fixed tag for OA and friends */
ce->lrc_desc |= (u64)ce->tag << 32;
} else {
/* We don't need a strict matching tag, just different values */
- ce->lrc_desc &= ~GENMASK_ULL(47, 37);
ce->lrc_desc |=
(u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
GEN11_SW_CTX_ID_SHIFT;
@@ -1276,10 +1316,11 @@ __execlists_schedule_out(struct i915_request *rq,
* If we have just completed this context, the engine may now be
* idle and we want to re-enter powersaving.
*/
- if (list_is_last(&rq->link, &ce->timeline->requests) &&
+ if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
i915_request_completed(rq))
intel_engine_add_retire(engine, ce->timeline);
+ intel_context_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
intel_gt_pm_put_async(engine->gt);
@@ -1395,15 +1436,26 @@ trace_ports(const struct intel_engine_execlists *execlists,
ports[1] ? ports[1]->fence.seqno : 0);
}
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
static __maybe_unused bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
const char *msg)
{
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
+ bool sentinel = false;
trace_ports(execlists, msg, execlists->pending);
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(execlists))
+ return true;
+
if (!execlists->pending[0]) {
GEM_TRACE_ERR("Nothing pending for promotion!\n");
return false;
@@ -1430,6 +1482,26 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
ce = rq->context;
+ /*
+ * Sentinels are supposed to be lonely so they flush the
+ * current execution off the HW. Check that they are the
+ * only request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ sentinel = i915_request_has_sentinel(rq);
+ if (sentinel && port != execlists->pending) {
+ GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
@@ -1525,6 +1597,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
@@ -1542,7 +1619,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
- if (unlikely((prev->fence.flags ^ next->fence.flags) &
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
@@ -1550,6 +1627,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (!can_merge_ctx(prev->context, next->context))
return false;
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
return true;
}
@@ -1585,7 +1663,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
}
static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
+ struct i915_request *rq)
{
struct intel_engine_cs *old = ve->siblings[0];
@@ -1593,9 +1671,19 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_lock(&old->breadcrumbs.irq_lock);
if (!list_empty(&ve->context.signal_link)) {
- list_move_tail(&ve->context.signal_link,
- &engine->breadcrumbs.signalers);
- intel_engine_signal_breadcrumbs(engine);
+ list_del_init(&ve->context.signal_link);
+
+ /*
+ * We cannot acquire the new engine->breadcrumbs.irq_lock
+ * (as we are holding a breadcrumbs.irq_lock already),
+ * so attach this request to the signaler on submission.
+ * The queued irq_work will occur when we finally drop
+ * the engine->active.lock after dequeue.
+ */
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
+
+ /* Also transfer the pending irq_work for the old breadcrumb. */
+ intel_engine_signal_breadcrumbs(rq->engine);
}
spin_unlock(&old->breadcrumbs.irq_lock);
}
@@ -1605,6 +1693,11 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
&(rq__)->sched.waiters_list, \
wait_link)
+#define for_each_signaler(p__, rq__) \
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
+
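
(Like for_each_waiter above, this expands to list_for_each_entry_rcu(), so every walk must be bracketed by an RCU read-side critical section — the hold_request() conversion further down shows exactly this. A minimal usage sketch, where check_dep() is a stand-in for the caller's per-dependency test:)

struct i915_dependency *p;

rcu_read_lock();
for_each_signaler(p, rq) {
	/* p and p->signaler are only stable inside the read section */
	if (check_dep(p->signaler))
		break;
}
rcu_read_unlock();
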
static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
LIST_HEAD(list);
@@ -1693,12 +1786,13 @@ timeslice(const struct intel_engine_cs *engine)
static unsigned long
active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *engine->execlists.active;
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ const struct i915_request *rq = *execlists->active;
if (!rq || i915_request_completed(rq))
return 0;
- if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
return 0;
return timeslice(engine);
@@ -1715,8 +1809,11 @@ static void set_timeslice(struct intel_engine_cs *engine)
static void start_timeslice(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
+ int prio = queue_prio(execlists);
- execlists->switch_priority_hint = execlists->queue_priority_hint;
+ WRITE_ONCE(execlists->switch_priority_hint, prio);
+ if (prio == INT_MIN)
+ return;
if (timer_pending(&execlists->timer))
return;
@@ -1938,13 +2035,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
"",
yesno(engine != ve->siblings[0]));
- ve->request = NULL;
- ve->base.execlists.queue_priority_hint = INT_MIN;
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint,
+ INT_MIN);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- rq->engine = engine;
+ WRITE_ONCE(rq->engine, engine);
if (engine != ve->siblings[0]) {
u32 *regs = ve->context.lrc_reg_state;
@@ -1957,7 +2055,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
engine);
if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve, engine);
+ virtual_xfer_breadcrumbs(ve, rq);
/*
* Move the bound engine to the top of the list
@@ -2064,6 +2162,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(last &&
!can_merge_ctx(last->context,
rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
submit = true;
last = rq;
@@ -2134,6 +2235,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_schedule_out(*port);
clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+ smp_wmb(); /* complete the seqlock for execlists_active() */
WRITE_ONCE(execlists->active, execlists->inflight);
}
@@ -2144,12 +2246,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
clflush((void *)last);
}
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -2240,7 +2336,6 @@ static void process_csb(struct intel_engine_cs *engine)
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
if (unlikely(head == tail))
return;
@@ -2254,6 +2349,7 @@ static void process_csb(struct intel_engine_cs *engine)
*/
rmb();
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
do {
bool promote;
@@ -2288,11 +2384,13 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+
+ ring_set_paused(engine, 0);
+
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
-
- if (!inject_preempt_hang(execlists))
- ring_set_paused(engine, 0);
+ smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", old);
@@ -2300,12 +2398,12 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- WRITE_ONCE(execlists->active,
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending)));
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
WRITE_ONCE(execlists->pending[0], NULL);
} else {
@@ -2320,8 +2418,37 @@ static void process_csb(struct intel_engine_cs *engine)
* coherent (visible from the CPU) before the
* user interrupt and CSB is processed.
*/
- GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
- !reset_in_progress(execlists));
+ if (GEM_SHOW_DEBUG() &&
+ !i915_request_completed(*execlists->active) &&
+ !reset_in_progress(execlists)) {
+ struct i915_request *rq __maybe_unused =
+ *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+
+ GEM_BUG_ON("context completed before request");
+ }
+
execlists_schedule_out(*execlists->active++);
GEM_BUG_ON(execlists->active - execlists->inflight >
@@ -2349,7 +2476,7 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);
- if (!engine->execlists.pending[0]) {
+ if (!READ_ONCE(engine->execlists.pending[0])) {
rcu_read_lock(); /* protect peeking at execlists->active */
execlists_dequeue(engine);
rcu_read_unlock();
@@ -2366,12 +2493,12 @@ static void __execlists_hold(struct i915_request *rq)
if (i915_request_is_active(rq))
__i915_request_unsubmit(rq);
- RQ_TRACE(rq, "on hold\n");
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
list_move_tail(&rq->sched.link, &rq->engine->active.hold);
i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
@@ -2385,7 +2512,7 @@ static void __execlists_hold(struct i915_request *rq)
if (i915_request_completed(w))
continue;
- if (i915_request_on_hold(rq))
+ if (i915_request_on_hold(w))
continue;
list_move_tail(&w->sched.link, &list);
@@ -2443,6 +2570,7 @@ static bool execlists_hold(struct intel_engine_cs *engine,
GEM_BUG_ON(i915_request_on_hold(rq));
GEM_BUG_ON(rq->engine != engine);
__execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->active.hold));
unlock:
spin_unlock_irq(&engine->active.lock);
@@ -2452,23 +2580,27 @@ unlock:
static bool hold_request(const struct i915_request *rq)
{
struct i915_dependency *p;
+ bool result = false;
/*
* If one of our ancestors is on hold, we must also be on hold,
* otherwise we will bypass it and execute before it.
*/
- list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
const struct i915_request *s =
container_of(p->signaler, typeof(*s), sched);
if (s->engine != rq->engine)
continue;
- if (i915_request_on_hold(s))
- return true;
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
}
+ rcu_read_unlock();
- return false;
+ return result;
}
static void __execlists_unhold(struct i915_request *rq)
@@ -2478,6 +2610,8 @@ static void __execlists_unhold(struct i915_request *rq)
do {
struct i915_dependency *p;
+ RQ_TRACE(rq, "hold release\n");
+
GEM_BUG_ON(!i915_request_on_hold(rq));
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
@@ -2486,21 +2620,24 @@ static void __execlists_unhold(struct i915_request *rq)
i915_sched_lookup_priolist(rq->engine,
rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
- RQ_TRACE(rq, "hold release\n");
/* Also release any children on this engine that are ready */
- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
if (w->engine != rq->engine)
continue;
- if (!i915_request_on_hold(rq))
+ if (!i915_request_on_hold(w))
continue;
/* Check that no other parents are also on hold */
- if (hold_request(rq))
+ if (hold_request(w))
continue;
list_move_tail(&w->sched.link, &list);
@@ -2615,13 +2752,13 @@ static bool execlists_capture(struct intel_engine_cs *engine)
if (!cap)
return true;
+ spin_lock_irq(&engine->active.lock);
cap->rq = execlists_active(&engine->execlists);
- GEM_BUG_ON(!cap->rq);
-
- rcu_read_lock();
- cap->rq = active_request(cap->rq->context->timeline, cap->rq);
- cap->rq = i915_request_get_rcu(cap->rq);
- rcu_read_unlock();
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->active.lock);
if (!cap->rq)
goto err_free;
@@ -2660,27 +2797,25 @@ err_free:
return false;
}
-static noinline void preempt_reset(struct intel_engine_cs *engine)
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (i915_modparams.reset < 3)
+ if (!intel_has_reset_engine(engine->gt))
return;
if (test_and_set_bit(bit, lock))
return;
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
/* Mark this tasklet as disabled to avoid waiting for it to complete */
tasklet_disable_nosync(&engine->execlists.tasklet);
- ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
- READ_ONCE(engine->props.preempt_timeout_ms),
- jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
-
ring_set_paused(engine, 1); /* Freeze the current request in place */
if (execlists_capture(engine))
- intel_engine_reset(engine, "preemption time out");
+ intel_engine_reset(engine, msg);
else
ring_set_paused(engine, 0);
@@ -2711,6 +2846,13 @@ static void execlists_submission_tasklet(unsigned long data)
bool timeout = preempt_timeout(engine);
process_csb(engine);
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ engine->execlists.error_interrupt = 0;
+ if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
+ execlists_reset(engine, "CS error");
+ }
+
if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
unsigned long flags;
@@ -2719,8 +2861,8 @@ static void execlists_submission_tasklet(unsigned long data)
spin_unlock_irqrestore(&engine->active.lock, flags);
/* Recheck after serialising with direct-submission */
- if (timeout && preempt_timeout(engine))
- preempt_reset(engine);
+ if (unlikely(timeout && preempt_timeout(engine)))
+ execlists_reset(engine, "preemption time out");
}
}
@@ -2793,6 +2935,7 @@ static void execlists_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->active.lock, flags);
if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
list_add_tail(&request->sched.link, &engine->active.hold);
i915_request_set_hold(request);
} else {
@@ -2874,6 +3017,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD] = head;
regs[CTX_RING_TAIL] = ring->tail;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
/* RPCS */
if (engine->class == RENDER_CLASS) {
@@ -2921,22 +3065,6 @@ static void execlists_context_reset(struct intel_context *ce)
CE_TRACE(ce, "reset\n");
GEM_BUG_ON(!intel_context_is_pinned(ce));
- /*
- * Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * The contexts that are stilled pinned on resume belong to the
- * kernel, and are local to each engine. All other contexts will
- * have their head/tail sanitized upon pinning before use, so they
- * will never see garbage,
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
intel_ring_reset(ce->ring, ce->ring->emit);
/* Scrub away the garbage */
@@ -2964,7 +3092,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
- GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -3257,7 +3386,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
goto err;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (err)
goto err;
@@ -3346,6 +3475,49 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ dev_err(engine->i915->drm.dev,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+	 * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+	 *    This is a non-fatal error; parsing continues.
+ *
+ * * there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
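
(For readers unfamiliar with these registers: a bit set in RING_EMR masks the corresponding error source, RING_EIR latches raised errors and is write-one-to-clear, and RING_ESR reflects live status; the final write therefore leaves only the instruction error unmasked. A hedged sketch of the matching IRQ-side producer for error_interrupt — a condensed shape, not the exact i915 handler:)

if (iir & GT_CS_MASTER_ERROR_INTERRUPT) {
	/* Quiesce further error reporting until the tasklet runs */
	ENGINE_WRITE(engine, RING_EMR, ~0u);
	/* Ack whatever was latched; ESR is re-read before resetting */
	ENGINE_WRITE(engine, RING_EIR, ENGINE_READ(engine, RING_EIR));

	WRITE_ONCE(engine->execlists.error_interrupt, 1);
	tasklet_hi_schedule(&engine->execlists.tasklet);
}
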
static void enable_execlists(struct intel_engine_cs *engine)
{
u32 mode;
@@ -3367,6 +3539,8 @@ static void enable_execlists(struct intel_engine_cs *engine)
i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+ enable_error_interrupt(engine);
+
engine->context_tag = 0;
}
@@ -3384,9 +3558,6 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int execlists_resume(struct intel_engine_cs *engine)
{
- intel_engine_apply_workarounds(engine);
- intel_engine_apply_whitelist(engine);
-
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -3517,9 +3688,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
@@ -3529,8 +3697,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
goto out_replay;
}
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
rq = active_request(ce->timeline, rq);
head = intel_ring_wrap(ce->ring, rq->head);
GEM_BUG_ON(head == ce->ring->tail);
@@ -3604,7 +3776,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
static void nop_submission_tasklet(unsigned long data)
{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
/* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
}
static void execlists_reset_cancel(struct intel_engine_cs *engine)
@@ -4273,6 +4448,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4514,8 +4690,13 @@ populate_lr_context(struct intel_context *ce,
inhibit = false;
}
- /* The second page of the context object contains some fields which must
- * be set up prior to the first execution. */
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /*
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
+ */
execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
ce, engine, ring, inhibit);
@@ -4553,8 +4734,17 @@ static int __execlists_context_alloc(struct intel_context *ce,
if (!ce->timeline) {
struct intel_timeline *tl;
+ struct i915_vma *hwsp;
- tl = intel_timeline_create(engine->gt, NULL);
+ /*
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
+ */
+ hwsp = NULL;
+ if (unlikely(intel_context_is_barrier(ce)))
+ hwsp = engine->status_page.vma;
+
+ tl = intel_timeline_create(engine->gt, hwsp);
if (IS_ERR(tl)) {
ret = PTR_ERR(tl);
goto error_deref_obj;
@@ -4723,7 +4913,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = rq->execution_mask;
if (unlikely(!mask)) {
/* Invalid selection, submit to a random engine in error */
- i915_request_skip(rq, -ENODEV);
+ i915_request_set_error_once(rq, -ENODEV);
mask = ve->siblings[0]->mask;
}
@@ -4737,7 +4927,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
static void virtual_submission_tasklet(unsigned long data)
{
struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = ve->base.execlists.queue_priority_hint;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -5133,11 +5323,15 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tE ");
}
- last = NULL;
- count = 0;
+ if (execlists->switch_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tSwitch priority hint: %d\n",
+ READ_ONCE(execlists->switch_priority_hint));
if (execlists->queue_priority_hint != INT_MIN)
drm_printf(m, "\t\tQueue priority hint: %d\n",
- execlists->queue_priority_hint);
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 08a3be65f700..d39b72590e40 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -17,6 +17,7 @@
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
#define CTX_BB_PER_CTX_PTR (0x18 + 1)
+#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
#define CTX_PDP3_LDW (0x26 + 1)
#define CTX_PDP2_UDW (0x28 + 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index eeef90b55c64..632e08a4592b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -280,9 +280,32 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
GEN11_MOCS_ENTRIES
};
-static bool get_mocs_settings(const struct drm_i915_private *i915,
- struct drm_i915_mocs_table *table)
+enum {
+ HAS_GLOBAL_MOCS = BIT(0),
+ HAS_ENGINE_MOCS = BIT(1),
+ HAS_RENDER_L3CC = BIT(2),
+};
+
+static bool has_l3cc(const struct drm_i915_private *i915)
{
+ return true;
+}
+
+static bool has_global_mocs(const struct drm_i915_private *i915)
+{
+ return HAS_GLOBAL_MOCS_REGISTERS(i915);
+}
+
+static bool has_mocs(const struct drm_i915_private *i915)
+{
+ return !IS_DGFX(i915);
+}
+
+static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ struct drm_i915_mocs_table *table)
+{
+ unsigned int flags;
+
if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
@@ -300,13 +323,13 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
} else {
- WARN_ONCE(INTEL_GEN(i915) >= 9,
- "Platform that should have a MOCS table does not.\n");
- return false;
+ drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
+ "Platform that should have a MOCS table does not.\n");
+ return 0;
}
if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
- return false;
+ return 0;
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN(i915, 9)) {
@@ -315,10 +338,20 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
for (i = 0; i < table->size; i++)
if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
(L3_ESC(1) | L3_SCC(0x7))))
- return false;
+ return 0;
}
- return true;
+ flags = 0;
+ if (has_mocs(i915)) {
+ if (has_global_mocs(i915))
+ flags |= HAS_GLOBAL_MOCS;
+ else
+ flags |= HAS_ENGINE_MOCS;
+ }
+ if (has_l3cc(i915))
+ flags |= HAS_RENDER_L3CC;
+
+ return flags;
}
/*
@@ -411,18 +444,20 @@ static void init_l3cc_table(struct intel_engine_cs *engine,
void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
struct drm_i915_mocs_table table;
+ unsigned int flags;
/* Called under a blanket forcewake */
assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
- if (!get_mocs_settings(engine->i915, &table))
+ flags = get_mocs_settings(engine->i915, &table);
+ if (!flags)
return;
/* Platforms with global MOCS do not need per-engine initialization. */
- if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ if (flags & HAS_ENGINE_MOCS)
init_mocs_table(engine, &table);
- if (engine->class == RENDER_CLASS)
+ if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
init_l3cc_table(engine, &table);
}
@@ -431,26 +466,17 @@ static u32 global_mocs_offset(void)
return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}
-static void init_global_mocs(struct intel_gt *gt)
+void intel_mocs_init(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
+ unsigned int flags;
/*
* LLC and eDRAM control values are not applicable to dgfx
*/
- if (IS_DGFX(gt->i915))
- return;
-
- if (!get_mocs_settings(gt->i915, &table))
- return;
-
- __init_mocs_table(gt->uncore, &table, global_mocs_offset());
-}
-
-void intel_mocs_init(struct intel_gt *gt)
-{
- if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- init_global_mocs(gt);
+ flags = get_mocs_settings(gt->i915, &table);
+ if (flags & HAS_GLOBAL_MOCS)
+ __init_mocs_table(gt->uncore, &table, global_mocs_offset());
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9e303c29d6e3..3847ee44b181 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
@@ -226,10 +227,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
set(uncore, GEN6_RC_SLEEP, 0);
set(uncore, GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(i915))
- set(uncore, GEN6_RC6_THRESHOLD, 125000);
- else
- set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
set(uncore, GEN6_RC6p_THRESHOLD, 150000);
set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
@@ -299,7 +297,6 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
pctx = i915_gem_object_create_stolen_for_preallocated(i915,
pcbr_offset,
- I915_GTT_OFFSET_NONE,
pctx_size);
if (IS_ERR(pctx))
return PTR_ERR(pctx);
@@ -323,10 +320,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_t(u64,
- i915->dsm.start,
- pctx->stolen->start,
- U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
pctx_paddr = i915->dsm.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
@@ -542,6 +539,8 @@ void intel_rc6_init(struct intel_rc6 *rc6)
void intel_rc6_sanitize(struct intel_rc6 *rc6)
{
+ memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));
+
if (rc6->enabled) { /* unbalanced suspend/resume */
rpm_get(rc6);
rc6->enabled = false;
@@ -604,6 +603,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6)
void intel_rc6_park(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ unsigned int target;
if (!rc6->enabled)
return;
@@ -618,7 +618,14 @@ void intel_rc6_park(struct intel_rc6 *rc6)
/* Turn off the HW timers and go directly to rc6 */
set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
- set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ target = 0x6; /* deepest rc6 */
+ else if (HAS_RC6p(rc6_to_i915(rc6)))
+ target = 0x5; /* deep rc6 */
+ else
+ target = 0x4; /* normal rc6 */
+ set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
}
void intel_rc6_disable(struct intel_rc6 *rc6)
@@ -713,7 +720,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
*/
i = (i915_mmio_reg_offset(reg) -
i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
- if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+ if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency)))
return 0;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index beee0cf89bce..80db3c9d785e 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -48,8 +48,10 @@ static void engine_skip_context(struct i915_request *rq)
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->context == hung_ctx)
- i915_request_skip(rq, -EIO);
+ if (rq->context == hung_ctx) {
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -72,9 +74,10 @@ static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
if (score) {
atomic_add(score, &file_priv->ban_score);
- DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
- ctx->name, score,
- atomic_read(&file_priv->ban_score));
+ drm_dbg(&ctx->i915->drm,
+ "client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
}
}
@@ -85,19 +88,18 @@ static bool mark_guilty(struct i915_request *rq)
bool banned;
int i;
+ if (intel_context_is_closed(rq->context)) {
+ intel_context_set_banned(rq->context);
+ return true;
+ }
+
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
-
- if (i915_gem_context_is_closed(ctx)) {
- intel_context_set_banned(rq->context);
- banned = true;
- goto out;
- }
+ return intel_context_is_banned(rq->context);
atomic_inc(&ctx->guilty_count);
@@ -122,8 +124,8 @@ static bool mark_guilty(struct i915_request *rq)
if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
banned = true;
if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count));
+ drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count));
intel_context_set_banned(rq->context);
}
@@ -153,11 +155,12 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
- i915_request_skip(rq, -EIO);
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
if (mark_guilty(rq))
engine_skip_context(rq);
} else {
- dma_fence_set_error(&rq->fence, -EAGAIN);
+ i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
}
rcu_read_unlock();
@@ -226,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
goto out;
}
@@ -234,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
goto out;
}
@@ -260,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
goto out;
}
@@ -271,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
goto out;
}
@@ -300,8 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
500, 0,
NULL);
if (err)
- DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ drm_dbg(&gt->i915->drm,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
return err;
}
@@ -401,7 +405,8 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return 0;
if (ret) {
- DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ drm_dbg(&engine->i915->drm,
+ "Wait for SFC forced lock ack failed\n");
return ret;
}
@@ -515,9 +520,10 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
- DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
- engine->name, request,
- intel_uncore_read_fw(uncore, reg));
+ drm_err(&engine->i915->drm,
+ "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
return ret;
}
@@ -781,7 +787,7 @@ static void nop_submit_request(struct i915_request *request)
unsigned long flags;
RQ_TRACE(request, "-EIO\n");
- dma_fence_set_error(&request->fence, -EIO);
+ i915_request_set_error_once(request, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
@@ -800,13 +806,6 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
- if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- for_each_engine(engine, gt, id)
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
-
GT_TRACE(gt, "start\n");
/*
@@ -845,10 +844,30 @@ void intel_gt_set_wedged(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
+ return;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
mutex_lock(&gt->reset.mutex);
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- __intel_gt_set_wedged(gt);
+
+ if (GEM_SHOW_DEBUG()) {
+ struct drm_printer p = drm_debug_printer(__func__);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_is_idle(engine))
+ continue;
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+ }
+
+ __intel_gt_set_wedged(gt);
+
mutex_unlock(&gt->reset.mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
@@ -969,7 +988,7 @@ static int resume(struct intel_gt *gt)
int ret;
for_each_engine(engine, gt, id) {
- ret = engine->resume(engine);
+ ret = intel_engine_resume(engine);
if (ret)
return ret;
}
@@ -1022,7 +1041,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (i915_modparams.reset)
dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
- DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
goto error;
}
@@ -1049,8 +1068,9 @@ void intel_gt_reset(struct intel_gt *gt,
*/
ret = intel_gt_init_hw(gt);
if (ret) {
- DRM_ERROR("Failed to initialise HW following reset (%d)\n",
- ret);
+ drm_err(&gt->i915->drm,
+ "Failed to initialise HW following reset (%d)\n",
+ ret);
goto taint;
}
@@ -1126,9 +1146,8 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "",
- engine->name, ret);
+ drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
+ uses_guc ? "GuC " : "", engine->name, ret);
goto out;
}
@@ -1144,7 +1163,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
- ret = engine->resume(engine);
+ ret = intel_engine_resume(engine);
out:
intel_engine_cancel_stop_cs(engine);
@@ -1165,7 +1184,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- DRM_DEBUG_DRIVER("resetting chip\n");
+ drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 6ff803f397c4..8cda1b7e17ba 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -31,17 +31,15 @@ int intel_ring_pin(struct intel_ring *ring)
if (atomic_fetch_inc(&ring->pin_count))
return 0;
- flags = PIN_GLOBAL;
-
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
- ret = i915_vma_pin(vma, 0, 0, flags);
+ ret = i915_ggtt_pin(vma, 0, flags);
if (unlikely(ret))
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index bc44fe8e5ffa..fdc3f10e12aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -29,11 +29,10 @@
#include <linux/log2.h>
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
@@ -568,7 +567,8 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
return;
/* ring should be idle before issuing a sync flush*/
- WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
ENGINE_WRITE(engine, RING_INSTPM,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -626,6 +626,27 @@ static bool stop_ring(struct intel_engine_cs *engine)
return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
+static struct i915_address_space *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
+static void set_pp_dir(struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm = vm_alias(engine->gt->vm);
+
+ if (vm) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE(engine, RING_PP_DIR_BASE,
+ px_base(ppgtt->pd)->ggtt_offset << 10);
+ }
+}
+
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -684,6 +705,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
intel_ring_update_space(ring);
+ set_pp_dir(engine);
+
/* First wake the ring up to an empty/idle ring */
ENGINE_WRITE(engine, RING_HEAD, ring->head);
ENGINE_WRITE(engine, RING_TAIL, ring->head);
@@ -857,43 +880,6 @@ static int rcs_resume(struct intel_engine_cs *engine)
intel_uncore_write(uncore, ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
- /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN_RANGE(i915, 4, 6))
- intel_uncore_write(uncore, MI_MODE,
- _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-
- /* We need to disable the AsyncFlip performance optimisations in order
- * to use MI_WAIT_FOR_EVENT within the CS. It should already be
- * programmed to '1' on all products.
- *
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
- */
- if (IS_GEN_RANGE(i915, 6, 7))
- intel_uncore_write(uncore, MI_MODE,
- _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
- /* Required for the hardware to program scanline values for waiting */
- /* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN(i915, 6))
- intel_uncore_write(uncore, GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
-
- /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN(i915, 7))
- intel_uncore_write(uncore, GFX_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
- _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-
- if (IS_GEN(i915, 6)) {
- /* From the Sandybridge PRM, volume 1 part 3, page 24:
- * "If this bit is set, STCunit will have LRA as replacement
- * policy. [...] This bit must be reset. LRA replacement
- * policy is not supported."
- */
- intel_uncore_write(uncore, CACHE_MODE_0,
- _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
- }
-
if (IS_GEN_RANGE(i915, 6, 7))
intel_uncore_write(uncore, INSTPM,
_MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
@@ -910,9 +896,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
@@ -1197,23 +1181,12 @@ static void ring_context_destroy(struct kref *ref)
intel_context_free(ce);
}
-static struct i915_address_space *vm_alias(struct intel_context *ce)
-{
- struct i915_address_space *vm;
-
- vm = ce->vm;
- if (i915_is_ggtt(vm))
- vm = &i915_vm_to_ggtt(vm)->alias->vm;
-
- return vm;
-}
-
static int __context_pin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
int err = 0;
- vm = vm_alias(ce);
+ vm = vm_alias(ce->vm);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
@@ -1224,7 +1197,7 @@ static void __context_unpin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
- vm = vm_alias(ce);
+ vm = vm_alias(ce->vm);
if (vm)
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
@@ -1384,7 +1357,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -1459,7 +1434,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->context->state) | flags;
+ *cs++ = i915_ggtt_offset(ce->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1574,21 +1549,64 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
+static int clear_residuals(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int ret;
+
+ ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+ if (ret)
+ return ret;
+
+ if (engine->kernel_context->state) {
+ ret = mi_set_context(rq,
+ engine->kernel_context,
+ MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
+ ret = engine->emit_bb_start(rq,
+ engine->wa_ctx.vma->node.start, 0,
+ 0);
+ if (ret)
+ return ret;
+
+ ret = engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /* Always invalidate before the next switch_mm() */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int switch_context(struct i915_request *rq)
{
+ struct intel_engine_cs *engine = rq->engine;
struct intel_context *ce = rq->context;
+ void **residuals = NULL;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- ret = switch_mm(rq, vm_alias(ce));
+ if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+ if (engine->wa_ctx.vma->private != ce) {
+ ret = clear_residuals(rq);
+ if (ret)
+ return ret;
+
+ residuals = &engine->wa_ctx.vma->private;
+ }
+ }
+
+ ret = switch_mm(rq, vm_alias(ce->vm));
if (ret)
return ret;
if (ce->state) {
u32 flags;
- GEM_BUG_ON(rq->engine->id != RCS0);
+ GEM_BUG_ON(engine->id != RCS0);
/* For resource streamer on HSW+ and power context elsewhere */
BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
@@ -1600,7 +1618,7 @@ static int switch_context(struct i915_request *rq)
else
flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, flags);
+ ret = mi_set_context(rq, ce, flags);
if (ret)
return ret;
}
@@ -1609,6 +1627,20 @@ static int switch_context(struct i915_request *rq)
if (ret)
return ret;
+ /*
+ * Now past the point of no return, this request _will_ be emitted.
+ *
+	 * Or at least this preamble will be emitted; the request may be
+ * interrupted prior to submitting the user payload. If so, we
+ * still submit the "empty" request in order to preserve global
+ * state tracking such as this, our tracking of the current
+ * dirty context.
+ */
+ if (residuals) {
+ intel_context_put(*residuals);
+ *residuals = intel_context_get(ce);
+ }
+
return 0;
}
@@ -1662,7 +1694,8 @@ static void gen6_bsd_submit_request(struct i915_request *request)
GEN6_BSD_SLEEP_INDICATOR,
0,
1000, 0, NULL))
- DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+ drm_err(&uncore->i915->drm,
+ "timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
i9xx_submit_request(request);
@@ -1787,11 +1820,16 @@ static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
+ if (engine->wa_ctx.vma) {
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+ }
+
intel_ring_unpin(engine->legacy.ring);
intel_ring_put(engine->legacy.ring);
@@ -1939,6 +1977,64 @@ static void setup_vecs(struct intel_engine_cs *engine)
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}
+static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ return gen7_setup_clear_gpr_bb(engine, vma);
+}
+
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size;
+ int err;
+
+ size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (size <= 0)
+ return size;
+
+ size = ALIGN(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ goto err_private;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_unpin;
+
+ err = gen7_ctx_switch_bb_setup(engine, vma);
+ if (err)
+ goto err_unpin;
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_private:
+ intel_context_put(vma->private);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
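
(gen7_ctx_switch_bb_setup() is called twice above in the common probe-then-emit idiom: with a NULL vma it only reports how large the batch will be, with a real vma it emits into it. A minimal sketch of a generator written in that style — the mapping helpers and dword count are illustrative, not the gen7_setup_clear_gpr_bb() internals:)

static int emit_clear_bb(struct intel_engine_cs *engine, struct i915_vma *vma)
{
	const int count = 32;	/* illustrative number of dwords */
	u32 *cs;

	if (!vma)		/* probe pass: report bytes required */
		return count * sizeof(u32);

	cs = map_batch(vma);	/* hypothetical mapping helper */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* ... emit exactly 'count' dwords of MI commands into cs ... */

	unmap_batch(vma);	/* hypothetical */
	return 0;
}
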
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
struct intel_timeline *timeline;
@@ -1992,11 +2088,19 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ err = gen7_ctx_switch_bb_init(engine);
+ if (err)
+ goto err_ring_unpin;
+ }
+
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
+err_ring_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err_timeline_unpin:
@@ -2007,3 +2111,7 @@ err:
intel_engine_cleanup_common(engine);
return err;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring_submission.c"
+#endif
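
(Textually including the selftest translation unit at the bottom of the file is the standard i915 idiom: it lets the selftests exercise the static functions above without exporting them. A hedged sketch of the shape such a file takes — the subtest name is hypothetical:)

/* selftest_ring_submission.c — compiled into this file when enabled */
static int live_ctx_switch_wa(void *arg)
{
	struct intel_gt *gt = arg;

	/* ... exercise the Haswell clear-residuals batch on gt ... */
	return 0;
}

int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_ctx_switch_wa),
	};

	if (HAS_EXECLISTS(i915))
		return 0;	/* legacy ring submission only */

	return intel_gt_live_subtests(tests, &i915->gt);
}
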
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index d2a3d935d186..cfaf141bac4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
@@ -55,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= rps->pm_events;
+ mask &= READ_ONCE(rps->pm_events);
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -68,17 +70,19 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
rps_reset_ei(rps);
if (IS_VALLEYVIEW(gt->i915))
/* WaGsvRC0ResidencyMethod:vlv */
- rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ events = GEN6_PM_RP_UP_EI_EXPIRED;
else
- rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
+ events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+ WRITE_ONCE(rps->pm_events, events);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
@@ -115,8 +119,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- rps->pm_events = 0;
-
+ WRITE_ONCE(rps->pm_events, 0);
set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
@@ -642,7 +645,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && rps->active)
+ if (!rps->power.interactive++ && READ_ONCE(rps->active))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -719,11 +722,15 @@ void intel_rps_unpark(struct intel_rps *rps)
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
- rps->active = true;
+
+ WRITE_ONCE(rps->active, true);
+
freq = max(rps->cur_freq, rps->efficient_freq),
freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
intel_rps_set(rps, freq);
+
rps->last_adj = 0;
+
mutex_unlock(&rps->lock);
if (INTEL_GEN(rps_to_i915(rps)) >= 6)
@@ -743,7 +750,7 @@ void intel_rps_park(struct intel_rps *rps)
if (INTEL_GEN(i915) >= 6)
rps_disable_interrupts(rps);
- rps->active = false;
+ WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
@@ -763,14 +770,27 @@ void intel_rps_park(struct intel_rps *rps)
intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
rps_set(rps, rps->idle_freq, false);
intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+
+ /*
+ * Since we will try and restart from the previously requested
+ * frequency on unparking, treat this idle point as a downclock
+ * interrupt and reduce the frequency for resume. If we park/unpark
+ * more frequently than the rps worker can run, we will not respond
+ * to any EI and never see a change in frequency.
+ *
+ * (Note we accommodate Cherryview's limitation of only using an
+ * even bin by applying it to all.)
+ */
+ rps->cur_freq =
+ max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
}
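
(A worked example of the new parking rule, assuming the kernel's round_down(), which rounds toward zero to a multiple of a power of two:)

/* cur_freq = 11: round_down(10, 2) = 10 -> park one (even) bin lower */
/* cur_freq =  8: round_down(7, 2)  =  6 -> always strictly lower     */
/* cur_freq = min_freq: max_t() clamps the result back to min_freq    */
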
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &rq->engine->gt->rps;
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !rps->active)
+ if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
return;
/* Serializes with i915_request_retire() */
@@ -1026,7 +1046,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1123,7 +1144,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1191,11 +1213,11 @@ void intel_rps_enable(struct intel_rps *rps)
if (!rps->enabled)
return;
- WARN_ON(rps->max_freq < rps->min_freq);
- WARN_ON(rps->idle_freq > rps->max_freq);
+ drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
+ drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
- WARN_ON(rps->efficient_freq < rps->min_freq);
- WARN_ON(rps->efficient_freq > rps->max_freq);
+ drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
+ drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
}
static void gen6_rps_disable(struct intel_rps *rps)
@@ -1390,9 +1412,9 @@ static void chv_rps_init(struct intel_rps *rps)
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
- rps->min_freq) & 1,
- "Odd GPU freq values\n");
+ drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
+ rps->rp1_freq | rps->min_freq) & 1,
+ "Odd GPU freq values\n");
}
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
@@ -1451,12 +1473,12 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
- if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ if (!pm_iir && !client_boost)
goto out;
mutex_lock(&rps->lock);
@@ -1552,11 +1574,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
- if (pm_iir & rps->pm_events) {
+ events = pm_iir & READ_ONCE(rps->pm_events);
+ if (events) {
spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
- rps->pm_iir |= pm_iir & rps->pm_events;
+
+ gen6_gt_pm_mask_irq(gt, events);
+ rps->pm_iir |= events;
+
schedule_work(&rps->work);
spin_unlock(&gt->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index d8d9f1179c2b..91debbc97c9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -312,7 +312,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
- err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
if (err)
return err;
@@ -410,6 +410,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
void *vaddr;
int err;
+ might_lock(&tl->gt->ggtt->vm.mutex);
+
/*
* If there is an outstanding GPU reference to this cacheline,
* such as it being sampled by a HW semaphore on another timeline,
@@ -435,7 +437,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
goto err_rollback;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (err) {
__idle_hwsp_free(vma->private, cacheline);
goto err_rollback;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 6c2f8462e0f3..5176ad1a3976 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -116,17 +116,17 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
} else {
wa_ = &wal->list[mid];
- if ((wa->mask & ~wa_->mask) == 0) {
- DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+ if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
+ DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
i915_mmio_reg_offset(wa_->reg),
- wa_->mask, wa_->val);
+ wa_->clr, wa_->set);
- wa_->val &= ~wa->mask;
+ wa_->set &= ~wa->clr;
}
wal->wa_count++;
- wa_->val |= wa->val;
- wa_->mask |= wa->mask;
+ wa_->set |= wa->set;
+ wa_->clr |= wa->clr;
wa_->read |= wa->read;
return;
}
@@ -147,13 +147,13 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
}
}
-static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val, u32 read_mask)
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 clear, u32 set, u32 read_mask)
{
struct i915_wa wa = {
.reg = reg,
- .mask = mask,
- .val = val,
+ .clr = clear,
+ .set = set,
.read = read_mask,
};
@@ -161,38 +161,43 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
}
static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val)
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
- wa_add(wal, reg, mask, val, mask);
+ wa_add(wal, reg, clear, set, clear);
}
static void
-wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
+ wa_write_masked_or(wal, reg, ~0, set);
}
static void
-wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, ~0, val);
+ wa_write_masked_or(wal, reg, set, set);
}
static void
-wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_write_masked_or(wal, reg, val, val);
+ wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
+}
+
+static void
+wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
#define WA_SET_BIT_MASKED(addr, mask) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
+ wa_masked_en(wal, (addr), (mask))
#define WA_CLR_BIT_MASKED(addr, mask) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
+ wa_masked_dis(wal, (addr), (mask))
#define WA_SET_FIELD_MASKED(addr, mask, value) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
+ wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
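
A minimal sketch, outside the patch itself, of what the clr/set pair encodes, assuming the read-modify-write form used by intel_uncore_rmw_fw() and the usual i915 masked-register encoding (write-enable mask in the high 16 bits, value in the low 16 bits):

	/* what intel_uncore_rmw_fw(uncore, reg, clr, set) boils down to */
	static u32 wa_rmw_value(u32 old, u32 clr, u32 set)
	{
		return (old & ~clr) | set;
	}

	/* roughly how the _MASKED_BIT_* helpers build their payload */
	#define SKETCH_MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
	#define SKETCH_MASKED_BIT_DISABLE(a) ((a) << 16)

So wa_masked_en()/wa_masked_dis() leave clr == 0 and rely on the register's own write-enable mask, while wa_write() clears everything (clr == ~0) before applying set.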
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
@@ -570,12 +575,29 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* allow headerless messages for preemptible GPGPU context */
WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+
+ /* Wa_1604278689:icl,ehl */
+ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
+ wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
+
+ /* Wa_1406306137:icl,ehl */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- /* Wa_1409142259:tgl */
+ /*
+ * Wa_1409142259:tgl
+ * Wa_1409347922:tgl
+ * Wa_1409252684:tgl
+ * Wa_1409217633:tgl
+ * Wa_1409207793:tgl
+ * Wa_1409178076:tgl
+ * Wa_1408979724:tgl
+ */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
@@ -588,6 +610,11 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128, 0);
+
+ /* WaDisableGPGPUMidThreadPreemption:tgl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
static void
@@ -657,7 +684,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
*cs++ = i915_mmio_reg_offset(wa->reg);
- *cs++ = wa->val;
+ *cs++ = wa->set;
}
*cs++ = MI_NOOP;
@@ -822,7 +849,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
- WARN_ON(!subslice);
+ drm_WARN_ON(&i915->drm, !subslice);
}
subslice--;
@@ -893,11 +920,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SLICE_UNIT_LEVEL_CLKGATE,
MSCUNIT_CLKGATE_DIS);
- /* Wa_1406680159:icl */
- wa_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE,
- GWUNIT_CLKGATE_DIS);
-
/* Wa_1406838659:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
@@ -926,7 +948,7 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
- /* Wa_1409180338:tgl */
+ /* Wa_1607087056:tgl also known as BUG:1409180338 */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
@@ -986,11 +1008,10 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
- if ((cur ^ wa->val) & wa->read) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ if ((cur ^ wa->set) & wa->read) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read,
- wa->val, wa->mask);
+ cur, cur & wa->read, wa->set);
return false;
}
@@ -1015,7 +1036,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
+ if (wa->clr)
+ intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
+ else
+ intel_uncore_write_fw(uncore, wa->reg, wa->set);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
wa_verify(wa,
intel_uncore_read_fw(uncore, wa->reg),
@@ -1239,6 +1263,7 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ * Wa_1408556865:tgl
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
@@ -1249,6 +1274,12 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ /* Wa_1808121037:tgl */
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
+
+ /* Wa_1806527549:tgl */
+ whitelist_reg(w, HIZ_CHICKEN);
break;
default:
break;
@@ -1315,19 +1346,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
- /* Wa_1606700617:tgl */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1607138336:tgl */
+ /*
+ * Wa_1607138336:tgl
+ * Wa_1607063988:tgl
+ */
wa_write_or(wal,
GEN9_CTX_PREEMPT_REG,
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- /* Wa_1607030317:tgl */
- /* Wa_1607186500:tgl */
- /* Wa_1607297627:tgl */
+ /*
+ * Wa_1607030317:tgl
+ * Wa_1607186500:tgl
+ * Wa_1607297627:tgl there are 3 entries for this WA on BSpec: 2
+ * of them say it is fixed on B0, the other one says it is
+ * permanent
+ */
wa_masked_en(wal,
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
@@ -1340,6 +1373,35 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
+
+ /* Wa_1407928979:tgl */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+ /* Wa_1408615072:tgl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+ }
+
+ if (IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
+
+ /* Wa_1409804808:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_PUSH_CONST_DEREF_HOLD_DIS);
+
+ /* Wa_1606700617:tgl */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN(i915, 11)) {
@@ -1405,10 +1467,38 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_SCRATCH2,
GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
0);
+
+ /* WaEnable32PlaneMode:icl */
+ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+ GEN11_ENABLE_32_PLANE_MODE);
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
+ /*
+ * Wa_1408767742:icl[a2..forever],ehl[all]
+ * Wa_1605460711:icl[a0..c0]
+ */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
- if (IS_GEN_RANGE(i915, 9, 11)) {
- /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
+ if (IS_GEN_RANGE(i915, 9, 12)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1452,6 +1542,52 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
}
+
+ if (IS_GEN(i915, 7))
+ /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
+ wa_masked_en(wal,
+ GFX_MODE_GEN7,
+ GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+
+ if (IS_GEN_RANGE(i915, 6, 7))
+ /*
+ * We need to disable the AsyncFlip performance optimisations in
+ * order to use MI_WAIT_FOR_EVENT within the CS. It should
+ * already be programmed to '1' on all products.
+ *
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+ */
+ wa_masked_en(wal,
+ MI_MODE,
+ ASYNC_FLIP_PERF_DISABLE);
+
+ if (IS_GEN(i915, 6)) {
+ /*
+ * Required for the hardware to program scanline values for
+ * waiting
+ * WaEnableFlushTlbInvalidationMode:snb
+ */
+ wa_masked_en(wal,
+ GFX_MODE,
+ GFX_TLB_INVALIDATE_EXPLICIT);
+
+ /*
+ * From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ wa_masked_dis(wal,
+ CACHE_MODE_0,
+ CM0_STC_EVICT_DISABLE_LRA_SNB);
+ }
+
+ if (IS_GEN_RANGE(i915, 4, 6))
+ /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+ wa_add(wal, MI_MODE,
+ 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+ /* XXX bit doesn't stick on Broadwater */
+ IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
}
static void
@@ -1470,7 +1606,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+ if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
return;
if (engine->class == RENDER_CLASS)
@@ -1483,7 +1619,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
- if (INTEL_GEN(engine->i915) < 8)
+ if (INTEL_GEN(engine->i915) < 4)
return;
wa_init_start(wal, "engine", engine->name);
@@ -1626,6 +1762,16 @@ static int engine_wa_list_verify(struct intel_context *ce,
goto err_vma;
}
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err) {
+ i915_request_add(rq);
+ goto err_vma;
+ }
+
err = wa_list_srm(rq, wal, vma);
if (err)
goto err_vma;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index e27ab1b710b3..d166a7145720 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -13,8 +13,8 @@
struct i915_wa {
i915_reg_t reg;
- u32 mask;
- u32 val;
+ u32 clr;
+ u32 set;
u32 read;
};
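
For reference, the read member kept next to clr/set is only consulted at verification time; a minimal sketch of the check wa_verify() performs above, using the renamed fields:

	static bool wa_still_applied(const struct i915_wa *wa, u32 cur)
	{
		/* only bits covered by wa->read are expected to stick */
		return ((cur ^ wa->set) & wa->read) == 0;
	}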
diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
new file mode 100644
index 000000000000..610ca7687735
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
+ */
+
+static const u32 ivb_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index f2806381733f..4a53ded7c2dd 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -65,6 +65,9 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
return NULL;
}
i915_active_init(&ring->vma->active, NULL, NULL);
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
+ ring->vma->node.size = sz;
intel_ring_update_space(ring);
@@ -241,9 +244,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 43d4d589749f..697114dd1f47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -142,6 +142,24 @@ out:
return err;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@@ -152,9 +170,11 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
}
@@ -172,9 +192,11 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_pulse);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err && err != -ENODEV)
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3e5e6c86e843..2b2efff6e19d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -268,7 +268,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
@@ -1640,7 +1640,7 @@ static int igt_reset_engines_atomic(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
igt_global_reset_lock(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index fd3770e48ac7..a912159693fd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -18,10 +18,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
- if (!get_ia_constants(llc, &consts)) {
- err = -ENODEV;
+ if (!get_ia_constants(llc, &consts))
goto out_rpm;
- }
for (gpu_freq = consts.min_gpu_freq;
gpu_freq <= consts.max_gpu_freq;
@@ -71,10 +69,5 @@ out_rpm:
int st_llc_verify(struct intel_llc *llc)
{
- int err = 0;
-
- if (HAS_LLC(llc_to_gt(llc)->i915))
- err = gen6_verify_ring_freq(llc);
-
- return err;
+ return gen6_verify_ring_freq(llc);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b292f8cbd0bf..6f06ba750a0a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -68,6 +68,71 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
engine->props.heartbeat_interval_ms = saved;
}
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_is_active(rq))
+ return 0;
+
+ if (i915_request_started(rq)) /* that was quick! */
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIME;
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -285,6 +350,84 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much, for example
+ * ring->head is updated upon retire which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+ * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
static int live_hold_reset(void *arg)
{
struct intel_gt *gt = arg;
@@ -386,6 +529,152 @@ out:
return err;
}
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ unsigned long heartbeat;
+ int err = 0;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+ memset(client, 0, sizeof(*client));
+ memset(client, 0, sizeof(client));
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_debug("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request completed with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ engine_heartbeat_enable(engine, heartbeat);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -580,6 +869,10 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
for_each_prime_number_from(count, 1, 16) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -614,33 +907,227 @@ err_obj:
return err;
}
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
struct i915_request *rq;
+ u32 *cs;
+ int err;
- rq = intel_engine_create_kernel_request(engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
i915_request_get(rq);
i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
return rq;
}
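
The MI_SEMAPHORE_WAIT emitted above (polling mode, SAD_NEQ_SDD, inline semaphore data 0) stalls the request until the status-page dword at offset becomes non-zero, so the CPU can release it with a plain store. A minimal sketch of the release side, matching the slot[0] = 1 pattern used later in this test:

	static void release_semaphore(u32 *slot)
	{
		WRITE_ONCE(*slot, 1); /* any non-zero value satisfies NEQ */
		wmb();                /* make the store visible to the GPU poller */
	}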
-static int wait_for_submit(struct intel_engine_cs *engine,
- struct i915_request *rq,
- unsigned long timeout)
+static int live_timeslice_rewind(void *arg)
{
- timeout += jiffies;
- do {
- cond_resched();
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * The usual presumption on timeslice expiration is that we replace
+ * the active context with another. However, given a chain of
+ * dependencies we may end up replacing the context with itself,
+ * but resubmitting only a few of its requests, forcing us to rewind the
+ * RING_TAIL of the original request.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Y, Z };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ engine_heartbeat_disable(engine, &heartbeat);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[0] = create_rewinder(ce, NULL, slot, 1);
+ if (IS_ERR(rq[0])) {
+ intel_context_put(ce);
+ goto err;
+ }
+
+ rq[1] = create_rewinder(ce, NULL, slot, 2);
+ intel_context_put(ce);
+ if (IS_ERR(rq[1]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[2] = create_rewinder(ce, rq[0], slot, 3);
+ intel_context_put(ce);
+ if (IS_ERR(rq[2]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+ GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[A2]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
intel_engine_flush_submission(engine);
- if (i915_request_is_active(rq))
- return 0;
- } while (time_before(jiffies, timeout));
- return -ETIME;
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ ;
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ engine_heartbeat_enable(engine, heartbeat);
+ for (i = 0; i < 3; i++)
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
}
static long timeslice_threshold(const struct intel_engine_cs *engine)
@@ -688,6 +1175,10 @@ static int live_timeslice_queue(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
@@ -774,6 +1265,7 @@ err_heartbeat:
break;
}
+err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
@@ -832,6 +1324,10 @@ static int live_busywait_preempt(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
@@ -1352,14 +1848,9 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1417,10 +1908,9 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
igt_spinner_end(&arg->a.spin);
- if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != 0) {
pr_err("Normal inflight0 request did not complete\n");
@@ -1500,10 +1990,9 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != -EIO) {
pr_err("Cancelled inflight0 request did not report -EIO\n");
@@ -1561,14 +2050,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1656,7 +2140,7 @@ static int live_suppress_self_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0; /* presume black box */
if (intel_vgpu_active(gt->i915))
@@ -2279,117 +2763,6 @@ static int live_preempt_gang(void *arg)
return 0;
}
-static int live_preempt_hang(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- GEM_TRACE("lo spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- init_completion(&engine->execlists.preempt_hang.completion);
- engine->execlists.preempt_hang.inject_hang = true;
-
- i915_request_add(rq);
-
- if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
- HZ / 10)) {
- pr_err("Preemption did not occur within timeout!");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- intel_engine_reset(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-
- engine->execlists.preempt_hang.inject_hang = false;
-
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- GEM_TRACE("hi spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-}
-
static int live_preempt_timeout(void *arg)
{
struct intel_gt *gt = arg;
@@ -2882,7 +3255,7 @@ static int live_virtual_engine(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for_each_engine(engine, gt, id) {
@@ -3015,7 +3388,7 @@ static int live_virtual_mask(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3055,6 +3428,10 @@ static int preserved_virtual_engine(struct intel_gt *gt,
if (IS_ERR(scratch))
return PTR_ERR(scratch);
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
@@ -3153,7 +3530,7 @@ static int live_virtual_preserved(void *arg)
* are preserved.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
@@ -3243,15 +3620,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
rq[0] = ERR_PTR(-ENOMEM);
for_each_engine(master, gt, id) {
struct i915_sw_fence fence = {};
+ struct intel_context *ce;
if (master->class == class)
continue;
+ ce = intel_context_create(master);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
- rq[0] = igt_spinner_create_request(&spin,
- master->kernel_context,
- MI_NOOP);
+ rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto out;
@@ -3377,7 +3760,7 @@ static int live_virtual_bond(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3538,7 +3921,7 @@ static int live_virtual_reset(void *arg)
* forgotten.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
if (!intel_has_reset_engine(gt))
@@ -3571,8 +3954,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_pin_rewind),
SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
@@ -3583,7 +3969,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_gang),
- SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
@@ -3631,6 +4016,62 @@ static void hexdump(const void *buf, size_t len)
}
}
+static int emit_semaphore_signal(struct intel_context *ce, void *slot)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+ return 0;
+}
+
+static int context_flush(struct intel_context *ce, long timeout)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+ int err = 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ rmb(); /* We know the request is written, make sure all state is too! */
+ return err;
+}
+
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
@@ -3797,6 +4238,11 @@ static int live_lrc_fixed(void *arg)
CTX_BB_STATE - 1,
"BB_STATE"
},
+ {
+ i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
+ CTX_TIMESTAMP - 1,
+ "RING_CTX_TIMESTAMP"
+ },
{ },
}, *t;
u32 *hw;
@@ -3880,8 +4326,16 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
*cs++ = 0;
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+
i915_request_get(rq);
i915_request_add(rq);
+ if (err)
+ goto err_rq;
intel_engine_flush_submission(engine);
expected[RING_TAIL_IDX] = ce->ring->tail;
@@ -3947,13 +4401,13 @@ static int live_lrc_state(void *arg)
return err;
}
-static int gpr_make_dirty(struct intel_engine_cs *engine)
+static int gpr_make_dirty(struct intel_context *ce)
{
struct i915_request *rq;
u32 *cs;
int n;
- rq = intel_engine_create_kernel_request(engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -3965,20 +4419,79 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
for (n = 0; n < NUM_GPR_DW; n++) {
- *cs++ = CS_GPR(engine, n);
+ *cs++ = CS_GPR(ce->engine, n);
*cs++ = STACK_MAGIC;
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_add(rq);
return 0;
}
-static int __live_gpr_clear(struct intel_engine_cs *engine,
- struct i915_vma *scratch)
+static struct i915_request *
+__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return ERR_CAST(cs);
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(ce->engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ rq = ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static int __live_lrc_gpr(struct intel_engine_cs *engine,
+ struct i915_vma *scratch,
+ bool preempt)
+{
+ u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
struct intel_context *ce;
struct i915_request *rq;
u32 *cs;
@@ -3988,7 +4501,7 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
return 0; /* GPR only on rcs0 for gen8 */
- err = gpr_make_dirty(engine);
+ err = gpr_make_dirty(engine->kernel_context);
if (err)
return err;
@@ -3996,28 +4509,28 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
if (IS_ERR(ce))
return PTR_ERR(ce);
- rq = intel_context_create_request(ce);
+ rq = __gpr_read(ce, scratch, slot);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_put;
}
- cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(rq);
- goto err_put;
- }
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
- for (n = 0; n < NUM_GPR_DW; n++) {
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = CS_GPR(engine, n);
- *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
- *cs++ = 0;
- }
+ if (preempt) {
+ err = gpr_make_dirty(engine->kernel_context);
+ if (err)
+ goto err_rq;
- i915_request_get(rq);
- i915_request_add(rq);
+ err = emit_semaphore_signal(engine->kernel_context, slot);
+ if (err)
+ goto err_rq;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
@@ -4044,13 +4557,15 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
i915_gem_object_unpin_map(scratch->obj);
err_rq:
+ memset32(&slot[0], -1, 4);
+ wmb();
i915_request_put(rq);
err_put:
intel_context_put(ce);
return err;
}
-static int live_gpr_clear(void *arg)
+static int live_lrc_gpr(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -4068,7 +4583,971 @@ static int live_gpr_clear(void *arg)
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_gpr_clear(engine, scratch);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ err = __live_lrc_gpr(engine, scratch, false);
+ if (err)
+ goto err;
+
+ err = __live_lrc_gpr(engine, scratch, true);
+ if (err)
+ goto err;
+
+err:
+ engine_heartbeat_enable(engine, heartbeat);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static struct i915_request *
+create_timestamp(struct intel_context *ce, void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+struct lrc_timestamp {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce[2];
+ u32 poison;
+};
+
+static bool timestamp_advanced(u32 start, u32 end)
+{
+ return (s32)(end - start) > 0;
+}
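
timestamp_advanced() relies on signed two's-complement subtraction so the comparison stays correct across u32 wraparound; a standalone sketch of the same test:

	#include <assert.h>
	#include <stdint.h>

	static int advanced(uint32_t start, uint32_t end)
	{
		return (int32_t)(end - start) > 0; /* same test as above */
	}

	int main(void)
	{
		assert(advanced(0xfffffffeu, 1u));  /* wrapped, still monotonic */
		assert(!advanced(1u, 0xfffffffeu)); /* moved backwards */
		return 0;
	}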
+
+static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
+{
+ u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
+ struct i915_request *rq;
+ u32 timestamp;
+ int err = 0;
+
+ arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
+ rq = create_timestamp(arg->ce[0], slot, 1);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = wait_for_submit(rq->engine, rq, HZ / 2);
+ if (err)
+ goto err;
+
+ if (preempt) {
+ arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
+ err = emit_semaphore_signal(arg->ce[1], slot);
+ if (err)
+ goto err;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
+
+ /* And wait for switch to kernel (to save our context to memory) */
+ err = context_flush(arg->ce[0], HZ / 2);
+ if (err)
+ goto err;
+
+ if (!timestamp_advanced(arg->poison, slot[1])) {
+ pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ arg->poison, slot[1]);
+ err = -EINVAL;
+ }
+
+ timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
+ if (!timestamp_advanced(slot[1], timestamp)) {
+ pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ slot[1], timestamp);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(slot, -1, 4);
+ i915_request_put(rq);
+ return err;
+}
+
+static int live_lrc_timestamp(void *arg)
+{
+ struct lrc_timestamp data = {};
+ struct intel_gt *gt = arg;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ 0,
+ S32_MAX,
+ (u32)S32_MAX + 1,
+ U32_MAX,
+ };
+
+ /*
+ * We want to verify that the timestamp is saved and restored across
+ * context switches and is monotonic.
+ *
+ * So we do this with a little bit of LRC poisoning to check various
+ * boundary conditions, and see what happens if we preempt the context
+ * with a second request (carrying more poison into the timestamp).
+ */
+
+ for_each_engine(data.engine, gt, id) {
+ unsigned long heartbeat;
+ int i, err = 0;
+
+ engine_heartbeat_disable(data.engine, &heartbeat);
+
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(data.engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err;
+ }
+
+ data.ce[i] = tmp;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ data.poison = poison[i];
+
+ err = __lrc_timestamp(&data, false);
+ if (err)
+ break;
+
+ err = __lrc_timestamp(&data, true);
+ if (err)
+ break;
+ }
+
+err:
+ engine_heartbeat_enable(data.engine, heartbeat);
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ if (!data.ce[i])
+ break;
+
+ intel_context_unpin(data.ce[i]);
+ intel_context_put(data.ce[i]);
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_user_vma(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_vma *
+store_context(struct intel_context *ce, struct i915_vma *scratch)
+{
+ struct i915_vma *batch;
+ u32 dw, x, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ x = 0;
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = hw[dw];
+ *cs++ = lower_32_bits(scratch->node.start + x);
+ *cs++ = upper_32_bits(scratch->node.start + x);
+
+ dw += 2;
+ x += 4;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
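
store_context() above (and load_context() below) walk the default context image looking for MI_LOAD_REGISTER_IMM packets; a minimal standalone sketch of that walk, assuming the gen8+ encoding the loop decodes (opcode 0x22 in bits 31:23, DWord length in the low 7 bits, then register/value pairs):

	#include <stdint.h>
	typedef uint32_t u32;

	/* count the register/value pairs carried by LRI packets */
	static unsigned int count_lri_pairs(const u32 *hw, unsigned int len_dw)
	{
		unsigned int dw = 0, pairs = 0;

		/* 0x05000000 is MI_BATCH_BUFFER_END; bit 0 may carry a flag */
		while (dw < len_dw && (hw[dw] & ~1u) != 0x05000000) {
			u32 len = hw[dw] & 0x7f;

			if (hw[dw] == 0) {	/* padding noop */
				dw++;
				continue;
			}
			if ((hw[dw] >> 23) != 0x22) {	/* not an LRI */
				dw += len + 2;	/* total length is len + 2 dwords */
				continue;
			}
			len = (len + 1) / 2;	/* reg/value pairs in this LRI */
			pairs += len;
			dw += 1 + 2 * len;
		}
		return pairs;
	}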
+
+static int move_to_active(struct i915_request *rq,
+ struct i915_vma *vma,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, flags);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static struct i915_request *
+record_registers(struct intel_context *ce,
+ struct i915_vma *before,
+ struct i915_vma *after,
+ u32 *sema)
+{
+ struct i915_vma *b_before, *b_after;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ b_before = store_context(ce, before);
+ if (IS_ERR(b_before))
+ return ERR_CAST(b_before);
+
+ b_after = store_context(ce, after);
+ if (IS_ERR(b_after)) {
+ rq = ERR_CAST(b_after);
+ goto err_before;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto err_after;
+
+ err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_before, 0);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_after, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_before->node.start);
+ *cs++ = upper_32_bits(b_before->node.start);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_after->node.start);
+ *cs++ = upper_32_bits(b_after->node.start);
+
+ intel_ring_advance(rq, cs);
+
+ WRITE_ONCE(*sema, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+err_after:
+ i915_vma_put(b_after);
+err_before:
+ i915_vma_put(b_before);
+ return rq;
+
+err_rq:
+ i915_request_add(rq);
+ rq = ERR_PTR(err);
+ goto err_after;
+}
+
+static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
+{
+ struct i915_vma *batch;
+ u32 dw, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ *cs++ = MI_LOAD_REGISTER_IMM(len);
+ while (len--) {
+ *cs++ = hw[dw];
+ *cs++ = poison;
+ dw += 2;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ u32 *cs;
+ int err;
+
+ batch = load_context(ce, poison);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = move_to_active(rq, batch, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(batch->node.start);
+ *cs++ = upper_32_bits(batch->node.start);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+err_rq:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_put(batch);
+ return err;
+}
+
+static bool is_moving(u32 a, u32 b)
+{
+ return a != b;
+}
+
+static int compare_isolation(struct intel_engine_cs *engine,
+ struct i915_vma *ref[2],
+ struct i915_vma *result[2],
+ struct intel_context *ce,
+ u32 poison)
+{
+ u32 x, dw, *hw, *lrc;
+ u32 *A[2], *B[2];
+ int err = 0;
+
+ A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ if (IS_ERR(A[0]))
+ return PTR_ERR(A[0]);
+
+ A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ if (IS_ERR(A[1])) {
+ err = PTR_ERR(A[1]);
+ goto err_A0;
+ }
+
+ B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ if (IS_ERR(B[0])) {
+ err = PTR_ERR(B[0]);
+ goto err_A1;
+ }
+
+ B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ if (IS_ERR(B[1])) {
+ err = PTR_ERR(B[1]);
+ goto err_B0;
+ }
+
+ lrc = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
+ goto err_B1;
+ }
+ lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ x = 0;
+ dw = 0;
+ hw = engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ if (!is_moving(A[0][x], A[1][x]) &&
+ (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
+ switch (hw[dw] & 4095) {
+ case 0x30: /* RING_HEAD */
+ case 0x34: /* RING_TAIL */
+ break;
+
+ default:
+ pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
+ engine->name, dw,
+ hw[dw], hw[dw + 1],
+ A[0][x], B[0][x], B[1][x],
+ poison, lrc[dw + 1]);
+ err = -EINVAL;
+ break;
+ }
+ }
+ dw += 2;
+ x++;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ i915_gem_object_unpin_map(ce->state->obj);
+err_B1:
+ i915_gem_object_unpin_map(result[1]->obj);
+err_B0:
+ i915_gem_object_unpin_map(result[0]->obj);
+err_A1:
+ i915_gem_object_unpin_map(ref[1]->obj);
+err_A0:
+ i915_gem_object_unpin_map(ref[0]->obj);
+ return err;
+}
+
+static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
+{
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
+ struct i915_vma *ref[2], *result[2];
+ struct intel_context *A, *B;
+ struct i915_request *rq;
+ int err;
+
+ A = intel_context_create(engine);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ B = intel_context_create(engine);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto err_A;
+ }
+
+ ref[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[0])) {
+ err = PTR_ERR(ref[0]);
+ goto err_B;
+ }
+
+ ref[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[1])) {
+ err = PTR_ERR(ref[1]);
+ goto err_ref0;
+ }
+
+ rq = record_registers(A, ref[0], ref[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ref1;
+ }
+
+ WRITE_ONCE(*sema, 1);
+ wmb();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ref1;
+ }
+ i915_request_put(rq);
+
+ result[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[0])) {
+ err = PTR_ERR(result[0]);
+ goto err_ref1;
+ }
+
+ result[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[1])) {
+ err = PTR_ERR(result[1]);
+ goto err_result0;
+ }
+
+ rq = record_registers(A, result[0], result[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_result1;
+ }
+
+ err = poison_registers(B, poison, sema);
+ if (err) {
+ WRITE_ONCE(*sema, -1);
+ i915_request_put(rq);
+ goto err_result1;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_result1;
+ }
+ i915_request_put(rq);
+
+ err = compare_isolation(engine, ref, result, A, poison);
+
+err_result1:
+ i915_vma_put(result[1]);
+err_result0:
+ i915_vma_put(result[0]);
+err_ref1:
+ i915_vma_put(ref[1]);
+err_ref0:
+ i915_vma_put(ref[0]);
+err_B:
+ intel_context_put(B);
+err_A:
+ intel_context_put(A);
+ return err;
+}
+
+static bool skip_isolation(const struct intel_engine_cs *engine)
+{
+ if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+ return true;
+
+ if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+ return true;
+
+ return false;
+}
+
+static int live_lrc_isolation(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ STACK_MAGIC,
+ 0x3a3a3a3a,
+ 0x5c5c5c5c,
+ 0xffffffff,
+ 0xffff0000,
+ };
+
+ /*
+ * Our goal is to try to verify that per-context state cannot be
+ * tampered with by another non-privileged client.
+ *
+ * We take the list of context registers from the LRI in the default
+ * context image and attempt to modify that list from a remote context.
+ */
+
+ for_each_engine(engine, gt, id) {
+ int err = 0;
+ int i;
+
+ /* Just don't even ask */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
+ skip_isolation(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+ if (engine->pinned_default_state) {
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ err = __lrc_isolation(engine, poison[i]);
+ if (err)
+ break;
+
+ err = __lrc_isolation(engine, ~poison[i]);
+ if (err)
+ break;
+ }
+ }
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void garbage_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ tasklet_disable(&engine->execlists.tasklet);
+
+ if (!rq->fence.error)
+ intel_engine_reset(engine, NULL);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static struct i915_request *garbage(struct intel_context *ce,
+ struct rnd_state *prng)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (err)
+ return ERR_PTR(err);
+
+ prandom_bytes_state(prng,
+ ce->lrc_reg_state,
+ ce->engine->context_size -
+ LRC_STATE_PN * PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+
+err_unpin:
+ intel_context_unpin(ce);
+ return ERR_PTR(err);
+}
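+
+/*
+ * Note on the scribble above: ce->lrc_reg_state points at the
+ * register-state page of the context image (LRC_STATE_PN pages in),
+ * so prandom_bytes_state() corrupts everything from the register
+ * state to the end of the context image -- exactly the "completely
+ * corrupted" state the test below wants to recover from.
+ */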
+
+static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct intel_context *ce;
+ struct i915_request *hang;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ hang = garbage(ce, prng);
+ if (IS_ERR(hang)) {
+ err = PTR_ERR(hang);
+ goto err_ce;
+ }
+
+ if (wait_for_submit(engine, hang, HZ / 2)) {
+ i915_request_put(hang);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ intel_context_set_banned(ce);
+ garbage_reset(engine, hang);
+
+ intel_engine_flush_submission(engine);
+ if (!hang->fence.error) {
+ i915_request_put(hang);
+ pr_err("%s: corrupted context was not reset\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_ce;
+ }
+
+ if (i915_request_wait(hang, 0, HZ / 2) < 0) {
+ pr_err("%s: corrupted context did not recover\n",
+ engine->name);
+ i915_request_put(hang);
+ err = -EIO;
+ goto err_ce;
+ }
+ i915_request_put(hang);
+
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_garbage(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Verify that we can recover if one context state is completely
+ * corrupted.
+ */
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ I915_RND_STATE(prng);
+ int err = 0, i;
+
+ if (!intel_has_reset_engine(engine->gt))
+ continue;
+
+ intel_engine_pm_get(engine);
+ for (i = 0; i < 3; i++) {
+ err = __lrc_garbage(engine, &prng);
+ if (err)
+ break;
+ }
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ ce->runtime.num_underflow = 0;
+ ce->runtime.max_underflow = 0;
+
+ do {
+ unsigned int loop = 1024;
+
+ while (loop) {
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_rq;
+ }
+
+ if (--loop == 0)
+ i915_request_get(rq);
+
+ i915_request_add(rq);
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+
+ i915_request_put(rq);
+ } while (1);
+
+ err = i915_request_wait(rq, 0, HZ / 5);
+ if (err < 0) {
+ pr_err("%s: request not completed!\n", engine->name);
+ goto err_wait;
+ }
+
+ igt_flush_test(engine->i915);
+
+ pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
+ engine->name,
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
+
+ err = 0;
+ if (ce->runtime.num_underflow) {
+ pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
+ engine->name,
+ ce->runtime.num_underflow,
+ ce->runtime.max_underflow);
+ GEM_TRACE_DUMP();
+ err = -EOVERFLOW;
+ }
+
+err_wait:
+ i915_request_put(rq);
+err_rq:
+ intel_context_put(ce);
+ return err;
+}
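+
+/*
+ * A minimal sketch (hypothetical helper, unused by the test) of the
+ * counter under scrutiny: the per-process HWSP accumulates the
+ * context's runtime in CS timestamp ticks at dword 16, which the
+ * context-save path folds into ce->runtime for the figures printed
+ * above.
+ */
+static inline u32 pphwsp_runtime_ticks(const u32 *pphwsp)
+{
+ return READ_ONCE(pphwsp[16]); /* cumulative runtime in CS ticks */
+}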
+
+static int live_pphwsp_runtime(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that cumulative context runtime as stored in the pphwsp[16]
+ * is monotonic.
+ */
+
+ for_each_engine(engine, gt, id) {
+ err = __live_pphwsp_runtime(engine);
if (err)
break;
}
@@ -4076,7 +5555,6 @@ static int live_gpr_clear(void *arg)
if (igt_flush_test(gt->i915))
err = -EIO;
- i915_vma_unpin_and_release(&scratch, 0);
return err;
}
@@ -4086,7 +5564,11 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_layout),
SUBTEST(live_lrc_fixed),
SUBTEST(live_lrc_state),
- SUBTEST(live_gpr_clear),
+ SUBTEST(live_lrc_gpr),
+ SUBTEST(live_lrc_isolation),
+ SUBTEST(live_lrc_timestamp),
+ SUBTEST(live_lrc_garbage),
+ SUBTEST(live_pphwsp_runtime),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index de1f83100fb6..8831ffee2061 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -12,7 +12,8 @@
#include "selftests/igt_spinner.h"
struct live_mocs {
- struct drm_i915_mocs_table table;
+ struct drm_i915_mocs_table mocs;
+ struct drm_i915_mocs_table l3cc;
struct i915_vma *scratch;
void *vaddr;
};
@@ -70,11 +71,22 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
+ struct drm_i915_mocs_table table;
+ unsigned int flags;
int err;
- if (!get_mocs_settings(gt->i915, &arg->table))
+ memset(arg, 0, sizeof(*arg));
+
+ flags = get_mocs_settings(gt->i915, &table);
+ if (!flags)
return -EINVAL;
+ if (flags & HAS_RENDER_L3CC)
+ arg->l3cc = table;
+
+ if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
+ arg->mocs = table;
+
arg->scratch = create_scratch(gt);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
@@ -223,9 +235,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
if (!err)
- err = read_mocs_table(rq, &arg->table, &offset);
+ err = read_mocs_table(rq, &arg->mocs, &offset);
if (!err && ce->engine->class == RENDER_CLASS)
- err = read_l3cc_table(rq, &arg->table, &offset);
+ err = read_l3cc_table(rq, &arg->l3cc, &offset);
offset -= i915_ggtt_offset(vma);
GEM_BUG_ON(offset > PAGE_SIZE);
@@ -236,9 +248,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Compare the results against the expected tables */
vaddr = arg->vaddr;
if (!err)
- err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
if (!err && ce->engine->class == RENDER_CLASS)
- err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 8cc55a0e9e06..95b165faeba7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -12,6 +12,21 @@
#include "selftests/i915_random.h"
+static u64 rc6_residency(struct intel_rc6 *rc6)
+{
+ u64 result;
+
+ /* XXX VLV_GT_MEDIA_RC6? */
+
+ result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ if (HAS_RC6p(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);
+
+ return result;
+}
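+
+/*
+ * Summing the RC6, RC6p and RC6pp counters (where the platform has
+ * them) yields a total residency that grows regardless of which RC6
+ * level the GPU actually entered, which is all the monotonicity
+ * checks below care about.
+ */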
+
int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
@@ -38,9 +53,9 @@ int live_rc6_manual(void *arg)
__intel_rc6_disable(rc6);
msleep(1); /* wakeup is not immediate, takes about 100us on icl */
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
msleep(250);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
(res[1] - res[0]) >> 10);
@@ -51,14 +66,15 @@ int live_rc6_manual(void *arg)
/* Manually enter RC6 */
intel_rc6_park(rc6);
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
msleep(100);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
- pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+ pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
- intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL),
+ res[0]);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 6ad6aca315f6..35406ecdf0b2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -115,7 +115,7 @@ static int igt_atomic_engine_reset(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
intel_gt_pm_get(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
new file mode 100644
index 000000000000..9995faadd7e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static struct i915_vma *create_wally(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ err = i915_vma_sync(vma);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(cs);
+ }
+
+ if (INTEL_GEN(engine->i915) >= 6) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = 0;
+ } else if (INTEL_GEN(engine->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ }
+ *cs++ = vma->node.start + 4000;
+ *cs++ = STACK_MAGIC;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ vma = ERR_CAST(vma->private);
+ i915_gem_object_put(obj);
+ }
+
+ return vma;
+}
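+
+/*
+ * The arithmetic tying this batch to its reader: the store above lands
+ * at byte offset 4000 within the object's page, and
+ * __live_ctx_switch_wa() points at the same spot via "result += 1000"
+ * -- 1000 u32s == 4000 bytes.
+ */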
+
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int new_context_sync(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = context_sync(ce);
+ intel_context_put(ce);
+
+ return err;
+}
+
+static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
+{
+ int pass;
+ int err;
+
+ for (pass = 0; pass < 2; pass++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(engine->kernel_context);
+ if (err || READ_ONCE(*result)) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb emitted for the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+
+ err = context_sync(engine->kernel_context);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
+{
+ struct i915_vma *bb;
+ u32 *result;
+ int err;
+
+ bb = create_wally(engine);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ intel_context_put(bb->private);
+ i915_vma_unpin_and_release(&bb, 0);
+ return PTR_ERR(result);
+ }
+ result += 1000;
+
+ engine->wa_ctx.vma = bb;
+
+ err = mixed_contexts_sync(engine, result);
+ if (err)
+ goto out;
+
+ err = double_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+ err = kernel_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+out:
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
+ return err;
+}
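+
+/*
+ * Summary of the marker behaviour the helpers above assert: entering
+ * the kernel context never runs the wa_bb (the marker stays 0);
+ * switching to a different user context (from the kernel or from
+ * another user context) runs it, leaving STACK_MAGIC behind; and
+ * re-running the same user context, even with an intervening kernel
+ * context, does not.
+ */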
+
+static int live_ctx_switch_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Exercise the inter-context wa batch.
+ *
+ * Between each user context we run a wa batch, and since it may
+ * have implications for user visible state, we have to check that
+ * we do actually execute it.
+ *
+ * The trick we use is to replace the normal wa batch with a custom
+ * one that writes to a marker within it, and we can then look for
+ * that marker to confirm if the batch was run when we expect it,
+ * and, equally important, that it wasn't run when we don't!
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_vma *saved_wa;
+ int err;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (IS_GEN_RANGE(gt->i915, 4, 5))
+ continue; /* MI_STORE_DWORD is privileged! */
+
+ saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+ intel_engine_pm_get(engine);
+ err = __live_ctx_switch_wa(engine);
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ engine->wa_ctx.vma = saved_wa;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_ctx_switch_wa),
+ };
+
+ if (HAS_EXECLISTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index e2d78cc22fb4..c2578a0f2f14 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,6 +6,8 @@
#include <linux/prime_numbers.h>
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
@@ -604,7 +606,6 @@ static int live_hwsp_alternate(void *arg)
tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
- intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
@@ -750,6 +751,189 @@ out_free:
return err;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
+static int live_hwsp_rollover_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Run the host for long enough, and even the kernel context will
+ * see a seqno rollover.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct intel_timeline *tl = ce->timeline;
+ struct i915_request *rq[3] = {};
+ unsigned long heartbeat;
+ int i;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+ if (intel_gt_wait_for_idle(gt, HZ / 2)) {
+ err = -EIO;
+ goto out;
+ }
+
+ GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
+ tl->seqno = 0;
+ timeline_rollback(tl);
+ timeline_rollback(tl);
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = i915_request_create(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out;
+ }
+
+ pr_debug("%s: create fence.seqnp:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ engine_heartbeat_enable(engine, heartbeat);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int live_hwsp_rollover_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Simulate a long-running user context and force the seqno wrap
+ * on the user's timeline.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq[3] = {};
+ struct intel_timeline *tl;
+ struct intel_context *ce;
+ int i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_alloc_state(ce);
+ if (err)
+ goto out;
+
+ tl = ce->timeline;
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out;
+
+ timeline_rollback(tl);
+ timeline_rollback(tl);
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = intel_context_create_request(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out;
+ }
+
+ pr_debug("%s: create fence.seqnp:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
static int live_hwsp_recycle(void *arg)
{
struct intel_gt *gt = arg;
@@ -827,6 +1011,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
SUBTEST(live_hwsp_wrap),
+ SUBTEST(live_hwsp_rollover_kernel),
+ SUBTEST(live_hwsp_rollover_user),
};
if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index ac1921854cbf..5ed323254ee1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -583,6 +583,15 @@ static int check_dirty_whitelist(struct intel_context *ce)
if (err)
goto err_request;
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(scratch, rq,
+ EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+ if (err)
+ goto err_request;
+
err = engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
0);
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 000000000000..8f9b2f33dbaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "sysfs_engines.h"
+
+struct kobj_engine {
+ struct kobject base;
+ struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute mmio_attr =
+__ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static const char * const vcs_caps[] = {
+ [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static const char * const vecs_caps[] = {
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+ /* Trim off the trailing space and replace with a newline */
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len > 0)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+__caps_show(struct intel_engine_cs *engine,
+ u32 caps, char *buf, bool show_unknown)
+{
+ const char * const *repr;
+ int count, n;
+ ssize_t len;
+
+ BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ repr = vcs_caps;
+ count = ARRAY_SIZE(vcs_caps);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ repr = vecs_caps;
+ count = ARRAY_SIZE(vecs_caps);
+ break;
+
+ default:
+ repr = NULL;
+ count = 0;
+ break;
+ }
+ GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));
+
+ len = 0;
+ for_each_set_bit(n,
+ (unsigned long *)&caps,
+ show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
+ if (n >= count || !repr[n]) {
+ if (GEM_WARN_ON(show_unknown))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "[%x] ", n);
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s ", repr[n]);
+ }
+ if (GEM_WARN_ON(len >= PAGE_SIZE))
+ break;
+ }
+ return repr_trim(buf, len);
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * When waiting for a request, if it is currently being executed
+ * on the GPU, we busywait for a short while before sleeping. The
+ * premise is that most requests are short, and if one is already
+ * executing then there is a good chance that it will complete
+ * before we can set up the interrupt handler and go to sleep.
+ * We try to offset the cost of going to sleep by first spinning
+ * on the request -- if it completes in less time than it would
+ * take to go to sleep, process the interrupt and return to the
+ * client, then we have saved the client some latency, albeit at
+ * the cost of spinning on an expensive CPU core.
+ *
+ * While we try to avoid waiting at all for a request that is
+ * unlikely to complete, deciding how long it is worth spinning
+ * for is an arbitrary decision: trading off power vs latency.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_nsecs(2))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+ return count;
+}
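+
+/*
+ * Illustrative sketch (a hypothetical helper, not part of the
+ * interface) of the optimistic spin described above: busywait up to
+ * the configured budget before falling back to the interrupt-driven
+ * sleep.
+ */
+static bool __maybe_unused try_busywait(const struct i915_request *rq,
+ u64 budget_ns)
+{
+ u64 end = ktime_get_raw_ns() + budget_ns;
+
+ do {
+ if (i915_request_completed(rq))
+ return true; /* completed while spinning; no irq needed */
+ cpu_relax();
+ } while (ktime_get_raw_ns() < end);
+
+ return false; /* give up and arm the interrupt instead */
+}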
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Execlists uses a scheduling quantum (a timeslice) to alternate
+ * execution between ready-to-run contexts of equal priority. This
+ * ensures that all users (though only if they are of equal importance)
+ * have the opportunity to run and prevents livelocks where contexts
+ * may have implicit ordering due to userspace semaphores.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+ if (execlists_active(&engine->execlists))
+ set_timer_ms(&engine->execlists.timer, duration);
+
+ return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
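+
+/*
+ * Example usage (the sysfs path is illustrative, for the first card):
+ *
+ * echo 5 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
+ *
+ * The new 5ms quantum takes effect immediately, re-arming the active
+ * timeslice timer above.
+ */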
+
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Allowing ourselves to sleep, even for a few milliseconds, before
+ * a GPU reset after disabling submission gives an innocent context
+ * the opportunity to clear the GPU before the reset occurs. However,
+ * how long to sleep depends on the typical non-preemptible duration
+ * (a similar problem to determining the ideal preempt-reset timeout
+ * or even the heartbeat interval).
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+ return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long timeout;
+ int err;
+
+ /*
+ * After initialising a preemption request, we give the current
+ * resident a small amount of time to vacate the GPU. The preemption
+ * request is for a higher priority context and should be immediate to
+ * maintain high quality of service (and avoid priority inversion).
+ * However, the preemption granularity of the GPU can be quite coarse
+ * and so we need a compromise.
+ */
+
+ err = kstrtoull(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ set_timer_ms(&engine->execlists.preempt, timeout);
+
+ return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long delay;
+ int err;
+
+ /*
+ * We monitor the health of the system via periodic heartbeat pulses.
+ * The pulses also provide the opportunity to perform garbage
+ * collection. However, we interpret an incomplete pulse (a missed
+ * heartbeat) as an indication that the system is no longer responsive,
+ * i.e. hung, and perform an engine or full GPU reset. Given that the
+ * preemption granularity can be very coarse on a system, the optimal
+ * value for any workload is unknowable!
+ */
+
+ err = kstrtoull(buf, 0, &delay);
+ if (err)
+ return err;
+
+ if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ err = intel_engine_set_heartbeat(engine, delay);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
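+
+/*
+ * Note: by the same convention as the store above, writing 0 is
+ * expected to stop the heartbeat entirely -- and with it the hang
+ * detection that missed pulses provide -- much as the selftests in
+ * this series do when they park the heartbeat around a test.
+ */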
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+ .release = kobj_engine_release,
+ .sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return NULL;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = engine;
+
+ if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+ kobject_put(&ke->base);
+ return NULL;
+ }
+
+ /* xfer ownership to sysfs tree */
+ return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+ static const struct attribute *files[] = {
+ &name_attr.attr,
+ &class_attr.attr,
+ &inst_attr.attr,
+ &mmio_attr.attr,
+ &caps_attr.attr,
+ &all_caps_attr.attr,
+ &max_spin_attr.attr,
+ &stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_attr.attr,
+#endif
+ NULL
+ };
+
+ struct device *kdev = i915->drm.primary->kdev;
+ struct intel_engine_cs *engine;
+ struct kobject *dir;
+
+ dir = kobject_create_and_add("engine", &kdev->kobj);
+ if (!dir)
+ return;
+
+ for_each_uabi_engine(engine, i915) {
+ struct kobject *kobj;
+
+ kobj = kobj_engine(dir, engine);
+ if (!kobj)
+ goto err_engine;
+
+ if (sysfs_create_files(kobj, files))
+ goto err_object;
+
+ if (intel_engine_has_timeslices(engine) &&
+ sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+ goto err_engine;
+
+ if (intel_engine_has_preempt_reset(engine) &&
+ sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+ goto err_engine;
+
+ if (0) {
+err_object:
+ kobject_put(kobj);
+err_engine:
+ dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 000000000000..9546fffe03a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5d00a3b2d914..819f09ef51fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -207,7 +207,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!intel_guc_is_submission_supported(guc))
+ if (!intel_guc_submission_is_used(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -217,7 +217,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -333,7 +333,7 @@ int intel_guc_init(struct intel_guc *guc)
ret = intel_uc_fw_init(&guc->fw);
if (ret)
- goto err_fetch;
+ goto out;
ret = intel_guc_log_create(&guc->log);
if (ret)
@@ -348,7 +348,7 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_ads;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -364,6 +364,8 @@ int intel_guc_init(struct intel_guc *guc)
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(gt->ggtt);
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
err_ct:
@@ -374,9 +376,8 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_fw:
intel_uc_fw_fini(&guc->fw);
-err_fetch:
- intel_uc_fw_cleanup_fetch(&guc->fw);
- DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
+out:
+ i915_probe_error(gt->i915, "failed with %d\n", ret);
return ret;
}
@@ -384,12 +385,12 @@ void intel_guc_fini(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- if (!intel_uc_fw_is_available(&guc->fw))
+ if (!intel_uc_fw_is_loadable(&guc->fw))
return;
i915_ggtt_disable_guc(gt->ggtt);
- if (intel_guc_is_submission_supported(guc))
+ if (intel_guc_submission_is_used(guc))
intel_guc_submission_fini(guc);
intel_guc_ct_fini(&guc->ct);
@@ -397,9 +398,6 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
- intel_uc_fw_cleanup_fetch(&guc->fw);
-
- intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}
/*
@@ -544,7 +542,7 @@ int intel_guc_suspend(struct intel_guc *guc)
* If GuC communication is enabled but submission is not supported,
* we do not need to suspend the GuC.
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
/*
@@ -609,7 +607,7 @@ int intel_guc_resume(struct intel_guc *guc)
* we do not need to resume the GuC but we do need to enable the
* GuC communication on resume (above).
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -678,8 +676,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(vma))
goto err;
- flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- ret = i915_vma_pin(vma, 0, 0, flags);
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ ret = i915_ggtt_pin(vma, 0, flags);
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 910d49590068..4594ccbeaa34 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -39,7 +39,7 @@ struct intel_guc {
void (*disable)(struct intel_guc *guc);
} interrupts;
- bool submission_supported;
+ bool submission_selected;
struct i915_vma *ads_vma;
struct __guc_ads_blob *ads_blob;
@@ -143,29 +143,36 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
return intel_uc_fw_is_supported(&guc->fw);
}
-static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
return intel_uc_fw_is_enabled(&guc->fw);
}
-static inline bool intel_guc_is_running(struct intel_guc *guc)
+static inline bool intel_guc_is_used(struct intel_guc *guc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&guc->fw);
+}
+
+static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
return intel_uc_fw_is_running(&guc->fw);
}
+static inline bool intel_guc_is_ready(struct intel_guc *guc)
+{
+ return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
+}
+
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
intel_uc_fw_sanitize(&guc->fw);
+ intel_guc_ct_sanitize(&guc->ct);
guc->mmio_msg = 0;
return 0;
}
-static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
-{
- return guc->submission_supported;
-}
-
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
spin_lock_irq(&guc->irq_lock);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index c6f971a049f9..11742fca0e9e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -5,11 +5,15 @@
#include "i915_drv.h"
#include "intel_guc_ct.h"
+#include "gt/intel_gt.h"
+#define CT_ERROR(_ct, _fmt, ...) \
+ DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
-#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__)
+#define CT_DEBUG(_ct, _fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
-#define CT_DEBUG_DRIVER(...) do { } while (0)
+#define CT_DEBUG(...) do { } while (0)
#endif
struct ct_request {
@@ -48,6 +52,21 @@ static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
return container_of(ct, struct intel_guc, ct);
}
+static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
+{
+ return guc_to_gt(ct_to_guc(ct));
+}
+
+static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
+{
+ return ct_to_gt(ct)->i915;
+}
+
+static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
+{
+ return ct_to_i915(ct)->drm.dev;
+}
+
static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
switch (type) {
@@ -63,7 +82,6 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
u32 cmds_addr, u32 size)
{
- CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
memset(desc, 0, sizeof(*desc));
desc->addr = cmds_addr;
desc->size = size;
@@ -72,8 +90,6 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
- CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
- desc, desc->head, desc->tail);
desc->head = 0;
desc->tail = 0;
desc->is_in_error = 0;
@@ -89,31 +105,40 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
sizeof(struct guc_ct_buffer_desc),
type
};
- int err;
/* Can't use generic send(), CT registration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
- if (err)
- DRM_ERROR("CT: register %s buffer failed; err=%d\n",
- guc_ct_buffer_type_to_str(type), err);
+ return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
+{
+ int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);
+
+ if (unlikely(err))
+ CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
-static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
- u32 type)
+static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
CTB_OWNER_HOST,
type
};
- int err;
/* Can't use generic send(), CT deregistration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
- if (err)
- DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
- guc_ct_buffer_type_to_str(type), err);
+ return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
+{
+ int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
+
+ if (unlikely(err))
+ CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
@@ -157,13 +182,12 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
*/
err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
- if (err) {
- DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+ if (unlikely(err)) {
+ CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
return err;
}
- CT_DEBUG_DRIVER("CT: vma base=%#x\n",
- intel_guc_ggtt_offset(guc, ct->vma));
+ CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));
/* store pointers to desc and cmds */
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
@@ -197,7 +221,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- u32 base;
+ u32 base, cmds, size;
int err;
int i;
@@ -212,23 +236,23 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
*/
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- guc_ct_buffer_desc_init(ct->ctbs[i].desc,
- base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
- PAGE_SIZE/4);
+ cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
+ size = PAGE_SIZE / 4;
+ CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
+ guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
}
- /* register buffers, starting wirh RECV buffer
- * descriptors are in first half of the blob
+ /*
+ * Register both CT buffers starting with RECV buffer.
+ * Descriptors are in first half of the blob.
*/
- err = guc_action_register_ct_buffer(guc,
- base + PAGE_SIZE/4 * CTB_RECV,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
goto err_out;
- err = guc_action_register_ct_buffer(guc,
- base + PAGE_SIZE/4 * CTB_SEND,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
if (unlikely(err))
goto err_deregister;
@@ -237,10 +261,9 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
return 0;
err_deregister:
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
- DRM_ERROR("CT: can't open channel; err=%d\n", err);
+ CT_ERROR(ct, "Failed to open open CT channel (err=%d)\n", err);
return err;
}
@@ -256,18 +279,16 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
ct->enabled = false;
- if (intel_guc_is_running(guc)) {
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ if (intel_guc_is_fw_running(guc)) {
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
}
}
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
- return ++ct->requests.next_fence;
+ return ++ct->requests.last_fence;
}
/**
@@ -288,25 +309,33 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
* ^-----------------len-------------------^
*/
-static int ctb_write(struct intel_guc_ct_buffer *ctb,
- const u32 *action,
- u32 len /* in dwords */,
- u32 fence,
- bool want_response)
+static int ct_write(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len /* in dwords */,
+ u32 fence,
+ bool want_response)
{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
struct guc_ct_buffer_desc *desc = ctb->desc;
- u32 head = desc->head / 4; /* in dwords */
- u32 tail = desc->tail / 4; /* in dwords */
- u32 size = desc->size / 4; /* in dwords */
- u32 used; /* in dwords */
+ u32 head = desc->head;
+ u32 tail = desc->tail;
+ u32 size = desc->size;
+ u32 used;
u32 header;
u32 *cmds = ctb->cmds;
unsigned int i;
- GEM_BUG_ON(desc->size % 4);
- GEM_BUG_ON(desc->head % 4);
- GEM_BUG_ON(desc->tail % 4);
- GEM_BUG_ON(tail >= size);
+ if (unlikely(desc->is_in_error))
+ return -EPIPE;
+
+ if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ (tail | head) >= size))
+ goto corrupted;
+
+ /* later calculations will be done in dwords */
+ head /= 4;
+ tail /= 4;
+ size /= 4;
/*
* tail == head condition indicates empty. GuC FW does not support
@@ -332,9 +361,8 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
(want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
(action[0] << GUC_CT_MSG_ACTION_SHIFT);
- CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
- 4, &header, 4, &fence,
- 4 * (len - 1), &action[1]);
+ CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
+ 4, &header, 4, &fence, 4 * (len - 1), &action[1]);
cmds[tail] = header;
tail = (tail + 1) % size;
@@ -346,12 +374,17 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
cmds[tail] = action[i];
tail = (tail + 1) % size;
}
+ GEM_BUG_ON(tail > size);
/* now update desc tail (back in bytes) */
desc->tail = tail * 4;
- GEM_BUG_ON(desc->tail > desc->size);
-
return 0;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+ desc->addr, desc->head, desc->tail, desc->size);
+ desc->is_in_error = 1;
+ return -EPIPE;
}
/**
@@ -469,7 +502,7 @@ static int ct_send(struct intel_guc_ct *ct,
list_add_tail(&request.link, &ct->requests.pending);
spin_unlock_irqrestore(&ct->requests.lock, flags);
- err = ctb_write(ctb, action, len, fence, !!response_buf);
+ err = ct_write(ct, action, len, fence, !!response_buf);
if (unlikely(err))
goto unlink;
@@ -526,11 +559,11 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
- action[0], ret, status);
+ CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
+ action[0], ret, status);
} else if (unlikely(ret)) {
- CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
- action[0], ret, ret);
+ CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
+ action[0], ret, ret);
}
mutex_unlock(&guc->send_mutex);
@@ -552,22 +585,29 @@ static inline bool ct_header_is_response(u32 header)
return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
-static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
+static int ct_read(struct intel_guc_ct *ct, u32 *data)
{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
struct guc_ct_buffer_desc *desc = ctb->desc;
- u32 head = desc->head / 4; /* in dwords */
- u32 tail = desc->tail / 4; /* in dwords */
- u32 size = desc->size / 4; /* in dwords */
+ u32 head = desc->head;
+ u32 tail = desc->tail;
+ u32 size = desc->size;
u32 *cmds = ctb->cmds;
- s32 available; /* in dwords */
+ s32 available;
unsigned int len;
unsigned int i;
- GEM_BUG_ON(desc->size % 4);
- GEM_BUG_ON(desc->head % 4);
- GEM_BUG_ON(desc->tail % 4);
- GEM_BUG_ON(tail >= size);
- GEM_BUG_ON(head >= size);
+ if (unlikely(desc->is_in_error))
+ return -EPIPE;
+
+ if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ (tail | head) >= size))
+ goto corrupted;
+
+ /* later calculations will be done in dwords */
+ head /= 4;
+ tail /= 4;
+ size /= 4;
/* tail == head condition indicates empty */
available = tail - head;
@@ -577,7 +617,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
/* beware of buffer wrap case */
if (unlikely(available < 0))
available += size;
- CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
+ CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
GEM_BUG_ON(available < 0);
data[0] = cmds[head];
@@ -586,23 +626,29 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
/* message len with header */
len = ct_header_get_len(data[0]) + 1;
if (unlikely(len > (u32)available)) {
- DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
- 4, data,
- 4 * (head + available - 1 > size ?
- size - head : available - 1), &cmds[head],
- 4 * (head + available - 1 > size ?
- available - 1 - size + head : 0), &cmds[0]);
- return -EPROTO;
+ CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
+ 4, data,
+ 4 * (head + available - 1 > size ?
+ size - head : available - 1), &cmds[head],
+ 4 * (head + available - 1 > size ?
+ available - 1 - size + head : 0), &cmds[0]);
+ goto corrupted;
}
for (i = 1; i < len; i++) {
data[i] = cmds[head];
head = (head + 1) % size;
}
- CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);
+ CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
desc->head = head * 4;
return 0;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+ desc->addr, desc->head, desc->tail, desc->size);
+ desc->is_in_error = 1;
+ return -EPIPE;
}
/**
@@ -627,7 +673,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
u32 header = msg[0];
u32 len = ct_header_get_len(header);
- u32 msglen = len + 1; /* total message length including header */
+ u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
u32 fence;
u32 status;
u32 datalen;
@@ -639,7 +685,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
/* Response payload shall at least include fence and status */
if (unlikely(len < 2)) {
- DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
return -EPROTO;
}
@@ -649,22 +695,22 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
/* Format of the status follows RESPONSE message */
if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
- DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
return -EPROTO;
}
- CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
+ CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
spin_lock(&ct->requests.lock);
list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
- CT_DEBUG_DRIVER("CT: request %u awaits response\n",
- req->fence);
+ CT_DEBUG(ct, "request %u awaits response\n",
+ req->fence);
continue;
}
if (unlikely(datalen > req->response_len)) {
- DRM_ERROR("CT: response %u too long %*ph\n",
- req->fence, 4 * msglen, msg);
+ CT_ERROR(ct, "Response for %u is too long %*ph\n",
+ req->fence, msgsize, msg);
datalen = 0;
}
if (datalen)
@@ -677,7 +723,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
spin_unlock(&ct->requests.lock);
if (!found)
- DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
return 0;
}
@@ -687,7 +733,7 @@ static void ct_process_request(struct intel_guc_ct *ct,
struct intel_guc *guc = ct_to_guc(ct);
int ret;
- CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
+ CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
@@ -698,8 +744,8 @@ static void ct_process_request(struct intel_guc_ct *ct,
default:
fail_unexpected:
- DRM_ERROR("CT: unexpected request %x %*ph\n",
- action, 4 * len, payload);
+ CT_ERROR(ct, "Unexpected request %x %*ph\n",
+ action, 4 * len, payload);
break;
}
}
@@ -767,18 +813,18 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
u32 header = msg[0];
u32 len = ct_header_get_len(header);
- u32 msglen = len + 1; /* total message length including header */
+ u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
struct ct_incoming_request *request;
unsigned long flags;
GEM_BUG_ON(ct_header_is_response(header));
- request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
+ request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
if (unlikely(!request)) {
- DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
return 0; /* XXX: -ENOMEM ? */
}
- memcpy(request->msg, msg, 4 * msglen);
+ memcpy(request->msg, msg, msgsize);
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
@@ -794,7 +840,6 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
*/
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
@@ -804,7 +849,7 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
}
do {
- err = ctb_read(ctb, msg);
+ err = ct_read(ct, msg);
if (err)
break;
@@ -813,10 +858,4 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
else
err = ct_handle_request(ct, msg);
} while (!err);
-
- if (GEM_WARN_ON(err == -EPROTO)) {
- DRM_ERROR("CT: corrupted message detected!\n");
- ctb->desc->is_in_error = 1;
- }
}
-
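
With the corrupted-descriptor handling folded into the read path (the corrupted: label above), the event handler loop needs no descriptor knowledge of its own. A simplified sketch of the resulting control flow, assuming ct_read() behaves as in the hunk above:

	/* ct_read() validates the descriptor itself: on corruption it
	 * logs via CT_ERROR(), sets desc->is_in_error and returns
	 * -EPIPE, which terminates the do { } while (!err) loop.
	 */
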
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 3e7fe237cfa5..494a51a5200f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -49,7 +49,7 @@ struct intel_guc_ct {
struct intel_guc_ct_buffer ctbs[2];
struct {
- u32 next_fence; /* fence to be used with next request to send */
+ u32 last_fence; /* last fence used to send request */
spinlock_t lock; /* protects pending requests list */
struct list_head pending; /* requests waiting for response */
@@ -65,6 +65,11 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
+static inline void intel_guc_ct_sanitize(struct intel_guc_ct *ct)
+{
+ ct->enabled = false;
+}
+
static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
{
return ct->enabled;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9e42324fdecd..fe7778c28d2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -456,9 +456,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -660,12 +658,9 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_proc_desc_fini(guc);
}
-static bool __guc_submission_support(struct intel_guc *guc)
+static bool __guc_submission_selected(struct intel_guc *guc)
{
- /* XXX: GuC submission is unavailable for now */
- return false;
-
- if (!intel_guc_is_supported(guc))
+ if (!intel_guc_submission_is_supported(guc))
return false;
return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
@@ -673,7 +668,7 @@ static bool __guc_submission_support(struct intel_guc *guc)
void intel_guc_submission_init_early(struct intel_guc *guc)
{
- guc->submission_supported = __guc_submission_support(guc);
+ guc->submission_selected = __guc_submission_selected(guc);
}
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index e402a2932592..4cf9d3e50263 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -8,7 +8,8 @@
#include <linux/types.h>
-struct intel_guc;
+#include "intel_guc.h"
+
struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
@@ -20,4 +21,20 @@ int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
+static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
+{
+ /* XXX: GuC submission is unavailable for now */
+ return false;
+}
+
+static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc)
+{
+ return guc->submission_selected;
+}
+
+static inline bool intel_guc_submission_is_used(struct intel_guc *guc)
+{
+ return intel_guc_is_used(guc) && intel_guc_submission_is_wanted(guc);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 32a069841c14..a74b65694512 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -121,19 +121,20 @@ int intel_huc_init(struct intel_huc *huc)
if (err)
goto out_fini;
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
out_fini:
intel_uc_fw_fini(&huc->fw);
out:
- intel_uc_fw_cleanup_fetch(&huc->fw);
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
+ i915_probe_error(i915, "failed with %d\n", err);
return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
- if (!intel_uc_fw_is_available(&huc->fw))
+ if (!intel_uc_fw_is_loadable(&huc->fw))
return;
intel_huc_rsa_data_destroy(huc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 644c059fe01d..a40b9cfc6c22 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -41,11 +41,17 @@ static inline bool intel_huc_is_supported(struct intel_huc *huc)
return intel_uc_fw_is_supported(&huc->fw);
}
-static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+static inline bool intel_huc_is_wanted(struct intel_huc *huc)
{
return intel_uc_fw_is_enabled(&huc->fw);
}
+static inline bool intel_huc_is_used(struct intel_huc *huc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&huc->fw);
+}
+
static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
{
return intel_uc_fw_is_running(&huc->fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index eee193bf2cc4..9cdf4cbe691c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -20,7 +20,7 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
struct drm_i915_private *i915 = gt->i915;
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
- intel_uc_uses_guc(uc),
+ intel_uc_wants_guc(uc),
INTEL_INFO(i915)->platform, INTEL_REVID(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 64934a876a50..a4cbe06e06bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -48,17 +48,17 @@ static void __confirm_options(struct intel_uc *uc)
DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
"enable_guc=%d (guc:%s submission:%s huc:%s)\n",
i915_modparams.enable_guc,
- yesno(intel_uc_uses_guc(uc)),
- yesno(intel_uc_uses_guc_submission(uc)),
- yesno(intel_uc_uses_huc(uc)));
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_wants_huc(uc)));
if (i915_modparams.enable_guc == -1)
return;
if (i915_modparams.enable_guc == 0) {
- GEM_BUG_ON(intel_uc_uses_guc(uc));
- GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
- GEM_BUG_ON(intel_uc_uses_huc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_wants_huc(uc));
return;
}
@@ -93,7 +93,7 @@ void intel_uc_init_early(struct intel_uc *uc)
__confirm_options(uc);
- if (intel_uc_uses_guc(uc))
+ if (intel_uc_wants_guc(uc))
uc->ops = &uc_ops_on;
else
uc->ops = &uc_ops_off;
@@ -257,13 +257,13 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
{
int err;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
if (err)
return;
- if (intel_uc_uses_huc(uc))
+ if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
}
@@ -273,25 +273,38 @@ static void __uc_cleanup_firmwares(struct intel_uc *uc)
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
-static void __uc_init(struct intel_uc *uc)
+static int __uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+ if (!intel_uc_uses_guc(uc))
+ return 0;
+
+ if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
+ return -ENOMEM;
/* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
ret = intel_guc_init(guc);
- if (ret) {
- intel_uc_fw_cleanup_fetch(&huc->fw);
- return;
+ if (ret)
+ return ret;
+
+ if (intel_uc_uses_huc(uc)) {
+ ret = intel_huc_init(huc);
+ if (ret)
+ goto out_guc;
}
- if (intel_uc_uses_huc(uc))
- intel_huc_init(huc);
+ return 0;
+
+out_guc:
+ intel_guc_fini(guc);
+ return ret;
}
static void __uc_fini(struct intel_uc *uc)
@@ -402,12 +415,12 @@ static int __uc_init_hw(struct intel_uc *uc)
int ret, attempts;
GEM_BUG_ON(!intel_uc_supports_guc(uc));
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
- if (!intel_uc_fw_is_available(&guc->fw)) {
+ if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
- intel_uc_supports_guc_submission(uc) ?
+ intel_uc_wants_guc_submission(uc) ?
intel_uc_fw_status_to_error(guc->fw.status) : 0;
goto err_out;
}
@@ -459,14 +472,14 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_communication;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found,
"submission",
- enableddisabled(intel_uc_supports_guc_submission(uc)));
+ enableddisabled(intel_uc_uses_guc_submission(uc)));
if (intel_uc_uses_huc(uc)) {
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
@@ -505,10 +518,10 @@ static void __uc_fini_hw(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_fw_running(guc))
return;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_disable(guc);
if (guc_communication_enabled(guc))
@@ -527,7 +540,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
guc_disable_communication(guc);
@@ -539,7 +552,7 @@ void intel_uc_runtime_suspend(struct intel_uc *uc)
struct intel_guc *guc = &uc->guc;
int err;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
err = intel_guc_suspend(guc);
@@ -554,7 +567,7 @@ void intel_uc_suspend(struct intel_uc *uc)
struct intel_guc *guc = &uc->guc;
intel_wakeref_t wakeref;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
@@ -566,7 +579,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
struct intel_guc *guc = &uc->guc;
int err;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_fw_running(guc))
return 0;
/* Make sure we enable communication if and only if it's disabled */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 49c913524686..5ae7b50b7dc1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -7,6 +7,7 @@
#define _INTEL_UC_H_
#include "intel_guc.h"
+#include "intel_guc_submission.h"
#include "intel_huc.h"
#include "i915_params.h"
@@ -16,7 +17,7 @@ struct intel_uc_ops {
int (*sanitize)(struct intel_uc *uc);
void (*init_fw)(struct intel_uc *uc);
void (*fini_fw)(struct intel_uc *uc);
- void (*init)(struct intel_uc *uc);
+ int (*init)(struct intel_uc *uc);
void (*fini)(struct intel_uc *uc);
int (*init_hw)(struct intel_uc *uc);
void (*fini_hw)(struct intel_uc *uc);
@@ -40,35 +41,44 @@ void intel_uc_runtime_suspend(struct intel_uc *uc);
int intel_uc_resume(struct intel_uc *uc);
int intel_uc_runtime_resume(struct intel_uc *uc);
-static inline bool intel_uc_supports_guc(struct intel_uc *uc)
-{
- return intel_guc_is_supported(&uc->guc);
-}
-
-static inline bool intel_uc_uses_guc(struct intel_uc *uc)
-{
- return intel_guc_is_enabled(&uc->guc);
-}
+/*
+ * We need to know as early as possible if we're going to use GuC or not to
+ * take the correct setup paths. Additionally, once we've started loading the
+ * GuC, it is unsafe to keep executing without it because some parts of the HW,
+ * a subset of which is not cleaned on GT reset, will start expecting the GuC FW
+ * to be running.
+ * To solve both these requirements, we commit to using the microcontrollers if
+ * the relevant modparam is set and the blobs are found on the system. At this
+ * stage, the only thing that can stop us from attempting to load the blobs on
+ * the HW and use them is a fundamental issue (e.g. no memory for our
+ * structures); if we hit such a problem during driver load we're broken even
+ * without GuC, so there is no point in trying to fall back.
+ *
+ * Given the above, we can be in one of 4 states, with the last one implying
+ * we're committed to using the microcontroller:
+ * - Not supported: not available in HW and/or firmware not defined.
+ * - Supported: available in HW and firmware defined.
+ * - Wanted: supported + enabled in modparam.
+ * - In use: wanted + firmware found on the system and successfully fetched.
+ */
-static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
+#define __uc_state_checker(x, func, state, required) \
+static inline bool intel_uc_##state##_##func(struct intel_uc *uc) \
+{ \
+ return intel_##func##_is_##required(&uc->x); \
}
-static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
-}
+#define uc_state_checkers(x, func) \
+__uc_state_checker(x, func, supports, supported) \
+__uc_state_checker(x, func, wants, wanted) \
+__uc_state_checker(x, func, uses, used)
-static inline bool intel_uc_supports_huc(struct intel_uc *uc)
-{
- return intel_uc_supports_guc(uc);
-}
+uc_state_checkers(guc, guc);
+uc_state_checkers(huc, huc);
+uc_state_checkers(guc, guc_submission);
-static inline bool intel_uc_uses_huc(struct intel_uc *uc)
-{
- return intel_huc_is_enabled(&uc->huc);
-}
+#undef uc_state_checkers
+#undef __uc_state_checker
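
For reference, one instantiation expanded by hand; this follows mechanically from the __uc_state_checker definition above, so uc_state_checkers(guc, guc) yields:

	static inline bool intel_uc_supports_guc(struct intel_uc *uc)
	{
		return intel_guc_is_supported(&uc->guc);
	}

	static inline bool intel_uc_wants_guc(struct intel_uc *uc)
	{
		return intel_guc_is_wanted(&uc->guc);
	}

	static inline bool intel_uc_uses_guc(struct intel_uc *uc)
	{
		return intel_guc_is_used(&uc->guc);
	}
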
#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
@@ -80,7 +90,7 @@ static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
intel_uc_ops_function(sanitize, sanitize, int, 0);
intel_uc_ops_function(fetch_firmwares, init_fw, void, );
intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
-intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(init, init, int, 0);
intel_uc_ops_function(fini, fini, void, );
intel_uc_ops_function(init_hw, init_hw, int, 0);
intel_uc_ops_function(fini_hw, fini_hw, void, );
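
The init op changing from void to int ripples through the intel_uc_ops_function() wrappers, where the fourth argument is the value returned when the op is absent. Assuming the usual delegate-to-ops body (the full macro is outside this hunk), intel_uc_ops_function(init, init, int, 0) would generate roughly:

	static inline int intel_uc_init(struct intel_uc *uc)
	{
		if (uc->ops->init)
			return uc->ops->init(uc);
		return 0;	/* _RET: the fallback when the op is unimplemented */
	}
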
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 8ee0a0c7f447..18c755203688 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -43,7 +43,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
@@ -279,7 +279,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
err = i915_inject_probe_error(i915, -ENXIO);
if (err)
- return err;
+ goto fail;
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
@@ -501,7 +501,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
if (err)
return err;
- if (!intel_uc_fw_is_available(uc_fw))
+ if (!intel_uc_fw_is_loadable(uc_fw))
return -ENOEXEC;
/* Call custom loader */
@@ -544,7 +544,10 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
- intel_uc_fw_cleanup_fetch(uc_fw);
+ if (i915_gem_object_has_pinned_pages(uc_fw->obj))
+ i915_gem_object_unpin_pages(uc_fw->obj);
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1f30543d0d2d..888ff0de0244 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -29,8 +29,11 @@ struct intel_gt;
* |            |                 SELECTED                          |
* +------------+-               /   |   \                         -+
* |            |    MISSING <--/    |    \--> ERROR                |
- * |   fetch    |                    |                              |
- * |            |    /------> AVAILABLE <---<-----------\           |
+ * |   fetch    |                    V                              |
+ * |            |                 AVAILABLE                         |
+ * +------------+-                   |                             -+
+ * |   init     |                    V                              |
+ * |            |       /------> LOADABLE <----<-----------\        |
* +------------+-     \         /    \        \            \      -+
* |            |    FAIL <--<  \--> TRANSFERRED  \          |
* |   upload   |      \        /        \        /          |
@@ -46,6 +49,7 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
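
Per the updated diagram, the expected happy path is now a straight walk down the enum (names as above):

	SELECTED --fetch--> AVAILABLE --init--> LOADABLE
	         --upload--> TRANSFERRED --auth--> RUNNING

Consistently, intel_uc_fw_fini() above drops back to AVAILABLE after unpinning, while intel_uc_fw_sanitize() below only rolls a loaded blob back to LOADABLE, since the objects prepared at init time survive a sanitize.
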
@@ -115,6 +119,8 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_LOADABLE:
+ return "LOADABLE";
case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
@@ -143,6 +149,7 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
case INTEL_UC_FIRMWARE_SELECTED:
return -ESTALE;
case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_LOADABLE:
case INTEL_UC_FIRMWARE_TRANSFERRED:
case INTEL_UC_FIRMWARE_RUNNING:
return 0;
@@ -184,6 +191,11 @@ static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
}
+static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE;
+}
+
static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
{
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
@@ -202,7 +214,7 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
if (intel_uc_fw_is_loaded(uc_fw))
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE);
}
static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 771420453f82..8b13f091cee2 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,7 +41,7 @@
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->ggtt.vm.mutex);
- mmio_hw_access_pre(dev_priv);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+ mutex_lock(&gt->ggtt->vm.mutex);
+ mmio_hw_access_pre(gt);
+ ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mmio_hw_access_post(gt);
+ mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
int ret;
ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gt *gt = gvt->gt;
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
}
/**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(uncore->rpm);
- if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
- if (WARN_ON(!reg))
+ if (drm_WARN_ON(&i915->drm, !reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
- I915_WRITE(fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_lo, 0);
+ intel_uncore_posting_read(uncore, fence_reg_lo);
- I915_WRITE(fence_reg_hi, upper_32_bits(value));
- I915_WRITE(fence_reg_lo, lower_32_bits(value));
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+ intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+ intel_uncore_posting_read(uncore, fence_reg_lo);
}
static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
u32 i;
- if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
int i;
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(&dev_priv->ggtt);
+ reg = i915_reserve_fence(gvt->gt->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
+
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
@@ -220,8 +224,8 @@ out_free_fence:
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put_unchecked(uncore->rpm);
return -ENOSPC;
}
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- _clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+ _clear_vgpu_fence(vgpu);
}
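
The open-coded get/put pair becomes a scoped helper here. with_intel_runtime_pm() is, to a first approximation, a for-loop wrapper along these lines (a sketch, not necessarily the exact kernel definition):

	#define with_intel_runtime_pm(rpm, wf) \
		for ((wf) = intel_runtime_pm_get(rpm); (wf); \
		     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

so _clear_vgpu_fence() runs with the wakeref held and the put cannot be missed on an early exit.
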
/**
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 19cf1bbe059d..072725a448db 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -106,10 +106,13 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 4))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -297,34 +300,36 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
- if (WARN_ON(bytes > 4))
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
- if (WARN_ON(bytes > 2))
+ if (drm_WARN_ON(&i915->drm, bytes > 2))
return -EINVAL;
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
}
switch (rounddown(offset, 4)) {
case PCI_ROM_ADDRESS:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
case INTEL_GVT_PCI_SWSCI:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
if (ret)
@@ -332,7 +337,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
break;
case INTEL_GVT_PCI_OPREGION:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_opregion_base_write_handler(vgpu,
*(u32 *)p_data);
@@ -391,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21a176cd8acc..9e065ad0658f 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -462,7 +462,7 @@ enum {
struct parser_exec_state {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
int buf_type;
@@ -635,39 +635,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
},
};
-static inline u32 get_opcode(u32 cmd, int ring_id)
+static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
-static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
- unsigned int opcode, int ring_id)
+static inline const struct cmd_info *
+find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+ const struct intel_engine_cs *engine)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
+ if (opcode == e->info->opcode &&
+ e->info->rings & engine->mask)
return e->info;
}
return NULL;
}
-static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
- u32 cmd, int ring_id)
+static inline const struct cmd_info *
+get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+ const struct intel_engine_cs *engine)
{
u32 opcode;
- opcode = get_opcode(cmd, ring_id);
+ opcode = get_opcode(cmd, engine);
if (opcode == INVALID_OP)
return NULL;
- return find_cmd_entry(gvt, opcode, ring_id);
+ return find_cmd_entry(gvt, opcode, engine);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -675,12 +678,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
-static inline void print_opcode(u32 cmd, int ring_id)
+static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
int i;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -709,10 +712,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
- " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
- s->ring_id, s->ring_start, s->ring_start + s->ring_size,
- s->ring_head, s->ring_tail);
+ gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n",
+ s->vgpu->id, s->engine->name,
+ s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -729,7 +733,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
- print_opcode(cmd_val(s, 0), s->ring_id);
+ print_opcode(cmd_val(s, 0), s->engine);
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
@@ -840,7 +844,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
unsigned int data;
u32 ring_base;
u32 nopid;
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (!strcmp(cmd, "lri"))
data = cmd_val(s, index + 1);
@@ -850,7 +853,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
return -EINVAL;
}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ ring_base = s->engine->mmio_base;
nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -926,9 +929,9 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* update reg values in it into vregs, so LRIs in workload with
* inhibit context will restore with correct values
*/
- if (IS_GEN(gvt->dev_priv, 9) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
- !strncmp(cmd, "lri", 3)) {
+ if (IS_GEN(s->engine->i915, 9) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
@@ -964,7 +967,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- struct intel_gvt *gvt = s->vgpu->gvt;
u32 valid_len = CMD_LEN(1);
/*
@@ -979,8 +981,8 @@ static int cmd_handler_lri(struct parser_exec_state *s)
}
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
- if (s->ring_id == BCS0 &&
+ if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
+ if (s->engine->id == BCS0 &&
cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
@@ -1001,9 +1003,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= ((cmd_reg_inhibit(s, i) ||
- (cmd_reg_inhibit(s, i + 1)))) ?
+ (cmd_reg_inhibit(s, i + 1)))) ?
-EBADRQC : 0;
if (ret)
break;
@@ -1029,7 +1031,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
- if (IS_BROADWELL(gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
@@ -1141,7 +1143,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 2), gma);
val = cmd_val(s, 1) & (~(1 << 21));
@@ -1155,15 +1157,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
- set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
+ s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
- set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
+ s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
@@ -1213,7 +1215,7 @@ struct plane_code_mapping {
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1230,7 +1232,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
dword2 = cmd_val(s, 2);
v = (dword0 & GENMASK(21, 19)) >> 19;
- if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
@@ -1250,7 +1252,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->stride_reg = SPRSTRIDE(info->pipe);
info->surf_reg = SPRSURF(info->pipe);
} else {
- WARN_ON(1);
+ drm_WARN_ON(&dev_priv->drm, 1);
return -EBADRQC;
}
return 0;
@@ -1259,7 +1261,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1318,13 +1320,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1347,7 +1348,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1378,11 +1379,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1667,7 +1666,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 1), gma);
val = cmd_val(s, 0) & (~(1 << 21));
@@ -1676,8 +1675,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
- set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
+ s->workload->pending_events);
return ret;
}
@@ -1725,12 +1724,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
/* Decide privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
- !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+ if (cmd_val(s, 0) & BIT(8) &&
+ !(s->vgpu->scan_nonprivbb & s->engine->mask))
return 0;
+
return 1;
}
+static const char *repr_addr_type(unsigned int type)
+{
+ return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+}
+
static int find_bb_size(struct parser_exec_state *s,
unsigned long *bb_size,
unsigned long *bb_end_cmd_offset)
@@ -1753,24 +1758,24 @@ static int find_bb_size(struct parser_exec_state *s,
return -EFAULT;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
do {
if (copy_gma_to_hva(s->vgpu, mm,
- gma, gma + 4, &cmd) < 0)
+ gma, gma + 4, &cmd) < 0)
return -EFAULT;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1799,12 +1804,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va)
u32 cmd = *(u32 *)va;
const struct cmd_info *info;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1857,7 +1862,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (bb->ppgtt)
start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ bb->obj = i915_gem_object_create_shmem(s->engine->i915,
round_up(bb_size + start_offset,
PAGE_SIZE));
if (IS_ERR(bb->obj)) {
@@ -2666,25 +2671,25 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (cmd == MI_NOOP)
info = &cmd_info[mi_noop_index];
else
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
s->info = info;
- trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+ trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
ret = gvt_check_valid_cmd_length(cmd_length(s),
- info->valid_len);
+ info->valid_len);
if (ret)
return ret;
}
@@ -2781,7 +2786,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
@@ -2790,8 +2795,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.workload = workload;
s.is_ctx_wa = false;
- if ((bypass_scan_mask & (1 << workload->ring_id)) ||
- gma_head == gma_tail)
+ if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
@@ -2830,7 +2834,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
@@ -2855,7 +2859,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
- int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2868,21 +2871,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
- GFP_KERNEL);
+ p = krealloc(s->ring_scan_buffer[workload->engine->id],
+ workload->rb_len, GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- s->ring_scan_buffer[ring_id] = p;
- s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[workload->engine->id] = p;
+ s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
}
- shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2940,7 +2943,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create_shmem(workload->engine->i915,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -3029,30 +3032,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, unsigned long rings)
-{
- const struct cmd_info *info = NULL;
- unsigned int ring;
-
- for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
- info = find_cmd_entry(gvt, opcode, ring);
- if (info)
- break;
- }
- return info;
-}
-
static int init_cmd_table(struct intel_gvt *gvt)
{
+ unsigned int gen_type = intel_gvt_get_device_type(gvt);
int i;
- struct cmd_entry *e;
- const struct cmd_info *info;
- unsigned int gen_type;
-
- gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ struct cmd_entry *e;
+
if (!(cmd_info[i].devices & gen_type))
continue;
@@ -3061,23 +3048,16 @@ static int init_cmd_table(struct intel_gvt *gvt)
return -ENOMEM;
e->info = &cmd_info[i];
- info = find_cmd_entry_any_ring(gvt,
- e->info->opcode, e->info->rings);
- if (info) {
- gvt_err("%s %s duplicated\n", e->info->name,
- info->name);
- kfree(e);
- return -EEXIST;
- }
if (cmd_info[i].opcode == OP_MI_NOOP)
mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
- e->info->name, e->info->opcode, e->info->flag,
- e->info->devices, e->info->rings);
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
}
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 285f6011a537..ec47d4114554 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
- preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+ preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->dev_priv);
+ mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->dev_priv);
+ mmio_hw_access_post(gvt->gt);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
@@ -128,6 +127,7 @@ static int
vgpu_scan_nonprivbb_get(void *data, u64 *val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
*val = vgpu->scan_nonprivbb;
return 0;
}
@@ -142,42 +142,7 @@ static int
vgpu_scan_nonprivbb_set(void *data, u64 val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- enum intel_engine_id id;
- char buf[128], *s;
- int len;
-
- val &= (1 << I915_NUM_ENGINES) - 1;
-
- if (vgpu->scan_nonprivbb == val)
- return 0;
-
- if (!val)
- goto done;
-
- len = sprintf(buf,
- "gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
- vgpu->id);
-
- s = buf + len;
-
- for (id = 0; id < I915_NUM_ENGINES; id++) {
- struct intel_engine_cs *engine;
-
- engine = dev_priv->engine[id];
- if (engine && (val & (1 << id))) {
- len = snprintf(s, 4, "%d, ", engine->id);
- s += len;
- } else
- val &= ~(1 << id);
- }
-
- if (val)
- sprintf(s, "low performance expected.");
-
- pr_warn("%s\n", buf);
-done:
vgpu->scan_nonprivbb = val;
return 0;
}
@@ -220,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->dev_priv->drm.primary;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a62bdf9be682..6e5c9885d9fe 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
@@ -69,9 +69,10 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
@@ -168,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -319,9 +320,10 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
- if (WARN_ON(resolution >= GVT_EDID_NUM))
+ if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
return -EINVAL;
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
@@ -389,7 +391,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -421,7 +423,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ for_each_pipe(vgpu->gvt->gt->i915, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
@@ -454,11 +456,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
*/
void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* TODO: add more platforms support */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv)) {
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
@@ -484,7 +486,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
*/
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv))
@@ -506,7 +508,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
*/
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
intel_vgpu_init_i2c_edid(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index ae139f0877ae..37fc460414a8 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -67,11 +67,11 @@ static int vgpu_gem_get_pages(
u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
- if (WARN_ON(!fb_info))
+ if (drm_WARN_ON(&dev_priv->drm, !fb_info))
return -ENODEV;
vgpu = fb_info->obj->vgpu;
- if (WARN_ON(!vgpu))
+ if (drm_WARN_ON(&dev_priv->drm, !vgpu))
return -ENODEV;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct vfio_device_gfx_plane_info *gfx_plane_info = args;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ out:
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 1fe6124918f1..190651df5db1 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- if (IS_BROXTON(dev_priv))
+ if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
- else if (IS_COFFEELAKE(dev_priv))
+ else if (IS_COFFEELAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
- if (WARN_ON(port < 0))
+ if (drm_WARN_ON(&i915->drm, port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
@@ -276,7 +276,9 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- WARN_ON(1);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ drm_WARN_ON(&i915->drm, 1);
return 0;
}
@@ -371,7 +373,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
@@ -399,7 +403,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
@@ -473,6 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
unsigned int offset,
void *p_data)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
@@ -532,9 +539,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* support the gfx driver to do EDID access.
*/
} else {
- if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ))
return;
- if (WARN_ON(msg_length != 4))
+ if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->slave_selected) {
unsigned char val = edid_get_byte(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index d6e7a1189bad..dd25c3024370 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -39,8 +39,7 @@
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
-#define execlist_ring_mmio(gvt, ring_id, offset) \
- (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
[VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(unsigned int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
{
- if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
- return context_switch_events[ring_id];
+ return context_switch_events[engine->id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt,
- ring_id, _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
}
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
- struct execlist_context_status_format *status,
- bool trigger_interrupt_later)
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
unsigned long hwsp_gpa;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
- ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_BUF);
+ ctx_status_ptr_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
/* Update the CSB and CSB write pointer in HWSP */
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- vgpu->hws_pga[ring_id]);
+ vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
- write_pointer * 8,
- status, 8);
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa +
- intel_hws_csb_write_index(dev_priv) * 4,
- &write_pointer, 4);
+ hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
- vgpu->id, write_pointer, offset, status->ldw, status->udw);
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
- ring_id_to_context_switch_event(execlist->ring_id));
+ to_context_switch_event(execlist->engine));
}
static int emulate_execlist_ctx_schedule_out(
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
int ret;
if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+ ctx);
if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist =
+ &s->execlist[workload->engine->id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
bool lite_restore = false;
int ret = 0;
- gvt_dbg_el("complete workload %p status %d\n", workload,
- workload->status);
+ gvt_dbg_el("complete workload %p status %d\n",
+ workload, workload->status);
- if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+ if (workload->status || vgpu->resetting_eng & workload->engine->mask)
goto out;
- if (!list_empty(workload_q_head(vgpu, ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, workload->engine))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
@@ -436,14 +429,15 @@ out:
return ret;
}
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
- struct execlist_ctx_descriptor_format *desc,
- bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ workload = intel_vgpu_create_workload(vgpu, engine, desc);
if (IS_ERR(workload))
return PTR_ERR(workload);
@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->emulate_schedule_in = emulate_schedule_in;
if (emulate_schedule_in)
- workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+ workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
- emulate_schedule_in);
+ emulate_schedule_in);
intel_vgpu_queue_workload(workload);
return 0;
}
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i]->valid)
continue;
- ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+ ret = submit_context(vgpu, engine, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@@ -504,22 +499,22 @@ inv_desc:
return -EINVAL;
}
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu;
- execlist->ring_id = ring_id;
+ execlist->engine = engine;
execlist->slot[0].index = 0;
execlist->slot[1].index = 1;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
+ ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
@@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
@@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
- init_vgpu_execlist(vgpu, engine->id);
+ init_vgpu_execlist(vgpu, engine);
}
static int init_execlist(struct intel_vgpu *vgpu,
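
Threading the intel_engine_cs through struct intel_vgpu_execlist lets execlist_ring_mmio() drop both the gvt and ring_id parameters, since the engine already knows its own mmio_base. A compilable sketch of the before/after, with a stub engine type and illustrative offsets:

	#include <stdio.h>

	struct intel_engine_cs { int id; unsigned int mmio_base; };

	#define _EL_OFFSET_STATUS	0x234	/* illustrative offset */

	/* Old: look the engine up by id through the device. */
	static unsigned int ring_mmio_by_id(struct intel_engine_cs *engines,
					    int ring_id, unsigned int offset)
	{
		return engines[ring_id].mmio_base + offset;
	}

	/* New: the caller already holds the engine. */
	#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))

	int main(void)
	{
		struct intel_engine_cs engines[] = {
			{ .id = 0, .mmio_base = 0x2000 },	/* rcs0-like */
			{ .id = 1, .mmio_base = 0x12000 },	/* vcs0-like */
		};

		printf("old %#x new %#x\n",
		       ring_mmio_by_id(engines, 0, _EL_OFFSET_STATUS),
		       execlist_ring_mmio(&engines[0], _EL_OFFSET_STATUS));
		return 0;
	}
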
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5c0c1fd30c83..d62cd14605a3 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -170,16 +170,17 @@ struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context;
- int ring_id;
struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords;
+ const struct intel_engine_cs *engine;
};
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask);
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 8bb292b01271..0889ad8291b0 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu)
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, fmt;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode)
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index b0c1fda32977..990a181094e3 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
-
- *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+ *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
_MMIO(offset));
return 0;
}
@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
void *firmware;
void *p;
@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
@@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
@@ -208,8 +205,7 @@ invalid_firmware:
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path);
- ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
kfree(path);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4a4828074cb7..2a4b23f8aa74 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -71,8 +71,10 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
- if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
- "invalid guest gmadr %llx\n", g_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
return -EACCES;
if (vgpu_gmadr_is_aperture(vgpu, g_addr))
@@ -87,8 +89,10 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
- if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
- "invalid host gmadr %llx\n", h_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
return -EACCES;
if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
@@ -275,24 +279,23 @@ static inline int get_pse_type(int type)
return gtt_type_table[type].pse_entry_type;
}
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
return readq(addr);
}
-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(dev_priv);
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_pre(gt);
+ intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ mmio_hw_access_post(gt);
}
-static void write_pte64(struct drm_i915_private *dev_priv,
- unsigned long index, u64 pte)
+static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
writeq(pte, addr);
}
@@ -315,7 +318,7 @@ static inline int gtt_get_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
} else {
e->val64 = *((u64 *)pt + index);
}
@@ -340,7 +343,7 @@ static inline int gtt_set_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
@@ -734,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -819,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
dma_addr_t daddr;
int ret;
@@ -940,6 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
enum intel_gvt_gtt_type cur_pt_type;
@@ -952,7 +956,9 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (!gtt_type_is_pt(cur_pt_type) ||
!gtt_type_is_pt(cur_pt_type + 1)) {
- WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ drm_WARN(&i915->drm, 1,
+ "Invalid page table type, cur_pt_type is: %d\n",
+ cur_pt_type);
return -EINVAL;
}
@@ -1044,7 +1050,7 @@ fail:
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
@@ -1153,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn;
- if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@ -2314,7 +2320,7 @@ out:
ggtt_invalidate_pte(vgpu, &e);
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate(gvt->dev_priv);
+ ggtt_invalidate(gvt->gt);
return 0;
}
@@ -2347,16 +2353,18 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
enum intel_gvt_gtt_type type)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
- if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ if (drm_WARN_ON(&i915->drm,
+ type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
@@ -2410,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2682,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2731,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
@@ -2779,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
struct intel_gvt_gtt_entry old_entry;
@@ -2809,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
}
- ggtt_invalidate(dev_priv);
+ ggtt_invalidate(gvt->gt);
}
/**
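
In gtt.c, read_pte64()/write_pte64() now take the i915_ggtt they actually operate on instead of the whole device. A user-space sketch where a plain array stands in for the ioremapped GSM (the kernel uses readq()/writeq() on ggtt->gsm):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t gen8_pte_t;

	/* Stub: the real i915_ggtt maps the GSM BAR; an array stands in here. */
	struct i915_ggtt { gen8_pte_t *gsm; };

	static uint64_t read_pte64(struct i915_ggtt *ggtt, unsigned long index)
	{
		return ggtt->gsm[index];	/* kernel: readq() on the iomap */
	}

	static void write_pte64(struct i915_ggtt *ggtt, unsigned long index,
				uint64_t pte)
	{
		ggtt->gsm[index] = pte;		/* kernel: writeq() on the iomap */
	}

	int main(void)
	{
		gen8_pte_t backing[16] = { 0 };
		struct i915_ggtt ggtt = { .gsm = backing };

		write_pte64(&ggtt, 3, 0xdeadbeefULL | 1 /* present bit */);
		printf("pte[3] = %#llx\n",
		       (unsigned long long)read_pte64(&ggtt, 3));
		return 0;
	}
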
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 8f37eefa0a02..9e1787867894 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -35,6 +35,7 @@
#include <linux/kthread.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
@@ -49,15 +50,15 @@ static const char * const supported_hypervisors[] = {
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name)
{
+ const char *driver_name =
+ dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
int i;
- struct intel_vgpu_type *t;
- const char *driver_name = dev_driver_string(
- &gvt->dev_priv->drm.pdev->dev);
+ name += strlen(driver_name) + 1;
for (i = 0; i < gvt->num_types; i++) {
- t = &gvt->types[i];
- if (!strncmp(t->name, name + strlen(driver_name) + 1,
- sizeof(t->name)))
+ struct intel_vgpu_type *t = &gvt->types[i];
+
+ if (!strncmp(t->name, name, sizeof(t->name)))
return t;
}
@@ -120,10 +121,8 @@ static struct attribute_group *gvt_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
-static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups)
+static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
- *type_attrs = gvt_type_attrs;
*intel_vgpu_type_groups = gvt_vgpu_type_groups;
return true;
}
@@ -191,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -257,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt)
/**
* intel_gvt_clean_device - clean a GVT device
- * @dev_priv: i915 private
+ * @i915: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
*
*/
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+void intel_gvt_clean_device(struct drm_i915_private *i915)
{
- struct intel_gvt *gvt = to_gvt(dev_priv);
+ struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
- if (WARN_ON(!gvt))
+ if (drm_WARN_ON(&i915->drm, !gvt))
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
@@ -285,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- kfree(dev_priv->gvt);
- dev_priv->gvt = NULL;
+ kfree(gvt);
}
/**
* intel_gvt_init_device - initialize a GVT device
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
*
* This function is called at the initialization stage, to initialize
* necessary GVT components.
@@ -300,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
* Zero on success, negative error code if failed.
*
*/
-int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
int ret;
- if (WARN_ON(dev_priv->gvt))
+ if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
@@ -319,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
- gvt->dev_priv = dev_priv;
+ gvt->gt = &i915->gt;
+ i915->gvt = gvt;
init_device_info(gvt);
@@ -378,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
- dev_priv->gvt = gvt;
- intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+ intel_gvt_host.dev = &i915->drm.pdev->dev;
intel_gvt_host.initialized = true;
return 0;
@@ -404,6 +402,7 @@ out_clean_mmio_info:
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
+ i915->gvt = NULL;
return ret;
}
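
The teardown now claims the pointer with fetch_and_zero(), so any racing reader of i915->gvt sees NULL before the structure is torn down. This is also why the final free must use the local, kfree(gvt): after the exchange, kfree(i915->gvt) would be kfree(NULL) and leak the allocation. A sketch of the macro, written with GNU C statement expressions as i915_utils.h does, around stub types:

	#include <stdio.h>
	#include <stdlib.h>

	/* As in i915_utils.h: read a pointer and zero it in one expression. */
	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})

	struct intel_gvt { int dummy; };
	struct drm_i915_private { struct intel_gvt *gvt; };

	int main(void)
	{
		struct drm_i915_private i915 = {
			.gvt = malloc(sizeof(struct intel_gvt)),
		};
		struct intel_gvt *gvt = fetch_and_zero(&i915.gvt);

		/* i915.gvt is already NULL here, so the local must be freed. */
		free(gvt);
		printf("i915.gvt = %p\n", (void *)i915.gvt);
		return 0;
	}
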
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b47c6acaf9c0..58c2c7932e3f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -196,41 +196,21 @@ struct intel_vgpu {
struct dentry *debugfs;
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
- struct {
- struct mdev_device *mdev;
- struct vfio_region *region;
- int num_regions;
- struct eventfd_ctx *intx_trigger;
- struct eventfd_ctx *msi_trigger;
-
- /*
- * Two caches are used to avoid mapping duplicated pages (eg.
- * scratch pages). This help to reduce dma setup overhead.
- */
- struct rb_root gfn_cache;
- struct rb_root dma_addr_cache;
- unsigned long nr_cache_entries;
- struct mutex cache_lock;
-
- struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
- atomic_t released;
- struct vfio_device *vfio_device;
- } vdev;
-#endif
+ /* Hypervisor-specific device state. */
+ void *vdev;
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
- struct completion vblank_done;
-
u32 scan_nonprivbb;
};
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+ return vgpu->vdev;
+}
+
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
@@ -306,7 +286,7 @@ struct intel_gvt {
/* scheduler scope lock, protect gvt and vgpu schedule related data */
struct mutex sched_lock;
- struct drm_i915_private *dev_priv;
+ struct intel_gt *gt;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
@@ -376,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
+#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)
+
/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
-#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
@@ -394,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
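
gvt_to_ggtt() centralizes the new ggtt hop, and gvt_ggtt_sz() turns the GGTT's total address-space size into the size of its PTE array: one 8-byte entry per 4KiB page, hence the >> PAGE_SHIFT << 3. A quick, compilable check of that arithmetic:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	/* vm.total is bytes of GGTT address space; PTEs are 8 bytes per 4KiB page. */
	static unsigned long long gvt_ggtt_sz(unsigned long long total)
	{
		return total >> PAGE_SHIFT << 3;
	}

	int main(void)
	{
		/* A 4GiB GGTT needs 1M entries * 8B = 8MiB of PTEs. */
		printf("%llu MiB of PTEs\n", gvt_ggtt_sz(4ULL << 30) >> 20);
		return 0;
	}
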
@@ -570,8 +551,7 @@ struct intel_gvt_ops {
void (*vgpu_deactivate)(struct intel_vgpu *);
struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
const char *name);
- bool (*get_gvt_attrs)(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups);
+ bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
@@ -586,14 +566,14 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
{
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}
/**
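
mmio_hw_access_pre()/post() now bracket raw register access with a runtime-PM reference taken through gt->uncore->rpm rather than through the device. A toy model where a bare refcount stands in for intel_runtime_pm, to show the required pairing:

	#include <assert.h>
	#include <stdio.h>

	/* Stub runtime-PM: a refcount models intel_runtime_pm_get()/put(). */
	struct intel_runtime_pm { int wakeref_count; };
	struct intel_uncore { struct intel_runtime_pm *rpm; };
	struct intel_gt { struct intel_uncore *uncore; };

	static void mmio_hw_access_pre(struct intel_gt *gt)
	{
		gt->uncore->rpm->wakeref_count++;	/* intel_runtime_pm_get() */
	}

	static void mmio_hw_access_post(struct intel_gt *gt)
	{
		gt->uncore->rpm->wakeref_count--;	/* intel_runtime_pm_put_unchecked() */
	}

	int main(void)
	{
		struct intel_runtime_pm rpm = { 0 };
		struct intel_uncore uncore = { .rpm = &rpm };
		struct intel_gt gt = { .uncore = &uncore };

		mmio_hw_access_pre(&gt);
		/* ... touch hardware registers while a wakeref is held ... */
		mmio_hw_access_post(&gt);

		assert(rpm.wakeref_count == 0);	/* gets and puts must balance */
		printf("balanced\n");
		return 0;
	}
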
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6d28d72e6c7e..0182e2a5acff 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -49,15 +49,17 @@
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
+ struct drm_i915_private *i915 = gvt->gt->i915;
+
+ if (IS_BROADWELL(i915))
return D_BDW;
- else if (IS_SKYLAKE(gvt->dev_priv))
+ else if (IS_SKYLAKE(i915))
return D_SKL;
- else if (IS_KABYLAKE(gvt->dev_priv))
+ else if (IS_KABYLAKE(i915))
return D_KBL;
- else if (IS_BROXTON(gvt->dev_priv))
+ else if (IS_BROXTON(i915))
return D_BXT;
- else if (IS_COFFEELAKE(gvt->dev_priv))
+ else if (IS_COFFEELAKE(i915))
return D_CFL;
return 0;
@@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt,
}
/**
- * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
- * Ring ID on success, negative error code if failed.
+ * The engine containing the offset within its mmio page.
*/
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int offset)
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
- enum intel_engine_id id;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
offset &= ~GENMASK(11, 0);
- for_each_engine(engine, gvt->dev_priv, id) {
+ for_each_engine(engine, gvt->gt, id)
if (engine->mmio_base == offset)
- return id;
- }
- return -ENODEV;
+ return engine;
+
+ return NULL;
}
#define offset_to_fence_num(offset) \
@@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
{
u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
- if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
else if (!ips)
@@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
int ret;
@@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(dev_priv);
+ mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_post(gvt->gt);
return 0;
}
@@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
}
- engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
+ engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -492,7 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
};
/* a simple bsearch */
-static inline bool in_whitelist(unsigned int reg)
+static inline bool in_whitelist(u32 reg)
{
int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
i915_reg_t *array = force_nonpriv_white_list;
@@ -514,26 +516,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
- u32 ring_base;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ret = -EINVAL;
-
- if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
- gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
- vgpu->id, ring_id, offset, bytes);
- return ret;
- }
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- ring_base = dev_priv->engine[ring_id]->mmio_base;
+ if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return -EINVAL;
+ }
- if (in_whitelist(reg_nonpriv) ||
- reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
- ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
- bytes);
- } else
+ if (!in_whitelist(reg_nonpriv) &&
+ reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
- vgpu->id, *(u32 *)p_data, offset);
+ vgpu->id, reg_nonpriv, offset);
+ } else
+ intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
return 0;
}
@@ -756,7 +753,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 pipe = DSPSURF_TO_PIPE(offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -797,7 +794,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
@@ -821,7 +818,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
unsigned int reg)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum intel_gvt_event_type event;
if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
@@ -836,7 +833,7 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
- WARN_ON(true);
+ drm_WARN_ON(&dev_priv->drm, true);
return -EINVAL;
}
@@ -924,11 +921,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
+ if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
- } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
/* write to the data registers */
return 0;
@@ -1244,8 +1241,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
char *env[3] = {NULL, NULL, NULL};
char vmid_str[20];
char display_ready_str[20];
@@ -1306,13 +1302,15 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int pf_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 val = *(u32 *)p_data;
if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
- WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
- vgpu->id);
+ drm_WARN_ONCE(&i915->drm, true,
+ "VM(%d): guest is trying to scaling a plane\n",
+ vgpu->id);
return 0;
}
@@ -1360,13 +1358,15 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 mode;
write_vreg(vgpu, offset, p_data, bytes);
mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
- WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+ drm_WARN_ONCE(&i915->drm, 1,
+ "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}
@@ -1377,10 +1377,12 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 trtte = *(u32 *)p_data;
if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
- WARN(1, "VM(%d): Use physical address for TRTT!\n",
+ drm_WARN(&i915->drm, 1,
+ "VM(%d): Use physical address for TRTT!\n",
vgpu->id);
return -EINVAL;
}
@@ -1427,9 +1429,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1439,7 +1441,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
- } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1452,9 +1454,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
}
break;
case SKL_PCODE_CDCLK_CONTROL:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1478,24 +1480,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
}
+
/*
* Need to emulate all the HWSP register write to ensure host can
* update the VM CSB status correctly. Here listed registers can
* support BDW, SKL or other platforms with same HWSP registers.
*/
- if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+ if (unlikely(!engine)) {
gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
offset);
return -EINVAL;
}
- vgpu->hws_pga[ring_id] = value;
+ vgpu->hws_pga[engine->id] = value;
gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
vgpu->id, value, offset);
@@ -1507,7 +1511,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- if (IS_BROXTON(vgpu->gvt->dev_priv))
+ if (IS_BROXTON(vgpu->gvt->gt->i915))
v &= (1 << 31) | (1 << 29);
else
v &= (1 << 31) | (1 << 29) | (1 << 9) |
@@ -1654,26 +1658,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- int ring_id;
- u32 ring_base;
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(gvt, offset);
- ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
/**
* Read HW reg in following case
* a. the offset isn't a ring mmio
* b. the offset's ring is running on hw.
* c. the offset is ring time stamp mmio
*/
- if (ring_id >= 0)
- ring_base = dev_priv->engine[ring_id]->mmio_base;
-
- if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
+
+ if (!engine ||
+ vgpu == gvt->scheduler.engine_owner[engine->id] ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
+ mmio_hw_access_pre(gvt->gt);
+ vgpu_vreg(vgpu, offset) =
+ intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
+ mmio_hw_access_post(gvt->gt);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -1682,22 +1684,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
- if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
+ if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL;
- execlist = &vgpu->submission.execlist[ring_id];
+ execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
- ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ ret = intel_vgpu_submit_execlist(vgpu, engine);
if(ret)
- gvt_vgpu_err("fail submit workload on ring %d\n",
- ring_id);
+ gvt_vgpu_err("fail submit workload on ring %s\n",
+ engine->name);
}
++execlist->elsp_dwords.index;
@@ -1709,12 +1712,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
bool enable_execlist;
int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
@@ -1723,7 +1727,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
data & _MASKED_BIT_ENABLE(2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -1743,16 +1747,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
- gvt_dbg_core("EXECLIST %s on ring %d\n",
- (enable_execlist ? "enabling" : "disabling"),
- ring_id);
+ gvt_dbg_core("EXECLIST %s on ring %s\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ engine->name);
if (!enable_execlist)
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- BIT(ring_id),
- INTEL_VGPU_EXECLIST_SUBMISSION);
+ engine->mask,
+ INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1876,7 +1880,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
@@ -2415,9 +2419,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
@@ -2693,7 +2697,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2882,7 +2886,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
@@ -2902,7 +2906,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
- MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+ MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -3131,7 +3135,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
@@ -3367,7 +3371,7 @@ static struct gvt_mmio_block mmio_blocks[] = {
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
int ret;
@@ -3379,20 +3383,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
- if (IS_BROADWELL(dev_priv)) {
+ if (IS_BROADWELL(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_COFFEELAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
@@ -3541,13 +3545,14 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
return -EINVAL;
/*
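
intel_gvt_render_mmio_to_engine() exploits the layout that each engine owns a 4KiB register page: mask off the low 12 bits of the offset and compare against every engine's mmio_base, returning NULL (rather than the old -ENODEV) when nothing matches. A self-contained sketch with stub engines and a macro equivalent to the kernel's GENMASK():

	#include <stdio.h>

	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))

	struct intel_engine_cs { const char *name; unsigned long mmio_base; };

	/* Strip the low 12 bits (4KiB register page) and match an engine. */
	static const struct intel_engine_cs *
	mmio_to_engine(const struct intel_engine_cs *engines, int count,
		       unsigned long offset)
	{
		int i;

		offset &= ~GENMASK(11, 0);
		for (i = 0; i < count; i++)
			if (engines[i].mmio_base == offset)
				return &engines[i];
		return NULL;	/* not a per-engine register */
	}

	int main(void)
	{
		const struct intel_engine_cs engines[] = {
			{ "rcs0", 0x2000 },
			{ "vcs0", 0x12000 },
		};
		const struct intel_engine_cs *e =
			mmio_to_engine(engines, 2, 0x2230 /* illustrative offset */);

		printf("%s\n", e ? e->name : "none");
		return 0;
	}
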
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 11accd3e1023..540017fed908 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -245,6 +245,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
u32 ier = *(u32 *)p_data;
@@ -255,7 +256,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
if (info->has_upstream_irq)
@@ -282,6 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
@@ -289,7 +291,7 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ iir));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
vgpu_vreg(vgpu, reg) &= ~iir;
@@ -319,6 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL;
@@ -340,7 +343,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
if (!up_irq_info)
up_irq_info = irq->info[map->up_irq_group];
else
- WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+ drm_WARN_ON(&i915->drm, up_irq_info !=
+ irq->info[map->up_irq_group]);
bit = map->up_irq_bit;
@@ -350,7 +354,7 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- if (WARN_ON(!up_irq_info))
+ if (drm_WARN_ON(&i915->drm, !up_irq_info))
return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
@@ -536,7 +540,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
+ if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -568,7 +572,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
- if (IS_BROADWELL(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->gt->i915)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -581,7 +585,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -618,13 +622,14 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
handler = get_event_virt_handler(irq, event);
- WARN_ON(!handler);
+ drm_WARN_ON(&i915->drm, !handler);
handler(irq, event, vgpu);
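
The IER/IIR handlers above feed update_upstream_irq(), which folds pending bits of a downstream interrupt register into a single summary bit of its upstream register, gen8's cascaded-IRQ scheme. A toy two-level cascade with invented bit assignments, just to show the fold/clear behaviour:

	#include <stdint.h>
	#include <stdio.h>

	/* Toy cascade: any GT0 pending state folds into one bit of the master IIR. */
	#define MASTER_IRQ_GT0	(1u << 0)

	struct toy_irq {
		uint32_t gt0_iir;	/* downstream latch */
		uint32_t master_iir;	/* upstream latch */
	};

	static void update_upstream_irq(struct toy_irq *irq)
	{
		if (irq->gt0_iir)
			irq->master_iir |= MASTER_IRQ_GT0;	/* set_bits */
		else
			irq->master_iir &= ~MASTER_IRQ_GT0;	/* clear_bits */
	}

	int main(void)
	{
		struct toy_irq irq = { 0 };

		irq.gt0_iir |= 1u << 8;	/* e.g. a context-switch event */
		update_upstream_irq(&irq);
		printf("master %#x\n", irq.master_iir);

		irq.gt0_iir = 0;	/* guest acks the downstream IIR */
		update_upstream_irq(&irq);
		printf("master %#x\n", irq.master_iir);
		return 0;
	}
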
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 3259a1fa69e1..074c4efb58eb 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -108,6 +108,36 @@ struct gvt_dma {
struct kref ref;
};
+struct kvmgt_vdev {
+ struct intel_vgpu *vgpu;
+ struct mdev_device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+
+ /*
+ * Two caches are used to avoid mapping duplicated pages (eg.
+ * scratch pages). This helps to reduce dma setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
+ struct mutex cache_lock;
+
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
+ struct vfio_device *vfio_device;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+ return intel_vgpu_vdev(vgpu);
+}
+
static inline bool handle_valid(unsigned long handle)
{
return !!(handle & ~0xff);
@@ -120,6 +150,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int total_pages;
int npage;
int ret;
@@ -129,8 +160,8 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
- WARN_ON(ret != 1);
+ ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+ drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -152,7 +183,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+ ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -187,7 +218,7 @@ err:
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct page *page = NULL;
int ret;
@@ -210,7 +241,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
@@ -219,7 +250,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
- struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -237,7 +268,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -258,6 +289,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
@@ -270,7 +302,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
- link = &vgpu->vdev.gfn_cache.rb_node;
+ link = &vdev->gfn_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -281,11 +313,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->gfn_node, parent, link);
- rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
+ rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
/* dma_addr_cache maps dma addr to struct gvt_dma. */
parent = NULL;
- link = &vgpu->vdev.dma_addr_cache.rb_node;
+ link = &vdev->dma_addr_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -296,46 +328,51 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->dma_addr_node, parent, link);
- rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
- vgpu->vdev.nr_cache_entries++;
+ vdev->nr_cache_entries++;
return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
- rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ rb_erase(&entry->gfn_node, &vdev->gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
kfree(entry);
- vgpu->vdev.nr_cache_entries--;
+ vdev->nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
for (;;) {
- mutex_lock(&vgpu->vdev.cache_lock);
- node = rb_first(&vgpu->vdev.gfn_cache);
+ mutex_lock(&vdev->cache_lock);
+ node = rb_first(&vdev->gfn_cache);
if (!node) {
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
- vgpu->vdev.gfn_cache = RB_ROOT;
- vgpu->vdev.dma_addr_cache = RB_ROOT;
- vgpu->vdev.nr_cache_entries = 0;
- mutex_init(&vgpu->vdev.cache_lock);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ vdev->gfn_cache = RB_ROOT;
+ vdev->dma_addr_cache = RB_ROOT;
+ vdev->nr_cache_entries = 0;
+ mutex_init(&vdev->cache_lock);
}
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -409,16 +446,18 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- void *base = vgpu->vdev.region[i].data;
+ void *base = vdev->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- if (pos >= vgpu->vdev.region[i].size || iswrite) {
+
+ if (pos >= vdev->region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
- count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ count = min(count, (size_t)(vdev->region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
@@ -512,7 +551,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
struct vfio_edid_region *region =
- (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+ (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos < region->vfio_edid_regs.edid_offset) {
@@ -544,32 +583,34 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct vfio_region *region;
- region = krealloc(vgpu->vdev.region,
- (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
- vgpu->vdev.region = region;
- vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
- vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
- vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
- vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
- vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
- vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
- vgpu->vdev.num_regions++;
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+ vdev->num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- vgpu->vdev.vfio_device = vfio_device_get_from_dev(
- mdev_dev(vgpu->vdev.mdev));
- if (!vgpu->vdev.vfio_device) {
+ vdev->vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vdev->mdev));
+ if (!vdev->vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
@@ -637,10 +678,12 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
static void kvmgt_put_vfio_device(void *vgpu)
{
- if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
+
+ if (WARN_ON(!vdev->vfio_device))
return;
- vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+ vfio_device_put(vdev->vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
@@ -669,9 +712,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
goto out;
}
- INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+ INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
- vgpu->vdev.mdev = mdev;
+ kvmgt_vdev(vgpu)->mdev = mdev;
mdev_set_drvdata(mdev, vgpu);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
@@ -696,9 +739,10 @@ static int intel_vgpu_remove(struct mdev_device *mdev)
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.iommu_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ iommu_notifier);
+ struct intel_vgpu *vgpu = vdev->vgpu;
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -708,7 +752,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
iov_pfn = unmap->iova >> PAGE_SHIFT;
end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- mutex_lock(&vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
for (; iov_pfn < end_iov_pfn; iov_pfn++) {
entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
if (!entry)
@@ -718,7 +762,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
return NOTIFY_OK;
@@ -727,16 +771,16 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
static int intel_vgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.group_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ group_notifier);
/* the only action we care about */
if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vgpu->vdev.kvm = data;
+ vdev->kvm = data;
if (!data)
- schedule_work(&vgpu->vdev.release_work);
+ schedule_work(&vdev->release_work);
}
return NOTIFY_OK;
@@ -745,15 +789,16 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
static int intel_vgpu_open(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long events;
int ret;
- vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+ vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
@@ -762,7 +807,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
events = VFIO_GROUP_NOTIFY_SET_KVM;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
@@ -781,51 +826,56 @@ static int intel_vgpu_open(struct mdev_device *mdev)
intel_gvt_ops->vgpu_activate(vgpu);
- atomic_set(&vgpu->vdev.released, 0);
+ atomic_set(&vdev->released, 0);
return ret;
undo_group:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
undo_iommu:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
out:
return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct eventfd_ctx *trigger;
- trigger = vgpu->vdev.msi_trigger;
+ trigger = vdev->msi_trigger;
if (trigger) {
eventfd_ctx_put(trigger);
- vgpu->vdev.msi_trigger = NULL;
+ vdev->msi_trigger = NULL;
}
}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_guest_info *info;
int ret;
if (!handle_valid(vgpu->handle))
return;
- if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ if (atomic_cmpxchg(&vdev->released, 0, 1))
return;
intel_gvt_ops->vgpu_release(vgpu);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
- WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
+ &vdev->iommu_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
- WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
+ &vdev->group_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for group failed: %d\n", ret);
/* dereference module reference taken at open */
module_put(THIS_MODULE);
@@ -835,7 +885,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
- vgpu->vdev.kvm = NULL;
+ vdev->kvm = NULL;
vgpu->handle = 0;
}
@@ -848,10 +898,10 @@ static void intel_vgpu_release(struct mdev_device *mdev)
static void intel_vgpu_release_work(struct work_struct *work)
{
- struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
- vdev.release_work);
+ struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
+ release_work);
- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vdev->vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -913,7 +963,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EINVAL;
}
- aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+ aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
ALIGN_DOWN(off, PAGE_SIZE),
count + offset_in_page(off));
if (!aperture_va)
@@ -933,12 +983,13 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -967,11 +1018,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_ROM_REGION_INDEX:
break;
default:
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
index -= VFIO_PCI_NUM_REGIONS;
- return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ return vdev->region[index].ops->rw(vgpu, buf, count,
ppos, is_write);
}
@@ -1224,7 +1275,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
- vgpu->vdev.msi_trigger = trigger;
+ kvmgt_vdev(vgpu)->msi_trigger = trigger;
} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
@@ -1276,6 +1327,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1294,7 +1346,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions;
+ vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1385,22 +1437,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
.header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions)
+ vdev->num_regions)
return -EINVAL;
info.index =
array_index_nospec(info.index,
VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions);
+ vdev->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset =
VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->vdev.region[i].size;
- info.flags = vgpu->vdev.region[i].flags;
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
- cap_type.type = vgpu->vdev.region[i].type;
- cap_type.subtype = vgpu->vdev.region[i].subtype;
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
ret = vfio_info_add_capability(&caps,
&cap_type.header,
@@ -1597,12 +1649,10 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute **kvm_type_attrs;
struct attribute_group **kvm_vgpu_type_groups;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
- &kvm_vgpu_type_groups))
+ if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
return -EFAULT;
intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
@@ -1742,13 +1792,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct kvm *kvm;
vgpu = mdev_get_drvdata(mdev);
if (handle_valid(vgpu->handle))
return -EEXIST;
- kvm = vgpu->vdev.kvm;
+ vdev = kvmgt_vdev(vgpu);
+ kvm = vdev->kvm;
if (!kvm || kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
@@ -1769,8 +1821,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- init_completion(&vgpu->vblank_done);
-
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1778,7 +1828,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
info->debugfs_cache_entries = debugfs_create_ulong(
"kvmgt_nr_cache_entries",
0444, vgpu->debugfs,
- &vgpu->vdev.nr_cache_entries);
+ &vdev->nr_cache_entries);
return 0;
}
@@ -1795,9 +1845,17 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
return true;
}
-static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
{
- /* nothing to do here */
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
+
+ if (!vgpu->vdev)
+ return -ENOMEM;
+
+ kvmgt_vdev(vgpu)->vgpu = vgpu;
+
return 0;
}
@@ -1805,29 +1863,34 @@ static void kvmgt_detach_vgpu(void *p_vgpu)
{
int i;
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- if (!vgpu->vdev.region)
+ if (!vdev->region)
return;
- for (i = 0; i < vgpu->vdev.num_regions; i++)
- if (vgpu->vdev.region[i].ops->release)
- vgpu->vdev.region[i].ops->release(vgpu,
- &vgpu->vdev.region[i]);
- vgpu->vdev.num_regions = 0;
- kfree(vgpu->vdev.region);
- vgpu->vdev.region = NULL;
+ for (i = 0; i < vdev->num_regions; i++)
+ if (vdev->region[i].ops->release)
+ vdev->region[i].ops->release(vgpu,
+ &vdev->region[i]);
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL;
+
+ kfree(vdev);
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
+ vdev = kvmgt_vdev(vgpu);
/*
* When the guest is powered off, msi_trigger is set to NULL, but the vgpu's
@@ -1838,10 +1901,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
* enabled by guest. So if msi_trigger is NULL, success is still
* returned and no interrupt is injected into the guest.
*/
- if (vgpu->vdev.msi_trigger == NULL)
+ if (vdev->msi_trigger == NULL)
return 0;
- if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ if (eventfd_signal(vdev->msi_trigger, 1) == 1)
return 0;
return -EFAULT;
@@ -1867,26 +1930,26 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
- struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret;
if (!handle_valid(handle))
return -EINVAL;
- info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
- entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ entry = __gvt_cache_find_gfn(vgpu, gfn);
if (!entry) {
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else if (entry->size != size) {
@@ -1898,7 +1961,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1906,19 +1969,20 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
*dma_addr = entry->dma_addr;
}
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return 0;
err_unmap:
gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret = 0;
@@ -1926,14 +1990,15 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
return -ENODEV;
info = (struct kvmgt_guest_info *)handle;
+ vdev = kvmgt_vdev(info->vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
@@ -1949,19 +2014,21 @@ static void __gvt_dma_release(struct kref *ref)
static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
- struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
if (!handle_valid(handle))
return;
- info = (struct kvmgt_guest_info *)handle;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
- entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ mutex_lock(&vdev->cache_lock);
+ entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_put(&entry->ref, __gvt_dma_release);
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
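/*
 * Illustrative sketch, not part of the patch above: the kvmgt.c hunks move
 * all KVMGT-only state out of struct intel_vgpu into a separately allocated
 * struct kvmgt_vdev, reached through an opaque pointer plus a typed accessor
 * (kvmgt_vdev()). A minimal userspace model of that pattern follows; every
 * name in it is invented for illustration.
 */
#include <stdlib.h>

struct vdev_state {			/* hypervisor-private data */
	int num_regions;
};

struct vgpu {				/* core object keeps only a void * */
	void *vdev;
};

static inline struct vdev_state *to_vdev(struct vgpu *v)
{
	return v->vdev;			/* plays the role of kvmgt_vdev() */
}

static int attach_vgpu(struct vgpu *v)
{
	v->vdev = calloc(1, sizeof(struct vdev_state));
	return v->vdev ? 0 : -1;	/* -ENOMEM in kvmgt_attach_vgpu() */
}

static void detach_vgpu(struct vgpu *v)
{
	free(v->vdev);			/* mirrors kfree(vdev) in detach */
	v->vdev = NULL;
}

int main(void)
{
	struct vgpu v = { 0 };

	if (attach_vgpu(&v))
		return 1;
	to_vdev(&v)->num_regions = 1;	/* typed access through the helper */
	detach_vgpu(&v);
	return 0;
}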
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index a55178884d67..291993615af9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -103,6 +103,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -114,15 +115,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
@@ -132,16 +135,16 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
- if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes)))
goto err;
}
@@ -175,6 +178,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -187,15 +191,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
@@ -205,7 +211,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
@@ -245,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
- if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
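/*
 * Illustrative sketch, not part of the patch above: the WARN_ON() ->
 * drm_WARN_ON() conversions in mmio.c tie each warning to a specific drm
 * device, so logs from multi-GPU systems identify the offender. A simplified
 * userspace model of a device-scoped warn macro (names invented; uses the
 * GNU C statement expression the kernel itself relies on):
 */
#include <stdio.h>

struct drm_device { const char *unique; };

#define dev_warn_on(dev, cond)						\
	({								\
		int __failed = !!(cond);				\
		if (__failed)						\
			fprintf(stderr, "%s: WARN: %s\n",		\
				(dev)->unique, #cond);			\
		__failed;						\
	})

int main(void)
{
	struct drm_device dev = { "0000:00:02.0" };

	/* usable inside an if(), just like WARN_ON()/drm_WARN_ON() */
	if (dev_warn_on(&dev, 4 > 8))
		return 1;
	dev_warn_on(&dev, sizeof(long) < sizeof(int));
	return 0;
}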
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 2e68f4b02c94..cc4812648bf4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -69,8 +69,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node;
};
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int reg);
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index aaf15916d29a..2ccaf78f96e8 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = {
[VECS0] = 0xcb00,
};
-static void load_render_mocs(struct drm_i915_private *dev_priv)
+static void load_render_mocs(const struct intel_engine_cs *engine)
{
- struct intel_gvt *gvt = dev_priv->gvt;
- i915_reg_t offset;
+ struct intel_gvt *gvt = engine->i915->gvt;
+ struct intel_uncore *uncore = engine->uncore;
u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
+ i915_reg_t offset;
int ring_id, i;
/* Platform doesn't have mocs mmios. */
@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
return;
for (ring_id = 0; ring_id < cnt; ring_id++) {
- if (!HAS_ENGINE(dev_priv, ring_id))
+ if (!HAS_ENGINE(engine->i915, ring_id))
continue;
+
offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
}
@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
gen9_render_mocs.initialized = true;
@@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
*cs++ = MI_LOAD_REGISTER_IMM(count);
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id ||
- !mmio->in_context)
+ if (mmio->id != ring_id || !mmio->in_context)
continue;
*cs++ = i915_mmio_reg_offset(mmio->reg);
- *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
- (mmio->mask << 16);
+ *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, ring_id);
}
@@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = {
[VECS0] = 0x4270,
};
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
@@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (!regs)
return;
- if (WARN_ON(ring_id >= cnt))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
return;
- if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+ if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
return;
- reg = _MMIO(regs[ring_id]);
+ reg = _MMIO(regs[engine->id]);
/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
* we need to put a forcewake when invalidating RCS TLB caches,
@@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
+ if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
- gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
+ gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
+ engine->name);
else
vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(uncore, fw);
- gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+ gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
- i915_reg_t offset, l3_offset;
- u32 old_v, new_v;
-
u32 regs[] = {
[RCS0] = 0xc800,
[VCS0] = 0xc900,
@@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
[BCS0] = 0xcc00,
[VECS0] = 0xcb00,
};
+ struct intel_uncore *uncore = engine->uncore;
+ i915_reg_t offset, l3_offset;
+ u32 old_v, new_v;
int i;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
return;
- if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
+ if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
return;
if (!pre && !gen9_render_mocs.initialized)
- load_render_mocs(dev_priv);
+ load_render_mocs(engine);
- offset.reg = regs[ring_id];
+ offset.reg = regs[engine->id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, offset);
else
- old_v = gen9_render_mocs.control_table[ring_id][i];
+ old_v = gen9_render_mocs.control_table[engine->id][i];
if (next)
new_v = vgpu_vreg_t(next, offset);
else
- new_v = gen9_render_mocs.control_table[ring_id][i];
+ new_v = gen9_render_mocs.control_table[engine->id][i];
if (old_v != new_v)
- I915_WRITE_FW(offset, new_v);
+ intel_uncore_write_fw(uncore, offset, new_v);
offset.reg += 4;
}
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
new_v = gen9_render_mocs.l3cc_table[i];
if (old_v != new_v)
- I915_WRITE_FW(l3_offset, new_v);
+ intel_uncore_write_fw(uncore, l3_offset, new_v);
l3_offset.reg += 4;
}
@@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce)
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s;
struct engine_mmio *mmio;
u32 old_v, new_v;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (INTEL_GEN(dev_priv) >= 9)
- switch_mocs(pre, next, ring_id);
+ if (INTEL_GEN(engine->i915) >= 9)
+ switch_mocs(pre, next, engine);
- for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+ for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id)
+ if (mmio->id != engine->id)
continue;
/*
* No need to save or restore the mmio which is in the context
* state image on gen9; it's initialized by an lri command and
* saved or restored together with the context.
*/
- if (IS_GEN(dev_priv, 9) && mmio->in_context)
+ if (IS_GEN(engine->i915, 9) && mmio->in_context)
continue;
// save
if (pre) {
- vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+ vgpu_vreg_t(pre, mmio->reg) =
+ intel_uncore_read_fw(uncore, mmio->reg);
if (mmio->mask)
vgpu_vreg_t(pre, mmio->reg) &=
- ~(mmio->mask << 16);
+ ~(mmio->mask << 16);
old_v = vgpu_vreg_t(pre, mmio->reg);
- } else
- old_v = mmio->value = I915_READ_FW(mmio->reg);
+ } else {
+ old_v = mmio->value =
+ intel_uncore_read_fw(uncore, mmio->reg);
+ }
// restore
if (next) {
@@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow[ring_id]))
+ !is_inhibit_context(s->shadow[engine->id]))
continue;
if (mmio->mask)
new_v = vgpu_vreg_t(next, mmio->reg) |
- (mmio->mask << 16);
+ (mmio->mask << 16);
else
new_v = vgpu_vreg_t(next, mmio->reg);
} else {
@@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre,
new_v = mmio->value;
}
- I915_WRITE_FW(mmio->reg, new_v);
+ intel_uncore_write_fw(uncore, mmio->reg, new_v);
trace_render_mmio(pre ? pre->id : 0,
next ? next->id : 0,
@@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre,
}
if (next)
- handle_tlb_pending_event(next, ring_id);
+ handle_tlb_pending_event(next, engine);
}
/**
* intel_gvt_switch_mmio - switch mmio context of a specific engine
* @pre: the last vGPU that owned the engine
* @next: the vGPU to switch to
- * @ring_id: specify the engine
+ * @engine: the engine
*
* If pre is null, it indicates that the host owns the engine. If next is
* null, it indicates that we are switching to a host workload.
*/
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id)
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- if (WARN_ON(!pre && !next))
+ if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
+ engine->name))
return;
- gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
/*
* We are using a raw mmio access wrapper to improve the
* performance of batch mmio read/write, so we need to
* handle forcewake manually.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ switch_mmio(pre, next, engine);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
/**
@@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
@@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context) {
- gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
}
}
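/*
 * Illustrative sketch, not part of the patch above: mmio_context.c now takes
 * const struct intel_engine_cs * instead of an integer ring id, indexing
 * per-engine tables with engine->id and logging engine->name. A compact
 * userspace model of that calling convention (names invented):
 */
#include <stdio.h>

struct engine { unsigned int id; const char *name; };

static void switch_mocs(const struct engine *e)
{
	static const unsigned int mocs_base[] = { 0xc800, 0xc900, 0xcc00 };

	if (e->id >= sizeof(mocs_base) / sizeof(mocs_base[0]))
		return;		/* the drm_WARN_ON() bound check above */
	printf("switch mocs for ring %s at 0x%x\n", e->name, mocs_base[e->id]);
}

int main(void)
{
	const struct engine rcs0 = { 0, "rcs0" };

	switch_mocs(&rcs0);	/* callers hand over the engine, not an int */
	return 0;
}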
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index f7eaa442403f..970704b18f23 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -37,7 +37,7 @@
#define __GVT_RENDER_H__
struct engine_mmio {
- int ring_id;
+ enum intel_engine_id id;
i915_reg_t reg;
u32 mask;
bool in_context;
@@ -45,7 +45,8 @@ struct engine_mmio {
};
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id);
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine);
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 2369d4a9af94..036b74fe9298 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
enum intel_engine_id i;
struct intel_engine_cs *engine;
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- if (!list_empty(workload_q_head(vgpu, i)))
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ if (!list_empty(workload_q_head(vgpu, engine)))
return true;
}
@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = true;
/* still have uncompleted workload? */
- for_each_engine(engine, gvt->dev_priv, i) {
- if (scheduler->current_workload[i])
+ for_each_engine(engine, gvt->gt, i) {
+ if (scheduler->current_workload[engine->id])
return;
}
@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = false;
/* wake up workload dispatch thread */
- for_each_engine(engine, gvt->dev_priv, i)
- wake_up(&scheduler->waitq[i]);
+ for_each_engine(engine, gvt->gt, i)
+ wake_up(&scheduler->waitq[engine->id]);
}
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
@@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
- int ring_id;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (!vgpu_data->active)
return;
@@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
- for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
- if (scheduler->engine_owner[ring_id] == vgpu) {
- intel_gvt_switch_mmio(vgpu, NULL, ring_id);
- scheduler->engine_owner[ring_id] = NULL;
+ for_each_engine(engine, vgpu->gvt->gt, id) {
+ if (scheduler->engine_owner[engine->id] == vgpu) {
+ intel_gvt_switch_mmio(vgpu, NULL, engine);
+ scheduler->engine_owner[engine->id] = NULL;
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
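/*
 * Illustrative sketch, not part of the patch above: sched_policy.c now walks
 * the engines of a GT with for_each_engine() instead of counting raw ring
 * ids, so holes in the engine map are skipped automatically. A userspace
 * model of that iterator idiom (names invented):
 */
#include <stdio.h>

#define MAX_ENGINES 4

struct engine { int id; const char *name; };
struct gt { struct engine *engine[MAX_ENGINES]; };

/* visit only the engines that actually exist, as for_each_engine() does */
#define for_each_present_engine(e, gt, i)			\
	for ((i) = 0; (i) < MAX_ENGINES; (i)++)			\
		if (!((e) = (gt)->engine[(i)])) {} else

int main(void)
{
	struct engine rcs0 = { 0, "rcs0" }, vcs0 = { 2, "vcs0" };
	struct gt gt = { { &rcs0, NULL, &vcs0, NULL } };
	struct engine *e;
	int i;

	for_each_present_engine(e, &gt, i)
		printf("wake up waitq[%d] for %s\n", e->id, e->name);
	return 0;
}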
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 685d1e04a5ff..1c95bf8cbed0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0;
@@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS0)
+ if (workload->engine->id != RCS0)
return;
if (save) {
@@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS0) {
+ if (workload->engine->id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
return 0;
- gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring %s workload lrca %x",
+ workload->engine->name,
+ workload->ctx_desc.lrca);
+ context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
+ if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq)
return intel_context_force_single_submission(rq->context);
}
-static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+static void save_ring_hw_state(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ struct intel_uncore *uncore = engine->uncore;
i915_reg_t reg;
- reg = RING_INSTDONE(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD_UDW(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_INSTDONE(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD_UDW(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
}
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct i915_request *req = data;
+ struct i915_request *rq = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
- shadow_ctx_notifier_block[req->engine->id]);
+ shadow_ctx_notifier_block[rq->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- enum intel_engine_id ring_id = req->engine->id;
+ enum intel_engine_id ring_id = rq->engine->id;
struct intel_vgpu_workload *workload;
unsigned long flags;
- if (!is_gvt_request(req)) {
+ if (!is_gvt_request(rq)) {
spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- NULL, ring_id);
+ NULL, rq->engine);
scheduler->engine_owner[ring_id] = NULL;
}
spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
@@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- workload->vgpu, ring_id);
+ workload->vgpu, rq->engine);
scheduler->engine_owner[ring_id] = workload->vgpu;
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
@@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
atomic_set(&workload->shadow_ctx_active, 0);
break;
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
break;
default:
WARN_ON(1);
@@ -391,7 +395,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
- rq = i915_request_create(s->shadow[workload->ring_id]);
+ rq = i915_request_create(s->shadow[workload->engine->id]);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
return PTR_ERR(rq);
@@ -420,15 +424,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(s->shadow[workload->ring_id],
+ if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(s->shadow[workload->engine->id],
workload);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
return ret;
- if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
+ if (workload->engine->id == RCS0 &&
+ workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -436,6 +441,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
workload->shadow = true;
return 0;
+
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
return ret;
@@ -567,12 +573,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base;
-
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
- vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+ vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
+ workload->rb_start;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -608,7 +610,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -625,7 +626,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload);
- set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
+ set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
@@ -677,11 +678,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct i915_request *rq;
- int ring_id = workload->ring_id;
int ret;
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
+ workload->engine->name, workload);
mutex_lock(&vgpu->vgpu_lock);
@@ -710,8 +710,8 @@ out:
}
if (!IS_ERR_OR_NULL(workload->req)) {
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
+ workload->engine->name, workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
@@ -722,8 +722,8 @@ err_req:
return ret;
}
-static struct intel_vgpu_workload *pick_next_workload(
- struct intel_gvt *gvt, int ring_id)
+static struct intel_vgpu_workload *
+pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
@@ -735,27 +735,27 @@ static struct intel_vgpu_workload *pick_next_workload(
* bail out
*/
if (!scheduler->current_vgpu) {
- gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
goto out;
}
if (scheduler->need_reschedule) {
- gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
goto out;
}
if (!scheduler->current_vgpu->active ||
- list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
/*
* still have a current workload, maybe the workload dispatcher
* failed to submit it for some reason, so resubmit it.
*/
- if (scheduler->current_workload[ring_id]) {
- workload = scheduler->current_workload[ring_id];
- gvt_dbg_sched("ring id %d still have current workload %p\n",
- ring_id, workload);
+ if (scheduler->current_workload[engine->id]) {
+ workload = scheduler->current_workload[engine->id];
+ gvt_dbg_sched("ring %s still have current workload %p\n",
+ engine->name, workload);
goto out;
}
@@ -765,13 +765,14 @@ static struct intel_vgpu_workload *pick_next_workload(
* will wait the current workload is finished when trying to
* schedule out a vgpu.
*/
- scheduler->current_workload[ring_id] = container_of(
- workload_q_head(scheduler->current_vgpu, ring_id)->next,
- struct intel_vgpu_workload, list);
+ scheduler->current_workload[engine->id] =
+ list_first_entry(workload_q_head(scheduler->current_vgpu,
+ engine),
+ struct intel_vgpu_workload, list);
- workload = scheduler->current_workload[ring_id];
+ workload = scheduler->current_workload[engine->id];
- gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+ gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
@@ -783,14 +784,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 ring_base;
u32 head, tail;
u16 wrap_count;
@@ -811,14 +810,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ ring_base = rq->engine->mmio_base;
vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
+ if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -869,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
@@ -966,54 +965,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
mutex_unlock(&vgpu->vgpu_lock);
}
-struct workload_thread_param {
- struct intel_gvt *gvt;
- int ring_id;
-};
-
-static int workload_thread(void *priv)
+static int workload_thread(void *arg)
{
- struct workload_thread_param *p = (struct workload_thread_param *)priv;
- struct intel_gvt *gvt = p->gvt;
- int ring_id = p->ring_id;
+ struct intel_engine_cs *engine = arg;
+ const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+ struct intel_gvt *gvt = engine->i915->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
-
- kfree(p);
- gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+ gvt_dbg_core("workload thread for ring %s started\n", engine->name);
while (!kthread_should_stop()) {
- add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ intel_wakeref_t wakeref;
+
+ add_wait_queue(&scheduler->waitq[engine->id], &wait);
do {
- workload = pick_next_workload(gvt, ring_id);
+ workload = pick_next_workload(gvt, engine);
if (workload)
break;
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
} while (!kthread_should_stop());
- remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+ remove_wait_queue(&scheduler->waitq[engine->id], &wait);
if (!workload)
break;
- gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
- workload->ring_id, workload,
- workload->vgpu->id);
+ gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
+ engine->name, workload,
+ workload->vgpu->id);
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(engine->uncore->rpm);
- gvt_dbg_sched("ring id %d will dispatch workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s will dispatch workload %p\n",
+ engine->name, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore,
+ FORCEWAKE_ALL);
/*
* Update the vReg of the vGPU which submitted this
* workload. The vGPU may use these registers for checking
@@ -1030,21 +1022,21 @@ static int workload_thread(void *priv)
goto complete;
}
- gvt_dbg_sched("ring id %d wait workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s wait workload %p\n",
+ engine->name, workload);
i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
- workload, workload->status);
+ workload, workload->status);
- complete_current_workload(gvt, ring_id);
+ complete_current_workload(gvt, engine->id);
if (need_force_wake)
- intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore,
+ FORCEWAKE_ALL);
- intel_runtime_pm_put_unchecked(rpm);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1073,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
gvt_dbg_core("clean workload scheduler\n");
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
atomic_notifier_chain_unregister(
&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
@@ -1084,7 +1076,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct workload_thread_param *param = NULL;
struct intel_engine_cs *engine;
enum intel_engine_id i;
int ret;
@@ -1093,20 +1084,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
init_waitqueue_head(&scheduler->workload_complete_wq);
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
init_waitqueue_head(&scheduler->waitq[i]);
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto err;
- }
-
- param->gvt = gvt;
- param->ring_id = i;
-
- scheduler->thread[i] = kthread_run(workload_thread, param,
- "gvt workload %d", i);
+ scheduler->thread[i] = kthread_run(workload_thread, engine,
+ "gvt:%s", engine->name);
if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]);
@@ -1118,11 +1100,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
atomic_notifier_chain_register(&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
}
+
return 0;
+
err:
intel_gvt_clean_workload_scheduler(gvt);
- kfree(param);
- param = NULL;
return ret;
}
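/*
 * Illustrative sketch, not part of the patch above: the scheduler hunks drop
 * the kmalloc'd workload_thread_param and hand the engine pointer straight to
 * kthread_run(), removing an allocation and its error path. A minimal pthread
 * analogue of that simplification (names invented):
 */
#include <pthread.h>
#include <stdio.h>

struct engine { const char *name; };

static void *workload_thread(void *arg)
{
	struct engine *engine = arg;	/* no wrapper struct to unpack or free */

	printf("workload thread for ring %s started\n", engine->name);
	return NULL;
}

int main(void)
{
	struct engine rcs0 = { "rcs0" };
	pthread_t thread;

	/* as kthread_run(workload_thread, engine, "gvt:%s", engine->name) */
	if (pthread_create(&thread, NULL, workload_thread, &rcs0))
		return 1;
	pthread_join(thread, NULL);
	return 0;
}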
@@ -1160,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, id)
+ for_each_engine(engine, vgpu->gvt->gt, id)
intel_context_unpin(s->shadow[id]);
kmem_cache_destroy(s->workloads);
@@ -1217,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_ppgtt *ppgtt;
@@ -1230,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_save(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
@@ -1246,7 +1228,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
ce->vm = i915_vm_get(&ppgtt->vm);
intel_context_set_single_submission(ce);
- if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ /* Max ring buffer size */
+ if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
const unsigned int ring_size = 512 * SZ_4K;
ce->ring = __intel_context_ring_size(ring_size);
@@ -1282,7 +1265,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
out_shadow_ctx:
i915_context_ppgtt_root_restore(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1309,6 +1292,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask,
unsigned int interface)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
const struct intel_vgpu_submission_ops *ops[] = {
[INTEL_VGPU_EXECLIST_SUBMISSION] =
@@ -1316,10 +1300,11 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
};
int ret;
- if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
return -EINVAL;
- if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ if (drm_WARN_ON(&i915->drm,
+ interface == 0 && engine_mask != ALL_ENGINES))
return -EINVAL;
if (s->active)
@@ -1441,7 +1426,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
- * @ring_id: ring index
+ * @engine: the engine
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
@@ -1452,14 +1437,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
*
*/
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct list_head *q = workload_q_head(vgpu, engine);
struct intel_vgpu_workload *last_workload = NULL;
struct intel_vgpu_workload *workload = NULL;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
u32 guest_head;
@@ -1486,10 +1471,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
list_for_each_entry_reverse(last_workload, q, list) {
if (same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n",
- ring_id);
+ gvt_dbg_el("ring %s cur workload == last\n",
+ engine->name);
gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
+ last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
@@ -1499,7 +1484,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+ gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1519,7 +1504,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (IS_ERR(workload))
return workload;
- workload->ring_id = ring_id;
+ workload->engine = engine;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
@@ -1528,7 +1513,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1566,8 +1551,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
- workload, ring_id, head, tail, start, ctl);
+ gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
+ workload, engine->name, head, tail, start, ctl);
ret = prepare_mm(workload);
if (ret) {
@@ -1578,10 +1563,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
- if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- ret = intel_gvt_scan_and_shadow_workload(workload);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ if (list_empty(q)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
+ ret = intel_gvt_scan_and_shadow_workload(workload);
}
if (ret) {
@@ -1601,7 +1587,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
list_add_tail(&workload->list,
- workload_q_head(workload->vgpu, workload->ring_id));
+ workload_q_head(workload->vgpu, workload->engine));
intel_gvt_kick_schedule(workload->vgpu->gvt);
- wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
}
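
Taken together, the scheduler.c changes mean callers key workload creation and wakeups by engine rather than by a raw ring index. A sketch of an updated call site, assuming the caller has already resolved the engine from the execlist context (surrounding names are illustrative):

    struct intel_vgpu_workload *workload;

    workload = intel_vgpu_create_workload(vgpu, engine, desc);
    if (IS_ERR(workload))
            return PTR_ERR(workload);

    intel_vgpu_queue_workload(workload);
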
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c50d14a9ce85..bf7fc0ca4cb1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
@@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb {
bool ppgtt;
};
-#define workload_q_head(vgpu, ring_id) \
- (&(vgpu->submission.workload_q_head[ring_id]))
+#define workload_q_head(vgpu, e) \
+ (&(vgpu)->submission.workload_q_head[(e)->id])
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
@@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc);
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
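
Besides switching the key from ring_id to engine, the new workload_q_head() also parenthesizes both macro arguments, which the old definition did not. A contrived caller showing the failure mode the parentheses prevent (hypothetical, for illustration only):

    /*
     * With the old, unparenthesized 'vgpu', this expanded to
     * &(cond ? vgpu_a : vgpu_b->submission.workload_q_head[...]),
     * binding '->' to vgpu_b alone instead of the whole conditional.
     */
    struct list_head *q = workload_q_head(cond ? vgpu_a : vgpu_b, engine);
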
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 345c2aa3b491..1d5ff88078bd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -37,6 +37,7 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* setup the ballooning information */
vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
@@ -69,7 +70,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
- WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+ drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
#define VGPU_MAX_WEIGHT 16
@@ -148,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN(gvt->dev_priv, 8))
+ if (IS_GEN(gvt->gt->i915, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (IS_GEN(gvt->dev_priv, 9))
+ vgpu_types[i].name);
+ else if (IS_GEN(gvt->gt->i915, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ vgpu_types[i].name);
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
@@ -271,8 +272,9 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
- WARN(vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
@@ -432,9 +434,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- /*TODO: add more platforms support */
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
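
The vgpu.c conversions all follow one pattern: look up the drm_i915_private once at the top of the function, then use the device-aware drm_WARN*() so the splat names the GPU it came from. A minimal sketch of the pattern, where 'condition' is a placeholder:

    struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

    drm_WARN_ON(&i915->drm, condition);
    drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
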
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index b0a499753526..c4048628188a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,6 +7,7 @@
#include <linux/debugobjects.h>
#include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -390,13 +391,23 @@ out:
return err;
}
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
+ struct dma_fence *prev;
+
/* We expect the caller to manage the exclusive timeline ordering */
GEM_BUG_ON(i915_active_is_idle(ref));
- if (!__i915_active_fence_set(&ref->excl, f))
+ rcu_read_lock();
+ prev = __i915_active_fence_set(&ref->excl, f);
+ if (prev)
+ prev = dma_fence_get_rcu(prev);
+ else
atomic_inc(&ref->count);
+ rcu_read_unlock();
+
+ return prev;
}
bool i915_active_acquire_if_busy(struct i915_active *ref)
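
With i915_active_set_exclusive() now returning a reference to the previous exclusive fence, a caller that must preserve ordering can chain after it explicitly. A hedged sketch of a possible caller, assuming rq, ref, and err come from the surrounding context (the real call sites are outside this hunk):

    struct dma_fence *prev;

    prev = i915_active_set_exclusive(ref, &rq->fence);
    if (prev) {
            /* order the new request after the old exclusive fence */
            err = i915_request_await_dma_fence(rq, prev);
            dma_fence_put(prev);
    }
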
@@ -442,6 +453,9 @@ static void enable_signaling(struct i915_active_fence *active)
{
struct dma_fence *fence;
+ if (unlikely(is_barrier(active)))
+ return;
+
fence = i915_active_fence_get(active);
if (!fence)
return;
@@ -450,26 +464,49 @@ static void enable_signaling(struct i915_active_fence *active)
dma_fence_put(fence);
}
-int i915_active_wait(struct i915_active *ref)
+static int flush_barrier(struct active_node *it)
{
- struct active_node *it, *n;
- int err = 0;
+ struct intel_engine_cs *engine;
- might_sleep();
+ if (likely(!is_barrier(&it->base)))
+ return 0;
- if (!i915_active_acquire_if_busy(ref))
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
return 0;
- /* Flush lazy signals */
+ return intel_engine_flush_barriers(engine);
+}
+
+static int flush_lazy_signals(struct i915_active *ref)
+{
+ struct active_node *it, *n;
+ int err = 0;
+
enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) /* unconnected idle barrier */
- continue;
+ err = flush_barrier(it); /* unconnected idle barrier? */
+ if (err)
+ break;
enable_signaling(&it->base);
}
- /* Any fence added after the wait begins will not be auto-signaled */
+ return err;
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+ int err;
+
+ might_sleep();
+
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ /* Any fence added after the wait begins will not be auto-signaled */
+ err = flush_lazy_signals(ref);
i915_active_release(ref);
if (err)
return err;
@@ -481,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
return 0;
}
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int __await_active(struct i915_active_fence *active,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
+{
+ struct dma_fence *fence;
+
+ if (is_barrier(active)) /* XXX flush the barrier? */
+ return 0;
+
+ fence = i915_active_fence_get(active);
+ if (fence) {
+ int err;
+
+ err = fn(arg, fence);
+ dma_fence_put(fence);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int await_active(struct i915_active *ref,
+ unsigned int flags,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
{
int err = 0;
+ /* We must always wait for the exclusive fence! */
if (rcu_access_pointer(ref->excl.fence)) {
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&ref->excl.fence);
- rcu_read_unlock();
- if (fence) {
- err = i915_request_await_dma_fence(rq, fence);
- dma_fence_put(fence);
+ err = __await_active(&ref->excl, fn, arg);
+ if (err)
+ return err;
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ err = __await_active(&it->base, fn, arg);
+ if (err)
+ break;
}
+ i915_active_release(ref);
+ if (err)
+ return err;
}
- /* In the future we may choose to await on all fences */
+ return 0;
+}
- return err;
+static int rq_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_request_await_dma_fence(arg, fence);
+}
+
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, rq_await_fence, rq);
+}
+
+static int sw_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_sw_fence_await_dma_fence(arg, fence, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+}
+
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, sw_await_fence, fence);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
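
Both public entry points now funnel through await_active() with a per-backend callback; by default only the exclusive fence is waited on, and the full tree of tracked timelines is added when I915_ACTIVE_AWAIT_ALL is passed. A sketch of the two call styles, assuming rq and sw_fence were set up by the caller:

    /* await only the exclusive fence (default) */
    err = i915_request_await_active(rq, ref, 0);

    /* await the exclusive fence plus every tracked timeline */
    err = i915_sw_fence_await_active(sw_fence, ref, I915_ACTIVE_AWAIT_ALL);
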
@@ -623,6 +716,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* We can then use the preallocated nodes in
* i915_active_acquire_barrier()
*/
+ GEM_BUG_ON(!mask);
for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
struct llist_node *prev = first;
@@ -812,7 +906,6 @@ __i915_active_fence_set(struct i915_active_fence *active,
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
}
- GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
list_add_tail(&active->cb.node, &fence->cb_list);
spin_unlock_irqrestore(fence->lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 51e1e854ca55..b3282ae7913c 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -173,7 +173,8 @@ i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
}
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
static inline bool i915_active_has_exclusive(struct i915_active *ref)
{
@@ -182,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags);
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 66883af64ca1..20babbdb297d 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -312,7 +312,8 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
return block;
out_free:
- __i915_buddy_free(mm, block);
+ if (i != order)
+ __i915_buddy_free(mm, block);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a0e437aa65b7..189b573d02be 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -803,10 +803,11 @@ static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous) {
- DRM_ERROR("CMD: %s [%d] command table not sorted: "
- "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
- engine->name, engine->id,
- i, j, curr, previous);
+ drm_err(&engine->i915->drm,
+ "CMD: %s [%d] command table not sorted: "
+ "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, j, curr, previous);
ret = false;
}
@@ -829,10 +830,11 @@ static bool check_sorted(const struct intel_engine_cs *engine,
u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) {
- DRM_ERROR("CMD: %s [%d] register table not sorted: "
- "entry=%d reg=0x%08X prev=0x%08X\n",
- engine->name, engine->id,
- i, curr, previous);
+ drm_err(&engine->i915->drm,
+ "CMD: %s [%d] register table not sorted: "
+ "entry=%d reg=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, curr, previous);
ret = false;
}
@@ -1010,18 +1012,21 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
}
if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
- DRM_ERROR("%s: command descriptions are not sorted\n",
- engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: command descriptions are not sorted\n",
+ engine->name);
return;
}
if (!validate_regs_sorted(engine)) {
- DRM_ERROR("%s: registers are not sorted\n", engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: registers are not sorted\n", engine->name);
return;
}
ret = init_hash_table(engine, cmd_tables, cmd_table_count);
if (ret) {
- DRM_ERROR("%s: initialised failed!\n", engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: initialised failed!\n", engine->name);
fini_hash_table(engine);
return;
}
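
Each cmd-parser hunk above is the same mechanical substitution: the global DRM_ERROR() becomes a device-relative drm_err(), with the drm_device recovered through the engine. The recurring shape, assuming only that engine is in scope:

    /* before */
    DRM_ERROR("%s: registers are not sorted\n", engine->name);

    /* after */
    drm_err(&engine->i915->drm,
            "%s: registers are not sorted\n", engine->name);
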
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d5a9b8a964c2..6ca797128aa1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,14 +30,6 @@
#include <linux/sort.h>
#include <drm/drm_debugfs.h>
-#include <drm/drm_fourcc.h>
-
-#include "display/intel_display_types.h"
-#include "display/intel_dp.h"
-#include "display/intel_fbc.h"
-#include "display/intel_hdcp.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_psr.h"
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
@@ -48,9 +40,9 @@
#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
+#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "intel_csr.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -127,8 +119,8 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
}
}
-static void
-describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+void
+i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
@@ -673,7 +665,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
if (!vma)
seq_puts(m, "unused");
else
- describe_obj(m, vma->obj);
+ i915_debugfs_describe_obj(m, vma->obj);
seq_putc(m, '\n');
}
rcu_read_unlock();
@@ -1004,367 +996,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static int ilk_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- u32 rgvmodectl, rstdbyctl;
- u16 crstandvid;
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
- rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
- crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
-
- seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
- seq_printf(m, "Boost freq: %d\n",
- (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
- MEMMODE_BOOST_FREQ_SHIFT);
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
- seq_printf(m, "SW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_SWMODE_EN));
- seq_printf(m, "Gated voltage change: %s\n",
- yesno(rgvmodectl & MEMMODE_RCLK_GATE));
- seq_printf(m, "Starting frequency: P%d\n",
- (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- seq_printf(m, "Max P-state: P%d\n",
- (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
- seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
- seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
- seq_printf(m, "Render standby enabled: %s\n",
- yesno(!(rstdbyctl & RCX_SW_EXIT)));
- seq_puts(m, "Current RS state: ");
- switch (rstdbyctl & RSX_STATUS_MASK) {
- case RSX_STATUS_ON:
- seq_puts(m, "on\n");
- break;
- case RSX_STATUS_RC1:
- seq_puts(m, "RC1\n");
- break;
- case RSX_STATUS_RC1E:
- seq_puts(m, "RC1E\n");
- break;
- case RSX_STATUS_RS1:
- seq_puts(m, "RS1\n");
- break;
- case RSX_STATUS_RS2:
- seq_puts(m, "RS2 (RC6)\n");
- break;
- case RSX_STATUS_RS3:
- seq_puts(m, "RC3 (RC6+)\n");
- break;
- default:
- seq_puts(m, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int i915_forcewake_domains(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_uncore_forcewake_domain *fw_domain;
- unsigned int tmp;
-
- seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake_count);
-
- for_each_fw_domain(fw_domain, uncore, tmp)
- seq_printf(m, "%s.wake_count = %u\n",
- intel_uncore_forcewake_domain_to_str(fw_domain->id),
- READ_ONCE(fw_domain->wake_count));
-
- return 0;
-}
-
-static void print_rc6_res(struct seq_file *m,
- const char *title,
- const i915_reg_t reg)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- seq_printf(m, "%s %u (%llu us)\n", title,
- intel_uncore_read(&i915->uncore, reg),
- intel_rc6_residency_us(&i915->gt.rc6, reg));
-}
-
-static int vlv_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rcctl1, pw_status;
-
- pw_status = I915_READ(VLV_GTLC_PW_STATUS);
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
-
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_EI_MODE(1))));
- seq_printf(m, "Render Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
-
- print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
- print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int gen6_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 gt_core_status, rcctl1, rc6vids = 0;
- u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
-
- gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
-
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
- if (INTEL_GEN(dev_priv) >= 9) {
- gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
- gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
- }
-
- if (INTEL_GEN(dev_priv) <= 7)
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
-
- seq_printf(m, "RC1e Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
- seq_printf(m, "Media Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
- }
- seq_printf(m, "Deep RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
- seq_printf(m, "Deepest RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_puts(m, "Current RC state: ");
- switch (gt_core_status & GEN6_RCn_MASK) {
- case GEN6_RC0:
- if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_puts(m, "Core Power Down\n");
- else
- seq_puts(m, "on\n");
- break;
- case GEN6_RC3:
- seq_puts(m, "RC3\n");
- break;
- case GEN6_RC6:
- seq_puts(m, "RC6\n");
- break;
- case GEN6_RC7:
- seq_puts(m, "RC7\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
-
- seq_printf(m, "Core Power Down: %s\n",
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
- }
-
- /* Not exactly sure what this is */
- print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
- GEN6_GT_GFX_RC6_LOCKED);
- print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
- print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
- print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
-
- if (INTEL_GEN(dev_priv) <= 7) {
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
- }
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int i915_drpc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- int err = -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = vlv_drpc_info(m);
- else if (INTEL_GEN(dev_priv) >= 6)
- err = gen6_drpc_info(m);
- else
- err = ilk_drpc_info(m);
- }
-
- return err;
-}
-
-static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
-
- seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->fb_tracking.busy_bits);
-
- seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->fb_tracking.flip_bits);
-
- return 0;
-}
-
-static int i915_fbc_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_fbc *fbc = &dev_priv->fbc;
- intel_wakeref_t wakeref;
-
- if (!HAS_FBC(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&fbc->lock);
-
- if (intel_fbc_is_active(dev_priv))
- seq_puts(m, "FBC enabled\n");
- else
- seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
-
- if (intel_fbc_is_active(dev_priv)) {
- u32 mask;
-
- if (INTEL_GEN(dev_priv) >= 8)
- mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 7)
- mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 5)
- mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
- else if (IS_G4X(dev_priv))
- mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
- else
- mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
- FBC_STAT_COMPRESSED);
-
- seq_printf(m, "Compressing: %s\n", yesno(mask));
- }
-
- mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_fbc_false_color_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- *val = dev_priv->fbc.false_color;
-
- return 0;
-}
-
-static int i915_fbc_false_color_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- u32 reg;
-
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- mutex_lock(&dev_priv->fbc.lock);
-
- reg = I915_READ(ILK_DPFC_CONTROL);
- dev_priv->fbc.false_color = val;
-
- I915_WRITE(ILK_DPFC_CONTROL, val ?
- (reg | FBC_CTL_FALSE_COLOR) :
- (reg & ~FBC_CTL_FALSE_COLOR));
-
- mutex_unlock(&dev_priv->fbc.lock);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
- i915_fbc_false_color_get, i915_fbc_false_color_set,
- "%llu\n");
-
-static int i915_ips_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!HAS_IPS(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915_modparams.enable_ips));
-
- if (INTEL_GEN(dev_priv) >= 8) {
- seq_puts(m, "Currently: unknown\n");
- } else {
- if (I915_READ(IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "Currently: enabled\n");
- else
- seq_puts(m, "Currently: disabled\n");
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_sr_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- bool sr_enabled = false;
-
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
- if (INTEL_GEN(dev_priv) >= 9)
- /* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
- sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- else if (IS_I915GM(dev_priv))
- sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
- else if (IS_PINEVIEW(dev_priv))
- sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
-
- return 0;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1406,70 +1037,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
-static int i915_opregion(struct seq_file *m, void *unused)
-{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
-
- return 0;
-}
-
-static int i915_vbt(struct seq_file *m, void *unused)
-{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
- if (opregion->vbt)
- seq_write(m, opregion->vbt, opregion->vbt_size);
-
- return 0;
-}
-
-static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_framebuffer *fbdev_fb = NULL;
- struct drm_framebuffer *drm_fb;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
- fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
-
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fbdev_fb->base.width,
- fbdev_fb->base.height,
- fbdev_fb->base.format->depth,
- fbdev_fb->base.format->cpp[0] * 8,
- fbdev_fb->base.modifier,
- drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, intel_fb_obj(&fbdev_fb->base));
- seq_putc(m, '\n');
- }
-#endif
-
- mutex_lock(&dev->mode_config.fb_lock);
- drm_for_each_fb(drm_fb, dev) {
- struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
- if (fb == fbdev_fb)
- continue;
-
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fb->base.width,
- fb->base.height,
- fb->base.format->depth,
- fb->base.format->cpp[0] * 8,
- fb->base.modifier,
- drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, intel_fb_obj(&fb->base));
- seq_putc(m, '\n');
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
@@ -1515,7 +1082,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (intel_context_pin_if_active(ce)) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
- describe_obj(m, ce->state->obj);
+ i915_debugfs_describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
intel_context_unpin(ce);
@@ -1752,10 +1319,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
return "";
}
-static void i915_guc_log_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
+static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
{
- struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
if (!intel_guc_log_relay_created(log)) {
@@ -1779,11 +1344,12 @@ static void i915_guc_log_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- i915_guc_log_info(m, dev_priv);
+ i915_guc_log_info(m, &uc->guc.log);
/* Add more as required ... */
@@ -1793,11 +1359,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->gt.uc.guc;
- struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+ struct intel_uc *uc = &dev_priv->gt.uc;
+ struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
int index;
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!intel_uc_uses_guc_submission(uc))
return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
@@ -1884,11 +1450,12 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
static int i915_guc_log_level_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
+ *val = intel_guc_log_get_level(&uc->guc.log);
return 0;
}
@@ -1896,11 +1463,12 @@ static int i915_guc_log_level_get(void *data, u64 *val)
static int i915_guc_log_level_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
+ return intel_guc_log_set_level(&uc->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
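
The GuC debugfs conversions replace the dev_priv-wide USES_GUC*() macros with predicates on the intel_uc struct itself, keeping the uc pointer around for the subsequent log accesses. The recurring pattern, lifted from the hunks above:

    struct intel_uc *uc = &dev_priv->gt.uc;

    if (!intel_uc_uses_guc(uc))
            return -ENODEV;

    i915_guc_log_info(m, &uc->guc.log);
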
@@ -1913,7 +1481,7 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
struct intel_guc *guc = &i915->gt.uc.guc;
struct intel_guc_log *log = &guc->log;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return -ENODEV;
file->private_data = log;
@@ -1963,253 +1531,6 @@ static const struct file_operations i915_guc_log_relay_fops = {
.release = i915_guc_log_relay_release,
};
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
-{
- u8 val;
- static const char * const sink_status[] = {
- "inactive",
- "transition to active, capture and display",
- "active, display from RFB",
- "active, capture and display on sink device timings",
- "transition to inactive, capture and display, timing re-sync",
- "reserved",
- "reserved",
- "sink internal error",
- };
- struct drm_connector *connector = m->private;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- int ret;
-
- if (!CAN_PSR(dev_priv)) {
- seq_puts(m, "PSR Unsupported\n");
- return -ENODEV;
- }
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-
- if (ret == 1) {
- const char *str = "unknown";
-
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- str = sink_status[val];
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
- } else {
- return ret;
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
-
-static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
-{
- u32 val, status_val;
- const char *status = "unknown";
-
- if (dev_priv->psr.psr2_enabled) {
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
- val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
- status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
- EDP_PSR2_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- } else {
- static const char * const live_status[] = {
- "IDLE",
- "SRDONACK",
- "SRDENT",
- "BUFOFF",
- "BUFON",
- "AUXACK",
- "SRDOFFACK",
- "SRDENT_ON",
- };
- val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
- status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- }
-
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
-}
-
-static int i915_edp_psr_status(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_psr *psr = &dev_priv->psr;
- intel_wakeref_t wakeref;
- const char *status;
- bool enabled;
- u32 val;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
- if (psr->dp)
- seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
- seq_puts(m, "\n");
-
- if (!psr->sink_support)
- return 0;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&psr->lock);
-
- if (psr->enabled)
- status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
- else
- status = "disabled";
- seq_printf(m, "PSR mode: %s\n", status);
-
- if (!psr->enabled) {
- seq_printf(m, "PSR sink not reliable: %s\n",
- yesno(psr->sink_not_reliable));
-
- goto unlock;
- }
-
- if (psr->psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- enabled = val & EDP_PSR2_ENABLE;
- } else {
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- enabled = val & EDP_PSR_ENABLE;
- }
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
- enableddisabled(enabled), val);
- psr_source_status(dev_priv, m);
- seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
- psr->busy_frontbuffer_bits);
-
- /*
- * SKL+ Perf counter is reset to 0 everytime DC state is entered
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
- val &= EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance counter: %u\n", val);
- }
-
- if (psr->debug & I915_PSR_DEBUG_IRQ) {
- seq_printf(m, "Last attempted entry at: %lld\n",
- psr->last_entry_attempt);
- seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
- }
-
- if (psr->psr2_enabled) {
- u32 su_frames_val[3];
- int frame;
-
- /*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
- */
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
- frame));
- su_frames_val[frame / 3] = val;
- }
-
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
-
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
-
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
- }
- }
-
-unlock:
- mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int
-i915_edp_psr_debug_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
- int ret;
-
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
-
- DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- ret = intel_psr_debug_set(dev_priv, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return ret;
-}
-
-static int
-i915_edp_psr_debug_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
-
- *val = READ_ONCE(dev_priv->psr.debug);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
- i915_edp_psr_debug_get, i915_edp_psr_debug_set,
- "%llu\n");
-
-static int i915_energy_uJ(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- unsigned long long power;
- intel_wakeref_t wakeref;
- u32 units;
-
- if (INTEL_GEN(dev_priv) < 6)
- return -ENODEV;
-
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
- return -ENODEV;
-
- units = (power & 0x1f00) >> 8;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- power = I915_READ(MCH_SECP_NRG_STTS);
-
- power = (1000000 * power) >> units; /* convert to uJ */
- seq_printf(m, "%llu", power);
-
- return 0;
-}
-
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2243,452 +1564,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_power_domain_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int i;
-
- mutex_lock(&power_domains->lock);
-
- seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
- for (i = 0; i < power_domains->power_well_count; i++) {
- struct i915_power_well *power_well;
- enum intel_display_power_domain power_domain;
-
- power_well = &power_domains->power_wells[i];
- seq_printf(m, "%-25s %d\n", power_well->desc->name,
- power_well->count);
-
- for_each_power_domain(power_domain, power_well->desc->domains)
- seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(power_domain),
- power_domains->domain_use_count[power_domain]);
- }
-
- mutex_unlock(&power_domains->lock);
-
- return 0;
-}
-
-static int i915_dmc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct intel_csr *csr;
- i915_reg_t dc5_reg, dc6_reg = {};
-
- if (!HAS_CSR(dev_priv))
- return -ENODEV;
-
- csr = &dev_priv->csr;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
- seq_printf(m, "path: %s\n", csr->fw_path);
-
- if (!csr->dmc_payload)
- goto out;
-
- seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
-
- if (INTEL_GEN(dev_priv) >= 12) {
- dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
- dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
- /*
- * NOTE: DMC_DEBUG3 is a general purpose reg.
- * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
- * reg for DC3CO debugging and validation,
- * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
- */
- seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
- } else {
- dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT;
- if (!IS_GEN9_LP(dev_priv))
- dc6_reg = SKL_CSR_DC5_DC6_COUNT;
- }
-
- seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
- if (dc6_reg.reg)
- seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
-
-out:
- seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
- seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
- seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static void intel_seq_print_mode(struct seq_file *m, int tabs,
- const struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < tabs; i++)
- seq_putc(m, '\t');
-
- seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_encoder_info(struct seq_file *m,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_connector_list_iter conn_iter;
- struct drm_connector *connector;
-
- seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
- encoder->base.base.id, encoder->base.name);
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- const struct drm_connector_state *conn_state =
- connector->state;
-
- if (conn_state->best_encoder != &encoder->base)
- continue;
-
- seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- }
- drm_connector_list_iter_end(&conn_iter);
-}
-
-static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
-{
- const struct drm_display_mode *mode = panel->fixed_mode;
-
- seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_hdcp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- bool hdcp_cap, hdcp2_cap;
-
- hdcp_cap = intel_hdcp_capable(intel_connector);
- hdcp2_cap = intel_hdcp2_capable(intel_connector);
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
-
- seq_puts(m, "\n");
-}
-
-static void intel_dp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
-
- seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
- seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
- if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
- intel_panel_info(m, &intel_connector->panel);
-
- drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
- &intel_dp->aux);
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
-}
-
-static void intel_dp_mst_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(intel_encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- intel_connector->port);
-
- seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
-static void intel_hdmi_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
-
- seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
-}
-
-static void intel_lvds_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- intel_panel_info(m, &intel_connector->panel);
-}
-
-static void intel_connector_info(struct seq_file *m,
- struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_connector_state *conn_state = connector->state;
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- const struct drm_display_mode *mode;
-
- seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
- connector->base.id, connector->name,
- drm_get_connector_status_name(connector->status));
-
- if (connector->status == connector_status_disconnected)
- return;
-
- seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
- connector->display_info.width_mm,
- connector->display_info.height_mm);
- seq_printf(m, "\tsubpixel order: %s\n",
- drm_get_subpixel_order_name(connector->display_info.subpixel_order));
- seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
-
- if (!encoder)
- return;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (encoder->type == INTEL_OUTPUT_DP_MST)
- intel_dp_mst_info(m, intel_connector);
- else
- intel_dp_info(m, intel_connector);
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- if (encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
- break;
- case DRM_MODE_CONNECTOR_HDMIA:
- if (encoder->type == INTEL_OUTPUT_HDMI ||
- encoder->type == INTEL_OUTPUT_DDI)
- intel_hdmi_info(m, intel_connector);
- break;
- default:
- break;
- }
-
- seq_printf(m, "\tmodes:\n");
- list_for_each_entry(mode, &connector->modes, head)
- intel_seq_print_mode(m, 2, mode);
-}
-
-static const char *plane_type(enum drm_plane_type type)
-{
- switch (type) {
- case DRM_PLANE_TYPE_OVERLAY:
- return "OVL";
- case DRM_PLANE_TYPE_PRIMARY:
- return "PRI";
- case DRM_PLANE_TYPE_CURSOR:
- return "CUR";
- /*
- * Deliberately omitting default: to generate compiler warnings
- * when a new drm_plane_type gets added.
- */
- }
-
- return "unknown";
-}
-
-static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
-{
- /*
- * According to doc only one DRM_MODE_ROTATE_ is allowed but this
- * will print them all to visualize if the values are misused
- */
- snprintf(buf, bufsize,
- "%s%s%s%s%s%s(0x%08x)",
- (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
- (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
- (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
- (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
- (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
- (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
- rotation);
-}
-
-static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
-{
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- const struct drm_framebuffer *fb = plane_state->uapi.fb;
- struct drm_format_name_buf format_name;
- struct drm_rect src, dst;
- char rot_str[48];
-
- src = drm_plane_state_src(&plane_state->uapi);
- dst = drm_plane_state_dest(&plane_state->uapi);
-
- if (fb)
- drm_get_format_name(fb->format->format, &format_name);
-
- plane_rotation(rot_str, sizeof(rot_str),
- plane_state->uapi.rotation);
-
- seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
- fb ? fb->width : 0, fb ? fb->height : 0,
- DRM_RECT_FP_ARG(&src),
- DRM_RECT_ARG(&dst),
- rot_str);
-}
-
-static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
-{
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_format_name_buf format_name;
- char rot_str[48];
-
- if (!fb)
- return;
-
- drm_get_format_name(fb->format->format, &format_name);
-
- plane_rotation(rot_str, sizeof(rot_str),
- plane_state->hw.rotation);
-
- seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb->base.id, format_name.str,
- fb->width, fb->height,
- yesno(plane_state->uapi.visible),
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst),
- rot_str);
-}
-
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
- plane->base.base.id, plane->base.name,
- plane_type(plane->base.type));
- intel_plane_uapi_info(m, plane);
- intel_plane_hw_info(m, plane);
- }
-}
-
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int num_scalers = crtc->num_scalers;
- int i;
-
- /* Not all platformas have a scaler */
- if (num_scalers) {
- seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
- num_scalers,
- crtc_state->scaler_state.scaler_users,
- crtc_state->scaler_state.scaler_id);
-
- for (i = 0; i < num_scalers; i++) {
- const struct intel_scaler *sc =
- &crtc_state->scaler_state.scalers[i];
-
- seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
- i, yesno(sc->in_use), sc->mode);
- }
- seq_puts(m, "\n");
- } else {
- seq_puts(m, "\tNo scalers available on this platform\n");
- }
-}
-
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_encoder *encoder;
-
- seq_printf(m, "[CRTC:%d:%s]:\n",
- crtc->base.base.id, crtc->base.name);
-
- seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->uapi.enable),
- yesno(crtc_state->uapi.active),
- DRM_MODE_ARG(&crtc_state->uapi.mode));
-
- if (crtc_state->hw.enable) {
- seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->hw.active),
- DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
-
- seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
- yesno(crtc_state->dither), crtc_state->pipe_bpp);
-
- intel_scaler_info(m, crtc);
- }
-
- for_each_intel_encoder_mask(&dev_priv->drm, encoder,
- crtc_state->uapi.encoder_mask)
- intel_encoder_info(m, crtc, encoder);
-
- intel_plane_info(m, crtc);
-
- seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
- yesno(!crtc->cpu_fifo_underrun_disabled),
- yesno(!crtc->pch_fifo_underrun_disabled));
-}
-
-static int i915_display_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- drm_modeset_lock_all(dev);
-
- seq_printf(m, "CRTC info\n");
- seq_printf(m, "---------\n");
- for_each_intel_crtc(dev, crtc)
- intel_crtc_info(m, crtc);
-
- seq_printf(m, "\n");
- seq_printf(m, "Connector info\n");
- seq_printf(m, "--------------\n");
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter)
- intel_connector_info(m, connector);
- drm_connector_list_iter_end(&conn_iter);
-
- drm_modeset_unlock_all(dev);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2733,55 +1608,6 @@ static int i915_shrinker_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_shared_dplls_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int i;
-
- drm_modeset_lock_all(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
- pll->info->id);
- seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
- pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
- seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n",
- pll->state.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
- seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
- seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
- seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
- pll->state.hw_state.mg_refclkin_ctl);
- seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_coreclkctl1);
- seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_hsclkctl);
- seq_printf(m, " mg_pll_div0: 0x%08x\n",
- pll->state.hw_state.mg_pll_div0);
- seq_printf(m, " mg_pll_div1: 0x%08x\n",
- pll->state.hw_state.mg_pll_div1);
- seq_printf(m, " mg_pll_lf: 0x%08x\n",
- pll->state.hw_state.mg_pll_lf);
- seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
- pll->state.hw_state.mg_pll_frac_lock);
- seq_printf(m, " mg_pll_ssc: 0x%08x\n",
- pll->state.hw_state.mg_pll_ssc);
- seq_printf(m, " mg_pll_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_bias);
- seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_tdc_coldst_bias);
- }
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -2802,7 +1628,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
for (wa = wal->list; count--; wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
i915_mmio_reg_offset(wa->reg),
- wa->val, wa->mask);
+ wa->set, wa->clr);
seq_printf(m, "\n");
}
@@ -2810,646 +1636,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
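/*
 * Editorial note (annotation, not part of the patch): the hunk above
 * tracks the rename of the workaround fields from (val, mask) to
 * (set, clr). A minimal sketch of the read-modify-write semantics those
 * names suggest, assuming the rename is purely mechanical:
 */
static inline u32 wa_apply_sketch(u32 old, u32 clr, u32 set)
{
	return (old & ~clr) | set; /* clear the clr bits, then apply set */
}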
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Isochronous Priority Control: %s\n",
- yesno(dev_priv->ipc_enabled));
- return 0;
-}
-
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (!HAS_IPC(dev_priv))
- return -ENODEV;
-
- return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- intel_wakeref_t wakeref;
- bool enable;
- int ret;
-
- ret = kstrtobool_from_user(ubuf, len, &enable);
- if (ret < 0)
- return ret;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (!dev_priv->ipc_enabled && enable)
- DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- }
-
- return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
- .owner = THIS_MODULE,
- .open = i915_ipc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_ipc_status_write
-};
-
-static int i915_ddb_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_entry *entry;
- struct intel_crtc *crtc;
-
- if (INTEL_GEN(dev_priv) < 9)
- return -ENODEV;
-
- drm_modeset_lock_all(dev);
-
- seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- seq_printf(m, "Pipe %c\n", pipe_name(pipe));
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
- entry->start, entry->end,
- skl_ddb_entry_size(entry));
- }
-
- entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
- seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
- entry->end, skl_ddb_entry_size(entry));
- }
-
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
-static void drrs_status_per_crtc(struct seq_file *m,
- struct drm_device *dev,
- struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_drrs *drrs = &dev_priv->drrs;
- int vrefresh = 0;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->state->crtc != &intel_crtc->base)
- continue;
-
- seq_printf(m, "%s:\n", connector->name);
- }
- drm_connector_list_iter_end(&conn_iter);
-
- if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
- seq_puts(m, "\tVBT: DRRS_type: Static");
- else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
- seq_puts(m, "\tVBT: DRRS_type: Seamless");
- else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
- seq_puts(m, "\tVBT: DRRS_type: None");
- else
- seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
-
- seq_puts(m, "\n\n");
-
- if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
- struct intel_panel *panel;
-
- mutex_lock(&drrs->mutex);
- /* DRRS Supported */
- seq_puts(m, "\tDRRS Supported: Yes\n");
-
- /* disable_drrs() will make drrs->dp NULL */
- if (!drrs->dp) {
- seq_puts(m, "Idleness DRRS: Disabled\n");
- if (dev_priv->psr.enabled)
- seq_puts(m,
- "\tAs PSR is enabled, DRRS is not enabled\n");
- mutex_unlock(&drrs->mutex);
- return;
- }
-
- panel = &drrs->dp->attached_connector->panel;
- seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
- drrs->busy_frontbuffer_bits);
-
- seq_puts(m, "\n\t\t");
- if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
- seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
- vrefresh = panel->fixed_mode->vrefresh;
- } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
- seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
- vrefresh = panel->downclock_mode->vrefresh;
- } else {
- seq_printf(m, "DRRS_State: Unknown(%d)\n",
- drrs->refresh_rate_type);
- mutex_unlock(&drrs->mutex);
- return;
- }
- seq_printf(m, "\t\tVrefresh: %d", vrefresh);
-
- seq_puts(m, "\n\t\t");
- mutex_unlock(&drrs->mutex);
- } else {
- /* DRRS not supported. Print the VBT parameter */
- seq_puts(m, "\tDRRS Supported : No");
- }
- seq_puts(m, "\n");
-}
-
-static int i915_drrs_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *intel_crtc;
- int active_crtc_cnt = 0;
-
- drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.state->active) {
- active_crtc_cnt++;
- seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
-
- drrs_status_per_crtc(m, dev, intel_crtc);
- }
- }
- drm_modeset_unlock_all(dev);
-
- if (!active_crtc_cnt)
- seq_puts(m, "No active crtc found\n");
-
- return 0;
-}
-
-static int i915_dp_mst_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- intel_encoder = intel_attached_encoder(to_intel_connector(connector));
- if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- intel_dig_port = enc_to_dig_port(intel_encoder);
- if (!intel_dig_port->dp.can_mst)
- continue;
-
- seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-
-static ssize_t i915_displayport_test_active_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- char *input_buffer;
- int status = 0;
- struct drm_device *dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
- int val = 0;
-
- dev = ((struct seq_file *)file->private_data)->private;
-
- if (len == 0)
- return 0;
-
- input_buffer = memdup_user_nul(ubuf, len);
- if (IS_ERR(input_buffer))
- return PTR_ERR(input_buffer);
-
- DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- status = kstrtoint(input_buffer, 10, &val);
- if (status < 0)
- break;
- DRM_DEBUG_DRIVER("Got %d for test active\n", val);
- /* To prevent erroneous activation of the compliance
- * testing code, only accept an actual value of 1 here
- */
- if (val == 1)
- intel_dp->compliance.test_active = true;
- else
- intel_dp->compliance.test_active = false;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- kfree(input_buffer);
- if (status < 0)
- return status;
-
- *offp += len;
- return len;
-}
-
-static int i915_displayport_test_active_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_active)
- seq_puts(m, "1");
- else
- seq_puts(m, "0");
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-
-static int i915_displayport_test_active_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, i915_displayport_test_active_show,
- inode->i_private);
-}
-
-static const struct file_operations i915_displayport_test_active_fops = {
- .owner = THIS_MODULE,
- .open = i915_displayport_test_active_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_displayport_test_active_write
-};
-
-static int i915_displayport_test_data_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_EDID_READ)
- seq_printf(m, "%lx",
- intel_dp->compliance.test_data.edid);
- else if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_VIDEO_PATTERN) {
- seq_printf(m, "hdisplay: %d\n",
- intel_dp->compliance.test_data.hdisplay);
- seq_printf(m, "vdisplay: %d\n",
- intel_dp->compliance.test_data.vdisplay);
- seq_printf(m, "bpc: %u\n",
- intel_dp->compliance.test_data.bpc);
- }
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
-
-static int i915_displayport_test_type_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- seq_printf(m, "%02lx", intel_dp->compliance.test_type);
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- int level;
- int num_levels;
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- drm_modeset_lock_all(dev);
-
- for (level = 0; level < num_levels; level++) {
- unsigned int latency = wm[level];
-
- /*
- * - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9/vlv/chv
- */
- if (INTEL_GEN(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level], latency / 10, latency % 10);
- }
-
- drm_modeset_unlock_all(dev);
-}
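/*
 * Editorial note: the removed helper above prints latencies in tenths
 * of a microsecond. Raw values are stored in 1 us units on
 * gen9+/vlv/chv/g4x (scaled by 10) and in 0.5 us units for WM1+
 * elsewhere (scaled by 5), then rendered as "%u.%u usec" via
 * latency / 10 and latency % 10.
 */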
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.pri_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.spr_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.cur_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- return -ENODEV;
-
- return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, u16 wm[8])
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- u16 new[8] = { 0 };
- int num_levels;
- int level;
- int ret;
- char tmp[32];
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
- &new[0], &new[1], &new[2], &new[3],
- &new[4], &new[5], &new[6], &new[7]);
- if (ret != num_levels)
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
-
- for (level = 0; level < num_levels; level++)
- wm[level] = new[level];
-
- drm_modeset_unlock_all(dev);
-
- return len;
-}
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.pri_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.spr_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.cur_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = pri_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = spr_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = cur_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cur_wm_latency_write
-};
-
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -3641,7 +1827,8 @@ i915_cache_sharing_set(void *data, u64 val)
if (val > 3)
return -EINVAL;
- DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Manually setting uncore sharing to %llu\n", val);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 snpcr;
@@ -3947,292 +2134,6 @@ static const struct file_operations i915_forcewake_fops = {
.release = i915_forcewake_release,
};
-static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
-
- /* Synchronize with everything first in case there's been an HPD
- * storm, but we haven't finished handling it in the kernel yet
- */
- intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->hotplug.hotplug_work);
-
- seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
- seq_printf(m, "Detected: %s\n",
- yesno(delayed_work_pending(&hotplug->reenable_work)));
-
- return 0;
-}
-
-static ssize_t i915_hpd_storm_ctl_write(struct file *file,
- const char __user *ubuf, size_t len,
- loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
- unsigned int new_threshold;
- int i;
- char *newline;
- char tmp[16];
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- if (strcmp(tmp, "reset") == 0)
- new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
- else if (kstrtouint(tmp, 10, &new_threshold) != 0)
- return -EINVAL;
-
- if (new_threshold > 0)
- DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
- new_threshold);
- else
- DRM_DEBUG_KMS("Disabling HPD storm detection\n");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_storm_threshold = new_threshold;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
- return len;
-}
-
-static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
-}
-
-static const struct file_operations i915_hpd_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_storm_ctl_write
-};
-
-static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Enabled: %s\n",
- yesno(dev_priv->hotplug.hpd_short_storm_enabled));
-
- return 0;
-}
-
-static int
-i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_short_storm_ctl_show,
- inode->i_private);
-}
-
-static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
- char *newline;
- char tmp[16];
- int i;
- bool new_state;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- /* Reset to the "default" state for this system */
- if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
- else if (kstrtobool(tmp, &new_state) != 0)
- return -EINVAL;
-
- DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
- new_state ? "En" : "Dis");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_short_storm_enabled = new_state;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
- return len;
-}
-
-static const struct file_operations i915_hpd_short_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_short_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_short_storm_ctl_write,
-};
-
-static int i915_drrs_ctl_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- if (INTEL_GEN(dev_priv) < 7)
- return -ENODEV;
-
- for_each_intel_crtc(dev, crtc) {
- struct drm_connector_list_iter conn_iter;
- struct intel_crtc_state *crtc_state;
- struct drm_connector *connector;
- struct drm_crtc_commit *commit;
- int ret;
-
- ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- if (!crtc_state->hw.active ||
- !crtc_state->has_drrs)
- goto out;
-
- commit = crtc_state->uapi.commit;
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- goto out;
- }
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp;
-
- if (!(crtc_state->uapi.connector_mask &
- drm_connector_mask(connector)))
- continue;
-
- encoder = intel_attached_encoder(to_intel_connector(connector));
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
- val ? "en" : "dis", val);
-
- intel_dp = enc_to_intel_dp(encoder);
- if (val)
- intel_edp_drrs_enable(intel_dp,
- crtc_state);
- else
- intel_edp_drrs_disable(intel_dp,
- crtc_state);
- }
- drm_connector_list_iter_end(&conn_iter);
-
-out:
- drm_modeset_unlock(&crtc->base.mutex);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
-
-static ssize_t
-i915_fifo_underrun_reset_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct drm_i915_private *dev_priv = filp->private_data;
- struct intel_crtc *intel_crtc;
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- bool reset;
-
- ret = kstrtobool_from_user(ubuf, cnt, &reset);
- if (ret)
- return ret;
-
- if (!reset)
- return cnt;
-
- for_each_intel_crtc(dev, intel_crtc) {
- struct drm_crtc_commit *commit;
- struct intel_crtc_state *crtc_state;
-
- ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(intel_crtc->base.state);
- commit = crtc_state->uapi.commit;
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (!ret)
- ret = wait_for_completion_interruptible(&commit->flip_done);
- }
-
- if (!ret && crtc_state->hw.active) {
- DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
- pipe_name(intel_crtc->pipe));
-
- intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
- }
-
- drm_modeset_unlock(&intel_crtc->base.mutex);
-
- if (ret)
- return ret;
- }
-
- ret = intel_fbc_reset_underrun(dev_priv);
- if (ret)
- return ret;
-
- return cnt;
-}
-
-static const struct file_operations i915_fifo_underrun_reset_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = i915_fifo_underrun_reset_write,
- .llseek = default_llseek,
-};
-
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4245,34 +2146,16 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
- {"i915_fbc_status", i915_fbc_status, 0},
- {"i915_ips_status", i915_ips_status, 0},
- {"i915_sr_status", i915_sr_status, 0},
- {"i915_opregion", i915_opregion, 0},
- {"i915_vbt", i915_vbt, 0},
- {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_llc", i915_llc, 0},
- {"i915_edp_psr_status", i915_edp_psr_status, 0},
- {"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
- {"i915_power_domain_info", i915_power_domain_info, 0},
- {"i915_dmc_info", i915_dmc_info, 0},
- {"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
{"i915_rcs_topology", i915_rcs_topology, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
- {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
- {"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
- {"i915_ddb_info", i915_ddb_info, 0},
{"i915_sseu_status", i915_sseu_status, 0},
- {"i915_drrs_status", i915_drrs_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -4289,21 +2172,8 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
- {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
- {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
- {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
- {"i915_fbc_false_color", &i915_fbc_false_color_fops},
- {"i915_dp_test_data", &i915_displayport_test_data_fops},
- {"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_level", &i915_guc_log_level_fops},
{"i915_guc_log_relay", &i915_guc_log_relay_fops},
- {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
- {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
- {"i915_ipc_status", &i915_ipc_status_fops},
- {"i915_drrs_ctl", &i915_drrs_ctl_fops},
- {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -4311,9 +2181,10 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
struct drm_minor *minor = dev_priv->drm.primary;
int i;
+ i915_debugfs_params(dev_priv);
+
debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
to_i915(minor->dev), &i915_forcewake_fops);
-
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
debugfs_create_file(i915_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
@@ -4326,254 +2197,3 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
}
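/*
 * Editorial note: the display-oriented entries removed from the tables
 * above (DP compliance test, watermark latency, HPD storm, DRRS, IPC,
 * ...) are relocated rather than deleted; the i915_drv.c hunk below
 * gains an include of "display/intel_display_debugfs.h". Assuming the
 * usual split, registration would move to a display-specific entry
 * point along the lines of:
 *
 *	void intel_display_debugfs_register(struct drm_i915_private *i915);
 */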
-
-struct dpcd_block {
- /* DPCD dump start address. */
- unsigned int offset;
- /* DPCD dump end address, inclusive. If unset, .size will be used. */
- unsigned int end;
- /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
- size_t size;
- /* Only valid for eDP. */
- bool edp;
-};
-
-static const struct dpcd_block i915_dpcd_debug[] = {
- { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
- { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
- { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
- { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
- { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
- { .offset = DP_SET_POWER },
- { .offset = DP_EDP_DPCD_REV },
- { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
- { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
- { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
-};
-
-static int i915_dpcd_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- u8 buf[16];
- ssize_t err;
- int i;
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
- const struct dpcd_block *b = &i915_dpcd_debug[i];
- size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
-
- if (b->edp &&
- connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
- /* low tech for now */
- if (WARN_ON(size > sizeof(buf)))
- continue;
-
- err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
- if (err < 0)
- seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
- else
- seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
-
-static int i915_panel_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->panel_power_up_delay);
- seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->panel_power_down_delay);
- seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->backlight_on_delay);
- seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->backlight_off_delay);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
-static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- /* HDCP is supported by connector */
- if (!intel_connector->hdcp.shim)
- return -EINVAL;
-
- seq_printf(m, "%s:%d HDCP version: ", connector->name,
- connector->base.id);
- intel_hdcp_info(m, intel_connector);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-
-static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
- struct drm_crtc *crtc;
- struct intel_dp *intel_dp;
- struct drm_modeset_acquire_ctx ctx;
- struct intel_crtc_state *crtc_state = NULL;
- int ret = 0;
- bool try_again = false;
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- do {
- try_again = false;
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- &ctx);
- if (ret) {
- if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
- try_again = true;
- continue;
- }
- break;
- }
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
- ret = -ENODEV;
- break;
- }
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret) {
- try_again = true;
- continue;
- }
- break;
- } else if (ret) {
- break;
- }
- intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- crtc_state = to_intel_crtc_state(crtc->state);
- seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc.compression_enable));
- seq_printf(m, "DSC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
- seq_printf(m, "Force_DSC_Enable: %s\n",
- yesno(intel_dp->force_dsc_en));
- if (!intel_dp_is_edp(intel_dp))
- seq_printf(m, "FEC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
- } while (try_again);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
-static ssize_t i915_dsc_fec_support_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- bool dsc_enable = false;
- int ret;
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (len == 0)
- return 0;
-
- DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
- len);
-
- ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
- if (ret < 0)
- return ret;
-
- DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
- (dsc_enable) ? "true" : "false");
- intel_dp->force_dsc_en = dsc_enable;
-
- *offp += len;
- return len;
-}
-
-static int i915_dsc_fec_support_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, i915_dsc_fec_support_show,
- inode->i_private);
-}
-
-static const struct file_operations i915_dsc_fec_support_fops = {
- .owner = THIS_MODULE,
- .open = i915_dsc_fec_support_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_dsc_fec_support_write
-};
-
-/**
- * i915_debugfs_connector_add - add i915 specific connector debugfs files
- * @connector: pointer to a registered drm_connector
- *
- * Cleanup will be done by drm_connector_unregister() through a call to
- * drm_debugfs_connector_remove().
- *
- * Returns 0 on success, negative error codes on error.
- */
-int i915_debugfs_connector_add(struct drm_connector *connector)
-{
- struct dentry *root = connector->debugfs_entry;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- /* The connector must have been registered beforehand. */
- if (!root)
- return -ENODEV;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_dpcd", S_IRUGO, root,
- connector, &i915_dpcd_fops);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- debugfs_create_file("i915_panel_timings", S_IRUGO, root,
- connector, &i915_panel_fops);
- debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
- connector, &i915_psr_sink_status_fops);
- }
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
- connector, &i915_hdcp_sink_capability_fops);
- }
-
- if (INTEL_GEN(dev_priv) >= 10 &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP))
- debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
- connector, &i915_dsc_fec_support_fops);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index c0cd22eb916d..6da39c76ab5e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -6,15 +6,17 @@
#ifndef __I915_DEBUGFS_H__
#define __I915_DEBUGFS_H__
-struct drm_i915_private;
struct drm_connector;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct seq_file;
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
-int i915_debugfs_connector_add(struct drm_connector *connector);
+void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
+static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
#endif
#endif /* __I915_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
new file mode 100644
index 000000000000..62b2c5f0495d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include "i915_debugfs_params.h"
+#include "i915_drv.h"
+#include "i915_params.h"
+
+/* int param */
+static int i915_param_int_show(struct seq_file *m, void *data)
+{
+ int *value = m->private;
+
+ seq_printf(m, "%d\n", *value);
+
+ return 0;
+}
+
+static int i915_param_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_int_show, inode->i_private);
+}
+
+static ssize_t i915_param_int_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int *value = m->private;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
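/*
 * Editorial note: "ret ?: len" relies on the GNU ?: extension, which
 * the kernel builds with; the write reports full consumption on
 * success and propagates the errno otherwise. The equivalent plain C:
 *
 *	if (ret)
 *		return ret;
 *	return len;
 */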
+
+static const struct file_operations i915_param_int_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_int_open,
+ .read = seq_read,
+ .write = i915_param_int_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_int_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_int_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* unsigned int param */
+static int i915_param_uint_show(struct seq_file *m, void *data)
+{
+ unsigned int *value = m->private;
+
+ seq_printf(m, "%u\n", *value);
+
+ return 0;
+}
+
+static int i915_param_uint_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_uint_show, inode->i_private);
+}
+
+static ssize_t i915_param_uint_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ unsigned int *value = m->private;
+ int ret;
+
+ ret = kstrtouint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations i915_param_uint_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_uint_open,
+ .read = seq_read,
+ .write = i915_param_uint_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_uint_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_uint_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* char * param */
+static int i915_param_charp_show(struct seq_file *m, void *data)
+{
+ const char **s = m->private;
+
+ seq_printf(m, "%s\n", *s);
+
+ return 0;
+}
+
+static int i915_param_charp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_charp_show, inode->i_private);
+}
+
+static ssize_t i915_param_charp_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ char **s = m->private;
+ char *new, *old;
+
+ /* FIXME: remove locking once the params are no longer module params */
+ kernel_param_lock(THIS_MODULE);
+
+ old = *s;
+ new = strndup_user(ubuf, PAGE_SIZE);
+ if (IS_ERR(new)) {
+ len = PTR_ERR(new);
+ goto out;
+ }
+
+ *s = new;
+
+ kfree(old);
+out:
+ kernel_param_unlock(THIS_MODULE);
+
+ return len;
+}
+
+static const struct file_operations i915_param_charp_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_charp_open,
+ .read = seq_read,
+ .write = i915_param_charp_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_charp_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_charp_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+#define RO(mode) (((mode) & 0222) == 0)
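/*
 * Editorial note: 0222 is the octal mask of the write-permission bits
 * for owner, group and other, so RO(mode) is true exactly when the
 * file mode grants no write access and the read-only fops variant
 * should be installed.
 */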
+
+static struct dentry *
+i915_debugfs_create_int(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &i915_param_int_fops_ro :
+ &i915_param_int_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_uint(const char *name, umode_t mode,
+ struct dentry *parent, unsigned int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &i915_param_uint_fops_ro :
+ &i915_param_uint_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_charp(const char *name, umode_t mode,
+ struct dentry *parent, char **value)
+{
+ return debugfs_create_file(name, mode, parent, value,
+ RO(mode) ? &i915_param_charp_fops_ro :
+ &i915_param_charp_fops);
+}
+
+static __always_inline void
+_i915_param_create_file(struct dentry *parent, const char *name,
+ const char *type, int mode, void *value)
+{
+ if (!mode)
+ return;
+
+ if (!__builtin_strcmp(type, "bool"))
+ debugfs_create_bool(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "int"))
+ i915_debugfs_create_int(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ i915_debugfs_create_uint(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "unsigned long"))
+ debugfs_create_ulong(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "char *"))
+ i915_debugfs_create_charp(name, mode, parent, value);
+ else
+ WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n",
+ type, name);
+}
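/*
 * Editorial note: because the helper is __always_inline and "type" is
 * always a string literal stringized from the X-macro below, each
 * __builtin_strcmp() folds at compile time and every call site reduces
 * to a single debugfs_create_*() call. For a hypothetical
 * "int enable_fbc" parameter the call would effectively become:
 *
 *	i915_debugfs_create_int("enable_fbc", mode, parent,
 *				&params->enable_fbc);
 */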
+
+/* add a subdirectory with files for each i915 param */
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ struct i915_params *params = &i915_modparams;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+ if (IS_ERR(dir))
+ return dir;
+
+ /*
+ * Note: We could create files for params needing special handling
+ * here. Set mode in params to 0 to skip the generic create file, or
+ * just let the generic create file fail silently with -EEXIST.
+ */
+
+#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, #T, mode, &params->x);
+ I915_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+
+ return dir;
+}
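/*
 * Editorial note: I915_PARAMS_FOR_EACH() is the X-macro over the
 * parameter table in i915_params.h, so REGISTER() stamps out one
 * _i915_param_create_file() call per module parameter. Assuming a
 * table entry of the form param(int, enable_guc, 0, 0400), the
 * expansion would read:
 *
 *	_i915_param_create_file(dir, "enable_guc", "int", 0400,
 *				&params->enable_guc);
 */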
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.h b/drivers/gpu/drm/i915/i915_debugfs_params.h
new file mode 100644
index 000000000000..66567076546b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_DEBUGFS_PARAMS__
+#define __I915_DEBUGFS_PARAMS__
+
+struct dentry;
+struct drm_i915_private;
+
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __I915_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index eec1d6a6aa22..81a4621853db 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,12 +44,13 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_csr.h"
+#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
@@ -69,6 +70,7 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
@@ -78,74 +80,14 @@
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_csr.h"
+#include "intel_dram.h"
+#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
+#include "vlv_suspend.h"
static struct drm_driver driver;
-struct vlv_s0ix_state {
- /* GAM */
- u32 wr_watermark;
- u32 gfx_prio_ctrl;
- u32 arb_mode;
- u32 gfx_pend_tlb0;
- u32 gfx_pend_tlb1;
- u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
- u32 media_max_req_count;
- u32 gfx_max_req_count;
- u32 render_hwsp;
- u32 ecochk;
- u32 bsd_hwsp;
- u32 blt_hwsp;
- u32 tlb_rd_addr;
-
- /* MBC */
- u32 g3dctl;
- u32 gsckgctl;
- u32 mbctl;
-
- /* GCP */
- u32 ucgctl1;
- u32 ucgctl3;
- u32 rcgctl1;
- u32 rcgctl2;
- u32 rstctl;
- u32 misccpctl;
-
- /* GPM */
- u32 gfxpause;
- u32 rpdeuhwtc;
- u32 rpdeuc;
- u32 ecobus;
- u32 pwrdwnupctl;
- u32 rp_down_timeout;
- u32 rp_deucsw;
- u32 rcubmabdtmr;
- u32 rcedata;
- u32 spare2gh;
-
- /* Display 1 CZ domain */
- u32 gt_imr;
- u32 gt_ier;
- u32 pm_imr;
- u32 pm_ier;
- u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
- /* GT SA CZ domain */
- u32 tilectl;
- u32 gt_fifoctl;
- u32 gtlc_wake_ctrl;
- u32 gtlc_survive;
- u32 pmwgicz;
-
- /* Display 2 CZ domain */
- u32 gu_ctl0;
- u32 gu_ctl1;
- u32 pcbr;
- u32 clock_gate_dis2;
-};
-
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
@@ -153,7 +95,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
dev_priv->bridge_dev =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
- DRM_ERROR("bridge device not found\n");
+ drm_err(&dev_priv->drm, "bridge device not found\n");
return -1;
}
return 0;
@@ -190,7 +132,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
return ret;
}
@@ -273,7 +215,8 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+/* part #1: call before irq install */
+static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
{
int ret;
@@ -293,25 +236,32 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
if (ret)
goto out;
- intel_register_dsm_handler();
+ intel_power_domains_init_hw(i915, false);
+
+ intel_csr_ucode_init(i915);
- ret = i915_switcheroo_register(i915);
+ ret = intel_modeset_init_noirq(i915);
if (ret)
goto cleanup_vga_client;
- intel_power_domains_init_hw(i915, false);
+ return 0;
- intel_csr_ucode_init(i915);
+cleanup_vga_client:
+ intel_vga_unregister(i915);
+out:
+ return ret;
+}
- ret = intel_irq_install(i915);
- if (ret)
- goto cleanup_csr;
+/* part #2: call after irq install */
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+{
+ int ret;
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
ret = intel_modeset_init(i915);
if (ret)
- goto cleanup_irq;
+ goto out;
ret = i915_gem_init(i915);
if (ret)
@@ -340,29 +290,27 @@ cleanup_gem:
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
cleanup_modeset:
+ /* FIXME */
intel_modeset_driver_remove(i915);
-cleanup_irq:
intel_irq_uninstall(i915);
-cleanup_csr:
- intel_csr_ucode_fini(i915);
- intel_power_domains_driver_remove(i915);
- i915_switcheroo_unregister(i915);
-cleanup_vga_client:
- intel_vga_unregister(i915);
+ intel_modeset_driver_remove_noirq(i915);
out:
return ret;
}
+/* part #1: call before irq uninstall */
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
intel_modeset_driver_remove(i915);
+}
- intel_irq_uninstall(i915);
+/* part #2: call after irq uninstall */
+static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
+{
+ intel_modeset_driver_remove_noirq(i915);
intel_bios_driver_remove(i915);
- i915_switcheroo_unregister(i915);
-
intel_vga_unregister(i915);
intel_csr_ucode_fini(i915);
@@ -412,7 +360,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
out_free_wq:
destroy_workqueue(dev_priv->wq);
out_err:
- DRM_ERROR("Failed to allocate workqueues.\n");
+ drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
return -ENOMEM;
}
@@ -441,37 +389,15 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+ pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
if (pre) {
- DRM_ERROR("This is a pre-production stepping. "
+ drm_err(&dev_priv->drm, "This is a pre-production stepping. "
"It may not be fully functional.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
}
}
-static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
-{
- if (!IS_VALLEYVIEW(i915))
- return 0;
-
- /* we write all the values in the struct, so no need to zero it out */
- i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
- GFP_KERNEL);
- if (!i915->vlv_s0ix_state)
- return -ENOMEM;
-
- return 0;
-}
-
-static void vlv_free_s0ix_state(struct drm_i915_private *i915)
-{
- if (!i915->vlv_s0ix_state)
- return;
-
- kfree(i915->vlv_s0ix_state);
- i915->vlv_s0ix_state = NULL;
-}
-
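/*
 * Editorial note: the vlv_s0ix_state definition and its alloc/free
 * helpers are not dropped; they move behind the vlv_suspend_init() /
 * vlv_suspend_cleanup() calls introduced further down in this hunk
 * (see the new "vlv_suspend.h" include above). Assumed shape of that
 * interface:
 *
 *	int vlv_suspend_init(struct drm_i915_private *i915);
 *	void vlv_suspend_cleanup(struct drm_i915_private *i915);
 */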
static void sanitize_gpu(struct drm_i915_private *i915)
{
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -519,7 +445,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = vlv_alloc_s0ix_state(dev_priv);
+ ret = vlv_suspend_init(dev_priv);
if (ret < 0)
goto err_workqueues;
@@ -541,7 +467,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- intel_display_crc_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -550,7 +475,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
- vlv_free_s0ix_state(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -567,7 +492,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
- vlv_free_s0ix_state(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
cpu_latency_qos_remove_request(&dev_priv->sb_qos);
@@ -640,487 +565,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-
-static const char *intel_dram_type_str(enum intel_dram_type type)
-{
- static const char * const str[] = {
- DRAM_TYPE_STR(UNKNOWN),
- DRAM_TYPE_STR(DDR3),
- DRAM_TYPE_STR(DDR4),
- DRAM_TYPE_STR(LPDDR3),
- DRAM_TYPE_STR(LPDDR4),
- };
-
- if (type >= ARRAY_SIZE(str))
- type = INTEL_DRAM_UNKNOWN;
-
- return str[type];
-}
-
-#undef DRAM_TYPE_STR
-
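/*
 * Editorial note: the SKL/BXT DRAM probing helpers removed below are
 * consolidated into the new intel_dram.c (note the "intel_dram.h"
 * include added earlier in this file's hunk). Assumed shape of the
 * replacement entry point:
 *
 *	void intel_dram_detect(struct drm_i915_private *i915);
 */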
-static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
-{
- return dimm->ranks * 64 / (dimm->width ?: 1);
-}
-
-/* Returns total GB for the whole DIMM */
-static int skl_get_dimm_size(u16 val)
-{
- return val & SKL_DRAM_SIZE_MASK;
-}
-
-static int skl_get_dimm_width(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & SKL_DRAM_WIDTH_MASK) {
- case SKL_DRAM_WIDTH_X8:
- case SKL_DRAM_WIDTH_X16:
- case SKL_DRAM_WIDTH_X32:
- val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int skl_get_dimm_ranks(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-/* Returns total GB for the whole DIMM */
-static int cnl_get_dimm_size(u16 val)
-{
- return (val & CNL_DRAM_SIZE_MASK) / 2;
-}
-
-static int cnl_get_dimm_width(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & CNL_DRAM_WIDTH_MASK) {
- case CNL_DRAM_WIDTH_X8:
- case CNL_DRAM_WIDTH_X16:
- case CNL_DRAM_WIDTH_X32:
- val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int cnl_get_dimm_ranks(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-static bool
-skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
-{
- /* Convert total GB to Gb per DRAM device */
- return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
-}
-
-static void
-skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
- struct dram_dimm_info *dimm,
- int channel, char dimm_name, u16 val)
-{
- if (INTEL_GEN(dev_priv) >= 10) {
- dimm->size = cnl_get_dimm_size(val);
- dimm->width = cnl_get_dimm_width(val);
- dimm->ranks = cnl_get_dimm_ranks(val);
- } else {
- dimm->size = skl_get_dimm_size(val);
- dimm->width = skl_get_dimm_width(val);
- dimm->ranks = skl_get_dimm_ranks(val);
- }
-
- DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
- channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
- yesno(skl_is_16gb_dimm(dimm)));
-}
-
-static int
-skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
- struct dram_channel_info *ch,
- int channel, u32 val)
-{
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
- channel, 'L', val & 0xffff);
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
- channel, 'S', val >> 16);
-
- if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
- DRM_DEBUG_KMS("CH%u not populated\n", channel);
- return -EINVAL;
- }
-
- if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
- ch->ranks = 2;
- else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
- ch->ranks = 2;
- else
- ch->ranks = 1;
-
- ch->is_16gb_dimm =
- skl_is_16gb_dimm(&ch->dimm_l) ||
- skl_is_16gb_dimm(&ch->dimm_s);
-
- DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
- channel, ch->ranks, yesno(ch->is_16gb_dimm));
-
- return 0;
-}
-
-static bool
-intel_is_dram_symmetric(const struct dram_channel_info *ch0,
- const struct dram_channel_info *ch1)
-{
- return !memcmp(ch0, ch1, sizeof(*ch0)) &&
- (ch0->dimm_s.size == 0 ||
- !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
-}
-
-static int
-skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0 = {}, ch1 = {};
- u32 val;
- int ret;
-
- val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- if (dram_info->num_channels == 0) {
- DRM_INFO("Number of memory channels is zero\n");
- return -EINVAL;
- }
-
- /*
- * If any channel is a single-rank channel, the worst case matches
- * single-rank memory, so treat the whole configuration as single
- * rank.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
- DRM_INFO("couldn't get memory rank information\n");
- return -EINVAL;
- }
-
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
-
- dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
-
- DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
- yesno(dram_info->symmetric_memory));
- return 0;
-}
-
-static enum intel_dram_type
-skl_get_dram_type(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
-
- switch (val & SKL_DRAM_DDR_TYPE_MASK) {
- case SKL_DRAM_DDR_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case SKL_DRAM_DDR_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case SKL_DRAM_DDR_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case SKL_DRAM_DDR_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static int
-skl_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 mem_freq_khz, val;
- int ret;
-
- dram_info->type = skl_get_dram_type(dev_priv);
- DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
-
- ret = skl_dram_get_channels_info(dev_priv);
- if (ret)
- return ret;
-
- val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
- DRM_INFO("Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
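The bandwidth figure computed above follows from each memory channel being treated as 8 bytes wide: kBps = channels * freq_khz * 8. A tiny worked example (the frequency value is hypothetical, not a real register decode):

#include <stdio.h>

int main(void)
{
	unsigned int num_channels = 2;
	unsigned int mem_freq_khz = 1200000; /* hypothetical 1.2 GHz */
	unsigned long long bw_kbps =
		(unsigned long long)num_channels * mem_freq_khz * 8;

	printf("bandwidth: %llu kBps\n", bw_kbps); /* 19200000 */
	return 0;
}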
-
-/* Returns Gb per DRAM device */
-static int bxt_get_dimm_size(u32 val)
-{
- switch (val & BXT_DRAM_SIZE_MASK) {
- case BXT_DRAM_SIZE_4GBIT:
- return 4;
- case BXT_DRAM_SIZE_6GBIT:
- return 6;
- case BXT_DRAM_SIZE_8GBIT:
- return 8;
- case BXT_DRAM_SIZE_12GBIT:
- return 12;
- case BXT_DRAM_SIZE_16GBIT:
- return 16;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int bxt_get_dimm_width(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
-
- return 8 << val;
-}
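The width decode above maps a small register field to a device width of x8, x16 or x32 via a shift. A tiny standalone illustration (the field values are placeholders, not the real register layout):

#include <stdio.h>

int main(void)
{
	unsigned int field;

	/* field 0 -> x8, 1 -> x16, 2 -> x32 */
	for (field = 0; field <= 2; field++)
		printf("field %u -> x%u\n", field, 8u << field);
	return 0;
}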
-
-static int bxt_get_dimm_ranks(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- switch (val & BXT_DRAM_RANK_MASK) {
- case BXT_DRAM_RANK_SINGLE:
- return 1;
- case BXT_DRAM_RANK_DUAL:
- return 2;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static enum intel_dram_type bxt_get_dimm_type(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return INTEL_DRAM_UNKNOWN;
-
- switch (val & BXT_DRAM_TYPE_MASK) {
- case BXT_DRAM_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case BXT_DRAM_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case BXT_DRAM_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case BXT_DRAM_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
- u32 val)
-{
- dimm->width = bxt_get_dimm_width(val);
- dimm->ranks = bxt_get_dimm_ranks(val);
-
- /*
- * Size in register is Gb per DRAM device. Convert to total
- * GB to match the way we report this for non-LP platforms.
- */
- dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
-}
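A worked example of the Gb-per-device to total-GB conversion above. The device-count formula used here (ranks * 64 / width, i.e. how a 64-bit channel is typically populated) is an assumption for illustration, not taken from intel_dimm_num_devices():

#include <stdio.h>

int main(void)
{
	unsigned int gb_per_device = 8;	/* 8 Gbit devices */
	unsigned int width = 16;	/* x16 */
	unsigned int ranks = 1;
	/* Assumed population of a 64-bit channel, for illustration only. */
	unsigned int num_devices = ranks * 64 / width;
	unsigned int size_gb = gb_per_device * num_devices / 8;

	printf("%u devices, %u GB total\n", num_devices, size_gb); /* 4, 4 */
	return 0;
}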
-
-static int
-bxt_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels;
- int i;
-
- val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
- /* Each active bit represents a 4-byte-wide channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
- DRM_INFO("Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- /*
- * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
- */
- for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- struct dram_dimm_info dimm;
- enum intel_dram_type type;
-
- val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
- if (val == 0xFFFFFFFF)
- continue;
-
- dram_info->num_channels++;
-
- bxt_get_dimm_info(&dimm, val);
- type = bxt_get_dimm_type(val);
-
- WARN_ON(type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != type);
-
- DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
- i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
-
- /*
- * If any of the channels is a single-rank channel,
- * the worst-case output will be the same as for
- * single-rank memory, so treat it as single-rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
-
- if (type != INTEL_DRAM_UNKNOWN)
- dram_info->type = type;
- }
-
- if (dram_info->type == INTEL_DRAM_UNKNOWN ||
- dram_info->ranks == 0) {
- DRM_INFO("couldn't get memory information\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-static void
-intel_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- int ret;
-
- /*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
- */
- dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
-
- if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
- return;
-
- if (IS_GEN9_LP(dev_priv))
- ret = bxt_get_dram_info(dev_priv);
- else
- ret = skl_get_dram_info(dev_priv);
- if (ret)
- return;
-
- DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps,
- dram_info->num_channels);
-
- DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
-}
-
-static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
-{
- static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- static const u8 sets[4] = { 1, 1, 2, 2 };
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)];
-}
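The gen9 eDRAM size above is the product of banks, ways and sets (in MB) decoded from the capability register. A standalone sketch with hypothetical field values:

#include <stdio.h>

int main(void)
{
	static const unsigned char ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const unsigned char sets[4] = { 1, 1, 2, 2 };
	unsigned int num_banks = 2, ways_idx = 3, sets_idx = 2; /* hypothetical */

	/* 2 banks * 16 ways * 2 sets = 64 MB */
	printf("eDRAM: %u MB\n", num_banks * ways[ways_idx] * sets[sets_idx]);
	return 0;
}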
-
-static void edram_detect(struct drm_i915_private *dev_priv)
-{
- u32 edram_cap = 0;
-
- if (!(IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9))
- return;
-
- edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we don't have gt funcs set up */
-
- if (!(edram_cap & EDRAM_ENABLED))
- return;
-
- /*
- * The capability bits needed for the size calculation are not present
- * on pre-gen9, so always return 128MB there.
- */
- if (INTEL_GEN(dev_priv) < 9)
- dev_priv->edram_size_mb = 128;
- else
- dev_priv->edram_size_mb =
- gen9_edram_size_mb(dev_priv, edram_cap);
-
- dev_info(dev_priv->drm.dev,
- "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
-}
-
/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
@@ -1164,7 +608,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
- edram_detect(dev_priv);
+ intel_dram_edram_detect(dev_priv);
i915_perf_init(dev_priv);
@@ -1188,7 +632,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
- DRM_ERROR("failed to enable GGTT\n");
+ drm_err(&dev_priv->drm, "failed to enable GGTT\n");
goto err_mem_regions;
}
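A recurring change throughout this patch: the subsystem-wide DRM_ERROR()/DRM_DEBUG*() macros are replaced by the struct drm_device based drm_err()/drm_dbg() helpers, which prefix each message with the specific device. A minimal sketch of the pattern (kernel macros, not compilable standalone):

/* Old style: message carries no device context. */
DRM_ERROR("failed to enable GGTT\n");

/* New style: tied to this device's struct drm_device, so output can be
 * attributed to the right GPU on multi-device systems. */
drm_err(&dev_priv->drm, "failed to enable GGTT\n");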
@@ -1204,7 +648,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
- DRM_ERROR("failed to set DMA mask\n");
+ drm_err(&dev_priv->drm, "failed to set DMA mask\n");
goto err_mem_regions;
}
@@ -1222,7 +666,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- DRM_ERROR("failed to set DMA mask\n");
+ drm_err(&dev_priv->drm, "failed to set DMA mask\n");
goto err_mem_regions;
}
@@ -1253,7 +697,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
- DRM_DEBUG_DRIVER("can't enable MSI");
+ drm_dbg(&dev_priv->drm, "can't enable MSI");
}
ret = intel_gvt_init(dev_priv);
@@ -1265,7 +709,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system raw bandwidth and
* dram info. This will be used for memory latency calculation.
*/
- intel_get_dram_info(dev_priv);
+ intel_dram_detect(dev_priv);
intel_bw_init_hw(dev_priv);
@@ -1314,22 +758,19 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
- /*
- * Notify a valid surface after modesetting,
- * when running inside a VM.
- */
- if (intel_vgpu_active(dev_priv))
- I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+ intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
+ intel_display_debugfs_register(dev_priv);
i915_setup_sysfs(dev_priv);
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
} else
- DRM_ERROR("Failed to register driver for userspace access!\n");
+ drm_err(&dev_priv->drm,
+ "Failed to register driver for userspace access!\n");
if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
/* Must be done after probing outputs */
@@ -1359,6 +800,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_power_domains_enable(dev_priv);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
+
+ intel_register_dsm_handler();
+
+ if (i915_switcheroo_register(dev_priv))
+ drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}
/**
@@ -1367,6 +813,10 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ i915_switcheroo_unregister(dev_priv);
+
+ intel_unregister_dsm_handler();
+
intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(dev_priv);
@@ -1411,11 +861,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- DRM_INFO("DRM_I915_DEBUG enabled\n");
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
- DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+ drm_info(&dev_priv->drm,
+ "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
static struct drm_i915_private *
@@ -1437,8 +888,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.dev_private = i915;
-
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -1478,16 +927,16 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
int ret;
- dev_priv = i915_driver_create(pdev, ent);
- if (IS_ERR(dev_priv))
- return PTR_ERR(dev_priv);
+ i915 = i915_driver_create(pdev, ent);
+ if (IS_ERR(i915))
+ return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
- dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
* Check if we support fake LMEM -- for now we only unleash this for
@@ -1495,13 +944,13 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
- if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+ if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
i915_modparams.fake_lmem_start) {
- mkwrite_device_info(dev_priv)->memory_regions =
+ mkwrite_device_info(i915)->memory_regions =
REGION_SMEM | REGION_LMEM | REGION_STOLEN;
- mkwrite_device_info(dev_priv)->is_dgfx = true;
- GEM_BUG_ON(!HAS_LMEM(dev_priv));
- GEM_BUG_ON(!IS_DGFX(dev_priv));
+ mkwrite_device_info(i915)->is_dgfx = true;
+ GEM_BUG_ON(!HAS_LMEM(i915));
+ GEM_BUG_ON(!IS_DGFX(i915));
}
}
#endif
@@ -1510,48 +959,60 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_fini;
- ret = i915_driver_early_probe(dev_priv);
+ ret = i915_driver_early_probe(i915);
if (ret < 0)
goto out_pci_disable;
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_detect_vgpu(dev_priv);
+ intel_vgpu_detect(i915);
- ret = i915_driver_mmio_probe(dev_priv);
+ ret = i915_driver_mmio_probe(i915);
if (ret < 0)
goto out_runtime_pm_put;
- ret = i915_driver_hw_probe(dev_priv);
+ ret = i915_driver_hw_probe(i915);
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe(dev_priv);
+ ret = i915_driver_modeset_probe_noirq(i915);
if (ret < 0)
goto out_cleanup_hw;
- i915_driver_register(dev_priv);
+ ret = intel_irq_install(i915);
+ if (ret)
+ goto out_cleanup_modeset;
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ ret = i915_driver_modeset_probe(i915);
+ if (ret < 0)
+ goto out_cleanup_irq;
+
+ i915_driver_register(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_welcome_messages(dev_priv);
+ i915_welcome_messages(i915);
return 0;
+out_cleanup_irq:
+ intel_irq_uninstall(i915);
+out_cleanup_modeset:
+ /* FIXME */
out_cleanup_hw:
- i915_driver_hw_remove(dev_priv);
- intel_memory_regions_driver_release(dev_priv);
- i915_ggtt_driver_release(dev_priv);
+ i915_driver_hw_remove(i915);
+ intel_memory_regions_driver_release(i915);
+ i915_ggtt_driver_release(i915);
out_cleanup_mmio:
- i915_driver_mmio_release(dev_priv);
+ i915_driver_mmio_release(i915);
out_runtime_pm_put:
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- i915_driver_late_release(dev_priv);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
out_pci_disable:
pci_disable_device(pdev);
out_fini:
- i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
- i915_driver_destroy(dev_priv);
+ i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
+ i915_driver_destroy(i915);
return ret;
}
@@ -1561,13 +1022,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_unregister(i915);
- /*
- * After unregistering the device to prevent any new users, cancel
- * all in-flight requests so that we can quickly unbind the active
- * resources.
- */
- intel_gt_set_wedged(&i915->gt);
-
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
@@ -1579,6 +1033,10 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_modeset_remove(i915);
+ intel_irq_uninstall(i915);
+
+ i915_driver_modeset_remove_noirq(i915);
+
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
@@ -1665,10 +1123,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume);
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
-
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
@@ -1720,7 +1174,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev_priv);
+ i915_ggtt_suspend(&dev_priv->ggtt);
i915_save_state(dev_priv);
@@ -1755,7 +1209,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
disable_rpm_wakeref_asserts(rpm);
@@ -1768,11 +1222,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_power_suspend_late(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
-
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
- DRM_ERROR("Suspend complete failed: %d\n", ret);
+ drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
intel_power_domains_resume(dev_priv);
goto out;
@@ -1806,8 +1258,8 @@ int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
- if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
- state.event != PM_EVENT_FREEZE))
+ if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
return -EINVAL;
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1831,9 +1283,9 @@ static int i915_drm_resume(struct drm_device *dev)
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
- DRM_ERROR("failed to re-enable GGTT\n");
+ drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
- i915_gem_restore_gtt_mappings(dev_priv);
+ i915_ggtt_resume(&dev_priv->ggtt);
i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1920,7 +1372,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
*/
ret = pci_set_power_state(pdev, PCI_D0);
if (ret) {
- DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "failed to set PCI D0 power state (%d)\n", ret);
return ret;
}
@@ -1944,11 +1397,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_resume_prepare(dev_priv, false);
+ ret = vlv_resume_prepare(dev_priv, false);
if (ret)
- DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Resume prepare failed: %d, continuing anyway\n", ret);
intel_uncore_resume_early(&dev_priv->uncore);
@@ -2115,391 +1567,16 @@ static int i915_pm_restore(struct device *kdev)
return i915_pm_resume(kdev);
}
-/*
- * Save all Gunit registers that may be lost after a D3 and a subsequent
- * S0i[R123] transition. The list of registers needing a save/restore is
- * defined in the VLV2_S0IXRegs document. This document marks all Gunit
- * registers in the following way:
- * - Driver: saved/restored by the driver
- * - Punit : saved/restored by the Punit firmware
- * - No, w/o marking: no need to save/restore, since the register is R/O or
- * used internally by the HW in a way that doesn't depend on
- * keeping the content across a suspend/resume.
- * - Debug : used for debugging
- *
- * We save/restore all registers marked with 'Driver', with the following
- * exceptions:
- * - Registers out of use, including also registers marked with 'Debug'.
- * These have no effect on the driver's operation, so we don't save/restore
- * them to reduce the overhead.
- * - Registers that are fully setup by an initialization function called from
- * the resume path. For example many clock gating and RPS/RC6 registers.
- * - Registers that provide the right functionality with their reset defaults.
- *
- * TODO: Except for registers that, based on the above 3 criteria, can be
- * safely ignored, we save/restore all others, practically treating the HW
- * context as a black box for the driver. Further investigation is needed
- * to reduce the set of saved/restored registers even further, by following
- * the same 3 criteria.
- */
-static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
- int i;
-
- if (!s)
- return;
-
- /* GAM 0x4000-0x4770 */
- s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
- s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
- s->arb_mode = I915_READ(ARB_MODE);
- s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
- s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
-
- for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
- s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
-
- s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
- s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
-
- s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
- s->ecochk = I915_READ(GAM_ECOCHK);
- s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
- s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
-
- s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
-
- /* MBC 0x9024-0x91D0, 0x8500 */
- s->g3dctl = I915_READ(VLV_G3DCTL);
- s->gsckgctl = I915_READ(VLV_GSCKGCTL);
- s->mbctl = I915_READ(GEN6_MBCTL);
-
- /* GCP 0x9400-0x9424, 0x8100-0x810C */
- s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
- s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
- s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
- s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
- s->rstctl = I915_READ(GEN6_RSTCTL);
- s->misccpctl = I915_READ(GEN7_MISCCPCTL);
-
- /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
- s->gfxpause = I915_READ(GEN6_GFXPAUSE);
- s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
- s->rpdeuc = I915_READ(GEN6_RPDEUC);
- s->ecobus = I915_READ(ECOBUS);
- s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
- s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
- s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
- s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
- s->rcedata = I915_READ(VLV_RCEDATA);
- s->spare2gh = I915_READ(VLV_SPAREG2H);
-
- /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
- s->gt_imr = I915_READ(GTIMR);
- s->gt_ier = I915_READ(GTIER);
- s->pm_imr = I915_READ(GEN6_PMIMR);
- s->pm_ier = I915_READ(GEN6_PMIER);
-
- for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
- s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
-
- /* GT SA CZ domain, 0x100000-0x138124 */
- s->tilectl = I915_READ(TILECTL);
- s->gt_fifoctl = I915_READ(GTFIFOCTL);
- s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
- s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- s->pmwgicz = I915_READ(VLV_PMWGICZ);
-
- /* Gunit-Display CZ domain, 0x182028-0x1821CF */
- s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
- s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
- s->pcbr = I915_READ(VLV_PCBR);
- s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
-
- /*
- * Not saving any of:
- * DFT, 0x9800-0x9EC0
- * SARB, 0xB000-0xB1FC
- * GAC, 0x5208-0x524C, 0x14000-0x14C000
- * PCI CFG
- */
-}
-
-static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
- u32 val;
- int i;
-
- if (!s)
- return;
-
- /* GAM 0x4000-0x4770 */
- I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
- I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
- I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
- I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
- I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
-
- for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
- I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
-
- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
- I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
-
- I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
- I915_WRITE(GAM_ECOCHK, s->ecochk);
- I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
- I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
-
- I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
-
- /* MBC 0x9024-0x91D0, 0x8500 */
- I915_WRITE(VLV_G3DCTL, s->g3dctl);
- I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
- I915_WRITE(GEN6_MBCTL, s->mbctl);
-
- /* GCP 0x9400-0x9424, 0x8100-0x810C */
- I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
- I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
- I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
- I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
- I915_WRITE(GEN6_RSTCTL, s->rstctl);
- I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
-
- /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
- I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
- I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
- I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
- I915_WRITE(ECOBUS, s->ecobus);
- I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
- I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
- I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
- I915_WRITE(VLV_RCEDATA, s->rcedata);
- I915_WRITE(VLV_SPAREG2H, s->spare2gh);
-
- /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
- I915_WRITE(GTIMR, s->gt_imr);
- I915_WRITE(GTIER, s->gt_ier);
- I915_WRITE(GEN6_PMIMR, s->pm_imr);
- I915_WRITE(GEN6_PMIER, s->pm_ier);
-
- for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
- I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
-
- /* GT SA CZ domain, 0x100000-0x138124 */
- I915_WRITE(TILECTL, s->tilectl);
- I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
- /*
- * Preserve the GT allow wake and GFX force clock bits; they are not
- * restored here, as they are used by the caller to control the s0ix
- * suspend/resume sequence.
- */
- val = I915_READ(VLV_GTLC_WAKE_CTRL);
- val &= VLV_GTLC_ALLOWWAKEREQ;
- val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
- I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
-
- val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- val &= VLV_GFX_CLK_FORCE_ON_BIT;
- val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
- I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
-
- /* Gunit-Display CZ domain, 0x182028-0x1821CF */
- I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
- I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
- I915_WRITE(VLV_PCBR, s->pcbr);
- I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
-}
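The GTLC_WAKE_CTRL and SURVIVABILITY restores just above use a bit-preserving pattern: keep the caller-controlled bit from the live register and take everything else from the saved copy. A standalone sketch (the mask value is hypothetical):

#include <stdio.h>

#define ALLOWWAKEREQ 0x1u	/* hypothetical caller-owned bit */

int main(void)
{
	unsigned int live = 0x31, saved = 0x0e;
	/* Keep bit 0 from the live value, everything else from the save. */
	unsigned int val = (live & ALLOWWAKEREQ) | (saved & ~ALLOWWAKEREQ);

	printf("restored: 0x%02x\n", val); /* 0x0f */
	return 0;
}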
-
-static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
- u32 mask, u32 val)
-{
- i915_reg_t reg = VLV_GTLC_PW_STATUS;
- u32 reg_value;
- int ret;
-
- /* The HW does not like us polling for PW_STATUS frequently, so
- * use the sleeping loop rather than risk the busy spin within
- * intel_wait_for_register().
- *
- * Transitioning between RC6 states should be at most 2ms (see
- * valleyview_enable_rps) so use a 3ms timeout.
- */
- ret = wait_for(((reg_value =
- intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
- == val, 3);
-
- /* just trace the final value */
- trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
-
- return ret;
-}
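The helper above deliberately uses a sleeping poll rather than a busy spin, since the hardware dislikes frequent PW_STATUS reads. A userspace sketch of the same shape, with read_status() standing in for the MMIO read and the timing values hypothetical:

#include <stdio.h>
#include <time.h>

static unsigned int read_status(void) { return 0x2; /* hypothetical */ }

/* Re-read until (status & mask) == expected or the timeout expires,
 * sleeping between reads instead of busy-spinning. */
static int wait_for_status(unsigned int mask, unsigned int expected,
			   unsigned int timeout_ms)
{
	struct timespec delay = { .tv_nsec = 100 * 1000 }; /* 100 us */
	unsigned int elapsed_us = 0;

	while ((read_status() & mask) != expected) {
		if (elapsed_us >= timeout_ms * 1000)
			return -1; /* -ETIMEDOUT in the kernel */
		nanosleep(&delay, NULL);
		elapsed_us += 100;
	}
	return 0;
}

int main(void)
{
	printf("wait: %d\n", wait_for_status(0x3, 0x2, 3)); /* 0 */
	return 0;
}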
-
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
-{
- u32 val;
- int err;
-
- val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
- if (force_on)
- val |= VLV_GFX_CLK_FORCE_ON_BIT;
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
- if (!force_on)
- return 0;
-
- err = intel_wait_for_register(&dev_priv->uncore,
- VLV_GTLC_SURVIVABILITY_REG,
- VLV_GFX_CLK_STATUS_BIT,
- VLV_GFX_CLK_STATUS_BIT,
- 20);
- if (err)
- DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
- I915_READ(VLV_GTLC_SURVIVABILITY_REG));
-
- return err;
-}
-
-static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
-{
- u32 mask;
- u32 val;
- int err;
-
- val = I915_READ(VLV_GTLC_WAKE_CTRL);
- val &= ~VLV_GTLC_ALLOWWAKEREQ;
- if (allow)
- val |= VLV_GTLC_ALLOWWAKEREQ;
- I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
- POSTING_READ(VLV_GTLC_WAKE_CTRL);
-
- mask = VLV_GTLC_ALLOWWAKEACK;
- val = allow ? mask : 0;
-
- err = vlv_wait_for_pw_status(dev_priv, mask, val);
- if (err)
- DRM_ERROR("timeout disabling GT waking\n");
-
- return err;
-}
-
-static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
- bool wait_for_on)
-{
- u32 mask;
- u32 val;
-
- mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
- val = wait_for_on ? mask : 0;
-
- /*
- * RC6 transitioning can be delayed up to 2 msec (see
- * valleyview_enable_rps), use 3 msec for safety.
- *
- * This can fail to turn off RC6 if the GPU is stuck after a failed
- * reset and we are trying to force the machine to sleep.
- */
- if (vlv_wait_for_pw_status(dev_priv, mask, val))
- DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
- onoff(wait_for_on));
-}
-
-static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
-{
- if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
- return;
-
- DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
- I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
-}
-
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
-{
- u32 mask;
- int err;
-
- /*
- * Bspec defines the following GT power-well-on flags as debug only, so
- * don't treat them as hard failures.
- */
- vlv_wait_for_gt_wells(dev_priv, false);
-
- mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
- WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
-
- vlv_check_no_gt_access(dev_priv);
-
- err = vlv_force_gfx_clock(dev_priv, true);
- if (err)
- goto err1;
-
- err = vlv_allow_gt_wake(dev_priv, false);
- if (err)
- goto err2;
-
- vlv_save_gunit_s0ix_state(dev_priv);
-
- err = vlv_force_gfx_clock(dev_priv, false);
- if (err)
- goto err2;
-
- return 0;
-
-err2:
- /* For safety always re-enable waking and disable gfx clock forcing */
- vlv_allow_gt_wake(dev_priv, true);
-err1:
- vlv_force_gfx_clock(dev_priv, false);
-
- return err;
-}
-
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- int err;
- int ret;
-
- /*
- * If any of the steps fail, just try to continue; that's the best we
- * can do at this point. Return the first error code (which will also
- * leave RPM permanently disabled).
- */
- ret = vlv_force_gfx_clock(dev_priv, true);
-
- vlv_restore_gunit_s0ix_state(dev_priv);
-
- err = vlv_allow_gt_wake(dev_priv, true);
- if (!ret)
- ret = err;
-
- err = vlv_force_gfx_clock(dev_priv, false);
- if (!ret)
- ret = err;
-
- vlv_check_no_gt_access(dev_priv);
-
- if (rpm_resume)
- intel_init_clock_gating(dev_priv);
-
- return ret;
-}
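vlv_resume_prepare() above runs every step regardless of failures and reports only the first error code. A standalone sketch of that keep-going policy, with hypothetical step results:

#include <stdio.h>

/* Run every step, remember only the first failure. */
static int first_error(const int *results, int n)
{
	int ret = 0, i;

	for (i = 0; i < n; i++)
		if (!ret && results[i])
			ret = results[i];
	return ret;
}

int main(void)
{
	int steps[] = { 0, -5, -22 }; /* second step fails first */

	printf("ret = %d\n", first_error(steps, 3)); /* -5 */
	return 0;
}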
-
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- DRM_DEBUG_KMS("Suspending device\n");
+ drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
disable_rpm_wakeref_asserts(rpm);
@@ -2517,11 +1594,10 @@ static int intel_runtime_suspend(struct device *kdev)
intel_display_power_suspend(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
-
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
- DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Runtime suspend failed, disabling it (%d)\n", ret);
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2539,7 +1615,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_driver_release(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
- DRM_ERROR("Unclaimed access detected prior to suspending\n");
+ drm_err(&dev_priv->drm,
+ "Unclaimed access detected prior to suspending\n");
rpm->suspended = true;
@@ -2571,7 +1648,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
- DRM_DEBUG_KMS("Device suspended\n");
+ drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
return 0;
}
@@ -2579,25 +1656,25 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- DRM_DEBUG_KMS("Resuming device\n");
+ drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
- WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
+ drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
- DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
+ drm_dbg(&dev_priv->drm,
+ "Unclaimed access during suspend, bios?\n");
intel_display_power_resume(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_resume_prepare(dev_priv, true);
+ ret = vlv_resume_prepare(dev_priv, true);
intel_uncore_runtime_resume(&dev_priv->uncore);
@@ -2623,9 +1700,10 @@ static int intel_runtime_resume(struct device *kdev)
enable_rpm_wakeref_asserts(rpm);
if (ret)
- DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Runtime resume failed, disabling it (%d)\n", ret);
else
- DRM_DEBUG_KMS("Device resumed\n");
+ drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
return ret;
}
@@ -2673,12 +1751,12 @@ const struct dev_pm_ops i915_pm_ops = {
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .release = drm_release_noglobal,
.unlocked_ioctl = drm_ioctl,
.mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
- .compat_ioctl = i915_compat_ioctl,
+ .compat_ioctl = i915_ioc32_compat_ioctl,
.llseek = noop_llseek,
};
@@ -2770,9 +1848,6 @@ static struct drm_driver driver = {
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = i915_get_crtc_scanoutpos,
-
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 810e3ccd56ec..1f5b9a584f71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,6 @@
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
-#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -70,6 +69,7 @@
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
+#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
@@ -104,18 +104,23 @@
#include "intel_region_lmem.h"
-#include "intel_gvt.h"
-
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200114"
-#define DRIVER_TIMESTAMP 1579001978
+#define DRIVER_DATE "20200313"
+#define DRIVER_TIMESTAMP 1584144591
struct drm_i915_gem_object;
+/*
+ * The code assumes that the hpd_pins below have consecutive values,
+ * starting with HPD_PORT_A, so that in most cases the HPD pin associated
+ * with any port can be retrieved by adding the corresponding port (or
+ * phy) enum value to HPD_PORT_A. For example:
+ * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
+ */
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
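The consecutive-values assumption documented in the new comment makes the pin lookup pure enum arithmetic. A standalone sketch using hypothetical enum values (the real enums live in i915_drv.h and intel_display.h):

#include <stdio.h>

enum phy { PHY_A, PHY_B, PHY_C };
enum pin { HPD_PORT_A = 7, HPD_PORT_B, HPD_PORT_C }; /* hypothetical values */

int main(void)
{
	enum phy phy = PHY_C;

	/* HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A */
	printf("pin for PHY_C: %d (HPD_PORT_C = %d)\n",
	       HPD_PORT_A + phy - PHY_A, HPD_PORT_C);
	return 0;
}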
@@ -203,9 +208,7 @@ struct drm_i915_file_private {
} mm;
struct xarray context_xa;
-
- struct idr vm_idr;
- struct mutex vm_idr_lock; /* guards vm_idr */
+ struct xarray vm_xa;
unsigned int bsd_engine;
@@ -255,18 +258,19 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
-struct intel_crtc_state;
+struct intel_cdclk_config;
+struct intel_cdclk_state;
+struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
-struct intel_cdclk_state;
struct drm_i915_display_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state);
+ struct intel_cdclk_config *cdclk_config);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
@@ -280,7 +284,7 @@ struct drm_i915_display_funcs {
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
- int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
u8 (*calc_voltage_level)(int cdclk);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -504,7 +508,7 @@ struct i915_psr {
u16 su_x_granularity;
bool dc3co_enabled;
u32 dc3co_exit_delay;
- struct delayed_work idle_work;
+ struct delayed_work dc3co_work;
bool force_mode_changed;
};
@@ -732,19 +736,10 @@ enum intel_ddb_partitioning {
INTEL_DDB_PART_5_6, /* IVB+ */
};
-struct intel_wm_level {
- bool enable;
- u32 pri_val;
- u32 spr_val;
- u32 cur_val;
- u32 fbc_val;
-};
-
struct ilk_wm_values {
u32 wm_pipe[3];
u32 wm_lp[3];
u32 wm_lp_spr[3];
- u32 wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
@@ -799,65 +794,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-struct skl_ddb_allocation {
- u8 enabled_slices; /* GEN11 has configurable 2 slices */
-};
-
-struct skl_ddb_values {
- unsigned dirty_pipes;
- struct skl_ddb_allocation ddb;
-};
-
-struct skl_wm_level {
- u16 min_ddb_alloc;
- u16 plane_res_b;
- u8 plane_res_l;
- bool plane_en;
- bool ignore_lines;
-};
-
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
-enum intel_pipe_crc_source {
- INTEL_PIPE_CRC_SOURCE_NONE,
- INTEL_PIPE_CRC_SOURCE_PLANE1,
- INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PLANE3,
- INTEL_PIPE_CRC_SOURCE_PLANE4,
- INTEL_PIPE_CRC_SOURCE_PLANE5,
- INTEL_PIPE_CRC_SOURCE_PLANE6,
- INTEL_PIPE_CRC_SOURCE_PLANE7,
- INTEL_PIPE_CRC_SOURCE_PIPE,
- /* TV/DP on pre-gen5/vlv can't use the pipe source. */
- INTEL_PIPE_CRC_SOURCE_TV,
- INTEL_PIPE_CRC_SOURCE_DP_B,
- INTEL_PIPE_CRC_SOURCE_DP_C,
- INTEL_PIPE_CRC_SOURCE_DP_D,
- INTEL_PIPE_CRC_SOURCE_AUTO,
- INTEL_PIPE_CRC_SOURCE_MAX,
-};
-
-#define INTEL_PIPE_CRC_ENTRIES_NR 128
-struct intel_pipe_crc {
- spinlock_t lock;
- int skipped;
- enum intel_pipe_crc_source source;
-};
-
struct i915_frontbuffer_tracking {
spinlock_t lock;
@@ -875,14 +811,7 @@ struct i915_virtual_gpu {
u32 caps;
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
-struct intel_cdclk_state {
+struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
};
@@ -1002,33 +931,18 @@ struct drm_i915_private {
unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
- unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
- /*
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
- */
struct {
- /*
- * The current logical cdclk state.
- * See intel_atomic_state.cdclk.logical
- */
- struct intel_cdclk_state logical;
- /*
- * The current actual cdclk state.
- * See intel_atomic_state.cdclk.actual
- */
- struct intel_cdclk_state actual;
- /* The current hardware cdclk state */
- struct intel_cdclk_state hw;
+ /* The current hardware cdclk configuration */
+ struct intel_cdclk_config hw;
/* cdclk, divider, and ratio table from bspec */
const struct intel_cdclk_vals *table;
- int force_min_cdclk;
+ struct intel_global_obj obj;
} cdclk;
/**
@@ -1068,31 +982,32 @@ struct drm_i915_private {
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-#ifdef CONFIG_DEBUG_FS
- struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
-#endif
+ /**
+ * dpll and cdclk state is protected by connection_mutex.
+ * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
+ * It must be global rather than per-dpll, because on some platforms
+ * PLLs share registers.
+ */
+ struct {
+ struct mutex lock;
- /* dpll and cdclk state is protected by connection_mutex */
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *dpll_mgr;
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
- /*
- * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms
- * plls share registers.
- */
- struct mutex dpll_lock;
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+ } dpll;
+
+ struct list_head global_obj_list;
/*
- * For reading active_pipes, min_cdclk, min_voltage_level holding
- * any crtc lock is sufficient, for writing must hold all of them.
+ * For reading active_pipes, holding any crtc lock is
+ * sufficient; for writing, all of them must be held.
*/
u8 active_pipes;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -1105,8 +1020,6 @@ struct drm_i915_private {
struct work_struct free_work;
} atomic_helper;
- u16 orig_clock;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -1191,7 +1104,6 @@ struct drm_i915_private {
/* current hardware state */
union {
struct ilk_wm_values hw;
- struct skl_ddb_values skl_hw;
struct vlv_wm_values vlv;
struct g4x_wm_values g4x;
};
@@ -1213,6 +1125,8 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
+ u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */
+
struct dram_info {
bool valid;
bool is_16gb_dimm;
@@ -1236,7 +1150,7 @@ struct drm_i915_private {
u8 num_planes;
} max_bw[6];
- struct drm_private_obj bw_obj;
+ struct intel_global_obj bw_obj;
struct intel_runtime_pm runtime_pm;
@@ -1300,16 +1214,6 @@ struct drm_i915_private {
*/
};
-struct dram_dimm_info {
- u8 size, width, ranks;
-};
-
-struct dram_channel_info {
- struct dram_dimm_info dimm_l, dimm_s;
- u8 ranks;
- bool is_16gb_dimm;
-};
-
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
@@ -1580,6 +1484,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
+#define GLK_REVID_A2 0x2
+#define GLK_REVID_B0 0x3
#define IS_GLK_REVID(dev_priv, since, until) \
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
@@ -1718,10 +1624,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* Having GuC is not the same as using GuC */
-#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
-
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1767,11 +1669,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-#ifdef CONFIG_COMPAT
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
-#define i915_compat_ioctl NULL
-#endif
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -1780,18 +1677,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gvt;
-}
-
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.active;
-}
-
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1856,12 +1741,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __must_check
-i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- return mutex_lock_interruptible(&dev->struct_mutex);
-}
-
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2010,20 +1889,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-/* register wait wrappers for display regs */
-#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
- intel_wait_for_register(&(dev_priv_)->uncore, \
- (reg_), (mask_), (value_), (timeout_))
-
-#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({ \
- u32 mask__ = (mask_); \
- intel_de_wait_for_register((dev_priv_), (reg_), \
- mask__, mask__, (timeout_)); \
-})
-
-#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
- intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
-
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2046,10 +1911,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
-{
- return intel_guc_is_submission_supported(guc) &&
- intel_guc_is_running(guc);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5f6e63952821..ca5420012a22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_vma_manager.h>
-#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
@@ -941,9 +940,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if (i915_gem_object_never_bind_ggtt(obj))
- return ERR_PTR(-ENODEV);
-
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/*
@@ -1009,6 +1005,12 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
if (ret)
return ERR_PTR(ret);
+ ret = i915_vma_wait_for_bind(vma);
+ if (ret) {
+ i915_vma_unpin(vma);
+ return ERR_PTR(ret);
+ }
+
return vma;
}
@@ -1153,7 +1155,7 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
- i915_gem_restore_gtt_mappings(dev_priv);
+ i915_ggtt_resume(&dev_priv->ggtt);
i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
}
@@ -1201,7 +1203,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
+ drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1229,7 +1231,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
- WARN_ON(dev_priv->mm.shrink_count);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -1269,7 +1271,8 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+ drm_WARN_ON(&i915->drm,
+ i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0697bedebeef..4518b9b35c3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,8 +26,6 @@
*
*/
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"
@@ -292,7 +290,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
- /* If we are using coloring to insert guard pages between
+ /*
+ * If we are using coloring to insert guard pages between
* different cache domains within the address space, we have
* to check whether the objects on either side of our range
* abut and conflict. If they are in conflict, then we evict
@@ -309,22 +308,18 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}
}
- if (flags & PIN_NONBLOCK &&
- (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
+ if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
break;
}
- /* Overlap of objects in the same batch? */
- if (i915_vma_is_pinned(vma)) {
+ if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
ret = -ENOSPC;
- if (vma->exec_flags &&
- *vma->exec_flags & EXEC_OBJECT_PINNED)
- ret = -EINVAL;
break;
}
- /* Never show fear in the face of dragons!
+ /*
+ * Never show fear in the face of dragons!
*
* We cannot directly remove this node from within this
* iterator and as with i915_gem_evict_something() we employ
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d9c34a23cd67..d152b648c73c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,10 +21,9 @@
* IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -237,11 +236,12 @@ static int fence_update(struct i915_fence_reg *fence,
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
- if (WARN(!i915_gem_object_get_stride(vma->obj) ||
- !i915_gem_object_get_tiling(vma->obj),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- i915_gem_object_get_stride(vma->obj),
- i915_gem_object_get_tiling(vma->obj)))
+ if (drm_WARN(&uncore->i915->drm,
+ !i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ i915_gem_object_get_stride(vma->obj),
+ i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
ret = i915_vma_sync(vma);
@@ -713,7 +713,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
}
if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
+ drm_err(&i915->drm, "Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e039eb56900f..cb43381b0d37 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
@@ -63,7 +61,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/* XXX This does not prevent more requests being submitted! */
if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
-MAX_SCHEDULE_TIMEOUT)) {
- DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
+ drm_err(&dev_priv->drm,
+ "Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9e401a5fcae8..2a4cd0ba5464 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -37,6 +37,7 @@
#include <drm/drm_print.h>
#include "display/intel_atomic.h"
+#include "display/intel_csr.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -47,7 +48,6 @@
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
-#include "intel_csr.h"
#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
@@ -450,6 +450,14 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
+
+ if (INTEL_GEN(m->i915) < 12)
+ return;
+
+ err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
+ ee->instdone.slice_common_extra[0]);
+ err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
+ ee->instdone.slice_common_extra[1]);
}
static void error_print_request(struct drm_i915_error_state_buf *m,
@@ -473,9 +481,13 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+ const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+
+ err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
- ctx->guilty, ctx->active);
+ ctx->guilty, ctx->active,
+ ctx->total_runtime * period,
+ mul_u32_u32(ctx->avg_runtime, period));
}
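The new runtime fields printed above are counted in command-streamer timestamp ticks and converted to nanoseconds by multiplying with the tick period; mul_u32_u32() is the kernel's overflow-safe u32*u32 -> u64 multiply, mirrored by the cast below. A standalone sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_ticks = 123456789;
	uint32_t avg_ticks = 4567;
	uint32_t period_ns = 83;	/* hypothetical CS timestamp period */

	printf("total %llu ns, avg %llu ns\n",
	       (unsigned long long)(total_ticks * period_ns),
	       (unsigned long long)((uint64_t)avg_ticks * period_ns));
	return 0;
}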
static struct i915_vma_coredump *
@@ -515,6 +527,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
+ err_printf(m, " ESR: 0x%08x\n", ee->esr);
error_print_instdone(m, ee);
@@ -1102,6 +1115,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
}
if (INTEL_GEN(i915) >= 4) {
+ ee->esr = ENGINE_READ(engine, RING_ESR);
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
@@ -1228,7 +1242,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
{
struct i915_gem_context *ctx;
struct task_struct *task;
- bool capture;
+ bool simulated;
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
@@ -1236,7 +1250,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
+ return true;
rcu_read_lock();
task = pid_task(ctx->pid, PIDTYPE_PID);
@@ -1250,10 +1264,13 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- capture = i915_gem_context_no_error_capture(ctx);
+ e->total_runtime = rq->context->runtime.total;
+ e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
+
+ simulated = i915_gem_context_no_error_capture(ctx);
i915_gem_context_put(ctx);
- return capture;
+ return simulated;
}
struct intel_engine_capture_vma {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index e4a6afed3bbf..0d1f6c8ff355 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -75,6 +75,7 @@ struct intel_engine_coredump {
u32 hws;
u32 ipeir;
u32 ipehr;
+ u32 esr;
u32 bbstate;
u32 instpm;
u32 instps;
@@ -87,6 +88,10 @@ struct intel_engine_coredump {
struct i915_gem_context_coredump {
char comm[TASK_COMM_LEN];
+
+ u64 total_runtime;
+ u32 avg_runtime;
+
pid_t pid;
int active;
int guilty;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index c1007245f46d..8e45ca3d2ede 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,9 +28,10 @@
*/
#include <linux/compat.h>
-#include <drm/i915_drm.h>
#include <drm/drm_ioctl.h>
+
#include "i915_drv.h"
+#include "i915_ioc32.h"
struct drm_i915_getparam32 {
s32 param;
@@ -67,7 +68,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
};
/**
- * i915_compat_ioctl - handle the mistakes of the past
+ * i915_ioc32_compat_ioctl - handle the mistakes of the past
* @filp: the file pointer
* @cmd: the ioctl command (and encoded flags)
* @arg: the ioctl argument (from userspace)
@@ -75,7 +76,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*/
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.h b/drivers/gpu/drm/i915/i915_ioc32.h
new file mode 100644
index 000000000000..40dcd55ca213
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_IOC32_H__
+#define __I915_IOC32_H__
+
+#ifdef CONFIG_COMPAT
+struct file;
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+#define i915_ioc32_compat_ioctl NULL
+#endif
+
+#endif /* __I915_IOC32_H__ */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afc6aad9bf8c..9f0653cf0510 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
-#include <drm/i915_drm.h>
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
@@ -79,7 +78,7 @@ static const u32 hpd_ibx[HPD_NUM_PINS] = {
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};
static const u32 hpd_cpt[HPD_NUM_PINS] = {
@@ -87,7 +86,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};
static const u32 hpd_spt[HPD_NUM_PINS] = {
@@ -95,7 +94,7 @@ static const u32 hpd_spt[HPD_NUM_PINS] = {
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
- [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+ [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
@@ -104,7 +103,7 @@ static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
@@ -113,7 +112,7 @@ static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
@@ -122,21 +121,21 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
- [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+ [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};
static const u32 hpd_gen11[HPD_NUM_PINS] = {
[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
- [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+ [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};
static const u32 hpd_gen12[HPD_NUM_PINS] = {
@@ -145,7 +144,7 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = {
[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
- [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
+ [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};
static const u32 hpd_icp[HPD_NUM_PINS] = {
@@ -169,6 +168,14 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
+static void
+intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_handle_vblank(&crtc->base);
+}
+
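The new helper resolves the crtc for a pipe and reports the vblank through the crtc-based DRM API; the call sites later in this patch are converted accordingly. Condensed before/after of one converted site:

	/* before: report by pipe index on the drm_device */
	drm_handle_vblank(&dev_priv->drm, pipe);
	/* after: look up the crtc, then use the crtc-based API */
	intel_handle_vblank(dev_priv, pipe);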
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
@@ -208,8 +215,9 @@ static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
if (val == 0)
return;
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(reg), val);
+ drm_WARN(&uncore->i915->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(reg), val);
intel_uncore_write(uncore, reg, 0xffffffff);
intel_uncore_posting_read(uncore, reg);
intel_uncore_write(uncore, reg, 0xffffffff);
@@ -223,8 +231,9 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
if (val == 0)
return;
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(GEN2_IIR), val);
+ drm_WARN(&uncore->i915->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(GEN2_IIR), val);
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
intel_uncore_posting_read16(uncore, GEN2_IIR);
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
@@ -262,7 +271,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
u32 val;
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(bits & ~mask);
+ drm_WARN_ON(&dev_priv->drm, bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
val &= ~mask;
@@ -305,9 +314,9 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->irq_mask;
@@ -336,9 +345,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = I915_READ(GEN8_DE_PORT_IMR);
@@ -369,9 +378,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->de_irq_mask[pipe];
@@ -399,11 +408,11 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
I915_WRITE(SDEIMR, sdeimr);
@@ -425,13 +434,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
* On pipe A we don't support the PSR interrupt yet,
* on pipe B and C the same bit MBZ.
*/
- if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
/*
* On pipe B and C we don't support the PSR interrupt yet, on pipe
* A the same bit is for perf counters which we don't use either.
*/
- if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ status_mask & PIPE_B_PSR_STATUS_VLV))
return 0;
enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
@@ -443,10 +454,11 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
out:
- WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
- pipe_name(pipe), enable_mask, status_mask);
+ drm_WARN_ONCE(&dev_priv->drm,
+ enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask);
return enable_mask;
}
@@ -457,12 +469,12 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: status_mask=0x%x\n",
- pipe_name(pipe), status_mask);
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
@@ -480,12 +492,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: status_mask=0x%x\n",
- pipe_name(pipe), status_mask);
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
return;
@@ -624,9 +636,9 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
* register.
*/
do {
- high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
- low = I915_READ_FW(low_frame);
- high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
+ high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = intel_de_read_fw(dev_priv, low_frame);
+ high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -683,15 +695,17 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
* pipe frame time stamp. The time stamp value
* is sampled at every start of vertical blank.
*/
- scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ scan_prev_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
/*
* The TIMESTAMP_CTR register has the current
* time stamp value.
*/
- scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
+ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
- scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ scan_post_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
@@ -702,7 +716,10 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
return scanline;
}
-/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
+/*
+ * intel_de_read_fw(), only for fast reads of display block, no need for
+ * forcewake etc.
+ */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -726,9 +743,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vtotal /= 2;
if (IS_GEN(dev_priv, 2))
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -747,7 +764,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
for (i = 0; i < 100; i++) {
udelay(1);
- temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+ temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
if (temp != position) {
position = temp;
break;
@@ -762,13 +779,15 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = _crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
@@ -777,9 +796,10 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
- if (WARN_ON(!mode->crtc_clock)) {
- DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
- "pipe %c\n", pipe_name(pipe));
+ if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
+ drm_dbg(&dev_priv->drm,
+ "trying to get scanoutpos for disabled "
+ "pipe %c\n", pipe_name(pipe));
return false;
}
@@ -818,7 +838,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
* We can split this into vertical and horizontal
* scanout position.
*/
- position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
/* convert to pixel counts */
vbl_start *= htotal;
@@ -879,6 +899,14 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
return true;
}
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ i915_get_crtc_scanoutpos);
+}
+
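The driver now exposes a single crtc-based timestamp entry point built on the DRM vblank helper, handing its scanout-position routine in as the callback. A hedged sketch of how such a hook is typically wired up (the initializer name below is assumed for illustration):

	static const struct drm_crtc_funcs example_crtc_funcs = {
		/* ... */
		.get_vblank_counter = i915_get_vblank_counter,
		.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
	};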
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -918,7 +946,7 @@ static void ivb_parity_work(struct work_struct *work)
mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
- if (WARN_ON(!dev_priv->l3_parity.which_slice))
+ if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -929,7 +957,8 @@ static void ivb_parity_work(struct work_struct *work)
i915_reg_t reg;
slice--;
- if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ slice >= NUM_L3_SLICES(dev_priv)))
break;
dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -966,7 +995,7 @@ static void ivb_parity_work(struct work_struct *work)
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
out:
- WARN_ON(dev_priv->l3_parity.which_slice);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
spin_lock_irq(&gt->irq_lock);
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&gt->irq_lock);
@@ -1165,8 +1194,9 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg(&dev_priv->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
@@ -1187,8 +1217,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
trace_intel_pipe_crc(crtc, crcs);
@@ -1351,7 +1381,7 @@ static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1369,7 +1399,7 @@ static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1393,7 +1423,7 @@ static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1419,7 +1449,7 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1463,9 +1493,9 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
}
- WARN_ONCE(1,
- "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- I915_READ(PORT_HOTPLUG_STAT));
+ drm_WARN_ONCE(&dev_priv->drm, 1,
+ "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+ I915_READ(PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -1603,7 +1633,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 master_ctl, iir;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
- u32 gt_iir[4];
u32 ier = 0;
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
@@ -1631,7 +1660,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
- gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1655,8 +1684,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1710,8 +1737,8 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
- port_name(port));
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ port_name(port));
}
if (pch_iir & SDE_AUX_MASK)
@@ -1721,25 +1748,27 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- DRM_ERROR("PCH poison interrupt\n");
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK)
+ if (pch_iir & SDE_FDI_MASK) {
for_each_pipe(dev_priv, pipe)
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
- pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+ drm_dbg(&dev_priv->drm,
+ "PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
@@ -1754,7 +1783,7 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- DRM_ERROR("Poison interrupt\n");
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
@@ -1777,7 +1806,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- DRM_ERROR("PCH poison interrupt\n");
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
for_each_pipe(dev_priv, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
@@ -1796,8 +1825,8 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
- port_name(port));
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ port_name(port));
}
if (pch_iir & SDE_AUX_MASK_CPT)
@@ -1807,16 +1836,17 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+ drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+ drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
- if (pch_iir & SDE_FDI_MASK_CPT)
+ if (pch_iir & SDE_FDI_MASK_CPT) {
for_each_pipe(dev_priv, pipe)
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
- pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev_priv);
@@ -1844,8 +1874,9 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
pins = hpd_icp;
} else {
- WARN(!HAS_PCH_ICP(dev_priv),
- "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+ drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
+ "Unrecognized PCH type 0x%x\n",
+ INTEL_PCH_TYPE(dev_priv));
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
@@ -1952,11 +1983,11 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
if (de_iir & DE_POISON)
- DRM_ERROR("Poison interrupt\n");
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2009,7 +2040,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
}
/* check event from PCH */
@@ -2153,7 +2184,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (pin_mask)
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
else
- DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+ drm_err(&dev_priv->drm,
+ "Unexpected DE HPD interrupt 0x%08x\n", iir);
}
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
@@ -2226,7 +2258,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (!found)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
+ drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}
static irqreturn_t
@@ -2243,7 +2275,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE MISC)!\n");
}
}
@@ -2254,7 +2287,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
- DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -2294,10 +2328,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (!found)
- DRM_ERROR("Unexpected DE Port interrupt\n");
+ drm_err(&dev_priv->drm,
+ "Unexpected DE Port interrupt\n");
}
else
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(dev_priv, pipe) {
@@ -2308,7 +2344,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -2316,7 +2353,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -2326,9 +2363,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
- pipe_name(pipe),
- fault_errors);
+ drm_err(&dev_priv->drm,
+ "Fault errors on pipe %c: 0x%08x\n",
+ pipe_name(pipe),
+ fault_errors);
}
if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
@@ -2354,7 +2392,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+ drm_dbg(&dev_priv->drm,
+ "The master control interrupt lied (SDE)!\n");
}
}
@@ -2384,7 +2423,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = arg;
void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
- u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -2395,8 +2433,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
- /* Find, clear, then process each source of interrupt */
- gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+ /* Find, queue (onto bottom-halves), then clear each source */
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -2407,8 +2445,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
return IRQ_HANDLED;
}
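With the ack step folded into gen8_gt_irq_handler(), the handler no longer stages IIR values in a local gt_iir[4] array: GT sources are found, queued onto bottom halves, and cleared in a single pass. Condensed from the hunks above:

	/* before: two-phase -- ack into scratch, process after re-enable */
	u32 gt_iir[4];
	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
	/* ... display/PCH handling ... */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

	/* after: find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);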
@@ -2491,7 +2527,7 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
return IRQ_NONE;
}
- /* Find, clear, then process each source of interrupt. */
+ /* Find, queue (onto bottom-halves), then clear each source */
gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
@@ -2686,7 +2722,7 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_NOP(dev_priv))
return;
- WARN_ON(I915_READ(SDEIER) != 0);
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
@@ -2733,7 +2769,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- WARN_ON(dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
@@ -3163,8 +3199,9 @@ static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE;
- DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
- hotplug, enabled_irqs);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invert bit setting: hp_ctl:%x hp_port:%x\n",
+ hotplug, enabled_irqs);
hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
/*
@@ -3418,7 +3455,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
u32 mask = SDE_GMBUS_ICP;
- WARN_ON(I915_READ(SDEIER) != 0);
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
@@ -3547,7 +3584,8 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
if (eir_stuck)
- DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
+ eir_stuck);
}
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -3584,7 +3622,8 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
if (eir_stuck)
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
+ eir_stuck);
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 812c47a9c2d6..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -101,10 +101,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq);
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1dd1f3652795..add00ec1f787 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@
MODULE_PARM_DESC(name, desc)
struct i915_params i915_modparams __read_mostly = {
-#define MEMBER(T, member, value) .member = (value),
+#define MEMBER(T, member, value, ...) .member = (value),
I915_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};
@@ -92,9 +92,6 @@ i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe the driver for specified devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
-i915_param_named_unsafe(alpha_support, bool, 0400,
- "Deprecated. See i915.force_probe.");
-
i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
@@ -106,10 +103,6 @@ i915_param_named(fastboot, int, 0600,
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
-i915_param_named_unsafe(prefault_disable, bool, 0600,
- "Disable page prefaulting for pread/pwrite/reloc (default:false). "
- "For developers only.");
-
i915_param_named_unsafe(load_detect_test, bool, 0600,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
@@ -172,7 +165,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, int, 0600,
"Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 31b88f297fbc..45323732f099 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,49 +36,49 @@ struct drm_printer;
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
*
- * param(type, name, value)
+ * param(type, name, value, mode)
*
- * type: parameter type, one of {bool, int, unsigned int, char *}
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
* name: name of the parameter
* value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
+ * debugfs file
*/
#define I915_PARAMS_FOR_EACH(param) \
- param(char *, vbt_firmware, NULL) \
- param(int, modeset, -1) \
- param(int, lvds_channel_mode, 0) \
- param(int, panel_use_ssc, -1) \
- param(int, vbt_sdvo_panel_type, -1) \
- param(int, enable_dc, -1) \
- param(int, enable_fbc, -1) \
- param(int, enable_psr, -1) \
- param(int, disable_power_well, -1) \
- param(int, enable_ips, 1) \
- param(int, invert_brightness, 0) \
- param(int, enable_guc, 0) \
- param(int, guc_log_level, -1) \
- param(char *, guc_firmware_path, NULL) \
- param(char *, huc_firmware_path, NULL) \
- param(char *, dmc_firmware_path, NULL) \
- param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
- param(int, edp_vswing, 0) \
- param(int, reset, 3) \
- param(unsigned int, inject_probe_failure, 0) \
- param(int, fastboot, -1) \
- param(int, enable_dpcd_backlight, 0) \
- param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
- param(unsigned long, fake_lmem_start, 0) \
+ param(char *, vbt_firmware, NULL, 0400) \
+ param(int, modeset, -1, 0400) \
+ param(int, lvds_channel_mode, 0, 0400) \
+ param(int, panel_use_ssc, -1, 0600) \
+ param(int, vbt_sdvo_panel_type, -1, 0400) \
+ param(int, enable_dc, -1, 0400) \
+ param(int, enable_fbc, -1, 0600) \
+ param(int, enable_psr, -1, 0600) \
+ param(int, disable_power_well, -1, 0400) \
+ param(int, enable_ips, 1, 0600) \
+ param(int, invert_brightness, 0, 0600) \
+ param(int, enable_guc, 0, 0400) \
+ param(int, guc_log_level, -1, 0400) \
+ param(char *, guc_firmware_path, NULL, 0400) \
+ param(char *, huc_firmware_path, NULL, 0400) \
+ param(char *, dmc_firmware_path, NULL, 0400) \
+ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
+ param(int, edp_vswing, 0, 0400) \
+ param(unsigned int, reset, 3, 0600) \
+ param(unsigned int, inject_probe_failure, 0, 0600) \
+ param(int, fastboot, -1, 0600) \
+ param(int, enable_dpcd_backlight, -1, 0600) \
+ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
+ param(unsigned long, fake_lmem_start, 0, 0400) \
/* leave bools at the end to not create holes */ \
- param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_hangcheck, true) \
- param(bool, prefault_disable, false) \
- param(bool, load_detect_test, false) \
- param(bool, force_reset_modeset_test, false) \
- param(bool, error_capture, true) \
- param(bool, disable_display, false) \
- param(bool, verbose_state_checks, true) \
- param(bool, nuclear_pageflip, false) \
- param(bool, enable_dp_mst, true) \
- param(bool, enable_gvt, false)
+ param(bool, enable_hangcheck, true, 0600) \
+ param(bool, load_detect_test, false, 0600) \
+ param(bool, force_reset_modeset_test, false, 0600) \
+ param(bool, error_capture, true, 0600) \
+ param(bool, disable_display, false, 0400) \
+ param(bool, verbose_state_checks, true, 0) \
+ param(bool, nuclear_pageflip, false, 0400) \
+ param(bool, enable_dp_mst, true, 0600) \
+ param(bool, enable_gvt, false, 0400)
#define MEMBER(T, member, ...) T member;
struct i915_params {
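The X-macro now carries a fourth argument (the debugfs mode), which expansions that do not need it swallow via `...`. A small expansion sketch using two params from the list above:

	/* MEMBER keeps only the field declaration, dropping value and mode: */
	#define MEMBER(T, member, ...) T member;
	/* param(int, modeset, -1, 0400)        -> int modeset;     */
	/* param(bool, enable_gvt, false, 0400) -> bool enable_gvt; */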
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f631f6d21127..2c80a0194c80 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
+#include <drm/i915_pciids.h>
#include "display/intel_fbdev.h"
@@ -615,7 +616,8 @@ static const struct intel_device_info chv_info = {
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
- .ddb_size = 896
+ .ddb_size = 896, \
+ .num_supported_dbuf_slices = 1
#define SKL_PLATFORM \
GEN9_FEATURES, \
@@ -650,6 +652,7 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
+ .num_supported_dbuf_slices = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
@@ -774,6 +777,7 @@ static const struct intel_device_info cnl_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
+ .num_supported_dbuf_slices = 2, \
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
@@ -819,11 +823,9 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
- .has_rps = false, /* XXX disabled for debugging */
};
#define GEN12_DGFX_FEATURES \
@@ -928,13 +930,6 @@ static bool force_probe(u16 device_id, const char *devices)
char *s, *p, *tok;
bool ret;
- /* FIXME: transitional */
- if (i915_modparams.alpha_support) {
- DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n",
- device_id);
- return true;
- }
-
if (!devices || !*devices)
return false;
@@ -968,7 +963,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (intel_info->require_force_probe &&
!force_probe(pdev->device, i915_modparams.force_probe)) {
- DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n"
+ dev_info(&pdev->dev,
+ "Your graphics device %04x is not properly supported by the driver in this\n"
"kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
"module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
"or (recommended) check for kernel updates.\n",
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3b6b913bd27a..551be589d6f4 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -555,8 +555,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aging_tail = hw_tail;
stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
- hw_tail);
+ drm_err(&stream->perf->i915->drm,
+ "Ignoring spurious out of range OA buffer tail pointer = %x\n",
+ hw_tail);
}
}
@@ -686,7 +687,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -718,10 +719,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* only be incremented by multiples of the report size (notably also
* all a power of two).
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
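The check rejects any head/tail pair that is out of range or not report-aligned; because OA_BUFFER_SIZE and the report size are both powers of two, valid pointers only ever advance in report_size steps. A worked example with assumed sizes:

	/* assume OA_BUFFER_SIZE = 16 MiB and report_size = 256 */
	/* head = 0x1000, tail = 0x1100 -> in range and aligned: accepted */
	/* head = 0x1001                -> head % report_size != 0: -EIO  */
	/* tail = 16 MiB + 256          -> tail > OA_BUFFER_SIZE:   -EIO  */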
@@ -742,8 +744,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* here would imply a driver bug that would result
* in an overrun.
*/
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ if (drm_WARN_ON(&uncore->i915->drm,
+ (OA_BUFFER_SIZE - head) < report_size)) {
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -896,7 +900,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
i915_reg_t oastatus_reg;
int ret;
- if (WARN_ON(!stream->oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
@@ -986,7 +990,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -1015,10 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* only be incremented by multiples of the report size (notably also
* all a power of two).
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
@@ -1036,8 +1041,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* here would imply a driver bug that would result
* in an overrun.
*/
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ if (drm_WARN_ON(&uncore->i915->drm,
+ (OA_BUFFER_SIZE - head) < report_size)) {
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -1110,7 +1117,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
u32 oastatus1;
int ret;
- if (WARN_ON(!stream->oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
@@ -1319,7 +1326,13 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 12: {
stream->specific_ctx_id_mask =
((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
- stream->specific_ctx_id = stream->specific_ctx_id_mask;
+ /*
+ * Pick an unused context id
+ * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
+ * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
+ */
+ stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+ BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
break;
}
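Rather than reusing the mask as the id, gen12 now claims a software context id no live context can hold: tags 0..NUM_CONTEXT_TAG-1 are in use and 0x7ff marks the idle context, leaving 0x7fe free. A hedged sketch of the filter this id/mask pair drives (report_ctx_id is an assumed name for the context field decoded from an OA report):

	if ((report_ctx_id & stream->specific_ctx_id_mask) ==
	    stream->specific_ctx_id) {
		/* report belongs to the monitored context */
	}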
@@ -1327,11 +1340,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
MISSING_CASE(INTEL_GEN(ce->engine->i915));
}
- ce->tag = stream->specific_ctx_id_mask;
+ ce->tag = stream->specific_ctx_id;
- DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- stream->specific_ctx_id,
- stream->specific_ctx_id_mask);
+ drm_dbg(&stream->perf->i915->drm,
+ "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
return 0;
}
@@ -1391,8 +1405,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
+ *
+ * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
*/
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -1575,11 +1591,12 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
int ret;
- if (WARN_ON(stream->oa_buffer.vma))
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
return -ENODEV;
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
@@ -1587,7 +1604,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
- DRM_ERROR("Failed to allocate OA buffer\n");
+ drm_err(&i915->drm, "Failed to allocate OA buffer\n");
return PTR_ERR(bo);
}
@@ -1669,7 +1686,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
bo = i915_gem_object_create_internal(i915, 4096);
if (IS_ERR(bo)) {
- DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+ drm_err(&i915->drm,
+ "Failed to allocate NOA wait batchbuffer\n");
return PTR_ERR(bo);
}
@@ -2184,7 +2202,9 @@ static int gen8_modify_self(struct intel_context *ce,
struct i915_request *rq;
int err;
+ intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2653,7 +2673,8 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen8_oa_disable(struct i915_perf_stream *stream)
@@ -2664,7 +2685,8 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen12_oa_disable(struct i915_perf_stream *stream)
@@ -2676,7 +2698,16 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
GEN12_OAG_OACONTROL,
GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
+
+ intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+ if (intel_wait_for_register(uncore,
+ GEN12_OA_TLB_INV_CR,
+ 1, 0,
+ 50))
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA tlb invalidate timed out\n");
}
/**
@@ -2740,6 +2771,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
struct i915_perf *perf = stream->perf;
int format_size;
int ret;
@@ -2796,7 +2828,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->sample_size += format_size;
stream->oa_buffer.format_size = format_size;
- if (WARN_ON(stream->oa_buffer.format_size == 0))
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
return -EINVAL;
stream->hold_preemption = props->hold_preemption;
@@ -2849,7 +2881,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
- perf->exclusive_stream = stream;
+ WRITE_ONCE(perf->exclusive_stream, stream);
ret = i915_perf_stream_enable_sync(stream);
if (ret) {
@@ -2869,7 +2901,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -2895,12 +2927,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
{
struct i915_perf_stream *stream;
- /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
-
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.exclusive_stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+ stream = READ_ONCE(engine->i915->perf.exclusive_stream);
/*
* For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
* is already doing that, so nothing to be done for gen12 here.
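exclusive_stream is now published with WRITE_ONCE() and sampled with READ_ONCE(), so the lockless read in this function is well defined. The pairing, condensed from the hunks above:

	/* publish (stream init) / retract (stream destroy or error path): */
	WRITE_ONCE(perf->exclusive_stream, stream); /* or NULL on teardown */
	/* lockless sample in i915_oa_init_reg_state(): */
	stream = READ_ONCE(engine->i915->perf.exclusive_stream);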
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index aa729d04abe2..2c062534eac1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -448,7 +448,7 @@ static void engine_event_destroy(struct perf_event *event)
engine = intel_engine_lookup_user(i915,
engine_event_class(event),
engine_event_instance(event));
- if (WARN_ON_ONCE(!engine))
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine))
return;
if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
@@ -584,7 +584,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
engine_event_class(event),
engine_event_instance(event));
- if (WARN_ON_ONCE(!engine)) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
/* Do nothing */
} else if (sample == I915_SAMPLE_BUSY &&
intel_engine_supports_stats(engine)) {
@@ -1188,7 +1188,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
if (!pmu->base.event_init)
return;
- WARN_ON(pmu->enable);
+ drm_WARN_ON(&i915->drm, pmu->enable);
hrtimer_cancel(&pmu->timer);
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index f1d6cad0d7d5..941f0c14037c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -10,7 +10,7 @@
#include <linux/hrtimer.h>
#include <linux/perf_event.h>
#include <linux/spinlock_types.h>
-#include <drm/i915_drm.h>
+#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3575fd30756b..59e64acc2c56 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -693,6 +693,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+
/* Gen12 OAR unit */
#define GEN12_OAR_OACONTROL _MMIO(0x2960)
#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
@@ -2626,6 +2628,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IPEIR_I965 _MMIO(0x2064)
#define IPEHR_I965 _MMIO(0x2068)
#define GEN7_SC_INSTDONE _MMIO(0x7100)
+#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
+#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
@@ -2639,6 +2643,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
#define RING_IPEIR(base) _MMIO((base) + 0x64)
#define RING_IPEHR(base) _MMIO((base) + 0x68)
+#define RING_EIR(base) _MMIO((base) + 0xb0)
+#define RING_EMR(base) _MMIO((base) + 0xb4)
+#define RING_ESR(base) _MMIO((base) + 0xb8)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
@@ -2860,6 +2867,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
#define MBUS_ABOX_CTL _MMIO(0x45038)
+#define MBUS_ABOX1_CTL _MMIO(0x45048)
+#define MBUS_ABOX2_CTL _MMIO(0x4504C)
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -3088,7 +3097,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
-#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
+#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
#define GT_RENDER_USER_INTERRUPT (1 << 0)
@@ -3160,6 +3169,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
+#define GEN12_FF_TESSELATION_DOP_GATE_DISABLE BIT(19)
#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16)
#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16)
@@ -3277,6 +3287,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE (1 << 31)
@@ -3743,8 +3754,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
-#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c)
-
/* Clocking configuration register */
#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -4124,6 +4133,9 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
+#define TGL_VRH_GATING_DIS REG_BIT(31)
+
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
@@ -4851,16 +4863,6 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON REG_BIT(31)
-
-#define _PP_CONTROL_1 0xc7204
-#define _PP_CONTROL_2 0xc7304
-#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
- _PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
-#define VDD_OVERRIDE_FORCE REG_BIT(3)
-#define BACKLIGHT_ENABLE REG_BIT(2)
-#define PWR_DOWN_ON_RESET REG_BIT(1)
-#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -4921,6 +4923,7 @@ enum {
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
+#define PFIT_PIPE(pipe) ((pipe) << 29)
#define VERT_INTERP_DISABLE (0 << 10)
#define VERT_INTERP_BILINEAR (1 << 10)
#define VERT_INTERP_MASK (3 << 10)
@@ -5870,7 +5873,6 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
@@ -5879,6 +5881,7 @@ enum {
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
#define PIPEMISC_DITHER_8_BPC (0 << 5)
#define PIPEMISC_DITHER_10_BPC (1 << 5)
@@ -7745,9 +7748,9 @@ enum {
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define DISP_DATA_PARTITION_5_6 (1 << 6)
#define DISP_IPC_ENABLE (1 << 3)
-#define DBUF_CTL _MMIO(0x45008)
-#define DBUF_CTL_S1 _MMIO(0x45008)
-#define DBUF_CTL_S2 _MMIO(0x44FE8)
+#define _DBUF_CTL_S1 0x45008
+#define _DBUF_CTL_S2 0x44FE8
+#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
#define DBUF_POWER_REQUEST (1 << 31)
#define DBUF_POWER_STATE (1 << 30)
#define GEN7_MSG_CTL _MMIO(0x45010)
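DBUF_CTL is now indexed by slice through _PICK_EVEN(), which extrapolates linearly from the first two addresses, i.e. _PICK_EVEN(i, a, b) == a + i * (b - a). Expansion sketch:

	/* DBUF_CTL_S(0) -> _MMIO(0x45008)  (slice 1) */
	/* DBUF_CTL_S(1) -> _MMIO(0x44FE8)  (slice 2) */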
@@ -7767,6 +7770,7 @@ enum {
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
+#define CNL_DELAY_PMRSP (1 << 22)
#define MASK_WAKEMEM (1 << 13)
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
@@ -8988,6 +8992,8 @@ enum {
#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
#define GEN7_PCODE_TIMEOUT 0x2
#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
+#define GEN11_PCODE_LOCKED 0x6
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
@@ -9136,12 +9142,19 @@ enum {
#define THROTTLE_12_5 (7 << 2)
#define DISABLE_EARLY_EOT (1 << 1)
-#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1 << 0)
#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9242,6 +9255,10 @@ enum {
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
@@ -10533,13 +10550,13 @@ enum skl_power_gate {
#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
-#define _PIPE_WM_LINETIME_A 0x45270
-#define _PIPE_WM_LINETIME_B 0x45274
-#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
-#define PIPE_WM_LINETIME_MASK (0x1ff)
-#define PIPE_WM_LINETIME_TIME(x) ((x))
-#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16)
+#define _WM_LINETIME_A 0x45270
+#define _WM_LINETIME_B 0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
+#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
+#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
/* SFUSE_STRAP */
#define SFUSE_STRAP _MMIO(0xc2014)
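The open-coded shift/mask pairs become REG_GENMASK()/REG_FIELD_PREP(), which shifts the value into place and can catch out-of-range constants at build time. For example:

	/* HSW_LINETIME_MASK = REG_GENMASK(8, 0), i.e. bits 8:0 */
	/* HSW_LINETIME(0x42)     -> 0x00000042                 */
	/* HSW_IPS_LINETIME(0x42) -> 0x00420000 (bits 24:16)    */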
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a18b2a244706..c0df71d7d0ff 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -51,7 +51,6 @@ struct execute_cb {
static struct i915_global_request {
struct i915_global base;
struct kmem_cache *slab_requests;
- struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_execute_cbs;
} global;
@@ -203,6 +202,19 @@ static void free_capture_list(struct i915_request *request)
}
}
+static void __i915_request_fill(struct i915_request *rq, u8 val)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset(vaddr + head, val, rq->ring->size - head);
+ head = 0;
+ }
+ memset(vaddr + head, val, rq->postfix - head);
+}
+
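__i915_request_fill() poisons a request's payload, splitting the memset when the request wrapped the ring. A worked example with assumed sizes:

	/* assume ring->size = 4096, rq->infix = 3968, rq->postfix = 128 */
	/* rq->postfix < head, so the request wrapped the ring:          */
	memset(vaddr + 3968, val, 4096 - 3968); /* tail of the ring      */
	memset(vaddr + 0,    val, 128);         /* wrapped remainder     */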
static void remove_from_engine(struct i915_request *rq)
{
struct intel_engine_cs *engine, *locked;
@@ -247,6 +259,9 @@ bool i915_request_retire(struct i915_request *rq)
*/
GEM_BUG_ON(!list_is_first(&rq->link,
&i915_request_timeline(rq)->requests));
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ /* Poison before we release our space in the ring */
+ __i915_request_fill(rq, POISON_FREE);
rq->ring->head = rq->postfix;
/*
@@ -275,7 +290,7 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
- list_del_rcu(&rq->link);
+ __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
@@ -348,6 +363,50 @@ __await_execution(struct i915_request *rq,
return 0;
}
+static bool fatal_error(int error)
+{
+ switch (error) {
+ case 0: /* not an error! */
+ case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
+ case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+ return false;
+ default:
+ return true;
+ }
+}
+
+void __i915_request_skip(struct i915_request *rq)
+{
+ GEM_BUG_ON(!fatal_error(rq->fence.error));
+
+ if (rq->infix == rq->postfix)
+ return;
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ __i915_request_fill(rq, 0);
+ rq->infix = rq->postfix;
+}
+
+void i915_request_set_error_once(struct i915_request *rq, int error)
+{
+ int old;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+
+ if (i915_request_signaled(rq))
+ return;
+
+ old = READ_ONCE(rq->fence.error);
+ do {
+ if (fatal_error(old))
+ return;
+ } while (!try_cmpxchg(&rq->fence.error, &old, error));
+}
+
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -377,8 +436,10 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
- if (intel_context_is_banned(request->context))
- i915_request_skip(request, -EIO);
+ if (unlikely(intel_context_is_banned(request->context)))
+ i915_request_set_error_once(request, -EIO);
+ if (unlikely(fatal_error(request->fence.error)))
+ __i915_request_skip(request);
/*
* Are we using semaphores when the gpu is already saturated?
@@ -504,7 +565,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
trace_i915_request_submit(request);
if (unlikely(fence->error))
- i915_request_skip(request, fence->error);
+ i915_request_set_error_once(request, fence->error);
/*
* We need to serialize use of the submit_request() callback
@@ -688,6 +749,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
+ GEM_BUG_ON(i915_request_completed(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -857,7 +919,7 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->engine->saturated;
+ return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
static int
@@ -915,8 +977,16 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+
+ if (!intel_context_use_semaphores(to->context))
+ goto await_fence;
+
+ if (!rcu_access_pointer(from->hwsp_cacheline))
+ goto await_fence;
+
/* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
+ if (already_busywaiting(to) & mask)
goto await_fence;
if (i915_request_await_start(to, from) < 0)
@@ -929,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to,
if (__emit_semaphore_wait(to, from, from->fence.seqno))
goto await_fence;
- to->sched.semaphores |= from->engine->mask;
+ to->sched.semaphores |= mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
@@ -960,12 +1030,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- else if (intel_context_use_semaphores(to->context))
- ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
else
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
if (ret < 0)
return ret;
@@ -1064,6 +1130,8 @@ __i915_request_await_execution(struct i915_request *to,
{
int err;
+ GEM_BUG_ON(intel_context_is_barrier(from->context));
+
/* Submit both requests at the same time */
err = __await_execution(to, from, hook, I915_FENCE_GFP);
if (err)
@@ -1074,14 +1142,45 @@ __i915_request_await_execution(struct i915_request *to,
&from->fence))
return 0;
- /* Ensure both start together [after all semaphores in signal] */
- if (intel_engine_has_semaphores(to->engine))
- err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
- else
- err = i915_request_await_start(to, from);
+ /*
+ * Wait until the start of this request.
+ *
+ * The execution cb fires when we submit the request to HW. But in
+ * many cases this may be long before the request itself is ready to
+ * run (consider that we submit 2 requests for the same context, where
+ * the request of interest is behind an indefinite spinner). So we hook
+ * up to both to reduce our queues and keep the execution lag minimised
+ * in the worst case, though we hope that the await_start is elided.
+ */
+ err = i915_request_await_start(to, from);
if (err < 0)
return err;
+ /*
+ * Ensure both start together [after all semaphores in signal]
+ *
+ * Now that we are queued to the HW at roughly the same time (thanks
+ * to the execute cb) and are ready to run at roughly the same time
+ * (thanks to the await start), our signaler may still be indefinitely
+ * delayed by waiting on a semaphore from a remote engine. If our
+ * signaler depends on a semaphore, so indirectly do we, and we do not
+ * want to start our payload until our signaler also starts theirs.
+ * So we wait.
+ *
+ * However, there is also a second condition for which we need to wait
+ * for the precise start of the signaler. Consider that the signaler
+ * was submitted in a chain of requests following another context
+ * (with just an ordinary intra-engine fence dependency between the
+ * two). In this case the signaler is queued to HW, but not for
+ * immediate execution, and so we must wait until it reaches the
+ * active slot.
+ */
+ if (intel_engine_has_semaphores(to->engine)) {
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ if (err < 0)
+ return err;
+ }
+
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
err = i915_sched_node_add_dependency(&to->sched, &from->sched);
@@ -1202,31 +1301,6 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
-void i915_request_skip(struct i915_request *rq, int error)
-{
- void *vaddr = rq->ring->vaddr;
- u32 head;
-
- GEM_BUG_ON(!IS_ERR_VALUE((long)error));
- dma_fence_set_error(&rq->fence, error);
-
- if (rq->infix == rq->postfix)
- return;
-
- /*
- * As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = rq->infix;
- if (rq->postfix < head) {
- memset(vaddr + head, 0, rq->ring->size - head);
- head = 0;
- }
- memset(vaddr + head, 0, rq->postfix - head);
- rq->infix = rq->postfix;
-}
-
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
@@ -1256,7 +1330,17 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
if (prev && !i915_request_completed(prev)) {
- if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ /*
+ * The requests are supposed to be kept in order. However,
+ * we need to be wary in case the timeline->last_request
+ * is used as a barrier for external modification to this
+ * context.
+ */
+ GEM_BUG_ON(prev->context == rq->context &&
+ i915_seqno_passed(prev->fence.seqno,
+ rq->fence.seqno));
+
+ if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
@@ -1340,39 +1424,23 @@ void i915_request_add(struct i915_request *rq)
{
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
- struct i915_request *prev;
+ struct i915_gem_context *ctx;
lockdep_assert_held(&tl->mutex);
lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
+ __i915_request_commit(rq);
- prev = __i915_request_commit(rq);
-
- if (rcu_access_pointer(rq->context->gem_context))
- attr = i915_request_gem_context(rq)->sched;
+ /* XXX placeholder for selftests */
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ attr = ctx->sched;
+ rcu_read_unlock();
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distant past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
@@ -1380,32 +1448,10 @@ void i915_request_add(struct i915_request *rq)
__i915_request_queue(rq, &attr);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
- /*
- * In typical scenarios, we do not expect the previous request on
- * the timeline to be still tracked by timeline->last_request if it
- * has been completed. If the completed request is still here, that
- * implies that request retirement is a long way behind submission,
- * suggesting that we haven't been retiring frequently enough from
- * the combination of retire-before-alloc, waiters and the background
- * retirement worker. So if the last request on this timeline was
- * already completed, do a catch up pass, flushing the retirement queue
- * up to this client. Since we have now moved the heaviest operations
- * during retirement onto secondary workers, such as freeing objects
- * or contexts, retiring a bunch of requests is mostly list management
- * (and cache misses), and so we should not be overly penalizing this
- * client by performing excess work, though we may still be performing
- * work on behalf of others -- but instead we should benefit from
- * improved resource management. (Well, that's the theory at least.)
- */
- if (prev &&
- i915_request_completed(prev) &&
- rcu_access_pointer(prev->timeline) == tl)
- i915_request_retire_upto(prev);
-
mutex_unlock(&tl->mutex);
}
-static unsigned long local_clock_us(unsigned int *cpu)
+static unsigned long local_clock_ns(unsigned int *cpu)
{
unsigned long t;
@@ -1422,7 +1468,7 @@ static unsigned long local_clock_us(unsigned int *cpu)
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
- t = local_clock() >> 10;
+ t = local_clock();
put_cpu();
return t;
@@ -1432,15 +1478,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
unsigned int this_cpu;
- if (time_after(local_clock_us(&this_cpu), timeout))
+ if (time_after(local_clock_ns(&this_cpu), timeout))
return true;
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request * const rq,
- int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq, int state)
{
+ unsigned long timeout_ns;
unsigned int cpu;
/*
@@ -1468,7 +1514,8 @@ static bool __i915_spin_request(const struct i915_request * const rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- timeout_us += local_clock_us(&cpu);
+ timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+ timeout_ns += local_clock_ns(&cpu);
do {
if (i915_request_completed(rq))
return true;
@@ -1476,7 +1523,7 @@ static bool __i915_spin_request(const struct i915_request * const rq,
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout_us, cpu))
+ if (busywait_stop(timeout_ns, cpu))
break;
cpu_relax();
@@ -1562,8 +1609,8 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
- __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+ if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+ __i915_spin_request(rq, state)) {
dma_fence_signal(&rq->fence);
goto out;
}
@@ -1598,6 +1645,8 @@ long i915_request_wait(struct i915_request *rq,
break;
}
+ intel_engine_flush_submission(rq->engine);
+
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
@@ -1608,7 +1657,6 @@ long i915_request_wait(struct i915_request *rq,
break;
}
- intel_engine_flush_submission(rq->engine);
timeout = io_schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
@@ -1628,14 +1676,12 @@ out:
static void i915_global_request_shrink(void)
{
- kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_execute_cbs);
kmem_cache_shrink(global.slab_requests);
}
static void i915_global_request_exit(void)
{
- kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_execute_cbs);
kmem_cache_destroy(global.slab_requests);
}
@@ -1665,17 +1711,9 @@ int __init i915_global_request_init(void)
if (!global.slab_execute_cbs)
goto err_requests;
- global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!global.slab_dependencies)
- goto err_execute_cbs;
-
i915_global_register(&global.base);
return 0;
-err_execute_cbs:
- kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
kmem_cache_destroy(global.slab_requests);
return -ENOMEM;
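
One of the new helpers above, __i915_request_fill(), poisons the span between a request's infix and postfix even when that span wraps past the end of the ring, by splitting the fill into at most two memset() calls. A self-contained sketch of the wraparound technique, using a hypothetical ring type rather than the i915 structures:

#include <stdint.h>
#include <string.h>

struct ring_sketch {
        void *vaddr;        /* base of the ring backing store */
        uint32_t size;      /* total size in bytes */
};

/* Fill [head, tail) in ring space with 'val'. If tail has already
 * wrapped below head, the span is split at the end of the buffer,
 * mirroring __i915_request_fill(rq, POISON_FREE).
 */
static void ring_fill(struct ring_sketch *ring,
                      uint32_t head, uint32_t tail, uint8_t val)
{
        if (tail < head) {
                memset((char *)ring->vaddr + head, val, ring->size - head);
                head = 0;
        }
        memset((char *)ring->vaddr + head, val, tail - head);
}
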
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index fccc339949ec..3c552bfea67a 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -305,6 +305,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void __i915_request_skip(struct i915_request *rq);
+
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
@@ -354,8 +357,6 @@ void i915_request_add(struct i915_request *rq);
bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
-void i915_request_skip(struct i915_request *request, int error);
-
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -397,7 +398,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
- return READ_ONCE(*rq->hwsp_seqno);
+ const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
+
+ return READ_ONCE(*hwsp);
}
/**
@@ -481,7 +484,7 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
}
/**
- * i915_request_is_running - check if the request is ready for execution
+ * i915_request_is_ready - check if the request is ready for execution
* @rq: the request
*
* Upon construction, the request is instructed to wait upon various
@@ -511,7 +514,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
static inline void i915_request_mark_complete(struct i915_request *rq)
{
- rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
+ WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
+ (u32 *)&rq->fence.seqno);
}
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
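
The __hwsp_seqno()/i915_request_mark_complete() pairing above is the reason for the double READ_ONCE(): once the pointer itself is republished with WRITE_ONCE(), readers must snapshot the pointer before dereferencing it. A rough C11 analogue of the pattern, with relaxed atomics and a volatile read standing in for WRITE_ONCE()/READ_ONCE():

#include <stdatomic.h>
#include <stdint.h>

struct req_sketch {
        /* Pointer republished by the writer at completion time. */
        _Atomic(uint32_t *) hwsp_seqno;
        uint32_t fence_seqno;
};

static uint32_t read_seqno(struct req_sketch *rq)
{
        /* Load the pointer once, then dereference that snapshot; a
         * plain '*rq->hwsp_seqno' would let the compiler reload the
         * pointer between the two accesses.
         */
        uint32_t *hwsp = atomic_load_explicit(&rq->hwsp_seqno,
                                              memory_order_relaxed);
        return *(volatile uint32_t *)hwsp;
}

static void mark_complete(struct req_sketch *rq)
{
        /* Decouple from the shared page: point at our own seqno. */
        atomic_store_explicit(&rq->hwsp_seqno, &rq->fence_seqno,
                              memory_order_relaxed);
}
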
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 34b654b4e58a..68b06a7ba667 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
+ engine->execlists.queue_priority_hint = prio;
+
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
goto unlock;
- engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -227,10 +228,10 @@ unlock:
static void __i915_schedule(struct i915_sched_node *node,
const struct i915_sched_attr *attr)
{
+ const int prio = max(attr->priority, node->attr.priority);
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
- const int prio = attr->priority;
struct sched_cache cache;
LIST_HEAD(dfs);
@@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node,
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (prio <= READ_ONCE(node->attr.priority))
- return;
-
if (node_signaled(node))
return;
@@ -324,7 +322,7 @@ static void __i915_schedule(struct i915_sched_node *node,
GEM_BUG_ON(node_to_request(node)->engine != engine);
- node->attr.priority = prio;
+ WRITE_ONCE(node->attr.priority, prio);
/*
* Once the request is ready, it will be placed into the
@@ -363,6 +361,9 @@ static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
struct i915_sched_attr attr = node->attr;
+ if (attr.priority & bump)
+ return;
+
attr.priority |= bump;
__i915_schedule(node, &attr);
}
@@ -433,7 +434,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
/* All set, now publish. Beware the lockless walkers. */
- list_add(&dep->signal_link, &node->signalers_list);
+ list_add_rcu(&dep->signal_link, &node->signalers_list);
list_add_rcu(&dep->wait_link, &signal->waiters_list);
/*
@@ -486,7 +487,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->wait_link);
+ list_del_rcu(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -497,7 +498,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->signal_link);
+ list_del_rcu(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -526,7 +527,8 @@ static struct i915_global_scheduler global = { {
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN);
+ SLAB_HWCACHE_ALIGN |
+ SLAB_TYPESAFE_BY_RCU);
if (!global.slab_dependencies)
return -ENOMEM;
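
Moving the dependency lists to list_add_rcu()/list_del_rcu() and marking the slab SLAB_TYPESAFE_BY_RCU lets lockless walkers traverse signalers_list without schedule_lock. A hedged, kernel-style sketch of such a walker (sketch_max_signaler_prio() is hypothetical and not part of this patch; the list and RCU primitives are the real API):

static int sketch_max_signaler_prio(struct i915_sched_node *node)
{
        struct i915_dependency *dep;
        int prio = I915_PRIORITY_INVALID;

        rcu_read_lock();
        list_for_each_entry_rcu(dep, &node->signalers_list, signal_link) {
                int p = READ_ONCE(dep->signaler->attr.priority);

                /* Entries may be freed and recycled under us
                 * (SLAB_TYPESAFE_BY_RCU): treat everything read here
                 * as a stale hint, never as ground truth.
                 */
                if (p > prio)
                        prio = p;
        }
        rcu_read_unlock();

        return prio;
}
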
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8812cdd9007f..ed2be3489f8e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,8 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 51ba97daf2a0..a3d38e089b6e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -211,10 +211,21 @@ void i915_sw_fence_complete(struct i915_sw_fence *fence)
__i915_sw_fence_complete(fence, NULL);
}
-void i915_sw_fence_await(struct i915_sw_fence *fence)
+bool i915_sw_fence_await(struct i915_sw_fence *fence)
{
- debug_fence_assert(fence);
- WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+ int pending;
+
+ /*
+ * It is only safe to add a new await to the fence while it has
+ * not yet been signaled (i.e. there are still existing signalers).
+ */
+ pending = atomic_read(&fence->pending);
+ do {
+ if (pending < 1)
+ return false;
+ } while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));
+
+ return true;
}
void __i915_sw_fence_init(struct i915_sw_fence *fence,
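
The reworked i915_sw_fence_await() is the classic increment-unless-terminal pattern (compare kref_get_unless_zero()): once pending has dropped below 1 the fence has signaled, and adding a new await would be unsafe. The loop in isolation, as a runnable C11 sketch:

#include <stdatomic.h>
#include <stdbool.h>

/* Atomically bump 'pending' by one, but only while it is still >= 1;
 * fail once the count has reached its terminal value.
 */
static bool await_sketch(atomic_int *pending)
{
        int cur = atomic_load(pending);

        do {
                if (cur < 1)
                        return false;
        } while (!atomic_compare_exchange_weak(pending, &cur, cur + 1));

        return true;
}

On failure atomic_compare_exchange_weak() refreshes 'cur' with the observed value, so the loop re-evaluates the terminal check against fresh state on every retry.
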
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 19e806ce43bc..30a863353ee6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -91,7 +91,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp);
-void i915_sw_fence_await(struct i915_sw_fence *fence);
+bool i915_sw_fence_await(struct i915_sw_fence *fence);
void i915_sw_fence_complete(struct i915_sw_fence *fence);
static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 39c79e1c5b52..ed69b5d4a375 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -43,7 +43,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && i915->drm.open_count == 0;
+ return i915 && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 0cef3130db05..45d32ef42787 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
+#include "gt/sysfs_engines.h"
#include "i915_drv.h"
#include "i915_sysfs.h"
@@ -525,7 +526,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- DRM_DEBUG_DRIVER("Resetting error state\n");
+ drm_dbg(&dev_priv->drm, "Resetting error state\n");
i915_reset_error_state(dev_priv);
return count;
@@ -564,31 +565,36 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
ret = sysfs_merge_group(&kdev->kobj,
&rc6_attr_group);
if (ret)
- DRM_ERROR("RC6 residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "RC6 residency sysfs setup failed\n");
}
if (HAS_RC6p(dev_priv)) {
ret = sysfs_merge_group(&kdev->kobj,
&rc6p_attr_group);
if (ret)
- DRM_ERROR("RC6p residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "RC6p residency sysfs setup failed\n");
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = sysfs_merge_group(&kdev->kobj,
&media_rc6_attr_group);
if (ret)
- DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "Media RC6 residency sysfs setup failed\n");
}
#endif
if (HAS_L3_DPF(dev_priv)) {
ret = device_create_bin_file(kdev, &dpf_attrs);
if (ret)
- DRM_ERROR("l3 parity sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "l3 parity sysfs setup failed\n");
if (NUM_L3_SLICES(dev_priv) > 1) {
ret = device_create_bin_file(kdev,
&dpf_attrs_1);
if (ret)
- DRM_ERROR("l3 parity slice 1 setup failed\n");
+ drm_err(&dev_priv->drm,
+ "l3 parity slice 1 setup failed\n");
}
}
@@ -598,9 +604,11 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 6)
ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
if (ret)
- DRM_ERROR("RPS sysfs setup failed\n");
+ drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
i915_setup_error_capture(kdev);
+
+ intel_engines_add_sysfs(dev_priv);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 233a97a2c276..bc854ad60954 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -339,6 +339,68 @@ TRACE_EVENT(intel_disable_plane,
__entry->frame, __entry->scanline)
);
+/* fbc */
+
+TRACE_EVENT(intel_fbc_activate,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_deactivate,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_nuke,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
/* pipe updates */
TRACE_EVENT(intel_pipe_update_start,
@@ -738,7 +800,7 @@ TRACE_EVENT(i915_request_in,
__field(u16, instance)
__field(u32, seqno)
__field(u32, port)
- __field(u32, prio)
+ __field(s32, prio)
),
TP_fast_assign(
@@ -751,7 +813,7 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->ctx, __entry->seqno,
__entry->prio, __entry->port)
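
The prio field change from u32 to s32 (and %u to %d) matters because i915 priorities may be negative; printed unsigned, a priority of -1 would show up as 4294967295 in the trace. A quick illustration:

#include <stdio.h>

int main(void)
{
        int prio = -1;  /* e.g. a below-normal scheduling priority */

        printf("as %%u: %u\n", (unsigned int)prio); /* 4294967295 */
        printf("as %%d: %d\n", prio);               /* -1 */
        return 0;
}
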
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 632d6953c78d..029854ae65fc 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,7 +8,6 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
void
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index d34141f7dcd8..03a73d2bd50d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -34,6 +34,8 @@
struct drm_i915_private;
struct timer_list;
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
#if 0
@@ -100,12 +102,24 @@ bool i915_error_injected(void);
typeof(max) max__ = (max); \
(void)(&start__ == &size__); \
(void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
+ start__ >= max__ || size__ > max__ - start__; \
})
#define range_overflows_t(type, start, size, max) \
range_overflows((type)(start), (type)(size), (type)(max))
+#define range_overflows_end(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_end_t(type, start, size, max) \
+ range_overflows_end((type)(start), (type)(size), (type)(max))
+
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
@@ -246,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first);
}
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
/*
* Wait until the work is finally complete, even if it tries to postpone
* by requeueing itself. Note, that if the worker never cancels itself,
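
The range_overflows() tightening above (start__ > max__ becomes start__ >= max__), together with the new range_overflows_end(), distinguishes "start is an offset that must lie inside [0, max)" from "the range may end exactly at max". A worked boundary case, using local re-implementations of the two checks:

#include <stdbool.h>
#include <stdio.h>

static bool overflows(unsigned start, unsigned size, unsigned max)
{
        return start >= max || size > max - start; /* start must be < max */
}

static bool overflows_end(unsigned start, unsigned size, unsigned max)
{
        return start > max || size > max - start;  /* start == max ok iff size == 0 */
}

int main(void)
{
        /* An empty range beginning exactly at the limit: rejected as an
         * offset, accepted as an end marker.
         */
        printf("%d %d\n", overflows(4096, 0, 4096),      /* 1 */
                          overflows_end(4096, 0, 4096)); /* 0 */
        return 0;
}
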
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 968be26735c5..70fca72f5162 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,6 +21,8 @@
* SOFTWARE.
*/
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -51,13 +53,13 @@
*/
/**
- * i915_detect_vgpu - detect virtual GPU
+ * intel_vgpu_detect - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_detect_vgpu(struct drm_i915_private *dev_priv)
+void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
@@ -77,7 +79,8 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
if (!shared_area) {
- DRM_ERROR("failed to map MMIO bar to check for VGT\n");
+ drm_err(&dev_priv->drm,
+ "failed to map MMIO bar to check for VGT\n");
return;
}
@@ -87,7 +90,7 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
version_major = readw(shared_area + vgtif_offset(version_major));
if (version_major < VGT_VERSION_MAJOR) {
- DRM_INFO("VGT interface version mismatch!\n");
+ drm_info(&dev_priv->drm, "VGT interface version mismatch!\n");
goto out;
}
@@ -95,17 +98,42 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
dev_priv->vgpu.active = true;
mutex_init(&dev_priv->vgpu.lock);
- DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+ drm_info(&dev_priv->drm, "Virtual GPU for Intel GVT-g detected.\n");
out:
pci_iounmap(pdev, shared_area);
}
+void intel_vgpu_register(struct drm_i915_private *i915)
+{
+ /*
+ * Notify a valid surface after modesetting, when running inside a VM.
+ */
+ if (intel_vgpu_active(i915))
+ intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
+ VGT_DRV_DISPLAY_READY);
+}
+
+bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.active;
+}
+
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
struct _balloon_info_ {
/*
* There are up to 2 regions per mappable/unmappable graphic
@@ -120,13 +148,15 @@ static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
if (!drm_mm_node_allocated(node))
return;
- DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
- node->start,
- node->start + node->size,
- node->size / 1024);
+ drm_dbg(&dev_priv->drm,
+ "deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
+ node->start,
+ node->start + node->size,
+ node->size / 1024);
ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
@@ -141,12 +171,13 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
*/
void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
int i;
if (!intel_vgpu_active(ggtt->vm.i915))
return;
- DRM_DEBUG("VGT deballoon.\n");
+ drm_dbg(&dev_priv->drm, "VGT deballoon.\n");
for (i = 0; i < 4; i++)
vgt_deballoon_space(ggtt, &bl_info.space[i]);
@@ -156,13 +187,15 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
unsigned long size = end - start;
int ret;
if (start >= end)
return -EINVAL;
- DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+ drm_info(&dev_priv->drm,
+ "balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
@@ -219,7 +252,8 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
*/
int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
@@ -241,16 +275,18 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
- DRM_INFO("VGT ballooning configuration:\n");
- DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+ drm_info(&dev_priv->drm, "VGT ballooning configuration:\n");
+ drm_info(&dev_priv->drm,
+ "Mappable graphic memory: base 0x%lx size %ldKiB\n",
mappable_base, mappable_size / 1024);
- DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+ drm_info(&dev_priv->drm,
+ "Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
if (mappable_end > ggtt->mappable_end ||
unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_end) {
- DRM_ERROR("Invalid ballooning configuration!\n");
+ drm_err(&dev_priv->drm, "Invalid ballooning configuration!\n");
return -EINVAL;
}
@@ -287,7 +323,7 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
goto err_below_mappable;
}
- DRM_INFO("VGT balloon successfully\n");
+ drm_info(&dev_priv->drm, "VGT ballooning completed successfully\n");
return 0;
err_below_mappable:
@@ -297,6 +333,6 @@ err_upon_unmappable:
err_upon_mappable:
vgt_deballoon_space(ggtt, &bl_info.space[2]);
err:
- DRM_ERROR("VGT balloon fail\n");
+ drm_err(&dev_priv->drm, "VGT ballooning failed\n");
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 8b3663dad193..ffbb77d08048 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,24 +24,17 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
-#include "i915_drv.h"
-#include "i915_pvinfo.h"
+#include <linux/types.h>
-void i915_detect_vgpu(struct drm_i915_private *dev_priv);
+struct drm_i915_private;
+struct i915_ggtt;
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
-
-static inline bool
-intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
-}
-
-static inline bool
-intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
-}
+void intel_vgpu_detect(struct drm_i915_private *i915);
+bool intel_vgpu_active(struct drm_i915_private *i915);
+void intel_vgpu_register(struct drm_i915_private *i915);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915);
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915);
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915);
int intel_vgt_balloon(struct i915_ggtt *ggtt);
void intel_vgt_deballoon(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4ff380770b32..08699fa069aa 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -294,6 +294,7 @@ struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
struct drm_i915_gem_object *pinned;
+ struct i915_sw_dma_fence_cb cb;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -339,6 +340,25 @@ struct i915_vma_work *i915_vma_work(void)
return vw;
}
+int i915_vma_wait_for_bind(struct i915_vma *vma)
+{
+ int err = 0;
+
+ if (rcu_access_pointer(vma->active.excl.fence)) {
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
+ }
+ }
+
+ return err;
+}
+
/**
* i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
* @vma: VMA to map
@@ -386,6 +406,8 @@ int i915_vma_bind(struct i915_vma *vma,
trace_i915_vma_bind(vma, bind_flags);
if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ struct dma_fence *prev;
+
work->vma = vma;
work->cache_level = cache_level;
work->flags = bind_flags | I915_VMA_ALLOC;
@@ -399,8 +421,14 @@ int i915_vma_bind(struct i915_vma *vma,
* part of the obj->resv->excl_fence as it only affects
* execution and not content or object's backing store lifetime.
*/
- GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
- i915_active_set_exclusive(&vma->active, &work->base.dma);
+ prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
+ if (prev) {
+ __i915_sw_fence_await_dma_fence(&work->base.chain,
+ prev,
+ &work->cb);
+ dma_fence_put(prev);
+ }
+
work->base.dma.error = 0; /* enable the queue_work() */
if (vma->obj) {
@@ -408,7 +436,6 @@ int i915_vma_bind(struct i915_vma *vma,
work->pinned = vma->obj;
}
} else {
- GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
@@ -614,7 +641,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
- GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -892,6 +918,11 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (err)
goto err_fence;
+ if (unlikely(i915_vma_is_closed(vma))) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
bound = atomic_read(&vma->flags);
if (unlikely(bound & I915_VMA_ERROR)) {
err = -ENOMEM;
@@ -977,8 +1008,14 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
do {
err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
- if (err != -ENOSPC)
+ if (err != -ENOSPC) {
+ if (!err) {
+ err = i915_vma_wait_for_bind(vma);
+ if (err)
+ i915_vma_unpin(vma);
+ }
return err;
+ }
/* Unlike i915_vma_pin, we don't take no for an answer! */
flush_idle_contexts(vm->gt);
@@ -1060,6 +1097,7 @@ void i915_vma_release(struct kref *ref)
void i915_vma_parked(struct intel_gt *gt)
{
struct i915_vma *vma, *next;
+ LIST_HEAD(closed);
spin_lock_irq(&gt->closed_lock);
list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
@@ -1071,28 +1109,26 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (i915_vm_tryopen(vm)) {
- list_del_init(&vma->closed_link);
- } else {
+ if (!i915_vm_tryopen(vm)) {
i915_gem_object_put(obj);
- obj = NULL;
+ continue;
}
- spin_unlock_irq(&gt->closed_lock);
+ list_move(&vma->closed_link, &closed);
+ }
+ spin_unlock_irq(&gt->closed_lock);
- if (obj) {
- __i915_vma_put(vma);
- i915_gem_object_put(obj);
- }
+ /* As the GT is held idle, no vma can be reopened as we destroy them */
+ list_for_each_entry_safe(vma, next, &closed, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- i915_vm_close(vm);
+ INIT_LIST_HEAD(&vma->closed_link);
+ __i915_vma_put(vma);
- /* Restart after dropping lock */
- spin_lock_irq(&gt->closed_lock);
- next = list_first_entry(&gt->closed_vma,
- typeof(*next), closed_link);
+ i915_gem_object_put(obj);
+ i915_vm_close(vm);
}
- spin_unlock_irq(&gt->closed_lock);
}
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -1136,7 +1172,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active);
+ err = i915_request_await_active(rq, &vma->active, 0);
if (err)
return err;
@@ -1228,9 +1264,15 @@ int __i915_vma_unbind(struct i915_vma *vma)
* before the unbind, other due to non-strict nature of those
* indirect writes they may end up referencing the GGTT PTE
* after the unbind.
+ *
+ * Note that we may be concurrently poking at the GGTT_WRITE
+ * bit from set-domain, as we mark all GGTT vma associated
+ * with an object. We know this is for another vma, as we
+ * are currently unbinding this one -- so if this vma will be
+ * reused, it will be refaulted and have its dirty bit set
+ * before the next write.
*/
i915_vma_flush_writes(vma);
- GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
ret = i915_vma_revoke_fence(vma);
@@ -1250,7 +1292,8 @@ int __i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma);
}
- atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
+ &vma->flags);
i915_vma_detach(vma);
vma_unbind_pages(vma);
@@ -1272,16 +1315,21 @@ int i915_vma_unbind(struct i915_vma *vma)
/* XXX not always required: nop_clear_range */
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+ /* Optimistic wait before taking the mutex */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto out_rpm;
+
err = mutex_lock_interruptible(&vm->mutex);
if (err)
- return err;
+ goto out_rpm;
err = __i915_vma_unbind(vma);
mutex_unlock(&vm->mutex);
+out_rpm:
if (wakeref)
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-
return err;
}
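
The new i915_vma_wait_for_bind() above follows the standard dma-fence shape: take a reference under the RCU read lock, then sleep outside it. The same shape in isolation (wait_for_published_fence() is a hypothetical wrapper; the dma-fence calls are the real API):

/* The RCU read lock only protects acquiring a reference to a fence
 * that may be replaced concurrently; the (possibly long) wait happens
 * after the lock is dropped.
 */
static int wait_for_published_fence(struct dma_fence __rcu **slot)
{
        struct dma_fence *fence;
        int err = 0;

        rcu_read_lock();
        fence = dma_fence_get_rcu_safe(slot); /* ref or NULL, retry-safe */
        rcu_read_unlock();

        if (fence) {
                err = dma_fence_wait(fence, true /* interruptible */);
                dma_fence_put(fence);
        }

        return err;
}
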
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 02b31a62951e..e1ced1df13e1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -375,6 +375,8 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
+int i915_vma_wait_for_bind(struct i915_vma *vma);
+
static inline int i915_vma_sync(struct i915_vma *vma)
{
/* Wait for the asynchronous bindings and pending GPU reads */
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index e0942efd5236..63831cdb7402 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -273,21 +273,10 @@ struct i915_vma {
struct rb_node obj_node;
struct hlist_node obj_hash;
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
/** This vma's place in the eviction list */
struct list_head evict_link;
struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 6670a0763be2..d7fe12734db8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -23,7 +23,9 @@
*/
#include <drm/drm_print.h>
+#include <drm/i915_pciids.h>
+#include "display/intel_cdclk.h"
#include "intel_device_info.h"
#include "i915_drv.h"
@@ -132,6 +134,7 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
{
sseu_dump(&info->sseu, p);
+ drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
drm_printf(p, "CS timestamp frequency: %u kHz\n",
info->cs_timestamp_frequency_khz);
}
@@ -743,7 +746,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* hclks." (through the “Clocking Configurationâ€
* (“CLKCFGâ€) MCHBAR register)
*/
- return dev_priv->rawclk_freq / 16;
+ return RUNTIME_INFO(dev_priv)->rawclk_freq / 16;
} else if (INTEL_GEN(dev_priv) <= 8) {
/* PRMs say:
*
@@ -974,10 +977,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- DRM_INFO("Display fused off, disabling\n");
+ drm_info(&dev_priv->drm,
+ "Display fused off, disabling\n");
info->pipe_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- DRM_INFO("PipeC fused off\n");
+ drm_info(&dev_priv->drm, "PipeC fused off\n");
info->pipe_mask &= ~BIT(PIPE_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
@@ -1000,8 +1004,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
* in the mask.
*/
if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
- DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
- enabled_mask);
+ drm_err(&dev_priv->drm,
+ "invalid pipe fuse configuration: enabled_mask=0x%x\n",
+ enabled_mask);
else
info->pipe_mask = enabled_mask;
@@ -1036,12 +1041,26 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
gen12_sseu_info_init(dev_priv);
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
- DRM_INFO("Disabling ppGTT for VT-d support\n");
+ drm_info(&dev_priv->drm,
+ "Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
}
+ runtime->rawclk_freq = intel_read_rawclk(dev_priv);
+ drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
+
/* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+ runtime->cs_timestamp_frequency_khz =
+ read_timestamp_frequency(dev_priv);
+ if (runtime->cs_timestamp_frequency_khz) {
+ runtime->cs_timestamp_period_ns =
+ div_u64(1e6, runtime->cs_timestamp_frequency_khz);
+ drm_dbg(&dev_priv->drm,
+ "CS timestamp wraparound in %lldms\n",
+ div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
+ S32_MAX),
+ USEC_PER_SEC));
+ }
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -1084,7 +1103,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
- DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
continue;
}
@@ -1096,8 +1115,8 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
- DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(dev_priv));
+ drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(dev_priv));
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
for (i = 0; i < I915_MAX_VECS; i++) {
@@ -1108,10 +1127,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
- DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
}
}
- DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(dev_priv));
+ drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(dev_priv));
GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
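
The new cs_timestamp_period_ns bookkeeping above is plain unit arithmetic: a frequency of f kHz gives a period of 1e6/f ns, and a signed 32-bit timestamp wraps after S32_MAX ticks. A worked check of the numbers the drm_dbg() would print, assuming a 12 MHz timestamp:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t freq_khz = 12000;                   /* 12 MHz CS timestamp */
        uint32_t period_ns = 1000000u / freq_khz;    /* 83 ns per tick */
        uint64_t wrap_ms = (uint64_t)period_ns * INT32_MAX / 1000000u;

        printf("period=%uns, wraparound in %llums\n",
               period_ns, (unsigned long long)wrap_ms); /* ~178241 ms */
        return 0;
}
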
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 2725cb7fc169..1ecb9df2de91 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -180,6 +180,7 @@ struct intel_device_info {
} display;
u16 ddb_size; /* in blocks */
+ u8 num_supported_dbuf_slices; /* number of DBuf slices */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -215,7 +216,10 @@ struct intel_runtime_info {
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
+ u32 rawclk_freq;
+
u32 cs_timestamp_frequency_khz;
+ u32 cs_timestamp_period_ns;
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
new file mode 100644
index 000000000000..6b922efb1d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_dram.h"
+
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
+struct dram_channel_info {
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
+ bool is_16gb_dimm;
+};
+
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
+{
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
+
+ return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(i915) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(i915, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(i915, &ch->dimm_s,
+ channel, 'S', val >> 16);
+
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+ return -EINVAL;
+ }
+
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
+ else
+ ch->ranks = 1;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
+
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
+
+ return 0;
+}
+
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
+{
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
+ int ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ drm_info(&i915->drm, "Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If any of the channels is a single rank channel, the worst case
+ * output will be the same as for single rank memory, so consider
+ * it single rank memory.
+ */
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
+ else
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
+
+ if (dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+ drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
+
+ return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 mem_freq_khz, val;
+ int ret;
+
+ dram_info->type = skl_get_dram_type(i915);
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ ret = skl_dram_get_channels_info(i915);
+ if (ret)
+ return ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+ SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_info->bandwidth_kbps = dram_info->num_channels *
+ mem_freq_khz * 8;
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+ return 0;
+}
+
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
+static int bxt_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 dram_channels;
+ u32 mem_freq_khz, val;
+ u8 num_active_channels;
+ int i;
+
+ val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
+ mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+ BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+ num_active_channels = hweight32(dram_channels);
+
+ /* Each active bit represents 4-byte channel */
+ dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
+
+ val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
+
+ /*
+ * If any channel is single rank, the worst case output
+ * will be the same as for single rank memory, so treat
+ * the whole system as single rank.
+ */
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
+ }
+
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory information\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+
+ return 0;
+}
+
+void intel_dram_detect(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ int ret;
+
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+
+ if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+ return;
+
+ if (IS_GEN9_LP(i915))
+ ret = bxt_get_dram_info(i915);
+ else
+ ret = skl_get_dram_info(i915);
+ if (ret)
+ return;
+
+ drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps, dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
+
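+ /* e.g. a cap decoding to 8 banks, 8 ways and 2 sets gives 8 * 8 * 2 = 128 MB */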
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * Pre-gen9 hardware lacks the capability bits needed for the size
+ * calculation, so always report 128MB.
+ */
+ if (INTEL_GEN(i915) < 9)
+ i915->edram_size_mb = 128;
+ else
+ i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+ dev_info(i915->drm.dev,
+ "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}
diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/intel_dram.h
new file mode 100644
index 000000000000..4ba13c13162c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DRAM_H__
+#define __INTEL_DRAM_H__
+
+struct drm_i915_private;
+
+void intel_dram_edram_detect(struct drm_i915_private *i915);
+void intel_dram_detect(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DRAM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 2b6c016387c2..21b91313cc5d 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -22,6 +22,7 @@
*/
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gvt.h"
/**
@@ -67,12 +68,13 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
return;
if (intel_vgpu_active(dev_priv)) {
- DRM_INFO("GVT-g is disabled for guest\n");
+ drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
goto bail;
}
if (!is_supported_device(dev_priv)) {
- DRM_INFO("Unsupported device. GVT-g is disabled\n");
+ drm_info(&dev_priv->drm,
+ "Unsupported device. GVT-g is disabled\n");
goto bail;
}
@@ -99,18 +101,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return -ENODEV;
if (!i915_modparams.enable_gvt) {
- DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
+ drm_dbg(&dev_priv->drm,
+ "GVT-g is disabled by kernel params\n");
return 0;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
- DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
+ if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) {
+ drm_err(&dev_priv->drm,
+ "i915 GVT-g loading failed: graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
ret = intel_gvt_init_device(dev_priv);
if (ret) {
- DRM_DEBUG_DRIVER("Fail to init GVT device\n");
+ drm_dbg(&dev_priv->drm, "Failed to init GVT device\n");
goto bail;
}
@@ -121,6 +125,11 @@ bail:
return 0;
}
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gvt;
+}
+
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d0d038b3cd79..6b5e9d88646d 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -265,7 +265,9 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
if (IS_ERR(mem)) {
err = PTR_ERR(mem);
- DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+ drm_err(&i915->drm,
+ "Failed to setup region(%d) type=%d\n",
+ err, type);
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 4ed60e1f01db..20ab9a5023b5 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -13,91 +13,106 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 5));
+ drm_WARN_ON(&dev_priv->drm, !IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
/* KBP is SPT compatible */
return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm,
"Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- WARN_ON(!IS_TIGERLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
return PCH_JSP;
default:
return PCH_NONE;
@@ -188,7 +203,8 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
pch_type = intel_pch_type(dev_priv, id);
/* Sanity check virtual PCH id */
- if (WARN_ON(id && pch_type == PCH_NONE))
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && pch_type == PCH_NONE))
id = 0;
dev_priv->pch_type = pch_type;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index bd2d30ecc030..8375054ba27d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -40,12 +40,36 @@
#include "gt/intel_llc.h"
#include "i915_drv.h"
+#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+/* Stores plane-specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+/* Used when computing the new watermark state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -128,16 +152,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
-
- /* WaDDIIOTimeout:glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
- u32 val = I915_READ(CHICKEN_MISC_2);
- val &= ~(GLK_CL0_PWR_DOWN |
- GLK_CL1_PWR_DOWN |
- GLK_CL2_PWR_DOWN);
- I915_WRITE(CHICKEN_MISC_2, val);
- }
-
}
static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -469,9 +483,9 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
+ u32 dsparb, dsparb2, dsparb3;
switch (pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@@ -1969,6 +1983,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
+ u32 dsparb, dsparb2, dsparb3;
if (!crtc_state->fifo_changed)
return;
@@ -1977,8 +1992,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
- WARN_ON(fifo_size != 511);
+ drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1994,7 +2009,6 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&uncore->lock);
switch (crtc->pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = intel_uncore_read_fw(uncore, DSPARB);
dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
@@ -2776,7 +2790,7 @@ static bool ilk_validate_wm_level(int level,
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *intel_crtc,
+ const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
@@ -2810,34 +2824,6 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- u32 linetime, ips_linetime;
-
- if (!crtc_state->hw.active)
- return 0;
- if (WARN_ON(adjusted_mode->crtc_clock == 0))
- return 0;
- if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
- return 0;
-
- /* The WM are computed with base on how long it takes to fill a single
- * row at the given clock rate, multiplied by 8.
- * */
- linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- adjusted_mode->crtc_clock);
- ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- intel_state->cdclk.logical.cdclk);
-
- return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
- PIPE_WM_LINETIME_TIME(linetime);
-}
-
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[8])
{
@@ -3135,7 +3121,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -3175,12 +3161,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
-
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
@@ -3189,7 +3172,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3417,7 +3400,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* level is disabled. Doing otherwise could cause underruns.
*/
if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
- WARN_ON(wm_lp != 1);
+ drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
@@ -3426,14 +3409,12 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
/* LP0 register values */
for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
- const struct intel_wm_level *r =
- &intel_crtc->wm.active.ilk.wm[0];
+ const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
+ const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (WARN_ON(!r->enable))
+ if (drm_WARN_ON(&dev_priv->drm, !r->enable))
continue;
- results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
-
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
@@ -3472,7 +3453,6 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
@@ -3487,12 +3467,6 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
int wm_lp;
for_each_pipe(dev_priv, pipe) {
- if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
- dirty |= WM_DIRTY_LINETIME(pipe);
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3584,13 +3558,6 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_PIPE(PIPE_C))
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_A))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_B))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_C))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
-
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = I915_READ(WM_MISC);
@@ -3644,26 +3611,18 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
{
- u8 enabled_slices;
-
- /* Slice 1 will always be enabled */
- enabled_slices = 1;
-
- /* Gen prior to GEN11 have only one DBuf slice */
- if (INTEL_GEN(dev_priv) < 11)
- return enabled_slices;
+ int i;
+ int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ u8 enabled_slices_mask = 0;
- /*
- * FIXME: for now we'll only ever use 1 slice; pretend that we have
- * only that 1 slice enabled until we have a proper way for on-demand
- * toggling of the second slice.
- */
- if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
- enabled_slices++;
+ for (i = 0; i < max_slices; i++) {
+ if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+ enabled_slices_mask |= BIT(i);
+ }
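+ /* e.g. with both slices powered up this reads back as BIT(0) | BIT(1) = 0x3 */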
- return enabled_slices;
+ return enabled_slices_mask;
}
/*
@@ -3864,47 +3823,46 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
return true;
}
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- const u64 total_data_rate,
- const int num_active,
- struct skl_ddb_allocation *ddb)
+/*
+ * Calculate the initial DBuf slice offset, based on slice size
+ * and mask (i.e. if the slice size is 1024 and only the second
+ * slice is enabled, the offset would be 1024)
+ */
+static unsigned int
+icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
+ u32 slice_size,
+ u32 ddb_size)
+{
+ unsigned int offset = 0;
+
+ if (!dbuf_slice_mask)
+ return 0;
+
+ offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+
+ WARN_ON(offset >= ddb_size);
+ return offset;
+}
+
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
{
- const struct drm_display_mode *adjusted_mode;
- u64 total_data_bw;
u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- WARN_ON(ddb_size == 0);
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &crtc_state->hw.adjusted_mode;
- total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
-
- /*
- * 12GB/s is maximum BW supported by single DBuf slice.
- *
- * FIXME dbuf slice code is broken:
- * - must wait for planes to stop using the slice before powering it off
- * - plane straddling both slices is illegal in multi-pipe scenarios
- * - should validate we stay within the hw bandwidth limits
- */
- if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
- ddb->enabled_slices = 2;
- } else {
- ddb->enabled_slices = 1;
- ddb_size /= 2;
- }
-
return ddb_size;
}
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+ u8 active_pipes);
+
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
- struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
@@ -3912,12 +3870,19 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
const struct intel_crtc *crtc;
- u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+ u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
+ u32 ddb_range_size;
u32 i;
-
- if (WARN_ON(!state) || !crtc_state->hw.active) {
+ u32 dbuf_slice_mask;
+ u32 active_pipes;
+ u32 offset;
+ u32 slice_size;
+ u32 total_slice_mask;
+ u32 start, end;
+
+ if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
*num_active = hweight8(dev_priv->active_pipes);
@@ -3925,12 +3890,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
}
if (intel_state->active_pipe_changes)
- *num_active = hweight8(intel_state->active_pipes);
+ active_pipes = intel_state->active_pipes;
else
- *num_active = hweight8(dev_priv->active_pipes);
+ active_pipes = dev_priv->active_pipes;
+
+ *num_active = hweight8(active_pipes);
- ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
- *num_active, ddb);
+ ddb_size = intel_get_ddb_size(dev_priv);
+
+ slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
/*
* If the state doesn't change the active CRTC's or there is no
@@ -3950,30 +3918,95 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
}
/*
+ * Get the allowed DBuf slices for the corresponding pipe and platform.
+ */
+ dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
+
+ DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
+ dbuf_slice_mask,
+ pipe_name(for_pipe), active_pipes);
+
+ /*
+ * Figure out at which DBuf slice we start, i.e. if we start at DBuf S2
+ * and the slice size is 1024, the offset would be 1024
+ */
+ offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
+ slice_size, ddb_size);
+
+ /*
+ * Figure out the total size of the allowed DBuf slices, which is
+ * the number of slices allowed for that pipe multiplied by the
+ * slice size. Inside this range, ddb entries are still allocated
+ * in proportion to display width.
+ */
+ ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
+
+ /*
* Watermark/ddb requirement highly depends upon width of the
* framebuffer, So instead of allocating DDB equally among pipes
* distribute DDB based on resolution/width of the display.
*/
+ total_slice_mask = dbuf_slice_mask;
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
+ u32 pipe_dbuf_slice_mask;
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
+ active_pipes);
+
+ /*
+ * According to BSpec a pipe can share one dbuf slice with other
+ * pipes, or use multiple dbuf slices; in both cases we account
+ * for other pipes only if they have exactly the same mask.
+ * However we still need to track how many slices should be
+ * enabled in total.
+ */
+ total_slice_mask |= pipe_dbuf_slice_mask;
- if (!crtc_state->hw.enable)
+ /*
+ * Do not account for pipes using other slice sets. Luckily, as
+ * of the current BSpec, slice sets do not partially intersect
+ * (pipes share either exactly the same slice or the same slice
+ * set), so checking for equality is enough for now.
+ */
+ if (dbuf_slice_mask != pipe_dbuf_slice_mask)
continue;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
- total_width += hdisplay;
+
+ total_width_in_range += hdisplay;
if (pipe < for_pipe)
- width_before_pipe += hdisplay;
+ width_before_pipe_in_range += hdisplay;
else if (pipe == for_pipe)
pipe_width = hdisplay;
}
- alloc->start = ddb_size * width_before_pipe / total_width;
- alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
+
+ start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
+ end = ddb_range_size *
+ (width_before_pipe_in_range + pipe_width) / total_width_in_range;
+
+ alloc->start = offset + start;
+ alloc->end = offset + end;
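+ /*
+ * E.g. two equally wide active pipes in a 1024 block range
+ * starting at offset 0 get [0, 512) and [512, 1024) respectively.
+ */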
+
+ DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
+ alloc->start, alloc->end);
+ DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
+ intel_state->enabled_dbuf_slices_mask,
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
}
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4002,7 +4035,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0);
- WARN_ON(ret);
+ drm_WARN_ON(&dev_priv->drm, ret);
for (level = 0; level <= max_level; level++) {
skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
@@ -4091,10 +4124,10 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
{
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ dev_priv->enabled_dbuf_slices_mask =
+ intel_enabled_dbuf_slices_mask(dev_priv);
}
/*
@@ -4144,6 +4177,254 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
return mul_fixed16(downscale_w, downscale_h);
}
+struct dbuf_slice_conf_entry {
+ u8 active_pipes;
+ u8 dbuf_mask[I915_MAX_PIPES];
+};
+
+/*
+ * Table taken from Bspec 12716.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * hardcoded requirements on how those should be distributed in
+ * multipipe scenarios. With more DBuf slices the algorithm would
+ * get even messier and less readable, so use a table almost as-is
+ * from the BSpec itself - that way it is at least easier to
+ * compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+/*
+ * Table taken from Bspec 49255.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * hardcoded requirements on how those should be distributed in
+ * multipipe scenarios. With more DBuf slices the algorithm would
+ * get even messier and less readable, so use a table almost as-is
+ * from the BSpec itself - that way it is at least easier to
+ * compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
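+ /*
+ * Walk the table up to its empty sentinel entry. E.g. with the
+ * ICL table an active_pipes mask of BIT(PIPE_A) | BIT(PIPE_B)
+ * yields DBUF_S1 for pipe A and DBUF_S2 for pipe B.
+ */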
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].dbuf_mask[pipe];
+ }
+ return 0;
+}
+
+/*
+ * This function finds the entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as stated in the BSpec
+ * for the particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+ /*
+ * FIXME: For ICL this is still a bit unclear, as a previous BSpec
+ * revision required calculating a "pipe ratio" to determine whether
+ * one or two slices can be used for single pipe configurations, as
+ * an additional constraint to the existing table.
+ * However, based on recent info, it should not be a "pipe ratio"
+ * but rather the ratio between pixel_rate and cdclk with additional
+ * constants, so for now only the table is used until this is
+ * clarified.
+ */
+ return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+ return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+ u8 active_pipes)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (IS_GEN(dev_priv, 12))
+ return tgl_compute_dbuf_slices(pipe, active_pipes);
+ else if (IS_GEN(dev_priv, 11))
+ return icl_compute_dbuf_slices(pipe, active_pipes);
+ /*
+ * For anything else just return one slice for now.
+ * Should be extended for other platforms.
+ */
+ return BIT(DBUF_S1);
+}
+
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
@@ -4195,14 +4476,10 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4230,9 +4507,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!crtc_state->uapi.state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4271,13 +4545,10 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
}
static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
- struct skl_ddb_allocation *ddb /* out */)
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
@@ -4294,9 +4565,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (WARN_ON(!state))
- return 0;
-
if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
@@ -4314,7 +4582,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
- ddb, alloc, &num_active);
+ alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -4335,13 +4603,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
*/
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
- WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
+ drm_WARN_ON(&dev_priv->drm,
+ wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
}
@@ -4371,7 +4640,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* watermark level, plus an extra share of the leftover blocks
* proportional to its relative data rate.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
@@ -4406,11 +4675,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
alloc_size -= extra;
total_data_rate -= rate;
}
- WARN_ON(alloc_size != 0 || total_data_rate != 0);
+ drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
@@ -4420,7 +4689,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
continue;
/* Gen11+ uses a separate plane for UV watermarks */
- WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
/* Leave disabled planes at (0,0) */
if (total[plane_id]) {
@@ -4443,7 +4713,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* that aren't actually possible.
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4480,7 +4750,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* Go back and disable the transition watermark if it turns out we
* don't have enough DDB blocks for it.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4844,45 +5114,36 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
}
}
-static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- uint_fixed_16_16_t linetime_us;
- u32 linetime_wm;
-
- linetime_us = intel_get_linetime_us(crtc_state);
- linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
-
- /* Display WA #1135: BXT:ALL GLK:ALL */
- if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
- linetime_wm /= 2;
-
- return linetime_wm;
-}
-
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- u16 trans_min, trans_y_tile_min;
- const u16 trans_amount = 10; /* This is configurable amount */
+ u16 trans_min, trans_amount, trans_y_tile_min;
u16 wm0_sel_res_b, trans_offset_b, res_blocks;
- /* Transition WM are not recommended by HW team for GEN9 */
- if (INTEL_GEN(dev_priv) <= 9)
- return;
-
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
return;
- trans_min = 14;
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+ * Transition WMs are not recommended by the HW team for GEN9
+ */
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
+ return;
+
if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ trans_amount = 0;
+ else
+ trans_amount = 10; /* This is a configurable amount */
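+
+ /*
+ * E.g. gen11+ ends up with 4 + 10 and glk/cnl with 14 + 0, so the
+ * transition offset is 14 blocks either way.
+ */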
trans_offset_b = trans_min + trans_amount;
@@ -4909,7 +5170,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
res_blocks += 1;
-
}
/*
@@ -5049,8 +5309,6 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
return ret;
}
- pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
-
return 0;
}
@@ -5059,9 +5317,10 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
+ intel_de_write_fw(dev_priv, reg,
+ (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE_FW(reg, 0);
+ intel_de_write_fw(dev_priv, reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5077,7 +5336,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_b;
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
- I915_WRITE_FW(reg, val);
+ intel_de_write_fw(dev_priv, reg, val);
}
void skl_write_plane_wm(struct intel_plane *plane,
@@ -5153,31 +5412,18 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
- !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
return false;
}
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
-static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
- const struct skl_pipe_wm *wm1,
- const struct skl_pipe_wm *wm2)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (!skl_plane_wm_equals(dev_priv,
- &wm1->planes[plane_id],
- &wm2->planes[plane_id]))
- return false;
- }
-
- return wm1->linetime == wm2->linetime;
-}
-
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
const struct skl_ddb_entry *b)
{
@@ -5231,18 +5477,17 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
- const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+ state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
+ ret = skl_allocate_pipe_ddb(new_crtc_state);
if (ret)
return ret;
@@ -5439,8 +5684,6 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
-
ret = intel_add_all_pipes(state);
if (ret)
return ret;
@@ -5515,12 +5758,8 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
- struct skl_ddb_values *results = &state->wm_results;
int ret, i;
- /* Clear all dirty flags */
- results->dirty_pipes = 0;
-
ret = skl_ddb_add_affected_pipes(state);
if (ret)
return ret;
@@ -5528,68 +5767,36 @@ skl_compute_wm(struct intel_atomic_state *state)
/*
* Calculate WM's for all pipes that are part of this transaction.
* Note that skl_ddb_add_affected_pipes may have added more CRTC's that
- * weren't otherwise being modified (and set bits in dirty_pipes) if
- * pipe allocations had to change.
+ * weren't otherwise being modified if pipe allocations had to change.
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
-
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
-
- if (!skl_pipe_wm_equals(crtc,
- &old_crtc_state->wm.skl.optimal,
- &new_crtc_state->wm.skl.optimal))
- results->dirty_pipes |= BIT(crtc->pipe);
}
ret = skl_compute_ddb(state);
if (ret)
return ret;
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
skl_print_wm_changes(state);
return 0;
}
-static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- enum pipe pipe = crtc->pipe;
-
- if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
- return;
-
- I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-}
-
-static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_ddb_values *results = &state->wm_results;
-
- if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
- return;
-
- mutex_lock(&dev_priv->wm.wm_mutex);
-
- if (crtc_state->uapi.active_changed)
- skl_atomic_update_crtc_wm(state, crtc);
-
- mutex_unlock(&dev_priv->wm.wm_mutex);
-}
-
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
@@ -5712,25 +5919,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
if (!crtc->active)
return;
-
- out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
- struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- skl_ddb_get_hw_state(dev_priv, ddb);
+ skl_ddb_get_hw_state(dev_priv);
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
-
- if (crtc->active)
- hw->dirty_pipes |= BIT(crtc->pipe);
}
if (dev_priv->active_pipes) {
@@ -5754,8 +5954,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -5774,7 +5972,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
- active->linetime = hw->wm_linetime[pipe];
} else {
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -6629,20 +6826,9 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
- /* WaEnable32PlaneMode:icl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
-
- /*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
- 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /* Wa_1407352427:icl,ehl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, PSDUNIT_CLKGATE_DIS);
+ /* Wa_14010594013:icl,ehl */
+ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ 0, CNL_DELAY_PMRSP);
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6650,10 +6836,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
- /* Wa_1408615072:tgl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, VSUNIT_CLKGATE_DIS_TGL);
-
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
if (HAS_ENGINE(dev_priv, _VCS(i)))
@@ -6663,6 +6845,11 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(POWERGATE_ENABLE,
I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+
+ /* Wa_1409825376:tgl (pre-prod) */
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ TGL_VRH_GATING_DIS);
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7248,8 +7435,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
- dev_priv->display.initial_watermarks = skl_initial_wm;
- dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c06c6a846d9a..d60a85421c5a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -17,7 +17,6 @@ struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
-struct skl_ddb_allocation;
struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -33,11 +32,11 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 0648eda309e4..3f13baaef058 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -242,8 +242,9 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
* FIXME: There might be some registers where all 1's is a valid value,
* so ideally we should check the register offset instead...
*/
- WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
+ drm_WARN(&i915->drm, val == 0xffffffff,
+ "DPIO read pipe %c reg 0x%x == 0x%x\n",
+ pipe_name(pipe), reg, val);
return val;
}
@@ -366,6 +367,10 @@ static inline int gen7_check_mailbox_status(u32 mbox)
return -ETIMEDOUT;
case GEN7_PCODE_ILLEGAL_DATA:
return -EINVAL;
+ case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
+ return -ENXIO;
+ case GEN11_PCODE_LOCKED:
+ return -EBUSY;
case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW;
default:
@@ -526,7 +531,7 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
*/
drm_dbg_kms(&i915->drm,
"PCODE timeout, retrying with preemption disabled\n");
- WARN_ON_ONCE(timeout_base_ms > 3);
+ drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
preempt_enable();
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 5f2cf6f43b8b..abb18b90d7c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -324,8 +324,9 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
- "GT thread status wait timed out\n");
+ drm_WARN_ONCE(&uncore->i915->drm,
+ wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
+ "GT thread status wait timed out\n");
}
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
@@ -441,7 +442,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
cond_resched();
}
- WARN_ON(active_domains);
+ drm_WARN_ON(&uncore->i915->drm, active_domains);
fw = uncore->fw_domains_active;
if (fw)
@@ -757,9 +758,9 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
if (!uncore->funcs.force_wake_get)
return;
- WARN(uncore->fw_domains_active,
- "Expected all fw_domains to be inactive, but %08x are still on\n",
- uncore->fw_domains_active);
+ drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
+ "Expected all fw_domains to be inactive, but %08x are still on\n",
+ uncore->fw_domains_active);
}
void assert_forcewakes_active(struct intel_uncore *uncore,
@@ -779,9 +780,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
- WARN(fw_domains & ~uncore->fw_domains_active,
- "Expected %08x fw_domains to be active, but %08x are off\n",
- fw_domains, fw_domains & ~uncore->fw_domains_active);
+ drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
+ "Expected %08x fw_domains to be active, but %08x are off\n",
+ fw_domains, fw_domains & ~uncore->fw_domains_active);
/*
* Check that the caller has an explicit wakeref and we don't mistake
@@ -794,9 +795,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
if (uncore->fw_domains_timer & domain->mask)
expect++; /* pending automatic release */
- if (WARN(actual < expect,
- "Expected domain %d to be held awake by caller, count=%d\n",
- domain->id, actual))
+ if (drm_WARN(&uncore->i915->drm, actual < expect,
+ "Expected domain %d to be held awake by caller, count=%d\n",
+ domain->id, actual))
break;
}
@@ -866,9 +867,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
if (entry->domains == FORCEWAKE_ALL)
return uncore->fw_domains;
- WARN(entry->domains & ~uncore->fw_domains,
- "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
- entry->domains & ~uncore->fw_domains, offset);
+ drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
+ "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
+ entry->domains & ~uncore->fw_domains, offset);
return entry->domains;
}
@@ -1158,10 +1159,11 @@ __unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(uncore) && !before,
- "Unclaimed %s register 0x%x\n",
- read ? "read from" : "write to",
- i915_mmio_reg_offset(reg)))
+ if (drm_WARN(&uncore->i915->drm,
+ check_for_unclaimed_mmio(uncore) && !before,
+ "Unclaimed %s register 0x%x\n",
+ read ? "read from" : "write to",
+ i915_mmio_reg_offset(reg)))
/* Only report the first N failures */
i915_modparams.mmio_debug--;
}
@@ -1436,8 +1438,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
if (!d)
return -ENOMEM;
- WARN_ON(!i915_mmio_reg_valid(reg_set));
- WARN_ON(!i915_mmio_reg_valid(reg_ack));
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
d->uncore = uncore;
d->wake_count = 0;
@@ -1482,8 +1484,8 @@ static void fw_domain_fini(struct intel_uncore *uncore,
return;
uncore->fw_domains &= ~BIT(domain_id);
- WARN_ON(d->wake_count);
- WARN_ON(hrtimer_cancel(&d->timer));
+ drm_WARN_ON(&uncore->i915->drm, d->wake_count);
+ drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
kfree(d);
}
@@ -1613,7 +1615,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
#undef fw_domain_init
/* All future platforms are expected to require complex power gating */
- WARN_ON(!ret && uncore->fw_domains == 0);
+ drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
out:
if (ret)
@@ -2108,7 +2110,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
{
enum forcewake_domains fw_domains = 0;
- WARN_ON(!op);
+ drm_WARN_ON(&uncore->i915->drm, !op);
if (!intel_uncore_has_forcewake(uncore))
return 0;
@@ -2119,7 +2121,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
if (op & FW_REG_WRITE)
fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
- WARN_ON(fw_domains & ~uncore->fw_domains);
+ drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
return fw_domains;
}
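
The conversions above replace bare WARN*() calls with the device-aware drm_WARN*() variants, which prefix each splat with the driver and device name so warnings can be attributed to a specific GPU on multi-device systems. A minimal sketch of the idea behind the wrappers (the upstream definitions live in include/drm/drm_print.h and may differ in detail):

    #include <linux/bug.h>
    #include <linux/device.h>
    #include <drm/drm_device.h>

    /* Sketch only: a device-aware WARN in the spirit of drm_WARN(). */
    #define my_drm_WARN(drm, condition, format, arg...) \
            WARN(condition, "%s %s: " format, \
                 dev_driver_string((drm)->dev), dev_name((drm)->dev), ## arg)
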
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index ef572a0c2566..68bbb1580162 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -201,11 +201,57 @@ static int live_active_retire(void *arg)
return err;
}
+static int live_active_barrier(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_active *active;
+ int err = 0;
+
+ /* Check that we get a callback when requests retire upon waiting */
+
+ active = __live_alloc(i915);
+ if (!active)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&active->base);
+ if (err)
+ goto out;
+
+ for_each_uabi_engine(engine, i915) {
+ err = i915_active_acquire_preallocate_barrier(&active->base,
+ engine);
+ if (err)
+ break;
+
+ i915_active_acquire_barrier(&active->base);
+ }
+
+ i915_active_release(&active->base);
+
+ if (err == 0)
+ err = i915_active_wait(&active->base);
+
+ if (err == 0 && !READ_ONCE(active->retired)) {
+ pr_err("i915_active not retired after flushing barriers!\n");
+ err = -EINVAL;
+ }
+
+out:
+ __live_put(active);
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ return err;
+}
+
int i915_active_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_active_wait),
SUBTEST(live_active_retire),
+ SUBTEST(live_active_barrier),
};
if (intel_gt_is_wedged(&i915->gt))
@@ -265,28 +311,40 @@ static void spin_unlock_wait(spinlock_t *lock)
spin_unlock_irq(lock);
}
+static void active_flush(struct i915_active *ref,
+ struct i915_active_fence *active)
+{
+ struct dma_fence *fence;
+
+ fence = xchg(__active_fence_slot(active), NULL);
+ if (!fence)
+ return;
+
+ spin_lock_irq(fence->lock);
+ __list_del_entry(&active->cb.node);
+ spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+ atomic_dec(&ref->count);
+
+ GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
void i915_active_unlock_wait(struct i915_active *ref)
{
if (i915_active_acquire_if_busy(ref)) {
struct active_node *it, *n;
+ /* Wait for all active callbacks */
rcu_read_lock();
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- struct dma_fence *f;
-
- /* Wait for all active callbacks */
- f = rcu_dereference(it->base.fence);
- if (f)
- spin_unlock_wait(f->lock);
- }
+ active_flush(ref, &ref->excl);
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+ active_flush(ref, &it->base);
rcu_read_unlock();
i915_active_release(ref);
}
/* And wait for the retire callback */
- spin_lock_irq(&ref->tree_lock);
- spin_unlock_irq(&ref->tree_lock);
+ spin_unlock_wait(&ref->tree_lock);
/* ... which may have been on a thread instead */
flush_work(&ref->work);
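
active_flush() above pairs two synchronisation tricks: xchg() atomically steals the fence pointer so that only one party detaches the callback, and fence->lock serialises the list removal against the signaling path; the take-and-drop of tree_lock via spin_unlock_wait() is the classic barrier idiom of waiting out any in-flight critical section without protecting state of its own. A hedged, illustrative sketch of the claim-then-detach pattern (names are not the upstream ones):

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct cb_slot {
            void *fence;            /* claimed via xchg() */
            spinlock_t *lock;       /* the fence's callback lock */
            struct list_head node;  /* callback list entry */
    };

    /* Returns true iff this caller won the race to detach the callback. */
    static bool claim_and_detach(struct cb_slot *slot)
    {
            void *fence = xchg(&slot->fence, NULL);

            if (!fence)
                    return false;   /* already signaled and detached */

            spin_lock_irq(slot->lock);
            list_del(&slot->node);
            spin_unlock_irq(slot->lock);
            return true;
    }
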
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 1b856bae67b5..939a6caebb03 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -298,10 +298,12 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
static int igt_buddy_alloc_smoke(void *arg)
{
struct i915_buddy_mm mm;
- int max_order;
+ IGT_TIMEOUT(end_time);
+ I915_RND_STATE(prng);
u64 chunk_size;
u64 mm_size;
- int err;
+ int *order;
+ int err, i;
igt_mm_config(&mm_size, &chunk_size);
@@ -313,10 +315,16 @@ static int igt_buddy_alloc_smoke(void *arg)
return err;
}
- for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ order = i915_random_order(mm.max_order + 1, &prng);
+ if (!order)
+ goto out_fini;
+
+ for (i = 0; i <= mm.max_order; ++i) {
struct i915_buddy_block *block;
- int order;
+ int max_order = order[i];
+ bool timeout = false;
LIST_HEAD(blocks);
+ int order;
u64 total;
err = igt_check_mm(&mm);
@@ -360,6 +368,11 @@ retry:
}
total += i915_buddy_block_size(&mm, block);
+
+ if (__igt_timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
} while (total < mm.size);
if (!err)
@@ -373,7 +386,7 @@ retry:
pr_err("post-mm check failed\n");
}
- if (err)
+ if (err || timeout)
break;
cond_resched();
@@ -382,6 +395,8 @@ retry:
if (err == -ENOMEM)
err = 0;
+ kfree(order);
+out_fini:
i915_buddy_fini(&mm);
return err;
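
Instead of sweeping max_order from highest to lowest, the smoke test now visits each order exactly once in random sequence and bails out once the IGT timeout fires, trading exhaustiveness per run for varied coverage across runs. i915_random_order() hands back a shuffled identity permutation; a sketch of what such a helper does (assuming a Fisher-Yates shuffle, which is what selftests/i915_random.c implements):

    #include <linux/kernel.h>
    #include <linux/random.h>
    #include <linux/slab.h>

    static int *shuffled_orders(int count, struct rnd_state *prng)
    {
            int *order, i, j;

            order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
            if (!order)
                    return NULL;

            for (i = 0; i < count; i++)
                    order[i] = i;

            /* Fisher-Yates: swap each slot with a random not-yet-fixed one. */
            for (i = count - 1; i > 0; i--) {
                    j = prandom_u32_state(prng) % (i + 1);
                    swap(order[i], order[j]);
            }
            return order;
    }
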
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 78f36faf2bbe..623759b73bb4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -98,7 +98,7 @@ static void pm_suspend(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_suspend_gtt_mappings(i915);
+ i915_ggtt_suspend(&i915->ggtt);
i915_gem_suspend_late(i915);
}
}
@@ -108,7 +108,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_suspend_gtt_mappings(i915);
+ i915_ggtt_suspend(&i915->ggtt);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
@@ -124,7 +124,7 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_restore_gtt_mappings(i915);
+ i915_ggtt_resume(&i915->ggtt);
i915_gem_restore_fences(&i915->ggtt);
i915_gem_resume(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 34138c7bdd15..0a953bfc0585 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -43,6 +43,7 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 5a577a1332f5..3bf7f53e9924 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -17,3 +17,4 @@
*/
selftest(engine_cs, intel_engine_cs_perf_selftests)
selftest(blt, i915_gem_object_blt_perf_selftests)
+selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e8a58fe49c39..9ad4ab088466 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -183,7 +183,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
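
i915_request_set_error_once() records an error on the request's fence only if no error has been recorded yet, so when several paths fail the first error is the one user space sees. An illustrative sketch of the first-error-wins idiom (not the exact upstream helper):

    #include <linux/atomic.h>

    static void set_error_once(int *error, int err)
    {
            /* cmpxchg() stores only if no earlier error was recorded. */
            cmpxchg(error, 0, err);
    }
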
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 3ef3620e0da5..2a1d4ba1f9f3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -4,6 +4,7 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/sort.h>
#include "../i915_selftest.h"
@@ -19,6 +20,7 @@
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "i915_memcpy.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
@@ -572,6 +574,195 @@ out_put:
return err;
}
+static const char *repr_type(u32 type)
+{
+ switch (type) {
+ case I915_MAP_WB:
+ return "WB";
+ case I915_MAP_WC:
+ return "WC";
+ }
+
+ return "";
+}
+
+static struct drm_i915_gem_object *
+create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
+ void **out_addr)
+{
+ struct drm_i915_gem_object *obj;
+ void *addr;
+
+ obj = i915_gem_object_create_region(mr, size, 0);
+ if (IS_ERR(obj))
+ return obj;
+
+ addr = i915_gem_object_pin_map(obj, type);
+ if (IS_ERR(addr)) {
+ i915_gem_object_put(obj);
+ if (PTR_ERR(addr) == -ENXIO)
+ return ERR_PTR(-ENODEV);
+ return addr;
+ }
+
+ *out_addr = addr;
+ return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static void igt_memcpy_long(void *dst, const void *src, size_t size)
+{
+ unsigned long *tmp = dst;
+ const unsigned long *s = src;
+
+ size = size / sizeof(unsigned long);
+ while (size--)
+ *tmp++ = *s++;
+}
+
+static inline void igt_memcpy(void *dst, const void *src, size_t size)
+{
+ memcpy(dst, src, size);
+}
+
+static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
+{
+ i915_memcpy_from_wc(dst, src, size);
+}
+
+static int _perf_memcpy(struct intel_memory_region *src_mr,
+ struct intel_memory_region *dst_mr,
+ u64 size, u32 src_type, u32 dst_type)
+{
+ struct drm_i915_private *i915 = src_mr->i915;
+ const struct {
+ const char *name;
+ void (*copy)(void *dst, const void *src, size_t size);
+ bool skip;
+ } tests[] = {
+ {
+ "memcpy",
+ igt_memcpy,
+ },
+ {
+ "memcpy_long",
+ igt_memcpy_long,
+ },
+ {
+ "memcpy_from_wc",
+ igt_memcpy_from_wc,
+ !i915_has_memcpy_from_wc(),
+ },
+ };
+ struct drm_i915_gem_object *src, *dst;
+ void *src_addr, *dst_addr;
+ int ret = 0;
+ int i;
+
+ src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto out;
+ }
+
+ dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out_unpin_src;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ ktime_t t[5];
+ int pass;
+
+ if (tests[i].skip)
+ continue;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ tests[i].copy(dst_addr, src_addr, size);
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
+ __func__,
+ src_mr->name,
+ repr_type(src_type),
+ dst_mr->name,
+ repr_type(dst_type),
+ tests[i].name,
+ size >> 10,
+ div64_u64(mul_u32_u32(4 * size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+
+ cond_resched();
+ }
+
+ i915_gem_object_unpin_map(dst);
+ i915_gem_object_put(dst);
+out_unpin_src:
+ i915_gem_object_unpin_map(src);
+ i915_gem_object_put(src);
+
+ i915_gem_drain_freed_objects(i915);
+out:
+ if (ret == -ENODEV)
+ ret = 0;
+
+ return ret;
+}
+
+static int perf_memcpy(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const u32 types[] = {
+ I915_MAP_WB,
+ I915_MAP_WC,
+ };
+ static const u32 sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_4M,
+ };
+ struct intel_memory_region *src_mr, *dst_mr;
+ int src_id, dst_id;
+ int i, j, k;
+ int ret;
+
+ for_each_memory_region(src_mr, i915, src_id) {
+ for_each_memory_region(dst_mr, i915, dst_id) {
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ for (j = 0; j < ARRAY_SIZE(types); ++j) {
+ for (k = 0; k < ARRAY_SIZE(types); ++k) {
+ ret = _perf_memcpy(src_mr,
+ dst_mr,
+ sizes[i],
+ types[j],
+ types[k]);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -619,3 +810,15 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
return i915_live_subtests(tests, i915);
}
+
+int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_memcpy),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
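
The perf test times five copies, sorts the samples, throws away the fastest and slowest, and reports a weighted mean of the middle three, i.e. bytes/sec = size / ((t[1] + 2*t[2] + t[3]) / 4). A sketch of the arithmetic behind the pr_info() above (assuming size fits in 32 bits, as the mul_u32_u32() there implies):

    #include <linux/ktime.h>
    #include <linux/math64.h>

    static u64 mib_per_sec(u32 size, ktime_t t1, ktime_t t2, ktime_t t3)
    {
            /* 4*size bytes per weighted interval; ns -> s, then B -> MiB */
            return div64_u64(mul_u32_u32(4 * size, NSEC_PER_SEC),
                             t1 + 2 * t2 + t3) >> 20;
    }
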
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b8986983afc..754d0eb6beaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -144,7 +144,6 @@ struct drm_i915_private *mock_gem_device(void)
goto put_device;
}
i915->drm.pdev = pdev;
- i915->drm.dev_private = i915;
intel_runtime_pm_init_early(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
new file mode 100644
index 000000000000..23adb64d640a
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_utils.h"
+#include "intel_pm.h"
+#include "vlv_suspend.h"
+
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 pcbr;
+ u32 clock_gate_dis2;
+};
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ * used internally by the HW in a way that doesn't depend on
+ * keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ * These have no effect on the driver's operation, so we don't save/restore
+ * them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ * the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that based on the above 3 criteria can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+ struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+ struct intel_uncore *uncore = &i915->uncore;
+ int i;
+
+ if (!s)
+ return;
+
+ /* GAM 0x4000-0x4770 */
+ s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
+ s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
+ s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
+ s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
+ s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));
+
+ s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);
+
+ s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
+ s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
+ s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);
+
+ s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
+ s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
+ s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
+ s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
+ s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
+ s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
+ s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
+ s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
+ s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
+ s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
+ s->ecobus = intel_uncore_read(uncore, ECOBUS);
+ s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
+ s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
+ s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
+ s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
+ s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
+ s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ s->gt_imr = intel_uncore_read(uncore, GTIMR);
+ s->gt_ier = intel_uncore_read(uncore, GTIER);
+ s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+ s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ s->tilectl = intel_uncore_read(uncore, TILECTL);
+ s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
+ s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
+ s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
+ s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);
+
+ /*
+ * Not saving any of:
+ * DFT, 0x9800-0x9EC0
+ * SARB, 0xB000-0xB1FC
+ * GAC, 0x5208-0x524C, 0x14000-0x14C000
+ * PCI CFG
+ */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+ struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+ int i;
+
+ if (!s)
+ return;
+
+ /* GAM 0x4000-0x4770 */
+ intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
+ intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+ intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
+ intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+ intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);
+
+ intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+ intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
+ intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+ intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+ intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
+ intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
+ intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
+ intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
+ intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
+ intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
+ intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
+ intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
+ intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
+ intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
+ intel_uncore_write(uncore, ECOBUS, s->ecobus);
+ intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+ intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+ intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
+ intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+ intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
+ intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ intel_uncore_write(uncore, GTIMR, s->gt_imr);
+ intel_uncore_write(uncore, GTIER, s->gt_ier);
+ intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
+ intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ intel_uncore_write(uncore, TILECTL, s->tilectl);
+ intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);
+ /*
+ * Preserve the GT allow wake and GFX force clock bit, they are not
+ * restored, as they are used to control the s0ix suspend/resume
+ * sequence by the caller.
+ */
+ val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ val &= VLV_GTLC_ALLOWWAKEREQ;
+ val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+ intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+
+ val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ val &= VLV_GFX_CLK_FORCE_ON_BIT;
+ val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+ intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+ intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
+ intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
+ intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
+ intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
+
+static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
+ u32 mask, u32 val)
+{
+ i915_reg_t reg = VLV_GTLC_PW_STATUS;
+ u32 reg_value;
+ int ret;
+
+ /* The HW does not like us polling for PW_STATUS frequently, so
+ * use the sleeping loop rather than risk the busy spin within
+ * intel_wait_for_register().
+ *
+ * Transitioning between RC6 states should be at most 2ms (see
+ * valleyview_enable_rps) so use a 3ms timeout.
+ */
+ ret = wait_for(((reg_value =
+ intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
+ == val, 3);
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
+ return ret;
+}
+
+static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+ int err;
+
+ val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+ if (force_on)
+ val |= VLV_GFX_CLK_FORCE_ON_BIT;
+ intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+ if (!force_on)
+ return 0;
+
+ err = intel_wait_for_register(uncore,
+ VLV_GTLC_SURVIVABILITY_REG,
+ VLV_GFX_CLK_STATUS_BIT,
+ VLV_GFX_CLK_STATUS_BIT,
+ 20);
+ if (err)
+ drm_err(&i915->drm,
+ "timeout waiting for GFX clock force-on (%08x)\n",
+ intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));
+
+ return err;
+}
+
+static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 mask;
+ u32 val;
+ int err;
+
+ val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ val &= ~VLV_GTLC_ALLOWWAKEREQ;
+ if (allow)
+ val |= VLV_GTLC_ALLOWWAKEREQ;
+ intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+ intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
+
+ mask = VLV_GTLC_ALLOWWAKEACK;
+ val = allow ? mask : 0;
+
+ err = vlv_wait_for_pw_status(i915, mask, val);
+ if (err)
+ drm_err(&i915->drm, "timeout disabling GT waking\n");
+
+ return err;
+}
+
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
+{
+ u32 mask;
+ u32 val;
+
+ mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+ val = wait_for_on ? mask : 0;
+
+ /*
+ * RC6 transitioning can be delayed up to 2 msec (see
+ * valleyview_enable_rps), use 3 msec for safety.
+ *
+ * This can fail to turn off RC6 if the GPU is stuck after a failed
+ * reset and we are trying to force the machine to sleep.
+ */
+ if (vlv_wait_for_pw_status(dev_priv, mask, val))
+ drm_dbg(&dev_priv->drm,
+ "timeout waiting for GT wells to go %s\n",
+ onoff(wait_for_on));
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+
+ if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+ return;
+
+ drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
+ intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+int vlv_suspend_complete(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+ int err;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ /*
+ * Bspec defines the following GT well-on flags as debug only, so
+ * don't treat them as hard failures.
+ */
+ vlv_wait_for_gt_wells(dev_priv, false);
+
+ mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+ vlv_check_no_gt_access(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, true);
+ if (err)
+ goto err1;
+
+ err = vlv_allow_gt_wake(dev_priv, false);
+ if (err)
+ goto err2;
+
+ vlv_save_gunit_s0ix_state(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ /* For safety, always re-enable waking and disable gfx clock forcing */
+ vlv_allow_gt_wake(dev_priv, true);
+err1:
+ vlv_force_gfx_clock(dev_priv, false);
+
+ return err;
+}
+
+int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
+{
+ int err;
+ int ret;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ /*
+ * If any of the steps fail, just try to continue; that's the best we
+ * can do at this point. Return the first error code (which will also
+ * leave RPM permanently disabled).
+ */
+ ret = vlv_force_gfx_clock(dev_priv, true);
+
+ vlv_restore_gunit_s0ix_state(dev_priv);
+
+ err = vlv_allow_gt_wake(dev_priv, true);
+ if (!ret)
+ ret = err;
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (!ret)
+ ret = err;
+
+ vlv_check_no_gt_access(dev_priv);
+
+ if (rpm_resume)
+ intel_init_clock_gating(dev_priv);
+
+ return ret;
+}
+
+int vlv_suspend_init(struct drm_i915_private *i915)
+{
+ if (!IS_VALLEYVIEW(i915))
+ return 0;
+
+ /* we write all the values in the struct, so no need to zero it out */
+ i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+ GFP_KERNEL);
+ if (!i915->vlv_s0ix_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void vlv_suspend_cleanup(struct drm_i915_private *i915)
+{
+ if (!i915->vlv_s0ix_state)
+ return;
+
+ kfree(i915->vlv_s0ix_state);
+ i915->vlv_s0ix_state = NULL;
+}
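
vlv_wait_for_pw_status() deliberately reaches for the sleeping wait_for() macro rather than the busy spin inside intel_wait_for_register(), since the hardware dislikes frequent PW_STATUS polling. A rough sketch of the shape such a sleeping poll takes (the real macro in i915_utils.h is more elaborate):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/ktime.h>

    #define poll_sleeping(COND, timeout_ms) ({                            \
            ktime_t __end = ktime_add_ms(ktime_get_raw(), timeout_ms);    \
            int __ret = -ETIMEDOUT;                                       \
            for (;;) {                                                    \
                    if (COND) { __ret = 0; break; }                       \
                    if (ktime_after(ktime_get_raw(), __end)) {            \
                            if (COND)       /* one final check */         \
                                    __ret = 0;                            \
                            break;                                        \
                    }                                                     \
                    usleep_range(10, 1000); /* sleep, don't spin */       \
            }                                                             \
            __ret;                                                        \
    })
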
diff --git a/drivers/gpu/drm/i915/vlv_suspend.h b/drivers/gpu/drm/i915/vlv_suspend.h
new file mode 100644
index 000000000000..895091cb1f62
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __VLV_SUSPEND_H__
+#define __VLV_SUSPEND_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int vlv_suspend_init(struct drm_i915_private *i915);
+void vlv_suspend_cleanup(struct drm_i915_private *i915);
+int vlv_suspend_complete(struct drm_i915_private *i915);
+int vlv_resume_prepare(struct drm_i915_private *i915, bool rpm_resume);
+
+#endif /* __VLV_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8cb2665b2c74..4da22a94790c 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -446,7 +446,7 @@ static int imx_ldb_register(struct drm_device *drm,
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
- imx_ldb_ch->bridge, NULL);
+ imx_ldb_ch->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
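
This and the following hunks adapt callers to the new drm_bridge_attach() signature, whose fourth argument is a bitmask of enum drm_bridge_attach_flags; passing 0 keeps the historical behaviour where the bridge creates its own connector. A caller that prefers to build the connector itself would pass DRM_BRIDGE_ATTACH_NO_CONNECTOR instead:

    ret = drm_bridge_attach(encoder, bridge, NULL,
                            DRM_BRIDGE_ATTACH_NO_CONNECTOR);
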
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 28826c0aa24a..6776ebb3246d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -359,7 +359,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!state->crtc)
+ if (WARN_ON(!state->crtc))
return -EINVAL;
crtc_state =
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 3dca424059f7..08fafa4bf8c2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -24,6 +24,7 @@
struct imx_parallel_display {
struct drm_connector connector;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct device *dev;
void *edid;
int edid_len;
@@ -31,7 +32,7 @@ struct imx_parallel_display {
u32 bus_flags;
struct drm_display_mode mode;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
};
static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
@@ -44,6 +45,11 @@ static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
return container_of(e, struct imx_parallel_display, encoder);
}
+static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
+{
+ return container_of(b, struct imx_parallel_display, bridge);
+}
+
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
@@ -89,37 +95,148 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
return &imxpd->encoder;
}
-static void imx_pd_encoder_enable(struct drm_encoder *encoder)
+static void imx_pd_bridge_enable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_prepare(imxpd->panel);
drm_panel_enable(imxpd->panel);
}
-static void imx_pd_encoder_disable(struct drm_encoder *encoder)
+static void imx_pd_bridge_disable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_disable(imxpd->panel);
drm_panel_unprepare(imxpd->panel);
}
-static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static const u32 imx_pd_bus_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_GBR888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+ MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static u32 *
+imx_pd_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
{
- struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *output_fmts;
- if (!imxpd->bus_format && di->num_bus_formats) {
- imx_crtc_state->bus_flags = di->bus_flags;
- imx_crtc_state->bus_format = di->bus_formats[0];
- } else {
- imx_crtc_state->bus_flags = imxpd->bus_flags;
- imx_crtc_state->bus_format = imxpd->bus_format;
+ if (!imxpd->bus_format && !di->num_bus_formats) {
+ *num_output_fmts = ARRAY_SIZE(imx_pd_bus_fmts);
+ return kmemdup(imx_pd_bus_fmts, sizeof(imx_pd_bus_fmts),
+ GFP_KERNEL);
+ }
+
+ *num_output_fmts = 1;
+ output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ if (!imxpd->bus_format && di->num_bus_formats)
+ output_fmts[0] = di->bus_formats[0];
+ else
+ output_fmts[0] = imxpd->bus_format;
+
+ return output_fmts;
+}
+
+static bool imx_pd_format_supported(u32 output_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx_pd_bus_fmts); i++) {
+ if (imx_pd_bus_fmts[i] == output_fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx_pd_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *input_fmts;
+
+ /*
+ * If the next bridge does not support bus format negotiation, let's
+ * use the static bus format definition (imxpd->bus_format) if it's
+ * specified, RGB888 when it's not.
+ */
+ if (output_fmt == MEDIA_BUS_FMT_FIXED)
+ output_fmt = imxpd->bus_format ? : MEDIA_BUS_FMT_RGB888_1X24;
+
+ /* Now make sure the requested output format is supported. */
+ if ((imxpd->bus_format && imxpd->bus_format != output_fmt) ||
+ !imx_pd_format_supported(output_fmt)) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+
+static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ struct drm_bridge_state *next_bridge_state = NULL;
+ struct drm_bridge *next_bridge;
+ u32 bus_flags, bus_fmt;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+ if (next_bridge)
+ next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ next_bridge);
+
+ if (next_bridge_state)
+ bus_flags = next_bridge_state->input_bus_cfg.flags;
+ else if (!imxpd->bus_format && di->num_bus_formats)
+ bus_flags = di->bus_flags;
+ else
+ bus_flags = imxpd->bus_flags;
+
+ bus_fmt = bridge_state->input_bus_cfg.format;
+ if (!imx_pd_format_supported(bus_fmt))
+ return -EINVAL;
+
+ if (bus_flags &
+ ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) {
+ dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags);
+ return -EINVAL;
}
+
+ bridge_state->output_bus_cfg.flags = bus_flags;
+ bridge_state->input_bus_cfg.flags = bus_flags;
+ imx_crtc_state->bus_flags = bus_flags;
+ imx_crtc_state->bus_format = bridge_state->input_bus_cfg.format;
imx_crtc_state->di_hsync_pin = 2;
imx_crtc_state->di_vsync_pin = 3;
@@ -143,10 +260,15 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
-static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
- .enable = imx_pd_encoder_enable,
- .disable = imx_pd_encoder_disable,
- .atomic_check = imx_pd_encoder_atomic_check,
+static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
+ .enable = imx_pd_bridge_enable,
+ .disable = imx_pd_bridge_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_check = imx_pd_bridge_atomic_check,
+ .atomic_get_input_bus_fmts = imx_pd_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};
static int imx_pd_register(struct drm_device *drm,
@@ -166,11 +288,13 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- if (!imxpd->bridge) {
+ imxpd->bridge.funcs = &imx_pd_bridge_funcs;
+ drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+
+ if (!imxpd->next_bridge) {
drm_connector_helper_add(&imxpd->connector,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
@@ -181,8 +305,9 @@ static int imx_pd_register(struct drm_device *drm,
if (imxpd->panel)
drm_panel_attach(imxpd->panel, &imxpd->connector);
- if (imxpd->bridge) {
- ret = drm_bridge_attach(encoder, imxpd->bridge, NULL);
+ if (imxpd->next_bridge) {
+ ret = drm_bridge_attach(encoder, imxpd->next_bridge,
+ &imxpd->bridge, 0);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
@@ -227,7 +352,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->bus_format = bus_format;
/* port@1 is the output port */
- ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
+ &imxpd->next_bridge);
if (ret && ret != -ENODEV)
return ret;
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 6d47ef7b148c..9dfe7cb530e1 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -737,7 +737,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_bridge_attach(&priv->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, bridge, NULL, 0);
if (ret) {
dev_err(dev, "Unable to attach bridge");
return ret;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 124efe4fa97b..2daac64d8955 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -15,10 +15,14 @@
#include "lima_vm.h"
int lima_sched_timeout_ms;
+uint lima_heap_init_nr_pages = 8;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages");
+module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
@@ -68,7 +72,7 @@ static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_
if (args->pad)
return -EINVAL;
- if (args->flags)
+ if (args->flags & ~(LIMA_BO_FLAG_HEAP))
return -EINVAL;
if (args->size == 0)
@@ -241,6 +245,12 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
+/**
+ * Changelog:
+ *
+ * - 1.1.0 - add heap buffer support
+ */
+
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
@@ -250,9 +260,9 @@ static struct drm_driver lima_drm_driver = {
.fops = &lima_drm_driver_fops,
.name = "lima",
.desc = "lima DRM",
- .date = "20190217",
+ .date = "20191231",
.major = 1,
- .minor = 0,
+ .minor = 1,
.patchlevel = 0,
.gem_create_object = lima_gem_create_object,
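
Since heap_init_nr_pages is created with mode 0444 it is read-only at runtime and only takes effect at module load, e.g. "modprobe lima heap_init_nr_pages=16" to start heap buffers at 16 pages instead of the default 8.
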
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index 69c7344715c9..f492ecc6a5d9 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -9,6 +9,7 @@
#include "lima_ctx.h"
extern int lima_sched_timeout_ms;
+extern uint lima_heap_init_nr_pages;
struct lima_vm;
struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index d0059d8c97d8..5404e0d668db 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -4,6 +4,8 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-mapping.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -15,6 +17,83 @@
#include "lima_gem.h"
#include "lima_vm.h"
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+{
+ struct page **pages;
+ struct address_space *mapping = bo->base.base.filp->f_mapping;
+ struct device *dev = bo->base.base.dev->dev;
+ size_t old_size = bo->heap_size;
+ size_t new_size = bo->heap_size ? bo->heap_size * 2 :
+ (lima_heap_init_nr_pages << PAGE_SHIFT);
+ struct sg_table sgt;
+ int i, ret;
+
+ if (bo->heap_size >= bo->base.base.size)
+ return -ENOSPC;
+
+ new_size = min(new_size, bo->base.base.size);
+
+ mutex_lock(&bo->base.pages_lock);
+
+ if (bo->base.pages) {
+ pages = bo->base.pages;
+ } else {
+ pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+ sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
+ if (!pages) {
+ mutex_unlock(&bo->base.pages_lock);
+ return -ENOMEM;
+ }
+
+ bo->base.pages = pages;
+ bo->base.pages_use_count = 1;
+
+ mapping_set_unevictable(mapping);
+ }
+
+ for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
+ struct page *page = shmem_read_mapping_page(mapping, i);
+
+ if (IS_ERR(page)) {
+ mutex_unlock(&bo->base.pages_lock);
+ return PTR_ERR(page);
+ }
+ pages[i] = page;
+ }
+
+ mutex_unlock(&bo->base.pages_lock);
+
+ ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
+ new_size, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (bo->base.sgt) {
+ dma_unmap_sg(dev, bo->base.sgt->sgl,
+ bo->base.sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(bo->base.sgt);
+ } else {
+ bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+ if (!bo->base.sgt) {
+ sg_free_table(&sgt);
+ return -ENOMEM;
+ }
+ }
+
+ dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
+
+ *bo->base.sgt = sgt;
+
+ if (vm) {
+ ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+ }
+
+ bo->heap_size = new_size;
+ return 0;
+}
+
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
@@ -22,7 +101,8 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
gfp_t mask;
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
- struct sg_table *sgt;
+ struct lima_bo *bo;
+ bool is_heap = flags & LIMA_BO_FLAG_HEAP;
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
@@ -36,10 +116,18 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
mask |= __GFP_DMA32;
mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- sgt = drm_gem_shmem_get_pages_sgt(obj);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto out;
+ if (is_heap) {
+ bo = to_lima_bo(obj);
+ err = lima_heap_alloc(bo, NULL);
+ if (err)
+ goto out;
+ } else {
+ struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
}
err = drm_gem_handle_create(file, obj, handle);
@@ -79,17 +167,47 @@ static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *f
lima_vm_bo_del(vm, bo);
}
+static int lima_gem_pin(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_pin(obj);
+}
+
+static void *lima_gem_vmap(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_shmem_vmap(obj);
+}
+
+static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_mmap(obj, vma);
+}
+
static const struct drm_gem_object_funcs lima_gem_funcs = {
.free = lima_gem_free_object,
.open = lima_gem_object_open,
.close = lima_gem_object_close,
.print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
+ .pin = lima_gem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
+ .vmap = lima_gem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .mmap = lima_gem_mmap,
};
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
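
lima_heap_alloc() grows a heap BO incrementally: the first call backs lima_heap_init_nr_pages pages, each later call doubles the backed size clamped to the full object size, and -ENOSPC signals that the BO is exhausted. A sketch of the growth policy (the default of 8 pages comes from lima_drv.c):

    #include <linux/kernel.h>

    static size_t next_heap_size(size_t cur, size_t bo_size, size_t init)
    {
            size_t next = cur ? cur * 2 : init;

            return min(next, bo_size);  /* never beyond the BO itself */
    }
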
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 1800feb3e47f..ccea06142f4b 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -7,12 +7,15 @@
#include <drm/drm_gem_shmem_helper.h>
struct lima_submit;
+struct lima_vm;
struct lima_bo {
struct drm_gem_shmem_object base;
struct mutex lock;
struct list_head va;
+
+ size_t heap_size;
};
static inline struct lima_bo *
@@ -31,6 +34,7 @@ static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
return bo->base.base.resv;
}
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm);
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index ccf49faedebf..d8841c870d90 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -11,6 +11,8 @@
#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
+#include "lima_gem.h"
+#include "lima_vm.h"
#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)
@@ -20,6 +22,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
struct lima_ip *ip = data;
struct lima_device *dev = ip->dev;
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ struct lima_sched_task *task = pipe->current_task;
u32 state = gp_read(LIMA_GP_INT_STAT);
u32 status = gp_read(LIMA_GP_STATUS);
bool done = false;
@@ -29,8 +32,16 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
return IRQ_NONE;
if (state & LIMA_GP_IRQ_MASK_ERROR) {
- dev_err(dev->dev, "gp error irq state=%x status=%x\n",
- state, status);
+ if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
+ dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
+ status);
+ } else {
+ dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+ state, status);
+ if (task)
+ task->recoverable = false;
+ }
/* mask all interrupts before hard reset */
gp_write(LIMA_GP_INT_MASK, 0);
@@ -43,6 +54,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
LIMA_GP_STATUS_PLBU_ACTIVE);
done = valid && !active;
+ pipe->error = false;
}
gp_write(LIMA_GP_INT_CLEAR, state);
@@ -121,6 +133,22 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
u32 cmd = 0;
int i;
+ /* update real heap buffer size for GP */
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+
+ if (bo->heap_size &&
+ lima_vm_get_va(task->vm, bo) ==
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
+ bo->heap_size;
+ task->recoverable = true;
+ task->heap = bo;
+ break;
+ }
+ }
+
if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
f[LIMA_GP_VSCL_END_ADDR >> 2])
cmd |= LIMA_GP_CMD_START_VS;
@@ -184,6 +212,36 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+ struct lima_sched_task *task = pipe->current_task;
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ size_t fail_size =
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];
+
+ if (fail_size == task->heap->heap_size) {
+ int ret;
+
+ ret = lima_heap_alloc(task->heap, task->vm);
+ if (ret < 0)
+ return ret;
+ }
+
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+ /* Resume from where we stopped, i.e. new start is old end */
+ gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
+ gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+ return 0;
+}
+
static void lima_gp_print_version(struct lima_ip *ip)
{
u32 version, major, minor;
@@ -270,6 +328,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
pipe->task_fini = lima_gp_task_fini;
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
+ pipe->task_recover = lima_gp_task_recover;
return 0;
}
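
Recovery resumes the task instead of restarting it: the PLBU_ALLOC start register is moved to the previous end address while the frame's end is recomputed from the original base plus the now-grown heap size, so the new window covers exactly the freshly added pages. A sketch of that register arithmetic (names illustrative):

    #include <linux/types.h>

    static void plbu_resume_window(u32 base, u32 old_end, u32 heap_size,
                                   u32 *new_start, u32 *new_end)
    {
            *new_start = old_end;           /* resume where we ran out */
            *new_end = base + heap_size;    /* same base, grown heap */
    }
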
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 97ec09dee572..f79d2af427e7 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -99,6 +99,11 @@ void lima_mmu_fini(struct lima_ip *ip)
}
+void lima_mmu_flush_tlb(struct lima_ip *ip)
+{
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+}
+
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
{
struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 8c78319bcc8e..4f8ccbebcba1 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -10,6 +10,7 @@ struct lima_vm;
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
+void lima_mmu_flush_tlb(struct lima_ip *ip);
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
void lima_mmu_page_fault_resume(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
index ace8ecefbe90..0124c90e0153 100644
--- a/drivers/gpu/drm/lima/lima_regs.h
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -239,6 +239,7 @@
#define LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define LIMA_MMU_STATUS_BUS_ID(x) ((x >> 6) & 0x1F)
+#define LIMA_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
#define LIMA_MMU_COMMAND 0x0008
#define LIMA_MMU_COMMAND_ENABLE_PAGING 0x00
#define LIMA_MMU_COMMAND_DISABLE_PAGING 0x01
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index b561dd05bd62..3886999b4533 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -313,6 +313,26 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
.free_job = lima_sched_free_job,
};
+static void lima_sched_recover_work(struct work_struct *work)
+{
+ struct lima_sched_pipe *pipe =
+ container_of(work, struct lima_sched_pipe, recover_work);
+ int i;
+
+ for (i = 0; i < pipe->num_l2_cache; i++)
+ lima_l2_cache_flush(pipe->l2_cache[i]);
+
+ if (pipe->bcast_mmu) {
+ lima_mmu_flush_tlb(pipe->bcast_mmu);
+ } else {
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_flush_tlb(pipe->mmu[i]);
+ }
+
+ if (pipe->task_recover(pipe))
+ drm_sched_fault(&pipe->base);
+}
+
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -321,6 +341,8 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
+ INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
+
return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
msecs_to_jiffies(timeout), name);
}
@@ -332,11 +354,14 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
- if (pipe->error)
- drm_sched_fault(&pipe->base);
- else {
- struct lima_sched_task *task = pipe->current_task;
-
+ struct lima_sched_task *task = pipe->current_task;
+
+ if (pipe->error) {
+ if (task && task->recoverable)
+ schedule_work(&pipe->recover_work);
+ else
+ drm_sched_fault(&pipe->base);
+ } else {
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
}
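
lima_sched_pipe_task_done() is called from the interrupt handler, but the recovery path ends up in lima_heap_alloc(), which takes mutexes and allocates pages and therefore may sleep; deferring the recoverable case to a work item keeps the sleeping work out of IRQ context, while unrecoverable errors still go straight to drm_sched_fault().
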
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 1d814fecbcc0..d64393fb50a9 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -20,6 +20,9 @@ struct lima_sched_task {
struct lima_bo **bos;
int num_bos;
+ bool recoverable;
+ struct lima_bo *heap;
+
/* pipe fence */
struct dma_fence *fence;
};
@@ -68,6 +71,9 @@ struct lima_sched_pipe {
void (*task_fini)(struct lima_sched_pipe *pipe);
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+ int (*task_recover)(struct lima_sched_pipe *pipe);
+
+ struct work_struct recover_work;
};
int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 840e2350d872..5b92fb82674a 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -155,6 +155,7 @@ err_out0:
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
struct lima_bo_va *bo_va;
+ u32 size;
mutex_lock(&bo->lock);
@@ -166,8 +167,9 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
+ size = bo->heap_size ? bo->heap_size : bo_va->node.size;
lima_vm_unmap_range(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ bo_va->node.start + size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -277,3 +279,45 @@ void lima_vm_print(struct lima_vm *vm)
}
}
}
+
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
+{
+ struct lima_bo_va *bo_va;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
+ u32 base;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (!bo_va) {
+ err = -ENOENT;
+ goto err_out0;
+ }
+
+ mutex_lock(&vm->lock);
+
+ base = bo_va->node.start + (pageoff << PAGE_SHIFT);
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
+ bo->base.sgt->nents, pageoff) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ base + offset);
+ if (err)
+ goto err_out1;
+
+ offset += PAGE_SIZE;
+ }
+
+ mutex_unlock(&vm->lock);
+
+ mutex_unlock(&bo->lock);
+ return 0;
+
+err_out1:
+ if (offset)
+ lima_vm_unmap_range(vm, base, base + offset - 1);
+ mutex_unlock(&vm->lock);
+err_out0:
+ mutex_unlock(&bo->lock);
+ return err;
+}
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index e0bdedcf14dd..22aeec77d84d 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -58,5 +58,6 @@ static inline void lima_vm_put(struct lima_vm *vm)
}
void lima_vm_print(struct lima_vm *vm);
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff);
#endif
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9008ddcfc528..f28cb7a576ba 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -20,11 +20,11 @@
* input formats including most variants of RGB and YUV.
*
* The hardware has four display pipes, and the layout is a little
- * bit like this:
+ * bit like this::
*
- * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
- * External 0..5 0..3 A,B, 3 x DSI bridge
- * source 0..9 C0,C1 2 x DPI
+ * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
+ * External 0..5 0..3 A,B, 3 x DSI bridge
+ * source 0..9 C0,C1 2 x DPI
*
* FIFOs A and B are for LCD and HDMI while FIFOs C0/C1 are for
* panels with embedded buffer.
@@ -43,6 +43,7 @@
* to change as we exploit more of the hardware capabilities.
*
* TODO:
+ *
* - Enable damaged rectangles using drm_plane_enable_fb_damage_clips()
* so we can selectively just transmit the damaged area to a
* command-only display.
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index bb6528b01cd0..7af5ebb0c436 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -986,7 +986,8 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
clk_disable_unprepare(d->lp_clk);
}
-static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
+static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
struct drm_device *drm = bridge->dev;
@@ -998,7 +999,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
+ ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
if (ret) {
dev_err(d->dev, "failed to attach the DSI bridge\n");
return ret;
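
The drm_bridge_attach() signature change that recurs throughout this series adds an attach-flags argument to the bridge .attach callback. The pattern is uniform: forward the flags verbatim when chaining to the next bridge, and reject DRM_BRIDGE_ATTACH_NO_CONNECTOR in bridges that still create their own connector (as mtk_hdmi does further down). A hedged sketch of the pattern, with my_bridge/bridge_to_my_bridge as placeholder names:

static int my_bridge_attach(struct drm_bridge *bridge,
                            enum drm_bridge_attach_flags flags)
{
        struct my_bridge *priv = bridge_to_my_bridge(bridge); /* hypothetical */

        /* This bridge still creates its own connector, so it cannot
         * honour DRM_BRIDGE_ATTACH_NO_CONNECTOR yet. */
        if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
                return -EINVAL;

        /* Chain to the next bridge, forwarding the caller's flags. */
        return drm_bridge_attach(bridge->encoder, priv->next_bridge,
                                 bridge, flags);
}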
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 01fa8b8d763d..4f0ce4cd5b8c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -607,7 +607,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
/* Currently DPI0 is fixed to be driven by OVL1 */
dpi->encoder.possible_crtcs = BIT(1);
- ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL);
+ ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
@@ -664,6 +664,16 @@ static unsigned int mt2701_calculate_factor(int clock)
return 1;
}
+static unsigned int mt8183_calculate_factor(int clock)
+{
+ if (clock <= 27000)
+ return 8;
+ else if (clock <= 167000)
+ return 4;
+ else
+ return 2;
+}
+
static const struct mtk_dpi_conf mt8173_conf = {
.cal_factor = mt8173_calculate_factor,
.reg_h_fre_con = 0xe0,
@@ -675,6 +685,11 @@ static const struct mtk_dpi_conf mt2701_conf = {
.edge_sel_en = true,
};
+static const struct mtk_dpi_conf mt8183_conf = {
+ .cal_factor = mt8183_calculate_factor,
+ .reg_h_fre_con = 0xe0,
+};
+
static int mtk_dpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -770,6 +785,9 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8173-dpi",
.data = &mt8173_conf,
},
+ { .compatible = "mediatek,mt8183-dpi",
+ .data = &mt8183_conf,
+ },
{ },
};
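
The cal_factor hook returns a multiplier that keeps the TVD PLL inside its lock range for slow pixel clocks: per mt8183_calculate_factor(), clocks up to 27 MHz use 8, up to 167 MHz use 4, anything faster uses 2. A sketch of how such a factor is typically consumed, inferred from the driver context rather than from the hunks shown (dpi->conf and dpi->tvd_clk are assumptions):

/* Sketch: run the PLL at pixel clock * factor, so a 148500 kHz 1080p mode
 * yields a 594 MHz PLL (factor 4) and a 27000 kHz 480p mode yields
 * 216 MHz (factor 8). */
unsigned int factor = dpi->conf->cal_factor(mode->clock);
unsigned long pll_rate = (unsigned long)mode->clock * 1000 * factor;

clk_set_rate(dpi->tvd_clk, pll_rate);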
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5fa1073cf26b..0ede69830a9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -904,7 +904,7 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
/* If there's a bridge, attach to it and let it create the connector */
if (dsi->bridge) {
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5e4a4dbda443..ff43a3d80410 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -169,6 +170,9 @@ struct mtk_hdmi {
bool audio_enable;
bool powered;
bool enabled;
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ struct mutex update_plugged_status_lock;
};
static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
@@ -1194,13 +1198,26 @@ static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
}
+static enum drm_connector_status
+mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
+{
+ bool connected;
+
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ connected = mtk_cec_hpd_high(hdmi->cec_dev);
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, connected);
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+
+ return connected ?
+ connector_status_connected : connector_status_disconnected;
+}
+
static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
bool force)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
-
- return mtk_cec_hpd_high(hdmi->cec_dev) ?
- connector_status_connected : connector_status_disconnected;
+ return mtk_hdmi_update_plugged_status(hdmi);
}
static void hdmi_conn_destroy(struct drm_connector *conn)
@@ -1297,11 +1314,17 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
* Bridge callbacks
*/
-static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
&mtk_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
@@ -1326,7 +1349,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
if (hdmi->next_bridge) {
ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
- bridge);
+ bridge, flags);
if (ret) {
dev_err(hdmi->dev,
"Failed to attach external bridge: %d\n", ret);
@@ -1651,20 +1674,39 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
return 0;
}
+static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_hdmi *hdmi = data;
+
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+
+ mtk_hdmi_update_plugged_status(hdmi);
+
+ return 0;
+}
+
static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.hw_params = mtk_hdmi_audio_hw_params,
.audio_startup = mtk_hdmi_audio_startup,
.audio_shutdown = mtk_hdmi_audio_shutdown,
.digital_mute = mtk_hdmi_audio_digital_mute,
.get_eld = mtk_hdmi_audio_get_eld,
+ .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
};
-static void mtk_hdmi_register_audio_driver(struct device *dev)
+static int mtk_hdmi_register_audio_driver(struct device *dev)
{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
+ .data = hdmi,
};
struct platform_device *pdev;
@@ -1672,9 +1714,10 @@ static void mtk_hdmi_register_audio_driver(struct device *dev)
PLATFORM_DEVID_AUTO, &codec_data,
sizeof(codec_data));
if (IS_ERR(pdev))
- return;
+ return PTR_ERR(pdev);
DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+ return 0;
}
static int mtk_drm_hdmi_probe(struct platform_device *pdev)
@@ -1700,6 +1743,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
return ret;
}
+ mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
ret = mtk_hdmi_output_init(hdmi);
@@ -1708,7 +1752,11 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
return ret;
}
- mtk_hdmi_register_audio_driver(dev);
+ ret = mtk_hdmi_register_audio_driver(dev);
+ if (ret) {
+ dev_err(dev, "Failed to register audio driver: %d\n", ret);
+ return ret;
+ }
hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
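
The plugged-callback plumbing above follows the hdmi-codec contract: the ASoC side registers a callback through the new .hook_plugged_cb op, and the video driver must report the current state immediately at registration and again on every detect. The mutex serialises registration against hdmi_conn_detect(), which can run concurrently from the DRM poll worker. A sketch of the consumer side, with example_plugged_cb as a hypothetical name:

/* Hypothetical consumer callback (the real one lives in the hdmi-codec
 * ASoC driver and feeds an ALSA jack): it is invoked once with the
 * current state at registration, then on every subsequent detect. */
static void example_plugged_cb(struct device *codec_dev, bool plugged)
{
        dev_dbg(codec_dev, "HDMI cable %s\n",
                plugged ? "plugged" : "unplugged");
}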
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 3bb7ffe5fc39..e8c94915a4fc 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -16,6 +16,7 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@@ -135,6 +136,7 @@ struct meson_dw_hdmi_data {
struct meson_dw_hdmi {
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
@@ -148,9 +150,12 @@ struct meson_dw_hdmi {
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
+ unsigned long output_bus_fmt;
};
#define encoder_to_meson_dw_hdmi(x) \
container_of(x, struct meson_dw_hdmi, encoder)
+#define bridge_to_meson_dw_hdmi(x) \
+ container_of(x, struct meson_dw_hdmi, bridge)
static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
const char *compat)
@@ -297,6 +302,10 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
struct meson_drm *priv = dw_hdmi->priv;
unsigned int pixel_clock = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ pixel_clock /= 2;
+
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
if (pixel_clock >= 371250) {
@@ -368,29 +377,40 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
}
static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
if (!vic) {
- meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
- vclk_freq, vclk_freq, false);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
+ vclk_freq, vclk_freq, vclk_freq, false);
return;
}
+ /* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC doubles pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -398,11 +418,11 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
- vclk_freq, venc_freq, hdmi_freq,
+ DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
- meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
}
@@ -437,8 +457,9 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Enable normal output to PHY */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
- /* TMDS pattern setup (TOFIX Handle the YUV420 case) */
- if (mode->clock > 340000) {
+ /* TMDS pattern setup */
+ if (mode->clock > 340000 &&
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
@@ -613,6 +634,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct meson_drm *priv = connector->dev->dev_private;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
@@ -621,9 +644,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
- /* If sink max TMDS clock, we reject the mode */
+ /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
if (connector->display_info.max_tmds_clock &&
- mode->clock > connector->display_info.max_tmds_clock)
+ mode->clock > connector->display_info.max_tmds_clock &&
+ !drm_mode_is_420_only(&connector->display_info, mode) &&
+ !drm_mode_is_420_also(&connector->display_info, mode))
return MODE_BAD;
/* Check against non-VIC supported modes */
@@ -639,6 +664,15 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
/* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -646,8 +680,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- /* VENC double pixels for 1080i and 720p modes */
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC doubles pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -655,14 +692,19 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
- vclk_freq, venc_freq, hdmi_freq);
+ dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+ __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
- return meson_vclk_vic_supported_freq(vclk_freq);
+ return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
}
/* Encoder */
+static const u32 meson_dw_hdmi_out_bus_fmts[] = {
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+};
+
static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -672,16 +714,54 @@ static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
.destroy = meson_venc_hdmi_encoder_destroy,
};
-static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+static u32 *
+meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts = NULL;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) {
+ if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) {
+ *num_input_fmts = 1;
+ input_fmts = kcalloc(*num_input_fmts,
+ sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+
+ break;
+ }
+ }
+
+ return input_fmts;
+}
+
+static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
+
+ dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
+
+ DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt);
+
return 0;
}
-static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("\n");
@@ -693,9 +773,9 @@ static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
@@ -706,32 +786,47 @@ static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
+ bool yuv420_mode = false;
DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
+ ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
+ yuv420_mode = true;
+ }
+
/* VENC + VENC-DVI Mode setup */
- meson_venc_hdmi_mode_set(priv, vic, mode);
+ meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
/* VCLK Set clock */
dw_hdmi_set_vclk(dw_hdmi, mode);
- /* Setup YUV444 to HDMI-TX, no 10bit dithering */
- writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ /* Setup YUV420 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(2 | (2 << 2),
+ priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ else
+ /* Setup YUV444 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
}
-static const struct drm_encoder_helper_funcs
- meson_venc_hdmi_encoder_helper_funcs = {
- .atomic_check = meson_venc_hdmi_encoder_atomic_check,
- .disable = meson_venc_hdmi_encoder_disable,
- .enable = meson_venc_hdmi_encoder_enable,
- .mode_set = meson_venc_hdmi_encoder_mode_set,
+static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_check = meson_venc_hdmi_encoder_atomic_check,
+ .enable = meson_venc_hdmi_encoder_enable,
+ .disable = meson_venc_hdmi_encoder_disable,
+ .mode_set = meson_venc_hdmi_encoder_mode_set,
};
/* DW HDMI Regmap */
@@ -852,6 +947,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
struct dw_hdmi_plat_data *dw_plat_data;
+ struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
struct resource *res;
int irq;
@@ -953,8 +1049,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
/* Encoder */
- drm_encoder_helper_add(encoder, &meson_venc_hdmi_encoder_helper_funcs);
-
ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, "meson_hdmi");
if (ret) {
@@ -962,6 +1056,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return ret;
}
+ meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs;
+ drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0);
+
encoder->possible_crtcs = BIT(0);
DRM_DEBUG_DRIVER("encoder initialized\n");
@@ -974,8 +1071,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
dw_plat_data->phy_name = "meson_dw_hdmi_phy";
dw_plat_data->phy_data = meson_dw_hdmi;
- dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ dw_plat_data->ycbcr_420_allowed = true;
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
@@ -984,11 +1081,16 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
platform_set_drvdata(pdev, meson_dw_hdmi);
- meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
- &meson_dw_hdmi->dw_plat_data);
+ meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev,
+ &meson_dw_hdmi->dw_plat_data);
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
+ next_bridge = of_drm_find_bridge(pdev->dev.of_node);
+ if (next_bridge)
+ drm_bridge_attach(encoder, next_bridge,
+ &meson_dw_hdmi->bridge, 0);
+
DRM_DEBUG_DRIVER("HDMI controller initialized\n");
return 0;
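
The repeated clock adjustments above are easiest to check with a worked example; the numbers below follow dw_hdmi_set_vclk() step by step for a 4K60 4:2:0 mode and land exactly on the new table entry added to meson_vclk.c:

/* 3840x2160@60, mode->clock = 594000 kHz, bus format UYYVYY8_0_5X24:
 *
 *   pixel clock = 594000 / 2      = 297000   (halved for 4:2:0)
 *   phy_freq    = 297000 * 10     = 2970000  (TMDS = pixel clock * 10)
 *   venc_freq   = 297000 * 2      = 594000   (VENC repeats for YUV420)
 *   hdmi_freq   =                   297000
 *   vclk_freq   = max(venc, hdmi) = 594000
 *
 * i.e. precisely the MESON_VCLK_HDMI_594000_YUV420 entry below.
 */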
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f690793ae2d5..fdf26dac9fa8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -354,12 +354,17 @@ enum {
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
- MESON_VCLK_HDMI_594000
+ MESON_VCLK_HDMI_594000,
+/* 2970 /1 /1 /1 /5 /1 => /1 /2 */
+ MESON_VCLK_HDMI_594000_YUV420,
};
struct meson_vclk_params {
+ unsigned int pll_freq;
+ unsigned int phy_freq;
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
unsigned int pixel_freq;
- unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -367,8 +372,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
.pixel_freq = 54000,
- .pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -376,8 +384,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
- .pixel_freq = 54000,
- .pll_base_freq = 4320000,
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
+ .pixel_freq = 27000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -385,8 +396,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
- .pixel_freq = 148500,
- .pll_base_freq = 2970000,
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
+ .pixel_freq = 74250,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -394,8 +408,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 74250,
+ .venc_freq = 74250,
.pixel_freq = 74250,
- .pll_base_freq = 2970000,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -403,8 +420,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
+ .pll_freq = 2970000,
+ .phy_freq = 1485000,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
.pixel_freq = 148500,
- .pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -412,8 +432,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .vclk_freq = 297000,
+ .venc_freq = 297000,
.pixel_freq = 297000,
- .pll_base_freq = 5940000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -421,14 +444,29 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 5940000,
+ .vclk_freq = 594000,
+ .venc_freq = 594000,
.pixel_freq = 594000,
- .pll_base_freq = 5940000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ [MESON_VCLK_HDMI_594000_YUV420] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .vclk_freq = 594000,
+ .venc_freq = 594000,
+ .pixel_freq = 297000,
+ .pll_od1 = 2,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
{ /* sentinel */ },
};
@@ -701,6 +739,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
unsigned int od, m, frac, od1, od2, od3;
if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+ /* OD2 goes to the PHY, and needs to be *10, so keep OD3=1 */
od3 = 1;
if (od < 4) {
od1 = 2;
@@ -723,21 +762,28 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq)
+meson_vclk_vic_supported_freq(unsigned int phy_freq,
+ unsigned int vclk_freq)
{
int i;
- DRM_DEBUG_DRIVER("freq = %d\n", freq);
+ DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+ phy_freq, vclk_freq);
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ i, params[i].phy_freq,
+ FREQ_1000_1001(params[i].phy_freq/10)*10);
/* Match strict frequency */
- if (freq == params[i].pixel_freq)
+ if (phy_freq == params[i].phy_freq &&
+ vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (freq == FREQ_1000_1001(params[i].pixel_freq))
+ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+ vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -965,8 +1011,9 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci)
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
unsigned int freq;
@@ -986,7 +1033,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
* - venc_div = 1
* - encp encoder
*/
- meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+ meson_vclk_set(priv, phy_freq, 0, 0, 0,
VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
@@ -1008,9 +1055,11 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if (vclk_freq == params[freq].pixel_freq ||
- vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
- if (vclk_freq != params[freq].pixel_freq)
+ if ((phy_freq == params[freq].phy_freq ||
+ phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+ (vclk_freq == params[freq].vclk_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
vic_alternate_clock = false;
@@ -1039,7 +1088,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- meson_vclk_set(priv, params[freq].pll_base_freq,
+ meson_vclk_set(priv, params[freq].pll_freq,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
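
The 1000/1001 "drop-frame" matching now has to agree on two frequencies at once, which is why the PHY value is divided by 10 before rounding. A worked example against the MESON_VCLK_HDMI_297000 entry (phy_freq = 2970000, vclk_freq = 297000, values in kHz), assuming the file's FREQ_1000_1001() computes f * 1000 / 1001 as used elsewhere here:

/*   FREQ_1000_1001(2970000 / 10) * 10 = 296703 * 10 = 2967030
 *   FREQ_1000_1001(297000)            = 296703
 *
 * so a 59.94-style variant requesting vclk = 296703 with phy = 2967030
 * still matches the same entry; rounding the PHY frequency only after
 * dividing by 10 keeps both values derived from one rounded pixel clock.
 */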
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index b62125540aef..aed0ab2efa71 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,10 +25,11 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq);
+meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci);
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci);
#endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 4efd7864d5bf..f93c725b6f02 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -946,7 +946,9 @@ bool meson_venc_hdmi_venc_repeat(int vic)
EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode)
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
union meson_hdmi_venc_mode vmode_dmt;
@@ -1528,14 +1530,14 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
reg |= VPU_HDMI_INV_VSYNC;
- /* Output data format: CbYCr */
- reg |= VPU_HDMI_OUTPUT_CBYCR;
+ /* Output data format */
+ reg |= ycrcb_map;
/*
* Write rate to the async FIFO between VENC and HDMI.
* One write every 2 wr_clk.
*/
- if (venc_repeat)
+ if (venc_repeat || yuv420_mode)
reg |= VPU_HDMI_WR_RATE(2);
/*
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 576768bdd08d..9138255ffc9e 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -60,7 +60,9 @@ extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode);
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode);
unsigned int meson_venci_get_field(struct meson_drm *priv);
void meson_venc_enable_vsync(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 1bd6b6d15ffb..541f9eb2a135 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -213,8 +213,10 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
meson_venci_cvbs_mode_set(priv, meson_mode->enci);
/* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ true);
}
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index aa32aad222c2..9691252d6233 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -95,7 +95,6 @@
#define MATROX_DPMS_CLEARED (-1)
#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
-#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
#define to_mga_connector(x) container_of(x, struct mga_connector, base)
struct mga_crtc {
@@ -110,12 +109,6 @@ struct mga_mode_info {
struct mga_crtc *crtc;
};
-struct mga_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -185,6 +178,8 @@ struct mga_device {
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
+
+ struct drm_encoder encoder;
};
static inline enum mga_type
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 62a8e9ccb16d..d90e83959fca 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mgag200_drv.h"
@@ -1449,76 +1450,6 @@ static void mga_crtc_init(struct mga_device *mdev)
drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
}
-/*
- * The encoder comes after the CRTC in the output pipeline, but before
- * the connector. It's responsible for ensuring that the digital
- * stream is appropriately converted into the output format. Setup is
- * very simple in this case - all we have to do is inform qemu of the
- * colour depth in order to ensure that it displays appropriately
- */
-
-/*
- * These functions are analagous to those in the CRTC code, but are intended
- * to handle any encoder-specific limitations
- */
-static void mga_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
-{
- return;
-}
-
-static void mga_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mga_encoder);
-}
-
-static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
- .dpms = mga_encoder_dpms,
- .mode_set = mga_encoder_mode_set,
- .prepare = mga_encoder_prepare,
- .commit = mga_encoder_commit,
-};
-
-static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
- .destroy = mga_encoder_destroy,
-};
-
-static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct mga_encoder *mga_encoder;
-
- mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
- if (!mga_encoder)
- return NULL;
-
- encoder = &mga_encoder->base;
- encoder->possible_crtcs = 0x1;
-
- drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
-
- return encoder;
-}
-
-
static int mga_vga_get_modes(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1686,8 +1617,9 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
int mgag200_modeset_init(struct mga_device *mdev)
{
- struct drm_encoder *encoder;
+ struct drm_encoder *encoder = &mdev->encoder;
struct drm_connector *connector;
+ int ret;
mdev->mode_info.mode_config_initialized = true;
@@ -1698,11 +1630,15 @@ int mgag200_modeset_init(struct mga_device *mdev)
mga_crtc_init(mdev);
- encoder = mga_encoder_init(mdev->dev);
- if (!encoder) {
- DRM_ERROR("mga_encoder_init failed\n");
- return -1;
+ ret = drm_simple_encoder_init(mdev->dev, encoder,
+ DRM_MODE_ENCODER_DAC);
+ if (ret) {
+ drm_err(mdev->dev,
+ "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ return ret;
}
+ encoder->possible_crtcs = 0x1;
connector = mga_vga_init(mdev->dev);
if (!connector) {
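
The mgag200 hunks replace a hand-rolled, heap-allocated dummy encoder with drm_simple_encoder_init() on an encoder embedded in struct mga_device. The helper supplies stub funcs whose .destroy is drm_encoder_cleanup(), so no kfree() is needed: the encoder's lifetime simply follows the device structure. A condensed sketch of the pattern for a hypothetical driver:

struct my_device {                      /* hypothetical wrapper */
        struct drm_device *dev;
        struct drm_encoder encoder;     /* embedded, never kfree()d */
};

static int my_modeset_init(struct my_device *mdev)
{
        int ret;

        ret = drm_simple_encoder_init(mdev->dev, &mdev->encoder,
                                      DRM_MODE_ENCODER_DAC);
        if (ret)
                return ret;

        mdev->encoder.possible_crtcs = 0x1;
        return 0;
}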
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7d9e63e20ded..724024a2243a 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = {
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
- u32 bin, val;
+ u32 val;
+
+ /*
+ * If the OPP table specifies an opp-supported-hw property then we have
+ * to set something with dev_pm_opp_set_supported_hw() or the table
+ * doesn't get populated, so pick an arbitrary value that should
+ * ensure the default frequencies are selected but not conflict with any
+ * actual bins.
+ */
+ val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin");
- /* If a nvmem cell isn't defined, nothing to do */
- if (IS_ERR(cell))
- return;
+ if (!IS_ERR(cell)) {
+ void *buf = nvmem_cell_read(cell, NULL);
+
+ if (!IS_ERR(buf)) {
+ u8 bin = *((u8 *) buf);
- bin = *((u32 *) nvmem_cell_read(cell, NULL));
- nvmem_cell_put(cell);
+ val = (1 << bin);
+ kfree(buf);
+ }
- val = (1 << bin);
+ nvmem_cell_put(cell);
+ }
dev_pm_opp_set_supported_hw(dev, &val, 1);
}
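
The speed-bin arithmetic is easy to misread, so a worked pass through check_speed_bin() may help: the fused bin selects one bit of the opp-supported-hw mask, and the 0x80 fallback deliberately points at a bit no real bin is expected to use.

/* Worked examples (sketch):
 *   fused bin = 0  ->  val = 1 << 0 = 0x01  (only bin-0 OPP entries match)
 *   fused bin = 2  ->  val = 1 << 2 = 0x04  (only bin-2 OPP entries match)
 *   no nvmem cell  ->  val = 0x80 (bit 7), so only OPP entries without a
 *                      bin restriction remain, i.e. the default table.
 */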
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 748cd379065f..c4e71abbdd53 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
- int count, i;
- u64 iova;
-
if (IS_ERR_OR_NULL(bo))
return;
- count = bo->size >> PAGE_SHIFT;
- iova = bo->iova;
-
- for (i = 0; i < count; i++, iova += PAGE_SIZE) {
- iommu_unmap(gmu->domain, iova, PAGE_SIZE);
- __free_pages(bo->pages[i], 0);
- }
-
- kfree(bo->pages);
+ dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
kfree(bo);
}
@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
size_t size)
{
struct a6xx_gmu_bo *bo;
- int ret, count, i;
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
bo->size = PAGE_ALIGN(size);
- count = bo->size >> PAGE_SHIFT;
+ bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
- bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
- if (!bo->pages) {
+ if (!bo->virt) {
kfree(bo);
return ERR_PTR(-ENOMEM);
}
- for (i = 0; i < count; i++) {
- bo->pages[i] = alloc_page(GFP_KERNEL);
- if (!bo->pages[i])
- goto err;
- }
-
- bo->iova = gmu->uncached_iova_base;
-
- for (i = 0; i < count; i++) {
- ret = iommu_map(gmu->domain,
- bo->iova + (PAGE_SIZE * i),
- page_to_phys(bo->pages[i]), PAGE_SIZE,
- IOMMU_READ | IOMMU_WRITE);
-
- if (ret) {
- DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
- for (i = i - 1 ; i >= 0; i--)
- iommu_unmap(gmu->domain,
- bo->iova + (PAGE_SIZE * i),
- PAGE_SIZE);
-
- goto err;
- }
- }
-
- bo->virt = vmap(bo->pages, count, VM_IOREMAP,
- pgprot_writecombine(PAGE_KERNEL));
- if (!bo->virt)
- goto err;
-
- /* Align future IOVA addresses on 1MB boundaries */
- gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
return bo;
-
-err:
- for (i = 0; i < count; i++) {
- if (bo->pages[i])
- __free_pages(bo->pages[i], 0);
- }
-
- kfree(bo->pages);
- kfree(bo);
-
- return ERR_PTR(-ENOMEM);
-}
-
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
- int ret;
-
- /*
- * The GMU address space is hardcoded to treat the range
- * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
- * between the GMU and the CPU will live in this space
- */
- gmu->uncached_iova_base = 0x60000000;
-
-
- gmu->domain = iommu_domain_alloc(&platform_bus_type);
- if (!gmu->domain)
- return -ENODEV;
-
- ret = iommu_attach_device(gmu->domain, gmu->dev);
-
- if (ret) {
- iommu_domain_free(gmu->domain);
- gmu->domain = NULL;
- }
-
- return ret;
}
/* Return the 'arc-level' for the given frequency */
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
a6xx_gmu_memory_free(gmu, gmu->hfi);
- iommu_detach_device(gmu->domain, gmu->dev);
-
- iommu_domain_free(gmu->domain);
-
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ /* Pass force_dma false to require the DT to set the dma region */
+ ret = of_dma_configure(gmu->dev, node, false);
+ if (ret)
+ return ret;
+
+ /* Set the mask after the of_dma_configure() */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
+ if (ret)
+ return ret;
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
- /* Set up the IOMMU context bank */
- ret = a6xx_gmu_memory_probe(gmu);
- if (ret)
- goto err_put_device;
-
/* Allocate memory for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ err_mmio:
err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi);
- if (gmu->domain) {
- iommu_detach_device(gmu->domain, gmu->dev);
-
- iommu_domain_free(gmu->domain);
- }
ret = -ENODEV;
err_put_device:
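
Two ordering details in the GMU probe path above are worth spelling out: of_dma_configure() (re)derives the device's DMA masks from the DT dma-ranges, so it would overwrite a mask set earlier, and the 31-bit cap applied afterwards is what keeps dma_alloc_wc() buffers below 0x80000000, matching the uncached 0x60000000 - 0x80000000 window the removed iommu code used to carve out by hand. A sketch of the sequence:

/* Probe-time DMA setup; error handling trimmed for brevity. */
ret = of_dma_configure(gmu->dev, node, false);  /* false: DT must supply dma-ranges */
if (ret)
        return ret;

/* Must come after of_dma_configure(), which resets the masks. */
ret = dma_set_mask_and_coherent(gmu->dev, DMA_BIT_MASK(31));
if (ret)
        return ret;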
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 2af91ed7ed0c..4af65a36d5ca 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -12,8 +12,7 @@
struct a6xx_gmu_bo {
void *virt;
size_t size;
- u64 iova;
- struct page **pages;
+ dma_addr_t iova;
};
/*
@@ -49,9 +48,6 @@ struct a6xx_gmu {
int hfi_irq;
int gmu_irq;
- struct iommu_domain *domain;
- u64 uncached_iova_base;
-
struct device *gxpd;
int idle_level;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e67c20c415af..24c974c293e5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers {
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
- "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 7fd29829b2fa..1d5c43c22269 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
return NULL;
for (i = 0; i < l; i++)
- buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
ascii85_encode(src[i], out));
return buf;
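
The snprintf()-to-scnprintf() switch above is a classic truncation fix: snprintf() returns the length that *would* have been written, so a truncated iteration pushes buf_itr past the end of the buffer and the next call writes out of bounds; scnprintf() returns the bytes actually stored. A minimal illustration:

char buf[8];
int n;

n = snprintf(buf, sizeof(buf), "0123456789");   /* n == 10, > sizeof(buf) */
n = scnprintf(buf, sizeof(buf), "0123456789");  /* n == 7, bytes stored    */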
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index bf513411b243..17448505a9b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1272,6 +1272,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.atomic_destroy_state = dpu_crtc_destroy_state,
.late_register = dpu_crtc_late_register,
.early_unregister = dpu_crtc_early_unregister,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f8ac3bf60fd6..a1b79ee2bd9d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -164,7 +164,6 @@ enum dpu_enc_rc_states {
* clks and resources after IDLE_TIMEOUT time.
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
- * @mode_set_complete: flag to indicate modeset completion
* @idle_timeout: idle timeout duration in milliseconds
*/
struct dpu_encoder_virt {
@@ -202,7 +201,6 @@ struct dpu_encoder_virt {
struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work;
struct msm_display_topology topology;
- bool mode_set_complete;
u32 idle_timeout;
};
@@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config(
struct msm_display_info *disp_info;
if (!phys_enc->hw_mdptop || !phys_enc->parent) {
- DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
@@ -512,7 +510,6 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
if (cur_mode->vdisplay == adj_mode->vdisplay &&
cur_mode->hdisplay == adj_mode->hdisplay &&
drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
- adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
}
@@ -563,12 +560,13 @@ static int dpu_encoder_virt_atomic_check(
const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
+ struct dpu_global_state *global_state;
int i = 0;
int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) {
DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
- drm_enc != 0, crtc_state != 0, conn_state != 0);
+ drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
return -EINVAL;
}
@@ -579,6 +577,7 @@ static int dpu_encoder_virt_atomic_check(
dpu_kms = to_dpu_kms(priv->kms);
mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_atomic_check(DRMID(drm_enc));
/*
@@ -610,17 +609,15 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
- /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ /* Reserve dynamic resources now. */
if (!ret) {
/*
* Avoid reserving resources when mode set is pending. Topology
* info may not be available to complete reservation.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)
- && dpu_enc->mode_set_complete) {
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
- topology, true);
- dpu_enc->mode_set_complete = false;
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ drm_enc, crtc_state, topology);
}
}
@@ -957,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc;
struct dpu_crtc_state *cstate;
- struct dpu_rm_hw_iter hw_iter;
+ struct dpu_global_state *global_state;
struct msm_display_topology topology;
- struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
- struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
- int num_lm = 0, num_ctl = 0;
- int i, j, ret;
+ struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_pp;
+ int i, j;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -976,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_kms = to_dpu_kms(priv->kms);
connector_list = &dpu_kms->dev->mode_config.connector_list;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ if (IS_ERR_OR_NULL(global_state)) {
+ DPU_ERROR("Failed to get global state");
+ return;
+ }
+
trace_dpu_enc_mode_set(DRMID(drm_enc));
list_for_each_entry(conn_iter, connector_list, head)
@@ -996,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
- /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
- topology, false);
- if (ret) {
- DPU_ERROR_ENC(dpu_enc,
- "failed to reserve hw resources, %d\n", ret);
- return;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
- num_ctl++;
- }
+ /* Query resource that have been reserved in atomic check step. */
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
- num_lm++;
- }
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
+ : NULL;
cstate = to_dpu_crtc_state(drm_crtc->state);
for (i = 0; i < num_lm; i++) {
int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
- cstate->mixers[i].hw_lm = hw_lm[i];
- cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
}
cstate->num_mixers = num_lm;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ int num_blk;
+ struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
- goto error;
+ return;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
- goto error;
+ return;
}
phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[i];
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
- DPU_HW_BLK_INTF);
- for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
+ global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
+ hw_blk, ARRAY_SIZE(hw_blk));
+ for (j = 0; j < num_blk; j++) {
struct dpu_hw_intf *hw_intf;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
-
- hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ hw_intf = to_dpu_hw_intf(hw_blk[j]);
if (hw_intf->idx == phys->intf_idx)
phys->hw_intf = hw_intf;
}
@@ -1074,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!phys->hw_intf) {
DPU_ERROR_ENC(dpu_enc,
"no intf block assigned at idx: %d\n", i);
- goto error;
+ return;
}
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
-
- dpu_enc->mode_set_complete = true;
-
-error:
- dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1182,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
+ struct dpu_global_state *global_state;
int i = 0;
if (!drm_enc) {
@@ -1200,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_disable(DRMID(drm_enc));
@@ -1229,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- dpu_rm_release(&dpu_kms->rm, drm_enc);
+ dpu_rm_release(global_state, drm_enc);
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -1965,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
PTR_ERR(enc));
- return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -1978,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (IS_ERR_OR_NULL(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
PTR_ERR(enc));
- return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
}
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -2009,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) {
- DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
+ DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
return -EINVAL;
}
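
The dpu_encoder rework above inverts the old flow: dpu_rm_reserve() now runs only in atomic check against the duplicated global state, and dpu_encoder_virt_mode_set() merely reads the assignment back, which is why its error paths no longer release anything. A sketch of the read-back idiom the hunks repeat, hedged to the signatures visible in this diff and assuming the surrounding mode_set context (dpu_kms, global_state, drm_enc, cstate):

struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
int num_lm, i;

/* Query what atomic check reserved for this encoder; returns a count. */
num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                                       drm_enc->base.id, DPU_HW_BLK_LM,
                                       hw_lm, ARRAY_SIZE(hw_lm));
for (i = 0; i < num_lm; i++)
        cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);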
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 39e1e280ba44..8493d68ad841 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
- DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
return;
}
@@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
u32 flush_mask = 0;
if (!phys_enc->hw_pp) {
- DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index c71c18de5966..b5a49050d131 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
- DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
return;
}
@@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
if (!phys_enc->hw_intf) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 85468981632d..0ead64d3f63d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -90,6 +90,16 @@ struct dpu_hw_intf {
};
/**
+ * to_dpu_hw_intf - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3d6f46b1db30..d73cb73e938b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -97,6 +97,16 @@ struct dpu_hw_pingpong {
};
/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index cb08fafb1dc1..ce19f1d39367 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
struct dpu_debugfs_regset32 *regset = s->private;
struct dpu_kms *dpu_kms = regset->dpu_kms;
- struct drm_device *dev;
- struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
if (!dpu_kms->mmio)
return 0;
- dev = dpu_kms->dev;
- priv = dev->dev_private;
base = dpu_kms->mmio + regset->offset;
/* insert padding spaces, if needed */
@@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
}
#endif
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct dpu_global_state *
+dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
+{
+ return to_dpu_global_state(dpu_kms->global_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and returns
+ * a duplicated private object state to mutate in the atomic path.
+ */
+struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_private_state *priv_state;
+ int ret;
+
+ ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_state = drm_atomic_get_private_obj_state(s,
+ &dpu_kms->global_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_dpu_global_state(priv_state);
+}
+
+static struct drm_private_state *
+dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dpu_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dpu_global_state *dpu_state = to_dpu_global_state(state);
+
+ kfree(dpu_state);
+}
+
+static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
+ .atomic_duplicate_state = dpu_kms_global_duplicate_state,
+ .atomic_destroy_state = dpu_kms_global_destroy_state,
+};
+
+static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct dpu_global_state *state;
+
+ drm_modeset_lock_init(&dpu_kms->global_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
+ &state->base,
+ &dpu_kms_global_state_funcs);
+ return 0;
+}
+
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
return dpu_crtc_vblank(crtc, true);
@@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
- struct dpu_kms *dpu_kms;
- struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
@@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
if (!kms)
return;
- dpu_kms = to_dpu_kms(kms);
- dev = dpu_kms->dev;
/* Call prepare_commit for all affected encoders */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
@@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
- struct drm_device *dev;
int i;
- dev = dpu_kms->dev;
-
if (dpu_kms->hw_intr)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
@@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
struct drm_device *dev;
- struct msm_drm_private *priv;
int i, rc = -EINVAL;
if (!kms) {
@@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
- priv = dev->dev_private;
+
+ rc = dpu_kms_global_obj_init(dpu_kms);
+ if (rc)
+ return rc;
atomic_set(&dpu_kms->bandwidth_ref, 0);
@@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
- struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
- ddev = dpu_kms->dev;
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index c6169e7df19d..211f5de99a44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -111,6 +111,13 @@ struct dpu_kms {
struct dpu_core_perf perf;
+ /*
+ * Global private object state. Do not access directly; use
+ * dpu_kms_get_global_state()
+ */
+ struct drm_modeset_lock global_state_lock;
+ struct drm_private_obj global_state;
+
struct dpu_rm rm;
bool rm_init;
@@ -139,6 +146,25 @@ struct vsync_info {
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+struct dpu_global_state {
+ struct drm_private_state base;
+
+ uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_enc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
+ uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+};
+
+struct dpu_global_state
+ *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
+struct dpu_global_state
+ *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
+
/**
* Debugfs functions - extra helper functions for debugfs support
*
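The four arrays added to struct dpu_global_state map a hardware block index
to the id of the DRM encoder that reserved it (0 = free), so ownership checks
reduce to plain array lookups. For example (hypothetical encoder id):

	/* reserve LM_1 for encoder 33, then test ownership */
	global_state->mixer_to_enc_id[LM_1 - LM_0] = 33;
	bool busy = global_state->mixer_to_enc_id[LM_1 - LM_0] != 0;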
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 23f5b1433b35..9b62451b01ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -12,8 +12,12 @@
#include "dpu_encoder.h"
#include "dpu_trace.h"
-#define RESERVED_BY_OTHER(h, r) \
- ((h)->enc_id && (h)->enc_id != r)
+
+static inline bool reserved_by_other(uint32_t *res_map, int idx,
+ uint32_t enc_id)
+{
+ return res_map[idx] && res_map[idx] != enc_id;
+}
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
@@ -25,171 +29,43 @@ struct dpu_rm_requirements {
struct dpu_encoder_hw_resources hw_res;
};
-
-/**
- * struct dpu_rm_hw_blk - hardware block tracking list member
- * @list: List head for list of all hardware blocks tracking items
- * @id: Hardware ID number, within it's own space, ie. LM_X
- * @enc_id: Encoder id to which this blk is binded
- * @hw: Pointer to the hardware register access object for this block
- */
-struct dpu_rm_hw_blk {
- struct list_head list;
- uint32_t id;
- uint32_t enc_id;
- struct dpu_hw_blk *hw;
-};
-
-void dpu_rm_init_hw_iter(
- struct dpu_rm_hw_iter *iter,
- uint32_t enc_id,
- enum dpu_hw_blk_type type)
-{
- memset(iter, 0, sizeof(*iter));
- iter->enc_id = enc_id;
- iter->type = type;
-}
-
-static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+int dpu_rm_destroy(struct dpu_rm *rm)
{
- struct list_head *blk_list;
-
- if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
- DPU_ERROR("invalid rm\n");
- return false;
- }
+ int i;
- i->hw = NULL;
- blk_list = &rm->hw_blks[i->type];
+ for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
+ struct dpu_hw_pingpong *hw;
- if (i->blk && (&i->blk->list == blk_list)) {
- DPU_DEBUG("attempt resume iteration past last\n");
- return false;
- }
-
- i->blk = list_prepare_entry(i->blk, blk_list, list);
-
- list_for_each_entry_continue(i->blk, blk_list, list) {
- if (i->enc_id == i->blk->enc_id) {
- i->hw = i->blk->hw;
- DPU_DEBUG("found type %d id %d for enc %d\n",
- i->type, i->blk->id, i->enc_id);
- return true;
+ if (rm->pingpong_blks[i]) {
+ hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
+ dpu_hw_pingpong_destroy(hw);
}
}
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
+ struct dpu_hw_mixer *hw;
- DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
-
- return false;
-}
-
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
-{
- bool ret;
-
- mutex_lock(&rm->rm_lock);
- ret = _dpu_rm_get_hw_locked(rm, i);
- mutex_unlock(&rm->rm_lock);
-
- return ret;
-}
-
-static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
-{
- switch (type) {
- case DPU_HW_BLK_LM:
- dpu_hw_lm_destroy(hw);
- break;
- case DPU_HW_BLK_CTL:
- dpu_hw_ctl_destroy(hw);
- break;
- case DPU_HW_BLK_PINGPONG:
- dpu_hw_pingpong_destroy(hw);
- break;
- case DPU_HW_BLK_INTF:
- dpu_hw_intf_destroy(hw);
- break;
- case DPU_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
- case DPU_HW_BLK_TOP:
- /* Top is a singleton, not managed in hw_blks list */
- case DPU_HW_BLK_MAX:
- default:
- DPU_ERROR("unsupported block type %d\n", type);
- break;
- }
-}
-
-int dpu_rm_destroy(struct dpu_rm *rm)
-{
- struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
- enum dpu_hw_blk_type type;
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
- list) {
- list_del(&hw_cur->list);
- _dpu_rm_hw_destroy(type, hw_cur->hw);
- kfree(hw_cur);
+ if (rm->mixer_blks[i]) {
+ hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
+ dpu_hw_lm_destroy(hw);
}
}
+ for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
+ struct dpu_hw_ctl *hw;
- mutex_destroy(&rm->rm_lock);
-
- return 0;
-}
-
-static int _dpu_rm_hw_blk_create(
- struct dpu_rm *rm,
- const struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- enum dpu_hw_blk_type type,
- uint32_t id,
- const void *hw_catalog_info)
-{
- struct dpu_rm_hw_blk *blk;
- void *hw;
-
- switch (type) {
- case DPU_HW_BLK_LM:
- hw = dpu_hw_lm_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_CTL:
- hw = dpu_hw_ctl_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_PINGPONG:
- hw = dpu_hw_pingpong_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_INTF:
- hw = dpu_hw_intf_init(id, mmio, cat);
- break;
- case DPU_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
- case DPU_HW_BLK_TOP:
- /* Top is a singleton, not managed in hw_blks list */
- case DPU_HW_BLK_MAX:
- default:
- DPU_ERROR("unsupported block type %d\n", type);
- return -EINVAL;
- }
-
- if (IS_ERR_OR_NULL(hw)) {
- DPU_ERROR("failed hw object creation: type %d, err %ld\n",
- type, PTR_ERR(hw));
- return -EFAULT;
+ if (rm->ctl_blks[i]) {
+ hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
+ dpu_hw_ctl_destroy(hw);
+ }
}
+ for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
+ struct dpu_hw_intf *hw;
- blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (!blk) {
- _dpu_rm_hw_destroy(type, hw);
- return -ENOMEM;
+ if (rm->intf_blks[i]) {
+ hw = to_dpu_hw_intf(rm->intf_blks[i]);
+ dpu_hw_intf_destroy(hw);
+ }
}
- blk->id = id;
- blk->hw = hw;
- blk->enc_id = 0;
- list_add_tail(&blk->list, &rm->hw_blks[type]);
-
return 0;
}
@@ -198,7 +74,6 @@ int dpu_rm_init(struct dpu_rm *rm,
void __iomem *mmio)
{
int rc, i;
- enum dpu_hw_blk_type type;
if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n");
@@ -208,13 +83,9 @@ int dpu_rm_init(struct dpu_rm *rm,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
- mutex_init(&rm->rm_lock);
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++)
- INIT_LIST_HEAD(&rm->hw_blks[type]);
-
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
if (lm->pingpong == PINGPONG_MAX) {
@@ -222,12 +93,17 @@ int dpu_rm_init(struct dpu_rm *rm,
continue;
}
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
- cat->mixer[i].id, &cat->mixer[i]);
- if (rc) {
- DPU_ERROR("failed: lm hw not available\n");
+ if (lm->id < LM_0 || lm->id >= LM_MAX) {
+ DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
+ continue;
+ }
+ hw = dpu_hw_lm_init(lm->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed lm object creation: err %d\n", rc);
goto fail;
}
+ rm->mixer_blks[lm->id - LM_0] = &hw->base;
if (!rm->lm_max_width) {
rm->lm_max_width = lm->sblk->maxwidth;
@@ -243,35 +119,59 @@ int dpu_rm_init(struct dpu_rm *rm,
}
for (i = 0; i < cat->pingpong_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
- cat->pingpong[i].id, &cat->pingpong[i]);
- if (rc) {
- DPU_ERROR("failed: pp hw not available\n");
+ struct dpu_hw_pingpong *hw;
+ const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
+
+ if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
+ DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
+ continue;
+ }
+ hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed pingpong object creation: err %d\n",
+ rc);
goto fail;
}
+ rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
}
for (i = 0; i < cat->intf_count; i++) {
- if (cat->intf[i].type == INTF_NONE) {
+ struct dpu_hw_intf *hw;
+ const struct dpu_intf_cfg *intf = &cat->intf[i];
+
+ if (intf->type == INTF_NONE) {
DPU_DEBUG("skip intf %d with type none\n", i);
continue;
}
-
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
- cat->intf[i].id, &cat->intf[i]);
- if (rc) {
- DPU_ERROR("failed: intf hw not available\n");
+ if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
+ DPU_ERROR("skip intf %d with invalid id\n", intf->id);
+ continue;
+ }
+ hw = dpu_hw_intf_init(intf->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed intf object creation: err %d\n", rc);
goto fail;
}
+ rm->intf_blks[intf->id - INTF_0] = &hw->base;
}
for (i = 0; i < cat->ctl_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
- cat->ctl[i].id, &cat->ctl[i]);
- if (rc) {
- DPU_ERROR("failed: ctl hw not available\n");
+ struct dpu_hw_ctl *hw;
+ const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
+
+ if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
+ DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
+ continue;
+ }
+ hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed ctl object creation: err %d\n", rc);
goto fail;
}
+ rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
}
return 0;
@@ -279,7 +179,7 @@ int dpu_rm_init(struct dpu_rm *rm,
fail:
dpu_rm_destroy(rm);
- return rc;
+ return rc ? rc : -EFAULT;
}
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
@@ -288,85 +188,81 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
}
/**
+ * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
+ * @rm: dpu resource manager handle
+ * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ * @peer_idx: index of other mixer in rm->mixer_blks[]
+ * @Return: true if rm->mixer_blks[peer_idx] is a peer of
+ * rm->mixer_blks[primary_idx]
+ */
+static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
+ int peer_idx)
+{
+ const struct dpu_lm_cfg *prim_lm_cfg;
+ const struct dpu_lm_cfg *peer_cfg;
+
+ prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
+ peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
+
+ if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ return true;
+}
+
+/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
* @enc_id: encoder id requesting for allocation
- * @reqs: proposed use case requirements
- * @lm: proposed layer mixer, function checks if lm, and all other hardwired
- * blocks connected to the lm (pp) is available and appropriate
- * @pp: output parameter, pingpong block attached to the layer mixer.
- * NULL if pp was not available, or not matching requirements.
- * @primary_lm: if non-null, this function check if lm is compatible primary_lm
- * as well as satisfying all other requirements
+ * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
+ * if lm, and all other hardwired blocks connected to the lm (pp) is
+ * available and appropriate
+ * @pp_idx: output parameter, index of pingpong block attached to the layer
+ * mixer in rm->pingpong_blks[].
* @Return: true if lm matches all requirements, false otherwise
*/
-static bool _dpu_rm_check_lm_and_get_connected_blks(
- struct dpu_rm *rm,
- uint32_t enc_id,
- struct dpu_rm_requirements *reqs,
- struct dpu_rm_hw_blk *lm,
- struct dpu_rm_hw_blk **pp,
- struct dpu_rm_hw_blk *primary_lm)
+static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id, int lm_idx, int *pp_idx)
{
- const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
- struct dpu_rm_hw_iter iter;
-
- *pp = NULL;
-
- DPU_DEBUG("check lm %d pp %d\n",
- lm_cfg->id, lm_cfg->pingpong);
-
- /* Check if this layer mixer is a peer of the proposed primary LM */
- if (primary_lm) {
- const struct dpu_lm_cfg *prim_lm_cfg =
- to_dpu_hw_mixer(primary_lm->hw)->cap;
-
- if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
- DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
- prim_lm_cfg->id);
- return false;
- }
- }
+ const struct dpu_lm_cfg *lm_cfg;
+ int idx;
/* Already reserved? */
- if (RESERVED_BY_OTHER(lm, enc_id)) {
- DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- if (iter.blk->id == lm_cfg->pingpong) {
- *pp = iter.blk;
- break;
- }
- }
-
- if (!*pp) {
+ lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
+ idx = lm_cfg->pingpong - PINGPONG_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
return false;
}
- if (RESERVED_BY_OTHER(*pp, enc_id)) {
- DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
- (*pp)->id);
+ if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
+ lm_cfg->pingpong);
return false;
}
-
+ *pp_idx = idx;
return true;
}
-static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
struct dpu_rm_requirements *reqs)
{
- struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
- struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
- struct dpu_rm_hw_iter iter_i, iter_j;
- int lm_count = 0;
- int i, rc = 0;
+ int lm_idx[MAX_BLOCKS];
+ int pp_idx[MAX_BLOCKS];
+ int i, j, lm_count = 0;
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
@@ -374,36 +270,40 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
}
/* Find a primary mixer */
- dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology.num_lm &&
- _dpu_rm_get_hw_locked(rm, &iter_i)) {
- memset(&lm, 0, sizeof(lm));
- memset(&pp, 0, sizeof(pp));
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; i++) {
+ if (!rm->mixer_blks[i])
+ continue;
lm_count = 0;
- lm[lm_count] = iter_i.blk;
+ lm_idx[lm_count] = i;
- if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, enc_id, reqs, lm[lm_count],
- &pp[lm_count], NULL))
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
+ enc_id, i, &pp_idx[lm_count])) {
continue;
+ }
++lm_count;
/* Valid primary mixer found, find matching peers */
- dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+ for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; j++) {
+ if (!rm->mixer_blks[j])
+ continue;
- while (lm_count != reqs->topology.num_lm &&
- _dpu_rm_get_hw_locked(rm, &iter_j)) {
- if (iter_i.blk == iter_j.blk)
+ if (!_dpu_rm_check_lm_peer(rm, i, j)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
+ LM_0 + i);
continue;
+ }
- if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, enc_id, reqs, iter_j.blk,
- &pp[lm_count], iter_i.blk))
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
+ global_state, enc_id, j,
+ &pp_idx[lm_count])) {
continue;
+ }
- lm[lm_count] = iter_j.blk;
+ lm_idx[lm_count] = j;
++lm_count;
}
}
@@ -413,65 +313,65 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
return -ENAVAIL;
}
- for (i = 0; i < ARRAY_SIZE(lm); i++) {
- if (!lm[i])
- break;
+ for (i = 0; i < lm_count; i++) {
+ global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
+ global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
- lm[i]->enc_id = enc_id;
- pp[i]->enc_id = enc_id;
-
- trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ pp_idx[i] + PINGPONG_0);
}
- return rc;
+ return 0;
}
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
const struct msm_display_topology *top)
{
- struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
- struct dpu_rm_hw_iter iter;
- int i = 0, num_ctls = 0;
- bool needs_split_display = false;
-
- memset(&ctls, 0, sizeof(ctls));
+ int ctl_idx[MAX_BLOCKS];
+ int i = 0, j, num_ctls;
+ bool needs_split_display;
/* each hw_intf needs its own hw_ctrl to program its control path */
num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
- unsigned long features = ctl->caps->features;
+ for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
+ const struct dpu_hw_ctl *ctl;
+ unsigned long features;
bool has_split_display;
- if (RESERVED_BY_OTHER(iter.blk, enc_id))
+ if (!rm->ctl_blks[j])
+ continue;
+ if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
continue;
+ ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
+ features = ctl->caps->features;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
- DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+ DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);
if (needs_split_display != has_split_display)
continue;
- ctls[i] = iter.blk;
- DPU_DEBUG("ctl %d match\n", iter.blk->id);
+ ctl_idx[i] = j;
+ DPU_DEBUG("ctl %d match\n", j + CTL_0);
if (++i == num_ctls)
break;
+
}
if (i != num_ctls)
return -ENAVAIL;
- for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
- ctls[i]->enc_id = enc_id;
- trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
+ for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
+ global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
+ trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
}
return 0;
@@ -479,40 +379,34 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
- uint32_t id,
- enum dpu_hw_blk_type type)
+ uint32_t id)
{
- struct dpu_rm_hw_iter iter;
- int ret = 0;
-
- /* Find the block entry in the rm, and note the reservation */
- dpu_rm_init_hw_iter(&iter, 0, type);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- if (iter.blk->id != id)
- continue;
+ int idx = id - INTF_0;
- if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
- DPU_ERROR("type %d id %d already reserved\n", type, id);
- return -ENAVAIL;
- }
-
- iter.blk->enc_id = enc_id;
- trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
- break;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
+ DPU_ERROR("invalid intf id: %d", id);
+ return -EINVAL;
}
- /* Shouldn't happen since intfs are fixed at probe */
- if (!iter.hw) {
- DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ if (!rm->intf_blks[idx]) {
+ DPU_ERROR("couldn't find intf id %d\n", id);
return -EINVAL;
}
- return ret;
+ if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
+ DPU_ERROR("intf id %d already reserved\n", id);
+ return -ENAVAIL;
+ }
+
+ global_state->intf_to_enc_id[idx] = enc_id;
+ return 0;
}
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res)
{
@@ -523,8 +417,7 @@ static int _dpu_rm_reserve_intf_related_hw(
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, enc_id, id,
- DPU_HW_BLK_INTF);
+ ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
if (ret)
return ret;
}
@@ -534,25 +427,27 @@ static int _dpu_rm_reserve_intf_related_hw(
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
+ ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
+ &reqs->hw_res);
if (ret)
return ret;
@@ -560,9 +455,7 @@ static int _dpu_rm_make_reservation(
}
static int _dpu_rm_populate_requirements(
- struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
@@ -577,37 +470,36 @@ static int _dpu_rm_populate_requirements(
return 0;
}
-static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
+static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
+ uint32_t enc_id)
{
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->enc_id == enc_id) {
- blk->enc_id = 0;
- DPU_DEBUG("rel enc %d %d %d\n", enc_id,
- type, blk->id);
- }
- }
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ if (res_mapping[i] == enc_id)
+ res_mapping[i] = 0;
}
}
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc)
{
- mutex_lock(&rm->rm_lock);
-
- _dpu_rm_release_reservation(rm, enc->base.id);
-
- mutex_unlock(&rm->rm_lock);
+ _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
+ ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
+ ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
+ ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->intf_to_enc_id,
+ ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
}
int dpu_rm_reserve(
struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology,
- bool test_only)
+ struct msm_display_topology topology)
{
struct dpu_rm_requirements reqs;
int ret;
@@ -616,31 +508,75 @@ int dpu_rm_reserve(
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
- enc->base.id, crtc_state->crtc->base.id, test_only);
+ if (IS_ERR(global_state)) {
+ DPU_ERROR("failed to global state\n");
+ return PTR_ERR(global_state);
+ }
- mutex_lock(&rm->rm_lock);
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+ enc->base.id, crtc_state->crtc->base.id);
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
- topology);
+ ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
- goto end;
+ return ret;
}
- ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
- if (ret) {
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_reservation(rm, enc->base.id);
- } else if (test_only) {
- /* test_only: test the reservation and then undo */
- DPU_DEBUG("test_only: discard test [enc: %d]\n",
- enc->base.id);
- _dpu_rm_release_reservation(rm, enc->base.id);
- }
-end:
- mutex_unlock(&rm->rm_lock);
+
return ret;
}
+
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
+{
+ struct dpu_hw_blk **hw_blks;
+ uint32_t *hw_to_enc_id;
+ int i, num_blks, max_blks;
+
+ switch (type) {
+ case DPU_HW_BLK_PINGPONG:
+ hw_blks = rm->pingpong_blks;
+ hw_to_enc_id = global_state->pingpong_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->pingpong_blks);
+ break;
+ case DPU_HW_BLK_LM:
+ hw_blks = rm->mixer_blks;
+ hw_to_enc_id = global_state->mixer_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->mixer_blks);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw_blks = rm->ctl_blks;
+ hw_to_enc_id = global_state->ctl_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->ctl_blks);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw_blks = rm->intf_blks;
+ hw_to_enc_id = global_state->intf_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->intf_blks);
+ break;
+ default:
+ DPU_ERROR("blk type %d not managed by rm\n", type);
+ return 0;
+ }
+
+ num_blks = 0;
+ for (i = 0; i < max_blks; i++) {
+ if (hw_to_enc_id[i] != enc_id)
+ continue;
+
+ if (num_blks == blks_size) {
+ DPU_ERROR("More than %d resources assigned to enc %d\n",
+ blks_size, enc_id);
+ break;
+ }
+ blks[num_blks++] = hw_blks[i];
+ }
+
+ return num_blks;
+}
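A hypothetical consumer of the new lookup, modelled on how an encoder might
fetch its assigned pingpong blocks (variable names and the surrounding
context are illustrative):

	struct dpu_hw_blk *hw_pp[PINGPONG_MAX - PINGPONG_0];
	int i, num_pp;

	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
			drm_enc->base.id, DPU_HW_BLK_PINGPONG,
			hw_pp, ARRAY_SIZE(hw_pp));
	for (i = 0; i < num_pp; i++) {
		struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_pp[i]);
		/* program pp ... */
	}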
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 9c580a017094..6d2b04f306f0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -11,37 +11,24 @@
#include "msm_kms.h"
#include "dpu_hw_top.h"
+struct dpu_global_state;
+
/**
* struct dpu_rm - DPU dynamic hardware resource manager
- * @hw_blks: array of lists of hardware resources present in the system, one
- * list per type of hardware block
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @intf_blks: array of intf hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
- struct list_head hw_blks[DPU_HW_BLK_MAX];
- uint32_t lm_max_width;
- struct mutex rm_lock;
-};
+ struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+ struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+ struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+ struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
-/**
- * struct dpu_rm_hw_blk - resource manager internal structure
- * forward declaration for single iterator definition without void pointer
- */
-struct dpu_rm_hw_blk;
-
-/**
- * struct dpu_rm_hw_iter - iterator for use with dpu_rm
- * @hw: dpu_hw object requested, or NULL on failure
- * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-struct dpu_rm_hw_iter {
- void *hw;
- struct dpu_rm_hw_blk *blk;
- uint32_t enc_id;
- enum dpu_hw_blk_type type;
+ uint32_t lm_max_width;
};
/**
@@ -74,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm);
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
* @topology: Pointer to topology info for the display
- * @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology,
- bool test_only);
+ struct msm_display_topology topology);
/**
* dpu_rm_release - Given the encoder for the display chain, release any
@@ -90,31 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm,
* @enc: DRM Encoder handle
* @Return: 0 on Success otherwise -ERROR
*/
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc);
/**
- * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
- * using dpu_rm_get_hw
- * @iter: iter object to initialize
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-void dpu_rm_init_hw_iter(
- struct dpu_rm_hw_iter *iter,
- uint32_t enc_id,
- enum dpu_hw_blk_type type);
-/**
- * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
- * Meant to do a single pass through the hardware list to iteratively
- * retrieve hardware blocks of a given type for a given encoder.
- * Initialize an iterator object.
- * Set hw block type of interest. Set encoder id of interest, 0 for any.
- * Function returns first hw of type for that encoder.
- * Subsequent calls will return the next reserved hw of that type in-order.
- * Iterator HW pointer will be null on failure to find hw.
- * @rm: DPU Resource Manager handle
- * @iter: iterator object
- * @Return: true on match found, false on no match found
+ * Get hw resources of the given type that are assigned to this encoder.
*/
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
#endif /* __DPU_RM_H__ */
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 93ab36bd8df3..5e8c3f3e6625 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
int rc;
if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
- DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
u32 val;
if (!vbif || !vbif->cap) {
- DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
return -EINVAL;
}
@@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (!vbif || !mdp) {
DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
- vbif != 0, mdp != 0);
+ vbif != NULL, mdp != NULL);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index f34dca5d4532..c9239b07fe4f 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -481,6 +481,8 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index e1cc541e0ef2..998bef1190a3 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -405,6 +405,83 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values relative to the line
+ * counter positions.
+ */
+
+ vactive_start = vsw + vbp + 1;
+
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = mdp5_encoder_get_linecount(encoder);
+
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
+static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder)
+ return 0;
+
+ return mdp5_encoder_get_framecount(encoder);
+}
+
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1054,6 +1131,10 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.cursor_set = mdp5_crtc_cursor_set,
.cursor_move = mdp5_crtc_cursor_move,
.atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -1063,6 +1144,7 @@ static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.atomic_flush = mdp5_crtc_atomic_flush,
.atomic_enable = mdp5_crtc_atomic_enable,
.atomic_disable = mdp5_crtc_atomic_disable,
+ .get_scanout_position = mdp5_crtc_get_scanout_position,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
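To make the porch translation above concrete, take a hypothetical 1080p mode
with crtc_vdisplay = 1080, crtc_vsync_start = 1084, crtc_vsync_end = 1089 and
crtc_vtotal = 1125: then vsw = 1089 - 1084 = 5, vbp = 1125 - 1089 = 36, so
vactive_start = 5 + 36 + 1 = 42 and vactive_end = 42 + 1080 = 1122. A raw
line count of 42 yields *vpos = 0 (first active line), while a count of 20,
still inside the back porch, yields 20 - 42 = -22, i.e. a negative position
within vblank.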
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index e43ecd4be10a..6650f478b226 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -583,98 +583,6 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
-static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->crtc == crtc)
- return encoder;
-
- return NULL;
-}
-
-static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
-
- crtc = priv->crtcs[pipe];
- if (!crtc) {
- DRM_ERROR("Invalid crtc %d\n", pipe);
- return false;
- }
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder) {
- DRM_ERROR("no encoder found for crtc %d\n", pipe);
- return false;
- }
-
- vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
- vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
-
- /*
- * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
- * the end of VFP. Translate the porch values relative to the line
- * counter positions.
- */
-
- vactive_start = vsw + vbp + 1;
-
- vactive_end = vactive_start + mode->crtc_vdisplay;
-
- /* last scan line before VSYNC */
- vfp_end = mode->crtc_vtotal;
-
- if (stime)
- *stime = ktime_get();
-
- line = mdp5_encoder_get_linecount(encoder);
-
- if (line < vactive_start) {
- line -= vactive_start;
- } else if (line > vactive_end) {
- line = line - vfp_end - vactive_start;
- } else {
- line -= vactive_start;
- }
-
- *vpos = line;
- *hpos = 0;
-
- if (etime)
- *etime = ktime_get();
-
- return true;
-}
-
-static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
-
- if (pipe >= priv->num_crtcs)
- return 0;
-
- crtc = priv->crtcs[pipe];
- if (!crtc)
- return 0;
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder)
- return 0;
-
- return mdp5_encoder_get_framecount(encoder);
-}
-
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -762,9 +670,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.max_width = 0xffff;
dev->mode_config.max_height = 0xffff;
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
- dev->driver->get_scanout_position = mdp5_get_scanoutpos;
- dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4864b9558f65..4b363bd7ddff 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -689,7 +689,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto fail;
@@ -718,7 +718,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
encoder = msm_dsi->encoder;
/* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge);
+ drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
/*
* we need the drm_connector created by the external bridge
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index ad4e963ccd9b..106a67473af5 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- ret = drm_bridge_attach(encoder, edp->bridge, NULL);
- if (ret)
- goto fail;
-
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index b65b5cc2dba2..c69a37e0c708 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -97,7 +97,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
bridge = &edp_bridge->base;
bridge->funcs = &edp_bridge_funcs;
- ret = drm_bridge_attach(edp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1a9b6289637d..737453b6e596 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
- if (ret)
- goto fail;
-
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index ba81338a9bf8..6e380db9287b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -287,7 +287,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
- ret = drm_bridge_attach(hdmi->encoder, bridge, NULL);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e4b750b0c2d3..29295dee2a2e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
+ if (!dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_msm_uninit;
+ }
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
@@ -668,8 +670,10 @@ static void msm_irq_uninstall(struct drm_device *dev)
kms->funcs->irq_uninstall(kms);
}
-static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -678,8 +682,10 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
return vblank_ctrl_queue_work(priv, pipe, true);
}
-static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -1004,8 +1010,6 @@ static struct drm_driver msm_driver = {
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
- .enable_vblank = msm_enable_vblank,
- .disable_vblank = msm_disable_vblank,
.gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 740bf7c70d8f..194d900a460e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -232,6 +232,9 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_crtc_enable_vblank(struct drm_crtc *crtc);
+void msm_crtc_disable_vblank(struct drm_crtc *crtc);
+
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, int npages);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index db48867df47d..47235f8c5922 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -160,16 +160,12 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
/* the fw fb could be anywhere in memory */
drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9e0953c2b7ce..30584eaf8cc8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -157,7 +157,17 @@ struct msm_gem_submit {
uint32_t handle;
};
uint64_t iova;
- } bos[0];
+ } bos[];
};
+/* helper to determine whether a buffer in submit should be dumped, used for both
+ * devcoredump and debugfs cmdstream dumping:
+ */
+static inline bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ extern bool rd_full;
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
#endif /* __MSM_GEM_H__ */
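A usage sketch mirroring the two call sites below (devcoredump and the rd
debugfs dumper), filtering a submit's buffers down to the ones worth
snapshotting:

	for (i = 0; i < submit->nr_bos; i++) {
		if (!should_dump(submit, i))
			continue;
		/* snapshot submit->bos[i].obj at submit->bos[i].iova */
	}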
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 18f3a5c53ffb..615c5cda5389 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -355,16 +355,34 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
state->cmd = kstrdup(cmd, GFP_KERNEL);
if (submit) {
- int i;
-
- state->bos = kcalloc(submit->nr_cmds,
+ int i, nr = 0;
+
+ /* count # of buffers to dump: */
+ for (i = 0; i < submit->nr_bos; i++)
+ if (should_dump(submit, i))
+ nr++;
+ /* always dump cmd bo's, but don't double count them: */
+ for (i = 0; i < submit->nr_cmds; i++)
+ if (!should_dump(submit, submit->cmd[i].idx))
+ nr++;
+
+ state->bos = kcalloc(nr,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (should_dump(submit, i)) {
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+ }
+
for (i = 0; state->bos && i < submit->nr_cmds; i++) {
int idx = submit->cmd[i].idx;
- msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
- submit->bos[idx].iova, submit->bos[idx].flags);
+ if (!should_dump(submit, submit->cmd[i].idx)) {
+ msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+ submit->bos[idx].iova, submit->bos[idx].flags);
+ }
}
}
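As a worked example of the counting logic above: for a submit with
nr_bos = 5 where should_dump() is true only for buffers 0 and 2, and cmd
entries referencing buffer indices 2 and 4, the first loop counts 2 and the
second adds 1 (index 4 only, since index 2 is already counted), so nr = 3
and each buffer is allocated and captured exactly once.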
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index af7ceb246c7c..732f65df5c4f 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -43,7 +43,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
-static bool rd_full = false;
+bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
@@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
msm_gem_put_vaddr(&obj->base);
}
-static bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
- return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 37c50ea8f847..1f08de4241e0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1248,6 +1248,9 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
@@ -1258,6 +1261,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.disable = nv_crtc_disable,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static const uint32_t modeset_formats[] = {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a3dc2ba19fb2..4d1c58468dbc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1256,30 +1256,6 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
}
}
-static void
-nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nv50_mstc *mstc = nv50_mstc(connector);
-
- drm_connector_unregister(&mstc->connector);
-
- drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
-
- drm_connector_put(&mstc->connector);
-}
-
-static void
-nv50_mstm_register_connector(struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
-
- drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
-
- drm_connector_register(connector);
-}
-
static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, const char *path)
@@ -1298,8 +1274,6 @@ nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
- .register_connector = nv50_mstm_register_connector,
- .destroy_connector = nv50_mstm_destroy_connector,
};
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d9d64602947d..8f6455697ba7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
void
nv50_head_flush_clr(struct nv50_head *head,
@@ -413,6 +414,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
static const struct drm_crtc_helper_funcs
nv50_head_help = {
.atomic_check = nv50_head_atomic_check,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static void
@@ -481,6 +483,9 @@ nv50_head_func = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
.atomic_destroy_state = nv50_head_atomic_destroy_state,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
struct nv50_head *
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1b62ccc57aef..2b4b21b02e40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -647,13 +647,6 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
}
static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- /* We'll do this from user space. */
- return 0;
-}
-
-static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -1697,7 +1690,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
- .invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 53f9bceaf17a..700817dc4fa0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -54,15 +54,10 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
}
int
-nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return -EINVAL;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_get(&nv_crtc->vblank);
@@ -70,15 +65,10 @@ nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
}
void
-nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_put(&nv_crtc->vblank);
}
@@ -136,21 +126,13 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
}
bool
-nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
+nouveau_display_scanoutpos(struct drm_crtc *crtc,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (nouveau_crtc(crtc)->index == pipe) {
- return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
- stime, etime);
- }
- }
-
- return false;
+ return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+ stime, etime);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 6e8e66882e45..de004018ab5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -61,11 +61,12 @@ int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
-int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
-void nouveau_display_vblank_disable(struct drm_device *, unsigned int);
-bool nouveau_display_scanoutpos(struct drm_device *, unsigned int,
- bool, int *, int *, ktime_t *,
- ktime_t *, const struct drm_display_mode *);
+int nouveau_display_vblank_enable(struct drm_crtc *crtc);
+void nouveau_display_vblank_disable(struct drm_crtc *crtc);
+bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 0ad5d87b5a8e..ad89e09a0be3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -28,6 +28,7 @@
#include <nvif/class.h>
#include <nvif/object.h>
+#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
@@ -176,6 +177,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
.end = vmf->address + PAGE_SIZE,
.src = &src,
.dst = &dst,
+ .src_owner = drm->dev,
};
/*
@@ -526,6 +528,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
drm->dmem->pagemap.res = *res;
drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+ drm->dmem->pagemap.owner = drm->dev;
if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
goto out_free;
@@ -669,12 +672,6 @@ out:
return ret;
}
-static inline bool
-nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
-{
- return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
-}
-
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range)
@@ -690,18 +687,12 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
if (page == NULL)
continue;
- if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
+ if (!is_device_private_page(page))
continue;
- }
-
- if (!nouveau_dmem_page(drm, page)) {
- WARN(1, "Some unknown device memory !\n");
- range->pfns[i] = 0;
- continue;
- }
addr = nouveau_dmem_page_addr(page);
range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
+ range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index b65ae817eabf..6b1629c14dd7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1120,11 +1120,6 @@ driver_stub = {
.debugfs_init = nouveau_drm_debugfs_init,
#endif
- .enable_vblank = nouveau_display_vblank_enable,
- .disable_vblank = nouveau_display_vblank_disable,
- .get_scanout_position = nouveau_display_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0c5cdda3c336..24d543a01f43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -558,14 +558,10 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbcon->helper, 4);
+ ret = drm_fb_helper_init(dev, &fbcon->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper);
- if (ret)
- goto fini;
-
if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
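For reference, the fbdev bring-up now reduces to the following shape, since drm_fb_helper_init() dropped its connector-count parameter and the core tracks connectors itself. A sketch against the v5.7-era helpers:

    drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);

    ret = drm_fb_helper_init(dev, &fbcon->helper);
    if (ret)
            goto free;

    /* no drm_fb_helper_single_add_all_connectors() step anymore */
    ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);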
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index df9bf1fd1bc0..e3797b2d4d17 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -367,7 +367,6 @@ static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
[HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
[HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
- [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
};
static const u64
@@ -541,7 +540,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
range.default_flags = 0;
range.pfn_flags_mask = -1UL;
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, 0);
+ ret = hmm_range_fault(&range);
up_read(&mm->mmap_sem);
if (ret <= 0) {
if (ret == 0 || ret == -EBUSY)
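The dropped HMM_PFN_DEVICE_PRIVATE entry pairs with the nouveau_dmem change above: the VRAM flag is now set while converting PFNs, not by HMM. The single-argument hmm_range_fault() keeps its retry contract; a simplified sketch of the caller loop, omitting the notifier sequence check for brevity:

    /* 0 or -EBUSY means the snapshot raced with an invalidation */
    do {
            range.notifier_seq = mmu_interval_read_begin(range.notifier);
            down_read(&mm->mmap_sem);
            ret = hmm_range_fault(&range);
            up_read(&mm->mmap_sem);
    } while (ret == 0 || ret == -EBUSY);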
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index d865d8aeac3c..c85dd8afa3c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 9b91da09dc5f..8d9812a51ef6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
else
return ERR_PTR(-ENODEV);
+ if (!pdev->rom || pdev->romlen == 0)
+ return ERR_PTR(-ENODEV);
+
if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
+ priv->size = pdev->romlen;
if (ret = -ENODEV,
- (priv->rom = pci_platform_rom(pdev, &priv->size)))
+ (priv->rom = ioremap(pdev->rom, pdev->romlen)))
return priv;
kfree(priv);
}
@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(ret);
}
+static void
+platform_fini(void *data)
+{
+ struct priv *priv = data;
+
+ iounmap(priv->rom);
+ kfree(priv);
+}
+
const struct nvbios_source
nvbios_platform = {
.name = "PLATFORM",
.init = platform_init,
- .fini = (void(*)(void *))kfree,
+ .fini = platform_fini,
.read = pcirom_read,
.rw = true,
};
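pci_platform_rom() was removed from the tree, so the shadow method now maps the platform ROM window itself; the new platform_fini() exists because the mapping must be torn down symmetrically, where a bare kfree() sufficed before. The same pairing in a plainer shape (illustrative names):

    static void *example_init(struct pci_dev *pdev, struct priv *priv)
    {
            if (!pdev->rom || pdev->romlen == 0)
                    return ERR_PTR(-ENODEV);        /* no platform ROM */

            priv->size = pdev->romlen;
            priv->rom  = ioremap(pdev->rom, pdev->romlen);
            return priv->rom ? priv : ERR_PTR(-ENODEV);
    }

    static void example_fini(struct priv *priv)
    {
            iounmap(priv->rom);                     /* undo the ioremap */
            kfree(priv);
    }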
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index b562a8cd61bf..f2be594c7eff 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,28 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "OMAPDRM External Display Device Drivers"
-config DRM_OMAP_ENCODER_OPA362
- tristate "OPA362 external analog amplifier"
- help
- Driver for OPA362 external analog TV amplifier controlled
- through a GPIO.
-
-config DRM_OMAP_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
- help
- Driver for TPD12S015, which offers HDMI ESD protection and level
- shifting.
-
-config DRM_OMAP_CONNECTOR_HDMI
- tristate "HDMI Connector"
- help
- Driver for a generic HDMI connector.
-
-config DRM_OMAP_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
- help
- Driver for a generic analog TV connector.
-
config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index cb76859dc574..488ddf153613 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,6 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
deleted file mode 100644
index 0d20fab605d7..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Analog TV Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct device *dev;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tvc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void tvc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static const struct omap_dss_device_ops tvc_ops = {
- .connect = tvc_connect,
- .disconnect = tvc_disconnect,
-};
-
-static int tvc_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tvc_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tvc_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tvc_of_match[] = {
- { .compatible = "omapdss,svideo-connector", },
- { .compatible = "omapdss,composite-video-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tvc_of_match);
-
-static struct platform_driver tvc_connector_driver = {
- .probe = tvc_probe,
- .remove = __exit_p(tvc_remove),
- .driver = {
- .name = "connector-analog-tv",
- .of_match_table = tvc_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tvc_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Analog TV Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
deleted file mode 100644
index f5d69d810bb8..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HDMI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct device *dev;
-
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int hdmic_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void hdmic_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static bool hdmic_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops hdmic_ops = {
- .connect = hdmic_connect,
- .disconnect = hdmic_disconnect,
-
- .detect = hdmic_detect,
- .register_hpd_cb = hdmic_register_hpd_cb,
- .unregister_hpd_cb = hdmic_unregister_hpd_cb,
-};
-
-static irqreturn_t hdmic_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (hdmic_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int hdmic_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- mutex_init(&ddata->hpd_lock);
-
- /* HPD GPIO */
- gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->hpd_gpio = gpio;
-
- if (ddata->hpd_gpio) {
- r = devm_request_threaded_irq(&pdev->dev,
- gpiod_to_irq(ddata->hpd_gpio),
- NULL, hdmic_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- "hdmic hpd", ddata);
- if (r)
- return r;
- }
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &hdmic_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = ddata->hpd_gpio
- ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD
- : 0;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit hdmic_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id hdmic_of_match[] = {
- { .compatible = "omapdss,hdmi-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, hdmic_of_match);
-
-static struct platform_driver hdmi_connector_driver = {
- .probe = hdmic_probe,
- .remove = __exit_p(hdmic_remove),
- .driver = {
- .name = "connector-hdmi",
- .of_match_table = hdmic_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(hdmi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("HDMI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
deleted file mode 100644
index b992387ed674..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * OPA362 analog video amplifier with output/power control
- *
- * Copyright (C) 2014 Golden Delicious Computers
- * Author: H. Nikolaus Schaller <hns@goldelico.com>
- *
- * based on encoder-tfp410
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *enable_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int opa362_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void opa362_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static void opa362_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-}
-
-static void opa362_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-}
-
-static const struct omap_dss_device_ops opa362_ops = {
- .connect = opa362_connect,
- .disconnect = opa362_disconnect,
- .enable = opa362_enable,
- .disable = opa362_disable,
-};
-
-static int opa362_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
-
- dev_dbg(&pdev->dev, "probe\n");
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->enable_gpio = gpio;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &opa362_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit opa362_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- opa362_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id opa362_of_match[] = {
- { .compatible = "omapdss,ti,opa362", },
- {},
-};
-MODULE_DEVICE_TABLE(of, opa362_of_match);
-
-static struct platform_driver opa362_driver = {
- .probe = opa362_probe,
- .remove = __exit_p(opa362_remove),
- .driver = {
- .name = "amplifier-opa362",
- .of_match_table = opa362_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(opa362_driver);
-
-MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
-MODULE_DESCRIPTION("OPA362 analog video amplifier with output/power control");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
deleted file mode 100644
index 089105c5aa0a..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TPD12S015 HDMI ESD protection & level shifter chip driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/mutex.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct gpio_desc *ct_cp_hpd_gpio;
- struct gpio_desc *ls_oe_gpio;
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tpd_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
-
- /* DC-DC converter needs at max 300us to get to 90% of 5V */
- udelay(300);
-
- return 0;
-}
-
-static void tpd_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
-
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static bool tpd_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void tpd_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops tpd_ops = {
- .connect = tpd_connect,
- .disconnect = tpd_disconnect,
- .detect = tpd_detect,
- .register_hpd_cb = tpd_register_hpd_cb,
- .unregister_hpd_cb = tpd_unregister_hpd_cb,
-};
-
-static irqreturn_t tpd_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (tpd_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int tpd_probe(struct platform_device *pdev)
-{
- struct omap_dss_device *dssdev;
- struct panel_drv_data *ddata;
- int r;
- struct gpio_desc *gpio;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ct_cp_hpd_gpio = gpio;
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ls_oe_gpio = gpio;
-
- gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
- GPIOD_IN);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->hpd_gpio = gpio;
-
- mutex_init(&ddata->hpd_lock);
-
- r = devm_request_threaded_irq(&pdev->dev, gpiod_to_irq(ddata->hpd_gpio),
- NULL, tpd_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "tpd12s015 hpd", ddata);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tpd_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_HPD;
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tpd_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tpd_of_match[] = {
- { .compatible = "omapdss,ti,tpd12s015", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tpd_of_match);
-
-static struct platform_driver tpd_driver = {
- .probe = tpd_probe,
- .remove = __exit_p(tpd_remove),
- .driver = {
- .name = "tpd12s015",
- .of_match_table = tpd_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tpd_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("TPD12S015 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 564e3e1a1891..3484b5d4a91c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -678,7 +678,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- ddata->enabled = 1;
+ ddata->enabled = true;
if (!ddata->intro_printed) {
dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
@@ -729,7 +729,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
if (ddata->vpnl)
regulator_disable(ddata->vpnl);
- ddata->enabled = 0;
+ ddata->enabled = false;
}
static int dsicm_panel_reset(struct panel_drv_data *ddata)
@@ -1265,7 +1265,7 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->display = true;
dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
+ dssdev->of_port = 0;
dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 5950c3f52c2e..f967e6948f2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o dss-of.o output.o
+omapdss-base-y := base.o display.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index a1970b9db6ab..c7650a7c155d 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -149,8 +149,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
goto done;
}
- if (dssdev->id &&
- (dssdev->next || dssdev->bridge || dssdev->panel))
+ if (dssdev->id && (dssdev->next || dssdev->bridge))
goto done;
}
@@ -185,11 +184,10 @@ int omapdss_device_connect(struct dss_device *dss,
if (!dst) {
/*
* The destination is NULL when the source is connected to a
- * bridge or panel instead of a DSS device. Stop here, we will
- * attach the bridge or panel later when we will have a DRM
- * encoder.
+ * bridge instead of a DSS device. Stop here; we will attach
+ * the bridge later, once we have a DRM encoder.
*/
- return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+ return src && src->bridge ? 0 : -EINVAL;
}
if (omapdss_device_is_connected(dst))
@@ -197,10 +195,12 @@ int omapdss_device_connect(struct dss_device *dss,
dst->dss = dss;
- ret = dst->ops->connect(src, dst);
- if (ret < 0) {
- dst->dss = NULL;
- return ret;
+ if (dst->ops && dst->ops->connect) {
+ ret = dst->ops->connect(src, dst);
+ if (ret < 0) {
+ dst->dss = NULL;
+ return ret;
+ }
}
return 0;
@@ -217,7 +217,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
dst ? dev_name(dst->dev) : "NULL");
if (!dst) {
- WARN_ON(!src->bridge && !src->panel);
+ WARN_ON(!src->bridge);
return;
}
@@ -228,29 +228,18 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
- dst->ops->disconnect(src, dst);
+ if (dst->ops && dst->ops->disconnect)
+ dst->ops->disconnect(src, dst);
dst->dss = NULL;
}
EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- omapdss_device_pre_enable(dssdev->next);
-
- if (dssdev->ops->pre_enable)
- dssdev->ops->pre_enable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
-
void omapdss_device_enable(struct omap_dss_device *dssdev)
{
if (!dssdev)
return;
- if (dssdev->ops->enable)
+ if (dssdev->ops && dssdev->ops->enable)
dssdev->ops->enable(dssdev);
omapdss_device_enable(dssdev->next);
@@ -266,25 +255,11 @@ void omapdss_device_disable(struct omap_dss_device *dssdev)
omapdss_device_disable(dssdev->next);
- if (dssdev->ops->disable)
+ if (dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
EXPORT_SYMBOL_GPL(omapdss_device_disable);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- if (dssdev->ops->post_disable)
- dssdev->ops->post_disable(dssdev);
-
- omapdss_device_post_disable(dssdev->next);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
-
/* -----------------------------------------------------------------------------
* Components Handling
*/
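All the dispatch sites gain the same NULL guard because an output backed purely by a drm_bridge now registers no omap_dss_device_ops at all. The pipeline walk keeps its ordering; roughly (hypothetical name, sketch only):

    /* enable runs source-to-sink; disable runs sink-to-source */
    void example_enable(struct omap_dss_device *dssdev)
    {
            if (!dssdev)
                    return;
            if (dssdev->ops && dssdev->ops->enable)
                    dssdev->ops->enable(dssdev);
            example_enable(dssdev->next);   /* recurse downstream */
    }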
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8a3f61f5825f..3b82158b1bfd 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -40,15 +40,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL_GPL(omapdss_display_init);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
-{
- while (output->next)
- output = output->next;
-
- return omapdss_device_get(output);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get);
-
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 95147437b990..5110acb0c6c1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -9,20 +9,22 @@
#define DSS_SUBSYS_NAME "DPI"
-#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include <linux/clk.h>
#include <linux/sys_soc.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct dpi_data {
struct platform_device *pdev;
@@ -34,19 +36,19 @@ struct dpi_data {
enum dss_clk_source clk_src;
struct dss_pll *pll;
- struct mutex lock;
-
struct dss_lcd_mgr_config mgr_config;
unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
-{
- return container_of(dssdev, struct dpi_data, output);
-}
+#define drm_bridge_to_dpi(bridge) container_of(bridge, struct dpi_data, bridge)
+
+/* -----------------------------------------------------------------------------
+ * Clock Handling and PLL
+ */
static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
enum omap_channel channel)
@@ -283,9 +285,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
-static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
- unsigned long pck_req, unsigned long *fck, int *lck_div,
- int *pck_div)
+static int dpi_set_pll_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -299,19 +299,15 @@ static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
if (r)
return r;
- dss_select_lcd_clk_source(dpi->dss, channel, dpi->clk_src);
+ dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+ dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
-static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
- unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -327,29 +323,19 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.fck;
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
static int dpi_set_mode(struct dpi_data *dpi)
{
- int lck_div = 0, pck_div = 0;
- unsigned long fck = 0;
- int r = 0;
+ int r;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
- dpi->pixelclock, &fck, &lck_div, &pck_div);
+ r = dpi_set_pll_clk(dpi, dpi->pixelclock);
else
- r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
- &lck_div, &pck_div);
- if (r)
- return r;
+ r = dpi_set_dispc_clk(dpi, dpi->pixelclock);
- return 0;
+ return r;
}
static void dpi_config_lcd_manager(struct dpi_data *dpi)
@@ -366,25 +352,149 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
-static void dpi_display_enable(struct omap_dss_device *dssdev)
+static int dpi_clock_update(struct dpi_data *dpi, unsigned long *clock)
+{
+ int lck_div, pck_div;
+ unsigned long fck;
+ struct dpi_clk_calc_ctx ctx;
+
+ if (dpi->pll) {
+ if (!dpi_pll_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
+ } else {
+ if (!dpi_dss_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.fck;
+ }
+
+ lck_div = ctx.dispc_cinfo.lck_div;
+ pck_div = ctx.dispc_cinfo.pck_div;
+
+ *clock = fck / lck_div / pck_div;
+
+ return 0;
+}
+
+static int dpi_verify_pll(struct dss_pll *pll)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- struct omap_dss_device *out = &dpi->output;
int r;
- mutex_lock(&dpi->lock);
+ /* do initial setup with the PLL to see if it is operational */
+
+ r = dss_pll_enable(pll);
+ if (r)
+ return r;
+
+ dss_pll_disable(pll);
+
+ return 0;
+}
+
+static void dpi_init_pll(struct dpi_data *dpi)
+{
+ struct dss_pll *pll;
+
+ if (dpi->pll)
+ return;
+
+ dpi->clk_src = dpi_get_clk_src(dpi);
+
+ pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
+ if (!pll)
+ return;
+
+ if (dpi_verify_pll(pll)) {
+ DSSWARN("PLL not operational\n");
+ return;
+ }
+
+ dpi->pll = pll;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dpi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ dpi_init_pll(dpi);
+
+ return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+dpi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ if (mode->hdisplay % 8 != 0)
+ return MODE_BAD_WIDTH;
+
+ if (mode->clock == 0)
+ return MODE_NOCLOCK;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool dpi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return false;
+
+ adjusted_mode->clock = clock / 1000;
+
+ return true;
+}
+
+static void dpi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ dpi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void dpi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ int r;
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
- goto err_reg_enable;
+ return;
}
r = dispc_runtime_get(dpi->dss->dispc);
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel);
+ r = dss_dpi_select_source(dpi->dss, dpi->id, dpi->output.dispc_channel);
if (r)
goto err_src_sel;
@@ -406,8 +516,6 @@ static void dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- mutex_unlock(&dpi->lock);
-
return;
err_mgr_enable:
@@ -420,15 +528,11 @@ err_src_sel:
err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-err_reg_enable:
- mutex_unlock(&dpi->lock);
}
-static void dpi_display_disable(struct omap_dss_device *dssdev)
+static void dpi_bridge_disable(struct drm_bridge *bridge)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- mutex_lock(&dpi->lock);
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
dss_mgr_disable(&dpi->output);
@@ -442,99 +546,34 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-
- mutex_unlock(&dpi->lock);
}
-static void dpi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- DSSDBG("dpi_set_timings\n");
-
- mutex_lock(&dpi->lock);
-
- dpi->pixelclock = mode->clock * 1000;
-
- mutex_unlock(&dpi->lock);
-}
+static const struct drm_bridge_funcs dpi_bridge_funcs = {
+ .attach = dpi_bridge_attach,
+ .mode_valid = dpi_bridge_mode_valid,
+ .mode_fixup = dpi_bridge_mode_fixup,
+ .mode_set = dpi_bridge_mode_set,
+ .enable = dpi_bridge_enable,
+ .disable = dpi_bridge_disable,
+};
-static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void dpi_bridge_init(struct dpi_data *dpi)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- int lck_div, pck_div;
- unsigned long fck;
- unsigned long pck;
- struct dpi_clk_calc_ctx ctx;
- bool ok;
+ dpi->bridge.funcs = &dpi_bridge_funcs;
+ dpi->bridge.of_node = dpi->pdev->dev.of_node;
+ dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
- if (mode->hdisplay % 8 != 0)
- return -EINVAL;
-
- if (mode->clock == 0)
- return -EINVAL;
-
- if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- } else {
- ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.fck;
- }
-
- lck_div = ctx.dispc_cinfo.lck_div;
- pck_div = ctx.dispc_cinfo.pck_div;
-
- pck = fck / lck_div / pck_div;
-
- mode->clock = pck / 1000;
-
- return 0;
+ drm_bridge_add(&dpi->bridge);
}
-static int dpi_verify_pll(struct dss_pll *pll)
+static void dpi_bridge_cleanup(struct dpi_data *dpi)
{
- int r;
-
- /* do initial setup with the PLL to see if it is operational */
-
- r = dss_pll_enable(pll);
- if (r)
- return r;
-
- dss_pll_disable(pll);
-
- return 0;
+ drm_bridge_remove(&dpi->bridge);
}
-static void dpi_init_pll(struct dpi_data *dpi)
-{
- struct dss_pll *pll;
-
- if (dpi->pll)
- return;
-
- dpi->clk_src = dpi_get_clk_src(dpi);
-
- pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
- if (!pll)
- return;
-
- if (dpi_verify_pll(pll)) {
- DSSWARN("PLL not operational\n");
- return;
- }
-
- dpi->pll = pll;
-}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
/*
* Return a hardcoded channel for the DPI output. This should work for
@@ -572,39 +611,14 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
}
}
-static int dpi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
-
- dpi_init_pll(dpi);
-
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void dpi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops dpi_ops = {
- .connect = dpi_connect,
- .disconnect = dpi_disconnect,
-
- .enable = dpi_display_enable,
- .disable = dpi_display_disable,
-
- .check_timings = dpi_check_timings,
- .set_timings = dpi_set_timings,
-};
-
static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
struct omap_dss_device *out = &dpi->output;
u32 port_num = 0;
int r;
+ dpi_bridge_init(dpi);
+
of_property_read_u32(port, "reg", &port_num);
dpi->id = port_num <= 2 ? port_num : 0;
@@ -625,13 +639,14 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->id = OMAP_DSS_OUTPUT_DPI;
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
- out->of_ports = BIT(port_num);
- out->ops = &dpi_ops;
+ out->of_port = port_num;
out->owner = THIS_MODULE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &dpi->bridge);
+ if (r < 0) {
+ dpi_bridge_cleanup(dpi);
return r;
+ }
omapdss_device_register(out);
@@ -645,8 +660,14 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ dpi_bridge_cleanup(dpi);
}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
+
static const struct soc_device_attribute dpi_soc_devices[] = {
{ .machine = "OMAP3[456]*" },
{ .machine = "[AD]M37*" },
@@ -706,8 +727,6 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
dpi->dss = dss;
port->data = dpi;
- mutex_init(&dpi->lock);
-
r = dpi_init_regulator(dpi);
if (r)
return r;
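On the consumer side (not part of this diff), DRM_BRIDGE_ATTACH_NO_CONNECTOR means the display driver attaches the chain and builds the connector itself, typically via the drm_bridge_connector helper added in the same cycle. A hedged sketch of that call sequence, with illustrative names:

    #include <drm/drm_bridge.h>
    #include <drm/drm_bridge_connector.h>

    int example_encoder_init(struct drm_device *ddev, struct drm_encoder *enc,
                             struct drm_bridge *first_bridge)
    {
            struct drm_connector *connector;
            int ret;

            ret = drm_bridge_attach(enc, first_bridge, NULL,
                                    DRM_BRIDGE_ATTACH_NO_CONNECTOR);
            if (ret)
                    return ret;

            /* build one connector for the whole bridge chain */
            connector = drm_bridge_connector_init(ddev, enc);
            if (IS_ERR(connector))
                    return PTR_ERR(connector);

            return drm_connector_attach_encoder(connector, enc);
    }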
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index da16ea095f13..79ddfbfd1b58 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5116,12 +5116,12 @@ static int dsi_init_output(struct dsi_data *dsi)
out->dispc_channel = dsi_get_channel(dsi);
out->ops = &dsi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_device_init_output(out);
+ r = omapdss_device_init_output(out, NULL);
if (r < 0)
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
deleted file mode 100644
index b7981f3b80ad..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-
-#include "omapdss.h"
-
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
-{
- struct device_node *remote_node;
- struct omap_dss_device *dssdev;
-
- remote_node = of_graph_get_remote_node(node, port, 0);
- if (!remote_node)
- return NULL;
-
- dssdev = omapdss_find_device_by_node(remote_node);
- of_node_put(remote_node);
-
- return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
-}
-EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 225ec808b01a..4d5739fa4a5d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1151,46 +1151,38 @@ static const struct dss_features dra7xx_dss_feats = {
.has_lcd_clk_src = true,
};
-static int dss_init_ports(struct dss_device *dss)
+static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
unsigned int i;
- int r;
- for (i = 0; i < dss->feat->num_ports; i++) {
+ for (i = 0; i < num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- r = dpi_init_port(dss, pdev, port, dss->feat->model);
- if (r)
- return r;
+ dpi_uninit_port(port);
break;
-
case OMAP_DISPLAY_TYPE_SDI:
- r = sdi_init_port(dss, pdev, port);
- if (r)
- return r;
+ sdi_uninit_port(port);
break;
-
default:
break;
}
}
-
- return 0;
}
-static void dss_uninit_ports(struct dss_device *dss)
+static int dss_init_ports(struct dss_device *dss)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int i;
+ unsigned int i;
+ int r;
for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
@@ -1199,15 +1191,32 @@ static void dss_uninit_ports(struct dss_device *dss)
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_uninit_port(port);
+ r = dpi_init_port(dss, pdev, port, dss->feat->model);
+ if (r)
+ goto error;
break;
+
case OMAP_DISPLAY_TYPE_SDI:
- sdi_uninit_port(port);
+ r = sdi_init_port(dss, pdev, port);
+ if (r)
+ goto error;
break;
+
default:
break;
}
}
+
+ return 0;
+
+error:
+ __dss_uninit_ports(dss, i);
+ return r;
+}
+
+static void dss_uninit_ports(struct dss_device *dss)
+{
+ __dss_uninit_ports(dss, dss->feat->num_ports);
}
static int dss_video_pll_probe(struct dss_device *dss)
@@ -1339,9 +1348,15 @@ static int dss_component_compare(struct device *dev, void *data)
return dev == child;
}
+struct dss_component_match_data {
+ struct device *dev;
+ struct component_match **match;
+};
+
static int dss_add_child_component(struct device *dev, void *data)
{
- struct component_match **match = data;
+ struct dss_component_match_data *cmatch = data;
+ struct component_match **match = cmatch->match;
/*
* HACK
@@ -1352,7 +1367,17 @@ static int dss_add_child_component(struct device *dev, void *data)
if (strstr(dev_name(dev), "rfbi"))
return 0;
- component_match_add(dev->parent, match, dss_component_compare, dev);
+ /*
+ * Handle possible interconnect target modules defined within the DSS.
+ * The DSS components can be children of an interconnect target module
+ * after the device tree has been updated for the module data.
+ * See also omapdss_boot_init() for compatible fixup.
+ */
+ if (strstr(dev_name(dev), "target-module"))
+ return device_for_each_child(dev, cmatch,
+ dss_add_child_component);
+
+ component_match_add(cmatch->dev, match, dss_component_compare, dev);
return 0;
}
@@ -1395,6 +1420,7 @@ static int dss_probe_hardware(struct dss_device *dss)
static int dss_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
+ struct dss_component_match_data cmatch;
struct component_match *match = NULL;
struct resource *dss_mem;
struct dss_device *dss;
@@ -1472,7 +1498,9 @@ static int dss_probe(struct platform_device *pdev)
omapdss_gather_components(&pdev->dev);
- device_for_each_child(&pdev->dev, &match, dss_add_child_component);
+ cmatch.dev = &pdev->dev;
+ cmatch.match = &match;
+ device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
if (r)
@@ -1543,7 +1571,8 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
for_each_dss_output(dssdev) {
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
+ dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
}
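The init/uninit swap above is the usual partial-unwind idiom: on failure at port i, only ports 0..i-1 were initialised, so __dss_uninit_ports() takes an explicit count. Distilled, with a hypothetical per-port helper:

    static int example_init_ports(struct dss_device *dss)
    {
            unsigned int i;
            int r;

            for (i = 0; i < dss->feat->num_ports; i++) {
                    r = example_init_one(dss, i);   /* hypothetical */
                    if (r)
                            goto error;
            }
            return 0;

    error:
            __dss_uninit_ports(dss, i);     /* only 0..i-1 were set up */
            return r;
    }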
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index c867552c925c..3a40833d3368 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -14,6 +14,7 @@
#include <linux/hdmi.h>
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_bridge.h>
#include "omapdss.h"
#include "dss.h"
@@ -364,6 +365,7 @@ struct omap_hdmi {
bool core_enabled;
struct omap_dss_device output;
+ struct drm_bridge bridge;
struct platform_device *audio_pdev;
void (*audio_abort_cb)(struct device *dev);
@@ -378,6 +380,6 @@ struct omap_hdmi {
bool display_enabled;
};
-#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)
+#define drm_bridge_to_hdmi(b) container_of(b, struct omap_hdmi, bridge)
#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 0f557fad4513..2578c95570f6 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -28,6 +28,9 @@
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi4_core.h"
#include "hdmi4_cec.h"
@@ -237,20 +240,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -272,57 +261,139 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi4_audio_start(&hd->core, &hd->wp);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi4_audio_stop(&hd->core, &hd->wp);
+ hdmi_wp_audio_enable(&hd->wp, false);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+int hdmi4_core_enable(struct hdmi_core_data *core)
+{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+ int r = 0;
- r = hdmi4_read_edid(&hdmi->core, buf, len);
+ DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+void hdmi4_core_disable(struct hdmi_core_data *core)
{
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi4_audio_start(&hd->core, &hd->wp);
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+
+ DSSDBG("Enter omapdss_hdmi4_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi4_audio_stop(&hd->core, &hd->wp);
- hdmi_wp_audio_enable(&hd->wp, false);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
+
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi4_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
unsigned long flags;
- int r;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
- DSSDBG("ENTER hdmi_display_enable\n");
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -338,13 +409,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -357,58 +427,21 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-int hdmi4_core_enable(struct hdmi_core_data *core)
+static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
-
- mutex_lock(&hdmi->lock);
-
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-void hdmi4_core_disable(struct hdmi_core_data *core)
-{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
-
- DSSDBG("Enter omapdss_hdmi4_core_disable\n");
-
- mutex_lock(&hdmi->lock);
-
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
+ if (status == connector_status_disconnected)
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
+static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid = NULL;
+ unsigned int cec_addr;
bool need_enable;
int r;
@@ -417,63 +450,65 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
if (need_enable) {
r = hdmi4_core_enable(&hdmi->core);
if (r)
- return r;
+ return NULL;
}
- r = read_edid(hdmi, edid, len);
- if (r >= 256)
- hdmi4_cec_set_phys_addr(&hdmi->core,
- cec_get_edid_phys_addr(edid, r, NULL));
- else
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
- if (need_enable)
- hdmi4_core_disable(&hdmi->core);
+ mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- return r;
-}
+ r = hdmi4_core_ddc_init(&hdmi->core);
+ if (r)
+ goto done;
-static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
-}
+done:
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ if (edid && edid->extensions) {
+ unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
- hdmi->cfg.infoframe = *avi;
- return 0;
-}
+ cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
+ } else {
+ cec_addr = CEC_PHYS_ADDR_INVALID;
+ }
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi4_cec_set_phys_addr(&hdmi->core, cec_addr);
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
-}
+ if (need_enable)
+ hdmi4_core_disable(&hdmi->core);
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
+ return edid;
+}
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
+static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
+ .attach = hdmi4_bridge_attach,
+ .mode_set = hdmi4_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi4_bridge_enable,
+ .atomic_disable = hdmi4_bridge_disable,
+ .hpd_notify = hdmi4_bridge_hpd_notify,
+ .get_edid = hdmi4_bridge_get_edid,
+};
- .set_timings = hdmi_display_set_timings,
+static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
+{
+ hdmi->bridge.funcs = &hdmi4_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- .read_edid = hdmi_read_edid,
+ drm_bridge_add(&hdmi->bridge);
+}
- .hdmi = {
- .lost_hotplug = hdmi_lost_hotplug,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
+static void hdmi4_bridge_cleanup(struct omap_hdmi *hdmi)
+{
+ drm_bridge_remove(&hdmi->bridge);
+}
/* -----------------------------------------------------------------------------
* Audio Callbacks
@@ -666,19 +701,21 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi4_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi4_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -691,6 +728,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi4_bridge_cleanup(hdmi);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
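Two details of the funcs table are worth calling out: a bridge using the atomic enable/disable entry points must also wire the three atomic state helpers so the core can allocate bridge state, and DRM_BRIDGE_OP_EDID is what makes the connector layer call .get_edid. The minimal skeleton (sketch, names from this diff):

    static const struct drm_bridge_funcs example_funcs = {
            .attach                 = hdmi4_bridge_attach,
            /* mandatory once atomic_enable/atomic_disable are used */
            .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_bridge_destroy_state,
            .atomic_reset           = drm_atomic_helper_bridge_reset,
            .atomic_enable          = hdmi4_bridge_enable,
            .atomic_disable         = hdmi4_bridge_disable,
            .get_edid               = hdmi4_bridge_get_edid,
    };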
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ea5d5c228534..751985a2679a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -32,7 +32,7 @@ static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
return core->base + HDMI_CORE_AV;
}
-static int hdmi_core_ddc_init(struct hdmi_core_data *core)
+int hdmi4_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -74,13 +74,11 @@ static int hdmi_core_ddc_init(struct hdmi_core_data *core)
return 0;
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
- u8 *pedid, int ext)
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u32 i;
- char checksum;
- u32 offset = 0;
/* HDMI_CORE_DDC_STATUS_IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
@@ -89,24 +87,21 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -ETIMEDOUT;
}
- if (ext % 2 != 0)
- offset = 0x80;
-
/* Load Segment Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, block / 2, 7, 0);
/* Load Slave Address Register */
REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
/* Load Offset Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, block % 2 ? 0x80 : 0, 7, 0);
/* Load Byte Count */
- REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, len, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
/* Set DDC_CMD */
- if (ext)
+ if (block)
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
else
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
@@ -122,7 +117,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -EIO;
}
- for (i = 0; i < 0x80; ++i) {
+ for (i = 0; i < len; ++i) {
int t;
/* IN_PROG */
@@ -141,48 +136,12 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
udelay(1);
}
- pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
- }
-
- checksum = 0;
- for (i = 0; i < 0x80; ++i)
- checksum += pedid[i];
-
- if (checksum != 0) {
- DSSERR("E-EDID checksum failed!!\n");
- return -EIO;
+ buf[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
}
return 0;
}
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, l;
-
- if (len < 128)
- return -EINVAL;
-
- r = hdmi_core_ddc_init(core);
- if (r)
- return r;
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- return r;
-
- l = 128;
-
- if (len >= 128 * 2 && edid[0x7e] > 0) {
- r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
- if (r)
- return r;
- l += 128;
- }
-
- return l;
-}
-
static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
{
DSSDBG("Enter hdmi_core_init\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index 11c4b7ba1eee..dc64ae2aa300 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -249,7 +249,9 @@ struct hdmi_core_packet_enable_repeat {
u32 generic_pkt_repeat;
};
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+int hdmi4_core_ddc_init(struct hdmi_core_data *core);
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+
void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index d9463b332554..4d4c1fabd0a1 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -31,6 +31,9 @@
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
@@ -236,20 +239,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -271,66 +260,138 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
- int idlemode;
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi_wp_audio_core_req_enable(&hd->wp, true);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi_wp_audio_core_req_enable(&hd->wp, false);
+ hdmi_wp_audio_enable(&hd->wp, false);
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+static int hdmi_core_enable(struct omap_hdmi *hdmi)
+{
+ int r = 0;
- idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
- /* No-idle mode */
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
- r = hdmi5_read_edid(&hdmi->core, buf, len);
+ mutex_lock(&hdmi->lock);
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+static void hdmi_core_disable(struct omap_hdmi *hdmi)
{
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi_wp_audio_core_req_enable(&hd->wp, true);
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi_wp_audio_core_req_enable(&hd->wp, false);
- hdmi_wp_audio_enable(&hd->wp, false);
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- unsigned long flags;
- int r;
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
- DSSDBG("ENTER hdmi_display_enable\n");
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi5_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
+
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -346,13 +407,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -365,109 +425,74 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_core_enable(struct omap_hdmi *hdmi)
+static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid;
+ bool need_enable;
+ int idlemode;
+ int r;
- mutex_lock(&hdmi->lock);
+ need_enable = hdmi->core_enabled == false;
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
+ if (need_enable) {
+ r = hdmi_core_enable(hdmi);
+ if (r)
+ return NULL;
}
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-static void hdmi_core_disable(struct omap_hdmi *hdmi)
-{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
-
mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ /* No-idle mode */
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
+ hdmi5_core_ddc_init(&hdmi->core);
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- bool need_enable;
- int r;
+ edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
- need_enable = hdmi->core_enabled == false;
+ hdmi5_core_ddc_uninit(&hdmi->core);
- if (need_enable) {
- r = hdmi_core_enable(hdmi);
- if (r)
- return r;
- }
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
- r = read_edid(hdmi, edid, len);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
if (need_enable)
hdmi_core_disable(hdmi);
- return r;
+ return (struct edid *)edid;
}
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
+static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
+ .attach = hdmi5_bridge_attach,
+ .mode_set = hdmi5_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi5_bridge_enable,
+ .atomic_disable = hdmi5_bridge_disable,
+ .get_edid = hdmi5_bridge_get_edid,
+};
+
+static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi->bridge.funcs = &hdmi5_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- hdmi->cfg.infoframe = *avi;
- return 0;
+ drm_bridge_add(&hdmi->bridge);
}
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
+static void hdmi5_bridge_cleanup(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
+ drm_bridge_remove(&hdmi->bridge);
}
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
-
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
-
- .set_timings = hdmi_display_set_timings,
-
- .read_edid = hdmi_read_edid,
-
- .hdmi = {
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
-
/* -----------------------------------------------------------------------------
* Audio Callbacks
*/
@@ -650,19 +675,21 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi5_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi5_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -675,6 +702,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi5_bridge_cleanup(hdmi);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
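[hdmi5_bridge_enable() above pulls the connector and CRTC out of the atomic commit instead of relying on the removed set_hdmi_mode/set_infoframe ops. The lookup pattern, reduced to its core as a sketch (hypothetical hook name, error checks from the hunk above omitted for brevity; the helpers and signatures are DRM core API, mirroring the includes the patch adds):

#include <linux/hdmi.h>
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>

static void my_bridge_enable(struct drm_bridge *bridge,
			     struct drm_bridge_state *bridge_state)
{
	struct drm_atomic_state *state = bridge_state->base.state;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct hdmi_avi_infoframe avi;

	/* Walk encoder -> connector -> CRTC through the new state. */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);

	/* HDMI vs. DVI now comes from the parsed EDID ... */
	if (!connector->display_info.is_hdmi)
		return;

	/* ... as does the AVI infoframe, built from the adjusted mode. */
	if (drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
					&crtc_state->adjusted_mode) == 0) {
		/* store avi in the device configuration */
	}
}]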
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index ff4d35c8771f..7dd587035160 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,7 +23,7 @@
#include "hdmi5_core.h"
-static void hdmi_core_ddc_init(struct hdmi_core_data *core)
+void hdmi5_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
@@ -102,7 +102,7 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x0, 2, 2);
}
-static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -112,14 +112,14 @@ static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2);
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u8 cur_addr;
- char checksum = 0;
const int retries = 1000;
- u8 seg_ptr = ext / 2;
- u8 edidbase = ((ext % 2) * 0x80);
+ u8 seg_ptr = block / 2;
+ u8 edidbase = ((block % 2) * EDID_LENGTH);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGPTR, seg_ptr, 7, 0);
@@ -127,7 +127,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
* TODO: We use polling here, although we probably should use proper
* interrupts.
*/
- for (cur_addr = 0; cur_addr < 128; ++cur_addr) {
+ for (cur_addr = 0; cur_addr < len; ++cur_addr) {
int i;
/* clear ERROR and DONE */
@@ -164,45 +164,13 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
return -EIO;
}
- pedid[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
- checksum += pedid[cur_addr];
+ buf[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
}
return 0;
}
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, n, i;
- int max_ext_blocks = (len / 128) - 1;
-
- if (len < 128)
- return -EINVAL;
-
- hdmi_core_ddc_init(core);
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- goto out;
-
- n = edid[0x7e];
-
- if (n > max_ext_blocks)
- n = max_ext_blocks;
-
- for (i = 1; i <= n; i++) {
- r = hdmi_core_ddc_edid(core, edid + i * EDID_LENGTH, i);
- if (r)
- goto out;
- }
-
-out:
- hdmi_core_ddc_uninit(core);
-
- return r ? r : len;
-}
-
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
index f10b8a283011..65eadefdb3f9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
@@ -281,7 +281,10 @@ struct csc_table {
u16 c1, c2, c3, c4;
};
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi5_core_ddc_init(struct hdmi_core_data *core);
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
+
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 31502857f013..72a7da7bfff1 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -174,34 +174,38 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
};
static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
- { .compatible = "composite-video-connector" },
- { .compatible = "hdmi-connector" },
{ .compatible = "panel-dsi-cm" },
- { .compatible = "svideo-connector" },
- { .compatible = "ti,opa362" },
- { .compatible = "ti,tpd12s015" },
{},
};
+static void __init omapdss_find_children(struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_find_property(child, "compatible", NULL))
+ continue;
+
+ omapdss_walk_device(child, true);
+
+ if (of_device_is_compatible(child, "ti,sysc"))
+ omapdss_find_children(child);
+ }
+}
+
static int __init omapdss_boot_init(void)
{
- struct device_node *dss, *child;
+ struct device_node *dss;
INIT_LIST_HEAD(&dss_conv_list);
dss = of_find_matching_node(NULL, omapdss_of_match);
if (dss == NULL || !of_device_is_available(dss))
- return 0;
+ goto put_node;
omapdss_walk_device(dss, true);
-
- for_each_available_child_of_node(dss, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
- omapdss_walk_device(child, true);
- }
+ omapdss_find_children(dss);
while (!list_empty(&dss_conv_list)) {
struct dss_conv_node *n;
@@ -217,6 +221,8 @@ static int __init omapdss_boot_init(void)
kfree(n);
}
+put_node:
+ of_node_put(dss);
return 0;
}
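[Besides trimming the fixup whitelist, the boot-init change above recurses into "ti,sysc" interconnect-target children and plugs a device_node leak with of_node_put() on every exit path. The walk, reduced to a sketch (the per-node handler is hypothetical; the OF iterators are the kernel's own and drop each child reference as they advance):

#include <linux/of.h>

static void my_walk(struct device_node *np)
{
	struct device_node *child;

	for_each_available_child_of_node(np, child) {
		if (!of_find_property(child, "compatible", NULL))
			continue;

		/* visit(child); -- hypothetical per-node handler */

		/* DSS children may sit one level down, behind ti,sysc. */
		if (of_device_is_compatible(child, "ti,sysc"))
			my_walk(child);
	}
}]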
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 79f6b195c7cf..ab19d4af8de7 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -285,13 +285,6 @@ struct omap_dss_writeback_info {
u8 pre_mult_alpha;
};
-struct omapdss_hdmi_ops {
- void (*lost_hotplug)(struct omap_dss_device *dssdev);
- int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
- int (*set_infoframe)(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi);
-};
-
struct omapdss_dsi_ops {
void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
bool enter_ulps);
@@ -349,46 +342,23 @@ struct omap_dss_device_ops {
void (*disconnect)(struct omap_dss_device *dssdev,
struct omap_dss_device *dst);
- void (*pre_enable)(struct omap_dss_device *dssdev);
void (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
- void (*post_disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
struct drm_display_mode *mode);
- void (*set_timings)(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode);
-
- bool (*detect)(struct omap_dss_device *dssdev);
-
- void (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
int (*get_modes)(struct omap_dss_device *dssdev,
struct drm_connector *connector);
- union {
- const struct omapdss_hdmi_ops hdmi;
- const struct omapdss_dsi_ops dsi;
- };
+ const struct omapdss_dsi_ops dsi;
};
/**
* enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
- * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
* @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
*/
enum omap_dss_device_ops_flag {
- OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
- OMAP_DSS_DEVICE_OP_HPD = BIT(1),
- OMAP_DSS_DEVICE_OP_EDID = BIT(2),
OMAP_DSS_DEVICE_OP_MODES = BIT(3),
};
@@ -400,6 +370,7 @@ struct omap_dss_device {
struct dss_device *dss;
struct omap_dss_device *next;
struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
struct drm_panel *panel;
struct list_head list;
@@ -436,8 +407,8 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
- /* bitmask of port numbers in DT */
- unsigned int of_ports;
+ /* port number in DT */
+ unsigned int of_port;
};
struct omap_dss_driver {
@@ -461,7 +432,6 @@ static inline bool omapdss_is_initialized(void)
}
void omapdss_display_init(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm);
@@ -475,10 +445,8 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
void omapdss_device_enable(struct omap_dss_device *dssdev);
void omapdss_device_disable(struct omap_dss_device *dssdev);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
@@ -487,7 +455,8 @@ int omap_dss_get_num_overlays(void);
#define for_each_dss_output(d) \
while ((d = omapdss_device_next_output(d)) != NULL)
struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
-int omapdss_device_init_output(struct omap_dss_device *out);
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge);
void omapdss_device_cleanup_output(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
@@ -502,9 +471,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
}
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port);
-
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
DSS_WB_LCD2_MGR = 1,
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 0693d34fca1b..ce21c798cca6 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,7 +4,6 @@
* Author: Archit Taneja <archit@ti.com>
*/
-#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,12 +17,14 @@
#include "dss.h"
#include "omapdss.h"
-int omapdss_device_init_output(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge)
{
struct device_node *remote_node;
+ int ret;
remote_node = of_graph_get_remote_node(out->dev->of_node,
- ffs(out->of_ports) - 1, 0);
+ out->of_port, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
@@ -39,17 +40,55 @@ int omapdss_device_init_output(struct omap_dss_device *out)
if (out->next && out->type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
- omapdss_device_put(out->next);
- out->next = NULL;
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
- return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+ if (out->panel) {
+ struct drm_bridge *bridge;
+
+ bridge = drm_panel_bridge_add(out->panel);
+ if (IS_ERR(bridge)) {
+ dev_err(out->dev,
+ "unable to create panel bridge (%ld)\n",
+ PTR_ERR(bridge));
+ ret = PTR_ERR(bridge);
+ goto error;
+ }
+
+ out->bridge = bridge;
+ }
+
+ if (local_bridge) {
+ if (!out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ out->next_bridge = out->bridge;
+ out->bridge = local_bridge;
+ }
+
+ if (!out->next && !out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ omapdss_device_cleanup_output(out);
+ out->next = NULL;
+ return ret;
}
EXPORT_SYMBOL(omapdss_device_init_output);
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
+ if (out->bridge && out->panel)
+ drm_panel_bridge_remove(out->next_bridge ?
+ out->next_bridge : out->bridge);
+
if (out->next)
omapdss_device_put(out->next);
}
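[The reworked omapdss_device_init_output() above builds a two-stage chain: a drm_panel is first wrapped into a bridge, then the output's local bridge is spliced in ahead of whatever sits downstream. Condensed to the chain manipulation alone, as a sketch of the logic above rather than a drop-in replacement (the one-argument drm_panel_bridge_add() matches the call in the hunk):

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>

#include "omapdss.h"

/* Afterwards out->bridge heads the chain and out->next_bridge is the
 * downstream bridge (a panel bridge or an external encoder bridge). */
static int my_setup_chain(struct omap_dss_device *out,
			  struct drm_bridge *local_bridge)
{
	if (out->panel) {
		struct drm_bridge *b = drm_panel_bridge_add(out->panel);

		if (IS_ERR(b))
			return PTR_ERR(b);
		out->bridge = b;
	}

	if (local_bridge) {
		if (!out->bridge)
			return -EPROBE_DEFER;	/* downstream not ready */

		out->next_bridge = out->bridge;
		out->bridge = local_bridge;
	}

	return 0;
}

This ordering is what lets each encoder's attach hook forward to output.next_bridge, as the sdi/venc/hdmi hunks do.]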
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 3b447c01fa2a..417a8740ad0a 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -6,17 +6,19 @@
#define DSS_SUBSYS_NAME "SDI"
-#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/regulator/consumer.h>
#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct sdi_device {
struct platform_device *pdev;
@@ -30,9 +32,11 @@ struct sdi_device {
int datapairs;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_sdi(dssdev) container_of(dssdev, struct sdi_device, output)
+#define drm_bridge_to_sdi(bridge) \
+ container_of(bridge, struct sdi_device, bridge)
struct sdi_clk_calc_ctx {
struct sdi_device *sdi;
@@ -118,9 +122,82 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
-static void sdi_display_enable(struct omap_dss_device *dssdev)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int sdi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+sdi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ int ret;
+
+ if (pixelclock == 0)
+ return MODE_NOCLOCK;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool sdi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ unsigned long pck;
+ int ret;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return false;
+
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
+
+ if (pck != pixelclock)
+ dev_dbg(&sdi->pdev->dev,
+ "pixel clock adjusted from %lu Hz to %lu Hz\n",
+ pixelclock, pck);
+
+ adjusted_mode->clock = pck / 1000;
+
+ return true;
+}
+
+static void sdi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ sdi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void sdi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
@@ -181,9 +258,10 @@ err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_display_disable(struct omap_dss_device *dssdev)
+static void sdi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
dss_mgr_disable(&sdi->output);
@@ -194,86 +272,56 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-
- sdi->pixelclock = mode->clock * 1000;
-}
+static const struct drm_bridge_funcs sdi_bridge_funcs = {
+ .attach = sdi_bridge_attach,
+ .mode_valid = sdi_bridge_mode_valid,
+ .mode_fixup = sdi_bridge_mode_fixup,
+ .mode_set = sdi_bridge_mode_set,
+ .atomic_enable = sdi_bridge_enable,
+ .atomic_disable = sdi_bridge_disable,
+};
-static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void sdi_bridge_init(struct sdi_device *sdi)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- struct dispc_clock_info dispc_cinfo;
- unsigned long pixelclock = mode->clock * 1000;
- unsigned long fck;
- unsigned long pck;
- int r;
+ sdi->bridge.funcs = &sdi_bridge_funcs;
+ sdi->bridge.of_node = sdi->pdev->dev.of_node;
+ sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- if (pixelclock == 0)
- return -EINVAL;
-
- r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
- if (r)
- return r;
-
- pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
-
- if (pck != pixelclock) {
- DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
- pixelclock, pck);
-
- mode->clock = pck / 1000;
- }
-
- return 0;
+ drm_bridge_add(&sdi->bridge);
}
-static int sdi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void sdi_bridge_cleanup(struct sdi_device *sdi)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ drm_bridge_remove(&sdi->bridge);
}
-static void sdi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops sdi_ops = {
- .connect = sdi_connect,
- .disconnect = sdi_disconnect,
-
- .enable = sdi_display_enable,
- .disable = sdi_display_disable,
-
- .check_timings = sdi_check_timings,
- .set_timings = sdi_set_timings,
-};
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
static int sdi_init_output(struct sdi_device *sdi)
{
struct omap_dss_device *out = &sdi->output;
int r;
+ sdi_bridge_init(sdi);
+
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
- out->of_ports = BIT(1);
- out->ops = &sdi_ops;
+ out->of_port = 1;
out->owner = THIS_MODULE;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &sdi->bridge);
+ if (r < 0) {
+ sdi_bridge_cleanup(sdi);
return r;
+ }
omapdss_device_register(out);
@@ -284,6 +332,8 @@ static void sdi_uninit_output(struct sdi_device *sdi)
{
omapdss_device_unregister(&sdi->output);
omapdss_device_cleanup_output(&sdi->output);
+
+ sdi_bridge_cleanup(sdi);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
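[sdi_bridge_mode_fixup() above rounds the requested pixel clock to the nearest rate the DSS dividers can produce and writes it back into the adjusted mode. The arithmetic as a standalone illustration; the divider and clock values are made up:

#include <stdio.h>

int main(void)
{
	unsigned long fck = 172800000;	/* hypothetical DSS functional clock */
	unsigned long lck_div = 1, pck_div = 3;
	unsigned long requested = 57000000;
	unsigned long pck = fck / lck_div / pck_div;

	if (pck != requested)
		printf("pixel clock adjusted from %lu Hz to %lu Hz\n",
		       requested, pck);

	/* mode_fixup stores pck / 1000 back into adjusted_mode->clock. */
	return 0;
}

mode_valid rejects outright what no divider pair can reach (MODE_CLOCK_RANGE); mode_fixup then snaps anything accepted to an achievable rate.]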
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 596a297d5813..766553bb2f87 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -26,6 +25,8 @@
#include <linux/component.h>
#include <linux/sys_soc.h>
+#include <drm/drm_bridge.h>
+
#include "omapdss.h"
#include "dss.h"
@@ -289,7 +290,6 @@ static const struct drm_display_mode omap_dss_ntsc_mode = {
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
- struct mutex venc_lock;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
@@ -303,9 +303,10 @@ struct venc_device {
bool requires_tv_dac_clk;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_venc(dssdev) container_of(dssdev, struct venc_device, output)
+#define drm_bridge_to_venc(b) container_of(b, struct venc_device, bridge)
static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
{
@@ -477,56 +478,6 @@ static void venc_power_off(struct venc_device *venc)
venc_runtime_put(venc);
}
-static void venc_display_enable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_enable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_on(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static void venc_display_disable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_disable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_off(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- static const struct drm_display_mode *modes[] = {
- &omap_dss_pal_mode,
- &omap_dss_ntsc_mode,
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(modes); ++i) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, modes[i]);
- if (!mode)
- return i;
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return ARRAY_SIZE(modes);
-}
-
static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
{
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
@@ -545,57 +496,6 @@ static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mod
return VENC_MODE_UNKNOWN;
}
-static void venc_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
- enum venc_videomode venc_mode = venc_get_videomode(mode);
-
- DSSDBG("venc_set_timings\n");
-
- mutex_lock(&venc->venc_lock);
-
- switch (venc_mode) {
- default:
- WARN_ON_ONCE(1);
- /* Fall-through */
- case VENC_MODE_PAL:
- venc->config = &venc_config_pal_trm;
- break;
-
- case VENC_MODE_NTSC:
- venc->config = &venc_config_ntsc_trm;
- break;
- }
-
- dispc_set_tv_pclk(venc->dss->dispc, 13500000);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
-{
- DSSDBG("venc_check_timings\n");
-
- switch (venc_get_videomode(mode)) {
- case VENC_MODE_PAL:
- drm_mode_copy(mode, &omap_dss_pal_mode);
- break;
-
- case VENC_MODE_NTSC:
- drm_mode_copy(mode, &omap_dss_ntsc_mode);
- break;
-
- default:
- return -EINVAL;
- }
-
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_set_name(mode);
- return 0;
-}
-
static int venc_dump_regs(struct seq_file *s, void *p)
{
struct venc_device *venc = s->private;
@@ -673,31 +573,149 @@ static int venc_get_clocks(struct venc_device *venc)
return 0;
}
-static int venc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int venc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+venc_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ switch (venc_get_videomode(mode)) {
+ case VENC_MODE_PAL:
+ case VENC_MODE_NTSC:
+ return MODE_OK;
+
+ default:
+ return MODE_BAD;
+ }
+}
+
+static bool venc_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *venc_mode;
+
+ switch (venc_get_videomode(adjusted_mode)) {
+ case VENC_MODE_PAL:
+ venc_mode = &omap_dss_pal_mode;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc_mode = &omap_dss_ntsc_mode;
+ break;
+
+ default:
+ return false;
+ }
+
+ drm_mode_copy(adjusted_mode, venc_mode);
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_name(adjusted_mode);
+
+ return true;
+}
+
+static void venc_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+ enum venc_videomode venc_mode = venc_get_videomode(adjusted_mode);
+
+ switch (venc_mode) {
+ default:
+ WARN_ON_ONCE(1);
+ /* Fall-through */
+ case VENC_MODE_PAL:
+ venc->config = &venc_config_pal_trm;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc->config = &venc_config_ntsc_trm;
+ break;
+ }
+
+ dispc_set_tv_pclk(venc->dss->dispc, 13500000);
+}
+
+static void venc_bridge_enable(struct drm_bridge *bridge)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_on(venc);
}
-static void venc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void venc_bridge_disable(struct drm_bridge *bridge)
{
- omapdss_device_disconnect(dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_off(venc);
}
-static const struct omap_dss_device_ops venc_ops = {
- .connect = venc_connect,
- .disconnect = venc_disconnect,
+static int venc_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ static const struct drm_display_mode *modes[] = {
+ &omap_dss_pal_mode,
+ &omap_dss_ntsc_mode,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, modes[i]);
+ if (!mode)
+ return i;
- .enable = venc_display_enable,
- .disable = venc_display_disable,
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
- .check_timings = venc_check_timings,
- .set_timings = venc_set_timings,
+ return ARRAY_SIZE(modes);
+}
- .get_modes = venc_get_modes,
+static const struct drm_bridge_funcs venc_bridge_funcs = {
+ .attach = venc_bridge_attach,
+ .mode_valid = venc_bridge_mode_valid,
+ .mode_fixup = venc_bridge_mode_fixup,
+ .mode_set = venc_bridge_mode_set,
+ .enable = venc_bridge_enable,
+ .disable = venc_bridge_disable,
+ .get_modes = venc_bridge_get_modes,
};
+static void venc_bridge_init(struct venc_device *venc)
+{
+ venc->bridge.funcs = &venc_bridge_funcs;
+ venc->bridge.of_node = venc->pdev->dev.of_node;
+ venc->bridge.ops = DRM_BRIDGE_OP_MODES;
+ venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
+ venc->bridge.interlace_allowed = true;
+
+ drm_bridge_add(&venc->bridge);
+}
+
+static void venc_bridge_cleanup(struct venc_device *venc)
+{
+ drm_bridge_remove(&venc->bridge);
+}
+
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
@@ -747,19 +765,22 @@ static int venc_init_output(struct venc_device *venc)
struct omap_dss_device *out = &venc->output;
int r;
+ venc_bridge_init(venc);
+
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &venc_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &venc->bridge);
+ if (r < 0) {
+ venc_bridge_cleanup(venc);
return r;
+ }
omapdss_device_register(out);
@@ -770,6 +791,8 @@ static void venc_uninit_output(struct venc_device *venc)
{
omapdss_device_unregister(&venc->output);
omapdss_device_cleanup_output(&venc->output);
+
+ venc_bridge_cleanup(venc);
}
static int venc_probe_of(struct venc_device *venc)
@@ -839,8 +862,6 @@ static int venc_probe(struct platform_device *pdev)
if (soc_device_match(venc_soc_devices))
venc->requires_tv_dac_clk = true;
- mutex_init(&venc->venc_lock);
-
venc->config = &venc_config_pal_trm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
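[With DRM_BRIDGE_OP_MODES set above, the connector built on top of this bridge chain takes its mode list from venc_bridge_get_modes(). The general shape of such a fixed-mode .get_modes hook, as a sketch (the mode and function names are hypothetical; the helpers are standard DRM):

#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>

/* Hypothetical fixed mode; real drivers use measured timings. */
static const struct drm_display_mode my_fixed_mode = {
	DRM_SIMPLE_MODE(720, 576, 0, 0)
};

static int my_bridge_get_modes(struct drm_bridge *bridge,
			       struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &my_fixed_mode);
	if (!mode)
		return 0;

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	return 1;	/* number of modes added */
}]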
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 94cded387174..528764566b17 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -6,7 +6,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "omap_drv.h"
@@ -20,124 +19,12 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *output;
- struct omap_dss_device *hpd;
- bool hdmi_mode;
};
-static void omap_connector_hpd_notify(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
-
- if (status != connector_status_disconnected)
- return;
-
- /*
- * Notify all devices in the pipeline of disconnection. This is required
- * to let the HDMI encoders reset their internal state related to
- * connection status, such as the CEC address.
- */
- for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
- dssdev->ops->hdmi.lost_hotplug(dssdev);
- }
-}
-
-static void omap_connector_hpd_cb(void *cb_data,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = cb_data;
- struct drm_connector *connector = &omap_connector->base;
- struct drm_device *dev = connector->dev;
- enum drm_connector_status old_status;
-
- mutex_lock(&dev->mode_config.mutex);
- old_status = connector->status;
- connector->status = status;
- mutex_unlock(&dev->mode_config.mutex);
-
- if (old_status == status)
- return;
-
- omap_connector_hpd_notify(connector, status);
-
- drm_kms_helper_hotplug_event(dev);
-}
-
-void omap_connector_enable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb,
- omap_connector);
-}
-
-void omap_connector_disable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->unregister_hpd_cb(hpd);
-}
-
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- return omap_connector->hdmi_mode;
-}
-
-static struct omap_dss_device *
-omap_connector_find_device(struct drm_connector *connector,
- enum omap_dss_device_ops_flag op)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *d;
-
- for (d = omap_connector->output; d; d = d->next) {
- if (d->ops_flags & op)
- dssdev = d;
- }
-
- return dssdev;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
- struct omap_dss_device *dssdev;
- enum drm_connector_status status;
-
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
-
- if (dssdev) {
- status = dssdev->ops->detect(dssdev)
- ? connector_status_connected
- : connector_status_disconnected;
-
- omap_connector_hpd_notify(connector, status);
- } else {
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DPI:
- case DRM_MODE_CONNECTOR_LVDS:
- case DRM_MODE_CONNECTOR_DSI:
- status = connector_status_connected;
- break;
- default:
- status = connector_status_unknown;
- break;
- }
- }
-
- VERB("%s: %d (force=%d)", connector->name, status, force);
-
- return status;
+ return connector_status_connected;
}
static void omap_connector_destroy(struct drm_connector *connector)
@@ -146,14 +33,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
DBG("%s", connector->name);
- if (omap_connector->hpd) {
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- hpd->ops->unregister_hpd_cb(hpd);
- omapdss_device_put(hpd);
- omap_connector->hpd = NULL;
- }
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -162,81 +41,27 @@ static void omap_connector_destroy(struct drm_connector *connector)
kfree(omap_connector);
}
-#define MAX_EDID 512
-
-static int omap_connector_get_modes_edid(struct drm_connector *connector,
- struct omap_dss_device *dssdev)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- enum drm_connector_status status;
- void *edid;
- int n;
-
- status = omap_connector_detect(connector, false);
- if (status != connector_status_connected)
- goto no_edid;
-
- edid = kzalloc(MAX_EDID, GFP_KERNEL);
- if (!edid)
- goto no_edid;
-
- if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 ||
- !drm_edid_is_valid(edid)) {
- kfree(edid);
- goto no_edid;
- }
-
- drm_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
-
- omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid);
-
- kfree(edid);
- return n;
-
-no_edid:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *d;
DBG("%s", connector->name);
/*
- * If display exposes EDID, then we parse that in the normal way to
- * build table of supported modes.
+ * If the display pipeline reports modes (e.g. with a fixed resolution
+ * panel or an analog TV output), query it.
*/
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_EDID);
- if (dssdev)
- return omap_connector_get_modes_edid(connector, dssdev);
+ for (d = omap_connector->output; d; d = d->next) {
+ if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
+ dssdev = d;
+ }
- /*
- * Otherwise if the display pipeline reports modes (e.g. with a fixed
- * resolution panel or an analog TV output), query it.
- */
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_MODES);
if (dssdev)
return dssdev->ops->get_modes(dssdev, connector);
- /*
- * Otherwise if the display pipeline uses a drm_panel, we delegate the
- * operation to the panel API.
- */
- if (omap_connector->output->panel)
- return drm_panel_get_modes(omap_connector->output->panel,
- connector);
-
- /*
- * We can't retrieve modes, which can happen for instance for a DVI or
- * VGA output with the DDC bus unconnected. The KMS core will add the
- * default modes.
- */
+ /* We can't retrieve modes. The KMS core will add the default modes. */
return 0;
}
@@ -249,7 +74,7 @@ enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
drm_mode_copy(adjusted_mode, mode);
for (; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
+ if (!dssdev->ops || !dssdev->ops->check_timings)
continue;
ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
@@ -298,35 +123,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
-static int omap_connector_get_type(struct omap_dss_device *output)
-{
- struct omap_dss_device *display;
- enum omap_display_type type;
-
- display = omapdss_display_get(output);
- type = display->type;
- omapdss_device_put(display);
-
- switch (type) {
- case OMAP_DISPLAY_TYPE_HDMI:
- return DRM_MODE_CONNECTOR_HDMIA;
- case OMAP_DISPLAY_TYPE_DVI:
- return DRM_MODE_CONNECTOR_DVID;
- case OMAP_DISPLAY_TYPE_DSI:
- return DRM_MODE_CONNECTOR_DSI;
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- return DRM_MODE_CONNECTOR_DPI;
- case OMAP_DISPLAY_TYPE_VENC:
- /* TODO: This could also be composite */
- return DRM_MODE_CONNECTOR_SVIDEO;
- case OMAP_DISPLAY_TYPE_SDI:
- return DRM_MODE_CONNECTOR_LVDS;
- default:
- return DRM_MODE_CONNECTOR_Unknown;
- }
-}
-
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
@@ -334,7 +130,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
- struct omap_dss_device *dssdev;
DBG("%s", output->name);
@@ -349,27 +144,9 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- omap_connector_get_type(output));
+ DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- /*
- * Initialize connector status handling. First try to find a device that
- * supports hot-plug reporting. If it fails, fall back to a device that
- * support polling. If that fails too, we don't support hot-plug
- * detection at all.
- */
- dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);
- if (dssdev) {
- omap_connector->hpd = omapdss_device_get(dssdev);
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- } else {
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
- if (dssdev)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- }
-
return connector;
fail:
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 13607bda33d8..0ecd4f1655b7 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -21,9 +21,6 @@ struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
struct drm_encoder *encoder);
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void omap_connector_enable_hpd(struct drm_connector *connector);
-void omap_connector_disable_hpd(struct drm_connector *connector);
enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 3c5ddbf30e97..fce7e944a280 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -831,7 +831,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma
* tables so lets use that. Size of HW gamma table can be
* extracted with dispc_mgr_gamma_size(). If it returns 0
- * gamma table is not supprted.
+ * gamma table is not supported.
*/
if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 252f5ebb1acc..42ec51bb7b1b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -82,12 +82,11 @@ static const u32 reg[][4] = {
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
- struct dma_device *dma_dev = dmm->wa_dma_chan->device;
struct dma_async_tx_descriptor *tx;
enum dma_status status;
dma_cookie_t cookie;
- tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+ tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
if (!tx) {
dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -99,7 +98,6 @@ static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
return -EIO;
}
- dma_async_issue_pending(dmm->wa_dma_chan);
status = dma_sync_wait(dmm->wa_dma_chan, cookie);
if (status != DMA_COMPLETE)
dev_err(dmm->dev, "i878 wa DMA copy failure\n");
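[The tiler change above switches to the dmaengine wrapper API; the explicit dma_async_issue_pending() can go because dma_sync_wait() issues the pending descriptors itself before polling. The resulting pattern as a generic one-shot copy helper (a sketch, with error codes simplified):

#include <linux/dmaengine.h>

static int my_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -EIO;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Issues pending work, then polls until completion or timeout. */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}]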
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d2750f60f519..cdafd7ef1c32 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
@@ -134,9 +135,6 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- if (pipe->output->panel)
- drm_panel_detach(pipe->output->panel);
-
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
@@ -209,11 +207,12 @@ static int omap_display_id(struct omap_dss_device *output)
struct device_node *node = NULL;
if (output->next) {
- struct omap_dss_device *display;
+ struct omap_dss_device *display = output;
+
+ while (display->next)
+ display = display->next;
- display = omapdss_display_get(output);
node = display->dev->of_node;
- omapdss_device_put(display);
} else if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
@@ -221,8 +220,6 @@ static int omap_display_id(struct omap_dss_device *output)
bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
- } else if (output->panel) {
- node = output->panel->dev->of_node;
}
return node ? of_alias_get_id(node, "display") : -ENODEV;
@@ -297,9 +294,14 @@ static int omap_modeset_init(struct drm_device *dev)
if (pipe->output->bridge) {
ret = drm_bridge_attach(pipe->encoder,
- pipe->output->bridge, NULL);
- if (ret < 0)
+ pipe->output->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "unable to attach bridge %pOF\n",
+ pipe->output->bridge->of_node);
return ret;
+ }
}
id = omap_display_id(pipe->output);
@@ -330,20 +332,28 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
- if (!pipe->output->bridge) {
+ if (pipe->output->next) {
pipe->connector = omap_connector_init(dev, pipe->output,
encoder);
if (!pipe->connector)
return -ENOMEM;
+ } else {
+ pipe->connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(pipe->connector)) {
+ dev_err(priv->dev,
+ "unable to create bridge connector for %s\n",
+ pipe->output->name);
+ return PTR_ERR(pipe->connector);
+ }
+ }
- drm_connector_attach_encoder(pipe->connector, encoder);
+ drm_connector_attach_encoder(pipe->connector, encoder);
- if (pipe->output->panel) {
- ret = drm_panel_attach(pipe->output->panel,
- pipe->connector);
- if (ret < 0)
- return ret;
- }
+ if (pipe->output->panel) {
+ ret = drm_panel_attach(pipe->output->panel,
+ pipe->connector);
+ if (ret < 0)
+ return ret;
}
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
@@ -382,6 +392,23 @@ static int omap_modeset_init(struct drm_device *dev)
return 0;
}
+static void omap_modeset_fini(struct drm_device *ddev)
+{
+ struct omap_drm_private *priv = ddev->dev_private;
+ unsigned int i;
+
+ omap_drm_irq_uninstall(ddev);
+
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+
+ if (pipe->output->panel)
+ drm_panel_detach(pipe->output->panel);
+ }
+
+ drm_mode_config_cleanup(ddev);
+}
+
/*
* Enable the HPD in external components if supported
*/
@@ -391,8 +418,13 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_enable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_enable_hpd(connector);
}
}
@@ -405,8 +437,13 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_disable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_disable_hpd(connector);
}
}
@@ -629,8 +666,7 @@ err_cleanup_helpers:
omap_fbdev_fini(ddev);
err_cleanup_modeset:
- drm_mode_config_cleanup(ddev);
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
@@ -655,9 +691,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev);
- drm_mode_config_cleanup(ddev);
-
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4f2165a37795..ae4b867a67a3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -10,7 +10,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
-#include <drm/drm_panel.h>
#include "omap_drv.h"
@@ -70,30 +69,6 @@ static void omap_encoder_update_videomode_flags(struct videomode *vm,
}
}
-static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
- struct drm_encoder *encoder,
- struct drm_display_mode *adjusted_mode)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- bool hdmi_mode;
-
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
- int r;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
- adjusted_mode);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
-}
-
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -138,17 +113,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
- /* Set timings for all devices in the display pipeline. */
+ /* Set timings for the dss manager. */
dss_mgr_set_timings(output, &vm);
-
- for (dssdev = output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops->set_timings)
- dssdev->ops->set_timings(dssdev, adjusted_mode);
- }
-
- /* Set the HDMI mode and HDMI infoframe if applicable. */
- if (output->type == OMAP_DISPLAY_TYPE_HDMI)
- omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
@@ -159,33 +125,12 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
- /* Disable the panel if present. */
- if (dssdev->panel) {
- drm_panel_disable(dssdev->panel);
- drm_panel_unprepare(dssdev->panel);
- }
-
/*
* Disable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_disable(dssdev->next);
-
- /*
- * Disable the internal encoder. This will disable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->disable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- }
-
- /*
- * Perform the post-disable operations on the chain of external devices
- * to complete the display pipeline disable.
- */
- omapdss_device_post_disable(dssdev->next);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
@@ -196,30 +141,12 @@ static void omap_encoder_enable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
- /* Prepare the chain of external devices for pipeline enable. */
- omapdss_device_pre_enable(dssdev->next);
-
- /*
- * Enable the internal encoder. This will enable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->enable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- }
-
/*
* Enable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_enable(dssdev->next);
-
- /* Enable the panel if present. */
- if (dssdev->panel) {
- drm_panel_prepare(dssdev->panel);
- drm_panel_enable(dssdev->panel);
- }
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b06e5cbfd03a..09a84919ef73 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -242,14 +242,10 @@ void omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_pipes);
+ ret = drm_fb_helper_init(dev, helper);
if (ret)
goto fail;
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index ae44ac2ec106..a1723c1b5fbf 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -29,6 +29,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TV101WUM_NL6
+ tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the BOE TV101WUM and
+ AUO KD101N80 45NA WUXGA DSI video mode panels.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -50,6 +59,25 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_ELIDA_KD35T133
+ tristate "Elida KD35T133 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Elida
+ KD35T133 controller for 320x480 LCD panels with MIPI-DSI
+ system interfaces.
+
+config DRM_PANEL_FEIXIN_K101_IM2BA02
+ tristate "Feixin K101 IM2BA02 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Feixin K101 IM2BA02
+ 4-lane 800x1280 MIPI DSI panel.
+
config DRM_PANEL_FEIYANG_FY07024DI26A30D
tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
depends on OF
@@ -149,6 +177,16 @@ config DRM_PANEL_NEC_NL8048HL11
panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
as a module, choose M here.
+config DRM_PANEL_NOVATEK_NT35510
+ tristate "Novatek NT35510 RGB panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT35510 display controller, such as some
+ Hydis panels.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -275,6 +313,12 @@ config DRM_PANEL_SAMSUNG_S6E63M0
Say Y here if you want to enable support for Samsung S6E63M0
AMOLED LCD panel.
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
+ tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
+ depends on OF
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
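+ help
+ Say Y here if you want to enable support for the Samsung AMS452EF01
+ panel, which uses an S6E88A0 DSI video mode controller.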
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7c4d3c581fd4..96a883cd6630 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
+obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
@@ -13,6 +16,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -28,6 +32,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
new file mode 100644
index 000000000000..48a164257d18
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int bpc;
+
+ /**
+ * @width_mm: width of the panel's active display area
+ * @height_mm: height of the panel's active display area
+ */
+ struct {
+ unsigned int width_mm;
+ unsigned int height_mm;
+ } size;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ bool discharge_on_disable;
+};
+
+struct boe_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ const struct panel_desc *desc;
+
+ struct regulator *pp1800;
+ struct regulator *avee;
+ struct regulator *avdd;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+};
+
+enum dsi_cmd_type {
+ INIT_DCS_CMD,
+ DELAY_CMD,
+};
+
+struct panel_init_cmd {
+ enum dsi_cmd_type type;
+ size_t len;
+ const char *data;
+};
+
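+/*
+ * Helpers to build panel_init_cmd entries: for INIT_DCS_CMD, data[0] is the
+ * DCS command byte followed by its payload; for DELAY_CMD, data[0] is the
+ * delay in milliseconds.
+ */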
+#define _INIT_DCS_CMD(...) { \
+ .type = INIT_DCS_CMD, \
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+#define _INIT_DELAY_CMD(...) { \
+ .type = DELAY_CMD,\
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+static const struct panel_init_cmd boe_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x05),
+ _INIT_DCS_CMD(0xB1, 0xE5),
+ _INIT_DCS_CMD(0xB3, 0x52),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x88),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB6, 0x03),
+ _INIT_DCS_CMD(0xBA, 0x8B),
+ _INIT_DCS_CMD(0xBF, 0x1A),
+ _INIT_DCS_CMD(0xC0, 0x0F),
+ _INIT_DCS_CMD(0xC2, 0x0C),
+ _INIT_DCS_CMD(0xC3, 0x02),
+ _INIT_DCS_CMD(0xC4, 0x0C),
+ _INIT_DCS_CMD(0xC5, 0x02),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xE0, 0x26),
+ _INIT_DCS_CMD(0xE1, 0x26),
+ _INIT_DCS_CMD(0xDC, 0x00),
+ _INIT_DCS_CMD(0xDD, 0x00),
+ _INIT_DCS_CMD(0xCC, 0x26),
+ _INIT_DCS_CMD(0xCD, 0x26),
+ _INIT_DCS_CMD(0xC8, 0x00),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xD2, 0x03),
+ _INIT_DCS_CMD(0xD3, 0x03),
+ _INIT_DCS_CMD(0xE6, 0x04),
+ _INIT_DCS_CMD(0xE7, 0x04),
+ _INIT_DCS_CMD(0xC4, 0x09),
+ _INIT_DCS_CMD(0xC5, 0x09),
+ _INIT_DCS_CMD(0xD8, 0x0A),
+ _INIT_DCS_CMD(0xD9, 0x0A),
+ _INIT_DCS_CMD(0xC2, 0x0B),
+ _INIT_DCS_CMD(0xC3, 0x0B),
+ _INIT_DCS_CMD(0xD6, 0x0C),
+ _INIT_DCS_CMD(0xD7, 0x0C),
+ _INIT_DCS_CMD(0xC0, 0x05),
+ _INIT_DCS_CMD(0xC1, 0x05),
+ _INIT_DCS_CMD(0xD4, 0x06),
+ _INIT_DCS_CMD(0xD5, 0x06),
+ _INIT_DCS_CMD(0xCA, 0x07),
+ _INIT_DCS_CMD(0xCB, 0x07),
+ _INIT_DCS_CMD(0xDE, 0x08),
+ _INIT_DCS_CMD(0xDF, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x02),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xC1, 0x0D),
+ _INIT_DCS_CMD(0xC2, 0x17),
+ _INIT_DCS_CMD(0xC3, 0x26),
+ _INIT_DCS_CMD(0xC4, 0x31),
+ _INIT_DCS_CMD(0xC5, 0x1C),
+ _INIT_DCS_CMD(0xC6, 0x2C),
+ _INIT_DCS_CMD(0xC7, 0x33),
+ _INIT_DCS_CMD(0xC8, 0x31),
+ _INIT_DCS_CMD(0xC9, 0x37),
+ _INIT_DCS_CMD(0xCA, 0x37),
+ _INIT_DCS_CMD(0xCB, 0x37),
+ _INIT_DCS_CMD(0xCC, 0x39),
+ _INIT_DCS_CMD(0xCD, 0x2E),
+ _INIT_DCS_CMD(0xCE, 0x2F),
+ _INIT_DCS_CMD(0xCF, 0x2F),
+ _INIT_DCS_CMD(0xD0, 0x07),
+ _INIT_DCS_CMD(0xD2, 0x00),
+ _INIT_DCS_CMD(0xD3, 0x0D),
+ _INIT_DCS_CMD(0xD4, 0x17),
+ _INIT_DCS_CMD(0xD5, 0x26),
+ _INIT_DCS_CMD(0xD6, 0x31),
+ _INIT_DCS_CMD(0xD7, 0x3F),
+ _INIT_DCS_CMD(0xD8, 0x3F),
+ _INIT_DCS_CMD(0xD9, 0x3F),
+ _INIT_DCS_CMD(0xDA, 0x3F),
+ _INIT_DCS_CMD(0xDB, 0x37),
+ _INIT_DCS_CMD(0xDC, 0x37),
+ _INIT_DCS_CMD(0xDD, 0x37),
+ _INIT_DCS_CMD(0xDE, 0x39),
+ _INIT_DCS_CMD(0xDF, 0x2E),
+ _INIT_DCS_CMD(0xE0, 0x2F),
+ _INIT_DCS_CMD(0xE1, 0x2F),
+ _INIT_DCS_CMD(0xE2, 0x07),
+ _INIT_DCS_CMD(0xB0, 0x03),
+ _INIT_DCS_CMD(0xC8, 0x0B),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xC3, 0x00),
+ _INIT_DCS_CMD(0xE7, 0x00),
+ _INIT_DCS_CMD(0xC5, 0x2A),
+ _INIT_DCS_CMD(0xDE, 0x2A),
+ _INIT_DCS_CMD(0xCA, 0x43),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xE4, 0xC0),
+ _INIT_DCS_CMD(0xE5, 0x0D),
+ _INIT_DCS_CMD(0xCB, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x06),
+ _INIT_DCS_CMD(0xB8, 0xA5),
+ _INIT_DCS_CMD(0xC0, 0xA5),
+ _INIT_DCS_CMD(0xC7, 0x0F),
+ _INIT_DCS_CMD(0xD5, 0x32),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xBC, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x07),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x08),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x09),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0A),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0B),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0C),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x68),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
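+/* The AUO panel needs only exit sleep (0x11) and display on (0x29), with delays */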
+static const struct panel_init_cmd auo_kd101n80_45na_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(120),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(120),
+ {},
+};
+
+static const struct panel_init_cmd auo_b101uan08_3_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xC0, 0x48),
+ _INIT_DCS_CMD(0xC1, 0x48),
+ _INIT_DCS_CMD(0xC2, 0x47),
+ _INIT_DCS_CMD(0xC3, 0x47),
+ _INIT_DCS_CMD(0xC4, 0x46),
+ _INIT_DCS_CMD(0xC5, 0x46),
+ _INIT_DCS_CMD(0xC6, 0x45),
+ _INIT_DCS_CMD(0xC7, 0x45),
+ _INIT_DCS_CMD(0xC8, 0x64),
+ _INIT_DCS_CMD(0xC9, 0x64),
+ _INIT_DCS_CMD(0xCA, 0x4F),
+ _INIT_DCS_CMD(0xCB, 0x4F),
+ _INIT_DCS_CMD(0xCC, 0x40),
+ _INIT_DCS_CMD(0xCD, 0x40),
+ _INIT_DCS_CMD(0xCE, 0x66),
+ _INIT_DCS_CMD(0xCF, 0x66),
+ _INIT_DCS_CMD(0xD0, 0x4F),
+ _INIT_DCS_CMD(0xD1, 0x4F),
+ _INIT_DCS_CMD(0xD2, 0x41),
+ _INIT_DCS_CMD(0xD3, 0x41),
+ _INIT_DCS_CMD(0xD4, 0x48),
+ _INIT_DCS_CMD(0xD5, 0x48),
+ _INIT_DCS_CMD(0xD6, 0x47),
+ _INIT_DCS_CMD(0xD7, 0x47),
+ _INIT_DCS_CMD(0xD8, 0x46),
+ _INIT_DCS_CMD(0xD9, 0x46),
+ _INIT_DCS_CMD(0xDA, 0x45),
+ _INIT_DCS_CMD(0xDB, 0x45),
+ _INIT_DCS_CMD(0xDC, 0x64),
+ _INIT_DCS_CMD(0xDD, 0x64),
+ _INIT_DCS_CMD(0xDE, 0x4F),
+ _INIT_DCS_CMD(0xDF, 0x4F),
+ _INIT_DCS_CMD(0xE0, 0x40),
+ _INIT_DCS_CMD(0xE1, 0x40),
+ _INIT_DCS_CMD(0xE2, 0x66),
+ _INIT_DCS_CMD(0xE3, 0x66),
+ _INIT_DCS_CMD(0xE4, 0x4F),
+ _INIT_DCS_CMD(0xE5, 0x4F),
+ _INIT_DCS_CMD(0xE6, 0x41),
+ _INIT_DCS_CMD(0xE7, 0x41),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
+static inline struct boe_panel *to_boe_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_panel, base);
+}
+
+static int boe_panel_init_dcs_cmd(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ struct drm_panel *panel = &boe->base;
+ int i, err = 0;
+
+ if (boe->desc->init_cmds) {
+ const struct panel_init_cmd *init_cmds = boe->desc->init_cmds;
+
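+ /* The command table ends with an empty sentinel entry (len == 0) */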
+ for (i = 0; init_cmds[i].len != 0; i++) {
+ const struct panel_init_cmd *cmd = &init_cmds[i];
+
+ switch (cmd->type) {
+ case DELAY_CMD:
+ msleep(cmd->data[0]);
+ err = 0;
+ break;
+
+ case INIT_DCS_CMD:
+ err = mipi_dsi_dcs_write(dsi, cmd->data[0],
+ cmd->len <= 1 ? NULL :
+ &cmd->data[1],
+ cmd->len - 1);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to write command %u\n", i);
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (!boe->prepared)
+ return 0;
+
+ ret = boe_panel_enter_sleep_mode(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+ return ret;
+ }
+
+ msleep(150);
+
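+ /* Panels needing discharge drop AVEE/AVDD before deasserting the enable line */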
+ if (boe->desc->discharge_on_disable) {
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ } else {
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(500, 1000);
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ }
+
+ boe->prepared = false;
+
+ return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (boe->prepared)
+ return 0;
+
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 1500);
+
+ ret = regulator_enable(boe->pp1800);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 5000);
+
+ ret = regulator_enable(boe->avdd);
+ if (ret < 0)
+ goto poweroff1v8;
+ ret = regulator_enable(boe->avee);
+ if (ret < 0)
+ goto poweroffavdd;
+
+ usleep_range(5000, 10000);
+
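+ /* Pulse the enable line before sending the panel init commands */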
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(6000, 10000);
+
+ ret = boe_panel_init_dcs_cmd(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to init panel: %d\n", ret);
+ goto poweroff;
+ }
+
+ boe->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(boe->avee);
+poweroffavdd:
+ regulator_disable(boe->avdd);
+poweroff1v8:
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ return ret;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
+ msleep(130);
+ return 0;
+}
+
+static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
+ .clock = 159425,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 100,
+ .hsync_end = 1200 + 100 + 40,
+ .htotal = 1200 + 100 + 40 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc boe_tv101wum_nl6_desc = {
+ .modes = &boe_tv101wum_nl6_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+ .discharge_on_disable = false,
+};
+
+static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
+ .clock = 157000,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 36,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 4,
+ .vtotal = 1920 + 16 + 4 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_kd101n80_45na_desc = {
+ .modes = &auo_kd101n80_45na_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_kd101n80_45na_init_cmd,
+ .discharge_on_disable = true,
+};
+
+static const struct drm_display_mode boe_tv101wum_n53_default_mode = {
+ .clock = 159916,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 20,
+ .vsync_end = 1920 + 20 + 4,
+ .vtotal = 1920 + 20 + 4 + 10,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv101wum_n53_desc = {
+ .modes = &boe_tv101wum_n53_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+};
+
+static const struct drm_display_mode auo_b101uan08_3_default_mode = {
+ .clock = 159667,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 4,
+ .htotal = 1200 + 60 + 4 + 80,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 34,
+ .vsync_end = 1920 + 34 + 2,
+ .vtotal = 1920 + 34 + 2 + 24,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc auo_b101uan08_3_desc = {
+ .modes = &auo_b101uan08_3_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_b101uan08_3_init_cmd,
+};
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ const struct drm_display_mode *m = boe->desc->modes;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = boe->desc->size.width_mm;
+ connector->display_info.height_mm = boe->desc->size.height_mm;
+ connector->display_info.bpc = boe->desc->bpc;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs boe_panel_funcs = {
+ .unprepare = boe_panel_unprepare,
+ .prepare = boe_panel_prepare,
+ .enable = boe_panel_enable,
+ .get_modes = boe_panel_get_modes,
+};
+
+static int boe_panel_add(struct boe_panel *boe)
+{
+ struct device *dev = &boe->dsi->dev;
+ int err;
+
+ boe->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(boe->avdd))
+ return PTR_ERR(boe->avdd);
+
+ boe->avee = devm_regulator_get(dev, "avee");
+ if (IS_ERR(boe->avee))
+ return PTR_ERR(boe->avee);
+
+ boe->pp1800 = devm_regulator_get(dev, "pp1800");
+ if (IS_ERR(boe->pp1800))
+ return PTR_ERR(boe->pp1800);
+
+ boe->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(boe->enable_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(boe->enable_gpio));
+ return PTR_ERR(boe->enable_gpio);
+ }
+
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ drm_panel_init(&boe->base, dev, &boe_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&boe->base);
+ if (err)
+ return err;
+
+ return drm_panel_add(&boe->base);
+}
+
+static int boe_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe;
+ int ret;
+ const struct panel_desc *desc;
+
+ boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
+ if (!boe)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->lanes = desc->lanes;
+ dsi->format = desc->format;
+ dsi->mode_flags = desc->mode_flags;
+ boe->desc = desc;
+ boe->dsi = dsi;
+ ret = boe_panel_add(boe);
+ if (ret < 0)
+ return ret;
+
+ mipi_dsi_set_drvdata(dsi, boe);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ drm_panel_remove(&boe->base);
+
+ return ret;
+}
+
+static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&boe->base);
+ drm_panel_unprepare(&boe->base);
+}
+
+static int boe_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ boe_panel_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ if (boe->base.dev)
+ drm_panel_remove(&boe->base);
+
+ return 0;
+}
+
+static const struct of_device_id boe_of_match[] = {
+ { .compatible = "boe,tv101wum-nl6",
+ .data = &boe_tv101wum_nl6_desc
+ },
+ { .compatible = "auo,kd101n80-45na",
+ .data = &auo_kd101n80_45na_desc
+ },
+ { .compatible = "boe,tv101wum-n53",
+ .data = &boe_tv101wum_n53_desc
+ },
+ { .compatible = "auo,b101uan08.3",
+ .data = &auo_b101uan08_3_desc
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_of_match);
+
+static struct mipi_dsi_driver boe_panel_driver = {
+ .driver = {
+ .name = "panel-boe-tv101wum-nl6",
+ .of_match_table = boe_of_match,
+ },
+ .probe = boe_panel_probe,
+ .remove = boe_panel_remove,
+ .shutdown = boe_panel_shutdown,
+};
+module_mipi_dsi_driver(boe_panel_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_DESCRIPTION("BOE tv101wum-nl6 1200x1920 video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
new file mode 100644
index 000000000000..711ded453c44
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Elida kd35t133 3.5" MIPI-DSI panel driver
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Manufacturer-specific commands sent via DSI */
+#define KD35T133_CMD_INTERFACEMODECTRL 0xb0
+#define KD35T133_CMD_FRAMERATECTRL 0xb1
+#define KD35T133_CMD_DISPLAYINVERSIONCTRL 0xb4
+#define KD35T133_CMD_DISPLAYFUNCTIONCTRL 0xb6
+#define KD35T133_CMD_POWERCONTROL1 0xc0
+#define KD35T133_CMD_POWERCONTROL2 0xc1
+#define KD35T133_CMD_VCOMCONTROL 0xc5
+#define KD35T133_CMD_POSITIVEGAMMA 0xe0
+#define KD35T133_CMD_NEGATIVEGAMMA 0xe1
+#define KD35T133_CMD_SETIMAGEFUNCTION 0xe9
+#define KD35T133_CMD_ADJUSTCONTROL3 0xf7
+
+struct kd35t133 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vdd;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
+static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
+{
+ return container_of(panel, struct kd35t133, panel);
+}
+
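+/*
+ * Send a DCS command with the given payload bytes; on a write error the
+ * macro returns from the calling function.
+ */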
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int kd35t133_init_sequence(struct kd35t133 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor with minimal
+ * documentation.
+ */
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+ 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+ 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+ 0x20, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
+ 0xa9, 0x51, 0x2c, 0x82);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int kd35t133_unprepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vdd);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int kd35t133_prepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vdd);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vdd supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ msleep(20);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(250);
+
+ ret = kd35t133_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vdd:
+ regulator_disable(ctx->vdd);
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 320,
+ .hsync_start = 320 + 130,
+ .hsync_end = 320 + 130 + 4,
+ .htotal = 320 + 130 + 4 + 130,
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 1,
+ .vtotal = 480 + 2 + 1 + 2,
+ .vrefresh = 60,
+ .clock = 17000,
+ .width_mm = 42,
+ .height_mm = 82,
+};
+
+static int kd35t133_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs kd35t133_funcs = {
+ .unprepare = kd35t133_unprepare,
+ .prepare = kd35t133_prepare,
+ .get_modes = kd35t133_get_modes,
+};
+
+static int kd35t133_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct kd35t133 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(ctx->vdd)) {
+ ret = PTR_ERR(ctx->vdd);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vdd regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+}
+
+static int kd35t133_remove(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ kd35t133_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id kd35t133_of_match[] = {
+ { .compatible = "elida,kd35t133" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kd35t133_of_match);
+
+static struct mipi_dsi_driver kd35t133_driver = {
+ .driver = {
+ .name = "panel-elida-kd35t133",
+ .of_match_table = kd35t133_of_match,
+ },
+ .probe = kd35t133_probe,
+ .remove = kd35t133_remove,
+ .shutdown = kd35t133_shutdown,
+};
+module_mipi_dsi_driver(kd35t133_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Elida kd35t133 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
new file mode 100644
index 000000000000..fddbfddf6566
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2020 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
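+/* Each init command is a fixed-length (register, value) byte pair */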
+#define K101_IM2BA02_INIT_CMD_LEN 2
+
+static const char * const regulator_names[] = {
+ "dvdd",
+ "avdd",
+ "cvdd"
+};
+
+struct k101_im2ba02 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+ struct gpio_desc *reset;
+};
+
+static inline struct k101_im2ba02 *panel_to_k101_im2ba02(struct drm_panel *panel)
+{
+ return container_of(panel, struct k101_im2ba02, panel);
+}
+
+struct k101_im2ba02_init_cmd {
+ u8 data[K101_IM2BA02_INIT_CMD_LEN];
+};
+
+static const struct k101_im2ba02_init_cmd k101_im2ba02_init_cmds[] = {
+ /* Switch to page 0 */
+ { .data = { 0xE0, 0x00 } },
+
+ /* Seems to be some password */
+ { .data = { 0xE1, 0x93} },
+ { .data = { 0xE2, 0x65 } },
+ { .data = { 0xE3, 0xF8 } },
+
+ /* Lane number, 0x02 - 3 lanes, 0x03 - 4 lanes */
+ { .data = { 0x80, 0x03 } },
+
+ /* Sequence control */
+ { .data = { 0x70, 0x02 } },
+ { .data = { 0x71, 0x23 } },
+ { .data = { 0x72, 0x06 } },
+
+ /* Switch to page 1 */
+ { .data = { 0xE0, 0x01 } },
+
+ /* Set VCOM */
+ { .data = { 0x00, 0x00 } },
+ { .data = { 0x01, 0x66 } },
+ /* Set VCOM_Reverse */
+ { .data = { 0x03, 0x00 } },
+ { .data = { 0x04, 0x25 } },
+
+ /* Set Gamma Power, VG[MS][PN] */
+ { .data = { 0x17, 0x00 } },
+ { .data = { 0x18, 0x6D } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x00 } },
+ { .data = { 0x1B, 0xBF } }, /* VGMN = -4.5V */
+ { .data = { 0x1C, 0x00 } },
+
+ /* Set Gate Power */
+ { .data = { 0x1F, 0x3E } }, /* VGH_R = 15V */
+ { .data = { 0x20, 0x28 } }, /* VGL_R = -11V */
+ { .data = { 0x21, 0x28 } }, /* VGL_R2 = -11V */
+ { .data = { 0x22, 0x0E } }, /* PA[6:4] = 0, PA[0] = 0 */
+
+ /* Set Panel */
+ { .data = { 0x37, 0x09 } }, /* SS = 1, BGR = 1 */
+
+ /* Set RGBCYC */
+ { .data = { 0x38, 0x04 } }, /* JDT = 100 column inversion */
+ { .data = { 0x39, 0x08 } }, /* RGB_N_EQ1 */
+ { .data = { 0x3A, 0x12 } }, /* RGB_N_EQ2 */
+ { .data = { 0x3C, 0x78 } }, /* set EQ3 for TE_H */
+ { .data = { 0x3D, 0xFF } }, /* set CHGEN_ON */
+ { .data = { 0x3E, 0xFF } }, /* set CHGEN_OFF */
+ { .data = { 0x3F, 0x7F } }, /* set CHGEN_OFF2 */
+
+ /* Set TCON parameter */
+ { .data = { 0x40, 0x06 } }, /* RSO = 800 points */
+ { .data = { 0x41, 0xA0 } }, /* LN = 1280 lines */
+
+ /* Set power voltage */
+ { .data = { 0x55, 0x0F } }, /* DCDCM */
+ { .data = { 0x56, 0x01 } },
+ { .data = { 0x57, 0x69 } },
+ { .data = { 0x58, 0x0A } },
+ { .data = { 0x59, 0x0A } },
+ { .data = { 0x5A, 0x45 } },
+ { .data = { 0x5B, 0x15 } },
+
+ /* Set gamma */
+ { .data = { 0x5D, 0x7C } },
+ { .data = { 0x5E, 0x65 } },
+ { .data = { 0x5F, 0x55 } },
+ { .data = { 0x60, 0x49 } },
+ { .data = { 0x61, 0x44 } },
+ { .data = { 0x62, 0x35 } },
+ { .data = { 0x63, 0x3A } },
+ { .data = { 0x64, 0x23 } },
+ { .data = { 0x65, 0x3D } },
+ { .data = { 0x66, 0x3C } },
+ { .data = { 0x67, 0x3D } },
+ { .data = { 0x68, 0x5D } },
+ { .data = { 0x69, 0x4D } },
+ { .data = { 0x6A, 0x56 } },
+ { .data = { 0x6B, 0x48 } },
+ { .data = { 0x6C, 0x45 } },
+ { .data = { 0x6D, 0x38 } },
+ { .data = { 0x6E, 0x25 } },
+ { .data = { 0x6F, 0x00 } },
+ { .data = { 0x70, 0x7C } },
+ { .data = { 0x71, 0x65 } },
+ { .data = { 0x72, 0x55 } },
+ { .data = { 0x73, 0x49 } },
+ { .data = { 0x74, 0x44 } },
+ { .data = { 0x75, 0x35 } },
+ { .data = { 0x76, 0x3A } },
+ { .data = { 0x77, 0x23 } },
+ { .data = { 0x78, 0x3D } },
+ { .data = { 0x79, 0x3C } },
+ { .data = { 0x7A, 0x3D } },
+ { .data = { 0x7B, 0x5D } },
+ { .data = { 0x7C, 0x4D } },
+ { .data = { 0x7D, 0x56 } },
+ { .data = { 0x7E, 0x48 } },
+ { .data = { 0x7F, 0x45 } },
+ { .data = { 0x80, 0x38 } },
+ { .data = { 0x81, 0x25 } },
+ { .data = { 0x82, 0x00 } },
+
+ /* Switch to page 2, for GIP */
+ { .data = { 0xE0, 0x02 } },
+
+ { .data = { 0x00, 0x1E } },
+ { .data = { 0x01, 0x1E } },
+ { .data = { 0x02, 0x41 } },
+ { .data = { 0x03, 0x41 } },
+ { .data = { 0x04, 0x43 } },
+ { .data = { 0x05, 0x43 } },
+ { .data = { 0x06, 0x1F } },
+ { .data = { 0x07, 0x1F } },
+ { .data = { 0x08, 0x1F } },
+ { .data = { 0x09, 0x1F } },
+ { .data = { 0x0A, 0x1E } },
+ { .data = { 0x0B, 0x1E } },
+ { .data = { 0x0C, 0x1F } },
+ { .data = { 0x0D, 0x47 } },
+ { .data = { 0x0E, 0x47 } },
+ { .data = { 0x0F, 0x45 } },
+ { .data = { 0x10, 0x45 } },
+ { .data = { 0x11, 0x4B } },
+ { .data = { 0x12, 0x4B } },
+ { .data = { 0x13, 0x49 } },
+ { .data = { 0x14, 0x49 } },
+ { .data = { 0x15, 0x1F } },
+
+ { .data = { 0x16, 0x1E } },
+ { .data = { 0x17, 0x1E } },
+ { .data = { 0x18, 0x40 } },
+ { .data = { 0x19, 0x40 } },
+ { .data = { 0x1A, 0x42 } },
+ { .data = { 0x1B, 0x42 } },
+ { .data = { 0x1C, 0x1F } },
+ { .data = { 0x1D, 0x1F } },
+ { .data = { 0x1E, 0x1F } },
+ { .data = { 0x1F, 0x1F } },
+ { .data = { 0x20, 0x1E } },
+ { .data = { 0x21, 0x1E } },
+ { .data = { 0x22, 0x1F } },
+ { .data = { 0x23, 0x46 } },
+ { .data = { 0x24, 0x46 } },
+ { .data = { 0x25, 0x44 } },
+ { .data = { 0x26, 0x44 } },
+ { .data = { 0x27, 0x4A } },
+ { .data = { 0x28, 0x4A } },
+ { .data = { 0x29, 0x48 } },
+ { .data = { 0x2A, 0x48 } },
+ { .data = { 0x2B, 0x1F } },
+
+ { .data = { 0x2C, 0x1F } },
+ { .data = { 0x2D, 0x1F } },
+ { .data = { 0x2E, 0x42 } },
+ { .data = { 0x2F, 0x42 } },
+ { .data = { 0x30, 0x40 } },
+ { .data = { 0x31, 0x40 } },
+ { .data = { 0x32, 0x1E } },
+ { .data = { 0x33, 0x1E } },
+ { .data = { 0x34, 0x1F } },
+ { .data = { 0x35, 0x1F } },
+ { .data = { 0x36, 0x1E } },
+ { .data = { 0x37, 0x1E } },
+ { .data = { 0x38, 0x1F } },
+ { .data = { 0x39, 0x48 } },
+ { .data = { 0x3A, 0x48 } },
+ { .data = { 0x3B, 0x4A } },
+ { .data = { 0x3C, 0x4A } },
+ { .data = { 0x3D, 0x44 } },
+ { .data = { 0x3E, 0x44 } },
+ { .data = { 0x3F, 0x46 } },
+ { .data = { 0x40, 0x46 } },
+ { .data = { 0x41, 0x1F } },
+
+ { .data = { 0x42, 0x1F } },
+ { .data = { 0x43, 0x1F } },
+ { .data = { 0x44, 0x43 } },
+ { .data = { 0x45, 0x43 } },
+ { .data = { 0x46, 0x41 } },
+ { .data = { 0x47, 0x41 } },
+ { .data = { 0x48, 0x1E } },
+ { .data = { 0x49, 0x1E } },
+ { .data = { 0x4A, 0x1E } },
+ { .data = { 0x4B, 0x1F } },
+ { .data = { 0x4C, 0x1E } },
+ { .data = { 0x4D, 0x1E } },
+ { .data = { 0x4E, 0x1F } },
+ { .data = { 0x4F, 0x49 } },
+ { .data = { 0x50, 0x49 } },
+ { .data = { 0x51, 0x4B } },
+ { .data = { 0x52, 0x4B } },
+ { .data = { 0x53, 0x45 } },
+ { .data = { 0x54, 0x45 } },
+ { .data = { 0x55, 0x47 } },
+ { .data = { 0x56, 0x47 } },
+ { .data = { 0x57, 0x1F } },
+
+ { .data = { 0x58, 0x10 } },
+ { .data = { 0x59, 0x00 } },
+ { .data = { 0x5A, 0x00 } },
+ { .data = { 0x5B, 0x30 } },
+ { .data = { 0x5C, 0x02 } },
+ { .data = { 0x5D, 0x40 } },
+ { .data = { 0x5E, 0x01 } },
+ { .data = { 0x5F, 0x02 } },
+ { .data = { 0x60, 0x30 } },
+ { .data = { 0x61, 0x01 } },
+ { .data = { 0x62, 0x02 } },
+ { .data = { 0x63, 0x6A } },
+ { .data = { 0x64, 0x6A } },
+ { .data = { 0x65, 0x05 } },
+ { .data = { 0x66, 0x12 } },
+ { .data = { 0x67, 0x74 } },
+ { .data = { 0x68, 0x04 } },
+ { .data = { 0x69, 0x6A } },
+ { .data = { 0x6A, 0x6A } },
+ { .data = { 0x6B, 0x08 } },
+
+ { .data = { 0x6C, 0x00 } },
+ { .data = { 0x6D, 0x04 } },
+ { .data = { 0x6E, 0x04 } },
+ { .data = { 0x6F, 0x88 } },
+ { .data = { 0x70, 0x00 } },
+ { .data = { 0x71, 0x00 } },
+ { .data = { 0x72, 0x06 } },
+ { .data = { 0x73, 0x7B } },
+ { .data = { 0x74, 0x00 } },
+ { .data = { 0x75, 0x07 } },
+ { .data = { 0x76, 0x00 } },
+ { .data = { 0x77, 0x5D } },
+ { .data = { 0x78, 0x17 } },
+ { .data = { 0x79, 0x1F } },
+ { .data = { 0x7A, 0x00 } },
+ { .data = { 0x7B, 0x00 } },
+ { .data = { 0x7C, 0x00 } },
+ { .data = { 0x7D, 0x03 } },
+ { .data = { 0x7E, 0x7B } },
+
+ { .data = { 0xE0, 0x04 } },
+ { .data = { 0x2B, 0x2B } },
+ { .data = { 0x2E, 0x44 } },
+
+ { .data = { 0xE0, 0x01 } },
+ { .data = { 0x0E, 0x01 } },
+
+ { .data = { 0xE0, 0x03 } },
+ { .data = { 0x98, 0x2F } },
+
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE6, 0x02 } },
+ { .data = { 0xE7, 0x02 } },
+
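+ /* 0x11 is MIPI_DCS_EXIT_SLEEP_MODE */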
+ { .data = { 0x11, 0x00 } },
+};
+
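+/* 0x29 is MIPI_DCS_SET_DISPLAY_ON, 0x35 is MIPI_DCS_SET_TEAR_ON (vblank only) */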
+static const struct k101_im2ba02_init_cmd timed_cmds[] = {
+ { .data = { 0x29, 0x00 } },
+ { .data = { 0x35, 0x00 } },
+};
+
+static int k101_im2ba02_prepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ unsigned int i;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+
+ msleep(30);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(200);
+
+ for (i = 0; i < ARRAY_SIZE(k101_im2ba02_init_cmds); i++) {
+ const struct k101_im2ba02_init_cmd *cmd = &k101_im2ba02_init_cmds[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+ if (ret < 0)
+ goto powerdown;
+ }
+
+ return 0;
+
+powerdown:
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int k101_im2ba02_enable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ const struct k101_im2ba02_init_cmd *cmd = &timed_cmds[1];
+ int ret;
+
+ msleep(150);
+
+ ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
+ if (ret < 0)
+ return ret;
+
+ msleep(50);
+
+ return mipi_dsi_dcs_write_buffer(ctx->dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+}
+
+static int k101_im2ba02_disable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int k101_im2ba02_unprepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ msleep(200);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(20);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static const struct drm_display_mode k101_im2ba02_default_mode = {
+ .clock = 70000,
+ .vrefresh = 60,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 20,
+ .htotal = 800 + 20 + 20 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 4,
+ .vtotal = 1280 + 16 + 4 + 4,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .width_mm = 136,
+ .height_mm = 217,
+};
+
+static int k101_im2ba02_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ k101_im2ba02_default_mode.hdisplay,
+ k101_im2ba02_default_mode.vdisplay,
+ k101_im2ba02_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs k101_im2ba02_funcs = {
+ .disable = k101_im2ba02_disable,
+ .unprepare = k101_im2ba02_unprepare,
+ .prepare = k101_im2ba02_prepare,
+ .enable = k101_im2ba02_enable,
+ .get_modes = k101_im2ba02_get_modes,
+};
+
+static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx;
+ unsigned int i;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get regulators\n");
+ return ret;
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id k101_im2ba02_of_match[] = {
+ { .compatible = "feixin,k101-im2ba02", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, k101_im2ba02_of_match);
+
+static struct mipi_dsi_driver k101_im2ba02_driver = {
+ .probe = k101_im2ba02_dsi_probe,
+ .remove = k101_im2ba02_dsi_remove,
+ .driver = {
+ .name = "feixin-k101-im2ba02",
+ .of_match_table = k101_im2ba02_of_match,
+ },
+};
+module_mipi_dsi_driver(k101_im2ba02_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_DESCRIPTION("Feixin K101 IM2BA02 MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index f394d53a7da4..09935520e606 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -540,7 +540,7 @@ static int ili9322_enable(struct drm_panel *panel)
/* Serial RGB modes */
static const struct drm_display_mode srgb_320x240_mode = {
- .clock = 2453500,
+ .clock = 24535,
.hdisplay = 320,
.hsync_start = 320 + 359,
.hsync_end = 320 + 359 + 1,
@@ -554,7 +554,7 @@ static const struct drm_display_mode srgb_320x240_mode = {
};
static const struct drm_display_mode srgb_360x240_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 360,
.hsync_start = 360 + 35,
.hsync_end = 360 + 35 + 1,
@@ -569,7 +569,7 @@ static const struct drm_display_mode srgb_360x240_mode = {
/* This is the only mode listed for parallel RGB in the datasheet */
static const struct drm_display_mode prgb_320x240_mode = {
- .clock = 6400000,
+ .clock = 64000,
.hdisplay = 320,
.hsync_start = 320 + 38,
.hsync_end = 320 + 38 + 1,
@@ -584,7 +584,7 @@ static const struct drm_display_mode prgb_320x240_mode = {
/* YUV modes */
static const struct drm_display_mode yuv_640x320_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 252,
.hsync_end = 640 + 252 + 1,
@@ -598,7 +598,7 @@ static const struct drm_display_mode yuv_640x320_mode = {
};
static const struct drm_display_mode yuv_720x360_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 252,
.hsync_end = 720 + 252 + 1,
@@ -613,7 +613,7 @@ static const struct drm_display_mode yuv_720x360_mode = {
/* BT.656 VGA mode, 640x480 */
static const struct drm_display_mode itu_r_bt_656_640_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 3,
.hsync_end = 640 + 3 + 1,
@@ -628,7 +628,7 @@ static const struct drm_display_mode itu_r_bt_656_640_mode = {
/* BT.656 D1 mode 720x480 */
static const struct drm_display_mode itu_r_bt_656_720_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 3,
.hsync_end = 720 + 3 + 1,
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index b262b53dbd85..5907f2503755 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -197,7 +197,7 @@ static int lg4573_enable(struct drm_panel *panel)
}
static const struct drm_display_mode default_mode = {
- .clock = 27000,
+ .clock = 28341,
.hdisplay = 480,
.hsync_start = 480 + 10,
.hsync_end = 480 + 10 + 59,
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
new file mode 100644
index 000000000000..4a8fa908a2cf
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Novatek NT35510 panel driver
+ * Copyright (C) 2020 Linus Walleij <linus.walleij@linaro.org>
+ * Based on code by Robert Teather (C) 2012 Samsung
+ *
+ * This display driver (and I refer to the physical component NT35510,
+ * not this Linux kernel software driver) can handle:
+ * 480x864, 480x854, 480x800, 480x720 and 480x640 pixel displays.
+ * It has 480x840x24bit SRAM embedded for storing a frame.
+ * When powered on the display is by default in 480x800 mode.
+ *
+ * The actual panels using this component have different names, but
+ * the code needed to set up and configure the panel will be similar,
+ * so they should all use the NT35510 driver with appropriate configuration
+ * per-panel, e.g. for physical size.
+ *
+ * This driver is for the DSI interface to panels using the NT35510.
+ *
+ * The NT35510 can also use an RGB (DPI) interface combined with an
+ * I2C or SPI interface for setting up the NT35510. If this is needed
+ * this panel driver should be refactored to also support that use
+ * case.
+ */
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
+#define MCS_CMD_READ_ID1 0xDA
+#define MCS_CMD_READ_ID2 0xDB
+#define MCS_CMD_READ_ID3 0xDC
+#define MCS_CMD_MTP_READ_SETTING 0xF8 /* Uncertain about name */
+#define MCS_CMD_MTP_READ_PARAM 0xFF /* Uncertain about name */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 0.
+ */
+#define NT35510_P0_DOPCTR 0xB1
+#define NT35510_P0_SDHDTCTR 0xB6
+#define NT35510_P0_GSEQCTR 0xB7
+#define NT35510_P0_SDEQCTR 0xB8
+#define NT35510_P0_SDVPCTR 0xBA
+#define NT35510_P0_DPFRCTR1 0xBD
+#define NT35510_P0_DPFRCTR2 0xBE
+#define NT35510_P0_DPFRCTR3 0xBF
+#define NT35510_P0_DPMCTR12 0xCC
+
+#define NT35510_P0_DOPCTR_LEN 2
+#define NT35510_P0_GSEQCTR_LEN 2
+#define NT35510_P0_SDEQCTR_LEN 4
+#define NT35510_P0_SDVPCTR_LEN 1
+#define NT35510_P0_DPFRCTR1_LEN 5
+#define NT35510_P0_DPFRCTR2_LEN 5
+#define NT35510_P0_DPFRCTR3_LEN 5
+#define NT35510_P0_DPMCTR12_LEN 3
+
+#define NT35510_DOPCTR_0_RAMKP BIT(7) /* Contents kept in sleep */
+#define NT35510_DOPCTR_0_DSITE BIT(6) /* Enable TE signal */
+#define NT35510_DOPCTR_0_DSIG BIT(5) /* Enable generic read/write */
+#define NT35510_DOPCTR_0_DSIM BIT(4) /* Enable video mode on DSI */
+#define NT35510_DOPCTR_0_EOTP BIT(3) /* Support EoTP */
+#define NT35510_DOPCTR_0_N565 BIT(2) /* RGB or BGR pixel format */
+#define NT35510_DOPCTR_1_TW_PWR_SEL BIT(4) /* TE power selector */
+#define NT35510_DOPCTR_1_CRGB BIT(3) /* RGB or BGR byte order */
+#define NT35510_DOPCTR_1_CTB BIT(2) /* Vertical scanning direction */
+#define NT35510_DOPCTR_1_CRL BIT(1) /* Source driver data shift */
+#define NT35510_P0_SDVPCTR_PRG BIT(2) /* 0 = normal operation, 1 = VGLO */
+#define NT35510_P0_SDVPCTR_AVDD 0 /* source driver output = AVDD */
+#define NT35510_P0_SDVPCTR_OFFCOL 1 /* source driver output = off color */
+#define NT35510_P0_SDVPCTR_AVSS 2 /* source driver output = AVSS */
+#define NT35510_P0_SDVPCTR_HI_Z 3 /* source driver output = High impedance */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 1.
+ */
+#define NT35510_P1_SETAVDD 0xB0
+#define NT35510_P1_SETAVEE 0xB1
+#define NT35510_P1_SETVCL 0xB2
+#define NT35510_P1_SETVGH 0xB3
+#define NT35510_P1_SETVRGH 0xB4
+#define NT35510_P1_SETVGL 0xB5
+#define NT35510_P1_BT1CTR 0xB6
+#define NT35510_P1_BT2CTR 0xB7
+#define NT35510_P1_BT3CTR 0xB8
+#define NT35510_P1_BT4CTR 0xB9 /* VGH boosting times/freq */
+#define NT35510_P1_BT5CTR 0xBA
+#define NT35510_P1_PFMCTR 0xBB
+#define NT35510_P1_SETVGP 0xBC
+#define NT35510_P1_SETVGN 0xBD
+#define NT35510_P1_SETVCMOFF 0xBE
+#define NT35510_P1_VGHCTR 0xBF /* VGH output ctrl */
+#define NT35510_P1_SET_GAMMA_RED_POS 0xD1
+#define NT35510_P1_SET_GAMMA_GREEN_POS 0xD2
+#define NT35510_P1_SET_GAMMA_BLUE_POS 0xD3
+#define NT35510_P1_SET_GAMMA_RED_NEG 0xD4
+#define NT35510_P1_SET_GAMMA_GREEN_NEG 0xD5
+#define NT35510_P1_SET_GAMMA_BLUE_NEG 0xD6
+
+/* AVDD and AVEE setting 3 bytes */
+#define NT35510_P1_AVDD_LEN 3
+#define NT35510_P1_AVEE_LEN 3
+#define NT35510_P1_VGH_LEN 3
+#define NT35510_P1_VGL_LEN 3
+#define NT35510_P1_VGP_LEN 3
+#define NT35510_P1_VGN_LEN 3
+/* BT1CTR thru BT5CTR setting 3 bytes */
+#define NT35510_P1_BT1CTR_LEN 3
+#define NT35510_P1_BT2CTR_LEN 3
+#define NT35510_P1_BT4CTR_LEN 3
+#define NT35510_P1_BT5CTR_LEN 3
+/* 52 gamma parameters times two per color: positive and negative */
+#define NT35510_P1_GAMMA_LEN 52
+
+/**
+ * struct nt35510_config - the display-specific NT35510 configuration
+ *
+ * Some of the settings provide an array of bytes, A, B, C, which mean:
+ * A = normal / idle off mode
+ * B = idle on mode
+ * C = partial / idle off mode
+ *
+ * Gamma correction arrays are 10bit numbers, two consecutive bytes
+ * make up one point on the gamma correction curve. The points are
+ * not linearly placed along the X axis; we get points 0, 1, 3, 5,
+ * 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160, 192, 208, 224, 232,
+ * 240, 244, 248, 250, 252, 254, 255. The voltage tuples form
+ * V0, V1, V3 ... V255, with 0x0000 being the lowest voltage and
+ * 0x03FF being the highest voltage.
+ *
+ * Each value must be strictly higher than the previous value, forming
+ * a rising curve like this:
+ *
+ * ^
+ * | V255
+ * | V254
+ * | ....
+ * | V5
+ * | V3
+ * | V1
+ * | V0
+ * +------------------------------------------->
+ *
+ * The details about all settings can be found in the NT35510 Application
+ * Note.
+ */
+struct nt35510_config {
+ /**
+ * @width_mm: physical panel width [mm]
+ */
+ u32 width_mm;
+ /**
+ * @height_mm: physical panel height [mm]
+ */
+ u32 height_mm;
+ /**
+ * @mode: the display mode. This is only exposed outside the panel
+ * in video mode; in command mode it configures the internal
+ * timing in the display controller.
+ */
+ const struct drm_display_mode mode;
+ /**
+ * @avdd: setting for AVDD ranging from 0x00 = 6.5V to 0x14 = 4.5V
+ * in 0.1V steps; the default is 0x05, which means 6.0V
+ */
+ u8 avdd[NT35510_P1_AVDD_LEN];
+ /**
+ * @bt1ctr: setting for boost power control for the AVDD step-up
+ * circuit (1)
+ * bits 0..2 in the lower nibble controls PCK, the booster clock
+ * frequency for the step-up circuit:
+ * 0 = Hsync/32
+ * 1 = Hsync/16
+ * 2 = Hsync/8
+ * 3 = Hsync/4
+ * 4 = Hsync/2
+ * 5 = Hsync
+ * 6 = Hsync x 2
+ * 7 = Hsync x 4
+ * bits 4..6 in the upper nibble controls BTP, the boosting
+ * amplification for the step-up circuit:
+ * 0 = Disable
+ * 1 = 1.5 x VDDB
+ * 2 = 1.66 x VDDB
+ * 3 = 2 x VDDB
+ * 4 = 2.5 x VDDB
+ * 5 = 3 x VDDB
+ * The defaults are 4 and 4 yielding 0x44
+ */
+ u8 bt1ctr[NT35510_P1_BT1CTR_LEN];
+ /**
+ * @avee: setting for AVEE ranging from 0x00 = -6.5V to 0x14 = -4.5V
+ * in 0.1V steps; the default is 0x05, which means -6.0V
+ */
+ u8 avee[NT35510_P1_AVEE_LEN];
+ /**
+ * @bt2ctr: setting for boost power control for the AVEE step-up
+ * circuit (2)
+ * bits 0..2 in the lower nibble controls NCK, the booster clock
+ * frequency, the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble controls BTN, the boosting
+ * amplification for the step-up circuit.
+ * 0 = Disable
+ * 1 = -1.5 x VDDB
+ * 2 = -2 x VDDB
+ * 3 = -2.5 x VDDB
+ * 4 = -3 x VDDB
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
+ /**
+ * @vgh: setting for VGH ranging from 0x00 = 7.0V to 0x0B = 18.0V
+ * in 1V steps, the default is 0x08 which means 15V
+ */
+ u8 vgh[NT35510_P1_VGH_LEN];
+ /**
+ * @bt4ctr: setting for boost power control for the VGH step-up
+ * circuit (4)
+ * bits 0..2 in the lower nibble controls HCK, the booster clock
+ * frequency, the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble controls BTH, the boosting
+ * amplification for the step-up circuit.
+ * 0 = AVDD + VDDB
+ * 1 = AVDD - AVEE
+ * 2 = AVDD - AVEE + VDDB
+ * 3 = AVDD x 2 - AVEE
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt4ctr[NT35510_P1_BT4CTR_LEN];
+ /**
+ * @vgl: setting for VGL ranging from 0x00 = -2V to 0x0f = -15V in
+ * 1V steps, the default is 0x08 which means -10V
+ */
+ u8 vgl[NT35510_P1_VGL_LEN];
+ /**
+ * @bt5ctr: setting for boost power control for the VGL step-up
+ * circuit (5)
+ * bits 0..2 in the lower nibble controls LCK, the booster clock
+ * frequency, the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble controls BTL, the boosting
+ * amplification for the step-up circuit.
+ * 0 = AVEE + VCL
+ * 1 = AVEE - AVDD
+ * 2 = AVEE + VCL - AVDD
+ * 3 = AVEE x 2 - AVDD
+ * The defaults are 3 and 2 yielding 0x32
+ */
+ u8 bt5ctr[NT35510_P1_BT5CTR_LEN];
+ /**
+ * @vgp: setting for VGP, the positive gamma divider voltages
+ * VGMP the high voltage and VGSP the low voltage.
+ * The first byte contains bit 8 of VGMP and VGSP in bits 4 and 0
+ * The second byte contains bit 0..7 of VGMP
+ * The third byte contains bit 0..7 of VGSP
+ * VGMP 0x00 = 3.0V .. 0x108 = 6.3V in steps of 12.5mV
+ * VGSP 0x00 = 0V .. 0x111 = 3.7V in steps of 12.5mV
+ */
+ u8 vgp[NT35510_P1_VGP_LEN];
+ /**
+ * @vgn: setting for VGN, the negative gamma divider voltages,
+ * same layout of bytes as @vgp.
+ */
+ u8 vgn[NT35510_P1_VGN_LEN];
+ /**
+ * @sdeqctr: Source driver control settings, first byte is
+ * 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
+ * mode 2 uses three steps meaning EQS3 is not used in mode
+ * 1. Mode 2 is default. The last three parameters are EQS1, EQS2
+ * and EQS3, setting the rise time for each equalizer step:
+ * 0x00 = 0.0 us to 0x0f = 7.5 us in steps of 0.5us. The default
+ * is 0x07 = 3.5 us.
+ */
+ u8 sdeqctr[NT35510_P0_SDEQCTR_LEN];
+ /**
+ * @sdvpctr: power/voltage behaviour during vertical porch time
+ */
+ u8 sdvpctr;
+ /**
+ * @t1: the number of pixel clocks on one scanline, range
+ * 0x100 (257 ticks) .. 0x3FF (1024 ticks), i.e. the value + 1
+ * clock ticks.
+ */
+ u16 t1;
+ /**
+ * @vbp: vertical back porch toward the PANEL. Note: this is not
+ * toward the DSI host; these are separate interfaces, in from the
+ * DSI host and out to the panel.
+ */
+ u8 vbp;
+ /**
+ * @vfp: vertical front porch toward the PANEL.
+ */
+ u8 vfp;
+ /**
+ * @psel: pixel clock divisor: 0 = 1, 1 = 2, 2 = 4, 3 = 8.
+ */
+ u8 psel;
+ /**
+ * @dpmctr12: Display timing control 12
+ * Byte 1 bit 4 selects LVGL voltage level: 0 = VGLX, 1 = VGL_REG
+ * Byte 1 bit 1 selects gate signal mode: 0 = non-overlap, 1 = overlap
+ * Byte 1 bit 0 selects output signal control R/L swap, 0 = normal
+ * 1 = swap all O->E, L->R
+ * Byte 2 is CLW delay clock for CK O/E and CKB O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ * Byte 3 is FTI_H0 delay time for STP O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ */
+ u8 dpmctr12[NT35510_P0_DPMCTR12_LEN];
+ /**
+ * @gamma_corr_pos_r: Red gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_g: Green gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_b: Blue gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_b[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_r: Red gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_g: Green gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_b: Blue gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
+};
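+
+/*
+ * An illustrative decoding of the fields above (example values, not
+ * vendor data): each gamma point is two consecutive bytes forming a
+ * 10-bit value, high byte first, so the pair 0x01, 0x27 encodes
+ * (0x01 << 8) | 0x27 = 0x0127 on the 0x0000..0x03FF voltage scale.
+ * Likewise the bt1ctr default 0x44 decodes as PCK = 4 (Hsync/2) in
+ * the low nibble and BTP = 4 (2.5 x VDDB) in the high nibble.
+ */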
+
+/**
+ * struct nt35510 - state container for the NT35510 panel
+ */
+struct nt35510 {
+ /**
+ * @dev: the container device
+ */
+ struct device *dev;
+ /**
+ * @conf: the specific panel configuration, as the NT35510
+ * can be combined with many physical panels, they can have
+ * different physical dimensions and gamma correction etc,
+ * so this is stored in the config.
+ */
+ const struct nt35510_config *conf;
+ /**
+ * @panel: the DRM panel object for the instance
+ */
+ struct drm_panel panel;
+ /**
+ * @supplies: regulators supplying the panel
+ */
+ struct regulator_bulk_data supplies[2];
+ /**
+ * @reset_gpio: the reset line
+ */
+ struct gpio_desc *reset_gpio;
+};
+
+/* Manufacturer command has strictly this byte sequence */
+static const u8 nt35510_mauc_select_page_0[] = { 0x55, 0xAA, 0x52, 0x08, 0x00 };
+static const u8 nt35510_mauc_select_page_1[] = { 0x55, 0xAA, 0x52, 0x08, 0x01 };
+static const u8 nt35510_vgh_on[] = { 0x01 };
+
+static inline struct nt35510 *panel_to_nt35510(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt35510, panel);
+}
+
+#define NT35510_ROTATE_0_SETTING 0x02
+#define NT35510_ROTATE_180_SETTING 0x00
+
+static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
+ u8 cmd, u8 cmdlen, const u8 *seq)
+{
+ const u8 *seqp = seq;
+ int cmdwritten = 0;
+ int chunk = cmdlen;
+ int ret;
+
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending DCS command seq cmd %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+
+ while (cmdwritten < cmdlen) {
+ chunk = cmdlen - cmdwritten;
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_generic_write(dsi, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending generic write seq %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+ }
+ DRM_DEV_DEBUG(nt->dev, "sent command %02x, %d bytes\n",
+ cmd, cmdlen);
+ return 0;
+}
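+
+/*
+ * For example, sending the 52-byte gamma table through this helper
+ * results in one DCS write carrying the command plus the first 15
+ * payload bytes, followed by generic writes for the remaining 37
+ * bytes in chunks of 15, 15 and 7.
+ */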
+
+static int nt35510_read_id(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 id1, id2, id3;
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID1, &id1, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID1\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID2, &id2, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID2\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID3, &id3, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID3\n");
+ return ret;
+ }
+
+ /*
+ * Multi-Time Programmable (?) memory contains manufacturer
+ * ID (e.g. Hydis 0x55), driver ID (e.g. NT35510 0xc0) and
+ * version.
+ */
+ DRM_DEV_INFO(nt->dev,
+ "MTP ID manufacturer: %02x version: %02x driver: %02x\n",
+ id1, id2, id3);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_power() - set up power config in page 1
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_power(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVDD,
+ NT35510_P1_AVDD_LEN,
+ nt->conf->avdd);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT1CTR,
+ NT35510_P1_BT1CTR_LEN,
+ nt->conf->bt1ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVEE,
+ NT35510_P1_AVEE_LEN,
+ nt->conf->avee);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT2CTR,
+ NT35510_P1_BT2CTR_LEN,
+ nt->conf->bt2ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
+ NT35510_P1_VGH_LEN,
+ nt->conf->vgh);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT4CTR,
+ NT35510_P1_BT4CTR_LEN,
+ nt->conf->bt4ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_VGHCTR,
+ ARRAY_SIZE(nt35510_vgh_on),
+ nt35510_vgh_on);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGL,
+ NT35510_P1_VGL_LEN,
+ nt->conf->vgl);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT5CTR,
+ NT35510_P1_BT5CTR_LEN,
+ nt->conf->bt5ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGP,
+ NT35510_P1_VGP_LEN,
+ nt->conf->vgp);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGN,
+ NT35510_P1_VGN_LEN,
+ nt->conf->vgn);
+ if (ret)
+ return ret;
+
+ /* Typically 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_display() - set up display config in page 0
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_display(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ const struct nt35510_config *conf = nt->conf;
+ u8 dopctr[NT35510_P0_DOPCTR_LEN];
+ u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
+ u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
+ /* FIXME: set up any rotation (assume none for now) */
+ u8 addr_mode = NT35510_ROTATE_0_SETTING;
+ u8 val;
+ int ret;
+
+ /* Enable TE, EoTP and RGB pixel format */
+ dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
+ NT35510_DOPCTR_0_N565;
+ dopctr[1] = NT35510_DOPCTR_1_CTB;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
+ NT35510_P0_DOPCTR_LEN,
+ dopctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
+ sizeof(addr_mode));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Source data hold time, default 0x05 = 2.5us
+ * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
+ * 0x0A = 5us
+ */
+ val = 0x0A;
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
+ sizeof(val));
+ if (ret < 0)
+ return ret;
+
+ /* EQ control for gate signals, 0x00 = 0 us */
+ gseqctr[0] = 0x00;
+ gseqctr[1] = 0x00;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
+ NT35510_P0_GSEQCTR_LEN,
+ gseqctr);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_SDEQCTR,
+ NT35510_P0_SDEQCTR_LEN,
+ conf->sdeqctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDVPCTR,
+ &conf->sdvpctr, 1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Display timing control for active and idle off mode:
+ * the first byte contains
+ * the two high bits of T1A and the second byte the low 8 bits, and
+ * the valid range is 0x100 (256) to 0x3ff (1023) representing
+ * 257..1024 (value + 1) pixel clock ticks for one scanline. At 20MHz pixel
+ * clock this covers the range of 12.85us .. 51.20us in steps of
+ * 0.05us, the default is 0x184 (388) representing 389 ticks.
+ * The third byte is VBPDA, vertical back porch display active
+ * and the fourth VFPDA, vertical front porch display active,
+ * both given in number of scanlines in the range 0x02..0xff
+ * for 2..255 scanlines. The fifth byte is 2 bits selecting
+ * PSEL for active and idle off mode, how much the 20MHz clock
+ * is divided by 0..3. This needs to be adjusted to get the right
+ * frame rate.
+ */
+ dpfrctr[0] = (conf->t1 >> 8) & 0xFF;
+ dpfrctr[1] = conf->t1 & 0xFF;
+ /* Vertical back porch */
+ dpfrctr[2] = conf->vbp;
+ /* Vertical front porch */
+ dpfrctr[3] = conf->vfp;
+ dpfrctr[4] = conf->psel;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR1,
+ NT35510_P0_DPFRCTR1_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ /* For idle and partial idle off mode we decrease front porch by one */
+ dpfrctr[3]--;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR2,
+ NT35510_P0_DPFRCTR2_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR3,
+ NT35510_P0_DPFRCTR3_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+
+ /* Enable TE on vblank */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ /* Turn on the pads? */
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPMCTR12,
+ NT35510_P0_DPMCTR12_LEN,
+ conf->dpmctr12);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_set_brightness(struct backlight_device *bl)
+{
+ struct nt35510 *nt = bl_get_data(bl);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 brightness = bl->props.brightness;
+ int ret;
+
+ DRM_DEV_DEBUG(nt->dev, "set brightness %d\n", brightness);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &brightness,
+ sizeof(brightness));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops nt35510_bl_ops = {
+ .update_status = nt35510_set_brightness,
+};
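+
+/*
+ * This provides backlight control over the DSI link: for example,
+ * setting bl->props.brightness to 0xff makes the update_status hook
+ * above write 0xff via MIPI_DCS_SET_DISPLAY_BRIGHTNESS (0x51).
+ */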
+
+/*
+ * This is the power-on sequence for the panel: enable supplies,
+ * toggle RESET, read back the MTP ID, then configure power (page 1)
+ * and display (page 0) settings over DSI.
+ */
+static int nt35510_power_on(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret < 0) {
+ dev_err(nt->dev, "unable to enable regulators\n");
+ return ret;
+ }
+
+ /* Toggle RESET in accordance with datasheet page 370 */
+ if (nt->reset_gpio) {
+ gpiod_set_value(nt->reset_gpio, 1);
+ /* Active min 10 us according to datasheet, let's say 20 */
+ usleep_range(20, 1000);
+ gpiod_set_value(nt->reset_gpio, 0);
+ /*
+ * 5 ms during sleep mode, 120 ms during sleep out mode
+ * according to datasheet, let's use 120-140 ms.
+ */
+ usleep_range(120000, 140000);
+ }
+
+ ret = nt35510_read_id(nt);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 1 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_1),
+ nt35510_mauc_select_page_1);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_power(nt);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_b);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_b);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 0 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_0),
+ nt35510_mauc_select_page_0);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_display(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_power_off(struct nt35510 *nt)
+{
+ int ret;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret)
+ return ret;
+
+ if (nt->reset_gpio)
+ gpiod_set_value(nt->reset_gpio, 1);
+
+ return 0;
+}
+
+static int nt35510_unprepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+ usleep_range(10000, 20000);
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Wait 4 frames; the vendor driver approximates this as 5 ms */
+ usleep_range(5000, 10000);
+
+ ret = nt35510_power_off(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_prepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_power_on(nt);
+ if (ret)
+ return ret;
+
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Up to 120 ms */
+ usleep_range(120000, 150000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display on (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Some 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int nt35510_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+
+ info = &connector->display_info;
+ info->width_mm = nt->conf->width_mm;
+ info->height_mm = nt->conf->height_mm;
+ mode = drm_mode_duplicate(connector->dev, &nt->conf->mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ mode->width_mm = nt->conf->width_mm;
+ mode->height_mm = nt->conf->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs nt35510_drm_funcs = {
+ .unprepare = nt35510_unprepare,
+ .prepare = nt35510_prepare,
+ .get_modes = nt35510_get_modes,
+};
+
+static int nt35510_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt35510 *nt;
+ int ret;
+
+ nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, nt);
+ nt->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ /*
+ * Datasheet suggests max HS rate for NT35510 is 250 MHz
+ * (period time 4ns, see figure 7.6.4 page 365) and max LP rate is
+ * 20 MHz (period time 50ns, see figure 7.6.6. page 366).
+ * However these frequencies appear in source code for the Hydis
+ * HVA40WV1 panel and setting up the LP frequency makes the panel
+ * not work.
+ *
+ * TODO: if other panels prove to be closer to the datasheet,
+ * maybe make this a per-panel config in struct nt35510_config?
+ */
+ dsi->hs_rate = 349440000;
+ dsi->lp_rate = 9600000;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ /*
+ * Every new incarnation of this display must have a unique
+ * configuration entry in this driver, matched via its compatible
+ * string.
+ */
+ nt->conf = of_device_get_match_data(dev);
+ if (!nt->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
+ nt->supplies[1].supply = "vddi"; /* 1.65-3.3V */
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
+ nt->supplies);
+ if (ret < 0)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[0].consumer,
+ 2300000, 4800000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[1].consumer,
+ 1650000, 3300000);
+ if (ret)
+ return ret;
+
+ nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(nt->reset_gpio)) {
+ dev_err(dev, "error getting RESET GPIO\n");
+ return PTR_ERR(nt->reset_gpio);
+ }
+
+ drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ /*
+ * First, try to locate an external backlight (such as on GPIO);
+ * if none is found, fall back to registering the internal backlight
+ * control below.
+ */
+ ret = drm_panel_of_backlight(&nt->panel);
+ if (ret) {
+ dev_err(dev, "error getting external backlight %d\n", ret);
+ return ret;
+ }
+ if (!nt->panel.backlight) {
+ struct backlight_device *bl;
+
+ bl = devm_backlight_device_register(dev, "nt35510", dev, nt,
+ &nt35510_bl_ops, NULL);
+ if (IS_ERR(bl)) {
+ DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ return PTR_ERR(bl);
+ }
+ bl->props.max_brightness = 255;
+ bl->props.brightness = 255;
+ bl->props.power = FB_BLANK_POWERDOWN;
+ nt->panel.backlight = bl;
+ }
+
+ ret = drm_panel_add(&nt->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&nt->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nt35510_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ mipi_dsi_detach(dsi);
+ /* Power off */
+ ret = nt35510_power_off(nt);
+ drm_panel_remove(&nt->panel);
+
+ return ret;
+}
+
+/*
+ * These gamma correction values are 10bit tuples, so only bits 0 and 1 are
+ * ever used in the first byte. They form a positive and a negative gamma
+ * correction curve for each color; values must be strictly higher for each
+ * step on the curve. As can be seen, these default curves go from 0x0001
+ * to 0x03FE.
+ */
+#define NT35510_GAMMA_POS_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x43, 0x02, 0x50, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+#define NT35510_GAMMA_NEG_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x43, 0x02, 0x50, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+/*
+ * The Hydis HVA40WV1 panel
+ */
+static const struct nt35510_config nt35510_hydis_hva40wv1 = {
+ .width_mm = 52,
+ .height_mm = 86,
+ /*
+ * As the Hydis panel is used in command mode, the porches etc
+ * are settings programmed internally into the NT35510 controller
+ * and generated toward the physical display. As the panel is not
+ * used in video mode, these are not really exposed to the DSI
+ * host.
+ *
+ * Display frame rate control:
+ * Frame rate = (20 MHz / 1) / (389 * (7 + 50 + 800)) ~= 60 Hz
+ */
+ .mode = {
+ /* The internal pixel clock of the NT35510 is 20 MHz */
+ .clock = 20000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2, /* HFP = 2 */
+ .hsync_end = 480 + 2 + 0, /* HSync = 0 */
+ .htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
+ .vdisplay = 800,
+ .vsync_start = 800 + 2, /* VFP = 2 */
+ .vsync_end = 800 + 2 + 0, /* VSync = 0 */
+ .vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
+ .vrefresh = 60, /* Calculated */
+ .flags = 0,
+ },
+ /* 0x09: AVDD = 5.6V */
+ .avdd = { 0x09, 0x09, 0x09 },
+ /* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
+ .bt1ctr = { 0x34, 0x34, 0x34 },
+ /* 0x09: AVEE = -5.6V */
+ .avee = { 0x09, 0x09, 0x09 },
+ /* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
+ .bt2ctr = { 0x24, 0x24, 0x24 },
+ /* 0x05 = 12V */
+ .vgh = { 0x05, 0x05, 0x05 },
+ /* 0x24: HCK = Hsync/2, VGH = AVDD - AVEE + VDDB */
+ .bt4ctr = { 0x24, 0x24, 0x24 },
+ /* 0x0B = -13V */
+ .vgl = { 0x0B, 0x0B, 0x0B },
+ /* 0x24: LCK = Hsync/2, VGL = AVEE + VCL - AVDD */
+ .bt5ctr = { 0x24, 0x24, 0x24 },
+ /* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+ .vgp = { 0x00, 0xA3, 0x00 },
+ /* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+ .vgn = { 0x00, 0xA3, 0x00 },
+ /* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
+ .sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
+ /* SDVPCTR: Normal operation off color during v porch */
+ .sdvpctr = 0x01,
+ /* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
+ .t1 = 0x0184,
+ /* VBP: vertical back porch toward the panel */
+ .vbp = 7,
+ /* VFP: vertical front porch toward the panel */
+ .vfp = 50,
+ /* PSEL: divide pixel clock 20MHz with 1 (no clock downscaling) */
+ .psel = 0,
+ /* DPTMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
+ .dpmctr12 = { 0x03, 0x00, 0x00, },
+ /* Default gamma correction values */
+ .gamma_corr_pos_r = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_g = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_b = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_neg_r = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_g = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
+};
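+
+/*
+ * Cross-checking the frame rate stated above: t1 = 0x184 gives 389
+ * pixel clocks per scanline, vbp + vdisplay + vfp = 7 + 800 + 50 =
+ * 857 scanlines, and psel = 0 keeps the pixel clock at 20 MHz, so
+ * 20000000 / (389 * 857) ~= 60 Hz.
+ */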
+
+static const struct of_device_id nt35510_of_match[] = {
+ {
+ .compatible = "hydis,hva40wv1",
+ .data = &nt35510_hydis_hva40wv1,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nt35510_of_match);
+
+static struct mipi_dsi_driver nt35510_driver = {
+ .probe = nt35510_probe,
+ .remove = nt35510_remove,
+ .driver = {
+ .name = "panel-novatek-nt35510",
+ .of_match_table = nt35510_of_match,
+ },
+};
+module_mipi_dsi_driver(nt35510_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("NT35510-based panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3c52f15f7a1c..9bb2e8c7934a 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -373,6 +373,12 @@ static const struct of_device_id ld9040_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ld9040_of_match);
+static const struct spi_device_id ld9040_ids[] = {
+ { "ld9040", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, ld9040_ids);
+
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
new file mode 100644
index 000000000000..9d843fcc3a22
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019, Michael Srba
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct s6e88a0_ams452ef01 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct
+s6e88a0_ams452ef01 *to_s6e88a0_ams452ef01(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e88a0_ams452ef01, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
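+
+/*
+ * Note that the macro above returns from the *enclosing* function on
+ * error, so it can only be used in functions returning int; e.g.
+ * dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a) sends command 0xf0 with
+ * parameters 0x5a, 0x5a and bails out of the caller on failure.
+ */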
+
+static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+}
+
+static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ // set default brightness/gamma
+ dsi_dcs_write_seq(dsi, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
+ // set default Amoled Off Ratio
+ dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+ dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
+ dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
+ dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(35);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ s6e88a0_ams452ef01_reset(ctx);
+
+ ret = s6e88a0_ams452ef01_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = s6e88a0_ams452ef01_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode s6e88a0_ams452ef01_mode = {
+ .clock = (540 + 88 + 4 + 20) * (960 + 14 + 2 + 8) * 60 / 1000,
+ .hdisplay = 540,
+ .hsync_start = 540 + 88,
+ .hsync_end = 540 + 88 + 4,
+ .htotal = 540 + 88 + 4 + 20,
+ .vdisplay = 960,
+ .vsync_start = 960 + 14,
+ .vsync_end = 960 + 14 + 2,
+ .vtotal = 960 + 14 + 2 + 8,
+ .vrefresh = 60,
+ .width_mm = 56,
+ .height_mm = 100,
+};
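+
+/* The clock expression above evaluates to (652 * 984 * 60) / 1000 = 38494 kHz. */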
+
+static int s6e88a0_ams452ef01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &s6e88a0_ams452ef01_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e88a0_ams452ef01_panel_funcs = {
+ .unprepare = s6e88a0_ams452ef01_unprepare,
+ .prepare = s6e88a0_ams452ef01_prepare,
+ .get_modes = s6e88a0_ams452ef01_get_modes,
+};
+
+static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e88a0_ams452ef01 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+ return ret;
+ }
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add panel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
+ { .compatible = "samsung,s6e88a0-ams452ef01" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6e88a0_ams452ef01_of_match);
+
+static struct mipi_dsi_driver s6e88a0_ams452ef01_driver = {
+ .probe = s6e88a0_ams452ef01_probe,
+ .remove = s6e88a0_ams452ef01_remove,
+ .driver = {
+ .name = "panel-s6e88a0-ams452ef01",
+ .of_match_table = s6e88a0_ams452ef01_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e88a0_ams452ef01_driver);
+
+MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
+MODULE_DESCRIPTION("MIPI-DSI based Panel Driver for AMS452EF01 AMOLED LCD with a S6E88A0 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index e14c14ac62b5..0ce81b1f36af 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -351,6 +351,65 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+static struct panel_desc panel_dpi;
+
+static int panel_dpi_probe(struct device *dev,
+ struct panel_simple *panel)
+{
+ struct display_timing *timing;
+ const struct device_node *np;
+ struct panel_desc *desc;
+ unsigned int bus_flags;
+ struct videomode vm;
+ const char *mapping;
+ int ret;
+
+ np = dev->of_node;
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
+ if (!timing)
+ return -ENOMEM;
+
+ ret = of_get_display_timing(np, "panel-timing", timing);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n",
+ np);
+ return ret;
+ }
+
+ desc->timings = timing;
+ desc->num_timings = 1;
+
+ of_property_read_u32(np, "width-mm", &desc->size.width);
+ of_property_read_u32(np, "height-mm", &desc->size.height);
+
+ ret = of_property_read_string(np, "data-mapping", &mapping);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: missing \"data-mapping\" property\n", np);
+ return ret;
+ }
+
+ if (!strcmp(mapping, "rgb24"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ else if (!strcmp(mapping, "rgb565"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+ else if (!strcmp(mapping, "bgr666"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ else if (!strcmp(mapping, "lvds666"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+
+ /* Extract bus_flags from display_timing */
+ bus_flags = 0;
+ vm.flags = timing->flags;
+ drm_bus_flags_from_videomode(&vm, &bus_flags);
+ desc->bus_flags = bus_flags;
+
+ /* We do not know the connector for the DT node, so guess it */
+ desc->connector_type = DRM_MODE_CONNECTOR_DPI;
+
+ panel->desc = desc;
+
+ return 0;
+}
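+
+/*
+ * A minimal sketch of a device tree node matched by the code above;
+ * all property values are illustrative only:
+ *
+ *	panel {
+ *		compatible = "panel-dpi";
+ *		width-mm = <154>;
+ *		height-mm = <86>;
+ *		data-mapping = "rgb24";
+ *
+ *		panel-timing {
+ *			clock-frequency = <33300000>;
+ *			hactive = <800>;
+ *			vactive = <480>;
+ *			hfront-porch = <210>;
+ *			hback-porch = <46>;
+ *			hsync-len = <20>;
+ *			vfront-porch = <22>;
+ *			vback-porch = <23>;
+ *			vsync-len = <10>;
+ *		};
+ *	};
+ */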
+
#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
(to_check->field.typ >= bounds->field.min && \
to_check->field.typ <= bounds->field.max)
@@ -437,8 +496,15 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -EPROBE_DEFER;
}
- if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ if (desc == &panel_dpi) {
+ /* Handle the generic panel-dpi binding */
+ err = panel_dpi_probe(dev, panel);
+ if (err)
+ goto free_ddc;
+ } else {
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ }
drm_panel_init(&panel->base, dev, &panel_simple_funcs,
desc->connector_type);
@@ -1301,6 +1367,37 @@ static const struct panel_desc edt_et035012dm6 = {
.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
+static const struct drm_display_mode edt_etm043080dh6gp_mode = {
+ .clock = 10870,
+ .hdisplay = 480,
+ .hsync_start = 480 + 8,
+ .hsync_end = 480 + 8 + 4,
+ .htotal = 480 + 8 + 4 + 41,
+
+ /*
+ * IWG22M: Y resolution changed to work around the "dc_linuxfb" module
+ * crashing during fb_align
+ */
+
+ .vdisplay = 288,
+ .vsync_start = 288 + 2,
+ .vsync_end = 288 + 2 + 4,
+ .vtotal = 288 + 2 + 4 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc edt_etm043080dh6gp = {
+ .modes = &edt_etm043080dh6gp_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 100,
+ .height = 65,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode edt_etm0430g0dh6_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -1440,6 +1537,33 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode frida_frd350h54004_mode = {
+ .clock = 6000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 44,
+ .hsync_end = 320 + 44 + 16,
+ .htotal = 320 + 44 + 16 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 2,
+ .vsync_end = 240 + 2 + 6,
+ .vtotal = 240 + 2 + 6 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc frida_frd350h54004 = {
+ .modes = &frida_frd350h54004_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 77,
+ .height = 64,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode friendlyarm_hd702e_mode = {
.clock = 67185,
.hdisplay = 800,
@@ -2080,6 +2204,64 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct display_timing logictechno_lt161010_2nh_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 20, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
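+
+/* Each triple above is { minimum, typical, maximum }, per struct display_timing. */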
+
+static const struct panel_desc logictechno_lt161010_2nh = {
+ .timings = &logictechno_lt161010_2nh_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
+static const struct display_timing logictechno_lt170410_2whc_timing = {
+ .pixelclock = { 68900000, 71100000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 23, 60, 71 },
+ .hback_porch = { 23, 60, 71 },
+ .hsync_len = { 15, 40, 47 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 7, 10 },
+ .vback_porch = { 5, 7, 10 },
+ .vsync_len = { 6, 9, 12 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt170410_2whc = {
+ .timings = &logictechno_lt170410_2whc_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.clock = 30400,
.hdisplay = 800,
@@ -2095,7 +2277,7 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
};
static const struct drm_display_mode logicpd_type_28_mode = {
- .clock = 9000,
+ .clock = 9107,
.hdisplay = 480,
.hsync_start = 480 + 3,
.hsync_end = 480 + 3 + 42,
@@ -2224,6 +2406,51 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+ {
+ .clock = 138500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 110920,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 48,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+ .modes = neweast_wjfh116008a_modes,
+ .num_modes = 2,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 150,
+ },
+ .delay = {
+ .prepare = 110,
+ .enable = 20,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2390,15 +2617,15 @@ static const struct panel_desc ontat_yx700wv03 = {
};
static const struct drm_display_mode ortustech_com37h3m_mode = {
- .clock = 22153,
+ .clock = 22230,
.hdisplay = 480,
- .hsync_start = 480 + 8,
- .hsync_end = 480 + 8 + 10,
- .htotal = 480 + 8 + 10 + 10,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 10,
+ .htotal = 480 + 40 + 10 + 40,
.vdisplay = 640,
.vsync_start = 640 + 4,
- .vsync_end = 640 + 4 + 3,
- .vtotal = 640 + 4 + 3 + 4,
+ .vsync_end = 640 + 4 + 2,
+ .vtotal = 640 + 4 + 2 + 4,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2439,6 +2666,7 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
@@ -2464,7 +2692,8 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
@@ -2546,6 +2775,35 @@ static const struct panel_desc rocktech_rk070er9427 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
+ .clock = 71100,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 2,
+ .vsync_end = 800 + 2 + 5,
+ .vtotal = 800 + 2 + 5 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc rocktech_rk101ii01d_ct = {
+ .modes = &rocktech_rk101ii01d_ct_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.clock = 271560,
.hdisplay = 2560,
@@ -2768,30 +3026,6 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
-static const struct drm_display_mode sharp_lq150x1lg11_mode = {
- .clock = 71100,
- .hdisplay = 1024,
- .hsync_start = 1024 + 168,
- .hsync_end = 1024 + 168 + 64,
- .htotal = 1024 + 168 + 64 + 88,
- .vdisplay = 768,
- .vsync_start = 768 + 37,
- .vsync_end = 768 + 37 + 2,
- .vtotal = 768 + 37 + 2 + 8,
- .vrefresh = 60,
-};
-
-static const struct panel_desc sharp_lq150x1lg11 = {
- .modes = &sharp_lq150x1lg11_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 304,
- .height = 228,
- },
- .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
-};
-
static const struct display_timing sharp_ls020b1dd01d_timing = {
.pixelclock = { 2000000, 4200000, 5000000 },
.hactive = { 240, 240, 240 },
@@ -3023,7 +3257,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
.width = 194,
.height = 116,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -3286,6 +3520,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,et035012dm6",
.data = &edt_et035012dm6,
}, {
+ .compatible = "edt,etm043080dh6gp",
+ .data = &edt_etm043080dh6gp,
+ }, {
.compatible = "edt,etm0430g0dh6",
.data = &edt_etm0430g0dh6,
}, {
@@ -3310,6 +3547,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "frida,frd350h54004",
+ .data = &frida_frd350h54004,
+ }, {
.compatible = "friendlyarm,hd702e",
.data = &friendlyarm_hd702e,
}, {
@@ -3388,6 +3628,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
+ .compatible = "logictechno,lt161010-2nhc",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt161010-2nhr",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt170410-2whc",
+ .data = &logictechno_lt170410_2whc,
+ }, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
@@ -3400,6 +3649,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "neweast,wjfh116008a",
+ .data = &neweast_wjfh116008a,
+ }, {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
@@ -3439,6 +3691,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk070er9427",
.data = &rocktech_rk070er9427,
}, {
+ .compatible = "rocktech,rk101ii01d-ct",
+ .data = &rocktech_rk101ii01d_ct,
+ }, {
.compatible = "samsung,lsn122dl01-c01",
.data = &samsung_lsn122dl01_c01,
}, {
@@ -3466,9 +3721,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq123p1jx31",
.data = &sharp_lq123p1jx31,
}, {
- .compatible = "sharp,lq150x1lg11",
- .data = &sharp_lq150x1lg11,
- }, {
.compatible = "sharp,ls020b1dd01d",
.data = &sharp_ls020b1dd01d,
}, {
@@ -3526,6 +3778,10 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
+ /* Must be the last entry */
+ .compatible = "panel-dpi",
+ .data = &panel_dpi,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
index de0abf76ae6f..c91e55b2d7a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -48,7 +48,7 @@ struct acx424akp {
};
static const struct drm_display_mode sony_acx424akp_vid_mode = {
- .clock = 330000,
+ .clock = 27234,
.hdisplay = 480,
.hsync_start = 480 + 15,
.hsync_end = 480 + 15 + 0,
@@ -68,7 +68,7 @@ static const struct drm_display_mode sony_acx424akp_vid_mode = {
* command mode using the maximum HS frequency.
*/
static const struct drm_display_mode sony_acx424akp_cmd_mode = {
- .clock = 420160,
+ .clock = 35478,
.hdisplay = 480,
.hsync_start = 480 + 154,
.hsync_end = 480 + 154 + 16,
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index cf29405a2dbe..aeca15dfeb3c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,7 +86,12 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
+/*
+ * noinline_for_stack so we don't get multiple copies of tx_buf
+ * on the stack in case of gcc-plugin-structleak
+ */
+static int noinline_for_stack
+jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf = JBT_COMMAND | reg;
@@ -105,8 +110,9 @@ static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
return ret;
}
-static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
- u8 reg, u8 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_1(struct td028ttec1_panel *lcd,
+ u8 reg, u8 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[2];
@@ -128,8 +134,9 @@ static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
return ret;
}
-static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
- u8 reg, u16 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_2(struct td028ttec1_panel *lcd,
+ u8 reg, u16 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[3];
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 238fb6d54df4..8136babd3ba9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
@@ -87,18 +88,27 @@ static void panfrost_clk_fini(struct panfrost_device *pfdev)
static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
- int ret;
+ int ret, i;
- pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
- if (IS_ERR(pfdev->regulator)) {
- ret = PTR_ERR(pfdev->regulator);
- dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+ if (WARN(pfdev->comp->num_supplies > ARRAY_SIZE(pfdev->regulators),
+ "Too many supplies in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < pfdev->comp->num_supplies; i++)
+ pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
+
+ ret = devm_regulator_bulk_get(pfdev->dev,
+ pfdev->comp->num_supplies,
+ pfdev->regulators);
+ if (ret < 0) {
+ dev_err(pfdev->dev, "failed to get regulators: %d\n", ret);
return ret;
}
- ret = regulator_enable(pfdev->regulator);
+ ret = regulator_bulk_enable(pfdev->comp->num_supplies,
+ pfdev->regulators);
if (ret < 0) {
- dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+ dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
@@ -107,7 +117,81 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
- regulator_disable(pfdev->regulator);
+ regulator_bulk_disable(pfdev->comp->num_supplies,
+ pfdev->regulators);
+}
+
+static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
+ if (!pfdev->pm_domain_devs[i])
+ break;
+
+ if (pfdev->pm_domain_links[i])
+ device_link_del(pfdev->pm_domain_links[i]);
+
+ dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
+ }
+}
+
+static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
+{
+ int err;
+ int i, num_domains;
+
+ num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+
+ /*
+	 * A single domain is handled by the core and, if only a single
+	 * power domain is requested, the property is optional.
+ */
+ if (num_domains < 2 && pfdev->comp->num_pm_domains < 2)
+ return 0;
+
+ if (num_domains != pfdev->comp->num_pm_domains) {
+ dev_err(pfdev->dev,
+ "Incorrect number of power domains: %d provided, %d needed\n",
+ num_domains, pfdev->comp->num_pm_domains);
+ return -EINVAL;
+ }
+
+ if (WARN(num_domains > ARRAY_SIZE(pfdev->pm_domain_devs),
+		 "Too many pm-domains in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < num_domains; i++) {
+ pfdev->pm_domain_devs[i] =
+ dev_pm_domain_attach_by_name(pfdev->dev,
+ pfdev->comp->pm_domain_names[i]);
+ if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
+ err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
+ pfdev->pm_domain_devs[i] = NULL;
+ dev_err(pfdev->dev,
+ "failed to get pm-domain %s(%d): %d\n",
+ pfdev->comp->pm_domain_names[i], i, err);
+ goto err;
+ }
+
+ pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
+ pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+ if (!pfdev->pm_domain_links[i]) {
+ dev_err(pfdev->pm_domain_devs[i],
+ "adding device link failed!\n");
+ err = -ENODEV;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ panfrost_pm_domain_fini(pfdev);
+ return err;
}
int panfrost_device_init(struct panfrost_device *pfdev)
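For orientation, a hedged sketch of the device-tree shape the multi-domain path above expects (all names invented, written as a C comment):

	/*
	 * power-domains = <&pd 0>, <&pd 1>, <&pd 2>;
	 * power-domain-names = "core0", "core1", "core2";
	 *
	 * dev_pm_domain_attach_by_name() resolves each pm_domain_names[]
	 * entry against the node's power-domain-names property.
	 */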
@@ -140,37 +224,43 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto err_out1;
}
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ goto err_out2;
+
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
if (IS_ERR(pfdev->iomem)) {
dev_err(pfdev->dev, "failed to ioremap iomem\n");
err = PTR_ERR(pfdev->iomem);
- goto err_out2;
+ goto err_out3;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto err_out2;
+ goto err_out3;
err = panfrost_mmu_init(pfdev);
if (err)
- goto err_out3;
+ goto err_out4;
err = panfrost_job_init(pfdev);
if (err)
- goto err_out4;
+ goto err_out5;
err = panfrost_perfcnt_init(pfdev);
if (err)
- goto err_out5;
+ goto err_out6;
return 0;
-err_out5:
+err_out6:
panfrost_job_fini(pfdev);
-err_out4:
+err_out5:
panfrost_mmu_fini(pfdev);
-err_out3:
+err_out4:
panfrost_gpu_fini(pfdev);
+err_out3:
+ panfrost_pm_domain_fini(pfdev);
err_out2:
panfrost_reset_fini(pfdev);
err_out1:
@@ -186,6 +276,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
panfrost_reset_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 06713811b92c..c30c719a8059 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
+#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
@@ -19,6 +20,8 @@ struct panfrost_job;
struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
+#define MAX_REGULATORS 2
+#define MAX_PM_DOMAINS 3
struct panfrost_features {
u16 id;
@@ -51,6 +54,23 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
+/*
+ * Features that cannot be automatically detected and need matching using the
+ * compatible string, typically SoC-specific.
+ */
+struct panfrost_compatible {
+ /* Supplies count and names. */
+ int num_supplies;
+ const char * const *supply_names;
+ /*
+	 * Number of power domains required; values 0 and 1 are handled
+	 * identically, as only values > 1 need special handling.
+ */
+ int num_pm_domains;
+ /* Only required if num_pm_domains > 1. */
+ const char * const *pm_domain_names;
+};
+
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -59,10 +79,14 @@ struct panfrost_device {
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
- struct regulator *regulator;
+ struct regulator_bulk_data regulators[MAX_REGULATORS];
struct reset_control *rstc;
+ /* pm_domains for devices with more than one. */
+ struct device *pm_domain_devs[MAX_PM_DOMAINS];
+ struct device_link *pm_domain_links[MAX_PM_DOMAINS];
struct panfrost_features features;
+ const struct panfrost_compatible *comp;
spinlock_t as_lock;
unsigned long as_in_use_mask;
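To illustrate how panfrost_compatible is meant to be filled in, a hypothetical SoC needing two supplies and three power domains could define the following (all names invented, not from this patch; the counts fit within MAX_REGULATORS and MAX_PM_DOMAINS):

	static const char * const example_supplies[] = { "mali", "sram" };
	static const char * const example_pm_domains[] = { "core0", "core1", "core2" };

	static const struct panfrost_compatible example_soc_data = {
		.num_supplies = ARRAY_SIZE(example_supplies),
		.supply_names = example_supplies,
		.num_pm_domains = ARRAY_SIZE(example_pm_domains),
		.pm_domain_names = example_pm_domains,
	};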
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b7a618db3ee2..882fecc33fdb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -584,6 +584,10 @@ static int panfrost_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pfdev);
+ pfdev->comp = of_device_get_match_data(&pdev->dev);
+ if (!pfdev->comp)
+ return -ENODEV;
+
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
@@ -655,16 +659,24 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
+static const char * const default_supplies[] = { "mali" };
+static const struct panfrost_compatible default_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies),
+ .supply_names = default_supplies,
+ .num_pm_domains = 1, /* optional */
+ .pm_domain_names = NULL,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "arm,mali-t604" },
- { .compatible = "arm,mali-t624" },
- { .compatible = "arm,mali-t628" },
- { .compatible = "arm,mali-t720" },
- { .compatible = "arm,mali-t760" },
- { .compatible = "arm,mali-t820" },
- { .compatible = "arm,mali-t830" },
- { .compatible = "arm,mali-t860" },
- { .compatible = "arm,mali-t880" },
+ { .compatible = "arm,mali-t604", .data = &default_data, },
+ { .compatible = "arm,mali-t624", .data = &default_data, },
+ { .compatible = "arm,mali-t628", .data = &default_data, },
+ { .compatible = "arm,mali-t720", .data = &default_data, },
+ { .compatible = "arm,mali-t760", .data = &default_data, },
+ { .compatible = "arm,mali-t820", .data = &default_data, },
+ { .compatible = "arm,mali-t830", .data = &default_data, },
+ { .compatible = "arm,mali-t860", .data = &default_data, },
+ { .compatible = "arm,mali-t880", .data = &default_data, },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
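An SoC integration needing non-default supplies or power domains would then add its own match entry; a hypothetical sketch reusing the example_soc_data sketched earlier:

	{ .compatible = "vendor,soc-mali", .data = &example_soc_data, },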
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 8822ec13a0d6..f2c1ddc41a9b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -308,28 +308,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == pfdev->features.l2_present, 100, 1000);
-
- gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
- val, val == pfdev->features.stack_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == pfdev->features.shader_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 100, 1000);
-
if (ret)
- dev_err(pfdev->dev, "error powering up gpu");
+ dev_err(pfdev->dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
gpu_write(pfdev, TILER_PWROFF_LO, 0);
gpu_write(pfdev, SHADER_PWROFF_LO, 0);
- gpu_write(pfdev, STACK_PWROFF_LO, 0);
gpu_write(pfdev, L2_PWROFF_LO, 0);
}
@@ -351,7 +349,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
return -ENODEV;
err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
- IRQF_SHARED, "gpu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
return err;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9a1a72a748e7..7914b1570841 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -508,7 +508,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
return -ENODEV;
ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
- IRQF_SHARED, "job", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
if (ret) {
dev_err(pfdev->dev, "failed to request job irq");
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 5d75f8cf6477..ed28aeba6d59 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -640,9 +640,11 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
if (irq <= 0)
return -ENODEV;
- err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ err = devm_request_threaded_irq(pfdev->dev, irq,
+ panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
- IRQF_SHARED, "mmu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-mmu",
+ pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 09aeaffb7660..4f325c410b5d 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -19,6 +19,7 @@ static struct regmap *versatile_syscon_map;
* We detect the different syscon types from the compatible strings.
*/
enum versatile_clcd {
+ INTEGRATOR_IMPD1,
INTEGRATOR_CLCD_CM,
VERSATILE_CLCD,
REALVIEW_CLCD_EB,
@@ -65,6 +66,14 @@ static const struct of_device_id versatile_clcd_of_match[] = {
{},
};
+static const struct of_device_id impd1_clcd_of_match[] = {
+ {
+ .compatible = "arm,im-pd1-syscon",
+ .data = (void *)INTEGRATOR_IMPD1,
+ },
+ {},
+};
+
/*
* Core module CLCD control on the Integrator/CP, bits
* 8 thru 19 of the CM_CONTROL register controls a bunch
@@ -125,6 +134,36 @@ static void pl111_integrator_enable(struct drm_device *drm, u32 format)
val);
}
+#define IMPD1_CTRL_OFFSET 0x18
+#define IMPD1_CTRL_DISP_LCD (0 << 0)
+#define IMPD1_CTRL_DISP_VGA (1 << 0)
+#define IMPD1_CTRL_DISP_LCD1 (2 << 0)
+#define IMPD1_CTRL_DISP_ENABLE (1 << 2)
+#define IMPD1_CTRL_DISP_MASK (7 << 0)
+
+static void pl111_impd1_enable(struct drm_device *drm, u32 format)
+{
+ u32 val;
+
+ dev_info(drm->dev, "enable IM-PD1 CLCD connectors\n");
+ val = IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE;
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ val);
+}
+
+static void pl111_impd1_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable IM-PD1 CLCD connectors\n");
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ 0);
+}
+
/*
* This configuration register in the Versatile and RealView
* family is uniformly present but appears more and more
@@ -271,6 +310,20 @@ static const struct pl111_variant_data pl110_integrator = {
};
/*
+ * The IM-PD1 variant is a PL110 with a bunch of broken or not yet
+ * implemented features.
+ */
+static const struct pl111_variant_data pl110_impd1 = {
+ .name = "PL110 IM-PD1",
+ .is_pl110 = true,
+ .broken_clockdivider = true,
+ .broken_vblank = true,
+ .formats = pl110_integrator_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
+ .fb_bpp = 16,
+};
+
+/*
* This is the in-between PL110 variant found in the ARM Versatile,
* supporting RGB565/BGR565
*/
@@ -322,8 +375,21 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
/* Non-ARM reference designs, just bail out */
return 0;
}
+
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ /*
+	 * On the Integrator, check if we should use the IM-PD1 instead;
+	 * if we find it, it takes precedence. This is on the Integrator/AP,
+	 * which only has this option for PL110 graphics.
+ */
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
+ &clcd_id);
+ if (np)
+ versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ }
+
/* Versatile Express special handling */
if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
struct platform_device *pdev;
@@ -367,6 +433,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
priv->variant_display_enable = pl111_integrator_enable;
dev_info(dev, "set up callbacks for Integrator PL110\n");
break;
+ case INTEGRATOR_IMPD1:
+ versatile_syscon_map = map;
+ priv->variant = &pl110_impd1;
+ priv->variant_display_enable = pl111_impd1_enable;
+ priv->variant_display_disable = pl111_impd1_disable;
+ dev_info(dev, "set up callbacks for IM-PD1 PL110\n");
+ break;
case VERSATILE_CLCD:
versatile_syscon_map = map;
/* This can do RGB565 with external PLD */
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index ef09dc6bc635..d1086b2a6892 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -36,7 +36,7 @@ static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
struct ring {
struct qxl_ring_header header;
- uint8_t elements[0];
+ uint8_t elements[];
};
struct qxl_ring {
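The [0] to [] change converts elements to a C99 flexible array member with no layout change. A minimal allocation sketch, assuming the caller knows the element count (struct_size() comes from <linux/overflow.h>):

	struct ring *r = kzalloc(struct_size(r, elements, count), GFP_KERNEL);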
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 16d73b22f3f5..09583a08e141 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -31,7 +31,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
+#include <drm/drm_simple_kms_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -372,19 +372,6 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
qxl_crtc_update_monitors_config(crtc, "flush");
}
@@ -1021,9 +1008,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
return &qxl_output->enc;
}
-static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
-};
-
static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
.get_modes = qxl_conn_get_modes,
.mode_valid = qxl_conn_mode_valid,
@@ -1073,15 +1057,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static void qxl_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs qxl_enc_funcs = {
- .destroy = qxl_enc_destroy,
-};
-
static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
{
if (qdev->hotplug_mode_update_property)
@@ -1100,6 +1075,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ int ret;
qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
if (!qxl_output)
@@ -1112,15 +1088,19 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_connector_init(dev, &qxl_output->base,
&qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
- drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ ret = drm_simple_encoder_init(dev, &qxl_output->enc,
+ DRM_MODE_ENCODER_VIRTUAL);
+ if (ret) {
+ drm_err(dev, "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ goto err_drm_connector_cleanup;
+ }
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
- drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
drm_object_attach_property(&connector->base,
@@ -1130,6 +1110,11 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
return 0;
+
+err_drm_connector_cleanup:
+ drm_connector_cleanup(&qxl_output->base);
+ kfree(qxl_output);
+ return ret;
}
static struct drm_framebuffer *
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1d601f57a6ba..4fda3f9b29f4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -34,6 +34,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper.h>
@@ -132,21 +133,30 @@ free_dev:
return ret;
}
+static void qxl_drm_release(struct drm_device *dev)
+{
+ struct qxl_device *qdev = dev->dev_private;
+
+ /*
+ * TODO: qxl_device_fini() call should be in qxl_pci_remove(),
+	 * reordering qxl_modeset_fini() + qxl_device_fini() calls is
+ * non-trivial though.
+ */
+ qxl_modeset_fini(qdev);
+ qxl_device_fini(qdev);
+ dev->dev_private = NULL;
+ kfree(qdev);
+}
+
static void
qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct qxl_device *qdev = dev->dev_private;
drm_dev_unregister(dev);
-
- qxl_modeset_fini(qdev);
- qxl_device_fini(qdev);
+ drm_atomic_helper_shutdown(dev);
if (is_vga(pdev))
vga_put(pdev, VGA_RSRC_LEGACY_IO);
-
- dev->dev_private = NULL;
- kfree(qdev);
drm_dev_put(dev);
}
@@ -279,6 +289,8 @@ static struct drm_driver qxl_driver = {
.major = 0,
.minor = 1,
.patchlevel = 0,
+
+ .release = qxl_drm_release,
};
static int __init qxl_init(void)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bfc1631093e9..70b20ee4741a 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -299,12 +299,12 @@ void qxl_device_fini(struct qxl_device *qdev)
{
qxl_bo_unref(&qdev->current_release_bo[0]);
qxl_bo_unref(&qdev->current_release_bo[1]);
+ qxl_gem_fini(qdev);
+ qxl_bo_fini(qdev);
flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
- qxl_gem_fini(qdev);
- qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
iounmap(qdev->ram_header);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 16a5e903533d..62a5e424971b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,11 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -256,7 +251,6 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
index 403eb3a5891f..9c1a94153983 100644
--- a/drivers/gpu/drm/radeon/.gitignore
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mkregtable
*_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index be583695427a..91811757104c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2231,6 +2231,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.disable = atombios_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index c42f73fad3e3..bb29cf02974d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
static bool radeon_read_platform_bios(struct radeon_device *rdev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = rdev->pdev->rom;
+ size_t romlen = rdev->pdev->romlen;
+ void __iomem *bios;
rdev->bios = NULL;
- bios = pci_platform_rom(rdev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ rdev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!rdev->bios)
return false;
- }
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- return false;
- }
+
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
+
+ memcpy_fromio(rdev->bios, bios, romlen);
+ iounmap(bios);
+
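+	/* A valid PCI expansion ROM image starts with the 0x55 0xaa signature. */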
+ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa)
+ goto free_bios;
return true;
+free_bios:
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+	return false;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a522e092038b..266e3cbbd09b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1263,7 +1263,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d07c7db0c815..35db79a168bf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -45,6 +45,10 @@
#include "atom.h"
#include "radeon.h"
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc);
+int radeon_enable_vblank_kms(struct drm_crtc *crtc);
+void radeon_disable_vblank_kms(struct drm_crtc *crtc);
+
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -460,7 +464,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(!ASIC_IS_AVIVO(rdev) ||
((int) (work->target_vblank -
- dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+ crtc->funcs->get_vblank_counter(crtc)) > 0)))
usleep_range(1000, 2000);
/* We borrow the event spin lock for protecting flip_status */
@@ -576,7 +580,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
}
work->base = base;
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- dev->driver->get_vblank_counter(dev, work->crtc_id);
+ crtc->funcs->get_vblank_counter(crtc);
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -668,6 +672,10 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = {
.set_config = radeon_crtc_set_config,
.destroy = radeon_crtc_destroy,
.page_flip_target = radeon_crtc_page_flip_target,
+ .get_vblank_counter = radeon_get_vblank_counter_kms,
+ .enable_vblank = radeon_enable_vblank_kms,
+ .disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -1973,3 +1981,16 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
return ret;
}
+
+bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 28eef9282874..008308780443 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -301,35 +301,8 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
return connector;
}
-static void radeon_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_fb_add_connector(rdev, connector);
-
- drm_connector_register(connector);
-}
-
-static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct radeon_device *rdev = dev->dev_private;
-
- drm_connector_unregister(connector);
- radeon_fb_remove_connector(rdev, connector);
- drm_connector_cleanup(connector);
-
- kfree(connector);
- DRM_DEBUG_KMS("\n");
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
- .register_connector = radeon_dp_register_mst_connector,
- .destroy_connector = radeon_dp_destroy_mst_connector,
};
static struct
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8696af1ee14d..59f8186a2415 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -120,9 +120,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze);
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -602,16 +599,6 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-static bool
-radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_GEM | DRIVER_RENDER,
@@ -620,11 +607,6 @@ static struct drm_driver kms_driver = {
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
.unload = radeon_driver_unload_kms,
- .get_vblank_counter = radeon_get_vblank_counter_kms,
- .enable_vblank = radeon_enable_vblank_kms,
- .disable_vblank = radeon_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = radeon_get_crtc_scanout_position,
.irq_preinstall = radeon_driver_irq_preinstall_kms,
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ec0b7d6c994d..cf3156a65fc1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -354,15 +354,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
&radeon_fb_helper_funcs);
- ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
- RADEONFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
@@ -404,15 +399,3 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
return true;
return false;
}
-
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
-
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dd2f19b8022b..58176db85952 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -745,14 +745,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/**
* radeon_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
int vpos, hpos, stat;
u32 count;
struct radeon_device *rdev = dev->dev_private;
@@ -814,25 +815,26 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* radeon_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
int r;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = true;
+ rdev->irq.crtc_vblank_int[pipe] = true;
r = radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
return r;
@@ -841,23 +843,24 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
/**
* radeon_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
return;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = false;
+ rdev->irq.crtc_vblank_int[pipe] = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index a1985a552794..8817fd033cd0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1111,7 +1111,8 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
- .disable = radeon_crtc_disable
+ .disable = radeon_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 96565171d13e..c7f223743d46 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -880,6 +880,12 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+extern bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
@@ -980,9 +986,6 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
-
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b3380ffab4c2..5d50c9edbe80 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -66,11 +66,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
-static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -774,7 +769,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
- .invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 05e8b4d0af3f..2cb85dbe728f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2979,7 +2979,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0xC3) ||
(rdev->pdev->device == 0x6664) ||
(rdev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3cd83a030a04..c07c6a88aff0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -121,7 +121,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
* Attach the bridge to the encoder. The bridge will create the
* connector.
*/
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8ffa4fbbdeb3..ab0d49618cf9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -23,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "rcar_lvds.h"
@@ -590,8 +591,9 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_crtc *crtc;
@@ -603,7 +605,7 @@ static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -618,7 +620,8 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
/* Disable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- lvds->companion->funcs->atomic_disable(lvds->companion, state);
+ lvds->companion->funcs->atomic_disable(lvds->companion,
+ old_bridge_state);
clk_disable_unprepare(lvds->clocks.mod);
}
@@ -641,7 +644,8 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static int rcar_lvds_attach(struct drm_bridge *bridge)
+static int rcar_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
@@ -651,7 +655,12 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
- bridge);
+ bridge, flags);
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
/* Otherwise if we have a panel, create a connector. */
if (!lvds->panel)
@@ -682,6 +691,9 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.attach = rcar_lvds_attach,
.detach = rcar_lvds_detach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = rcar_lvds_atomic_enable,
.atomic_disable = rcar_lvds_atomic_disable,
.mode_fixup = rcar_lvds_mode_fixup,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 521fe42ac5e2..2fdc455c4ad7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -124,7 +124,7 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize drm fb helper - %d.\n",
@@ -132,13 +132,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
return ret;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to add connectors - %d.\n", ret);
- goto err_drm_fb_helper_fini;
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7582d0e6a60a..0d1884684dcb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d04b3492bdac..cecb2cc781f5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -724,7 +724,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
DRM_PLANE_HELPER_NO_SCALING;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0b3d18c457b2..cc672620d6e0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -328,7 +328,7 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
{
int act_height;
- act_height = (src_h + vskiplines - 1) / vskiplines;
+ act_height = DIV_ROUND_UP(src_h, vskiplines);
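+	/* rounds up: e.g. DIV_ROUND_UP(1079, 4) == 270, same as (1079 + 3) / 4 */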
if (act_height == dst_h)
return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f25a36743cbd..449a62908d21 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -646,7 +646,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index ae730275a34f..90784781e515 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -98,7 +98,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
if (of_property_read_u32(endpoint, "reg", &endpoint_id))
endpoint_id = 0;
- if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
+ /* if subdriver (> 0) or error case (< 0), ignore entry */
+ if (rockchip_drm_endpoint_is_subdriver(endpoint) != 0)
continue;
child_count++;
@@ -144,7 +145,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->bridge = bridge;
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index d79086498aff..877ce9b127f1 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
__entry->job_count, __entry->hw_job_count)
);
+TRACE_EVENT(drm_run_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity),
+ TP_STRUCT__entry(
+ __field(struct drm_sched_entity *, entity)
+ __field(struct dma_fence *, fence)
+ __field(const char *, name)
+ __field(uint64_t, id)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = entity;
+ __entry->id = sched_job->id;
+ __entry->fence = &sched_job->s_fence->finished;
+ __entry->name = sched_job->sched->name;
+ __entry->job_count = spsc_queue_count(&entity->job_queue);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __entry->name,
+ __entry->job_count, __entry->hw_job_count)
+);
+
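+/*
+ * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing; the
+ * event group comes from this header's TRACE_SYSTEM, "gpu_scheduler"):
+ *
+ *   echo 1 > /sys/kernel/tracing/events/gpu_scheduler/drm_run_job/enable
+ *   cat /sys/kernel/tracing/trace_pipe
+ */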
TRACE_EVENT(drm_sched_process_job,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 63bccd201b97..c803e14eed91 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -84,6 +84,24 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
+ * drm_sched_entity_modify_sched - Modify sched of an entity
+ * @entity: scheduler entity to modify
+ * @sched_list: the list of new drm scheds which will replace
+ *		existing entity->sched_list
+ * @num_sched_list: number of drm scheds in sched_list
+ */
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ WARN_ON(!num_sched_list || !sched_list);
+
+ entity->sched_list = sched_list;
+ entity->num_sched_list = num_sched_list;
+}
+EXPORT_SYMBOL(drm_sched_entity_modify_sched);
+
+/**
* drm_sched_entity_is_idle - Check if entity is idle
*
* @entity: scheduler entity
@@ -120,38 +138,6 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
}
/**
- * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
- *
- * @entity: scheduler entity
- *
- * Return the pointer to the rq with least load.
- */
-static struct drm_sched_rq *
-drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
-{
- struct drm_sched_rq *rq = NULL;
- unsigned int min_score = UINT_MAX, num_score;
- int i;
-
- for (i = 0; i < entity->num_sched_list; ++i) {
- struct drm_gpu_scheduler *sched = entity->sched_list[i];
-
- if (!entity->sched_list[i]->ready) {
- DRM_WARN("sched%s is not ready, skipping", sched->name);
- continue;
- }
-
- num_score = atomic_read(&sched->score);
- if (num_score < min_score) {
- min_score = num_score;
- rq = &entity->sched_list[i]->sched_rq[entity->priority];
- }
- }
-
- return rq;
-}
-
-/**
* drm_sched_entity_flush - Flush a context entity
*
* @entity: scheduler entity
@@ -461,6 +447,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
+ struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
@@ -471,7 +458,8 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
spin_lock(&entity->rq_lock);
- rq = drm_sched_entity_get_free_sched(entity);
+ sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+ rq = sched ? &sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
@@ -498,7 +486,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->score);
+ atomic_inc(&entity->rq->sched->num_jobs);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 60c4c6a1aac6..8e731ed0d9d9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,7 +92,6 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -111,7 +110,6 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -222,8 +220,7 @@ EXPORT_SYMBOL(drm_sched_fault);
*
* Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
- * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
- * called from an IRQ context.
+ * MAX_SCHEDULE_TIMEOUT in this case.
*
* Returns the timeout remaining
*
@@ -252,46 +249,41 @@ EXPORT_SYMBOL(drm_sched_suspend_timeout);
* @sched: scheduler instance for which to resume the timeout
* @remaining: remaining timeout
*
- * Resume the delayed work timeout for the scheduler. Note that
- * this function can be called from an IRQ context.
+ * Resume the delayed work timeout for the scheduler.
*/
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
unsigned long remaining)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
if (list_empty(&sched->ring_mirror_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -302,7 +294,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* is parked at which point it's safe.
*/
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -315,12 +307,12 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched->free_guilty = false;
}
} else {
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
/**
@@ -383,7 +375,6 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
kthread_park(sched->thread);
@@ -417,9 +408,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* remove job from ring_mirror_list.
* Locking here is for concurrent resume timeout
*/
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
/*
* Wait for job's HW fence callback to finish using s_job
@@ -462,7 +453,6 @@ EXPORT_SYMBOL(drm_sched_stop);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
int r;
/*
@@ -491,9 +481,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
}
if (full_recovery) {
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
kthread_unpark(sched->thread);
@@ -657,7 +647,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
+ atomic_dec(&sched->num_jobs);
trace_drm_sched_process_job(s_fence);
@@ -679,7 +669,6 @@ static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- unsigned long flags;
/*
* Don't destroy jobs while the timeout worker is running OR thread
@@ -690,7 +679,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
__kthread_should_park(sched->thread))
return NULL;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -704,12 +693,48 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
drm_sched_start_timeout(sched);
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
return job;
}
/**
+ * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns a pointer to the sched with the least load, or NULL if none of
+ * the drm_gpu_schedulers are ready.
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+ int i;
+ unsigned int min_jobs = UINT_MAX, num_jobs;
+
+ for (i = 0; i < num_sched_list; ++i) {
+ sched = sched_list[i];
+
+ if (!sched->ready) {
+ DRM_WARN("scheduler %s is not ready, skipping",
+ sched->name);
+ continue;
+ }
+
+ num_jobs = atomic_read(&sched->num_jobs);
+ if (num_jobs < min_jobs) {
+ min_jobs = num_jobs;
+ picked_sched = sched;
+ }
+ }
+
+ return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
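+/*
+ * The in-tree consumer is drm_sched_entity_select_rq() in sched_entity.c:
+ * it calls drm_sched_pick_best(entity->sched_list, entity->num_sched_list)
+ * and, when a ready scheduler is returned, moves the entity to
+ * &sched->sched_rq[entity->priority].
+ */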
+
+/**
* drm_sched_blocked - check if the scheduler is blocked
*
* @sched: scheduler instance
@@ -775,6 +800,7 @@ static int drm_sched_main(void *param)
atomic_inc(&sched->hw_rq_count);
drm_sched_job_begin(sched_job);
+ trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
drm_sched_fence_scheduled(s_fence);
@@ -834,7 +860,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->score, 0);
+ atomic_set(&sched->num_jobs, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index dc64fbfc4e61..49e6cb8f5836 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -279,12 +279,13 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
return 0;
}
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int sti_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
- struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
@@ -297,8 +298,10 @@ int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
+static void sti_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *drm_dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
@@ -330,6 +333,8 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.late_register = sti_crtc_late_register,
+ .enable_vblank = sti_crtc_enable_vblank,
+ .disable_vblank = sti_crtc_disable_vblank,
};
bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index df489ab14e2b..1132b4586712 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -15,8 +15,6 @@ struct sti_mixer;
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor);
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void sti_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data);
bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index a39fc36f815b..50870d8cbb76 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -21,7 +21,6 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_plane.h"
@@ -146,9 +145,6 @@ static struct drm_driver sti_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.fops = &sti_driver_fops,
- .enable_vblank = sti_crtc_enable_vblank,
- .disable_vblank = sti_crtc_disable_vblank,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index b2778ec1cdd7..3d04bfca21a0 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -467,7 +467,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
bridge->of_node = dvo->dev.of_node;
drm_bridge_add(bridge);
- err = drm_bridge_attach(encoder, bridge, NULL);
+ err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err) {
DRM_ERROR("Failed to attach bridge\n");
return err;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 2bb32009d117..f3f28d79b0e4 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -701,7 +701,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hda;
bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 64ed102033c8..18eaf786ffa4 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1281,7 +1281,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hdmi;
bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 5a9f9aca8bc2..ea9fcbdc68b3 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
- .get_scanout_position = ltdc_crtc_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
};
static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 4b165635b2d4..2e1f2664495d 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -377,7 +377,9 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
- DRM_ERROR("Unable to get pll reference clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Unable to get pll reference clock: %d\n",
+ ret);
goto err_clk_get;
}
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index c2815e8ae1da..df585fe64f61 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -636,38 +636,13 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
- .mode_valid = ltdc_crtc_mode_valid,
- .mode_fixup = ltdc_crtc_mode_fixup,
- .mode_set_nofb = ltdc_crtc_mode_set_nofb,
- .atomic_flush = ltdc_crtc_atomic_flush,
- .atomic_enable = ltdc_crtc_atomic_enable,
- .atomic_disable = ltdc_crtc_atomic_disable,
-};
-
-static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_set(ldev->regs, LTDC_IER, IER_LIE);
-
- return 0;
-}
-
-static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_clear(ldev->regs, LTDC_IER, IER_LIE);
-}
-
-bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool ltdc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *ddev = crtc->dev;
struct ltdc_device *ldev = ddev->dev_private;
int line, vactive_start, vactive_end, vtotal;
@@ -710,6 +685,39 @@ bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
return true;
}
+static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
+ .mode_valid = ltdc_crtc_mode_valid,
+ .mode_fixup = ltdc_crtc_mode_fixup,
+ .mode_set_nofb = ltdc_crtc_mode_set_nofb,
+ .atomic_flush = ltdc_crtc_atomic_flush,
+ .atomic_enable = ltdc_crtc_atomic_enable,
+ .atomic_disable = ltdc_crtc_atomic_disable,
+ .get_scanout_position = ltdc_crtc_get_scanout_position,
+};
+
+static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_crtc_state *state = crtc->state;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (state->enable)
+ reg_set(ldev->regs, LTDC_IER, IER_LIE);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+
+ DRM_DEBUG_DRIVER("\n");
+ reg_clear(ldev->regs, LTDC_IER, IER_LIE);
+}
+
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
@@ -719,6 +727,7 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
};
@@ -1100,7 +1109,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return -EINVAL;
@@ -1146,12 +1155,14 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
+ ldev->caps.nb_irq = 2;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 150000000;
+ ldev->caps.nb_irq = 4;
break;
default:
return -ENODEV;
@@ -1251,13 +1262,21 @@ int ltdc_load(struct drm_device *ddev)
reg_clear(ldev->regs, LTDC_IER,
IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
- for (i = 0; i < MAX_IRQ; i++) {
+ ret = ltdc_get_caps(ddev);
+ if (ret) {
+ DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
+ ldev->caps.hw_version);
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+
+ for (i = 0; i < ldev->caps.nb_irq; i++) {
irq = platform_get_irq(pdev, i);
- if (irq == -EPROBE_DEFER)
+ if (irq < 0) {
+ ret = irq;
goto err;
-
- if (irq < 0)
- continue;
+ }
ret = devm_request_threaded_irq(dev, irq, ltdc_irq,
ltdc_irq_thread, IRQF_ONESHOT,
@@ -1268,16 +1287,6 @@ int ltdc_load(struct drm_device *ddev)
}
}
-
- ret = ltdc_get_caps(ddev);
- if (ret) {
- DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
- ldev->caps.hw_version);
- goto err;
- }
-
- DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
-
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index a1ad0ae3b006..f153b908c70e 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -19,6 +19,7 @@ struct ltdc_caps {
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
int pad_max_freq_hz; /* max frequency supported by pad */
+ int nb_irq; /* number of hardware interrupts */
};
#define LTDC_MAX_LAYER 4
@@ -39,11 +40,6 @@ struct ltdc_device {
struct drm_atomic_state *suspend_state;
};
-bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
void ltdc_suspend(struct drm_device *ddev);
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 65b7a8739666..26e5c7ceb8ff 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -156,7 +156,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index b27f16af50f5..3b23d5be3cf3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -253,7 +253,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (rgb->bridge) {
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c81cdce6ed55..624437b27cdc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -114,46 +114,71 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
}
}
+static void sun4i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_CK_EN |
+ SUN4I_TCON0_LVDS_ANA0_REG_V |
+ SUN4I_TCON0_LVDS_ANA0_REG_C |
+ SUN4I_TCON0_LVDS_ANA0_EN_MB |
+ SUN4I_TCON0_LVDS_ANA0_PD |
+ SUN4I_TCON0_LVDS_ANA0_DCHS);
+
+ udelay(2); /* delay at least 1200 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_INIT,
+ SUN4I_TCON0_LVDS_ANA1_INIT);
+ udelay(1); /* delay at least 120 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE);
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB);
+}
+
+static void sun6i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ u8 val;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_C(2) |
+ SUN6I_TCON0_LVDS_ANA0_V(3) |
+ SUN6I_TCON0_LVDS_ANA0_PD(2) |
+ SUN6I_TCON0_LVDS_ANA0_EN_LDO);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
+
+ if (sun4i_tcon_get_pixel_depth(encoder) == 18)
+ val = 7;
+ else
+ val = 0xf;
+
+ regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+}
+
static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
if (enabled) {
- u8 val;
-
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN,
SUN4I_TCON0_LVDS_IF_EN);
-
- /*
- * As their name suggest, these values only apply to the A31
- * and later SoCs. We'll have to rework this when merging
- * support for the older SoCs.
- */
- regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_C(2) |
- SUN6I_TCON0_LVDS_ANA0_V(3) |
- SUN6I_TCON0_LVDS_ANA0_PD(2) |
- SUN6I_TCON0_LVDS_ANA0_EN_LDO);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_MB,
- SUN6I_TCON0_LVDS_ANA0_EN_MB);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
-
- if (sun4i_tcon_get_pixel_depth(encoder) == 18)
- val = 7;
- else
- val = 0xf;
-
- regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+ if (tcon->quirks->setup_lvds_phy)
+ tcon->quirks->setup_lvds_phy(tcon, encoder);
} else {
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN, 0);
@@ -1453,6 +1478,16 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.dclk_min_div = 1,
};
+static const struct sun4i_tcon_quirks sun7i_a20_tcon0_quirks = {
+ .supports_lvds = true,
+ .has_channel_0 = true,
+ .has_channel_1 = true,
+ .dclk_min_div = 4,
+ /* Same display pipeline structure as A10 */
+ .set_mux = sun4i_a10_tcon_set_mux,
+ .setup_lvds_phy = sun4i_tcon_setup_lvds_phy,
+};
+
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
@@ -1465,12 +1500,15 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
+ .supports_lvds = true,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1505,6 +1543,8 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_tcon0_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index a62ec826ae71..cfbf4e6c1679 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -193,6 +193,13 @@
#define SUN4I_TCON_MUX_CTRL_REG 0x200
#define SUN4I_TCON0_LVDS_ANA0_REG 0x220
+#define SUN4I_TCON0_LVDS_ANA0_DCHS BIT(16)
+#define SUN4I_TCON0_LVDS_ANA0_PD (BIT(20) | BIT(21))
+#define SUN4I_TCON0_LVDS_ANA0_EN_MB BIT(22)
+#define SUN4I_TCON0_LVDS_ANA0_REG_C (BIT(24) | BIT(25))
+#define SUN4I_TCON0_LVDS_ANA0_REG_V (BIT(26) | BIT(27))
+#define SUN4I_TCON0_LVDS_ANA0_CK_EN (BIT(29) | BIT(28))
+
#define SUN6I_TCON0_LVDS_ANA0_EN_MB BIT(31)
#define SUN6I_TCON0_LVDS_ANA0_EN_LDO BIT(30)
#define SUN6I_TCON0_LVDS_ANA0_EN_DRVC BIT(24)
@@ -201,6 +208,10 @@
#define SUN6I_TCON0_LVDS_ANA0_V(x) (((x) & 3) << 8)
#define SUN6I_TCON0_LVDS_ANA0_PD(x) (((x) & 3) << 4)
+#define SUN4I_TCON0_LVDS_ANA1_REG 0x224
+#define SUN4I_TCON0_LVDS_ANA1_INIT (0x1f << 26 | 0x1f << 10)
+#define SUN4I_TCON0_LVDS_ANA1_UPDATE (0x1f << 16 | 0x1f << 00)
+
#define SUN4I_TCON1_FILL_CTL_REG 0x300
#define SUN4I_TCON1_FILL_BEG0_REG 0x304
#define SUN4I_TCON1_FILL_END0_REG 0x308
@@ -228,6 +239,9 @@ struct sun4i_tcon_quirks {
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
+ /* handler for LVDS setup routine */
+ void (*setup_lvds_phy)(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder);
};
struct sun4i_tcon {
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a75fcb113172..059939789730 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -14,7 +14,6 @@
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -27,7 +26,6 @@
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
-#include "sun4i_drv.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
@@ -722,10 +720,31 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
union phy_configure_opts opts = { 0 };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;
+ int err;
DRM_DEBUG_DRIVER("Enabling DSI output\n");
- pm_runtime_get_sync(dsi->dev);
+ err = regulator_enable(dsi->regulator);
+ if (err)
+ dev_warn(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+
+ reset_control_deassert(dsi->reset);
+ clk_prepare_enable(dsi->mod_clk);
+
+ /*
+ * Enable the DSI block.
+ */
+ regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+ SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
+
+ sun6i_dsi_inst_init(dsi, dsi->device);
+
+ regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
delay = sun6i_dsi_get_video_start_delay(dsi, mode);
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
@@ -749,7 +768,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
phy_configure(dsi->dphy, &opts);
phy_power_on(dsi->dphy);
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_prepare(dsi->panel);
/*
@@ -764,7 +783,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
* ordering on the panels I've tested it with, so I guess this
* will do for now, until that IP is better understood.
*/
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_enable(dsi->panel);
sun6i_dsi_start(dsi, DSI_START_HSC);
@@ -780,7 +799,7 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling DSI output\n");
- if (!IS_ERR(dsi->panel)) {
+ if (dsi->panel) {
drm_panel_disable(dsi->panel);
drm_panel_unprepare(dsi->panel);
}
@@ -788,7 +807,9 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
phy_power_off(dsi->dphy);
phy_exit(dsi->dphy);
- pm_runtime_put(dsi->dev);
+ clk_disable_unprepare(dsi->mod_clk);
+ reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
}
static int sun6i_dsi_get_modes(struct drm_connector *connector)
@@ -805,7 +826,10 @@ static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
static enum drm_connector_status
sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_connected;
+ struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
+
+ return dsi->panel ? connector_status_connected :
+ connector_status_disconnected;
}
static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
@@ -942,11 +966,18 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+ if (!dsi->drm || !dsi->drm->registered)
+ return -EPROBE_DEFER;
+
+ dsi->panel = panel;
dsi->device = device;
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (IS_ERR(dsi->panel))
- return PTR_ERR(dsi->panel);
+
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ drm_kms_helper_hotplug_event(dsi->drm);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -957,10 +988,14 @@ static int sun6i_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = dsi->panel;
dsi->panel = NULL;
dsi->device = NULL;
+ drm_panel_detach(panel);
+ drm_kms_helper_hotplug_event(dsi->drm);
+
return 0;
}
@@ -1022,15 +1057,9 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
- struct sun4i_drv *drv = drm->dev_private;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
int ret;
- if (!dsi->panel)
- return -EPROBE_DEFER;
-
- dsi->drv = drv;
-
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
ret = drm_encoder_init(drm,
@@ -1056,7 +1085,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
}
drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
- drm_panel_attach(dsi->panel, &dsi->connector);
+
+ dsi->drm = drm;
return 0;
@@ -1070,7 +1100,7 @@ static void sun6i_dsi_unbind(struct device *dev, struct device *master,
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- drm_panel_detach(dsi->panel);
+ dsi->drm = NULL;
}
static const struct component_ops sun6i_dsi_ops = {
@@ -1157,12 +1187,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
goto err_unprotect_clk;
}
- pm_runtime_enable(dev);
-
ret = mipi_dsi_host_register(&dsi->host);
if (ret) {
dev_err(dev, "Couldn't register MIPI-DSI host\n");
- goto err_pm_disable;
+ goto err_unprotect_clk;
}
ret = component_add(&pdev->dev, &sun6i_dsi_ops);
@@ -1175,8 +1203,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
err_remove_dsi_host:
mipi_dsi_host_unregister(&dsi->host);
-err_pm_disable:
- pm_runtime_disable(dev);
err_unprotect_clk:
clk_rate_exclusive_put(dsi->mod_clk);
err_attach_clk:
@@ -1192,7 +1218,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &sun6i_dsi_ops);
mipi_dsi_host_unregister(&dsi->host);
- pm_runtime_disable(dev);
clk_rate_exclusive_put(dsi->mod_clk);
if (!IS_ERR(dsi->bus_clk))
@@ -1201,59 +1226,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- err = regulator_enable(dsi->regulator);
- if (err) {
- dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
- return err;
- }
-
- reset_control_deassert(dsi->reset);
- clk_prepare_enable(dsi->mod_clk);
-
- /*
- * Enable the DSI block.
- *
- * Some part of it can only be done once we get a number of
- * lanes, see sun6i_dsi_inst_init
- */
- regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
- SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
-
- if (dsi->device)
- sun6i_dsi_inst_init(dsi, dsi->device);
-
- regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
-
- return 0;
-}
-
-static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(dsi->mod_clk);
- reset_control_assert(dsi->reset);
- regulator_disable(dsi->regulator);
-
- return 0;
-}
-
-static const struct dev_pm_ops sun6i_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
- sun6i_dsi_runtime_resume,
- NULL)
-};
-
static const struct of_device_id sun6i_dsi_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
{ .compatible = "allwinner,sun50i-a64-mipi-dsi" },
@@ -1267,7 +1239,6 @@ static struct platform_driver sun6i_dsi_platform_driver = {
.driver = {
.name = "sun6i-mipi-dsi",
.of_match_table = sun6i_dsi_of_table,
- .pm = &sun6i_dsi_pm_ops,
},
};
module_platform_driver(sun6i_dsi_platform_driver);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 3f4846f581ef..c863900ae3b4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -28,8 +28,8 @@ struct sun6i_dsi {
struct phy *dphy;
struct device *dev;
- struct sun4i_drv *drv;
struct mipi_dsi_device *device;
+ struct drm_device *drm;
struct drm_panel *panel;
};
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 7c70fd31a4c2..1a7b08f35776 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2503,7 +2503,6 @@ static int tegra_dc_couple(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -2560,8 +2559,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
tegra_powergate_power_off(dc->powergate);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
@@ -2573,7 +2571,13 @@ static int tegra_dc_probe(struct platform_device *pdev)
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
- dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dc->dev, "failed to probe RGB output: %d\n",
+ err);
return err;
}
@@ -2588,10 +2592,16 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto disable_pm;
}
return 0;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+ tegra_dc_rgb_remove(dc);
+
+ return err;
}
static int tegra_dc_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 84f0e01e3428..b8a328f53862 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -314,19 +314,13 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
struct drm_device *drm = fbdev->base.dev;
int err;
- err = drm_fb_helper_init(drm, &fbdev->base, max_connectors);
+ err = drm_fb_helper_init(drm, &fbdev->base);
if (err < 0) {
dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
err);
return err;
}
- err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
- if (err < 0) {
- dev_err(drm->dev, "failed to add connectors: %d\n", err);
- goto fini;
- }
-
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
dev_err(drm->dev, "failed to set initial configuration: %d\n",
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6f117628f257..38252c0f068d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1648,6 +1648,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
static int tegra_hdmi_probe(struct platform_device *pdev)
{
+ const char *level = KERN_ERR;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
@@ -1686,21 +1687,36 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
}
hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
- if (IS_ERR(hdmi->hdmi)) {
- dev_err(&pdev->dev, "failed to get HDMI regulator\n");
- return PTR_ERR(hdmi->hdmi);
+ err = PTR_ERR_OR_ZERO(hdmi->hdmi);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get HDMI regulator: %d\n", err);
+ return err;
}
hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
- if (IS_ERR(hdmi->pll)) {
- dev_err(&pdev->dev, "failed to get PLL regulator\n");
- return PTR_ERR(hdmi->pll);
+ err = PTR_ERR_OR_ZERO(hdmi->pll);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get PLL regulator: %d\n", err);
+ return err;
}
hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(hdmi->vdd)) {
- dev_err(&pdev->dev, "failed to get VDD regulator\n");
- return PTR_ERR(hdmi->vdd);
+ err = PTR_ERR_OR_ZERO(hdmi->vdd);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get VDD regulator: %d\n", err);
+ return err;
}
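The same defer-aware logging pattern appears for all three regulators
above; a hypothetical helper (a sketch, not part of this patch) could
factor it out:

	static void log_get_failure(struct device *dev, int err,
				    const char *what)
	{
		const char *level = KERN_ERR;

		if (err == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev, "failed to get %s: %d\n", what, err);
	}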
hdmi->output.dev = &pdev->dev;
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
new file mode 100644
index 000000000000..f790a5215302
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -0,0 +1,14 @@
+config DRM_TIDSS
+ tristate "DRM Support for TI Keystone"
+ depends on DRM && OF
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ The TI Keystone family of SoCs introduced a new generation of the
+ Display SubSystem. There are currently three Keystone family SoCs
+ released with DSS, each with a somewhat different version of it.
+ The SoCs are 66AK2Gx, AM65x, and J721E. Set this to Y or M to add
+ display support for TI Keystone family platforms.
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
new file mode 100644
index 000000000000..312645271014
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+tidss-y := tidss_crtc.o \
+ tidss_drv.o \
+ tidss_encoder.o \
+ tidss_kms.o \
+ tidss_irq.o \
+ tidss_plane.o \
+ tidss_scale_coefs.o \
+ tidss_dispc.o
+
+obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
new file mode 100644
index 000000000000..d4ce9bab8c7e
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* Page flip and frame done IRQs */
+
+static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
+{
+ struct drm_device *ddev = tcrtc->crtc.dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ bool busy;
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ /*
+ * New settings are taken into use at VFP, and GO bit is cleared at
+ * the same time. This happens before the vertical blank interrupt.
+ * So there is a small chance that the driver sets the GO bit after VFP,
+ * but before vblank, and we have to check for that case here.
+ */
+ busy = dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport);
+ if (busy) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ event = tcrtc->event;
+ tcrtc->event = NULL;
+
+ if (!event) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ drm_crtc_send_vblank_event(&tcrtc->crtc, event);
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&tcrtc->crtc);
+}
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ drm_crtc_handle_vblank(crtc);
+
+ tidss_crtc_finish_page_flip(tcrtc);
+}
+
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ complete(&tcrtc->framedone_completion);
+}
+
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ dev_err_ratelimited(crtc->dev->dev, "CRTC%u SYNC LOST: (irq %llx)\n",
+ tcrtc->hw_videoport, irqstatus);
+}
+
+/* drm_crtc_helper_funcs */
+
+static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct dispc_device *dispc = tidss->dispc;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ const struct drm_display_mode *mode;
+ enum drm_mode_status ok;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->enable)
+ return 0;
+
+ mode = &state->adjusted_mode;
+
+ ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
+ if (ok != MODE_OK) {
+ dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n",
+ __func__, mode->hdisplay, mode->vdisplay, mode->clock);
+ return -EINVAL;
+ }
+
+ return dispc_vp_bus_check(dispc, hw_videoport, state);
+}
+
+/*
+ * This needs all affected planes to be present in the atomic
+ * state. The untouched planes are added to the state in
+ * tidss_atomic_check().
+ */
+static void tidss_crtc_position_planes(struct tidss_device *tidss,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state,
+ bool newmodeset)
+{
+ struct drm_atomic_state *ostate = old_state->state;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_crtc_state *cstate = crtc->state;
+ int layer;
+
+ if (!newmodeset && !cstate->zpos_changed &&
+ !to_tidss_crtc_state(cstate)->plane_pos_changed)
+ return;
+
+ for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+ struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ bool layer_active = false;
+ int i;
+
+ for_each_new_plane_in_state(ostate, plane, pstate, i) {
+ if (pstate->crtc != crtc || !pstate->visible)
+ continue;
+
+ if (pstate->normalized_zpos == layer) {
+ layer_active = true;
+ break;
+ }
+ }
+
+ if (layer_active) {
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dispc_ovr_set_plane(tidss->dispc, tplane->hw_plane_id,
+ tcrtc->hw_videoport,
+ pstate->crtc_x, pstate->crtc_y,
+ layer);
+ }
+ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
+ layer_active);
+ }
+}
+
+static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev,
+ "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+ crtc->name, crtc->state->enable,
+ drm_atomic_crtc_needs_modeset(crtc->state), crtc->state->event);
+
+ /* There is nothing to do if CRTC is not going to be enabled. */
+ if (!crtc->state->enable)
+ return;
+
+ /*
+ * Flush CRTC changes with the GO bit only if a new modeset is not
+ * coming, so the CRTC stays enabled throughout the commit.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc->state))
+ return;
+
+ /* If the GO bit is stuck we better quit here. */
+ if (WARN_ON(dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport)))
+ return;
+
+ /* We should have an event if the CRTC is enabled throughout this commit. */
+ if (WARN_ON(!crtc->state->event))
+ return;
+
+ /* Write vp properties to HW if needed. */
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, false);
+
+ /* Update plane positions if needed. */
+ tidss_crtc_position_planes(tidss, crtc, old_crtc_state, false);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ dispc_vp_go(tidss->dispc, tcrtc->hw_videoport);
+
+ WARN_ON(tcrtc->event);
+
+ tcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ unsigned long flags;
+ int r;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ tidss_runtime_get(tidss);
+
+ r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
+ mode->clock * 1000);
+ if (r != 0)
+ return;
+
+ r = dispc_vp_enable_clk(tidss->dispc, tcrtc->hw_videoport);
+ if (r != 0)
+ return;
+
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, true);
+ tidss_crtc_position_planes(tidss, crtc, old_state, true);
+
+ /* Turn vertical blanking interrupt reporting on. */
+ drm_crtc_vblank_on(crtc);
+
+ dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ reinit_completion(&tcrtc->framedone_completion);
+
+ dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
+
+ if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
+ msecs_to_jiffies(500)))
+ dev_err(tidss->dev, "Timeout waiting for framedone on crtc %d\n",
+ tcrtc->hw_videoport);
+
+ dispc_vp_unprepare(tidss->dispc, tcrtc->hw_videoport);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_off(crtc);
+
+ dispc_vp_disable_clk(tidss->dispc, tcrtc->hw_videoport);
+
+ tidss_runtime_put(tidss);
+}
+
+static
+enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
+}
+
+static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
+ .atomic_check = tidss_crtc_atomic_check,
+ .atomic_flush = tidss_crtc_atomic_flush,
+ .atomic_enable = tidss_crtc_atomic_enable,
+ .atomic_disable = tidss_crtc_atomic_disable,
+
+ .mode_valid = tidss_crtc_mode_valid,
+};
+
+/* drm_crtc_funcs */
+
+static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ tidss_irq_enable_vblank(crtc);
+
+ return 0;
+}
+
+static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_irq_disable_vblank(crtc);
+
+ tidss_runtime_put(tidss);
+}
+
+static void tidss_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *tcrtc;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(crtc->state);
+
+ tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc) {
+ crtc->state = NULL;
+ return;
+ }
+
+ crtc->state = &tcrtc->base;
+ crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *state, *current_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ current_state = to_tidss_crtc_state(crtc->state);
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ state->plane_pos_changed = false;
+
+ state->bus_format = current_state->bus_format;
+ state->bus_flags = current_state->bus_flags;
+
+ return &state->base;
+}
+
+static const struct drm_crtc_funcs tidss_crtc_funcs = {
+ .reset = tidss_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = tidss_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = tidss_crtc_enable_vblank,
+ .disable_vblank = tidss_crtc_disable_vblank,
+};
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary)
+{
+ struct tidss_crtc *tcrtc;
+ struct drm_crtc *crtc;
+ unsigned int gamma_lut_size = 0;
+ bool has_ctm = tidss->feat->vp_feat.color.has_ctm;
+ int ret;
+
+ tcrtc = devm_kzalloc(tidss->dev, sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc)
+ return ERR_PTR(-ENOMEM);
+
+ tcrtc->hw_videoport = hw_videoport;
+ init_completion(&tcrtc->framedone_completion);
+
+ crtc = &tcrtc->crtc;
+
+ ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary,
+ NULL, &tidss_crtc_funcs, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs);
+
+ /*
+ * The dispc gamma functions adapt to whatever size we ask
+ * for, no matter what the HW supports. The X server assumes
+ * 256-element gamma tables, so let's use that.
+ */
+ if (tidss->feat->vp_feat.color.gamma_size)
+ gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, has_ctm, gamma_lut_size);
+ if (gamma_lut_size)
+ drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+
+ return tcrtc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.h b/drivers/gpu/drm/tidss/tidss_crtc.h
new file mode 100644
index 000000000000..09e773666228
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_CRTC_H__
+#define __TIDSS_CRTC_H__
+
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <drm/drm_crtc.h>
+
+#define to_tidss_crtc(c) container_of((c), struct tidss_crtc, crtc)
+
+struct tidss_device;
+
+struct tidss_crtc {
+ struct drm_crtc crtc;
+
+ u32 hw_videoport;
+
+ struct drm_pending_vblank_event *event;
+
+ struct completion framedone_completion;
+};
+
+#define to_tidss_crtc_state(x) container_of(x, struct tidss_crtc_state, base)
+
+struct tidss_crtc_state {
+ /* Must be first. */
+ struct drm_crtc_state base;
+
+ bool plane_pos_changed;
+
+ u32 bus_format;
+ u32 bus_flags;
+};
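The "Must be first" rule above matters because the core and the driver
free the state through the base pointer (for example, kfree(crtc->state)
in tidss_crtc_reset() frees the whole tidss_crtc_state only because base
sits at offset 0). A compile-time check of that layout assumption could
look like this (illustrative sketch; it would have to live inside a
function):

	BUILD_BUG_ON(offsetof(struct tidss_crtc_state, base) != 0);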
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc);
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc);
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus);
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary);
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
new file mode 100644
index 000000000000..29f42768e294
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+#include "tidss_dispc_regs.h"
+#include "tidss_scale_coefs.h"
+
+static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x00,
+ [DSS_SYSCONFIG_OFF] = 0x04,
+ [DSS_SYSSTATUS_OFF] = 0x08,
+ [DISPC_IRQ_EOI_OFF] = 0x20,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x24,
+ [DISPC_IRQSTATUS_OFF] = 0x28,
+ [DISPC_IRQENABLE_SET_OFF] = 0x2c,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x30,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x40,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x44,
+
+ [DISPC_DBG_CONTROL_OFF] = 0x4c,
+ [DISPC_DBG_STATUS_OFF] = 0x50,
+
+ [DISPC_CLKGATING_DISABLE_OFF] = 0x54,
+};
+
+const struct dispc_features dispc_k2g_feats = {
+ .min_pclk_khz = 4375,
+
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 150000,
+ },
+
+ /*
+ * XXX According to the TRM, an RGB input buffer width of up to 2560
+ * should work on 3 taps, but in practice it only works up to 1280.
+ */
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 1280,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 2560,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_K2G,
+
+ .common = "common",
+
+ .common_regs = tidss_k2g_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 1,
+ .vid_name = { "vid1" },
+ .vid_lite = { false },
+ .vid_order = { 0 },
+};
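A quick cross-check of the xinc_max arithmetic in the comment above:
pixel inc = 1 + (xinc - 1) * bpp must fit in 255, and the worst-case bpp
is 8, so xinc = 32 gives 1 + 31 * 8 = 249 <= 255, while xinc = 33 gives
1 + 32 * 8 = 257 > 255; hence xinc_max = 32.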
+
+static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x24,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x40,
+ [DISPC_VID_IRQENABLE_OFF] = 0x44,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x58,
+ [DISPC_VP_IRQENABLE_OFF] = 0x70,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x7c,
+
+ [WB_IRQENABLE_OFF] = 0x88,
+ [WB_IRQSTATUS_OFF] = 0x8c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x90,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x94,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x98,
+ [DSS_CBA_CFG_OFF] = 0x9c,
+ [DISPC_DBG_CONTROL_OFF] = 0xa0,
+ [DISPC_DBG_STATUS_OFF] = 0xa4,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xa8,
+ [DISPC_SECURE_DISABLE_OFF] = 0xac,
+};
+
+const struct dispc_features dispc_am65x_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ [DISPC_VP_OLDI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM65X,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+
+ .errata = {
+ .i2000 = true,
+ },
+};
+
+static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x80,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x34,
+ [DISPC_VID_IRQENABLE_OFF] = 0x38,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x48,
+ [DISPC_VP_IRQENABLE_OFF] = 0x58,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x68,
+
+ [WB_IRQENABLE_OFF] = 0x78,
+ [WB_IRQSTATUS_OFF] = 0x7c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x98,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x9c,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0xa0,
+ [DSS_CBA_CFG_OFF] = 0xa4,
+ [DISPC_DBG_CONTROL_OFF] = 0xa8,
+ [DISPC_DBG_STATUS_OFF] = 0xac,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xb0,
+ [DISPC_SECURE_DISABLE_OFF] = 0x90,
+
+ [FBDC_REVISION_1_OFF] = 0xb8,
+ [FBDC_REVISION_2_OFF] = 0xbc,
+ [FBDC_REVISION_3_OFF] = 0xc0,
+ [FBDC_REVISION_4_OFF] = 0xc4,
+ [FBDC_REVISION_5_OFF] = 0xc8,
+ [FBDC_REVISION_6_OFF] = 0xcc,
+ [FBDC_COMMON_CONTROL_OFF] = 0xd0,
+ [FBDC_CONSTANT_COLOR_0_OFF] = 0xd4,
+ [FBDC_CONSTANT_COLOR_1_OFF] = 0xd8,
+ [DISPC_CONNECTIONS_OFF] = 0xe4,
+ [DISPC_MSS_VP1_OFF] = 0xe8,
+ [DISPC_MSS_VP3_OFF] = 0xec,
+};
+
+const struct dispc_features dispc_j721e_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 170000,
+ [DISPC_VP_INTERNAL] = 600000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 2048,
+ .in_width_max_3tap_rgb = 4096,
+ .in_width_max_5tap_yuv = 4096,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_J721E,
+
+ .common = "common_m",
+ .common_regs = tidss_j721e_common_regs,
+
+ .num_vps = 4,
+ .vp_name = { "vp1", "vp2", "vp3", "vp4" },
+ .ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" },
+ .vpclk_name = { "vp1", "vp2", "vp3", "vp4" },
+ /* Currently hard coded VP routing (see dispc_initial_config()) */
+ .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI,
+ DISPC_VP_INTERNAL, DISPC_VP_DPI, },
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 1024,
+ .gamma_type = TIDSS_GAMMA_10BIT,
+ },
+ },
+ .num_planes = 4,
+ .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
+ .vid_lite = { 0, 1, 0, 1, },
+ .vid_order = { 1, 3, 0, 2 },
+};
+
+static const u16 *dispc_common_regmap;
+
+struct dss_vp_data {
+ u32 *gamma_table;
+};
+
+struct dispc_device {
+ struct tidss_device *tidss;
+ struct device *dev;
+
+ void __iomem *base_common;
+ void __iomem *base_vid[TIDSS_MAX_PLANES];
+ void __iomem *base_ovr[TIDSS_MAX_PORTS];
+ void __iomem *base_vp[TIDSS_MAX_PORTS];
+
+ struct regmap *oldi_io_ctrl;
+
+ struct clk *vp_clk[TIDSS_MAX_PORTS];
+
+ const struct dispc_features *feat;
+
+ struct clk *fclk;
+
+ bool is_enabled;
+
+ struct dss_vp_data vp_data[TIDSS_MAX_PORTS];
+
+ u32 *fourccs;
+ u32 num_fourccs;
+
+ u32 memory_bandwidth_limit;
+};
+
+static void dispc_write(struct dispc_device *dispc, u16 reg, u32 val)
+{
+ iowrite32(val, dispc->base_common + reg);
+}
+
+static u32 dispc_read(struct dispc_device *dispc, u16 reg)
+{
+ return ioread32(dispc->base_common + reg);
+}
+
+static
+void dispc_vid_write(struct dispc_device *dispc, u32 hw_plane, u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vid_read(struct dispc_device *dispc, u32 hw_plane, u16 reg)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_ovr_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_ovr_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_vp_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+/*
+ * TRM gives bitfields as start:end, where start is the higher bit
+ * number. For example 7:0
+ */
+
+static u32 FLD_MASK(u32 start, u32 end)
+{
+ return ((1 << (start - end + 1)) - 1) << end;
+}
+
+static u32 FLD_VAL(u32 val, u32 start, u32 end)
+{
+ return (val << end) & FLD_MASK(start, end);
+}
+
+static u32 FLD_GET(u32 val, u32 start, u32 end)
+{
+ return (val & FLD_MASK(start, end)) >> end;
+}
+
+static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
+{
+ return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
+}
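A few concrete values for the start:end helpers above (illustrative
only):

	/*
	 * FLD_MASK(7, 0)               == 0x000000ff
	 * FLD_MASK(31, 28)             == 0xf0000000
	 * FLD_VAL(0x3, 5, 4)           == 0x00000030
	 * FLD_GET(0x00000030, 5, 4)    == 0x3
	 * FLD_MOD(0xffffffff, 0, 5, 4) == 0xffffffcf
	 */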
+
+static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
+{
+ return FLD_GET(dispc_read(dispc, idx), start, end);
+}
+
+static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
+ start, end));
+}
+
+static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
+}
+
+static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_vid_write(dispc, hw_plane, idx,
+ FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
+ val, start, end));
+}
+
+static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
+}
+
+static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
+ val, start, end));
+}
+
+__maybe_unused
+static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
+}
+
+static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_ovr_write(dispc, ovr, idx,
+ FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
+ val, start, end));
+}
+
+static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
+{
+ dispc_irq_t vp_stat = 0;
+
+ if (stat & BIT(0))
+ vp_stat |= DSS_IRQ_VP_FRAME_DONE(hw_videoport);
+ if (stat & BIT(1))
+ vp_stat |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport);
+ if (stat & BIT(2))
+ vp_stat |= DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ if (stat & BIT(4))
+ vp_stat |= DSS_IRQ_VP_SYNC_LOST(hw_videoport);
+
+ return vp_stat;
+}
+
+static u32 dispc_vp_irq_to_raw(dispc_irq_t vpstat, u32 hw_videoport)
+{
+ u32 stat = 0;
+
+ if (vpstat & DSS_IRQ_VP_FRAME_DONE(hw_videoport))
+ stat |= BIT(0);
+ if (vpstat & DSS_IRQ_VP_VSYNC_EVEN(hw_videoport))
+ stat |= BIT(1);
+ if (vpstat & DSS_IRQ_VP_VSYNC_ODD(hw_videoport))
+ stat |= BIT(2);
+ if (vpstat & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ stat |= BIT(4);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_vid_irq_from_raw(u32 stat, u32 hw_plane)
+{
+ dispc_irq_t vid_stat = 0;
+
+ if (stat & BIT(0))
+ vid_stat |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane);
+
+ return vid_stat;
+}
+
+static u32 dispc_vid_irq_to_raw(dispc_irq_t vidstat, u32 hw_plane)
+{
+ u32 stat = 0;
+
+ if (vidstat & DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane))
+ stat |= BIT(0);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE, stat);
+}
+
+static void dispc_k2g_clear_irqstatus(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ dispc_k2g_vp_write_irqstatus(dispc, 0, mask);
+ dispc_k2g_vid_write_irqstatus(dispc, 0, mask);
+}
+
+static
+dispc_irq_t dispc_k2g_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ /* always clear the top level irqstatus */
+ dispc_write(dispc, DISPC_IRQSTATUS,
+ dispc_read(dispc, DISPC_IRQSTATUS));
+
+ stat |= dispc_k2g_vp_read_irqstatus(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqstatus(dispc, 0);
+
+ dispc_k2g_clear_irqstatus(dispc, stat);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ stat |= dispc_k2g_vp_read_irqenable(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqenable(dispc, 0);
+
+ return stat;
+}
+
+static
+void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
+
+ dispc_k2g_vp_set_irqenable(dispc, 0, mask);
+ dispc_k2g_vid_set_irqenable(dispc, 0, mask);
+
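+ /* unconditionally enable the top-level irq lines (bits 0 and 7) */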
+ dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+
+ /* flush posted write */
+ dispc_k2g_read_irqenable(dispc);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+}
+
+static
+void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
+{
+ unsigned int i;
+ u32 top_clear = 0;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ if (clearmask & DSS_IRQ_VP_MASK(i)) {
+ dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(i);
+ }
+ }
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+ dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(4 + i);
+ }
+ }
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQSTATUS);
+}
+
+static
+dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t status = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ status |= dispc_k3_vp_read_irqstatus(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ status |= dispc_k3_vid_read_irqstatus(dispc, i);
+
+ dispc_k3_clear_irqstatus(dispc, status);
+
+ return status;
+}
+
+static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t enable = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ enable |= dispc_k3_vp_read_irqenable(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ enable |= dispc_k3_vid_read_irqenable(dispc, i);
+
+ return enable;
+}
+
+static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ unsigned int i;
+ u32 main_enable = 0, main_disable = 0;
+ dispc_irq_t old_mask;
+
+ old_mask = dispc_k3_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ dispc_k3_vp_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_VP_MASK(i))
+ main_enable |= BIT(i); /* VP IRQ */
+ else
+ main_disable |= BIT(i); /* VP IRQ */
+ }
+
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ dispc_k3_vid_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_PLANE_MASK(i))
+ main_enable |= BIT(i + 4); /* VID IRQ */
+ else
+ main_disable |= BIT(i + 4); /* VID IRQ */
+ }
+
+ if (main_enable)
+ dispc_write(dispc, DISPC_IRQENABLE_SET, main_enable);
+
+ if (main_disable)
+ dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQENABLE_SET);
+}
+
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ return dispc_k2g_read_and_clear_irqstatus(dispc);
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ return dispc_k3_read_and_clear_irqstatus(dispc);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_set_irqenable(dispc, mask);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_set_irqenable(dispc, mask);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+
+struct dispc_bus_format {
+ u32 bus_fmt;
+ u32 data_width;
+ bool is_oldi_fmt;
+ enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+};
+
+static const struct dispc_bus_format dispc_bus_formats[] = {
+ { MEDIA_BUS_FMT_RGB444_1X12, 12, false, 0 },
+ { MEDIA_BUS_FMT_RGB565_1X16, 16, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X18, 18, false, 0 },
+ { MEDIA_BUS_FMT_RGB888_1X24, 24, false, 0 },
+ { MEDIA_BUS_FMT_RGB101010_1X30, 30, false, 0 },
+ { MEDIA_BUS_FMT_RGB121212_1X36, 36, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, true, SPWG_18 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, true, SPWG_24 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, true, JEIDA_24 },
+};
+
+static const
+struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ u32 bus_fmt, u32 bus_flags)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_bus_formats); ++i) {
+ if (dispc_bus_formats[i].bus_fmt == bus_fmt)
+ return &dispc_bus_formats[i];
+ }
+
+ return NULL;
+}
+
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+ if (!fmt) {
+ dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
+ __func__, tstate->bus_format);
+ return -EINVAL;
+ }
+
+ if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+ fmt->is_oldi_fmt) {
+ dev_dbg(dispc->dev, "%s: %s is not an OLDI port\n",
+ __func__, dispc->feat->vp_name[hw_videoport]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+{
+ u32 val = power ? 0 : OLDI_PWRDN_TX;
+
+ if (WARN_ON(!dispc->oldi_io_ctrl))
+ return;
+
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+}
+
+static void dispc_set_num_datalines(struct dispc_device *dispc,
+ u32 hw_videoport, int num_lines)
+{
+ int v;
+
+ switch (num_lines) {
+ case 12:
+ v = 0; break;
+ case 16:
+ v = 1; break;
+ case 18:
+ v = 2; break;
+ case 24:
+ v = 3; break;
+ case 30:
+ v = 4; break;
+ case 36:
+ v = 5; break;
+ default:
+ WARN_ON(1);
+ v = 3;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+}
+
+static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_bus_format *fmt)
+{
+ u32 oldi_cfg = 0;
+ u32 oldi_reset_bit = BIT(5 + hw_videoport);
+ int count = 0;
+
+ /*
+ * For the moment DUALMODESYNC, MASTERSLAVE, MODE, and SRC
+ * bits of DISPC_VP_DSS_OLDI_CFG are set statically to 0.
+ */
+
+ if (fmt->data_width == 24)
+ oldi_cfg |= BIT(8); /* MSB */
+ else if (fmt->data_width != 18)
+ dev_warn(dispc->dev, "%s: %d port width not supported\n",
+ __func__, fmt->data_width);
+
+ oldi_cfg |= BIT(7); /* DEPOL */
+
+ oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+
+ oldi_cfg |= BIT(12); /* SOFTRST */
+
+ oldi_cfg |= BIT(0); /* ENABLE */
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
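+ /* bounded busy-poll for the OLDI reset-done bit in DSS_SYSSTATUS */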
+ while (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)) &&
+ count < 10000)
+ count++;
+
+ if (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)))
+ dev_warn(dispc->dev, "%s: timed out waiting for OLDI reset\n",
+ __func__);
+}
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_oldi_tx_power(dispc, true);
+
+ dispc_enable_oldi(dispc, hw_videoport, fmt);
+ }
+}
+
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct drm_display_mode *mode = &state->adjusted_mode;
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ bool align, onoff, rf, ieo, ipc, ihs, ivs;
+ const struct dispc_bus_format *fmt;
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
+ FLD_VAL(hsw - 1, 7, 0) |
+ FLD_VAL(hfp - 1, 19, 8) |
+ FLD_VAL(hbp - 1, 31, 20));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
+ FLD_VAL(vsw - 1, 7, 0) |
+ FLD_VAL(vfp, 19, 8) |
+ FLD_VAL(vbp, 31, 20));
+
+ ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+ ihs = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+ ieo = !!(tstate->bus_flags & DRM_BUS_FLAG_DE_LOW);
+
+ ipc = !!(tstate->bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE);
+
+ /* always use the 'rf' setting */
+ onoff = true;
+
+ rf = !!(tstate->bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE);
+
+ /* always use aligned syncs */
+ align = true;
+
+ /* always use DE_HIGH for OLDI */
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+ ieo = false;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
+ FLD_VAL(align, 18, 18) |
+ FLD_VAL(onoff, 17, 17) |
+ FLD_VAL(rf, 16, 16) |
+ FLD_VAL(ieo, 15, 15) |
+ FLD_VAL(ipc, 14, 14) |
+ FLD_VAL(ihs, 13, 13) |
+ FLD_VAL(ivs, 12, 12));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
+ FLD_VAL(mode->hdisplay - 1, 11, 0) |
+ FLD_VAL(mode->vdisplay - 1, 27, 16));
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+}
+
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
+{
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
+{
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+
+ dispc_oldi_tx_power(dispc, false);
+ }
+}
+
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
+{
+ return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+}
+
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
+{
+ WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+}
+
+enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
+
+static u16 c8_to_c12(u8 c8, enum c8_to_c12_mode mode)
+{
+ u16 c12;
+
+ c12 = c8 << 4;
+
+ switch (mode) {
+ case C8_TO_C12_REPLICATE:
+ /* Replicate the 4 MSBs of c8 into the 4 LSBs for full-scale c12 */
+ c12 |= c8 >> 4;
+ break;
+ case C8_TO_C12_MAX:
+ c12 |= 0xF;
+ break;
+ default:
+ case C8_TO_C12_MIN:
+ break;
+ }
+
+ return c12;
+}
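+
+/*
+ * For example: c8_to_c12(0xab, C8_TO_C12_REPLICATE) == 0xaba,
+ * c8_to_c12(0xab, C8_TO_C12_MAX) == 0xabf and
+ * c8_to_c12(0xab, C8_TO_C12_MIN) == 0xab0.
+ */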
+
+static u64 argb8888_to_argb12121212(u32 argb8888, enum c8_to_c12_mode m)
+{
+ u8 a, r, g, b;
+ u64 v;
+
+ a = (argb8888 >> 24) & 0xff;
+ r = (argb8888 >> 16) & 0xff;
+ g = (argb8888 >> 8) & 0xff;
+ b = (argb8888 >> 0) & 0xff;
+
+ v = ((u64)c8_to_c12(a, m) << 36) | ((u64)c8_to_c12(r, m) << 24) |
+ ((u64)c8_to_c12(g, m) << 12) | (u64)c8_to_c12(b, m);
+
+ return v;
+}
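+
+/*
+ * E.g. argb8888_to_argb12121212(0xff00ff00, C8_TO_C12_REPLICATE)
+ * returns 0xfff000fff000, each 8-bit component widened to 12 bits.
+ */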
+
+static void dispc_vp_set_default_color(struct dispc_device *dispc,
+ u32 hw_videoport, u32 default_color)
+{
+ u64 v;
+
+ v = argb8888_to_argb12121212(default_color, C8_TO_C12_REPLICATE);
+
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR, v & 0xffffffff);
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff);
+}
+
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode)
+{
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+ enum dispc_vp_bus_type bus_type;
+ int max_pclk;
+
+ bus_type = dispc->feat->vp_bus_type[hw_videoport];
+
+ max_pclk = dispc->feat->max_pclk_khz[bus_type];
+
+ if (WARN_ON(max_pclk == 0))
+ return MODE_BAD;
+
+ if (mode->clock < dispc->feat->min_pclk_khz)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > max_pclk)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 4096)
+ return MODE_BAD;
+
+ if (mode->vdisplay > 4096)
+ return MODE_BAD;
+
+ /* TODO: add interlace support */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /*
+ * Enforce that the output width is divisible by 2. Strictly, this
+ * is only needed in the following cases:
+ * - YUV output selected (BT656, BT1120)
+ * - Dithering enabled
+ * - TDM with TDMCycleFormat == 3
+ * But for simplicity we enforce it always.
+ */
+ if ((mode->hdisplay % 2) != 0)
+ return MODE_BAD_HVALUE;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ if (hsw < 1 || hsw > 256 ||
+ hfp < 1 || hfp > 4096 ||
+ hbp < 1 || hbp > 4096)
+ return MODE_BAD_HVALUE;
+
+ if (vsw < 1 || vsw > 256 ||
+ vfp > 4095 || vbp > 4095)
+ return MODE_BAD_VVALUE;
+
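+ /*
+ * Average bandwidth in bytes/s: pixel clock in Hz, scaled by the
+ * active/total pixel ratio, times bytes per pixel.
+ */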
+ if (dispc->memory_bandwidth_limit) {
+ const unsigned int bpp = 4;
+ u64 bandwidth;
+
+ bandwidth = 1000 * mode->clock;
+ bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
+ bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
+
+ if (dispc->memory_bandwidth_limit < bandwidth)
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ int ret = clk_prepare_enable(dispc->vp_clk[hw_videoport]);
+
+ if (ret)
+ dev_err(dispc->dev, "%s: enabling clk failed: %d\n", __func__,
+ ret);
+
+ return ret;
+}
+
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ clk_disable_unprepare(dispc->vp_clk[hw_videoport]);
+}
+
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate achieved after clock-divider rounding.
+ */
+static
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
+{
+ int r = rate / 100, rr = real_rate / 100;
+
+ return (unsigned int)(abs(((rr - r) * 100) / r));
+}
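+
+/*
+ * E.g. dispc_pclk_diff(100000000, 94000000) == 6, i.e. the achieved
+ * rate is 6% off the requested one. The integer division truncates
+ * sub-percent differences.
+ */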
+
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate)
+{
+ int r;
+ unsigned long new_rate;
+
+ r = clk_set_rate(dispc->vp_clk[hw_videoport], rate);
+ if (r) {
+ dev_err(dispc->dev, "vp%d: failed to set clk rate to %lu\n",
+ hw_videoport, rate);
+ return r;
+ }
+
+ new_rate = clk_get_rate(dispc->vp_clk[hw_videoport]);
+
+ if (dispc_pclk_diff(rate, new_rate) > 5)
+ dev_warn(dispc->dev,
+ "vp%d: Clock rate %lu differs over 5%% from requested %lu\n",
+ hw_videoport, new_rate, rate);
+
+ dev_dbg(dispc->dev, "vp%d: new rate %lu Hz (requested %lu Hz)\n",
+ hw_videoport, clk_get_rate(dispc->vp_clk[hw_videoport]), rate);
+
+ return 0;
+}
+
+/* OVR */
+static void dispc_k2g_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ /* On K2G there is only one plane and no OVR; just set the plane position */
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_POSITION,
+ x | (y << 16));
+}
+
+static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ x, 17, 6);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ y, 30, 19);
+}
+
+static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ x, 13, 0);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ y, 29, 16);
+}
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable)
+{
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ !!enable, 0, 0);
+}
+
+/* CSC */
+enum csc_ctm {
+ CSC_RR, CSC_RG, CSC_RB,
+ CSC_GR, CSC_GG, CSC_GB,
+ CSC_BR, CSC_BG, CSC_BB,
+};
+
+enum csc_yuv2rgb {
+ CSC_RY, CSC_RCB, CSC_RCR,
+ CSC_GY, CSC_GCB, CSC_GCR,
+ CSC_BY, CSC_BCB, CSC_BCR,
+};
+
+enum csc_rgb2yuv {
+ CSC_YR, CSC_YG, CSC_YB,
+ CSC_CBR, CSC_CBG, CSC_CBB,
+ CSC_CRR, CSC_CRG, CSC_CRB,
+};
+
+struct dispc_csc_coef {
+ void (*to_regval)(const struct dispc_csc_coef *csc, u32 *regval);
+ int m[9];
+ int preoffset[3];
+ int postoffset[3];
+ enum { CLIP_LIMITED_RANGE = 0, CLIP_FULL_RANGE = 1, } clipping;
+ const char *name;
+};
+
+#define DISPC_CSC_REGVAL_LEN 8
+
+static
+void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+ regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
+ regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
+ regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
+#undef OVAL
+}
+
+#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+static
+void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RY], csc->m[CSC_RCR]);
+ regval[1] = CVAL(csc->m[CSC_RCB], csc->m[CSC_GY]);
+ regval[2] = CVAL(csc->m[CSC_GCR], csc->m[CSC_GCB]);
+ regval[3] = CVAL(csc->m[CSC_BY], csc->m[CSC_BCR]);
+ regval[4] = CVAL(csc->m[CSC_BCB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+__maybe_unused static
+void dispc_csc_rgb2yuv_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_YR], csc->m[CSC_YG]);
+ regval[1] = CVAL(csc->m[CSC_YB], csc->m[CSC_CRR]);
+ regval[2] = CVAL(csc->m[CSC_CRG], csc->m[CSC_CRB]);
+ regval[3] = CVAL(csc->m[CSC_CBR], csc->m[CSC_CBG]);
+ regval[4] = CVAL(csc->m[CSC_CBB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+static void dispc_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RR], csc->m[CSC_RG]);
+ regval[1] = CVAL(csc->m[CSC_RB], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_GG], csc->m[CSC_GB]);
+ regval[3] = CVAL(csc->m[CSC_BR], csc->m[CSC_BG]);
+ regval[4] = CVAL(csc->m[CSC_BB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), /* K2G has no post offset support */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ if (regval[7] != 0)
+ dev_warn(dispc->dev, "%s: No post offset support for %s\n",
+ __func__, csc->name);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), DISPC_VID_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, }, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, }, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Limited",
+};
+
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, }, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, }, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Limited",
+};
+
+static const struct {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ const struct dispc_csc_coef *csc;
+} dispc_csc_table[] = {
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt601_full, },
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt601_lim, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt709_full, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt709_lim, },
+};
+
+static const
+struct dispc_csc_coef *dispc_find_csc(enum drm_color_encoding encoding,
+ enum drm_color_range range)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_csc_table); i++) {
+ if (dispc_csc_table[i].encoding == encoding &&
+ dispc_csc_table[i].range == range) {
+ return dispc_csc_table[i].csc;
+ }
+ }
+ return NULL;
+}
+
+static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state)
+{
+ const struct dispc_csc_coef *coef;
+
+ coef = dispc_find_csc(state->color_encoding, state->color_range);
+ if (!coef) {
+ dev_err(dispc->dev, "%s: CSC (%u,%u) not found\n",
+ __func__, state->color_encoding, state->color_range);
+ return;
+ }
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vid_write_csc(dispc, hw_plane, coef);
+ else
+ dispc_k3_vid_write_csc(dispc, hw_plane, coef);
+}
+
+static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
+ bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+}
+
+/* SCALER */
+
+static u32 dispc_calc_fir_inc(u32 in, u32 out)
+{
+ return (u32)div_u64(0x200000ull * in, out);
+}
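+
+/*
+ * The FIR increment is the in/out ratio in fixed point with 21
+ * fractional bits, e.g. dispc_calc_fir_inc(1920, 960) == 0x400000
+ * (2.0, i.e. a 2x downscale).
+ */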
+
+enum dispc_vid_fir_coef_set {
+ DISPC_VID_FIR_COEF_HORIZ,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ DISPC_VID_FIR_COEF_VERT,
+ DISPC_VID_FIR_COEF_VERT_UV,
+};
+
+static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
+ u32 hw_plane,
+ enum dispc_vid_fir_coef_set coef_set,
+ const struct tidss_scale_coefs *coefs)
+{
+ static const u16 c0_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H0,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H0_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V0,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V0_C,
+ };
+
+ static const u16 c12_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H12,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H12_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V12,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V12_C,
+ };
+
+ const u16 c0_base = c0_regs[coef_set];
+ const u16 c12_base = c12_regs[coef_set];
+ int phase;
+
+ if (!coefs) {
+ dev_err(dispc->dev, "%s: No coefficients given.\n", __func__);
+ return;
+ }
+
+ for (phase = 0; phase <= 8; ++phase) {
+ u16 reg = c0_base + phase * 4;
+ u16 c0 = coefs->c0[phase];
+
+ dispc_vid_write(dispc, hw_plane, reg, c0);
+ }
+
+ for (phase = 0; phase <= 15; ++phase) {
+ u16 reg = c12_base + phase * 4;
+ s16 c1, c2;
+ u32 c12;
+
+ c1 = coefs->c1[phase];
+ c2 = coefs->c2[phase];
+ c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+
+ dispc_vid_write(dispc, hw_plane, reg, c12);
+ }
+}
+
+static bool dispc_fourcc_is_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct dispc_scaling_params {
+ int xinc, yinc;
+ u32 in_w, in_h, in_w_uv, in_h_uv;
+ u32 fir_xinc, fir_yinc, fir_xinc_uv, fir_yinc_uv;
+ bool scale_x, scale_y;
+ const struct tidss_scale_coefs *xcoef, *ycoef, *xcoef_uv, *ycoef_uv;
+ bool five_taps;
+};
+
+static int dispc_vid_calc_scaling(struct dispc_device *dispc,
+ const struct drm_plane_state *state,
+ struct dispc_scaling_params *sp,
+ bool lite_plane)
+{
+ const struct dispc_features_scaling *f = &dispc->feat->scaling;
+ u32 fourcc = state->fb->format->format;
+ u32 in_width_max_5tap = f->in_width_max_5tap_rgb;
+ u32 in_width_max_3tap = f->in_width_max_3tap_rgb;
+ u32 downscale_limit;
+ u32 in_width_max;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->xinc = 1;
+ sp->yinc = 1;
+ sp->in_w = state->src_w >> 16;
+ sp->in_w_uv = sp->in_w;
+ sp->in_h = state->src_h >> 16;
+ sp->in_h_uv = sp->in_h;
+
+ sp->scale_x = sp->in_w != state->crtc_w;
+ sp->scale_y = sp->in_h != state->crtc_h;
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ in_width_max_5tap = f->in_width_max_5tap_yuv;
+ in_width_max_3tap = f->in_width_max_3tap_yuv;
+
+ sp->in_w_uv >>= 1;
+ sp->scale_x = true;
+
+ if (fourcc == DRM_FORMAT_NV12) {
+ sp->in_h_uv >>= 1;
+ sp->scale_y = true;
+ }
+ }
+
+ /* Skip the rest if no scaling is used */
+ if ((!sp->scale_x && !sp->scale_y) || lite_plane)
+ return 0;
+
+ if (sp->in_w > in_width_max_5tap) {
+ sp->five_taps = false;
+ in_width_max = in_width_max_3tap;
+ downscale_limit = f->downscale_limit_3tap;
+ } else {
+ sp->five_taps = true;
+ in_width_max = in_width_max_5tap;
+ downscale_limit = f->downscale_limit_5tap;
+ }
+
+ if (sp->scale_x) {
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+
+ if (sp->fir_xinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_w, state->src_w >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_xinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->xinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_w,
+ state->crtc_w),
+ downscale_limit);
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u < 1/%u\n",
+ __func__, state->crtc_w,
+ state->src_w >> 16,
+ downscale_limit * f->xinc_max);
+ return -EINVAL;
+ }
+
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ while (sp->in_w > in_width_max) {
+ sp->xinc++;
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: Too wide input buffer %u > %u\n", __func__,
+ state->src_w >> 16, in_width_max * f->xinc_max);
+ return -EINVAL;
+ }
+
+ /*
+ * We need even line length for YUV formats. Decimation
+ * can lead to odd length, so we need to make it even
+ * again.
+ */
+ if (dispc_fourcc_is_yuv(fourcc))
+ sp->in_w &= ~1;
+
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+ }
+
+ if (sp->scale_y) {
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h, state->crtc_h);
+
+ if (sp->fir_yinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: Y-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_h, state->src_h >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_yinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->yinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_h,
+ state->crtc_h),
+ downscale_limit);
+
+ sp->in_h /= sp->yinc;
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h,
+ state->crtc_h);
+ }
+ }
+
+ dev_dbg(dispc->dev,
+ "%s: %ux%u decim %ux%u -> %ux%u firinc %u.%03ux%u.%03u taps %u -> %ux%u\n",
+ __func__, state->src_w >> 16, state->src_h >> 16,
+ sp->xinc, sp->yinc, sp->in_w, sp->in_h,
+ sp->fir_xinc / 0x200000u,
+ ((sp->fir_xinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->fir_yinc / 0x200000u,
+ ((sp->fir_yinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->five_taps ? 5 : 3,
+ state->crtc_w, state->crtc_h);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ sp->in_w_uv /= sp->xinc;
+ sp->fir_xinc_uv = dispc_calc_fir_inc(sp->in_w_uv,
+ state->crtc_w);
+ sp->xcoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_xinc_uv,
+ true);
+ }
+ if (sp->scale_y) {
+ sp->in_h_uv /= sp->yinc;
+ sp->fir_yinc_uv = dispc_calc_fir_inc(sp->in_h_uv,
+ state->crtc_h);
+ sp->ycoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_yinc_uv,
+ sp->five_taps);
+ }
+ }
+
+ if (sp->scale_x)
+ sp->xcoef = tidss_get_scale_coefs(dispc->dev, sp->fir_xinc,
+ true);
+
+ if (sp->scale_y)
+ sp->ycoef = tidss_get_scale_coefs(dispc->dev, sp->fir_yinc,
+ sp->five_taps);
+
+ return 0;
+}
+
+static void dispc_vid_set_scaling(struct dispc_device *dispc,
+ u32 hw_plane,
+ struct dispc_scaling_params *sp,
+ u32 fourcc)
+{
+ /* HORIZONTAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_x, 7, 7);
+
+ /* VERTICAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_y, 8, 8);
+
+ /* Skip the rest if no scaling is used */
+ if (!sp->scale_x && !sp->scale_y)
+ return;
+
+ /* VERTICAL 5-TAPS */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->five_taps, 21, 21);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH2,
+ sp->fir_xinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ sp->xcoef_uv);
+ }
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV2,
+ sp->fir_yinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT_UV,
+ sp->ycoef_uv);
+ }
+ }
+
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH, sp->fir_xinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ,
+ sp->xcoef);
+ }
+
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV, sp->fir_yinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT, sp->ycoef);
+ }
+}
+
+/* OTHER */
+
+static const struct {
+ u32 fourcc;
+ u8 dss_code;
+} dispc_color_formats[] = {
+ { DRM_FORMAT_ARGB4444, 0x0, },
+ { DRM_FORMAT_ABGR4444, 0x1, },
+ { DRM_FORMAT_RGBA4444, 0x2, },
+
+ { DRM_FORMAT_RGB565, 0x3, },
+ { DRM_FORMAT_BGR565, 0x4, },
+
+ { DRM_FORMAT_ARGB1555, 0x5, },
+ { DRM_FORMAT_ABGR1555, 0x6, },
+
+ { DRM_FORMAT_ARGB8888, 0x7, },
+ { DRM_FORMAT_ABGR8888, 0x8, },
+ { DRM_FORMAT_RGBA8888, 0x9, },
+ { DRM_FORMAT_BGRA8888, 0xa, },
+
+ { DRM_FORMAT_RGB888, 0xb, },
+ { DRM_FORMAT_BGR888, 0xc, },
+
+ { DRM_FORMAT_ARGB2101010, 0xe, },
+ { DRM_FORMAT_ABGR2101010, 0xf, },
+
+ { DRM_FORMAT_XRGB4444, 0x20, },
+ { DRM_FORMAT_XBGR4444, 0x21, },
+ { DRM_FORMAT_RGBX4444, 0x22, },
+
+ { DRM_FORMAT_ARGB1555, 0x25, },
+ { DRM_FORMAT_ABGR1555, 0x26, },
+
+ { DRM_FORMAT_XRGB8888, 0x27, },
+ { DRM_FORMAT_XBGR8888, 0x28, },
+ { DRM_FORMAT_RGBX8888, 0x29, },
+ { DRM_FORMAT_BGRX8888, 0x2a, },
+
+ { DRM_FORMAT_XRGB2101010, 0x2e, },
+ { DRM_FORMAT_XBGR2101010, 0x2f, },
+
+ { DRM_FORMAT_YUYV, 0x3e, },
+ { DRM_FORMAT_UYVY, 0x3f, },
+
+ { DRM_FORMAT_NV12, 0x3d, },
+};
+
+static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
+ u32 hw_plane, u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (dispc_color_formats[i].fourcc == fourcc) {
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ dispc_color_formats[i].dss_code,
+ 6, 1);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len)
+{
+ WARN_ON(!dispc->fourccs);
+
+ *len = dispc->num_fourccs;
+
+ return dispc->fourccs;
+}
+
+static s32 pixinc(int pixels, u8 ps)
+{
+ if (pixels == 1)
+ return 1;
+ else if (pixels > 1)
+ return 1 + (pixels - 1) * ps;
+ else if (pixels < 0)
+ return 1 - (-pixels + 1) * ps;
+
+ WARN_ON(1);
+ return 0;
+}
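+
+/*
+ * E.g. with ps == 4 (bytes per pixel): pixinc(1, 4) == 1 steps one
+ * pixel forward, pixinc(2, 4) == 5 skips every other pixel, and
+ * pixinc(-1, 4) == -7 steps one pixel backwards.
+ */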
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ bool need_scaling = state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h;
+ struct dispc_scaling_params scaling;
+ int ret;
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (!dispc_find_csc(state->color_encoding,
+ state->color_range)) {
+ dev_dbg(dispc->dev,
+ "%s: Unsupported CSC (%u,%u) for HW plane %u\n",
+ __func__, state->color_encoding,
+ state->color_range, hw_plane);
+ return -EINVAL;
+ }
+ }
+
+ if (need_scaling) {
+ if (lite) {
+ dev_dbg(dispc->dev,
+ "%s: Lite plane %u can't scale %ux%u!=%ux%u\n",
+ __func__, hw_plane,
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_w, state->crtc_h);
+ return -EINVAL;
+ }
+ ret = dispc_vid_calc_scaling(dispc, state, &scaling, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static
+dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ gem = drm_fb_cma_get_gem_obj(state->fb, 0);
+
+ return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+ y * fb->pitches[0];
+}
+
+static
+dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ if (WARN_ON(state->fb->format->num_planes != 2))
+ return 0;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+
+ return gem->paddr + fb->offsets[1] +
+ (x * fb->format->cpp[1] / fb->format->hsub) +
+ (y * fb->pitches[1] / fb->format->vsub);
+}
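+
+/*
+ * For NV12, the only two-plane format in dispc_color_formats (hsub =
+ * vsub = 2, cpp[1] == 2), the offsets above resolve to x bytes and
+ * y / 2 rows into the chroma plane.
+ */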
+
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ u16 cpp = state->fb->format->cpp[0];
+ u32 fb_width = state->fb->pitches[0] / cpp;
+ dma_addr_t paddr = dispc_plane_state_paddr(state);
+ struct dispc_scaling_params scale;
+
+ dispc_vid_calc_scaling(dispc, state, &scale, lite);
+
+ dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
+ (scale.in_w - 1) | ((scale.in_h - 1) << 16));
+
+ /* For YUV422 format we use the macropixel size for pixel inc */
+ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp * 2));
+ else
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp));
+
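+ /*
+ * E.g. with xinc == yinc == 1 and in_w == fb_width the row increment
+ * is 1, so rows are read back to back; yinc == 2 gives
+ * 1 + fb_width * cpp, skipping every other line.
+ */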
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC,
+ pixinc(1 + (scale.yinc * fb_width -
+ scale.xinc * scale.in_w),
+ cpp));
+
+ if (state->fb->format->num_planes == 2) {
+ u16 cpp_uv = state->fb->format->cpp[1];
+ u32 fb_width_uv = state->fb->pitches[1] / cpp_uv;
+ dma_addr_t p_uv_addr = dispc_plane_state_p_uv_addr(state);
+
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_0, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_0, (u64)p_uv_addr >> 32);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_1, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_1, (u64)p_uv_addr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC_UV,
+ pixinc(1 + (scale.yinc * fb_width_uv -
+ scale.xinc * scale.in_w_uv),
+ cpp_uv));
+ }
+
+ if (!lite) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
+ (state->crtc_w - 1) |
+ ((state->crtc_h - 1) << 16));
+
+ dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
+ }
+
+ /* enable YUV->RGB color conversion */
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ dispc_vid_csc_setup(dispc, hw_plane, state);
+ dispc_vid_csc_enable(dispc, hw_plane, true);
+ } else {
+ dispc_vid_csc_enable(dispc, hw_plane, false);
+ }
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
+ 0xFF & (state->alpha >> 8));
+
+ if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 28, 28);
+ else
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 28, 28);
+
+ return 0;
+}
+
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+
+ return 0;
+}
+
+static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
+{
+ return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+}
+
+static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_k2g_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+ /*
+ * Prefetch up to fifo high-threshold value to minimize the
+ * possibility of underflows. Note that this means the PRELOAD
+ * register is ignored.
+ */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 19, 19);
+ }
+}
+
+static void dispc_k3_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+ u32 cba_lo_pri = 1;
+ u32 cba_hi_pri = 0;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+ /* Prefetch up to the PRELOAD value */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 19, 19);
+ }
+}
+
+static void dispc_plane_init(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_plane_init(dispc);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_plane_init(dispc);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void dispc_vp_init(struct dispc_device *dispc)
+{
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* Enable the gamma Shadow bit-field for all VPs */
+ for (i = 0; i < dispc->feat->num_vps; i++)
+ VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+}
+
+static void dispc_initial_config(struct dispc_device *dispc)
+{
+ dispc_plane_init(dispc);
+ dispc_vp_init(dispc);
+
+ /* Note: Hardcoded DPI routing on J721E for now */
+ if (dispc->feat->subrev == DISPC_J721E) {
+ dispc_write(dispc, DISPC_CONNECTIONS,
+ FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */
+ FLD_VAL(8, 7, 4) /* VP3 to DPI1 */
+ );
+ }
+}
+
+static void dispc_k2g_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_GAMMA_TABLE,
+ v);
+ }
+}
+
+static void dispc_am65x_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
+
+static void dispc_j721e_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_10BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ if (i == 0)
+ v |= 1 << 31;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
+
+static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static const struct drm_color_lut dispc_vp_gamma_default_lut[] = {
+ { .red = 0, .green = 0, .blue = 0, },
+ { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+static void dispc_vp_set_gamma(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_color_lut *lut,
+ unsigned int length)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ u32 hwbits;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d, lut len %u, hw len %u\n",
+ __func__, hw_videoport, length, hwlen);
+
+ if (dispc->feat->vp_feat.color.gamma_type == TIDSS_GAMMA_10BIT)
+ hwbits = 10;
+ else
+ hwbits = 8;
+
+ if (!lut || length < 2) {
+ lut = dispc_vp_gamma_default_lut;
+ length = ARRAY_SIZE(dispc_vp_gamma_default_lut);
+ }
+
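+ /*
+ * Piecewise-linearly interpolate the input LUT onto the hwlen-entry
+ * hardware table: each input segment [i, i + 1] covers table rows
+ * [first, last] and is filled with a linear blend of its endpoints.
+ */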
+ for (i = 0; i < length - 1; ++i) {
+ unsigned int first = i * (hwlen - 1) / (length - 1);
+ unsigned int last = (i + 1) * (hwlen - 1) / (length - 1);
+ unsigned int w = last - first;
+ u16 r, g, b;
+ unsigned int j;
+
+ if (w == 0)
+ continue;
+
+ for (j = 0; j <= w; j++) {
+ r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w;
+ g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
+ b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w;
+
+ r >>= 16 - hwbits;
+ g >>= 16 - hwbits;
+ b >>= 16 - hwbits;
+
+ table[first + j] = (r << (hwbits * 2)) |
+ (g << hwbits) | b;
+ }
+ }
+
+ dispc_vp_write_gamma_table(dispc, hw_videoport);
+}
+
+static s16 dispc_S31_32_to_s2_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x200);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1FF);
+
+ return ret;
+}
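+
+/*
+ * E.g. 1.0 in S31.32 (0x0000000100000000) maps to 0x100, i.e. 1.0
+ * with an 8-bit fraction; magnitudes beyond the s2.8 range are
+ * clamped.
+ */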
+
+static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s2_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s2_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s2_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s2_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s2_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s2_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s2_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s2_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
+}
+
+#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \
+ FLD_VAL(xB, 31, 22))
+
+static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_BB], csc->m[CSC_BG], csc->m[CSC_BR]);
+ regval[1] = CVAL(csc->m[CSC_GB], csc->m[CSC_GG], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_RB], csc->m[CSC_RG], csc->m[CSC_RR]);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_cpr_coef_reg[] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ /* K2G CPR coefficients are packed into three registers. */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ dispc_k2g_vp_csc_cpr_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vp_cpr_coef_reg); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_cpr_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 cprenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef cpr;
+
+ dispc_k2g_cpr_from_ctm(ctm, &cpr);
+ dispc_k2g_vp_write_csc(dispc, hw_videoport, &cpr);
+ cprenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ cprenable, 15, 15);
+}
+
+static s16 dispc_S31_32_to_s3_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x400);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x3FF);
+
+ return ret;
+}
+
+static void dispc_csc_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s3_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s3_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s3_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s3_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s3_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s3_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s3_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s3_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s3_8(ctm->matrix[8]);
+}
+
+static void dispc_k3_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ DISPC_VP_CSC_COEF3, DISPC_VP_CSC_COEF4, DISPC_VP_CSC_COEF5,
+ DISPC_VP_CSC_COEF6, DISPC_VP_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(regval); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 colorconvenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef csc;
+
+ dispc_csc_from_ctm(ctm, &csc);
+ dispc_k3_vp_write_csc(dispc, hw_videoport, &csc);
+ colorconvenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ colorconvenable, 24, 24);
+}
+
+static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_crtc_state *state,
+ bool newmodeset)
+{
+ struct drm_color_lut *lut = NULL;
+ struct drm_color_ctm *ctm = NULL;
+ unsigned int length = 0;
+
+ if (!(state->color_mgmt_changed || newmodeset))
+ return;
+
+ if (state->gamma_lut) {
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ length = state->gamma_lut->length / sizeof(*lut);
+ }
+
+ dispc_vp_set_gamma(dispc, hw_videoport, lut, length);
+
+ if (state->ctm)
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vp_set_ctm(dispc, hw_videoport, ctm);
+ else
+ dispc_k3_vp_set_ctm(dispc, hw_videoport, ctm);
+}
+
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset)
+{
+ dispc_vp_set_default_color(dispc, hw_videoport, 0);
+ dispc_vp_set_color_mgmt(dispc, hw_videoport, state, newmodeset);
+}
+
+int dispc_runtime_suspend(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "suspend\n");
+
+ dispc->is_enabled = false;
+
+ clk_disable_unprepare(dispc->fclk);
+
+ return 0;
+}
+
+int dispc_runtime_resume(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "resume\n");
+
+ clk_prepare_enable(dispc->fclk);
+
+ if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
+
+ dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
+ dispc_read(dispc, DSS_REVISION));
+
+ dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
+ REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
+ REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+
+ if (dispc->feat->subrev == DISPC_AM65X)
+ dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
+ REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
+ REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+
+ dev_dbg(dispc->dev, "DISPC IDLE %d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+
+ dispc_initial_config(dispc);
+
+ dispc->is_enabled = true;
+
+ tidss_irq_resume(dispc->tidss);
+
+ return 0;
+}
+
+void dispc_remove(struct tidss_device *tidss)
+{
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ tidss->dispc = NULL;
+}
+
+static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
+ void __iomem **base)
+{
+ struct resource *res;
+ void __iomem *b;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get mem resource '%s'\n", name);
+ return -EINVAL;
+ }
+
+ b = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(b)) {
+ dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name);
+ return PTR_ERR(b);
+ }
+
+ *base = b;
+
+ return 0;
+}
+
+static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
+ struct dispc_device *dispc)
+{
+ dispc->oldi_io_ctrl =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "ti,am65x-oldi-io-ctrl");
+ if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
+ dispc->oldi_io_ctrl = NULL;
+ } else if (IS_ERR(dispc->oldi_io_ctrl)) {
+ dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
+ __func__, PTR_ERR(dispc->oldi_io_ctrl));
+ return PTR_ERR(dispc->oldi_io_ctrl);
+ }
+ return 0;
+}
+
+int dispc_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dispc_device *dispc;
+ const struct dispc_features *feat;
+ unsigned int i, num_fourccs;
+ int r = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ feat = tidss->feat;
+
+ if (feat->subrev != DISPC_K2G) {
+ r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (r)
+ dev_warn(dev, "cannot set DMA masks to 48-bit\n");
+ }
+
+ dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL);
+ if (!dispc)
+ return -ENOMEM;
+
+ dispc->fourccs = devm_kcalloc(dev, ARRAY_SIZE(dispc_color_formats),
+ sizeof(*dispc->fourccs), GFP_KERNEL);
+ if (!dispc->fourccs)
+ return -ENOMEM;
+
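+ /*
+ * Build the list of supported formats, skipping the YUV
+ * formats on devices with errata i2000.
+ */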
+ num_fourccs = 0;
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (feat->errata.i2000 &&
+ dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
+ continue;
+ dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
+ }
+ dispc->num_fourccs = num_fourccs;
+ dispc->tidss = tidss;
+ dispc->dev = dev;
+ dispc->feat = feat;
+
+ dispc_common_regmap = dispc->feat->common_regs;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->common,
+ &dispc->base_common);
+ if (r)
+ return r;
+
+ for (i = 0; i < dispc->feat->num_planes; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ &dispc->base_vid[i]);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < dispc->feat->num_vps; i++) {
+ u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
+ u32 *gamma_table;
+ struct clk *clk;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->ovr_name[i],
+ &dispc->base_ovr[i]);
+ if (r)
+ return r;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->vp_name[i],
+ &dispc->base_vp[i]);
+ if (r)
+ return r;
+
+ clk = devm_clk_get(dev, dispc->feat->vpclk_name[i]);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "%s: Failed to get clk %s:%ld\n", __func__,
+ dispc->feat->vpclk_name[i], PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ dispc->vp_clk[i] = clk;
+
+ gamma_table = devm_kmalloc_array(dev, gamma_size,
+ sizeof(*gamma_table),
+ GFP_KERNEL);
+ if (!gamma_table)
+ return -ENOMEM;
+ dispc->vp_data[i].gamma_table = gamma_table;
+ }
+
+ if (feat->subrev == DISPC_AM65X) {
+ r = dispc_init_am65x_oldi_io_ctrl(dev, dispc);
+ if (r)
+ return r;
+ }
+
+ dispc->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(dispc->fclk)) {
+ dev_err(dev, "%s: Failed to get fclk: %ld\n",
+ __func__, PTR_ERR(dispc->fclk));
+ return PTR_ERR(dispc->fclk);
+ }
+ dev_dbg(dev, "DSS fclk %lu Hz\n", clk_get_rate(dispc->fclk));
+
+ of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
+ &dispc->memory_bandwidth_limit);
+
+ tidss->dispc = dispc;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
new file mode 100644
index 000000000000..a4a68249e44b
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_H__
+#define __TIDSS_DISPC_H__
+
+#include "tidss_drv.h"
+
+struct dispc_device;
+
+struct drm_crtc_state;
+
+enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
+
+struct tidss_vp_feat {
+ struct tidss_vp_color_feat {
+ u32 gamma_size;
+ enum tidss_gamma_type gamma_type;
+ bool has_ctm;
+ } color;
+};
+
+struct tidss_plane_feat {
+ struct tidss_plane_color_feat {
+ u32 encodings;
+ u32 ranges;
+ enum drm_color_encoding default_encoding;
+ enum drm_color_range default_range;
+ } color;
+ struct tidss_plane_blend_feat {
+ bool global_alpha;
+ } blend;
+};
+
+struct dispc_features_scaling {
+ u32 in_width_max_5tap_rgb;
+ u32 in_width_max_3tap_rgb;
+ u32 in_width_max_5tap_yuv;
+ u32 in_width_max_3tap_yuv;
+ u32 upscale_limit;
+ u32 downscale_limit_5tap;
+ u32 downscale_limit_3tap;
+ u32 xinc_max;
+};
+
+struct dispc_errata {
+ bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
+};
+
+enum dispc_vp_bus_type {
+ DISPC_VP_DPI, /* DPI output */
+ DISPC_VP_OLDI, /* OLDI (LVDS) output */
+ DISPC_VP_INTERNAL, /* SoC internal routing */
+ DISPC_VP_MAX_BUS_TYPE,
+};
+
+enum dispc_dss_subrevision {
+ DISPC_K2G,
+ DISPC_AM65X,
+ DISPC_J721E,
+};
+
+struct dispc_features {
+ int min_pclk_khz;
+ int max_pclk_khz[DISPC_VP_MAX_BUS_TYPE];
+
+ struct dispc_features_scaling scaling;
+
+ enum dispc_dss_subrevision subrev;
+
+ const char *common;
+ const u16 *common_regs;
+ u32 num_vps;
+ const char *vp_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *ovr_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
+ const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
+ struct tidss_vp_feat vp_feat;
+ u32 num_planes;
+ const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
+ bool vid_lite[TIDSS_MAX_PLANES];
+ u32 vid_order[TIDSS_MAX_PLANES];
+
+ struct dispc_errata errata;
+};
+
+extern const struct dispc_features dispc_k2g_feats;
+extern const struct dispc_features dispc_am65x_feats;
+extern const struct dispc_features dispc_j721e_feats;
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer);
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable);
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport);
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode);
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate);
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset);
+
+int dispc_runtime_suspend(struct dispc_device *dispc);
+int dispc_runtime_resume(struct dispc_device *dispc);
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
+
+int dispc_init(struct tidss_device *tidss);
+void dispc_remove(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
new file mode 100644
index 000000000000..88a83a41b6e3
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_REGS_H
+#define __TIDSS_DISPC_REGS_H
+
+enum dispc_common_regs {
+ NOT_APPLICABLE_OFF = 0,
+ DSS_REVISION_OFF,
+ DSS_SYSCONFIG_OFF,
+ DSS_SYSSTATUS_OFF,
+ DISPC_IRQ_EOI_OFF,
+ DISPC_IRQSTATUS_RAW_OFF,
+ DISPC_IRQSTATUS_OFF,
+ DISPC_IRQENABLE_SET_OFF,
+ DISPC_IRQENABLE_CLR_OFF,
+ DISPC_VID_IRQENABLE_OFF,
+ DISPC_VID_IRQSTATUS_OFF,
+ DISPC_VP_IRQENABLE_OFF,
+ DISPC_VP_IRQSTATUS_OFF,
+ WB_IRQENABLE_OFF,
+ WB_IRQSTATUS_OFF,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF,
+ DISPC_GLOBAL_OUTPUT_ENABLE_OFF,
+ DISPC_GLOBAL_BUFFER_OFF,
+ DSS_CBA_CFG_OFF,
+ DISPC_DBG_CONTROL_OFF,
+ DISPC_DBG_STATUS_OFF,
+ DISPC_CLKGATING_DISABLE_OFF,
+ DISPC_SECURE_DISABLE_OFF,
+ FBDC_REVISION_1_OFF,
+ FBDC_REVISION_2_OFF,
+ FBDC_REVISION_3_OFF,
+ FBDC_REVISION_4_OFF,
+ FBDC_REVISION_5_OFF,
+ FBDC_REVISION_6_OFF,
+ FBDC_COMMON_CONTROL_OFF,
+ FBDC_CONSTANT_COLOR_0_OFF,
+ FBDC_CONSTANT_COLOR_1_OFF,
+ DISPC_CONNECTIONS_OFF,
+ DISPC_MSS_VP1_OFF,
+ DISPC_MSS_VP3_OFF,
+ DISPC_COMMON_REG_TABLE_LEN,
+};
+
+/*
+ * dispc_common_regmap should be defined as a const u16 * pointing to
+ * a valid DSS common register map for the platform before the macros
+ * below can be used.
+ */
+
+#define REG(r) (dispc_common_regmap[r ## _OFF])
+
+#define DSS_REVISION REG(DSS_REVISION)
+#define DSS_SYSCONFIG REG(DSS_SYSCONFIG)
+#define DSS_SYSSTATUS REG(DSS_SYSSTATUS)
+#define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI)
+#define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW)
+#define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS)
+#define DISPC_IRQENABLE_SET REG(DISPC_IRQENABLE_SET)
+#define DISPC_IRQENABLE_CLR REG(DISPC_IRQENABLE_CLR)
+#define DISPC_VID_IRQENABLE(n) (REG(DISPC_VID_IRQENABLE) + (n) * 4)
+#define DISPC_VID_IRQSTATUS(n) (REG(DISPC_VID_IRQSTATUS) + (n) * 4)
+#define DISPC_VP_IRQENABLE(n) (REG(DISPC_VP_IRQENABLE) + (n) * 4)
+#define DISPC_VP_IRQSTATUS(n) (REG(DISPC_VP_IRQSTATUS) + (n) * 4)
+#define WB_IRQENABLE REG(WB_IRQENABLE)
+#define WB_IRQSTATUS REG(WB_IRQSTATUS)
+
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE)
+#define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER)
+#define DSS_CBA_CFG REG(DSS_CBA_CFG)
+#define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL)
+#define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS)
+#define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE)
+#define DISPC_SECURE_DISABLE REG(DISPC_SECURE_DISABLE)
+
+#define FBDC_REVISION_1 REG(FBDC_REVISION_1)
+#define FBDC_REVISION_2 REG(FBDC_REVISION_2)
+#define FBDC_REVISION_3 REG(FBDC_REVISION_3)
+#define FBDC_REVISION_4 REG(FBDC_REVISION_4)
+#define FBDC_REVISION_5 REG(FBDC_REVISION_5)
+#define FBDC_REVISION_6 REG(FBDC_REVISION_6)
+#define FBDC_COMMON_CONTROL REG(FBDC_COMMON_CONTROL)
+#define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0)
+#define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1)
+#define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS)
+#define DISPC_MSS_VP1 REG(DISPC_MSS_VP1)
+#define DISPC_MSS_VP3 REG(DISPC_MSS_VP3)
+
+/* VID */
+
+#define DISPC_VID_ACCUH_0 0x0
+#define DISPC_VID_ACCUH_1 0x4
+#define DISPC_VID_ACCUH2_0 0x8
+#define DISPC_VID_ACCUH2_1 0xc
+#define DISPC_VID_ACCUV_0 0x10
+#define DISPC_VID_ACCUV_1 0x14
+#define DISPC_VID_ACCUV2_0 0x18
+#define DISPC_VID_ACCUV2_1 0x1c
+#define DISPC_VID_ATTRIBUTES 0x20
+#define DISPC_VID_ATTRIBUTES2 0x24
+#define DISPC_VID_BA_0 0x28
+#define DISPC_VID_BA_1 0x2c
+#define DISPC_VID_BA_UV_0 0x30
+#define DISPC_VID_BA_UV_1 0x34
+#define DISPC_VID_BUF_SIZE_STATUS 0x38
+#define DISPC_VID_BUF_THRESHOLD 0x3c
+#define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4)
+
+#define DISPC_VID_FIRH 0x5c
+#define DISPC_VID_FIRH2 0x60
+#define DISPC_VID_FIRV 0x64
+#define DISPC_VID_FIRV2 0x68
+
+#define DISPC_VID_FIR_COEFS_H0 0x6c
+#define DISPC_VID_FIR_COEF_H0(phase) (0x6c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H0_C 0x90
+#define DISPC_VID_FIR_COEF_H0_C(phase) (0x90 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_H12 0xb4
+#define DISPC_VID_FIR_COEF_H12(phase) (0xb4 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H12_C 0xf4
+#define DISPC_VID_FIR_COEF_H12_C(phase) (0xf4 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V0 0x134
+#define DISPC_VID_FIR_COEF_V0(phase) (0x134 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V0_C 0x158
+#define DISPC_VID_FIR_COEF_V0_C(phase) (0x158 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V12 0x17c
+#define DISPC_VID_FIR_COEF_V12(phase) (0x17c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V12_C 0x1bc
+#define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4)
+
+#define DISPC_VID_GLOBAL_ALPHA 0x1fc
+#define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */
+#define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */
+#define DISPC_VID_MFLAG_THRESHOLD 0x208
+#define DISPC_VID_PICTURE_SIZE 0x20c
+#define DISPC_VID_PIXEL_INC 0x210
+#define DISPC_VID_K2G_POSITION 0x214 /* K2G */
+#define DISPC_VID_PRELOAD 0x218
+#define DISPC_VID_ROW_INC 0x21c
+#define DISPC_VID_SIZE 0x220
+#define DISPC_VID_BA_EXT_0 0x22c
+#define DISPC_VID_BA_EXT_1 0x230
+#define DISPC_VID_BA_UV_EXT_0 0x234
+#define DISPC_VID_BA_UV_EXT_1 0x238
+#define DISPC_VID_CSC_COEF7 0x23c
+#define DISPC_VID_ROW_INC_UV 0x248
+#define DISPC_VID_CLUT 0x260
+#define DISPC_VID_SAFETY_ATTRIBUTES 0x2a0
+#define DISPC_VID_SAFETY_CAPT_SIGNATURE 0x2a4
+#define DISPC_VID_SAFETY_POSITION 0x2a8
+#define DISPC_VID_SAFETY_REF_SIGNATURE 0x2ac
+#define DISPC_VID_SAFETY_SIZE 0x2b0
+#define DISPC_VID_SAFETY_LFSR_SEED 0x2b4
+#define DISPC_VID_LUMAKEY 0x2b8
+#define DISPC_VID_DMA_BUFSIZE 0x2bc /* J721E */
+
+/* OVR */
+
+#define DISPC_OVR_CONFIG 0x0
+#define DISPC_OVR_VIRTVP 0x4 /* J721E */
+#define DISPC_OVR_DEFAULT_COLOR 0x8
+#define DISPC_OVR_DEFAULT_COLOR2 0xc
+#define DISPC_OVR_TRANS_COLOR_MAX 0x10
+#define DISPC_OVR_TRANS_COLOR_MAX2 0x14
+#define DISPC_OVR_TRANS_COLOR_MIN 0x18
+#define DISPC_OVR_TRANS_COLOR_MIN2 0x1c
+#define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */
+/* VP */
+
+#define DISPC_VP_CONFIG 0x0
+#define DISPC_VP_CONTROL 0x4
+#define DISPC_VP_CSC_COEF0 0x8
+#define DISPC_VP_CSC_COEF1 0xc
+#define DISPC_VP_CSC_COEF2 0x10
+#define DISPC_VP_DATA_CYCLE_0 0x14
+#define DISPC_VP_DATA_CYCLE_1 0x18
+#define DISPC_VP_K2G_GAMMA_TABLE 0x20 /* K2G */
+#define DISPC_VP_K2G_IRQENABLE 0x3c /* K2G */
+#define DISPC_VP_K2G_IRQSTATUS 0x40 /* K2G */
+#define DISPC_VP_DATA_CYCLE_2 0x1c
+#define DISPC_VP_LINE_NUMBER 0x44
+#define DISPC_VP_POL_FREQ 0x4c
+#define DISPC_VP_SIZE_SCREEN 0x50
+#define DISPC_VP_TIMING_H 0x54
+#define DISPC_VP_TIMING_V 0x58
+#define DISPC_VP_CSC_COEF3 0x5c
+#define DISPC_VP_CSC_COEF4 0x60
+#define DISPC_VP_CSC_COEF5 0x64
+#define DISPC_VP_CSC_COEF6 0x68
+#define DISPC_VP_CSC_COEF7 0x6c
+#define DISPC_VP_SAFETY_ATTRIBUTES_0 0x70
+#define DISPC_VP_SAFETY_ATTRIBUTES_1 0x74
+#define DISPC_VP_SAFETY_ATTRIBUTES_2 0x78
+#define DISPC_VP_SAFETY_ATTRIBUTES_3 0x7c
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_0 0x90
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_1 0x94
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_2 0x98
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_3 0x9c
+#define DISPC_VP_SAFETY_POSITION_0 0xb0
+#define DISPC_VP_SAFETY_POSITION_1 0xb4
+#define DISPC_VP_SAFETY_POSITION_2 0xb8
+#define DISPC_VP_SAFETY_POSITION_3 0xbc
+#define DISPC_VP_SAFETY_REF_SIGNATURE_0 0xd0
+#define DISPC_VP_SAFETY_REF_SIGNATURE_1 0xd4
+#define DISPC_VP_SAFETY_REF_SIGNATURE_2 0xd8
+#define DISPC_VP_SAFETY_REF_SIGNATURE_3 0xdc
+#define DISPC_VP_SAFETY_SIZE_0 0xf0
+#define DISPC_VP_SAFETY_SIZE_1 0xf4
+#define DISPC_VP_SAFETY_SIZE_2 0xf8
+#define DISPC_VP_SAFETY_SIZE_3 0xfc
+#define DISPC_VP_SAFETY_LFSR_SEED 0x110
+#define DISPC_VP_GAMMA_TABLE 0x120
+#define DISPC_VP_DSS_OLDI_CFG 0x160
+#define DISPC_VP_DSS_OLDI_STATUS 0x164
+#define DISPC_VP_DSS_OLDI_LB 0x168
+#define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */
+
+/*
+ * OLDI IO_CTRL register offsets. On AM654 these registers live in
+ * CTRL_MMR0, where the syscon regmap should map the 0x14-byte range
+ * from CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL.
+ */
+#define OLDI_DAT0_IO_CTRL 0x00
+#define OLDI_DAT1_IO_CTRL 0x04
+#define OLDI_DAT2_IO_CTRL 0x08
+#define OLDI_DAT3_IO_CTRL 0x0C
+#define OLDI_CLK_IO_CTRL 0x10
+
+#define OLDI_PWRDN_TX BIT(8)
+
+#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
new file mode 100644
index 000000000000..d95e4be2c7b9
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/console.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_kms.h"
+#include "tidss_irq.h"
+
+/* Power management */
+
+int tidss_runtime_get(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
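+ /*
+ * pm_runtime_get_sync() returns 1 if the device was already
+ * active; treat that as success.
+ */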
+ r = pm_runtime_get_sync(tidss->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+void tidss_runtime_put(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ r = pm_runtime_put_sync(tidss->dev);
+ WARN_ON(r < 0);
+}
+
+static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return dispc_runtime_suspend(tidss->dispc);
+}
+
+static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+ int r;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ r = dispc_runtime_resume(tidss->dispc);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int __maybe_unused tidss_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_suspend(&tidss->ddev);
+}
+
+static int __maybe_unused tidss_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_resume(&tidss->ddev);
+}
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops tidss_pm_ops = {
+ .runtime_suspend = tidss_pm_runtime_suspend,
+ .runtime_resume = tidss_pm_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
+};
+
+#endif /* CONFIG_PM */
+
+/* DRM device Information */
+
+static void tidss_release(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ drm_kms_helper_poll_fini(ddev);
+
+ tidss_modeset_cleanup(tidss);
+
+ drm_dev_fini(ddev);
+
+ kfree(tidss);
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+
+static struct drm_driver tidss_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &tidss_fops,
+ .release = tidss_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .name = "tidss",
+ .desc = "TI Keystone DSS",
+ .date = "20180215",
+ .major = 1,
+ .minor = 0,
+
+ .irq_preinstall = tidss_irq_preinstall,
+ .irq_postinstall = tidss_irq_postinstall,
+ .irq_handler = tidss_irq_handler,
+ .irq_uninstall = tidss_irq_uninstall,
+};
+
+static int tidss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss;
+ struct drm_device *ddev;
+ int ret;
+ int irq;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* Can't use devm_* since drm_device's lifetime may exceed dev's */
+ tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
+ if (!tidss)
+ return -ENOMEM;
+
+ ddev = &tidss->ddev;
+
+ ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
+ if (ret) {
+ kfree(tidss);
+ return ret;
+ }
+
+ tidss->dev = dev;
+ tidss->feat = of_device_get_match_data(dev);
+
+ platform_set_drvdata(pdev, tidss);
+
+ ddev->dev_private = tidss;
+
+ ret = dispc_init(tidss);
+ if (ret) {
+ dev_err(dev, "failed to initialize dispc: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call resume manually */
+ dispc_runtime_resume(tidss->dispc);
+#endif
+
+ ret = tidss_modeset_init(tidss);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to init DRM/KMS (%d)\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_runtime_suspend;
+ }
+
+ ret = drm_irq_install(ddev, irq);
+ if (ret) {
+ dev_err(dev, "drm_irq_install failed: %d\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ drm_kms_helper_poll_init(ddev);
+
+ drm_mode_config_reset(ddev);
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret) {
+ dev_err(dev, "failed to register DRM device\n");
+ goto err_irq_uninstall;
+ }
+
+ drm_fbdev_generic_setup(ddev, 32);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+
+err_irq_uninstall:
+ drm_irq_uninstall(ddev);
+
+err_runtime_suspend:
+#ifndef CONFIG_PM
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int tidss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss = platform_get_drvdata(pdev);
+ struct drm_device *ddev = &tidss->ddev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ drm_dev_unregister(ddev);
+
+ drm_atomic_helper_shutdown(ddev);
+
+ drm_irq_uninstall(ddev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call suspend manually */
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ /* devm-allocated dispc goes away with the dev, so mark it NULL */
+ dispc_remove(tidss);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+}
+
+static void tidss_shutdown(struct platform_device *pdev)
+{
+ struct tidss_device *tidss = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&tidss->ddev);
+}
+
+static const struct of_device_id tidss_of_table[] = {
+ { .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
+ { .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
+ { .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, tidss_of_table);
+
+static struct platform_driver tidss_platform_driver = {
+ .probe = tidss_probe,
+ .remove = tidss_remove,
+ .shutdown = tidss_shutdown,
+ .driver = {
+ .name = "tidss",
+#ifdef CONFIG_PM
+ .pm = &tidss_pm_ops,
+#endif
+ .of_match_table = tidss_of_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(tidss_platform_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TI Keystone DSS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
new file mode 100644
index 000000000000..e2aa6436ad18
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_DRV_H__
+#define __TIDSS_DRV_H__
+
+#include <linux/spinlock.h>
+
+#define TIDSS_MAX_PORTS 4
+#define TIDSS_MAX_PLANES 4
+
+typedef u32 dispc_irq_t;
+
+struct tidss_device {
+ struct drm_device ddev; /* DRM device for DSS */
+ struct device *dev; /* Underlying DSS device */
+
+ const struct dispc_features *feat;
+ struct dispc_device *dispc;
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[TIDSS_MAX_PORTS];
+
+ unsigned int num_planes;
+ struct drm_plane *planes[TIDSS_MAX_PLANES];
+
+ spinlock_t wait_lock; /* protects the irq masks */
+ dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
+
+ struct drm_atomic_state *saved_state;
+};
+
+int tidss_runtime_get(struct tidss_device *tidss);
+void tidss_runtime_put(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
new file mode 100644
index 000000000000..83785b0a66a9
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/export.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include "tidss_crtc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+
+static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge *bridge;
+ bool bus_flags_set = false;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ /*
+ * Take the bus_flags from the first bridge that defines
+ * bridge timings, or from the connector's display_info if no
+ * bridge defines the timings.
+ */
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->timings)
+ continue;
+
+ tcrtc_state->bus_flags = bridge->timings->input_bus_flags;
+ bus_flags_set = true;
+ break;
+ }
+
+ if (!di->bus_formats || di->num_bus_formats == 0) {
+ dev_err(ddev->dev, "%s: No bus_formats in connected display\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ // XXX any cleaner way to set bus format and flags?
+ tcrtc_state->bus_format = di->bus_formats[0];
+ if (!bus_flags_set)
+ tcrtc_state->bus_flags = di->bus_flags;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .atomic_check = tidss_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs)
+{
+ struct drm_encoder *enc;
+ int ret;
+
+ enc = devm_kzalloc(tidss->dev, sizeof(*enc), GFP_KERNEL);
+ if (!enc)
+ return ERR_PTR(-ENOMEM);
+
+ enc->possible_crtcs = possible_crtcs;
+
+ ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
+ encoder_type, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_encoder_helper_add(enc, &encoder_helper_funcs);
+
+ dev_dbg(tidss->dev, "Encoder create done\n");
+
+ return enc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.h b/drivers/gpu/drm/tidss/tidss_encoder.h
new file mode 100644
index 000000000000..06854d66e7e6
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_ENCODER_H__
+#define __TIDSS_ENCODER_H__
+
+#include <drm/drm_encoder.h>
+
+struct tidss_device;
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
new file mode 100644
index 000000000000..612c046738e5
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_print.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* call with wait_lock and dispc runtime held */
+static void tidss_irq_update(struct tidss_device *tidss)
+{
+ assert_spin_locked(&tidss->wait_lock);
+
+ dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
+}
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+irqreturn_t tidss_irq_handler(int irq, void *arg)
+{
+ struct drm_device *ddev = (struct drm_device *)arg;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned int id;
+ dispc_irq_t irqstatus;
+
+ if (WARN_ON(!ddev->irq_enabled))
+ return IRQ_NONE;
+
+ irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ for (id = 0; id < tidss->num_crtcs; id++) {
+ struct drm_crtc *crtc = tidss->crtcs[id];
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+
+ if (irqstatus & (DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport)))
+ tidss_crtc_vblank_irq(crtc);
+
+ if (irqstatus & (DSS_IRQ_VP_FRAME_DONE(hw_videoport)))
+ tidss_crtc_framedone_irq(crtc);
+
+ if (irqstatus & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ tidss_crtc_error_irq(crtc, irqstatus);
+ }
+
+ if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
+ dev_err_ratelimited(tidss->dev, "OCP error\n");
+
+ return IRQ_HANDLED;
+}
+
+void tidss_irq_resume(struct tidss_device *tidss)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_preinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ spin_lock_init(&tidss->wait_lock);
+
+ tidss_runtime_get(tidss);
+
+ dispc_set_irqenable(tidss->dispc, 0);
+ dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ tidss_runtime_put(tidss);
+}
+
+int tidss_irq_postinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+ unsigned int i;
+
+ tidss_runtime_get(tidss);
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+
+ tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+
+ for (i = 0; i < tidss->num_crtcs; ++i) {
+ struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
+
+ tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
+
+ tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
+ }
+
+ tidss_irq_update(tidss);
+
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+
+ tidss_runtime_put(tidss);
+
+ return 0;
+}
+
+void tidss_irq_uninstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ tidss_runtime_get(tidss);
+ dispc_set_irqenable(tidss->dispc, 0);
+ tidss_runtime_put(tidss);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
new file mode 100644
index 000000000000..aa92db403cca
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_IRQ_H__
+#define __TIDSS_IRQ_H__
+
+#include <linux/types.h>
+
+#include "tidss_drv.h"
+
+/*
+ * The IRQ status from various DISPC IRQ registers are packed into a single
+ * value, where the bits are defined as follows:
+ *
+ * bit group |dev|wb |mrg0|mrg1|mrg2 |mrg3 |plane0-3|<unused>|
+ * bit use   |D  |fou|FEOL|FEOL|FEOL |FEOL |  UUUU  |        |
+ * bit number|0  |1-3|4-7 |8-11|12-15|16-19| 20-23  | 24-31  |
+ *
+ * device bits: D = OCP error
+ * WB bits: f = frame done wb, o = wb buffer overflow,
+ * u = wb buffer incomplete
+ * vp bits: F = frame done, E = vsync even, O = vsync odd, L = sync lost
+ * plane bits: U = fifo underflow
+ */
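+
+/*
+ * A worked example of the layout above: for videoport 1, bit 1
+ * (vsync even), DSS_IRQ_VP_BIT_N(1, 1) = 4 + 4 * 1 + 1 = 9, so
+ * DSS_IRQ_VP_VSYNC_EVEN(1) == BIT(9).
+ */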
+
+#define DSS_IRQ_DEVICE_OCP_ERR BIT(0)
+
+#define DSS_IRQ_DEVICE_FRAMEDONEWB BIT(1)
+#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW BIT(2)
+#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR BIT(3)
+#define DSS_IRQ_DEVICE_WB_MASK GENMASK(3, 1)
+
+#define DSS_IRQ_VP_BIT_N(ch, bit) (4 + 4 * (ch) + (bit))
+#define DSS_IRQ_PLANE_BIT_N(plane, bit) \
+ (DSS_IRQ_VP_BIT_N(TIDSS_MAX_PORTS, 0) + 1 * (plane) + (bit))
+
+#define DSS_IRQ_VP_BIT(ch, bit) BIT(DSS_IRQ_VP_BIT_N((ch), (bit)))
+#define DSS_IRQ_PLANE_BIT(plane, bit) \
+ BIT(DSS_IRQ_PLANE_BIT_N((plane), (bit)))
+
+static inline dispc_irq_t DSS_IRQ_VP_MASK(u32 ch)
+{
+ return GENMASK(DSS_IRQ_VP_BIT_N((ch), 3), DSS_IRQ_VP_BIT_N((ch), 0));
+}
+
+static inline dispc_irq_t DSS_IRQ_PLANE_MASK(u32 plane)
+{
+ return GENMASK(DSS_IRQ_PLANE_BIT_N((plane), 0),
+ DSS_IRQ_PLANE_BIT_N((plane), 0));
+}
+
+#define DSS_IRQ_VP_FRAME_DONE(ch) DSS_IRQ_VP_BIT((ch), 0)
+#define DSS_IRQ_VP_VSYNC_EVEN(ch) DSS_IRQ_VP_BIT((ch), 1)
+#define DSS_IRQ_VP_VSYNC_ODD(ch) DSS_IRQ_VP_BIT((ch), 2)
+#define DSS_IRQ_VP_SYNC_LOST(ch) DSS_IRQ_VP_BIT((ch), 3)
+
+#define DSS_IRQ_PLANE_FIFO_UNDERFLOW(plane) DSS_IRQ_PLANE_BIT((plane), 0)
+
+struct drm_crtc;
+struct drm_device;
+
+struct tidss_device;
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc);
+void tidss_irq_disable_vblank(struct drm_crtc *crtc);
+
+void tidss_irq_preinstall(struct drm_device *ddev);
+int tidss_irq_postinstall(struct drm_device *ddev);
+void tidss_irq_uninstall(struct drm_device *ddev);
+irqreturn_t tidss_irq_handler(int irq, void *arg);
+
+void tidss_irq_resume(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
new file mode 100644
index 000000000000..7d419960b030
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+#include "tidss_kms.h"
+#include "tidss_plane.h"
+
+static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *ddev = old_state->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ drm_atomic_helper_commit_modeset_disables(ddev, old_state);
+ drm_atomic_helper_commit_planes(ddev, old_state, 0);
+ drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+
+ drm_atomic_helper_cleanup_planes(ddev, old_state);
+
+ tidss_runtime_put(tidss);
+}
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+ .atomic_commit_tail = tidss_atomic_commit_tail,
+};
+
+static int tidss_atomic_check(struct drm_device *ddev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *opstate;
+ struct drm_plane_state *npstate;
+ struct drm_plane *plane;
+ struct drm_crtc_state *cstate;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(ddev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Add all active planes on a CRTC to the atomic state, if
+ * x/y/z position or activity of any plane on that CRTC
+ * changes. This is needed for updating the plane positions in
+ * tidss_crtc_position_planes() which is called from
+ * crtc_atomic_enable() and crtc_atomic_flush(). We have an
+ * extra flag to mark x,y-position changes, and together
+ * with zpos_changed the condition recognizes all the above
+ * cases.
+ */
+ for_each_oldnew_plane_in_state(state, plane, opstate, npstate, i) {
+ if (!npstate->crtc || !npstate->visible)
+ continue;
+
+ if (!opstate->crtc || opstate->crtc_x != npstate->crtc_x ||
+ opstate->crtc_y != npstate->crtc_y) {
+ cstate = drm_atomic_get_crtc_state(state,
+ npstate->crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
+ to_tidss_crtc_state(cstate)->plane_pos_changed = true;
+ }
+ }
+
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
+ if (to_tidss_crtc_state(cstate)->plane_pos_changed ||
+ cstate->zpos_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = tidss_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tidss_dispc_modeset_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ unsigned int fourccs_len;
+ const u32 *fourccs = dispc_plane_formats(tidss->dispc, &fourccs_len);
+ unsigned int i;
+
+ struct pipe {
+ u32 hw_videoport;
+ struct drm_bridge *bridge;
+ u32 enc_type;
+ };
+
+ const struct dispc_features *feat = tidss->feat;
+ u32 max_vps = feat->num_vps;
+ u32 max_planes = feat->num_planes;
+
+ struct pipe pipes[TIDSS_MAX_PORTS];
+ u32 num_pipes = 0;
+ u32 crtc_mask;
+
+ /* first find all the connected panels & bridges */
+
+ for (i = 0; i < max_vps; i++) {
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ u32 enc_type = DRM_MODE_ENCODER_NONE;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, i, 0,
+ &panel, &bridge);
+ if (ret == -ENODEV) {
+ dev_dbg(dev, "no panel/bridge for port %d\n", i);
+ continue;
+ } else if (ret) {
+ dev_dbg(dev, "port %d probe returned %d\n", i, ret);
+ return ret;
+ }
+
+ if (panel) {
+ u32 conn_type;
+
+ dev_dbg(dev, "Setting up panel for port %d\n", i);
+
+ switch (feat->vp_bus_type[i]) {
+ case DISPC_VP_OLDI:
+ enc_type = DRM_MODE_ENCODER_LVDS;
+ conn_type = DRM_MODE_CONNECTOR_LVDS;
+ break;
+ case DISPC_VP_DPI:
+ enc_type = DRM_MODE_ENCODER_DPI;
+ conn_type = DRM_MODE_CONNECTOR_DPI;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (panel->connector_type != conn_type) {
+ dev_err(dev,
+ "%s: Panel %s has incompatible connector type for vp%d (%d != %d)\n",
+ __func__, dev_name(panel->dev), i,
+ panel->connector_type, conn_type);
+ return -EINVAL;
+ }
+
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(bridge)) {
+ dev_err(dev,
+ "failed to set up panel bridge for port %d\n",
+ i);
+ return PTR_ERR(bridge);
+ }
+ }
+
+ pipes[num_pipes].hw_videoport = i;
+ pipes[num_pipes].bridge = bridge;
+ pipes[num_pipes].enc_type = enc_type;
+ num_pipes++;
+ }
+
+ /* all planes can be on any crtc */
+ crtc_mask = (1 << num_pipes) - 1;
+
+ /* then create a plane, a crtc and an encoder for each panel/bridge */
+
+ for (i = 0; i < num_pipes; ++i) {
+ struct tidss_plane *tplane;
+ struct tidss_crtc *tcrtc;
+ struct drm_encoder *enc;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+ int ret;
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_PRIMARY, crtc_mask,
+ fourccs, fourccs_len);
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+
+ tcrtc = tidss_crtc_create(tidss, pipes[i].hw_videoport,
+ &tplane->plane);
+ if (IS_ERR(tcrtc)) {
+ dev_err(tidss->dev, "crtc create failed\n");
+ return PTR_ERR(tcrtc);
+ }
+
+ tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;
+
+ enc = tidss_encoder_create(tidss, pipes[i].enc_type,
+ 1 << tcrtc->crtc.index);
+ if (IS_ERR(enc)) {
+ dev_err(tidss->dev, "encoder create failed\n");
+ return PTR_ERR(enc);
+ }
+
+ ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0);
+ if (ret) {
+ dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* create overlay planes for the remaining hardware planes */
+
+ while (tidss->num_planes < max_planes) {
+ struct tidss_plane *tplane;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_OVERLAY, crtc_mask,
+ fourccs, fourccs_len);
+
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+ }
+
+ return 0;
+}
+
+int tidss_modeset_init(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ drm_mode_config_init(ddev);
+
+ ddev->mode_config.min_width = 8;
+ ddev->mode_config.min_height = 8;
+ ddev->mode_config.max_width = 8096;
+ ddev->mode_config.max_height = 8096;
+ ddev->mode_config.normalize_zpos = true;
+ ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+ ret = tidss_dispc_modeset_init(tidss);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ ret = drm_vblank_init(ddev, tidss->num_crtcs);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ /* Start with vertical blanking interrupt reporting disabled. */
+ for (i = 0; i < tidss->num_crtcs; ++i)
+ drm_crtc_vblank_reset(tidss->crtcs[i]);
+
+ drm_mode_config_reset(ddev);
+
+ dev_dbg(tidss->dev, "%s done\n", __func__);
+
+ return 0;
+
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(ddev);
+ return ret;
+}
+
+void tidss_modeset_cleanup(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+
+ drm_mode_config_cleanup(ddev);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
new file mode 100644
index 000000000000..dda5625d0128
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_KMS_H__
+#define __TIDSS_KMS_H__
+
+struct tidss_device;
+
+int tidss_modeset_init(struct tidss_device *tidss);
+void tidss_modeset_cleanup(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
new file mode 100644
index 000000000000..ff99b2dd4a17
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_plane.h"
+
+/* drm_plane_helper_funcs */
+
+static int tidss_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ const struct drm_format_info *finfo;
+ struct drm_crtc_state *crtc_state;
+ u32 hw_plane = tplane->hw_plane_id;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->crtc) {
+ /*
+ * The visible field is not reset by the DRM core but only
+ * updated by drm_plane_helper_check_state(), so set it
+ * manually here.
+ */
+ state->visible = false;
+ return 0;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
+ INT_MAX, true, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The HW can only start drawing at a subpixel boundary (the
+ * first two checks below). At the end of a row the HW can only
+ * jump an integer number of subpixels forward to the beginning
+ * of the next row, so we can only show a picture with an
+ * integer subpixel width (the third check). However, after
+ * reaching the end of the drawn picture the drawing starts
+ * again at the absolute memory address of the picture's top
+ * left corner, so there is no need to check for odd height.
+ */
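+
+ /*
+ * For example, with a 4:2:0 format such as NV12
+ * (hsub = vsub = 2), the source x/y position and width must
+ * all be even.
+ */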
+
+ finfo = drm_format_info(state->fb->format->format);
+
+ if ((state->src_x >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: x-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_x >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_y >> 16) % finfo->vsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: y-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_y >> 16), finfo->vsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_w >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: src width %u not divisible by subpixel size %u\n",
+ __func__, (state->src_w >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if (!state->visible)
+ return 0;
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_check(tidss->dispc, hw_plane, state, hw_videoport);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void tidss_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->visible) {
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
+ state, hw_videoport);
+
+ if (ret) {
+ dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
+ __func__, tplane->hw_plane_id);
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
+}
+
+static void tidss_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+}
+
+static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+ .atomic_check = tidss_plane_atomic_check,
+ .atomic_update = tidss_plane_atomic_update,
+ .atomic_disable = tidss_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs tidss_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .destroy = drm_plane_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats)
+{
+ struct tidss_plane *tplane;
+ enum drm_plane_type type;
+ u32 possible_crtcs;
+ u32 num_planes = tidss->feat->num_planes;
+ u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709));
+ u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE));
+ u32 default_encoding = DRM_COLOR_YCBCR_BT601;
+ u32 default_range = DRM_COLOR_YCBCR_FULL_RANGE;
+ u32 blend_modes = (BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ int ret;
+
+ tplane = devm_kzalloc(tidss->dev, sizeof(*tplane), GFP_KERNEL);
+ if (!tplane)
+ return ERR_PTR(-ENOMEM);
+
+ tplane->hw_plane_id = hw_plane_id;
+
+ possible_crtcs = crtc_mask;
+ type = plane_type;
+
+ ret = drm_universal_plane_init(&tidss->ddev, &tplane->plane,
+ possible_crtcs,
+ &tidss_plane_funcs,
+ formats, num_formats,
+ NULL, type, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
+
+ drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
+ num_planes - 1);
+
+ ret = drm_plane_create_color_properties(&tplane->plane,
+ color_encodings,
+ color_ranges,
+ default_encoding,
+ default_range);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_plane_create_alpha_property(&tplane->plane);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return tplane;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
new file mode 100644
index 000000000000..80ff1c5a2535
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_PLANE_H__
+#define __TIDSS_PLANE_H__
+
+#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
+
+struct tidss_device;
+
+struct tidss_plane {
+ struct drm_plane plane;
+
+ u32 hw_plane_id;
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.c b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
new file mode 100644
index 000000000000..5ec68389cc68
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "tidss_scale_coefs.h"
+
+/*
+ * These coefficients are interpolated with a custom Python script
+ * from the DSS5 coefficients (drivers/gpu/drm/omapdrm/dss/dispc_coef.c).
+ */
+static const struct tidss_scale_coefs coef5_m32 = {
+ .c2 = { 28, 34, 40, 46, 52, 58, 64, 70, 0, 2, 4, 8, 12, 16, 20, 24, },
+ .c1 = { 132, 138, 144, 150, 156, 162, 168, 174, 76, 84, 92, 98, 104, 110, 116, 124, },
+ .c0 = { 192, 192, 192, 190, 188, 186, 184, 182, 180, },
+};
+
+static const struct tidss_scale_coefs coef5_m26 = {
+ .c2 = { 24, 28, 32, 38, 44, 50, 56, 64, 0, 2, 4, 6, 8, 12, 16, 20, },
+ .c1 = { 132, 138, 144, 152, 160, 166, 172, 178, 72, 80, 88, 94, 100, 108, 116, 124, },
+ .c0 = { 200, 202, 204, 202, 200, 196, 192, 188, 184, },
+};
+
+static const struct tidss_scale_coefs coef5_m22 = {
+ .c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14, },
+ .c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122, },
+ .c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192, },
+};
+
+static const struct tidss_scale_coefs coef5_m19 = {
+ .c2 = { 12, 14, 16, 22, 28, 34, 40, 48, 0, 0, 0, 2, 4, 4, 4, 8, },
+ .c1 = { 128, 140, 152, 160, 168, 176, 184, 192, 56, 64, 72, 82, 92, 100, 108, 118, },
+ .c0 = { 232, 232, 232, 226, 220, 218, 216, 208, 200, },
+};
+
+static const struct tidss_scale_coefs coef5_m16 = {
+ .c2 = { 0, 2, 4, 8, 12, 18, 24, 32, 0, 0, 0, -2, -4, -4, -4, -2, },
+ .c1 = { 124, 138, 152, 164, 176, 186, 196, 206, 40, 48, 56, 68, 80, 90, 100, 112, },
+ .c0 = { 264, 262, 260, 254, 248, 242, 236, 226, 216, },
+};
+
+static const struct tidss_scale_coefs coef5_m14 = {
+ .c2 = { -8, -6, -4, -2, 0, 6, 12, 18, 0, -2, -4, -6, -8, -8, -8, -8, },
+ .c1 = { 120, 134, 148, 164, 180, 194, 208, 220, 24, 32, 40, 52, 64, 78, 92, 106, },
+ .c0 = { 288, 286, 284, 280, 276, 266, 256, 244, 232, },
+};
+
+static const struct tidss_scale_coefs coef5_m13 = {
+ .c2 = { -12, -12, -12, -10, -8, -4, 0, 6, 0, -2, -4, -6, -8, -10, -12, -12, },
+ .c1 = { 112, 130, 148, 164, 180, 196, 212, 228, 12, 22, 32, 44, 56, 70, 84, 98, },
+ .c0 = { 312, 308, 304, 298, 292, 282, 272, 258, 244, },
+};
+
+static const struct tidss_scale_coefs coef5_m12 = {
+ .c2 = { -16, -18, -20, -18, -16, -14, -12, -6, 0, -2, -4, -6, -8, -10, -12, -14, },
+ .c1 = { 104, 124, 144, 164, 184, 202, 220, 238, 0, 10, 20, 30, 40, 56, 72, 88, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 272, 256, },
+};
+
+static const struct tidss_scale_coefs coef5_m11 = {
+ .c2 = { -20, -22, -24, -24, -24, -24, -24, -20, 0, -2, -4, -6, -8, -10, -12, -16, },
+ .c1 = { 92, 114, 136, 158, 180, 204, 228, 250, -16, -8, 0, 12, 24, 38, 52, 72, },
+ .c0 = { 368, 364, 360, 350, 340, 326, 312, 292, 272, },
+};
+
+static const struct tidss_scale_coefs coef5_m10 = {
+ .c2 = { -16, -20, -24, -28, -32, -34, -36, -34, 0, 0, 0, -2, -4, -8, -12, -14, },
+ .c1 = { 72, 96, 120, 148, 176, 204, 232, 260, -32, -26, -20, -10, 0, 16, 32, 52, },
+ .c0 = { 400, 398, 396, 384, 372, 354, 336, 312, 288, },
+};
+
+static const struct tidss_scale_coefs coef5_m9 = {
+ .c2 = { -12, -18, -24, -28, -32, -38, -44, -46, 0, 2, 4, 2, 0, -2, -4, -8, },
+ .c1 = { 40, 68, 96, 128, 160, 196, 232, 268, -48, -46, -44, -36, -28, -14, 0, 20, },
+ .c0 = { 456, 450, 444, 428, 412, 388, 364, 334, 304, },
+};
+
+static const struct tidss_scale_coefs coef5_m8 = {
+ .c2 = { 0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2, },
+ .c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20, },
+ .c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312, },
+};
+
+static const struct tidss_scale_coefs coef3_m32 = {
+ .c1 = { 108, 92, 76, 62, 48, 36, 24, 140, 256, 236, 216, 198, 180, 162, 144, 126, },
+ .c0 = { 296, 294, 292, 288, 284, 278, 272, 136, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m26 = {
+ .c1 = { 104, 90, 76, 60, 44, 32, 20, 138, 256, 236, 216, 198, 180, 160, 140, 122, },
+ .c0 = { 304, 300, 296, 292, 288, 282, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m22 = {
+ .c1 = { 100, 84, 68, 54, 40, 30, 20, 138, 256, 236, 216, 196, 176, 156, 136, 118, },
+ .c0 = { 312, 310, 308, 302, 296, 286, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m19 = {
+ .c1 = { 96, 80, 64, 50, 36, 26, 16, 136, 256, 236, 216, 194, 172, 152, 132, 114, },
+ .c0 = { 320, 318, 316, 310, 304, 292, 280, 140, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m16 = {
+ .c1 = { 88, 72, 56, 44, 32, 22, 12, 134, 256, 234, 212, 190, 168, 148, 128, 108, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 144, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m14 = {
+ .c1 = { 80, 64, 48, 36, 24, 16, 8, 132, 256, 232, 208, 186, 164, 142, 120, 100, },
+ .c0 = { 352, 348, 344, 334, 324, 310, 296, 148, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m13 = {
+ .c1 = { 72, 56, 40, 30, 20, 12, 4, 130, 256, 232, 208, 184, 160, 136, 112, 92, },
+ .c0 = { 368, 364, 360, 346, 332, 316, 300, 150, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m12 = {
+ .c1 = { 64, 50, 36, 26, 16, 10, 4, 130, 256, 230, 204, 178, 152, 128, 104, 84, },
+ .c0 = { 384, 378, 372, 358, 344, 324, 304, 152, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m11 = {
+ .c1 = { 56, 40, 24, 16, 8, 4, 0, 128, 256, 228, 200, 172, 144, 120, 96, 76, },
+ .c0 = { 400, 396, 392, 376, 360, 336, 312, 156, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m10 = {
+ .c1 = { 40, 26, 12, 6, 0, -2, -4, 126, 256, 226, 196, 166, 136, 110, 84, 62, },
+ .c0 = { 432, 424, 416, 396, 376, 348, 320, 160, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m9 = {
+ .c1 = { 24, 12, 0, -4, -8, -8, -8, 124, 256, 222, 188, 154, 120, 92, 64, 44, },
+ .c0 = { 464, 456, 448, 424, 400, 366, 332, 166, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m8 = {
+ .c1 = { 0, -8, -16, -16, -16, -12, -8, 124, 256, 214, 172, 134, 96, 66, 36, 18, },
+ .c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps)
+{
+ int i;
+ int inc;
+ static const struct {
+ int mmin;
+ int mmax;
+ const struct tidss_scale_coefs *coef3;
+ const struct tidss_scale_coefs *coef5;
+ const char *name;
+ } coefs[] = {
+ { 27, 32, &coef3_m32, &coef5_m32, "M32" },
+ { 23, 26, &coef3_m26, &coef5_m26, "M26" },
+ { 20, 22, &coef3_m22, &coef5_m22, "M22" },
+ { 17, 19, &coef3_m19, &coef5_m19, "M19" },
+ { 15, 16, &coef3_m16, &coef5_m16, "M16" },
+ { 14, 14, &coef3_m14, &coef5_m14, "M14" },
+ { 13, 13, &coef3_m13, &coef5_m13, "M13" },
+ { 12, 12, &coef3_m12, &coef5_m12, "M12" },
+ { 11, 11, &coef3_m11, &coef5_m11, "M11" },
+ { 10, 10, &coef3_m10, &coef5_m10, "M10" },
+ { 9, 9, &coef3_m9, &coef5_m9, "M9" },
+ { 4, 8, &coef3_m8, &coef5_m8, "M8" },
+ /*
+ * When upscaling more than two times, blockiness and outlines
+ * around the image are observed when M8 tables are used. M11,
+ * M16 and M19 tables are used to prevent this.
+ */
+ { 3, 3, &coef3_m11, &coef5_m11, "M11" },
+ { 2, 2, &coef3_m16, &coef5_m16, "M16" },
+ { 0, 1, &coef3_m19, &coef5_m19, "M19" },
+ };
+
+ /*
+ * firinc is the result of 0x200000 * in_size / out_size. Dividing
+ * it by 0x40000 scales it down to 8 * in_size / out_size, so after
+ * the division the actual scaling factor is 8 / inc.
+ */
+ inc = firinc / 0x40000;
+ for (i = 0; i < ARRAY_SIZE(coefs); ++i) {
+ if (inc >= coefs[i].mmin && inc <= coefs[i].mmax) {
+ if (five_taps)
+ return coefs[i].coef5;
+ else
+ return coefs[i].coef3;
+ }
+ }
+
+ dev_err(dev, "%s: Coefficients not found for firinc 0x%08x, inc %d\n",
+ __func__, firinc, inc);
+
+ return NULL;
+}
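To make the table selection above concrete, here is a worked example of the arithmetic (the numbers are illustrative, not taken from the driver):

	/* Downscaling 1280 -> 640 pixels with the formula above: */
	u32 firinc = (u32)(0x200000ull * 1280 / 640);	/* 0x400000 */
	int inc = firinc / 0x40000;			/* 16 */
	/*
	 * inc == 16 matches the { 15, 16, ... "M16" } entry, so the M16
	 * coefficients are returned (coef5_m16 with five taps, coef3_m16
	 * otherwise). A 1:1 scale gives inc == 8 and thus the M8 tables.
	 */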
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
new file mode 100644
index 000000000000..64b5af5b5361
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_COEF_H__
+#define __TIDSS_DISPC_COEF_H__
+
+#include <linux/types.h>
+
+struct tidss_scale_coefs {
+ s16 c2[16];
+ s16 c1[16];
+ u16 c0[9];
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps);
+
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 51d034e095f4..28b7f703236e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -95,7 +95,7 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
priv->external_encoder->possible_crtcs = BIT(0);
- ret = drm_bridge_attach(priv->external_encoder, bridge, NULL);
+ ret = drm_bridge_attach(priv->external_encoder, bridge, NULL, 0);
if (ret) {
dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
return ret;
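The extra 0 in the call above is the flags argument that drm_bridge_attach() grows in this series; passing 0 is assumed here to keep the historical behaviour where the bridge creates its own connector. As a sketch, a driver that wants to create the connector itself would pass the no-connector flag instead:

	/* Sketch: let the display driver own the connector. */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;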
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index a46ac284dd5e..4160e74e4751 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -47,6 +47,20 @@ config TINYDRM_ILI9341
If M is selected the module will be called ili9341.
+config TINYDRM_ILI9486
+ tristate "DRM support for ILI9486 display panels"
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following Ilitek ILI9486 panels:
+ * PISCREEN 3.5" 320x480 TFT (Ozzmaker 3.5")
+ * RPILCD 3.5" 320x480 TFT (Waveshare 3.5")
+
+ If M is selected the module will be called ili9486.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
@@ -85,14 +99,16 @@ config TINYDRM_ST7586
If M is selected the module will be called st7586.
config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7735R display panels"
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
- DRM driver Sitronix ST7735R with one of the following LCDs:
- * JD-T18003-T01 1.8" 128x160 TFT
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 896cf31132d3..c96ceee71453 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
+obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 94fb1f593564..a48173441ae0 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -22,7 +22,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
static bool eco_mode;
module_param(eco_mode, bool, 0644);
@@ -610,18 +609,10 @@ static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
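This hunk, and the matching ili9225/repaper/st7586 hunks below, delete the same hand-rolled CRTC event delivery from every pipe_update hook. The assumption behind the removal is that the atomic commit helpers now send the flip event for vblank-less drivers themselves, which the virtgpu_display.c hunk further down invokes explicitly:

	/* Sketch: for a CRTC state with no_vblank set, the commit core
	 * fakes the vblank event, so drivers no longer do this by hand.
	 */
	drm_atomic_helper_fake_vblank(state);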
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index c66acc566c2b..802fb8dde1b6 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -26,7 +26,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#define ILI9225_DRIVER_READ_CODE 0x00
#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
@@ -165,18 +164,10 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
ili9225_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
new file mode 100644
index 000000000000..532560aebb1e
--- /dev/null
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9486 panels
+ *
+ * Copyright 2020 Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modeset_helper.h>
+
+#define ILI9486_ITFCTR1 0xb0
+#define ILI9486_PWCTRL1 0xc2
+#define ILI9486_VMCTRL1 0xc5
+#define ILI9486_PGAMCTRL 0xe0
+#define ILI9486_NGAMCTRL 0xe1
+#define ILI9486_DGAMCTRL 0xe2
+#define ILI9486_MADCTL_BGR BIT(3)
+#define ILI9486_MADCTL_MV BIT(5)
+#define ILI9486_MADCTL_MX BIT(6)
+#define ILI9486_MADCTL_MY BIT(7)
+
+/*
+ * The PiScreen/waveshare rpi-lcd-35 has an SPI to 16-bit parallel bus converter
+ * in front of the display controller. This means that 8-bit values have to be
+ * transferred as 16-bit.
+ */
+static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ size_t num)
+{
+ struct spi_device *spi = mipi->spi;
+ void *data = par;
+ u32 speed_hz;
+ int i, ret;
+ __be16 *buf;
+
+ buf = kmalloc(32 * sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /*
+ * The displays are Raspberry Pi HATs connected to the 8-bit-only SPI
+ * controller, so the 16-bit command and parameters need byte swapping
+ * before being transferred as 8-bit on the big-endian SPI bus.
+ * Pixel data bytes have already been swapped before this function is
+ * called.
+ */
+ buf[0] = cpu_to_be16(*cmd);
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
+ if (ret || !num)
+ goto free;
+
+ /* 8-bit configuration data, not 16-bit pixel data */
+ if (num <= 32) {
+ for (i = 0; i < num; i++)
+ buf[i] = cpu_to_be16(par[i]);
+ num *= 2;
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ data = buf;
+ }
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
+ free:
+ kfree(buf);
+
+ return ret;
+}
+
+static void waveshare_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ u8 addr_mode;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
+ if (ret < 0)
+ goto out_exit;
+ if (ret == 1)
+ goto out_enable;
+
+ mipi_dbi_command(dbi, ILI9486_ITFCTR1);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(250);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+
+ mipi_dbi_command(dbi, ILI9486_PWCTRL1, 0x44);
+
+ mipi_dbi_command(dbi, ILI9486_VMCTRL1, 0x00, 0x00, 0x00, 0x00);
+
+ mipi_dbi_command(dbi, ILI9486_PGAMCTRL,
+ 0x0F, 0x1F, 0x1C, 0x0C, 0x0F, 0x08, 0x48, 0x98,
+ 0x37, 0x0A, 0x13, 0x04, 0x11, 0x0D, 0x0);
+ mipi_dbi_command(dbi, ILI9486_NGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+ mipi_dbi_command(dbi, ILI9486_DGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+ out_enable:
+ switch (dbidev->rotation) {
+ case 90:
+ addr_mode = ILI9486_MADCTL_MY;
+ break;
+ case 180:
+ addr_mode = ILI9486_MADCTL_MV;
+ break;
+ case 270:
+ addr_mode = ILI9486_MADCTL_MX;
+ break;
+ default:
+ addr_mode = ILI9486_MADCTL_MV | ILI9486_MADCTL_MY |
+ ILI9486_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9486_MADCTL_BGR;
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
+ out_exit:
+ drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
+ .enable = waveshare_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = mipi_dbi_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode waveshare_mode = {
+ DRM_SIMPLE_MODE(480, 320, 73, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+
+static struct drm_driver ili9486_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &ili9486_fops,
+ .release = mipi_dbi_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9486",
+ .desc = "Ilitek ILI9486",
+ .date = "20200118",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9486_of_match[] = {
+ { .compatible = "waveshare,rpi-lcd-35" },
+ { .compatible = "ozzmaker,piscreen" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ili9486_of_match);
+
+static const struct spi_device_id ili9486_id[] = {
+ { "ili9486", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9486_id);
+
+static int ili9486_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
+ struct drm_device *drm;
+ struct mipi_dbi *dbi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
+ return -ENOMEM;
+
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
+ if (ret) {
+ kfree(dbidev);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(dbi->reset);
+ }
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
+ if (ret)
+ return ret;
+
+ dbi->command = waveshare_command;
+ dbi->read_commands = NULL;
+
+ ret = mipi_dbi_dev_init(dbidev, &waveshare_pipe_funcs,
+ &waveshare_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
+}
+
+static int ili9486_remove(struct spi_device *spi)
+{
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
+
+static void ili9486_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
+
+static struct spi_driver ili9486_spi_driver = {
+ .driver = {
+ .name = "ili9486",
+ .of_match_table = ili9486_of_match,
+ },
+ .id_table = ili9486_id,
+ .probe = ili9486_probe,
+ .remove = ili9486_remove,
+ .shutdown = ili9486_shutdown,
+};
+module_spi_driver(ili9486_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9486 DRM driver");
+MODULE_AUTHOR("Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>");
+MODULE_LICENSE("GPL");
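The command path of this driver rewards a worked example: waveshare_command() widens every 8-bit value to a big-endian 16-bit word before it hits the wire, to feed the SPI-to-parallel converter. The bytes below follow from the code above, assuming MIPI_DCS_SET_PIXEL_FORMAT is 0x3a:

	/* "set pixel format to 16bpp" (command 0x3a, parameter 0x55)
	 * leaves the 8-bit SPI controller as four bytes:
	 *
	 *	DC low :  0x00 0x3a	(command word)
	 *	DC high:  0x00 0x55	(parameter word)
	 */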
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 76d179200775..f5ebcaf7ee3a 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -17,7 +17,7 @@
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/sched/clock.h>
#include <linux/spi/spi.h>
#include <linux/thermal.h>
@@ -33,13 +33,13 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#define REPAPER_RID_G2_COG_ID 0x12
enum repaper_model {
+ /* 0 is reserved to avoid clashing with NULL */
E1144CS021 = 1,
E1190CS021,
E2200CS021,
@@ -856,18 +856,10 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
repaper_fb_dirty(state->fb);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -995,21 +987,21 @@ static int repaper_probe(struct spi_device *spi)
{
const struct drm_display_mode *mode;
const struct spi_device_id *spi_id;
- const struct of_device_id *match;
struct device *dev = &spi->dev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
struct drm_device *drm;
+ const void *match;
int ret;
- match = of_match_device(repaper_of_match, dev);
+ match = device_get_match_data(dev);
if (match) {
- model = (enum repaper_model)match->data;
+ model = (enum repaper_model)match;
} else {
spi_id = spi_get_device_id(spi);
- model = spi_id->driver_data;
+ model = (enum repaper_model)spi_id->driver_data;
}
/* The SPI device is used to allocate dma memory */
@@ -1197,7 +1189,6 @@ static void repaper_shutdown(struct spi_device *spi)
static struct spi_driver repaper_spi_driver = {
.driver = {
.name = "repaper",
- .owner = THIS_MODULE,
.of_match_table = repaper_of_match,
},
.id_table = repaper_id,
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 060cc756194f..9ef559dd3191 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -23,7 +23,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
/* controller-specific commands */
#define ST7586_DISP_MODE_GRAY 0x38
@@ -159,18 +158,10 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
st7586_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3f4487c71684..3cd9b8d9888d 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * DRM driver for Sitronix ST7735R panels
+ * DRM driver for display panels connected to a Sitronix ST7715R or ST7735R
+ * display controller in SPI mode.
*
* Copyright 2017 David Lechner <david@lechnology.com>
+ * Copyright (C) 2019 Glider bvba
*/
#include <linux/backlight.h>
@@ -37,12 +39,28 @@
#define ST7735R_MY BIT(7)
#define ST7735R_MX BIT(6)
#define ST7735R_MV BIT(5)
+#define ST7735R_RGB BIT(3)
+
+struct st7735r_cfg {
+ const struct drm_display_mode mode;
+ unsigned int left_offset;
+ unsigned int top_offset;
+ unsigned int write_only:1;
+ unsigned int rgb:1; /* RGB (vs. BGR) */
+};
+
+struct st7735r_priv {
+ struct mipi_dbi_dev dbidev; /* Must be first for .release() */
+ const struct st7735r_cfg *cfg;
+};
-static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static void st7735r_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct st7735r_priv *priv = container_of(dbidev, struct st7735r_priv,
+ dbidev);
struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
u8 addr_mode;
@@ -87,6 +105,10 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7735R_MY | ST7735R_MV;
break;
}
+
+ if (priv->cfg->rgb)
+ addr_mode |= ST7735R_RGB;
+
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
@@ -109,15 +131,24 @@ out_exit:
drm_dev_exit(idx);
}
-static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
- .enable = jd_t18003_t01_pipe_enable,
+static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
+ .enable = st7735r_pipe_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
-static const struct drm_display_mode jd_t18003_t01_mode = {
- DRM_SIMPLE_MODE(128, 160, 28, 35),
+static const struct st7735r_cfg jd_t18003_t01_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 160, 28, 35) },
+ /* Cannot read from Adafruit 1.8" display via SPI */
+ .write_only = true,
+};
+
+static const struct st7735r_cfg rh128128t_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 128, 25, 26) },
+ .left_offset = 2,
+ .top_offset = 3,
+ .rgb = true,
};
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -136,13 +167,14 @@ static struct drm_driver st7735r_driver = {
};
static const struct of_device_id st7735r_of_match[] = {
- { .compatible = "jianda,jd-t18003-t01" },
+ { .compatible = "jianda,jd-t18003-t01", .data = &jd_t18003_t01_cfg },
+ { .compatible = "okaya,rh128128t", .data = &rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(of, st7735r_of_match);
static const struct spi_device_id st7735r_id[] = {
- { "jd-t18003-t01", 0 },
+ { "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
{ },
};
MODULE_DEVICE_TABLE(spi, st7735r_id);
@@ -150,17 +182,26 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ const struct st7735r_cfg *cfg;
struct mipi_dbi_dev *dbidev;
+ struct st7735r_priv *priv;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
+ cfg = device_get_match_data(&spi->dev);
+ if (!cfg)
+ cfg = (void *)spi_get_device_id(spi)->driver_data;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ dbidev = &priv->dbidev;
+ priv->cfg = cfg;
+
dbi = &dbidev->dbi;
drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
@@ -193,10 +234,14 @@ static int st7735r_probe(struct spi_device *spi)
if (ret)
return ret;
- /* Cannot read from Adafruit 1.8" display via SPI */
- dbi->read_commands = NULL;
+ if (cfg->write_only)
+ dbi->read_commands = NULL;
+
+ dbidev->left_offset = cfg->left_offset;
+ dbidev->top_offset = cfg->top_offset;
- ret = mipi_dbi_dev_init(dbidev, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &st7735r_pipe_funcs, &cfg->mode,
+ rotation);
if (ret)
return ret;
@@ -231,7 +276,6 @@ static void st7735r_shutdown(struct spi_device *spi)
static struct spi_driver st7735r_spi_driver = {
.driver = {
.name = "st7735r",
- .owner = THIS_MODULE,
.of_match_table = st7735r_of_match,
},
.id_table = st7735r_id,
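Both repaper and st7735r now resolve their per-panel data the same way; as a sketch, the probe-time pattern is:

	/* Prefer OF/ACPI match data; fall back to the legacy SPI id
	 * table so non-DT instantiation keeps working.
	 */
	cfg = device_get_match_data(&spi->dev);
	if (!cfg)
		cfg = (void *)spi_get_device_id(spi)->driver_data;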
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5df596fb0280..9e07c3f75156 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -145,34 +145,12 @@ static inline uint32_t ttm_bo_type_flags(unsigned type)
return 1 << (type);
}
-static void ttm_bo_release_list(struct kref *list_kref)
-{
- struct ttm_buffer_object *bo =
- container_of(list_kref, struct ttm_buffer_object, list_kref);
- size_t acc_size = bo->acc_size;
-
- BUG_ON(kref_read(&bo->list_kref));
- BUG_ON(kref_read(&bo->kref));
- BUG_ON(bo->mem.mm_node != NULL);
- BUG_ON(!list_empty(&bo->lru));
- BUG_ON(!list_empty(&bo->ddestroy));
- ttm_tt_destroy(bo->ttm);
- atomic_dec(&ttm_bo_glob.bo_count);
- dma_fence_put(bo->moving);
- if (!ttm_bo_uses_embedded_gem_object(bo))
- dma_resv_fini(&bo->base._resv);
- bo->destroy(bo);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
-}
-
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- dma_resv_assert_held(bo->base.resv);
-
if (!list_empty(&bo->lru))
return;
@@ -181,21 +159,14 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
man = &bdev->man[mem->mem_type];
list_add_tail(&bo->lru, &man->lru[bo->priority]);
- kref_get(&bo->list_kref);
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
- kref_get(&bo->list_kref);
}
}
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -203,12 +174,10 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
if (!list_empty(&bo->lru)) {
list_del_init(&bo->lru);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
@@ -372,14 +341,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
moved:
- if (bo->evicted) {
- if (bdev->driver->invalidate_caches) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
- }
- bo->evicted = false;
- }
+ bo->evicted = false;
if (bo->mem.mm_node)
bo->offset = (bo->mem.start << PAGE_SHIFT) +
@@ -428,92 +390,49 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
BUG_ON(!dma_resv_trylock(&bo->base._resv));
r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
+ dma_resv_unlock(&bo->base._resv);
if (r)
- dma_resv_unlock(&bo->base._resv);
+ return r;
+
+ if (bo->type != ttm_bo_type_sg) {
+ /* This works because the BO is about to be destroyed and nobody
+ * reference it any more. The only tricky case is the trylock on
+ * the resv object while holding the lru_lock.
+ */
+ spin_lock(&ttm_bo_glob.lru_lock);
+ bo->base.resv = &bo->base._resv;
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ }
return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
+ struct dma_resv *resv = &bo->base._resv;
struct dma_resv_list *fobj;
struct dma_fence *fence;
int i;
- fobj = dma_resv_get_list(&bo->base._resv);
- fence = dma_resv_get_excl(&bo->base._resv);
+ rcu_read_lock();
+ fobj = rcu_dereference(resv->fence);
+ fence = rcu_dereference(resv->fence_excl);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(bo->base.resv));
+ fence = rcu_dereference(fobj->shared[i]);
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
}
-}
-
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle
- */
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
- 30 * HZ);
- spin_lock(&ttm_bo_glob.lru_lock);
- goto error;
- }
-
- spin_lock(&ttm_bo_glob.lru_lock);
- ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
- if (!ret) {
- if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
- ttm_bo_del_from_lru(bo);
- spin_unlock(&ttm_bo_glob.lru_lock);
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
- ttm_bo_cleanup_memtype_use(bo);
- dma_resv_unlock(bo->base.resv);
- return;
- }
-
- ttm_bo_flush_all_fences(bo);
-
- /*
- * Make NO_EVICT bos immediately available to
- * shrinkers, now that they are queued for
- * destruction.
- */
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_move_to_lru_tail(bo, NULL);
- }
-
- dma_resv_unlock(bo->base.resv);
- }
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
-error:
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&ttm_bo_glob.lru_lock);
-
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
+ rcu_read_unlock();
}
/**
* function ttm_bo_cleanup_refs
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, do nothing.
+ * If bo idle, remove from lru lists, and unref.
+ * If not idle, block if possible.
*
* Must be called with lru_lock and reservation held, this function
* will drop the lru lock and optionally the reservation lock before returning.
@@ -527,14 +446,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct dma_resv *resv;
+ struct dma_resv *resv = &bo->base._resv;
int ret;
- if (unlikely(list_empty(&bo->ddestroy)))
- resv = bo->base.resv;
- else
- resv = &bo->base._resv;
-
if (dma_resv_test_signaled_rcu(resv, true))
ret = 0;
else
@@ -547,9 +461,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&ttm_bo_glob.lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true,
- interruptible,
- 30 * HZ);
+ lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+ 30 * HZ);
if (lret < 0)
return lret;
@@ -581,14 +494,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+
return 0;
}
@@ -610,8 +523,9 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
ddestroy);
- kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
+ if (!ttm_bo_get_unless_zero(bo))
+ continue;
if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
@@ -626,7 +540,7 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
spin_unlock(&glob->lru_lock);
}
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
spin_lock(&glob->lru_lock);
}
list_splice_tail(&removed, &bdev->ddestroy);
@@ -652,16 +566,69 @@ static void ttm_bo_release(struct kref *kref)
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ size_t acc_size = bo->acc_size;
+ int ret;
- if (bo->bdev->driver->release_notify)
- bo->bdev->driver->release_notify(bo);
+ if (!bo->deleted) {
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort, if we fail to allocate memory for the
+ * fences block for the BO to become idle
+ */
+ dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
+ 30 * HZ);
+ }
- drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_lock(man, false);
- ttm_mem_io_free_vm(bo);
- ttm_mem_io_unlock(man);
- ttm_bo_cleanup_refs_or_queue(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ if (bo->bdev->driver->release_notify)
+ bo->bdev->driver->release_notify(bo);
+
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
+ }
+
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+ /* The BO is not idle, resurrect it for delayed destroy */
+ ttm_bo_flush_all_fences(bo);
+ bo->deleted = true;
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+
+ /*
+ * Make NO_EVICT bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ */
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+ bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ }
+
+ kref_init(&bo->kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ return;
+ }
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ list_del(&bo->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ ttm_bo_cleanup_memtype_use(bo);
+
+ BUG_ON(bo->mem.mm_node != NULL);
+ atomic_dec(&ttm_bo_glob.bo_count);
+ dma_fence_put(bo->moving);
+ if (!ttm_bo_uses_embedded_gem_object(bo))
+ dma_resv_fini(&bo->base._resv);
+ bo->destroy(bo);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
void ttm_bo_put(struct ttm_buffer_object *bo)
@@ -764,8 +731,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
- if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
- || !list_empty(&bo->ddestroy))
+ if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
ret = true;
*locked = false;
if (busy)
@@ -846,6 +812,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
dma_resv_unlock(bo->base.resv);
continue;
}
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
break;
}
@@ -857,21 +828,19 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
- if (busy_bo)
- kref_get(&busy_bo->list_kref);
+ if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
+ busy_bo = NULL;
spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- kref_put(&busy_bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(busy_bo);
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
ctx->no_wait_gpu, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -881,7 +850,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (locked)
ttm_bo_unreserve(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -1226,6 +1195,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
+
+ /*
+ * Remove the backing store if no placement is given.
+ */
+ if (!placement->num_placement && !placement->num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
+
/*
* Check whether we need to move buffer.
*/
@@ -1293,7 +1274,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
- kref_init(&bo->list_kref);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1813,11 +1793,18 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
- NULL)) {
- ret = 0;
- break;
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ NULL))
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
}
+
+ ret = 0;
+ break;
}
if (!ret)
break;
@@ -1828,11 +1815,9 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -1886,7 +1871,7 @@ out:
*/
if (locked)
dma_resv_unlock(bo->base.resv);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);
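The ttm_bo.c changes collapse the separate list_kref into the main object kref. The new invariant: being on an LRU or ddestroy list no longer pins the BO, so any lookup that starts from a list must take a conditional reference. A sketch of the resulting pattern, using the helpers seen above (lru_list is a stand-in name, error handling elided):

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(bo, lru_list, lru) {
		if (!ttm_bo_get_unless_zero(bo))
			continue;	/* already on its way out, skip */
		break;			/* got a reference, leave the loop */
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	/* ... evict or swap out bo ..., then drop the reference: */
	ttm_bo_put(bo);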
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 953c82a4f573..52d2b71f1588 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -507,11 +507,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- if (bo->base.resv == &bo->base._resv)
+ if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 389128b8c4dd..6ee3b96f0d13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -59,9 +59,10 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* If possible, avoid waiting for GPU with mmap_sem
- * held.
+ * held. We only do this if the fault allows retry and this
+ * is the first attempt.
*/
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault_flag_allow_retry_first(vmf->flags)) {
ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
@@ -135,7 +136,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+ /*
+ * If the fault allows retry and this is the first
+ * fault attempt, we try to release the mmap_sem
+ * before waiting.
+ */
+ if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
@@ -156,6 +162,89 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
+ * @vmf: Fault data
+ * @bo: The buffer object
+ * @page_offset: Page offset from bo start
+ * @fault_page_size: The size of the fault in pages.
+ * @pgprot: The page protections.
+ *
+ * Does additional checking whether it's possible to insert a PUD or PMD
+ * pfn and performs the insertion.
+ *
+ * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
+ * a huge fault was not possible or if the insertion failed.
+ */
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+ struct ttm_buffer_object *bo,
+ pgoff_t page_offset,
+ pgoff_t fault_page_size,
+ pgprot_t pgprot)
+{
+ pgoff_t i;
+ vm_fault_t ret;
+ unsigned long pfn;
+ pfn_t pfnt;
+ struct ttm_tt *ttm = bo->ttm;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+ /* Fault should not cross bo boundary. */
+ page_offset &= ~(fault_page_size - 1);
+ if (page_offset + fault_page_size > bo->num_pages)
+ goto out_fallback;
+
+ if (bo->mem.bus.is_iomem)
+ pfn = ttm_bo_io_mem_pfn(bo, page_offset);
+ else
+ pfn = page_to_pfn(ttm->pages[page_offset]);
+
+ /* pfn must be fault_page_size aligned. */
+ if ((pfn & (fault_page_size - 1)) != 0)
+ goto out_fallback;
+
+ /* Check that memory is contiguous. */
+ if (!bo->mem.bus.is_iomem) {
+ for (i = 1; i < fault_page_size; ++i) {
+ if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
+ goto out_fallback;
+ }
+ } else if (bo->bdev->driver->io_mem_pfn) {
+ for (i = 1; i < fault_page_size; ++i) {
+ if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
+ goto out_fallback;
+ }
+ }
+
+ pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
+ if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
+ ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
+ ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
+#endif
+ else
+ WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
+
+ if (ret != VM_FAULT_NOPAGE)
+ goto out_fallback;
+
+ return VM_FAULT_NOPAGE;
+out_fallback:
+ count_vm_event(THP_FAULT_FALLBACK);
+ return VM_FAULT_FALLBACK;
+}
+#else
+static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
+ struct ttm_buffer_object *bo,
+ pgoff_t page_offset,
+ pgoff_t fault_page_size,
+ pgprot_t pgprot)
+{
+ return VM_FAULT_FALLBACK;
+}
+#endif
+
/**
* ttm_bo_vm_fault_reserved - TTM fault helper
* @vmf: The struct vm_fault given as argument to the fault callback
@@ -163,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
* @num_prefault: Maximum number of prefault pages. The caller may want to
* specify this based on madvice settings and the size of the GPU object
* backed by the memory.
+ * @fault_page_size: The size of the fault in pages.
*
* This function inserts one or more page table entries pointing to the
* memory backing the buffer object, and then returns a return code
@@ -176,7 +266,8 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve);
*/
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
- pgoff_t num_prefault)
+ pgoff_t num_prefault,
+ pgoff_t fault_page_size)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -268,6 +359,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
prot = pgprot_decrypted(prot);
}
+ /* We don't prefault on huge faults. Yet. */
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
+ ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+ fault_page_size, prot);
+ goto out_io_unlock;
+ }
+
/*
* Speculatively prefault a number of pages. Only error on
* first page.
@@ -334,7 +432,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return ret;
prot = vma->vm_page_prot;
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
@@ -344,6 +442,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}
EXPORT_SYMBOL(ttm_bo_vm_fault);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting?
+ * @prot: The page protection value
+ *
+ * Return: true if @prot is write-protecting, false otherwise.
+ */
+static bool ttm_pgprot_is_wrprotecting(pgprot_t prot)
+{
+ /*
+ * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic
+ * way. Unfortunately there is no generic pgprot_wrprotect.
+ */
+ return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) ==
+ pgprot_val(prot);
+}
+
+static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t prot;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ vm_fault_t ret;
+ pgoff_t fault_page_size = 0;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+ switch (pe_size) {
+ case PE_SIZE_PMD:
+ fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+ break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ case PE_SIZE_PUD:
+ fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ return VM_FAULT_FALLBACK;
+ }
+
+ /* Fallback on write dirty-tracking or COW */
+ if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot))
+ return VM_FAULT_FALLBACK;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+#endif
+
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -445,7 +603,10 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
+ .access = ttm_bo_vm_access,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ .huge_fault = ttm_bo_vm_huge_fault,
+#endif
};
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
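For the new huge_fault path, the size arithmetic is easiest to see with concrete numbers (x86-64 with 4 KiB base pages, assumed here purely for illustration):

	/* PMD fault: HPAGE_PMD_SIZE >> PAGE_SHIFT = 2 MiB / 4 KiB = 512
	 * pages. ttm_bo_vm_insert_huge() then requires page_offset
	 * rounded down to a 512-page boundary inside the BO, a pfn whose
	 * low nine bits are clear, and 512 physically contiguous backing
	 * pages; otherwise it returns VM_FAULT_FALLBACK and the fault is
	 * retried with 4 KiB entries.
	 */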
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bf876faea592..faefaaef7909 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -604,7 +604,7 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
p = pool->name;
for (i = 0; i < ARRAY_SIZE(t); i++) {
if (type & t[i]) {
- p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
}
}
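The one-character change above fixes a classic pointer-advance bug. snprintf() returns the length that would have been written had the buffer been big enough, so in a "p += snprintf(...)" accumulation p can step past the end of the buffer and the next size computation wraps. scnprintf() returns the bytes actually stored. A minimal sketch:

	char name[16];
	char *p = name;

	p += scnprintf(p, sizeof(name) - (p - name), "%s", "wc");
	p += scnprintf(p, sizeof(name) - (p - name), "%s", "dma32");
	/* p stays inside name[] even when the output is truncated */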
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index e9671d38b4a0..0afdfb0d1fe1 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -109,7 +109,6 @@ static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
};
static const struct drm_connector_funcs udl_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 22af17959053..d59ebac70b15 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -375,8 +375,6 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
- crtc_state->no_vblank = true;
-
buf = (char *)udl->mode_buf;
/* This first section has to do with setting the base address on the
@@ -428,14 +426,6 @@ udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
}
-static int
-udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
-{
- return 0;
-}
-
static void
udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
@@ -457,7 +447,6 @@ struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
.mode_valid = udl_simple_display_pipe_mode_valid,
.enable = udl_simple_display_pipe_enable,
.disable = udl_simple_display_pipe_disable,
- .check = udl_simple_display_pipe_check,
.update = udl_simple_display_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9a35c555ec52..ac2603334587 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -254,27 +254,42 @@ struct v3d_csd_job {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- msleep(1); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
/* nsecs_to_jiffies64() does not guard against overflow */
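The rewritten macro trades the old jiffies/msleep(1) loop for usleep_range() with exponential backoff between Wmin and Wmax microseconds. Usage is unchanged; a sketch with an invented status register and bit:

	/* Poll an (invented) ready bit for at most 10 ms; the first
	 * sleeps are 10-20 us, doubling up to 1 ms between polls.
	 */
	ret = wait_for(readl(status_reg) & BIT(0), 10);
	if (ret == -ETIMEDOUT)
		DRM_ERROR("device never became ready\n");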
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 19612132c8a3..0883a435e62b 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -18,7 +18,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "hgsmi_channels.h"
#include "vbox_drv.h"
@@ -226,17 +225,6 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
}
static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
@@ -838,6 +826,7 @@ static int vbox_connector_init(struct drm_device *dev,
static const struct drm_mode_config_funcs vbox_mode_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index 0592004f71aa..a5de40fe1a76 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -138,7 +138,7 @@ struct vbva_buffer {
u32 data_len;
/* variable size for the rest of the vbva_buffer area in VRAM. */
- u8 data[0];
+ u8 data[];
} __packed;
#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
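Converting data[0] to data[] makes this a C99 flexible array member: the layout is identical, but the compiler and bounds-checking tooling no longer believe the array has exactly zero elements. For an ordinary heap allocation the idiomatic pairing is struct_size(); a sketch, noting that a real vbva_buffer actually lives in VRAM rather than on the heap:

	struct vbva_buffer *vbva;

	/* header plus len payload bytes, with overflow checking */
	vbva = kzalloc(struct_size(vbva, data, len), GFP_KERNEL);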
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index b00e20f5ce05..1208258ad3b2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -84,13 +84,14 @@ static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_HACT_ACT),
};
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 val;
int fifo_lines;
@@ -1030,6 +1031,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1039,6 +1041,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.atomic_flush = vc4_crtc_atomic_flush,
.atomic_enable = vc4_crtc_atomic_enable,
.atomic_disable = vc4_crtc_atomic_disable,
+ .get_scanout_position = vc4_crtc_get_scanout_position,
};
static const struct vc4_crtc_data pv0_data = {
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index c586325de2a5..6dfede03396e 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -252,7 +252,7 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
- return drm_bridge_attach(dpi->encoder, bridge, NULL);
+ return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5e6fb6c2307f..76f93b662766 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -190,9 +190,6 @@ static struct drm_driver vc4_drm_driver = {
.irq_postinstall = vc4_irq_postinstall,
.irq_uninstall = vc4_irq_uninstall,
- .get_scanout_position = vc4_crtc_get_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 6627b20c99e9..139d25a8328e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -65,7 +65,7 @@ struct vc4_perfmon {
* Note that counter values can't be reset, but you can fake a reset by
* destroying the perfmon and creating a new one.
*/
- u64 counters[0];
+ u64 counters[];
};
struct vc4_dev {
@@ -677,32 +677,41 @@ struct vc4_validated_shader_info {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define _wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
- } else { \
- cpu_relax(); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
} \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
@@ -743,10 +752,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index fd8a2eb60505..d99b1d526651 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1619,7 +1619,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
- ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "bridge attach failed: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4934127f0d76..91e408f7a56e 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -139,7 +139,7 @@ static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
static bool plane_enabled(struct drm_plane_state *state)
{
- return state->fb && state->crtc;
+ return state->fb && !WARN_ON(!state->crtc);
}
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5156e6b279db..e27120d512b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
+ virtio_add_bool(m, "indirect", vgdev->has_indirect);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 0966208ec30d..2b7e6ae65546 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,7 +30,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "virtgpu_drv.h"
@@ -91,6 +90,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
crtc->mode.hdisplay,
crtc->mode.vdisplay, 0, 0);
+ virtio_gpu_notify(vgdev);
}
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -109,6 +109,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+ virtio_gpu_notify(vgdev);
output->enabled = false;
}
@@ -121,13 +122,6 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- unsigned long flags;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (crtc->state->event)
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -332,6 +326,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(dev, state);
@@ -375,6 +370,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
- drm_atomic_helper_shutdown(vgdev->ddev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8cf27af3ad53..ab4bed78e656 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -135,7 +136,8 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
- drm_dev_unregister(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
virtio_gpu_deinit(dev);
drm_dev_put(dev);
}
@@ -214,4 +216,6 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
+
+ .release = virtio_gpu_release,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..c1824bdf2418 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -32,6 +32,7 @@
#include <linux/virtio_gpu.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
@@ -68,15 +69,21 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
-
- struct sg_table *pages;
- uint32_t mapped;
bool dumb;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem {
+ struct virtio_gpu_object base;
+ struct sg_table *pages;
+ uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+ container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
@@ -114,6 +121,7 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
virtio_gpu_resp_cb resp_cb;
+ void *resp_cb_data;
struct virtio_gpu_object_array *objs;
struct list_head list;
@@ -175,10 +183,8 @@ struct virtio_gpu_device {
struct virtio_gpu_queue ctrlq;
struct virtio_gpu_queue cursorq;
struct kmem_cache *vbufs;
- bool vqs_ready;
- bool disable_notify;
- bool pending_notify;
+ atomic_t pending_commands;
struct ida resource_ida;
@@ -193,6 +199,7 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
+ bool has_indirect;
struct work_struct config_changed_work;
@@ -207,6 +214,8 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ bool context_created;
+ struct mutex context_lock;
};
/* virtio_ioctl.c */
@@ -216,6 +225,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
+void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
@@ -262,7 +272,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id);
+ struct virtio_gpu_object *bo);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -279,9 +289,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj);
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -332,8 +341,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
/* virtio_gpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
@@ -355,12 +363,16 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+
/* virtgpu_prime.c */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 0a2b62279647..0d6152c99a27 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
return 0;
}
@@ -143,6 +144,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
}
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 205ec4abae2b..336cc9143205 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -33,13 +33,34 @@
#include "virtgpu_drv.h"
+static void virtio_gpu_create_context(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ char dbgname[TASK_COMM_LEN];
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created)
+ goto out_unlock;
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ strlen(dbgname), dbgname);
+ virtio_gpu_notify(vgdev);
+ vfpriv->context_created = true;
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+}
+
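virtio_gpu_create_context() makes host-context creation lazy and idempotent: the first ioctl that needs a context creates it under context_lock, and every later caller sees context_created set and falls straight through. The shape of the idiom in a self-contained pthreads sketch; struct ctx_state and do_create() are illustrative stand-ins for vfpriv and the queued virtio command:

#include <pthread.h>
#include <stdbool.h>

struct ctx_state {
        pthread_mutex_t lock;
        bool created;
};

static void create_context_once(struct ctx_state *ctx, void (*do_create)(void))
{
        pthread_mutex_lock(&ctx->lock);
        if (!ctx->created) {
                do_create();          /* runs at most once per file handle */
                ctx->created = true;  /* set only after the command is queued */
        }
        pthread_mutex_unlock(&ctx->lock);
}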
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+ return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
virtio_gpu_map->handle,
&virtio_gpu_map->offset);
}
@@ -51,11 +72,11 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
* VIRTIO_GPUReleaseInfo struct (first XXX bytes)
*/
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *drm_file)
+ struct drm_file *file)
{
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_fence *out_fence;
int ret;
uint32_t *bo_handles = NULL;
@@ -74,6 +95,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
exbuf->fence_fd = -1;
+ virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
@@ -116,7 +138,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ buflist = virtio_gpu_array_from_handles(file, bo_handles,
exbuf->num_bo_handles);
if (!buflist) {
ret = -ENOENT;
@@ -126,22 +148,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
bo_handles = NULL;
}
- if (buflist) {
- ret = virtio_gpu_array_lock_resv(buflist);
- if (ret)
- goto out_unused_fd;
- }
-
buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
- goto out_unresv;
+ goto out_unused_fd;
+ }
+
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_memdup;
}
out_fence = virtio_gpu_fence_alloc(vgdev);
if(!out_fence) {
ret = -ENOMEM;
- goto out_memdup;
+ goto out_unresv;
}
if (out_fence_fd >= 0) {
@@ -158,13 +180,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
+ virtio_gpu_notify(vgdev);
return 0;
-out_memdup:
- kvfree(buf);
out_unresv:
if (buflist)
virtio_gpu_array_unlock_resv(buflist);
+out_memdup:
+ kvfree(buf);
out_unused_fd:
kvfree(bo_handles);
if (buflist)
@@ -177,7 +200,7 @@ out_unused_fd:
}
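The relabelled error path mirrors the new acquisition order: the reservation lock is now taken after vmemdup_user(), so a later failure must drop the lock before freeing the buffer, and out_unresv therefore falls through into out_memdup. A generic sketch of that reverse-order goto unwind, with lock_bufs()/unlock_bufs()/run() as illustrative stubs rather than driver functions:

#include <errno.h>
#include <stdlib.h>

/* illustrative stubs, not driver functions */
static int lock_bufs(void)    { return 0; }
static void unlock_bufs(void) { }
static int run(void)          { return 0; }

static int submit(void)
{
        void *buf = malloc(64);      /* step 1: copy in the command */
        int ret;

        if (!buf)
                return -ENOMEM;

        ret = lock_bufs();           /* step 2: lock reservations */
        if (ret)
                goto out_free;       /* only step 1 to undo */

        ret = run();
        if (ret)
                goto out_unlock;     /* undo step 2, then step 1 */

        free(buf);
        return 0;

out_unlock:
        unlock_bufs();
out_free:
        free(buf);
        return ret;
}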
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_getparam *param = data;
@@ -200,7 +223,7 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
@@ -211,7 +234,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle = 0;
struct virtio_gpu_object_params params = { 0 };
- if (vgdev->has_virgl_3d == false) {
+ if (vgdev->has_virgl_3d) {
+ virtio_gpu_create_context(dev, file);
+ params.virgl = true;
+ params.target = rc->target;
+ params.bind = rc->bind;
+ params.depth = rc->depth;
+ params.array_size = rc->array_size;
+ params.last_level = rc->last_level;
+ params.nr_samples = rc->nr_samples;
+ params.flags = rc->flags;
+ } else {
if (rc->depth > 1)
return -EINVAL;
if (rc->nr_samples > 1)
@@ -228,16 +261,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
params.width = rc->width;
params.height = rc->height;
params.size = rc->size;
- if (vgdev->has_virgl_3d) {
- params.virgl = true;
- params.target = rc->target;
- params.bind = rc->bind;
- params.depth = rc->depth;
- params.array_size = rc->array_size;
- params.last_level = rc->last_level;
- params.nr_samples = rc->nr_samples;
- params.flags = rc->flags;
- }
/* allocate a single page size object */
if (params.size == 0)
params.size = PAGE_SIZE;
@@ -251,7 +274,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
return ret;
obj = &qobj->base.base;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, obj, &handle);
if (ret) {
drm_gem_object_release(obj);
return ret;
@@ -264,13 +287,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_virtgpu_resource_info *ri = data;
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
- gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
+ gobj = drm_gem_object_lookup(file, ri->bo_handle);
if (gobj == NULL)
return -ENOENT;
@@ -297,6 +320,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
+ virtio_gpu_create_context(dev, file);
objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
if (objs == NULL)
return -ENOENT;
@@ -314,6 +338,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
(vgdev, vfpriv->ctx_id, offset, args->level,
&args->box, objs, fence);
dma_fence_put(&fence->f);
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -344,6 +369,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->box.w, args->box.h, args->box.x, args->box.y,
objs, NULL);
} else {
+ virtio_gpu_create_context(dev, file);
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -359,6 +385,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &args->box, objs, fence);
dma_fence_put(&fence->f);
}
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -445,6 +472,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
/* not in cache - need to talk to hw */
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
&cache_ent);
+ virtio_gpu_notify(vgdev);
copy_exit:
ret = wait_event_timeout(vgdev->resp_wq,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 2f5773e43557..023a030ca7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
@@ -51,22 +52,11 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear);
}
-static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
- uint32_t nlen, const char *name)
-{
- int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
- handle += 1;
- virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
- return handle;
-}
-
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t ctx_id)
{
virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+ virtio_gpu_notify(vgdev);
ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}
@@ -92,6 +82,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
}
for (i = 0; i < num_capsets; i++) {
virtio_gpu_cmd_get_capset_info(vgdev, i);
+ virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
if (ret == 0) {
@@ -159,6 +150,9 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+ vgdev->has_indirect = true;
+ }
DRM_INFO("features: %cvirgl %cedid\n",
vgdev->has_virgl_3d ? '+' : '-',
@@ -196,13 +190,13 @@ int virtio_gpu_init(struct drm_device *dev)
virtio_gpu_modeset_init(vgdev);
virtio_device_ready(vgdev->vdev);
- vgdev->vqs_ready = true;
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
return 0;
@@ -231,12 +225,16 @@ void virtio_gpu_deinit(struct drm_device *dev)
struct virtio_gpu_device *vgdev = dev->dev_private;
flush_work(&vgdev->obj_free_work);
- vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
vgdev->vdev->config->reset(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
+}
+
+void virtio_gpu_release(struct drm_device *dev)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
@@ -249,8 +247,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv;
- int id;
- char dbgname[TASK_COMM_LEN];
+ int handle;
/* can't create contexts without 3d renderer */
if (!vgdev->has_virgl_3d)
@@ -261,14 +258,15 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
if (!vfpriv)
return -ENOMEM;
- get_task_comm(dbgname, current);
- id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
- if (id < 0) {
+ mutex_init(&vfpriv->context_lock);
+
+ handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
+ if (handle < 0) {
kfree(vfpriv);
- return id;
+ return handle;
}
- vfpriv->ctx_id = id;
+ vfpriv->ctx_id = handle + 1;
file->driver_priv = vfpriv;
return 0;
}
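ida_alloc() hands out ids starting at 0, while the driver keeps context id 0 unused, hence the handle + 1 stored here and the matching ctx_id - 1 in virtio_gpu_context_destroy(). A toy sketch of that off-by-one mapping; alloc_id()/free_id() stand in for ida_alloc()/ida_free() and deliberately skip id reuse:

#include <assert.h>

static int next_id;

static int alloc_id(void)   { return next_id++; }   /* 0-based, like IDA */
static void free_id(int id) { assert(id >= 0); }

static int ctx_open(void)
{
        return alloc_id() + 1;   /* keep protocol id 0 unused */
}

static void ctx_close(int ctx_id)
{
        free_id(ctx_id - 1);     /* convert back before freeing */
}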
@@ -284,6 +282,7 @@ void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
vfpriv = file->driver_priv;
virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+ mutex_destroy(&vfpriv->context_lock);
kfree(vfpriv);
file->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 3af7ec80c7da..2bfb13d1932e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
@@ -61,21 +62,46 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
}
}
-static void virtio_gpu_free_object(struct drm_gem_object *obj)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- if (bo->pages)
- virtio_gpu_object_detach(vgdev, bo);
- if (bo->created)
- virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+ if (virtio_gpu_is_shmem(bo)) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+ if (shmem->pages) {
+ if (shmem->mapped) {
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl, shmem->mapped,
+ DMA_TO_DEVICE);
+ shmem->mapped = 0;
+ }
+
+ sg_free_table(shmem->pages);
+ shmem->pages = NULL;
+ drm_gem_shmem_unpin(&bo->base.base);
+ }
+
+ drm_gem_shmem_free_object(&bo->base.base);
+ }
+}
+
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- drm_gem_shmem_free_object(obj);
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ /* completion handler calls virtio_gpu_cleanup_object() */
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
}
-static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
@@ -86,9 +112,14 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = &drm_gem_shmem_mmap,
+ .mmap = drm_gem_shmem_mmap,
};
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+ return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
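virtio_gpu_is_shmem() identifies the backing type by comparing the object's funcs pointer against the shmem ops table, which avoids carrying a separate type tag once more than one backing type exists. The same idiom in a self-contained sketch; struct obj and shmem_ops are illustrative, not the driver's types:

#include <stdbool.h>

struct ops { void (*free)(void *); };

static void shmem_free(void *p) { (void)p; }
static const struct ops shmem_ops = { .free = shmem_free };

struct obj { const struct ops *funcs; };

static bool is_shmem(const struct obj *o)
{
        /* each backing type installs its own ops table, so comparing the
         * pointer identifies the concrete type without a tag field */
        return o->funcs == &shmem_ops;
}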
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size)
{
@@ -98,11 +129,58 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
if (!bo)
return NULL;
- bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ bo->base.base.funcs = &virtio_gpu_shmem_funcs;
bo->base.map_cached = true;
return &bo->base.base;
}
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+ struct scatterlist *sg;
+ int si, ret;
+
+ ret = drm_gem_shmem_pin(&bo->base.base);
+ if (ret < 0)
+ return -EINVAL;
+
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ if (!shmem->pages) {
+ drm_gem_shmem_unpin(&bo->base.base);
+ return -EINVAL;
+ }
+
+ if (use_dma_api) {
+ shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl,
+ shmem->pages->nents,
+ DMA_TO_DEVICE);
+ *nents = shmem->mapped;
+ } else {
+ *nents = shmem->pages->nents;
+ }
+
+ *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ DRM_ERROR("failed to allocate ent list\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(shmem->pages->sgl, sg, *nents, si) {
+ (*ents)[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
+ (*ents)[si].length = cpu_to_le32(sg->length);
+ (*ents)[si].padding = 0;
+ }
+ return 0;
+}
+
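virtio_gpu_object_shmem_init() pins the shmem pages, obtains (and, when the DMA API is in use, DMA-maps) the scatter-gather table, and flattens it into an array of little-endian {addr, length, padding} entries for the host. The flattening step in isolation, as a userspace sketch; struct seg and struct ment are illustrative stand-ins for struct scatterlist and struct virtio_gpu_mem_entry:

#include <endian.h>
#include <stdint.h>
#include <stdlib.h>

struct seg  { uint64_t addr; uint32_t len; };            /* one sg segment */
struct ment { uint64_t addr; uint32_t length; uint32_t padding; };

static struct ment *segs_to_ents(const struct seg *segs, unsigned int n)
{
        struct ment *ents = calloc(n, sizeof(*ents));
        unsigned int i;

        if (!ents)
                return NULL;
        for (i = 0; i < n; i++) {
                ents[i].addr    = htole64(segs[i].addr); /* device is LE */
                ents[i].length  = htole32(segs[i].len);
                ents[i].padding = 0;
        }
        return ents;
}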
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
@@ -111,6 +189,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
+ struct virtio_gpu_mem_entry *ents;
+ unsigned int nents;
int ret;
*bo_ptr = NULL;
@@ -147,12 +227,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
objs, fence);
}
- ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
+ return ret;
+ }
+
+ ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
if (ret != 0) {
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}
+ virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index d1c3f5fbfee4..52d24179bcec 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -148,14 +148,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w >> 16,
plane->state->src_h >> 16,
0, 0);
+ virtio_gpu_notify(vgdev);
return;
}
if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
return;
- virtio_gpu_disable_notify(vgdev);
-
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
if (bo->dumb)
virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
@@ -186,8 +185,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1);
-
- virtio_gpu_enable_notify(vgdev);
+ virtio_gpu_notify(vgdev);
}
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -266,6 +264,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
plane->state->crtc_w,
plane->state->crtc_h,
0, 0, objs, vgfb->fence);
+ virtio_gpu_notify(vgdev);
dma_fence_wait(&vgfb->fence->f, true);
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5914e79d3429..73854915ec34 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -95,7 +95,8 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
if (!vbuf)
return ERR_PTR(-ENOMEM);
- BUG_ON(size > MAX_INLINE_CMD_SIZE);
+ BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+ size < sizeof(struct virtio_gpu_ctrl_hdr));
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
vbuf->size = size;
@@ -109,21 +110,14 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
return vbuf;
}
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer **vbuffer_p,
- int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_vbuffer *vbuf;
-
- vbuf = virtio_gpu_get_vbuf(vgdev, size,
- sizeof(struct virtio_gpu_ctrl_hdr),
- NULL, NULL);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
- *vbuffer_p = vbuf;
- return vbuf->buf;
+ /* this assumes a vbuf contains a command that starts with a
+ * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+ * virtqueues.
+ */
+ return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
static struct virtio_gpu_update_cursor*
@@ -161,6 +155,25 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_command *)vbuf->buf;
}
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size,
+ virtio_gpu_resp_cb cb)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
static void free_vbuf(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -209,12 +222,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
- if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+ if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
- cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
- DRM_ERROR("response 0x%x (command 0x%x)\n",
- le32_to_cpu(resp->type),
- le32_to_cpu(cmd->type));
+ cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+ DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+ le32_to_cpu(resp->type),
+ le32_to_cpu(cmd->type));
} else
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
@@ -307,109 +320,107 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
-static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct scatterlist *vout)
- __releases(&vgdev->ctrlq.qlock)
- __acquires(&vgdev->ctrlq.qlock)
+static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vresp;
- int outcnt = 0, incnt = 0;
- bool notify = false;
- int ret;
+ int ret, idx;
- if (!vgdev->vqs_ready)
- return notify;
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ free_vbuf(vgdev, vbuf);
+ return;
+ }
- sg_init_one(&vcmd, vbuf->buf, vbuf->size);
- sgs[outcnt + incnt] = &vcmd;
- outcnt++;
+ if (vgdev->has_indirect)
+ elemcnt = 1;
- if (vout) {
- sgs[outcnt + incnt] = vout;
- outcnt++;
+again:
+ spin_lock(&vgdev->ctrlq.qlock);
+
+ if (vq->num_free < elemcnt) {
+ spin_unlock(&vgdev->ctrlq.qlock);
+ virtio_gpu_notify(vgdev);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+ goto again;
}
- if (vbuf->resp_size) {
- sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
- sgs[outcnt + incnt] = &vresp;
- incnt++;
+ /* now that the position of the vbuf in the virtqueue is known, we can
+ * finally set the fence id
+ */
+ if (fence) {
+ virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ fence);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
}
-retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
- if (ret == -ENOSPC) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
- spin_lock(&vgdev->ctrlq.qlock);
- goto retry;
- } else {
- trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ WARN_ON(ret);
- notify = virtqueue_kick_prepare(vq);
- }
- return notify;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+ atomic_inc(&vgdev->pending_commands);
+
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ drm_dev_exit(idx);
}
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
struct virtio_gpu_fence *fence)
{
- struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *vout = NULL, sg;
+ struct scatterlist *sgs[3], vcmd, vout, vresp;
struct sg_table *sgt = NULL;
- bool notify;
- int outcnt = 0;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vout */
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
+ int sg_ents;
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
- &outcnt);
- if (!sgt)
+ &sg_ents);
+ if (!sgt) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
return;
- vout = sgt->sgl;
+ }
+
+ elemcnt += sg_ents;
+ sgs[outcnt] = sgt->sgl;
} else {
- sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
- vout = &sg;
- outcnt = 1;
+ sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+ elemcnt++;
+ sgs[outcnt] = &vout;
}
+ outcnt++;
}
-again:
- spin_lock(&vgdev->ctrlq.qlock);
-
- /*
- * Make sure we have enough space in the virtqueue. If not
- * wait here until we have.
- *
- * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
- * to wait for free space, which can result in fence ids being
- * submitted out-of-order.
- */
- if (vq->num_free < 2 + outcnt) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
- goto again;
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
}
- if (hdr && fence) {
- virtio_gpu_fence_emit(vgdev, hdr, fence);
- if (vbuf->objs) {
- virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
- virtio_gpu_array_unlock_resv(vbuf->objs);
- }
- }
- notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
- spin_unlock(&vgdev->ctrlq.qlock);
- if (notify) {
- if (vgdev->disable_notify)
- vgdev->pending_notify = true;
- else
- virtqueue_notify(vgdev->ctrlq.vq);
- }
+ virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+ incnt);
if (sgt) {
sg_free_table(sgt);
@@ -417,25 +428,26 @@ again:
}
}
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
- vgdev->disable_notify = true;
-}
-
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
-{
- vgdev->disable_notify = false;
+ bool notify;
- if (!vgdev->pending_notify)
+ if (!atomic_read(&vgdev->pending_commands))
return;
- vgdev->pending_notify = false;
- virtqueue_notify(vgdev->ctrlq.vq);
+
+ spin_lock(&vgdev->ctrlq.qlock);
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
}
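With the disable/enable pair gone, queuing a command only bumps pending_commands; the doorbell rings once per batch when some caller reaches virtio_gpu_notify(), and the flag is cleared under the queue lock so a kick is never lost between the check and the reset. A pthreads/C11-atomics sketch of the batched-kick pattern; queue_cmd() and kick_hw() are illustrative stand-ins for virtqueue_add_sgs() and virtqueue_notify():

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pending;

static void queue_cmd(void (*add)(void))
{
        pthread_mutex_lock(&qlock);
        add();                        /* place buffer in the ring */
        atomic_fetch_add(&pending, 1);
        pthread_mutex_unlock(&qlock);
        /* no kick here: callers batch several commands, then notify */
}

static void notify(void (*kick_hw)(void))
{
        if (!atomic_load(&pending))
                return;               /* nothing queued since last kick */
        pthread_mutex_lock(&qlock);
        atomic_store(&pending, 0);
        pthread_mutex_unlock(&qlock);
        kick_hw();                    /* one doorbell for the whole batch */
}

Batching matters because submissions often arrive back to back (a plane update followed by a flush, for instance), and one notify per batch avoids a host notification, often a VM exit, per command.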
static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -443,12 +455,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ int idx, ret, outcnt;
bool notify;
- int ret;
- int outcnt;
- if (!vgdev->vqs_ready)
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ free_vbuf(vgdev, vbuf);
return;
+ }
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -464,7 +477,7 @@ retry:
goto retry;
} else {
trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ virtio_gpu_vbuf_ctrl_hdr(vbuf));
notify = virtqueue_kick_prepare(vq);
}
@@ -473,6 +486,8 @@ retry:
if (notify)
virtqueue_notify(vq);
+
+ drm_dev_exit(idx);
}
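Both queue paths now use drm_dev_enter()/drm_dev_exit() instead of the vqs_ready flag, so after drm_dev_unplug() each submission is individually rejected and its vbuf freed rather than touching torn-down virtqueues. The guard reduced to its skeleton, with dev_enter()/dev_exit() as illustrative stand-ins (the real pair also holds an SRCU read-side slot so unplug can wait out in-flight sections):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool unplugged;

static bool dev_enter(void) { return !atomic_load(&unplugged); }
static void dev_exit(void)  { }

static int queue(void (*enqueue)(void))
{
        if (!dev_enter())
                return -ENODEV;  /* device gone: drop, don't touch rings */
        enqueue();
        dev_exit();
        return 0;
}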
/* just create gem objects for userspace and long lived objects,
@@ -499,39 +514,36 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->width = cpu_to_le32(params->width);
cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
bo->created = true;
}
-void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_resource_unref *cmd_p;
- struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_object *bo;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- memset(cmd_p, 0, sizeof(*cmd_p));
+ bo = vbuf->resp_cb_data;
+ vbuf->resp_cb_data = NULL;
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
- cmd_p->resource_id = cpu_to_le32(resource_id);
-
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_cleanup_object(bo);
}
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence *fence)
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
{
- struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_resource_unref *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+ virtio_gpu_cmd_unref_cb);
memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ vbuf->resp_cb_data = bo;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
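Resource unref now completes asynchronously: the bo pointer rides in resp_cb_data, and virtio_gpu_cleanup_object() runs from virtio_gpu_cmd_unref_cb() only after the host acknowledges VIRTIO_GPU_CMD_RESOURCE_UNREF, so backing pages are not released while the host may still be reading them. A minimal sketch of deferred free via a completion callback; struct cmd and complete() are illustrative, not the driver's types:

#include <stdlib.h>

struct cmd {
        void (*resp_cb)(struct cmd *);
        void *resp_cb_data;          /* object to free on completion */
};

static void unref_cb(struct cmd *c)
{
        free(c->resp_cb_data);       /* safe: host no longer uses it */
        c->resp_cb_data = NULL;
}

/* called from the response path once the host has processed UNREF */
static void complete(struct cmd *c)
{
        if (c->resp_cb)
                c->resp_cb(c);
}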
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -588,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -606,7 +619,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p->r.x = cpu_to_le32(x);
cmd_p->r.y = cpu_to_le32(y);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
@@ -629,7 +642,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
vbuf->data_buf = ents;
vbuf->data_size = sizeof(*ents) * nents;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -939,7 +952,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -988,7 +1000,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
bo->created = true;
}
@@ -1003,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1021,7 +1035,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
@@ -1047,7 +1061,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1070,94 +1084,19 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->size = cpu_to_le32(data_size);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence)
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_mem_entry *ents;
- struct scatterlist *sg;
- int si, nents, ret;
-
- if (WARN_ON_ONCE(!obj->created))
- return -EINVAL;
- if (WARN_ON_ONCE(obj->pages))
- return -EINVAL;
-
- ret = drm_gem_shmem_pin(&obj->base.base);
- if (ret < 0)
- return -EINVAL;
-
- obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
- if (obj->pages == NULL) {
- drm_gem_shmem_unpin(&obj->base.base);
- return -EINVAL;
- }
-
- if (use_dma_api) {
- obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->pages->nents,
- DMA_TO_DEVICE);
- nents = obj->mapped;
- } else {
- nents = obj->pages->nents;
- }
-
- /* gets freed when the ring has consumed it */
- ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
- GFP_KERNEL);
- if (!ents) {
- DRM_ERROR("failed to allocate ent list\n");
- return -ENOMEM;
- }
-
- for_each_sg(obj->pages->sgl, sg, nents, si) {
- ents[si].addr = cpu_to_le64(use_dma_api
- ? sg_dma_address(sg)
- : sg_phys(sg));
- ents[si].length = cpu_to_le32(sg->length);
- ents[si].padding = 0;
- }
-
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
- ents, nents,
- fence);
+ ents, nents, NULL);
return 0;
}
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj)
-{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
- if (WARN_ON_ONCE(!obj->pages))
- return;
-
- if (use_dma_api && obj->mapped) {
- struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
- /* detach backing and wait for the host to process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
- dma_fence_wait(&fence->f, true);
- dma_fence_put(&fence->f);
-
- /* ... then tear down iommu mappings */
- dma_unmap_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->mapped,
- DMA_TO_DEVICE);
- obj->mapped = 0;
- } else {
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
- }
-
- sg_free_table(obj->pages);
- obj->pages = NULL;
-
- drm_gem_shmem_unpin(&obj->base.base);
-}
-
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
{
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 74f703b8d22a..ac85e17428f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -76,10 +76,12 @@ static void vkms_disable_vblank(struct drm_crtc *crtc)
hrtimer_cancel(&out->vblank_hrtimer);
}
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq)
+static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -154,6 +156,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
.enable_vblank = vkms_enable_vblank,
.disable_vblank = vkms_disable_vblank,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
.get_crc_sources = vkms_get_crc_sources,
.set_crc_source = vkms_set_crc_source,
.verify_crc_source = vkms_verify_crc_source,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 25bd7519295f..860de052e820 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -103,7 +103,6 @@ static struct drm_driver vkms_driver = {
.dumb_create = vkms_dumb_create,
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
- .get_vblank_timestamp = vkms_get_vblank_timestamp,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = vkms_prime_import_sg_table,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 7d52e24564db..eda04ffba7b1 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -111,10 +111,6 @@ struct vkms_gem_object {
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq);
-
int vkms_output_init(struct vkms_device *vkmsdev, int index);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 5fc8f85aaf3d..6d31265a2ab7 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -117,7 +117,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
bool can_position = false;
int ret;
- if (!state->fb | !state->crtc)
+ if (!state->fb || WARN_ON(!state->crtc))
return 0;
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c877a21a0739..31f85f09f1fc 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
- vmwgfx_validation.o vmwgfx_page_dirty.o \
+ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
ttm_object.o ttm_lock.o
+vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 9cbba0e8ce6a..799bc0963f7a 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2020 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -104,12 +104,12 @@ typedef enum {
SVGA_3D_CMD_DEAD1 = 1083,
SVGA_3D_CMD_DEAD2 = 1084,
- SVGA_3D_CMD_LOGICOPS_BITBLT = 1085,
- SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1086,
- SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1087,
- SVGA_3D_CMD_LOGICOPS_COLORFILL = 1088,
- SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1089,
- SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1090,
+ SVGA_3D_CMD_DEAD12 = 1085,
+ SVGA_3D_CMD_DEAD13 = 1086,
+ SVGA_3D_CMD_DEAD14 = 1087,
+ SVGA_3D_CMD_DEAD15 = 1088,
+ SVGA_3D_CMD_DEAD16 = 1089,
+ SVGA_3D_CMD_DEAD17 = 1090,
SVGA_3D_CMD_SET_OTABLE_BASE = 1091,
SVGA_3D_CMD_READBACK_OTABLE = 1092,
@@ -261,30 +261,23 @@ typedef enum {
SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET = 1220,
SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET = 1221,
SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET = 1222,
-
- /*
- * Reserve some IDs to be used for the SM5 shader types.
- */
- SVGA_3D_CMD_DX_RESERVED1 = 1223,
- SVGA_3D_CMD_DX_RESERVED2 = 1224,
- SVGA_3D_CMD_DX_RESERVED3 = 1225,
+ SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET = 1223,
+ SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET = 1224,
+ SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET = 1225,
SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER = 1226,
SVGA_3D_CMD_DX_MAX = 1227,
SVGA_3D_CMD_SCREEN_COPY = 1227,
- /*
- * Reserve some IDs to be used for video.
- */
- SVGA_3D_CMD_VIDEO_RESERVED1 = 1228,
- SVGA_3D_CMD_VIDEO_RESERVED2 = 1229,
- SVGA_3D_CMD_VIDEO_RESERVED3 = 1230,
- SVGA_3D_CMD_VIDEO_RESERVED4 = 1231,
- SVGA_3D_CMD_VIDEO_RESERVED5 = 1232,
- SVGA_3D_CMD_VIDEO_RESERVED6 = 1233,
- SVGA_3D_CMD_VIDEO_RESERVED7 = 1234,
- SVGA_3D_CMD_VIDEO_RESERVED8 = 1235,
+ SVGA_3D_CMD_RESERVED1 = 1228,
+ SVGA_3D_CMD_RESERVED2 = 1229,
+ SVGA_3D_CMD_RESERVED3 = 1230,
+ SVGA_3D_CMD_RESERVED4 = 1231,
+ SVGA_3D_CMD_RESERVED5 = 1232,
+ SVGA_3D_CMD_RESERVED6 = 1233,
+ SVGA_3D_CMD_RESERVED7 = 1234,
+ SVGA_3D_CMD_RESERVED8 = 1235,
SVGA_3D_CMD_GROW_OTABLE = 1236,
SVGA_3D_CMD_DX_GROW_COTABLE = 1237,
@@ -298,7 +291,46 @@ typedef enum {
SVGA_3D_CMD_DX_PRED_CONVERT = 1243,
SVGA_3D_CMD_WHOLE_SURFACE_COPY = 1244,
- SVGA_3D_CMD_MAX = 1245,
+ SVGA_3D_CMD_DX_DEFINE_UA_VIEW = 1245,
+ SVGA_3D_CMD_DX_DESTROY_UA_VIEW = 1246,
+ SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT = 1247,
+ SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT = 1248,
+ SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT = 1249,
+ SVGA_3D_CMD_DX_SET_UA_VIEWS = 1250,
+
+ SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT = 1251,
+ SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT = 1252,
+ SVGA_3D_CMD_DX_DISPATCH = 1253,
+ SVGA_3D_CMD_DX_DISPATCH_INDIRECT = 1254,
+
+ SVGA_3D_CMD_WRITE_ZERO_SURFACE = 1255,
+ SVGA_3D_CMD_HINT_ZERO_SURFACE = 1256,
+ SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER = 1257,
+ SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT = 1258,
+
+ SVGA_3D_CMD_LOGICOPS_BITBLT = 1259,
+ SVGA_3D_CMD_LOGICOPS_TRANSBLT = 1260,
+ SVGA_3D_CMD_LOGICOPS_STRETCHBLT = 1261,
+ SVGA_3D_CMD_LOGICOPS_COLORFILL = 1262,
+ SVGA_3D_CMD_LOGICOPS_ALPHABLEND = 1263,
+ SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND = 1264,
+
+ SVGA_3D_CMD_RESERVED2_1 = 1265,
+
+ SVGA_3D_CMD_RESERVED2_2 = 1266,
+ SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 = 1267,
+ SVGA_3D_CMD_DX_SET_CS_UA_VIEWS = 1268,
+ SVGA_3D_CMD_DX_SET_MIN_LOD = 1269,
+ SVGA_3D_CMD_RESERVED2_3 = 1270,
+ SVGA_3D_CMD_RESERVED2_4 = 1271,
+ SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 = 1272,
+ SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB = 1273,
+ SVGA_3D_CMD_DX_SET_SHADER_IFACE = 1274,
+ SVGA_3D_CMD_DX_BIND_STREAMOUTPUT = 1275,
+ SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS = 1276,
+ SVGA_3D_CMD_DX_BIND_SHADER_IFACE = 1277,
+
+ SVGA_3D_CMD_MAX = 1278,
SVGA_3D_CMD_FUTURE_MAX = 3000
} SVGAFifo3dCmdId;
@@ -334,6 +366,7 @@ struct {
uint32 sid;
SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
+
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
@@ -341,6 +374,7 @@ struct {
* numMipLevels set to 0.
*/
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
@@ -360,6 +394,7 @@ struct {
uint32 sid;
SVGA3dSurface1Flags surfaceFlags;
SVGA3dSurfaceFormat format;
+
/*
* If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
* structures must have the same value of numMipLevels field.
@@ -369,6 +404,7 @@ struct {
SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
+
/*
* Followed by an SVGA3dSize structure for each mip level in each face.
*
@@ -517,6 +553,18 @@ typedef
struct {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dest;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBltNonMSToMS;
+/* SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
SVGA3dBox boxSrc;
SVGA3dBox boxDest;
SVGA3dStretchBltMode mode;
@@ -555,6 +603,7 @@ struct {
SVGAGuestImage guest;
SVGA3dSurfaceImageId host;
SVGA3dTransferType transfer;
+
/*
* Followed by variable number of SVGA3dCopyBox structures. For consistency
* in all clipping logic and coordinate translation, we define the
@@ -789,7 +838,7 @@ struct {
uint32 indexBufferSid; /* Valid index buffer sid. */
uint32 indexBufferOffset; /* Byte offset into the vertex buffer, almost */
- /* always 0 for DX9 guests, non-zero for OpenGL */
+ /* always 0 for pre SM guests, non-zero for OpenGL */
/* guests. We can't represent non-multiple of */
/* stride offsets in D3D9Renderer... */
uint8 indexBufferStride; /* Allowable values = 1, 2, or 4 */
@@ -1228,6 +1277,7 @@ struct SVGA3dCmdLogicOpsBitBlt {
SVGA3dSurfaceImageId src;
SVGA3dSurfaceImageId dst;
SVGA3dLogicOp logicOp;
+ SVGA3dLogicOpRop3 logicOpRop3;
/* Followed by variable number of SVGA3dCopyBox structures */
}
#include "vmware_pack_end.h"
@@ -1247,7 +1297,8 @@ struct SVGA3dCmdLogicOpsTransBlt {
uint32 color;
uint32 flags;
SVGA3dBox srcBox;
- SVGA3dBox dstBox;
+ SVGA3dSignedBox dstBox;
+ SVGA3dBox clipBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsTransBlt; /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
@@ -1266,7 +1317,8 @@ struct SVGA3dCmdLogicOpsStretchBlt {
uint16 mode;
uint16 flags;
SVGA3dBox srcBox;
- SVGA3dBox dstBox;
+ SVGA3dSignedBox dstBox;
+ SVGA3dBox clipBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsStretchBlt; /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
@@ -1283,6 +1335,7 @@ struct SVGA3dCmdLogicOpsColorFill {
SVGA3dSurfaceImageId dst;
uint32 color;
SVGA3dLogicOp logicOp;
+ SVGA3dLogicOpRop3 logicOpRop3;
/* Followed by variable number of SVGA3dRect structures. */
}
#include "vmware_pack_end.h"
@@ -1302,7 +1355,8 @@ struct SVGA3dCmdLogicOpsAlphaBlend {
uint32 alphaVal;
uint32 flags;
SVGA3dBox srcBox;
- SVGA3dBox dstBox;
+ SVGA3dSignedBox dstBox;
+ SVGA3dBox clipBox;
}
#include "vmware_pack_end.h"
SVGA3dCmdLogicOpsAlphaBlend; /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
@@ -1365,8 +1419,9 @@ struct {
SVGA3dSurface2Flags surface2Flags;
uint8 multisamplePattern;
uint8 qualityLevel;
- uint8 pad0[2];
- uint32 pad1[3];
+ uint16 bufferByteStride;
+ float minLOD;
+ uint32 pad0[2];
}
#include "vmware_pack_end.h"
SVGAOTableSurfaceEntry;
@@ -1543,7 +1598,7 @@ typedef
#include "vmware_pack_begin.h"
struct {
SVGAOTableType type;
- PPN baseAddress;
+ PPN32 baseAddress;
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
@@ -1599,7 +1654,7 @@ typedef
struct SVGA3dCmdDefineGBMob {
SVGAMobId mobid;
SVGAMobFormat ptDepth;
- PPN base;
+ PPN32 base;
uint32 sizeInBytes;
}
#include "vmware_pack_end.h"
@@ -1618,7 +1673,6 @@ struct SVGA3dCmdDestroyGBMob {
#include "vmware_pack_end.h"
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
-
/*
* Define a memory object (Mob) in the OTable with a PPN64 base.
*/
@@ -1719,6 +1773,27 @@ struct SVGA3dCmdDefineGBSurface_v3 {
SVGA3dCmdDefineGBSurface_v3; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
/*
+ * Defines a guest-backed surface, adding buffer byte stride.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v4 {
+ uint32 sid;
+ SVGA3dSurfaceAllFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ uint32 numMipLevels;
+ uint32 multisampleCount;
+ SVGA3dMSPattern multisamplePattern;
+ SVGA3dMSQualityLevel qualityLevel;
+ SVGA3dTextureFilter autogenFilter;
+ SVGA3dSize size;
+ uint32 arraySize;
+ uint32 bufferByteStride;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v4; /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 */
+
+/*
* Destroy a guest-backed surface.
*/
@@ -2181,4 +2256,20 @@ SVGA3dCmdScreenCopy; /* SVGA_3D_CMD_SCREEN_COPY */
#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWriteZeroSurface; /* SVGA_3D_CMD_WRITE_ZERO_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHintZeroSurface; /* SVGA_3D_CMD_HINT_ZERO_SURFACE */
+
#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index f256560049bf..617b468c626c 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2019 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -39,6 +39,8 @@
#include "includeCheck.h"
+#include "svga3d_types.h"
+
/*
* 3D Hardware Version
*
@@ -69,381 +71,408 @@ typedef enum {
* DevCap indexes.
*/
-typedef enum {
- SVGA3D_DEVCAP_INVALID = ((uint32)-1),
- SVGA3D_DEVCAP_3D = 0,
- SVGA3D_DEVCAP_MAX_LIGHTS = 1,
-
- /*
- * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
- * fixed-function texture units available. Each of these units
- * work in both FFP and Shader modes, and they support texture
- * transforms and texture coordinates. The host may have additional
- * texture image units that are only usable with shaders.
- */
- SVGA3D_DEVCAP_MAX_TEXTURES = 2,
- SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
- SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
- SVGA3D_DEVCAP_VERTEX_SHADER = 5,
- SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
- SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
- SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
- SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
- SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
- SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
- SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12,
- SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13,
- SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14,
- SVGA3D_DEVCAP_QUERY_TYPES = 15,
- SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
- SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
- SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
- SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
- SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
- SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
- SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
- SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
- SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
- SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
- SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
- SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
- SVGA3D_DEVCAP_TEXTURE_OPS = 31,
- SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
- SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
- SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
- SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
- SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
- SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
- SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
- SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
- SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
- SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
- SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
- SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
- SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
- SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
- SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
- SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
- SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
- SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
- SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
- SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
- SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
- SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
-
- /*
- * There is a hole in our devcap definitions for
- * historical reasons.
- *
- * Define a constant just for completeness.
- */
- SVGA3D_DEVCAP_MISSING62 = 62,
-
- SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
-
- /*
- * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
- * render targets. This does not include the depth or stencil targets.
- */
- SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
-
- SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
- SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
- SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
- SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
- SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
- SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES = 70,
- SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES = 71,
- SVGA3D_DEVCAP_ALPHATOCOVERAGE = 72,
- SVGA3D_DEVCAP_SUPERSAMPLE = 73,
- SVGA3D_DEVCAP_AUTOGENMIPMAPS = 74,
- SVGA3D_DEVCAP_SURFACEFMT_NV12 = 75,
- SVGA3D_DEVCAP_SURFACEFMT_AYUV = 76,
-
- /*
- * This is the maximum number of SVGA context IDs that the guest
- * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
- */
- SVGA3D_DEVCAP_MAX_CONTEXT_IDS = 77,
-
- /*
- * This is the maximum number of SVGA surface IDs that the guest
- * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
- */
- SVGA3D_DEVCAP_MAX_SURFACE_IDS = 78,
-
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 = 79,
- SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 = 80,
- SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT = 81,
-
- SVGA3D_DEVCAP_SURFACEFMT_ATI1 = 82,
- SVGA3D_DEVCAP_SURFACEFMT_ATI2 = 83,
-
- /*
- * Deprecated.
- */
- SVGA3D_DEVCAP_DEAD1 = 84,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
- * ored together, one for every type of video decoding supported.
- */
- SVGA3D_DEVCAP_VIDEO_DECODE = 85,
-
- /*
- * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
- * ored together, one for every type of video processing supported.
- */
- SVGA3D_DEVCAP_VIDEO_PROCESS = 86,
-
- SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */
- SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */
- SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */
- SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */
-
- SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91,
-
- /*
- * Does the host support the SVGA logic ops commands?
- */
- SVGA3D_DEVCAP_LOGICOPS = 92,
-
- /*
- * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
- */
- SVGA3D_DEVCAP_TS_COLOR_KEY = 93, /* boolean */
-
- /*
- * Deprecated.
- */
- SVGA3D_DEVCAP_DEAD2 = 94,
-
- /*
- * Does the device support DXContexts?
- */
- SVGA3D_DEVCAP_DXCONTEXT = 95,
-
- /*
- * What is the maximum size of a texture array?
- *
- * (Even if this cap is zero, cubemaps are still allowed.)
- */
- SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE = 96,
-
- /*
- * What is the maximum number of vertex buffers or vertex input registers
- * that can be expected to work correctly with a DXContext?
- *
- * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
- * anything in excess of this cap is not guaranteed to render correctly.
- *
- * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
- * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
- * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1,
- * but only the registers up to this cap value are guaranteed to render
- * correctly.
- *
- * If guest-drivers are able to expose a lower-limit, it's recommended
- * that they clamp to this value. Otherwise, the host will make a
- * best-effort on case-by-case basis if guests exceed this.
- */
- SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS = 97,
-
- /*
- * What is the maximum number of constant buffers that can be expected to
- * work correctly with a DX context?
- *
- * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
- * anything in excess of this cap is not guaranteed to render correctly.
- *
- * If guest-drivers are able to expose a lower-limit, it's recommended
- * that they clamp to this value. Otherwise, the host will make a
- * best-effort on case-by-case basis if guests exceed this.
- */
- SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS = 98,
-
- /*
- * Does the device support provoking vertex control?
- *
- * If this cap is present, the provokingVertexLast field in the
- * rasterizer state is enabled. (Guests can then set it to FALSE,
- * meaning that the first vertex is the provoking vertex, or TRUE,
- * meaning that the last verteix is the provoking vertex.)
- *
- * If this cap is FALSE, then guests should set the provokingVertexLast
- * to FALSE, otherwise rendering behavior is undefined.
- */
- SVGA3D_DEVCAP_DX_PROVOKING_VERTEX = 99,
-
- SVGA3D_DEVCAP_DXFMT_X8R8G8B8 = 100,
- SVGA3D_DEVCAP_DXFMT_A8R8G8B8 = 101,
- SVGA3D_DEVCAP_DXFMT_R5G6B5 = 102,
- SVGA3D_DEVCAP_DXFMT_X1R5G5B5 = 103,
- SVGA3D_DEVCAP_DXFMT_A1R5G5B5 = 104,
- SVGA3D_DEVCAP_DXFMT_A4R4G4B4 = 105,
- SVGA3D_DEVCAP_DXFMT_Z_D32 = 106,
- SVGA3D_DEVCAP_DXFMT_Z_D16 = 107,
- SVGA3D_DEVCAP_DXFMT_Z_D24S8 = 108,
- SVGA3D_DEVCAP_DXFMT_Z_D15S1 = 109,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE8 = 110,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 = 111,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE16 = 112,
- SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 = 113,
- SVGA3D_DEVCAP_DXFMT_DXT1 = 114,
- SVGA3D_DEVCAP_DXFMT_DXT2 = 115,
- SVGA3D_DEVCAP_DXFMT_DXT3 = 116,
- SVGA3D_DEVCAP_DXFMT_DXT4 = 117,
- SVGA3D_DEVCAP_DXFMT_DXT5 = 118,
- SVGA3D_DEVCAP_DXFMT_BUMPU8V8 = 119,
- SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 = 120,
- SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 = 121,
- SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 = 122,
- SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 = 123,
- SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 = 124,
- SVGA3D_DEVCAP_DXFMT_A2R10G10B10 = 125,
- SVGA3D_DEVCAP_DXFMT_V8U8 = 126,
- SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 = 127,
- SVGA3D_DEVCAP_DXFMT_CxV8U8 = 128,
- SVGA3D_DEVCAP_DXFMT_X8L8V8U8 = 129,
- SVGA3D_DEVCAP_DXFMT_A2W10V10U10 = 130,
- SVGA3D_DEVCAP_DXFMT_ALPHA8 = 131,
- SVGA3D_DEVCAP_DXFMT_R_S10E5 = 132,
- SVGA3D_DEVCAP_DXFMT_R_S23E8 = 133,
- SVGA3D_DEVCAP_DXFMT_RG_S10E5 = 134,
- SVGA3D_DEVCAP_DXFMT_RG_S23E8 = 135,
- SVGA3D_DEVCAP_DXFMT_BUFFER = 136,
- SVGA3D_DEVCAP_DXFMT_Z_D24X8 = 137,
- SVGA3D_DEVCAP_DXFMT_V16U16 = 138,
- SVGA3D_DEVCAP_DXFMT_G16R16 = 139,
- SVGA3D_DEVCAP_DXFMT_A16B16G16R16 = 140,
- SVGA3D_DEVCAP_DXFMT_UYVY = 141,
- SVGA3D_DEVCAP_DXFMT_YUY2 = 142,
- SVGA3D_DEVCAP_DXFMT_NV12 = 143,
- SVGA3D_DEVCAP_DXFMT_AYUV = 144,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS = 145,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT = 146,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT = 147,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS = 148,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT = 149,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT = 150,
- SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT = 151,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS = 152,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT = 153,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM = 154,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT = 155,
- SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS = 156,
- SVGA3D_DEVCAP_DXFMT_R32G32_UINT = 157,
- SVGA3D_DEVCAP_DXFMT_R32G32_SINT = 158,
- SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS = 159,
- SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT = 160,
- SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 = 161,
- SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT = 162,
- SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS = 163,
- SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT = 164,
- SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT = 165,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS = 166,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM = 167,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB = 168,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT = 169,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT = 170,
- SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS = 171,
- SVGA3D_DEVCAP_DXFMT_R16G16_UINT = 172,
- SVGA3D_DEVCAP_DXFMT_R16G16_SINT = 173,
- SVGA3D_DEVCAP_DXFMT_R32_TYPELESS = 174,
- SVGA3D_DEVCAP_DXFMT_D32_FLOAT = 175,
- SVGA3D_DEVCAP_DXFMT_R32_UINT = 176,
- SVGA3D_DEVCAP_DXFMT_R32_SINT = 177,
- SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS = 178,
- SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT = 179,
- SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 = 180,
- SVGA3D_DEVCAP_DXFMT_X24_G8_UINT = 181,
- SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS = 182,
- SVGA3D_DEVCAP_DXFMT_R8G8_UNORM = 183,
- SVGA3D_DEVCAP_DXFMT_R8G8_UINT = 184,
- SVGA3D_DEVCAP_DXFMT_R8G8_SINT = 185,
- SVGA3D_DEVCAP_DXFMT_R16_TYPELESS = 186,
- SVGA3D_DEVCAP_DXFMT_R16_UNORM = 187,
- SVGA3D_DEVCAP_DXFMT_R16_UINT = 188,
- SVGA3D_DEVCAP_DXFMT_R16_SNORM = 189,
- SVGA3D_DEVCAP_DXFMT_R16_SINT = 190,
- SVGA3D_DEVCAP_DXFMT_R8_TYPELESS = 191,
- SVGA3D_DEVCAP_DXFMT_R8_UNORM = 192,
- SVGA3D_DEVCAP_DXFMT_R8_UINT = 193,
- SVGA3D_DEVCAP_DXFMT_R8_SNORM = 194,
- SVGA3D_DEVCAP_DXFMT_R8_SINT = 195,
- SVGA3D_DEVCAP_DXFMT_P8 = 196,
- SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP = 197,
- SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM = 198,
- SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM = 199,
- SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS = 200,
- SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB = 201,
- SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS = 202,
- SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB = 203,
- SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS = 204,
- SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB = 205,
- SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS = 206,
- SVGA3D_DEVCAP_DXFMT_ATI1 = 207,
- SVGA3D_DEVCAP_DXFMT_BC4_SNORM = 208,
- SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS = 209,
- SVGA3D_DEVCAP_DXFMT_ATI2 = 210,
- SVGA3D_DEVCAP_DXFMT_BC5_SNORM = 211,
- SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM = 212,
- SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS = 213,
- SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB = 214,
- SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS = 215,
- SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB = 216,
- SVGA3D_DEVCAP_DXFMT_Z_DF16 = 217,
- SVGA3D_DEVCAP_DXFMT_Z_DF24 = 218,
- SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT = 219,
- SVGA3D_DEVCAP_DXFMT_YV12 = 220,
- SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT = 221,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT = 222,
- SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM = 223,
- SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT = 224,
- SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM = 225,
- SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM = 226,
- SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT = 227,
- SVGA3D_DEVCAP_DXFMT_R16G16_UNORM = 228,
- SVGA3D_DEVCAP_DXFMT_R16G16_SNORM = 229,
- SVGA3D_DEVCAP_DXFMT_R32_FLOAT = 230,
- SVGA3D_DEVCAP_DXFMT_R8G8_SNORM = 231,
- SVGA3D_DEVCAP_DXFMT_R16_FLOAT = 232,
- SVGA3D_DEVCAP_DXFMT_D16_UNORM = 233,
- SVGA3D_DEVCAP_DXFMT_A8_UNORM = 234,
- SVGA3D_DEVCAP_DXFMT_BC1_UNORM = 235,
- SVGA3D_DEVCAP_DXFMT_BC2_UNORM = 236,
- SVGA3D_DEVCAP_DXFMT_BC3_UNORM = 237,
- SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM = 238,
- SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM = 239,
- SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM = 240,
- SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM = 241,
- SVGA3D_DEVCAP_DXFMT_BC4_UNORM = 242,
- SVGA3D_DEVCAP_DXFMT_BC5_UNORM = 243,
-
- /*
- * Advertises shaderModel 4.1 support, independent blend-states,
- * cube-map arrays, and a higher vertex input registers limit.
- *
- * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
- */
- SVGA3D_DEVCAP_SM41 = 244,
-
- SVGA3D_DEVCAP_MULTISAMPLE_2X = 245,
- SVGA3D_DEVCAP_MULTISAMPLE_4X = 246,
-
- SVGA3D_DEVCAP_MAX /* This must be the last index. */
-} SVGA3dDevCapIndex;
+typedef uint32 SVGA3dDevCapIndex;
+
+#define SVGA3D_DEVCAP_INVALID ((uint32)-1)
+#define SVGA3D_DEVCAP_3D 0
+#define SVGA3D_DEVCAP_MAX_LIGHTS 1
+
+/*
+ * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. Each of these units
+ * works in both FFP and Shader modes and supports texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ */
+#define SVGA3D_DEVCAP_MAX_TEXTURES 2
+#define SVGA3D_DEVCAP_MAX_CLIP_PLANES 3
+#define SVGA3D_DEVCAP_VERTEX_SHADER_VERSION 4
+#define SVGA3D_DEVCAP_VERTEX_SHADER 5
+#define SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION 6
+#define SVGA3D_DEVCAP_FRAGMENT_SHADER 7
+#define SVGA3D_DEVCAP_MAX_RENDER_TARGETS 8
+#define SVGA3D_DEVCAP_S23E8_TEXTURES 9
+#define SVGA3D_DEVCAP_S10E5_TEXTURES 10
+#define SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND 11
+#define SVGA3D_DEVCAP_D16_BUFFER_FORMAT 12
+#define SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT 13
+#define SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT 14
+#define SVGA3D_DEVCAP_QUERY_TYPES 15
+#define SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING 16
+#define SVGA3D_DEVCAP_MAX_POINT_SIZE 17
+#define SVGA3D_DEVCAP_MAX_SHADER_TEXTURES 18
+#define SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH 19
+#define SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT 20
+#define SVGA3D_DEVCAP_MAX_VOLUME_EXTENT 21
+#define SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT 22
+#define SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO 23
+#define SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY 24
+#define SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT 25
+#define SVGA3D_DEVCAP_MAX_VERTEX_INDEX 26
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS 27
+#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS 28
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS 29
+#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS 30
+#define SVGA3D_DEVCAP_TEXTURE_OPS 31
+#define SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 32
+#define SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 33
+#define SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 34
+#define SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 35
+#define SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 36
+#define SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 37
+#define SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 38
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 39
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 40
+#define SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 41
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 42
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D16 43
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 44
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 45
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT1 46
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT2 47
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT3 48
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT4 49
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT5 50
+#define SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 51
+#define SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 52
+#define SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 53
+#define SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 54
+#define SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 55
+#define SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 56
+#define SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 57
+#define SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 58
+#define SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 59
+#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 60
+#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 61
+
+/*
+ * There is a hole in our devcap definitions for
+ * historical reasons.
+ *
+ * Define a constant just for completeness.
+ */
+#define SVGA3D_DEVCAP_MISSING62 62
+
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES 63
+
+/*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+ * render targets. This does not include the depth or stencil targets.
+ */
+#define SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS 64
+
+#define SVGA3D_DEVCAP_SURFACEFMT_V16U16 65
+#define SVGA3D_DEVCAP_SURFACEFMT_G16R16 66
+#define SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 67
+#define SVGA3D_DEVCAP_SURFACEFMT_UYVY 68
+#define SVGA3D_DEVCAP_SURFACEFMT_YUY2 69
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD4 70
+#define SVGA3D_DEVCAP_DEAD5 71
+#define SVGA3D_DEVCAP_DEAD7 72
+#define SVGA3D_DEVCAP_DEAD6 73
+
+#define SVGA3D_DEVCAP_AUTOGENMIPMAPS 74
+#define SVGA3D_DEVCAP_SURFACEFMT_NV12 75
+#define SVGA3D_DEVCAP_DEAD10 76
+
+/*
+ * This is the maximum number of SVGA context IDs that the guest
+ * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+ */
+#define SVGA3D_DEVCAP_MAX_CONTEXT_IDS 77
+
+/*
+ * This is the maximum number of SVGA surface IDs that the guest
+ * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+ */
+#define SVGA3D_DEVCAP_MAX_SURFACE_IDS 78
+
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF16 79
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF24 80
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT 81
+
+#define SVGA3D_DEVCAP_SURFACEFMT_ATI1 82
+#define SVGA3D_DEVCAP_SURFACEFMT_ATI2 83
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD1 84
+#define SVGA3D_DEVCAP_DEAD8 85
+#define SVGA3D_DEVCAP_DEAD9 86
+
+#define SVGA3D_DEVCAP_LINE_AA 87 /* boolean */
+#define SVGA3D_DEVCAP_LINE_STIPPLE 88 /* boolean */
+#define SVGA3D_DEVCAP_MAX_LINE_WIDTH 89 /* float */
+#define SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH 90 /* float */
+
+#define SVGA3D_DEVCAP_SURFACEFMT_YV12 91
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD3 92
+
+/*
+ * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+ */
+#define SVGA3D_DEVCAP_TS_COLOR_KEY 93 /* boolean */
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD2 94
+
+/*
+ * Does the device support DXContexts?
+ */
+#define SVGA3D_DEVCAP_DXCONTEXT 95
+
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD11 96
+
+/*
+ * What is the maximum number of vertex buffers or vertex input registers
+ * that can be expected to work correctly with a DXContext?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+ * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
+ * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1 cap,
+ * but only the registers up to this cap value are guaranteed to render
+ * correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it's recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed this.
+ */
+#define SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS 97
+
+/*
+ * What is the maximum number of constant buffers that can be expected to
+ * work correctly with a DX context?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it's recommended
+ * that they clamp to this value. Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed this.
+ */
+#define SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS 98
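+
+/*
+ * Illustrative sketch (not device ABI) of the clamping recommended
+ * above: take the smaller of the driver's compile-time limit and the
+ * host-reported value of SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS or
+ * SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS.
+ */
+static inline uint32 svga_clamped_limit(uint32 driver_max, uint32 devcap_value)
+{
+	return devcap_value < driver_max ? devcap_value : driver_max;
+}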
+
+/*
+ * Does the device support provoking vertex control?
+ *
+ * If this cap is present, the provokingVertexLast field in the
+ * rasterizer state is enabled. (Guests can then set it to FALSE,
+ * meaning that the first vertex is the provoking vertex, or TRUE,
+ * meaning that the last vertex is the provoking vertex.)
+ *
+ * If this cap is FALSE, then guests should set provokingVertexLast
+ * to FALSE; otherwise rendering behavior is undefined.
+ */
+#define SVGA3D_DEVCAP_DX_PROVOKING_VERTEX 99
+
+#define SVGA3D_DEVCAP_DXFMT_X8R8G8B8 100
+#define SVGA3D_DEVCAP_DXFMT_A8R8G8B8 101
+#define SVGA3D_DEVCAP_DXFMT_R5G6B5 102
+#define SVGA3D_DEVCAP_DXFMT_X1R5G5B5 103
+#define SVGA3D_DEVCAP_DXFMT_A1R5G5B5 104
+#define SVGA3D_DEVCAP_DXFMT_A4R4G4B4 105
+#define SVGA3D_DEVCAP_DXFMT_Z_D32 106
+#define SVGA3D_DEVCAP_DXFMT_Z_D16 107
+#define SVGA3D_DEVCAP_DXFMT_Z_D24S8 108
+#define SVGA3D_DEVCAP_DXFMT_Z_D15S1 109
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8 110
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4 111
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE16 112
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8 113
+#define SVGA3D_DEVCAP_DXFMT_DXT1 114
+#define SVGA3D_DEVCAP_DXFMT_DXT2 115
+#define SVGA3D_DEVCAP_DXFMT_DXT3 116
+#define SVGA3D_DEVCAP_DXFMT_DXT4 117
+#define SVGA3D_DEVCAP_DXFMT_DXT5 118
+#define SVGA3D_DEVCAP_DXFMT_BUMPU8V8 119
+#define SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5 120
+#define SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8 121
+#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1 122
+#define SVGA3D_DEVCAP_DXFMT_ARGB_S10E5 123
+#define SVGA3D_DEVCAP_DXFMT_ARGB_S23E8 124
+#define SVGA3D_DEVCAP_DXFMT_A2R10G10B10 125
+#define SVGA3D_DEVCAP_DXFMT_V8U8 126
+#define SVGA3D_DEVCAP_DXFMT_Q8W8V8U8 127
+#define SVGA3D_DEVCAP_DXFMT_CxV8U8 128
+#define SVGA3D_DEVCAP_DXFMT_X8L8V8U8 129
+#define SVGA3D_DEVCAP_DXFMT_A2W10V10U10 130
+#define SVGA3D_DEVCAP_DXFMT_ALPHA8 131
+#define SVGA3D_DEVCAP_DXFMT_R_S10E5 132
+#define SVGA3D_DEVCAP_DXFMT_R_S23E8 133
+#define SVGA3D_DEVCAP_DXFMT_RG_S10E5 134
+#define SVGA3D_DEVCAP_DXFMT_RG_S23E8 135
+#define SVGA3D_DEVCAP_DXFMT_BUFFER 136
+#define SVGA3D_DEVCAP_DXFMT_Z_D24X8 137
+#define SVGA3D_DEVCAP_DXFMT_V16U16 138
+#define SVGA3D_DEVCAP_DXFMT_G16R16 139
+#define SVGA3D_DEVCAP_DXFMT_A16B16G16R16 140
+#define SVGA3D_DEVCAP_DXFMT_UYVY 141
+#define SVGA3D_DEVCAP_DXFMT_YUY2 142
+#define SVGA3D_DEVCAP_DXFMT_NV12 143
+#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD2 144
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS 145
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT 146
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT 147
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS 148
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT 149
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT 150
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT 151
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS 152
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT 153
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM 154
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT 155
+#define SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS 156
+#define SVGA3D_DEVCAP_DXFMT_R32G32_UINT 157
+#define SVGA3D_DEVCAP_DXFMT_R32G32_SINT 158
+#define SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS 159
+#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT 160
+#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24 161
+#define SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT 162
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS 163
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT 164
+#define SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT 165
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS 166
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM 167
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB 168
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT 169
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT 170
+#define SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS 171
+#define SVGA3D_DEVCAP_DXFMT_R16G16_UINT 172
+#define SVGA3D_DEVCAP_DXFMT_R16G16_SINT 173
+#define SVGA3D_DEVCAP_DXFMT_R32_TYPELESS 174
+#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT 175
+#define SVGA3D_DEVCAP_DXFMT_R32_UINT 176
+#define SVGA3D_DEVCAP_DXFMT_R32_SINT 177
+#define SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS 178
+#define SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT 179
+#define SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8 180
+#define SVGA3D_DEVCAP_DXFMT_X24_G8_UINT 181
+#define SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS 182
+#define SVGA3D_DEVCAP_DXFMT_R8G8_UNORM 183
+#define SVGA3D_DEVCAP_DXFMT_R8G8_UINT 184
+#define SVGA3D_DEVCAP_DXFMT_R8G8_SINT 185
+#define SVGA3D_DEVCAP_DXFMT_R16_TYPELESS 186
+#define SVGA3D_DEVCAP_DXFMT_R16_UNORM 187
+#define SVGA3D_DEVCAP_DXFMT_R16_UINT 188
+#define SVGA3D_DEVCAP_DXFMT_R16_SNORM 189
+#define SVGA3D_DEVCAP_DXFMT_R16_SINT 190
+#define SVGA3D_DEVCAP_DXFMT_R8_TYPELESS 191
+#define SVGA3D_DEVCAP_DXFMT_R8_UNORM 192
+#define SVGA3D_DEVCAP_DXFMT_R8_UINT 193
+#define SVGA3D_DEVCAP_DXFMT_R8_SNORM 194
+#define SVGA3D_DEVCAP_DXFMT_R8_SINT 195
+#define SVGA3D_DEVCAP_DXFMT_P8 196
+#define SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP 197
+#define SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM 198
+#define SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM 199
+#define SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS 200
+#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB 201
+#define SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS 202
+#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB 203
+#define SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS 204
+#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB 205
+#define SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS 206
+#define SVGA3D_DEVCAP_DXFMT_ATI1 207
+#define SVGA3D_DEVCAP_DXFMT_BC4_SNORM 208
+#define SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS 209
+#define SVGA3D_DEVCAP_DXFMT_ATI2 210
+#define SVGA3D_DEVCAP_DXFMT_BC5_SNORM 211
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM 212
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS 213
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB 214
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS 215
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB 216
+#define SVGA3D_DEVCAP_DXFMT_Z_DF16 217
+#define SVGA3D_DEVCAP_DXFMT_Z_DF24 218
+#define SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT 219
+#define SVGA3D_DEVCAP_DXFMT_YV12 220
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT 221
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT 222
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM 223
+#define SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT 224
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM 225
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM 226
+#define SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT 227
+#define SVGA3D_DEVCAP_DXFMT_R16G16_UNORM 228
+#define SVGA3D_DEVCAP_DXFMT_R16G16_SNORM 229
+#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT 230
+#define SVGA3D_DEVCAP_DXFMT_R8G8_SNORM 231
+#define SVGA3D_DEVCAP_DXFMT_R16_FLOAT 232
+#define SVGA3D_DEVCAP_DXFMT_D16_UNORM 233
+#define SVGA3D_DEVCAP_DXFMT_A8_UNORM 234
+#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM 235
+#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM 236
+#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM 237
+#define SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM 238
+#define SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM 239
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM 240
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM 241
+#define SVGA3D_DEVCAP_DXFMT_BC4_UNORM 242
+#define SVGA3D_DEVCAP_DXFMT_BC5_UNORM 243
+
+/*
+ * Advertises shaderModel 4.1 support, independent blend-states,
+ * cube-map arrays, and a higher vertex input register limit.
+ *
+ * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+ */
+#define SVGA3D_DEVCAP_SM41 244
+#define SVGA3D_DEVCAP_MULTISAMPLE_2X 245
+#define SVGA3D_DEVCAP_MULTISAMPLE_4X 246
+
+/*
+ * Indicates that the device has rendering support for
+ * full multisample quality. If this cap is not present,
+ * the host may or may not support full-quality rendering.
+ *
+ * See also SVGA_REG_MS_HINT_RESOLVED.
+ */
+#define SVGA3D_DEVCAP_MS_FULL_QUALITY 247
+
+/*
+ * Advertises support for the SVGA3D LogicOps commands.
+ */
+#define SVGA3D_DEVCAP_LOGICOPS 248
+
+/*
+ * Advertises support for using logicOps in the DXBlendStates.
+ */
+#define SVGA3D_DEVCAP_LOGIC_BLENDOPS 249
+
+/*
+ * Note that the DXFMT range is now non-contiguous.
+ */
+#define SVGA3D_DEVCAP_RESERVED_1 250
+#define SVGA3D_DEVCAP_DXFMT_BC6H_TYPELESS 251
+#define SVGA3D_DEVCAP_DXFMT_BC6H_UF16 252
+#define SVGA3D_DEVCAP_DXFMT_BC6H_SF16 253
+#define SVGA3D_DEVCAP_DXFMT_BC7_TYPELESS 254
+#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM 255
+#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM_SRGB 256
+#define SVGA3D_DEVCAP_RESERVED_2 257
+
+#define SVGA3D_DEVCAP_SM5 258
+#define SVGA3D_DEVCAP_MULTISAMPLE_8X 259
+
+/* This must be the last index. */
+#define SVGA3D_DEVCAP_MAX 260
/*
* Bit definitions for DXFMT devcaps
@@ -472,10 +501,10 @@ typedef enum {
#define SVGA3D_DXFMT_MAX (1 << 10)
typedef union {
- Bool b;
+ SVGA3dBool b;
uint32 u;
- int32 i;
- float f;
+ int32 i;
+ float f;
} SVGA3dDevCapResult;
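+
+/*
+ * Illustrative sketch (not part of the header): probing a boolean
+ * devcap through the result union above. svga_read_devcap_raw() is a
+ * hypothetical accessor standing in for the guest's register/FIFO
+ * mechanism for fetching the value at a given SVGA3dDevCapIndex.
+ */
+static inline SVGA3dBool svga_devcap_bool(SVGA3dDevCapIndex index)
+{
+	SVGA3dDevCapResult result;
+
+	result.u = svga_read_devcap_raw(index);
+	return result.b;
+}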
#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 7a49c94df221..f703ac2b1768 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2012-2015 VMware, Inc.
+ * Copyright 2012-2019 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -118,12 +118,14 @@ typedef uint8 SVGA3dMultisampleRastEnable;
#define SVGA3D_DX_MAX_SRVIEWS 128
#define SVGA3D_DX_MAX_CONSTBUFFERS 16
#define SVGA3D_DX_MAX_SAMPLERS 16
+#define SVGA3D_DX_MAX_CLASS_INSTANCES 253
#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
typedef uint32 SVGA3dShaderResourceViewId;
typedef uint32 SVGA3dRenderTargetViewId;
typedef uint32 SVGA3dDepthStencilViewId;
+typedef uint32 SVGA3dUAViewId;
typedef uint32 SVGA3dShaderId;
typedef uint32 SVGA3dElementLayoutId;
@@ -145,6 +147,17 @@ typedef union {
float value[4];
} SVGA3dRGBAFloat;
+typedef union {
+ struct {
+ uint32 r;
+ uint32 g;
+ uint32 b;
+ uint32 a;
+ };
+
+ uint32 value[4];
+} SVGA3dRGBAUint32;
+
typedef
#include "vmware_pack_begin.h"
struct {
@@ -249,6 +262,39 @@ struct SVGA3dCmdDXSetShader {
#include "vmware_pack_end.h"
SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+typedef union {
+ struct {
+ uint32 cbOffset : 12;
+ uint32 cbId : 4;
+ uint32 baseSamp : 4;
+ uint32 baseTex : 7;
+ uint32 reserved : 5;
+ };
+ uint32 value;
+} SVGA3dIfaceData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderIface {
+ SVGA3dShaderType type;
+ uint32 numClassInstances;
+ uint32 index;
+ uint32 iface;
+ SVGA3dIfaceData data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderIface; /* SVGA_3D_CMD_DX_SET_SHADER_IFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShaderIface {
+ uint32 cid;
+ SVGAMobId mobid;
+ uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShaderIface; /* SVGA_3D_CMD_DX_BIND_SHADER_IFACE */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetSamplers {
@@ -306,6 +352,26 @@ SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstancedIndirect {
+ SVGA3dSurfaceId argsBufferSid;
+ uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstancedIndirect;
+/* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstancedIndirect {
+ SVGA3dSurfaceId argsBufferSid;
+ uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstancedIndirect;
+/* SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDrawAuto {
uint32 pad0;
}
@@ -314,6 +380,27 @@ SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDispatch {
+ uint32 threadGroupCountX;
+ uint32 threadGroupCountY;
+ uint32 threadGroupCountZ;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDispatch;
+/* SVGA_3D_CMD_DX_DISPATCH */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDispatchIndirect {
+ SVGA3dSurfaceId argsBufferSid;
+ uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDispatchIndirect;
+/* SVGA_3D_CMD_DX_DISPATCH_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
struct SVGA3dCmdDXSetInputLayout {
SVGA3dElementLayoutId elementLayoutId;
}
@@ -525,7 +612,7 @@ struct MKS3dDXSOState {
uint32 offset; /* Starting offset */
uint32 intOffset; /* Internal offset */
uint32 vertexCount; /* vertices written */
- uint32 sizeInBytes; /* max bytes to write */
+ uint32 dead;
}
#include "vmware_pack_end.h"
SVGA3dDXSOState;
@@ -786,6 +873,31 @@ struct SVGA3dCmdDXTransferFromBuffer {
SVGA3dCmdDXTransferFromBuffer; /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
+#define SVGA3D_TRANSFER_TO_BUFFER_READBACK (1 << 0)
+#define SVGA3D_TRANSFER_TO_BUFFER_FLAGS_MASK (1 << 0)
+typedef uint32 SVGA3dTransferToBufferFlags;
+
+/*
+ * Raw byte-wise transfer of the requested box from another surface
+ * to a buffer surface. Supported if SVGA_CAP_DX2 is set. This
+ * command does not take a context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferToBuffer {
+ SVGA3dSurfaceId srcSid;
+ uint32 srcSubResource;
+ SVGA3dBox srcBox;
+ SVGA3dSurfaceId destSid;
+ uint32 destOffset;
+ uint32 destPitch;
+ uint32 destSlicePitch;
+ SVGA3dTransferToBufferFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferToBuffer; /* SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER */
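+
+/*
+ * Illustrative sketch of issuing this command for a readback of one
+ * subresource into a buffer surface. Assumptions: the { id, size }
+ * command-header layout and the submit() helper stand in for the
+ * caller's actual command-submission path.
+ */
+static inline void svga_transfer_to_buffer_sketch(SVGA3dSurfaceId src_sid,
+						  SVGA3dSurfaceId buf_sid,
+						  SVGA3dBox box, uint32 pitch)
+{
+	struct {
+		uint32 id;
+		uint32 size;
+		SVGA3dCmdDXTransferToBuffer body;
+	} cmd = {
+		.id = SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER,
+		.size = sizeof(SVGA3dCmdDXTransferToBuffer),
+	};
+
+	cmd.body.srcSid = src_sid;
+	cmd.body.srcSubResource = 0;
+	cmd.body.srcBox = box;
+	cmd.body.destSid = buf_sid;
+	cmd.body.destOffset = 0;
+	cmd.body.destPitch = pitch;
+	cmd.body.destSlicePitch = 0;
+	cmd.body.flags = SVGA3D_TRANSFER_TO_BUFFER_READBACK;
+	submit(&cmd, sizeof(cmd));
+}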
+
+
/*
 * Raw byte-wise transfer of the requested box from a buffer surface
 * into another surface. Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
@@ -905,6 +1017,20 @@ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetHSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetDSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetCSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET */
+
+
+#define SVGA3D_BUFFEREX_SRV_RAW (1 << 0)
+#define SVGA3D_BUFFEREX_SRV_FLAGS_MAX (1 << 1)
+#define SVGA3D_BUFFEREX_SRV_FLAGS_MASK (SVGA3D_BUFFEREX_SRV_FLAGS_MAX - 1)
+typedef uint32 SVGA3dBufferExFlags;
typedef
#include "vmware_pack_begin.h"
@@ -925,7 +1051,7 @@ struct {
struct {
uint32 firstElement;
uint32 numElements;
- uint32 flags;
+ SVGA3dBufferExFlags flags;
uint32 pad0;
} bufferex;
};
@@ -1072,6 +1198,32 @@ struct SVGA3dCmdDXDefineDepthStencilView {
SVGA3dCmdDXDefineDepthStencilView;
/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+/*
+ * Version 2 is needed in order to start validating and using the flags
+ * field. Unfortunately, the device wasn't validating or using the
+ * flags field and the driver wasn't initializing it in shipped code,
+ * so a new version of the command is needed to allow that code to
+ * continue to work.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView_v2 {
+ SVGA3dDepthStencilViewId depthStencilViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ SVGA3DCreateDSViewFlags flags;
+ uint8 pad0;
+ uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView_v2;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyDepthStencilView {
@@ -1081,6 +1233,138 @@ struct SVGA3dCmdDXDestroyDepthStencilView {
SVGA3dCmdDXDestroyDepthStencilView;
/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+#define SVGA3D_UABUFFER_RAW (1 << 0)
+#define SVGA3D_UABUFFER_APPEND (1 << 1)
+#define SVGA3D_UABUFFER_COUNTER (1 << 2)
+typedef uint32 SVGA3dUABufferFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ union {
+ struct {
+ uint32 firstElement;
+ uint32 numElements;
+ SVGA3dUABufferFlags flags;
+ uint32 padding0;
+ uint32 padding1;
+ } buffer;
+ struct {
+ uint32 mipSlice;
+ uint32 firstArraySlice;
+ uint32 arraySize;
+ uint32 padding0;
+ uint32 padding1;
+ } tex; /* 1d, 2d */
+ struct {
+ uint32 mipSlice;
+ uint32 firstW;
+ uint32 wSize;
+ uint32 padding0;
+ uint32 padding1;
+ } tex3D;
+ };
+}
+#include "vmware_pack_end.h"
+SVGA3dUAViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+ SVGA3dUAViewDesc desc;
+ uint32 structureCount;
+ uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXUAViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineUAView {
+ SVGA3dUAViewId uaViewId;
+
+ SVGA3dSurfaceId sid;
+ SVGA3dSurfaceFormat format;
+ SVGA3dResourceType resourceDimension;
+
+ SVGA3dUAViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineUAView;
+/* SVGA_3D_CMD_DX_DEFINE_UA_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyUAView {
+ SVGA3dUAViewId uaViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyUAView;
+/* SVGA_3D_CMD_DX_DESTROY_UA_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearUAViewUint {
+ SVGA3dUAViewId uaViewId;
+ SVGA3dRGBAUint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearUAViewUint;
+/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearUAViewFloat {
+ SVGA3dUAViewId uaViewId;
+ SVGA3dRGBAFloat value;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearUAViewFloat;
+/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCopyStructureCount {
+ SVGA3dUAViewId srcUAViewId;
+ SVGA3dSurfaceId destSid;
+ uint32 destByteOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCopyStructureCount;
+/* SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStructureCount {
+ SVGA3dUAViewId uaViewId;
+ uint32 structureCount;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStructureCount;
+/* SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetUAViews {
+ uint32 uavSpliceIndex;
+ /* Followed by a variable number of SVGA3dUAViewIds. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetUAViews; /* SVGA_3D_CMD_DX_SET_UA_VIEWS */
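+
+/*
+ * Illustrative sketch: the size of a variable-length command like this
+ * is the fixed body plus one SVGA3dUAViewId per bound view.
+ */
+static inline uint32 svga_set_ua_views_size(uint32 num_views)
+{
+	return sizeof(SVGA3dCmdDXSetUAViews) +
+	       num_views * sizeof(SVGA3dUAViewId);
+}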
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCSUAViews {
+ uint32 startIndex;
+ /* Followed by a variable number of SVGA3dUAViewIds. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCSUAViews; /* SVGA_3D_CMD_DX_SET_CS_UA_VIEWS */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dInputElementDesc {
@@ -1099,7 +1383,7 @@ typedef
struct {
uint32 elid;
uint32 numDescs;
- SVGA3dInputElementDesc desc[32];
+ SVGA3dInputElementDesc descs[32];
uint32 pad[62];
}
#include "vmware_pack_end.h"
@@ -1261,7 +1545,8 @@ struct {
uint8 lineStippleEnable;
uint8 lineStippleFactor;
uint16 lineStipplePattern;
- uint32 forcedSampleCount;
+ uint8 forcedSampleCount;
+ uint8 mustBeZero[3];
}
#include "vmware_pack_end.h"
SVGACOTableDXRasterizerStateEntry;
@@ -1352,6 +1637,71 @@ struct SVGA3dCmdDXDestroySamplerState {
#include "vmware_pack_end.h"
SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_UNDEFINED 0
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_POSITION 1
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_CLIP_DISTANCE 2
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_CULL_DISTANCE 3
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_RENDER_TARGET_ARRAY_INDEX 4
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_VIEWPORT_ARRAY_INDEX 5
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_VERTEX_ID 6
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_PRIMITIVE_ID 7
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_INSTANCE_ID 8
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_IS_FRONT_FACE 9
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_SAMPLE_INDEX 10
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_0_EDGE_TESSFACTOR 11
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_0_EDGE_TESSFACTOR 12
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_1_EDGE_TESSFACTOR 13
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_1_EDGE_TESSFACTOR 14
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_INSIDE_TESSFACTOR 15
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_INSIDE_TESSFACTOR 16
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_U_EQ_0_EDGE_TESSFACTOR 17
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_V_EQ_0_EDGE_TESSFACTOR 18
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_W_EQ_0_EDGE_TESSFACTOR 19
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_INSIDE_TESSFACTOR 20
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DETAIL_TESSFACTOR 21
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DENSITY_TESSFACTOR 22
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_MAX 23
+typedef uint32 SVGA3dDXSignatureSemanticName;
+
+#define SVGADX_SIGNATURE_REGISTER_COMPONENT_UNKNOWN 0
+typedef uint32 SVGA3dDXSignatureRegisterComponentType;
+
+#define SVGADX_SIGNATURE_MIN_PRECISION_DEFAULT 0
+typedef uint32 SVGA3dDXSignatureMinPrecision;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSignatureEntry {
+ uint32 registerIndex;
+ SVGA3dDXSignatureSemanticName semanticName;
+ uint32 mask; /* Lower 4 bits represent X, Y, Z, W channels */
+ SVGA3dDXSignatureRegisterComponentType componentType;
+ SVGA3dDXSignatureMinPrecision minPrecision;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXShaderSignatureEntry;
+
+#define SVGADX_SIGNATURE_HEADER_VERSION_0 0x08a92d12
+
+/*
+ * The SVGA3dDXSignatureHeader structure is added after the shader
+ * body in the mob that is bound to the shader. It is followed by the
+ * specified number of SVGA3dDXSignatureEntry structures for each of
+ * the three signature types, in the order: input, output, patch
+ * constants.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSignatureHeader {
+ uint32 headerVersion;
+ uint32 numInputSignatures;
+ uint32 numOutputSignatures;
+ uint32 numPatchConstantSignatures;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXShaderSignatureHeader;
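+
+/*
+ * Illustrative sketch: total size of the signature trailer that
+ * follows the shader body in the mob, derived from the header above.
+ */
+static inline uint32 svga_signature_trailer_bytes(
+	const SVGA3dDXShaderSignatureHeader *header)
+{
+	uint32 entries = header->numInputSignatures +
+			 header->numOutputSignatures +
+			 header->numPatchConstantSignatures;
+
+	return sizeof(SVGA3dDXShaderSignatureHeader) +
+	       entries * sizeof(SVGA3dDXShaderSignatureEntry);
+}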
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDefineShader {
@@ -1415,7 +1765,8 @@ SVGA3dCmdDXCondBindAllShader; /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
/*
* The maximum number of streamout decl's in each streamout entry.
*/
-#define SVGA3D_MAX_STREAMOUT_DECLS 64
+#define SVGA3D_MAX_DX10_STREAMOUT_DECLS 64
+#define SVGA3D_MAX_STREAMOUT_DECLS 512
typedef
#include "vmware_pack_begin.h"
@@ -1434,10 +1785,16 @@ typedef
#include "vmware_pack_begin.h"
struct SVGAOTableStreamOutputEntry {
uint32 numOutputStreamEntries;
- SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS];
uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
uint32 rasterizedStream;
- uint32 pad[250];
+ uint32 numOutputStreamStrides;
+ uint32 mobid;
+ uint32 offsetInBytes;
+ uint8 usesMob;
+ uint8 pad0;
+ uint16 pad1;
+ uint32 pad2[246];
}
#include "vmware_pack_end.h"
SVGACOTableDXStreamOutputEntry;
@@ -1447,13 +1804,47 @@ typedef
struct SVGA3dCmdDXDefineStreamOutput {
SVGA3dStreamOutputId soid;
uint32 numOutputStreamEntries;
- SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+ SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS];
uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
uint32 rasterizedStream;
}
#include "vmware_pack_end.h"
SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+/*
+ * Version 2 is needed in order to start validating and using the
+ * rasterizedStream field. Unfortunately, the device wasn't validating
+ * or using this field and the driver wasn't initializing it in shipped
+ * code, so a new version of the command is needed to allow that code
+ * to continue to work. It also adds the new numOutputStreamStrides field.
+ */
+
+#define SVGA3D_DX_SO_NO_RASTERIZED_STREAM 0xFFFFFFFF
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutputWithMob {
+ SVGA3dStreamOutputId soid;
+ uint32 numOutputStreamEntries;
+ uint32 numOutputStreamStrides;
+ uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+ uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutputWithMob;
+/* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindStreamOutput {
+ SVGA3dStreamOutputId soid;
+ uint32 mobid;
+ uint32 offsetInBytes;
+ uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindStreamOutput; /* SVGA_3D_CMD_DX_BIND_STREAMOUTPUT */
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXDestroyStreamOutput {
@@ -1472,6 +1863,15 @@ SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
typedef
#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetMinLOD {
+ SVGA3dSurfaceId sid;
+ float minLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetMinLOD; /* SVGA_3D_CMD_DX_SET_MIN_LOD */
+
+typedef
+#include "vmware_pack_begin.h"
struct {
uint64 value;
uint32 mobId;
@@ -1581,33 +1981,38 @@ struct SVGADXContextMobFormat {
uint32 rasterizerStateId;
uint32 depthStencilViewId;
uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
- uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
} renderState;
+ uint32 pad0[8];
+
struct {
uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
uint32 soid;
} streamOut;
- uint32 pad0[11];
+
+ uint32 pad1[10];
+
+ uint32 uavSpliceIndex;
uint8 numViewports;
uint8 numScissorRects;
- uint16 pad1[1];
+ uint16 pad2[1];
- uint32 pad2[3];
+ uint32 pad3[3];
SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
- uint32 pad3[32];
+ uint32 pad4[32];
SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
- uint32 pad4[64];
+ uint32 pad5[64];
struct {
uint32 queryID;
uint32 value;
} predication;
- uint32 pad5[2];
+ SVGAMobId shaderIfaceMobid;
+ uint32 shaderIfaceOffset;
struct {
uint32 shaderId;
SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
@@ -1619,11 +2024,38 @@ struct SVGADXContextMobFormat {
SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
- uint32 pad7[380];
+
+ uint32 pad7[64];
+
+ uint32 uaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS];
+ uint32 csuaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS];
+
+ uint32 pad8[188];
}
#include "vmware_pack_end.h"
SVGADXContextMobFormat;
+/*
+ * There is conflicting documentation on max class instances (253 vs. 256). The
+ * lower value is the one used throughout the device, but since the mob format
+ * is harder to grow later if needed, conservatively use the higher one here.
+ */
+#define SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED 256
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXShaderIfaceMobFormat {
+ struct {
+ uint32 numClassInstances;
+ uint32 iface[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED];
+ SVGA3dIfaceData data[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED];
+ } shaderIfaceState[SVGA3D_NUM_SHADERTYPE];
+
+ uint32 pad0[1018];
+}
+#include "vmware_pack_end.h"
+SVGADXShaderIfaceMobFormat;
+
typedef
#include "vmware_pack_begin.h"
struct SVGA3dCmdDXTempSetContext {
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index b22a67f15660..f4375a41b3aa 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2007-2015 VMware, Inc.
+ * Copyright 2007-2019 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -40,11 +40,25 @@
#include "includeCheck.h"
#define SVGA3D_NUM_CLIPPLANES 6
+#define SVGA3D_MAX_CONTEXT_IDS 256
+#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+
+/*
+ * While there are separate bind points for RenderTargetViews and
+ * UnorderedAccessViews in a DXContext, there is in fact one shared
+ * semantic space that the guest driver can use on any given draw call.
+ * So there are really only 8 slots that can be split up between them, with the
+ * spliceIndex controlling where the UAVs sit in the collapsed array.
+ */
#define SVGA3D_MAX_RENDER_TARGETS 8
#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS (SVGA3D_MAX_RENDER_TARGETS)
#define SVGA3D_MAX_UAVIEWS 8
-#define SVGA3D_MAX_CONTEXT_IDS 256
-#define SVGA3D_MAX_SURFACE_IDS (32 * 1024)
+#define SVGA3D_DX11_1_MAX_UAVIEWS 64
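+
+/*
+ * Illustrative sketch of one plausible reading of the splice rule
+ * above: RTVs occupy slots [0, spliceIndex) of the shared space and
+ * UAVs start at spliceIndex, so the UAVs get whatever is left.
+ */
+static inline uint32 svga_max_uavs_at_splice(uint32 uav_splice_index)
+{
+	return SVGA3D_MAX_RENDER_TARGETS - uav_splice_index;
+}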
+
+/*
+ * Maximum canonical size of a surface in host-backed mode (pre-GBObjects).
+ */
+#define SVGA3D_HB_MAX_SURFACE_SIZE MBYTES_2_BYTES(128)
/*
* Maximum ID a shader can be assigned on a given context.
@@ -59,6 +73,8 @@
#define SVGA3D_NUM_TEXTURE_UNITS 32
#define SVGA3D_NUM_LIGHTS 8
+#define SVGA3D_MAX_VIDEOPROCESSOR_SAMPLERS 32
+
/*
* Maximum size in dwords of shader text the SVGA device will allow.
* Currently 8 MB.
@@ -67,6 +83,11 @@
#define SVGA3D_MAX_SHADER_MEMORY (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
sizeof(uint32))
+/*
+ * The maximum value of threadGroupCount in each dimension.
+ */
+#define SVGA3D_MAX_SHADER_THREAD_GROUPS 65535
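+
+/*
+ * Illustrative sketch: validating the counts of an SVGA3dCmdDXDispatch
+ * body against the per-dimension limit above before submission.
+ */
+static inline bool svga_dispatch_counts_valid(uint32 x, uint32 y, uint32 z)
+{
+	return x <= SVGA3D_MAX_SHADER_THREAD_GROUPS &&
+	       y <= SVGA3D_MAX_SHADER_THREAD_GROUPS &&
+	       z <= SVGA3D_MAX_SHADER_THREAD_GROUPS;
+}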
+
#define SVGA3D_MAX_CLIP_PLANES 6
/*
@@ -85,7 +106,9 @@
/*
* Maximum number of array indexes in a GB surface (with DX enabled).
*/
-#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+#define SVGA3D_SM4_MAX_SURFACE_ARRAYSIZE 512
+#define SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE 2048
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE
/*
* The maximum number of vertex arrays we're guaranteed to support in
@@ -99,4 +122,9 @@
*/
#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+/*
+ * The maximum number of samples that can be contained in a surface.
+ */
+#define SVGA3D_MAX_SAMPLES 8
+
#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 61414f105c67..4db25bd9fa22 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -131,6 +131,8 @@ enum svga3d_block_desc {
SVGA3DBLOCKDESC_BC3 = 1 << 26,
SVGA3DBLOCKDESC_BC4 = 1 << 27,
SVGA3DBLOCKDESC_BC5 = 1 << 28,
+ SVGA3DBLOCKDESC_BC6H = 1 << 29,
+ SVGA3DBLOCKDESC_BC7 = 1 << 30,
SVGA3DBLOCKDESC_A_UINT = SVGA3DBLOCKDESC_ALPHA |
SVGA3DBLOCKDESC_UINT |
@@ -290,6 +292,18 @@ enum svga3d_block_desc {
SVGA3DBLOCKDESC_COMP_UNORM,
SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
SVGA3DBLOCKDESC_COMP_SNORM,
+ SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS = SVGA3DBLOCKDESC_BC6H |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC6H_COMP_UF16 = SVGA3DBLOCKDESC_BC6H |
+ SVGA3DBLOCKDESC_COMPRESSED,
+ SVGA3DBLOCKDESC_BC6H_COMP_SF16 = SVGA3DBLOCKDESC_BC6H |
+ SVGA3DBLOCKDESC_COMPRESSED,
+ SVGA3DBLOCKDESC_BC7_COMP_TYPELESS = SVGA3DBLOCKDESC_BC7 |
+ SVGA3DBLOCKDESC_COMP_TYPELESS,
+ SVGA3DBLOCKDESC_BC7_COMP_UNORM = SVGA3DBLOCKDESC_BC7 |
+ SVGA3DBLOCKDESC_COMP_UNORM,
+ SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC7_COMP_UNORM |
+ SVGA3DBLOCKDESC_SRGB,
SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_YUV_VIDEO |
SVGA3DBLOCKDESC_PLANAR_YUV |
@@ -494,7 +508,7 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
- {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
+ {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 3, 3,
{{8}, {8}, {8}, {0}},
{{16}, {8}, {0}, {0}}},
@@ -604,7 +618,7 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{{0}, {0}, {48}, {0}},
{{0}, {0}, {0}, {0}}},
- {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+ {SVGA3D_FORMAT_DEAD2, SVGA3DBLOCKDESC_NONE,
{1, 1, 1}, 4, 4,
{{8}, {8}, {8}, {8}},
{{0}, {8}, {16}, {24}}},
@@ -1103,6 +1117,46 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
{4, 4, 1}, 16, 16,
{{0}, {0}, {128}, {0}},
{{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_B4G4R4A4_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+ {1, 1, 1}, 2, 2,
+ {{4}, {4}, {4}, {4}},
+ {{0}, {4}, {8}, {12}}},
+
+ {SVGA3D_BC6H_TYPELESS, SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC6H_UF16, SVGA3DBLOCKDESC_BC6H_COMP_UF16,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC6H_SF16, SVGA3DBLOCKDESC_BC6H_COMP_SF16,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC7_TYPELESS, SVGA3DBLOCKDESC_BC7_COMP_TYPELESS,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC7_UNORM, SVGA3DBLOCKDESC_BC7_COMP_UNORM,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_BC7_UNORM_SRGB, SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB,
+ {4, 4, 1}, 16, 16,
+ {{0}, {0}, {128}, {0}},
+ {{0}, {0}, {0}, {0}}},
+
+ {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4,
+ {{8}, {8}, {8}, {8}},
+ {{0}, {8}, {16}, {24}}},
};
static inline u32 clamped_umul32(u32 a, u32 b)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 308370665a8e..77e338a65791 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -116,6 +116,19 @@ SVGA3dBox;
typedef
#include "vmware_pack_begin.h"
struct {
+ int32 x;
+ int32 y;
+ int32 z;
+ int32 w;
+ int32 h;
+ int32 d;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignedBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
uint32 x;
uint32 y;
uint32 z;
@@ -198,8 +211,7 @@ typedef enum SVGA3dSurfaceFormat {
/* Planar video formats */
SVGA3D_NV12 = 44,
- /* Video format with alpha */
- SVGA3D_AYUV = 45,
+ SVGA3D_FORMAT_DEAD2 = 45,
SVGA3D_R32G32B32A32_TYPELESS = 46,
SVGA3D_R32G32B32A32_UINT = 47,
@@ -305,6 +317,18 @@ typedef enum SVGA3dSurfaceFormat {
SVGA3D_B8G8R8X8_UNORM = 142,
SVGA3D_BC4_UNORM = 143,
SVGA3D_BC5_UNORM = 144,
+ SVGA3D_B4G4R4A4_UNORM = 145,
+
+ /* DX11 compressed formats */
+ SVGA3D_BC6H_TYPELESS = 146,
+ SVGA3D_BC6H_UF16 = 147,
+ SVGA3D_BC6H_SF16 = 148,
+ SVGA3D_BC7_TYPELESS = 149,
+ SVGA3D_BC7_UNORM = 150,
+ SVGA3D_BC7_UNORM_SRGB = 151,
+
+ /* Video format with alpha */
+ SVGA3D_AYUV = 152,
SVGA3D_FORMAT_MAX
} SVGA3dSurfaceFormat;
@@ -326,10 +350,10 @@ typedef enum SVGA3dSurfaceFormat {
#define SVGA3D_SURFACE_HINT_RENDERTARGET (CONST64U(1) << 6)
#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL (CONST64U(1) << 7)
#define SVGA3D_SURFACE_HINT_WRITEONLY (CONST64U(1) << 8)
-#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_DEAD2 (CONST64U(1) << 9)
#define SVGA3D_SURFACE_AUTOGENMIPMAPS (CONST64U(1) << 10)
-#define SVGA3D_SURFACE_DECODE_RENDERTARGET (CONST64U(1) << 11)
+#define SVGA3D_SURFACE_DEAD1 (CONST64U(1) << 11)
/*
 * Is this surface using a base-level pitch for its mob backing?
@@ -387,7 +411,7 @@ typedef enum SVGA3dSurfaceFormat {
 * Setting this flag allows this surface to be used with the
* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command. It is only valid for
* buffer surfaces, and no bind flags are allowed to be set on surfaces
- * with this flag.
+ * with this flag except SVGA3D_SURFACE_TRANSFER_TO_BUFFER.
*/
#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30)
@@ -402,7 +426,31 @@ typedef enum SVGA3dSurfaceFormat {
*/
#define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32)
-#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 33)
+/*
+ * Specifies that the surface is allowed to be bound to a UAView.
+ */
+#define SVGA3D_SURFACE_BIND_UAVIEW (CONST64U(1) << 33)
+
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER command. It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag except SVGA3D_SURFACE_TRANSFER_FROM_BUFFER.
+ */
+#define SVGA3D_SURFACE_TRANSFER_TO_BUFFER (CONST64U(1) << 34)
+
+#define SVGA3D_SURFACE_BIND_LOGICOPS (CONST64U(1) << 35)
+
+/*
+ * Optional flags for use with SVGA3D_SURFACE_BIND_UAVIEW
+ */
+#define SVGA3D_SURFACE_BIND_RAW_VIEWS (CONST64U(1) << 36)
+#define SVGA3D_SURFACE_BUFFER_STRUCTURED (CONST64U(1) << 37)
+
+#define SVGA3D_SURFACE_DRAWINDIRECT_ARGS (CONST64U(1) << 38)
+#define SVGA3D_SURFACE_RESOURCE_CLAMP (CONST64U(1) << 39)
+
+#define SVGA3D_SURFACE_FLAG_MAX (CONST64U(1) << 40)
/*
* Surface flags types:
@@ -428,17 +476,25 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
- SVGA3D_SURFACE_MULTISAMPLE \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK \
( SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_RESERVED1 | \
SVGA3D_SURFACE_MULTISAMPLE \
)
#define SVGA3D_SURFACE_2D_DISALLOWED_MASK \
( SVGA3D_SURFACE_CUBEMAP | \
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
@@ -448,7 +504,14 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
- SVGA3D_SURFACE_MULTISAMPLE \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK \
@@ -456,6 +519,7 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_RESERVED1 | \
SVGA3D_SURFACE_MULTISAMPLE \
)
@@ -474,7 +538,14 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_STAGING_DOWNLOAD | \
SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
- SVGA3D_SURFACE_MULTISAMPLE \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK \
@@ -482,10 +553,11 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_AUTOGENMIPMAPS | \
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
- SVGA3D_SURFACE_MASKABLE_ANTIALIAS | \
+ SVGA3D_SURFACE_DEAD2 | \
SVGA3D_SURFACE_ARRAY | \
SVGA3D_SURFACE_MULTISAMPLE | \
- SVGA3D_SURFACE_MOB_PITCH \
+ SVGA3D_SURFACE_MOB_PITCH | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK \
@@ -494,14 +566,23 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_VOLUME | \
SVGA3D_SURFACE_1D | \
SVGA3D_SURFACE_SCREENTARGET | \
- SVGA3D_SURFACE_MOB_PITCH \
+ SVGA3D_SURFACE_MOB_PITCH | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_RESERVED1 | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS \
)
-#define SVGA3D_SURFACE_DX_ONLY_MASK \
- ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
- SVGA3D_SURFACE_STAGING_UPLOAD | \
- SVGA3D_SURFACE_STAGING_DOWNLOAD | \
- SVGA3D_SURFACE_TRANSFER_FROM_BUFFER \
+#define SVGA3D_SURFACE_DX_ONLY_MASK \
+ ( SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER \
)
#define SVGA3D_SURFACE_STAGING_MASK \
@@ -516,9 +597,135 @@ typedef uint64 SVGA3dSurfaceAllFlags;
SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
SVGA3D_SURFACE_BIND_RENDER_TARGET | \
SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
- SVGA3D_SURFACE_BIND_STREAM_OUTPUT \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS \
+ )
+
+#define SVGA3D_SURFACE_VADECODE_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_HINT_STATIC | \
+ SVGA3D_SURFACE_HINT_DYNAMIC | \
+ SVGA3D_SURFACE_HINT_INDEXBUFFER | \
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER | \
+ SVGA3D_SURFACE_HINT_TEXTURE | \
+ SVGA3D_SURFACE_HINT_RENDERTARGET | \
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \
+ SVGA3D_SURFACE_HINT_WRITEONLY | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_HINT_RT_LOCKABLE | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_RENDER_TARGET | \
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_INACTIVE | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
+ )
+
+#define SVGA3D_SURFACE_VAPROCESSFRAME_OUTPUT_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_HINT_INDEXBUFFER | \
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER | \
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_INACTIVE | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_VADECODE | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
+ )
+
+#define SVGA3D_SURFACE_VAPROCESSFRAME_INPUT_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_HINT_INDEXBUFFER | \
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER | \
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_SCREENTARGET | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_STAGING_UPLOAD | \
+ SVGA3D_SURFACE_STAGING_DOWNLOAD | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_LOGICOPS | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
+ )
+
+#define SVGA3D_SURFACE_LOGICOPS_DISALLOWED_MASK \
+ ( SVGA3D_SURFACE_CUBEMAP | \
+ SVGA3D_SURFACE_DEAD2 | \
+ SVGA3D_SURFACE_AUTOGENMIPMAPS | \
+ SVGA3D_SURFACE_VOLUME | \
+ SVGA3D_SURFACE_1D | \
+ SVGA3D_SURFACE_BIND_VERTEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_INDEX_BUFFER | \
+ SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+ SVGA3D_SURFACE_BIND_DEPTH_STENCIL | \
+ SVGA3D_SURFACE_BIND_STREAM_OUTPUT | \
+ SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+ SVGA3D_SURFACE_VADECODE | \
+ SVGA3D_SURFACE_MULTISAMPLE | \
+ SVGA3D_SURFACE_BIND_UAVIEW | \
+ SVGA3D_SURFACE_TRANSFER_TO_BUFFER | \
+ SVGA3D_SURFACE_BIND_RAW_VIEWS | \
+ SVGA3D_SURFACE_BUFFER_STRUCTURED | \
+ SVGA3D_SURFACE_DRAWINDIRECT_ARGS | \
+ SVGA3D_SURFACE_RESOURCE_CLAMP \
)
+#define SVGA3D_BUFFER_STRUCTURED_STRIDE_MAX 2048
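+
+/*
+ * An illustrative consumer of the masks above (sketch only, not part of
+ * this patch; the helper name is hypothetical): a flag combination is
+ * legal for a buffer surface iff none of its bits fall in the buffer
+ * disallowed mask.
+ */
+static inline Bool
+SVGA3dSurfaceBufferFlagsValid(SVGA3dSurfaceAllFlags flags)
+{
+	return (flags & SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK) == 0;
+}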
+
+
+/*
+ * These are really the D3DFORMAT_OP defines from the WDK. We need
+ * them so that we can query the host for which surface operations are
+ * supported (when we're using the D3D backend, in particular), and so
+ * we can send those operations to the guest.
+ */
typedef enum {
SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
@@ -1338,7 +1545,40 @@ typedef enum {
SVGA3D_PRIMITIVE_LINESTRIP_ADJ = 8,
SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ = 9,
SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ = 10,
- SVGA3D_PRIMITIVE_MAX
+ SVGA3D_PRIMITIVE_DX10_MAX = 11,
+ SVGA3D_PRIMITIVE_1_CONTROL_POINT_PATCH = 11,
+ SVGA3D_PRIMITIVE_2_CONTROL_POINT_PATCH = 12,
+ SVGA3D_PRIMITIVE_3_CONTROL_POINT_PATCH = 13,
+ SVGA3D_PRIMITIVE_4_CONTROL_POINT_PATCH = 14,
+ SVGA3D_PRIMITIVE_5_CONTROL_POINT_PATCH = 15,
+ SVGA3D_PRIMITIVE_6_CONTROL_POINT_PATCH = 16,
+ SVGA3D_PRIMITIVE_7_CONTROL_POINT_PATCH = 17,
+ SVGA3D_PRIMITIVE_8_CONTROL_POINT_PATCH = 18,
+ SVGA3D_PRIMITIVE_9_CONTROL_POINT_PATCH = 19,
+ SVGA3D_PRIMITIVE_10_CONTROL_POINT_PATCH = 20,
+ SVGA3D_PRIMITIVE_11_CONTROL_POINT_PATCH = 21,
+ SVGA3D_PRIMITIVE_12_CONTROL_POINT_PATCH = 22,
+ SVGA3D_PRIMITIVE_13_CONTROL_POINT_PATCH = 23,
+ SVGA3D_PRIMITIVE_14_CONTROL_POINT_PATCH = 24,
+ SVGA3D_PRIMITIVE_15_CONTROL_POINT_PATCH = 25,
+ SVGA3D_PRIMITIVE_16_CONTROL_POINT_PATCH = 26,
+ SVGA3D_PRIMITIVE_17_CONTROL_POINT_PATCH = 27,
+ SVGA3D_PRIMITIVE_18_CONTROL_POINT_PATCH = 28,
+ SVGA3D_PRIMITIVE_19_CONTROL_POINT_PATCH = 29,
+ SVGA3D_PRIMITIVE_20_CONTROL_POINT_PATCH = 30,
+ SVGA3D_PRIMITIVE_21_CONTROL_POINT_PATCH = 31,
+ SVGA3D_PRIMITIVE_22_CONTROL_POINT_PATCH = 32,
+ SVGA3D_PRIMITIVE_23_CONTROL_POINT_PATCH = 33,
+ SVGA3D_PRIMITIVE_24_CONTROL_POINT_PATCH = 34,
+ SVGA3D_PRIMITIVE_25_CONTROL_POINT_PATCH = 35,
+ SVGA3D_PRIMITIVE_26_CONTROL_POINT_PATCH = 36,
+ SVGA3D_PRIMITIVE_27_CONTROL_POINT_PATCH = 37,
+ SVGA3D_PRIMITIVE_28_CONTROL_POINT_PATCH = 38,
+ SVGA3D_PRIMITIVE_29_CONTROL_POINT_PATCH = 39,
+ SVGA3D_PRIMITIVE_30_CONTROL_POINT_PATCH = 40,
+ SVGA3D_PRIMITIVE_31_CONTROL_POINT_PATCH = 41,
+ SVGA3D_PRIMITIVE_32_CONTROL_POINT_PATCH = 42,
+ SVGA3D_PRIMITIVE_MAX = 43
} SVGA3dPrimitiveType;
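+
+/*
+ * The patch topologies above are numbered contiguously, so the enum
+ * value for an N-control-point patch can be computed arithmetically.
+ * A hypothetical helper (not part of this patch), valid for
+ * 1 <= numControlPoints <= 32:
+ */
+static inline SVGA3dPrimitiveType
+SVGA3dPatchTopology(uint32 numControlPoints)
+{
+	return (SVGA3dPrimitiveType)
+		(SVGA3D_PRIMITIVE_1_CONTROL_POINT_PATCH + numControlPoints - 1);
+}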
typedef enum {
@@ -1442,16 +1682,15 @@ typedef enum {
SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS = 5,
SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE = 6,
SVGA3D_QUERYTYPE_OCCLUSION64 = 7,
- SVGA3D_QUERYTYPE_EVENT = 8,
- SVGA3D_QUERYTYPE_DX10_MAX = 9,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 9,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 10,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 11,
- SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 12,
- SVGA3D_QUERYTYPE_SOP_STREAM0 = 13,
- SVGA3D_QUERYTYPE_SOP_STREAM1 = 14,
- SVGA3D_QUERYTYPE_SOP_STREAM2 = 15,
- SVGA3D_QUERYTYPE_SOP_STREAM3 = 16,
+ SVGA3D_QUERYTYPE_DX10_MAX = 8,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM0 = 8,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM1 = 9,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM2 = 10,
+ SVGA3D_QUERYTYPE_SOSTATS_STREAM3 = 11,
+ SVGA3D_QUERYTYPE_SOP_STREAM0 = 12,
+ SVGA3D_QUERYTYPE_SOP_STREAM1 = 13,
+ SVGA3D_QUERYTYPE_SOP_STREAM2 = 14,
+ SVGA3D_QUERYTYPE_SOP_STREAM3 = 15,
SVGA3D_QUERYTYPE_MAX
} SVGA3dQueryType;
@@ -1584,28 +1823,33 @@ typedef enum {
SVGA3D_READ_HOST_VRAM = 2,
} SVGA3dTransferType;
-typedef enum {
- SVGA3D_LOGICOP_INVALID = 0,
- SVGA3D_LOGICOP_MIN = 1,
- SVGA3D_LOGICOP_COPY = 1,
- SVGA3D_LOGICOP_NOT = 2,
- SVGA3D_LOGICOP_AND = 3,
- SVGA3D_LOGICOP_OR = 4,
- SVGA3D_LOGICOP_XOR = 5,
- SVGA3D_LOGICOP_NXOR = 6,
- SVGA3D_LOGICOP_ROP3MIN = 30, /* 7-29 are reserved for future logic ops. */
- SVGA3D_LOGICOP_ROP3MAX = (SVGA3D_LOGICOP_ROP3MIN + 255),
- SVGA3D_LOGICOP_MAX = (SVGA3D_LOGICOP_ROP3MAX + 1),
-} SVGA3dLogicOp;
+#define SVGA3D_LOGICOP_INVALID 0
+#define SVGA3D_LOGICOP_MIN 1
+#define SVGA3D_LOGICOP_COPY 1
+#define SVGA3D_LOGICOP_NOT 2
+#define SVGA3D_LOGICOP_AND 3
+#define SVGA3D_LOGICOP_OR 4
+#define SVGA3D_LOGICOP_XOR 5
+#define SVGA3D_LOGICOP_NXOR 6
+#define SVGA3D_LOGICOP_ROP3 7
+#define SVGA3D_LOGICOP_MAX 8
+
+typedef uint16 SVGA3dLogicOp;
+
+#define SVGA3D_LOGICOP_ROP3_INVALID ((uint16) -1)
+#define SVGA3D_LOGICOP_ROP3_MIN 0
+#define SVGA3D_LOGICOP_ROP3_MAX 256
+
+typedef uint16 SVGA3dLogicOpRop3;
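+
+/*
+ * Sketch of how the split encoding is meant to be consumed (the helper
+ * and the validity rule for the ROP3 field are assumptions, not from
+ * this patch): the base op travels as an SVGA3dLogicOp, and the 8-bit
+ * ROP3 code, when used, in a separate SVGA3dLogicOpRop3 field.
+ */
+static inline Bool
+SVGA3dLogicOpValid(SVGA3dLogicOp op, SVGA3dLogicOpRop3 rop3)
+{
+	if (op == SVGA3D_LOGICOP_ROP3)
+		return rop3 < SVGA3D_LOGICOP_ROP3_MAX;
+	return op >= SVGA3D_LOGICOP_MIN && op < SVGA3D_LOGICOP_MAX;
+}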
typedef
#include "vmware_pack_begin.h"
struct {
union {
struct {
- uint16 function; /* SVGA3dFogFunction */
- uint8 type; /* SVGA3dFogType */
- uint8 base; /* SVGA3dFogBase */
+ uint16 function; // SVGA3dFogFunction
+ uint8 type; // SVGA3dFogType
+ uint8 base; // SVGA3dFogBase
};
uint32 uintValue;
};
@@ -1742,4 +1986,15 @@ typedef enum SVGA3dMSQualityLevel {
SVGA3D_MS_QUALITY_MAX = 2,
} SVGA3dMSQualityLevel;
+/*
+ * Screen Target Update Flags
+ */
+
+typedef enum SVGA3dFrameUpdateType {
+ SVGA3D_FRAME_END = 0,
+ SVGA3D_FRAME_PARTIAL = 1,
+ SVGA3D_FRAME_UNKNOWN = 2,
+ SVGA3D_FRAME_MAX = 3,
+} SVGA3dFrameUpdateType;
+
#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 056f54b35d73..19fb9e3299e7 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -70,8 +70,7 @@ typedef uint32 SVGAMobId;
/*
* Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
- * cursor bypass mode. This is still supported, but no new guest
- * drivers should use it.
+ * cursor bypass mode.
*/
#define SVGA_CURSOR_ON_HIDE 0x0
#define SVGA_CURSOR_ON_SHOW 0x1
@@ -137,6 +136,17 @@ typedef uint32 SVGAMobId;
#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
/*
+ * The byte-size is the size of the actual cursor data,
+ * possibly after expanding it to the current bit depth.
+ *
+ * 40K is sufficient memory for two 32-bit planes for a 64 x 64 cursor.
+ *
+ * The dimension limit is a bound on the maximum width or height.
+ */
+#define SVGA_MAX_CURSOR_CMD_BYTES (40 * 1024)
+#define SVGA_MAX_CURSOR_CMD_DIMENSION 1024
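+
+/*
+ * Hypothetical bounds check (not part of this patch) applying the two
+ * limits above to a cursor define command:
+ */
+static inline Bool
+SVGACursorCmdWithinLimits(uint32 width, uint32 height, uint32 sizeInBytes)
+{
+	return width <= SVGA_MAX_CURSOR_CMD_DIMENSION &&
+	       height <= SVGA_MAX_CURSOR_CMD_DIMENSION &&
+	       sizeInBytes <= SVGA_MAX_CURSOR_CMD_BYTES;
+}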
+
+/*
* Registers
*/
@@ -169,7 +179,7 @@ enum {
SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
SVGA_REG_GUEST_ID = 23, /* (Deprecated) */
- SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
+ SVGA_REG_DEAD = 24, /* Drivers should never write this. */
SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
@@ -208,7 +218,13 @@ enum {
SVGA_REG_MAX_PRIMARY_MEM = 50,
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
- SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Sugested limit on mob mem */
+ /*
+ * Legacy version of SVGA_REG_GBOBJECT_MEM_SIZE_KB for drivers that
+ * don't know how to convert to a 64-bit byte value without overflowing.
+ * (See SVGA_REG_GBOBJECT_MEM_SIZE_KB).
+ */
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51,
+
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -218,7 +234,59 @@ enum {
SVGA_REG_BLANK_SCREEN_TARGETS = 58,
SVGA_REG_CAP2 = 59,
SVGA_REG_DEVEL_CAP = 60,
- SVGA_REG_TOP = 61, /* Must be 1 more than the last register */
+
+ /*
+ * Allow the guest to hint to the device which driver is running.
+ *
+ * This should not generally change device behavior, but might be
+ * convenient to work around specific bugs in guest drivers.
+ *
+ * Drivers should first write their id value into SVGA_REG_GUEST_DRIVER_ID,
+ * and then fill out all of the version registers that they have defined.
+ *
+ * After the driver has written all of the registers, they should
+ * then write the value SVGA_REG_GUEST_DRIVER_ID_SUBMIT to the
+ * SVGA_REG_GUEST_DRIVER_ID register, to signal that they have finished.
+ *
+ * The SVGA_REG_GUEST_DRIVER_ID values are defined below by the
+ * SVGARegGuestDriverId enum.
+ *
+ * The SVGA_REG_GUEST_DRIVER_VERSION fields are driver-specific,
+ * but ideally should encode a monotonically increasing number that allows
+ * the device to perform inequality checks against ranges of driver versions.
+ */
+ SVGA_REG_GUEST_DRIVER_ID = 61,
+ SVGA_REG_GUEST_DRIVER_VERSION1 = 62,
+ SVGA_REG_GUEST_DRIVER_VERSION2 = 63,
+ SVGA_REG_GUEST_DRIVER_VERSION3 = 64,
+ SVGA_REG_CURSOR_MOBID = 65,
+ SVGA_REG_CURSOR_MAX_BYTE_SIZE = 66,
+ SVGA_REG_CURSOR_MAX_DIMENSION = 67,
+
+ SVGA_REG_FIFO_CAPS = 68,
+ SVGA_REG_FENCE = 69,
+
+ SVGA_REG_RESERVED1 = 70,
+ SVGA_REG_RESERVED2 = 71,
+ SVGA_REG_RESERVED3 = 72,
+ SVGA_REG_RESERVED4 = 73,
+ SVGA_REG_RESERVED5 = 74,
+ SVGA_REG_SCREENDMA = 75,
+
+ /*
+ * The maximum amount of guest-backed object memory that the device can
+ * have resident at a time. Guest drivers should keep their working set
+ * size below this limit for best performance.
+ *
+ * Note that this value is in kilobytes, and not bytes, because the actual
+ * number of bytes might be larger than can fit in a 32-bit register.
+ *
+ * PLEASE USE A 64-BIT VALUE WHEN CONVERTING THIS INTO BYTES.
+ * (See SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB).
+ */
+ SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76,
+
+ SVGA_REG_TOP = 77, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -229,6 +297,20 @@ enum {
the use of the current SVGA driver. */
};
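+
+/*
+ * Conversion sketch for SVGA_REG_GBOBJECT_MEM_SIZE_KB (illustrative
+ * only; struct vmw_private and vmw_read() are the guest driver's, not
+ * defined in this header): widen to 64 bits before multiplying, which
+ * is exactly the overflow the legacy register exists to avoid.
+ */
+static inline uint64
+vmw_gbobject_mem_size_bytes(struct vmw_private *dev_priv)
+{
+	return (uint64)vmw_read(dev_priv, SVGA_REG_GBOBJECT_MEM_SIZE_KB) * 1024;
+}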
+
+/*
+ * Values for SVGA_REG_GUEST_DRIVER_ID.
+ */
+typedef enum SVGARegGuestDriverId {
+ SVGA_REG_GUEST_DRIVER_ID_UNKNOWN = 0,
+ SVGA_REG_GUEST_DRIVER_ID_WDDM = 1,
+ SVGA_REG_GUEST_DRIVER_ID_LINUX = 2,
+ SVGA_REG_GUEST_DRIVER_ID_MAX,
+
+ SVGA_REG_GUEST_DRIVER_ID_SUBMIT = MAX_UINT32,
+} SVGARegGuestDriverId;
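+
+/*
+ * Sketch of the registration sequence described above (the helper and
+ * the version encoding are hypothetical; vmw_write() is the guest
+ * driver's register accessor): write the id, fill in the version
+ * registers, then submit.
+ */
+static inline void
+vmw_report_guest_driver_id(struct vmw_private *dev_priv)
+{
+	vmw_write(dev_priv, SVGA_REG_GUEST_DRIVER_ID,
+		  SVGA_REG_GUEST_DRIVER_ID_LINUX);
+	/* Any monotonically increasing scheme works, e.g. (major << 16) | minor. */
+	vmw_write(dev_priv, SVGA_REG_GUEST_DRIVER_VERSION1, (2 << 16) | 18);
+	vmw_write(dev_priv, SVGA_REG_GUEST_DRIVER_ID,
+		  SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
+}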
+
+
/*
* Guest memory regions (GMRs):
*
@@ -416,7 +498,6 @@ typedef enum {
SVGA_CB_CONTEXT_0 = 0x0,
SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
SVGA_CB_CONTEXT_MAX = 0x2,
- SVGA_CB_CONTEXT_HP_MAX = 0x2,
} SVGACBContext;
@@ -733,9 +814,6 @@ SVGASignedPoint;
* and must not be reused. Those capabilities will never be reported
* by new versions of the SVGA device.
*
- * XXX: Add longer descriptions for each capability, including a list
- * of the new features that each capability provides.
- *
* SVGA_CAP_IRQMASK --
* Provides device interrupts. Adds device register SVGA_REG_IRQMASK
* to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
@@ -842,17 +920,51 @@ SVGASignedPoint;
* Allow the IntraSurfaceCopy command.
*
* SVGA_CAP2_DX2 --
- * Allow the DefineGBSurface_v3, WholeSurfaceCopy.
+ * Allow the DefineGBSurface_v3, WholeSurfaceCopy, WriteZeroSurface, and
+ * HintZeroSurface commands, and the SVGA_REG_GUEST_DRIVER_ID register.
+ *
+ * SVGA_CAP2_GB_MEMSIZE_2 --
+ * Allow the SVGA_REG_GBOBJECT_MEM_SIZE_KB register.
+ *
+ * SVGA_CAP2_SCREENDMA_REG --
+ * Allow the SVGA_REG_SCREENDMA register.
+ *
+ * SVGA_CAP2_OTABLE_PTDEPTH_2 --
+ * Allow 2 level page tables for OTable commands.
+ *
+ * SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT --
+ * Allow a stretch blt from a non-multisampled surface to a multisampled
+ * surface.
+ *
+ * SVGA_CAP2_CURSOR_MOB --
+ * Allow the SVGA_REG_CURSOR_MOBID register.
+ *
+ * SVGA_CAP2_MSHINT --
+ * Allow the SVGA_REG_MSHINT register.
+ *
+ * SVGA_CAP2_DX3 --
+ * Allows the DefineGBSurface_v4 command.
+ * Allows the DXDefineDepthStencilView_v2, DXDefineStreamOutputWithMob,
+ * and DXBindStreamOutput commands if 3D is also available.
+ * Allows the DXPredStagingCopy and DXStagingCopy commands if SM41
+ * is also available.
*
* SVGA_CAP2_RESERVED --
* Reserve the last bit for extending the SVGA capabilities to some
* future mechanisms.
*/
-#define SVGA_CAP2_NONE 0x00000000
-#define SVGA_CAP2_GROW_OTABLE 0x00000001
-#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
-#define SVGA_CAP2_DX2 0x00000004
-#define SVGA_CAP2_RESERVED 0x80000000
+#define SVGA_CAP2_NONE 0x00000000
+#define SVGA_CAP2_GROW_OTABLE 0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2 0x00000004
+#define SVGA_CAP2_GB_MEMSIZE_2 0x00000008
+#define SVGA_CAP2_SCREENDMA_REG 0x00000010
+#define SVGA_CAP2_OTABLE_PTDEPTH_2 0x00000020
+#define SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT 0x00000040
+#define SVGA_CAP2_CURSOR_MOB 0x00000080
+#define SVGA_CAP2_MSHINT 0x00000100
+#define SVGA_CAP2_DX3 0x00000400
+#define SVGA_CAP2_RESERVED 0x80000000
/*
@@ -875,7 +987,9 @@ typedef enum {
SVGABackdoorCapFifoCaps = 1,
SVGABackdoorCap3dHWVersion = 2,
SVGABackdoorCapDeviceCaps2 = 3,
- SVGABackdoorCapMax = 4,
+ SVGABackdoorCapDevelCaps = 4,
+ SVGABackdoorDevelRenderer = 5,
+ SVGABackdoorCapMax = 6,
} SVGABackdoorCapType;
@@ -1055,103 +1169,80 @@ enum {
/*
* FIFO Synchronization Registers
*
- * This explains the relationship between the various FIFO
- * sync-related registers in IOSpace and in FIFO space.
- *
* SVGA_REG_SYNC --
*
- * The SYNC register can be used in two different ways by the guest:
- *
- * 1. If the guest wishes to fully sync (drain) the FIFO,
- * it will write once to SYNC then poll on the BUSY
- * register. The FIFO is sync'ed once BUSY is zero.
- *
- * 2. If the guest wants to asynchronously wake up the host,
- * it will write once to SYNC without polling on BUSY.
- * Ideally it will do this after some new commands have
- * been placed in the FIFO, and after reading a zero
- * from SVGA_FIFO_BUSY.
- *
- * (1) is the original behaviour that SYNC was designed to
- * support. Originally, a write to SYNC would implicitly
- * trigger a read from BUSY. This causes us to synchronously
- * process the FIFO.
- *
- * This behaviour has since been changed so that writing SYNC
- * will *not* implicitly cause a read from BUSY. Instead, it
- * makes a channel call which asynchronously wakes up the MKS
- * thread.
- *
- * New guests can use this new behaviour to implement (2)
- * efficiently. This lets guests get the host's attention
- * without waiting for the MKS to poll, which gives us much
- * better CPU utilization on SMP hosts and on UP hosts while
- * we're blocked on the host GPU.
- *
- * Old guests shouldn't notice the behaviour change. SYNC was
- * never guaranteed to process the entire FIFO, since it was
- * bounded to a particular number of CPU cycles. Old guests will
- * still loop on the BUSY register until the FIFO is empty.
- *
- * Writing to SYNC currently has the following side-effects:
- *
- * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
- * - Asynchronously wakes up the MKS thread for FIFO processing
- * - The value written to SYNC is recorded as a "reason", for
- * stats purposes.
- *
- * If SVGA_FIFO_BUSY is available, drivers are advised to only
- * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
- * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
- * eventually set SVGA_FIFO_BUSY on its own, but this approach
- * lets the driver avoid sending multiple asynchronous wakeup
- * messages to the MKS thread.
+ * The SYNC register can be used by the guest driver to signal to the
+ * device that the guest driver is waiting for previously submitted
+ * commands to complete.
+ *
+ * When the guest driver writes to the SYNC register, the device sets
+ * the BUSY register to TRUE, and starts processing the submitted commands
+ * (if it was not already doing so). When all previously submitted
+ * commands are finished and the device is idle again, it sets the BUSY
+ * register back to FALSE. (If the guest driver submits new commands
+ * after writing the SYNC register, the new commands are not guaranteed
+ * to have been processed.)
+ *
+ * When guest drivers are submitting commands using the FIFO, the device
+ * periodically polls to check for new FIFO commands when idle, which may
+ * introduce a delay in command processing. If the guest driver wants
+ * the commands to be processed quickly (which it typically does), it
+ * should write SYNC after each batch of commands is committed to the
+ * FIFO to immediately wake up the device. For even better performance,
+ * the guest can use the SVGA_FIFO_BUSY register to avoid these extra
+ * SYNC writes if the device is already active, using the technique known
+ * as "Ringing the Doorbell" (described below). (Note that command
+ * buffer submission implicitly wakes up the device, and so doesn't
+ * suffer from this problem.)
+ *
+ * The SYNC register can also be used in combination with BUSY to
+ * synchronously ensure that all SVGA commands are processed (with both
+ * the FIFO and command-buffers). To do this, the guest driver should
+ * write to SYNC, and then loop reading BUSY until BUSY returns FALSE.
+ * This technique is known as a "Legacy Sync".
*
* SVGA_REG_BUSY --
*
* This register is set to TRUE when SVGA_REG_SYNC is written,
- * and it reads as FALSE when the FIFO has been completely
- * drained.
- *
- * Every read from this register causes us to synchronously
- * process FIFO commands. There is no guarantee as to how many
- * commands each read will process.
+ * and is set back to FALSE when the device has finished processing
+ * all commands and is idle again.
*
- * CPU time spent processing FIFO commands will be billed to
- * the guest.
+ * Every read from the BUSY register will block for an undefined
+ * amount of time, normally until the device finishes some interesting
+ * work unit or becomes idle.
*
- * New drivers should avoid using this register unless they
- * need to guarantee that the FIFO is completely drained. It
- * is overkill for performing a sync-to-fence. Older drivers
- * will use this register for any type of synchronization.
+ * Guest drivers can also do a partial Legacy Sync to check for some
+ * particular condition, for instance by stopping early when a fence
+ * passes before BUSY has been set back to FALSE. This is particularly
+ * useful if the guest driver knows that it is blocked waiting on the
+ * device, because it will yield CPU time back to the host.
*
* SVGA_FIFO_BUSY --
*
- * This register is a fast way for the guest driver to check
- * whether the FIFO is already being processed. It reads and
- * writes at normal RAM speeds, with no monitor intervention.
- *
- * If this register reads as TRUE, the host is guaranteeing that
- * any new commands written into the FIFO will be noticed before
- * the MKS goes back to sleep.
+ * The SVGA_FIFO_BUSY register is a fast way for the guest driver to check
+ * whether the device is actively processing FIFO commands before writing
+ * the more expensive SYNC register.
*
- * If this register reads as FALSE, no such guarantee can be
- * made.
+ * If this register reads as TRUE, the device is actively processing
+ * FIFO commands.
*
- * The guest should use this register to quickly determine
- * whether or not it needs to wake up the host. If the guest
- * just wrote a command or group of commands that it would like
- * the host to begin processing, it should:
+ * If this register reads as FALSE, the device may not be actively
+ * processing commands, and the guest driver should try
+ * "Ringing the Doorbell".
*
- * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
- * action is necessary.
+ * To Ring the Doorbell, the guest should:
*
- * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
- * code that we've already sent a SYNC to the host and we
- * don't need to send a duplicate.
+ * 1. Have already written their batch of commands into the FIFO.
+ * 2. Check if the SVGA_FIFO_BUSY register is available by reading
+ * SVGA_FIFO_MIN.
+ * 3. Read SVGA_FIFO_BUSY. If it reads as TRUE, the device is actively
+ * processing FIFO commands, and no further action is necessary.
+ * 4. If SVGA_FIFO_BUSY was FALSE, write TRUE to SVGA_REG_SYNC.
*
- * 3. Write a reason to SVGA_REG_SYNC. This will send an
- * asynchronous wakeup to the MKS thread.
+ * For maximum performance, this procedure should be followed after
+ * every meaningful batch of commands has been written into the FIFO.
+ * (Normally when the underlying application signals it has finished a
+ * meaningful work unit by calling Flush.)
*/
@@ -1164,9 +1255,6 @@ enum {
* Video -- SVGA Video overlay units are supported
* Escape -- Escape command is supported
*
- * XXX: Add longer descriptions for each capability, including a list
- * of the new features that each capability provides.
- *
* SVGA_FIFO_CAP_SCREEN_OBJECT --
*
* Provides dynamic multi-screen rendering, for improved Unity and
@@ -1279,6 +1367,15 @@ enum {
/*
+ * ScreenDMA Register Values
+ */
+
+#define SVGA_SCREENDMA_REG_UNDEFINED 0
+#define SVGA_SCREENDMA_REG_NOT_PRESENT 1
+#define SVGA_SCREENDMA_REG_PRESENT 2
+#define SVGA_SCREENDMA_REG_MAX 3
+
+/*
* Video overlay support
*/
@@ -1665,6 +1762,80 @@ SVGAFifoCmdDefineAlphaCursor;
/*
+ * Provide a new large cursor image, as an AND/XOR mask.
+ *
+ * Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ uint32 andMaskDepth;
+ uint32 xorMaskDepth;
+ /*
+ * Followed by scanline data for AND mask, then XOR mask.
+ * Each scanline is padded to a 32-bit boundary.
+ */
+}
+#include "vmware_pack_end.h"
+SVGAGBColorCursorHeader;
+
+
+/*
+ * Provide a new large cursor image, in 32-bit BGRA format.
+ *
+ * Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ /* Followed by scanline data */
+}
+#include "vmware_pack_end.h"
+SVGAGBAlphaCursorHeader;
+
+ /*
+ * Define the SVGA guest backed cursor types
+ */
+
+typedef enum {
+ SVGA_COLOR_CURSOR = 0,
+ SVGA_ALPHA_CURSOR = 1,
+} SVGAGBCursorType;
+
+/*
+ * Provide a new large cursor image.
+ *
+ * Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+ SVGAGBCursorType type;
+ union {
+ SVGAGBColorCursorHeader colorHeader;
+ SVGAGBAlphaCursorHeader alphaHeader;
+ } header;
+ uint32 sizeInBytes;
+ /*
+ * Followed by the cursor data
+ */
+}
+#include "vmware_pack_end.h"
+SVGAGBCursorHeader;
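+
+/*
+ * Example (assumed values, not from this patch) of filling the header
+ * for a 64x64 32-bit BGRA cursor; the pixel data follows the header in
+ * the cursor MOB.
+ */
+static inline void
+SVGAGBFillAlphaCursorHeader(SVGAGBCursorHeader *h)
+{
+	h->type = SVGA_ALPHA_CURSOR;
+	h->header.alphaHeader.hotspotX = 0;
+	h->header.alphaHeader.hotspotY = 0;
+	h->header.alphaHeader.width = 64;
+	h->header.alphaHeader.height = 64;
+	h->sizeInBytes = 64 * 64 * sizeof(uint32);
+}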
+
+
+/*
* SVGA_CMD_UPDATE_VERBOSE --
*
* Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
@@ -2061,9 +2232,12 @@ SVGAFifoCmdRemapGMR2;
#define SVGA_VRAM_MAX_SIZE (128 * 1024 * 1024)
#define SVGA_MEMORY_SIZE_MAX (1024 * 1024 * 1024)
#define SVGA_FIFO_SIZE_MAX (2 * 1024 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_MAX (2 * 1024 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_2GB (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_3GB (3 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_4GB (4 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_8GB (8 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
#define SVGA_VRAM_SIZE_W2K (64 * 1024 * 1024) /* 64 MB */
@@ -2086,4 +2260,6 @@ SVGAFifoCmdRemapGMR2;
#define SVGA_FIFO_SIZE_GBOBJECTS (256 * 1024)
#define SVGA_VRAM_SIZE_GBOBJECTS (4 * 1024 * 1024)
+#define SVGA_PCI_REGS_PAGES (1)
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
index 350bbc6fab02..beddccee40f6 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -37,6 +37,7 @@ typedef s8 int8;
typedef uint64 PA;
typedef uint32 PPN;
+typedef uint32 PPN32;
typedef uint64 PPN64;
typedef bool Bool;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 66e14e38d5e8..f41550797970 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -57,9 +57,11 @@
#define VMW_BINDING_RT_BIT 0
#define VMW_BINDING_PS_BIT 1
-#define VMW_BINDING_SO_BIT 2
+#define VMW_BINDING_SO_T_BIT 2
#define VMW_BINDING_VB_BIT 3
-#define VMW_BINDING_NUM_BITS 4
+#define VMW_BINDING_UAV_BIT 4
+#define VMW_BINDING_CS_UAV_BIT 5
+#define VMW_BINDING_NUM_BITS 6
#define VMW_BINDING_PS_SR_BIT 0
@@ -75,6 +77,8 @@
* @vertex_buffers: Vertex buffer bindings.
* @index_buffer: Index buffer binding.
* @per_shader: Per shader-type bindings.
+ * @ua_views: UAV bindings.
+ * @so_state: StreamOutput bindings.
* @dirty: Bitmap tracking per binding-type changes that have not yet
* been emitted to the device.
* @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
@@ -95,10 +99,12 @@ struct vmw_ctx_binding_state {
struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
struct vmw_ctx_bindinfo_view ds_view;
- struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+ struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
struct vmw_ctx_bindinfo_ib index_buffer;
- struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+ struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
+ struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
+ struct vmw_ctx_bindinfo_so so_state;
unsigned long dirty;
DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
@@ -115,12 +121,16 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+
static void vmw_binding_build_asserts(void) __attribute__ ((unused));
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
@@ -151,6 +161,9 @@ static const size_t vmw_binding_shader_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
+ offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
offsetof(struct vmw_ctx_binding_state, render_targets),
@@ -162,6 +175,9 @@ static const size_t vmw_binding_cb_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
+ offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
offsetof(struct vmw_ctx_binding_state, ds_view),
@@ -170,8 +186,11 @@ static const size_t vmw_binding_sr_offsets[] = {
offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
+ offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
};
-static const size_t vmw_binding_so_offsets[] = {
+static const size_t vmw_binding_so_target_offsets[] = {
offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
@@ -180,6 +199,15 @@ static const size_t vmw_binding_vb_offsets[] = {
static const size_t vmw_binding_ib_offsets[] = {
offsetof(struct vmw_ctx_binding_state, index_buffer),
};
+static const size_t vmw_binding_uav_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
+};
+static const size_t vmw_binding_cs_uav_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
+};
+static const size_t vmw_binding_so_offsets[] = {
+ offsetof(struct vmw_ctx_binding_state, so_state),
+};
static const struct vmw_binding_info vmw_binding_infos[] = {
[vmw_ctx_binding_shader] = {
@@ -214,10 +242,10 @@ static const struct vmw_binding_info vmw_binding_infos[] = {
.size = sizeof(struct vmw_ctx_bindinfo_view),
.offsets = vmw_binding_dx_ds_offsets,
.scrub_func = vmw_binding_scrub_dx_rt},
- [vmw_ctx_binding_so] = {
- .size = sizeof(struct vmw_ctx_bindinfo_so),
- .offsets = vmw_binding_so_offsets,
- .scrub_func = vmw_binding_scrub_so},
+ [vmw_ctx_binding_so_target] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_so_target),
+ .offsets = vmw_binding_so_target_offsets,
+ .scrub_func = vmw_binding_scrub_so_target},
[vmw_ctx_binding_vb] = {
.size = sizeof(struct vmw_ctx_bindinfo_vb),
.offsets = vmw_binding_vb_offsets,
@@ -226,6 +254,18 @@ static const struct vmw_binding_info vmw_binding_infos[] = {
.size = sizeof(struct vmw_ctx_bindinfo_ib),
.offsets = vmw_binding_ib_offsets,
.scrub_func = vmw_binding_scrub_ib},
+ [vmw_ctx_binding_uav] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_uav_offsets,
+ .scrub_func = vmw_binding_scrub_uav},
+ [vmw_ctx_binding_cs_uav] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_view),
+ .offsets = vmw_binding_cs_uav_offsets,
+ .scrub_func = vmw_binding_scrub_cs_uav},
+ [vmw_ctx_binding_so] = {
+ .size = sizeof(struct vmw_ctx_bindinfo_so),
+ .offsets = vmw_binding_so_offsets,
+ .scrub_func = vmw_binding_scrub_so},
};
/**
@@ -312,6 +352,18 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
}
/**
+ * vmw_binding_add_uav_index - Add UAV index for tracking.
+ * @cbs: Pointer to the context binding state tracker.
+ * @slot: UAV type to which this index is bound.
+ * @index: The splice index to track.
+ */
+void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
+ uint32 index)
+{
+ cbs->ua_views[slot].index = index;
+}
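+
+/*
+ * Hypothetical call site (not part of this patch): the command
+ * validator would record the user-supplied splice index before the UAV
+ * bindings are committed, e.g.
+ *
+ *	vmw_binding_add_uav_index(cbs, 0, cmd->body.uavSpliceIndex);
+ */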
+
+/**
* vmw_binding_transfer: Transfer a context binding tracking entry.
*
* @cbs: Pointer to the persistent context binding state tracker.
@@ -450,6 +502,10 @@ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
vmw_binding_transfer(to, from, entry);
vmw_binding_drop(entry);
}
+
+	/* Also transfer UAV splice indices. */
+ to->ua_views[0].index = from->ua_views[0].index;
+ to->ua_views[1].index = from->ua_views[1].index;
}
/**
@@ -828,8 +884,8 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *bi,
u32 max_num)
{
- const struct vmw_ctx_bindinfo_so *biso =
- container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+ const struct vmw_ctx_bindinfo_so_target *biso =
+ container_of(bi, struct vmw_ctx_bindinfo_so_target, bi);
unsigned long i;
SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
@@ -854,11 +910,11 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_so - Issue delayed streamout binding commands
+ * vmw_emit_set_so_target - Issue delayed streamout binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*/
-static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
{
const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
struct {
@@ -1005,6 +1061,66 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
return 0;
}
+static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetUAViews body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	/* Splice index is specified by user-space */
+ cmd->body.uavSpliceIndex = cbs->ua_views[0].index;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+}
+
+static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
+{
+ const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCSUAViews body;
+ } *cmd;
+ size_t cmd_size, view_id_size;
+ const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+ vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+ view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+ cmd_size = sizeof(*cmd) + view_id_size;
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
+ cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	/* Start index is specified by user-space */
+ cmd->body.startIndex = cbs->ua_views[1].index;
+
+ memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+ vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+ return 0;
+}
+
/**
* vmw_binding_emit_dirty - Issue delayed binding commands
*
@@ -1030,12 +1146,18 @@ static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
case VMW_BINDING_PS_BIT:
ret = vmw_binding_emit_dirty_ps(cbs);
break;
- case VMW_BINDING_SO_BIT:
- ret = vmw_emit_set_so(cbs);
+ case VMW_BINDING_SO_T_BIT:
+ ret = vmw_emit_set_so_target(cbs);
break;
case VMW_BINDING_VB_BIT:
ret = vmw_emit_set_vb(cbs);
break;
+ case VMW_BINDING_UAV_BIT:
+ ret = vmw_emit_set_uav(cbs);
+ break;
+ case VMW_BINDING_CS_UAV_BIT:
+ ret = vmw_emit_set_cs_uav(cbs);
+ break;
default:
BUG();
}
@@ -1089,18 +1211,18 @@ static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
}
/**
- * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
* scrub from a context
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
-static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_ctx_binding_state *cbs =
vmw_context_binding_state(bi->ctx);
- __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+ __set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);
return 0;
}
@@ -1162,6 +1284,49 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
return 0;
}
+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
+ return 0;
+}
+
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+ __set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
+ return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
+ * @bi: Single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+ struct vmw_ctx_bindinfo_so *binding =
+ container_of(bi, typeof(*binding), bi);
+ struct vmw_private *dev_priv = bi->ctx->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetStreamOutput body;
+ } *cmd;
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+}
+
/**
* vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
* memory accounting.
@@ -1248,8 +1413,8 @@ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
* Each time a resource is put on the validation list as the result of a
* context binding referencing it, we need to determine whether that resource
* will be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently rendertarget-, depth-stencil-, and
- * stream-output-target bindings are capable of dirtying its resource.
+ * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target,
+ * and unordered access view bindings are capable of dirtying their resources.
*
* Return: Whether the binding type dirties the resource its binding points to.
*/
@@ -1259,11 +1424,13 @@ u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
[vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
- [vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
};
/* Review this function as new bindings are added. */
- BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+ BUILD_BUG_ON(vmw_ctx_binding_max != 14);
return is_binding_dirtying[binding_type];
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index cd9805c045cb..dcb71fd0bb3b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -33,6 +33,8 @@
#define VMW_MAX_VIEW_BINDINGS 128
+#define VMW_MAX_UAV_BIND_TYPE 2
+
struct vmw_private;
struct vmw_ctx_binding_state;
@@ -48,9 +50,12 @@ enum vmw_ctx_binding_type {
vmw_ctx_binding_dx_rt,
vmw_ctx_binding_sr,
vmw_ctx_binding_ds,
- vmw_ctx_binding_so,
+ vmw_ctx_binding_so_target,
vmw_ctx_binding_vb,
vmw_ctx_binding_ib,
+ vmw_ctx_binding_uav,
+ vmw_ctx_binding_cs_uav,
+ vmw_ctx_binding_so,
vmw_ctx_binding_max
};
@@ -128,14 +133,14 @@ struct vmw_ctx_bindinfo_view {
};
/**
- * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ * struct vmw_ctx_bindinfo_so_target - StreamOutput binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @size: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
-struct vmw_ctx_bindinfo_so {
+struct vmw_ctx_bindinfo_so_target {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 size;
@@ -189,9 +194,31 @@ struct vmw_dx_shader_bindings {
unsigned long dirty;
};
+/**
+ * struct vmw_ctx_bindinfo_uav - UAV context binding state.
+ * @views: UAV view bindings.
+ * @splice_index: The device splice index set by user-space.
+ */
+struct vmw_ctx_bindinfo_uav {
+ struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS];
+ uint32 index;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - Stream output binding metadata.
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+ struct vmw_ctx_bindinfo bi;
+ uint32 slot;
+};
+
extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci,
u32 shader_slot, u32 slot);
+extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
+ uint32 slot, uint32 splice_index);
extern void
vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
struct vmw_ctx_binding_state *from);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 065015d2a8f6..3b41cf63110a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1241,7 +1241,8 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
* actually call into the already enabled manager, when
* binding the MOB.
*/
- if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+ !dev_priv->has_mob)
return -ENOMEM;
ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index a56c9d802382..61c246335e66 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -36,7 +36,7 @@ struct vmw_user_context {
struct vmw_resource res;
struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
- struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+ struct vmw_resource *cotables[SVGA_COTABLE_MAX];
spinlock_t cotable_lock;
struct vmw_buffer_object *dx_query_mob;
};
@@ -116,12 +116,15 @@ static const struct vmw_res_func vmw_dx_context_func = {
* Context management:
*/
-static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
+ struct vmw_user_context *uctx)
{
struct vmw_resource *res;
int i;
+ u32 cotable_max = has_sm5_context(dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ for (i = 0; i < cotable_max; ++i) {
spin_lock(&uctx->cotable_lock);
res = uctx->cotables[i];
uctx->cotables[i] = NULL;
@@ -155,7 +158,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
- vmw_context_cotables_unref(uctx);
+ vmw_context_cotables_unref(dev_priv, uctx);
return;
}
@@ -208,7 +211,9 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
spin_lock_init(&uctx->cotable_lock);
if (dx) {
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ u32 cotable_max = has_sm5_context(dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+ for (i = 0; i < cotable_max; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
if (IS_ERR(uctx->cotables[i])) {
@@ -222,7 +227,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
return 0;
out_cotables:
- vmw_context_cotables_unref(uctx);
+ vmw_context_cotables_unref(dev_priv, uctx);
out_err:
if (res_free)
res_free(res);
@@ -545,10 +550,12 @@ void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
{
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
+ u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
int i;
vmw_binding_state_scrub(uctx->cbs);
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ for (i = 0; i < cotable_max; ++i) {
struct vmw_resource *res;
/* Avoid racing with ongoing cotable destruction. */
@@ -731,7 +738,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
};
int ret;
- if (!dev_priv->has_dx && dx) {
+ if (!has_sm4_context(dev_priv) && dx) {
VMW_DEBUG_USER("DX contexts not supported by device.\n");
return -EINVAL;
}
@@ -839,7 +846,10 @@ struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
SVGACOTableType cotable_type)
{
- if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+ u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+
+ if (cotable_type >= cotable_max)
return ERR_PTR(-EINVAL);
return container_of(ctx, struct vmw_user_context, res)->
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 3ca5cf375b01..65e8e7a97724 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -80,9 +80,10 @@ static const struct vmw_cotable_info co_info[] = {
{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
- {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+ {1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
{1, sizeof(SVGACOTableDXQueryEntry), NULL},
- {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+ {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
+ {1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};
/*
@@ -102,6 +103,7 @@ const SVGACOTableType vmw_cotable_scrub_order[] = {
SVGA_COTABLE_SAMPLER,
SVGA_COTABLE_STREAMOUTPUT,
SVGA_COTABLE_DXQUERY,
+ SVGA_COTABLE_UAVIEW,
};
static int vmw_cotable_bind(struct vmw_resource *res,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 827458f49112..c2247a893ed4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/mem_encrypt.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
@@ -289,6 +290,8 @@ static void vmw_print_capabilities2(uint32_t capabilities2)
DRM_INFO(" Grow oTable.\n");
if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
DRM_INFO(" IntraSurface copy.\n");
+ if (capabilities2 & SVGA_CAP2_DX3)
+ DRM_INFO(" DX3.\n");
}
static void vmw_print_capabilities(uint32_t capabilities)
@@ -448,7 +451,7 @@ static int vmw_request_device(struct vmw_private *dev_priv)
dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
if (IS_ERR(dev_priv->cman)) {
dev_priv->cman = NULL;
- dev_priv->has_dx = false;
+ dev_priv->sm_type = VMW_SM_LEGACY;
}
ret = vmw_request_device_late(dev_priv);
@@ -575,6 +578,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+ /* TTM currently doesn't fully support SEV encryption. */
+ if (mem_encrypt_active())
+ return -EINVAL;
+
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
@@ -682,8 +689,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
- DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+ DRM_INFO("Restricting capabilities since DMA not available.\n");
refuse_dma = true;
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ DRM_INFO("Disabling 3D acceleration.\n");
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
@@ -711,9 +720,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->max_mob_pages = 0;
dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
- uint64_t mem_size =
- vmw_read(dev_priv,
- SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ uint64_t mem_size;
+
+ if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
+ mem_size = vmw_read(dev_priv,
+ SVGA_REG_GBOBJECT_MEM_SIZE_KB);
+ else
+ mem_size =
+ vmw_read(dev_priv,
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
/*
* Workaround for low memory 2D VMs to compensate for the
@@ -866,7 +881,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->has_gmr = false;
}
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
VMW_PL_MOB) != 0) {
@@ -876,14 +891,32 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
}
- if (dev_priv->has_mob) {
+ if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
- dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+ dev_priv->sm_type = VMW_SM_4;
spin_unlock(&dev_priv->cap_lock);
}
vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
+
+ /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
+ if (has_sm4_context(dev_priv) &&
+ (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
+
+ if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+ dev_priv->sm_type = VMW_SM_4_1;
+
+ if (has_sm4_1_context(dev_priv) &&
+ (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
+ if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+ dev_priv->sm_type = VMW_SM_5;
+ }
+ }
+
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
@@ -893,23 +926,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
goto out_no_fifo;
- if (dev_priv->has_dx) {
- /*
- * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
- * support
- */
- if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
- vmw_write(dev_priv, SVGA_REG_DEV_CAP,
- SVGA3D_DEVCAP_SM41);
- dev_priv->has_sm4_1 = vmw_read(dev_priv,
- SVGA_REG_DEV_CAP);
- }
- }
-
- DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
? "yes." : "no.");
- DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
+ if (dev_priv->sm_type == VMW_SM_5)
+ DRM_INFO("SM5 support available.\n");
+ if (dev_priv->sm_type == VMW_SM_4_1)
+ DRM_INFO("SM4_1 support available.\n");
+ if (dev_priv->sm_type == VMW_SM_4)
+ DRM_INFO("SM4 support available.\n");
snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
VMWGFX_REPO, VMWGFX_GIT_VERSION);
@@ -1223,6 +1247,18 @@ static void vmw_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static unsigned long
+vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+
+ return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
+ &dev_priv->vma_manager);
+}
+
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr)
{
@@ -1394,14 +1430,12 @@ static const struct file_operations vmwgfx_driver_fops = {
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
+ .get_unmapped_area = vmw_get_unmapped_area,
};
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
- .get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 86b69397d166..8cdcd6e5f9e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -58,7 +58,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20200114"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 17
+#define VMWGFX_DRIVER_MINOR 18
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -202,6 +202,7 @@ enum vmw_res_type {
vmw_res_dx_context,
vmw_res_cotable,
vmw_res_view,
+ vmw_res_streamoutput,
vmw_res_max
};
@@ -210,7 +211,8 @@ enum vmw_res_type {
*/
enum vmw_cmdbuf_res_type {
vmw_cmdbuf_res_shader,
- vmw_cmdbuf_res_view
+ vmw_cmdbuf_res_view,
+ vmw_cmdbuf_res_streamoutput
};
struct vmw_cmdbuf_res_manager;
@@ -223,24 +225,58 @@ struct vmw_cursor_snooper {
struct vmw_framebuffer;
struct vmw_surface_offset;
-struct vmw_surface {
- struct vmw_resource res;
- SVGA3dSurfaceAllFlags flags;
- uint32_t format;
- uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+/**
+ * struct vmw_surface_metadata - Metadata describing a surface.
+ *
+ * @flags: Device flags.
+ * @format: Surface SVGA3D_x format.
+ * @mip_levels: Mip level for each face. For GB surfaces, only the first
+ *              index is used.
+ * @multisample_count: Sample count.
+ * @multisample_pattern: Sample pattern.
+ * @quality_level: Quality level.
+ * @autogen_filter: Filter for automatically generated mipmaps.
+ * @array_size: Number of array elements for a 1D/2D texture. For a cubemap
+ *              texture, this is the number of faces * array_size. This should
+ *              be 0 for pre-SM4 devices.
+ * @buffer_byte_stride: Buffer byte stride.
+ * @num_sizes: Size of @sizes. For GB surfaces this should always be 1.
+ * @base_size: Surface dimension.
+ * @sizes: Array representing mip sizes. Legacy only.
+ * @scanout: Whether this surface will be used for scanout.
+ *
+ * This tracks metadata for both legacy and guest-backed surfaces.
+ */
+struct vmw_surface_metadata {
+ u64 flags;
+ u32 format;
+ u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ u32 multisample_count;
+ u32 multisample_pattern;
+ u32 quality_level;
+ u32 autogen_filter;
+ u32 array_size;
+ u32 num_sizes;
+ u32 buffer_byte_stride;
struct drm_vmw_size base_size;
struct drm_vmw_size *sizes;
- uint32_t num_sizes;
bool scanout;
- uint32_t array_size;
- /* TODO so far just a extra pointer */
+};
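A minimal sketch of filling in the new metadata struct for a kernel-visible guest-backed scanout surface, mirroring the vmw_create_bo_proxy() hunk later in this patch (the 1024x768 dimensions are illustrative only):

struct vmw_surface_metadata metadata = {0};

metadata.format = SVGA3D_X8R8G8B8;	/* 32bpp scanout format */
metadata.mip_levels[0] = 1;		/* GB surfaces use index 0 only */
metadata.num_sizes = 1;			/* always 1 for GB surfaces */
metadata.base_size.width = 1024;
metadata.base_size.height = 768;
metadata.base_size.depth = 1;
metadata.scanout = true;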
+
+/**
+ * struct vmw_surface - Resource structure for a surface.
+ *
+ * @res: The base resource for this surface.
+ * @metadata: Metadata for this surface resource.
+ * @snooper: Cursor data. Legacy surface only.
+ * @offsets: Legacy surface only.
+ * @view_list: List of views bound to this surface.
+ */
+struct vmw_surface {
+ struct vmw_resource res;
+ struct vmw_surface_metadata metadata;
struct vmw_cursor_snooper snooper;
struct vmw_surface_offset *offsets;
- SVGA3dTextureFilter autogen_filter;
- uint32_t multisample_count;
struct list_head view_list;
- SVGA3dMSPattern multisample_pattern;
- SVGA3dMSQualityLevel quality_level;
};
struct vmw_marker_queue {
@@ -441,6 +477,22 @@ enum {
VMW_IRQTHREAD_MAX
};
+/**
+ * enum vmw_sm_type - Graphics context capability supported by the device.
+ * @VMW_SM_LEGACY: Pre-DX context.
+ * @VMW_SM_4: Context supports up to SM4.
+ * @VMW_SM_4_1: Context supports up to SM4_1.
+ * @VMW_SM_5: Context supports up to SM5.
+ * @VMW_SM_MAX: Should be the last entry.
+ */
+enum vmw_sm_type {
+ VMW_SM_LEGACY = 0,
+ VMW_SM_4,
+ VMW_SM_4_1,
+ VMW_SM_5,
+ VMW_SM_MAX
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
@@ -475,22 +527,9 @@ struct vmw_private {
bool has_mob;
spinlock_t hw_lock;
spinlock_t cap_lock;
- bool has_dx;
bool assume_16bpp;
- bool has_sm4_1;
-
- /*
- * VGA registers.
- */
-
- struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
- uint32_t vga_width;
- uint32_t vga_height;
- uint32_t vga_bpp;
- uint32_t vga_bpl;
- uint32_t vga_pitchlock;
- uint32_t num_displays;
+ enum vmw_sm_type sm_type;
/*
* Framebuffer info.
@@ -661,6 +700,39 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
return val;
}
+/**
+ * has_sm4_context - Does the device support SM4 contexts?
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4 contexts, false otherwise.
+ */
+static inline bool has_sm4_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_4);
+}
+
+/**
+ * has_sm4_1_context - Does the device support SM4_1 contexts?
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4_1 contexts, false otherwise.
+ */
+static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_4_1);
+}
+
+/**
+ * has_sm5_context - Does the device support SM5 contexts?
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM5 contexts, false otherwise.
+ */
+static inline bool has_sm5_context(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->sm_type >= VMW_SM_5);
+}
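Since the enum values are strictly ordered, a single >= comparison answers "at least this shader model". The execbuf hunks below rely on exactly this, e.g. when sizing the cotable scan:

/* SM5 devices carry the larger cotable set; older DX devices stop at DX10. */
u32 cotable_max = has_sm5_context(dev_priv) ?
	SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;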
+
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
@@ -900,7 +972,6 @@ extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
-extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
@@ -929,6 +1000,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
size_t gran);
+
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -947,7 +1019,6 @@ extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
@@ -1085,8 +1156,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv);
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
-int vmw_kms_save_vga(struct vmw_private *vmw_priv);
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
@@ -1100,9 +1169,9 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
+int vmw_enable_vblank(struct drm_crtc *crtc);
+void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -1139,7 +1208,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
@@ -1186,10 +1254,6 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
extern const struct vmw_user_resource_conv *user_context_converter;
-extern int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
@@ -1219,7 +1283,6 @@ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
extern const struct vmw_user_resource_conv *user_surface_converter;
-extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -1230,11 +1293,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
SVGA3dSurfaceAllFlags svga3d_flags,
@@ -1254,6 +1312,11 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv);
+int vmw_gb_surface_define(struct vmw_private *dev_priv,
+ uint32_t user_accounting_size,
+ const struct vmw_surface_metadata *req,
+ struct vmw_surface **srf_out);
+
/*
* Shader management - vmwgfx_shader.c
*/
@@ -1287,6 +1350,24 @@ vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type);
/*
+ * Streamoutput management
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key);
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx,
+ SVGA3dStreamOutputId user_key,
+ struct list_head *list);
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
+ SVGA3dStreamOutputId user_key,
+ struct list_head *list);
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback);
+
+/*
* Command buffer managed resources - vmwgfx_cmdbuf_res.c
*/
@@ -1430,6 +1511,17 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size);
+#endif
+
+/* Transparent hugepage support - vmwgfx_thp.c */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern const struct ttm_mem_type_manager_func vmw_thp_func;
+#else
+#define vmw_thp_func ttm_bo_manager_func
+#endif
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 73489a45decb..367d5b87ee6a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -459,10 +459,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
int ret = 0;
struct vmw_resource *res;
u32 i;
+ u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+ SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
/* Add all cotables to the validation list. */
- if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
- for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ if (has_sm4_context(dev_priv) &&
+ vmw_res_type(ctx) == vmw_res_dx_context) {
+ for (i = 0; i < cotable_max; ++i) {
res = vmw_context_cotable(ctx, i);
if (IS_ERR(res))
continue;
@@ -489,7 +492,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
break;
}
- if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+ if (has_sm4_context(dev_priv) &&
+ vmw_res_type(ctx) == vmw_res_dx_context) {
struct vmw_buffer_object *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
@@ -2116,6 +2120,9 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
+ SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
+ SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
+
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_cb binding;
@@ -2139,7 +2146,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
binding.size = cmd->body.sizeInBytes;
binding.slot = cmd->body.slot;
- if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+ if (binding.shader_slot >= max_shader_num ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
@@ -2167,12 +2174,15 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
container_of(header, typeof(*cmd), header);
+ SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+ SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
+
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
- cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ cmd->body.type >= max_allowed) {
VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@@ -2196,6 +2206,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
+ SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+ SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
@@ -2206,7 +2218,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
- if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+ if (cmd->body.type >= max_allowed ||
cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
@@ -2467,7 +2479,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
- struct vmw_ctx_bindinfo_so binding;
+ struct vmw_ctx_bindinfo_so_target binding;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
@@ -2497,7 +2509,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
- binding.bi.bt = vmw_ctx_binding_so,
+ binding.bi.bt = vmw_ctx_binding_so_target,
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
@@ -2804,6 +2816,352 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
&cmd->body.surface.sid, NULL);
}
+static int vmw_cmd_sm5(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearUAViewUint body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+ cmd->body.uaViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXClearUAViewFloat body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ struct vmw_resource *ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+ cmd->body.uaViewId);
+
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetUAViews body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dUAViewId);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ VMW_DEBUG_USER("Invalid UAV binding.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+ vmw_ctx_binding_uav, 0, (void *)&cmd[1],
+ num_uav, 0);
+ if (ret)
+ return ret;
+
+ vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
+ cmd->body.uavSpliceIndex);
+
+ return ret;
+}
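For reference, SVGA3dCmdDXSetUAViews is a variable-length command: the view id array follows the fixed body directly, which is why num_uav is derived from header.size and &cmd[1] points at the first id:

	| SVGA3dCmdHeader | SVGA3dCmdDXSetUAViews | id[0] ... id[num_uav - 1] |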
+
+static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetCSUAViews body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+ sizeof(SVGA3dUAViewId);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (num_uav > SVGA3D_MAX_UAVIEWS) {
+ VMW_DEBUG_USER("Invalid UAV binding.\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+ vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
+ num_uav, 0);
+ if (ret)
+ return ret;
+
+ vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
+ cmd->body.startIndex);
+
+ return ret;
+}
+
+static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineStreamOutputWithMob body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
+ ret = vmw_cotable_notify(res, cmd->body.soid);
+ if (ret)
+ return ret;
+
+ return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
+ cmd->body.soid,
+ &sw_context->staged_cmd_res);
+}
+
+static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * When the device does not support SM5, the streamoutput-with-mob
+	 * command is not available to user-space. Simply return in this case.
+ */
+ if (!has_sm5_context(dev_priv))
+ return 0;
+
+ /*
+	 * On an SM5-capable device, if the lookup fails then user-space
+	 * probably used the old streamoutput define command. Return without
+	 * an error.
+ */
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res))
+ return 0;
+
+ return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
+ &sw_context->staged_cmd_res);
+}
+
+static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+ if (IS_ERR(res)) {
+ DRM_ERROR("Cound not find streamoutput to bind.\n");
+ return PTR_ERR(res);
+ }
+
+ vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
+
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ return ret;
+ }
+
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
+}
+
+static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res;
+ struct vmw_ctx_bindinfo_so binding;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXSetStreamOutput body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ if (!ctx_node) {
+ DRM_ERROR("DX Context not set.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->body.soid == SVGA3D_INVALID_ID)
+ return 0;
+
+ /*
+	 * When the device does not support SM5, the streamoutput-with-mob
+	 * command is not available to user-space. Simply return in this case.
+ */
+ if (!has_sm5_context(dev_priv))
+ return 0;
+
+ /*
+	 * On an SM5-capable device, if the lookup fails then user-space
+	 * probably used the old streamoutput define command. Return without
+	 * an error.
+ */
+ res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+ cmd->body.soid);
+	if (IS_ERR(res))
+		return 0;
+
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
+ if (ret) {
+ DRM_ERROR("Error creating resource validation node.\n");
+ return ret;
+ }
+
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
+ binding.bi.bt = vmw_ctx_binding_so;
+	binding.slot = 0; /* Only one SO is set to the context at a time. */
+
+ vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
+ binding.slot);
+
+ return ret;
+}
+
+static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_indexed_instanced_indirect_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDrawIndexedInstancedIndirect body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.argsBufferSid, NULL);
+}
+
+static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_instanced_indirect_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDrawInstancedIndirect body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.argsBufferSid, NULL);
+}
+
+static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dispatch_indirect_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDispatchIndirect body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ if (!has_sm5_context(dev_priv))
+ return -EINVAL;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.argsBufferSid, NULL);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -2922,18 +3280,12 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
- false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
- false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
@@ -3141,9 +3493,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
&vmw_cmd_dx_so_define, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
- &vmw_cmd_dx_cid_check, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
- true, false, true),
+ &vmw_cmd_dx_destroy_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
+ &vmw_cmd_dx_set_streamoutput, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
&vmw_cmd_dx_set_so_targets, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
@@ -3159,6 +3511,37 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
true, false, true),
+
+ /*
+ * SM5 commands
+ */
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
+ true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
+ &vmw_cmd_clear_uav_float, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
+ false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
+ true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
+ &vmw_cmd_indexed_instanced_indirect, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
+ &vmw_cmd_instanced_indirect, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
+ &vmw_cmd_dispatch_indirect, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
+ false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
+ &vmw_cmd_sm5_view_define, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
+ &vmw_cmd_dx_define_streamoutput, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
+ &vmw_cmd_dx_bind_streamoutput, true, false, true),
};
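As an annotation (field names as in struct vmw_cmd_entry in this file): the three booleans in VMW_CMD_DEF() are user_allow, gb_disable and gb_enable, so the SM5 entries above are reachable from user-space and valid only on guest-backed devices.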
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e5252ef3812f..6941689085ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -169,10 +169,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->mmio_virt;
- preempt_disable();
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
- preempt_enable();
}
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a15375eb476e..f681b7b4df1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,10 +114,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
(dev_priv->active_display_unit == vmw_du_screen_target);
break;
case DRM_VMW_PARAM_DX:
- param->value = dev_priv->has_dx;
+ param->value = has_sm4_context(dev_priv);
break;
case DRM_VMW_PARAM_SM4_1:
- param->value = dev_priv->has_sm4_1;
+ param->value = has_sm4_1_context(dev_priv);
+ break;
+ case DRM_VMW_PARAM_SM5:
+ param->value = has_sm5_context(dev_priv);
break;
default:
return -EINVAL;
@@ -126,14 +129,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
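User-space can probe the new parameter through the existing getparam ioctl. A hedged sketch using libdrm (struct drm_vmw_getparam_arg and DRM_VMW_GET_PARAM are from the vmwgfx uapi header; fd is an open render node):

struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_SM5 };

if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) == 0 &&
    arg.value != 0) {
	/* The device exposes SM5 contexts. */
}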
-static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
+static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value)
{
/*
* A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
* to check the sample count supported by virtual device. Since there
* never was support for multisample count for backing MOB return 0.
+ *
+	 * The MULTISAMPLE_MASKABLESAMPLES devcap is marked as deprecated by
+	 * the virtual device.
*/
- if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+ if (cap == SVGA3D_DEVCAP_DEAD5)
return 0;
return fmt_value;
@@ -164,7 +170,7 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
- compat_cap->pairs[i][1] = vmw_mask_multisample
+ compat_cap->pairs[i][1] = vmw_mask_legacy_multisample
(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
}
spin_unlock(&dev_priv->cap_lock);
@@ -220,7 +226,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
- *bounce32++ = vmw_mask_multisample
+ *bounce32++ = vmw_mask_legacy_multisample
(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
}
spin_unlock(&dev_priv->cap_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f47d5710cc95..04d66592f605 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -905,14 +905,14 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
*/
/* Surface must be marked as a scanout. */
- if (unlikely(!surface->scanout))
+ if (unlikely(!surface->metadata.scanout))
return -EINVAL;
- if (unlikely(surface->mip_levels[0] != 1 ||
- surface->num_sizes != 1 ||
- surface->base_size.width < mode_cmd->width ||
- surface->base_size.height < mode_cmd->height ||
- surface->base_size.depth != 1)) {
+ if (unlikely(surface->metadata.mip_levels[0] != 1 ||
+ surface->metadata.num_sizes != 1 ||
+ surface->metadata.base_size.width < mode_cmd->width ||
+ surface->metadata.base_size.height < mode_cmd->height ||
+ surface->metadata.base_size.depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
@@ -941,7 +941,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* For DX, surface format validation is done when surface->scanout
* is set.
*/
- if (!dev_priv->has_dx && format != surface->format) {
+ if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
@@ -1144,8 +1144,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
struct vmw_buffer_object *bo_mob,
struct vmw_surface **srf_out)
{
+ struct vmw_surface_metadata metadata = {0};
uint32_t format;
- struct drm_vmw_size content_base_size = {0};
struct vmw_resource *res;
unsigned int bytes_pp;
struct drm_format_name_buf format_name;
@@ -1175,22 +1175,15 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
return -EINVAL;
}
- content_base_size.width = mode_cmd->pitches[0] / bytes_pp;
- content_base_size.height = mode_cmd->height;
- content_base_size.depth = 1;
-
- ret = vmw_surface_gb_priv_define(dev,
- 0, /* kernel visible only */
- 0, /* flags */
- format,
- true, /* can be a scanout buffer */
- 1, /* num of mip levels */
- 0,
- 0,
- content_base_size,
- SVGA3D_MS_PATTERN_NONE,
- SVGA3D_MS_QUALITY_NONE,
- srf_out);
+ metadata.format = format;
+ metadata.mip_levels[0] = 1;
+ metadata.num_sizes = 1;
+ metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
+ metadata.base_size.height = mode_cmd->height;
+ metadata.base_size.depth = 1;
+ metadata.scanout = true;
+
+ ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
if (ret) {
DRM_ERROR("Failed to allocate proxy content buffer\n");
return ret;
@@ -1897,87 +1890,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
- vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_priv->vga_pitchlock =
- vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- vmw_priv->num_displays = vmw_read(vmw_priv,
- SVGA_REG_NUM_GUEST_DISPLAYS);
-
- if (vmw_priv->num_displays == 0)
- vmw_priv->num_displays = 1;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
- save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
- save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
- save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
- save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- if (i == 0 && vmw_priv->num_displays == 1 &&
- save->width == 0 && save->height == 0) {
-
- /*
- * It should be fairly safe to assume that these
- * values are uninitialized.
- */
-
- save->width = vmw_priv->vga_width - save->pos_x;
- save->height = vmw_priv->vga_height - save->pos_y;
- }
- }
-
- return 0;
-}
-
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
- vmw_priv->vga_pitchlock);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_mmio_write(vmw_priv->vga_pitchlock,
- vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
-
- return 0;
-}
-
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
@@ -1991,7 +1903,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
/**
* Function called by DRM code called with vbl_lock held.
*/
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
{
return 0;
}
@@ -1999,7 +1911,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
/**
* Function called by DRM code called with vbl_lock held.
*/
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int vmw_enable_vblank(struct drm_crtc *crtc)
{
return -EINVAL;
}
@@ -2007,7 +1919,7 @@ int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
/**
* Function called by DRM code called with vbl_lock held.
*/
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void vmw_disable_vblank(struct drm_crtc *crtc)
{
}
@@ -2597,7 +2509,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
int increment)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+ struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBImage body;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5702219ec38f..16dafff5cab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -236,6 +236,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 0a6bbac00896..e8eb42933ca2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,7 +320,7 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
struct vmw_otable **otables = &dev_priv->otable_batch.otables;
int ret;
- if (dev_priv->has_dx) {
+ if (has_sm4_context(dev_priv)) {
*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
if (!(*otables))
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index fdb52f6d29fb..cd7ed1650d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -354,37 +354,6 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
}
/**
- * Stop all streams.
- *
- * Used by the fb code when starting.
- *
- * Takes the overlay lock.
- */
-int vmw_overlay_stop_all(struct vmw_private *dev_priv)
-{
- struct vmw_overlay *overlay = dev_priv->overlay_priv;
- int i, ret;
-
- if (!overlay)
- return 0;
-
- mutex_lock(&overlay->mutex);
-
- for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
- struct vmw_stream *stream = &overlay->stream[i];
- if (!stream->buf)
- continue;
-
- ret = vmw_overlay_stop(dev_priv, i, false, false);
- WARN_ON(ret != 0);
- }
-
- mutex_unlock(&overlay->mutex);
-
- return 0;
-}
-
-/**
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index f07aa857587c..d4d66532f9c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -69,7 +69,7 @@ struct vmw_bo_dirty {
unsigned int ref_count;
unsigned long bitmap_size;
size_t size;
- unsigned long bitmap[0];
+ unsigned long bitmap[];
};
/**
@@ -473,11 +473,11 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
* a lot of unnecessary write faults.
*/
if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
- prot = vma->vm_page_prot;
+ prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
else
prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
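Annotation: vm_get_page_prot(vma->vm_flags & ~VM_SHARED) computes the protection as if the mapping were private, i.e. write-protected for writable mappings, so the next write raises a fresh fault that dirty tracking can observe; the huge-fault path added below uses the same trick before splitting.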
@@ -486,3 +486,75 @@ out_unlock:
return ret;
}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ pgprot_t prot;
+ vm_fault_t ret;
+ pgoff_t fault_page_size;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+ bool is_cow_mapping =
+ (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+
+ switch (pe_size) {
+ case PE_SIZE_PMD:
+ fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
+ break;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+ case PE_SIZE_PUD:
+ fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ return VM_FAULT_FALLBACK;
+ }
+
+ /* Always do write dirty-tracking and COW on PTE level. */
+ if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+ return VM_FAULT_FALLBACK;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ if (vbo->dirty) {
+ pgoff_t allowed_prefault;
+ unsigned long page_offset;
+
+ page_offset = vmf->pgoff -
+ drm_vma_node_start(&bo->base.vma_node);
+ if (page_offset >= bo->num_pages ||
+ vmw_resources_clean(vbo, page_offset,
+ page_offset + PAGE_SIZE,
+ &allowed_prefault)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ /*
+ * Write protect, so we get a new fault on write, and can
+ * split.
+ */
+ prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
+ } else {
+ prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+#endif
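Worked example for fault_page_size above (x86-64 with 4 KiB base pages): HPAGE_PMD_SIZE is 2 MiB, so a PMD fault covers 2 MiB >> 12 = 512 base pages; with CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD, a 1 GiB PUD fault covers 262144.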
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index e5a283263211..32a22e4eddb1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -319,6 +319,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 63807361e16f..3f97b61dd5d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -319,7 +319,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
static const size_t vmw_view_define_sizes[] = {
[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
- [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+ [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView),
+ [vmw_view_ua] = sizeof(SVGA3dCmdDXDefineUAView)
};
struct vmw_private *dev_priv = ctx->dev_priv;
@@ -499,8 +500,8 @@ struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
* Each time a resource is put on the validation list as the result of a
* view pointing to it, we need to determine whether that resource will
* be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently only rendertarget- and depth-stencil views are
- * capable of dirtying its resource.
+ * GPU operation. Currently only rendertarget-, depth-stencil and unordered
+ * access views are capable of dirtying their resource.
*
* Return: Whether the view type of @res dirties the resource it points to.
*/
@@ -509,10 +510,11 @@ u32 vmw_view_dirtying(struct vmw_resource *res)
static u32 view_is_dirtying[vmw_view_max] = {
[vmw_view_rt] = VMW_RES_DIRTY_SET,
[vmw_view_ds] = VMW_RES_DIRTY_SET,
+ [vmw_view_ua] = VMW_RES_DIRTY_SET,
};
/* Update this function as we add more view types */
- BUILD_BUG_ON(vmw_view_max != 3);
+ BUILD_BUG_ON(vmw_view_max != 4);
return view_is_dirtying[vmw_view(res)->view_type];
}
@@ -520,12 +522,14 @@ const u32 vmw_view_destroy_cmds[] = {
[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+ [vmw_view_ua] = SVGA_3D_CMD_DX_DESTROY_UA_VIEW,
};
const SVGACOTableType vmw_view_cotables[] = {
[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+ [vmw_view_ua] = SVGA_COTABLE_UAVIEW,
};
const SVGACOTableType vmw_so_cotables[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index 12565047bc55..f48b84bfeeac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -30,6 +30,7 @@ enum vmw_view_type {
vmw_view_sr,
vmw_view_rt,
vmw_view_ds,
+ vmw_view_ua,
vmw_view_max,
};
@@ -61,6 +62,7 @@ union vmw_view_destroy {
struct SVGA3dCmdDXDestroyRenderTargetView rtv;
struct SVGA3dCmdDXDestroyShaderResourceView srv;
struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+ struct SVGA3dCmdDXDestroyUAView uav;
u32 view_id;
};
@@ -87,6 +89,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
{
u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+ if (id == SVGA_3D_CMD_DX_DEFINE_UA_VIEW ||
+ id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW)
+ return vmw_view_ua;
+
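Annotation: the UA view opcodes were added outside the original contiguous define/destroy pairs, so the (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2 mapping computed above cannot cover them; hence the explicit check.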
if (tmp > (u32)vmw_view_max)
return vmw_view_max;
@@ -123,6 +129,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
return vmw_so_ss;
case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+ case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB:
case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
return vmw_so_so;
default:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41a96fb49835..9ffa9c75a5da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -590,7 +590,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
return;
/* Assume we are blitting from Guest (bo) to Host (display_srf) */
- dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base;
dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -916,6 +916,9 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
@@ -1038,7 +1041,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
enum stdu_content_type new_content_type;
struct vmw_framebuffer_surface *new_vfbs;
- struct drm_crtc *crtc = new_state->crtc;
uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
int ret;
@@ -1055,8 +1057,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vfb = vmw_framebuffer_to_vfb(new_fb);
new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
- if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
- new_vfbs->surface->base_size.height == vdisplay)
+ if (new_vfbs &&
+ new_vfbs->surface->metadata.base_size.width == hdisplay &&
+ new_vfbs->surface->metadata.base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->bo)
new_content_type = SEPARATE_BO;
@@ -1064,12 +1067,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
new_content_type = SEPARATE_SURFACE;
if (new_content_type != SAME_AS_DISPLAY) {
- struct vmw_surface content_srf;
- struct drm_vmw_size display_base_size = {0};
+ struct vmw_surface_metadata metadata = {0};
- display_base_size.width = hdisplay;
- display_base_size.height = vdisplay;
- display_base_size.depth = 1;
+ metadata.base_size.width = hdisplay;
+ metadata.base_size.height = vdisplay;
+ metadata.base_size.depth = 1;
/*
* If content buffer is a buffer object, then we have to
@@ -1079,15 +1081,15 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
switch (new_fb->format->cpp[0]*8) {
case 32:
- content_srf.format = SVGA3D_X8R8G8B8;
+ metadata.format = SVGA3D_X8R8G8B8;
break;
case 16:
- content_srf.format = SVGA3D_R5G6B5;
+ metadata.format = SVGA3D_R5G6B5;
break;
case 8:
- content_srf.format = SVGA3D_P8;
+ metadata.format = SVGA3D_P8;
break;
default:
@@ -1095,22 +1097,20 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
return -EINVAL;
}
- content_srf.flags = 0;
- content_srf.mip_levels[0] = 1;
- content_srf.multisample_count = 0;
- content_srf.multisample_pattern =
- SVGA3D_MS_PATTERN_NONE;
- content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
+ metadata.mip_levels[0] = 1;
+ metadata.num_sizes = 1;
+ metadata.scanout = true;
} else {
- content_srf = *new_vfbs->surface;
+ metadata = new_vfbs->surface->metadata;
}
if (vps->surf) {
- struct drm_vmw_size cur_base_size = vps->surf->base_size;
+ struct drm_vmw_size cur_base_size =
+ vps->surf->metadata.base_size;
- if (cur_base_size.width != display_base_size.width ||
- cur_base_size.height != display_base_size.height ||
- vps->surf->format != content_srf.format) {
+ if (cur_base_size.width != metadata.base_size.width ||
+ cur_base_size.height != metadata.base_size.height ||
+ vps->surf->metadata.format != metadata.format) {
WARN_ON(vps->pinned != 0);
vmw_surface_unreference(&vps->surf);
}
@@ -1118,20 +1118,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
}
if (!vps->surf) {
- ret = vmw_surface_gb_priv_define
- (crtc->dev,
- /* Kernel visible only */
- 0,
- content_srf.flags,
- content_srf.format,
- true, /* a scanout buffer */
- content_srf.mip_levels[0],
- content_srf.multisample_count,
- 0,
- display_base_size,
- content_srf.multisample_pattern,
- content_srf.quality_level,
- &vps->surf);
+ ret = vmw_gb_surface_define(dev_priv, 0, &metadata,
+ &vps->surf);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
return ret;
@@ -1308,7 +1296,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
diff.cpp = stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base;
- dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
src_bo = &vfbbo->buffer->base;
@@ -1885,7 +1873,7 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
/* Do nothing if Screen Target support is turned off */
- if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+ if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
return -ENOSYS;
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
new file mode 100644
index 000000000000..193192456663
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
+
+/**
+ * struct vmw_dx_streamoutput - Streamoutput resource metadata.
+ * @res: Base resource struct.
+ * @ctx: Non-refcounted context to which @res belongs.
+ * @cotable: Refcounted cotable holding this Streamoutput.
+ * @cotable_head: List head for cotable-so_res list.
+ * @id: User-space provided identifier.
+ * @size: User-space provided mob size.
+ * @committed: Whether the streamoutput is actually created or pending creation.
+ */
+struct vmw_dx_streamoutput {
+ struct vmw_resource res;
+ struct vmw_resource *ctx;
+ struct vmw_resource *cotable;
+ struct list_head cotable_head;
+ u32 id;
+ u32 size;
+ bool committed;
+};
+
+static int vmw_dx_streamoutput_create(struct vmw_resource *res);
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+ struct ttm_validate_buffer *val_buf);
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state);
+
+static size_t vmw_streamoutput_size;
+
+static const struct vmw_res_func vmw_dx_streamoutput_func = {
+ .res_type = vmw_res_streamoutput,
+ .needs_backup = true,
+ .may_evict = false,
+ .type_name = "DX streamoutput",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_streamoutput_create,
+ .destroy = NULL, /* Command buffer managed resource. */
+ .bind = vmw_dx_streamoutput_bind,
+ .unbind = vmw_dx_streamoutput_unbind,
+ .commit_notify = vmw_dx_streamoutput_commit_notify,
+};
+
+static inline struct vmw_dx_streamoutput *
+vmw_res_to_dx_streamoutput(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_dx_streamoutput, res);
+}
+
+/**
+ * vmw_dx_streamoutput_unscrub - Reattach the MOB to streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
+{
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd;
+
+	if (!list_empty(&so->cotable_head) || !so->committed)
+ return 0;
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = so->id;
+ cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.offsetInBytes = res->backup_offset;
+ cmd->body.sizeInBytes = so->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+
+ return 0;
+}
+
+static int vmw_dx_streamoutput_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ int ret = 0;
+
+ WARN_ON_ONCE(!so->committed);
+
+ if (vmw_resource_mob_attached(res)) {
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+
+ res->id = so->id;
+
+ return ret;
+}
+
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ int ret;
+
+ if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+ return -EINVAL;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_unscrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_scrub - Unbind the MOB from streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindStreamOutput body;
+ } *cmd;
+
+ if (list_empty(&so->cotable_head))
+ return 0;
+
+ WARN_ON_ONCE(!so->committed);
+
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.soid = res->id;
+ cmd->body.mobid = SVGA3D_INVALID_ID;
+ cmd->body.offsetInBytes = 0;
+ cmd->body.sizeInBytes = so->size;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ res->id = -1;
+ list_del_init(&so->cotable_head);
+
+ return 0;
+}
+
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_fence_obj *fence;
+ int ret;
+
+ if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+ return -EINVAL;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ ret = vmw_dx_streamoutput_scrub(res);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ if (ret)
+ return ret;
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ vmw_bo_fence_single(val_buf->bo, fence);
+
+ if (fence != NULL)
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+ enum vmw_cmdbuf_res_state state)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ if (state == VMW_CMDBUF_RES_ADD) {
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+ so->committed = true;
+ res->id = so->id;
+ mutex_unlock(&dev_priv->binding_mutex);
+ } else {
+ mutex_lock(&dev_priv->binding_mutex);
+ list_del_init(&so->cotable_head);
+ so->committed = false;
+ res->id = -1;
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+}
+
+/**
+ * vmw_dx_streamoutput_lookup - Do a streamoutput resource lookup by user key.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: User-space identifier for lookup.
+ *
+ * Return: Valid refcounted vmw_resource on success, error pointer on failure.
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key)
+{
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput,
+ user_key);
+}
+
+static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ vmw_resource_unreference(&so->cotable);
+ kfree(so);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
+}
+
+static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
+{
+ /* Destroyed by user-space cmd buf or as part of context takedown. */
+ res->id = -1;
+}
+
+/**
+ * vmw_dx_streamoutput_add - Add a streamoutput as a cmd buf managed resource.
+ * @man: Command buffer managed resource manager for current context.
+ * @ctx: Pointer to context resource.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_resource *ctx, u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_dx_streamoutput *so;
+ struct vmw_resource *res;
+ struct vmw_private *dev_priv = ctx->dev_priv;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ if (!vmw_streamoutput_size)
+ vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_streamoutput_size, &ttm_opt_ctx);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for streamout.\n");
+ return ret;
+ }
+
+ so = kmalloc(sizeof(*so), GFP_KERNEL);
+ if (!so) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_streamoutput_size);
+ return -ENOMEM;
+ }
+
+ res = &so->res;
+ so->ctx = ctx;
+ so->cotable = vmw_resource_reference
+ (vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT));
+ so->id = user_key;
+ so->committed = false;
+ INIT_LIST_HEAD(&so->cotable_head);
+ ret = vmw_resource_init(dev_priv, res, true,
+ vmw_dx_streamoutput_res_free,
+ &vmw_dx_streamoutput_func);
+ if (ret)
+ goto out_resource_init;
+
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key,
+ res, list);
+ if (ret)
+ goto out_resource_init;
+
+ res->id = so->id;
+ res->hw_destroy = vmw_dx_streamoutput_hw_destroy;
+
+out_resource_init:
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_set_size - Sets streamoutput mob size in res struct.
+ * @res: The streamoutput resource for which to set the size.
+ * @size: The size provided by user-space to set.
+ */
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size)
+{
+ struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+ so->size = size;
+}
+
+/**
+ * vmw_dx_streamoutput_remove - Stage streamoutput for removal.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_resource *r;
+
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput,
+ (u32)user_key, list, &r);
+}
+
+/**
+ * vmw_dx_streamoutput_cotable_list_scrub - cotable unbind_func callback.
+ * @dev_priv: Device private.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ */
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
+ struct list_head *list,
+ bool readback)
+{
+ struct vmw_dx_streamoutput *entry, *next;
+
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
+
+ list_for_each_entry_safe(entry, next, list, cotable_head) {
+ WARN_ON(vmw_dx_streamoutput_scrub(&entry->res));
+ if (!readback)
+ entry->committed = false;
+ }
+}
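
Taken together, the helpers above implement a three-stage lifecycle: vmw_dx_streamoutput_add() stages the resource, the command buffer manager flips it live via vmw_dx_streamoutput_commit_notify(), and vmw_dx_streamoutput_remove() stages teardown. A minimal sketch of a caller in the define path, assuming @man, @ctx and @list come from the caller's validation context (error paths simplified; this helper is illustrative and not part of the patch):

        static int example_define_streamoutput(struct vmw_cmdbuf_res_manager *man,
                                               struct vmw_resource *ctx, u32 user_key,
                                               u32 size_in_bytes, struct list_head *list)
        {
                struct vmw_resource *res;
                int ret;

                /* Stage the streamoutput as a command buffer managed resource. */
                ret = vmw_dx_streamoutput_add(man, ctx, user_key, list);
                if (ret)
                        return ret;

                /* The staged resource is immediately visible to lookups. */
                res = vmw_dx_streamoutput_lookup(man, user_key);
                if (IS_ERR(res))
                        return PTR_ERR(res);

                /* Record the MOB size used by the bind/unbind commands. */
                vmw_dx_streamoutput_set_size(res, size_in_bytes);
                vmw_resource_unreference(&res);
                return 0;
        }
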
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3ce630aa4fde..7ef51fa84b01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -79,7 +79,7 @@ struct vmw_surface_dirty {
struct svga3dsurface_cache cache;
size_t size;
u32 num_subres;
- SVGA3dBox boxes[0];
+ SVGA3dBox boxes[];
};
static void vmw_user_surface_free(struct vmw_resource *res);
@@ -199,7 +199,7 @@ struct vmw_surface_destroy {
*/
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
- return srf->num_sizes * sizeof(struct vmw_surface_dma);
+ return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}
@@ -213,7 +213,7 @@ static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
*/
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
- return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
sizeof(SVGA3dSize);
}
@@ -262,7 +262,8 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
uint32_t cmd_len;
int i;
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+ cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
+ sizeof(SVGA3dSize);
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
@@ -272,16 +273,16 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
* since driver internally stores as 64 bit.
* For legacy surface define only 32 bit flag is supported.
*/
- cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
- cmd->body.format = srf->format;
+ cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
+ cmd->body.format = srf->metadata.format;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+ cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];
cmd += 1;
cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
+ src_size = srf->metadata.sizes;
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
cmd_size->width = src_size->width;
cmd_size->height = src_size->height;
cmd_size->depth = src_size->depth;
@@ -305,15 +306,15 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
uint32_t i;
struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
const struct svga3d_surface_desc *desc =
- svga3dsurface_get_desc(srf->format);
+ svga3dsurface_get_desc(srf->metadata.format);
- for (i = 0; i < srf->num_sizes; ++i) {
+ for (i = 0; i < srf->metadata.num_sizes; ++i) {
SVGA3dCmdHeader *header = &cmd->header;
SVGA3dCmdSurfaceDMA *body = &cmd->body;
SVGA3dCopyBox *cb = &cmd->cb;
SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
- const struct drm_vmw_size *cur_size = &srf->sizes[i];
+ const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];
header->id = SVGA_3D_CMD_SURFACE_DMA;
header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
@@ -669,7 +670,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
- kfree(srf->sizes);
+ kfree(srf->metadata.sizes);
kfree(srf->snooper.image);
ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
@@ -728,6 +729,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
+ struct vmw_surface_metadata *metadata;
struct vmw_resource *res;
struct vmw_resource *tmp;
union drm_vmw_surface_create_arg *arg =
@@ -793,43 +795,45 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf = &user_srf->srf;
+ metadata = &srf->metadata;
res = &srf->res;
/* Driver internally stores as 64-bit flags */
- srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
- srf->format = req->format;
- srf->scanout = req->scanout;
+ metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
+ metadata->format = req->format;
+ metadata->scanout = req->scanout;
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = num_sizes;
+ memcpy(metadata->mip_levels, req->mip_levels,
+ sizeof(metadata->mip_levels));
+ metadata->num_sizes = num_sizes;
user_srf->size = size;
- srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
- req->size_addr,
- sizeof(*srf->sizes) * srf->num_sizes);
- if (IS_ERR(srf->sizes)) {
- ret = PTR_ERR(srf->sizes);
+ metadata->sizes =
+ memdup_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+ sizeof(*metadata->sizes) * metadata->num_sizes);
+ if (IS_ERR(metadata->sizes)) {
+ ret = PTR_ERR(metadata->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc_array(srf->num_sizes,
- sizeof(*srf->offsets),
+ srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
GFP_KERNEL);
if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
- srf->base_size = *srf->sizes;
- srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->multisample_count = 0;
- srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
- srf->quality_level = SVGA3D_MS_QUALITY_NONE;
+ metadata->base_size = *srf->metadata.sizes;
+ metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ metadata->multisample_count = 0;
+ metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ metadata->quality_level = SVGA3D_MS_QUALITY_NONE;
cur_bo_offset = 0;
cur_offset = srf->offsets;
- cur_size = srf->sizes;
+ cur_size = metadata->sizes;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- for (j = 0; j < srf->mip_levels[i]; ++j) {
+ for (j = 0; j < metadata->mip_levels[i]; ++j) {
uint32_t stride = svga3dsurface_calculate_pitch
(desc, cur_size);
@@ -843,11 +847,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->backup_size = cur_bo_offset;
- if (srf->scanout &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
+ if (metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == 64 &&
+ metadata->sizes[0].height == 64 &&
+ metadata->format == SVGA3D_A8R8G8B8) {
srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
if (!srf->snooper.image) {
@@ -911,7 +915,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
out_no_copy:
kfree(srf->offsets);
out_no_offsets:
- kfree(srf->sizes);
+ kfree(metadata->sizes);
out_no_sizes:
ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
@@ -1031,18 +1035,19 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
srf = &user_srf->srf;
/* Downcast of flags when sending back to user space */
- rep->flags = (uint32_t)srf->flags;
- rep->format = srf->format;
- memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ rep->flags = (uint32_t)srf->metadata.flags;
+ rep->format = srf->metadata.format;
+ memcpy(rep->mip_levels, srf->metadata.mip_levels,
+ sizeof(srf->metadata.mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, &srf->base_size,
- sizeof(srf->base_size));
+ ret = copy_to_user(user_sizes, &srf->metadata.base_size,
+ sizeof(srf->metadata.base_size));
if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
- srf->num_sizes);
+ srf->metadata.num_sizes);
ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
@@ -1062,6 +1067,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_surface_metadata *metadata = &srf->metadata;
uint32_t cmd_len, cmd_id, submit_len;
int ret;
struct {
@@ -1076,6 +1082,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBSurface_v3 body;
} *cmd3;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineGBSurface_v4 body;
+ } *cmd4;
if (likely(res->id != -1))
return 0;
@@ -1092,12 +1102,16 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
goto out_no_fifo;
}
- if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
+ cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
+ cmd_len = sizeof(cmd4->body);
+ submit_len = sizeof(*cmd4);
+ } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
cmd_len = sizeof(cmd3->body);
submit_len = sizeof(*cmd3);
- } else if (srf->array_size > 0) {
- /* has_dx checked on creation time. */
+ } else if (metadata->array_size > 0) {
+ /* VMW_SM_4 support verified at creation time. */
cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
cmd_len = sizeof(cmd2->body);
submit_len = sizeof(*cmd2);
@@ -1110,51 +1124,68 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
+ cmd4 = (typeof(cmd4))cmd;
if (unlikely(!cmd)) {
ret = -ENOMEM;
goto out_no_fifo;
}
- if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+ if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
+ cmd4->header.id = cmd_id;
+ cmd4->header.size = cmd_len;
+ cmd4->body.sid = srf->res.id;
+ cmd4->body.surfaceFlags = metadata->flags;
+ cmd4->body.format = metadata->format;
+ cmd4->body.numMipLevels = metadata->mip_levels[0];
+ cmd4->body.multisampleCount = metadata->multisample_count;
+ cmd4->body.multisamplePattern = metadata->multisample_pattern;
+ cmd4->body.qualityLevel = metadata->quality_level;
+ cmd4->body.autogenFilter = metadata->autogen_filter;
+ cmd4->body.size.width = metadata->base_size.width;
+ cmd4->body.size.height = metadata->base_size.height;
+ cmd4->body.size.depth = metadata->base_size.depth;
+ cmd4->body.arraySize = metadata->array_size;
+ cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
+ } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
cmd3->header.id = cmd_id;
cmd3->header.size = cmd_len;
cmd3->body.sid = srf->res.id;
- cmd3->body.surfaceFlags = srf->flags;
- cmd3->body.format = srf->format;
- cmd3->body.numMipLevels = srf->mip_levels[0];
- cmd3->body.multisampleCount = srf->multisample_count;
- cmd3->body.multisamplePattern = srf->multisample_pattern;
- cmd3->body.qualityLevel = srf->quality_level;
- cmd3->body.autogenFilter = srf->autogen_filter;
- cmd3->body.size.width = srf->base_size.width;
- cmd3->body.size.height = srf->base_size.height;
- cmd3->body.size.depth = srf->base_size.depth;
- cmd3->body.arraySize = srf->array_size;
- } else if (srf->array_size > 0) {
+ cmd3->body.surfaceFlags = metadata->flags;
+ cmd3->body.format = metadata->format;
+ cmd3->body.numMipLevels = metadata->mip_levels[0];
+ cmd3->body.multisampleCount = metadata->multisample_count;
+ cmd3->body.multisamplePattern = metadata->multisample_pattern;
+ cmd3->body.qualityLevel = metadata->quality_level;
+ cmd3->body.autogenFilter = metadata->autogen_filter;
+ cmd3->body.size.width = metadata->base_size.width;
+ cmd3->body.size.height = metadata->base_size.height;
+ cmd3->body.size.depth = metadata->base_size.depth;
+ cmd3->body.arraySize = metadata->array_size;
+ } else if (metadata->array_size > 0) {
cmd2->header.id = cmd_id;
cmd2->header.size = cmd_len;
cmd2->body.sid = srf->res.id;
- cmd2->body.surfaceFlags = srf->flags;
- cmd2->body.format = srf->format;
- cmd2->body.numMipLevels = srf->mip_levels[0];
- cmd2->body.multisampleCount = srf->multisample_count;
- cmd2->body.autogenFilter = srf->autogen_filter;
- cmd2->body.size.width = srf->base_size.width;
- cmd2->body.size.height = srf->base_size.height;
- cmd2->body.size.depth = srf->base_size.depth;
- cmd2->body.arraySize = srf->array_size;
+ cmd2->body.surfaceFlags = metadata->flags;
+ cmd2->body.format = metadata->format;
+ cmd2->body.numMipLevels = metadata->mip_levels[0];
+ cmd2->body.multisampleCount = metadata->multisample_count;
+ cmd2->body.autogenFilter = metadata->autogen_filter;
+ cmd2->body.size.width = metadata->base_size.width;
+ cmd2->body.size.height = metadata->base_size.height;
+ cmd2->body.size.depth = metadata->base_size.depth;
+ cmd2->body.arraySize = metadata->array_size;
} else {
cmd->header.id = cmd_id;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = srf->format;
- cmd->body.numMipLevels = srf->mip_levels[0];
- cmd->body.multisampleCount = srf->multisample_count;
- cmd->body.autogenFilter = srf->autogen_filter;
- cmd->body.size.width = srf->base_size.width;
- cmd->body.size.height = srf->base_size.height;
- cmd->body.size.depth = srf->base_size.depth;
+ cmd->body.surfaceFlags = metadata->flags;
+ cmd->body.format = metadata->format;
+ cmd->body.numMipLevels = metadata->mip_levels[0];
+ cmd->body.multisampleCount = metadata->multisample_count;
+ cmd->body.autogenFilter = metadata->autogen_filter;
+ cmd->body.size.width = metadata->base_size.width;
+ cmd->body.size.height = metadata->base_size.height;
+ cmd->body.size.depth = metadata->base_size.depth;
}
vmw_fifo_commit(dev_priv, submit_len);
@@ -1314,7 +1345,6 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0;
}
-
/**
* vmw_gb_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
@@ -1336,6 +1366,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
req_ext.svga3d_flags_upper_32_bits = 0;
req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+ req_ext.buffer_byte_stride = 0;
req_ext.must_be_zero = 0;
return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
@@ -1371,171 +1402,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_surface_gb_priv_define - Define a private GB surface
- *
- * @dev: Pointer to a struct drm_device
- * @user_accounting_size: Used to track user-space memory usage, set
- * to 0 for kernel mode only memory
- * @svga3d_flags: SVGA3d surface flags for the device
- * @format: requested surface format
- * @for_scanout: true if inteded to be used for scanout buffer
- * @num_mip_levels: number of MIP levels
- * @multisample_count:
- * @array_size: Surface array size.
- * @size: width, heigh, depth of the surface requested
- * @multisample_pattern: Multisampling pattern when msaa is supported
- * @quality_level: Precision settings
- * @user_srf_out: allocated user_srf. Set to NULL on failure.
- *
- * GB surfaces allocated by this function will not have a user mode handle, and
- * thus will only be visible to vmwgfx. For optimization reasons the
- * surface may later be given a user mode handle by another function to make
- * it available to user mode drivers.
- */
-int vmw_surface_gb_priv_define(struct drm_device *dev,
- uint32_t user_accounting_size,
- SVGA3dSurfaceAllFlags svga3d_flags,
- SVGA3dSurfaceFormat format,
- bool for_scanout,
- uint32_t num_mip_levels,
- uint32_t multisample_count,
- uint32_t array_size,
- struct drm_vmw_size size,
- SVGA3dMSPattern multisample_pattern,
- SVGA3dMSQualityLevel quality_level,
- struct vmw_surface **srf_out)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
- struct vmw_surface *srf;
- int ret;
- u32 num_layers = 1;
- u32 sample_count = 1;
-
- *srf_out = NULL;
-
- if (for_scanout) {
- if (!svga3dsurface_is_screen_target_format(format)) {
- VMW_DEBUG_USER("Invalid Screen Target surface format.");
- return -EINVAL;
- }
-
- if (size.width > dev_priv->texture_max_width ||
- size.height > dev_priv->texture_max_height) {
- VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
- size.width, size.height,
- dev_priv->texture_max_width,
- dev_priv->texture_max_height);
- return -EINVAL;
- }
- } else {
- const struct svga3d_surface_desc *desc;
-
- desc = svga3dsurface_get_desc(format);
- if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- VMW_DEBUG_USER("Invalid surface format.\n");
- return -EINVAL;
- }
- }
-
- /* array_size must be null for non-GL3 host. */
- if (array_size > 0 && !dev_priv->has_dx) {
- VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
- return -EINVAL;
- }
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- user_accounting_size, &ctx);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(!user_srf)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- *srf_out = &user_srf->srf;
- user_srf->size = user_accounting_size;
- user_srf->prime.base.shareable = false;
- user_srf->prime.base.tfile = NULL;
-
- srf = &user_srf->srf;
- srf->flags = svga3d_flags;
- srf->format = format;
- srf->scanout = for_scanout;
- srf->mip_levels[0] = num_mip_levels;
- srf->num_sizes = 1;
- srf->sizes = NULL;
- srf->offsets = NULL;
- srf->base_size = size;
- srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
- srf->array_size = array_size;
- srf->multisample_count = multisample_count;
- srf->multisample_pattern = multisample_pattern;
- srf->quality_level = quality_level;
-
- if (array_size)
- num_layers = array_size;
- else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
- num_layers = SVGA3D_MAX_SURFACE_FACES;
-
- if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
- sample_count = srf->multisample_count;
-
- srf->res.backup_size =
- svga3dsurface_get_serialized_size_extended(srf->format,
- srf->base_size,
- srf->mip_levels[0],
- num_layers,
- sample_count);
-
- if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
- srf->res.backup_size += sizeof(SVGA3dDXSOState);
-
- /*
- * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
- * size greater than STDU max width/height. This is really a workaround
- * to support creation of big framebuffer requested by some user-space
- * for whole topology. That big framebuffer won't really be used for
- * binding with screen target as during prepare_fb a separate surface is
- * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
- */
- if (dev_priv->active_display_unit == vmw_du_screen_target &&
- for_scanout && size.width <= dev_priv->stdu_max_width &&
- size.height <= dev_priv->stdu_max_height)
- srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
-
- /*
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
- return ret;
-}
-
-/**
* vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
* the user surface define functionality.
*
@@ -1588,43 +1454,60 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
struct drm_vmw_gb_surface_create_rep *rep,
struct drm_file *file_priv)
{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
+ struct vmw_surface_metadata metadata = {0};
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret;
+ int ret = 0;
uint32_t size;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
req->base.svga3d_flags);
- if (!dev_priv->has_sm4_1) {
- /*
- * If SM4_1 is not support then cannot send 64-bit flag to
- * device.
- */
+ /* array_size must be zero for a non-GL3 host. */
+ if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
+ VMW_DEBUG_USER("SM4 surface not supported.\n");
+ return -EINVAL;
+ }
+
+ if (!has_sm4_1_context(dev_priv)) {
if (req->svga3d_flags_upper_32_bits != 0)
- return -EINVAL;
+ ret = -EINVAL;
if (req->base.multisample_count != 0)
- return -EINVAL;
+ ret = -EINVAL;
if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
- return -EINVAL;
+ ret = -EINVAL;
if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
- return -EINVAL;
+ ret = -EINVAL;
+
+ if (ret) {
+ VMW_DEBUG_USER("SM4.1 surface not supported.\n");
+ return ret;
+ }
+ }
+
+ if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
+ VMW_DEBUG_USER("SM5 surface not supported.\n");
+ return -EINVAL;
}
if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
- req->base.multisample_count == 0)
+ req->base.multisample_count == 0) {
+ VMW_DEBUG_USER("Invalid sample count.\n");
return -EINVAL;
+ }
- if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
+ VMW_DEBUG_USER("Invalid mip level.\n");
return -EINVAL;
+ }
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
@@ -1632,22 +1515,25 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
size = vmw_user_surface_size;
+ metadata.flags = svga3d_flags_64;
+ metadata.format = req->base.format;
+ metadata.mip_levels[0] = req->base.mip_levels;
+ metadata.multisample_count = req->base.multisample_count;
+ metadata.multisample_pattern = req->multisample_pattern;
+ metadata.quality_level = req->quality_level;
+ metadata.array_size = req->base.array_size;
+ metadata.buffer_byte_stride = req->buffer_byte_stride;
+ metadata.num_sizes = 1;
+ metadata.base_size = req->base.base_size;
+ metadata.scanout = req->base.drm_surface_flags &
+ drm_vmw_surface_flag_scanout;
+
/* Define a surface based on the parameters. */
- ret = vmw_surface_gb_priv_define(dev,
- size,
- svga3d_flags_64,
- req->base.format,
- req->base.drm_surface_flags &
- drm_vmw_surface_flag_scanout,
- req->base.mip_levels,
- req->base.multisample_count,
- req->base.array_size,
- req->base.base_size,
- req->multisample_pattern,
- req->quality_level,
- &srf);
- if (unlikely(ret != 0))
+ ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+ if (ret != 0) {
+ VMW_DEBUG_USER("Failed to define surface.\n");
return ret;
+ }
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
@@ -1762,6 +1648,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
+ struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
uint32_t backup_handle;
int ret = -EINVAL;
@@ -1777,6 +1664,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
+ metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
@@ -1790,15 +1678,15 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
goto out_bad_resource;
}
- rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
- rep->creq.base.format = srf->format;
- rep->creq.base.mip_levels = srf->mip_levels[0];
+ rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
+ rep->creq.base.format = metadata->format;
+ rep->creq.base.mip_levels = metadata->mip_levels[0];
rep->creq.base.drm_surface_flags = 0;
- rep->creq.base.multisample_count = srf->multisample_count;
- rep->creq.base.autogen_filter = srf->autogen_filter;
- rep->creq.base.array_size = srf->array_size;
+ rep->creq.base.multisample_count = metadata->multisample_count;
+ rep->creq.base.autogen_filter = metadata->autogen_filter;
+ rep->creq.base.array_size = metadata->array_size;
rep->creq.base.buffer_handle = backup_handle;
- rep->creq.base.base_size = srf->base_size;
+ rep->creq.base.base_size = metadata->base_size;
rep->crep.handle = user_srf->prime.base.handle;
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
@@ -1808,9 +1696,9 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
- SVGA3D_FLAGS_UPPER_32(srf->flags);
- rep->creq.multisample_pattern = srf->multisample_pattern;
- rep->creq.quality_level = srf->quality_level;
+ SVGA3D_FLAGS_UPPER_32(metadata->flags);
+ rep->creq.multisample_pattern = metadata->multisample_pattern;
+ rep->creq.quality_level = metadata->quality_level;
rep->creq.must_be_zero = 0;
out_bad_resource:
@@ -1968,7 +1856,7 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
start >= res->backup_offset + res->backup_size))
return;
- if (srf->format == SVGA3D_BUFFER)
+ if (srf->metadata.format == SVGA3D_BUFFER)
vmw_surface_buf_dirty_range_add(res, start, end);
else
vmw_surface_tex_dirty_range_add(res, start, end);
@@ -2058,6 +1946,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
struct vmw_surface *srf = vmw_res_to_srf(res);
+ const struct vmw_surface_metadata *metadata = &srf->metadata;
struct vmw_surface_dirty *dirty;
u32 num_layers = 1;
u32 num_mip;
@@ -2070,12 +1959,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
};
int ret;
- if (srf->array_size)
- num_layers = srf->array_size;
- else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
+ if (metadata->array_size)
+ num_layers = metadata->array_size;
+ else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
num_layers *= SVGA3D_MAX_SURFACE_FACES;
- num_mip = srf->mip_levels[0];
+ num_mip = metadata->mip_levels[0];
if (!num_mip)
num_mip = 1;
@@ -2096,9 +1985,10 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
goto out_no_dirty;
}
- num_samples = max_t(u32, 1, srf->multisample_count);
- ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
- num_layers, num_samples, &dirty->cache);
+ num_samples = max_t(u32, 1, metadata->multisample_count);
+ ret = svga3dsurface_setup_cache(&metadata->base_size, metadata->format,
+ num_mip, num_layers, num_samples,
+ &dirty->cache);
if (ret)
goto out_no_cache;
@@ -2153,3 +2043,147 @@ static int vmw_surface_clean(struct vmw_resource *res)
return 0;
}
+
+/**
+ * vmw_gb_surface_define - Define a private GB surface
+ *
+ * @dev_priv: Pointer to a device private.
+ * @user_accounting_size: Used to track user-space memory usage; set
+ * to 0 for kernel-mode-only memory.
+ * @metadata: Metadata representing the surface to create.
+ * @srf_out: Allocated user_srf; set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx. For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_gb_surface_define(struct vmw_private *dev_priv,
+ uint32_t user_accounting_size,
+ const struct vmw_surface_metadata *req,
+ struct vmw_surface **srf_out)
+{
+ struct vmw_surface_metadata *metadata;
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
+ u32 sample_count = 1;
+ u32 num_layers = 1;
+ int ret;
+
+ *srf_out = NULL;
+
+ if (req->scanout) {
+ if (!svga3dsurface_is_screen_target_format(req->format)) {
+ VMW_DEBUG_USER("Invalid Screen Target surface format.");
+ return -EINVAL;
+ }
+
+ if (req->base_size.width > dev_priv->texture_max_width ||
+ req->base_size.height > dev_priv->texture_max_height) {
+ VMW_DEBUG_USER("%ux%u\n, exceed max surface size %ux%u",
+ req->base_size.width,
+ req->base_size.height,
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ return -EINVAL;
+ }
+ } else {
+ const struct svga3d_surface_desc *desc =
+ svga3dsurface_get_desc(req->format);
+
+ if (desc->block_desc == SVGA3DBLOCKDESC_NONE) {
+ VMW_DEBUG_USER("Invalid surface format.\n");
+ return -EINVAL;
+ }
+ }
+
+ if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
+ return -EINVAL;
+
+ if (req->num_sizes != 1)
+ return -EINVAL;
+
+ if (req->sizes != NULL)
+ return -EINVAL;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ user_accounting_size, &ctx);
+ if (ret != 0) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(!user_srf)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ *srf_out = &user_srf->srf;
+ user_srf->size = user_accounting_size;
+ user_srf->prime.base.shareable = false;
+ user_srf->prime.base.tfile = NULL;
+
+ srf = &user_srf->srf;
+ srf->metadata = *req;
+ srf->offsets = NULL;
+
+ metadata = &srf->metadata;
+
+ if (metadata->array_size)
+ num_layers = req->array_size;
+ else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
+ num_layers = SVGA3D_MAX_SURFACE_FACES;
+
+ if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
+ sample_count = metadata->multisample_count;
+
+ srf->res.backup_size =
+ svga3dsurface_get_serialized_size_extended(metadata->format,
+ metadata->base_size,
+ metadata->mip_levels[0],
+ num_layers,
+ sample_count);
+
+ if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+ srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+ /*
+ * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
+ * size greater than STDU max width/height. This is really a workaround
+ * to support creation of big framebuffer requested by some user-space
+ * for whole topology. That big framebuffer won't really be used for
+ * binding with screen target as during prepare_fb a separate surface is
+ * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
+ */
+ if (dev_priv->active_display_unit == vmw_du_screen_target &&
+ metadata->scanout &&
+ metadata->base_size.width <= dev_priv->stdu_max_width &&
+ metadata->base_size.height <= dev_priv->stdu_max_height)
+ metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+ /*
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
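
With vmw_surface_gb_priv_define() gone, kernel-internal callers fill a vmw_surface_metadata and call vmw_gb_surface_define() instead. A minimal sketch with illustrative values; note the function rejects num_sizes != 1, a non-NULL sizes pointer, and any autogen_filter other than SVGA3D_TEX_FILTER_NONE, all of which the zero-initialized defaults satisfy:

        struct vmw_surface_metadata metadata = {0};
        struct vmw_surface *srf;
        int ret;

        metadata.format = SVGA3D_X8R8G8B8;      /* illustrative format */
        metadata.mip_levels[0] = 1;
        metadata.num_sizes = 1;
        metadata.base_size.width = 1024;
        metadata.base_size.height = 768;
        metadata.base_size.depth = 1;
        metadata.scanout = true;

        /* Accounting size 0: kernel-mode-only memory, no user tracking. */
        ret = vmw_gb_surface_define(dev_priv, 0, &metadata, &srf);
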
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
new file mode 100644
index 000000000000..b7c816ba7166
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Huge page-table-entry support for IO memory.
+ *
+ * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
+ */
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+
+/**
+ * struct vmw_thp_manager - Range manager implementing huge page alignment
+ *
+ * @mm: The underlying range manager. Protected by @lock.
+ * @lock: Manager lock.
+ */
+struct vmw_thp_manager {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
+static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long align_pages,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem,
+ unsigned long lpfn,
+ enum drm_mm_insert_mode mode)
+{
+ if (align_pages >= mem->page_alignment &&
+ (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+ return drm_mm_insert_node_in_range(mm, node,
+ mem->num_pages,
+ align_pages, 0,
+ place->fpfn, lpfn, mode);
+ }
+
+ return -ENOSPC;
+}
+
+static int vmw_thp_get_node(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+ struct drm_mm_node *node;
+ unsigned long align_pages;
+ unsigned long lpfn;
+ enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ lpfn = place->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+
+ mode = DRM_MM_INSERT_BEST;
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mode = DRM_MM_INSERT_HIGH;
+
+ spin_lock(&rman->lock);
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
+ align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
+ if (mem->num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(mm, node, align_pages,
+ place, mem, lpfn, mode);
+ if (!ret)
+ goto found_unlock;
+ }
+ }
+
+ align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
+ if (mem->num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
+ lpfn, mode);
+ if (!ret)
+ goto found_unlock;
+ }
+
+ ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ mem->page_alignment, 0,
+ place->fpfn, lpfn, mode);
+found_unlock:
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
+ kfree(node);
+ } else {
+ mem->mm_node = node;
+ mem->start = node->start;
+ }
+
+ return 0;
+}
+
+
+static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+ if (mem->mm_node) {
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(mem->mm_node);
+ spin_unlock(&rman->lock);
+
+ kfree(mem->mm_node);
+ mem->mm_node = NULL;
+ }
+}
+
+static int vmw_thp_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct vmw_thp_manager *rman;
+
+ rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+ if (!rman)
+ return -ENOMEM;
+
+ drm_mm_init(&rman->mm, 0, p_size);
+ spin_lock_init(&rman->lock);
+ man->priv = rman;
+ return 0;
+}
+
+static int vmw_thp_takedown(struct ttm_mem_type_manager *man)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
+
+ spin_lock(&rman->lock);
+ if (drm_mm_clean(mm)) {
+ drm_mm_takedown(mm);
+ spin_unlock(&rman->lock);
+ kfree(rman);
+ man->priv = NULL;
+ return 0;
+ }
+ spin_unlock(&rman->lock);
+ return -EBUSY;
+}
+
+static void vmw_thp_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
+{
+ struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+
+ spin_lock(&rman->lock);
+ drm_mm_print(&rman->mm, printer);
+ spin_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func vmw_thp_func = {
+ .init = vmw_thp_init,
+ .takedown = vmw_thp_takedown,
+ .get_node = vmw_thp_get_node,
+ .put_node = vmw_thp_put_node,
+ .debug = vmw_thp_debug
+};
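
The new range manager tries the largest alignment first and falls back: PUD-size huge pages (only when CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD is enabled), then PMD-size, then the buffer's own page_alignment. A standalone sketch of the same ladder, with try_insert() standing in for drm_mm_insert_node_in_range() and x86-64 page counts (1 GiB PUD, 2 MiB PMD over 4 KiB base pages) as illustrative constants:

        /* Hypothetical helper mirroring the fallback ladder in vmw_thp_get_node(). */
        static int thp_try_aligned_insert(unsigned long num_pages,
                                          unsigned long page_alignment,
                                          int (*try_insert)(unsigned long align_pages))
        {
                const unsigned long pud_pages = 1UL << 18;   /* 1 GiB / 4 KiB */
                const unsigned long pmd_pages = 1UL << 9;    /* 2 MiB / 4 KiB */

                if (num_pages >= pud_pages && !try_insert(pud_pages))
                        return 0;                    /* start is PUD-aligned */
                if (num_pages >= pmd_pages && !try_insert(pmd_pages))
                        return 0;                    /* start is PMD-aligned */
                return try_insert(page_alignment);   /* plain base pages */
        }

An aligned start address is what later allows the CPU to map the BO with huge page table entries via the .huge_fault handler wired up in vmwgfx_ttm_glue.c below.
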
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index d8ea3dd10af0..bf0bc4697959 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -736,11 +736,6 @@ out_no_init:
return NULL;
}
-static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -754,7 +749,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
+ man->func = &vmw_thp_func;
man->gpu_offset = 0;
man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_CACHED;
@@ -866,7 +861,6 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
- .invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index aa7e50f63b94..3c03b1746661 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
.page_mkwrite = vmw_bo_vm_mkwrite,
.fault = vmw_bo_vm_fault,
.open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close
+ .close = ttm_bo_vm_close,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ .huge_fault = vmw_bo_vm_huge_fault,
+#endif
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 4f34c5208180..78096bbcd226 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -220,6 +220,24 @@ static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
return false;
}
+static int display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ /*
+ * Xen doesn't initialize vblanking via drm_vblank_init(), so
+ * DRM helpers assume that it doesn't handle vblanking and start
+ * sending out fake VBLANK events automatically.
+ *
+ * As xen contains its own logic for sending out VBLANK events
+ * in send_pending_event(), disable no_vblank (i.e., the xen
+ * driver has vblanking support).
+ */
+ crtc_state->no_vblank = false;
+
+ return 0;
+}
+
static void display_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
{
@@ -284,6 +302,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = {
.enable = display_enable,
.disable = display_disable,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .check = display_check,
.update = display_update,
};
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 086c50fac689..c8f7b21fa09e 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -54,7 +54,7 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
int min_scale = FRAC_16_16(1, 8);
int max_scale = FRAC_16_16(8, 1);
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
@@ -281,7 +281,7 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
diff --git a/drivers/gpu/trace/Kconfig b/drivers/gpu/trace/Kconfig
new file mode 100644
index 000000000000..c24e9edd022e
--- /dev/null
+++ b/drivers/gpu/trace/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config TRACE_GPU_MEM
+ bool
diff --git a/drivers/gpu/trace/Makefile b/drivers/gpu/trace/Makefile
new file mode 100644
index 000000000000..b70fbdc5847f
--- /dev/null
+++ b/drivers/gpu/trace/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_TRACE_GPU_MEM) += trace_gpu_mem.o
diff --git a/drivers/gpu/trace/trace_gpu_mem.c b/drivers/gpu/trace/trace_gpu_mem.c
new file mode 100644
index 000000000000..01e855897b6d
--- /dev/null
+++ b/drivers/gpu/trace/trace_gpu_mem.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPU memory trace points
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu_mem.h>
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_mem_total);
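
drivers/gpu/trace only exports the tracepoint; the event itself lives in include/trace/events/gpu_mem.h and is meant to be emitted by GPU drivers whenever their total memory usage changes. A hedged sketch of a driver-side call, assuming the event takes (gpu_id, pid, size) with pid 0 denoting the per-GPU global total:

        #include <trace/events/gpu_mem.h>

        /* Hypothetical accounting hook in a GPU driver. */
        static void example_report_gpu_mem(u32 gpu_id, u32 pid, u64 total_bytes)
        {
                /* pid == 0 reports the global total for this GPU. */
                trace_gpu_mem_total(gpu_id, pid, total_bytes);
        }
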
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 494a39e74939..7c89edbd6c5a 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -362,6 +362,13 @@ config HID_GFRM
---help---
Support for Google Fiber TV Box remote controls
+config HID_GLORIOUS
+ tristate "Glorious PC Gaming Race mice"
+ depends on HID
+ help
+ Support for Glorious PC Gaming Race mice such as
+ the Glorious Model O, O- and D.
+
config HID_HOLTEK
tristate "Holtek HID devices"
depends on USB_HID
@@ -1039,7 +1046,7 @@ config HID_U2FZERO
U2F Zero only supports blinking its LED, so this driver doesn't
allow setting the brightness to anything but 1, which will
- trigger a single blink and immediately reset to back 0.
+ trigger a single blink and immediately reset back to 0.
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
@@ -1145,6 +1152,16 @@ config HID_ALPS
Say Y here if you have a Alps touchpads over i2c-hid or usbhid
and want support for its special functionalities.
+config HID_MCP2221
+ tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support"
+ depends on USB_HID && I2C
+ ---help---
+ Provides I2C and SMBUS host adapter functionality over USB-HID
+ through the MCP2221 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-mcp2221.ko.
+
endmenu
endif # HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index bfefa365b1ce..d8ea4b8c95af 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HID_ELO) += hid-elo.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
+obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o
obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
@@ -70,6 +71,7 @@ obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MACALLY) += hid-macally.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
+obj-$(CONFIG_HID_MCP2221) += hid-mcp2221.o
obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index bf8d4afe0d6a..8deded185725 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -283,11 +283,9 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
int ret;
struct appleir *appleir;
- appleir = kzalloc(sizeof(struct appleir), GFP_KERNEL);
- if (!appleir) {
- ret = -ENOMEM;
- goto allocfail;
- }
+ appleir = devm_kzalloc(&hid->dev, sizeof(struct appleir), GFP_KERNEL);
+ if (!appleir)
+ return -ENOMEM;
appleir->hid = hid;
@@ -313,8 +311,7 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
return 0;
fail:
- kfree(appleir);
-allocfail:
+ devm_kfree(&hid->dev, appleir);
return ret;
}
@@ -323,7 +320,6 @@ static void appleir_remove(struct hid_device *hid)
struct appleir *appleir = hid_get_drvdata(hid);
hid_hw_stop(hid);
del_timer_sync(&appleir->key_up_timer);
- kfree(appleir);
}
static const struct hid_device_id appleir_devices[] = {
diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c
new file mode 100644
index 000000000000..558eb08c19ef
--- /dev/null
+++ b/drivers/hid/hid-glorious.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * USB HID driver for Glorious PC Gaming Race
+ * Glorious Model O, O- and D mice.
+ *
+ * Copyright (c) 2020 Samuel Čavoj <sammko@sammserver.com>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Samuel ÄŒavoj <sammko@sammserver.com>");
+MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
+
+/*
+ * Glorious Model O and O- specify the const flag in the consumer input
+ * report descriptor, which leads to inputs being ignored. Fix this
+ * by patching the descriptor.
+ */
+static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize == 213 &&
+ rdesc[84] == 129 && rdesc[112] == 129 && rdesc[140] == 129 &&
+ rdesc[85] == 3 && rdesc[113] == 3 && rdesc[141] == 3) {
+ hid_info(hdev, "patching Glorious Model O consumer control report descriptor\n");
+ rdesc[85] = rdesc[113] = rdesc[141] =
+ HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
+ }
+ return rdesc;
+}
+
+static void glorious_update_name(struct hid_device *hdev)
+{
+ const char *model = "Device";
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_GLORIOUS_MODEL_O:
+ model = "Model O"; break;
+ case USB_DEVICE_ID_GLORIOUS_MODEL_D:
+ model = "Model D"; break;
+ }
+
+ snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
+}
+
+static int glorious_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ glorious_update_name(hdev);
+
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static const struct hid_device_id glorious_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ USB_DEVICE_ID_GLORIOUS_MODEL_O) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ USB_DEVICE_ID_GLORIOUS_MODEL_D) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, glorious_devices);
+
+static struct hid_driver glorious_driver = {
+ .name = "glorious",
+ .id_table = glorious_devices,
+ .probe = glorious_probe,
+ .report_fixup = glorious_report_fixup
+};
+
+module_hid_driver(glorious_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9f2213426556..b18b13147a6f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -464,6 +464,10 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+#define USB_VENDOR_ID_GLORIOUS 0x258a
+#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
+#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
+
#define I2C_VENDOR_ID_GOODIX 0x27c6
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@@ -821,6 +825,7 @@
#define USB_DEVICE_ID_PICK16F1454 0x0042
#define USB_DEVICE_ID_PICK16F1454_V2 0xf2f7
#define USB_DEVICE_ID_LUXAFOR 0xf372
+#define USB_DEVICE_ID_MCP2221 0x00dd
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index 8a9268a5c66a..ad4b5412a9f4 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -803,8 +803,10 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
if (ret < 0) {
- hid_err(hdev, "Error disabling keyboard emulation for the G-keys\n");
- goto error_hw_stop;
+ hid_err(hdev, "Error %d disabling keyboard emulation for the G-keys, falling back to generic hid-input driver\n",
+ ret);
+ hid_set_drvdata(hdev, NULL);
+ return 0;
}
/* Get initial brightness levels */
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index bb50d6e7745b..ed9b1c1f460d 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -16,11 +16,11 @@
#include <asm/unaligned.h>
#include "hid-ids.h"
-#define DJ_MAX_PAIRED_DEVICES 6
+#define DJ_MAX_PAIRED_DEVICES 7
#define DJ_MAX_NUMBER_NOTIFS 8
#define DJ_RECEIVER_INDEX 0
#define DJ_DEVICE_INDEX_MIN 1
-#define DJ_DEVICE_INDEX_MAX 6
+#define DJ_DEVICE_INDEX_MAX 7
#define DJREPORT_SHORT_LENGTH 15
#define DJREPORT_LONG_LENGTH 32
@@ -980,6 +980,11 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
break;
}
+ /* custom receiver device (e.g. powerplay) */
+ if (hidpp_report->device_index == 7) {
+ workitem.reports_supported |= HIDPP;
+ }
+
if (workitem.type == WORKITEM_TYPE_EMPTY) {
hid_warn(hdev,
"unusable device of type %s (0x%02x) connected on slot %d",
@@ -1368,6 +1373,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
}
if (djdev->reports_supported & HIDPP) {
+ dbg_hid("%s: sending a HID++ descriptor, reports_supported: %llx\n",
+ __func__, djdev->reports_supported);
rdcat(rdesc, &rsize, hidpp_descriptor,
sizeof(hidpp_descriptor));
}
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
new file mode 100644
index 000000000000..d958475f8c81
--- /dev/null
+++ b/drivers/hid/hid-mcp2221.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MCP2221A - Microchip USB to I2C Host Protocol Bridge
+ *
+ * Copyright (c) 2020, Rishi Gupta <gupt21@gmail.com>
+ *
+ * Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20005565B.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/i2c.h>
+#include "hid-ids.h"
+
+/* Command codes in a raw output report */
+enum {
+ MCP2221_I2C_WR_DATA = 0x90,
+ MCP2221_I2C_WR_NO_STOP = 0x94,
+ MCP2221_I2C_RD_DATA = 0x91,
+ MCP2221_I2C_RD_RPT_START = 0x93,
+ MCP2221_I2C_GET_DATA = 0x40,
+ MCP2221_I2C_PARAM_OR_STATUS = 0x10,
+ MCP2221_I2C_SET_SPEED = 0x20,
+ MCP2221_I2C_CANCEL = 0x10,
+};
+
+/* Response codes in a raw input report */
+enum {
+ MCP2221_SUCCESS = 0x00,
+ MCP2221_I2C_ENG_BUSY = 0x01,
+ MCP2221_I2C_START_TOUT = 0x12,
+ MCP2221_I2C_STOP_TOUT = 0x62,
+ MCP2221_I2C_WRADDRL_TOUT = 0x23,
+ MCP2221_I2C_WRDATA_TOUT = 0x44,
+ MCP2221_I2C_WRADDRL_NACK = 0x25,
+ MCP2221_I2C_MASK_ADDR_NACK = 0x40,
+ MCP2221_I2C_WRADDRL_SEND = 0x21,
+ MCP2221_I2C_ADDR_NACK = 0x25,
+ MCP2221_I2C_READ_COMPL = 0x55,
+};
+
+/*
+ * There is no way to distinguish responses, so the next command is
+ * sent only after the response to the previous one has been received.
+ * The mutex lock is used mainly for this purpose.
+ */
+struct mcp2221 {
+ struct hid_device *hdev;
+ struct i2c_adapter adapter;
+ struct mutex lock;
+ struct completion wait_in_report;
+ u8 *rxbuf;
+ u8 txbuf[64];
+ int rxbuf_idx;
+ int status;
+ u8 cur_i2c_clk_div;
+};
+
+/*
+ * Default i2c bus clock frequency is 400 kHz. Modify this if you
+ * want to set some other frequency (min 50 kHz - max 400 kHz).
+ */
+static uint i2c_clk_freq = 400;
+
+/* Synchronously send output report to the device */
+static int mcp_send_report(struct mcp2221 *mcp,
+ u8 *out_report, size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(out_report, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* mcp2221 uses interrupt endpoint for out reports */
+ ret = hid_hw_output_report(mcp->hdev, buf, len);
+ kfree(buf);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/*
+ * Send an output report to the device and wait for an input report
+ * to be received from the device. If the device does not respond,
+ * we time out.
+ */
+static int mcp_send_data_req_status(struct mcp2221 *mcp,
+ u8 *out_report, int len)
+{
+ int ret;
+ unsigned long t;
+
+ reinit_completion(&mcp->wait_in_report);
+
+ ret = mcp_send_report(mcp, out_report, len);
+ if (ret)
+ return ret;
+
+ t = wait_for_completion_timeout(&mcp->wait_in_report,
+ msecs_to_jiffies(4000));
+ if (!t)
+ return -ETIMEDOUT;
+
+ return mcp->status;
+}
+
+/* Check pass/fail for actual communication with i2c slave */
+static int mcp_chk_last_cmd_status(struct mcp2221 *mcp)
+{
+ memset(mcp->txbuf, 0, 8);
+ mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS;
+
+ return mcp_send_data_req_status(mcp, mcp->txbuf, 8);
+}
+
+/* Cancel the last command, releasing the i2c bus in case it is occupied */
+static int mcp_cancel_last_cmd(struct mcp2221 *mcp)
+{
+ memset(mcp->txbuf, 0, 8);
+ mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS;
+ mcp->txbuf[2] = MCP2221_I2C_CANCEL;
+
+ return mcp_send_data_req_status(mcp, mcp->txbuf, 8);
+}
+
+static int mcp_set_i2c_speed(struct mcp2221 *mcp)
+{
+ int ret;
+
+ memset(mcp->txbuf, 0, 8);
+ mcp->txbuf[0] = MCP2221_I2C_PARAM_OR_STATUS;
+ mcp->txbuf[3] = MCP2221_I2C_SET_SPEED;
+ mcp->txbuf[4] = mcp->cur_i2c_clk_div;
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 8);
+ if (ret) {
+ /* Small delay is needed here */
+ usleep_range(980, 1000);
+ mcp_cancel_last_cmd(mcp);
+ }
+
+ return 0;
+}
+
+/*
+ * An output report can contain a minimum of 1 and a maximum of 60
+ * user data bytes. If the number of data bytes is more than 60, we
+ * send it in chunks of 60 bytes; the last chunk may contain exactly
+ * 60 or fewer bytes. The total number of bytes is announced in the
+ * very first report to the mcp2221; from that point onwards it first
+ * collects all the data from the host and then sends it to the i2c
+ * slave device.
+ */
+static int mcp_i2c_write(struct mcp2221 *mcp,
+ struct i2c_msg *msg, int type, u8 last_status)
+{
+ int ret, len, idx, sent;
+
+ idx = 0;
+ sent = 0;
+ if (msg->len < 60)
+ len = msg->len;
+ else
+ len = 60;
+
+ do {
+ mcp->txbuf[0] = type;
+ mcp->txbuf[1] = msg->len & 0xff;
+ mcp->txbuf[2] = msg->len >> 8;
+ mcp->txbuf[3] = (u8)(msg->addr << 1);
+
+ memcpy(&mcp->txbuf[4], &msg->buf[idx], len);
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, len + 4);
+ if (ret)
+ return ret;
+
+ usleep_range(980, 1000);
+
+ if (last_status) {
+ ret = mcp_chk_last_cmd_status(mcp);
+ if (ret)
+ return ret;
+ }
+
+ sent = sent + len;
+ if (sent >= msg->len)
+ break;
+
+ idx = idx + len;
+ if ((msg->len - sent) < 60)
+ len = msg->len - sent;
+ else
+ len = 60;
+
+ /*
+ * Testing shows delay is needed between successive writes
+ * otherwise next write fails on first-try from i2c core.
+ * This value is obtained through automated stress testing.
+ */
+ usleep_range(980, 1000);
+ } while (len > 0);
+
+ return ret;
+}
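To make the chunking rule concrete, a minimal user-space sketch (illustrative only, not part of the driver) reproducing how a 150-byte message is split into 60 + 60 + 30 byte payloads:

#include <stdio.h>

/* Sketch of the chunking performed by mcp_i2c_write() above:
 * report payloads are capped at 60 bytes, and the last chunk
 * carries the remainder.
 */
int main(void)
{
	int total = 150, sent = 0, len;

	while (sent < total) {
		len = (total - sent) < 60 ? (total - sent) : 60;
		printf("chunk of %d bytes\n", len); /* prints 60, 60, 30 */
		sent += len;
	}
	return 0;
}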
+
+/*
+ * The device reads all data (0 - 65535 bytes) from the i2c slave
+ * device and stores it internally. This data is then read back from
+ * the device to the host in chunks of up to 60 bytes using input
+ * reports.
+ */
+static int mcp_i2c_smbus_read(struct mcp2221 *mcp,
+ struct i2c_msg *msg, int type, u16 smbus_addr,
+ u8 smbus_len, u8 *smbus_buf)
+{
+ int ret;
+ u16 total_len;
+
+ mcp->txbuf[0] = type;
+ if (msg) {
+ mcp->txbuf[1] = msg->len & 0xff;
+ mcp->txbuf[2] = msg->len >> 8;
+ mcp->txbuf[3] = (u8)(msg->addr << 1);
+ total_len = msg->len;
+ mcp->rxbuf = msg->buf;
+ } else {
+ mcp->txbuf[1] = smbus_len;
+ mcp->txbuf[2] = 0;
+ mcp->txbuf[3] = (u8)(smbus_addr << 1);
+ total_len = smbus_len;
+ mcp->rxbuf = smbus_buf;
+ }
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 4);
+ if (ret)
+ return ret;
+
+ mcp->rxbuf_idx = 0;
+
+ do {
+ memset(mcp->txbuf, 0, 4);
+ mcp->txbuf[0] = MCP2221_I2C_GET_DATA;
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ if (ret)
+ return ret;
+
+ ret = mcp_chk_last_cmd_status(mcp);
+ if (ret)
+ return ret;
+
+ usleep_range(980, 1000);
+ } while (mcp->rxbuf_idx < total_len);
+
+ return ret;
+}
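For example, a 100-byte read issues the initial read command followed by two MCP2221_I2C_GET_DATA rounds: the raw event handler further below typically copies 60 and then 40 bytes from data[4] onwards (data[3] carries the per-report byte count) and advances rxbuf_idx, so the loop above exits once rxbuf_idx reaches total_len.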
+
+static int mcp_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msgs[], int num)
+{
+ int ret;
+ struct mcp2221 *mcp = i2c_get_adapdata(adapter);
+
+ hid_hw_power(mcp->hdev, PM_HINT_FULLON);
+
+ mutex_lock(&mcp->lock);
+
+ /* Setting speed before every transaction is required for mcp2221 */
+ ret = mcp_set_i2c_speed(mcp);
+ if (ret)
+ goto exit;
+
+ if (num == 1) {
+ if (msgs->flags & I2C_M_RD) {
+ ret = mcp_i2c_smbus_read(mcp, msgs, MCP2221_I2C_RD_DATA,
+ 0, 0, NULL);
+ } else {
+ ret = mcp_i2c_write(mcp, msgs, MCP2221_I2C_WR_DATA, 1);
+ }
+ if (ret)
+ goto exit;
+ ret = num;
+ } else if (num == 2) {
+ /* Usual write-then-read transaction: send reg address, then read its contents */
+ if (msgs[0].addr == msgs[1].addr &&
+ !(msgs[0].flags & I2C_M_RD) &&
+ (msgs[1].flags & I2C_M_RD)) {
+
+ ret = mcp_i2c_write(mcp, &msgs[0],
+ MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, &msgs[1],
+ MCP2221_I2C_RD_RPT_START,
+ 0, 0, NULL);
+ if (ret)
+ goto exit;
+ ret = num;
+ } else {
+ dev_err(&adapter->dev,
+ "unsupported multi-msg i2c transaction\n");
+ ret = -EOPNOTSUPP;
+ }
+ } else {
+ dev_err(&adapter->dev,
+ "unsupported multi-msg i2c transaction\n");
+ ret = -EOPNOTSUPP;
+ }
+
+exit:
+ hid_hw_power(mcp->hdev, PM_HINT_NORMAL);
+ mutex_unlock(&mcp->lock);
+ return ret;
+}
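A short sketch of the one multi-message shape accepted above, as a hypothetical client driver sitting behind this adapter might issue it (example_read_reg and the single-byte register layout are assumptions, not part of this patch):

#include <linux/i2c.h>

/* Hypothetical client helper: write-then-read to the same address is
 * the only two-message transaction mcp_i2c_xfer() supports.
 */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, 2);

	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}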
+
+static int mcp_smbus_write(struct mcp2221 *mcp, u16 addr,
+ u8 command, u8 *buf, u8 len, int type,
+ u8 last_status)
+{
+ int data_len, ret;
+
+ mcp->txbuf[0] = type;
+ mcp->txbuf[1] = len + 1; /* 1 is due to command byte itself */
+ mcp->txbuf[2] = 0;
+ mcp->txbuf[3] = (u8)(addr << 1);
+ mcp->txbuf[4] = command;
+
+ switch (len) {
+ case 0:
+ data_len = 5;
+ break;
+ case 1:
+ mcp->txbuf[5] = buf[0];
+ data_len = 6;
+ break;
+ case 2:
+ mcp->txbuf[5] = buf[0];
+ mcp->txbuf[6] = buf[1];
+ data_len = 7;
+ break;
+ default:
+ memcpy(&mcp->txbuf[5], buf, len);
+ data_len = len + 5;
+ }
+
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, data_len);
+ if (ret)
+ return ret;
+
+ if (last_status) {
+ usleep_range(980, 1000);
+
+ ret = mcp_chk_last_cmd_status(mcp);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int mcp_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size,
+ union i2c_smbus_data *data)
+{
+ int ret;
+ struct mcp2221 *mcp = i2c_get_adapdata(adapter);
+
+ hid_hw_power(mcp->hdev, PM_HINT_FULLON);
+
+ mutex_lock(&mcp->lock);
+
+ ret = mcp_set_i2c_speed(mcp);
+ if (ret)
+ goto exit;
+
+ switch (size) {
+
+ case I2C_SMBUS_QUICK:
+ if (read_write == I2C_SMBUS_READ)
+ ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_DATA,
+ addr, 0, &data->byte);
+ else
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_DATA, 1);
+ break;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_READ)
+ ret = mcp_i2c_smbus_read(mcp, NULL, MCP2221_I2C_RD_DATA,
+ addr, 1, &data->byte);
+ else
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_DATA, 1);
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, 1, &data->byte);
+ } else {
+ ret = mcp_smbus_write(mcp, addr, command, &data->byte,
+ 1, MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, 2, (u8 *)&data->word);
+ } else {
+ ret = mcp_smbus_write(mcp, addr, command,
+ (u8 *)&data->word, 2,
+ MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 1);
+ if (ret)
+ goto exit;
+
+ mcp->rxbuf_idx = 0;
+ mcp->rxbuf = data->block;
+ mcp->txbuf[0] = MCP2221_I2C_GET_DATA;
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ if (ret)
+ goto exit;
+ } else {
+ if (!data->block[0]) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = mcp_smbus_write(mcp, addr, command, data->block,
+ data->block[0] + 1,
+ MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_READ) {
+ ret = mcp_smbus_write(mcp, addr, command, NULL,
+ 0, MCP2221_I2C_WR_NO_STOP, 1);
+ if (ret)
+ goto exit;
+
+ mcp->rxbuf_idx = 0;
+ mcp->rxbuf = data->block;
+ mcp->txbuf[0] = MCP2221_I2C_GET_DATA;
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ if (ret)
+ goto exit;
+ } else {
+ if (!data->block[0]) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = mcp_smbus_write(mcp, addr, command,
+ &data->block[1], data->block[0],
+ MCP2221_I2C_WR_DATA, 1);
+ }
+ break;
+ case I2C_SMBUS_PROC_CALL:
+ ret = mcp_smbus_write(mcp, addr, command,
+ (u8 *)&data->word,
+ 2, MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, 2, (u8 *)&data->word);
+ break;
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ ret = mcp_smbus_write(mcp, addr, command, data->block,
+ data->block[0] + 1,
+ MCP2221_I2C_WR_NO_STOP, 0);
+ if (ret)
+ goto exit;
+
+ ret = mcp_i2c_smbus_read(mcp, NULL,
+ MCP2221_I2C_RD_RPT_START,
+ addr, I2C_SMBUS_BLOCK_MAX,
+ data->block);
+ break;
+ default:
+ dev_err(&mcp->adapter.dev,
+ "unsupported smbus transaction size:%d\n", size);
+ ret = -EOPNOTSUPP;
+ }
+
+exit:
+ hid_hw_power(mcp->hdev, PM_HINT_NORMAL);
+ mutex_unlock(&mcp->lock);
+ return ret;
+}
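From user space the same paths can be exercised through i2c-dev; a hedged sketch using the i2c-tools helper library (the /dev/i2c-1 node number and the 0x50 slave address are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/i2c-dev.h>
#include <i2c/smbus.h>	/* libi2c from i2c-tools, assumed available */

int main(void)
{
	int file = open("/dev/i2c-1", O_RDWR);	/* adapter registered by this driver */
	__s32 val;

	if (file < 0)
		return 1;
	if (ioctl(file, I2C_SLAVE, 0x50) < 0)	/* bind to the slave address */
		return 1;
	val = i2c_smbus_read_byte_data(file, 0x00);	/* an I2C_SMBUS_BYTE_DATA read */
	if (val >= 0)
		printf("reg 0x00 = 0x%02x\n", val);
	close(file);
	return val < 0;
}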
+
+static u32 mcp_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_PEC);
+}
+
+static const struct i2c_algorithm mcp_i2c_algo = {
+ .master_xfer = mcp_i2c_xfer,
+ .smbus_xfer = mcp_smbus_xfer,
+ .functionality = mcp_i2c_func,
+};
+
+/* Gives current state of i2c engine inside mcp2221 */
+static int mcp_get_i2c_eng_state(struct mcp2221 *mcp,
+ u8 *data, u8 idx)
+{
+ int ret;
+
+ switch (data[idx]) {
+ case MCP2221_I2C_WRADDRL_NACK:
+ case MCP2221_I2C_WRADDRL_SEND:
+ ret = -ENXIO;
+ break;
+ case MCP2221_I2C_START_TOUT:
+ case MCP2221_I2C_STOP_TOUT:
+ case MCP2221_I2C_WRADDRL_TOUT:
+ case MCP2221_I2C_WRDATA_TOUT:
+ ret = -ETIMEDOUT;
+ break;
+ case MCP2221_I2C_ENG_BUSY:
+ ret = -EAGAIN;
+ break;
+ case MCP2221_SUCCESS:
+ ret = 0x00;
+ break;
+ default:
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*
+ * The MCP2221 uses an interrupt endpoint for input reports. This
+ * function is called by the HID layer when it receives an input
+ * report from the mcp2221, which is actually a response to the
+ * previously sent command.
+ *
+ * MCP2221A firmware specific return codes are parsed and 0 or an
+ * appropriate negative error code is returned. A delayed response
+ * results in a timeout error and stray responses result in -EIO.
+ */
+static int mcp2221_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ u8 *buf;
+ struct mcp2221 *mcp = hid_get_drvdata(hdev);
+
+ switch (data[0]) {
+
+ case MCP2221_I2C_WR_DATA:
+ case MCP2221_I2C_WR_NO_STOP:
+ case MCP2221_I2C_RD_DATA:
+ case MCP2221_I2C_RD_RPT_START:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ mcp->status = 0;
+ break;
+ default:
+ mcp->status = mcp_get_i2c_eng_state(mcp, data, 2);
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ case MCP2221_I2C_PARAM_OR_STATUS:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((mcp->txbuf[3] == MCP2221_I2C_SET_SPEED) &&
+ (data[3] != MCP2221_I2C_SET_SPEED)) {
+ mcp->status = -EAGAIN;
+ break;
+ }
+ if (data[20] & MCP2221_I2C_MASK_ADDR_NACK) {
+ mcp->status = -ENXIO;
+ break;
+ }
+ mcp->status = mcp_get_i2c_eng_state(mcp, data, 8);
+ break;
+ default:
+ mcp->status = -EIO;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ case MCP2221_I2C_GET_DATA:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if (data[2] == MCP2221_I2C_ADDR_NACK) {
+ mcp->status = -ENXIO;
+ break;
+ }
+ if (!mcp_get_i2c_eng_state(mcp, data, 2)
+ && (data[3] == 0)) {
+ mcp->status = 0;
+ break;
+ }
+ if (data[3] == 127) {
+ mcp->status = -EIO;
+ break;
+ }
+ if (data[2] == MCP2221_I2C_READ_COMPL) {
+ buf = mcp->rxbuf;
+ memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]);
+ mcp->rxbuf_idx = mcp->rxbuf_idx + data[3];
+ mcp->status = 0;
+ break;
+ }
+ mcp->status = -EIO;
+ break;
+ default:
+ mcp->status = -EIO;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ default:
+ mcp->status = -EIO;
+ complete(&mcp->wait_in_report);
+ }
+
+ return 1;
+}
+
+static int mcp2221_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct mcp2221 *mcp;
+
+ mcp = devm_kzalloc(&hdev->dev, sizeof(*mcp), GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "can't parse reports\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "can't start hardware\n");
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "can't open device\n");
+ goto err_hstop;
+ }
+
+ mutex_init(&mcp->lock);
+ init_completion(&mcp->wait_in_report);
+ hid_set_drvdata(hdev, mcp);
+ mcp->hdev = hdev;
+
+ /* Set I2C bus clock divider */
+ if (i2c_clk_freq > 400)
+ i2c_clk_freq = 400;
+ if (i2c_clk_freq < 50)
+ i2c_clk_freq = 50;
+ mcp->cur_i2c_clk_div = (12000000 / (i2c_clk_freq * 1000)) - 3;
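+ /*
+ * Worked example (illustrative): the default 400 kHz bus clock
+ * gives a divider of (12000000 / 400000) - 3 = 27; the 50 kHz
+ * minimum gives (12000000 / 50000) - 3 = 237.
+ */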
+
+ mcp->adapter.owner = THIS_MODULE;
+ mcp->adapter.class = I2C_CLASS_HWMON;
+ mcp->adapter.algo = &mcp_i2c_algo;
+ mcp->adapter.retries = 1;
+ mcp->adapter.dev.parent = &hdev->dev;
+ snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
+ "MCP2221 usb-i2c bridge on hidraw%d",
+ ((struct hidraw *)hdev->hidraw)->minor);
+
+ ret = i2c_add_adapter(&mcp->adapter);
+ if (ret) {
+ hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret);
+ goto err_i2c;
+ }
+ i2c_set_adapdata(&mcp->adapter, mcp);
+
+ return 0;
+
+err_i2c:
+ hid_hw_close(mcp->hdev);
+err_hstop:
+ hid_hw_stop(mcp->hdev);
+ return ret;
+}
+
+static void mcp2221_remove(struct hid_device *hdev)
+{
+ struct mcp2221 *mcp = hid_get_drvdata(hdev);
+
+ i2c_del_adapter(&mcp->adapter);
+ hid_hw_close(mcp->hdev);
+ hid_hw_stop(mcp->hdev);
+}
+
+static const struct hid_device_id mcp2221_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_MCP2221) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mcp2221_devices);
+
+static struct hid_driver mcp2221_driver = {
+ .name = "mcp2221",
+ .id_table = mcp2221_devices,
+ .probe = mcp2221_probe,
+ .remove = mcp2221_remove,
+ .raw_event = mcp2221_raw_event,
+};
+
+/* Register with HID core */
+module_hid_driver(mcp2221_driver);
+
+MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>");
+MODULE_DESCRIPTION("MCP2221 Microchip HID USB to I2C master bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 3735546bb524..ebec818344af 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -398,9 +398,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
#endif
-#if IS_ENABLED(CONFIG_HID_ITE)
- { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
-#endif
#if IS_ENABLED(CONFIG_HID_ICADE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
#endif
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9ce22acdfaca..8cffa84c9650 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -217,7 +217,6 @@ static int rmi_hid_read_block(struct rmi_transport_dev *xport, u16 addr,
ret = rmi_write_report(hdev, data->writeReport,
data->output_report_size);
if (ret != data->output_report_size) {
- clear_bit(RMI_READ_REQUEST_PENDING, &data->flags);
dev_err(&hdev->dev,
"failed to write request output report (%d)\n",
ret);
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
index bb85985b1620..7c445b203f2a 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.h
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -82,7 +82,7 @@ struct ishtp_msg_hdr {
struct ishtp_bus_message {
uint8_t hbm_cmd;
- uint8_t data[0];
+ uint8_t data[];
} __packed;
/**
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 39e0e6c73adf..1cc6364aa957 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -214,7 +214,7 @@ struct ishtp_device {
const struct ishtp_hw_ops *ops;
size_t mtu;
uint32_t ishtp_msg_hdr;
- char hw[0] __aligned(sizeof(void *));
+ char hw[] __aligned(sizeof(void *));
};
static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index a02ce43d778d..32e3bc0aa665 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -533,7 +533,6 @@ struct hv_dynmem_device {
* State to synchronize hot-add.
*/
struct completion ol_waitevent;
- bool ha_waiting;
/*
* This thread handles hot-add
* requests from the host as well as notifying
@@ -634,10 +633,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case MEM_ONLINE:
case MEM_CANCEL_ONLINE:
- if (dm_device.ha_waiting) {
- dm_device.ha_waiting = false;
- complete(&dm_device.ol_waitevent);
- }
+ complete(&dm_device.ol_waitevent);
break;
case MEM_OFFLINE:
@@ -726,8 +722,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
has->covered_end_pfn += processed_pfn;
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
- init_completion(&dm_device.ol_waitevent);
- dm_device.ha_waiting = !memhp_auto_online;
+ reinit_completion(&dm_device.ol_waitevent);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
@@ -753,15 +748,14 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}
/*
- * Wait for the memory block to be onlined when memory onlining
- * is done outside of kernel (memhp_auto_online). Since the hot
- * add has succeeded, it is ok to proceed even if the pages in
- * the hot added region have not been "onlined" within the
- * allowed time.
+ * Wait for memory to get onlined. If the kernel onlined the
+ * memory when adding it, this will return directly. Otherwise,
+ * it will wait for user space to online the memory. This helps
+ * to avoid adding memory faster than it is getting onlined. As
+ * adding succeeded, it is ok to proceed even if the memory was
+ * not onlined in time.
*/
- if (dm_device.ha_waiting)
- wait_for_completion_timeout(&dm_device.ol_waitevent,
- 5*HZ);
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
post_status(&dm_device);
}
}
@@ -1706,6 +1700,7 @@ static int balloon_probe(struct hv_device *dev,
#ifdef CONFIG_MEMORY_HOTPLUG
set_online_page_callback(&hv_online_page);
+ init_completion(&dm_device.ol_waitevent);
register_memory_notifier(&hv_memory_nb);
#endif
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 37740e992cfa..826a1054100d 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -9,7 +9,7 @@ menuconfig HWSPINLOCK
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
depends on HWSPINLOCK
- depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3
+ depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3 || COMPILE_TEST
help
Say y here to support the OMAP Hardware Spinlock device (firstly
introduced in OMAP4).
@@ -19,7 +19,7 @@ config HWSPINLOCK_OMAP
config HWSPINLOCK_QCOM
tristate "Qualcomm Hardware Spinlock device"
depends on HWSPINLOCK
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
select MFD_SYSCON
help
Say y here to support the Qualcomm Hardware Mutex functionality, which
@@ -31,7 +31,7 @@ config HWSPINLOCK_QCOM
config HWSPINLOCK_SIRF
tristate "SIRF Hardware Spinlock device"
depends on HWSPINLOCK
- depends on ARCH_SIRF
+ depends on ARCH_SIRF || COMPILE_TEST
help
Say y here to support the SIRF Hardware Spinlock device, which
provides a synchronisation mechanism for the various processors
@@ -42,7 +42,7 @@ config HWSPINLOCK_SIRF
config HWSPINLOCK_SPRD
tristate "SPRD Hardware Spinlock device"
- depends on ARCH_SPRD
+ depends on ARCH_SPRD || COMPILE_TEST
depends on HWSPINLOCK
help
Say y here to support the SPRD Hardware Spinlock device.
@@ -51,7 +51,7 @@ config HWSPINLOCK_SPRD
config HWSPINLOCK_STM32
tristate "STM32 Hardware Spinlock device"
- depends on MACH_STM32MP157
+ depends on MACH_STM32MP157 || COMPILE_TEST
depends on HWSPINLOCK
help
Say y here to support the STM32 Hardware Spinlock device.
@@ -61,7 +61,7 @@ config HWSPINLOCK_STM32
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
depends on HWSPINLOCK
- depends on ARCH_U8500
+ depends on ARCH_U8500 || COMPILE_TEST
help
Say y here to support the STE Hardware Semaphore functionality, which
provides a synchronisation mechanism for the various processor on the
diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
index 9eb6bd020dc7..29892767bb7a 100644
--- a/drivers/hwspinlock/hwspinlock_internal.h
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -56,7 +56,7 @@ struct hwspinlock_device {
const struct hwspinlock_ops *ops;
int base_id;
int num_locks;
- struct hwspinlock lock[0];
+ struct hwspinlock lock[];
};
static inline int hwlock_to_id(struct hwspinlock *hwlock)
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 6ff30e25af55..83e841be1081 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -110,4 +110,25 @@ config CORESIGHT_CPU_DEBUG
properly, please refer Documentation/trace/coresight-cpu-debug.rst
for detailed description and the example for usage.
+config CORESIGHT_CTI
+ bool "CoreSight Cross Trigger Interface (CTI) driver"
+ depends on ARM || ARM64
+ help
+ This driver provides support for CoreSight CTI and CTM components.
+ These provide hardware triggering events between CoreSight trace
+ source and sink components. These can be used to halt trace or
+ inject events into the trace stream. CTI also provides a software
+ control to trigger the same halt events. This can halt trace faster
+ than disabling sources and sinks through normal driver software.
+
+config CORESIGHT_CTI_INTEGRATION_REGS
+ bool "Access CTI CoreSight Integration Registers"
+ depends on CORESIGHT_CTI
+ help
+ This option adds support for the CoreSight integration registers on
+ this device. The integration registers allow the exploration of the
+ CTI trigger connections between this and other devices. These
+ registers are not used in normal operation and can leave devices in
+ an inconsistent state.
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 3c0ac421e211..0e3e72f0f510 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -17,3 +17,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o \
+ coresight-cti-platform.o \
+ coresight-cti-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
new file mode 100644
index 000000000000..b44d83142b62
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linaro Limited. All rights reserved.
+ */
+
+#include <dt-bindings/arm/coresight-cti-dt.h>
+#include <linux/of.h>
+
+#include "coresight-cti.h"
+
+/* Number of CTI signals in the v8 architecturally defined connection */
+#define NR_V8PE_IN_SIGS 2
+#define NR_V8PE_OUT_SIGS 3
+#define NR_V8ETM_INOUT_SIGS 4
+
+/* CTI device tree trigger connection node keyword */
+#define CTI_DT_CONNS "trig-conns"
+
+/* CTI device tree connection property keywords */
+#define CTI_DT_V8ARCH_COMPAT "arm,coresight-cti-v8-arch"
+#define CTI_DT_CSDEV_ASSOC "arm,cs-dev-assoc"
+#define CTI_DT_TRIGIN_SIGS "arm,trig-in-sigs"
+#define CTI_DT_TRIGOUT_SIGS "arm,trig-out-sigs"
+#define CTI_DT_TRIGIN_TYPES "arm,trig-in-types"
+#define CTI_DT_TRIGOUT_TYPES "arm,trig-out-types"
+#define CTI_DT_FILTER_OUT_SIGS "arm,trig-filters"
+#define CTI_DT_CONN_NAME "arm,trig-conn-name"
+#define CTI_DT_CTM_ID "arm,cti-ctm-id"
+
+#ifdef CONFIG_OF
+/*
+ * CTI can be bound to a CPU, or a system device.
+ * CPU can be declared at the device top level or in a connections node
+ * so need to check relative to node not device.
+ */
+static int of_cti_get_cpu_at_node(const struct device_node *node)
+{
+ int cpu;
+ struct device_node *dn;
+
+ if (node == NULL)
+ return -1;
+
+ dn = of_parse_phandle(node, "cpu", 0);
+ /* CTI affinity defaults to no cpu */
+ if (!dn)
+ return -1;
+ cpu = of_cpu_node_to_id(dn);
+ of_node_put(dn);
+
+ /* No affinity if no cpu nodes are found */
+ return (cpu < 0) ? -1 : cpu;
+}
+
+#else
+static int of_cti_get_cpu_at_node(const struct device_node *node)
+{
+ return -1;
+}
+
+#endif
+
+/*
+ * CTI can be bound to a CPU, or a system device.
+ * CPU can be declared at the device top level or in a connections node
+ * so need to check relative to node not device.
+ */
+static int cti_plat_get_cpu_at_node(struct fwnode_handle *fwnode)
+{
+ if (is_of_node(fwnode))
+ return of_cti_get_cpu_at_node(to_of_node(fwnode));
+ return -1;
+}
+
+const char *cti_plat_get_node_name(struct fwnode_handle *fwnode)
+{
+ if (is_of_node(fwnode))
+ return of_node_full_name(to_of_node(fwnode));
+ return "unknown";
+}
+
+/*
+ * Extract a name from the fwnode.
+ * If the device associated with the node is a coresight_device, then return
+ * that name and the coresight_device pointer, otherwise return the node name.
+ */
+static const char *
+cti_plat_get_csdev_or_node_name(struct fwnode_handle *fwnode,
+ struct coresight_device **csdev)
+{
+ const char *name = NULL;
+ *csdev = coresight_find_csdev_by_fwnode(fwnode);
+ if (*csdev)
+ name = dev_name(&(*csdev)->dev);
+ else
+ name = cti_plat_get_node_name(fwnode);
+ return name;
+}
+
+static bool cti_plat_node_name_eq(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ if (is_of_node(fwnode))
+ return of_node_name_eq(to_of_node(fwnode), name);
+ return false;
+}
+
+static int cti_plat_create_v8_etm_connection(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ int ret = -ENOMEM, i;
+ struct fwnode_handle *root_fwnode, *cs_fwnode;
+ const char *assoc_name = NULL;
+ struct coresight_device *csdev;
+ struct cti_trig_con *tc = NULL;
+
+ root_fwnode = dev_fwnode(dev);
+ if (IS_ERR_OR_NULL(root_fwnode))
+ return -EINVAL;
+
+ /* Can optionally have an etm node - return if not */
+ cs_fwnode = fwnode_find_reference(root_fwnode, CTI_DT_CSDEV_ASSOC, 0);
+ if (IS_ERR_OR_NULL(cs_fwnode))
+ return 0;
+
+ /* allocate memory */
+ tc = cti_allocate_trig_con(dev, NR_V8ETM_INOUT_SIGS,
+ NR_V8ETM_INOUT_SIGS);
+ if (!tc)
+ goto create_v8_etm_out;
+
+ /* build connection data */
+ tc->con_in->used_mask = 0xF0; /* sigs <4,5,6,7> */
+ tc->con_out->used_mask = 0xF0; /* sigs <4,5,6,7> */
+
+ /*
+ * The EXTOUT type signals from the ETM are connected to a set of input
+ * triggers on the CTI, the EXTIN being connected to output triggers.
+ */
+ for (i = 0; i < NR_V8ETM_INOUT_SIGS; i++) {
+ tc->con_in->sig_types[i] = ETM_EXTOUT;
+ tc->con_out->sig_types[i] = ETM_EXTIN;
+ }
+
+ /*
+ * We look to see if the ETM coresight device associated with this
+ * handle has been registered with the system - i.e. probed before
+ * this CTI. If so csdev will be non NULL and we can use the device
+ * name and pass the csdev to the connection entry function where
+ * the association will be recorded.
+ * If not, then simply record the name in the connection data; the
+ * probing of the ETM will then call into the CTI driver API to
+ * update the association.
+ */
+ assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode, &csdev);
+ ret = cti_add_connection_entry(dev, drvdata, tc, csdev, assoc_name);
+
+create_v8_etm_out:
+ fwnode_handle_put(cs_fwnode);
+ return ret;
+}
+
+/*
+ * Create an architecturally defined v8 connection:
+ * must have a cpu, can have an ETM.
+ */
+static int cti_plat_create_v8_connections(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ struct cti_device *cti_dev = &drvdata->ctidev;
+ struct cti_trig_con *tc = NULL;
+ int cpuid = 0;
+ char cpu_name_str[16];
+ int ret = -ENOMEM;
+
+ /* Must have a cpu node */
+ cpuid = cti_plat_get_cpu_at_node(dev_fwnode(dev));
+ if (cpuid < 0) {
+ dev_warn(dev,
+ "ARM v8 architectural CTI connection: missing cpu\n");
+ return -EINVAL;
+ }
+ cti_dev->cpu = cpuid;
+
+ /* Allocate the v8 cpu connection memory */
+ tc = cti_allocate_trig_con(dev, NR_V8PE_IN_SIGS, NR_V8PE_OUT_SIGS);
+ if (!tc)
+ goto of_create_v8_out;
+
+ /* Set the v8 PE CTI connection data */
+ tc->con_in->used_mask = 0x3; /* sigs <0 1> */
+ tc->con_in->sig_types[0] = PE_DBGTRIGGER;
+ tc->con_in->sig_types[1] = PE_PMUIRQ;
+ tc->con_out->used_mask = 0x7; /* sigs <0 1 2 > */
+ tc->con_out->sig_types[0] = PE_EDBGREQ;
+ tc->con_out->sig_types[1] = PE_DBGRESTART;
+ tc->con_out->sig_types[2] = PE_CTIIRQ;
+ scnprintf(cpu_name_str, sizeof(cpu_name_str), "cpu%d", cpuid);
+
+ ret = cti_add_connection_entry(dev, drvdata, tc, NULL, cpu_name_str);
+ if (ret)
+ goto of_create_v8_out;
+
+ /* Create the v8 ETM associated connection */
+ ret = cti_plat_create_v8_etm_connection(dev, drvdata);
+ if (ret)
+ goto of_create_v8_out;
+
+ /* filter pe_edbgreq - PE trigout sig <0> */
+ drvdata->config.trig_out_filter |= 0x1;
+
+of_create_v8_out:
+ return ret;
+}
+
+static int cti_plat_check_v8_arch_compatible(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (is_of_node(fwnode))
+ return of_device_is_compatible(to_of_node(fwnode),
+ CTI_DT_V8ARCH_COMPAT);
+ return 0;
+}
+
+static int cti_plat_count_sig_elements(const struct fwnode_handle *fwnode,
+ const char *name)
+{
+ int nr_elem = fwnode_property_count_u32(fwnode, name);
+
+ return (nr_elem < 0 ? 0 : nr_elem);
+}
+
+static int cti_plat_read_trig_group(struct cti_trig_grp *tgrp,
+ const struct fwnode_handle *fwnode,
+ const char *grp_name)
+{
+ int idx, err = 0;
+ u32 *values;
+
+ if (!tgrp->nr_sigs)
+ return 0;
+
+ values = kcalloc(tgrp->nr_sigs, sizeof(u32), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ err = fwnode_property_read_u32_array(fwnode, grp_name,
+ values, tgrp->nr_sigs);
+
+ if (!err) {
+ /* set the signal usage mask */
+ for (idx = 0; idx < tgrp->nr_sigs; idx++)
+ tgrp->used_mask |= BIT(values[idx]);
+ }
+
+ kfree(values);
+ return err;
+}
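As a worked example (property values assumed, not from this patch): a trig-conns node carrying arm,trig-in-sigs = <0 1> yields used_mask = BIT(0) | BIT(1) = 0x3, the same mask the v8 PE connection code assigns above for signals <0 1>.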
+
+static int cti_plat_read_trig_types(struct cti_trig_grp *tgrp,
+ const struct fwnode_handle *fwnode,
+ const char *type_name)
+{
+ int items, err = 0, nr_sigs;
+ u32 *values = NULL, i;
+
+ /* allocate an array according to number of signals in connection */
+ nr_sigs = tgrp->nr_sigs;
+ if (!nr_sigs)
+ return 0;
+
+ /* see if any types have been included in the device description */
+ items = cti_plat_count_sig_elements(fwnode, type_name);
+ if (items > nr_sigs)
+ return -EINVAL;
+
+ /* need an array to store the values iff there are any */
+ if (items) {
+ values = kcalloc(items, sizeof(u32), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ err = fwnode_property_read_u32_array(fwnode, type_name,
+ values, items);
+ if (err)
+ goto read_trig_types_out;
+ }
+
+ /*
+ * Match type id to signal index, 1st type to 1st index etc.
+ * If there are fewer types than signals, default the remainder to GEN_IO.
+ */
+ for (i = 0; i < nr_sigs; i++) {
+ if (i < items) {
+ tgrp->sig_types[i] =
+ values[i] < CTI_TRIG_MAX ? values[i] : GEN_IO;
+ } else {
+ tgrp->sig_types[i] = GEN_IO;
+ }
+ }
+
+read_trig_types_out:
+ kfree(values);
+ return err;
+}
+
+static int cti_plat_process_filter_sigs(struct cti_drvdata *drvdata,
+ const struct fwnode_handle *fwnode)
+{
+ struct cti_trig_grp *tg = NULL;
+ int err = 0, nr_filter_sigs;
+
+ nr_filter_sigs = cti_plat_count_sig_elements(fwnode,
+ CTI_DT_FILTER_OUT_SIGS);
+ if (nr_filter_sigs == 0)
+ return 0;
+
+ if (nr_filter_sigs > drvdata->config.nr_trig_max)
+ return -EINVAL;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return -ENOMEM;
+
+ err = cti_plat_read_trig_group(tg, fwnode, CTI_DT_FILTER_OUT_SIGS);
+ if (!err)
+ drvdata->config.trig_out_filter |= tg->used_mask;
+
+ kfree(tg);
+ return err;
+}
+
+static int cti_plat_create_connection(struct device *dev,
+ struct cti_drvdata *drvdata,
+ struct fwnode_handle *fwnode)
+{
+ struct cti_trig_con *tc = NULL;
+ int cpuid = -1, err = 0;
+ struct fwnode_handle *cs_fwnode = NULL;
+ struct coresight_device *csdev = NULL;
+ const char *assoc_name = "unknown";
+ char cpu_name_str[16];
+ int nr_sigs_in, nr_sigs_out;
+
+ /* look to see how many in and out signals we have */
+ nr_sigs_in = cti_plat_count_sig_elements(fwnode, CTI_DT_TRIGIN_SIGS);
+ nr_sigs_out = cti_plat_count_sig_elements(fwnode, CTI_DT_TRIGOUT_SIGS);
+
+ if ((nr_sigs_in > drvdata->config.nr_trig_max) ||
+ (nr_sigs_out > drvdata->config.nr_trig_max))
+ return -EINVAL;
+
+ tc = cti_allocate_trig_con(dev, nr_sigs_in, nr_sigs_out);
+ if (!tc)
+ return -ENOMEM;
+
+ /* look for the signals properties. */
+ err = cti_plat_read_trig_group(tc->con_in, fwnode,
+ CTI_DT_TRIGIN_SIGS);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_read_trig_types(tc->con_in, fwnode,
+ CTI_DT_TRIGIN_TYPES);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_read_trig_group(tc->con_out, fwnode,
+ CTI_DT_TRIGOUT_SIGS);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_read_trig_types(tc->con_out, fwnode,
+ CTI_DT_TRIGOUT_TYPES);
+ if (err)
+ goto create_con_err;
+
+ err = cti_plat_process_filter_sigs(drvdata, fwnode);
+ if (err)
+ goto create_con_err;
+
+ /* read the connection name if set - may be overridden later */
+ fwnode_property_read_string(fwnode, CTI_DT_CONN_NAME, &assoc_name);
+
+ /* associated cpu ? */
+ cpuid = cti_plat_get_cpu_at_node(fwnode);
+ if (cpuid >= 0) {
+ drvdata->ctidev.cpu = cpuid;
+ scnprintf(cpu_name_str, sizeof(cpu_name_str), "cpu%d", cpuid);
+ assoc_name = cpu_name_str;
+ } else {
+ /* associated device ? */
+ cs_fwnode = fwnode_find_reference(fwnode,
+ CTI_DT_CSDEV_ASSOC, 0);
+ if (!IS_ERR_OR_NULL(cs_fwnode)) {
+ assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
+ &csdev);
+ fwnode_handle_put(cs_fwnode);
+ }
+ }
+ /* set up a connection */
+ err = cti_add_connection_entry(dev, drvdata, tc, csdev, assoc_name);
+
+create_con_err:
+ return err;
+}
+
+static int cti_plat_create_impdef_connections(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ int rc = 0;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_handle *child = NULL;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ fwnode_for_each_child_node(fwnode, child) {
+ if (cti_plat_node_name_eq(child, CTI_DT_CONNS))
+ rc = cti_plat_create_connection(dev, drvdata,
+ child);
+ if (rc != 0)
+ break;
+ }
+ fwnode_handle_put(child);
+
+ return rc;
+}
+
+/* get the hardware configuration & connection data. */
+int cti_plat_get_hw_data(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ int rc = 0;
+ struct cti_device *cti_dev = &drvdata->ctidev;
+
+ /* get any CTM ID - defaults to 0 */
+ device_property_read_u32(dev, CTI_DT_CTM_ID, &cti_dev->ctm_id);
+
+ /* check for a v8 architectural CTI device */
+ if (cti_plat_check_v8_arch_compatible(dev))
+ rc = cti_plat_create_v8_connections(dev, drvdata);
+ else
+ rc = cti_plat_create_impdef_connections(dev, drvdata);
+ if (rc)
+ return rc;
+
+ /* if no connections, just add a single default based on max IN-OUT */
+ if (cti_dev->nr_trig_con == 0)
+ rc = cti_add_default_connection(dev, drvdata);
+ return rc;
+}
+
+struct coresight_platform_data *
+coresight_cti_get_platform_data(struct device *dev)
+{
+ int ret = -ENOENT;
+ struct coresight_platform_data *pdata = NULL;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (IS_ERR_OR_NULL(fwnode))
+ goto error;
+
+ /*
+ * Alloc platform data but leave it zero init. CTI does not use the
+ * same connection infrastructure as trace path components but an
+ * empty struct enables us to use the standard coresight component
+ * registration code.
+ */
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* get some CTI specifics */
+ ret = cti_plat_get_hw_data(dev, drvdata);
+
+ if (!ret)
+ return pdata;
+error:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
new file mode 100644
index 000000000000..1f8fb7c15e80
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include <linux/coresight.h>
+
+#include "coresight-cti.h"
+
+/*
+ * Number of statically declared attribute groups. The value includes
+ * the groups plus the NULL terminator at the end of the table.
+ */
+#define CORESIGHT_CTI_STATIC_GROUPS_MAX 5
+
+/*
+ * List of trigger signal type names. These match the constants declared
+ * in include/dt-bindings/arm/coresight-cti-dt.h.
+ */
+static const char * const sig_type_names[] = {
+ "genio", /* GEN_IO */
+ "intreq", /* GEN_INTREQ */
+ "intack", /* GEN_INTACK */
+ "haltreq", /* GEN_HALTREQ */
+ "restartreq", /* GEN_RESTARTREQ */
+ "pe_edbgreq", /* PE_EDBGREQ */
+ "pe_dbgrestart",/* PE_DBGRESTART */
+ "pe_ctiirq", /* PE_CTIIRQ */
+ "pe_pmuirq", /* PE_PMUIRQ */
+ "pe_dbgtrigger",/* PE_DBGTRIGGER */
+ "etm_extout", /* ETM_EXTOUT */
+ "etm_extin", /* ETM_EXTIN */
+ "snk_full", /* SNK_FULL */
+ "snk_acqcomp", /* SNK_ACQCOMP */
+ "snk_flushcomp",/* SNK_FLUSHCOMP */
+ "snk_flushin", /* SNK_FLUSHIN */
+ "snk_trigin", /* SNK_TRIGIN */
+ "stm_asyncout", /* STM_ASYNCOUT */
+ "stm_tout_spte",/* STM_TOUT_SPTE */
+ "stm_tout_sw", /* STM_TOUT_SW */
+ "stm_tout_hete",/* STM_TOUT_HETE */
+ "stm_hwevent", /* STM_HWEVENT */
+ "ela_tstart", /* ELA_TSTART */
+ "ela_tstop", /* ELA_TSTOP */
+ "ela_dbgreq", /* ELA_DBGREQ */
+};
+
+/* Show function pointer used in the connections' dynamically declared attributes */
+typedef ssize_t (*p_show_fn)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+
+/* Connection attribute types */
+enum cti_conn_attr_type {
+ CTI_CON_ATTR_NAME,
+ CTI_CON_ATTR_TRIGIN_SIG,
+ CTI_CON_ATTR_TRIGOUT_SIG,
+ CTI_CON_ATTR_TRIGIN_TYPES,
+ CTI_CON_ATTR_TRIGOUT_TYPES,
+ CTI_CON_ATTR_MAX,
+};
+
+/* Names for the connection attributes */
+static const char * const con_attr_names[CTI_CON_ATTR_MAX] = {
+ "name",
+ "in_signals",
+ "out_signals",
+ "in_types",
+ "out_types",
+};
+
+/* basic attributes */
+static ssize_t enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int enable_req;
+ bool enabled, powered;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ enable_req = atomic_read(&drvdata->config.enable_req_count);
+ spin_lock(&drvdata->spinlock);
+ powered = drvdata->config.hw_powered;
+ enabled = drvdata->config.hw_enabled;
+ spin_unlock(&drvdata->spinlock);
+
+ if (powered)
+ return sprintf(buf, "%d\n", enabled);
+ else
+ return sprintf(buf, "%d\n", !!enable_req);
+}
+
+static ssize_t enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val)
+ ret = cti_enable(drvdata->csdev);
+ else
+ ret = cti_disable(drvdata->csdev);
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR_RW(enable);
+
+static ssize_t powered_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ bool powered;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ powered = drvdata->config.hw_powered;
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%d\n", powered);
+}
+static DEVICE_ATTR_RO(powered);
+
+static ssize_t ctmid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", drvdata->ctidev.ctm_id);
+}
+static DEVICE_ATTR_RO(ctmid);
+
+static ssize_t nr_trigger_cons_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", drvdata->ctidev.nr_trig_con);
+}
+static DEVICE_ATTR_RO(nr_trigger_cons);
+
+/* attribute and group sysfs tables. */
+static struct attribute *coresight_cti_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_powered.attr,
+ &dev_attr_ctmid.attr,
+ &dev_attr_nr_trigger_cons.attr,
+ NULL,
+};
+
+/* register based attributes */
+
+/* macro to access RO registers with power check only (no enable check). */
+#define coresight_cti_reg(name, offset) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ u32 val = 0; \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ val = readl_relaxed(drvdata->base + offset); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return sprintf(buf, "0x%x\n", val); \
+} \
+static DEVICE_ATTR_RO(name)
+
+/* coresight management registers */
+coresight_cti_reg(devaff0, CTIDEVAFF0);
+coresight_cti_reg(devaff1, CTIDEVAFF1);
+coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS);
+coresight_cti_reg(devarch, CORESIGHT_DEVARCH);
+coresight_cti_reg(devid, CORESIGHT_DEVID);
+coresight_cti_reg(devtype, CORESIGHT_DEVTYPE);
+coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0);
+coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1);
+coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2);
+coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3);
+coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4);
+
+static struct attribute *coresight_cti_mgmt_attrs[] = {
+ &dev_attr_devaff0.attr,
+ &dev_attr_devaff1.attr,
+ &dev_attr_authstatus.attr,
+ &dev_attr_devarch.attr,
+ &dev_attr_devid.attr,
+ &dev_attr_devtype.attr,
+ &dev_attr_pidr0.attr,
+ &dev_attr_pidr1.attr,
+ &dev_attr_pidr2.attr,
+ &dev_attr_pidr3.attr,
+ &dev_attr_pidr4.attr,
+ NULL,
+};
+
+/* CTI low level programming registers */
+
+/*
+ * Show a simple 32 bit value if enabled and powered.
+ * If inaccessible and pcached_val is not NULL, show the cached value.
+ */
+static ssize_t cti_reg32_show(struct device *dev, char *buf,
+ u32 *pcached_val, int reg_offset)
+{
+ u32 val = 0;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ if ((reg_offset >= 0) && cti_active(config)) {
+ CS_UNLOCK(drvdata->base);
+ val = readl_relaxed(drvdata->base + reg_offset);
+ if (pcached_val)
+ *pcached_val = val;
+ CS_LOCK(drvdata->base);
+ } else if (pcached_val) {
+ val = *pcached_val;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%#x\n", val);
+}
+
+/*
+ * Store a simple 32 bit value.
+ * If pcached_val is not NULL, copy the value there too;
+ * if reg_offset >= 0, write through when enabled.
+ */
+static ssize_t cti_reg32_store(struct device *dev, const char *buf,
+ size_t size, u32 *pcached_val, int reg_offset)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* local store */
+ if (pcached_val)
+ *pcached_val = (u32)val;
+
+ /* write through if offset and enabled */
+ if ((reg_offset >= 0) && cti_active(config))
+ cti_write_single_reg(drvdata, reg_offset, val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+
+/* Standard macro for simple rw cti config registers */
+#define cti_config_reg32_rw(name, cfgname, offset) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ return cti_reg32_show(dev, buf, \
+ &drvdata->config.cfgname, offset); \
+} \
+ \
+static ssize_t name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ return cti_reg32_store(dev, buf, size, \
+ &drvdata->config.cfgname, offset); \
+} \
+static DEVICE_ATTR_RW(name)
+
+static ssize_t inout_sel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = (u32)drvdata->config.ctiinout_sel;
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t inout_sel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+ if (val > (CTIINOUTEN_MAX - 1))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.ctiinout_sel = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(inout_sel);
+
+static ssize_t inen_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ index = drvdata->config.ctiinout_sel;
+ val = drvdata->config.ctiinen[index];
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t inen_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ index = config->ctiinout_sel;
+ config->ctiinen[index] = val;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIINEN(index), val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(inen);
+
+static ssize_t outen_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ index = drvdata->config.ctiinout_sel;
+ val = drvdata->config.ctiouten[index];
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t outen_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ int index;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ index = config->ctiinout_sel;
+ config->ctiouten[index] = val;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIOUTEN(index), val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(outen);
+
+static ssize_t intack_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ cti_write_intack(dev, val);
+ return size;
+}
+static DEVICE_ATTR_WO(intack);
+
+cti_config_reg32_rw(gate, ctigate, CTIGATE);
+cti_config_reg32_rw(asicctl, asicctl, ASICCTL);
+cti_config_reg32_rw(appset, ctiappset, CTIAPPSET);
+
+static ssize_t appclear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+
+ /* a 1'b1 in appclr clears down the same bit in appset */
+ config->ctiappset &= ~val;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIAPPCLEAR, val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_WO(appclear);
+
+static ssize_t apppulse_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIAPPPULSE, val);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_WO(apppulse);
+
+coresight_cti_reg(triginstatus, CTITRIGINSTATUS);
+coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS);
+coresight_cti_reg(chinstatus, CTICHINSTATUS);
+coresight_cti_reg(choutstatus, CTICHOUTSTATUS);
+
+/*
+ * Define CONFIG_CORESIGHT_CTI_INTEGRATION_REGS to enable access to the
+ * integration control registers. Normally only used to investigate connection
+ * data.
+ */
+#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
+
+/* macro to access RW registers with power check only (no enable check). */
+#define coresight_cti_reg_rw(name, offset) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ u32 val = 0; \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ val = readl_relaxed(drvdata->base + offset); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return sprintf(buf, "0x%x\n", val); \
+} \
+ \
+static ssize_t name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ unsigned long val = 0; \
+ if (kstrtoul(buf, 0, &val)) \
+ return -EINVAL; \
+ \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ cti_write_single_reg(drvdata, offset, val); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return size; \
+} \
+static DEVICE_ATTR_RW(name)
+
+/* macro to access WO registers with power check only (no enable check). */
+#define coresight_cti_reg_wo(name, offset) \
+static ssize_t name##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
+ unsigned long val = 0; \
+ if (kstrtoul(buf, 0, &val)) \
+ return -EINVAL; \
+ \
+ pm_runtime_get_sync(dev->parent); \
+ spin_lock(&drvdata->spinlock); \
+ if (drvdata->config.hw_powered) \
+ cti_write_single_reg(drvdata, offset, val); \
+ spin_unlock(&drvdata->spinlock); \
+ pm_runtime_put_sync(dev->parent); \
+ return size; \
+} \
+static DEVICE_ATTR_WO(name)
+
+coresight_cti_reg_rw(itchout, ITCHOUT);
+coresight_cti_reg_rw(ittrigout, ITTRIGOUT);
+coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL);
+coresight_cti_reg_wo(itchinack, ITCHINACK);
+coresight_cti_reg_wo(ittriginack, ITTRIGINACK);
+coresight_cti_reg(ittrigin, ITTRIGIN);
+coresight_cti_reg(itchin, ITCHIN);
+coresight_cti_reg(itchoutack, ITCHOUTACK);
+coresight_cti_reg(ittrigoutack, ITTRIGOUTACK);
+
+#endif /* CORESIGHT_CTI_INTEGRATION_REGS */
+
+static struct attribute *coresight_cti_regs_attrs[] = {
+ &dev_attr_inout_sel.attr,
+ &dev_attr_inen.attr,
+ &dev_attr_outen.attr,
+ &dev_attr_gate.attr,
+ &dev_attr_asicctl.attr,
+ &dev_attr_intack.attr,
+ &dev_attr_appset.attr,
+ &dev_attr_appclear.attr,
+ &dev_attr_apppulse.attr,
+ &dev_attr_triginstatus.attr,
+ &dev_attr_trigoutstatus.attr,
+ &dev_attr_chinstatus.attr,
+ &dev_attr_choutstatus.attr,
+#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
+ &dev_attr_itctrl.attr,
+ &dev_attr_ittrigin.attr,
+ &dev_attr_itchin.attr,
+ &dev_attr_ittrigout.attr,
+ &dev_attr_itchout.attr,
+ &dev_attr_itchoutack.attr,
+ &dev_attr_ittrigoutack.attr,
+ &dev_attr_ittriginack.attr,
+ &dev_attr_itchinack.attr,
+#endif
+ NULL,
+};
+
+/* CTI channel x-trigger programming */
+static int
+cti_trig_op_parse(struct device *dev, enum cti_chan_op op,
+ enum cti_trig_dir dir, const char *buf, size_t size)
+{
+ u32 chan_idx;
+ u32 trig_idx;
+ int items, err = -EINVAL;
+
+ /* extract chan idx and trigger idx */
+ items = sscanf(buf, "%d %d", &chan_idx, &trig_idx);
+ if (items == 2) {
+ err = cti_channel_trig_op(dev, op, dir, chan_idx, trig_idx);
+ if (!err)
+ err = size;
+ }
+ return err;
+}
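A hedged user-space sketch of driving this parser (the sysfs path is an assumption; the actual location depends on the registered CTI device name and attribute grouping):

#include <stdio.h>

int main(void)
{
	/* Attach input trigger 1 to CTM channel 0: the buffer is parsed
	 * by cti_trig_op_parse() above as "<channel-idx> <trigger-idx>".
	 */
	FILE *f = fopen("/sys/bus/coresight/devices/cti_cpu0/trigin_attach", "w");

	if (!f)
		return 1;
	fprintf(f, "0 1\n");
	return fclose(f) != 0;
}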
+
+static ssize_t trigin_attach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_ATTACH, CTI_TRIG_IN,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigin_attach);
+
+static ssize_t trigin_detach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_IN,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigin_detach);
+
+static ssize_t trigout_attach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_ATTACH, CTI_TRIG_OUT,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigout_attach);
+
+static ssize_t trigout_detach_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_OUT,
+ buf, size);
+}
+static DEVICE_ATTR_WO(trigout_detach);
+
+
+static ssize_t chan_gate_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = 0, channel = 0;
+
+ if (kstrtoint(buf, 0, &channel))
+ return -EINVAL;
+
+ err = cti_channel_gate_op(dev, CTI_GATE_CHAN_ENABLE, channel);
+ return err ? err : size;
+}
+
+static ssize_t chan_gate_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ unsigned long ctigate_bitmask = cfg->ctigate;
+ int size = 0;
+
+ if (cfg->ctigate == 0)
+ size = sprintf(buf, "\n");
+ else
+ size = bitmap_print_to_pagebuf(true, buf, &ctigate_bitmask,
+ cfg->nr_ctm_channels);
+ return size;
+}
+static DEVICE_ATTR_RW(chan_gate_enable);
+
+static ssize_t chan_gate_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = 0, channel = 0;
+
+ if (kstrtoint(buf, 0, &channel))
+ return -EINVAL;
+
+ err = cti_channel_gate_op(dev, CTI_GATE_CHAN_DISABLE, channel);
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_gate_disable);
+
+static int
+chan_op_parse(struct device *dev, enum cti_chan_set_op op, const char *buf)
+{
+ int err = 0, channel = 0;
+
+ if (kstrtoint(buf, 0, &channel))
+ return -EINVAL;
+
+ err = cti_channel_setop(dev, op, channel);
+ return err;
+}
+
+static ssize_t chan_set_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = chan_op_parse(dev, CTI_CHAN_SET, buf);
+
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_set);
+
+static ssize_t chan_clear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = chan_op_parse(dev, CTI_CHAN_CLR, buf);
+
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_clear);
+
+static ssize_t chan_pulse_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err = chan_op_parse(dev, CTI_CHAN_PULSE, buf);
+
+ return err ? err : size;
+}
+static DEVICE_ATTR_WO(chan_pulse);
+
+static ssize_t trig_filter_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->config.trig_filter_enable;
+ spin_unlock(&drvdata->spinlock);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t trig_filter_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.trig_filter_enable = !!val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(trig_filter_enable);
+
+static ssize_t trigout_filtered_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ int size = 0, nr_trig_max = cfg->nr_trig_max;
+ unsigned long mask = cfg->trig_out_filter;
+
+ if (mask)
+ size = bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
+ return size;
+}
+static DEVICE_ATTR_RO(trigout_filtered);
+
+/* clear all xtrigger / channel programming */
+static ssize_t chan_xtrigs_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int i;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+
+ /* clear the CTI trigger / channel programming registers */
+ for (i = 0; i < config->nr_trig_max; i++) {
+ config->ctiinen[i] = 0;
+ config->ctiouten[i] = 0;
+ }
+
+ /* clear the other regs */
+ config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0);
+ config->asicctl = 0;
+ config->ctiappset = 0;
+ config->ctiinout_sel = 0;
+ config->xtrig_rchan_sel = 0;
+
+ /* if enabled then write through */
+ if (cti_active(config))
+ cti_write_all_hw_regs(drvdata);
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_WO(chan_xtrigs_reset);
+
+/*
+ * Write to select a channel to view, read to display the
+ * cross triggers for the selected channel.
+ */
+static ssize_t chan_xtrigs_sel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+ if (val > (drvdata->config.nr_ctm_channels - 1))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.xtrig_rchan_sel = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+
+static ssize_t chan_xtrigs_sel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ spin_lock(&drvdata->spinlock);
+ val = drvdata->config.xtrig_rchan_sel;
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%ld\n", val);
+}
+static DEVICE_ATTR_RW(chan_xtrigs_sel);
+
+static ssize_t chan_xtrigs_in_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ int used = 0, reg_idx;
+ int nr_trig_max = drvdata->config.nr_trig_max;
+ u32 chan_mask = BIT(cfg->xtrig_rchan_sel);
+
+ for (reg_idx = 0; reg_idx < nr_trig_max; reg_idx++) {
+ if (chan_mask & cfg->ctiinen[reg_idx])
+ used += sprintf(buf + used, "%d ", reg_idx);
+ }
+
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+static DEVICE_ATTR_RO(chan_xtrigs_in);
+
+static ssize_t chan_xtrigs_out_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ int used = 0, reg_idx;
+ int nr_trig_max = drvdata->config.nr_trig_max;
+ u32 chan_mask = BIT(cfg->xtrig_rchan_sel);
+
+ for (reg_idx = 0; reg_idx < nr_trig_max; reg_idx++) {
+ if (chan_mask & cfg->ctiouten[reg_idx])
+ used += sprintf(buf + used, "%d ", reg_idx);
+ }
+
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+static DEVICE_ATTR_RO(chan_xtrigs_out);
+
+static ssize_t print_chan_list(struct device *dev,
+ char *buf, bool inuse)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ int size, i;
+ unsigned long inuse_bits = 0, chan_mask;
+
+ /* scan regs to get bitmap of channels in use. */
+ spin_lock(&drvdata->spinlock);
+ for (i = 0; i < config->nr_trig_max; i++) {
+ inuse_bits |= config->ctiinen[i];
+ inuse_bits |= config->ctiouten[i];
+ }
+ spin_unlock(&drvdata->spinlock);
+
+ /* invert bits if printing free channels */
+ if (!inuse)
+ inuse_bits = ~inuse_bits;
+
+ /* list of channels, or 'none' */
+ chan_mask = GENMASK(config->nr_ctm_channels - 1, 0);
+ if (inuse_bits & chan_mask)
+ size = bitmap_print_to_pagebuf(true, buf, &inuse_bits,
+ config->nr_ctm_channels);
+ else
+ size = sprintf(buf, "\n");
+ return size;
+}
+
+static ssize_t chan_inuse_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return print_chan_list(dev, buf, true);
+}
+static DEVICE_ATTR_RO(chan_inuse);
+
+static ssize_t chan_free_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return print_chan_list(dev, buf, false);
+}
+static DEVICE_ATTR_RO(chan_free);
+
+static struct attribute *coresight_cti_channel_attrs[] = {
+ &dev_attr_trigin_attach.attr,
+ &dev_attr_trigin_detach.attr,
+ &dev_attr_trigout_attach.attr,
+ &dev_attr_trigout_detach.attr,
+ &dev_attr_trig_filter_enable.attr,
+ &dev_attr_trigout_filtered.attr,
+ &dev_attr_chan_gate_enable.attr,
+ &dev_attr_chan_gate_disable.attr,
+ &dev_attr_chan_set.attr,
+ &dev_attr_chan_clear.attr,
+ &dev_attr_chan_pulse.attr,
+ &dev_attr_chan_inuse.attr,
+ &dev_attr_chan_free.attr,
+ &dev_attr_chan_xtrigs_sel.attr,
+ &dev_attr_chan_xtrigs_in.attr,
+ &dev_attr_chan_xtrigs_out.attr,
+ &dev_attr_chan_xtrigs_reset.attr,
+ NULL,
+};
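+/*
+ * The attributes above appear in the device's "channels" sysfs group
+ * (named via coresight_cti_channels_group below), e.g. for a
+ * hypothetical device cti_sys0:
+ *   echo 2 > /sys/bus/coresight/devices/cti_sys0/channels/chan_set
+ *   cat /sys/bus/coresight/devices/cti_sys0/channels/chan_free
+ */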
+
+/* Create the connections trigger groups and attrs dynamically */
+/*
+ * Each connection has a dynamic group triggers<N> containing the name and
+ * trigin/out sigs/types attributes; each device has a static
+ * nr_trigger_cons attribute giving the number of groups. e.g. in sysfs:-
+ * /cti_<name>/triggers0
+ * /cti_<name>/triggers1
+ * /cti_<name>/nr_trigger_cons
+ * where nr_trigger_cons = 2
+ */
+static ssize_t con_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+
+ return sprintf(buf, "%s\n", con->con_dev_name);
+}
+
+static ssize_t trigin_sig_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ unsigned long mask = con->con_in->used_mask;
+
+ return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
+}
+
+static ssize_t trigout_sig_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *cfg = &drvdata->config;
+ unsigned long mask = con->con_out->used_mask;
+
+ return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
+}
+
+/* convert a sig type id to a name */
+static const char *
+cti_sig_type_name(struct cti_trig_con *con, int used_count, bool in)
+{
+ int idx = 0;
+ struct cti_trig_grp *grp = in ? con->con_in : con->con_out;
+
+ if (used_count < grp->nr_sigs)
+ idx = grp->sig_types[used_count];
+ return sig_type_names[idx];
+}
+
+static ssize_t trigin_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ int sig_idx, used = 0;
+ const char *name;
+
+ for (sig_idx = 0; sig_idx < con->con_in->nr_sigs; sig_idx++) {
+ name = cti_sig_type_name(con, sig_idx, true);
+ used += sprintf(buf + used, "%s ", name);
+ }
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+
+static ssize_t trigout_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ext_attr =
+ container_of(attr, struct dev_ext_attribute, attr);
+ struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
+ int sig_idx, used = 0;
+ const char *name;
+
+ for (sig_idx = 0; sig_idx < con->con_out->nr_sigs; sig_idx++) {
+ name = cti_sig_type_name(con, sig_idx, false);
+ used += sprintf(buf + used, "%s ", name);
+ }
+ used += sprintf(buf + used, "\n");
+ return used;
+}
+
+/*
+ * Array of show function names declared above to allow selection
+ * for the connection attributes
+ */
+static p_show_fn show_fns[CTI_CON_ATTR_MAX] = {
+ con_name_show,
+ trigin_sig_show,
+ trigout_sig_show,
+ trigin_type_show,
+ trigout_type_show,
+};
+
+static int cti_create_con_sysfs_attr(struct device *dev,
+ struct cti_trig_con *con,
+ enum cti_conn_attr_type attr_type,
+ int attr_idx)
+{
+ struct dev_ext_attribute *eattr;
+ char *name;
+
+ eattr = devm_kzalloc(dev, sizeof(struct dev_ext_attribute),
+ GFP_KERNEL);
+ if (!eattr)
+ return -ENOMEM;
+
+ name = devm_kstrdup(dev, con_attr_names[attr_type],
+ GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ /* fill out the underlying attribute struct */
+ eattr->attr.attr.name = name;
+ eattr->attr.attr.mode = 0444;
+
+ /* now the device_attribute struct */
+ eattr->attr.show = show_fns[attr_type];
+ eattr->var = con;
+ con->con_attrs[attr_idx] = &eattr->attr.attr;
+ return 0;
+}
+
+static struct attribute_group *
+cti_create_con_sysfs_group(struct device *dev, struct cti_device *ctidev,
+ int con_idx, struct cti_trig_con *tc)
+{
+ struct attribute_group *group = NULL;
+ int grp_idx;
+
+ group = devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
+ if (!group)
+ return NULL;
+
+ group->name = devm_kasprintf(dev, GFP_KERNEL, "triggers%d", con_idx);
+ if (!group->name)
+ return NULL;
+
+ grp_idx = con_idx + CORESIGHT_CTI_STATIC_GROUPS_MAX - 1;
+ ctidev->con_groups[grp_idx] = group;
+ tc->attr_group = group;
+ return group;
+}
+
+/* create a triggers connection group and the attributes for that group */
+static int cti_create_con_attr_set(struct device *dev, int con_idx,
+ struct cti_device *ctidev,
+ struct cti_trig_con *tc)
+{
+ struct attribute_group *attr_group = NULL;
+ int attr_idx = 0;
+ int err = -ENOMEM;
+
+ attr_group = cti_create_con_sysfs_group(dev, ctidev, con_idx, tc);
+ if (!attr_group)
+ return -ENOMEM;
+
+ /* allocate NULL terminated array of attributes */
+ tc->con_attrs = devm_kcalloc(dev, CTI_CON_ATTR_MAX + 1,
+ sizeof(struct attribute *), GFP_KERNEL);
+ if (!tc->con_attrs)
+ return -ENOMEM;
+
+ err = cti_create_con_sysfs_attr(dev, tc, CTI_CON_ATTR_NAME,
+ attr_idx++);
+ if (err)
+ return err;
+
+ if (tc->con_in->nr_sigs > 0) {
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGIN_SIG,
+ attr_idx++);
+ if (err)
+ return err;
+
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGIN_TYPES,
+ attr_idx++);
+ if (err)
+ return err;
+ }
+
+ if (tc->con_out->nr_sigs > 0) {
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGOUT_SIG,
+ attr_idx++);
+ if (err)
+ return err;
+
+ err = cti_create_con_sysfs_attr(dev, tc,
+ CTI_CON_ATTR_TRIGOUT_TYPES,
+ attr_idx++);
+ if (err)
+ return err;
+ }
+ attr_group->attrs = tc->con_attrs;
+ return 0;
+}
+
+/* create the array of group pointers for the CTI sysfs groups */
+int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
+{
+ int nr_groups;
+
+ /* nr groups = dynamic + static + NULL terminator */
+ nr_groups = ctidev->nr_trig_con + CORESIGHT_CTI_STATIC_GROUPS_MAX;
+ ctidev->con_groups = devm_kcalloc(dev, nr_groups,
+ sizeof(struct attribute_group *),
+ GFP_KERNEL);
+ if (!ctidev->con_groups)
+ return -ENOMEM;
+ return 0;
+}
+
+int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata)
+{
+ struct cti_device *ctidev = &drvdata->ctidev;
+ int err = 0, con_idx = 0, i;
+ struct cti_trig_con *tc = NULL;
+
+ err = cti_create_cons_groups(dev, ctidev);
+ if (err)
+ return err;
+
+ /* populate first locations with the static set of groups */
+ for (i = 0; i < (CORESIGHT_CTI_STATIC_GROUPS_MAX - 1); i++)
+ ctidev->con_groups[i] = coresight_cti_groups[i];
+
+ /* add dynamic set for each connection */
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ err = cti_create_con_attr_set(dev, con_idx++, ctidev, tc);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* attribute and group sysfs tables. */
+static const struct attribute_group coresight_cti_group = {
+ .attrs = coresight_cti_attrs,
+};
+
+static const struct attribute_group coresight_cti_mgmt_group = {
+ .attrs = coresight_cti_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group coresight_cti_regs_group = {
+ .attrs = coresight_cti_regs_attrs,
+ .name = "regs",
+};
+
+static const struct attribute_group coresight_cti_channels_group = {
+ .attrs = coresight_cti_channel_attrs,
+ .name = "channels",
+};
+
+const struct attribute_group *
+coresight_cti_groups[CORESIGHT_CTI_STATIC_GROUPS_MAX] = {
+ &coresight_cti_group,
+ &coresight_cti_mgmt_group,
+ &coresight_cti_regs_group,
+ &coresight_cti_channels_group,
+ NULL,
+};
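A quick userspace sketch of driving the sysfs interface built above (illustrative only: the device name cti_sys0 is hypothetical, error handling is minimal, and chan_set updates the driver's local CTIAPPSET copy, written through to hardware only while the CTI is enabled):

    #include <stdio.h>

    /* Write chan_set (CTI_CHAN_SET on channel 0), then read chan_inuse. */
    int main(void)
    {
        const char *base = "/sys/bus/coresight/devices/cti_sys0/channels";
        char path[256], line[128];
        FILE *f;

        snprintf(path, sizeof(path), "%s/chan_set", base);
        f = fopen(path, "w");
        if (!f)
            return 1;
        fputs("0\n", f);
        fclose(f);

        snprintf(path, sizeof(path), "%s/chan_inuse", base);
        f = fopen(path, "r");
        if (!f)
            return 1;
        if (fgets(line, sizeof(line), f))
            printf("channels in use: %s", line);
        fclose(f);
        return 0;
    }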
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
new file mode 100644
index 000000000000..aa6e0249bd70
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include <linux/property.h>
+#include "coresight-cti.h"
+
+/*
+ * CTI devices can be associated with a PE, or be connected to CoreSight
+ * hardware. We maintain a list of all CTIs, whether CPU bound or
+ * otherwise.
+ *
+ * We assume that the non-CPU CTIs are always powered, as we do with sinks
+ * etc.
+ *
+ * We leave the client to figure out if all the CTIs are interconnected
+ * with the same CTM; in general this is the case, but it does not always
+ * have to be.
+ */
+
+/* net of CTI devices connected via CTM */
+LIST_HEAD(ect_net);
+
+/* protect the list */
+static DEFINE_MUTEX(ect_mutex);
+
+#define csdev_to_cti_drvdata(csdev) \
+ dev_get_drvdata(csdev->dev.parent)
+
+/*
+ * CTI naming. CTI bound to cores will have the name cti_cpu<N> where
+ * N is the CPU ID. System CTIs will have the name cti_sys<I> where I
+ * is an index allocated by order of discovery.
+ *
+ * CTI device name list - for CTI not bound to cores.
+ */
+DEFINE_CORESIGHT_DEVLIST(cti_sys_devs, "cti_sys");
+
+/* write set of regs to hardware - call with spinlock claimed */
+void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ int i;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* disable CTI before writing registers */
+ writel_relaxed(0, drvdata->base + CTICONTROL);
+
+ /* write the CTI trigger registers */
+ for (i = 0; i < config->nr_trig_max; i++) {
+ writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN(i));
+ writel_relaxed(config->ctiouten[i],
+ drvdata->base + CTIOUTEN(i));
+ }
+
+ /* other regs */
+ writel_relaxed(config->ctigate, drvdata->base + CTIGATE);
+ writel_relaxed(config->asicctl, drvdata->base + ASICCTL);
+ writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET);
+
+ /* re-enable CTI */
+ writel_relaxed(1, drvdata->base + CTICONTROL);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void cti_enable_hw_smp_call(void *info)
+{
+ struct cti_drvdata *drvdata = info;
+
+ cti_write_all_hw_regs(drvdata);
+}
+
+/* write regs to hardware and enable */
+static int cti_enable_hw(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ struct device *dev = &drvdata->csdev->dev;
+ int rc = 0;
+
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+
+ /* no need to do anything if enabled or unpowered */
+ if (config->hw_enabled || !config->hw_powered)
+ goto cti_state_unchanged;
+
+ /* claim the device */
+ rc = coresight_claim_device(drvdata->base);
+ if (rc)
+ goto cti_err_not_enabled;
+
+ if (drvdata->ctidev.cpu >= 0) {
+ rc = smp_call_function_single(drvdata->ctidev.cpu,
+ cti_enable_hw_smp_call,
+ drvdata, 1);
+ if (rc)
+ goto cti_err_not_enabled;
+ } else {
+ cti_write_all_hw_regs(drvdata);
+ }
+
+ config->hw_enabled = true;
+ atomic_inc(&drvdata->config.enable_req_count);
+ spin_unlock(&drvdata->spinlock);
+ return rc;
+
+cti_state_unchanged:
+ atomic_inc(&drvdata->config.enable_req_count);
+
+ /* cannot enable due to error */
+cti_err_not_enabled:
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(dev->parent);
+ return rc;
+}
+
+/* disable hardware */
+static int cti_disable_hw(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ struct device *dev = &drvdata->csdev->dev;
+
+ spin_lock(&drvdata->spinlock);
+
+ /* check refcount - disable on 0 */
+ if (atomic_dec_return(&drvdata->config.enable_req_count) > 0)
+ goto cti_not_disabled;
+
+ /* no need to do anything if disabled or cpu unpowered */
+ if (!config->hw_enabled || !config->hw_powered)
+ goto cti_not_disabled;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* disable CTI */
+ writel_relaxed(0, drvdata->base + CTICONTROL);
+ config->hw_enabled = false;
+
+ coresight_disclaim_device_unlocked(drvdata->base);
+ CS_LOCK(drvdata->base);
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(dev);
+ return 0;
+
+ /* not disabled this call */
+cti_not_disabled:
+ spin_unlock(&drvdata->spinlock);
+ return 0;
+}
+
+void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value)
+{
+ CS_UNLOCK(drvdata->base);
+ writel_relaxed(value, drvdata->base + offset);
+ CS_LOCK(drvdata->base);
+}
+
+void cti_write_intack(struct device *dev, u32 ackval)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ /* write if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIINTACK, ackval);
+ spin_unlock(&drvdata->spinlock);
+}
+
+/*
+ * Look at the HW DEVID register for some of the HW settings.
+ * DEVID[15:8] - max number of in / out triggers.
+ */
+#define CTI_DEVID_MAXTRIGS(devid_val) ((int) BMVAL(devid_val, 8, 15))
+
+/* DEVID[19:16] - number of CTM channels */
+#define CTI_DEVID_CTMCHANNELS(devid_val) ((int) BMVAL(devid_val, 16, 19))
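+
+/*
+ * Worked example with an illustrative value: devid = 0x00040800 gives
+ * DEVID[15:8] = 0x08 -> nr_trig_max = 8 triggers in each direction, and
+ * DEVID[19:16] = 0x4 -> nr_ctm_channels = 4 CTM channels.
+ */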
+
+static void cti_set_default_config(struct device *dev,
+ struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ u32 devid;
+
+ devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+ config->nr_trig_max = CTI_DEVID_MAXTRIGS(devid);
+
+ /*
+ * no current hardware should exceed this, but protect the driver
+ * in case of fault / out of spec hw
+ */
+ if (config->nr_trig_max > CTIINOUTEN_MAX) {
+ dev_warn_once(dev,
+ "Limiting HW MaxTrig value(%d) to driver max(%d)\n",
+ config->nr_trig_max, CTIINOUTEN_MAX);
+ config->nr_trig_max = CTIINOUTEN_MAX;
+ }
+
+ config->nr_ctm_channels = CTI_DEVID_CTMCHANNELS(devid);
+
+ /* Most regs default to 0 as zalloc'ed, except... */
+ config->trig_filter_enable = true;
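+ /* all gates open by default, e.g. 4 CTM channels -> GENMASK(3, 0) = 0xf */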
+ config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0);
+ atomic_set(&config->enable_req_count, 0);
+}
+
+/*
+ * Add a connection entry to the list of connections for this
+ * CTI device.
+ */
+int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc,
+ struct coresight_device *csdev,
+ const char *assoc_dev_name)
+{
+ struct cti_device *cti_dev = &drvdata->ctidev;
+
+ tc->con_dev = csdev;
+ /*
+ * Prefer actual associated CS device dev name to supplied value -
+ * which is likely to be node name / other conn name.
+ */
+ if (csdev)
+ tc->con_dev_name = dev_name(&csdev->dev);
+ else if (assoc_dev_name != NULL) {
+ tc->con_dev_name = devm_kstrdup(dev,
+ assoc_dev_name, GFP_KERNEL);
+ if (!tc->con_dev_name)
+ return -ENOMEM;
+ }
+ list_add_tail(&tc->node, &cti_dev->trig_cons);
+ cti_dev->nr_trig_con++;
+
+ /* add connection usage bit info to overall info */
+ drvdata->config.trig_in_use |= tc->con_in->used_mask;
+ drvdata->config.trig_out_use |= tc->con_out->used_mask;
+
+ return 0;
+}
+
+/* create a trigger connection with appropriately sized signal groups */
+struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
+ int out_sigs)
+{
+ struct cti_trig_con *tc = NULL;
+ struct cti_trig_grp *in = NULL, *out = NULL;
+
+ tc = devm_kzalloc(dev, sizeof(struct cti_trig_con), GFP_KERNEL);
+ if (!tc)
+ return tc;
+
+ in = devm_kzalloc(dev,
+ offsetof(struct cti_trig_grp, sig_types[in_sigs]),
+ GFP_KERNEL);
+ if (!in)
+ return NULL;
+
+ out = devm_kzalloc(dev,
+ offsetof(struct cti_trig_grp, sig_types[out_sigs]),
+ GFP_KERNEL);
+ if (!out)
+ return NULL;
+
+ tc->con_in = in;
+ tc->con_out = out;
+ tc->con_in->nr_sigs = in_sigs;
+ tc->con_out->nr_sigs = out_sigs;
+ return tc;
+}
+
+/*
+ * Add a default connection if nothing else is specified.
+ * single connection based on max in/out info, no assoc device
+ */
+int cti_add_default_connection(struct device *dev, struct cti_drvdata *drvdata)
+{
+ int ret = 0;
+ int n_trigs = drvdata->config.nr_trig_max;
+ u32 n_trig_mask = GENMASK(n_trigs - 1, 0);
+ struct cti_trig_con *tc = NULL;
+
+ /*
+ * Assume max trigs for in and out,
+ * all used, default sig types allocated
+ */
+ tc = cti_allocate_trig_con(dev, n_trigs, n_trigs);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->con_in->used_mask = n_trig_mask;
+ tc->con_out->used_mask = n_trig_mask;
+ ret = cti_add_connection_entry(dev, drvdata, tc, NULL, "default");
+ return ret;
+}
+
+/* CTI channel API */
+/* attach/detach channel from trigger - write through if enabled. */
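+/*
+ * Illustrative example: CTI_CHAN_ATTACH of channel 2 to out trigger 1 ORs
+ * BIT(2) into the local ctiouten[1] copy (0x4 if previously clear), then
+ * writes it through to the CTIOUTEN(1) register if the CTI is active.
+ */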
+int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
+ enum cti_trig_dir direction, u32 channel_idx,
+ u32 trigger_idx)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ u32 trig_bitmask;
+ u32 chan_bitmask;
+ u32 reg_value;
+ int reg_offset;
+
+ /* ensure indexes in range */
+ if ((channel_idx >= config->nr_ctm_channels) ||
+ (trigger_idx >= config->nr_trig_max))
+ return -EINVAL;
+
+ trig_bitmask = BIT(trigger_idx);
+
+ /* ensure registered triggers and not out filtered */
+ if (direction == CTI_TRIG_IN) {
+ if (!(trig_bitmask & config->trig_in_use))
+ return -EINVAL;
+ } else {
+ if (!(trig_bitmask & config->trig_out_use))
+ return -EINVAL;
+
+ if ((config->trig_filter_enable) &&
+ (config->trig_out_filter & trig_bitmask))
+ return -EINVAL;
+ }
+
+ /* update the local register values */
+ chan_bitmask = BIT(channel_idx);
+ reg_offset = (direction == CTI_TRIG_IN ? CTIINEN(trigger_idx) :
+ CTIOUTEN(trigger_idx));
+
+ spin_lock(&drvdata->spinlock);
+
+ /* read - modify write - the trigger / channel enable value */
+ reg_value = direction == CTI_TRIG_IN ? config->ctiinen[trigger_idx] :
+ config->ctiouten[trigger_idx];
+ if (op == CTI_CHAN_ATTACH)
+ reg_value |= chan_bitmask;
+ else
+ reg_value &= ~chan_bitmask;
+
+ /* write local copy */
+ if (direction == CTI_TRIG_IN)
+ config->ctiinen[trigger_idx] = reg_value;
+ else
+ config->ctiouten[trigger_idx] = reg_value;
+
+ /* write through if enabled */
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, reg_offset, reg_value);
+ spin_unlock(&drvdata->spinlock);
+ return 0;
+}
+
+int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
+ u32 channel_idx)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ u32 chan_bitmask;
+ u32 reg_value;
+ int err = 0;
+
+ if (channel_idx >= config->nr_ctm_channels)
+ return -EINVAL;
+
+ chan_bitmask = BIT(channel_idx);
+
+ spin_lock(&drvdata->spinlock);
+ reg_value = config->ctigate;
+ switch (op) {
+ case CTI_GATE_CHAN_ENABLE:
+ reg_value |= chan_bitmask;
+ break;
+
+ case CTI_GATE_CHAN_DISABLE:
+ reg_value &= ~chan_bitmask;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (err == 0) {
+ config->ctigate = reg_value;
+ if (cti_active(config))
+ cti_write_single_reg(drvdata, CTIGATE, reg_value);
+ }
+ spin_unlock(&drvdata->spinlock);
+ return err;
+}
+
+int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
+ u32 channel_idx)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_config *config = &drvdata->config;
+ u32 chan_bitmask;
+ u32 reg_value;
+ u32 reg_offset;
+ int err = 0;
+
+ if (channel_idx >= config->nr_ctm_channels)
+ return -EINVAL;
+
+ chan_bitmask = BIT(channel_idx);
+
+ spin_lock(&drvdata->spinlock);
+ reg_value = config->ctiappset;
+ switch (op) {
+ case CTI_CHAN_SET:
+ config->ctiappset |= chan_bitmask;
+ reg_value = config->ctiappset;
+ reg_offset = CTIAPPSET;
+ break;
+
+ case CTI_CHAN_CLR:
+ config->ctiappset &= ~chan_bitmask;
+ reg_value = chan_bitmask;
+ reg_offset = CTIAPPCLEAR;
+ break;
+
+ case CTI_CHAN_PULSE:
+ config->ctiappset &= ~chan_bitmask;
+ reg_value = chan_bitmask;
+ reg_offset = CTIAPPPULSE;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if ((err == 0) && cti_active(config))
+ cti_write_single_reg(drvdata, reg_offset, reg_value);
+ spin_unlock(&drvdata->spinlock);
+
+ return err;
+}
+
+/*
+ * Look for a matching connection device name in the list of connections.
+ * If found then swap in the csdev name, set trig con association pointer
+ * and return found.
+ */
+static bool
+cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
+ struct coresight_device *csdev)
+{
+ struct cti_trig_con *tc;
+
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev_name) {
+ if (!strcmp(node_name, tc->con_dev_name)) {
+ /* match: so swap in csdev name & dev */
+ tc->con_dev_name = dev_name(&csdev->dev);
+ tc->con_dev = csdev;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/*
+ * Search the cti list to add an associated CTI into the supplied CS device
+ * This will set the association if CTI declared before the CS device.
+ * (called from coresight_register() with coresight_mutex locked).
+ */
+void cti_add_assoc_to_csdev(struct coresight_device *csdev)
+{
+ struct cti_drvdata *ect_item;
+ struct cti_device *ctidev;
+ const char *node_name = NULL;
+
+ /* protect the list */
+ mutex_lock(&ect_mutex);
+
+ /* exit if current is an ECT device. */
+ if ((csdev->type == CORESIGHT_DEV_TYPE_ECT) || list_empty(&ect_net))
+ goto cti_add_done;
+
+ /* if we didn't find the csdev previously, the fwnode name was used */
+ node_name = cti_plat_get_node_name(dev_fwnode(csdev->dev.parent));
+ if (!node_name)
+ goto cti_add_done;
+
+ /* for each CTI in list... */
+ list_for_each_entry(ect_item, &ect_net, node) {
+ ctidev = &ect_item->ctidev;
+ if (cti_match_fixup_csdev(ctidev, node_name, csdev)) {
+ /*
+ * if we found a matching csdev then update the ECT
+ * association pointer for the device with this CTI.
+ */
+ csdev->ect_dev = ect_item->csdev;
+ break;
+ }
+ }
+cti_add_done:
+ mutex_unlock(&ect_mutex);
+}
+EXPORT_SYMBOL_GPL(cti_add_assoc_to_csdev);
+
+/*
+ * Removing the associated devices is easier: the csdev's ect_dev points
+ * directly at the CTI (a CTI itself never has csdev->ect_dev set).
+ */
+void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
+{
+ struct cti_drvdata *ctidrv;
+ struct cti_trig_con *tc;
+ struct cti_device *ctidev;
+
+ mutex_lock(&ect_mutex);
+ if (csdev->ect_dev) {
+ ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
+ ctidev = &ctidrv->ctidev;
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev == csdev->ect_dev) {
+ tc->con_dev = NULL;
+ break;
+ }
+ }
+ csdev->ect_dev = NULL;
+ }
+ mutex_unlock(&ect_mutex);
+}
+EXPORT_SYMBOL_GPL(cti_remove_assoc_from_csdev);
+
+/*
+ * Update the cross references where the associated device was found
+ * while we were building the connection info. This will occur if the
+ * assoc device was registered before the CTI.
+ */
+static void cti_update_conn_xrefs(struct cti_drvdata *drvdata)
+{
+ struct cti_trig_con *tc;
+ struct cti_device *ctidev = &drvdata->ctidev;
+
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev)
+ /* set tc->con_dev->ect_dev */
+ coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ drvdata->csdev);
+ }
+}
+
+static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
+{
+ struct cti_trig_con *tc;
+ struct cti_device *ctidev = &drvdata->ctidev;
+
+ list_for_each_entry(tc, &ctidev->trig_cons, node) {
+ if (tc->con_dev) {
+ coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ NULL);
+ }
+ }
+}
+
+/* CTI ECT operations */
+int cti_enable(struct coresight_device *csdev)
+{
+ struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
+
+ return cti_enable_hw(drvdata);
+}
+
+int cti_disable(struct coresight_device *csdev)
+{
+ struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
+
+ return cti_disable_hw(drvdata);
+}
+
+const struct coresight_ops_ect cti_ops_ect = {
+ .enable = cti_enable,
+ .disable = cti_disable,
+};
+
+const struct coresight_ops cti_ops = {
+ .ect_ops = &cti_ops_ect,
+};
+
+/*
+ * Free up CTI specific resources;
+ * called by dev->release, which needs to call down to the underlying
+ * csdev release function.
+ */
+static void cti_device_release(struct device *dev)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cti_drvdata *ect_item, *ect_tmp;
+
+ mutex_lock(&ect_mutex);
+ cti_remove_conn_xrefs(drvdata);
+
+ /* remove from the list */
+ list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) {
+ if (ect_item == drvdata) {
+ list_del(&ect_item->node);
+ break;
+ }
+ }
+ mutex_unlock(&ect_mutex);
+
+ if (drvdata->csdev_release)
+ drvdata->csdev_release(dev);
+}
+
+static int cti_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret = 0;
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct cti_drvdata *drvdata = NULL;
+ struct coresight_desc cti_desc;
+ struct coresight_platform_data *pdata = NULL;
+ struct resource *res = &adev->res;
+
+ /* driver data */
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ dev_info(dev, "%s, mem err\n", __func__);
+ goto err_out;
+ }
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ dev_err(dev, "%s, remap err\n", __func__);
+ goto err_out;
+ }
+ drvdata->base = base;
+
+ dev_set_drvdata(dev, drvdata);
+
+ /* default CTI device info */
+ drvdata->ctidev.cpu = -1;
+ drvdata->ctidev.nr_trig_con = 0;
+ drvdata->ctidev.ctm_id = 0;
+ INIT_LIST_HEAD(&drvdata->ctidev.trig_cons);
+
+ spin_lock_init(&drvdata->spinlock);
+
+ /* initialise CTI driver config values */
+ cti_set_default_config(dev, drvdata);
+
+ pdata = coresight_cti_get_platform_data(dev);
+ if (IS_ERR(pdata)) {
+ dev_err(dev, "coresight_cti_get_platform_data err\n");
+ ret = PTR_ERR(pdata);
+ goto err_out;
+ }
+
+ /* default to powered - could change on PM notifications */
+ drvdata->config.hw_powered = true;
+
+ /* set up device name - will depend if cpu bound or otherwise */
+ if (drvdata->ctidev.cpu >= 0)
+ cti_desc.name = devm_kasprintf(dev, GFP_KERNEL, "cti_cpu%d",
+ drvdata->ctidev.cpu);
+ else
+ cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev);
+ if (!cti_desc.name) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* create dynamic attributes for connections */
+ ret = cti_create_cons_sysfs(dev, drvdata);
+ if (ret) {
+ dev_err(dev, "%s: create dynamic sysfs entries failed\n",
+ cti_desc.name);
+ goto err_out;
+ }
+
+ /* set up coresight component description */
+ cti_desc.pdata = pdata;
+ cti_desc.type = CORESIGHT_DEV_TYPE_ECT;
+ cti_desc.subtype.ect_subtype = CORESIGHT_DEV_SUBTYPE_ECT_CTI;
+ cti_desc.ops = &cti_ops;
+ cti_desc.groups = drvdata->ctidev.con_groups;
+ cti_desc.dev = dev;
+ drvdata->csdev = coresight_register(&cti_desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto err_out;
+ }
+
+ /* add to list of CTI devices */
+ mutex_lock(&ect_mutex);
+ list_add(&drvdata->node, &ect_net);
+ /* set any cross references */
+ cti_update_conn_xrefs(drvdata);
+ mutex_unlock(&ect_mutex);
+
+ /* set up release chain */
+ drvdata->csdev_release = drvdata->csdev->dev.release;
+ drvdata->csdev->dev.release = cti_device_release;
+
+ /* all done - dec pm refcount */
+ pm_runtime_put(&adev->dev);
+ dev_info(&drvdata->csdev->dev, "CTI initialized\n");
+ return 0;
+
+err_out:
+ return ret;
+}
+
+static struct amba_cs_uci_id uci_id_cti[] = {
+ {
+ /* CTI UCI data */
+ .devarch = 0x47701a14, /* CTI v2 */
+ .devarch_mask = 0xfff0ffff,
+ .devtype = 0x00000014, /* maj(0x4-debug) min(0x1-ECT) */
+ }
+};
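+
+/*
+ * Note: devarch_mask 0xfff0ffff ignores DEVARCH[19:16] (the architecture
+ * revision field), so any revision of a CTI v2 implementation matches.
+ */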
+
+static const struct amba_id cti_ids[] = {
+ CS_AMBA_ID(0x000bb906), /* Coresight CTI (SoC 400), C-A72, C-A57 */
+ CS_AMBA_ID(0x000bb922), /* CTI - C-A8 */
+ CS_AMBA_ID(0x000bb9a8), /* CTI - C-A53 */
+ CS_AMBA_ID(0x000bb9aa), /* CTI - C-A73 */
+ CS_AMBA_UCI_ID(0x000bb9da, uci_id_cti), /* CTI - C-A35 */
+ CS_AMBA_UCI_ID(0x000bb9ed, uci_id_cti), /* Coresight CTI (SoC 600) */
+ { 0, 0},
+};
+
+static struct amba_driver cti_driver = {
+ .drv = {
+ .name = "coresight-cti",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = cti_probe,
+ .id_table = cti_ids,
+};
+builtin_amba_driver(cti_driver);
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
new file mode 100644
index 000000000000..004df3ab9dd0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_CTI_H
+#define _CORESIGHT_CORESIGHT_CTI_H
+
+#include <asm/local.h>
+#include <linux/spinlock.h>
+#include "coresight-priv.h"
+
+/*
+ * Device registers
+ * 0x000 - 0x144: CTI programming and status
+ * 0xEDC - 0xEF8: CTI integration test.
+ * 0xF00 - 0xFFC: Coresight management registers.
+ */
+/* CTI programming registers */
+#define CTICONTROL 0x000
+#define CTIINTACK 0x010
+#define CTIAPPSET 0x014
+#define CTIAPPCLEAR 0x018
+#define CTIAPPPULSE 0x01C
+#define CTIINEN(n) (0x020 + (4 * n))
+#define CTIOUTEN(n) (0x0A0 + (4 * n))
+#define CTITRIGINSTATUS 0x130
+#define CTITRIGOUTSTATUS 0x134
+#define CTICHINSTATUS 0x138
+#define CTICHOUTSTATUS 0x13C
+#define CTIGATE 0x140
+#define ASICCTL 0x144
+/* Integration test registers */
+#define ITCHINACK 0xEDC /* WO CTI CSSoc 400 only */
+#define ITTRIGINACK 0xEE0 /* WO CTI CSSoc 400 only */
+#define ITCHOUT 0xEE4 /* WO RW-600 */
+#define ITTRIGOUT 0xEE8 /* WO RW-600 */
+#define ITCHOUTACK 0xEEC /* RO CTI CSSoc 400 only */
+#define ITTRIGOUTACK 0xEF0 /* RO CTI CSSoc 400 only */
+#define ITCHIN 0xEF4 /* RO */
+#define ITTRIGIN 0xEF8 /* RO */
+/* management registers */
+#define CTIDEVAFF0 0xFA8
+#define CTIDEVAFF1 0xFAC
+
+/*
+ * CTI CSSoc 600 has a max of 32 trigger signals per direction.
+ * CTI CSSoc 400 has 8 IO triggers - other CTIs can be implementation defined.
+ * Max of in and out defined in the DEVID register.
+ * - pick up actual number used from .dts parameters if present.
+ */
+#define CTIINOUTEN_MAX 32
+
+/**
+ * struct cti_trig_grp - Group of related trigger signals
+ *
+ * @nr_sigs: number of signals in the group.
+ * @used_mask: bitmask representing the signal indexes in the group.
+ * @sig_types: array of types for the signals, length nr_sigs.
+ */
+struct cti_trig_grp {
+ int nr_sigs;
+ u32 used_mask;
+ int sig_types[];
+};
+
+/**
+ * struct cti_trig_con - connection between a CTI and another (CoreSight)
+ * device; lists the input and output trigger signals for the device.
+ *
+ * @con_in: connected CTIIN signals for the device.
+ * @con_out: connected CTIOUT signals for the device.
+ * @con_dev: coresight device connected to the CTI, NULL if not CS device
+ * @con_dev_name: name of connected device (CS or CPU)
+ * @node: entry node in list of connections.
+ * @con_attrs: Dynamic sysfs attributes specific to this connection.
+ * @attr_group: Dynamic attribute group created for this connection.
+ */
+struct cti_trig_con {
+ struct cti_trig_grp *con_in;
+ struct cti_trig_grp *con_out;
+ struct coresight_device *con_dev;
+ const char *con_dev_name;
+ struct list_head node;
+ struct attribute **con_attrs;
+ struct attribute_group *attr_group;
+};
+
+/**
+ * struct cti_device - description of CTI device properties.
+ *
+ * @nr_trig_con: Number of external devices connected to this device.
+ * @ctm_id: which CTM this device is connected to (by default it is
+ * assumed there is a single CTM per SoC, ID 0).
+ * @trig_cons: list of connections to this device.
+ * @cpu: CPU ID if associated with CPU, -1 otherwise.
+ * @con_groups: combined static and dynamic sysfs groups for trigger
+ * connections.
+ */
+struct cti_device {
+ int nr_trig_con;
+ u32 ctm_id;
+ struct list_head trig_cons;
+ int cpu;
+ const struct attribute_group **con_groups;
+};
+
+/**
+ * struct cti_config - configuration of the CTI device hardware
+ *
+ * @nr_trig_max: Max number of trigger signals implemented on device.
+ * (max of trig_in or trig_out) - from ID register.
+ * @nr_ctm_channels: number of available CTM channels - from ID register.
+ * @enable_req_count: number of enable requests from associated devices;
+ * the CTI remains enabled while this count is non-zero.
+ * @hw_enabled: true if hw is currently enabled.
+ * @hw_powered: true if associated cpu powered on, or no cpu.
+ * @trig_in_use: bitfield of in triggers registered as in use.
+ * @trig_out_use: bitfield of out triggers registered as in use.
+ * @trig_out_filter: bitfield of out triggers that are blocked if filter
+ * enabled. Typically this would be dbgreq / restart on
+ * a core CTI.
+ * @trig_filter_enable: 1 if filtering enabled.
+ * @xtrig_rchan_sel: channel selection for xtrigger connection show.
+ * @ctiappset: CTI Software application channel set.
+ * @ctiinout_sel: register selector for INEN and OUTEN regs.
+ * @ctiinen: enable input trigger to a channel.
+ * @ctiouten: enable output trigger from a channel.
+ * @ctigate: gate channel output from CTI to CTM.
+ * @asicctl: asic control register.
+ */
+struct cti_config {
+ /* hardware description */
+ int nr_ctm_channels;
+ int nr_trig_max;
+
+ /* cti enable control */
+ atomic_t enable_req_count;
+ bool hw_enabled;
+ bool hw_powered;
+
+ /* registered triggers and filtering */
+ u32 trig_in_use;
+ u32 trig_out_use;
+ u32 trig_out_filter;
+ bool trig_filter_enable;
+ u8 xtrig_rchan_sel;
+
+ /* cti cross trig programmable regs */
+ u32 ctiappset;
+ u8 ctiinout_sel;
+ u32 ctiinen[CTIINOUTEN_MAX];
+ u32 ctiouten[CTIINOUTEN_MAX];
+ u32 ctigate;
+ u32 asicctl;
+};
+
+/**
+ * struct cti_drvdata - specifics for the CTI device
+ * @base: Memory mapped base address for this component.
+ * @csdev: Standard CoreSight device information.
+ * @ctidev: Extra information needed by the CTI/CTM framework.
+ * @spinlock: Control data access to one at a time.
+ * @config: Configuration data for this CTI device.
+ * @node: List entry of this device in the list of CTI devices.
+ * @csdev_release: release function for underlying coresight_device.
+ */
+struct cti_drvdata {
+ void __iomem *base;
+ struct coresight_device *csdev;
+ struct cti_device ctidev;
+ spinlock_t spinlock;
+ struct cti_config config;
+ struct list_head node;
+ void (*csdev_release)(struct device *dev);
+};
+
+/*
+ * Channel operation types.
+ */
+enum cti_chan_op {
+ CTI_CHAN_ATTACH,
+ CTI_CHAN_DETACH,
+};
+
+enum cti_trig_dir {
+ CTI_TRIG_IN,
+ CTI_TRIG_OUT,
+};
+
+enum cti_chan_gate_op {
+ CTI_GATE_CHAN_ENABLE,
+ CTI_GATE_CHAN_DISABLE,
+};
+
+enum cti_chan_set_op {
+ CTI_CHAN_SET,
+ CTI_CHAN_CLR,
+ CTI_CHAN_PULSE,
+};
+
+/* private cti driver fns & vars */
+extern const struct attribute_group *coresight_cti_groups[];
+int cti_add_default_connection(struct device *dev,
+ struct cti_drvdata *drvdata);
+int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc,
+ struct coresight_device *csdev,
+ const char *assoc_dev_name);
+struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
+ int out_sigs);
+int cti_enable(struct coresight_device *csdev);
+int cti_disable(struct coresight_device *csdev);
+void cti_write_all_hw_regs(struct cti_drvdata *drvdata);
+void cti_write_intack(struct device *dev, u32 ackval);
+void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value);
+int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
+ enum cti_trig_dir direction, u32 channel_idx,
+ u32 trigger_idx);
+int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
+ u32 channel_idx);
+int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
+ u32 channel_idx);
+int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata);
+struct coresight_platform_data *
+coresight_cti_get_platform_data(struct device *dev);
+const char *cti_plat_get_node_name(struct fwnode_handle *fwnode);
+
+/* cti powered and enabled */
+static inline bool cti_active(struct cti_config *cfg)
+{
+ return cfg->hw_powered && cfg->hw_enabled;
+}
+
+#endif /* _CORESIGHT_CORESIGHT_CTI_H */
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 3c5bee429105..43418a2126ff 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -57,6 +57,26 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode)
return bus_find_device_by_fwnode(&amba_bustype, fwnode);
}
+/*
+ * Find a registered coresight device from a device fwnode.
+ * The node info is associated with the AMBA parent, but the
+ * csdev keeps a copy so iterate round the coresight bus to
+ * find the device.
+ */
+struct coresight_device *
+coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode)
+{
+ struct device *dev;
+ struct coresight_device *csdev = NULL;
+
+ dev = bus_find_device_by_fwnode(&coresight_bustype, r_fwnode);
+ if (dev) {
+ csdev = to_coresight_device(dev);
+ put_device(dev);
+ }
+ return csdev;
+}
+
#ifdef CONFIG_OF
static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
{
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 82e563cdc879..890f9a5c97c6 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -22,6 +22,7 @@
#define CORESIGHT_CLAIMCLR 0xfa4
#define CORESIGHT_LAR 0xfb0
#define CORESIGHT_LSR 0xfb4
+#define CORESIGHT_DEVARCH 0xfbc
#define CORESIGHT_AUTHSTATUS 0xfb8
#define CORESIGHT_DEVID 0xfc8
#define CORESIGHT_DEVTYPE 0xfcc
@@ -161,6 +162,16 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
#endif
+#ifdef CONFIG_CORESIGHT_CTI
+extern void cti_add_assoc_to_csdev(struct coresight_device *csdev);
+extern void cti_remove_assoc_from_csdev(struct coresight_device *csdev);
+
+#else
+static inline void cti_add_assoc_to_csdev(struct coresight_device *csdev) {}
+static inline void
+cti_remove_assoc_from_csdev(struct coresight_device *csdev) {}
+#endif
+
/*
* Macros and inline functions to handle CoreSight UCI data and driver
* private data in AMBA ID table entries, and extract data values.
@@ -201,5 +212,9 @@ static inline void *coresight_get_uci_data(const struct amba_id *id)
}
void coresight_release_platform_data(struct coresight_platform_data *pdata);
+struct coresight_device *
+coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
+void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
+ struct coresight_device *ect_csdev);
#endif
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index ef20f74c85fa..c71553c09f8e 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -216,6 +216,44 @@ void coresight_disclaim_device(void __iomem *base)
CS_LOCK(base);
}
+/* enable or disable an associated CTI device of the supplied CS device */
+static int
+coresight_control_assoc_ectdev(struct coresight_device *csdev, bool enable)
+{
+ int ect_ret = 0;
+ struct coresight_device *ect_csdev = csdev->ect_dev;
+
+ if (!ect_csdev)
+ return 0;
+
+ if (enable) {
+ if (ect_ops(ect_csdev)->enable)
+ ect_ret = ect_ops(ect_csdev)->enable(ect_csdev);
+ } else {
+ if (ect_ops(ect_csdev)->disable)
+ ect_ret = ect_ops(ect_csdev)->disable(ect_csdev);
+ }
+
+ /* output warning if ECT enable is preventing trace operation */
+ if (ect_ret)
+ dev_info(&csdev->dev, "Associated ECT device (%s) %s failed\n",
+ dev_name(&ect_csdev->dev),
+ enable ? "enable" : "disable");
+ return ect_ret;
+}
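+
+/*
+ * For example, enabling a source with an associated CTI (see
+ * coresight_enable_source() below) enables the CTI first, and disables it
+ * again if the source itself then fails to enable.
+ */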
+
+/*
+ * Set the associated ect / cti device while holding the coresight_mutex
+ * to avoid a race with coresight_enable that may try to use this value.
+ */
+void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
+ struct coresight_device *ect_csdev)
+{
+ mutex_lock(&coresight_mutex);
+ csdev->ect_dev = ect_csdev;
+ mutex_unlock(&coresight_mutex);
+}
+
static int coresight_enable_sink(struct coresight_device *csdev,
u32 mode, void *data)
{
@@ -228,9 +266,14 @@ static int coresight_enable_sink(struct coresight_device *csdev,
if (!sink_ops(csdev)->enable)
return -EINVAL;
- ret = sink_ops(csdev)->enable(csdev, mode, data);
+ ret = coresight_control_assoc_ectdev(csdev, true);
if (ret)
return ret;
+ ret = sink_ops(csdev)->enable(csdev, mode, data);
+ if (ret) {
+ coresight_control_assoc_ectdev(csdev, false);
+ return ret;
+ }
csdev->enable = true;
return 0;
@@ -246,6 +289,7 @@ static void coresight_disable_sink(struct coresight_device *csdev)
ret = sink_ops(csdev)->disable(csdev);
if (ret)
return;
+ coresight_control_assoc_ectdev(csdev, false);
csdev->enable = false;
}
@@ -269,8 +313,15 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0)
return outport;
- if (link_ops(csdev)->enable)
- ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (link_ops(csdev)->enable) {
+ ret = coresight_control_assoc_ectdev(csdev, true);
+ if (!ret) {
+ ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (ret)
+ coresight_control_assoc_ectdev(csdev, false);
+ }
+ }
+
if (!ret)
csdev->enable = true;
@@ -300,8 +351,10 @@ static void coresight_disable_link(struct coresight_device *csdev,
nr_conns = 1;
}
- if (link_ops(csdev)->disable)
+ if (link_ops(csdev)->disable) {
link_ops(csdev)->disable(csdev, inport, outport);
+ coresight_control_assoc_ectdev(csdev, false);
+ }
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->refcnt[i]) != 0)
@@ -322,9 +375,14 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
if (!csdev->enable) {
if (source_ops(csdev)->enable) {
- ret = source_ops(csdev)->enable(csdev, NULL, mode);
+ ret = coresight_control_assoc_ectdev(csdev, true);
if (ret)
return ret;
+ ret = source_ops(csdev)->enable(csdev, NULL, mode);
+ if (ret) {
+ coresight_control_assoc_ectdev(csdev, false);
+ return ret;
+ }
}
csdev->enable = true;
}
@@ -347,6 +405,7 @@ static bool coresight_disable_source(struct coresight_device *csdev)
if (atomic_dec_return(csdev->refcnt) == 0) {
if (source_ops(csdev)->disable)
source_ops(csdev)->disable(csdev, NULL);
+ coresight_control_assoc_ectdev(csdev, false);
csdev->enable = false;
}
return !csdev->enable;
@@ -955,12 +1014,16 @@ static struct device_type coresight_dev_type[] = {
{
.name = "helper",
},
+ {
+ .name = "ect",
+ },
};
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
+ cti_remove_assoc_from_csdev(csdev);
fwnode_handle_put(csdev->dev.fwnode);
kfree(csdev->refcnt);
kfree(csdev);
@@ -1027,17 +1090,11 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_connection *conn = &csdev->pdata->conns[i];
- struct device *dev = NULL;
- dev = bus_find_device_by_fwnode(&coresight_bustype, conn->child_fwnode);
- if (dev) {
- conn->child_dev = to_coresight_device(dev);
- /* and put reference from 'bus_find_device()' */
- put_device(dev);
- } else {
+ conn->child_dev =
+ coresight_find_csdev_by_fwnode(conn->child_fwnode);
+ if (!conn->child_dev)
csdev->orphan = true;
- conn->child_dev = NULL;
- }
}
}
@@ -1249,6 +1306,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
coresight_fixup_device_conns(csdev);
coresight_fixup_orphan_conns(csdev);
+ cti_add_assoc_to_csdev(csdev);
mutex_unlock(&coresight_mutex);
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 6f4f5486fe6d..5fe694708b7a 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -47,11 +47,13 @@ struct intel_th_output {
/**
* struct intel_th_drvdata - describes hardware capabilities and quirks
* @tscu_enable: device needs SW to enable time stamping unit
+ * @multi_is_broken: device's multiblock mode is broken
* @has_mintctl: device has interrupt control (MINTCTL) register
* @host_mode_only: device can only operate in 'host debugger' mode
*/
struct intel_th_drvdata {
unsigned int tscu_enable : 1,
+ multi_is_broken : 1,
has_mintctl : 1,
host_mode_only : 1;
};
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 255f8f41c8ff..3a77551fb4fc 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -138,6 +138,7 @@ struct msc {
struct list_head win_list;
struct sg_table single_sgt;
struct msc_window *cur_win;
+ struct msc_window *switch_on_unlock;
unsigned long nr_pages;
unsigned long single_sz;
unsigned int single_wrap : 1;
@@ -154,10 +155,13 @@ struct msc {
struct list_head iter_list;
+ bool stop_on_full;
+
/* config */
unsigned int enabled : 1,
wrap : 1,
- do_irq : 1;
+ do_irq : 1,
+ multi_is_broken : 1;
unsigned int mode;
unsigned int burst_len;
unsigned int index;
@@ -1665,7 +1669,7 @@ static int intel_th_msc_init(struct msc *msc)
{
atomic_set(&msc->user_count, -1);
- msc->mode = MSC_MODE_MULTI;
+ msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
mutex_init(&msc->buf_mutex);
INIT_LIST_HEAD(&msc->win_list);
INIT_LIST_HEAD(&msc->iter_list);
@@ -1717,6 +1721,10 @@ void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
return;
msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
+ if (msc->switch_on_unlock == win) {
+ msc->switch_on_unlock = NULL;
+ msc_win_switch(msc);
+ }
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
@@ -1757,7 +1765,11 @@ static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
/* next window: if READY, proceed, if LOCKED, stop the trace */
if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
- schedule_work(&msc->work);
+ if (msc->stop_on_full)
+ schedule_work(&msc->work);
+ else
+ msc->switch_on_unlock = next_win;
+
return IRQ_HANDLED;
}
@@ -1877,6 +1889,9 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
return -EINVAL;
found:
+ if (i == MSC_MODE_MULTI && msc->multi_is_broken)
+ return -EOPNOTSUPP;
+
mutex_lock(&msc->buf_mutex);
ret = 0;
@@ -2047,11 +2062,36 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_WO(win_switch);
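+/*
+ * stop_on_full: sysfs knob (illustrative path
+ * /sys/bus/intel_th/devices/<N>-msc<M>/stop_on_full). When set, the trace
+ * is stopped if the next window is still locked by a reader; when clear
+ * (the default), the driver defers and switches into that window once it
+ * is unlocked - see intel_th_msc_window_unlock() above.
+ */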
+static ssize_t stop_on_full_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", msc->stop_on_full);
+}
+
+static ssize_t stop_on_full_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = kstrtobool(buf, &msc->stop_on_full);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(stop_on_full);
+
static struct attribute *msc_output_attrs[] = {
&dev_attr_wrap.attr,
&dev_attr_mode.attr,
&dev_attr_nr_pages.attr,
&dev_attr_win_switch.attr,
+ &dev_attr_stop_on_full.attr,
NULL,
};
@@ -2083,6 +2123,9 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
if (!res)
msc->do_irq = 1;
+ if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
+ msc->multi_is_broken = 1;
+
msc->index = thdev->id;
msc->thdev = thdev;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 86aa6a46bcba..7ccac74553a6 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -120,6 +120,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
+static const struct intel_th_drvdata intel_th_1x_multi_is_broken = {
+ .multi_is_broken = 1,
+};
+
static const struct intel_th_drvdata intel_th_2x = {
.tscu_enable = 1,
.has_mintctl = 1,
@@ -152,7 +156,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
{
/* Kaby Lake PCH-H */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
- .driver_data = (kernel_ulong_t)0,
+ .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken,
},
{
/* Denverton */
@@ -207,7 +211,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
{
/* Comet Lake PCH-V */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
- .driver_data = (kernel_ulong_t)&intel_th_2x,
+ .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken,
},
{
/* Ice Lake NNPI */
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 5ac93f41bfec..dff4e178c732 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -459,17 +459,17 @@ static int pca_init(struct i2c_adapter *adap)
/* To avoid integer overflow, use clock/100 for calculations */
clock = pca_clock(pca_data) / 100;
- if (pca_data->i2c_clock > 1000000) {
+ if (pca_data->i2c_clock > I2C_MAX_FAST_MODE_PLUS_FREQ) {
mode = I2C_PCA_MODE_TURBO;
min_tlow = 14;
min_thi = 5;
raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
- } else if (pca_data->i2c_clock > 400000) {
+ } else if (pca_data->i2c_clock > I2C_MAX_FAST_MODE_FREQ) {
mode = I2C_PCA_MODE_FASTP;
min_tlow = 17;
min_thi = 9;
raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
- } else if (pca_data->i2c_clock > 100000) {
+ } else if (pca_data->i2c_clock > I2C_MAX_STANDARD_MODE_FREQ) {
mode = I2C_PCA_MODE_FAST;
min_tlow = 44;
min_thi = 20;
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 1de23b4f3809..20ef63820c77 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -147,7 +147,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
(ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_TCT_SHFT);
u32 t_high, t_low;
- if (idev->bus_clk_rate <= 100000) {
+ if (idev->bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ) {
tmp &= ~ALTR_I2C_CTRL_BSPEED;
/* Standard mode SCL 50/50 */
t_high = divisor * 1 / 2;
@@ -423,10 +423,10 @@ static int altr_i2c_probe(struct platform_device *pdev)
&idev->bus_clk_rate);
if (val) {
dev_err(&pdev->dev, "Default to 100kHz\n");
- idev->bus_clk_rate = 100000; /* default clock rate */
+ idev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
}
- if (idev->bus_clk_rate > 400000) {
+ if (idev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ) {
dev_err(&pdev->dev, "invalid clock-frequency %d\n",
idev->bus_clk_rate);
return -EINVAL;
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index f5b3f00c6559..17df9e8845b6 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -201,32 +201,37 @@ static int i2c_amd_resume(struct amd_i2c_common *i2c_common)
}
#endif
+static const u32 supported_speeds[] = {
+ I2C_MAX_HIGH_SPEED_MODE_FREQ,
+ I2C_MAX_TURBO_MODE_FREQ,
+ I2C_MAX_FAST_MODE_PLUS_FREQ,
+ I2C_MAX_FAST_MODE_FREQ,
+ I2C_MAX_STANDARD_MODE_FREQ,
+};
+
static enum speed_enum i2c_amd_get_bus_speed(struct platform_device *pdev)
{
u32 acpi_speed;
int i;
- static const u32 supported_speeds[] = {
- 0, 100000, 400000, 1000000, 1400000, 3400000
- };
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
/* round down to the lowest standard speed */
- for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
- if (acpi_speed < supported_speeds[i])
+ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed >= supported_speeds[i])
break;
}
- acpi_speed = supported_speeds[i - 1];
+ acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
switch (acpi_speed) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
return speed100k;
- case 400000:
+ case I2C_MAX_FAST_MODE_FREQ:
return speed400k;
- case 1000000:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
return speed1000k;
- case 1400000:
+ case I2C_MAX_TURBO_MODE_FREQ:
return speed1400k;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
return speed3400k;
default:
return speed400k;
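As a sanity check of the new round-down logic, a standalone sketch with illustrative inputs (the array mirrors supported_speeds[] above, ordered highest to lowest): an ACPI speed of 600 kHz rounds down to 400 kHz, while one below 100 kHz yields 0 and falls back to speed400k in the switch default.

    #include <stdio.h>

    /* Illustrative re-run of the round-down loop with sample inputs. */
    static const unsigned int speeds[] = {
        3400000, 1400000, 1000000, 400000, 100000
    };

    static unsigned int round_down_speed(unsigned int acpi_speed)
    {
        unsigned int i;

        for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
            if (acpi_speed >= speeds[i])
                return speeds[i];
        return 0; /* slower than standard mode: caller uses the default */
    }

    int main(void)
    {
        printf("%u\n", round_down_speed(600000)); /* prints 400000 */
        printf("%u\n", round_down_speed(50000));  /* prints 0 */
        return 0;
    }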
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index a7be6f24450b..07c1993274c5 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -997,7 +997,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"Could not read bus-frequency property\n");
- bus->bus_frequency = 100000;
+ bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
}
match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index 7a862e00b475..0aba51a7df32 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -18,11 +18,13 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>
@@ -478,6 +480,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
unsigned long time_left;
bool has_unre_flag = dev->pdata->has_unre_flag;
bool has_alt_cmd = dev->pdata->has_alt_cmd;
+ struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
/*
* WARNING: the TXCOMP bit in the Status Register is NOT a clear on
@@ -637,6 +640,13 @@ error:
at91_twi_write(dev, AT91_TWI_CR,
AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
}
+
+ if (rinfo->get_sda && !(rinfo->get_sda(&dev->adapter))) {
+ dev_dbg(dev->dev,
+ "SDA is down; clear bus using gpio\n");
+ i2c_recover_bus(&dev->adapter);
+ }
+
return ret;
}
@@ -806,6 +816,70 @@ error:
return ret;
}
+static void at91_prepare_twi_recovery(struct i2c_adapter *adap)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(adap);
+
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
+}
+
+static void at91_unprepare_twi_recovery(struct i2c_adapter *adap)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(adap);
+
+ pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
+}
+
+static int at91_init_twi_recovery_info(struct platform_device *pdev,
+ struct at91_twi_dev *dev)
+{
+ struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
+
+ dev->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!dev->pinctrl || IS_ERR(dev->pinctrl)) {
+ dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
+ return PTR_ERR(dev->pinctrl);
+ }
+
+ dev->pinctrl_pins_default = pinctrl_lookup_state(dev->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
+ "gpio");
+ rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
+ if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
+ GPIOD_OUT_HIGH_OPEN_DRAIN);
+ if (PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(rinfo->sda_gpiod) ||
+ IS_ERR(rinfo->scl_gpiod) ||
+ IS_ERR(dev->pinctrl_pins_default) ||
+ IS_ERR(dev->pinctrl_pins_gpio)) {
+ dev_info(&pdev->dev, "recovery information incomplete\n");
+ if (!IS_ERR(rinfo->sda_gpiod)) {
+ devm_gpiod_put(&pdev->dev, rinfo->sda_gpiod);
+ rinfo->sda_gpiod = NULL;
+ }
+ if (!IS_ERR(rinfo->scl_gpiod)) {
+ devm_gpiod_put(&pdev->dev, rinfo->scl_gpiod);
+ rinfo->scl_gpiod = NULL;
+ }
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "using scl, sda for recovery\n");
+
+ rinfo->prepare_recovery = at91_prepare_twi_recovery;
+ rinfo->unprepare_recovery = at91_unprepare_twi_recovery;
+ rinfo->recover_bus = i2c_generic_scl_recovery;
+ dev->adapter.bus_recovery_info = rinfo;
+
+ return 0;
+}
+
int at91_twi_probe_master(struct platform_device *pdev,
u32 phy_addr, struct at91_twi_dev *dev)
{
@@ -838,6 +912,10 @@ int at91_twi_probe_master(struct platform_device *pdev,
"i2c-analog-filter");
at91_calc_twi_clock(dev);
+ rc = at91_init_twi_recovery_info(pdev, dev);
+ if (rc == -EPROBE_DEFER)
+ return rc;
+
dev->adapter.algo = &at91_twi_algorithm;
dev->adapter.quirks = &at91_twi_quirks;
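
i2c_generic_scl_recovery(), installed as the recover_bus hook here, is the core helper that pulses SCL until a stuck slave finishes shifting out its byte and releases SDA (at most nine clocks per the I2C specification). A simplified model of that loop, not the core's exact code, assuming GPIO-backed set_scl/get_sda callbacks like the ones registered above:

#define DEMO_RECOVERY_CLK_CNT	9	/* worst case: 8 data bits + ACK */

static int demo_scl_recovery(struct i2c_adapter *adap)
{
	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
	int i;

	for (i = 0; i < DEMO_RECOVERY_CLK_CNT; i++) {
		/* One SCL pulse lets the slave shift out one more bit. */
		bri->set_scl(adap, 0);
		udelay(5);			/* ~100 kHz half period */
		bri->set_scl(adap, 1);
		udelay(5);

		if (bri->get_sda(adap))
			return 0;		/* slave released SDA */
	}
	return -EBUSY;
}

The prepare/unprepare hooks matter because the same pads cannot be driven by the TWI block and as GPIOs at once; pinctrl muxes them to the "gpio" state for the duration of the pulse train and back afterwards.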
diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h
index 977a67bc0f88..f57a6cab96b4 100644
--- a/drivers/i2c/busses/i2c-at91.h
+++ b/drivers/i2c/busses/i2c-at91.h
@@ -151,6 +151,10 @@ struct at91_twi_dev {
u32 fifo_size;
struct at91_twi_dma dma;
bool slave_detected;
+ struct i2c_bus_recovery_info rinfo;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_pins_default;
+ struct pinctrl_state *pinctrl_pins_gpio;
#ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
unsigned smr;
struct i2c_client *slave;
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 0214daa913ff..be3681d08a8d 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -199,7 +199,7 @@ static int axxia_i2c_init(struct axxia_i2c_dev *idev)
/* Enable Master Mode */
writel(0x1, idev->base + GLOBAL_CONTROL);
- if (idev->bus_clk_rate <= 100000) {
+ if (idev->bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ) {
/* Standard mode SCL 50/50, tSU:DAT = 250 ns */
t_high = divisor * 1 / 2;
t_low = divisor * 1 / 2;
@@ -765,7 +765,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
of_property_read_u32(np, "clock-frequency", &idev->bus_clk_rate);
if (idev->bus_clk_rate == 0)
- idev->bus_clk_rate = 100000; /* default clock rate */
+ idev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
ret = clk_prepare_enable(idev->i2c_clk);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 30efb7913b2e..44be0926b566 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -858,25 +858,25 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
if (ret < 0) {
dev_info(iproc_i2c->device,
"unable to interpret clock-frequency DT property\n");
- bus_speed = 100000;
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
}
- if (bus_speed < 100000) {
+ if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) {
dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
bus_speed);
dev_err(iproc_i2c->device,
"valid speeds are 100khz and 400khz\n");
return -EINVAL;
- } else if (bus_speed < 400000) {
- bus_speed = 100000;
+ } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) {
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
} else {
- bus_speed = 400000;
+ bus_speed = I2C_MAX_FAST_MODE_FREQ;
}
iproc_i2c->bus_speed = bus_speed;
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
- val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+ val |= (bus_speed == I2C_MAX_FAST_MODE_FREQ) << TIM_CFG_MODE_400_SHIFT;
iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
@@ -1029,7 +1029,7 @@ static int bcm_iproc_i2c_resume(struct device *dev)
/* configure to the desired bus speed */
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
- val |= (iproc_i2c->bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+ val |= (iproc_i2c->bus_speed == I2C_MAX_FAST_MODE_FREQ) << TIM_CFG_MODE_400_SHIFT;
iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
bcm_iproc_i2c_enable_disable(iproc_i2c, true);
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 4e489a9d16fb..572aebbb254e 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -722,16 +722,16 @@ static int bcm_kona_i2c_assign_bus_speed(struct bcm_kona_i2c_dev *dev)
}
switch (bus_speed) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
dev->std_cfg = &std_cfg_table[BCM_SPD_100K];
break;
- case 400000:
+ case I2C_MAX_FAST_MODE_FREQ:
dev->std_cfg = &std_cfg_table[BCM_SPD_400K];
break;
- case 1000000:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
dev->std_cfg = &std_cfg_table[BCM_SPD_1MHZ];
break;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
/* Send mastercode at 100k */
dev->std_cfg = &std_cfg_table[BCM_SPD_100K];
dev->hs_cfg = &hs_cfg_table[BCM_SPD_3P4MHZ];
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 5ab901ad615d..d9b86fcc3825 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -439,7 +439,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_warn(&pdev->dev,
"Could not read clock-frequency property\n");
- bus_clk_rate = 100000;
+ bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;
}
ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 506991596b68..169a2836922d 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -580,6 +580,31 @@ static void brcmstb_i2c_set_bsc_reg_defaults(struct brcmstb_i2c_dev *dev)
brcmstb_i2c_set_bus_speed(dev);
}
+#define AUTOI2C_CTRL0 0x26c
+#define AUTOI2C_CTRL0_RELEASE_BSC BIT(1)
+
+static int bcm2711_release_bsc(struct brcmstb_i2c_dev *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->device);
+ struct resource *iomem;
+ void __iomem *autoi2c;
+
+ /* Map hardware registers */
+ iomem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "auto-i2c");
+ autoi2c = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(autoi2c))
+ return PTR_ERR(autoi2c);
+
+ writel(AUTOI2C_CTRL0_RELEASE_BSC, autoi2c + AUTOI2C_CTRL0);
+ devm_iounmap(&pdev->dev, autoi2c);
+
+ /* We need to reset the controller after the release */
+ dev->bsc_regmap->iic_enable = 0;
+ bsc_writel(dev, dev->bsc_regmap->iic_enable, iic_enable);
+
+ return 0;
+}
+
static int brcmstb_i2c_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -609,6 +634,13 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
goto probe_errorout;
}
+ if (of_device_is_compatible(dev->device->of_node,
+ "brcm,bcm2711-hdmi-i2c")) {
+ rc = bcm2711_release_bsc(dev);
+ if (rc)
+ goto probe_errorout;
+ }
+
rc = of_property_read_string(dev->device->of_node, "interrupt-names",
&int_name);
if (rc < 0)
@@ -705,6 +737,7 @@ static SIMPLE_DEV_PM_OPS(brcmstb_i2c_pm, brcmstb_i2c_suspend,
static const struct of_device_id brcmstb_i2c_of_match[] = {
{.compatible = "brcm,brcmstb-i2c"},
{.compatible = "brcm,brcmper-i2c"},
+ {.compatible = "brcm,bcm2711-hdmi-i2c"},
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_i2c_of_match);
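
On BCM2711 the HDMI BSC controllers come up owned by firmware ("auto-i2c"), so before normal init the driver writes a release bit through a second, named register window and then resets the controller. Generalized, this is a one-off quirk gated on the compatible string; a hedged sketch with hypothetical names:

/* Sketch: compatible-gated quirk via a second, named MMIO window. */
static int demo_release_from_fw(struct platform_device *pdev)
{
	struct resource *iomem;
	void __iomem *regs;

	if (!of_device_is_compatible(pdev->dev.of_node,
				     "vendor,fw-owned-i2c"))	/* hypothetical */
		return 0;

	iomem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "release");
	regs = devm_ioremap_resource(&pdev->dev, iomem);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	writel(BIT(1), regs);			/* hypothetical release bit */
	devm_iounmap(&pdev->dev, regs);		/* window needed only once */
	return 0;
}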
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 1105aee6634a..89d58f7d2a25 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -104,9 +104,6 @@
#define DRIVER_NAME "cdns-i2c"
-#define CDNS_I2C_SPEED_MAX 400000
-#define CDNS_I2C_SPEED_DEFAULT 100000
-
#define CDNS_I2C_DIVA_MAX 4
#define CDNS_I2C_DIVB_MAX 64
@@ -949,8 +946,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&id->i2c_clk);
- if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX))
- id->i2c_clk = CDNS_I2C_SPEED_DEFAULT;
+ if (ret || (id->i2c_clk > I2C_MAX_FAST_MODE_FREQ))
+ id->i2c_clk = I2C_MAX_STANDARD_MODE_FREQ;
cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS,
CDNS_I2C_CR_OFFSET);
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 33da07d64494..c6a7a00e1d52 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Intel BayTrail PMIC I2C bus semaphore implementaion
+ * Intel BayTrail PMIC I2C bus semaphore implementation
* Copyright (c) 2014, Intel Corporation.
*/
#include <linux/device.h>
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 2de7452fcd6d..c70c6fc09ee3 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -102,7 +102,7 @@ int i2c_dw_set_reg_access(struct dw_i2c_dev *dev)
i2c_dw_release_lock(dev);
if (reg == swab32(DW_IC_COMP_TYPE_VALUE)) {
- /* Configure register endianess access */
+ /* Configure register endianness access */
dev->flags |= ACCESS_SWAP;
} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
/* Configure register access mode 16bit */
@@ -190,10 +190,10 @@ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
/*
* Workaround for avoiding TX arbitration lost in case I2C
- * slave pulls SDA down "too quickly" after falling egde of
+ * slave pulls SDA down "too quickly" after falling edge of
* SCL by enabling non-zero SDA RX hold. Specification says it
* extends incoming SDA low to high transition while SCL is
- * high but it apprears to help also above issue.
+ * high, but it appears to help with the above issue as well.
*/
if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
@@ -344,6 +344,28 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
return -EIO;
}
+void i2c_dw_set_fifo_size(struct dw_i2c_dev *dev)
+{
+ u32 param, tx_fifo_depth, rx_fifo_depth;
+
+ /*
+ * Try to detect the FIFO depth if not set by the interface driver;
+ * per the HW spec, the depth can range from 2 to 256.
+ */
+ param = dw_readl(dev, DW_IC_COMP_PARAM_1);
+ tx_fifo_depth = ((param >> 16) & 0xff) + 1;
+ rx_fifo_depth = ((param >> 8) & 0xff) + 1;
+ if (!dev->tx_fifo_depth) {
+ dev->tx_fifo_depth = tx_fifo_depth;
+ dev->rx_fifo_depth = rx_fifo_depth;
+ } else if (tx_fifo_depth >= 2) {
+ dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
+ tx_fifo_depth);
+ dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
+ rx_fifo_depth);
+ }
+}
+
u32 i2c_dw_func(struct i2c_adapter *adap)
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
@@ -356,7 +378,7 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
/* Disable controller */
__i2c_dw_disable(dev);
- /* Disable all interupts */
+ /* Disable all interrupts */
dw_writel(dev, 0, DW_IC_INTR_MASK);
dw_readl(dev, DW_IC_CLR_INTR);
}
@@ -366,11 +388,5 @@ void i2c_dw_disable_int(struct dw_i2c_dev *dev)
dw_writel(dev, 0, DW_IC_INTR_MASK);
}
-u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
-{
- return dw_readl(dev, DW_IC_COMP_PARAM_1);
-}
-EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
-
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 67edbbde1070..b220ad64c38d 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -297,6 +297,7 @@ int i2c_dw_acquire_lock(struct dw_i2c_dev *dev);
void i2c_dw_release_lock(struct dw_i2c_dev *dev);
int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev);
int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev);
+void i2c_dw_set_fifo_size(struct dw_i2c_dev *dev);
u32 i2c_dw_func(struct i2c_adapter *adap);
void i2c_dw_disable(struct dw_i2c_dev *dev);
void i2c_dw_disable_int(struct dw_i2c_dev *dev);
@@ -313,7 +314,6 @@ static inline void __i2c_dw_disable_nowait(struct dw_i2c_dev *dev)
void __i2c_dw_disable(struct dw_i2c_dev *dev);
-extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
extern int i2c_dw_probe(struct dw_i2c_dev *dev);
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_SLAVE)
extern int i2c_dw_probe_slave(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e8b328242256..3a58eef20936 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -521,7 +521,7 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
/*
* The IC_INTR_STAT register just indicates "enabled" interrupts.
- * Ths unmasked raw version of interrupt status bits are available
+ * The unmasked raw version of interrupt status bits is available
* in the IC_RAW_INTR_STAT register.
*
* That is,
@@ -698,6 +698,8 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
if (ret)
return ret;
+ i2c_dw_set_fifo_size(dev);
+
ret = dev->init(dev);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 05b35ac33ce3..7a0b65b5b5b5 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -109,7 +109,7 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
/*
- * On Intel Merrifield the user visible i2c busses are enumerated
+ * On Intel Merrifield the user visible i2c buses are enumerated
* [1..7]. So, we add 1 to shift the default range. Besides that the
* first PCI slot provides 4 functions, that's why we have to add 0 to
* the first slot and 4 to the next one.
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 3b7d58c2fe85..c98befe2a92e 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -99,16 +99,16 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
switch (t->bus_freq_hz) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
dev->sda_hold_time = ss_ht;
break;
- case 1000000:
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
dev->sda_hold_time = fp_ht;
break;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
dev->sda_hold_time = hs_ht;
break;
- case 400000:
+ case I2C_MAX_FAST_MODE_FREQ:
default:
dev->sda_hold_time = fs_ht;
break;
@@ -198,10 +198,10 @@ static void i2c_dw_configure_master(struct dw_i2c_dev *dev)
dev->mode = DW_IC_MASTER;
switch (t->bus_freq_hz) {
- case 100000:
+ case I2C_MAX_STANDARD_MODE_FREQ:
dev->master_cfg |= DW_IC_CON_SPEED_STD;
break;
- case 3400000:
+ case I2C_MAX_HIGH_SPEED_MODE_FREQ:
dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
break;
default:
@@ -219,28 +219,6 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
dev->mode = DW_IC_SLAVE;
}
-static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev)
-{
- u32 param, tx_fifo_depth, rx_fifo_depth;
-
- /*
- * Try to detect the FIFO depth if not set by interface driver,
- * the depth could be from 2 to 256 from HW spec.
- */
- param = i2c_dw_read_comp_param(dev);
- tx_fifo_depth = ((param >> 16) & 0xff) + 1;
- rx_fifo_depth = ((param >> 8) & 0xff) + 1;
- if (!dev->tx_fifo_depth) {
- dev->tx_fifo_depth = tx_fifo_depth;
- dev->rx_fifo_depth = rx_fifo_depth;
- } else if (tx_fifo_depth >= 2) {
- dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
- tx_fifo_depth);
- dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
- rx_fifo_depth);
- }
-}
-
static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev)
{
pm_runtime_disable(dev->dev);
@@ -249,6 +227,13 @@ static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev)
pm_runtime_put_noidle(dev->dev);
}
+static const u32 supported_speeds[] = {
+ I2C_MAX_HIGH_SPEED_MODE_FREQ,
+ I2C_MAX_FAST_MODE_PLUS_FREQ,
+ I2C_MAX_FAST_MODE_FREQ,
+ I2C_MAX_STANDARD_MODE_FREQ,
+};
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -258,9 +243,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
u32 acpi_speed;
struct resource *mem;
int i, irq, ret;
- static const int supported_speeds[] = {
- 0, 100000, 400000, 1000000, 3400000
- };
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -296,11 +278,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Some DSTDs use a non standard speed, round down to the lowest
* standard speed.
*/
- for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
- if (acpi_speed < supported_speeds[i])
+ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed >= supported_speeds[i])
break;
}
- acpi_speed = supported_speeds[i - 1];
+ acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
/*
* Find bus speed from the "clock-frequency" device property, ACPI
@@ -311,7 +293,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
else if (acpi_speed || t->bus_freq_hz)
t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed);
else
- t->bus_freq_hz = 400000;
+ t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
dev->flags |= (uintptr_t)device_get_match_data(&pdev->dev);
@@ -325,8 +307,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
* Only standard mode at 100kHz, fast mode at 400kHz,
* fast mode plus at 1MHz and high speed mode at 3.4MHz are supported.
*/
- if (t->bus_freq_hz != 100000 && t->bus_freq_hz != 400000 &&
- t->bus_freq_hz != 1000000 && t->bus_freq_hz != 3400000) {
+ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (t->bus_freq_hz == supported_speeds[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(supported_speeds)) {
dev_err(&pdev->dev,
"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
t->bus_freq_hz);
@@ -362,8 +347,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
}
- dw_i2c_set_fifo_size(dev);
-
adap = &dev->adapter;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
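
With the table hoisted to file scope, one array now drives both the ACPI round-down and the final clock-frequency check, replacing the old chain of != comparisons. The validation half, sketched stand-alone (table order is irrelevant for a membership test):

static const unsigned int demo_speeds[] = {
	3400000, 1000000, 400000, 100000,
};

static bool demo_speed_supported(unsigned int hz)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_speeds); i++)
		if (hz == demo_speeds[i])
			return true;
	return false;
}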
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index f5f001738df5..f5ecf76c0d02 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -107,7 +107,7 @@ static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev)
/*
* The IC_INTR_STAT register just indicates "enabled" interrupts.
- * Ths unmasked raw version of interrupt status bits are available
+ * The unmasked raw version of interrupt status bits is available
* in the IC_RAW_INTR_STAT register.
*
* That is,
@@ -260,6 +260,8 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
if (ret)
return ret;
+ i2c_dw_set_fifo_size(dev);
+
ret = dev->init(dev);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 3adf72540db1..056a5c4f0833 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-#define DEFAULT_FREQ 100000
#define TIMEOUT_MS 100
#define II_CONTROL 0x0
@@ -300,7 +299,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&i2c->frequency))
- i2c->frequency = DEFAULT_FREQ;
+ i2c->frequency = I2C_MAX_STANDARD_MODE_FREQ;
i2c->dev = &pdev->dev;
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index 382f105e0fe3..b48b7888936f 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -64,8 +64,6 @@
#define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */
#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
-#define U2C_I2C_FREQ_FAST 400000
-#define U2C_I2C_FREQ_STD 100000
#define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10))
#define DIOLAN_USB_TIMEOUT 100 /* in ms */
@@ -87,7 +85,7 @@ struct i2c_diolan_u2c {
int ocount; /* Number of enqueued messages */
};
-static uint frequency = U2C_I2C_FREQ_STD; /* I2C clock frequency in Hz */
+static uint frequency = I2C_MAX_STANDARD_MODE_FREQ; /* I2C clock frequency in Hz */
module_param(frequency, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
@@ -299,12 +297,12 @@ static int diolan_init(struct i2c_diolan_u2c *dev)
{
int speed, ret;
- if (frequency >= 200000) {
+ if (frequency >= 2 * I2C_MAX_STANDARD_MODE_FREQ) {
speed = U2C_I2C_SPEED_FAST;
- frequency = U2C_I2C_FREQ_FAST;
- } else if (frequency >= 100000 || frequency == 0) {
+ frequency = I2C_MAX_FAST_MODE_FREQ;
+ } else if (frequency >= I2C_MAX_STANDARD_MODE_FREQ || frequency == 0) {
speed = U2C_I2C_SPEED_STD;
- frequency = U2C_I2C_FREQ_STD;
+ frequency = I2C_MAX_STANDARD_MODE_FREQ;
} else {
speed = U2C_I2C_SPEED(frequency);
if (speed > U2C_I2C_SPEED_2KHZ)
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index a8c6323e7f44..18cca8f56da8 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -388,7 +388,7 @@ static int efm32_i2c_probe(struct platform_device *pdev)
if (!ret) {
dev_dbg(&pdev->dev, "using frequency %u\n", frequency);
} else {
- frequency = 100000;
+ frequency = I2C_MAX_STANDARD_MODE_FREQ;
dev_info(&pdev->dev, "defaulting to 100 kHz\n");
}
ddata->frequency = frequency;
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index e7514c16b756..527030953ba1 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -164,13 +164,6 @@
#define HSI2C_MASTER_ID(x) ((x & 0xff) << 24)
#define MASTER_ID(x) ((x & 0x7) + 0x08)
-/*
- * Controller operating frequency, timing values for operation
- * are calculated against this frequency
- */
-#define HSI2C_HS_TX_CLOCK 1000000
-#define HSI2C_FS_TX_CLOCK 100000
-
#define EXYNOS5_I2C_TIMEOUT (msecs_to_jiffies(100))
enum i2c_type_exynos {
@@ -264,6 +257,9 @@ static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c)
* exynos5_i2c_set_timing: updates the registers with appropriate
* timing values calculated
*
+ * Timing values for operation are calculated against either 100kHz
+ * or 1MHz controller operating frequency.
+ *
* Returns 0 on success, -EINVAL if the cycle length cannot
* be calculated.
*/
@@ -281,7 +277,7 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
unsigned int t_ftl_cycle;
unsigned int clkin = clk_get_rate(i2c->clk);
unsigned int op_clk = hs_timings ? i2c->op_clock :
- (i2c->op_clock >= HSI2C_HS_TX_CLOCK) ? HSI2C_FS_TX_CLOCK :
+ (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ) ? I2C_MAX_STANDARD_MODE_FREQ :
i2c->op_clock;
int div, clk_cycle, temp;
@@ -353,7 +349,7 @@ static int exynos5_hsi2c_clock_setup(struct exynos5_i2c *i2c)
/* always set Fast Speed timings */
int ret = exynos5_i2c_set_timing(i2c, false);
- if (ret < 0 || i2c->op_clock < HSI2C_HS_TX_CLOCK)
+ if (ret < 0 || i2c->op_clock < I2C_MAX_FAST_MODE_PLUS_FREQ)
return ret;
return exynos5_i2c_set_timing(i2c, true);
@@ -376,7 +372,7 @@ static void exynos5_i2c_init(struct exynos5_i2c *i2c)
i2c->regs + HSI2C_CTL);
writel(HSI2C_TRAILING_COUNT, i2c->regs + HSI2C_TRAILIG_CTL);
- if (i2c->op_clock >= HSI2C_HS_TX_CLOCK) {
+ if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ) {
writel(HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr)),
i2c->regs + HSI2C_ADDR);
i2c_conf |= HSI2C_HS_MODE;
@@ -748,7 +744,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
if (of_property_read_u32(np, "clock-frequency", &i2c->op_clock))
- i2c->op_clock = HSI2C_FS_TX_CLOCK;
+ i2c->op_clock = I2C_MAX_STANDARD_MODE_FREQ;
strlcpy(i2c->adap.name, "exynos5-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
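
Dropping the HSI2C_*_TX_CLOCK defines is safe because the generic constants encode the same cut-offs: a bus asking for at least fast-mode-plus rates runs in HS mode, but its fast-speed phase (master code, arbitration) is still timed at 100 kHz, which is what the ?: chain in exynos5_i2c_set_timing() selects. A sketch of that phase-clock choice, under those assumptions:

/* Sketch: pick the clock used to compute FS- vs HS-phase timings. */
static unsigned int demo_phase_clock(unsigned int op_clock, bool hs_phase)
{
	if (hs_phase)
		return op_clock;	/* HS registers use the full rate */

	/* FS registers: an HS bus still opens at standard speed. */
	return op_clock >= 1000000 ? 100000 : op_clock;
}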
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 224f830f77f9..6610304b6dc6 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -68,8 +68,6 @@
#define I2C_ARBITRATE_INTR BIT(1)
#define I2C_OVER_INTR BIT(0)
-#define HIX5I2C_MAX_FREQ 400000 /* 400k */
-
enum hix5hd2_i2c_state {
HIX5I2C_STAT_RW_ERR = -1,
HIX5I2C_STAT_INIT,
@@ -400,12 +398,12 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-frequency", &freq)) {
/* use 100k as default value */
- priv->freq = 100000;
+ priv->freq = I2C_MAX_STANDARD_MODE_FREQ;
} else {
- if (freq > HIX5I2C_MAX_FREQ) {
- priv->freq = HIX5I2C_MAX_FREQ;
+ if (freq > I2C_MAX_FAST_MODE_FREQ) {
+ priv->freq = I2C_MAX_FAST_MODE_FREQ;
dev_warn(priv->dev, "use max freq %d instead\n",
- HIX5I2C_MAX_FREQ);
+ I2C_MAX_FAST_MODE_FREQ);
} else {
priv->freq = freq;
}
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 20a4fbc53007..422097a31c95 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -304,7 +304,7 @@ static struct img_i2c_timings timings[] = {
/* Standard mode */
{
.name = "standard",
- .max_bitrate = 100000,
+ .max_bitrate = I2C_MAX_STANDARD_MODE_FREQ,
.tckh = 4000,
.tckl = 4700,
.tsdh = 4700,
@@ -316,7 +316,7 @@ static struct img_i2c_timings timings[] = {
/* Fast mode */
{
.name = "fast",
- .max_bitrate = 400000,
+ .max_bitrate = I2C_MAX_FAST_MODE_FREQ,
.tckh = 600,
.tckl = 1300,
.tsdh = 600,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index c92b56485fa6..94743ba581fe 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -75,12 +75,6 @@
#define I2C_CLK_RATIO 2
#define CHUNK_DATA 256
-#define LPI2C_DEFAULT_RATE 100000
-#define STARDARD_MAX_BITRATE 400000
-#define FAST_MAX_BITRATE 1000000
-#define FAST_PLUS_MAX_BITRATE 3400000
-#define HIGHSPEED_MAX_BITRATE 5000000
-
#define I2C_PM_TIMEOUT 10 /* ms */
enum lpi2c_imx_mode {
@@ -152,13 +146,13 @@ static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
unsigned int bitrate = lpi2c_imx->bitrate;
enum lpi2c_imx_mode mode;
- if (bitrate < STARDARD_MAX_BITRATE)
+ if (bitrate < I2C_MAX_FAST_MODE_FREQ)
mode = STANDARD;
- else if (bitrate < FAST_MAX_BITRATE)
+ else if (bitrate < I2C_MAX_FAST_MODE_PLUS_FREQ)
mode = FAST;
- else if (bitrate < FAST_PLUS_MAX_BITRATE)
+ else if (bitrate < I2C_MAX_HIGH_SPEED_MODE_FREQ)
mode = FAST_PLUS;
- else if (bitrate < HIGHSPEED_MAX_BITRATE)
+ else if (bitrate < I2C_MAX_ULTRA_FAST_MODE_FREQ)
mode = HS;
else
mode = ULTRA_FAST;
@@ -578,7 +572,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &lpi2c_imx->bitrate);
if (ret)
- lpi2c_imx->bitrate = LPI2C_DEFAULT_RATE;
+ lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
pdev->name, lpi2c_imx);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a3b61336fe55..0ab5381aa012 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -49,9 +50,6 @@
/* This will be the driver name the kernel reports */
#define DRIVER_NAME "imx-i2c"
-/* Default value */
-#define IMX_I2C_BIT_RATE 100000 /* 100kHz */
-
/*
* Enable DMA if transfer byte size is bigger than this threshold.
* As the hardware request, it must bigger than 4 bytes.\
@@ -414,7 +412,7 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
dma->chan_using = NULL;
}
-static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
+static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
{
unsigned long orig_jiffies = jiffies;
unsigned int temp;
@@ -444,15 +442,37 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
"<%s> I2C bus is busy\n", __func__);
return -ETIMEDOUT;
}
- schedule();
+ if (atomic)
+ udelay(100);
+ else
+ schedule();
}
return 0;
}
-static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
+static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
{
- wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+ if (atomic) {
+ void __iomem *addr = i2c_imx->base + (IMX_I2C_I2SR << i2c_imx->hwdata->regshift);
+ unsigned int regval;
+
+ /*
+ * The formula for the poll timeout is documented in the RM
+ * Rev.5 on page 1878:
+ * T_min = 10/F_scl
+ * Hard-code the value, as is done for the non-atomic use case.
+ * Use 10 kHz for the calculation since this is the minimum
+ * allowed SMBus frequency. Also add an offset of 100us since it
+ * turned out that the I2SR_IIF bit isn't set correctly within
+ * the minimum timeout in polling mode.
+ */
+ readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100);
+ i2c_imx->i2csr = regval;
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ } else {
+ wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+ }
if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
@@ -530,7 +550,7 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
+static int i2c_imx_start(struct imx_i2c_struct *i2c_imx, bool atomic)
{
unsigned int temp = 0;
int result;
@@ -543,23 +563,29 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR);
/* Wait controller to be stable */
- usleep_range(50, 150);
+ if (atomic)
+ udelay(50);
+ else
+ usleep_range(50, 150);
/* Start I2C transaction */
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_MSTA;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- result = i2c_imx_bus_busy(i2c_imx, 1);
+ result = i2c_imx_bus_busy(i2c_imx, 1, atomic);
if (result)
return result;
temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
+ if (atomic)
+ temp &= ~I2CR_IIEN; /* Disable interrupt */
+
temp &= ~I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
return result;
}
-static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
+static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
{
unsigned int temp = 0;
@@ -581,7 +607,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
}
if (!i2c_imx->stopped)
- i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx_bus_busy(i2c_imx, 0, atomic);
/* Disable I2C controller */
temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
@@ -662,7 +688,7 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
/* The last data byte must be transferred by the CPU. */
imx_i2c_write_reg(msgs->buf[msgs->len-1],
i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, false);
if (result)
return result;
@@ -721,7 +747,7 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
/* read n byte data */
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, false);
if (result)
return result;
@@ -734,7 +760,7 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx_bus_busy(i2c_imx, 0, false);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -752,7 +778,8 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
return 0;
}
-static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
+static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
+ bool atomic)
{
int i, result;
@@ -761,7 +788,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
/* write slave address */
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -775,7 +802,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
"<%s> write byte: B%d=0x%X\n",
__func__, i, msgs->buf[i]);
imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -785,7 +812,8 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
return 0;
}
-static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bool is_lastmsg)
+static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
+ bool is_lastmsg, bool atomic)
{
int i, result;
unsigned int temp;
@@ -798,7 +826,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
/* write slave address */
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -831,7 +859,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
for (i = 0; i < msgs->len; i++) {
u8 len = 0;
- result = i2c_imx_trx_complete(i2c_imx);
+ result = i2c_imx_trx_complete(i2c_imx, atomic);
if (result)
return result;
/*
@@ -859,7 +887,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx_bus_busy(i2c_imx, 0, atomic);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -890,8 +918,8 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
return 0;
}
-static int i2c_imx_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs, int num)
+static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num, bool atomic)
{
unsigned int i, temp;
int result;
@@ -900,16 +928,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
- result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
- if (result < 0)
- goto out;
-
/* Start I2C transfer */
- result = i2c_imx_start(i2c_imx);
+ result = i2c_imx_start(i2c_imx, atomic);
if (result) {
- if (i2c_imx->adapter.bus_recovery_info) {
+ /*
+ * Bus recovery uses gpiod_get_value_cansleep() which is not
+ * allowed within atomic context.
+ */
+ if (!atomic && i2c_imx->adapter.bus_recovery_info) {
i2c_recover_bus(&i2c_imx->adapter);
- result = i2c_imx_start(i2c_imx);
+ result = i2c_imx_start(i2c_imx, atomic);
}
}
@@ -927,7 +955,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_RSTA;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- result = i2c_imx_bus_busy(i2c_imx, 1);
+ result = i2c_imx_bus_busy(i2c_imx, 1, atomic);
if (result)
goto fail0;
}
@@ -951,13 +979,14 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
(temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
(temp & I2SR_RXAK ? 1 : 0));
#endif
- if (msgs[i].flags & I2C_M_RD)
- result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg);
- else {
- if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
+ if (msgs[i].flags & I2C_M_RD) {
+ result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic);
+ } else {
+ if (!atomic &&
+ i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD)
result = i2c_imx_dma_write(i2c_imx, &msgs[i]);
else
- result = i2c_imx_write(i2c_imx, &msgs[i]);
+ result = i2c_imx_write(i2c_imx, &msgs[i], atomic);
}
if (result)
goto fail0;
@@ -965,18 +994,49 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
fail0:
/* Stop I2C transfer */
- i2c_imx_stop(i2c_imx);
-
- pm_runtime_mark_last_busy(i2c_imx->adapter.dev.parent);
- pm_runtime_put_autosuspend(i2c_imx->adapter.dev.parent);
+ i2c_imx_stop(i2c_imx, atomic);
-out:
dev_dbg(&i2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
(result < 0) ? "error" : "success msg",
(result < 0) ? result : num);
return (result < 0) ? result : num;
}
+static int i2c_imx_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
+ int result;
+
+ result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
+ if (result < 0)
+ return result;
+
+ result = i2c_imx_xfer_common(adapter, msgs, num, false);
+
+ pm_runtime_mark_last_busy(i2c_imx->adapter.dev.parent);
+ pm_runtime_put_autosuspend(i2c_imx->adapter.dev.parent);
+
+ return result;
+}
+
+static int i2c_imx_xfer_atomic(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
+ int result;
+
+ result = clk_enable(i2c_imx->clk);
+ if (result)
+ return result;
+
+ result = i2c_imx_xfer_common(adapter, msgs, num, true);
+
+ clk_disable(i2c_imx->clk);
+
+ return result;
+}
+
static void i2c_imx_prepare_recovery(struct i2c_adapter *adap)
{
struct imx_i2c_struct *i2c_imx;
@@ -1049,8 +1109,9 @@ static u32 i2c_imx_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm i2c_imx_algo = {
- .master_xfer = i2c_imx_xfer,
- .functionality = i2c_imx_func,
+ .master_xfer = i2c_imx_xfer,
+ .master_xfer_atomic = i2c_imx_xfer_atomic,
+ .functionality = i2c_imx_func,
};
static int i2c_imx_probe(struct platform_device *pdev)
@@ -1066,10 +1127,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "<%s>\n", __func__);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "can't get irq number\n");
+ if (irq < 0)
return irq;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -1139,7 +1198,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto rpm_disable;
/* Set up clock divider */
- i2c_imx->bitrate = IMX_I2C_BIT_RATE;
+ i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
ret = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &i2c_imx->bitrate);
if (ret < 0 && pdata && pdata->bitrate)
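
The atomic path cannot sleep or rely on interrupts, so i2c_imx_start() masks IIEN and i2c_imx_trx_complete() busy-polls I2SR with readb_poll_timeout_atomic(); the budget is T_min = 10/F_scl at the 10 kHz SMBus floor (1 ms) plus the 100 us slack noted in the comment. A reduced sketch of the dual wait strategy, with hypothetical DEMO_* names:

/* Sketch: wait for transfer complete, by IRQ normally, by polling if atomic. */
static int demo_trx_complete(struct demo_i2c *priv, bool atomic)
{
	if (atomic) {
		u8 sr;

		/* 5 us poll interval; 1000 us budget + 100 us slack. */
		if (readb_poll_timeout_atomic(priv->base + DEMO_SR, sr,
					      sr & DEMO_SR_IIF, 5, 1100))
			return -ETIMEDOUT;
		writeb(0, priv->base + DEMO_SR);	/* ack the event */
		return 0;
	}

	/* Normal path: the ISR latches the status and wakes the queue. */
	if (!wait_event_timeout(priv->queue, priv->sr & DEMO_SR_IIF, HZ / 10))
		return -ETIMEDOUT;
	return 0;
}

master_xfer_atomic is what the core invokes for late transfers with interrupts already disabled, for example a PMIC power-off command over I2C during shutdown.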
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index deea18b14add..13b0c12e2dba 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -396,7 +396,7 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&bus_clk_rate);
if (ret)
- bus_clk_rate = 100000; /* 100 kHz default clock rate */
+ bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;
clkrate = clk_get_rate(i2c->clk);
if (clkrate == 0) {
@@ -407,9 +407,9 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
/* Setup I2C dividers to generate clock with proper duty cycle */
clkrate = clkrate / bus_clk_rate;
- if (bus_clk_rate <= 100000)
+ if (bus_clk_rate <= I2C_MAX_STANDARD_MODE_FREQ)
scl_high = (clkrate * I2C_STD_MODE_DUTY) / 100;
- else if (bus_clk_rate <= 400000)
+ else if (bus_clk_rate <= I2C_MAX_FAST_MODE_FREQ)
scl_high = (clkrate * I2C_FAST_MODE_DUTY) / 100;
else
scl_high = (clkrate * I2C_FAST_MODE_PLUS_DUTY) / 100;
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 2152ec5f535c..0ca6c38a15eb 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -56,9 +56,6 @@
#define I2C_DMA_4G_MODE 0x0001
#define I2C_DEFAULT_CLK_DIV 5
-#define I2C_DEFAULT_SPEED 100000 /* hz */
-#define MAX_FS_MODE_SPEED 400000
-#define MAX_HS_MODE_SPEED 3400000
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
#define MAX_HS_STEP_CNT_DIV 8
@@ -450,10 +447,10 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
unsigned int best_mul;
unsigned int cnt_mul;
- if (target_speed > MAX_HS_MODE_SPEED)
- target_speed = MAX_HS_MODE_SPEED;
+ if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
+ target_speed = I2C_MAX_FAST_MODE_PLUS_FREQ;
- if (target_speed > MAX_FS_MODE_SPEED)
+ if (target_speed > I2C_MAX_FAST_MODE_FREQ)
max_step_cnt = MAX_HS_STEP_CNT_DIV;
else
max_step_cnt = MAX_STEP_CNT_DIV;
@@ -514,9 +511,9 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
clk_src = parent_clk / i2c->clk_src_div;
target_speed = i2c->speed_hz;
- if (target_speed > MAX_FS_MODE_SPEED) {
+ if (target_speed > I2C_MAX_FAST_MODE_FREQ) {
/* Set master code speed register */
- ret = mtk_i2c_calculate_speed(i2c, clk_src, MAX_FS_MODE_SPEED,
+ ret = mtk_i2c_calculate_speed(i2c, clk_src, I2C_MAX_FAST_MODE_FREQ,
&l_step_cnt, &l_sample_cnt);
if (ret < 0)
return ret;
@@ -581,7 +578,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
- if ((i2c->speed_hz > MAX_FS_MODE_SPEED) || (left_num >= 1))
+ if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
control_reg |= I2C_CONTROL_RS;
if (i2c->op == I2C_MASTER_WRRD)
@@ -590,7 +587,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
/* set start condition */
- if (i2c->speed_hz <= I2C_DEFAULT_SPEED)
+ if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
mtk_i2c_writew(i2c, I2C_ST_START_CON, OFFSET_EXT_CONF);
else
mtk_i2c_writew(i2c, I2C_FS_START_CON, OFFSET_EXT_CONF);
@@ -798,7 +795,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
}
}
- if (i2c->auto_restart && num >= 2 && i2c->speed_hz > MAX_FS_MODE_SPEED)
+ if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
/* ignore the first restart irq after the master code,
* otherwise the first transfer will be discarded.
*/
@@ -893,7 +890,7 @@ static int mtk_i2c_parse_dt(struct device_node *np, struct mtk_i2c *i2c)
ret = of_property_read_u32(np, "clock-frequency", &i2c->speed_hz);
if (ret < 0)
- i2c->speed_hz = I2C_DEFAULT_SPEED;
+ i2c->speed_hz = I2C_MAX_STANDARD_MODE_FREQ;
ret = of_property_read_u32(np, "clock-div", &i2c->clk_src_div);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 62df8379bc89..45fe4a7fe0c0 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -300,7 +300,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&i2c->bus_freq))
- i2c->bus_freq = 100000;
+ i2c->bus_freq = I2C_MAX_STANDARD_MODE_FREQ;
if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index febb7c7ea72b..9b8f1d8552ea 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -810,7 +810,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
tclk = clk_get_rate(drv_data->clk);
if (of_property_read_u32(np, "clock-frequency", &bus_freq))
- bus_freq = 100000; /* 100kHz by default */
+ bus_freq = I2C_MAX_STANDARD_MODE_FREQ; /* 100kHz by default */
if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") ||
of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
@@ -846,14 +846,14 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
drv_data->offload_enabled = true;
/* The delay is only needed in standard mode (100kHz) */
- if (bus_freq <= 100000)
+ if (bus_freq <= I2C_MAX_STANDARD_MODE_FREQ)
drv_data->errata_delay = true;
}
if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
drv_data->offload_enabled = false;
/* The delay is only needed in standard mode (100kHz) */
- if (bus_freq <= 100000)
+ if (bus_freq <= I2C_MAX_STANDARD_MODE_FREQ)
drv_data->errata_delay = true;
}
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 89224913f578..9587347447f0 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -731,7 +731,7 @@ static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, uint32_t speed)
* This is compensated for by subtracting the respective constants
* from the values written to the timing registers.
*/
- if (speed > 100000) {
+ if (speed > I2C_MAX_STANDARD_MODE_FREQ) {
/* fast mode */
low_count = DIV_ROUND_CLOSEST(divider * 13, (13 + 6));
high_count = DIV_ROUND_CLOSEST(divider * 6, (13 + 6));
@@ -769,7 +769,7 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
ret = of_property_read_u32(node, "clock-frequency", &speed);
if (ret) {
dev_warn(dev, "No I2C speed selected, using 100kHz\n");
- speed = 100000;
+ speed = I2C_MAX_STANDARD_MODE_FREQ;
}
mxs_i2c_derive_timing(i2c, speed);
@@ -836,10 +836,10 @@ static int mxs_i2c_probe(struct platform_device *pdev)
}
/* Setup the DMA */
- i2c->dmach = dma_request_slave_channel(dev, "rx-tx");
- if (!i2c->dmach) {
+ i2c->dmach = dma_request_chan(dev, "rx-tx");
+ if (IS_ERR(i2c->dmach)) {
dev_err(dev, "Failed to request dma\n");
- return -ENODEV;
+ return PTR_ERR(i2c->dmach);
}
platform_set_drvdata(pdev, i2c);
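
The DMA request switch is a behavioral fix as much as an API update: dma_request_slave_channel() returns NULL on every failure, collapsing -EPROBE_DEFER into a hard -ENODEV, while dma_request_chan() returns an ERR_PTR that can be propagated so the probe retries once the DMA controller shows up. The pattern, sketched:

/* Sketch: request a DMA channel without losing -EPROBE_DEFER. */
static int demo_request_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx-tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	*out = chan;
	return 0;
}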
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 01a7d72e5511..e1e8d4ef9aa7 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -396,7 +396,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* 2 whereas it is 3 for fast and fastplus mode of
* operation. TODO - high speed support.
*/
- div = (dev->clk_freq > 100000) ? 3 : 2;
+ div = (dev->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
/*
* generate the mask for baud rate counters. The controller
@@ -420,7 +420,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
if (dev->sm > I2C_FREQ_MODE_FAST) {
dev_err(&dev->adev->dev,
"do not support this mode defaulting to std. mode\n");
- brcr2 = i2c_clk/(100000 * 2) & 0xffff;
+ brcr2 = i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2) & 0xffff;
writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
writel(I2C_FREQ_MODE_STANDARD << 4,
dev->virtbase + I2C_CR);
@@ -949,10 +949,10 @@ static void nmk_i2c_of_probe(struct device_node *np,
{
/* Default to 100 kHz if no frequency is given in the node */
if (of_property_read_u32(np, "clock-frequency", &nmk->clk_freq))
- nmk->clk_freq = 100000;
+ nmk->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
/* This driver only supports 'standard' and 'fast' modes of operation. */
- if (nmk->clk_freq <= 100000)
+ if (nmk->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
nmk->sm = I2C_FREQ_MODE_STANDARD;
else
nmk->sm = I2C_FREQ_MODE_FAST;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 2dfea357b131..71b4637c86b7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1355,7 +1355,6 @@ omap_i2c_probe(struct platform_device *pdev)
{
struct omap_i2c_dev *omap;
struct i2c_adapter *adap;
- struct resource *mem;
const struct omap_i2c_bus_platform_data *pdata =
dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
@@ -1375,14 +1374,13 @@ omap_i2c_probe(struct platform_device *pdev)
if (!omap)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- omap->base = devm_ioremap_resource(&pdev->dev, mem);
+ omap->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(omap->base))
return PTR_ERR(omap->base);
match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
if (match) {
- u32 freq = 100000; /* default to 100000 Hz */
+ u32 freq = I2C_MAX_STANDARD_MODE_FREQ;
pdata = match->data;
omap->flags = pdata->flags;
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
index b6b5a495118b..3ab8be62c581 100644
--- a/drivers/i2c/busses/i2c-owl.c
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -87,9 +87,6 @@
#define OWL_I2C_MAX_RETRIES 50
-#define OWL_I2C_DEF_SPEED_HZ 100000
-#define OWL_I2C_MAX_SPEED_HZ 400000
-
struct owl_i2c_dev {
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -419,11 +416,11 @@ static int owl_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency",
&i2c_dev->bus_freq))
- i2c_dev->bus_freq = OWL_I2C_DEF_SPEED_HZ;
+ i2c_dev->bus_freq = I2C_MAX_STANDARD_MODE_FREQ;
/* We support only frequencies of 100k and 400k for now */
- if (i2c_dev->bus_freq != OWL_I2C_DEF_SPEED_HZ &&
- i2c_dev->bus_freq != OWL_I2C_MAX_SPEED_HZ) {
+ if (i2c_dev->bus_freq != I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_freq != I2C_MAX_FAST_MODE_FREQ) {
dev_err(dev, "invalid clock-frequency %d\n", i2c_dev->bus_freq);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 81eb441b2387..a535889acca6 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -333,13 +333,17 @@ static void i2c_parport_attach(struct parport *port)
/* Setup SMBus alert if supported */
if (adapter_parm[type].smbus_alert) {
- adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
- &adapter->alert_data);
- if (adapter->ara)
+ struct i2c_client *ara;
+
+ ara = i2c_new_smbus_alert_device(&adapter->adapter,
+ &adapter->alert_data);
+ if (!IS_ERR(ara)) {
+ adapter->ara = ara;
parport_enable_irq(port);
- else
+ } else {
dev_warn(&adapter->pdev->dev,
"Failed to register ARA client\n");
+ }
}
/* Add the new adapter to the list */
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 973e5339033c..d565714c1f13 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -279,14 +279,13 @@ static bool i2c_powermac_get_type(struct i2c_adapter *adap,
{
char tmp[16];
- /* Note: we to _NOT_ want the standard
- * i2c drivers to match with any of our powermac stuff
- * unless they have been specifically modified to handle
- * it on a case by case basis. For example, for thermal
- * control, things like lm75 etc... shall match with their
- * corresponding windfarm drivers, _NOT_ the generic ones,
- * so we force a prefix of AAPL, onto the modalias to
- * make that happen
+ /*
+ * Note: we do _NOT_ want the standard i2c drivers to match with any of
+ * our powermac stuff unless they have been specifically modified to
+ * handle it on a case by case basis. For example, for thermal control,
+ * things like lm75 etc... shall match with their corresponding
+ * windfarm drivers, _NOT_ the generic ones, so we force a prefix of
+ * 'MAC', onto the modalias to make that happen
*/
/* First try proper modalias */
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 17abf60c94ae..18d1e4fd4cf3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
@@ -502,45 +501,40 @@ static int geni_i2c_probe(struct platform_device *pdev)
struct resource *res;
u32 proto, tx_depth;
int ret;
+ struct device *dev = &pdev->dev;
- gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
+ gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL);
if (!gi2c)
return -ENOMEM;
- gi2c->se.dev = &pdev->dev;
- gi2c->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ gi2c->se.dev = dev;
+ gi2c->se.wrapper = dev_get_drvdata(dev->parent);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gi2c->se.base = devm_ioremap_resource(&pdev->dev, res);
+ gi2c->se.base = devm_ioremap_resource(dev, res);
if (IS_ERR(gi2c->se.base))
return PTR_ERR(gi2c->se.base);
- gi2c->se.clk = devm_clk_get(&pdev->dev, "se");
- if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(&pdev->dev)) {
- ret = PTR_ERR(gi2c->se.clk);
- dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
- return ret;
- }
+ gi2c->se.clk = devm_clk_get(dev, "se");
+ if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev))
+ return PTR_ERR(gi2c->se.clk);
- ret = device_property_read_u32(&pdev->dev, "clock-frequency",
- &gi2c->clk_freq_out);
+ ret = device_property_read_u32(dev, "clock-frequency",
+ &gi2c->clk_freq_out);
if (ret) {
- dev_info(&pdev->dev,
- "Bus frequency not specified, default to 100kHz.\n");
+ dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
gi2c->clk_freq_out = KHZ(100);
}
- if (has_acpi_companion(&pdev->dev))
- ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(&pdev->dev));
+ if (has_acpi_companion(dev))
+ ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(dev));
gi2c->irq = platform_get_irq(pdev, 0);
- if (gi2c->irq < 0) {
- dev_err(&pdev->dev, "IRQ error for i2c-geni\n");
+ if (gi2c->irq < 0)
return gi2c->irq;
- }
ret = geni_i2c_clk_map_idx(gi2c);
if (ret) {
- dev_err(&pdev->dev, "Invalid clk frequency %d Hz: %d\n",
+ dev_err(dev, "Invalid clk frequency %d Hz: %d\n",
gi2c->clk_freq_out, ret);
return ret;
}
@@ -549,29 +543,29 @@ static int geni_i2c_probe(struct platform_device *pdev)
init_completion(&gi2c->done);
spin_lock_init(&gi2c->lock);
platform_set_drvdata(pdev, gi2c);
- ret = devm_request_irq(&pdev->dev, gi2c->irq, geni_i2c_irq,
- IRQF_TRIGGER_HIGH, "i2c_geni", gi2c);
+ ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0,
+ dev_name(dev), gi2c);
if (ret) {
- dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+ dev_err(dev, "Request_irq failed:%d: err:%d\n",
gi2c->irq, ret);
return ret;
}
/* Disable the interrupt so that the system can enter low-power mode */
disable_irq(gi2c->irq);
i2c_set_adapdata(&gi2c->adap, gi2c);
- gi2c->adap.dev.parent = &pdev->dev;
- gi2c->adap.dev.of_node = pdev->dev.of_node;
+ gi2c->adap.dev.parent = dev;
+ gi2c->adap.dev.of_node = dev->of_node;
strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
- dev_err(&pdev->dev, "Error turning on resources %d\n", ret);
+ dev_err(dev, "Error turning on resources %d\n", ret);
return ret;
}
proto = geni_se_read_proto(&gi2c->se);
tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
if (proto != GENI_SE_I2C) {
- dev_err(&pdev->dev, "Invalid proto %d\n", proto);
+ dev_err(dev, "Invalid proto %d\n", proto);
geni_se_resources_off(&gi2c->se);
return -ENXIO;
}
@@ -581,11 +575,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
true, true, true);
ret = geni_se_resources_off(&gi2c->se);
if (ret) {
- dev_err(&pdev->dev, "Error turning off resources %d\n", ret);
+ dev_err(dev, "Error turning off resources %d\n", ret);
return ret;
}
- dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
+ dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
gi2c->suspended = 1;
pm_runtime_set_suspended(gi2c->se.dev);
@@ -595,12 +589,12 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&gi2c->adap);
if (ret) {
- dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ dev_err(dev, "Error adding i2c adapter %d\n", ret);
pm_runtime_disable(gi2c->se.dev);
return ret;
}
- dev_dbg(&pdev->dev, "Geni-I2C adaptor successfully added\n");
+ dev_dbg(dev, "Geni-I2C adaptor successfully added\n");
return 0;
}
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 2d7dabe12723..748872a9b0fc 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -136,13 +136,8 @@
*/
#define TOUT_MIN 2
-/* I2C Frequency Modes */
-#define I2C_STANDARD_FREQ 100000
-#define I2C_FAST_MODE_FREQ 400000
-#define I2C_FAST_MODE_PLUS_FREQ 1000000
-
/* Default values. Use these if FW query fails */
-#define DEFAULT_CLK_FREQ I2C_STANDARD_FREQ
+#define DEFAULT_CLK_FREQ I2C_MAX_STANDARD_MODE_FREQ
#define DEFAULT_SRC_CLK 20000000
/*
@@ -1756,7 +1751,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
nodma:
/* We support frequencies up to Fast Mode Plus (1 MHz) */
- if (!clk_freq || clk_freq > I2C_FAST_MODE_PLUS_FREQ) {
+ if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) {
dev_err(qup->dev, "clock frequency not supported %d\n",
clk_freq);
return -EINVAL;
@@ -1861,7 +1856,7 @@ nodma:
qup->in_fifo_sz = qup->in_blk_sz * (2 << size);
hs_div = 3;
- if (clk_freq <= I2C_STANDARD_FREQ) {
+ if (clk_freq <= I2C_MAX_STANDARD_MODE_FREQ) {
fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff);
} else {
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 879f0e61a496..3b5397aa4ca6 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -235,17 +235,20 @@ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
return i2c_recover_bus(&priv->adap);
}
-static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timings *t)
+static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
{
u32 scgd, cdf, round, ick, sum, scl, cdf_width;
unsigned long rate;
struct device *dev = rcar_i2c_priv_to_dev(priv);
+ struct i2c_timings t = {
+ .bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ,
+ .scl_fall_ns = 35,
+ .scl_rise_ns = 200,
+ .scl_int_delay_ns = 50,
+ };
/* Fall back to previously used values if not supplied */
- t->bus_freq_hz = t->bus_freq_hz ?: 100000;
- t->scl_fall_ns = t->scl_fall_ns ?: 35;
- t->scl_rise_ns = t->scl_rise_ns ?: 200;
- t->scl_int_delay_ns = t->scl_int_delay_ns ?: 50;
+ i2c_parse_fw_timings(dev, &t, false);
switch (priv->devtype) {
case I2C_RCAR_GEN1:
@@ -291,7 +294,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
* = F[sum * ick / 1000000000]
* = F[(ick / 1000000) * sum / 1000]
*/
- sum = t->scl_fall_ns + t->scl_rise_ns + t->scl_int_delay_ns;
+ sum = t.scl_fall_ns + t.scl_rise_ns + t.scl_int_delay_ns;
round = (ick + 500000) / 1000000 * sum;
round = (round + 500) / 1000;
@@ -309,7 +312,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
*/
for (scgd = 0; scgd < 0x40; scgd++) {
scl = ick / (20 + (scgd * 8) + round);
- if (scl <= t->bus_freq_hz)
+ if (scl <= t.bus_freq_hz)
goto scgd_find;
}
dev_err(dev, "it is impossible to calculate best SCL\n");
@@ -317,7 +320,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
scgd_find:
dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
- scl, t->bus_freq_hz, rate, round, cdf, scgd);
+ scl, t.bus_freq_hz, rate, round, cdf, scgd);
/* keep icccr value */
priv->icccr = scgd << cdf_width | cdf;
@@ -920,7 +923,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct rcar_i2c_priv *priv;
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
- struct i2c_timings i2c_t;
int ret;
/* Otherwise logic will break because some bytes must always use PIO */
@@ -957,8 +959,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, priv);
strlcpy(adap->name, pdev->name, sizeof(adap->name));
- i2c_parse_fw_timings(dev, &i2c_t, false);
-
/* Init DMA */
sg_init_table(&priv->sg, 1);
priv->dma_direction = DMA_NONE;
@@ -967,7 +967,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
/* Activate device for clock calculation */
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ret = rcar_i2c_clock_calculate(priv, &i2c_t);
+ ret = rcar_i2c_clock_calculate(priv);
if (ret < 0)
goto out_pm_put;
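The R-Car conversion above shows the intended calling pattern for i2c_parse_fw_timings() with use_defaults == false (see the i2c-core-base.c hunk later in this series): the driver pre-seeds the i2c_timings struct with its own fallbacks, and the helper only overwrites fields that the firmware node actually provides. Condensed:

struct i2c_timings t = {
	/* driver-specific fallbacks, kept when a property is absent */
	.bus_freq_hz      = I2C_MAX_STANDARD_MODE_FREQ,
	.scl_fall_ns      = 35,
	.scl_rise_ns      = 200,
	.scl_int_delay_ns = 50,
};

/* use_defaults == false: missing properties leave t untouched */
i2c_parse_fw_timings(dev, &t, false);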
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 800414886f6b..4eccc0f69861 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -287,10 +287,10 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
pm_runtime_get_sync(riic->adapter.dev.parent);
- if (t->bus_freq_hz > 400000) {
+ if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ) {
dev_err(&riic->adapter.dev,
- "unsupported bus speed (%dHz). 400000 max\n",
- t->bus_freq_hz);
+ "unsupported bus speed (%dHz). %d max\n",
+ t->bus_freq_hz, I2C_MAX_FAST_MODE_FREQ);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 1a33007b03e9..73272d4296bb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -539,9 +539,9 @@ out:
*/
static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
{
- if (speed <= 100000)
+ if (speed <= I2C_MAX_STANDARD_MODE_FREQ)
return &standard_mode_spec;
- else if (speed <= 400000)
+ else if (speed <= I2C_MAX_FAST_MODE_FREQ)
return &fast_mode_spec;
else
return &fast_mode_plus_spec;
@@ -578,8 +578,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
int ret = 0;
/* Only support standard-mode and fast-mode */
- if (WARN_ON(t->bus_freq_hz > 400000))
- t->bus_freq_hz = 400000;
+ if (WARN_ON(t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ))
+ t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
/* prevent scl_rate_khz from becoming 0 */
if (WARN_ON(t->bus_freq_hz < 1000))
@@ -758,8 +758,8 @@ static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
int ret = 0;
/* Support standard-mode, fast-mode and fast-mode plus */
- if (WARN_ON(t->bus_freq_hz > 1000000))
- t->bus_freq_hz = 1000000;
+ if (WARN_ON(t->bus_freq_hz > I2C_MAX_FAST_MODE_PLUS_FREQ))
+ t->bus_freq_hz = I2C_MAX_FAST_MODE_PLUS_FREQ;
/* prevent scl_rate_khz from becoming 0 */
if (WARN_ON(t->bus_freq_hz < 1000))
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c98ef4c4a0c9..5a5638e1daa1 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -835,11 +835,11 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
int freq;
i2c->clkrate = clkin;
- clkin /= 1000; /* clkin now in KHz */
+ clkin /= 1000; /* clkin now in KHz */
dev_dbg(i2c->dev, "pdata desired frequency %lu\n", pdata->frequency);
- target_frequency = pdata->frequency ? pdata->frequency : 100000;
+ target_frequency = pdata->frequency ?: I2C_MAX_STANDARD_MODE_FREQ;
target_frequency /= 1000; /* Target frequency now in KHz */
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 82b3b795e0bd..d83ca4028fa0 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -145,9 +145,6 @@ struct sh_mobile_dt_config {
#define IIC_FLAG_HAS_ICIC67 (1 << 0)
-#define STANDARD_MODE 100000
-#define FAST_MODE 400000
-
/* Register offsets */
#define ICDR 0x00
#define ICCR 0x04
@@ -270,11 +267,11 @@ static int sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
i2c_clk_khz = clk_get_rate(pd->clk) / 1000 / pd->clks_per_count;
- if (pd->bus_speed == STANDARD_MODE) {
+ if (pd->bus_speed == I2C_MAX_STANDARD_MODE_FREQ) {
tLOW = 47; /* tLOW = 4.7 us */
tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */
tf = 3; /* tf = 0.3 us */
- } else if (pd->bus_speed == FAST_MODE) {
+ } else if (pd->bus_speed == I2C_MAX_FAST_MODE_FREQ) {
tLOW = 13; /* tLOW = 1.3 us */
tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */
tf = 3; /* tf = 0.3 us */
@@ -851,7 +848,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
return PTR_ERR(pd->reg);
ret = of_property_read_u32(dev->dev.of_node, "clock-frequency", &bus_speed);
- pd->bus_speed = (ret || !bus_speed) ? STANDARD_MODE : bus_speed;
+ pd->bus_speed = (ret || !bus_speed) ? I2C_MAX_STANDARD_MODE_FREQ : bus_speed;
pd->clks_per_count = 1;
/* Newer variants come with two new bits in ICIC */
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index fb7a046b3226..a459e00c6851 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -62,7 +62,6 @@
#define SIRFSOC_I2C_STOP BIT(6)
#define SIRFSOC_I2C_START BIT(7)
-#define SIRFSOC_I2C_DEFAULT_SPEED 100000
#define SIRFSOC_I2C_ERR_NOACK 1
#define SIRFSOC_I2C_ERR_TIMEOUT 2
@@ -353,7 +352,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
err = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &bitrate);
if (err < 0)
- bitrate = SIRFSOC_I2C_DEFAULT_SPEED;
+ bitrate = I2C_MAX_STANDARD_MODE_FREQ;
/*
* Due to some hardware design issues, we need to tune the formula.
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index b432e7580458..123a42bfe3b1 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -337,9 +337,9 @@ static void sprd_i2c_set_clk(struct sprd_i2c *i2c_dev, u32 freq)
writel(div1, i2c_dev->base + ADDR_DVD1);
/* Start hold timing = hold time(us) * source clock */
- if (freq == 400000)
+ if (freq == I2C_MAX_FAST_MODE_FREQ)
writel((6 * apb_clk) / 10000000, i2c_dev->base + ADDR_STA0_DVD);
- else if (freq == 100000)
+ else if (freq == I2C_MAX_STANDARD_MODE_FREQ)
writel((4 * apb_clk) / 1000000, i2c_dev->base + ADDR_STA0_DVD);
}
@@ -502,7 +502,7 @@ static int sprd_i2c_probe(struct platform_device *pdev)
snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name),
"%s", "sprd-i2c");
- i2c_dev->bus_freq = 100000;
+ i2c_dev->bus_freq = I2C_MAX_STANDARD_MODE_FREQ;
i2c_dev->adap.owner = THIS_MODULE;
i2c_dev->dev = dev;
i2c_dev->adap.retries = 3;
@@ -516,7 +516,8 @@ static int sprd_i2c_probe(struct platform_device *pdev)
i2c_dev->bus_freq = prop;
/* We only support 100k and 400k now; otherwise return an error. */
- if (i2c_dev->bus_freq != 100000 && i2c_dev->bus_freq != 400000)
+ if (i2c_dev->bus_freq != I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_freq != I2C_MAX_FAST_MODE_FREQ)
return -EINVAL;
ret = sprd_i2c_clk_init(i2c_dev);
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index f7f7b5b64720..faa81a95551f 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -213,7 +213,7 @@ static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask)
*/
static struct st_i2c_timings i2c_timings[] = {
[I2C_MODE_STANDARD] = {
- .rate = 100000,
+ .rate = I2C_MAX_STANDARD_MODE_FREQ,
.rep_start_hold = 4400,
.rep_start_setup = 5170,
.start_hold = 4400,
@@ -222,7 +222,7 @@ static struct st_i2c_timings i2c_timings[] = {
.bus_free_time = 5170,
},
[I2C_MODE_FAST] = {
- .rate = 400000,
+ .rate = I2C_MAX_FAST_MODE_FREQ,
.rep_start_hold = 660,
.rep_start_setup = 660,
.start_hold = 660,
@@ -836,7 +836,7 @@ static int st_i2c_probe(struct platform_device *pdev)
i2c_dev->mode = I2C_MODE_STANDARD;
ret = of_property_read_u32(np, "clock-frequency", &clk_rate);
- if ((!ret) && (clk_rate == 400000))
+ if (!ret && (clk_rate == I2C_MAX_FAST_MODE_FREQ))
i2c_dev->mode = I2C_MODE_FAST;
i2c_dev->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index ba600d77a3f8..d6a69dfcac3f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -232,10 +232,10 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev)
* In standard mode:
* t_scl_high = t_scl_low = CCR * I2C parent clk period
* So to reach 100 kHz, we have:
- * CCR = I2C parent rate / 100 kHz >> 1
+ * CCR = I2C parent rate / (100 kHz * 2)
*
* For example with parent rate = 2 MHz:
- * CCR = 2000000 / (100000 << 1) = 10
+ * CCR = 2000000 / (100000 * 2) = 10
* t_scl_high = t_scl_low = 10 * (1 / 2000000) = 5000 ns
* t_scl_high + t_scl_low = 10000 ns so 100 kHz is reached
*
@@ -243,7 +243,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev)
* parent rate is not higher than 46 MHz. As a result val
* is at most 8 bits wide and so fits into the CCR bits [11:0].
*/
- val = i2c_dev->parent_rate / (100000 << 1);
+ val = i2c_dev->parent_rate / (I2C_MAX_STANDARD_MODE_FREQ * 2);
} else {
/*
* In fast mode, we compute CCR with duty = 0 as with low
@@ -263,7 +263,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev)
* parent rate is not higher than 46 MHz. As a result val
* is at most 6 bits wide and so fits into the CCR bits [11:0].
*/
- val = DIV_ROUND_UP(i2c_dev->parent_rate, 400000 * 3);
+ val = DIV_ROUND_UP(i2c_dev->parent_rate, I2C_MAX_FAST_MODE_FREQ * 3);
/* Select Fast mode */
ccr |= STM32F4_I2C_CCR_FS;
@@ -807,7 +807,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
ret = of_property_read_u32(np, "clock-frequency", &clk_rate);
- if (!ret && clk_rate >= 400000)
+ if (!ret && clk_rate >= I2C_MAX_FAST_MODE_FREQ)
i2c_dev->speed = STM32_I2C_SPEED_FAST;
i2c_dev->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 5c3e8ac6ad92..330ffed011e0 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -49,6 +50,7 @@
/* STM32F7 I2C control 1 */
#define STM32F7_I2C_CR1_PECEN BIT(23)
+#define STM32F7_I2C_CR1_WUPEN BIT(18)
#define STM32F7_I2C_CR1_SBC BIT(16)
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
@@ -174,7 +176,6 @@
* @cr2: Control register 2
* @oar1: Own address 1 register
* @oar2: Own address 2 register
- * @pecr: PEC register
* @tmgr: Timing register
*/
struct stm32f7_i2c_regs {
@@ -182,7 +183,6 @@ struct stm32f7_i2c_regs {
u32 cr2;
u32 oar1;
u32 oar2;
- u32 pecr;
u32 tmgr;
};
@@ -221,6 +221,7 @@ struct stm32f7_i2c_spec {
* @fall_time: Fall time (ns)
* @dnf: Digital filter coefficient (0-16)
* @analog_filter: Analog filter delay (On/Off)
+ * @fmp_clr_offset: Fast Mode Plus clear register offset from set register
*/
struct stm32f7_i2c_setup {
enum stm32_i2c_speed speed;
@@ -230,6 +231,7 @@ struct stm32f7_i2c_setup {
u32 fall_time;
u8 dnf;
bool analog_filter;
+ u32 fmp_clr_offset;
};
/**
@@ -301,6 +303,10 @@ struct stm32f7_i2c_msg {
* @dma: dma data
* @use_dma: boolean to know if dma is used in the current transfer
* @regmap: holds SYSCFG phandle for Fast Mode Plus bits
+ * @fmp_sreg: register address for setting Fast Mode Plus bits
+ * @fmp_creg: register address for clearing Fast Mode Plus bits
+ * @fmp_mask: mask for Fast Mode Plus bits in set register
+ * @wakeup_src: boolean to know if the device is a wakeup source
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
@@ -323,6 +329,10 @@ struct stm32f7_i2c_dev {
struct stm32_i2c_dma *dma;
bool use_dma;
struct regmap *regmap;
+ u32 fmp_sreg;
+ u32 fmp_creg;
+ u32 fmp_mask;
+ bool wakeup_src;
};
/*
@@ -334,9 +344,9 @@ struct stm32f7_i2c_dev {
*/
static struct stm32f7_i2c_spec i2c_specs[] = {
[STM32_I2C_SPEED_STANDARD] = {
- .rate = 100000,
- .rate_min = 80000,
- .rate_max = 100000,
+ .rate = I2C_MAX_STANDARD_MODE_FREQ,
+ .rate_min = I2C_MAX_STANDARD_MODE_FREQ * 8 / 10, /* 80% */
+ .rate_max = I2C_MAX_STANDARD_MODE_FREQ,
.fall_max = 300,
.rise_max = 1000,
.hddat_min = 0,
@@ -346,9 +356,9 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
.h_min = 4000,
},
[STM32_I2C_SPEED_FAST] = {
- .rate = 400000,
- .rate_min = 320000,
- .rate_max = 400000,
+ .rate = I2C_MAX_FAST_MODE_FREQ,
+ .rate_min = I2C_MAX_FAST_MODE_FREQ * 8 / 10, /* 80% */
+ .rate_max = I2C_MAX_FAST_MODE_FREQ,
.fall_max = 300,
.rise_max = 300,
.hddat_min = 0,
@@ -358,9 +368,9 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
.h_min = 600,
},
[STM32_I2C_SPEED_FAST_PLUS] = {
- .rate = 1000000,
- .rate_min = 800000,
- .rate_max = 1000000,
+ .rate = I2C_MAX_FAST_MODE_PLUS_FREQ,
+ .rate_min = I2C_MAX_FAST_MODE_PLUS_FREQ * 8 / 10, /* 80% */
+ .rate_max = I2C_MAX_FAST_MODE_PLUS_FREQ,
.fall_max = 100,
.rise_max = 120,
.hddat_min = 0,
@@ -378,6 +388,14 @@ static const struct stm32f7_i2c_setup stm32f7_setup = {
.analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
};
+static const struct stm32f7_i2c_setup stm32mp15_setup = {
+ .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
+ .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
+ .dnf = STM32F7_I2C_DNF_DEFAULT,
+ .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
+ .fmp_clr_offset = 0x40,
+};
+
static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask)
{
writel_relaxed(readl_relaxed(reg) | mask, reg);
@@ -592,8 +610,25 @@ exit:
static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
struct stm32f7_i2c_setup *setup)
{
+ struct i2c_timings timings, *t = &timings;
int ret = 0;
+ t->bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ t->scl_rise_ns = i2c_dev->setup.rise_time;
+ t->scl_fall_ns = i2c_dev->setup.fall_time;
+
+ i2c_parse_fw_timings(i2c_dev->dev, t, false);
+
+ if (t->bus_freq_hz >= I2C_MAX_FAST_MODE_PLUS_FREQ)
+ i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS;
+ else if (t->bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ)
+ i2c_dev->speed = STM32_I2C_SPEED_FAST;
+ else
+ i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
+
+ i2c_dev->setup.rise_time = t->scl_rise_ns;
+ i2c_dev->setup.fall_time = t->scl_fall_ns;
+
setup->speed = i2c_dev->speed;
setup->speed_freq = i2c_specs[setup->speed].rate;
setup->clock_src = clk_get_rate(i2c_dev->clk);
@@ -1691,6 +1726,24 @@ pm_free:
return ret;
}
+static void stm32f7_i2c_enable_wakeup(struct stm32f7_i2c_dev *i2c_dev,
+ bool enable)
+{
+ void __iomem *base = i2c_dev->base;
+ u32 mask = STM32F7_I2C_CR1_WUPEN;
+
+ if (!i2c_dev->wakeup_src)
+ return;
+
+ if (enable) {
+ device_set_wakeup_enable(i2c_dev->dev, true);
+ stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
+ } else {
+ device_set_wakeup_enable(i2c_dev->dev, false);
+ stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1, mask);
+ }
+}
+
static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
{
struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(slave->adapter);
@@ -1717,6 +1770,9 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
if (ret < 0)
return ret;
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev))
+ stm32f7_i2c_enable_wakeup(i2c_dev, true);
+
if (id == 0) {
/* Configure Own Address 1 */
oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1);
@@ -1758,6 +1814,9 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
ret = 0;
pm_free:
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev))
+ stm32f7_i2c_enable_wakeup(i2c_dev, false);
+
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -1791,8 +1850,10 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
i2c_dev->slave[id] = NULL;
- if (!(stm32f7_i2c_is_slave_registered(i2c_dev)))
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev)) {
stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK);
+ stm32f7_i2c_enable_wakeup(i2c_dev, false);
+ }
pm_runtime_mark_last_busy(i2c_dev->dev);
pm_runtime_put_autosuspend(i2c_dev->dev);
@@ -1800,28 +1861,51 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
return 0;
}
+static int stm32f7_i2c_write_fm_plus_bits(struct stm32f7_i2c_dev *i2c_dev,
+ bool enable)
+{
+ int ret;
+
+ if (i2c_dev->speed != STM32_I2C_SPEED_FAST_PLUS ||
+ IS_ERR_OR_NULL(i2c_dev->regmap))
+ /* Optional */
+ return 0;
+
+ if (i2c_dev->fmp_sreg == i2c_dev->fmp_creg)
+ ret = regmap_update_bits(i2c_dev->regmap,
+ i2c_dev->fmp_sreg,
+ i2c_dev->fmp_mask,
+ enable ? i2c_dev->fmp_mask : 0);
+ else
+ ret = regmap_write(i2c_dev->regmap,
+ enable ? i2c_dev->fmp_sreg :
+ i2c_dev->fmp_creg,
+ i2c_dev->fmp_mask);
+
+ return ret;
+}
+
static int stm32f7_i2c_setup_fm_plus_bits(struct platform_device *pdev,
struct stm32f7_i2c_dev *i2c_dev)
{
struct device_node *np = pdev->dev.of_node;
int ret;
- u32 reg, mask;
i2c_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg-fmp");
- if (IS_ERR(i2c_dev->regmap)) {
+ if (IS_ERR(i2c_dev->regmap))
/* Optional */
return 0;
- }
- ret = of_property_read_u32_index(np, "st,syscfg-fmp", 1, &reg);
+ ret = of_property_read_u32_index(np, "st,syscfg-fmp", 1,
+ &i2c_dev->fmp_sreg);
if (ret)
return ret;
- ret = of_property_read_u32_index(np, "st,syscfg-fmp", 2, &mask);
- if (ret)
- return ret;
+ i2c_dev->fmp_creg = i2c_dev->fmp_sreg +
+ i2c_dev->setup.fmp_clr_offset;
- return regmap_update_bits(i2c_dev->regmap, reg, mask, mask);
+ return of_property_read_u32_index(np, "st,syscfg-fmp", 2,
+ &i2c_dev->fmp_mask);
}
static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
@@ -1847,7 +1931,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
struct stm32f7_i2c_dev *i2c_dev;
const struct stm32f7_i2c_setup *setup;
struct resource *res;
- u32 clk_rate, rise_time, fall_time;
struct i2c_adapter *adap;
struct reset_control *rst;
dma_addr_t phy_addr;
@@ -1879,6 +1962,9 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
return irq_error ? : -ENOENT;
}
+ i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
+ "wakeup-source");
+
i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_dev->clk)) {
dev_err(&pdev->dev, "Error: Missing controller clock\n");
@@ -1891,20 +1977,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
return ret;
}
- i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
- ret = device_property_read_u32(&pdev->dev, "clock-frequency",
- &clk_rate);
- if (!ret && clk_rate >= 1000000) {
- i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS;
- ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
- if (ret)
- goto clk_free;
- } else if (!ret && clk_rate >= 400000) {
- i2c_dev->speed = STM32_I2C_SPEED_FAST;
- } else if (!ret && clk_rate >= 100000) {
- i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
- }
-
rst = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(rst)) {
dev_err(&pdev->dev, "Error: Missing controller reset\n");
@@ -1944,20 +2016,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
i2c_dev->setup = *setup;
- ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
- &rise_time);
- if (!ret)
- i2c_dev->setup.rise_time = rise_time;
-
- ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
- &fall_time);
- if (!ret)
- i2c_dev->setup.fall_time = fall_time;
-
ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
if (ret)
goto clk_free;
+ if (i2c_dev->speed == STM32_I2C_SPEED_FAST_PLUS) {
+ ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
+ if (ret)
+ goto clk_free;
+ ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
+ if (ret)
+ goto clk_free;
+ }
+
adap = &i2c_dev->adap;
i2c_set_adapdata(adap, i2c_dev);
snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)",
@@ -1982,7 +2053,17 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"Failed to request dma error %i\n", ret);
- goto clk_free;
+ goto fmp_clear;
+ }
+
+ if (i2c_dev->wakeup_src) {
+ device_set_wakeup_capable(i2c_dev->dev, true);
+
+ ret = dev_pm_set_wake_irq(i2c_dev->dev, irq_event);
+ if (ret) {
+ dev_err(i2c_dev->dev, "Failed to set wake up irq\n");
+ goto clr_wakeup_capable;
+ }
}
platform_set_drvdata(pdev, i2c_dev);
@@ -2014,11 +2095,21 @@ pm_disable:
pm_runtime_set_suspended(i2c_dev->dev);
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+ if (i2c_dev->wakeup_src)
+ dev_pm_clear_wake_irq(i2c_dev->dev);
+
+clr_wakeup_capable:
+ if (i2c_dev->wakeup_src)
+ device_set_wakeup_capable(i2c_dev->dev, false);
+
if (i2c_dev->dma) {
stm32_i2c_dma_free(i2c_dev->dma);
i2c_dev->dma = NULL;
}
+fmp_clear:
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
+
clk_free:
clk_disable_unprepare(i2c_dev->clk);
@@ -2032,6 +2123,15 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c_dev->adap);
pm_runtime_get_sync(i2c_dev->dev);
+ if (i2c_dev->wakeup_src) {
+ dev_pm_clear_wake_irq(i2c_dev->dev);
+ /*
+ * Enforce that wakeup is disabled and that the device
+ * is marked as not wakeup-capable.
+ */
+ device_init_wakeup(i2c_dev->dev, false);
+ }
+
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
pm_runtime_set_suspended(i2c_dev->dev);
@@ -2042,6 +2142,8 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
i2c_dev->dma = NULL;
}
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
+
clk_disable_unprepare(i2c_dev->clk);
return 0;
@@ -2073,8 +2175,8 @@ static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused
-stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+#ifdef CONFIG_PM_SLEEP
+static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
{
int ret;
struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
@@ -2087,16 +2189,15 @@ stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
backup_regs->cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2);
backup_regs->oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1);
backup_regs->oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2);
- backup_regs->pecr = readl_relaxed(i2c_dev->base + STM32F7_I2C_PECR);
backup_regs->tmgr = readl_relaxed(i2c_dev->base + STM32F7_I2C_TIMINGR);
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
pm_runtime_put_sync(i2c_dev->dev);
return ret;
}
-static int __maybe_unused
-stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
{
u32 cr1;
int ret;
@@ -2120,48 +2221,55 @@ stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
writel_relaxed(backup_regs->cr2, i2c_dev->base + STM32F7_I2C_CR2);
writel_relaxed(backup_regs->oar1, i2c_dev->base + STM32F7_I2C_OAR1);
writel_relaxed(backup_regs->oar2, i2c_dev->base + STM32F7_I2C_OAR2);
- writel_relaxed(backup_regs->pecr, i2c_dev->base + STM32F7_I2C_PECR);
+ stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
pm_runtime_put_sync(i2c_dev->dev);
return ret;
}
-static int __maybe_unused stm32f7_i2c_suspend(struct device *dev)
+static int stm32f7_i2c_suspend(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
i2c_mark_adapter_suspended(&i2c_dev->adap);
- ret = stm32f7_i2c_regs_backup(i2c_dev);
- if (ret < 0) {
- i2c_mark_adapter_resumed(&i2c_dev->adap);
- return ret;
- }
- pinctrl_pm_select_sleep_state(dev);
- pm_runtime_force_suspend(dev);
+ if (!device_may_wakeup(dev) && !dev->power.wakeup_path) {
+ ret = stm32f7_i2c_regs_backup(i2c_dev);
+ if (ret < 0) {
+ i2c_mark_adapter_resumed(&i2c_dev->adap);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
+ pm_runtime_force_suspend(dev);
+ }
return 0;
}
-static int __maybe_unused stm32f7_i2c_resume(struct device *dev)
+static int stm32f7_i2c_resume(struct device *dev)
{
struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
- ret = pm_runtime_force_resume(dev);
- if (ret < 0)
- return ret;
- pinctrl_pm_select_default_state(dev);
+ if (!device_may_wakeup(dev) && !dev->power.wakeup_path) {
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+ pinctrl_pm_select_default_state(dev);
+
+ ret = stm32f7_i2c_regs_restore(i2c_dev);
+ if (ret < 0)
+ return ret;
+ }
- ret = stm32f7_i2c_regs_restore(i2c_dev);
- if (ret < 0)
- return ret;
i2c_mark_adapter_resumed(&i2c_dev->adap);
return 0;
}
+#endif
static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
SET_RUNTIME_PM_OPS(stm32f7_i2c_runtime_suspend,
@@ -2171,6 +2279,7 @@ static const struct dev_pm_ops stm32f7_i2c_pm_ops = {
static const struct of_device_id stm32f7_i2c_match[] = {
{ .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup},
+ { .compatible = "st,stm32mp15-i2c", .data = &stm32mp15_setup},
{},
};
MODULE_DEVICE_TABLE(of, stm32f7_i2c_match);
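The wakeup plumbing added to the stm32f7 driver above follows the generic "wakeup-source" pattern: mark the device wakeup-capable in probe, attach the event interrupt as the wake IRQ, and undo both on error and in remove. A minimal sketch of that sequence under stated assumptions (foo_* is a placeholder; irq_event is assumed to be the event interrupt already requested by the driver):

static int foo_setup_wakeup(struct platform_device *pdev, int irq_event)
{
	struct device *dev = &pdev->dev;
	int ret;

	if (!of_property_read_bool(dev->of_node, "wakeup-source"))
		return 0;	/* wakeup support is optional */

	device_set_wakeup_capable(dev, true);

	ret = dev_pm_set_wake_irq(dev, irq_event);
	if (ret)
		device_set_wakeup_capable(dev, false);

	return ret;
}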
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 42e0a53e7fa4..ba6b60caa45e 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -132,7 +132,7 @@ enum stu300_error {
#define NUM_ADDR_RESEND_ATTEMPTS 12
/* I2C clock speed, in Hz (0-400 kHz) */
-static unsigned int scl_frequency = 100000;
+static unsigned int scl_frequency = I2C_MAX_STANDARD_MODE_FREQ;
module_param(scl_frequency, uint, 0644);
/**
@@ -497,7 +497,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
dev_dbg(&dev->pdev->dev, "Clock rate %lu Hz, I2C bus speed %d Hz "
"virtbase %p\n", clkrate, dev->speed, dev->virtbase);
- if (dev->speed > 100000)
+ if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ)
/* Fast Mode I2C */
val = ((clkrate/dev->speed) - 9)/3 + 1;
else
@@ -518,7 +518,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
return -EINVAL;
}
- if (dev->speed > 100000) {
+ if (dev->speed > I2C_MAX_STANDARD_MODE_FREQ) {
/* CC6..CC0 */
stu300_wr8((val & I2C_CCR_CC_MASK) | I2C_CCR_FMSM,
dev->virtbase + I2C_CCR);
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 7c07ce116e38..e5293f0b3318 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -186,7 +186,7 @@ static int p2wi_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *childnp;
unsigned long parent_clk_freq;
- u32 clk_freq = 100000;
+ u32 clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
struct resource *r;
struct p2wi *p2wi;
u32 slave_addr;
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 86026798b4f7..9099d0a67ace 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -67,10 +67,10 @@
/* STANDARD MODE frequency */
#define SYNQUACER_I2C_CLK_MASTER_STD(rate) \
- DIV_ROUND_UP(DIV_ROUND_UP((rate), 100000) - 2, 2)
+ DIV_ROUND_UP(DIV_ROUND_UP((rate), I2C_MAX_STANDARD_MODE_FREQ) - 2, 2)
/* FAST MODE frequency */
#define SYNQUACER_I2C_CLK_MASTER_FAST(rate) \
- DIV_ROUND_UP((DIV_ROUND_UP((rate), 400000) - 2) * 2, 3)
+ DIV_ROUND_UP((DIV_ROUND_UP((rate), I2C_MAX_FAST_MODE_FREQ) - 2) * 2, 3)
/* (clkrate <= 18000000) */
/* calculate the value of CS bits in CCR register on standard mode */
@@ -602,7 +602,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
i2c->adapter.nr = pdev->id;
init_completion(&i2c->completion);
- if (bus_speed < 400000)
+ if (bus_speed < I2C_MAX_FAST_MODE_FREQ)
i2c->speed_khz = SYNQUACER_I2C_SPEED_SM;
else
i2c->speed_khz = SYNQUACER_I2C_SPEED_FM;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index cbc2ad49043e..4c4d17ddc96b 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -123,10 +123,6 @@
#define I2C_THIGH_SHIFT 8
#define I2C_INTERFACE_TIMING_1 0x98
-#define I2C_STANDARD_MODE 100000
-#define I2C_FAST_MODE 400000
-#define I2C_FAST_PLUS_MODE 1000000
-
/* Packet header size in bytes */
#define I2C_PACKET_HEADER_SIZE 12
@@ -737,8 +733,8 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT;
i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
- if (i2c_dev->bus_clk_rate > I2C_STANDARD_MODE &&
- i2c_dev->bus_clk_rate <= I2C_FAST_PLUS_MODE) {
+ if (i2c_dev->bus_clk_rate > I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_PLUS_FREQ) {
tlow = i2c_dev->hw->tlow_fast_fastplus_mode;
thigh = i2c_dev->hw->thigh_fast_fastplus_mode;
tsu_thd = i2c_dev->hw->setup_hold_time_fast_fast_plus_mode;
@@ -1341,7 +1337,7 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
ret = of_property_read_u32(np, "clock-frequency",
&i2c_dev->bus_clk_rate);
if (ret)
- i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+ i2c_dev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
multi_mode = of_property_read_bool(np, "multi-master");
i2c_dev->is_multimaster_mode = multi_mode;
@@ -1640,12 +1636,12 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
}
- if (i2c_dev->bus_clk_rate > I2C_FAST_MODE &&
- i2c_dev->bus_clk_rate <= I2C_FAST_PLUS_MODE)
+ if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ &&
+ i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_PLUS_FREQ)
i2c_dev->clk_divisor_non_hs_mode =
i2c_dev->hw->clk_divisor_fast_plus_mode;
- else if (i2c_dev->bus_clk_rate > I2C_STANDARD_MODE &&
- i2c_dev->bus_clk_rate <= I2C_FAST_MODE)
+ else if (i2c_dev->bus_clk_rate > I2C_MAX_STANDARD_MODE_FREQ &&
+ i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_FREQ)
i2c_dev->clk_divisor_non_hs_mode =
i2c_dev->hw->clk_divisor_fast_mode;
else
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 19f8eec38717..12c90aa0900e 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -118,6 +118,8 @@ static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk)
static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
struct device_node *node)
{
+ struct i2c_client *ara;
+
if (!node)
return -EINVAL;
@@ -125,9 +127,12 @@ static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
if (!i2c->alert_data.irq)
return -EINVAL;
- i2c->ara = i2c_setup_smbus_alert(&i2c->adap, &i2c->alert_data);
- if (!i2c->ara)
- return -ENODEV;
+ ara = i2c_new_smbus_alert_device(&i2c->adap, &i2c->alert_data);
+ if (IS_ERR(ara))
+ return PTR_ERR(ara);
+
+ i2c->ara = ara;
+
return 0;
}
@@ -178,7 +183,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
thunder_i2c_clock_enable(dev, i2c);
ret = device_property_read_u32(dev, "clock-frequency", &i2c->twsi_freq);
if (ret)
- i2c->twsi_freq = 100000;
+ i2c->twsi_freq = I2C_MAX_STANDARD_MODE_FREQ;
init_waitqueue_head(&i2c->queue);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 4241aac79e7e..2b258d54d68c 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -73,8 +73,6 @@
#define UNIPHIER_FI2C_BYTE_WISE BIT(3)
#define UNIPHIER_FI2C_DEFER_STOP_COMP BIT(4)
-#define UNIPHIER_FI2C_DEFAULT_SPEED 100000
-#define UNIPHIER_FI2C_MAX_SPEED 400000
#define UNIPHIER_FI2C_FIFO_SIZE 8
struct uniphier_fi2c_priv {
@@ -537,9 +535,9 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
}
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > UNIPHIER_FI2C_MAX_SPEED) {
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 0270090c0360..668b1fa2b0ef 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -35,9 +35,6 @@
#define UNIPHIER_I2C_NOISE 0x1c /* noise filter control */
#define UNIPHIER_I2C_SETUP 0x20 /* setup time control */
-#define UNIPHIER_I2C_DEFAULT_SPEED 100000
-#define UNIPHIER_I2C_MAX_SPEED 400000
-
struct uniphier_i2c_priv {
struct completion comp;
struct i2c_adapter adap;
@@ -333,9 +330,9 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
}
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
+ bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > UNIPHIER_I2C_MAX_SPEED) {
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 524017f7034e..88f5aafdce5b 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -399,7 +399,7 @@ static int wmt_i2c_probe(struct platform_device *pdev)
i2c_dev->mode = I2C_MODE_STANDARD;
err = of_property_read_u32(np, "clock-frequency", &clk_rate);
- if ((!err) && (clk_rate == 400000))
+ if (!err && (clk_rate == I2C_MAX_FAST_MODE_FREQ))
i2c_dev->mode = I2C_MODE_FAST;
i2c_dev->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 8a873975cf12..391c878a7cdc 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -71,8 +71,6 @@
#define XLP9XX_I2C_SLAVEADDR_ADDR_SHIFT 1
#define XLP9XX_I2C_IP_CLK_FREQ 133000000UL
-#define XLP9XX_I2C_DEFAULT_FREQ 100000
-#define XLP9XX_I2C_HIGH_FREQ 400000
#define XLP9XX_I2C_FIFO_SIZE 0x80U
#define XLP9XX_I2C_TIMEOUT_MS 1000
#define XLP9XX_I2C_BUSY_TIMEOUT 50
@@ -476,12 +474,12 @@ static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
err = device_property_read_u32(&pdev->dev, "clock-frequency", &freq);
if (err) {
- freq = XLP9XX_I2C_DEFAULT_FREQ;
+ freq = I2C_MAX_STANDARD_MODE_FREQ;
dev_dbg(&pdev->dev, "using default frequency %u\n", freq);
- } else if (freq == 0 || freq > XLP9XX_I2C_HIGH_FREQ) {
+ } else if (freq == 0 || freq > I2C_MAX_FAST_MODE_FREQ) {
dev_warn(&pdev->dev, "invalid frequency %u, using default\n",
freq);
- freq = XLP9XX_I2C_DEFAULT_FREQ;
+ freq = I2C_MAX_STANDARD_MODE_FREQ;
}
priv->clk_hz = freq;
@@ -491,12 +489,16 @@ static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
static int xlp9xx_i2c_smbus_setup(struct xlp9xx_i2c_dev *priv,
struct platform_device *pdev)
{
+ struct i2c_client *ara;
+
if (!priv->alert_data.irq)
return -EINVAL;
- priv->ara = i2c_setup_smbus_alert(&priv->adapter, &priv->alert_data);
- if (!priv->ara)
- return -ENODEV;
+ ara = i2c_new_smbus_alert_device(&priv->adapter, &priv->alert_data);
+ if (IS_ERR(ara))
+ return PTR_ERR(ara);
+
+ priv->ara = ara;
return 0;
}
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 34cd4b308540..282f161a8b08 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -404,7 +404,7 @@ static int xlr_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&busfreq))
- busfreq = 100000;
+ busfreq = I2C_MAX_STANDARD_MODE_FREQ;
clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(clk)) {
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 8b0ff780919b..c8f42f2037cb 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -318,7 +318,7 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
lookup->min_speed = lookup->speed;
if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
- lookup->force_speed = 400000;
+ lookup->force_speed = I2C_MAX_FAST_MODE_FREQ;
return AE_OK;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index cefad0881942..5cc0b0ec5570 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1593,32 +1593,30 @@ EXPORT_SYMBOL(i2c_del_adapter);
* @dev: The device to scan for I2C timing properties
* @t: the i2c_timings struct to be filled with values
* @use_defaults: bool to use sane defaults derived from the I2C specification
- * when properties are not found, otherwise use 0
+ * when properties are not found, otherwise don't update
*
* Scan the device for the generic I2C properties describing timing parameters
* for the signal and fill the given struct with the results. If a property was
* not found and use_defaults was true, then maximum timings are assumed which
* are derived from the I2C specification. If use_defaults is not used, the
- * results will be 0, so drivers can apply their own defaults later. The latter
- * is mainly intended for avoiding regressions of existing drivers which want
- * to switch to this function. New drivers almost always should use the defaults.
+ * results will be as before, so drivers can apply their own defaults before
+ * calling this helper. The latter is mainly intended for avoiding regressions
+ * of existing drivers which want to switch to this function. New drivers
+ * almost always should use the defaults.
*/
-
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults)
{
int ret;
- memset(t, 0, sizeof(*t));
-
ret = device_property_read_u32(dev, "clock-frequency", &t->bus_freq_hz);
if (ret && use_defaults)
- t->bus_freq_hz = 100000;
+ t->bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
ret = device_property_read_u32(dev, "i2c-scl-rising-time-ns", &t->scl_rise_ns);
if (ret && use_defaults) {
- if (t->bus_freq_hz <= 100000)
+ if (t->bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ)
t->scl_rise_ns = 1000;
- else if (t->bus_freq_hz <= 400000)
+ else if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ)
t->scl_rise_ns = 300;
else
t->scl_rise_ns = 120;
@@ -1626,25 +1624,31 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de
ret = device_property_read_u32(dev, "i2c-scl-falling-time-ns", &t->scl_fall_ns);
if (ret && use_defaults) {
- if (t->bus_freq_hz <= 400000)
+ if (t->bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ)
t->scl_fall_ns = 300;
else
t->scl_fall_ns = 120;
}
- device_property_read_u32(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns);
+ ret = device_property_read_u32(dev, "i2c-scl-internal-delay-ns", &t->scl_int_delay_ns);
+ if (ret && use_defaults)
+ t->scl_int_delay_ns = 0;
ret = device_property_read_u32(dev, "i2c-sda-falling-time-ns", &t->sda_fall_ns);
if (ret && use_defaults)
t->sda_fall_ns = t->scl_fall_ns;
- device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
+ ret = device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
+ if (ret && use_defaults)
+ t->sda_hold_ns = 0;
- device_property_read_u32(dev, "i2c-digital-filter-width-ns",
- &t->digital_filter_width_ns);
+ ret = device_property_read_u32(dev, "i2c-digital-filter-width-ns", &t->digital_filter_width_ns);
+ if (ret && use_defaults)
+ t->digital_filter_width_ns = 0;
- device_property_read_u32(dev, "i2c-analog-filter-cutoff-frequency",
- &t->analog_filter_cutoff_freq_hz);
+ ret = device_property_read_u32(dev, "i2c-analog-filter-cutoff-frequency", &t->analog_filter_cutoff_freq_hz);
+ if (ret && use_defaults)
+ t->analog_filter_cutoff_freq_hz = 0;
}
EXPORT_SYMBOL_GPL(i2c_parse_fw_timings);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 3ac426a8ab5a..b34d2ff06931 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -666,7 +666,7 @@ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
/**
- * i2c_setup_smbus_alert - Setup SMBus alert support
+ * i2c_new_smbus_alert_device - get ara client for SMBus alert support
* @adapter: the target adapter
* @setup: setup data for the SMBus alert handler
* Context: can sleep
@@ -676,31 +676,25 @@ EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
* Handling can be done either through our IRQ handler, or by the
* adapter (from its handler, periodic polling, or whatever).
*
- * NOTE that if we manage the IRQ, we *MUST* know if it's level or
- * edge triggered in order to hand it to the workqueue correctly.
- * If triggering the alert seems to wedge the system, you probably
- * should have said it's level triggered.
- *
* This returns the ara client, which should be saved for later use with
- * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
- * to indicate an error.
+ * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or an
+ * ERRPTR to indicate an error.
*/
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup)
+struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup)
{
struct i2c_board_info ara_board_info = {
I2C_BOARD_INFO("smbus_alert", 0x0c),
.platform_data = setup,
};
- return i2c_new_device(adapter, &ara_board_info);
+ return i2c_new_client_device(adapter, &ara_board_info);
}
-EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
+EXPORT_SYMBOL_GPL(i2c_new_smbus_alert_device);
#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter)
{
- struct i2c_client *client;
int irq;
irq = of_property_match_string(adapter->dev.of_node, "interrupt-names",
@@ -710,11 +704,7 @@ int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter)
else if (irq < 0)
return irq;
- client = i2c_setup_smbus_alert(adapter, NULL);
- if (!client)
- return -ENODEV;
-
- return 0;
+ return PTR_ERR_OR_ZERO(i2c_new_smbus_alert_device(adapter, NULL));
}
EXPORT_SYMBOL_GPL(of_i2c_setup_smbus_alert);
#endif
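The rename also switches the error convention from "NULL on failure" to ERR_PTR(), which is what lets the thunderx and xlp9xx hunks above propagate the real errno instead of a blanket -ENODEV. A condensed sketch of the converted caller shape (foo_setup_alert and its parameters are placeholders):

static int foo_setup_alert(struct i2c_adapter *adapter,
			   struct i2c_smbus_alert_setup *setup,
			   struct i2c_client **out)
{
	struct i2c_client *ara;

	ara = i2c_new_smbus_alert_device(adapter, setup);
	if (IS_ERR(ara))
		return PTR_ERR(ara);	/* forward the real failure reason */

	*out = ara;
	return 0;
}

When only 0-or-errno is needed, PTR_ERR_OR_ZERO() collapses the check to one line, as of_i2c_setup_smbus_alert() does above.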
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 2ea4585d18c5..da020acc9bbd 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -15,6 +15,7 @@
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
#include <linux/cdev.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/i2c-dev.h>
@@ -27,7 +28,6 @@
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/compat.h>
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
@@ -40,7 +40,7 @@
struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
- struct device *dev;
+ struct device dev;
struct cdev cdev;
};
@@ -84,12 +84,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
return i2c_dev;
}
-static void put_i2c_dev(struct i2c_dev *i2c_dev)
+static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev)
{
spin_lock(&i2c_dev_list_lock);
list_del(&i2c_dev->list);
spin_unlock(&i2c_dev_list_lock);
- kfree(i2c_dev);
+ if (del_cdev)
+ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev);
+ put_device(&i2c_dev->dev);
}
static ssize_t name_show(struct device *dev,
@@ -628,6 +630,14 @@ static const struct file_operations i2cdev_fops = {
static struct class *i2c_dev_class;
+static void i2cdev_dev_release(struct device *dev)
+{
+ struct i2c_dev *i2c_dev;
+
+ i2c_dev = container_of(dev, struct i2c_dev, dev);
+ kfree(i2c_dev);
+}
+
static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
struct i2c_adapter *adap;
@@ -644,27 +654,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
cdev_init(&i2c_dev->cdev, &i2cdev_fops);
i2c_dev->cdev.owner = THIS_MODULE;
- res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
- if (res)
- goto error_cdev;
-
- /* register this i2c device with the driver core */
- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
- MKDEV(I2C_MAJOR, adap->nr), NULL,
- "i2c-%d", adap->nr);
- if (IS_ERR(i2c_dev->dev)) {
- res = PTR_ERR(i2c_dev->dev);
- goto error;
+
+ device_initialize(&i2c_dev->dev);
+ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
+ i2c_dev->dev.class = i2c_dev_class;
+ i2c_dev->dev.parent = &adap->dev;
+ i2c_dev->dev.release = i2cdev_dev_release;
+ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
+ if (res) {
+ put_i2c_dev(i2c_dev, false);
+ return res;
}
pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
adap->name, adap->nr);
return 0;
-error:
- cdev_del(&i2c_dev->cdev);
-error_cdev:
- put_i2c_dev(i2c_dev);
- return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
@@ -680,9 +686,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- cdev_del(&i2c_dev->cdev);
- put_i2c_dev(i2c_dev);
- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
+ put_i2c_dev(i2c_dev, true);
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
return 0;
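The i2c-dev conversion replaces a bare struct device * from device_create() with an embedded struct device whose release() callback frees the containing object, paired with cdev_device_add()/cdev_device_del(). This closes the lifetime race between the cdev and the object holding it. The general idiom, sketched with hypothetical foo names (the cdev is assumed to have been set up with cdev_init() beforehand):

struct foo {
	struct device dev;
	struct cdev cdev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static int foo_add(struct foo *foo, struct class *cls, dev_t devt)
{
	int err;

	device_initialize(&foo->dev);
	foo->dev.devt = devt;
	foo->dev.class = cls;
	foo->dev.release = foo_release;
	dev_set_name(&foo->dev, "foo-%u", MINOR(devt));

	err = cdev_device_add(&foo->cdev, &foo->dev);
	if (err)
		put_device(&foo->dev);	/* drops the last ref; foo_release() frees foo */
	return err;
}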
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index db9763cb4dae..cb415b10642f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -96,7 +96,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ eeprom = dev_get_drvdata(kobj_to_dev(kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
memcpy(buf, &eeprom->buffer[off], count);
@@ -111,7 +111,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ eeprom = dev_get_drvdata(kobj_to_dev(kobj));
spin_lock_irqsave(&eeprom->buffer_lock, flags);
memcpy(&eeprom->buffer[off], buf, count);
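kobj_to_dev() is just the typed wrapper over the container_of() it replaces here; behaviour is unchanged. Its definition in <linux/device.h> is equivalent to:

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}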
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 7e2f5d0eacdb..809bcf8387d0 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -184,7 +184,7 @@ static struct i2c_driver smbalert_driver = {
* corresponding I2C device driver's alert function.
*
* It is assumed that ara is a valid i2c client previously returned by
- * i2c_setup_smbus_alert().
+ * i2c_new_smbus_alert_device().
*/
int i2c_handle_smbus_alert(struct i2c_client *ara)
{
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index acf874800ca4..b0411a1827a3 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -89,8 +89,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
static int __init ide_scan_pcibus(void)
{
struct pci_dev *dev = NULL;
- struct pci_driver *d;
- struct list_head *l, *n;
+ struct pci_driver *d, *tmp;
pre_init = 0;
for_each_pci_dev(dev)
@@ -101,9 +100,8 @@ static int __init ide_scan_pcibus(void)
* are post init.
*/
- list_for_each_safe(l, n, &ide_pci_drivers) {
- list_del(l);
- d = list_entry(l, struct pci_driver, node);
+ list_for_each_entry_safe(d, tmp, &ide_pci_drivers, node) {
+ list_del(&d->node);
if (__pci_register_driver(d, d->driver.owner,
d->driver.mod_name))
printk(KERN_ERR "%s: failed to register %s driver\n",
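list_for_each_entry_safe() folds the list_entry() lookup into the iterator and, via the second cursor, stays safe when the current entry is unlinked mid-walk. A self-contained sketch of the same drain pattern (struct item is illustrative only):

struct item {
	struct list_head node;
};

/* Unlink and free every entry; 'tmp' already points at the next node,
 * so deleting 'it' does not break the traversal. */
static void drain(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		kfree(it);
	}
}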
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 17bfedd24cc3..717b798cddad 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -46,7 +46,7 @@
struct ib_pkey_cache {
int table_len;
- u16 table[0];
+ u16 table[];
};
struct ib_update_work {
@@ -973,6 +973,23 @@ done:
EXPORT_SYMBOL(rdma_query_gid);
/**
+ * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
+ * @attr: Pointer to the GID attribute
+ *
+ * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
+ * to the SGID attr. Callers are required to already be holding the reference
+ * to an existing GID entry.
+ *
+ * Returns the HW GID context
+ *
+ */
+void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
+{
+ return container_of(attr, struct ib_gid_table_entry, attr)->context;
+}
+EXPORT_SYMBOL(rdma_read_gid_hw_context);
+
+/**
* rdma_find_gid - Returns SGID attributes if the matching GID is found.
* @device: The device to query.
* @gid: The GID value to search for.
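The u16 table[0] -> u16 table[] change above swaps the old GNU zero-length-array idiom for a C99 flexible array member, which lets the compiler and fortification checks reason about the trailing storage. Allocation typically pairs with struct_size(); a hedged sketch (the _example names are illustrative, not this driver's code):

struct pkey_cache_example {
	int table_len;
	u16 table[];	/* C99 flexible array member */
};

static struct pkey_cache_example *cache_alloc_example(int len)
{
	struct pkey_cache_example *c;

	/* struct_size() == sizeof(*c) + len * sizeof(c->table[0]),
	 * with integer-overflow checking */
	c = kzalloc(struct_size(c, table, len), GFP_KERNEL);
	if (c)
		c->table_len = len;
	return c;
}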
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 15e99a888427..4794113ecd59 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,8 +80,19 @@ const char *__attribute_const__ ibcm_reject_msg(int reason)
}
EXPORT_SYMBOL(ibcm_reject_msg);
+struct cm_id_private;
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ struct ib_cm_sidr_rep_param *param);
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
+ const void *private_data, u8 private_data_len);
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
+ void *private_data, u8 private_data_len);
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len);
static struct ib_client cm_client = {
.name = "cm",
@@ -197,7 +208,7 @@ struct cm_device {
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
- struct cm_port *port[0];
+ struct cm_port *port[];
};
struct cm_av {
@@ -216,7 +227,7 @@ struct cm_work {
__be32 local_id; /* Established / timewait */
__be32 remote_id;
struct ib_cm_event cm_event;
- struct sa_path_rec path[0];
+ struct sa_path_rec path[];
};
struct cm_timewait_info {
@@ -261,7 +272,6 @@ struct cm_id_private {
__be16 pkey;
u8 private_data_len;
u8 max_cm_retries;
- u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
u8 retry_count;
@@ -572,18 +582,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
return 0;
}
-static int cm_alloc_id(struct cm_id_private *cm_id_priv)
-{
- int err;
- u32 id;
-
- err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
- xa_limit_32b, &cm.local_id_next, GFP_KERNEL);
-
- cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return err;
-}
-
static u32 cm_local_id(__be32 local_id)
{
return (__force u32) (local_id ^ cm.random_id_operand);
@@ -633,22 +631,44 @@ static int be64_gt(__be64 a, __be64 b)
return (__force u64) a > (__force u64) b;
}
-static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
+/*
+ * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
+ * if the new ID was inserted, NULL if it could not be inserted due to a
+ * collision, or the existing cm_id_priv ready for shared usage.
+ */
+static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
+ ib_cm_handler shared_handler)
{
struct rb_node **link = &cm.listen_service_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
__be64 service_id = cm_id_priv->id.service_id;
__be64 service_mask = cm_id_priv->id.service_mask;
+ unsigned long flags;
+ spin_lock_irqsave(&cm.lock, flags);
while (*link) {
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
service_node);
if ((cur_cm_id_priv->id.service_mask & service_id) ==
(service_mask & cur_cm_id_priv->id.service_id) &&
- (cm_id_priv->id.device == cur_cm_id_priv->id.device))
+ (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
+ /*
+ * Sharing an ib_cm_id with different handlers is not
+ * supported
+ */
+ if (cur_cm_id_priv->id.cm_handler != shared_handler ||
+ cur_cm_id_priv->id.context ||
+ WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return NULL;
+ }
+ refcount_inc(&cur_cm_id_priv->refcount);
+ cur_cm_id_priv->listen_sharecount++;
+ spin_unlock_irqrestore(&cm.lock, flags);
return cur_cm_id_priv;
+ }
if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
link = &(*link)->rb_left;
@@ -661,9 +681,11 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
else
link = &(*link)->rb_right;
}
+ cm_id_priv->listen_sharecount++;
rb_link_node(&cm_id_priv->service_node, parent, link);
rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
- return NULL;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ return cm_id_priv;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
@@ -810,21 +832,12 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
return NULL;
}
-static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
- enum ib_cm_sidr_status status)
-{
- struct ib_cm_sidr_rep_param param;
-
- memset(&param, 0, sizeof param);
- param.status = status;
- ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
-}
-
-struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
- ib_cm_handler cm_handler,
- void *context)
+static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
{
struct cm_id_private *cm_id_priv;
+ u32 id;
int ret;
cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
@@ -836,10 +849,9 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
cm_id_priv->id.cm_handler = cm_handler;
cm_id_priv->id.context = context;
cm_id_priv->id.remote_cm_qpn = 1;
- ret = cm_alloc_id(cm_id_priv);
- if (ret)
- goto error;
+ RB_CLEAR_NODE(&cm_id_priv->service_node);
+ RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
@@ -847,11 +859,42 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
refcount_set(&cm_id_priv->refcount, 1);
- return &cm_id_priv->id;
+
+ ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
+ &cm.local_id_next, GFP_KERNEL);
+ if (ret)
+ goto error;
+ cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
+
+ return cm_id_priv;
error:
kfree(cm_id_priv);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Make the ID visible to the MAD handlers and other threads that use the
+ * xarray.
+ */
+static void cm_finalize_id(struct cm_id_private *cm_id_priv)
+{
+ xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+ cm_id_priv, GFP_KERNEL);
+}
+
+struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+ ib_cm_handler cm_handler,
+ void *context)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
+ if (IS_ERR(cm_id_priv))
+ return ERR_CAST(cm_id_priv);
+
+ cm_finalize_id(cm_id_priv);
+ return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);
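The cm_alloc_id_priv()/cm_finalize_id() split above separates ID allocation into a reserve step and a publish step: xa_alloc_cyclic_irq() with a NULL entry hands out an ID while lookups still return NULL, and only the later xa_store_irq() makes the now fully initialised object visible to the MAD handlers. The same two-step xarray pattern in isolation (names hypothetical; a positive return from xa_alloc_cyclic_irq() only signals counter wrap-around, so only negative values are errors):

struct obj {
	u32 id;
};

static int obj_publish(struct xarray *xa, u32 *next, struct obj *o)
{
	u32 id;
	int err;

	/* Reserve: the NULL entry keeps concurrent xa_load() returning NULL */
	err = xa_alloc_cyclic_irq(xa, &id, NULL, xa_limit_32b, next, GFP_KERNEL);
	if (err < 0)
		return err;

	o->id = id;
	/* ... finish initialisation while the object is still invisible ... */

	/* Publish: from here on, lookups can find the constructed object */
	xa_store_irq(xa, id, o, GFP_KERNEL);
	return 0;
}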
@@ -932,6 +975,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
unsigned long flags;
struct cm_device *cm_dev;
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
if (!cm_dev)
return;
@@ -963,6 +1008,8 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
@@ -979,54 +1026,51 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
struct cm_work *work;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-retest:
spin_lock_irq(&cm_id_priv->lock);
+retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
- spin_unlock_irq(&cm_id_priv->lock);
-
- spin_lock_irq(&cm.lock);
+ spin_lock(&cm.lock);
if (--cm_id_priv->listen_sharecount > 0) {
/* The id is still shared. */
+ WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
+ spin_unlock(&cm.lock);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
- spin_unlock_irq(&cm.lock);
return;
}
+ cm_id->state = IB_CM_IDLE;
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
- spin_unlock_irq(&cm.lock);
+ RB_CLEAR_NODE(&cm_id_priv->service_node);
+ spin_unlock(&cm.lock);
break;
case IB_CM_SIDR_REQ_SENT:
cm_id->state = IB_CM_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_SIDR_REQ_RCVD:
- spin_unlock_irq(&cm_id_priv->lock);
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
- spin_lock_irq(&cm.lock);
- if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
- rb_erase(&cm_id_priv->sidr_id_node,
- &cm.remote_sidr_table);
- spin_unlock_irq(&cm.lock);
+ cm_send_sidr_rep_locked(cm_id_priv,
+ &(struct ib_cm_sidr_rep_param){
+ .status = IB_SIDR_REJECT });
+ /* cm_send_sidr_rep_locked will not move to IDLE if it fails */
+ cm_id->state = IB_CM_IDLE;
break;
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
- &cm_id_priv->id.device->node_guid,
- sizeof cm_id_priv->id.device->node_guid,
- NULL, 0);
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
+ &cm_id_priv->id.device->node_guid,
+ sizeof(cm_id_priv->id.device->node_guid),
+ NULL, 0);
break;
case IB_CM_REQ_RCVD:
if (err == -ENOMEM) {
/* Do not reject to allow future retries. */
cm_reset_to_idle(cm_id_priv);
- spin_unlock_irq(&cm_id_priv->lock);
} else {
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
+ cm_send_rej_locked(cm_id_priv,
+ IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+ NULL, 0);
}
break;
case IB_CM_REP_SENT:
@@ -1036,38 +1080,56 @@ retest:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, NULL, 0);
break;
case IB_CM_ESTABLISHED:
- spin_unlock_irq(&cm_id_priv->lock);
- if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+ if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
+ cm_id->state = IB_CM_IDLE;
break;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ }
+ cm_send_dreq_locked(cm_id_priv, NULL, 0);
goto retest;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
- spin_unlock_irq(&cm_id_priv->lock);
- break;
+ goto retest;
case IB_CM_DREQ_RCVD:
- spin_unlock_irq(&cm_id_priv->lock);
- ib_send_cm_drep(cm_id, NULL, 0);
+ cm_send_drep_locked(cm_id_priv, NULL, 0);
+ WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
+ goto retest;
+ case IB_CM_TIMEWAIT:
+ /*
+ * The cm_acquire_id in cm_timewait_handler will stop working
+ * once we do cm_free_id() below, so just move to idle here for
+ * consistency.
+ */
+ cm_id->state = IB_CM_IDLE;
break;
- default:
- spin_unlock_irq(&cm_id_priv->lock);
+ case IB_CM_IDLE:
break;
}
+ WARN_ON(cm_id->state != IB_CM_IDLE);
- spin_lock_irq(&cm.lock);
+ spin_lock(&cm.lock);
+ /* Required for cleanup paths related to cm_req_handler() */
+ if (cm_id_priv->timewait_info) {
+ cm_cleanup_timewait(cm_id_priv->timewait_info);
+ kfree(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
+ }
if (!list_empty(&cm_id_priv->altr_list) &&
(!cm_id_priv->altr_send_port_not_ready))
list_del(&cm_id_priv->altr_list);
if (!list_empty(&cm_id_priv->prim_list) &&
(!cm_id_priv->prim_send_port_not_ready))
list_del(&cm_id_priv->prim_list);
- spin_unlock_irq(&cm.lock);
+ WARN_ON(cm_id_priv->listen_sharecount);
+ WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
+ if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
+ rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ spin_unlock(&cm.lock);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
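
The reworked cm_destroy_id() above keeps cm_id_priv->lock held across the whole state walk; every case either transitions toward IB_CM_IDLE or jumps back to retest under the same lock. A stripped-down sketch of that shape, using a hypothetical state machine rather than the real CM states:

#include <linux/bug.h>
#include <linux/spinlock.h>

enum foo_state { FOO_BUSY, FOO_IDLE };

struct foo {
	spinlock_t lock;
	enum foo_state state;
};

static void foo_shutdown_step(struct foo *f)
{
	f->state = FOO_IDLE;	/* stand-in for a real transition */
}

static void foo_destroy(struct foo *f)
{
	spin_lock_irq(&f->lock);
retest:
	switch (f->state) {
	case FOO_BUSY:
		foo_shutdown_step(f);
		goto retest;	/* re-evaluate under the same lock */
	case FOO_IDLE:
		break;
	}
	WARN_ON(f->state != FOO_IDLE);
	spin_unlock_irq(&f->lock);
}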
@@ -1087,8 +1149,27 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
}
EXPORT_SYMBOL(ib_destroy_cm_id);
+static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
+ __be64 service_mask)
+{
+ service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
+ service_id &= service_mask;
+ if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
+ (service_id != IB_CM_ASSIGN_SERVICE_ID))
+ return -EINVAL;
+
+ if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
+ cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ } else {
+ cm_id_priv->id.service_id = service_id;
+ cm_id_priv->id.service_mask = service_mask;
+ }
+ return 0;
+}
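
A note on the mask handling in cm_init_listen() above: a zero service_mask is promoted to all-ones, i.e. an exact-match listen. A hypothetical helper, purely for illustration:

#include <asm/byteorder.h>
#include <linux/types.h>

static __be64 effective_service_mask(__be64 service_mask)
{
	/* 0 means "compare all 64 bits" per the semantics above. */
	return service_mask ? service_mask : ~cpu_to_be64(0);
}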
+
/**
- * __ib_cm_listen - Initiates listening on the specified service ID for
+ * ib_cm_listen - Initiates listening on the specified service ID for
* connection and service ID resolution requests.
* @cm_id: Connection identifier associated with the listen request.
* @service_id: Service identifier matched against incoming connection
@@ -1100,51 +1181,33 @@ EXPORT_SYMBOL(ib_destroy_cm_id);
* exactly. This parameter is ignored if %service_id is set to
* IB_CM_ASSIGN_SERVICE_ID.
*/
-static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
- __be64 service_mask)
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
- int ret = 0;
-
- service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
- service_id &= service_mask;
- if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
- (service_id != IB_CM_ASSIGN_SERVICE_ID))
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- if (cm_id->state != IB_CM_IDLE)
- return -EINVAL;
-
- cm_id->state = IB_CM_LISTEN;
- ++cm_id_priv->listen_sharecount;
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
- if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
- cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
- cm_id->service_mask = ~cpu_to_be64(0);
- } else {
- cm_id->service_id = service_id;
- cm_id->service_mask = service_mask;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->id.state != IB_CM_IDLE) {
+ ret = -EINVAL;
+ goto out;
}
- cur_cm_id_priv = cm_insert_listen(cm_id_priv);
- if (cur_cm_id_priv) {
- cm_id->state = IB_CM_IDLE;
- --cm_id_priv->listen_sharecount;
+ ret = cm_init_listen(cm_id_priv, service_id, service_mask);
+ if (ret)
+ goto out;
+
+ if (!cm_insert_listen(cm_id_priv, NULL)) {
ret = -EBUSY;
+ goto out;
}
- return ret;
-}
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cm.lock, flags);
- ret = __ib_cm_listen(cm_id, service_id, service_mask);
- spin_unlock_irqrestore(&cm.lock, flags);
+ cm_id_priv->id.state = IB_CM_LISTEN;
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
@@ -1169,51 +1232,38 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
ib_cm_handler cm_handler,
__be64 service_id)
{
+ struct cm_id_private *listen_id_priv;
struct cm_id_private *cm_id_priv;
- struct ib_cm_id *cm_id;
- unsigned long flags;
int err = 0;
/* Create an ID in advance, since the creation may sleep */
- cm_id = ib_create_cm_id(device, cm_handler, NULL);
- if (IS_ERR(cm_id))
- return cm_id;
+ cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
+ if (IS_ERR(cm_id_priv))
+ return ERR_CAST(cm_id_priv);
- spin_lock_irqsave(&cm.lock, flags);
-
- if (service_id == IB_CM_ASSIGN_SERVICE_ID)
- goto new_id;
+ err = cm_init_listen(cm_id_priv, service_id, 0);
+ if (err)
+ return ERR_PTR(err);
- /* Find an existing ID */
- cm_id_priv = cm_find_listen(device, service_id);
- if (cm_id_priv) {
- if (cm_id->cm_handler != cm_handler || cm_id->context) {
- /* Sharing an ib_cm_id with different handlers is not
- * supported */
- spin_unlock_irqrestore(&cm.lock, flags);
- ib_destroy_cm_id(cm_id);
+ spin_lock_irq(&cm_id_priv->lock);
+ listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
+ if (listen_id_priv != cm_id_priv) {
+ spin_unlock_irq(&cm_id_priv->lock);
+ ib_destroy_cm_id(&cm_id_priv->id);
+ if (!listen_id_priv)
return ERR_PTR(-EINVAL);
- }
- refcount_inc(&cm_id_priv->refcount);
- ++cm_id_priv->listen_sharecount;
- spin_unlock_irqrestore(&cm.lock, flags);
-
- ib_destroy_cm_id(cm_id);
- cm_id = &cm_id_priv->id;
- return cm_id;
+ return &listen_id_priv->id;
}
+ cm_id_priv->id.state = IB_CM_LISTEN;
+ spin_unlock_irq(&cm_id_priv->lock);
-new_id:
- /* Use newly created ID */
- err = __ib_cm_listen(cm_id, service_id, 0);
-
- spin_unlock_irqrestore(&cm.lock, flags);
+ /*
+ * A listen ID does not need to be in the xarray since it does not
+ * receive MADs, is not placed in the remote_id or remote_qpn rbtree,
+ * and does not enter timewait.
+ */
- if (err) {
- ib_destroy_cm_id(cm_id);
- return ERR_PTR(err);
- }
- return cm_id;
+ return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
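
The shared-listen logic above boils down to a find-or-insert on an rb-tree, where a hit bumps both the refcount and a share counter instead of linking a new node. A generic sketch under that assumption (names are illustrative):

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/types.h>

struct listener {
	struct rb_node node;
	u64 key;
	refcount_t ref;
	unsigned int sharecount;
};

static struct rb_root listen_tree = RB_ROOT;

/* Caller holds the table lock; returns the listener the caller must use. */
static struct listener *listener_find_or_insert(struct listener *new)
{
	struct rb_node **link = &listen_tree.rb_node, *parent = NULL;

	while (*link) {
		struct listener *cur =
			rb_entry(*link, struct listener, node);

		parent = *link;
		if (new->key == cur->key) {
			/* Share the existing listener instead. */
			refcount_inc(&cur->ref);
			cur->sharecount++;
			return cur;
		}
		link = new->key < cur->key ? &(*link)->rb_left :
					     &(*link)->rb_right;
	}
	new->sharecount++;
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &listen_tree);
	return new;
}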
@@ -1381,10 +1431,6 @@ static void cm_format_req(struct cm_req_msg *req_msg,
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
- /* peer-to-peer not supported */
- if (param->peer_to_peer)
- return -EINVAL;
-
if (!param->primary_path)
return -EINVAL;
@@ -1419,7 +1465,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
/* Verify that we're not in timewait. */
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_IDLE) {
+ if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = -EINVAL;
goto out;
@@ -1437,12 +1483,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
param->ppath_sgid_attr, &cm_id_priv->av,
cm_id_priv);
if (ret)
- goto error1;
+ goto out;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path, NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret)
- goto error1;
+ goto out;
}
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
@@ -1460,7 +1506,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
if (ret)
- goto error1;
+ goto out;
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
@@ -1483,7 +1529,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
return 0;
error2: cm_free_msg(cm_id_priv->msg);
-error1: kfree(cm_id_priv->timewait_info);
out: return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
@@ -1789,6 +1834,8 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
const void *private_data,
u8 private_data_len)
{
+ lockdep_assert_held(&cm_id_priv->lock);
+
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
@@ -1838,8 +1885,12 @@ static void cm_dup_req_handler(struct cm_work *work,
counter[CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
- if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
+ spin_lock_irq(&cm_id_priv->lock);
+ if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
+ spin_unlock_irq(&cm_id_priv->lock);
return;
+ }
+ spin_unlock_irq(&cm_id_priv->lock);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
@@ -1924,14 +1975,10 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
- goto out;
+ return NULL;
}
refcount_inc(&listen_cm_id_priv->refcount);
- refcount_inc(&cm_id_priv->refcount);
- cm_id_priv->id.state = IB_CM_REQ_RCVD;
- atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
-out:
return listen_cm_id_priv;
}
@@ -1973,7 +2020,6 @@ static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
static int cm_req_handler(struct cm_work *work)
{
- struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_req_msg *req_msg;
const struct ib_global_route *grh;
@@ -1982,13 +2028,33 @@ static int cm_req_handler(struct cm_work *work)
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
- if (IS_ERR(cm_id))
- return PTR_ERR(cm_id);
+ cm_id_priv =
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
+ if (IS_ERR(cm_id_priv))
+ return PTR_ERR(cm_id_priv);
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id =
cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ cm_id_priv->tid = req_msg->hdr.tid;
+ cm_id_priv->timeout_ms = cm_convert_to_ms(
+ IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
+ cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
+ cm_id_priv->remote_qpn =
+ cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->initiator_depth =
+ IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
+ cm_id_priv->responder_resources =
+ IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
+ cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
+ cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
+ cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
+ cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
+ cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
+ cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
+
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
@@ -2000,27 +2066,26 @@ static int cm_req_handler(struct cm_work *work)
ret = PTR_ERR(cm_id_priv->timewait_info);
goto destroy;
}
- cm_id_priv->timewait_info->work.remote_id =
- cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
+ cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
cm_id_priv->timewait_info->remote_ca_guid =
cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
- cm_id_priv->timewait_info->remote_qpn =
- cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
+ cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
+
+ /*
+ * Note that the ID pointer is not in the xarray at this point,
+ * so this set is only visible to the local thread.
+ */
+ cm_id_priv->id.state = IB_CM_REQ_RCVD;
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
- be32_to_cpu(cm_id->local_id));
+ be32_to_cpu(cm_id_priv->id.local_id));
+ cm_id_priv->id.state = IB_CM_IDLE;
ret = -EINVAL;
- goto free_timeinfo;
+ goto destroy;
}
- cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
- cm_id_priv->id.context = listen_cm_id_priv->id.context;
- cm_id_priv->id.service_id =
- cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
-
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
memset(&work->path[0], 0, sizeof(work->path[0]));
@@ -2058,10 +2123,10 @@ static int cm_req_handler(struct cm_work *work)
work->port->port_num, 0,
&work->path[0].sgid);
if (err)
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
NULL, 0, NULL, 0);
else
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
&work->path[0].sgid,
sizeof(work->path[0].sgid),
NULL, 0);
@@ -2071,41 +2136,40 @@ static int cm_req_handler(struct cm_work *work)
ret = cm_init_av_by_path(&work->path[1], NULL,
&cm_id_priv->alt_av, cm_id_priv);
if (ret) {
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+ ib_send_cm_rej(&cm_id_priv->id,
+ IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
sizeof(work->path[0].sgid), NULL, 0);
goto rejected;
}
}
- cm_id_priv->tid = req_msg->hdr.tid;
- cm_id_priv->timeout_ms = cm_convert_to_ms(
- IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
- cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
- cm_id_priv->remote_qpn =
- cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
- cm_id_priv->initiator_depth =
- IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
- cm_id_priv->responder_resources =
- IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
- cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
- cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
- cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
- cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
- cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
- cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
+
+ /* Now MAD handlers can see the new ID */
+ spin_lock_irq(&cm_id_priv->lock);
+ cm_finalize_id(cm_id_priv);
+
+ /* Refcount belongs to the event, pairs with cm_process_work() */
+ refcount_inc(&cm_id_priv->refcount);
+ atomic_inc(&cm_id_priv->work_count);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
+ /*
+ * Since this ID was just created and was not made visible to other MAD
+ * handlers until the cm_finalize_id() above, we know that
+ * cm_process_work() will deliver the event and that the listen_cm_id
+ * embedded in the event can be derefed here.
+ */
cm_deref_id(listen_cm_id_priv);
return 0;
rejected:
- refcount_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
-free_timeinfo:
- kfree(cm_id_priv->timewait_info);
destroy:
- ib_destroy_cm_id(cm_id);
+ ib_destroy_cm_id(&cm_id_priv->id);
return ret;
}
@@ -2189,6 +2253,9 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
+ WARN_ONCE(param->qp_num & 0xFF000000,
+ "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
+ param->qp_num);
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
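
The WARN_ONCE above guards the 24-bit wire format of a QPN; anything in the top byte would otherwise be silently dropped by the mask. An equivalent standalone helper, for illustration:

#include <linux/bug.h>
#include <linux/types.h>

static u32 sanitize_qpn(u32 qp_num)
{
	WARN_ONCE(qp_num & 0xFF000000,
		  "QPN is 24 bits per IBTA, got 0x%X\n", qp_num);
	return qp_num & 0xFFFFFF;
}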
@@ -2359,13 +2426,13 @@ static int cm_rep_handler(struct cm_work *work)
case IB_CM_MRA_REQ_RCVD:
break;
default:
- spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
pr_debug(
"%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
__func__, cm_id_priv->id.state,
IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
+ spin_unlock_irq(&cm_id_priv->lock);
goto error;
}
@@ -2434,8 +2501,6 @@ static int cm_rep_handler(struct cm_work *work)
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
- /* todo: handle peer_to_peer */
-
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
@@ -2546,35 +2611,32 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
private_data_len);
}
-int ib_send_cm_dreq(struct ib_cm_id *cm_id,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
+ const void *private_data, u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_ESTABLISHED) {
+ if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id->local_id), cm_id->state);
- ret = -EINVAL;
- goto out;
+ be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
+ return -EINVAL;
}
- if (cm_id->lap_state == IB_CM_LAP_SENT ||
- cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+ cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret) {
cm_enter_timewait(cm_id_priv);
- goto out;
+ return ret;
}
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
@@ -2585,14 +2647,26 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
- cm_id->state = IB_CM_DREQ_SENT;
+ cm_id_priv->id.state = IB_CM_DREQ_SENT;
cm_id_priv->msg = msg;
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
+}
+
+int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
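
cm_send_dreq_locked()/ib_send_cm_dreq() above follow the usual kernel split: a helper that asserts the lock and does the work, plus a thin exported wrapper that only manages the lock. The same split is applied to DREP, REJ, and SIDR REP below. Generic shape (hypothetical names throughout):

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	spinlock_t lock;
	bool ready;
};

static int foo_send_locked(struct foo *f)
{
	lockdep_assert_held(&f->lock);
	if (!f->ready)
		return -EINVAL;
	/* ... build and post the message, lock held throughout ... */
	return 0;
}

int foo_send(struct foo *f)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&f->lock, flags);
	ret = foo_send_locked(f);
	spin_unlock_irqrestore(&f->lock, flags);
	return ret;
}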
@@ -2613,51 +2687,60 @@ static void cm_format_drep(struct cm_drep_msg *drep_msg,
private_data_len);
}
-int ib_send_cm_drep(struct ib_cm_id *cm_id,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
+ void *private_data, u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
- void *data;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
return -EINVAL;
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_DREQ_RCVD) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
- pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
- __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
+ if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
+ pr_debug(
+ "%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
+ kfree(private_data);
return -EINVAL;
}
- cm_set_private_data(cm_id_priv, data, private_data_len);
+ cm_set_private_data(cm_id_priv, private_data, private_data_len);
cm_enter_timewait(cm_id_priv);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
- goto out;
+ return ret;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
+ return 0;
+}
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ void *data;
+ int ret;
+
+ data = cm_copy_private_data(private_data, private_data_len);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
@@ -2816,65 +2899,72 @@ out:
return -EINVAL;
}
-int ib_send_cm_rej(struct ib_cm_id *cm_id,
- enum ib_cm_rej_reason reason,
- void *ari,
- u8 ari_length,
- const void *private_data,
- u8 private_data_len)
+static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ enum ib_cm_rej_reason reason, void *ari,
+ u8 ari_length, const void *private_data,
+ u8 private_data_len)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
(ari && ari_length > IB_CM_REJ_ARI_LENGTH))
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- switch (cm_id->state) {
+ switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (!ret)
- cm_format_rej((struct cm_rej_msg *) msg->mad,
- cm_id_priv, reason, ari, ari_length,
- private_data, private_data_len);
-
cm_reset_to_idle(cm_id_priv);
+ ret = cm_alloc_msg(cm_id_priv, &msg);
+ if (ret)
+ return ret;
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
+ ari, ari_length, private_data, private_data_len);
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
- ret = cm_alloc_msg(cm_id_priv, &msg);
- if (!ret)
- cm_format_rej((struct cm_rej_msg *) msg->mad,
- cm_id_priv, reason, ari, ari_length,
- private_data, private_data_len);
-
cm_enter_timewait(cm_id_priv);
+ ret = cm_alloc_msg(cm_id_priv, &msg);
+ if (ret)
+ return ret;
+ cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
+ ari, ari_length, private_data, private_data_len);
break;
default:
pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
- ret = -EINVAL;
- goto out;
+ be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
+ return -EINVAL;
}
- if (ret)
- goto out;
-
ret = ib_post_send_mad(msg, NULL);
- if (ret)
+ if (ret) {
cm_free_msg(msg);
+ return ret;
+ }
-out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return 0;
+}
+
+int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
+ void *ari, u8 ari_length, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
+ private_data, private_data_len);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
@@ -2972,10 +3062,10 @@ static int cm_rej_handler(struct cm_work *work)
}
/* fall through */
default:
- spin_unlock_irq(&cm_id_priv->lock);
pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
__func__, be32_to_cpu(cm_id_priv->id.local_id),
cm_id_priv->id.state);
+ spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto out;
}
@@ -3502,20 +3592,27 @@ static void cm_format_sidr_req_event(struct cm_work *work,
static int cm_sidr_req_handler(struct cm_work *work)
{
- struct ib_cm_id *cm_id;
- struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
+ struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
int ret;
- cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
- if (IS_ERR(cm_id))
- return PTR_ERR(cm_id);
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ cm_id_priv =
+ cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
+ if (IS_ERR(cm_id_priv))
+ return PTR_ERR(cm_id_priv);
/* Record SGID/SLID and request ID for lookup. */
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
+
+ cm_id_priv->id.remote_id =
+ cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
+ cm_id_priv->id.service_id =
+ cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
+ cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ cm_id_priv->tid = sidr_req_msg->hdr.tid;
+
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
@@ -3525,41 +3622,46 @@ static int cm_sidr_req_handler(struct cm_work *work)
if (ret)
goto out;
- cm_id_priv->id.remote_id =
- cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
- cm_id_priv->tid = sidr_req_msg->hdr.tid;
- atomic_inc(&cm_id_priv->work_count);
-
spin_lock_irq(&cm.lock);
- cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
- if (cur_cm_id_priv) {
+ listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+ if (listen_cm_id_priv) {
spin_unlock_irq(&cm.lock);
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
- cur_cm_id_priv = cm_find_listen(
- cm_id->device,
- cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)));
- if (!cur_cm_id_priv) {
+ listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
+ cm_id_priv->id.service_id);
+ if (!listen_cm_id_priv) {
spin_unlock_irq(&cm.lock);
- cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
+ ib_send_cm_sidr_rep(&cm_id_priv->id,
+ &(struct ib_cm_sidr_rep_param){
+ .status = IB_SIDR_UNSUPPORTED });
goto out; /* No match. */
}
- refcount_inc(&cur_cm_id_priv->refcount);
- refcount_inc(&cm_id_priv->refcount);
+ refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
- cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
- cm_id_priv->id.context = cur_cm_id_priv->id.context;
- cm_id_priv->id.service_id =
- cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+ cm_id_priv->id.context = listen_cm_id_priv->id.context;
- cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
- cm_process_work(cm_id_priv, work);
- cm_deref_id(cur_cm_id_priv);
+ /*
+ * A SIDR ID does not need to be in the xarray since it does not receive
+ * MADs, is not placed in the remote_id or remote_qpn rbtree, and does
+ * not enter timewait.
+ */
+
+ cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
+ cm_free_work(work);
+ /*
+ * A pointer to the listen_cm_id is held in the event, so this deref
+ * must be after the event is delivered above.
+ */
+ cm_deref_id(listen_cm_id_priv);
+ if (ret)
+ cm_destroy_id(&cm_id_priv->id, ret);
return 0;
out:
ib_destroy_cm_id(&cm_id_priv->id);
@@ -3589,50 +3691,52 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
param->private_data, param->private_data_len);
}
-int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
- struct ib_cm_sidr_rep_param *param)
+static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ struct ib_cm_sidr_rep_param *param)
{
- struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
- unsigned long flags;
int ret;
+ lockdep_assert_held(&cm_id_priv->lock);
+
if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
(param->private_data &&
param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
return -EINVAL;
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
- ret = -EINVAL;
- goto error;
- }
+ if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
+ return -EINVAL;
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
- goto error;
+ return ret;
cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
param);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
- cm_id->state = IB_CM_IDLE;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-
- spin_lock_irqsave(&cm.lock, flags);
+ cm_id_priv->id.state = IB_CM_IDLE;
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
- spin_unlock_irqrestore(&cm.lock, flags);
return 0;
+}
-error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
+ struct ib_cm_sidr_rep_param *param)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ ret = cm_send_sidr_rep_locked(cm_id_priv, param);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2dec3a02ab9f..26e6f7df247b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -199,7 +199,7 @@ struct cma_device {
struct list_head list;
struct ib_device *device;
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
struct list_head id_list;
enum ib_gid_type *default_gid_type;
u8 *default_roce_tos;
@@ -247,9 +247,15 @@ enum {
CMA_OPTION_AFONLY,
};
-void cma_ref_dev(struct cma_device *cma_dev)
+void cma_dev_get(struct cma_device *cma_dev)
{
- atomic_inc(&cma_dev->refcount);
+ refcount_inc(&cma_dev->refcount);
+}
+
+void cma_dev_put(struct cma_device *cma_dev)
+{
+ if (refcount_dec_and_test(&cma_dev->refcount))
+ complete(&cma_dev->comp);
}
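
The cma_dev_get()/cma_dev_put() pair above is the standard refcount_t-plus-completion teardown idiom: the last put fires the completion, and the remover waits on it. A minimal self-contained sketch of the same idiom:

#include <linux/completion.h>
#include <linux/refcount.h>

struct dev_obj {
	refcount_t refcount;
	struct completion comp;
};

static void dev_obj_init(struct dev_obj *d)
{
	refcount_set(&d->refcount, 1);
	init_completion(&d->comp);
}

static void dev_obj_get(struct dev_obj *d)
{
	refcount_inc(&d->refcount);
}

static void dev_obj_put(struct dev_obj *d)
{
	if (refcount_dec_and_test(&d->refcount))
		complete(&d->comp);
}

static void dev_obj_remove(struct dev_obj *d)
{
	dev_obj_put(d);			/* drop the initial reference */
	wait_for_completion(&d->comp);	/* block until the last user */
}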
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
@@ -267,7 +273,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
}
if (found_cma_dev)
- cma_ref_dev(found_cma_dev);
+ cma_dev_get(found_cma_dev);
mutex_unlock(&lock);
return found_cma_dev;
}
@@ -463,7 +469,7 @@ static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev)
{
- cma_ref_dev(cma_dev);
+ cma_dev_get(cma_dev);
id_priv->cma_dev = cma_dev;
id_priv->id.device = cma_dev->device;
id_priv->id.route.addr.dev_addr.transport =
@@ -484,12 +490,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
rdma_start_port(cma_dev->device)];
}
-void cma_deref_dev(struct cma_device *cma_dev)
-{
- if (atomic_dec_and_test(&cma_dev->refcount))
- complete(&cma_dev->comp);
-}
-
static inline void release_mc(struct kref *kref)
{
struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
@@ -502,7 +502,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
{
mutex_lock(&lock);
list_del(&id_priv->list);
- cma_deref_dev(id_priv->cma_dev);
+ cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
mutex_unlock(&lock);
}
@@ -728,8 +728,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
+ unsigned int port;
union ib_gid gid;
- u8 port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
@@ -753,7 +753,7 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
}
list_for_each_entry(cma_dev, &dev_list, list) {
- for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+ rdma_for_each_port (cma_dev->device, port) {
if (listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
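
rdma_for_each_port() iterates the device's valid (1-based) port numbers and replaces the open-coded phys_port_cnt loops removed above. A hedged usage sketch:

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void log_sa_ports(struct ib_device *device)
{
	unsigned int port;

	rdma_for_each_port(device, port) {
		if (rdma_cap_ib_sa(device, port))
			pr_info("port %u has an IB SA\n", port);
	}
}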
@@ -786,8 +786,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
struct cma_device *cma_dev, *cur_dev;
struct sockaddr_ib *addr;
union ib_gid gid, sgid, *dgid;
+ unsigned int p;
u16 pkey, index;
- u8 p;
enum ib_port_state port_state;
int i;
@@ -798,7 +798,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
- for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ rdma_for_each_port (cur_dev->device, p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
continue;
@@ -840,9 +840,14 @@ found:
return 0;
}
-static void cma_deref_id(struct rdma_id_private *id_priv)
+static void cma_id_get(struct rdma_id_private *id_priv)
+{
+ refcount_inc(&id_priv->refcount);
+}
+
+static void cma_id_put(struct rdma_id_private *id_priv)
{
- if (atomic_dec_and_test(&id_priv->refcount))
+ if (refcount_dec_and_test(&id_priv->refcount))
complete(&id_priv->comp);
}
@@ -870,7 +875,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex);
init_completion(&id_priv->comp);
- atomic_set(&id_priv->refcount, 1);
+ refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
@@ -1846,11 +1851,11 @@ void rdma_destroy_id(struct rdma_cm_id *id)
}
cma_release_port(id_priv);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
wait_for_completion(&id_priv->comp);
if (id_priv->internal_id)
- cma_deref_id(id_priv->id.context);
+ cma_id_put(id_priv->id.context);
kfree(id_priv->id.route.path_rec);
@@ -2187,7 +2192,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
*/
- atomic_inc(&conn_id->refcount);
+ cma_id_get(conn_id);
ret = cma_cm_event_handler(conn_id, &event);
if (ret)
goto err3;
@@ -2204,13 +2209,13 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
if (net_dev)
dev_put(net_dev);
return 0;
err3:
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
/* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL;
err2:
@@ -2391,7 +2396,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
* Protect against the user destroying conn_id from another thread
* until we're done accessing it.
*/
- atomic_inc(&conn_id->refcount);
+ cma_id_get(conn_id);
ret = cma_cm_event_handler(conn_id, &event);
if (ret) {
/* User wants to destroy the CM ID */
@@ -2399,13 +2404,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
rdma_destroy_id(&conn_id->id);
return ret;
}
mutex_unlock(&conn_id->handler_mutex);
- cma_deref_id(conn_id);
+ cma_id_put(conn_id);
out:
mutex_unlock(&listen_id->handler_mutex);
@@ -2492,7 +2497,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
_cma_attach_to_dev(dev_id_priv, cma_dev);
list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
dev_id_priv->internal_id = 1;
dev_id_priv->afonly = id_priv->afonly;
dev_id_priv->tos_set = id_priv->tos_set;
@@ -2647,7 +2652,7 @@ static void cma_work_handler(struct work_struct *_work)
}
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
if (destroy)
rdma_destroy_id(&id_priv->id);
kfree(work);
@@ -2671,7 +2676,7 @@ static void cma_ndev_work_handler(struct work_struct *_work)
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
if (destroy)
rdma_destroy_id(&id_priv->id);
kfree(work);
@@ -2687,14 +2692,19 @@ static void cma_init_resolve_route_work(struct cma_work *work,
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
}
-static void cma_init_resolve_addr_work(struct cma_work *work,
- struct rdma_id_private *id_priv)
+static void enqueue_resolve_addr_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
{
+ /* Balances with cma_id_put() in cma_work_handler */
+ cma_id_get(id_priv);
+
work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler);
work->old_state = RDMA_CM_ADDR_QUERY;
work->new_state = RDMA_CM_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+
+ queue_work(cma_wq, &work->work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
@@ -2968,6 +2978,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
+ route->num_paths = 0;
err1:
kfree(work);
return ret;
@@ -2982,7 +2993,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
return -EINVAL;
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
else if (rdma_protocol_roce(id->device, id->port_num))
@@ -2998,7 +3009,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
@@ -3025,9 +3036,9 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
struct cma_device *cma_dev, *cur_dev;
union ib_gid gid;
enum ib_port_state port_state;
+ unsigned int p;
u16 pkey;
int ret;
- u8 p;
cma_dev = NULL;
mutex_lock(&lock);
@@ -3039,7 +3050,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
if (!cma_dev)
cma_dev = cur_dev;
- for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+ rdma_for_each_port (cur_dev->device, p) {
if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
port_state == IB_PORT_ACTIVE) {
cma_dev = cur_dev;
@@ -3148,9 +3159,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- atomic_inc(&id_priv->refcount);
- cma_init_resolve_addr_work(work, id_priv);
- queue_work(cma_wq, &work->work);
+ enqueue_resolve_addr_work(work, id_priv);
return 0;
err:
kfree(work);
@@ -3175,9 +3184,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
- atomic_inc(&id_priv->refcount);
- cma_init_resolve_addr_work(work, id_priv);
- queue_work(cma_wq, &work->work);
+ enqueue_resolve_addr_work(work, id_priv);
return 0;
err:
kfree(work);
@@ -4588,7 +4595,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
INIT_WORK(&work->work, cma_ndev_work_handler);
work->id = id_priv;
work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
queue_work(cma_wq, &work->work);
}
@@ -4663,7 +4670,7 @@ static void cma_add_one(struct ib_device *device)
}
init_completion(&cma_dev->comp);
- atomic_set(&cma_dev->refcount, 1);
+ refcount_set(&cma_dev->refcount, 1);
INIT_LIST_HEAD(&cma_dev->id_list);
ib_set_client_data(device, &cma_client, cma_dev);
@@ -4722,11 +4729,11 @@ static void cma_process_remove(struct cma_device *cma_dev)
list_del(&id_priv->listen_list);
list_del_init(&id_priv->list);
- atomic_inc(&id_priv->refcount);
+ cma_id_get(id_priv);
mutex_unlock(&lock);
ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
- cma_deref_id(id_priv);
+ cma_id_put(id_priv);
if (ret)
rdma_destroy_id(&id_priv->id);
@@ -4734,7 +4741,7 @@ static void cma_process_remove(struct cma_device *cma_dev)
}
mutex_unlock(&lock);
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
wait_for_completion(&cma_dev->comp);
}
@@ -4790,6 +4797,19 @@ static int __init cma_init(void)
{
int ret;
+ /*
+ * There is a rare lock ordering dependency in cma_netdev_callback()
+ * that only happens when bonding is enabled. Teach lockdep that rtnl
+ * must never be nested under lock so it can find these without having
+ * to test with bonding.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ rtnl_lock();
+ mutex_lock(&lock);
+ mutex_unlock(&lock);
+ rtnl_unlock();
+ }
+
cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
if (!cma_wq)
return -ENOMEM;
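
The lockdep priming in cma_init() above works because lockdep records a lock-ordering dependency the first time a pair of locks is taken together; acquiring them once in the legal order lets it flag any later inversion without ever exercising the rare bonding path. A generic sketch of the technique:

#include <linux/kconfig.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(outer);	/* stands in for rtnl */
static DEFINE_MUTEX(inner);	/* stands in for the cma 'lock' mutex */

static void prime_lock_order(void)
{
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		mutex_lock(&outer);
		mutex_lock(&inner);
		mutex_unlock(&inner);
		mutex_unlock(&outer);
	}
}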
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 8b0b5ae22e4c..c672a4978bfd 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -94,7 +94,7 @@ static int cma_configfs_params_get(struct config_item *item,
static void cma_configfs_params_put(struct cma_device *cma_dev)
{
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
}
static ssize_t default_roce_mode_show(struct config_item *item,
@@ -312,12 +312,12 @@ static struct config_group *make_cma_dev(struct config_group *group,
configfs_add_default_group(&cma_dev_group->ports_group,
&cma_dev_group->device_group);
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
return &cma_dev_group->device_group;
fail:
if (cma_dev)
- cma_deref_dev(cma_dev);
+ cma_dev_put(cma_dev);
kfree(cma_dev_group);
return ERR_PTR(err);
}
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index ca7307277518..5edcf44a9307 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -66,7 +66,7 @@ struct rdma_id_private {
struct mutex qp_mutex;
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
struct mutex handler_mutex;
int backlog;
@@ -111,8 +111,8 @@ static inline void cma_configfs_exit(void)
}
#endif
-void cma_ref_dev(struct cma_device *dev);
-void cma_deref_dev(struct cma_device *dev);
+void cma_dev_get(struct cma_device *dev);
+void cma_dev_put(struct cma_device *dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
void *cookie);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 956b3a7dfed7..403d8673a2f9 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -79,13 +79,13 @@ struct ib_mad_private {
struct ib_mad_private_header header;
size_t mad_size;
struct ib_grh grh;
- u8 mad[0];
+ u8 mad[];
} __packed;
struct ib_rmpp_segment {
struct list_head list;
u32 num;
- u8 data[0];
+ u8 data[];
};
struct ib_mad_agent_private {
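
The u8 mad[0] to u8 mad[] changes above replace GNU zero-length arrays with C99 flexible array members; sizing the allocation then pairs naturally with struct_size(). Illustrative sketch:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct msg {
	size_t len;
	u8 data[];	/* flexible array member; must be last */
};

static struct msg *msg_alloc(size_t len)
{
	struct msg *m = kzalloc(struct_size(m, data, len), GFP_KERNEL);

	if (m)
		m->len = len;
	return m;
}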
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index cd338ddc4a39..9c2d8b7f1af9 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -71,7 +71,7 @@ struct mcast_device {
struct ib_event_handler event_handler;
int start_port;
int end_port;
- struct mcast_port port[0];
+ struct mcast_port port[];
};
enum mcast_state {
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 06e5b6787443..557efbf29197 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -391,13 +391,13 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
if (!ret)
return -ENOMEM;
sg_cnt = ret;
if (prot_sg_cnt) {
- ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
+ ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
if (!ret) {
ret = -ENOMEM;
goto out_unmap_sg;
@@ -466,9 +466,9 @@ out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
if (prot_sg_cnt)
- ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -628,9 +628,9 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
kfree(ctx->reg);
- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
if (prot_sg_cnt)
- ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 30d4c126a2db..74e0058fcf9e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -101,7 +101,7 @@ struct ib_sa_port {
struct ib_sa_device {
int start_port, end_port;
struct ib_event_handler event_handler;
- struct ib_sa_port port[0];
+ struct ib_sa_port port[];
};
struct ib_sa_query {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 0274e9b704be..16b6cf57fa85 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -85,12 +85,13 @@ struct ucma_file {
struct ucma_context {
u32 id;
struct completion comp;
- atomic_t ref;
+ refcount_t ref;
int events_reported;
int backlog;
struct ucma_file *file;
struct rdma_cm_id *cm_id;
+ struct mutex mutex;
u64 uid;
struct list_head list;
@@ -152,7 +153,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
if (ctx->closing)
ctx = ERR_PTR(-EIO);
else
- atomic_inc(&ctx->ref);
+ refcount_inc(&ctx->ref);
}
xa_unlock(&ctx_table);
return ctx;
@@ -160,7 +161,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
static void ucma_put_ctx(struct ucma_context *ctx)
{
- if (atomic_dec_and_test(&ctx->ref))
+ if (refcount_dec_and_test(&ctx->ref))
complete(&ctx->comp);
}
@@ -212,10 +213,11 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
return NULL;
INIT_WORK(&ctx->close_work, ucma_close_id);
- atomic_set(&ctx->ref, 1);
+ refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file;
+ mutex_init(&ctx->mutex);
if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
goto error;
@@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
}
events_reported = ctx->events_reported;
+ mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
}
@@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
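
All of the ucma call sites converted above share one shape: look up the context, take its new per-context mutex around the rdma_* call, then drop the reference. A generic, self-contained sketch of that shape (struct and helper names are hypothetical):

#include <linux/mutex.h>

/* Hypothetical stand-in for ucma_context with its new per-object mutex. */
struct uctx {
	struct mutex mutex;
};

static int uctx_do_op(struct uctx *ctx, int (*op)(struct uctx *))
{
	int ret;

	mutex_lock(&ctx->mutex);	/* serialize rdma_* calls on this ctx */
	ret = op(ctx);
	mutex_unlock(&ctx->mutex);
	return ret;
}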
@@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
memset(&resp, 0, sizeof resp);
addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
ucma_copy_iw_route(&resp, &ctx->cm_id->route);
out:
+ mutex_unlock(&ctx->mutex);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
switch (cmd.option) {
case RDMA_USER_CM_QUERY_ADDR:
ret = ucma_query_addr(ctx, response, out_len);
@@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file,
ret = -ENOSYS;
break;
}
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1045,7 +1063,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
dst->retry_count = src->retry_count;
dst->rnr_retry_count = src->rnr_retry_count;
dst->srq = src->srq;
- dst->qp_num = src->qp_num;
+ dst->qp_num = src->qp_num & 0xFFFFFF;
dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}
@@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ mutex_lock(&ctx->mutex);
ret = rdma_connect(ctx->cm_id, &conn_param);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
cmd.backlog : max_backlog;
+ mutex_lock(&ctx->mutex);
ret = rdma_listen(ctx->cm_id, ctx->backlog);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&file->mut);
+ mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+ mutex_unlock(&ctx->mutex);
if (!ret)
ctx->uid = cmd.uid;
mutex_unlock(&file->mut);
- } else
+ } else {
+ mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, NULL, NULL);
-
+ mutex_unlock(&ctx->mutex);
+ }
ucma_put_ctx(ctx);
return ret;
}
@@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
ret = rdma_disconnect(ctx->cm_id);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
+ mutex_lock(&ctx->mutex);
ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+ mutex_unlock(&ctx->mutex);
if (ret)
goto out;
@@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;
sa_convert_path_ib_to_opa(&opa, &sa_path);
+ mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &opa);
+ mutex_unlock(&ctx->mutex);
} else {
+ mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
+ mutex_unlock(&ctx->mutex);
}
if (ret)
return ret;
@@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
switch (level) {
case RDMA_OPTION_ID:
+ mutex_lock(&ctx->mutex);
ret = ucma_set_option_id(ctx, optname, optval, optlen);
+ mutex_unlock(&ctx->mutex);
break;
case RDMA_OPTION_IB:
ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ mutex_lock(&ctx->mutex);
if (ctx->cm_id->device)
ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
+ mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
+ mutex_unlock(&ctx->mutex);
if (ret)
goto err2;
@@ -1502,7 +1544,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = ERR_PTR(-ENOENT);
else if (mc->ctx->file != file)
mc = ERR_PTR(-EINVAL);
- else if (!atomic_inc_not_zero(&mc->ctx->ref))
+ else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
else
__xa_erase(&multicast_table, mc->id);
@@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
goto out;
}
+ mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
+ mutex_unlock(&mc->ctx->mutex);
+
mutex_lock(&mc->ctx->file->mut);
ucma_cleanup_mc_events(mc);
list_del(&mc->list);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 06b6125b5ae1..82455a1392f1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -197,6 +197,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
unsigned long lock_limit;
unsigned long new_pinned;
unsigned long cur_base;
+ unsigned long dma_attr = 0;
struct mm_struct *mm;
unsigned long npages;
int ret;
@@ -278,10 +279,12 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg(device,
- umem->sg_head.sgl,
- umem->sg_nents,
- DMA_BIDIRECTIONAL);
+ if (access & IB_ACCESS_RELAXED_ORDERING)
+ dma_attr |= DMA_ATTR_WEAK_ORDERING;
+
+ umem->nmap =
+ ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
+ DMA_BIDIRECTIONAL, dma_attr);
if (!umem->nmap) {
ret = -ENOMEM;
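The umem change switches from ib_dma_map_sg() to ib_dma_map_sg_attrs() so that IB_ACCESS_RELAXED_ORDERING in the caller's access flags can be translated into DMA_ATTR_WEAK_ORDERING for the mapping. A hedged sketch of a consumer opting in when pinning user memory (error handling trimmed; the surrounding ULP code, ibdev, addr and len are assumed):

    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    struct ib_umem *umem;

    umem = ib_umem_get(ibdev, addr, len,
                       IB_ACCESS_LOCAL_WRITE | IB_ACCESS_RELAXED_ORDERING);
    if (IS_ERR(umem))
        return PTR_ERR(umem);   /* pinning or DMA mapping failed */

Relaxed ordering only takes effect on platforms whose DMA layer honors DMA_ATTR_WEAK_ORDERING; elsewhere the flag is a no-op.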
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e62c9dfc7837..56a71337112c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -54,8 +54,6 @@
#include "core_priv.h"
#include <trace/events/rdma_core.h>
-#include <trace/events/rdma_core.h>
-
static int ib_resolve_eth_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr);
@@ -1127,8 +1125,7 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+ struct ib_qp_init_attr *qp_init_attr)
{
struct ib_qp *real_qp = qp;
@@ -1150,9 +1147,18 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
return qp;
}
-struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+/**
+ * ib_create_qp - Creates a kernel QP associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
+ *
+ * NOTE: for a user QP, use ib_create_qp_user() with valid udata!
+ */
+struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr)
{
struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
struct ib_qp *qp;
@@ -1187,7 +1193,7 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
struct ib_qp *xrc_qp =
- create_xrc_qp_user(qp, qp_init_attr, udata);
+ create_xrc_qp_user(qp, qp_init_attr);
if (IS_ERR(xrc_qp)) {
ret = PTR_ERR(xrc_qp);
@@ -1243,7 +1249,7 @@ err:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_create_qp_user);
+EXPORT_SYMBOL(ib_create_qp);
static const struct {
int valid;
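With udata dropped from the signature, the verbs hunk turns ib_create_qp() back into the exported entry point for kernel consumers, while user QPs keep going through ib_create_qp_user(). A minimal sketch of an in-kernel caller; pd, scq and rcq are assumed to have been created earlier:

    struct ib_qp_init_attr attr = {
        .send_cq     = scq,
        .recv_cq     = rcq,
        .qp_type     = IB_QPT_RC,
        .sq_sig_type = IB_SIGNAL_REQ_WR,
        .cap = {
            .max_send_wr  = 64,
            .max_recv_wr  = 64,
            .max_send_sge = 1,
            .max_recv_sge = 1,
        },
    };
    struct ib_qp *qp;

    qp = ib_create_qp(pd, &attr);   /* kernel QP, no udata */
    if (IS_ERR(qp))
        return PTR_ERR(qp);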
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 725b2350e349..a300588634c5 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -89,6 +89,15 @@
#define BNXT_RE_DEFAULT_ACK_DELAY 16
+struct bnxt_re_ring_attr {
+ dma_addr_t *dma_arr;
+ int pages;
+ int type;
+ u32 depth;
+ u32 lrid; /* Logical ring id */
+ u8 mode;
+};
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
@@ -104,6 +113,14 @@ struct bnxt_re_sqp_entries {
struct bnxt_re_qp *qp1_qp;
};
+#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024
+struct bnxt_re_gsi_context {
+ struct bnxt_re_qp *gsi_qp;
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_sqp_entries *sqp_tbl;
+};
+
#define BNXT_RE_MIN_MSIX 2
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
@@ -115,7 +132,6 @@ struct bnxt_re_dev {
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
-#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
#define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
@@ -125,7 +141,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
- struct bnxt_qplib_chip_ctx chip_ctx;
+ struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
@@ -160,15 +176,11 @@ struct bnxt_re_dev {
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
- atomic_t sched_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
/* QP for handling QP1 packets */
- u32 sqp_id;
- struct bnxt_re_qp *qp1_sqp;
- struct bnxt_re_ah *sqp_ah;
- struct bnxt_re_sqp_entries sqp_tbl[1024];
+ struct bnxt_re_gsi_context gsi_ctx;
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
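The header reshuffle gathers the loose QP1 state (qp1_sqp, sqp_ah, the inline sqp_tbl[1024] array) into a single bnxt_re_gsi_context and turns chip_ctx into a pointer, shrinking bnxt_re_dev and letting both be allocated only when actually needed. A sketch of sizing the shadow table at GSI-QP creation time; the patch itself uses kzalloc with an explicit multiply, and kcalloc shown here is merely an overflow-safe equivalent:

    struct bnxt_re_sqp_entries *tbl;

    tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*tbl), GFP_KERNEL);
    if (!tbl)
        return -ENOMEM;
    rdev->gsi_ctx.sqp_tbl = tbl;    /* freed in bnxt_re_destroy_gsi_sqp() */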
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 52b6a4d85460..95f6d493d1b9 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -312,9 +312,9 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
*/
if (ctx->idx == 0 &&
rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
- ctx->refcnt == 1 && rdev->qp1_sqp) {
- dev_dbg(rdev_to_dev(rdev),
- "Trying to delete GID0 while QP1 is alive\n");
+ ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
+ ibdev_dbg(&rdev->ibdev,
+ "Trying to delete GID0 while QP1 is alive\n");
return -EFAULT;
}
ctx->refcnt--;
@@ -322,8 +322,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
vlan_id, true);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to remove GID: %#x", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to remove GID: %#x", rc);
} else {
ctx_tbl = sgid_tbl->ctx;
ctx_tbl[ctx->idx] = NULL;
@@ -360,7 +360,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
}
if (rc < 0) {
- dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
+ ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
return rc;
}
@@ -423,12 +423,12 @@ static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
wqe.bind.r_key = fence->bind_rkey;
fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
- dev_dbg(rdev_to_dev(qp->rdev),
- "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+ ibdev_dbg(&qp->rdev->ibdev,
+ "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
wqe.bind.r_key, qp->qplib_qp.id, pd);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
if (rc) {
- dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+ ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
return rc;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
@@ -479,7 +479,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+ ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
rc = -EIO;
fence->dma_addr = 0;
goto fail;
@@ -499,7 +499,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+ ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
goto fail;
}
@@ -511,7 +511,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+ ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
goto fail;
}
mr->ib_mr.rkey = mr->qplib_mr.rkey;
@@ -519,8 +519,8 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
/* Create a fence MW only for kernel consumers */
mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
if (IS_ERR(mw)) {
- dev_err(rdev_to_dev(rdev),
- "Failed to create fence-MW for PD: %p\n", pd);
+ ibdev_err(&rdev->ibdev,
+ "Failed to create fence-MW for PD: %p\n", pd);
rc = PTR_ERR(mw);
goto fail;
}
@@ -558,7 +558,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
pd->rdev = rdev;
if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
rc = -ENOMEM;
goto fail;
}
@@ -585,16 +585,16 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to copy user response\n");
+ ibdev_err(&rdev->ibdev,
+ "Failed to copy user response\n");
goto dbfail;
}
}
if (!udata)
if (bnxt_re_create_fence_mr(pd))
- dev_warn(rdev_to_dev(rdev),
- "Failed to create Fence-MR\n");
+ ibdev_warn(&rdev->ibdev,
+ "Failed to create Fence-MR\n");
return 0;
dbfail:
bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -639,12 +639,13 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct bnxt_re_dev *rdev = pd->rdev;
const struct ib_gid_attr *sgid_attr;
+ struct bnxt_re_gid_ctx *ctx;
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
u8 nw_type;
int rc;
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
- dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
+ ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
return -EINVAL;
}
@@ -654,19 +655,18 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
/* Supply the configuration for the HW */
memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
sizeof(union ib_gid));
- /*
- * If RoCE V2 is enabled, stack will have two entries for
- * each GID entry. Avoiding this duplicte entry in HW. Dividing
- * the GID index by 2 for RoCE V2
+ sgid_attr = grh->sgid_attr;
+ sgid_attr = grh->sgid_attr;
+ /* Get the HW context of the GID. The reference to
+ * the GID table entry is already held by the caller.
*/
- ah->qplib_ah.sgid_index = grh->sgid_index / 2;
+ ctx = rdma_read_gid_hw_context(sgid_attr);
+ ah->qplib_ah.sgid_index = ctx->idx;
ah->qplib_ah.host_sgid_index = grh->sgid_index;
ah->qplib_ah.traffic_class = grh->traffic_class;
ah->qplib_ah.flow_label = grh->flow_label;
ah->qplib_ah.hop_limit = grh->hop_limit;
ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
- sgid_attr = grh->sgid_attr;
/* Get network header type for this GID */
nw_type = rdma_gid_attr_network_type(sgid_attr);
ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
@@ -675,7 +675,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
!(flags & RDMA_CREATE_AH_SLEEPABLE));
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
+ ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
return rc;
}
@@ -742,6 +742,49 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+{
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_dev *rdev;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
+ bnxt_qplib_destroy_ah(&rdev->qplib_res,
+ &gsi_sah->qplib_ah,
+ true);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+
+ ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
+ goto fail;
+ }
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+
+ kfree(rdev->gsi_ctx.sqp_tbl);
+ kfree(gsi_sah);
+ kfree(gsi_sqp);
+ rdev->gsi_ctx.gsi_sqp = NULL;
+ rdev->gsi_ctx.gsi_sah = NULL;
+ rdev->gsi_ctx.sqp_tbl = NULL;
+
+ return 0;
+fail:
+ return rc;
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
@@ -750,10 +793,16 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
+ mutex_lock(&rdev->qp_lock);
+ list_del(&qp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
+
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
return rc;
}
@@ -765,40 +814,19 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
- bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
- false);
-
- bnxt_qplib_clean_qp(&qp->qplib_qp);
- rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to destroy Shadow QP");
- return rc;
- }
- bnxt_qplib_free_qp_res(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- mutex_lock(&rdev->qp_lock);
- list_del(&rdev->qp1_sqp->list);
- atomic_dec(&rdev->qp_count);
- mutex_unlock(&rdev->qp_lock);
-
- kfree(rdev->sqp_ah);
- kfree(rdev->qp1_sqp);
- rdev->qp1_sqp = NULL;
- rdev->sqp_ah = NULL;
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
+ rc = bnxt_re_destroy_gsi_sqp(qp);
+ if (rc)
+ goto sh_fail;
}
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
- mutex_lock(&rdev->qp_lock);
- list_del(&qp->list);
- atomic_dec(&rdev->qp_count);
- mutex_unlock(&rdev->qp_lock);
kfree(qp);
return 0;
+sh_fail:
+ return rc;
}
static u8 __from_ib_qp_type(enum ib_qp_type type)
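Note the reordering in bnxt_re_destroy_qp(): the QP is unlinked from the driver's list and qp_count is dropped before any hardware teardown, and the shadow-GSI cleanup moves into its own bnxt_re_destroy_gsi_sqp() helper. The resulting teardown order, condensed into one sketch of the flow above (not additional code):

    mutex_lock(&rdev->qp_lock);             /* 1. unlink from driver bookkeeping */
    list_del(&qp->list);
    mutex_unlock(&rdev->qp_lock);
    atomic_dec(&rdev->qp_count);

    bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); /* 2. quiesce, then destroy HW state */
    rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
    if (rc)
        return rc;
    bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

    ib_umem_release(qp->rumem);             /* 3. release memory last */
    ib_umem_release(qp->sumem);
    kfree(qp);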
@@ -831,7 +859,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
- psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
bytes += (qplib_qp->sq.max_wqe * psn_sz);
@@ -843,9 +871,11 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
return PTR_ERR(umem);
qp->sumem = umem;
- qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
+ qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
qplib_qp->sq.sg_info.nmap = umem->nmap;
+ qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
+ qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
qplib_qp->qp_handle = ureq.qp_handle;
if (!qp->qplib_qp.srq) {
@@ -856,9 +886,11 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
- qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
+ qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
qplib_qp->rq.sg_info.nmap = umem->nmap;
+ qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
+ qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
}
qplib_qp->dpi = &cntx->dpi;
@@ -906,8 +938,8 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate HW AH for Shadow QP");
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate HW AH for Shadow QP");
goto fail;
}
@@ -948,6 +980,8 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
+ qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
+ qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;
@@ -956,6 +990,8 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;
+ qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
+ qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
qp->qplib_qp.mtu = qp1_qp->mtu;
@@ -967,8 +1003,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
if (rc)
goto fail;
- rdev->sqp_id = qp->qplib_qp.id;
-
spin_lock_init(&qp->sq_lock);
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
@@ -981,205 +1015,378 @@ fail:
return NULL;
}
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
{
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp;
- struct bnxt_re_cq *cq;
- struct bnxt_re_srq *srq;
- int rc, entries;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
- if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
- return ERR_PTR(-EINVAL);
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ if (init_attr->srq) {
+ struct bnxt_re_srq *srq;
- qp->rdev = rdev;
- ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
- qp->qplib_qp.pd = &pd->qplib_pd;
- qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
- qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
+ srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
+ if (!srq) {
+ ibdev_err(&rdev->ibdev, "SRQ not found");
+ return -EINVAL;
+ }
+ qplqp->srq = &srq->qplib_srq;
+ qplqp->rq.max_wqe = 0;
+ } else {
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty.
+ */
+ entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
+ qplqp->rq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
- qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
- if (qp->qplib_qp.type == IB_QPT_MAX) {
- dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
- qp->qplib_qp.type);
- rc = -EINVAL;
- goto fail;
+ qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
+ init_attr->cap.max_recv_wr;
+ qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ }
+ qplqp->rq.sg_info.pgsize = PAGE_SIZE;
+ qplqp->rq.sg_info.pgshft = PAGE_SHIFT;
+
+ return 0;
+}
+
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ qplqp->rq.max_sge = 6;
+}
+
+static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->sq.max_sge = init_attr->cap.max_send_sge;
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ /*
+ * Change the SQ depth if user has requested minimum using
+ * configfs. Only supported for kernel consumers
+ */
+ entries = init_attr->cap.max_send_wr;
+ /* Allocate 128 + 1 more than what's provided */
+ entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ /*
+ * Reserve one slot for the phantom WQE. The application can
+ * post one extra entry in this case, but we allow it to avoid
+ * an unexpected queue-full condition.
+ */
+ qplqp->sq.q_full_delta -= 1;
+ qplqp->sq.sg_info.pgsize = PAGE_SIZE;
+ qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
+}
+
+static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
+ init_attr->cap.max_send_wr;
+ qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+}
+
+static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ int qptype;
+
+ chip_ctx = rdev->chip_ctx;
+
+ qptype = __from_ib_qp_type(init_attr->qp_type);
+ if (qptype == IB_QPT_MAX) {
+ ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
+ qptype = -EOPNOTSUPP;
+ goto out;
}
- qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
- qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
- IB_SIGNAL_ALL_WR) ? true : false);
+ if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+ init_attr->qp_type == IB_QPT_GSI)
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
+out:
+ return qptype;
+}
- qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
+static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc = 0, qptype;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ /* Setup misc params */
+ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+ qplqp->pd = &pd->qplib_pd;
+ qplqp->qp_handle = (u64)qplqp;
+ qplqp->max_inline_data = init_attr->cap.max_inline_data;
+ qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
+ true : false);
+ qptype = bnxt_re_init_qp_type(rdev, init_attr);
+ if (qptype < 0) {
+ rc = qptype;
+ goto out;
+ }
+ qplqp->type = (u8)qptype;
+
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
+ qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
+ }
+ qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
+ if (init_attr->create_flags)
+ ibdev_dbg(&rdev->ibdev,
+ "QP create flags 0x%x not supported",
+ init_attr->create_flags);
- if (qp_init_attr->send_cq) {
- cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
- ib_cq);
+ /* Setup CQs */
+ if (init_attr->send_cq) {
+ cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
- dev_err(rdev_to_dev(rdev), "Send CQ not found");
+ ibdev_err(&rdev->ibdev, "Send CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.scq = &cq->qplib_cq;
+ qplqp->scq = &cq->qplib_cq;
qp->scq = cq;
}
- if (qp_init_attr->recv_cq) {
- cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
- ib_cq);
+ if (init_attr->recv_cq) {
+ cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
- dev_err(rdev_to_dev(rdev), "Receive CQ not found");
+ ibdev_err(&rdev->ibdev, "Receive CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.rcq = &cq->qplib_cq;
+ qplqp->rcq = &cq->qplib_cq;
qp->rcq = cq;
}
- if (qp_init_attr->srq) {
- srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
- ib_srq);
- if (!srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not found");
- rc = -EINVAL;
- goto fail;
- }
- qp->qplib_qp.srq = &srq->qplib_srq;
- qp->qplib_qp.rq.max_wqe = 0;
- } else {
- /* Allocate 1 more than what's provided so posting max doesn't
- * mean empty
- */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
- qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
+ /* Setup RQ/SRQ */
+ rc = bnxt_re_init_rq_attr(qp, init_attr);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_rq_attr(qp);
- qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
- qp_init_attr->cap.max_recv_wr;
+ /* Setup SQ */
+ bnxt_re_init_sq_attr(qp, init_attr, udata);
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
- qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
+ if (udata) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+out:
+ return rc;
+}
+
+static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
+ struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_qp *sqp;
+ struct bnxt_re_ah *sah;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ /* Create a shadow QP to handle the QP1 traffic */
+ sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
+ GFP_KERNEL);
+ if (!sqp_tbl)
+ return -ENOMEM;
+ rdev->gsi_ctx.sqp_tbl = sqp_tbl;
+
+ sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
+ if (!sqp) {
+ rc = -ENODEV;
+ ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sqp = sqp;
+
+ sqp->rcq = qp->rcq;
+ sqp->scq = qp->scq;
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
+ &qp->qplib_qp);
+ if (!sah) {
+ bnxt_qplib_destroy_qp(&rdev->qplib_res,
+ &sqp->qplib_qp);
+ rc = -ENODEV;
+ ibdev_err(&rdev->ibdev,
+ "Failed to create AH entry for ShadowQP");
+ goto out;
}
+ rdev->gsi_ctx.gsi_sah = sah;
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ return 0;
+out:
+ kfree(sqp_tbl);
+ return rc;
+}
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
- /* Allocate 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
- qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
- qp_init_attr->cap.max_send_wr;
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- qp->qplib_qp.sq.max_sge++;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
-
- qp->qplib_qp.rq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
-
- qp->qplib_qp.sq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
- goto fail;
- }
- /* Create a shadow QP to handle the QP1 traffic */
- rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->qp1_sqp) {
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create Shadow QP for QP1");
- goto qp_destroy;
- }
- rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->sqp_ah) {
- bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create AH entry for ShadowQP");
- goto qp_destroy;
- }
+static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_qp *qplqp;
+ int rc = 0;
- } else {
- /* Allocate 128 + 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
- /*
- * Reserving one slot for Phantom WQE. Application can
- * post one extra entry in this case. But allowing this to avoid
- * unexpected Queue full condition
- */
+ qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.sq.q_full_delta -= 1;
+ rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
+ goto out;
+ }
- qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
- qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
- if (udata) {
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
- if (rc)
- goto fail;
- } else {
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- }
+ rc = bnxt_re_create_shadow_gsi(qp, pd);
+out:
+ return rc;
+}
+
+static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_qplib_dev_attr *dev_attr)
+{
+ bool rc = true;
+
+ if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
+ ibdev_err(&rdev->ibdev,
+ "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
+ init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_inline_data,
+ dev_attr->max_inline_data);
+ rc = false;
+ }
+ return rc;
+}
+struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_qp *qp;
+ int rc;
+
+ rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
+ if (!rc) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ qp->rdev = rdev;
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ if (rc)
+ goto fail;
+
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
+ rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
+ if (rc == -ENODEV)
+ goto qp_destroy;
+ if (rc)
+ goto fail;
+ } else {
rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to create HW QP");
goto free_umem;
}
+ if (udata) {
+ struct bnxt_re_qp_resp resp;
+
+ resp.qpid = qp->qplib_qp.id;
+ resp.rsvd = 0;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
+ goto qp_destroy;
+ }
+ }
}
qp->ib_qp.qp_num = qp->qplib_qp.id;
+ if (qp_init_attr->qp_type == IB_QPT_GSI)
+ rdev->gsi_ctx.gsi_qp = qp;
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
-
- if (udata) {
- struct bnxt_re_qp_resp resp;
-
- resp.qpid = qp->ib_qp.qp_num;
- resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
- goto qp_destroy;
- }
- }
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
- atomic_inc(&rdev->qp_count);
mutex_unlock(&rdev->qp_lock);
+ atomic_inc(&rdev->qp_count);
return &qp->ib_qp;
qp_destroy:
@@ -1189,6 +1396,7 @@ free_umem:
ib_umem_release(qp->sumem);
fail:
kfree(qp);
+exit:
return ERR_PTR(rc);
}
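The create-path refactor replaces one ~200-line function with per-queue init helpers (bnxt_re_init_rq_attr(), bnxt_re_init_sq_attr(), the GSI adjusters) and a layered goto unwind: exit when nothing is allocated yet, fail for the bare kzalloc, free_umem and qp_destroy for progressively more state. A compressed sketch of that unwind idiom with hypothetical demo_* helpers:

    static struct ib_qp *demo_create_qp(struct demo_pd *pd)
    {
        struct demo_qp *qp;
        int rc;

        if (!demo_limits_ok(pd))        /* hypothetical limit check */
            return ERR_PTR(-EINVAL);

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
            return ERR_PTR(-ENOMEM);

        rc = demo_init_attrs(qp);       /* hypothetical init helper */
        if (rc)
            goto fail;                  /* undo the kzalloc only */
        rc = demo_create_hw_qp(qp);     /* hypothetical HW create */
        if (rc)
            goto free_umem;             /* also undo what init pinned */
        return &qp->ib_qp;

    free_umem:
        demo_release_umem(qp);
    fail:
        kfree(qp);
        return ERR_PTR(rc);
    }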
@@ -1311,9 +1519,11 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
return PTR_ERR(umem);
srq->umem = umem;
- qplib_srq->sg_info.sglist = umem->sg_head.sgl;
+ qplib_srq->sg_info.sghead = umem->sg_head.sgl;
qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
qplib_srq->sg_info.nmap = umem->nmap;
+ qplib_srq->sg_info.pgsize = PAGE_SIZE;
+ qplib_srq->sg_info.pgshft = PAGE_SHIFT;
qplib_srq->srq_handle = ureq.srq_handle;
qplib_srq->dpi = &cntx->dpi;
@@ -1334,7 +1544,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
int rc, entries;
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
- dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
+ ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
rc = -EINVAL;
goto exit;
}
@@ -1369,7 +1579,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
+ ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
goto fail;
}
@@ -1379,7 +1589,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
resp.srqid = srq->qplib_srq.id;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
- dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
+ ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
bnxt_qplib_destroy_srq(&rdev->qplib_res,
&srq->qplib_srq);
goto fail;
@@ -1418,7 +1628,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
srq->qplib_srq.threshold = srq_attr->srq_limit;
rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
+ ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
return rc;
}
/* On success, update the shadow */
@@ -1426,8 +1636,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* No need to Build and send response back to udata */
break;
default:
- dev_err(rdev_to_dev(rdev),
- "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ ibdev_err(&rdev->ibdev,
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
return 0;
@@ -1445,7 +1655,7 @@ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
tsrq.qplib_srq.id = srq->qplib_srq.id;
rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
+ ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
return rc;
}
srq_attr->max_wr = srq->qplib_srq.max_wqe;
@@ -1487,7 +1697,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
{
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
int rc = 0;
if (qp_attr_mask & IB_QP_STATE) {
@@ -1511,8 +1721,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc)
- dev_err(rdev_to_dev(rdev),
- "Failed to modify Shadow QP for QP1");
+ ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
return rc;
}
@@ -1533,15 +1742,15 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
new_qp_state = qp_attr->qp_state;
if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
ib_qp->qp_type, qp_attr_mask)) {
- dev_err(rdev_to_dev(rdev),
- "Invalid attribute mask: %#x specified ",
- qp_attr_mask);
- dev_err(rdev_to_dev(rdev),
- "for qpn: %#x type: %#x",
- ib_qp->qp_num, ib_qp->qp_type);
- dev_err(rdev_to_dev(rdev),
- "curr_qp_state=0x%x, new_qp_state=0x%x\n",
- curr_qp_state, new_qp_state);
+ ibdev_err(&rdev->ibdev,
+ "Invalid attribute mask: %#x specified ",
+ qp_attr_mask);
+ ibdev_err(&rdev->ibdev,
+ "for qpn: %#x type: %#x",
+ ib_qp->qp_num, ib_qp->qp_type);
+ ibdev_err(&rdev->ibdev,
+ "curr_qp_state=0x%x, new_qp_state=0x%x\n",
+ curr_qp_state, new_qp_state);
return -EINVAL;
}
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
@@ -1549,18 +1758,16 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
- dev_dbg(rdev_to_dev(rdev),
- "Move QP = %p to flush list\n",
- qp);
+ ibdev_dbg(&rdev->ibdev,
+ "Move QP = %p to flush list\n", qp);
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
}
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
- dev_dbg(rdev_to_dev(rdev),
- "Move QP = %p out of flush list\n",
- qp);
+ ibdev_dbg(&rdev->ibdev,
+ "Move QP = %p out of flush list\n", qp);
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_clean_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
@@ -1593,6 +1800,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
const struct ib_global_route *grh =
rdma_ah_read_grh(&qp_attr->ah_attr);
const struct ib_gid_attr *sgid_attr;
+ struct bnxt_re_gid_ctx *ctx;
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
@@ -1604,11 +1812,12 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
sizeof(qp->qplib_qp.ah.dgid.data));
qp->qplib_qp.ah.flow_label = grh->flow_label;
- /* If RoCE V2 is enabled, stack will have two entries for
- * each GID entry. Avoiding this duplicte entry in HW. Dividing
- * the GID index by 2 for RoCE V2
+ sgid_attr = grh->sgid_attr;
+ /* Get the HW context of the GID. The reference to
+ * the GID table entry is already held by the caller.
*/
- qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
+ ctx = rdma_read_gid_hw_context(sgid_attr);
+ qp->qplib_qp.ah.sgid_index = ctx->idx;
qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
qp->qplib_qp.ah.hop_limit = grh->hop_limit;
qp->qplib_qp.ah.traffic_class = grh->traffic_class;
@@ -1616,7 +1825,6 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
ether_addr_copy(qp->qplib_qp.ah.dmac,
qp_attr->ah_attr.roce.dmac);
- sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
&qp->qplib_qp.smac[0]);
if (rc)
@@ -1690,10 +1898,10 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
if (qp_attr->max_dest_rd_atomic >
dev_attr->max_qp_init_rd_atom) {
- dev_err(rdev_to_dev(rdev),
- "max_dest_rd_atomic requested%d is > dev_max%d",
- qp_attr->max_dest_rd_atomic,
- dev_attr->max_qp_init_rd_atom);
+ ibdev_err(&rdev->ibdev,
+ "max_dest_rd_atomic requested%d is > dev_max%d",
+ qp_attr->max_dest_rd_atomic,
+ dev_attr->max_qp_init_rd_atom);
return -EINVAL;
}
@@ -1714,8 +1922,8 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
(qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
(qp_attr->cap.max_inline_data >=
dev_attr->max_inline_data)) {
- dev_err(rdev_to_dev(rdev),
- "Create QP failed - max exceeded");
+ ibdev_err(&rdev->ibdev,
+ "Create QP failed - max exceeded");
return -EINVAL;
}
entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
@@ -1748,10 +1956,10 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
return rc;
}
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
return rc;
}
@@ -1773,7 +1981,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
+ ibdev_err(&rdev->ibdev, "Failed to query HW QP");
goto out;
}
qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
@@ -1978,7 +2186,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
wqe->num_sge++;
} else {
- dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
+ ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
rc = -ENOMEM;
}
return rc;
@@ -1995,9 +2203,12 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
+ struct bnxt_re_sqp_entries *sqp_entry;
struct bnxt_qplib_sge ref, sge;
+ struct bnxt_re_dev *rdev;
u32 rq_prod_index;
- struct bnxt_re_sqp_entries *sqp_entry;
+
+ rdev = qp->rdev;
rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
@@ -2012,7 +2223,7 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
ref.lkey = wqe->sg_list[0].lkey;
ref.size = wqe->sg_list[0].size;
- sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
/* SGE 1 */
wqe->sg_list[0].addr = sge.addr;
@@ -2164,7 +2375,7 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
wqe->frmr.page_list = mr->pages;
wqe->frmr.page_list_len = mr->npages;
- wqe->frmr.levels = qplib_frpl->hwq.level + 1;
+ wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
/* Need unconditional fence for reg_mr
@@ -2211,8 +2422,8 @@ static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
if ((sge_len + wqe->inline_len) >
BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
- dev_err(rdev_to_dev(rdev),
- "Inline data size requested > supported value");
+ ibdev_err(&rdev->ibdev,
+ "Inline data size requested > supported value");
return -EINVAL;
}
sge_len = wr->sg_list[i].length;
@@ -2259,21 +2470,18 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp,
const struct ib_send_wr *wr)
{
- struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0;
unsigned long flags;
spin_lock_irqsave(&qp->sq_lock, flags);
- memset(&wqe, 0, sizeof(wqe));
while (wr) {
- /* House keeping */
- memset(&wqe, 0, sizeof(wqe));
+ struct bnxt_qplib_swqe wqe = {};
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
- dev_err(rdev_to_dev(rdev),
- "Limit exceeded for Send SGEs");
+ ibdev_err(&rdev->ibdev,
+ "Limit exceeded for Send SGEs");
rc = -EINVAL;
goto bad;
}
@@ -2292,9 +2500,9 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Post send failed opcode = %#x rc = %d",
- wr->opcode, rc);
+ ibdev_err(&rdev->ibdev,
+ "Post send failed opcode = %#x rc = %d",
+ wr->opcode, rc);
break;
}
wr = wr->next;
@@ -2321,8 +2529,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
- dev_err(rdev_to_dev(qp->rdev),
- "Limit exceeded for Send SGEs");
+ ibdev_err(&qp->rdev->ibdev,
+ "Limit exceeded for Send SGEs");
rc = -EINVAL;
goto bad;
}
@@ -2367,8 +2575,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = bnxt_re_build_atomic_wqe(wr, &wqe);
break;
case IB_WR_RDMA_READ_WITH_INV:
- dev_err(rdev_to_dev(qp->rdev),
- "RDMA Read with Invalidate is not supported");
+ ibdev_err(&qp->rdev->ibdev,
+ "RDMA Read with Invalidate is not supported");
rc = -EINVAL;
goto bad;
case IB_WR_LOCAL_INV:
@@ -2379,8 +2587,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
break;
default:
/* Unsupported WRs */
- dev_err(rdev_to_dev(qp->rdev),
- "WR (%#x) is not supported", wr->opcode);
+ ibdev_err(&qp->rdev->ibdev,
+ "WR (%#x) is not supported", wr->opcode);
rc = -EINVAL;
goto bad;
}
@@ -2388,9 +2596,9 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
if (rc) {
- dev_err(rdev_to_dev(qp->rdev),
- "post_send failed op:%#x qps = %#x rc = %d\n",
- wr->opcode, qp->qplib_qp.state, rc);
+ ibdev_err(&qp->rdev->ibdev,
+ "post_send failed op:%#x qps = %#x rc = %d\n",
+ wr->opcode, qp->qplib_qp.state, rc);
*bad_wr = wr;
break;
}
@@ -2418,8 +2626,8 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
- dev_err(rdev_to_dev(rdev),
- "Limit exceeded for Receive SGEs");
+ ibdev_err(&rdev->ibdev,
+ "Limit exceeded for Receive SGEs");
rc = -EINVAL;
break;
}
@@ -2455,8 +2663,8 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
/* Common */
wqe.num_sge = wr->num_sge;
if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
- dev_err(rdev_to_dev(qp->rdev),
- "Limit exceeded for Receive SGEs");
+ ibdev_err(&qp->rdev->ibdev,
+ "Limit exceeded for Receive SGEs");
rc = -EINVAL;
*bad_wr = wr;
break;
@@ -2527,7 +2735,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
/* Validate CQ fields */
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
- dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
+ ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
return -EINVAL;
}
@@ -2538,6 +2746,8 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
+ cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
+ cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
if (udata) {
struct bnxt_re_cq_req req;
struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
@@ -2554,7 +2764,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = PTR_ERR(cq->umem);
goto fail;
}
- cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
+ cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
cq->qplib_cq.dpi = &uctx->dpi;
@@ -2581,7 +2791,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
+ ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
goto fail;
}
@@ -2601,7 +2811,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
resp.rsvd = 0;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
+ ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
goto c2fail;
}
@@ -2832,12 +3042,13 @@ static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
return rc;
}
-static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
+static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp1_qp->rdev;
+ struct bnxt_re_dev *rdev = gsi_qp->rdev;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
struct ib_send_wr *swr;
struct ib_ud_wr udwr;
struct ib_recv_wr rwr;
@@ -2860,26 +3071,26 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr = &udwr.wr;
tbl_idx = cqe->wr_id;
- rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
- (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
- rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
+ rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
+ (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
+ rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
tbl_idx);
/* Shadow QP header buffer */
- shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
+ shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
tbl_idx);
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
/* Store this cqe */
memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
- sqp_entry->qp1_qp = qp1_qp;
+ sqp_entry->qp1_qp = gsi_qp;
/* Find packet type from the cqe */
pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
cqe->raweth_qp1_flags2);
if (pkt_type < 0) {
- dev_err(rdev_to_dev(rdev), "Invalid packet\n");
+ ibdev_err(&rdev->ibdev, "Invalid packet\n");
return -EINVAL;
}
@@ -2926,10 +3137,10 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
rwr.wr_id = tbl_idx;
rwr.next = NULL;
- rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
+ rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to post Rx buffers to shadow QP");
+ ibdev_err(&rdev->ibdev,
+ "Failed to post Rx buffers to shadow QP");
return -ENOMEM;
}
@@ -2938,13 +3149,13 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr->wr_id = tbl_idx;
swr->opcode = IB_WR_SEND;
swr->next = NULL;
-
- udwr.ah = &rdev->sqp_ah->ib_ah;
- udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
- udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+ udwr.ah = &gsi_sah->ib_ah;
+ udwr.remote_qpn = gsi_sqp->qplib_qp.id;
+ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
/* post data received in the send queue */
- rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
+ rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
return 0;
}
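The raw-QP1 receive path above renames qp1_qp/qp to gsi_qp/gsi_sqp and sources the shadow AH from gsi_ctx, but the relay mechanics are unchanged: stash the CQE in the shadow table, repost a receive buffer on the shadow QP, then forward the payload to it with a UD send. A sketch of the forwarding work request, reusing the names from the hunk:

    struct ib_ud_wr udwr = {};
    int rc;

    udwr.wr.opcode   = IB_WR_SEND;
    udwr.wr.wr_id    = tbl_idx;             /* index into gsi_ctx.sqp_tbl */
    udwr.ah          = &gsi_sah->ib_ah;
    udwr.remote_qpn  = gsi_sqp->qplib_qp.id;
    udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;

    rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, &udwr.wr);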
@@ -2998,12 +3209,12 @@ static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
-static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
+static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_re_qp *qp1_qp = NULL;
+ struct bnxt_re_dev *rdev = gsi_sqp->rdev;
+ struct bnxt_re_qp *gsi_qp = NULL;
struct bnxt_qplib_cqe *orig_cqe = NULL;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
int nw_type;
@@ -3013,13 +3224,13 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
tbl_idx = cqe->wr_id;
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
- qp1_qp = sqp_entry->qp1_qp;
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ gsi_qp = sqp_entry->qp1_qp;
orig_cqe = &sqp_entry->cqe;
wc->wr_id = sqp_entry->wrid;
wc->byte_len = orig_cqe->length;
- wc->qp = &qp1_qp->ib_qp;
+ wc->qp = &gsi_qp->ib_qp;
wc->ex.imm_data = orig_cqe->immdata;
wc->src_qp = orig_cqe->src_qp;
@@ -3084,11 +3295,11 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
rc = bnxt_re_bind_fence_mw(lib_qp);
if (!rc) {
lib_qp->sq.phantom_wqe_cnt++;
- dev_dbg(&lib_qp->sq.hwq.pdev->dev,
- "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
- lib_qp->id, lib_qp->sq.hwq.prod,
- HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
- lib_qp->sq.phantom_wqe_cnt);
+ ibdev_dbg(&qp->rdev->ibdev,
+ "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+ lib_qp->id, lib_qp->sq.hwq.prod,
+ HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+ lib_qp->sq.phantom_wqe_cnt);
}
spin_unlock_irqrestore(&qp->sq_lock, flags);
@@ -3098,7 +3309,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_qp *qp;
+ struct bnxt_re_qp *qp, *sh_qp;
struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget;
struct bnxt_qplib_q *sq;
@@ -3111,7 +3322,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
budget = min_t(u32, num_entries, cq->max_cql);
num_entries = budget;
if (!cq->cql) {
- dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
+ ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
goto exit;
}
cqe = &cq->cql[0];
@@ -3124,8 +3335,8 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
qp = container_of(lib_qp,
struct bnxt_re_qp, qplib_qp);
if (send_phantom_wqe(qp) == -ENOMEM)
- dev_err(rdev_to_dev(cq->rdev),
- "Phantom failed! Scheduled to send again\n");
+ ibdev_err(&cq->rdev->ibdev,
+ "Phantom failed! Scheduled to send again\n");
else
sq->send_phantom = false;
}
@@ -3149,8 +3360,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
(unsigned long)(cqe->qp_handle),
struct bnxt_re_qp, qplib_qp);
if (!qp) {
- dev_err(rdev_to_dev(cq->rdev),
- "POLL CQ : bad QP handle");
+ ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle");
continue;
}
wc->qp = &qp->ib_qp;
@@ -3162,8 +3372,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
switch (cqe->opcode) {
case CQ_BASE_CQE_TYPE_REQ:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
@@ -3189,7 +3400,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
* stored in the table
*/
tbl_idx = cqe->wr_id;
- sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
wc->wr_id = sqp_entry->wrid;
bnxt_re_process_res_rawqp1_wc(wc, cqe);
break;
@@ -3197,8 +3408,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
bnxt_re_process_res_rc_wc(wc, cqe);
break;
case CQ_BASE_CQE_TYPE_RES_UD:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
@@ -3213,9 +3425,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
bnxt_re_process_res_ud_wc(qp, wc, cqe);
break;
default:
- dev_err(rdev_to_dev(cq->rdev),
- "POLL CQ : type 0x%x not handled",
- cqe->opcode);
+ ibdev_err(&cq->rdev->ibdev,
+ "POLL CQ : type 0x%x not handled",
+ cqe->opcode);
continue;
}
wc++;
@@ -3308,7 +3520,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
return rc;
}
@@ -3355,7 +3567,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
int rc;
if (type != IB_MR_TYPE_MEM_REG) {
- dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
+ ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
return ERR_PTR(-EINVAL);
}
if (max_num_sg > MAX_PBL_LVL_1_PGS)
@@ -3385,8 +3597,8 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl, max_num_sg);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate HW FR page list");
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate HW FR page list");
goto fail_mr;
}
@@ -3421,7 +3633,7 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
+ ibdev_err(&rdev->ibdev, "Allocate MW failed!");
goto fail;
}
mw->ib_mw.rkey = mw->qplib_mw.rkey;
@@ -3442,7 +3654,7 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
return rc;
}
@@ -3494,8 +3706,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
int umem_pgs, page_shift, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
- dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
- length, BNXT_RE_MAX_MR_SIZE);
+ ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
+ length, BNXT_RE_MAX_MR_SIZE);
return ERR_PTR(-ENOMEM);
}
@@ -3510,7 +3722,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR");
goto free_mr;
}
/* The fixed portion of the rkey is the same as the lkey */
@@ -3518,7 +3730,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
if (IS_ERR(umem)) {
- dev_err(rdev_to_dev(rdev), "Failed to get umem");
+ ibdev_err(&rdev->ibdev, "Failed to get umem");
rc = -EFAULT;
goto free_mrw;
}
@@ -3527,7 +3739,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.va = virt_addr;
umem_pgs = ib_umem_page_count(umem);
if (!umem_pgs) {
- dev_err(rdev_to_dev(rdev), "umem is invalid!");
+ ibdev_err(&rdev->ibdev, "umem is invalid!");
rc = -EINVAL;
goto free_umem;
}
@@ -3544,15 +3756,15 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
virt_addr));
if (!bnxt_re_page_size_ok(page_shift)) {
- dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
+ ibdev_err(&rdev->ibdev, "umem page size unsupported!");
rc = -EFAULT;
goto fail;
}
if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
length > BNXT_RE_MAX_MR_SIZE_LOW) {
- dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
- length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
+ ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
+ length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
rc = -EINVAL;
goto fail;
}
@@ -3562,7 +3774,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
umem_pgs, false, 1 << page_shift);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to register user MR");
+ ibdev_err(&rdev->ibdev, "Failed to register user MR");
goto fail;
}
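The user-MR hunks above follow the usual pin-validate-program sequence: ib_umem_get() pins the user range, the driver checks the page count and page-size geometry, and only then programs the page list into firmware. A condensed sketch of that ordering; program_hw_mr() is a hypothetical stand-in for the bnxt_qplib_reg_mr() call:

    #include <linux/err.h>
    #include <rdma/ib_umem.h>

    extern int program_hw_mr(struct ib_umem *umem, int npages); /* hypothetical */

    static int reg_user_range(struct ib_device *ibdev, unsigned long start,
                              size_t length, int access)
    {
            struct ib_umem *umem;
            int npages, rc;

            umem = ib_umem_get(ibdev, start, length, access);
            if (IS_ERR(umem))
                    return PTR_ERR(umem);
            npages = ib_umem_page_count(umem); /* helper from this kernel era */
            if (!npages) {
                    rc = -EINVAL;
                    goto release;
            }
            rc = program_hw_mr(umem, npages);
            if (rc)
                    goto release;
            return 0;   /* on success the MR keeps the umem pinned until dereg */
    release:
            ib_umem_release(umem);
            return rc;
    }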
@@ -3595,12 +3807,11 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
u32 chip_met_rev_num = 0;
int rc;
- dev_dbg(rdev_to_dev(rdev), "ABI version requested %u",
- ibdev->ops.uverbs_abi_ver);
+ ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
- dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
- BNXT_RE_ABI_VERSION);
+ ibdev_dbg(ibdev, " is different from the device %d ",
+ BNXT_RE_ABI_VERSION);
return -EPERM;
}
@@ -3614,10 +3825,10 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
spin_lock_init(&uctx->sh_lock);
resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
- chip_met_rev_num = rdev->chip_ctx.chip_num;
- chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) <<
+ chip_met_rev_num = rdev->chip_ctx->chip_num;
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
- chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) <<
+ chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
resp.chip_id0 = chip_met_rev_num;
/* Future extension of chip info */
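The ucontext hunk above packs the chip number, revision, and metal step into the single chip_id0 word handed to user space. The shifts below are assumptions for illustration; the authoritative values are the BNXT_RE_CHIP_ID0_* macros in the uapi header:

    u32 id0;

    id0  = chip_num;                        /* bits 15:0  - chip number */
    id0 |= ((u32)chip_rev & 0xFF) << 16;    /* bits 23:16 - revision */
    id0 |= ((u32)chip_metal & 0xFF) << 24;  /* bits 31:24 - metal step */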
@@ -3632,7 +3843,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy user context");
+ ibdev_err(ibdev, "Failed to copy user context");
rc = -EFAULT;
goto cfail;
}
@@ -3682,15 +3893,14 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
PAGE_SIZE, vma->vm_page_prot)) {
- dev_err(rdev_to_dev(rdev), "Failed to map DPI");
+ ibdev_err(&rdev->ibdev, "Failed to map DPI");
return -EAGAIN;
}
} else {
pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
if (remap_pfn_range(vma, vma->vm_start,
pfn, PAGE_SIZE, vma->vm_page_prot)) {
- dev_err(rdev_to_dev(rdev),
- "Failed to map shared page");
+ ibdev_err(&rdev->ibdev, "Failed to map shared page");
return -EAGAIN;
}
}
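Most of the ib_verbs.c changes above are a mechanical conversion from dev_err(rdev_to_dev(rdev), ...) to the ibdev_* printk helpers, which tag messages with the RDMA device name (e.g. bnxt_re0) rather than the underlying PCI device. A minimal sketch of the calling convention, with the driver structure assumed:

    #include <rdma/ib_verbs.h>

    struct sample_dev {
            struct ib_device ibdev;         /* embedded IB device */
    };

    static void report(struct sample_dev *sdev, int rc)
    {
            ibdev_err(&sdev->ibdev, "hardware error: %#x\n", rc);
    }

The core's ibdev_printk path also tolerates a NULL device by falling back to plain printk, which is why the CQN/SRQN handlers later in this patch can pass NULL before a device exists.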
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 793c97251588..b12fbc857f94 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -78,26 +78,43 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
+static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
+static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+
+ if (!rdev->chip_ctx)
+ return;
+ chip_ctx = rdev->chip_ctx;
+ rdev->chip_ctx = NULL;
rdev->rcfw.res = NULL;
rdev->qplib_res.cctx = NULL;
+ rdev->qplib_res.pdev = NULL;
+ rdev->qplib_res.netdev = NULL;
+ kfree(chip_ctx);
}
static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
struct bnxt *bp;
en_dev = rdev->en_dev;
bp = netdev_priv(en_dev->net);
- rdev->chip_ctx.chip_num = bp->chip_num;
+ chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
+ if (!chip_ctx)
+ return -ENOMEM;
+ chip_ctx->chip_num = bp->chip_num;
+
+ rdev->chip_ctx = chip_ctx;
/* remaining members to follow eventually */
- rdev->qplib_res.cctx = &rdev->chip_ctx;
+ rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
return 0;
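The chip context switches here from a struct embedded in bnxt_re_dev to a heap allocation, so teardown can detach every pointer before freeing and a repeated teardown stays a no-op. A sketch of the pattern with illustrative type names:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct chip_ctx { u16 chip_num; };
    struct sample_rdev { struct chip_ctx *chip_ctx; };

    static int setup_ctx(struct sample_rdev *d, u16 num)
    {
            struct chip_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    return -ENOMEM;
            ctx->chip_num = num;
            d->chip_ctx = ctx;
            return 0;
    }

    static void destroy_ctx(struct sample_rdev *d)
    {
            struct chip_ctx *ctx = d->chip_ctx;

            if (!ctx)               /* second call is a no-op */
                    return;
            d->chip_ctx = NULL;     /* detach before free */
            kfree(ctx);
    }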
@@ -136,9 +153,9 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
attr->max_srq);
ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
- if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
+ rdev->qplib_ctx.tqm_ctx.qcount[i] =
rdev->dev_attr.tqm_alloc_reqs[i];
}
@@ -185,7 +202,7 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
bnxt_re_limit_pf_res(rdev);
- num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
if (num_vfs)
bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
@@ -208,7 +225,7 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
return;
rdev->num_vfs = num_vfs;
- if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
+ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
&rdev->qplib_ctx);
@@ -221,8 +238,10 @@ static void bnxt_re_shutdown(void *p)
if (!rdev)
return;
-
- bnxt_re_ib_unreg(rdev);
+ ASSERT_RTNL();
+ /* Release the MSI-X vectors before queuing unregister */
+ bnxt_re_stop_irq(rdev);
+ ib_unregister_device_queued(&rdev->ibdev);
}
static void bnxt_re_stop_irq(void *handle)
@@ -254,7 +273,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
* to f/w will timeout and that will set the
* timeout bit.
*/
- dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+ ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
return;
}
@@ -271,8 +290,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
msix_ent[indx].vector, false);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to reinit NQ index %d\n", indx - 1);
+ ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
+ indx - 1);
}
}
@@ -358,9 +377,9 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
goto done;
}
if (num_msix_got != num_msix_want) {
- dev_warn(rdev_to_dev(rdev),
- "Requested %d MSI-X vectors, got %d\n",
- num_msix_want, num_msix_got);
+ ibdev_warn(&rdev->ibdev,
+ "Requested %d MSI-X vectors, got %d\n",
+ num_msix_want, num_msix_got);
}
rdev->num_msix = num_msix_got;
done:
@@ -407,14 +426,14 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc)
- dev_err(rdev_to_dev(rdev),
- "Failed to free HW ring:%d :%#x", req.ring_id, rc);
+ ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
+ req.ring_id, rc);
return rc;
}
-static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
- int pages, int type, u32 ring_mask,
- u32 map_index, u16 *fw_ring_id)
+static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ring_attr *ring_attr,
+ u16 *fw_ring_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_alloc_input req = {0};
@@ -428,18 +447,18 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0;
- req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
- if (pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
+ if (ring_attr->pages > 1) {
/* Page size is in log2 units */
req.page_size = BNXT_PAGE_SHIFT;
req.page_tbl_depth = 1;
}
req.fbo = 0;
/* Association of ring index with doorbell index and MSIX number */
- req.logical_id = cpu_to_le16(map_index);
- req.length = cpu_to_le32(ring_mask + 1);
- req.ring_type = type;
- req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req.logical_id = cpu_to_le16(ring_attr->lrid);
+ req.length = cpu_to_le32(ring_attr->depth + 1);
+ req.ring_type = ring_attr->type;
+ req.int_mode = ring_attr->mode;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
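bnxt_re_net_ring_alloc() now takes a single bnxt_re_ring_attr instead of six scalars, which keeps its two call sites (the NQ and CREQ allocations below) from diverging. The caller-side pattern, with field names taken from this diff and the values purely illustrative:

    struct bnxt_re_ring_attr rattr = {};

    rattr.dma_arr = hwq->pbl[PBL_LVL_0].pg_map_arr;   /* ring pages */
    rattr.pages   = hwq->pbl[hwq->level].pg_count;
    rattr.type    = bnxt_qplib_get_ring_type(rdev->chip_ctx);
    rattr.mode    = RING_ALLOC_REQ_INT_MODE_MSIX;
    rattr.depth   = BNXT_QPLIB_NQE_MAX_CNT - 1;
    rattr.lrid    = rdev->msix_entries[idx].ring_idx; /* logical ring id */
    rc = bnxt_re_net_ring_alloc(rdev, &rattr, &ring_id);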
@@ -468,8 +487,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc)
- dev_err(rdev_to_dev(rdev),
- "Failed to free HW stats context %#x", rc);
+ ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
+ rc);
return rc;
}
@@ -524,17 +543,12 @@ static bool is_bnxt_re_dev(struct net_device *netdev)
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
- struct bnxt_re_dev *rdev;
+ struct ib_device *ibdev =
+ ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
+ if (!ibdev)
+ return NULL;
- rcu_read_lock();
- list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
- if (rdev->netdev == netdev) {
- rcu_read_unlock();
- return rdev;
- }
- }
- rcu_read_unlock();
- return NULL;
+ return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
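One subtlety worth noting: ib_device_get_by_netdev() returns the device with its registration refcount held, so every caller must eventually drop it with ib_device_put(); the notifier rework later in this patch does exactly that. The lookup-and-downcast idiom, assuming the ib_device is embedded in the driver structure:

    struct ib_device *ibdev;
    struct bnxt_re_dev *rdev;

    ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
    if (!ibdev)
            return NULL;
    /* reference held from here; release with ib_device_put(ibdev) */
    rdev = container_of(ibdev, struct bnxt_re_dev, ibdev);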
static void bnxt_re_dev_unprobe(struct net_device *netdev,
@@ -608,11 +622,6 @@ static const struct attribute_group bnxt_re_dev_attr_group = {
.attrs = bnxt_re_attributes,
};
-static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
-{
- ib_unregister_device(&rdev->ibdev);
-}
-
static const struct ib_device_ops bnxt_re_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_BNXT_RE,
@@ -627,6 +636,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.create_cq = bnxt_re_create_cq,
.create_qp = bnxt_re_create_qp,
.create_srq = bnxt_re_create_srq,
+ .dealloc_driver = bnxt_re_dealloc_driver,
.dealloc_pd = bnxt_re_dealloc_pd,
.dealloc_ucontext = bnxt_re_dealloc_ucontext,
.del_gid = bnxt_re_del_gid,
@@ -723,15 +733,11 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
dev_put(rdev->netdev);
rdev->netdev = NULL;
-
mutex_lock(&bnxt_re_dev_lock);
list_del_rcu(&rdev->list);
mutex_unlock(&bnxt_re_dev_lock);
synchronize_rcu();
-
- ib_dealloc_device(&rdev->ibdev);
- /* rdev is gone */
}
static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
@@ -742,8 +748,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
/* Allocate bnxt_re_dev instance here */
rdev = ib_alloc_device(bnxt_re_dev, ibdev);
if (!rdev) {
- dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
- ROCE_DRV_MODULE_NAME);
+ ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
+ ROCE_DRV_MODULE_NAME);
return NULL;
}
/* Default values */
@@ -872,8 +878,8 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
int rc = 0;
if (!srq) {
- dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
- ROCE_DRV_MODULE_NAME);
+ ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
+ ROCE_DRV_MODULE_NAME);
rc = -EINVAL;
goto done;
}
@@ -900,8 +906,8 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
qplib_cq);
if (!cq) {
- dev_err(NULL, "%s: CQ is NULL, CQN not handled",
- ROCE_DRV_MODULE_NAME);
+ ibdev_err(NULL, "%s: CQ is NULL, CQN not handled",
+ ROCE_DRV_MODULE_NAME);
return -EINVAL;
}
if (cq->ib_cq.comp_handler) {
@@ -916,7 +922,7 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
- return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
BNXT_RE_GEN_P5_PF_NQ_DB) :
rdev->msix_entries[indx].db_offset;
@@ -948,8 +954,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
db_offt, &bnxt_re_cqn_handler,
&bnxt_re_srqn_handler);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to enable NQ with rc = 0x%x", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to enable NQ with rc = 0x%x", rc);
goto fail;
}
num_vec_enabled++;
@@ -967,10 +973,10 @@ static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
int i;
for (i = 0; i < rdev->num_msix - 1; i++) {
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
- rdev->nq[i].res = NULL;
bnxt_qplib_free_nq(&rdev->nq[i]);
+ rdev->nq[i].res = NULL;
}
}
@@ -991,10 +997,10 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
+ struct bnxt_re_ring_attr rattr = {};
+ struct bnxt_qplib_ctx *qplib_ctx;
int num_vec_created = 0;
- dma_addr_t *pg_map;
int rc = 0, i;
- int pages;
u8 type;
/* Configure and allocate resources for qplib */
@@ -1015,27 +1021,31 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc)
goto dealloc_res;
+ qplib_ctx = &rdev->qplib_ctx;
for (i = 0; i < rdev->num_msix - 1; i++) {
- rdev->nq[i].res = &rdev->qplib_res;
- rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
- BNXT_RE_MAX_SRQC_COUNT + 2;
- rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
+ struct bnxt_qplib_nq *nq;
+
+ nq = &rdev->nq[i];
+ nq->hwq.max_elements = (qplib_ctx->cq_count +
+ qplib_ctx->srqc_count + 2);
+ rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
- i, rc);
+ ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
+ i, rc);
goto free_nq;
}
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
- pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
- rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
- BNXT_QPLIB_NQE_MAX_CNT - 1,
- rdev->msix_entries[i + 1].ring_idx,
- &rdev->nq[i].ring_id);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
+ rattr.type = type;
+ rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
+ rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate NQ fw id with rc = 0x%x",
- rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate NQ fw id with rc = 0x%x",
+ rc);
bnxt_qplib_free_nq(&rdev->nq[i]);
goto free_nq;
}
@@ -1043,8 +1053,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
}
return 0;
free_nq:
- for (i = num_vec_created; i >= 0; i--) {
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
+ for (i = num_vec_created - 1; i >= 0; i--) {
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
bnxt_qplib_free_nq(&rdev->nq[i]);
}
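Note that the error path also gains an off-by-one fix: the unwind loop previously started at num_vec_created, tearing down one NQ that was never fully set up. The general construct-then-unwind shape, with create_one()/destroy_one() as hypothetical helpers:

    extern int create_one(int i);       /* hypothetical */
    extern void destroy_one(int i);     /* hypothetical */

    static int create_all(int n)
    {
            int created = 0, i, rc = 0;

            for (i = 0; i < n; i++) {
                    rc = create_one(i);
                    if (rc)
                            goto undo;
                    created++;
            }
            return 0;
    undo:
            while (--created >= 0)      /* free only what succeeded */
                    destroy_one(created);
            return rc;
    }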
@@ -1109,10 +1119,10 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
return rc;
if (resp.queue_cfg_info) {
- dev_warn(rdev_to_dev(rdev),
- "Asymmetric cos queue configuration detected");
- dev_warn(rdev_to_dev(rdev),
- " on device, QoS may not be fully functional\n");
+ ibdev_warn(&rdev->ibdev,
+ "Asymmetric cos queue configuration detected");
+ ibdev_warn(&rdev->ibdev,
+ " on device, QoS may not be fully functional\n");
}
qcfgmap = &resp.pri0_cos_queue_id;
tmp_map = (u8 *)cid_map;
@@ -1125,7 +1135,8 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp)
{
- return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
+ return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
+ (qp == rdev->gsi_ctx.gsi_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
@@ -1160,12 +1171,13 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
u16 gid_idx, index;
int rc = 0;
- if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
+ if (!ib_device_try_get(&rdev->ibdev))
return 0;
if (!sgid_tbl) {
- dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
- return -EINVAL;
+ ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
+ rc = -EINVAL;
+ goto out;
}
for (index = 0; index < sgid_tbl->active; index++) {
@@ -1185,7 +1197,8 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
rdev->qplib_res.netdev->dev_addr);
}
-
+out:
+ ib_device_put(&rdev->ibdev);
return rc;
}
@@ -1241,7 +1254,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
/* Get cosq id for this priority */
rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
if (rc) {
- dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
+ ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
return rc;
}
/* Parse CoS IDs for app priority */
@@ -1250,8 +1263,8 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
/* Config BONO. */
rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
if (rc) {
- dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
- rdev->cosq[0], rdev->cosq[1]);
+ ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
+ rdev->cosq[0], rdev->cosq[1]);
return rc;
}
@@ -1286,8 +1299,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to query HW version, rc = 0x%x", rc);
+ ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
+ rc);
return;
}
rdev->qplib_ctx.hwrm_intf_ver =
@@ -1297,15 +1310,35 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
le16_to_cpu(resp.hwrm_intf_patch);
}
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
+static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
+{
+ int rc = 0;
+ u32 event;
+
+ /* Register ib dev */
+ rc = bnxt_re_register_ib(rdev);
+ if (rc) {
+ pr_err("Failed to register with IB: %#x\n", rc);
+ return rc;
+ }
+ dev_info(rdev_to_dev(rdev), "Device registered successfully");
+ ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+ &rdev->active_width);
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
+
+ event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);
+
+ return rc;
+}
+
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
u8 type;
int rc;
- if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
- /* Cleanup ib dev */
- bnxt_re_unregister_ib(rdev);
- }
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -1318,28 +1351,28 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to deinitialize RCFW: %#x", rc);
+ ibdev_warn(&rdev->ibdev,
+ "Failed to deinitialize RCFW: %#x", rc);
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
- bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
+ bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
rc = bnxt_re_free_msix(rdev);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to free MSI-X vectors: %#x", rc);
+ ibdev_warn(&rdev->ibdev,
+ "Failed to free MSI-X vectors: %#x", rc);
}
bnxt_re_destroy_chip_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
rc = bnxt_re_unregister_netdev(rdev);
if (rc)
- dev_warn(rdev_to_dev(rdev),
- "Failed to unregister with netdev: %#x", rc);
+ ibdev_warn(&rdev->ibdev,
+ "Failed to unregister with netdev: %#x", rc);
}
}
@@ -1353,31 +1386,29 @@ static void bnxt_re_worker(struct work_struct *work)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
-static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
{
- dma_addr_t *pg_map;
- u32 db_offt, ridx;
- int pages, vid;
- bool locked;
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_re_ring_attr rattr;
+ u32 db_offt;
+ int vid;
u8 type;
int rc;
- /* Acquire rtnl lock through out this function */
- rtnl_lock();
- locked = true;
-
/* Registered a new RoCE device instance to netdev */
+ memset(&rattr, 0, sizeof(rattr));
rc = bnxt_re_register_netdev(rdev);
if (rc) {
rtnl_unlock();
- pr_err("Failed to register with netedev: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with netedev: %#x\n", rc);
return -EINVAL;
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
rc = bnxt_re_setup_chip_ctx(rdev);
if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to get chip context\n");
+ ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
return -EINVAL;
}
@@ -1386,7 +1417,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
rc = bnxt_re_request_msix(rdev);
if (rc) {
- pr_err("Failed to get MSI-X vectors: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to get MSI-X vectors: %#x\n", rc);
rc = -EINVAL;
goto fail;
}
@@ -1397,31 +1429,36 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
- rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
+ rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
&rdev->qplib_ctx,
BNXT_RE_MAX_QPC_COUNT);
if (rc) {
- pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate RCFW Channel: %#x\n", rc);
goto fail;
}
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
- pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
- ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
- rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
- BNXT_QPLIB_CREQE_MAX_CNT - 1,
- ridx, &rdev->rcfw.creq_ring_id);
+
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ creq = &rdev->rcfw.creq;
+ rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
+ rattr.type = type;
+ rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
+ rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
if (rc) {
- pr_err("Failed to allocate CREQ: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
- rc = bnxt_qplib_enable_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
+ rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
vid, db_offt, rdev->is_virtfn,
&bnxt_re_aeq_handler);
if (rc) {
- pr_err("Failed to enable RCFW channel: %#x\n", rc);
+ ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
+ rc);
goto free_ring;
}
@@ -1432,24 +1469,27 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
bnxt_re_set_resource_limits(rdev);
- rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
- bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
+ rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
+ bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
if (rc) {
- pr_err("Failed to allocate QPLIB context: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate QPLIB context: %#x\n", rc);
goto disable_rcfw;
}
rc = bnxt_re_net_stats_ctx_alloc(rdev,
rdev->qplib_ctx.stats.dma_map,
&rdev->qplib_ctx.stats.fw_id);
if (rc) {
- pr_err("Failed to allocate stats context: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate stats context: %#x\n", rc);
goto free_ctx;
}
rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
rdev->is_virtfn);
if (rc) {
- pr_err("Failed to initialize RCFW: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to initialize RCFW: %#x\n", rc);
goto free_sctx;
}
set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
@@ -1457,13 +1497,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Resources based on the 'new' device caps */
rc = bnxt_re_alloc_res(rdev);
if (rc) {
- pr_err("Failed to allocate resources: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate resources: %#x\n", rc);
goto fail;
}
set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
rc = bnxt_re_init_res(rdev);
if (rc) {
- pr_err("Failed to initialize resources: %#x\n", rc);
+ ibdev_err(&rdev->ibdev,
+ "Failed to initialize resources: %#x\n", rc);
goto fail;
}
@@ -1472,46 +1514,28 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
if (!rdev->is_virtfn) {
rc = bnxt_re_setup_qos(rdev);
if (rc)
- pr_info("RoCE priority not yet configured\n");
+ ibdev_info(&rdev->ibdev,
+ "RoCE priority not yet configured\n");
INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
- rtnl_unlock();
- locked = false;
-
- /* Register ib dev */
- rc = bnxt_re_register_ib(rdev);
- if (rc) {
- pr_err("Failed to register with IB: %#x\n", rc);
- goto fail;
- }
- set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
- dev_info(rdev_to_dev(rdev), "Device registered successfully");
- ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
- &rdev->active_width);
- set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
-
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
- bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
+ bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
- type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
+ type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
- if (!locked)
- rtnl_lock();
- bnxt_re_ib_unreg(rdev);
- rtnl_unlock();
+ bnxt_re_dev_uninit(rdev);
return rc;
}
@@ -1538,7 +1562,8 @@ static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
en_dev = bnxt_re_dev_probe(netdev);
if (IS_ERR(en_dev)) {
if (en_dev != ERR_PTR(-ENODEV))
- pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
+ ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
+ ROCE_DRV_MODULE_NAME);
rc = PTR_ERR(en_dev);
goto exit;
}
@@ -1552,9 +1577,47 @@ exit:
return rc;
}
-static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
+ bnxt_re_dev_uninit(rdev);
pci_dev_put(rdev->en_dev->pdev);
+ bnxt_re_dev_unreg(rdev);
+}
+
+static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
+ struct net_device *netdev)
+{
+ int rc;
+
+ rc = bnxt_re_dev_reg(rdev, netdev);
+ if (rc == -ENODEV)
+ return rc;
+ if (rc) {
+ pr_err("Failed to register with the device %s: %#x\n",
+ netdev->name, rc);
+ return rc;
+ }
+
+ pci_dev_get((*rdev)->en_dev->pdev);
+ rc = bnxt_re_dev_init(*rdev);
+ if (rc) {
+ pci_dev_put((*rdev)->en_dev->pdev);
+ bnxt_re_dev_unreg(*rdev);
+ }
+
+ return rc;
+}
+
+static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
+{
+ struct bnxt_re_dev *rdev =
+ container_of(ib_dev, struct bnxt_re_dev, ibdev);
+
+ dev_info(rdev_to_dev(rdev), "Unregistering Device");
+
+ rtnl_lock();
+ bnxt_re_remove_device(rdev);
+ rtnl_unlock();
}
/* Handle all deferred netevents tasks */
@@ -1567,21 +1630,23 @@ static void bnxt_re_task(struct work_struct *work)
re_work = container_of(work, struct bnxt_re_work, work);
rdev = re_work->rdev;
- if (re_work->event != NETDEV_REGISTER &&
- !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
- return;
-
- switch (re_work->event) {
- case NETDEV_REGISTER:
- rc = bnxt_re_ib_reg(rdev);
+ if (re_work->event == NETDEV_REGISTER) {
+ rc = bnxt_re_ib_init(rdev);
if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to register with IB: %#x", rc);
- bnxt_re_remove_one(rdev);
- bnxt_re_dev_unreg(rdev);
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with IB: %#x", rc);
+ rtnl_lock();
+ bnxt_re_remove_device(rdev);
+ rtnl_unlock();
goto exit;
}
- break;
+ goto exit;
+ }
+
+ if (!ib_device_try_get(&rdev->ibdev))
+ goto exit;
+
+ switch (re_work->event) {
case NETDEV_UP:
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
IB_EVENT_PORT_ACTIVE);
@@ -1601,17 +1666,12 @@ static void bnxt_re_task(struct work_struct *work)
default:
break;
}
- smp_mb__before_atomic();
- atomic_dec(&rdev->sched_count);
+ ib_device_put(&rdev->ibdev);
exit:
+ put_device(&rdev->ibdev.dev);
kfree(re_work);
}
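bnxt_re_task() now releases two distinct references: ib_device_put() pairs with the ib_device_try_get() guarding the event handling, and put_device() pairs with the get_device() taken in the netdev notifier before the work is queued. The submit/complete pairing, sketched with a hypothetical handle_event():

    /* submit side (netdev notifier) */
    get_device(&rdev->ibdev.dev);       /* keep rdev alive for the worker */
    queue_work(bnxt_re_wq, &re_work->work);

    /* completion side (worker) */
    if (ib_device_try_get(&rdev->ibdev)) {
            handle_event(rdev);         /* hypothetical */
            ib_device_put(&rdev->ibdev);
    }
    put_device(&rdev->ibdev.dev);       /* drop the submit-side reference */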
-static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
-{
- pci_dev_get(rdev->en_dev->pdev);
-}
-
/*
* "Notifier chain callback can be invoked for the same chain from
* different CPUs at the same time".
@@ -1634,6 +1694,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
struct bnxt_re_dev *rdev;
int rc = 0;
bool sch_work = false;
+ bool release = true;
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
@@ -1641,7 +1702,8 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
rdev = bnxt_re_from_netdev(real_dev);
if (!rdev && event != NETDEV_REGISTER)
- goto exit;
+ return NOTIFY_OK;
+
if (real_dev != netdev)
goto exit;
@@ -1649,27 +1711,14 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
case NETDEV_REGISTER:
if (rdev)
break;
- rc = bnxt_re_dev_reg(&rdev, real_dev);
- if (rc == -ENODEV)
- break;
- if (rc) {
- pr_err("Failed to register with the device %s: %#x\n",
- real_dev->name, rc);
- break;
- }
- bnxt_re_init_one(rdev);
- sch_work = true;
+ rc = bnxt_re_add_device(&rdev, real_dev);
+ if (!rc)
+ sch_work = true;
+ release = false;
break;
case NETDEV_UNREGISTER:
- /* netdev notifier will call NETDEV_UNREGISTER again later since
- * we are still holding the reference to the netdev
- */
- if (atomic_read(&rdev->sched_count) > 0)
- goto exit;
- bnxt_re_ib_unreg(rdev);
- bnxt_re_remove_one(rdev);
- bnxt_re_dev_unreg(rdev);
+ ib_unregister_device_queued(&rdev->ibdev);
break;
default:
@@ -1680,17 +1729,19 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
/* Allocate for the deferred task */
re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
if (re_work) {
+ get_device(&rdev->ibdev.dev);
re_work->rdev = rdev;
re_work->event = event;
re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task);
- atomic_inc(&rdev->sched_count);
queue_work(bnxt_re_wq, &re_work->work);
}
}
exit:
+ if (rdev && release)
+ ib_device_put(&rdev->ibdev);
return NOTIFY_DONE;
}
@@ -1726,36 +1777,21 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
- struct bnxt_re_dev *rdev, *next;
- LIST_HEAD(to_be_deleted);
+ struct bnxt_re_dev *rdev;
- mutex_lock(&bnxt_re_dev_lock);
- /* Free all adapter allocated resources */
- if (!list_empty(&bnxt_re_dev_list))
- list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
- mutex_unlock(&bnxt_re_dev_lock);
- /*
- * Cleanup the devices in reverse order so that the VF device
- * cleanup is done before PF cleanup
- */
- list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
- dev_info(rdev_to_dev(rdev), "Unregistering Device");
- /*
- * Flush out any scheduled tasks before destroying the
- * resources
- */
- flush_workqueue(bnxt_re_wq);
- bnxt_re_dev_stop(rdev);
- /* Acquire the rtnl_lock as the L2 resources are freed here */
- rtnl_lock();
- bnxt_re_ib_unreg(rdev);
- rtnl_unlock();
- bnxt_re_remove_one(rdev);
- bnxt_re_dev_unreg(rdev);
- }
unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
if (bnxt_re_wq)
destroy_workqueue(bnxt_re_wq);
+ list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
+ /* VF devices must be removed before the PF device. Unregister
+ * the VFs first so that they are torn down before the PF when
+ * ib_unregister_driver() runs below.
+ */
+ if (rdev->is_virtfn)
+ ib_unregister_device(&rdev->ibdev);
+ }
+ ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}
module_init(bnxt_re_mod_init);
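The simplified module exit relies on the .dealloc_driver hook registered earlier in this patch: once that hook exists, ib_unregister_device_queued() and ib_unregister_driver() let the IB core drive teardown and free the device itself, so the driver no longer walks its own list under rtnl. The wiring, reduced to its essentials:

    static const struct ib_device_ops ops = {
            .owner = THIS_MODULE,
            .driver_id = RDMA_DRIVER_BNXT_RE,
            .dealloc_driver = bnxt_re_dealloc_driver, /* core calls this last */
            /* remaining verbs elided */
    };

After dealloc_driver returns, the core drops the final reference and frees the ib_device, which is why bnxt_re_dev_remove() above no longer calls ib_dealloc_device().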
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 020f70e6865e..899a5d2c100e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -43,6 +43,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
@@ -53,9 +54,7 @@
#include "qplib_sp.h"
#include "qplib_fp.h"
-static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
-static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
@@ -233,6 +232,70 @@ fail:
return rc;
}
+static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
+ struct nq_base *nqe, **nq_ptr;
+ int budget = nq->budget;
+ u32 sw_cons, raw_cons;
+ uintptr_t q_handle;
+ u16 type;
+
+ spin_lock_bh(&hwq->lock);
+ /* Service the NQ until empty */
+ raw_cons = hwq->cons;
+ while (budget--) {
+ sw_cons = HWQ_CMP(raw_cons, hwq);
+ nq_ptr = (struct nq_base **)hwq->pbl_ptr;
+ nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
+ if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
+ break;
+
+ /*
+ * The validity test of the entry must be done before reading
+ * any further data.
+ */
+ dma_rmb();
+
+ type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
+ switch (type) {
+ case NQ_BASE_TYPE_CQ_NOTIFICATION:
+ {
+ struct nq_cn *nqcne = (struct nq_cn *)nqe;
+
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
+ << 32;
+ if ((unsigned long)cq == q_handle) {
+ nqcne->cq_handle_low = 0;
+ nqcne->cq_handle_high = 0;
+ cq->cnq_events++;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ raw_cons++;
+ }
+ spin_unlock_bh(&hwq->lock);
+}
+
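clean_nq() above is a textbook DMA-ring consumer: check the valid/toggle bit of the next element, then dma_rmb() before touching the payload so the reads cannot be speculated past the validity check. Stripped to the ordering that matters, with hypothetical accessors:

    while (budget--) {
            e = ring_entry(hwq, cons);      /* hypothetical accessor */
            if (!entry_valid(e, cons))      /* toggle-bit validity test */
                    break;
            dma_rmb();      /* order the valid check before payload reads */
            consume(e);
            cons++;
    }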
+/* Wait until all NQEs for this CQ have been received, and clean up any
+ * NQEs still associated with this CQ.
+ */
+static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
+{
+ u32 retry_cnt = 100;
+
+ while (retry_cnt--) {
+ if (cnq_events == cq->cnq_events)
+ return;
+ usleep_range(50, 100);
+ clean_nq(cq->nq, cq);
+ }
+}
+
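With retry_cnt = 100 and usleep_range(50, 100), the wait above is bounded to roughly 5-10 ms before the destroy path proceeds anyway. The bounded-poll idiom in isolation, with condition_met() as a hypothetical predicate:

    u32 retries = 100;

    while (retries--) {
            if (condition_met())
                    return;
            usleep_range(50, 100);  /* requires sleepable context */
    }
    /* timed out: fall through and proceed best-effort */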
static void bnxt_qplib_service_nq(unsigned long data)
{
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
@@ -241,12 +304,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
int num_srqne_processed = 0;
- u32 sw_cons, raw_cons;
- u16 type;
int budget = nq->budget;
+ u32 sw_cons, raw_cons;
uintptr_t q_handle;
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
+ u16 type;
+ spin_lock_bh(&hwq->lock);
/* Service the NQ until empty */
raw_cons = hwq->cons;
while (budget--) {
@@ -272,7 +335,10 @@ static void bnxt_qplib_service_nq(unsigned long data)
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
<< 32;
cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
- bnxt_qplib_arm_cq_enable(cq);
+ if (!cq)
+ break;
+ bnxt_qplib_armen_db(&cq->dbinfo,
+ DBC_DBC_TYPE_CQ_ARMENA);
spin_lock_bh(&cq->compl_lock);
atomic_set(&cq->arm_state, 0);
if (!nq->cqn_handler(nq, (cq)))
@@ -280,19 +346,22 @@ static void bnxt_qplib_service_nq(unsigned long data)
else
dev_warn(&nq->pdev->dev,
"cqn - type 0x%x not handled\n", type);
+ cq->cnq_events++;
spin_unlock_bh(&cq->compl_lock);
break;
}
case NQ_BASE_TYPE_SRQ_EVENT:
{
+ struct bnxt_qplib_srq *srq;
struct nq_srq_event *nqsrqe =
(struct nq_srq_event *)nqe;
q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
<< 32;
- bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
- DBC_DBC_TYPE_SRQ_ARMENA);
+ srq = (struct bnxt_qplib_srq *)q_handle;
+ bnxt_qplib_armen_db(&srq->dbinfo,
+ DBC_DBC_TYPE_SRQ_ARMENA);
if (!nq->srqn_handler(nq,
(struct bnxt_qplib_srq *)q_handle,
nqsrqe->event))
@@ -314,10 +383,9 @@ static void bnxt_qplib_service_nq(unsigned long data)
}
if (hwq->cons != raw_cons) {
hwq->cons = raw_cons;
- bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
- hwq->max_elements, nq->ring_id,
- gen_p5);
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
}
+ spin_unlock_bh(&hwq->lock);
}
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
@@ -333,25 +401,23 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
/* Fan out to CPU affinitized kthreads? */
- tasklet_schedule(&nq->worker);
+ tasklet_schedule(&nq->nq_tasklet);
return IRQ_HANDLED;
}
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
- tasklet_disable(&nq->worker);
+ tasklet_disable(&nq->nq_tasklet);
/* Mask h/w interrupt */
- bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
- nq->hwq.max_elements, nq->ring_id, gen_p5);
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
/* Sync with last running IRQ handler */
- synchronize_irq(nq->vector);
+ synchronize_irq(nq->msix_vec);
if (kill)
- tasklet_kill(&nq->worker);
+ tasklet_kill(&nq->nq_tasklet);
if (nq->requested) {
- irq_set_affinity_hint(nq->vector, NULL);
- free_irq(nq->vector, nq);
+ irq_set_affinity_hint(nq->msix_vec, NULL);
+ free_irq(nq->msix_vec, nq);
nq->requested = false;
}
}
@@ -364,89 +430,108 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
/* Make sure the HW is stopped! */
- if (nq->requested)
- bnxt_qplib_nq_stop_irq(nq, true);
+ bnxt_qplib_nq_stop_irq(nq, true);
- if (nq->bar_reg_iomem)
- iounmap(nq->bar_reg_iomem);
- nq->bar_reg_iomem = NULL;
+ if (nq->nq_db.reg.bar_reg) {
+ iounmap(nq->nq_db.reg.bar_reg);
+ nq->nq_db.reg.bar_reg = NULL;
+ }
nq->cqn_handler = NULL;
nq->srqn_handler = NULL;
- nq->vector = 0;
+ nq->msix_vec = 0;
}
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
int msix_vector, bool need_init)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
int rc;
if (nq->requested)
return -EFAULT;
- nq->vector = msix_vector;
+ nq->msix_vec = msix_vector;
if (need_init)
- tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+ tasklet_init(&nq->nq_tasklet, bnxt_qplib_service_nq,
(unsigned long)nq);
else
- tasklet_enable(&nq->worker);
+ tasklet_enable(&nq->nq_tasklet);
snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+ rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
if (rc)
return rc;
cpumask_clear(&nq->mask);
cpumask_set_cpu(nq_indx, &nq->mask);
- rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+ rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
if (rc) {
dev_warn(&nq->pdev->dev,
"set affinity failed; vector: %d nq_idx: %d\n",
- nq->vector, nq_indx);
+ nq->msix_vec, nq_indx);
}
nq->requested = true;
- bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
- nq->hwq.max_elements, nq->ring_id, gen_p5);
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
+
+ return rc;
+}
+
+static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
+{
+ resource_size_t reg_base;
+ struct bnxt_qplib_nq_db *nq_db;
+ struct pci_dev *pdev;
+ int rc = 0;
+
+ pdev = nq->pdev;
+ nq_db = &nq->nq_db;
+
+ nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
+ nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
+ if (!nq_db->reg.bar_base) {
+ dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
+ nq_db->reg.bar_id);
+ rc = -ENOMEM;
+ goto fail;
+ }
+ reg_base = nq_db->reg.bar_base + reg_offt;
+ /* Unconditionally map 8 bytes to support 57500 series */
+ nq_db->reg.len = 8;
+ nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
+ if (!nq_db->reg.bar_reg) {
+ dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
+ nq_db->reg.bar_id);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ nq_db->dbinfo.db = nq_db->reg.bar_reg;
+ nq_db->dbinfo.hwq = &nq->hwq;
+ nq_db->dbinfo.xid = nq->ring_id;
+fail:
return rc;
}
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *),
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_srq *,
- u8 event))
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srqn_handler)
{
- resource_size_t nq_base;
int rc = -1;
- if (cqn_handler)
- nq->cqn_handler = cqn_handler;
-
- if (srqn_handler)
- nq->srqn_handler = srqn_handler;
+ nq->pdev = pdev;
+ nq->cqn_handler = cqn_handler;
+ nq->srqn_handler = srqn_handler;
/* Have a task to schedule CQ notifiers in post send case */
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
if (!nq->cqn_wq)
return -ENOMEM;
- nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
- nq->bar_reg_off = bar_reg_offset;
- nq_base = pci_resource_start(pdev, nq->bar_reg);
- if (!nq_base) {
- rc = -ENOMEM;
- goto fail;
- }
- /* Unconditionally map 8 bytes to support 57500 series */
- nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8);
- if (!nq->bar_reg_iomem) {
- rc = -ENOMEM;
+ rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
+ if (rc)
goto fail;
- }
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
if (rc) {
@@ -464,49 +549,38 @@ fail:
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
if (nq->hwq.max_elements) {
- bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
+ bnxt_qplib_free_hwq(nq->res, &nq->hwq);
nq->hwq.max_elements = 0;
}
}
-int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
- u8 hwq_type;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
- nq->pdev = pdev;
+ nq->pdev = res->pdev;
+ nq->res = res;
if (!nq->hwq.max_elements ||
nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
- hwq_type = bnxt_qplib_get_hwq_type(nq->res);
- if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
- &nq->hwq.max_elements,
- BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
- PAGE_SIZE, hwq_type))
- return -ENOMEM;
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = nq->hwq.max_elements;
+ hwq_attr.stride = sizeof(struct nq_base);
+ hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
+ if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
+ dev_err(&nq->pdev->dev, "FP NQ allocation failed");
+ return -ENOMEM;
+ }
nq->budget = 8;
return 0;
}
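All queue allocations in qplib_fp.c now funnel through bnxt_qplib_hwq_attr: depth is the element count, stride the element size, and sginfo the backing page geometry. A representative fill, with field names from this diff and depth left symbolic:

    struct bnxt_qplib_hwq_attr hwq_attr = {};
    struct bnxt_qplib_sg_info sginfo = {};

    sginfo.pgsize = PAGE_SIZE;
    sginfo.pgshft = PAGE_SHIFT;
    hwq_attr.res = res;
    hwq_attr.sginfo = &sginfo;
    hwq_attr.depth = depth;                     /* number of elements */
    hwq_attr.stride = sizeof(struct nq_base);   /* bytes per element */
    hwq_attr.type = HWQ_TYPE_QUEUE;
    rc = bnxt_qplib_alloc_init_hwq(&hwq, &hwq_attr);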
/* SRQ */
-static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
-{
- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- void __iomem *db;
- u32 sw_prod;
- u64 val = 0;
-
- /* Ring DB */
- sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ?
- srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq);
- db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base :
- srq->dpi->dbr;
- val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
- val <<= 32;
- val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- writeq(val, db);
-}
-
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
@@ -526,24 +600,26 @@ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
kfree(srq->swq);
if (rc)
return;
- bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ bnxt_qplib_free_hwq(res, &srq->hwq);
}
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_srq req;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
struct creq_create_srq_resp resp;
+ struct cmdq_create_srq req;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
int rc, idx;
- srq->hwq.max_elements = srq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info,
- &srq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &srq->sg_info;
+ hwq_attr.depth = srq->max_wqe;
+ hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -595,14 +671,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
srq->swq[srq->last_idx].next_idx = -1;
srq->id = le32_to_cpu(resp.xid);
- srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
+ srq->dbinfo.hwq = &srq->hwq;
+ srq->dbinfo.xid = srq->id;
+ srq->dbinfo.db = srq->dpi->dbr;
+ srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
if (srq->threshold)
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA);
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
srq->arm_req = false;
return 0;
fail:
- bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ bnxt_qplib_free_hwq(res, &srq->hwq);
kfree(srq->swq);
exit:
return rc;
@@ -621,7 +700,7 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
srq_hwq->max_elements - sw_cons + sw_prod;
if (count > srq->threshold) {
srq->arm_req = false;
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
+ bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
} else {
/* Deferred arming */
srq->arm_req = true;
@@ -709,10 +788,10 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
srq_hwq->max_elements - sw_cons + sw_prod;
spin_unlock(&srq_hwq->lock);
/* Ring DB */
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ);
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
if (srq->arm_req == true && count > srq->threshold) {
srq->arm_req = false;
- bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
+ bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
}
done:
return rc;
@@ -721,15 +800,16 @@ done:
/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_qp1 req;
- struct creq_create_qp1_resp resp;
- struct bnxt_qplib_pbl *pbl;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
- int rc;
+ struct creq_create_qp1_resp resp;
+ struct cmdq_create_qp1 req;
+ struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
u32 qp_flags = 0;
+ int rc;
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -739,11 +819,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.qp_handle = cpu_to_le64(qp->qp_handle);
/* SQ */
- sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
- &sq->hwq.max_elements,
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.depth = sq->max_wqe;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -778,11 +859,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
- rq->hwq.max_elements = qp->rq.max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
- &rq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.depth = qp->rq.max_wqe;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto fail_sq;
@@ -840,6 +922,15 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ qp->cctx = res->cctx;
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ }
rcfw->qp_tbl[qp->id].qp_id = qp->id;
rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
@@ -848,10 +939,10 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
fail:
bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
+ bnxt_qplib_free_hwq(res, &rq->hwq);
kfree(rq->swq);
fail_sq:
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
+ bnxt_qplib_free_hwq(res, &sq->hwq);
kfree(sq->swq);
exit:
return rc;
@@ -860,7 +951,9 @@ exit:
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
unsigned long int psn_search, poff = 0;
+ struct bnxt_qplib_sg_info sginfo = {};
struct sq_psn_search **psn_search_ptr;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
@@ -887,12 +980,15 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
}
- sq->hwq.max_elements = sq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
- &sq->hwq.max_elements,
- BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
- psn_sz,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.depth = sq->max_wqe;
+ hwq_attr.aux_stride = psn_sz;
+ hwq_attr.aux_depth = hwq_attr.depth;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -956,12 +1052,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
- rq->hwq.max_elements = rq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
- &rq->sg_info,
- &rq->hwq.max_elements,
- BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.depth = rq->max_wqe;
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto fail_sq;
@@ -1029,10 +1127,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req_size = xrrq->max_elements *
BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1);
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
- &xrrq->max_elements,
- BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
- 0, req_size, HWQ_TYPE_CTX);
+ sginfo.pgsize = req_size;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
if (rc)
goto fail_buf_free;
pbl = &xrrq->pbl[PBL_LVL_0];
@@ -1044,11 +1149,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req_size = xrrq->max_elements *
BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
req_size &= ~(PAGE_SIZE - 1);
-
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
- &xrrq->max_elements,
- BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
- 0, req_size, HWQ_TYPE_CTX);
+ sginfo.pgsize = req_size;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
if (rc)
goto fail_orrq;
@@ -1064,9 +1168,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- qp->cctx = res->cctx;
INIT_LIST_HEAD(&qp->sq_flush);
INIT_LIST_HEAD(&qp->rq_flush);
+ qp->cctx = res->cctx;
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ }
rcfw->qp_tbl[qp->id].qp_id = qp->id;
rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
@@ -1074,17 +1186,17 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
fail:
if (qp->irrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
+ bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
if (qp->orrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
+ bnxt_qplib_free_hwq(res, &qp->orrq);
fail_buf_free:
bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
- bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
+ bnxt_qplib_free_hwq(res, &rq->hwq);
kfree(rq->swq);
fail_sq:
- bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
+ bnxt_qplib_free_hwq(res, &sq->hwq);
kfree(sq->swq);
exit:
return rc;
@@ -1440,16 +1552,16 @@ void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
{
bnxt_qplib_free_qp_hdr_buf(res, qp);
- bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
+ bnxt_qplib_free_hwq(res, &qp->sq.hwq);
kfree(qp->sq.swq);
- bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
+ bnxt_qplib_free_hwq(res, &qp->rq.hwq);
kfree(qp->rq.swq);
if (qp->irrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
+ bnxt_qplib_free_hwq(res, &qp->irrq);
if (qp->orrq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
+ bnxt_qplib_free_hwq(res, &qp->orrq);
}
@@ -1506,16 +1618,8 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *sq = &qp->sq;
- u32 sw_prod;
- u64 val = 0;
- val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
- DBC_DBC_TYPE_SQ);
- val <<= 32;
- sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
- val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- /* Flush all the WQE writes to HW */
- writeq(val, qp->dpi->dbr);
+ bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
@@ -1807,16 +1911,8 @@ done:
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *rq = &qp->rq;
- u32 sw_prod;
- u64 val = 0;
- val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
- DBC_DBC_TYPE_RQ);
- val <<= 32;
- sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
- val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- /* Flush the writes to HW Rx WQE before the ringing Rx DB */
- writeq(val, qp->dpi->dbr);
+ bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
@@ -1896,48 +1992,22 @@ done:
}
/* CQ */
-
-/* Spinlock must be held */
-static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
-{
- u64 val = 0;
-
- val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
- DBC_DBC_TYPE_CQ_ARMENA;
- val <<= 32;
- /* Flush memory writes before enabling the CQ */
- writeq(val, cq->dbr_base);
-}
-
-static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
-{
- struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- u32 sw_cons;
- u64 val = 0;
-
- /* Ring DB */
- val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
- val <<= 32;
- sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
- val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
- /* flush memory writes before arming the CQ */
- writeq(val, cq->dpi->dbr);
-}
-
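The deleted arm helpers here, like the SRQ/SQ/RQ versions earlier, all built the same 64-bit doorbell: queue id and operation type in the high 32 bits, ring index in the low 32, flushed with one writeq(). That composition is what the new bnxt_qplib_ring_db()/bnxt_qplib_armen_db() helpers centralize around bnxt_qplib_db_info. The shape, reconstructed from the removed lines:

    u64 val;

    val = ((xid << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | type;
    val <<= 32;                             /* high word: id + type */
    val |= (index << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
    writeq(val, db);                        /* single 64-bit MMIO write */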
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_create_cq req;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
struct creq_create_cq_resp resp;
+ struct cmdq_create_cq req;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
int rc;
- cq->hwq.max_elements = cq->max_wqe;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
- &cq->hwq.max_elements,
- BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_QUEUE);
+ hwq_attr.res = res;
+ hwq_attr.depth = cq->max_wqe;
+ hwq_attr.stride = sizeof(struct cq_base);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ hwq_attr.sginfo = &cq->sg_info;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
if (rc)
goto exit;
@@ -1976,7 +2046,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
goto fail;
cq->id = le32_to_cpu(resp.xid);
- cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq);
INIT_LIST_HEAD(&cq->sqf_head);
@@ -1984,11 +2053,17 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
spin_lock_init(&cq->compl_lock);
spin_lock_init(&cq->flush_lock);
- bnxt_qplib_arm_cq_enable(cq);
+ cq->dbinfo.hwq = &cq->hwq;
+ cq->dbinfo.xid = cq->id;
+ cq->dbinfo.db = cq->dpi->dbr;
+ cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
+
+ bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
+
return 0;
fail:
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
+ bnxt_qplib_free_hwq(res, &cq->hwq);
exit:
return rc;
}
@@ -1998,6 +2073,7 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_cq req;
struct creq_destroy_cq_resp resp;
+ u16 total_cnq_events;
u16 cmd_flags = 0;
int rc;
@@ -2008,7 +2084,9 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
(void *)&resp, NULL, 0);
if (rc)
return rc;
- bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
+ total_cnq_events = le16_to_cpu(resp.total_cnq_events);
+ __wait_for_all_nqes(cq, total_cnq_events);
+ bnxt_qplib_free_hwq(res, &cq->hwq);
return 0;
}
@@ -2141,8 +2219,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
sq->send_phantom = true;
/* TODO: Only ARM if the previous SQE is ARMALL */
- bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
-
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
rc = -EAGAIN;
goto out;
}
@@ -2426,7 +2503,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
}
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
- cqe->length = (u32)le16_to_cpu(hwcqe->length);
+ cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
cqe->flags = le16_to_cpu(hwcqe->flags);
@@ -2812,7 +2889,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
}
if (cq->hwq.cons != raw_cons) {
cq->hwq.cons = raw_cons;
- bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
}
exit:
return num_cqes - budget;
@@ -2821,7 +2898,7 @@ exit:
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
if (arm_type)
- bnxt_qplib_arm_cq(cq, arm_type);
+ bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
/* Using cq->arm_state variable to track whether to issue cq handler */
atomic_set(&cq->arm_state, 1);
}
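
The removed per-queue arm/ring routines above all composed the same 64-bit doorbell word by hand; the new bnxt_qplib_ring_prod_db()/bnxt_qplib_ring_db() helpers (defined in qplib_res.h further below) centralize that. A minimal standalone model of the key composition follows; the masks are stand-ins for the real DBC_* values in roce_hsi.h, and the real helpers also OR in DBC_DBC_PATH_ROCE:

#include <stdint.h>

#define XID_MASK   0xfffffu   /* stand-in for DBC_DBC_XID_MASK */
#define INDEX_MASK 0xffffffu  /* stand-in for DBC_DBC_INDEX_MASK */

struct db_info_model {
	uint32_t xid;          /* queue context id */
	uint32_t prod;         /* raw producer index */
	uint32_t max_elements; /* power of two */
};

/* Mirrors bnxt_qplib_ring_prod_db(): type and xid in the high 32 bits,
 * wrapped producer index in the low bits.
 */
static uint64_t build_prod_db(const struct db_info_model *info, uint32_t type)
{
	uint64_t key;

	key = (uint64_t)((info->xid & XID_MASK) | type) << 32;
	key |= (info->prod & (info->max_elements - 1)) & INDEX_MASK;
	return key; /* the driver writes this to the BAR with writeq() */
}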
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 99e0a13cbefa..7edb70b6bb16 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -42,7 +42,7 @@
struct bnxt_qplib_srq {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
- void __iomem *dbr_base;
+ struct bnxt_qplib_db_info dbinfo;
u64 srq_handle;
u32 id;
u32 max_wqe;
@@ -236,6 +236,7 @@ struct bnxt_qplib_swqe {
struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
u16 q_full_delta;
@@ -370,7 +371,7 @@ struct bnxt_qplib_cqe {
#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
struct bnxt_qplib_cq {
struct bnxt_qplib_dpi *dpi;
- void __iomem *dbr_base;
+ struct bnxt_qplib_db_info dbinfo;
u32 max_wqe;
u32 id;
u16 count;
@@ -401,6 +402,7 @@ struct bnxt_qplib_cq {
* of the same QP while manipulating the flush list.
*/
spinlock_t flush_lock; /* QP flush management */
+ u16 cnq_events;
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
@@ -433,66 +435,32 @@ struct bnxt_qplib_cq {
NQ_DB_IDX_VALID | \
NQ_DB_IRQ_DIS)
-static inline void bnxt_qplib_ring_nq_db64(void __iomem *db, u32 index,
- u32 xid, bool arm)
-{
- u64 val;
-
- val = xid & DBC_DBC_XID_MASK;
- val |= DBC_DBC_PATH_ROCE;
- val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
- val <<= 32;
- val |= index & DBC_DBC_INDEX_MASK;
- writeq(val, db);
-}
-
-static inline void bnxt_qplib_ring_nq_db_rearm(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_nq_db64(db, index, xid, true);
- else
- writel(NQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK), db);
-}
+struct bnxt_qplib_nq_db {
+ struct bnxt_qplib_reg_desc reg;
+ struct bnxt_qplib_db_info dbinfo;
+};
-static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_nq_db64(db, index, xid, false);
- else
- writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db);
-}
+typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq, u8 event);
struct bnxt_qplib_nq {
- struct pci_dev *pdev;
- struct bnxt_qplib_res *res;
-
- int vector;
- cpumask_t mask;
- int budget;
- bool requested;
- struct tasklet_struct worker;
- struct bnxt_qplib_hwq hwq;
-
- u16 bar_reg;
- u32 bar_reg_off;
- u16 ring_id;
- void __iomem *bar_reg_iomem;
-
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq);
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_srq *srq,
- u8 event);
- struct workqueue_struct *cqn_wq;
- char name[32];
+ struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
+ char name[32];
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_nq_db nq_db;
+ u16 ring_id;
+ int msix_vec;
+ cpumask_t mask;
+ struct tasklet_struct nq_tasklet;
+ bool requested;
+ int budget;
+
+ cqn_handler_t cqn_handler;
+ srqn_handler_t srqn_handler;
+ struct workqueue_struct *cqn_wq;
};
struct bnxt_qplib_nq_work {
@@ -507,11 +475,8 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
- int (*cqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq),
- int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_srq *srq,
- u8 event));
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
@@ -550,7 +515,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
-int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
unsigned long *flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 1291b12287a5..f01e864bb611 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -55,12 +55,14 @@ static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
u16 cbit;
int rc;
+ cmdq = &rcfw->cmdq;
cbit = cookie % rcfw->cmdq_depth;
- rc = wait_event_timeout(rcfw->waitq,
- !test_bit(cbit, rcfw->cmdq_bitmap),
+ rc = wait_event_timeout(cmdq->waitq,
+ !test_bit(cbit, cmdq->cmdq_bitmap),
msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
return rc ? 0 : -ETIMEDOUT;
};
@@ -68,15 +70,17 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
u16 cbit;
+ cmdq = &rcfw->cmdq;
cbit = cookie % rcfw->cmdq_depth;
- if (!test_bit(cbit, rcfw->cmdq_bitmap))
+ if (!test_bit(cbit, cmdq->cmdq_bitmap))
goto done;
do {
mdelay(1); /* 1 msec */
bnxt_qplib_service_creq((unsigned long)rcfw);
- } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
+ } while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
done:
return count ? 0 : -ETIMEDOUT;
};
@@ -84,56 +88,60 @@ done:
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
struct creq_base *resp, void *sb, u8 is_block)
{
- struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
- struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_cmdqe *cmdqe, **hwq_ptr;
+ struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
+ struct bnxt_qplib_crsqe *crsqe;
u32 cmdq_depth = rcfw->cmdq_depth;
- struct bnxt_qplib_crsq *crsqe;
u32 sw_prod, cmdq_prod;
+ struct pci_dev *pdev;
unsigned long flags;
u32 size, opcode;
u16 cookie, cbit;
u8 *preq;
+ pdev = rcfw->pdev;
+
opcode = req->opcode;
- if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
+ if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
- dev_err(&rcfw->pdev->dev,
+ dev_err(&pdev->dev,
"RCFW not initialized, reject opcode 0x%x\n", opcode);
return -EINVAL;
}
- if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
+ if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
- dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
+ dev_err(&pdev->dev, "RCFW already initialized!\n");
return -EINVAL;
}
- if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
+ if (test_bit(FIRMWARE_TIMED_OUT, &cmdq->flags))
return -ETIMEDOUT;
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
- spin_lock_irqsave(&cmdq->lock, flags);
- if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
- dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ spin_lock_irqsave(&hwq->lock, flags);
+ if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
+ dev_err(&pdev->dev, "RCFW: CMDQ is full!\n");
+ spin_unlock_irqrestore(&hwq->lock, flags);
return -EAGAIN;
}
- cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
+ cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
cbit = cookie % rcfw->cmdq_depth;
if (is_block)
cookie |= RCFW_CMD_IS_BLOCKING;
- set_bit(cbit, rcfw->cmdq_bitmap);
+ set_bit(cbit, cmdq->cmdq_bitmap);
req->cookie = cpu_to_le16(cookie);
crsqe = &rcfw->crsqe_tbl[cbit];
if (crsqe->resp) {
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ spin_unlock_irqrestore(&hwq->lock, flags);
return -EBUSY;
}
@@ -155,15 +163,15 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
BNXT_QPLIB_CMDQE_UNITS;
}
- cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
+ hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr;
preq = (u8 *)req;
do {
/* Locate the next cmdq slot */
- sw_prod = HWQ_CMP(cmdq->prod, cmdq);
- cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
+ sw_prod = HWQ_CMP(hwq->prod, hwq);
+ cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
[get_cmdq_idx(sw_prod, cmdq_depth)];
if (!cmdqe) {
- dev_err(&rcfw->pdev->dev,
+ dev_err(&pdev->dev,
"RCFW request failed with no cmdqe!\n");
goto done;
}
@@ -172,31 +180,27 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
preq += min_t(u32, size, sizeof(*cmdqe));
size -= min_t(u32, size, sizeof(*cmdqe));
- cmdq->prod++;
- rcfw->seq_num++;
+ hwq->prod++;
} while (size > 0);
+ cmdq->seq_num++;
- rcfw->seq_num++;
-
- cmdq_prod = cmdq->prod;
- if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
+ cmdq_prod = hwq->prod;
+ if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
/* The very first doorbell write
* is required to set this flag
* which prompts the FW to reset
* its internal pointers
*/
cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
- clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
+ clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
}
/* ring CMDQ DB */
wmb();
- writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
- rcfw->cmdq_bar_reg_prod_off);
- writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
- rcfw->cmdq_bar_reg_trig_off);
+ writel(cmdq_prod, cmdq->cmdq_mbox.prod);
+ writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
done:
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ spin_unlock_irqrestore(&hwq->lock, flags);
/* Return the CREQ response pointer */
return 0;
}
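
For reference, the cookie lifecycle that __send_message() and __wait_for_resp() now share through bnxt_qplib_cmdq_ctx, condensed from the code above (a sketch, not additional driver code):

cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
cbit = cookie % rcfw->cmdq_depth;
set_bit(cbit, cmdq->cmdq_bitmap);       /* sender marks the slot busy */
req->cookie = cpu_to_le16(cookie);
/* ...ring the CMDQ doorbell; firmware echoes the cookie in a CREQ
 * event, whose handler clears the bit and wakes cmdq->waitq...
 */
rc = wait_event_timeout(cmdq->waitq,
			!test_bit(cbit, cmdq->cmdq_bitmap),
			msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));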
@@ -236,7 +240,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* timed out */
dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
- set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
+ set_bit(FIRMWARE_TIMED_OUT, &rcfw->cmdq.flags);
return rc;
}
@@ -253,6 +257,8 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_func_event *func_event)
{
+ int rc;
+
switch (func_event->event) {
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
break;
@@ -286,37 +292,41 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
default:
return -EINVAL;
}
- return 0;
+
+ rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
+ return rc;
}
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
- struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
struct creq_qp_error_notification *err_event;
- struct bnxt_qplib_crsq *crsqe;
- unsigned long flags;
+ struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
+ struct bnxt_qplib_crsqe *crsqe;
struct bnxt_qplib_qp *qp;
u16 cbit, blocked = 0;
- u16 cookie;
+ struct pci_dev *pdev;
+ unsigned long flags;
__le16 mcookie;
+ u16 cookie;
+ int rc = 0;
u32 qp_id;
+ pdev = rcfw->pdev;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
qp = rcfw->qp_tbl[qp_id].qp_handle;
- dev_dbg(&rcfw->pdev->dev,
- "Received QP error notification\n");
- dev_dbg(&rcfw->pdev->dev,
+ dev_dbg(&pdev->dev, "Received QP error notification\n");
+ dev_dbg(&pdev->dev,
"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
if (!qp)
break;
bnxt_qplib_mark_qp_error(qp);
- rcfw->aeq_handler(rcfw, qp_event, qp);
+ rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
break;
default:
/*
@@ -328,7 +338,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
*
*/
- spin_lock_irqsave_nested(&cmdq->lock, flags,
+ spin_lock_irqsave_nested(&hwq->lock, flags,
SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
mcookie = qp_event->cookie;
@@ -342,44 +352,44 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
crsqe->resp = NULL;
} else {
if (crsqe->resp && crsqe->resp->cookie)
- dev_err(&rcfw->pdev->dev,
+ dev_err(&pdev->dev,
"CMD %s cookie sent=%#x, recd=%#x\n",
crsqe->resp ? "mismatch" : "collision",
crsqe->resp ? crsqe->resp->cookie : 0,
mcookie);
}
- if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
- dev_warn(&rcfw->pdev->dev,
+ if (!test_and_clear_bit(cbit, rcfw->cmdq.cmdq_bitmap))
+ dev_warn(&pdev->dev,
"CMD bit %d was not requested\n", cbit);
- cmdq->cons += crsqe->req_size;
+ hwq->cons += crsqe->req_size;
crsqe->req_size = 0;
if (!blocked)
- wake_up(&rcfw->waitq);
- spin_unlock_irqrestore(&cmdq->lock, flags);
+ wake_up(&rcfw->cmdq.waitq);
+ spin_unlock_irqrestore(&hwq->lock, flags);
}
- return 0;
+ return rc;
}
/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(unsigned long data)
{
struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
- struct bnxt_qplib_hwq *creq = &rcfw->creq;
+ struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
- struct creq_base *creqe, **creq_ptr;
+ struct bnxt_qplib_hwq *hwq = &creq->hwq;
+ struct creq_base *creqe, **hwq_ptr;
u32 sw_cons, raw_cons;
unsigned long flags;
/* Service the CREQ until budget is over */
- spin_lock_irqsave(&creq->lock, flags);
- raw_cons = creq->cons;
+ spin_lock_irqsave(&hwq->lock, flags);
+ raw_cons = hwq->cons;
while (budget > 0) {
- sw_cons = HWQ_CMP(raw_cons, creq);
- creq_ptr = (struct creq_base **)creq->pbl_ptr;
- creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
- if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
+ sw_cons = HWQ_CMP(raw_cons, hwq);
+ hwq_ptr = (struct creq_base **)hwq->pbl_ptr;
+ creqe = &hwq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
+ if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
break;
/* The valid test of the entry must be done first before
* reading any further.
@@ -391,12 +401,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
case CREQ_BASE_TYPE_QP_EVENT:
bnxt_qplib_process_qp_event
(rcfw, (struct creq_qp_event *)creqe);
- rcfw->creq_qp_event_processed++;
+ creq->stats.creq_qp_event_processed++;
break;
case CREQ_BASE_TYPE_FUNC_EVENT:
if (!bnxt_qplib_process_func_event
(rcfw, (struct creq_func_event *)creqe))
- rcfw->creq_func_event_processed++;
+ creq->stats.creq_func_event_processed++;
else
dev_warn(&rcfw->pdev->dev,
"aeqe:%#x Not handled\n", type);
@@ -412,28 +422,30 @@ static void bnxt_qplib_service_creq(unsigned long data)
budget--;
}
- if (creq->cons != raw_cons) {
- creq->cons = raw_cons;
- bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
- raw_cons, creq->max_elements,
- rcfw->creq_ring_id, gen_p5);
+ if (hwq->cons != raw_cons) {
+ hwq->cons = raw_cons;
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
+ rcfw->res->cctx, true);
}
- spin_unlock_irqrestore(&creq->lock, flags);
+ spin_unlock_irqrestore(&hwq->lock, flags);
}
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_rcfw *rcfw = dev_instance;
- struct bnxt_qplib_hwq *creq = &rcfw->creq;
+ struct bnxt_qplib_creq_ctx *creq;
struct creq_base **creq_ptr;
+ struct bnxt_qplib_hwq *hwq;
u32 sw_cons;
+ creq = &rcfw->creq;
+ hwq = &creq->hwq;
/* Prefetch the CREQ element */
- sw_cons = HWQ_CMP(creq->cons, creq);
- creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
+ sw_cons = HWQ_CMP(hwq->cons, hwq);
+ creq_ptr = (struct creq_base **)creq->hwq.pbl_ptr;
prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
- tasklet_schedule(&rcfw->worker);
+ tasklet_schedule(&creq->creq_tasklet);
return IRQ_HANDLED;
}
@@ -452,7 +464,7 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
if (rc)
return rc;
- clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
+ clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
return 0;
}
@@ -520,9 +532,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
level = ctx->tim_tbl.level;
req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
__get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
- level = ctx->tqm_pde_level;
- req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
+ level = ctx->tqm_ctx.pde.level;
+ req.tqm_pg_size_tqm_lvl =
+ (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
+ __get_pbl_pg_idx(&ctx->tqm_ctx.pde.pbl[level]);
req.qpc_page_dir =
cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
@@ -535,7 +548,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.tim_page_dir =
cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.tqm_page_dir =
- cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
+ cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]);
req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
@@ -555,33 +568,46 @@ skip_ctx_setup:
NULL, 0);
if (rc)
return rc;
- set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
+ set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
return 0;
}
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
+ kfree(rcfw->cmdq.cmdq_bitmap);
kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
- bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
- bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
+ bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
+ bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
rcfw->pdev = NULL;
}
-int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx,
int qp_tbl_sz)
{
- u8 hwq_type;
-
- rcfw->pdev = pdev;
- rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
- hwq_type = bnxt_qplib_get_hwq_type(rcfw->res);
- if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL,
- &rcfw->creq.max_elements,
- BNXT_QPLIB_CREQE_UNITS,
- 0, PAGE_SIZE, hwq_type)) {
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ u32 bmap_size = 0;
+
+ rcfw->pdev = res->pdev;
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ rcfw->res = res;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = rcfw->res;
+ hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
+ hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
+ hwq_attr.type = bnxt_qplib_get_hwq_type(res);
+
+ if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
dev_err(&rcfw->pdev->dev,
"HW channel CREQ allocation failed\n");
goto fail;
@@ -591,23 +617,28 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
else
rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;
- rcfw->cmdq.max_elements = rcfw->cmdq_depth;
- if (bnxt_qplib_alloc_init_hwq
- (rcfw->pdev, &rcfw->cmdq, NULL,
- &rcfw->cmdq.max_elements,
- BNXT_QPLIB_CMDQE_UNITS, 0,
- bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth),
- HWQ_TYPE_CTX)) {
+ sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
+ hwq_attr.depth = rcfw->cmdq_depth;
+ hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
dev_err(&rcfw->pdev->dev,
"HW channel CMDQ allocation failed\n");
goto fail;
}
- rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
+ rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
if (!rcfw->crsqe_tbl)
goto fail;
+ bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
+ cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
+ if (!cmdq->cmdq_bitmap)
+ goto fail;
+
+ cmdq->bmap_size = bmap_size;
+
rcfw->qp_tbl_size = qp_tbl_sz;
rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
GFP_KERNEL);
@@ -623,137 +654,199 @@ fail:
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
+ struct bnxt_qplib_creq_ctx *creq;
- tasklet_disable(&rcfw->worker);
+ creq = &rcfw->creq;
+ tasklet_disable(&creq->creq_tasklet);
/* Mask h/w interrupts */
- bnxt_qplib_ring_creq_db(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
- rcfw->creq.max_elements, rcfw->creq_ring_id,
- gen_p5);
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
/* Sync with last running IRQ-handler */
- synchronize_irq(rcfw->vector);
+ synchronize_irq(creq->msix_vec);
if (kill)
- tasklet_kill(&rcfw->worker);
+ tasklet_kill(&creq->creq_tasklet);
- if (rcfw->requested) {
- free_irq(rcfw->vector, rcfw);
- rcfw->requested = false;
+ if (creq->requested) {
+ free_irq(creq->msix_vec, rcfw);
+ creq->requested = false;
}
}
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
unsigned long indx;
+ creq = &rcfw->creq;
+ cmdq = &rcfw->cmdq;
+ /* Make sure the HW channel is stopped! */
bnxt_qplib_rcfw_stop_irq(rcfw, true);
- iounmap(rcfw->cmdq_bar_reg_iomem);
- iounmap(rcfw->creq_bar_reg_iomem);
+ iounmap(cmdq->cmdq_mbox.reg.bar_reg);
+ iounmap(creq->creq_db.reg.bar_reg);
- indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
- if (indx != rcfw->bmap_size)
+ indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size);
+ if (indx != cmdq->bmap_size)
dev_err(&rcfw->pdev->dev,
"disabling RCFW with pending cmd-bit %lx\n", indx);
- kfree(rcfw->cmdq_bitmap);
- rcfw->bmap_size = 0;
- rcfw->cmdq_bar_reg_iomem = NULL;
- rcfw->creq_bar_reg_iomem = NULL;
- rcfw->aeq_handler = NULL;
- rcfw->vector = 0;
+ cmdq->cmdq_mbox.reg.bar_reg = NULL;
+ creq->creq_db.reg.bar_reg = NULL;
+ creq->aeq_handler = NULL;
+ creq->msix_vec = 0;
}
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
bool need_init)
{
- bool gen_p5 = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx);
+ struct bnxt_qplib_creq_ctx *creq;
int rc;
- if (rcfw->requested)
+ creq = &rcfw->creq;
+
+ if (creq->requested)
return -EFAULT;
- rcfw->vector = msix_vector;
+ creq->msix_vec = msix_vector;
if (need_init)
- tasklet_init(&rcfw->worker,
+ tasklet_init(&creq->creq_tasklet,
bnxt_qplib_service_creq, (unsigned long)rcfw);
else
- tasklet_enable(&rcfw->worker);
- rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+ tasklet_enable(&creq->creq_tasklet);
+ rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
"bnxt_qplib_creq", rcfw);
if (rc)
return rc;
- rcfw->requested = true;
- bnxt_qplib_ring_creq_db_rearm(rcfw->creq_bar_reg_iomem,
- rcfw->creq.cons, rcfw->creq.max_elements,
- rcfw->creq_ring_id, gen_p5);
+ creq->requested = true;
+
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true);
return 0;
}
-int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw,
- int msix_vector,
- int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- void *, void *))
+static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw, bool is_vf)
{
- resource_size_t res_base;
- struct cmdq_init init;
- u16 bmap_size;
- int rc;
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
+ u16 prod_offt;
+ int rc = 0;
- /* General */
- rcfw->seq_num = 0;
- set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
- bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
- rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
- if (!rcfw->cmdq_bitmap)
- return -ENOMEM;
- rcfw->bmap_size = bmap_size;
+ pdev = rcfw->pdev;
+ mbox = &rcfw->cmdq.cmdq_mbox;
- /* CMDQ */
- rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
- res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
- if (!res_base)
+ mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
+ mbox->reg.len = RCFW_COMM_SIZE;
+ mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
+ if (!mbox->reg.bar_base) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d resc start is 0!\n",
+ mbox->reg.bar_id);
return -ENOMEM;
+ }
- rcfw->cmdq_bar_reg_iomem = ioremap(res_base +
- RCFW_COMM_BASE_OFFSET,
- RCFW_COMM_SIZE);
- if (!rcfw->cmdq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
- rcfw->cmdq_bar_reg);
+ bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
+ mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
+ if (!mbox->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d mapping failed\n",
+ mbox->reg.bar_id);
return -ENOMEM;
}
- rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
- RCFW_PF_COMM_PROD_OFFSET;
+ prod_offt = is_vf ? RCFW_VF_COMM_PROD_OFFSET :
+ RCFW_PF_COMM_PROD_OFFSET;
+ mbox->prod = (void __iomem *)(mbox->reg.bar_reg + prod_offt);
+ mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET);
+ return rc;
+}
- rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
+static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
+{
+ struct bnxt_qplib_creq_db *creq_db;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
- /* CREQ */
- rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
- res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
- if (!res_base)
- dev_err(&rcfw->pdev->dev,
- "CREQ BAR region %d resc start is 0!\n",
- rcfw->creq_bar_reg);
+ pdev = rcfw->pdev;
+ creq_db = &rcfw->creq.creq_db;
+
+ creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
+ creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
+ if (!creq_db->reg.bar_base)
+ dev_err(&pdev->dev,
+ "QPLIB: CREQ BAR region %d resc start is 0!",
+ creq_db->reg.bar_id);
+
+ bar_reg = creq_db->reg.bar_base + reg_offt;
/* Unconditionally map 8 bytes to support 57500 series */
- rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off,
- 8);
- if (!rcfw->creq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
- rcfw->creq_bar_reg);
- iounmap(rcfw->cmdq_bar_reg_iomem);
- rcfw->cmdq_bar_reg_iomem = NULL;
+ creq_db->reg.len = 8;
+ creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
+ if (!creq_db->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "QPLIB: CREQ BAR region %d mapping failed",
+ creq_db->reg.bar_id);
return -ENOMEM;
}
- rcfw->creq_qp_event_processed = 0;
- rcfw->creq_func_event_processed = 0;
+ creq_db->dbinfo.db = creq_db->reg.bar_reg;
+ creq_db->dbinfo.hwq = &rcfw->creq.hwq;
+ creq_db->dbinfo.xid = rcfw->creq.ring_id;
+ return 0;
+}
- if (aeq_handler)
- rcfw->aeq_handler = aeq_handler;
- init_waitqueue_head(&rcfw->waitq);
+static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ struct cmdq_init init = {0};
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ mbox = &cmdq->cmdq_mbox;
+
+ init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
+ init.cmdq_size_cmdq_lvl =
+ cpu_to_le16(((rcfw->cmdq_depth <<
+ CMDQ_INIT_CMDQ_SIZE_SFT) &
+ CMDQ_INIT_CMDQ_SIZE_MASK) |
+ ((cmdq->hwq.level <<
+ CMDQ_INIT_CMDQ_LVL_SFT) &
+ CMDQ_INIT_CMDQ_LVL_MASK));
+ init.creq_ring_id = cpu_to_le16(creq->ring_id);
+ /* Write to the Bono mailbox register */
+ __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
+}
+
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off, int virt_fn,
+ aeq_handler_t aeq_handler)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ int rc;
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+
+ /* Clear to defaults */
+
+ cmdq->seq_num = 0;
+ set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ init_waitqueue_head(&cmdq->waitq);
+
+ creq->stats.creq_qp_event_processed = 0;
+ creq->stats.creq_func_event_processed = 0;
+ creq->aeq_handler = aeq_handler;
+
+ rc = bnxt_qplib_map_cmdq_mbox(rcfw, virt_fn);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
+ if (rc)
+ return rc;
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
@@ -763,16 +856,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
return rc;
}
- init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
- init.cmdq_size_cmdq_lvl = cpu_to_le16(
- ((rcfw->cmdq_depth << CMDQ_INIT_CMDQ_SIZE_SFT) &
- CMDQ_INIT_CMDQ_SIZE_MASK) |
- ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
- CMDQ_INIT_CMDQ_LVL_MASK));
- init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
+ bnxt_qplib_start_rcfw(rcfw);
- /* Write to the Bono mailbox register */
- __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
return 0;
}
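
The size/level packing that bnxt_qplib_start_rcfw() sends in cmdq_size_cmdq_lvl could be read as the following hypothetical helper; the masks and shifts are the existing CMDQ_INIT_* definitions from roce_hsi.h, used verbatim above:

static u16 bnxt_qplib_pack_cmdq_size_lvl(u32 depth, u8 level)
{
	return (u16)(((depth << CMDQ_INIT_CMDQ_SIZE_SFT) &
		      CMDQ_INIT_CMDQ_SIZE_MASK) |
		     ((level << CMDQ_INIT_CMDQ_LVL_SFT) &
		      CMDQ_INIT_CMDQ_LVL_MASK));
}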
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index dfeadc192e17..411fce3493b6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -206,8 +206,9 @@ static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
#define CREQ_ENTRY_POLL_BUDGET 0x100
/* HWQ */
+typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *);
-struct bnxt_qplib_crsq {
+struct bnxt_qplib_crsqe {
struct creq_qp_event *resp;
u32 req_size;
};
@@ -225,41 +226,53 @@ struct bnxt_qplib_qp_node {
#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
+#define FIRMWARE_INITIALIZED_FLAG (0)
+#define FIRMWARE_FIRST_FLAG (31)
+#define FIRMWARE_TIMED_OUT (3)
+struct bnxt_qplib_cmdq_mbox {
+ struct bnxt_qplib_reg_desc reg;
+ void __iomem *prod;
+ void __iomem *db;
+};
+
+struct bnxt_qplib_cmdq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_cmdq_mbox cmdq_mbox;
+ wait_queue_head_t waitq;
+ unsigned long flags;
+ unsigned long *cmdq_bitmap;
+ u32 bmap_size;
+ u32 seq_num;
+};
+
+struct bnxt_qplib_creq_db {
+ struct bnxt_qplib_reg_desc reg;
+ struct bnxt_qplib_db_info dbinfo;
+};
+
+struct bnxt_qplib_creq_stat {
+ u64 creq_qp_event_processed;
+ u64 creq_func_event_processed;
+};
+
+struct bnxt_qplib_creq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_creq_db creq_db;
+ struct bnxt_qplib_creq_stat stats;
+ struct tasklet_struct creq_tasklet;
+ aeq_handler_t aeq_handler;
+ u16 ring_id;
+ int msix_vec;
+ bool requested; /* irq handler installed */
+};
+
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
struct bnxt_qplib_res *res;
- int vector;
- struct tasklet_struct worker;
- bool requested;
- unsigned long *cmdq_bitmap;
- u32 bmap_size;
- unsigned long flags;
-#define FIRMWARE_INITIALIZED_FLAG 0
-#define FIRMWARE_FIRST_FLAG 31
-#define FIRMWARE_TIMED_OUT 3
- wait_queue_head_t waitq;
- int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- void *, void *);
- u32 seq_num;
-
- /* Bar region info */
- void __iomem *cmdq_bar_reg_iomem;
- u16 cmdq_bar_reg;
- u16 cmdq_bar_reg_prod_off;
- u16 cmdq_bar_reg_trig_off;
- u16 creq_ring_id;
- u16 creq_bar_reg;
- void __iomem *creq_bar_reg_iomem;
-
- /* Cmd-Resp and Async Event notification queue */
- struct bnxt_qplib_hwq creq;
- u64 creq_qp_event_processed;
- u64 creq_func_event_processed;
-
- /* Actual Cmd and Resp Queues */
- struct bnxt_qplib_hwq cmdq;
- struct bnxt_qplib_crsq *crsqe_tbl;
+ struct bnxt_qplib_cmdq_ctx cmdq;
+ struct bnxt_qplib_creq_ctx creq;
+ struct bnxt_qplib_crsqe *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
u64 oos_prev;
@@ -268,7 +281,7 @@ struct bnxt_qplib_rcfw {
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
-int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx,
int qp_tbl_sz);
@@ -276,12 +289,10 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
bool need_init);
-int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw,
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- void *aeqe, void *obj));
+ aeq_handler_t aeq_handler);
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 60ea1b924b67..cab1adf1fed9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -44,6 +44,7 @@
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -55,9 +56,10 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats);
/* PBL */
-static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
+static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
bool is_umem)
{
+ struct pci_dev *pdev = res->pdev;
int i;
if (!is_umem) {
@@ -74,35 +76,56 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
pbl->pg_arr[i] = NULL;
}
}
- kfree(pbl->pg_arr);
+ vfree(pbl->pg_arr);
pbl->pg_arr = NULL;
- kfree(pbl->pg_map_arr);
+ vfree(pbl->pg_map_arr);
pbl->pg_map_arr = NULL;
pbl->pg_count = 0;
pbl->pg_size = 0;
}
-static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
- struct scatterlist *sghead, u32 pages,
- u32 nmaps, u32 pg_size)
+static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
{
+ struct scatterlist *sghead = sginfo->sghead;
struct sg_dma_page_iter sg_iter;
+ int i = 0;
+
+ for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
+ pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+ pbl->pg_arr[i] = NULL;
+ pbl->pg_count++;
+ i++;
+ }
+}
+
+static int __alloc_pbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
+{
+ struct pci_dev *pdev = res->pdev;
+ struct scatterlist *sghead;
bool is_umem = false;
+ u32 pages;
int i;
+ if (sginfo->nopte)
+ return 0;
+ pages = sginfo->npages;
+ sghead = sginfo->sghead;
/* page ptr arrays */
- pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+ pbl->pg_arr = vmalloc(pages * sizeof(void *));
if (!pbl->pg_arr)
return -ENOMEM;
- pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
+ pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
if (!pbl->pg_map_arr) {
- kfree(pbl->pg_arr);
+ vfree(pbl->pg_arr);
pbl->pg_arr = NULL;
return -ENOMEM;
}
pbl->pg_count = 0;
- pbl->pg_size = pg_size;
+ pbl->pg_size = sginfo->pgsize;
if (!sghead) {
for (i = 0; i < pages; i++) {
@@ -115,25 +138,19 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
pbl->pg_count++;
}
} else {
- i = 0;
is_umem = true;
- for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) {
- pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
- pbl->pg_arr[i] = NULL;
- pbl->pg_count++;
- i++;
- }
+ bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
}
return 0;
-
fail:
- __free_pbl(pdev, pbl, is_umem);
+ __free_pbl(res, pbl, is_umem);
return -ENOMEM;
}
/* HWQ */
-void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq)
{
int i;
@@ -144,9 +161,9 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
for (i = 0; i < hwq->level + 1; i++) {
if (i == hwq->level)
- __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
+ __free_pbl(res, &hwq->pbl[i], hwq->is_user);
else
- __free_pbl(pdev, &hwq->pbl[i], false);
+ __free_pbl(res, &hwq->pbl[i], false);
}
hwq->level = PBL_LVL_MAX;
@@ -158,79 +175,113 @@ void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
}
/* All HWQs are power of 2 in size */
-int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
- struct bnxt_qplib_sg_info *sg_info,
- u32 *elements, u32 element_size, u32 aux,
- u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
+
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr)
{
- u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0;
+ u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
+ struct bnxt_qplib_sg_info sginfo = {};
+ u32 depth, stride, npbl, npde;
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
struct scatterlist *sghead = NULL;
- int i, rc;
-
+ struct bnxt_qplib_res *res;
+ struct pci_dev *pdev;
+ int i, rc, lvl;
+
+ res = hwq_attr->res;
+ pdev = res->pdev;
+ sghead = hwq_attr->sginfo->sghead;
+ pg_size = hwq_attr->sginfo->pgsize;
hwq->level = PBL_LVL_MAX;
- slots = roundup_pow_of_two(*elements);
- if (aux) {
- aux_size = roundup_pow_of_two(aux);
- aux_pages = (slots * aux_size) / pg_size;
- if ((slots * aux_size) % pg_size)
+ depth = roundup_pow_of_two(hwq_attr->depth);
+ stride = roundup_pow_of_two(hwq_attr->stride);
+ if (hwq_attr->aux_depth) {
+ aux_slots = hwq_attr->aux_depth;
+ aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
+ aux_pages = (aux_slots * aux_size) / pg_size;
+ if ((aux_slots * aux_size) % pg_size)
aux_pages++;
}
- size = roundup_pow_of_two(element_size);
-
- if (sg_info)
- sghead = sg_info->sglist;
if (!sghead) {
hwq->is_user = false;
- pages = (slots * size) / pg_size + aux_pages;
- if ((slots * size) % pg_size)
- pages++;
- if (!pages)
+ npages = (depth * stride) / pg_size + aux_pages;
+ if ((depth * stride) % pg_size)
+ npages++;
+ if (!npages)
return -EINVAL;
- maps = 0;
+ hwq_attr->sginfo->npages = npages;
} else {
hwq->is_user = true;
- pages = sg_info->npages;
- maps = sg_info->nmap;
+ npages = hwq_attr->sginfo->npages;
+ npages = (npages * PAGE_SIZE) /
+ BIT_ULL(hwq_attr->sginfo->pgshft);
+ if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
+ BIT_ULL(hwq_attr->sginfo->pgshft))
+ if (!npages)
+ npages++;
}
- /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
- if (sghead && (pages == MAX_PBL_LVL_0_PGS))
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
- pages, maps, pg_size);
- else
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL,
- 1, 0, pg_size);
- if (rc)
- goto fail;
-
- hwq->level = PBL_LVL_0;
+ if (npages == MAX_PBL_LVL_0_PGS) {
+ /* This request is Level 0, map PTE */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_0;
+ }
- if (pages > MAX_PBL_LVL_0_PGS) {
- if (pages > MAX_PBL_LVL_1_PGS) {
+ if (npages > MAX_PBL_LVL_0_PGS) {
+ if (npages > MAX_PBL_LVL_1_PGS) {
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
/* 2 levels of indirection */
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
- MAX_PBL_LVL_1_PGS_FOR_LVL_2,
- 0, pg_size);
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ npde = npbl >> MAX_PDL_LVL_SHIFT;
+ if (npbl % BIT(MAX_PDL_LVL_SHIFT))
+ npde++;
+ /* Alloc PDE pages */
+ sginfo.pgsize = npde * pg_size;
+ sginfo.npages = 1;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+
+ /* Alloc PBL pages */
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
if (rc)
goto fail;
- /* Fill in lvl0 PBL */
+ /* Fill PDL with PBL page pointers */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
- dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
- src_phys_ptr[i] | PTU_PDE_VALID;
- hwq->level = PBL_LVL_1;
-
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
- pages, maps, pg_size);
+ if (hwq_attr->type == HWQ_TYPE_MR) {
+ /* For MR it is expected that we supply only 1 contiguous
+ * page, i.e. only 1 entry in the PDL, which will contain
+ * all the PBLs for the user-supplied memory region
+ */
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+ i++)
+ dst_virt_ptr[0][i] = src_phys_ptr[i] |
+ flag;
+ } else {
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+ i++)
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] |
+ PTU_PDE_VALID;
+ }
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
+ hwq_attr->sginfo);
if (rc)
goto fail;
-
- /* Fill in lvl1 PBL */
+ hwq->level = PBL_LVL_2;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBLs with PTE pointers */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
@@ -238,7 +289,7 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] | PTU_PTE_VALID;
}
- if (hwq_type == HWQ_TYPE_QUEUE) {
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
/* Find the last pg of the size */
i = hwq->pbl[PBL_LVL_2].pg_count;
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
@@ -248,25 +299,36 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
[PTR_IDX(i - 2)] |=
PTU_PTE_NEXT_TO_LAST;
}
- hwq->level = PBL_LVL_2;
- } else {
- u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
- PTU_PTE_VALID;
+ } else { /* pages < 512 npbl = 1, npde = 0 */
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
/* 1 level of indirection */
- rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
- pages, maps, pg_size);
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ /* Alloc PBL page */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
if (rc)
goto fail;
- /* Fill in lvl0 PBL */
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_1;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBL with PTE pointers */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] | flag;
- }
- if (hwq_type == HWQ_TYPE_QUEUE) {
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
/* Find the last pg of the size */
i = hwq->pbl[PBL_LVL_1].pg_count;
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
@@ -276,42 +338,141 @@ int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
[PTR_IDX(i - 2)] |=
PTU_PTE_NEXT_TO_LAST;
}
- hwq->level = PBL_LVL_1;
}
}
- hwq->pdev = pdev;
- spin_lock_init(&hwq->lock);
+done:
hwq->prod = 0;
hwq->cons = 0;
- *elements = hwq->max_elements = slots;
- hwq->element_size = size;
-
+ hwq->pdev = pdev;
+ hwq->depth = hwq_attr->depth;
+ hwq->max_elements = depth;
+ hwq->element_size = stride;
/* For direct access to the elements */
- hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
- hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;
+ lvl = hwq->level;
+ if (hwq_attr->sginfo->nopte && hwq->level)
+ lvl = hwq->level - 1;
+ hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
+ hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
+ spin_lock_init(&hwq->lock);
return 0;
-
fail:
- bnxt_qplib_free_hwq(pdev, hwq);
+ bnxt_qplib_free_hwq(res, hwq);
return -ENOMEM;
}
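
A standalone worked example of the new two-level sizing math above, using the 512-entries-per-page shifts that the qplib_res.h changes below define:

#include <stdio.h>

#define MAX_PBL_LVL_1_PGS_SHIFT 9 /* 512 PBL entries per page */
#define MAX_PDL_LVL_SHIFT       9 /* 512 PDE entries per page */

int main(void)
{
	unsigned int npages = 70000; /* example PTE page count */
	unsigned int npbl, npde;

	npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;         /* 136 */
	if (npages % (1U << MAX_PBL_LVL_1_PGS_SHIFT))
		npbl++;                                   /* -> 137 PBL pages */
	npde = npbl >> MAX_PDL_LVL_SHIFT;                 /* 0 */
	if (npbl % (1U << MAX_PDL_LVL_SHIFT))
		npde++;                                   /* -> 1 PDE page */
	printf("npbl=%u npde=%u\n", npbl, npde);
	return 0;
}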
/* Context Tables */
-void bnxt_qplib_free_ctx(struct pci_dev *pdev,
+void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx)
{
int i;
- bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
- bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
+ bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
- bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
- bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
+ /* restore original pde level before destroy */
+ ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
+ bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
+ bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
+}
+
+static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_tqm_ctx *tqmctx;
+ int rc = 0;
+ int i;
+
+ tqmctx = &ctx->tqm_ctx;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = res;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ hwq_attr.depth = 512;
+ hwq_attr.stride = sizeof(u64);
+ /* Alloc pdl buffer */
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
+ if (rc)
+ goto out;
+ /* Save original pdl level */
+ tqmctx->pde_level = tqmctx->pde.level;
+
+ hwq_attr.stride = 1;
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
+ if (!tqmctx->qcount[i])
+ continue;
+ hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
+ if (rc)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
+{
+ struct bnxt_qplib_hwq *tbl;
+ dma_addr_t *dma_ptr;
+ __le64 **pbl_ptr, *ptr;
+ int i, j, k;
+ int fnz_idx = -1;
+ int pg_count;
+
+ pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;
+
+ for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
+ i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
+ tbl = &ctx->qtbl[i];
+ if (!tbl->max_elements)
+ continue;
+ if (fnz_idx == -1)
+ fnz_idx = i; /* first non-zero index */
+ switch (tbl->level) {
+ case PBL_LVL_2:
+ pg_count = tbl->pbl[PBL_LVL_1].pg_count;
+ for (k = 0; k < pg_count; k++) {
+ ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
+ dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
+ *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
+ }
+ break;
+ case PBL_LVL_1:
+ case PBL_LVL_0:
+ default:
+ ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
+ *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
+ PTU_PTE_VALID);
+ break;
+ }
+ }
+ if (fnz_idx == -1)
+ fnz_idx = 0;
+ /* update pde level as per page table programming */
+ ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
+ ctx->qtbl[fnz_idx].level + 1;
+}
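
The level fixup at the end of bnxt_qplib_map_tqm_pgtbl() adds one level of indirection above the first populated queue table, capped at the hardware's two-level maximum:

/* qtbl level -> programmed pde level (illustrative):
 *   PBL_LVL_0 -> PBL_LVL_1
 *   PBL_LVL_1 -> PBL_LVL_2
 *   PBL_LVL_2 -> PBL_LVL_2 (already at the cap)
 */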
+
+static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
+{
+ int rc = 0;
+
+ rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
+ if (rc)
+ goto fail;
+
+ bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
+fail:
+ return rc;
}
/*
@@ -335,120 +496,72 @@ void bnxt_qplib_free_ctx(struct pci_dev *pdev,
* Returns:
* 0 if success, else -ERRORS
*/
-int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
+int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx,
bool virt_fn, bool is_p5)
{
- int i, j, k, rc = 0;
- int fnz_idx = -1;
- __le64 **pbl_ptr;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ int rc = 0;
if (virt_fn || is_p5)
goto stats_alloc;
/* QPC Tables */
- ctx->qpc_tbl.max_elements = ctx->qpc_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL,
- &ctx->qpc_tbl.max_elements,
- BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = ctx->qpc_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
if (rc)
goto fail;
/* MRW Tables */
- ctx->mrw_tbl.max_elements = ctx->mrw_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL,
- &ctx->mrw_tbl.max_elements,
- BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->mrw_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
if (rc)
goto fail;
/* SRQ Tables */
- ctx->srqc_tbl.max_elements = ctx->srqc_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL,
- &ctx->srqc_tbl.max_elements,
- BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->srqc_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
if (rc)
goto fail;
/* CQ Tables */
- ctx->cq_tbl.max_elements = ctx->cq_count;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL,
- &ctx->cq_tbl.max_elements,
- BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->cq_count;
+ hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
if (rc)
goto fail;
/* TQM Buffer */
- ctx->tqm_pde.max_elements = 512;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL,
- &ctx->tqm_pde.max_elements, sizeof(u64),
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
+ rc = bnxt_qplib_setup_tqm_rings(res, ctx);
if (rc)
goto fail;
-
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
- if (!ctx->tqm_count[i])
- continue;
- ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
- ctx->tqm_count[i];
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL,
- &ctx->tqm_tbl[i].max_elements, 1,
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
- if (rc)
- goto fail;
- }
- pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
- for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
- i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
- if (!ctx->tqm_tbl[i].max_elements)
- continue;
- if (fnz_idx == -1)
- fnz_idx = i;
- switch (ctx->tqm_tbl[i].level) {
- case PBL_LVL_2:
- for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
- k++)
- pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
- cpu_to_le64(
- ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
- | PTU_PTE_VALID);
- break;
- case PBL_LVL_1:
- case PBL_LVL_0:
- default:
- pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
- ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
- PTU_PTE_VALID);
- break;
- }
- }
- if (fnz_idx == -1)
- fnz_idx = 0;
- ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
- PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;
-
/* TIM Buffer */
ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
- rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL,
- &ctx->tim_tbl.max_elements, 1,
- 0, PAGE_SIZE, HWQ_TYPE_CTX);
+ hwq_attr.depth = ctx->qpc_count * 16;
+ hwq_attr.stride = 1;
+ rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
if (rc)
goto fail;
-
stats_alloc:
/* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
if (rc)
goto fail;
return 0;
fail:
- bnxt_qplib_free_ctx(pdev, ctx);
+ bnxt_qplib_free_ctx(res, ctx);
return rc;
}
@@ -808,9 +921,6 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
-
- res->netdev = NULL;
- res->pdev = NULL;
}
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index aaa76d792185..95b645dbbc2d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -55,7 +55,8 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
enum bnxt_qplib_hwq_type {
HWQ_TYPE_CTX,
HWQ_TYPE_QUEUE,
- HWQ_TYPE_L2_CMPL
+ HWQ_TYPE_L2_CMPL,
+ HWQ_TYPE_MR
};
#define MAX_PBL_LVL_0_PGS 1
@@ -63,6 +64,7 @@ enum bnxt_qplib_hwq_type {
#define MAX_PBL_LVL_1_PGS_SHIFT 9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
#define MAX_PBL_LVL_2_PGS (256 * 512)
+#define MAX_PDL_LVL_SHIFT 9
enum bnxt_qplib_pbl_lvl {
PBL_LVL_0,
@@ -78,6 +80,13 @@ enum bnxt_qplib_pbl_lvl {
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
+struct bnxt_qplib_reg_desc {
+ u8 bar_id;
+ resource_size_t bar_base;
+ void __iomem *bar_reg;
+ size_t len;
+};
+
struct bnxt_qplib_pbl {
u32 pg_count;
u32 pg_size;
@@ -85,17 +94,37 @@ struct bnxt_qplib_pbl {
dma_addr_t *pg_map_arr;
};
+struct bnxt_qplib_sg_info {
+ struct scatterlist *sghead;
+ u32 nmap;
+ u32 npages;
+ u32 pgshft;
+ u32 pgsize;
+ bool nopte;
+};
+
+struct bnxt_qplib_hwq_attr {
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_sg_info *sginfo;
+ enum bnxt_qplib_hwq_type type;
+ u32 depth;
+ u32 stride;
+ u32 aux_stride;
+ u32 aux_depth;
+};
+
struct bnxt_qplib_hwq {
struct pci_dev *pdev;
/* lock to protect qplib_hwq */
spinlock_t lock;
- struct bnxt_qplib_pbl pbl[PBL_LVL_MAX];
+ struct bnxt_qplib_pbl pbl[PBL_LVL_MAX + 1];
enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
/* ptr for easy access to the PBL entries */
void **pbl_ptr;
/* ptr for easy access to the dma_addr */
dma_addr_t *pbl_dma_ptr;
u32 max_elements;
+ u32 depth;
u16 element_size; /* Size of each entry */
u32 prod; /* raw */
@@ -104,6 +133,13 @@ struct bnxt_qplib_hwq {
u8 is_user;
};
+struct bnxt_qplib_db_info {
+ void __iomem *db;
+ void __iomem *priv_db;
+ struct bnxt_qplib_hwq *hwq;
+ u32 xid;
+};
+
/* Tables */
struct bnxt_qplib_pd_tbl {
unsigned long *tbl;
@@ -159,6 +195,15 @@ struct bnxt_qplib_vf_res {
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128
+#define MAX_TQM_ALLOC_REQ 48
+#define MAX_TQM_ALLOC_BLK_SIZE 8
+struct bnxt_qplib_tqm_ctx {
+ struct bnxt_qplib_hwq pde;
+ u8 pde_level; /* Original level */
+ struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ];
+ u8 qcount[MAX_TQM_ALLOC_REQ];
+};
+
struct bnxt_qplib_ctx {
u32 qpc_count;
struct bnxt_qplib_hwq qpc_tbl;
@@ -169,12 +214,7 @@ struct bnxt_qplib_ctx {
u32 cq_count;
struct bnxt_qplib_hwq cq_tbl;
struct bnxt_qplib_hwq tim_tbl;
-#define MAX_TQM_ALLOC_REQ 48
-#define MAX_TQM_ALLOC_BLK_SIZE 8
- u8 tqm_count[MAX_TQM_ALLOC_REQ];
- struct bnxt_qplib_hwq tqm_pde;
- u32 tqm_pde_level;
- struct bnxt_qplib_hwq tqm_tbl[MAX_TQM_ALLOC_REQ];
+ struct bnxt_qplib_tqm_ctx tqm_ctx;
struct bnxt_qplib_stats stats;
struct bnxt_qplib_vf_res vf_res;
u64 hwrm_intf_ver;
@@ -223,11 +263,6 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}
-struct bnxt_qplib_sg_info {
- struct scatterlist *sglist;
- u32 nmap;
- u32 npages;
-};
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
@@ -235,11 +270,10 @@ struct bnxt_qplib_sg_info {
struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;
-void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq);
-int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
- struct bnxt_qplib_sg_info *sg_info, u32 *elements,
- u32 elements_per_page, u32 aux, u32 pg_size,
- enum bnxt_qplib_hwq_type hwq_type);
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq);
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr);
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
@@ -258,9 +292,80 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr);
-void bnxt_qplib_free_ctx(struct pci_dev *pdev,
+void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
-int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
+int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx,
bool virt_fn, bool is_p5);
+
+static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
+ bool arm)
+{
+ u32 key;
+
+ key = info->hwq->cons & (info->hwq->max_elements - 1);
+ key |= (CMPL_DOORBELL_IDX_VALID |
+ (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
+ if (!arm)
+ key |= CMPL_DOORBELL_MASK;
+ writel(key, info->db);
+}
+
+static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
+ key <<= 32;
+ key |= (info->hwq->cons & (info->hwq->max_elements - 1)) &
+ DBC_DBC_INDEX_MASK;
+ writeq(key, info->db);
+}
+
+static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
+ key <<= 32;
+ key |= (info->hwq->prod & (info->hwq->max_elements - 1)) &
+ DBC_DBC_INDEX_MASK;
+ writeq(key, info->db);
+}
+
+static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
+ key <<= 32;
+ writeq(key, info->priv_db);
+}
+
+static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info,
+ u32 th)
+{
+ u64 key = 0;
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | th;
+ key <<= 32;
+ key |= th & DBC_DBC_INDEX_MASK;
+ writeq(key, info->priv_db);
+}
+
+static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
+ struct bnxt_qplib_chip_ctx *cctx,
+ bool arm)
+{
+ u32 type;
+
+ type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+ if (bnxt_qplib_is_chip_gen_p5(cctx))
+ bnxt_qplib_ring_db(info, type);
+ else
+ bnxt_qplib_ring_db32(info, arm);
+}
#endif /* __BNXT_QPLIB_RES_H__ */
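The inline helpers above centralize doorbell composition: on Gen-P5 chips a 64-bit doorbell packs the xid, DBC_DBC_PATH_ROCE and the type into the high 32 bits and the masked ring index into the low 32, written with writeq(); older chips keep the 32-bit completion doorbell with its valid/mask bits, written with writel(). bnxt_qplib_ring_nq_db() selects between the two by chip generation, so callers never branch on hardware revision themselves. A hedged sketch of a consumer (the function and its completion loop are illustrative; only the helper calls come from this header):

/* Sketch: re-arming an NQ from its IRQ path with the new helpers.
 * The surrounding handler and how cons is advanced are assumptions;
 * the db_info/chip_ctx types and helper calls come from this header.
 */
static void demo_nq_rearm(struct bnxt_qplib_db_info *dbinfo,
			  struct bnxt_qplib_chip_ctx *cctx)
{
	/* ... consume entries, advancing dbinfo->hwq->cons ... */

	/* ring without arming while more work may still be pending */
	bnxt_qplib_ring_nq_db(dbinfo, cctx, false);

	/* arm once drained so the next posted entry raises an interrupt */
	bnxt_qplib_ring_nq_db(dbinfo, cctx, true);
}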
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 40296b97d21e..66954ff6a2f2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -585,7 +585,7 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
/* Free the qplib's MRW memory */
if (mrw->hwq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
return 0;
}
@@ -646,7 +646,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
if (mrw->hwq.max_elements) {
mrw->va = 0;
mrw->total_size = 0;
- bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
}
return 0;
@@ -656,10 +656,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_register_mr req;
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
struct creq_register_mr_resp resp;
- u16 cmd_flags = 0, level;
+ struct cmdq_register_mr req;
int pg_ptrs, pages, i, rc;
+ u16 cmd_flags = 0, level;
dma_addr_t **pbl_ptr;
u32 pg_size;
@@ -674,20 +676,23 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
if (pages > MAX_PBL_LVL_1_PGS) {
dev_err(&res->pdev->dev,
- "SP: Reg MR pages requested (0x%x) exceeded max (0x%x)\n",
+ "SP: Reg MR: pages requested (0x%x) exceeded max (0x%x)\n",
pages, MAX_PBL_LVL_1_PGS);
return -ENOMEM;
}
/* Free the hwq if it already exists, must be a rereg */
if (mr->hwq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
-
- mr->hwq.max_elements = pages;
+ bnxt_qplib_free_hwq(res, &mr->hwq);
/* Use system PAGE_SIZE */
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL,
- &mr->hwq.max_elements,
- PAGE_SIZE, 0, PAGE_SIZE,
- HWQ_TYPE_CTX);
+ hwq_attr.res = res;
+ hwq_attr.depth = pages;
+ hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.type = HWQ_TYPE_MR;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.sginfo->npages = pages;
+ hwq_attr.sginfo->pgsize = PAGE_SIZE;
+ hwq_attr.sginfo->pgshft = PAGE_SHIFT;
+ rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
if (rc) {
dev_err(&res->pdev->dev,
"SP: Reg MR memory allocation failed\n");
@@ -734,7 +739,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
fail:
if (mr->hwq.max_elements)
- bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
+ bnxt_qplib_free_hwq(res, &mr->hwq);
return rc;
}
@@ -742,6 +747,8 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl,
int max_pg_ptrs)
{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
int pg_ptrs, pages, rc;
/* Re-calculate the max to fit the HWQ allocation model */
@@ -753,10 +760,15 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
if (pages > MAX_PBL_LVL_1_PGS)
return -ENOMEM;
- frpl->hwq.max_elements = pages;
- rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL,
- &frpl->hwq.max_elements, PAGE_SIZE, 0,
- PAGE_SIZE, HWQ_TYPE_CTX);
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.nopte = true;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = pg_ptrs;
+ hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
if (!rc)
frpl->max_pg_ptrs = pg_ptrs;
@@ -766,7 +778,7 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl)
{
- bnxt_qplib_free_hwq(res->pdev, &frpl->hwq);
+ bnxt_qplib_free_hwq(res, &frpl->hwq);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7d06b0f8d49a..e8e11bd95e42 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -707,7 +707,7 @@ struct mpa_message {
u8 flags;
u8 revision;
__be16 private_data_size;
- u8 private_data[0];
+ u8 private_data[];
};
struct mpa_v2_conn_params {
@@ -719,7 +719,7 @@ struct terminate_message {
u8 layer_etype;
u8 ecode;
__be16 hdrct_rsvd;
- u8 len_hdrs[0];
+ u8 len_hdrs[];
};
#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
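The [0]-to-[] changes here (and in the hfi1 hunks below) convert GNU zero-length arrays to C99 flexible array members. Layout and behavior are unchanged, but [] lets the compiler and helpers such as struct_size() reason about the trailing array's bounds. A small sketch of allocating one of these structures, assuming struct_size() from <linux/overflow.h>; the helper function is invented for illustration:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch: sizing a struct that ends in a flexible array member.
 * struct_size() computes sizeof(*msg) plus priv_len bytes for the
 * trailing private_data[], with integer-overflow checking built in.
 */
static struct mpa_message *demo_alloc_mpa(size_t priv_len)
{
	struct mpa_message *msg;

	msg = kzalloc(struct_size(msg, private_data, priv_len), GFP_KERNEL);
	return msg;	/* NULL on allocation failure */
}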
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 89ac2f9ae6dd..ac48012c992f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2127,7 +2127,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
pr_debug("ib_pd %p\n", pd);
if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
php = to_c4iw_pd(pd);
rhp = php->rhp;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index cbdb300a4794..a2f5e29ef226 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -123,7 +123,7 @@ struct fw_ri_dsgl {
__be32 len0;
__be64 addr0;
#ifndef C99_NOT_SUPPORTED
- struct fw_ri_dsge_pair sge[0];
+ struct fw_ri_dsge_pair sge[];
#endif
};
@@ -139,7 +139,7 @@ struct fw_ri_isgl {
__be16 nsge;
__be32 r2;
#ifndef C99_NOT_SUPPORTED
- struct fw_ri_sge sge[0];
+ struct fw_ri_sge sge[];
#endif
};
@@ -149,7 +149,7 @@ struct fw_ri_immd {
__be16 r2;
__be32 immdlen;
#ifndef C99_NOT_SUPPORTED
- __u8 data[0];
+ __u8 data[];
#endif
};
@@ -321,7 +321,7 @@ struct fw_ri_res_wr {
__be32 len16_pkd;
__u64 cookie;
#ifndef C99_NOT_SUPPORTED
- struct fw_ri_res res[0];
+ struct fw_ri_res res[];
#endif
};
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 74b787a90660..96b104ab5415 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
@@ -801,21 +801,16 @@ struct efa_admin_mmio_req_read_less_resp {
/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
-#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_SHIFT 1
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
-#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
-#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT 2
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
/* create_cq_cmd */
-#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
-#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_SHIFT 6
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
index c8e0c8b905be..29d53ed63b3e 100644
--- a/drivers/infiniband/hw/efa/efa_admin_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_H_
@@ -121,9 +121,7 @@ struct efa_admin_aenq_entry {
/* aq_common_desc */
#define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
-#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
-#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
/* acq_common_desc */
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 0778f4f7dccd..7fce69f5568f 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -16,26 +16,13 @@
#define EFA_ASYNC_QUEUE_DEPTH 16
#define EFA_ADMIN_QUEUE_DEPTH 32
-#define MIN_EFA_VER\
- ((EFA_ADMIN_API_VERSION_MAJOR << EFA_REGS_VERSION_MAJOR_VERSION_SHIFT) | \
- (EFA_ADMIN_API_VERSION_MINOR & EFA_REGS_VERSION_MINOR_VERSION_MASK))
-
#define EFA_CTRL_MAJOR 0
#define EFA_CTRL_MINOR 0
#define EFA_CTRL_SUB_MINOR 1
-#define MIN_EFA_CTRL_VER \
- (((EFA_CTRL_MAJOR) << \
- (EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
- ((EFA_CTRL_MINOR) << \
- (EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
- (EFA_CTRL_SUB_MINOR))
-
#define EFA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
#define EFA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
-#define EFA_REGS_ADMIN_INTR_MASK 1
-
enum efa_cmd_status {
EFA_CMD_SUBMITTED,
EFA_CMD_COMPLETED,
@@ -84,7 +71,7 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
struct efa_admin_mmio_req_read_less_resp *read_resp;
unsigned long exp_time;
- u32 mmio_read_reg;
+ u32 mmio_read_reg = 0;
u32 err;
read_resp = mmio_read->read_resp;
@@ -94,10 +81,9 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
/* trash DMA req_id to identify when hardware is done */
read_resp->req_id = mmio_read->seq_num + 0x9aL;
- mmio_read_reg = (offset << EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
- EFA_REGS_MMIO_REG_READ_REG_OFF_MASK;
- mmio_read_reg |= mmio_read->seq_num &
- EFA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+ EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
+ EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
+ mmio_read->seq_num);
writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);
@@ -137,9 +123,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_com_admin_sq *sq = &aq->sq;
u16 size = aq->depth * sizeof(*sq->entries);
+ u32 aq_caps = 0;
u32 addr_high;
u32 addr_low;
- u32 aq_caps;
sq->entries =
dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
@@ -160,10 +146,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);
- aq_caps = aq->depth & EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
- aq_caps |= (sizeof(struct efa_admin_aq_entry) <<
- EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
- EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
+ EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
+ EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_aq_entry));
writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);
@@ -175,9 +160,9 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_com_admin_cq *cq = &aq->cq;
u16 size = aq->depth * sizeof(*cq->entries);
+ u32 acq_caps = 0;
u32 addr_high;
u32 addr_low;
- u32 acq_caps;
cq->entries =
dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
@@ -195,13 +180,11 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);
- acq_caps = aq->depth & EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
- acq_caps |= (sizeof(struct efa_admin_acq_entry) <<
- EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
- EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
- acq_caps |= (aq->msix_vector_idx <<
- EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT) &
- EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK;
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_acq_entry));
+ EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
+ aq->msix_vector_idx);
writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);
@@ -212,7 +195,8 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
struct efa_aenq_handlers *aenq_handlers)
{
struct efa_com_aenq *aenq = &edev->aenq;
- u32 addr_low, addr_high, aenq_caps;
+ u32 addr_low, addr_high;
+ u32 aenq_caps = 0;
u16 size;
if (!aenq_handlers) {
@@ -237,13 +221,11 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);
- aenq_caps = aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct efa_admin_aenq_entry) <<
- EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
- aenq_caps |= (aenq->msix_vector_idx
- << EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT) &
- EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK;
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
+ sizeof(struct efa_admin_aenq_entry));
+ EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
+ aenq->msix_vector_idx);
writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);
/*
@@ -280,8 +262,8 @@ static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
struct efa_comp_ctx *comp_ctx)
{
- u16 cmd_id = comp_ctx->user_cqe->acq_common_descriptor.command &
- EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+ u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command,
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
u16 ctx_id = cmd_id & (aq->depth - 1);
ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
@@ -335,8 +317,8 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
cmd->aq_common_descriptor.command_id = cmd_id;
- cmd->aq_common_descriptor.flags |= aq->sq.phase &
- EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
+ EFA_SET(&cmd->aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);
comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
if (!comp_ctx) {
@@ -427,8 +409,8 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
struct efa_comp_ctx *comp_ctx;
u16 cmd_id;
- cmd_id = cqe->acq_common_descriptor.command &
- EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
+ cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
if (!comp_ctx) {
@@ -705,7 +687,7 @@ void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
u32 mask_value = 0;
if (polling)
- mask_value = EFA_REGS_ADMIN_INTR_MASK;
+ EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1);
writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
if (polling)
@@ -743,7 +725,7 @@ int efa_com_admin_init(struct efa_com_dev *edev,
int err;
dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
- if (!(dev_sts & EFA_REGS_DEV_STS_READY_MASK)) {
+ if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
ibdev_err(edev->efa_dev,
"Device isn't ready, abort com init %#x\n", dev_sts);
return -ENODEV;
@@ -778,8 +760,7 @@ int efa_com_admin_init(struct efa_com_dev *edev,
goto err_destroy_cq;
cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
- timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
- EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
if (timeout)
/* the resolution of timeout reg is 100ms */
aq->completion_timeout = timeout * 100000;
@@ -940,7 +921,9 @@ void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
int efa_com_validate_version(struct efa_com_dev *edev)
{
+ u32 min_ctrl_ver = 0;
u32 ctrl_ver_masked;
+ u32 min_ver = 0;
u32 ctrl_ver;
u32 ver;
@@ -953,33 +936,42 @@ int efa_com_validate_version(struct efa_com_dev *edev)
EFA_REGS_CONTROLLER_VERSION_OFF);
ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
- (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- EFA_REGS_VERSION_MAJOR_VERSION_SHIFT,
- ver & EFA_REGS_VERSION_MINOR_VERSION_MASK);
-
- if (ver < MIN_EFA_VER) {
+ EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
+ EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));
+
+ EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
+ EFA_ADMIN_API_VERSION_MAJOR);
+ EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
+ EFA_ADMIN_API_VERSION_MINOR);
+ if (ver < min_ver) {
ibdev_err(edev->efa_dev,
"EFA version is lower than the minimal version the driver supports\n");
return -EOPNOTSUPP;
}
- ibdev_dbg(edev->efa_dev,
- "efa controller version: %d.%d.%d implementation version %d\n",
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
- EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
- EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
- EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+ ibdev_dbg(
+ edev->efa_dev,
+ "efa controller version: %d.%d.%d implementation version %d\n",
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
+ EFA_GET(&ctrl_ver,
+ EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));
ctrl_ver_masked =
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
- (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
-
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
+ EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
+ EFA_GET(&ctrl_ver,
+ EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);
+
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
+ EFA_CTRL_MAJOR);
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
+ EFA_CTRL_MINOR);
+ EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
+ EFA_CTRL_SUB_MINOR);
/* Validate the ctrl version without the implementation ID */
- if (ctrl_ver_masked < MIN_EFA_CTRL_VER) {
+ if (ctrl_ver_masked < min_ctrl_ver) {
ibdev_err(edev->efa_dev,
"EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
return -EOPNOTSUPP;
@@ -1002,8 +994,7 @@ int efa_com_get_dma_width(struct efa_com_dev *edev)
u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
int width;
- width = (caps & EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
- EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
+ width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);
ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);
@@ -1017,16 +1008,14 @@ int efa_com_get_dma_width(struct efa_com_dev *edev)
return width;
}
-static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
- u16 exp_state)
+static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
{
u32 val, i;
for (i = 0; i < timeout; i++) {
val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
- if ((val & EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
- exp_state)
+ if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
return 0;
ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
@@ -1046,36 +1035,34 @@ static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
int efa_com_dev_reset(struct efa_com_dev *edev,
enum efa_regs_reset_reason_types reset_reason)
{
- u32 stat, timeout, cap, reset_val;
+ u32 stat, timeout, cap;
+ u32 reset_val = 0;
int err;
stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
- if (!(stat & EFA_REGS_DEV_STS_READY_MASK)) {
+ if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
ibdev_err(edev->efa_dev,
"Device isn't ready, can't reset device\n");
return -EINVAL;
}
- timeout = (cap & EFA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
- EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
if (!timeout) {
ibdev_err(edev->efa_dev, "Invalid timeout value\n");
return -EINVAL;
}
/* start reset */
- reset_val = EFA_REGS_DEV_CTL_DEV_RESET_MASK;
- reset_val |= (reset_reason << EFA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
- EFA_REGS_DEV_CTL_RESET_REASON_MASK;
+ EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
+ EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
/* reset clears the mmio readless address, restore it */
efa_com_mmio_reg_read_resp_addr_init(edev);
- err = wait_for_reset_state(edev, timeout,
- EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ err = wait_for_reset_state(edev, timeout, 1);
if (err) {
ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
return err;
@@ -1089,8 +1076,7 @@ int efa_com_dev_reset(struct efa_com_dev *edev,
return err;
}
- timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
- EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
if (timeout)
/* the resolution of timeout reg is 100ms */
edev->aq.completion_timeout = timeout * 100000;
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index e20bd84a1014..eea5574a62e8 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -161,8 +161,9 @@ int efa_com_create_cq(struct efa_com_dev *edev,
int err;
create_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_CQ;
- create_cmd.cq_caps_2 = (params->entry_size_in_bytes / 4) &
- EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ EFA_SET(&create_cmd.cq_caps_2,
+ EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS,
+ params->entry_size_in_bytes / 4);
create_cmd.cq_depth = params->cq_depth;
create_cmd.num_sub_cqs = params->num_sub_cqs;
create_cmd.uar = params->uarn;
@@ -227,8 +228,8 @@ int efa_com_register_mr(struct efa_com_dev *edev,
mr_cmd.aq_common_desc.opcode = EFA_ADMIN_REG_MR;
mr_cmd.pd = params->pd;
mr_cmd.mr_length = params->mr_length_in_bytes;
- mr_cmd.flags |= params->page_shift &
- EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
+ EFA_SET(&mr_cmd.flags, EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT,
+ params->page_shift);
mr_cmd.iova = params->iova;
mr_cmd.permissions = params->permissions;
@@ -242,11 +243,11 @@ int efa_com_register_mr(struct efa_com_dev *edev,
params->pbl.pbl.address.mem_addr_low;
mr_cmd.pbl.pbl.address.mem_addr_high =
params->pbl.pbl.address.mem_addr_high;
- mr_cmd.aq_common_desc.flags |=
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+ EFA_SET(&mr_cmd.aq_common_desc.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
if (params->indirect)
- mr_cmd.aq_common_desc.flags |=
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ EFA_SET(&mr_cmd.aq_common_desc.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
}
err = efa_com_cmd_exec(aq,
@@ -386,9 +387,8 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
get_cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE;
if (control_buff_size)
- get_cmd.aq_common_descriptor.flags =
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
-
+ EFA_SET(&get_cmd.aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&get_cmd.control_buffer.address.mem_addr_high,
@@ -538,8 +538,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
set_cmd->aq_common_descriptor.opcode = EFA_ADMIN_SET_FEATURE;
if (control_buff_size) {
- set_cmd->aq_common_descriptor.flags =
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ set_cmd->aq_common_descriptor.flags = 0;
+ EFA_SET(&set_cmd->aq_common_descriptor.flags,
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&set_cmd->control_buffer.address.mem_addr_high,
&set_cmd->control_buffer.address.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_common_defs.h b/drivers/infiniband/hw/efa/efa_common_defs.h
index c559ec08898e..90af1c82c9c6 100644
--- a/drivers/infiniband/hw/efa/efa_common_defs.h
+++ b/drivers/infiniband/hw/efa/efa_common_defs.h
@@ -1,14 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COMMON_H_
#define _EFA_COMMON_H_
+#include <linux/bitfield.h>
+
#define EFA_COMMON_SPEC_VERSION_MAJOR 2
#define EFA_COMMON_SPEC_VERSION_MINOR 0
+#define EFA_GET(ptr, mask) FIELD_GET(mask##_MASK, *(ptr))
+
+#define EFA_SET(ptr, mask, value) \
+ ({ \
+ typeof(ptr) _ptr = ptr; \
+ *_ptr = (*_ptr & ~(mask##_MASK)) | \
+ FIELD_PREP(mask##_MASK, value); \
+ })
+
struct efa_common_mem_addr {
u32 mem_addr_low;
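EFA_GET()/EFA_SET() are thin wrappers over FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive a field's shift from the lowest set bit of its mask at compile time; that is what makes every *_SHIFT define elsewhere in this series dead weight. A worked example with invented masks (not part of the EFA ABI):

#include <linux/bitfield.h>

#define DEMO_DEPTH_MASK		0x0000ffff	/* bits 15:0 */
#define DEMO_ENTRY_SIZE_MASK	0xffff0000	/* bits 31:16, shift inferred */

static u32 demo_pack_caps(u16 depth, u16 entry_size)
{
	u32 caps = 0;

	/* each EFA_SET() is a read-modify-write of one field; the macro
	 * pastes mask##_MASK, so DEMO_DEPTH expands to DEMO_DEPTH_MASK
	 */
	EFA_SET(&caps, DEMO_DEPTH, depth);
	EFA_SET(&caps, DEMO_ENTRY_SIZE, entry_size);

	/* EFA_GET(&caps, DEMO_ENTRY_SIZE) now returns entry_size */
	return caps;
}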
diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h
index bb9cad3d6a15..4017982fe13b 100644
--- a/drivers/infiniband/hw/efa/efa_regs_defs.h
+++ b/drivers/infiniband/hw/efa/efa_regs_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_REGS_H_
@@ -45,69 +45,52 @@ enum efa_regs_reset_reason_types {
/* version register */
#define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff
-#define EFA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
#define EFA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
/* controller_version register */
#define EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
-#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
#define EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
-#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
#define EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
-#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
#define EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
/* caps register */
#define EFA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
-#define EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
#define EFA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
-#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
-#define EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
#define EFA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
#define EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
-#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
#define EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
/* acq_caps register */
#define EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
-#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
#define EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xff0000
-#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT 24
#define EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK 0xff000000
/* aenq_caps register */
#define EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
-#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
#define EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xff0000
-#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT 24
#define EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK 0xff000000
+/* intr_mask register */
+#define EFA_REGS_INTR_MASK_EN_MASK 0x1
+
/* dev_ctl register */
#define EFA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
-#define EFA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
#define EFA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
-#define EFA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
#define EFA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
#define EFA_REGS_DEV_STS_READY_MASK 0x1
-#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
#define EFA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
-#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
#define EFA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
-#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
#define EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
-#define EFA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
#define EFA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
-#define EFA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
#define EFA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
/* mmio_reg_read register */
#define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
-#define EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
#define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
#endif /* _EFA_REGS_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index ec5545870554..5c57098a4aee 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -144,9 +144,6 @@ static inline bool is_rdma_read_cap(struct efa_dev *dev)
return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
}
-#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
- sizeof_field(typeof(x), fld) <= (sz))
-
#define is_reserved_cleared(reserved) \
!memchr_inv(reserved, 0, sizeof(reserved))
@@ -169,6 +166,14 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
return addr;
}
+static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
+ dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
+ free_pages_exact(cpu_addr, size);
+}
+
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata)
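efa_zalloc_mapped() and the new efa_free_mapped() pair page allocation with streaming DMA mapping: the free helper unmaps and then releases the pages, and the later hunks route every teardown and error path through it, so the paths can no longer diverge (previously the mmap path freed pages in efa_mmap_free() while other paths freed inline). A sketch of the pairing; the demo function names are invented, the qp fields and helper signatures follow this file:

/* Sketch: lifetime of a queue buffer with the paired helpers. */
static int demo_rq_setup(struct efa_dev *dev, struct efa_qp *qp)
{
	qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
					    qp->rq_size, DMA_TO_DEVICE);
	if (!qp->rq_cpu_addr)
		return -ENOMEM;
	return 0;
}

static void demo_rq_teardown(struct efa_dev *dev, struct efa_qp *qp)
{
	if (qp->rq_size)
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
}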
@@ -402,6 +407,9 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
int err;
ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
+
+ efa_qp_user_mmap_entries_remove(qp);
+
err = efa_destroy_qp_handle(dev, qp->qp_handle);
if (err)
return err;
@@ -411,11 +419,10 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
"qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
qp->rq_cpu_addr, qp->rq_size,
&qp->rq_dma_addr);
- dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
- DMA_TO_DEVICE);
+ efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
}
- efa_qp_user_mmap_entries_remove(qp);
kfree(qp);
return 0;
}
@@ -599,7 +606,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
if (err)
goto err_out;
- if (!field_avail(cmd, driver_qp_type, udata->inlen)) {
+ if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, no input udata\n");
err = -EINVAL;
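The removed field_avail() macro open-coded "does the user buffer reach the end of this member?"; offsetofend() from <linux/stddef.h> expresses the same check directly, and the idiom works for both udata->inlen (input ABI, as here) and udata->outlen (output ABI, as in the hns_roce hunks further down). A minimal sketch with an invented ABI struct:

#include <linux/stddef.h>

struct demo_cmd {		/* illustrative user ABI struct */
	u32 comp_mask;
	u8 driver_qp_type;
	u8 reserved[3];
};

/* The buffer userspace passed is compatible only if it is long
 * enough to contain driver_qp_type in full.
 */
static bool demo_has_qp_type(size_t inlen)
{
	return offsetofend(struct demo_cmd, driver_qp_type) <= inlen;
}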
@@ -720,13 +727,9 @@ err_remove_mmap_entries:
err_destroy_qp:
efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
- if (qp->rq_size) {
- dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
- DMA_TO_DEVICE);
-
- if (!qp->rq_mmap_entry)
- free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
- }
+ if (qp->rq_size)
+ efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
+ qp->rq_size, DMA_TO_DEVICE);
err_free_qp:
kfree(qp);
err_out:
@@ -845,10 +848,10 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
"Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
- efa_destroy_cq_idx(dev, cq->cq_idx);
- dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
rdma_user_mmap_entry_remove(cq->mmap_entry);
+ efa_destroy_cq_idx(dev, cq->cq_idx);
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
}
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
@@ -890,7 +893,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out;
}
- if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
+ if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
ibdev_dbg(ibdev,
"Incompatible ABI params, no input udata\n");
err = -EINVAL;
@@ -985,10 +988,8 @@ err_remove_mmap:
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
- dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
- if (!cq->mmap_entry)
- free_pages_exact(cq->cpu_addr, cq->size);
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.sw_stats.create_cq_err);
@@ -1550,10 +1551,6 @@ void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
- /* DMA mapping is already gone, now free the pages */
- if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
- free_pages_exact(phys_to_virt(entry->address),
- entry->rdma_entry.npages * PAGE_SIZE);
kfree(entry);
}
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 986c12153e62..0dfbcfb048ca 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -222,11 +222,11 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
while (bit < bitsize) {
zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
if (zero - 1 != bit)
- size += snprintf(data + size,
+ size += scnprintf(data + size,
datalen - size - 1,
"0x%lx-0x%lx,", bit, zero - 1);
else
- size += snprintf(data + size,
+ size += scnprintf(data + size,
datalen - size - 1, "0x%lx,",
bit);
bit = find_next_bit(fault->opcodes, bitsize, zero);
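snprintf() returns the length the output would have needed, so accumulating its return value in `size` can push `data + size` past the buffer once an item is truncated; scnprintf() returns the number of bytes actually written, which keeps the running offset in bounds. A condensed illustration of the accumulation idiom this hunk fixes (buffer and values invented):

/* Sketch: why a running-offset loop needs scnprintf(). With
 * snprintf(), a truncated item would advance `size` past `len`.
 */
static size_t demo_format(char *buf, size_t len,
			  const unsigned long *vals, int n)
{
	size_t size = 0;
	int i;

	for (i = 0; i < n; i++)
		size += scnprintf(buf + size, len - size, "0x%lx,", vals[i]);

	return size;	/* never exceeds len */
}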
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 259115886d35..e7fdd70c6e78 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -209,7 +209,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd->mm = current->mm;
mmgrab(fd->mm);
fd->dd = dd;
- kobject_get(&fd->dd->kobj);
fp->private_data = fd;
return 0;
nomem:
@@ -713,7 +712,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
deallocate_ctxt(uctxt);
done:
mmdrop(fdata->mm);
- kobject_put(&dd->kobj);
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
@@ -1696,7 +1694,7 @@ static int user_add(struct hfi1_devdata *dd)
snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
&dd->user_cdev, &dd->user_device,
- true, &dd->kobj);
+ true, &dd->verbs_dev.rdi.ibdev.dev.kobj);
if (ret)
user_remove(dd);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cae12f416ca0..b06c2594105a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1413,8 +1413,6 @@ struct hfi1_devdata {
bool aspm_enabled; /* ASPM state: enabled/disabled */
struct rhashtable *sdma_rht;
- struct kobject kobj;
-
/* vnic data */
struct hfi1_vnic_data vnic;
/* Lock to protect IRQ SRC register access */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3acda7a0800..3759d9233a1c 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1198,13 +1198,13 @@ static void finalize_asic_data(struct hfi1_devdata *dd,
}
/**
- * hfi1_clean_devdata - cleans up per-unit data structure
+ * hfi1_free_devdata - cleans up and frees per-unit data structure
* @dd: pointer to a valid devdata structure
*
- * It cleans up all data structures set up by
+ * It cleans up and frees all data structures set up
* by hfi1_alloc_devdata().
*/
-static void hfi1_clean_devdata(struct hfi1_devdata *dd)
+void hfi1_free_devdata(struct hfi1_devdata *dd)
{
struct hfi1_asic_data *ad;
unsigned long flags;
@@ -1231,23 +1231,6 @@ static void hfi1_clean_devdata(struct hfi1_devdata *dd)
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
-static void __hfi1_free_devdata(struct kobject *kobj)
-{
- struct hfi1_devdata *dd =
- container_of(kobj, struct hfi1_devdata, kobj);
-
- hfi1_clean_devdata(dd);
-}
-
-static struct kobj_type hfi1_devdata_type = {
- .release = __hfi1_free_devdata,
-};
-
-void hfi1_free_devdata(struct hfi1_devdata *dd)
-{
- kobject_put(&dd->kobj);
-}
-
/**
* hfi1_alloc_devdata - Allocate our primary per-unit data structure.
* @pdev: Valid PCI device
@@ -1333,11 +1316,10 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
- kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
bail:
- hfi1_clean_devdata(dd);
+ hfi1_free_devdata(dd);
return ERR_PTR(ret);
}
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index a51bcd2b4391..7073f237a949 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -2381,7 +2381,7 @@ struct opa_port_status_rsp {
__be64 port_vl_rcv_bubble;
__be64 port_vl_mark_fecn;
__be64 port_vl_xmit_discards;
- } vls[0]; /* real array size defined by # bits set in vl_select_mask */
+ } vls[]; /* real array size defined by # bits set in vl_select_mask */
};
enum counter_selects {
@@ -2423,7 +2423,7 @@ struct opa_aggregate {
__be16 attr_id;
__be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
__be32 attr_mod;
- u8 data[0];
+ u8 data[];
};
#define MSK_LLI 0x000000f0
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 2f48e6953629..889e63d3f2cc 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -165,7 +165,7 @@ struct opa_mad_notice_attr {
} __packed ntc_2048;
};
- u8 class_data[0];
+ u8 class_data[];
};
#define IB_VLARB_LOWPRI_0_31 1
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index c9a58b642bdd..0102262343c0 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -243,7 +243,7 @@ struct sc_config_sizes {
*/
struct pio_map_elem {
u32 mask;
- struct send_context *ksc[0];
+ struct send_context *ksc[];
};
/*
@@ -263,7 +263,7 @@ struct pio_vl_map {
u32 mask;
u8 actual_vls;
u8 vls;
- struct pio_map_elem *map[0];
+ struct pio_map_elem *map[];
};
int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index a51525647ac8..c93ea021cf49 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -833,7 +833,7 @@ struct sdma_engine *sdma_select_engine_sc(
struct sdma_rht_map_elem {
u32 mask;
u8 ctr;
- struct sdma_engine *sde[0];
+ struct sdma_engine *sde[];
};
struct sdma_rht_node {
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 1e2e40f79cb2..7a851191f987 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -1002,7 +1002,7 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
*/
struct sdma_map_elem {
u32 mask;
- struct sdma_engine *sde[0];
+ struct sdma_engine *sde[];
};
/**
@@ -1024,7 +1024,7 @@ struct sdma_vl_map {
u32 mask;
u8 actual_vls;
u8 vls;
- struct sdma_map_elem *map[0];
+ struct sdma_map_elem *map[];
};
int sdma_map_init(
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 90f62c4bddba..074ec71772d2 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sc2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ /*
+ * Based on the documentation for kobject_init_and_add(), the
+ * caller should call kobject_put even if this call fails.
+ */
+ goto bail_sc2vl;
}
kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
@@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sl2sc sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sc2vl;
+ goto bail_sl2sc;
}
kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
@@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping vl2mtu sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl2sc;
+ goto bail_vl2mtu;
}
kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
@@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_vl2mtu;
+ goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -742,7 +746,6 @@ bail_sl2sc:
kobject_put(&ppd->sl2sc_kobj);
bail_sc2vl:
kobject_put(&ppd->sc2vl_kobj);
-bail:
return ret;
}
@@ -853,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
return 0;
bail:
- for (i = 0; i < dd->num_sdma; i++)
- kobject_del(&dd->per_sdma[i].kobj);
+ /*
+ * The function kobject_put() will call kobject_del() if the kobject
+ * has been added successfully. The sysfs files created under the
+ * kobject directory will also be removed during the process.
+ */
+ for (; i >= 0; i--)
+ kobject_put(&dd->per_sdma[i].kobj);
return ret;
}
@@ -867,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;
+ /* Unwind operations in hfi1_verbs_register_sysfs() */
+ for (i = 0; i < dd->num_sdma; i++)
+ kobject_put(&dd->per_sdma[i].kobj);
+
for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i];
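The sysfs error paths now rely on kobject_put() alone: once kobject_init() has run, kobject_put() is always safe, and when kobject_add() succeeded it also performs the kobject_del() and removes the files under the directory, so separate kobject_del() calls in unwind paths are redundant. A sketch of the init/unwind pattern these hunks converge on; the demo struct, ktype and names are illustrative:

#include <linux/kobject.h>

struct demo_obj { struct kobject kobj; };	/* illustrative */
static struct kobj_type demo_ktype;		/* .release assumed set */

static int demo_add_all(struct demo_obj *objs, int n,
			struct kobject *parent)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = kobject_init_and_add(&objs[i].kobj, &demo_ktype,
					   parent, "obj%d", i);
		if (ret) {
			/* kobject_put() is required even when add fails;
			 * for the already-added ones it does the del too.
			 */
			for (; i >= 0; i--)
				kobject_put(&objs[i].kobj);
			return ret;
		}
		kobject_uevent(&objs[i].kobj, KOBJ_ADD);
	}
	return 0;
}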
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 6257eee083a1..332abb446861 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -73,7 +73,7 @@ struct tid_rb_node {
dma_addr_t dma_addr;
bool freed;
unsigned int npages;
- struct page *pages[0];
+ struct page *pages[];
};
static inline int num_user_pages(unsigned long addr,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 5ffe4c996ed3..5bfb52ffd590 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -257,8 +257,8 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
return ret;
}
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(*resp))) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
&hr_cq->db);
if (ret) {
@@ -321,8 +321,8 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
udata, struct hns_roce_ucontext, ibucontext);
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(*resp)))
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags))
hns_roce_db_unmap_user(context, &hr_cq->db);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a7c4ff975c28..f6b3cf6b95d6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -641,6 +641,19 @@ struct hns_roce_rinl_buf {
u32 wqe_cnt;
};
+enum {
+ HNS_ROCE_FLUSH_FLAG = 0,
+};
+
+struct hns_roce_work {
+ struct hns_roce_dev *hr_dev;
+ struct work_struct work;
+ u32 qpn;
+ u32 cqn;
+ int event_type;
+ int sub_type;
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
@@ -656,11 +669,6 @@ struct hns_roce_qp {
struct ib_umem *umem;
struct hns_roce_mtt mtt;
struct hns_roce_mtr mtr;
-
- /* this define must less than HNS_ROCE_MAX_BT_REGION */
-#define HNS_ROCE_WQE_REGION_MAX 3
- struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX];
- int region_cnt;
int wqe_bt_pg_shift;
u32 buff_size;
@@ -684,6 +692,9 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+ /* 0: flush needed, 1: unneeded */
+ unsigned long flush_flag;
+ struct hns_roce_work flush_work;
struct hns_roce_rinl_buf rq_inl_buf;
struct list_head node; /* all qps are on a list */
struct list_head rq_node; /* all recv qps are on a list */
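hns_roce_work moves up so each QP can embed a flush_work, and flush_flag (bit HNS_ROCE_FLUSH_FLAG) deduplicates requests. A plausible shape for the producer side that init_flush_work(), declared later in this patch, enables; this is a sketch, not the series' exact body, and the irq_workq field is assumed from the existing driver:

/* Sketch: queue at most one flush per QP. The work is assumed to
 * have been set up with INIT_WORK() and a handler elsewhere; the
 * handler clears HNS_ROCE_FLUSH_FLAG when it runs.
 */
static void demo_queue_flush(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	/* already pending? the bit stays set until the work executes */
	if (test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
		return;

	flush_work->hr_dev = hr_dev;
	queue_work(hr_dev->irq_workq, &flush_work->work);
}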
@@ -762,14 +773,8 @@ struct hns_roce_eq {
int eqe_ba_pg_sz;
int eqe_buf_pg_sz;
int hop_num;
- u64 *bt_l0; /* Base address table for L0 */
- u64 **bt_l1; /* Base address table for L1 */
- u64 **buf;
- dma_addr_t l0_dma;
- dma_addr_t *l1_dma;
- dma_addr_t *buf_dma;
- u32 l0_last_num; /* L0 last chunk num */
- u32 l1_last_num; /* L1 last chunk num */
+ struct hns_roce_mtr mtr;
+ struct hns_roce_buf buf;
int eq_max_cnt;
int eq_period;
int shift;
@@ -881,7 +886,7 @@ struct hns_roce_caps {
u32 cqc_timer_ba_pg_sz;
u32 cqc_timer_buf_pg_sz;
u32 cqc_timer_hop_num;
- u32 cqe_ba_pg_sz;
+ u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
u32 srqwqe_ba_pg_sz;
@@ -906,15 +911,6 @@ struct hns_roce_caps {
u16 default_ceq_arm_st;
};
-struct hns_roce_work {
- struct hns_roce_dev *hr_dev;
- struct work_struct work;
- u32 qpn;
- u32 cqn;
- int event_type;
- int sub_type;
-};
-
struct hns_roce_dfx_hw {
int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer);
@@ -1237,9 +1233,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
-void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
-void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
-void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
@@ -1248,9 +1245,8 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
- int cnt);
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index e82215774032..263338b90d7a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -39,6 +39,16 @@
#define DMA_ADDR_T_SHIFT 12
#define BT_BA_SHIFT 32
+#define HEM_INDEX_BUF BIT(0)
+#define HEM_INDEX_L0 BIT(1)
+#define HEM_INDEX_L1 BIT(2)
+struct hns_roce_hem_index {
+ u64 buf;
+ u64 l0;
+ u64 l1;
+ u32 inited; /* indicates which indexes are available */
+};
+
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
int hop_num = 0;
@@ -84,25 +94,27 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
return hop_num ? true : false;
}
-static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
- u32 bt_chunk_num, u64 hem_max_num)
+static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
+ u32 bt_chunk_num, u64 hem_max_num)
{
+ u64 start_idx = round_down(hem_idx, bt_chunk_num);
u64 check_max_num = start_idx + bt_chunk_num;
u64 i;
for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
- if (hem[i])
+ if (i != hem_idx && hem[i])
return false;
return true;
}
-static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
+static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
{
+ u64 start_idx = round_down(ba_idx, bt_chunk_num);
int i;
for (i = 0; i < bt_chunk_num; i++)
- if (bt[start_idx + i])
+ if (i != ba_idx && bt[start_idx + i])
return false;
return true;
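The null-check helpers now take the element's own index and derive the chunk start with round_down(), skipping the entry being freed (`i != hem_idx`); a chunk's backing table can then be released exactly when every sibling slot in the chunk is already empty. A standalone sketch of the same test with simplified types:

#include <linux/kernel.h>	/* round_down() */

/* Sketch: "am I the last live entry in my chunk?" -- the question
 * the mhop put path asks before releasing L0/L1 base-address tables.
 */
static bool demo_last_in_chunk(void **slots, u64 idx, u32 chunk, u64 max)
{
	u64 start = round_down(idx, chunk);
	u64 i;

	for (i = start; i < start + chunk && i < max; i++)
		if (i != idx && slots[i])
			return false;	/* a sibling is still in use */

	return true;
}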
@@ -434,178 +446,235 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret;
}
-static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long obj)
+static int calc_hem_config(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_mhop mhop;
- struct hns_roce_hem_iter iter;
- u32 buf_chunk_size;
- u32 bt_chunk_size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long mhop_obj = obj;
+ u32 l0_idx, l1_idx, l2_idx;
u32 chunk_ba_num;
- u32 hop_num;
- u32 size;
u32 bt_num;
- u64 hem_idx;
- u64 bt_l1_idx = 0;
- u64 bt_l0_idx = 0;
- u64 bt_ba;
- unsigned long mhop_obj = obj;
- int bt_l1_allocated = 0;
- int bt_l0_allocated = 0;
- int step_idx;
int ret;
- ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
if (ret)
return ret;
- buf_chunk_size = mhop.buf_chunk_size;
- bt_chunk_size = mhop.bt_chunk_size;
- hop_num = mhop.hop_num;
- chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
-
- bt_num = hns_roce_get_bt_num(table->type, hop_num);
+ l0_idx = mhop->l0_idx;
+ l1_idx = mhop->l1_idx;
+ l2_idx = mhop->l2_idx;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
switch (bt_num) {
case 3:
- hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
- mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
- bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- bt_l0_idx = mhop.l0_idx;
+ index->l1 = l0_idx * chunk_ba_num + l1_idx;
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
+ l1_idx * chunk_ba_num + l2_idx;
break;
case 2:
- hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- bt_l0_idx = mhop.l0_idx;
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num + l1_idx;
break;
case 1:
- hem_idx = mhop.l0_idx;
+ index->buf = l0_idx;
break;
default:
- dev_err(dev, "Table %d not support hop_num = %d!\n",
- table->type, hop_num);
+ ibdev_err(ibdev, "Table %d not support mhop.hop_num = %d!\n",
+ table->type, mhop->hop_num);
return -EINVAL;
}
- if (unlikely(hem_idx >= table->num_hem)) {
- dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n",
- table->type, hem_idx, table->num_hem);
+ if (unlikely(index->buf >= table->num_hem)) {
+ ibdev_err(ibdev, "Table %d exceed hem limt idx %llu,max %lu!\n",
+ table->type, index->buf, table->num_hem);
return -EINVAL;
}
- mutex_lock(&table->mutex);
+ return 0;
+}
- if (table->hem[hem_idx]) {
- ++table->hem[hem_idx]->refcount;
- goto out;
+static void free_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+
+ if (index->inited & HEM_INDEX_BUF) {
+ hns_roce_free_hem(hr_dev, table->hem[index->buf]);
+ table->hem[index->buf] = NULL;
+ }
+
+ if (index->inited & HEM_INDEX_L1) {
+ dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
+ table->bt_l1_dma_addr[index->l1]);
+ table->bt_l1[index->l1] = NULL;
}
+ if (index->inited & HEM_INDEX_L0) {
+ dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
+ table->bt_l0_dma_addr[index->l0]);
+ table->bt_l0[index->l0] = NULL;
+ }
+}
+
+static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_iter iter;
+ gfp_t flag;
+ u64 bt_ba;
+ u32 size;
+ int ret;
+
/* alloc L1 BA's chunk */
- if ((check_whether_bt_num_3(table->type, hop_num) ||
- check_whether_bt_num_2(table->type, hop_num)) &&
- !table->bt_l0[bt_l0_idx]) {
- table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
- &(table->bt_l0_dma_addr[bt_l0_idx]),
+ if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
+ check_whether_bt_num_2(table->type, mhop->hop_num)) &&
+ !table->bt_l0[index->l0]) {
+ table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l0_dma_addr[index->l0],
GFP_KERNEL);
- if (!table->bt_l0[bt_l0_idx]) {
+ if (!table->bt_l0[index->l0]) {
ret = -ENOMEM;
goto out;
}
- bt_l0_allocated = 1;
-
- /* set base address to hardware */
- if (table->type < HEM_TYPE_MTT) {
- step_idx = 0;
- if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
- ret = -ENODEV;
- dev_err(dev, "set HEM base address to HW failed!\n");
- goto err_dma_alloc_l1;
- }
- }
+ index->inited |= HEM_INDEX_L0;
}
/* alloc L2 BA's chunk */
- if (check_whether_bt_num_3(table->type, hop_num) &&
- !table->bt_l1[bt_l1_idx]) {
- table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
- &(table->bt_l1_dma_addr[bt_l1_idx]),
+ if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
+ !table->bt_l1[index->l1]) {
+ table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l1_dma_addr[index->l1],
GFP_KERNEL);
- if (!table->bt_l1[bt_l1_idx]) {
+ if (!table->bt_l1[index->l1]) {
ret = -ENOMEM;
- goto err_dma_alloc_l1;
- }
- bt_l1_allocated = 1;
- *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
- table->bt_l1_dma_addr[bt_l1_idx];
-
- /* set base address to hardware */
- step_idx = 1;
- if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
- ret = -ENODEV;
- dev_err(dev, "set HEM base address to HW failed!\n");
- goto err_alloc_hem_buf;
+ goto err_alloc_hem;
}
+ index->inited |= HEM_INDEX_L1;
+ *(table->bt_l0[index->l0] + mhop->l1_idx) =
+ table->bt_l1_dma_addr[index->l1];
}
/*
* alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
* alloc bt space chunk for MTT/CQE.
*/
- size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
- table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
- size >> PAGE_SHIFT,
- size,
- (table->lowmem ? GFP_KERNEL :
- GFP_HIGHUSER) | __GFP_NOWARN);
- if (!table->hem[hem_idx]) {
+ size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
+ flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
+ size, flag);
+ if (!table->hem[index->buf]) {
ret = -ENOMEM;
- goto err_alloc_hem_buf;
+ goto err_alloc_hem;
}
- hns_roce_hem_first(table->hem[hem_idx], &iter);
+ index->inited |= HEM_INDEX_BUF;
+ hns_roce_hem_first(table->hem[index->buf], &iter);
bt_ba = hns_roce_hem_addr(&iter);
-
if (table->type < HEM_TYPE_MTT) {
- if (hop_num == 2) {
- *(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
- step_idx = 2;
- } else if (hop_num == 1) {
- *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
- step_idx = 1;
- } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
- step_idx = 0;
- } else {
- ret = -EINVAL;
- goto err_dma_alloc_l1;
+ if (mhop->hop_num == 2)
+ *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
+ else if (mhop->hop_num == 1)
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ } else if (mhop->hop_num == 2) {
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ }
+
+ return 0;
+err_alloc_hem:
+ free_mhop_hem(hr_dev, table, mhop, index);
+out:
+ return ret;
+}
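The new alloc_mhop_hem() records each level it managed to allocate in index->inited (HEM_INDEX_L0, HEM_INDEX_L1, HEM_INDEX_BUF), so the single error path can hand the partially built state to free_mhop_hem() and release exactly what exists. A minimal userspace sketch of the same bookkeeping idiom, with illustrative names (only the flag scheme mirrors the driver's HEM_INDEX_* bits):

#include <stdio.h>
#include <stdlib.h>

#define IDX_L0  0x1	/* mirrors HEM_INDEX_L0 */
#define IDX_L1  0x2	/* mirrors HEM_INDEX_L1 */
#define IDX_BUF 0x4	/* mirrors HEM_INDEX_BUF */

struct levels { void *l0, *l1, *buf; unsigned inited; };

/* free only the levels recorded in 'inited'; safe on any partial state */
static void free_levels(struct levels *lv)
{
	if (lv->inited & IDX_BUF) { free(lv->buf); lv->buf = NULL; }
	if (lv->inited & IDX_L1)  { free(lv->l1);  lv->l1  = NULL; }
	if (lv->inited & IDX_L0)  { free(lv->l0);  lv->l0  = NULL; }
	lv->inited = 0;
}

static int alloc_levels(struct levels *lv)
{
	lv->inited = 0;
	if (!(lv->l0 = malloc(64)))
		goto err;
	lv->inited |= IDX_L0;
	if (!(lv->l1 = malloc(64)))
		goto err;
	lv->inited |= IDX_L1;
	if (!(lv->buf = malloc(4096)))
		goto err;
	lv->inited |= IDX_BUF;
	return 0;
err:
	free_levels(lv);	/* one exit path handles every partial failure */
	return -1;
}

int main(void)
{
	struct levels lv = { 0 };
	int ret = alloc_levels(&lv);

	printf("alloc %s\n", ret ? "failed" : "ok");
	free_levels(&lv);
	return 0;
}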
+
+static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int step_idx;
+ int ret = 0;
+
+ if (index->inited & HEM_INDEX_L0) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM step 0 failed!\n");
+ goto out;
}
+ }
- /* set HEM base address to hardware */
- if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
- ret = -ENODEV;
- dev_err(dev, "set HEM base address to HW failed!\n");
- goto err_alloc_hem_buf;
+ if (index->inited & HEM_INDEX_L1) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM step 1 failed!\n");
+ goto out;
}
- } else if (hop_num == 2) {
- *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
}
- ++table->hem[hem_idx]->refcount;
- goto out;
+ if (index->inited & HEM_INDEX_BUF) {
+ if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = mhop->hop_num;
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+ if (ret)
+ ibdev_err(ibdev, "set HEM step last failed!\n");
+ }
+out:
+ return ret;
+}
-err_alloc_hem_buf:
- if (bt_l1_allocated) {
- dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
- table->bt_l1_dma_addr[bt_l1_idx]);
- table->bt_l1[bt_l1_idx] = NULL;
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
+ int ret;
+
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "calc hem config failed!\n");
+ return ret;
+ }
+
+ mutex_lock(&table->mutex);
+ if (table->hem[index.buf]) {
+ ++table->hem[index.buf]->refcount;
+ goto out;
}
-err_dma_alloc_l1:
- if (bt_l0_allocated) {
- dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
- table->bt_l0_dma_addr[bt_l0_idx]);
- table->bt_l0[bt_l0_idx] = NULL;
+ ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "alloc mhop hem failed!\n");
+ goto out;
}
+ /* set HEM base address to hardware */
+ if (table->type < HEM_TYPE_MTT) {
+ ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM address to HW failed!\n");
+ goto err_alloc;
+ }
+ }
+
+ ++table->hem[index.buf]->refcount;
+ goto out;
+
+err_alloc:
+ free_mhop_hem(hr_dev, table, &mhop, &index);
out:
mutex_unlock(&table->mutex);
return ret;
@@ -656,116 +725,75 @@ out:
return ret;
}
+static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 hop_num = mhop->hop_num;
+ u32 chunk_ba_num;
+ int step_idx;
+
+ index->inited = HEM_INDEX_BUF;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ if (check_whether_bt_num_2(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem))
+ index->inited |= HEM_INDEX_L0;
+ } else if (check_whether_bt_num_3(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem)) {
+ index->inited |= HEM_INDEX_L1;
+ if (hns_roce_check_bt_null(table->bt_l1, index->l1,
+ chunk_ba_num))
+ index->inited |= HEM_INDEX_L0;
+ }
+ }
+
+ if (table->type < HEM_TYPE_MTT) {
+ if (hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = hop_num;
+
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
+ ibdev_warn(ibdev, "Clear hop%d HEM failed.\n", hop_num);
+
+ if (index->inited & HEM_INDEX_L1)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ ibdev_warn(ibdev, "Clear HEM step 1 failed.\n");
+
+ if (index->inited & HEM_INDEX_L0)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ ibdev_warn(ibdev, "Clear HEM step 0 failed.\n");
+ }
+}
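clear_mhop_hem() only flags a parent BT level for release after verifying that every child slot covered by its chunk is empty; hns_roce_check_hem_null() and hns_roce_check_bt_null() perform that scan. A hedged standalone sketch of the emptiness check, with hypothetical names and chunk geometry:

#include <stdbool.h>
#include <stdio.h>

/*
 * True when every slot of the chunk containing 'idx' is NULL, i.e. the
 * parent table entry backs nothing and its chunk may be freed. Plays the
 * role of hns_roce_check_hem_null()/hns_roce_check_bt_null().
 */
static bool chunk_is_empty(void **slots, unsigned idx,
			   unsigned chunk_len, unsigned total)
{
	unsigned start = idx - idx % chunk_len;
	unsigned end = start + chunk_len < total ? start + chunk_len : total;

	for (unsigned i = start; i < end; i++)
		if (slots[i])
			return false;
	return true;
}

int main(void)
{
	void *slots[8] = { 0 };

	slots[5] = slots;	/* one live entry in the second chunk of 4 */
	printf("chunk0 empty: %d, chunk1 empty: %d\n",
	       chunk_is_empty(slots, 0, 4, 8),
	       chunk_is_empty(slots, 5, 4, 8));
	return 0;
}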
+
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj,
int check_refcount)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_mhop mhop;
- unsigned long mhop_obj = obj;
- u32 bt_chunk_size;
- u32 chunk_ba_num;
- u32 hop_num;
- u32 start_idx;
- u32 bt_num;
- u64 hem_idx;
- u64 bt_l1_idx = 0;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
int ret;
- ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
- if (ret)
- return;
-
- bt_chunk_size = mhop.bt_chunk_size;
- hop_num = mhop.hop_num;
- chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
-
- bt_num = hns_roce_get_bt_num(table->type, hop_num);
- switch (bt_num) {
- case 3:
- hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
- mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
- bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- break;
- case 2:
- hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
- break;
- case 1:
- hem_idx = mhop.l0_idx;
- break;
- default:
- dev_err(dev, "Table %d not support hop_num = %d!\n",
- table->type, hop_num);
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "calc hem config failed!\n");
return;
}
mutex_lock(&table->mutex);
-
- if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
+ if (check_refcount && (--table->hem[index.buf]->refcount > 0)) {
mutex_unlock(&table->mutex);
return;
}
- if (table->type < HEM_TYPE_MTT && hop_num == 1) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
- dev_warn(dev, "Clear HEM base address failed.\n");
- } else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
- dev_warn(dev, "Clear HEM base address failed.\n");
- } else if (table->type < HEM_TYPE_MTT &&
- hop_num == HNS_ROCE_HOP_NUM_0) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
- dev_warn(dev, "Clear HEM base address failed.\n");
- }
-
- /*
- * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
- * free bt space chunk for MTT/CQE.
- */
- hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
- table->hem[hem_idx] = NULL;
-
- if (check_whether_bt_num_2(table->type, hop_num)) {
- start_idx = mhop.l0_idx * chunk_ba_num;
- if (hns_roce_check_hem_null(table->hem, start_idx,
- chunk_ba_num, table->num_hem)) {
- if (table->type < HEM_TYPE_MTT &&
- hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
- dev_warn(dev, "Clear HEM base address failed.\n");
-
- dma_free_coherent(dev, bt_chunk_size,
- table->bt_l0[mhop.l0_idx],
- table->bt_l0_dma_addr[mhop.l0_idx]);
- table->bt_l0[mhop.l0_idx] = NULL;
- }
- } else if (check_whether_bt_num_3(table->type, hop_num)) {
- start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
- mhop.l1_idx * chunk_ba_num;
- if (hns_roce_check_hem_null(table->hem, start_idx,
- chunk_ba_num, table->num_hem)) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
- dev_warn(dev, "Clear HEM base address failed.\n");
-
- dma_free_coherent(dev, bt_chunk_size,
- table->bt_l1[bt_l1_idx],
- table->bt_l1_dma_addr[bt_l1_idx]);
- table->bt_l1[bt_l1_idx] = NULL;
-
- start_idx = mhop.l0_idx * chunk_ba_num;
- if (hns_roce_check_bt_null(table->bt_l1, start_idx,
- chunk_ba_num)) {
- if (hr_dev->hw->clear_hem(hr_dev, table, obj,
- 0))
- dev_warn(dev, "Clear HEM base address failed.\n");
-
- dma_free_coherent(dev, bt_chunk_size,
- table->bt_l0[mhop.l0_idx],
- table->bt_l0_dma_addr[mhop.l0_idx]);
- table->bt_l0[mhop.l0_idx] = NULL;
- }
- }
- }
+ clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ free_mhop_hem(hr_dev, table, &mhop, &index);
mutex_unlock(&table->mutex);
}
@@ -1383,6 +1411,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
void *cpu_base;
u64 phy_base;
int ret = 0;
+ int ba_num;
int offset;
int total;
int step;
@@ -1393,12 +1422,16 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
if (root_hem)
return 0;
+ ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+ if (ba_num < 1)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&temp_root);
- total = r->offset;
+ offset = r->offset;
/* point to the last region */
r = &regions[region_cnt - 1];
- root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
- unit, true, 0);
+ root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true, 0);
if (!root_hem)
return -ENOMEM;
list_add(&root_hem->list, &temp_root);
@@ -1410,7 +1443,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
INIT_LIST_HEAD(&temp_list[i]);
total = 0;
- for (i = 0; i < region_cnt && total < unit; i++) {
+ for (i = 0; i < region_cnt && total < ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1443,7 +1476,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
/* if a mid-level bt exists, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem,
&hem_list->mid_bt[i][1], list) {
- offset = hem->start / step * BA_BYTE_LEN;
+ offset = (hem->start - r->offset) / step *
+ BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset,
hem->dma_addr);
total++;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index c6e66586e533..5ff028d77be3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -69,7 +69,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
struct hns_roce_wqe_data_seg *dseg = NULL;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_sq_db sq_db;
+ struct hns_roce_sq_db sq_db = {};
int ps_opcode = 0, i = 0;
unsigned long flags = 0;
void *wqe = NULL;
@@ -106,7 +106,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
goto out;
}
- wqe = get_send_wqe(qp, wqe_idx);
+ wqe = hns_roce_get_send_wqe(qp, wqe_idx);
qp->sq.wrid[wqe_idx] = wr->wr_id;
/* Process RC and RD type WQEs separately */
@@ -318,8 +318,6 @@ out:
/* Memory barrier */
wmb();
- sq_db.u32_4 = 0;
- sq_db.u32_8 = 0;
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
SQ_DOORBELL_U32_4_SQ_HEAD_S,
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
@@ -351,7 +349,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_rq_db rq_db;
+ struct hns_roce_rq_db rq_db = {};
__le32 doorbell[2] = {0};
unsigned long flags = 0;
unsigned int wqe_idx;
@@ -380,7 +378,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
goto out;
}
- ctrl = get_recv_wqe(hr_qp, wqe_idx);
+ ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
roce_set_field(ctrl->rwqe_byte_12,
RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
@@ -418,9 +416,6 @@ out:
ROCEE_QP1C_CFG3_0_REG +
QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
} else {
- rq_db.u32_4 = 0;
- rq_db.u32_8 = 0;
-
roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
RQ_DOORBELL_U32_4_RQ_HEAD_S,
hr_qp->rq.head);
@@ -2289,9 +2284,10 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
if (is_send) {
/* SQ corresponds to CQE */
- sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
+ sq_wqe = hns_roce_get_send_wqe(*cur_qp,
+ roce_get_field(cqe->cqe_byte_4,
CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S)&
+ CQE_BYTE_4_WQE_INDEX_S) &
((*cur_qp)->sq.wqe_cnt-1));
switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
case HNS_ROCE_WQE_OPCODE_SEND:
@@ -3623,26 +3619,11 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (send_cq && send_cq != recv_cq)
__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
}
- hns_roce_unlock_cqs(send_cq, recv_cq);
-
hns_roce_qp_remove(hr_dev, hr_qp);
- hns_roce_qp_free(hr_dev, hr_qp);
-
- /* RC QP, release QPN */
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
- hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
-
- ib_umem_release(hr_qp->umem);
- if (!udata) {
- kfree(hr_qp->sq.wrid);
- kfree(hr_qp->rq.wrid);
+ hns_roce_unlock_cqs(send_cq, recv_cq);
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- }
+ hns_roce_qp_destroy(hr_dev, hr_qp, udata);
- kfree(hr_qp);
return 0;
}
@@ -3954,10 +3935,8 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
eq->cons_index++;
aeqes_found = 1;
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
- dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
eq->cons_index = 0;
- }
}
set_eq_cons_index_v1(eq, 0);
@@ -4007,11 +3986,8 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
ceqes_found = 1;
if (eq->cons_index >
- EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) {
- dev_warn(&eq->hr_dev->pdev->dev,
- "cons_index overflow, set back to 0.\n");
+ EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
eq->cons_index = 0;
- }
}
set_eq_cons_index_v1(eq, 0);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 12c4cd8e9378..c3316672b70e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -56,11 +56,45 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+/*
+ * mapped-value = 1 + real-value
+ * The hns wr opcode real values start from 0. In order to distinguish
+ * between initialized and uninitialized map values, we add 1 to the real
+ * value when defining the mapping, so that validity can be identified by
+ * checking whether the mapped value is greater than 0.
+ */
+#define HR_OPC_MAP(ib_key, hr_key) \
+ [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
+
+static const u32 hns_roce_op_code[] = {
+ HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
+ HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
+ HR_OPC_MAP(SEND, SEND),
+ HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
+ HR_OPC_MAP(RDMA_READ, RDMA_READ),
+ HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
+ HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
+ HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
+ HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
+ HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
+ HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
+ HR_OPC_MAP(REG_MR, FAST_REG_PMR),
+};
+
+static u32 to_hr_opcode(u32 ib_opcode)
+{
+ if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
+ return HNS_ROCE_V2_WQE_OP_MASK;
+
+ return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
+ HNS_ROCE_V2_WQE_OP_MASK;
+}
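Since the hardware opcodes start at 0, a plain designated-initializer table could not tell "mapped to opcode 0" from "slot never initialized"; storing real value + 1 reserves 0 as the hole marker, which to_hr_opcode() undoes on lookup. A standalone sketch of the idiom (the enum values and the OP_INVALID sentinel are invented for illustration):

#include <stdio.h>

#define OP_INVALID 0xffffffffu		/* stand-in for HNS_ROCE_V2_WQE_OP_MASK */
#define MAP(ib, hw) [ib] = 1 + (hw)	/* store real value + 1 */

enum { IB_SEND = 0, IB_WRITE = 1, IB_READ = 4, IB_MAX = 8 };

static const unsigned map[IB_MAX] = {
	MAP(IB_SEND, 0),	/* hw opcode 0 is still a valid mapping... */
	MAP(IB_WRITE, 3),
	/* IB_READ deliberately left unmapped: its slot stays 0 */
};

static unsigned to_hw(unsigned ib)
{
	if (ib >= IB_MAX)
		return OP_INVALID;
	/* ...because 0 is reserved to mean "uninitialized slot" */
	return map[ib] ? map[ib] - 1 : OP_INVALID;
}

int main(void)
{
	printf("SEND -> %u, READ -> 0x%x\n", to_hw(IB_SEND), to_hw(IB_READ));
	return 0;
}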
+
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
- struct hns_roce_wqe_frmr_seg *fseg,
- const struct ib_reg_wr *wr)
+ void *wqe, const struct ib_reg_wr *wr)
{
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
+ struct hns_roce_wqe_frmr_seg *fseg = wqe;
/* use ib_access_flags */
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
@@ -92,16 +126,26 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
-static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
- const struct ib_atomic_wr *wr)
+static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ int valid_num_sge)
{
- if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
- aseg->cmp_data = cpu_to_le64(wr->compare_add);
+ struct hns_roce_wqe_atomic_seg *aseg;
+
+ set_data_seg_v2(wqe, wr->sg_list);
+ aseg = wqe + sizeof(struct hns_roce_v2_wqe_data_seg);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
+ aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
} else {
- aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
+ aseg->fetchadd_swap_data =
+ cpu_to_le64(atomic_wr(wr)->compare_add);
aseg->cmp_data = 0;
}
+
+ roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
@@ -127,7 +171,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
* should calculate how many sges in the first page and the second
* page.
*/
- dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
+ dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
(uintptr_t)dseg) /
sizeof(struct hns_roce_v2_wqe_data_seg);
@@ -137,7 +181,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
set_data_seg_v2(dseg++, sg + i);
(*sge_ind)++;
}
- dseg = get_send_extend_sge(qp,
+ dseg = hns_roce_get_extend_sge(qp,
(*sge_ind) & (qp->sge.sge_cnt - 1));
for (i = 0; i < se_sge_num; i++) {
set_data_seg_v2(dseg++, sg + fi_sge_num + i);
@@ -154,11 +198,11 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
- int valid_num_sge,
- const struct ib_send_wr **bad_wr)
+ int valid_num_sge)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
int j = 0;
int i;
@@ -166,15 +210,14 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
if (le32_to_cpu(rc_sq_wqe->msg_len) >
hr_dev->caps.max_sq_inline) {
- *bad_wr = wr;
- dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
- rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
+ ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
+ rc_sq_wqe->msg_len,
+ hr_dev->caps.max_sq_inline);
return -EINVAL;
}
if (wr->opcode == IB_WR_RDMA_READ) {
- *bad_wr = wr;
- dev_err(hr_dev->dev, "Not support inline data!\n");
+ ibdev_err(ibdev, "Not support inline data!\n");
return -EINVAL;
}
@@ -220,62 +263,287 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
return 0;
}
-static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state);
-
static int check_send_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct ib_qp *ibqp = &hr_qp->ibqp;
- struct device *dev = hr_dev->dev;
if (unlikely(ibqp->qp_type != IB_QPT_RC &&
ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) {
- dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+ ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
+ ibqp->qp_type);
return -EOPNOTSUPP;
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
hr_qp->state == IB_QPS_INIT ||
hr_qp->state == IB_QPS_RTR)) {
- dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state);
+ ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
+ hr_qp->state);
return -EINVAL;
} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
- dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state);
+ ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
+ hr_dev->state);
return -EIO;
}
return 0;
}
+static inline int calc_wr_sge_num(const struct ib_send_wr *wr, u32 *sge_len)
+{
+ int valid_num = 0;
+ u32 len = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ len += wr->sg_list[i].length;
+ valid_num++;
+ }
+ }
+
+ *sge_len = len;
+ return valid_num;
+}
+
+static inline int set_ud_wqe(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ void *wqe, unsigned int *sge_idx,
+ unsigned int owner_bit)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
+ unsigned int curr_idx = *sge_idx;
+ int valid_num_sge;
+ u32 msg_len = 0;
+ bool loopback;
+ u8 *smac;
+
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+ memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
+ V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
+ V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
+ V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
+ V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
+ roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]);
+
+ /* MAC loopback */
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac, smac) ? 1 : 0;
+
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
+
+ roce_set_field(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+
+ ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+ break;
+ default:
+ ud_sq_wqe->immtdata = 0;
+ break;
+ }
+
+ /* Set sig attr */
+ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ /* Set se attr */
+ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
+
+ roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
+ V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);
+
+ roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
+
+ roce_set_field(ud_sq_wqe->byte_20,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ curr_idx & (qp->sge.sge_cnt - 1));
+
+ roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
+ ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey);
+ roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);
+
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
+ V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
+ roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port);
+
+ roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+ ah->av.vlan_en ? 1 : 0);
+ roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index);
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2);
+
+ set_extend_sge(qp, wr, &curr_idx, valid_num_sge);
+
+ *sge_idx = curr_idx;
+
+ return 0;
+}
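All of the WQE packing above goes through roce_set_field()/roce_set_bit() style masked read-modify-write on 32-bit words. A generic userspace sketch of that idiom; the field positions are invented and do not reflect the real UD WQE layout:

#include <stdint.h>
#include <stdio.h>

/* write 'val' into the bits of *word selected by 'mask' (a shifted field) */
static void set_field(uint32_t *word, uint32_t mask, unsigned shift,
		      uint32_t val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t byte_40 = 0;

	/* hypothetical 3-bit SL field at bits 4..6, 1-bit LBI at bit 7 */
	set_field(&byte_40, 0x70, 4, 5);	/* SL = 5 */
	set_field(&byte_40, 0x80, 7, 1);	/* loopback enable */
	printf("byte_40 = 0x%08x\n", byte_40);	/* prints 0x000000d0 */
	return 0;
}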
+
+static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ void *wqe, unsigned int *sge_idx,
+ unsigned int owner_bit)
+{
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+ unsigned int curr_idx = *sge_idx;
+ int valid_num_sge;
+ u32 msg_len = 0;
+ int ret = 0;
+
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+
+ rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+ break;
+ case IB_WR_SEND_WITH_INV:
+ rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ rc_sq_wqe->immtdata = 0;
+ break;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
+
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+ rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ case IB_WR_REG_MR:
+ set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr));
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
+ break;
+ default:
+ break;
+ }
+
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ to_hr_opcode(wr->opcode));
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ set_atomic_seg(wr, wqe, rc_sq_wqe, valid_num_sge);
+ else if (wr->opcode != IB_WR_REG_MR)
+ ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
+ wqe, &curr_idx, valid_num_sge);
+
+ *sge_idx = curr_idx;
+
+ return ret;
+}
+
+static inline void update_sq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *qp)
+{
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ if the QP state
+ * gets into errored mode. Hence, as a workaround to this
+ * hardware limitation, the driver needs to assist in flushing.
+ * But the flushing operation uses a mailbox to convey the QP
+ * state to the hardware, and the mailbox call can sleep due to
+ * the mutex protection around it. Hence, use the deferred flush
+ * for now.
+ */
+ if (qp->state == IB_QPS_ERR) {
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+ } else {
+ struct hns_roce_v2_db sq_db = {};
+
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+ V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+ V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
+ V2_DB_PARAMETER_IDX_S,
+ qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+ V2_DB_PARAMETER_SL_S, qp->sl);
+
+ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+ }
+}
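The test_and_set_bit() guard in update_sq_db() ensures the flush work is queued at most once per error episode, even if several posters race on the errored QP. A userspace sketch of the same queue-once pattern using C11 atomics (function names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag flush_pending = ATOMIC_FLAG_INIT;

/* stand-in for init_flush_work(): must not be queued twice */
static void queue_flush_work(void)
{
	puts("flush work queued");
}

/* called from hot paths that may race; only the first caller queues */
static void request_flush(void)
{
	if (!atomic_flag_test_and_set(&flush_pending))
		queue_flush_work();
}

int main(void)
{
	request_flush();	/* queues the work */
	request_flush();	/* no-op: already pending */
	return 0;
}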
+
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
- struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
- struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
- struct hns_roce_wqe_frmr_seg *fseg;
- struct device *dev = hr_dev->dev;
- struct hns_roce_v2_db sq_db;
- struct ib_qp_attr attr;
+ unsigned long flags = 0;
unsigned int owner_bit;
unsigned int sge_idx;
unsigned int wqe_idx;
- unsigned long flags;
- int valid_num_sge;
void *wqe = NULL;
- bool loopback;
- int attr_mask;
- u32 tmp_len;
- u32 hr_op;
- u8 *smac;
int nreq;
int ret;
- int i;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -298,327 +566,37 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
- dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, qp->sq.max_gs);
+ ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, qp->sq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
- wqe = get_send_wqe(qp, wqe_idx);
+ wqe = hns_roce_get_send_wqe(qp, wqe_idx);
qp->sq.wrid[wqe_idx] = wr->wr_id;
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
- valid_num_sge = 0;
- tmp_len = 0;
-
- for (i = 0; i < wr->num_sge; i++) {
- if (likely(wr->sg_list[i].length)) {
- tmp_len += wr->sg_list[i].length;
- valid_num_sge++;
- }
- }
/* Process the WQE according to the QP type */
- if (ibqp->qp_type == IB_QPT_GSI) {
- ud_sq_wqe = wqe;
- memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
-
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
- V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
- V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
- V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
- roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
- V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
- roce_set_field(ud_sq_wqe->byte_48,
- V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
- V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
- ah->av.mac[4]);
- roce_set_field(ud_sq_wqe->byte_48,
- V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
- V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
- ah->av.mac[5]);
-
- /* MAC loopback */
- smac = (u8 *)hr_dev->dev_addr[qp->port];
- loopback = ether_addr_equal_unaligned(ah->av.mac,
- smac) ? 1 : 0;
-
- roce_set_bit(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
-
- roce_set_field(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
- V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
-
- ud_sq_wqe->msg_len =
- cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
-
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ud_sq_wqe->immtdata =
- cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
- break;
- default:
- ud_sq_wqe->immtdata = 0;
- break;
- }
+ if (ibqp->qp_type == IB_QPT_GSI)
+ ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ else if (ibqp->qp_type == IB_QPT_RC)
+ ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
- /* Set sig attr */
- roce_set_bit(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
-
- /* Set se attr */
- roce_set_bit(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
-
- roce_set_bit(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
-
- roce_set_field(ud_sq_wqe->byte_16,
- V2_UD_SEND_WQE_BYTE_16_PD_M,
- V2_UD_SEND_WQE_BYTE_16_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
-
- roce_set_field(ud_sq_wqe->byte_16,
- V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
- valid_num_sge);
-
- roce_set_field(ud_sq_wqe->byte_20,
- V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_idx & (qp->sge.sge_cnt - 1));
-
- roce_set_field(ud_sq_wqe->byte_24,
- V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
- V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
- ud_sq_wqe->qkey =
- cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
- qp->qkey : ud_wr(wr)->remote_qkey);
- roce_set_field(ud_sq_wqe->byte_32,
- V2_UD_SEND_WQE_BYTE_32_DQPN_M,
- V2_UD_SEND_WQE_BYTE_32_DQPN_S,
- ud_wr(wr)->remote_qpn);
-
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_VLAN_M,
- V2_UD_SEND_WQE_BYTE_36_VLAN_S,
- ah->av.vlan_id);
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
- V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
- ah->av.hop_limit);
- roce_set_field(ud_sq_wqe->byte_36,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
- V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
- ah->av.tclass);
- roce_set_field(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
- V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
- ah->av.flowlabel);
- roce_set_field(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_SL_M,
- V2_UD_SEND_WQE_BYTE_40_SL_S,
- ah->av.sl);
- roce_set_field(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_PORTN_M,
- V2_UD_SEND_WQE_BYTE_40_PORTN_S,
- qp->port);
-
- roce_set_bit(ud_sq_wqe->byte_40,
- V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
- ah->av.vlan_en ? 1 : 0);
- roce_set_field(ud_sq_wqe->byte_48,
- V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
- V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
- hns_get_gid_index(hr_dev, qp->phy_port,
- ah->av.gid_index));
-
- memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
- GID_LEN_V2);
-
- set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
- } else if (ibqp->qp_type == IB_QPT_RC) {
- rc_sq_wqe = wqe;
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
-
- rc_sq_wqe->msg_len =
- cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
-
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- rc_sq_wqe->immtdata =
- cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
- break;
- case IB_WR_SEND_WITH_INV:
- rc_sq_wqe->inv_key =
- cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- default:
- rc_sq_wqe->immtdata = 0;
- break;
- }
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_FENCE_S,
- (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
-
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
-
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
- rc_sq_wqe->rkey =
- cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE:
- hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
- rc_sq_wqe->rkey =
- cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
- rc_sq_wqe->rkey =
- cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_SEND:
- hr_op = HNS_ROCE_V2_WQE_OP_SEND;
- break;
- case IB_WR_SEND_WITH_INV:
- hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
- break;
- case IB_WR_SEND_WITH_IMM:
- hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
- break;
- case IB_WR_LOCAL_INV:
- hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
- rc_sq_wqe->inv_key =
- cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- case IB_WR_REG_MR:
- hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
- fseg = wqe;
- set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
- rc_sq_wqe->rkey =
- cpu_to_le32(atomic_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(atomic_wr(wr)->remote_addr);
- break;
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
- rc_sq_wqe->rkey =
- cpu_to_le32(atomic_wr(wr)->rkey);
- rc_sq_wqe->va =
- cpu_to_le64(atomic_wr(wr)->remote_addr);
- break;
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- hr_op =
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
- break;
- case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- hr_op =
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
- break;
- default:
- hr_op = HNS_ROCE_V2_WQE_OP_MASK;
- break;
- }
-
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
-
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
- struct hns_roce_v2_wqe_data_seg *dseg;
-
- dseg = wqe;
- set_data_seg_v2(dseg, wr->sg_list);
- wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
- set_atomic_seg(wqe, atomic_wr(wr));
- roce_set_field(rc_sq_wqe->byte_16,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
- valid_num_sge);
- } else if (wr->opcode != IB_WR_REG_MR) {
- ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
- wqe, &sge_idx,
- valid_num_sge, bad_wr);
- if (ret)
- goto out;
- }
- } else {
- dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
- spin_unlock_irqrestore(&qp->sq.lock, flags);
+ if (ret) {
*bad_wr = wr;
- return -EOPNOTSUPP;
+ goto out;
}
}
out:
if (likely(nreq)) {
qp->sq.head += nreq;
+ qp->next_sge = sge_idx;
/* Memory barrier */
wmb();
-
- sq_db.byte_4 = 0;
- sq_db.parameter = 0;
-
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
- V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
- roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
- V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
- V2_DB_PARAMETER_IDX_S,
- qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
- V2_DB_PARAMETER_SL_S, qp->sl);
-
- hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
-
- qp->next_sge = sge_idx;
-
- if (qp->state == IB_QPS_ERR) {
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
-
- ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
- qp->state, IB_QPS_ERR);
- if (ret) {
- spin_unlock_irqrestore(&qp->sq.lock, flags);
- *bad_wr = wr;
- return ret;
- }
- }
+ update_sq_db(hr_dev, qp);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -643,13 +621,11 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list;
- struct device *dev = hr_dev->dev;
- struct ib_qp_attr attr;
unsigned long flags;
void *wqe = NULL;
- int attr_mask;
u32 wqe_idx;
int nreq;
int ret;
@@ -675,14 +651,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
- dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, hr_qp->rq.max_gs);
+ ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
+ wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
- wqe = get_recv_wqe(hr_qp, wqe_idx);
+ wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
for (i = 0; i < wr->num_sge; i++) {
if (!wr->sg_list[i].length)
@@ -717,20 +693,21 @@ out:
/* Memory barrier */
wmb();
- *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
-
+ /*
+ * Hip08 hardware cannot flush the WQEs in RQ if the QP state
+ * gets into errored mode. Hence, as a workaround to this
+ * hardware limitation, the driver needs to assist in flushing.
+ * But the flushing operation uses a mailbox to convey the QP
+ * state to the hardware, and the mailbox call can sleep due to
+ * the mutex protection around it. Hence, use the deferred flush
+ * for now.
+ */
if (hr_qp->state == IB_QPS_ERR) {
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
-
- ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
- attr_mask, hr_qp->state,
- IB_QPS_ERR);
- if (ret) {
- spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
- *bad_wr = wr;
- return ret;
- }
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
+ &hr_qp->flush_flag))
+ init_flush_work(hr_dev, hr_qp);
+ } else {
+ *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
}
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -1448,82 +1425,63 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-
- if (i == 0) {
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
- VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_qpc_bt_idx_num,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
- VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
- HNS_ROCE_VF_QPC_BT_NUM);
-
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_srqc_bt_idx_num,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
- VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
- HNS_ROCE_VF_SRQC_BT_NUM);
-
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
- VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
- roce_set_field(req_a->vf_cqc_bt_idx_num,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
- VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
- HNS_ROCE_VF_CQC_BT_NUM);
-
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
- VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
- roce_set_field(req_a->vf_mpt_bt_idx_num,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
- VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
- HNS_ROCE_VF_MPT_BT_NUM);
-
- roce_set_field(req_a->vf_eqc_bt_idx_num,
- VF_RES_A_DATA_5_VF_EQC_IDX_M,
- VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
- roce_set_field(req_a->vf_eqc_bt_idx_num,
- VF_RES_A_DATA_5_VF_EQC_NUM_M,
- VF_RES_A_DATA_5_VF_EQC_NUM_S,
- HNS_ROCE_VF_EQC_NUM);
- } else {
- roce_set_field(req_b->vf_smac_idx_num,
- VF_RES_B_DATA_1_VF_SMAC_IDX_M,
- VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
- roce_set_field(req_b->vf_smac_idx_num,
- VF_RES_B_DATA_1_VF_SMAC_NUM_M,
- VF_RES_B_DATA_1_VF_SMAC_NUM_S,
- HNS_ROCE_VF_SMAC_NUM);
-
- roce_set_field(req_b->vf_sgid_idx_num,
- VF_RES_B_DATA_2_VF_SGID_IDX_M,
- VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
- roce_set_field(req_b->vf_sgid_idx_num,
- VF_RES_B_DATA_2_VF_SGID_NUM_M,
- VF_RES_B_DATA_2_VF_SGID_NUM_S,
- HNS_ROCE_VF_SGID_NUM);
-
- roce_set_field(req_b->vf_qid_idx_sl_num,
- VF_RES_B_DATA_3_VF_QID_IDX_M,
- VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
- roce_set_field(req_b->vf_qid_idx_sl_num,
- VF_RES_B_DATA_3_VF_SL_NUM_M,
- VF_RES_B_DATA_3_VF_SL_NUM_S,
- HNS_ROCE_VF_SL_NUM);
-
- roce_set_field(req_b->vf_sccc_idx_num,
- VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
- roce_set_field(req_b->vf_sccc_idx_num,
- VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
- VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
- HNS_ROCE_VF_SCCC_BT_NUM);
- }
}
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);
+
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
+ HNS_ROCE_VF_SRQC_BT_NUM);
+
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);
+
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);
+
+ roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
+ VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
+ roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
+ VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);
+
+ roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
+ VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
+ roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
+ VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);
+
+ roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
+ VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
+ roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
+ VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);
+
+ roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
+ VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
+ roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
+ VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);
+
+ roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
+ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
+ roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
+ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
+ HNS_ROCE_VF_SCCC_BT_NUM);
+
return hns_roce_cmq_send(hr_dev, desc, 2);
}
@@ -1691,7 +1649,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
@@ -1939,7 +1897,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
&caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
caps->sccc_hop_num = ctx_hop_num;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
@@ -1999,7 +1957,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
- if (hr_dev->pci_dev->revision == 0x21) {
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
ret = hns_roce_query_pf_timer_resource(hr_dev);
if (ret) {
dev_err(hr_dev->dev,
@@ -2007,16 +1965,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret);
return ret;
}
- }
-
- ret = hns_roce_alloc_vf_resource(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
- ret);
- return ret;
- }
- if (hr_dev->pci_dev->revision == 0x21) {
ret = hns_roce_set_vf_switch_param(hr_dev, 0);
if (ret) {
dev_err(hr_dev->dev,
@@ -2046,6 +1995,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
if (ret)
set_default_caps(hr_dev);
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
@@ -2298,7 +2254,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
- if (hr_dev->pci_dev->revision == 0x21)
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B)
hns_roce_function_clear(hr_dev);
hns_roce_free_link_table(hr_dev, &priv->tpq);
@@ -2461,7 +2417,9 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
if (ret)
- dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to configure sgid table, ret = %d!\n",
+ ret);
return ret;
}
@@ -2757,7 +2715,7 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
- *hr_cq->set_ci_db = cons_index & 0xffffff;
+ *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -2942,7 +2900,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
- wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
+ wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
data_len = wc->byte_len;
for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
@@ -3013,13 +2971,11 @@ out:
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
struct hns_roce_srq *srq = NULL;
- struct hns_roce_dev *hr_dev;
struct hns_roce_v2_cqe *cqe;
struct hns_roce_qp *hr_qp;
struct hns_roce_wq *wq;
- struct ib_qp_attr attr;
- int attr_mask;
int is_send;
u16 wqe_ctr;
u32 opcode;
@@ -3043,16 +2999,17 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
V2_CQE_BYTE_16_LCL_QPN_S);
if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
- hr_dev = to_hr_dev(hr_cq->ib_cq.device);
hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (unlikely(!hr_qp)) {
- dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
- hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
+ ibdev_err(&hr_dev->ib_dev,
+ "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
return -EINVAL;
}
*cur_qp = hr_qp;
}
+ hr_qp = *cur_qp;
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
@@ -3137,14 +3094,24 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
break;
}
- /* flush cqe if wc status is error, excluding flush error */
- if ((wc->status != IB_WC_SUCCESS) &&
- (wc->status != IB_WC_WR_FLUSH_ERR)) {
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
- return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
- &attr, attr_mask,
- (*cur_qp)->state, IB_QPS_ERR);
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
+ * into errored mode. Hence, as a workaround to this hardware
+ * limitation, the driver needs to assist in flushing. But the flushing
+ * operation uses a mailbox to convey the QP state to the hardware, and
+ * the mailbox call can sleep due to the mutex protection around it.
+ * Hence, use the deferred flush for now: once a wc error is detected,
+ * the flush operation is scheduled.
+ */
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
+ status & HNS_ROCE_V2_CQE_STATUS_MASK);
+
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
+ init_flush_work(hr_dev, hr_qp);
+
+ return 0;
}
if (wc->status == IB_WC_WR_FLUSH_ERR)
@@ -3262,14 +3229,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->port_num = roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
wc->pkey_index = 0;
- memcpy(wc->smac, cqe->smac, 4);
- wc->smac[4] = roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_SMAC_4_M,
- V2_CQE_BYTE_28_SMAC_4_S);
- wc->smac[5] = roce_get_field(cqe->byte_28,
- V2_CQE_BYTE_28_SMAC_5_M,
- V2_CQE_BYTE_28_SMAC_5_S);
- wc->wc_flags |= IB_WC_WITH_SMAC;
+
if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_VID_M,
@@ -3567,14 +3527,9 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
-
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
@@ -3582,9 +3537,6 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
hr_qp->ibqp.srq) ? 0 :
ilog2((unsigned int)hr_qp->rq.wqe_cnt));
-
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
- V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
@@ -3604,280 +3556,53 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
*/
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
- V2_QPC_BYTE_4_TST_S, 0);
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
- roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
- V2_QPC_BYTE_4_SQPN_S, 0);
roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
- roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
- V2_QPC_BYTE_16_PD_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
- roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
- V2_QPC_BYTE_20_RQWS_S, 0);
set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
/* if there is no VLAN, the field must be set to 0xFFF */
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, 0);
-
- /*
- * Set some fields in context to zero, Because the default values
- * of all fields in context are zero, we need not set them to 0 again.
- * but we should set the relevant fields of context mask to 0.
- */
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
- roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
- roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
- V2_QPC_BYTE_60_TEMPID_S, 0);
-
- roce_set_field(qpc_mask->byte_60_qpst_tempid,
- V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_tempid,
- V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_tempid,
- V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
- roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
- roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
-
- if (hr_qp->rdb_en) {
+ if (hr_qp->rdb_en)
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
- roce_set_bit(qpc_mask->byte_68_rq_db,
- V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
- }
roce_set_field(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
((u32)hr_qp->rdb.dma) >> 1);
- roce_set_field(qpc_mask->byte_68_rq_db,
- V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
- V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
- qpc_mask->rq_db_record_addr = 0;
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
- V2_QPC_BYTE_80_RX_CQN_S, 0);
if (ibqp->srq) {
roce_set_field(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
to_hr_srq(ibqp->srq)->srqn);
- roce_set_field(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_SRQ_EN_S, 1);
- roce_set_bit(qpc_mask->byte_76_srqn_op_en,
- V2_QPC_BYTE_76_SRQ_EN_S, 0);
}
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
- V2_QPC_BYTE_92_SRQ_INFO_S, 0);
-
- roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
- V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
-
- roce_set_field(qpc_mask->byte_104_rq_sge,
- V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
- V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
-
- roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
- V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
- roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
- V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
-
- qpc_mask->rq_rnr_timer = 0;
- qpc_mask->rx_msg_len = 0;
- qpc_mask->rx_rkey_pkt_info = 0;
- qpc_mask->rx_va = 0;
-
- roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
- V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
- roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
- V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
-
- roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
- 0);
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
- V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
- V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
-
- roce_set_field(qpc_mask->byte_144_raq,
- V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
- V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
- roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
- V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
- roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
-
- roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
- V2_QPC_BYTE_148_RQ_MSN_S, 0);
- roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
- V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
-
- roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
- V2_QPC_BYTE_152_RAQ_PSN_S, 0);
- roce_set_field(qpc_mask->byte_152_raq,
- V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
- V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
-
- roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
- V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
-
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
- roce_set_field(qpc_mask->byte_160_sq_ci_pi,
- V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
-
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
- roce_set_bit(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
- V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
-
roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
- roce_set_field(qpc_mask->byte_172_sq_psn,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
-
- roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
- 0);
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
- roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
-
- roce_set_field(qpc_mask->byte_176_msg_pktn,
- V2_QPC_BYTE_176_MSG_USE_PKTN_M,
- V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
- roce_set_field(qpc_mask->byte_176_msg_pktn,
- V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
- V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
-
- roce_set_field(qpc_mask->byte_184_irrl_idx,
- V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
- V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
-
- qpc_mask->cur_sge_offset = 0;
-
- roce_set_field(qpc_mask->byte_192_ext_sge,
- V2_QPC_BYTE_192_CUR_SGE_IDX_M,
- V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
- roce_set_field(qpc_mask->byte_192_ext_sge,
- V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
- V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
-
- roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
- V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
-
- roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
- V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
- roce_set_field(qpc_mask->byte_200_sq_max,
- V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
- V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
-
- roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
- roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
-
- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
- V2_QPC_BYTE_212_CHECK_FLG_S, 0);
-
- qpc_mask->sq_timer = 0;
-
- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
- V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
- V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
- roce_set_field(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
- V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
-
- roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
- 0);
- roce_set_bit(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
- roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
- 0);
-
- qpc_mask->irrl_cur_sge_offset = 0;
-
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
- V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
- V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
- roce_set_field(qpc_mask->byte_240_irrl_tail,
- V2_QPC_BYTE_240_RX_ACK_MSN_M,
- V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
-
- roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
- V2_QPC_BYTE_248_IRRL_PSN_S, 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
- 0);
- roce_set_field(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
- V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
- 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn,
- V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
- roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
- 0);
hr_qp->access_flags = attr->qp_access_flags;
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, 0);
-
- roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
- V2_QPC_BYTE_252_ERR_TYPE_S, 0);
-
- roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
- V2_QPC_BYTE_256_RQ_CQE_IDX_M,
- V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
- roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
- V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
- V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
@@ -3987,21 +3712,22 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, int mtt_cnt,
u32 page_size)
{
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
if (hr_qp->rq.wqe_cnt < 1)
return true;
if (mtt_cnt < 1) {
- dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
- hr_qp->qpn);
+ ibdev_err(ibdev, "failed to find RQWQE buf ba of QP(0x%lx)\n",
+ hr_qp->qpn);
return false;
}
if (mtt_cnt < MTT_MIN_COUNT &&
(hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
- dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
- hr_qp->qpn);
+ ibdev_err(ibdev,
+ "failed to find next RQWQE buf ba of QP(0x%lx)\n",
+ hr_qp->qpn);
return false;
}
@@ -4016,7 +3742,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle_3;
dma_addr_t dma_handle_2;
@@ -4043,7 +3769,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
hr_qp->qpn, &dma_handle_2);
if (!mtts_2) {
- dev_err(dev, "qp irrl_table find failed\n");
+ ibdev_err(ibdev, "failed to find QP irrl_table\n");
return -EINVAL;
}
@@ -4051,12 +3777,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
hr_qp->qpn, &dma_handle_3);
if (!mtts_3) {
- dev_err(dev, "qp trrl_table find failed\n");
+ ibdev_err(ibdev, "failed to find QP trrl_table\n");
return -EINVAL;
}
if (attr_mask & IB_QP_ALT_PATH) {
- dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
+ ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n",
+ attr_mask);
return -EINVAL;
}
@@ -4201,7 +3928,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
/* mtu * (2^LP_PKTN_INI) must not be bigger than 1 message length (64 KB) */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
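The comment above caps mtu * (2^LP_PKTN_INI) at one 64 KB message. A standalone sketch of that arithmetic (user-space C; lp_pktn_ini_ok() is an illustrative helper, not a driver API) shows why the old value of 4 sits exactly at the cap for a 4096-byte MTU, and why 0 is the conservative floor used here:

#include <assert.h>

/* Illustrative check mirroring the constraint in the comment above:
 * mtu * (1 << lp_pktn_ini) must not exceed one 64 KB message. */
static int lp_pktn_ini_ok(unsigned int mtu, unsigned int lp_pktn_ini)
{
	return mtu * (1u << lp_pktn_ini) <= 64 * 1024;
}

int main(void)
{
	assert(lp_pktn_ini_ok(4096, 4));	/* 4096 * 16 = 64 KB, at the cap */
	assert(!lp_pktn_ini_ok(4096, 5));	/* 4096 * 32 = 128 KB, too big */
	assert(lp_pktn_ini_ok(4096, 0));	/* 2^0 = 1 packet, always safe */
	return 0;
}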
@@ -4259,7 +3986,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
u64 sge_cur_blk = 0;
u64 sq_cur_blk = 0;
u32 page_size;
@@ -4268,7 +3995,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
/* Search qp buf's mtts */
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
if (count < 1) {
- dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
+ ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n",
+ hr_qp->qpn);
return -EINVAL;
}
@@ -4278,16 +4006,15 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
hr_qp->sge.offset / page_size,
&sge_cur_blk, 1, NULL);
if (count < 1) {
- dev_err(dev, "qp(0x%lx) sge pa find failed\n",
- hr_qp->qpn);
+ ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n",
+ hr_qp->qpn);
return -EINVAL;
}
}
/* Alternate path and path migration are not supported */
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_PATH_MIG_STATE)) {
- dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
+ if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
+ ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
return -EINVAL;
}
@@ -4405,6 +4132,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol;
u16 vlan_id = 0xffff;
@@ -4446,13 +4174,13 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
- dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
- grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
+ ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
+ grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
return -EINVAL;
}
if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
- dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
+ ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
return -EINVAL;
}
@@ -4475,7 +4203,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
- if (hr_dev->pci_dev->revision == 0x21 && is_udp)
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B && is_udp)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
else
@@ -4530,7 +4258,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
/* Nothing */
;
} else {
- dev_err(hr_dev->dev, "Illegal state for QP!\n");
+ ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
ret = -EINVAL;
goto out;
}
@@ -4565,8 +4293,8 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
0);
} else {
- dev_warn(hr_dev->dev,
- "Local ACK timeout shall be 0 to 30.\n");
+ ibdev_warn(&hr_dev->ib_dev,
+ "Local ACK timeout shall be 0 to 30.\n");
}
}
@@ -4734,7 +4462,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context ctx[2];
struct hns_roce_v2_qp_context *context = ctx;
struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long sq_flag = 0;
+ unsigned long rq_flag = 0;
int ret;
/*
@@ -4752,6 +4482,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* When QP state is err, SQ and RQ WQE should be flushed */
if (new_state == IB_QPS_ERR) {
+ spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ hr_qp->state = IB_QPS_ERR;
roce_set_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
@@ -4759,8 +4491,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
if (!ibqp->srq) {
+ spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
@@ -4768,6 +4502,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
}
}
@@ -4791,7 +4526,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* SW pass context to HW */
ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
if (ret) {
- dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
goto out;
}
@@ -4848,10 +4583,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (ret) {
- dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
+ if (ret)
goto out;
- }
memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
@@ -4867,7 +4600,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context context = {};
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
int tmp_qp_state;
int state;
int ret;
@@ -4885,7 +4618,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
if (ret) {
- dev_err(dev, "query qpc error\n");
+ ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
ret = -EINVAL;
goto out;
}
@@ -4894,7 +4627,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
- dev_err(dev, "Illegal ib_qp_state\n");
+ ibdev_err(ibdev, "Illegal ib_qp_state\n");
ret = -EINVAL;
goto out;
}
@@ -4992,8 +4725,8 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_udata *udata)
{
- struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_cq *send_cq, *recv_cq;
unsigned long flags;
int ret = 0;
@@ -5002,7 +4735,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
if (ret)
- ibdev_err(ibdev, "modify QP to Reset failed.\n");
+ ibdev_err(ibdev,
+ "failed to modify QP to RST, ret = %d\n",
+ ret);
}
send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
@@ -5011,10 +4746,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
hns_roce_lock_cqs(send_cq, recv_cq);
- list_del(&hr_qp->node);
- list_del(&hr_qp->sq_node);
- list_del(&hr_qp->rq_node);
-
if (!udata) {
if (recv_cq)
__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
@@ -5032,43 +4763,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
- hns_roce_qp_free(hr_dev, hr_qp);
-
- /* Not special_QP, free their QPN */
- if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
- (hr_qp->ibqp.qp_type == IB_QPT_UD))
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-
- if (udata) {
- struct hns_roce_ucontext *context =
- rdma_udata_to_drv_context(
- udata,
- struct hns_roce_ucontext,
- ibucontext);
-
- if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
- hns_roce_db_unmap_user(context, &hr_qp->sdb);
-
- if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
- hns_roce_db_unmap_user(context, &hr_qp->rdb);
- } else {
- kfree(hr_qp->sq.wrid);
- kfree(hr_qp->rq.wrid);
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- if (hr_qp->rq.wqe_cnt)
- hns_roce_free_db(hr_dev, &hr_qp->rdb);
- }
- ib_umem_release(hr_qp->umem);
-
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hr_qp->rq.wqe_cnt) {
- kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
- kfree(hr_qp->rq_inl_buf.wqe_list);
- }
-
return ret;
}
@@ -5080,17 +4774,19 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
if (ret)
- ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to destroy QP 0x%06lx, ret = %d\n",
hr_qp->qpn, ret);
- kfree(hr_qp);
+ hns_roce_qp_destroy(hr_dev, hr_qp, udata);
return 0;
}
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
+ struct hns_roce_qp *hr_qp)
{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_sccc_clr_done *resp;
struct hns_roce_sccc_clr *clr;
struct hns_roce_cmq_desc desc;
@@ -5102,7 +4798,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
goto out;
}
@@ -5112,7 +4808,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
clr->qpn = cpu_to_le32(hr_qp->qpn);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
goto out;
}
@@ -5123,7 +4819,8 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
HNS_ROCE_OPC_QUERY_SCCC, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
- dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
+ ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
+ ret);
goto out;
}
@@ -5133,7 +4830,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
msleep(20);
}
- dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
+ ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
ret = -ETIMEDOUT;
out:
@@ -5177,99 +4874,65 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
- dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when modifying CQ, ret = %d\n",
+ ret);
return ret;
}
-static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
-{
- struct hns_roce_qp *hr_qp;
- struct ib_qp_attr attr;
- int attr_mask;
- int ret;
-
- hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
- if (!hr_qp) {
- dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
- return;
- }
-
- if (hr_qp->ibqp.uobject) {
- if (hr_qp->sdb_en == 1) {
- hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
- if (hr_qp->rdb_en == 1)
- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
- } else {
- dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
- return;
- }
- }
-
- attr_mask = IB_QP_STATE;
- attr.qp_state = IB_QPS_ERR;
- ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- hr_qp->state, IB_QPS_ERR);
- if (ret)
- dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
- qpn);
-}
-
static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work);
- struct device *dev = irq_work->hr_dev->dev;
+ struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
u32 qpn = irq_work->qpn;
u32 cqn = irq_work->cqn;
switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_info(dev, "Path migrated succeeded.\n");
+ ibdev_info(ibdev, "Path migration succeeded.\n");
break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "Path migration failed.\n");
+ ibdev_warn(ibdev, "Path migration failed.\n");
break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "Send queue drained.\n");
+ ibdev_warn(ibdev, "Send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
- qpn, irq_work->sub_type);
- hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
+ qpn, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_err(dev, "Invalid request local work queue 0x%x error.\n",
- qpn);
- hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
+ qpn);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
- qpn, irq_work->sub_type);
- hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
+ qpn, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- dev_warn(dev, "SRQ limit reach.\n");
+ ibdev_warn(ibdev, "SRQ limit reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
- dev_warn(dev, "SRQ last wqe reach.\n");
+ ibdev_warn(ibdev, "SRQ last wqe reach.\n");
break;
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- dev_err(dev, "SRQ catas error.\n");
+ ibdev_err(ibdev, "SRQ catas error.\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_err(dev, "CQ 0x%x access err.\n", cqn);
+ ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn);
break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- dev_warn(dev, "DB overflow.\n");
+ ibdev_warn(ibdev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- dev_warn(dev, "Function level reset.\n");
+ ibdev_warn(ibdev, "Function level reset.\n");
break;
default:
break;
@@ -5326,44 +4989,24 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
-static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset)
{
u32 buf_chk_sz;
- unsigned long off;
buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
- off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
-
- return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
- off % buf_chk_sz);
-}
-
-static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
-{
- u32 buf_chk_sz;
- unsigned long off;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
-
- off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
-
- if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
- return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
- off % buf_chk_sz);
+ if (eq->buf.nbufs == 1)
+ return eq->buf.direct.buf + offset % buf_chk_sz;
else
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
+ return eq->buf.page_list[offset / buf_chk_sz].buf +
+ offset % buf_chk_sz;
}
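get_eqe_buf() folds the two per-EQE-type lookup helpers into one offset-based one: a single-chunk ("direct") buffer is indexed with offset % chunk_size, while a paged buffer first selects the page with offset / chunk_size. A minimal user-space model of the same arithmetic (the demo_* types are stand-ins, not the driver's):

#include <stddef.h>

/* Stand-ins for the driver's buffer layout (illustrative only). */
struct demo_page { void *buf; };
struct demo_eq_buf {
	int nbufs;			/* 1 => single direct chunk */
	void *direct;			/* valid when nbufs == 1 */
	struct demo_page *page_list;	/* valid otherwise */
};

/* Same arithmetic as get_eqe_buf(): pick the chunk with offset /
 * chunk_size, then index into it with offset % chunk_size. */
static void *demo_get_eqe(const struct demo_eq_buf *buf, size_t offset,
			  size_t chunk_size)
{
	if (buf->nbufs == 1)
		return (char *)buf->direct + offset % chunk_size;
	return (char *)buf->page_list[offset / chunk_size].buf +
	       offset % chunk_size;
}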
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_aeqe *aeqe;
- if (!eq->hop_num)
- aeqe = get_aeqe_v2(eq, eq->cons_index);
- else
- aeqe = mhop_get_aeqe(eq, eq->cons_index);
-
+ aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE);
return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
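The validity test relies on an owner bit that hardware flips each time it laps the ring: cons_index counts monotonically and entries is a power of two, so !!(cons_index & entries) is software's lap parity, and an entry is unconsumed exactly when the two bits disagree. A compact model (hypothetical names, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* Model of the owner-bit check in next_aeqe_sw_v2()/next_ceqe_sw_v2().
 * 'entries' must be a power of two; cons_index is never masked, so the
 * bit at position log2(entries) toggles once per lap around the ring. */
static bool demo_eqe_is_new(uint32_t owner_bit, uint32_t cons_index,
			    uint32_t entries)
{
	uint32_t sw_lap_parity = !!(cons_index & entries);

	/* a freshly written entry carries the complement of the current
	 * software lap parity, so the XOR is true only for new entries */
	return owner_bit ^ sw_lap_parity;
}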
@@ -5456,44 +5099,12 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
return aeqe_found;
}
-static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
-{
- u32 buf_chk_sz;
- unsigned long off;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
- off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
-
- return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
- off % buf_chk_sz);
-}
-
-static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
-{
- u32 buf_chk_sz;
- unsigned long off;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
-
- off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
-
- if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
- return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
- off % buf_chk_sz);
- else
- return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
- buf_chk_sz]) + off % buf_chk_sz);
-}
-
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe;
- if (!eq->hop_num)
- ceqe = get_ceqe_v2(eq, eq->cons_index);
- else
- ceqe = mhop_get_ceqe(eq, eq->cons_index);
-
+ ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE);
return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
@@ -5501,7 +5112,6 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq)
{
- struct device *dev = hr_dev->dev;
struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
int ceqe_found = 0;
u32 cqn;
@@ -5520,10 +5130,8 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
ceqe_found = 1;
- if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
- dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
eq->cons_index = 0;
- }
ceqe = next_ceqe_sw_v2(eq);
}
@@ -5653,90 +5261,11 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
}
-static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = hr_dev->dev;
- u64 idx;
- u64 size;
- u32 buf_chk_sz;
- u32 bt_chk_sz;
- u32 mhop_num;
- int eqe_alloc;
- int i = 0;
- int j = 0;
-
- mhop_num = hr_dev->caps.eqe_hop_num;
- buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
- bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0) {
- dma_free_coherent(dev, (unsigned int)(eq->entries *
- eq->eqe_size), eq->bt_l0, eq->l0_dma);
- return;
- }
-
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
- if (mhop_num == 1) {
- for (i = 0; i < eq->l0_last_num; i++) {
- if (i == eq->l0_last_num - 1) {
- eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
- size = (eq->entries - eqe_alloc) * eq->eqe_size;
- dma_free_coherent(dev, size, eq->buf[i],
- eq->buf_dma[i]);
- break;
- }
- dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
- eq->buf_dma[i]);
- }
- } else if (mhop_num == 2) {
- for (i = 0; i < eq->l0_last_num; i++) {
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
- eq->l1_dma[i]);
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
- if ((i == eq->l0_last_num - 1)
- && j == eq->l1_last_num - 1) {
- eqe_alloc = (buf_chk_sz / eq->eqe_size)
- * idx;
- size = (eq->entries - eqe_alloc)
- * eq->eqe_size;
- dma_free_coherent(dev, size,
- eq->buf[idx],
- eq->buf_dma[idx]);
- break;
- }
- dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
- eq->buf_dma[idx]);
- }
- }
- }
- kfree(eq->buf_dma);
- kfree(eq->buf);
- kfree(eq->l1_dma);
- kfree(eq->bt_l1);
- eq->buf_dma = NULL;
- eq->buf = NULL;
- eq->l1_dma = NULL;
- eq->bt_l1 = NULL;
-}
-
-static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- u32 buf_chk_sz;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
-
- if (hr_dev->caps.eqe_hop_num) {
- hns_roce_mhop_free_eq(hr_dev, eq);
- return;
- }
-
- dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
- eq->buf_list->map);
- kfree(eq->buf_list);
+ if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
+ hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf);
}
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
@@ -5744,6 +5273,8 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
void *mb_buf)
{
struct hns_roce_eq_context *eqc;
+ u64 ba[MTT_MIN_COUNT] = { 0 };
+ int count;
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
@@ -5759,10 +5290,23 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
eq->shift = ilog2((unsigned int)eq->entries);
- if (!eq->hop_num)
- eq->eqe_ba = eq->buf_list->map;
- else
- eq->eqe_ba = eq->l0_dma;
+ /* if not multi-hop, the eqe buffer uses only one trunk */
+ if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) {
+ eq->eqe_ba = eq->buf.direct.map;
+ eq->cur_eqe_ba = eq->eqe_ba;
+ if (eq->buf.npages > 1)
+ eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz);
+ else
+ eq->nxt_eqe_ba = eq->eqe_ba;
+ } else {
+ count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba,
+ MTT_MIN_COUNT, &eq->eqe_ba);
+ eq->cur_eqe_ba = ba[0];
+ if (count > 1)
+ eq->nxt_eqe_ba = ba[1];
+ else
+ eq->nxt_eqe_ba = ba[0];
+ }
/* set eqc state */
roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
@@ -5860,220 +5404,97 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
-static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
+static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ u32 page_shift)
{
- struct device *dev = hr_dev->dev;
- int eq_alloc_done = 0;
- int eq_buf_cnt = 0;
- int eqe_alloc;
- u32 buf_chk_sz;
- u32 bt_chk_sz;
- u32 mhop_num;
- u64 size;
- u64 idx;
+ struct hns_roce_buf_region region = {};
+ dma_addr_t *buf_list = NULL;
int ba_num;
- int bt_num;
- int record_i;
- int record_j;
- int i = 0;
- int j = 0;
-
- mhop_num = hr_dev->caps.eqe_hop_num;
- buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
- bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+ int ret;
ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
- buf_chk_sz);
- bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0) {
- if (eq->entries > buf_chk_sz / eq->eqe_size) {
- dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
- eq->entries);
- return -EINVAL;
- }
- eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
- &(eq->l0_dma), GFP_KERNEL);
- if (!eq->bt_l0)
- return -ENOMEM;
+ 1 << page_shift);
+ hns_roce_init_buf_region(&region, hr_dev->caps.eqe_hop_num, 0, ba_num);
- eq->cur_eqe_ba = eq->l0_dma;
- eq->nxt_eqe_ba = 0;
+ /* alloc a tmp list for storing eq buf addresses */
+ ret = hns_roce_alloc_buf_list(&region, &buf_list, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "alloc eq buf_list error\n");
+ return ret;
+ }
- return 0;
+ ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count,
+ region.offset, &eq->buf);
+ if (ba_num != region.count) {
+ dev_err(hr_dev->dev, "get eqe buf err,expect %d,ret %d.\n",
+ region.count, ba_num);
+ ret = -ENOBUFS;
+ goto done;
}
- eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
- if (!eq->buf_dma)
- return -ENOMEM;
- eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
- if (!eq->buf)
- goto err_kcalloc_buf;
-
- if (mhop_num == 2) {
- eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
- if (!eq->l1_dma)
- goto err_kcalloc_l1_dma;
-
- eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
- if (!eq->bt_l1)
- goto err_kcalloc_bt_l1;
- }
-
- /* alloc L0 BT */
- eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
- if (!eq->bt_l0)
- goto err_dma_alloc_l0;
-
- if (mhop_num == 1) {
- if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
- dev_err(dev, "ba_num %d is too large for 1 hop\n",
- ba_num);
-
- /* alloc buf */
- for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
- if (eq_buf_cnt + 1 < ba_num) {
- size = buf_chk_sz;
- } else {
- eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
- size = (eq->entries - eqe_alloc) * eq->eqe_size;
- }
- eq->buf[i] = dma_alloc_coherent(dev, size,
- &(eq->buf_dma[i]),
- GFP_KERNEL);
- if (!eq->buf[i])
- goto err_dma_alloc_buf;
+ hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz,
+ page_shift);
+ ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, &region, 1);
+ if (ret)
+ dev_err(hr_dev->dev, "mtr attach error for eqe\n");
- *(eq->bt_l0 + i) = eq->buf_dma[i];
+ goto done;
- eq_buf_cnt++;
- if (eq_buf_cnt >= ba_num)
- break;
- }
- eq->cur_eqe_ba = eq->buf_dma[0];
- if (ba_num > 1)
- eq->nxt_eqe_ba = eq->buf_dma[1];
-
- } else if (mhop_num == 2) {
- /* alloc L1 BT and buf */
- for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
- eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
- &(eq->l1_dma[i]),
- GFP_KERNEL);
- if (!eq->bt_l1[i])
- goto err_dma_alloc_l1;
- *(eq->bt_l0 + i) = eq->l1_dma[i];
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- idx = i * bt_chk_sz / BA_BYTE_LEN + j;
- if (eq_buf_cnt + 1 < ba_num) {
- size = buf_chk_sz;
- } else {
- eqe_alloc = (buf_chk_sz / eq->eqe_size)
- * idx;
- size = (eq->entries - eqe_alloc)
- * eq->eqe_size;
- }
- eq->buf[idx] = dma_alloc_coherent(dev, size,
- &(eq->buf_dma[idx]),
- GFP_KERNEL);
- if (!eq->buf[idx])
- goto err_dma_alloc_buf;
-
- *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
-
- eq_buf_cnt++;
- if (eq_buf_cnt >= ba_num) {
- eq_alloc_done = 1;
- break;
- }
- }
+ hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
+done:
+ hns_roce_free_buf_list(&buf_list, 1);
- if (eq_alloc_done)
- break;
- }
- eq->cur_eqe_ba = eq->buf_dma[0];
- if (ba_num > 1)
- eq->nxt_eqe_ba = eq->buf_dma[1];
- }
+ return ret;
+}
- eq->l0_last_num = i + 1;
- if (mhop_num == 2)
- eq->l1_last_num = j + 1;
+static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ struct hns_roce_buf *buf = &eq->buf;
+ bool is_mhop = false;
+ u32 page_shift;
+ u32 mhop_num;
+ u32 max_size;
+ int ret;
- return 0;
+ page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz;
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ if (!mhop_num) {
+ max_size = 1 << page_shift;
+ buf->size = max_size;
+ } else if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ max_size = eq->entries * eq->eqe_size;
+ buf->size = max_size;
+ } else {
+ max_size = 1 << page_shift;
+ buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size);
+ is_mhop = true;
+ }
-err_dma_alloc_l1:
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
- eq->bt_l0 = NULL;
- eq->l0_dma = 0;
- for (i -= 1; i >= 0; i--) {
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
- eq->l1_dma[i]);
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- idx = i * bt_chk_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
- eq->buf_dma[idx]);
- }
+ ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift);
+ if (ret) {
+ dev_err(hr_dev->dev, "alloc eq buf error\n");
+ return ret;
}
- goto err_dma_alloc_l0;
-
-err_dma_alloc_buf:
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
- eq->bt_l0 = NULL;
- eq->l0_dma = 0;
-
- if (mhop_num == 1)
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
- eq->buf_dma[i]);
- else if (mhop_num == 2) {
- record_i = i;
- record_j = j;
- for (; i >= 0; i--) {
- dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
- eq->l1_dma[i]);
-
- for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
- if (i == record_i && j >= record_j)
- break;
-
- idx = i * bt_chk_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, buf_chk_sz,
- eq->buf[idx],
- eq->buf_dma[idx]);
- }
+
+ if (is_mhop) {
+ ret = map_eq_buf(hr_dev, eq, page_shift);
+ if (ret) {
+ dev_err(hr_dev->dev, "map roce buf error\n");
+ goto err_alloc;
}
}
-err_dma_alloc_l0:
- kfree(eq->bt_l1);
- eq->bt_l1 = NULL;
-
-err_kcalloc_bt_l1:
- kfree(eq->l1_dma);
- eq->l1_dma = NULL;
-
-err_kcalloc_l1_dma:
- kfree(eq->buf);
- eq->buf = NULL;
-
-err_kcalloc_buf:
- kfree(eq->buf_dma);
- eq->buf_dma = NULL;
-
- return -ENOMEM;
+ return 0;
+err_alloc:
+ hns_roce_buf_free(hr_dev, buf->size, buf);
+ return ret;
}
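alloc_eq_buf() sizes the buffer three ways: no hop means exactly one chunk of 2^page_shift bytes (the whole EQ must fit in it), hop-num-0 means the flat EQE array itself, and multi-hop means the page-aligned array mapped through an mtr. A worked example under assumed values (4 KB pages, eqe_buf_pg_sz = 0, 4096 entries of 16-byte AEQEs):

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12	/* assumed 4 KB kernel pages */

static unsigned int page_align(unsigned int x)
{
	unsigned int mask = (1u << DEMO_PAGE_SHIFT) - 1;

	return (x + mask) & ~mask;
}

int main(void)
{
	unsigned int entries = 4096, eqe_size = 16;	/* 64 KB of EQEs */
	unsigned int page_shift = DEMO_PAGE_SHIFT + 0;	/* + eqe_buf_pg_sz */

	/* no hop: one chunk; a non-mhop EQ must fit in this single chunk */
	printf("no hop: %u\n", 1u << page_shift);
	/* hop 0: the flat EQE array itself */
	printf("hop 0:  %u\n", entries * eqe_size);
	/* multi-hop: page-aligned EQE array, mapped through the mtr */
	printf("m-hop:  %u\n", page_align(entries * eqe_size));
	return 0;
}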
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
struct hns_roce_eq *eq,
unsigned int eq_cmd)
{
- struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
- u32 buf_chk_sz = 0;
int ret;
/* Allocate mailbox memory */
@@ -6081,38 +5502,17 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- if (!hr_dev->caps.eqe_hop_num) {
- buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
-
- eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
- GFP_KERNEL);
- if (!eq->buf_list) {
- ret = -ENOMEM;
- goto free_cmd_mbox;
- }
-
- eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
- &(eq->buf_list->map),
- GFP_KERNEL);
- if (!eq->buf_list->buf) {
- ret = -ENOMEM;
- goto err_alloc_buf;
- }
-
- } else {
- ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
- if (ret) {
- ret = -ENOMEM;
- goto free_cmd_mbox;
- }
+ ret = alloc_eq_buf(hr_dev, eq);
+ if (ret) {
+ ret = -ENOMEM;
+ goto free_cmd_mbox;
}
-
hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
- dev_err(dev, "[mailbox cmd] create eqc failed.\n");
+ dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
goto err_cmd_mbox;
}
@@ -6121,16 +5521,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
return 0;
err_cmd_mbox:
- if (!hr_dev->caps.eqe_hop_num)
- dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
- eq->buf_list->map);
- else {
- hns_roce_mhop_free_eq(hr_dev, eq);
- goto free_cmd_mbox;
- }
-
-err_alloc_buf:
- kfree(eq->buf_list);
+ free_eq_buf(hr_dev, eq);
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -6292,8 +5683,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
goto err_request_irq_fail;
}
- hr_dev->irq_workq =
- create_singlethread_workqueue("hns_roce_irq_workqueue");
+ hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n");
ret = -ENOMEM;
@@ -6310,7 +5700,7 @@ err_request_irq_fail:
err_create_eq_fail:
for (i -= 1; i >= 0; i--)
- hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ free_eq_buf(hr_dev, &eq_table->eq[i]);
kfree(eq_table->eq);
return ret;
@@ -6332,7 +5722,7 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < eq_num; i++) {
hns_roce_v2_destroy_eqc(hr_dev, i);
- hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ free_eq_buf(hr_dev, &eq_table->eq[i]);
}
kfree(eq_table->eq);
@@ -6472,8 +5862,9 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(hr_dev->dev,
- "MODIFY SRQ Failed to cmd mailbox.\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when modifying SRQ, ret = %d\n",
+ ret);
return ret;
}
}
@@ -6499,7 +5890,9 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
HNS_ROCE_CMD_QUERY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
- dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying SRQ, ret = %d\n",
+ ret);
goto out;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 2a117ff6a6be..82dd9f6f4845 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -50,15 +50,14 @@
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ 0x100000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
-#define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
-#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
-#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
-#define HNS_ROCE_V2_MAX_SRQ_SGE_NUM 0x100
+#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
+#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
@@ -163,7 +162,7 @@ enum {
#define GID_LEN_V2 16
-#define HNS_ROCE_V2_CQE_QPN_MASK 0x3ffff
+#define HNS_ROCE_V2_CQE_QPN_MASK 0xfffff
enum {
HNS_ROCE_V2_WQE_OP_SEND = 0x0,
@@ -460,8 +459,8 @@ enum hns_roce_v2_qp_state {
HNS_ROCE_QP_ST_INIT,
HNS_ROCE_QP_ST_RTR,
HNS_ROCE_QP_ST_RTS,
- HNS_ROCE_QP_ST_SQER,
HNS_ROCE_QP_ST_SQD,
+ HNS_ROCE_QP_ST_SQER,
HNS_ROCE_QP_ST_ERR,
HNS_ROCE_QP_ST_SQ_DRAINING,
HNS_ROCE_QP_NUM_ST
@@ -1056,11 +1055,6 @@ struct hns_roce_v2_mpt_entry {
#define V2_DB_PARAMETER_SL_S 16
#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
-struct hns_roce_v2_cq_db {
- __le32 byte_4;
- __le32 parameter;
-};
-
#define V2_CQ_DB_BYTE_4_TAG_S 0
#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index b9898e71655a..176f34692f88 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -243,7 +243,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
/* Allocate MTT entry */
ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
mtt->mtt_type);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 780c780fdb22..b10c50b8736e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -60,14 +60,12 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ib_dev = ibpd->device;
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd = to_hr_pd(ibpd);
int ret;
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) {
- dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
+ ibdev_err(ib_dev, "failed to alloc pd, ret = %d\n", ret);
return ret;
}
@@ -76,7 +74,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
- dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+ ibdev_err(ib_dev, "failed to copy to udata\n");
return -EFAULT;
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 3257ad11be48..6317901c4b4f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -43,6 +43,45 @@
#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
+static void flush_work_handle(struct work_struct *work)
+{
+ struct hns_roce_work *flush_work = container_of(work,
+ struct hns_roce_work, work);
+ struct hns_roce_qp *hr_qp = container_of(flush_work,
+ struct hns_roce_qp, flush_work);
+ struct device *dev = flush_work->hr_dev->dev;
+ struct ib_qp_attr attr;
+ int attr_mask;
+ int ret;
+
+ attr_mask = IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+
+ if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
+ ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
+ if (ret)
+ dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
+ ret);
+ }
+
+ /*
+ * make sure we signal the QP destroy path that the flush has completed,
+ * so that it can now safely proceed and destroy the QP
+ */
+ if (atomic_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+}
+
+void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_work *flush_work = &hr_qp->flush_work;
+
+ flush_work->hr_dev = hr_dev;
+ INIT_WORK(&flush_work->work, flush_work_handle);
+ atomic_inc(&hr_qp->refcount);
+ queue_work(hr_dev->irq_workq, &flush_work->work);
+}
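One note on the reference pairing above: init_flush_work() pins the QP before queuing the work, and flush_work_handle() drops that pin, completing hr_qp->free so a concurrent destroy can proceed. The same take-then-drop pattern in a freestanding sketch (demo_* names are illustrative, with C11 atomics standing in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdbool.h>

struct demo_qp {
	atomic_int refcount;
	bool free_completed;	/* stands in for complete(&hr_qp->free) */
};

/* Queuing side: pin the QP so it cannot be freed while work is pending. */
static void demo_init_flush_work(struct demo_qp *qp)
{
	atomic_fetch_add(&qp->refcount, 1);
	/* ... queue_work(hr_dev->irq_workq, &flush_work->work) ... */
}

/* Handler side: do the flush, then drop the pin; the last reference
 * signals whoever is waiting in the destroy path. */
static void demo_flush_work_handle(struct demo_qp *qp)
{
	/* ... modify the QP to the error state ... */
	if (atomic_fetch_sub(&qp->refcount, 1) == 1)
		qp->free_completed = true;
}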
+
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
struct device *dev = hr_dev->dev;
@@ -59,6 +98,15 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
return;
}
+ if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
+ (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
+ qp->state = IB_QPS_ERR;
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+ }
+
qp->event(qp, (enum hns_roce_event)event_type);
if (atomic_dec_and_test(&qp->refcount))
@@ -108,15 +156,34 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
}
}
-static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
- int align, unsigned long *base)
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
- struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ unsigned long num = 0;
+ int ret;
- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
- base) ?
- -ENOMEM :
- 0;
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ /* when hw version is v1, the sqpn is allocated */
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
+ else
+ num = 1;
+
+ hr_qp->doorbell_qpn = 1;
+ } else {
+ ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
+ 1, 1, &num);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
+ return -ENOMEM;
+ }
+
+ hr_qp->doorbell_qpn = (u32)num;
+ }
+
+ hr_qp->qpn = num;
+
+ return 0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -139,50 +206,75 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
}
}
-static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
- struct hns_roce_qp *hr_qp)
+static void add_qp_to_list(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_cq *send_cq, struct ib_cq *recv_cq)
+{
+ struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
+ unsigned long flags;
+
+ hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
+ hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
+
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
+
+ list_add_tail(&hr_qp->node, &hr_dev->qp_list);
+ if (hr_send_cq)
+ list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
+ if (hr_recv_cq)
+ list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
+
+ hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
+static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
{
struct xarray *xa = &hr_dev->qp_table_xa;
int ret;
- if (!qpn)
+ if (!hr_qp->qpn)
return -EINVAL;
- hr_qp->qpn = qpn;
- atomic_set(&hr_qp->refcount, 1);
- init_completion(&hr_qp->free);
-
- ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
- hr_qp, GFP_KERNEL));
+ ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
if (ret)
- dev_err(hr_dev->dev, "QPC xa_store failed\n");
+ dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
+ else
+ /* add QP to the device's QP list for software completion (softwc) */
+ add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
+ init_attr->recv_cq);
return ret;
}
-static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
- struct hns_roce_qp *hr_qp)
+static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
struct device *dev = hr_dev->dev;
int ret;
- if (!qpn)
+ if (!hr_qp->qpn)
return -EINVAL;
- hr_qp->qpn = qpn;
+ /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
+ hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ return 0;
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "QPC table get failed\n");
+ dev_err(dev, "Failed to get QPC table\n");
goto err_out;
}
/* Alloc memory for IRRL */
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "IRRL table get failed\n");
+ dev_err(dev, "Failed to get IRRL table\n");
goto err_put_qp;
}
@@ -191,7 +283,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "TRRL table get failed\n");
+ dev_err(dev, "Failed to get TRRL table\n");
goto err_put_irrl;
}
}
@@ -201,22 +293,13 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "SCC CTX table get failed\n");
+ dev_err(dev, "Failed to get SCC CTX table\n");
goto err_put_trrl;
}
}
- ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
- if (ret)
- goto err_put_sccc;
-
return 0;
-err_put_sccc:
- if (hr_dev->caps.sccc_entry_sz)
- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
- hr_qp->qpn);
-
err_put_trrl:
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
@@ -236,88 +319,84 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
struct xarray *xa = &hr_dev->qp_table_xa;
unsigned long flags;
+ list_del(&hr_qp->node);
+ list_del(&hr_qp->sq_node);
+ list_del(&hr_qp->rq_node);
+
xa_lock_irqsave(xa, flags);
__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
xa_unlock_irqrestore(xa, flags);
}
-void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- if (atomic_dec_and_test(&hr_qp->refcount))
- complete(&hr_qp->free);
- wait_for_completion(&hr_qp->free);
+ /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
+ hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ return;
- if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
- if (hr_dev->caps.trrl_entry_sz)
- hns_roce_table_put(hr_dev, &qp_table->trrl_table,
- hr_qp->qpn);
- hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
- }
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}
-void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
- int cnt)
+static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- if (base_qpn < hr_dev->caps.reserved_qps)
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ return;
+
+ if (hr_qp->qpn < hr_dev->caps.reserved_qps)
return;
- hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
+ hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
-static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
+static int set_rq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, bool is_user, int has_rq,
struct hns_roce_qp *hr_qp)
{
- struct device *dev = hr_dev->dev;
u32 max_cnt;
- /* Check the validity of QP support capacity */
- if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
- cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
- dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
- cap->max_recv_wr, cap->max_recv_sge);
- return -EINVAL;
- }
-
/* If an SRQ exists, zero the RQ-related values */
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
- } else {
- if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
- dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
- return -EINVAL;
- }
- if (hr_dev->caps.min_wqes)
- max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
- else
- max_cnt = cap->max_recv_wr;
+ return 0;
+ }
- hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
+ /* Check the validity of QP support capacity */
+ if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
+ cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
+ ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
+ cap->max_recv_wr, cap->max_recv_sge);
+ return -EINVAL;
+ }
- if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
- return -EINVAL;
- }
+ max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
- max_cnt = max(1U, cap->max_recv_sge);
- hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
- if (hr_dev->caps.max_rq_sg <= 2)
- hr_qp->rq.wqe_shift =
- ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift =
- ilog2(hr_dev->caps.max_rq_desc_sz
- * hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
+ if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
+ ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
+ cap->max_recv_wr);
+ return -EINVAL;
}
+ max_cnt = max(1U, cap->max_recv_sge);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
+
+ if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+ else
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
+
cap->max_recv_wr = hr_qp->rq.wqe_cnt;
cap->max_recv_sge = hr_qp->rq.max_gs;
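The rounding in set_rq_size() is easy to sanity-check by hand: the requested depth is clamped up to caps.min_wqes and then rounded to the next power of two, and the SGE count likewise. A worked example under an assumed cap (min_wqes = 64):

#include <stdio.h>

/* Next power of two, as the kernel's roundup_pow_of_two() computes. */
static unsigned int rup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int min_wqes = 64;			/* assumed cap */
	unsigned int max_recv_wr = 100, max_recv_sge = 3;
	unsigned int wqe_cnt, max_gs;

	wqe_cnt = rup_pow2(max_recv_wr > min_wqes ? max_recv_wr : min_wqes);
	max_gs = rup_pow2(max_recv_sge > 1 ? max_recv_sge : 1);

	printf("rq.wqe_cnt=%u rq.max_gs=%u\n", wqe_cnt, max_gs); /* 128, 4 */
	return 0;
}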
@@ -334,12 +413,12 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
/* Sanity check SQ size before proceeding */
if (ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
- ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
+ ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
return -EINVAL;
}
if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
- ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n",
+ ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
cap->max_send_sge);
return -EINVAL;
}
@@ -347,10 +426,9 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_ib_create_qp *ucmd)
+static int set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
{
u32 ex_sge_num;
u32 page_size;
@@ -363,27 +441,28 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
- ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
+ ibdev_err(&hr_dev->ib_dev, "Failed to check user SQ size limit\n");
return ret;
}
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->caps.max_sq_sg <= 2)
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
- if (hr_qp->sq.max_gs > 2)
+ if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
- if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+ if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- dev_err(hr_dev->dev,
- "The extended sge cnt error! sge_cnt=%d\n",
- hr_qp->sge.sge_cnt);
+ ibdev_err(&hr_dev->ib_dev,
+ "Failed to check extended SGE size limit %d\n",
+ hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
@@ -392,7 +471,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
ex_sge_num = hr_qp->sge.sge_cnt;
/* Get buf size; SQ and RQ are aligned to page_size */
- if (hr_dev->caps.max_sq_sg <= 2) {
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
round_up((hr_qp->sq.wqe_cnt <<
@@ -492,30 +571,6 @@ static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
return region_cnt;
}
-static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
- struct hns_roce_buf_region *regions,
- int region_cnt)
-{
- int bt_pg_shift;
- int ba_num;
- int ret;
-
- bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz;
-
- /* all root ba entries must in one bt page */
- do {
- ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN;
- ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt,
- ba_num);
- if (ret <= ba_num)
- break;
-
- bt_pg_shift++;
- } while (ret > ba_num);
-
- return bt_pg_shift - PAGE_SHIFT;
-}
-
static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
@@ -528,13 +583,15 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
}
/* UD sqwqe's sge uses the extended sge */
- if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
+ hr_qp->ibqp.qp_type == IB_QPT_GSI) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
hr_qp->sq.max_gs);
hr_qp->sge.sge_shift = 4;
}
- if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+ if (hr_qp->sq.max_gs > 2 &&
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
@@ -545,46 +602,43 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
return 0;
}
-static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap,
- struct hns_roce_qp *hr_qp)
+static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
- struct device *dev = hr_dev->dev;
u32 page_size;
u32 max_cnt;
int size;
int ret;
- if (cap->max_send_wr > hr_dev->caps.max_wqes ||
+ if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
- dev_err(dev, "SQ WR or sge or inline data error!\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "SQ WR or sge or inline data error!\n");
return -EINVAL;
}
hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
- if (hr_dev->caps.min_wqes)
- max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
- else
- max_cnt = cap->max_send_wr;
+ max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
+ ibdev_err(&hr_dev->ib_dev,
+ "while setting kernel sq size, sq.wqe_cnt too large\n");
return -EINVAL;
}
/* Get data_seg numbers */
max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->caps.max_sq_sg <= 2)
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
ret = set_extend_sge_param(hr_dev, hr_qp);
if (ret) {
- dev_err(dev, "set extend sge parameters fail\n");
+ ibdev_err(&hr_dev->ib_dev, "set extend sge parameters fail\n");
return ret;
}
@@ -593,7 +647,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.offset = 0;
size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
- if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+ if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
@@ -677,362 +731,449 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
kfree(hr_qp->rq_inl_buf.wqe_list);
}
-static void add_qp_to_list(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct ib_cq *send_cq, struct ib_cq *recv_cq)
-{
- struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
- unsigned long flags;
-
- hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
- hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
-
- spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
- hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
-
- list_add_tail(&hr_qp->node, &hr_dev->qp_list);
- if (hr_send_cq)
- list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
- if (hr_recv_cq)
- list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
-
- hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
- spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
-}
-
-static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
- struct ib_pd *ib_pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, unsigned long sqpn,
- struct hns_roce_qp *hr_qp)
+static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ u32 page_shift, bool is_user)
{
- dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
- struct device *dev = hr_dev->dev;
- struct hns_roce_ib_create_qp ucmd;
- struct hns_roce_ib_create_qp_resp resp = {};
- struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
+/* WQE buffer includes 3 parts: SQ, extended SGE and RQ. */
+#define HNS_ROCE_WQE_REGION_MAX 3
+ struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
+ dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
- unsigned long qpn = 0;
- u32 page_shift;
+ int region_count;
int buf_count;
int ret;
int i;
- mutex_init(&hr_qp->mutex);
- spin_lock_init(&hr_qp->sq.lock);
- spin_lock_init(&hr_qp->rq.lock);
-
- hr_qp->state = IB_QPS_RESET;
-
- hr_qp->ibqp.qp_type = init_attr->qp_type;
-
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
- else
- hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+ region_count = split_wqe_buf_region(hr_dev, hr_qp, regions,
+ ARRAY_SIZE(regions), page_shift);
- ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
- hns_roce_qp_has_rq(init_attr), hr_qp);
+	/* alloc a tmp list to store WQE buffer addresses */
+ ret = hns_roce_alloc_buf_list(regions, buf_list, region_count);
if (ret) {
- dev_err(dev, "hns_roce_set_rq_size failed\n");
- goto err_out;
+ ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
+ return ret;
}
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hns_roce_qp_has_rq(init_attr)) {
- ret = alloc_rq_inline_buf(hr_qp, init_attr);
- if (ret) {
- dev_err(dev, "allocate receive inline buffer failed\n");
- goto err_out;
+ for (i = 0; i < region_count; i++) {
+ r = &regions[i];
+ if (is_user)
+ buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
+ r->count, r->offset, hr_qp->umem,
+ page_shift);
+ else
+ buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i],
+ r->count, r->offset, &hr_qp->hr_buf);
+
+ if (buf_count != r->count) {
+			ibdev_err(ibdev, "Failed to get %s WQE buf, expect %d, got %d.\n",
+ is_user ? "user" : "kernel",
+ r->count, buf_count);
+ ret = -ENOBUFS;
+ goto done;
}
}
- page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
- if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- dev_err(dev, "ib_copy_from_udata error for create qp\n");
- ret = -EFAULT;
- goto err_alloc_rq_inline_buf;
- }
+ hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
+ hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
+ page_shift);
+ ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
+ region_count);
+ if (ret)
+ ibdev_err(ibdev, "Failed to attach WQE's mtr\n");
+
+ goto done;
- ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
- &ucmd);
+done:
+ hns_roce_free_buf_list(buf_list, region_count);
+
+ return ret;
+}
+
+static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, unsigned long addr)
+{
+ u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ bool is_rq_buf_inline;
+ int ret;
+
+ is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+ hns_roce_qp_has_rq(init_attr);
+ if (is_rq_buf_inline) {
+ ret = alloc_rq_inline_buf(hr_qp, init_attr);
if (ret) {
- dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
- goto err_alloc_rq_inline_buf;
+ ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n");
+ return ret;
}
+ }
- hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
- hr_qp->buff_size, 0);
+ if (udata) {
+ hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
- dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
- goto err_alloc_rq_inline_buf;
- }
- hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
- hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
- page_shift);
- ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
- hr_qp->region_cnt);
- if (ret) {
- dev_err(dev, "alloc buf_list error for create qp\n");
- goto err_alloc_list;
+ goto err_inline;
}
+ } else {
+ ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
+ (1 << page_shift) * 2,
+ &hr_qp->hr_buf, page_shift);
+ if (ret)
+ goto err_inline;
+ }
- for (i = 0; i < hr_qp->region_cnt; i++) {
- r = &hr_qp->regions[i];
- buf_count = hns_roce_get_umem_bufs(hr_dev,
- buf_list[i], r->count, r->offset,
- hr_qp->umem, page_shift);
- if (buf_count != r->count) {
- dev_err(dev,
- "get umem buf err, expect %d,ret %d.\n",
- r->count, buf_count);
- ret = -ENOBUFS;
- goto err_get_bufs;
- }
- }
+ ret = map_wqe_buf(hr_dev, hr_qp, page_shift, udata);
+ if (ret)
+ goto err_alloc;
+
+ return 0;
+
+err_alloc:
+	if (udata) {
+		ib_umem_release(hr_qp->umem);
+		hr_qp->umem = NULL;
+	} else {
+		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+	}
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
- (udata->inlen >= sizeof(ucmd)) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_sq(init_attr)) {
- ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr,
+err_inline:
+	if (is_rq_buf_inline)
+		free_rq_inline_buf(hr_qp);
+
+ ibdev_err(ibdev, "Failed to alloc WQE buffer, ret %d.\n", ret);
+
+ return ret;
+}
+
+static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+ if (hr_qp->umem) {
+ ib_umem_release(hr_qp->umem);
+ hr_qp->umem = NULL;
+ }
+
+ if (hr_qp->hr_buf.nbufs > 0)
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+ hr_qp->rq.wqe_cnt)
+ free_rq_inline_buf(hr_qp);
+}
+
+static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
+ hns_roce_qp_has_sq(init_attr) &&
+ udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
+}
+
+static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
+ hns_roce_qp_has_rq(init_attr));
+}
+
+static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
+ struct ib_qp_init_attr *init_attr)
+{
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ hns_roce_qp_has_rq(init_attr));
+}
+
+static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ if (udata) {
+ if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+ ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
&hr_qp->sdb);
if (ret) {
- dev_err(dev, "sq record doorbell map failed!\n");
- goto err_get_bufs;
+ ibdev_err(ibdev,
+ "Failed to map user SQ doorbell\n");
+ goto err_out;
}
-
- /* indicate kernel supports sq record db */
- resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
hr_qp->sdb_en = 1;
+ resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
}
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_rq(init_attr)) {
- ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr,
+ if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+ ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
&hr_qp->rdb);
if (ret) {
- dev_err(dev, "rq record doorbell map failed!\n");
- goto err_sq_dbmap;
+ ibdev_err(ibdev,
+ "Failed to map user RQ doorbell\n");
+ goto err_sdb;
}
-
- /* indicate kernel supports rq record db */
- resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
hr_qp->rdb_en = 1;
+ resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
}
} else {
- if (init_attr->create_flags &
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- dev_err(dev, "init_attr->create_flags error!\n");
- ret = -EINVAL;
- goto err_alloc_rq_inline_buf;
- }
-
- if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
- dev_err(dev, "init_attr->create_flags error!\n");
- ret = -EINVAL;
- goto err_alloc_rq_inline_buf;
- }
-
- /* Set SQ size */
- ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
- hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
- goto err_alloc_rq_inline_buf;
- }
-
/* QP doorbell register address */
hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- hns_roce_qp_has_rq(init_attr)) {
+ if (kernel_qp_has_rdb(hr_dev, init_attr)) {
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
if (ret) {
- dev_err(dev, "rq record doorbell alloc failed!\n");
- goto err_alloc_rq_inline_buf;
+ ibdev_err(ibdev,
+ "Failed to alloc kernel RQ doorbell\n");
+ goto err_out;
}
*hr_qp->rdb.db_record = 0;
hr_qp->rdb_en = 1;
}
+ }
- /* Allocate QP buf */
- if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
- (1 << page_shift) * 2,
- &hr_qp->hr_buf, page_shift)) {
- dev_err(dev, "hns_roce_buf_alloc error!\n");
+ return 0;
+err_sdb:
+ if (udata && hr_qp->sdb_en)
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+err_out:
+ return ret;
+}
+
+static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct hns_roce_ucontext, ibucontext);
+
+ if (udata) {
+ if (hr_qp->rdb_en)
+ hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
+ if (hr_qp->sdb_en)
+ hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ } else {
+ if (hr_qp->rdb_en)
+ hns_roce_free_db(hr_dev, &hr_qp->rdb);
+ }
+}
+
+static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 *sq_wrid = NULL;
+ u64 *rq_wrid = NULL;
+ int ret;
+
+ sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(sq_wrid)) {
+ ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
+ return -ENOMEM;
+ }
+
+ if (hr_qp->rq.wqe_cnt) {
+ rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(rq_wrid)) {
+ ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
ret = -ENOMEM;
- goto err_db;
- }
- hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
- hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
- page_shift);
- ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
- hr_qp->region_cnt);
- if (ret) {
- dev_err(dev, "alloc buf_list error for create qp!\n");
- goto err_alloc_list;
+ goto err_sq;
}
+ }
- for (i = 0; i < hr_qp->region_cnt; i++) {
- r = &hr_qp->regions[i];
- buf_count = hns_roce_get_kmem_bufs(hr_dev,
- buf_list[i], r->count, r->offset,
- &hr_qp->hr_buf);
- if (buf_count != r->count) {
- dev_err(dev,
- "get kmem buf err, expect %d,ret %d.\n",
- r->count, buf_count);
- ret = -ENOBUFS;
- goto err_get_bufs;
- }
+ hr_qp->sq.wrid = sq_wrid;
+ hr_qp->rq.wrid = rq_wrid;
+ return 0;
+err_sq:
+ kfree(sq_wrid);
+
+ return ret;
+}
+
+static void free_kernel_wrid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ kfree(hr_qp->rq.wrid);
+ kfree(hr_qp->sq.wrid);
+}
+
+static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ hr_qp->ibqp.qp_type = init_attr->qp_type;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+ else
+ hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+
+ ret = set_rq_size(hr_dev, &init_attr->cap, udata,
+ hns_roce_qp_has_rq(init_attr), hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to set user RQ size\n");
+ return ret;
+ }
+
+ if (udata) {
+ if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) {
+ ibdev_err(ibdev, "Failed to copy QP ucmd\n");
+ return -EFAULT;
}
- hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
- GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
- ret = -ENOMEM;
- goto err_get_bufs;
+ ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
+ if (ret)
+ ibdev_err(ibdev, "Failed to set user SQ size\n");
+ } else {
+ if (init_attr->create_flags &
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
+ ibdev_err(ibdev, "Failed to check multicast loopback\n");
+ return -EINVAL;
}
- if (hr_qp->rq.wqe_cnt) {
- hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
- GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) {
- ret = -ENOMEM;
- goto err_sq_wrid;
- }
+ if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
+ return -EINVAL;
}
+
+ ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
+ if (ret)
+ ibdev_err(ibdev, "Failed to set kernel SQ size\n");
}
- if (sqpn) {
- qpn = sqpn;
- } else {
- /* Get QPN */
- ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
+ return ret;
+}
+
+static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_ib_create_qp_resp resp = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_qp ucmd;
+ int ret;
+
+ mutex_init(&hr_qp->mutex);
+ spin_lock_init(&hr_qp->sq.lock);
+ spin_lock_init(&hr_qp->rq.lock);
+
+ hr_qp->state = IB_QPS_RESET;
+ hr_qp->flush_flag = 0;
+
+ ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to set QP param\n");
+ return ret;
+ }
+
+ if (!udata) {
+ ret = alloc_kernel_wrid(hr_dev, hr_qp);
if (ret) {
- dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
- goto err_wrid;
+ ibdev_err(ibdev, "Failed to alloc wrid\n");
+ return ret;
}
}
- hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
- hr_qp->region_cnt);
- hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list,
- hr_qp->regions, hr_qp->region_cnt);
+ ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
if (ret) {
- dev_err(dev, "mtr attach error for create qp\n");
- goto err_mtr;
+ ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
+ goto err_wrid;
}
- if (init_attr->qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- /* In v1 engine, GSI QP context in RoCE engine's register */
- ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_qp_alloc failed!\n");
- goto err_qpn;
- }
- } else {
- ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_qp_alloc failed!\n");
- goto err_qpn;
- }
+ ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc QP buffer\n");
+ goto err_db;
}
- if (sqpn)
- hr_qp->doorbell_qpn = 1;
- else
- hr_qp->doorbell_qpn = (u32)hr_qp->qpn;
+ ret = alloc_qpn(hr_dev, hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc QPN\n");
+ goto err_buf;
+ }
+
+ ret = alloc_qpc(hr_dev, hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc QP context\n");
+ goto err_qpn;
+ }
+
+ ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to store QP\n");
+ goto err_qpc;
+ }
if (udata) {
ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)));
- if (ret)
- goto err_qp;
+ if (ret) {
+ ibdev_err(ibdev, "copy qp resp failed!\n");
+ goto err_store;
+ }
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret)
- goto err_qp;
+ goto err_store;
}
+ hr_qp->ibqp.qp_num = hr_qp->qpn;
hr_qp->event = hns_roce_ib_qp_event;
-
- add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq);
-
- hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
+ atomic_set(&hr_qp->refcount, 1);
+ init_completion(&hr_qp->free);
return 0;
-err_qp:
- if (init_attr->qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hns_roce_qp_remove(hr_dev, hr_qp);
- else
- hns_roce_qp_free(hr_dev, hr_qp);
-
+err_store:
+ hns_roce_qp_remove(hr_dev, hr_qp);
+err_qpc:
+ free_qpc(hr_dev, hr_qp);
err_qpn:
- if (!sqpn)
- hns_roce_release_range_qp(hr_dev, qpn, 1);
-
-err_mtr:
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-
+ free_qpn(hr_dev, hr_qp);
+err_buf:
+ free_qp_buf(hr_dev, hr_qp);
+err_db:
+ free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
- if (udata) {
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_rq(init_attr))
- hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
- } else {
- if (hr_qp->rq.wqe_cnt)
- kfree(hr_qp->rq.wrid);
- }
-
-err_sq_dbmap:
- if (udata)
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
- (udata->inlen >= sizeof(ucmd)) &&
- (udata->outlen >= sizeof(resp)) &&
- hns_roce_qp_has_sq(init_attr))
- hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
-
-err_sq_wrid:
- if (!udata)
- kfree(hr_qp->sq.wrid);
-
-err_get_bufs:
- hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
-
-err_alloc_list:
- if (!hr_qp->umem)
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- ib_umem_release(hr_qp->umem);
+ free_kernel_wrid(hr_dev, hr_qp);
+ return ret;
+}
-err_db:
- if (!udata && hns_roce_qp_has_rq(init_attr) &&
- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
- hns_roce_free_db(hr_dev, &hr_qp->rdb);
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_udata *udata)
+{
+ if (atomic_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+ wait_for_completion(&hr_qp->free);
-err_alloc_rq_inline_buf:
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hns_roce_qp_has_rq(init_attr))
- free_rq_inline_buf(hr_qp);
+ free_qpc(hr_dev, hr_qp);
+ free_qpn(hr_dev, hr_qp);
+ free_qp_buf(hr_dev, hr_qp);
+ free_kernel_wrid(hr_dev, hr_qp);
+ free_qp_db(hr_dev, hr_qp, udata);
-err_out:
- return ret;
+ kfree(hr_qp);
}
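
The new destroy path pairs the QP's atomic refcount with a completion: hns_roce_qp_destroy() drops the reference taken at creation, then blocks until the last asynchronous user (such as an event handler) drops its own before freeing anything. A minimal userspace sketch of that pattern, assuming hypothetical stand-ins (qp_obj, qp_put, qp_destroy) for the driver's refcount/complete(&hr_qp->free) pair:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's refcount + completion pair:
 * the destroyer drops the creation reference, then blocks until the
 * last outstanding user (e.g. an async event handler) drops its own. */
struct qp_obj {
	atomic_int refcount;
	pthread_mutex_t lock;
	pthread_cond_t freed;
	int complete;
};

static void qp_put(struct qp_obj *qp)
{
	if (atomic_fetch_sub(&qp->refcount, 1) == 1) {
		pthread_mutex_lock(&qp->lock);
		qp->complete = 1;               /* complete(&hr_qp->free) */
		pthread_cond_signal(&qp->freed);
		pthread_mutex_unlock(&qp->lock);
	}
}

static void qp_destroy(struct qp_obj *qp)
{
	qp_put(qp);                  /* drop the reference taken at create */
	pthread_mutex_lock(&qp->lock);
	while (!qp->complete)        /* wait_for_completion(&hr_qp->free) */
		pthread_cond_wait(&qp->freed, &qp->lock);
	pthread_mutex_unlock(&qp->lock);
	printf("last reference gone; QPC, QPN, buffers, wrid and db can be freed\n");
}

int main(void)
{
	struct qp_obj qp = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freed = PTHREAD_COND_INITIALIZER,
	};

	atomic_init(&qp.refcount, 1);   /* reference taken at create time */
	qp_destroy(&qp);
	return 0;
}

Build with -pthread; in the driver the frees that follow run in the reverse of the allocation order used by hns_roce_create_qp_common().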
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
@@ -1050,7 +1191,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
if (!hr_qp)
return ERR_PTR(-ENOMEM);
- ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
+ ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp);
if (ret) {
ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
@@ -1059,8 +1200,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(ret);
}
- hr_qp->ibqp.qp_num = hr_qp->qpn;
-
break;
}
case IB_QPT_GSI: {
@@ -1077,15 +1216,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- /* when hw version is v1, the sqpn is allocated */
- if (hr_dev->caps.max_sq_sg <= 2)
- hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
- else
- hr_qp->ibqp.qp_num = 1;
-
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
- hr_qp->ibqp.qp_num, hr_qp);
+ hr_qp);
if (ret) {
ibdev_err(ibdev, "Create GSI QP failed!\n");
kfree(hr_qp);
@@ -1097,7 +1229,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
default:{
ibdev_err(ibdev, "not support QP type %d\n",
init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
}
@@ -1230,11 +1362,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- if (hr_dev->caps.min_wqes) {
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
ret = -EPERM;
ibdev_err(&hr_dev->ib_dev,
- "cur_state=%d new_state=%d\n", cur_state,
- new_state);
+ "RST2RST state is not supported\n");
} else {
ret = 0;
}
@@ -1306,17 +1437,17 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}
-void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
-void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
-void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
+void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
(n << hr_qp->sge.sge_shift));
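
All three renamed accessors reduce to the same indexing arithmetic: entry n of a queue lives at queue_offset + (n << wqe_shift), where wqe_shift is log2 of the per-entry stride. A standalone sketch of that computation, with a plain malloc'd buffer standing in for the driver's hns_roce_buf:

#include <stdio.h>
#include <stdlib.h>

struct wq {
	unsigned int offset;    /* byte offset of this queue in the buffer */
	unsigned int wqe_shift; /* log2 of the WQE stride in bytes */
};

/* Same arithmetic as hns_roce_get_send_wqe()/hns_roce_get_recv_wqe():
 * entry n starts at the queue's offset plus n * (1 << wqe_shift). */
static void *get_wqe(void *buf, const struct wq *q, int n)
{
	return (char *)buf + q->offset + ((size_t)n << q->wqe_shift);
}

int main(void)
{
	struct wq sq = { .offset = 0, .wqe_shift = 6 };    /* 64 B WQEs */
	struct wq rq = { .offset = 4096, .wqe_shift = 4 }; /* 16 B WQEs */
	char *buf = calloc(1, 8192);

	printf("SQ WQE 3 at byte %td\n", (char *)get_wqe(buf, &sq, 3) - buf);
	printf("RQ WQE 5 at byte %td\n", (char *)get_wqe(buf, &rq, 5) - buf);
	free(buf);
	return 0;
}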
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index c6d5f06f9cde..5b3dd1a337d4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -381,7 +381,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
srq->max_gs = init_attr->attr.max_sge;
- srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
+ srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE * srq->max_gs));
srq->wqe_shift = ilog2(srq_desc_size);
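
The SRQ descriptor size becomes the per-WQE SGE area, HNS_ROCE_SGE_SIZE bytes per SGE, rounded up to a power of two so entries can be addressed by shift. A standalone recomputation, with local helpers standing in for the kernel's roundup_pow_of_two() and ilog2():

#include <stdio.h>

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int ilog2_u(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	const unsigned int sge_size = 16;       /* HNS_ROCE_SGE_SIZE */
	unsigned int max_gs = 3;                /* SGEs per SRQ WQE */
	unsigned int area = sge_size * max_gs;
	unsigned int desc, shift;

	desc = roundup_pow_of_two_u(area > sge_size ? area : sge_size);
	shift = ilog2_u(desc);
	printf("max_gs=%u -> desc=%u bytes, wqe_shift=%u\n", max_gs, desc, shift);
	return 0;
}

With max_gs = 3 this prints a 64-byte descriptor and wqe_shift = 6, the same result the patched code would compute.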
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 8feec35f95a7..3c62c9327a9c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -67,7 +67,7 @@
#include "i40iw_user.h"
#include "i40iw_puda.h"
-#define I40IW_FW_VERSION 2
+#define I40IW_FW_VER_DEFAULT 2
#define I40IW_HW_VERSION 2
#define I40IW_ARP_ADD 1
@@ -326,6 +326,26 @@ struct i40iw_handler {
};
/**
+ * i40iw_fw_major_ver - get firmware major version
+ * @dev: iwarp device
+ **/
+static inline u64 i40iw_fw_major_ver(struct i40iw_sc_dev *dev)
+{
+ return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
+ I40IW_FW_VER_MAJOR);
+}
+
+/**
+ * i40iw_fw_minor_ver - get firmware minor version
+ * @dev: iwarp device
+ **/
+static inline u64 i40iw_fw_minor_ver(struct i40iw_sc_dev *dev)
+{
+ return RS_64(dev->feature_info[I40IW_FEATURE_FW_INFO],
+ I40IW_FW_VER_MINOR);
+}
+
+/**
* to_iwdev - get device
* @ibdev: ib device
**/
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 66dc1ba03389..6e43e4d730f4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -85,7 +85,7 @@ struct ietf_mpa_v1 {
u8 flags;
u8 rev;
__be16 priv_data_len;
- u8 priv_data[0];
+ u8 priv_data[];
};
#define ietf_mpa_req_resp_frame ietf_mpa_frame
@@ -101,7 +101,7 @@ struct ietf_mpa_v2 {
u8 rev;
__be16 priv_data_len;
struct ietf_rtr_msg rtr_msg;
- u8 priv_data[0];
+ u8 priv_data[];
};
struct i40iw_cm_node;
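
The priv_data[0] -> priv_data[] conversions replace the old GNU zero-length-array idiom with a C99 flexible array member: the array must be the final member, contributes nothing to sizeof, and the allocation supplies the trailing bytes. A minimal sketch (struct mpa_frame is a toy stand-in, not the driver's ietf_mpa_v1):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* C99 flexible array member, as in the priv_data[] conversion: the
 * array must be last and adds nothing to sizeof; the allocation
 * provides the trailing bytes. */
struct mpa_frame {
	unsigned short priv_data_len;
	unsigned char priv_data[];
};

int main(void)
{
	const char payload[] = "private data";
	struct mpa_frame *f;

	f = malloc(sizeof(*f) + sizeof(payload)); /* header + payload */
	if (!f)
		return 1;
	f->priv_data_len = sizeof(payload);
	memcpy(f->priv_data, payload, sizeof(payload));
	printf("sizeof(struct mpa_frame)=%zu, payload=%s\n",
	       sizeof(struct mpa_frame), (char *)f->priv_data);
	free(f);
	return 0;
}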
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 4d841a3c68f3..e8b4b3743661 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1022,6 +1022,95 @@ static enum i40iw_status_code i40iw_sc_commit_fpm_values(
}
/**
+ * i40iw_sc_query_rdma_features_done - poll cqp for query features done
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code
+i40iw_sc_query_rdma_features_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(
+ cqp, I40IW_CQP_OP_QUERY_RDMA_FEATURES, NULL);
+}
+
+/**
+ * i40iw_sc_query_rdma_features - query rdma features
+ * @cqp: struct for cqp hw
+ * @feat_mem: holds PA for HW to use
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code
+i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
+ struct i40iw_dma_mem *feat_mem, u64 scratch)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+	if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 32, feat_mem->pa);
+
+ header = LS_64(I40IW_CQP_OP_QUERY_RDMA_FEATURES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | feat_mem->size;
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY RDMA FEATURES WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * i40iw_get_rdma_features - get RDMA features
+ * @dev: sc device struct
+ */
+enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev)
+{
+ enum i40iw_status_code ret_code;
+ struct i40iw_dma_mem feat_buf;
+ u64 temp;
+ u16 byte_idx, feat_type, feat_cnt;
+
+ ret_code = i40iw_allocate_dma_mem(dev->hw, &feat_buf,
+ I40IW_FEATURE_BUF_SIZE,
+ I40IW_FEATURE_BUF_ALIGNMENT);
+
+ if (ret_code)
+ return I40IW_ERR_NO_MEMORY;
+
+ ret_code = i40iw_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (!ret_code)
+ ret_code = i40iw_sc_query_rdma_features_done(dev->cqp);
+
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = RS_64(temp, I40IW_FEATURE_CNT);
+ if (feat_cnt < I40IW_MAX_FEATURES) {
+ ret_code = I40IW_ERR_INVALID_FEAT_CNT;
+ goto exit;
+ } else if (feat_cnt > I40IW_MAX_FEATURES) {
+ i40iw_debug(dev, I40IW_DEBUG_CQP,
+ "features buf size insufficient\n");
+ }
+
+ for (byte_idx = 0, feat_type = 0; feat_type < I40IW_MAX_FEATURES;
+ feat_type++, byte_idx += 8) {
+ get_64bit_val((u64 *)feat_buf.va, byte_idx, &temp);
+ dev->feature_info[feat_type] = RS_64(temp, I40IW_FEATURE_INFO);
+ }
+exit:
+ i40iw_free_dma_mem(dev->hw, &feat_buf);
+
+ return ret_code;
+}
+
+/**
* i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
* @cqp: struct for cqp hw
*/
@@ -4265,6 +4354,13 @@ static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
true,
I40IW_CQP_WAIT_EVENT);
break;
+ case OP_QUERY_RDMA_FEATURES:
+ values_mem.pa = pcmdinfo->in.u.query_rdma_features.cap_pa;
+ values_mem.va = pcmdinfo->in.u.query_rdma_features.cap_va;
+ status = i40iw_sc_query_rdma_features(
+ pcmdinfo->in.u.query_rdma_features.cqp, &values_mem,
+ pcmdinfo->in.u.query_rdma_features.scratch);
+ break;
default:
status = I40IW_NOT_SUPPORTED;
break;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 6ddaeec87d2f..e8367d67575d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -403,7 +403,7 @@
#define I40IW_CQP_OP_MANAGE_ARP 0x0f
#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10
#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11
-#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12
+#define I40IW_CQP_OP_QUERY_RDMA_FEATURES 0x12
#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13
#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14
#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
@@ -431,6 +431,24 @@
#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d
+#define I40IW_FEATURE_BUF_SIZE (8 * I40IW_MAX_FEATURES)
+
+#define I40IW_FW_VER_MINOR_SHIFT 0
+#define I40IW_FW_VER_MINOR_MASK \
+ (0xffffULL << I40IW_FW_VER_MINOR_SHIFT)
+
+#define I40IW_FW_VER_MAJOR_SHIFT 16
+#define I40IW_FW_VER_MAJOR_MASK \
+ (0xffffULL << I40IW_FW_VER_MAJOR_SHIFT)
+
+#define I40IW_FEATURE_INFO_SHIFT 0
+#define I40IW_FEATURE_INFO_MASK \
+ (0xffffULL << I40IW_FEATURE_INFO_SHIFT)
+
+#define I40IW_FEATURE_CNT_SHIFT 32
+#define I40IW_FEATURE_CNT_MASK \
+ (0xffffULL << I40IW_FEATURE_CNT_SHIFT)
+
#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16
#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT)
@@ -1529,7 +1547,8 @@ enum i40iw_alignment {
I40IW_AEQ_ALIGNMENT = 0x100,
I40IW_CEQ_ALIGNMENT = 0x100,
I40IW_CQ0_ALIGNMENT = 0x100,
- I40IW_SD_BUF_ALIGNMENT = 0x80
+ I40IW_SD_BUF_ALIGNMENT = 0x80,
+ I40IW_FEATURE_BUF_ALIGNMENT = 0x8
};
#define I40IW_WQE_SIZE_64 64
@@ -1732,6 +1751,7 @@ enum i40iw_alignment {
#define OP_REQUESTED_COMMANDS 31
#define OP_COMPLETED_COMMANDS 32
#define OP_GEN_AE 33
-#define OP_SIZE_CQP_STAT_ARRAY 34
+#define OP_QUERY_RDMA_FEATURES 34
+#define OP_SIZE_CQP_STAT_ARRAY 35
#endif
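
The new *_SHIFT/*_MASK pairs follow this header's existing convention, consumed by the driver's LS_64()/RS_64() accessors, and i40iw_query_device() later packs the result as major << 32 | minor. A standalone sketch of the extraction; the RS_64 below is a simplified re-implementation for the demo, not the driver's exact macro:

#include <stdio.h>
#include <stdint.h>

#define FW_VER_MINOR_SHIFT 0
#define FW_VER_MINOR_MASK  (0xffffULL << FW_VER_MINOR_SHIFT)
#define FW_VER_MAJOR_SHIFT 16
#define FW_VER_MAJOR_MASK  (0xffffULL << FW_VER_MAJOR_SHIFT)

/* Simplified analogue of the driver's RS_64(): mask the field out of
 * a 64-bit word, then shift it down to bit 0. */
#define RS_64(val, field) (((val) & field##_MASK) >> field##_SHIFT)

int main(void)
{
	/* a feature word as the firmware might report it: major 2, minor 7 */
	uint64_t fw_info = (2ULL << FW_VER_MAJOR_SHIFT) |
			   (7ULL << FW_VER_MINOR_SHIFT);

	printf("fw %llu.%llu\n",
	       (unsigned long long)RS_64(fw_info, FW_VER_MAJOR),
	       (unsigned long long)RS_64(fw_info, FW_VER_MINOR));
	return 0;
}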
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 238614370927..9c96ece5e7f3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1212,22 +1212,19 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
struct net_device *dev;
struct in_device *idev;
- bool got_lock = true;
u32 ip_addr;
- if (!rtnl_trylock())
- got_lock = false;
-
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
(rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
- (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ (dev == iwdev->netdev)) && (READ_ONCE(dev->flags) & IFF_UP)) {
const struct in_ifaddr *ifa;
- idev = in_dev_get(dev);
+ idev = __in_dev_get_rcu(dev);
if (!idev)
continue;
- in_dev_for_each_ifa_rtnl(ifa, idev) {
+ in_dev_for_each_ifa_rcu(ifa, idev) {
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
"IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
@@ -1239,12 +1236,9 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
true,
I40IW_ARP_ADD);
}
-
- in_dev_put(idev);
}
}
- if (got_lock)
- rtnl_unlock();
+ rcu_read_unlock();
}
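
Switching from rtnl_trylock() to RCU makes the walk non-blocking and removes the old failure mode where a busy RTNL silently skipped the whole scan. A loose userspace analogue of the read-side contract, using a rwlock only to model "readers may traverse concurrently and must not sleep" (real RCU readers are far cheaper):

#include <pthread.h>
#include <stdio.h>

/* Loose analogue of rcu_read_lock()/for_each_netdev_rcu(): readers
 * take a shared lock that never excludes other readers, walk the list
 * without sleeping, then drop it. */
struct netdev {
	const char *name;
	int up;                 /* stands in for dev->flags & IFF_UP */
	struct netdev *next;
};

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

static void add_ipv4_addrs(struct netdev *head)
{
	pthread_rwlock_rdlock(&list_lock);      /* rcu_read_lock() */
	for (struct netdev *d = head; d; d = d->next)
		if (d->up)
			printf("would add ARP entries for %s\n", d->name);
	pthread_rwlock_unlock(&list_lock);      /* rcu_read_unlock() */
}

int main(void)
{
	struct netdev lo = { "lo", 1, NULL };
	struct netdev eth = { "eth0", 1, &lo };

	add_ipv4_addrs(&eth);
	return 0;
}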
/**
@@ -1689,6 +1683,12 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_setup_ceqs(iwdev, ldev);
if (status)
break;
+
+ status = i40iw_get_rdma_features(dev);
+ if (status)
+ dev->feature_info[I40IW_FEATURE_FW_INFO] =
+ I40IW_FW_VER_DEFAULT;
+
iwdev->init_state = CEQ_CREATED;
status = i40iw_initialize_hw_resources(iwdev);
if (status)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index 11d3a2a72100..4c429567bbb4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -105,6 +105,7 @@ enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *
bool poll_registers);
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
+enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev);
void free_sd_mem(struct i40iw_sc_dev *dev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index f7013f11d808..d1c5855bd8c3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -95,7 +95,8 @@ enum i40iw_status_code {
I40IW_ERR_INVALID_MAC_ADDR = -65,
I40IW_ERR_BAD_STAG = -66,
I40IW_ERR_CQ_COMPL_ERROR = -67,
- I40IW_ERR_QUEUE_DESTROYED = -68
+ I40IW_ERR_QUEUE_DESTROYED = -68,
+ I40IW_ERR_INVALID_FEAT_CNT = -69
};
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index adc8d2ec523d..54c323c40d96 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -234,6 +234,11 @@ enum i40iw_hw_stats_index_64b {
I40IW_HW_STAT_INDEX_MAX_64
};
+enum i40iw_feature_type {
+ I40IW_FEATURE_FW_INFO = 0,
+ I40IW_MAX_FEATURES
+};
+
struct i40iw_dev_hw_stats_offsets {
u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
@@ -501,6 +506,7 @@ struct i40iw_sc_dev {
const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
+ u64 feature_info[I40IW_MAX_FEATURES];
u32 debug_mask;
u8 hmc_fn_id;
bool is_pf;
@@ -1340,6 +1346,12 @@ struct cqp_info {
struct i40iw_sc_qp *qp;
u64 scratch;
} suspend_resume;
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ void *cap_va;
+ u64 cap_pa;
+ u64 scratch;
+ } query_rdma_features;
} u;
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index c335de91508f..1b6fb1380961 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -64,7 +64,8 @@ static int i40iw_query_device(struct ib_device *ibdev,
return -EINVAL;
memset(props, 0, sizeof(*props));
ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
- props->fw_ver = I40IW_FW_VERSION;
+ props->fw_ver = i40iw_fw_major_ver(&iwdev->sc_dev) << 32 |
+ i40iw_fw_minor_ver(&iwdev->sc_dev);
props->device_cap_flags = iwdev->device_cap_flags;
props->vendor_id = iwdev->ldev->pcidev->vendor;
props->vendor_part_id = iwdev->ldev->pcidev->device;
@@ -617,7 +618,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
if (init_attr->qp_type != IB_QPT_RC) {
- err_code = -EINVAL;
+ err_code = -EOPNOTSUPP;
goto error;
}
if (iwdev->push_mode)
@@ -2534,10 +2535,11 @@ static const char * const i40iw_hw_stat_names[] = {
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
{
- u32 firmware_version = I40IW_FW_VERSION;
+ struct i40iw_device *iwdev = to_iwdev(dev);
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
- (firmware_version & 0x000000ff));
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%llu.%llu",
+ i40iw_fw_major_ver(&iwdev->sc_dev),
+ i40iw_fw_minor_ver(&iwdev->sc_dev));
}
/**
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 2f5d9b181848..a66518a5c938 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -434,9 +434,6 @@ int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
return real_index;
}
-#define field_avail(type, fld, sz) (offsetof(type, fld) + \
- sizeof(((type *)0)->fld) <= (sz))
-
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -447,7 +444,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
- struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+ struct mlx4_uverbs_ex_query_device_resp resp = {};
struct mlx4_clock_params clock_params;
if (uhw->inlen) {
@@ -602,7 +599,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx4_wqe_data_seg);
}
- if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+ if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
if (props->rss_caps.supported_qpts) {
resp.rss_caps.rx_hash_function =
MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
@@ -626,7 +623,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(resp.rss_caps);
}
- if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+ if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
if (dev->dev->caps.max_gso_sz &&
((mlx4_ib_port_link_layer(ibdev, 1) ==
IB_LINK_LAYER_ETHERNET) ||
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 26425dd2d960..2f9f78912267 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1636,7 +1636,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
return &qp->ibqp;
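
The field_avail() -> offsetofend() conversions in mlx4 and mlx5 state the check directly: a response field may be written only if the user's output buffer extends at least one-past-the-end of that member. A standalone sketch with a toy response struct (query_resp is hypothetical; the offsetofend definition matches the kernel's):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Matches the kernel's definition: the byte offset one past MEMBER. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct query_resp {             /* toy stand-in for a uverbs response */
	uint32_t comp_mask;
	uint32_t response_length;
	uint64_t rss_caps;
	uint64_t tso_caps;
};

int main(void)
{
	size_t outlen = 16;     /* user buffer covers up through rss_caps */

	printf("rss_caps writable: %s\n",
	       offsetofend(struct query_resp, rss_caps) <= outlen ? "yes" : "no");
	printf("tso_caps writable: %s\n",
	       offsetofend(struct query_resp, tso_caps) <= outlen ? "yes" : "no");
	return 0;
}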
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index d0a043ccbe58..2a334800f109 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -8,3 +8,4 @@ mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += flow.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += qos.o
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 8ba439fabf7f..de4da92b81a6 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -47,6 +47,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
"rp_byte_reset",
"rp_threshold",
"rp_ai_rate",
+ "rp_max_rate",
"rp_hai_rate",
"rp_min_dec_fac",
"rp_min_rate",
@@ -56,6 +57,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
"rp_rate_reduce_monitor_period",
"rp_initial_alpha_value",
"rp_gd",
+ "np_min_time_between_cnps",
"np_cnp_dscp",
"np_cnp_prio_mode",
"np_cnp_prio",
@@ -66,6 +68,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
#define MLX5_IB_RP_TIME_RESET_ATTR BIT(3)
#define MLX5_IB_RP_BYTE_RESET_ATTR BIT(4)
#define MLX5_IB_RP_THRESHOLD_ATTR BIT(5)
+#define MLX5_IB_RP_MAX_RATE_ATTR BIT(6)
#define MLX5_IB_RP_AI_RATE_ATTR BIT(7)
#define MLX5_IB_RP_HAI_RATE_ATTR BIT(8)
#define MLX5_IB_RP_MIN_DEC_FAC_ATTR BIT(9)
@@ -77,6 +80,7 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
#define MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR BIT(15)
#define MLX5_IB_RP_GD_ATTR BIT(16)
+#define MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR BIT(2)
#define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3)
#define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4)
@@ -111,6 +115,9 @@ static u32 mlx5_get_cc_param_val(void *field, int offset)
case MLX5_IB_DBG_CC_RP_AI_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_ai_rate);
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate);
case MLX5_IB_DBG_CC_RP_HAI_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_hai_rate);
@@ -138,6 +145,9 @@ static u32 mlx5_get_cc_param_val(void *field, int offset)
case MLX5_IB_DBG_CC_RP_GD:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_gd);
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps);
case MLX5_IB_DBG_CC_NP_CNP_DSCP:
return MLX5_GET(cong_control_r_roce_ecn_np, field,
cnp_dscp);
@@ -186,6 +196,11 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_ai_rate, var);
break;
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ *attr_mask |= MLX5_IB_RP_MAX_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate, var);
+ break;
case MLX5_IB_DBG_CC_RP_HAI_RATE:
*attr_mask |= MLX5_IB_RP_HAI_RATE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
@@ -231,6 +246,11 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_gd, var);
break;
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ *attr_mask |= MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps, var);
+ break;
case MLX5_IB_DBG_CC_NP_CNP_DSCP:
*attr_mask |= MLX5_IB_NP_CNP_DSCP_ATTR;
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_dscp, var);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 3dec3de903b7..146ba2966744 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -715,17 +715,19 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- ucmdlen = udata->inlen < sizeof(ucmd) ?
- (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
+ if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
+ return -EINVAL;
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
- if (ucmdlen == sizeof(ucmd) &&
- (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
+ if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
+ MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
return -EINVAL;
- if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
+ if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
+ ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
*cqe_size = ucmd.cqe_size;
@@ -762,7 +764,14 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- *index = context->bfregi.sys_pages[0];
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
+ *index = ucmd.uar_page_index;
+ } else if (context->bfregi.lib_uar_dyn) {
+ err = -EINVAL;
+ goto err_cqb;
+ } else {
+ *index = context->bfregi.sys_pages[0];
+ }
if (ucmd.cqe_comp_en == 1) {
int mini_cqe_format;
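
create_cq_user() now copies min(udata->inlen, sizeof(ucmd)) and only insists on the fields that predate flags, so older userspace sending a shorter command keeps working and absent trailing fields read as zero. A sketch of that tolerant-parsing shape (create_cq_cmd and parse_cmd are hypothetical stand-ins, not the uAPI struct):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct create_cq_cmd {          /* toy stand-in for the create-CQ ucmd */
	uint64_t buf_addr;
	uint64_t db_addr;
	uint32_t cqe_size;
	uint32_t flags;         /* newer field, absent from older users */
};

/* Copy at most sizeof(*cmd), require everything before the optional
 * trailing fields, and zero the rest so absent fields read as 0. */
static int parse_cmd(const void *ubuf, size_t inlen, struct create_cq_cmd *cmd)
{
	size_t len = inlen < sizeof(*cmd) ? inlen : sizeof(*cmd);

	if (len < offsetof(struct create_cq_cmd, flags))
		return -1;      /* shorter than even the base layout */
	memset(cmd, 0, sizeof(*cmd));
	memcpy(cmd, ubuf, len);
	return 0;
}

int main(void)
{
	struct create_cq_cmd old = { .cqe_size = 64 }; /* old ABI, no flags */
	struct create_cq_cmd out;

	if (!parse_cmd(&old, offsetof(struct create_cq_cmd, flags), &out))
		printf("cqe_size=%u flags=%u\n",
		       (unsigned)out.cqe_size, (unsigned)out.flags);
	return 0;
}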
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index dbee17d22d50..862b7bf3e646 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -35,6 +35,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
*namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3efa7493456b..6679756506e6 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -39,9 +39,6 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
-#if defined(CONFIG_X86)
-#include <asm/memtype.h>
-#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
@@ -898,7 +895,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->raw_packet_caps |=
IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
- if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
if (max_tso) {
resp.tso_caps.max_tso = 1 << max_tso;
@@ -908,7 +905,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
resp.rss_caps.rx_hash_function =
MLX5_RX_HASH_FUNC_TOEPLITZ;
resp.rss_caps.rx_hash_fields_mask =
@@ -928,9 +925,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.rss_caps);
}
} else {
- if (field_avail(typeof(resp), tso_caps, uhw_outlen))
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
resp.response_length += sizeof(resp.tso_caps);
- if (field_avail(typeof(resp), rss_caps, uhw_outlen))
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
resp.response_length += sizeof(resp.rss_caps);
}
@@ -1072,7 +1069,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MAX_CQ_PERIOD;
}
- if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
resp.response_length += sizeof(resp.cqe_comp_caps);
if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
@@ -1090,7 +1087,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
+ if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
@@ -1108,8 +1105,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.packet_pacing_caps);
}
- if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
- uhw_outlen)) {
+ if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
+ uhw_outlen) {
if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
resp.mlx5_ib_support_multi_pkt_send_wqes =
MLX5_IB_ALLOW_MPW;
@@ -1122,7 +1119,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), flags, uhw_outlen)) {
+ if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
resp.response_length += sizeof(resp.flags);
if (MLX5_CAP_GEN(mdev, cqe_compression_128))
@@ -1138,7 +1135,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
}
- if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
resp.response_length += sizeof(resp.sw_parsing_caps);
if (MLX5_CAP_ETH(mdev, swp)) {
resp.sw_parsing_caps.sw_parsing_offloads |=
@@ -1158,7 +1155,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
+ if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
@@ -1181,7 +1178,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
+ if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
resp.response_length += sizeof(resp.tunnel_offloads_caps);
if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
resp.tunnel_offloads_caps |=
@@ -1192,12 +1189,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_GRE)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
@@ -1791,6 +1786,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
max_cqe_version);
u32 dump_fill_mkey;
bool lib_uar_4k;
+ bool lib_uar_dyn;
if (!dev->ib_active)
return -EAGAIN;
@@ -1849,8 +1845,14 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
}
lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+ lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
bfregi = &context->bfregi;
+ if (lib_uar_dyn) {
+ bfregi->lib_uar_dyn = lib_uar_dyn;
+ goto uar_done;
+ }
+
/* updates req->total_num_bfregs */
err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
@@ -1877,6 +1879,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
if (err)
goto out_sys_pages;
+uar_done:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
err = mlx5_ib_devx_create(dev, true);
if (err < 0)
@@ -1898,19 +1901,19 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- resp.tot_bfregs = req.total_num_bfregs;
+ resp.tot_bfregs = lib_uar_dyn ? 0 : req.total_num_bfregs;
resp.num_ports = dev->num_ports;
- if (field_avail(typeof(resp), cqe_version, udata->outlen))
+ if (offsetofend(typeof(resp), cqe_version) <= udata->outlen)
resp.response_length += sizeof(resp.cqe_version);
- if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
+ if (offsetofend(typeof(resp), cmds_supp_uhw) <= udata->outlen) {
resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
resp.response_length += sizeof(resp.cmds_supp_uhw);
}
- if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
+ if (offsetofend(typeof(resp), eth_min_inline) <= udata->outlen) {
if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
resp.eth_min_inline++;
@@ -1918,7 +1921,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
resp.response_length += sizeof(resp.eth_min_inline);
}
- if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
+ if (offsetofend(typeof(resp), clock_info_versions) <= udata->outlen) {
if (mdev->clock_info)
resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
resp.response_length += sizeof(resp.clock_info_versions);
@@ -1930,7 +1933,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
* pretend we don't support reading the HCA's core clock. This is also
* forced by mmap function.
*/
- if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ if (offsetofend(typeof(resp), hca_core_clock_offset) <= udata->outlen) {
if (PAGE_SIZE <= 4096) {
resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
@@ -1940,18 +1943,18 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
resp.response_length += sizeof(resp.hca_core_clock_offset);
}
- if (field_avail(typeof(resp), log_uar_size, udata->outlen))
+ if (offsetofend(typeof(resp), log_uar_size) <= udata->outlen)
resp.response_length += sizeof(resp.log_uar_size);
- if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
+ if (offsetofend(typeof(resp), num_uars_per_page) <= udata->outlen)
resp.response_length += sizeof(resp.num_uars_per_page);
- if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
+ if (offsetofend(typeof(resp), num_dyn_bfregs) <= udata->outlen) {
resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
resp.response_length += sizeof(resp.num_dyn_bfregs);
}
- if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
+ if (offsetofend(typeof(resp), dump_fill_mkey) <= udata->outlen) {
if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
resp.dump_fill_mkey = dump_fill_mkey;
resp.comp_mask |=
@@ -2026,6 +2029,17 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
+static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
+ int uar_idx)
+{
+ unsigned int fw_uars_per_page;
+
+ fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_UARS_IN_PAGE : 1;
+
+ return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
+}
+
static int get_command(unsigned long offset)
{
return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
@@ -2110,6 +2124,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
mutex_unlock(&var_table->bitmap_lock);
kfree(mentry);
break;
+ case MLX5_IB_MMAP_TYPE_UAR_WC:
+ case MLX5_IB_MMAP_TYPE_UAR_NC:
+ mlx5_cmd_free_uar(dev->mdev, mentry->page_idx);
+ kfree(mentry);
+ break;
default:
WARN_ON(true);
}
@@ -2130,6 +2149,9 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
bfregi->num_static_sys_pages;
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2147,14 +2169,6 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_ALLOC_WC:
-/* Some architectures don't support WC memory */
-#if defined(CONFIG_X86)
- if (!pat_enabled())
- return -EPERM;
-#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
- return -EPERM;
-#endif
- /* fall through */
case MLX5_IB_MMAP_REGULAR_PAGE:
/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
prot = pgprot_writecombine(vma->vm_page_prot);
@@ -2269,7 +2283,8 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
mentry = to_mmmap(entry);
pfn = (mentry->address >> PAGE_SHIFT);
- if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR)
+ if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
+ mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
prot = pgprot_noncached(vma->vm_page_prot);
else
prot = pgprot_writecombine(vma->vm_page_prot);
@@ -2300,9 +2315,12 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
command = get_command(vma->vm_pgoff);
switch (command) {
case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
+ if (!dev->wc_support)
+ return -EPERM;
+ fallthrough;
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
- case MLX5_IB_MMAP_ALLOC_WC:
return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
@@ -4046,6 +4064,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
log_max_ft_size));
priority = fs_matcher->priority;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ log_max_ft_size));
+ priority = fs_matcher->priority;
}
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
@@ -4062,6 +4085,8 @@ _get_flow_table(struct mlx5_ib_dev *dev,
prio = &dev->flow_db->fdb;
else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
prio = &dev->flow_db->rdma_rx[priority];
+ else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX)
+ prio = &dev->flow_db->rdma_tx[priority];
if (!prio)
return ERR_PTR(-EINVAL);
@@ -6090,9 +6115,9 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-static int var_obj_cleanup(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
+static int mmap_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
{
struct mlx5_user_mmap_entry *obj = uobject->object;
@@ -6100,6 +6125,16 @@ static int var_obj_cleanup(struct ib_uobject *uobject,
return 0;
}
+static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
+ struct mlx5_user_mmap_entry *entry,
+ size_t length)
+{
+ return rdma_user_mmap_entry_insert_range(
+ &c->ibucontext, &entry->rdma_entry, length,
+ (MLX5_IB_MMAP_OFFSET_START << 16),
+ ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
+}
+
static struct mlx5_user_mmap_entry *
alloc_var_entry(struct mlx5_ib_ucontext *c)
{
@@ -6130,10 +6165,8 @@ alloc_var_entry(struct mlx5_ib_ucontext *c)
entry->page_idx = page_idx;
entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
- err = rdma_user_mmap_entry_insert_range(
- &c->ibucontext, &entry->rdma_entry, var_table->stride_size,
- MLX5_IB_MMAP_OFFSET_START << 16,
- (MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1);
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry,
+ var_table->stride_size);
if (err)
goto err_insert;
@@ -6217,7 +6250,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UA_MANDATORY));
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
- UVERBS_TYPE_ALLOC_IDR(var_obj_cleanup),
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
&UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
&UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
@@ -6229,6 +6262,134 @@ static bool var_is_supported(struct ib_device *device)
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
}
+static struct mlx5_user_mmap_entry *
+alloc_uar_entry(struct mlx5_ib_ucontext *c,
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type)
+{
+ struct mlx5_user_mmap_entry *entry;
+ struct mlx5_ib_dev *dev;
+ u32 uar_index;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ dev = to_mdev(c->ibucontext.device);
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ if (err)
+ goto end;
+
+ entry->page_idx = uar_index;
+ entry->address = uar_index2paddress(dev, uar_index);
+ if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
+ else
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
+
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
+ if (err)
+ goto err_insert;
+
+ return entry;
+
+err_insert:
+ mlx5_cmd_free_uar(dev->mdev, uar_index);
+end:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_user_mmap_entry *entry;
+ u64 mmap_offset;
+ u32 length;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ err = uverbs_get_const(&alloc_type, attrs,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
+ if (err)
+ return err;
+
+ if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
+ alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
+ return -EOPNOTSUPP;
+
+ if (!to_mdev(c->ibucontext.device)->wc_support &&
+ alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ return -EOPNOTSUPP;
+
+ entry = alloc_uar_entry(c, alloc_type);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ mmap_offset = mlx5_entry_to_mmap_offset(entry);
+ length = entry->rdma_entry.npages * PAGE_SIZE;
+ uobj->object = entry;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ &entry->page_idx, sizeof(entry->page_idx));
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+ return err;
+}
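+
+/*
+ * Expected userspace flow (sketch): the caller mmap()s the uverbs
+ * device fd with the reported offset and length, e.g.
+ *
+ *	reg = mmap(NULL, mmap_length, PROT_WRITE, MAP_SHARED, cmd_fd,
+ *		   mmap_offset);
+ *
+ * which lands in mlx5_ib_mmap_offset() and picks write-combining or
+ * non-cached protection from entry->mmap_flag.
+ */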
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_UAR_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
+ enum mlx5_ib_uapi_uar_alloc_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_UAR_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
+
ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_dm,
UVERBS_OBJECT_DM,
@@ -6253,12 +6414,14 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE(
static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_action),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
{}
};
@@ -6392,7 +6555,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->reset_flow_resource_lock);
xa_init(&dev->odp_mkeys);
xa_init(&dev->sig_mrs);
- spin_lock_init(&dev->mkey_lock);
+ atomic_set(&dev->mkey_var, 0);
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
@@ -6548,7 +6711,8 @@ static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
doorbell_bar_offset);
bar_size = (1ULL << log_doorbell_bar_size) * 4096;
var_table->stride_size = 1ULL << log_doorbell_stride;
- var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size);
+ var_table->num_var_hw_entries = div_u64(bar_size,
+ var_table->stride_size);
mutex_init(&var_table->bitmap_lock);
var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
GFP_KERNEL);
@@ -7080,6 +7244,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
mlx5_ib_stage_counters_init,
mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_UAR,
mlx5_ib_stage_uar_init,
mlx5_ib_stage_uar_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b90a3649e7d1..c19ec9fd8a63 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -316,7 +316,7 @@ int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
if (!dev->mdev->roce.roce_en &&
port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
if (mlx5_core_is_pf(dev->mdev))
- dev->wc_support = true;
+ dev->wc_support = arch_can_pci_mmap_wc();
return 0;
}
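+
+/*
+ * arch_can_pci_mmap_wc() stands in for the open-coded #ifdefs removed
+ * from uar_mmap(): on x86 it reduces to pat_enabled(), and it is 0 on
+ * architectures without write-combining PCI mmap, so BlueFlame is
+ * reported as unsupported up front instead of failing at mmap time.
+ */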
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index fc19dc1cf2e1..a4e522385de0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -64,8 +64,6 @@
dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
__LINE__, current->pid, ##arg)
-#define field_avail(type, fld, sz) (offsetof(type, fld) + \
- sizeof(((type *)0)->fld) <= (sz))
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
@@ -126,11 +124,27 @@ enum {
enum mlx5_ib_mmap_type {
MLX5_IB_MMAP_TYPE_MEMIC = 1,
MLX5_IB_MMAP_TYPE_VAR = 2,
+ MLX5_IB_MMAP_TYPE_UAR_WC = 3,
+ MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};
-#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \
- (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
-#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+struct mlx5_bfreg_info {
+ u32 *sys_pages;
+ int num_low_latency_bfregs;
+ unsigned int *count;
+
+ /*
+ * protect bfreg allocation data structs
+ */
+ struct mutex lock;
+ u32 ver;
+ u8 lib_uar_4k : 1;
+ u8 lib_uar_dyn : 1;
+ u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
+};
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
@@ -203,6 +217,11 @@ struct mlx5_ib_flow_matcher {
u8 match_criteria_enable;
};
+struct mlx5_ib_pp {
+ u16 index;
+ struct mlx5_core_dev *mdev;
+};
+
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
@@ -210,6 +229,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
struct mlx5_ib_flow_prio fdb;
struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
@@ -618,8 +638,8 @@ struct mlx5_ib_mr {
struct ib_umem *umem;
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
- int order;
- bool allocated_from_cache;
+ unsigned int order;
+ struct mlx5_cache_ent *cache_ent;
int npages;
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
@@ -701,22 +721,34 @@ struct mlx5_cache_ent {
u32 access_mode;
u32 page;
- u32 size;
- u32 cur;
+ u8 disabled:1;
+ u8 fill_to_high_water:1;
+
+ /*
+ * - available_mrs is the length of the list head, i.e. the number of
+ *   MRs available for immediate allocation.
+ * - total_mrs is available_mrs plus all in-use MRs that could be
+ *   returned to the cache.
+ * - limit is the low water mark for available_mrs; 2 * limit is the
+ *   upper water mark.
+ * - pending is the number of MRs currently being created.
+ */
+ u32 total_mrs;
+ u32 available_mrs;
+ u32 limit;
+ u32 pending;
+
+ /* Statistics */
u32 miss;
- u32 limit;
struct mlx5_ib_dev *dev;
struct work_struct work;
struct delayed_work dwork;
- int pending;
- struct completion compl;
};
struct mlx5_mr_cache {
struct workqueue_struct *wq;
struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
- int stopped;
struct dentry *root;
unsigned long last_add;
};
@@ -794,6 +826,7 @@ enum mlx5_ib_dbg_cc_types {
MLX5_IB_DBG_CC_RP_BYTE_RESET,
MLX5_IB_DBG_CC_RP_THRESHOLD,
MLX5_IB_DBG_CC_RP_AI_RATE,
+ MLX5_IB_DBG_CC_RP_MAX_RATE,
MLX5_IB_DBG_CC_RP_HAI_RATE,
MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
MLX5_IB_DBG_CC_RP_MIN_RATE,
@@ -803,6 +836,7 @@ enum mlx5_ib_dbg_cc_types {
MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
MLX5_IB_DBG_CC_RP_GD,
+ MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
MLX5_IB_DBG_CC_NP_CNP_DSCP,
MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
MLX5_IB_DBG_CC_NP_CNP_PRIO,
@@ -986,19 +1020,16 @@ struct mlx5_ib_dev {
*/
struct mutex cap_mask_mutex;
u8 ib_active:1;
- u8 fill_delay:1;
u8 is_rep:1;
u8 lag_active:1;
u8 wc_support:1;
+ u8 fill_delay;
struct umr_common umrc;
/* sync used page count stats
*/
struct mlx5_ib_resources devr;
- /* protect mkey key part */
- spinlock_t mkey_lock;
- u8 mkey_key;
-
+ atomic_t mkey_var;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
@@ -1268,7 +1299,8 @@ int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
-struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ unsigned int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
@@ -1388,6 +1420,7 @@ int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
+extern const struct uapi_definition mlx5_ib_qos_defs[];
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
@@ -1477,12 +1510,11 @@ static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
{
u8 cqe_version = ucontext->cqe_version;
- if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
return 0;
- if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
- !!cqe_version))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
return -EINVAL;
return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
@@ -1495,12 +1527,11 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
{
u8 cqe_version = ucontext->cqe_version;
- if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
return 0;
- if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
- !!cqe_version))
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
return -EINVAL;
return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
@@ -1539,7 +1570,9 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
return false;
- if (access_flags & IB_ACCESS_RELAXED_ORDERING)
+ if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
+ (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
return false;
return true;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8508af500972..a401931189b7 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -54,12 +54,8 @@ static void
assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
u32 *in)
{
+ u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
- u8 key;
-
- spin_lock_irq(&dev->mkey_lock);
- key = dev->mkey_key++;
- spin_unlock_irq(&dev->mkey_lock);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, mkey_7_0, key);
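+
+/*
+ * The variant is the low byte (mkey_7_0) of the returned mkey; an
+ * atomic counter is enough here since all that matters is that
+ * consecutive mkeys get different low bytes, which is why the old
+ * mkey_lock/mkey_key pair could be dropped.
+ */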
@@ -90,6 +86,7 @@ mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
@@ -103,16 +100,6 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-static int order2idx(struct mlx5_ib_dev *dev, int order)
-{
- struct mlx5_mr_cache *cache = &dev->cache;
-
- if (order < cache->ent[0].order)
- return 0;
- else
- return order - cache->ent[0].order;
-}
-
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
@@ -124,18 +111,16 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
struct mlx5_ib_mr *mr =
container_of(context, struct mlx5_ib_mr, cb_work);
struct mlx5_ib_dev *dev = mr->dev;
- struct mlx5_mr_cache *cache = &dev->cache;
- int c = order2idx(dev, mr->order);
- struct mlx5_cache_ent *ent = &cache->ent[c];
+ struct mlx5_cache_ent *ent = mr->cache_ent;
unsigned long flags;
- spin_lock_irqsave(&ent->lock, flags);
- ent->pending--;
- spin_unlock_irqrestore(&ent->lock, flags);
if (status) {
mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
kfree(mr);
- dev->fill_delay = 1;
+ spin_lock_irqsave(&ent->lock, flags);
+ ent->pending--;
+ WRITE_ONCE(dev->fill_delay, 1);
+ spin_unlock_irqrestore(&ent->lock, flags);
mod_timer(&dev->delay_timer, jiffies + HZ);
return;
}
@@ -144,23 +129,44 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
mr->mmkey.key |= mlx5_idx_to_mkey(
MLX5_GET(create_mkey_out, mr->out, mkey_index));
- cache->last_add = jiffies;
+ WRITE_ONCE(dev->cache.last_add, jiffies);
spin_lock_irqsave(&ent->lock, flags);
list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- ent->size++;
+ ent->available_mrs++;
+ ent->total_mrs++;
+ /* If we are doing fill_to_high_water then keep going. */
+ queue_adjust_cache_locked(ent);
+ ent->pending--;
spin_unlock_irqrestore(&ent->lock, flags);
+}
- if (!completion_done(&ent->compl))
- complete(&ent->compl);
+static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
+{
+ struct mlx5_ib_mr *mr;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return NULL;
+ mr->order = ent->order;
+ mr->cache_ent = ent;
+ mr->dev = ent->dev;
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
+
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+ MLX5_SET(mkc, mkc, log_page_size, ent->page);
+ return mr;
}
-static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
+/* Asynchronously schedule new MRs to be populated in the cache. */
+static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
- int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
@@ -173,42 +179,29 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
for (i = 0; i < num; i++) {
- if (ent->pending >= MAX_PENDING_REG_MR) {
- err = -EAGAIN;
- break;
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ mr = alloc_cache_mr(ent, mkc);
if (!mr) {
err = -ENOMEM;
break;
}
- mr->order = ent->order;
- mr->allocated_from_cache = true;
- mr->dev = dev;
-
- MLX5_SET(mkc, mkc, free, 1);
- MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
- MLX5_SET(mkc, mkc, access_mode_4_2,
- (ent->access_mode >> 2) & 0x7);
-
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
- MLX5_SET(mkc, mkc, log_page_size, ent->page);
-
spin_lock_irq(&ent->lock);
+ if (ent->pending >= MAX_PENDING_REG_MR) {
+ err = -EAGAIN;
+ spin_unlock_irq(&ent->lock);
+ kfree(mr);
+ break;
+ }
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_ib_create_mkey_cb(dev, &mr->mmkey,
- &dev->async_ctx, in, inlen,
- mr->out, sizeof(mr->out),
- &mr->cb_work);
+ err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
+ &ent->dev->async_ctx, in, inlen,
+ mr->out, sizeof(mr->out),
+ &mr->cb_work);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
spin_unlock_irq(&ent->lock);
- mlx5_ib_warn(dev, "create mkey failed %d\n", err);
+ mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
kfree(mr);
break;
}
@@ -218,70 +211,128 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
return err;
}
-static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
+/* Synchronously create an MR in the cache */
+static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
- struct mlx5_ib_mr *tmp_mr;
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mr *mr;
- LIST_HEAD(del_list);
- int i;
+ void *mkc;
+ u32 *in;
+ int err;
- for (i = 0; i < num; i++) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
- break;
- }
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
- list_move(&mr->list, &del_list);
- ent->cur--;
- ent->size--;
- spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
- }
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
- list_del(&mr->list);
- kfree(mr);
+ mr = alloc_cache_mr(ent, mkc);
+ if (!mr) {
+ err = -ENOMEM;
+ goto free_in;
}
+
+ err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
+ if (err)
+ goto free_mr;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ WRITE_ONCE(ent->dev->cache.last_add, jiffies);
+ spin_lock_irq(&ent->lock);
+ ent->total_mrs++;
+ spin_unlock_irq(&ent->lock);
+ kfree(in);
+ return mr;
+free_mr:
+ kfree(mr);
+free_in:
+ kfree(in);
+ return ERR_PTR(err);
}
-static ssize_t size_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
+{
+ struct mlx5_ib_mr *mr;
+
+ lockdep_assert_held(&ent->lock);
+ if (list_empty(&ent->head))
+ return;
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->available_mrs--;
+ ent->total_mrs--;
+ spin_unlock_irq(&ent->lock);
+ mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
+ kfree(mr);
+ spin_lock_irq(&ent->lock);
+}
+
+static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
+ bool limit_fill)
{
- struct mlx5_cache_ent *ent = filp->private_data;
- struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20] = {0};
- u32 var;
int err;
- int c;
- count = min(count, sizeof(lbuf) - 1);
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
+ lockdep_assert_held(&ent->lock);
- c = order2idx(dev, ent->order);
+ while (true) {
+ if (limit_fill)
+ target = ent->limit * 2;
+ if (target == ent->available_mrs + ent->pending)
+ return 0;
+ if (target > ent->available_mrs + ent->pending) {
+ u32 todo = target - (ent->available_mrs + ent->pending);
- if (sscanf(lbuf, "%u", &var) != 1)
- return -EINVAL;
+ spin_unlock_irq(&ent->lock);
+ err = add_keys(ent, todo);
+ if (err == -EAGAIN)
+ usleep_range(3000, 5000);
+ spin_lock_irq(&ent->lock);
+ if (err) {
+ if (err != -EAGAIN)
+ return err;
+ } else
+ return 0;
+ } else {
+ remove_cache_mr_locked(ent);
+ }
+ }
+}
- if (var < ent->limit)
- return -EINVAL;
+static ssize_t size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_cache_ent *ent = filp->private_data;
+ u32 target;
+ int err;
- if (var > ent->size) {
- do {
- err = add_keys(dev, c, var - ent->size);
- if (err && err != -EAGAIN)
- return err;
+ err = kstrtou32_from_user(buf, count, 0, &target);
+ if (err)
+ return err;
- usleep_range(3000, 5000);
- } while (err);
- } else if (var < ent->size) {
- remove_keys(dev, c, ent->size - var);
+ /*
+ * Target is the new value of total_mrs the user requests; however, we
+ * cannot free MRs that are in use. Compute the target value for
+ * available_mrs.
+ */
+ spin_lock_irq(&ent->lock);
+ if (target < ent->total_mrs - ent->available_mrs) {
+ err = -EINVAL;
+ goto err_unlock;
}
+ target = target - (ent->total_mrs - ent->available_mrs);
+ if (target < ent->limit || target > ent->limit * 2) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ err = resize_available_mrs(ent, target, false);
+ if (err)
+ goto err_unlock;
+ spin_unlock_irq(&ent->lock);
return count;
+
+err_unlock:
+ spin_unlock_irq(&ent->lock);
+ return err;
}
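+
+/*
+ * Example: with total_mrs == 10 and available_mrs == 4, six MRs are in
+ * use; writing "12" gives target = 12 - (10 - 4) = 6 available MRs,
+ * which is accepted only if 6 falls within [limit, 2 * limit].
+ */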
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
@@ -291,7 +342,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
char lbuf[20];
int err;
- err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
+ err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
if (err < 0)
return err;
@@ -309,32 +360,23 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_cache_ent *ent = filp->private_data;
- struct mlx5_ib_dev *dev = ent->dev;
- char lbuf[20] = {0};
u32 var;
int err;
- int c;
-
- count = min(count, sizeof(lbuf) - 1);
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
-
- c = order2idx(dev, ent->order);
-
- if (sscanf(lbuf, "%u", &var) != 1)
- return -EINVAL;
- if (var > ent->size)
- return -EINVAL;
+ err = kstrtou32_from_user(buf, count, 0, &var);
+ if (err)
+ return err;
+ /*
+ * Upon set, immediately fill the cache up to the high water
+ * mark implied by the limit.
+ */
+ spin_lock_irq(&ent->lock);
ent->limit = var;
-
- if (ent->cur < ent->limit) {
- err = add_keys(dev, c, 2 * ent->limit - ent->cur);
- if (err)
- return err;
- }
-
+ err = resize_available_mrs(ent, 0, true);
+ spin_unlock_irq(&ent->lock);
+ if (err)
+ return err;
return count;
}
@@ -359,68 +401,119 @@ static const struct file_operations limit_fops = {
.read = limit_read,
};
-static int someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mr_cache *cache)
{
- int i;
+ unsigned int i;
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
- if (cache->ent[i].cur < cache->ent[i].limit)
- return 1;
+ struct mlx5_cache_ent *ent = &cache->ent[i];
+ bool ret;
+
+ spin_lock_irq(&ent->lock);
+ ret = ent->available_mrs < ent->limit;
+ spin_unlock_irq(&ent->lock);
+ if (ret)
+ return true;
}
+ return false;
+}
- return 0;
+/*
+ * Check if the bucket is outside the high/low water mark and schedule an
+ * async update. The cache refill has hysteresis: once the low water mark
+ * is hit, it is refilled up to the high mark.
+ */
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
+{
+ lockdep_assert_held(&ent->lock);
+
+ if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
+ return;
+ if (ent->available_mrs < ent->limit) {
+ ent->fill_to_high_water = true;
+ queue_work(ent->dev->cache.wq, &ent->work);
+ } else if (ent->fill_to_high_water &&
+ ent->available_mrs + ent->pending < 2 * ent->limit) {
+ /*
+ * Once we start populating due to hitting a low water mark,
+ * continue until we pass the high water mark.
+ */
+ queue_work(ent->dev->cache.wq, &ent->work);
+ } else if (ent->available_mrs == 2 * ent->limit) {
+ ent->fill_to_high_water = false;
+ } else if (ent->available_mrs > 2 * ent->limit) {
+ /* Queue deletion of excess entries */
+ ent->fill_to_high_water = false;
+ if (ent->pending)
+ queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ msecs_to_jiffies(1000));
+ else
+ queue_work(ent->dev->cache.wq, &ent->work);
+ }
}
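+
+/*
+ * Illustration: with limit == 8, a refill is queued once available_mrs
+ * drops below 8 and continues while available_mrs + pending < 16;
+ * shrinking is queued only once available_mrs exceeds 16, so the pool
+ * settles between the two water marks instead of thrashing.
+ */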
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
struct mlx5_mr_cache *cache = &dev->cache;
- int i = order2idx(dev, ent->order);
int err;
- if (cache->stopped)
- return;
+ spin_lock_irq(&ent->lock);
+ if (ent->disabled)
+ goto out;
- ent = &dev->cache.ent[i];
- if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
- err = add_keys(dev, i, 1);
- if (ent->cur < 2 * ent->limit) {
- if (err == -EAGAIN) {
- mlx5_ib_dbg(dev, "returned eagain, order %d\n",
- i + 2);
- queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(3));
- } else if (err) {
- mlx5_ib_warn(dev, "command failed order %d, err %d\n",
- i + 2, err);
+ if (ent->fill_to_high_water &&
+ ent->available_mrs + ent->pending < 2 * ent->limit &&
+ !READ_ONCE(dev->fill_delay)) {
+ spin_unlock_irq(&ent->lock);
+ err = add_keys(ent, 1);
+ spin_lock_irq(&ent->lock);
+ if (ent->disabled)
+ goto out;
+ if (err) {
+ /*
+ * EAGAIN only happens if pending is positive, so we
+ * will be rescheduled from create_mkey_callback(). The
+ * only failure path here is ENOMEM.
+ */
+ if (err != -EAGAIN) {
+ mlx5_ib_warn(
+ dev,
+ "command failed order %d, err %d\n",
+ ent->order, err);
queue_delayed_work(cache->wq, &ent->dwork,
msecs_to_jiffies(1000));
- } else {
- queue_work(cache->wq, &ent->work);
}
}
- } else if (ent->cur > 2 * ent->limit) {
+ } else if (ent->available_mrs > 2 * ent->limit) {
+ bool need_delay;
+
/*
- * The remove_keys() logic is performed as garbage collection
- * task. Such task is intended to be run when no other active
- * processes are running.
+ * The remove_cache_mr_locked() logic is performed as a
+ * garbage collection task. Such a task is intended to run
+ * when no other active processes are running.
*
* The need_resched() will return TRUE if there are user tasks
* to be activated in near future.
*
- * In such case, we don't execute remove_keys() and postpone
- * the garbage collection work to try to run in next cycle,
- * in order to free CPU resources to other tasks.
+ * In such a case, we don't execute remove_cache_mr_locked()
+ * and postpone the garbage collection work so it can try to
+ * run in the next cycle, in order to free CPU resources for
+ * other tasks.
*/
- if (!need_resched() && !someone_adding(cache) &&
- time_after(jiffies, cache->last_add + 300 * HZ)) {
- remove_keys(dev, i, 1);
- if (ent->cur > ent->limit)
- queue_work(cache->wq, &ent->work);
- } else {
+ spin_unlock_irq(&ent->lock);
+ need_delay = need_resched() || someone_adding(cache) ||
+ time_after(jiffies,
+ READ_ONCE(cache->last_add) + 300 * HZ);
+ spin_lock_irq(&ent->lock);
+ if (ent->disabled)
+ goto out;
+ if (need_delay)
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
- }
+ remove_cache_mr_locked(ent);
+ queue_adjust_cache_locked(ent);
}
+out:
+ spin_unlock_irq(&ent->lock);
}
static void delayed_cache_work_func(struct work_struct *work)
@@ -439,117 +532,95 @@ static void cache_work_func(struct work_struct *work)
__cache_work_func(ent);
}
-struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
+/* Allocate a special entry from the cache */
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ unsigned int entry)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
- int err;
- if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
+ if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
+ entry >= ARRAY_SIZE(cache->ent)))
return ERR_PTR(-EINVAL);
- }
ent = &cache->ent[entry];
- while (1) {
- spin_lock_irq(&ent->lock);
- if (list_empty(&ent->head)) {
- spin_unlock_irq(&ent->lock);
-
- err = add_keys(dev, entry, 1);
- if (err && err != -EAGAIN)
- return ERR_PTR(err);
-
- wait_for_completion(&ent->compl);
- } else {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
- list);
- list_del(&mr->list);
- ent->cur--;
- spin_unlock_irq(&ent->lock);
- if (ent->cur < ent->limit)
- queue_work(cache->wq, &ent->work);
+ spin_lock_irq(&ent->lock);
+ if (list_empty(&ent->head)) {
+ spin_unlock_irq(&ent->lock);
+ mr = create_cache_mr(ent);
+ if (IS_ERR(mr))
return mr;
- }
+ } else {
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->available_mrs--;
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->lock);
}
+ return mr;
}
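+
+/*
+ * Only the special entries above MR_CACHE_LAST_STD_ENTRY (e.g. the ODP
+ * implicit-MR buckets) are served here; order-sized buckets go through
+ * get_cache_mr() instead.
+ */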
-static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
+/* Return an MR already available in the cache */
+static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
- struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_ib_dev *dev = req_ent->dev;
struct mlx5_ib_mr *mr = NULL;
- struct mlx5_cache_ent *ent;
- int last_umr_cache_entry;
- int c;
- int i;
+ struct mlx5_cache_ent *ent = req_ent;
- c = order2idx(dev, order);
- last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
- if (c < 0 || c > last_umr_cache_entry) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
- return NULL;
- }
-
- for (i = c; i <= last_umr_cache_entry; i++) {
- ent = &cache->ent[i];
-
- mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
+ /* Try larger MR pools from the cache to satisfy the allocation */
+ for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
+ mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
+ ent - dev->cache.ent);
spin_lock_irq(&ent->lock);
if (!list_empty(&ent->head)) {
mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
list);
list_del(&mr->list);
- ent->cur--;
+ ent->available_mrs--;
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
- if (ent->cur < ent->limit)
- queue_work(cache->wq, &ent->work);
break;
}
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
-
- queue_work(cache->wq, &ent->work);
}
if (!mr)
- cache->ent[c].miss++;
+ req_ent->miss++;
return mr;
}
+static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_cache_ent *ent = mr->cache_ent;
+
+ mr->cache_ent = NULL;
+ spin_lock_irq(&ent->lock);
+ ent->total_mrs--;
+ spin_unlock_irq(&ent->lock);
+}
+
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- struct mlx5_mr_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
- int shrink = 0;
- int c;
+ struct mlx5_cache_ent *ent = mr->cache_ent;
- if (!mr->allocated_from_cache)
+ if (!ent)
return;
- c = order2idx(dev, mr->order);
- WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
-
if (mlx5_mr_cache_invalidate(mr)) {
- mr->allocated_from_cache = false;
+ detach_mr_from_cache(mr);
destroy_mkey(dev, mr);
- ent = &cache->ent[c];
- if (ent->cur < ent->limit)
- queue_work(cache->wq, &ent->work);
return;
}
- ent = &cache->ent[c];
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
- ent->cur++;
- if (ent->cur > 2 * ent->limit)
- shrink = 1;
+ ent->available_mrs++;
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
-
- if (shrink)
- queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
@@ -569,8 +640,8 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
}
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
list_move(&mr->list, &del_list);
- ent->cur--;
- ent->size--;
+ ent->available_mrs--;
+ ent->total_mrs--;
spin_unlock_irq(&ent->lock);
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
@@ -608,7 +679,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
dir = debugfs_create_dir(ent->name, cache->root);
debugfs_create_file("size", 0600, dir, ent, &size_fops);
debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
- debugfs_create_u32("cur", 0400, dir, &ent->cur);
+ debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
debugfs_create_u32("miss", 0600, dir, &ent->miss);
}
}
@@ -617,7 +688,7 @@ static void delay_time_func(struct timer_list *t)
{
struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
- dev->fill_delay = 0;
+ WRITE_ONCE(dev->fill_delay, 0);
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
@@ -643,7 +714,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->dev = dev;
ent->limit = 0;
- init_completion(&ent->compl);
INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
@@ -665,7 +735,9 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
ent->limit = 0;
- queue_work(cache->wq, &ent->work);
+ spin_lock_irq(&ent->lock);
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->lock);
}
mlx5_mr_cache_debugfs_init(dev);
@@ -675,13 +747,20 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
- int i;
+ unsigned int i;
if (!dev->cache.wq)
return 0;
- dev->cache.stopped = 1;
- flush_workqueue(dev->cache.wq);
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ struct mlx5_cache_ent *ent = &dev->cache.ent[i];
+
+ spin_lock_irq(&ent->lock);
+ ent->disabled = true;
+ spin_unlock_irq(&ent->lock);
+ cancel_work_sync(&ent->work);
+ cancel_delayed_work_sync(&ent->dwork);
+ }
mlx5_mr_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
@@ -876,31 +955,37 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
return err;
}
-static struct mlx5_ib_mr *alloc_mr_from_cache(
- struct ib_pd *pd, struct ib_umem *umem,
- u64 virt_addr, u64 len, int npages,
- int page_shift, int order, int access_flags)
+static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
+ unsigned int order)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+
+ if (order < cache->ent[0].order)
+ return &cache->ent[0];
+ order = order - cache->ent[0].order;
+ if (order > MR_CACHE_LAST_STD_ENTRY)
+ return NULL;
+ return &cache->ent[order];
+}
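+
+/*
+ * Example: with cache->ent[0].order == 2, an order-5 request maps to
+ * cache->ent[3]; orders past MR_CACHE_LAST_STD_ENTRY return NULL, which
+ * alloc_mr_from_cache() below turns into -E2BIG.
+ */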
+
+static struct mlx5_ib_mr *
+alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
+ u64 len, int npages, int page_shift, unsigned int order,
+ int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
struct mlx5_ib_mr *mr;
- int err = 0;
- int i;
-
- for (i = 0; i < 1; i++) {
- mr = alloc_cached_mr(dev, order);
- if (mr)
- break;
- err = add_keys(dev, order2idx(dev, order), 1);
- if (err && err != -EAGAIN) {
- mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
- break;
- }
+ if (!ent)
+ return ERR_PTR(-E2BIG);
+ mr = get_cache_mr(ent);
+ if (!mr) {
+ mr = create_cache_mr(ent);
+ if (IS_ERR(mr))
+ return mr;
}
- if (!mr)
- return ERR_PTR(-EAGAIN);
-
mr->ibmr.pd = pd;
mr->umem = umem;
mr->access_flags = access_flags;
@@ -1474,10 +1559,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
/*
* UMR can't be used - MKey needs to be replaced.
*/
- if (mr->allocated_from_cache)
- err = mlx5_mr_cache_invalidate(mr);
- else
- err = destroy_mkey(dev, mr);
+ if (mr->cache_ent)
+ detach_mr_from_cache(mr);
+ err = destroy_mkey(dev, mr);
if (err)
goto err;
@@ -1489,8 +1573,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mr = to_mmr(ib_mr);
goto err;
}
-
- mr->allocated_from_cache = false;
} else {
/*
* Send a UMR WQE
@@ -1577,8 +1659,6 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int allocated_from_cache = mr->allocated_from_cache;
-
if (mr->sig) {
if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_memory.psv_idx))
@@ -1593,7 +1673,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig = NULL;
}
- if (!allocated_from_cache) {
+ if (!mr->cache_ent) {
destroy_mkey(dev, mr);
mlx5_free_priv_descs(mr);
}
@@ -1610,7 +1690,7 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
else
clean_mr(dev, mr);
- if (mr->allocated_from_cache)
+ if (mr->cache_ent)
mlx5_mr_cache_free(dev, mr);
else
kfree(mr);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index bf50cd91f472..3de7606d4a1a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -197,7 +197,7 @@ static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
odp->private = NULL;
mutex_unlock(&odp->umem_mutex);
- if (!mr->allocated_from_cache) {
+ if (!mr->cache_ent) {
mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
WARN_ON(mr->descs);
}
diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c
new file mode 100644
index 000000000000..cac878a70edb
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qos.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static bool pp_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return (MLX5_CAP_GEN(dev->mdev, qos) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_pp *pp_entry;
+ void *in_ctx;
+ u16 uid;
+ int inlen;
+ u32 flags;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ /* The allocated entry can be used only by a DEVX context */
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ dev = to_mdev(c->ibucontext.device);
+ pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
+ if (!pp_entry)
+ return -ENOMEM;
+
+ in_ctx = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ memcpy(rl_raw, in_ctx, inlen);
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
+ if (err)
+ goto err;
+
+ uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
+ c->devx_uid : MLX5_SHARED_RESOURCE_UID;
+
+ err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
+ (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
+ &pp_entry->index);
+ if (err)
+ goto err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ &pp_entry->index, sizeof(pp_entry->index));
+ if (err)
+ goto clean;
+
+ pp_entry->mdev = dev->mdev;
+ uobj->object = pp_entry;
+ return 0;
+
+clean:
+ mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
+err:
+ kfree(pp_entry);
+ return err;
+}
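+
+/*
+ * Note: the input context may legally be shorter than
+ * set_pp_rate_limit_context (UVERBS_ATTR_SIZE allows from one byte up),
+ * so it is copied into the zero-initialized rl_raw buffer and the
+ * unspecified tail stays zero.
+ */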
+
+static int pp_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_pp *pp_entry = uobject->object;
+
+ mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
+ kfree(pp_entry);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_PP_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
+ UVERBS_ATTR_SIZE(1,
+ MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ enum mlx5_ib_uapi_pp_alloc_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_PP_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
+ UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));
+
+
+const struct uapi_definition mlx5_ib_qos_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_PP,
+ UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8fe149e808af..1456db4b6295 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -697,6 +697,9 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
{
int bfregn = -ENOMEM;
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
mutex_lock(&bfregi->lock);
if (bfregi->ver >= 2) {
bfregn = alloc_high_class_bfreg(dev, bfregi);
@@ -768,6 +771,9 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
u32 index_of_sys_page;
u32 offset;
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
@@ -919,6 +925,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
void *qpc;
int err;
u16 uid;
+ u32 uar_flags;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (err) {
@@ -928,24 +935,29 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
ibucontext);
- if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
+ uar_flags = ucmd.flags & (MLX5_QP_FLAG_UAR_PAGE_INDEX |
+ MLX5_QP_FLAG_BFREG_INDEX);
+ switch (uar_flags) {
+ case MLX5_QP_FLAG_UAR_PAGE_INDEX:
+ uar_index = ucmd.bfreg_index;
+ bfregn = MLX5_IB_INVALID_BFREG;
+ break;
+ case MLX5_QP_FLAG_BFREG_INDEX:
uar_index = bfregn_to_uar_index(dev, &context->bfregi,
ucmd.bfreg_index, true);
if (uar_index < 0)
return uar_index;
-
bfregn = MLX5_IB_INVALID_BFREG;
- } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
- /*
- * TBD: should come from the verbs when we have the API
- */
- /* In CROSS_CHANNEL CQ and QP must use the same UAR */
- bfregn = MLX5_CROSS_CHANNEL_BFREG;
- }
- else {
+ break;
+ case 0:
+ if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ return -EINVAL;
bfregn = alloc_bfreg(dev, &context->bfregi);
if (bfregn < 0)
return bfregn;
+ break;
+ default:
+ return -EINVAL;
}
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
@@ -2100,6 +2112,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_UAR_PAGE_INDEX |
MLX5_QP_FLAG_TYPE_DCI |
MLX5_QP_FLAG_TYPE_DCT))
return -EINVAL;
@@ -2789,7 +2802,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
mlx5_ib_dbg(dev, "unsupported qp type %d\n",
init_attr->qp_type);
/* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 78a48aea3faf..fa808582b08b 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -58,7 +58,7 @@ struct mthca_user_db_table {
u64 uvirt;
struct scatterlist mem;
int refcount;
- } page[0];
+ } page[];
};
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index da9b8f9b884f..f9a2e65e2ff5 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -68,7 +68,7 @@ struct mthca_icm_table {
int lowmem;
int coherent;
struct mutex mutex;
- struct mthca_icm *icm[0];
+ struct mthca_icm *icm[];
};
struct mthca_icm_iter {
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index ac19d57803b5..69a3e4f62fb1 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -561,7 +561,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
if (err) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index d47ea675734b..10e343894595 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1111,7 +1111,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
(attrs->qp_type != IB_QPT_UD)) {
pr_err("%s(%d) unsupported qp type=0x%x requested\n",
__func__, dev->id, attrs->qp_type);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
/* Skip the check for QP1 to support CM size of 128 */
if ((attrs->qp_type != IB_QPT_GSI) &&
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 484b555150e0..a5bd3adaf90a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1186,7 +1186,7 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: unsupported qp type=0x%x requested\n",
attrs->qp_type);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (attrs->cap.max_send_wr > qattr->max_sqe) {
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 5ef93f8f17a1..7508abb6a0fa 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -39,7 +39,6 @@
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
-#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
@@ -1503,7 +1502,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
unsigned i, ctxt;
int ret;
- get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 8bf414b47b96..dc0e81f3b6f4 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -177,7 +177,6 @@ struct qib_ibdev {
struct timer_list mem_timer;
struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys;
- u32 qp_rnd; /* random bytes for hash */
u32 n_piowait;
u32 n_txwait;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 556b8e44a51c..71f82339446c 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -504,7 +504,7 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
trans_spec = cmd.spec;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 70be49b1ca05..7ec8991ace67 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -77,7 +77,7 @@ struct usnic_uiom_reg {
struct usnic_uiom_chunk {
struct list_head list;
int nents;
- struct scatterlist page_list[0];
+ struct scatterlist page_list[];
};
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 9de1281f9a3b..afcc2abcf55c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -217,7 +217,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
init_attr->qp_type != IB_QPT_GSI) {
dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
if (is_srq && !dev->dsr->caps.max_srq) {
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 7858d499db03..0e1b291d2cec 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1220,7 +1220,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
default:
/* Don't support raw QPs */
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EOPNOTSUPP);
}
init_attr->cap.max_inline_data = 0;
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 986265ad6e79..72b031ab7092 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -284,12 +284,6 @@ static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
&gid->global.interface_id);
}
-static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
-{
- return container_of(ibucontext, struct rvt_ucontext, ibucontext);
-}
-
/**
* rvt_alloc_ucontext - Allocate a user context
* @uctx: Verbs context
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 0946a301a5c5..4afdd2e20883 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -103,6 +103,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
+ rxe->ndev->dev_addr);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index ec21f616ac98..6c11c3aeeca6 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -590,15 +590,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
int err;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
+ int max_rd_atomic = attr->max_rd_atomic ?
+ roundup_pow_of_two(attr->max_rd_atomic) : 0;
qp->attr.max_rd_atomic = max_rd_atomic;
atomic_set(&qp->req.rd_atomic, max_rd_atomic);
}
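+
+ /*
+ * Note: roundup_pow_of_two(0) is undefined, which is why a zero
+ * value from userspace (meaning no RDMA read/atomic resources)
+ * must bypass the rounding here and in the destination case
+ * below.
+ */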
if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- int max_dest_rd_atomic =
- __roundup_pow_of_two(attr->max_dest_rd_atomic);
+ int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
+ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index acd0a925481c..8ef17d617022 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -63,7 +63,7 @@ struct rxe_queue_buf {
__u32 pad_2[31];
__u32 consumer_index;
__u32 pad_3[31];
- __u8 data[0];
+ __u8 data[];
};
struct rxe_queue {
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index c5651a96b196..559e5fd3bad8 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1769,14 +1769,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
return 0;
}
-static int siw_listen_address(struct iw_cm_id *id, int backlog,
- struct sockaddr *laddr, int addr_family)
+/*
+ * siw_create_listen - Create resources for a listener's IWCM ID @id
+ *
+ * Starts listening on the socket address id->local_addr.
+ */
+int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
+ int addr_family = id->local_addr.ss_family;
int rv = 0, s_val;
+ if (addr_family != AF_INET && addr_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
return rv;
@@ -1791,9 +1800,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
siw_dbg(id->device, "setsockopt error: %d\n", rv);
goto error;
}
- rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
- sizeof(struct sockaddr_in) :
- sizeof(struct sockaddr_in6));
+ if (addr_family == AF_INET) {
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ } else {
+ struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv6_addr_any(&laddr->sin6_addr))
+ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in6));
+ }
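+
+ /*
+ * With sk_bound_dev_if set, the single wildcard listener only
+ * accepts connections arriving through this siw device's netdev,
+ * roughly matching the per-address listening sockets this code
+ * replaces.
+ */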
if (rv) {
siw_dbg(id->device, "socket bind error: %d\n", rv);
goto error;
@@ -1852,7 +1877,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
- siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
+ siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
return 0;
@@ -1910,106 +1935,6 @@ static void siw_drop_listeners(struct iw_cm_id *id)
}
}
-/*
- * siw_create_listen - Create resources for a listener's IWCM ID @id
- *
- * Listens on the socket address id->local_addr.
- *
- * If the listener's @id provides a specific local IP address, at most one
- * listening socket is created and associated with @id.
- *
- * If the listener's @id provides the wildcard (zero) local IP address,
- * a separate listen is performed for each local IP address of the device
- * by creating a listening socket and binding to that local IP address.
- *
- */
-int siw_create_listen(struct iw_cm_id *id, int backlog)
-{
- struct net_device *dev = to_siw_dev(id->device)->netdev;
- int rv = 0, listeners = 0;
-
- siw_dbg(id->device, "backlog %d\n", backlog);
-
- /*
- * For each attached address of the interface, create a
- * listening socket, if id->local_addr is the wildcard
- * IP address or matches the IP address.
- */
- if (id->local_addr.ss_family == AF_INET) {
- struct in_device *in_dev = in_dev_get(dev);
- struct sockaddr_in s_laddr;
- const struct in_ifaddr *ifa;
-
- if (!in_dev) {
- rv = -ENODEV;
- goto out;
- }
- memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
-
- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
-
- rtnl_lock();
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
- if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) ||
- s_laddr.sin_addr.s_addr == ifa->ifa_address) {
- s_laddr.sin_addr.s_addr = ifa->ifa_address;
-
- rv = siw_listen_address(id, backlog,
- (struct sockaddr *)&s_laddr,
- AF_INET);
- if (!rv)
- listeners++;
- }
- }
- rtnl_unlock();
- in_dev_put(in_dev);
- } else if (id->local_addr.ss_family == AF_INET6) {
- struct inet6_dev *in6_dev = in6_dev_get(dev);
- struct inet6_ifaddr *ifp;
- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
-
- if (!in6_dev) {
- rv = -ENODEV;
- goto out;
- }
- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
-
- rtnl_lock();
- list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
- continue;
- if (ipv6_addr_any(&s_laddr->sin6_addr) ||
- ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
- struct sockaddr_in6 bind_addr = {
- .sin6_family = AF_INET6,
- .sin6_port = s_laddr->sin6_port,
- .sin6_flowinfo = 0,
- .sin6_addr = ifp->addr,
- .sin6_scope_id = dev->ifindex };
-
- rv = siw_listen_address(id, backlog,
- (struct sockaddr *)&bind_addr,
- AF_INET6);
- if (!rv)
- listeners++;
- }
- }
- rtnl_unlock();
- in6_dev_put(in6_dev);
- } else {
- rv = -EAFNOSUPPORT;
- }
-out:
- if (listeners)
- rv = 0;
- else if (!rv)
- rv = -EINVAL;
-
- siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
-
- return rv;
-}
-
int siw_destroy_listen(struct iw_cm_id *id)
{
if (!id->provider_data) {
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 9ccce2909ac4..650520244ed0 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -332,7 +332,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
struct siw_srq *srq;
struct siw_wqe *wqe = NULL;
bool srq_event = false;
- unsigned long flags;
+ unsigned long uninitialized_var(flags);
srq = qp->srq;
if (srq) {
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 07e30138aaa1..aeb842bc7a1e 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -165,15 +165,16 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
struct ib_port_attr *attr)
{
struct siw_device *sdev = to_siw_dev(base_dev);
+ int rv;
memset(attr, 0, sizeof(*attr));
- attr->active_mtu = attr->max_mtu;
- attr->active_speed = 2;
- attr->active_width = 2;
+ rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
+ &attr->active_width);
attr->gid_tbl_len = 1;
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
attr->pkey_tbl_len = 1;
@@ -192,7 +193,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
* attr->subnet_timeout = 0;
* attr->init_type_repy = 0;
*/
- return 0;
+ return rv;
}
int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
@@ -322,7 +323,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
- rv = -EINVAL;
+ rv = -EOPNOTSUPP;
goto err_out;
}
if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 2aa3457a30ce..e188a95984b5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -838,6 +838,4 @@ extern int ipoib_debug_level;
#define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
-extern const char ipoib_driver_version[];
-
#endif /* _IPOIB_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index a10a0c2ca2da..67a21fdf5367 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -68,9 +68,6 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->version, ipoib_driver_version,
- sizeof(drvinfo->version));
-
strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 4a0d3a9e72e1..81b8227214f1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -52,10 +52,6 @@
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
-#define DRV_VERSION "1.0.0"
-
-const char ipoib_driver_version[] = DRV_VERSION;
-
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 7a8f24de3631..999ef7cdd05e 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -292,12 +292,27 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
{
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+ struct iser_fr_desc *desc;
+ struct ib_mr_status mr_status;
- if (!reg->mem_h)
+ desc = reg->mem_h;
+ if (!desc)
return;
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
- reg->mem_h);
+ /*
+	 * The signature MR cannot be invalidated and reused without checking.
+	 * libiscsi calls the check_protection transport handler only if a
+	 * SCSI-Response is received, so the signature MR is not checked when
+	 * the task completes for some other reason, such as a timeout or
+	 * error handling. That is why we must check the signature MR here
+	 * before returning it to the free pool.
+ */
+ if (unlikely(desc->sig_protected)) {
+ desc->sig_protected = false;
+ ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
+ &mr_status);
+ }
+ device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc);
reg->mem_h = NULL;
}
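The hunk above guards against recycling a signature MR that still carries an unconsumed status. A minimal sketch of the same pattern in isolation: ib_check_mr_status() and IB_MR_CHECK_SIG_STATUS are the verbs helpers used in the hunk, while struct sig_desc and sig_pool_put() are hypothetical stand-ins for a pool-backed descriptor:

	/* Sketch: consume any pending signature status before returning the
	 * MR to a free pool; sig_pool_put() and struct sig_desc are made up. */
	static void sig_desc_put(struct sig_desc *desc)
	{
		struct ib_mr_status mr_status;

		if (desc->sig_protected) {
			desc->sig_protected = false;
			ib_check_mr_status(desc->sig_mr, IB_MR_CHECK_SIG_STATUS,
					   &mr_status);
		}
		sig_pool_put(desc);
	}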
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index 4480092c68e0..d324312a373c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -239,7 +239,7 @@ struct opa_veswport_mactable_entry {
* @offset: mac table starting offset
* @num_entries: Number of entries to get or set
* @mac_tbl_digest: mac table digest
- * @tbl_entries[]: Array of table entries
+ * @tbl_entries: Array of table entries
*
* The EM sends down this structure in a MAD indicating
* the starting offset in the forwarding table that this
@@ -258,7 +258,7 @@ struct opa_veswport_mactable {
__be16 offset;
__be16 num_entries;
__be32 mac_tbl_digest;
- struct opa_veswport_mactable_entry tbl_entries[0];
+ struct opa_veswport_mactable_entry tbl_entries[];
} __packed;
/**
@@ -440,7 +440,7 @@ struct opa_veswport_iface_macs {
__be16 num_macs_in_msg;
__be16 tot_macs_in_lst;
__be16 gen_count;
- struct opa_vnic_iface_mac_entry entry[0];
+ struct opa_vnic_iface_mac_entry entry[];
} __packed;
/**
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
index 8ad7da989a0e..42d557dff19d 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -125,8 +125,6 @@ static void vnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, opa_vnic_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 6dbc08e1a6a6..dd942dd642bd 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -292,7 +292,6 @@ struct opa_vnic_mac_tbl_node {
hlist_for_each_entry(obj, &name[bkt], member)
extern char opa_vnic_driver_name[];
-extern const char opa_vnic_driver_version[];
struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
u8 port_num, u8 vport_num);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index be5befd92d16..6e8d650c17c7 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -59,9 +59,7 @@
#include "opa_vnic_internal.h"
-#define DRV_VERSION "1.0"
char opa_vnic_driver_name[] = "opa_vnic";
-const char opa_vnic_driver_version[] = DRV_VERSION;
/*
* The trap service level is kept in bits 3 to 7 in the trap_sl_rsvd
@@ -1041,9 +1039,6 @@ static int __init opa_vnic_init(void)
{
int rc;
- pr_info("OPA Virtual Network Driver - v%s\n",
- opa_vnic_driver_version);
-
rc = ib_register_client(&opa_vnic_client);
if (rc)
pr_err("VNIC driver register failed %d\n", rc);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 5359ece561ca..6fabcc2faf1f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -309,7 +309,7 @@ struct srp_fr_pool {
int max_page_list_len;
spinlock_t lock;
struct list_head free_list;
- struct srp_fr_desc desc[0];
+ struct srp_fr_desc desc[];
};
/**
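The [0] -> [] changes in this series convert GCC zero-length arrays to C99 flexible array members, which lets compiler and fortify checks catch out-of-bounds use. A sketch of the matching allocation pattern with the kernel's struct_size() helper from <linux/overflow.h>; struct fr_desc and the field names are illustrative:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct fr_desc { u64 addr; };		/* illustrative element type */

	struct fr_pool {
		int size;
		struct fr_desc desc[];		/* flexible array member */
	};

	/* One allocation holds the header plus n trailing descriptors;
	 * struct_size() guards the size arithmetic against overflow. */
	static struct fr_pool *fr_pool_alloc(int n)
	{
		struct fr_pool *pool = kzalloc(struct_size(pool, desc, n),
					       GFP_KERNEL);

		if (pool)
			pool->size = n;
		return pool;
	}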
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 76938ece1658..a88f2f07bc27 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,9 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_BCM_VOTER
+ tristate
+
config INTERCONNECT_QCOM_MSM8916
tristate "Qualcomm MSM8916 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -23,6 +26,13 @@ config INTERCONNECT_QCOM_MSM8974
This is a driver for the Qualcomm Network-on-Chip on msm8974-based
platforms.
+config INTERCONNECT_QCOM_OSM_L3
+ tristate "Qualcomm OSM L3 interconnect driver"
+ depends on INTERCONNECT_QCOM || COMPILE_TEST
+ help
+ Say y here to support the Operating State Manager (OSM) interconnect
+ driver which controls the scaling of L3 caches on Qualcomm SoCs.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -32,10 +42,25 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH
+ tristate
+
+config INTERCONNECT_QCOM_SC7180
+ tristate "Qualcomm SC7180 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sc7180-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
depends on INTERCONNECT_QCOM
depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
help
This is a driver for the Qualcomm Network-on-Chip on sdm845-based
platforms.
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index e8271575e3d8..3a047fe6e45a 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,13 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
+icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8974-objs := msm8974.o
+icc-osm-l3-objs := osm-l3.o
qnoc-qcs404-objs := qcs404.o
+icc-rpmh-obj := icc-rpmh.o
+qnoc-sc7180-objs := sc7180.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
+obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
+obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
new file mode 100644
index 000000000000..2adfde8cdf19
--- /dev/null
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <asm/div64.h>
+#include <linux/interconnect-provider.h>
+#include <linux/list_sort.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+static LIST_HEAD(bcm_voters);
+static DEFINE_MUTEX(bcm_voter_lock);
+
+/**
+ * struct bcm_voter - Bus Clock Manager voter
+ * @dev: reference to the device that communicates with the BCM
+ * @np: reference to the device node to match bcm voters
+ * @lock: mutex to protect commit and wake/sleep lists in the voter
+ * @commit_list: list containing bcms to be committed to hardware
+ * @ws_list: list containing bcms that have different wake/sleep votes
+ * @voter_node: list of bcm voters
+ */
+struct bcm_voter {
+ struct device *dev;
+ struct device_node *np;
+ struct mutex lock;
+ struct list_head commit_list;
+ struct list_head ws_list;
+ struct list_head voter_node;
+};
+
+static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
+{
+ const struct qcom_icc_bcm *bcm_a =
+ list_entry(a, struct qcom_icc_bcm, list);
+ const struct qcom_icc_bcm *bcm_b =
+ list_entry(b, struct qcom_icc_bcm, list);
+
+ if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
+ return -1;
+ else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
+ return 0;
+ else
+ return 1;
+}
+
+static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+{
+ size_t i, bucket;
+ u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
+ u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
+ u64 temp;
+
+ for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg[bucket] = max(agg_avg[bucket], temp);
+
+ temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak[bucket] = max(agg_peak[bucket], temp);
+ }
+
+ temp = agg_avg[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x[bucket] = temp;
+
+ temp = agg_peak[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y[bucket] = temp;
+ }
+
+ if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
+ bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
+ }
+}
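To make the scaling in bcm_aggregate() concrete, a self-contained sketch of the per-bucket math with illustrative numbers (none of these values come from real command-db aux data):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t sum_avg = 800000;		/* node avg bw, kB/s */
		uint64_t buswidth = 16, channels = 2;	/* node properties */
		uint64_t width = 8, unit = 4096;	/* BCM aux data */

		/* Scale the node bandwidth into the BCM's width domain */
		uint64_t temp = sum_avg * width / (buswidth * channels);

		/* Convert to the unit RPMh expects, as bcm_aggregate() does */
		uint64_t vote_x = temp * 1000 / unit;

		printf("vote_x = %llu\n", (unsigned long long)vote_x);
		return 0;
	}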
+
+static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
+ u32 addr, bool commit)
+{
+ bool valid = true;
+
+ if (!cmd)
+ return;
+
+ if (vote_x == 0 && vote_y == 0)
+ valid = false;
+
+ if (vote_x > BCM_TCS_CMD_VOTE_MASK)
+ vote_x = BCM_TCS_CMD_VOTE_MASK;
+
+ if (vote_y > BCM_TCS_CMD_VOTE_MASK)
+ vote_y = BCM_TCS_CMD_VOTE_MASK;
+
+ cmd->addr = addr;
+ cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
+
+ /*
+	 * Set the wait for completion flag on commands that need to be completed
+ * before the next command.
+ */
+ if (commit)
+ cmd->wait = true;
+}
+
+static void tcs_list_gen(struct list_head *bcm_list, int bucket,
+ struct tcs_cmd tcs_list[MAX_BCMS],
+ int n[MAX_VCD + 1])
+{
+ struct qcom_icc_bcm *bcm;
+ bool commit;
+ size_t idx = 0, batch = 0, cur_vcd_size = 0;
+
+ memset(n, 0, sizeof(int) * (MAX_VCD + 1));
+
+ list_for_each_entry(bcm, bcm_list, list) {
+ commit = false;
+ cur_vcd_size++;
+ if ((list_is_last(&bcm->list, bcm_list)) ||
+ bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
+ commit = true;
+ cur_vcd_size = 0;
+ }
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
+ bcm->vote_y[bucket], bcm->addr, commit);
+ idx++;
+ n[batch]++;
+ /*
+		 * Batch the BCMs in such a way that we do not split them across
+		 * multiple payloads when they share the same VCD. This is
+ * to ensure that every BCM is committed since we only set the
+ * commit bit on the last BCM request of every VCD.
+ */
+ if (n[batch] >= MAX_RPMH_PAYLOAD) {
+ if (!commit) {
+ n[batch] -= cur_vcd_size;
+ n[batch + 1] = cur_vcd_size;
+ }
+ batch++;
+ }
+ }
+}
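A self-contained sketch of the commit-bit placement that tcs_list_gen() implements: in a VCD-sorted command stream, only the last command of each VCD group carries the commit/wait flag (the VCD values below are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		int vcd[] = { 0, 0, 1, 2, 2, 2 };	/* VCD-sorted BCM list */
		int n = sizeof(vcd) / sizeof(vcd[0]);

		for (int i = 0; i < n; i++) {
			/* Commit only on the last entry of each VCD group */
			bool commit = (i == n - 1) || (vcd[i] != vcd[i + 1]);
			printf("cmd %d: vcd=%d commit=%d\n", i, vcd[i], commit);
		}
		return 0;
	}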
+
+/**
+ * of_bcm_voter_get - gets a bcm voter handle from DT node
+ * @dev: device pointer for the consumer device
+ * @name: name for the bcm voter device
+ *
+ * This function will match a device_node pointer for the phandle
+ * specified in the device DT and return a bcm_voter handle on success.
+ *
+ * Returns bcm_voter pointer or ERR_PTR() on error. EPROBE_DEFER is returned
+ * when a matching bcm voter is yet to be found.
+ */
+struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
+{
+ struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
+ struct bcm_voter *temp;
+ struct device_node *np, *node;
+ int idx = 0;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ if (name) {
+ idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ node = of_parse_phandle(np, "qcom,bcm-voters", idx);
+
+ mutex_lock(&bcm_voter_lock);
+ list_for_each_entry(temp, &bcm_voters, voter_node) {
+ if (temp->np == node) {
+ voter = temp;
+ break;
+ }
+ }
+ mutex_unlock(&bcm_voter_lock);
+
+ return voter;
+}
+EXPORT_SYMBOL_GPL(of_bcm_voter_get);
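A hedged sketch of how a provider driver would consume this lookup from its probe; example_icc_probe() and the surrounding driver are hypothetical, of_bcm_voter_get() is the API defined above:

	static int example_icc_probe(struct platform_device *pdev)
	{
		struct bcm_voter *voter;

		/* A NULL name selects index 0 of the qcom,bcm-voters phandles */
		voter = of_bcm_voter_get(&pdev->dev, NULL);
		if (IS_ERR(voter))
			return PTR_ERR(voter);	/* may be -EPROBE_DEFER */

		/* ... store the voter in the provider and register nodes ... */
		return 0;
	}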
+
+/**
+ * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
+ * @voter: voter that the bcms are being added to
+ * @bcm: bcm to add to the commit and wake/sleep lists
+ */
+void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
+{
+ if (!voter)
+ return;
+
+ mutex_lock(&voter->lock);
+ if (list_empty(&bcm->list))
+ list_add_tail(&bcm->list, &voter->commit_list);
+
+ if (list_empty(&bcm->ws_list))
+ list_add_tail(&bcm->ws_list, &voter->ws_list);
+
+ mutex_unlock(&voter->lock);
+}
+EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);
+
+/**
+ * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
+ * @voter: voter that needs flushing
+ *
+ * This function generates a set of AMC commands and flushes them to the BCM
+ * device associated with the voter. It conditionally generates WAKE and SLEEP
+ * commands based on deltas between the WAKE/SLEEP requirements. The ws_list
+ * persists through multiple commit requests and bcm nodes are removed only
+ * when the requirements for WAKE match SLEEP.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise.
+ */
+int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
+{
+ struct qcom_icc_bcm *bcm;
+ struct qcom_icc_bcm *bcm_tmp;
+ int commit_idx[MAX_VCD + 1];
+ struct tcs_cmd cmds[MAX_BCMS];
+ int ret = 0;
+
+ if (!voter)
+ return 0;
+
+ mutex_lock(&voter->lock);
+ list_for_each_entry(bcm, &voter->commit_list, list)
+ bcm_aggregate(bcm);
+
+ /*
+	 * Pre-sort the BCMs by VCD so that the generated command list groups
+	 * BCMs with the same VCD together. VCDs are numbered with the lowest
+	 * being the most time-expensive, ensuring that those commands are sent
+	 * earliest in the queue. The list needs to be sorted on every commit
+	 * since we can't guarantee the order in which the BCMs are added.
+ */
+ list_sort(NULL, &voter->commit_list, cmp_vcd);
+
+ /*
+	 * Construct the command list from the list of BCMs pre-sorted by VCD.
+ */
+ tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
+
+ if (!commit_idx[0])
+ goto out;
+
+ ret = rpmh_invalidate(voter->dev);
+ if (ret) {
+ pr_err("Error invalidating RPMH client (%d)\n", ret);
+ goto out;
+ }
+
+ ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
+ cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending AMC RPMH requests (%d)\n", ret);
+ goto out;
+ }
+
+ list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
+ list_del_init(&bcm->list);
+
+ list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
+ /*
+ * Only generate WAKE and SLEEP commands if a resource's
+ * requirements change as the execution environment transitions
+ * between different power states.
+ */
+ if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
+ bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
+ bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
+ bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
+ list_add_tail(&bcm->list, &voter->commit_list);
+ else
+ list_del_init(&bcm->ws_list);
+ }
+
+ if (list_empty(&voter->commit_list))
+ goto out;
+
+ list_sort(NULL, &voter->commit_list, cmp_vcd);
+
+ tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
+
+ ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
+ goto out;
+ }
+
+ tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
+
+ ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
+ goto out;
+ }
+
+out:
+ list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
+ list_del_init(&bcm->list);
+
+ mutex_unlock(&voter->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
+
+static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
+{
+ struct bcm_voter *voter;
+
+ voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
+ if (!voter)
+ return -ENOMEM;
+
+ voter->dev = &pdev->dev;
+ voter->np = pdev->dev.of_node;
+ mutex_init(&voter->lock);
+ INIT_LIST_HEAD(&voter->commit_list);
+ INIT_LIST_HEAD(&voter->ws_list);
+
+ mutex_lock(&bcm_voter_lock);
+ list_add_tail(&voter->voter_node, &bcm_voters);
+ mutex_unlock(&bcm_voter_lock);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_voter_of_match[] = {
+ { .compatible = "qcom,bcm-voter" },
+ { }
+};
+
+static struct platform_driver qcom_icc_bcm_voter_driver = {
+ .probe = qcom_icc_bcm_voter_probe,
+ .driver = {
+ .name = "bcm_voter",
+ .of_match_table = bcm_voter_of_match,
+ },
+};
+module_platform_driver(qcom_icc_bcm_voter_driver);
+
+MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/bcm-voter.h b/drivers/interconnect/qcom/bcm-voter.h
new file mode 100644
index 000000000000..0f64c0bab2c0
--- /dev/null
+++ b/drivers/interconnect/qcom/bcm-voter.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_BCM_VOTER_H__
+#define __DRIVERS_INTERCONNECT_QCOM_BCM_VOTER_H__
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#include "icc-rpmh.h"
+
+#define DEFINE_QBCM(_name, _bcmname, _keepalive, ...) \
+static struct qcom_icc_bcm _name = { \
+ .name = _bcmname, \
+ .keepalive = _keepalive, \
+ .num_nodes = ARRAY_SIZE(((struct qcom_icc_node *[]){ __VA_ARGS__ })), \
+ .nodes = { __VA_ARGS__ }, \
+}
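For reference, a hand expansion of one invocation from the sc7180 provider further down, DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi); the compound literal inside ARRAY_SIZE() counts the variadic node pointers at compile time:

	static struct qcom_icc_bcm bcm_mc0 = {
		.name = "MC0",
		.keepalive = true,
		.num_nodes = 1,		/* ARRAY_SIZE of { &ebi } */
		.nodes = { &ebi },
	};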
+
+struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name);
+void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm);
+int qcom_icc_bcm_voter_commit(struct bcm_voter *voter);
+
+#endif
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
new file mode 100644
index 000000000000..3ac5182c9ab2
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+/**
+ * qcom_icc_pre_aggregate - cleans up stale values from prior icc_set
+ * @node: icc node to operate on
+ */
+void qcom_icc_pre_aggregate(struct icc_node *node)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ qn->sum_avg[i] = 0;
+ qn->max_peak[i] = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
+
+/**
+ * qcom_icc_aggregate - aggregate bw for buckets indicated by tag
+ * @node: node to aggregate
+ * @tag: tag to indicate which buckets to aggregate
+ * @avg_bw: new bw to sum aggregate
+ * @peak_bw: new bw to max aggregate
+ * @agg_avg: existing aggregate avg bw val
+ * @agg_peak: existing aggregate peak bw val
+ */
+int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
+
+ qn = node->data;
+ qp = to_qcom_provider(node->provider);
+
+ if (!tag)
+ tag = QCOM_ICC_TAG_ALWAYS;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ if (tag & BIT(i)) {
+ qn->sum_avg[i] += avg_bw;
+ qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
+ }
+ }
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
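A self-contained sketch of the tag-to-bucket fan-out implemented above, with illustrative bandwidth values; the bucket indices mirror the QCOM_ICC_BUCKET_* defines in icc-rpmh.h:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_BUCKETS 3	/* AMC = 0, WAKE = 1, SLEEP = 2 */

	int main(void)
	{
		uint64_t sum_avg[NUM_BUCKETS] = { 0 };
		uint32_t tag = (1u << 1) | (1u << 2);	/* WAKE | SLEEP */
		uint32_t avg_bw = 100;			/* illustrative request */

		for (int i = 0; i < NUM_BUCKETS; i++)
			if (tag & (1u << i))
				sum_avg[i] += avg_bw;

		/* AMC stays 0: this vote only applies across power transitions */
		for (int i = 0; i < NUM_BUCKETS; i++)
			printf("bucket %d: %llu\n", i,
			       (unsigned long long)sum_avg[i]);
		return 0;
	}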
+
+/**
+ * qcom_icc_set - set the constraints based on path
+ * @src: source node for the path to set constraints on
+ * @dst: destination node for the path to set constraints on
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+
+ if (!src)
+ node = dst;
+ else
+ node = src;
+
+ qp = to_qcom_provider(node->provider);
+
+ qcom_icc_bcm_voter_commit(qp->voter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_set);
+
+/**
+ * qcom_icc_bcm_init - populates bcm aux data and connects qnodes
+ * @bcm: bcm to be initialized
+ * @dev: associated provider device
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
+{
+ struct qcom_icc_node *qn;
+ const struct bcm_db *data;
+ size_t data_count;
+ int i;
+
+	/* BCM is already initialised */
+ if (bcm->addr)
+ return 0;
+
+ bcm->addr = cmd_db_read_addr(bcm->name);
+ if (!bcm->addr) {
+ dev_err(dev, "%s could not find RPMh address\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ data = cmd_db_read_aux_data(bcm->name, &data_count);
+ if (IS_ERR(data)) {
+ dev_err(dev, "%s command db read error (%ld)\n",
+ bcm->name, PTR_ERR(data));
+ return PTR_ERR(data);
+ }
+ if (!data_count) {
+ dev_err(dev, "%s command db missing or partial aux data\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm->aux_data.unit = le32_to_cpu(data->unit);
+ bcm->aux_data.width = le16_to_cpu(data->width);
+ bcm->aux_data.vcd = data->vcd;
+ bcm->aux_data.reserved = data->reserved;
+ INIT_LIST_HEAD(&bcm->list);
+ INIT_LIST_HEAD(&bcm->ws_list);
+
+ /* Link Qnodes to their respective BCMs */
+ for (i = 0; i < bcm->num_nodes; i++) {
+ qn = bcm->nodes[i];
+ qn->bcms[qn->num_bcms] = bcm;
+ qn->num_bcms++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_bcm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
new file mode 100644
index 000000000000..903d25e61984
--- /dev/null
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_RPMH_H__
+#define __DRIVERS_INTERCONNECT_QCOM_ICC_RPMH_H__
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @dev: reference to the NoC device
+ * @bcms: list of bcms that maps to the provider
+ * @num_bcms: number of @bcms
+ * @voter: bcm voter targeted by this provider
+ */
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct device *dev;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+ struct bcm_voter *voter;
+};
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+#define MAX_LINKS 128
+#define MAX_BCMS 64
+#define MAX_BCM_PER_NODE 3
+#define MAX_VCD 10
+
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low power modes.
+ */
+#define QCOM_ICC_BUCKET_AMC 0
+#define QCOM_ICC_BUCKET_WAKE 1
+#define QCOM_ICC_BUCKET_SLEEP 2
+#define QCOM_ICC_NUM_BUCKETS 3
+#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+ QCOM_ICC_TAG_SLEEP)
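A hedged consumer-side sketch of how these tags reach qcom_icc_aggregate(); icc_set_tag() and icc_set_bw() are the generic interconnect framework calls, while the "memory" path name is hypothetical:

	static int example_vote_wake_sleep(struct device *dev)
	{
		struct icc_path *path = of_icc_get(dev, "memory");

		if (IS_ERR(path))
			return PTR_ERR(path);

		/* The tag is consumed in qcom_icc_aggregate() to pick buckets */
		icc_set_tag(path, QCOM_ICC_TAG_WAKE | QCOM_ICC_TAG_SLEEP);
		return icc_set_bw(path, 100000, 200000);	/* avg, peak kB/s */
	}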
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @channels: num of channels at this node
+ * @buswidth: width of the interconnect between a node and the bus
+ * @sum_avg: current sum aggregate value of all avg bw requests
+ * @max_peak: current max aggregate value of all peak bw requests
+ * @bcms: list of bcms associated with this logical node
+ * @num_bcms: num of @bcms
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 channels;
+ u16 buswidth;
+ u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
+ u64 max_peak[QCOM_ICC_NUM_BUCKETS];
+ struct qcom_icc_bcm *bcms[MAX_BCM_PER_NODE];
+ size_t num_bcms;
+};
+
+/**
+ * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
+ * known as Bus Clock Manager (BCM)
+ * @name: the bcm node name used to fetch BCM data from command db
+ * @type: latency or bandwidth bcm
+ * @addr: address offsets used when voting to RPMH
+ * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
+ * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+ * @dirty: flag used to indicate whether the bcm needs to be committed
+ * @keepalive: flag used to indicate whether a keepalive is required
+ * @aux_data: auxiliary data used when calculating threshold values and
+ * communicating with RPMh
+ * @list: used to link to other bcms when compiling lists for commit
+ * @ws_list: used to keep track of bcms that may transition between wake/sleep
+ * @num_nodes: total number of @nodes
+ * @nodes: list of qcom_icc_nodes that this BCM encapsulates
+ */
+struct qcom_icc_bcm {
+ const char *name;
+ u32 type;
+ u32 addr;
+ u64 vote_x[QCOM_ICC_NUM_BUCKETS];
+ u64 vote_y[QCOM_ICC_NUM_BUCKETS];
+ bool dirty;
+ bool keepalive;
+ struct bcm_db aux_data;
+ struct list_head list;
+ struct list_head ws_list;
+ size_t num_nodes;
+ struct qcom_icc_node *nodes[];
+};
+
+struct qcom_icc_fabric {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+#define DEFINE_QNODE(_name, _id, _channels, _buswidth, ...) \
+ static struct qcom_icc_node _name = { \
+ .id = _id, \
+ .name = #_name, \
+ .channels = _channels, \
+ .buswidth = _buswidth, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+int qcom_icc_set(struct icc_node *src, struct icc_node *dst);
+int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev);
+void qcom_icc_pre_aggregate(struct icc_node *node);
+
+#endif
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
new file mode 100644
index 000000000000..a03c6d6833df
--- /dev/null
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interconnect/qcom,osm-l3.h>
+
+#include "sc7180.h"
+#include "sdm845.h"
+
+#define LUT_MAX_ENTRIES 40U
+#define LUT_SRC GENMASK(31, 30)
+#define LUT_L_VAL GENMASK(7, 0)
+#define LUT_ROW_SIZE 32
+#define CLK_HW_DIV 2
+
+/* Register offsets */
+#define REG_ENABLE 0x0
+#define REG_FREQ_LUT 0x110
+#define REG_PERF_STATE 0x920
+
+#define OSM_L3_MAX_LINKS 1
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_osm_l3_icc_provider, provider)
+
+struct qcom_osm_l3_icc_provider {
+ void __iomem *base;
+ unsigned int max_state;
+ unsigned long lut_tables[LUT_MAX_ENTRIES];
+ struct icc_provider provider;
+};
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[OSM_L3_MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 buswidth;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, ...) \
+ static struct qcom_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3);
+DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16);
+
+static struct qcom_icc_node *sdm845_osm_l3_nodes[] = {
+ [MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3,
+ [SLAVE_OSM_L3] = &sdm845_osm_l3,
+};
+
+static const struct qcom_icc_desc sdm845_icc_osm_l3 = {
+ .nodes = sdm845_osm_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sdm845_osm_l3_nodes),
+};
+
+DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3);
+DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16);
+
+static struct qcom_icc_node *sc7180_osm_l3_nodes[] = {
+ [MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3,
+ [SLAVE_OSM_L3] = &sc7180_osm_l3,
+};
+
+static const struct qcom_icc_desc sc7180_icc_osm_l3 = {
+ .nodes = sc7180_osm_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sc7180_osm_l3_nodes),
+};
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_osm_l3_icc_provider *qp;
+ struct icc_provider *provider;
+ struct qcom_icc_node *qn;
+ struct icc_node *n;
+ unsigned int index;
+ u32 agg_peak = 0;
+ u32 agg_avg = 0;
+ u64 rate;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ rate = max(agg_avg, agg_peak);
+ rate = icc_units_to_bps(rate);
+ do_div(rate, qn->buswidth);
+
+ for (index = 0; index < qp->max_state - 1; index++) {
+ if (qp->lut_tables[index] >= rate)
+ break;
+ }
+
+ writel_relaxed(index, qp->base + REG_PERF_STATE);
+
+ return 0;
+}
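A self-contained sketch of the perf-state selection above: normalize the aggregated request into a per-lane byte rate, then take the first LUT entry that can carry it (the frequency table is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t lut[] = { 300000000, 576000000, 864000000, 1152000000 };
		unsigned int max_state = 4, index;
		uint64_t agg_bw = 9600000;	/* kB/s, max(avg, peak) */
		uint64_t buswidth = 16;

		/* icc_units_to_bps(): kB/s -> bytes/s, then divide by buswidth */
		uint64_t rate = agg_bw * 1000 / buswidth;

		for (index = 0; index < max_state - 1; index++)
			if (lut[index] >= rate)
				break;

		printf("perf state %u (%llu Hz)\n", index,
		       (unsigned long long)lut[index]);
		return 0;
	}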
+
+static int qcom_osm_l3_remove(struct platform_device *pdev)
+{
+ struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static int qcom_osm_l3_probe(struct platform_device *pdev)
+{
+ u32 info, src, lval, i, prev_freq = 0, freq;
+ static unsigned long hw_rate, xo_rate;
+ struct qcom_osm_l3_icc_provider *qp;
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct icc_node *node;
+ size_t num_nodes;
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(&pdev->dev, "xo");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ xo_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ clk = clk_get(&pdev->dev, "alternate");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
+ clk_put(clk);
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ qp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qp->base))
+ return PTR_ERR(qp->base);
+
+ /* HW should be in enabled state to proceed */
+ if (!(readl_relaxed(qp->base + REG_ENABLE) & 0x1)) {
+ dev_err(&pdev->dev, "error hardware not enabled\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ info = readl_relaxed(qp->base + REG_FREQ_LUT +
+ i * LUT_ROW_SIZE);
+ src = FIELD_GET(LUT_SRC, info);
+ lval = FIELD_GET(LUT_L_VAL, info);
+ if (src)
+ freq = xo_rate * lval;
+ else
+ freq = hw_rate;
+
+ /* Two of the same frequencies signify end of table */
+ if (i > 0 && prev_freq == freq)
+ break;
+
+ dev_dbg(&pdev->dev, "index=%d freq=%d\n", i, freq);
+
+ qp->lut_tables[i] = freq;
+ prev_freq = freq;
+ }
+ qp->max_state = i;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+
+ return ret;
+}
+
+static const struct of_device_id osm_l3_of_match[] = {
+ { .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 },
+ { .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, osm_l3_of_match);
+
+static struct platform_driver osm_l3_driver = {
+ .probe = qcom_osm_l3_probe,
+ .remove = qcom_osm_l3_remove,
+ .driver = {
+ .name = "osm-l3",
+ .of_match_table = osm_l3_of_match,
+ },
+};
+module_platform_driver(osm_l3_driver);
+
+MODULE_DESCRIPTION("Qualcomm OSM L3 interconnect driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
new file mode 100644
index 000000000000..dcf493d07928
--- /dev/null
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sc7180.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sc7180.h"
+
+DEFINE_QNODE(qhm_a1noc_cfg, SC7180_MASTER_A1NOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qspi, SC7180_MASTER_QSPI, 1, 4, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_qup_0, SC7180_MASTER_QUP_0, 1, 4, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, SC7180_MASTER_SDCC_2, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_emmc, SC7180_MASTER_EMMC, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, SC7180_MASTER_UFS_MEM, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, SC7180_MASTER_A2NOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SC7180_MASTER_QDSS_BAM, 1, 4, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup_1, SC7180_MASTER_QUP_1, 1, 4, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, SC7180_MASTER_CRYPTO, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, SC7180_MASTER_IPA, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SC7180_MASTER_QDSS_ETR, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_usb3, SC7180_MASTER_USB3, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SC7180_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SC7180_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, SC7180_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qnm_npu, SC7180_MASTER_NPU, 2, 32, SC7180_SLAVE_CDSP_GEM_NOC);
+DEFINE_QNODE(qxm_npu_dsp, SC7180_MASTER_NPU_PROC, 1, 8, SC7180_SLAVE_CDSP_GEM_NOC);
+DEFINE_QNODE(qnm_snoc, SC7180_MASTER_SNOC_CNOC, 1, 8, SC7180_SLAVE_A1NOC_CFG, SC7180_SLAVE_A2NOC_CFG, SC7180_SLAVE_AHB2PHY_SOUTH, SC7180_SLAVE_AHB2PHY_CENTER, SC7180_SLAVE_AOP, SC7180_SLAVE_AOSS, SC7180_SLAVE_BOOT_ROM, SC7180_SLAVE_CAMERA_CFG, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, SC7180_SLAVE_CLK_CTL, SC7180_SLAVE_RBCPR_CX_CFG, SC7180_SLAVE_RBCPR_MX_CFG, SC7180_SLAVE_CRYPTO_0_CFG, SC7180_SLAVE_DCC_CFG, SC7180_SLAVE_CNOC_DDRSS, SC7180_SLAVE_DISPLAY_CFG, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, SC7180_SLAVE_EMMC_CFG, SC7180_SLAVE_GLM,
+ SC7180_SLAVE_GFX3D_CFG, SC7180_SLAVE_IMEM_CFG, SC7180_SLAVE_IPA_CFG, SC7180_SLAVE_CNOC_MNOC_CFG, SC7180_SLAVE_CNOC_MSS, SC7180_SLAVE_NPU_CFG, SC7180_SLAVE_NPU_DMA_BWMON_CFG, SC7180_SLAVE_NPU_PROC_BWMON_CFG, SC7180_SLAVE_PDM, SC7180_SLAVE_PIMEM_CFG, SC7180_SLAVE_PRNG, SC7180_SLAVE_QDSS_CFG, SC7180_SLAVE_QM_CFG, SC7180_SLAVE_QM_MPU_CFG, SC7180_SLAVE_QSPI_0, SC7180_SLAVE_QUP_0, SC7180_SLAVE_QUP_1, SC7180_SLAVE_SDCC_2, SC7180_SLAVE_SECURITY, SC7180_SLAVE_SNOC_CFG, SC7180_SLAVE_TCSR, SC7180_SLAVE_TLMM_WEST, SC7180_SLAVE_TLMM_NORTH, SC7180_SLAVE_TLMM_SOUTH, SC7180_SLAVE_UFS_MEM_CFG, SC7180_SLAVE_USB3, SC7180_SLAVE_VENUS_CFG, SC7180_SLAVE_VENUS_THROTTLE_CFG, SC7180_SLAVE_VSENSE_CTRL_CFG, SC7180_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, SC7180_MASTER_QDSS_DAP, 1, 8, SC7180_SLAVE_A1NOC_CFG, SC7180_SLAVE_A2NOC_CFG, SC7180_SLAVE_AHB2PHY_SOUTH, SC7180_SLAVE_AHB2PHY_CENTER, SC7180_SLAVE_AOP, SC7180_SLAVE_AOSS, SC7180_SLAVE_BOOT_ROM, SC7180_SLAVE_CAMERA_CFG, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, SC7180_SLAVE_CLK_CTL, SC7180_SLAVE_RBCPR_CX_CFG, SC7180_SLAVE_RBCPR_MX_CFG, SC7180_SLAVE_CRYPTO_0_CFG, SC7180_SLAVE_DCC_CFG, SC7180_SLAVE_CNOC_DDRSS, SC7180_SLAVE_DISPLAY_CFG, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, SC7180_SLAVE_EMMC_CFG, SC7180_SLAVE_GLM, SC7180_SLAVE_GFX3D_CFG, SC7180_SLAVE_IMEM_CFG, SC7180_SLAVE_IPA_CFG, SC7180_SLAVE_CNOC_MNOC_CFG, SC7180_SLAVE_CNOC_MSS, SC7180_SLAVE_NPU_CFG, SC7180_SLAVE_NPU_DMA_BWMON_CFG,
+SC7180_SLAVE_NPU_PROC_BWMON_CFG, SC7180_SLAVE_PDM, SC7180_SLAVE_PIMEM_CFG, SC7180_SLAVE_PRNG, SC7180_SLAVE_QDSS_CFG, SC7180_SLAVE_QM_CFG, SC7180_SLAVE_QM_MPU_CFG, SC7180_SLAVE_QSPI_0, SC7180_SLAVE_QUP_0, SC7180_SLAVE_QUP_1, SC7180_SLAVE_SDCC_2, SC7180_SLAVE_SECURITY, SC7180_SLAVE_SNOC_CFG, SC7180_SLAVE_TCSR, SC7180_SLAVE_TLMM_WEST, SC7180_SLAVE_TLMM_NORTH, SC7180_SLAVE_TLMM_SOUTH, SC7180_SLAVE_UFS_MEM_CFG, SC7180_SLAVE_USB3, SC7180_SLAVE_VENUS_CFG, SC7180_SLAVE_VENUS_THROTTLE_CFG, SC7180_SLAVE_VSENSE_CTRL_CFG, SC7180_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc_dc_noc, SC7180_MASTER_CNOC_DC_NOC, 1, 4, SC7180_SLAVE_GEM_NOC_CFG, SC7180_SLAVE_LLCC_CFG);
+DEFINE_QNODE(acm_apps0, SC7180_MASTER_APPSS_PROC, 1, 16, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(acm_sys_tcu, SC7180_MASTER_SYS_TCU, 1, 8, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qhm_gemnoc_cfg, SC7180_MASTER_GEM_NOC_CFG, 1, 4, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, SC7180_SLAVE_SERVICE_GEM_NOC);
+DEFINE_QNODE(qnm_cmpnoc, SC7180_MASTER_COMPUTE_NOC, 1, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, SC7180_MASTER_MNOC_HF_MEM_NOC, 1, 32, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
+DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE);
+DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
+DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, SC7180_MASTER_CAMNOC_HF1, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, SC7180_MASTER_CAMNOC_SF, 1, 32, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SC7180_MASTER_MDP0, 1, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SC7180_MASTER_ROTATOR, 1, 16, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, SC7180_MASTER_VIDEO_P0, 1, 32, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, SC7180_MASTER_VIDEO_PROC, 1, 8, SC7180_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(amm_npu_sys, SC7180_MASTER_NPU_SYS, 2, 32, SC7180_SLAVE_NPU_COMPUTE_NOC);
+DEFINE_QNODE(qhm_npu_cfg, SC7180_MASTER_NPU_NOC_CFG, 1, 4, SC7180_SLAVE_NPU_CAL_DP0, SC7180_SLAVE_NPU_CP, SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG, SC7180_SLAVE_NPU_DPM, SC7180_SLAVE_ISENSE_CFG, SC7180_SLAVE_NPU_LLM_CFG, SC7180_SLAVE_NPU_TCM, SC7180_SLAVE_SERVICE_NPU_NOC);
+DEFINE_QNODE(qup_core_master_1, SC7180_MASTER_QUP_CORE_0, 1, 4, SC7180_SLAVE_QUP_CORE_0);
+DEFINE_QNODE(qup_core_master_2, SC7180_MASTER_QUP_CORE_1, 1, 4, SC7180_SLAVE_QUP_CORE_1);
+DEFINE_QNODE(qhm_snoc_cfg, SC7180_MASTER_SNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SC7180_MASTER_A1NOC_SNOC, 1, 16, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_SNOC_GEM_NOC_SF, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, SC7180_MASTER_A2NOC_SNOC, 1, 16, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_SNOC_GEM_NOC_SF, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM, SC7180_SLAVE_TCU);
+DEFINE_QNODE(qnm_gemnoc, SC7180_MASTER_GEM_NOC_SNOC, 1, 8, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM, SC7180_SLAVE_TCU);
+DEFINE_QNODE(qxm_pimem, SC7180_MASTER_PIMEM, 1, 8, SC7180_SLAVE_SNOC_GEM_NOC_GC, SC7180_SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SC7180_SLAVE_A1NOC_SNOC, 1, 16, SC7180_MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SC7180_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SC7180_SLAVE_A2NOC_SNOC, 1, 16, SC7180_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SC7180_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_camnoc_uncomp, SC7180_SLAVE_CAMNOC_UNCOMP, 1, 32);
+DEFINE_QNODE(qns_cdsp_gemnoc, SC7180_SLAVE_CDSP_GEM_NOC, 1, 32, SC7180_MASTER_COMPUTE_NOC);
+DEFINE_QNODE(qhs_a1_noc_cfg, SC7180_SLAVE_A1NOC_CFG, 1, 4, SC7180_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SC7180_SLAVE_A2NOC_CFG, 1, 4, SC7180_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_ahb2phy0, SC7180_SLAVE_AHB2PHY_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy2, SC7180_SLAVE_AHB2PHY_CENTER, 1, 4);
+DEFINE_QNODE(qhs_aop, SC7180_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SC7180_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_boot_rom, SC7180_SLAVE_BOOT_ROM, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SC7180_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_camera_nrt_throttle_cfg, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_camera_rt_throttle_cfg, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SC7180_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SC7180_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mx, SC7180_SLAVE_RBCPR_MX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SC7180_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SC7180_SLAVE_DCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SC7180_SLAVE_CNOC_DDRSS, 1, 4, SC7180_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_display_cfg, SC7180_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_display_rt_throttle_cfg, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_display_throttle_cfg, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_emmc_cfg, SC7180_SLAVE_EMMC_CFG, 1, 4);
+DEFINE_QNODE(qhs_glm, SC7180_SLAVE_GLM, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SC7180_SLAVE_GFX3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SC7180_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SC7180_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SC7180_SLAVE_CNOC_MNOC_CFG, 1, 4, SC7180_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_mss_cfg, SC7180_SLAVE_CNOC_MSS, 1, 4);
+DEFINE_QNODE(qhs_npu_cfg, SC7180_SLAVE_NPU_CFG, 1, 4, SC7180_MASTER_NPU_NOC_CFG);
+DEFINE_QNODE(qhs_npu_dma_throttle_cfg, SC7180_SLAVE_NPU_DMA_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_npu_dsp_throttle_cfg, SC7180_SLAVE_NPU_PROC_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_pdm, SC7180_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SC7180_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SC7180_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SC7180_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qm_cfg, SC7180_SLAVE_QM_CFG, 1, 4);
+DEFINE_QNODE(qhs_qm_mpu_cfg, SC7180_SLAVE_QM_MPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_qspi, SC7180_SLAVE_QSPI_0, 1, 4);
+DEFINE_QNODE(qhs_qup0, SC7180_SLAVE_QUP_0, 1, 4);
+DEFINE_QNODE(qhs_qup1, SC7180_SLAVE_QUP_1, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SC7180_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_security, SC7180_SLAVE_SECURITY, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SC7180_SLAVE_SNOC_CFG, 1, 4, SC7180_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_tcsr, SC7180_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm_1, SC7180_SLAVE_TLMM_WEST, 1, 4);
+DEFINE_QNODE(qhs_tlmm_2, SC7180_SLAVE_TLMM_NORTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm_3, SC7180_SLAVE_TLMM_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SC7180_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3, SC7180_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SC7180_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_venus_throttle_cfg, SC7180_SLAVE_VENUS_THROTTLE_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SC7180_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(srvc_cnoc, SC7180_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_gemnoc, SC7180_SLAVE_GEM_NOC_CFG, 1, 4, SC7180_MASTER_GEM_NOC_CFG);
+DEFINE_QNODE(qhs_llcc, SC7180_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
+DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
+DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
+DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8);
+DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
+DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SC7180_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_cal_dp0, SC7180_SLAVE_NPU_CAL_DP0, 1, 4);
+DEFINE_QNODE(qhs_cp, SC7180_SLAVE_NPU_CP, 1, 4);
+DEFINE_QNODE(qhs_dma_bwmon, SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_dpm, SC7180_SLAVE_NPU_DPM, 1, 4);
+DEFINE_QNODE(qhs_isense, SC7180_SLAVE_ISENSE_CFG, 1, 4);
+DEFINE_QNODE(qhs_llm, SC7180_SLAVE_NPU_LLM_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcm, SC7180_SLAVE_NPU_TCM, 1, 4);
+DEFINE_QNODE(qns_npu_sys, SC7180_SLAVE_NPU_COMPUTE_NOC, 2, 32);
+DEFINE_QNODE(srvc_noc, SC7180_SLAVE_SERVICE_NPU_NOC, 1, 4);
+DEFINE_QNODE(qup_core_slave_1, SC7180_SLAVE_QUP_CORE_0, 1, 4);
+DEFINE_QNODE(qup_core_slave_2, SC7180_SLAVE_QUP_CORE_1, 1, 4);
+DEFINE_QNODE(qhs_apss, SC7180_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SC7180_SLAVE_SNOC_CNOC, 1, 8, SC7180_MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_gemnoc_gc, SC7180_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC7180_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_gemnoc_sf, SC7180_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SC7180_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SC7180_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SC7180_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SC7180_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_qdss_stm, SC7180_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SC7180_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup_core_master_1, &qup_core_master_2);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps0);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_cn1, "CN1", false, &qhm_qspi, &xm_sdc2, &xm_emmc, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qxm_pimem, &qns_gemnoc_gc);
+DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_QSPI] = &qhm_qspi,
+ [MASTER_QUP_0] = &qhm_qup_0,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_EMMC] = &xm_emmc,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sc7180_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_1] = &qhm_qup_1,
+ [MASTER_USB3] = &qhm_usb3,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sc7180_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node *camnoc_virt_nodes[] = {
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static struct qcom_icc_desc sc7180_camnoc_virt = {
+ .nodes = camnoc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
+ .bcms = camnoc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *compute_noc_bcms[] = {
+ &bcm_co0,
+ &bcm_co2,
+ &bcm_co3,
+};
+
+static struct qcom_icc_node *compute_noc_nodes[] = {
+ [MASTER_NPU] = &qnm_npu,
+ [MASTER_NPU_PROC] = &qxm_npu_dsp,
+ [SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
+};
+
+static struct qcom_icc_desc sc7180_compute_noc = {
+ .nodes = compute_noc_nodes,
+ .num_nodes = ARRAY_SIZE(compute_noc_nodes),
+ .bcms = compute_noc_bcms,
+ .num_bcms = ARRAY_SIZE(compute_noc_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_CENTER] = &qhs_ahb2phy2,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_BOOT_ROM] = &qhs_boot_rom,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_DISPLAY_RT_THROTTLE_CFG] = &qhs_display_rt_throttle_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
+ [SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_NPU_CFG] = &qhs_npu_cfg,
+ [SLAVE_NPU_DMA_BWMON_CFG] = &qhs_npu_dma_throttle_cfg,
+ [SLAVE_NPU_PROC_BWMON_CFG] = &qhs_npu_dsp_throttle_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QM_CFG] = &qhs_qm_cfg,
+ [SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_WEST] = &qhs_tlmm_1,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_2,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_3,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static struct qcom_icc_desc sc7180_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
+ [SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+};
+
+static struct qcom_icc_desc sc7180_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh4,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_apps0,
+ [MASTER_SYS_TCU] = &acm_sys_tcu,
+ [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
+};
+
+static struct qcom_icc_desc sc7180_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+ &bcm_ip0,
+};
+
+static struct qcom_icc_node *ipa_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+};
+
+static struct qcom_icc_desc sc7180_ipa_virt = {
+ .nodes = ipa_virt_nodes,
+ .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+ .bcms = ipa_virt_bcms,
+ .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static struct qcom_icc_desc sc7180_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static struct qcom_icc_desc sc7180_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_node *npu_noc_nodes[] = {
+ [MASTER_NPU_SYS] = &amm_npu_sys,
+ [MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
+ [SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
+ [SLAVE_NPU_CP] = &qhs_cp,
+ [SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
+ [SLAVE_NPU_DPM] = &qhs_dpm,
+ [SLAVE_ISENSE_CFG] = &qhs_isense,
+ [SLAVE_NPU_LLM_CFG] = &qhs_llm,
+ [SLAVE_NPU_TCM] = &qhs_tcm,
+ [SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
+ [SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
+};
+
+static struct qcom_icc_desc sc7180_npu_noc = {
+ .nodes = npu_noc_nodes,
+ .num_nodes = ARRAY_SIZE(npu_noc_nodes),
+};
+
+static struct qcom_icc_bcm *qup_virt_bcms[] = {
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node *qup_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup_core_master_1,
+ [MASTER_QUP_CORE_1] = &qup_core_master_2,
+ [SLAVE_QUP_CORE_0] = &qup_core_slave_1,
+ [SLAVE_QUP_CORE_1] = &qup_core_slave_2,
+};
+
+static struct qcom_icc_desc sc7180_qup_virt = {
+ .nodes = qup_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qup_virt_nodes),
+ .bcms = qup_virt_bcms,
+ .num_bcms = ARRAY_SIZE(qup_virt_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn7,
+ &bcm_sn9,
+ &bcm_sn12,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sc7180_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
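+/*
+ * Probe flow: look up the per-NoC descriptor from the OF match data, grab
+ * a BCM voter handle, register the interconnect provider, create and link
+ * one icc_node per descriptor entry, then initialize each BCM from command
+ * DB via qcom_icc_bcm_init() so bandwidth votes can later be committed to
+ * RPMh.
+ */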
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ /* allocate icc_onecell_data with room for num_nodes node pointers */
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sc7180-aggre1-noc",
+ .data = &sc7180_aggre1_noc},
+ { .compatible = "qcom,sc7180-aggre2-noc",
+ .data = &sc7180_aggre2_noc},
+ { .compatible = "qcom,sc7180-camnoc-virt",
+ .data = &sc7180_camnoc_virt},
+ { .compatible = "qcom,sc7180-compute-noc",
+ .data = &sc7180_compute_noc},
+ { .compatible = "qcom,sc7180-config-noc",
+ .data = &sc7180_config_noc},
+ { .compatible = "qcom,sc7180-dc-noc",
+ .data = &sc7180_dc_noc},
+ { .compatible = "qcom,sc7180-gem-noc",
+ .data = &sc7180_gem_noc},
+ { .compatible = "qcom,sc7180-ipa-virt",
+ .data = &sc7180_ipa_virt},
+ { .compatible = "qcom,sc7180-mc-virt",
+ .data = &sc7180_mc_virt},
+ { .compatible = "qcom,sc7180-mmss-noc",
+ .data = &sc7180_mmss_noc},
+ { .compatible = "qcom,sc7180-npu-noc",
+ .data = &sc7180_npu_noc},
+ { .compatible = "qcom,sc7180-qup-virt",
+ .data = &sc7180_qup_virt},
+ { .compatible = "qcom,sc7180-system-noc",
+ .data = &sc7180_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sc7180",
+ .of_match_table = qnoc_of_match,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SC7180 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sc7180.h b/drivers/interconnect/qcom/sc7180.h
new file mode 100644
index 000000000000..c6212a10c2f6
--- /dev/null
+++ b/drivers/interconnect/qcom/sc7180.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SC7180 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SC7180_H
+#define __DRIVERS_INTERCONNECT_QCOM_SC7180_H
+
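+/*
+ * Driver-internal node ids passed to icc_node_create(); these are distinct
+ * from the per-provider indices in dt-bindings/interconnect/qcom,sc7180.h
+ * and must stay unique across all of the SoC's NoC providers, since the
+ * interconnect core keeps a single global id space for nodes.
+ */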
+#define SC7180_MASTER_APPSS_PROC 0
+#define SC7180_MASTER_SYS_TCU 1
+#define SC7180_MASTER_NPU_SYS 2
+#define SC7180_MASTER_IPA_CORE 3
+#define SC7180_MASTER_LLCC 4
+#define SC7180_MASTER_A1NOC_CFG 5
+#define SC7180_MASTER_A2NOC_CFG 6
+#define SC7180_MASTER_CNOC_DC_NOC 7
+#define SC7180_MASTER_GEM_NOC_CFG 8
+#define SC7180_MASTER_CNOC_MNOC_CFG 9
+#define SC7180_MASTER_NPU_NOC_CFG 10
+#define SC7180_MASTER_QDSS_BAM 11
+#define SC7180_MASTER_QSPI 12
+#define SC7180_MASTER_QUP_0 13
+#define SC7180_MASTER_QUP_1 14
+#define SC7180_MASTER_SNOC_CFG 15
+#define SC7180_MASTER_A1NOC_SNOC 16
+#define SC7180_MASTER_A2NOC_SNOC 17
+#define SC7180_MASTER_COMPUTE_NOC 18
+#define SC7180_MASTER_GEM_NOC_SNOC 19
+#define SC7180_MASTER_MNOC_HF_MEM_NOC 20
+#define SC7180_MASTER_MNOC_SF_MEM_NOC 21
+#define SC7180_MASTER_NPU 22
+#define SC7180_MASTER_SNOC_CNOC 23
+#define SC7180_MASTER_SNOC_GC_MEM_NOC 24
+#define SC7180_MASTER_SNOC_SF_MEM_NOC 25
+#define SC7180_MASTER_QUP_CORE_0 26
+#define SC7180_MASTER_QUP_CORE_1 27
+#define SC7180_MASTER_CAMNOC_HF0 28
+#define SC7180_MASTER_CAMNOC_HF1 29
+#define SC7180_MASTER_CAMNOC_HF0_UNCOMP 30
+#define SC7180_MASTER_CAMNOC_HF1_UNCOMP 31
+#define SC7180_MASTER_CAMNOC_SF 32
+#define SC7180_MASTER_CAMNOC_SF_UNCOMP 33
+#define SC7180_MASTER_CRYPTO 34
+#define SC7180_MASTER_GFX3D 35
+#define SC7180_MASTER_IPA 36
+#define SC7180_MASTER_MDP0 37
+#define SC7180_MASTER_NPU_PROC 38
+#define SC7180_MASTER_PIMEM 39
+#define SC7180_MASTER_ROTATOR 40
+#define SC7180_MASTER_VIDEO_P0 41
+#define SC7180_MASTER_VIDEO_PROC 42
+#define SC7180_MASTER_QDSS_DAP 43
+#define SC7180_MASTER_QDSS_ETR 44
+#define SC7180_MASTER_SDCC_2 45
+#define SC7180_MASTER_UFS_MEM 46
+#define SC7180_MASTER_USB3 47
+#define SC7180_MASTER_EMMC 48
+#define SC7180_SLAVE_EBI1 49
+#define SC7180_SLAVE_IPA_CORE 50
+#define SC7180_SLAVE_A1NOC_CFG 51
+#define SC7180_SLAVE_A2NOC_CFG 52
+#define SC7180_SLAVE_AHB2PHY_SOUTH 53
+#define SC7180_SLAVE_AHB2PHY_CENTER 54
+#define SC7180_SLAVE_AOP 55
+#define SC7180_SLAVE_AOSS 56
+#define SC7180_SLAVE_APPSS 57
+#define SC7180_SLAVE_BOOT_ROM 58
+#define SC7180_SLAVE_NPU_CAL_DP0 59
+#define SC7180_SLAVE_CAMERA_CFG 60
+#define SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG 61
+#define SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG 62
+#define SC7180_SLAVE_CLK_CTL 63
+#define SC7180_SLAVE_NPU_CP 64
+#define SC7180_SLAVE_RBCPR_CX_CFG 65
+#define SC7180_SLAVE_RBCPR_MX_CFG 66
+#define SC7180_SLAVE_CRYPTO_0_CFG 67
+#define SC7180_SLAVE_DCC_CFG 68
+#define SC7180_SLAVE_CNOC_DDRSS 69
+#define SC7180_SLAVE_DISPLAY_CFG 70
+#define SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG 71
+#define SC7180_SLAVE_DISPLAY_THROTTLE_CFG 72
+#define SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG 73
+#define SC7180_SLAVE_NPU_DPM 74
+#define SC7180_SLAVE_EMMC_CFG 75
+#define SC7180_SLAVE_GEM_NOC_CFG 76
+#define SC7180_SLAVE_GLM 77
+#define SC7180_SLAVE_GFX3D_CFG 78
+#define SC7180_SLAVE_IMEM_CFG 79
+#define SC7180_SLAVE_IPA_CFG 80
+#define SC7180_SLAVE_ISENSE_CFG 81
+#define SC7180_SLAVE_LLCC_CFG 82
+#define SC7180_SLAVE_NPU_LLM_CFG 83
+#define SC7180_SLAVE_MSS_PROC_MS_MPU_CFG 84
+#define SC7180_SLAVE_CNOC_MNOC_CFG 85
+#define SC7180_SLAVE_CNOC_MSS 86
+#define SC7180_SLAVE_NPU_CFG 87
+#define SC7180_SLAVE_NPU_DMA_BWMON_CFG 88
+#define SC7180_SLAVE_NPU_PROC_BWMON_CFG 89
+#define SC7180_SLAVE_PDM 90
+#define SC7180_SLAVE_PIMEM_CFG 91
+#define SC7180_SLAVE_PRNG 92
+#define SC7180_SLAVE_QDSS_CFG 93
+#define SC7180_SLAVE_QM_CFG 94
+#define SC7180_SLAVE_QM_MPU_CFG 95
+#define SC7180_SLAVE_QSPI_0 96
+#define SC7180_SLAVE_QUP_0 97
+#define SC7180_SLAVE_QUP_1 98
+#define SC7180_SLAVE_SDCC_2 99
+#define SC7180_SLAVE_SECURITY 100
+#define SC7180_SLAVE_SNOC_CFG 101
+#define SC7180_SLAVE_NPU_TCM 102
+#define SC7180_SLAVE_TCSR 103
+#define SC7180_SLAVE_TLMM_WEST 104
+#define SC7180_SLAVE_TLMM_NORTH 105
+#define SC7180_SLAVE_TLMM_SOUTH 106
+#define SC7180_SLAVE_UFS_MEM_CFG 107
+#define SC7180_SLAVE_USB3 108
+#define SC7180_SLAVE_VENUS_CFG 109
+#define SC7180_SLAVE_VENUS_THROTTLE_CFG 110
+#define SC7180_SLAVE_VSENSE_CTRL_CFG 111
+#define SC7180_SLAVE_A1NOC_SNOC 112
+#define SC7180_SLAVE_A2NOC_SNOC 113
+#define SC7180_SLAVE_CAMNOC_UNCOMP 114
+#define SC7180_SLAVE_CDSP_GEM_NOC 115
+#define SC7180_SLAVE_SNOC_CNOC 116
+#define SC7180_SLAVE_GEM_NOC_SNOC 117
+#define SC7180_SLAVE_SNOC_GEM_NOC_GC 118
+#define SC7180_SLAVE_SNOC_GEM_NOC_SF 119
+#define SC7180_SLAVE_LLCC 120
+#define SC7180_SLAVE_MNOC_HF_MEM_NOC 121
+#define SC7180_SLAVE_MNOC_SF_MEM_NOC 122
+#define SC7180_SLAVE_NPU_COMPUTE_NOC 123
+#define SC7180_SLAVE_QUP_CORE_0 124
+#define SC7180_SLAVE_QUP_CORE_1 125
+#define SC7180_SLAVE_IMEM 126
+#define SC7180_SLAVE_PIMEM 127
+#define SC7180_SLAVE_SERVICE_A1NOC 128
+#define SC7180_SLAVE_SERVICE_A2NOC 129
+#define SC7180_SLAVE_SERVICE_CNOC 130
+#define SC7180_SLAVE_SERVICE_GEM_NOC 131
+#define SC7180_SLAVE_SERVICE_MNOC 132
+#define SC7180_SLAVE_SERVICE_NPU_NOC 133
+#define SC7180_SLAVE_SERVICE_SNOC 134
+#define SC7180_SLAVE_QDSS_STM 135
+#define SC7180_SLAVE_TCU 136
+#define SC7180_MASTER_OSM_L3_APPS 137
+#define SC7180_SLAVE_OSM_L3 138
+
+#endif
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index f078cf0fce56..b013b80caa45 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -1,379 +1,245 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- *
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
-#include <asm/div64.h>
-#include <dt-bindings/interconnect/qcom,sdm845.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
-#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/sort.h>
-
-#include <soc/qcom/cmd-db.h>
-#include <soc/qcom/rpmh.h>
-#include <soc/qcom/tcs.h>
-
-#define to_qcom_provider(_provider) \
- container_of(_provider, struct qcom_icc_provider, provider)
-
-struct qcom_icc_provider {
- struct icc_provider provider;
- struct device *dev;
- struct qcom_icc_bcm **bcms;
- size_t num_bcms;
-};
-
-/**
- * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
- * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
- * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
- * @vcd: virtual clock domain that this bcm belongs to
- * @reserved: reserved field
- */
-struct bcm_db {
- __le32 unit;
- __le16 width;
- u8 vcd;
- u8 reserved;
-};
-#define SDM845_MAX_LINKS 43
-#define SDM845_MAX_BCMS 30
-#define SDM845_MAX_BCM_PER_NODE 2
-#define SDM845_MAX_VCD 10
+#include <dt-bindings/interconnect/qcom,sdm845.h>
-/*
- * The AMC bucket denotes constraints that are applied to hardware when
- * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
- * when the execution environment transitions between active and low power mode.
- */
-#define QCOM_ICC_BUCKET_AMC 0
-#define QCOM_ICC_BUCKET_WAKE 1
-#define QCOM_ICC_BUCKET_SLEEP 2
-#define QCOM_ICC_NUM_BUCKETS 3
-#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
-#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
-#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
-#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
-#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
- QCOM_ICC_TAG_SLEEP)
-
-/**
- * struct qcom_icc_node - Qualcomm specific interconnect nodes
- * @name: the node name used in debugfs
- * @links: an array of nodes where we can go next while traversing
- * @id: a unique node identifier
- * @num_links: the total number of @links
- * @channels: num of channels at this node
- * @buswidth: width of the interconnect between a node and the bus
- * @sum_avg: current sum aggregate value of all avg bw requests
- * @max_peak: current max aggregate value of all peak bw requests
- * @bcms: list of bcms associated with this logical node
- * @num_bcms: num of @bcms
- */
-struct qcom_icc_node {
- const char *name;
- u16 links[SDM845_MAX_LINKS];
- u16 id;
- u16 num_links;
- u16 channels;
- u16 buswidth;
- u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
- u64 max_peak[QCOM_ICC_NUM_BUCKETS];
- struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
- size_t num_bcms;
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdm845.h"
+
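+/*
+ * DEFINE_QNODE() and DEFINE_QBCM() now come from icc-rpmh.h; link and node
+ * counts are derived from the variadic argument list rather than passed
+ * explicitly. For example, DEFINE_QNODE(qhm_tsif, SDM845_MASTER_TSIF, 1, 4,
+ * SDM845_SLAVE_A1NOC_SNOC) expands to a static struct qcom_icc_node with
+ * .channels = 1, .buswidth = 4 and a single link to SDM845_SLAVE_A1NOC_SNOC.
+ */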
+DEFINE_QNODE(qhm_a1noc_cfg, SDM845_MASTER_A1NOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup1, SDM845_MASTER_BLSP_1, 1, 4, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_tsif, SDM845_MASTER_TSIF, 1, 4, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, SDM845_MASTER_SDCC_2, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc4, SDM845_MASTER_SDCC_4, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, SDM845_MASTER_UFS_CARD, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, SDM845_MASTER_UFS_MEM, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_pcie_0, SDM845_MASTER_PCIE_0, 1, 8, SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, SDM845_MASTER_A2NOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SDM845_MASTER_QDSS_BAM, 1, 4, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, SDM845_MASTER_BLSP_2, 1, 4, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_cnoc, SDM845_MASTER_CNOC_A2NOC, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, SDM845_MASTER_CRYPTO, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, SDM845_MASTER_IPA, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_1, SDM845_MASTER_PCIE_1, 1, 8, SDM845_SLAVE_ANOC_PCIE_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDM845_MASTER_QDSS_ETR, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, SDM845_MASTER_USB3_0, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, SDM845_MASTER_USB3_1, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SDM845_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SDM845_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, SDM845_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qhm_spdm, SDM845_MASTER_SPDM, 1, 4, SDM845_SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qhm_tic, SDM845_MASTER_TIC, 1, 4, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_CNOC_A2NOC, SDM845_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qnm_snoc, SDM845_MASTER_SNOC_CNOC, 1, 8, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, SDM845_MASTER_QDSS_DAP, 1, 8, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_CNOC_A2NOC, SDM845_SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc, SDM845_MASTER_CNOC_DC_NOC, 1, 4, SDM845_SLAVE_LLCC_CFG, SDM845_SLAVE_MEM_NOC_CFG);
+DEFINE_QNODE(acm_l3, SDM845_MASTER_APPSS_PROC, 1, 16, SDM845_SLAVE_GNOC_SNOC, SDM845_SLAVE_GNOC_MEM_NOC, SDM845_SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(pm_gnoc_cfg, SDM845_MASTER_GNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(llcc_mc, SDM845_MASTER_LLCC, 4, 4, SDM845_SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, SDM845_MASTER_TCU_0, 1, 8, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_memnoc_cfg, SDM845_MASTER_MEM_NOC_CFG, 1, 4, SDM845_SLAVE_MSS_PROC_MS_MPU_CFG, SDM845_SLAVE_SERVICE_MEM_NOC);
+DEFINE_QNODE(qnm_apps, SDM845_MASTER_GNOC_MEM_NOC, 2, 32, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, SDM845_MASTER_MNOC_HF_MEM_NOC, 2, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SDM845_MASTER_MNOC_SF_MEM_NOC, 1, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDM845_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SDM845_MASTER_SNOC_SF_MEM_NOC, 1, 16, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, SDM845_MASTER_GFX3D, 2, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_mnoc_cfg, SDM845_MASTER_CNOC_MNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, SDM845_MASTER_CAMNOC_HF0, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, SDM845_MASTER_CAMNOC_HF1, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, SDM845_MASTER_CAMNOC_SF, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SDM845_MASTER_MDP0, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, SDM845_MASTER_MDP1, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SDM845_MASTER_ROTATOR, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, SDM845_MASTER_VIDEO_P0, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, SDM845_MASTER_VIDEO_P1, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, SDM845_MASTER_VIDEO_PROC, 1, 8, SDM845_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, SDM845_MASTER_SNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SDM845_MASTER_A1NOC_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, SDM845_MASTER_A2NOC_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_PCIE_0, SDM845_SLAVE_PCIE_1, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM, SDM845_SLAVE_TCU);
+DEFINE_QNODE(qnm_gladiator_sodv, SDM845_MASTER_GNOC_SNOC, 1, 8, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_IMEM, SDM845_SLAVE_PCIE_0, SDM845_SLAVE_PCIE_1, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM, SDM845_SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc, SDM845_MASTER_MEM_NOC_SNOC, 1, 8, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_IMEM, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_pcie_anoc, SDM845_MASTER_ANOC_PCIE_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, SDM845_MASTER_PIMEM, 1, 8, SDM845_SLAVE_SNOC_MEM_NOC_GC, SDM845_SLAVE_IMEM);
+DEFINE_QNODE(xm_gic, SDM845_MASTER_GIC, 1, 8, SDM845_SLAVE_SNOC_MEM_NOC_GC, SDM845_SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SDM845_SLAVE_A1NOC_SNOC, 1, 16, SDM845_MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SDM845_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_pcie_a1noc_snoc, SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, SDM845_MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(qns_a2noc_snoc, SDM845_SLAVE_A2NOC_SNOC, 1, 16, SDM845_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_snoc, SDM845_SLAVE_ANOC_PCIE_SNOC, 1, 16, SDM845_MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SDM845_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_camnoc_uncomp, SDM845_SLAVE_CAMNOC_UNCOMP, 1, 32);
+DEFINE_QNODE(qhs_a1_noc_cfg, SDM845_SLAVE_A1NOC_CFG, 1, 4, SDM845_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SDM845_SLAVE_A2NOC_CFG, 1, 4, SDM845_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_aop, SDM845_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SDM845_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SDM845_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDM845_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_compute_dsp_cfg, SDM845_SLAVE_CDSP_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SDM845_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDM845_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SDM845_SLAVE_DCC_CFG, 1, 4, SDM845_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_ddrss_cfg, SDM845_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_display_cfg, SDM845_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_glm, SDM845_SLAVE_GLM, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SDM845_SLAVE_GFX3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SDM845_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDM845_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SDM845_SLAVE_CNOC_MNOC_CFG, 1, 4, SDM845_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_pcie0_cfg, SDM845_SLAVE_PCIE_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie_gen3_cfg, SDM845_SLAVE_PCIE_1_CFG, 1, 4);
+DEFINE_QNODE(qhs_pdm, SDM845_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_phy_refgen_south, SDM845_SLAVE_SOUTH_PHY_CFG, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SDM845_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SDM845_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDM845_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qupv3_north, SDM845_SLAVE_BLSP_2, 1, 4);
+DEFINE_QNODE(qhs_qupv3_south, SDM845_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SDM845_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_sdc4, SDM845_SLAVE_SDCC_4, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDM845_SLAVE_SNOC_CFG, 1, 4, SDM845_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SDM845_SLAVE_SPDM_WRAPPER, 1, 4);
+DEFINE_QNODE(qhs_spss_cfg, SDM845_SLAVE_SPSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDM845_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm_north, SDM845_SLAVE_TLMM_NORTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm_south, SDM845_SLAVE_TLMM_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_tsif, SDM845_SLAVE_TSIF, 1, 4);
+DEFINE_QNODE(qhs_ufs_card_cfg, SDM845_SLAVE_UFS_CARD_CFG, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SDM845_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SDM845_SLAVE_USB3_0, 1, 4);
+DEFINE_QNODE(qhs_usb3_1, SDM845_SLAVE_USB3_1, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SDM845_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SDM845_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(qns_cnoc_a2noc, SDM845_SLAVE_CNOC_A2NOC, 1, 8, SDM845_MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SDM845_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_llcc, SDM845_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_memnoc, SDM845_SLAVE_MEM_NOC_CFG, 1, 4, SDM845_MASTER_MEM_NOC_CFG);
+DEFINE_QNODE(qns_gladiator_sodv, SDM845_SLAVE_GNOC_SNOC, 1, 8, SDM845_MASTER_GNOC_SNOC);
+DEFINE_QNODE(qns_gnoc_memnoc, SDM845_SLAVE_GNOC_MEM_NOC, 2, 32, SDM845_MASTER_GNOC_MEM_NOC);
+DEFINE_QNODE(srvc_gnoc, SDM845_SLAVE_SERVICE_GNOC, 1, 4);
+DEFINE_QNODE(ebi, SDM845_SLAVE_EBI1, 4, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SDM845_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_apps_io, SDM845_SLAVE_MEM_NOC_GNOC, 1, 32);
+DEFINE_QNODE(qns_llcc, SDM845_SLAVE_LLCC, 4, 16, SDM845_MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SDM845_SLAVE_MEM_NOC_SNOC, 1, 8, SDM845_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(srvc_memnoc, SDM845_SLAVE_SERVICE_MEM_NOC, 1, 4);
+DEFINE_QNODE(qns2_mem_noc, SDM845_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SDM845_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SDM845_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SDM845_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SDM845_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_apss, SDM845_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SDM845_SLAVE_SNOC_CNOC, 1, 8, SDM845_MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_memnoc_gc, SDM845_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDM845_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_memnoc_sf, SDM845_SLAVE_SNOC_MEM_NOC_SF, 1, 16, SDM845_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDM845_SLAVE_IMEM, 1, 8);
+DEFINE_QNODE(qxs_pcie, SDM845_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(qxs_pcie_gen3, SDM845_SLAVE_PCIE_1, 1, 8);
+DEFINE_QNODE(qxs_pimem, SDM845_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDM845_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_qdss_stm, SDM845_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDM845_SLAVE_TCU, 1, 8);
+
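+/*
+ * A BCM marked keepalive (e.g. MC0, SH0, SN0) keeps a minimum vote in
+ * place even when no client requests bandwidth, so the path to memory
+ * never collapses entirely.
+ */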
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", false, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qxs_pcie);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qxs_pcie_gen3);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &srvc_aggre1_noc, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, &srvc_aggre2_noc, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
+DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
+DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_sn9,
};
-/**
- * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
- * known as Bus Clock Manager (BCM)
- * @name: the bcm node name used to fetch BCM data from command db
- * @type: latency or bandwidth bcm
- * @addr: address offsets used when voting to RPMH
- * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
- * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
- * @dirty: flag used to indicate whether the bcm needs to be committed
- * @keepalive: flag used to indicate whether a keepalive is required
- * @aux_data: auxiliary data used when calculating threshold values and
- * communicating with RPMh
- * @list: used to link to other bcms when compiling lists for commit
- * @num_nodes: total number of @num_nodes
- * @nodes: list of qcom_icc_nodes that this BCM encapsulates
- */
-struct qcom_icc_bcm {
- const char *name;
- u32 type;
- u32 addr;
- u64 vote_x[QCOM_ICC_NUM_BUCKETS];
- u64 vote_y[QCOM_ICC_NUM_BUCKETS];
- bool dirty;
- bool keepalive;
- struct bcm_db aux_data;
- struct list_head list;
- size_t num_nodes;
- struct qcom_icc_node *nodes[];
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+ [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
};
-struct qcom_icc_fabric {
- struct qcom_icc_node **nodes;
- size_t num_nodes;
+static const struct qcom_icc_desc sdm845_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
- size_t num_nodes;
- struct qcom_icc_bcm **bcms;
- size_t num_bcms;
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_sn11,
+ &bcm_qup0,
};
-#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
- _numlinks, ...) \
- static struct qcom_icc_node _name = { \
- .id = _id, \
- .name = #_name, \
- .channels = _channels, \
- .buswidth = _buswidth, \
- .num_links = _numlinks, \
- .links = { __VA_ARGS__ }, \
- }
-
-DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
-DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
-DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
-DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
-DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
-DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
-DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
-DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
-DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
-DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
-DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
-DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
-DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
-DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
-DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
-DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
-DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
-DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
-DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
-DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
-DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
-DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
-DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
-DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
-DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
-DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
-DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
-DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
-DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
-DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
-DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
-DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
-DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
-DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
-DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
-DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
-DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
-DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
-DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
-DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
-DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
-DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
-DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
-DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
-DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
-DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
-DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
-DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
-DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
-DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
-DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
-DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
-DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
-DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
-DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
-DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
-DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
-DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
-DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
-DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
-DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
-DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);
-
-#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
- static struct qcom_icc_bcm _name = { \
- .name = _bcmname, \
- .keepalive = _keepalive, \
- .num_nodes = _numnodes, \
- .nodes = { __VA_ARGS__ }, \
- }
-
-DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
-DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
-DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
-DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
-DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
-DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
-DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
-DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
-DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
-DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
-DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
-DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
-DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
-DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);
-
-static struct qcom_icc_node *rsc_hlos_nodes[] = {
- [MASTER_APPSS_PROC] = &acm_l3,
- [MASTER_TCU_0] = &acm_tcu,
- [MASTER_LLCC] = &llcc_mc,
- [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
- [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
- [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
- [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
- [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
- [MASTER_BLSP_1] = &qhm_qup1,
- [MASTER_BLSP_2] = &qhm_qup2,
- [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
- [MASTER_SPDM] = &qhm_spdm,
- [MASTER_TIC] = &qhm_tic,
- [MASTER_TSIF] = &qhm_tsif,
- [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
- [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
- [MASTER_GNOC_MEM_NOC] = &qnm_apps,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
- [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
- [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
- [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
- [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
- [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
- [MASTER_SNOC_CNOC] = &qnm_snoc,
- [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
- [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
- [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
- [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
- [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
- [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
- [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
- [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[MASTER_CRYPTO] = &qxm_crypto,
- [MASTER_GFX3D] = &qxm_gpu,
[MASTER_IPA] = &qxm_ipa,
- [MASTER_MDP0] = &qxm_mdp0,
- [MASTER_MDP1] = &qxm_mdp1,
- [MASTER_PIMEM] = &qxm_pimem,
- [MASTER_ROTATOR] = &qxm_rot,
- [MASTER_VIDEO_P0] = &qxm_venus0,
- [MASTER_VIDEO_P1] = &qxm_venus1,
- [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
- [MASTER_GIC] = &xm_gic,
[MASTER_PCIE_1] = &xm_pcie3_1,
- [MASTER_PCIE_0] = &xm_pcie_0,
- [MASTER_QDSS_DAP] = &xm_qdss_dap,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
- [MASTER_SDCC_2] = &xm_sdc2,
- [MASTER_SDCC_4] = &xm_sdc4,
- [MASTER_UFS_CARD] = &xm_ufs_card,
- [MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3_0] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
- [SLAVE_EBI1] = &ebi,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sdm845_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_SPDM] = &qhm_spdm,
+ [MASTER_TIC] = &qhm_tic,
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AOP] = &qhs_aop,
[SLAVE_AOSS] = &qhs_aoss,
- [SLAVE_APPSS] = &qhs_apss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
@@ -386,9 +252,6 @@ static struct qcom_icc_node *rsc_hlos_nodes[] = {
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
- [SLAVE_LLCC_CFG] = &qhs_llcc,
- [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
- [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
@@ -414,53 +277,122 @@ static struct qcom_icc_node *rsc_hlos_nodes[] = {
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
- [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
- [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
- [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
- [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
- [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
- [SLAVE_SNOC_CNOC] = &qns_cnoc,
[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct qcom_icc_desc sdm845_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
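+/*
+ * The DC and gladiator NoCs have no BCMs of their own; empty tables keep
+ * the probe path uniform.
+ */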
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
+};
+
+static const struct qcom_icc_desc sdm845_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gladiator_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *gladiator_noc_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_l3,
+ [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
[SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
[SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
- [SLAVE_LLCC] = &qns_llcc,
- [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
- [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
- [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
- [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
- [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
- [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
- [SLAVE_IMEM] = &qxs_imem,
- [SLAVE_PCIE_0] = &qxs_pcie,
- [SLAVE_PCIE_1] = &qxs_pcie_gen3,
- [SLAVE_PIMEM] = &qxs_pimem,
- [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
- [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
- [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
[SLAVE_SERVICE_GNOC] = &srvc_gnoc,
- [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
- [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [SLAVE_SERVICE_SNOC] = &srvc_snoc,
- [SLAVE_QDSS_STM] = &xs_qdss_stm,
- [SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
- &bcm_acv,
+static const struct qcom_icc_desc sdm845_gladiator_noc = {
+ .nodes = gladiator_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gladiator_noc_nodes),
+ .bcms = gladiator_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mem_noc_bcms[] = {
&bcm_mc0,
+ &bcm_acv,
&bcm_sh0,
- &bcm_mm0,
&bcm_sh1,
- &bcm_mm1,
&bcm_sh2,
- &bcm_mm2,
&bcm_sh3,
- &bcm_mm3,
&bcm_sh5,
+};
+
+static struct qcom_icc_node *mem_noc_nodes[] = {
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
+ [MASTER_GNOC_MEM_NOC] = &qnm_apps,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sdm845_mem_noc = {
+ .nodes = mem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+ .bcms = mem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_MDP1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static const struct qcom_icc_desc sdm845_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn0,
- &bcm_ce0,
- &bcm_cn0,
- &bcm_qup0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
@@ -476,297 +408,34 @@ static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
&bcm_sn15,
};
-static struct qcom_icc_desc sdm845_rsc_hlos = {
- .nodes = rsc_hlos_nodes,
- .num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
- .bcms = rsc_hlos_bcms,
- .num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
+ [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &qxs_pcie,
+ [SLAVE_PCIE_1] = &qxs_pcie_gen3,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
};
-static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
-{
- struct qcom_icc_node *qn;
- const struct bcm_db *data;
- size_t data_count;
- int i;
-
- bcm->addr = cmd_db_read_addr(bcm->name);
- if (!bcm->addr) {
- dev_err(dev, "%s could not find RPMh address\n",
- bcm->name);
- return -EINVAL;
- }
-
- data = cmd_db_read_aux_data(bcm->name, &data_count);
- if (IS_ERR(data)) {
- dev_err(dev, "%s command db read error (%ld)\n",
- bcm->name, PTR_ERR(data));
- return PTR_ERR(data);
- }
- if (!data_count) {
- dev_err(dev, "%s command db missing or partial aux data\n",
- bcm->name);
- return -EINVAL;
- }
-
- bcm->aux_data.unit = le32_to_cpu(data->unit);
- bcm->aux_data.width = le16_to_cpu(data->width);
- bcm->aux_data.vcd = data->vcd;
- bcm->aux_data.reserved = data->reserved;
-
- /*
- * Link Qnodes to their respective BCMs
- */
- for (i = 0; i < bcm->num_nodes; i++) {
- qn = bcm->nodes[i];
- qn->bcms[qn->num_bcms] = bcm;
- qn->num_bcms++;
- }
-
- return 0;
-}
-
-inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
- u32 addr, bool commit)
-{
- bool valid = true;
-
- if (!cmd)
- return;
-
- if (vote_x == 0 && vote_y == 0)
- valid = false;
-
- if (vote_x > BCM_TCS_CMD_VOTE_MASK)
- vote_x = BCM_TCS_CMD_VOTE_MASK;
-
- if (vote_y > BCM_TCS_CMD_VOTE_MASK)
- vote_y = BCM_TCS_CMD_VOTE_MASK;
-
- cmd->addr = addr;
- cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
-
- /*
- * Set the wait for completion flag on commands that need to be completed
- * before the next command.
- */
- if (commit)
- cmd->wait = true;
-}
-
-static void tcs_list_gen(struct list_head *bcm_list, int bucket,
- struct tcs_cmd tcs_list[SDM845_MAX_VCD],
- int n[SDM845_MAX_VCD])
-{
- struct qcom_icc_bcm *bcm;
- bool commit;
- size_t idx = 0, batch = 0, cur_vcd_size = 0;
-
- memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
-
- list_for_each_entry(bcm, bcm_list, list) {
- commit = false;
- cur_vcd_size++;
- if ((list_is_last(&bcm->list, bcm_list)) ||
- bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
- commit = true;
- cur_vcd_size = 0;
- }
- tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
- bcm->vote_y[bucket], bcm->addr, commit);
- idx++;
- n[batch]++;
- /*
- * Batch the BCMs in such a way that we do not split them across
- * multiple payloads when they are under the same VCD. This is
- * to ensure that every BCM is committed since we only set the
- * commit bit on the last BCM request of every VCD.
- */
- if (n[batch] >= MAX_RPMH_PAYLOAD) {
- if (!commit) {
- n[batch] -= cur_vcd_size;
- n[batch + 1] = cur_vcd_size;
- }
- batch++;
- }
- }
-}
-
-static void bcm_aggregate(struct qcom_icc_bcm *bcm)
-{
- size_t i, bucket;
- u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
- u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
- u64 temp;
-
- for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
- for (i = 0; i < bcm->num_nodes; i++) {
- temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
- agg_avg[bucket] = max(agg_avg[bucket], temp);
-
- temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth);
- agg_peak[bucket] = max(agg_peak[bucket], temp);
- }
-
- temp = agg_avg[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_x[bucket] = temp;
-
- temp = agg_peak[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_y[bucket] = temp;
- }
-
- if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
- bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
- bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
- bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
- bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
- bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
- }
-
- bcm->dirty = false;
-}
-
-static void qcom_icc_pre_aggregate(struct icc_node *node)
-{
- size_t i;
- struct qcom_icc_node *qn;
-
- qn = node->data;
-
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
- qn->sum_avg[i] = 0;
- qn->max_peak[i] = 0;
- }
-}
-
-static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
- u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
-{
- size_t i;
- struct qcom_icc_node *qn;
-
- qn = node->data;
-
- if (!tag)
- tag = QCOM_ICC_TAG_ALWAYS;
-
- for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
- if (tag & BIT(i)) {
- qn->sum_avg[i] += avg_bw;
- qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
- }
- }
-
- *agg_avg += avg_bw;
- *agg_peak = max_t(u32, *agg_peak, peak_bw);
-
- for (i = 0; i < qn->num_bcms; i++)
- qn->bcms[i]->dirty = true;
-
- return 0;
-}
-
-static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
-{
- struct qcom_icc_provider *qp;
- struct icc_node *node;
- struct tcs_cmd cmds[SDM845_MAX_BCMS];
- struct list_head commit_list;
- int commit_idx[SDM845_MAX_VCD];
- int ret = 0, i;
-
- if (!src)
- node = dst;
- else
- node = src;
-
- qp = to_qcom_provider(node->provider);
-
- INIT_LIST_HEAD(&commit_list);
-
- for (i = 0; i < qp->num_bcms; i++) {
- if (qp->bcms[i]->dirty) {
- bcm_aggregate(qp->bcms[i]);
- list_add_tail(&qp->bcms[i]->list, &commit_list);
- }
- }
-
- /*
- * Construct the command list from a list of BCMs pre-sorted
- * by VCD.
- */
- tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
-
- if (!commit_idx[0])
- return ret;
-
- ret = rpmh_invalidate(qp->dev);
- if (ret) {
- pr_err("Error invalidating RPMH client (%d)\n", ret);
- return ret;
- }
-
- ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
- cmds, commit_idx);
- if (ret) {
- pr_err("Error sending AMC RPMH requests (%d)\n", ret);
- return ret;
- }
-
- INIT_LIST_HEAD(&commit_list);
-
- for (i = 0; i < qp->num_bcms; i++) {
- /*
- * Only generate WAKE and SLEEP commands if a resource's
- * requirements change as the execution environment transitions
- * between different power states.
- */
- if (qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_WAKE] !=
- qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
- qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_WAKE] !=
- qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_SLEEP]) {
- list_add_tail(&qp->bcms[i]->list, &commit_list);
- }
- }
-
- if (list_empty(&commit_list))
- return ret;
-
- tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
-
- ret = rpmh_write_batch(qp->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
- if (ret) {
- pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
- return ret;
- }
-
- tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
-
- ret = rpmh_write_batch(qp->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
- if (ret) {
- pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
- return ret;
- }
-
- return ret;
-}
-
-static int cmp_vcd(const void *_l, const void *_r)
-{
- const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
- const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;
-
- if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
- return -1;
- else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
- return 0;
- else
- return 1;
-}
+static const struct qcom_icc_desc sdm845_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
static int qnoc_probe(struct platform_device *pdev)
{
@@ -779,7 +448,7 @@ static int qnoc_probe(struct platform_device *pdev)
size_t num_nodes, i;
int ret;
- desc = of_device_get_match_data(&pdev->dev);
+ desc = device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
@@ -808,6 +477,12 @@ static int qnoc_probe(struct platform_device *pdev)
qp->bcms = desc->bcms;
qp->num_bcms = desc->num_bcms;
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter)) {
+ dev_err(&pdev->dev, "bcm_voter err:%ld\n", PTR_ERR(qp->voter));
+ return PTR_ERR(qp->voter);
+ }
+
ret = icc_provider_add(provider);
if (ret) {
dev_err(&pdev->dev, "error adding interconnect provider\n");
@@ -817,6 +492,9 @@ static int qnoc_probe(struct platform_device *pdev)
for (i = 0; i < num_nodes; i++) {
size_t j;
+ if (!qnodes[i])
+ continue;
+
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
@@ -827,10 +505,6 @@ static int qnoc_probe(struct platform_device *pdev)
node->data = qnodes[i];
icc_node_add(node, provider);
- dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
- qnodes[i]->name, node->id);
-
- /* populate links */
for (j = 0; j < qnodes[i]->num_links; j++)
icc_link_create(node, qnodes[i]->links[j]);
@@ -841,19 +515,9 @@ static int qnoc_probe(struct platform_device *pdev)
for (i = 0; i < qp->num_bcms; i++)
qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
- /*
- * Pre-sort the BCMs by VCD for ease of generating a command list
- * that groups BCMs with the same VCD together. VCDs are numbered
- * with the lowest being the most expensive time-wise, ensuring that
- * those commands are sent earliest in the queue.
- */
- sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);
-
platform_set_drvdata(pdev, qp);
- dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");
-
- return ret;
+ return 0;
err:
icc_nodes_remove(provider);
icc_provider_del(provider);
@@ -869,8 +533,23 @@ static int qnoc_remove(struct platform_device *pdev)
}
static const struct of_device_id qnoc_of_match[] = {
- { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
- { },
+ { .compatible = "qcom,sdm845-aggre1-noc",
+ .data = &sdm845_aggre1_noc},
+ { .compatible = "qcom,sdm845-aggre2-noc",
+ .data = &sdm845_aggre2_noc},
+ { .compatible = "qcom,sdm845-config-noc",
+ .data = &sdm845_config_noc},
+ { .compatible = "qcom,sdm845-dc-noc",
+ .data = &sdm845_dc_noc},
+ { .compatible = "qcom,sdm845-gladiator-noc",
+ .data = &sdm845_gladiator_noc},
+ { .compatible = "qcom,sdm845-mem-noc",
+ .data = &sdm845_mem_noc},
+ { .compatible = "qcom,sdm845-mmss-noc",
+ .data = &sdm845_mmss_noc},
+ { .compatible = "qcom,sdm845-system-noc",
+ .data = &sdm845_system_noc},
+ { }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
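The vote math deleted above does not vanish: the probe path now obtains a shared BCM voter via of_bcm_voter_get(), which is expected to carry the same aggregation. A minimal sketch of the normalization that bcm_aggregate() performed, with the clamp from tcs_cmd_gen() folded in (function name and parameters here are illustrative):

	/* Scale an aggregated bandwidth into a BCM vote, as bcm_aggregate() did. */
	static u64 bcm_vote(u64 agg_bw, u32 bcm_width, u32 buswidth,
			    u32 channels, u32 unit)
	{
		u64 temp = agg_bw * bcm_width;

		do_div(temp, buswidth * channels); /* normalize to the BCM's width */
		temp *= 1000ULL;                   /* same x1000 scaling as above */
		do_div(temp, unit);                /* divide by the cmd-db unit */
		return min_t(u64, temp, BCM_TCS_CMD_VOTE_MASK); /* clamp as tcs_cmd_gen() did */
	}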
diff --git a/drivers/interconnect/qcom/sdm845.h b/drivers/interconnect/qcom/sdm845.h
new file mode 100644
index 000000000000..776e9c2acb27
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm845.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDM845_H__
+#define __DRIVERS_INTERCONNECT_QCOM_SDM845_H__
+
+#define SDM845_MASTER_A1NOC_CFG 1
+#define SDM845_MASTER_BLSP_1 2
+#define SDM845_MASTER_TSIF 3
+#define SDM845_MASTER_SDCC_2 4
+#define SDM845_MASTER_SDCC_4 5
+#define SDM845_MASTER_UFS_CARD 6
+#define SDM845_MASTER_UFS_MEM 7
+#define SDM845_MASTER_PCIE_0 8
+#define SDM845_MASTER_A2NOC_CFG 9
+#define SDM845_MASTER_QDSS_BAM 10
+#define SDM845_MASTER_BLSP_2 11
+#define SDM845_MASTER_CNOC_A2NOC 12
+#define SDM845_MASTER_CRYPTO 13
+#define SDM845_MASTER_IPA 14
+#define SDM845_MASTER_PCIE_1 15
+#define SDM845_MASTER_QDSS_ETR 16
+#define SDM845_MASTER_USB3_0 17
+#define SDM845_MASTER_USB3_1 18
+#define SDM845_MASTER_CAMNOC_HF0_UNCOMP 19
+#define SDM845_MASTER_CAMNOC_HF1_UNCOMP 20
+#define SDM845_MASTER_CAMNOC_SF_UNCOMP 21
+#define SDM845_MASTER_SPDM 22
+#define SDM845_MASTER_TIC 23
+#define SDM845_MASTER_SNOC_CNOC 24
+#define SDM845_MASTER_QDSS_DAP 25
+#define SDM845_MASTER_CNOC_DC_NOC 26
+#define SDM845_MASTER_APPSS_PROC 27
+#define SDM845_MASTER_GNOC_CFG 28
+#define SDM845_MASTER_LLCC 29
+#define SDM845_MASTER_TCU_0 30
+#define SDM845_MASTER_MEM_NOC_CFG 31
+#define SDM845_MASTER_GNOC_MEM_NOC 32
+#define SDM845_MASTER_MNOC_HF_MEM_NOC 33
+#define SDM845_MASTER_MNOC_SF_MEM_NOC 34
+#define SDM845_MASTER_SNOC_GC_MEM_NOC 35
+#define SDM845_MASTER_SNOC_SF_MEM_NOC 36
+#define SDM845_MASTER_GFX3D 37
+#define SDM845_MASTER_CNOC_MNOC_CFG 38
+#define SDM845_MASTER_CAMNOC_HF0 39
+#define SDM845_MASTER_CAMNOC_HF1 40
+#define SDM845_MASTER_CAMNOC_SF 41
+#define SDM845_MASTER_MDP0 42
+#define SDM845_MASTER_MDP1 43
+#define SDM845_MASTER_ROTATOR 44
+#define SDM845_MASTER_VIDEO_P0 45
+#define SDM845_MASTER_VIDEO_P1 46
+#define SDM845_MASTER_VIDEO_PROC 47
+#define SDM845_MASTER_SNOC_CFG 48
+#define SDM845_MASTER_A1NOC_SNOC 49
+#define SDM845_MASTER_A2NOC_SNOC 50
+#define SDM845_MASTER_GNOC_SNOC 51
+#define SDM845_MASTER_MEM_NOC_SNOC 52
+#define SDM845_MASTER_ANOC_PCIE_SNOC 53
+#define SDM845_MASTER_PIMEM 54
+#define SDM845_MASTER_GIC 55
+#define SDM845_SLAVE_A1NOC_SNOC 56
+#define SDM845_SLAVE_SERVICE_A1NOC 57
+#define SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC 58
+#define SDM845_SLAVE_A2NOC_SNOC 59
+#define SDM845_SLAVE_ANOC_PCIE_SNOC 60
+#define SDM845_SLAVE_SERVICE_A2NOC 61
+#define SDM845_SLAVE_CAMNOC_UNCOMP 62
+#define SDM845_SLAVE_A1NOC_CFG 63
+#define SDM845_SLAVE_A2NOC_CFG 64
+#define SDM845_SLAVE_AOP 65
+#define SDM845_SLAVE_AOSS 66
+#define SDM845_SLAVE_CAMERA_CFG 67
+#define SDM845_SLAVE_CLK_CTL 68
+#define SDM845_SLAVE_CDSP_CFG 69
+#define SDM845_SLAVE_RBCPR_CX_CFG 70
+#define SDM845_SLAVE_CRYPTO_0_CFG 71
+#define SDM845_SLAVE_DCC_CFG 72
+#define SDM845_SLAVE_CNOC_DDRSS 73
+#define SDM845_SLAVE_DISPLAY_CFG 74
+#define SDM845_SLAVE_GLM 75
+#define SDM845_SLAVE_GFX3D_CFG 76
+#define SDM845_SLAVE_IMEM_CFG 77
+#define SDM845_SLAVE_IPA_CFG 78
+#define SDM845_SLAVE_CNOC_MNOC_CFG 79
+#define SDM845_SLAVE_PCIE_0_CFG 80
+#define SDM845_SLAVE_PCIE_1_CFG 81
+#define SDM845_SLAVE_PDM 82
+#define SDM845_SLAVE_SOUTH_PHY_CFG 83
+#define SDM845_SLAVE_PIMEM_CFG 84
+#define SDM845_SLAVE_PRNG 85
+#define SDM845_SLAVE_QDSS_CFG 86
+#define SDM845_SLAVE_BLSP_2 87
+#define SDM845_SLAVE_BLSP_1 88
+#define SDM845_SLAVE_SDCC_2 89
+#define SDM845_SLAVE_SDCC_4 90
+#define SDM845_SLAVE_SNOC_CFG 91
+#define SDM845_SLAVE_SPDM_WRAPPER 92
+#define SDM845_SLAVE_SPSS_CFG 93
+#define SDM845_SLAVE_TCSR 94
+#define SDM845_SLAVE_TLMM_NORTH 95
+#define SDM845_SLAVE_TLMM_SOUTH 96
+#define SDM845_SLAVE_TSIF 97
+#define SDM845_SLAVE_UFS_CARD_CFG 98
+#define SDM845_SLAVE_UFS_MEM_CFG 99
+#define SDM845_SLAVE_USB3_0 100
+#define SDM845_SLAVE_USB3_1 101
+#define SDM845_SLAVE_VENUS_CFG 102
+#define SDM845_SLAVE_VSENSE_CTRL_CFG 103
+#define SDM845_SLAVE_CNOC_A2NOC 104
+#define SDM845_SLAVE_SERVICE_CNOC 105
+#define SDM845_SLAVE_LLCC_CFG 106
+#define SDM845_SLAVE_MEM_NOC_CFG 107
+#define SDM845_SLAVE_GNOC_SNOC 108
+#define SDM845_SLAVE_GNOC_MEM_NOC 109
+#define SDM845_SLAVE_SERVICE_GNOC 110
+#define SDM845_SLAVE_EBI1 111
+#define SDM845_SLAVE_MSS_PROC_MS_MPU_CFG 112
+#define SDM845_SLAVE_MEM_NOC_GNOC 113
+#define SDM845_SLAVE_LLCC 114
+#define SDM845_SLAVE_MEM_NOC_SNOC 115
+#define SDM845_SLAVE_SERVICE_MEM_NOC 116
+#define SDM845_SLAVE_MNOC_SF_MEM_NOC 117
+#define SDM845_SLAVE_MNOC_HF_MEM_NOC 118
+#define SDM845_SLAVE_SERVICE_MNOC 119
+#define SDM845_SLAVE_APPSS 120
+#define SDM845_SLAVE_SNOC_CNOC 121
+#define SDM845_SLAVE_SNOC_MEM_NOC_GC 122
+#define SDM845_SLAVE_SNOC_MEM_NOC_SF 123
+#define SDM845_SLAVE_IMEM 124
+#define SDM845_SLAVE_PCIE_0 125
+#define SDM845_SLAVE_PCIE_1 126
+#define SDM845_SLAVE_PIMEM 127
+#define SDM845_SLAVE_SERVICE_SNOC 128
+#define SDM845_SLAVE_QDSS_STM 129
+#define SDM845_SLAVE_TCU 130
+#define SDM845_MASTER_OSM_L3_APPS 131
+#define SDM845_SLAVE_OSM_L3 132
+
+#endif /* __DRIVERS_INTERCONNECT_QCOM_SDM845_H__ */
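Each of these IDs names one endpoint in the per-NoC tables above; the .c file ties an ID to its bus width, channel count, and links. A hedged sketch of one such node (struct layout and field names assumed for illustration, not taken verbatim from this patch):

	static struct qcom_icc_node qhm_qup1 = {
		.name = "qhm_qup1",
		.id = SDM845_MASTER_BLSP_1,
		.channels = 1,                        /* assumed */
		.buswidth = 4,                        /* bytes per cycle, assumed */
		.num_links = 1,
		.links = { SDM845_SLAVE_A1NOC_SNOC }, /* BLSP_1 feeds the A1NoC */
	};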
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 7f811fe5bf69..1d3d273309bd 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -124,6 +124,20 @@ static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
return irq;
}
+unsigned int xintc_get_irq(void)
+{
+ unsigned int irq = -1;
+ u32 hwirq;
+
+ hwirq = xintc_read(primary_intc, IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(primary_intc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
+
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct xintc_irq_chip *irqc = d->host_data;
@@ -163,25 +177,6 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void xil_intc_handle_irq(struct pt_regs *regs)
-{
- u32 hwirq;
- struct xintc_irq_chip *irqc = primary_intc;
-
- do {
- hwirq = xintc_read(irqc, IVR);
- if (likely(hwirq != -1U)) {
- int ret;
-
- ret = handle_domain_irq(irqc->root_domain, hwirq, regs);
- WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq);
- continue;
- }
-
- break;
- } while (1);
-}
-
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
@@ -250,7 +245,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
}
} else {
primary_intc = irqc;
- set_handle_irq(xil_intc_handle_irq);
+ irq_set_default_host(primary_intc->root_domain);
}
return 0;
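The removed root handler is replaced by an exported lookup plus a default irq domain, so the architecture's own entry code now drives the dispatch loop. A sketch of such a consumer (irq_enter()/irq_exit() bookkeeping omitted; the function name is illustrative):

	void do_IRQ(struct pt_regs *regs)
	{
		unsigned int irq = xintc_get_irq();

		while (irq != -1U) {          /* -1 means nothing pending in the IVR */
			generic_handle_irq(irq);
			irq = xintc_get_irq();
		}
	}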
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index b1314d104b06..b4821c751d04 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -142,7 +142,7 @@ const struct file_operations anslcd_fops = {
};
static struct miscdevice anslcd_dev = {
- ANSLCD_MINOR,
+ LCD_MINOR,
"anslcd",
&anslcd_fops
};
diff --git a/drivers/macintosh/ans-lcd.h b/drivers/macintosh/ans-lcd.h
index f0a6e4c68557..bca7d76d441b 100644
--- a/drivers/macintosh/ans-lcd.h
+++ b/drivers/macintosh/ans-lcd.h
@@ -2,8 +2,6 @@
#ifndef _PPC_ANS_LCD_H
#define _PPC_ANS_LCD_H
-#define ANSLCD_MINOR 156
-
#define ANSLCD_CLEAR 0x01
#define ANSLCD_SENDCTRL 0x02
#define ANSLCD_SETSHORTDELAY 0x03
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index a0d87ed9da69..f55f6adf5e5f 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -323,7 +323,7 @@ static void do_attach(struct i2c_adapter *adapter)
of_node_put(np);
} else {
strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
- i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
+ i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL);
}
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
@@ -331,7 +331,7 @@ static void do_attach(struct i2c_adapter *adapter)
of_node_put(np);
} else {
strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
- i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
+ i2c_new_scanned_device(adapter, &info, scan_adm1030, NULL);
}
}
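For reference, i2c_new_scanned_device() is the non-deprecated spelling of the same probe-by-address operation, differing only in its failure convention: it returns an ERR_PTR where i2c_new_probed_device() returned NULL. The call sites above ignore the result, so behavior is unchanged; a caller that checks it would look like:

	struct i2c_client *client;

	client = i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL);
	if (IS_ERR(client))          /* the old API returned NULL here instead */
		return PTR_ERR(client);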
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index d38fb78a3b23..83eb05bf85ff 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -75,9 +75,6 @@
/* Some compile options */
#undef DEBUG_SLEEP
-/* Misc minor number allocated for /dev/pmu */
-#define PMU_MINOR 154
-
/* How many iterations between battery polls */
#define BATTERY_POLLING_COUNT 2
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ab4eb750bbdd..5a577a6734cf 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -227,4 +227,13 @@ config ZYNQMP_IPI_MBOX
message to the IPI buffer and will access the IPI control
registers to kick the other processor or enquire status.
+config SUN6I_MSGBOX
+ tristate "Allwinner sun6i/sun8i/sun9i/sun50i Message Box"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default ARCH_SUNXI
+ help
+ Mailbox implementation for the hardware message box present in
+ various Allwinner SoCs. This mailbox is used for communication
+ between the application CPUs and the power management coprocessor.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c22fad6f696b..2e4364ef5c47 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -48,3 +48,5 @@ obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
+
+obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
index 02b7b28e6969..9f2ce7f03c67 100644
--- a/drivers/mailbox/armada-37xx-rwtm-mailbox.c
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -156,16 +156,12 @@ static int armada_37xx_mbox_probe(struct platform_device *pdev)
return -ENOMEM;
mbox->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->base)) {
- dev_err(&pdev->dev, "ioremap failed\n");
+ if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
- }
mbox->irq = platform_get_irq(pdev, 0);
- if (mbox->irq < 0) {
- dev_err(&pdev->dev, "Cannot get irq\n");
+ if (mbox->irq < 0)
return mbox->irq;
- }
mbox->dev = &pdev->dev;
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 8ee9db274802..bee33abb5308 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1599,6 +1599,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
1 << RING_CMPL_ALIGN_ORDER, 0);
if (!mbox->cmpl_pool) {
ret = -ENOMEM;
+ goto fail_destroy_bd_pool;
}
/* Allocate platform MSIs for each ring */
@@ -1661,6 +1662,7 @@ fail_free_debugfs_root:
platform_msi_domain_free_irqs(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
+fail_destroy_bd_pool:
dma_pool_destroy(mbox->bd_pool);
fail:
return ret;
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index fcb3b18a0678..c10a9318a4b7 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -436,33 +436,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
pdcs = filp->private_data;
out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"SPU %u stats:\n", pdcs->pdc_idx);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC requests....................%u\n",
pdcs->pdc_requests);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"PDC responses...................%u\n",
pdcs->pdc_replies);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx not done.....................%u\n",
pdcs->last_tx_not_done);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx ring full....................%u\n",
pdcs->tx_ring_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx ring full....................%u\n",
pdcs->rx_ring_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Tx desc write fail. Ring full...%u\n",
pdcs->txnobuf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Rx desc write fail. Ring full...%u\n",
pdcs->rxnobuf);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Receive overflow................%u\n",
pdcs->rx_oflow);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Num frags in rx ring............%u\n",
NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
pdcs->nrxpost));
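The switch above matters because snprintf() returns the length the output would have had without truncation, so accumulating its result lets out_offset run past out_count, and the next "out_count - out_offset" size underflows; scnprintf() returns the bytes actually stored, keeping the running offset bounded. In isolation:

	char buf[8];
	int off = 0;

	off += snprintf(buf, sizeof(buf), "0123456789");  /* returns 10: off > sizeof(buf) */
	off = scnprintf(buf, sizeof(buf), "0123456789");  /* returns 7: truncated and bounded */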
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 2cdcdc5f1119..7906624a731c 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -4,8 +4,10 @@
*/
#include <linux/clk.h>
+#include <linux/firmware/imx/ipc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
@@ -27,6 +29,8 @@
#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
#define IMX_MU_CHANS 16
+/* TX0/RX0/RXDB[0-3] */
+#define IMX_MU_SCU_CHANS 6
#define IMX_MU_CHAN_NAME_SIZE 20
enum imx_mu_chan_type {
@@ -36,11 +40,9 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RXDB, /* Rx doorbell */
};
-struct imx_mu_dcfg {
- u32 xTR[4]; /* Transmit Registers */
- u32 xRR[4]; /* Receive Registers */
- u32 xSR; /* Status Register */
- u32 xCR; /* Control Register */
+struct imx_sc_rpc_msg_max {
+ struct imx_sc_rpc_msg hdr;
+ u32 data[7];
};
struct imx_mu_con_priv {
@@ -67,18 +69,14 @@ struct imx_mu_priv {
bool side_b;
};
-static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
- .xTR = {0x0, 0x4, 0x8, 0xc},
- .xRR = {0x10, 0x14, 0x18, 0x1c},
- .xSR = 0x20,
- .xCR = 0x24,
-};
-
-static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
- .xTR = {0x20, 0x24, 0x28, 0x2c},
- .xRR = {0x40, 0x44, 0x48, 0x4c},
- .xSR = 0x60,
- .xCR = 0x64,
+struct imx_mu_dcfg {
+ int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
+ int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+ void (*init)(struct imx_mu_priv *priv);
+ u32 xTR[4]; /* Transmit Registers */
+ u32 xRR[4]; /* Receive Registers */
+ u32 xSR; /* Status Register */
+ u32 xCR; /* Control Register */
};
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -111,6 +109,117 @@ static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
return val;
}
+static int imx_mu_generic_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ u32 *arg = data;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_TXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
+ tasklet_schedule(&cp->txdb_tasklet);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_generic_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ u32 dat;
+
+ dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
+ mbox_chan_received_data(cp->chan, (void *)&dat);
+
+ return 0;
+}
+
+static int imx_mu_scu_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ struct imx_sc_rpc_msg_max *msg = data;
+ u32 *arg = data;
+ int i, ret;
+ u32 xsr;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ if (msg->hdr.size > sizeof(*msg) / 4) {
+ /*
+ * hdr.size counts 32-bit words, and the real message
+ * can be shorter than struct imx_sc_rpc_msg_max
+ */
+ dev_err(priv->dev, "Exceed max msg size (%zu words) on TX, got: %i\n", sizeof(*msg) / 4, msg->hdr.size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4 && i < msg->hdr.size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ for (; i < msg->hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR,
+ xsr,
+ xsr & IMX_MU_xSR_TEn(i % 4),
+ 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "Send data index: %d timeout\n", i);
+ return ret;
+ }
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_scu_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ struct imx_sc_rpc_msg_max msg;
+ u32 *data = (u32 *)&msg;
+ int i, ret;
+ u32 xsr;
+
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
+
+ if (msg.hdr.size > sizeof(msg) / 4) {
+ dev_err(priv->dev, "Exceed max msg size (%zu words) on RX, got: %i\n",
+ sizeof(msg) / 4, msg.hdr.size);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < msg.hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR, xsr,
+ xsr & IMX_MU_xSR_RFn(i % 4), 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "timeout read idx %d\n", i);
+ return ret;
+ }
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(0), 0);
+ mbox_chan_received_data(cp->chan, (void *)&msg);
+
+ return 0;
+}
+
static void imx_mu_txdb_tasklet(unsigned long data)
{
struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
@@ -123,7 +232,7 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
struct mbox_chan *chan = p;
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- u32 val, ctrl, dat;
+ u32 val, ctrl;
ctrl = imx_mu_read(priv, priv->dcfg->xCR);
val = imx_mu_read(priv, priv->dcfg->xSR);
@@ -152,8 +261,7 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
mbox_chan_txdone(chan, 0);
} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
- dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
- mbox_chan_received_data(chan, (void *)&dat);
+ priv->dcfg->rx(priv, cp);
} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
mbox_chan_received_data(chan, NULL);
@@ -169,23 +277,8 @@ static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- u32 *arg = data;
- switch (cp->type) {
- case IMX_MU_TYPE_TX:
- imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
- break;
- case IMX_MU_TYPE_TXDB:
- imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
- tasklet_schedule(&cp->txdb_tasklet);
- break;
- default:
- dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
- return -EINVAL;
- }
-
- return 0;
+ return priv->dcfg->tx(priv, cp, data);
}
static int imx_mu_startup(struct mbox_chan *chan)
@@ -256,6 +349,42 @@ static const struct mbox_chan_ops imx_mu_ops = {
.shutdown = imx_mu_shutdown,
};
+static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+
+ switch (type) {
+ case IMX_MU_TYPE_TX:
+ case IMX_MU_TYPE_RX:
+ if (idx != 0)
+ dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
+ chan = type;
+ break;
+ case IMX_MU_TYPE_RXDB:
+ chan = 2 + idx;
+ break;
+ default:
+ dev_err(mbox->dev, "Invalid chan type: %d\n", type);
+ return NULL;
+ }
+
+ if (chan >= mbox->num_chans) {
+ dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
+
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
@@ -280,6 +409,22 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i % 4;
+ cp->type = i >> 2;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_CHANS;
+ priv->mbox.of_xlate = imx_mu_xlate;
+
if (priv->side_b)
return;
@@ -287,13 +432,34 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, 0, priv->dcfg->xCR);
}
+static void imx_mu_init_scu(struct imx_mu_priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i < 2 ? 0 : i - 2;
+ cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_SCU_CHANS;
+ priv->mbox.of_xlate = imx_mu_scu_xlate;
+
+ /* Set default MU configuration */
+ imx_mu_write(priv, 0, priv->dcfg->xCR);
+}
+
static int imx_mu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_mu_priv *priv;
const struct imx_mu_dcfg *dcfg;
- unsigned int i;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -329,32 +495,19 @@ static int imx_mu_probe(struct platform_device *pdev)
return ret;
}
- for (i = 0; i < IMX_MU_CHANS; i++) {
- struct imx_mu_con_priv *cp = &priv->con_priv[i];
-
- cp->idx = i % 4;
- cp->type = i >> 2;
- cp->chan = &priv->mbox_chans[i];
- priv->mbox_chans[i].con_priv = cp;
- snprintf(cp->irq_desc, sizeof(cp->irq_desc),
- "imx_mu_chan[%i-%i]", cp->type, cp->idx);
- }
-
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
+ priv->dcfg->init(priv);
+
spin_lock_init(&priv->xcr_lock);
priv->mbox.dev = dev;
priv->mbox.ops = &imx_mu_ops;
priv->mbox.chans = priv->mbox_chans;
- priv->mbox.num_chans = IMX_MU_CHANS;
- priv->mbox.of_xlate = imx_mu_xlate;
priv->mbox.txdone_irq = true;
platform_set_drvdata(pdev, priv);
- imx_mu_init_generic(priv);
-
return devm_mbox_controller_register(dev, &priv->mbox);
}
@@ -367,9 +520,40 @@ static int imx_mu_remove(struct platform_device *pdev)
return 0;
}
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x20, 0x24, 0x28, 0x2c},
+ .xRR = {0x40, 0x44, 0x48, 0x4c},
+ .xSR = 0x60,
+ .xCR = 0x64,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
+ .tx = imx_mu_scu_tx,
+ .rx = imx_mu_scu_rx,
+ .init = imx_mu_init_scu,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
+ { .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
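The SCU transport above streams a variable-length RPC message through the four transmit/receive registers, with hdr.size counting 32-bit words including the header, so the largest message is eight words (header plus seven data words). A hedged sketch of a sender filling one (the channel, return variable, and service/function/version values are illustrative):

	struct imx_sc_rpc_msg_max msg = {
		.hdr = {
			.ver  = 1,    /* illustrative RPC version */
			.size = 2,    /* words including the header: hdr + 1 data word */
			.svc  = 0x01, /* illustrative service id */
			.func = 0x02, /* illustrative function id */
		},
		.data = { 0x12345678 },
	};

	ret = mbox_send_message(tx0_chan, &msg); /* words fan out over xTR[0..3] */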
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 9a6ce9f5a7db..b24822ad8409 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -56,7 +56,6 @@ struct cmdq_thread {
void __iomem *base;
struct list_head task_busy_list;
u32 priority;
- bool atomic_exec;
};
struct cmdq_task {
@@ -162,48 +161,11 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
cmdq_thread_invalidate_fetched_data(thread);
}
-static bool cmdq_command_is_wfe(u64 cmd)
-{
- u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
- u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
- u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
-
- return ((cmd & wfe_mask) == (wfe_op | wfe_option));
-}
-
-/* we assume tasks in the same display GCE thread are waiting for the same event. */
-static void cmdq_task_remove_wfe(struct cmdq_task *task)
-{
- struct device *dev = task->cmdq->mbox.dev;
- u64 *base = task->pkt->va_base;
- int i;
-
- dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
- DMA_TO_DEVICE);
- for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
- if (cmdq_command_is_wfe(base[i]))
- base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
- CMDQ_JUMP_PASS;
- dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
- DMA_TO_DEVICE);
-}
-
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}
-static void cmdq_thread_wait_end(struct cmdq_thread *thread,
- unsigned long end_pa)
-{
- struct device *dev = thread->chan->mbox->dev;
- unsigned long curr_pa;
-
- if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
- curr_pa, curr_pa == end_pa, 1, 20))
- dev_err(dev, "GCE thread cannot run to end.\n");
-}
-
static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
{
struct cmdq_task_cb *cb = &task->pkt->async_cb;
@@ -383,36 +345,15 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
-
- /*
- * Atomic execution should remove the following wfe, i.e. only
- * wait event at first task, and prevent to pause when running.
- */
- if (thread->atomic_exec) {
- /* GCE is executing if command is not WFE */
- if (!cmdq_thread_is_in_wfe(thread)) {
- cmdq_thread_resume(thread);
- cmdq_thread_wait_end(thread, end_pa);
- WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- /* set to this task directly */
- writel(task->pa_base,
- thread->base + CMDQ_THR_CURR_ADDR);
- } else {
- cmdq_task_insert_into_thread(task);
- cmdq_task_remove_wfe(task);
- smp_mb(); /* modify jump before enable thread */
- }
+ /* check boundary */
+ if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+ curr_pa == end_pa) {
+ /* set to this task directly */
+ writel(task->pa_base,
+ thread->base + CMDQ_THR_CURR_ADDR);
} else {
- /* check boundary */
- if (curr_pa == end_pa - CMDQ_INST_SIZE ||
- curr_pa == end_pa) {
- /* set to this task directly */
- writel(task->pa_base,
- thread->base + CMDQ_THR_CURR_ADDR);
- } else {
- cmdq_task_insert_into_thread(task);
- smp_mb(); /* modify jump before enable thread */
- }
+ cmdq_task_insert_into_thread(task);
+ smp_mb(); /* modify jump before enable thread */
}
writel(task->pa_base + pkt->cmd_buf_size,
thread->base + CMDQ_THR_END_ADDR);
@@ -432,10 +373,62 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
}
+static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq_task_cb *cb;
+ struct cmdq_cb_data data;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task, *tmp;
+ unsigned long flags;
+ u32 enable;
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ if (list_empty(&thread->task_busy_list))
+ goto out;
+
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ if (!cmdq_thread_is_in_wfe(thread))
+ goto wait;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ cb = &task->pkt->async_cb;
+ if (cb->cb) {
+ data.sta = CMDQ_CB_ERROR;
+ data.data = cb->data;
+ cb->cb(data);
+ }
+ list_del(&task->list_entry);
+ kfree(task);
+ }
+
+ cmdq_thread_resume(thread);
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+
+out:
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ return 0;
+
+wait:
+ cmdq_thread_resume(thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
+ enable, enable == 0, 1, timeout)) {
+ dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
+ (u32)(thread->base - cmdq->base));
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
.send_data = cmdq_mbox_send_data,
.startup = cmdq_mbox_startup,
.shutdown = cmdq_mbox_shutdown,
+ .flush = cmdq_mbox_flush,
};
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
@@ -449,7 +442,6 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
thread->priority = sp->args[1];
- thread->atomic_exec = (sp->args[2] != 0);
thread->chan = &mbox->chans[ind];
return &mbox->chans[ind];
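With atomic_exec gone, a client that must not leave commands queued (an atomic display update, say) asks the framework to resolve them through the .flush hook added above; mbox_flush() is the mailbox core's entry point for it. A client-side sketch (chan, dev, and the timeout value are illustrative):

	int err = mbox_flush(chan, 2000); /* ends up in cmdq_mbox_flush() */

	if (err)
		dev_err(dev, "GCE flush failed: %d\n", err);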
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
new file mode 100644
index 000000000000..ccecf2e5941d
--- /dev/null
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2017-2019 Samuel Holland <samuel@sholland.org>
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define NUM_CHANS 8
+
+#define CTRL_REG(n) (0x0000 + 0x4 * ((n) / 4))
+#define CTRL_RX(n) BIT(0 + 8 * ((n) % 4))
+#define CTRL_TX(n) BIT(4 + 8 * ((n) % 4))
+
+#define REMOTE_IRQ_EN_REG 0x0040
+#define REMOTE_IRQ_STAT_REG 0x0050
+#define LOCAL_IRQ_EN_REG 0x0060
+#define LOCAL_IRQ_STAT_REG 0x0070
+
+#define RX_IRQ(n) BIT(0 + 2 * (n))
+#define RX_IRQ_MASK 0x5555
+#define TX_IRQ(n) BIT(1 + 2 * (n))
+#define TX_IRQ_MASK 0xaaaa
+
+#define FIFO_STAT_REG(n) (0x0100 + 0x4 * (n))
+#define FIFO_STAT_MASK GENMASK(0, 0)
+
+#define MSG_STAT_REG(n) (0x0140 + 0x4 * (n))
+#define MSG_STAT_MASK GENMASK(2, 0)
+
+#define MSG_DATA_REG(n) (0x0180 + 0x4 * (n))
+
+#define mbox_dbg(mbox, ...) dev_dbg((mbox)->controller.dev, __VA_ARGS__)
+
+struct sun6i_msgbox {
+ struct mbox_controller controller;
+ struct clk *clk;
+ spinlock_t lock;
+ void __iomem *regs;
+};
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan);
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan);
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline struct sun6i_msgbox *to_sun6i_msgbox(struct mbox_chan *chan)
+{
+ return chan->con_priv;
+}
+
+static irqreturn_t sun6i_msgbox_irq(int irq, void *dev_id)
+{
+ struct sun6i_msgbox *mbox = dev_id;
+ uint32_t status;
+ int n;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mbox->regs + LOCAL_IRQ_EN_REG) &
+ readl(mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < NUM_CHANS; ++n) {
+ struct mbox_chan *chan = &mbox->controller.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ while (sun6i_msgbox_peek_data(chan)) {
+ uint32_t msg = readl(mbox->regs + MSG_DATA_REG(n));
+
+ mbox_dbg(mbox, "Channel %d received 0x%08x\n", n, msg);
+ mbox_chan_received_data(chan, &msg);
+ }
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun6i_msgbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+ uint32_t msg = *(uint32_t *)data;
+
+ /* Using a channel backwards gets the hardware into a bad state. */
+ if (WARN_ON_ONCE(!(readl(mbox->regs + CTRL_REG(n)) & CTRL_TX(n))))
+ return 0;
+
+ writel(msg, mbox->regs + MSG_DATA_REG(n));
+ mbox_dbg(mbox, "Channel %d sent 0x%08x\n", n, msg);
+
+ return 0;
+}
+
+static int sun6i_msgbox_startup(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /* The coprocessor is responsible for setting channel directions. */
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Flush the receive FIFO. */
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ /* Enable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) | RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+ }
+
+ mbox_dbg(mbox, "Channel %d startup complete\n", n);
+
+ return 0;
+}
+
+static void sun6i_msgbox_shutdown(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Disable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) & ~RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+
+ /* Attempt to flush the FIFO until the IRQ is cleared. */
+ do {
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ } while (readl(mbox->regs + LOCAL_IRQ_STAT_REG) & RX_IRQ(n));
+ }
+
+ mbox_dbg(mbox, "Channel %d shutdown complete\n", n);
+}
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /*
+ * The hardware allows snooping on the remote user's IRQ statuses.
+ * We consider a message to be acknowledged only once the receive IRQ
+ * for that channel is cleared. Since the receive IRQ for a channel
+ * cannot be cleared until the FIFO for that channel is empty, this
+ * ensures that the message has actually been read. It also gives the
+ * recipient an opportunity to perform minimal processing before
+ * acknowledging the message.
+ */
+ return !(readl(mbox->regs + REMOTE_IRQ_STAT_REG) & RX_IRQ(n));
+}
+
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ return readl(mbox->regs + MSG_STAT_REG(n)) & MSG_STAT_MASK;
+}
+
+static const struct mbox_chan_ops sun6i_msgbox_chan_ops = {
+ .send_data = sun6i_msgbox_send_data,
+ .startup = sun6i_msgbox_startup,
+ .shutdown = sun6i_msgbox_shutdown,
+ .last_tx_done = sun6i_msgbox_last_tx_done,
+ .peek_data = sun6i_msgbox_peek_data,
+};
+
+static int sun6i_msgbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_chan *chans;
+ struct reset_control *reset;
+ struct resource *res;
+ struct sun6i_msgbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, NUM_CHANS, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_CHANS; ++i)
+ chans[i].con_priv = mbox;
+
+ mbox->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mbox->clk)) {
+ ret = PTR_ERR(mbox->clk);
+ dev_err(dev, "Failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mbox->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ dev_err(dev, "Failed to get reset control: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /*
+ * NOTE: We rely on platform firmware to preconfigure the channel
+ * directions, and we share this hardware block with other firmware
+ * that runs concurrently with Linux (e.g. a trusted monitor).
+ *
+ * Therefore, we do *not* assert the reset line if probing fails or
+ * when removing the device.
+ */
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ dev_err(dev, "Failed to deassert reset: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto err_disable_unprepare;
+ }
+
+ mbox->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /* Disable all IRQs for this end of the msgbox. */
+ writel(0, mbox->regs + LOCAL_IRQ_EN_REG);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ sun6i_msgbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register IRQ handler: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.ops = &sun6i_msgbox_chan_ops;
+ mbox->controller.chans = chans;
+ mbox->controller.num_chans = NUM_CHANS;
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+
+ spin_lock_init(&mbox->lock);
+ platform_set_drvdata(pdev, mbox);
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(dev, "Failed to register controller: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ return 0;
+
+err_disable_unprepare:
+ clk_disable_unprepare(mbox->clk);
+
+ return ret;
+}
+
+static int sun6i_msgbox_remove(struct platform_device *pdev)
+{
+ struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ /* See the comment in sun6i_msgbox_probe about the reset line. */
+ clk_disable_unprepare(mbox->clk);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_msgbox_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-msgbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_msgbox_of_match);
+
+static struct platform_driver sun6i_msgbox_driver = {
+ .driver = {
+ .name = "sun6i-msgbox",
+ .of_match_table = sun6i_msgbox_of_match,
+ },
+ .probe = sun6i_msgbox_probe,
+ .remove = sun6i_msgbox_remove,
+};
+module_platform_driver(sun6i_msgbox_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner sun6i/sun8i/sun9i/sun50i Message Box");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
index c05b12110456..17712456fa63 100644
--- a/drivers/md/dm-clone-metadata.c
+++ b/drivers/md/dm-clone-metadata.c
@@ -656,7 +656,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd,
return (bit >= (start + nr_regions));
}
-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
+unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
{
return bitmap_weight(cmd->region_map, cmd->nr_regions);
}
@@ -850,6 +850,12 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re
struct dirty_map *dmap;
unsigned long word, flags;
+ if (unlikely(region_nr >= cmd->nr_regions)) {
+ DMERR("Region %lu out of range (total number of regions %lu)",
+ region_nr, cmd->nr_regions);
+ return -ERANGE;
+ }
+
word = region_nr / BITS_PER_LONG;
spin_lock_irqsave(&cmd->bitmap_lock, flags);
@@ -879,6 +885,13 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
struct dirty_map *dmap;
unsigned long word, region_nr;
+ if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start ||
+ (start + nr_regions) > cmd->nr_regions)) {
+ DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)",
+ start, nr_regions, cmd->nr_regions);
+ return -ERANGE;
+ }
+
spin_lock_irq(&cmd->bitmap_lock);
if (cmd->read_only) {
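Both new guards follow the usual overflow-safe range pattern: reject a start at or past the total, then catch unsigned wrap-around before comparing the sum against the bound. As a standalone predicate:

	/* Overflow-safe "[start, start + nr) fits within [0, total)" check. */
	static bool range_ok(unsigned long start, unsigned long nr,
			     unsigned long total)
	{
		return start < total &&
		       start + nr >= start &&  /* no unsigned wrap-around */
		       start + nr <= total;
	}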
diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h
index 14af1ebd853f..d848b8799c07 100644
--- a/drivers/md/dm-clone-metadata.h
+++ b/drivers/md/dm-clone-metadata.h
@@ -156,7 +156,7 @@ bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd,
/*
* Returns the number of hydrated regions.
*/
-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
+unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
/*
* Returns the first unhydrated region with region_nr >= @start
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index d1e1b5b56b1b..5ce96ddf1ce1 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -282,7 +282,7 @@ static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
/* Get the address of the region in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
- return (region_nr << clone->region_shift);
+ return ((sector_t)region_nr << clone->region_shift);
}
/* Get the region number of the bio */
@@ -293,10 +293,17 @@ static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
/* Get the region range covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
- unsigned long *rs, unsigned long *re)
+ unsigned long *rs, unsigned long *nr_regions)
{
+ unsigned long end;
+
*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
- *re = bio_end_sector(bio) >> clone->region_shift;
+ end = bio_end_sector(bio) >> clone->region_shift;
+
+ if (*rs >= end)
+ *nr_regions = 0;
+ else
+ *nr_regions = end - *rs;
}
/* Check whether a bio overwrites a region */
@@ -454,7 +461,7 @@ static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
- unsigned long rs, re;
+ unsigned long rs, nr_regions;
/*
* If the destination device supports discards, remap and trim the
@@ -463,9 +470,9 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
*/
if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
remap_to_dest(clone, bio);
- bio_region_range(clone, bio, &rs, &re);
- trim_bio(bio, rs << clone->region_shift,
- (re - rs) << clone->region_shift);
+ bio_region_range(clone, bio, &rs, &nr_regions);
+ trim_bio(bio, region_to_sector(clone, rs),
+ nr_regions << clone->region_shift);
generic_make_request(bio);
} else
bio_endio(bio);
@@ -473,12 +480,21 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
static void process_discard_bio(struct clone *clone, struct bio *bio)
{
- unsigned long rs, re;
+ unsigned long rs, nr_regions;
- bio_region_range(clone, bio, &rs, &re);
- BUG_ON(re > clone->nr_regions);
+ bio_region_range(clone, bio, &rs, &nr_regions);
+ if (!nr_regions) {
+ bio_endio(bio);
+ return;
+ }
- if (unlikely(rs == re)) {
+ if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
+ (rs + nr_regions) > clone->nr_regions)) {
+ DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
+ clone_device_name(clone), rs, nr_regions,
+ clone->nr_regions,
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio_sectors(bio));
bio_endio(bio);
return;
}
@@ -487,7 +503,7 @@ static void process_discard_bio(struct clone *clone, struct bio *bio)
* The covered regions are already hydrated so we just need to pass
* down the discard.
*/
- if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) {
+ if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
complete_discard_bio(clone, bio, true);
return;
}
@@ -788,11 +804,14 @@ static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr
struct dm_io_region from, to;
struct clone *clone = hd->clone;
+ if (WARN_ON(!nr_regions))
+ return;
+
region_size = clone->region_size;
region_start = hd->region_nr;
region_end = region_start + nr_regions - 1;
- total_size = (nr_regions - 1) << clone->region_shift;
+ total_size = region_to_sector(clone, nr_regions - 1);
if (region_end == clone->nr_regions - 1) {
/*
@@ -1169,7 +1188,7 @@ static void process_deferred_discards(struct clone *clone)
int r = -EPERM;
struct bio *bio;
struct blk_plug plug;
- unsigned long rs, re;
+ unsigned long rs, nr_regions;
struct bio_list discards = BIO_EMPTY_LIST;
spin_lock_irq(&clone->lock);
@@ -1185,14 +1204,13 @@ static void process_deferred_discards(struct clone *clone)
/* Update the metadata */
bio_list_for_each(bio, &discards) {
- bio_region_range(clone, bio, &rs, &re);
+ bio_region_range(clone, bio, &rs, &nr_regions);
/*
* A discard request might cover regions that have already been
* hydrated. There is no need to update the metadata for these
* regions.
*/
- r = dm_clone_cond_set_range(clone->cmd, rs, re - rs);
-
+ r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
if (unlikely(r))
break;
}
@@ -1455,7 +1473,7 @@ static void clone_status(struct dm_target *ti, status_type_t type,
goto error;
}
- DMEMIT("%u %llu/%llu %llu %lu/%lu %u ",
+ DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
DM_CLONE_METADATA_BLOCK_SIZE,
(unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
(unsigned long long)nr_metadata_blocks,
@@ -1775,6 +1793,7 @@ error:
static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
+ sector_t nr_regions;
struct clone *clone;
struct dm_arg_set as;
@@ -1816,7 +1835,16 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto out_with_source_dev;
clone->region_shift = __ffs(clone->region_size);
- clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size);
+ nr_regions = dm_sector_div_up(ti->len, clone->region_size);
+
+ /* Check for overflow */
+ if (nr_regions != (unsigned long)nr_regions) {
+ ti->error = "Too many regions. Consider increasing the region size";
+ r = -EOVERFLOW;
+ goto out_with_source_dev;
+ }
+
+ clone->nr_regions = nr_regions;
r = validate_nr_regions(clone->nr_regions, &ti->error);
if (r)
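
The dm-clone hunks above replace the inclusive region-end output of bio_region_range() with a region count and widen the shift in region_to_sector() to sector_t, so a 32-bit region number can no longer overflow when converted to a sector, and a small bio that covers no full region now yields a count of zero. A minimal userspace sketch of that arithmetic, assuming 64-bit sectors and a power-of-two region size; struct clone_geom and sector_div_up() are simplified stand-ins, not the kernel's types:

/*
 * Userspace sketch of the dm-clone region arithmetic, assuming 64-bit
 * sector_t and a power-of-two region size.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct clone_geom {
	unsigned long region_size;   /* sectors, power of two */
	unsigned long region_shift;  /* log2(region_size) */
};

static sector_t sector_div_up(sector_t n, unsigned long d)
{
	return (n + d - 1) / d;
}

/* Without the cast, a 32-bit region_nr shifted left could overflow. */
static sector_t region_to_sector(const struct clone_geom *g, unsigned long region_nr)
{
	return (sector_t)region_nr << g->region_shift;
}

/* Regions fully covered by [start, end); may be zero for small bios. */
static void bio_region_range(const struct clone_geom *g, sector_t start,
			     sector_t end, unsigned long *rs, unsigned long *nr)
{
	unsigned long first = sector_div_up(start, g->region_size);
	unsigned long last = end >> g->region_shift;

	*rs = first;
	*nr = (first >= last) ? 0 : last - first;
}

int main(void)
{
	struct clone_geom g = { .region_size = 8, .region_shift = 3 };
	unsigned long rs, nr;

	bio_region_range(&g, 3, 5, &rs, &nr);   /* bio inside one region */
	printf("rs=%lu nr=%lu\n", rs, nr);      /* rs=1 nr=0 */

	bio_region_range(&g, 8, 32, &rs, &nr);  /* three full regions */
	printf("rs=%lu nr=%lu\n", rs, nr);      /* rs=1 nr=3 */
	return 0;
}
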
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c6a529873d0f..3df90daba89e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -230,6 +230,8 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
struct scatterlist *sg);
+static bool crypt_integrity_aead(struct crypt_config *cc);
+
/*
* Use this to access cipher attributes that are independent of the key.
*/
@@ -346,7 +348,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
unsigned bs;
int log;
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
+ if (crypt_integrity_aead(cc))
bs = crypto_aead_blocksize(any_tfm_aead(cc));
else
bs = crypto_skcipher_blocksize(any_tfm(cc));
@@ -712,7 +714,7 @@ static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) {
+ if (crypt_integrity_aead(cc)) {
ti->error = "AEAD transforms not supported for EBOIV";
return -EINVAL;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 2f03fecd312d..4094c47eca7f 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -39,6 +39,7 @@
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
+#define DISCARD_FILLER 0xf6
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -257,6 +258,7 @@ struct dm_integrity_c {
bool just_formatted;
bool recalculate_flag;
bool fix_padding;
+ bool discard;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
@@ -284,7 +286,7 @@ struct dm_integrity_io {
struct work_struct work;
struct dm_integrity_c *ic;
- bool write;
+ enum req_opf op;
bool fua;
struct dm_integrity_range range;
@@ -510,8 +512,8 @@ static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
- (unsigned long long)sector,
- (unsigned long long)n_sectors,
+ sector,
+ n_sectors,
ic->sb->log2_sectors_per_block,
ic->log2_blocks_per_bitmap_bit,
mode);
@@ -1299,6 +1301,11 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
unsigned *metadata_offset, unsigned total_size, int op)
{
+#define MAY_BE_FILLER 1
+#define MAY_BE_HASH 2
+ unsigned hash_offset = 0;
+ unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+
do {
unsigned char *data, *dp;
struct dm_buffer *b;
@@ -1320,18 +1327,35 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
} else if (op == TAG_WRITE) {
memcpy(dp, tag, to_copy);
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
- } else {
+ } else {
/* e.g.: op == TAG_CMP */
- if (unlikely(memcmp(dp, tag, to_copy))) {
- unsigned i;
- for (i = 0; i < to_copy; i++) {
- if (dp[i] != tag[i])
- break;
- total_size--;
+ if (likely(is_power_of_2(ic->tag_size))) {
+ if (unlikely(memcmp(dp, tag, to_copy)))
+ if (unlikely(!ic->discard) ||
+ unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
+ goto thorough_test;
+ }
+ } else {
+ unsigned i, ts;
+thorough_test:
+ ts = total_size;
+
+ for (i = 0; i < to_copy; i++, ts--) {
+ if (unlikely(dp[i] != tag[i]))
+ may_be &= ~MAY_BE_HASH;
+ if (likely(dp[i] != DISCARD_FILLER))
+ may_be &= ~MAY_BE_FILLER;
+ hash_offset++;
+ if (unlikely(hash_offset == ic->tag_size)) {
+ if (unlikely(!may_be)) {
+ dm_bufio_release(b);
+ return ts;
+ }
+ hash_offset = 0;
+ may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ }
}
- dm_bufio_release(b);
- return total_size;
}
}
dm_bufio_release(b);
@@ -1342,10 +1366,17 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
(*metadata_block)++;
*metadata_offset = 0;
}
+
+ if (unlikely(!is_power_of_2(ic->tag_size))) {
+ hash_offset = (hash_offset + to_copy) % ic->tag_size;
+ }
+
total_size -= to_copy;
} while (unlikely(total_size));
return 0;
+#undef MAY_BE_FILLER
+#undef MAY_BE_HASH
}
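
With discards enabled, a stored tag can legitimately be either the expected checksum or a tag full of DISCARD_FILLER bytes, which is why the thorough test above tracks MAY_BE_HASH and MAY_BE_FILLER per tag and only reports a mismatch when both hypotheses have failed by a tag boundary. A userspace sketch of that scan over a single in-memory buffer, with illustrative names and sizes rather than the kernel's:

#include <stdio.h>
#include <string.h>

#define FILLER		0xf6
#define MAY_BE_HASH	1
#define MAY_BE_FILLER	2

/* Returns 0 when every tag matches one hypothesis, nonzero otherwise. */
static unsigned check_tags(const unsigned char *stored,
			   const unsigned char *expected,
			   unsigned total, unsigned tag_size)
{
	unsigned may_be = MAY_BE_HASH | MAY_BE_FILLER;
	unsigned hash_offset = 0, i;

	for (i = 0; i < total; i++) {
		if (stored[i] != expected[i])
			may_be &= ~MAY_BE_HASH;
		if (stored[i] != FILLER)
			may_be &= ~MAY_BE_FILLER;
		hash_offset++;
		if (hash_offset == tag_size) {
			if (!may_be)		/* neither hash nor filler */
				return total - i;
			hash_offset = 0;
			may_be = MAY_BE_HASH | MAY_BE_FILLER;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char expected[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char ok[8], discarded[8], bad[8];

	memcpy(ok, expected, 8);
	memset(discarded, FILLER, 8);
	memcpy(bad, expected, 8);
	bad[3] ^= 0xff;

	printf("%u %u %u\n",
	       check_tags(ok, expected, 8, 8),        /* 0: hash matches */
	       check_tags(discarded, expected, 8, 8), /* 0: all filler  */
	       check_tags(bad, expected, 8, 8));      /* nonzero: error */
	return 0;
}
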
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
@@ -1428,7 +1459,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
remove_range(ic, &dio->range);
- if (unlikely(dio->write))
+ if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
schedule_autocommit(ic);
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
@@ -1519,15 +1550,20 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[HASH_MAX_DIGESTSIZE];
- unsigned sectors_to_process = dio->range.n_sectors;
- sector_t sector = dio->range.logical_sector;
+ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ sector_t sector;
+ unsigned sectors_to_process;
+ sector_t save_metadata_block;
+ unsigned save_metadata_offset;
if (unlikely(ic->mode == 'R'))
goto skip_io;
- checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
- GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
+ if (likely(dio->op != REQ_OP_DISCARD))
+ checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
+ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
+ else
+ checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
if (!checksums) {
checksums = checksums_onstack;
if (WARN_ON(extra_space &&
@@ -1537,6 +1573,43 @@ static void integrity_metadata(struct work_struct *w)
}
}
+ if (unlikely(dio->op == REQ_OP_DISCARD)) {
+ sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
+ unsigned bi_size = dio->bio_details.bi_iter.bi_size;
+ unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
+ unsigned max_blocks = max_size / ic->tag_size;
+ memset(checksums, DISCARD_FILLER, max_size);
+
+ while (bi_size) {
+ unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ this_step_blocks = min(this_step_blocks, max_blocks);
+ r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+ this_step_blocks * ic->tag_size, TAG_WRITE);
+ if (unlikely(r)) {
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
+ goto error;
+ }
+
+ bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
+ }
+
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
+ goto skip_io;
+ }
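
The discard path above never allocates tag space for the whole request; it fills one bounded scratch buffer with DISCARD_FILLER and writes it repeatedly, at most max_blocks tags per pass. A sketch of that chunking, where write_tags() is a hypothetical stand-in for dm_integrity_rw_tag(..., TAG_WRITE):

#include <stdio.h>
#include <string.h>

#define FILLER	 0xf6
#define TAG_SIZE 4
#define SCRATCH	 64	/* bytes; holds SCRATCH / TAG_SIZE tags */

static void write_tags(unsigned long first_block, unsigned nr_blocks,
		       const unsigned char *tags)
{
	printf("write %u tags at block %lu\n", nr_blocks, first_block);
	(void)tags;
}

static void discard_blocks(unsigned long block, unsigned long nr_blocks)
{
	unsigned char scratch[SCRATCH];
	unsigned long max_blocks = SCRATCH / TAG_SIZE;

	memset(scratch, FILLER, sizeof(scratch));

	while (nr_blocks) {
		unsigned long step = nr_blocks < max_blocks ? nr_blocks : max_blocks;

		write_tags(block, step, scratch);
		block += step;
		nr_blocks -= step;
	}
}

int main(void)
{
	discard_blocks(100, 40);	/* 16 + 16 + 8 tag writes */
	return 0;
}
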
+
+ save_metadata_block = dio->metadata_block;
+ save_metadata_offset = dio->metadata_offset;
+ sector = dio->range.logical_sector;
+ sectors_to_process = dio->range.n_sectors;
+
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos;
char *mem, *checksums_ptr;
@@ -1555,11 +1628,12 @@ again:
kunmap_atomic(mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
- checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
+ checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- DMERR_LIMIT("Checksum failed at sector 0x%llx",
- (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
+ (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
r = -EILSEQ;
atomic64_inc(&ic->number_of_mismatches);
}
@@ -1598,7 +1672,7 @@ again:
tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
this_len = min(biv.bv_len, data_to_process);
r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
- this_len, !dio->write ? TAG_READ : TAG_WRITE);
+ this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
if (unlikely(r))
goto error;
data_to_process -= this_len;
@@ -1625,6 +1699,20 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->ic = ic;
dio->bi_status = 0;
+ dio->op = bio_op(bio);
+
+ if (unlikely(dio->op == REQ_OP_DISCARD)) {
+ if (ti->max_io_len) {
+ sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
+ unsigned log2_max_io_len = __fls(ti->max_io_len);
+ sector_t start_boundary = sec >> log2_max_io_len;
+ sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
+ if (start_boundary < end_boundary) {
+ sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
+ dm_accept_partial_bio(bio, len);
+ }
+ }
+ }
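
The new mapping code above splits a discard that crosses a max_io_len boundary, accepting only the sectors up to the next boundary and letting the core resubmit the rest. A sketch of the boundary arithmetic, assuming max_io_len is a power of two; dm_accept_partial_bio() is reduced here to a length calculation:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Sectors from 'sec' up to the next multiple of max_io_len. */
static sector_t sectors_until_boundary(sector_t sec, sector_t max_io_len)
{
	return max_io_len - (sec & (max_io_len - 1));
}

int main(void)
{
	sector_t max_io_len = 1024;
	sector_t sec = 1000, nr = 100;	/* crosses the 1024 boundary */
	unsigned log2_len = 10;		/* __fls(1024) */

	if ((sec >> log2_len) < ((sec + nr - 1) >> log2_len)) {
		sector_t len = sectors_until_boundary(sec, max_io_len);
		printf("accept %llu of %llu sectors\n",
		       (unsigned long long)len, (unsigned long long)nr);
	}
	return 0;
}
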
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
submit_flush_bio(ic, dio);
@@ -1632,8 +1720,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
- dio->write = bio_op(bio) == REQ_OP_WRITE;
- dio->fua = dio->write && bio->bi_opf & REQ_FUA;
+ dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
if (unlikely(dio->fua)) {
/*
* Don't pass down the FUA flag because we have to flush
@@ -1643,18 +1730,18 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
- (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
- (unsigned long long)ic->provided_data_sectors);
+ dio->range.logical_sector, bio_sectors(bio),
+ ic->provided_data_sectors);
return DM_MAPIO_KILL;
}
if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
ic->sectors_per_block,
- (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
+ dio->range.logical_sector, bio_sectors(bio));
return DM_MAPIO_KILL;
}
- if (ic->sectors_per_block > 1) {
+ if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
struct bvec_iter iter;
struct bio_vec bv;
bio_for_each_segment(bv, bio, iter) {
@@ -1687,7 +1774,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
}
- if (unlikely(ic->mode == 'R') && unlikely(dio->write))
+ if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
return DM_MAPIO_KILL;
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
@@ -1717,13 +1804,13 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
mem = kmap_atomic(bv.bv_page);
- if (likely(dio->write))
+ if (likely(dio->op == REQ_OP_WRITE))
flush_dcache_page(bv.bv_page);
do {
struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
- if (unlikely(!dio->write)) {
+ if (unlikely(dio->op == REQ_OP_READ)) {
struct journal_sector *js;
char *mem_ptr;
unsigned s;
@@ -1748,12 +1835,12 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
- (unsigned long long)logical_sector);
+ logical_sector);
}
}
#endif
@@ -1770,7 +1857,7 @@ retry_kmap:
char *tag_addr;
BUG_ON(PageHighMem(biv.bv_page));
tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
- if (likely(dio->write))
+ if (likely(dio->op == REQ_OP_WRITE))
memcpy(tag_ptr, tag_addr, tag_now);
else
memcpy(tag_addr, tag_ptr, tag_now);
@@ -1778,12 +1865,12 @@ retry_kmap:
tag_ptr += tag_now;
tag_todo -= tag_now;
} while (unlikely(tag_todo)); else {
- if (likely(dio->write))
+ if (likely(dio->op == REQ_OP_WRITE))
memset(tag_ptr, 0, tag_todo);
}
}
- if (likely(dio->write)) {
+ if (likely(dio->op == REQ_OP_WRITE)) {
struct journal_sector *js;
unsigned s;
@@ -1819,12 +1906,12 @@ retry_kmap:
bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
- if (unlikely(!dio->write))
+ if (unlikely(dio->op == REQ_OP_READ))
flush_dcache_page(bv.bv_page);
kunmap_atomic(mem);
} while (n_sectors);
- if (likely(dio->write)) {
+ if (likely(dio->op == REQ_OP_WRITE)) {
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
@@ -1856,7 +1943,10 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
unsigned journal_section, journal_entry;
unsigned journal_read_pos;
struct completion read_comp;
- bool need_sync_io = ic->internal_hash && !dio->write;
+ bool discard_retried = false;
+ bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+ if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
+ need_sync_io = true;
if (need_sync_io && from_map) {
INIT_WORK(&dio->work, integrity_bio_wait);
@@ -1874,8 +1964,8 @@ retry:
}
dio->range.n_sectors = bio_sectors(bio);
journal_read_pos = NOT_FOUND;
- if (likely(ic->mode == 'J')) {
- if (dio->write) {
+ if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
+ if (dio->op == REQ_OP_WRITE) {
unsigned next_entry, i, pos;
unsigned ws, we, range_sectors;
@@ -1970,6 +2060,21 @@ offload_to_thread:
}
}
}
+ if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
+ sector_t next_sector;
+ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ if (unlikely(new_pos != NOT_FOUND) ||
+ unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
+ remove_range_unlocked(ic, &dio->range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+ queue_work(ic->commit_wq, &ic->commit_work);
+ flush_workqueue(ic->commit_wq);
+ queue_work(ic->writer_wq, &ic->writer_work);
+ flush_workqueue(ic->writer_wq);
+ discard_retried = true;
+ goto lock_retry;
+ }
+ }
spin_unlock_irq(&ic->endio_wait.lock);
if (unlikely(journal_read_pos != NOT_FOUND)) {
@@ -1978,7 +2083,7 @@ offload_to_thread:
goto journal_read_write;
}
- if (ic->mode == 'B' && dio->write) {
+ if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
struct bitmap_block_status *bbs;
@@ -2007,6 +2112,18 @@ offload_to_thread:
bio->bi_end_io = integrity_end_io;
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
+ if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
+ integrity_metadata(&dio->work);
+ dm_integrity_flush_buffers(ic);
+
+ dio->in_flight = (atomic_t)ATOMIC_INIT(1);
+ dio->completion = NULL;
+
+ generic_make_request(bio);
+
+ return;
+ }
+
generic_make_request(bio);
if (need_sync_io) {
@@ -2193,6 +2310,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
sec &= ~(sector_t)(ic->sectors_per_block - 1);
}
}
+ if (unlikely(sec >= ic->provided_data_sectors))
+ continue;
get_area_and_offset(ic, sec, &area, &offset);
restore_last_bytes(ic, access_journal_data(ic, i, j), je);
for (k = j + 1; k < ic->journal_section_entries; k++) {
@@ -2202,6 +2321,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
break;
BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
sec2 = journal_entry_get_sector(je2);
+ if (unlikely(sec2 >= ic->provided_data_sectors))
+ break;
get_area_and_offset(ic, sec2, &area2, &offset2);
if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
break;
@@ -2404,7 +2525,7 @@ next_chunk:
get_area_and_offset(ic, logical_sector, &area, &offset);
}
- DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);
+ DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
recalc_write_super(ic);
@@ -2828,9 +2949,29 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
static void dm_integrity_resume(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
int r;
+
DEBUG_print("resume\n");
+ if (ic->provided_data_sectors != old_provided_data_sectors) {
+ if (ic->provided_data_sectors > old_provided_data_sectors &&
+ ic->mode == 'B' &&
+ ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+ rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
+ ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ }
+
+ ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+ }
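
The resume path above handles a grown device by setting the bitmap bits for the newly exposed range before persisting the enlarged provided_data_sectors, so those blocks are treated as possibly out of sync until rewritten or recalculated. A sketch of that range marking over an in-memory bitmap, with illustrative sizes:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

static void bitmap_set_range(unsigned long *map, uint64_t start, uint64_t count)
{
	for (uint64_t b = start; b < start + count; b++)
		map[b / BITS_PER_WORD] |= 1UL << (b % BITS_PER_WORD);
}

int main(void)
{
	unsigned long map[4] = { 0 };	/* 256 blocks on LP64 */
	uint64_t old_blocks = 100, new_blocks = 130;

	/* mark only the region the resize exposed */
	if (new_blocks > old_blocks)
		bitmap_set_range(map, old_blocks, new_blocks - old_blocks);

	printf("word1=%#lx word2=%#lx\n", map[1], map[2]);
	return 0;
}
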
+
if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
DEBUG_print("resume dirty_bitmap\n");
rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
@@ -2898,7 +3039,7 @@ static void dm_integrity_resume(struct dm_target *ti)
DEBUG_print("testing recalc: %x\n", ic->sb->flags);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
- DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
+ DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
if (recalc_pos < ic->provided_data_sectors) {
queue_work(ic->recalc_wq, &ic->recalc_work);
} else if (recalc_pos > ic->provided_data_sectors) {
@@ -2929,9 +3070,9 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_INFO:
DMEMIT("%llu %llu",
(unsigned long long)atomic64_read(&ic->number_of_mismatches),
- (unsigned long long)ic->provided_data_sectors);
+ ic->provided_data_sectors);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
- DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
+ DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
else
DMEMIT(" -");
break;
@@ -2944,6 +3085,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+ arg_count += ic->discard;
arg_count += ic->mode == 'J';
arg_count += ic->mode == 'J';
arg_count += ic->mode == 'B';
@@ -2952,7 +3094,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
- DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
+ DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
ic->tag_size, ic->mode, arg_count);
if (ic->meta_dev)
DMEMIT(" meta_device:%s", ic->meta_dev->name);
@@ -2960,6 +3102,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
DMEMIT(" recalculate");
+ if (ic->discard)
+ DMEMIT(" allow_discards");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
@@ -2968,7 +3112,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
if (ic->mode == 'B') {
- DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
+ DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
}
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
@@ -3073,6 +3217,24 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
return 0;
}
+static void get_provided_data_sectors(struct dm_integrity_c *ic)
+{
+ if (!ic->meta_dev) {
+ int test_bit;
+ ic->provided_data_sectors = 0;
+ for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
+ __u64 prev_data_sectors = ic->provided_data_sectors;
+
+ ic->provided_data_sectors |= (sector_t)1 << test_bit;
+ if (calculate_device_limits(ic))
+ ic->provided_data_sectors = prev_data_sectors;
+ }
+ } else {
+ ic->provided_data_sectors = ic->data_device_sectors;
+ ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
+ }
+}
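
get_provided_data_sectors() above builds the largest data size that still passes calculate_device_limits() by trying bits from high to low and keeping each one only if the candidate still fits; the greedy search works because the constraint is monotonic in the data size. A userspace sketch with fits() as a hypothetical, toy stand-in for the limit check:

#include <stdio.h>
#include <stdint.h>

static int fits(uint64_t data_sectors, uint64_t device_sectors)
{
	/* toy model: data plus ~1/64 metadata overhead must fit */
	return data_sectors + (data_sectors >> 6) + 1 <= device_sectors;
}

static uint64_t max_data_sectors(uint64_t device_sectors)
{
	uint64_t val = 0;
	int bit;

	for (bit = 63; bit >= 3; bit--) {
		uint64_t prev = val;

		val |= (uint64_t)1 << bit;
		if (!fits(val, device_sectors))
			val = prev;	/* bit doesn't fit, drop it */
	}
	return val;
}

int main(void)
{
	uint64_t dev = 1000000;

	printf("device=%llu data=%llu\n",
	       (unsigned long long)dev,
	       (unsigned long long)max_data_sectors(dev));
	return 0;
}
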
+
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
unsigned journal_sections;
@@ -3100,20 +3262,15 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
- ic->provided_data_sectors = 0;
- for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
- __u64 prev_data_sectors = ic->provided_data_sectors;
-
- ic->provided_data_sectors |= (sector_t)1 << test_bit;
- if (calculate_device_limits(ic))
- ic->provided_data_sectors = prev_data_sectors;
- }
+ get_provided_data_sectors(ic);
if (!ic->provided_data_sectors)
return -EINVAL;
} else {
ic->sb->log2_interleave_sectors = 0;
- ic->provided_data_sectors = ic->data_device_sectors;
- ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
+
+ get_provided_data_sectors(ic);
+ if (!ic->provided_data_sectors)
+ return -EINVAL;
try_smaller_buffer:
ic->sb->journal_sections = cpu_to_le32(0);
@@ -3733,6 +3890,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
ic->recalculate_flag = true;
+ } else if (!strcmp(opt_string, "allow_discards")) {
+ ic->discard = true;
} else if (!strcmp(opt_string, "fix_padding")) {
ic->fix_padding = true;
} else {
@@ -3791,6 +3950,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ if (ic->discard && !ic->internal_hash) {
+ r = -EINVAL;
+ ti->error = "Discard can be only used with internal hash";
+ goto bad;
+ }
+
ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
ic->autocommit_msec = sync_msec;
timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
@@ -3920,16 +4085,16 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
}
- ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
- if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
- /* test for overflow */
+ if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
r = -EINVAL;
- ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
+ ti->error = "Journal mac mismatch";
goto bad;
}
- if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
+
+ get_provided_data_sectors(ic);
+ if (!ic->provided_data_sectors) {
r = -EINVAL;
- ti->error = "Journal mac mismatch";
+ ti->error = "The device is too small";
goto bad;
}
@@ -3994,10 +4159,9 @@ try_smaller_buffer:
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
- DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
- (unsigned long long)ic->provided_data_sectors);
+ DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
- DEBUG_print(" bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
+ DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);
if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
@@ -4121,6 +4285,8 @@ try_smaller_buffer:
ti->num_flush_bios = 1;
ti->flush_supported = true;
+ if (ic->discard)
+ ti->num_discard_bios = 1;
return 0;
@@ -4202,7 +4368,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 3ceeb6b404ed..49147e634046 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -551,6 +551,7 @@ void verity_fec_dtr(struct dm_verity *v)
mempool_exit(&f->rs_pool);
mempool_exit(&f->prealloc_pool);
mempool_exit(&f->extra_pool);
+ mempool_exit(&f->output_pool);
kmem_cache_destroy(f->cache);
if (f->data_bufio)
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index a09bdc000e64..114927da9cc9 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -26,6 +26,8 @@
#define AUTOCOMMIT_BLOCKS_SSD 65536
#define AUTOCOMMIT_BLOCKS_PMEM 64
#define AUTOCOMMIT_MSEC 1000
+#define MAX_AGE_DIV 16
+#define MAX_AGE_UNSPECIFIED -1UL
#define BITMAP_GRANULARITY 65536
#if BITMAP_GRANULARITY < PAGE_SIZE
@@ -88,6 +90,7 @@ struct wc_entry {
:47
#endif
;
+ unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
uint64_t original_sector;
uint64_t seq_count;
@@ -119,6 +122,7 @@ struct dm_writecache {
size_t writeback_size;
size_t freelist_high_watermark;
size_t freelist_low_watermark;
+ unsigned long max_age;
unsigned uncommitted_blocks;
unsigned autocommit_blocks;
@@ -130,6 +134,8 @@ struct dm_writecache {
struct timer_list autocommit_timer;
struct wait_queue_head freelist_wait;
+ struct timer_list max_age_timer;
+
atomic_t bio_in_progress[2];
struct wait_queue_head bio_in_progress_wait[2];
@@ -160,6 +166,7 @@ struct dm_writecache {
bool autocommit_time_set:1;
bool writeback_fua_set:1;
bool flush_on_suspend:1;
+ bool cleaner:1;
unsigned writeback_all;
struct workqueue_struct *writeback_wq;
@@ -502,6 +509,34 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}
+static void ssd_commit_superblock(struct dm_writecache *wc)
+{
+ int r;
+ struct dm_io_region region;
+ struct dm_io_request req;
+
+ region.bdev = wc->ssd_dev->bdev;
+ region.sector = 0;
+ region.count = PAGE_SIZE;
+
+ if (unlikely(region.sector + region.count > wc->metadata_sectors))
+ region.count = wc->metadata_sectors - region.sector;
+
+ region.sector += wc->start_sector;
+
+ req.bi_op = REQ_OP_WRITE;
+ req.bi_op_flags = REQ_SYNC | REQ_FUA;
+ req.mem.type = DM_IO_VMA;
+ req.mem.ptr.vma = (char *)wc->memory_map;
+ req.client = wc->dm_io;
+ req.notify.fn = NULL;
+ req.notify.context = NULL;
+
+ r = dm_io(&req, 1, &region, NULL);
+ if (unlikely(r))
+ writecache_error(wc, r, "error writing superblock");
+}
+
static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
if (WC_MODE_PMEM(wc))
@@ -596,6 +631,7 @@ static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *i
rb_link_node(&ins->rb_node, parent, node);
rb_insert_color(&ins->rb_node, &wc->tree);
list_add(&ins->lru, &wc->lru);
+ ins->age = jiffies;
}
static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
@@ -631,6 +667,16 @@ static inline void writecache_verify_watermark(struct dm_writecache *wc)
queue_work(wc->writeback_wq, &wc->writeback_work);
}
+static void writecache_max_age_timer(struct timer_list *t)
+{
+ struct dm_writecache *wc = from_timer(wc, t, max_age_timer);
+
+ if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
+ queue_work(wc->writeback_wq, &wc->writeback_work);
+ mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
+ }
+}
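
The max_age machinery above arms a timer for max_age / MAX_AGE_DIV and has the writeback loop flush LRU entries older than max_age minus that slack, so a dirty block never stays cached much longer than max_age. A sketch of the policy with plain tick counts standing in for jiffies:

#include <stdio.h>

#define MAX_AGE_DIV 16

static int should_write_back(unsigned long now, unsigned long entry_age,
			     unsigned long max_age)
{
	return now - entry_age >= max_age - max_age / MAX_AGE_DIV;
}

int main(void)
{
	unsigned long max_age = 1600, inserted_at = 0, t;

	/* the timer fires every max_age / MAX_AGE_DIV ticks */
	for (t = 0; t <= max_age; t += max_age / MAX_AGE_DIV)
		if (should_write_back(t, inserted_at, max_age)) {
			printf("flushed at tick %lu (limit %lu)\n", t, max_age);
			break;
		}
	return 0;
}
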
+
static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
struct wc_entry *e;
@@ -741,8 +787,10 @@ static void writecache_flush(struct dm_writecache *wc)
wc->seq_count++;
pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
- writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
- writecache_commit_flushed(wc, false);
+ if (WC_MODE_PMEM(wc))
+ writecache_commit_flushed(wc, false);
+ else
+ ssd_commit_superblock(wc);
wc->overwrote_committed = false;
@@ -837,6 +885,7 @@ static void writecache_suspend(struct dm_target *ti)
bool flush_on_suspend;
del_timer_sync(&wc->autocommit_timer);
+ del_timer_sync(&wc->max_age_timer);
wc_lock(wc);
writecache_flush(wc);
@@ -876,6 +925,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
struct wc_entry *e = &wc->entries[b];
e->index = b;
e->write_in_progress = false;
+ cond_resched();
}
return 0;
@@ -930,6 +980,7 @@ static void writecache_resume(struct dm_target *ti)
e->original_sector = le64_to_cpu(wme.original_sector);
e->seq_count = le64_to_cpu(wme.seq_count);
}
+ cond_resched();
}
#endif
for (b = 0; b < wc->n_blocks; b++) {
@@ -973,6 +1024,9 @@ erase_this:
writecache_verify_watermark(wc);
+ if (wc->max_age != MAX_AGE_UNSPECIFIED)
+ mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
+
wc_unlock(wc);
}
@@ -1021,6 +1075,28 @@ static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_w
return 0;
}
+static void activate_cleaner(struct dm_writecache *wc)
+{
+ wc->flush_on_suspend = true;
+ wc->cleaner = true;
+ wc->freelist_high_watermark = wc->n_blocks;
+ wc->freelist_low_watermark = wc->n_blocks;
+}
+
+static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+{
+ if (argc != 1)
+ return -EINVAL;
+
+ wc_lock(wc);
+ activate_cleaner(wc);
+ if (!dm_suspended(wc->ti))
+ writecache_verify_watermark(wc);
+ wc_unlock(wc);
+
+ return 0;
+}
+
static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
{
@@ -1031,6 +1107,8 @@ static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
r = process_flush_mesg(argc, argv, wc);
else if (!strcasecmp(argv[0], "flush_on_suspend"))
r = process_flush_on_suspend_mesg(argc, argv, wc);
+ else if (!strcasecmp(argv[0], "cleaner"))
+ r = process_cleaner_mesg(argc, argv, wc);
else
DMERR("unrecognised message received: %s", argv[0]);
@@ -1194,6 +1272,7 @@ read_next_block:
}
} else {
do {
+ bool found_entry = false;
if (writecache_has_error(wc))
goto unlock_error;
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
@@ -1204,9 +1283,25 @@ read_next_block:
wc->overwrote_committed = true;
goto bio_copy;
}
+ found_entry = true;
+ } else {
+ if (unlikely(wc->cleaner))
+ goto direct_write;
}
e = writecache_pop_from_freelist(wc, (sector_t)-1);
if (unlikely(!e)) {
+ if (!found_entry) {
+direct_write:
+ e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
+ if (e) {
+ sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
+ BUG_ON(!next_boundary);
+ if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
+ dm_accept_partial_bio(bio, next_boundary);
+ }
+ }
+ goto unlock_remap_origin;
+ }
writecache_wait_on_freelist(wc);
continue;
}
@@ -1619,7 +1714,9 @@ restart:
wbl.size = 0;
while (!list_empty(&wc->lru) &&
(wc->writeback_all ||
- wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {
+ wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
+ (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
+ wc->max_age - wc->max_age / MAX_AGE_DIV))) {
n_walked++;
if (unlikely(n_walked > WRITEBACK_LATENCY) &&
@@ -1791,8 +1888,10 @@ static int init_memory(struct dm_writecache *wc)
pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
- for (b = 0; b < wc->n_blocks; b++)
+ for (b = 0; b < wc->n_blocks; b++) {
write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
+ cond_resched();
+ }
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc, false);
@@ -1882,9 +1981,11 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
wc->ti = ti;
mutex_init(&wc->lock);
+ wc->max_age = MAX_AGE_UNSPECIFIED;
writecache_poison_lists(wc);
init_waitqueue_head(&wc->freelist_wait);
timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
+ timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
for (i = 0; i < 2; i++) {
atomic_set(&wc->bio_in_progress[i], 0);
@@ -2058,6 +2159,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto invalid_optional;
wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
wc->autocommit_time_set = true;
+ } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
+ unsigned max_age_msecs;
+ string = dm_shift_arg(&as), opt_params--;
+ if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
+ goto invalid_optional;
+ if (max_age_msecs > 86400000)
+ goto invalid_optional;
+ wc->max_age = msecs_to_jiffies(max_age_msecs);
+ } else if (!strcasecmp(string, "cleaner")) {
+ wc->cleaner = true;
} else if (!strcasecmp(string, "fua")) {
if (WC_MODE_PMEM(wc)) {
wc->writeback_fua = true;
@@ -2235,6 +2346,9 @@ overflow:
do_div(x, 100);
wc->freelist_low_watermark = x;
+ if (wc->cleaner)
+ activate_cleaner(wc);
+
r = writecache_alloc_entries(wc);
if (r) {
ti->error = "Cannot allocate memory";
@@ -2278,9 +2392,9 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
extra_args = 0;
if (wc->start_sector)
extra_args += 2;
- if (wc->high_wm_percent_set)
+ if (wc->high_wm_percent_set && !wc->cleaner)
extra_args += 2;
- if (wc->low_wm_percent_set)
+ if (wc->low_wm_percent_set && !wc->cleaner)
extra_args += 2;
if (wc->max_writeback_jobs_set)
extra_args += 2;
@@ -2288,19 +2402,21 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
extra_args += 2;
if (wc->autocommit_time_set)
extra_args += 2;
+ if (wc->cleaner)
+ extra_args++;
if (wc->writeback_fua_set)
extra_args++;
DMEMIT("%u", extra_args);
if (wc->start_sector)
DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
- if (wc->high_wm_percent_set) {
+ if (wc->high_wm_percent_set && !wc->cleaner) {
x = (uint64_t)wc->freelist_high_watermark * 100;
x += wc->n_blocks / 2;
do_div(x, (size_t)wc->n_blocks);
DMEMIT(" high_watermark %u", 100 - (unsigned)x);
}
- if (wc->low_wm_percent_set) {
+ if (wc->low_wm_percent_set && !wc->cleaner) {
x = (uint64_t)wc->freelist_low_watermark * 100;
x += wc->n_blocks / 2;
do_div(x, (size_t)wc->n_blocks);
@@ -2312,6 +2428,10 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
if (wc->autocommit_time_set)
DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
+ if (wc->max_age != MAX_AGE_UNSPECIFIED)
+ DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
+ if (wc->cleaner)
+ DMEMIT(" cleaner");
if (wc->writeback_fua_set)
DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
break;
@@ -2320,7 +2440,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
static struct target_type writecache_target = {
.name = "writecache",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = writecache_ctr,
.dtr = writecache_dtr,
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 516c7b671d25..369de15c4e80 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1109,7 +1109,6 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
switch (blkz->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
- zmd->nr_rnd_zones++;
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 753302e83910..21c0207e3207 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1740,8 +1740,9 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
* won't be imposed.
*/
if (current->bio_list) {
- blk_queue_split(md->queue, &bio);
- if (!is_abnormal_io(bio))
+ if (is_abnormal_io(bio))
+ blk_queue_split(md->queue, &bio);
+ else
dm_queue_split(md, ti, &bio);
}
diff --git a/drivers/memory/.gitignore b/drivers/memory/.gitignore
index cbca8b028437..caedc4c7d2db 100644
--- a/drivers/memory/.gitignore
+++ b/drivers/memory/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
ti-emif-asm-offsets.h
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 21f05240682b..33b8216bac30 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1158,6 +1158,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 8ae474d9bfb9..b16715e9515d 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -628,6 +628,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index e3efd9529506..b42bdb667e85 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1256,6 +1256,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc->debugfs.max_rate = emc->timings[i].rate;
}
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
if (err < 0) {
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index 8a24494f8c4d..a1ec7e84d6fe 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -64,6 +64,7 @@
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -111,13 +112,13 @@ MODULE_DESCRIPTION(LANAME);
#ifdef MPT_LAN_IO_DEBUG
#define dioprintk(x) printk x
#else
-#define dioprintk(x)
+#define dioprintk(x) no_printk x
#endif
#ifdef MPT_LAN_DEBUG
#define dlprintk(x) printk x
#else
-#define dlprintk(x)
+#define dlprintk(x) no_printk x
#endif
#define NETDEV_TO_LANPRIV_PTR(d) ((struct mpt_lan_priv *)netdev_priv(d))
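
Redefining the disabled dioprintk()/dlprintk() macros above to no_printk keeps the format string and arguments visible to the compiler instead of discarding them, so stale debug calls still get type-checked in non-debug builds. A userspace sketch of the same pattern, with no_printf() standing in for the kernel's no_printk():

#include <stdio.h>

#define no_printf(fmt, ...) \
	do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)

#ifdef LAN_DEBUG
#define dlprintf(x) printf x
#else
#define dlprintf(x) no_printf x
#endif

int main(void)
{
	int frames = 3;

	/* compiled out, but "%d" vs 'frames' is still checked */
	dlprintf(("received %d frames\n", frames));
	return 0;
}
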
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index c396483d3624..e35b13891fe4 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
- char event_data[0] __aligned(4);
+ char event_data[] __aligned(4);
};
struct mptsas_discovery_event {
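
The fw_event_work change above converts the zero-length array, a GNU extension, into a C99 flexible array member; allocations then size the trailing payload explicitly. A userspace sketch with an illustrative struct (the kernel would normally use struct_size() for the addition):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event_work {
	unsigned int event;
	unsigned char retries;
	char event_data[] __attribute__((aligned(4)));	/* flexible array */
};

static struct event_work *alloc_event(unsigned int event,
				      const char *payload, size_t len)
{
	/* size the header plus exactly 'len' bytes of payload */
	struct event_work *w = malloc(sizeof(*w) + len);

	if (!w)
		return NULL;
	w->event = event;
	w->retries = 0;
	memcpy(w->event_data, payload, len);
	return w;
}

int main(void)
{
	struct event_work *w = alloc_event(7, "sas-topology", 13);

	if (w) {
		printf("event %u: %s\n", w->event, w->event_data);
		free(w);
	}
	return 0;
}
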
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 7f0d48f406e3..99e151475d8f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -480,4 +480,5 @@ source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
+source "drivers/misc/uacce/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c1860d35dc7e..9abf2923d831 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -56,4 +56,5 @@ obj-$(CONFIG_OCXL) += ocxl/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
+obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 423fecc19fc4..3a9467aaa435 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -394,6 +394,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
void rts522a_init_params(struct rtsx_pcr *pcr)
{
rts5227_init_params(pcr);
+ pcr->ops = &rts522a_pcr_ops;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 282c9ef68ed2..9ff18d4961ce 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -227,6 +227,7 @@ MODULE_DEVICE_TABLE(of, at24_of_match);
static const struct acpi_device_id at24_acpi_ids[] = {
{ "INT3499", (kernel_ulong_t)&at24_data_INT3499 },
+ { "TPF0001", (kernel_ulong_t)&at24_data_24c1024 },
{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(acpi, at24_acpi_ids);
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 0bf08678431b..409276b6374d 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -129,6 +129,8 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
spin_unlock(&job->user_cb->lock);
hl_cb_put(job->user_cb);
job->user_cb = NULL;
+ } else if (!rc) {
+ job->job_cb_size = job->user_cb_size;
}
return rc;
@@ -507,7 +509,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cb *cb;
bool int_queues_only = true;
u32 size_to_copy;
- int rc, i, parse_cnt;
+ int rc, i;
*cs_seq = ULLONG_MAX;
@@ -547,7 +549,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
hl_debugfs_add_cs(cs);
/* Validate ALL the CS chunks before submitting the CS */
- for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
+ for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
enum hl_queue_type queue_type;
bool is_kernel_allocated_cb;
@@ -585,10 +587,6 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
job->cs = cs;
job->user_cb = cb;
job->user_cb_size = chunk->cb_size;
- if (is_kernel_allocated_cb)
- job->job_cb_size = cb->size;
- else
- job->job_cb_size = chunk->cb_size;
job->hw_queue_id = chunk->queue_index;
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
@@ -659,8 +657,8 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
struct hl_device *hdev = hpriv->hdev;
union hl_cs_args *args = data;
struct hl_ctx *ctx = hpriv->ctx;
- void __user *chunks;
- u32 num_chunks;
+ void __user *chunks_execute, *chunks_restore;
+ u32 num_chunks_execute, num_chunks_restore;
u64 cs_seq = ULONG_MAX;
int rc, do_ctx_switch;
bool need_soft_reset = false;
@@ -673,13 +671,25 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
goto out;
}
+ chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
+ num_chunks_execute = args->in.num_chunks_execute;
+
+ if (!num_chunks_execute) {
+ dev_err(hdev->dev,
+ "Got execute CS with 0 chunks, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+
do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
long ret;
- chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
- num_chunks = args->in.num_chunks_restore;
+ chunks_restore =
+ (void __user *) (uintptr_t) args->in.chunks_restore;
+ num_chunks_restore = args->in.num_chunks_restore;
mutex_lock(&hpriv->restore_phase_mutex);
@@ -707,13 +717,13 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
hdev->asic_funcs->restore_phase_topology(hdev);
- if (num_chunks == 0) {
+ if (!num_chunks_restore) {
dev_dbg(hdev->dev,
"Need to run restore phase but restore CS is empty\n");
rc = 0;
} else {
- rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,
- &cs_seq);
+ rc = _hl_cs_ioctl(hpriv, chunks_restore,
+ num_chunks_restore, &cs_seq);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -726,7 +736,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
}
/* Need to wait for restore completion before execution phase */
- if (num_chunks > 0) {
+ if (num_chunks_restore) {
ret = _hl_cs_wait_ioctl(hdev, ctx,
jiffies_to_usecs(hdev->timeout_jiffies),
cs_seq);
@@ -754,18 +764,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
}
}
- chunks = (void __user *)(uintptr_t)args->in.chunks_execute;
- num_chunks = args->in.num_chunks_execute;
-
- if (num_chunks == 0) {
- dev_err(hdev->dev,
- "Got execute CS with 0 chunks, context %d\n",
- ctx->asid);
- rc = -EINVAL;
- goto out;
- }
-
- rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);
+ rc = _hl_cs_ioctl(hpriv, chunks_execute, num_chunks_execute, &cs_seq);
out:
if (rc != -EAGAIN) {
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 20413e350343..756d36ed5d95 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -393,9 +393,10 @@ static int mmu_show(struct seq_file *s, void *data)
}
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
mutex_lock(&ctx->mmu_lock);
@@ -547,12 +548,15 @@ static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
goto out;
if (hdev->dram_supports_virtual_memory &&
- addr >= prop->va_space_dram_start_address &&
- addr < prop->va_space_dram_end_address)
+ (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
return true;
- if (addr >= prop->va_space_host_start_address &&
- addr < prop->va_space_host_end_address)
+ if (addr >= prop->pmmu.start_addr &&
+ addr < prop->pmmu.end_addr)
+ return true;
+
+ if (addr >= prop->pmmu_huge.start_addr &&
+ addr < prop->pmmu_huge.end_addr)
return true;
out:
return false;
@@ -575,9 +579,10 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
}
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
mutex_lock(&ctx->mmu_lock);
@@ -705,6 +710,65 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_data_read64(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[32];
+ u64 addr = entry->addr;
+ u64 val;
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
+ return rc;
+ }
+
+ sprintf(tmp_buf, "0x%016llx\n", val);
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
+}
+
+static ssize_t hl_data_write64(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u64 addr = entry->addr;
+ u64 value;
+ ssize_t rc;
+
+ rc = kstrtoull_from_user(buf, count, 16, &value);
+ if (rc)
+ return rc;
+
+ if (hl_is_device_va(hdev, addr)) {
+ rc = device_va_to_pa(hdev, addr, &addr);
+ if (rc)
+ return rc;
+ }
+
+ rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
+ value, addr);
+ return rc;
+ }
+
+ return count;
+}
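
The new debugfs read64/write64 handlers above fall back to two 32-bit register accesses for CFG space and combine the halves into one 64-bit value. A sketch of that composition, where read_reg32() is a hypothetical stand-in for RREG32:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[2] = { 0xdeadbeef, 0x01234567 };	/* low, high */

static uint32_t read_reg32(unsigned int offset)
{
	return regs[offset / sizeof(uint32_t)];
}

static uint64_t read_reg64(unsigned int offset)
{
	uint32_t lo = read_reg32(offset);
	uint32_t hi = read_reg32(offset + sizeof(uint32_t));

	/* combine the two 32-bit halves into one value */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)read_reg64(0));
	return 0;
}
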
+
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -912,6 +976,12 @@ static const struct file_operations hl_data32b_fops = {
.write = hl_data_write32
};
+static const struct file_operations hl_data64b_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_data_read64,
+ .write = hl_data_write64
+};
+
static const struct file_operations hl_i2c_data_fops = {
.owner = THIS_MODULE,
.read = hl_i2c_data_read,
@@ -1025,6 +1095,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_data32b_fops);
+ debugfs_create_file("data64",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_data64b_fops);
+
debugfs_create_file("set_power_state",
0200,
dev_entry->root,
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index b680b0caa69b..aef4de36b7aa 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -36,7 +36,7 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
status = HL_DEVICE_STATUS_OPERATIONAL;
return status;
-};
+}
static void hpriv_release(struct kref *ref)
{
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index b8a8de24aaf7..68f065607544 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -324,7 +324,11 @@ static u32 goya_all_events[] = {
GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
- GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
+ GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
};
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
@@ -393,19 +397,21 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->dmmu.hop2_mask = HOP2_MASK;
prop->dmmu.hop3_mask = HOP3_MASK;
prop->dmmu.hop4_mask = HOP4_MASK;
- prop->dmmu.huge_page_size = PAGE_SIZE_2MB;
+ prop->dmmu.start_addr = VA_DDR_SPACE_START;
+ prop->dmmu.end_addr = VA_DDR_SPACE_END;
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
- /* No difference between PMMU and DMMU except of page size */
+ /* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
- prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->pmmu.start_addr = VA_HOST_SPACE_START;
+ prop->pmmu.end_addr = VA_HOST_SPACE_END;
prop->pmmu.page_size = PAGE_SIZE_4KB;
- prop->va_space_host_start_address = VA_HOST_SPACE_START;
- prop->va_space_host_end_address = VA_HOST_SPACE_END;
- prop->va_space_dram_start_address = VA_DDR_SPACE_START;
- prop->va_space_dram_end_address = VA_DDR_SPACE_END;
- prop->dram_size_for_default_page_mapping =
- prop->va_space_dram_end_address;
+ /* PMMU and HPMMU are the same except for page size */
+ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
+ prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
+
+ prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
@@ -2573,8 +2579,7 @@ static int goya_hw_init(struct hl_device *hdev)
* After CPU initialization is finished, change DDR bar mapping inside
* iATU to point to the start address of the MMU page tables
*/
- if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
- (MMU_PAGE_TABLES_ADDR &
+ if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
dev_err(hdev->dev,
"failed to map DDR bar to MMU page tables\n");
@@ -3443,12 +3448,13 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
/*
* WA for HW-23.
* We can't allow user to read from Host using QMANs other than 1.
+ * PMMU and HPMMU addresses are equal, check only one of them.
*/
if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
le32_to_cpu(user_dma_pkt->tsize),
- hdev->asic_prop.va_space_host_start_address,
- hdev->asic_prop.va_space_host_end_address)) {
+ hdev->asic_prop.pmmu.start_addr,
+ hdev->asic_prop.pmmu.end_addr)) {
dev_err(hdev->dev,
"Can't DMA from host on queue other then 1\n");
return -EFAULT;
@@ -4178,6 +4184,96 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
return rc;
}
+static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 ddr_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ u32 val_l = RREG32(addr - CFG_BASE);
+ u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
+
+ *val = (((u64) val_h) << 32) | val_l;
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
+
+ *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (ddr_bar_addr != U64_MAX) {
+ *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev,
+ ddr_bar_addr);
+ }
+ if (ddr_bar_addr == U64_MAX)
+ rc = -EIO;
+
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
+
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 ddr_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ WREG32(addr - CFG_BASE, lower_32_bits(val));
+ WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
+
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
+
+ writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+
+ } else if ((addr >= DRAM_PHYS_BASE) &&
+ (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
+ if (ddr_bar_addr != U64_MAX) {
+ writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
+ (addr - bar_base_addr));
+
+ ddr_bar_addr = goya_set_ddr_bar_base(hdev,
+ ddr_bar_addr);
+ }
+ if (ddr_bar_addr == U64_MAX)
+ rc = -EIO;
+
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
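Aside on the two accessors above: the DDR PCI BAR can expose only dram_pci_bar_size bytes of device memory at a time, so the target address is aligned down to a BAR-sized window (the & ~(size - 1) mask) and the access is performed at the remaining offset, after goya_set_ddr_bar_base() repoints the iATU at that window. A minimal stand-alone sketch of the mask arithmetic, with made-up constants; it only holds when the window size is a power of two:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bar_size = 1ull << 28;           /* hypothetical 256 MB window */
	uint64_t addr = 0x123456789ull;           /* hypothetical DRAM address */

	uint64_t window = addr & ~(bar_size - 1); /* align down to window start */
	uint64_t offset = addr & (bar_size - 1);  /* position inside the window */

	printf("window 0x%llx, offset 0x%llx\n",
	       (unsigned long long)window, (unsigned long long)offset);
	return 0;
}

After the readq/writeq, the code above restores the previous BAR window so concurrent users see a consistent mapping.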
static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
@@ -4297,6 +4393,14 @@ static const char *_goya_get_event_desc(u16 event_type)
return "TPC%d_bmon_spmu";
case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
return "DMA_bm_ch%d";
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
+ return "POWER_ENV_S";
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
+ return "POWER_ENV_E";
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
+ return "THERMAL_ENV_S";
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
+ return "THERMAL_ENV_E";
default:
return "N/A";
}
@@ -4388,22 +4492,22 @@ static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
static void goya_print_razwi_info(struct hl_device *hdev)
{
if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
- dev_err(hdev->dev, "Illegal write to LBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
- dev_err(hdev->dev, "Illegal read from LBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
- dev_err(hdev->dev, "Illegal write to HBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
}
if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
- dev_err(hdev->dev, "Illegal read from HBW\n");
+ dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
}
}
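Note on the switch to the _ratelimited variants: RAZWI and MMU-fault events can fire repeatedly for a misbehaving workload, and the unconditional dev_err() calls could flood the kernel log. dev_err_ratelimited() keeps a per-callsite ratelimit state and suppresses repeats past a threshold, printing a "callbacks suppressed" summary instead. A hedged sketch of the pattern, with a made-up helper:

#include <linux/device.h>
#include <linux/ratelimit.h>

/* Hypothetical fault-interrupt helper: only a few messages per
 * ratelimit interval reach the log; the rest are counted and
 * summarized by the ratelimit machinery. */
static void report_fault(struct device *dev, u32 fault_sts)
{
	if (fault_sts)
		dev_err_ratelimited(dev, "illegal access, status 0x%x\n",
				    fault_sts);
}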
@@ -4423,7 +4527,8 @@ static void goya_print_mmu_error_info(struct hl_device *hdev)
addr <<= 32;
addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
- dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
+ dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
+ addr);
WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
}
@@ -4435,7 +4540,7 @@ static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
char desc[20] = "";
goya_get_event_desc(event_type, desc, sizeof(desc));
- dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (razwi) {
@@ -4526,6 +4631,33 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
return rc;
}
+static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
+{
+ switch (event_type) {
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to power consumption\n");
+ break;
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+ "Power envelop is safe, back to optimal clock\n");
+ break;
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to overheating\n");
+ break;
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+ "Thermal envelop is safe, back to optimal clock\n");
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid clock change event %d\n",
+ event_type);
+ break;
+ }
+}
+
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
@@ -4609,6 +4741,14 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
goya_unmask_irq(hdev, event_type);
break;
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
+ case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
+ case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
+ goya_print_clk_change_info(hdev, event_type);
+ goya_unmask_irq(hdev, event_type);
+ break;
+
default:
dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
event_type);
@@ -4776,7 +4916,8 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
- prop->dram_base_address + off, PAGE_SIZE_2MB);
+ prop->dram_base_address + off, PAGE_SIZE_2MB,
+ (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
if (rc) {
dev_err(hdev->dev, "Map failed for address 0x%llx\n",
prop->dram_base_address + off);
@@ -4786,7 +4927,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
- hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB);
+ hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true);
if (rc) {
dev_err(hdev->dev,
@@ -4799,7 +4940,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
rc = hl_mmu_map(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
hdev->cpu_accessible_dma_address + cpu_off,
- PAGE_SIZE_4KB);
+ PAGE_SIZE_4KB, true);
if (rc) {
dev_err(hdev->dev,
"Map failed for CPU accessible memory\n");
@@ -4825,14 +4966,15 @@ unmap_cpu:
for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
if (hl_mmu_unmap(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
- PAGE_SIZE_4KB))
+ PAGE_SIZE_4KB, true))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
unmap:
for (; off >= 0 ; off -= PAGE_SIZE_2MB)
if (hl_mmu_unmap(hdev->kernel_ctx,
- prop->dram_base_address + off, PAGE_SIZE_2MB))
+ prop->dram_base_address + off, PAGE_SIZE_2MB,
+ true))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
prop->dram_base_address + off);
@@ -4857,14 +4999,15 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
- PAGE_SIZE_2MB))
+ PAGE_SIZE_2MB, true))
dev_warn(hdev->dev,
"Failed to unmap CPU accessible memory\n");
} else {
for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
if (hl_mmu_unmap(hdev->kernel_ctx,
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
- PAGE_SIZE_4KB))
+ PAGE_SIZE_4KB,
+ (cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
dev_warn_ratelimited(hdev->dev,
"failed to unmap address 0x%llx\n",
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
@@ -4872,7 +5015,8 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
if (hl_mmu_unmap(hdev->kernel_ctx,
- prop->dram_base_address + off, PAGE_SIZE_2MB))
+ prop->dram_base_address + off, PAGE_SIZE_2MB,
+ (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
dev_warn_ratelimited(hdev->dev,
"Failed to unmap address 0x%llx\n",
prop->dram_base_address + off);
@@ -5113,6 +5257,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
}
static void goya_hw_queues_lock(struct hl_device *hdev)
+ __acquires(&goya->hw_queues_lock)
{
struct goya_device *goya = hdev->asic_specific;
@@ -5120,6 +5265,7 @@ static void goya_hw_queues_lock(struct hl_device *hdev)
}
static void goya_hw_queues_unlock(struct hl_device *hdev)
+ __releases(&goya->hw_queues_lock)
{
struct goya_device *goya = hdev->asic_specific;
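The __acquires()/__releases() markers added to these two helpers are sparse context annotations rather than runtime code: building with "make C=1" lets sparse check that every path through a helper balances the lock it claims to take or drop. A minimal sketch of the convention, using a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);

static void foo_lock_acquire(void)
	__acquires(&foo_lock)
{
	spin_lock(&foo_lock);
}

static void foo_lock_release(void)
	__releases(&foo_lock)
{
	spin_unlock(&foo_lock);
}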
@@ -5180,6 +5326,8 @@ static const struct hl_asic_funcs goya_funcs = {
.restore_phase_topology = goya_restore_phase_topology,
.debugfs_read32 = goya_debugfs_read32,
.debugfs_write32 = goya_debugfs_write32,
+ .debugfs_read64 = goya_debugfs_read64,
+ .debugfs_write64 = goya_debugfs_write64,
.add_device_attr = goya_add_device_attr,
.handle_eqe = goya_handle_eqe,
.set_pll_profile = goya_set_pll_profile,
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index c1ee6e2b5dff..a1bc930d904f 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -364,8 +364,8 @@ static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
u64 range_start, range_end;
if (hdev->mmu_enable) {
- range_start = prop->va_space_dram_start_address;
- range_end = prop->va_space_dram_end_address;
+ range_start = prop->dmmu.start_addr;
+ range_end = prop->dmmu.end_addr;
} else {
range_start = prop->dram_user_base_address;
range_end = prop->dram_end_address;
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index b2ebc01e27f4..cdd4903e48fa 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -298,8 +298,8 @@ static ssize_t pm_mng_profile_store(struct device *dev,
/* Make sure we are in LOW PLL when changing modes */
if (hdev->pm_mng_profile == PM_MANUAL) {
hdev->curr_pll_profile = PLL_HIGH;
- hl_device_set_frequency(hdev, PLL_LOW);
hdev->pm_mng_profile = PM_AUTO;
+ hl_device_set_frequency(hdev, PLL_LOW);
}
} else if (strncmp("manual", buf, strlen("manual")) == 0) {
if (hdev->pm_mng_profile == PM_AUTO) {
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 00c949f4ccd1..31ebcf9458fe 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -132,6 +132,8 @@ enum hl_device_hw_state {
/**
* struct hl_mmu_properties - ASIC specific MMU address translation properties.
+ * @start_addr: virtual start address of the memory region.
+ * @end_addr: virtual end address of the memory region.
* @hop0_shift: shift of hop 0 mask.
* @hop1_shift: shift of hop 1 mask.
* @hop2_shift: shift of hop 2 mask.
@@ -143,9 +145,10 @@ enum hl_device_hw_state {
* @hop3_mask: mask to get the PTE address in hop 3.
* @hop4_mask: mask to get the PTE address in hop 4.
* @page_size: default page size used to allocate memory.
- * @huge_page_size: page size used to allocate memory with huge pages.
*/
struct hl_mmu_properties {
+ u64 start_addr;
+ u64 end_addr;
u64 hop0_shift;
u64 hop1_shift;
u64 hop2_shift;
@@ -157,7 +160,6 @@ struct hl_mmu_properties {
u64 hop3_mask;
u64 hop4_mask;
u32 page_size;
- u32 huge_page_size;
};
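Now that each hl_mmu_properties instance records the virtual range it serves (start_addr/end_addr), callers can classify an address against any region with one generic bounds check; the driver does this through hl_mem_area_inside_range(). A hedged sketch of the equivalent test (the helper name is made up):

/* Hypothetical helper mirroring the checks built on the new fields. */
static bool addr_in_mmu_region(const struct hl_mmu_properties *p,
			       u64 addr, u64 size)
{
	return addr >= p->start_addr && (addr + size) <= p->end_addr;
}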
/**
@@ -169,6 +171,8 @@ struct hl_mmu_properties {
* @preboot_ver: F/W Preboot version.
* @dmmu: DRAM MMU address translation properties.
* @pmmu: PCI (host) MMU address translation properties.
+ * @pmmu_huge: PCI (host) MMU address translation properties for memory
+ * allocated with huge pages.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -178,14 +182,6 @@ struct hl_mmu_properties {
* @dram_size: DRAM total size.
* @dram_pci_bar_size: size of PCI bar towards DRAM.
* @max_power_default: max power of the device after reset
- * @va_space_host_start_address: base address of virtual memory range for
- * mapping host memory.
- * @va_space_host_end_address: end address of virtual memory range for
- * mapping host memory.
- * @va_space_dram_start_address: base address of virtual memory range for
- * mapping DRAM memory.
- * @va_space_dram_end_address: end address of virtual memory range for
- * mapping DRAM memory.
* @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
* fault.
* @pcie_dbi_base_address: Base address of the PCIE_DBI block.
@@ -218,6 +214,7 @@ struct asic_fixed_properties {
char preboot_ver[VERSION_MAX_LEN];
struct hl_mmu_properties dmmu;
struct hl_mmu_properties pmmu;
+ struct hl_mmu_properties pmmu_huge;
u64 sram_base_address;
u64 sram_end_address;
u64 sram_user_base_address;
@@ -227,10 +224,6 @@ struct asic_fixed_properties {
u64 dram_size;
u64 dram_pci_bar_size;
u64 max_power_default;
- u64 va_space_host_start_address;
- u64 va_space_host_end_address;
- u64 va_space_dram_start_address;
- u64 va_space_dram_end_address;
u64 dram_size_for_default_page_mapping;
u64 pcie_dbi_base_address;
u64 pcie_aux_dbi_reg_addr;
@@ -431,10 +424,12 @@ struct hl_eq {
* enum hl_asic_type - supported ASIC types.
* @ASIC_INVALID: Invalid ASIC type.
* @ASIC_GOYA: Goya device.
+ * @ASIC_GAUDI: Gaudi device.
*/
enum hl_asic_type {
ASIC_INVALID,
- ASIC_GOYA
+ ASIC_GOYA,
+ ASIC_GAUDI
};
struct hl_cs_parser;
@@ -589,6 +584,8 @@ struct hl_asic_funcs {
void (*restore_phase_topology)(struct hl_device *hdev);
int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
+ int (*debugfs_read64)(struct hl_device *hdev, u64 addr, u64 *val);
+ int (*debugfs_write64)(struct hl_device *hdev, u64 addr, u64 val);
void (*add_device_attr)(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
void (*handle_eqe)(struct hl_device *hdev,
@@ -658,6 +655,8 @@ struct hl_va_range {
* this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of DMA fence objects representing pending CS.
* @host_va_range: holds available virtual addresses for host mappings.
+ * @host_huge_va_range: holds available virtual addresses for host mappings
+ * with huge pages.
* @dram_va_range: holds available virtual addresses for DRAM mappings.
* @mem_hash_lock: protects the mem_hash.
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifing the
@@ -688,8 +687,9 @@ struct hl_ctx {
struct hl_device *hdev;
struct kref refcount;
struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
- struct hl_va_range host_va_range;
- struct hl_va_range dram_va_range;
+ struct hl_va_range *host_va_range;
+ struct hl_va_range *host_huge_va_range;
+ struct hl_va_range *dram_va_range;
struct mutex mem_hash_lock;
struct mutex mmu_lock;
struct list_head debugfs_list;
@@ -763,7 +763,7 @@ struct hl_userptr {
* @aborted: true if CS was aborted due to some device error.
*/
struct hl_cs {
- u8 jobs_in_queue_cnt[HL_MAX_QUEUES];
+ u16 jobs_in_queue_cnt[HL_MAX_QUEUES];
struct hl_ctx *ctx;
struct list_head job_list;
spinlock_t job_lock;
@@ -1291,6 +1291,8 @@ struct hl_device_idle_busy_ts {
* otherwise.
* @dram_supports_virtual_memory: is MMU enabled towards DRAM.
* @dram_default_page_mapping: is DRAM default page mapping enabled.
+ * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
+ * huge pages.
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
@@ -1372,6 +1374,7 @@ struct hl_device {
u8 reset_on_lockup;
u8 dram_supports_virtual_memory;
u8 dram_default_page_mapping;
+ u8 pmmu_huge_range;
u8 init_done;
u8 device_cpu_disabled;
u8 dma_mask;
@@ -1573,8 +1576,10 @@ int hl_mmu_init(struct hl_device *hdev);
void hl_mmu_fini(struct hl_device *hdev);
int hl_mmu_ctx_init(struct hl_ctx *ctx);
void hl_mmu_ctx_fini(struct hl_ctx *ctx);
-int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
-int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+ u32 page_size, bool flush_pte);
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
+ bool flush_pte);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
@@ -1606,11 +1611,18 @@ int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
-long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
-long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
+int hl_get_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_set_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
+int hl_get_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_fan_speed(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_pwm_info(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value);
u64 hl_get_max_power(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 8c342fb499ca..b670859c677a 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -40,12 +40,13 @@ MODULE_PARM_DESC(reset_on_lockup,
#define PCI_VENDOR_ID_HABANALABS 0x1da3
#define PCI_IDS_GOYA 0x0001
+#define PCI_IDS_GAUDI 0x1000
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, ids);
/*
* get_asic_type - translate device id to asic type
@@ -63,6 +64,9 @@ static enum hl_asic_type get_asic_type(u16 device)
case PCI_IDS_GOYA:
asic_type = ASIC_GOYA;
break;
+ case PCI_IDS_GAUDI:
+ asic_type = ASIC_GAUDI;
+ break;
default:
asic_type = ASIC_INVALID;
break;
@@ -263,6 +267,11 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
goto free_hdev;
+ } else if (hdev->asic_type == ASIC_GAUDI) {
+ dev_err(&pdev->dev,
+ "GAUDI is not supported by the current kernel\n");
+ rc = -ENODEV;
+ goto free_hdev;
}
} else {
hdev->asic_type = asic_type;
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index 7be4bace9b4f..a21a26e07c3b 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -113,6 +113,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ int rc;
if (hl_device_disabled_or_in_reset(hdev))
return -ENODEV;
@@ -125,36 +126,40 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp_crit:
case hwmon_temp_max_hyst:
case hwmon_temp_crit_hyst:
+ case hwmon_temp_offset:
+ case hwmon_temp_highest:
break;
default:
return -EINVAL;
}
- *val = hl_get_temperature(hdev, channel, attr);
+ rc = hl_get_temperature(hdev, channel, attr, val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_input:
case hwmon_in_min:
case hwmon_in_max:
+ case hwmon_in_highest:
break;
default:
return -EINVAL;
}
- *val = hl_get_voltage(hdev, channel, attr);
+ rc = hl_get_voltage(hdev, channel, attr, val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
case hwmon_curr_min:
case hwmon_curr_max:
+ case hwmon_curr_highest:
break;
default:
return -EINVAL;
}
- *val = hl_get_current(hdev, channel, attr);
+ rc = hl_get_current(hdev, channel, attr, val);
break;
case hwmon_fan:
switch (attr) {
@@ -165,7 +170,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
default:
return -EINVAL;
}
- *val = hl_get_fan_speed(hdev, channel, attr);
+ rc = hl_get_fan_speed(hdev, channel, attr, val);
break;
case hwmon_pwm:
switch (attr) {
@@ -175,12 +180,12 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
default:
return -EINVAL;
}
- *val = hl_get_pwm_info(hdev, channel, attr);
+ rc = hl_get_pwm_info(hdev, channel, attr, val);
break;
default:
return -EINVAL;
}
- return 0;
+ return rc;
}
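With this rework the sensor getters return an error code and deliver the reading through an out parameter, so hl_read() can propagate firmware failures to the hwmon core instead of silently reporting a 0 reading. A hedged sketch of the resulting caller shape (read_temp_channel is made up):

static int read_temp_channel(struct hl_device *hdev, int channel, long *val)
{
	/* On failure the getter zeroes *val and returns the error,
	 * which propagates to the hwmon core unchanged. */
	return hl_get_temperature(hdev, channel, hwmon_temp_input, val);
}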
static int hl_write(struct device *dev, enum hwmon_sensor_types type,
@@ -192,6 +197,15 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
return -ENODEV;
switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_offset:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_temperature(hdev, channel, attr, val);
+ break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
@@ -219,7 +233,10 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_temp_max_hyst:
case hwmon_temp_crit:
case hwmon_temp_crit_hyst:
+ case hwmon_temp_highest:
return 0444;
+ case hwmon_temp_offset:
+ return 0644;
}
break;
case hwmon_in:
@@ -227,6 +244,7 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_in_input:
case hwmon_in_min:
case hwmon_in_max:
+ case hwmon_in_highest:
return 0444;
}
break;
@@ -235,6 +253,7 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_curr_input:
case hwmon_curr_min:
case hwmon_curr_max:
+ case hwmon_curr_highest:
return 0444;
}
break;
@@ -265,10 +284,10 @@ static const struct hwmon_ops hl_hwmon_ops = {
.write = hl_write
};
-long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -279,22 +298,47 @@ long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_set_temperature(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set temperature of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
+int hl_get_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -305,22 +349,22 @@ long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -331,22 +375,22 @@ long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_fan_speed(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -357,22 +401,22 @@ long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
-long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
+int hl_get_pwm_info(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
{
struct armcp_packet pkt;
- long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
@@ -383,16 +427,16 @@ long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- SENSORS_PKT_TIMEOUT, &result);
+ SENSORS_PKT_TIMEOUT, value);
if (rc) {
dev_err(hdev->dev,
"Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
- result = 0;
+ *value = 0;
}
- return result;
+ return rc;
}
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index e4c6699a1868..bdd0a4c3a9cf 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -189,6 +189,10 @@ enum pq_init_status {
* ArmCP to write to the structure, to prevent data corruption in case of
* mismatched driver/FW versions.
*
+ * ARMCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
*/
enum armcp_packet_id {
@@ -214,6 +218,8 @@ enum armcp_packet_id {
ARMCP_PACKET_MAX_POWER_GET, /* sysfs */
ARMCP_PACKET_MAX_POWER_SET, /* sysfs */
ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ ARMCP_RESERVED,
+ ARMCP_PACKET_TEMPERATURE_SET, /* sysfs */
};
#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -271,24 +277,32 @@ enum armcp_packet_rc {
armcp_packet_fault
};
+/*
+ * armcp_temp_type should adhere to hwmon_temp_attributes
+ * defined in the Linux kernel hwmon.h file
+ */
enum armcp_temp_type {
armcp_temp_input,
armcp_temp_max = 6,
armcp_temp_max_hyst,
armcp_temp_crit,
- armcp_temp_crit_hyst
+ armcp_temp_crit_hyst,
+ armcp_temp_offset = 19,
+ armcp_temp_highest = 22
};
enum armcp_in_attributes {
armcp_in_input,
armcp_in_min,
- armcp_in_max
+ armcp_in_max,
+ armcp_in_highest = 7
};
enum armcp_curr_attributes {
armcp_curr_input,
armcp_curr_min,
- armcp_curr_max
+ armcp_curr_max,
+ armcp_curr_highest = 7
};
enum armcp_fan_attributes {
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
index bb7a1aa3279e..5fb92362fc5f 100644
--- a/drivers/misc/habanalabs/include/goya/goya_async_events.h
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -188,6 +188,10 @@ enum goya_async_event_id {
GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485,
GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486,
GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S = 507,
+ GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E = 508,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S = 509,
+ GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E = 510,
GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023,
GOYA_ASYNC_EVENT_ID_SIZE
};
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
index cd89723c7f61..08061282cd9c 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -11,24 +11,27 @@
/*
* PSOC scratch-pad registers
*/
-#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
-#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
-#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
-#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
-#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
-#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
-#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
-#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
-#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
-#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
-#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
-#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
-#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
-#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
-#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
-#define mmUBOOT_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
-#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
+#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
+#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
+#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
-#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS mmPSOC_GLOBAL_CONF_WARM_REBOOT
#endif /* GOYA_REG_MAP_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 2853a2de8cf6..f7992a69fd3a 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -8,20 +8,35 @@
#ifndef HL_BOOT_IF_H
#define HL_BOOT_IF_H
+#define LKD_HARD_RESET_MAGIC 0xED7BD694
+
+/* CPU error bits in BOOT_ERROR registers */
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << 0)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << 1)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << 2)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << 3)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6)
+#define CPU_BOOT_ERR0_ENABLED (1 << 31)
+
enum cpu_boot_status {
CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
- CPU_BOOT_STATUS_IN_WFE,
- CPU_BOOT_STATUS_DRAM_RDY,
- CPU_BOOT_STATUS_SRAM_AVAIL,
- CPU_BOOT_STATUS_IN_BTL, /* BTL is H/W FSM */
- CPU_BOOT_STATUS_IN_PREBOOT,
- CPU_BOOT_STATUS_IN_SPL,
- CPU_BOOT_STATUS_IN_UBOOT,
- CPU_BOOT_STATUS_DRAM_INIT_FAIL,
- CPU_BOOT_STATUS_FIT_CORRUPTED,
- CPU_BOOT_STATUS_UBOOT_NOT_READY,
- CPU_BOOT_STATUS_RESERVED,
- CPU_BOOT_STATUS_TS_INIT_FAIL,
+ CPU_BOOT_STATUS_IN_WFE = 1,
+ CPU_BOOT_STATUS_DRAM_RDY = 2,
+ CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+ CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT = 5,
+ CPU_BOOT_STATUS_IN_SPL = 6,
+ CPU_BOOT_STATUS_IN_UBOOT = 7,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+ CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_READY_TO_BOOT = 15,
};
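These CPU_BOOT_ERR0_* bits supersede the now-deprecated failure values in cpu_boot_status: the F/W can report several independent failures at once, and CPU_BOOT_ERR0_ENABLED tells the driver the error registers are implemented at all. A hedged decoding sketch (the helper and message wording are illustrative):

static void report_boot_err0(struct device *dev, u32 err0)
{
	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
		return;	/* F/W predates the BOOT_ERROR registers */

	if (err0 & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
		dev_err(dev, "device boot: DRAM init failed\n");
	if (err0 & CPU_BOOT_ERR0_FIT_CORRUPTED)
		dev_err(dev, "device boot: FIT image corrupted\n");
	if (err0 & CPU_BOOT_ERR0_DRAM_SKIPPED)
		dev_warn(dev, "device boot: DRAM init skipped\n");
}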
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 6c72cb4eff54..a72f766ca470 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -530,7 +530,7 @@ static u64 get_va_block(struct hl_device *hdev,
* or not, hence we continue with the biggest possible
* granularity.
*/
- page_size = hdev->asic_prop.pmmu.huge_page_size;
+ page_size = hdev->asic_prop.pmmu_huge.page_size;
else
page_size = hdev->asic_prop.dmmu.page_size;
@@ -638,13 +638,12 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_userptr *userptr,
struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
- struct hl_mmu_properties *mmu_prop = &ctx->hdev->asic_prop.pmmu;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
u64 page_mask, total_npages;
u32 npages, page_size = PAGE_SIZE,
- huge_page_size = mmu_prop->huge_page_size;
+ huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
@@ -747,7 +746,8 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
- rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
+ rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
+ (i + 1) == phys_pg_pack->npages);
if (rc) {
dev_err(hdev->dev,
"map failed for handle %u, npages: %llu, mapped: %llu",
@@ -765,7 +765,8 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
err:
next_vaddr = vaddr;
for (i = 0 ; i < mapped_pg_cnt ; i++) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size,
+ (i + 1) == mapped_pg_cnt))
dev_warn_ratelimited(hdev->dev,
"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
phys_pg_pack->handle, next_vaddr,
@@ -794,7 +795,8 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
next_vaddr = vaddr;
for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size,
+ (i + 1) == phys_pg_pack->npages))
dev_warn_ratelimited(hdev->dev,
"unmap failed for vaddr: 0x%llx\n", next_vaddr);
@@ -853,6 +855,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct hl_userptr *userptr = NULL;
struct hl_vm_hash_node *hnode;
+ struct hl_va_range *va_range;
enum vm_type_t *vm_type;
u64 ret_vaddr, hint_addr;
u32 handle = 0;
@@ -924,9 +927,16 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto hnode_err;
}
- ret_vaddr = get_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- phys_pg_pack->total_size, hint_addr, is_userptr);
+ if (is_userptr)
+ if (phys_pg_pack->page_size == hdev->asic_prop.pmmu.page_size)
+ va_range = ctx->host_va_range;
+ else
+ va_range = ctx->host_huge_va_range;
+ else
+ va_range = ctx->dram_va_range;
+
+ ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
+ hint_addr, is_userptr);
if (!ret_vaddr) {
dev_err(hdev->dev, "no available va block for handle %u\n",
handle);
@@ -965,10 +975,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
return 0;
map_err:
- if (add_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- ret_vaddr,
- ret_vaddr + phys_pg_pack->total_size - 1))
+ if (add_va_block(hdev, va_range, ret_vaddr,
+ ret_vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev,
"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
handle, ret_vaddr);
@@ -1030,7 +1038,6 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
- va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
&phys_pg_pack);
@@ -1040,9 +1047,15 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
vaddr);
goto vm_type_err;
}
+
+ if (phys_pg_pack->page_size ==
+ hdev->asic_prop.pmmu.page_size)
+ va_range = ctx->host_va_range;
+ else
+ va_range = ctx->host_huge_va_range;
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
- va_range = &ctx->dram_va_range;
+ va_range = ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
@@ -1438,19 +1451,18 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
}
/*
- * hl_va_range_init - initialize virtual addresses range
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_range : pointer to the range to initialize
- * @start : range start address
- * @end : range end address
+ * va_range_init - initialize virtual addresses range
+ * @hdev: pointer to the habanalabs device structure
+ * @va_range: pointer to the range to initialize
+ * @start: range start address
+ * @end: range end address
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
* addresses.
*/
-static int hl_va_range_init(struct hl_device *hdev,
- struct hl_va_range *va_range, u64 start, u64 end)
+static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
+ u64 start, u64 end)
{
int rc;
@@ -1485,47 +1497,105 @@ static int hl_va_range_init(struct hl_device *hdev,
}
/*
- * hl_vm_ctx_init_with_ranges - initialize virtual memory for context
+ * va_range_fini() - clear a virtual addresses range
+ * @hdev: pointer to the habanalabs structure
+ * @va_range: pointer to virtual addresses range
*
- * @ctx : pointer to the habanalabs context structure
- * @host_range_start : host virtual addresses range start
- * @host_range_end : host virtual addresses range end
- * @dram_range_start : dram virtual addresses range start
- * @dram_range_end : dram virtual addresses range end
+ * This function does the following:
+ * - Frees the virtual addresses block list and its lock
+ */
+static void va_range_fini(struct hl_device *hdev,
+ struct hl_va_range *va_range)
+{
+ mutex_lock(&va_range->lock);
+ clear_va_list_locked(hdev, &va_range->list);
+ mutex_unlock(&va_range->lock);
+
+ mutex_destroy(&va_range->lock);
+ kfree(va_range);
+}
+
+/*
+ * vm_ctx_init_with_ranges() - initialize virtual memory for context
+ * @ctx: pointer to the habanalabs context structure
+ * @host_range_start: host virtual addresses range start.
+ * @host_range_end: host virtual addresses range end.
+ * @host_huge_range_start: host virtual addresses range start for memory
+ * allocated with huge pages.
+ * @host_huge_range_end: host virtual addresses range end for memory allocated
+ * with huge pages.
+ * @dram_range_start: dram virtual addresses range start.
+ * @dram_range_end: dram virtual addresses range end.
*
* This function initializes the following:
* - MMU for context
* - Virtual address to area descriptor hashtable
* - Virtual block list of available virtual memory
*/
-static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
- u64 host_range_end, u64 dram_range_start,
- u64 dram_range_end)
+static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
+ u64 host_range_start,
+ u64 host_range_end,
+ u64 host_huge_range_start,
+ u64 host_huge_range_end,
+ u64 dram_range_start,
+ u64 dram_range_end)
{
struct hl_device *hdev = ctx->hdev;
int rc;
+ ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
+ if (!ctx->host_va_range)
+ return -ENOMEM;
+
+ ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
+ GFP_KERNEL);
+ if (!ctx->host_huge_va_range) {
+ rc = -ENOMEM;
+ goto host_huge_va_range_err;
+ }
+
+ ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
+ if (!ctx->dram_va_range) {
+ rc = -ENOMEM;
+ goto dram_va_range_err;
+ }
+
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
- return rc;
+ goto mmu_ctx_err;
}
mutex_init(&ctx->mem_hash_lock);
hash_init(ctx->mem_hash);
- mutex_init(&ctx->host_va_range.lock);
+ mutex_init(&ctx->host_va_range->lock);
- rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start,
- host_range_end);
+ rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
+ host_range_end);
if (rc) {
dev_err(hdev->dev, "failed to init host vm range\n");
- goto host_vm_err;
+ goto host_page_range_err;
+ }
+
+ if (hdev->pmmu_huge_range) {
+ mutex_init(&ctx->host_huge_va_range->lock);
+
+ rc = va_range_init(hdev, ctx->host_huge_va_range,
+ host_huge_range_start,
+ host_huge_range_end);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to init host huge vm range\n");
+ goto host_hpage_range_err;
+ }
+ } else {
+ ctx->host_huge_va_range = ctx->host_va_range;
}
- mutex_init(&ctx->dram_va_range.lock);
+ mutex_init(&ctx->dram_va_range->lock);
- rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start,
+ rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
dram_range_end);
if (rc) {
dev_err(hdev->dev, "failed to init dram vm range\n");
@@ -1537,15 +1607,29 @@ static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start,
return 0;
dram_vm_err:
- mutex_destroy(&ctx->dram_va_range.lock);
+ mutex_destroy(&ctx->dram_va_range->lock);
- mutex_lock(&ctx->host_va_range.lock);
- clear_va_list_locked(hdev, &ctx->host_va_range.list);
- mutex_unlock(&ctx->host_va_range.lock);
-host_vm_err:
- mutex_destroy(&ctx->host_va_range.lock);
+ if (hdev->pmmu_huge_range) {
+ mutex_lock(&ctx->host_huge_va_range->lock);
+ clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
+ mutex_unlock(&ctx->host_huge_va_range->lock);
+ }
+host_hpage_range_err:
+ if (hdev->pmmu_huge_range)
+ mutex_destroy(&ctx->host_huge_va_range->lock);
+ mutex_lock(&ctx->host_va_range->lock);
+ clear_va_list_locked(hdev, &ctx->host_va_range->list);
+ mutex_unlock(&ctx->host_va_range->lock);
+host_page_range_err:
+ mutex_destroy(&ctx->host_va_range->lock);
mutex_destroy(&ctx->mem_hash_lock);
hl_mmu_ctx_fini(ctx);
+mmu_ctx_err:
+ kfree(ctx->dram_va_range);
+dram_va_range_err:
+ kfree(ctx->host_huge_va_range);
+host_huge_va_range_err:
+ kfree(ctx->host_va_range);
return rc;
}
@@ -1553,8 +1637,8 @@ host_vm_err:
int hl_vm_ctx_init(struct hl_ctx *ctx)
{
struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
- u64 host_range_start, host_range_end, dram_range_start,
- dram_range_end;
+ u64 host_range_start, host_range_end, host_huge_range_start,
+ host_huge_range_end, dram_range_start, dram_range_end;
atomic64_set(&ctx->dram_phys_mem, 0);
@@ -1566,38 +1650,26 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
* address of the memory related to the given handle.
*/
if (ctx->hdev->mmu_enable) {
- dram_range_start = prop->va_space_dram_start_address;
- dram_range_end = prop->va_space_dram_end_address;
- host_range_start = prop->va_space_host_start_address;
- host_range_end = prop->va_space_host_end_address;
+ dram_range_start = prop->dmmu.start_addr;
+ dram_range_end = prop->dmmu.end_addr;
+ host_range_start = prop->pmmu.start_addr;
+ host_range_end = prop->pmmu.end_addr;
+ host_huge_range_start = prop->pmmu_huge.start_addr;
+ host_huge_range_end = prop->pmmu_huge.end_addr;
} else {
dram_range_start = prop->dram_user_base_address;
dram_range_end = prop->dram_end_address;
host_range_start = prop->dram_user_base_address;
host_range_end = prop->dram_end_address;
+ host_huge_range_start = prop->dram_user_base_address;
+ host_huge_range_end = prop->dram_end_address;
}
- return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
- dram_range_start, dram_range_end);
-}
-
-/*
- * hl_va_range_fini - clear a virtual addresses range
- *
- * @hdev : pointer to the habanalabs structure
- * va_range : pointer to virtual addresses range
- *
- * This function does the following:
- * - Frees the virtual addresses block list and its lock
- */
-static void hl_va_range_fini(struct hl_device *hdev,
- struct hl_va_range *va_range)
-{
- mutex_lock(&va_range->lock);
- clear_va_list_locked(hdev, &va_range->list);
- mutex_unlock(&va_range->lock);
-
- mutex_destroy(&va_range->lock);
+ return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
+ host_huge_range_start,
+ host_huge_range_end,
+ dram_range_start,
+ dram_range_end);
}
/*
@@ -1664,8 +1736,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
}
spin_unlock(&vm->idr_lock);
- hl_va_range_fini(hdev, &ctx->dram_va_range);
- hl_va_range_fini(hdev, &ctx->host_va_range);
+ va_range_fini(hdev, ctx->dram_va_range);
+ if (hdev->pmmu_huge_range)
+ va_range_fini(hdev, ctx->host_huge_va_range);
+ va_range_fini(hdev, ctx->host_va_range);
mutex_destroy(&ctx->mem_hash_lock);
hl_mmu_ctx_fini(ctx);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 6262b26e2086..a290d6b49d78 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -254,6 +254,15 @@ static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
return phys_hop_addr + pte_offset;
}
+static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+}
+
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -548,6 +557,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
curr_pte;
bool is_huge, clear_hop3 = true;
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
hop0_addr = get_hop0_addr(ctx);
@@ -637,29 +647,27 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
clear_hop3 = true;
if (!clear_hop3)
- goto flush;
+ goto mapped;
clear_pte(ctx, hop3_pte_addr);
if (put_pte(ctx, hop3_addr))
- goto flush;
+ goto mapped;
clear_pte(ctx, hop2_pte_addr);
if (put_pte(ctx, hop2_addr))
- goto flush;
+ goto mapped;
clear_pte(ctx, hop1_pte_addr);
if (put_pte(ctx, hop1_addr))
- goto flush;
+ goto mapped;
clear_pte(ctx, hop0_pte_addr);
}
-flush:
- flush(ctx);
-
+mapped:
return 0;
not_mapped:
@@ -675,6 +683,7 @@ not_mapped:
* @ctx: pointer to the context structure
* @virt_addr: virt addr to map from
* @page_size: size of the page to unmap
+ * @flush_pte: whether to do a PCI flush
*
* This function does the following:
* - Check that the virt addr is mapped
@@ -685,40 +694,43 @@ not_mapped:
* changes the MMU hash, it must be protected by a lock.
* However, because it maps only a single page, the lock should be implemented
* in a higher level in order to protect the entire mapping of the memory area
+ *
+ * For optimization reasons, a PCI flush may be requested once after unmapping
+ * of a large area.
*/
-int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
+int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
+ bool flush_pte)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr;
u32 real_page_size, npages;
- int i, rc;
+ int i, rc = 0;
bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_dram_addr = is_dram_va(hdev, virt_addr);
- mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ if (is_dram_addr)
+ mmu_prop = &prop->dmmu;
+ else if ((page_size % prop->pmmu_huge.page_size) == 0)
+ mmu_prop = &prop->pmmu_huge;
+ else
+ mmu_prop = &prop->pmmu;
/*
* The H/W handles mapping of specific page sizes. Hence if the page
* size is bigger, we break it to sub-pages and unmap them separately.
*/
- if ((page_size % mmu_prop->huge_page_size) == 0) {
- real_page_size = mmu_prop->huge_page_size;
- } else if ((page_size % mmu_prop->page_size) == 0) {
+ if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not %uKB nor %uMB aligned, can't unmap\n",
- page_size,
- mmu_prop->page_size >> 10,
- mmu_prop->huge_page_size >> 20);
+ "page size of %u is not %uKB aligned, can't unmap\n",
+ page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
@@ -729,12 +741,15 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
for (i = 0 ; i < npages ; i++) {
rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
- return rc;
+ break;
real_virt_addr += real_page_size;
}
- return 0;
+ if (flush_pte)
+ flush(ctx);
+
+ return rc;
}
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
@@ -753,8 +768,6 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
hop4_new = false, is_huge;
int rc = -ENOMEM;
- mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
-
/*
* This mapping function can map a page or a huge page. For huge page
* there are only 3 hops rather than 4. Currently the DRAM allocation
@@ -762,11 +775,15 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
* one of the two page sizes. Since this is common code for all the
* three cases, we need this huge page check.
*/
- is_huge = page_size == mmu_prop->huge_page_size;
-
- if (is_dram_addr && !is_huge) {
- dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
- return -EFAULT;
+ if (is_dram_addr) {
+ mmu_prop = &prop->dmmu;
+ is_huge = true;
+ } else if (page_size == prop->pmmu_huge.page_size) {
+ mmu_prop = &prop->pmmu_huge;
+ is_huge = true;
+ } else {
+ mmu_prop = &prop->pmmu;
+ is_huge = false;
}
hop0_addr = get_hop0_addr(ctx);
@@ -885,8 +902,6 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
get_pte(ctx, hop3_addr);
}
- flush(ctx);
-
return 0;
err:
@@ -909,6 +924,7 @@ err:
* @virt_addr: virt addr to map from
* @phys_addr: phys addr to map to
* @page_size: physical page size
+ * @flush_pte: whether to do a PCI flush
*
* This function does the following:
* - Check that the virt addr is not mapped
@@ -919,8 +935,12 @@ err:
* changes the MMU hash, it must be protected by a lock.
* However, because it maps only a single page, the lock should be implemented
* in a higher level in order to protect the entire mapping of the memory area
+ *
+ * For optimization reasons, a PCI flush may be requested once after mapping
+ * of a large area.
*/
-int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
+int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
+ bool flush_pte)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -933,26 +953,25 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
if (!hdev->mmu_enable)
return 0;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_dram_addr = is_dram_va(hdev, virt_addr);
- mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ if (is_dram_addr)
+ mmu_prop = &prop->dmmu;
+ else if ((page_size % prop->pmmu_huge.page_size) == 0)
+ mmu_prop = &prop->pmmu_huge;
+ else
+ mmu_prop = &prop->pmmu;
/*
* The H/W handles mapping of specific page sizes. Hence if the page
* size is bigger, we break it to sub-pages and map them separately.
*/
- if ((page_size % mmu_prop->huge_page_size) == 0) {
- real_page_size = mmu_prop->huge_page_size;
- } else if ((page_size % mmu_prop->page_size) == 0) {
+ if ((page_size % mmu_prop->page_size) == 0) {
real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not %dKB nor %dMB aligned, can't unmap\n",
- page_size,
- mmu_prop->page_size >> 10,
- mmu_prop->huge_page_size >> 20);
+ "page size of %u is not %uKB aligned, can't unmap\n",
+ page_size, mmu_prop->page_size >> 10);
return -EFAULT;
}
@@ -976,6 +995,9 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
mapped_cnt++;
}
+ if (flush_pte)
+ flush(ctx);
+
return 0;
err:
@@ -988,6 +1010,8 @@ err:
real_virt_addr += real_page_size;
}
+ flush(ctx);
+
return rc;
}
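The net effect of the flush_pte plumbing in this file: _hl_mmu_map()/_hl_mmu_unmap() no longer flush unconditionally, so a caller walking a large area page by page can pay for the PCI flush once at the end, exactly as the reworked map_phys_pg_pack()/unmap_phys_pg_pack() in memory.c do. A hedged sketch of the calling pattern (map_area is made up):

static int map_area(struct hl_ctx *ctx, u64 va, u64 pa, u32 page_size,
		    u64 npages)
{
	u64 i;
	int rc;

	for (i = 0 ; i < npages ; i++) {
		/* Request the PCI flush only for the last page. */
		rc = hl_mmu_map(ctx, va + i * page_size, pa + i * page_size,
				page_size, (i + 1) == npages);
		if (rc)
			return rc;
	}

	return 0;
}

Note that the error path of hl_mmu_map() still flushes unconditionally, so a partially mapped area is never left with stale PTEs.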
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index cc92bc3ed820..886459e0ddd9 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -11,6 +11,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
#ifdef CONFIG_X86_32
#include <asm/desc.h>
@@ -175,6 +176,80 @@ void lkdtm_HUNG_TASK(void)
schedule();
}
+volatile unsigned int huge = INT_MAX - 2;
+volatile unsigned int ignored;
+
+void lkdtm_OVERFLOW_SIGNED(void)
+{
+ int value;
+
+ value = huge;
+ pr_info("Normal signed addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing signed addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+
+void lkdtm_OVERFLOW_UNSIGNED(void)
+{
+ unsigned int value;
+
+ value = huge;
+ pr_info("Normal unsigned addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing unsigned addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+/* Intentionally using old-style flex array definition of 1 byte. */
+struct array_bounds_flex_array {
+ int one;
+ int two;
+ char data[1];
+};
+
+struct array_bounds {
+ int one;
+ int two;
+ char data[8];
+ int three;
+};
+
+void lkdtm_ARRAY_BOUNDS(void)
+{
+ struct array_bounds_flex_array *not_checked;
+ struct array_bounds *checked;
+ volatile int i;
+
+ not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
+ checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+
+ pr_info("Array access within bounds ...\n");
+ /* For both, touch all bytes in the actual member size. */
+ for (i = 0; i < sizeof(checked->data); i++)
+ checked->data[i] = 'A';
+ /*
+ * For the uninstrumented flex array member, also touch 1 byte
+ * beyond to verify it is correctly uninstrumented.
+ */
+ for (i = 0; i < sizeof(not_checked->data) + 1; i++)
+ not_checked->data[i] = 'A';
+
+ pr_info("Array access beyond bounds ...\n");
+ for (i = 0; i < sizeof(checked->data) + 1; i++)
+ checked->data[i] = 'B';
+
+ kfree(not_checked);
+ kfree(checked);
+}
+
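Usage note for the three new crash types: like the existing ones, they are triggered through debugfs, e.g. 'echo ARRAY_BOUNDS > /sys/kernel/debug/provoke-crash/DIRECT' from a root shell. The OVERFLOW_* and ARRAY_BOUNDS tests are only expected to produce a warning on kernels built with the corresponding UBSAN instrumentation; on other builds the accesses complete silently.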
void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 5ce4ac8c06fc..a5e344df9166 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -130,6 +130,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(HARDLOCKUP),
CRASHTYPE(SPINLOCKUP),
CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
CRASHTYPE(EXEC_DATA),
CRASHTYPE(EXEC_STACK),
CRASHTYPE(EXEC_KMALLOC),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 8d13d0176624..601a2156a0d4 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -22,6 +22,9 @@ void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
+void lkdtm_OVERFLOW_SIGNED(void);
+void lkdtm_OVERFLOW_UNSIGNED(void);
+void lkdtm_ARRAY_BOUNDS(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
void lkdtm_CORRUPT_USER_DS(void);
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
index d5a084475abc..d1a5c0705be3 100644
--- a/drivers/misc/lkdtm/stackleak.c
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -16,6 +16,7 @@ void lkdtm_STACKLEAK_ERASING(void)
unsigned long *sp, left, found, i;
const unsigned long check_depth =
STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+ bool test_failed = false;
/*
* For the details about the alignment of the poison values, see
@@ -34,7 +35,8 @@ void lkdtm_STACKLEAK_ERASING(void)
left--;
} else {
pr_err("FAIL: not enough stack space for the test\n");
- return;
+ test_failed = true;
+ goto end;
}
pr_info("checking unused part of the thread stack (%lu bytes)...\n",
@@ -52,22 +54,29 @@ void lkdtm_STACKLEAK_ERASING(void)
}
if (found <= check_depth) {
- pr_err("FAIL: thread stack is not erased (checked %lu bytes)\n",
+ pr_err("FAIL: the erased part is not found (checked %lu bytes)\n",
i * sizeof(unsigned long));
- return;
+ test_failed = true;
+ goto end;
}
- pr_info("first %lu bytes are unpoisoned\n",
+ pr_info("the erased part begins after %lu not poisoned bytes\n",
(i - found) * sizeof(unsigned long));
/* The rest of thread stack should be erased */
for (; i < left; i++) {
if (*(sp - i) != STACKLEAK_POISON) {
- pr_err("FAIL: thread stack is NOT properly erased\n");
- return;
+ pr_err("FAIL: bad value number %lu in the erased part: 0x%lx\n",
+ i, *(sp - i));
+ test_failed = true;
}
}
- pr_info("OK: the rest of the thread stack is properly erased\n");
- return;
+end:
+ if (test_failed) {
+ pr_err("FAIL: the thread stack is NOT properly erased\n");
+ dump_stack();
+ } else {
+ pr_info("OK: the rest of the thread stack is properly erased\n");
+ }
}
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 9ad9c01ddf41..910f059b3384 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -91,7 +91,7 @@ struct mkhi_rule_id {
struct mkhi_fwcaps {
struct mkhi_rule_id id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct mkhi_fw_ver_block {
@@ -119,7 +119,7 @@ struct mkhi_msg_hdr {
struct mkhi_msg {
struct mkhi_msg_hdr hdr;
- u8 data[0];
+ u8 data[];
} __packed;
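The [0]-to-[] conversions here (and in hw.h and mei_dev.h below) turn the old GNU zero-length arrays into C99 flexible array members; sizeof(struct ...) is unchanged, but the compiler can now flag misuse such as embedding the struct in the middle of another one. Allocation sites typically pair this with struct_size() from <linux/overflow.h>; a hedged sketch (the helper and payload_len are made up):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct mkhi_msg *mkhi_msg_alloc(size_t payload_len)
{
	struct mkhi_msg *msg;

	/* struct_size() computes sizeof(*msg) + payload_len bytes for
	 * the trailing flexible 'data' member, saturating to SIZE_MAX
	 * on overflow so the allocation simply fails. */
	msg = kzalloc(struct_size(msg, data, payload_len), GFP_KERNEL);

	return msg;
}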
#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1e3edbbacb1e..204d807e755b 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1585,7 +1585,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
goto err;
}
- hbuf_len = mei_slots2data(hbuf_slots);
+ hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
@@ -1718,7 +1718,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
goto out;
}
- hbuf_len = mei_slots2data(hbuf_slots);
+ hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 87a0201ba6b3..9392934e3a06 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -75,9 +75,9 @@
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
-#define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */
+#define MEI_DEV_ID_CNP_LP_3 0x9DE4 /* Cannon Point LP 3 (iTouch) */
#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
-#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
+#define MEI_DEV_ID_CNP_H_3 0xA364 /* Cannon Point H 3 (iTouch) */
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
@@ -87,6 +87,8 @@
#define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */
#define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */
+#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */
+
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index d025a5f8317e..b1a8d5ec88b3 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -209,11 +209,14 @@ struct mei_msg_hdr {
u32 extension[0];
} __packed;
+/* The length is up to 9 bits */
+#define MEI_MSG_MAX_LEN_MASK GENMASK(8, 0)
+
#define MEI_MSG_HDR_MAX 2
struct mei_bus_message {
u8 hbm_cmd;
- u8 data[0];
+ u8 data[];
} __packed;
/**
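
MEI_MSG_MAX_LEN_MASK clamps the host-buffer byte count to what the 9-bit length field of struct mei_msg_hdr can carry, which is how client.c above bounds hbuf_len. GENMASK(h, l) sets bits h..l, so GENMASK(8, 0) is 0x1ff. A userspace rendition; the macro body mirrors the kernel's definition:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	unsigned long mask = GENMASK(8, 0);	/* bits 8..0 -> 0x1ff */
	unsigned long hbuf_len = 0x1234;

	printf("mask    = 0x%lx\n", mask);
	printf("clamped = 0x%lx\n", hbuf_len & mask);	/* prints 0x34 */
	return 0;
}
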
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 76f8ff5ff974..3a29db07211d 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -533,7 +533,7 @@ struct mei_device {
#endif /* CONFIG_DEBUG_FS */
const struct mei_hw_ops *ops;
- char hw[0] __aligned(sizeof(void *));
+ char hw[] __aligned(sizeof(void *));
};
static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 2711451b3d87..3d21c38e2dbb 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -1,25 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/fcntl.h>
#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/compat.h>
-#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/pm_domain.h>
@@ -92,9 +83,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
@@ -111,6 +102,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index f1c16a587495..beacf2a2f2b5 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -1,20 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2017, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 4f2d9212432c..fb5b3989753d 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -137,7 +137,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
dma_addr_t tmp;
- void *va = kmalloc(size, gfp | __GFP_ZERO);
+ void *va = kzalloc(size, gfp);
if (va) {
tmp = mic_map_single(mdev, va, size);
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index a7743312da9c..d18cda966912 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -350,10 +350,10 @@ mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw)
if (!buf)
return -ENOMEM;
- len += snprintf(buf, CMDLINE_SIZE - len,
+ len += scnprintf(buf, CMDLINE_SIZE - len,
" mem=%dM", boot_mem);
if (mdev->cosm_dev->cmdline)
- snprintf(buf + len, CMDLINE_SIZE - len, " %s",
+ scnprintf(buf + len, CMDLINE_SIZE - len, " %s",
mdev->cosm_dev->cmdline);
memcpy_toio(cmd_line_va, buf, strlen(buf) + 1);
kfree(buf);
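
snprintf() returns the length the output would have had without truncation, so using its return value as the offset for the next write can point past the end of the buffer; scnprintf() returns the number of bytes actually stored, keeping buf + len in bounds. A userspace demonstration of the hazard (scnprintf itself is kernel-only, so the clamp is done by hand):

#include <stdio.h>

int main(void)
{
	char buf[8];
	int ret = snprintf(buf, sizeof(buf), "0123456789");

	/* snprintf reports the untruncated length... */
	printf("ret = %d, but buf holds only \"%s\"\n", ret, buf);

	/* ...so a scnprintf-style caller must clamp before advancing. */
	size_t len = (ret < 0) ? 0 :
		     ((size_t)ret >= sizeof(buf) ? sizeof(buf) - 1 : (size_t)ret);
	printf("safe offset = %zu\n", len);
	return 0;
}
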
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index a5e317073d95..ef5a1af6bab7 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
@@ -64,6 +65,9 @@
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
+#define PCI_ENDPOINT_TEST_FLAGS 0x2c
+#define FLAG_USE_DMA BIT(0)
+
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define is_am654_pci_dev(pdev) \
@@ -98,11 +102,13 @@ struct pci_endpoint_test {
struct completion irq_raised;
int last_irq;
int num_irqs;
+ int irq_type;
/* mutex to protect the ioctls */
struct mutex mutex;
struct miscdevice miscdev;
enum pci_barno test_reg_bar;
size_t alignment;
+ const char *name;
};
struct pci_endpoint_test_data {
@@ -157,6 +163,7 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
struct pci_dev *pdev = test->pdev;
pci_free_irq_vectors(pdev);
+ test->irq_type = IRQ_TYPE_UNDEFINED;
}
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
@@ -191,6 +198,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
irq = 0;
res = false;
}
+
+ test->irq_type = type;
test->num_irqs = irq;
return res;
@@ -218,7 +227,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
for (i = 0; i < test->num_irqs; i++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
+ IRQF_SHARED, test->name, test);
if (err)
goto fail;
}
@@ -315,11 +324,16 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
return false;
}
-static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
+static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
+ unsigned long arg)
{
+ struct pci_endpoint_test_xfer_param param;
bool ret = false;
void *src_addr;
void *dst_addr;
+ u32 flags = 0;
+ bool use_dma;
+ size_t size;
dma_addr_t src_phys_addr;
dma_addr_t dst_phys_addr;
struct pci_dev *pdev = test->pdev;
@@ -330,25 +344,46 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_dst_phys_addr;
size_t offset;
size_t alignment = test->alignment;
+ int irq_type = test->irq_type;
u32 src_crc32;
u32 dst_crc32;
+ int err;
+ err = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (err) {
+ dev_err(dev, "Failed to get transfer param\n");
+ return false;
+ }
+
+ size = param.size;
if (size > SIZE_MAX - alignment)
goto err;
+ use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
+ if (use_dma)
+ flags |= FLAG_USE_DMA;
+
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
- orig_src_addr = dma_alloc_coherent(dev, size + alignment,
- &orig_src_phys_addr, GFP_KERNEL);
+ orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_src_addr) {
dev_err(dev, "Failed to allocate source buffer\n");
ret = false;
goto err;
}
+ get_random_bytes(orig_src_addr, size + alignment);
+ orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
+ size + alignment, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, orig_src_phys_addr)) {
+ dev_err(dev, "failed to map source buffer address\n");
+ ret = false;
+ goto err_src_phys_addr;
+ }
+
if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
offset = src_phys_addr - orig_src_phys_addr;
@@ -364,15 +399,21 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
upper_32_bits(src_phys_addr));
- get_random_bytes(src_addr, size);
src_crc32 = crc32_le(~0, src_addr, size);
- orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
- &orig_dst_phys_addr, GFP_KERNEL);
+ orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_dst_addr) {
dev_err(dev, "Failed to allocate destination address\n");
ret = false;
- goto err_orig_src_addr;
+ goto err_dst_addr;
+ }
+
+ orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
+ size + alignment, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, orig_dst_phys_addr)) {
+ dev_err(dev, "failed to map destination buffer address\n");
+ ret = false;
+ goto err_dst_phys_addr;
}
if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
@@ -392,6 +433,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -399,24 +441,34 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
wait_for_completion(&test->irq_raised);
+ dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
+ DMA_FROM_DEVICE);
+
dst_crc32 = crc32_le(~0, dst_addr, size);
if (dst_crc32 == src_crc32)
ret = true;
- dma_free_coherent(dev, size + alignment, orig_dst_addr,
- orig_dst_phys_addr);
+err_dst_phys_addr:
+ kfree(orig_dst_addr);
-err_orig_src_addr:
- dma_free_coherent(dev, size + alignment, orig_src_addr,
- orig_src_phys_addr);
+err_dst_addr:
+ dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
+ DMA_TO_DEVICE);
+
+err_src_phys_addr:
+ kfree(orig_src_addr);
err:
return ret;
}
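
The copy/read/write helpers switch from coherent allocations to kzalloc() plus streaming mappings, so the endpoint test now exercises the dma_map_single() path; each mapping must be validated with dma_mapping_error() and unwound in reverse order, as the relabelled goto targets above show. A condensed kernel-style sketch of the pattern, not the full driver; do_transfer() is a hypothetical stand-in for starting the endpoint and waiting for its interrupt:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int do_transfer(struct device *dev, dma_addr_t phys, size_t size); /* hypothetical */

static int xfer_one(struct device *dev, size_t size)
{
	void *buf;
	dma_addr_t phys;
	int ret = -ENOMEM;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return ret;

	phys = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, phys))	/* a mapping can fail; always check */
		goto free_buf;

	ret = do_transfer(dev, phys, size);

	/* The CPU must not modify buf while the device owns it (map..unmap). */
	dma_unmap_single(dev, phys, size, DMA_TO_DEVICE);
free_buf:
	kfree(buf);
	return ret;
}
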
-static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
+static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
+ unsigned long arg)
{
+ struct pci_endpoint_test_xfer_param param;
bool ret = false;
+ u32 flags = 0;
+ bool use_dma;
u32 reg;
void *addr;
dma_addr_t phys_addr;
@@ -426,24 +478,47 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
+ int irq_type = test->irq_type;
+ size_t size;
u32 crc32;
+ int err;
+
+ err = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (err != 0) {
+ dev_err(dev, "Failed to get transfer param\n");
+ return false;
+ }
+ size = param.size;
if (size > SIZE_MAX - alignment)
goto err;
+ use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
+ if (use_dma)
+ flags |= FLAG_USE_DMA;
+
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
- orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
- GFP_KERNEL);
+ orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate address\n");
ret = false;
goto err;
}
+ get_random_bytes(orig_addr, size + alignment);
+
+ orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, orig_phys_addr)) {
+ dev_err(dev, "failed to map source buffer address\n");
+ ret = false;
+ goto err_phys_addr;
+ }
+
if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
offset = phys_addr - orig_phys_addr;
@@ -453,8 +528,6 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
addr = orig_addr;
}
- get_random_bytes(addr, size);
-
crc32 = crc32_le(~0, addr, size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
crc32);
@@ -466,6 +539,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -477,15 +551,24 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
if (reg & STATUS_READ_SUCCESS)
ret = true;
- dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
+ dma_unmap_single(dev, orig_phys_addr, size + alignment,
+ DMA_TO_DEVICE);
+
+err_phys_addr:
+ kfree(orig_addr);
err:
return ret;
}
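
The size_t argument of these helpers is replaced by a user pointer to a transfer-parameter block copied in with copy_from_user(); the size and the new PCITEST_FLAGS_USE_DMA flag both travel in it. A hedged sketch of the convention; the struct layout shown is assumed, the real one lives in the pcitest UAPI header:

#include <linux/types.h>
#include <linux/uaccess.h>

struct pci_endpoint_test_xfer_param {	/* assumed layout, see UAPI header */
	__u64 size;
	__u8 flags;
};

static int get_xfer_param(struct pci_endpoint_test_xfer_param *param,
			  unsigned long arg)
{
	/* copy_from_user() returns the number of bytes left uncopied. */
	if (copy_from_user(param, (void __user *)arg, sizeof(*param)))
		return -EFAULT;
	return 0;
}
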
-static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
+static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
+ unsigned long arg)
{
+ struct pci_endpoint_test_xfer_param param;
bool ret = false;
+ u32 flags = 0;
+ bool use_dma;
+ size_t size;
void *addr;
dma_addr_t phys_addr;
struct pci_dev *pdev = test->pdev;
@@ -494,24 +577,44 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
+ int irq_type = test->irq_type;
u32 crc32;
+ int err;
+
+ err = copy_from_user(&param, (void __user *)arg, sizeof(param));
+ if (err) {
+ dev_err(dev, "Failed to get transfer param\n");
+ return false;
+ }
+ size = param.size;
if (size > SIZE_MAX - alignment)
goto err;
+ use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
+ if (use_dma)
+ flags |= FLAG_USE_DMA;
+
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
- orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
- GFP_KERNEL);
+ orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err;
}
+ orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, orig_phys_addr)) {
+ dev_err(dev, "failed to map source buffer address\n");
+ ret = false;
+ goto err_phys_addr;
+ }
+
if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
offset = phys_addr - orig_phys_addr;
@@ -528,6 +631,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
@@ -535,15 +639,26 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
wait_for_completion(&test->irq_raised);
+ dma_unmap_single(dev, orig_phys_addr, size + alignment,
+ DMA_FROM_DEVICE);
+
crc32 = crc32_le(~0, addr, size);
if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
ret = true;
- dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
+err_phys_addr:
+ kfree(orig_addr);
err:
return ret;
}
+static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
+{
+ pci_endpoint_test_release_irq(test);
+ pci_endpoint_test_free_irq_vectors(test);
+ return true;
+}
+
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
int req_irq_type)
{
@@ -555,7 +670,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
return false;
}
- if (irq_type == req_irq_type)
+ if (test->irq_type == req_irq_type)
return true;
pci_endpoint_test_release_irq(test);
@@ -567,12 +682,10 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
if (!pci_endpoint_test_request_irq(test))
goto err;
- irq_type = req_irq_type;
return true;
err:
pci_endpoint_test_free_irq_vectors(test);
- irq_type = IRQ_TYPE_UNDEFINED;
return false;
}
@@ -616,6 +729,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
case PCITEST_GET_IRQTYPE:
ret = irq_type;
break;
+ case PCITEST_CLEAR_IRQ:
+ ret = pci_endpoint_test_clear_irq(test);
+ break;
}
ret:
@@ -633,7 +749,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
{
int err;
int id;
- char name[20];
+ char name[24];
enum pci_barno bar;
void __iomem *base;
struct device *dev = &pdev->dev;
@@ -652,6 +768,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->test_reg_bar = 0;
test->alignment = 0;
test->pdev = pdev;
+ test->irq_type = IRQ_TYPE_UNDEFINED;
if (no_msi)
irq_type = IRQ_TYPE_LEGACY;
@@ -667,6 +784,12 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
init_completion(&test->irq_raised);
mutex_init(&test->mutex);
+ if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(dev, "Cannot set DMA mask\n");
+ return -EINVAL;
+ }
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Cannot enable PCI device\n");
@@ -684,9 +807,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
goto err_disable_irq;
- if (!pci_endpoint_test_request_irq(test))
- goto err_disable_irq;
-
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
@@ -716,12 +836,21 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
}
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
+ test->name = kstrdup(name, GFP_KERNEL);
+ if (!test->name) {
+ err = -ENOMEM;
+ goto err_ida_remove;
+ }
+
+ if (!pci_endpoint_test_request_irq(test))
+ goto err_kfree_test_name;
+
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
misc_device->name = kstrdup(name, GFP_KERNEL);
if (!misc_device->name) {
err = -ENOMEM;
- goto err_ida_remove;
+ goto err_release_irq;
}
misc_device->fops = &pci_endpoint_test_fops,
@@ -736,6 +865,12 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
err_kfree_name:
kfree(misc_device->name);
+err_release_irq:
+ pci_endpoint_test_release_irq(test);
+
+err_kfree_test_name:
+ kfree(test->name);
+
err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
@@ -744,7 +879,6 @@ err_iounmap:
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
- pci_endpoint_test_release_irq(test);
err_disable_irq:
pci_endpoint_test_free_irq_vectors(test);
@@ -770,6 +904,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
misc_deregister(&test->miscdev);
kfree(misc_device->name);
+ kfree(test->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
@@ -783,6 +918,12 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static const struct pci_endpoint_test_data default_data = {
+ .test_reg_bar = BAR_0,
+ .alignment = SZ_4K,
+ .irq_type = IRQ_TYPE_MSI,
+};
+
static const struct pci_endpoint_test_data am654_data = {
.test_reg_bar = BAR_2,
.alignment = SZ_64K,
@@ -790,8 +931,12 @@ static const struct pci_endpoint_test_data am654_data = {
};
static const struct pci_device_id pci_endpoint_test_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
+ .driver_data = (kernel_ulong_t)&default_data,
+ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
+ .driver_data = (kernel_ulong_t)&default_data,
+ },
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
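
The new default_data entries show how per-chip parameters ride on the PCI match table: the address of a static pci_endpoint_test_data is cast to kernel_ulong_t in .driver_data, and probe() reads it back from the pci_device_id it matched. A minimal sketch of the retrieval side, assuming the table above:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pci_endpoint_test_data *data =
		(struct pci_endpoint_test_data *)ent->driver_data;

	if (data)	/* per-device defaults chosen purely by the matched entry */
		pr_info("bar=%d alignment=%zu irq_type=%d\n",
			data->test_reg_bar, data->alignment, data->irq_type);
	return 0;
}
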
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index e77d1b1f9d05..85c103923632 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -136,7 +136,7 @@ struct gru_dump_context_header {
pid_t pid;
unsigned long vaddr;
int cch_locked;
- unsigned long data[0];
+ unsigned long data[];
};
/*
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index a7e44b2eb413..5ce8f3081e96 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -372,7 +372,7 @@ struct gru_thread_state {
int ts_data_valid; /* Indicates if ts_gdata has
valid data */
struct gru_gseg_statistics ustats; /* User statistics */
- unsigned long ts_gdata[0]; /* save area for GRU data (CB,
+ unsigned long ts_gdata[]; /* save area for GRU data (CB,
DS, CBE) */
};
diff --git a/drivers/misc/uacce/Kconfig b/drivers/misc/uacce/Kconfig
new file mode 100644
index 000000000000..5e39b6083b0f
--- /dev/null
+++ b/drivers/misc/uacce/Kconfig
@@ -0,0 +1,13 @@
+config UACCE
+ tristate "Accelerator Framework for User Land"
+ depends on IOMMU_API
+ help
+ UACCE provides an interface for user-space processes to access the
+ hardware directly, without going through the kernel in the data path.
+
+ The user-space interface is described in
+ include/uapi/misc/uacce/uacce.h
+
+ See Documentation/misc-devices/uacce.rst for more details.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/misc/uacce/Makefile b/drivers/misc/uacce/Makefile
new file mode 100644
index 000000000000..5b4374e8b5f2
--- /dev/null
+++ b/drivers/misc/uacce/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+obj-$(CONFIG_UACCE) += uacce.o
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
new file mode 100644
index 000000000000..d39307f060bd
--- /dev/null
+++ b/drivers/misc/uacce/uacce.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/compat.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/uacce.h>
+
+static struct class *uacce_class;
+static dev_t uacce_devt;
+static DEFINE_MUTEX(uacce_mutex);
+static DEFINE_XARRAY_ALLOC(uacce_xa);
+
+static int uacce_start_queue(struct uacce_queue *q)
+{
+ int ret = 0;
+
+ mutex_lock(&uacce_mutex);
+
+ if (q->state != UACCE_Q_INIT) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ if (q->uacce->ops->start_queue) {
+ ret = q->uacce->ops->start_queue(q);
+ if (ret < 0)
+ goto out_with_lock;
+ }
+
+ q->state = UACCE_Q_STARTED;
+
+out_with_lock:
+ mutex_unlock(&uacce_mutex);
+
+ return ret;
+}
+
+static int uacce_put_queue(struct uacce_queue *q)
+{
+ struct uacce_device *uacce = q->uacce;
+
+ mutex_lock(&uacce_mutex);
+
+ if (q->state == UACCE_Q_ZOMBIE)
+ goto out;
+
+ if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
+ uacce->ops->stop_queue(q);
+
+ if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
+ uacce->ops->put_queue)
+ uacce->ops->put_queue(q);
+
+ q->state = UACCE_Q_ZOMBIE;
+out:
+ mutex_unlock(&uacce_mutex);
+
+ return 0;
+}
+
+static long uacce_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
+
+ switch (cmd) {
+ case UACCE_CMD_START_Q:
+ return uacce_start_queue(q);
+
+ case UACCE_CMD_PUT_Q:
+ return uacce_put_queue(q);
+
+ default:
+ if (!uacce->ops->ioctl)
+ return -EINVAL;
+
+ return uacce->ops->ioctl(q, cmd, arg);
+ }
+}
+
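
uacce_fops_unl_ioctl() handles the two framework-level commands itself and hands anything unrecognized to the driver's optional ->ioctl hook, so device-specific commands share one file descriptor without the core knowing them. A userspace-compilable sketch of the dispatch shape (the command numbers and drv_ioctl() are hypothetical):

#include <errno.h>
#include <stdio.h>

struct ops { long (*ioctl)(unsigned int cmd, unsigned long arg); };

enum { CMD_START = 1, CMD_PUT = 2 };	/* hypothetical generic commands */

static long dispatch(const struct ops *ops, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case CMD_START:
	case CMD_PUT:
		return 0;			/* handled by the framework */
	default:
		if (!ops->ioctl)
			return -EINVAL;		/* nobody claims this command */
		return ops->ioctl(cmd, arg);	/* delegate to the driver */
	}
}

static long drv_ioctl(unsigned int cmd, unsigned long arg)
{
	printf("driver-specific cmd %u, arg %lu\n", cmd, arg);
	return 0;
}

int main(void)
{
	struct ops ops = { .ioctl = drv_ioctl };
	return (int)dispatch(&ops, 42, 7);
}
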
+#ifdef CONFIG_COMPAT
+static long uacce_fops_compat_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ arg = (unsigned long)compat_ptr(arg);
+
+ return uacce_fops_unl_ioctl(filep, cmd, arg);
+}
+#endif
+
+static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
+ void *data)
+{
+ struct uacce_mm *uacce_mm = data;
+ struct uacce_queue *q;
+
+ /*
+ * No new queue can be added concurrently because no caller can have a
+ * reference to this mm. But there may be concurrent calls to
+ * uacce_mm_put(), so we need the lock.
+ */
+ mutex_lock(&uacce_mm->lock);
+ list_for_each_entry(q, &uacce_mm->queues, list)
+ uacce_put_queue(q);
+ uacce_mm->mm = NULL;
+ mutex_unlock(&uacce_mm->lock);
+
+ return 0;
+}
+
+static struct iommu_sva_ops uacce_sva_ops = {
+ .mm_exit = uacce_sva_exit,
+};
+
+static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
+ struct uacce_queue *q,
+ struct mm_struct *mm)
+{
+ struct uacce_mm *uacce_mm = NULL;
+ struct iommu_sva *handle = NULL;
+ int ret;
+
+ lockdep_assert_held(&uacce->mm_lock);
+
+ list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
+ if (uacce_mm->mm == mm) {
+ mutex_lock(&uacce_mm->lock);
+ list_add(&q->list, &uacce_mm->queues);
+ mutex_unlock(&uacce_mm->lock);
+ return uacce_mm;
+ }
+ }
+
+ uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
+ if (!uacce_mm)
+ return NULL;
+
+ if (uacce->flags & UACCE_DEV_SVA) {
+ /*
+ * Safe to pass an incomplete uacce_mm, since mm_exit cannot
+ * fire while we hold a reference to the mm.
+ */
+ handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
+ if (IS_ERR(handle))
+ goto err_free;
+
+ ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
+ if (ret)
+ goto err_unbind;
+
+ uacce_mm->pasid = iommu_sva_get_pasid(handle);
+ if (uacce_mm->pasid == IOMMU_PASID_INVALID)
+ goto err_unbind;
+ }
+
+ uacce_mm->mm = mm;
+ uacce_mm->handle = handle;
+ INIT_LIST_HEAD(&uacce_mm->queues);
+ mutex_init(&uacce_mm->lock);
+ list_add(&q->list, &uacce_mm->queues);
+ list_add(&uacce_mm->list, &uacce->mm_list);
+
+ return uacce_mm;
+
+err_unbind:
+ if (handle)
+ iommu_sva_unbind_device(handle);
+err_free:
+ kfree(uacce_mm);
+ return NULL;
+}
+
+static void uacce_mm_put(struct uacce_queue *q)
+{
+ struct uacce_mm *uacce_mm = q->uacce_mm;
+
+ lockdep_assert_held(&q->uacce->mm_lock);
+
+ mutex_lock(&uacce_mm->lock);
+ list_del(&q->list);
+ mutex_unlock(&uacce_mm->lock);
+
+ if (list_empty(&uacce_mm->queues)) {
+ if (uacce_mm->handle)
+ iommu_sva_unbind_device(uacce_mm->handle);
+ list_del(&uacce_mm->list);
+ kfree(uacce_mm);
+ }
+}
+
+static int uacce_fops_open(struct inode *inode, struct file *filep)
+{
+ struct uacce_mm *uacce_mm = NULL;
+ struct uacce_device *uacce;
+ struct uacce_queue *q;
+ int ret = 0;
+
+ uacce = xa_load(&uacce_xa, iminor(inode));
+ if (!uacce)
+ return -ENODEV;
+
+ q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
+ mutex_lock(&uacce->mm_lock);
+ uacce_mm = uacce_mm_get(uacce, q, current->mm);
+ mutex_unlock(&uacce->mm_lock);
+ if (!uacce_mm) {
+ ret = -ENOMEM;
+ goto out_with_mem;
+ }
+
+ q->uacce = uacce;
+ q->uacce_mm = uacce_mm;
+
+ if (uacce->ops->get_queue) {
+ ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+ if (ret < 0)
+ goto out_with_mm;
+ }
+
+ init_waitqueue_head(&q->wait);
+ filep->private_data = q;
+ uacce->inode = inode;
+ q->state = UACCE_Q_INIT;
+
+ return 0;
+
+out_with_mm:
+ mutex_lock(&uacce->mm_lock);
+ uacce_mm_put(q);
+ mutex_unlock(&uacce->mm_lock);
+out_with_mem:
+ kfree(q);
+ return ret;
+}
+
+static int uacce_fops_release(struct inode *inode, struct file *filep)
+{
+ struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
+
+ uacce_put_queue(q);
+
+ mutex_lock(&uacce->mm_lock);
+ uacce_mm_put(q);
+ mutex_unlock(&uacce->mm_lock);
+
+ kfree(q);
+
+ return 0;
+}
+
+static vm_fault_t uacce_vma_fault(struct vm_fault *vmf)
+{
+ if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ return 0;
+}
+
+static void uacce_vma_close(struct vm_area_struct *vma)
+{
+ struct uacce_queue *q = vma->vm_private_data;
+ struct uacce_qfile_region *qfr = NULL;
+
+ if (vma->vm_pgoff < UACCE_MAX_REGION)
+ qfr = q->qfrs[vma->vm_pgoff];
+
+ kfree(qfr);
+}
+
+static const struct vm_operations_struct uacce_vm_ops = {
+ .fault = uacce_vma_fault,
+ .close = uacce_vma_close,
+};
+
+static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
+ struct uacce_qfile_region *qfr;
+ enum uacce_qfrt type = UACCE_MAX_REGION;
+ int ret = 0;
+
+ if (vma->vm_pgoff < UACCE_MAX_REGION)
+ type = vma->vm_pgoff;
+ else
+ return -EINVAL;
+
+ qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
+ if (!qfr)
+ return -ENOMEM;
+
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+ vma->vm_ops = &uacce_vm_ops;
+ vma->vm_private_data = q;
+ qfr->type = type;
+
+ mutex_lock(&uacce_mutex);
+
+ if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ if (q->qfrs[type]) {
+ ret = -EEXIST;
+ goto out_with_lock;
+ }
+
+ switch (type) {
+ case UACCE_QFRT_MMIO:
+ if (!uacce->ops->mmap) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ ret = uacce->ops->mmap(q, vma, qfr);
+ if (ret)
+ goto out_with_lock;
+
+ break;
+
+ case UACCE_QFRT_DUS:
+ if (!uacce->ops->mmap) {
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ ret = uacce->ops->mmap(q, vma, qfr);
+ if (ret)
+ goto out_with_lock;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto out_with_lock;
+ }
+
+ q->qfrs[type] = qfr;
+ mutex_unlock(&uacce_mutex);
+
+ return ret;
+
+out_with_lock:
+ mutex_unlock(&uacce_mutex);
+ kfree(qfr);
+ return ret;
+}
+
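
uacce_fops_mmap() keys the queue file region type off vm_pgoff, so user space selects MMIO (pgoff 0) or DUS (pgoff 1) purely through the mmap offset. A hypothetical userspace usage sketch; the device node name and the one-page sizes are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/hisi_zip-0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	/* vm_pgoff = offset / page size, so pgoff 0 requests UACCE_QFRT_MMIO
	 * and pgoff 1 requests UACCE_QFRT_DUS. */
	void *mmio = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	void *dus = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 1 * pagesz);

	if (mmio == MAP_FAILED || dus == MAP_FAILED)
		perror("mmap");
	close(fd);
	return 0;
}
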
+static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
+{
+ struct uacce_queue *q = file->private_data;
+ struct uacce_device *uacce = q->uacce;
+
+ poll_wait(file, &q->wait, wait);
+ if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
+ return EPOLLIN | EPOLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations uacce_fops = {
+ .owner = THIS_MODULE,
+ .open = uacce_fops_open,
+ .release = uacce_fops_release,
+ .unlocked_ioctl = uacce_fops_unl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = uacce_fops_compat_ioctl,
+#endif
+ .mmap = uacce_fops_mmap,
+ .poll = uacce_fops_poll,
+};
+
+#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)
+
+static ssize_t api_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%s\n", uacce->api_ver);
+}
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%u\n", uacce->flags);
+}
+
+static ssize_t available_instances_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ if (!uacce->ops->get_available_instances)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n",
+ uacce->ops->get_available_instances(uacce));
+}
+
+static ssize_t algorithms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%s\n", uacce->algs);
+}
+
+static ssize_t region_mmio_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%lu\n",
+ uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
+}
+
+static ssize_t region_dus_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sprintf(buf, "%lu\n",
+ uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
+}
+
+static DEVICE_ATTR_RO(api);
+static DEVICE_ATTR_RO(flags);
+static DEVICE_ATTR_RO(available_instances);
+static DEVICE_ATTR_RO(algorithms);
+static DEVICE_ATTR_RO(region_mmio_size);
+static DEVICE_ATTR_RO(region_dus_size);
+
+static struct attribute *uacce_dev_attrs[] = {
+ &dev_attr_api.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_available_instances.attr,
+ &dev_attr_algorithms.attr,
+ &dev_attr_region_mmio_size.attr,
+ &dev_attr_region_dus_size.attr,
+ NULL,
+};
+
+static umode_t uacce_dev_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ if (((attr == &dev_attr_region_mmio_size.attr) &&
+ (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
+ ((attr == &dev_attr_region_dus_size.attr) &&
+ (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group uacce_dev_group = {
+ .is_visible = uacce_dev_is_visible,
+ .attrs = uacce_dev_attrs,
+};
+
+__ATTRIBUTE_GROUPS(uacce_dev);
+
+static void uacce_release(struct device *dev)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ kfree(uacce);
+}
+
+/**
+ * uacce_alloc() - alloc an accelerator
+ * @parent: pointer of uacce parent device
+ * @interface: pointer of uacce_interface for register
+ *
+ * Returns a uacce pointer on success, or an ERR_PTR on failure.
+ * The caller must check the returned, negotiated uacce->flags.
+ */
+struct uacce_device *uacce_alloc(struct device *parent,
+ struct uacce_interface *interface)
+{
+ unsigned int flags = interface->flags;
+ struct uacce_device *uacce;
+ int ret;
+
+ uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
+ if (!uacce)
+ return ERR_PTR(-ENOMEM);
+
+ if (flags & UACCE_DEV_SVA) {
+ ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
+ if (ret)
+ flags &= ~UACCE_DEV_SVA;
+ }
+
+ uacce->parent = parent;
+ uacce->flags = flags;
+ uacce->ops = interface->ops;
+
+ ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto err_with_uacce;
+
+ INIT_LIST_HEAD(&uacce->mm_list);
+ mutex_init(&uacce->mm_lock);
+ device_initialize(&uacce->dev);
+ uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
+ uacce->dev.class = uacce_class;
+ uacce->dev.groups = uacce_dev_groups;
+ uacce->dev.parent = uacce->parent;
+ uacce->dev.release = uacce_release;
+ dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);
+
+ return uacce;
+
+err_with_uacce:
+ if (flags & UACCE_DEV_SVA)
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+ kfree(uacce);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(uacce_alloc);
+
+/**
+ * uacce_register() - add the accelerator to cdev and export to user space
+ * @uacce: The initialized uacce device
+ *
+ * Return 0 if register succeeded, or an error.
+ */
+int uacce_register(struct uacce_device *uacce)
+{
+ if (!uacce)
+ return -ENODEV;
+
+ uacce->cdev = cdev_alloc();
+ if (!uacce->cdev)
+ return -ENOMEM;
+
+ uacce->cdev->ops = &uacce_fops;
+ uacce->cdev->owner = THIS_MODULE;
+
+ return cdev_device_add(uacce->cdev, &uacce->dev);
+}
+EXPORT_SYMBOL_GPL(uacce_register);
+
+/**
+ * uacce_remove() - remove the accelerator
+ * @uacce: the accelerator to remove
+ */
+void uacce_remove(struct uacce_device *uacce)
+{
+ struct uacce_mm *uacce_mm;
+ struct uacce_queue *q;
+
+ if (!uacce)
+ return;
+ /*
+ * Unmap all remaining mappings from user space, preventing user space
+ * from accessing the mmapped area while the parent device is being removed
+ */
+ if (uacce->inode)
+ unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
+
+ /* ensure no open queue remains */
+ mutex_lock(&uacce->mm_lock);
+ list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
+ /*
+ * We don't take the uacce_mm->lock here. Since we hold the
+ * device's mm_lock, no queue can be added to or removed from
+ * this uacce_mm. We may run concurrently with mm_exit, but
+ * uacce_put_queue() is serialized and iommu_sva_unbind_device()
+ * waits for the lock that mm_exit is holding.
+ */
+ list_for_each_entry(q, &uacce_mm->queues, list)
+ uacce_put_queue(q);
+
+ if (uacce->flags & UACCE_DEV_SVA) {
+ iommu_sva_unbind_device(uacce_mm->handle);
+ uacce_mm->handle = NULL;
+ }
+ }
+ mutex_unlock(&uacce->mm_lock);
+
+ /* disable SVA now that no queues remain open */
+ if (uacce->flags & UACCE_DEV_SVA)
+ iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
+
+ if (uacce->cdev)
+ cdev_device_del(uacce->cdev, &uacce->dev);
+ xa_erase(&uacce_xa, uacce->dev_id);
+ put_device(&uacce->dev);
+}
+EXPORT_SYMBOL_GPL(uacce_remove);
+
+static int __init uacce_init(void)
+{
+ int ret;
+
+ uacce_class = class_create(THIS_MODULE, UACCE_NAME);
+ if (IS_ERR(uacce_class))
+ return PTR_ERR(uacce_class);
+
+ ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
+ if (ret)
+ class_destroy(uacce_class);
+
+ return ret;
+}
+
+static __exit void uacce_exit(void)
+{
+ unregister_chrdev_region(uacce_devt, MINORMASK);
+ class_destroy(uacce_class);
+}
+
+subsys_initcall(uacce_init);
+module_exit(uacce_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Accelerator interface for Userland applications");
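
For a parent driver, the exported surface boils down to uacce_alloc() / uacce_register() at probe time and uacce_remove() on the way out, checking the negotiated flags in between. A hedged sketch; the ops table, interface name and platform driver are hypothetical, only the three uacce_* calls come from the code above:

#include <linux/platform_device.h>
#include <linux/uacce.h>

static const struct uacce_ops example_uacce_ops;	/* hypothetical ops */
static struct uacce_device *example_uacce;

static int example_probe(struct platform_device *pdev)
{
	struct uacce_interface iface = {
		.name	= "example",		/* becomes "example-<id>" in /dev */
		.flags	= UACCE_DEV_SVA,	/* may be negotiated away */
		.ops	= &example_uacce_ops,
	};

	example_uacce = uacce_alloc(&pdev->dev, &iface);
	if (IS_ERR(example_uacce))
		return PTR_ERR(example_uacce);

	if (!(example_uacce->flags & UACCE_DEV_SVA))
		dev_info(&pdev->dev, "running without SVA\n");

	return uacce_register(example_uacce);
}

static int example_remove(struct platform_device *pdev)
{
	uacce_remove(example_uacce);
	return 0;
}
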
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 058fcd7f9f01..a431787c0898 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -42,7 +42,7 @@ struct vexpress_syscfg_func {
struct vexpress_syscfg *syscfg;
struct regmap *regmap;
int num_templates;
- u32 template[0]; /* Keep it last! */
+ u32 template[]; /* Keep it last! */
};
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 00a79489067c..142c0f9485fe 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -834,7 +834,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
/* Someone else might have been playing with it. */
return -EAGAIN;
}
- /* Fall through */
+ fallthrough;
case FL_READY:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
@@ -907,7 +907,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* Fall through */
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
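
The /* fall through */ comments across the CFI, LPDDR and MTD map drivers become the fallthrough pseudo-keyword, which expands to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can tell deliberate fall-through from a forgotten break. A userspace-compilable illustration in the cfi_build_cmd() style:

#include <stdio.h>

/* Userspace stand-in for the kernel's pseudo-keyword. */
#if defined(__has_attribute)
#  if __has_attribute(__fallthrough__)
#    define fallthrough __attribute__((__fallthrough__))
#  endif
#endif
#ifndef fallthrough
#  define fallthrough do {} while (0)
#endif

/* Replicate one byte across a word: each case deliberately falls
 * through to the narrower doubling steps. */
static unsigned long replicate(unsigned long cmd, int width)
{
	switch (width) {
	case 4:
		cmd |= cmd << 16;
		fallthrough;
	case 2:
		cmd |= cmd << 8;
		fallthrough;
	case 1:
		break;
	}
	return cmd;
}

int main(void)
{
	printf("0x%lx\n", replicate(0xA5, 4));	/* prints 0xa5a5a5a5 */
	return 0;
}
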
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 04b383bc3947..a1f3e1031c3d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -966,8 +966,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* fall through */
-
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -2935,7 +2934,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
- /* fall through */
+ fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 54edae63b92d..270322bca221 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -324,8 +324,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
case FL_JEDEC_QUERY:
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, cmd_addr);
if (map_word_andequal(map, status, status_OK, status_OK)) {
@@ -462,8 +461,7 @@ static int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -756,8 +754,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -998,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
- /* Fall through */
+ fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
@@ -1054,8 +1051,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
@@ -1201,8 +1197,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- /* Fall through */
-
+ fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index e2d4db05aeb3..99b7986002f0 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -109,13 +109,13 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
case 8:
onecmd |= (onecmd << (chip_mode * 32));
#endif
- /* fall through */
+ fallthrough;
case 4:
onecmd |= (onecmd << (chip_mode * 16));
- /* fall through */
+ fallthrough;
case 2:
onecmd |= (onecmd << (chip_mode * 8));
- /* fall through */
+ fallthrough;
case 1:
;
}
@@ -165,13 +165,13 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
case 8:
res |= (onestat >> (chip_mode * 32));
#endif
- /* fall through */
+ fallthrough;
case 4:
res |= (onestat >> (chip_mode * 16));
- /* fall through */
+ fallthrough;
case 2:
res |= (onestat >> (chip_mode * 8));
- /* fall through */
+ fallthrough;
case 1:
;
}
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 36aa082f6db0..c08721b11642 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -329,10 +329,10 @@ static int ustrtoul(const char *cp, char **endp, unsigned int base)
switch (**endp) {
case 'G' :
result *= 1024;
- /* fall through */
+ fallthrough;
case 'M':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'K':
case 'k':
result *= 1024;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 931e5c2481b5..087b5e86d1bf 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -148,10 +148,10 @@ static int parse_num64(uint64_t *num64, char *token)
switch (token[len - 2]) {
case 'G':
shift += 10;
- /* fall through */
+ fallthrough;
case 'M':
shift += 10;
- /* fall through */
+ fallthrough;
case 'k':
shift += 10;
token[len - 2] = 0;
@@ -243,22 +243,25 @@ static int phram_setup(const char *val)
ret = parse_num64(&start, token[1]);
if (ret) {
- kfree(name);
parse_err("illegal start address\n");
+ goto error;
}
ret = parse_num64(&len, token[2]);
if (ret) {
- kfree(name);
parse_err("illegal device length\n");
+ goto error;
}
ret = register_device(name, start, len);
- if (!ret)
- pr_info("%s device: %#llx at %#llx\n", name, len, start);
- else
- kfree(name);
+ if (ret)
+ goto error;
+
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
+ return 0;
+error:
+ kfree(name);
return ret;
}
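
The phram_setup() rework funnels every failure through one error label so name is freed exactly once, instead of sprinkling kfree(name) across the branches. The same single-owner cleanup shape in a standalone sketch; parse_num() and register_device() are simplified stand-ins, and as in phram, register_device() is assumed to take ownership of name on success:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_num(long *out, const char *s) { *out = atol(s); return *out <= 0; }
/* A real register_device() would keep name; this stand-in only prints. */
static int register_device(const char *name, long start, long len)
{ printf("%s: %#lx at %#lx\n", name, len, start); return 0; }

static int setup(const char *n, const char *s, const char *l)
{
	long start, len;
	char *name = strdup(n);
	int ret = -1;

	if (!name)
		return -1;
	if (parse_num(&start, s))
		goto error;		/* one cleanup path for every failure */
	if (parse_num(&len, l))
		goto error;
	ret = register_device(name, start, len);
	if (ret)
		goto error;
	return 0;			/* success: ownership moved to register_device */
error:
	free(name);
	return ret;
}

int main(void) { return setup("phram0", "4096", "65536"); }
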
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index 08d543b124cd..f350a0809f88 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -11,6 +11,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -57,8 +58,10 @@ static const struct hyperbus_ops am654_hbmc_ops = {
static int am654_hbmc_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct am654_hbmc_priv *priv;
+ struct resource res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -67,6 +70,10 @@ static int am654_hbmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
if (of_property_read_bool(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
@@ -88,6 +95,11 @@ static int am654_hbmc_probe(struct platform_device *pdev)
goto disable_pm;
}
+ priv->hbdev.map.size = resource_size(&res);
+ priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(priv->hbdev.map.virt))
+ return PTR_ERR(priv->hbdev.map.virt);
+
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
priv->hbdev.ctlr = &priv->ctlr;
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
index 6af9ea34117d..32685e8dd278 100644
--- a/drivers/mtd/hyperbus/hyperbus-core.c
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -10,7 +10,6 @@
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/types.h>
static struct hyperbus_device *map_to_hbdev(struct map_info *map)
@@ -62,7 +61,6 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
struct hyperbus_ctlr *ctlr;
struct device_node *np;
struct map_info *map;
- struct resource res;
struct device *dev;
int ret;
@@ -73,22 +71,15 @@ int hyperbus_register_device(struct hyperbus_device *hbdev)
np = hbdev->np;
ctlr = hbdev->ctlr;
- if (!of_device_is_compatible(np, "cypress,hyperflash"))
+ if (!of_device_is_compatible(np, "cypress,hyperflash")) {
+ dev_err(ctlr->dev, "\"cypress,hyperflash\" compatible missing\n");
return -ENODEV;
+ }
hbdev->memtype = HYPERFLASH;
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- return ret;
-
dev = ctlr->dev;
map = &hbdev->map;
- map->size = resource_size(&res);
- map->virt = devm_ioremap_resource(dev, &res);
- if (IS_ERR(map->virt))
- return PTR_ERR(map->virt);
-
map->name = dev_name(dev);
map->bankwidth = 2;
map->device_node = np;
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 54b176d4319f..af16d3485de0 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -130,7 +130,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
+ " BlockMultiplierBits = %d\n"
" FormatFlgs = %d\n"
" OsakVersion = 0x%x\n"
" PercentUsed = %d\n",
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 1efc643c9871..fb1cbc9a2870 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -68,7 +68,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
GFP_KERNEL);
if (!shared) {
- kfree(lpddr);
kfree(mtd);
return NULL;
}
@@ -305,8 +304,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
- /* fall through */
-
+ fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 47602af4ee34..d3d4e987c163 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -34,7 +34,7 @@ struct sa_subdev_info {
struct sa_info {
struct mtd_info *mtd;
int num_subdev;
- struct sa_subdev_info subdev[0];
+ struct sa_subdev_info subdev[];
};
static DEFINE_SPINLOCK(sa1100_vpp_lock);
@@ -81,8 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
default:
printk(KERN_WARNING "SA1100 flash: unknown base address "
"0x%08lx, assuming CS0\n", phys);
- /* Fall through */
-
+ fallthrough;
case SA1100_CS0_PHYS:
subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
break;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index c06b5322d470..078e0f67377d 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -294,12 +294,13 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
+ int ret;
mutex_lock(&mtdblk->cache_mutex);
- write_cached_data(mtdblk);
+ ret = write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
mtd_sync(dev->mtd);
- return 0;
+ return ret;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index b841008a9eb7..c5935b2f9cd1 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -349,6 +349,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops = {};
uint32_t retlen;
@@ -360,7 +361,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->_write_oob)
+ if (!master->_write_oob)
return -EOPNOTSUPP;
ops.ooblen = length;
@@ -586,6 +587,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
struct mtd_oob_ops ops = {};
const void __user *usr_data, *usr_oob;
@@ -597,9 +599,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_data = (const void __user *)(uintptr_t)req.usr_data;
usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
- if (!mtd->_write_oob)
+ if (!master->_write_oob)
return -EOPNOTSUPP;
-
ops.mode = req.mode;
ops.len = (size_t)req.len;
ops.ooblen = (size_t)req.ooblen;
@@ -635,6 +636,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ struct mtd_info *master = mtd_get_master(mtd);
void __user *argp = (void __user *)arg;
int ret = 0;
struct mtd_info_user info;
@@ -824,7 +826,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_oobinfo oi;
- if (!mtd->ooblayout)
+ if (!master->ooblayout)
return -EOPNOTSUPP;
ret = get_oobinfo(mtd, &oi);
@@ -918,7 +920,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_ecclayout_user *usrlay;
- if (!mtd->ooblayout)
+ if (!master->ooblayout)
return -EOPNOTSUPP;
usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5fac4355b9c2..2916674208b3 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -456,13 +456,14 @@ static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
struct mtd_pairing_info *info)
{
- int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+ int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
if (wunit < 0 || wunit >= npairs)
return -EINVAL;
- if (mtd->pairing && mtd->pairing->get_info)
- return mtd->pairing->get_info(mtd, wunit, info);
+ if (master->pairing && master->pairing->get_info)
+ return master->pairing->get_info(master, wunit, info);
info->group = 0;
info->pair = wunit;
@@ -498,15 +499,16 @@ EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
const struct mtd_pairing_info *info)
{
- int ngroups = mtd_pairing_groups(mtd);
- int npairs = mtd_wunit_per_eb(mtd) / ngroups;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ngroups = mtd_pairing_groups(master);
+ int npairs = mtd_wunit_per_eb(master) / ngroups;
if (!info || info->pair < 0 || info->pair >= npairs ||
info->group < 0 || info->group >= ngroups)
return -EINVAL;
- if (mtd->pairing && mtd->pairing->get_wunit)
- return mtd->pairing->get_wunit(mtd, info);
+ if (master->pairing && master->pairing->get_wunit)
+ return mtd->pairing->get_wunit(master, info);
return info->pair;
}
@@ -524,10 +526,12 @@ EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
*/
int mtd_pairing_groups(struct mtd_info *mtd)
{
- if (!mtd->pairing || !mtd->pairing->ngroups)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->pairing || !master->pairing->ngroups)
return 1;
- return mtd->pairing->ngroups;
+ return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
@@ -587,6 +591,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
int add_mtd_device(struct mtd_info *mtd)
{
+ struct mtd_info *master = mtd_get_master(mtd);
struct mtd_notifier *not;
int i, error;
@@ -608,7 +613,7 @@ int add_mtd_device(struct mtd_info *mtd)
(mtd->_read && mtd->_read_oob)))
return -EINVAL;
- if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ if (WARN_ON((!mtd->erasesize || !master->_erase) &&
!(mtd->flags & MTD_NO_ERASE)))
return -EINVAL;
@@ -765,7 +770,8 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd)
pr_debug("mtd device won't show a device symlink in sysfs\n");
}
- mtd->orig_flags = mtd->flags;
+ INIT_LIST_HEAD(&mtd->partitions);
+ mutex_init(&mtd->master.partitions_lock);
}
/**
@@ -971,20 +977,26 @@ EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int err;
- if (!try_module_get(mtd->owner))
+ if (!try_module_get(master->owner))
return -ENODEV;
- if (mtd->_get_device) {
- err = mtd->_get_device(mtd);
+ if (master->_get_device) {
+ err = master->_get_device(mtd);
if (err) {
- module_put(mtd->owner);
+ module_put(master->owner);
return err;
}
}
- mtd->usecount++;
+
+ while (mtd->parent) {
+ mtd->usecount++;
+ mtd = mtd->parent;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -1038,13 +1050,18 @@ EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
- --mtd->usecount;
- BUG_ON(mtd->usecount < 0);
+ struct mtd_info *master = mtd_get_master(mtd);
- if (mtd->_put_device)
- mtd->_put_device(mtd);
+ while (mtd->parent) {
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+ mtd = mtd->parent;
+ }
+
+ if (master->_put_device)
+ master->_put_device(master);
- module_put(mtd->owner);
+ module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
@@ -1055,9 +1072,13 @@ EXPORT_SYMBOL_GPL(__put_mtd_device);
*/
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+ u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
+ int ret;
+
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
- if (!mtd->erasesize || !mtd->_erase)
+ if (!mtd->erasesize || !master->_erase)
return -ENOTSUPP;
if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
@@ -1069,7 +1090,14 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
return 0;
ledtrig_mtd_activity();
- return mtd->_erase(mtd, instr);
+
+ instr->addr += mst_ofs;
+ ret = master->_erase(master, instr);
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= mst_ofs;
+
+ instr->addr -= mst_ofs;
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
@@ -1079,30 +1107,36 @@ EXPORT_SYMBOL_GPL(mtd_erase);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
*virt = NULL;
if (phys)
*phys = 0;
- if (!mtd->_point)
+ if (!master->_point)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_point(mtd, from, len, retlen, virt, phys);
+
+ from = mtd_get_master_ofs(mtd, from);
+ return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
- if (!mtd->_unpoint)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unpoint)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_unpoint(mtd, from, len);
+ return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
@@ -1129,6 +1163,25 @@ unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
+ const struct mtd_ecc_stats *old_stats)
+{
+ struct mtd_ecc_stats diff;
+
+ if (master == mtd)
+ return;
+
+ diff = master->ecc_stats;
+ diff.failed -= old_stats->failed;
+ diff.corrected -= old_stats->corrected;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.failed += diff.failed;
+ mtd->ecc_stats.corrected += diff.corrected;
+ mtd = mtd->parent;
+ }
+}
+
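A worked example for mtd_update_ecc_stats(): only the delta accumulated on the master during this call is propagated, so readers on sibling partitions never inherit each other's counters:

	/*
	 *   old_stats->corrected == 10          (master, sampled before the read)
	 *   master->ecc_stats.corrected == 12   (master, after the read)
	 *   diff.corrected == 12 - 10 == 2
	 *
	 * The delta of 2 is added to the child and every intermediate
	 * parent; the master already carries the new absolute total.
	 */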
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
@@ -1171,8 +1224,10 @@ EXPORT_SYMBOL_GPL(mtd_write);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_panic_write)
+ if (!master->_panic_write)
return -EOPNOTSUPP;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
@@ -1183,7 +1238,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
if (!mtd->oops_panic_write)
mtd->oops_panic_write = true;
- return mtd->_panic_write(mtd, to, len, retlen, buf);
+ return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
+ retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
@@ -1222,7 +1278,10 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_ecc_stats old_stats = master->ecc_stats;
int ret_code;
+
ops->retlen = ops->oobretlen = 0;
ret_code = mtd_check_oob_ops(mtd, from, ops);
@@ -1232,14 +1291,17 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_read */
- if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
+ if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
- if (mtd->_read_oob)
- ret_code = mtd->_read_oob(mtd, from, ops);
+ from = mtd_get_master_ofs(mtd, from);
+ if (master->_read_oob)
+ ret_code = master->_read_oob(master, from, ops);
else
- ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
- ops->datbuf);
+ ret_code = master->_read(master, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ mtd_update_ecc_stats(mtd, master, &old_stats);
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1258,6 +1320,7 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int ret;
ops->retlen = ops->oobretlen = 0;
@@ -1272,14 +1335,16 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_write */
- if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+ if (!master->_write_oob && (!master->_write || ops->oobbuf))
return -EOPNOTSUPP;
- if (mtd->_write_oob)
- return mtd->_write_oob(mtd, to, ops);
+ to = mtd_get_master_ofs(mtd, to);
+
+ if (master->_write_oob)
+ return master->_write_oob(master, to, ops);
else
- return mtd->_write(mtd, to, ops->len, &ops->retlen,
- ops->datbuf);
+ return master->_write(master, to, ops->len, &ops->retlen,
+ ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
@@ -1302,15 +1367,17 @@ EXPORT_SYMBOL_GPL(mtd_write_oob);
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
memset(oobecc, 0, sizeof(*oobecc));
- if (!mtd || section < 0)
+ if (!master || section < 0)
return -EINVAL;
- if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+ if (!master->ooblayout || !master->ooblayout->ecc)
return -ENOTSUPP;
- return mtd->ooblayout->ecc(mtd, section, oobecc);
+ return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
@@ -1334,15 +1401,17 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobfree)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
memset(oobfree, 0, sizeof(*oobfree));
- if (!mtd || section < 0)
+ if (!master || section < 0)
return -EINVAL;
- if (!mtd->ooblayout || !mtd->ooblayout->free)
+ if (!master->ooblayout || !master->ooblayout->free)
return -ENOTSUPP;
- return mtd->ooblayout->free(mtd, section, oobfree);
+ return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
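Because the layout callbacks are now resolved on the master, a consumer can still walk a partition's OOB layout exactly as before. A sketch (the loop ends when the layout rejects the section index, typically with -ERANGE):

	struct mtd_oob_region region;
	int section = 0;

	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
		pr_info("ECC section %d: offset %u, length %u\n",
			section, region.offset, region.length);
		section++;
	}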
@@ -1651,60 +1720,69 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
- if (!mtd->_get_fact_prot_info)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_fact_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+ return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_read_fact_prot_reg)
+ if (!master->_read_fact_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+ return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
- if (!mtd->_get_user_prot_info)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_get_user_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+ return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
- if (!mtd->_read_user_prot_reg)
+ if (!master->_read_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+ return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, u_char *buf)
{
+ struct mtd_info *master = mtd_get_master(mtd);
int ret;
*retlen = 0;
- if (!mtd->_write_user_prot_reg)
+ if (!master->_write_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
if (ret)
return ret;
@@ -1718,80 +1796,105 @@ EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
- if (!mtd->_lock_user_prot_reg)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
- return mtd->_lock_user_prot_reg(mtd, from, len);
+ return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_lock)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_lock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_lock(mtd, ofs, len);
+ return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_unlock)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_unlock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_unlock(mtd, ofs, len);
+ return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
- if (!mtd->_is_locked)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_is_locked)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
- return mtd->_is_locked(mtd, ofs, len);
+ return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
- if (!mtd->_block_isreserved)
+ if (!master->_block_isreserved)
return 0;
- return mtd->_block_isreserved(mtd, ofs);
+ return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
- if (!mtd->_block_isbad)
+ if (!master->_block_isbad)
return 0;
- return mtd->_block_isbad(mtd, ofs);
+ return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- if (!mtd->_block_markbad)
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (!master->_block_markbad)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return mtd->_block_markbad(mtd, ofs);
+
+ ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+ if (ret)
+ return ret;
+
+ while (mtd->parent) {
+ mtd->ecc_stats.badblocks++;
+ mtd = mtd->parent;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
@@ -1841,12 +1944,17 @@ static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
+ struct mtd_info *master = mtd_get_master(mtd);
+
*retlen = 0;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- if (!mtd->_writev)
+
+ if (!master->_writev)
return default_mtd_writev(mtd, vecs, count, to, retlen);
- return mtd->_writev(mtd, vecs, count, to, retlen);
+
+ return master->_writev(master, vecs, count,
+ mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
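Every wrapper converted above follows the same shape; a condensed sketch of the pattern, using a hypothetical _foo hook rather than any real one:

	int mtd_foo(struct mtd_info *mtd, loff_t ofs, uint64_t len)
	{
		struct mtd_info *master = mtd_get_master(mtd);

		/* capabilities now live on the master ... */
		if (!master->_foo)
			return -EOPNOTSUPP;

		/* ... bounds checks stay on the partition view ... */
		if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
			return -EINVAL;

		/* ... and the call is dispatched with an absolute offset. */
		return master->_foo(master, mtd_get_master_ofs(mtd, ofs), len);
	}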
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 7328c066c5ba..3f6025684f58 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,339 +20,52 @@
#include "mtdcore.h"
-/* Our partition linked list */
-static LIST_HEAD(mtd_partitions);
-static DEFINE_MUTEX(mtd_partitions_mutex);
-
-/**
- * struct mtd_part - our partition node structure
- *
- * @mtd: struct holding partition details
- * @parent: parent mtd - flash device or another partition
- * @offset: partition offset relative to the *flash device*
- */
-struct mtd_part {
- struct mtd_info mtd;
- struct mtd_info *parent;
- uint64_t offset;
- struct list_head list;
-};
-
-/*
- * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
- * the pointer to that structure.
- */
-static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
-{
- return container_of(mtd, struct mtd_part, mtd);
-}
-
-static u64 part_absolute_offset(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- if (!mtd_is_partition(mtd))
- return 0;
-
- return part_absolute_offset(part->parent) + part->offset;
-}
-
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
*/
-static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- struct mtd_ecc_stats stats;
- int res;
-
- stats = part->parent->ecc_stats;
- res = part->parent->_read(part->parent, from + part->offset, len,
- retlen, buf);
- if (unlikely(mtd_is_eccerr(res)))
- mtd->ecc_stats.failed +=
- part->parent->ecc_stats.failed - stats.failed;
- else
- mtd->ecc_stats.corrected +=
- part->parent->ecc_stats.corrected - stats.corrected;
- return res;
-}
-
-static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_point(part->parent, from + part->offset, len,
- retlen, virt, phys);
-}
-
-static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_unpoint(part->parent, from + part->offset, len);
-}
-
-static int part_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- struct mtd_ecc_stats stats;
- int res;
-
- stats = part->parent->ecc_stats;
- res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(mtd_is_eccerr(res)))
- mtd->ecc_stats.failed +=
- part->parent->ecc_stats.failed - stats.failed;
- else
- mtd->ecc_stats.corrected +=
- part->parent->ecc_stats.corrected - stats.corrected;
- return res;
-}
-
-static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_read_user_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
- size_t *retlen, struct otp_info *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_user_prot_info(part->parent, len, retlen,
- buf);
-}
-
-static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_read_fact_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
- size_t *retlen, struct otp_info *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_fact_prot_info(part->parent, len, retlen,
- buf);
-}
-
-static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_write(part->parent, to + part->offset, len,
- retlen, buf);
-}
-
-static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_panic_write(part->parent, to + part->offset, len,
- retlen, buf);
-}
-
-static int part_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_write_oob(part->parent, to + part->offset, ops);
-}
-
-static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_write_user_prot_reg(part->parent, from, len,
- retlen, buf);
-}
-
-static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
- size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_lock_user_prot_reg(part->parent, from, len);
-}
-
-static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_writev(part->parent, vecs, count,
- to + part->offset, retlen);
-}
-
-static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- int ret;
-
- instr->addr += part->offset;
- ret = part->parent->_erase(part->parent, instr);
- if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
- instr->fail_addr -= part->offset;
- instr->addr -= part->offset;
-
- return ret;
-}
-
-static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_lock(part->parent, ofs + part->offset, len);
-}
-
-static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_unlock(part->parent, ofs + part->offset, len);
-}
-
-static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_is_locked(part->parent, ofs + part->offset, len);
-}
-
-static void part_sync(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_sync(part->parent);
-}
-
-static int part_suspend(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_suspend(part->parent);
-}
-
-static void part_resume(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_resume(part->parent);
-}
-
-static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- ofs += part->offset;
- return part->parent->_block_isreserved(part->parent, ofs);
-}
-
-static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- ofs += part->offset;
- return part->parent->_block_isbad(part->parent, ofs);
-}
-
-static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- int res;
-
- ofs += part->offset;
- res = part->parent->_block_markbad(part->parent, ofs);
- if (!res)
- mtd->ecc_stats.badblocks++;
- return res;
-}
-
-static int part_get_device(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- return part->parent->_get_device(part->parent);
-}
-
-static void part_put_device(struct mtd_info *mtd)
-{
- struct mtd_part *part = mtd_to_part(mtd);
- part->parent->_put_device(part->parent);
-}
-
-static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return mtd_ooblayout_ecc(part->parent, section, oobregion);
-}
-
-static int part_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return mtd_ooblayout_free(part->parent, section, oobregion);
-}
-
-static const struct mtd_ooblayout_ops part_ooblayout_ops = {
- .ecc = part_ooblayout_ecc,
- .free = part_ooblayout_free,
-};
-
-static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- return part->parent->_max_bad_blocks(part->parent,
- ofs + part->offset, len);
-}
-
-static inline void free_partition(struct mtd_part *p)
+static inline void free_partition(struct mtd_info *mtd)
{
- kfree(p->mtd.name);
- kfree(p);
+ kfree(mtd->name);
+ kfree(mtd);
}
-static struct mtd_part *allocate_partition(struct mtd_info *parent,
- const struct mtd_partition *part, int partno,
- uint64_t cur_offset)
+static struct mtd_info *allocate_partition(struct mtd_info *parent,
+ const struct mtd_partition *part,
+ int partno, uint64_t cur_offset)
{
int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
parent->erasesize;
- struct mtd_part *slave;
+ struct mtd_info *child, *master = mtd_get_master(parent);
u32 remainder;
char *name;
u64 tmp;
/* allocate the partition structure */
- slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
name = kstrdup(part->name, GFP_KERNEL);
- if (!name || !slave) {
+ if (!name || !child) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
parent->name);
kfree(name);
- kfree(slave);
+ kfree(child);
return ERR_PTR(-ENOMEM);
}
/* set up the MTD object for this partition */
- slave->mtd.type = parent->type;
- slave->mtd.flags = parent->orig_flags & ~part->mask_flags;
- slave->mtd.orig_flags = slave->mtd.flags;
- slave->mtd.size = part->size;
- slave->mtd.writesize = parent->writesize;
- slave->mtd.writebufsize = parent->writebufsize;
- slave->mtd.oobsize = parent->oobsize;
- slave->mtd.oobavail = parent->oobavail;
- slave->mtd.subpage_sft = parent->subpage_sft;
- slave->mtd.pairing = parent->pairing;
-
- slave->mtd.name = name;
- slave->mtd.owner = parent->owner;
+ child->type = parent->type;
+ child->part.flags = parent->flags & ~part->mask_flags;
+ child->flags = child->part.flags;
+ child->size = part->size;
+ child->writesize = parent->writesize;
+ child->writebufsize = parent->writebufsize;
+ child->oobsize = parent->oobsize;
+ child->oobavail = parent->oobavail;
+ child->subpage_sft = parent->subpage_sft;
+
+ child->name = name;
+ child->owner = parent->owner;
/* NOTE: Historically, we didn't arrange MTDs as a tree out of
* concern for showing the same data in multiple partitions.
@@ -360,134 +73,76 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
* so the MTD_PARTITIONED_MASTER option allows that. The master
* will have device nodes etc only if this is set, so make the
* parent conditional on that option. Note, this is a way to
- * distinguish between the master and the partition in sysfs.
+ * distinguish between the parent and its partitions in sysfs.
*/
- slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
- &parent->dev :
- parent->dev.parent;
- slave->mtd.dev.of_node = part->of_node;
-
- if (parent->_read)
- slave->mtd._read = part_read;
- if (parent->_write)
- slave->mtd._write = part_write;
-
- if (parent->_panic_write)
- slave->mtd._panic_write = part_panic_write;
-
- if (parent->_point && parent->_unpoint) {
- slave->mtd._point = part_point;
- slave->mtd._unpoint = part_unpoint;
- }
-
- if (parent->_read_oob)
- slave->mtd._read_oob = part_read_oob;
- if (parent->_write_oob)
- slave->mtd._write_oob = part_write_oob;
- if (parent->_read_user_prot_reg)
- slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
- if (parent->_read_fact_prot_reg)
- slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
- if (parent->_write_user_prot_reg)
- slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
- if (parent->_lock_user_prot_reg)
- slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
- if (parent->_get_user_prot_info)
- slave->mtd._get_user_prot_info = part_get_user_prot_info;
- if (parent->_get_fact_prot_info)
- slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
- if (parent->_sync)
- slave->mtd._sync = part_sync;
- if (!partno && !parent->dev.class && parent->_suspend &&
- parent->_resume) {
- slave->mtd._suspend = part_suspend;
- slave->mtd._resume = part_resume;
- }
- if (parent->_writev)
- slave->mtd._writev = part_writev;
- if (parent->_lock)
- slave->mtd._lock = part_lock;
- if (parent->_unlock)
- slave->mtd._unlock = part_unlock;
- if (parent->_is_locked)
- slave->mtd._is_locked = part_is_locked;
- if (parent->_block_isreserved)
- slave->mtd._block_isreserved = part_block_isreserved;
- if (parent->_block_isbad)
- slave->mtd._block_isbad = part_block_isbad;
- if (parent->_block_markbad)
- slave->mtd._block_markbad = part_block_markbad;
- if (parent->_max_bad_blocks)
- slave->mtd._max_bad_blocks = part_max_bad_blocks;
-
- if (parent->_get_device)
- slave->mtd._get_device = part_get_device;
- if (parent->_put_device)
- slave->mtd._put_device = part_put_device;
-
- slave->mtd._erase = part_erase;
- slave->parent = parent;
- slave->offset = part->offset;
-
- if (slave->offset == MTDPART_OFS_APPEND)
- slave->offset = cur_offset;
- if (slave->offset == MTDPART_OFS_NXTBLK) {
+ child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
+ &parent->dev : parent->dev.parent;
+ child->dev.of_node = part->of_node;
+ child->parent = parent;
+ child->part.offset = part->offset;
+ INIT_LIST_HEAD(&child->partitions);
+
+ if (child->part.offset == MTDPART_OFS_APPEND)
+ child->part.offset = cur_offset;
+ if (child->part.offset == MTDPART_OFS_NXTBLK) {
tmp = cur_offset;
- slave->offset = cur_offset;
+ child->part.offset = cur_offset;
remainder = do_div(tmp, wr_alignment);
if (remainder) {
- slave->offset += wr_alignment - remainder;
+ child->part.offset += wr_alignment - remainder;
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
- (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+ (unsigned long long)cur_offset,
+ child->part.offset);
}
}
- if (slave->offset == MTDPART_OFS_RETAIN) {
- slave->offset = cur_offset;
- if (parent->size - slave->offset >= slave->mtd.size) {
- slave->mtd.size = parent->size - slave->offset
- - slave->mtd.size;
+ if (child->part.offset == MTDPART_OFS_RETAIN) {
+ child->part.offset = cur_offset;
+ if (parent->size - child->part.offset >= child->size) {
+ child->size = parent->size - child->part.offset -
+ child->size;
} else {
printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
- part->name, parent->size - slave->offset,
- slave->mtd.size);
+ part->name, parent->size - child->part.offset,
+ child->size);
/* register to preserve ordering */
goto out_register;
}
}
- if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = parent->size - slave->offset;
+ if (child->size == MTDPART_SIZ_FULL)
+ child->size = parent->size - child->part.offset;
- printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
- (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
+ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
+ child->part.offset, child->part.offset + child->size,
+ child->name);
/* let's do some sanity checks */
- if (slave->offset >= parent->size) {
+ if (child->part.offset >= parent->size) {
/* let's register it anyway to preserve ordering */
- slave->offset = 0;
- slave->mtd.size = 0;
+ child->part.offset = 0;
+ child->size = 0;
/* Initialize ->erasesize to make add_mtd_device() happy. */
- slave->mtd.erasesize = parent->erasesize;
-
+ child->erasesize = parent->erasesize;
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
}
- if (slave->offset + slave->mtd.size > parent->size) {
- slave->mtd.size = parent->size - slave->offset;
+ if (child->part.offset + child->size > parent->size) {
+ child->size = parent->size - child->part.offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
- part->name, parent->name, (unsigned long long)slave->mtd.size);
+ part->name, parent->name, child->size);
}
if (parent->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = parent->numeraseregions;
- u64 end = slave->offset + slave->mtd.size;
+ u64 end = child->part.offset + child->size;
struct mtd_erase_region_info *regions = parent->eraseregions;
/* Find the first erase region which is part of this
* partition. */
- for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ for (i = 0; i < max && regions[i].offset <= child->part.offset;
+ i++)
;
/* The loop searched for the region _behind_ the first one */
if (i > 0)
@@ -495,70 +150,68 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
- if (slave->mtd.erasesize < regions[i].erasesize) {
- slave->mtd.erasesize = regions[i].erasesize;
- }
+ if (child->erasesize < regions[i].erasesize)
+ child->erasesize = regions[i].erasesize;
}
- BUG_ON(slave->mtd.erasesize == 0);
+ BUG_ON(child->erasesize == 0);
} else {
/* Single erase size */
- slave->mtd.erasesize = parent->erasesize;
+ child->erasesize = parent->erasesize;
}
/*
- * Slave erasesize might differ from the master one if the master
+ * Child erasesize might differ from the parent one if the parent
* exposes several regions with different erasesize. Adjust
* wr_alignment accordingly.
*/
- if (!(slave->mtd.flags & MTD_NO_ERASE))
- wr_alignment = slave->mtd.erasesize;
+ if (!(child->flags & MTD_NO_ERASE))
+ wr_alignment = child->erasesize;
- tmp = part_absolute_offset(parent) + slave->offset;
+ tmp = mtd_get_master_ofs(child, 0);
remainder = do_div(tmp, wr_alignment);
- if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
+ child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
part->name);
}
- tmp = part_absolute_offset(parent) + slave->mtd.size;
+ tmp = mtd_get_master_ofs(child, 0) + child->size;
remainder = do_div(tmp, wr_alignment);
- if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
+ if ((child->flags & MTD_WRITEABLE) && remainder) {
+ child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
part->name);
}
- mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
- slave->mtd.ecc_step_size = parent->ecc_step_size;
- slave->mtd.ecc_strength = parent->ecc_strength;
- slave->mtd.bitflip_threshold = parent->bitflip_threshold;
+ child->ecc_step_size = parent->ecc_step_size;
+ child->ecc_strength = parent->ecc_strength;
+ child->bitflip_threshold = parent->bitflip_threshold;
- if (parent->_block_isbad) {
+ if (master->_block_isbad) {
uint64_t offs = 0;
- while (offs < slave->mtd.size) {
- if (mtd_block_isreserved(parent, offs + slave->offset))
- slave->mtd.ecc_stats.bbtblocks++;
- else if (mtd_block_isbad(parent, offs + slave->offset))
- slave->mtd.ecc_stats.badblocks++;
- offs += slave->mtd.erasesize;
+ while (offs < child->size) {
+ if (mtd_block_isreserved(child, offs))
+ child->ecc_stats.bbtblocks++;
+ else if (mtd_block_isbad(child, offs))
+ child->ecc_stats.badblocks++;
+ offs += child->erasesize;
}
}
out_register:
- return slave;
+ return child;
}
static ssize_t mtd_partition_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
- struct mtd_part *part = mtd_to_part(mtd);
- return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset);
+
+ return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset);
}
static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
@@ -568,9 +221,9 @@ static const struct attribute *mtd_partition_attrs[] = {
NULL
};
-static int mtd_add_partition_attrs(struct mtd_part *new)
+static int mtd_add_partition_attrs(struct mtd_info *new)
{
- int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
+ int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
if (ret)
printk(KERN_WARNING
"mtd: failed to create partition attrs, err=%d\n", ret);
@@ -580,8 +233,9 @@ static int mtd_add_partition_attrs(struct mtd_part *new)
int mtd_add_partition(struct mtd_info *parent, const char *name,
long long offset, long long length)
{
+ struct mtd_info *master = mtd_get_master(parent);
struct mtd_partition part;
- struct mtd_part *new;
+ struct mtd_info *child;
int ret = 0;
/* the direct offset is expected */
@@ -600,28 +254,28 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
part.size = length;
part.offset = offset;
- new = allocate_partition(parent, &part, -1, offset);
- if (IS_ERR(new))
- return PTR_ERR(new);
+ child = allocate_partition(parent, &part, -1, offset);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- mutex_lock(&mtd_partitions_mutex);
- list_add(&new->list, &mtd_partitions);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(&new->mtd);
+ ret = add_mtd_device(child);
if (ret)
goto err_remove_part;
- mtd_add_partition_attrs(new);
+ mtd_add_partition_attrs(child);
return 0;
err_remove_part:
- mutex_lock(&mtd_partitions_mutex);
- list_del(&new->list);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
- free_partition(new);
+ free_partition(child);
return ret;
}
@@ -630,119 +284,142 @@ EXPORT_SYMBOL_GPL(mtd_add_partition);
/**
* __mtd_del_partition - delete MTD partition
*
- * @priv: internal MTD struct for partition to be deleted
+ * @mtd: MTD structure to be deleted
*
* This function must be called with the partitions mutex locked.
*/
-static int __mtd_del_partition(struct mtd_part *priv)
+static int __mtd_del_partition(struct mtd_info *mtd)
{
- struct mtd_part *child, *next;
+ struct mtd_info *child, *next;
int err;
- list_for_each_entry_safe(child, next, &mtd_partitions, list) {
- if (child->parent == &priv->mtd) {
- err = __mtd_del_partition(child);
- if (err)
- return err;
- }
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ err = __mtd_del_partition(child);
+ if (err)
+ return err;
}
- sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);
+ sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
- err = del_mtd_device(&priv->mtd);
+ err = del_mtd_device(mtd);
if (err)
return err;
- list_del(&priv->list);
- free_partition(priv);
+ list_del(&mtd->part.node);
+ free_partition(mtd);
return 0;
}
/*
* This function unregisters and destroys all slave MTD objects which are
- * attached to the given MTD object.
+ * attached to the given MTD object, recursively.
*/
-int del_mtd_partitions(struct mtd_info *mtd)
+static int __del_mtd_partitions(struct mtd_info *mtd)
{
- struct mtd_part *slave, *next;
+ struct mtd_info *child, *next;
+ LIST_HEAD(tmp_list);
int ret, err = 0;
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if (slave->parent == mtd) {
- ret = __mtd_del_partition(slave);
- if (ret < 0)
- err = ret;
+ list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ if (mtd_has_partitions(child))
+ __del_mtd_partitions(child);
+
+ pr_info("Deleting %s MTD partition\n", child->name);
+ ret = del_mtd_device(child);
+ if (ret < 0) {
+ pr_err("Error when deleting partition \"%s\" (%d)\n",
+ child->name, ret);
+ err = ret;
+ continue;
}
- mutex_unlock(&mtd_partitions_mutex);
+
+ list_del(&child->part.node);
+ free_partition(child);
+ }
return err;
}
+int del_mtd_partitions(struct mtd_info *mtd)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);
+
+ mutex_lock(&master->master.partitions_lock);
+ ret = __del_mtd_partitions(mtd);
+ mutex_unlock(&master->master.partitions_lock);
+
+ return ret;
+}
+
int mtd_del_partition(struct mtd_info *mtd, int partno)
{
- struct mtd_part *slave, *next;
+ struct mtd_info *child, *master = mtd_get_master(mtd);
int ret = -EINVAL;
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry_safe(slave, next, &mtd_partitions, list)
- if ((slave->parent == mtd) &&
- (slave->mtd.index == partno)) {
- ret = __mtd_del_partition(slave);
+ mutex_lock(&master->master.partitions_lock);
+ list_for_each_entry(child, &mtd->partitions, part.node) {
+ if (child->index == partno) {
+ ret = __mtd_del_partition(child);
break;
}
- mutex_unlock(&mtd_partitions_mutex);
+ }
+ mutex_unlock(&master->master.partitions_lock);
return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
/*
- * This function, given a master MTD object and a partition table, creates
- * and registers slave MTD objects which are bound to the master according to
- * the partition definitions.
+ * This function, given a parent MTD object and a partition table, creates
+ * and registers the child MTD objects which are bound to the parent according
+ * to the partition definitions.
*
- * For historical reasons, this function's caller only registers the master
+ * For historical reasons, this function's caller only registers the parent
* if the MTD_PARTITIONED_MASTER config option is set.
*/
-int add_mtd_partitions(struct mtd_info *master,
+int add_mtd_partitions(struct mtd_info *parent,
const struct mtd_partition *parts,
int nbparts)
{
- struct mtd_part *slave;
+ struct mtd_info *child, *master = mtd_get_master(parent);
uint64_t cur_offset = 0;
int i, ret;
- printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
+ nbparts, parent->name);
for (i = 0; i < nbparts; i++) {
- slave = allocate_partition(master, parts + i, i, cur_offset);
- if (IS_ERR(slave)) {
- ret = PTR_ERR(slave);
+ child = allocate_partition(parent, parts + i, i, cur_offset);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
goto err_del_partitions;
}
- mutex_lock(&mtd_partitions_mutex);
- list_add(&slave->list, &mtd_partitions);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_add_tail(&child->part.node, &parent->partitions);
+ mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(&slave->mtd);
+ ret = add_mtd_device(child);
if (ret) {
- mutex_lock(&mtd_partitions_mutex);
- list_del(&slave->list);
- mutex_unlock(&mtd_partitions_mutex);
+ mutex_lock(&master->master.partitions_lock);
+ list_del(&child->part.node);
+ mutex_unlock(&master->master.partitions_lock);
- free_partition(slave);
+ free_partition(child);
goto err_del_partitions;
}
- mtd_add_partition_attrs(slave);
+ mtd_add_partition_attrs(child);
+
/* Look for subpartitions */
- parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
+ parse_mtd_partitions(child, parts[i].types, NULL);
- cur_offset = slave->offset + slave->mtd.size;
+ cur_offset = child->part.offset + child->size;
}
return 0;
@@ -1023,29 +700,11 @@ void mtd_part_parser_cleanup(struct mtd_partitions *parts)
}
}
-int mtd_is_partition(const struct mtd_info *mtd)
-{
- struct mtd_part *part;
- int ispart = 0;
-
- mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry(part, &mtd_partitions, list)
- if (&part->mtd == mtd) {
- ispart = 1;
- break;
- }
- mutex_unlock(&mtd_partitions_mutex);
-
- return ispart;
-}
-EXPORT_SYMBOL_GPL(mtd_is_partition);
-
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
- if (!mtd_is_partition(mtd))
- return mtd->size;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
- return mtd_get_device_size(mtd_to_part(mtd)->parent);
+ return master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
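With every mtd_info now carrying its own partition list, partitions nest without the old mtd_part wrapper. A sketch of carving a sub-partition at runtime (names, sizes and the fw_mtd pointer are made up for illustration):

	/* 8 MiB "fw" partition on the master, then a 1 MiB "env"
	 * sub-partition inside it; offsets are relative to the immediate
	 * parent, and mtd_get_master_ofs() resolves the absolute address. */
	int err;

	err = mtd_add_partition(master_mtd, "fw", 0, 8 * SZ_1M);
	if (err)
		return err;

	err = mtd_add_partition(fw_mtd, "env", 7 * SZ_1M, SZ_1M);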
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index d5326d19b136..ec18ade33262 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -3259,7 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd)
switch (density) {
case ONENAND_DEVICE_DENSITY_8Gb:
this->options |= ONENAND_HAS_NOP_1;
- /* fall through */
+ fallthrough;
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 8312182088c1..d66dab25df20 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -19,15 +19,17 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
/*
* MTD structure for E3 (Delta)
*/
-struct ams_delta_nand {
+struct gpio_nand {
struct nand_controller base;
struct nand_chip nand_chip;
struct gpio_desc *gpiod_rdy;
@@ -39,41 +41,20 @@ struct ams_delta_nand {
struct gpio_desc *gpiod_cle;
struct gpio_descs *data_gpiods;
bool data_in;
+ unsigned int tRP;
+ unsigned int tWP;
+ u8 (*io_read)(struct gpio_nand *this);
+ void (*io_write)(struct gpio_nand *this, u8 byte);
};
-/*
- * Define partitions for flash devices
- */
-
-static const struct mtd_partition partition_info[] = {
- { .name = "Kernel",
- .offset = 0,
- .size = 3 * SZ_1M + SZ_512K },
- { .name = "u-boot",
- .offset = 3 * SZ_1M + SZ_512K,
- .size = SZ_256K },
- { .name = "u-boot params",
- .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
- .size = SZ_256K },
- { .name = "Amstrad LDR",
- .offset = 4 * SZ_1M,
- .size = SZ_256K },
- { .name = "File system",
- .offset = 4 * SZ_1M + 1 * SZ_256K,
- .size = 27 * SZ_1M },
- { .name = "PBL reserved",
- .offset = 32 * SZ_1M - 3 * SZ_256K,
- .size = 3 * SZ_256K },
-};
-
-static void ams_delta_write_commit(struct ams_delta_nand *priv)
+static void gpio_nand_write_commit(struct gpio_nand *priv)
{
- gpiod_set_value(priv->gpiod_nwe, 0);
- ndelay(40);
gpiod_set_value(priv->gpiod_nwe, 1);
+ ndelay(priv->tWP);
+ gpiod_set_value(priv->gpiod_nwe, 0);
}
-static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -81,10 +62,10 @@ static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
- ams_delta_write_commit(priv);
+ gpio_nand_write_commit(priv);
}
-static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
+static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
@@ -94,30 +75,30 @@ static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
gpiod_direction_output_raw(data_gpiods->desc[i],
test_bit(i, values));
- ams_delta_write_commit(priv);
+ gpio_nand_write_commit(priv);
priv->data_in = false;
}
-static u8 ams_delta_io_read(struct ams_delta_nand *priv)
+static u8 gpio_nand_io_read(struct gpio_nand *priv)
{
u8 res;
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
- gpiod_set_value(priv->gpiod_nre, 0);
- ndelay(40);
+ gpiod_set_value(priv->gpiod_nre, 1);
+ ndelay(priv->tRP);
gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
- gpiod_set_value(priv->gpiod_nre, 1);
+ gpiod_set_value(priv->gpiod_nre, 0);
res = values[0];
return res;
}
-static void ams_delta_dir_input(struct ams_delta_nand *priv)
+static void gpio_nand_dir_input(struct gpio_nand *priv)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
int i;
@@ -128,68 +109,67 @@ static void ams_delta_dir_input(struct ams_delta_nand *priv)
priv->data_in = true;
}
-static void ams_delta_write_buf(struct ams_delta_nand *priv, const u8 *buf,
- int len)
+static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
{
int i = 0;
if (len > 0 && priv->data_in)
- ams_delta_dir_output(priv, buf[i++]);
+ gpio_nand_dir_output(priv, buf[i++]);
while (i < len)
- ams_delta_io_write(priv, buf[i++]);
+ priv->io_write(priv, buf[i++]);
}
-static void ams_delta_read_buf(struct ams_delta_nand *priv, u8 *buf, int len)
+static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
{
int i;
- if (!priv->data_in)
- ams_delta_dir_input(priv);
+ if (priv->data_gpiods && !priv->data_in)
+ gpio_nand_dir_input(priv);
for (i = 0; i < len; i++)
- buf[i] = ams_delta_io_read(priv);
+ buf[i] = priv->io_read(priv);
}
-static void ams_delta_ctrl_cs(struct ams_delta_nand *priv, bool assert)
+static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
{
- gpiod_set_value(priv->gpiod_nce, assert ? 0 : 1);
+ gpiod_set_value(priv->gpiod_nce, assert);
}
-static int ams_delta_exec_op(struct nand_chip *this,
+static int gpio_nand_exec_op(struct nand_chip *this,
const struct nand_operation *op, bool check_only)
{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
+ struct gpio_nand *priv = nand_get_controller_data(this);
const struct nand_op_instr *instr;
int ret = 0;
if (check_only)
return 0;
- ams_delta_ctrl_cs(priv, 1);
+ gpio_nand_ctrl_cs(priv, 1);
for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
switch (instr->type) {
case NAND_OP_CMD_INSTR:
gpiod_set_value(priv->gpiod_cle, 1);
- ams_delta_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+ gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
gpiod_set_value(priv->gpiod_cle, 0);
break;
case NAND_OP_ADDR_INSTR:
gpiod_set_value(priv->gpiod_ale, 1);
- ams_delta_write_buf(priv, instr->ctx.addr.addrs,
+ gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
instr->ctx.addr.naddrs);
gpiod_set_value(priv->gpiod_ale, 0);
break;
case NAND_OP_DATA_IN_INSTR:
- ams_delta_read_buf(priv, instr->ctx.data.buf.in,
+ gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
- ams_delta_write_buf(priv, instr->ctx.data.buf.out,
+ gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
@@ -206,28 +186,61 @@ static int ams_delta_exec_op(struct nand_chip *this,
break;
}
- ams_delta_ctrl_cs(priv, 0);
+ gpio_nand_ctrl_cs(priv, 0);
return ret;
}
-static const struct nand_controller_ops ams_delta_ops = {
- .exec_op = ams_delta_exec_op,
+static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline,
+ const struct nand_data_interface *cf)
+{
+ struct gpio_nand *priv = nand_get_controller_data(this);
+ const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
+ struct device *dev = &nand_to_mtd(this)->dev;
+
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ if (priv->gpiod_nre) {
+ priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
+ dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
+ }
+
+ priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
+ dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
+
+ return 0;
+}
+
+static const struct nand_controller_ops gpio_nand_ops = {
+ .exec_op = gpio_nand_exec_op,
+ .setup_data_interface = gpio_nand_setup_data_interface,
};
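The nand_sdr_timings fields are given in picoseconds, hence the DIV_ROUND_UP(..., 1000) conversions to the nanoseconds ndelay() expects. A worked example, assuming SDR timing mode 0:

	/*
	 *   tWP_min = 50000 ps  (SDR timing mode 0)
	 *   priv->tWP = DIV_ROUND_UP(50000, 1000) = 50 ns
	 *
	 * gpio_nand_write_commit() then holds NWE asserted for 50 ns,
	 * replacing the hard-coded 40 ns pulse removed above.
	 */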
/*
* Main initialization routine
*/
-static int ams_delta_init(struct platform_device *pdev)
+static int gpio_nand_probe(struct platform_device *pdev)
{
- struct ams_delta_nand *priv;
+ struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
+ const struct mtd_partition *partitions = NULL;
+ int num_partitions = 0;
+ struct gpio_nand *priv;
struct nand_chip *this;
struct mtd_info *mtd;
- struct gpio_descs *data_gpiods;
+ int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
int err = 0;
+ if (pdata) {
+ partitions = pdata->parts;
+ num_partitions = pdata->num_parts;
+ }
+
/* Allocate memory for MTD device structure and private data */
- priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -238,6 +251,7 @@ static int ams_delta_init(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
nand_set_controller_data(this, priv);
+ nand_set_flash_node(this, pdev->dev.of_node);
priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
if (IS_ERR(priv->gpiod_rdy)) {
@@ -251,29 +265,33 @@ static int ams_delta_init(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- /* Set chip enabled, but */
- priv->gpiod_nwp = devm_gpiod_get(&pdev->dev, "nwp", GPIOD_OUT_HIGH);
+ /* Set chip enabled but write protected */
+ priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
+ GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nwp)) {
err = PTR_ERR(priv->gpiod_nwp);
dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
+ priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nce)) {
err = PTR_ERR(priv->gpiod_nce);
dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
+ priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nre)) {
err = PTR_ERR(priv->gpiod_nre);
dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
return err;
}
- priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
+ priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nwe)) {
err = PTR_ERR(priv->gpiod_nwe);
dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
@@ -295,28 +313,62 @@ static int ams_delta_init(struct platform_device *pdev)
}
/* Request array of data pins, initialize them as input */
- data_gpiods = devm_gpiod_get_array(&pdev->dev, "data", GPIOD_IN);
- if (IS_ERR(data_gpiods)) {
- err = PTR_ERR(data_gpiods);
+ priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
+ GPIOD_IN);
+ if (IS_ERR(priv->data_gpiods)) {
+ err = PTR_ERR(priv->data_gpiods);
dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
return err;
}
- priv->data_gpiods = data_gpiods;
- priv->data_in = true;
+ if (priv->data_gpiods) {
+ if (!priv->gpiod_nwe) {
+ dev_err(&pdev->dev,
+ "mandatory NWE pin not provided by platform\n");
+ return -ENODEV;
+ }
- /* Initialize the NAND controller object embedded in ams_delta_nand. */
- priv->base.ops = &ams_delta_ops;
+ priv->io_read = gpio_nand_io_read;
+ priv->io_write = gpio_nand_io_write;
+ priv->data_in = true;
+ }
+
+ if (pdev->id_entry)
+ probe = (void *) pdev->id_entry->driver_data;
+ else
+ probe = of_device_get_match_data(&pdev->dev);
+ if (probe)
+ err = probe(pdev, priv);
+ if (err)
+ return err;
+
+ if (!priv->io_read || !priv->io_write) {
+ dev_err(&pdev->dev, "incomplete device configuration\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the NAND controller object embedded in gpio_nand. */
+ priv->base.ops = &gpio_nand_ops;
nand_controller_init(&priv->base);
this->controller = &priv->base;
+ /*
+ * FIXME: We should release write protection only after nand_scan() to
+ * be on the safe side but we can't do that until we have a generic way
+ * to assert/deassert WP from the core. Even if the core shouldn't
+ * write things in the nand_scan() path, it should have control on this
+ * pin just in case we ever need to disable write protection during
+ * chip detection/initialization.
+ */
+ /* Release write protection */
+ gpiod_set_value(priv->gpiod_nwp, 0);
+
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
return err;
/* Register the partitions */
- err = mtd_device_register(mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ err = mtd_device_register(mtd, partitions, num_partitions);
if (err)
goto err_nand_cleanup;
@@ -331,26 +383,47 @@ err_nand_cleanup:
/*
* Clean up routine
*/
-static int ams_delta_cleanup(struct platform_device *pdev)
+static int gpio_nand_remove(struct platform_device *pdev)
{
- struct ams_delta_nand *priv = platform_get_drvdata(pdev);
+ struct gpio_nand *priv = platform_get_drvdata(pdev);
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+ /* Apply write protection */
+ gpiod_set_value(priv->gpiod_nwp, 1);
+
/* Unregister device */
nand_release(mtd_to_nand(mtd));
return 0;
}
-static struct platform_driver ams_delta_nand_driver = {
- .probe = ams_delta_init,
- .remove = ams_delta_cleanup,
+static const struct of_device_id gpio_nand_of_id_table[] = {
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
+
+static const struct platform_device_id gpio_nand_plat_id_table[] = {
+ {
+ .name = "ams-delta-nand",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
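The probe path resolves an optional board hook from the platform_device_id's driver_data (or the OF match data) and calls it before validating io_read/io_write. A board entry wiring such a hook might look like the sketch below; the function and table entry are hypothetical, and the tables in this patch leave the hook unset:

	/* Hypothetical board hook: install custom I/O callbacks or fixed
	 * timings before nand_scan() runs. */
	static int my_board_nand_probe(struct platform_device *pdev,
				       struct gpio_nand *priv)
	{
		priv->io_read = gpio_nand_io_read;
		priv->io_write = gpio_nand_io_write;
		return 0;
	}

	static const struct platform_device_id my_id_table[] = {
		{
			.name = "my-board-nand",
			.driver_data = (kernel_ulong_t)my_board_nand_probe,
		},
		{ /* sentinel */ },
	};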
+
+static struct platform_driver gpio_nand_driver = {
+ .probe = gpio_nand_probe,
+ .remove = gpio_nand_remove,
+ .id_table = gpio_nand_plat_id_table,
.driver = {
.name = "ams-delta-nand",
+ .of_match_table = of_match_ptr(gpio_nand_of_id_table),
},
};
-module_platform_driver(ams_delta_nand_driver);
+module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 44518dada75b..e4e3ceeac38f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -102,6 +102,45 @@ struct brcm_nand_dma_desc {
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS 100
+#define EDU_CMD_WRITE 0x00
+#define EDU_CMD_READ 0x01
+#define EDU_STATUS_ACTIVE BIT(0)
+#define EDU_ERR_STATUS_ERRACK BIT(0)
+#define EDU_DONE_MASK GENMASK(1, 0)
+
+#define EDU_CONFIG_MODE_NAND BIT(0)
+#define EDU_CONFIG_SWAP_BYTE BIT(1)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define EDU_CONFIG_SWAP_CFG EDU_CONFIG_SWAP_BYTE
+#else
+#define EDU_CONFIG_SWAP_CFG 0
+#endif
+
+/* edu registers */
+enum edu_reg {
+ EDU_CONFIG = 0,
+ EDU_DRAM_ADDR,
+ EDU_EXT_ADDR,
+ EDU_LENGTH,
+ EDU_CMD,
+ EDU_STOP,
+ EDU_STATUS,
+ EDU_DONE,
+ EDU_ERR_STATUS,
+};
+
+static const u16 edu_regs[] = {
+ [EDU_CONFIG] = 0x00,
+ [EDU_DRAM_ADDR] = 0x04,
+ [EDU_EXT_ADDR] = 0x08,
+ [EDU_LENGTH] = 0x0c,
+ [EDU_CMD] = 0x10,
+ [EDU_STOP] = 0x14,
+ [EDU_STATUS] = 0x18,
+ [EDU_DONE] = 0x1c,
+ [EDU_ERR_STATUS] = 0x20,
+};
+
/* flash_dma registers */
enum flash_dma_reg {
FLASH_DMA_REVISION = 0,
@@ -167,6 +206,8 @@ enum {
BRCMNAND_HAS_WP = BIT(3),
};
+struct brcmnand_host;
+
struct brcmnand_controller {
struct device *dev;
struct nand_controller controller;
@@ -185,17 +226,32 @@ struct brcmnand_controller {
int cmd_pending;
bool dma_pending;
+ bool edu_pending;
struct completion done;
struct completion dma_done;
+ struct completion edu_done;
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
+ /* EDU info, per-transaction */
+ const u16 *edu_offsets;
+ void __iomem *edu_base;
+ int edu_irq;
+ int edu_count;
+ u64 edu_dram_addr;
+ u32 edu_ext_addr;
+ u32 edu_cmd;
+ u32 edu_config;
+
/* flash_dma reg */
const u16 *flash_dma_offsets;
struct brcm_nand_dma_desc *dma_desc;
dma_addr_t dma_pa;
+ int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 dma_cmd);
+
/* in-memory cache of the FLASH_CACHE, used only for some commands */
u8 flash_cache[FC_BYTES];
@@ -216,6 +272,7 @@ struct brcmnand_controller {
u32 nand_cs_nand_xor;
u32 corr_stat_threshold;
u32 flash_dma_mode;
+ u32 flash_edu_mode;
bool pio_poll_mode;
};
@@ -657,6 +714,22 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
__raw_writel(val, ctrl->nand_fc + word * 4);
}
+static inline void edu_writel(struct brcmnand_controller *ctrl,
+ enum edu_reg reg, u32 val)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ brcmnand_writel(val, ctrl->edu_base + offs);
+}
+
+static inline u32 edu_readl(struct brcmnand_controller *ctrl,
+ enum edu_reg reg)
+{
+ u16 offs = ctrl->edu_offsets[reg];
+
+ return brcmnand_readl(ctrl->edu_base + offs);
+}
+
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
@@ -926,6 +999,16 @@ static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
return ctrl->flash_dma_base;
}
+static inline bool has_edu(struct brcmnand_controller *ctrl)
+{
+ return ctrl->edu_base;
+}
+
+static inline bool use_dma(struct brcmnand_controller *ctrl)
+{
+ return has_flash_dma(ctrl) || has_edu(ctrl);
+}
+
static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
if (ctrl->pio_poll_mode)
@@ -1299,6 +1382,52 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
return tbytes;
}
+static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
+{
+ /* initialize edu */
+ edu_writel(ctrl, EDU_ERR_STATUS, 0);
+ edu_readl(ctrl, EDU_ERR_STATUS);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+}
+
+/* edu irq */
+static irqreturn_t brcmnand_edu_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->edu_count) {
+ ctrl->edu_count--;
+ while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
+ udelay(1);
+ edu_writel(ctrl, EDU_DONE, 0);
+ edu_readl(ctrl, EDU_DONE);
+ }
+
+ if (ctrl->edu_count) {
+ ctrl->edu_dram_addr += FC_BYTES;
+ ctrl->edu_ext_addr += FC_BYTES;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&ctrl->edu_done);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
struct brcmnand_controller *ctrl = data;
@@ -1307,6 +1436,16 @@ static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
if (ctrl->dma_pending)
return IRQ_HANDLED;
+ /* check if we need to piggyback on the ctlrdy irq */
+ if (ctrl->edu_pending) {
+ if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
+ /* Discard interrupts while using dedicated edu irq */
+ return IRQ_HANDLED;
+
+ /* no registered edu irq, call handler */
+ return brcmnand_edu_irq(irq, data);
+ }
+
complete(&ctrl->done);
return IRQ_HANDLED;
}
@@ -1645,6 +1784,81 @@ static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
}
/**
+ * Kick EDU engine
+ */
+static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(200);
+ int ret = 0;
+ int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
+ unsigned int trans = len >> FC_SHIFT;
+ dma_addr_t pa;
+
+ pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
+ return -ENOMEM;
+ }
+
+ ctrl->edu_pending = true;
+ ctrl->edu_dram_addr = pa;
+ ctrl->edu_ext_addr = addr;
+ ctrl->edu_cmd = edu_cmd;
+ ctrl->edu_count = trans;
+
+ edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
+ edu_readl(ctrl, EDU_DRAM_ADDR);
+ edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
+ edu_readl(ctrl, EDU_EXT_ADDR);
+ edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
+ edu_readl(ctrl, EDU_LENGTH);
+
+ /* Start edu engine */
+ mb(); /* flush previous writes */
+ edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
+ edu_readl(ctrl, EDU_CMD);
+
+ if (!wait_for_completion_timeout(&ctrl->edu_done, timeo)) {
+ dev_err(ctrl->dev,
+ "timeout waiting for EDU; status %#x, error status %#x\n",
+ edu_readl(ctrl, EDU_STATUS),
+ edu_readl(ctrl, EDU_ERR_STATUS));
+ }
+
+ dma_unmap_single(ctrl->dev, pa, len, dir);
+
+ /* for program page check NAND status */
+ if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
+ edu_cmd == EDU_CMD_WRITE) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ /* Make sure the EDU status is clean */
+ if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
+ dev_warn(ctrl->dev, "EDU still active: %#x\n",
+ edu_readl(ctrl, EDU_STATUS));
+
+ if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
+ dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ }
+
+ ctrl->edu_pending = false;
+ brcmnand_edu_init(ctrl);
+ edu_writel(ctrl, EDU_STOP, 0); /* force stop */
+ edu_readl(ctrl, EDU_STOP);
+
+ return ret;
+}
+
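brcmnand_edu_trans() follows the standard streaming-DMA lifecycle: dma_map_single() hands the buffer to the device, the engine is programmed and kicked, the caller sleeps on a completion, and dma_unmap_single() returns ownership to the CPU before the data is touched. Note that wait_for_completion_timeout() returns an unsigned long (0 on timeout, the remaining jiffies otherwise), hence the !wait_for_completion_timeout() test above. A condensed sketch of the skeleton, assuming a driver with equivalent fields:

    dma_addr_t pa = dma_map_single(dev, buf, len, dir);

    if (dma_mapping_error(dev, pa))
            return -ENOMEM;

    /* ... program the engine with pa and kick it ... */

    /* 0 means the IRQ handler never called complete() in time */
    if (!wait_for_completion_timeout(&done, msecs_to_jiffies(200)))
            dev_err(dev, "timeout\n");

    /* hand the buffer back to the CPU before reading it */
    dma_unmap_single(dev, pa, len, dir);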
+/**
* Construct a FLASH_DMA descriptor as part of a linked list. You must know the
* following ahead of time:
* - Is this descriptor the beginning or end of a linked list?
@@ -1850,9 +2064,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
try_dmaread:
brcmnand_clear_ecc_addr(ctrl);
- if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
- err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
- CMD_PAGE_READ);
+ if (ctrl->dma_trans && !oob && flash_dma_buf_ok(buf)) {
+ err = ctrl->dma_trans(host, addr, buf,
+ trans * FC_BYTES,
+ CMD_PAGE_READ);
+
if (err) {
if (mtd_is_bitflip_or_eccerr(err))
err_addr = addr;
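The read path now calls through ctrl->dma_trans, a function pointer that probe binds to either brcmnand_dma_trans() (FLASH_DMA) or brcmnand_edu_trans() (EDU); see the brcmnand_probe() hunk later in this file's diff. The per-page hot path therefore needs no branch on the engine type. A self-contained sketch of that probe-time dispatch (names invented):

    #include <stdint.h>
    #include <stdio.h>

    struct ctrl;
    typedef int (*trans_fn)(struct ctrl *c, uint64_t addr, void *buf,
                            uint32_t len);

    struct ctrl {
            trans_fn dma_trans;     /* bound once at probe time */
    };

    static int flash_dma_trans(struct ctrl *c, uint64_t a, void *b, uint32_t l)
    {
            printf("FLASH_DMA: %u bytes @ %#llx\n", (unsigned)l,
                   (unsigned long long)a);
            return 0;
    }

    static int edu_trans(struct ctrl *c, uint64_t a, void *b, uint32_t l)
    {
            printf("EDU: %u bytes @ %#llx\n", (unsigned)l,
                   (unsigned long long)a);
            return 0;
    }

    int main(void)
    {
            struct ctrl c = { .dma_trans = edu_trans }; /* probe-time choice */
            char page[2048];

            return c.dma_trans(&c, 0x40000, page, sizeof(page));
    }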
@@ -1988,10 +2204,12 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < ctrl->max_oob; i += 4)
oob_reg_write(ctrl, i, 0xffffffff);
- if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
- if (brcmnand_dma_trans(host, addr, (u32 *)buf,
- mtd->writesize, CMD_PROGRAM_PAGE))
+ if (use_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ if (ctrl->dma_trans(host, addr, (u32 *)buf, mtd->writesize,
+ CMD_PROGRAM_PAGE))
ret = -EIO;
+
goto out;
}
@@ -2494,6 +2712,8 @@ static int brcmnand_suspend(struct device *dev)
if (has_flash_dma(ctrl))
ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+ else if (has_edu(ctrl))
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
return 0;
}
@@ -2508,6 +2728,14 @@ static int brcmnand_resume(struct device *dev)
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
}
+ if (has_edu(ctrl)) {
+ ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
+ edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
+ edu_readl(ctrl, EDU_CONFIG);
+ brcmnand_edu_init(ctrl);
+ }
+
brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
@@ -2553,6 +2781,49 @@ MODULE_DEVICE_TABLE(of, brcmnand_of_match);
/***********************************************************************
* Platform driver setup (per controller)
***********************************************************************/
+static int brcmnand_edu_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
+ if (res) {
+ ctrl->edu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->edu_base))
+ return PTR_ERR(ctrl->edu_base);
+
+ ctrl->edu_offsets = edu_regs;
+
+ edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
+ EDU_CONFIG_SWAP_CFG);
+ edu_readl(ctrl, EDU_CONFIG);
+
+ /* initialize edu */
+ brcmnand_edu_init(ctrl);
+
+ ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
+ if (ctrl->edu_irq < 0) {
+ dev_warn(dev,
+ "FLASH EDU enabled, using ctlrdy irq\n");
+ } else {
+ ret = devm_request_irq(dev, ctrl->edu_irq,
+ brcmnand_edu_irq, 0,
+ "brcmnand-edu", ctrl);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->edu_irq, ret);
+ return ret;
+ }
+
+ dev_info(dev, "FLASH EDU enabled using irq %u\n",
+ ctrl->edu_irq);
+ }
+ }
+
+ return 0;
+}
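brcmnand_edu_setup() treats the second platform IRQ as optional: platform_get_irq_optional() returns a negative errno when the line is absent, and in that case EDU completions piggyback on the ctlrdy interrupt handled earlier in this diff. A hedged sketch of the acquire-or-fall-back shape (handler and name invented):

    int irq = platform_get_irq_optional(pdev, 1);

    if (irq < 0) {
            /* no dedicated line: share the controller-ready IRQ */
            dev_warn(dev, "using ctlrdy irq\n");
    } else {
            ret = devm_request_irq(dev, irq, my_irq_handler, 0,
                                   "my-edu", ctrl);
            if (ret < 0)
                    return ret;
    }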
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
@@ -2578,6 +2849,7 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
+ init_completion(&ctrl->edu_done);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &brcmnand_controller_ops;
INIT_LIST_HEAD(&ctrl->host_list);
@@ -2675,6 +2947,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
}
dev_info(dev, "enabling FLASH_DMA\n");
+ /* set flash dma transfer function to call */
+ ctrl->dma_trans = brcmnand_dma_trans;
+ } else {
+ ret = brcmnand_edu_setup(pdev);
+ if (ret < 0)
+ goto err;
+
+ /* set edu transfer function to call */
+ ctrl->dma_trans = brcmnand_edu_trans;
}
/* Disable automatic device ID config, direct addressing */
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index f6c7102a1e32..efddc5c68afb 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -30,7 +30,6 @@
* Generic mode is used for executing rest of commands.
*/
-#define MAX_OOB_SIZE_PER_SECTOR 32
#define MAX_ADDRESS_CYC 6
#define MAX_ERASE_ADDRESS_CYC 3
#define MAX_DATA_SIZE 0xFFFC
@@ -190,6 +189,7 @@
/* BCH Engine identification register 3. */
#define BCH_CFG_3 0x844
+#define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
/* Ready/Busy# line status. */
#define RBN_SETINGS 0x1004
@@ -499,6 +499,7 @@ struct cdns_nand_ctrl {
unsigned long assigned_cs;
struct list_head chips;
+ u8 bch_metadata_size;
};
struct cdns_nand_chip {
@@ -997,6 +998,7 @@ static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
return status;
cadence_nand_reset_irq(cdns_ctrl);
+ reinit_completion(&cdns_ctrl->complete);
writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
cdns_ctrl->reg + CMD_REG2);
@@ -1077,6 +1079,14 @@ static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
int max_step_size = 0, nstrengths, i;
u32 reg;
+ reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
+ cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
+ if (cdns_ctrl->bch_metadata_size < 4) {
+ dev_err(cdns_ctrl->dev,
+ "Driver needs at least 4 bytes of BCH meta data\n");
+ return -EIO;
+ }
+
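BCH_CFG_3_METADATA_SIZE is a GENMASK(23, 16) field, and FIELD_GET() from <linux/bitfield.h> masks and shifts in one step; anything below 4 metadata bytes is rejected because the driver needs that much room per sector. A worked one-liner under an assumed register readout:

    u32 reg = 0x00200000;                       /* assumed BCH_CFG_3 value */
    u8 meta = FIELD_GET(GENMASK(23, 16), reg);  /* == 0x20, i.e. 32 bytes */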
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
@@ -1170,7 +1180,8 @@ static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
cadence_nand_get_caps(cdns_ctrl);
- cadence_nand_read_bch_caps(cdns_ctrl);
+ if (cadence_nand_read_bch_caps(cdns_ctrl))
+ return -EIO;
/*
* Set IO width access to 8.
@@ -2585,9 +2596,8 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
- u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
+ u32 ecc_size;
struct mtd_info *mtd = nand_to_mtd(chip);
- u32 max_oob_data_size;
int ret;
if (chip->options & NAND_BUSWIDTH_16) {
@@ -2603,12 +2613,9 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
chip->options |= NAND_NO_SUBPAGE_WRITE;
cdns_chip->bbm_offs = chip->badblockpos;
- if (chip->options & NAND_BUSWIDTH_16) {
- cdns_chip->bbm_offs &= ~0x01;
- cdns_chip->bbm_len = 2;
- } else {
- cdns_chip->bbm_len = 1;
- }
+ cdns_chip->bbm_offs &= ~0x01;
+ /* this value should be an even number */
+ cdns_chip->bbm_len = 2;
ret = nand_ecc_choose_conf(chip,
&cdns_ctrl->ecc_caps,
@@ -2625,13 +2632,12 @@ int cadence_nand_attach_chip(struct nand_chip *chip)
/* Error correction configuration. */
cdns_chip->sector_size = chip->ecc.size;
cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
+ ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
- max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR;
-
- if (cdns_chip->avail_oob_size > max_oob_data_size)
- cdns_chip->avail_oob_size = max_oob_data_size;
+ if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
+ cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
> mtd->oobsize)
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index fafd0a0aa8e2..6a6c919b2569 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1317,6 +1317,7 @@ int denali_init(struct denali_controller *denali)
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+ iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
denali_clear_irq_all(denali);
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index e5cdcda56d14..ac46eb7956ce 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -328,7 +328,7 @@ struct denali_chip {
struct nand_chip chip;
struct list_head node;
unsigned int nsels;
- struct denali_chip_sel sels[0];
+ struct denali_chip_sel sels[];
};
/**
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index c0e1a8ebe820..c2a391ad2c35 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1169,7 +1169,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
- " BlockMultiplerBits = %d\n"
+ " BlockMultiplierBits = %d\n"
" FormatFlgs = %d\n"
" OsakVersion = %d.%d.%d.%d\n"
" PercentUsed = %d\n",
@@ -1482,7 +1482,7 @@ static int __init doc_probe(unsigned long physadr)
break;
case DOC_ChipID_DocMilPlus32:
pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
- /* fall through */
+ fallthrough;
default:
ret = -ENODEV;
goto notfound;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 634c550db13a..e1dc675b12bb 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -324,8 +324,7 @@ static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
/* READ0 and READ1 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ1:
column += 256;
-
- /* fall-through */
+ fallthrough;
case NAND_CMD_READ0:
dev_dbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index b9d5d55a5edb..53b00c841aec 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1148,20 +1148,21 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
struct dma_chan *dma_chan;
+ int ret = 0;
/* request dma channel */
- dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!dma_chan) {
- dev_err(this->dev, "Failed to request DMA channel.\n");
- goto acquire_err;
+ dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(this->dev, "DMA channel request failed: %d\n",
+ ret);
+ release_dma_channels(this);
+ } else {
+ this->dma_chans[0] = dma_chan;
}
- this->dma_chans[0] = dma_chan;
- return 0;
-
-acquire_err:
- release_dma_channels(this);
- return -EINVAL;
+ return ret;
}
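This is the first of several identical conversions in this diff (marvell, qcom, stm32 and sunxi follow below): dma_request_slave_channel() returned NULL on every failure, swallowing -EPROBE_DEFER, while dma_request_chan() returns an ERR_PTR whose errno can be propagated so the probe is retried once the DMA provider shows up. The recurring shape, sketched:

    struct dma_chan *chan = dma_request_chan(dev, "rx-tx");

    if (IS_ERR(chan)) {
            ret = PTR_ERR(chan);
            if (ret != -EPROBE_DEFER)       /* deferral is not an error */
                    dev_err(dev, "DMA channel request failed: %d\n", ret);
            return ret;                     /* lets the core retry probe */
    }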
static int gpmi_get_clks(struct gpmi_nand_data *this)
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index e30feb56b650..96c5ae8b1bbc 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config MTD_NAND_JZ4780
tristate "JZ4780 NAND controller"
+ depends on MIPS || COMPILE_TEST
depends on JZ4780_NEMC
help
Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index c954189606f6..8e22cd6ec71f 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -124,7 +124,6 @@ int ingenic_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_ecc *ecc;
- struct resource *res;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc)
@@ -134,8 +133,7 @@ int ingenic_ecc_probe(struct platform_device *pdev)
if (!ecc->ops)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ecc->base = devm_ioremap_resource(dev, res);
+ ecc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ecc->base))
return PTR_ERR(ecc->base);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 49afebee50db..935c4902ada7 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -253,7 +253,7 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
chip->ecc.calculate = ingenic_nand_ecc_calculate;
chip->ecc.correct = ingenic_nand_ecc_correct;
- /* fall through */
+ fallthrough;
case NAND_ECC_SOFT:
dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
(nfc->ecc) ? "hardware ECC" : "software ECC",
diff --git a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
index 6c852eae09cf..2d0e0a2192ae 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4725b_bch.c
@@ -145,10 +145,10 @@ static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
- /* fall-through */
+ fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
- /* fall-through */
+ fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
diff --git a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
index 079266a0d6cf..d67dbfff76cc 100644
--- a/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/ingenic/jz4780_bch.c
@@ -123,10 +123,10 @@ static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
- /* fall through */
+ fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
- /* fall through */
+ fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index cba6fe7dd8c4..9d0caadf940e 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -30,6 +30,7 @@
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_SANDISK 0x45
#define NAND_MFR_STMICRO 0x20
+/* Kioxia is the new name of Toshiba Memory. */
#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_WINBOND 0xef
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index fb5abdcfb007..179f0ca585f8 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -334,7 +334,7 @@ struct marvell_nand_chip {
int addr_cyc;
int selected_die;
unsigned int nsels;
- struct marvell_nand_chip_sel sels[0];
+ struct marvell_nand_chip_sel sels[];
};
static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
@@ -2743,16 +2743,21 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
if (ret)
return ret;
- nfc->dma_chan = dma_request_slave_channel(nfc->dev, "data");
- if (!nfc->dma_chan) {
- dev_err(nfc->dev,
- "Unable to request data DMA channel\n");
- return -ENODEV;
+ nfc->dma_chan = dma_request_chan(nfc->dev, "data");
+ if (IS_ERR(nfc->dma_chan)) {
+ ret = PTR_ERR(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nfc->dev, "DMA channel request failed: %d\n",
+ ret);
+ return ret;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
+ if (!r) {
+ ret = -ENXIO;
+ goto release_channel;
+ }
config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -2763,7 +2768,7 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
ret = dmaengine_slave_config(nfc->dma_chan, &config);
if (ret < 0) {
dev_err(nfc->dev, "Failed to configure DMA channel\n");
- return ret;
+ goto release_channel;
}
/*
@@ -2773,12 +2778,20 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
* the provided buffer.
*/
nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!nfc->dma_buf)
- return -ENOMEM;
+ if (!nfc->dma_buf) {
+ ret = -ENOMEM;
+ goto release_channel;
+ }
nfc->use_dma = true;
return 0;
+
+release_channel:
+ dma_release_channel(nfc->dma_chan);
+ nfc->dma_chan = NULL;
+
+ return ret;
}
static void marvell_nfc_reset(struct marvell_nfc *nfc)
@@ -2920,10 +2933,13 @@ static int marvell_nfc_probe(struct platform_device *pdev)
ret = marvell_nand_chips_init(dev, nfc);
if (ret)
- goto unprepare_reg_clk;
+ goto release_dma;
return 0;
+release_dma:
+ if (nfc->use_dma)
+ dma_release_channel(nfc->dma_chan);
unprepare_reg_clk:
clk_disable_unprepare(nfc->reg_clk);
unprepare_core_clk:
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 9f17b5b8efbf..f6fb5c0e6255 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -118,7 +118,7 @@ struct meson_nfc_nand_chip {
u8 *data_buf;
__le64 *info_buf;
u32 nsels;
- u8 sels[0];
+ u8 sels[];
};
struct meson_nand_ecc {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b8305e39ab51..ef149e8b26d0 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -131,7 +131,7 @@ struct mtk_nfc_nand_chip {
u32 spare_per_sector;
int nsels;
- u8 sels[0];
+ u8 sels[];
/* nothing after this field */
};
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index f64e3b6605c6..c24e5e2ba130 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -683,7 +683,12 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
if (ret)
return ret;
- timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ /*
+ * The +1 below is necessary because if we are now in the last
+ * fraction of a jiffy and msecs_to_jiffies() returns 1, we would
+ * wait only that small fraction of a jiffy, possibly leading to a
+ * false timeout.
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
ret = nand_read_data_op(chip, &status, sizeof(status), true);
if (ret)
@@ -4321,16 +4326,22 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
+ *
+ * Returns 0 for success or negative error code otherwise.
*/
static int nand_suspend(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
mutex_lock(&chip->lock);
- chip->suspended = 1;
+ if (chip->suspend)
+ ret = chip->suspend(chip);
+ if (!ret)
+ chip->suspended = 1;
mutex_unlock(&chip->lock);
- return 0;
+ return ret;
}
/**
@@ -4342,11 +4353,14 @@ static void nand_resume(struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
mutex_lock(&chip->lock);
- if (chip->suspended)
+ if (chip->suspended) {
+ if (chip->resume)
+ chip->resume(chip);
chip->suspended = 0;
- else
+ } else {
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
+ }
mutex_unlock(&chip->lock);
}
@@ -4360,6 +4374,38 @@ static void nand_shutdown(struct mtd_info *mtd)
nand_suspend(mtd);
}
+/**
+ * nand_lock - [MTD Interface] Lock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to lock (must be a multiple of block/page size)
+ */
+static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->lock_area)
+ return -ENOTSUPP;
+
+ return chip->lock_area(chip, ofs, len);
+}
+
+/**
+ * nand_unlock - [MTD Interface] Unlock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to unlock (must be a multiple of block/page size)
+ */
+static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->unlock_area)
+ return -ENOTSUPP;
+
+ return chip->unlock_area(chip, ofs, len);
+}
+
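With mtd->_lock/_unlock wired up to these helpers in the nand_scan_tail() hunk below, MTD users reach them through the ordinary mtd_lock()/mtd_unlock() entry points, and chips that provide no lock_area/unlock_area hook report -ENOTSUPP. A hedged usage sketch (note that the Macronix implementation later in this diff locks or unlocks the whole chip whatever range is passed):

    /* try to lock the first four erase blocks */
    ret = mtd_lock(mtd, 0, 4 * (uint64_t)mtd->erasesize);
    if (ret && ret != -ENOTSUPP)
            dev_warn(dev, "lock failed: %d\n", ret);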
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
@@ -5591,8 +5637,7 @@ static int nand_scan_tail(struct nand_chip *chip)
}
if (!ecc->read_page)
ecc->read_page = nand_read_page_hwecc_oob_first;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
if (!ecc->read_page)
@@ -5611,8 +5656,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->read_subpage = nand_read_subpage;
if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
ecc->write_subpage = nand_write_subpage_hwecc;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_HW_SYNDROME:
if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
(!ecc->read_page ||
@@ -5649,8 +5693,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->size, mtd->writesize);
ecc->mode = NAND_ECC_SOFT;
ecc->algo = NAND_ECC_HAMMING;
- /* fall through */
-
+ fallthrough;
case NAND_ECC_SOFT:
ret = nand_set_ecc_soft_ops(chip);
if (ret) {
@@ -5786,8 +5829,8 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
mtd->_sync = nand_sync;
- mtd->_lock = NULL;
- mtd->_unlock = NULL;
+ mtd->_lock = nand_lock;
+ mtd->_unlock = nand_unlock;
mtd->_suspend = nand_suspend;
mtd->_resume = nand_resume;
mtd->_reboot = nand_shutdown;
@@ -5907,6 +5950,8 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+ nanddev_cleanup(&chip->base);
+
/* Free bad block table memory */
kfree(chip->bbt);
kfree(chip->data_buf);
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 194e4227aefe..7caedaa5b9e5 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -26,7 +26,7 @@
struct hynix_read_retry {
int nregs;
const u8 *regs;
- u8 values[0];
+ u8 values[];
};
/**
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index f2526ec616a6..f91e92e1b972 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -331,8 +331,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
*/
if (column == -1 && page_addr == -1)
return;
- /* fall through */
-
+ fallthrough;
default:
/*
* If we don't have access to the busy pin, we apply the given
@@ -483,8 +482,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
-
- /* fall through - This applies to read commands */
+ fallthrough; /* This applies to read commands */
default:
/*
* If we don't have access to the busy pin, we apply the given
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 3ff7ce00cbdb..09c254c97b5c 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -6,11 +6,31 @@
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
*/
+#include "linux/delay.h"
#include "internals.h"
#define MACRONIX_READ_RETRY_BIT BIT(0)
#define MACRONIX_NUM_READ_RETRY_MODES 6
+#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
+#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
+#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
+
+#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
+#define MACRONIX_RANDOMIZER_BIT BIT(1)
+#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
+#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
+#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
+#define MACRONIX_RANDOMIZER_MODE_ENTER \
+ (MACRONIX_RANDOMIZER_ENPGM | \
+ MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+#define MACRONIX_RANDOMIZER_MODE_EXIT \
+ (MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+
+#define MXIC_CMD_POWER_DOWN 0xB9
+
struct nand_onfi_vendor_macronix {
u8 reserved;
u8 reliability_func;
@@ -29,15 +49,83 @@ static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
}
+static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ if (feature[0])
+ return feature[0];
+
+ feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ /* RANDEN and RANDOPT OTP bits are programmed */
+ feature[0] = 0x0;
+ ret = nand_prog_page_op(chip, 0, 0, feature, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void macronix_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
struct nand_onfi_vendor_macronix *mxic;
+ struct device_node *dn = nand_get_flash_node(chip);
+ int rand_otp = 0;
+ int ret;
if (!p->onfi)
return;
+ if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
+ rand_otp = 1;
+
mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+ /* Subpage write is prohibited in randomizer operation */
+ if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
+ mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ ret = macronix_nand_randomizer_check_enable(chip);
+ if (ret < 0) {
+ bitmap_clear(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ bitmap_clear(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ pr_info("Macronix NAND randomizer failed\n");
+ } else {
+ pr_info("Macronix NAND randomizer enabled\n");
+ }
+ }
+ }
+
if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
return;
@@ -91,6 +179,143 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
+/*
+ * Macronix NAND supports block protection via a Protection (PT) pin;
+ * it is active high at power-on, protecting the entire chip even when
+ * #WP is disabled. The protected area can be partitioned for
+ * lock/unlock according to the protection bits, i.e. upper 1/2 locked,
+ * upper 1/4 locked and so on.
+ */
+static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static void macronix_nand_block_protection_support(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
+ if (ret)
+ pr_err("Block protection check failed\n");
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+ return;
+ }
+
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ chip->lock_area = mxic_nand_lock;
+ chip->unlock_area = mxic_nand_unlock;
+}
+
+static int nand_power_down_op(struct nand_chip *chip)
+{
+ int ret;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ } else {
+ chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
+ }
+
+ return 0;
+}
+
+static int mxic_nand_suspend(struct nand_chip *chip)
+{
+ int ret;
+
+ nand_select_target(chip, 0);
+ ret = nand_power_down_op(chip);
+ if (ret < 0)
+ pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static void mxic_nand_resume(struct nand_chip *chip)
+{
+ /*
+ * Toggle the #CS pin to resume the NAND device, regardless of the
+ * state of the other pins (CLE, #WE, #RE). The NAND controller
+ * asserts/de-asserts #CS by sending any byte over the NAND bus,
+ * e.g. a power-down or reset command, without R/B# status checking.
+ */
+ nand_select_target(chip, 0);
+ nand_power_down_op(chip);
+ /* The minimum recovery time tRDP is 35 us */
+ usleep_range(35, 100);
+ nand_deselect_target(chip);
+}
+
+static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
+{
+ int i;
+ static const char * const deep_power_down_dev[] = {
+ "MX30UF1G28AD",
+ "MX30UF2G28AD",
+ "MX30UF4G28AD",
+ };
+
+ i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ chip->suspend = mxic_nand_suspend;
+ chip->resume = mxic_nand_resume;
+}
+
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
@@ -98,6 +323,8 @@ static int macronix_nand_init(struct nand_chip *chip)
macronix_nand_fix_broken_get_timings(chip);
macronix_nand_onfi_init(chip);
+ macronix_nand_block_protection_support(chip);
+ macronix_nand_deep_power_down_support(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index 9c03fbb1f47d..f3dcd695b5db 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -14,14 +14,68 @@
/* Recommended to rewrite for BENAND */
#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
+/* ECC Status Read Command for BENAND */
+#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
+
+/* ECC Status Mask for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
+
+/* Uncorrectable Error for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
+
+/* Max ECC Steps for BENAND */
+#define TOSHIBA_NAND_MAX_ECC_STEPS 8
+
+static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
+ u8 *buf)
+{
+ u8 *ecc_status = buf;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ return -ENOTSUPP;
+}
+
static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
unsigned int max_bitflips = 0;
- u8 status;
+ u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
/* Check Status */
+ ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
+ if (!ret) {
+ unsigned int i, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
+ if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bitflips;
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+ }
+
+ return max_bitflips;
+ }
+
+ /*
+ * Fall back to the regular status check if
+ * toshiba_nand_benand_read_eccstatus_op() failed.
+ */
ret = nand_status_op(chip, &status);
if (ret)
return ret;
@@ -108,7 +162,7 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
*/
if (chip->id.len >= 6 && nand_is_slc(chip) &&
(chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
- !(chip->id.data[4] & 0x80) /* !BENAND */) {
+ !(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
memorg->oobsize = 32 * memorg->pagesize >> 9;
mtd->oobsize = memorg->oobsize;
}
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 9a70754a61ef..1de03bb34e84 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2251,10 +2251,10 @@ static int __init ns_init_module(void)
switch (bbt) {
case 2:
chip->bbt_options |= NAND_BBT_NO_OOB;
- /* fall through */
+ fallthrough;
case 1:
chip->bbt_options |= NAND_BBT_USE_FLASH;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 5502ffbdd1e6..3fa0e2cbbe53 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -455,13 +455,13 @@ static int elm_context_save(struct elm_info *info)
ELM_SYNDROME_FRAGMENT_5 + offset);
regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_4 + offset);
- /* fall through */
+ fallthrough;
case BCH8_ECC:
regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_3 + offset);
regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_2 + offset);
- /* fall through */
+ fallthrough;
case BCH4_ECC:
regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_1 + offset);
@@ -503,13 +503,13 @@ static int elm_context_restore(struct elm_info *info)
regs->elm_syndrome_fragment_5[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
regs->elm_syndrome_fragment_4[i]);
- /* fall through */
+ fallthrough;
case BCH8_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
regs->elm_syndrome_fragment_3[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
regs->elm_syndrome_fragment_2[i]);
- /* fall through */
+ fallthrough;
case BCH4_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
regs->elm_syndrome_fragment_1[i]);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 7bb9a7e8e1e7..5b11c7061497 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2628,6 +2628,29 @@ static const struct nand_controller_ops qcom_nandc_ops = {
.attach_chip = qcom_nand_attach_chip,
};
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->is_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
int ret;
@@ -2673,22 +2696,37 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
return -EIO;
}
- nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
- if (!nandc->tx_chan) {
- dev_err(nandc->dev, "failed to request tx channel\n");
- return -ENODEV;
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "tx DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
- nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
- if (!nandc->rx_chan) {
- dev_err(nandc->dev, "failed to request rx channel\n");
- return -ENODEV;
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "rx DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
- nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
- if (!nandc->cmd_chan) {
- dev_err(nandc->dev, "failed to request cmd channel\n");
- return -ENODEV;
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "cmd DMA channel request failed: %d\n",
+ ret);
+ goto unalloc;
}
/*
@@ -2702,14 +2740,19 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unalloc;
}
} else {
- nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
- if (!nandc->chan) {
- dev_err(nandc->dev,
- "failed to request slave channel\n");
- return -ENODEV;
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(nandc->dev,
+ "rxtx DMA channel request failed: %d\n",
+ ret);
+ return ret;
}
}
@@ -2720,29 +2763,9 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
nandc->controller.ops = &qcom_nandc_ops;
return 0;
-}
-
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
- if (nandc->props->is_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
-
- if (nandc->tx_chan)
- dma_release_channel(nandc->tx_chan);
-
- if (nandc->rx_chan)
- dma_release_channel(nandc->rx_chan);
-
- if (nandc->cmd_chan)
- dma_release_channel(nandc->cmd_chan);
- } else {
- if (nandc->chan)
- dma_release_channel(nandc->chan);
- }
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
}
/* one time setup of a few nand controller registers */
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 3ba73f18841f..b6d45cd911ae 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1606,15 +1606,36 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
/* DMA configuration */
static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
{
- int ret;
+ int ret = 0;
- fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
- fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
- fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
+ fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx");
+ if (IS_ERR(fmc2->dma_tx_ch)) {
+ ret = PTR_ERR(fmc2->dma_tx_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request tx DMA channel: %d\n", ret);
+ fmc2->dma_tx_ch = NULL;
+ goto err_dma;
+ }
- if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
- dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
- return 0;
+ fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx");
+ if (IS_ERR(fmc2->dma_rx_ch)) {
+ ret = PTR_ERR(fmc2->dma_rx_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request rx DMA channel: %d\n", ret);
+ fmc2->dma_rx_ch = NULL;
+ goto err_dma;
+ }
+
+ fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc");
+ if (IS_ERR(fmc2->dma_ecc_ch)) {
+ ret = PTR_ERR(fmc2->dma_ecc_ch);
+ if (ret != -ENODEV)
+ dev_err(fmc2->dev,
+ "failed to request ecc DMA channel: %d\n", ret);
+ fmc2->dma_ecc_ch = NULL;
+ goto err_dma;
}
ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
@@ -1635,6 +1656,15 @@ static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
init_completion(&fmc2->dma_ecc_complete);
return 0;
+
+err_dma:
+ if (ret == -ENODEV) {
+ dev_warn(fmc2->dev,
+ "DMAs not defined in the DT, polling mode is used\n");
+ ret = 0;
+ }
+
+ return ret;
}
/* NAND callbacks setup */
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 37a4ac0dd85b..5f3e40b79fb1 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -195,7 +195,7 @@ struct sunxi_nand_chip {
u32 timing_cfg;
u32 timing_ctl;
int nsels;
- struct sunxi_nand_chip_sel sels[0];
+ struct sunxi_nand_chip_sel sels[];
};
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
@@ -2123,8 +2123,16 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (ret)
goto out_ahb_reset_reassert;
- nfc->dmac = dma_request_slave_channel(dev, "rxtx");
- if (nfc->dmac) {
+ nfc->dmac = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(nfc->dmac)) {
+ ret = PTR_ERR(nfc->dmac);
+ if (ret == -EPROBE_DEFER)
+ goto out_ahb_reset_reassert;
+
+ /* Ignore errors to fall back to PIO mode */
+ dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret);
+ nfc->dmac = NULL;
+ } else {
struct dma_slave_config dmac_cfg = { };
dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
@@ -2138,9 +2146,6 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (nfc->caps->extra_mbus_conf)
writel(readl(nfc->regs + NFC_REG_CTL) |
NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
-
- } else {
- dev_warn(dev, "failed to request rxtx DMA channel\n");
}
platform_set_drvdata(pdev, nfc);
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 89f6beefb01c..b6bb358b96ce 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -16,6 +16,7 @@
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -370,10 +371,11 @@ out:
return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
-static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
+ u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
- SPINAND_MAX_ID_LEN);
+ struct spi_mem_op op = SPINAND_READID_OP(
+ naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -568,18 +570,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
- .ooblen = 2,
+ .ooblen = sizeof(marker),
.ooboffs = 0,
- .oobbuf.in = spinand->oobbuf,
+ .oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
- memset(spinand->oobbuf, 0, 2);
spinand_select_target(spinand, pos->target);
spinand_read_page(spinand, &req, false);
- if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
+ if (marker[0] != 0xff || marker[1] != 0xff)
return true;
return false;
@@ -603,15 +605,16 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooboffs = 0,
- .ooblen = 2,
- .oobbuf.out = spinand->oobbuf,
+ .ooblen = sizeof(marker),
+ .oobbuf.out = marker,
+ .mode = MTD_OPS_RAW,
};
int ret;
- /* Erase block before marking it bad. */
ret = spinand_select_target(spinand, pos->target);
if (ret)
return ret;
@@ -620,9 +623,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- spinand_erase_op(spinand, pos);
-
- memset(spinand->oobbuf, 0, 2);
return spinand_write_page(spinand, &req);
}
@@ -762,24 +762,62 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&winbond_spinand_manufacturer,
};
-static int spinand_manufacturer_detect(struct spinand_device *spinand)
+static int spinand_manufacturer_match(struct spinand_device *spinand,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
- ret = spinand_manufacturers[i]->ops->detect(spinand);
- if (ret > 0) {
- spinand->manufacturer = spinand_manufacturers[i];
- return 0;
- } else if (ret < 0) {
- return ret;
- }
- }
+ const struct spinand_manufacturer *manufacturer =
+ spinand_manufacturers[i];
+
+ if (id[0] != manufacturer->id)
+ continue;
+
+ ret = spinand_match_and_init(spinand,
+ manufacturer->chips,
+ manufacturer->nchips,
+ rdid_method);
+ if (ret < 0)
+ continue;
+ spinand->manufacturer = manufacturer;
+ return 0;
+ }
return -ENOTSUPP;
}
+static int spinand_id_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ ret = spinand_read_id_op(spinand, 0, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 1, 0, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_ADDR);
+ if (!ret)
+ return 0;
+
+ ret = spinand_read_id_op(spinand, 0, 1, id);
+ if (ret)
+ return ret;
+ ret = spinand_manufacturer_match(spinand,
+ SPINAND_READID_METHOD_OPCODE_DUMMY);
+
+ return ret;
+}
+
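spinand_id_detect() retries READ_ID three ways because vendors disagree on the frame layout: bare opcode, opcode plus one address byte, or opcode plus one dummy byte (Macronix parts, for instance, need the dummy cycle, so a bare-opcode read puts garbage in the first byte). Whichever variant succeeds, byte 0 of the buffer must then equal the manufacturer ID and the following byte(s) the table entry's devid. A self-contained miniature of that matching step, with IDs taken from the Macronix table below:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* invented miniature of the matching step in spinand_match_and_init() */
    static const uint8_t mx35lf1ge4ab_devid[] = { 0x12 };

    int main(void)
    {
            /* buffer as filled by the opcode+dummy READ_ID variant */
            uint8_t raw[4] = { 0xc2, 0x12, 0xff, 0xff };

            /* byte 0 selects the manufacturer, the rest must match the devid */
            if (raw[0] == 0xc2 &&
                !memcmp(raw + 1, mx35lf1ge4ab_devid, sizeof(mx35lf1ge4ab_devid)))
                    puts("matched MX35LF1GE4AB");
            return 0;
    }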
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
if (spinand->manufacturer->ops->init)
@@ -835,9 +873,9 @@ spinand_select_op_variant(struct spinand_device *spinand,
* @spinand: SPI NAND object
* @table: SPI NAND device description table
* @table_size: size of the device description table
+ * @rdid_method: read id method to match
*
- * Should be used by SPI NAND manufacturer drivers when they want to find a
- * match between a device ID retrieved through the READ_ID command and an
+ * Try to match a device ID retrieved through the READ_ID command with an
* entry in the SPI NAND description table. If a match is found, the spinand
* object will be initialized with information provided by the matching
* spinand_info entry.
@@ -846,8 +884,10 @@ spinand_select_op_variant(struct spinand_device *spinand,
*/
int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
- unsigned int table_size, u16 devid)
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method)
{
+ u8 *id = spinand->id.data;
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
@@ -855,13 +895,17 @@ int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *info = &table[i];
const struct spi_mem_op *op;
- if (devid != info->devid)
+ if (rdid_method != info->devid.method)
+ continue;
+
+ if (memcmp(id + 1, info->devid.id, info->devid.len))
continue;
nand->memorg = table[i].memorg;
nand->eccreq = table[i].eccreq;
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
+ spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
op = spinand_select_op_variant(spinand,
@@ -898,13 +942,7 @@ static int spinand_detect(struct spinand_device *spinand)
if (ret)
return ret;
- ret = spinand_read_id_op(spinand, spinand->id.data);
- if (ret)
- return ret;
-
- spinand->id.len = SPINAND_MAX_ID_LEN;
-
- ret = spinand_manufacturer_detect(spinand);
+ ret = spinand_id_detect(spinand);
if (ret) {
dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
spinand->id.data);
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e99d425aa93f..d219c970042a 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -195,7 +195,8 @@ static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info gigadevice_spinand_table[] = {
- SPINAND_INFO("GD5F1GQ4xA", 0xF1,
+ SPINAND_INFO("GD5F1GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -204,7 +205,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F2GQ4xA", 0xF2,
+ SPINAND_INFO("GD5F2GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -213,7 +215,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F4GQ4xA", 0xF4,
+ SPINAND_INFO("GD5F4GQ4xA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -222,7 +225,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
- SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+ SPINAND_INFO("GD5F1GQ4UExxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -231,7 +235,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
- SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
+ SPINAND_INFO("GD5F1GQ4UFxxG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
@@ -242,39 +247,13 @@ static const struct spinand_info gigadevice_spinand_table[] = {
gd5fxgq4ufxxg_ecc_get_status)),
};
-static int gigadevice_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- u16 did;
- int ret;
-
- /*
- * Earlier GDF5-series devices (A,E) return [0][MID][DID]
- * Later (F) devices return [MID][DID1][DID2]
- */
-
- if (id[0] == SPINAND_MFR_GIGADEVICE)
- did = (id[1] << 8) + id[2];
- else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
- did = id[2];
- else
- return 0;
-
- ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
- ARRAY_SIZE(gigadevice_spinand_table),
- did);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
- .detect = gigadevice_spinand_detect,
};
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
.id = SPINAND_MFR_GIGADEVICE,
.name = "GigaDevice",
+ .chips = gigadevice_spinand_table,
+ .nchips = ARRAY_SIZE(gigadevice_spinand_table),
.ops = &gigadevice_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 21def3f8fb36..0f900f3aa21a 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -99,7 +99,8 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info macronix_spinand_table[] = {
- SPINAND_INFO("MX35LF1GE4AB", 0x12,
+ SPINAND_INFO("MX35LF1GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -108,7 +109,8 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
- SPINAND_INFO("MX35LF2GE4AB", 0x22,
+ SPINAND_INFO("MX35LF2GE4AB",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -118,33 +120,13 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
};
-static int macronix_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /*
- * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
- * raw_id is garbage.
- */
- if (id[1] != SPINAND_MFR_MACRONIX)
- return 0;
-
- ret = spinand_match_and_init(spinand, macronix_spinand_table,
- ARRAY_SIZE(macronix_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
- .detect = macronix_spinand_detect,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
.id = SPINAND_MFR_MACRONIX,
.name = "Macronix",
+ .chips = macronix_spinand_table,
+ .nchips = ARRAY_SIZE(macronix_spinand_table),
.ops = &macronix_spinand_manuf_ops,
};
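With the per-vendor detect() hook gone, adding a manufacturer is purely declarative: fill a spinand_info table and point .chips/.nchips at it, and the core's spinand_manufacturer_match() does the rest. A hedged sketch for a fictitious vendor ("foo" names are invented; the op-variant lists and ooblayout are assumed to be defined above, as in every driver touched by this diff):

    static const struct spinand_info foo_spinand_table[] = {
            SPINAND_INFO("FOO1G01", /* fictitious 1 Gb part */
                         SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xab),
                         NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
                         NAND_ECCREQ(4, 512),
                         SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
                                                  &write_cache_variants,
                                                  &update_cache_variants),
                         0,
                         SPINAND_ECCINFO(&foo_ooblayout, NULL)),
    };

    static const struct spinand_manufacturer_ops foo_spinand_manuf_ops = {
    };

    const struct spinand_manufacturer foo_spinand_manufacturer = {
            .id = 0xab,
            .name = "Foo",
            .chips = foo_spinand_table,
            .nchips = ARRAY_SIZE(foo_spinand_table),
            .ops = &foo_spinand_manuf_ops,
    };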
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 7d7b1f7fcf71..5d370cfcdaaa 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -18,6 +18,16 @@
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+#define MICRON_CFG_CR BIT(0)
+
+/*
+ * As per the datasheet, die selection is done via bit 6 of the Die
+ * Select Register (address 0xD0).
+ */
+#define MICRON_DIE_SELECT_REG 0xD0
+
+#define MICRON_SELECT_DIE(x) ((x) << 6)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -34,38 +44,52 @@ static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
- region->offset = 64;
- region->length = 64;
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
return 0;
}
-static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
/* Reserve 2 bytes for the BBM. */
region->offset = 2;
- region->length = 62;
+ region->length = (mtd->oobsize / 2) - 2;
return 0;
}
-static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
- .ecc = mt29f2g01abagd_ooblayout_ecc,
- .free = mt29f2g01abagd_ooblayout_free,
+static const struct mtd_ooblayout_ops micron_8_ooblayout = {
+ .ecc = micron_8_ooblayout_ecc,
+ .free = micron_8_ooblayout_free,
};
-static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int micron_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ spinand->scratchbuf);
+
+ if (target > 1)
+ return -EINVAL;
+
+ *spinand->scratchbuf = MICRON_SELECT_DIE(target);
+
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
+static int micron_8_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
switch (status & MICRON_STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -91,43 +115,131 @@ static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info micron_spinand_table[] = {
- SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+ /* M79A 2Gb 3.3V */
+ SPINAND_INFO("MT29F2G01ABAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 2Gb 1.8V */
+ SPINAND_INFO("MT29F2G01ABBGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
- mt29f2g01abagd_ecc_get_status)),
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 3.3V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 1.8V */
+ SPINAND_INFO("MT29F1G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ADAGD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ABAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 4Gb 1.8V */
+ SPINAND_INFO("MT29F4G01ABBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 8Gb 3.3V */
+ SPINAND_INFO("MT29F8G01ADAFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 8Gb 1.8V */
+ SPINAND_INFO("MT29F8G01ADBFD",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
};
-static int micron_spinand_detect(struct spinand_device *spinand)
+static int micron_spinand_init(struct spinand_device *spinand)
{
- u8 *id = spinand->id.data;
- int ret;
-
/*
- * Micron SPI NAND read ID need a dummy byte,
- * so the first byte in raw_id is dummy.
+ * The M70A device series enables the Continuous Read feature at
+ * power-up, which is not supported by this driver. Disable this bit
+ * to avoid any possible failure.
*/
- if (id[1] != SPINAND_MFR_MICRON)
- return 0;
-
- ret = spinand_match_and_init(spinand, micron_spinand_table,
- ARRAY_SIZE(micron_spinand_table), id[2]);
- if (ret)
- return ret;
+ if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
+ return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
- return 1;
+ return 0;
}
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
- .detect = micron_spinand_detect,
+ .init = micron_spinand_init,
};
const struct spinand_manufacturer micron_spinand_manufacturer = {
.id = SPINAND_MFR_MICRON,
.name = "Micron",
+ .chips = micron_spinand_table,
+ .nchips = ARRAY_SIZE(micron_spinand_table),
.ops = &micron_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 52307681cbd0..519ade513c1f 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -97,7 +97,8 @@ static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
static const struct spinand_info paragon_spinand_table[] = {
- SPINAND_INFO("PN26G01A", 0xe1,
+ SPINAND_INFO("PN26G01A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -106,7 +107,8 @@ static const struct spinand_info paragon_spinand_table[] = {
0,
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
pn26g0xa_ecc_get_status)),
- SPINAND_INFO("PN26G02A", 0xe2,
+ SPINAND_INFO("PN26G02A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2),
NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -117,31 +119,13 @@ static const struct spinand_info paragon_spinand_table[] = {
pn26g0xa_ecc_get_status)),
};
-static int paragon_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /* Read ID returns [0][MID][DID] */
-
- if (id[1] != SPINAND_MFR_PARAGON)
- return 0;
-
- ret = spinand_match_and_init(spinand, paragon_spinand_table,
- ARRAY_SIZE(paragon_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
- .detect = paragon_spinand_detect,
};
const struct spinand_manufacturer paragon_spinand_manufacturer = {
.id = SPINAND_MFR_PARAGON,
.name = "Paragon",
+ .chips = paragon_spinand_table,
+ .nchips = ARRAY_SIZE(paragon_spinand_table),
.ops = &paragon_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 0db5ee4e82af..bc801d83343e 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+/* Kioxia is the new name of Toshiba Memory. */
#define SPINAND_MFR_TOSHIBA 0x98
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
@@ -19,14 +20,26 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+static SPINAND_OP_VARIANTS(write_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/*
+ * Backward compatibility for 1st-generation serial NAND devices
+ * which don't support the Quad Program Load operation.
+ */
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
@@ -37,8 +50,8 @@ static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}
-static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *region)
+static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
@@ -50,13 +63,13 @@ static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = {
- .ecc = tc58cxgxsx_ooblayout_ecc,
- .free = tc58cxgxsx_ooblayout_free,
+static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
+ .ecc = tx58cxgxsxraix_ooblayout_ecc,
+ .free = tx58cxgxsxraix_ooblayout_free,
};
-static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
- u8 status)
+static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
@@ -94,105 +107,174 @@ static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info toshiba_spinand_table[] = {
- /* 3.3V 1Gb */
- SPINAND_INFO("TC58CVG0S3", 0xC2,
+ /* 3.3V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 2Gb */
- SPINAND_INFO("TC58CVG1S3", 0xCB,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 4Gb */
- SPINAND_INFO("TC58CVG2S0", 0xCD,
- NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
- NAND_ECCREQ(8, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
- &write_cache_variants,
- &update_cache_variants),
- 0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 3.3V 4Gb */
- SPINAND_INFO("TC58CVG2S0", 0xED,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 1Gb */
- SPINAND_INFO("TC58CYG0S3", 0xB2,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 2Gb */
- SPINAND_INFO("TC58CYG1S3", 0xBB,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
- /* 1.8V 4Gb */
- SPINAND_INFO("TC58CYG2S0", 0xBD,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIG",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
- tc58cxgxsx_ecc_get_status)),
-};
-
-static int toshiba_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
/*
- * Toshiba SPI NAND read ID needs a dummy byte,
- * so the first byte in id is garbage.
+ * 2nd-generation serial NAND devices have a HOLD_D function which is
+ * equivalent to the QE bit.
*/
- if (id[1] != SPINAND_MFR_TOSHIBA)
- return 0;
-
- ret = spinand_match_and_init(spinand, toshiba_spinand_table,
- ARRAY_SIZE(toshiba_spinand_table),
- id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
+ /* 3.3V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CVG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CYG3S0HRAIJ",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
- .detect = toshiba_spinand_detect,
};
const struct spinand_manufacturer toshiba_spinand_manufacturer = {
.id = SPINAND_MFR_TOSHIBA,
.name = "Toshiba",
+ .chips = toshiba_spinand_table,
+ .nchips = ARRAY_SIZE(toshiba_spinand_table),
.ops = &toshiba_spinand_manuf_ops,
};
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index a6c17e0cace8..76684428354e 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -75,7 +75,8 @@ static int w25m02gv_select_target(struct spinand_device *spinand,
}
static const struct spinand_info winbond_spinand_table[] = {
- SPINAND_INFO("W25M02GV", 0xAB,
+ SPINAND_INFO("W25M02GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -84,7 +85,8 @@ static const struct spinand_info winbond_spinand_table[] = {
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
- SPINAND_INFO("W25N01GV", 0xAA,
+ SPINAND_INFO("W25N01GV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -94,31 +96,6 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
};
-/**
- * winbond_spinand_detect - initialize device related part in spinand_device
- * struct if it is a Winbond device.
- * @spinand: SPI NAND device structure
- */
-static int winbond_spinand_detect(struct spinand_device *spinand)
-{
- u8 *id = spinand->id.data;
- int ret;
-
- /*
- * Winbond SPI NAND read ID need a dummy byte,
- * so the first byte in raw_id is dummy.
- */
- if (id[1] != SPINAND_MFR_WINBOND)
- return 0;
-
- ret = spinand_match_and_init(spinand, winbond_spinand_table,
- ARRAY_SIZE(winbond_spinand_table), id[2]);
- if (ret)
- return ret;
-
- return 1;
-}
-
static int winbond_spinand_init(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -138,12 +115,13 @@ static int winbond_spinand_init(struct spinand_device *spinand)
}
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
- .detect = winbond_spinand_detect,
.init = winbond_spinand_init,
};
const struct spinand_manufacturer winbond_spinand_manufacturer = {
.id = SPINAND_MFR_WINBOND,
.name = "Winbond",
+ .chips = winbond_spinand_table,
+ .nchips = ARRAY_SIZE(winbond_spinand_table),
.ops = &winbond_spinand_manuf_ops,
};
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 267b9000782e..6e816eafb312 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -24,79 +24,6 @@ config MTD_SPI_NOR_USE_4K_SECTORS
Please note that some tools/drivers/filesystems may not work with
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
-config SPI_ASPEED_SMC
- tristate "Aspeed flash controllers in SPI mode"
- depends on ARCH_ASPEED || COMPILE_TEST
- depends on HAS_IOMEM && OF
- help
- This enables support for the Firmware Memory controller (FMC)
- in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
- and support for the SPI flash memory controller (SPI) for
- the host firmware. The implementation only supports SPI NOR.
-
-config SPI_CADENCE_QUADSPI
- tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || ARM64 || COMPILE_TEST)
- help
- Enable support for the Cadence Quad SPI Flash controller.
-
- Cadence QSPI is a specialized controller for connecting an SPI
- Flash over 1/2/4-bit wide bus. Enable this option if you have a
- device with a Cadence QSPI controller and want to access the
- Flash as an MTD device.
-
-config SPI_HISI_SFC
- tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
- depends on ARCH_HISI || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables support for HiSilicon FMC SPI-NOR flash controller.
-
-config SPI_NXP_SPIFI
- tristate "NXP SPI Flash Interface (SPIFI)"
- depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
- depends on HAS_IOMEM
- help
- Enable support for the NXP LPC SPI Flash Interface controller.
-
- SPIFI is a specialized controller for connecting serial SPI
- Flash. Enable this option if you have a device with a SPIFI
- controller and want to access the Flash as a mtd device.
-
-config SPI_INTEL_SPI
- tristate
-
-config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
- depends on X86 && PCI
- select SPI_INTEL_SPI
- help
- This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-pci.
-
-config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
- depends on X86
- select SPI_INTEL_SPI
- help
- This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-spi-platform.
+source "drivers/mtd/spi-nor/controllers/Kconfig"
endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 738dfd74cf76..7ddb742de1fe 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,9 +1,20 @@
# SPDX-License-Identifier: GPL-2.0
+
+spi-nor-objs := core.o sfdp.o
+spi-nor-objs += atmel.o
+spi-nor-objs += catalyst.o
+spi-nor-objs += eon.o
+spi-nor-objs += esmt.o
+spi-nor-objs += everspin.o
+spi-nor-objs += fujitsu.o
+spi-nor-objs += gigadevice.o
+spi-nor-objs += intel.o
+spi-nor-objs += issi.o
+spi-nor-objs += macronix.o
+spi-nor-objs += micron-st.o
+spi-nor-objs += spansion.o
+spi-nor-objs += sst.o
+spi-nor-objs += winbond.o
+spi-nor-objs += xilinx.o
+spi-nor-objs += xmc.o
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
-obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
-obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
-obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
-obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
-obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
-obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
-obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
new file mode 100644
index 000000000000..3f5f21a473a6
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info atmel_parts[] = {
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
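+ /*
+ * INFO() arguments are (jedec_id, ext_id, sector_size, n_sectors,
+ * flags); e.g. the at25fs010 below is 4 x 32 KiB = 128 KiB (1 Mbit)
+ * with 4 KiB sub-sector erase.
+ */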
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
+
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
+
+ { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+
+ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+};
+
+static void atmel_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static const struct spi_nor_fixups atmel_fixups = {
+ .default_init = atmel_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_atmel = {
+ .name = "atmel",
+ .parts = atmel_parts,
+ .nparts = ARRAY_SIZE(atmel_parts),
+ .fixups = &atmel_fixups,
+};
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
new file mode 100644
index 000000000000..011b83e99e95
--- /dev/null
+++ b/drivers/mtd/spi-nor/catalyst.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info catalyst_parts[] = {
+ /* Catalyst / On Semiconductor -- non-JEDEC */
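+ /*
+ * CAT25_INFO() arguments are (sector_size, n_sectors, page_size,
+ * addr_width, flags); e.g. the cat25c11 below is 8 x 16 B = 128 B,
+ * addressed with a single address byte.
+ */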
+ { "cat25c11", CAT25_INFO(16, 8, 16, 1,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c03", CAT25_INFO(32, 8, 16, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c09", CAT25_INFO(128, 8, 32, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25c17", CAT25_INFO(256, 8, 32, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
+
+const struct spi_nor_manufacturer spi_nor_catalyst = {
+ .name = "catalyst",
+ .parts = catalyst_parts,
+ .nparts = ARRAY_SIZE(catalyst_parts),
+};
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
new file mode 100644
index 000000000000..10b86660b821
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SPI_ASPEED_SMC
+ tristate "Aspeed flash controllers in SPI mode"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ This enables support for the Firmware Memory controller (FMC)
+ in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+ and support for the SPI flash memory controller (SPI) for
+ the host firmware. The implementation only supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+ Flash over a 1/2/4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
+config SPI_HISI_SFC
+ tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for the HiSilicon FMC SPI-NOR flash controller.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+ controller and want to access the Flash as an MTD device.
+
+config SPI_INTEL_SPI
+ tristate
+
+config SPI_INTEL_SPI_PCI
+ tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ depends on X86 && PCI
+ select SPI_INTEL_SPI
+ help
+ This enables PCI support for the Intel PCH/PCU SPI controller in
+ master mode. This controller is present in modern Intel hardware
+ and is used to hold BIOS and other persistent settings. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-pci.
+
+config SPI_INTEL_SPI_PLATFORM
+ tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+ depends on X86
+ select SPI_INTEL_SPI
+ help
+ This enables platform support for the Intel PCH/PCU SPI
+ controller in master mode. This controller is present in modern
+ Intel hardware and is used to hold BIOS and other persistent
+ settings. Using this driver it is possible to upgrade BIOS
+ directly from Linux.
+
+ Say N here unless you know what you are doing. Overwriting the
+ SPI flash may render the system unbootable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-spi-platform.
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
new file mode 100644
index 000000000000..46e6fbe586e3
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
+obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
+obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
+obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
+obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
index 395127349aa8..ae85e4c0e114 100644
--- a/drivers/mtd/spi-nor/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -109,7 +109,7 @@ struct aspeed_smc_controller {
void __iomem *ahb_base; /* per-chip windows resource */
u32 ahb_window_size; /* full mapping window size */
- struct aspeed_smc_chip *chips[0]; /* pointers to attached chips */
+ struct aspeed_smc_chip *chips[]; /* pointers to attached chips */
};
/*
@@ -354,7 +354,7 @@ static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr)
default:
WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n",
nor->addr_width);
- /* FALLTHROUGH */
+ fallthrough;
case 3:
cmdaddr = addr & 0xFFFFFF;
cmdaddr |= cmd << 24;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
index 494dcab4aaaa..494dcab4aaaa 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 6c7a4118752e..6c7a4118752e 100644
--- a/drivers/mtd/spi-nor/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index 81329f680bec..81329f680bec 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
index f80f1086f928..f80f1086f928 100644
--- a/drivers/mtd/spi-nor/intel-spi-platform.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c
index 61d2a0ad2131..61d2a0ad2131 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.c
diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
index e2f41b8827bf..e2f41b8827bf 100644
--- a/drivers/mtd/spi-nor/intel-spi.h
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.h
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9a5b1a7c636a..9a5b1a7c636a 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
new file mode 100644
index 000000000000..cc68ea84318e
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.c
@@ -0,0 +1,3466 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task_stack.h>
+#include <linux/spi/flash.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+/* Define max timeouts for polling the status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
+
+#define SPI_NOR_MAX_ADDR_WIDTH 4
+
+/**
+ * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
+ * transfer
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * If we have to use the bounce buffer, the data field in @op will be updated.
+ *
+ * Return: true if the bounce buffer is needed, false if not
+ */
+static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ /* op->data.buf.in occupies the same memory as op->data.buf.out */
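+ /* Stack and vmalloc buffers are not DMA-able, hence the bounce buffer. */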
+ if (object_is_on_stack(op->data.buf.in) ||
+ !virt_addr_valid(op->data.buf.in)) {
+ if (op->data.nbytes > nor->bouncebuf_size)
+ op->data.nbytes = nor->bouncebuf_size;
+ op->data.buf.in = nor->bouncebuf;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * spi_nor_spimem_exec_op() - execute a memory operation
+ * @nor: pointer to 'struct spi_nor'
+ * @op: pointer to 'struct spi_mem_op' template for transfer
+ *
+ * Return: 0 on success, -error otherwise.
+ */
+static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
+{
+ int error;
+
+ error = spi_mem_adjust_op_size(nor->spimem, op);
+ if (error)
+ return error;
+
+ return spi_mem_exec_op(nor->spimem, op);
+}
+
+/**
+ * spi_nor_spimem_read_data() - read data from flash's memory region via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
+ size_t len, u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(len, buf, 1));
+ bool usebouncebuf;
+ ssize_t nbytes;
+ int error;
+
+ /* get transfer protocols. */
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
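+ /* e.g. 8 dummy cycles on a 4-bit-wide bus: (8 * 4) / 8 = 4 bytes. */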
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+
+ usebouncebuf = spi_nor_spimem_bounce(nor, &op);
+
+ if (nor->dirmap.rdesc) {
+ nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.in);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ if (usebouncebuf && nbytes > 0)
+ memcpy(buf, op.data.buf.in, nbytes);
+
+ return nbytes;
+}
+
+/**
+ * spi_nor_read_data() - read data from flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @buf: pointer to dst buffer
+ *
+ * Return: number of bytes read successfully, -errno otherwise
+ */
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_read_data(nor, from, len, buf);
+
+ return nor->controller_ops->read(nor, from, len, buf);
+}
+
+/**
+ * spi_nor_spimem_write_data() - write data to flash memory via
+ * spi-mem
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
+ size_t len, const u8 *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ ssize_t nbytes;
+ int error;
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op.addr.nbytes = 0;
+
+ if (spi_nor_spimem_bounce(nor, &op))
+ memcpy(nor->bouncebuf, buf, op.data.nbytes);
+
+ if (nor->dirmap.wdesc) {
+ nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
+ op.data.nbytes, op.data.buf.out);
+ } else {
+ error = spi_nor_spimem_exec_op(nor, &op);
+ if (error)
+ return error;
+ nbytes = op.data.nbytes;
+ }
+
+ return nbytes;
+}
+
+/**
+ * spi_nor_write_data() - write data to flash memory
+ * @nor: pointer to 'struct spi_nor'
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @buf: pointer to src buffer
+ *
+ * Return: number of bytes written successfully, -errno otherwise
+ */
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ if (nor->spimem)
+ return spi_nor_spimem_write_data(nor, to, len, buf);
+
+ return nor->controller_ops->write(nor, to, len, buf);
+}
+
+/**
+ * spi_nor_write_enable() - Set write enable latch with Write Enable command.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_write_disable() - Send Write Disable instruction to the chip.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_disable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_sr() - Read the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
+ sr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_fsr() - Read the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'
+ * @fsr: pointer to a DMA-able buffer where the value of the
+ * Flag Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, fsr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
+ fsr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_cr() - Read the Configuration Register using the
+ * SPINOR_OP_RDCR (35h) command.
+ * @nor: pointer to 'struct spi_nor'
+ * @cr: pointer to a DMA-able buffer where the value of the
+ * Configuration Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, cr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
+ SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor,
+ enable ? SPINOR_OP_EN4B :
+ SPINOR_OP_EX4B,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
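+ /*
+ * On these Spansion flashes, bit 7 of the Bank Address Register
+ * (written with the BRWR opcode) is the EXTADD bit that controls
+ * the 4-byte address mode.
+ */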
+ nor->bouncebuf[0] = enable << 7;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_write_ear() - Write Extended Address Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @ear: value to write to the Extended Address Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
+{
+ int ret;
+
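+ /* The EAR holds the address bits above A23 when 3-byte opcodes are used. */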
+ nor->bouncebuf[0] = ear;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d writing EAR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to a DMA-able buffer where the value of the
+ * Status Register will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
+ sr, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
+ * the flash is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_xsr_ready(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return !!(nor->bouncebuf[0] & XSR_RDY);
+}
+
+/**
+ * spi_nor_clear_sr() - Clear the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_sr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing SR\n", ret);
+}
+
+/**
+ * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
+ * for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int ret = spi_nor_read_sr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_USE_CLSR &&
+ nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
+ if (nor->bouncebuf[0] & SR_E_ERR)
+ dev_err(nor->dev, "Erase Error occurred\n");
+ else
+ dev_err(nor->dev, "Programming Error occurred\n");
+
+ spi_nor_clear_sr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
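+ /* The flash is ready once the Write In Progress (WIP) bit is clear. */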
+ return !(nor->bouncebuf[0] & SR_WIP);
+}
+
+/**
+ * spi_nor_clear_fsr() - Clear the Flag Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ */
+static void spi_nor_clear_fsr(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
+}
+
+/**
+ * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
+ * ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: non-zero if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
+
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
+ if (nor->bouncebuf[0] & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (nor->bouncebuf[0] & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ spi_nor_clear_fsr(nor);
+
+ /*
+ * WEL bit remains set to one when an erase or page program
+ * error occurs. Issue a Write Disable command to protect
+ * against inadvertent writes that can possibly corrupt the
+ * contents of the memory.
+ */
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ return ret;
+
+ return -EIO;
+ }
+
+ return nor->bouncebuf[0] & FSR_READY;
+}
+
+/**
+ * spi_nor_ready() - Query the flash to see if it is ready for new commands.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
+ */
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+
+ if (nor->flags & SNOR_F_READY_XSR_RDY)
+ sr = spi_nor_xsr_ready(nor);
+ else
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
+}
+
+/**
+ * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
+ * Status Register until ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ * @timeout_jiffies: jiffies to wait until timeout.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout_jiffies)
+{
+ unsigned long deadline;
+ int timeout = 0, ret;
+
+ deadline = jiffies + timeout_jiffies;
+
+ while (!timeout) {
+ if (time_after_eq(jiffies, deadline))
+ timeout = 1;
+
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ cond_resched();
+ }
+
+ dev_dbg(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
+ * flash to be ready, or timeout occurs.
+ * @nor: pointer to "struct spi_nor".
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
+/**
+ * spi_nor_write_sr() - Write the Status Register.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr: pointer to DMA-able buffer to write to the Status Register.
+ * @len: number of bytes to write to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, sr, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
+ sr, len);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
+ * ensure that the byte written matches the received value.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+
+ nor->bouncebuf[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] != sr1) {
+ dev_dbg(nor->dev, "SR1: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
+ * Status Register 2 in one shot. Ensure that the byte written in the Status
+ * Register 1 match the received value, and that the 16-bit Write did not
+ * affect what was already in the Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register 1.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 cr_written;
+
+ /* Make sure we don't overwrite the contents of Status Register 2. */
+ if (!(nor->flags & SNOR_F_NO_READ_CR)) {
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+ } else if (nor->params->quad_enable) {
+ /*
+ * If the Status Register 2 Read command (35h) is not
+ * supported, we should at least be sure we don't
+ * change the value of the SR2 Quad Enable bit.
+ *
+ * We can safely assume that when the Quad Enable method is
+ * set, the value of the QE bit is one, as a consequence of the
+ * nor->params->quad_enable() call.
+ *
+ * We can safely assume that the Quad Enable bit is present in
+ * the Status Register 2 at BIT(1). According to the JESD216
+ * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
+ * Write Status (01h) command is available just for the cases
+ * in which the QE bit is described in SR2 at BIT(1).
+ */
+ sr_cr[1] = SR2_QUAD_EN_BIT1;
+ } else {
+ sr_cr[1] = 0;
+ }
+
+ sr_cr[0] = sr1;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ cr_written = sr_cr[1];
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr_written != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
+ * Configuration Register in one shot. Ensure that the byte written in the
+ * Configuration Register match the received value, and that the 16-bit Write
+ * did not affect what was already in the Status Register 1.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @cr: byte value to be written to the Configuration Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
+{
+ int ret;
+ u8 *sr_cr = nor->bouncebuf;
+ u8 sr_written;
+
+ /* Keep the current value of the Status Register 1. */
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ sr_cr[1] = cr;
+
+ ret = spi_nor_write_sr(nor, sr_cr, 2);
+ if (ret)
+ return ret;
+
+ sr_written = sr_cr[0];
+
+ ret = spi_nor_read_sr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ if (sr_written != sr_cr[0]) {
+ dev_dbg(nor->dev, "SR: Read back test failed\n");
+ return -EIO;
+ }
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return 0;
+
+ ret = spi_nor_read_cr(nor, &sr_cr[1]);
+ if (ret)
+ return ret;
+
+ if (cr != sr_cr[1]) {
+ dev_dbg(nor->dev, "CR: read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
+ * the byte written matches the received value without affecting other bits in
+ * Status Registers 1 and 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @sr1: byte value to be written to the Status Register.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
+{
+ if (nor->flags & SNOR_F_HAS_16BIT_SR)
+ return spi_nor_write_16bit_sr_and_check(nor, sr1);
+
+ return spi_nor_write_sr1_and_check(nor, sr1);
+}
+
+/**
+ * spi_nor_write_sr2() - Write the Status Register 2 using the
+ * SPINOR_OP_WRSR2 (3eh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
+ sr2, 1);
+ }
+
+ if (ret) {
+ dev_dbg(nor->dev, "error %d writing SR2\n", ret);
+ return ret;
+ }
+
+ return spi_nor_wait_till_ready(nor);
+}
+
+/**
+ * spi_nor_read_sr2() - Read the Status Register 2 using the
+ * SPINOR_OP_RDSR2 (3fh) command.
+ * @nor: pointer to 'struct spi_nor'.
+ * @sr2: pointer to DMA-able buffer where the value of the
+ * Status Register 2 will be written.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
+{
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, sr2, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
+ sr2, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d reading SR2\n", ret);
+
+ return ret;
+}
+
+/**
+ * spi_nor_erase_chip() - Erase the entire flash memory.
+ * @nor: pointer to 'struct spi_nor'.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_chip(struct spi_nor *nor)
+{
+ int ret;
+
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
+ NULL, 0);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d erasing chip\n", ret);
+
+ return ret;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
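+/*
+ * As an illustration of the table lookup above: the plain READ opcode
+ * (03h) maps to its dedicated 4-byte address variant (13h), while an
+ * opcode with no 4-byte counterpart is passed through unchanged.
+ */
+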
+u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
+{
+ return !!nor->params->erase_map.uniform_erase_type;
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
+}
+
+int spi_nor_lock_and_prep(struct spi_nor *nor)
+{
+ int ret = 0;
+
+ mutex_lock(&nor->lock);
+
+ if (nor->controller_ops && nor->controller_ops->prepare) {
+ ret = nor->controller_ops->prepare(nor);
+ if (ret) {
+ mutex_unlock(&nor->lock);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+void spi_nor_unlock_and_unprep(struct spi_nor *nor)
+{
+ if (nor->controller_ops && nor->controller_ops->unprepare)
+ nor->controller_ops->unprepare(nor);
+ mutex_unlock(&nor->lock);
+}
+
+static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
+{
+ if (!nor->params->convert_addr)
+ return addr;
+
+ return nor->params->convert_addr(nor, addr);
+}
+
+/*
+ * Initiate the erasure of a single sector
+ */
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+ int i;
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ return spi_mem_exec_op(nor->spimem, &op);
+ } else if (nor->controller_ops->erase) {
+ return nor->controller_ops->erase(nor, addr);
+ }
+
+ /*
+ * Default implementation, if the driver doesn't have a specialized
+ * HW control.
+ */
+ for (i = nor->addr_width - 1; i >= 0; i--) {
+ nor->bouncebuf[i] = addr & 0xff;
+ addr >>= 8;
+ }
+
+ return nor->controller_ops->write_reg(nor, nor->erase_opcode,
+ nor->bouncebuf, nor->addr_width);
+}
+
+/**
+ * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @dividend: dividend value
+ * @remainder: pointer to u32 remainder (will be updated)
+ *
+ * Return: the result of the division
+ */
+static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
+ u64 dividend, u32 *remainder)
+{
+ /* The JEDEC JESD216B standard requires erase sizes to be a power of 2. */
+ *remainder = (u32)dividend & erase->size_mask;
+ return dividend >> erase->size_shift;
+}
+
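+/*
+ * For instance, a 64 KiB erase type has size_shift = 16 and
+ * size_mask = 0xffff, so dividing an address of 0x30000 yields 3 with
+ * remainder 0, avoiding a costly 64-bit division.
+ */
+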
+/**
+ * spi_nor_find_best_erase_type() - find the best erase type for the given
+ * offset in the serial flash memory and the
+ * number of bytes to erase. The region in
+ * which the address fits is expected to be
+ * provided.
+ * @map: the erase map of the SPI NOR
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Return: a pointer to the best fitted erase type, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ const struct spi_nor_erase_region *region,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_type *erase;
+ u32 rem;
+ int i;
+ u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ /*
+ * Erase types are ordered by size, with the smallest erase type at
+ * index 0.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ /* Does the erase region support the tested erase type? */
+ if (!(erase_mask & BIT(i)))
+ continue;
+
+ erase = &map->erase_type[i];
+
+ /* Don't erase more than what the user has asked for. */
+ if (erase->size > len)
+ continue;
+
+ /* Alignment is not mandatory for overlaid regions */
+ if (region->offset & SNOR_OVERLAID_REGION)
+ return erase;
+
+ spi_nor_div_by_erase_size(erase, addr, &rem);
+ if (rem)
+ continue;
+ else
+ return erase;
+ }
+
+ return NULL;
+}
+
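+/*
+ * For example, in a region advertising both 4 KiB and 64 KiB erase types,
+ * a 64 KiB-aligned address with len >= 64 KiB selects the 64 KiB type;
+ * once len drops below 64 KiB, the loop falls back to the 4 KiB type.
+ */
+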
+static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
+{
+ return region->offset & SNOR_LAST_REGION;
+}
+
+static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
+{
+ return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
+}
+
+/**
+ * spi_nor_region_next() - get the next spi nor region
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ *
+ * Return: the next spi nor region or NULL if last region.
+ */
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region)
+{
+ if (spi_nor_region_is_last(region))
+ return NULL;
+ region++;
+ return region;
+}
+
+/**
+ * spi_nor_find_erase_region() - find the region of the serial flash memory in
+ * which the offset fits
+ * @map: the erase map of the SPI NOR
+ * @addr: offset in the serial flash memory
+ *
+ * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_region *
+spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ u64 region_end = region_start + region->size;
+
+ while (addr < region_start || addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ return ERR_PTR(-EINVAL);
+
+ region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ region_end = region_start + region->size;
+ }
+
+ return region;
+}
+
+/**
+ * spi_nor_init_erase_cmd() - initialize an erase command
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ *
+ * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_command *
+spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase)
+{
+ struct spi_nor_erase_command *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&cmd->list);
+ cmd->opcode = erase->opcode;
+ cmd->count = 1;
+
+ if (region->offset & SNOR_OVERLAID_REGION)
+ cmd->size = region->size;
+ else
+ cmd->size = erase->size;
+
+ return cmd;
+}
+
+/**
+ * spi_nor_destroy_erase_cmd_list() - destroy erase command list
+ * @erase_list: list of erase commands
+ */
+static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
+{
+ struct spi_nor_erase_command *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, erase_list, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+}
+
+/**
+ * spi_nor_init_erase_cmd_list() - initialize erase command list
+ * @nor: pointer to a 'struct spi_nor'
+ * @erase_list: list of erase commands to be executed once we validate that the
+ * erase can be performed
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Builds the list of best fitted erase commands and verifies if the erase can
+ * be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ struct list_head *erase_list,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase, *prev_erase = NULL;
+ struct spi_nor_erase_region *region;
+ struct spi_nor_erase_command *cmd = NULL;
+ u64 region_end;
+ int ret = -EINVAL;
+
+ region = spi_nor_find_erase_region(map, addr);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ region_end = spi_nor_region_end(region);
+
+ while (len) {
+ erase = spi_nor_find_best_erase_type(map, region, addr, len);
+ if (!erase)
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
+
+ addr += cmd->size;
+ len -= cmd->size;
+
+ if (len && addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ goto destroy_erase_cmd_list;
+ region_end = spi_nor_region_end(region);
+ }
+
+ prev_erase = erase;
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(erase_list);
+ return ret;
+}
+
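+/*
+ * As an example of the coalescing done above: erasing eight consecutive
+ * sectors that share the same erase type yields a single command with
+ * count == 8; a new command is only allocated when the best erase type
+ * changes or the region is overlaid.
+ */
+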
+/**
+ * spi_nor_erase_multi_sectors() - perform a non-uniform erase
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Build a list of best fitted erase commands and execute it once we validate
+ * that the erase can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
+{
+ LIST_HEAD(erase_list);
+ struct spi_nor_erase_command *cmd, *next;
+ int ret;
+
+ ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(cmd, next, &erase_list, list) {
+ nor->erase_opcode = cmd->opcode;
+ while (cmd->count) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ addr += cmd->size;
+ cmd->count--;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+ }
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(&erase_list);
+ return ret;
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may extend
+ * across one or more erase sectors. Return an error if there is a problem
+ * erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len;
+ uint32_t rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+ }
+
+ addr = instr->addr;
+ len = instr->len;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_erase_chip(nor);
+ if (ret)
+ goto erase_err;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
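+ /*
+ * For instance, a 32 MiB part waits up to
+ * 16 * CHIP_ERASE_2MB_READY_WAIT_JIFFIES here.
+ */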
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(mtd->size / SZ_2M));
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ goto erase_err;
+
+ /* REVISIT in some cases we could speed up erasing large regions
+ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
+ * to use "small sector erase", but that's not always optimal.
+ */
+
+ /* "sector"-at-a-time erase */
+ } else if (spi_nor_has_uniform_erase(nor)) {
+ while (len) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto erase_err;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+ }
+
+ /* erase multiple sectors */
+ } else {
+ ret = spi_nor_erase_multi_sectors(nor, addr, len);
+ if (ret)
+ goto erase_err;
+ }
+
+ ret = spi_nor_write_disable(nor);
+
+erase_err:
+ spi_nor_unlock_and_unprep(nor);
+
+ return ret;
+}
+
+static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
+{
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
+ return mask | SR_BP3_BIT6;
+
+ if (nor->flags & SNOR_F_HAS_4BIT_BP)
+ return mask | SR_BP3;
+
+ return mask;
+}
+
+static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
+ return SR_TB_BIT6;
+ else
+ return SR_TB_BIT5;
+}
+
+static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
+{
+ unsigned int bp_slots, bp_slots_needed;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+
+ /* Reserve one slot for "protect none" and one for "protect all". */
+ bp_slots = (1 << hweight8(mask)) - 2;
+ bp_slots_needed = ilog2(nor->info->n_sectors);
+
+ if (bp_slots_needed > bp_slots)
+ return nor->info->sector_size <<
+ (bp_slots_needed - bp_slots);
+ else
+ return nor->info->sector_size;
+}
+
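+/*
+ * Worked example: with three BP bits there are 2^3 - 2 = 6 usable slots.
+ * An 8 MiB flash made of 128 sectors of 64 KiB needs ilog2(128) = 7 of
+ * them, so the minimum protection granularity doubles to two sectors
+ * (128 KiB), matching the smallest entry of the BP table further below.
+ */
+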
+static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 bp, val = sr & mask;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
+ val = (val & ~SR_BP3_BIT6) | SR_BP3;
+
+ bp = val >> SR_BP_SHIFT;
+
+ if (!bp) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ return;
+ }
+
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ *len = min_prot_len << (bp - 1);
+
+ if (*len > mtd->size)
+ *len = mtd->size;
+
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
+ *ofs = 0;
+ else
+ *ofs = mtd->size - *len;
+}
+
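+/*
+ * Continuing the example above: bp == 5 with a 128 KiB minimum protection
+ * length locks 128 KiB << 4 = 2 MiB, anchored at the top of the flash
+ * unless the TB bit is set.
+ */
+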
+/*
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
+ */
+static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
+ uint64_t len, u8 sr, bool locked)
+{
+ loff_t lock_offs;
+ uint64_t lock_len;
+
+ if (!len)
+ return 1;
+
+ spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
+
+ if (locked)
+ /* Requested range is a sub-range of locked range */
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+ else
+ /* Requested range does not overlap with locked range */
+ return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
+ * register (SR). Does not support these features found in newer SR bitfields:
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ * - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ * ------|-------|-------|-------|-------|---------------|-------------------
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is unlocked, we don't need to do anything */
+ if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
+ if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
+ can_be_bottom = false;
+
+ /* If anything above us is unlocked, we can't use 'top' protection */
+ if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_top = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should end up locked */
+ if (use_top)
+ lock_len = mtd->size - ofs;
+ else
+ lock_len = ofs + len;
+
+ if (lock_len == mtd->size) {
+ val = mask;
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ if (val & ~mask)
+ return -EINVAL;
+
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /* Disallow further writes if WP pin is asserted */
+ status_new |= SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & mask) < (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
+
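+/*
+ * Worked example against the table above: locking the upper 2 MB of an
+ * 8 MB w25q64fw gives lock_len = 2 MB, so
+ * pow = ilog2(2 MB) - ilog2(128 KB) + 1 = 5 and val = pow << SR_BP_SHIFT,
+ * i.e. BP2..BP0 = 101, the "2 MB | Upper 1/4" row.
+ */
+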
+/*
+ * Unlock a region of the flash. See spi_nor_sr_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u64 min_prot_len;
+ int ret, status_old, status_new;
+ u8 mask = spi_nor_get_sr_bp_mask(nor);
+ u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
+ u8 pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ status_old = nor->bouncebuf[0];
+
+ /* If nothing in our range is locked, we don't need to do anything */
+ if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is locked, we can't use 'top' protection */
+ if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
+ can_be_top = false;
+
+ /* If anything above us is locked, we can't use 'bottom' protection */
+ if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_bottom = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should remain locked */
+ if (use_top)
+ lock_len = mtd->size - (ofs + len);
+ else
+ lock_len = ofs;
+
+ if (lock_len == 0) {
+ val = 0; /* fully unlocked */
+ } else {
+ min_prot_len = spi_nor_get_min_prot_length_sr(nor);
+ pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
+ val = pow << SR_BP_SHIFT;
+
+ if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
+ val = (val & ~SR_BP3) | SR_BP3_BIT6;
+
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~tb_mask) | val;
+
+ /* Don't protect status register if we're fully unlocked */
+ if (lock_len == 0)
+ status_new &= ~SR_SRWD;
+
+ if (!use_top)
+ status_new |= tb_mask;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) > (status_old & mask))
+ return -EINVAL;
+
+ return spi_nor_write_sr_and_check(nor, status_new);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
+ * for more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
+}
+
+static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
+ .lock = spi_nor_sr_lock,
+ .unlock = spi_nor_sr_unlock,
+ .is_locked = spi_nor_sr_is_locked,
+};
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->lock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = nor->params->locking_ops->is_locked(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+/**
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ ret = spi_nor_read_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+ return 0;
+
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
+
+ return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
+ * Register 2.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
+{
+ int ret;
+
+ if (nor->flags & SNOR_F_NO_READ_CR)
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
+
+ ret = spi_nor_read_cr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+ return 0;
+
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
+ return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
+}
+
+/**
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register 2.
+ *
+ * This is one of the procedures to set the QE bit described in the SFDP
+ * (JESD216 rev B) specification but no manufacturer using this procedure has
+ * been identified yet, hence the name of the function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
+{
+ u8 *sr2 = nor->bouncebuf;
+ int ret;
+ u8 sr2_written;
+
+ /* Check current Quad Enable bit value. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+ if (*sr2 & SR2_QUAD_EN_BIT7)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ *sr2 |= SR2_QUAD_EN_BIT7;
+
+ ret = spi_nor_write_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ sr2_written = *sr2;
+
+ /* Read back and check it. */
+ ret = spi_nor_read_sr2(nor, sr2);
+ if (ret)
+ return ret;
+
+ if (*sr2 != sr2_written) {
+ dev_dbg(nor->dev, "SR2: Read back test failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct spi_nor_manufacturer *manufacturers[] = {
+ &spi_nor_atmel,
+ &spi_nor_catalyst,
+ &spi_nor_eon,
+ &spi_nor_esmt,
+ &spi_nor_everspin,
+ &spi_nor_fujitsu,
+ &spi_nor_gigadevice,
+ &spi_nor_intel,
+ &spi_nor_issi,
+ &spi_nor_macronix,
+ &spi_nor_micron,
+ &spi_nor_st,
+ &spi_nor_spansion,
+ &spi_nor_sst,
+ &spi_nor_winbond,
+ &spi_nor_xilinx,
+ &spi_nor_xmc,
+};
+
+static const struct flash_info *
+spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
+ const u8 *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < nparts; i++) {
+ if (parts[i].id_len &&
+ !memcmp(parts[i].id, id, parts[i].id_len))
+ return &parts[i];
+ }
+
+ return NULL;
+}
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+ const struct flash_info *info;
+ u8 *id = nor->bouncebuf;
+ unsigned int i;
+ int ret;
+
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+ SPI_NOR_MAX_ID_LEN);
+ }
+ if (ret) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ info = spi_nor_search_part_by_id(manufacturers[i]->parts,
+ manufacturers[i]->nparts,
+ id);
+ if (info) {
+ nor->manufacturer = manufacturers[i];
+ return info;
+ }
+ }
+
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
+ SPI_NOR_MAX_ID_LEN, id);
+ return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ while (len) {
+ loff_t addr = from;
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ WARN_ON(ret > len);
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+
+ /*
+ * If page_size is a power of two, the offset can be quickly
+ * calculated with an AND operation. In the other cases we
+ * need to do a modulus operation (more expensive).
+ * Powers of two have only one bit set, so we can use
+ * hweight32() to detect whether we need to do a
+ * modulus (do_div()) or not.
+ */
+ if (hweight32(nor->page_size) == 1) {
+ page_offset = addr & (nor->page_size - 1);
+ } else {
+ uint64_t aux = addr;
+
+ page_offset = do_div(aux, nor->page_size);
+ }
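+ /*
+ * E.g. with the common 256-byte page, hweight32(256) == 1 and
+ * page_offset is simply addr & 0xff; a hypothetical 528-byte
+ * page would take the do_div() path instead.
+ */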
+ /* the size of data remaining on the first page */
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto write_err;
+
+ ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ }
+
+write_err:
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
+
+static int spi_nor_check(struct spi_nor *nor)
+{
+ if (!nor->dev ||
+ (!nor->spimem && !nor->controller_ops) ||
+ (!nor->spimem && nor->controller_ops &&
+ (!nor->controller_ops->read ||
+ !nor->controller_ops->write ||
+ !nor->controller_ops->read_reg ||
+ !nor->controller_ops->write_reg))) {
+ pr_err("spi-nor: please fill all the necessary fields!\n");
+ return -EINVAL;
+ }
+
+ if (nor->spimem && nor->controller_ops) {
+ dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ pp->opcode = opcode;
+ pp->proto = proto;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+/**
+ * spi_nor_spimem_check_op() - check if the operation is supported
+ * by the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @op: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_op(struct spi_nor *nor,
+ struct spi_mem_op *op)
+{
+ /*
+ * First test with 4 address bytes. The opcode itself might
+ * be a 3B addressing opcode but we don't care, because
+ * SPI controller implementation should not check the opcode,
+ * but just the sequence.
+ */
+ op->addr.nbytes = 4;
+ if (!spi_mem_supports_op(nor->spimem, op)) {
+ if (nor->mtd.size > SZ_16M)
+ return -ENOTSUPP;
+
+ /* If flash size <= 16MB, 3 address bytes are sufficient */
+ op->addr.nbytes = 3;
+ if (!spi_mem_supports_op(nor->spimem, op))
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_spimem_check_readop() - check if the read op is supported
+ * by the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @read: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_readop(struct spi_nor *nor,
+ const struct spi_nor_read_command *read)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
+ SPI_MEM_OP_ADDR(3, 0, 1),
+ SPI_MEM_OP_DUMMY(0, 1),
+ SPI_MEM_OP_DATA_IN(0, NULL, 1));
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
+ op.dummy.buswidth / 8;
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_check_pp() - check if the page program op is supported
+ * by the controller
+ * @nor: pointer to a 'struct spi_nor'
+ * @pp: pointer to op template to be checked
+ *
+ * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
+ */
+static int spi_nor_spimem_check_pp(struct spi_nor *nor,
+ const struct spi_nor_pp_command *pp)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
+ SPI_MEM_OP_ADDR(3, 0, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(0, NULL, 1));
+
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
+
+ return spi_nor_spimem_check_op(nor, &op);
+}
+
+/**
+ * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
+ * based on SPI controller capabilities
+ * @nor: pointer to a 'struct spi_nor'
+ * @hwcaps: pointer to the resulting capabilities after adjusting
+ * according to the controller's and the flash's capabilities
+ */
+static void
+spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ unsigned int cap;
+
+ /* DTR modes are not supported yet, mask them all. */
+ *hwcaps &= ~SNOR_HWCAPS_DTR;
+
+ /* X-X-X modes are not supported yet, mask them all. */
+ *hwcaps &= ~SNOR_HWCAPS_X_X_X;
+
+ for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
+ int rdidx, ppidx;
+
+ if (!(*hwcaps & BIT(cap)))
+ continue;
+
+ rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
+ if (rdidx >= 0 &&
+ spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
+ *hwcaps &= ~BIT(cap);
+
+ ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
+ if (ppidx < 0)
+ continue;
+
+ if (spi_nor_spimem_check_pp(nor,
+ &params->page_programs[ppidx]))
+ *hwcaps &= ~BIT(cap);
+ }
+}
+
+/**
+ * spi_nor_set_erase_type() - set a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ */
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode)
+{
+ erase->size = size;
+ erase->opcode = opcode;
+ /* The JEDEC JESD216B standard requires erase sizes to be a power of 2. */
+ erase->size_shift = ffs(erase->size) - 1;
+ erase->size_mask = (1 << erase->size_shift) - 1;
+}
+
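+/*
+ * E.g. a 4 KiB erase type gets size_shift = ffs(4096) - 1 = 12 and
+ * size_mask = 0xfff, which is what spi_nor_div_by_erase_size() relies on.
+ */
+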
+/**
+ * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: bitmask encoding erase types that can erase the entire
+ * flash memory
+ * @flash_size: the spi nor flash memory size
+ */
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size)
+{
+ /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
+ map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
+ SNOR_LAST_REGION;
+ map->uniform_region.size = flash_size;
+ map->regions = &map->uniform_region;
+ map->uniform_erase_type = erase_mask;
+}
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ int ret;
+
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_bfpt) {
+ ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
+ bfpt, params);
+ if (ret)
+ return ret;
+ }
+
+ if (nor->info->fixups && nor->info->fixups->post_bfpt)
+ return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
+ params);
+
+ return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &nor->params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+ * In the spi-nor framework, we don't need to distinguish between
+ * mode clock cycles and wait state clock cycles.
+ * Indeed, the value of the mode clock cycles is used by a QSPI
+ * flash memory to know whether it should enter or leave its 0-4-4
+ * (Continuous Read / XIP) mode.
+ * eXecution In Place is outside the scope of the mtd sub-system.
+ * Hence we choose to merge both mode and wait state clock cycles
+ * into the so-called dummy clock cycles.
+ */
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
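+/*
+ * For instance, a hypothetical 1-4-4 Fast Read defined with 2 mode clocks
+ * and 4 wait states ends up with nor->read_dummy = 6 dummy clock cycles.
+ */
+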
+static int spi_nor_select_pp(struct spi_nor *nor,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &nor->params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+/**
+ * spi_nor_select_uniform_erase() - select optimum uniform erase type
+ * @map: the erase map of the SPI NOR
+ * @wanted_size: the erase type size to search for. Contains the value of
+ * info->sector_size or of the "small sector" size in case
+ * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
+ *
+ * Once the optimum uniform sector erase command is found, disable all the
+ * others.
+ *
+ * Return: pointer to erase type on success, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
+ const u32 wanted_size)
+{
+ const struct spi_nor_erase_type *tested_erase, *erase = NULL;
+ int i;
+ u8 uniform_erase_type = map->uniform_erase_type;
+
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (!(uniform_erase_type & BIT(i)))
+ continue;
+
+ tested_erase = &map->erase_type[i];
+
+ /*
+ * If the current erase size is the one, stop here:
+ * we have found the right uniform Sector Erase command.
+ */
+ if (tested_erase->size == wanted_size) {
+ erase = tested_erase;
+ break;
+ }
+
+ /*
+ * Otherwise, the current erase size is still a valid candidate.
+ * Select the biggest valid candidate.
+ */
+ if (!erase && tested_erase->size)
+ erase = tested_erase;
+ /* keep iterating to find the wanted_size */
+ }
+
+ if (!erase)
+ return NULL;
+
+ /* Disable all other Sector Erase commands. */
+ map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
+ map->uniform_erase_type |= BIT(erase - map->erase_type);
+ return erase;
+}
+
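+/*
+ * E.g. if the map advertises both 4 KiB and 64 KiB uniform erase types and
+ * wanted_size is 4096, the 4 KiB type is selected and the 64 KiB bit is
+ * cleared from uniform_erase_type.
+ */
+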
+static int spi_nor_select_erase(struct spi_nor *nor)
+{
+ struct spi_nor_erase_map *map = &nor->params->erase_map;
+ const struct spi_nor_erase_type *erase = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ u32 wanted_size = nor->info->sector_size;
+ int i;
+
+ /*
+ * The previous implementation handling Sector Erase commands assumed
+ * that the SPI flash memory has a uniform layout and used only one
+ * of the supported erase sizes for all Sector Erase commands.
+ * So to be backward compatible, the new implementation also tries to
+ * manage the SPI flash memory as uniform with a single erase sector
+ * size, when possible.
+ */
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ wanted_size = 4096u;
+#endif
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ erase = spi_nor_select_uniform_erase(map, wanted_size);
+ if (!erase)
+ return -EINVAL;
+ nor->erase_opcode = erase->opcode;
+ mtd->erasesize = erase->size;
+ return 0;
+ }
+
+ /*
+ * For non-uniform SPI flash memory, set mtd->erasesize to the
+ * maximum erase sector size. No need to set nor->erase_opcode.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (map->erase_type[i].size) {
+ erase = &map->erase_type[i];
+ break;
+ }
+ }
+
+ if (!erase)
+ return -EINVAL;
+
+ mtd->erasesize = erase->size;
+ return 0;
+}
+
+static int spi_nor_default_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ u32 ignored_mask, shared_mask;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ if (nor->spimem) {
+ /*
+ * When called from spi_nor_probe(), all caps are set and we
+ * need to discard some of them based on what the SPI
+ * controller actually supports (using spi_mem_supports_op()).
+ */
+ spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
+ } else {
+ /*
+ * SPI n-n-n protocols are not supported when the SPI
+ * controller directly implements the spi_nor interface.
+ * Yet another reason to switch to spi-mem.
+ */
+ ignored_mask = SNOR_HWCAPS_X_X_X;
+ if (shared_mask & ignored_mask) {
+ dev_dbg(nor->dev,
+ "SPI n-n-n protocols are not supported.\n");
+ shared_mask &= ~ignored_mask;
+ }
+ }
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Page Program command. */
+ err = spi_nor_select_pp(nor, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select write settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Sector Erase command. */
+ err = spi_nor_select_erase(nor);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select erase settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ if (!nor->params->setup)
+ return 0;
+
+ return nor->params->setup(nor, hwcaps);
+}
+
+/**
+ * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
+ * settings based on MFR register and ->default_init() hook.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->default_init)
+ nor->manufacturer->fixups->default_init(nor);
+
+ if (nor->info->fixups && nor->info->fixups->default_init)
+ nor->info->fixups->default_init(nor);
+}
+
+/**
+ * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
+ * based on JESD216 SFDP standard.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The method has a roll-back mechanism: in case the SFDP parsing fails, the
+ * legacy flash parameters and settings will be restored.
+ */
+static void spi_nor_sfdp_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
+ nor->flags &= ~SNOR_F_4B_OPCODES;
+ } else {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ }
+}
+
+/**
+ * spi_nor_info_init_params() - Initialize the flash's parameters and settings
+ * based on nor->info data.
+ * @nor: pointer to a 'struct spi_nor'.
+ */
+static void spi_nor_info_init_params(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ const struct flash_info *info = nor->info;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ u8 i, erase_mask;
+
+ /* Initialize legacy flash parameters and settings. */
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
+ params->setup = spi_nor_default_setup;
+ /* Default to 16-bit Write Status (01h) Command */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ /* Set SPI NOR sizes. */
+ params->size = (u64)info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ /* Default to Fast Read for DT and non-DT platform devices. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+
+ /* Mask out Fast Read if not requested at DT instantiation. */
+ if (np && !of_property_read_bool(np, "m25p,fast-read"))
+ params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+ }
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+
+ if (info->flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ if (info->flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ /*
+ * Sector Erase settings. Sort Erase Types in ascending order, with the
+ * smallest erase size starting at BIT(0).
+ */
+ erase_mask = 0;
+ i = 0;
+ if (info->flags & SECT_4K_PMC) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K_PMC);
+ i++;
+ } else if (info->flags & SECT_4K) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K);
+ i++;
+ }
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ SPINOR_OP_SE);
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+}
+
+/**
+ * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
+ * after SFDP has been parsed (is also called for SPI NORs that do not
+ * support RDSFDP).
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Typically used to tweak various parameters that could not be extracted by
+ * other means (e.g. when the information provided by the SFDP/flash_info
+ * tables is incomplete or wrong).
+ */
+static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->manufacturer && nor->manufacturer->fixups &&
+ nor->manufacturer->fixups->post_sfdp)
+ nor->manufacturer->fixups->post_sfdp(nor);
+
+ if (nor->info->fixups && nor->info->fixups->post_sfdp)
+ nor->info->fixups->post_sfdp(nor);
+}
+
+/**
+ * spi_nor_late_init_params() - Late initialization of default flash parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Used to set default flash parameters and settings when the ->default_init()
+ * hook or the SFDP parser did not set them.
+ */
+static void spi_nor_late_init_params(struct spi_nor *nor)
+{
+ /*
+ * NOR protection support. When locking_ops are not provided, we pick
+ * the default ones.
+ */
+ if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
+ nor->params->locking_ops = &spi_nor_sr_locking_ops;
+}
+
+/**
+ * spi_nor_init_params() - Initialize the flash's parameters and settings.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * The flash parameters and settings are initialized based on a sequence of
+ * calls that are ordered by priority:
+ *
+ * 1/ Default flash parameters initialization. The initializations are done
+ * based on nor->info data:
+ * spi_nor_info_init_params()
+ *
+ * which can be overwritten by:
+ * 2/ Manufacturer flash parameters initialization. The initializations are
+ * done based on the MFR register, or when the decisions cannot be made
+ * solely based on MFR, by using specific flash_info tweaks, ->default_init():
+ * spi_nor_manufacturer_init_params()
+ *
+ * which can be overwritten by:
+ * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
+ * should be more accurate than the above.
+ * spi_nor_sfdp_init_params()
+ *
+ * Please note that there is a ->post_bfpt() fixup hook that can overwrite
+ * the flash parameters and settings immediately after parsing the Basic
+ * Flash Parameter Table.
+ *
+ * which can be overwritten by:
+ * 4/ Post SFDP flash parameters initialization. Used to tweak various
+ * parameters that could not be extracted by other means (e.g. when the
+ * information provided by the SFDP/flash_info tables is incomplete or
+ * wrong).
+ * spi_nor_post_sfdp_fixups()
+ *
+ * 5/ Late default flash parameters initialization, used when the
+ * ->default_init() hook or the SFDP parser do not set specific params.
+ * spi_nor_late_init_params()
+ */
+static int spi_nor_init_params(struct spi_nor *nor)
+{
+ nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
+ if (!nor->params)
+ return -ENOMEM;
+
+ spi_nor_info_init_params(nor);
+
+ spi_nor_manufacturer_init_params(nor);
+
+ if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ !(nor->info->flags & SPI_NOR_SKIP_SFDP))
+ spi_nor_sfdp_init_params(nor);
+
+ spi_nor_post_sfdp_fixups(nor);
+
+ spi_nor_late_init_params(nor);
+
+ return 0;
+}
+
+/**
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_quad_enable(struct spi_nor *nor)
+{
+ if (!nor->params->quad_enable)
+ return 0;
+
+ if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4))
+ return 0;
+
+ return nor->params->quad_enable(nor);
+}
+
+/**
+ * spi_nor_unlock_all() - Unlocks the entire flash memory array.
+ * @nor: pointer to a 'struct spi_nor'.
+ *
+ * Some SPI NOR flashes are write protected by default after a power-on reset
+ * cycle, in order to avoid inadvertent writes during power-up. Backward
+ * compatibility requires us to unlock the entire flash memory array at
+ * power-up by default.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_unlock_all(struct spi_nor *nor)
+{
+ if (nor->flags & SNOR_F_HAS_LOCK)
+ return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ err = spi_nor_quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+
+ err = spi_nor_unlock_all(nor);
+ if (err) {
+ dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
+ return err;
+ }
+
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ nor->params->set_4byte_addr_mode(nor, true);
+ }
+
+ return 0;
+}
+
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ nor->flags & SNOR_F_BROKEN_RESET)
+ nor->params->set_4byte_addr_mode(nor, false);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
+static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
+ const char *name)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+ for (j = 0; j < manufacturers[i]->nparts; j++) {
+ if (!strcmp(name, manufacturers[i]->parts[j].name)) {
+ nor->manufacturer = manufacturers[i];
+ return &manufacturers[i]->parts[j];
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int spi_nor_set_addr_width(struct spi_nor *nor)
+{
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (nor->info->addr_width) {
+ nor->addr_width = nor->info->addr_width;
+ } else if (nor->mtd.size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ } else {
+ nor->addr_width = 3;
+ }
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_dbg(nor->dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
+static void spi_nor_debugfs_init(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+ mtd->dbg.partname = info->name;
+ mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
+ info->id_len, info->id);
+}
+
+static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
+ const char *name)
+{
+ const struct flash_info *info = NULL;
+
+ if (name)
+ info = spi_nor_match_id(nor, name);
+ /* Try to auto-detect if the chip name wasn't specified or wasn't found */
+ if (!info)
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
+ return ERR_PTR(-ENOENT);
+
+ /*
+ * If the caller has specified the name of a flash model that can normally
+ * be detected using JEDEC, let's verify it.
+ */
+ if (name && info->id_len) {
+ const struct flash_info *jinfo;
+
+ jinfo = spi_nor_read_id(nor);
+ if (IS_ERR(jinfo)) {
+ return jinfo;
+ } else if (jinfo != info) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(nor->dev, "found %s, expected %s\n",
+ jinfo->name, info->name);
+ info = jinfo;
+ }
+ }
+
+ return info;
+}
+
+int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ const struct flash_info *info;
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = &nor->mtd;
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ int ret;
+ int i;
+
+ ret = spi_nor_check(nor);
+ if (ret)
+ return ret;
+
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
+ /*
+ * We need the bounce buffer early to read/write registers when going
+ * through the spi-mem layer (buffers have to be DMA-able).
+ * For spi-mem drivers, we'll reallocate a new buffer if
+ * nor->page_size turns out to be greater than PAGE_SIZE (which
+ * shouldn't happen any time soon, since NOR pages are usually
+ * smaller than 1 KiB) after spi_nor_scan() returns.
+ */
+ nor->bouncebuf_size = PAGE_SIZE;
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+
+ info = spi_nor_get_flash_info(nor, name);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ nor->info = info;
+
+ spi_nor_debugfs_init(nor, info);
+
+ mutex_init(&nor->lock);
+
+ /*
+ * Make sure the XSR_RDY flag is set before calling
+ * spi_nor_wait_till_ready(): Xilinx S3AN flashes share their
+ * MFR ID with Atmel SPI NOR flashes.
+ */
+ if (info->flags & SPI_NOR_XSR_RDY)
+ nor->flags |= SNOR_F_READY_XSR_RDY;
+
+ if (info->flags & SPI_NOR_HAS_LOCK)
+ nor->flags |= SNOR_F_HAS_LOCK;
+
+ mtd->_write = spi_nor_write;
+
+ /* Init flash parameters based on flash_info struct and SFDP */
+ ret = spi_nor_init_params(nor);
+ if (ret)
+ return ret;
+
+ if (!mtd->name)
+ mtd->name = dev_name(dev);
+ mtd->priv = nor;
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = nor->params->size;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
+
+ if (nor->params->locking_ops) {
+ mtd->_lock = spi_nor_lock;
+ mtd->_unlock = spi_nor_unlock;
+ mtd->_is_locked = spi_nor_is_locked;
+ }
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & SPI_NOR_HAS_TB) {
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (info->flags & SPI_NOR_TB_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
+ }
+
+ if (info->flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+ if (info->flags & USE_CLSR)
+ nor->flags |= SNOR_F_USE_CLSR;
+
+ if (info->flags & SPI_NOR_4BIT_BP) {
+ nor->flags |= SNOR_F_HAS_4BIT_BP;
+ if (info->flags & SPI_NOR_BP3_SR_BIT6)
+ nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
+ }
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ mtd->dev.parent = dev;
+ nor->page_size = nor->params->page_size;
+ mtd->writebufsize = nor->page_size;
+
+ if (of_property_read_bool(np, "broken-flash-reset"))
+ nor->flags |= SNOR_F_BROKEN_RESET;
+
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ */
+ ret = spi_nor_setup(nor, hwcaps);
+ if (ret)
+ return ret;
+
+ if (info->flags & SPI_NOR_4B_OPCODES)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ ret = spi_nor_set_addr_width(nor);
+ if (ret)
+ return ret;
+
+ /* Send all the required SPI flash commands to initialize device */
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+ (long long)mtd->size >> 10);
+
+ dev_dbg(dev,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB), "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+ if (mtd->numeraseregions)
+ for (i = 0; i < mtd->numeraseregions; i++)
+ dev_dbg(dev,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].erasesize / 1024,
+ mtd->eraseregions[i].numblocks);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+static int spi_nor_create_read_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(0, NULL, 1)),
+ .offset = 0,
+ .length = nor->mtd.size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ /* get transfer protocols. */
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op->dummy.buswidth = op->addr.buswidth;
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
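+ /* (e.g. 8 dummy cycles on a 4-bit wide bus -> 8 * 4 / 8 = 4 bytes) */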
+ op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
+
+ nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
+}
+
+static int spi_nor_create_write_dirmap(struct spi_nor *nor)
+{
+ struct spi_mem_dirmap_info info = {
+ .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
+ .offset = 0,
+ .length = nor->mtd.size,
+ };
+ struct spi_mem_op *op = &info.op_tmpl;
+
+ /* get transfer protocols. */
+ op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ op->dummy.buswidth = op->addr.buswidth;
+ op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op->addr.nbytes = 0;
+
+ nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
+ &info);
+ return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
+}
+
+static int spi_nor_probe(struct spi_mem *spimem)
+{
+ struct spi_device *spi = spimem->spi;
+ struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct spi_nor *nor;
+ /*
+ * Enable all caps by default. The core will mask them after
+ * checking what's really supported using spi_mem_supports_op().
+ */
+ const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
+ char *flash_name;
+ int ret;
+
+ nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->spimem = spimem;
+ nor->dev = &spi->dev;
+ spi_nor_set_flash_node(nor, spi->dev.of_node);
+
+ spi_mem_set_drvdata(spimem, nor);
+
+ if (data && data->name)
+ nor->mtd.name = data->name;
+
+ if (!nor->mtd.name)
+ nor->mtd.name = spi_mem_get_name(spimem);
+
+ /*
+ * For some (historical?) reason, many platforms provide two different
+ * names in flash_platform_data: "name" and "type". Quite often "name"
+ * is set to "m25p80" and then "type" provides the real chip name.
+ * If that's the case, respect "type" and ignore the "name".
+ */
+ if (data && data->type)
+ flash_name = data->type;
+ else if (!strcmp(spi->modalias, "spi-nor"))
+ flash_name = NULL; /* auto-detect */
+ else
+ flash_name = spi->modalias;
+
+ ret = spi_nor_scan(nor, flash_name, &hwcaps);
+ if (ret)
+ return ret;
+
+ /*
+ * None of the existing parts have > 512B pages, but let's play safe
+ * and add this logic so that if anyone ever adds support for such
+ * a NOR we don't end up with buffer overflows.
+ */
+ if (nor->page_size > PAGE_SIZE) {
+ nor->bouncebuf_size = nor->page_size;
+ devm_kfree(nor->dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(nor->dev,
+ nor->bouncebuf_size,
+ GFP_KERNEL);
+ if (!nor->bouncebuf)
+ return -ENOMEM;
+ }
+
+ ret = spi_nor_create_read_dirmap(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_create_write_dirmap(nor);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
+ data ? data->nr_parts : 0);
+}
+
+static int spi_nor_remove(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+
+ /* Clean up MTD stuff. */
+ return mtd_device_unregister(&nor->mtd);
+}
+
+static void spi_nor_shutdown(struct spi_mem *spimem)
+{
+ struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+
+ spi_nor_restore(nor);
+}
+
+/*
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flashes are compatible to some extent, and since their
+ * differences can often be told apart by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "jedec,spi-nor").
+ *
+ * Many flash names are kept here in this list (as well as in spi-nor.c) to
+ * keep them available as module aliases for existing platforms.
+ */
+static const struct spi_device_id spi_nor_dev_ids[] = {
+ /*
+ * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
+ * hack around the fact that the SPI core does not provide uevent
+ * matching for .of_match_table
+ */
+ {"spi-nor"},
+
+ /*
+ * Entries not used in DTs that should be safe to drop after replacing
+ * them with "spi-nor" in platform data.
+ */
+ {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
+
+ /*
+ * Entries that were used in DTs without "jedec,spi-nor" fallback and
+ * should be kept for backward compatibility.
+ */
+ {"at25df321a"}, {"at25df641"}, {"at26df081a"},
+ {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
+ {"mx25l25635e"},{"mx66l51235l"},
+ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
+ {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
+ {"s25fl064k"},
+ {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
+ {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
+ {"m25p64"}, {"m25p128"},
+ {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
+ {"w25q80bl"}, {"w25q128"}, {"w25q256"},
+
+ /* Flashes that can't be detected using JEDEC */
+ {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
+ {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
+ {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
+
+ /* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
+ { "mr25h256" }, /* 256 Kib, 40 MHz */
+ { "mr25h10" }, /* 1 Mib, 40 MHz */
+ { "mr25h40" }, /* 4 Mib, 40 MHz */
+
+ { },
+};
+MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
+
+static const struct of_device_id spi_nor_of_table[] = {
+ /*
+ * Generic compatibility for SPI NOR that can be identified by the
+ * JEDEC READ ID opcode (0x9F). Use this, if possible.
+ */
+ { .compatible = "jedec,spi-nor" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_nor_of_table);
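+
+/*
+ * Illustrative (hypothetical) device tree usage of the generic compatible
+ * above; a board-level flash node would look roughly like:
+ *
+ * flash@0 {
+ * compatible = "winbond,w25q128", "jedec,spi-nor";
+ * reg = <0>;
+ * spi-max-frequency = <40000000>;
+ * };
+ */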
+
+/*
+ * REVISIT: many of these chips have deep power-down modes, which
+ * should clearly be entered on suspend() to minimize power use.
+ * And also when they're otherwise idle...
+ */
+static struct spi_mem_driver spi_nor_driver = {
+ .spidrv = {
+ .driver = {
+ .name = "spi-nor",
+ .of_match_table = spi_nor_of_table,
+ },
+ .id_table = spi_nor_dev_ids,
+ },
+ .probe = spi_nor_probe,
+ .remove = spi_nor_remove,
+ .shutdown = spi_nor_shutdown,
+};
+module_spi_mem_driver(spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
new file mode 100644
index 000000000000..6f2f6b27173f
--- /dev/null
+++ b/drivers/mtd/spi-nor/core.h
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_INTERNAL_H
+#define __LINUX_MTD_SPI_NOR_INTERNAL_H
+
+#include "sfdp.h"
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+enum spi_nor_option_flags {
+ SNOR_F_USE_FSR = BIT(0),
+ SNOR_F_HAS_SR_TB = BIT(1),
+ SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
+ SNOR_F_READY_XSR_RDY = BIT(3),
+ SNOR_F_USE_CLSR = BIT(4),
+ SNOR_F_BROKEN_RESET = BIT(5),
+ SNOR_F_4B_OPCODES = BIT(6),
+ SNOR_F_HAS_4BAIT = BIT(7),
+ SNOR_F_HAS_LOCK = BIT(8),
+ SNOR_F_HAS_16BIT_SR = BIT(9),
+ SNOR_F_NO_READ_CR = BIT(10),
+ SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
+ SNOR_F_HAS_4BIT_BP = BIT(12),
+ SNOR_F_HAS_SR_BP3_BIT6 = BIT(13),
+};
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octal SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octal SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+/**
+ * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type.
+ * JEDEC JESD216B requires erase sizes to be a power of 2.
+ * @size_shift: the shift corresponding to @size, i.e. log2(@size);
+ * valid because @size is a power of 2.
+ * @size_mask: the size mask based on @size_shift.
+ * @opcode: the SPI command op code to erase the sector/block.
+ * @idx: Erase Type index as sorted in the Basic Flash Parameter
+ * Table. It will be used to synchronize the supported
+ * Erase Types with the ones identified in the SFDP
+ * optional tables.
+ */
+struct spi_nor_erase_type {
+ u32 size;
+ u32 size_shift;
+ u32 size_mask;
+ u8 opcode;
+ u8 idx;
+};
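+
+/*
+ * Illustration (editorial, not part of this patch): a 64 KiB erase type
+ * would have size = 0x10000, size_shift = 16 (1 << 16 == 0x10000) and
+ * size_mask = 0xffff, so "addr & size_mask" checks alignment and
+ * "len >> size_shift" counts whole sectors.
+ */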
+
+/**
+ * struct spi_nor_erase_command - Used for non-uniform erases
+ * The structure is used to describe a list of erase commands to be executed
+ * once we validate that the erase can be performed. The elements in the list
+ * are run-length encoded.
+ * @list: for inclusion into the list of erase commands.
+ * @count: how many times the same erase command should be
+ * consecutively used.
+ * @size: the size of the sector/block erased by the command.
+ * @opcode: the SPI command op code to erase the sector/block.
+ */
+struct spi_nor_erase_command {
+ struct list_head list;
+ u32 count;
+ u32 size;
+ u8 opcode;
+};
+
+/**
+ * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
+ * @offset: the offset in the data array of erase region start.
+ * The LSBs are used as a bitmask encoding flags: whether
+ * this region is overlaid, whether it is the last region
+ * in the SPI NOR flash memory, and which erase commands
+ * are supported inside this region.
+ * The erase types are sorted in ascending order with the
+ * smallest Erase Type size being at BIT(0).
+ * @size: the size of the region in bytes.
+ */
+struct spi_nor_erase_region {
+ u64 offset;
+ u64 size;
+};
+
+#define SNOR_ERASE_TYPE_MAX 4
+#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
+
+#define SNOR_LAST_REGION BIT(4)
+#define SNOR_OVERLAID_REGION BIT(5)
+
+#define SNOR_ERASE_FLAGS_MAX 6
+#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
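+
+/*
+ * Illustration (editorial): a last, non-overlaid region starting at byte
+ * 0x100000 that supports sorted erase types 0 and 2 would encode
+ * .offset = 0x100000 | SNOR_LAST_REGION | BIT(0) | BIT(2); the true byte
+ * offset is then recovered as offset & ~SNOR_ERASE_FLAGS_MASK.
+ */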
+
+/**
+ * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
+ * @regions: array of erase regions. The regions are consecutive in
+ * address space. Walking through the regions is done
+ * incrementally.
+ * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
+ * sector size (legacy implementation).
+ * @erase_type: an array of erase types shared by all the regions.
+ * The erase types are sorted in ascending order, with the
+ * smallest Erase Type size being the first member in the
+ * erase_type array.
+ * @uniform_erase_type: bitmask encoding erase types that can erase the
+ * entire memory. This member is completed at init by
+ * uniform and non-uniform SPI NOR flash memories if they
+ * support at least one erase type that can erase the
+ * entire memory.
+ */
+struct spi_nor_erase_map {
+ struct spi_nor_erase_region *regions;
+ struct spi_nor_erase_region uniform_region;
+ struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
+ u8 uniform_erase_type;
+};
+
+/**
+ * struct spi_nor_locking_ops - SPI NOR locking methods
+ * @lock: lock a region of the SPI NOR.
+ * @unlock: unlock a region of the SPI NOR.
+ * @is_locked: check if a region of the SPI NOR is completely locked
+ */
+struct spi_nor_locking_ops {
+ int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+};
+
+/**
+ * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
+ * Includes legacy flash parameters and settings that can be overwritten
+ * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
+ * Serial Flash Discoverable Parameters (SFDP) tables.
+ *
+ * @size: the flash memory density in bytes.
+ * @page_size: the page size of the SPI NOR flash memory.
+ * @hwcaps: describes the read and page program hardware
+ * capabilities.
+ * @reads: read capabilities ordered by priority: the higher index
+ * in the array, the higher priority.
+ * @page_programs: page program capabilities ordered by priority: the
+ * higher index in the array, the higher priority.
+ * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
+ * Table.
+ * @quad_enable: enables SPI NOR quad mode.
+ * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
+ * @convert_addr: converts an absolute address into something the flash
+ * will understand. Particularly useful when pagesize is
+ * not a power-of-2.
+ * @setup: configures the SPI NOR memory. Useful for SPI NOR
+ * flashes that have peculiarities to the SPI NOR standard
+ * e.g. different opcodes, specific address calculation,
+ * page size, etc.
+ * @locking_ops: SPI NOR locking methods.
+ */
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ struct spi_nor_erase_map erase_map;
+
+ int (*quad_enable)(struct spi_nor *nor);
+ int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
+ u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
+ int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
+
+ const struct spi_nor_locking_ops *locking_ops;
+};
+
+/**
+ * struct spi_nor_fixups - SPI NOR fixup hooks
+ * @default_init: called after default flash parameters init. Used to tweak
+ * flash parameters when information provided by the flash_info
+ * table is incomplete or wrong.
+ * @post_bfpt: called after the BFPT table has been parsed
+ * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
+ * that do not support RDSFDP). Typically used to tweak various
+ * parameters that could not be extracted by other means (i.e.
+ * when information provided by the SFDP/flash_info tables are
+ * incomplete or wrong).
+ *
+ * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
+ * table is broken or not available.
+ */
+struct spi_nor_fixups {
+ void (*default_init)(struct spi_nor *nor);
+ int (*post_bfpt)(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params);
+ void (*post_sfdp)(struct spi_nor *nor);
+};
+
+struct flash_info {
+ char *name;
+
+ /*
+ * This array stores the ID bytes.
+ * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
+ */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u32 flags;
+#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
+#define SST_WRITE BIT(2) /* use SST byte programming */
+#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
+#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
+#define USE_FSR BIT(7) /* use flag status register */
+#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB BIT(9) /*
+ * Flash SR has Top/Bottom (TB) protect
+ * bit. Must be used with
+ * SPI_NOR_HAS_LOCK.
+ */
+#define SPI_NOR_XSR_RDY BIT(10) /*
+ * S3AN flashes have specific opcode to
+ * read the status register.
+ */
+#define SPI_NOR_4B_OPCODES BIT(11) /*
+ * Use dedicated 4byte address op codes
+ * to support memory size above 128Mib.
+ */
+#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
+#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
+#define USE_CLSR BIT(14) /* use CLSR command */
+#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
+ * Top/Bottom (TB) is bit 6 of
+ * status register. Must be used with
+ * SPI_NOR_HAS_TB.
+ */
+#define SPI_NOR_4BIT_BP BIT(17) /*
+ * Flash SR has 4 bit fields (BP0-3)
+ * for block protection.
+ */
+#define SPI_NOR_BP3_SR_BIT6 BIT(18) /*
+ * BP3 is bit 6 of status register.
+ * Must be used with SPI_NOR_4BIT_BP.
+ */
+
+ /* Part specific fixup hooks. */
+ const struct spi_nor_fixups *fixups;
+};
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
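+
+/*
+ * Example (editorial): INFO(0xc22019, 0, 64 * 1024, 512, ...) expands to
+ * .id = { 0xc2, 0x20, 0x19 }, .id_len = 3, 64 KiB sectors and 512 of
+ * them, i.e. a 32 MiB part.
+ */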
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = (_flags),
+
+#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff \
+ }, \
+ .id_len = 3, \
+ .sector_size = (8 * (_page_size)), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = 3, \
+ .flags = SPI_NOR_NO_FR | SPI_NOR_XSR_RDY,
+
+/**
+ * struct spi_nor_manufacturer - SPI NOR manufacturer object
+ * @name: manufacturer name
+ * @parts: array of parts supported by this manufacturer
+ * @nparts: number of entries in the parts array
+ * @fixups: hooks called at various points in time during spi_nor_scan()
+ */
+struct spi_nor_manufacturer {
+ const char *name;
+ const struct flash_info *parts;
+ unsigned int nparts;
+ const struct spi_nor_fixups *fixups;
+};
+
+/* Manufacturer drivers. */
+extern const struct spi_nor_manufacturer spi_nor_atmel;
+extern const struct spi_nor_manufacturer spi_nor_catalyst;
+extern const struct spi_nor_manufacturer spi_nor_eon;
+extern const struct spi_nor_manufacturer spi_nor_esmt;
+extern const struct spi_nor_manufacturer spi_nor_everspin;
+extern const struct spi_nor_manufacturer spi_nor_fujitsu;
+extern const struct spi_nor_manufacturer spi_nor_gigadevice;
+extern const struct spi_nor_manufacturer spi_nor_intel;
+extern const struct spi_nor_manufacturer spi_nor_issi;
+extern const struct spi_nor_manufacturer spi_nor_macronix;
+extern const struct spi_nor_manufacturer spi_nor_micron;
+extern const struct spi_nor_manufacturer spi_nor_st;
+extern const struct spi_nor_manufacturer spi_nor_spansion;
+extern const struct spi_nor_manufacturer spi_nor_sst;
+extern const struct spi_nor_manufacturer spi_nor_winbond;
+extern const struct spi_nor_manufacturer spi_nor_xilinx;
+extern const struct spi_nor_manufacturer spi_nor_xmc;
+
+int spi_nor_write_enable(struct spi_nor *nor);
+int spi_nor_write_disable(struct spi_nor *nor);
+int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
+int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
+int spi_nor_wait_till_ready(struct spi_nor *nor);
+int spi_nor_lock_and_prep(struct spi_nor *nor);
+void spi_nor_unlock_and_unprep(struct spi_nor *nor);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
+
+int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
+ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+ u8 *buf);
+ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+
+int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+u8 spi_nor_convert_3to4_read(u8 opcode);
+void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ enum spi_nor_protocol proto);
+
+void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ u8 opcode);
+struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region);
+void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size);
+
+int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params);
+
+static struct spi_nor __maybe_unused *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
diff --git a/drivers/mtd/spi-nor/eon.c b/drivers/mtd/spi-nor/eon.c
new file mode 100644
index 000000000000..ddb8e3650835
--- /dev/null
+++ b/drivers/mtd/spi-nor/eon.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info eon_parts[] = {
+ /* EON -- en25xxx */
+ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
+ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
+ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
+ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
+};
+
+const struct spi_nor_manufacturer spi_nor_eon = {
+ .name = "eon",
+ .parts = eon_parts,
+ .nparts = ARRAY_SIZE(eon_parts),
+};
diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c
new file mode 100644
index 000000000000..c93170008118
--- /dev/null
+++ b/drivers/mtd/spi-nor/esmt.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info esmt_parts[] = {
+ /* ESMT */
+ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+ { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_HAS_LOCK) },
+};
+
+const struct spi_nor_manufacturer spi_nor_esmt = {
+ .name = "esmt",
+ .parts = esmt_parts,
+ .nparts = ARRAY_SIZE(esmt_parts),
+};
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
new file mode 100644
index 000000000000..04a177a32283
--- /dev/null
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info everspin_parts[] = {
+ /* Everspin */
+ { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3,
+ SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+};
+
+const struct spi_nor_manufacturer spi_nor_everspin = {
+ .name = "everspin",
+ .parts = everspin_parts,
+ .nparts = ARRAY_SIZE(everspin_parts),
+};
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
new file mode 100644
index 000000000000..e385d93e756c
--- /dev/null
+++ b/drivers/mtd/spi-nor/fujitsu.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info fujitsu_parts[] = {
+ /* Fujitsu */
+ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
+};
+
+const struct spi_nor_manufacturer spi_nor_fujitsu = {
+ .name = "fujitsu",
+ .parts = fujitsu_parts,
+ .nparts = ARRAY_SIZE(fujitsu_parts),
+};
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
new file mode 100644
index 000000000000..447d84bb2128
--- /dev/null
+++ b/drivers/mtd/spi-nor/gigadevice.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static void gd25q256_default_init(struct spi_nor *nor)
+{
+ /*
+ * Some manufacturers, like GigaDevice, may use a different bit to
+ * set QE on different memories, so the MFR alone can't indicate the
+ * quad_enable method for this case; we need to set it in the
+ * default_init fixup hook.
+ */
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static struct spi_nor_fixups gd25q256_fixups = {
+ .default_init = gd25q256_default_init,
+};
+
+static const struct flash_info gigadevice_parts[] = {
+ { "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK |
+ SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
+ .fixups = &gd25q256_fixups },
+};
+
+const struct spi_nor_manufacturer spi_nor_gigadevice = {
+ .name = "gigadevice",
+ .parts = gigadevice_parts,
+ .nparts = ARRAY_SIZE(gigadevice_parts),
+};
diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c
new file mode 100644
index 000000000000..d8196f101368
--- /dev/null
+++ b/drivers/mtd/spi-nor/intel.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info intel_parts[] = {
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
+ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
+ { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+};
+
+static void intel_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static const struct spi_nor_fixups intel_fixups = {
+ .default_init = intel_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_intel = {
+ .name = "intel",
+ .parts = intel_parts,
+ .nparts = ARRAY_SIZE(intel_parts),
+ .fixups = &intel_fixups,
+};
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
new file mode 100644
index 000000000000..ffcb60e54a80
--- /dev/null
+++ b/drivers/mtd/spi-nor/issi.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+is25lp256_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * IS25LP256 supports 4B opcodes, but the BFPT advertises a
+ * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
+ * Overwrite the address width advertised by the BFPT.
+ */
+ if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
+ BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
+ nor->addr_width = 4;
+
+ return 0;
+}
+
+static struct spi_nor_fixups is25lp256_fixups = {
+ .post_bfpt = is25lp256_post_bfpt_fixups,
+};
+
+static const struct flash_info issi_parts[] = {
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES)
+ .fixups = &is25lp256_fixups },
+
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
+};
+
+static void issi_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+}
+
+static const struct spi_nor_fixups issi_fixups = {
+ .default_init = issi_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_issi = {
+ .name = "issi",
+ .parts = issi_parts,
+ .nparts = ARRAY_SIZE(issi_parts),
+ .fixups = &issi_fixups,
+};
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
new file mode 100644
index 000000000000..ab0f963d630c
--- /dev/null
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static int
+mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * MX25L25635F supports 4B opcodes but MX25L25635E does not.
+ * Unfortunately, Macronix has re-used the same JEDEC ID for both
+ * variants, which prevents us from defining a new entry in the parts
+ * table.
+ * We need a way to differentiate MX25L25635E and MX25L25635F, and it
+ * seems that the F version advertises support for Fast Read 4-4-4 in
+ * its BFPT table.
+ */
+ if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ return 0;
+}
+
+static struct spi_nor_fixups mx25l25635_fixups = {
+ .post_bfpt = mx25l25635_post_bfpt_fixups,
+};
+
+static const struct flash_info macronix_parts[] = {
+ /* Macronix */
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
+ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+ { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &mx25l25635_fixups },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
+ SPI_NOR_QUAD_READ) },
+};
+
+static void macronix_default_init(struct spi_nor *nor)
+{
+ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups macronix_fixups = {
+ .default_init = macronix_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_macronix = {
+ .name = "macronix",
+ .parts = macronix_parts,
+ .nparts = ARRAY_SIZE(macronix_parts),
+ .fixups = &macronix_fixups,
+};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
new file mode 100644
index 000000000000..6c034b9718e2
--- /dev/null
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info micron_parts[] = {
+ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES) },
+};
+
+static const struct flash_info st_parts[] = {
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64,
+ SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
+ USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+ { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ NO_CHIP_ERASE) },
+
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+
+ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
+ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
+ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
+ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
+ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
+ { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
+ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
+ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
+ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
+
+ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
+ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
+ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
+ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
+};
+
+/**
+ * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron
+ * flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
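+
+/*
+ * Editorial note: ST/Micron parts only accept the enter/exit 4-byte
+ * address mode opcodes while write-enabled, hence the WREN/WRDI
+ * bracketing above (Macronix parts, by contrast, use the plain
+ * spi_nor_set_4byte_addr_mode() helper).
+ */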
+
+static void micron_st_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ nor->params->quad_enable = NULL;
+ nor->params->set_4byte_addr_mode = st_micron_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups micron_st_fixups = {
+ .default_init = micron_st_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_micron = {
+ .name = "micron",
+ .parts = micron_parts,
+ .nparts = ARRAY_SIZE(micron_parts),
+ .fixups = &micron_st_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_st = {
+ .name = "st",
+ .parts = st_parts,
+ .nparts = ARRAY_SIZE(st_parts),
+ .fixups = &micron_st_fixups,
+};
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
new file mode 100644
index 000000000000..f6038d3a3684
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -0,0 +1,1204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+ (((p)->parameter_table_pointer[2] << 16) | \
+ ((p)->parameter_table_pointer[1] << 8) | \
+ ((p)->parameter_table_pointer[0] << 0))
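+
+/*
+ * Example (editorial): a little-endian parameter_table_pointer of
+ * { 0x30, 0x01, 0x00 } yields a parameter table address of 0x000130.
+ */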
+
+#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
+#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
+
+#define SFDP_SIGNATURE 0x50444653U
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
+
+struct sfdp_header {
+ u32 signature; /* 0x50444653U <=> "SFDP" */
+ u8 minor;
+ u8 major;
+ u8 nph; /* 0-based number of parameter headers */
+ u8 unused;
+
+ /* Basic Flash Parameter Table. */
+ struct sfdp_parameter_header bfpt_header;
+};
+
+/* Fast Read settings. */
+struct sfdp_bfpt_read {
+ /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+ * whether the Fast Read x-y-z command is supported.
+ */
+ u32 supported_dword;
+ u32 supported_bit;
+
+ /*
+ * The half-word at offset <settings_shift> in the <settings_dword> BFPT
+ * DWORD encodes the op code, the number of mode clocks and the number
+ * of wait states to be used by the Fast Read x-y-z command.
+ */
+ u32 settings_dword;
+ u32 settings_shift;
+
+ /* The SPI protocol for this Fast Read x-y-z command. */
+ enum spi_nor_protocol proto;
+};
+
+struct sfdp_bfpt_erase {
+ /*
+ * The half-word at offset <shift> in DWORD <dword> encodes the
+ * op code and erase sector size to be used by Sector Erase commands.
+ */
+ u32 dword;
+ u32 shift;
+};
+
+#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
+#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
+
+#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
+#define SMPT_CMD_READ_DUMMY_SHIFT 16
+#define SMPT_CMD_READ_DUMMY(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
+#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
+
+#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
+#define SMPT_CMD_READ_DATA_SHIFT 24
+#define SMPT_CMD_READ_DATA(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
+
+#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
+#define SMPT_CMD_OPCODE_SHIFT 8
+#define SMPT_CMD_OPCODE(_cmd) \
+ (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
+
+#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
+#define SMPT_MAP_REGION_COUNT_SHIFT 16
+#define SMPT_MAP_REGION_COUNT(_header) \
+ ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
+ SMPT_MAP_REGION_COUNT_SHIFT) + 1)
+
+#define SMPT_MAP_ID_MASK GENMASK(15, 8)
+#define SMPT_MAP_ID_SHIFT 8
+#define SMPT_MAP_ID(_header) \
+ (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
+
+#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
+#define SMPT_MAP_REGION_SIZE_SHIFT 8
+#define SMPT_MAP_REGION_SIZE(_region) \
+ (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
+ SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
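+
+/*
+ * Example (editorial): a region descriptor with bits [31:8] = 0xffff
+ * describes (0xffff + 1) * 256 bytes, i.e. a 16 MiB region.
+ */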
+
+#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
+#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
+ ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
+
+#define SMPT_DESC_TYPE_MAP BIT(1)
+#define SMPT_DESC_END BIT(0)
+
+#define SFDP_4BAIT_DWORD_MAX 2
+
+struct sfdp_4bait {
+ /* The hardware capability. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
+ * the associated 4-byte address op code is supported.
+ */
+ u32 supported_bit;
+};
+
+/**
+ * spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
+ * addr_width and read_dummy members of the struct spi_nor
+ * should be previously set.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to read
+ * @buf: buffer where the data is copied into (dma-safe memory)
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
+{
+ ssize_t ret;
+
+ while (len) {
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret < 0)
+ return ret;
+ if (!ret || ret > len)
+ return -EIO;
+
+ buf += ret;
+ addr += ret;
+ len -= ret;
+ }
+ return 0;
+}
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ u8 addr_width, read_opcode, read_dummy;
+ int ret;
+
+ read_opcode = nor->read_opcode;
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+
+ nor->read_opcode = SPINOR_OP_RDSFDP;
+ nor->addr_width = 3;
+ nor->read_dummy = 8;
+
+ ret = spi_nor_read_raw(nor, addr, len, buf);
+
+ nor->read_opcode = read_opcode;
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+
+ return ret;
+}
+
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer, as @buf is not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ * otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ void *dma_safe_buf;
+ int ret;
+
+ dma_safe_buf = kmalloc(len, GFP_KERNEL);
+ if (!dma_safe_buf)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+ memcpy(buf, dma_safe_buf, len);
+ kfree(dma_safe_buf);
+
+ return ret;
+}
+
+static void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+ u16 half,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = (half >> 5) & 0x07;
+ read->num_wait_states = (half >> 0) & 0x1f;
+ read->opcode = (half >> 8) & 0xff;
+ read->proto = proto;
+}
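+
+/*
+ * Worked example (editorial): half = 0xeb41 decodes to opcode = 0xeb,
+ * num_mode_clocks = (0x41 >> 5) & 0x7 = 2 and
+ * num_wait_states = 0x41 & 0x1f = 1.
+ */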
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+ /* Fast Read 1-1-2 */
+ {
+ SNOR_HWCAPS_READ_1_1_2,
+ BFPT_DWORD(1), BIT(16), /* Supported bit */
+ BFPT_DWORD(4), 0, /* Settings */
+ SNOR_PROTO_1_1_2,
+ },
+
+ /* Fast Read 1-2-2 */
+ {
+ SNOR_HWCAPS_READ_1_2_2,
+ BFPT_DWORD(1), BIT(20), /* Supported bit */
+ BFPT_DWORD(4), 16, /* Settings */
+ SNOR_PROTO_1_2_2,
+ },
+
+ /* Fast Read 2-2-2 */
+ {
+ SNOR_HWCAPS_READ_2_2_2,
+ BFPT_DWORD(5), BIT(0), /* Supported bit */
+ BFPT_DWORD(6), 16, /* Settings */
+ SNOR_PROTO_2_2_2,
+ },
+
+ /* Fast Read 1-1-4 */
+ {
+ SNOR_HWCAPS_READ_1_1_4,
+ BFPT_DWORD(1), BIT(22), /* Supported bit */
+ BFPT_DWORD(3), 16, /* Settings */
+ SNOR_PROTO_1_1_4,
+ },
+
+ /* Fast Read 1-4-4 */
+ {
+ SNOR_HWCAPS_READ_1_4_4,
+ BFPT_DWORD(1), BIT(21), /* Supported bit */
+ BFPT_DWORD(3), 0, /* Settings */
+ SNOR_PROTO_1_4_4,
+ },
+
+ /* Fast Read 4-4-4 */
+ {
+ SNOR_HWCAPS_READ_4_4_4,
+ BFPT_DWORD(5), BIT(4), /* Supported bit */
+ BFPT_DWORD(7), 16, /* Settings */
+ SNOR_PROTO_4_4_4,
+ },
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+ /* Erase Type 1 in DWORD8 bits[15:0] */
+ {BFPT_DWORD(8), 0},
+
+ /* Erase Type 2 in DWORD8 bits[31:16] */
+ {BFPT_DWORD(8), 16},
+
+ /* Erase Type 3 in DWORD9 bits[15:0] */
+ {BFPT_DWORD(9), 0},
+
+ /* Erase Type 4 in DWORD9 bits[31:16] */
+ {BFPT_DWORD(9), 16},
+};
+
+/**
+ * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ * @i: erase type index as sorted in the Basic Flash Parameter Table
+ *
+ * The supported Erase Types will be sorted at init in ascending order, with
+ * the smallest Erase Type size being the first member in the erase_type array
+ * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
+ * the Basic Flash Parameter Table since it will be used later on to
+ * synchronize with the supported Erase Types defined in SFDP optional tables.
+ */
+static void
+spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
+ u32 size, u8 opcode, u8 i)
+{
+ erase->idx = i;
+ spi_nor_set_erase_type(erase, size, opcode);
+}
+
+/**
+ * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
+ * @l: member in the left half of the map's erase_type array
+ * @r: member in the right half of the map's erase_type array
+ *
+ * Comparison function used in the sort() call to sort the map's erase
+ * types in ascending order, with the smallest erase type size ending up
+ * as the first member of the sorted erase_type array.
+ *
+ * Return: the result of @l->size - @r->size
+ */
+static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
+{
+ const struct spi_nor_erase_type *left = l, *right = r;
+
+ return left->size - right->size;
+}
+
+/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ int i;
+ u8 sorted_erase_mask = 0;
+
+ if (!erase_mask)
+ return 0;
+
+ /* Replicate the sort done for the map's erase types. */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+ sorted_erase_mask |= BIT(i);
+
+ return sorted_erase_mask;
+}
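+
+/*
+ * Illustration (editorial): if the sorted erase_type array stores BFPT
+ * indices { .idx = 2, .idx = 0, .idx = 1 } at positions 0/1/2, then an
+ * erase_mask of BIT(0) | BIT(1) (BFPT numbering) is returned as
+ * BIT(1) | BIT(2) (sorted numbering).
+ */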
+
+/**
+ * spi_nor_regions_sort_erase_types() - sort erase types in each region
+ * @map: the erase map of the SPI NOR
+ *
+ * Function assumes that the erase types defined in the erase map are already
+ * sorted in ascending order, with the smallest erase type size being the first
+ * member in the erase_type array. It replicates the sort done for the map's
+ * erase types. Each region's erase bitmask will indicate which erase types are
+ * supported from the sorted erase types defined in the erase map.
+ * Sort all the regions' erase types at init in order to speed up the process
+ * of finding the best erase command at runtime.
+ */
+static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u8 region_erase_mask, sorted_erase_mask;
+
+ while (region) {
+ region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ sorted_erase_mask = spi_nor_sort_erase_mask(map,
+ region_erase_mask);
+
+ /* Overwrite erase mask. */
+ region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
+ sorted_erase_mask;
+
+ region = spi_nor_region_next(region);
+ }
+}
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor: pointer to a 'struct spi_nor'
+ * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Basic Flash Parameter Table length and version
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast READ commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size is now available since JESD216 rev A, however
+ * the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Actually, setting the QE bit
+ * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
+ * and IO3, hence enabling 4 (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ struct sfdp_bfpt bfpt;
+ size_t len;
+ int i, cmd, err;
+ u32 addr;
+ u16 half;
+ u8 erase_mask;
+
+ /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+ if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+ return -EINVAL;
+
+ /* Read the Basic Flash Parameter Table. */
+ len = min_t(size_t, sizeof(bfpt),
+ bfpt_header->length * sizeof(u32));
+ addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+ memset(&bfpt, 0, sizeof(bfpt));
+ err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
+ if (err < 0)
+ return err;
+
+ /* Fix endianness of the BFPT DWORDs. */
+ le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);
+
+ /* Number of address bytes. */
+ switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+ case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+ nor->addr_width = 3;
+ break;
+
+ case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+ nor->addr_width = 4;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Flash Memory Density (in bits). */
+ params->size = bfpt.dwords[BFPT_DWORD(2)];
+ if (params->size & BIT(31)) {
+ params->size &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. Anyway, a NOR of 2^64
+ * bits is unlikely to exist so this error probably means
+ * the BFPT we are reading is corrupted/wrong.
+ */
+ if (params->size > 63)
+ return -EINVAL;
+
+ params->size = 1ULL << params->size;
+ } else {
+ params->size++;
+ }
+ params->size >>= 3; /* Convert to bytes. */
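+ /*
+ * Example (hypothetical values): dwords[BFPT_DWORD(2)] == 0x00ffffff
+ * encodes 2^24 bits, so params->size ends up as 2 MiB. With BIT(31)
+ * set, the remaining bits give N directly and the density is 2^N bits,
+ * e.g. N == 30 yields 128 MiB.
+ */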
+
+ /* Fast Read settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+ const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+ struct spi_nor_read_command *read;
+
+ if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+ params->hwcaps.mask &= ~rd->hwcaps;
+ continue;
+ }
+
+ params->hwcaps.mask |= rd->hwcaps;
+ cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+ read = &params->reads[cmd];
+ half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+ spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+ }
+
+ /*
+ * Sector Erase settings. Reinitialize the uniform erase map using the
+ * Erase Types defined in the BFPT.
+ */
+ erase_mask = 0;
+ memset(&params->erase_map, 0, sizeof(params->erase_map));
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+ const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+ u32 erasesize;
+ u8 opcode;
+
+ half = bfpt.dwords[er->dword] >> er->shift;
+ erasesize = half & 0xff;
+
+ /* erasesize == 0 means this Erase Type is not supported. */
+ if (!erasesize)
+ continue;
+
+ erasesize = 1U << erasesize;
+ opcode = (half >> 8) & 0xff;
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
+ opcode, i);
+ }
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+ /*
+ * Sort all the map's Erase Types in ascending order with the smallest
+ * erase size being the first member in the erase_type array.
+ */
+ sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
+ spi_nor_map_cmp_erase_type, NULL);
+ /*
+ * Sort the erase types in the uniform region in order to update the
+ * uniform_erase_type bitmask. The bitmask will be used later on when
+ * selecting the uniform erase.
+ */
+ spi_nor_regions_sort_erase_types(map);
+ map->uniform_erase_type = map->uniform_region.offset &
+ SNOR_ERASE_TYPE_MASK;
+
+ /* Stop here if not JESD216 rev A or later. */
+ if (bfpt_header->length < BFPT_DWORD_MAX)
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+ params);
+
+ /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+ params->page_size = bfpt.dwords[BFPT_DWORD(11)];
+ params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
+ params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+ params->page_size = 1U << params->page_size;
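+ /* Example: an encoded field value of 8 yields a 2^8 = 256 byte page. */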
+
+ /* Quad Enable Requirements. */
+ switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+ case BFPT_DWORD15_QER_NONE:
+ params->quad_enable = NULL;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ /*
+ * Writing only one byte to the Status Register has the
+ * side-effect of clearing Status Register 2.
+ */
+ case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+ /*
+ * Read Configuration Register (35h) instruction is not
+ * supported.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR1_BIT6:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT7:
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+ params->quad_enable = spi_nor_sr2_bit7_quad_enable;
+ break;
+
+ case BFPT_DWORD15_QER_SR2_BIT1:
+ /*
+ * JESD216 rev B or later does not specify whether writing only
+ * one byte to the Status Register clears Status Register 2 or
+ * not, so let's be cautious and keep the default assumption of
+ * a 16-bit Write Status (01h) command.
+ */
+ nor->flags |= SNOR_F_HAS_16BIT_SR;
+
+ params->quad_enable = spi_nor_sr2_bit1_quad_enable;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
+}
+
+/**
+ * spi_nor_smpt_addr_width() - return the address width used in the
+ * configuration detection command.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ */
+static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+{
+ switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
+ case SMPT_CMD_ADDRESS_LEN_0:
+ return 0;
+ case SMPT_CMD_ADDRESS_LEN_3:
+ return 3;
+ case SMPT_CMD_ADDRESS_LEN_4:
+ return 4;
+ case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
+ default:
+ return nor->addr_width;
+ }
+}
+
+/**
+ * spi_nor_smpt_read_dummy() - return the configuration detection command read
+ * latency, in clock cycles.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ *
+ * Return: the number of dummy cycles for an SMPT read
+ */
+static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
+{
+ u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
+
+ if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
+ return nor->read_dummy;
+ return read_dummy;
+}
+
+/**
+ * spi_nor_get_map_in_use() - get the configuration map in use
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt: pointer to the sector map parameter table
+ * @smpt_len: sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
+ */
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+ u8 smpt_len)
+{
+ const u32 *ret;
+ u8 *buf;
+ u32 addr;
+ int err;
+ u8 i;
+ u8 addr_width, read_opcode, read_dummy;
+ u8 read_data_mask, map_id;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+ read_opcode = nor->read_opcode;
+
+ map_id = 0;
+ /* Determine if there are any optional Detection Command Descriptors */
+ for (i = 0; i < smpt_len; i += 2) {
+ if (smpt[i] & SMPT_DESC_TYPE_MAP)
+ break;
+
+ read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
+ nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
+ nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
+ addr = smpt[i + 1];
+
+ err = spi_nor_read_raw(nor, addr, 1, buf);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out;
+ }
+
+ /*
+ * Build an index value that is used to select the Sector Map
+ * Configuration that is currently in use.
+ */
+ map_id = map_id << 1 | !!(*buf & read_data_mask);
+ }
+
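+ /*
+ * Example (hypothetical): two detection commands whose reads return the
+ * selection bits 1 and 0 produce map_id == 0b10, so the map descriptor
+ * with SMPT_MAP_ID() == 2 is selected below.
+ */
+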
+ /*
+ * If command descriptors are provided, they always precede map
+ * descriptors in the table. There is no need to start the iteration
+ * over smpt array all over again.
+ *
+ * Find the matching configuration map.
+ */
+ ret = ERR_PTR(-EINVAL);
+ while (i < smpt_len) {
+ if (SMPT_MAP_ID(smpt[i]) == map_id) {
+ ret = smpt + i;
+ break;
+ }
+
+ /*
+ * If there are no more configuration map descriptors and no
+ * configuration ID matched the configuration identifier, the
+ * sector address map is unknown.
+ */
+ if (smpt[i] & SMPT_DESC_END)
+ break;
+
+ /* increment the table index to the next map */
+ i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
+ }
+
+ /* fall through */
+out:
+ kfree(buf);
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+ nor->read_opcode = read_opcode;
+ return ret;
+}
+
+static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_LAST_REGION;
+}
+
+static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_OVERLAID_REGION;
+}
+
+/**
+ * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @erase_type: erase type bitmask
+ */
+static void
+spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase,
+ const u8 erase_type)
+{
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (!(erase_type & BIT(i)))
+ continue;
+ if (region->size & erase[i].size_mask) {
+ spi_nor_region_mark_overlay(region);
+ return;
+ }
+ }
+}
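+
+/*
+ * Example (hypothetical, assuming erase[i].size_mask is the erase size
+ * minus one): a 96 KiB region checked against a 64 KiB erase type gives
+ * region->size & (SZ_64K - 1) != 0, so the region is marked as overlaid.
+ */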
+
+/**
+ * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to a duplicate 'struct spi_nor_flash_parameter' that is
+ * used for storing SFDP parsed data
+ * @smpt: pointer to the sector map parameter table
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params,
+ const u32 *smpt)
+{
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase = map->erase_type;
+ struct spi_nor_erase_region *region;
+ u64 offset;
+ u32 region_count;
+ int i, j;
+ u8 uniform_erase_type, save_uniform_erase_type;
+ u8 erase_type, regions_erase_type;
+
+ region_count = SMPT_MAP_REGION_COUNT(*smpt);
+ /*
+ * The regions will be freed when the driver detaches from the
+ * device.
+ */
+ region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ map->regions = region;
+
+ uniform_erase_type = 0xff;
+ regions_erase_type = 0;
+ offset = 0;
+ /* Populate regions. */
+ for (i = 0; i < region_count; i++) {
+ j = i + 1; /* index for the region dword */
+ region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
+ erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
+ region[i].offset = offset | erase_type;
+
+ spi_nor_region_check_overlay(&region[i], erase, erase_type);
+
+ /*
+ * Save the erase types that are supported in all regions and
+ * can erase the entire flash memory.
+ */
+ uniform_erase_type &= erase_type;
+
+ /*
+ * regions_erase_type mask will indicate all the erase types
+ * supported in this configuration map.
+ */
+ regions_erase_type |= erase_type;
+
+ offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ region[i].size;
+ }
+
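+ /*
+ * Example (hypothetical): two regions advertising Erase Types {1, 2} and
+ * {1, 3} leave uniform_erase_type with only Erase Type 1 set (the sole
+ * type able to erase the whole array), while regions_erase_type
+ * accumulates Erase Types {1, 2, 3}.
+ */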
+ save_uniform_erase_type = map->uniform_erase_type;
+ map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+ uniform_erase_type);
+
+ if (!regions_erase_type) {
+ /*
+ * Roll back to the previous uniform_erase_type mask: the SMPT
+ * is broken.
+ */
+ map->uniform_erase_type = save_uniform_erase_type;
+ return -EINVAL;
+ }
+
+ /*
+ * BFPT advertises all the erase types supported by all the possible
+ * map configurations. Mask out the erase types that are not supported
+ * by the current map configuration.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
+ spi_nor_region_mark_end(&region[i - 1]);
+
+ return 0;
+}
+
+/**
+ * spi_nor_parse_smpt() - parse Sector Map Parameter Table
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt_header: sector map parameter table header
+ * @params: pointer to a duplicate 'struct spi_nor_flash_parameter'
+ * that is used for storing SFDP parsed data
+ *
+ * This table is optional, but when available, we parse it to identify the
+ * location and size of sectors within the main data array of the flash memory
+ * device and to identify which Erase Types are supported by each sector.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_smpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *smpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ const u32 *sector_map;
+ u32 *smpt;
+ size_t len;
+ u32 addr;
+ int ret;
+
+ /* Read the Sector Map Parameter Table. */
+ len = smpt_header->length * sizeof(*smpt);
+ smpt = kmalloc(len, GFP_KERNEL);
+ if (!smpt)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(smpt_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, smpt);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the SMPT DWORDs. */
+ le32_to_cpu_array(smpt, smpt_header->length);
+
+ sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+ if (IS_ERR(sector_map)) {
+ ret = PTR_ERR(sector_map);
+ goto out;
+ }
+
+ ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
+ if (ret)
+ goto out;
+
+ spi_nor_regions_sort_erase_types(&params->erase_map);
+ /* fall through */
+out:
+ kfree(smpt);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
+ * @nor: pointer to a 'struct spi_nor'.
+ * @param_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the 4-Byte Address Instruction Table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_4bait(struct spi_nor *nor,
+ const struct sfdp_parameter_header *param_header,
+ struct spi_nor_flash_parameter *params)
+{
+ static const struct sfdp_4bait reads[] = {
+ { SNOR_HWCAPS_READ, BIT(0) },
+ { SNOR_HWCAPS_READ_FAST, BIT(1) },
+ { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
+ { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
+ { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
+ { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ };
+ static const struct sfdp_4bait programs[] = {
+ { SNOR_HWCAPS_PP, BIT(6) },
+ { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
+ { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
+ };
+ static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
+ { 0u /* not used */, BIT(9) },
+ { 0u /* not used */, BIT(10) },
+ { 0u /* not used */, BIT(11) },
+ { 0u /* not used */, BIT(12) },
+ };
+ struct spi_nor_pp_command *params_pp = params->page_programs;
+ struct spi_nor_erase_map *map = &params->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ u32 *dwords;
+ size_t len;
+ u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
+ int i, ret;
+
+ if (param_header->major != SFDP_JESD216_MAJOR ||
+ param_header->length < SFDP_4BAIT_DWORD_MAX)
+ return -EINVAL;
+
+ /* Read the 4-byte Address Instruction Table. */
+ len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(param_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the 4BAIT DWORDs. */
+ le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);
+
+ /*
+ * Compute the subset of (Fast) Read commands for which the 4-byte
+ * version is supported.
+ */
+ discard_hwcaps = 0;
+ read_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(reads); i++) {
+ const struct sfdp_4bait *read = &reads[i];
+
+ discard_hwcaps |= read->hwcaps;
+ if ((params->hwcaps.mask & read->hwcaps) &&
+ (dwords[0] & read->supported_bit))
+ read_hwcaps |= read->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Page Program commands for which the 4-byte
+ * version is supported.
+ */
+ pp_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(programs); i++) {
+ const struct sfdp_4bait *program = &programs[i];
+
+ /*
+ * The 4 Byte Address Instruction (Optional) Table is the only
+ * SFDP table that indicates support for Page Program Commands.
+ * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
+ * authority for specifying Page Program support.
+ */
+ discard_hwcaps |= program->hwcaps;
+ if (dwords[0] & program->supported_bit)
+ pp_hwcaps |= program->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Sector Erase commands for which the 4-byte
+ * version is supported.
+ */
+ erase_mask = 0;
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ const struct sfdp_4bait *erase = &erases[i];
+
+ if (dwords[0] & erase->supported_bit)
+ erase_mask |= BIT(i);
+ }
+
+ /* Replicate the sort done for the map's erase types in BFPT. */
+ erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
+
+ /*
+ * We need at least one 4-byte op code per read, program and erase
+ * operation; the .read(), .write() and .erase() hooks share the
+ * nor->addr_width value.
+ */
+ if (!read_hwcaps || !pp_hwcaps || !erase_mask)
+ goto out;
+
+ /*
+ * Discard all operations from the 4-byte instruction set which are
+ * not supported by this memory.
+ */
+ params->hwcaps.mask &= ~discard_hwcaps;
+ params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
+
+ /* Use the 4-byte address instruction set. */
+ for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
+ struct spi_nor_read_command *read_cmd = &params->reads[i];
+
+ read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
+ }
+
+ /* 4BAIT is the only SFDP table that indicates page program support. */
+ if (pp_hwcaps & SNOR_HWCAPS_PP)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B,
+ SNOR_PROTO_1_1_4);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4_4B,
+ SNOR_PROTO_1_4_4);
+
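+ /*
+ * dwords[1] packs one 4-byte erase opcode per BFPT Erase Type, eight bits
+ * each: an erase type whose BFPT index (.idx) is 1, for instance, takes
+ * its opcode from dwords[1] bits [15:8].
+ */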
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (erase_mask & BIT(i))
+ erase_type[i].opcode = (dwords[1] >>
+ erase_type[i].idx * 8) & 0xFF;
+ else
+ spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
+ }
+
+ /*
+ * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
+ * later because we already did the conversion to 4byte opcodes. Also,
+ * the latter function implements a legacy quirk for the erase size of
+ * Spansion memory. However, this quirk is no longer needed with new
+ * SFDP-compliant memories.
+ */
+ nor->addr_width = 4;
+ nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
+
+ /* fall through */
+out:
+ kfree(dwords);
+ return ret;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost all
+ * (Q)SPI memory manufacturers. Parsing these tables allows us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations such
+ * as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+{
+ const struct sfdp_parameter_header *param_header, *bfpt_header;
+ struct sfdp_parameter_header *param_headers = NULL;
+ struct sfdp_header header;
+ struct device *dev = nor->dev;
+ size_t psize;
+ int i, err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP header version. */
+ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+ header.major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Verify that the first and only mandatory parameter header is a
+ * Basic Flash Parameter Table header as specified in JESD216.
+ */
+ bfpt_header = &header.bfpt_header;
+ if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+ bfpt_header->major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Allocate memory then read all parameter headers with a single
+ * Read SFDP command. These parameter headers will actually be parsed
+ * twice: a first time to get the latest revision of the basic flash
+ * parameter table, then a second time to handle the supported optional
+ * tables.
+ * Hence we read the parameter headers once and for all to reduce the
+ * processing time. Also we use kmalloc() instead of devm_kmalloc()
+ * because we don't need to keep these parameter headers: the allocated
+ * memory is always released with kfree() before exiting this function.
+ */
+ if (header.nph) {
+ psize = header.nph * sizeof(*param_headers);
+
+ param_headers = kmalloc(psize, GFP_KERNEL);
+ if (!param_headers)
+ return -ENOMEM;
+
+ err = spi_nor_read_sfdp(nor, sizeof(header),
+ psize, param_headers);
+ if (err < 0) {
+ dev_dbg(dev, "failed to read SFDP parameter headers\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Check other parameter headers to get the latest revision of
+ * the basic flash parameter table.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+ param_header->major == SFDP_JESD216_MAJOR &&
+ (param_header->minor > bfpt_header->minor ||
+ (param_header->minor == bfpt_header->minor &&
+ param_header->length > bfpt_header->length)))
+ bfpt_header = param_header;
+ }
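+
+ /*
+ * Example: if the mandatory header advertises BFPT revision 1.0 but an
+ * optional header advertises revision 1.6 (JESD216 rev B), the latter
+ * is parsed below.
+ */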
+
+ err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+ if (err)
+ goto exit;
+
+ /* Parse optional parameter tables. */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ switch (SFDP_PARAM_HEADER_ID(param_header)) {
+ case SFDP_SECTOR_MAP_ID:
+ err = spi_nor_parse_smpt(nor, param_header, params);
+ break;
+
+ case SFDP_4BAIT_ID:
+ err = spi_nor_parse_4bait(nor, param_header, params);
+ break;
+
+ default:
+ break;
+ }
+
+ if (err) {
+ dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+ SFDP_PARAM_HEADER_ID(param_header));
+ /*
+ * Let's not drop all information we extracted so far
+ * if optional table parsers fail. In case of failure,
+ * each optional parser is responsible for rolling back to
+ * the previously known spi_nor data.
+ */
+ err = 0;
+ }
+ }
+
+exit:
+ kfree(param_headers);
+ return err;
+}
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
new file mode 100644
index 000000000000..e0a8ded04890
--- /dev/null
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#ifndef __LINUX_MTD_SFDP_H
+#define __LINUX_MTD_SFDP_H
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i) ((i) - 1)
+#define BFPT_DWORD_MAX 16
+
+struct sfdp_bfpt {
+ u32 dwords[BFPT_DWORD_MAX];
+};
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216 9
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
+#define BFPT_DWORD1_DTR BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
+#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ * reads based on instruction. DQ3/HOLD# functions are hold during
+ * instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * Writing only one byte to the status register has the side-effect of
+ * clearing status register 2, including the QE bit. The 100b code is
+ * used if writing one byte to the status register does not modify
+ * status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ * one data byte where bit 6 is one.
+ * [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ * register 2 instruction 3Eh with one data byte where bit 7 is one.
+ * [...]
+ * The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * In contrast to the 001b code, writing one byte to the status
+ * register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ * Read Status instruction 05h. Status register2 is read using
+ * instruction 35h. QE is set via Write Status instruction 01h with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ */
+#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+
+struct sfdp_parameter_header {
+ u8 id_lsb;
+ u8 minor;
+ u8 major;
+ u8 length; /* in double words */
+ u8 parameter_table_pointer[3]; /* byte address */
+ u8 id_msb;
+};
+
+int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params);
+
+#endif /* __LINUX_MTD_SFDP_H */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
new file mode 100644
index 000000000000..6756202ace4b
--- /dev/null
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info spansion_parts[] = {
+ /* Spansion/Cypress -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | USE_CLSR) },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+};
+
+static void spansion_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->params->size <= SZ_16M)
+ return;
+
+ nor->flags |= SNOR_F_4B_OPCODES;
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = nor->info->sector_size;
+}
+
+static const struct spi_nor_fixups spansion_fixups = {
+ .post_sfdp = spansion_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_spansion = {
+ .name = "spansion",
+ .parts = spansion_parts,
+ .nparts = ARRAY_SIZE(spansion_parts),
+ .fixups = &spansion_fixups,
+};
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
deleted file mode 100644
index 4fc632ec18fe..000000000000
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ /dev/null
@@ -1,5434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
- * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
- *
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/math64.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/of_platform.h>
-#include <linux/sched/task_stack.h>
-#include <linux/spi/flash.h>
-#include <linux/mtd/spi-nor.h>
-
-/* Define max times to check status register before we give up. */
-
-/*
- * For everything but full-chip erase; probably could be much smaller, but kept
- * around for safety for now
- */
-#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
-
-/*
- * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
- * for larger flash
- */
-#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
-
-#define SPI_NOR_MAX_ID_LEN 6
-#define SPI_NOR_MAX_ADDR_WIDTH 4
-
-struct sfdp_parameter_header {
- u8 id_lsb;
- u8 minor;
- u8 major;
- u8 length; /* in double words */
- u8 parameter_table_pointer[3]; /* byte address */
- u8 id_msb;
-};
-
-#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
-#define SFDP_PARAM_HEADER_PTP(p) \
- (((p)->parameter_table_pointer[2] << 16) | \
- ((p)->parameter_table_pointer[1] << 8) | \
- ((p)->parameter_table_pointer[0] << 0))
-
-#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
-#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
-#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
-
-#define SFDP_SIGNATURE 0x50444653U
-#define SFDP_JESD216_MAJOR 1
-#define SFDP_JESD216_MINOR 0
-#define SFDP_JESD216A_MINOR 5
-#define SFDP_JESD216B_MINOR 6
-
-struct sfdp_header {
- u32 signature; /* Ox50444653U <=> "SFDP" */
- u8 minor;
- u8 major;
- u8 nph; /* 0-base number of parameter headers */
- u8 unused;
-
- /* Basic Flash Parameter Table. */
- struct sfdp_parameter_header bfpt_header;
-};
-
-/* Basic Flash Parameter Table */
-
-/*
- * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
- * They are indexed from 1 but C arrays are indexed from 0.
- */
-#define BFPT_DWORD(i) ((i) - 1)
-#define BFPT_DWORD_MAX 16
-
-/* The first version of JESD216 defined only 9 DWORDs. */
-#define BFPT_DWORD_MAX_JESD216 9
-
-/* 1st DWORD. */
-#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
-#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
-#define BFPT_DWORD1_DTR BIT(19)
-#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
-#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
-#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
-
-/* 5th DWORD. */
-#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
-#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
-
-/* 11th DWORD. */
-#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
-#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
-
-/* 15th DWORD. */
-
-/*
- * (from JESD216 rev B)
- * Quad Enable Requirements (QER):
- * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
- * reads based on instruction. DQ3/HOLD# functions are hold during
- * instruction phase.
- * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * Writing only one byte to the status register has the side-effect of
- * clearing status register 2, including the QE bit. The 100b code is
- * used if writing one byte to the status register does not modify
- * status register 2.
- * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
- * one data byte where bit 6 is one.
- * [...]
- * - 011b: QE is bit 7 of status register 2. It is set via Write status
- * register 2 instruction 3Eh with one data byte where bit 7 is one.
- * [...]
- * The status register 2 is read using instruction 3Fh.
- * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * In contrast to the 001b code, writing one byte to the status
- * register does not modify status register 2.
- * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
- * Read Status instruction 05h. Status register2 is read using
- * instruction 35h. QE is set via Write Status instruction 01h with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- */
-#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
-#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
-#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
-#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
-#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
-
-struct sfdp_bfpt {
- u32 dwords[BFPT_DWORD_MAX];
-};
-
-/**
- * struct spi_nor_fixups - SPI NOR fixup hooks
- * @default_init: called after default flash parameters init. Used to tweak
- * flash parameters when information provided by the flash_info
- * table is incomplete or wrong.
- * @post_bfpt: called after the BFPT table has been parsed
- * @post_sfdp: called after SFDP has been parsed (is also called for SPI NORs
- * that do not support RDSFDP). Typically used to tweak various
- * parameters that could not be extracted by other means (i.e.
- * when information provided by the SFDP/flash_info tables are
- * incomplete or wrong).
- *
- * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
- * table is broken or not available.
- */
-struct spi_nor_fixups {
- void (*default_init)(struct spi_nor *nor);
- int (*post_bfpt)(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params);
- void (*post_sfdp)(struct spi_nor *nor);
-};
-
-struct flash_info {
- char *name;
-
- /*
- * This array stores the ID bytes.
- * The first three bytes are the JEDIC ID.
- * JEDEC ID zero means "no ID" (mostly older chips).
- */
- u8 id[SPI_NOR_MAX_ID_LEN];
- u8 id_len;
-
- /* The size listed here is what works with SPINOR_OP_SE, which isn't
- * necessarily called a "sector" by the vendor.
- */
- unsigned sector_size;
- u16 n_sectors;
-
- u16 page_size;
- u16 addr_width;
-
- u32 flags;
-#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
-#define SST_WRITE BIT(2) /* use SST byte programming */
-#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
-#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
-#define USE_FSR BIT(7) /* use flag status register */
-#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
-#define SPI_NOR_HAS_TB BIT(9) /*
- * Flash SR has Top/Bottom (TB) protect
- * bit. Must be used with
- * SPI_NOR_HAS_LOCK.
- */
-#define SPI_NOR_XSR_RDY BIT(10) /*
- * S3AN flashes have specific opcode to
- * read the status register.
- * Flags SPI_NOR_XSR_RDY and SPI_S3AN
- * use the same bit as one implies the
- * other, but we will get rid of
- * SPI_S3AN soon.
- */
-#define SPI_S3AN BIT(10) /*
- * Xilinx Spartan 3AN In-System Flash
- * (MFR cannot be used for probing
- * because it has the same value as
- * ATMEL flashes)
- */
-#define SPI_NOR_4B_OPCODES BIT(11) /*
- * Use dedicated 4byte address op codes
- * to support memory size above 128Mib.
- */
-#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
-#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
-#define USE_CLSR BIT(14) /* use CLSR command */
-#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
-#define SPI_NOR_TB_SR_BIT6 BIT(16) /*
- * Top/Bottom (TB) is bit 6 of
- * status register. Must be used with
- * SPI_NOR_HAS_TB.
- */
-
- /* Part specific fixup hooks. */
- const struct spi_nor_fixups *fixups;
-};
-
-#define JEDEC_MFR(info) ((info)->id[0])
-
-/**
- * spi_nor_spimem_xfer_data() - helper function to read/write data to
- * flash's memory region
- * @nor: pointer to 'struct spi_nor'
- * @op: pointer to 'struct spi_mem_op' template for transfer
- *
- * Return: number of bytes transferred on success, -errno otherwise
- */
-static ssize_t spi_nor_spimem_xfer_data(struct spi_nor *nor,
- struct spi_mem_op *op)
-{
- bool usebouncebuf = false;
- void *rdbuf = NULL;
- const void *buf;
- int ret;
-
- if (op->data.dir == SPI_MEM_DATA_IN)
- buf = op->data.buf.in;
- else
- buf = op->data.buf.out;
-
- if (object_is_on_stack(buf) || !virt_addr_valid(buf))
- usebouncebuf = true;
-
- if (usebouncebuf) {
- if (op->data.nbytes > nor->bouncebuf_size)
- op->data.nbytes = nor->bouncebuf_size;
-
- if (op->data.dir == SPI_MEM_DATA_IN) {
- rdbuf = op->data.buf.in;
- op->data.buf.in = nor->bouncebuf;
- } else {
- op->data.buf.out = nor->bouncebuf;
- memcpy(nor->bouncebuf, buf,
- op->data.nbytes);
- }
- }
-
- ret = spi_mem_adjust_op_size(nor->spimem, op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(nor->spimem, op);
- if (ret)
- return ret;
-
- if (usebouncebuf && op->data.dir == SPI_MEM_DATA_IN)
- memcpy(rdbuf, nor->bouncebuf, op->data.nbytes);
-
- return op->data.nbytes;
-}
-
-/**
- * spi_nor_spimem_read_data() - read data from flash's memory region via
- * spi-mem
- * @nor: pointer to 'struct spi_nor'
- * @from: offset to read from
- * @len: number of bytes to read
- * @buf: pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
- size_t len, u8 *buf)
-{
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
- SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
- SPI_MEM_OP_DATA_IN(len, buf, 1));
-
- /* get transfer protocols. */
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
-
- /* convert the dummy cycles to the number of bytes */
- op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
-
- return spi_nor_spimem_xfer_data(nor, &op);
-}
-
-/**
- * spi_nor_read_data() - read data from flash memory
- * @nor: pointer to 'struct spi_nor'
- * @from: offset to read from
- * @len: number of bytes to read
- * @buf: pointer to dst buffer
- *
- * Return: number of bytes read successfully, -errno otherwise
- */
-static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
- u8 *buf)
-{
- if (nor->spimem)
- return spi_nor_spimem_read_data(nor, from, len, buf);
-
- return nor->controller_ops->read(nor, from, len, buf);
-}
-
-/**
- * spi_nor_spimem_write_data() - write data to flash memory via
- * spi-mem
- * @nor: pointer to 'struct spi_nor'
- * @to: offset to write to
- * @len: number of bytes to write
- * @buf: pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
- size_t len, const u8 *buf)
-{
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
-
- if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
- op.addr.nbytes = 0;
-
- return spi_nor_spimem_xfer_data(nor, &op);
-}
-
-/**
- * spi_nor_write_data() - write data to flash memory
- * @nor: pointer to 'struct spi_nor'
- * @to: offset to write to
- * @len: number of bytes to write
- * @buf: pointer to src buffer
- *
- * Return: number of bytes written successfully, -errno otherwise
- */
-static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
- const u8 *buf)
-{
- if (nor->spimem)
- return spi_nor_spimem_write_data(nor, to, len, buf);
-
- return nor->controller_ops->write(nor, to, len, buf);
-}
-
-/**
- * spi_nor_write_enable() - Set write enable latch with Write Enable command.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_enable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_write_disable() - Send Write Disable instruction to the chip.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_disable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_sr() - Read the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
- sr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_fsr() - Read the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'
- * @fsr: pointer to a DMA-able buffer where the value of the
- * Flag Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, fsr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
- fsr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading FSR\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_read_cr() - Read the Configuration Register using the
- * SPINOR_OP_RDCR (35h) command.
- * @nor: pointer to 'struct spi_nor'
- * @cr: pointer to a DMA-able buffer where the value of the
- * Configuration Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, cr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading CR\n", ret);
-
- return ret;
-}
-
-/**
- * macronix_set_4byte() - Set 4-byte address mode for Macronix flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
- SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor,
- enable ? SPINOR_OP_EN4B :
- SPINOR_OP_EX4B,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
-
- return ret;
-}
-
-/**
- * st_micron_set_4byte() - Set 4-byte address mode for ST and Micron flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int st_micron_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- ret = macronix_set_4byte(nor, enable);
- if (ret)
- return ret;
-
- return spi_nor_write_disable(nor);
-}
-
-/**
- * spansion_set_4byte() - Set 4-byte address mode for Spansion flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- nor->bouncebuf[0] = enable << 7;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
- nor->bouncebuf, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_write_ear() - Write Extended Address Register.
- * @nor: pointer to 'struct spi_nor'.
- * @ear: value to write to the Extended Address Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
-{
- int ret;
-
- nor->bouncebuf[0] = ear;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
- nor->bouncebuf, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d writing EAR\n", ret);
-
- return ret;
-}
-
-/**
- * winbond_set_4byte() - Set 4-byte address mode for Winbond flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
- * address mode.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int winbond_set_4byte(struct spi_nor *nor, bool enable)
-{
- int ret;
-
- ret = macronix_set_4byte(nor, enable);
- if (ret || enable)
- return ret;
-
- /*
- * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
- * Register to be set to 1, so all 3-byte-address reads come from the
- * second 16M. We must clear the register to enable normal behavior.
- */
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- ret = spi_nor_write_ear(nor, 0);
- if (ret)
- return ret;
-
- return spi_nor_write_disable(nor);
-}
-
-/**
- * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
- sr, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
-
- return ret;
-}
-
-/**
- * s3an_sr_ready() - Query the Status Register of the S3AN flash to see if the
- * flash is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int s3an_sr_ready(struct spi_nor *nor)
-{
- int ret;
-
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return !!(nor->bouncebuf[0] & XSR_RDY);
-}
-
-/**
- * spi_nor_clear_sr() - Clear the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_sr(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing SR\n", ret);
-}
-
-/**
- * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
- * for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_sr(nor, nor->bouncebuf);
-
- if (ret)
- return ret;
-
- if (nor->flags & SNOR_F_USE_CLSR &&
- nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
- if (nor->bouncebuf[0] & SR_E_ERR)
- dev_err(nor->dev, "Erase Error occurred\n");
- else
- dev_err(nor->dev, "Programming Error occurred\n");
-
- spi_nor_clear_sr(nor);
- return -EIO;
- }
-
- return !(nor->bouncebuf[0] & SR_WIP);
-}
-
-/**
- * spi_nor_clear_fsr() - Clear the Flag Status Register.
- * @nor: pointer to 'struct spi_nor'.
- */
-static void spi_nor_clear_fsr(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
-}
-
-/**
- * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
- * ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_fsr_ready(struct spi_nor *nor)
-{
- int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
-
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
- if (nor->bouncebuf[0] & FSR_E_ERR)
- dev_err(nor->dev, "Erase operation failed.\n");
- else
- dev_err(nor->dev, "Program operation failed.\n");
-
- if (nor->bouncebuf[0] & FSR_PT_ERR)
- dev_err(nor->dev,
- "Attempted to modify a protected sector.\n");
-
- spi_nor_clear_fsr(nor);
- return -EIO;
- }
-
- return nor->bouncebuf[0] & FSR_READY;
-}
-
-/**
- * spi_nor_ready() - Query the flash to see if it is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_ready(struct spi_nor *nor)
-{
- int sr, fsr;
-
- if (nor->flags & SNOR_F_READY_XSR_RDY)
- sr = s3an_sr_ready(nor);
- else
- sr = spi_nor_sr_ready(nor);
- if (sr < 0)
- return sr;
- fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
- if (fsr < 0)
- return fsr;
- return sr && fsr;
-}
-
-/**
- * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
- * Status Register until ready, or timeout occurs.
- * @nor: pointer to "struct spi_nor".
- * @timeout_jiffies: jiffies to wait until timeout.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
- unsigned long timeout_jiffies)
-{
- unsigned long deadline;
- int timeout = 0, ret;
-
- deadline = jiffies + timeout_jiffies;
-
- while (!timeout) {
- if (time_after_eq(jiffies, deadline))
- timeout = 1;
-
- ret = spi_nor_ready(nor);
- if (ret < 0)
- return ret;
- if (ret)
- return 0;
-
- cond_resched();
- }
-
- dev_dbg(nor->dev, "flash operation timed out\n");
-
- return -ETIMEDOUT;
-}
-
-/**
- * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
- * flash to be ready, or until a timeout occurs.
- * @nor: pointer to "struct spi_nor".
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
-{
- return spi_nor_wait_till_ready_with_timeout(nor,
- DEFAULT_READY_WAIT_JIFFIES);
-}
-
-/**
- * spi_nor_write_sr() - Write the Status Register.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to DMA-able buffer to write to the Status Register.
- * @len: number of bytes to write to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, sr, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
- sr, len);
- }
-
- if (ret) {
- dev_dbg(nor->dev, "error %d writing SR\n", ret);
- return ret;
- }
-
- return spi_nor_wait_till_ready(nor);
-}
-
-/**
- * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
- * ensure that the byte written matches the value read back.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
-{
- int ret;
-
- nor->bouncebuf[0] = sr1;
-
- ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
- if (ret)
- return ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] != sr1) {
- dev_dbg(nor->dev, "SR1: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
- * Status Register 2 in one shot. Ensure that the byte written in the Status
- * Register 1 matches the value read back, and that the 16-bit write did not
- * affect what was already in the Status Register 2.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register 1.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
-{
- int ret;
- u8 *sr_cr = nor->bouncebuf;
- u8 cr_written;
-
- /* Make sure we don't overwrite the contents of Status Register 2. */
- if (!(nor->flags & SNOR_F_NO_READ_CR)) {
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
- } else if (nor->params.quad_enable) {
- /*
- * If the Status Register 2 Read command (35h) is not
- * supported, we should at least be sure we don't
- * change the value of the SR2 Quad Enable bit.
- *
- * We can safely assume that when the Quad Enable method is
- * set, the value of the QE bit is one, as a consequence of the
- * nor->params.quad_enable() call.
- *
- * We can safely assume that the Quad Enable bit is present in
- * the Status Register 2 at BIT(1). According to the JESD216
- * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
- * Write Status (01h) command is available just for the cases
- * in which the QE bit is described in SR2 at BIT(1).
- */
- sr_cr[1] = SR2_QUAD_EN_BIT1;
- } else {
- sr_cr[1] = 0;
- }
-
- sr_cr[0] = sr1;
-
- ret = spi_nor_write_sr(nor, sr_cr, 2);
- if (ret)
- return ret;
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return 0;
-
- cr_written = sr_cr[1];
-
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
-
- if (cr_written != sr_cr[1]) {
- dev_dbg(nor->dev, "CR: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
- * Configuration Register in one shot. Ensure that the byte written in the
- * Configuration Register matches the value read back, and that the 16-bit write
- * did not affect what was already in the Status Register 1.
- * @nor: pointer to a 'struct spi_nor'.
- * @cr: byte value to be written to the Configuration Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
-{
- int ret;
- u8 *sr_cr = nor->bouncebuf;
- u8 sr_written;
-
- /* Keep the current value of the Status Register 1. */
- ret = spi_nor_read_sr(nor, sr_cr);
- if (ret)
- return ret;
-
- sr_cr[1] = cr;
-
- ret = spi_nor_write_sr(nor, sr_cr, 2);
- if (ret)
- return ret;
-
- sr_written = sr_cr[0];
-
- ret = spi_nor_read_sr(nor, sr_cr);
- if (ret)
- return ret;
-
- if (sr_written != sr_cr[0]) {
- dev_dbg(nor->dev, "SR: Read back test failed\n");
- return -EIO;
- }
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return 0;
-
- ret = spi_nor_read_cr(nor, &sr_cr[1]);
- if (ret)
- return ret;
-
- if (cr != sr_cr[1]) {
- dev_dbg(nor->dev, "CR: read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
- * the byte written matches the value read back, without affecting other bits
- * in Status Registers 1 and 2.
- * @nor: pointer to a 'struct spi_nor'.
- * @sr1: byte value to be written to the Status Register.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
-{
- if (nor->flags & SNOR_F_HAS_16BIT_SR)
- return spi_nor_write_16bit_sr_and_check(nor, sr1);
-
- return spi_nor_write_sr1_and_check(nor, sr1);
-}
-
-/**
- * spi_nor_write_sr2() - Write the Status Register 2 using the
- * SPINOR_OP_WRSR2 (3eh) command.
- * @nor: pointer to 'struct spi_nor'.
- * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
-{
- int ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- return ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(1, sr2, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
- sr2, 1);
- }
-
- if (ret) {
- dev_dbg(nor->dev, "error %d writing SR2\n", ret);
- return ret;
- }
-
- return spi_nor_wait_till_ready(nor);
-}
-
-/**
- * spi_nor_read_sr2() - Read the Status Register 2 using the
- * SPINOR_OP_RDSR2 (3fh) command.
- * @nor: pointer to 'struct spi_nor'.
- * @sr2: pointer to DMA-able buffer where the value of the
- * Status Register 2 will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr2, 1));
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
- sr2, 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR2\n", ret);
-
- return ret;
-}
-
-/**
- * spi_nor_erase_chip() - Erase the entire flash memory.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_erase_chip(struct spi_nor *nor)
-{
- int ret;
-
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
- NULL, 0);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d erasing chip\n", ret);
-
- return ret;
-}
-
-static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
-{
- return mtd->priv;
-}
-
-static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == opcode)
- return table[i][1];
-
- /* No conversion found, keep input op code. */
- return opcode;
-}
-
-static u8 spi_nor_convert_3to4_read(u8 opcode)
-{
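-	/* Each pair maps a 3-byte-address opcode to its 4-byte-address variant. */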
- static const u8 spi_nor_3to4_read[][2] = {
- { SPINOR_OP_READ, SPINOR_OP_READ_4B },
- { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
- { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
- { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
- { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
- { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
- { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
- { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
-
- { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
- { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
- { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
- ARRAY_SIZE(spi_nor_3to4_read));
-}
-
-static u8 spi_nor_convert_3to4_program(u8 opcode)
-{
- static const u8 spi_nor_3to4_program[][2] = {
- { SPINOR_OP_PP, SPINOR_OP_PP_4B },
- { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
- { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
- { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
- { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
- ARRAY_SIZE(spi_nor_3to4_program));
-}
-
-static u8 spi_nor_convert_3to4_erase(u8 opcode)
-{
- static const u8 spi_nor_3to4_erase[][2] = {
- { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
- { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
- { SPINOR_OP_SE, SPINOR_OP_SE_4B },
- };
-
- return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
- ARRAY_SIZE(spi_nor_3to4_erase));
-}
-
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
-{
- nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
- nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
- nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
-
- if (!spi_nor_has_uniform_erase(nor)) {
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- struct spi_nor_erase_type *erase;
- int i;
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- erase = &map->erase_type[i];
- erase->opcode =
- spi_nor_convert_3to4_erase(erase->opcode);
- }
- }
-}
-
-static int spi_nor_lock_and_prep(struct spi_nor *nor)
-{
- int ret = 0;
-
- mutex_lock(&nor->lock);
-
- if (nor->controller_ops && nor->controller_ops->prepare) {
- ret = nor->controller_ops->prepare(nor);
- if (ret) {
- mutex_unlock(&nor->lock);
- return ret;
- }
- }
- return ret;
-}
-
-static void spi_nor_unlock_and_unprep(struct spi_nor *nor)
-{
- if (nor->controller_ops && nor->controller_ops->unprepare)
- nor->controller_ops->unprepare(nor);
- mutex_unlock(&nor->lock);
-}
-
-/*
- * This code converts an address to the Default Address Mode, which has
- * non-power-of-two page sizes. We must support this mode because it is the
- * default mode supported by Xilinx tools, it can access the whole flash
- * area, and changing over to the Power-of-two mode is irreversible and
- * corrupts the original data.
- * Addr can safely be unsigned int, the biggest S3AN device is smaller than
- * 4 MiB.
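- *
- * For example: with page_size = 264 and addr = 1000, offset = 1000 % 264 =
- * 208 and page = 3; since 264 <= 512 the page index is shifted left by 9,
- * giving (3 << 9) | 208 = 1744.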
- */
-static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
-{
- u32 offset, page;
-
- offset = addr % nor->page_size;
- page = addr / nor->page_size;
- page <<= (nor->page_size > 512) ? 10 : 9;
-
- return page | offset;
-}
-
-static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
-{
- if (!nor->params.convert_addr)
- return addr;
-
- return nor->params.convert_addr(nor, addr);
-}
-
-/*
- * Initiate the erasure of a single sector
- */
-static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
-{
- int i;
-
- addr = spi_nor_convert_addr(nor, addr);
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
- SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_NO_DATA);
-
- return spi_mem_exec_op(nor->spimem, &op);
- } else if (nor->controller_ops->erase) {
- return nor->controller_ops->erase(nor, addr);
- }
-
- /*
- * Default implementation, if driver doesn't have a specialized HW
- * control
- */
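-	/* Pack the address into the bounce buffer, most significant byte first. */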
- for (i = nor->addr_width - 1; i >= 0; i--) {
- nor->bouncebuf[i] = addr & 0xff;
- addr >>= 8;
- }
-
- return nor->controller_ops->write_reg(nor, nor->erase_opcode,
- nor->bouncebuf, nor->addr_width);
-}
-
-/**
- * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @dividend: dividend value
- * @remainder: pointer to u32 remainder (will be updated)
- *
- * Return: the result of the division
- */
-static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
- u64 dividend, u32 *remainder)
-{
- /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
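-	/* e.g. a 64 KiB erase type has size_shift = 16 and size_mask = 0xffff. */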
- *remainder = (u32)dividend & erase->size_mask;
- return dividend >> erase->size_shift;
-}
-
-/**
- * spi_nor_find_best_erase_type() - find the best erase type for the given
- * offset in the serial flash memory and the
- * number of bytes to erase. The region in
- * which the address fits is expected to be
- * provided.
- * @map: the erase map of the SPI NOR
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Return: a pointer to the best-fitted erase type, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
- const struct spi_nor_erase_region *region,
- u64 addr, u32 len)
-{
- const struct spi_nor_erase_type *erase;
- u32 rem;
- int i;
- u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
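-	/* The low bits of region->offset are a bitmask of the erase types this region supports. */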
-
- /*
- * Erase types are ordered by size, with the smallest erase type at
- * index 0.
- */
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- /* Does the erase region support the tested erase type? */
- if (!(erase_mask & BIT(i)))
- continue;
-
- erase = &map->erase_type[i];
-
- /* Don't erase more than what the user has asked for. */
- if (erase->size > len)
- continue;
-
- /* Alignment is not mandatory for overlaid regions */
- if (region->offset & SNOR_OVERLAID_REGION)
- return erase;
-
- spi_nor_div_by_erase_size(erase, addr, &rem);
- if (rem)
- continue;
- else
- return erase;
- }
-
- return NULL;
-}
-
-/**
- * spi_nor_region_next() - get the next spi nor region
- * @region: pointer to a structure that describes a SPI NOR erase region
- *
- * Return: the next spi nor region or NULL if last region.
- */
-static struct spi_nor_erase_region *
-spi_nor_region_next(struct spi_nor_erase_region *region)
-{
- if (spi_nor_region_is_last(region))
- return NULL;
- region++;
- return region;
-}
-
-/**
- * spi_nor_find_erase_region() - find the region of the serial flash memory in
- * which the offset fits
- * @map: the erase map of the SPI NOR
- * @addr: offset in the serial flash memory
- *
- * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_region *
-spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
-{
- struct spi_nor_erase_region *region = map->regions;
- u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- u64 region_end = region_start + region->size;
-
- while (addr < region_start || addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- return ERR_PTR(-EINVAL);
-
- region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
- region_end = region_start + region->size;
- }
-
- return region;
-}
-
-/**
- * spi_nor_init_erase_cmd() - initialize an erase command
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @erase: pointer to a structure that describes a SPI NOR erase type
- *
- * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
- * otherwise.
- */
-static struct spi_nor_erase_command *
-spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
- const struct spi_nor_erase_type *erase)
-{
- struct spi_nor_erase_command *cmd;
-
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&cmd->list);
- cmd->opcode = erase->opcode;
- cmd->count = 1;
-
- if (region->offset & SNOR_OVERLAID_REGION)
- cmd->size = region->size;
- else
- cmd->size = erase->size;
-
- return cmd;
-}
-
-/**
- * spi_nor_destroy_erase_cmd_list() - destroy erase command list
- * @erase_list: list of erase commands
- */
-static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
-{
- struct spi_nor_erase_command *cmd, *next;
-
- list_for_each_entry_safe(cmd, next, erase_list, list) {
- list_del(&cmd->list);
- kfree(cmd);
- }
-}
-
-/**
- * spi_nor_init_erase_cmd_list() - initialize erase command list
- * @nor: pointer to a 'struct spi_nor'
- * @erase_list: list of erase commands to be executed once we validate that the
- * erase can be performed
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Builds the list of best-fitted erase commands and verifies that the erase can
- * be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
- struct list_head *erase_list,
- u64 addr, u32 len)
-{
- const struct spi_nor_erase_map *map = &nor->params.erase_map;
- const struct spi_nor_erase_type *erase, *prev_erase = NULL;
- struct spi_nor_erase_region *region;
- struct spi_nor_erase_command *cmd = NULL;
- u64 region_end;
- int ret = -EINVAL;
-
- region = spi_nor_find_erase_region(map, addr);
- if (IS_ERR(region))
- return PTR_ERR(region);
-
- region_end = spi_nor_region_end(region);
-
- while (len) {
- erase = spi_nor_find_best_erase_type(map, region, addr, len);
- if (!erase)
- goto destroy_erase_cmd_list;
-
- if (prev_erase != erase ||
- region->offset & SNOR_OVERLAID_REGION) {
- cmd = spi_nor_init_erase_cmd(region, erase);
- if (IS_ERR(cmd)) {
- ret = PTR_ERR(cmd);
- goto destroy_erase_cmd_list;
- }
-
- list_add_tail(&cmd->list, erase_list);
- } else {
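-			/* Same erase type as the previous chunk: merge into one command. */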
- cmd->count++;
- }
-
- addr += cmd->size;
- len -= cmd->size;
-
- if (len && addr >= region_end) {
- region = spi_nor_region_next(region);
- if (!region)
- goto destroy_erase_cmd_list;
- region_end = spi_nor_region_end(region);
- }
-
- prev_erase = erase;
- }
-
- return 0;
-
-destroy_erase_cmd_list:
- spi_nor_destroy_erase_cmd_list(erase_list);
- return ret;
-}
-
-/**
- * spi_nor_erase_multi_sectors() - perform a non-uniform erase
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the serial flash memory
- * @len: number of bytes to erase
- *
- * Build a list of best-fitted erase commands and execute it once we validate
- * that the erase can be performed.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
-{
- LIST_HEAD(erase_list);
- struct spi_nor_erase_command *cmd, *next;
- int ret;
-
- ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
- if (ret)
- return ret;
-
- list_for_each_entry_safe(cmd, next, &erase_list, list) {
- nor->erase_opcode = cmd->opcode;
- while (cmd->count) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto destroy_erase_cmd_list;
-
- ret = spi_nor_erase_sector(nor, addr);
- if (ret)
- goto destroy_erase_cmd_list;
-
- addr += cmd->size;
- cmd->count--;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto destroy_erase_cmd_list;
- }
- list_del(&cmd->list);
- kfree(cmd);
- }
-
- return 0;
-
-destroy_erase_cmd_list:
- spi_nor_destroy_erase_cmd_list(&erase_list);
- return ret;
-}
-
-/*
- * Erase an address range on the nor chip. The address range may span
- * one or more erase sectors. Return an error if there is a problem erasing.
- */
-static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
- uint32_t rem;
- int ret;
-
- dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
- (long long)instr->len);
-
- if (spi_nor_has_uniform_erase(nor)) {
- div_u64_rem(instr->len, mtd->erasesize, &rem);
- if (rem)
- return -EINVAL;
- }
-
- addr = instr->addr;
- len = instr->len;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- /* whole-chip erase? */
- if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
- unsigned long timeout;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_erase_chip(nor);
- if (ret)
- goto erase_err;
-
- /*
- * Scale the timeout linearly with the size of the flash, with
- * a minimum calibrated to an old 2MB flash. We could try to
- * pull these from CFI/SFDP, but these values should be good
- * enough for now.
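-		 * e.g. a 16 MiB flash waits 8x the 2 MiB baseline.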
- */
- timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
- CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
- (unsigned long)(mtd->size / SZ_2M));
- ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
- if (ret)
- goto erase_err;
-
- /* REVISIT in some cases we could speed up erasing large regions
- * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
- * to use "small sector erase", but that's not always optimal.
- */
-
- /* "sector"-at-a-time erase */
- } else if (spi_nor_has_uniform_erase(nor)) {
- while (len) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_erase_sector(nor, addr);
- if (ret)
- goto erase_err;
-
- addr += mtd->erasesize;
- len -= mtd->erasesize;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto erase_err;
- }
-
- /* erase multiple sectors */
- } else {
- ret = spi_nor_erase_multi_sectors(nor, addr, len);
- if (ret)
- goto erase_err;
- }
-
- ret = spi_nor_write_disable(nor);
-
-erase_err:
- spi_nor_unlock_and_unprep(nor);
-
- return ret;
-}
-
-static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
- uint64_t *len)
-{
- struct mtd_info *mtd = &nor->mtd;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- int shift = ffs(mask) - 1;
- int pow;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
-
- if (!(sr & mask)) {
- /* No protection */
- *ofs = 0;
- *len = 0;
- } else {
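-		/* e.g. BP2..BP0 = 0b101 on an 8 MiB part: pow = 2, so len = 2 MiB. */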
- pow = ((sr & mask) ^ mask) >> shift;
- *len = mtd->size >> pow;
- if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
- *ofs = 0;
- else
- *ofs = mtd->size - *len;
- }
-}
-
-/*
- * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
- * @locked is false); 0 otherwise
- */
-static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr, bool locked)
-{
- loff_t lock_offs;
- uint64_t lock_len;
-
- if (!len)
- return 1;
-
- stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
-
- if (locked)
- /* Requested range is a sub-range of locked range */
- return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
- else
- /* Requested range does not overlap with locked range */
- return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
-}
-
-static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
-{
- return stm_check_lock_status_sr(nor, ofs, len, sr, true);
-}
-
-static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
-{
- return stm_check_lock_status_sr(nor, ofs, len, sr, false);
-}
-
-/*
- * Lock a region of the flash. Compatible with ST Micro and similar flash.
- * Supports the block protection bits BP{0,1,2} in the status register
- * (SR). Does not support these features found in newer SR bitfields:
- * - SEC: sector/block protect - only handle SEC=0 (block protect)
- * - CMP: complement protect - only support CMP=0 (range is not complemented)
- *
- * Support for the following is provided conditionally for some flash:
- * - TB: top/bottom protect
- *
- * Sample table portion for 8MB flash (Winbond w25q64fw):
- *
- * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
- * --------------------------------------------------------------------------
- * X | X | 0 | 0 | 0 | NONE | NONE
- * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
- * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
- * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
- * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
- * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
- * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
- * X | X | 1 | 1 | 1 | 8 MB | ALL
- * ------|-------|-------|-------|-------|---------------|-------------------
- * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
- * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
- * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
- * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
- * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
- * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- struct mtd_info *mtd = &nor->mtd;
- int ret, status_old, status_new;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- u8 shift = ffs(mask) - 1, pow, val;
- loff_t lock_len;
- bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
- bool use_top;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- status_old = nor->bouncebuf[0];
-
- /* If nothing in our range is unlocked, we don't need to do anything */
- if (stm_is_locked_sr(nor, ofs, len, status_old))
- return 0;
-
- /* If anything below us is unlocked, we can't use 'bottom' protection */
- if (!stm_is_locked_sr(nor, 0, ofs, status_old))
- can_be_bottom = false;
-
- /* If anything above us is unlocked, we can't use 'top' protection */
- if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
- status_old))
- can_be_top = false;
-
- if (!can_be_bottom && !can_be_top)
- return -EINVAL;
-
- /* Prefer top, if both are valid */
- use_top = can_be_top;
-
- /* lock_len: length of region that should end up locked */
- if (use_top)
- lock_len = mtd->size - ofs;
- else
- lock_len = ofs + len;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
-
- /*
- * Need smallest pow such that:
- *
- * 1 / (2^pow) <= (len / size)
- *
- * so (assuming power-of-2 size) we do:
- *
- * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
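-	 *
-	 * e.g. locking the top 1 MiB of an 8 MiB part: pow = 23 - 20 = 3, so
-	 * val = 0x1c - (3 << 2) = SR_BP2, matching the "Upper 1/8" row of the
-	 * table above.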
- */
- pow = ilog2(mtd->size) - ilog2(lock_len);
- val = mask - (pow << shift);
- if (val & ~mask)
- return -EINVAL;
- /* Don't "lock" with no region! */
- if (!(val & mask))
- return -EINVAL;
-
- status_new = (status_old & ~mask & ~tb_mask) | val;
-
- /* Disallow further writes if WP pin is asserted */
- status_new |= SR_SRWD;
-
- if (!use_top)
- status_new |= tb_mask;
-
- /* Don't bother if they're the same */
- if (status_new == status_old)
- return 0;
-
- /* Only modify protection if it will not unlock other areas */
- if ((status_new & mask) < (status_old & mask))
- return -EINVAL;
-
- return spi_nor_write_sr_and_check(nor, status_new);
-}
-
-/*
- * Unlock a region of the flash. See stm_lock() for more info
- *
- * Returns negative on errors, 0 on success.
- */
-static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- struct mtd_info *mtd = &nor->mtd;
- int ret, status_old, status_new;
- u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
- u8 tb_mask = SR_TB_BIT5;
- u8 shift = ffs(mask) - 1, pow, val;
- loff_t lock_len;
- bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
- bool use_top;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- status_old = nor->bouncebuf[0];
-
- /* If nothing in our range is locked, we don't need to do anything */
- if (stm_is_unlocked_sr(nor, ofs, len, status_old))
- return 0;
-
- /* If anything below us is locked, we can't use 'top' protection */
- if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
- can_be_top = false;
-
- /* If anything above us is locked, we can't use 'bottom' protection */
- if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
- status_old))
- can_be_bottom = false;
-
- if (!can_be_bottom && !can_be_top)
- return -EINVAL;
-
- /* Prefer top, if both are valid */
- use_top = can_be_top;
-
- /* lock_len: length of region that should remain locked */
- if (use_top)
- lock_len = mtd->size - (ofs + len);
- else
- lock_len = ofs;
-
- if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
- tb_mask = SR_TB_BIT6;
- /*
- * Need largest pow such that:
- *
- * 1 / (2^pow) >= (len / size)
- *
- * so (assuming power-of-2 size) we do:
- *
- * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
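-	 *
-	 * e.g. leaving the top 1 MiB of an 8 MiB part locked: pow = 23 - 20 = 3,
-	 * so val = 0x1c - (3 << 2) = SR_BP2 and the upper 1/8 stays protected.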
- */
- pow = ilog2(mtd->size) - order_base_2(lock_len);
- if (lock_len == 0) {
- val = 0; /* fully unlocked */
- } else {
- val = mask - (pow << shift);
- /* Some power-of-two sizes are not supported */
- if (val & ~mask)
- return -EINVAL;
- }
-
- status_new = (status_old & ~mask & ~tb_mask) | val;
-
- /* Don't protect status register if we're fully unlocked */
- if (lock_len == 0)
- status_new &= ~SR_SRWD;
-
- if (!use_top)
- status_new |= tb_mask;
-
- /* Don't bother if they're the same */
- if (status_new == status_old)
- return 0;
-
- /* Only modify protection if it will not lock other areas */
- if ((status_new & mask) > (status_old & mask))
- return -EINVAL;
-
- return spi_nor_write_sr_and_check(nor, status_new);
-}
-
-/*
- * Check if a region of the flash is (completely) locked. See stm_lock() for
- * more info.
- *
- * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
- * negative on errors.
- */
-static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
-{
- int ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return stm_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
-}
-
-static const struct spi_nor_locking_ops stm_locking_ops = {
- .lock = stm_lock,
- .unlock = stm_unlock,
- .is_locked = stm_is_locked,
-};
-
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->lock(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->unlock(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = nor->params.locking_ops->is_locked(nor, ofs, len);
-
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-/**
- * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
- * Register 1.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Bit 6 of the Status Register 1 is the QE bit for Macronix-like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
-{
- int ret;
-
- ret = spi_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
- return 0;
-
- nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
-
- return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
-}
-
-/**
- * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
- * Register 2.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * Bit 1 of the Status Register 2 is the QE bit for Spansion-like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
-{
- int ret;
-
- if (nor->flags & SNOR_F_NO_READ_CR)
- return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
-
- ret = spi_nor_read_cr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
- return 0;
-
- nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
-
- return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
-}
-
-/**
- * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register 2.
- *
- * This is one of the procedures to set the QE bit described in the SFDP
- * (JESD216 rev B) specification but no manufacturer using this procedure has
- * been identified yet, hence the name of the function.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
-{
- u8 *sr2 = nor->bouncebuf;
- int ret;
- u8 sr2_written;
-
- /* Check current Quad Enable bit value. */
- ret = spi_nor_read_sr2(nor, sr2);
- if (ret)
- return ret;
- if (*sr2 & SR2_QUAD_EN_BIT7)
- return 0;
-
- /* Update the Quad Enable bit. */
- *sr2 |= SR2_QUAD_EN_BIT7;
-
- ret = spi_nor_write_sr2(nor, sr2);
- if (ret)
- return ret;
-
- sr2_written = *sr2;
-
- /* Read back and check it. */
- ret = spi_nor_read_sr2(nor, sr2);
- if (ret)
- return ret;
-
- if (*sr2 != sr2_written) {
- dev_dbg(nor->dev, "SR2: Read back test failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff, \
- ((_ext_id) >> 8) & 0xff, \
- (_ext_id) & 0xff, \
- }, \
- .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags),
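-/*
- * e.g. INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) (the w25q128 entry below)
- * expands to id = { 0xef, 0x40, 0x18 } with id_len = 3, describing a part
- * with 256 sectors of 64 KiB, i.e. 16 MiB total.
- */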
-
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff, \
- ((_ext_id) >> 16) & 0xff, \
- ((_ext_id) >> 8) & 0xff, \
- (_ext_id) & 0xff, \
- }, \
- .id_len = 6, \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = 256, \
- .flags = (_flags),
-
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
- .sector_size = (_sector_size), \
- .n_sectors = (_n_sectors), \
- .page_size = (_page_size), \
- .addr_width = (_addr_width), \
- .flags = (_flags),
-
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
- .id = { \
- ((_jedec_id) >> 16) & 0xff, \
- ((_jedec_id) >> 8) & 0xff, \
- (_jedec_id) & 0xff \
- }, \
- .id_len = 3, \
- .sector_size = (8*_page_size), \
- .n_sectors = (_n_sectors), \
- .page_size = _page_size, \
- .addr_width = 3, \
- .flags = SPI_NOR_NO_FR | SPI_S3AN,
-
-static int
-is25lp256_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- /*
- * IS25LP256 supports 4B opcodes, but the BFPT advertises a
- * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
- * Overwrite the address width advertised by the BFPT.
- */
- if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
- BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
- nor->addr_width = 4;
-
- return 0;
-}
-
-static struct spi_nor_fixups is25lp256_fixups = {
- .post_bfpt = is25lp256_post_bfpt_fixups,
-};
-
-static int
-mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- /*
- * MX25L25635F supports 4B opcodes but MX25L25635E does not.
- * Unfortunately, Macronix has re-used the same JEDEC ID for both
- * variants which prevents us from defining a new entry in the parts
- * table.
- * We need a way to differentiate MX25L25635E and MX25L25635F, and it
- * seems that the F version advertises support for Fast Read 4-4-4 in
- * its BFPT table.
- */
- if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- return 0;
-}
-
-static struct spi_nor_fixups mx25l25635_fixups = {
- .post_bfpt = mx25l25635_post_bfpt_fixups,
-};
-
-static void gd25q256_default_init(struct spi_nor *nor)
-{
- /*
-	 * Some manufacturers, like GigaDevice, may use a different
-	 * bit to set QE on different memories, so the MFR ID alone
-	 * can't indicate the quad_enable method for this case; we
-	 * need to set it in the default_init fixup hook.
- */
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
-}
-
-static struct spi_nor_fixups gd25q256_fixups = {
- .default_init = gd25q256_default_init,
-};
-
-/* NOTE: double check command sets and memory organization when you add
- * more nor chips. This current list focuses on newer chips, which
- * have been converging on command sets including JEDEC ID.
- *
- * All newly added entries should describe *hardware* and should use SECT_4K
- * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
- * scenarios excluding small sectors there is a config option that can be
- * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
- * For historical (and compatibility) reasons (before we got the above config)
- * some old entries may be missing the 4K flag.
- */
-static const struct flash_info spi_nor_ids[] = {
- /* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
- { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
-
- { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
- { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
-
- { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
- { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
- { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
- { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
-
- { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
-
- /* EON -- en25xxx */
- { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
- { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
- { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
- { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
- { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
- { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
- { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
- { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
-
- /* ESMT */
- { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
- { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
-
- /* Everspin */
- { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
- /* Fujitsu */
- { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
-
- /* GigaDevice */
- {
- "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_TB_SR_BIT6)
- .fixups = &gd25q256_fixups,
- },
-
- /* Intel/Numonyx -- xxxs33b */
- { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
- { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
- { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
-
- /* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
- { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ) },
- { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
- { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
- .fixups = &is25lp256_fixups },
-
- /* Macronix */
- { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
- { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
- { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
- { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
- { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
- { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
- { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
- { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
- { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
- .fixups = &mx25l25635_fixups },
- { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
- { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
-
- /* Micron <--> ST Micro */
- { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
- { "mt25qu256a", INFO6(0x20bb19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
- USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
-
- /* Micron */
- {
- "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES)
- },
- { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
-
- /* PMC */
- { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
- { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
- { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
-
- /* Spansion/Cypress -- single (large) sector size only, at least
- * for the chips listed here (without boot sectors).
- */
- { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | USE_CLSR) },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
- { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
- { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
- { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
- { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
- { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
- { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
-
- /* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
- { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
- { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
- { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
- { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, SECT_4K |
- SPI_NOR_DUAL_READ) },
- { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-
- /* ST Microelectronics -- newer production may have feature updates */
- { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
- { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
- { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
- { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
- { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
- { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
- { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
- { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
- { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
-
- { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
- { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
- { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
- { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
- { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
- { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
- { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
- { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
- { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
-
- { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
- { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
- { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
-
- { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
- { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
- { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
-
- { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
- { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
- { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
- { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
-
- /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
- { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
- { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
- { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
- {
- "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
- {
- "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
- { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- {
- "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- {
- "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- {
- "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
- },
- { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
- { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
- SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
-
- /* Catalyst / On Semiconductor -- non-JEDEC */
- { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
- { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-
- /* Xilinx S3AN Internal Flash */
- { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
- { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
- { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
- { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
-
- /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
- { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { },
-};
-
-static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
-{
- int tmp;
- u8 *id = nor->bouncebuf;
- const struct flash_info *info;
-
- if (nor->spimem) {
- struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
- SPI_MEM_OP_NO_ADDR,
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
-
- tmp = spi_mem_exec_op(nor->spimem, &op);
- } else {
- tmp = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
- SPI_NOR_MAX_ID_LEN);
- }
- if (tmp) {
- dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
- return ERR_PTR(tmp);
- }
-
- for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
- info = &spi_nor_ids[tmp];
- if (info->id_len) {
- if (!memcmp(info->id, id, info->id_len))
- return &spi_nor_ids[tmp];
- }
- }
- dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
- SPI_NOR_MAX_ID_LEN, id);
- return ERR_PTR(-ENODEV);
-}
-
-static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- ssize_t ret;
-
- dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- while (len) {
- loff_t addr = from;
-
- addr = spi_nor_convert_addr(nor, addr);
-
- ret = spi_nor_read_data(nor, addr, len, buf);
- if (ret == 0) {
- /* We shouldn't see 0-length reads */
- ret = -EIO;
- goto read_err;
- }
- if (ret < 0)
- goto read_err;
-
- WARN_ON(ret > len);
- *retlen += ret;
- buf += ret;
- from += ret;
- len -= ret;
- }
- ret = 0;
-
-read_err:
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t actual = 0;
- int ret;
-
- dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto out;
-
- nor->sst_write_second = false;
-
- /* Start write from odd address. */
- if (to % 2) {
- nor->program_opcode = SPINOR_OP_BP;
-
- /* write one byte. */
- ret = spi_nor_write_data(nor, to, 1, buf);
- if (ret < 0)
- goto out;
-		WARN(ret != 1, "While writing 1 byte, wrote %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- to++;
- actual++;
- }
-
- /* Write out most of the data here. */
- for (; actual < len - 1; actual += 2) {
- nor->program_opcode = SPINOR_OP_AAI_WP;
-
- /* write two bytes. */
- ret = spi_nor_write_data(nor, to, 2, buf + actual);
- if (ret < 0)
- goto out;
-		WARN(ret != 2, "While writing 2 bytes, wrote %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
- to += 2;
- nor->sst_write_second = true;
- }
- nor->sst_write_second = false;
-
- ret = spi_nor_write_disable(nor);
- if (ret)
- goto out;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- /* Write out trailing byte if it exists. */
- if (actual != len) {
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto out;
-
- nor->program_opcode = SPINOR_OP_BP;
- ret = spi_nor_write_data(nor, to, 1, buf + actual);
- if (ret < 0)
- goto out;
-		WARN(ret != 1, "While writing 1 byte, wrote %i bytes\n", ret);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto out;
-
- actual += 1;
-
- ret = spi_nor_write_disable(nor);
- }
-out:
- *retlen += actual;
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
-/*
- * Write an address range to the nor chip. Data must be written in
- * FLASH_PAGESIZE chunks. The address range may be any size provided
- * it is within the physical boundaries.
- */
-static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t page_offset, page_remain, i;
- ssize_t ret;
-
- dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
-
- ret = spi_nor_lock_and_prep(nor);
- if (ret)
- return ret;
-
- for (i = 0; i < len; ) {
- ssize_t written;
- loff_t addr = to + i;
-
- /*
- * If page_size is a power of two, the offset can be quickly
- * calculated with an AND operation. On the other cases we
- * need to do a modulus operation (more expensive).
- * Power of two numbers have only one bit set and we can use
- * the instruction hweight32 to detect if we need to do a
- * modulus (do_div()) or not.
- */
- if (hweight32(nor->page_size) == 1) {
- page_offset = addr & (nor->page_size - 1);
- } else {
- uint64_t aux = addr;
-
- page_offset = do_div(aux, nor->page_size);
- }
- /* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
-
- addr = spi_nor_convert_addr(nor, addr);
-
- ret = spi_nor_write_enable(nor);
- if (ret)
- goto write_err;
-
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
- if (ret < 0)
- goto write_err;
- written = ret;
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto write_err;
- *retlen += written;
- i += written;
- }
-
-write_err:
- spi_nor_unlock_and_unprep(nor);
- return ret;
-}
-
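-/*
- * A minimal sketch (not part of the driver) of the page-offset computation
- * used in spi_nor_write() above: power-of-two page sizes take the cheap
- * mask path, while any other size (e.g. the 264-byte S3AN pages) needs a
- * 64-bit division via do_div().
- */
-static inline u32 spi_nor_page_offset_sketch(u64 addr, u32 page_size)
-{
-	/* Power of two: only one bit set, the offset is just the low bits. */
-	if (hweight32(page_size) == 1)
-		return addr & (page_size - 1);
-
-	/* do_div() divides 'addr' in place and returns the remainder. */
-	return do_div(addr, page_size);
-}
-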
-static int spi_nor_check(struct spi_nor *nor)
-{
- if (!nor->dev ||
- (!nor->spimem && !nor->controller_ops) ||
- (!nor->spimem && nor->controller_ops &&
- (!nor->controller_ops->read ||
- !nor->controller_ops->write ||
- !nor->controller_ops->read_reg ||
- !nor->controller_ops->write_reg))) {
- pr_err("spi-nor: please fill all the necessary fields!\n");
- return -EINVAL;
- }
-
- if (nor->spimem && nor->controller_ops) {
- dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int s3an_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- int ret;
-
- ret = spi_nor_xread_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- nor->erase_opcode = SPINOR_OP_XSE;
- nor->program_opcode = SPINOR_OP_XPP;
- nor->read_opcode = SPINOR_OP_READ;
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
- /*
-	 * These flashes have a page size of 264 or 528 bytes (known as
-	 * Default addressing mode). It can be changed to a more standard
-	 * Power of two mode where the page size is 256/512. This comes
-	 * at a price: there is 3% less space, the data is corrupted, and
-	 * the page size cannot be changed back to Default addressing
-	 * mode.
-	 *
-	 * The current addressing mode can be read from the XRDSR register
-	 * and should not be changed, because that is a destructive operation.
- */
- if (nor->bouncebuf[0] & XSR_PAGESIZE) {
- /* Flash in Power of 2 mode */
- nor->page_size = (nor->page_size == 264) ? 256 : 512;
- nor->mtd.writebufsize = nor->page_size;
- nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
- nor->mtd.erasesize = 8 * nor->page_size;
- } else {
- /* Flash in Default addressing mode */
- nor->params.convert_addr = s3an_convert_addr;
- nor->mtd.erasesize = nor->info->sector_size;
- }
-
- return 0;
-}
-
-static void
-spi_nor_set_read_settings(struct spi_nor_read_command *read,
- u8 num_mode_clocks,
- u8 num_wait_states,
- u8 opcode,
- enum spi_nor_protocol proto)
-{
- read->num_mode_clocks = num_mode_clocks;
- read->num_wait_states = num_wait_states;
- read->opcode = opcode;
- read->proto = proto;
-}
-
-static void
-spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
- u8 opcode,
- enum spi_nor_protocol proto)
-{
- pp->opcode = opcode;
- pp->proto = proto;
-}
-
-static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == (int)hwcaps)
- return table[i][1];
-
- return -EINVAL;
-}
-
-static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
-{
- static const int hwcaps_read2cmd[][2] = {
- { SNOR_HWCAPS_READ, SNOR_CMD_READ },
- { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
- { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
- { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
- { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
- { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
- { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
- { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
- { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
- { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
- { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
- { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
- { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
- { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
- { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
- ARRAY_SIZE(hwcaps_read2cmd));
-}
-
-static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
-{
- static const int hwcaps_pp2cmd[][2] = {
- { SNOR_HWCAPS_PP, SNOR_CMD_PP },
- { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
- { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
- { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
- { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
- { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
- { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
- ARRAY_SIZE(hwcaps_pp2cmd));
-}
-
-/*
- * Serial Flash Discoverable Parameters (SFDP) parsing.
- */
-
-/**
- * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
- *                      addr_width and read_dummy members of the struct
- *                      spi_nor should be previously set.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the serial flash memory
- * @len: number of bytes to read
- * @buf: buffer where the data is copied into (dma-safe memory)
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
-{
- ssize_t ret;
-
- while (len) {
- ret = spi_nor_read_data(nor, addr, len, buf);
- if (ret < 0)
- return ret;
- if (!ret || ret > len)
- return -EIO;
-
- buf += ret;
- addr += ret;
- len -= ret;
- }
- return 0;
-}
-
-/**
- * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the SFDP area to start reading data from
- * @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into (dma-safe memory)
- *
- * Whatever the actual numbers of bytes for address and dummy cycles are
- * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
- * followed by a 3-byte address and 8 dummy clock cycles.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
- size_t len, void *buf)
-{
- u8 addr_width, read_opcode, read_dummy;
- int ret;
-
- read_opcode = nor->read_opcode;
- addr_width = nor->addr_width;
- read_dummy = nor->read_dummy;
-
- nor->read_opcode = SPINOR_OP_RDSFDP;
- nor->addr_width = 3;
- nor->read_dummy = 8;
-
- ret = spi_nor_read_raw(nor, addr, len, buf);
-
- nor->read_opcode = read_opcode;
- nor->addr_width = addr_width;
- nor->read_dummy = read_dummy;
-
- return ret;
-}
-
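-/*
- * For illustration (a sketch mirroring the spi-mem path used in
- * spi_nor_read_id() above): the fixed Read SFDP sequence corresponds to the
- * following op template on a 1-1-1 bus, i.e. a 3-byte address and one dummy
- * byte (8 cycles on a 1-bit bus), whatever the current Fast Read settings:
- *
- *	struct spi_mem_op op =
- *		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1),
- *			   SPI_MEM_OP_ADDR(3, addr, 1),
- *			   SPI_MEM_OP_DUMMY(1, 1),
- *			   SPI_MEM_OP_DATA_IN(len, buf, 1));
- */
-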
-/**
- * spi_nor_spimem_check_op - check if the operation is supported
- * by controller
- * @nor: pointer to a 'struct spi_nor'
- * @op: pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_op(struct spi_nor *nor,
- struct spi_mem_op *op)
-{
- /*
- * First test with 4 address bytes. The opcode itself might
-	 * be a 3B addressing opcode but we don't care, because the
-	 * SPI controller implementation should not check the opcode,
-	 * but just the sequence.
- */
- op->addr.nbytes = 4;
- if (!spi_mem_supports_op(nor->spimem, op)) {
- if (nor->mtd.size > SZ_16M)
- return -ENOTSUPP;
-
- /* If flash size <= 16MB, 3 address bytes are sufficient */
- op->addr.nbytes = 3;
- if (!spi_mem_supports_op(nor->spimem, op))
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-/**
- * spi_nor_spimem_check_readop - check if the read op is supported
- * by controller
- * @nor: pointer to a 'struct spi_nor'
- * @read: pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_readop(struct spi_nor *nor,
- const struct spi_nor_read_command *read)
-{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_DUMMY(0, 1),
- SPI_MEM_OP_DATA_IN(0, NULL, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
- op.dummy.buswidth = op.addr.buswidth;
- op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
- op.dummy.buswidth / 8;
-
- return spi_nor_spimem_check_op(nor, &op);
-}
-
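-/*
- * Worked example for the dummy-byte conversion above (assumed settings): a
- * Fast Read 1-4-4 command with 2 mode clocks and 4 wait states on a 4-bit
- * wide address phase needs (2 + 4) * 4 / 8 = 3 dummy bytes, while a Fast
- * Read 1-1-4 with 8 wait states and a 1-bit address phase needs
- * (0 + 8) * 1 / 8 = 1 dummy byte.
- */
-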
-/**
- * spi_nor_spimem_check_pp - check if the page program op is supported
- * by controller
- * @nor: pointer to a 'struct spi_nor'
- * @pp: pointer to op template to be checked
- *
- * Returns 0 if operation is supported, -ENOTSUPP otherwise.
- */
-static int spi_nor_spimem_check_pp(struct spi_nor *nor,
- const struct spi_nor_pp_command *pp)
-{
- struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
- SPI_MEM_OP_ADDR(3, 0, 1),
- SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(0, NULL, 1));
-
- op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
- op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
- op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
-
- return spi_nor_spimem_check_op(nor, &op);
-}
-
-/**
- * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
- * based on SPI controller capabilities
- * @nor: pointer to a 'struct spi_nor'
- * @hwcaps: pointer to resulting capabilities after adjusting
- * according to controller and flash's capability
- */
-static void
-spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- unsigned int cap;
-
- /* DTR modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_DTR;
-
- /* X-X-X modes are not supported yet, mask them all. */
- *hwcaps &= ~SNOR_HWCAPS_X_X_X;
-
- for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
- int rdidx, ppidx;
-
- if (!(*hwcaps & BIT(cap)))
- continue;
-
- rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
- if (rdidx >= 0 &&
- spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
- *hwcaps &= ~BIT(cap);
-
- ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
- if (ppidx < 0)
- continue;
-
- if (spi_nor_spimem_check_pp(nor,
- &params->page_programs[ppidx]))
- *hwcaps &= ~BIT(cap);
- }
-}
-
-/**
- * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @addr: offset in the SFDP area to start reading data from
- * @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into
- *
- * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
- * guaranteed to be dma-safe.
- *
- * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
- * otherwise.
- */
-static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
- size_t len, void *buf)
-{
- void *dma_safe_buf;
- int ret;
-
- dma_safe_buf = kmalloc(len, GFP_KERNEL);
- if (!dma_safe_buf)
- return -ENOMEM;
-
- ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
- memcpy(buf, dma_safe_buf, len);
- kfree(dma_safe_buf);
-
- return ret;
-}
-
-/* Fast Read settings. */
-
-static void
-spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
- u16 half,
- enum spi_nor_protocol proto)
-{
- read->num_mode_clocks = (half >> 5) & 0x07;
- read->num_wait_states = (half >> 0) & 0x1f;
- read->opcode = (half >> 8) & 0xff;
- read->proto = proto;
-}
-
-struct sfdp_bfpt_read {
- /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
- u32 hwcaps;
-
- /*
- * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
- * whether the Fast Read x-y-z command is supported.
- */
- u32 supported_dword;
- u32 supported_bit;
-
- /*
-	 * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
- * encodes the op code, the number of mode clocks and the number of wait
- * states to be used by Fast Read x-y-z command.
- */
- u32 settings_dword;
- u32 settings_shift;
-
- /* The SPI protocol for this Fast Read x-y-z command. */
- enum spi_nor_protocol proto;
-};
-
-static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
- /* Fast Read 1-1-2 */
- {
- SNOR_HWCAPS_READ_1_1_2,
- BFPT_DWORD(1), BIT(16), /* Supported bit */
- BFPT_DWORD(4), 0, /* Settings */
- SNOR_PROTO_1_1_2,
- },
-
- /* Fast Read 1-2-2 */
- {
- SNOR_HWCAPS_READ_1_2_2,
- BFPT_DWORD(1), BIT(20), /* Supported bit */
- BFPT_DWORD(4), 16, /* Settings */
- SNOR_PROTO_1_2_2,
- },
-
- /* Fast Read 2-2-2 */
- {
- SNOR_HWCAPS_READ_2_2_2,
- BFPT_DWORD(5), BIT(0), /* Supported bit */
- BFPT_DWORD(6), 16, /* Settings */
- SNOR_PROTO_2_2_2,
- },
-
- /* Fast Read 1-1-4 */
- {
- SNOR_HWCAPS_READ_1_1_4,
- BFPT_DWORD(1), BIT(22), /* Supported bit */
- BFPT_DWORD(3), 16, /* Settings */
- SNOR_PROTO_1_1_4,
- },
-
- /* Fast Read 1-4-4 */
- {
- SNOR_HWCAPS_READ_1_4_4,
- BFPT_DWORD(1), BIT(21), /* Supported bit */
- BFPT_DWORD(3), 0, /* Settings */
- SNOR_PROTO_1_4_4,
- },
-
- /* Fast Read 4-4-4 */
- {
- SNOR_HWCAPS_READ_4_4_4,
- BFPT_DWORD(5), BIT(4), /* Supported bit */
- BFPT_DWORD(7), 16, /* Settings */
- SNOR_PROTO_4_4_4,
- },
-};
-
-struct sfdp_bfpt_erase {
- /*
-	 * The half-word at offset <shift> in DWORD <dword> encodes the
- * op code and erase sector size to be used by Sector Erase commands.
- */
- u32 dword;
- u32 shift;
-};
-
-static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
- /* Erase Type 1 in DWORD8 bits[15:0] */
- {BFPT_DWORD(8), 0},
-
- /* Erase Type 2 in DWORD8 bits[31:16] */
- {BFPT_DWORD(8), 16},
-
- /* Erase Type 3 in DWORD9 bits[15:0] */
- {BFPT_DWORD(9), 0},
-
- /* Erase Type 4 in DWORD9 bits[31:16] */
- {BFPT_DWORD(9), 16},
-};
-
-/**
- * spi_nor_set_erase_type() - set a SPI NOR erase type
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type
- * @opcode: the SPI command op code to erase the sector/block
- */
-static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
- u32 size, u8 opcode)
-{
- erase->size = size;
- erase->opcode = opcode;
-	/* The JEDEC JESD216B standard requires erase sizes to be a power of 2. */
- erase->size_shift = ffs(erase->size) - 1;
- erase->size_mask = (1 << erase->size_shift) - 1;
-}
-
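-/*
- * Example of the bookkeeping above: for a 64 KiB erase type, ffs(0x10000)
- * returns 17, so size_shift = 16 and size_mask = 0xffff; for a 4 KiB erase
- * type, size_shift = 12 and size_mask = 0xfff.
- */
-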
-/**
- * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type
- * @opcode: the SPI command op code to erase the sector/block
- * @i: erase type index as sorted in the Basic Flash Parameter Table
- *
- * The supported Erase Types will be sorted at init in ascending order, with
- * the smallest Erase Type size being the first member in the erase_type array
- * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
- * the Basic Flash Parameter Table since it will be used later on to
- * synchronize with the supported Erase Types defined in SFDP optional tables.
- */
-static void
-spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
- u32 size, u8 opcode, u8 i)
-{
- erase->idx = i;
- spi_nor_set_erase_type(erase, size, opcode);
-}
-
-/**
- * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
- * @l: member in the left half of the map's erase_type array
- * @r: member in the right half of the map's erase_type array
- *
- * Comparison function used in the sort() call to sort in ascending order the
- * map's erase types, the smallest erase type size being the first member in the
- * sorted erase_type array.
- *
- * Return: the result of @l->size - @r->size
- */
-static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
-{
- const struct spi_nor_erase_type *left = l, *right = r;
-
- return left->size - right->size;
-}
-
-/**
- * spi_nor_sort_erase_mask() - sort erase mask
- * @map: the erase map of the SPI NOR
- * @erase_mask: the erase type mask to be sorted
- *
- * Replicate the sort done for the map's erase types in BFPT: sort the erase
- * mask in ascending order with the smallest erase type size starting from
- * BIT(0) in the sorted erase mask.
- *
- * Return: sorted erase mask.
- */
-static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
-{
- struct spi_nor_erase_type *erase_type = map->erase_type;
- int i;
- u8 sorted_erase_mask = 0;
-
- if (!erase_mask)
- return 0;
-
- /* Replicate the sort done for the map's erase types. */
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
- if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
- sorted_erase_mask |= BIT(i);
-
- return sorted_erase_mask;
-}
-
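-/*
- * Worked example (assumed BFPT ordering): if the BFPT listed idx 0 = 64 KiB,
- * idx 1 = 32 KiB and idx 2 = 4 KiB, the sorted erase_type array becomes
- * {4 KiB (idx 2), 32 KiB (idx 1), 64 KiB (idx 0)}, and an unsorted mask of
- * BIT(0) (the 64 KiB type) is remapped to BIT(2), its position in the
- * sorted array.
- */
-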
-/**
- * spi_nor_regions_sort_erase_types() - sort erase types in each region
- * @map: the erase map of the SPI NOR
- *
- * Function assumes that the erase types defined in the erase map are already
- * sorted in ascending order, with the smallest erase type size being the first
- * member in the erase_type array. It replicates the sort done for the map's
- * erase types. Each region's erase bitmask will indicate which erase types are
- * supported from the sorted erase types defined in the erase map.
- * Sort all the regions' erase types at init in order to speed up the process of
- * finding the best erase command at runtime.
- */
-static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
-{
- struct spi_nor_erase_region *region = map->regions;
- u8 region_erase_mask, sorted_erase_mask;
-
- while (region) {
- region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
-
- sorted_erase_mask = spi_nor_sort_erase_mask(map,
- region_erase_mask);
-
- /* Overwrite erase mask. */
- region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
- sorted_erase_mask;
-
- region = spi_nor_region_next(region);
- }
-}
-
-/**
- * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
- * @map: the erase map of the SPI NOR
- * @erase_mask: bitmask encoding erase types that can erase the entire
- * flash memory
- * @flash_size: the spi nor flash memory size
- */
-static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
- u8 erase_mask, u64 flash_size)
-{
- /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
- map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
- SNOR_LAST_REGION;
- map->uniform_region.size = flash_size;
- map->regions = &map->uniform_region;
- map->uniform_erase_type = erase_mask;
-}
-
-static int
-spi_nor_post_bfpt_fixups(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- const struct sfdp_bfpt *bfpt,
- struct spi_nor_flash_parameter *params)
-{
- if (nor->info->fixups && nor->info->fixups->post_bfpt)
- return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
- params);
-
- return 0;
-}
-
-/**
- * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
- * @nor: pointer to a 'struct spi_nor'
- * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
- * the Basic Flash Parameter Table length and version
- * @params: pointer to the 'struct spi_nor_flash_parameter' to be
- * filled
- *
- * The Basic Flash Parameter Table is the main and only mandatory table as
- * defined by the SFDP (JESD216) specification.
- * It provides us with the total size (memory density) of the data array and
- * the number of address bytes for Fast Read, Page Program and Sector Erase
- * commands.
- * For Fast READ commands, it also gives the number of mode clock cycles and
- * wait states (grouped together as the number of dummy clock cycles) for each
- * supported instruction op code.
- * For Page Program, the page size has been available since JESD216 rev A;
- * however, the supported instruction op codes are still not provided.
- * For Sector Erase commands, this table stores the supported instruction op
- * codes and the associated sector sizes.
- * Finally, the Quad Enable Requirements (QER) are also available since JESD216
- * rev A. The QER bits encode the manufacturer dependent procedure to be
- * executed to set the Quad Enable (QE) bit in some internal register of the
- * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
- * sending any Quad SPI command to the memory. Actually, setting the QE bit
- * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
- * and IO3 hence enabling 4 (Quad) I/O lines.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_bfpt(struct spi_nor *nor,
- const struct sfdp_parameter_header *bfpt_header,
- struct spi_nor_flash_parameter *params)
-{
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase_type = map->erase_type;
- struct sfdp_bfpt bfpt;
- size_t len;
- int i, cmd, err;
- u32 addr;
- u16 half;
- u8 erase_mask;
-
- /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
- if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
- return -EINVAL;
-
- /* Read the Basic Flash Parameter Table. */
- len = min_t(size_t, sizeof(bfpt),
- bfpt_header->length * sizeof(u32));
- addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
- memset(&bfpt, 0, sizeof(bfpt));
- err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
- if (err < 0)
- return err;
-
- /* Fix endianness of the BFPT DWORDs. */
- for (i = 0; i < BFPT_DWORD_MAX; i++)
- bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
-
- /* Number of address bytes. */
- switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
- case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
- nor->addr_width = 3;
- break;
-
- case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
- nor->addr_width = 4;
- break;
-
- default:
- break;
- }
-
- /* Flash Memory Density (in bits). */
- params->size = bfpt.dwords[BFPT_DWORD(2)];
- if (params->size & BIT(31)) {
- params->size &= ~BIT(31);
-
- /*
- * Prevent overflows on params->size. Anyway, a NOR of 2^64
- * bits is unlikely to exist so this error probably means
- * the BFPT we are reading is corrupted/wrong.
- */
- if (params->size > 63)
- return -EINVAL;
-
- params->size = 1ULL << params->size;
- } else {
- params->size++;
- }
- params->size >>= 3; /* Convert to bytes. */
-
- /* Fast Read settings. */
- for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
- const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
- struct spi_nor_read_command *read;
-
- if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
- params->hwcaps.mask &= ~rd->hwcaps;
- continue;
- }
-
- params->hwcaps.mask |= rd->hwcaps;
- cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
- read = &params->reads[cmd];
- half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
- spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
- }
-
- /*
- * Sector Erase settings. Reinitialize the uniform erase map using the
- * Erase Types defined in the bfpt table.
- */
- erase_mask = 0;
- memset(&params->erase_map, 0, sizeof(params->erase_map));
- for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
- const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
- u32 erasesize;
- u8 opcode;
-
- half = bfpt.dwords[er->dword] >> er->shift;
- erasesize = half & 0xff;
-
- /* erasesize == 0 means this Erase Type is not supported. */
- if (!erasesize)
- continue;
-
- erasesize = 1U << erasesize;
- opcode = (half >> 8) & 0xff;
- erase_mask |= BIT(i);
- spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
- opcode, i);
- }
- spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
- /*
- * Sort all the map's Erase Types in ascending order with the smallest
- * erase size being the first member in the erase_type array.
- */
- sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
- spi_nor_map_cmp_erase_type, NULL);
- /*
- * Sort the erase types in the uniform region in order to update the
- * uniform_erase_type bitmask. The bitmask will be used later on when
- * selecting the uniform erase.
- */
- spi_nor_regions_sort_erase_types(map);
- map->uniform_erase_type = map->uniform_region.offset &
- SNOR_ERASE_TYPE_MASK;
-
- /* Stop here if not JESD216 rev A or later. */
- if (bfpt_header->length < BFPT_DWORD_MAX)
- return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
- params);
-
- /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
- params->page_size = bfpt.dwords[BFPT_DWORD(11)];
- params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
- params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
- params->page_size = 1U << params->page_size;
-
- /* Quad Enable Requirements. */
- switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
- case BFPT_DWORD15_QER_NONE:
- params->quad_enable = NULL;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
- /*
- * Writing only one byte to the Status Register has the
- * side-effect of clearing Status Register 2.
- */
- case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
- /*
- * Read Configuration Register (35h) instruction is not
- * supported.
- */
- nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR1_BIT6:
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- params->quad_enable = spi_nor_sr1_bit6_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT7:
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- params->quad_enable = spi_nor_sr2_bit7_quad_enable;
- break;
-
- case BFPT_DWORD15_QER_SR2_BIT1:
- /*
-		 * JESD216 rev B or later does not specify whether writing only
-		 * one byte to the Status Register clears Status Register 2 or
-		 * not, so let's be cautious and keep the default
- * assumption of a 16-bit Write Status (01h) command.
- */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- break;
-
- default:
- return -EINVAL;
- }
-
- return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
-}
-
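-/*
- * Density decoding examples for spi_nor_parse_bfpt() above (assumed BFPT
- * values): DWORD2 = 0x00ffffff has bit 31 clear, so the density is
- * 0x00ffffff + 1 = 2^24 bits = 2 MiB; DWORD2 = 0x80000021 has bit 31 set,
- * so the density is 2^33 bits = 1 GiB.
- */
-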
-#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
-#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
-#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
-
-#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
-#define SMPT_CMD_READ_DUMMY_SHIFT 16
-#define SMPT_CMD_READ_DUMMY(_cmd) \
- (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
-#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
-
-#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
-#define SMPT_CMD_READ_DATA_SHIFT 24
-#define SMPT_CMD_READ_DATA(_cmd) \
- (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
-
-#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
-#define SMPT_CMD_OPCODE_SHIFT 8
-#define SMPT_CMD_OPCODE(_cmd) \
- (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
-
-#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
-#define SMPT_MAP_REGION_COUNT_SHIFT 16
-#define SMPT_MAP_REGION_COUNT(_header) \
- ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
- SMPT_MAP_REGION_COUNT_SHIFT) + 1)
-
-#define SMPT_MAP_ID_MASK GENMASK(15, 8)
-#define SMPT_MAP_ID_SHIFT 8
-#define SMPT_MAP_ID(_header) \
- (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
-
-#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
-#define SMPT_MAP_REGION_SIZE_SHIFT 8
-#define SMPT_MAP_REGION_SIZE(_region) \
- (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
- SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
-
-#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
-#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
- ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
-
-#define SMPT_DESC_TYPE_MAP BIT(1)
-#define SMPT_DESC_END BIT(0)
-
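-/*
- * Example decode with the helpers above (an assumed region DWORD):
- * _region = 0x0000ff04 gives SMPT_MAP_REGION_SIZE(_region) =
- * (0xff + 1) * 256 = 64 KiB and SMPT_MAP_REGION_ERASE_TYPE(_region) = 0x4,
- * i.e. only erase type 3 (BIT(2)) may be used inside that region.
- */
-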
-/**
- * spi_nor_smpt_addr_width() - return the address width used in the
- * configuration detection command.
- * @nor: pointer to a 'struct spi_nor'
- * @settings: configuration detection command descriptor, dword1
- */
-static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
-{
- switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
- case SMPT_CMD_ADDRESS_LEN_0:
- return 0;
- case SMPT_CMD_ADDRESS_LEN_3:
- return 3;
- case SMPT_CMD_ADDRESS_LEN_4:
- return 4;
- case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
- /* fall through */
- default:
- return nor->addr_width;
- }
-}
-
-/**
- * spi_nor_smpt_read_dummy() - return the configuration detection command read
- * latency, in clock cycles.
- * @nor: pointer to a 'struct spi_nor'
- * @settings: configuration detection command descriptor, dword1
- *
- * Return: the number of dummy cycles for an SMPT read
- */
-static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
-{
- u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
-
- if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
- return nor->read_dummy;
- return read_dummy;
-}
-
-/**
- * spi_nor_get_map_in_use() - get the configuration map in use
- * @nor: pointer to a 'struct spi_nor'
- * @smpt: pointer to the sector map parameter table
- * @smpt_len: sector map parameter table length
- *
- * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
- */
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
- u8 smpt_len)
-{
- const u32 *ret;
- u8 *buf;
- u32 addr;
- int err;
- u8 i;
- u8 addr_width, read_opcode, read_dummy;
- u8 read_data_mask, map_id;
-
- /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- addr_width = nor->addr_width;
- read_dummy = nor->read_dummy;
- read_opcode = nor->read_opcode;
-
- map_id = 0;
- /* Determine if there are any optional Detection Command Descriptors */
- for (i = 0; i < smpt_len; i += 2) {
- if (smpt[i] & SMPT_DESC_TYPE_MAP)
- break;
-
- read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
- nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
- nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
- nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
- addr = smpt[i + 1];
-
- err = spi_nor_read_raw(nor, addr, 1, buf);
- if (err) {
- ret = ERR_PTR(err);
- goto out;
- }
-
- /*
- * Build an index value that is used to select the Sector Map
- * Configuration that is currently in use.
- */
- map_id = map_id << 1 | !!(*buf & read_data_mask);
- }
-
- /*
-	 * If command descriptors are provided, they always precede map
-	 * descriptors in the table, so there is no need to restart the
-	 * iteration over the smpt array from the beginning.
- *
- * Find the matching configuration map.
- */
- ret = ERR_PTR(-EINVAL);
- while (i < smpt_len) {
- if (SMPT_MAP_ID(smpt[i]) == map_id) {
- ret = smpt + i;
- break;
- }
-
- /*
- * If there are no more configuration map descriptors and no
- * configuration ID matched the configuration identifier, the
- * sector address map is unknown.
- */
- if (smpt[i] & SMPT_DESC_END)
- break;
-
- /* increment the table index to the next map */
- i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
- }
-
- /* fall through */
-out:
- kfree(buf);
- nor->addr_width = addr_width;
- nor->read_dummy = read_dummy;
- nor->read_opcode = read_opcode;
- return ret;
-}
-
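-/*
- * Example of the map_id construction above (assumed read-back values): with
- * two detection commands whose masked reads return 1 and 0 respectively,
- * map_id evolves 0 -> 0b1 -> 0b10, so the configuration map with ID 2 is
- * then looked up among the map descriptors that follow.
- */
-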
-/**
- * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
- * @region: pointer to a structure that describes a SPI NOR erase region
- * @erase: pointer to a structure that describes a SPI NOR erase type
- * @erase_type: erase type bitmask
- */
-static void
-spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
- const struct spi_nor_erase_type *erase,
- const u8 erase_type)
-{
- int i;
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- if (!(erase_type & BIT(i)))
- continue;
- if (region->size & erase[i].size_mask) {
- spi_nor_region_mark_overlay(region);
- return;
- }
- }
-}
-
-/**
- * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
- * @nor: pointer to a 'struct spi_nor'
- * @params: pointer to a duplicate 'struct spi_nor_flash_parameter' that is
- * used for storing SFDP parsed data
- * @smpt: pointer to the sector map parameter table
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int
-spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
- struct spi_nor_flash_parameter *params,
- const u32 *smpt)
-{
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase = map->erase_type;
- struct spi_nor_erase_region *region;
- u64 offset;
- u32 region_count;
- int i, j;
- u8 uniform_erase_type, save_uniform_erase_type;
- u8 erase_type, regions_erase_type;
-
- region_count = SMPT_MAP_REGION_COUNT(*smpt);
- /*
- * The regions will be freed when the driver detaches from the
- * device.
- */
- region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
- GFP_KERNEL);
- if (!region)
- return -ENOMEM;
- map->regions = region;
-
- uniform_erase_type = 0xff;
- regions_erase_type = 0;
- offset = 0;
- /* Populate regions. */
- for (i = 0; i < region_count; i++) {
- j = i + 1; /* index for the region dword */
- region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
- erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
- region[i].offset = offset | erase_type;
-
- spi_nor_region_check_overlay(&region[i], erase, erase_type);
-
- /*
- * Save the erase types that are supported in all regions and
- * can erase the entire flash memory.
- */
- uniform_erase_type &= erase_type;
-
- /*
- * regions_erase_type mask will indicate all the erase types
- * supported in this configuration map.
- */
- regions_erase_type |= erase_type;
-
- offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
- region[i].size;
- }
-
- save_uniform_erase_type = map->uniform_erase_type;
- map->uniform_erase_type = spi_nor_sort_erase_mask(map,
- uniform_erase_type);
-
- if (!regions_erase_type) {
- /*
- * Roll back to the previous uniform_erase_type mask, SMPT is
- * broken.
- */
- map->uniform_erase_type = save_uniform_erase_type;
- return -EINVAL;
- }
-
- /*
- * BFPT advertises all the erase types supported by all the possible
- * map configurations. Mask out the erase types that are not supported
- * by the current map configuration.
- */
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
- if (!(regions_erase_type & BIT(erase[i].idx)))
- spi_nor_set_erase_type(&erase[i], 0, 0xFF);
-
- spi_nor_region_mark_end(&region[i - 1]);
-
- return 0;
-}
-
-/**
- * spi_nor_parse_smpt() - parse Sector Map Parameter Table
- * @nor: pointer to a 'struct spi_nor'
- * @smpt_header: sector map parameter table header
- * @params: pointer to a duplicate 'struct spi_nor_flash_parameter'
- * that is used for storing SFDP parsed data
- *
- * This table is optional, but when available, we parse it to identify the
- * location and size of sectors within the main data array of the flash memory
- * device and to identify which Erase Types are supported by each sector.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_smpt(struct spi_nor *nor,
- const struct sfdp_parameter_header *smpt_header,
- struct spi_nor_flash_parameter *params)
-{
- const u32 *sector_map;
- u32 *smpt;
- size_t len;
- u32 addr;
- int i, ret;
-
- /* Read the Sector Map Parameter Table. */
- len = smpt_header->length * sizeof(*smpt);
- smpt = kmalloc(len, GFP_KERNEL);
- if (!smpt)
- return -ENOMEM;
-
- addr = SFDP_PARAM_HEADER_PTP(smpt_header);
- ret = spi_nor_read_sfdp(nor, addr, len, smpt);
- if (ret)
- goto out;
-
- /* Fix endianness of the SMPT DWORDs. */
- for (i = 0; i < smpt_header->length; i++)
- smpt[i] = le32_to_cpu(smpt[i]);
-
- sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
- if (IS_ERR(sector_map)) {
- ret = PTR_ERR(sector_map);
- goto out;
- }
-
- ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
- if (ret)
- goto out;
-
- spi_nor_regions_sort_erase_types(&params->erase_map);
- /* fall through */
-out:
- kfree(smpt);
- return ret;
-}
-
-#define SFDP_4BAIT_DWORD_MAX 2
-
-struct sfdp_4bait {
- /* The hardware capability. */
- u32 hwcaps;
-
- /*
- * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
- * the associated 4-byte address op code is supported.
- */
- u32 supported_bit;
-};
-
-/**
- * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
- * @nor: pointer to a 'struct spi_nor'.
- * @param_header: pointer to the 'struct sfdp_parameter_header' describing
- * the 4-Byte Address Instruction Table length and version.
- * @params:		pointer to the 'struct spi_nor_flash_parameter' to be filled.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_4bait(struct spi_nor *nor,
- const struct sfdp_parameter_header *param_header,
- struct spi_nor_flash_parameter *params)
-{
- static const struct sfdp_4bait reads[] = {
- { SNOR_HWCAPS_READ, BIT(0) },
- { SNOR_HWCAPS_READ_FAST, BIT(1) },
- { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
- { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
- { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
- { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
- { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
- { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
- { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
- };
- static const struct sfdp_4bait programs[] = {
- { SNOR_HWCAPS_PP, BIT(6) },
- { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
- { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
- };
- static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
- { 0u /* not used */, BIT(9) },
- { 0u /* not used */, BIT(10) },
- { 0u /* not used */, BIT(11) },
- { 0u /* not used */, BIT(12) },
- };
- struct spi_nor_pp_command *params_pp = params->page_programs;
- struct spi_nor_erase_map *map = &params->erase_map;
- struct spi_nor_erase_type *erase_type = map->erase_type;
- u32 *dwords;
- size_t len;
- u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
- int i, ret;
-
- if (param_header->major != SFDP_JESD216_MAJOR ||
- param_header->length < SFDP_4BAIT_DWORD_MAX)
- return -EINVAL;
-
- /* Read the 4-byte Address Instruction Table. */
- len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
-
- /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
- dwords = kmalloc(len, GFP_KERNEL);
- if (!dwords)
- return -ENOMEM;
-
- addr = SFDP_PARAM_HEADER_PTP(param_header);
- ret = spi_nor_read_sfdp(nor, addr, len, dwords);
- if (ret)
- goto out;
-
- /* Fix endianness of the 4BAIT DWORDs. */
- for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
- dwords[i] = le32_to_cpu(dwords[i]);
-
- /*
- * Compute the subset of (Fast) Read commands for which the 4-byte
- * version is supported.
- */
- discard_hwcaps = 0;
- read_hwcaps = 0;
- for (i = 0; i < ARRAY_SIZE(reads); i++) {
- const struct sfdp_4bait *read = &reads[i];
-
- discard_hwcaps |= read->hwcaps;
- if ((params->hwcaps.mask & read->hwcaps) &&
- (dwords[0] & read->supported_bit))
- read_hwcaps |= read->hwcaps;
- }
-
- /*
- * Compute the subset of Page Program commands for which the 4-byte
- * version is supported.
- */
- pp_hwcaps = 0;
- for (i = 0; i < ARRAY_SIZE(programs); i++) {
- const struct sfdp_4bait *program = &programs[i];
-
- /*
- * The 4 Byte Address Instruction (Optional) Table is the only
- * SFDP table that indicates support for Page Program Commands.
-		 * Bypass the params->hwcaps.mask and consider 4BAIT the final
- * authority for specifying Page Program support.
- */
- discard_hwcaps |= program->hwcaps;
- if (dwords[0] & program->supported_bit)
- pp_hwcaps |= program->hwcaps;
- }
-
- /*
- * Compute the subset of Sector Erase commands for which the 4-byte
- * version is supported.
- */
- erase_mask = 0;
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- const struct sfdp_4bait *erase = &erases[i];
-
- if (dwords[0] & erase->supported_bit)
- erase_mask |= BIT(i);
- }
-
- /* Replicate the sort done for the map's erase types in BFPT. */
- erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
-
- /*
- * We need at least one 4-byte op code per read, program and erase
- * operation; the .read(), .write() and .erase() hooks share the
- * nor->addr_width value.
- */
- if (!read_hwcaps || !pp_hwcaps || !erase_mask)
- goto out;
-
- /*
- * Discard all operations from the 4-byte instruction set which are
- * not supported by this memory.
- */
- params->hwcaps.mask &= ~discard_hwcaps;
- params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
-
- /* Use the 4-byte address instruction set. */
- for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
- struct spi_nor_read_command *read_cmd = &params->reads[i];
-
- read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
- }
-
- /* 4BAIT is the only SFDP table that indicates page program support. */
- if (pp_hwcaps & SNOR_HWCAPS_PP)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
- SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
- if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
- SPINOR_OP_PP_1_1_4_4B,
- SNOR_PROTO_1_1_4);
- if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
- spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
- SPINOR_OP_PP_1_4_4_4B,
- SNOR_PROTO_1_4_4);
-
- for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
- if (erase_mask & BIT(i))
- erase_type[i].opcode = (dwords[1] >>
- erase_type[i].idx * 8) & 0xFF;
- else
- spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
- }
-
- /*
- * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
- * later because we already did the conversion to 4byte opcodes. Also,
-	 * the latter function implements a legacy quirk for the erase size of
-	 * Spansion memories. However, this quirk is no longer needed with new
-	 * SFDP-compliant memories.
- */
- nor->addr_width = 4;
- nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
-
- /* fall through */
-out:
- kfree(dwords);
- return ret;
-}
-
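-/*
- * Example for the erase-opcode extraction above (an assumed 4BAIT DWORD2):
- * dwords[1] = 0x00dc5c21 yields opcode 0x21 for the erase type with BFPT
- * idx 0, 0x5c for idx 1, 0xdc for idx 2 and 0x00 for idx 3, matching the
- * usual 4 KiB / 32 KiB / 64 KiB 4-byte Sector Erase commands.
- */
-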
-/**
- * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
- * @nor: pointer to a 'struct spi_nor'
- * @params: pointer to the 'struct spi_nor_flash_parameter' to be
- * filled
- *
- * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
- * specification. This is a standard which tends to be supported by almost all
- * (Q)SPI memory manufacturers. Those tables, stored in the flash itself, allow
- * us to learn at runtime the main parameters needed to perform basic SPI flash
- * operations such as Fast Read, Page Program or Sector Erase commands.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_parse_sfdp(struct spi_nor *nor,
- struct spi_nor_flash_parameter *params)
-{
- const struct sfdp_parameter_header *param_header, *bfpt_header;
- struct sfdp_parameter_header *param_headers = NULL;
- struct sfdp_header header;
- struct device *dev = nor->dev;
- size_t psize;
- int i, err;
-
- /* Get the SFDP header. */
- err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
- if (err < 0)
- return err;
-
- /* Check the SFDP header version. */
- if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
- header.major != SFDP_JESD216_MAJOR)
- return -EINVAL;
-
- /*
- * Verify that the first and only mandatory parameter header is a
- * Basic Flash Parameter Table header as specified in JESD216.
- */
- bfpt_header = &header.bfpt_header;
- if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
- bfpt_header->major != SFDP_JESD216_MAJOR)
- return -EINVAL;
-
- /*
- * Allocate memory then read all parameter headers with a single
- * Read SFDP command. These parameter headers will actually be parsed
- * twice: a first time to get the latest revision of the basic flash
- * parameter table, then a second time to handle the supported optional
- * tables.
-	 * Hence we read the parameter headers once and for all to reduce the
-	 * processing time. Also, we use kmalloc() instead of devm_kmalloc()
- * because we don't need to keep these parameter headers: the allocated
- * memory is always released with kfree() before exiting this function.
- */
- if (header.nph) {
- psize = header.nph * sizeof(*param_headers);
-
- param_headers = kmalloc(psize, GFP_KERNEL);
- if (!param_headers)
- return -ENOMEM;
-
- err = spi_nor_read_sfdp(nor, sizeof(header),
- psize, param_headers);
- if (err < 0) {
- dev_dbg(dev, "failed to read SFDP parameter headers\n");
- goto exit;
- }
- }
-
- /*
- * Check other parameter headers to get the latest revision of
- * the basic flash parameter table.
- */
- for (i = 0; i < header.nph; i++) {
- param_header = &param_headers[i];
-
- if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
- param_header->major == SFDP_JESD216_MAJOR &&
- (param_header->minor > bfpt_header->minor ||
- (param_header->minor == bfpt_header->minor &&
- param_header->length > bfpt_header->length)))
- bfpt_header = param_header;
- }
-
- err = spi_nor_parse_bfpt(nor, bfpt_header, params);
- if (err)
- goto exit;
-
- /* Parse optional parameter tables. */
- for (i = 0; i < header.nph; i++) {
- param_header = &param_headers[i];
-
- switch (SFDP_PARAM_HEADER_ID(param_header)) {
- case SFDP_SECTOR_MAP_ID:
- err = spi_nor_parse_smpt(nor, param_header, params);
- break;
-
- case SFDP_4BAIT_ID:
- err = spi_nor_parse_4bait(nor, param_header, params);
- break;
-
- default:
- break;
- }
-
- if (err) {
- dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
- SFDP_PARAM_HEADER_ID(param_header));
- /*
- * Let's not drop all information we extracted so far
-			 * if optional table parsers fail. In case of failure,
-			 * each optional parser is responsible for rolling back
-			 * to the previously known spi_nor data.
- */
- err = 0;
- }
- }
-
-exit:
- kfree(param_headers);
- return err;
-}
-
-static int spi_nor_select_read(struct spi_nor *nor,
- u32 shared_hwcaps)
-{
- int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
- const struct spi_nor_read_command *read;
-
- if (best_match < 0)
- return -EINVAL;
-
- cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
- if (cmd < 0)
- return -EINVAL;
-
- read = &nor->params.reads[cmd];
- nor->read_opcode = read->opcode;
- nor->read_proto = read->proto;
-
- /*
-	 * In the spi-nor framework, we don't need to distinguish between
-	 * mode clock cycles and wait state clock cycles.
- * Indeed, the value of the mode clock cycles is used by a QSPI
- * flash memory to know whether it should enter or leave its 0-4-4
- * (Continuous Read / XIP) mode.
- * eXecution In Place is out of the scope of the mtd sub-system.
- * Hence we choose to merge both mode and wait state clock cycles
- * into the so called dummy clock cycles.
- */
- nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
- return 0;
-}
-
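-/*
- * Example (assumed BFPT values): a Fast Read command advertised with 2 mode
- * clocks and 4 wait states ends up with nor->read_dummy = 2 + 4 = 6 dummy
- * clock cycles; the distinction between the two only matters to flashes in
- * Continuous Read / XIP mode, which the mtd layer does not use.
- */
-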
-static int spi_nor_select_pp(struct spi_nor *nor,
- u32 shared_hwcaps)
-{
- int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
- const struct spi_nor_pp_command *pp;
-
- if (best_match < 0)
- return -EINVAL;
-
- cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
- if (cmd < 0)
- return -EINVAL;
-
- pp = &nor->params.page_programs[cmd];
- nor->program_opcode = pp->opcode;
- nor->write_proto = pp->proto;
- return 0;
-}
-
-/**
- * spi_nor_select_uniform_erase() - select optimum uniform erase type
- * @map: the erase map of the SPI NOR
- * @wanted_size: the erase type size to search for. Contains the value of
- * info->sector_size or of the "small sector" size in case
- * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
- *
- * Once the optimum uniform sector erase command is found, disable all the
- * others.
- *
- * Return: pointer to erase type on success, NULL otherwise.
- */
-static const struct spi_nor_erase_type *
-spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
- const u32 wanted_size)
-{
- const struct spi_nor_erase_type *tested_erase, *erase = NULL;
- int i;
- u8 uniform_erase_type = map->uniform_erase_type;
-
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- if (!(uniform_erase_type & BIT(i)))
- continue;
-
- tested_erase = &map->erase_type[i];
-
- /*
-		 * If the current erase size is the wanted one, stop here:
- * we have found the right uniform Sector Erase command.
- */
- if (tested_erase->size == wanted_size) {
- erase = tested_erase;
- break;
- }
-
- /*
-		 * Otherwise, the current erase size is still a valid candidate.
- * Select the biggest valid candidate.
- */
- if (!erase && tested_erase->size)
- erase = tested_erase;
- /* keep iterating to find the wanted_size */
- }
-
- if (!erase)
- return NULL;
-
- /* Disable all other Sector Erase commands. */
- map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
- map->uniform_erase_type |= BIT(erase - map->erase_type);
- return erase;
-}
-
-static int spi_nor_select_erase(struct spi_nor *nor)
-{
- struct spi_nor_erase_map *map = &nor->params.erase_map;
- const struct spi_nor_erase_type *erase = NULL;
- struct mtd_info *mtd = &nor->mtd;
- u32 wanted_size = nor->info->sector_size;
- int i;
-
- /*
- * The previous implementation handling Sector Erase commands assumed
-	 * that the SPI flash memory has a uniform layout and then used only
-	 * one of the supported erase sizes for all Sector Erase commands.
- * So to be backward compatible, the new implementation also tries to
- * manage the SPI flash memory as uniform with a single erase sector
- * size, when possible.
- */
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
- /* prefer "small sector" erase if possible */
- wanted_size = 4096u;
-#endif
-
- if (spi_nor_has_uniform_erase(nor)) {
- erase = spi_nor_select_uniform_erase(map, wanted_size);
- if (!erase)
- return -EINVAL;
- nor->erase_opcode = erase->opcode;
- mtd->erasesize = erase->size;
- return 0;
- }
-
- /*
- * For non-uniform SPI flash memory, set mtd->erasesize to the
- * maximum erase sector size. No need to set nor->erase_opcode.
- */
- for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
- if (map->erase_type[i].size) {
- erase = &map->erase_type[i];
- break;
- }
- }
-
- if (!erase)
- return -EINVAL;
-
- mtd->erasesize = erase->size;
- return 0;
-}
-
-static int spi_nor_default_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- u32 ignored_mask, shared_mask;
- int err;
-
- /*
- * Keep only the hardware capabilities supported by both the SPI
- * controller and the SPI flash memory.
- */
- shared_mask = hwcaps->mask & params->hwcaps.mask;
-
- if (nor->spimem) {
- /*
- * When called from spi_nor_probe(), all caps are set and we
- * need to discard some of them based on what the SPI
- * controller actually supports (using spi_mem_supports_op()).
- */
- spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
- } else {
- /*
- * SPI n-n-n protocols are not supported when the SPI
- * controller directly implements the spi_nor interface.
- * Yet another reason to switch to spi-mem.
- */
- ignored_mask = SNOR_HWCAPS_X_X_X;
- if (shared_mask & ignored_mask) {
- dev_dbg(nor->dev,
- "SPI n-n-n protocols are not supported.\n");
- shared_mask &= ~ignored_mask;
- }
- }
-
- /* Select the (Fast) Read command. */
- err = spi_nor_select_read(nor, shared_mask);
- if (err) {
- dev_dbg(nor->dev,
- "can't select read settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- /* Select the Page Program command. */
- err = spi_nor_select_pp(nor, shared_mask);
- if (err) {
- dev_dbg(nor->dev,
- "can't select write settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- /* Select the Sector Erase command. */
- err = spi_nor_select_erase(nor);
- if (err) {
- dev_dbg(nor->dev,
- "can't select erase settings supported by both the SPI controller and memory.\n");
- return err;
- }
-
- return 0;
-}
-
-static int spi_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- if (!nor->params.setup)
- return 0;
-
- return nor->params.setup(nor, hwcaps);
-}
-
-static void atmel_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void intel_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void issi_set_default_init(struct spi_nor *nor)
-{
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
-}
-
-static void macronix_set_default_init(struct spi_nor *nor)
-{
- nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
- nor->params.set_4byte = macronix_set_4byte;
-}
-
-static void sst_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
-}
-
-static void st_micron_set_default_init(struct spi_nor *nor)
-{
- nor->flags |= SNOR_F_HAS_LOCK;
- nor->flags &= ~SNOR_F_HAS_16BIT_SR;
- nor->params.quad_enable = NULL;
- nor->params.set_4byte = st_micron_set_4byte;
-}
-
-static void winbond_set_default_init(struct spi_nor *nor)
-{
- nor->params.set_4byte = winbond_set_4byte;
-}
-
-/**
- * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
- * settings based on MFR register and ->default_init() hook.
- * @nor:	pointer to a 'struct spi_nor'.
- */
-static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
-{
- /* Init flash parameters based on MFR */
- switch (JEDEC_MFR(nor->info)) {
- case SNOR_MFR_ATMEL:
- atmel_set_default_init(nor);
- break;
-
- case SNOR_MFR_INTEL:
- intel_set_default_init(nor);
- break;
-
- case SNOR_MFR_ISSI:
- issi_set_default_init(nor);
- break;
-
- case SNOR_MFR_MACRONIX:
- macronix_set_default_init(nor);
- break;
-
- case SNOR_MFR_ST:
- case SNOR_MFR_MICRON:
- st_micron_set_default_init(nor);
- break;
-
- case SNOR_MFR_SST:
- sst_set_default_init(nor);
- break;
-
- case SNOR_MFR_WINBOND:
- winbond_set_default_init(nor);
- break;
-
- default:
- break;
- }
-
- if (nor->info->fixups && nor->info->fixups->default_init)
- nor->info->fixups->default_init(nor);
-}
-
-/**
- * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
- * based on JESD216 SFDP standard.
- * @nor:	pointer to a 'struct spi_nor'.
- *
- * The method has a roll-back mechanism: in case the SFDP parsing fails, the
- * legacy flash parameters and settings will be restored.
- */
-static void spi_nor_sfdp_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter sfdp_params;
-
- memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
-
- if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
- nor->addr_width = 0;
- nor->flags &= ~SNOR_F_4B_OPCODES;
- } else {
- memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
- }
-}
-
-/**
- * spi_nor_info_init_params() - Initialize the flash's parameters and settings
- * based on nor->info data.
- * @nor: pointer to a 'struct spi_nor'.
- */
-static void spi_nor_info_init_params(struct spi_nor *nor)
-{
- struct spi_nor_flash_parameter *params = &nor->params;
- struct spi_nor_erase_map *map = &params->erase_map;
- const struct flash_info *info = nor->info;
- struct device_node *np = spi_nor_get_flash_node(nor);
- u8 i, erase_mask;
-
- /* Initialize legacy flash parameters and settings. */
- params->quad_enable = spi_nor_sr2_bit1_quad_enable;
- params->set_4byte = spansion_set_4byte;
- params->setup = spi_nor_default_setup;
- /* Default to 16-bit Write Status (01h) Command */
- nor->flags |= SNOR_F_HAS_16BIT_SR;
-
- /* Set SPI NOR sizes. */
- params->size = (u64)info->sector_size * info->n_sectors;
- params->page_size = info->page_size;
-
- if (!(info->flags & SPI_NOR_NO_FR)) {
- /* Default to Fast Read for DT and non-DT platform devices. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
-
- /* Mask out Fast Read if not requested at DT instantiation. */
- if (np && !of_property_read_bool(np, "m25p,fast-read"))
- params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- }
-
- /* (Fast) Read settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_READ;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
- 0, 0, SPINOR_OP_READ,
- SNOR_PROTO_1_1_1);
-
- if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
- 0, 8, SPINOR_OP_READ_FAST,
- SNOR_PROTO_1_1_1);
-
- if (info->flags & SPI_NOR_DUAL_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
- 0, 8, SPINOR_OP_READ_1_1_2,
- SNOR_PROTO_1_1_2);
- }
-
- if (info->flags & SPI_NOR_QUAD_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
- 0, 8, SPINOR_OP_READ_1_1_4,
- SNOR_PROTO_1_1_4);
- }
-
- if (info->flags & SPI_NOR_OCTAL_READ) {
- params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
- spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
- 0, 8, SPINOR_OP_READ_1_1_8,
- SNOR_PROTO_1_1_8);
- }
-
- /* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
-
- /*
- * Sector Erase settings. Sort Erase Types in ascending order, with the
- * smallest erase size starting at BIT(0).
- */
- erase_mask = 0;
- i = 0;
- if (info->flags & SECT_4K_PMC) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K_PMC);
- i++;
- } else if (info->flags & SECT_4K) {
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], 4096u,
- SPINOR_OP_BE_4K);
- i++;
- }
- erase_mask |= BIT(i);
- spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
- SPINOR_OP_SE);
- spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
-}
-
-static void spansion_post_sfdp_fixups(struct spi_nor *nor)
-{
- if (nor->params.size <= SZ_16M)
- return;
-
- nor->flags |= SNOR_F_4B_OPCODES;
- /* No small sector erase for 4-byte command set */
- nor->erase_opcode = SPINOR_OP_SE;
- nor->mtd.erasesize = nor->info->sector_size;
-}
-
-static void s3an_post_sfdp_fixups(struct spi_nor *nor)
-{
- nor->params.setup = s3an_nor_setup;
-}
-
-/**
- * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
- * after SFDP has been parsed (it is also called for SPI NORs that do not
- * support RDSFDP).
- * @nor: pointer to a 'struct spi_nor'
- *
- * Typically used to tweak various parameters that could not be extracted by
- * other means (i.e. when the information provided by the SFDP/flash_info
- * tables is incomplete or wrong).
- */
-static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
-{
- switch (JEDEC_MFR(nor->info)) {
- case SNOR_MFR_SPANSION:
- spansion_post_sfdp_fixups(nor);
- break;
-
- default:
- break;
- }
-
- if (nor->info->flags & SPI_S3AN)
- s3an_post_sfdp_fixups(nor);
-
- if (nor->info->fixups && nor->info->fixups->post_sfdp)
- nor->info->fixups->post_sfdp(nor);
-}
-
-/**
- * spi_nor_late_init_params() - Late initialization of default flash parameters.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Used to set default flash parameters and settings that neither the
- * ->default_init() hook nor the SFDP parser has set.
- */
-static void spi_nor_late_init_params(struct spi_nor *nor)
-{
- /*
- * NOR protection support. When locking_ops are not provided, we pick
- * the default ones.
- */
- if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
- nor->params.locking_ops = &stm_locking_ops;
-}
-
-/**
- * spi_nor_init_params() - Initialize the flash's parameters and settings.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * The flash parameters and settings are initialized based on a sequence of
- * calls that are ordered by priority:
- *
- * 1/ Default flash parameters initialization. The initializations are done
- * based on nor->info data:
- * spi_nor_info_init_params()
- *
- * which can be overwritten by:
- * 2/ Manufacturer flash parameters initialization. The initializations are
- *    done based on the MFR register or, when the decision cannot be made
- *    solely from the MFR, by using specific flash_info tweaks, ->default_init():
- * spi_nor_manufacturer_init_params()
- *
- * which can be overwritten by:
- * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
- *    should be more accurate than the above.
- * spi_nor_sfdp_init_params()
- *
- * Please note that there is a ->post_bfpt() fixup hook that can overwrite
- * the flash parameters and settings immediately after parsing the Basic
- * Flash Parameter Table.
- *
- * which can be overwritten by:
- * 4/ Post SFDP flash parameters initialization. Used to tweak various
- * parameters that could not be extracted by other means (i.e. when
- *    information provided by the SFDP/flash_info tables is incomplete or
- * wrong).
- * spi_nor_post_sfdp_fixups()
- *
- * 5/ Late default flash parameters initialization, used when the
- * ->default_init() hook or the SFDP parser do not set specific params.
- * spi_nor_late_init_params()
- */
-static void spi_nor_init_params(struct spi_nor *nor)
-{
- spi_nor_info_init_params(nor);
-
- spi_nor_manufacturer_init_params(nor);
-
- if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
- !(nor->info->flags & SPI_NOR_SKIP_SFDP))
- spi_nor_sfdp_init_params(nor);
-
- spi_nor_post_sfdp_fixups(nor);
-
- spi_nor_late_init_params(nor);
-}
-
-/**
- * spi_nor_quad_enable() - enable Quad I/O if needed.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spi_nor_quad_enable(struct spi_nor *nor)
-{
- if (!nor->params.quad_enable)
- return 0;
-
- if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
- spi_nor_get_protocol_width(nor->write_proto) == 4))
- return 0;
-
- return nor->params.quad_enable(nor);
-}
-
-/**
- * spi_nor_unlock_all() - Unlocks the entire flash memory array.
- * @nor: pointer to a 'struct spi_nor'.
- *
- * Some SPI NOR flashes are write protected by default after a power-on reset
- * cycle, in order to avoid inadvertent writes during power-up. Backward
- * compatibility requires unlocking the entire flash memory array at power-up
- * by default.
- */
-static int spi_nor_unlock_all(struct spi_nor *nor)
-{
- if (nor->flags & SNOR_F_HAS_LOCK)
- return spi_nor_unlock(&nor->mtd, 0, nor->params.size);
-
- return 0;
-}
-
-static int spi_nor_init(struct spi_nor *nor)
-{
- int err;
-
- err = spi_nor_quad_enable(nor);
- if (err) {
- dev_dbg(nor->dev, "quad mode not supported\n");
- return err;
- }
-
- err = spi_nor_unlock_all(nor);
- if (err) {
- dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
- return err;
- }
-
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
- /*
- * If the RESET# pin isn't hooked up properly, or the system
- * otherwise doesn't perform a reset command in the boot
- * sequence, it's impossible to 100% protect against unexpected
- * reboots (e.g., crashes). Warn the user (or hopefully, system
- * designer) that this is bad.
- */
- WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
- "enabling reset hack; may not recover from unexpected reboots\n");
- nor->params.set_4byte(nor, true);
- }
-
- return 0;
-}
-
-/* mtd resume handler */
-static void spi_nor_resume(struct mtd_info *mtd)
-{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
- struct device *dev = nor->dev;
- int ret;
-
- /* re-initialize the nor chip */
- ret = spi_nor_init(nor);
- if (ret)
- dev_err(dev, "resume() failed\n");
-}
-
-void spi_nor_restore(struct spi_nor *nor)
-{
- /* restore the addressing mode */
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
- nor->flags & SNOR_F_BROKEN_RESET)
- nor->params.set_4byte(nor, false);
-}
-EXPORT_SYMBOL_GPL(spi_nor_restore);
-
-static const struct flash_info *spi_nor_match_id(const char *name)
-{
- const struct flash_info *id = spi_nor_ids;
-
- while (id->name) {
- if (!strcmp(name, id->name))
- return id;
- id++;
- }
- return NULL;
-}
-
-static int spi_nor_set_addr_width(struct spi_nor *nor)
-{
- if (nor->addr_width) {
- /* already configured from SFDP */
- } else if (nor->info->addr_width) {
- nor->addr_width = nor->info->addr_width;
- } else if (nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
- } else {
- nor->addr_width = 3;
- }
-
- if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
- dev_dbg(nor->dev, "address width is too large: %u\n",
- nor->addr_width);
- return -EINVAL;
- }
-
- /* Set 4byte opcodes when possible. */
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
-
- return 0;
-}
-
-static void spi_nor_debugfs_init(struct spi_nor *nor,
- const struct flash_info *info)
-{
- struct mtd_info *mtd = &nor->mtd;
-
- mtd->dbg.partname = info->name;
- mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
- info->id_len, info->id);
-}
-
-static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
- const char *name)
-{
- const struct flash_info *info = NULL;
-
- if (name)
- info = spi_nor_match_id(name);
- /* Try to auto-detect if chip name wasn't specified or not found */
- if (!info)
- info = spi_nor_read_id(nor);
- if (IS_ERR_OR_NULL(info))
- return ERR_PTR(-ENOENT);
-
- /*
- * If the caller has specified the name of a flash model that can normally
- * be detected using JEDEC, let's verify it.
- */
- if (name && info->id_len) {
- const struct flash_info *jinfo;
-
- jinfo = spi_nor_read_id(nor);
- if (IS_ERR(jinfo)) {
- return jinfo;
- } else if (jinfo != info) {
- /*
- * JEDEC knows better, so overwrite platform ID. We
- * can't trust partitions any longer, but we'll let
- * mtd apply them anyway, since some partitions may be
- * marked read-only, and we don't want to lose that
- * information, even if it's not 100% accurate.
- */
- dev_warn(nor->dev, "found %s, expected %s\n",
- jinfo->name, info->name);
- info = jinfo;
- }
- }
-
- return info;
-}
-
-int spi_nor_scan(struct spi_nor *nor, const char *name,
- const struct spi_nor_hwcaps *hwcaps)
-{
- const struct flash_info *info;
- struct device *dev = nor->dev;
- struct mtd_info *mtd = &nor->mtd;
- struct device_node *np = spi_nor_get_flash_node(nor);
- struct spi_nor_flash_parameter *params = &nor->params;
- int ret;
- int i;
-
- ret = spi_nor_check(nor);
- if (ret)
- return ret;
-
- /* Reset SPI protocol for all commands. */
- nor->reg_proto = SNOR_PROTO_1_1_1;
- nor->read_proto = SNOR_PROTO_1_1_1;
- nor->write_proto = SNOR_PROTO_1_1_1;
-
- /*
- * We need the bounce buffer early to read/write registers when going
- * through the spi-mem layer (buffers have to be DMA-able).
- * For spi-mem drivers, we'll reallocate a new buffer if
- * nor->page_size turns out to be greater than PAGE_SIZE (which
- * shouldn't happen anytime soon, since NOR pages are usually smaller
- * than 1 KiB) after spi_nor_scan() returns.
- */
- nor->bouncebuf_size = PAGE_SIZE;
- nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
- GFP_KERNEL);
- if (!nor->bouncebuf)
- return -ENOMEM;
-
- info = spi_nor_get_flash_info(nor, name);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- nor->info = info;
-
- spi_nor_debugfs_init(nor, info);
-
- mutex_init(&nor->lock);
-
- /*
- * Make sure the XSR_RDY flag is set before calling
- * spi_nor_wait_till_ready(). Xilinx S3AN shares its MFR ID
- * with Atmel SPI NOR flashes.
- */
- if (info->flags & SPI_NOR_XSR_RDY)
- nor->flags |= SNOR_F_READY_XSR_RDY;
-
- if (info->flags & SPI_NOR_HAS_LOCK)
- nor->flags |= SNOR_F_HAS_LOCK;
-
- /* Init flash parameters based on flash_info struct and SFDP */
- spi_nor_init_params(nor);
-
- if (!mtd->name)
- mtd->name = dev_name(dev);
- mtd->priv = nor;
- mtd->type = MTD_NORFLASH;
- mtd->writesize = 1;
- mtd->flags = MTD_CAP_NORFLASH;
- mtd->size = params->size;
- mtd->_erase = spi_nor_erase;
- mtd->_read = spi_nor_read;
- mtd->_resume = spi_nor_resume;
-
- if (nor->params.locking_ops) {
- mtd->_lock = spi_nor_lock;
- mtd->_unlock = spi_nor_unlock;
- mtd->_is_locked = spi_nor_is_locked;
- }
-
- /* sst nor chips use AAI word program */
- if (info->flags & SST_WRITE)
- mtd->_write = sst_write;
- else
- mtd->_write = spi_nor_write;
-
- if (info->flags & USE_FSR)
- nor->flags |= SNOR_F_USE_FSR;
- if (info->flags & SPI_NOR_HAS_TB) {
- nor->flags |= SNOR_F_HAS_SR_TB;
- if (info->flags & SPI_NOR_TB_SR_BIT6)
- nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
- }
-
- if (info->flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
- if (info->flags & USE_CLSR)
- nor->flags |= SNOR_F_USE_CLSR;
-
- if (info->flags & SPI_NOR_NO_ERASE)
- mtd->flags |= MTD_NO_ERASE;
-
- mtd->dev.parent = dev;
- nor->page_size = params->page_size;
- mtd->writebufsize = nor->page_size;
-
- if (of_property_read_bool(np, "broken-flash-reset"))
- nor->flags |= SNOR_F_BROKEN_RESET;
-
- /*
- * Configure the SPI memory:
- * - select op codes for (Fast) Read, Page Program and Sector Erase.
- * - set the number of dummy cycles (mode cycles + wait states).
- * - set the SPI protocols for register and memory accesses.
- */
- ret = spi_nor_setup(nor, hwcaps);
- if (ret)
- return ret;
-
- if (info->flags & SPI_NOR_4B_OPCODES)
- nor->flags |= SNOR_F_4B_OPCODES;
-
- ret = spi_nor_set_addr_width(nor);
- if (ret)
- return ret;
-
- /* Send all the required SPI flash commands to initialize device */
- ret = spi_nor_init(nor);
- if (ret)
- return ret;
-
- dev_info(dev, "%s (%lld Kbytes)\n", info->name,
- (long long)mtd->size >> 10);
-
- dev_dbg(dev,
- "mtd .name = %s, .size = 0x%llx (%lldMiB), "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
- mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
-
- if (mtd->numeraseregions)
- for (i = 0; i < mtd->numeraseregions; i++)
- dev_dbg(dev,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].erasesize / 1024,
- mtd->eraseregions[i].numblocks);
- return 0;
-}
-EXPORT_SYMBOL_GPL(spi_nor_scan);
-
-static int spi_nor_probe(struct spi_mem *spimem)
-{
- struct spi_device *spi = spimem->spi;
- struct flash_platform_data *data = dev_get_platdata(&spi->dev);
- struct spi_nor *nor;
- /*
- * Enable all caps by default. The core will mask them after
- * checking what's really supported using spi_mem_supports_op().
- */
- const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
- char *flash_name;
- int ret;
-
- nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
- if (!nor)
- return -ENOMEM;
-
- nor->spimem = spimem;
- nor->dev = &spi->dev;
- spi_nor_set_flash_node(nor, spi->dev.of_node);
-
- spi_mem_set_drvdata(spimem, nor);
-
- if (data && data->name)
- nor->mtd.name = data->name;
-
- if (!nor->mtd.name)
- nor->mtd.name = spi_mem_get_name(spimem);
-
- /*
- * For some (historical?) reason many platforms provide two different
- * names in flash_platform_data: "name" and "type". Quite often name is
- * set to "m25p80" and then "type" provides a real chip name.
- * If that's the case, respect "type" and ignore the "name".
- */
- if (data && data->type)
- flash_name = data->type;
- else if (!strcmp(spi->modalias, "spi-nor"))
- flash_name = NULL; /* auto-detect */
- else
- flash_name = spi->modalias;
-
- ret = spi_nor_scan(nor, flash_name, &hwcaps);
- if (ret)
- return ret;
-
- /*
- * None of the existing parts have > 512B pages, but let's play it safe
- * and add this logic so that if anyone ever adds support for such
- * a NOR we don't end up with buffer overflows.
- */
- if (nor->page_size > PAGE_SIZE) {
- nor->bouncebuf_size = nor->page_size;
- devm_kfree(nor->dev, nor->bouncebuf);
- nor->bouncebuf = devm_kmalloc(nor->dev,
- nor->bouncebuf_size,
- GFP_KERNEL);
- if (!nor->bouncebuf)
- return -ENOMEM;
- }
-
- return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
- data ? data->nr_parts : 0);
-}
-
-static int spi_nor_remove(struct spi_mem *spimem)
-{
- struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
- spi_nor_restore(nor);
-
- /* Clean up MTD stuff. */
- return mtd_device_unregister(&nor->mtd);
-}
-
-static void spi_nor_shutdown(struct spi_mem *spimem)
-{
- struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-
- spi_nor_restore(nor);
-}
-
-/*
- * Do NOT add to this array without reading the following:
- *
- * Historically, many flash devices are bound to this driver by their name. But
- * since most of these flashes are compatible to some extent, and their
- * differences can often be distinguished by the JEDEC read-ID command, we
- * encourage new users to add support to the spi-nor library, and simply bind
- * against a generic string here (e.g., "jedec,spi-nor").
- *
- * Many flash names are kept here in this list (as well as in spi-nor.c) to
- * keep them available as module aliases for existing platforms.
- */
-static const struct spi_device_id spi_nor_dev_ids[] = {
- /*
- * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
- * hack around the fact that the SPI core does not provide uevent
- * matching for .of_match_table
- */
- {"spi-nor"},
-
- /*
- * Entries not used in DTs that should be safe to drop after replacing
- * them with "spi-nor" in platform data.
- */
- {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
-
- /*
- * Entries that were used in DTs without "jedec,spi-nor" fallback and
- * should be kept for backward compatibility.
- */
- {"at25df321a"}, {"at25df641"}, {"at26df081a"},
- {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
- {"mx25l25635e"},{"mx66l51235l"},
- {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
- {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
- {"s25fl064k"},
- {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
- {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
- {"m25p64"}, {"m25p128"},
- {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
- {"w25q80bl"}, {"w25q128"}, {"w25q256"},
-
- /* Flashes that can't be detected using JEDEC */
- {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
- {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
- {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
-
- /* Everspin MRAMs (non-JEDEC) */
- { "mr25h128" }, /* 128 Kib, 40 MHz */
- { "mr25h256" }, /* 256 Kib, 40 MHz */
- { "mr25h10" }, /* 1 Mib, 40 MHz */
- { "mr25h40" }, /* 4 Mib, 40 MHz */
-
- { },
-};
-MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
-
-static const struct of_device_id spi_nor_of_table[] = {
- /*
- * Generic compatibility for SPI NOR that can be identified by the
- * JEDEC READ ID opcode (0x9F). Use this, if possible.
- */
- { .compatible = "jedec,spi-nor" },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, spi_nor_of_table);
-
-/*
- * REVISIT: many of these chips have deep power-down modes, which
- * should clearly be entered on suspend() to minimize power use.
- * And also when they're otherwise idle...
- */
-static struct spi_mem_driver spi_nor_driver = {
- .spidrv = {
- .driver = {
- .name = "spi-nor",
- .of_match_table = spi_nor_of_table,
- },
- .id_table = spi_nor_dev_ids,
- },
- .probe = spi_nor_probe,
- .remove = spi_nor_remove,
- .shutdown = spi_nor_shutdown,
-};
-module_spi_mem_driver(spi_nor_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
-MODULE_AUTHOR("Mike Lavender");
-MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
new file mode 100644
index 000000000000..e0af6d25d573
--- /dev/null
+++ b/drivers/mtd/spi-nor/sst.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info sst_parts[] = {
+	/* SST -- large erase sizes are "overlays", "sectors" are 4 KiB */
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64,
+ SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8,
+ SECT_4K | SST_WRITE) },
+ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16,
+ SECT_4K | SST_WRITE) },
+ { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+ { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ) },
+};
+
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t actual = 0;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ ret = spi_nor_lock_and_prep(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->sst_write_second = false;
+
+ /* Start write from odd address. */
+ if (to % 2) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ ret = spi_nor_write_data(nor, to, 1, buf);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ to++;
+ actual++;
+ }
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ ret = spi_nor_write_data(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ ret = spi_nor_write_disable(nor);
+ if (ret)
+ goto out;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto out;
+
+ nor->program_opcode = SPINOR_OP_BP;
+ ret = spi_nor_write_data(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto out;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto out;
+
+ actual += 1;
+
+ ret = spi_nor_write_disable(nor);
+ }
+out:
+ *retlen += actual;
+ spi_nor_unlock_and_unprep(nor);
+ return ret;
+}
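+
+/*
+ * Illustrative sketch, not part of this patch: for a 5-byte buffer starting
+ * at an even address, the loop above issues roughly the following sequence
+ * on the bus (opcodes per spi-nor.h: WREN 06h, AAI_WP ADh, BP 02h, WRDI 04h):
+ *
+ *   06h                          write enable
+ *   ADh <addr> D0 D1             first AAI cycle carries the address
+ *   ADh D2 D3                    later AAI cycles auto-increment the address
+ *   04h                          write disable, leaving AAI mode
+ *   06h, 02h <addr+4> D4, 04h    trailing odd byte via Byte Program
+ *
+ * with spi_nor_wait_till_ready() polling the status register after every
+ * program cycle.
+ */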
+
+static void sst_default_init(struct spi_nor *nor)
+{
+ nor->flags |= SNOR_F_HAS_LOCK;
+}
+
+static void sst_post_sfdp_fixups(struct spi_nor *nor)
+{
+ if (nor->info->flags & SST_WRITE)
+ nor->mtd._write = sst_write;
+}
+
+static const struct spi_nor_fixups sst_fixups = {
+ .default_init = sst_default_init,
+ .post_sfdp = sst_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_sst = {
+ .name = "sst",
+ .parts = sst_parts,
+ .nparts = ARRAY_SIZE(sst_parts),
+ .fixups = &sst_fixups,
+};
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
new file mode 100644
index 000000000000..17deabad57e1
--- /dev/null
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info winbond_parts[] = {
+	/* Winbond -- w25x "blocks" are 64 KiB, "sectors" are 4 KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_HAS_TB) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+ { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+};
+
+/**
+ * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes.
+ * @nor: pointer to 'struct spi_nor'.
+ * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
+ * address mode.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+ int ret;
+
+ ret = spi_nor_set_4byte_addr_mode(nor, enable);
+ if (ret || enable)
+ return ret;
+
+ /*
+	 * On Winbond W25Q256FV, leaving 4-byte mode causes the Extended Address
+	 * Register to be set to 1, so all 3-byte-address reads come from the
+	 * second 16 MiB. We must clear the register to restore normal behavior.
+ */
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_ear(nor, 0);
+ if (ret)
+ return ret;
+
+ return spi_nor_write_disable(nor);
+}
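+
+/*
+ * Worked example, illustrative only: on a 32 MiB W25Q256FV that left 4-byte
+ * mode with EAR == 1, a 3-byte read of address 0x000000 would actually fetch
+ * 0x1000000, the start of the upper 16 MiB half. Clearing the EAR above maps
+ * 3-byte addresses back onto the lower 16 MiB.
+ */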
+
+static void winbond_default_init(struct spi_nor *nor)
+{
+ nor->params->set_4byte_addr_mode = winbond_set_4byte_addr_mode;
+}
+
+static const struct spi_nor_fixups winbond_fixups = {
+ .default_init = winbond_default_init,
+};
+
+const struct spi_nor_manufacturer spi_nor_winbond = {
+ .name = "winbond",
+ .parts = winbond_parts,
+ .nparts = ARRAY_SIZE(winbond_parts),
+ .fixups = &winbond_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
new file mode 100644
index 000000000000..1138bdbf4199
--- /dev/null
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xilinx_parts[] = {
+ /* Xilinx S3AN Internal Flash */
+ { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
+ { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
+ { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
+ { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+};
+
+/*
+ * This code converts an address to the Default Address Mode, which has
+ * non-power-of-two page sizes. We must support this mode because it is the
+ * default mode supported by Xilinx tools, it can access the whole flash area,
+ * and changing over to the Power-of-two mode is irreversible and corrupts the
+ * original data.
+ * Addr can safely be unsigned int; the biggest S3AN device is smaller than
+ * 4 MiB.
+ */
+static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
+{
+ u32 offset, page;
+
+ offset = addr % nor->page_size;
+ page = addr / nor->page_size;
+ page <<= (nor->page_size > 512) ? 10 : 9;
+
+ return page | offset;
+}
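+
+/*
+ * Worked example, illustrative only: with a 264-byte page, addr = 1000 gives
+ * page = 1000 / 264 = 3 and offset = 1000 % 264 = 208; since 264 <= 512 the
+ * page index is shifted left by 9, so the Default Address Mode address is
+ * (3 << 9) | 208 = 0x6d0.
+ */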
+
+static int xilinx_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ int ret;
+
+ ret = spi_nor_xread_sr(nor, nor->bouncebuf);
+ if (ret)
+ return ret;
+
+ nor->erase_opcode = SPINOR_OP_XSE;
+ nor->program_opcode = SPINOR_OP_XPP;
+ nor->read_opcode = SPINOR_OP_READ;
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+
+ /*
+	 * These flashes have a page size of 264 or 528 bytes (known as
+	 * Default addressing mode). It can be changed to a more standard
+	 * Power-of-two mode where the page size is 256/512. This comes
+	 * at a price: there is 3% less space, the data is corrupted, and
+	 * the page size cannot be changed back to Default addressing
+	 * mode.
+	 *
+	 * The current addressing mode can be read from the XRDSR register
+	 * and should not be changed, because changing it is a destructive
+	 * operation.
+ */
+ if (nor->bouncebuf[0] & XSR_PAGESIZE) {
+ /* Flash in Power of 2 mode */
+ nor->page_size = (nor->page_size == 264) ? 256 : 512;
+ nor->mtd.writebufsize = nor->page_size;
+ nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
+ nor->mtd.erasesize = 8 * nor->page_size;
+ } else {
+ /* Flash in Default addressing mode */
+ nor->params->convert_addr = s3an_convert_addr;
+ nor->mtd.erasesize = nor->info->sector_size;
+ }
+
+ return 0;
+}
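+
+/*
+ * Geometry sketch, illustrative only: a 3S700AN (n_sectors = 512, page_size
+ * = 264) that reports Power-of-2 mode in XRDSR ends up with page_size = 256,
+ * erasesize = 8 * 256 = 2 KiB and mtd.size = 8 * 256 * 512 = 1 MiB per the
+ * math above.
+ */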
+
+static void xilinx_post_sfdp_fixups(struct spi_nor *nor)
+{
+ nor->params->setup = xilinx_nor_setup;
+}
+
+static const struct spi_nor_fixups xilinx_fixups = {
+ .post_sfdp = xilinx_post_sfdp_fixups,
+};
+
+const struct spi_nor_manufacturer spi_nor_xilinx = {
+ .name = "xilinx",
+ .parts = xilinx_parts,
+ .nparts = ARRAY_SIZE(xilinx_parts),
+ .fixups = &xilinx_fixups,
+};
diff --git a/drivers/mtd/spi-nor/xmc.c b/drivers/mtd/spi-nor/xmc.c
new file mode 100644
index 000000000000..2c7773b68993
--- /dev/null
+++ b/drivers/mtd/spi-nor/xmc.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ */
+
+#include <linux/mtd/spi-nor.h>
+
+#include "core.h"
+
+static const struct flash_info xmc_parts[] = {
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+};
+
+const struct spi_nor_manufacturer spi_nor_xmc = {
+ .name = "xmc",
+ .parts = xmc_parts,
+ .nparts = ARRAY_SIZE(xmc_parts),
+};
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index ea7440ac913b..ae5abe492b52 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -1059,7 +1059,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
* be a result of power cut during erasure.
*/
ai->maybe_bad_peb_count += 1;
- /* fall through */
+ fallthrough;
case UBI_IO_BAD_HDR:
/*
* If we're facing a bad VID header we have to drop *all*
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 2f93c25bbaee..12c02342149c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1342,10 +1342,10 @@ static int bytes_str_to_int(const char *str)
switch (*endp) {
case 'G':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'M':
result *= 1024;
- /* fall through */
+ fallthrough;
case 'K':
result *= 1024;
if (endp[1] == 'i' && endp[2] == 'B')
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 426820ab9afe..b486250923c5 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
return victim;
}
+static inline void return_unused_peb(struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+}
+
/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
* @ubi: UBI device description object
@@ -52,8 +59,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
for (i = pool->used; i < pool->size; i++) {
e = ubi->lookuptbl[pool->pebs[i]];
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+ return_unused_peb(ubi, e);
}
}
@@ -361,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ if (ubi->fm_anchor) {
+ return_unused_peb(ubi, ubi->fm_anchor);
+ ubi->fm_anchor = NULL;
+ }
+
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index b5fe8f82281b..386db0598e95 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -498,6 +498,6 @@ struct ubi_fm_volhdr {
struct ubi_fm_eba {
__be32 magic;
__be32 reserved_pebs;
- __be32 pnum[0];
+ __be32 pnum[];
} __packed;
#endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 837d690a8c60..5146cce5fe32 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1875,7 +1875,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
- ubi_ensure_anchor_pebs(ubi);
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ ubi_ensure_anchor_pebs(ubi);
#endif
return 0;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index affa5c6e135c..c7ac63f41918 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -480,7 +480,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
- err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
of_node_put(dn);
@@ -1079,6 +1079,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
+ struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -1146,7 +1147,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);
- bcm_sf2_identify_ports(priv, dn->child);
+ ports = of_find_node_by_name(dn, "ports");
+ if (ports) {
+ bcm_sf2_identify_ports(priv, ports);
+ of_node_put(ports);
+ }
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ef57552db260..84391c8a0e16 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -67,58 +67,6 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
};
static int
-mt7623_trgmii_write(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- int ret;
-
- ret = regmap_write(priv->ethernet, TRGMII_BASE(reg), val);
- if (ret < 0)
- dev_err(priv->dev,
- "failed to priv write register\n");
- return ret;
-}
-
-static u32
-mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg)
-{
- int ret;
- u32 val;
-
- ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val);
- if (ret < 0) {
- dev_err(priv->dev,
- "failed to priv read register\n");
- return ret;
- }
-
- return val;
-}
-
-static void
-mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg,
- u32 mask, u32 set)
-{
- u32 val;
-
- val = mt7623_trgmii_read(priv, reg);
- val &= ~mask;
- val |= set;
- mt7623_trgmii_write(priv, reg, val);
-}
-
-static void
-mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- mt7623_trgmii_rmw(priv, reg, 0, val);
-}
-
-static void
-mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- mt7623_trgmii_rmw(priv, reg, val, 0);
-}
-
-static int
core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
{
struct mii_bus *bus = priv->bus;
@@ -530,27 +478,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
mt7530_rmw(priv, MT7530_TRGMII_RD(i),
RD_TAP_MASK, RD_TAP(16));
- else
- if (priv->id != ID_MT7621)
- mt7623_trgmii_set(priv, GSW_INTF_MODE,
- INTF_MODE_TRGMII);
-
- return 0;
-}
-
-static int
-mt7623_pad_clk_setup(struct dsa_switch *ds)
-{
- struct mt7530_priv *priv = ds->priv;
- int i;
-
- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
- mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i),
- TD_DM_DRVP(8) | TD_DM_DRVN(8));
-
- mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL);
- mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST);
-
return 0;
}
@@ -1303,10 +1230,6 @@ mt7530_setup(struct dsa_switch *ds)
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
if (priv->id == ID_MT7530) {
- priv->ethernet = syscon_node_to_regmap(dn);
- if (IS_ERR(priv->ethernet))
- return PTR_ERR(priv->ethernet);
-
regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
ret = regulator_enable(priv->core_pwr);
if (ret < 0) {
@@ -1403,6 +1326,9 @@ mt7530_setup(struct dsa_switch *ds)
continue;
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
if (phy_node->parent == priv->dev->of_node->parent) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV)
@@ -1465,14 +1391,6 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
/* Setup TX circuit incluing relevant PAD and driving */
mt7530_pad_clk_setup(ds, state->interface);
- if (priv->id == ID_MT7530) {
- /* Setup RX circuit, relevant PAD and driving on the
- * host which must be placed after the setup on the
- * device side is all finished.
- */
- mt7623_pad_clk_setup(ds);
- }
-
priv->p6_interface = state->interface;
break;
default:
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index ef9b52f3152b..4aef6024441b 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -277,7 +277,6 @@ enum mt7530_vlan_port_attr {
/* Registers for TRGMII on the both side */
#define MT7530_TRGMII_RCK_CTRL 0x7a00
-#define GSW_TRGMII_RCK_CTRL 0x300
#define RX_RST BIT(31)
#define RXC_DQSISEL BIT(30)
#define DQSI1_TAP_MASK (0x7f << 8)
@@ -286,31 +285,24 @@ enum mt7530_vlan_port_attr {
#define DQSI0_TAP(x) ((x) & 0x7f)
#define MT7530_TRGMII_RCK_RTT 0x7a04
-#define GSW_TRGMII_RCK_RTT 0x304
#define DQS1_GATE BIT(31)
#define DQS0_GATE BIT(30)
#define MT7530_TRGMII_RD(x) (0x7a10 + (x) * 8)
-#define GSW_TRGMII_RD(x) (0x310 + (x) * 8)
#define BSLIP_EN BIT(31)
#define EDGE_CHK BIT(30)
#define RD_TAP_MASK 0x7f
#define RD_TAP(x) ((x) & 0x7f)
-#define GSW_TRGMII_TXCTRL 0x340
#define MT7530_TRGMII_TXCTRL 0x7a40
#define TRAIN_TXEN BIT(31)
#define TXC_INV BIT(30)
#define TX_RST BIT(28)
#define MT7530_TRGMII_TD_ODT(i) (0x7a54 + 8 * (i))
-#define GSW_TRGMII_TD_ODT(i) (0x354 + 8 * (i))
#define TD_DM_DRVP(x) ((x) & 0xf)
#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
-#define GSW_INTF_MODE 0x390
-#define INTF_MODE_TRGMII BIT(1)
-
#define MT7530_TRGMII_TCK_CTRL 0x7a78
#define TCK_TAP(x) (((x) & 0xf) << 8)
@@ -443,7 +435,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
* @ds: The pointer to the dsa core structure
* @bus: The bus used for the device and built-in PHY
* @rstc: The pointer to reset control used by MCM
- * @ethernet: The regmap used for access TRGMII-based registers
* @core_pwr: The power supplied into the core
* @io_pwr: The power supplied into the I/O
* @reset: The descriptor for GPIO line tied to its reset pin
@@ -460,7 +451,6 @@ struct mt7530_priv {
struct dsa_switch *ds;
struct mii_bus *bus;
struct reset_control *rstc;
- struct regmap *ethernet;
struct regulator *core_pwr;
struct regulator *io_pwr;
struct gpio_desc *reset;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index bd898f5b4da5..e74dd1f86bba 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q {
struct sk_buff *rx_skbuff[RX_RING_SIZE];
};
+struct fec_stop_mode_gpr {
+ struct regmap *gpr;
+ u8 reg;
+ u8 bit;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
@@ -562,6 +568,7 @@ struct fec_enet_private {
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
+ struct fec_stop_mode_gpr stop_gpr;
unsigned int tx_align;
unsigned int rx_align;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c1c267b61647..dc6f8763a5d4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -62,6 +62,8 @@
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_OPD_V 0xFFF0
#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+struct fec_devinfo {
+ u32 quirks;
+ u8 stop_gpr_reg;
+ u8 stop_gpr_bit;
+};
+
+static const struct fec_devinfo fec_imx25_info = {
+ .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx27_info = {
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx28_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx6q_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+ FEC_QUIRK_HAS_RACC,
+ .stop_gpr_reg = 0x34,
+ .stop_gpr_bit = 27,
+};
+
+static const struct fec_devinfo fec_mvf600_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+};
+
+static const struct fec_devinfo fec_imx6x_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+};
+
+static const struct fec_devinfo fec_imx6ul_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_COALESCE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx25_info,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx27_info,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx28_info,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_imx6q_info,
}, {
.name = "mvf600-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_mvf600_info,
}, {
.name = "imx6sx-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6x_info,
}, {
.name = "imx6ul-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
- FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
}, {
/* sentinel */
}
@@ -1092,11 +1130,28 @@ fec_restart(struct net_device *ndev)
}
+static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
+{
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
+
+ if (stop_gpr->gpr) {
+ if (enabled)
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit),
+ BIT(stop_gpr->bit));
+ else
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit), 0);
+ } else if (pdata && pdata->sleep_mode_enable) {
+ pdata->sleep_mode_enable(enabled);
+ }
+}
+
static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
@@ -1125,9 +1180,7 @@ fec_stop(struct net_device *ndev)
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
-
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(true);
+ fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -3398,6 +3451,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
return irq_cnt;
}
+static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
+ struct fec_devinfo *dev_info,
+ struct device_node *np)
+{
+ struct device_node *gpr_np;
+ int ret = 0;
+
+ if (!dev_info)
+ return 0;
+
+ gpr_np = of_parse_phandle(np, "gpr", 0);
+ if (!gpr_np)
+ return 0;
+
+ fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(fep->stop_gpr.gpr)) {
+ dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
+ ret = PTR_ERR(fep->stop_gpr.gpr);
+ fep->stop_gpr.gpr = NULL;
+ goto out;
+ }
+
+ fep->stop_gpr.reg = dev_info->stop_gpr_reg;
+ fep->stop_gpr.bit = dev_info->stop_gpr_bit;
+
+out:
+ of_node_put(gpr_np);
+
+ return ret;
+}
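+
+/*
+ * Hypothetical device tree snippet, for illustration only (the "gpr"
+ * property name matches the of_parse_phandle() call above; the &gpr label
+ * is an assumption): an i.MX6Q board would point the FEC node at the IOMUXC
+ * GPR syscon, while the register offset (0x34) and bit (27) come from
+ * fec_imx6q_info rather than from the DT:
+ *
+ *   &fec {
+ *           gpr = <&gpr>;
+ *   };
+ */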
+
static int
fec_probe(struct platform_device *pdev)
{
@@ -3413,6 +3497,7 @@ fec_probe(struct platform_device *pdev)
int num_rx_qs;
char irq_name[8];
int irq_cnt;
+ struct fec_devinfo *dev_info;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -3430,7 +3515,9 @@ fec_probe(struct platform_device *pdev)
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
pdev->id_entry = of_id->data;
- fep->quirks = pdev->id_entry->driver_data;
+ dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+ if (dev_info)
+ fep->quirks = dev_info->quirks;
fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
@@ -3464,6 +3551,10 @@ fec_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+ ret = fec_enet_init_stop_mode(fep, dev_info, np);
+ if (ret)
+ goto failed_stop_mode;
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
@@ -3632,6 +3723,7 @@ failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
+failed_stop_mode:
failed_phy:
dev_id--;
failed_ioremap:
@@ -3709,7 +3801,6 @@ static int __maybe_unused fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
int val;
@@ -3727,8 +3818,8 @@ static int __maybe_unused fec_resume(struct device *dev)
goto failed_clk;
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(false);
+ fec_enet_stop_mode(fep, false);
+
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 306a4e5b2320..5b190c257124 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3508,9 +3508,9 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ err = pci_aer_clear_nonfatal_status(pdev);
if (err)
- dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
err);
/* non-fatal, continue */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 8d28f90acfe7..09047109d0da 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -65,6 +65,17 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+{
+ u32 val;
+
+ val = mtk_r32(eth, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(eth, val, reg);
+ return reg;
+}
+
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
unsigned long t_start = jiffies;
@@ -193,7 +204,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new, sid;
+ u32 mcr_cur, mcr_new, sid, i;
int val, ge_mode, err;
/* MT76x8 has no hardware settings for the MAC */
@@ -255,6 +266,17 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
PHY_INTERFACE_MODE_TRGMII)
mtk_gmac0_rgmii_adjust(mac->hw,
state->speed);
+
+ /* mt7623_pad_clk_setup */
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mtk_w32(mac->hw,
+ TD_DM_DRVP(8) | TD_DM_DRVN(8),
+ TRGMII_TD_ODT(i));
+
+ /* Assert/release MT7623 RXC reset */
+ mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+ TRGMII_RCK_CTRL);
+ mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
}
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 85830fe14a1b..454cfcd465fd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -352,10 +352,13 @@
#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_RST BIT(31)
#define RXC_DQSISEL BIT(30)
#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+#define NUM_TRGMII_CTRL 5
+
/* TRGMII RXC control register */
#define TRGMII_TCK_CTRL 0x10340
#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
@@ -363,6 +366,11 @@
#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+/* TRGMII TX Drive Strength */
+#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i))
+#define TD_DM_DRVP(x) ((x) & 0xf)
+#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
/* TRGMII Interface mode register */
#define INTF_MODE 0x10390
#define TRGMII_INTF_DIS BIT(0)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index bdeb291f6b67..e94f0c4d74a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -23,7 +23,10 @@ static int mlx5_devlink_flash_update(struct devlink *devlink,
if (err)
return err;
- return mlx5_firmware_flash(dev, fw, extack);
+ err = mlx5_firmware_flash(dev, fw, extack);
+ release_firmware(fw);
+
+ return err;
}
static u8 mlx5_fw_ver_major(u32 version)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index ad3e3a65d403..16416eaac39e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -67,11 +67,9 @@ struct mlx5_ct_ft {
struct nf_flowtable *nf_ft;
struct mlx5_tc_ct_priv *ct_priv;
struct rhashtable ct_entries_ht;
- struct list_head ct_entries_list;
};
struct mlx5_ct_entry {
- struct list_head list;
u16 zone;
struct rhash_head node;
struct flow_rule *flow_rule;
@@ -617,8 +615,6 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_insert;
- list_add(&entry->list, &ft->ct_entries_list);
-
return 0;
err_insert:
@@ -646,7 +642,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
&entry->node,
cts_ht_params));
- list_del(&entry->list);
kfree(entry);
return 0;
@@ -818,7 +813,6 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
ft->zone = zone;
ft->nf_ft = nf_ft;
ft->ct_priv = ct_priv;
- INIT_LIST_HEAD(&ft->ct_entries_list);
refcount_set(&ft->refcount, 1);
err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
@@ -847,12 +841,12 @@ err_init:
}
static void
-mlx5_tc_ct_flush_ft(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
{
- struct mlx5_ct_entry *entry;
+ struct mlx5_tc_ct_priv *ct_priv = arg;
+ struct mlx5_ct_entry *entry = ptr;
- list_for_each_entry(entry, &ft->ct_entries_list, list)
- mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
}
static void
@@ -863,9 +857,10 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
nf_flow_table_offload_del_cb(ft->nf_ft,
mlx5_tc_ct_block_flow_offload, ft);
- mlx5_tc_ct_flush_ft(ct_priv, ft);
rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
- rhashtable_destroy(&ft->ct_entries_ht);
+ rhashtable_free_and_destroy(&ft->ct_entries_ht,
+ mlx5_tc_ct_flush_ft_entry,
+ ct_priv);
kfree(ft);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dd7f338425eb..f02150a97ac8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5526,8 +5526,8 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
- mlx5e_devlink_port_unregister(priv);
unregister_netdev(priv->netdev);
+ mlx5e_devlink_port_unregister(priv);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2a0243e4af75..55457f268495 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -2050,29 +2050,30 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct netdev_phys_item_id ppid = {};
unsigned int dl_port_index = 0;
+ u16 pfnum;
if (!is_devlink_port_supported(dev, rpriv))
return 0;
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
if (rep->vport == MLX5_VPORT_UPLINK) {
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
- PCI_FUNC(dev->pdev->devfn), false, 0,
+ pfnum, false, 0,
&ppid.id[0], ppid.id_len);
dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
} else if (rep->vport == MLX5_VPORT_PF) {
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
- dev->pdev->devfn);
+ pfnum);
dl_port_index = rep->vport;
} else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
rpriv->rep->vport)) {
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
- dev->pdev->devfn,
- rep->vport - 1);
+ pfnum, rep->vport - 1);
dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 438128dde187..a574c588269a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1343,7 +1343,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
return err;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
@@ -3558,12 +3559,13 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr,
u32 *action)
{
- int nest_level = attr->parse_attr->filter_dev->lower_level;
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
};
- int err = 0;
+ int nest_level, err = 0;
+ nest_level = attr->parse_attr->filter_dev->lower_level -
+ priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 39f42f985fbd..c1848b57f61c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -403,7 +403,6 @@ enum {
MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
- MLX5_ESW_ATTR_FLAG_HAIRPIN = BIT(3),
};
struct mlx5_esw_flow_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f171eb2234b0..b2e38e0cde97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -300,7 +300,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
bool split = !!(attr->split_count);
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
- bool hairpin = false;
int j, i = 0;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
@@ -398,21 +397,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
- if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
+ if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
- hairpin = true;
- } else {
+ else
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
- }
if (IS_ERR(rule))
goto err_add_rule;
else
atomic64_inc(&esw->offloads.num_flows);
- if (hairpin)
- attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;
-
return rule;
err_add_rule:
@@ -501,7 +495,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (attr->dests[i].termtbl)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index b25465d9e030..90048697b2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -904,6 +904,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_SNIFFER_TX:
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
+ case FS_FT_RDMA_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 62ce2b9417ab..d5defe09339a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -87,6 +87,15 @@
.identified_miss_table_mode), \
FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
+#define FS_CHAINING_CAPS_RDMA_TX \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .flow_table_modify))
+
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
@@ -202,6 +211,18 @@ static struct init_tree_node rdma_rx_root_fs = {
}
};
+static struct init_tree_node rdma_tx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 1,
+ .children = (struct init_tree_node[]) {
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ FS_CHAINING_CAPS_RDMA_TX,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
+
enum fs_i_lock_class {
FS_LOCK_GRANDPARENT,
FS_LOCK_PARENT,
@@ -2121,6 +2142,8 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_KERNEL_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ root_ns = steering->rdma_tx_root_ns;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@ -2524,6 +2547,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
+ cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
@@ -2580,6 +2604,29 @@ out_err:
return err;
}
+static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
+ if (!steering->rdma_tx_root_ns)
+ return -ENOMEM;
+
+ err = init_root_tree(steering, &rdma_tx_root_fs,
+ &steering->rdma_tx_root_ns->ns.node);
+ if (err)
+ goto out_err;
+
+ set_prio_attrs(steering->rdma_tx_root_ns);
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->rdma_tx_root_ns);
+ steering->rdma_tx_root_ns = NULL;
+ return err;
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc api for FT chains.
* When creating a new ns for each chain store it in the first available slot.
@@ -2890,6 +2937,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
+ err = init_rdma_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index be5f5e32c1e8..508108c58dae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -86,7 +86,8 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
- FS_FT_MAX_TYPE = FS_FT_RDMA_RX,
+ FS_FT_RDMA_TX = 0X8,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TX,
};
enum fs_flow_table_op_mod {
@@ -116,6 +117,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
};
@@ -316,7 +318,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\
)
#endif
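Together these hunks wire up a steering root for the RDMA transmit side, gated on the firmware capability. A sketch of how a consumer might pick up the new namespace and create a table in it (the table size is illustrative):

#include <linux/mlx5/fs.h>

static struct mlx5_flow_table *rdma_tx_table_sketch(struct mlx5_core_dev *mdev)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	ft_attr.max_fte = 128;	/* illustrative size */
	return mlx5_create_flow_table(ns, &ft_attr);
}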
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index fa1665caac46..f99e1752d4e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -243,7 +243,7 @@ recover_from_sw_reset:
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
break;
- cond_resched();
+ msleep(20);
} while (!time_after(jiffies, end));
if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f76908cae73..51117a5a6bbf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -150,14 +150,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
u8 prio = act->vlan.prio;
u16 vid = act->vlan.vid;
- return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
- act->id, vid,
- proto, prio, extack);
+ err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ act->id, vid,
+ proto, prio, extack);
+ if (err)
+ return err;
+ break;
}
case FLOW_ACTION_PRIORITY:
- return mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
- act->priority,
- extack);
+ err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
+ act->priority,
+ extack);
+ if (err)
+ return err;
+ break;
case FLOW_ACTION_MANGLE: {
enum flow_action_mangle_base htype = act->mangle.htype;
__be32 be_mask = (__force __be32) act->mangle.mask;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 9096ffd89e50..fbf714d027d8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -643,7 +643,7 @@ static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
{
int bs = fls64(burst) - 1;
- if (burst != (1 << bs)) {
+ if (burst != (BIT_ULL(bs))) {
NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two");
return -EINVAL;
}
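The BIT_ULL() change is not cosmetic: burst is a u64, and in `burst != (1 << bs)` the literal 1 is a 32-bit int, so for bs >= 31 the shift is undefined and the power-of-two test breaks for large bursts. The corrected check as a self-contained sketch (on 64-bit systems is_power_of_2() from <linux/log2.h> expresses the same thing):

#include <linux/bits.h>
#include <linux/bitops.h>

/* true iff burst is a power of two, valid over the full u64 range */
static bool burst_is_pow2(u64 burst)
{
	int bs = fls64(burst) - 1;	/* index of the highest set bit */

	return burst && burst == BIT_ULL(bs);
}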
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 4b8a76098ca3..5acf4f46c268 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -2127,6 +2127,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
if (lif->registered)
ionic_lif_set_netdev_info(lif);
+ ionic_rx_filter_replay(lif);
+
if (netif_running(lif->netdev)) {
err = ionic_txrx_alloc(lif);
if (err)
@@ -2206,9 +2208,9 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
cancel_work_sync(&lif->deferred.work);
cancel_work_sync(&lif->tx_timeout_work);
+ ionic_rx_filters_deinit(lif);
}
- ionic_rx_filters_deinit(lif);
if (lif->netdev->features & NETIF_F_RXHASH)
ionic_lif_rss_deinit(lif);
@@ -2339,24 +2341,30 @@ static int ionic_station_set(struct ionic_lif *lif)
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
-
+ netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
+ ctx.comp.lif_getattr.mac);
if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
return 0;
- memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
- addr.sa_family = AF_INET;
- err = eth_prepare_mac_addr_change(netdev, &addr);
- if (err) {
- netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
- addr.sa_data, err);
- return 0;
- }
+ if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) {
+ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
+ addr.sa_family = AF_INET;
+ err = eth_prepare_mac_addr_change(netdev, &addr);
+ if (err) {
+ netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
+ addr.sa_data, err);
+ return 0;
+ }
- netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
- netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, false);
+ if (!is_zero_ether_addr(netdev->dev_addr)) {
+ netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
+ netdev->dev_addr);
+ ionic_lif_addr(lif, netdev->dev_addr, false);
+ }
+
+ eth_commit_mac_addr_change(netdev, &addr);
+ }
- eth_commit_mac_addr_change(netdev, &addr);
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
ionic_lif_addr(lif, netdev->dev_addr, true);
@@ -2421,9 +2429,11 @@ static int ionic_lif_init(struct ionic_lif *lif)
if (err)
goto err_out_notifyq_deinit;
- err = ionic_rx_filters_init(lif);
- if (err)
- goto err_out_notifyq_deinit;
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ err = ionic_rx_filters_init(lif);
+ if (err)
+ goto err_out_notifyq_deinit;
+ }
err = ionic_station_set(lif);
if (err)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 7a093f148ee5..f3c7dd1596ee 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -17,17 +17,49 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
devm_kfree(dev, f);
}
-int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f)
+void ionic_rx_filter_replay(struct ionic_lif *lif)
{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .filter_id = cpu_to_le32(f->filter_id),
- },
- };
-
- return ionic_adminq_post_wait(lif, &ctx);
+ struct ionic_rx_filter_add_cmd *ac;
+ struct ionic_admin_ctx ctx;
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ unsigned int i;
+ int err = 0;
+
+ ac = &ctx.cmd.rx_filter_add;
+
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ head = &lif->rx_filters.by_id[i];
+ hlist_for_each_entry_safe(f, tmp, head, by_id) {
+ ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
+ memcpy(ac, &f->cmd, sizeof(f->cmd));
+ dev_dbg(&lif->netdev->dev, "replay filter command:\n");
+ dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ &ctx.cmd, sizeof(ctx.cmd), true);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err) {
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
+ err,
+ le16_to_cpu(ac->vlan.vlan));
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
+ err, ac->mac.addr);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+ netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
+ err,
+ le16_to_cpu(ac->vlan.vlan),
+ ac->mac.addr);
+ break;
+ }
+ }
+ }
+ }
}
int ionic_rx_filters_init(struct ionic_lif *lif)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index b6aec9c19918..cf8f4c0a961c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -24,7 +24,7 @@ struct ionic_rx_filters {
};
void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f);
-int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f);
+void ionic_rx_filter_replay(struct ionic_lif *lif);
int ionic_rx_filters_init(struct ionic_lif *lif);
void ionic_rx_filters_deinit(struct ionic_lif *lif);
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 1a5fc2ae351c..29810a1aa210 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -369,8 +369,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
- int rc = -EINVAL;
u16 rx_mode = 0;
+ int rc;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 55cb5730beb6..bf5bf05970a2 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5441,9 +5441,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -5460,26 +5459,26 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ if (rtl_chip_supports_csum_v2(tp))
+ dev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ dev->features |= dev->hw_features;
+
+ /* There have been a number of reports that using SG/TSO results in
+ * tx timeouts. However, for a lot of people SG/TSO works fine.
+ * Therefore disable both features by default, but allow users to
+ * enable them. Use at your own risk!
+ */
if (rtl_chip_supports_csum_v2(tp)) {
- dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
} else {
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}
- /* RTL8168e-vl and one RTL8168c variant are known to have a
- * HW issue with TSO.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
- dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- }
-
- dev->features |= dev->hw_features;
-
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
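The reshuffle above relies on the hw_features/features split: bits in dev->hw_features are user-toggleable, while dev->features is what is actually enabled. Since dev->features is copied from hw_features before SG/TSO are added, the offloads ship disabled but can still be turned on by the user (e.g. with ethtool -K <dev> sg on tso on). In sketch form:

/* enable everything accumulated so far ... */
dev->features |= dev->hw_features;

/* ... then advertise SG/TSO as available but leave them off by default */
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;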
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 542784300620..efc6ec1b8027 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -207,7 +207,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
reg++;
}
- while (reg <= perfect_addr_number) {
+ while (reg < perfect_addr_number) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
reg++;
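The one-character change above fixes an off-by-one: the unused perfect-filter slots that remain after programming run from reg up to perfect_addr_number - 1, so `<` is the correct bound. The old `<=` also zeroed GMAC_ADDR_HIGH/LOW(perfect_addr_number), one register past the end of the address bank.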
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index da82d7f16a09..a183250ff66a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2594,6 +2594,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dev);
macsec = macsec_priv(dev);
+ if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
+ return -EINVAL;
+
offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
if (macsec->offload == offload)
return 0;
@@ -3806,7 +3809,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
- struct macsec_tx_sa tx_sc;
+ struct macsec_tx_sc tx_sc;
struct macsec_secy secy;
int ret;
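The added presence check closes a NULL-pointer dereference: nla_get_u8() dereferences the attribute unconditionally, so any netlink attribute userspace is allowed to omit must be tested before it is read. The general shape (the attribute id is illustrative):

/* inside a netlink handler, tb[] filled by nla_parse_nested() */
if (!tb[MY_ATTR_TYPE])			/* MY_ATTR_TYPE is hypothetical */
	return -EINVAL;

val = nla_get_u8(tb[MY_ATTR_TYPE]);	/* safe: presence checked above */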
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 522760c8bca6..7a4eb3f2cb74 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -464,7 +464,7 @@ static struct class mdio_bus_class = {
/**
* mdio_find_bus - Given the name of a mdiobus, find the mii_bus.
- * @mdio_bus_np: Pointer to the mii_bus.
+ * @mdio_name: The name of a mdiobus.
*
* Returns a reference to the mii_bus, or NULL if none found. The
* embedded struct device will have its reference count incremented,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2ec19e5540bf..05d20343b816 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/delay.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -952,6 +953,12 @@ static int kszphy_resume(struct phy_device *phydev)
genphy_resume(phydev);
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
ret = kszphy_config_reset(phydev);
if (ret)
return ret;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 228fe449dc6d..07476c6510f2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1678,8 +1678,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen;
}
err = tun_xdp_act(tun, xdp_prog, &xdp, act);
- if (err < 0)
- goto err_xdp;
+ if (err < 0) {
+ if (act == XDP_REDIRECT || act == XDP_TX)
+ put_page(alloc_frag->page);
+ goto out;
+ }
+
if (err == XDP_REDIRECT)
xdp_do_flush();
if (err != XDP_PASS)
@@ -1693,8 +1697,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
-err_xdp:
- put_page(alloc_frag->page);
out:
rcu_read_unlock();
local_bh_enable();
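This rework makes the page reference counting symmetric: as the context above shows, an extra reference is taken only for XDP_REDIRECT and XDP_TX (those actions hand the buffer to another path), so only their failure may drop it. The removed err_xdp label called put_page() for every negative return, underflowing the refcount for actions that never took the extra reference. The fixed ownership rule, condensed:

if (act == XDP_REDIRECT || act == XDP_TX) {
	get_page(alloc_frag->page);	/* ref owned by the redirect/tx path */
	alloc_frag->offset += buflen;
}

err = tun_xdp_act(tun, xdp_prog, &xdp, act);
if (err < 0) {
	/* undo the extra ref only if one was taken above */
	if (act == XDP_REDIRECT || act == XDP_TX)
		put_page(alloc_frag->page);
	goto out;
}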
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8783e2ab3ec0..0ef7e1f443e3 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -54,6 +54,7 @@ static const char driver_name[] = "pegasus";
#undef PEGASUS_WRITE_EEPROM
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
+#define CARRIER_CHECK_DELAY (2 * HZ)
static bool loopback;
static bool mii_mode;
@@ -1089,17 +1090,12 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg81, 2);
}
-
-static int pegasus_count;
-static struct workqueue_struct *pegasus_workqueue;
-#define CARRIER_CHECK_DELAY (2 * HZ)
-
static void check_carrier(struct work_struct *work)
{
pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
set_carrier(pegasus->net);
if (!(pegasus->flags & PEGASUS_UNPLUG)) {
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
}
}
@@ -1120,18 +1116,6 @@ static int pegasus_blacklisted(struct usb_device *udev)
return 0;
}
-/* we rely on probe() and remove() being serialized so we
- * don't need extra locking on pegasus_count.
- */
-static void pegasus_dec_workqueue(void)
-{
- pegasus_count--;
- if (pegasus_count == 0) {
- destroy_workqueue(pegasus_workqueue);
- pegasus_workqueue = NULL;
- }
-}
-
static int pegasus_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -1144,14 +1128,6 @@ static int pegasus_probe(struct usb_interface *intf,
if (pegasus_blacklisted(dev))
return -ENODEV;
- if (pegasus_count == 0) {
- pegasus_workqueue = alloc_workqueue("pegasus", WQ_MEM_RECLAIM,
- 0);
- if (!pegasus_workqueue)
- return -ENOMEM;
- }
- pegasus_count++;
-
net = alloc_etherdev(sizeof(struct pegasus));
if (!net)
goto out;
@@ -1209,7 +1185,7 @@ static int pegasus_probe(struct usb_interface *intf,
res = register_netdev(net);
if (res)
goto out3;
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
dev_info(&intf->dev, "%s, %s, %pM\n", net->name,
usb_dev_id[dev_index].name, net->dev_addr);
@@ -1222,7 +1198,6 @@ out2:
out1:
free_netdev(net);
out:
- pegasus_dec_workqueue();
return res;
}
@@ -1237,7 +1212,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
}
pegasus->flags |= PEGASUS_UNPLUG;
- cancel_delayed_work(&pegasus->carrier_check);
+ cancel_delayed_work_sync(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
@@ -1246,7 +1221,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
pegasus->rx_skb = NULL;
}
free_netdev(pegasus->net);
- pegasus_dec_workqueue();
}
static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
@@ -1254,7 +1228,7 @@ static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
struct pegasus *pegasus = usb_get_intfdata(intf);
netif_device_detach(pegasus->net);
- cancel_delayed_work(&pegasus->carrier_check);
+ cancel_delayed_work_sync(&pegasus->carrier_check);
if (netif_running(pegasus->net)) {
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->intr_urb);
@@ -1276,7 +1250,7 @@ static int pegasus_resume(struct usb_interface *intf)
pegasus->intr_urb->actual_length = 0;
intr_callback(pegasus->intr_urb);
}
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
return 0;
}
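Dropping the driver-private workqueue is safe here because the carrier check has no memory-reclaim or ordering requirement that would justify its own WQ_MEM_RECLAIM queue; the correctness-critical part is the switch to cancel_delayed_work_sync(), which waits for a running instance to finish so it cannot touch the device after disconnect or suspend. The resulting lifecycle, condensed:

/* schedule on a shared system workqueue instead of a private one */
queue_delayed_work(system_long_wq, &pegasus->carrier_check,
		   CARRIER_CHECK_DELAY);

/* on teardown, wait for any running instance before freeing state */
cancel_delayed_work_sync(&pegasus->carrier_check);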
diff --git a/drivers/net/wan/.gitignore b/drivers/net/wan/.gitignore
index dae3ea6bb18c..247bfbf10912 100644
--- a/drivers/net/wan/.gitignore
+++ b/drivers/net/wan/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
wanxlfw.inc
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f66c0f8f6f4a..ecb3fccca603 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -740,9 +740,6 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
static
int i2400m_bm_buf_alloc(struct i2400m *i2400m)
{
- int result;
-
- result = -ENOMEM;
i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
if (i2400m->bm_cmd_buf == NULL)
goto error_bm_cmd_kzalloc;
@@ -754,7 +751,7 @@ int i2400m_bm_buf_alloc(struct i2400m *i2400m)
error_bm_ack_buf_kzalloc:
kfree(i2400m->bm_cmd_buf);
error_bm_cmd_kzalloc:
- return result;
+ return -ENOMEM;
}
@@ -843,7 +840,7 @@ EXPORT_SYMBOL_GPL(i2400m_reset);
*/
int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
{
- int result = -ENODEV;
+ int result;
struct device *dev = i2400m_dev(i2400m);
struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index dcf234680535..edae52384b8a 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2674,8 +2674,8 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
ret = pci_enable_pcie_error_reporting(pdev);
if (ret != 0)
dev_warn(&pdev->dev, "PCIe AER capability disabled\n");
- else /* Cleanup uncorrectable error status before getting to init */
- pci_cleanup_aer_uncorrect_error_status(pdev);
+ else /* Cleanup nonfatal error status before getting to init */
+ pci_aer_clear_nonfatal_status(pdev);
/* First enable the PCI device */
ret = pcim_enable_device(pdev);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 86603d9b0cef..76dbb55625ac 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -142,14 +142,6 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
-/* XXX: really should move to a generic header sooner or later.. */
-static inline void put_unaligned_le24(u32 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
-}
-
static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
return queue - queue->ctrl->queues;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 9e1b8c61f54e..c90c06839d64 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -146,12 +146,6 @@ static int num_pages(int len)
return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}
-/* XXX: really should move to a generic header sooner or later.. */
-static inline u32 get_unaligned_le24(const u8 *p)
-{
- return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
-}
-
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
return nvme_is_write(rsp->req.cmd) &&
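Both hunks delete per-driver copies of the 24-bit unaligned accessors; as the removed "XXX: really should move to a generic header" comments anticipated, the shared definitions (include/linux/unaligned/generic.h in kernels of this vintage) are what the drivers now pick up. For reference, the helpers are small byte-order shufflers, identical to the code removed here:

static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline void put_unaligned_le24(u32 val, u8 *p)
{
	*p++ = val;
	*p++ = val >> 8;
	*p++ = val >> 16;
}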
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 35efab1ba8d9..d7b7f6d688e7 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -55,6 +55,18 @@ config NVMEM_IMX_OCOTP_SCU
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
+config JZ4780_EFUSE
+ tristate "JZ4780 EFUSE Memory Support"
+ depends on MACH_INGENIC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ select REGMAP_MMIO
+ help
+ Say Y here to include support for the JZ4780 efuse memory found on
+ all JZ4780 SoC based devices.
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_jz4780_efuse.
+
config NVMEM_LPC18XX_EEPROM
tristate "NXP LPC18XX EEPROM Memory Support"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 6b466cd1427b..a7c377218341 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,9 +6,6 @@
obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
-obj-$(CONFIG_NVMEM_SYSFS) += nvmem_sysfs.o
-nvmem_sysfs-y := nvmem-sysfs.o
-
# Devices
obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
nvmem-bcm-ocotp-y := bcm-ocotp.o
@@ -18,6 +15,8 @@ obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
+obj-$(CONFIG_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
+nvmem_jz4780_efuse-y := jz4780-efuse.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index ef326f243f36..05c6ae4b0b97 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -18,7 +18,31 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>
-#include "nvmem.h"
+
+struct nvmem_device {
+ struct module *owner;
+ struct device dev;
+ int stride;
+ int word_size;
+ int id;
+ struct kref refcnt;
+ size_t size;
+ bool read_only;
+ bool root_only;
+ int flags;
+ enum nvmem_type type;
+ struct bin_attribute eeprom;
+ struct device *base_dev;
+ struct list_head cells;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ struct gpio_desc *wp_gpio;
+ void *priv;
+};
+
+#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+
+#define FLAG_COMPAT BIT(0)
struct nvmem_cell {
const char *name;
@@ -42,6 +66,250 @@ static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
+#ifdef CONFIG_NVMEM_SYSFS
+static const char * const nvmem_type_str[] = {
+ [NVMEM_TYPE_UNKNOWN] = "Unknown",
+ [NVMEM_TYPE_EEPROM] = "EEPROM",
+ [NVMEM_TYPE_OTP] = "OTP",
+ [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key eeprom_lock_key;
+#endif
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *nvmem_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from reading */
+ if (pos >= nvmem->size)
+ return 0;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ if (!nvmem->reg_read)
+ return -EPERM;
+
+ rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev;
+ struct nvmem_device *nvmem;
+ int rc;
+
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
+ /* Stop the user from writing */
+ if (pos >= nvmem->size)
+ return -EFBIG;
+
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
+ if (pos + count > nvmem->size)
+ count = nvmem->size - pos;
+
+ count = round_down(count, nvmem->word_size);
+
+ if (!nvmem->reg_write)
+ return -EPERM;
+
+ rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
+
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+ umode_t mode = 0400;
+
+ if (!nvmem->root_only)
+ mode |= 0044;
+
+ if (!nvmem->read_only)
+ mode |= 0200;
+
+ if (!nvmem->reg_write)
+ mode &= ~0200;
+
+ if (!nvmem->reg_read)
+ mode &= ~0444;
+
+ return mode;
+}
+
+/* default read/write permissions */
+static struct bin_attribute bin_attr_rw_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0644,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_attributes[] = {
+ &bin_attr_rw_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_group = {
+ .bin_attrs = nvmem_bin_attributes,
+ .attrs = nvmem_attrs,
+ .is_bin_visible = nvmem_bin_attr_is_visible,
+};
+
+static const struct attribute_group *nvmem_dev_groups[] = {
+ &nvmem_bin_group,
+ NULL,
+};
+
+/* read only permission */
+static struct bin_attribute bin_attr_ro_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0444,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+/* default read/write permissions, root only */
+static struct bin_attribute bin_attr_rw_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0600,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+/* read only permission, root only */
+static struct bin_attribute bin_attr_ro_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = 0400,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+/*
+ * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
+ * driver's sysfs directory, to be backwards compatible with the older
+ * drivers/misc/eeprom drivers.
+ */
+static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ int rval;
+
+ if (!config->compat)
+ return 0;
+
+ if (!config->base_dev)
+ return -EINVAL;
+
+ if (nvmem->read_only) {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_ro_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_ro_nvmem;
+ } else {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_rw_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_rw_nvmem;
+ }
+ nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom.size = nvmem->size;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ nvmem->eeprom.attr.key = &eeprom_lock_key;
+#endif
+ nvmem->eeprom.private = &nvmem->dev;
+ nvmem->base_dev = config->base_dev;
+
+ rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
+ if (rval) {
+ dev_err(&nvmem->dev,
+ "Failed to create eeprom binary file %d\n", rval);
+ return rval;
+ }
+
+ nvmem->flags |= FLAG_COMPAT;
+
+ return 0;
+}
+
+static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ if (config->compat)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+}
+
+#else /* CONFIG_NVMEM_SYSFS */
+
+static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ return -ENOSYS;
+}
+static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+}
+
+#endif /* CONFIG_NVMEM_SYSFS */
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
@@ -72,6 +340,7 @@ static void nvmem_release(struct device *dev)
struct nvmem_device *nvmem = to_nvmem_device(dev);
ida_simple_remove(&nvmem_ida, nvmem->id);
+ gpiod_put(nvmem->wp_gpio);
kfree(nvmem);
}
@@ -338,6 +607,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (!config->dev)
return ERR_PTR(-EINVAL);
+ if (!config->reg_read && !config->reg_write)
+ return ERR_PTR(-EINVAL);
+
nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
if (!nvmem)
return ERR_PTR(-ENOMEM);
@@ -347,14 +619,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
kfree(nvmem);
return ERR_PTR(rval);
}
+
if (config->wp_gpio)
nvmem->wp_gpio = config->wp_gpio;
else
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
- if (IS_ERR(nvmem->wp_gpio))
- return ERR_CAST(nvmem->wp_gpio);
-
+ if (IS_ERR(nvmem->wp_gpio)) {
+ ida_simple_remove(&nvmem_ida, nvmem->id);
+ rval = PTR_ERR(nvmem->wp_gpio);
+ kfree(nvmem);
+ return ERR_PTR(rval);
+ }
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
@@ -369,6 +645,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->dev.type = &nvmem_provider_type;
nvmem->dev.bus = &nvmem_bus_type;
nvmem->dev.parent = config->dev;
+ nvmem->root_only = config->root_only;
nvmem->priv = config->priv;
nvmem->type = config->type;
nvmem->reg_read = config->reg_read;
@@ -387,13 +664,13 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
- nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);
-
- device_initialize(&nvmem->dev);
+#ifdef CONFIG_NVMEM_SYSFS
+ nvmem->dev.groups = nvmem_dev_groups;
+#endif
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
- rval = device_add(&nvmem->dev);
+ rval = device_register(&nvmem->dev);
if (rval)
goto err_put_device;
@@ -447,8 +724,7 @@ static void nvmem_device_release(struct kref *kref)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
nvmem_device_remove_all_cells(nvmem);
- device_del(&nvmem->dev);
- put_device(&nvmem->dev);
+ device_unregister(&nvmem->dev);
}
/**
@@ -1088,16 +1364,8 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
-/**
- * nvmem_cell_read_u16() - Read a cell value as an u16
- *
- * @dev: Device that requests the nvmem cell.
- * @cell_id: Name of nvmem cell to read.
- * @val: pointer to output value.
- *
- * Return: 0 on success or negative errno.
- */
-int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
+ void *val, size_t count)
{
struct nvmem_cell *cell;
void *buf;
@@ -1112,17 +1380,31 @@ int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
nvmem_cell_put(cell);
return PTR_ERR(buf);
}
- if (len != sizeof(*val)) {
+ if (len != count) {
kfree(buf);
nvmem_cell_put(cell);
return -EINVAL;
}
- memcpy(val, buf, sizeof(*val));
+ memcpy(val, buf, count);
kfree(buf);
nvmem_cell_put(cell);
return 0;
}
+
+/**
+ * nvmem_cell_read_u16() - Read a cell value as a u16
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
@@ -1136,33 +1418,26 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
*/
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
- struct nvmem_cell *cell;
- void *buf;
- size_t len;
-
- cell = nvmem_cell_get(dev, cell_id);
- if (IS_ERR(cell))
- return PTR_ERR(cell);
-
- buf = nvmem_cell_read(cell, &len);
- if (IS_ERR(buf)) {
- nvmem_cell_put(cell);
- return PTR_ERR(buf);
- }
- if (len != sizeof(*val)) {
- kfree(buf);
- nvmem_cell_put(cell);
- return -EINVAL;
- }
- memcpy(val, buf, sizeof(*val));
-
- kfree(buf);
- nvmem_cell_put(cell);
- return 0;
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
+ * nvmem_cell_read_u64() - Read a cell value as a u64
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
+
+/**
* nvmem_device_cell_read() - Read a given nvmem device and cell
*
* @nvmem: nvmem device to read from.
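nvmem_cell_read_common() folds the u16/u32/u64 readers into one length-checked path, and the new u64 variant gives consumers a direct way to pull an 8-byte cell. A hedged usage sketch (the cell name is illustrative):

#include <linux/nvmem-consumer.h>

/* returns -EINVAL unless the cell is exactly sizeof(u64) bytes long */
static int read_serial(struct device *dev, u64 *serial)
{
	return nvmem_cell_read_u64(dev, "serial-number", serial);
}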
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 4ba9cc8f76df..50bea2aadc1b 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -44,6 +44,11 @@
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
+#define IMX_OCOTP_BM_CTRL_ADDR_8MP 0x000001FF
+#define IMX_OCOTP_BM_CTRL_BUSY_8MP 0x00000200
+#define IMX_OCOTP_BM_CTRL_ERROR_8MP 0x00000400
+#define IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP 0x00000800
+
#define IMX_OCOTP_BM_CTRL_DEFAULT \
{ \
.bm_addr = IMX_OCOTP_BM_CTRL_ADDR, \
@@ -52,6 +57,14 @@
.bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS,\
}
+#define IMX_OCOTP_BM_CTRL_8MP \
+ { \
+ .bm_addr = IMX_OCOTP_BM_CTRL_ADDR_8MP, \
+ .bm_busy = IMX_OCOTP_BM_CTRL_BUSY_8MP, \
+ .bm_error = IMX_OCOTP_BM_CTRL_ERROR_8MP, \
+ .bm_rel_shadows = IMX_OCOTP_BM_CTRL_REL_SHADOWS_8MP,\
+ }
+
#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
#define TIMING_STROBE_READ_NS 37 /* Min time before read */
#define TIMING_RELAX_NS 17
@@ -193,9 +206,9 @@ read_end:
static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
{
- unsigned long clk_rate = 0;
+ unsigned long clk_rate;
unsigned long strobe_read, relax, strobe_prog;
- u32 timing = 0;
+ u32 timing;
/* 47.3.1.3.1
* Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
@@ -245,9 +258,9 @@ static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
{
- unsigned long clk_rate = 0;
+ unsigned long clk_rate;
u64 fsource, strobe_prog;
- u32 timing = 0;
+ u32 timing;
/* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
* 6.4.3.3
@@ -520,6 +533,13 @@ static const struct ocotp_params imx8mn_params = {
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
+static const struct ocotp_params imx8mp_params = {
+ .nregs = 384,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+ .ctrl = IMX_OCOTP_BM_CTRL_8MP,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
@@ -532,6 +552,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
{ .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
{ .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params },
+ { .compatible = "fsl,imx8mp-ocotp", .data = &imx8mp_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/jz4780-efuse.c b/drivers/nvmem/jz4780-efuse.c
new file mode 100644
index 000000000000..512e1872ba36
--- /dev/null
+++ b/drivers/nvmem/jz4780-efuse.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * JZ4780 EFUSE Memory Support driver
+ *
+ * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+ * Copyright (c) 2020 H. Nikolaus Schaller <hns@goldelico.com>
+ */
+
+/*
+ * Currently supports the JZ4780 efuse, which has 8K programmable bits.
+ * Efuse is separated into seven segments as below:
+ *
+ * -----------------------------------------------------------------------
+ * | 64 bit | 128 bit | 128 bit | 3520 bit | 8 bit | 2296 bit | 2048 bit |
+ * -----------------------------------------------------------------------
+ *
+ * The ROM itself is accessed using a 9 bit address line and an 8 word wide bus
+ * which reads/writes based on strobes. The strobe is configured in the config
+ * register and is based on the number of cycles of the bus clock.
+ *
+ * The driver supports read access only, as the writes are done in the factory.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/timer.h>
+
+#define JZ_EFUCTRL (0x0) /* Control Register */
+#define JZ_EFUCFG (0x4) /* Configure Register */
+#define JZ_EFUSTATE (0x8) /* Status Register */
+#define JZ_EFUDATA(n) (0xC + (n) * 4)
+
+/* We read 32 byte chunks to avoid complexity in the driver. */
+#define JZ_EFU_READ_SIZE 32
+
+#define EFUCTRL_ADDR_MASK 0x3FF
+#define EFUCTRL_ADDR_SHIFT 21
+#define EFUCTRL_LEN_MASK 0x1F
+#define EFUCTRL_LEN_SHIFT 16
+#define EFUCTRL_PG_EN BIT(15)
+#define EFUCTRL_WR_EN BIT(1)
+#define EFUCTRL_RD_EN BIT(0)
+
+#define EFUCFG_INT_EN BIT(31)
+#define EFUCFG_RD_ADJ_MASK 0xF
+#define EFUCFG_RD_ADJ_SHIFT 20
+#define EFUCFG_RD_STR_MASK 0xF
+#define EFUCFG_RD_STR_SHIFT 16
+#define EFUCFG_WR_ADJ_MASK 0xF
+#define EFUCFG_WR_ADJ_SHIFT 12
+#define EFUCFG_WR_STR_MASK 0xFFF
+#define EFUCFG_WR_STR_SHIFT 0
+
+#define EFUSTATE_WR_DONE BIT(1)
+#define EFUSTATE_RD_DONE BIT(0)
+
+struct jz4780_efuse {
+ struct device *dev;
+ struct regmap *map;
+ struct clk *clk;
+};
+
+/* main entry point */
+static int jz4780_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct jz4780_efuse *efuse = context;
+
+ while (bytes > 0) {
+ size_t start = offset & ~(JZ_EFU_READ_SIZE - 1);
+ size_t chunk = min(bytes, (start + JZ_EFU_READ_SIZE)
+ - offset);
+ char buf[JZ_EFU_READ_SIZE];
+ unsigned int tmp;
+ u32 ctrl;
+ int ret;
+
+ ctrl = (start << EFUCTRL_ADDR_SHIFT)
+ | ((JZ_EFU_READ_SIZE - 1) << EFUCTRL_LEN_SHIFT)
+ | EFUCTRL_RD_EN;
+
+ regmap_update_bits(efuse->map, JZ_EFUCTRL,
+ (EFUCTRL_ADDR_MASK << EFUCTRL_ADDR_SHIFT) |
+ (EFUCTRL_LEN_MASK << EFUCTRL_LEN_SHIFT) |
+ EFUCTRL_PG_EN | EFUCTRL_WR_EN |
+ EFUCTRL_RD_EN,
+ ctrl);
+
+ ret = regmap_read_poll_timeout(efuse->map, JZ_EFUSTATE,
+ tmp, tmp & EFUSTATE_RD_DONE,
+ 1 * MSEC_PER_SEC,
+ 50 * MSEC_PER_SEC);
+ if (ret < 0) {
+ dev_err(efuse->dev, "timed out while reading efuse data\n");
+ return ret;
+ }
+
+ ret = regmap_bulk_read(efuse->map, JZ_EFUDATA(0),
+ buf, JZ_EFU_READ_SIZE / sizeof(u32));
+ if (ret < 0)
+ return ret;
+
+ memcpy(val, &buf[offset - start], chunk);
+
+ val += chunk;
+ offset += chunk;
+ bytes -= chunk;
+ }
+
+ return 0;
+}
+
+static struct nvmem_config jz4780_efuse_nvmem_config = {
+ .name = "jz4780-efuse",
+ .size = 1024,
+ .word_size = 1,
+ .stride = 1,
+ .owner = THIS_MODULE,
+ .reg_read = jz4780_efuse_read,
+};
+
+static const struct regmap_config jz4780_efuse_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = JZ_EFUDATA(7),
+};
+
+static void clk_disable_unprepare_helper(void *clock)
+{
+ clk_disable_unprepare(clock);
+}
+
+static int jz4780_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ struct jz4780_efuse *efuse;
+ struct nvmem_config cfg;
+ unsigned long clk_rate;
+ unsigned long rd_adj;
+ unsigned long rd_strobe;
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ int ret;
+
+ efuse = devm_kzalloc(dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ efuse->map = devm_regmap_init_mmio(dev, regs,
+ &jz4780_efuse_regmap_config);
+ if (IS_ERR(efuse->map))
+ return PTR_ERR(efuse->map);
+
+ efuse->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(efuse->clk))
+ return PTR_ERR(efuse->clk);
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ clk_disable_unprepare_helper,
+ efuse->clk);
+ if (ret < 0)
+ return ret;
+
+ clk_rate = clk_get_rate(efuse->clk);
+
+ efuse->dev = dev;
+
+ /*
+ * rd_adj and rd_strobe are 4 bit values
+ * conditions:
+ * bus clk_period * (rd_adj + 1) > 6.5ns
+ * bus clk_period * (rd_adj + 5 + rd_strobe) > 35ns
+ * i.e. rd_adj >= 6.5ns / clk_period
+ * i.e. rd_strobe >= 35 ns / clk_period - 5 - rd_adj + 1
+ * constants:
+ * 1 / 6.5ns == 153846154 Hz
+ * 1 / 35ns == 28571429 Hz
+ */
+
+ rd_adj = clk_rate / 153846154;
+ rd_strobe = clk_rate / 28571429 - 5 - rd_adj + 1;
+
+ if (rd_adj > EFUCFG_RD_ADJ_MASK ||
+ rd_strobe > EFUCFG_RD_STR_MASK) {
+ dev_err(&pdev->dev, "Cannot set clock configuration\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(efuse->map, JZ_EFUCFG,
+ (EFUCFG_RD_ADJ_MASK << EFUCFG_RD_ADJ_SHIFT) |
+ (EFUCFG_RD_STR_MASK << EFUCFG_RD_STR_SHIFT),
+ (rd_adj << EFUCFG_RD_ADJ_SHIFT) |
+ (rd_strobe << EFUCFG_RD_STR_SHIFT));
+
+ cfg = jz4780_efuse_nvmem_config;
+ cfg.dev = &pdev->dev;
+ cfg.priv = efuse;
+
+ nvmem = devm_nvmem_register(dev, &cfg);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ return 0;
+}
+
+static const struct of_device_id jz4780_efuse_match[] = {
+ { .compatible = "ingenic,jz4780-efuse" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jz4780_efuse_match);
+
+static struct platform_driver jz4780_efuse_driver = {
+ .probe = jz4780_efuse_probe,
+ .driver = {
+ .name = "jz4780-efuse",
+ .of_match_table = jz4780_efuse_match,
+ },
+};
+module_platform_driver(jz4780_efuse_driver);
+
+MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
+MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ4780 efuse driver");
+MODULE_LICENSE("GPL v2");
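To make the timing derivation above concrete, a worked example assuming an illustrative 250 MHz bus clock: rd_adj = 250000000 / 153846154 = 1, and rd_strobe = 250000000 / 28571429 - 5 - 1 + 1 = 8 - 5 = 3. Checking against the stated conditions with a 4 ns clock period: (rd_adj + 1) * 4 ns = 8 ns > 6.5 ns, and (rd_adj + 5 + rd_strobe) * 4 ns = 36 ns > 35 ns; both values also fit their 4-bit fields, so the EFUCFG read timing can be programmed.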
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 8e4898dec002..588ab56d75b7 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -130,6 +130,11 @@ static const struct of_device_id mxs_ocotp_match[] = {
};
MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
+static void mxs_ocotp_action(void *data)
+{
+ clk_unprepare(data);
+}
+
static int mxs_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -160,39 +165,26 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
return ret;
}
+ ret = devm_add_action_or_reset(&pdev->dev, mxs_ocotp_action, otp->clk);
+ if (ret)
+ return ret;
+
data = match->data;
ocotp_config.size = data->size;
ocotp_config.priv = otp;
ocotp_config.dev = dev;
otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
- if (IS_ERR(otp->nvmem)) {
- ret = PTR_ERR(otp->nvmem);
- goto err_clk;
- }
+ if (IS_ERR(otp->nvmem))
+ return PTR_ERR(otp->nvmem);
platform_set_drvdata(pdev, otp);
return 0;
-
-err_clk:
- clk_unprepare(otp->clk);
-
- return ret;
-}
-
-static int mxs_ocotp_remove(struct platform_device *pdev)
-{
- struct mxs_ocotp *otp = platform_get_drvdata(pdev);
-
- clk_unprepare(otp->clk);
-
- return 0;
}
static struct platform_driver mxs_ocotp_driver = {
.probe = mxs_ocotp_probe,
- .remove = mxs_ocotp_remove,
.driver = {
.name = "mxs-ocotp",
.of_match_table = mxs_ocotp_match,
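This driver and the new jz4780-efuse above apply the same conversion: registering clk_unprepare() as a devres action makes the remove() callback unnecessary, because devres unwinds the action automatically both on unbind and on any later probe failure. The pattern, sketched:

#include <linux/clk.h>
#include <linux/device.h>

static void clk_unprepare_action(void *data)
{
	clk_unprepare(data);
}

/* in probe(), once clk_prepare(clk) has succeeded: */
ret = devm_add_action_or_reset(dev, clk_unprepare_action, clk);
if (ret)
	return ret;	/* on failure the action has already run */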
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
deleted file mode 100644
index 9e0c429cd08a..000000000000
--- a/drivers/nvmem/nvmem-sysfs.c
+++ /dev/null
@@ -1,263 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019, Linaro Limited
- */
-#include "nvmem.h"
-
-static const char * const nvmem_type_str[] = {
- [NVMEM_TYPE_UNKNOWN] = "Unknown",
- [NVMEM_TYPE_EEPROM] = "EEPROM",
- [NVMEM_TYPE_OTP] = "OTP",
- [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key eeprom_lock_key;
-#endif
-
-static ssize_t type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvmem_device *nvmem = to_nvmem_device(dev);
-
- return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
-}
-
-static DEVICE_ATTR_RO(type);
-
-static struct attribute *nvmem_attrs[] = {
- &dev_attr_type.attr,
- NULL,
-};
-
-static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t pos, size_t count)
-{
- struct device *dev;
- struct nvmem_device *nvmem;
- int rc;
-
- if (attr->private)
- dev = attr->private;
- else
- dev = container_of(kobj, struct device, kobj);
- nvmem = to_nvmem_device(dev);
-
- /* Stop the user from reading */
- if (pos >= nvmem->size)
- return 0;
-
- if (count < nvmem->word_size)
- return -EINVAL;
-
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
- count = round_down(count, nvmem->word_size);
-
- rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
-
- if (rc)
- return rc;
-
- return count;
-}
-
-static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t pos, size_t count)
-{
- struct device *dev;
- struct nvmem_device *nvmem;
- int rc;
-
- if (attr->private)
- dev = attr->private;
- else
- dev = container_of(kobj, struct device, kobj);
- nvmem = to_nvmem_device(dev);
-
- /* Stop the user from writing */
- if (pos >= nvmem->size)
- return -EFBIG;
-
- if (count < nvmem->word_size)
- return -EINVAL;
-
- if (pos + count > nvmem->size)
- count = nvmem->size - pos;
-
- count = round_down(count, nvmem->word_size);
-
- rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
-
- if (rc)
- return rc;
-
- return count;
-}
-
-/* default read/write permissions */
-static struct bin_attribute bin_attr_rw_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0644,
- },
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_attributes[] = {
- &bin_attr_rw_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_group = {
- .bin_attrs = nvmem_bin_rw_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_dev_groups[] = {
- &nvmem_bin_rw_group,
- NULL,
-};
-
-/* read only permission */
-static struct bin_attribute bin_attr_ro_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0444,
- },
- .read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_attributes[] = {
- &bin_attr_ro_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_group = {
- .bin_attrs = nvmem_bin_ro_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_dev_groups[] = {
- &nvmem_bin_ro_group,
- NULL,
-};
-
-/* default read/write permissions, root only */
-static struct bin_attribute bin_attr_rw_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0600,
- },
- .read = bin_attr_nvmem_read,
- .write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
- &bin_attr_rw_root_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_root_group = {
- .bin_attrs = nvmem_bin_rw_root_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
- &nvmem_bin_rw_root_group,
- NULL,
-};
-
-/* read only permission, root only */
-static struct bin_attribute bin_attr_ro_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0400,
- },
- .read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
- &bin_attr_ro_root_nvmem,
- NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_root_group = {
- .bin_attrs = nvmem_bin_ro_root_attributes,
- .attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
- &nvmem_bin_ro_root_group,
- NULL,
-};
-
-const struct attribute_group **nvmem_sysfs_get_groups(
- struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- if (config->root_only)
- return nvmem->read_only ?
- nvmem_ro_root_dev_groups :
- nvmem_rw_root_dev_groups;
-
- return nvmem->read_only ? nvmem_ro_dev_groups : nvmem_rw_dev_groups;
-}
-
-/*
- * nvmem_setup_compat() - Create an additional binary entry in
- * drivers sys directory, to be backwards compatible with the older
- * drivers/misc/eeprom drivers.
- */
-int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- int rval;
-
- if (!config->compat)
- return 0;
-
- if (!config->base_dev)
- return -EINVAL;
-
- if (nvmem->read_only) {
- if (config->root_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_ro_nvmem;
- } else {
- if (config->root_only)
- nvmem->eeprom = bin_attr_rw_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_nvmem;
- }
- nvmem->eeprom.attr.name = "eeprom";
- nvmem->eeprom.size = nvmem->size;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- nvmem->eeprom.attr.key = &eeprom_lock_key;
-#endif
- nvmem->eeprom.private = &nvmem->dev;
- nvmem->base_dev = config->base_dev;
-
- rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
- if (rval) {
- dev_err(&nvmem->dev,
- "Failed to create eeprom binary file %d\n", rval);
- return rval;
- }
-
- nvmem->flags |= FLAG_COMPAT;
-
- return 0;
-}
-
-void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- if (config->compat)
- device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
-}
diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h
deleted file mode 100644
index be0d66d75c8a..000000000000
--- a/drivers/nvmem/nvmem.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _DRIVERS_NVMEM_H
-#define _DRIVERS_NVMEM_H
-
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/nvmem-provider.h>
-#include <linux/gpio/consumer.h>
-
-struct nvmem_device {
- struct module *owner;
- struct device dev;
- int stride;
- int word_size;
- int id;
- struct kref refcnt;
- size_t size;
- bool read_only;
- int flags;
- enum nvmem_type type;
- struct bin_attribute eeprom;
- struct device *base_dev;
- struct list_head cells;
- nvmem_reg_read_t reg_read;
- nvmem_reg_write_t reg_write;
- struct gpio_desc *wp_gpio;
- void *priv;
-};
-
-#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
-#define FLAG_COMPAT BIT(0)
-
-#ifdef CONFIG_NVMEM_SYSFS
-const struct attribute_group **nvmem_sysfs_get_groups(
- struct nvmem_device *nvmem,
- const struct nvmem_config *config);
-int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config);
-void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config);
-#else
-static inline const struct attribute_group **nvmem_sysfs_get_groups(
- struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- return NULL;
-}
-
-static inline int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
- return -ENOSYS;
-}
-static inline void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
- const struct nvmem_config *config)
-{
-}
-#endif /* CONFIG_NVMEM_SYSFS */
-
-#endif /* _DRIVERS_NVMEM_H */
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
index 2f1e0fbd1901..925feb21d5ad 100644
--- a/drivers/nvmem/sprd-efuse.c
+++ b/drivers/nvmem/sprd-efuse.c
@@ -217,12 +217,14 @@ static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
* Enable the auto-check function to validate if the programming is
* successful.
*/
- sprd_efuse_set_auto_check(efuse, true);
+ if (lock)
+ sprd_efuse_set_auto_check(efuse, true);
writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
/* Disable auto-check and data double after programming */
- sprd_efuse_set_auto_check(efuse, false);
+ if (lock)
+ sprd_efuse_set_auto_check(efuse, false);
sprd_efuse_set_data_double(efuse, false);
/*
@@ -237,9 +239,9 @@ static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
writel(SPRD_EFUSE_ERR_CLR_MASK,
efuse->base + SPRD_EFUSE_ERR_CLR);
ret = -EBUSY;
- } else {
+ } else if (lock) {
sprd_efuse_set_prog_lock(efuse, lock);
- writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+ writel(0, efuse->base + SPRD_EFUSE_MEM(blk));
sprd_efuse_set_prog_lock(efuse, false);
}
@@ -322,6 +324,8 @@ unlock:
static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
{
struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ bool lock;
int ret;
ret = sprd_efuse_lock(efuse);
@@ -332,7 +336,20 @@ static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
if (ret)
goto unlock;
- ret = sprd_efuse_raw_prog(efuse, offset, false, false, val);
+	/*
+	 * If the number of bytes to write equals the block width, the whole
+	 * block will be programmed. In that case, lock the block so that it
+	 * cannot be programmed again.
+	 *
+	 * If the block was only programmed partially, leave it unlocked so
+	 * that it can be programmed again.
+	 */
+ if (bytes < SPRD_EFUSE_BLOCK_WIDTH)
+ lock = false;
+ else
+ lock = true;
+
+ ret = sprd_efuse_raw_prog(efuse, offset, blk_double, lock, val);
clk_disable_unprepare(efuse->clk);
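
Since the branch just added above only maps a size comparison onto the lock
flag, an equivalent single assignment would be (behaviour-preserving sketch,
not what the patch applies):

	lock = bytes >= SPRD_EFUSE_BLOCK_WIDTH;

Partial writes therefore leave the block unlocked and reprogrammable.
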
diff --git a/drivers/of/address.c b/drivers/of/address.c
index e8a39c3ec4d4..8eea3f6e29a4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -100,6 +100,28 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
return IORESOURCE_MEM;
}
+static unsigned int of_bus_pci_get_flags(const __be32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = be32_to_cpup(addr);
+
+ if (!IS_ENABLED(CONFIG_PCI))
+ return 0;
+
+	switch ((w >> 24) & 0x03) {
+ case 0x01:
+ flags |= IORESOURCE_IO;
+ break;
+ case 0x02: /* 32 bits */
+ case 0x03: /* 64 bits */
+ flags |= IORESOURCE_MEM;
+ break;
+ }
+ if (w & 0x40000000)
+ flags |= IORESOURCE_PREFETCH;
+ return flags;
+}
+
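For orientation, the cell decoded by of_bus_pci_get_flags() is the phys.hi
word of the standard PCI bus binding: bits 25:24 select the address space
(01 = I/O, 10 = 32-bit memory, 11 = 64-bit memory) and bit 30 marks a
prefetchable region. A quick illustrative decode with a hypothetical value
(the helper is static, so this only works inside address.c):

	__be32 cell = cpu_to_be32(0x42000000);	/* hypothetical phys.hi */
	unsigned int flags = of_bus_pci_get_flags(&cell);
	/*
	 * (0x42000000 >> 24) & 0x03 == 0x02  ->  IORESOURCE_MEM
	 * 0x42000000 & 0x40000000   != 0     ->  IORESOURCE_PREFETCH
	 * so flags == (IORESOURCE_MEM | IORESOURCE_PREFETCH) with CONFIG_PCI=y.
	 */
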
#ifdef CONFIG_PCI
/*
* PCI bus specific translator
@@ -125,25 +147,6 @@ static void of_bus_pci_count_cells(struct device_node *np,
*sizec = 2;
}
-static unsigned int of_bus_pci_get_flags(const __be32 *addr)
-{
- unsigned int flags = 0;
- u32 w = be32_to_cpup(addr);
-
- switch((w >> 24) & 0x03) {
- case 0x01:
- flags |= IORESOURCE_IO;
- break;
- case 0x02: /* 32 bits */
- case 0x03: /* 64 bits */
- flags |= IORESOURCE_MEM;
- break;
- }
- if (w & 0x40000000)
- flags |= IORESOURCE_PREFETCH;
- return flags;
-}
-
static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
int pna)
{
@@ -234,93 +237,6 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-static int parser_init(struct of_pci_range_parser *parser,
- struct device_node *node, const char *name)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
- parser->dma = !strcmp(name, "dma-ranges");
-
- parser->range = of_get_property(node, name, &rlen);
- if (parser->range == NULL)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
-int of_pci_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- return parser_init(parser, node, "ranges");
-}
-EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
-
-int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- return parser_init(parser, node, "dma-ranges");
-}
-EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
-
-struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
- struct of_pci_range *range)
-{
- const int na = 3, ns = 2;
-
- if (!range)
- return NULL;
-
- if (!parser->range || parser->range + parser->np > parser->end)
- return NULL;
-
- range->pci_space = be32_to_cpup(parser->range);
- range->flags = of_bus_pci_get_flags(parser->range);
- range->pci_addr = of_read_number(parser->range + 1, ns);
- if (parser->dma)
- range->cpu_addr = of_translate_dma_address(parser->node,
- parser->range + na);
- else
- range->cpu_addr = of_translate_address(parser->node,
- parser->range + na);
- range->size = of_read_number(parser->range + parser->pna + na, ns);
-
- parser->range += parser->np;
-
- /* Now consume following elements while they are contiguous */
- while (parser->range + parser->np <= parser->end) {
- u32 flags;
- u64 pci_addr, cpu_addr, size;
-
- flags = of_bus_pci_get_flags(parser->range);
- pci_addr = of_read_number(parser->range + 1, ns);
- if (parser->dma)
- cpu_addr = of_translate_dma_address(parser->node,
- parser->range + na);
- else
- cpu_addr = of_translate_address(parser->node,
- parser->range + na);
- size = of_read_number(parser->range + parser->pna + na, ns);
-
- if (flags != range->flags)
- break;
- if (pci_addr != range->pci_addr + range->size ||
- cpu_addr != range->cpu_addr + range->size)
- break;
-
- range->size += size;
- parser->range += parser->np;
- }
-
- return range;
-}
-EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
-
/*
* of_pci_range_to_resource - Create a resource from an of_pci_range
* @range: the PCI range that describes the resource
@@ -775,6 +691,101 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
+static int parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node, const char *name)
+{
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->na = of_bus_n_addr_cells(node);
+ parser->ns = of_bus_n_size_cells(node);
+ parser->dma = !strcmp(name, "dma-ranges");
+
+ parser->range = of_get_property(node, name, &rlen);
+ if (parser->range == NULL)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+
+ return 0;
+}
+
+int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "ranges");
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
+
+int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "dma-ranges");
+}
+EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
+#define of_dma_range_parser_init of_pci_dma_range_parser_init
+
+struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
+ struct of_pci_range *range)
+{
+ int na = parser->na;
+ int ns = parser->ns;
+ int np = parser->pna + na + ns;
+
+ if (!range)
+ return NULL;
+
+ if (!parser->range || parser->range + np > parser->end)
+ return NULL;
+
+ if (parser->na == 3)
+ range->flags = of_bus_pci_get_flags(parser->range);
+ else
+ range->flags = 0;
+
+ range->pci_addr = of_read_number(parser->range, na);
+
+ if (parser->dma)
+ range->cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ range->cpu_addr = of_translate_address(parser->node,
+ parser->range + na);
+ range->size = of_read_number(parser->range + parser->pna + na, ns);
+
+ parser->range += np;
+
+ /* Now consume following elements while they are contiguous */
+ while (parser->range + np <= parser->end) {
+ u32 flags = 0;
+ u64 pci_addr, cpu_addr, size;
+
+ if (parser->na == 3)
+ flags = of_bus_pci_get_flags(parser->range);
+ pci_addr = of_read_number(parser->range, na);
+ if (parser->dma)
+ cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ cpu_addr = of_translate_address(parser->node,
+ parser->range + na);
+ size = of_read_number(parser->range + parser->pna + na, ns);
+
+ if (flags != range->flags)
+ break;
+ if (pci_addr != range->pci_addr + range->size ||
+ cpu_addr != range->cpu_addr + range->size)
+ break;
+
+ range->size += size;
+ parser->range += np;
+ }
+
+ return range;
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
+
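With the parser now taking its address and size cell counts from the bus node
(of_bus_n_addr_cells()/of_bus_n_size_cells()) instead of hardcoding the PCI
3/2 layout, the same iteration works for any bus. A minimal sketch of walking
a node's dma-ranges with the generalized parser (node is a hypothetical
struct device_node pointer; error handling trimmed):

	struct of_range_parser parser;
	struct of_range range;

	if (of_dma_range_parser_init(&parser, node))
		return;		/* no dma-ranges property */

	for_each_of_range(&parser, &range)
		pr_debug("bus %llx -> cpu %llx (%llx bytes)\n",
			 range.bus_addr, range.cpu_addr, range.size);
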
static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
u64 size)
{
@@ -928,10 +939,12 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
{
struct device_node *node = of_node_get(np);
const __be32 *ranges = NULL;
- int len, naddr, nsize, pna;
+ int len;
int ret = 0;
bool found_dma_ranges = false;
- u64 dmaaddr;
+ struct of_range_parser parser;
+ struct of_range range;
+ u64 dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
@@ -956,32 +969,38 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
goto out;
}
- naddr = of_bus_n_addr_cells(node);
- nsize = of_bus_n_size_cells(node);
- pna = of_n_addr_cells(node);
- if ((len / sizeof(__be32)) % (pna + naddr + nsize)) {
- ret = -EINVAL;
- goto out;
+ of_dma_range_parser_init(&parser, node);
+
+ for_each_of_range(&parser, &range) {
+ pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
+ range.bus_addr, range.cpu_addr, range.size);
+
+ if (dma_offset && range.cpu_addr - range.bus_addr != dma_offset) {
+ pr_warn("Can't handle multiple dma-ranges with different offsets on node(%pOF)\n", node);
+ /* Don't error out as we'd break some existing DTs */
+ continue;
+ }
+ dma_offset = range.cpu_addr - range.bus_addr;
+
+ /* Take lower and upper limits */
+ if (range.bus_addr < dma_start)
+ dma_start = range.bus_addr;
+ if (range.bus_addr + range.size > dma_end)
+ dma_end = range.bus_addr + range.size;
}
- /* dma-ranges format:
- * DMA addr : naddr cells
- * CPU addr : pna cells
- * size : nsize cells
- */
- dmaaddr = of_read_number(ranges, naddr);
- *paddr = of_translate_dma_address(node, ranges + naddr);
- if (*paddr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
- dmaaddr, np);
+ if (dma_start >= dma_end) {
ret = -EINVAL;
+ pr_debug("Invalid DMA ranges configuration on node(%pOF)\n",
+ node);
goto out;
}
- *dma_addr = dmaaddr;
- *size = of_read_number(ranges + naddr + pna, nsize);
+ *dma_addr = dma_start;
+ *size = dma_end - dma_start;
+ *paddr = dma_start + dma_offset;
- pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
+ pr_debug("final: dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
out:
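
A worked example of the merging above, assuming a hypothetical bus with
#address-cells = <1>, #size-cells = <1> and a parent with
#address-cells = <2>:

	dma-ranges = <0x00000000 0x1 0x00000000 0x40000000>,
		     <0x40000000 0x1 0x40000000 0x40000000>;

Both entries share the offset cpu_addr - bus_addr == 0x1_0000_0000 and are
contiguous, so they collapse into dma_start == 0x0 and dma_end ==
0x8000_0000, giving *dma_addr = 0x0, *size = 0x8000_0000 and
*paddr = 0x1_0000_0000. An entry with a different offset would instead
trigger the pr_warn() and be skipped rather than fail the whole lookup.
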
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 207863c151a5..edc682249c00 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -24,7 +24,7 @@ struct alias_prop {
const char *alias;
struct device_node *np;
int id;
- char stem[0];
+ char stem[];
};
#if defined(CONFIG_SPARC)
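
The [0] -> [] change converts a GNU zero-length array into a C99 flexible
array member. The semantics are identical here, but the flexible form lets
the compiler and static checkers flag misuse, e.g. taking sizeof() of the
array or embedding the struct anywhere but at the end of an allocation. The
allocation pattern is unchanged; a minimal sketch, assuming a stem of length
len and a generic allocator (drivers/of actually goes through a dt_alloc()
callback):

	struct alias_prop *ap;

	ap = kzalloc(sizeof(*ap) + len + 1, GFP_KERNEL); /* struct + stem + NUL */
	if (!ap)
		return;
	strncpy(ap->stem, stem, len);	/* stem storage follows the struct */
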
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 6bd610ee2cd7..1a84bc0d5fa8 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/memblock.h>
-#define MAX_RESERVED_REGIONS 32
+#define MAX_RESERVED_REGIONS 64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index f104f15b57fb..b4916dcc9e72 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1204,6 +1204,8 @@ DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
+DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
@@ -1226,6 +1228,8 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_io_channels, },
{ .parse_prop = parse_interrupt_parent, },
{ .parse_prop = parse_dmas, },
+ { .parse_prop = parse_power_domains, },
+ { .parse_prop = parse_hwlocks, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
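
Each DEFINE_SIMPLE_PROP() entry generates a parse_<name>() helper that
resolves the index'th phandle in the named property (validated against its
cell-count specifier) to the supplier's device node, which the device-links
code can then use to order probing. With the two new entries, a hypothetical
consumer such as:

	codec@1 {
		compatible = "vendor,example-codec";	/* hypothetical */
		power-domains = <&pd_audio>;
		hwlocks = <&hwlock 3>;
	};

now has the nodes behind &pd_audio and &hwlock picked up as suppliers.
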
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 83c766233181..b278ab4338ce 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -321,8 +321,11 @@ int of_resolve_phandles(struct device_node *overlay)
err = of_property_read_string(tree_symbols,
prop->name, &refpath);
- if (err)
+ if (err) {
+ pr_err("node label '%s' not found in live devicetree symbols table\n",
+ prop->name);
goto out;
+ }
refnode = of_find_node_by_path(refpath);
if (!refnode) {
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 9b6807065827..009f4045c8e4 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -21,7 +21,13 @@ obj-$(CONFIG_OF_OVERLAY) += overlay.dtb.o \
overlay_bad_add_dup_prop.dtb.o \
overlay_bad_phandle.dtb.o \
overlay_bad_symbol.dtb.o \
- overlay_base.dtb.o
+ overlay_base.dtb.o \
+ overlay_gpio_01.dtb.o \
+ overlay_gpio_02a.dtb.o \
+ overlay_gpio_02b.dtb.o \
+ overlay_gpio_03.dtb.o \
+ overlay_gpio_04a.dtb.o \
+ overlay_gpio_04b.dtb.o
# enable creation of __symbols__ node
DTC_FLAGS_overlay += -@
diff --git a/drivers/of/unittest-data/overlay_gpio_01.dts b/drivers/of/unittest-data/overlay_gpio_01.dts
new file mode 100644
index 000000000000..699ff104ae10
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_01.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@0 {
+ compatible = "unittest-gpio";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B";
+
+ line-b {
+ gpio-hog;
+ gpios = <2 0>;
+ input;
+ line-name = "line-B-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_02a.dts b/drivers/of/unittest-data/overlay_gpio_02a.dts
new file mode 100644
index 000000000000..ec59aff6ed47
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_02a.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@2 {
+ compatible = "unittest-gpio";
+ reg = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B";
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_02b.dts b/drivers/of/unittest-data/overlay_gpio_02b.dts
new file mode 100644
index 000000000000..43ce111d41ce
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_02b.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@2 {
+ line-a {
+ gpio-hog;
+ gpios = <1 0>;
+ input;
+ line-name = "line-A-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_03.dts b/drivers/of/unittest-data/overlay_gpio_03.dts
new file mode 100644
index 000000000000..6e0312340a1b
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_03.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@3 {
+ compatible = "unittest-gpio";
+ reg = <3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B", "line-C", "line-D";
+
+ line-d {
+ gpio-hog;
+ gpios = <4 0>;
+ input;
+ line-name = "line-D-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_04a.dts b/drivers/of/unittest-data/overlay_gpio_04a.dts
new file mode 100644
index 000000000000..7b1e04ebfa7a
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_04a.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@4 {
+ compatible = "unittest-gpio";
+ reg = <4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ gpio-line-names = "line-A", "line-B", "line-C", "line-D";
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_gpio_04b.dts b/drivers/of/unittest-data/overlay_gpio_04b.dts
new file mode 100644
index 000000000000..a14e95c6699a
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_gpio_04b.dts
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&unittest_test_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpio@4 {
+ line-c {
+ gpio-hog;
+ gpios = <3 0>;
+ input;
+ line-name = "line-C-input";
+ };
+ };
+};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 68b87587b2ef..7e27670c3616 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -24,6 +24,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
@@ -46,6 +47,20 @@ static struct unittest_results {
failed; \
})
+/*
+ * An expected message may be issued at a level other than KERN_INFO.
+ * The marker is printed at that same level, so that it appears only when
+ * the current console loglevel would also let the actual message through.
+ *
+ * Do not use EXPECT_BEGIN() or EXPECT_END() for messages generated by
+ * pr_debug().
+ */
+#define EXPECT_BEGIN(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT \\ : ") fmt, ##__VA_ARGS__)
+
+#define EXPECT_END(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT / : ") fmt, ##__VA_ARGS__)
+
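The markers only bracket the console output; pairing an EXPECT line with the
real message is left to whatever harness parses the log. Assuming this file's
"### dt-test ### " pr_fmt prefix, a satisfied expectation renders roughly as:

	### dt-test ### EXPECT \ : OF: /some/node: could not find phandle
	OF: /some/node: could not find phandle
	### dt-test ### EXPECT / : OF: /some/node: could not find phandle
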
static void __init of_unittest_find_node_by_name(void)
{
struct device_node *np;
@@ -444,29 +459,77 @@ static void __init of_unittest_parse_phandle_with_args(void)
/* Check for missing cells property */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing", 0, &args);
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
rc = of_count_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing");
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells", 0, &args);
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
rc = of_count_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells");
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells");
+
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -577,20 +640,41 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
/* Check for missing cells,map,mask property */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1");
+
rc = of_parse_phandle_with_args_map(np, "phandle-list",
"phandle-missing", 0, &args);
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
+
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle",
"phandle", 0, &args);
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
memset(&args, 0, sizeof(args));
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
"phandle", 1, &args);
+ EXPECT_END(KERN_INFO,
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -1121,7 +1205,15 @@ static void __init of_unittest_platform_populate(void)
np = of_find_node_by_path("/testcase-data/testcase-device2");
pdev = of_find_device_by_node(np);
unittest(pdev, "device 2 creation failed\n");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "platform testcase-data:testcase-device2: IRQ index 0 not found");
+
irq = platform_get_irq(pdev, 0);
+
+ EXPECT_END(KERN_INFO,
+ "platform testcase-data:testcase-device2: IRQ index 0 not found");
+
unittest(irq < 0 && irq != -EPROBE_DEFER,
"device parsing error failed - %d\n", irq);
}
@@ -1325,6 +1417,9 @@ static int __init unittest_data_add(void)
return 0;
}
+ EXPECT_BEGIN(KERN_INFO,
+ "Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
+
/* attach the sub-tree to live tree */
np = unittest_data_node->child;
while (np) {
@@ -1335,6 +1430,9 @@ static int __init unittest_data_add(void)
np = next;
}
+ EXPECT_END(KERN_INFO,
+ "Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
+
of_overlay_mutex_unlock();
return 0;
@@ -1410,6 +1508,249 @@ static int of_path_platform_device_exists(const char *path)
return pdev != NULL;
}
+#ifdef CONFIG_OF_GPIO
+
+struct unittest_gpio_dev {
+ struct gpio_chip chip;
+};
+
+static int unittest_gpio_chip_request_count;
+static int unittest_gpio_probe_count;
+static int unittest_gpio_probe_pass_count;
+
+static int unittest_gpio_chip_request(struct gpio_chip *chip, unsigned int offset)
+{
+ unittest_gpio_chip_request_count++;
+
+ pr_debug("%s(): %s %d %d\n", __func__, chip->label, offset,
+ unittest_gpio_chip_request_count);
+ return 0;
+}
+
+static int unittest_gpio_probe(struct platform_device *pdev)
+{
+ struct unittest_gpio_dev *devptr;
+ int ret;
+
+ unittest_gpio_probe_count++;
+
+ devptr = kzalloc(sizeof(*devptr), GFP_KERNEL);
+ if (!devptr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, devptr);
+
+ devptr->chip.of_node = pdev->dev.of_node;
+ devptr->chip.label = "of-unittest-gpio";
+ devptr->chip.base = -1; /* dynamic allocation */
+ devptr->chip.ngpio = 5;
+ devptr->chip.request = unittest_gpio_chip_request;
+
+ ret = gpiochip_add_data(&devptr->chip, NULL);
+
+ unittest(!ret,
+ "gpiochip_add_data() for node @%pOF failed, ret = %d\n", devptr->chip.of_node, ret);
+
+ if (!ret)
+ unittest_gpio_probe_pass_count++;
+ return ret;
+}
+
+static int unittest_gpio_remove(struct platform_device *pdev)
+{
+ struct unittest_gpio_dev *gdev = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
+
+ if (!gdev)
+ return -EINVAL;
+
+ if (gdev->chip.base != -1)
+ gpiochip_remove(&gdev->chip);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(gdev);
+
+ return 0;
+}
+
+static const struct of_device_id unittest_gpio_id[] = {
+ { .compatible = "unittest-gpio", },
+ {}
+};
+
+static struct platform_driver unittest_gpio_driver = {
+ .probe = unittest_gpio_probe,
+ .remove = unittest_gpio_remove,
+ .driver = {
+ .name = "unittest-gpio",
+ .of_match_table = of_match_ptr(unittest_gpio_id),
+ },
+};
+
+static void __init of_unittest_overlay_gpio(void)
+{
+ int chip_request_count;
+ int probe_pass_count;
+ int ret;
+
+ /*
+ * tests: apply overlays before registering driver
+ * Similar to installing a driver as a module, the
+ * driver is registered after applying the overlays.
+ *
+ * The overlays are applied by overlay_data_apply()
+ * instead of of_unittest_apply_overlay() so that they
+ * will not be tracked. Thus they will not be removed
+ * by of_unittest_destroy_tracked_overlays().
+ *
+ * - apply overlay_gpio_01
+ * - apply overlay_gpio_02a
+ * - apply overlay_gpio_02b
+ * - register driver
+ *
+	 * registering the driver will result in
+ * - probe and processing gpio hog for overlay_gpio_01
+ * - probe for overlay_gpio_02a
+	 * - processing gpio hog for overlay_gpio_02b
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ /*
+ * overlay_gpio_01 contains gpio node and child gpio hog node
+ * overlay_gpio_02a contains gpio node
+ * overlay_gpio_02b contains child gpio hog node
+ */
+
+ unittest(overlay_data_apply("overlay_gpio_01", NULL),
+ "Adding overlay 'overlay_gpio_01' failed\n");
+
+ unittest(overlay_data_apply("overlay_gpio_02a", NULL),
+ "Adding overlay 'overlay_gpio_02a' failed\n");
+
+ unittest(overlay_data_apply("overlay_gpio_02b", NULL),
+ "Adding overlay 'overlay_gpio_02b' failed\n");
+
+ /*
+ * messages are the result of the probes, after the
+ * driver is registered
+ */
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-B-input) hogged as input\n");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-A-input) hogged as input\n");
+
+ ret = platform_driver_register(&unittest_gpio_driver);
+ if (unittest(ret == 0, "could not register unittest gpio driver\n"))
+ return;
+
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-A-input) hogged as input\n");
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-B-input) hogged as input\n");
+
+ unittest(probe_pass_count + 2 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ unittest(chip_request_count + 2 == unittest_gpio_chip_request_count,
+		 "unittest_gpio_chip_request() called %d times (expected 2 times)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+
+ /*
+ * tests: apply overlays after registering driver
+ *
+	 * Similar to a driver built into the kernel, the
+ * driver is registered before applying the overlays.
+ *
+ * overlay_gpio_03 contains gpio node and child gpio hog node
+ *
+ * - apply overlay_gpio_03
+ *
+	 * applying the overlay will result in
+ * - probe and processing gpio hog.
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-D-input) hogged as input\n");
+
+ /* overlay_gpio_03 contains gpio node and child gpio hog node */
+
+ unittest(overlay_data_apply("overlay_gpio_03", NULL),
+ "Adding overlay 'overlay_gpio_03' failed\n");
+
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-D-input) hogged as input\n");
+
+ unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+
+ /*
+ * overlay_gpio_04a contains gpio node
+ *
+ * - apply overlay_gpio_04a
+ *
+	 * applying the overlay will result in
+ * - probe for overlay_gpio_04a
+ */
+
+ probe_pass_count = unittest_gpio_probe_pass_count;
+ chip_request_count = unittest_gpio_chip_request_count;
+
+ /* overlay_gpio_04a contains gpio node */
+
+ unittest(overlay_data_apply("overlay_gpio_04a", NULL),
+ "Adding overlay 'overlay_gpio_04a' failed\n");
+
+ unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
+ "unittest_gpio_probe() failed or not called\n");
+
+ /*
+ * overlay_gpio_04b contains child gpio hog node
+ *
+ * - apply overlay_gpio_04b
+ *
+	 * applying the overlay will result in
+	 * - processing gpio hog for overlay_gpio_04b
+ */
+
+ EXPECT_BEGIN(KERN_INFO,
+ "GPIO line <<int>> (line-C-input) hogged as input\n");
+
+ /* overlay_gpio_04b contains child gpio hog node */
+
+ unittest(overlay_data_apply("overlay_gpio_04b", NULL),
+ "Adding overlay 'overlay_gpio_04b' failed\n");
+
+ EXPECT_END(KERN_INFO,
+ "GPIO line <<int>> (line-C-input) hogged as input\n");
+
+ unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
+ "unittest_gpio_chip_request() called %d times (expected 1 time)\n",
+ unittest_gpio_chip_request_count - chip_request_count);
+}
+
+#else
+
+static void __init of_unittest_overlay_gpio(void)
+{
+ /* skip tests */
+}
+
+#endif
+
#if IS_BUILTIN(CONFIG_I2C)
/* get the i2c client device instantiated at the path */
@@ -1511,19 +1852,27 @@ static const char *overlay_name_from_nr(int nr)
static const char *bus_path = "/testcase-data/overlay-node/test-bus";
-/* it is guaranteed that overlay ids are assigned in sequence */
+/* FIXME: it is NOT guaranteed that overlay ids are assigned in sequence */
+
#define MAX_UNITTEST_OVERLAYS 256
static unsigned long overlay_id_bits[BITS_TO_LONGS(MAX_UNITTEST_OVERLAYS)];
static int overlay_first_id = -1;
+static long of_unittest_overlay_tracked(int id)
+{
+ if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ return 0;
+ return overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id);
+}
+
static void of_unittest_track_overlay(int id)
{
if (overlay_first_id < 0)
overlay_first_id = id;
id -= overlay_first_id;
- /* we shouldn't need that many */
- BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ return;
overlay_id_bits[BIT_WORD(id)] |= BIT_MASK(id);
}
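
The tracked/track/untrack helpers share one indexing scheme over the
overlay_id_bits[] array: BIT_WORD(id) picks the unsigned long holding the bit
(id / BITS_PER_LONG) and BIT_MASK(id) the bit within it
(1UL << (id % BITS_PER_LONG)). For example, on a 64-bit build:

	/* id == 67 after subtracting overlay_first_id */
	BIT_WORD(67);	/* 67 / 64 == 1  -> overlay_id_bits[1] */
	BIT_MASK(67);	/* 1UL << (67 % 64) == 1UL << 3 */
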
@@ -1532,7 +1881,8 @@ static void of_unittest_untrack_overlay(int id)
if (overlay_first_id < 0)
return;
id -= overlay_first_id;
- BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ return;
overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
}
@@ -1548,7 +1898,7 @@ static void of_unittest_destroy_tracked_overlays(void)
defers = 0;
/* remove in reverse order */
for (id = MAX_UNITTEST_OVERLAYS - 1; id >= 0; id--) {
- if (!(overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id)))
+ if (!of_unittest_overlay_tracked(id))
continue;
ovcs_id = id + overlay_first_id;
@@ -1565,7 +1915,7 @@ static void of_unittest_destroy_tracked_overlays(void)
continue;
}
- overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
+ of_unittest_untrack_overlay(id);
}
} while (defers > 0);
}
@@ -1626,7 +1976,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
- int ret, ovcs_id;
+ int ret, ovcs_id, save_id;
/* unittest device must be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -1654,6 +2004,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
return -EINVAL;
}
+ save_id = ovcs_id;
ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "%s failed to be destroyed @\"%s\"\n",
@@ -1661,6 +2012,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
unittest_path(unittest_nr, ovtype));
return ret;
}
+ of_unittest_untrack_overlay(save_id);
/* unittest device must be again in before state */
if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
@@ -1677,8 +2029,18 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
/* test activation of device */
static void __init of_unittest_overlay_0(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status");
+
/* device should enable */
- if (of_unittest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY))
+ ret = of_unittest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 0);
@@ -1687,28 +2049,58 @@ static void __init of_unittest_overlay_0(void)
/* test deactivation of device */
static void __init of_unittest_overlay_1(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status");
+
/* device should disable */
- if (of_unittest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY))
+ ret = of_unittest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 1);
+
}
/* test activation of device */
static void __init of_unittest_overlay_2(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status");
+
/* device should enable */
- if (of_unittest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY))
- return;
+ ret = of_unittest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY);
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status");
+
+ if (ret)
+ return;
unittest(1, "overlay test %d passed\n", 2);
}
/* test deactivation of device */
static void __init of_unittest_overlay_3(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status");
+
/* device should disable */
- if (of_unittest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY))
+ ret = of_unittest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 3);
@@ -1727,8 +2119,18 @@ static void __init of_unittest_overlay_4(void)
/* test overlay apply/revert sequence */
static void __init of_unittest_overlay_5(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status");
+
/* device should disable */
- if (of_unittest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY))
+ ret = of_unittest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 5);
@@ -1742,6 +2144,8 @@ static void __init of_unittest_overlay_6(void)
int before = 0, after = 1;
const char *overlay_name;
+ int ret;
+
/* unittest device must be in before state */
for (i = 0; i < 2; i++) {
if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
@@ -1756,18 +2160,41 @@ static void __init of_unittest_overlay_6(void)
}
/* apply the overlays */
- for (i = 0; i < 2; i++) {
- overlay_name = overlay_name_from_nr(overlay_nr + i);
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 0);
+
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
- if (!overlay_data_apply(overlay_name, &ovcs_id)) {
- unittest(0, "could not apply overlay \"%s\"\n",
- overlay_name);
+ if (!ret) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
+ return;
+ }
+ ov_id[0] = ovcs_id;
+ of_unittest_track_overlay(ov_id[0]);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 1);
+
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
+
+ if (!ret) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
- }
- ov_id[i] = ovcs_id;
- of_unittest_track_overlay(ov_id[i]);
}
+ ov_id[1] = ovcs_id;
+ of_unittest_track_overlay(ov_id[1]);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
+
for (i = 0; i < 2; i++) {
/* unittest device must be in after state */
@@ -1808,6 +2235,7 @@ static void __init of_unittest_overlay_6(void)
}
unittest(1, "overlay test %d passed\n", 6);
+
}
/* test overlay application in sequence */
@@ -1816,26 +2244,65 @@ static void __init of_unittest_overlay_8(void)
int i, ov_id[2], ovcs_id;
int overlay_nr = 8, unittest_nr = 8;
const char *overlay_name;
+ int ret;
/* we don't care about device state in this test */
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status");
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 0);
+
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
+ if (!ret)
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status");
+
+ if (!ret)
+ return;
+
+ ov_id[0] = ovcs_id;
+ of_unittest_track_overlay(ov_id[0]);
+
+ overlay_name = overlay_name_from_nr(overlay_nr + 1);
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo");
+
/* apply the overlays */
- for (i = 0; i < 2; i++) {
+ ret = overlay_data_apply(overlay_name, &ovcs_id);
- overlay_name = overlay_name_from_nr(overlay_nr + i);
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo");
- if (!overlay_data_apply(overlay_name, &ovcs_id)) {
- unittest(0, "could not apply overlay \"%s\"\n",
- overlay_name);
- return;
- }
- ov_id[i] = ovcs_id;
- of_unittest_track_overlay(ov_id[i]);
+ if (!ret) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
+ return;
}
+ ov_id[1] = ovcs_id;
+ of_unittest_track_overlay(ov_id[1]);
+
/* now try to remove first overlay (it should fail) */
ovcs_id = ov_id[0];
- if (!of_overlay_remove(&ovcs_id)) {
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: overlay #6 is not topmost");
+
+ ret = of_overlay_remove(&ovcs_id);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: overlay #6 is not topmost");
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
+
+ if (!ret) {
unittest(0, "%s was destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + 0),
unittest_path(unittest_nr,
@@ -1867,6 +2334,7 @@ static void __init of_unittest_overlay_10(void)
/* device should disable */
ret = of_unittest_apply_overlay_check(10, 10, 0, 1, PDEV_OVERLAY);
+
if (unittest(ret == 0,
"overlay test %d failed; overlay application\n", 10))
return;
@@ -1890,6 +2358,7 @@ static void __init of_unittest_overlay_11(void)
/* device should disable */
ret = of_unittest_apply_revert_overlay_check(11, 11, 0, 1,
PDEV_OVERLAY);
+
unittest(ret == 0, "overlay test %d failed; overlay apply\n", 11);
}
@@ -2120,12 +2589,21 @@ static int of_unittest_overlay_i2c_init(void)
return ret;
ret = platform_driver_register(&unittest_i2c_bus_driver);
+
if (unittest(ret == 0,
"could not register unittest i2c bus driver\n"))
return ret;
#if IS_BUILTIN(CONFIG_I2C_MUX)
+
+ EXPECT_BEGIN(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 2");
+
ret = i2c_add_driver(&unittest_i2c_mux_driver);
+
+ EXPECT_END(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 2");
+
if (unittest(ret == 0,
"could not register unittest i2c mux driver\n"))
return ret;
@@ -2145,8 +2623,18 @@ static void of_unittest_overlay_i2c_cleanup(void)
static void __init of_unittest_overlay_i2c_12(void)
{
+ int ret;
+
/* device should enable */
- if (of_unittest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY))
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status");
+
+ ret = of_unittest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 12);
@@ -2155,8 +2643,18 @@ static void __init of_unittest_overlay_i2c_12(void)
/* test deactivation of device */
static void __init of_unittest_overlay_i2c_13(void)
{
+ int ret;
+
+ EXPECT_BEGIN(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status");
+
/* device should disable */
- if (of_unittest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY))
+ ret = of_unittest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 13);
@@ -2169,8 +2667,18 @@ static void of_unittest_overlay_i2c_14(void)
static void __init of_unittest_overlay_i2c_15(void)
{
+ int ret;
+
/* device should enable */
- if (of_unittest_apply_overlay_check(15, 15, 0, 1, I2C_OVERLAY))
+ EXPECT_BEGIN(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 3");
+
+ ret = of_unittest_apply_overlay_check(15, 15, 0, 1, I2C_OVERLAY);
+
+ EXPECT_END(KERN_INFO,
+ "i2c i2c-1: Added multiplexed i2c bus 3");
+
+ if (ret)
return;
unittest(1, "overlay test %d passed\n", 15);
@@ -2242,6 +2750,8 @@ static void __init of_unittest_overlay(void)
of_unittest_overlay_i2c_cleanup();
#endif
+ of_unittest_overlay_gpio();
+
of_unittest_destroy_tracked_overlays();
out:
@@ -2295,6 +2805,12 @@ OVERLAY_INFO_EXTERN(overlay_11);
OVERLAY_INFO_EXTERN(overlay_12);
OVERLAY_INFO_EXTERN(overlay_13);
OVERLAY_INFO_EXTERN(overlay_15);
+OVERLAY_INFO_EXTERN(overlay_gpio_01);
+OVERLAY_INFO_EXTERN(overlay_gpio_02a);
+OVERLAY_INFO_EXTERN(overlay_gpio_02b);
+OVERLAY_INFO_EXTERN(overlay_gpio_03);
+OVERLAY_INFO_EXTERN(overlay_gpio_04a);
+OVERLAY_INFO_EXTERN(overlay_gpio_04b);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_node);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_prop);
OVERLAY_INFO_EXTERN(overlay_bad_phandle);
@@ -2319,6 +2835,12 @@ static struct overlay_info overlays[] = {
OVERLAY_INFO(overlay_12, 0),
OVERLAY_INFO(overlay_13, 0),
OVERLAY_INFO(overlay_15, 0),
+ OVERLAY_INFO(overlay_gpio_01, 0),
+ OVERLAY_INFO(overlay_gpio_02a, 0),
+ OVERLAY_INFO(overlay_gpio_02b, 0),
+ OVERLAY_INFO(overlay_gpio_03, 0),
+ OVERLAY_INFO(overlay_gpio_04a, 0),
+ OVERLAY_INFO(overlay_gpio_04b, 0),
OVERLAY_INFO(overlay_bad_add_dup_node, -EINVAL),
OVERLAY_INFO(overlay_bad_add_dup_prop, -EINVAL),
OVERLAY_INFO(overlay_bad_phandle, -EINVAL),
@@ -2470,6 +2992,7 @@ static __init void of_unittest_overlay_high_level(void)
struct device_node *overlay_base_symbols;
struct device_node **pprev;
struct property *prop;
+ int ret;
if (!overlay_base_root) {
unittest(0, "overlay_base_root not initialized\n");
@@ -2584,15 +3107,86 @@ static __init void of_unittest_overlay_high_level(void)
/* now do the normal overlay usage test */
- unittest(overlay_data_apply("overlay", NULL),
- "Adding overlay 'overlay' failed\n");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right");
+
+ ret = overlay_data_apply("overlay", NULL);
+
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status");
+
+ unittest(ret, "Adding overlay 'overlay' failed\n");
+
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name");
unittest(overlay_data_apply("overlay_bad_add_dup_node", NULL),
"Adding overlay 'overlay_bad_add_dup_node' failed\n");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller");
+
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+ EXPECT_BEGIN(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/rpm_avail");
+
unittest(overlay_data_apply("overlay_bad_add_dup_prop", NULL),
"Adding overlay 'overlay_bad_add_dup_prop' failed\n");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/rpm_avail");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+ EXPECT_END(KERN_ERR,
+ "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/motor-1/rpm_avail");
+
unittest(overlay_data_apply("overlay_bad_phandle", NULL),
"Adding overlay 'overlay_bad_phandle' failed\n");
@@ -2616,6 +3210,8 @@ static int __init of_unittest(void)
struct device_node *np;
int res;
+ pr_info("start of unittest - you will see error messages\n");
+
/* adding data for unittest */
if (IS_ENABLED(CONFIG_UML))
@@ -2634,7 +3230,6 @@ static int __init of_unittest(void)
}
of_node_put(np);
- pr_info("start of unittest - you will see error messages\n");
of_unittest_check_tree_linkage();
of_unittest_check_phandles();
of_unittest_find_node_by_name();
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 9d00a24277aa..f96e5eaee87e 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -243,11 +243,6 @@ static irqreturn_t dummy_irq2_handler(int _, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction irq2_action = {
- .handler = dummy_irq2_handler,
- .name = "cascade",
-};
-
static void init_eisa_pic(void)
{
unsigned long flags;
@@ -335,7 +330,8 @@ static int __init eisa_probe(struct parisc_device *dev)
}
/* Reserve IRQ2 */
- setup_irq(2, &irq2_action);
+ if (request_irq(2, dummy_irq2_handler, 0, "cascade", NULL))
+ pr_err("Failed to request irq 2 (cascade)\n");
for (i = 0; i < 16; i++) {
irq_set_chip_and_handler(i, &eisa_interrupt_type,
handle_simple_irq);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 20bf00f587bd..91bfdb784829 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -213,16 +213,6 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
-config PCIE_MOBIVEIL
- bool "Mobiveil AXI PCIe controller"
- depends on ARCH_ZYNQMP || COMPILE_TEST
- depends on OF
- depends on PCI_MSI_IRQ_DOMAIN
- help
- Say Y here if you want to enable support for the Mobiveil AXI PCIe
- Soft IP. It has up to 8 outbound and inbound windows
- for address translation and it is a PCIe Gen4 IP.
-
config PCIE_TANGO_SMP8759
bool "Tango SMP8759 PCIe controller (DANGEROUS)"
depends on ARCH_TANGO && PCI_MSI && OF
@@ -269,5 +259,6 @@ config PCI_HYPERV_INTERFACE
have a common interface with the Hyper-V PCI frontend driver.
source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/mobiveil/Kconfig"
source "drivers/pci/controller/cadence/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 01b2502a5323..158c59771824 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -25,12 +25,12 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
-obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
+obj-y += mobiveil/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 0830dfcfa43a..03dcaf65d159 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -248,14 +248,37 @@ config PCI_MESON
implement the driver.
config PCIE_TEGRA194
- tristate "NVIDIA Tegra194 (and later) PCIe controller"
+ tristate
+
+config PCIE_TEGRA194_HOST
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode"
depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in host mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_TEGRA194_EP
+ tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
help
- Say Y here if you want support for DesignWare core based PCIe host
- controller found in NVIDIA Tegra194 SoC.
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in endpoint mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe controllers"
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 9bf7fa99b103..3b0e58f2de58 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -215,10 +215,6 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
- .host_init = dra7xx_pcie_host_init,
-};
-
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -233,43 +229,77 @@ static const struct irq_domain_ops intx_domain_ops = {
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+ unsigned long val;
+ int pos, irq;
- if (!pcie_intc_node) {
- dev_err(dev, "No PCIe Intc node found\n");
- return -ENODEV;
- }
+ val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (index * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!val)
+ return 0;
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
- of_node_put(pcie_intc_node);
- if (!dra7xx->irq_domain) {
- dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return -ENODEV;
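+ /* Handle every vector pending in this controller's status register */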
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
+ while (pos != MAX_MSI_IRQS_PER_CTRL) {
+ irq = irq_find_mapping(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
+ generic_handle_irq(irq);
+ pos++;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
}
- return 0;
+ return 1;
}
-static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
{
- struct dra7xx_pcie *dra7xx = arg;
- struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret, i, count, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /*
+ * Need to make sure all MSI status bits read 0 before exiting.
+ * Otherwise, new MSI IRQs are not registered by the wrapper. Put an
+ * upper bound on the loop and bail out in case of an IRQ flood to
+ * avoid locking up the system in interrupt context.
+ */
+ count = 0;
+ do {
+ ret = 0;
+
+ for (i = 0; i < num_ctrls; i++)
+ ret |= dra7xx_pcie_handle_msi(pp, i);
+ count++;
+ } while (ret && count <= 1000);
+
+ if (count > 1000)
+ dev_warn_ratelimited(pci->dev,
+ "Too many MSI IRQs to handle\n");
+}
+
+static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dra7xx_pcie *dra7xx;
+ struct dw_pcie *pci;
+ struct pcie_port *pp;
unsigned long reg;
u32 virq, bit;
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ pci = to_dw_pcie_from_pp(pp);
+ dra7xx = to_dra7xx_pcie(pci);
+
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
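+ /* Ack the status up front so an MSI raised while we handle this one re-triggers */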
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
switch (reg) {
case MSI:
- dw_handle_msi_irq(pp);
+ dra7xx_pcie_handle_msi_irq(pp);
break;
case INTA:
case INTB:
@@ -283,9 +313,7 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
break;
}
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
-
- return IRQ_HANDLED;
+ chained_irq_exit(chip, desc);
}
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
@@ -347,6 +375,145 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
+ pp);
+ dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
+ if (!dra7xx->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u64 msi_target;
+
+ msi_target = (u64)pp->msi_data;
+
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+
+ msg->data = d->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)d->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
+ const struct cpumask *mask,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static void dra7xx_pcie_bottom_mask(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
+ pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
+ pp->irq_mask[ctrl]);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dra7xx_pcie_bottom_ack(struct irq_data *d)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int res, bit, ctrl;
+
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
+}
+
+static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
+ .name = "DRA7XX-PCI-MSI",
+ .irq_ack = dra7xx_pcie_bottom_ack,
+ .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
+ .irq_set_affinity = dra7xx_pcie_msi_set_affinity,
+ .irq_mask = dra7xx_pcie_bottom_mask,
+ .irq_unmask = dra7xx_pcie_bottom_unmask,
+};
+
+static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 ctrl, num_ctrls;
+
+ pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ pp->irq_mask[ctrl] = ~0;
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+
+ return dw_pcie_allocate_domains(pp);
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+ .host_init = dra7xx_pcie_host_init,
+ .msi_host_init = dra7xx_pcie_msi_host_init,
+};
+
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -467,14 +634,6 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return pp->irq;
}
- ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "dra7-pcie-msi", dra7xx);
- if (ret) {
- dev_err(dev, "failed to request irq\n");
- return ret;
- }
-
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c8c702c494a2..790679fdfa48 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -959,6 +959,9 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
case PCI_EPC_IRQ_MSI:
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
break;
+ case PCI_EPC_IRQ_MSIX:
+ dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ break;
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
@@ -970,7 +973,7 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features ks_pcie_am654_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
+ .msix_capable = true,
.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
.bar_fixed_64bit = 1 << BAR_0,
.bar_fixed_size[2] = SZ_1M,
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 3772b02a5c55..3715dceca1bf 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -66,7 +66,6 @@
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
-#define MESON_PCIE_PHY_POWERUP 0x1c
#define PCIE_RESET_DELAY 500
#define PCIE_SHARED_RESET 1
#define PCIE_NORMAL_RESET 0
@@ -81,26 +80,19 @@ enum pcie_data_rate {
struct meson_pcie_mem_res {
void __iomem *elbi_base;
void __iomem *cfg_base;
- void __iomem *phy_base;
};
struct meson_pcie_clk_res {
struct clk *clk;
- struct clk *mipi_gate;
struct clk *port_clk;
struct clk *general_clk;
};
struct meson_pcie_rc_reset {
- struct reset_control *phy;
struct reset_control *port;
struct reset_control *apb;
};
-struct meson_pcie_param {
- bool has_shared_phy;
-};
-
struct meson_pcie {
struct dw_pcie pci;
struct meson_pcie_mem_res mem_res;
@@ -108,7 +100,6 @@ struct meson_pcie {
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
struct phy *phy;
- const struct meson_pcie_param *param;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -130,13 +121,6 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
- if (!mp->param->has_shared_phy) {
- mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
- if (IS_ERR(mrst->phy))
- return PTR_ERR(mrst->phy);
- reset_control_deassert(mrst->phy);
- }
-
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
return PTR_ERR(mrst->port);
@@ -162,22 +146,6 @@ static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
return devm_ioremap_resource(dev, res);
}
-static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
-{
- struct device *dev = mp->pci.dev;
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
- if (!res) {
- dev_err(dev, "No REG resource %s\n", id);
- return ERR_PTR(-ENXIO);
- }
-
- return devm_ioremap(dev, res->start, resource_size(res));
-}
-
static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
@@ -189,14 +157,6 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
if (IS_ERR(mp->mem_res.cfg_base))
return PTR_ERR(mp->mem_res.cfg_base);
- /* Meson AXG SoC has two PCI controllers use same phy register */
- if (!mp->param->has_shared_phy) {
- mp->mem_res.phy_base =
- meson_pcie_get_mem_shared(pdev, mp, "phy");
- if (IS_ERR(mp->mem_res.phy_base))
- return PTR_ERR(mp->mem_res.phy_base);
- }
-
return 0;
}
@@ -204,37 +164,33 @@ static int meson_pcie_power_on(struct meson_pcie *mp)
{
int ret = 0;
- if (mp->param->has_shared_phy) {
- ret = phy_init(mp->phy);
- if (ret)
- return ret;
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
- ret = phy_power_on(mp->phy);
- if (ret) {
- phy_exit(mp->phy);
- return ret;
- }
- } else
- writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
return 0;
}
+static void meson_pcie_power_off(struct meson_pcie *mp)
+{
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+}
+
static int meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
int ret = 0;
- if (mp->param->has_shared_phy) {
- ret = phy_reset(mp->phy);
- if (ret)
- return ret;
- } else {
- reset_control_assert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- reset_control_deassert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- }
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
@@ -286,12 +242,6 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- if (!mp->param->has_shared_phy) {
- res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
- if (IS_ERR(res->mipi_gate))
- return PTR_ERR(res->mipi_gate);
- }
-
res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
@@ -562,7 +512,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int meson_pcie_probe(struct platform_device *pdev)
{
- const struct meson_pcie_param *match_data;
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct meson_pcie *mp;
@@ -576,17 +525,10 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- match_data = of_device_get_match_data(dev);
- if (!match_data) {
- dev_err(dev, "failed to get match data\n");
- return -ENODEV;
- }
- mp->param = match_data;
-
- if (mp->param->has_shared_phy) {
- mp->phy = devm_phy_get(dev, "pcie");
- if (IS_ERR(mp->phy))
- return PTR_ERR(mp->phy);
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy)) {
+ dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy));
+ return PTR_ERR(mp->phy);
}
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -636,30 +578,16 @@ static int meson_pcie_probe(struct platform_device *pdev)
return 0;
err_phy:
- if (mp->param->has_shared_phy) {
- phy_power_off(mp->phy);
- phy_exit(mp->phy);
- }
-
+ meson_pcie_power_off(mp);
return ret;
}
-static struct meson_pcie_param meson_pcie_axg_param = {
- .has_shared_phy = false,
-};
-
-static struct meson_pcie_param meson_pcie_g12a_param = {
- .has_shared_phy = true,
-};
-
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
- .data = &meson_pcie_axg_param,
},
{
.compatible = "amlogic,g12a-pcie",
- .data = &meson_pcie_g12a_param,
},
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index cfeccd7e9fff..1cdcbd102ce8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -18,6 +18,15 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
pci_epc_linkup(epc);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_init_notify(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
int flags)
@@ -125,6 +134,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
clear_bit(atu_index, ep->ib_window_map);
+ ep->epf_bar[bar] = NULL;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
@@ -158,6 +168,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
dw_pcie_writel_dbi(pci, reg + 4, 0);
}
+ ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -269,7 +280,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
return val;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -278,12 +290,22 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
if (!ep->msix_cap)
return -EINVAL;
+ dw_pcie_dbi_ro_wr_en(pci);
+
reg = ep->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
- dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, reg, val);
+
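+ /* Program the MSI-X table offset/BIR; the PBA lives right after the table */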
+ reg = ep->msix_cap + PCI_MSIX_TABLE;
+ val = offset | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
+ reg = ep->msix_cap + PCI_MSIX_PBA;
+ val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_writel_dbi(pci, reg, val);
+
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -409,55 +431,41 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
- u16 tbl_offset, bir;
- u32 bar_addr_upper, bar_addr_lower;
- u32 msg_addr_upper, msg_addr_lower;
+ struct pci_epf_bar *epf_bar;
u32 reg, msg_data, vec_ctrl;
- u64 tbl_addr, msg_addr, reg_u64;
- void __iomem *msix_tbl;
+ unsigned int aligned_offset;
+ u32 tbl_offset;
+ u64 msg_addr;
int ret;
+ u8 bir;
reg = ep->msix_cap + PCI_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- reg = PCI_BASE_ADDRESS_0 + (4 * bir);
- bar_addr_upper = 0;
- bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
- reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
- if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
- bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
-
- tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
- tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
- tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
-
- msix_tbl = ioremap(ep->phys_base + tbl_addr,
- PCI_MSIX_ENTRY_SIZE);
- if (!msix_tbl)
- return -EINVAL;
-
- msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
- msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
- msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
- vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ epf_bar = ep->epf_bar[bir];
+ msix_tbl = epf_bar->addr;
+ msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset);
- iounmap(msix_tbl);
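+ /* The MSI-X table now lives in an EPF-allocated BAR, so read the entry directly */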
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+ vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
return -EPERM;
}
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+ aligned_offset = msg_addr & (epc->mem->page_size - 1);
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->page_size);
+ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem);
+ writel(msg_data, ep->msi_mem + aligned_offset);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
@@ -492,19 +500,54 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int offset;
+ unsigned int nbars;
+ u8 hdr_type;
+ u32 reg;
int i;
+
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+ hdr_type);
+ return -EIO;
+ }
+
+ ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+ ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
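+ /* Clear each Resizable BAR capability so no alternate BAR sizes are advertised */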
+ dw_pcie_dbi_ro_wr_en(pci);
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
+ dw_pcie_setup(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
int ret;
- u32 reg;
void *addr;
- u8 hdr_type;
- unsigned int nbars;
- unsigned int offset;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ const struct pci_epc_features *epc_features;
if (!pci->dbi_base || !pci->dbi_base2) {
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
@@ -563,13 +606,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ep->ops->ep_init)
ep->ops->ep_init(ep);
- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
- if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
- dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
- hdr_type);
- return -EIO;
- }
-
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
@@ -587,23 +623,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
}
- ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
-
- ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
-
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
- dw_pcie_dbi_ro_wr_en(pci);
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
- dw_pcie_dbi_ro_wr_dis(pci);
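+ /* Controllers with a core_init_notifier finish init later via dw_pcie_ep_init_complete() */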
+ if (ep->ops->get_features) {
+ epc_features = ep->ops->get_features(ep);
+ if (epc_features->core_init_notifier)
+ return 0;
}
- dw_pcie_setup(pci);
-
- return 0;
+ return dw_pcie_ep_init_complete(ep);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index a22ea5982817..d6e1f397e6b0 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -233,6 +233,7 @@ struct dw_pcie_ep {
phys_addr_t msi_mem_phys;
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
struct dw_pcie_ops {
@@ -411,6 +412,8 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
+void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -428,6 +431,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return 0;
}
+static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+{
+}
+
static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 5ea527a6bd9f..138e1a2d21cc 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1439,7 +1439,13 @@ static void qcom_fixup_class(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index cbe95f0ea0ca..ae30a2fd3716 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -53,6 +54,7 @@
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
+#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
@@ -60,19 +62,26 @@
#define APPL_INTR_STATUS_L0 0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
+#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
+#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
#define APPL_INTR_EN_L1_0_0 0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
+#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
#define APPL_INTR_STATUS_L1_0_0 0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
+#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
+#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
#define APPL_INTR_STATUS_L1_1 0x2C
#define APPL_INTR_STATUS_L1_2 0x30
#define APPL_INTR_STATUS_L1_3 0x34
#define APPL_INTR_STATUS_L1_6 0x3C
#define APPL_INTR_STATUS_L1_7 0x40
+#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
#define APPL_INTR_EN_L1_8_0 0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
@@ -103,8 +112,12 @@
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+#define APPL_MSI_CTRL_1 0xAC
+
#define APPL_MSI_CTRL_2 0xB0
+#define APPL_LEGACY_INTX 0xB8
+
#define APPL_LTR_MSG_1 0xC4
#define LTR_MSG_REQ BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT 16
@@ -205,6 +218,13 @@
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define MSIX_ADDR_MATCH_LOW_OFF 0x940
+#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
+#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
+
+#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
+#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
+
#define PORT_LOGIC_MSIX_DOORBELL 0x948
#define CAP_SPCIE_CAP_OFF 0x154
@@ -223,6 +243,13 @@
#define GEN3_CORE_CLK_FREQ 250000000
#define GEN4_CORE_CLK_FREQ 500000000
+#define LTR_MSG_TIMEOUT (100 * 1000)
+
+#define PERST_DEBOUNCE_TIME (5 * 1000)
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
static const unsigned int pcie_gen_freq[] = {
GEN1_CORE_CLK_FREQ,
GEN2_CORE_CLK_FREQ,
@@ -260,6 +287,8 @@ struct tegra_pcie_dw {
struct dw_pcie pci;
struct tegra_bpmp *bpmp;
+ enum dw_pcie_device_mode mode;
+
bool supports_clkreq;
bool enable_cdm_check;
bool link_state;
@@ -283,6 +312,16 @@ struct tegra_pcie_dw {
struct phy **phys;
struct dentry *debugfs;
+
+ /* Endpoint mode specific */
+ struct gpio_desc *pex_rst_gpiod;
+ struct gpio_desc *pex_refclk_sel_gpiod;
+ unsigned int pex_rst_irq;
+ int ep_state;
+};
+
+struct tegra_pcie_dw_of_data {
+ enum dw_pcie_device_mode mode;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -339,8 +378,9 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
}
}
-static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
+static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
+ struct tegra_pcie_dw *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
u32 val, tmp;
@@ -411,11 +451,121 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
return IRQ_HANDLED;
}
-static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+}
+
+static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed;
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
- return tegra_pcie_rp_irq_handler(pcie);
+ /* If EP doesn't advertise L1SS, just return */
+ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ return IRQ_HANDLED;
+
+ /* Check if BME is set to '1' */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ if (val & PCI_COMMAND_MASTER) {
+ ktime_t timeout;
+
+ /* 110us for both snoop and no-snoop */
+ val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
+ val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
+ /* Send LTR upstream */
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+
+ timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
+ for (;;) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
+ break;
+ if (ktime_after(ktime_get(), timeout))
+ break;
+ usleep_range(1000, 1100);
+ }
+ if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
+ dev_err(pcie->dev, "Failed to send LTR message\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ int spurious = 1;
+ u32 val, tmp;
+
+ val = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
+
+ if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+ if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ tmp = appl_readl(pcie, APPL_LINK_STATUS);
+ if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ dw_pcie_ep_linkup(ep);
+ }
+ }
+
+ spurious = 0;
+ }
+
+ if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
+
+ if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+ }
+
+ if (spurious) {
+ dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+ val);
+ appl_writel(pcie, val, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
}
static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -884,8 +1034,26 @@ static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
pp->num_vectors = MAX_MSI_IRQS;
}
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
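+ /* In EP mode link-up is driven by PERST# deassertion; just arm its IRQ here */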
+ enable_irq(pcie->pex_rst_irq);
+
+ return 0;
+}
+
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ disable_irq(pcie->pex_rst_irq);
+}
+
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
.link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
};
static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
@@ -986,6 +1154,40 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
pcie->enable_cdm_check =
of_property_read_bool(np, "snps,enable-cdm-check");
+ if (pcie->mode == DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* Endpoint mode specific DT entries */
+ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie->pex_rst_gpiod)) {
+ int err = PTR_ERR(pcie->pex_rst_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get PERST GPIO: %d\n"),
+ err);
+ return err;
+ }
+
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
+ int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
+ err);
+ pcie->pex_refclk_sel_gpiod = NULL;
+ }
+
return 0;
}
@@ -1017,6 +1219,34 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (enable) {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
+ req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
+ } else {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+ req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct pcie_port *pp = &pcie->pci.pp;
@@ -1427,8 +1657,396 @@ fail_pm_get_sync:
return ret;
}
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_DISABLED)
+ return;
+
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (ret)
+ dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+
+ reset_control_assert(pcie->core_rst);
+
+ tegra_pcie_disable_phy(pcie);
+
+ reset_control_assert(pcie->core_apb_rst);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ pm_runtime_put_sync(pcie->dev);
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+ dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
+}
+
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = pcie->dev;
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ return;
+ }
+
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
+ goto fail_pll_init;
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk_enable;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
+ goto fail_core_apb_rst;
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Clear any stale interrupt statuses */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ /* configure this core for EP mode operation */
+ val = appl_readl(pcie, APPL_DM_TYPE);
+ val &= ~APPL_DM_TYPE_MASK;
+ val |= APPL_DM_TYPE_EP;
+ appl_writel(pcie, val, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_SYS_PRE_DET_STATE;
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= APPL_CFG_MISC_SLV_EP_MODE;
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ appl_writel(pcie, pcie->atu_dma_res->start &
+ APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
+ val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+ reset_control_deassert(pcie->core_rst);
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ /* Configure N_FTS & FTS */
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
+ val &= ~(N_FTS_MASK << N_FTS_SHIFT);
+ val |= N_FTS_VAL << N_FTS_SHIFT;
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
+ val &= ~FTS_MASK;
+ val |= FTS_VAL;
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+ /* Configure Max Speed from DT */
+ if (pcie->max_speed && pcie->max_speed != -EINVAL) {
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_SLS;
+ val |= pcie->max_speed;
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
+ val);
+ }
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto fail_init_complete;
+ }
+
+ dw_pcie_ep_init_notify(ep);
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ pcie->ep_state = EP_STATE_ENABLED;
+ dev_dbg(dev, "Initialization of endpoint is completed\n");
+
+ return;
+
+fail_init_complete:
+ reset_control_assert(pcie->core_rst);
+ tegra_pcie_disable_phy(pcie);
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk_enable:
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+
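+ /* The PERST# level decides whether to tear the link down or bring it up */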
+ if (gpiod_get_value(pcie->pex_rst_gpiod))
+ pex_ep_event_pex_rst_assert(pcie);
+ else
+ pex_ep_event_pex_rst_deassert(pcie);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ /* Tegra194 supports only INTA */
+ if (irq > 1)
+ return -EINVAL;
+
+ appl_writel(pcie, 1, APPL_LEGACY_INTX);
+ usleep_range(1000, 2000);
+ appl_writel(pcie, 0, APPL_LEGACY_INTX);
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ if (unlikely(irq > 31))
+ return -EINVAL;
+
+ appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+
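+ /* Writing the vector number to the address-match region makes the controller send the MSI-X message */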
+ writel(irq, ep->msi_mem);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSI:
+ return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
+
+ case PCI_EPC_IRQ_MSIX:
+ return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
+
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features tegra_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = false,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0,
+ .bar_fixed_size[0] = SZ_1M,
+};
+
+static const struct pci_epc_features*
+tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &tegra_pcie_epc_features;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+};
+
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pcie->dev;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ char *name;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+ ep->page_size = SZ_64K;
+
+ ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gpiod_to_irq(pcie->pex_rst_gpiod);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
+ return ret;
+ }
+ pcie->pex_rst_irq = (unsigned int)ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PERST IRQ string\n");
+ return -ENOMEM;
+ }
+
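+ /* Keep the PERST IRQ disabled until start_link() enables it */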
+ irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+
+ ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
+ tegra_pcie_ep_pex_rst_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, (void *)pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PCIe EP work thread string\n");
+ return -ENOMEM;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
+ const struct tegra_pcie_dw_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
struct tegra_pcie_dw *pcie;
@@ -1440,6 +2058,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
int ret;
u32 i;
+ data = of_device_get_match_data(dev);
+
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1449,19 +2069,37 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pci->ops = &tegra_dw_pcie_ops;
pp = &pci->pp;
pcie->dev = &pdev->dev;
+ pcie->mode = (enum dw_pcie_device_mode)data->mode;
ret = tegra_pcie_dw_parse_dt(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to parse device tree: %d\n"),
+ ret);
return ret;
}
ret = tegra_pcie_get_slot_regulators(pcie);
if (ret < 0) {
- dev_err(dev, "Failed to get slot regulators: %d\n", ret);
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to get slot regulators: %d\n"),
+ ret);
return ret;
}
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
+
pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
if (IS_ERR(pcie->pex_ctl_supply)) {
ret = PTR_ERR(pcie->pex_ctl_supply);
@@ -1557,24 +2195,49 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
- IRQF_SHARED, "tegra-pcie-intr", pcie);
- if (ret) {
- dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
- return ret;
- }
-
pcie->bpmp = tegra_bpmp_get(dev);
if (IS_ERR(pcie->bpmp))
return PTR_ERR(pcie->bpmp);
platform_set_drvdata(pdev, pcie);
- ret = tegra_pcie_config_rp(pcie);
- if (ret && ret != -ENOMEDIUM)
- goto fail;
- else
- return 0;
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
+ IRQF_SHARED, "tegra-pcie-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_rp(pcie);
+ if (ret && ret != -ENOMEDIUM)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ case DW_PCIE_EP_TYPE:
+ ret = devm_request_threaded_irq(dev, pp->irq,
+ tegra_pcie_ep_hard_irq,
+ tegra_pcie_ep_irq_thread,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "tegra-pcie-ep-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
+ ret = -EINVAL;
+ }
fail:
tegra_bpmp_put(pcie->bpmp);
@@ -1593,6 +2256,8 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
tegra_bpmp_put(pcie->bpmp);
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
return 0;
}
@@ -1697,9 +2362,22 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
__deinit_controller(pcie);
}
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id tegra_pcie_dw_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
+ .data = &tegra_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra194-pcie-ep",
+ .data = &tegra_pcie_dw_ep_of_data,
},
{},
};
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
new file mode 100644
index 000000000000..a62d247018cf
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Mobiveil PCIe Core Support"
+ depends on PCI
+
+config PCIE_MOBIVEIL
+ bool
+
+config PCIE_MOBIVEIL_HOST
+ bool
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL
+
+config PCIE_MOBIVEIL_PLAT
+ bool "Mobiveil AXI PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want to enable support for the Mobiveil AXI PCIe
+ Soft IP. It provides up to 8 outbound and 8 inbound windows for
+ address translation and is a PCIe Gen4 IP.
+
+config PCIE_LAYERSCAPE_GEN4
+ bool "Freescale Layerscape PCIe Gen4 controller"
+ depends on PCI
+ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_MOBIVEIL_HOST
+ help
+ Say Y here if you want PCIe Gen4 controller support on
+ Layerscape SoCs.
+endmenu
diff --git a/drivers/pci/controller/mobiveil/Makefile b/drivers/pci/controller/mobiveil/Makefile
new file mode 100644
index 000000000000..99d879de32d6
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
+obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
+obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
+obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie-layerscape-gen4.o
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
new file mode 100644
index 000000000000..a6d2190a6753
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe Gen4 host controller driver for NXP Layerscape SoCs
+ *
+ * Copyright 2019-2020 NXP
+ *
+ * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-mobiveil.h"
+
+/* LUT and PF control registers */
+#define PCIE_LUT_OFF 0x80000
+#define PCIE_PF_OFF 0xc0000
+#define PCIE_PF_INT_STAT 0x18
+#define PF_INT_STAT_PABRST BIT(31)
+
+#define PCIE_PF_DBG 0x7fc
+#define PF_DBG_LTSSM_MASK 0x3f
+#define PF_DBG_LTSSM_L0 0x2d /* L0 state */
+#define PF_DBG_WE BIT(31)
+#define PF_DBG_PABR BIT(27)
+
+#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
+
+struct ls_pcie_g4 {
+ struct mobiveil_pcie pci;
+ struct delayed_work dwork;
+ int irq;
+};
+
+static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
+{
+ return ioread32(pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
+}
+
+static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
+ u32 off, u32 val)
+{
+ iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_LUT_OFF + off);
+}
+
+static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
+{
+ return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
+ u32 off, u32 val)
+{
+ iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
+}
+
+static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
+{
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
+ u32 state;
+
+ state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ state = state & PF_DBG_LTSSM_MASK;
+
+ if (state == PF_DBG_LTSSM_L0)
+ return 1;
+
+ return 0;
+}
+
+static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+
+ mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ /* Clear the interrupt status */
+ mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);
+
+ val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
+ PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
+ mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
+}
+
+static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
+{
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ struct device *dev = &mv_pci->pdev->dev;
+ u32 val, act_stat;
+ int to = 100;
+
+ /* Poll for pab_csb_reset to set and PAB activity to clear */
+ do {
+ usleep_range(10, 15);
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
+ act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
+ } while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
+ if (to < 0) {
+ dev_err(dev, "Poll PABRST&PABACT timeout\n");
+ return -EIO;
+ }
+
+ /* clear PEX_RESET bit in PEX_PF0_DBG register */
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_WE;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val |= PF_DBG_PABR;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val &= ~PF_DBG_WE;
+ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+
+ mobiveil_host_init(mv_pci, true);
+
+ to = 100;
+ while (!ls_pcie_g4_link_up(mv_pci) && to--)
+ usleep_range(200, 250);
+ if (to < 0) {
+ dev_err(dev, "PCIe link training timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
+{
+ struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u32 val;
+
+ val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
+ if (!val)
+ return IRQ_NONE;
+
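+ /* Host-initiated reset: mask interrupts and reinitialize from the workqueue */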
+ if (val & PAB_INTP_RESET) {
+ ls_pcie_g4_disable_interrupt(pcie);
+ schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
+ }
+
+ mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
+{
+ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
+ struct platform_device *pdev = mv_pci->pdev;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "intr");
+ if (pcie->irq < 0) {
+ dev_err(dev, "Can't get 'intr' IRQ, errno = %d\n", pcie->irq);
+ return pcie->irq;
+ }
+ ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ls_pcie_g4_reset(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
+ struct mobiveil_pcie *mv_pci = &pcie->pci;
+ u16 ctrl;
+
+ ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
+
+ if (!ls_pcie_g4_reinit_hw(pcie))
+ return;
+
+ ls_pcie_g4_enable_interrupt(pcie);
+}
+
+static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
+ .interrupt_init = ls_pcie_g4_interrupt_init,
+};
+
+static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
+ .link_up = ls_pcie_g4_link_up,
+};
+
+static int __init ls_pcie_g4_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct mobiveil_pcie *mv_pci;
+ struct ls_pcie_g4 *pcie;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!of_parse_phandle(np, "msi-parent", 0)) {
+ dev_err(dev, "Failed to find msi-parent\n");
+ return -EINVAL;
+ }
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ mv_pci = &pcie->pci;
+
+ mv_pci->pdev = pdev;
+ mv_pci->ops = &ls_pcie_g4_pab_ops;
+ mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
+ mv_pci->rp.bridge = bridge;
+
+ platform_set_drvdata(pdev, pcie);
+
+ INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
+
+ ret = mobiveil_pcie_host_probe(mv_pci);
+ if (ret) {
+ dev_err(dev, "Fail to probe\n");
+ return ret;
+ }
+
+ ls_pcie_g4_enable_interrupt(pcie);
+
+ return 0;
+}
+
+static const struct of_device_id ls_pcie_g4_of_match[] = {
+ { .compatible = "fsl,lx2160a-pcie", },
+ { },
+};
+
+static struct platform_driver ls_pcie_g4_driver = {
+ .driver = {
+ .name = "layerscape-pcie-gen4",
+ .of_match_table = ls_pcie_g4_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
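
A minimal sketch of the timeout idiom the two polling loops above rely on
(condition_met() is a placeholder, not a helper in this driver): with the
post-decrement in the loop guard, "to = 100" allows up to 101 polls, and
"to" only goes negative once the budget is exhausted, so "to < 0" cleanly
separates timeout from success:

	int to = 100;

	do {
		usleep_range(10, 15);
	} while (!condition_met() && to--);
	if (to < 0)
		return -EIO;	/* condition never became true */
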
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 3a696ca45bfa..a94be264240f 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -3,10 +3,12 @@
* PCIe host controller driver for Mobiveil PCIe Host controller
*
* Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019-2020 NXP
+ *
* Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
*/
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -23,274 +25,22 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "../pci.h"
-
-/* register offsets and bit positions */
-
-/*
- * translation tables are grouped into windows, each window registers are
- * grouped into blocks of 4 or 16 registers each
- */
-#define PAB_REG_BLOCK_SIZE 16
-#define PAB_EXT_REG_BLOCK_SIZE 4
-
-#define PAB_REG_ADDR(offset, win) \
- (offset + (win * PAB_REG_BLOCK_SIZE))
-#define PAB_EXT_REG_ADDR(offset, win) \
- (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
-
-#define LTSSM_STATUS 0x0404
-#define LTSSM_STATUS_L0_MASK 0x3f
-#define LTSSM_STATUS_L0 0x2d
-
-#define PAB_CTRL 0x0808
-#define AMBA_PIO_ENABLE_SHIFT 0
-#define PEX_PIO_ENABLE_SHIFT 1
-#define PAGE_SEL_SHIFT 13
-#define PAGE_SEL_MASK 0x3f
-#define PAGE_LO_MASK 0x3ff
-#define PAGE_SEL_OFFSET_SHIFT 10
-
-#define PAB_AXI_PIO_CTRL 0x0840
-#define APIO_EN_MASK 0xf
-
-#define PAB_PEX_PIO_CTRL 0x08c0
-#define PIO_ENABLE_SHIFT 0
-
-#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
-#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
-#define PAB_INTP_INTX_MASK 0x01e0
-#define PAB_INTP_MSI_MASK 0x8
-
-#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
-#define WIN_ENABLE_SHIFT 0
-#define WIN_TYPE_SHIFT 1
-#define WIN_TYPE_MASK 0x3
-#define WIN_SIZE_MASK 0xfffffc00
-
-#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
-
-#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
-#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
-#define AXI_WINDOW_ALIGN_MASK 3
-
-#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
-#define PAB_BUS_SHIFT 24
-#define PAB_DEVICE_SHIFT 19
-#define PAB_FUNCTION_SHIFT 16
-
-#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
-#define PAB_INTP_AXI_PIO_CLASS 0x474
-
-#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
-#define AMAP_CTRL_EN_SHIFT 0
-#define AMAP_CTRL_TYPE_SHIFT 1
-#define AMAP_CTRL_TYPE_MASK 3
-
-#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
-#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
-#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
-#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
-#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
-
-/* starting offset of INTX bits in status register */
-#define PAB_INTX_START 5
-
-/* supported number of MSI interrupts */
-#define PCI_NUM_MSI 16
-
-/* MSI registers */
-#define MSI_BASE_LO_OFFSET 0x04
-#define MSI_BASE_HI_OFFSET 0x08
-#define MSI_SIZE_OFFSET 0x0c
-#define MSI_ENABLE_OFFSET 0x14
-#define MSI_STATUS_OFFSET 0x18
-#define MSI_DATA_OFFSET 0x20
-#define MSI_ADDR_L_OFFSET 0x24
-#define MSI_ADDR_H_OFFSET 0x28
-
-/* outbound and inbound window definitions */
-#define WIN_NUM_0 0
-#define WIN_NUM_1 1
-#define CFG_WINDOW_TYPE 0
-#define IO_WINDOW_TYPE 1
-#define MEM_WINDOW_TYPE 2
-#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
-#define MAX_PIO_WINDOWS 8
-
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_MIN 90000
-#define LINK_WAIT_MAX 100000
-
-#define PAGED_ADDR_BNDRY 0xc00
-#define OFFSET_TO_PAGE_ADDR(off) \
- ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
-#define OFFSET_TO_PAGE_IDX(off) \
- ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
-
-struct mobiveil_msi { /* MSI information */
- struct mutex lock; /* protect bitmap variable */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- phys_addr_t msi_pages_phys;
- int num_of_vectors;
- DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
-};
-
-struct mobiveil_pcie {
- struct platform_device *pdev;
- void __iomem *config_axi_slave_base; /* endpoint config base */
- void __iomem *csr_axi_slave_base; /* root port config base */
- void __iomem *apb_csr_base; /* MSI register base */
- phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
- struct irq_domain *intx_domain;
- raw_spinlock_t intx_mask_lock;
- int irq;
- int apio_wins;
- int ppio_wins;
- int ob_wins_configured; /* configured outbound windows */
- int ib_wins_configured; /* configured inbound windows */
- struct resource *ob_io_res;
- char root_bus_nr;
- struct mobiveil_msi msi;
-};
-
-/*
- * mobiveil_pcie_sel_page - routine to access paged register
- *
- * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
- * for this scheme to work extracted higher 6 bits of the offset will be
- * written to pg_sel field of PAB_CTRL register and rest of the lower 10
- * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
- */
-static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
-{
- u32 val;
-
- val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
- val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
- val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
-
- writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
-}
-
-static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
-{
- if (off < PAGED_ADDR_BNDRY) {
- /* For directly accessed registers, clear the pg_sel field */
- mobiveil_pcie_sel_page(pcie, 0);
- return pcie->csr_axi_slave_base + off;
- }
-
- mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
- return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
-}
-
-static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
-{
- if ((uintptr_t)addr & (size - 1)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- switch (size) {
- case 4:
- *val = readl(addr);
- break;
- case 2:
- *val = readw(addr);
- break;
- case 1:
- *val = readb(addr);
- break;
- default:
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
-{
- if ((uintptr_t)addr & (size - 1))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- switch (size) {
- case 4:
- writel(val, addr);
- break;
- case 2:
- writew(val, addr);
- break;
- case 1:
- writeb(val, addr);
- break;
- default:
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
-{
- void *addr;
- u32 val;
- int ret;
-
- addr = mobiveil_pcie_comp_addr(pcie, off);
-
- ret = mobiveil_pcie_read(addr, size, &val);
- if (ret)
- dev_err(&pcie->pdev->dev, "read CSR address failed\n");
-
- return val;
-}
-
-static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
- size_t size)
-{
- void *addr;
- int ret;
-
- addr = mobiveil_pcie_comp_addr(pcie, off);
-
- ret = mobiveil_pcie_write(addr, size, val);
- if (ret)
- dev_err(&pcie->pdev->dev, "write CSR address failed\n");
-}
-
-static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
-{
- return mobiveil_csr_read(pcie, off, 0x4);
-}
-
-static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
-{
- mobiveil_csr_write(pcie, val, off, 0x4);
-}
-
-static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
-{
- return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
- LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
-}
+#include "pcie-mobiveil.h"
static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
struct mobiveil_pcie *pcie = bus->sysdata;
+ struct mobiveil_root_port *rp = &pcie->rp;
/* Only one device down on each root port */
- if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+ if ((bus->number == rp->root_bus_nr) && (devfn > 0))
return false;
/*
* Do not read more than one device on the bus directly
* attached to RC
*/
- if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
+ if ((bus->primary == rp->root_bus_nr) && (PCI_SLOT(devfn) > 0))
return false;
return true;
@@ -304,13 +54,14 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct mobiveil_pcie *pcie = bus->sysdata;
+ struct mobiveil_root_port *rp = &pcie->rp;
u32 value;
if (!mobiveil_pcie_valid_device(bus, devfn))
return NULL;
/* RC config access */
- if (bus->number == pcie->root_bus_nr)
+ if (bus->number == rp->root_bus_nr)
return pcie->csr_axi_slave_base + where;
/*
@@ -325,7 +76,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
- return pcie->config_axi_slave_base + where;
+ return rp->config_axi_slave_base + where;
}
static struct pci_ops mobiveil_pcie_ops = {
@@ -339,7 +90,8 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
struct device *dev = &pcie->pdev->dev;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct mobiveil_msi *msi = &rp->msi;
u32 msi_data, msi_addr_lo, msi_addr_hi;
u32 intr_status, msi_status;
unsigned long shifted_status;
@@ -365,7 +117,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
- virq = irq_find_mapping(pcie->intx_domain,
+ virq = irq_find_mapping(rp->intx_domain,
bit + 1);
if (virq)
generic_handle_irq(virq);
@@ -424,15 +176,16 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
struct device_node *node = dev->of_node;
+ struct mobiveil_root_port *rp = &pcie->rp;
struct resource *res;
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"config_axi_slave");
- pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie->config_axi_slave_base))
- return PTR_ERR(pcie->config_axi_slave_base);
- pcie->ob_io_res = res;
+ rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(rp->config_axi_slave_base))
+ return PTR_ERR(rp->config_axi_slave_base);
+ rp->ob_io_res = res;
/* map csr resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -442,12 +195,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
return PTR_ERR(pcie->csr_axi_slave_base);
pcie->pcie_reg_base = res->start;
- /* map MSI config resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
- pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie->apb_csr_base))
- return PTR_ERR(pcie->apb_csr_base);
-
/* read the number of windows requested */
if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
pcie->apio_wins = MAX_PIO_WINDOWS;
@@ -455,118 +202,15 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
pcie->ppio_wins = MAX_PIO_WINDOWS;
- pcie->irq = platform_get_irq(pdev, 0);
- if (pcie->irq <= 0) {
- dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
- return -ENODEV;
- }
-
return 0;
}
-static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
-{
- u32 value;
- u64 size64 = ~(size - 1);
-
- if (win_num >= pcie->ppio_wins) {
- dev_err(&pcie->pdev->dev,
- "ERROR: max inbound windows reached !\n");
- return;
- }
-
- value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
- value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
- value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
- (lower_32_bits(size64) & WIN_SIZE_MASK);
- mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
-
- mobiveil_csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
-
- mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
- PAB_PEX_AMAP_AXI_WIN(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
-
- mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_L(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_H(win_num));
-
- pcie->ib_wins_configured++;
-}
-
-/*
- * routine to program the outbound windows
- */
-static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
-{
- u32 value;
- u64 size64 = ~(size - 1);
-
- if (win_num >= pcie->apio_wins) {
- dev_err(&pcie->pdev->dev,
- "ERROR: max outbound windows reached !\n");
- return;
- }
-
- /*
- * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
- * to 4 KB in PAB_AXI_AMAP_CTRL register
- */
- value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
- value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
- value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
- (lower_32_bits(size64) & WIN_SIZE_MASK);
- mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
-
- mobiveil_csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_AXI_AMAP_SIZE(win_num));
-
- /*
- * program AXI window base with appropriate value in
- * PAB_AXI_AMAP_AXI_WIN0 register
- */
- mobiveil_csr_writel(pcie,
- lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
-
- mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
- mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
-
- pcie->ob_wins_configured++;
-}
-
-static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
-{
- int retries;
-
- /* check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (mobiveil_pcie_link_up(pcie))
- return 0;
-
- usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
- }
-
- dev_err(&pcie->pdev->dev, "link never came up\n");
-
- return -ETIMEDOUT;
-}
-
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
phys_addr_t msg_addr = pcie->pcie_reg_base;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
- pcie->msi.num_of_vectors = PCI_NUM_MSI;
+ msi->num_of_vectors = PCI_NUM_MSI;
msi->msi_pages_phys = (phys_addr_t)msg_addr;
writel_relaxed(lower_32_bits(msg_addr),
@@ -577,17 +221,23 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
-static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct pci_host_bridge *bridge = rp->bridge;
u32 value, pab_ctrl, type;
struct resource_entry *win;
- /* setup bus numbers */
- value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
- value &= 0xff000000;
- value |= 0x00ff0100;
- mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ pcie->ib_wins_configured = 0;
+ pcie->ob_wins_configured = 0;
+
+ if (!reinit) {
+ /* setup bus numbers */
+ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
+ value &= 0xff000000;
+ value |= 0x00ff0100;
+ mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ }
/*
* program Bus Master Enable Bit in Command Register in PAB Config
@@ -605,9 +255,6 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
- mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
-
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
@@ -629,8 +276,8 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
*/
/* config outbound translation window */
- program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
- CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
+ program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0,
+ CFG_WINDOW_TYPE, resource_size(rp->ob_io_res));
/* memory inbound translation window */
program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
@@ -657,9 +304,6 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
value |= (PCI_CLASS_BRIDGE_PCI << 16);
mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
- /* setup MSI hardware registers */
- mobiveil_pcie_enable_msi(pcie);
-
return 0;
}
@@ -667,32 +311,36 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
{
struct irq_desc *desc = irq_to_desc(data->irq);
struct mobiveil_pcie *pcie;
+ struct mobiveil_root_port *rp;
unsigned long flags;
u32 mask, shifted_val;
pcie = irq_desc_get_chip_data(desc);
+ rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
- raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
- raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}
static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
struct irq_desc *desc = irq_to_desc(data->irq);
struct mobiveil_pcie *pcie;
+ struct mobiveil_root_port *rp;
unsigned long flags;
u32 shifted_val, mask;
pcie = irq_desc_get_chip_data(desc);
+ rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
- raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
- raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}
static struct irq_chip intx_irq_chip = {
@@ -760,7 +408,7 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
unsigned int nr_irqs, void *args)
{
struct mobiveil_pcie *pcie = domain->host_data;
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
unsigned long bit;
WARN_ON(nr_irqs != 1);
@@ -787,7 +435,7 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_lock(&msi->lock);
@@ -808,9 +456,9 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mobiveil_msi *msi = &pcie->msi;
+ struct mobiveil_msi *msi = &pcie->rp.msi;
- mutex_init(&pcie->msi.lock);
+ mutex_init(&msi->lock);
msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
&msi_domain_ops, pcie);
if (!msi->dev_domain) {
@@ -834,18 +482,19 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct device_node *node = dev->of_node;
+ struct mobiveil_root_port *rp = &pcie->rp;
int ret;
/* setup INTx */
- pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
- if (!pcie->intx_domain) {
+ if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
}
- raw_spin_lock_init(&pcie->intx_mask_lock);
+ raw_spin_lock_init(&rp->intx_mask_lock);
/* setup MSI */
ret = mobiveil_allocate_msi_domains(pcie);
@@ -855,23 +504,74 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
return 0;
}
-static int mobiveil_pcie_probe(struct platform_device *pdev)
+static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
{
- struct mobiveil_pcie *pcie;
- struct pci_bus *bus;
- struct pci_bus *child;
- struct pci_host_bridge *bridge;
+ struct platform_device *pdev = pcie->pdev;
struct device *dev = &pdev->dev;
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct resource *res;
int ret;
- /* allocate the PCIe port */
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
- if (!bridge)
- return -ENOMEM;
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
- pcie = pci_host_bridge_priv(bridge);
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
- pcie->pdev = pdev;
+ rp->irq = platform_get_irq(pdev, 0);
+ if (rp->irq <= 0) {
+ dev_err(dev, "failed to map IRQ: %d\n", rp->irq);
+ return -ENODEV;
+ }
+
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return ret;
+ }
+
+ irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie);
+
+ /* Enable interrupts */
+ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ return 0;
+}
+
+static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
+{
+ struct mobiveil_root_port *rp = &pcie->rp;
+
+ if (rp->ops->interrupt_init)
+ return rp->ops->interrupt_init(pcie);
+
+ return mobiveil_pcie_integrated_interrupt_init(pcie);
+}
+
+static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
+{
+ u32 header_type;
+
+ header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
+{
+ struct mobiveil_root_port *rp = &pcie->rp;
+ struct pci_host_bridge *bridge = rp->bridge;
+ struct device *dev = &pcie->pdev->dev;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ int ret;
ret = mobiveil_pcie_parse_dt(pcie);
if (ret) {
@@ -879,6 +579,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return ret;
}
+ if (!mobiveil_pcie_is_bridge(pcie))
+ return -ENODEV;
+
/* parse the host bridge base addresses from the device tree file */
ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
&bridge->dma_ranges, NULL);
@@ -891,25 +594,22 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
* configure all inbound and outbound windows and prepare the RC for
* config access
*/
- ret = mobiveil_host_init(pcie);
+ ret = mobiveil_host_init(pcie, false);
if (ret) {
dev_err(dev, "Failed to initialize host\n");
return ret;
}
- /* initialize the IRQ domains */
- ret = mobiveil_pcie_init_irq_domain(pcie);
+ ret = mobiveil_pcie_interrupt_init(pcie);
if (ret) {
- dev_err(dev, "Failed creating IRQ Domain\n");
+ dev_err(dev, "Interrupt init failed\n");
return ret;
}
- irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
-
/* Initialize bridge */
bridge->dev.parent = dev;
bridge->sysdata = pcie;
- bridge->busnr = pcie->root_bus_nr;
+ bridge->busnr = rp->root_bus_nr;
bridge->ops = &mobiveil_pcie_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
@@ -934,25 +634,3 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return 0;
}
-
-static const struct of_device_id mobiveil_pcie_of_match[] = {
- {.compatible = "mbvl,gpex40-pcie",},
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
-
-static struct platform_driver mobiveil_pcie_driver = {
- .probe = mobiveil_pcie_probe,
- .driver = {
- .name = "mobiveil-pcie",
- .of_match_table = mobiveil_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-builtin_platform_driver(mobiveil_pcie_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
-MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
new file mode 100644
index 000000000000..f6fcd95c2bf5
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "pcie-mobiveil.h"
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+
+ /* allocate the PCIe port */
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(bridge);
+ pcie->rp.bridge = bridge;
+
+ pcie->pdev = pdev;
+
+ return mobiveil_pcie_host_probe(pcie);
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.c b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
new file mode 100644
index 000000000000..62ecbaeb0a60
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-mobiveil.h"
+
+/*
+ * mobiveil_pcie_sel_page - routine to access paged registers
+ *
+ * Registers whose address is greater than PAGED_ADDR_BNDRY (0xc00) are
+ * paged. For this scheme to work, the upper 6 bits of the offset are
+ * written to the pg_sel field of the PAB_CTRL register, and the lower 10
+ * bits, OR'd with PAGED_ADDR_BNDRY, are used as the register offset
+ * within the selected page.
+ */
+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
+ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
+
+ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
+}
+
+static void __iomem *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie,
+ u32 off)
+{
+ if (off < PAGED_ADDR_BNDRY) {
+ /* For directly accessed registers, clear the pg_sel field */
+ mobiveil_pcie_sel_page(pcie, 0);
+ return pcie->csr_axi_slave_base + off;
+ }
+
+ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
+ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
+}
+
+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ switch (size) {
+ case 4:
+ *val = readl(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ case 1:
+ *val = readb(addr);
+ break;
+ default:
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
+{
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ switch (size) {
+ case 4:
+ writel(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ case 1:
+ writeb(val, addr);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+{
+ void __iomem *addr;
+ u32 val;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_read(addr, size, &val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "read CSR address failed\n");
+
+ return val;
+}
+
+void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size)
+{
+ void __iomem *addr;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_write(addr, size, val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "write CSR address failed\n");
+}
+
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ if (pcie->ops->link_up)
+ return pcie->ops->link_up(pcie);
+
+ return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
+
+ if (win_num >= pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+ }
+
+ value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ib_wins_configured++;
+}
+
+/*
+ * routine to program the outbound windows
+ */
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+ u32 value;
+ u64 size64 = ~(size - 1);
+
+ if (win_num >= pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+ }
+
+ /*
+ * program the Enable bit to 1, the Type field to 00 (binary) and the
+ * AXI window size mask in the PAB_AXI_AMAP_CTRL register
+ */
+ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+ /*
+ * program AXI window base with appropriate value in
+ * PAB_AXI_AMAP_AXI_WIN0 register
+ */
+ mobiveil_csr_writel(pcie,
+ lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ob_wins_configured++;
+}
+
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+
+ return -ETIMEDOUT;
+}
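
The size encoding in program_ib_windows()/program_ob_windows() assumes
"size" is a power of two: size64 = ~(size - 1) turns it into an address
mask. Worked through for a 1 MiB window:

	size - 1                = 0x0000_0000_000f_ffff
	size64 = ~(size - 1)    = 0xffff_ffff_fff0_0000
	lower_32_bits(size64)
	       & WIN_SIZE_MASK  = 0xfff0_0000

so the low CTRL register carries the mask bits above the 1 KiB granularity
implied by WIN_SIZE_MASK (0xfffffc00), and the upper 32 bits go to the
PAB_EXT_*_AMAP_SIZE extension register.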
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
new file mode 100644
index 000000000000..767e36a8522d
--- /dev/null
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Copyright 2019 NXP
+ *
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ * Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+ */
+
+#ifndef _PCIE_MOBIVEIL_H
+#define _PCIE_MOBIVEIL_H
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include "../../pci.h"
+
+/* register offsets and bit positions */
+
+/*
+ * translation tables are grouped into windows, each window registers are
+ * grouped into blocks of 4 or 16 registers each
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) \
+ (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) \
+ (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_OFFSET_SHIFT 10
+
+#define PAB_ACTIVITY_STAT 0x81c
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_RESET BIT(1)
+#define PAB_INTP_MSI BIT(3)
+#define PAB_INTP_INTA BIT(5)
+#define PAB_INTP_INTB BIT(6)
+#define PAB_INTP_INTC BIT(7)
+#define PAB_INTP_INTD BIT(8)
+#define PAB_INTP_PCIE_UE BIT(9)
+#define PAB_INTP_IE_PMREDI BIT(29)
+#define PAB_INTP_IE_EC BIT(30)
+#define PAB_INTP_MSI_MASK PAB_INTP_MSI
+#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\
+ PAB_INTP_INTC | PAB_INTP_INTD)
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+#define WIN_TYPE_MASK 0x3
+#define WIN_SIZE_MASK 0xfffffc00
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+#define AMAP_CTRL_TYPE_MASK 3
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+#define PAGED_ADDR_BNDRY 0xc00
+#define OFFSET_TO_PAGE_ADDR(off) \
+ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off) \
+ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie;
+
+struct mobiveil_rp_ops {
+ int (*interrupt_init)(struct mobiveil_pcie *pcie);
+};
+
+struct mobiveil_root_port {
+ char root_bus_nr;
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ struct resource *ob_io_res;
+ struct mobiveil_rp_ops *ops;
+ int irq;
+ raw_spinlock_t intx_mask_lock;
+ struct irq_domain *intx_domain;
+ struct mobiveil_msi msi;
+ struct pci_host_bridge *bridge;
+};
+
+struct mobiveil_pab_ops {
+ int (*link_up)(struct mobiveil_pcie *pcie);
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ void __iomem *csr_axi_slave_base; /* root port config base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
+ int apio_wins;
+ int ppio_wins;
+ int ob_wins_configured; /* configured outbound windows */
+ int ib_wins_configured; /* configured inbound windows */
+ const struct mobiveil_pab_ops *ops;
+ struct mobiveil_root_port rp;
+};
+
+int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
+int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit);
+bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
+int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
+void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
+ u64 pci_addr, u32 type, u64 size);
+u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
+void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size);
+
+static inline u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x4);
+}
+
+static inline u16 mobiveil_csr_readw(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x2);
+}
+
+static inline u8 mobiveil_csr_readb(struct mobiveil_pcie *pcie, u32 off)
+{
+ return mobiveil_csr_read(pcie, off, 0x1);
+}
+
+static inline void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x4);
+}
+
+static inline void mobiveil_csr_writew(struct mobiveil_pcie *pcie, u16 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x2);
+}
+
+static inline void mobiveil_csr_writeb(struct mobiveil_pcie *pcie, u8 val,
+ u32 off)
+{
+ mobiveil_csr_write(pcie, val, off, 0x1);
+}
+
+#endif /* _PCIE_MOBIVEIL_H */
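
Worked example of the paged-access macros above, for PAB_PEX_AMAP_CTRL(0)
at offset 0x4ba0 (beyond PAGED_ADDR_BNDRY):

	OFFSET_TO_PAGE_IDX(0x4ba0)  = (0x4ba0 >> 10) & 0x3f   = 0x12
	OFFSET_TO_PAGE_ADDR(0x4ba0) = (0x4ba0 & 0x3ff) | 0xc00 = 0xfa0

so a CSR access first writes 0x12 into the pg_sel field of PAB_CTRL and
then touches csr_axi_slave_base + 0xfa0.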
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 9977abff92fc..e15022ff63e3 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -63,6 +63,7 @@
enum pci_protocol_version_t {
PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
+ PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3), /* Vibranium */
};
#define CPU_AFFINITY_ALL -1ULL
@@ -72,6 +73,7 @@ enum pci_protocol_version_t {
* first.
*/
static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_3,
PCI_PROTOCOL_VERSION_1_2,
PCI_PROTOCOL_VERSION_1_1,
};
@@ -119,6 +121,7 @@ enum pci_message_type {
PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
+ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
PCI_MESSAGE_MAXIMUM
};
@@ -164,6 +167,26 @@ struct pci_function_description {
u32 ser; /* serial number */
} __packed;
+enum pci_device_description_flags {
+ HV_PCI_DEVICE_FLAG_NONE = 0x0,
+ HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
+};
+
+struct pci_function_description2 {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+ u16 reserved;
+} __packed;
+
/**
* struct hv_msi_desc
* @vector: IDT entry
@@ -260,7 +283,7 @@ struct pci_packet {
int resp_packet_size);
void *compl_ctxt;
- struct pci_message message[0];
+ struct pci_message message[];
};
/*
@@ -296,7 +319,13 @@ struct pci_bus_d0_entry {
struct pci_bus_relations {
struct pci_incoming_message incoming;
u32 device_count;
- struct pci_function_description func[0];
+ struct pci_function_description func[];
+} __packed;
+
+struct pci_bus_relations2 {
+ struct pci_incoming_message incoming;
+ u32 device_count;
+ struct pci_function_description2 func[];
} __packed;
struct pci_q_res_req_response {
@@ -407,42 +436,6 @@ struct pci_eject_response {
static int pci_ring_size = (4 * PAGE_SIZE);
/*
- * Definitions or interrupt steering hypercall.
- */
-#define HV_PARTITION_ID_SELF ((u64)-1)
-#define HVCALL_RETARGET_INTERRUPT 0x7e
-
-struct hv_interrupt_entry {
- u32 source; /* 1 for MSI(-X) */
- u32 reserved1;
- u32 address;
- u32 data;
-};
-
-/*
- * flags for hv_device_interrupt_target.flags
- */
-#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
-#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
-
-struct hv_device_interrupt_target {
- u32 vector;
- u32 flags;
- union {
- u64 vp_mask;
- struct hv_vpset vp_set;
- };
-};
-
-struct retarget_msi_interrupt {
- u64 partition_id; /* use "self" */
- u64 device_id;
- struct hv_interrupt_entry int_entry;
- u64 reserved2;
- struct hv_device_interrupt_target int_target;
-} __packed __aligned(8);
-
-/*
* Driver specific state.
*/
@@ -488,7 +481,7 @@ struct hv_pcibus_device {
struct workqueue_struct *wq;
/* hypercall arg, must not cross page boundary */
- struct retarget_msi_interrupt retarget_msi_interrupt_params;
+ struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
/*
* Don't put anything here: retarget_msi_interrupt_params must be last
@@ -505,10 +498,24 @@ struct hv_dr_work {
struct hv_pcibus_device *bus;
};
+struct hv_pcidev_description {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+ u32 flags;
+ u16 virtual_numa_node;
+};
+
struct hv_dr_state {
struct list_head list_entry;
u32 device_count;
- struct pci_function_description func[0];
+ struct hv_pcidev_description func[];
};
enum hv_pcichild_state {
@@ -525,7 +532,7 @@ struct hv_pci_dev {
refcount_t refs;
enum hv_pcichild_state state;
struct pci_slot *pci_slot;
- struct pci_function_description desc;
+ struct hv_pcidev_description desc;
bool reported_missing;
struct hv_pcibus_device *hbus;
struct work_struct wrk;
@@ -1184,7 +1191,7 @@ static void hv_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct irq_cfg *cfg = irqd_cfg(data);
- struct retarget_msi_interrupt *params;
+ struct hv_retarget_device_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
cpumask_var_t tmp;
@@ -1206,8 +1213,7 @@ static void hv_irq_unmask(struct irq_data *data)
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
params->int_entry.source = 1; /* MSI(-X) */
- params->int_entry.address = msi_desc->msg.address_lo;
- params->int_entry.data = msi_desc->msg.data;
+ hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
@@ -1401,6 +1407,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
break;
case PCI_PROTOCOL_VERSION_1_2:
+ case PCI_PROTOCOL_VERSION_1_3:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
@@ -1799,6 +1806,27 @@ static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
}
}
+/*
+ * Set NUMA node for the devices on the bus
+ */
+static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *bus = hbus->pci_bus;
+ struct hv_pci_dev *hv_dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
+ if (!hv_dev)
+ continue;
+
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
+ set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+
+ put_pcichild(hv_dev);
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1821,6 +1849,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
+ hv_pci_assign_numa_node(hbus);
pci_bus_assign_resources(hbus->pci_bus);
hv_pci_assign_slots(hbus);
pci_bus_add_devices(hbus->pci_bus);
@@ -1877,7 +1906,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
* Return: Pointer to the new tracking struct
*/
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
- struct pci_function_description *desc)
+ struct hv_pcidev_description *desc)
{
struct hv_pci_dev *hpdev;
struct pci_child_message *res_req;
@@ -1988,7 +2017,7 @@ static void pci_devices_present_work(struct work_struct *work)
{
u32 child_no;
bool found;
- struct pci_function_description *new_desc;
+ struct hv_pcidev_description *new_desc;
struct hv_pci_dev *hpdev;
struct hv_pcibus_device *hbus;
struct list_head removed;
@@ -2089,6 +2118,7 @@ static void pci_devices_present_work(struct work_struct *work)
*/
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
+ hv_pci_assign_numa_node(hbus);
hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
break;
@@ -2107,17 +2137,15 @@ static void pci_devices_present_work(struct work_struct *work)
}
/**
- * hv_pci_devices_present() - Handles list of new children
+ * hv_pci_start_relations_work() - Queue work to start device discovery
* @hbus: Root PCI bus, as understood by this driver
- * @relations: Packet from host listing children
+ * @dr: The list of children returned from host
*
- * This function is invoked whenever a new list of devices for
- * this bus appears.
+ * Return: 0 on success, -errno on failure
*/
-static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
- struct pci_bus_relations *relations)
+static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
+ struct hv_dr_state *dr)
{
- struct hv_dr_state *dr;
struct hv_dr_work *dr_wrk;
unsigned long flags;
bool pending_dr;
@@ -2125,29 +2153,15 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
if (hbus->state == hv_pcibus_removing) {
dev_info(&hbus->hdev->device,
"PCI VMBus BUS_RELATIONS: ignored\n");
- return;
+ return -ENOENT;
}
dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
if (!dr_wrk)
- return;
-
- dr = kzalloc(offsetof(struct hv_dr_state, func) +
- (sizeof(struct pci_function_description) *
- (relations->device_count)), GFP_NOWAIT);
- if (!dr) {
- kfree(dr_wrk);
- return;
- }
+ return -ENOMEM;
INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
dr_wrk->bus = hbus;
- dr->device_count = relations->device_count;
- if (dr->device_count != 0) {
- memcpy(dr->func, relations->func,
- sizeof(struct pci_function_description) *
- dr->device_count);
- }
spin_lock_irqsave(&hbus->device_list_lock, flags);
/*
@@ -2165,6 +2179,87 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
get_hvpcibus(hbus);
queue_work(hbus->wq, &dr_wrk->wrk);
}
+
+ return 0;
+}
+
+/**
+ * hv_pci_devices_present() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * Process a new list of devices on the bus. The list of devices is
+ * discovered by the VSP and sent to us via the VSP message
+ * PCI_BUS_RELATIONS whenever a new list of devices for this bus appears.
+ */
+static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(offsetof(struct hv_dr_state, func) +
+ (sizeof(struct hv_pcidev_description) *
+ (relations->device_count)), GFP_NOWAIT);
+
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
+ }
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
+}
+
+/**
+ * hv_pci_devices_present2() - Handle list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * This function is the v2 version of hv_pci_devices_present()
+ */
+static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations2 *relations)
+{
+ struct hv_dr_state *dr;
+ int i;
+
+ dr = kzalloc(offsetof(struct hv_dr_state, func) +
+ (sizeof(struct hv_pcidev_description) *
+ (relations->device_count)), GFP_NOWAIT);
+
+ if (!dr)
+ return;
+
+ dr->device_count = relations->device_count;
+ for (i = 0; i < dr->device_count; i++) {
+ dr->func[i].v_id = relations->func[i].v_id;
+ dr->func[i].d_id = relations->func[i].d_id;
+ dr->func[i].rev = relations->func[i].rev;
+ dr->func[i].prog_intf = relations->func[i].prog_intf;
+ dr->func[i].subclass = relations->func[i].subclass;
+ dr->func[i].base_class = relations->func[i].base_class;
+ dr->func[i].subsystem_id = relations->func[i].subsystem_id;
+ dr->func[i].win_slot = relations->func[i].win_slot;
+ dr->func[i].ser = relations->func[i].ser;
+ dr->func[i].flags = relations->func[i].flags;
+ dr->func[i].virtual_numa_node =
+ relations->func[i].virtual_numa_node;
+ }
+
+ if (hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
}
/**
@@ -2280,6 +2375,7 @@ static void hv_pci_onchannelcallback(void *context)
struct pci_response *response;
struct pci_incoming_message *new_message;
struct pci_bus_relations *bus_rel;
+ struct pci_bus_relations2 *bus_rel2;
struct pci_dev_inval_block *inval;
struct pci_dev_incoming *dev_message;
struct hv_pci_dev *hpdev;
@@ -2347,6 +2443,21 @@ static void hv_pci_onchannelcallback(void *context)
hv_pci_devices_present(hbus, bus_rel);
break;
+ case PCI_BUS_RELATIONS2:
+
+ bus_rel2 = (struct pci_bus_relations2 *)buffer;
+ if (bytes_recvd <
+ offsetof(struct pci_bus_relations2, func) +
+ (sizeof(struct pci_function_description2) *
+ (bus_rel2->device_count))) {
+ dev_err(&hbus->hdev->device,
+ "bus relations v2 too small\n");
+ break;
+ }
+
+ hv_pci_devices_present2(hbus, bus_rel2);
+ break;
+
case PCI_EJECT:
dev_message = (struct pci_dev_incoming *)buffer;
@@ -2922,7 +3033,7 @@ static int hv_pci_probe(struct hv_device *hdev,
* positive by using kmemleak_alloc() and kmemleak_free() to ask
* kmemleak to track and scan the hbus buffer.
*/
- hbus = (struct hv_pcibus_device *)kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
+ hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
@@ -3058,7 +3169,7 @@ destroy_wq:
free_dom:
hv_put_dom_num(hbus->sysdata.domain);
free_bus:
- free_page((unsigned long)hbus);
+ kfree(hbus);
return ret;
}
@@ -3069,7 +3180,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
- struct pci_bus_relations relations;
+ struct hv_dr_state *dr;
struct hv_pci_compl comp_pkt;
int ret;
@@ -3082,8 +3193,9 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
if (!hibernating) {
/* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
+ dr = kzalloc(sizeof(*dr), GFP_KERNEL);
+ if (dr && hv_pci_start_relations_work(hbus, dr))
+ kfree(dr);
}
ret = hv_send_resources_released(hdev);
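
The hv_dr_state allocations above size a flexible array member by hand; an
equivalent, overflow-checked form using the kernel's struct_size() helper
(shown only as an alternative, not what this patch does) would be:

	struct hv_dr_state *dr;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);

which evaluates to offsetof(struct hv_dr_state, func) +
device_count * sizeof(*dr->func), saturating rather than wrapping on
overflow.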
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 0e03cef72840..3e64ba6a36a8 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -355,16 +355,6 @@ struct tegra_pcie {
int irq;
struct resource cs;
- struct resource io;
- struct resource pio;
- struct resource mem;
- struct resource prefetch;
- struct resource busn;
-
- struct {
- resource_size_t mem;
- resource_size_t io;
- } offset;
struct clk *pex_clk;
struct clk *afi_clk;
@@ -797,38 +787,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
-static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
-{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct list_head *windows = &host->windows;
- struct device *dev = pcie->dev;
- int err;
-
- pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
- pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
- pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
- pci_add_resource(windows, &pcie->busn);
-
- err = devm_request_pci_bus_resources(dev, windows);
- if (err < 0) {
- pci_free_resource_list(windows);
- return err;
- }
-
- pci_remap_iospace(&pcie->pio, pcie->io.start);
-
- return 0;
-}
-
-static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
-{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct list_head *windows = &host->windows;
-
- pci_unmap_iospace(&pcie->pio);
- pci_free_resource_list(windows);
-}
-
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
struct tegra_pcie *pcie = pdev->bus->sysdata;
@@ -909,36 +867,49 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
*/
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
- u32 fpci_bar, size, axi_address;
+ u32 size;
+ struct resource_entry *entry;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
/* Bar 0: type 1 extended configuration space */
size = resource_size(&pcie->cs);
afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
- /* Bar 1: downstream IO bar */
- fpci_bar = 0xfdfc0000;
- size = resource_size(&pcie->io);
- axi_address = pcie->io.start;
- afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
- afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
- afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
-
- /* Bar 2: prefetchable memory BAR */
- fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
- size = resource_size(&pcie->prefetch);
- axi_address = pcie->prefetch.start;
- afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
- afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
- afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
-
- /* Bar 3: non prefetchable memory BAR */
- fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
- size = resource_size(&pcie->mem);
- axi_address = pcie->mem.start;
- afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
- afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
- afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ u32 fpci_bar, axi_address;
+ struct resource *res = entry->res;
+
+ size = resource_size(res);
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ /* Bar 1: downstream IO bar */
+ fpci_bar = 0xfdfc0000;
+ axi_address = pci_pio_to_address(res->start);
+ afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+ break;
+ case IORESOURCE_MEM:
+ fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
+ axi_address = res->start;
+
+ if (res->flags & IORESOURCE_PREFETCH) {
+ /* Bar 2: prefetchable memory BAR */
+ afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+
+ } else {
+ /* Bar 3: non prefetchable memory BAR */
+ afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+ }
+ break;
+ }
+ }
/* NULL out the remaining BARs as they are not used */
afi_writel(pcie, 0, AFI_AXI_BAR4_START);
@@ -2157,76 +2128,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node, *port;
const struct tegra_pcie_soc *soc = pcie->soc;
- struct of_pci_range_parser parser;
- struct of_pci_range range;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
- struct resource res;
int err;
- if (of_pci_range_parser_init(&parser, np)) {
- dev_err(dev, "missing \"ranges\" property\n");
- return -EINVAL;
- }
-
- for_each_of_pci_range(&parser, &range) {
- err = of_pci_range_to_resource(&range, np, &res);
- if (err < 0)
- return err;
-
- switch (res.flags & IORESOURCE_TYPE_BITS) {
- case IORESOURCE_IO:
- /* Track the bus -> CPU I/O mapping offset. */
- pcie->offset.io = res.start - range.pci_addr;
-
- memcpy(&pcie->pio, &res, sizeof(res));
- pcie->pio.name = np->full_name;
-
- /*
- * The Tegra PCIe host bridge uses this to program the
- * mapping of the I/O space to the physical address,
- * so we override the .start and .end fields here that
- * of_pci_range_to_resource() converted to I/O space.
- * We also set the IORESOURCE_MEM type to clarify that
- * the resource is in the physical memory space.
- */
- pcie->io.start = range.cpu_addr;
- pcie->io.end = range.cpu_addr + range.size - 1;
- pcie->io.flags = IORESOURCE_MEM;
- pcie->io.name = "I/O";
-
- memcpy(&res, &pcie->io, sizeof(res));
- break;
-
- case IORESOURCE_MEM:
- /*
- * Track the bus -> CPU memory mapping offset. This
- * assumes that the prefetchable and non-prefetchable
- * regions will be the last of type IORESOURCE_MEM in
- * the ranges property.
- * */
- pcie->offset.mem = res.start - range.pci_addr;
-
- if (res.flags & IORESOURCE_PREFETCH) {
- memcpy(&pcie->prefetch, &res, sizeof(res));
- pcie->prefetch.name = "prefetchable";
- } else {
- memcpy(&pcie->mem, &res, sizeof(res));
- pcie->mem.name = "non-prefetchable";
- }
- break;
- }
- }
-
- err = of_pci_parse_bus_range(np, &pcie->busn);
- if (err < 0) {
- dev_err(dev, "failed to parse ranges property: %d\n", err);
- pcie->busn.name = np->name;
- pcie->busn.start = 0;
- pcie->busn.end = 0xff;
- pcie->busn.flags = IORESOURCE_BUS;
- }
-
/* parse root ports */
for_each_child_of_node(np, port) {
struct tegra_pcie_port *rp;
@@ -2766,6 +2671,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
struct pci_host_bridge *host;
struct tegra_pcie *pcie;
struct pci_bus *child;
+ struct resource *bus;
int err;
host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
@@ -2780,6 +2686,12 @@ static int tegra_pcie_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&pcie->ports);
pcie->dev = dev;
+ err = pci_parse_request_of_pci_ranges(dev, &host->windows, NULL, &bus);
+ if (err) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return err;
+ }
+
err = tegra_pcie_parse_dt(pcie);
if (err < 0)
return err;
@@ -2803,11 +2715,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
goto teardown_msi;
}
- err = tegra_pcie_request_resources(pcie);
- if (err)
- goto pm_runtime_put;
-
- host->busnr = pcie->busn.start;
+ host->busnr = bus->start;
host->dev.parent = &pdev->dev;
host->ops = &tegra_pcie_ops;
host->map_irq = tegra_pcie_map_irq;
@@ -2816,7 +2724,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = pci_scan_root_bus_bridge(host);
if (err < 0) {
dev_err(dev, "failed to register host: %d\n", err);
- goto free_resources;
+ goto pm_runtime_put;
}
pci_bus_size_bridges(host->bus);
@@ -2835,8 +2743,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
return 0;
-free_resources:
- tegra_pcie_free_resources(pcie);
pm_runtime_put:
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
@@ -2858,7 +2764,6 @@ static int tegra_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(host->bus);
pci_remove_root_bus(host->bus);
- tegra_pcie_free_resources(pcie);
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 3a10e678c7f4..6d79d14527a6 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -824,8 +824,8 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
dev_info(dev, "link up, %s x%u %s\n",
- PCIE_SPEED2STR(cls + PCI_SPEED_133MHz_PCIX_533),
- nlw, ssc_good ? "(SSC)" : "(!SSC)");
+ pci_speed_string(pcie_link_speed[cls]), nlw,
+ ssc_good ? "(SSC)" : "(!SSC)");
/* PCIe->SCB endian mode for BAR */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 5d74f81ddfe4..60330f3e3751 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -8,6 +8,7 @@
#include <linux/crc32.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define FLAG_USE_DMA BIT(0)
+
#define TIMER_RESOLUTION 1
static struct workqueue_struct *kpcitest_workqueue;
@@ -47,7 +50,11 @@ struct pci_epf_test {
void *reg[PCI_STD_NUM_BARS];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
+ size_t msix_table_offset;
struct delayed_work cmd_handler;
+ struct dma_chan *dma_chan;
+ struct completion transfer_complete;
+ bool dma_supported;
const struct pci_epc_features *epc_features;
};
@@ -61,6 +68,7 @@ struct pci_epf_test_reg {
u32 checksum;
u32 irq_type;
u32 irq_number;
+ u32 flags;
} __packed;
static struct pci_epf_header test_header = {
@@ -72,13 +80,156 @@ static struct pci_epf_header test_header = {
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+static void pci_epf_test_dma_callback(void *param)
+{
+ struct pci_epf_test *epf_test = param;
+
+ complete(&epf_test->transfer_complete);
+}
+
+/**
+ * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
+ * data between PCIe EP and remote PCIe RC
+ * @epf_test: the EPF test device that performs the data transfer operation
+ * @dma_dst: The destination address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @dma_src: The source address of the data transfer. It can be a physical
+ * address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
+ * @len: The size of the data transfer
+ *
+ * Function that uses dmaengine API to transfer data between PCIe EP and remote
+ * PCIe RC. The source and destination address can be a physical address given
+ * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
+ *
+ * The function returns '0' on success and a negative value on failure.
+ */
+static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
+ dma_addr_t dma_dst, dma_addr_t dma_src,
+ size_t len)
+{
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct dma_chan *chan = epf_test->dma_chan;
+ struct pci_epf *epf = epf_test->epf;
+ struct dma_async_tx_descriptor *tx;
+ struct device *dev = &epf->dev;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (IS_ERR_OR_NULL(chan)) {
+ dev_err(dev, "Invalid DMA memcpy channel\n");
+ return -EINVAL;
+ }
+
+ tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
+ if (!tx) {
+ dev_err(dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ tx->callback = pci_epf_test_dma_callback;
+ tx->callback_param = epf_test;
+ cookie = tx->tx_submit(tx);
+ reinit_completion(&epf_test->transfer_complete);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
+ return -EIO;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
+ if (ret < 0) {
+ dmaengine_terminate_sync(chan);
+ dev_err(dev, "DMA wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
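
For readers new to the dmaengine API, the helper below isolates the same prepare/submit/issue/wait pattern in a self-contained sketch. It is illustrative only, not code from this patch: the names are hypothetical, it assumes the caller already holds a DMA_MEMCPY-capable channel, and it uses a bounded wait_for_completion_timeout() where the function above waits interruptibly.

	#include <linux/completion.h>
	#include <linux/dmaengine.h>
	#include <linux/jiffies.h>

	static void my_dma_done(void *param)
	{
		complete(param);	/* param is the on-stack completion below */
	}

	/* Hypothetical helper: one memcpy via a dmaengine channel, 1 s timeout. */
	static int my_memcpy_via_dma(struct dma_chan *chan, dma_addr_t dst,
				     dma_addr_t src, size_t len)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		/* 1. Prepare a descriptor; DMA_PREP_INTERRUPT requests a callback. */
		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
		if (!tx)
			return -EIO;

		/* 2. Attach the callback and queue the descriptor. */
		tx->callback = my_dma_done;
		tx->callback_param = &done;
		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie))
			return -EIO;

		/* 3. Start the engine and wait for the callback to fire. */
		dma_async_issue_pending(chan);
		if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000))) {
			dmaengine_terminate_sync(chan);
			return -ETIMEDOUT;
		}

		return 0;
	}

Note that the driver treats DMA as strictly optional: pci_epf_test_bind() below clears dma_supported when channel setup fails, and the read/write/copy handlers then fall back to memcpy_fromio()/memcpy_toio().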
+
+/**
+ * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
+ * @epf_test: the EPF test device that performs data transfer operation
+ *
+ * Function to initialize EPF test DMA channel.
+ */
+static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
+{
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dma_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get DMA channel\n");
+ return ret;
+ }
+ init_completion(&epf_test->transfer_complete);
+
+ epf_test->dma_chan = dma_chan;
+
+ return 0;
+}
+
+/**
+ * pci_epf_test_clean_dma_chan() - Function to clean up EPF test DMA channel
+ * @epf_test: the EPF test device that performs data transfer operation
+ *
+ * Helper to clean up the EPF test DMA channel.
+ */
+static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
+{
+ dma_release_channel(epf_test->dma_chan);
+ epf_test->dma_chan = NULL;
+}
+
+static void pci_epf_test_print_rate(const char *ops, u64 size,
+ struct timespec64 *start,
+ struct timespec64 *end, bool dma)
+{
+ struct timespec64 ts;
+ u64 rate, ns;
+
+ ts = timespec64_sub(*end, *start);
+
+ /* convert both size (stored in 'rate') and time in terms of 'ns' */
+ ns = timespec64_to_ns(&ts);
+ rate = size * NSEC_PER_SEC;
+
+ /* Divide both size (stored in 'rate') and ns by a common factor */
+ while (ns > UINT_MAX) {
+ rate >>= 1;
+ ns >>= 1;
+ }
+
+ if (!ns)
+ return;
+
+ /* calculate the rate */
+ do_div(rate, (uint32_t)ns);
+
+ pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
+ "Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
+ (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
+}
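
To make the fixed-point arithmetic above concrete: for a 102400-byte transfer that takes 1 ms, ns = 1000000 and rate starts at 102400 * 10^9. Since ns already fits in a u32, no halving occurs; do_div() leaves rate = 102400000 bytes/s, and the final rate / 1024 prints 100000 KB/s. The halving loop only kicks in for transfers longer than UINT_MAX ns (about 4.29 s), and because it halves numerator and denominator together the quotient is unaffected.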
+
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
int ret;
+ bool use_dma;
void __iomem *src_addr;
void __iomem *dst_addr;
phys_addr_t src_phys_addr;
phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
@@ -117,8 +268,26 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_dst_addr;
}
- memcpy(dst_addr, src_addr, reg->size);
+ ktime_get_ts64(&start);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ src_phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ } else {
+ memcpy(dst_addr, src_addr, reg->size);
+ }
+ ktime_get_ts64(&end);
+ pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
+err_map_addr:
pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
err_dst_addr:
@@ -140,10 +309,14 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
void __iomem *src_addr;
void *buf;
u32 crc32;
+ bool use_dma;
phys_addr_t phys_addr;
+ phys_addr_t dst_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -169,12 +342,44 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err_map_addr;
}
- memcpy_fromio(buf, src_addr, reg->size);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_dma_map;
+ }
+
+ dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dst_phys_addr)) {
+ dev_err(dev, "Failed to map destination buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
+ phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
+ DMA_FROM_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_fromio(buf, src_addr, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);
crc32 = crc32_le(~0, buf, reg->size);
if (crc32 != reg->checksum)
ret = -EIO;
+err_dma_map:
kfree(buf);
err_map_addr:
@@ -192,10 +397,14 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
int ret;
void __iomem *dst_addr;
void *buf;
+ bool use_dma;
phys_addr_t phys_addr;
+ phys_addr_t src_phys_addr;
+ struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dma_dev = epf->epc->dev.parent;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -224,7 +433,38 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
get_random_bytes(buf, reg->size);
reg->checksum = crc32_le(~0, buf, reg->size);
- memcpy_toio(dst_addr, buf, reg->size);
+ use_dma = !!(reg->flags & FLAG_USE_DMA);
+ if (use_dma) {
+ if (!epf_test->dma_supported) {
+ dev_err(dev, "Cannot transfer data using DMA\n");
+ ret = -EINVAL;
+ goto err_map_addr;
+ }
+
+ src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, src_phys_addr)) {
+ dev_err(dev, "Failed to map source buffer addr\n");
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test, phys_addr,
+ src_phys_addr, reg->size);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, src_phys_addr, reg->size,
+ DMA_TO_DEVICE);
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_toio(dst_addr, buf, reg->size);
+ ktime_get_ts64(&end);
+ }
+
+ pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);
/*
* wait 1ms in order for the write to complete. Without this delay L3
@@ -232,6 +472,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
*/
usleep_range(1000, 2000);
+err_dma_map:
kfree(buf);
err_map_addr:
@@ -360,14 +601,6 @@ reset_handler:
msecs_to_jiffies(1));
}
-static void pci_epf_test_linkup(struct pci_epf *epf)
-{
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
-
- queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
- msecs_to_jiffies(1));
-}
-
static void pci_epf_test_unbind(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -376,6 +609,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
int bar;
cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
@@ -424,11 +658,90 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
return 0;
}
+static int pci_epf_test_core_init(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epf_header *header = epf->header;
+ const struct pci_epc_features *epc_features;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ bool msix_capable = false;
+ bool msi_capable = true;
+ int ret;
+
+ epc_features = pci_epc_get_features(epc, epf->func_no);
+ if (epc_features) {
+ msix_capable = epc_features->msix_capable;
+ msi_capable = epc_features->msi_capable;
+ }
+
+ ret = pci_epc_write_header(epc, epf->func_no, header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
+
+ ret = pci_epf_test_set_bar(epf);
+ if (ret)
+ return ret;
+
+ if (msi_capable) {
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
+ if (ret) {
+ dev_err(dev, "MSI configuration failed\n");
+ return ret;
+ }
+ }
+
+ if (msix_capable) {
+ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
+ epf_test->test_reg_bar,
+ epf_test->msix_table_offset);
+ if (ret) {
+ dev_err(dev, "MSI-X configuration failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ int ret;
+
+ switch (val) {
+ case CORE_INIT:
+ ret = pci_epf_test_core_init(epf);
+ if (ret)
+ return NOTIFY_BAD;
+ break;
+
+ case LINK_UP:
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+ break;
+
+ default:
+ dev_err(&epf->dev, "Invalid EPF test notifier event\n");
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_OK;
+}
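
The producer side of these events appears in the pci-epc-core.c hunk later in this patch: pci_epc_init_notify() and pci_epc_linkup() run the EPC's atomic notifier chain with CORE_INIT and LINK_UP respectively. A hypothetical EPC controller driver would fire them roughly as sketched below, from a threaded interrupt handler, since the notifier callbacks (e.g. pci_epf_test_core_init() above) take mutexes and may sleep. The my_epc_*() helpers are invented for illustration.

	#include <linux/interrupt.h>
	#include <linux/pci-epc.h>

	/* Hypothetical controller-specific status checks. */
	static bool my_epc_saw_core_reset_done(struct pci_epc *epc);
	static bool my_epc_saw_link_up(struct pci_epc *epc);

	/* Registered via request_threaded_irq(), so this runs in process context. */
	static irqreturn_t my_epc_threaded_irq(int irq, void *data)
	{
		struct pci_epc *epc = data;

		if (my_epc_saw_core_reset_done(epc))
			pci_epc_init_notify(epc);	/* notifier runs with CORE_INIT */

		if (my_epc_saw_link_up(epc))
			pci_epc_linkup(epc);		/* notifier runs with LINK_UP */

		return IRQ_HANDLED;
	}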
+
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
struct pci_epf_bar *epf_bar;
+ size_t msix_table_size = 0;
+ size_t test_reg_bar_size;
+ size_t pba_size = 0;
+ bool msix_capable;
void *base;
int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
@@ -437,13 +750,25 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
epc_features = epf_test->epc_features;
- if (epc_features->bar_fixed_size[test_reg_bar])
+ test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
+
+ msix_capable = epc_features->msix_capable;
+ if (msix_capable) {
+ msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
+ epf_test->msix_table_offset = test_reg_bar_size;
+ /* Align to QWORD or 8 Bytes */
+ pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
+ }
+ test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
+
+ if (epc_features->bar_fixed_size[test_reg_bar]) {
+ if (test_reg_size > bar_size[test_reg_bar])
+ return -ENOMEM;
test_reg_size = bar_size[test_reg_bar];
- else
- test_reg_size = sizeof(struct pci_epf_test_reg);
+ }
- base = pci_epf_alloc_space(epf, test_reg_size,
- test_reg_bar, epc_features->align);
+ base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
+ epc_features->align);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
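
The BAR layout computed above is: the register block rounded up to 128 bytes, then the MSI-X table, then the pending bit array. For example, with epf->msix_interrupts = 32 and PCI_MSIX_ENTRY_SIZE = 16: sizeof(struct pci_epf_test_reg) is 48 bytes with the new flags field, so test_reg_bar_size = ALIGN(48, 128) = 128 and msix_table_offset = 128; msix_table_size = 16 * 32 = 512; pba_size = ALIGN(DIV_ROUND_UP(32, 8), 8) = 8; and test_reg_size = 128 + 512 + 8 = 648 before any fixed-size BAR rounding.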
@@ -492,14 +817,11 @@ static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epf_header *header = epf->header;
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- struct device *dev = &epf->dev;
bool linkup_notifier = false;
- bool msix_capable = false;
- bool msi_capable = true;
+ bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -507,8 +829,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
epc_features = pci_epc_get_features(epc, epf->func_no);
if (epc_features) {
linkup_notifier = epc_features->linkup_notifier;
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
+ core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
pci_epf_configure_bar(epf, epc_features);
}
@@ -516,38 +837,28 @@ static int pci_epf_test_bind(struct pci_epf *epf)
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
- ret = pci_epc_write_header(epc, epf->func_no, header);
- if (ret) {
- dev_err(dev, "Configuration header write failed\n");
- return ret;
- }
-
ret = pci_epf_test_alloc_space(epf);
if (ret)
return ret;
- ret = pci_epf_test_set_bar(epf);
- if (ret)
- return ret;
-
- if (msi_capable) {
- ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
- if (ret) {
- dev_err(dev, "MSI configuration failed\n");
+ if (!core_init_notifier) {
+ ret = pci_epf_test_core_init(epf);
+ if (ret)
return ret;
- }
}
- if (msix_capable) {
- ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
- if (ret) {
- dev_err(dev, "MSI-X configuration failed\n");
- return ret;
- }
- }
+ epf_test->dma_supported = true;
- if (!linkup_notifier)
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
+
+ if (linkup_notifier) {
+ epf->nb.notifier_call = pci_epf_test_notifier;
+ pci_epc_register_notifier(epc, &epf->nb);
+ } else {
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+ }
return 0;
}
@@ -580,7 +891,6 @@ static int pci_epf_test_probe(struct pci_epf *epf)
static struct pci_epf_ops ops = {
.unbind = pci_epf_test_unbind,
.bind = pci_epf_test_bind,
- .linkup = pci_epf_test_linkup,
};
static struct pci_epf_driver test_driver = {
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index d1288a0bd530..55edce50be96 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -29,7 +29,6 @@ struct pci_epc_group {
struct config_group group;
struct pci_epc *epc;
bool start;
- unsigned long function_num_map;
};
static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
@@ -58,6 +57,7 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
if (!start) {
pci_epc_stop(epc);
+ epc_group->start = 0;
return len;
}
@@ -89,37 +89,22 @@ static int pci_epc_epf_link(struct config_item *epc_item,
struct config_item *epf_item)
{
int ret;
- u32 func_no = 0;
struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
struct pci_epc *epc = epc_group->epc;
struct pci_epf *epf = epf_group->epf;
- func_no = find_first_zero_bit(&epc_group->function_num_map,
- BITS_PER_LONG);
- if (func_no >= BITS_PER_LONG)
- return -EINVAL;
-
- set_bit(func_no, &epc_group->function_num_map);
- epf->func_no = func_no;
-
ret = pci_epc_add_epf(epc, epf);
if (ret)
- goto err_add_epf;
+ return ret;
ret = pci_epf_bind(epf);
- if (ret)
- goto err_epf_bind;
+ if (ret) {
+ pci_epc_remove_epf(epc, epf);
+ return ret;
+ }
return 0;
-
-err_epf_bind:
- pci_epc_remove_epf(epc, epf);
-
-err_add_epf:
- clear_bit(func_no, &epc_group->function_num_map);
-
- return ret;
}
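
Note how the unwind collapses: function number allocation moves out of this configfs link path into pci_epc_add_epf() itself (see the pci-epc-core.c hunk below, where epc->function_num_map is claimed under epc->lock and released again in pci_epc_remove_epf()), so a bind failure here needs only pci_epc_remove_epf() and the per-group bitmap disappears.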
static void pci_epc_epf_unlink(struct config_item *epc_item,
@@ -134,7 +119,6 @@ static void pci_epc_epf_unlink(struct config_item *epc_item,
epc = epc_group->epc;
epf = epf_group->epf;
- clear_bit(epf->func_no, &epc_group->function_num_map);
pci_epf_unbind(epf);
pci_epc_remove_epf(epc, epf);
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 2091508c1620..82ba0dc7f2f5 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -120,7 +120,6 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
u8 func_no)
{
const struct pci_epc_features *epc_features;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return NULL;
@@ -128,9 +127,9 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
if (!epc->ops->get_features)
return NULL;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc_features = epc->ops->get_features(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return epc_features;
}
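
The spinlock-to-mutex conversion repeated through this file is presumably possible because these wrappers are only reached from process context (configfs bind/unbind and probe paths), and it lets the underlying ->ops callbacks sleep, which a spinlock held with interrupts disabled would forbid.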
@@ -144,14 +143,12 @@ EXPORT_SYMBOL_GPL(pci_epc_get_features);
*/
void pci_epc_stop(struct pci_epc *epc)
{
- unsigned long flags;
-
if (IS_ERR(epc) || !epc->ops->stop)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->stop(epc);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);
@@ -164,7 +161,6 @@ EXPORT_SYMBOL_GPL(pci_epc_stop);
int pci_epc_start(struct pci_epc *epc)
{
int ret;
- unsigned long flags;
if (IS_ERR(epc))
return -EINVAL;
@@ -172,9 +168,9 @@ int pci_epc_start(struct pci_epc *epc)
if (!epc->ops->start)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->start(epc);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -193,7 +189,6 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -201,9 +196,9 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
if (!epc->ops->raise_irq)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -219,7 +214,6 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
{
int interrupt;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
@@ -227,9 +221,9 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
if (!epc->ops->get_msi)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
interrupt = epc->ops->get_msi(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
if (interrupt < 0)
return 0;
@@ -252,7 +246,6 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
int ret;
u8 encode_int;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts > 32)
@@ -263,9 +256,9 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
encode_int = order_base_2(interrupts);
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->set_msi(epc, func_no, encode_int);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -281,7 +274,6 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
{
int interrupt;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
@@ -289,9 +281,9 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
if (!epc->ops->get_msix)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
interrupt = epc->ops->get_msix(epc, func_no);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
if (interrupt < 0)
return 0;
@@ -305,13 +297,15 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the endpoint function number in the EPC device
* @interrupts: number of MSI-X interrupts required by the EPF
+ * @bir: BAR where the MSI-X table resides
+ * @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno bir, u32 offset)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts < 1 || interrupts > 2048)
@@ -320,9 +314,9 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
if (!epc->ops->set_msix)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
- ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_lock(&epc->lock);
+ ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -339,17 +333,15 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr)
{
- unsigned long flags;
-
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
if (!epc->ops->unmap_addr)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->unmap_addr(epc, func_no, phys_addr);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -367,7 +359,6 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -375,9 +366,9 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
if (!epc->ops->map_addr)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -394,8 +385,6 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
struct pci_epf_bar *epf_bar)
{
- unsigned long flags;
-
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
(epf_bar->barno == BAR_5 &&
epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
@@ -404,9 +393,9 @@ void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
if (!epc->ops->clear_bar)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
epc->ops->clear_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -422,7 +411,6 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
struct pci_epf_bar *epf_bar)
{
int ret;
- unsigned long irq_flags;
int flags = epf_bar->flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
@@ -437,9 +425,9 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
if (!epc->ops->set_bar)
return 0;
- spin_lock_irqsave(&epc->lock, irq_flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->set_bar(epc, func_no, epf_bar);
- spin_unlock_irqrestore(&epc->lock, irq_flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -460,7 +448,6 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *header)
{
int ret;
- unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
@@ -468,9 +455,9 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
if (!epc->ops->write_header)
return 0;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
ret = epc->ops->write_header(epc, func_no, header);
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
return ret;
}
@@ -487,7 +474,8 @@ EXPORT_SYMBOL_GPL(pci_epc_write_header);
*/
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
{
- unsigned long flags;
+ u32 func_no;
+ int ret = 0;
if (epf->epc)
return -EBUSY;
@@ -495,16 +483,30 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
if (IS_ERR(epc))
return -EINVAL;
- if (epf->func_no > epc->max_functions - 1)
- return -EINVAL;
+ mutex_lock(&epc->lock);
+ func_no = find_first_zero_bit(&epc->function_num_map,
+ BITS_PER_LONG);
+ if (func_no >= BITS_PER_LONG) {
+ ret = -EINVAL;
+ goto ret;
+ }
+ if (func_no > epc->max_functions - 1) {
+ dev_err(&epc->dev, "Exceeding max supported Function Number\n");
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ set_bit(func_no, &epc->function_num_map);
+ epf->func_no = func_no;
epf->epc = epc;
- spin_lock_irqsave(&epc->lock, flags);
list_add_tail(&epf->list, &epc->pci_epf);
- spin_unlock_irqrestore(&epc->lock, flags);
- return 0;
+ret:
+ mutex_unlock(&epc->lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);
@@ -517,15 +519,14 @@ EXPORT_SYMBOL_GPL(pci_epc_add_epf);
*/
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
{
- unsigned long flags;
-
if (!epc || IS_ERR(epc) || !epf)
return;
- spin_lock_irqsave(&epc->lock, flags);
+ mutex_lock(&epc->lock);
+ clear_bit(epf->func_no, &epc->function_num_map);
list_del(&epf->list);
epf->epc = NULL;
- spin_unlock_irqrestore(&epc->lock, flags);
+ mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@@ -539,20 +540,31 @@ EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
*/
void pci_epc_linkup(struct pci_epc *epc)
{
- unsigned long flags;
- struct pci_epf *epf;
-
if (!epc || IS_ERR(epc))
return;
- spin_lock_irqsave(&epc->lock, flags);
- list_for_each_entry(epf, &epc->pci_epf, list)
- pci_epf_linkup(epf);
- spin_unlock_irqrestore(&epc->lock, flags);
+ atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
+ * pci_epc_init_notify() - Notify the EPF device that EPC device's core
+ * initialization is completed.
+ * @epc: the EPC device whose core initialization is completed
+ *
+ * Invoke to notify the EPF device that the EPC device's initialization
+ * is completed.
+ */
+void pci_epc_init_notify(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_init_notify);
+
+/**
* pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
@@ -610,8 +622,9 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
goto err_ret;
}
- spin_lock_init(&epc->lock);
+ mutex_init(&epc->lock);
INIT_LIST_HEAD(&epc->pci_epf);
+ ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);
device_initialize(&epc->dev);
epc->dev.class = pci_epc_class;
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index d2b174ce15de..abfac1109a13 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
mem->page_size = page_size;
mem->pages = pages;
mem->size = size;
+ mutex_init(&mem->lock);
epc->mem = mem;
@@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
int pageno;
- void __iomem *virt_addr;
+ void __iomem *virt_addr = NULL;
struct pci_epc_mem *mem = epc->mem;
unsigned int page_shift = ilog2(mem->page_size);
int order;
@@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
if (pageno < 0)
- return NULL;
+ goto ret;
*phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
+ret:
+ mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
@@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
pageno = (phys_addr - mem->phys_base) >> page_shift;
size = ALIGN(size, mem->page_size);
order = pci_epc_mem_get_order(mem, size);
+ mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
+ mutex_unlock(&mem->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
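
Combined with pci_epc_map_addr()/pci_epc_unmap_addr() from pci-epc-core.c, these allocator helpers support the outbound-window pattern that pci-epf-test follows. A minimal sketch, with a hypothetical function name and a fixed 4 KiB window (an assumption for illustration, not code from this patch):

	#include <linux/io.h>
	#include <linux/pci-epc.h>
	#include <linux/sizes.h>

	/* Hypothetical: read 4 KiB of host (RC) memory from an endpoint function. */
	static int my_epf_read_host(struct pci_epc *epc, u8 func_no,
				    u64 host_pci_addr, void *buf)
	{
		void __iomem *addr;
		phys_addr_t phys;
		int ret;

		/* Reserve a window in the controller's outbound address space. */
		addr = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
		if (!addr)
			return -ENOMEM;

		/* Point the window at the host-side PCI address. */
		ret = pci_epc_map_addr(epc, func_no, phys, host_pci_addr, SZ_4K);
		if (ret)
			goto free;

		memcpy_fromio(buf, addr, SZ_4K);	/* PIO copy through the window */
		pci_epc_unmap_addr(epc, func_no, phys);
	free:
		pci_epc_mem_free_addr(epc, phys, addr, SZ_4K);
		return ret;
	}

With the new mem->lock, concurrent allocations and frees can no longer race on the bitmap; a mutex is used rather than a spinlock because the ioremap() inside the critical section may sleep.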
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index fb1306de8f40..244e00f48c5c 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -21,26 +21,6 @@ static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
- * pci_epf_linkup() - Notify the function driver that EPC device has
- * established a connection with the Root Complex.
- * @epf: the EPF device bound to the EPC device which has established
- * the connection with the host
- *
- * Invoke to notify the function driver that EPC device has established
- * a connection with the Root Complex.
- */
-void pci_epf_linkup(struct pci_epf *epf)
-{
- if (!epf->driver) {
- dev_WARN(&epf->dev, "epf device not bound to driver\n");
- return;
- }
-
- epf->driver->ops->linkup(epf);
-}
-EXPORT_SYMBOL_GPL(pci_epf_linkup);
-
-/**
* pci_epf_unbind() - Notify the function driver that the binding between the
* EPF device and EPC device has been lost
* @epf: the EPF device which has lost the binding with the EPC device
@@ -55,7 +35,9 @@ void pci_epf_unbind(struct pci_epf *epf)
return;
}
+ mutex_lock(&epf->lock);
epf->driver->ops->unbind(epf);
+ mutex_unlock(&epf->lock);
module_put(epf->driver->owner);
}
EXPORT_SYMBOL_GPL(pci_epf_unbind);
@@ -69,6 +51,8 @@ EXPORT_SYMBOL_GPL(pci_epf_unbind);
*/
int pci_epf_bind(struct pci_epf *epf)
{
+ int ret;
+
if (!epf->driver) {
dev_WARN(&epf->dev, "epf device not bound to driver\n");
return -EINVAL;
@@ -77,7 +61,11 @@ int pci_epf_bind(struct pci_epf *epf)
if (!try_module_get(epf->driver->owner))
return -EAGAIN;
- return epf->driver->ops->bind(epf);
+ mutex_lock(&epf->lock);
+ ret = epf->driver->ops->bind(epf);
+ mutex_unlock(&epf->lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_bind);
@@ -99,6 +87,7 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
epf->bar[bar].phys_addr);
epf->bar[bar].phys_addr = 0;
+ epf->bar[bar].addr = NULL;
epf->bar[bar].size = 0;
epf->bar[bar].barno = 0;
epf->bar[bar].flags = 0;
@@ -135,6 +124,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
}
epf->bar[bar].phys_addr = phys_addr;
+ epf->bar[bar].addr = space;
epf->bar[bar].size = size;
epf->bar[bar].barno = bar;
epf->bar[bar].flags |= upper_32_bits(size) ?
@@ -214,7 +204,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (!driver->ops)
return -EINVAL;
- if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
+ if (!driver->ops->bind || !driver->ops->unbind)
return -EINVAL;
driver->driver.bus = &pci_epf_bus_type;
@@ -272,6 +262,7 @@ struct pci_epf *pci_epf_create(const char *name)
device_initialize(dev);
dev->bus = &pci_epf_bus_type;
dev->type = &pci_epf_type;
+ mutex_init(&epf->lock);
ret = dev_set_name(dev, "%s", name);
if (ret) {
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index aa61d4c219d7..ae44f46d1bf3 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -84,6 +84,7 @@ struct controller {
struct pcie_device *pcie;
u32 slot_cap; /* capabilities and quirks */
+ unsigned int inband_presence_disabled:1;
u16 slot_ctrl; /* control register access */
struct mutex ctrl_lock;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8a2cb1764386..53433b37e181 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -14,6 +14,7 @@
#define dev_fmt(fmt) "pciehp: " fmt
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/jiffies.h>
@@ -26,6 +27,24 @@
#include "../pci.h"
#include "pciehp.h"
+static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
+ /*
+ * Match all Dell systems, as some Dell systems have inband
+ * presence disabled on NVMe slots (but don't support the bit to
+ * report it). Setting inband presence disabled should have no
+ * negative effect, except on broken hotplug slots that never
+ * assert presence detect--and those will still work, they will
+ * just have a bit of extra delay before being probed.
+ */
+ {
+ .ident = "Dell System",
+ .matches = {
+ DMI_MATCH(DMI_OEM_STRING, "Dell System"),
+ },
+ },
+ {}
+};
+
static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
return ctrl->pcie->port;
@@ -252,6 +271,22 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
return found;
}
+static void pcie_wait_for_presence(struct pci_dev *pdev)
+{
+ int timeout = 1250;
+ u16 slot_status;
+
+ do {
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_PDS)
+ return;
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 0);
+
+ pci_info(pdev, "Timeout waiting for Presence Detect\n");
+}
+
int pciehp_check_link_status(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
@@ -261,6 +296,9 @@ int pciehp_check_link_status(struct controller *ctrl)
if (!pcie_wait_for_link(pdev, true))
return -1;
+ if (ctrl->inband_presence_disabled)
+ pcie_wait_for_presence(pdev);
+
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
@@ -527,7 +565,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct device *parent = pdev->dev.parent;
- u16 status, events;
+ u16 status, events = 0;
/*
* Interrupts only occur in D3hot or shallower and only if enabled
@@ -552,6 +590,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
}
}
+read_status:
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
@@ -564,24 +603,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
* Slot Status contains plain status bits as well as event
* notification bits; right now we only want the event bits.
*/
- events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
- PCI_EXP_SLTSTA_DLLSC);
+ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC;
/*
* If we've already reported a power fault, don't report it again
* until we've done something to handle it.
*/
if (ctrl->power_fault_detected)
- events &= ~PCI_EXP_SLTSTA_PFD;
+ status &= ~PCI_EXP_SLTSTA_PFD;
+ events |= status;
if (!events) {
if (parent)
pm_runtime_put(parent);
return IRQ_NONE;
}
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ if (status) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+
+ /*
+ * In MSI mode, all event bits must be zero before the port
+ * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
+ * So re-read the Slot Status register in case a bit was set
+ * between read and write.
+ */
+ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
+ goto read_status;
+ }
+
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
if (parent)
pm_runtime_put(parent);
@@ -625,17 +677,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
ret = pciehp_isr(irq, dev_id);
enable_irq(irq);
- if (ret != IRQ_WAKE_THREAD) {
- pci_config_pm_runtime_put(pdev);
- return ret;
- }
+ if (ret != IRQ_WAKE_THREAD)
+ goto out;
}
synchronize_hardirq(irq);
events = atomic_xchg(&ctrl->pending_events, 0);
if (!events) {
- pci_config_pm_runtime_put(pdev);
- return IRQ_NONE;
+ ret = IRQ_NONE;
+ goto out;
}
/* Check Attention Button Pressed */
@@ -664,10 +714,12 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
pciehp_handle_presence_or_link_change(ctrl, events);
up_read(&ctrl->reset_lock);
+ ret = IRQ_HANDLED;
+out:
pci_config_pm_runtime_put(pdev);
ctrl->ist_running = false;
wake_up(&ctrl->requester);
- return IRQ_HANDLED;
+ return ret;
}
static int pciehp_poll(void *data)
@@ -848,7 +900,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
- u32 slot_cap, link_cap;
+ u32 slot_cap, slot_cap2, link_cap;
u8 poweron;
struct pci_dev *pdev = dev->port;
struct pci_bus *subordinate = pdev->subordinate;
@@ -883,6 +935,16 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
up_read(&pci_bus_sem);
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
+ if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
+ PCI_EXP_SLTCTL_IBPD_DISABLE);
+ ctrl->inband_presence_disabled = 1;
+ }
+
+ if (dmi_first_match(inband_presence_disabled_dmi_table))
+ ctrl->inband_presence_disabled = 1;
+
/* Check if Data Link Layer Link Active Reporting is implemented */
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
@@ -892,7 +954,7 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
- ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
@@ -903,6 +965,7 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
+ FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 977946e4e613..c5eb509c72f0 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -140,7 +140,7 @@ static void dlpar_pci_add_bus(struct device_node *dn)
struct pci_controller *phb = pdn->phb;
struct pci_dev *dev = NULL;
- eeh_add_device_tree_early(pdn);
+ pseries_eeh_init_edev_recursive(pdn);
/* Add EADS device to PHB bus, adding new entry to bus->devices */
dev = of_create_pci_dev(dn, phb->bus, pdn->devfn);
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index e408e4021cee..6504869efabc 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -288,11 +288,10 @@ EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);
static int is_php_type(char *drc_type)
{
- unsigned long value;
char *endptr;
/* PCI Hotplug nodes have an integer for drc_type */
- value = simple_strtoul(drc_type, &endptr, 10);
+ simple_strtoul(drc_type, &endptr, 10);
if (endptr == drc_type)
return 0;
@@ -494,6 +493,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return retval;
if (state == PRESENT) {
+ pseries_eeh_init_edev_recursive(PCI_DN(slot->dn));
+
pci_lock_rescan_remove();
pci_hp_add_devices(slot->bus);
pci_unlock_rescan_remove();
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index beca61badeea..c380bdacd146 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -95,8 +95,10 @@ int rpaphp_enable_slot(struct slot *slot)
return -EINVAL;
}
- if (list_empty(&bus->devices))
+ if (list_empty(&bus->devices)) {
+ pseries_eeh_init_edev_recursive(PCI_DN(slot->dn));
pci_hp_add_devices(bus);
+ }
if (!list_empty(&bus->devices)) {
slot->state = CONFIGURED;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 30ee72268790..39295d88f670 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -19,7 +19,6 @@
#include <asm/sclp.h>
#define SLOT_NAME_SIZE 10
-static LIST_HEAD(s390_hotplug_slot_list);
static int zpci_fn_configured(enum zpci_state state)
{
@@ -27,97 +26,86 @@ static int zpci_fn_configured(enum zpci_state state)
state == ZPCI_FN_STATE_ONLINE;
}
-/*
- * struct slot - slot information for each *physical* slot
- */
-struct slot {
- struct list_head slot_list;
- struct hotplug_slot hotplug_slot;
- struct zpci_dev *zdev;
-};
-
-static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+static inline int zdev_configure(struct zpci_dev *zdev)
{
- return container_of(hotplug_slot, struct slot, hotplug_slot);
-}
-
-static inline int slot_configure(struct slot *slot)
-{
- int ret = sclp_pci_configure(slot->zdev->fid);
+ int ret = sclp_pci_configure(zdev->fid);
- zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, ret);
if (!ret)
- slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
return ret;
}
-static inline int slot_deconfigure(struct slot *slot)
+static inline int zdev_deconfigure(struct zpci_dev *zdev)
{
- int ret = sclp_pci_deconfigure(slot->zdev->fid);
+ int ret = sclp_pci_deconfigure(zdev->fid);
- zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
if (!ret)
- slot->zdev->state = ZPCI_FN_STATE_STANDBY;
+ zdev->state = ZPCI_FN_STATE_STANDBY;
return ret;
}
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = to_slot(hotplug_slot);
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
int rc;
- if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
+ if (zdev->state != ZPCI_FN_STATE_STANDBY)
return -EIO;
- rc = slot_configure(slot);
+ rc = zdev_configure(zdev);
if (rc)
return rc;
- rc = zpci_enable_device(slot->zdev);
+ rc = zpci_enable_device(zdev);
if (rc)
goto out_deconfigure;
- pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
+ pci_scan_slot(zdev->bus, ZPCI_DEVFN);
pci_lock_rescan_remove();
- pci_bus_add_devices(slot->zdev->bus);
+ pci_bus_add_devices(zdev->bus);
pci_unlock_rescan_remove();
return rc;
out_deconfigure:
- slot_deconfigure(slot);
+ zdev_deconfigure(zdev);
return rc;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = to_slot(hotplug_slot);
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
struct pci_dev *pdev;
int rc;
- if (!zpci_fn_configured(slot->zdev->state))
+ if (!zpci_fn_configured(zdev->state))
return -EIO;
- pdev = pci_get_slot(slot->zdev->bus, ZPCI_DEVFN);
+ pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
if (pdev) {
pci_stop_and_remove_bus_device_locked(pdev);
pci_dev_put(pdev);
}
- rc = zpci_disable_device(slot->zdev);
+ rc = zpci_disable_device(zdev);
if (rc)
return rc;
- return slot_deconfigure(slot);
+ return zdev_deconfigure(zdev);
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = to_slot(hotplug_slot);
+ struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
+ hotplug_slot);
- switch (slot->zdev->state) {
+ switch (zdev->state) {
case ZPCI_FN_STATE_STANDBY:
*value = 0;
break;
@@ -145,44 +133,15 @@ static const struct hotplug_slot_ops s390_hotplug_slot_ops = {
int zpci_init_slot(struct zpci_dev *zdev)
{
char name[SLOT_NAME_SIZE];
- struct slot *slot;
- int rc;
- if (!zdev)
- return 0;
-
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->zdev = zdev;
- slot->hotplug_slot.ops = &s390_hotplug_slot_ops;
+ zdev->hotplug_slot.ops = &s390_hotplug_slot_ops;
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
- rc = pci_hp_register(&slot->hotplug_slot, zdev->bus,
- ZPCI_DEVFN, name);
- if (rc)
- goto error_reg;
-
- list_add(&slot->slot_list, &s390_hotplug_slot_list);
- return 0;
-
-error_reg:
- kfree(slot);
-error:
- return -ENOMEM;
+ return pci_hp_register(&zdev->hotplug_slot, zdev->bus,
+ ZPCI_DEVFN, name);
}
void zpci_exit_slot(struct zpci_dev *zdev)
{
- struct slot *slot, *next;
-
- list_for_each_entry_safe(slot, next, &s390_hotplug_slot_list,
- slot_list) {
- if (slot->zdev != zdev)
- continue;
- list_del(&slot->slot_list);
- pci_hp_deregister(&slot->hotplug_slot);
- kfree(slot);
- }
+ pci_hp_deregister(&zdev->hotplug_slot);
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 9a8a38384121..b73b10bce0df 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -291,6 +291,9 @@ static const struct pci_p2pdma_whitelist_entry {
{PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE},
/* Intel SkyLake-E */
{PCI_VENDOR_ID_INTEL, 0x2030, 0},
+ {PCI_VENDOR_ID_INTEL, 0x2031, 0},
+ {PCI_VENDOR_ID_INTEL, 0x2032, 0},
+ {PCI_VENDOR_ID_INTEL, 0x2033, 0},
{PCI_VENDOR_ID_INTEL, 0x2020, 0},
{}
};
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0c02d500158f..d21969fba6ab 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -439,7 +439,7 @@ enum hpx_type3_dev_type {
static u16 hpx3_device_type(struct pci_dev *dev)
{
u16 pcie_type = pci_pcie_type(dev);
- const int pcie_to_hpx3_type[] = {
+ static const int pcie_to_hpx3_type[] = {
[PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
[PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
[PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
@@ -1241,6 +1241,7 @@ static void pci_acpi_setup(struct device *dev)
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_set_untrusted(pci_dev);
+ pci_acpi_add_edr_notifier(pci_dev);
pci_acpi_add_pm_notifier(adev, pci_dev);
if (!adev->wakeup.flags.valid)
@@ -1268,6 +1269,7 @@ static void pci_acpi_cleanup(struct device *dev)
if (!adev)
return;
+ pci_acpi_remove_edr_notifier(pci_dev);
pci_acpi_remove_pm_notifier(adev);
if (adev->wakeup.flags.valid) {
acpi_device_power_remove_dependent(adev, dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 13f766db0684..6d78df981d41 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -156,7 +156,8 @@ static ssize_t max_link_speed_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return sprintf(buf, "%s\n", PCIE_SPEED2STR(pcie_get_speed_cap(pdev)));
+ return sprintf(buf, "%s\n",
+ pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);
@@ -175,33 +176,15 @@ static ssize_t current_link_speed_show(struct device *dev,
struct pci_dev *pci_dev = to_pci_dev(dev);
u16 linkstat;
int err;
- const char *speed;
+ enum pci_bus_speed speed;
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
if (err)
return -EINVAL;
- switch (linkstat & PCI_EXP_LNKSTA_CLS) {
- case PCI_EXP_LNKSTA_CLS_32_0GB:
- speed = "32 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_16_0GB:
- speed = "16 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_8_0GB:
- speed = "8 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_5_0GB:
- speed = "5 GT/s";
- break;
- case PCI_EXP_LNKSTA_CLS_2_5GB:
- speed = "2.5 GT/s";
- break;
- default:
- speed = "Unknown speed";
- }
+ speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
- return sprintf(buf, "%s\n", speed);
+ return sprintf(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);
@@ -464,7 +447,8 @@ static ssize_t dev_rescan_store(struct device *dev,
}
return count;
}
-static DEVICE_ATTR_WO(dev_rescan);
+static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
+ dev_rescan_store);
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -501,7 +485,8 @@ static ssize_t bus_rescan_store(struct device *dev,
}
return count;
}
-static DEVICE_ATTR_WO(bus_rescan);
+static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
+ bus_rescan_store);
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 86821313c007..595fcf59843f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1560,7 +1560,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_rebar_state(dev);
pci_restore_dpc_state(dev);
- pci_cleanup_aer_error_status_regs(dev);
+ pci_aer_clear_status(dev);
pci_restore_aer_state(dev);
pci_restore_config_space(dev);
@@ -5841,19 +5841,10 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
* where only 2.5 GT/s and 5.0 GT/s speeds were defined.
*/
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
- if (lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
- return PCIE_SPEED_32_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
- return PCIE_SPEED_16_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- return PCIE_SPEED_8_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- return PCIE_SPEED_5_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- return PCIE_SPEED_2_5GT;
- return PCI_SPEED_UNKNOWN;
- }
+
+ /* PCIe r3.0-compliant */
+ if (lnkcap2)
+ return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
@@ -5929,14 +5920,14 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
if (bw_avail >= bw_cap && verbose)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
bw_cap / 1000, bw_cap % 1000,
- PCIE_SPEED2STR(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap);
else if (bw_avail < bw_cap)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
bw_avail / 1000, bw_avail % 1000,
- PCIE_SPEED2STR(speed), width,
+ pci_speed_string(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
bw_cap / 1000, bw_cap % 1000,
- PCIE_SPEED2STR(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap);
}
/**
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6394e7746fb5..6d3f75867106 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -292,22 +292,25 @@ void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
void pci_bus_put(struct pci_bus *bus);
-/* PCIe link information */
-#define PCIE_SPEED2STR(speed) \
- ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \
- (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \
- (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \
- (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \
- "Unknown speed")
+/* PCIe link information from Link Capabilities 2 */
+#define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \
+ ((lnkcap2) & PCI_EXP_LNKCAP2_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
+ (lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN)
/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
- ((speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
+ ((speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
+ (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
(speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
(speed) == PCIE_SPEED_5_0GT ? 5000*8/10 : \
(speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
0)
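For reference, two worked values under the macros above (the 128/130 and 8/10 factors are the 128b/130b and 8b/10b line-encoding overheads; the lnkcap2 value is hypothetical):

    /*
     * lnkcap2 = 0x0e (2.5, 5 and 8 GT/s bits set) resolves to
     * PCIE_SPEED_8_0GT, since PCIE_LNKCAP2_SLS2SPEED() picks the
     * highest bit that is set.
     */
    PCIE_SPEED2MBS_ENC(PCIE_SPEED_8_0GT); /* 8000*128/130 = 7876 Mb/s */
    PCIE_SPEED2MBS_ENC(PCIE_SPEED_2_5GT); /* 2500*8/10   = 2000 Mb/s */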
+const char *pci_speed_string(enum pci_bus_speed speed);
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
@@ -448,9 +451,13 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#ifdef CONFIG_PCIE_DPC
void pci_save_dpc_state(struct pci_dev *dev);
void pci_restore_dpc_state(struct pci_dev *dev);
+void pci_dpc_init(struct pci_dev *pdev);
+void dpc_process_error(struct pci_dev *pdev);
+pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) {}
static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
+static inline void pci_dpc_init(struct pci_dev *pdev) {}
#endif
#ifdef CONFIG_PCI_ATS
@@ -547,8 +554,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
#endif
/* PCI error reporting and recovery */
-void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
- u32 service);
+pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ enum pci_channel_state state,
+ pci_ers_result_t (*reset_link)(struct pci_dev *pdev));
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
@@ -651,12 +659,16 @@ void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
void pci_aer_clear_device_status(struct pci_dev *dev);
+int pci_aer_clear_status(struct pci_dev *dev);
+int pci_aer_raw_clear_status(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
static inline void pci_aer_init(struct pci_dev *d) { }
static inline void pci_aer_exit(struct pci_dev *d) { }
static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
+static inline int pci_aer_clear_status(struct pci_dev *dev) { return -EINVAL; }
+static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL; }
#endif
#ifdef CONFIG_ACPI
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 7876dc4b28f8..66386811cfde 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -141,3 +141,13 @@ config PCIE_BW
This enables PCI Express Bandwidth Change Notification. If
you know link width or rate changes occur only to correct
unreliable links, you may answer Y.
+
+config PCIE_EDR
+ bool "PCI Express Error Disconnect Recover support"
+ depends on PCIE_DPC && ACPI
+ help
+ This option adds Error Disconnect Recover support as specified
+ in the Downstream Port Containment Related Enhancements ECN to
+ the PCI Firmware Specification r3.2. Enable this if you want to
+ support the hybrid DPC model, which uses both firmware and OS to
+ implement DPC.
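For illustration, a .config fragment that satisfies the dependencies above might read (hypothetical excerpt, not part of this patch):

    CONFIG_ACPI=y
    CONFIG_PCIE_DPC=y
    CONFIG_PCIE_EDR=y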
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index efb9d2e71e9e..68da9280ff11 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
obj-$(CONFIG_PCIE_PTM) += ptm.o
obj-$(CONFIG_PCIE_BW) += bw_notification.o
+obj-$(CONFIG_PCIE_EDR) += edr.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 4a818b07a1af..f4274d301235 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -102,6 +102,7 @@ struct aer_stats {
#define ERR_UNCOR_ID(d) (d >> 16)
static int pcie_aer_disable;
+static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
void pci_no_aer(void)
{
@@ -376,7 +377,7 @@ void pci_aer_clear_device_status(struct pci_dev *dev)
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
-int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
int pos;
u32 status, sev;
@@ -397,7 +398,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+EXPORT_SYMBOL_GPL(pci_aer_clear_nonfatal_status);
void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
@@ -419,7 +420,16 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
-int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+/**
+ * pci_aer_raw_clear_status - Clear AER error registers.
+ * @dev: the PCI device
+ *
+ * Clear AER error status registers unconditionally, regardless of
+ * whether they're owned by firmware or the OS.
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_aer_raw_clear_status(struct pci_dev *dev)
{
int pos;
u32 status;
@@ -432,9 +442,6 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
if (!pos)
return -EIO;
- if (pcie_aer_get_firmware_first(dev))
- return -EIO;
-
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
@@ -450,6 +457,14 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
+int pci_aer_clear_status(struct pci_dev *dev)
+{
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
+ return pci_aer_raw_clear_status(dev);
+}
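A minimal caller sketch of how the two helpers divide responsibility (illustrative only):

    /* Normal path: refuses to touch registers that firmware owns */
    pci_aer_clear_status(pdev);     /* returns -EIO if firmware-first */

    /* EDR path: firmware handed the event to the OS, so clear anyway */
    pci_aer_raw_clear_status(pdev);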
+
void pci_save_aer_state(struct pci_dev *dev)
{
struct pci_cap_saved_state *save_state;
@@ -515,7 +530,7 @@ void pci_aer_init(struct pci_dev *dev)
n = pcie_cap_has_rtctl(dev) ? 5 : 4;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
- pci_cleanup_aer_error_status_regs(dev);
+ pci_aer_clear_status(dev);
}
void pci_aer_exit(struct pci_dev *dev)
@@ -1053,11 +1068,9 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
- pcie_do_recovery(dev, pci_channel_io_normal,
- PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
- pcie_do_recovery(dev, pci_channel_io_frozen,
- PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset);
pci_dev_put(dev);
}
@@ -1094,10 +1107,10 @@ static void aer_recover_work_func(struct work_struct *work)
cper_print_aer(pdev, entry.severity, entry.regs);
if (entry.severity == AER_NONFATAL)
pcie_do_recovery(pdev, pci_channel_io_normal,
- PCIE_PORT_SERVICE_AER);
+ aer_root_reset);
else if (entry.severity == AER_FATAL)
pcie_do_recovery(pdev, pci_channel_io_frozen,
- PCIE_PORT_SERVICE_AER);
+ aer_root_reset);
pci_dev_put(pdev);
}
}
@@ -1501,7 +1514,6 @@ static struct pcie_port_service_driver aerdriver = {
.probe = aer_probe,
.remove = aer_remove,
- .reset_link = aer_root_reset,
};
/**
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 0dcd44308228..2378ed692534 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -273,7 +273,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
}
if (consistent)
return;
- pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
+ pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
}
/* Configure downstream component, all functions */
@@ -747,9 +747,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
/* Enable what we need to enable */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CAP_L1_PM_SS, val);
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index e06f42f58d3d..762170423fdd 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -17,13 +17,6 @@
#include "portdrv.h"
#include "../pci.h"
-struct dpc_dev {
- struct pcie_device *dev;
- u16 cap_pos;
- bool rp_extensions;
- u8 rp_log_size;
-};
-
static const char * const rp_pio_error_string[] = {
"Configuration Request received UR Completion", /* Bit Position 0 */
"Configuration Request received CA Completion", /* Bit Position 1 */
@@ -46,63 +39,42 @@ static const char * const rp_pio_error_string[] = {
"Memory Request Completion Timeout", /* Bit Position 18 */
};
-static struct dpc_dev *to_dpc_dev(struct pci_dev *dev)
-{
- struct device *device;
-
- device = pcie_port_find_device(dev, PCIE_PORT_SERVICE_DPC);
- if (!device)
- return NULL;
- return get_service_data(to_pcie_device(device));
-}
-
void pci_save_dpc_state(struct pci_dev *dev)
{
- struct dpc_dev *dpc;
struct pci_cap_saved_state *save_state;
u16 *cap;
if (!pci_is_pcie(dev))
return;
- dpc = to_dpc_dev(dev);
- if (!dpc)
- return;
-
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
if (!save_state)
return;
cap = (u16 *)&save_state->cap.data[0];
- pci_read_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, cap);
+ pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}
void pci_restore_dpc_state(struct pci_dev *dev)
{
- struct dpc_dev *dpc;
struct pci_cap_saved_state *save_state;
u16 *cap;
if (!pci_is_pcie(dev))
return;
- dpc = to_dpc_dev(dev);
- if (!dpc)
- return;
-
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
if (!save_state)
return;
cap = (u16 *)&save_state->cap.data[0];
- pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap);
+ pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}
-static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
+static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
unsigned long timeout = jiffies + HZ;
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, status;
+ u16 cap = pdev->dpc_cap, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
while (status & PCI_EXP_DPC_RP_BUSY &&
@@ -117,17 +89,15 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
return 0;
}
-static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
+pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
- struct dpc_dev *dpc;
u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
* already been reset by the time we get here.
*/
- dpc = to_dpc_dev(pdev);
- cap = dpc->cap_pos;
+ cap = pdev->dpc_cap;
/*
* Wait until the Link is inactive, then clear DPC Trigger Status
@@ -135,7 +105,7 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
*/
pcie_wait_for_link(pdev, false);
- if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
+ if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
return PCI_ERS_RESULT_DISCONNECT;
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
@@ -147,10 +117,9 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
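With the reset routine now passed explicitly, both the DPC interrupt handler and the new EDR path reuse this function, as later hunks in this patch show:

    pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);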
-static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
+static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, dpc_status, first_error;
+ u16 cap = pdev->dpc_cap, dpc_status, first_error;
u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
int i;
@@ -175,7 +144,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
first_error == i ? " (First)" : "");
}
- if (dpc->rp_log_size < 4)
+ if (pdev->dpc_rp_log_size < 4)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
&dw0);
@@ -188,12 +157,12 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
dw0, dw1, dw2, dw3);
- if (dpc->rp_log_size < 5)
+ if (pdev->dpc_rp_log_size < 5)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
- for (i = 0; i < dpc->rp_log_size - 5; i++) {
+ for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
@@ -224,12 +193,10 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
return 1;
}
-static irqreturn_t dpc_handler(int irq, void *context)
+void dpc_process_error(struct pci_dev *pdev)
{
+ u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
struct aer_err_info info;
- struct dpc_dev *dpc = context;
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
@@ -248,27 +215,33 @@ static irqreturn_t dpc_handler(int irq, void *context)
"reserved error");
/* show RP PIO error detail information */
- if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
- dpc_process_rp_pio_error(dpc);
+ if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
+ dpc_process_rp_pio_error(pdev);
else if (reason == 0 &&
dpc_get_aer_uncorrect_severity(pdev, &info) &&
aer_get_device_error_info(pdev, &info)) {
aer_print_error(pdev, &info);
- pci_cleanup_aer_uncorrect_error_status(pdev);
+ pci_aer_clear_nonfatal_status(pdev);
pci_aer_clear_fatal_status(pdev);
}
+}
+
+static irqreturn_t dpc_handler(int irq, void *context)
+{
+ struct pci_dev *pdev = context;
+
+ dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_recovery(pdev, pci_channel_io_frozen, PCIE_PORT_SERVICE_DPC);
+ pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);
return IRQ_HANDLED;
}
static irqreturn_t dpc_irq(int irq, void *context)
{
- struct dpc_dev *dpc = (struct dpc_dev *)context;
- struct pci_dev *pdev = dpc->dev->port;
- u16 cap = dpc->cap_pos, status;
+ struct pci_dev *pdev = context;
+ u16 cap = pdev->dpc_cap, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
@@ -282,10 +255,30 @@ static irqreturn_t dpc_irq(int irq, void *context)
return IRQ_HANDLED;
}
+void pci_dpc_init(struct pci_dev *pdev)
+{
+ u16 cap;
+
+ pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
+ if (!pdev->dpc_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
+ if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
+ return;
+
+ pdev->dpc_rp_extensions = true;
+ pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+ if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
+ pdev->dpc_rp_log_size);
+ pdev->dpc_rp_log_size = 0;
+ }
+}
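A worked decode of the DPC Capability register, assuming a hypothetical value:

    /*
     * cap = 0x0424:
     *   bits 4:0  = 0x04 -> interrupt message number 4
     *   bit 5     = 1    -> PCI_EXP_DPC_CAP_RP_EXT (RP extensions present)
     *   bits 11:8 = 0x4  -> RP PIO log size of 4 dwords (valid range 4..9)
     */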
+
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
static int dpc_probe(struct pcie_device *dev)
{
- struct dpc_dev *dpc;
struct pci_dev *pdev = dev->port;
struct device *device = &dev->device;
int status;
@@ -294,43 +287,25 @@ static int dpc_probe(struct pcie_device *dev)
if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
- dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
- if (!dpc)
- return -ENOMEM;
-
- dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
- dpc->dev = dev;
- set_service_data(dev, dpc);
-
status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
dpc_handler, IRQF_SHARED,
- "pcie-dpc", dpc);
+ "pcie-dpc", pdev);
if (status) {
pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
status);
return status;
}
- pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
- pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
-
- dpc->rp_extensions = (cap & PCI_EXP_DPC_CAP_RP_EXT);
- if (dpc->rp_extensions) {
- dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
- if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) {
- pci_err(pdev, "RP PIO log size %u is invalid\n",
- dpc->rp_log_size);
- dpc->rp_log_size = 0;
- }
- }
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
- pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
- FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
+ FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
@@ -339,13 +314,12 @@ static int dpc_probe(struct pcie_device *dev)
static void dpc_remove(struct pcie_device *dev)
{
- struct dpc_dev *dpc = get_service_data(dev);
struct pci_dev *pdev = dev->port;
u16 ctl;
- pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
+ pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
- pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}
static struct pcie_port_service_driver dpcdriver = {
@@ -354,7 +328,6 @@ static struct pcie_port_service_driver dpcdriver = {
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
- .reset_link = dpc_reset_link,
};
int __init pcie_dpc_init(void)
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
new file mode 100644
index 000000000000..594622a6cb16
--- /dev/null
+++ b/drivers/pci/pcie/edr.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Error Disconnect Recover support
+ * Author: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+ *
+ * Copyright (C) 2020 Intel Corp.
+ */
+
+#define dev_fmt(fmt) "EDR: " fmt
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+
+#include "portdrv.h"
+#include "../pci.h"
+
+#define EDR_PORT_DPC_ENABLE_DSM 0x0C
+#define EDR_PORT_LOCATE_DSM 0x0D
+#define EDR_OST_SUCCESS 0x80
+#define EDR_OST_FAILED 0x81
+
+/*
+ * _DSM wrapper function to enable DPC
+ * @pdev : PCI device structure
+ *
+ * returns 0 on success or errno on failure.
+ */
+static int acpi_enable_dpc(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ union acpi_object *obj, argv4, req;
+ int status = 0;
+
+ /*
+ * Behavior when calling unsupported _DSM functions is undefined,
+ * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ */
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ 1ULL << EDR_PORT_DPC_ENABLE_DSM))
+ return 0;
+
+ req.type = ACPI_TYPE_INTEGER;
+ req.integer.value = 1;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &req;
+
+ /*
+ * Per Downstream Port Containment Related Enhancements ECN to PCI
+ * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+ * optional. Return success if it's not implemented.
+ */
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ EDR_PORT_DPC_ENABLE_DSM, &argv4);
+ if (!obj)
+ return 0;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ pci_err(pdev, FW_BUG "Enable DPC _DSM returned non integer\n");
+ status = -EIO;
+ }
+
+ if (obj->integer.value != 1) {
+ pci_err(pdev, "Enable DPC _DSM failed to enable DPC\n");
+ status = -EIO;
+ }
+
+ ACPI_FREE(obj);
+
+ return status;
+}
+
+/*
+ * _DSM wrapper function to locate DPC port
+ * @pdev : Device which received EDR event
+ *
+ * Returns pci_dev or NULL. Caller is responsible for dropping a reference
+ * on the returned pci_dev with pci_dev_put().
+ */
+static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ union acpi_object *obj;
+ u16 port;
+
+ /*
+ * Behavior when calling unsupported _DSM functions is undefined,
+ * so check whether EDR_PORT_LOCATE_DSM is supported.
+ */
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ 1ULL << EDR_PORT_LOCATE_DSM))
+ return pci_dev_get(pdev);
+
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ EDR_PORT_LOCATE_DSM, NULL);
+ if (!obj)
+ return pci_dev_get(pdev);
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ ACPI_FREE(obj);
+ pci_err(pdev, FW_BUG "Locate Port _DSM returned non integer\n");
+ return NULL;
+ }
+
+ /*
+ * Firmware returns DPC port BDF details in following format:
+ * 15:8 = bus
+ * 7:3 = device
+ * 2:0 = function
+ */
+ port = obj->integer.value;
+
+ ACPI_FREE(obj);
+
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ PCI_BUS_NUM(port), port & 0xff);
+}
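A worked decode of the _DSM return value, assuming a hypothetical port of 0x30a8:

    /*
     * port = 0x30a8:
     *   bits 15:8 -> bus 0x30
     *   bits 7:0  -> devfn 0xa8, i.e. device 0x15, function 0
     */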
+
+/*
+ * _OST wrapper function to let firmware know the status of EDR event
+ * @pdev : Device used to send _OST
+ * @edev : Device which experienced EDR event
+ * @status : Status of EDR event
+ */
+static int acpi_send_edr_status(struct pci_dev *pdev, struct pci_dev *edev,
+ u16 status)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ u32 ost_status;
+
+ pci_dbg(pdev, "Status for %s: %#x\n", pci_name(edev), status);
+
+ ost_status = PCI_DEVID(edev->bus->number, edev->devfn) << 16;
+ ost_status |= status;
+
+ status = acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_DISCONNECT_RECOVER,
+ ost_status, NULL);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void edr_handle_event(acpi_handle handle, u32 event, void *data)
+{
+ struct pci_dev *pdev = data, *edev;
+ pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
+ u16 status;
+
+ pci_info(pdev, "ACPI event %#x received\n", event);
+
+ if (event != ACPI_NOTIFY_DISCONNECT_RECOVER)
+ return;
+
+ /* Locate the port which issued EDR event */
+ edev = acpi_dpc_port_get(pdev);
+ if (!edev) {
+ pci_err(pdev, "Firmware failed to locate DPC port\n");
+ return;
+ }
+
+ pci_dbg(pdev, "Reported EDR dev: %s\n", pci_name(edev));
+
+ /* If port does not support DPC, just send the OST */
+ if (!edev->dpc_cap) {
+ pci_err(edev, FW_BUG "This device doesn't support DPC\n");
+ goto send_ost;
+ }
+
+ /* Check if there is a valid DPC trigger */
+ pci_read_config_word(edev, edev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
+ if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
+ pci_err(edev, "Invalid DPC trigger %#010x\n", status);
+ goto send_ost;
+ }
+
+ dpc_process_error(edev);
+ pci_aer_raw_clear_status(edev);
+
+ /*
+ * Irrespective of whether the DPC event is triggered by ERR_FATAL
+ * or ERR_NONFATAL, since the link is already down, use the FATAL
+ * error recovery path for both cases.
+ */
+ estate = pcie_do_recovery(edev, pci_channel_io_frozen, dpc_reset_link);
+
+send_ost:
+
+ /*
+ * If recovery is successful, send _OST(0xF, BDF << 16 | 0x80)
+ * to firmware. If not successful, send _OST(0xF, BDF << 16 | 0x81).
+ */
+ if (estate == PCI_ERS_RESULT_RECOVERED) {
+ pci_dbg(edev, "DPC port successfully recovered\n");
+ acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
+ } else {
+ pci_dbg(edev, "DPC port recovery failed\n");
+ acpi_send_edr_status(pdev, edev, EDR_OST_FAILED);
+ }
+
+ pci_dev_put(edev);
+}
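The _OST payload packs the reporting device's BDF above the status byte; for a hypothetical device 00:1c.0 (devfn 0xe0) that recovered:

    ost_status = PCI_DEVID(0x00, 0xe0) << 16 | EDR_OST_SUCCESS;
    /* = 0x00e0 << 16 | 0x80 = 0x00e00080 */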
+
+void pci_acpi_add_edr_notifier(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ acpi_status status;
+
+ if (!adev) {
+ pci_dbg(pdev, "No valid ACPI node, skipping EDR init\n");
+ return;
+ }
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ edr_handle_event, pdev);
+ if (ACPI_FAILURE(status)) {
+ pci_err(pdev, "Failed to install notify handler\n");
+ return;
+ }
+
+ if (acpi_enable_dpc(pdev))
+ acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ edr_handle_event);
+ else
+ pci_dbg(pdev, "Notify handler installed\n");
+}
+
+void pci_acpi_remove_edr_notifier(struct pci_dev *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+
+ if (!adev)
+ return;
+
+ acpi_remove_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ edr_handle_event);
+ pci_dbg(pdev, "Notify handler removed\n");
+}
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 01dfc8bb7ca0..14bb8f54723e 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -146,49 +146,9 @@ out:
return 0;
}
-/**
- * default_reset_link - default reset function
- * @dev: pointer to pci_dev data structure
- *
- * Invoked when performing link reset on a Downstream Port or a
- * Root Port with no aer driver.
- */
-static pci_ers_result_t default_reset_link(struct pci_dev *dev)
-{
- int rc;
-
- rc = pci_bus_error_reset(dev);
- pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
- return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
-{
- pci_ers_result_t status;
- struct pcie_port_service_driver *driver = NULL;
-
- driver = pcie_port_find_service(dev, service);
- if (driver && driver->reset_link) {
- status = driver->reset_link(dev);
- } else if (pcie_downstream_port(dev)) {
- status = default_reset_link(dev);
- } else {
- pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(dev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED) {
- pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(dev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return status;
-}
-
-void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
- u32 service)
+pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ enum pci_channel_state state,
+ pci_ers_result_t (*reset_link)(struct pci_dev *pdev))
{
pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
struct pci_bus *bus;
@@ -203,14 +163,16 @@ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
bus = dev->subordinate;
pci_dbg(dev, "broadcast error_detected message\n");
- if (state == pci_channel_io_frozen)
+ if (state == pci_channel_io_frozen) {
pci_walk_bus(bus, report_frozen_detected, &status);
- else
+ status = reset_link(dev);
+ if (status != PCI_ERS_RESULT_RECOVERED) {
+ pci_warn(dev, "link reset failed\n");
+ goto failed;
+ }
+ } else {
pci_walk_bus(bus, report_normal_detected, &status);
-
- if (state == pci_channel_io_frozen &&
- reset_link(dev, service) != PCI_ERS_RESULT_RECOVERED)
- goto failed;
+ }
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
@@ -236,13 +198,15 @@ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
pci_walk_bus(bus, report_resume, &status);
pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
+ pci_aer_clear_nonfatal_status(dev);
pci_info(dev, "device recovery successful\n");
- return;
+ return status;
failed:
pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
pci_info(dev, "device recovery failed\n");
+
+ return status;
}
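Each error source now supplies its own link-reset routine; the call sites introduced elsewhere in this patch follow the pattern:

    pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset); /* AER nonfatal */
    pcie_do_recovery(dev, pci_channel_io_frozen, dpc_reset_link); /* DPC and EDR */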
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 1e673619b101..64b5e081cdb2 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -92,9 +92,6 @@ struct pcie_port_service_driver {
/* Device driver may resume normal operations */
void (*error_resume)(struct pci_dev *dev);
- /* Link Reset Capability - AER service driver specific */
- pci_ers_result_t (*reset_link)(struct pci_dev *dev);
-
int port_type; /* Type of the port this driver can handle */
u32 service; /* Port service this device represents */
@@ -161,7 +158,5 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
}
#endif
-struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
- u32 service);
struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 5075cb9e850c..50a9522ab07d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -459,27 +459,6 @@ static int find_service_iter(struct device *device, void *data)
}
/**
- * pcie_port_find_service - find the service driver
- * @dev: PCI Express port the service is associated with
- * @service: Service to find
- *
- * Find PCI Express port service driver associated with given service
- */
-struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
- u32 service)
-{
- struct pcie_port_service_driver *drv;
- struct portdrv_service_data pdrvs;
-
- pdrvs.drv = NULL;
- pdrvs.service = service;
- device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
-
- drv = pdrvs.drv;
- return drv;
-}
-
-/**
* pcie_port_find_device - find the struct device
* @dev: PCI Express port the service is associated with
* @service: For the service to find
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 512cb4312ddd..77b8a145c39b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -598,6 +598,7 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
bridge->native_ltr = 1;
+ bridge->native_dpc = 1;
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -640,6 +641,7 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge)
}
EXPORT_SYMBOL(pci_free_host_bridge);
+/* Indexed by PCI_X_SSTATUS_FREQ (secondary bus mode and frequency) */
static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
@@ -659,6 +661,7 @@ static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_133MHz_PCIX_533 /* F */
};
+/* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */
const unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCIE_SPEED_2_5GT, /* 1 */
@@ -677,6 +680,44 @@ const unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* E */
PCI_SPEED_UNKNOWN /* F */
};
+EXPORT_SYMBOL_GPL(pcie_link_speed);
+
+const char *pci_speed_string(enum pci_bus_speed speed)
+{
+ /* Indexed by the pci_bus_speed enum */
+ static const char *speed_strings[] = {
+ "33 MHz PCI", /* 0x00 */
+ "66 MHz PCI", /* 0x01 */
+ "66 MHz PCI-X", /* 0x02 */
+ "100 MHz PCI-X", /* 0x03 */
+ "133 MHz PCI-X", /* 0x04 */
+ NULL, /* 0x05 */
+ NULL, /* 0x06 */
+ NULL, /* 0x07 */
+ NULL, /* 0x08 */
+ "66 MHz PCI-X 266", /* 0x09 */
+ "100 MHz PCI-X 266", /* 0x0a */
+ "133 MHz PCI-X 266", /* 0x0b */
+ "Unknown AGP", /* 0x0c */
+ "1x AGP", /* 0x0d */
+ "2x AGP", /* 0x0e */
+ "4x AGP", /* 0x0f */
+ "8x AGP", /* 0x10 */
+ "66 MHz PCI-X 533", /* 0x11 */
+ "100 MHz PCI-X 533", /* 0x12 */
+ "133 MHz PCI-X 533", /* 0x13 */
+ "2.5 GT/s PCIe", /* 0x14 */
+ "5.0 GT/s PCIe", /* 0x15 */
+ "8.0 GT/s PCIe", /* 0x16 */
+ "16.0 GT/s PCIe", /* 0x17 */
+ "32.0 GT/s PCIe", /* 0x18 */
+ };
+
+ if (speed < ARRAY_SIZE(speed_strings))
+ return speed_strings[speed];
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(pci_speed_string);
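Illustrative use of the new helper (a sketch, not from this patch):

    pci_info(dev, "link at %s\n", pci_speed_string(PCIE_SPEED_8_0GT));
    /* prints "link at 8.0 GT/s PCIe"; speeds past the table print "Unknown" */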
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
@@ -2329,6 +2370,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */
pci_ptm_init(dev); /* Precision Time Measurement */
pci_aer_init(dev); /* Advanced Error Reporting */
+ pci_dpc_init(dev); /* Downstream Port Containment */
pcie_report_downtraining(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 29f473ebf20f..28c9a2409c50 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1970,26 +1970,92 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk
/*
* IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
* 300641-004US, section 5.7.3.
+ *
+ * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003.
+ * Core IO on Xeon E5 v2, see Intel order no 329188-003.
+ * Core IO on Xeon E7 v2, see Intel order no 329595-002.
+ * Core IO on Xeon E5 v3, see Intel order no 330784-003.
+ * Core IO on Xeon E7 v3, see Intel order no 332315-001US.
+ * Core IO on Xeon E5 v4, see Intel order no 333810-002US.
+ * Core IO on Xeon E7 v4, see Intel order no 332315-001US.
+ * Core IO on Xeon D-1500, see Intel order no 332051-001.
+ * Core IO on Xeon Scalable, see Intel order no 610950.
*/
-#define INTEL_6300_IOAPIC_ABAR 0x40
+#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
+#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */
+#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25)
+
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
+ u32 pci_config_dword;
if (noioapicquirk)
return;
- pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
- pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
- pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
-
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_ESB_10:
+ pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ &pci_config_word);
+ pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
+ pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
+ pci_config_word);
+ break;
+ case 0x3c28: /* Xeon E5 1600/2600/4600 */
+ case 0x0e28: /* Xeon E5/E7 V2 */
+ case 0x2f28: /* Xeon E5/E7 V3,V4 */
+ case 0x6f28: /* Xeon D-1500 */
+ case 0x2034: /* Xeon Scalable Family */
+ pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ &pci_config_dword);
+ pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
+ pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
+ pci_config_dword);
+ break;
+ default:
+ return;
+ }
pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+/*
+ * Device 29 Func 5 Device IDs of IO-APICs
+ * containing the ABAR (APIC1 Alternate Base Address Register)
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
+ quirk_disable_intel_boot_interrupt);
+
+/*
+ * Device 5 Func 0 Device IDs of Core IO modules/hubs
+ * containing Coherent Interface Protocol Interrupt Control
+ *
+ * Device IDs obtained from the volume 2 datasheets of the
+ * families commented above.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
+ quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
+ quirk_disable_intel_boot_interrupt);
/* Disable boot interrupts on HT-1000 */
#define BC_HT1000_FEATURE_REG 0x64
@@ -4400,6 +4466,29 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
}
/*
+ * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability.
+ * However, the implementation may block peer-to-peer transactions between
+ * them and provide ACS-like functionality.
+ */
+static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ if (!pci_is_pcie(dev) ||
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENOTTY;
+
+ switch (dev->device) {
+ case 0x0710 ... 0x071e:
+ case 0x0721:
+ case 0x0723 ... 0x0732:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+
+ return false;
+}
+
+/*
* Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
@@ -4701,6 +4790,12 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
+ /* Zhaoxin multi-function devices */
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ /* Zhaoxin Root/Downstream Ports */
+ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
};
@@ -5461,3 +5556,14 @@ out_disable:
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
PCI_CLASS_DISPLAY_VGA, 8,
quirk_reset_lenovo_thinkpad_p50_nvgpu);
+
+/*
+ * Device [1b21:2142]
+ * When in D0, PME# doesn't get asserted when plugging USB 3.0 device.
+ */
+static void pci_fixup_no_d0_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# does not work under D0, disabling it\n");
+ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 137bf0cee897..8fc9a4e911e3 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
pci_disable_rom(pdev);
}
EXPORT_SYMBOL(pci_unmap_rom);
-
-/**
- * pci_platform_rom - provides a pointer to any ROM image provided by the
- * platform
- * @pdev: pointer to pci device struct
- * @size: pointer to receive size of pci window over ROM
- */
-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
-{
- if (pdev->rom && pdev->romlen) {
- *size = pdev->romlen;
- return phys_to_virt((phys_addr_t)pdev->rom);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(pci_platform_rom);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f2461bf9243d..bbcef1a053ab 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -846,7 +846,7 @@ static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
* Per spec, I/O windows are 4K-aligned, but some bridges have
* an extension to support 1K alignment.
*/
- if (bus->self->io_window_1k)
+ if (bus->self && bus->self->io_window_1k)
align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
else
align = PCI_P2P_DEFAULT_IO_ALIGN;
@@ -920,7 +920,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
calculate_iosize(size, min_size, size1, add_size, children_add_size,
resource_size(b_res), min_align);
if (!size0 && !size1) {
- if (b_res->start || b_res->end)
+ if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
b_res->flags = 0;
@@ -930,7 +930,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
b_res->start = min_align;
b_res->end = b_res->start + size0 - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
- if (size1 > size0 && realloc_head) {
+ if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
@@ -1073,7 +1073,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
calculate_memsize(size, min_size, add_size, children_add_size,
resource_size(b_res), add_align);
if (!size0 && !size1) {
- if (b_res->start || b_res->end)
+ if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
b_res->flags = 0;
@@ -1082,7 +1082,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->start = min_align;
b_res->end = size0 + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
- if (size1 > size0 && realloc_head) {
+ if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
@@ -1196,8 +1196,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
unsigned long mask, prefmask, type2 = 0, type3 = 0;
resource_size_t additional_io_size = 0, additional_mmio_size = 0,
additional_mmio_pref_size = 0;
- struct resource *b_res;
- int ret;
+ struct resource *pref;
+ struct pci_host_bridge *host;
+ int hdr_type, i, ret;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -1217,10 +1218,20 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
}
/* The root bus? */
- if (pci_is_root_bus(bus))
- return;
+ if (pci_is_root_bus(bus)) {
+ host = to_pci_host_bridge(bus->bridge);
+ if (!host->size_windows)
+ return;
+ pci_bus_for_each_resource(bus, pref, i)
+ if (pref && (pref->flags & IORESOURCE_PREFETCH))
+ break;
+ hdr_type = -1; /* Intentionally invalid - not a PCI device. */
+ } else {
+ pref = &bus->self->resource[PCI_BRIDGE_RESOURCES + 2];
+ hdr_type = bus->self->hdr_type;
+ }
- switch (bus->self->hdr_type) {
+ switch (hdr_type) {
case PCI_HEADER_TYPE_CARDBUS:
/* Don't size CardBuses yet */
break;
@@ -1242,10 +1253,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
* the size required to put all 64-bit prefetchable
* resources in it.
*/
- b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES];
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (b_res[2].flags & IORESOURCE_MEM_64) {
+ if (pref && (pref->flags & IORESOURCE_MEM_64)) {
prefmask |= IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
prefmask, prefmask,
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index ae4aa0e1f2f4..cc386ef2fa12 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,45 +49,9 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
slot->number);
}
-/* these strings match up with the values in pci_bus_speed */
-static const char *pci_bus_speed_strings[] = {
- "33 MHz PCI", /* 0x00 */
- "66 MHz PCI", /* 0x01 */
- "66 MHz PCI-X", /* 0x02 */
- "100 MHz PCI-X", /* 0x03 */
- "133 MHz PCI-X", /* 0x04 */
- NULL, /* 0x05 */
- NULL, /* 0x06 */
- NULL, /* 0x07 */
- NULL, /* 0x08 */
- "66 MHz PCI-X 266", /* 0x09 */
- "100 MHz PCI-X 266", /* 0x0a */
- "133 MHz PCI-X 266", /* 0x0b */
- "Unknown AGP", /* 0x0c */
- "1x AGP", /* 0x0d */
- "2x AGP", /* 0x0e */
- "4x AGP", /* 0x0f */
- "8x AGP", /* 0x10 */
- "66 MHz PCI-X 533", /* 0x11 */
- "100 MHz PCI-X 533", /* 0x12 */
- "133 MHz PCI-X 533", /* 0x13 */
- "2.5 GT/s PCIe", /* 0x14 */
- "5.0 GT/s PCIe", /* 0x15 */
- "8.0 GT/s PCIe", /* 0x16 */
- "16.0 GT/s PCIe", /* 0x17 */
- "32.0 GT/s PCIe", /* 0x18 */
-};
-
static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
{
- const char *speed_string;
-
- if (speed < ARRAY_SIZE(pci_bus_speed_strings))
- speed_string = pci_bus_speed_strings[speed];
- else
- speed_string = "Unknown";
-
- return sprintf(buf, "%s\n", speed_string);
+ return sprintf(buf, "%s\n", pci_speed_string(speed));
}
static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 33c9b6ea7364..fb9b17fa0fb5 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -40,7 +40,7 @@ struct cis_cache_entry {
unsigned int addr;
unsigned int len;
unsigned int attr;
- unsigned char cache[0];
+ unsigned char cache[];
};
struct pccard_resource_ops {
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 0a04eb04f3a2..d3ef5534991e 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -329,7 +329,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
static struct platform_driver omap_cf_driver = {
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
},
.remove = __exit_p(omap_cf_remove),
};
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 9e6922c08ef6..3b05760e69d6 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -1076,7 +1076,7 @@ static ssize_t show_io_db(struct device *dev,
for (p = data->io_db.next; p != &data->io_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1133,7 +1133,7 @@ static ssize_t show_mem_db(struct device *dev,
p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1142,7 +1142,7 @@ static ssize_t show_mem_db(struct device *dev,
for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index e2e8729afd9d..784ada5b8c4f 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -14,7 +14,7 @@
#include <asm/mach-types.h>
#include <mach/simpad.h>
#include "sa1100_generic.h"
-
+
static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
@@ -66,7 +66,7 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
break;
- case 33:
+ case 33:
simpad_clear_cs3_bit(VCC_3V_EN|EN1);
simpad_set_cs3_bit(VCC_5V_EN|EN0);
break;
@@ -95,7 +95,7 @@ static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
simpad_set_cs3_bit(PCMCIA_RESET);
}
-static struct pcmcia_low_level simpad_pcmcia_ops = {
+static struct pcmcia_low_level simpad_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = simpad_pcmcia_hw_init,
.hw_shutdown = simpad_pcmcia_hw_shutdown,
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index b7f993f1bbd0..222e81c79365 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -88,7 +88,7 @@ struct soc_pcmcia_socket {
struct skt_dev_info {
int nskt;
- struct soc_pcmcia_socket skt[0];
+ struct soc_pcmcia_socket skt[];
};
struct pcmcia_state {
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 49b1c6a1bdbe..bf6529b0b5b0 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -180,12 +180,12 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
for (i = 0; i < 0x24; i += 4) {
unsigned val;
if (!(i & 15))
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
val = cb_readl(socket, i);
- offset += snprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
}
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
for (i = 0; i < 0x45; i++) {
unsigned char val;
if (!(i & 7)) {
@@ -193,10 +193,10 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
memcpy(buf + offset, " -", 2);
offset += 2;
} else
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
}
val = exca_readb(socket, i);
- offset += snprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
}
buf[offset++] = '\n';
return offset;
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index af774ac2b934..71801e30d601 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -59,3 +59,25 @@ config PHY_MESON_G12A_USB3_PCIE
Enable this to support the Meson USB3 + PCIE Combo PHY found
in Meson G12A SoCs.
If unsure, say N.
+
+config PHY_MESON_AXG_PCIE
+ tristate "Meson AXG PCIE PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Enable this to support the Meson MIPI + PCIE PHY found
+ in Meson AXG SoCs.
+ If unsure, say N.
+
+config PHY_MESON_AXG_MIPI_PCIE_ANALOG
+ tristate "Meson AXG MIPI + PCIE analog PHY driver"
+ default ARCH_MESON
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Enable this to support the Meson MIPI + PCIE analog PHY
+ found in Meson AXG SoCs.
+ If unsure, say N.
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index 11d1c42ac2be..e2baa133f7af 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
-obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
-obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
-obj-$(CONFIG_PHY_MESON_GXL_USB3) += phy-meson-gxl-usb3.o
-obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
+obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
+obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
+obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
+obj-$(CONFIG_PHY_MESON_GXL_USB3) += phy-meson-gxl-usb3.o
+obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
+obj-$(CONFIG_PHY_MESON_AXG_PCIE) += phy-meson-axg-pcie.o
+obj-$(CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG) += phy-meson-axg-mipi-pcie-analog.o
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
new file mode 100644
index 000000000000..1431cbf885e1
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amlogic AXG MIPI + PCIE analog PHY driver
+ *
+ * Copyright (C) 2019 Remi Pommarel <repk@triplefau.lt>
+ */
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
+
+#define HHI_MIPI_CNTL0 0x00
+#define HHI_MIPI_CNTL0_COMMON_BLOCK GENMASK(31, 28)
+#define HHI_MIPI_CNTL0_ENABLE BIT(29)
+#define HHI_MIPI_CNTL0_BANDGAP BIT(26)
+#define HHI_MIPI_CNTL0_DECODE_TO_RTERM GENMASK(15, 12)
+#define HHI_MIPI_CNTL0_OUTPUT_EN BIT(3)
+
+#define HHI_MIPI_CNTL1 0x01
+#define HHI_MIPI_CNTL1_CH0_CML_PDR_EN BIT(12)
+#define HHI_MIPI_CNTL1_LP_ABILITY GENMASK(5, 4)
+#define HHI_MIPI_CNTL1_LP_RESISTER BIT(3)
+#define HHI_MIPI_CNTL1_INPUT_SETTING BIT(2)
+#define HHI_MIPI_CNTL1_INPUT_SEL BIT(1)
+#define HHI_MIPI_CNTL1_PRBS7_EN BIT(0)
+
+#define HHI_MIPI_CNTL2 0x02
+#define HHI_MIPI_CNTL2_CH_PU GENMASK(31, 25)
+#define HHI_MIPI_CNTL2_CH_CTL GENMASK(24, 19)
+#define HHI_MIPI_CNTL2_CH0_DIGDR_EN BIT(18)
+#define HHI_MIPI_CNTL2_CH_DIGDR_EN BIT(17)
+#define HHI_MIPI_CNTL2_LPULPS_EN BIT(16)
+#define HHI_MIPI_CNTL2_CH_EN(n) BIT(15 - (n))
+#define HHI_MIPI_CNTL2_CH0_LP_CTL GENMASK(10, 1)
+
+struct phy_axg_mipi_pcie_analog_priv {
+ struct phy *phy;
+ unsigned int mode;
+ struct regmap *regmap;
+};
+
+static const struct regmap_config phy_axg_mipi_pcie_analog_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = HHI_MIPI_CNTL2,
+};
+
+static int phy_axg_mipi_pcie_analog_power_on(struct phy *phy)
+{
+ struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+
+ /* MIPI not supported yet */
+ if (priv->mode != PHY_TYPE_PCIE)
+ return -EINVAL;
+
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_BANDGAP, HHI_MIPI_CNTL0_BANDGAP);
+
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_ENABLE, HHI_MIPI_CNTL0_ENABLE);
+ return 0;
+}
+
+static int phy_axg_mipi_pcie_analog_power_off(struct phy *phy)
+{
+ struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+
+ /* MIPI not supported yet */
+ if (priv->mode != PHY_TYPE_PCIE)
+ return -EINVAL;
+
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_BANDGAP, 0);
+ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+ HHI_MIPI_CNTL0_ENABLE, 0);
+ return 0;
+}
+
+static int phy_axg_mipi_pcie_analog_init(struct phy *phy)
+{
+ return 0;
+}
+
+static int phy_axg_mipi_pcie_analog_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static const struct phy_ops phy_axg_mipi_pcie_analog_ops = {
+ .init = phy_axg_mipi_pcie_analog_init,
+ .exit = phy_axg_mipi_pcie_analog_exit,
+ .power_on = phy_axg_mipi_pcie_analog_power_on,
+ .power_off = phy_axg_mipi_pcie_analog_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *phy_axg_mipi_pcie_analog_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct phy_axg_mipi_pcie_analog_priv *priv = dev_get_drvdata(dev);
+ unsigned int mode;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mode = args->args[0];
+
+ /* MIPI mode is not supported yet */
+ if (mode != PHY_TYPE_PCIE) {
+ dev_err(dev, "invalid phy mode select argument\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv->mode = mode;
+ return priv->phy;
+}
+
+static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_axg_mipi_pcie_analog_priv *priv;
+ struct device_node *np = dev->of_node;
+ struct regmap *map;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to get regmap base\n");
+ return PTR_ERR(base);
+ }
+
+ map = devm_regmap_init_mmio(dev, base,
+ &phy_axg_mipi_pcie_analog_regmap_conf);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to get HHI regmap\n");
+ return PTR_ERR(map);
+ }
+ priv->regmap = map;
+
+ priv->phy = devm_phy_create(dev, np, &phy_axg_mipi_pcie_analog_ops);
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to create PHY\n");
+ return ret;
+ }
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+
+ phy = devm_of_phy_provider_register(dev,
+ phy_axg_mipi_pcie_analog_xlate);
+
+ return PTR_ERR_OR_ZERO(phy);
+}
+
+static const struct of_device_id phy_axg_mipi_pcie_analog_of_match[] = {
+ {
+ .compatible = "amlogic,axg-mipi-pcie-analog-phy",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_axg_mipi_pcie_analog_of_match);
+
+static struct platform_driver phy_axg_mipi_pcie_analog_driver = {
+ .probe = phy_axg_mipi_pcie_analog_probe,
+ .driver = {
+ .name = "phy-axg-mipi-pcie-analog",
+ .of_match_table = phy_axg_mipi_pcie_analog_of_match,
+ },
+};
+module_platform_driver(phy_axg_mipi_pcie_analog_driver);
+
+MODULE_AUTHOR("Remi Pommarel <repk@triplefau.lt>");
+MODULE_DESCRIPTION("Amlogic AXG MIPI + PCIE analog PHY driver");
+MODULE_LICENSE("GPL v2");
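The xlate callback above is the whole mode-selection surface of this PHY: consumers pass a single PHY cell, and anything other than PHY_TYPE_PCIE is refused. A minimal consumer-side sketch, under the assumption that the consumer's node references this PHY as its first "phys" entry (names and node layout here are illustrative, not part of this patch):

#include <linux/phy/phy.h>
#include <dt-bindings/phy/phy.h>

/* Hypothetical consumer fragment: the DT is assumed to carry
 * phys = <&mipi_analog PHY_TYPE_PCIE>, which the one-argument
 * xlate above latches into priv->mode; any other type is -EINVAL.
 */
static int example_enable_analog(struct device *dev)
{
	struct phy *phy;

	phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* power_on asserts HHI_MIPI_CNTL0_BANDGAP, then _ENABLE */
	return phy_power_on(phy);
}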
diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c
new file mode 100644
index 000000000000..377ed0dcd0d9
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amlogic AXG PCIE PHY driver
+ *
+ * Copyright (C) 2020 Remi Pommarel <repk@triplefau.lt>
+ */
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/bitfield.h>
+#include <dt-bindings/phy/phy.h>
+
+#define MESON_PCIE_REG0 0x00
+#define MESON_PCIE_COMMON_CLK BIT(4)
+#define MESON_PCIE_PORT_SEL GENMASK(3, 2)
+#define MESON_PCIE_CLK BIT(1)
+#define MESON_PCIE_POWERDOWN BIT(0)
+
+#define MESON_PCIE_TWO_X1 FIELD_PREP(MESON_PCIE_PORT_SEL, 0x3)
+#define MESON_PCIE_COMMON_REF_CLK FIELD_PREP(MESON_PCIE_COMMON_CLK, 0x1)
+#define MESON_PCIE_PHY_INIT (MESON_PCIE_TWO_X1 | \
+ MESON_PCIE_COMMON_REF_CLK)
+#define MESON_PCIE_RESET_DELAY 500
+
+struct phy_axg_pcie_priv {
+ struct phy *phy;
+ struct phy *analog;
+ struct regmap *regmap;
+ struct reset_control *reset;
+};
+
+static const struct regmap_config phy_axg_pcie_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MESON_PCIE_REG0,
+};
+
+static int phy_axg_pcie_power_on(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_power_on(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ regmap_update_bits(priv->regmap, MESON_PCIE_REG0,
+ MESON_PCIE_POWERDOWN, 0);
+ return 0;
+}
+
+static int phy_axg_pcie_power_off(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_power_off(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ regmap_update_bits(priv->regmap, MESON_PCIE_REG0,
+ MESON_PCIE_POWERDOWN, 1);
+ return 0;
+}
+
+static int phy_axg_pcie_init(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_init(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ regmap_write(priv->regmap, MESON_PCIE_REG0, MESON_PCIE_PHY_INIT);
+ return reset_control_reset(priv->reset);
+}
+
+static int phy_axg_pcie_exit(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = phy_exit(priv->analog);
+ if (ret != 0)
+ return ret;
+
+ return reset_control_reset(priv->reset);
+}
+
+static int phy_axg_pcie_reset(struct phy *phy)
+{
+ struct phy_axg_pcie_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ ret = phy_reset(priv->analog);
+ if (ret != 0)
+ goto out;
+
+ ret = reset_control_assert(priv->reset);
+ if (ret != 0)
+ goto out;
+ udelay(MESON_PCIE_RESET_DELAY);
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret != 0)
+ goto out;
+ udelay(MESON_PCIE_RESET_DELAY);
+
+out:
+ return ret;
+}
+
+static const struct phy_ops phy_axg_pcie_ops = {
+ .init = phy_axg_pcie_init,
+ .exit = phy_axg_pcie_exit,
+ .power_on = phy_axg_pcie_power_on,
+ .power_off = phy_axg_pcie_power_off,
+ .reset = phy_axg_pcie_reset,
+ .owner = THIS_MODULE,
+};
+
+static int phy_axg_pcie_probe(struct platform_device *pdev)
+{
+ struct phy_provider *pphy;
+ struct device *dev = &pdev->dev;
+ struct phy_axg_pcie_priv *priv;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phy = devm_phy_create(dev, np, &phy_axg_pcie_ops);
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to create PHY\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(dev, base,
+ &phy_axg_pcie_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->reset = devm_reset_control_array_get(dev, false, false);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+
+ priv->analog = devm_phy_get(dev, "analog");
+ if (IS_ERR(priv->analog))
+ return PTR_ERR(priv->analog);
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+ pphy = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(pphy);
+}
+
+static const struct of_device_id phy_axg_pcie_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pcie-phy",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, phy_axg_pcie_of_match);
+
+static struct platform_driver phy_axg_pcie_driver = {
+ .probe = phy_axg_pcie_probe,
+ .driver = {
+ .name = "phy-axg-pcie",
+ .of_match_table = phy_axg_pcie_of_match,
+ },
+};
+module_platform_driver(phy_axg_pcie_driver);
+
+MODULE_AUTHOR("Remi Pommarel <repk@triplefau.lt>");
+MODULE_DESCRIPTION("Amlogic AXG PCIE PHY driver");
+MODULE_LICENSE("GPL v2");
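The digital PHY above is deliberately thin: every phy_ops callback forwards to the "analog" PHY first and only then touches MESON_PCIE_REG0 or the reset line. A sketch of the resulting bring-up order from a PCIe controller's side (the controller code is hypothetical; the ordering is what the ops above implement):

#include <linux/phy/phy.h>

/* Hypothetical controller fragment; phy would come from a
 * devm_phy_get() against this driver's provider.
 */
static int example_pcie_phy_up(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);	/* analog init, REG0 setup, reset pulse */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* analog power, clear POWERDOWN */
	if (ret)
		phy_exit(phy);

	return ret;
}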
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index df0ef69dd474..834c59950d1c 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -126,6 +126,18 @@ config PINCTRL_DA850_PUPD
Driver for TI DA850/OMAP-L138/AM18XX pinconf. Used to control
pullup/pulldown pin groups.
+config PINCTRL_DA9062
+ tristate "Dialog Semiconductor DA9062 PMIC pinctrl and GPIO Support"
+ depends on MFD_DA9062
+ select GPIOLIB
+ help
+ The Dialog DA9062 PMIC provides multiple GPIOs that can be muxed for
+ different functions. This driver bundles a pinctrl driver to select the
+	  function muxing and a GPIO driver to handle these GPIOs when the GPIO
+ function is selected.
+
+ Say yes to enable pinctrl and GPIO support for the DA9062 PMIC.
+
config PINCTRL_DIGICOLOR
bool
depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 879f312bfb75..0b36a1cfca8a 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
+obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index 771d6fd50b45..47a4ccd9fed4 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1125,317 +1125,317 @@ static const struct owl_pingroup s700_groups[] = {
};
static const char * const nor_groups[] = {
- "lcd0_d18",
- "i2s_d0",
- "i2s0_pcm0",
- "i2s1_pcm0",
- "i2s_d1",
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "lcd0_d2",
- "lvds_ee_pn",
- "uart2_rx_tx",
- "spi0_i2c_pcm",
- "lvds_e_pn",
- "sd0_d0",
- "sd0_d1",
- "sd0_d2_d3",
- "sd1_d0_d3",
- "sd0_cmd",
- "sd1_cmd",
- "sens0_ckout",
- "sen0_pclk",
+ "lcd0_d18_mfp",
+ "i2s_d0_mfp",
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "i2s_d1_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lcd0_d2_mfp",
+ "lvds_ee_pn_mfp",
+ "uart2_rx_tx_mfp",
+ "spi0_i2c_pcm_mfp",
+ "lvds_e_pn_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd1_cmd_mfp",
+ "sens0_ckout_mfp",
+ "sen0_pclk_mfp",
};
static const char * const eth_rmii_groups[] = {
- "rgmii_txd23",
- "rgmii_rxd2",
- "rgmii_rxd3",
- "rgmii_txd01",
- "rgmii_txd0",
- "rgmii_txd1",
- "rgmii_txen",
- "rgmii_rxen",
- "rgmii_rxd1",
- "rgmii_rxd0",
- "rgmii_ref_clk",
+ "rgmii_txd23_mfp",
+ "rgmii_rxd2_mfp",
+ "rgmii_rxd3_mfp",
+ "rgmii_txd01_mfp",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
+ "rgmii_txen_mfp",
+ "rgmii_rxen_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
+ "rgmii_ref_clk_mfp",
"eth_smi_dummy",
};
static const char * const eth_smii_groups[] = {
- "rgmii_txd0",
- "rgmii_txd1",
- "rgmii_rxd0",
- "rgmii_rxd1",
- "rgmii_ref_clk",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
+ "rgmii_rxd0_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_ref_clk_mfp",
"eth_smi_dummy",
};
static const char * const spi0_groups[] = {
- "dsi_dn0",
- "dsi_dp2",
- "dsi_dp0",
- "uart2_rx_tx",
- "spi0_i2c_pcm",
- "dsi_dn2",
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp0_mfp",
+ "uart2_rx_tx_mfp",
+ "spi0_i2c_pcm_mfp",
+ "dsi_dn2_mfp",
};
static const char * const spi1_groups[] = {
- "uart0_rx",
- "uart0_tx",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
"i2c0_mfp",
};
static const char * const spi2_groups[] = {
- "rgmii_txd01",
- "rgmii_txd0",
- "rgmii_txd1",
- "rgmii_ref_clk",
- "dnand_acle_ce0",
+ "rgmii_txd01_mfp",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
+ "rgmii_ref_clk_mfp",
+ "dnand_acle_ce0_mfp",
};
static const char * const spi3_groups[] = {
- "rgmii_txen",
- "rgmii_rxen",
- "rgmii_rxd1",
- "rgmii_rxd0",
+ "rgmii_txen_mfp",
+ "rgmii_rxen_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
};
static const char * const sens0_groups[] = {
- "csi_cn_cp",
- "sens0_ckout",
- "csi_dn_dp",
- "sen0_pclk",
+ "csi_cn_cp_mfp",
+ "sens0_ckout_mfp",
+ "csi_dn_dp_mfp",
+ "sen0_pclk_mfp",
};
static const char * const sens1_groups[] = {
- "lcd0_d18",
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "sens0_ckout",
- "pcm1_in",
- "pcm1_clk",
- "pcm1_sync",
- "pcm1_out",
+ "lcd0_d18_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "sens0_ckout_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const uart0_groups[] = {
- "uart2_rtsb",
- "uart2_ctsb",
- "uart0_rx",
- "uart0_tx",
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
};
static const char * const uart1_groups[] = {
- "sd0_d2_d3",
+ "sd0_d2_d3_mfp",
"i2c0_mfp",
};
static const char * const uart2_groups[] = {
- "rgmii_txen",
- "rgmii_rxen",
- "rgmii_rxd1",
- "rgmii_rxd0",
- "dsi_dn0",
- "dsi_dp2",
- "dsi_dp0",
- "uart2_rx_tx",
- "dsi_dn2",
- "uart2_rtsb",
- "uart2_ctsb",
- "sd0_d0",
- "sd0_d1",
- "sd0_d2_d3",
- "uart0_rx",
- "uart0_tx",
+ "rgmii_txen_mfp",
+ "rgmii_rxen_mfp",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp0_mfp",
+ "uart2_rx_tx_mfp",
+ "dsi_dn2_mfp",
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
"i2c0_mfp",
"uart2_dummy"
};
static const char * const uart3_groups[] = {
- "rgmii_txd23",
- "rgmii_rxd2",
- "rgmii_rxd3",
- "uart3_rtsb",
- "uart3_ctsb",
+ "rgmii_txd23_mfp",
+ "rgmii_rxd2_mfp",
+ "rgmii_rxd3_mfp",
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
"uart3_dummy"
};
static const char * const uart4_groups[] = {
- "rgmii_txd01",
- "rgmii_ref_clk",
- "ks_out0",
- "ks_out1",
+ "rgmii_txd01_mfp",
+ "rgmii_ref_clk_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
};
static const char * const uart5_groups[] = {
- "rgmii_rxd1",
- "rgmii_rxd0",
- "ks_out0",
- "ks_out2",
- "uart3_rtsb",
- "uart3_ctsb",
- "sd0_d0",
- "sd0_d1",
+ "rgmii_rxd1_mfp",
+ "rgmii_rxd0_mfp",
+ "ks_out0_mfp",
+ "ks_out2_mfp",
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
};
static const char * const uart6_groups[] = {
- "rgmii_txd0",
- "rgmii_txd1",
+ "rgmii_txd0_mfp",
+ "rgmii_txd1_mfp",
};
static const char * const i2s0_groups[] = {
- "i2s_d0",
- "i2s_pcm1",
- "i2s0_pcm0",
+ "i2s_d0_mfp",
+ "i2s_pcm1_mfp",
+ "i2s0_pcm0_mfp",
};
static const char * const i2s1_groups[] = {
- "i2s1_pcm0",
- "i2s_d1",
+ "i2s1_pcm0_mfp",
+ "i2s_d1_mfp",
"i2s1_dummy",
- "spi0_i2c_pcm",
- "uart0_rx",
- "uart0_tx",
+ "spi0_i2c_pcm_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
};
static const char * const pcm1_groups[] = {
- "i2s_pcm1",
- "spi0_i2c_pcm",
- "uart0_rx",
- "uart0_tx",
- "pcm1_in",
- "pcm1_clk",
- "pcm1_sync",
- "pcm1_out",
+ "i2s_pcm1_mfp",
+ "spi0_i2c_pcm_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const pcm0_groups[] = {
- "i2s0_pcm0",
- "i2s1_pcm0",
- "uart2_rx_tx",
- "spi0_i2c_pcm",
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "uart2_rx_tx_mfp",
+ "spi0_i2c_pcm_mfp",
};
static const char * const ks_groups[] = {
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
};
static const char * const jtag_groups[] = {
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out1",
- "sd0_d0",
- "sd0_d2_d3",
- "sd0_cmd",
- "sd0_clk",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out1_mfp",
+ "sd0_d0_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
};
static const char * const pwm0_groups[] = {
- "rgmii_rxd2",
- "rgmii_txen",
- "ks_in2",
- "sen0_pclk",
+ "rgmii_rxd2_mfp",
+ "rgmii_txen_mfp",
+ "ks_in2_mfp",
+ "sen0_pclk_mfp",
};
static const char * const pwm1_groups[] = {
- "rgmii_rxen",
- "ks_in1",
- "ks_in3",
- "sens0_ckout",
+ "rgmii_rxen_mfp",
+ "ks_in1_mfp",
+ "ks_in3_mfp",
+ "sens0_ckout_mfp",
};
static const char * const pwm2_groups[] = {
- "lcd0_d18",
- "rgmii_rxd3",
- "rgmii_rxd1",
- "ks_out0",
- "ks_out2",
+ "lcd0_d18_mfp",
+ "rgmii_rxd3_mfp",
+ "rgmii_rxd1_mfp",
+ "ks_out0_mfp",
+ "ks_out2_mfp",
};
static const char * const pwm3_groups[] = {
- "rgmii_rxd0",
- "ks_out1",
- "lcd0_d2",
+ "rgmii_rxd0_mfp",
+ "ks_out1_mfp",
+ "lcd0_d2_mfp",
};
static const char * const pwm4_groups[] = {
- "lcd0_d18",
- "rgmii_txd01",
- "rgmii_txd0",
- "ks_in0",
- "pcm1_in",
- "nand_ceb3",
+ "lcd0_d18_mfp",
+ "rgmii_txd01_mfp",
+ "rgmii_txd0_mfp",
+ "ks_in0_mfp",
+ "pcm1_in_mfp",
+ "nand_ceb3_mfp",
};
static const char * const pwm5_groups[] = {
- "rgmii_txd1",
- "ks_in1",
- "pcm1_clk",
- "nand_ceb2",
+ "rgmii_txd1_mfp",
+ "ks_in1_mfp",
+ "pcm1_clk_mfp",
+ "nand_ceb2_mfp",
};
static const char * const p0_groups[] = {
- "ks_in2",
- "ks_in0",
+ "ks_in2_mfp",
+ "ks_in0_mfp",
};
static const char * const sd0_groups[] = {
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "lcd0_d2",
- "dsi_dp3",
- "dsi_dp0",
- "sd0_d0",
- "sd0_d1",
- "sd0_d2_d3",
- "sd1_d0_d3",
- "sd0_cmd",
- "sd0_clk",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lcd0_d2_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dp0_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
};
static const char * const sd1_groups[] = {
- "dsi_dp2",
- "mfp1_16_14",
- "lcd0_d2",
- "mfp1_16_14_d17",
- "dsi_dp3",
- "dsi_dn3",
- "dsi_dnp1_cp_d2",
- "dsi_dnp1_cp_d17",
- "dsi_dn2",
- "sd1_d0_d3",
- "sd1_cmd",
+ "dsi_dp2_mfp",
+ "mfp1_16_14_mfp",
+ "lcd0_d2_mfp",
+ "mfp1_16_14_d17_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "dsi_dnp1_cp_d2_mfp",
+ "dsi_dnp1_cp_d17_mfp",
+ "dsi_dn2_mfp",
+ "sd1_d0_d3_mfp",
+ "sd1_cmd_mfp",
"sd1_dummy",
};
static const char * const sd2_groups[] = {
- "dnand_data_wr",
+ "dnand_data_wr_mfp",
};
static const char * const i2c0_groups[] = {
- "uart0_rx",
- "uart0_tx",
- "i2c0_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2c0_mfp_mfp",
};
static const char * const i2c1_groups[] = {
@@ -1448,85 +1448,85 @@ static const char * const i2c2_groups[] = {
};
static const char * const i2c3_groups[] = {
- "uart2_rx_tx",
- "pcm1_sync",
- "pcm1_out",
+ "uart2_rx_tx_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const lvds_groups[] = {
- "lvds_o_pn",
- "lvds_ee_pn",
- "lvds_e_pn",
+ "lvds_o_pn_mfp",
+ "lvds_ee_pn_mfp",
+ "lvds_e_pn_mfp",
};
static const char * const bt_groups[] = {
- "i2s_pcm1",
- "i2s0_pcm0",
- "i2s1_pcm0",
- "ks_in2",
- "ks_in1",
- "ks_in0",
- "ks_in3",
- "ks_out0",
- "ks_out1",
- "ks_out2",
- "lvds_o_pn",
- "lvds_ee_pn",
- "pcm1_in",
- "pcm1_clk",
- "pcm1_sync",
- "pcm1_out",
+ "i2s_pcm1_mfp",
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lvds_o_pn_mfp",
+ "lvds_ee_pn_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
};
static const char * const lcd0_groups[] = {
- "lcd0_d18",
- "lcd0_d2",
- "mfp1_16_14_d17",
- "lvds_o_pn",
- "dsi_dp3",
- "dsi_dn3",
- "lvds_ee_pn",
- "dsi_dnp1_cp_d2",
- "dsi_dnp1_cp_d17",
- "lvds_e_pn",
+ "lcd0_d18_mfp",
+ "lcd0_d2_mfp",
+ "mfp1_16_14_d17_mfp",
+ "lvds_o_pn_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "lvds_ee_pn_mfp",
+ "dsi_dnp1_cp_d2_mfp",
+ "dsi_dnp1_cp_d17_mfp",
+ "lvds_e_pn_mfp",
};
static const char * const usb30_groups[] = {
- "ks_in1",
+ "ks_in1_mfp",
};
static const char * const clko_25m_groups[] = {
- "clko_25m",
+ "clko_25m_mfp",
};
static const char * const mipi_csi_groups[] = {
- "csi_cn_cp",
- "csi_dn_dp",
+ "csi_cn_cp_mfp",
+ "csi_dn_dp_mfp",
};
static const char * const dsi_groups[] = {
- "dsi_dn0",
- "dsi_dp2",
- "dsi_dp3",
- "dsi_dn3",
- "dsi_dp0",
- "dsi_dnp1_cp_d2",
- "dsi_dnp1_cp_d17",
- "dsi_dn2",
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "dsi_dp0_mfp",
+ "dsi_dnp1_cp_d2_mfp",
+ "dsi_dnp1_cp_d17_mfp",
+ "dsi_dn2_mfp",
"dsi_dummy",
};
static const char * const nand_groups[] = {
- "dnand_data_wr",
- "dnand_acle_ce0",
- "nand_ceb2",
- "nand_ceb3",
+ "dnand_data_wr_mfp",
+ "dnand_acle_ce0_mfp",
+ "nand_ceb2_mfp",
+ "nand_ceb3_mfp",
"nand_dummy",
};
static const char * const spdif_groups[] = {
- "uart0_tx",
+ "uart0_tx_mfp",
};
static const char * const sirq0_groups[] = {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0de1a3a96984..06bd2b70af3c 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -37,12 +37,10 @@
#define MODULE_NAME "pinctrl-bcm2835"
#define BCM2835_NUM_GPIOS 54
+#define BCM2711_NUM_GPIOS 58
#define BCM2835_NUM_BANKS 2
#define BCM2835_NUM_IRQS 3
-#define BCM2835_PIN_BITMAP_SZ \
- DIV_ROUND_UP(BCM2835_NUM_GPIOS, sizeof(unsigned long) * 8)
-
/* GPIO register offsets */
#define GPFSEL0 0x0 /* Function Select */
#define GPSET0 0x1c /* Pin Output Set */
@@ -81,10 +79,11 @@ struct bcm2835_pinctrl {
/* note: locking assumes each bank will have its own unsigned long */
unsigned long enabled_irq_map[BCM2835_NUM_BANKS];
- unsigned int irq_type[BCM2835_NUM_GPIOS];
+ unsigned int irq_type[BCM2711_NUM_GPIOS];
struct pinctrl_dev *pctl_dev;
struct gpio_chip gpio_chip;
+ struct pinctrl_desc pctl_desc;
struct pinctrl_gpio_range gpio_range;
raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
@@ -147,6 +146,10 @@ static struct pinctrl_pin_desc bcm2835_gpio_pins[] = {
BCM2835_GPIO_PIN(51),
BCM2835_GPIO_PIN(52),
BCM2835_GPIO_PIN(53),
+ BCM2835_GPIO_PIN(54),
+ BCM2835_GPIO_PIN(55),
+ BCM2835_GPIO_PIN(56),
+ BCM2835_GPIO_PIN(57),
};
/* one pin per group */
@@ -205,6 +208,10 @@ static const char * const bcm2835_gpio_groups[] = {
"gpio51",
"gpio52",
"gpio53",
+ "gpio54",
+ "gpio55",
+ "gpio56",
+ "gpio57",
};
enum bcm2835_fsel {
@@ -322,7 +329,10 @@ static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offse
if (fsel > BCM2835_FSEL_GPIO_OUT)
return -EINVAL;
- return (fsel == BCM2835_FSEL_GPIO_IN);
+ if (fsel == BCM2835_FSEL_GPIO_IN)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -355,6 +365,22 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.can_sleep = false,
};
+static const struct gpio_chip bcm2711_gpio_chip = {
+ .label = "pinctrl-bcm2711",
+ .owner = THIS_MODULE,
+ .request = gpiochip_generic_request,
+ .free = gpiochip_generic_free,
+ .direction_input = bcm2835_gpio_direction_input,
+ .direction_output = bcm2835_gpio_direction_output,
+ .get_direction = bcm2835_gpio_get_direction,
+ .get = bcm2835_gpio_get,
+ .set = bcm2835_gpio_set,
+ .set_config = gpiochip_generic_config,
+ .base = -1,
+ .ngpio = BCM2711_NUM_GPIOS,
+ .can_sleep = false,
+};
+
static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
unsigned int bank, u32 mask)
{
@@ -401,7 +427,7 @@ static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
bcm2835_gpio_irq_handle_bank(pc, 0, 0xf0000000);
bcm2835_gpio_irq_handle_bank(pc, 1, 0x00003fff);
break;
- case 2: /* IRQ2 covers GPIOs 46-53 */
+ case 2: /* IRQ2 covers GPIOs 46-57 */
bcm2835_gpio_irq_handle_bank(pc, 1, 0x003fc000);
break;
}
@@ -620,7 +646,7 @@ static struct irq_chip bcm2835_gpio_irq_chip = {
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
{
- return ARRAY_SIZE(bcm2835_gpio_groups);
+ return BCM2835_NUM_GPIOS;
}
static const char *bcm2835_pctl_get_group_name(struct pinctrl_dev *pctldev,
@@ -778,7 +804,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
err = of_property_read_u32_index(np, "brcm,pins", i, &pin);
if (err)
goto out;
- if (pin >= ARRAY_SIZE(bcm2835_gpio_pins)) {
+ if (pin >= pc->pctl_desc.npins) {
dev_err(pc->dev, "%pOF: invalid brcm,pins value %d\n",
np, pin);
err = -EINVAL;
@@ -854,7 +880,7 @@ static int bcm2835_pmx_get_function_groups(struct pinctrl_dev *pctldev,
{
/* every pin can do every function */
*groups = bcm2835_gpio_groups;
- *num_groups = ARRAY_SIZE(bcm2835_gpio_groups);
+ *num_groups = BCM2835_NUM_GPIOS;
return 0;
}
@@ -1054,29 +1080,62 @@ static const struct pinconf_ops bcm2711_pinconf_ops = {
.pin_config_set = bcm2711_pinconf_set,
};
-static struct pinctrl_desc bcm2835_pinctrl_desc = {
+static const struct pinctrl_desc bcm2835_pinctrl_desc = {
.name = MODULE_NAME,
.pins = bcm2835_gpio_pins,
- .npins = ARRAY_SIZE(bcm2835_gpio_pins),
+ .npins = BCM2835_NUM_GPIOS,
.pctlops = &bcm2835_pctl_ops,
.pmxops = &bcm2835_pmx_ops,
.confops = &bcm2835_pinconf_ops,
.owner = THIS_MODULE,
};
-static struct pinctrl_gpio_range bcm2835_pinctrl_gpio_range = {
+static const struct pinctrl_desc bcm2711_pinctrl_desc = {
+ .name = "pinctrl-bcm2711",
+ .pins = bcm2835_gpio_pins,
+ .npins = BCM2711_NUM_GPIOS,
+ .pctlops = &bcm2835_pctl_ops,
+ .pmxops = &bcm2835_pmx_ops,
+ .confops = &bcm2711_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct pinctrl_gpio_range bcm2835_pinctrl_gpio_range = {
.name = MODULE_NAME,
.npins = BCM2835_NUM_GPIOS,
};
+static const struct pinctrl_gpio_range bcm2711_pinctrl_gpio_range = {
+ .name = "pinctrl-bcm2711",
+ .npins = BCM2711_NUM_GPIOS,
+};
+
+struct bcm_plat_data {
+ const struct gpio_chip *gpio_chip;
+ const struct pinctrl_desc *pctl_desc;
+ const struct pinctrl_gpio_range *gpio_range;
+};
+
+static const struct bcm_plat_data bcm2835_plat_data = {
+ .gpio_chip = &bcm2835_gpio_chip,
+ .pctl_desc = &bcm2835_pinctrl_desc,
+ .gpio_range = &bcm2835_pinctrl_gpio_range,
+};
+
+static const struct bcm_plat_data bcm2711_plat_data = {
+ .gpio_chip = &bcm2711_gpio_chip,
+ .pctl_desc = &bcm2711_pinctrl_desc,
+ .gpio_range = &bcm2711_pinctrl_gpio_range,
+};
+
static const struct of_device_id bcm2835_pinctrl_match[] = {
{
.compatible = "brcm,bcm2835-gpio",
- .data = &bcm2835_pinconf_ops,
+ .data = &bcm2835_plat_data,
},
{
.compatible = "brcm,bcm2711-gpio",
- .data = &bcm2711_pinconf_ops,
+ .data = &bcm2711_plat_data,
},
{}
};
@@ -1085,14 +1144,15 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ const struct bcm_plat_data *pdata;
struct bcm2835_pinctrl *pc;
struct gpio_irq_chip *girq;
struct resource iomem;
int err, i;
const struct of_device_id *match;
- BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_pins) != BCM2835_NUM_GPIOS);
- BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_groups) != BCM2835_NUM_GPIOS);
+ BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_pins) != BCM2711_NUM_GPIOS);
+ BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_groups) != BCM2711_NUM_GPIOS);
pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
@@ -1111,7 +1171,13 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
- pc->gpio_chip = bcm2835_gpio_chip;
+ match = of_match_node(bcm2835_pinctrl_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+
+ pdata = match->data;
+
+ pc->gpio_chip = *pdata->gpio_chip;
pc->gpio_chip.parent = dev;
pc->gpio_chip.of_node = np;
@@ -1162,19 +1228,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
return err;
}
- match = of_match_node(bcm2835_pinctrl_match, pdev->dev.of_node);
- if (match) {
- bcm2835_pinctrl_desc.confops =
- (const struct pinconf_ops *)match->data;
- }
-
- pc->pctl_dev = devm_pinctrl_register(dev, &bcm2835_pinctrl_desc, pc);
+ pc->pctl_desc = *pdata->pctl_desc;
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
if (IS_ERR(pc->pctl_dev)) {
gpiochip_remove(&pc->gpio_chip);
return PTR_ERR(pc->pctl_dev);
}
- pc->gpio_range = bcm2835_pinctrl_gpio_range;
+ pc->gpio_range = *pdata->gpio_range;
pc->gpio_range.base = pc->gpio_chip.base;
pc->gpio_range.gc = &pc->gpio_chip;
pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
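The probe rework above stops patching a mutable global pinctrl_desc through match->data and instead picks one of two const bcm_plat_data bundles. The same pattern in isolation, as a minimal sketch (names are illustrative; the driver open-codes of_match_node() where this sketch uses the equivalent of_device_get_match_data() helper):

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_variant {
	unsigned int ngpios;
};

static const struct example_variant example_2835 = { .ngpios = 54 };
static const struct example_variant example_2711 = { .ngpios = 58 };

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-2835", .data = &example_2835 },
	{ .compatible = "vendor,example-2711", .data = &example_2711 },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_variant *variant;

	/* Resolves .data for whichever compatible actually matched */
	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	dev_info(&pdev->dev, "variant with %u GPIOs\n", variant->ngpios);
	return 0;
}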
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 25166217c3e0..a38f0d5f47ce 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -363,7 +363,10 @@ static int iproc_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
unsigned int offset = IPROC_GPIO_REG(gpio, IPROC_GPIO_OUT_EN_OFFSET);
unsigned int shift = IPROC_GPIO_SHIFT(gpio);
- return !(readl(chip->base + offset) & BIT(shift));
+ if (readl(chip->base + offset) & BIT(shift))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
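This hunk, like the bcm2835, mtk and armada-37xx hunks in the same series, converts .get_direction from returning a raw register bit to the GPIO_LINE_DIRECTION_* constants. The convention on its own, with the hardware read stubbed out:

#include <linux/gpio/driver.h>

/* Stand-in for the hardware access; a real driver reads an
 * output-enable bit here.
 */
static bool example_pin_is_output(struct gpio_chip *gc, unsigned int offset)
{
	return false;
}

/* .get_direction now returns GPIO_LINE_DIRECTION_IN (1),
 * GPIO_LINE_DIRECTION_OUT (0), or a negative errno, rather
 * than the bare boolean the old code returned.
 */
static int example_get_direction(struct gpio_chip *gc, unsigned int offset)
{
	if (example_pin_is_output(gc, offset))
		return GPIO_LINE_DIRECTION_OUT;

	return GPIO_LINE_DIRECTION_IN;
}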
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index f23c55e22195..821242bb4b16 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -176,6 +176,7 @@ const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin)
return desc->name;
}
+EXPORT_SYMBOL_GPL(pin_get_name);
/* Deletes a range of pin descriptors */
static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 1ed20ac2243f..c6fe7d64c913 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -103,6 +103,7 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
{
return get_pinctrl_dev_from_of_node(np);
}
+EXPORT_SYMBOL_GPL(of_pinctrl_get);
static int dt_to_map_one_config(struct pinctrl *p,
struct pinctrl_dev *hog_pctldev,
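The two one-line EXPORT_SYMBOL_GPL() additions above make pin_get_name() and of_pinctrl_get() callable from modules. A hedged sketch of a modular consumer resolving a pin name through a controller's DT node; since pin_get_name() is declared in the pinctrl core header, this would sit inside drivers/pinctrl/, as the mediatek drivers do:

#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include "core.h"	/* pin_get_name() declaration */

static const char *example_pin_name(struct device_node *pctl_np,
				    unsigned int pin)
{
	/* Both calls resolve for modules thanks to the new exports */
	struct pinctrl_dev *pctldev = of_pinctrl_get(pctl_np);

	if (!pctldev)
		return NULL;

	return pin_get_name(pctldev, pin);
}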
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index de775a85a51e..c784663b00ad 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -125,28 +125,28 @@ config PINCTRL_IMX7ULP
config PINCTRL_IMX8MM
bool "IMX8MM pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mm pinctrl driver
config PINCTRL_IMX8MN
bool "IMX8MN pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mn pinctrl driver
config PINCTRL_IMX8MP
bool "IMX8MP pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mp pinctrl driver
config PINCTRL_IMX8MQ
bool "IMX8MQ pinctrl driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8mq pinctrl driver
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
index 32451e8693be..905dae8c3fd8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6765.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
@@ -1070,15 +1070,12 @@ static const struct mtk_pin_soc mt6765_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt6765),
.eint_hw = &mt6765_eint_hw,
.gpio_m = 0,
- .ies_present = true,
.base_names = mt6765_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt6765_pinctrl_register_base_names),
- .bias_disable_set = mtk_pinconf_bias_disable_set,
- .bias_disable_get = mtk_pinconf_bias_disable_get,
- .bias_set = mtk_pinconf_bias_set,
- .bias_get = mtk_pinconf_bias_get,
- .drive_set = mtk_pinconf_drive_set_rev1,
- .drive_get = mtk_pinconf_drive_get_rev1,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_raw,
+ .drive_get = mtk_pinconf_drive_get_raw,
.adv_pull_get = mtk_pinconf_adv_pull_get,
.adv_pull_set = mtk_pinconf_adv_pull_set,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
index 9a74d5025be6..60318339b618 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8183.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
@@ -554,13 +554,10 @@ static const struct mtk_pin_soc mt8183_data = {
.ngrps = ARRAY_SIZE(mtk_pins_mt8183),
.eint_hw = &mt8183_eint_hw,
.gpio_m = 0,
- .ies_present = true,
.base_names = mt8183_pinctrl_register_base_names,
.nbase_names = ARRAY_SIZE(mt8183_pinctrl_register_base_names),
- .bias_disable_set = mtk_pinconf_bias_disable_set_rev1,
- .bias_disable_get = mtk_pinconf_bias_disable_get_rev1,
- .bias_set = mtk_pinconf_bias_set_rev1,
- .bias_get = mtk_pinconf_bias_get_rev1,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
.drive_set = mtk_pinconf_drive_set_rev1,
.drive_get = mtk_pinconf_drive_get_rev1,
.adv_pull_get = mtk_pinconf_adv_pull_get,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 20e1c890e73b..d3169a87e1b3 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -6,6 +6,7 @@
*
*/
+#include <dt-bindings/pinctrl/mt65xx.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
@@ -66,34 +67,44 @@ static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
int field, struct mtk_pin_field *pfd)
{
- const struct mtk_pin_field_calc *c, *e;
+ const struct mtk_pin_field_calc *c;
const struct mtk_pin_reg_calc *rc;
+ int start = 0, end, check;
+ bool found = false;
u32 bits;
if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) {
rc = &hw->soc->reg_cal[field];
} else {
dev_dbg(hw->dev,
- "Not support field %d for pin %d (%s)\n",
- field, desc->number, desc->name);
+			"Unsupported field %d for this SoC\n", field);
return -ENOTSUPP;
}
- c = rc->range;
- e = c + rc->nranges;
+ end = rc->nranges - 1;
- while (c < e) {
- if (desc->number >= c->s_pin && desc->number <= c->e_pin)
+ while (start <= end) {
+ check = (start + end) >> 1;
+ if (desc->number >= rc->range[check].s_pin
+ && desc->number <= rc->range[check].e_pin) {
+ found = true;
+ break;
+ } else if (start == end)
break;
- c++;
+ else if (desc->number < rc->range[check].s_pin)
+ end = check - 1;
+ else
+ start = check + 1;
}
- if (c >= e) {
+ if (!found) {
dev_dbg(hw->dev, "Not support field %d for pin = %d (%s)\n",
field, desc->number, desc->name);
return -ENOTSUPP;
}
+ c = rc->range + check;
+
if (c->i_base > hw->nbase - 1) {
dev_err(hw->dev,
"Invalid base for field %d for pin = %d (%s)\n",
@@ -182,6 +193,9 @@ int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
if (err)
return err;
+ if (value < 0 || value > pf.mask)
+ return -EINVAL;
+
if (!pf.next)
mtk_rmw(hw, pf.index, pf.offset, pf.mask << pf.bitpos,
(value & pf.mask) << pf.bitpos);
@@ -502,6 +516,226 @@ int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+/* Combo for the following pull register types:
+ * 1. PU + PD
+ * 2. PULLSEL + PULLEN
+ * 3. PUPD + R0 + R1
+ */
+static int mtk_pinconf_bias_set_pu_pd(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err, pu, pd;
+
+ if (arg == MTK_DISABLE) {
+ pu = 0;
+ pd = 0;
+ } else if ((arg == MTK_ENABLE) && pullup) {
+ pu = 1;
+ pd = 0;
+ } else if ((arg == MTK_ENABLE) && !pullup) {
+ pu = 0;
+ pd = 1;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, pu);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD, pd);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_set_pullsel_pullen(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err, enable;
+
+ if (arg == MTK_DISABLE)
+ enable = 0;
+ else if (arg == MTK_ENABLE)
+ enable = 1;
+ else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN, enable);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, pullup);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_set_pupd_r1_r0(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err, r0, r1;
+
+ if ((arg == MTK_DISABLE) || (arg == MTK_PUPD_SET_R1R0_00)) {
+ pullup = 0;
+ r0 = 0;
+ r1 = 0;
+ } else if (arg == MTK_PUPD_SET_R1R0_01) {
+ r0 = 1;
+ r1 = 0;
+ } else if (arg == MTK_PUPD_SET_R1R0_10) {
+ r0 = 0;
+ r1 = 1;
+ } else if (arg == MTK_PUPD_SET_R1R0_11) {
+ r0 = 1;
+ r1 = 1;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* MTK HW PUPD bit: 1 for pull-down, 0 for pull-up */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PUPD, !pullup);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R0, r0);
+ if (err)
+ goto out;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R1, r1);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_get_pu_pd(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err, pu, pd;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PU, &pu);
+ if (err)
+ goto out;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd);
+ if (err)
+ goto out;
+
+ if (pu == 0 && pd == 0) {
+ *pullup = 0;
+ *enable = MTK_DISABLE;
+ } else if (pu == 1 && pd == 0) {
+ *pullup = 1;
+ *enable = MTK_ENABLE;
+ } else if (pu == 0 && pd == 1) {
+ *pullup = 0;
+ *enable = MTK_ENABLE;
+ } else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_get_pullsel_pullen(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, pullup);
+ if (err)
+ goto out;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, enable);
+
+out:
+ return err;
+}
+
+static int mtk_pinconf_bias_get_pupd_r1_r0(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err, r0, r1;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PUPD, pullup);
+ if (err)
+ goto out;
+ /* MTK HW PUPD bit: 1 for pull-down, 0 for pull-up */
+ *pullup = !(*pullup);
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R0, &r0);
+ if (err)
+ goto out;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R1, &r1);
+ if (err)
+ goto out;
+
+ if ((r1 == 0) && (r0 == 0))
+ *enable = MTK_PUPD_SET_R1R0_00;
+ else if ((r1 == 0) && (r0 == 1))
+ *enable = MTK_PUPD_SET_R1R0_01;
+ else if ((r1 == 1) && (r0 == 0))
+ *enable = MTK_PUPD_SET_R1R0_10;
+ else if ((r1 == 1) && (r0 == 1))
+ *enable = MTK_PUPD_SET_R1R0_11;
+ else
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
+int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ int err;
+
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc, pullup, arg);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_set_pupd_r1_r0(hw, desc, pullup, arg);
+
+out:
+ return err;
+}
+
+int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
+{
+ int err;
+
+ err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc, pullup, enable);
+ if (!err)
+ goto out;
+
+ err = mtk_pinconf_bias_get_pupd_r1_r0(hw, desc, pullup, enable);
+
+out:
+ return err;
+}
+
/* Revision 0 */
int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg)
@@ -593,6 +827,18 @@ int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+int mtk_pinconf_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg)
+{
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV, arg);
+}
+
+int mtk_pinconf_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val)
+{
+ return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, val);
+}
+
int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
u32 arg)
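Two mechanisms above are worth isolating. First, mtk_hw_pin_field_lookup() now bisects the sorted rc->range[] table instead of scanning it linearly. Second, the *_combo helpers try each pull-register scheme in turn (PU/PD, then PULLSEL/PULLEN, then PUPD/R1/R0) and settle on the one whose register fields the pin actually implements, since the unsupported schemes fail their field lookup with -ENOTSUPP. A simplified sketch of the range bisection, with the types reduced to the essentials:

/* Ranges are sorted by s_pin and non-overlapping, which is what
 * makes the bisection in mtk_hw_pin_field_lookup() valid.
 */
struct example_range {
	unsigned int s_pin;
	unsigned int e_pin;
};

static int example_find_range(const struct example_range *r, int nranges,
			      unsigned int pin)
{
	int start = 0, end = nranges - 1;

	while (start <= end) {
		int mid = (start + end) >> 1;

		if (pin >= r[mid].s_pin && pin <= r[mid].e_pin)
			return mid;
		if (pin < r[mid].s_pin)
			end = mid - 1;
		else
			start = mid + 1;
	}

	return -1;	/* no range covers this pin */
}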
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 1b7da42aa1d5..27df08736396 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -216,6 +216,11 @@ struct mtk_pin_soc {
int (*bias_get)(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup, int *res);
+ int (*bias_set_combo)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 pullup, u32 arg);
+ int (*bias_get_combo)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 *pullup, u32 *arg);
+
int (*drive_set)(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg);
int (*drive_get)(struct mtk_pinctrl *hw,
@@ -277,6 +282,12 @@ int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
int *res);
+int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 enable);
+int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable);
int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg);
@@ -288,6 +299,11 @@ int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val);
+int mtk_pinconf_drive_set_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_drive_get_raw(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val);
+
int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
u32 arg);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 67f8444f7a0c..a02ad10ec6fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -804,7 +804,10 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
pctl->devdata->spec_dir_set(&reg_addr, offset);
regmap_read(pctl->regmap1, reg_addr, &read_val);
- return !(read_val & bit);
+ if (read_val & bit)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 923264d0e9ef..3853ec3a2a8e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -78,93 +78,88 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
{
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
u32 param = pinconf_to_config_param(*config);
- int val, val2, err, reg, ret = 1;
+ int pullup, err, reg, ret = 1;
const struct mtk_pin_desc *desc;
+ if (pin >= hw->soc->npins) {
+ err = -EINVAL;
+ goto out;
+ }
desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- if (hw->soc->bias_disable_get) {
- err = hw->soc->bias_disable_get(hw, desc, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
- break;
case PIN_CONFIG_BIAS_PULL_UP:
- if (hw->soc->bias_get) {
- err = hw->soc->bias_get(hw, desc, 1, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
- break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- if (hw->soc->bias_get) {
- err = hw->soc->bias_get(hw, desc, 0, &ret);
+ if (hw->soc->bias_get_combo) {
+ err = hw->soc->bias_get_combo(hw, desc, &pullup, &ret);
if (err)
- return err;
+ goto out;
+ if (param == PIN_CONFIG_BIAS_DISABLE) {
+ if (ret == MTK_PUPD_SET_R1R0_00)
+ ret = MTK_DISABLE;
+ } else if (param == PIN_CONFIG_BIAS_PULL_UP) {
+			/* When a pull-up value is requested, return an
+			 * error if the current setting is pull-down
+			 */
+ if (!pullup)
+ err = -EINVAL;
+ } else if (param == PIN_CONFIG_BIAS_PULL_DOWN) {
+				/* When a pull-down value is requested, return
+				 * an error if the current setting is pull-up
+				 */
+ if (pullup)
+ err = -EINVAL;
+ }
} else {
- return -ENOTSUPP;
+ err = -ENOTSUPP;
}
break;
case PIN_CONFIG_SLEW_RATE:
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &val);
- if (err)
- return err;
-
- if (!val)
- return -EINVAL;
-
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
break;
case PIN_CONFIG_INPUT_ENABLE:
case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
- return err;
-
- /* HW takes input mode as zero; output mode as non-zero */
- if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
- (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
- return -EINVAL;
+ goto out;
+		/* CONFIG         Current direction  return value
+ * ------------- ----------------- ----------------------
+ * OUTPUT_ENABLE output 1 (= HW value)
+ * input 0 (= HW value)
+ * INPUT_ENABLE output 0 (= reverse HW value)
+ * input 1 (= reverse HW value)
+ */
+ if (param == PIN_CONFIG_INPUT_ENABLE)
+ ret = !ret;
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
- return err;
-
- err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &val2);
- if (err)
- return err;
+ goto out;
+		/* Return an error when in output mode, because the
+		 * Schmitt trigger only works in input mode
+		 */
+ if (ret) {
+ err = -EINVAL;
+ goto out;
+ }
- if (val || !val2)
- return -EINVAL;
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- if (hw->soc->drive_get) {
+ if (hw->soc->drive_get)
err = hw->soc->drive_get(hw, desc, &ret);
- if (err)
- return err;
- } else {
+ else
err = -ENOTSUPP;
- }
break;
case MTK_PIN_CONFIG_TDSEL:
case MTK_PIN_CONFIG_RDSEL:
reg = (param == MTK_PIN_CONFIG_TDSEL) ?
PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
- err = mtk_hw_get_value(hw, desc, reg, &val);
- if (err)
- return err;
-
- ret = val;
-
+ err = mtk_hw_get_value(hw, desc, reg, &ret);
break;
case MTK_PIN_CONFIG_PU_ADV:
case MTK_PIN_CONFIG_PD_ADV:
@@ -173,28 +168,24 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
pullup = param == MTK_PIN_CONFIG_PU_ADV;
err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ } else
+ err = -ENOTSUPP;
break;
case MTK_PIN_CONFIG_DRV_ADV:
- if (hw->soc->adv_drive_get) {
+ if (hw->soc->adv_drive_get)
err = hw->soc->adv_drive_get(hw, desc, &ret);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ else
+ err = -ENOTSUPP;
break;
default:
- return -ENOTSUPP;
+ err = -ENOTSUPP;
}
- *config = pinconf_to_config_packed(param, ret);
+out:
+ if (!err)
+ *config = pinconf_to_config_packed(param, ret);
- return 0;
+ return err;
}
static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
@@ -206,64 +197,55 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
int err = 0;
u32 reg;
+ if (pin >= hw->soc->npins) {
+ err = -EINVAL;
+ goto err;
+ }
desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
switch ((u32)param) {
case PIN_CONFIG_BIAS_DISABLE:
- if (hw->soc->bias_disable_set) {
- err = hw->soc->bias_disable_set(hw, desc);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ if (hw->soc->bias_set_combo)
+ err = hw->soc->bias_set_combo(hw, desc, 0, MTK_DISABLE);
+ else
+ err = -ENOTSUPP;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- if (hw->soc->bias_set) {
- err = hw->soc->bias_set(hw, desc, 1);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ if (hw->soc->bias_set_combo)
+ err = hw->soc->bias_set_combo(hw, desc, 1, arg);
+ else
+ err = -ENOTSUPP;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- if (hw->soc->bias_set) {
- err = hw->soc->bias_set(hw, desc, 0);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ if (hw->soc->bias_set_combo)
+ err = hw->soc->bias_set_combo(hw, desc, 0, arg);
+ else
+ err = -ENOTSUPP;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
MTK_DISABLE);
- if (err)
+		/* Keep setting the direction to cover the case where a
+		 * GPIO pin does not have SMT control
+		 */
+ if (err != -ENOTSUPP)
goto err;
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
MTK_OUTPUT);
- if (err)
- goto err;
break;
case PIN_CONFIG_INPUT_ENABLE:
- if (hw->soc->ies_present) {
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES,
- MTK_ENABLE);
- }
+		/* regard any non-zero value as enable */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
+ if (err)
+ goto err;
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
MTK_INPUT);
- if (err)
- goto err;
break;
case PIN_CONFIG_SLEW_RATE:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR,
- arg);
- if (err)
- goto err;
-
+		/* regard any non-zero value as enable */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR, !!arg);
break;
case PIN_CONFIG_OUTPUT:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
@@ -273,41 +255,29 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
arg);
- if (err)
- goto err;
break;
+ case PIN_CONFIG_INPUT_SCHMITT:
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
/* arg = 1: Input mode & SMT enable ;
* arg = 0: Output mode & SMT disable
*/
- arg = arg ? 2 : 1;
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- arg & 1);
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, !arg);
if (err)
goto err;
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
- !!(arg & 2));
- if (err)
- goto err;
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, !!arg);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- if (hw->soc->drive_set) {
+ if (hw->soc->drive_set)
err = hw->soc->drive_set(hw, desc, arg);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ else
+ err = -ENOTSUPP;
break;
case MTK_PIN_CONFIG_TDSEL:
case MTK_PIN_CONFIG_RDSEL:
reg = (param == MTK_PIN_CONFIG_TDSEL) ?
PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
err = mtk_hw_set_value(hw, desc, reg, arg);
- if (err)
- goto err;
break;
case MTK_PIN_CONFIG_PU_ADV:
case MTK_PIN_CONFIG_PD_ADV:
@@ -317,20 +287,14 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
pullup = param == MTK_PIN_CONFIG_PU_ADV;
err = hw->soc->adv_pull_set(hw, desc, pullup,
arg);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ } else
+ err = -ENOTSUPP;
break;
case MTK_PIN_CONFIG_DRV_ADV:
- if (hw->soc->adv_drive_set) {
+ if (hw->soc->adv_drive_set)
err = hw->soc->adv_drive_set(hw, desc, arg);
- if (err)
- return err;
- } else {
- return -ENOTSUPP;
- }
+ else
+ err = -ENOTSUPP;
break;
default:
err = -ENOTSUPP;
@@ -575,12 +539,120 @@ static int mtk_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
+static int mtk_hw_get_value_wrap(struct mtk_pinctrl *hw, unsigned int gpio, int field)
+{
+ const struct mtk_pin_desc *desc;
+ int value, err;
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+ err = mtk_hw_get_value(hw, desc, field, &value);
+ if (err)
+ return err;
+
+ return value;
+}
+
+#define mtk_pctrl_get_pinmux(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_MODE)
+
+#define mtk_pctrl_get_direction(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DIR)
+
+#define mtk_pctrl_get_out(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DO)
+
+#define mtk_pctrl_get_in(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DI)
+
+#define mtk_pctrl_get_smt(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_SMT)
+
+#define mtk_pctrl_get_ies(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_IES)
+
+#define mtk_pctrl_get_driving(hw, gpio) \
+ mtk_hw_get_value_wrap(hw, gpio, PINCTRL_PIN_REG_DRV)
+
+ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
+ unsigned int gpio, char *buf, unsigned int bufLen)
+{
+ int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1;
+ const struct mtk_pin_desc *desc;
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+ pinmux = mtk_pctrl_get_pinmux(hw, gpio);
+ if (pinmux >= hw->soc->nfuncs)
+ pinmux -= hw->soc->nfuncs;
+
+ mtk_pinconf_bias_get_combo(hw, desc, &pullup, &pullen);
+ if (pullen == MTK_PUPD_SET_R1R0_00) {
+ pullen = 0;
+ r1 = 0;
+ r0 = 0;
+ } else if (pullen == MTK_PUPD_SET_R1R0_01) {
+ pullen = 1;
+ r1 = 0;
+ r0 = 1;
+ } else if (pullen == MTK_PUPD_SET_R1R0_10) {
+ pullen = 1;
+ r1 = 1;
+ r0 = 0;
+ } else if (pullen == MTK_PUPD_SET_R1R0_11) {
+ pullen = 1;
+ r1 = 1;
+ r0 = 1;
+ } else if (pullen != MTK_DISABLE && pullen != MTK_ENABLE) {
+ pullen = 0;
+ }
+ len += scnprintf(buf + len, bufLen - len,
+ "%03d: %1d%1d%1d%1d%02d%1d%1d%1d%1d",
+ gpio,
+ pinmux,
+ mtk_pctrl_get_direction(hw, gpio),
+ mtk_pctrl_get_out(hw, gpio),
+ mtk_pctrl_get_in(hw, gpio),
+ mtk_pctrl_get_driving(hw, gpio),
+ mtk_pctrl_get_smt(hw, gpio),
+ mtk_pctrl_get_ies(hw, gpio),
+ pullen,
+ pullup);
+
+ if (r1 != -1) {
+ len += scnprintf(buf + len, bufLen - len, " (%1d %1d)\n",
+ r1, r0);
+ } else {
+ len += scnprintf(buf + len, bufLen - len, "\n");
+ }
+
+ return len;
+}
+
+#define PIN_DBG_BUF_SZ 96
+static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int gpio)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ char buf[PIN_DBG_BUF_SZ];
+
+ (void)mtk_pctrl_show_one_pin(hw, gpio, buf, PIN_DBG_BUF_SZ);
+
+ seq_printf(s, "%s", buf);
+}
+
static const struct pinctrl_ops mtk_pctlops = {
.dt_node_to_map = mtk_pctrl_dt_node_to_map,
.dt_free_map = pinctrl_utils_free_map,
.get_groups_count = mtk_pctrl_get_groups_count,
.get_group_name = mtk_pctrl_get_group_name,
.get_group_pins = mtk_pctrl_get_group_pins,
+ .pin_dbg_show = mtk_pctrl_dbg_show,
};
static int mtk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -677,6 +749,7 @@ static const struct pinconf_ops mtk_confops = {
.pin_config_get = mtk_pinconf_get,
.pin_config_group_get = mtk_pconf_group_get,
.pin_config_group_set = mtk_pconf_group_set,
+ .is_generic = true,
};
static struct pinctrl_desc mtk_desc = {
@@ -693,13 +766,19 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &value);
if (err)
return err;
- return !value;
+ if (value)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
@@ -708,6 +787,9 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
@@ -722,6 +804,9 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
const struct mtk_pin_desc *desc;
+ if (gpio >= hw->soc->npins)
+ return;
+
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
@@ -729,12 +814,22 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
return pinctrl_gpio_direction_input(chip->base + gpio);
}
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+
+ if (gpio >= hw->soc->npins)
+ return -EINVAL;
+
mtk_gpio_set(chip, gpio, value);
return pinctrl_gpio_direction_output(chip->base + gpio);
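mtk_pctrl_show_one_pin() above assembles its debugfs line with repeated scnprintf() calls into a fixed 96-byte buffer. The idiom is safe because scnprintf() returns the number of characters actually stored, never more than the space it was given, so len cannot run past the buffer. A reduced sketch:

#include <linux/kernel.h>

/* Bounded append: each call stores at most buf_len - len - 1
 * characters plus the terminator and reports what it stored.
 */
static int example_format_line(char *buf, int buf_len,
			       unsigned int gpio, int dir, int out)
{
	int len = 0;

	len += scnprintf(buf + len, buf_len - len, "%03u:", gpio);
	len += scnprintf(buf + len, buf_len - len, " %d%d\n", dir, out);

	return len;
}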
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.h b/drivers/pinctrl/mediatek/pinctrl-paris.h
index 3d43771074e6..afb7650fd25b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.h
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.h
@@ -60,6 +60,9 @@
int mtk_paris_pinctrl_probe(struct platform_device *pdev,
const struct mtk_pin_soc *soc);
+ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
+ unsigned int gpio, char *buf, unsigned int bufLen);
+
extern const struct dev_pm_ops mtk_paris_pinctrl_pm_ops;
#endif /* __PINCTRL_PARIS_H */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 926b9997159a..d130c635f74b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -231,10 +231,24 @@ static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int tsin_a_d_valid_pins[] = { GPIOY_0 };
+static const unsigned int tsin_a_sop_pins[] = { GPIOY_1 };
+static const unsigned int tsin_a_clk_pins[] = { GPIOY_2 };
+static const unsigned int tsin_a_d0_pins[] = { GPIOY_3 };
+static const unsigned int tsin_a_dp_pins[] = {
+ GPIOY_4, GPIOY_5, GPIOY_6, GPIOY_7, GPIOY_8, GPIOY_9, GPIOY_10
+};
+
+static const unsigned int tsin_a_fail_pins[] = { GPIOY_11 };
static const unsigned int i2s_out_ch23_y_pins[] = { GPIOY_8 };
static const unsigned int i2s_out_ch45_y_pins[] = { GPIOY_9 };
static const unsigned int i2s_out_ch67_y_pins[] = { GPIOY_10 };
+static const unsigned int tsin_b_d_valid_pins[] = { GPIOX_6 };
+static const unsigned int tsin_b_sop_pins[] = { GPIOX_7 };
+static const unsigned int tsin_b_clk_pins[] = { GPIOX_8 };
+static const unsigned int tsin_b_d0_pins[] = { GPIOX_9 };
+
static const unsigned int spdif_out_y_pins[] = { GPIOY_12 };
static const unsigned int gen_clk_out_pins[] = { GPIOY_15 };
@@ -437,12 +451,22 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(pwm_a_x, 3, 17),
GROUP(pwm_e, 2, 30),
GROUP(pwm_f_x, 3, 18),
+ GROUP(tsin_b_d_valid, 3, 9),
+ GROUP(tsin_b_sop, 3, 8),
+ GROUP(tsin_b_clk, 3, 10),
+ GROUP(tsin_b_d0, 3, 7),
/* Bank Y */
GROUP(uart_cts_c, 1, 17),
GROUP(uart_rts_c, 1, 16),
GROUP(uart_tx_c, 1, 19),
GROUP(uart_rx_c, 1, 18),
+ GROUP(tsin_a_fail, 3, 3),
+ GROUP(tsin_a_d_valid, 3, 2),
+ GROUP(tsin_a_sop, 3, 1),
+ GROUP(tsin_a_clk, 3, 0),
+ GROUP(tsin_a_d0, 3, 4),
+ GROUP(tsin_a_dp, 3, 5),
GROUP(pwm_a_y, 1, 21),
GROUP(pwm_f_y, 1, 20),
GROUP(i2s_out_ch23_y, 1, 5),
@@ -601,6 +625,15 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_20", "GPIOX_21", "GPIOX_22",
};
+static const char * const tsin_a_groups[] = {
+ "tsin_a_clk", "tsin_a_sop", "tsin_a_d_valid", "tsin_a_d0",
+ "tsin_a_dp", "tsin_a_fail",
+};
+
+static const char * const tsin_b_groups[] = {
+ "tsin_b_clk", "tsin_b_sop", "tsin_b_d_valid", "tsin_b_d0",
+};
+
static const char * const emmc_groups[] = {
"emmc_nand_d07", "emmc_clk", "emmc_cmd", "emmc_ds",
};
@@ -792,6 +825,8 @@ static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(i2s_out),
FUNCTION(spdif_out),
FUNCTION(gen_clk_out),
+ FUNCTION(tsin_a),
+ FUNCTION(tsin_b),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 2ac921c83da9..32552d795bb2 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -241,6 +241,17 @@ static const unsigned int tsin_a_dp_pins[] = {
GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5, GPIODV_6, GPIODV_7,
};
+static const unsigned int tsin_b_clk_pins[] = { GPIOH_6 };
+static const unsigned int tsin_b_d0_pins[] = { GPIOH_7 };
+static const unsigned int tsin_b_sop_pins[] = { GPIOH_8 };
+static const unsigned int tsin_b_d_valid_pins[] = { GPIOH_9 };
+
+static const unsigned int tsin_b_fail_z4_pins[] = { GPIOZ_4 };
+static const unsigned int tsin_b_clk_z3_pins[] = { GPIOZ_3 };
+static const unsigned int tsin_b_d0_z2_pins[] = { GPIOZ_2 };
+static const unsigned int tsin_b_sop_z1_pins[] = { GPIOZ_1 };
+static const unsigned int tsin_b_d_valid_z0_pins[] = { GPIOZ_0 };
+
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
MESON_PIN(GPIOAO_0),
MESON_PIN(GPIOAO_1),
@@ -438,6 +449,11 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(eth_txd1, 4, 12),
GROUP(eth_txd2, 4, 11),
GROUP(eth_txd3, 4, 10),
+ GROUP(tsin_b_fail_z4, 3, 15),
+ GROUP(tsin_b_clk_z3, 3, 16),
+ GROUP(tsin_b_d0_z2, 3, 17),
+ GROUP(tsin_b_sop_z1, 3, 18),
+ GROUP(tsin_b_d_valid_z0, 3, 19),
GROUP(pwm_c, 3, 20),
GROUP(i2s_out_ch23_z, 3, 26),
GROUP(i2s_out_ch45_z, 3, 25),
@@ -454,6 +470,10 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(i2s_out_lr_clk, 6, 24),
GROUP(i2s_out_ch01, 6, 23),
GROUP(spdif_out_h, 6, 28),
+ GROUP(tsin_b_d0, 6, 17),
+ GROUP(tsin_b_sop, 6, 18),
+ GROUP(tsin_b_d_valid, 6, 19),
+ GROUP(tsin_b_clk, 6, 20),
/* Bank DV */
GROUP(uart_tx_b, 2, 16),
@@ -689,6 +709,12 @@ static const char * const tsin_a_groups[] = {
"tsin_a_dp", "tsin_a_fail",
};
+static const char * const tsin_b_groups[] = {
+ "tsin_b_clk", "tsin_b_sop", "tsin_b_d_valid", "tsin_b_d0",
+ "tsin_b_clk_z3", "tsin_b_sop_z1", "tsin_b_d_valid_z0", "tsin_b_d0_z2",
+ "tsin_b_fail_z4",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -764,6 +790,7 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(spdif_out),
FUNCTION(eth_led),
FUNCTION(tsin_a),
+ FUNCTION(tsin_b),
};
static struct meson_pmx_func meson_gxl_aobus_functions[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 243fba254175..5f125bd6279d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -402,7 +403,10 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
- return !(val & mask);
+ if (val & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
@@ -738,14 +742,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
return ret;
}
- nr_irq_parent = platform_irq_count(pdev);
- if (nr_irq_parent < 0) {
- if (nr_irq_parent != -EPROBE_DEFER)
- dev_err(dev, "Couldn't determine irq count: %pe\n",
- ERR_PTR(nr_irq_parent));
- return nr_irq_parent;
- }
-
+ nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
@@ -782,7 +779,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
if (!girq->parents)
return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
- int irq = platform_get_irq(pdev, i);
+ int irq = irq_of_parse_and_map(np, i);
if (irq < 0)
continue;
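A recurring conversion in this series: gpio_chip .get_direction callbacks used
to return a bare boolean (non-zero meaning input) and now return the
GPIO_LINE_DIRECTION_IN / GPIO_LINE_DIRECTION_OUT constants from
<linux/gpio/driver.h>. A minimal sketch of the new convention follows; the
driver struct, register name and bit polarity are hypothetical, not taken from
any of the drivers in this diff:

	/* Sketch only: a set bit in the OE register is assumed to mean "output". */
	static int example_gpio_get_direction(struct gpio_chip *chip,
					      unsigned int offset)
	{
		struct example_gpio *priv = gpiochip_get_data(chip);

		if (readl(priv->base + EXAMPLE_OE_REG) & BIT(offset))
			return GPIO_LINE_DIRECTION_OUT;

		return GPIO_LINE_DIRECTION_IN;
	}

Callbacks may still return a negative errno on failure, which is why callers
such as the stmfx and sx150x pinconf code below check for dir < 0 before
comparing against the direction constants.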
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 95f864dfdef4..ca7bbe4164c0 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -831,11 +831,14 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
clk_enable(nmk_chip->clk);
- dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ dir = readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset);
clk_disable(nmk_chip->clk);
- return dir;
+ if (dir)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 9eb86309c70b..dfef471201f6 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -286,6 +286,7 @@ out:
kfree(cfg);
return ret;
}
+EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_config);
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
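Exporting pinconf_generic_parse_dt_config() with EXPORT_SYMBOL_GPL makes the
generic device-tree pin-configuration parser usable from modular drivers. A
hedged sketch of a caller, assuming (as the generic implementation does) that
the parser allocates the configs array and the caller frees it; the
surrounding function is hypothetical:

	static int example_parse_node(struct pinctrl_dev *pctldev,
				      struct device_node *np)
	{
		unsigned long *configs;
		unsigned int nconfigs;
		int ret;

		ret = pinconf_generic_parse_dt_config(np, pctldev,
						      &configs, &nconfigs);
		if (ret)
			return ret;

		/* ... translate configs[0..nconfigs-1] into register writes ... */

		kfree(configs);
		return 0;
	}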
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 73aff6591de2..1fe62a35bb12 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -46,7 +46,10 @@ static int amd_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
pin_reg = readl(gpio_dev->base + offset * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- return !(pin_reg & BIT(OUTPUT_ENABLE_OFF));
+ if (pin_reg & BIT(OUTPUT_ENABLE_OFF))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 207f266e9cf2..52386ad29f28 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1414,7 +1414,10 @@ static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
u32 osr;
osr = readl_relaxed(pio + PIO_OSR);
- return !(osr & mask);
+ if (osr & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int at91_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index be5b645815e5..207cbae3a7bf 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -149,13 +149,16 @@ static int axp20x_gpio_get_direction(struct gpio_chip *chip,
* going to change the value soon anyway. Default to output.
*/
if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
/*
* The GPIO directions are the three lowest values.
* 2 is input, 0 and 1 are output
*/
- return val & 2;
+ if (val & 2)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
new file mode 100644
index 000000000000..1c08579f0198
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dialog DA9062 pinctrl and GPIO driver.
+ * Based on DA9055 GPIO driver.
+ *
+ * TODO:
+ * - add pinmux and pinctrl support (gpio alternate mode)
+ *
+ * Documents:
+ * [1] https://www.dialog-semiconductor.com/sites/default/files/da9062_datasheet_3v6.pdf
+ *
+ * Copyright (C) 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ */
+#include <linux/bits.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/gpio/driver.h>
+
+#include <linux/mfd/da9062/core.h>
+#include <linux/mfd/da9062/registers.h>
+
+/*
+ * We need this to get the gpio_desc from a <gpio_chip,offset> tuple, to
+ * decide if the gpio is active low without a vendor-specific dt-binding.
+ */
+#include "../gpio/gpiolib.h"
+
+#define DA9062_TYPE(offset) (4 * (offset % 2))
+#define DA9062_PIN_SHIFT(offset) (4 * (offset % 2))
+#define DA9062_PIN_ALTERNATE 0x00 /* gpio alternate mode */
+#define DA9062_PIN_GPI 0x01 /* gpio in */
+#define DA9062_PIN_GPO_OD 0x02 /* gpio out open-drain */
+#define DA9062_PIN_GPO_PP 0x03 /* gpio out push-pull */
+#define DA9062_GPIO_NUM 5
+
+struct da9062_pctl {
+ struct da9062 *da9062;
+ struct gpio_chip gc;
+ unsigned int pin_config[DA9062_GPIO_NUM];
+};
+
+static int da9062_pctl_get_pin_mode(struct da9062_pctl *pctl,
+ unsigned int offset)
+{
+ struct regmap *regmap = pctl->da9062->regmap;
+ int ret, val;
+
+ ret = regmap_read(regmap, DA9062AA_GPIO_0_1 + (offset >> 1), &val);
+ if (ret < 0)
+ return ret;
+
+ val >>= DA9062_PIN_SHIFT(offset);
+ val &= DA9062AA_GPIO0_PIN_MASK;
+
+ return val;
+}
+
+static int da9062_pctl_set_pin_mode(struct da9062_pctl *pctl,
+ unsigned int offset, unsigned int mode_req)
+{
+ struct regmap *regmap = pctl->da9062->regmap;
+ unsigned int mode = mode_req;
+ unsigned int mask;
+ int ret;
+
+ mode &= DA9062AA_GPIO0_PIN_MASK;
+ mode <<= DA9062_PIN_SHIFT(offset);
+ mask = DA9062AA_GPIO0_PIN_MASK << DA9062_PIN_SHIFT(offset);
+
+ ret = regmap_update_bits(regmap, DA9062AA_GPIO_0_1 + (offset >> 1),
+ mask, mode);
+ if (!ret)
+ pctl->pin_config[offset] = mode_req;
+
+ return ret;
+}
+
+static int da9062_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+ int gpio_mode, val;
+ int ret;
+
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return gpio_mode;
+
+ switch (gpio_mode) {
+ case DA9062_PIN_ALTERNATE:
+ return -ENOTSUPP;
+ case DA9062_PIN_GPI:
+ ret = regmap_read(regmap, DA9062AA_STATUS_B, &val);
+ if (ret < 0)
+ return ret;
+ break;
+ case DA9062_PIN_GPO_OD:
+ case DA9062_PIN_GPO_PP:
+ ret = regmap_read(regmap, DA9062AA_GPIO_MODE0_4, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return !!(val & BIT(offset));
+}
+
+static void da9062_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+
+ regmap_update_bits(regmap, DA9062AA_GPIO_MODE0_4, BIT(offset),
+ value << offset);
+}
+
+static int da9062_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ int gpio_mode;
+
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return gpio_mode;
+
+ switch (gpio_mode) {
+ case DA9062_PIN_ALTERNATE:
+ return -ENOTSUPP;
+ case DA9062_PIN_GPI:
+ return GPIO_LINE_DIRECTION_IN;
+ case DA9062_PIN_GPO_OD:
+ case DA9062_PIN_GPO_PP:
+ return GPIO_LINE_DIRECTION_OUT;
+ }
+
+ return -EINVAL;
+}
+
+static int da9062_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+ struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
+ unsigned int gpi_type;
+ int ret;
+
+ ret = da9062_pctl_set_pin_mode(pctl, offset, DA9062_PIN_GPI);
+ if (ret)
+ return ret;
+
+ /*
+ * If the gpio is active low, mirror that in the hardware too. This does
+ * not affect gpio_get(): we read and return the raw gpio level, so
+ * gpiolib's active_low handling stays correct.
+ *
+ * 0 - active low, 1 - active high
+ */
+ gpi_type = !gpiod_is_active_low(desc);
+
+ return regmap_update_bits(regmap, DA9062AA_GPIO_0_1 + (offset >> 1),
+ DA9062AA_GPIO0_TYPE_MASK << DA9062_TYPE(offset),
+ gpi_type << DA9062_TYPE(offset));
+}
+
+static int da9062_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ unsigned int pin_config = pctl->pin_config[offset];
+ int ret;
+
+ ret = da9062_pctl_set_pin_mode(pctl, offset, pin_config);
+ if (ret)
+ return ret;
+
+ da9062_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int da9062_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct regmap *regmap = pctl->da9062->regmap;
+ int gpio_mode;
+
+ /*
+ * We need to meet the following restrictions [1, Figure 18]:
+ * - PIN_CONFIG_BIAS_PULL_DOWN -> only allowed if the pin is used as
+ * gpio input
+ * - PIN_CONFIG_BIAS_PULL_UP -> only allowed if the pin is used as
+ * gpio output open-drain.
+ */
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ return regmap_update_bits(regmap, DA9062AA_CONFIG_K,
+ BIT(offset), 0);
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return -EINVAL;
+ else if (gpio_mode != DA9062_PIN_GPI)
+ return -ENOTSUPP;
+ return regmap_update_bits(regmap, DA9062AA_CONFIG_K,
+ BIT(offset), BIT(offset));
+ case PIN_CONFIG_BIAS_PULL_UP:
+ gpio_mode = da9062_pctl_get_pin_mode(pctl, offset);
+ if (gpio_mode < 0)
+ return -EINVAL;
+ else if (gpio_mode != DA9062_PIN_GPO_OD)
+ return -ENOTSUPP;
+ return regmap_update_bits(regmap, DA9062AA_CONFIG_K,
+ BIT(offset), BIT(offset));
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ return da9062_pctl_set_pin_mode(pctl, offset,
+ DA9062_PIN_GPO_OD);
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return da9062_pctl_set_pin_mode(pctl, offset,
+ DA9062_PIN_GPO_PP);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int da9062_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct da9062_pctl *pctl = gpiochip_get_data(gc);
+ struct da9062 *da9062 = pctl->da9062;
+
+ return regmap_irq_get_virq(da9062->regmap_irq,
+ DA9062_IRQ_GPI0 + offset);
+}
+
+static const struct gpio_chip reference_gc = {
+ .owner = THIS_MODULE,
+ .get = da9062_gpio_get,
+ .set = da9062_gpio_set,
+ .get_direction = da9062_gpio_get_direction,
+ .direction_input = da9062_gpio_direction_input,
+ .direction_output = da9062_gpio_direction_output,
+ .set_config = da9062_gpio_set_config,
+ .to_irq = da9062_gpio_to_irq,
+ .can_sleep = true,
+ .ngpio = DA9062_GPIO_NUM,
+ .base = -1,
+};
+
+static int da9062_pctl_probe(struct platform_device *pdev)
+{
+ struct device *parent = pdev->dev.parent;
+ struct da9062_pctl *pctl;
+ int i;
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ pctl->da9062 = dev_get_drvdata(parent);
+ if (!pctl->da9062)
+ return -EINVAL;
+
+ if (!device_property_present(parent, "gpio-controller"))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(pctl->pin_config); i++)
+ pctl->pin_config[i] = DA9062_PIN_GPO_PP;
+
+ /*
+ * Currently the driver handles only the GPIO support. The
+ * pinctrl/pinmux support can be added later if needed.
+ */
+ pctl->gc = reference_gc;
+ pctl->gc.label = dev_name(&pdev->dev);
+ pctl->gc.parent = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ pctl->gc.of_node = parent->of_node;
+#endif
+
+ platform_set_drvdata(pdev, pctl);
+
+ return devm_gpiochip_add_data(&pdev->dev, &pctl->gc, pctl);
+}
+
+static struct platform_driver da9062_pctl_driver = {
+ .probe = da9062_pctl_probe,
+ .driver = {
+ .name = "da9062-gpio",
+ },
+};
+module_platform_driver(da9062_pctl_driver);
+
+MODULE_AUTHOR("Marco Felsch <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("DA9062 PMIC pinctrl and GPIO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:da9062-gpio");
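The DA9062 packs two pins per mode register: the register index is
DA9062AA_GPIO_0_1 + (offset >> 1) and the 4-bit field sits at
4 * (offset % 2). Two illustrative helpers mirroring the arithmetic in
da9062_pctl_get_pin_mode() above (sketch only, not part of the driver):

	static unsigned int da9062_mode_reg(unsigned int offset)
	{
		/* offsets 0/1 share DA9062AA_GPIO_0_1, 2/3 the next register, ... */
		return DA9062AA_GPIO_0_1 + (offset >> 1);
	}

	static unsigned int da9062_mode_field(unsigned int val, unsigned int offset)
	{
		/* even offsets use the low nibble, odd offsets the high one */
		return (val >> (4 * (offset % 2))) & DA9062AA_GPIO0_PIN_MASK;
	}

For GPIO3 (offset 3) this reads the register after DA9062AA_GPIO_0_1 and
extracts the mode field from its upper nibble.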
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 96f04d121ebd..e5dcf77fe43d 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
 * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ * Copyright (c) 2017, 2019 Paul Boddie <paul@boddie.org.uk>
*/
#include <linux/compiler.h>
@@ -900,6 +901,7 @@ static int jz4780_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, };
static int jz4780_i2c3_pins[] = { 0x6a, 0x6b, };
static int jz4780_i2c4_e_pins[] = { 0x8c, 0x8d, };
static int jz4780_i2c4_f_pins[] = { 0xb9, 0xb8, };
+static int jz4780_hdmi_ddc_pins[] = { 0xb9, 0xb8, };
static int jz4780_uart2_data_funcs[] = { 1, 1, };
static int jz4780_uart2_hwflow_funcs[] = { 1, 1, };
@@ -908,6 +910,7 @@ static int jz4780_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, };
static int jz4780_i2c3_funcs[] = { 1, 1, };
static int jz4780_i2c4_e_funcs[] = { 1, 1, };
static int jz4780_i2c4_f_funcs[] = { 1, 1, };
+static int jz4780_hdmi_ddc_funcs[] = { 0, 0, };
static const struct group_desc jz4780_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
@@ -950,6 +953,7 @@ static const struct group_desc jz4780_groups[] = {
INGENIC_PIN_GROUP("i2c3-data", jz4780_i2c3),
INGENIC_PIN_GROUP("i2c4-data-e", jz4780_i2c4_e),
INGENIC_PIN_GROUP("i2c4-data-f", jz4780_i2c4_f),
+ INGENIC_PIN_GROUP("hdmi-ddc", jz4780_hdmi_ddc),
INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit),
INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit),
{ "lcd-no-pins", },
@@ -982,6 +986,7 @@ static const char *jz4780_nemc_groups[] = {
static const char *jz4780_i2c3_groups[] = { "i2c3-data", };
static const char *jz4780_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
static const char *jz4780_cim_groups[] = { "cim-data", };
+static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
static const struct function_desc jz4780_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
@@ -1014,6 +1019,8 @@ static const struct function_desc jz4780_functions[] = {
{ "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), },
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
+ { "hdmi-ddc", jz4780_hdmi_ddc_groups,
+ ARRAY_SIZE(jz4780_hdmi_ddc_groups), },
};
static const struct ingenic_chip_info jz4780_chip_info = {
@@ -1437,6 +1444,19 @@ static int x1830_mmc1_4bit_pins[] = { 0x45, 0x46, 0x47, };
static int x1830_i2c0_pins[] = { 0x0c, 0x0d, };
static int x1830_i2c1_pins[] = { 0x39, 0x3a, };
static int x1830_i2c2_pins[] = { 0x5b, 0x5c, };
+static int x1830_lcd_rgb_18bit_pins[] = {
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b,
+};
+static int x1830_lcd_slcd_8bit_pins[] = {
+ 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x6c, 0x6d,
+ 0x69, 0x72, 0x73, 0x7b, 0x7a,
+};
+static int x1830_lcd_slcd_16bit_pins[] = {
+ 0x6e, 0x6f, 0x70, 0x71, 0x76, 0x77, 0x78, 0x79,
+};
static int x1830_pwm_pwm0_b_pins[] = { 0x31, };
static int x1830_pwm_pwm0_c_pins[] = { 0x4b, };
static int x1830_pwm_pwm1_b_pins[] = { 0x32, };
@@ -1486,6 +1506,16 @@ static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
static int x1830_i2c0_funcs[] = { 1, 1, };
static int x1830_i2c1_funcs[] = { 0, 0, };
static int x1830_i2c2_funcs[] = { 1, 1, };
+static int x1830_lcd_rgb_18bit_funcs[] = {
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+};
+static int x1830_lcd_slcd_8bit_funcs[] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+static int x1830_lcd_slcd_16bit_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, };
static int x1830_pwm_pwm0_b_funcs[] = { 0, };
static int x1830_pwm_pwm0_c_funcs[] = { 1, };
static int x1830_pwm_pwm1_b_funcs[] = { 0, };
@@ -1534,6 +1564,10 @@ static const struct group_desc x1830_groups[] = {
INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
+ INGENIC_PIN_GROUP("lcd-rgb-18bit", x1830_lcd_rgb_18bit),
+ INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit),
+ INGENIC_PIN_GROUP("lcd-slcd-16bit", x1830_lcd_slcd_16bit),
+ { "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0-b", x1830_pwm_pwm0_b),
INGENIC_PIN_GROUP("pwm0-c", x1830_pwm_pwm0_c),
INGENIC_PIN_GROUP("pwm1-b", x1830_pwm_pwm1_b),
@@ -1572,6 +1606,9 @@ static const char *x1830_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
static const char *x1830_i2c0_groups[] = { "i2c0-data", };
static const char *x1830_i2c1_groups[] = { "i2c1-data", };
static const char *x1830_i2c2_groups[] = { "i2c2-data", };
+static const char *x1830_lcd_groups[] = {
+ "lcd-rgb-18bit", "lcd-slcd-8bit", "lcd-slcd-16bit", "lcd-no-pins",
+};
static const char *x1830_pwm0_groups[] = { "pwm0-b", "pwm0-c", };
static const char *x1830_pwm1_groups[] = { "pwm1-b", "pwm1-c", };
static const char *x1830_pwm2_groups[] = { "pwm2-c-8", "pwm2-c-13", };
@@ -1593,6 +1630,7 @@ static const struct function_desc x1830_functions[] = {
{ "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
{ "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
{ "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
+ { "lcd", x1830_lcd_groups, ARRAY_SIZE(x1830_lcd_groups), },
{ "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
{ "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
{ "pwm2", x1830_pwm2_groups, ARRAY_SIZE(x1830_pwm2_groups), },
@@ -1916,13 +1954,19 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
struct ingenic_pinctrl *jzpc = jzgc->jzpc;
unsigned int pin = gc->base + offset;
- if (jzpc->info->version >= ID_JZ4760)
- return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1);
+ if (jzpc->info->version >= ID_JZ4760) {
+ if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+ return GPIO_LINE_DIRECTION_IN;
+ return GPIO_LINE_DIRECTION_OUT;
+ }
if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
- return true;
+ return GPIO_LINE_DIRECTION_IN;
+
+ if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_DIR))
+ return GPIO_LINE_DIRECTION_OUT;
- return !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_DIR);
+ return GPIO_LINE_DIRECTION_IN;
}
static const struct pinctrl_ops ingenic_pctlops = {
@@ -2158,7 +2202,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
default:
- unreachable();
+ /* unreachable */
+ break;
}
}
@@ -2278,11 +2323,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.direction_input = ingenic_gpio_direction_input;
jzgc->gc.direction_output = ingenic_gpio_direction_output;
jzgc->gc.get_direction = ingenic_gpio_get_direction;
-
- if (of_property_read_bool(node, "gpio-ranges")) {
- jzgc->gc.request = gpiochip_generic_request;
- jzgc->gc.free = gpiochip_generic_free;
- }
+ jzgc->gc.request = gpiochip_generic_request;
+ jzgc->gc.free = gpiochip_generic_free;
jzgc->irq = irq_of_parse_and_map(node, 0);
if (!jzgc->irq)
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index eb3dd0d46d6c..ed8eac6c1494 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -604,7 +604,10 @@ static int ocelot_gpio_get_direction(struct gpio_chip *chip,
regmap_read(info->map, REG(OCELOT_GPIO_OE, info, offset), &val);
- return !(val & BIT(offset % 32));
+ if (val & BIT(offset % 32))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int ocelot_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 674b7b5919df..5a312279b3c7 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -756,7 +756,10 @@ static int oxnas_gpio_get_direction(struct gpio_chip *chip,
struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
u32 mask = BIT(offset);
- return !(readl_relaxed(bank->reg_base + OUTPUT_EN) & mask);
+ if (readl_relaxed(bank->reg_base + OUTPUT_EN) & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int oxnas_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index e5d6d3f9753e..a6e2a4a4ca95 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -1990,7 +1990,10 @@ static int pic32_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
- return !!(readl(bank->reg_base + TRIS_REG) & BIT(offset));
+ if (readl(bank->reg_base + TRIS_REG) & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void pic32_gpio_irq_ack(struct irq_data *data)
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index fa370c171cad..ec761ba2a2da 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1166,7 +1166,10 @@ static int pistachio_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct pistachio_gpio_bank *bank = gpiochip_get_data(chip);
- return !(gpio_readl(bank, GPIO_OUTPUT_EN) & BIT(offset));
+ if (gpio_readl(bank, GPIO_OUTPUT_EN) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int pistachio_gpio_get(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index 26adbe9d6d42..cccbe072274e 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -184,7 +184,7 @@ static int rk805_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
/* default output */
if (!pci->pin_cfg[offset].dir_msk)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(pci->rk808->regmap,
pci->pin_cfg[offset].reg,
@@ -194,7 +194,10 @@ static int rk805_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
return ret;
}
- return !(val & pci->pin_cfg[offset].dir_msk);
+ if (val & pci->pin_cfg[offset].dir_msk)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static const struct gpio_chip rk805_gpio_chip = {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index fc9a2a9959d9..098951346339 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -2549,7 +2549,10 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
clk_disable(bank->clk);
- return !(data & BIT(offset));
+ if (data & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
/*
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 617585be6a7d..da2d8365c690 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -777,7 +777,10 @@ static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
{
struct rza1_port *port = gpiochip_get_data(chip);
- return !!rza1_get_bit(port, RZA1_PM_REG, gpio);
+ if (rza1_get_bit(port, RZA1_PM_REG, gpio))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int rza1_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
index a205964e839b..c5bf98c86b2b 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/pinctrl-rza2.c
@@ -135,10 +135,10 @@ static int rza2_chip_get_direction(struct gpio_chip *chip, unsigned int offset)
reg16 = (reg16 >> (pin * 2)) & RZA2_PDR_MASK;
if (reg16 == RZA2_PDR_OUTPUT)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
if (reg16 == RZA2_PDR_INPUT)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
/*
* This GPIO controller has a default Hi-Z state that is not input or
@@ -146,7 +146,7 @@ static int rza2_chip_get_direction(struct gpio_chip *chip, unsigned int offset)
*/
rza2_pin_to_gpio(priv->base, offset, 1);
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int rza2_chip_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 4f39a7945d01..7b8c7a0b13de 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -746,7 +746,10 @@ static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
function = st_pctl_get_pin_function(&pc, offset);
if (function) {
st_pinconf_get_direction(&pc, offset, &config);
- return !ST_PINCONF_UNPACK_OE(config);
+ if (ST_PINCONF_UNPACK_OE(config))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
/*
@@ -758,7 +761,10 @@ static int st_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
direction |= ((value >> offset) & 0x1) << i;
}
- return (direction == ST_GPIO_DIRECTION_IN);
+ if (direction == ST_GPIO_DIRECTION_IN)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
/* Pinctrl Groups */
@@ -996,6 +1002,7 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
unsigned int function;
int offset = st_gpio_pin(pin_id);
char f[16];
+ int oe;
mutex_unlock(&pctldev->mutex);
pc = st_get_pio_control(pctldev, pin_id);
@@ -1008,10 +1015,11 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
else
snprintf(f, 5, "GPIO");
+ oe = st_gpio_get_direction(&pc_to_bank(pc)->gpio_chip, offset);
seq_printf(s, "[OE:%d,PU:%ld,OD:%ld]\t%s\n"
"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
"de:%ld,rt-clk:%ld,rt-delay:%ld]",
- !st_gpio_get_direction(&pc_to_bank(pc)->gpio_chip, offset),
+ (oe == GPIO_LINE_DIRECTION_OUT),
ST_PINCONF_UNPACK_PU(config),
ST_PINCONF_UNPACK_OD(config),
f,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 16723797fa7c..60100b45f5e5 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -134,10 +134,14 @@ static int stmfx_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
ret = regmap_read(pctl->stmfx->map, reg, &val);
/*
* On stmfx, gpio pins direction is (0)input, (1)output.
- * .get_direction returns 0=out, 1=in
*/
+ if (ret)
+ return ret;
- return ret ? ret : !(val & mask);
+ if (val & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int stmfx_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
@@ -223,6 +227,13 @@ static int stmfx_pinconf_get(struct pinctrl_dev *pctldev,
dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
if (dir < 0)
return dir;
+
+ /*
+ * gpiolib currently uses IN = 1 and OUT = 0, but don't count on that;
+ * map the value explicitly so this stays correct if it ever changes.
+ */
+ dir = (dir == GPIO_LINE_DIRECTION_IN) ? 1 : 0;
+
type = stmfx_pinconf_get_type(pctl, pin);
if (type < 0)
return type;
@@ -360,7 +371,7 @@ static void stmfx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
if (val < 0)
return;
- if (!dir) {
+ if (dir == GPIO_LINE_DIRECTION_OUT) {
seq_printf(s, "output %s ", val ? "high" : "low");
if (type)
seq_printf(s, "open drain %s internal pull-up ",
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 566665931a04..6e74bd87d959 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -391,13 +391,16 @@ static int sx150x_gpio_get_direction(struct gpio_chip *chip,
int ret;
if (sx150x_pin_is_oscio(pctl, offset))
- return false;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(pctl->regmap, pctl->data->reg_dir, &value);
if (ret < 0)
return ret;
- return !!(value & BIT(offset));
+ if (value & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int sx150x_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -687,7 +690,7 @@ static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
- if (ret)
+ if (ret == GPIO_LINE_DIRECTION_IN)
return -EINVAL;
ret = sx150x_gpio_get(&pctl->gpio, pin);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 811af2f81c39..c5d4428f1f94 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -50,6 +50,16 @@ config PINCTRL_IPQ8074
Qualcomm Technologies Inc. IPQ8074 platform. Select this for
IPQ8074.
+config PINCTRL_IPQ6018
+ tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for
+ the Qualcomm Technologies Inc. TLMM block found on the
+ Qualcomm Technologies Inc. IPQ6018 platform. Select this for
+ IPQ6018.
+
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index c2c2f9ad6827..d9e09045a776 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
obj-$(CONFIG_PINCTRL_IPQ8074) += pinctrl-ipq8074.o
+obj-$(CONFIG_PINCTRL_IPQ6018) += pinctrl-ipq6018.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
new file mode 100644
index 000000000000..38c33a778cb8
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+static const struct pinctrl_pin_desc ipq6018_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+
+enum ipq6018_functions {
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_audio0,
+ msm_mux_audio1,
+ msm_mux_audio2,
+ msm_mux_audio3,
+ msm_mux_audio_rxbclk,
+ msm_mux_audio_rxfsync,
+ msm_mux_audio_rxmclk,
+ msm_mux_audio_rxmclkin,
+ msm_mux_audio_txbclk,
+ msm_mux_audio_txfsync,
+ msm_mux_audio_txmclk,
+ msm_mux_audio_txmclkin,
+ msm_mux_blsp0_i2c,
+ msm_mux_blsp0_spi,
+ msm_mux_blsp0_uart,
+ msm_mux_blsp1_i2c,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp1_uart,
+ msm_mux_blsp2_i2c,
+ msm_mux_blsp2_spi,
+ msm_mux_blsp2_uart,
+ msm_mux_blsp3_i2c,
+ msm_mux_blsp3_spi,
+ msm_mux_blsp3_uart,
+ msm_mux_blsp4_i2c,
+ msm_mux_blsp4_spi,
+ msm_mux_blsp4_uart,
+ msm_mux_blsp5_i2c,
+ msm_mux_blsp5_uart,
+ msm_mux_burn0,
+ msm_mux_burn1,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_cxc0,
+ msm_mux_cxc1,
+ msm_mux_dbg_out,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_lpass_aud,
+ msm_mux_lpass_aud0,
+ msm_mux_lpass_aud1,
+ msm_mux_lpass_aud2,
+ msm_mux_lpass_pcm,
+ msm_mux_lpass_pdm,
+ msm_mux_mac00,
+ msm_mux_mac01,
+ msm_mux_mac10,
+ msm_mux_mac11,
+ msm_mux_mac12,
+ msm_mux_mac13,
+ msm_mux_mac20,
+ msm_mux_mac21,
+ msm_mux_mdc,
+ msm_mux_mdio,
+ msm_mux_pcie0_clk,
+ msm_mux_pcie0_rst,
+ msm_mux_pcie0_wake,
+ msm_mux_prng_rosc,
+ msm_mux_pta1_0,
+ msm_mux_pta1_1,
+ msm_mux_pta1_2,
+ msm_mux_pta2_0,
+ msm_mux_pta2_1,
+ msm_mux_pta2_2,
+ msm_mux_pwm00,
+ msm_mux_pwm01,
+ msm_mux_pwm02,
+ msm_mux_pwm03,
+ msm_mux_pwm04,
+ msm_mux_pwm10,
+ msm_mux_pwm11,
+ msm_mux_pwm12,
+ msm_mux_pwm13,
+ msm_mux_pwm14,
+ msm_mux_pwm20,
+ msm_mux_pwm21,
+ msm_mux_pwm22,
+ msm_mux_pwm23,
+ msm_mux_pwm24,
+ msm_mux_pwm30,
+ msm_mux_pwm31,
+ msm_mux_pwm32,
+ msm_mux_pwm33,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_qpic_pad,
+ msm_mux_rx0,
+ msm_mux_rx1,
+ msm_mux_rx_swrm,
+ msm_mux_rx_swrm0,
+ msm_mux_rx_swrm1,
+ msm_mux_sd_card,
+ msm_mux_sd_write,
+ msm_mux_tsens_max,
+ msm_mux_tx_swrm,
+ msm_mux_tx_swrm0,
+ msm_mux_tx_swrm1,
+ msm_mux_tx_swrm2,
+ msm_mux_wci20,
+ msm_mux_wci21,
+ msm_mux_wci22,
+ msm_mux_wci23,
+ msm_mux_wsa_swrm,
+ msm_mux__,
+};
+
+static const char * const blsp3_uart_groups[] = {
+ "gpio73", "gpio74", "gpio75", "gpio76",
+};
+
+static const char * const blsp3_i2c_groups[] = {
+ "gpio73", "gpio74",
+};
+
+static const char * const blsp3_spi_groups[] = {
+ "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char * const wci20_groups[] = {
+ "gpio0", "gpio2",
+};
+
+static const char * const qpic_pad_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio9", "gpio10",
+ "gpio11", "gpio17",
+};
+
+static const char * const burn0_groups[] = {
+ "gpio0",
+};
+
+static const char * const mac12_groups[] = {
+ "gpio1", "gpio11",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio1",
+};
+
+static const char * const burn1_groups[] = {
+ "gpio1",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio0",
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8", "gpio9",
+ "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15", "gpio16",
+ "gpio17",
+};
+
+static const char * const mac01_groups[] = {
+ "gpio3", "gpio4",
+};
+
+static const char * const mac21_groups[] = {
+ "gpio5", "gpio6",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio9",
+};
+
+static const char * const cxc0_groups[] = {
+ "gpio9", "gpio16",
+};
+
+static const char * const mac13_groups[] = {
+ "gpio9", "gpio16",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio9",
+};
+
+static const char * const wci22_groups[] = {
+ "gpio11", "gpio17",
+};
+
+static const char * const pwm00_groups[] = {
+ "gpio18",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio18",
+};
+
+static const char * const wci23_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const mac11_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const pwm10_groups[] = {
+ "gpio19",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio19",
+};
+
+static const char * const pwm20_groups[] = {
+ "gpio20",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio20",
+};
+
+static const char * const pwm30_groups[] = {
+ "gpio21",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio21",
+};
+
+static const char * const audio_txmclk_groups[] = {
+ "gpio22",
+};
+
+static const char * const audio_txmclkin_groups[] = {
+ "gpio22",
+};
+
+static const char * const pwm02_groups[] = {
+ "gpio22",
+};
+
+static const char * const tx_swrm0_groups[] = {
+ "gpio22",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio22",
+};
+
+static const char * const audio_txbclk_groups[] = {
+ "gpio23",
+};
+
+static const char * const pwm12_groups[] = {
+ "gpio23",
+};
+
+static const char * const wsa_swrm_groups[] = {
+ "gpio23", "gpio24",
+};
+
+static const char * const tx_swrm1_groups[] = {
+ "gpio23",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio23",
+};
+
+static const char * const audio_txfsync_groups[] = {
+ "gpio24",
+};
+
+static const char * const pwm22_groups[] = {
+ "gpio24",
+};
+
+static const char * const tx_swrm2_groups[] = {
+ "gpio24",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio24",
+};
+
+static const char * const audio0_groups[] = {
+ "gpio25", "gpio32",
+};
+
+static const char * const pwm32_groups[] = {
+ "gpio25",
+};
+
+static const char * const tx_swrm_groups[] = {
+ "gpio25",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio25",
+};
+
+static const char * const audio1_groups[] = {
+ "gpio26", "gpio33",
+};
+
+static const char * const pwm04_groups[] = {
+ "gpio26",
+};
+
+static const char * const audio2_groups[] = {
+ "gpio27",
+};
+
+static const char * const pwm14_groups[] = {
+ "gpio27",
+};
+
+static const char * const audio3_groups[] = {
+ "gpio28",
+};
+
+static const char * const pwm24_groups[] = {
+ "gpio28",
+};
+
+static const char * const audio_rxmclk_groups[] = {
+ "gpio29",
+};
+
+static const char * const audio_rxmclkin_groups[] = {
+ "gpio29",
+};
+
+static const char * const pwm03_groups[] = {
+ "gpio29",
+};
+
+static const char * const lpass_pdm_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const lpass_aud_groups[] = {
+ "gpio29",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio29",
+};
+
+static const char * const audio_rxbclk_groups[] = {
+ "gpio30",
+};
+
+static const char * const pwm13_groups[] = {
+ "gpio30",
+};
+
+static const char * const lpass_aud0_groups[] = {
+ "gpio30",
+};
+
+static const char * const rx_swrm_groups[] = {
+ "gpio30",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio30",
+};
+
+static const char * const audio_rxfsync_groups[] = {
+ "gpio31",
+};
+
+static const char * const pwm23_groups[] = {
+ "gpio31",
+};
+
+static const char * const lpass_aud1_groups[] = {
+ "gpio31",
+};
+
+static const char * const rx_swrm0_groups[] = {
+ "gpio31",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio31",
+};
+
+static const char * const pwm33_groups[] = {
+ "gpio32",
+};
+
+static const char * const lpass_aud2_groups[] = {
+ "gpio32",
+};
+
+static const char * const rx_swrm1_groups[] = {
+ "gpio32",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio32",
+};
+
+static const char * const lpass_pcm_groups[] = {
+ "gpio34", "gpio35", "gpio36", "gpio37",
+};
+
+static const char * const mac10_groups[] = {
+ "gpio34", "gpio35",
+};
+
+static const char * const mac00_groups[] = {
+ "gpio34", "gpio35",
+};
+
+static const char * const mac20_groups[] = {
+ "gpio36", "gpio37",
+};
+
+static const char * const blsp0_uart_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+};
+
+static const char * const blsp0_i2c_groups[] = {
+ "gpio38", "gpio39",
+};
+
+static const char * const blsp0_spi_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+};
+
+static const char * const blsp2_uart_groups[] = {
+ "gpio42", "gpio43", "gpio44", "gpio45",
+};
+
+static const char * const blsp2_i2c_groups[] = {
+ "gpio42", "gpio43",
+};
+
+static const char * const blsp2_spi_groups[] = {
+ "gpio42", "gpio43", "gpio44", "gpio45",
+};
+
+static const char * const blsp5_i2c_groups[] = {
+ "gpio46", "gpio47",
+};
+
+static const char * const blsp5_uart_groups[] = {
+ "gpio48", "gpio49",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio49",
+};
+
+static const char * const pwm01_groups[] = {
+ "gpio50",
+};
+
+static const char * const pta1_1_groups[] = {
+ "gpio51",
+};
+
+static const char * const pwm11_groups[] = {
+ "gpio51",
+};
+
+static const char * const rx1_groups[] = {
+ "gpio51",
+};
+
+static const char * const pta1_2_groups[] = {
+ "gpio52",
+};
+
+static const char * const pwm21_groups[] = {
+ "gpio52",
+};
+
+static const char * const pta1_0_groups[] = {
+ "gpio53",
+};
+
+static const char * const pwm31_groups[] = {
+ "gpio53",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio53",
+};
+
+static const char * const blsp4_uart_groups[] = {
+ "gpio55", "gpio56", "gpio57", "gpio58",
+};
+
+static const char * const blsp4_i2c_groups[] = {
+ "gpio55", "gpio56",
+};
+
+static const char * const blsp4_spi_groups[] = {
+ "gpio55", "gpio56", "gpio57", "gpio58",
+};
+
+static const char * const pcie0_clk_groups[] = {
+ "gpio59",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio59",
+};
+
+static const char * const pcie0_rst_groups[] = {
+ "gpio60",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio60",
+};
+
+static const char * const pcie0_wake_groups[] = {
+ "gpio61",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio61",
+};
+
+static const char * const sd_card_groups[] = {
+ "gpio62",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio63",
+};
+
+static const char * const rx0_groups[] = {
+ "gpio63",
+};
+
+static const char * const tsens_max_groups[] = {
+ "gpio63",
+};
+
+static const char * const mdc_groups[] = {
+ "gpio64",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79",
+};
+
+static const char * const mdio_groups[] = {
+ "gpio65",
+};
+
+static const char * const pta2_0_groups[] = {
+ "gpio66",
+};
+
+static const char * const wci21_groups[] = {
+ "gpio66", "gpio68",
+};
+
+static const char * const cxc1_groups[] = {
+ "gpio66", "gpio68",
+};
+
+static const char * const pta2_1_groups[] = {
+ "gpio67",
+};
+
+static const char * const pta2_2_groups[] = {
+ "gpio68",
+};
+
+static const char * const blsp1_uart_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72",
+};
+
+static const char * const blsp1_i2c_groups[] = {
+ "gpio69", "gpio70",
+};
+
+static const char * const blsp1_spi_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio69", "gpio71",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio70",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79",
+};
+
+static const struct msm_function ipq6018_functions[] = {
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(audio0),
+ FUNCTION(audio1),
+ FUNCTION(audio2),
+ FUNCTION(audio3),
+ FUNCTION(audio_rxbclk),
+ FUNCTION(audio_rxfsync),
+ FUNCTION(audio_rxmclk),
+ FUNCTION(audio_rxmclkin),
+ FUNCTION(audio_txbclk),
+ FUNCTION(audio_txfsync),
+ FUNCTION(audio_txmclk),
+ FUNCTION(audio_txmclkin),
+ FUNCTION(blsp0_i2c),
+ FUNCTION(blsp0_spi),
+ FUNCTION(blsp0_uart),
+ FUNCTION(blsp1_i2c),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp1_uart),
+ FUNCTION(blsp2_i2c),
+ FUNCTION(blsp2_spi),
+ FUNCTION(blsp2_uart),
+ FUNCTION(blsp3_i2c),
+ FUNCTION(blsp3_spi),
+ FUNCTION(blsp3_uart),
+ FUNCTION(blsp4_i2c),
+ FUNCTION(blsp4_spi),
+ FUNCTION(blsp4_uart),
+ FUNCTION(blsp5_i2c),
+ FUNCTION(blsp5_uart),
+ FUNCTION(burn0),
+ FUNCTION(burn1),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(cxc0),
+ FUNCTION(cxc1),
+ FUNCTION(dbg_out),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(gpio),
+ FUNCTION(lpass_aud),
+ FUNCTION(lpass_aud0),
+ FUNCTION(lpass_aud1),
+ FUNCTION(lpass_aud2),
+ FUNCTION(lpass_pcm),
+ FUNCTION(lpass_pdm),
+ FUNCTION(mac00),
+ FUNCTION(mac01),
+ FUNCTION(mac10),
+ FUNCTION(mac11),
+ FUNCTION(mac12),
+ FUNCTION(mac13),
+ FUNCTION(mac20),
+ FUNCTION(mac21),
+ FUNCTION(mdc),
+ FUNCTION(mdio),
+ FUNCTION(pcie0_clk),
+ FUNCTION(pcie0_rst),
+ FUNCTION(pcie0_wake),
+ FUNCTION(prng_rosc),
+ FUNCTION(pta1_0),
+ FUNCTION(pta1_1),
+ FUNCTION(pta1_2),
+ FUNCTION(pta2_0),
+ FUNCTION(pta2_1),
+ FUNCTION(pta2_2),
+ FUNCTION(pwm00),
+ FUNCTION(pwm01),
+ FUNCTION(pwm02),
+ FUNCTION(pwm03),
+ FUNCTION(pwm04),
+ FUNCTION(pwm10),
+ FUNCTION(pwm11),
+ FUNCTION(pwm12),
+ FUNCTION(pwm13),
+ FUNCTION(pwm14),
+ FUNCTION(pwm20),
+ FUNCTION(pwm21),
+ FUNCTION(pwm22),
+ FUNCTION(pwm23),
+ FUNCTION(pwm24),
+ FUNCTION(pwm30),
+ FUNCTION(pwm31),
+ FUNCTION(pwm32),
+ FUNCTION(pwm33),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(qpic_pad),
+ FUNCTION(rx0),
+ FUNCTION(rx1),
+ FUNCTION(rx_swrm),
+ FUNCTION(rx_swrm0),
+ FUNCTION(rx_swrm1),
+ FUNCTION(sd_card),
+ FUNCTION(sd_write),
+ FUNCTION(tsens_max),
+ FUNCTION(tx_swrm),
+ FUNCTION(tx_swrm0),
+ FUNCTION(tx_swrm1),
+ FUNCTION(tx_swrm2),
+ FUNCTION(wci20),
+ FUNCTION(wci21),
+ FUNCTION(wci22),
+ FUNCTION(wci23),
+ FUNCTION(wsa_swrm),
+};
+
+static const struct msm_pingroup ipq6018_groups[] = {
+ PINGROUP(0, qpic_pad, wci20, qdss_traceclk_b, _, burn0, _, _, _, _),
+ PINGROUP(1, qpic_pad, mac12, qdss_tracectl_b, _, burn1, _, _, _, _),
+ PINGROUP(2, qpic_pad, wci20, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(3, qpic_pad, mac01, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(4, qpic_pad, mac01, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(5, qpic_pad, mac21, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(6, qpic_pad, mac21, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(7, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(8, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(9, qpic_pad, atest_char, cxc0, mac13, dbg_out, qdss_tracedata_b, _, _, _),
+ PINGROUP(10, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(11, qpic_pad, wci22, mac12, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(12, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(13, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(14, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(15, qpic_pad, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(16, qpic_pad, cxc0, mac13, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(17, qpic_pad, qdss_tracedata_b, wci22, _, _, _, _, _, _),
+ PINGROUP(18, pwm00, atest_char0, wci23, mac11, _, _, _, _, _),
+ PINGROUP(19, pwm10, atest_char1, wci23, mac11, _, _, _, _, _),
+ PINGROUP(20, pwm20, atest_char2, _, _, _, _, _, _, _),
+ PINGROUP(21, pwm30, atest_char3, _, _, _, _, _, _, _),
+ PINGROUP(22, audio_txmclk, audio_txmclkin, pwm02, tx_swrm0, _, qdss_cti_trig_out_b0, _, _, _),
+ PINGROUP(23, audio_txbclk, pwm12, wsa_swrm, tx_swrm1, _, qdss_cti_trig_in_b0, _, _, _),
+ PINGROUP(24, audio_txfsync, pwm22, wsa_swrm, tx_swrm2, _, qdss_cti_trig_out_b1, _, _, _),
+ PINGROUP(25, audio0, pwm32, tx_swrm, _, qdss_cti_trig_in_b1, _, _, _, _),
+ PINGROUP(26, audio1, pwm04, _, _, _, _, _, _, _),
+ PINGROUP(27, audio2, pwm14, _, _, _, _, _, _, _),
+ PINGROUP(28, audio3, pwm24, _, _, _, _, _, _, _),
+ PINGROUP(29, audio_rxmclk, audio_rxmclkin, pwm03, lpass_pdm, lpass_aud, qdss_cti_trig_in_a1, _, _, _),
+ PINGROUP(30, audio_rxbclk, pwm13, lpass_pdm, lpass_aud0, rx_swrm, _, qdss_cti_trig_out_a1, _, _),
+ PINGROUP(31, audio_rxfsync, pwm23, lpass_pdm, lpass_aud1, rx_swrm0, _, qdss_cti_trig_in_a0, _, _),
+ PINGROUP(32, audio0, pwm33, lpass_pdm, lpass_aud2, rx_swrm1, _, qdss_cti_trig_out_a0, _, _),
+ PINGROUP(33, audio1, _, _, _, _, _, _, _, _),
+ PINGROUP(34, lpass_pcm, mac10, mac00, _, _, _, _, _, _),
+ PINGROUP(35, lpass_pcm, mac10, mac00, _, _, _, _, _, _),
+ PINGROUP(36, lpass_pcm, mac20, _, _, _, _, _, _, _),
+ PINGROUP(37, lpass_pcm, mac20, _, _, _, _, _, _, _),
+ PINGROUP(38, blsp0_uart, blsp0_i2c, blsp0_spi, _, _, _, _, _, _),
+ PINGROUP(39, blsp0_uart, blsp0_i2c, blsp0_spi, _, _, _, _, _, _),
+ PINGROUP(40, blsp0_uart, blsp0_spi, _, _, _, _, _, _, _),
+ PINGROUP(41, blsp0_uart, blsp0_spi, _, _, _, _, _, _, _),
+ PINGROUP(42, blsp2_uart, blsp2_i2c, blsp2_spi, _, _, _, _, _, _),
+ PINGROUP(43, blsp2_uart, blsp2_i2c, blsp2_spi, _, _, _, _, _, _),
+ PINGROUP(44, blsp2_uart, blsp2_spi, _, _, _, _, _, _, _),
+ PINGROUP(45, blsp2_uart, blsp2_spi, _, _, _, _, _, _, _),
+ PINGROUP(46, blsp5_i2c, _, _, _, _, _, _, _, _),
+ PINGROUP(47, blsp5_i2c, _, _, _, _, _, _, _, _),
+ PINGROUP(48, blsp5_uart, _, qdss_traceclk_a, _, _, _, _, _, _),
+ PINGROUP(49, blsp5_uart, _, qdss_tracectl_a, _, _, _, _, _, _),
+ PINGROUP(50, pwm01, _, _, _, _, _, _, _, _),
+ PINGROUP(51, pta1_1, pwm11, _, rx1, _, _, _, _, _),
+ PINGROUP(52, pta1_2, pwm21, _, _, _, _, _, _, _),
+ PINGROUP(53, pta1_0, pwm31, prng_rosc, _, _, _, _, _, _),
+ PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ PINGROUP(55, blsp4_uart, blsp4_i2c, blsp4_spi, _, _, _, _, _, _),
+ PINGROUP(56, blsp4_uart, blsp4_i2c, blsp4_spi, _, _, _, _, _, _),
+ PINGROUP(57, blsp4_uart, blsp4_spi, _, _, _, _, _, _, _),
+ PINGROUP(58, blsp4_uart, blsp4_spi, _, _, _, _, _, _, _),
+ PINGROUP(59, pcie0_clk, _, _, cri_trng0, _, _, _, _, _),
+ PINGROUP(60, pcie0_rst, _, _, cri_trng1, _, _, _, _, _),
+ PINGROUP(61, pcie0_wake, _, _, cri_trng, _, _, _, _, _),
+ PINGROUP(62, sd_card, _, _, _, _, _, _, _, _),
+ PINGROUP(63, sd_write, rx0, _, tsens_max, _, _, _, _, _),
+ PINGROUP(64, mdc, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(65, mdio, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(66, pta2_0, wci21, cxc1, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(67, pta2_1, qdss_tracedata_a, _, _, _, _, _, _, _),
+ PINGROUP(68, pta2_2, wci21, cxc1, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(69, blsp1_uart, blsp1_i2c, blsp1_spi, gcc_plltest, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(70, blsp1_uart, blsp1_i2c, blsp1_spi, gcc_tlmm, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(71, blsp1_uart, blsp1_spi, gcc_plltest, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(72, blsp1_uart, blsp1_spi, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(73, blsp3_uart, blsp3_i2c, blsp3_spi, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(74, blsp3_uart, blsp3_i2c, blsp3_spi, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(75, blsp3_uart, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(76, blsp3_uart, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(77, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(78, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(79, blsp3_spi, _, qdss_tracedata_a, _, _, _, _, _, _),
+};
+
+static const struct msm_pinctrl_soc_data ipq6018_pinctrl = {
+ .pins = ipq6018_pins,
+ .npins = ARRAY_SIZE(ipq6018_pins),
+ .functions = ipq6018_functions,
+ .nfunctions = ARRAY_SIZE(ipq6018_functions),
+ .groups = ipq6018_groups,
+ .ngroups = ARRAY_SIZE(ipq6018_groups),
+ .ngpios = 80,
+};
+
+static int ipq6018_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &ipq6018_pinctrl);
+}
+
+static const struct of_device_id ipq6018_pinctrl_of_match[] = {
+ { .compatible = "qcom,ipq6018-pinctrl", },
+ { },
+};
+
+static struct platform_driver ipq6018_pinctrl_driver = {
+ .driver = {
+ .name = "ipq6018-pinctrl",
+ .of_match_table = ipq6018_pinctrl_of_match,
+ },
+ .probe = ipq6018_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init ipq6018_pinctrl_init(void)
+{
+ return platform_driver_register(&ipq6018_pinctrl_driver);
+}
+arch_initcall(ipq6018_pinctrl_init);
+
+static void __exit ipq6018_pinctrl_exit(void)
+{
+ platform_driver_unregister(&ipq6018_pinctrl_driver);
+}
+module_exit(ipq6018_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI ipq6018 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, ipq6018_pinctrl_of_match);
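The new driver is almost entirely lookup tables. Each FUNCTION() entry above expands, as in the other qcom pinctrl drivers, to a named list of the pin groups that can mux to that function; a paraphrased sketch of the macro (not the verbatim definition):

#define FUNCTION(fname)					\
	[msm_mux_##fname] = {				\
		.name = #fname,				\
		.groups = fname##_groups,		\
		.ngroups = ARRAY_SIZE(fname##_groups),	\
	}

Each PINGROUP(n, f1, ..., f9) row then records which function sits behind each of the nine mux settings of that pin, with _ marking unused slots, and msm_pinctrl_probe() drives everything else generically from the soc_data.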
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index c2fb1ddf2f22..ac717ee38416 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -299,7 +299,7 @@ static const char * const gpio_groups[] = {
};
static const char * const mdio_groups[] = {
- "gpio0", "gpio1", "gpio10", "gpio11",
+ "gpio0", "gpio1", "gpio2", "gpio10", "gpio11", "gpio66",
};
static const char * const mi2s_groups[] = {
@@ -403,8 +403,8 @@ static const char * const usb2_hsic_groups[] = {
};
static const char * const rgmii2_groups[] = {
- "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32",
- "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62",
+ "gpio2", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio51", "gpio52", "gpio59", "gpio60", "gpio61", "gpio62", "gpio66",
};
static const char * const sata_groups[] = {
@@ -539,7 +539,7 @@ static const struct msm_function ipq8064_functions[] = {
static const struct msm_pingroup ipq8064_groups[] = {
PINGROUP(0, mdio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(1, mdio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(2, gsbi5_spi_cs3, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, gsbi5_spi_cs3, rgmii2, mdio, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(3, pcie1_rst, pcie1_prsnt, pdm, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(4, pcie1_pwren_n, pcie1_pwren, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(5, pcie1_clk_req, pcie1_pwrflt, NA, NA, NA, NA, NA, NA, NA, NA),
@@ -603,7 +603,7 @@ static const struct msm_pingroup ipq8064_groups[] = {
PINGROUP(63, pcie3_rst, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(65, pcie3_clk_req, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, rgmii2, mdio, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(67, usb2_hsic, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(68, usb2_hsic, NA, NA, NA, NA, NA, NA, NA, NA, NA),
SDC_PINGROUP(sdc3_clk, 0x204a, 14, 6),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 1a948c3f54b7..9a398a211d30 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -22,6 +22,8 @@
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/log2.h>
+#include <linux/qcom_scm.h>
+#include <linux/io.h>
#include <linux/soc/qcom/irq.h>
@@ -60,6 +62,8 @@ struct msm_pinctrl {
struct irq_chip irq_chip;
int irq;
+ bool intr_target_use_scm;
+
raw_spinlock_t lock;
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
@@ -68,6 +72,7 @@ struct msm_pinctrl {
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
+ u32 phys_base[MAX_NR_TILES];
};
#define MSM_ACCESSOR(name) \
@@ -489,8 +494,8 @@ static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
val = msm_readl_ctl(pctrl, g);
- /* 0 = output, 1 = input */
- return val & BIT(g->oe_bit) ? 0 : 1;
+ return val & BIT(g->oe_bit) ? GPIO_LINE_DIRECTION_OUT :
+ GPIO_LINE_DIRECTION_IN;
}
static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -882,11 +887,30 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
else
clear_bit(d->hwirq, pctrl->dual_edge_irqs);
- /* Route interrupts to application cpu */
- val = msm_readl_intr_target(pctrl, g);
- val &= ~(7 << g->intr_target_bit);
- val |= g->intr_target_kpss_val << g->intr_target_bit;
- msm_writel_intr_target(val, pctrl, g);
+ /*
+ * Route interrupts to the application CPU. When
+ * intr_target_use_scm is set, the routing register is
+ * written through SCM calls instead of directly.
+ */
+ if (pctrl->intr_target_use_scm) {
+ u32 addr = pctrl->phys_base[0] + g->intr_target_reg;
+ int ret;
+
+ qcom_scm_io_readl(addr, &val);
+
+ val &= ~(7 << g->intr_target_bit);
+ val |= g->intr_target_kpss_val << g->intr_target_bit;
+
+ ret = qcom_scm_io_writel(addr, val);
+ if (ret)
+ dev_err(pctrl->dev,
+ "Failed routing %lu interrupt to Apps proc",
+ d->hwirq);
+ } else {
+ val = msm_readl_intr_target(pctrl, g);
+ val &= ~(7 << g->intr_target_bit);
+ val |= g->intr_target_kpss_val << g->intr_target_bit;
+ msm_writel_intr_target(val, pctrl, g);
+ }
/* Update configuration for gpio.
* RAW_STATUS_EN is left on for all gpio irqs. Due to the
@@ -1240,6 +1264,9 @@ int msm_pinctrl_probe(struct platform_device *pdev,
pctrl->dev = &pdev->dev;
pctrl->soc = soc_data;
pctrl->chip = msm_gpio_template;
+ pctrl->intr_target_use_scm = of_device_is_compatible(
+ pctrl->dev->of_node,
+ "qcom,ipq8064-pinctrl");
raw_spin_lock_init(&pctrl->lock);
@@ -1252,9 +1279,12 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->regs[i]);
}
} else {
- pctrl->regs[0] = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pctrl->regs[0]))
return PTR_ERR(pctrl->regs[0]);
+
+ pctrl->phys_base[0] = res->start;
}
msm_pinctrl_setup_pm_reset(pctrl);
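On IPQ8064 the TLMM interrupt-target register is only writable from the secure world, which is why the probe path above also records the controller's physical base: the SCM helpers take a physical address rather than an __iomem cookie. The read-modify-write, reduced to a standalone sketch (names and error handling illustrative):

#include <linux/qcom_scm.h>

/* Route one TLMM interrupt to the application CPU through the
 * secure monitor instead of a direct MMIO write.
 */
static int route_irq_via_scm(phys_addr_t reg, unsigned int shift,
			     unsigned int kpss_val)
{
	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(reg, &val);
	if (ret)
		return ret;

	val &= ~(7 << shift);		/* 3-bit target-CPU field */
	val |= kpss_val << shift;

	return qcom_scm_io_writel(reg, val);
}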
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index cf0e0dc42b84..9552851b96f1 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -26,8 +26,8 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7792 if ARCH_R8A7792
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
- select PINCTRL_PFC_R8A77950 if ARCH_R8A77950 || ARCH_R8A7795
- select PINCTRL_PFC_R8A77951 if ARCH_R8A77951 || ARCH_R8A7795
+ select PINCTRL_PFC_R8A77950 if ARCH_R8A77950
+ select PINCTRL_PFC_R8A77951 if ARCH_R8A77951
select PINCTRL_PFC_R8A77960 if ARCH_R8A77960
select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 82209116955b..a2e19efa26e3 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -726,6 +726,27 @@ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
#ifdef DEBUG
+#define SH_PFC_MAX_REGS 300
+#define SH_PFC_MAX_ENUMS 3000
+
+static unsigned int sh_pfc_errors __initdata = 0;
+static unsigned int sh_pfc_warnings __initdata = 0;
+static u32 *sh_pfc_regs __initdata = NULL;
+static u32 sh_pfc_num_regs __initdata = 0;
+static u16 *sh_pfc_enums __initdata = NULL;
+static u32 sh_pfc_num_enums __initdata = 0;
+
+#define sh_pfc_err(fmt, ...) \
+ do { \
+ pr_err("%s: " fmt, drvname, ##__VA_ARGS__); \
+ sh_pfc_errors++; \
+ } while (0)
+#define sh_pfc_warn(fmt, ...) \
+ do { \
+ pr_warn("%s: " fmt, drvname, ##__VA_ARGS__); \
+ sh_pfc_warnings++; \
+ } while (0)
+
static bool __init is0s(const u16 *enum_ids, unsigned int n)
{
unsigned int i;
@@ -737,77 +758,181 @@ static bool __init is0s(const u16 *enum_ids, unsigned int n)
return true;
}
-static unsigned int sh_pfc_errors __initdata = 0;
-static unsigned int sh_pfc_warnings __initdata = 0;
+static bool __init same_name(const char *a, const char *b)
+{
+ if (!a || !b)
+ return false;
+
+ return !strcmp(a, b);
+}
+
+static void __init sh_pfc_check_reg(const char *drvname, u32 reg)
+{
+ unsigned int i;
+
+ for (i = 0; i < sh_pfc_num_regs; i++)
+ if (reg == sh_pfc_regs[i]) {
+ sh_pfc_err("reg 0x%x conflict\n", reg);
+ return;
+ }
+
+ if (sh_pfc_num_regs == SH_PFC_MAX_REGS) {
+ pr_warn_once("%s: Please increase SH_PFC_MAX_REGS\n", drvname);
+ return;
+ }
+
+ sh_pfc_regs[sh_pfc_num_regs++] = reg;
+}
+
+static int __init sh_pfc_check_enum(const char *drvname, u16 enum_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sh_pfc_num_enums; i++) {
+ if (enum_id == sh_pfc_enums[i])
+ return -EINVAL;
+ }
+
+ if (sh_pfc_num_enums == SH_PFC_MAX_ENUMS) {
+ pr_warn_once("%s: Please increase SH_PFC_MAX_ENUMS\n", drvname);
+ return 0;
+ }
+
+ sh_pfc_enums[sh_pfc_num_enums++] = enum_id;
+ return 0;
+}
+
+static void __init sh_pfc_check_reg_enums(const char *drvname, u32 reg,
+ const u16 *enums, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ if (enums[i] && sh_pfc_check_enum(drvname, enums[i]))
+ sh_pfc_err("reg 0x%x enum_id %u conflict\n", reg,
+ enums[i]);
+ }
+}
+
+static void __init sh_pfc_check_pin(const struct sh_pfc_soc_info *info,
+ u32 reg, unsigned int pin)
+{
+ const char *drvname = info->name;
+ unsigned int i;
+
+ if (pin == SH_PFC_PIN_NONE)
+ return;
+
+ for (i = 0; i < info->nr_pins; i++) {
+ if (pin == info->pins[i].pin)
+ return;
+ }
+
+ sh_pfc_err("reg 0x%x: pin %u not found\n", reg, pin);
+}
static void __init sh_pfc_check_cfg_reg(const char *drvname,
const struct pinmux_cfg_reg *cfg_reg)
{
unsigned int i, n, rw, fw;
+ sh_pfc_check_reg(drvname, cfg_reg->reg);
+
if (cfg_reg->field_width) {
- /* Checked at build time */
- return;
+ n = cfg_reg->reg_width / cfg_reg->field_width;
+ /* Skip field checks (done at build time) */
+ goto check_enum_ids;
}
for (i = 0, n = 0, rw = 0; (fw = cfg_reg->var_field_width[i]); i++) {
- if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw)) {
- pr_warn("%s: reg 0x%x: reserved field [%u:%u] can be split to reduce table size\n",
- drvname, cfg_reg->reg, rw, rw + fw - 1);
- sh_pfc_warnings++;
- }
+ if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw))
+ sh_pfc_warn("reg 0x%x: reserved field [%u:%u] can be split to reduce table size\n",
+ cfg_reg->reg, rw, rw + fw - 1);
n += 1 << fw;
rw += fw;
}
- if (rw != cfg_reg->reg_width) {
- pr_err("%s: reg 0x%x: var_field_width declares %u instead of %u bits\n",
- drvname, cfg_reg->reg, rw, cfg_reg->reg_width);
- sh_pfc_errors++;
- }
+ if (rw != cfg_reg->reg_width)
+ sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
+ cfg_reg->reg, rw, cfg_reg->reg_width);
+
+ if (n != cfg_reg->nr_enum_ids)
+ sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
+ cfg_reg->reg, cfg_reg->nr_enum_ids, n);
+
+check_enum_ids:
+ sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
+}
+
+static void __init sh_pfc_check_drive_reg(const struct sh_pfc_soc_info *info,
+ const struct pinmux_drive_reg *drive)
+{
+ const char *drvname = info->name;
+ unsigned long seen = 0, mask;
+ unsigned int i;
+
+ sh_pfc_check_reg(info->name, drive->reg);
+ for (i = 0; i < ARRAY_SIZE(drive->fields); i++) {
+ const struct pinmux_drive_reg_field *field = &drive->fields[i];
+
+ if (!field->pin && !field->offset && !field->size)
+ continue;
+
+ mask = GENMASK(field->offset + field->size - 1, field->offset);
+ if (mask & seen)
+ sh_pfc_err("drive_reg 0x%x: field %u overlap\n",
+ drive->reg, i);
+ seen |= mask;
- if (n != cfg_reg->nr_enum_ids) {
- pr_err("%s: reg 0x%x: enum_ids[] has %u instead of %u values\n",
- drvname, cfg_reg->reg, cfg_reg->nr_enum_ids, n);
- sh_pfc_errors++;
+ sh_pfc_check_pin(info, drive->reg, field->pin);
}
}
+static void __init sh_pfc_check_bias_reg(const struct sh_pfc_soc_info *info,
+ const struct pinmux_bias_reg *bias)
+{
+ unsigned int i;
+
+ sh_pfc_check_reg(info->name, bias->puen);
+ if (bias->pud)
+ sh_pfc_check_reg(info->name, bias->pud);
+ for (i = 0; i < ARRAY_SIZE(bias->pins); i++)
+ sh_pfc_check_pin(info, bias->puen, bias->pins[i]);
+}
+
static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
{
- const struct sh_pfc_function *func;
const char *drvname = info->name;
unsigned int *refcnts;
unsigned int i, j, k;
pr_info("Checking %s\n", drvname);
+ sh_pfc_num_regs = 0;
+ sh_pfc_num_enums = 0;
/* Check pins */
for (i = 0; i < info->nr_pins; i++) {
+ const struct sh_pfc_pin *pin = &info->pins[i];
+
+ if (!pin->name) {
+ sh_pfc_err("empty pin %u\n", i);
+ continue;
+ }
for (j = 0; j < i; j++) {
- if (!strcmp(info->pins[i].name, info->pins[j].name)) {
- pr_err("%s: pin %s/%s: name conflict\n",
- drvname, info->pins[i].name,
- info->pins[j].name);
- sh_pfc_errors++;
- }
+ const struct sh_pfc_pin *pin2 = &info->pins[j];
- if (info->pins[i].pin != (u16)-1 &&
- info->pins[i].pin == info->pins[j].pin) {
- pr_err("%s: pin %s/%s: pin %u conflict\n",
- drvname, info->pins[i].name,
- info->pins[j].name, info->pins[i].pin);
- sh_pfc_errors++;
- }
+ if (same_name(pin->name, pin2->name))
+ sh_pfc_err("pin %s: name conflict\n",
+ pin->name);
- if (info->pins[i].enum_id &&
- info->pins[i].enum_id == info->pins[j].enum_id) {
- pr_err("%s: pin %s/%s: enum_id %u conflict\n",
- drvname, info->pins[i].name,
- info->pins[j].name,
- info->pins[i].enum_id);
- sh_pfc_errors++;
- }
+ if (pin->pin != (u16)-1 && pin->pin == pin2->pin)
+ sh_pfc_err("pin %s/%s: pin %u conflict\n",
+ pin->name, pin2->name, pin->pin);
+
+ if (pin->enum_id && pin->enum_id == pin2->enum_id)
+ sh_pfc_err("pin %s/%s: enum_id %u conflict\n",
+ pin->name, pin2->name,
+ pin->enum_id);
}
}
@@ -817,45 +942,49 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
return;
for (i = 0; i < info->nr_functions; i++) {
- func = &info->functions[i];
+ const struct sh_pfc_function *func = &info->functions[i];
+
if (!func->name) {
- pr_err("%s: empty function %u\n", drvname, i);
- sh_pfc_errors++;
+ sh_pfc_err("empty function %u\n", i);
continue;
}
+ for (j = 0; j < i; j++) {
+ if (same_name(func->name, info->functions[j].name))
+ sh_pfc_err("function %s: name conflict\n",
+ func->name);
+ }
for (j = 0; j < func->nr_groups; j++) {
for (k = 0; k < info->nr_groups; k++) {
- if (info->groups[k].name &&
- !strcmp(func->groups[j],
- info->groups[k].name)) {
+ if (same_name(func->groups[j],
+ info->groups[k].name)) {
refcnts[k]++;
break;
}
}
- if (k == info->nr_groups) {
- pr_err("%s: function %s: group %s not found\n",
- drvname, func->name, func->groups[j]);
- sh_pfc_errors++;
- }
+ if (k == info->nr_groups)
+ sh_pfc_err("function %s: group %s not found\n",
+ func->name, func->groups[j]);
}
}
for (i = 0; i < info->nr_groups; i++) {
- if (!info->groups[i].name) {
- pr_err("%s: empty group %u\n", drvname, i);
- sh_pfc_errors++;
+ const struct sh_pfc_pin_group *group = &info->groups[i];
+
+ if (!group->name) {
+ sh_pfc_err("empty group %u\n", i);
continue;
}
- if (!refcnts[i]) {
- pr_err("%s: orphan group %s\n", drvname,
- info->groups[i].name);
- sh_pfc_errors++;
- } else if (refcnts[i] > 1) {
- pr_warn("%s: group %s referenced by %u functions\n",
- drvname, info->groups[i].name, refcnts[i]);
- sh_pfc_warnings++;
+ for (j = 0; j < i; j++) {
+ if (same_name(group->name, info->groups[j].name))
+ sh_pfc_err("group %s: name conflict\n",
+ group->name);
}
+ if (!refcnts[i])
+ sh_pfc_err("orphan group %s\n", group->name);
+ else if (refcnts[i] > 1)
+ sh_pfc_warn("group %s referenced by %u functions\n",
+ group->name, refcnts[i]);
}
kfree(refcnts);
@@ -863,12 +992,62 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info)
/* Check config register descriptions */
for (i = 0; info->cfg_regs && info->cfg_regs[i].reg; i++)
sh_pfc_check_cfg_reg(drvname, &info->cfg_regs[i]);
+
+ /* Check drive strength registers */
+ for (i = 0; info->drive_regs && info->drive_regs[i].reg; i++)
+ sh_pfc_check_drive_reg(info, &info->drive_regs[i]);
+
+ /* Check bias registers */
+ for (i = 0; info->bias_regs && info->bias_regs[i].puen; i++)
+ sh_pfc_check_bias_reg(info, &info->bias_regs[i]);
+
+ /* Check ioctrl registers */
+ for (i = 0; info->ioctrl_regs && info->ioctrl_regs[i].reg; i++)
+ sh_pfc_check_reg(drvname, info->ioctrl_regs[i].reg);
+
+ /* Check data registers */
+ for (i = 0; info->data_regs && info->data_regs[i].reg; i++) {
+ sh_pfc_check_reg(drvname, info->data_regs[i].reg);
+ sh_pfc_check_reg_enums(drvname, info->data_regs[i].reg,
+ info->data_regs[i].enum_ids,
+ info->data_regs[i].reg_width);
+ }
+
+#ifdef CONFIG_PINCTRL_SH_FUNC_GPIO
+ /* Check function GPIOs */
+ for (i = 0; i < info->nr_func_gpios; i++) {
+ const struct pinmux_func *func = &info->func_gpios[i];
+
+ if (!func->name) {
+ sh_pfc_err("empty function gpio %u\n", i);
+ continue;
+ }
+ for (j = 0; j < i; j++) {
+ if (same_name(func->name, info->func_gpios[j].name))
+ sh_pfc_err("func_gpio %s: name conflict\n",
+ func->name);
+ }
+ if (sh_pfc_check_enum(drvname, func->enum_id))
+ sh_pfc_err("%s enum_id %u conflict\n", func->name,
+ func->enum_id);
+ }
+#endif
}
static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
{
unsigned int i;
+ sh_pfc_regs = kcalloc(SH_PFC_MAX_REGS, sizeof(*sh_pfc_regs),
+ GFP_KERNEL);
+ if (!sh_pfc_regs)
+ return;
+
+ sh_pfc_enums = kcalloc(SH_PFC_MAX_ENUMS, sizeof(*sh_pfc_enums),
+ GFP_KERNEL);
+ if (!sh_pfc_enums)
+ goto free_regs;
+
pr_warn("Checking builtin pinmux tables\n");
for (i = 0; pdrv->id_table[i].name[0]; i++)
@@ -881,6 +1060,10 @@ static void __init sh_pfc_check_driver(const struct platform_driver *pdrv)
pr_warn("Detected %u errors and %u warnings\n", sh_pfc_errors,
sh_pfc_warnings);
+
+ kfree(sh_pfc_enums);
+free_regs:
+ kfree(sh_pfc_regs);
}
#else /* !DEBUG */
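All the new table checks share one mechanism: every register address and enum ID encountered is appended to a scratch table after a linear scan for an earlier occurrence, so a duplicate anywhere in a SoC's tables is reported exactly once. The pattern in isolation (hypothetical names; O(n^2) is fine for a one-shot __init debug pass):

#include <linux/kernel.h>

/* Return true if value was seen before; otherwise record it, dropping
 * it with a one-time warning once the scratch table is full.
 */
static bool __init seen_before(u32 *table, unsigned int *count,
			       unsigned int max, u32 value)
{
	unsigned int i;

	for (i = 0; i < *count; i++)
		if (table[i] == value)
			return true;	/* duplicate: caller reports it */

	if (*count == max) {
		pr_warn_once("check table full, please enlarge it\n");
		return false;
	}

	table[(*count)++] = value;
	return false;
}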
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 8213e118aa40..9c6e931ae766 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -205,14 +205,11 @@ static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
for (k = 0; gpios[k] >= 0; k++) {
if (gpios[k] == offset)
- goto found;
+ return pfc->irqs[i];
}
}
return 0;
-
-found:
- return pfc->irqs[i];
}
static int gpio_pin_setup(struct sh_pfc_chip *chip)
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index b1a9611f46b3..50df9e084414 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -352,7 +352,7 @@ struct atlas7_gpio_chip {
int nbank;
raw_spinlock_t lock;
struct gpio_chip chip;
- struct atlas7_gpio_bank banks[0];
+ struct atlas7_gpio_bank banks[];
};
/**
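banks[0] is the old GNU zero-length-array idiom; the C99 flexible array member banks[] means the same thing while letting the compiler and the fortified copy helpers see the real object bounds (the uniphier hunk further down makes the identical change). Allocation is unaffected; paired with struct_size() from <linux/overflow.h> it might look like this (a sketch with made-up types):

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_bank {
	u32 base;
};

struct demo_chip {
	int nbank;
	struct demo_bank banks[];	/* flexible array member */
};

static struct demo_chip *demo_chip_alloc(int nbank)
{
	/* struct_size() computes sizeof(*chip) + nbank * sizeof(bank),
	 * checking the multiplication for overflow.
	 */
	struct demo_chip *chip = kzalloc(struct_size(chip, banks, nbank),
					 GFP_KERNEL);

	if (chip)
		chip->nbank = nbank;
	return chip;
}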
diff --git a/drivers/pinctrl/sprd/Kconfig b/drivers/pinctrl/sprd/Kconfig
index b6c5479b58fb..eef35d01b770 100644
--- a/drivers/pinctrl/sprd/Kconfig
+++ b/drivers/pinctrl/sprd/Kconfig
@@ -4,9 +4,7 @@
#
config PINCTRL_SPRD
- bool "Spreadtrum pinctrl driver"
- depends on OF
- depends on ARCH_SPRD || COMPILE_TEST
+ tristate
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -15,7 +13,9 @@ config PINCTRL_SPRD
Say Y here to enable Spreadtrum pinctrl driver
config PINCTRL_SPRD_SC9860
- bool "Spreadtrum SC9860 pinctrl driver"
- depends on PINCTRL_SPRD
+ tristate "Spreadtrum SC9860 pinctrl driver"
+ depends on OF
+ depends on ARCH_SPRD || COMPILE_TEST
+ select PINCTRL_SPRD
help
Say Y here to enable Spreadtrum SC9860 pinctrl driver
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 157712ab05a8..48cbf2a2837f 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -464,9 +464,15 @@ static int sprd_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin_id,
case PIN_CONFIG_INPUT_ENABLE:
arg = (reg >> SLEEP_INPUT_SHIFT) & SLEEP_INPUT_MASK;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_OUTPUT_ENABLE:
arg = reg & SLEEP_OUTPUT_MASK;
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ if ((reg & SLEEP_OUTPUT) || (reg & SLEEP_INPUT))
+ return -EINVAL;
+
+ arg = 1;
+ break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = (reg >> DRIVE_STRENGTH_SHIFT) &
DRIVE_STRENGTH_MASK;
@@ -635,13 +641,23 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = SLEEP_INPUT_SHIFT;
}
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_OUTPUT_ENABLE:
if (is_sleep_config == true) {
- val |= SLEEP_OUTPUT;
+ if (arg > 0)
+ val |= SLEEP_OUTPUT;
+ else
+ val &= ~SLEEP_OUTPUT;
+
mask = SLEEP_OUTPUT_MASK;
shift = SLEEP_OUTPUT_SHIFT;
}
break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ if (is_sleep_config == true) {
+ val = shift = 0;
+ mask = SLEEP_OUTPUT | SLEEP_INPUT;
+ }
+ break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (arg < 2 || arg > 60)
return -EINVAL;
@@ -1090,6 +1106,7 @@ int sprd_pinctrl_core_probe(struct platform_device *pdev,
return 0;
}
+EXPORT_SYMBOL_GPL(sprd_pinctrl_core_probe);
int sprd_pinctrl_remove(struct platform_device *pdev)
{
@@ -1098,6 +1115,7 @@ int sprd_pinctrl_remove(struct platform_device *pdev)
pinctrl_unregister(sprd_pctl->pctl);
return 0;
}
+EXPORT_SYMBOL_GPL(sprd_pinctrl_remove);
void sprd_pinctrl_shutdown(struct platform_device *pdev)
{
@@ -1112,6 +1130,7 @@ void sprd_pinctrl_shutdown(struct platform_device *pdev)
return;
pinctrl_select_state(pinctl, state);
}
+EXPORT_SYMBOL_GPL(sprd_pinctrl_shutdown);
MODULE_DESCRIPTION("SPREADTRUM Pin Controller Driver");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index af3b24f26ff2..a657cd829ce6 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -284,9 +284,9 @@ static int stm32_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
stm32_pmx_get_mode(bank, pin, &mode, &alt);
if ((alt == 0) && (mode == 0))
- ret = 1;
+ ret = GPIO_LINE_DIRECTION_IN;
else if ((alt == 0) && (mode == 1))
- ret = 0;
+ ret = GPIO_LINE_DIRECTION_OUT;
else
ret = -EINVAL;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index b35c3245ab3f..8e792f8e2dc9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/export.h>
@@ -1058,6 +1059,14 @@ static void sunxi_pinctrl_irq_ack_unmask(struct irq_data *d)
sunxi_pinctrl_irq_unmask(d);
}
+static int sunxi_pinctrl_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ u8 bank = d->hwirq / IRQ_PER_BANK;
+
+ return irq_set_irq_wake(pctl->irq[bank], on);
+}
+
static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
.name = "sunxi_pio_edge",
.irq_ack = sunxi_pinctrl_irq_ack,
@@ -1066,7 +1075,8 @@ static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
.irq_request_resources = sunxi_pinctrl_irq_request_resources,
.irq_release_resources = sunxi_pinctrl_irq_release_resources,
.irq_set_type = sunxi_pinctrl_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .irq_set_wake = sunxi_pinctrl_irq_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static struct irq_chip sunxi_pinctrl_level_irq_chip = {
@@ -1081,7 +1091,9 @@ static struct irq_chip sunxi_pinctrl_level_irq_chip = {
.irq_request_resources = sunxi_pinctrl_irq_request_resources,
.irq_release_resources = sunxi_pinctrl_irq_release_resources,
.irq_set_type = sunxi_pinctrl_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_EOI_THREADED |
+ .irq_set_wake = sunxi_pinctrl_irq_set_wake,
+ .flags = IRQCHIP_EOI_THREADED |
+ IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_EOI_IF_HANDLED,
};
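Replacing IRQCHIP_SKIP_SET_WAKE with a real .irq_set_wake forwards wake configuration to the parent per-bank interrupt, and IRQCHIP_MASK_ON_SUSPEND then masks any pin interrupt that was not armed as a wake source. Consumers are unaffected and use the ordinary sequence (sketch, hypothetical names):

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_button_handler(int irq, void *priv)
{
	return IRQ_HANDLED;
}

/* enable_irq_wake() lands in .irq_set_wake, which now propagates the
 * request up to the chained parent interrupt.
 */
static int demo_arm_wake_button(struct device *dev, int irq, void *priv)
{
	int ret;

	ret = devm_request_irq(dev, irq, demo_button_handler,
			       IRQF_TRIGGER_FALLING, "wake-button", priv);
	if (ret)
		return ret;

	return enable_irq_wake(irq);
}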
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index cefbbb8d1a68..21661f6490d6 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -275,11 +275,57 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
+static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const struct tegra_pingroup *group;
+ u32 value;
+
+ if (!pmx->soc->sfsel_in_mux)
+ return 0;
+
+ group = &pmx->soc->groups[offset];
+
+ if (group->mux_reg < 0 || group->sfsel_bit < 0)
+ return -EINVAL;
+
+ value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
+ value &= ~BIT(group->sfsel_bit);
+ pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
+
+ return 0;
+}
+
+static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const struct tegra_pingroup *group;
+ u32 value;
+
+ if (!pmx->soc->sfsel_in_mux)
+ return;
+
+ group = &pmx->soc->groups[offset];
+
+ if (group->mux_reg < 0 || group->sfsel_bit < 0)
+ return;
+
+ value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
+ value |= BIT(group->sfsel_bit);
+ pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
+}
+
static const struct pinmux_ops tegra_pinmux_ops = {
.get_functions_count = tegra_pinctrl_get_funcs_count,
.get_function_name = tegra_pinctrl_get_func_name,
.get_function_groups = tegra_pinctrl_get_func_groups,
.set_mux = tegra_pinctrl_set_mux,
+ .gpio_request_enable = tegra_pinctrl_gpio_request_enable,
+ .gpio_disable_free = tegra_pinctrl_gpio_disable_free,
};
static int tegra_pinconf_reg(struct tegra_pmx *pmx,
@@ -689,12 +735,12 @@ const struct dev_pm_ops tegra_pinctrl_pm = {
.resume = &tegra_pinctrl_resume
};
-static bool gpio_node_has_range(const char *compatible)
+static bool tegra_pinctrl_gpio_node_has_range(struct tegra_pmx *pmx)
{
struct device_node *np;
bool has_prop = false;
- np = of_find_compatible_node(NULL, NULL, compatible);
+ np = of_find_compatible_node(NULL, NULL, pmx->soc->gpio_compatible);
if (!np)
return has_prop;
@@ -794,7 +840,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
tegra_pinctrl_clear_parked_bits(pmx);
- if (!gpio_node_has_range(pmx->soc->gpio_compatible))
+ if (pmx->soc->ngpios > 0 && !tegra_pinctrl_gpio_node_has_range(pmx))
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
platform_set_drvdata(pdev, pmx);
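The two new pinmux hooks are what make the GPIO/special-function switch automatic on SoCs where the SFSEL bit lives in the mux register: gpiolib invokes them through the pin control back-end whenever a line is requested or released. From a consumer's point of view (sketch, hypothetical names):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_toggle_reset(struct device *dev)
{
	struct gpio_desc *desc;

	/* gpiod_get() ends up in .gpio_request_enable, which clears
	 * the pad's SFSEL bit so it behaves as a GPIO.
	 */
	desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	gpiod_set_value(desc, 1);

	/* gpiod_put() ends up in .gpio_disable_free, which sets SFSEL
	 * again and returns the pad to its special function.
	 */
	gpiod_put(desc);
	return 0;
}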
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index 0fc82eea9cf1..fcad7f74c5a2 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -107,7 +107,8 @@ struct tegra_function {
* drvup, slwr, slwf, and drvtype parameters.
* @drv_bank: Drive fields register bank.
* @hsm_bit: High Speed Mode register bit.
- * @schmitt_bit: Scmitt register bit.
+ * @sfsel_bit: GPIO/SFIO selection register bit.
+ * @schmitt_bit: Schmitt register bit.
* @lpmd_bit: Low Power Mode register bit.
* @drvdn_bit: Drive Down register bit.
* @drvdn_width: Drive Down field width.
@@ -153,6 +154,7 @@ struct tegra_pingroup {
s32 ioreset_bit:6;
s32 rcv_sel_bit:6;
s32 hsm_bit:6;
+ s32 sfsel_bit:6;
s32 schmitt_bit:6;
s32 lpmd_bit:6;
s32 drvdn_bit:6;
@@ -192,6 +194,7 @@ struct tegra_pinctrl_soc_data {
bool hsm_in_mux;
bool schmitt_in_mux;
bool drvtype_in_mux;
+ bool sfsel_in_mux;
};
extern const struct dev_pm_ops tegra_pinctrl_pm;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c
index daf44cf240c9..2e0b5f7bb095 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra194.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c
@@ -24,17 +24,14 @@
/* Define unique ID for each pins */
enum pin_id {
- TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0 = 256,
- TEGRA_PIN_PEX_L5_RST_N_PGG1 = 257,
- TEGRA_PIN_NUM_GPIOS = 258,
+ TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0,
+ TEGRA_PIN_PEX_L5_RST_N_PGG1,
};
/* Table for pin descriptor */
static const struct pinctrl_pin_desc tegra194_pins[] = {
- PINCTRL_PIN(TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0,
- "TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0"),
- PINCTRL_PIN(TEGRA_PIN_PEX_L5_RST_N_PGG1,
- "TEGRA_PIN_PEX_L5_RST_N_PGG1"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L5_CLKREQ_N_PGG0, "PEX_L5_CLKREQ_N_PGG0"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L5_RST_N_PGG1, "PEX_L5_RST_N_PGG1"),
};
static const unsigned int pex_l5_clkreq_n_pgg0_pins[] = {
@@ -59,6 +56,7 @@ enum tegra_mux_dt {
{ \
.name = #lid, \
}
+
static struct tegra_function tegra194_functions[] = {
TEGRA_PIN_FUNCTION(rsvd0),
TEGRA_PIN_FUNCTION(rsvd1),
@@ -70,7 +68,7 @@ static struct tegra_function tegra194_functions[] = {
#define DRV_PINGROUP_ENTRY_Y(r, drvdn_b, drvdn_w, drvup_b, \
drvup_w, slwr_b, slwr_w, slwf_b, \
slwf_w, bank) \
- .drv_reg = ((r)), \
+ .drv_reg = ((r)), \
.drv_bank = bank, \
.drvdn_bit = drvdn_b, \
.drvdn_width = drvdn_w, \
@@ -89,7 +87,7 @@ static struct tegra_function tegra194_functions[] = {
.hsm_bit = -1, \
.mux_bank = bank, \
.mux_bit = 0, \
- .pupd_reg = ((r)), \
+ .pupd_reg = ((r)), \
.pupd_bank = bank, \
.pupd_bit = 2, \
.tri_reg = ((r)), \
@@ -97,6 +95,7 @@ static struct tegra_function tegra194_functions[] = {
.tri_bit = 4, \
.einput_bit = e_input, \
.odrain_bit = e_od, \
+ .sfsel_bit = 10, \
.schmitt_bit = schmitt_b, \
.drvtype_bit = 13, \
.drv_reg = -1, \
@@ -109,20 +108,20 @@ static struct tegra_function tegra194_functions[] = {
#define PINGROUP(pg_name, f0, f1, f2, f3, r, bank, pupd, e_lpbk, \
e_input, e_lpdr, e_od, schmitt_b, drvtype, io_rail) \
- { \
- .name = #pg_name, \
- .pins = pg_name##_pins, \
- .npins = ARRAY_SIZE(pg_name##_pins), \
- .funcs = { \
- TEGRA_MUX_##f0, \
- TEGRA_MUX_##f1, \
- TEGRA_MUX_##f2, \
- TEGRA_MUX_##f3, \
- }, \
- PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_lpbk, \
- e_input, e_od, \
- schmitt_b, drvtype), \
- drive_##pg_name, \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
+ }, \
+ PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_lpbk, \
+ e_input, e_od, \
+ schmitt_b, drvtype), \
+ drive_##pg_name, \
}
static const struct tegra_pingroup tegra194_groups[] = {
@@ -133,7 +132,6 @@ static const struct tegra_pingroup tegra194_groups[] = {
};
static const struct tegra_pinctrl_soc_data tegra194_pinctrl = {
- .ngpios = TEGRA_PIN_NUM_GPIOS,
.pins = tegra194_pins,
.npins = ARRAY_SIZE(tegra194_pins),
.functions = tegra194_functions,
@@ -143,6 +141,7 @@ static const struct tegra_pinctrl_soc_data tegra194_pinctrl = {
.hsm_in_mux = true,
.schmitt_in_mux = true,
.drvtype_in_mux = true,
+ .sfsel_in_mux = true,
};
static int tegra194_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 57babf31e320..ade348b49b31 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -29,7 +29,7 @@ struct uniphier_pinctrl_reg_region {
struct list_head node;
unsigned int base;
unsigned int nregs;
- u32 vals[0];
+ u32 vals[];
};
struct uniphier_pinctrl_priv {
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index ea910a18b4d7..65b97e240196 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -486,8 +486,10 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
u32 val;
val = readl_relaxed(data->base + reg_dir);
- /* Return 0 == output, 1 == input */
- return !(val & BIT(bit));
+ if (val & BIT(bit))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
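This is the third .get_direction conversion in this series (pinctrl-msm and stm32 above receive the same treatment): the old contract was the bare values 0 = output / 1 = input, and each driver's register polarity made the ternary easy to invert. With the named gpiolib constants the convention reduces to (sketch over a made-up register layout):

#include <linux/gpio/driver.h>
#include <linux/io.h>

struct demo_gpio {
	struct gpio_chip chip;
	void __iomem *dir_reg;		/* a set bit means "output" */
};

static int demo_gpio_get_direction(struct gpio_chip *chip,
				   unsigned int offset)
{
	struct demo_gpio *demo = gpiochip_get_data(chip);

	if (readl(demo->dir_reg) & BIT(offset))
		return GPIO_LINE_DIRECTION_OUT;

	return GPIO_LINE_DIRECTION_IN;
}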
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 5f4f5716c893..cc7dd4d87cce 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -19,8 +19,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
- {"INT1051", 0},
{"INT33D5", 0},
+ {"INTC1051", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 7b23efc46a43..289c6655d425 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static bool int0002_check_wake(void *data)
+{
+ u32 gpe_sts_reg;
+
+ gpe_sts_reg = inl(GPE0A_STS_PORT);
+ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
+}
+
static struct irq_chip int0002_byt_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
@@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
+ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
device_init_wakeup(dev, true);
return 0;
}
@@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev)
static int int0002_remove(struct platform_device *pdev)
{
device_init_wakeup(&pdev->dev, false);
+ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
return 0;
}
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 513efe8e7628..890380302080 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -248,7 +248,7 @@ config SYSCON_REBOOT_MODE
action according to the mode.
config POWER_RESET_SC27XX
- bool "Spreadtrum SC27xx PMIC power-off driver"
+ tristate "Spreadtrum SC27xx PMIC power-off driver"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
help
This driver supports powering off a system through
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index d94e3267c3b6..3ff9d93a5226 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -35,6 +35,7 @@
#define AT91_RSTC_MR 0x08 /* Reset Controller Mode Register */
#define AT91_RSTC_URSTEN BIT(0) /* User Reset Enable */
+#define AT91_RSTC_URSTASYNC BIT(2) /* User Reset Asynchronous Control */
#define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */
#define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */
@@ -49,105 +50,63 @@ enum reset_type {
RESET_TYPE_ULP2 = 8,
};
-static void __iomem *at91_ramc_base[2], *at91_rstc_base;
-static struct clk *sclk;
+struct at91_reset {
+ void __iomem *rstc_base;
+ void __iomem *ramc_base[2];
+ struct clk *sclk;
+ struct notifier_block nb;
+ u32 args;
+ u32 ramc_lpr;
+};
/*
* unless the SDRAM is cleanly shutdown before we hit the
* reset register it can be left driving the data bus and
* killing the chance of a subsequent boot from NAND
*/
-static int at91sam9260_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
+static int at91_reset(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
- asm volatile(
- /* Align to cache lines */
- ".balign 32\n\t"
-
- /* Disable SDRAM accesses */
- "str %2, [%0, #" __stringify(AT91_SDRAMC_TR) "]\n\t"
-
- /* Power down SDRAM */
- "str %3, [%0, #" __stringify(AT91_SDRAMC_LPR) "]\n\t"
-
- /* Reset CPU */
- "str %4, [%1, #" __stringify(AT91_RSTC_CR) "]\n\t"
-
- "b .\n\t"
- :
- : "r" (at91_ramc_base[0]),
- "r" (at91_rstc_base),
- "r" (1),
- "r" cpu_to_le32(AT91_SDRAMC_LPCB_POWER_DOWN),
- "r" cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST));
+ struct at91_reset *reset = container_of(this, struct at91_reset, nb);
- return NOTIFY_DONE;
-}
-
-static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
-{
asm volatile(
- /*
- * Test wether we have a second RAM controller to care
- * about.
- *
- * First, test that we can dereference the virtual address.
- */
- "cmp %1, #0\n\t"
- "beq 1f\n\t"
-
- /* Then, test that the RAM controller is enabled */
- "ldr r0, [%1]\n\t"
- "cmp r0, #0\n\t"
-
/* Align to cache lines */
".balign 32\n\t"
/* Disable SDRAM0 accesses */
- "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+ " tst %0, #0\n\t"
+ " beq 1f\n\t"
+ " str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
/* Power down SDRAM0 */
- " str %4, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ " str %4, [%0, %6]\n\t"
/* Disable SDRAM1 accesses */
+ "1: tst %1, #0\n\t"
+ " beq 2f\n\t"
" strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
/* Power down SDRAM1 */
- " strne %4, [%1, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ " strne %4, [%1, %6]\n\t"
/* Reset CPU */
- " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
+ "2: str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
" b .\n\t"
:
- : "r" (at91_ramc_base[0]),
- "r" (at91_ramc_base[1]),
- "r" (at91_rstc_base),
+ : "r" (reset->ramc_base[0]),
+ "r" (reset->ramc_base[1]),
+ "r" (reset->rstc_base),
"r" (1),
"r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
- "r" cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST)
- : "r0");
-
- return NOTIFY_DONE;
-}
-
-static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
-{
- writel(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
- at91_rstc_base);
+ "r" (reset->args),
+ "r" (reset->ramc_lpr)
+ : "r4");
return NOTIFY_DONE;
}
-static int samx7_restart(struct notifier_block *this, unsigned long mode,
- void *cmd)
-{
- writel(AT91_RSTC_KEY | AT91_RSTC_PROCRST, at91_rstc_base);
- return NOTIFY_DONE;
-}
-
-static void __init at91_reset_status(struct platform_device *pdev)
+static void __init at91_reset_status(struct platform_device *pdev,
+ void __iomem *base)
{
const char *reason;
- u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
+ u32 reg = readl(base + AT91_RSTC_SR);
switch ((reg & AT91_RSTC_RSTTYP) >> 8) {
case RESET_TYPE_GENERAL:
@@ -183,42 +142,68 @@ static void __init at91_reset_status(struct platform_device *pdev)
}
static const struct of_device_id at91_ramc_of_match[] = {
- { .compatible = "atmel,at91sam9260-sdramc", },
- { .compatible = "atmel,at91sam9g45-ddramc", },
+ {
+ .compatible = "atmel,at91sam9260-sdramc",
+ .data = (void *)AT91_SDRAMC_LPR,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-ddramc",
+ .data = (void *)AT91_DDRSDRC_LPR,
+ },
{ /* sentinel */ }
};
static const struct of_device_id at91_reset_of_match[] = {
- { .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
- { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
- { .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
- { .compatible = "atmel,samx7-rstc", .data = samx7_restart },
- { .compatible = "microchip,sam9x60-rstc", .data = samx7_restart },
+ {
+ .compatible = "atmel,at91sam9260-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
+ AT91_RSTC_PROCRST),
+ },
+ {
+ .compatible = "atmel,at91sam9g45-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
+ AT91_RSTC_PROCRST)
+ },
+ {
+ .compatible = "atmel,sama5d3-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST |
+ AT91_RSTC_PROCRST)
+ },
+ {
+ .compatible = "atmel,samx7-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ },
+ {
+ .compatible = "microchip,sam9x60-rstc",
+ .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST)
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_reset_of_match);
-static struct notifier_block at91_restart_nb = {
- .priority = 192,
-};
-
static int __init at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
+ struct at91_reset *reset;
struct device_node *np;
int ret, idx = 0;
- at91_rstc_base = of_iomap(pdev->dev.of_node, 0);
- if (!at91_rstc_base) {
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rstc_base = of_iomap(pdev->dev.of_node, 0);
+ if (!reset->rstc_base) {
dev_err(&pdev->dev, "Could not map reset controller address\n");
return -ENODEV;
}
if (!of_device_is_compatible(pdev->dev.of_node, "atmel,sama5d3-rstc")) {
/* we need to shutdown the ddr controller, so get ramc base */
- for_each_matching_node(np, at91_ramc_of_match) {
- at91_ramc_base[idx] = of_iomap(np, 0);
- if (!at91_ramc_base[idx]) {
+ for_each_matching_node_and_match(np, at91_ramc_of_match, &match) {
+ reset->ramc_lpr = (u32)match->data;
+ reset->ramc_base[idx] = of_iomap(np, 0);
+ if (!reset->ramc_base[idx]) {
dev_err(&pdev->dev, "Could not map ram controller address\n");
of_node_put(np);
return -ENODEV;
@@ -228,33 +213,46 @@ static int __init at91_reset_probe(struct platform_device *pdev)
}
match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
- at91_restart_nb.notifier_call = match->data;
+ reset->nb.notifier_call = at91_reset;
+ reset->nb.priority = 192;
+ reset->args = (u32)match->data;
- sclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sclk))
- return PTR_ERR(sclk);
+ reset->sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(reset->sclk))
+ return PTR_ERR(reset->sclk);
- ret = clk_prepare_enable(sclk);
+ ret = clk_prepare_enable(reset->sclk);
if (ret) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return ret;
}
- ret = register_restart_handler(&at91_restart_nb);
+ platform_set_drvdata(pdev, reset);
+
+ if (of_device_is_compatible(pdev->dev.of_node, "microchip,sam9x60-rstc")) {
+ u32 val = readl(reset->rstc_base + AT91_RSTC_MR);
+
+ writel(AT91_RSTC_KEY | AT91_RSTC_URSTASYNC | val,
+ reset->rstc_base + AT91_RSTC_MR);
+ }
+
+ ret = register_restart_handler(&reset->nb);
if (ret) {
- clk_disable_unprepare(sclk);
+ clk_disable_unprepare(reset->sclk);
return ret;
}
- at91_reset_status(pdev);
+ at91_reset_status(pdev, reset->rstc_base);
return 0;
}
static int __exit at91_reset_remove(struct platform_device *pdev)
{
- unregister_restart_handler(&at91_restart_nb);
- clk_disable_unprepare(sclk);
+ struct at91_reset *reset = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&reset->nb);
+ clk_disable_unprepare(reset->sclk);
return 0;
}
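The at91-reset rework is mostly a globals-to-instance conversion: the restart callback receives only its struct notifier_block, so the per-device state is recovered with container_of(), and the per-SoC differences collapse into two data fields (the reset command in args, the RAM-controller low-power register offset in ramc_lpr) carried via of_device_id .data. The recovery idiom in isolation (made-up names):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct demo_reset {
	void __iomem *base;
	u32 cmd;			/* per-SoC reset command */
	struct notifier_block nb;
};

/* Restart handlers get only the notifier_block; walk back to the
 * enclosing per-device structure to reach the register base.
 */
static int demo_restart(struct notifier_block *this, unsigned long mode,
			void *cmd)
{
	struct demo_reset *reset = container_of(this, struct demo_reset, nb);

	writel(reset->cmd, reset->base);
	return NOTIFY_DONE;
}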
diff --git a/drivers/power/reset/sc27xx-poweroff.c b/drivers/power/reset/sc27xx-poweroff.c
index 29fb08b8faa0..90287c31992c 100644
--- a/drivers/power/reset/sc27xx-poweroff.c
+++ b/drivers/power/reset/sc27xx-poweroff.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
@@ -13,6 +14,8 @@
#define SC27XX_PWR_PD_HW 0xc2c
#define SC27XX_PWR_OFF_EN BIT(0)
+#define SC27XX_SLP_CTRL 0xdf0
+#define SC27XX_LDO_XTL_EN BIT(3)
static struct regmap *regmap;
@@ -27,10 +30,13 @@ static struct regmap *regmap;
*/
static void sc27xx_poweroff_shutdown(void)
{
-#ifdef CONFIG_PM_SLEEP_SMP
- int cpu = smp_processor_id();
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpu;
- freeze_secondary_cpus(cpu);
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ remove_cpu(cpu);
+ }
#endif
}
@@ -40,6 +46,9 @@ static struct syscore_ops poweroff_syscore_ops = {
static void sc27xx_poweroff_do_poweroff(void)
{
+ /* Disable the external subsystem connection's power first */
+ regmap_write(regmap, SC27XX_SLP_CTRL, SC27XX_LDO_XTL_EN);
+
regmap_write(regmap, SC27XX_PWR_PD_HW, SC27XX_PWR_OFF_EN);
}
@@ -63,4 +72,8 @@ static struct platform_driver sc27xx_poweroff_driver = {
.name = "sc27xx-poweroff",
},
};
-builtin_platform_driver(sc27xx_poweroff_driver);
+module_platform_driver(sc27xx_poweroff_driver);
+
+MODULE_DESCRIPTION("Power off driver for SC27XX PMIC Device");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 9a5591ab90d0..195bc0462d3e 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -480,7 +480,7 @@ config CHARGER_GPIO
called gpio-charger.
config CHARGER_MANAGER
- bool "Battery charger manager for multiple chargers"
+ tristate "Battery charger manager for multiple chargers"
depends on REGULATOR
select EXTCON
help
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index f69550d64f09..9469fe182d02 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -404,7 +404,7 @@ disable_otp:
}
/**
- * ab8500_power_supply_changed - a wrapper with local extentions for
+ * ab8500_power_supply_changed - a wrapper with local extensions for
* power_supply_changed
* @di: pointer to the ab8500_charger structure
* @psy: pointer to power_supply_that have changed.
@@ -683,7 +683,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
/*
* Platform only supports USB 2.0.
* This means that charging current from USB source
- * is maximum 500 mA. Every occurence of USB_STAT_*_HOST_*
+ * is maximum 500 mA. Every occurrence of USB_STAT_*_HOST_*
* should set USB_CH_IP_CUR_LVL_0P5.
*/
@@ -1379,13 +1379,13 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
/*
* Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
- * will be triggered everytime we enable the VDD ADC supply.
+ * will be triggered every time we enable the VDD ADC supply.
* This will turn off charging for a short while.
* It can be avoided by having the supply on when
* there is a charger enabled. Normally the VDD ADC supply
- * is enabled everytime a GPADC conversion is triggered. We will
- * force it to be enabled from this driver to have
- * the GPADC module independant of the AB8500 chargers
+ * is enabled every time a GPADC conversion is triggered.
+ * We will force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_ac) {
ret = regulator_enable(di->regu);
@@ -1455,7 +1455,7 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
if (is_ab8500_1p1_or_earlier(di->parent)) {
/*
* For ABB revision 1.0 and 1.1 there is a bug in the
- * watchdog logic. That means we have to continously
+ * watchdog logic. That means we have to continuously
* kick the charger watchdog even when no charger is
* connected. This is only valid once the AC charger
* has been enabled. This is a bug that is not handled
@@ -1552,13 +1552,13 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
/*
* Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
- * will be triggered everytime we enable the VDD ADC supply.
+ * will be triggered every time we enable the VDD ADC supply.
* This will turn off charging for a short while.
* It can be avoided by having the supply on when
* there is a charger enabled. Normally the VDD ADC supply
- * is enabled everytime a GPADC conversion is triggered. We will
- * force it to be enabled from this driver to have
- * the GPADC module independant of the AB8500 chargers
+ * is enabled every time a GPADC conversion is triggered.
+ * We will force it to be enabled from this driver to have
+ * the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_usb) {
ret = regulator_enable(di->regu);
@@ -1582,7 +1582,10 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
return -ENXIO;
}
- /* ChVoltLevel: max voltage upto which battery can be charged */
+ /*
+ * ChVoltLevel: max voltage up to which battery can be
+ * charged
+ */
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
if (ret) {
@@ -2014,7 +2017,7 @@ static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
* Work queue function for kicking the charger watchdog.
*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
- * logic. That means we have to continously kick the charger
+ * logic. That means we have to continuously kick the charger
* watchdog even when no charger is connected. This is only
* valid once the AC charger has been enabled. This is
* a bug that is not handled by the algorithm and the
@@ -2262,7 +2265,7 @@ static void ab8500_charger_usb_link_status_work(struct work_struct *work)
* Some chargers that break the USB spec are
* identified as invalid by AB8500 and it refuses
* to start the charging process. But by jumping
- * thru a few hoops it can be forced to start.
+ * through a few hoops it can be forced to start.
*/
if (is_ab8500(di->parent))
ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
@@ -3214,7 +3217,7 @@ static int ab8500_charger_resume(struct platform_device *pdev)
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
- * logic. That means we have to continously kick the charger
+ * logic. That means we have to continuously kick the charger
* watchdog even when no charger is connected. This is only
* valid once the AC charger has been enabled. This is
* a bug that is not handled by the algorithm and the
@@ -3483,7 +3486,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
- * logic. That means we have to continously kick the charger
+ * logic. That means we have to continuously kick the charger
* watchdog even when no charger is connected. This is only
* valid once the AC charger has been enabled. This is
* a bug that is not handled by the algorithm and the
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 1bbba6bba673..cf4c67b2d235 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -21,6 +21,7 @@
#include <linux/property.h>
#include <linux/mfd/axp20x.h>
#include <linux/extcon.h>
+#include <linux/dmi.h>
#define PS_STAT_VBUS_TRIGGER BIT(0)
#define PS_STAT_BAT_CHRG_DIR BIT(2)
@@ -545,6 +546,49 @@ out:
return IRQ_HANDLED;
}
+/*
+ * The HP Pavilion x2 10 series comes in a number of variants:
+ * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
+ * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
+ * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
+ *
+ * The variants with the AXP288 PMIC are all kinds of special:
+ *
+ * 1. All variants use a Type-C connector which the AXP288 does not support, so
+ * when using a Type-C charger it is not recognized. Unlike most AXP288 devices,
+ * this model actually has mostly working ACPI AC / Battery code, the ACPI code
+ * "solves" this by simply setting the input_current_limit to 3A.
+ * There are still some issues with the ACPI code, so we use this native driver,
+ * and to solve the charging not working (500mA is not enough) issue we hardcode
+ * the 3A input_current_limit like the ACPI code does.
+ *
+ * 2. If no charger is connected the machine boots with the vbus-path disabled.
+ * Normally this is done while a 5V boost converter is active, to keep the
+ * PMIC from trying to charge from the boost converter's output: the path is
+ * disabled when an OTG host cable pulls the ID pin on the micro-B
+ * receptacle low, and an ACPI event handler on the ID pin re-enables the
+ * vbus-path once the pin goes high again on cable removal. The Type-C
+ * connector has no ID pin, there is no ID pin handler, and there appears to
+ * be no 5V boost converter, so we end up not charging because the vbus-path
+ * is disabled until the charger is unplugged (which automatically clears
+ * the vbus-path disable bit); charging only starts on the second plug-in.
+ * To fix charging not working on the first plug-in, we unconditionally
+ * enable the vbus-path at probe on this model, which is safe since there
+ * is no 5V boost converter.
+ */
+static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
+ {
+ /*
+ * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
+ * Trail model has "HP", so we only match on product_name.
+ */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ },
+ },
+ {} /* Terminating entry */
+};
+
static void axp288_charger_extcon_evt_worker(struct work_struct *work)
{
struct axp288_chrg_info *info =
@@ -568,7 +612,11 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
}
/* Determine cable/charger type */
- if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
+ if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
+ /* See comment above axp288_hp_x2_dmi_ids declaration */
+ dev_dbg(&info->pdev->dev, "HP X2 with Type-C, setting inlmt to 3A\n");
+ current_limit = 3000000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
current_limit = 500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
@@ -685,6 +733,13 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return ret;
}
+ if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
+ /* See comment above axp288_hp_x2_dmi_ids declaration */
+ ret = axp288_charger_vbus_path_select(info, true);
+ if (ret < 0)
+ return ret;
+ }
+
/* Read current charge voltage and current limit */
ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val);
if (ret < 0) {
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index e1bc4e6e6f30..f40fa0e63b6e 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -706,14 +706,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
{
/* Intel Cherry Trail Compute Stick, Windows version */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
},
},
{
/* Intel Cherry Trail Compute Stick, version without an OS */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
},
},
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 195c18c2f426..664e50103eaa 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1885,7 +1885,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
if (IS_ERR(di->bat)) {
- dev_err(di->dev, "failed to register battery\n");
+ if (PTR_ERR(di->bat) == -EPROBE_DEFER)
+ dev_dbg(di->dev, "failed to register battery, deferring probe\n");
+ else
+ dev_err(di->dev, "failed to register battery\n");
return PTR_ERR(di->bat);
}
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 2748715c4c75..dd3d93dfe3eb 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -148,7 +148,8 @@ static int ingenic_battery_probe(struct platform_device *pdev)
bat->battery = devm_power_supply_register(dev, desc, &psy_cfg);
if (IS_ERR(bat->battery)) {
- dev_err(dev, "Unable to register battery\n");
+ if (PTR_ERR(bat->battery) != -EPROBE_DEFER)
+ dev_err(dev, "Unable to register battery\n");
return PTR_ERR(bat->battery);
}
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index 469c83fdaa8e..a7c8a8453db1 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -614,6 +614,17 @@ static int sc27xx_fgu_get_property(struct power_supply *psy,
val->intval = data->total_cap * 1000;
break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = sc27xx_fgu_get_clbcnt(data, &value);
+ if (ret)
+ goto error;
+
+ value = DIV_ROUND_CLOSEST(value * 10,
+ 36 * SC27XX_FGU_SAMPLE_HZ);
+ val->intval = sc27xx_fgu_adc_to_current(data, value);
+
+ break;
+
default:
ret = -EINVAL;
break;
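The DIV_ROUND_CLOSEST() expression above is dense; a worked form of the unit conversion, assuming clbcnt is a running sum of ADC current samples taken at SC27XX_FGU_SAMPLE_HZ and that sc27xx_fgu_adc_to_current() scales ADC units to mA:

	/*
	 * clbcnt / SAMPLE_HZ        ADC units * seconds
	 *        / 3600             ADC units * hours
	 *        * 1000             so the mA result reads as uAh
	 *
	 * combined: clbcnt * 1000 / (3600 * SAMPLE_HZ)
	 *        == clbcnt * 10 / (36 * SAMPLE_HZ)
	 */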
@@ -682,6 +693,7 @@ static enum power_supply_property sc27xx_fgu_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
};
static const struct power_supply_desc sc27xx_fgu_desc = {
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 648ab80288c9..1bc49b2e12e8 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -726,10 +726,10 @@ twl4030_bci_mode_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(modes); i++)
if (mode == i)
- len += snprintf(buf+len, PAGE_SIZE-len,
+ len += scnprintf(buf+len, PAGE_SIZE-len,
"[%s] ", modes[i]);
else
- len += snprintf(buf+len, PAGE_SIZE-len,
+ len += scnprintf(buf+len, PAGE_SIZE-len,
"%s ", modes[i]);
buf[len-1] = '\n';
return len;
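The snprintf() to scnprintf() conversion is more than cosmetic: snprintf() returns the length that would have been written, so with a near-full buffer 'len' can grow past PAGE_SIZE and the buf[len-1] write above would land out of bounds. scnprintf() returns what was actually stored. A minimal illustration:

	char buf[8];

	/* returns the would-be length, even when truncated */
	int a = snprintf(buf, sizeof(buf), "0123456789");  /* a == 10 */

	/* returns the bytes actually stored (excluding the NUL) */
	int b = scnprintf(buf, sizeof(buf), "0123456789"); /* b == 7 */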
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index ffdb5bc25d6d..fbaed079b299 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -35,7 +35,7 @@ config MTK_SCP
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
- depends on ARCH_OMAP4 || SOC_OMAP5
+ depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX
depends on OMAP_IOMMU
select MAILBOX
select OMAP2PLUS_MBOX
@@ -52,6 +52,18 @@ config OMAP_REMOTEPROC
It's safe to say N here if you're not interested in multimedia
offloading or just want a bare minimum kernel.
+config OMAP_REMOTEPROC_WATCHDOG
+ bool "OMAP remoteproc watchdog timer"
+ depends on OMAP_REMOTEPROC
+ default n
+ help
+ Say Y here to enable watchdog timer for remote processors.
+
+ This option controls the watchdog functionality for the remote
+ processors in OMAP. Dedicated OMAP DMTimers are used by the remote
+ processors and trigger a timer interrupt when a watchdog event is
+ detected.
+
config WKUP_M3_RPROC
tristate "AMx3xx Wakeup M3 remoteproc support"
depends on SOC_AM33XX || SOC_AM43XX
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 3e72b6f38d4b..8957ed271d20 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -186,7 +186,7 @@ static int imx_rproc_stop(struct rproc *rproc)
}
static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
- int len, u64 *sys)
+ size_t len, u64 *sys)
{
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
int i;
@@ -203,19 +203,19 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
}
}
- dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%x\n",
+ dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n",
da, len);
return -ENOENT;
}
-static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct imx_rproc *priv = rproc->priv;
void *va = NULL;
u64 sys;
int i;
- if (len <= 0)
+ if (len == 0)
return NULL;
/*
@@ -235,7 +235,8 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va);
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n",
+ da, len, va);
return va;
}
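The accompanying 'len <= 0' to 'len == 0' change follows directly from the new size_t type: for an unsigned type the '< 0' half of the test is dead code, and a negative length computed by a buggy caller wraps to a huge positive value instead of being rejected. For example:

	size_t len = (size_t)-4;	/* e.g. a wrapped subtraction */

	if (len <= 0)			/* never true for an unsigned type... */
		return NULL;		/* ...so the bogus huge length slips through */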
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 5c4658f00b3d..cd266163a65f 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid)
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
-static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct keystone_rproc *ksproc = rproc->priv;
void __iomem *va = NULL;
@@ -255,7 +255,7 @@ static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
size_t size;
int i;
- if (len <= 0)
+ if (len == 0)
return NULL;
for (i = 0; i < ksproc->num_mems; i++) {
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 7ccdf64ff3ea..ea3743e7e794 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -320,7 +320,7 @@ stop:
return ret;
}
-static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
int offset;
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 6398194075aa..6955fab0a78b 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -2,7 +2,7 @@
/*
* OMAP Remote Processor driver
*
- * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011-2020 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <ohad@wizery.com>
@@ -15,31 +15,466 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk/ti.h>
#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/mailbox_client.h>
+#include <linux/omap-iommu.h>
#include <linux/omap-mailbox.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+#include <clocksource/timer-ti-dm.h>
-#include <linux/platform_data/remoteproc-omap.h>
+#include <linux/platform_data/dmtimer-omap.h>
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
+/* default auto-suspend delay (ms) */
+#define DEFAULT_AUTOSUSPEND_DELAY 10000
+
+/**
+ * struct omap_rproc_boot_data - boot data structure for the OMAP DSP rprocs
+ * @syscon: regmap handle for the system control configuration module
+ * @boot_reg: boot register offset within the @syscon regmap
+ * @boot_reg_shift: bit-field shift required for the boot address value in
+ * @boot_reg
+ */
+struct omap_rproc_boot_data {
+ struct regmap *syscon;
+ unsigned int boot_reg;
+ unsigned int boot_reg_shift;
+};
+
+/**
+ * struct omap_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: bus address used to access the memory region
+ * @dev_addr: device address of the memory region from DSP view
+ * @size: size of the memory region
+ */
+struct omap_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct omap_rproc_timer - data structure for a timer used by an OMAP rproc
+ * @odt: timer pointer
+ * @timer_ops: OMAP dmtimer ops for @odt timer
+ * @irq: timer irq
+ */
+struct omap_rproc_timer {
+ struct omap_dm_timer *odt;
+ const struct omap_dm_timer_ops *timer_ops;
+ int irq;
+};
+
/**
* struct omap_rproc - omap remote processor state
* @mbox: mailbox channel handle
* @client: mailbox client to request the mailbox channel
+ * @boot_data: boot data structure for setting processor boot address
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @num_timers: number of rproc timer(s)
+ * @num_wd_timers: number of rproc watchdog timers
+ * @timers: timer(s) info used by rproc
+ * @autosuspend_delay: auto-suspend delay value to be used for runtime pm
+ * @need_resume: if true a resume is needed in the system resume callback
* @rproc: rproc handle
+ * @reset: reset handle
+ * @pm_comp: completion primitive to sync for suspend response
+ * @fck: functional clock for the remoteproc
+ * @suspend_acked: state machine flag to store the suspend request ack
*/
struct omap_rproc {
struct mbox_chan *mbox;
struct mbox_client client;
+ struct omap_rproc_boot_data *boot_data;
+ struct omap_rproc_mem *mem;
+ int num_mems;
+ int num_timers;
+ int num_wd_timers;
+ struct omap_rproc_timer *timers;
+ int autosuspend_delay;
+ bool need_resume;
struct rproc *rproc;
+ struct reset_control *reset;
+ struct completion pm_comp;
+ struct clk *fck;
+ bool suspend_acked;
};
/**
+ * struct omap_rproc_mem_data - memory definitions for an omap remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct omap_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct omap_rproc_dev_data - device data for the omap remote processor
+ * @device_name: device name of the remote processor
+ * @mems: memory definitions for this remote processor
+ */
+struct omap_rproc_dev_data {
+ const char *device_name;
+ const struct omap_rproc_mem_data *mems;
+};
+
+/**
+ * omap_rproc_request_timer() - request a timer for a remoteproc
+ * @dev: device requesting the timer
+ * @np: device node pointer to the desired timer
+ * @timer: handle to a struct omap_rproc_timer to return the timer handle
+ *
+ * This helper function is used primarily to request a timer associated with
+ * a remoteproc. The returned handle is stored in the .odt field of the
+ * @timer structure passed in, and is used to invoke other timer specific
+ * ops (like starting a timer either during device initialization or during
+ * a resume operation, or for stopping/freeing a timer).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_request_timer(struct device *dev, struct device_node *np,
+ struct omap_rproc_timer *timer)
+{
+ int ret;
+
+ timer->odt = timer->timer_ops->request_by_node(np);
+ if (!timer->odt) {
+ dev_err(dev, "request for timer node %p failed\n", np);
+ return -EBUSY;
+ }
+
+ ret = timer->timer_ops->set_source(timer->odt, OMAP_TIMER_SRC_SYS_CLK);
+ if (ret) {
+ dev_err(dev, "error setting OMAP_TIMER_SRC_SYS_CLK as source for timer node %p\n",
+ np);
+ timer->timer_ops->free(timer->odt);
+ return ret;
+ }
+
+ /* clean counter, remoteproc code will set the value */
+ timer->timer_ops->set_load(timer->odt, 0);
+
+ return 0;
+}
+
+/**
+ * omap_rproc_start_timer() - start a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used to start a timer associated with a remoteproc,
+ * obtained using the request_timer ops. The helper function needs to be
+ * invoked by the driver to start the timer (during device initialization)
+ * or to just resume the timer.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_start_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->start(timer->odt);
+}
+
+/**
+ * omap_rproc_stop_timer() - stop a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used to disable a timer associated with a
+ * remoteproc, and needs to be called either during a device shutdown
+ * or suspend operation. The separate helper function allows the driver
+ * to just stop a timer without having to release the timer during a
+ * suspend operation.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_stop_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->stop(timer->odt);
+}
+
+/**
+ * omap_rproc_release_timer() - release a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used primarily to release a timer associated
+ * with a remoteproc. The dmtimer will be available for other clients to
+ * use once released.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_release_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->free(timer->odt);
+}
+
+/**
+ * omap_rproc_get_timer_irq() - get the irq for a timer
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This function is used to get the irq associated with a watchdog timer. The
+ * function is called by the OMAP remoteproc driver to register an interrupt
+ * handler to handle watchdog events on the remote processor.
+ *
+ * Return: irq id on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->get_irq(timer->odt);
+}
+
+/**
+ * omap_rproc_ack_timer_irq() - acknowledge a timer irq
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This function is used to clear the irq associated with a watchdog timer.
+ * The function is called by the OMAP remoteproc upon a watchdog event on the
+ * remote processor to clear the interrupt status of the watchdog timer.
+ */
+static inline void omap_rproc_ack_timer_irq(struct omap_rproc_timer *timer)
+{
+ timer->timer_ops->write_status(timer->odt, OMAP_TIMER_INT_OVERFLOW);
+}
+
+/**
+ * omap_rproc_watchdog_isr() - Watchdog ISR handler for remoteproc device
+ * @irq: IRQ number associated with a watchdog timer
+ * @data: IRQ handler data
+ *
+ * This ISR routine executes the necessary low-level code to
+ * acknowledge a watchdog timer interrupt. There can be multiple watchdog
+ * timers associated with a rproc (like IPUs which have 2 watchdog timers,
+ * one per Cortex M3/M4 core), so a lookup has to be performed to identify
+ * the timer to acknowledge its interrupt.
+ *
+ * The function also invokes rproc_report_crash to report the watchdog event
+ * to the remoteproc driver core, to trigger a recovery.
+ *
+ * Return: IRQ_HANDLED on success, otherwise IRQ_NONE
+ */
+static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
+{
+ struct rproc *rproc = data;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct omap_rproc_timer *wd_timer = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+ int i;
+
+ for (i = oproc->num_timers; i < num_timers; i++) {
+ if (timers[i].irq > 0 && irq == timers[i].irq) {
+ wd_timer = &timers[i];
+ break;
+ }
+ }
+
+ if (!wd_timer) {
+ dev_err(dev, "invalid timer\n");
+ return IRQ_NONE;
+ }
+
+ omap_rproc_ack_timer_irq(wd_timer);
+
+ rproc_report_crash(rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * omap_rproc_enable_timers() - enable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to acquire and configure the timer handle
+ *
+ * This function is used primarily to enable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver
+ * to either acquire and start a timer (during device initialization) or
+ * to just start a timer (during a resume operation).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_enable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ int ret = 0;
+ struct platform_device *tpdev;
+ struct dmtimer_platform_data *tpdata;
+ const struct omap_dm_timer_ops *timer_ops;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ if (!configure)
+ goto start_timers;
+
+ for (i = 0; i < num_timers; i++) {
+ if (i < oproc->num_timers)
+ np = of_parse_phandle(dev->of_node, "ti,timers", i);
+ else
+ np = of_parse_phandle(dev->of_node,
+ "ti,watchdog-timers",
+ (i - oproc->num_timers));
+ if (!np) {
+ ret = -ENXIO;
+ dev_err(dev, "device node lookup for timer at index %d failed: %d\n",
+ i < oproc->num_timers ? i :
+ i - oproc->num_timers, ret);
+ goto free_timers;
+ }
+
+ tpdev = of_find_device_by_node(np);
+ if (!tpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "could not get timer platform device\n");
+ goto put_node;
+ }
+
+ tpdata = dev_get_platdata(&tpdev->dev);
+ put_device(&tpdev->dev);
+ if (!tpdata) {
+ ret = -EINVAL;
+ dev_err(dev, "dmtimer pdata structure NULL\n");
+ goto put_node;
+ }
+
+ timer_ops = tpdata->timer_ops;
+ if (!timer_ops || !timer_ops->request_by_node ||
+ !timer_ops->set_source || !timer_ops->set_load ||
+ !timer_ops->free || !timer_ops->start ||
+ !timer_ops->stop || !timer_ops->get_irq ||
+ !timer_ops->write_status) {
+ ret = -EINVAL;
+ dev_err(dev, "device does not have required timer ops\n");
+ goto put_node;
+ }
+
+ timers[i].irq = -1;
+ timers[i].timer_ops = timer_ops;
+ ret = omap_rproc_request_timer(dev, np, &timers[i]);
+ if (ret) {
+ dev_err(dev, "request for timer %p failed: %d\n", np,
+ ret);
+ goto put_node;
+ }
+ of_node_put(np);
+
+ if (i >= oproc->num_timers) {
+ timers[i].irq = omap_rproc_get_timer_irq(&timers[i]);
+ if (timers[i].irq < 0) {
+ dev_err(dev, "get_irq for timer %p failed: %d\n",
+ np, timers[i].irq);
+ ret = -EBUSY;
+ goto free_timers;
+ }
+
+ ret = request_irq(timers[i].irq,
+ omap_rproc_watchdog_isr, IRQF_SHARED,
+ "rproc-wdt", rproc);
+ if (ret) {
+ dev_err(dev, "error requesting irq for timer %p\n",
+ np);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ goto free_timers;
+ }
+ }
+ }
+
+start_timers:
+ for (i = 0; i < num_timers; i++) {
+ ret = omap_rproc_start_timer(&timers[i]);
+ if (ret) {
+ dev_err(dev, "start timer %p failed failed: %d\n", np,
+ ret);
+ break;
+ }
+ }
+ if (ret) {
+ while (i >= 0) {
+ omap_rproc_stop_timer(&timers[i]);
+ i--;
+ }
+ goto put_node;
+ }
+ return 0;
+
+put_node:
+ if (configure)
+ of_node_put(np);
+free_timers:
+ while (i--) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * omap_rproc_disable_timers() - disable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to release the timer handle
+ *
+ * This function is used primarily to disable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver
+ * to either stop and release a timer (during device shutdown) or to just
+ * stop a timer (during a suspend operation).
+ *
+ * Return: 0 on success or no timers
+ */
+static int omap_rproc_disable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ for (i = 0; i < num_timers; i++) {
+ omap_rproc_stop_timer(&timers[i]);
+ if (configure) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
* omap_rproc_mbox_callback() - inbound mailbox message handler
* @client: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
@@ -65,13 +500,29 @@ static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
switch (msg) {
case RP_MBOX_CRASH:
- /* just log this for now. later, we'll also do recovery */
+ /*
+ * remoteproc detected an exception, notify the rproc core.
+ * The remoteproc core will handle the recovery.
+ */
dev_err(dev, "omap rproc %s crashed\n", name);
+ rproc_report_crash(oproc->rproc, RPROC_FATAL_ERROR);
break;
case RP_MBOX_ECHO_REPLY:
dev_info(dev, "received echo reply from %s\n", name);
break;
+ case RP_MBOX_SUSPEND_ACK:
+ /* Fall through */
+ case RP_MBOX_SUSPEND_CANCEL:
+ oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK;
+ complete(&oproc->pm_comp);
+ break;
default:
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > oproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
/* msg contains the index of the triggered vring */
if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
@@ -85,11 +536,52 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
struct device *dev = rproc->dev.parent;
int ret;
+ /* wake up the rproc before kicking it */
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0)) {
+ dev_err(dev, "pm_runtime_get_sync() failed during kick, ret = %d\n",
+ ret);
+ pm_runtime_put_noidle(dev);
+ return;
+ }
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(oproc->mbox, (void *)vqid);
if (ret < 0)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+/**
+ * omap_rproc_write_dsp_boot_addr() - set boot address for DSP remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Set boot address for a supported DSP remote processor.
+ *
+ * Return: 0 on success, or -EINVAL if boot address is not aligned properly
+ */
+static int omap_rproc_write_dsp_boot_addr(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_boot_data *bdata = oproc->boot_data;
+ u32 offset = bdata->boot_reg;
+ u32 value;
+ u32 mask;
+
+ if (rproc->bootaddr & (SZ_1K - 1)) {
+ dev_err(dev, "invalid boot address 0x%llx, must be aligned on a 1KB boundary\n",
+ rproc->bootaddr);
+ return -EINVAL;
+ }
+
+ value = rproc->bootaddr >> bdata->boot_reg_shift;
+ mask = ~(SZ_1K - 1) >> bdata->boot_reg_shift;
+
+ return regmap_update_bits(bdata->syscon, offset, mask, value);
}
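A worked example of the mask/shift arithmetic may help; the shift value below is hypothetical and only for illustration:

	/*
	 * With a hypothetical boot_reg_shift of 10 and a 1KB-aligned
	 * bootaddr of 0x9d000000:
	 *
	 *   value = 0x9d000000 >> 10   = 0x00274000
	 *   mask  = ~(SZ_1K - 1) >> 10 = 0x003fffff
	 *
	 * so regmap_update_bits() touches only the boot-address field and
	 * leaves any neighbouring bit-fields in the register intact.
	 */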
/*
@@ -103,13 +595,14 @@ static int omap_rproc_start(struct rproc *rproc)
{
struct omap_rproc *oproc = rproc->priv;
struct device *dev = rproc->dev.parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
int ret;
struct mbox_client *client = &oproc->client;
- if (pdata->set_bootaddr)
- pdata->set_bootaddr(rproc->bootaddr);
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret)
+ return ret;
+ }
client->dev = dev;
client->tx_done = NULL;
@@ -117,7 +610,7 @@ static int omap_rproc_start(struct rproc *rproc)
client->tx_block = false;
client->knows_txdone = false;
- oproc->mbox = omap_mbox_request_channel(client, pdata->mbox_name);
+ oproc->mbox = mbox_request_channel(client, 0);
if (IS_ERR(oproc->mbox)) {
ret = -EBUSY;
dev_err(dev, "mbox_request_channel failed: %ld\n",
@@ -138,14 +631,34 @@ static int omap_rproc_start(struct rproc *rproc)
goto put_mbox;
}
- ret = pdata->device_enable(pdev);
+ ret = omap_rproc_enable_timers(rproc, true);
if (ret) {
- dev_err(dev, "omap_device_enable failed: %d\n", ret);
+ dev_err(dev, "omap_rproc_enable_timers failed: %d\n", ret);
goto put_mbox;
}
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset control deassert failed: %d\n", ret);
+ goto disable_timers;
+ }
+
+ /*
+ * remote processor is up, so update the runtime pm status and
+ * enable auto-suspend. The device usage count is incremented
+ * manually here to balance the put_autosuspend below
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
+disable_timers:
+ omap_rproc_disable_timers(rproc, true);
put_mbox:
mbox_free_channel(oproc->mbox);
return ret;
@@ -155,32 +668,638 @@ put_mbox:
static int omap_rproc_stop(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
struct omap_rproc *oproc = rproc->priv;
int ret;
- ret = pdata->device_shutdown(pdev);
- if (ret)
+ /*
+ * cancel any possible scheduled runtime suspend by incrementing
+ * the device usage count, and resuming the device. The remoteproc
+ * also needs to be woken up if suspended, so that the remoteproc
+ * OS does not keep any context it has saved, avoiding potential
+ * issues in misidentifying a subsequent device
+ * reboot as a power restore boot
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
return ret;
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret)
+ goto out;
+
+ ret = omap_rproc_disable_timers(rproc, true);
+ if (ret)
+ goto enable_device;
mbox_free_channel(oproc->mbox);
+ /*
+ * update the runtime pm states and status now that the remoteproc
+ * has stopped
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
return 0;
+
+enable_device:
+ reset_control_deassert(oproc->reset);
+out:
+ /* schedule the next auto-suspend */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
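The runtime PM calls in the start/stop pair above are symmetric but easy to misread; a sketch of how the usage count and device state move:

	/*
	 * start: pm_runtime_set_active()       state = active
	 *        pm_runtime_use_autosuspend()
	 *        pm_runtime_get_noresume()     count 0 -> 1, no callback runs
	 *        pm_runtime_enable()
	 *        pm_runtime_put_autosuspend()  count 1 -> 0, arms auto-suspend
	 *
	 * stop:  pm_runtime_get_sync()         count 0 -> 1, resumes if needed
	 *        pm_runtime_disable()
	 *        pm_runtime_put_noidle()       count 1 -> 0, no suspend attempt
	 *        pm_runtime_set_suspended()    state = suspended
	 */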
+
+/**
+ * omap_rproc_da_to_va() - internal memory translation helper
+ * @rproc: remote processor to apply the address translation for
+ * @da: device address to translate
+ * @len: length of the memory buffer
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for internal RAMs
+ * present in a DSP or IPU device. The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ *
+ * Return: translated virtual address in kernel memory space on success,
+ * or NULL on failure.
+ */
+static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ int i;
+ u32 offset;
+
+ if (!len)
+ return NULL;
+
+ if (!oproc->num_mems)
+ return NULL;
+
+ for (i = 0; i < oproc->num_mems; i++) {
+ if (da >= oproc->mem[i].dev_addr && da + len <=
+ oproc->mem[i].dev_addr + oproc->mem[i].size) {
+ offset = da - oproc->mem[i].dev_addr;
+ /* __force to make sparse happy with type conversion */
+ return (__force void *)(oproc->mem[i].cpu_addr +
+ offset);
+ }
+ }
+
+ return NULL;
}
static const struct rproc_ops omap_rproc_ops = {
.start = omap_rproc_start,
.stop = omap_rproc_stop,
.kick = omap_rproc_kick,
+ .da_to_va = omap_rproc_da_to_va,
+};
+
+#ifdef CONFIG_PM
+static bool _is_rproc_in_standby(struct omap_rproc *oproc)
+{
+ return ti_clk_is_in_standby(oproc->fck);
+}
+
+/* 1 sec is long enough to let the remoteproc side suspend the device */
+#define DEF_SUSPEND_TIMEOUT 1000
+static int _omap_rproc_suspend(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ unsigned long to = msecs_to_jiffies(DEF_SUSPEND_TIMEOUT);
+ unsigned long ta = jiffies + to;
+ u32 suspend_msg = auto_suspend ?
+ RP_MBOX_SUSPEND_AUTO : RP_MBOX_SUSPEND_SYSTEM;
+ int ret;
+
+ reinit_completion(&oproc->pm_comp);
+ oproc->suspend_acked = false;
+ ret = mbox_send_message(oproc->mbox, (void *)suspend_msg);
+ if (ret < 0) {
+ dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&oproc->pm_comp, to);
+ if (!oproc->suspend_acked)
+ return -EBUSY;
+
+ /*
+ * The remoteproc side returns the ACK message before saving the
+ * context, because the context saving is performed within a SYS/BIOS
+ * function, and it cannot have any inter-dependencies against the IPC
+ * layer. Also, as SYS/BIOS needs to properly preserve the processor
+ * register set, sending this ACK or signalling the completion of the
+ * context save through a shared memory variable can never be the
+ * absolute last thing to be executed on the remoteproc side, and the
+ * MPU cannot use the ACK message as a sync point to put the remoteproc
+ * into reset. The only way to ensure that the remote processor has
+ * completed saving the context is to check that the module has reached
+ * STANDBY state (after saving the context, the SYS/BIOS executes the
+ * appropriate target-specific WFI instruction causing the module to
+ * enter STANDBY).
+ */
+ while (!_is_rproc_in_standby(oproc)) {
+ if (time_after(jiffies, ta))
+ return -ETIME;
+ schedule();
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset assert during suspend failed %d\n", ret);
+ return ret;
+ }
+
+ ret = omap_rproc_disable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "disabling timers during suspend failed %d\n",
+ ret);
+ goto enable_device;
+ }
+
+ /*
+ * IOMMUs would have to be disabled specifically for runtime suspend.
+ * They are handled automatically through System PM callbacks for
+ * regular system suspend
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_deactivate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "iommu domain deactivate failed %d\n",
+ ret);
+ goto enable_timers;
+ }
+ }
+
+ return 0;
+
+enable_timers:
+ /* ignore errors on re-enabling code */
+ omap_rproc_enable_timers(rproc, false);
+enable_device:
+ reset_control_deassert(oproc->reset);
+ return ret;
+}
+
+static int _omap_rproc_resume(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ /*
+ * IOMMUs would have to be enabled specifically for runtime resume.
+ * They would have been already enabled automatically through System
+ * PM callbacks for regular system resume
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_activate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "omap_iommu activate failed %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* boot address could be lost after suspend, so restore it */
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret) {
+ dev_err(dev, "boot address restore failed %d\n", ret);
+ goto suspend_iommu;
+ }
+ }
+
+ ret = omap_rproc_enable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "enabling timers during resume failed %d\n", ret);
+ goto suspend_iommu;
+ }
+
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset deassert during resume failed %d\n", ret);
+ goto disable_timers;
+ }
+
+ return 0;
+
+disable_timers:
+ omap_rproc_disable_timers(rproc, false);
+suspend_iommu:
+ if (auto_suspend)
+ omap_iommu_domain_deactivate(rproc->domain);
+out:
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state == RPROC_SUSPENDED)
+ goto out;
+
+ if (rproc->state != RPROC_RUNNING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_suspend(rproc, false);
+ if (ret) {
+ dev_err(dev, "suspend failed %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * remoteproc is running at the time of system suspend, so remember
+ * this so it can be woken up during system resume
+ */
+ oproc->need_resume = true;
+ rproc->state = RPROC_SUSPENDED;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state != RPROC_SUSPENDED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * remoteproc was auto-suspended at the time of system suspend,
+ * so there is no need to wake up the processor (leave it in suspended
+ * state, will be woken up during a subsequent runtime_resume)
+ */
+ if (!oproc->need_resume)
+ goto out;
+
+ ret = _omap_rproc_resume(rproc, false);
+ if (ret) {
+ dev_err(dev, "resume failed %d\n", ret);
+ goto out;
+ }
+
+ oproc->need_resume = false;
+ rproc->state = RPROC_RUNNING;
+
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_CRASHED) {
+ dev_dbg(dev, "rproc cannot be runtime suspended when crashed!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (WARN_ON(rproc->state != RPROC_RUNNING)) {
+ dev_err(dev, "rproc cannot be runtime suspended when not running!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * do not even attempt suspend if the remote processor is not
+ * idled for runtime auto-suspend
+ */
+ if (!_is_rproc_in_standby(oproc)) {
+ ret = -EBUSY;
+ goto abort;
+ }
+
+ ret = _omap_rproc_suspend(rproc, true);
+ if (ret)
+ goto abort;
+
+ rproc->state = RPROC_SUSPENDED;
+ mutex_unlock(&rproc->lock);
+ return 0;
+
+abort:
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (WARN_ON(rproc->state != RPROC_SUSPENDED)) {
+ dev_err(dev, "rproc cannot be runtime resumed if not suspended! state=%d\n",
+ rproc->state);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_resume(rproc, true);
+ if (ret) {
+ dev_err(dev, "runtime resume failed %d\n", ret);
+ goto out;
+ }
+
+ rproc->state = RPROC_RUNNING;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct omap_rproc_mem_data ipu_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x20000000 },
+ { },
+};
+
+static const struct omap_rproc_mem_data dra7_dsp_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+ { },
+};
+
+static const struct omap_rproc_dev_data omap4_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap4_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
};
+static const struct omap_rproc_dev_data omap5_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap5_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_dsp_dev_data = {
+ .device_name = "dsp",
+ .mems = dra7_dsp_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct of_device_id omap_rproc_of_match[] = {
+ {
+ .compatible = "ti,omap4-dsp",
+ .data = &omap4_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap4-ipu",
+ .data = &omap4_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-dsp",
+ .data = &omap5_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-ipu",
+ .data = &omap5_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-dsp",
+ .data = &dra7_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-ipu",
+ .data = &dra7_ipu_dev_data,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_rproc_of_match);
+
+static const char *omap_rproc_get_firmware(struct platform_device *pdev)
+{
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw_name;
+}
+
+static int omap_rproc_get_boot_data(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ const struct omap_rproc_dev_data *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!of_property_read_bool(np, "ti,bootreg"))
+ return 0;
+
+ oproc->boot_data = devm_kzalloc(&pdev->dev, sizeof(*oproc->boot_data),
+ GFP_KERNEL);
+ if (!oproc->boot_data)
+ return -ENOMEM;
+
+ oproc->boot_data->syscon =
+ syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
+ if (IS_ERR(oproc->boot_data->syscon)) {
+ ret = PTR_ERR(oproc->boot_data->syscon);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "ti,bootreg", 1,
+ &oproc->boot_data->boot_reg)) {
+ dev_err(&pdev->dev, "couldn't get the boot register\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_index(np, "ti,bootreg", 2,
+ &oproc->boot_data->boot_reg_shift);
+
+ return 0;
+}
+
+static int omap_rproc_of_get_internal_memories(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ const struct omap_rproc_dev_data *data;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!data->mems)
+ return 0;
+
+ num_mems = of_property_count_elems_of_size(dev->of_node, "reg",
+ sizeof(u32)) / 2;
+
+ oproc->mem = devm_kcalloc(dev, num_mems, sizeof(*oproc->mem),
+ GFP_KERNEL);
+ if (!oproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; data->mems[i].name; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "no memory defined for %s\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ oproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(oproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to parse and map %s memory\n",
+ data->mems[i].name);
+ return PTR_ERR(oproc->mem[i].cpu_addr);
+ }
+ oproc->mem[i].bus_addr = res->start;
+ oproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ oproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n",
+ data->mems[i].name, &oproc->mem[i].bus_addr,
+ oproc->mem[i].size, oproc->mem[i].cpu_addr,
+ oproc->mem[i].dev_addr);
+ }
+ oproc->num_mems = num_mems;
+
+ return 0;
+}
+
+#ifdef CONFIG_OMAP_REMOTEPROC_WATCHDOG
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_count_phandle_with_args(np, "ti,watchdog-timers", NULL);
+ if (ret <= 0) {
+ dev_dbg(dev, "device does not have watchdog timers, status = %d\n",
+ ret);
+ ret = 0;
+ }
+
+ return ret;
+}
+#else
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static int omap_rproc_of_get_timers(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ int num_timers;
+
+ /*
+ * Timer nodes are directly used in client nodes as phandles, so
+ * retrieve the number of timers by counting the phandles
+ */
+ oproc->num_timers = of_count_phandle_with_args(np, "ti,timers", NULL);
+ if (oproc->num_timers <= 0) {
+ dev_dbg(dev, "device does not have timers, status = %d\n",
+ oproc->num_timers);
+ oproc->num_timers = 0;
+ }
+
+ oproc->num_wd_timers = omap_rproc_count_wdog_timers(dev);
+
+ num_timers = oproc->num_timers + oproc->num_wd_timers;
+ if (num_timers) {
+ oproc->timers = devm_kcalloc(dev, num_timers,
+ sizeof(*oproc->timers),
+ GFP_KERNEL);
+ if (!oproc->timers)
+ return -ENOMEM;
+
+ dev_dbg(dev, "device has %d tick timers and %d watchdog timers\n",
+ oproc->num_timers, oproc->num_wd_timers);
+ }
+
+ return 0;
+}
+
static int omap_rproc_probe(struct platform_device *pdev)
{
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc;
struct rproc *rproc;
+ const char *firmware;
int ret;
+ struct reset_control *reset;
+
+ if (!np) {
+ dev_err(&pdev->dev, "only DT-based devices are supported\n");
+ return -ENODEV;
+ }
+
+ reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ firmware = omap_rproc_get_firmware(pdev);
+ if (IS_ERR(firmware))
+ return PTR_ERR(firmware);
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -188,24 +1307,60 @@ static int omap_rproc_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, pdata->name, &omap_rproc_ops,
- pdata->firmware, sizeof(*oproc));
+ rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
+ firmware, sizeof(*oproc));
if (!rproc)
return -ENOMEM;
oproc = rproc->priv;
oproc->rproc = rproc;
+ oproc->reset = reset;
/* All existing OMAP IPU and DSP processors have an MMU */
rproc->has_iommu = true;
+ ret = omap_rproc_of_get_internal_memories(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_get_boot_data(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_of_get_timers(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ init_completion(&oproc->pm_comp);
+ oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY;
+
+ of_property_read_u32(pdev->dev.of_node, "ti,autosuspend-delay-ms",
+ &oproc->autosuspend_delay);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay);
+
+ oproc->fck = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(oproc->fck)) {
+ ret = PTR_ERR(oproc->fck);
+ goto free_rproc;
+ }
+
+ ret = of_reserved_mem_device_init(&pdev->dev);
+ if (ret) {
+ dev_warn(&pdev->dev, "device does not have specific CMA pool.\n");
+ dev_warn(&pdev->dev, "Typically this should be provided,\n");
+ dev_warn(&pdev->dev, "only omit if you know what you are doing.\n");
+ }
+
platform_set_drvdata(pdev, rproc);
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto release_mem;
return 0;
+release_mem:
+ of_reserved_mem_device_release(&pdev->dev);
free_rproc:
rproc_free(rproc);
return ret;
@@ -217,15 +1372,24 @@ static int omap_rproc_remove(struct platform_device *pdev)
rproc_del(rproc);
rproc_free(rproc);
+ of_reserved_mem_device_release(&pdev->dev);
return 0;
}
+static const struct dev_pm_ops omap_rproc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rproc_suspend, omap_rproc_resume)
+ SET_RUNTIME_PM_OPS(omap_rproc_runtime_suspend,
+ omap_rproc_runtime_resume, NULL)
+};
+
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
.remove = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
+ .pm = &omap_rproc_pm_ops,
+ .of_match_table = omap_rproc_of_match,
},
};
diff --git a/drivers/remoteproc/omap_remoteproc.h b/drivers/remoteproc/omap_remoteproc.h
index f6d2036d383d..828e13256c02 100644
--- a/drivers/remoteproc/omap_remoteproc.h
+++ b/drivers/remoteproc/omap_remoteproc.h
@@ -1,35 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Remote processor messaging
*
- * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011-2020 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Texas Instruments nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _OMAP_RPMSG_H
@@ -56,6 +31,22 @@
*
* @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the
* recovery mechanism (to some extent).
+ *
+ * @RP_MBOX_SUSPEND_AUTO: auto suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a
+ * suspend request
+ *
+ * @RP_MBOX_SUSPEND_CANCEL: a cancel suspend response from a remote processor
+ * on a suspend request
+ *
+ * New message definitions, if any, should be introduced here.
+ *
+ * @RP_MBOX_END_MSG: Indicates end of known/defined messages from remote core.
+ * This should be the last definition.
+ *
*/
enum omap_rp_mbox_messages {
RP_MBOX_READY = 0xFFFFFF00,
@@ -64,6 +55,11 @@ enum omap_rp_mbox_messages {
RP_MBOX_ECHO_REQUEST = 0xFFFFFF03,
RP_MBOX_ECHO_REPLY = 0xFFFFFF04,
RP_MBOX_ABORT_REQUEST = 0xFFFFFF05,
+ RP_MBOX_SUSPEND_AUTO = 0xFFFFFF10,
+ RP_MBOX_SUSPEND_SYSTEM = 0xFFFFFF11,
+ RP_MBOX_SUSPEND_ACK = 0xFFFFFF12,
+ RP_MBOX_SUSPEND_CANCEL = 0xFFFFFF13,
+ RP_MBOX_END_MSG = 0xFFFFFF14,
};
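For context, the suspend handshake these new messages implement looks roughly like the following on the firmware side; this is illustrative pseudo-code, not a published SYS/BIOS interface:

	/* firmware-side sketch, illustrative only */
	case RP_MBOX_SUSPEND_AUTO:
	case RP_MBOX_SUSPEND_SYSTEM:
		if (can_suspend_now()) {
			mbox_send(RP_MBOX_SUSPEND_ACK);	/* ACK precedes the save */
			save_context();
			wfi();				/* module enters STANDBY */
		} else {
			mbox_send(RP_MBOX_SUSPEND_CANCEL);
		}
		break;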
#endif /* _OMAP_RPMSG_H */
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index cb0f4a0be032..111a442c993c 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -15,6 +15,8 @@
#include <linux/remoteproc.h>
#include "qcom_q6v5.h"
+#define Q6V5_PANIC_DELAY_MS 200
+
/**
* qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start
* @q6v5: reference to qcom_q6v5 context to be reinitialized
@@ -163,6 +165,24 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop);
/**
+ * qcom_q6v5_panic() - panic handler to invoke a stop on the remote
+ * @q6v5: reference to qcom_q6v5 context
+ *
+ * Set the stop bit and sleep in order to allow the remote processor to flush
+ * its caches etc for post mortem debugging.
+ *
+ * Return: the panic delay (200 ms) to be observed by the remoteproc core
+ */
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5)
+{
+ qcom_smem_state_update_bits(q6v5->state,
+ BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+
+ return Q6V5_PANIC_DELAY_MS;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_panic);
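The returned delay is consumed by the remoteproc core's panic notifier added in this series, which walks the registered rprocs and waits out each .panic delay so the remote can flush its caches before the kernel halts. A rough sketch of that consumer, not the exact core code:

	/* sketch of the core-side consumer, not the exact implementation */
	static int rproc_panic_handler(struct notifier_block *nb,
				       unsigned long event, void *ptr)
	{
		unsigned long d;
		struct rproc *rproc;

		rcu_read_lock();
		list_for_each_entry_rcu(rproc, &rproc_list, node) {
			if (!rproc->ops->panic)
				continue;
			d = rproc->ops->panic(rproc);
			mdelay(d);	/* give the remote time to flush */
		}
		rcu_read_unlock();
		return NOTIFY_DONE;
	}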
+
+/**
* qcom_q6v5_init() - initializer of the q6v5 common struct
* @q6v5: handle to be initialized
* @pdev: platform_device reference for acquiring resources
diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h
index 7ac92c1e0f49..c4ed887c1499 100644
--- a/drivers/remoteproc/qcom_q6v5.h
+++ b/drivers/remoteproc/qcom_q6v5.h
@@ -42,5 +42,6 @@ int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5);
int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout);
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5);
#endif
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index e953886b2eb7..24a3db961d5e 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -270,7 +270,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -282,12 +282,20 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
return adsp->mem_region + offset;
}
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
static const struct rproc_ops adsp_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
+ .panic = adsp_panic,
};
static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids)
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index f9ccce76e44b..ce49c3236ff7 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -381,23 +381,33 @@ static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
}
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
- bool remote_owner, phys_addr_t addr,
+ bool local, bool remote, phys_addr_t addr,
size_t size)
{
- struct qcom_scm_vmperm next;
+ struct qcom_scm_vmperm next[2];
+ int perms = 0;
if (!qproc->need_mem_protection)
return 0;
- if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
- return 0;
- if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
+
+ if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
+ remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
return 0;
- next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
- next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
+ if (local) {
+ next[perms].vmid = QCOM_SCM_VMID_HLOS;
+ next[perms].perm = QCOM_SCM_PERM_RWX;
+ perms++;
+ }
+
+ if (remote) {
+ next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
+ next[perms].perm = QCOM_SCM_PERM_RW;
+ perms++;
+ }
return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
- current_perm, &next, 1);
+ current_perm, next, perms);
}
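The (local, remote) flag pair replaces the old single remote_owner boolean so that shared ownership can be expressed. The combinations used by the callers below:

	/*
	 *   local=true,  remote=false  ->  HLOS (Linux) exclusive, RWX
	 *   local=false, remote=true   ->  MSS MSA (modem) exclusive, RW
	 *   local=true,  remote=true   ->  shared between Linux and the modem,
	 *                                  used while MPSS segments are loaded
	 */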
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
@@ -803,7 +813,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
- ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
+ ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
+ phys, size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to metadata failed: %d\n", ret);
@@ -821,7 +832,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
/* Metadata authentication done, remove modem access */
- xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
+ xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
+ phys, size);
if (xferop_ret)
dev_warn(qproc->dev,
"mdt buffer not reclaimed system may become unstable\n");
@@ -908,7 +920,7 @@ static int q6v5_mba_load(struct q6v5 *qproc)
}
/* Assign MBA image access in DDR to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
qproc->mba_phys, qproc->mba_size);
if (ret) {
dev_err(qproc->dev,
@@ -945,8 +957,8 @@ halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
reclaim_mba:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret) {
dev_err(qproc->dev,
@@ -1003,11 +1015,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
}
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
- false, qproc->mpss_phys,
- qproc->mpss_size);
- WARN_ON(ret);
-
q6v5_reset_assert(qproc);
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
@@ -1021,7 +1028,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
/* In case of failure or coredump scenario where reclaiming MBA memory
* could not happen reclaim it here.
*/
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
qproc->mba_phys,
qproc->mba_size);
WARN_ON(ret);
@@ -1037,6 +1044,23 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
}
}
+static int q6v5_reload_mba(struct rproc *rproc)
+{
+ struct q6v5 *qproc = rproc->priv;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, rproc->firmware, qproc->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = q6v5_load(rproc, fw);
+ if (!ret)
+ ret = q6v5_mba_load(qproc);
+ release_firmware(fw);
+
+ return ret;
+}
+
static int q6v5_mpss_load(struct q6v5 *qproc)
{
const struct elf32_phdr *phdrs;
@@ -1048,6 +1072,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
phys_addr_t boot_addr;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
+ u32 code_length;
bool relocate = false;
char *fw_name;
size_t fw_name_len;
@@ -1097,6 +1122,24 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
}
+ /*
+ * In case of a modem subsystem restart on secure devices, the modem
+ * memory can be reclaimed only after MBA is loaded. For modem cold
+ * boot this will be a nop
+ */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
+ qproc->mpss_phys, qproc->mpss_size);
+
+ /* Share ownership between Linux and MSS, during segment loading */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mpss memory failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto release_firmware;
+ }
+
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
qproc->mpss_reloc = mpss_reloc;
/* Load firmware segments */
@@ -1145,10 +1188,25 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
phdr->p_memsz - phdr->p_filesz);
}
size += phdr->p_memsz;
+
+ code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ if (!code_length) {
+ boot_addr = relocate ? qproc->mpss_phys : min_addr;
+ writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+ }
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
+ ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+ if (ret < 0) {
+ dev_err(qproc->dev, "MPSS authentication failed: %d\n",
+ ret);
+ goto release_firmware;
+ }
}
/* Transfer ownership of modem ddr region to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys, qproc->mpss_size);
if (ret) {
dev_err(qproc->dev,
@@ -1157,11 +1215,6 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
- boot_addr = relocate ? qproc->mpss_phys : min_addr;
- writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
- writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
- writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
-
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "MPSS authentication timed out\n");
@@ -1186,8 +1239,16 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
/* Unlock mba before copying segments */
- if (!qproc->dump_mba_loaded)
- ret = q6v5_mba_load(qproc);
+ if (!qproc->dump_mba_loaded) {
+ ret = q6v5_reload_mba(rproc);
+ if (!ret) {
+ /* Reset ownership back to Linux to copy segments */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ true, false,
+ qproc->mpss_phys,
+ qproc->mpss_size);
+ }
+ }
if (!ptr || ret)
memset(dest, 0xff, segment->size);
@@ -1198,8 +1259,14 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
/* Reclaim mba after copying segments */
if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
- if (qproc->dump_mba_loaded)
+ if (qproc->dump_mba_loaded) {
+ /* Try to reset ownership back to Q6 */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, true,
+ qproc->mpss_phys,
+ qproc->mpss_size);
q6v5_mba_reclaim(qproc);
+ }
}
}
@@ -1225,8 +1292,8 @@ static int q6v5_start(struct rproc *rproc)
goto reclaim_mpss;
}
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret)
dev_err(qproc->dev,
@@ -1239,10 +1306,6 @@ static int q6v5_start(struct rproc *rproc)
return 0;
reclaim_mpss:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
- false, qproc->mpss_phys,
- qproc->mpss_size);
- WARN_ON(xfermemop_ret);
q6v5_mba_reclaim(qproc);
return ret;
@@ -1264,7 +1327,7 @@ static int q6v5_stop(struct rproc *rproc)
return 0;
}
-static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *q6v5_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct q6v5 *qproc = rproc->priv;
int offset;
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index edf9d0e1a235..7a63efb85405 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -222,7 +222,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -234,12 +234,20 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
return adsp->mem_region + offset;
}
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
static const struct rproc_ops adsp_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
+ .panic = adsp_panic,
};
static int adsp_init_clock(struct qcom_adsp *adsp)
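
adsp_panic() above is a thin wrapper so the PAS driver can participate
in the new panic path. A generic sketch of wiring the hook into a
driver's ops table; the return value is the number of milliseconds the
remoteproc core should wait before the machine goes down (names below
are hypothetical, not from this driver):

static unsigned long my_rproc_panic(struct rproc *rproc)
{
	struct my_rproc *priv = rproc->priv;

	/* Panic context: only atomic-safe signalling, e.g. a doorbell */
	writel(1, priv->doorbell);

	return 200;	/* ask the core to mdelay() 200 ms for us */
}

static const struct rproc_ops my_rproc_ops = {
	/* .start, .stop, .da_to_va, ... elided */
	.panic = my_rproc_panic,
};
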
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index f93e1e4a1cc0..f1924b740a10 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -406,7 +406,7 @@ static int q6v5_wcss_stop(struct rproc *rproc)
return 0;
}
-static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct q6v5_wcss *wcss = rproc->priv;
int offset;
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index dc135754bb9c..0c7afd038f0d 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -287,7 +287,7 @@ static int wcnss_stop(struct rproc *rproc)
return ret;
}
-static void *wcnss_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
int offset;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 097f33e4f1f3..e12a54e67588 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -26,6 +27,7 @@
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/devcoredump.h>
+#include <linux/rculist.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/idr.h>
@@ -38,11 +40,13 @@
#include <linux/platform_device.h>
#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
+static struct notifier_block rproc_panic_nb;
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
void *, int offset, int avail);
@@ -185,7 +189,7 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* here the output of the DMA API for the carveouts, which should be more
* correct.
*/
-void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
@@ -224,7 +228,8 @@ EXPORT_SYMBOL(rproc_da_to_va);
/**
* rproc_find_carveout_by_name() - lookup the carveout region by a name
* @rproc: handle of a remote processor
- * @name,..: carveout name to find (standard printf format)
+ * @name: carveout name to find (format string)
+ * @...: optional parameters matching @name string
*
* Platform driver has the capability to register some pre-allocated carveout
* (physically contiguous memory regions) before rproc firmware loading and
@@ -318,8 +323,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
struct device *dev = &rproc->dev;
struct rproc_vring *rvring = &rvdev->vring[i];
struct fw_rsc_vdev *rsc;
- int ret, size, notifyid;
+ int ret, notifyid;
struct rproc_mem_entry *mem;
+ size_t size;
/* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
@@ -445,6 +451,7 @@ static void rproc_rvdev_release(struct device *dev)
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
* @rsc: the vring resource descriptor
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* This resource entry requests the host to statically register a virtio
@@ -587,6 +594,7 @@ void rproc_vdev_release(struct kref *ref)
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
* @rsc: the trace resource descriptor
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* In case the remote processor dumps trace logs into memory,
@@ -652,6 +660,7 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
* @rsc: the devmem resource entry
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* Remote processors commonly need to access certain on-chip peripherals.
@@ -746,11 +755,12 @@ static int rproc_alloc_carveout(struct rproc *rproc,
va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
if (!va) {
dev_err(dev->parent,
- "failed to allocate dma memory: len 0x%x\n", mem->len);
+ "failed to allocate dma memory: len 0x%zx\n",
+ mem->len);
return -ENOMEM;
}
- dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
+ dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
va, &dma, mem->len);
if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
@@ -853,6 +863,7 @@ static int rproc_release_carveout(struct rproc *rproc,
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
* @rsc: the resource entry
+ * @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
* This function will handle firmware requests for allocation of physically
@@ -957,7 +968,7 @@ EXPORT_SYMBOL(rproc_add_carveout);
*/
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
- void *va, dma_addr_t dma, int len, u32 da,
+ void *va, dma_addr_t dma, size_t len, u32 da,
int (*alloc)(struct rproc *, struct rproc_mem_entry *),
int (*release)(struct rproc *, struct rproc_mem_entry *),
const char *name, ...)
@@ -999,7 +1010,7 @@ EXPORT_SYMBOL(rproc_mem_entry_init);
* provided by client.
*/
struct rproc_mem_entry *
-rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...)
{
struct rproc_mem_entry *mem;
@@ -1022,7 +1033,7 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
-/**
+/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
*/
@@ -1270,7 +1281,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
- dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
+ dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
unmapped);
}
@@ -1564,20 +1575,21 @@ EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
static void rproc_coredump(struct rproc *rproc)
{
struct rproc_dump_segment *segment;
- struct elf32_phdr *phdr;
- struct elf32_hdr *ehdr;
+ void *phdr;
+ void *ehdr;
size_t data_size;
size_t offset;
void *data;
void *ptr;
+ u8 class = rproc->elf_class;
int phnum = 0;
if (list_empty(&rproc->dump_segments))
return;
- data_size = sizeof(*ehdr);
+ data_size = elf_size_of_hdr(class);
list_for_each_entry(segment, &rproc->dump_segments, node) {
- data_size += sizeof(*phdr) + segment->size;
+ data_size += elf_size_of_phdr(class) + segment->size;
phnum++;
}
@@ -1588,33 +1600,33 @@ static void rproc_coredump(struct rproc *rproc)
ehdr = data;
- memset(ehdr, 0, sizeof(*ehdr));
- memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
- ehdr->e_ident[EI_CLASS] = ELFCLASS32;
- ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
- ehdr->e_ident[EI_VERSION] = EV_CURRENT;
- ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
- ehdr->e_type = ET_CORE;
- ehdr->e_machine = EM_NONE;
- ehdr->e_version = EV_CURRENT;
- ehdr->e_entry = rproc->bootaddr;
- ehdr->e_phoff = sizeof(*ehdr);
- ehdr->e_ehsize = sizeof(*ehdr);
- ehdr->e_phentsize = sizeof(*phdr);
- ehdr->e_phnum = phnum;
-
- phdr = data + ehdr->e_phoff;
- offset = ehdr->e_phoff + sizeof(*phdr) * ehdr->e_phnum;
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, EM_NONE);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
+ elf_hdr_set_e_phnum(class, ehdr, phnum);
+
+ phdr = data + elf_hdr_get_e_phoff(class, ehdr);
+ offset = elf_hdr_get_e_phoff(class, ehdr);
+ offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
+
list_for_each_entry(segment, &rproc->dump_segments, node) {
- memset(phdr, 0, sizeof(*phdr));
- phdr->p_type = PT_LOAD;
- phdr->p_offset = offset;
- phdr->p_vaddr = segment->da;
- phdr->p_paddr = segment->da;
- phdr->p_filesz = segment->size;
- phdr->p_memsz = segment->size;
- phdr->p_flags = PF_R | PF_W | PF_X;
- phdr->p_align = 0;
+ memset(phdr, 0, elf_size_of_phdr(class));
+ elf_phdr_set_p_type(class, phdr, PT_LOAD);
+ elf_phdr_set_p_offset(class, phdr, offset);
+ elf_phdr_set_p_vaddr(class, phdr, segment->da);
+ elf_phdr_set_p_paddr(class, phdr, segment->da);
+ elf_phdr_set_p_filesz(class, phdr, segment->size);
+ elf_phdr_set_p_memsz(class, phdr, segment->size);
+ elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
+ elf_phdr_set_p_align(class, phdr, 0);
if (segment->dump) {
segment->dump(rproc, segment, data + offset);
@@ -1630,8 +1642,8 @@ static void rproc_coredump(struct rproc *rproc)
}
}
- offset += phdr->p_filesz;
- phdr++;
+ offset += elf_phdr_get_p_filesz(class, phdr);
+ phdr += elf_size_of_phdr(class);
}
dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
@@ -1653,12 +1665,16 @@ int rproc_trigger_recovery(struct rproc *rproc)
struct device *dev = &rproc->dev;
int ret;
- dev_err(dev, "recovering %s\n", rproc->name);
-
ret = mutex_lock_interruptible(&rproc->lock);
if (ret)
return ret;
+ /* State could have changed before we got the mutex */
+ if (rproc->state != RPROC_CRASHED)
+ goto unlock_mutex;
+
+ dev_err(dev, "recovering %s\n", rproc->name);
+
ret = rproc_stop(rproc, true);
if (ret)
goto unlock_mutex;
@@ -1685,6 +1701,7 @@ unlock_mutex:
/**
* rproc_crash_handler_work() - handle a crash
+ * @work: work item handling the crash
*
* This function needs to handle everything related to a crash, like cpu
* registers and stack dump, information to help to debug the fatal error, etc.
@@ -1854,8 +1871,8 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
if (!np)
return NULL;
- mutex_lock(&rproc_list_mutex);
- list_for_each_entry(r, &rproc_list, node) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(r, &rproc_list, node) {
if (r->dev.parent && r->dev.parent->of_node == np) {
/* prevent underlying implementation from being removed */
if (!try_module_get(r->dev.parent->driver->owner)) {
@@ -1868,7 +1885,7 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
break;
}
}
- mutex_unlock(&rproc_list_mutex);
+ rcu_read_unlock();
of_node_put(np);
@@ -1925,7 +1942,7 @@ int rproc_add(struct rproc *rproc)
/* expose to rproc_get_by_phandle users */
mutex_lock(&rproc_list_mutex);
- list_add(&rproc->node, &rproc_list);
+ list_add_rcu(&rproc->node, &rproc_list);
mutex_unlock(&rproc_list_mutex);
return 0;
@@ -2029,6 +2046,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->name = name;
rproc->priv = &rproc[1];
rproc->auto_boot = true;
+ rproc->elf_class = ELFCLASS32;
device_initialize(&rproc->dev);
rproc->dev.parent = dev;
@@ -2053,7 +2071,8 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->ops->load = rproc_elf_load_segments;
rproc->ops->parse_fw = rproc_elf_load_rsc_table;
rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
- rproc->ops->sanity_check = rproc_elf_sanity_check;
+ if (!rproc->ops->sanity_check)
+ rproc->ops->sanity_check = rproc_elf32_sanity_check;
rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
}
@@ -2140,9 +2159,12 @@ int rproc_del(struct rproc *rproc)
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
- list_del(&rproc->node);
+ list_del_rcu(&rproc->node);
mutex_unlock(&rproc_list_mutex);
+ /* Ensure that no readers of rproc_list are still active */
+ synchronize_rcu();
+
device_del(&rproc->dev);
return 0;
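
The list handling above converts rproc_list readers to RCU while
writers stay serialized on rproc_list_mutex, which is what lets the
panic notifier added further down walk the list from atomic context.
The general shape of that pattern, as a self-contained sketch with an
illustrative object type:

#include <linux/mutex.h>
#include <linux/rculist.h>

struct obj {
	struct list_head node;
	int id;
};

static LIST_HEAD(obj_list);
static DEFINE_MUTEX(obj_list_mutex);

static void obj_add(struct obj *o)
{
	mutex_lock(&obj_list_mutex);
	list_add_rcu(&o->node, &obj_list);
	mutex_unlock(&obj_list_mutex);
}

static void obj_del(struct obj *o)
{
	mutex_lock(&obj_list_mutex);
	list_del_rcu(&o->node);
	mutex_unlock(&obj_list_mutex);

	/* After this, no reader can still be looking at @o */
	synchronize_rcu();
}

static struct obj *obj_find(int id)
{
	struct obj *o, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(o, &obj_list, node) {
		if (o->id == id) {
			found = o;
			break;
		}
	}
	rcu_read_unlock();

	/* Like rproc_get_by_phandle(), a real caller must pin @found
	 * (module/device refcount) before leaving the read section. */
	return found;
}
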
@@ -2216,10 +2238,50 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
}
EXPORT_SYMBOL(rproc_report_crash);
+static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ unsigned int longest = 0;
+ struct rproc *rproc;
+ unsigned int d;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rproc, &rproc_list, node) {
+ if (!rproc->ops->panic || rproc->state != RPROC_RUNNING)
+ continue;
+
+ d = rproc->ops->panic(rproc);
+ longest = max(longest, d);
+ }
+ rcu_read_unlock();
+
+ /*
+ * Delay for the longest requested duration before returning. This can
+ * be used by the remoteproc drivers to give the remote processor time
+ * to perform any requested operations (such as flushing caches), when
+ * it's not possible to signal the Linux side due to the panic.
+ */
+ mdelay(longest);
+
+ return NOTIFY_DONE;
+}
+
+static void __init rproc_init_panic(void)
+{
+ rproc_panic_nb.notifier_call = rproc_panic_handler;
+ atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
+}
+
+static void __exit rproc_exit_panic(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
+}
+
static int __init remoteproc_init(void)
{
rproc_init_sysfs();
rproc_init_debugfs();
+ rproc_init_panic();
return 0;
}
@@ -2229,6 +2291,7 @@ static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
+ rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
}
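
The registration half of the panic support is ordinary notifier
plumbing on the atomic panic_notifier_list chain. A stripped-down
sketch of the same wiring; note the callback runs while the machine is
dying, so only atomic-safe work (register pokes, mdelay()) is allowed:

#include <linux/kernel.h>	/* declares panic_notifier_list on kernels of this era */
#include <linux/notifier.h>

static int my_panic_cb(struct notifier_block *nb, unsigned long event,
		       void *ptr)
{
	/* no sleeping, no mutexes, no memory allocation here */
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
	.notifier_call = my_panic_cb,
};

static int __init my_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
	return 0;
}

static void __exit my_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &my_panic_nb);
}
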
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index dd93cf04e17f..d734cadb16e3 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -138,16 +138,16 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
buf[count - 1] = '\0';
if (!strncmp(buf, "enabled", count)) {
+ /* change the flag and begin the recovery process if needed */
rproc->recovery_disabled = false;
- /* if rproc has crashed, trigger recovery */
- if (rproc->state == RPROC_CRASHED)
- rproc_trigger_recovery(rproc);
+ rproc_trigger_recovery(rproc);
} else if (!strncmp(buf, "disabled", count)) {
rproc->recovery_disabled = true;
} else if (!strncmp(buf, "recover", count)) {
- /* if rproc has crashed, trigger recovery */
- if (rproc->state == RPROC_CRASHED)
- rproc_trigger_recovery(rproc);
+ /* begin the recovery process without changing the flag */
+ rproc_trigger_recovery(rproc);
+ } else {
+ return -EINVAL;
}
return count;
@@ -293,7 +293,7 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p)
seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
- seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len);
+ seq_printf(seq, "\tLength: 0x%zx Bytes\n\n", carveout->len);
}
return 0;
@@ -349,7 +349,7 @@ void rproc_create_debug_dir(struct rproc *rproc)
debugfs_create_file("name", 0400, rproc->dbg_dir,
rproc, &rproc_name_ops);
- debugfs_create_file("recovery", 0400, rproc->dbg_dir,
+ debugfs_create_file("recovery", 0600, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
debugfs_create_file("crash", 0200, rproc->dbg_dir,
rproc, &rproc_crash_ops);
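
The rewritten recovery handler above also tightens the write interface:
unknown keywords now get -EINVAL instead of being silently accepted,
and the file mode gains the write bit it was missing (0400 to 0600) to
match its write handler. The parsing convention it follows, reduced to
a minimal hypothetical debugfs write handler:

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static bool my_feature_enabled;

static ssize_t my_ctl_write(struct file *filp, const char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	char buf[10];

	if (count < 1 || count > sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	buf[count - 1] = '\0';	/* drop the trailing newline from echo */

	if (!strncmp(buf, "enabled", count))
		my_feature_enabled = true;
	else if (!strncmp(buf, "disabled", count))
		my_feature_enabled = false;
	else
		return -EINVAL;	/* reject anything unrecognized */

	return count;
}
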
diff --git a/drivers/remoteproc/remoteproc_elf_helpers.h b/drivers/remoteproc/remoteproc_elf_helpers.h
new file mode 100644
index 000000000000..4b6be7b6bf4d
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_elf_helpers.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Remote processor ELF helper definitions
+ *
+ * Copyright (C) 2020 Kalray, Inc.
+ */
+
+#ifndef REMOTEPROC_ELF_LOADER_H
+#define REMOTEPROC_ELF_LOADER_H
+
+#include <linux/elf.h>
+#include <linux/types.h>
+
+/**
+ * fw_elf_get_class - Get elf class
+ * @fw: the ELF firmware image
+ *
+ * Note that we use an elf32_hdr to access the class, since the start of
+ * the struct is the same for both ELF classes.
+ *
+ * Return: elf class of the firmware
+ */
+static inline u8 fw_elf_get_class(const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
+
+ return ehdr->e_ident[EI_CLASS];
+}
+
+static inline void elf_hdr_init_ident(struct elf32_hdr *hdr, u8 class)
+{
+ memcpy(hdr->e_ident, ELFMAG, SELFMAG);
+ hdr->e_ident[EI_CLASS] = class;
+ hdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ hdr->e_ident[EI_VERSION] = EV_CURRENT;
+ hdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+}
+
+/* Generate getter and setter for a specific elf struct/field */
+#define ELF_GEN_FIELD_GET_SET(__s, __field, __type) \
+static inline __type elf_##__s##_get_##__field(u8 class, const void *arg) \
+{ \
+ if (class == ELFCLASS32) \
+ return (__type) ((const struct elf32_##__s *) arg)->__field; \
+ else \
+ return (__type) ((const struct elf64_##__s *) arg)->__field; \
+} \
+static inline void elf_##__s##_set_##__field(u8 class, void *arg, \
+ __type value) \
+{ \
+ if (class == ELFCLASS32) \
+ ((struct elf32_##__s *) arg)->__field = (__type) value; \
+ else \
+ ((struct elf64_##__s *) arg)->__field = (__type) value; \
+}
+
+ELF_GEN_FIELD_GET_SET(hdr, e_entry, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_phnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_shnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_phoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shstrndx, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_machine, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_type, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_version, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_ehsize, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_phentsize, u16)
+
+ELF_GEN_FIELD_GET_SET(phdr, p_paddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_vaddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_filesz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_memsz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_type, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_offset, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_flags, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_align, u64)
+
+ELF_GEN_FIELD_GET_SET(shdr, sh_size, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_offset, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_name, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_addr, u64)
+
+#define ELF_STRUCT_SIZE(__s) \
+static inline unsigned long elf_size_of_##__s(u8 class) \
+{ \
+ if (class == ELFCLASS32)\
+ return sizeof(struct elf32_##__s); \
+ else \
+ return sizeof(struct elf64_##__s); \
+}
+
+ELF_STRUCT_SIZE(shdr)
+ELF_STRUCT_SIZE(phdr)
+ELF_STRUCT_SIZE(hdr)
+
+#endif /* REMOTEPROC_ELF_LOADER_H */
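
Everything in this header hinges on one trick: the accessors take the
ELF class as a runtime parameter and cast the same void pointer to
either the elf32 or elf64 struct. The technique is easy to try outside
the kernel; a self-contained userspace equivalent using the <elf.h>
types (illustrative, not kernel code):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GEN_HDR_GET(__field, __type)					\
static __type hdr_get_##__field(uint8_t class, const void *p)		\
{									\
	if (class == ELFCLASS32)					\
		return (__type)((const Elf32_Ehdr *)p)->__field;	\
	return (__type)((const Elf64_Ehdr *)p)->__field;		\
}

GEN_HDR_GET(e_phnum, uint16_t)
GEN_HDR_GET(e_entry, uint64_t)

int main(void)
{
	Elf64_Ehdr h = { .e_phnum = 3, .e_entry = 0x80000000 };

	memcpy(h.e_ident, ELFMAG, SELFMAG);
	h.e_ident[EI_CLASS] = ELFCLASS64;

	printf("phnum=%u entry=%#llx\n",
	       (unsigned)hdr_get_e_phnum(h.e_ident[EI_CLASS], &h),
	       (unsigned long long)hdr_get_e_entry(h.e_ident[EI_CLASS], &h));
	return 0;
}
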
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index 606aae166eba..16e2c496fd45 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -23,20 +23,29 @@
#include <linux/elf.h>
#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
/**
- * rproc_elf_sanity_check() - Sanity Check ELF firmware image
+ * rproc_elf_sanity_check() - Sanity Check for ELF32/ELF64 firmware image
* @rproc: the remote processor handle
* @fw: the ELF firmware image
*
- * Make sure this fw image is sane.
+ * Make sure this fw image is sane (i.e. a valid ELF32/ELF64 file).
*/
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
const char *name = rproc->firmware;
struct device *dev = &rproc->dev;
+ /*
+ * ELF32 and ELF64 files begin with the same header layout. Thus, to
+ * simplify parsing, we can use the elf32_hdr struct for the common
+ * fields of both classes.
+ */
struct elf32_hdr *ehdr;
+ u32 elf_shdr_get_size;
+ u64 phoff, shoff;
char class;
+ u16 phnum;
if (!fw) {
dev_err(dev, "failed to load %s\n", name);
@@ -50,13 +59,22 @@ int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
ehdr = (struct elf32_hdr *)fw->data;
- /* We only support ELF32 at this point */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
class = ehdr->e_ident[EI_CLASS];
- if (class != ELFCLASS32) {
+ if (class != ELFCLASS32 && class != ELFCLASS64) {
dev_err(dev, "Unsupported class: %d\n", class);
return -EINVAL;
}
+ if (class == ELFCLASS64 && fw->size < sizeof(struct elf64_hdr)) {
+ dev_err(dev, "elf64 header is too small\n");
+ return -EINVAL;
+ }
+
/* We assume the firmware has the same endianness as the host */
# ifdef __LITTLE_ENDIAN
if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
@@ -67,31 +85,55 @@ int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
return -EINVAL;
}
- if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
- dev_err(dev, "Image is too small\n");
- return -EINVAL;
- }
+ phoff = elf_hdr_get_e_phoff(class, fw->data);
+ shoff = elf_hdr_get_e_shoff(class, fw->data);
+ phnum = elf_hdr_get_e_phnum(class, fw->data);
+ elf_shdr_get_size = elf_size_of_shdr(class);
- if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
- dev_err(dev, "Image is corrupted (bad magic)\n");
+ if (fw->size < shoff + elf_shdr_get_size) {
+ dev_err(dev, "Image is too small\n");
return -EINVAL;
}
- if (ehdr->e_phnum == 0) {
+ if (phnum == 0) {
dev_err(dev, "No loadable segments\n");
return -EINVAL;
}
- if (ehdr->e_phoff > fw->size) {
+ if (phoff > fw->size) {
dev_err(dev, "Firmware size is too small\n");
return -EINVAL;
}
+ dev_dbg(dev, "Firmware is an elf%d file\n",
+ class == ELFCLASS32 ? 32 : 64);
+
return 0;
}
EXPORT_SYMBOL(rproc_elf_sanity_check);
/**
+ * rproc_elf32_sanity_check() - Sanity Check ELF32 firmware image
+ * @rproc: the remote processor handle
+ * @fw: the ELF32 firmware image
+ *
+ * Make sure this fw image is sane.
+ */
+int rproc_elf32_sanity_check(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret = rproc_elf_sanity_check(rproc, fw);
+
+ if (ret)
+ return ret;
+
+ if (fw_elf_get_class(fw) == ELFCLASS32)
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(rproc_elf32_sanity_check);
+
+/**
* rproc_elf_get_boot_addr() - Get rproc's boot address.
* @rproc: the remote processor handle
* @fw: the ELF firmware image
@@ -102,11 +144,9 @@ EXPORT_SYMBOL(rproc_elf_sanity_check);
* Note that the boot address is not a configurable property of all remote
* processors. Some will always boot at a specific hard-coded address.
*/
-u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
- struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
-
- return ehdr->e_entry;
+ return elf_hdr_get_e_entry(fw_elf_get_class(fw), fw->data);
}
EXPORT_SYMBOL(rproc_elf_get_boot_addr);
@@ -137,53 +177,65 @@ EXPORT_SYMBOL(rproc_elf_get_boot_addr);
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
- struct elf32_hdr *ehdr;
- struct elf32_phdr *phdr;
+ const void *ehdr, *phdr;
int i, ret = 0;
+ u16 phnum;
const u8 *elf_data = fw->data;
+ u8 class = fw_elf_get_class(fw);
+ u32 elf_phdr_get_size = elf_size_of_phdr(class);
- ehdr = (struct elf32_hdr *)elf_data;
- phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+ ehdr = elf_data;
+ phnum = elf_hdr_get_e_phnum(class, ehdr);
+ phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
/* go through the available ELF segments */
- for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- u32 da = phdr->p_paddr;
- u32 memsz = phdr->p_memsz;
- u32 filesz = phdr->p_filesz;
- u32 offset = phdr->p_offset;
+ for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
+ u64 da = elf_phdr_get_p_paddr(class, phdr);
+ u64 memsz = elf_phdr_get_p_memsz(class, phdr);
+ u64 filesz = elf_phdr_get_p_filesz(class, phdr);
+ u64 offset = elf_phdr_get_p_offset(class, phdr);
+ u32 type = elf_phdr_get_p_type(class, phdr);
void *ptr;
- if (phdr->p_type != PT_LOAD)
+ if (type != PT_LOAD)
continue;
- dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
+ dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
+ type, da, memsz, filesz);
if (filesz > memsz) {
- dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
- dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
+ if (!rproc_u64_fit_in_size_t(memsz)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ memsz);
+ ret = -EOVERFLOW;
+ break;
+ }
+
/* grab the kernel address for this device address */
ptr = rproc_da_to_va(rproc, da, memsz);
if (!ptr) {
- dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
+ memsz);
ret = -EINVAL;
break;
}
/* put the segment where the remote processor expects it */
- if (phdr->p_filesz)
- memcpy(ptr, elf_data + phdr->p_offset, filesz);
+ if (filesz)
+ memcpy(ptr, elf_data + offset, filesz);
/*
* Zero out remaining memory for this segment.
@@ -196,28 +248,42 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
memset(ptr + filesz, 0, memsz - filesz);
}
+ if (ret == 0)
+ rproc->elf_class = class;
+
return ret;
}
EXPORT_SYMBOL(rproc_elf_load_segments);
-static struct elf32_shdr *
-find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size)
+static const void *
+find_table(struct device *dev, const struct firmware *fw)
{
- struct elf32_shdr *shdr;
+ const void *shdr, *name_table_shdr;
int i;
const char *name_table;
struct resource_table *table = NULL;
- const u8 *elf_data = (void *)ehdr;
+ const u8 *elf_data = (void *)fw->data;
+ u8 class = fw_elf_get_class(fw);
+ size_t fw_size = fw->size;
+ const void *ehdr = elf_data;
+ u16 shnum = elf_hdr_get_e_shnum(class, ehdr);
+ u32 elf_shdr_get_size = elf_size_of_shdr(class);
+ u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
/* look for the resource table and handle it */
- shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
- name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
-
- for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
- u32 size = shdr->sh_size;
- u32 offset = shdr->sh_offset;
-
- if (strcmp(name_table + shdr->sh_name, ".resource_table"))
+ /* First, get the section header according to the elf class */
+ shdr = elf_data + elf_hdr_get_e_shoff(class, ehdr);
+ /* Compute name table section header entry in shdr array */
+ name_table_shdr = shdr + (shstrndx * elf_shdr_get_size);
+ /* Finally, compute the name table section address in elf */
+ name_table = elf_data + elf_shdr_get_sh_offset(class, name_table_shdr);
+
+ for (i = 0; i < shnum; i++, shdr += elf_shdr_get_size) {
+ u64 size = elf_shdr_get_sh_size(class, shdr);
+ u64 offset = elf_shdr_get_sh_offset(class, shdr);
+ u32 name = elf_shdr_get_sh_name(class, shdr);
+
+ if (strcmp(name_table + name, ".resource_table"))
continue;
table = (struct resource_table *)(elf_data + offset);
@@ -270,21 +336,21 @@ find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size)
*/
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw)
{
- struct elf32_hdr *ehdr;
- struct elf32_shdr *shdr;
+ const void *shdr;
struct device *dev = &rproc->dev;
struct resource_table *table = NULL;
const u8 *elf_data = fw->data;
size_t tablesz;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
- ehdr = (struct elf32_hdr *)elf_data;
-
- shdr = find_table(dev, ehdr, fw->size);
+ shdr = find_table(dev, fw);
if (!shdr)
return -EINVAL;
- table = (struct resource_table *)(elf_data + shdr->sh_offset);
- tablesz = shdr->sh_size;
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ table = (struct resource_table *)(elf_data + sh_offset);
+ tablesz = elf_shdr_get_sh_size(class, shdr);
/*
* Create a copy of the resource table. When a virtio device starts
@@ -317,13 +383,24 @@ EXPORT_SYMBOL(rproc_elf_load_rsc_table);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw)
{
- struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
- struct elf32_shdr *shdr;
+ const void *shdr;
+ u64 sh_addr, sh_size;
+ u8 class = fw_elf_get_class(fw);
+ struct device *dev = &rproc->dev;
- shdr = find_table(&rproc->dev, ehdr, fw->size);
+ shdr = find_table(&rproc->dev, fw);
if (!shdr)
return NULL;
- return rproc_da_to_va(rproc, shdr->sh_addr, shdr->sh_size);
+ sh_addr = elf_shdr_get_sh_addr(class, shdr);
+ sh_size = elf_shdr_get_sh_size(class, shdr);
+
+ if (!rproc_u64_fit_in_size_t(sh_size)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ sh_size);
+ return NULL;
+ }
+
+ return rproc_da_to_va(rproc, sh_addr, sh_size);
}
EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table);
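
Taken together, the loader changes establish a fixed validation order:
magic bytes first, then the class byte, then a class-specific minimum
header size, and only then the phoff/shoff bounds checks against the
firmware size. A compact userspace rendition of those checks (a sketch,
not the kernel function):

#include <elf.h>
#include <stddef.h>
#include <string.h>

static int elf_image_ok(const unsigned char *data, size_t size)
{
	const Elf32_Ehdr *e32 = (const Elf32_Ehdr *)data;
	const Elf64_Ehdr *e64 = (const Elf64_Ehdr *)data;

	if (size < sizeof(*e32) || memcmp(data, ELFMAG, SELFMAG))
		return 0;			/* truncated or bad magic */

	switch (data[EI_CLASS]) {
	case ELFCLASS32:
		return e32->e_phnum != 0 && e32->e_phoff <= size;
	case ELFCLASS64:
		if (size < sizeof(*e64))
			return 0;		/* elf64 header truncated */
		return e64->e_phnum != 0 && e64->e_phoff <= size;
	}

	return 0;				/* unsupported class */
}
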
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 493ef9262411..b389dc79da81 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -50,12 +50,13 @@ void rproc_exit_sysfs(void);
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
-void *rproc_da_to_va(struct rproc *rproc, u64 da, int len);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
+int rproc_elf32_sanity_check(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
-u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
@@ -73,7 +74,7 @@ int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
}
static inline
-u32 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+u64 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
if (rproc->ops->get_boot_addr)
return rproc->ops->get_boot_addr(rproc, fw);
@@ -119,4 +120,13 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
return NULL;
}
+static inline
+bool rproc_u64_fit_in_size_t(u64 val)
+{
+ if (sizeof(size_t) == sizeof(u64))
+ return true;
+
+ return (val <= (size_t) -1);
+}
+
#endif /* REMOTEPROC_INTERNAL_H */
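
rproc_u64_fit_in_size_t() exists because ELF64 segment and section
sizes are u64, while rproc_da_to_va() and memcpy() sizes are size_t;
on a 32-bit kernel a 4 GiB p_memsz would otherwise truncate silently.
A standalone illustration of the guard:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool u64_fits_size_t(uint64_t val)
{
	if (sizeof(size_t) == sizeof(uint64_t))
		return true;

	return val <= (size_t)-1;	/* (size_t)-1 == SIZE_MAX */
}

int main(void)
{
	uint64_t memsz = 0x100000000ULL;	/* a 4 GiB segment */

	/* Prints 0 on a 32-bit build: reject rather than truncate */
	printf("%d\n", u64_fits_size_t(memsz));
	return 0;
}
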
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 8c07cb2ca8ba..e61d738d9b47 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -320,6 +320,7 @@ static void rproc_virtio_dev_release(struct device *dev)
/**
* rproc_add_virtio_dev() - register an rproc-induced virtio device
* @rvdev: the remote vdev
+ * @id: the device type identification (used to match it with a driver).
*
* This function registers a virtio device. This vdev's parent is
* the rproc device.
@@ -334,6 +335,13 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
struct rproc_mem_entry *mem;
int ret;
+ if (rproc->ops->kick == NULL) {
+ ret = -EINVAL;
+ dev_err(dev, ".kick method not defined for %s",
+ rproc->name);
+ goto out;
+ }
+
/* Try to find dedicated vdev buffer carveout */
mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
if (mem) {
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index ee13d23b43a9..a6cbfa452764 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -190,7 +190,7 @@ static int st_rproc_start(struct rproc *rproc)
}
}
- dev_info(&rproc->dev, "Started from 0x%x\n", rproc->bootaddr);
+ dev_info(&rproc->dev, "Started from 0x%llx\n", rproc->bootaddr);
return 0;
@@ -233,7 +233,7 @@ static const struct rproc_ops st_rproc_ops = {
.parse_fw = st_rproc_parse_fw,
.load = rproc_elf_load_segments,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
- .sanity_check = rproc_elf_sanity_check,
+ .sanity_check = rproc_elf32_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 04492fead3c8..3cca8b65a8db 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc)
return 0;
}
-static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
void *va = NULL;
@@ -191,7 +191,7 @@ static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%pK\n",
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n",
da, len, va);
return va;
@@ -203,7 +203,7 @@ static const struct rproc_ops slim_rproc_ops = {
.da_to_va = slim_rproc_da_to_va,
.get_boot_addr = rproc_elf_get_boot_addr,
.load = rproc_elf_load_segments,
- .sanity_check = rproc_elf_sanity_check,
+ .sanity_check = rproc_elf32_sanity_check,
};
/**
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index a18f88044111..6a66dbf2df40 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -505,7 +505,7 @@ static struct rproc_ops st_rproc_ops = {
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
- .sanity_check = rproc_elf_sanity_check,
+ .sanity_check = rproc_elf32_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
@@ -602,7 +602,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
if (err)
- dev_warn(dev, "failed to get pdds\n");
+ dev_info(dev, "failed to get pdds\n");
rproc->auto_boot = of_property_read_bool(np, "st,auto-boot");
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 3984e585c847..b9349d684258 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -80,14 +80,14 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
return 0;
}
-static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
void *va = NULL;
int i;
u32 offset;
- if (len <= 0)
+ if (len == 0)
return NULL;
for (i = 0; i < WKUPM3_MEM_MAX; i++) {
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8e503881d9d6..f942a3302cdc 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -41,9 +41,6 @@ config RTC_HCTOSYS_DEVICE
device should record time in UTC, since the kernel won't do
timezone correction.
- The driver for this RTC device must be loaded before late_initcall
- functions run, so it must usually be statically linked.
-
This clock should be battery-backed, so that it reads the correct
time when the system boots from a power-off state. Otherwise, your
system will need an external clock source (like an NTP server).
@@ -241,6 +238,7 @@ config RTC_DRV_AS3722
config RTC_DRV_DS1307
tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057"
select REGMAP_I2C
+ select WATCHDOG_CORE if WATCHDOG
help
If you say yes here you get support for various compatible RTC
chips (often with battery backup) connected with I2C. This driver
@@ -1335,7 +1333,7 @@ config RTC_DRV_IMXDI
config RTC_DRV_FSL_FTM_ALARM
tristate "Freescale FlexTimer alarm timer"
- depends on ARCH_LAYERSCAPE || SOC_LS1021A
+ depends on ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
help
For the FlexTimer in LS1012A, LS1021A, LS1028A, LS1043A, LS1046A,
LS1088A, LS208xA, we can use FTM as the wakeup source.
@@ -1762,6 +1760,7 @@ config RTC_DRV_MXC_V2
config RTC_DRV_SNVS
tristate "Freescale SNVS RTC support"
select REGMAP_MMIO
+ depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
depends on OF
help
@@ -1807,6 +1806,16 @@ config RTC_DRV_MOXART
This driver can also be built as a module. If so, the module
will be called rtc-moxart
+config RTC_DRV_MT2712
+ tristate "MediaTek MT2712 SoC based RTC"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for the real time clock built into the
+ MediaTek MT2712 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-mt2712.
+
config RTC_DRV_MT6397
tristate "MediaTek PMIC based RTC"
depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 24c7dfa1bd7d..3b66ee99063f 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -6,7 +6,6 @@
ccflags-$(CONFIG_RTC_DEBUG) := -DDEBUG
obj-$(CONFIG_RTC_LIB) += lib.o
-obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o
@@ -106,6 +105,7 @@ obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o
obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o
obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
+obj-$(CONFIG_RTC_DRV_MT2712) += rtc-mt2712.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 9458e6d6686a..7c88d190c51f 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -34,6 +34,50 @@ static void rtc_device_release(struct device *dev)
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
/* Result of the last RTC to system clock attempt. */
int rtc_hctosys_ret = -ENODEV;
+
+/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
+ * whether it stores the closest value or the value with partial
+ * seconds truncated. However, it is important that we use it to store
+ * the truncated value. Otherwise it would be necessary, in an RTC
+ * sync function, to read both xtime.tv_sec and xtime.tv_nsec. On some
+ * processors (e.g. ARM), an atomic read of >32 bits is not possible,
+ * so storing the closest value would slow down the sync API. Instead
+ * we store the truncated value, and the best guess is to add 0.5s.
+ */
+
+static void rtc_hctosys(struct rtc_device *rtc)
+{
+ int err;
+ struct rtc_time tm;
+ struct timespec64 tv64 = {
+ .tv_nsec = NSEC_PER_SEC >> 1,
+ };
+
+ err = rtc_read_time(rtc, &tm);
+ if (err) {
+ dev_err(rtc->dev.parent,
+ "hctosys: unable to read the hardware clock\n");
+ goto err_read;
+ }
+
+ tv64.tv_sec = rtc_tm_to_time64(&tm);
+
+#if BITS_PER_LONG == 32
+ if (tv64.tv_sec > INT_MAX) {
+ err = -ERANGE;
+ goto err_read;
+ }
+#endif
+
+ err = do_settimeofday64(&tv64);
+
+ dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n",
+ &tm, (long long)tv64.tv_sec);
+
+err_read:
+ rtc_hctosys_ret = err;
+}
#endif
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
@@ -375,6 +419,11 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
dev_info(rtc->dev.parent, "registered as %s\n",
dev_name(&rtc->dev));
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+ if (!strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE))
+ rtc_hctosys(rtc);
+#endif
+
return 0;
}
EXPORT_SYMBOL_GPL(__rtc_register_device);
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
deleted file mode 100644
index a74d0d890600..000000000000
--- a/drivers/rtc/hctosys.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * RTC subsystem, initialize system time on startup
- *
- * Copyright (C) 2005 Tower Technologies
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/rtc.h>
-
-/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
- * whether it stores the most close value or the value with partial
- * seconds truncated. However, it is important that we use it to store
- * the truncated value. This is because otherwise it is necessary,
- * in an rtc sync function, to read both xtime.tv_sec and
- * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read
- * of >32bits is not possible. So storing the most close value would
- * slow down the sync API. So here we have the truncated value and
- * the best guess is to add 0.5s.
- */
-
-static int __init rtc_hctosys(void)
-{
- int err = -ENODEV;
- struct rtc_time tm;
- struct timespec64 tv64 = {
- .tv_nsec = NSEC_PER_SEC >> 1,
- };
- struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
-
- if (!rtc) {
- pr_info("unable to open rtc device (%s)\n",
- CONFIG_RTC_HCTOSYS_DEVICE);
- goto err_open;
- }
-
- err = rtc_read_time(rtc, &tm);
- if (err) {
- dev_err(rtc->dev.parent,
- "hctosys: unable to read the hardware clock\n");
- goto err_read;
- }
-
- tv64.tv_sec = rtc_tm_to_time64(&tm);
-
-#if BITS_PER_LONG == 32
- if (tv64.tv_sec > INT_MAX) {
- err = -ERANGE;
- goto err_read;
- }
-#endif
-
- err = do_settimeofday64(&tv64);
-
- dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n",
- &tm, (long long)tv64.tv_sec);
-
-err_read:
- rtc_class_close(rtc);
-
-err_open:
- rtc_hctosys_ret = err;
-
- return err;
-}
-
-late_initcall(rtc_hctosys);
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 4743b16a8d84..cc9b14ef90f1 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -28,7 +28,6 @@ struct pm860x_rtc_info {
int irq;
int vrtc;
- int (*sync)(unsigned int ticks);
};
#define REG_VRTC_MEAS1 0x7D
@@ -76,33 +75,6 @@ static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-/*
- * Calculate the next alarm time given the requested alarm time mask
- * and the current time.
- */
-static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
- struct rtc_time *alrm)
-{
- unsigned long next_time;
- unsigned long now_time;
-
- next->tm_year = now->tm_year;
- next->tm_mon = now->tm_mon;
- next->tm_mday = now->tm_mday;
- next->tm_hour = alrm->tm_hour;
- next->tm_min = alrm->tm_min;
- next->tm_sec = alrm->tm_sec;
-
- rtc_tm_to_time(now, &now_time);
- rtc_tm_to_time(next, &next_time);
-
- if (next_time < now_time) {
- /* Advance one day */
- next_time += 60 * 60 * 24;
- rtc_time_to_tm(next_time, next);
- }
-}
-
static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
@@ -123,7 +95,7 @@ static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
- rtc_time_to_tm(ticks, tm);
+ rtc_time64_to_tm(ticks, tm);
return 0;
}
@@ -140,7 +112,7 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
1900 + tm->tm_year);
return -EINVAL;
}
- rtc_tm_to_time(tm, &ticks);
+ ticks = rtc_tm_to_time64(tm);
/* load 32-bit read-only counter */
pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
@@ -155,8 +127,6 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF);
pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF);
- if (info->sync)
- info->sync(ticks);
return 0;
}
@@ -180,7 +150,7 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
- rtc_time_to_tm(ticks, &alrm->time);
+ rtc_time64_to_tm(ticks, &alrm->time);
ret = pm860x_reg_read(info->i2c, PM8607_RTC1);
alrm->enabled = (ret & ALARM_EN) ? 1 : 0;
alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 1 : 0;
@@ -190,7 +160,6 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pm860x_rtc_info *info = dev_get_drvdata(dev);
- struct rtc_time now_tm, alarm_tm;
unsigned long ticks, base, data;
unsigned char buf[8];
int mask;
@@ -203,18 +172,7 @@ static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
(buf[5] << 8) | buf[7];
- /* load 32-bit read-only counter */
- pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
- data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
- (buf[1] << 8) | buf[0];
- ticks = base + data;
- dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
- base, data, ticks);
-
- rtc_time_to_tm(ticks, &now_tm);
- rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
- /* get new ticks for alarm in 24 hours */
- rtc_tm_to_time(&alarm_tm, &ticks);
+ ticks = rtc_tm_to_time64(&alrm->time);
data = ticks - base;
buf[0] = data & 0xff;
@@ -309,20 +267,15 @@ static int pm860x_rtc_dt_init(struct platform_device *pdev,
return 0;
}
#else
-#define pm860x_rtc_dt_init(x, y) (-1)
+#define pm860x_rtc_dt_init(x, y) do { } while (0)
#endif
static int pm860x_rtc_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm860x_rtc_pdata *pdata = NULL;
struct pm860x_rtc_info *info;
- struct rtc_time tm;
- unsigned long ticks = 0;
int ret;
- pdata = dev_get_platdata(&pdev->dev);
-
info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_rtc_info),
GFP_KERNEL);
if (!info)
@@ -336,6 +289,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, info);
+ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc_dev))
+ return PTR_ERR(info->rtc_dev);
+
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
rtc_update_handler, IRQF_ONESHOT, "rtc",
info);
@@ -351,39 +308,14 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA);
pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA);
- ret = pm860x_rtc_read_time(&pdev->dev, &tm);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to read initial time.\n");
- return ret;
- }
- if ((tm.tm_year < 70) || (tm.tm_year > 138)) {
- tm.tm_year = 70;
- tm.tm_mon = 0;
- tm.tm_mday = 1;
- tm.tm_hour = 0;
- tm.tm_min = 0;
- tm.tm_sec = 0;
- ret = pm860x_rtc_set_time(&pdev->dev, &tm);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to set initial time.\n");
- return ret;
- }
- }
- rtc_tm_to_time(&tm, &ticks);
- if (pm860x_rtc_dt_init(pdev, info)) {
- if (pdata && pdata->sync) {
- pdata->sync(ticks);
- info->sync = pdata->sync;
- }
- }
+ pm860x_rtc_dt_init(pdev, info);
+
+ info->rtc_dev->ops = &pm860x_rtc_ops;
+ info->rtc_dev->range_max = U32_MAX;
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc",
- &pm860x_rtc_ops, THIS_MODULE);
- ret = PTR_ERR(info->rtc_dev);
- if (IS_ERR(info->rtc_dev)) {
- dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ ret = rtc_register_device(info->rtc_dev);
+ if (ret)
return ret;
- }
/*
* enable internal XO instead of internal 3.25MHz clock since it can
@@ -393,12 +325,6 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
#ifdef VRTC_CALIBRATION
/* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */
- if (pm860x_rtc_dt_init(pdev, info)) {
- if (pdata && pdata->vrtc)
- info->vrtc = pdata->vrtc & 0x3;
- else
- info->vrtc = 1;
- }
pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC);
/* calibrate VRTC */
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 8492ffed4ca6..3d60f3283f11 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -100,7 +100,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
secs = secs / COUNTS_PER_SEC;
secs = secs + (mins * 60);
- rtc_time_to_tm(secs, tm);
+ rtc_time64_to_tm(secs, tm);
return 0;
}
@@ -110,7 +110,7 @@ static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
unsigned long no_secs, no_mins, secs = 0;
- rtc_tm_to_time(tm, &secs);
+ secs = rtc_tm_to_time64(tm);
no_mins = secs / 60;
@@ -168,7 +168,7 @@ static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
secs = mins * 60;
- rtc_time_to_tm(secs, &alarm->time);
+ rtc_time64_to_tm(secs, &alarm->time);
return 0;
}
@@ -188,7 +188,7 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct rtc_time curtm;
/* Get the number of seconds since 1970 */
- rtc_tm_to_time(&alarm->time, &secs);
+ secs = rtc_tm_to_time64(&alarm->time);
/*
* Check whether alarm is set less than 1min.
@@ -196,7 +196,7 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
* return -EINVAL, so UIE EMUL can take it up, in case of UIE_ON
*/
ab8500_rtc_read_time(dev, &curtm); /* Read current time */
- rtc_tm_to_time(&curtm, &cursec);
+ cursec = rtc_tm_to_time64(&curtm);
if ((secs - cursec) < 59) {
dev_dbg(dev, "Alarm less than 1 minute not supported\r\n");
return -EINVAL;
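
The conversions in this driver (and in the RTC drivers that follow)
all apply the same y2038 treatment: the 32-bit rtc_tm_to_time() and
rtc_time_to_tm() pair gives way to the time64_t variants, which cannot
fail and keep working past January 2038. The pattern in isolation, as
a sketch against the kernel RTC API:

#include <linux/rtc.h>

static time64_t secs_from_tm(struct rtc_time *tm)
{
	/* old: rtc_tm_to_time(tm, &secs) returned int and could fail */
	return rtc_tm_to_time64(tm);	/* new: infallible, 64-bit wide */
}

static void tm_from_secs(time64_t secs, struct rtc_time *tm)
{
	rtc_time64_to_tm(secs, tm);	/* no overflow at the 2038 rollover */
}
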
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index 7c5530c71285..791bebcb6f47 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -34,7 +34,7 @@ static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm)
t = alchemy_rdsys(AU1000_SYS_TOYREAD);
- rtc_time_to_tm(t, tm);
+ rtc_time64_to_tm(t, tm);
return 0;
}
@@ -43,7 +43,7 @@ static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned long t;
- rtc_tm_to_time(tm, &t);
+ t = rtc_tm_to_time64(tm);
alchemy_wrsys(t, AU1000_SYS_TOYWRITE);
@@ -65,17 +65,13 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtcdev;
unsigned long t;
- int ret;
t = alchemy_rdsys(AU1000_SYS_CNTRCTRL);
if (!(t & CNTR_OK)) {
dev_err(&pdev->dev, "counters not working; aborting.\n");
- ret = -ENODEV;
- goto out_err;
+ return -ENODEV;
}
- ret = -ETIMEDOUT;
-
/* set counter0 tickrate to 1Hz if necessary */
if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767) {
/* wait until hardware gives access to TRIM register */
@@ -88,7 +84,7 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
* counters are unusable.
*/
dev_err(&pdev->dev, "timeout waiting for access\n");
- goto out_err;
+ return -ETIMEDOUT;
}
/* set 1Hz TOY tick rate */
@@ -99,19 +95,16 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S)
msleep(1);
- rtcdev = devm_rtc_device_register(&pdev->dev, "rtc-au1xxx",
- &au1xtoy_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtcdev)) {
- ret = PTR_ERR(rtcdev);
- goto out_err;
- }
+ rtcdev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdev))
+ return PTR_ERR(rtcdev);
- platform_set_drvdata(pdev, rtcdev);
+ rtcdev->ops = &au1xtoy_rtc_ops;
+ rtcdev->range_max = U32_MAX;
- return 0;
+ platform_set_drvdata(pdev, rtcdev);
-out_err:
- return ret;
+ return rtc_register_device(rtcdev);
}
static struct platform_driver au1xrtc_driver = {
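
The probe rework above is the same modernization applied across these
RTC drivers: allocate the device, fill in the ops and the hardware's
valid time range, then register as the final step. A condensed probe
sketch using that idiom (driver names hypothetical):

static int my_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &my_rtc_ops;
	rtc->range_max = U32_MAX;	/* e.g. a 32-bit seconds counter */

	platform_set_drvdata(pdev, rtc);

	/* register last, once the device is fully configured */
	return rtc_register_device(rtc);
}

Declaring range_min/range_max lets the core reject times the hardware
cannot represent, instead of each driver open-coding that check.
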
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index bbbb1f07c91f..4492b770422c 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -542,10 +542,8 @@ static int bd70528_probe(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, irq_name);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq\n");
+ if (irq < 0)
return irq;
- }
platform_set_drvdata(pdev, bd_rtc);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 82bfe009a50f..bcc96ab7793f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -649,10 +649,11 @@ static struct cmos_rtc cmos_rtc;
static irqreturn_t cmos_interrupt(int irq, void *p)
{
+ unsigned long flags;
u8 irqstat;
u8 rtc_control;
- spin_lock(&rtc_lock);
+ spin_lock_irqsave(&rtc_lock, flags);
/* When the HPET interrupt handler calls us, the interrupt
* status is passed as arg1 instead of the irq number. But
@@ -686,7 +687,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
}
- spin_unlock(&rtc_lock);
+ spin_unlock_irqrestore(&rtc_lock, flags);
if (is_intr(irqstat)) {
rtc_update_irq(p, 1, irqstat);
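
The irqsave conversion above makes cmos_interrupt() safe to call from
code paths where interrupts are not already disabled, rather than only
from hardirq context. The rule in miniature (lock name illustrative):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static irqreturn_t my_handler(int irq, void *p)
{
	unsigned long flags;

	/* plain spin_lock() would be fine only if IRQs were known off */
	spin_lock_irqsave(&my_lock, flags);
	/* ... touch state shared with IRQs-enabled callers ... */
	spin_unlock_irqrestore(&my_lock, flags);

	return IRQ_HANDLED;
}
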
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index 6a3b70fd7e1f..a603f1f21125 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -56,14 +56,14 @@ static void cpcap2rtc_time(struct rtc_time *rtc, struct cpcap_time *cpcap)
tod = (cpcap->tod1 & TOD1_MASK) | ((cpcap->tod2 & TOD2_MASK) << 8);
time = tod + ((cpcap->day & DAY_MASK) * SECS_PER_DAY);
- rtc_time_to_tm(time, rtc);
+ rtc_time64_to_tm(time, rtc);
}
static void rtc2cpcap_time(struct cpcap_time *cpcap, struct rtc_time *rtc)
{
unsigned long time;
- rtc_tm_to_time(rtc, &time);
+ time = rtc_tm_to_time64(rtc);
cpcap->day = time / SECS_PER_DAY;
time %= SECS_PER_DAY;
@@ -256,12 +256,13 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
return -ENODEV;
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_device_register(dev, "cpcap_rtc",
- &cpcap_rtc_ops, THIS_MODULE);
-
+ rtc->rtc_dev = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
+ rtc->rtc_dev->ops = &cpcap_rtc_ops;
+ rtc->rtc_dev->range_max = (1 << 14) * SECS_PER_DAY - 1;
+
err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor);
if (err)
return err;
@@ -298,7 +299,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
/* ignore error and continue without wakeup support */
}
- return 0;
+ return rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id cpcap_rtc_of_match[] = {
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 204eb7cf1aa4..58de10da37b1 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -103,13 +103,11 @@ static int da9052_set_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
int ret;
uint8_t v[3];
- ret = rtc_tm_to_time(rtc_tm, &alm_time);
- if (ret != 0)
- return ret;
+ alm_time = rtc_tm_to_time64(rtc_tm);
if (rtc_tm->tm_sec > 0) {
alm_time += 60 - rtc_tm->tm_sec;
- rtc_time_to_tm(alm_time, rtc_tm);
+ rtc_time64_to_tm(alm_time, rtc_tm);
}
BUG_ON(rtc_tm->tm_sec); /* it will cause repeated irqs if not zero */
@@ -298,12 +296,18 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc_err(rtc, "Failed to disable TICKS: %d\n", ret);
device_init_wakeup(&pdev->dev, true);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &da9052_rtc_ops, THIS_MODULE);
-
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc))
return PTR_ERR(rtc->rtc);
+ rtc->rtc->ops = &da9052_rtc_ops;
+ rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->rtc->range_max = RTC_TIMESTAMP_END_2063;
+
+ ret = rtc_register_device(rtc->rtc);
+ if (ret)
+ return ret;
+
ret = da9052_request_irq(rtc->da9052, DA9052_IRQ_ALARM, "ALM",
da9052_rtc_irq, rtc);
if (ret != 0) {
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 390b7351e0fe..73f87a17cdf3 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -227,7 +227,7 @@ davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
return ret;
}
-static int convertfromdays(u16 days, struct rtc_time *tm)
+static void convertfromdays(u16 days, struct rtc_time *tm)
{
int tmp_days, year, mon;
@@ -250,24 +250,17 @@ static int convertfromdays(u16 days, struct rtc_time *tm)
break;
}
}
- return 0;
}
-static int convert2days(u16 *days, struct rtc_time *tm)
+static void convert2days(u16 *days, struct rtc_time *tm)
{
int i;
*days = 0;
- /* epoch == 1900 */
- if (tm->tm_year < 100 || tm->tm_year > 199)
- return -EINVAL;
-
for (i = 2000; i < 1900 + tm->tm_year; i++)
*days += rtc_year_days(1, 12, i);
*days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year);
-
- return 0;
}
static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -300,8 +293,7 @@ static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
days <<= 8;
days |= day0;
- if (convertfromdays(days, tm) < 0)
- return -EINVAL;
+ convertfromdays(days, tm);
return 0;
}
@@ -313,8 +305,7 @@ static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm)
u8 rtc_cctrl;
unsigned long flags;
- if (convert2days(&days, tm) < 0)
- return -EINVAL;
+ convert2days(&days, tm);
spin_lock_irqsave(&davinci_rtc_lock, flags);
@@ -396,8 +387,7 @@ static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
days <<= 8;
days |= day0;
- if (convertfromdays(days, &alm->time) < 0)
- return -EINVAL;
+ convertfromdays(days, &alm->time);
alm->pending = !!(rtcss_read(davinci_rtc,
PRTCSS_RTC_CCTRL) &
@@ -413,29 +403,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
unsigned long flags;
u16 days;
- if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0
- && alm->time.tm_year < 0) {
- struct rtc_time tm;
- unsigned long now, then;
-
- davinci_rtc_read_time(dev, &tm);
- rtc_tm_to_time(&tm, &now);
-
- alm->time.tm_mday = tm.tm_mday;
- alm->time.tm_mon = tm.tm_mon;
- alm->time.tm_year = tm.tm_year;
- rtc_tm_to_time(&alm->time, &then);
-
- if (then < now) {
- rtc_time_to_tm(now + 24 * 60 * 60, &tm);
- alm->time.tm_mday = tm.tm_mday;
- alm->time.tm_mon = tm.tm_mon;
- alm->time.tm_year = tm.tm_year;
- }
- }
-
- if (convert2days(&days, &alm->time) < 0)
- return -EINVAL;
+ convert2days(&days, &alm->time);
spin_lock_irqsave(&davinci_rtc_lock, flags);
@@ -485,13 +453,13 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, davinci_rtc);
- davinci_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &davinci_rtc_ops, THIS_MODULE);
- if (IS_ERR(davinci_rtc->rtc)) {
- dev_err(dev, "unable to register RTC device, err %d\n",
- ret);
+ davinci_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(davinci_rtc->rtc))
return PTR_ERR(davinci_rtc->rtc);
- }
+
+ davinci_rtc->rtc->ops = &davinci_rtc_ops;
+ davinci_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ davinci_rtc->rtc->range_max = RTC_TIMESTAMP_BEGIN_2000 + (1 << 16) * 86400ULL - 1;
rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
@@ -516,7 +484,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
- return 0;
+ return rtc_register_device(davinci_rtc->rtc);
}
static int __exit davinci_rtc_remove(struct platform_device *pdev)
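For davinci the day counter is 16 bits wide and anchored at 2000-01-01, which is exactly what the new range_max above encodes. A standalone check of that boundary (946684800 is the value of RTC_TIMESTAMP_BEGIN_2000; needs a 64-bit time_t):

#include <stdio.h>
#include <time.h>

int main(void)
{
	/* start of 2000 + 2^16 days - 1 second */
	long long last = 946684800LL + (1LL << 16) * 86400 - 1;
	time_t t = (time_t)last;
	char buf[64];

	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&t));
	printf("last representable: %s UTC\n", buf); /* expect 2179-06-06 23:59:59 */
	return 0;
}

With the range declared, the hand-rolled year checks that convert2days() used to do become dead code, which is why they go away above.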
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 4420fbf2f8fe..a3d790889eea 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -325,17 +325,13 @@ static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
u8 buf[1 + DS1305_ALM_LEN];
/* convert desired alarm to time_t */
- status = rtc_tm_to_time(&alm->time, &later);
- if (status < 0)
- return status;
+ later = rtc_tm_to_time64(&alm->time);
/* Read current time as time_t */
status = ds1305_get_time(dev, &tm);
if (status < 0)
return status;
- status = rtc_tm_to_time(&tm, &now);
- if (status < 0)
- return status;
+ now = rtc_tm_to_time64(&tm);
/* make sure alarm fires within the next 24 hours */
if (later <= now)
@@ -694,6 +690,8 @@ static int ds1305_probe(struct spi_device *spi)
return PTR_ERR(ds1305->rtc);
ds1305->rtc->ops = &ds1305_ops;
+ ds1305->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ ds1305->rtc->range_max = RTC_TIMESTAMP_END_2099;
ds1305_nvmem_cfg.priv = ds1305;
ds1305->rtc->nvram_old_abi = true;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 1f7e8aefc1eb..49702942bb08 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -22,6 +22,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
+#include <linux/watchdog.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -144,6 +145,15 @@ enum ds_type {
# define M41TXX_BIT_CALIB_SIGN BIT(5)
# define M41TXX_M_CALIBRATION GENMASK(4, 0)
+#define DS1388_REG_WDOG_HUN_SECS 0x08
+#define DS1388_REG_WDOG_SECS 0x09
+#define DS1388_REG_FLAG 0x0b
+# define DS1388_BIT_WF BIT(6)
+# define DS1388_BIT_OSF BIT(7)
+#define DS1388_REG_CONTROL 0x0c
+# define DS1388_BIT_RST BIT(0)
+# define DS1388_BIT_WDE BIT(1)
+
/* negative offset step is -2.034ppm */
#define M41TXX_NEG_OFFSET_STEP_PPB 2034
/* positive offset step is +4.068ppm */
@@ -252,6 +262,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
if (tmp & DS1340_BIT_OSF)
return -EINVAL;
break;
+ case ds_1388:
+ ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp);
+ if (ret)
+ return ret;
+ if (tmp & DS1388_BIT_OSF)
+ return -EINVAL;
+ break;
case mcp794xx:
if (!(tmp & MCP794XX_BIT_ST))
return -EINVAL;
@@ -845,6 +862,72 @@ static int m41txx_rtc_set_offset(struct device *dev, long offset)
ctrl_reg);
}
+#ifdef CONFIG_WATCHDOG_CORE
+static int ds1388_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+ u8 regs[2];
+ int ret;
+
+ ret = regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
+ DS1388_BIT_WF, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
+ DS1388_BIT_WDE | DS1388_BIT_RST, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Watchdog timeouts are measured in seconds, so ignore the
+ * hundredths-of-seconds field.
+ */
+ regs[0] = 0;
+ regs[1] = bin2bcd(wdt_dev->timeout);
+
+ ret = regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
+ sizeof(regs));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
+ DS1388_BIT_WDE | DS1388_BIT_RST,
+ DS1388_BIT_WDE | DS1388_BIT_RST);
+}
+
+static int ds1388_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL,
+ DS1388_BIT_WDE | DS1388_BIT_RST, 0);
+}
+
+static int ds1388_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+ u8 regs[2];
+
+ return regmap_bulk_read(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
+ sizeof(regs));
+}
+
+static int ds1388_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int val)
+{
+ struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev);
+ u8 regs[2];
+
+ wdt_dev->timeout = val;
+ regs[0] = 0;
+ regs[1] = bin2bcd(wdt_dev->timeout);
+
+ return regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs,
+ sizeof(regs));
+}
+#endif
+
static const struct rtc_class_ops rx8130_rtc_ops = {
.read_time = ds1307_get_time,
.set_time = ds1307_set_time,
@@ -1567,6 +1650,48 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
#endif /* CONFIG_COMMON_CLK */
+#ifdef CONFIG_WATCHDOG_CORE
+static const struct watchdog_info ds1388_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "DS1388 watchdog",
+};
+
+static const struct watchdog_ops ds1388_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = ds1388_wdt_start,
+ .stop = ds1388_wdt_stop,
+ .ping = ds1388_wdt_ping,
+ .set_timeout = ds1388_wdt_set_timeout,
+};
+
+static void ds1307_wdt_register(struct ds1307 *ds1307)
+{
+ struct watchdog_device *wdt;
+
+ if (ds1307->type != ds_1388)
+ return;
+
+ wdt = devm_kzalloc(ds1307->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return;
+
+ wdt->info = &ds1388_wdt_info;
+ wdt->ops = &ds1388_wdt_ops;
+ wdt->timeout = 99;
+ wdt->max_timeout = 99;
+ wdt->min_timeout = 1;
+
+ watchdog_init_timeout(wdt, 0, ds1307->dev);
+ watchdog_set_drvdata(wdt, ds1307);
+ devm_watchdog_register_device(ds1307->dev, wdt);
+}
+#else
+static void ds1307_wdt_register(struct ds1307 *ds1307)
+{
+}
+#endif /* CONFIG_WATCHDOG_CORE */
+
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -1856,6 +1981,7 @@ static int ds1307_probe(struct i2c_client *client,
ds1307_hwmon_register(ds1307);
ds1307_clks_register(ds1307);
+ ds1307_wdt_register(ds1307);
return 0;
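The DS1388 watchdog added above is driven through the stock watchdog core, so from userspace it behaves like any other /dev/watchdogN. A hedged usage sketch; the node number is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog0", O_WRONLY);
	int timeout = 30;	/* driver advertises 1..99 seconds */

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* ds1388_wdt_set_timeout */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ds1388_wdt_ping */
	write(fd, "V", 1);	/* magic close; WDIOF_MAGICCLOSE is set above */
	close(fd);		/* ds1388_wdt_stop */
	return 0;
}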
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 6e9ddcd03992..9c51a12cf70f 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -164,7 +164,7 @@ static int ds1374_read_time(struct device *dev, struct rtc_time *time)
ret = ds1374_read_rtc(client, &itime, DS1374_REG_TOD0, 4);
if (!ret)
- rtc_time_to_tm(itime, time);
+ rtc_time64_to_tm(itime, time);
return ret;
}
@@ -172,9 +172,8 @@ static int ds1374_read_time(struct device *dev, struct rtc_time *time)
static int ds1374_set_time(struct device *dev, struct rtc_time *time)
{
struct i2c_client *client = to_i2c_client(dev);
- unsigned long itime;
+ unsigned long itime = rtc_tm_to_time64(time);
- rtc_tm_to_time(time, &itime);
return ds1374_write_rtc(client, itime, DS1374_REG_TOD0, 4);
}
@@ -212,7 +211,7 @@ static int ds1374_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (ret)
goto out;
- rtc_time_to_tm(now + cur_alarm, &alarm->time);
+ rtc_time64_to_tm(now + cur_alarm, &alarm->time);
alarm->enabled = !!(cr & DS1374_REG_CR_WACE);
alarm->pending = !!(sr & DS1374_REG_SR_AF);
@@ -237,8 +236,8 @@ static int ds1374_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (ret < 0)
return ret;
- rtc_tm_to_time(&alarm->time, &new_alarm);
- rtc_tm_to_time(&now, &itime);
+ new_alarm = rtc_tm_to_time64(&alarm->time);
+ itime = rtc_tm_to_time64(&now);
/* This can happen due to races, in addition to dates that are
* truly in the past. To avoid requiring the caller to check for
@@ -620,6 +619,10 @@ static int ds1374_probe(struct i2c_client *client,
if (!ds1374)
return -ENOMEM;
+ ds1374->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(ds1374->rtc))
+ return PTR_ERR(ds1374->rtc);
+
ds1374->client = client;
i2c_set_clientdata(client, ds1374);
@@ -641,12 +644,12 @@ static int ds1374_probe(struct i2c_client *client,
device_set_wakeup_capable(&client->dev, 1);
}
- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name,
- &ds1374_rtc_ops, THIS_MODULE);
- if (IS_ERR(ds1374->rtc)) {
- dev_err(&client->dev, "unable to register the class device\n");
- return PTR_ERR(ds1374->rtc);
- }
+ ds1374->rtc->ops = &ds1374_rtc_ops;
+ ds1374->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(ds1374->rtc);
+ if (ret)
+ return ret;
#ifdef CONFIG_RTC_DRV_DS1374_WDT
save_client = client;
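range_max = U32_MAX above says the DS1374 keeps time in a plain 32-bit seconds counter; several later hunks in this series (pl030, pm8xxx, puv3, sa1100, starfire) declare the same limit. Where that counter actually ends, as a quick userspace check:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t t = (time_t)UINT32_MAX;	/* needs a 64-bit time_t */
	char buf[64];

	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&t));
	printf("u32 counter wraps after %s UTC\n", buf); /* 2106-02-07 06:28:15 */
	return 0;
}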
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 9e6e994cce99..756af62b0486 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -20,6 +20,7 @@
#include <linux/fsl/ftm.h>
#include <linux/rtc.h>
#include <linux/time.h>
+#include <linux/acpi.h>
#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
@@ -151,6 +152,8 @@ static irqreturn_t ftm_rtc_alarm_interrupt(int irq, void *dev)
{
struct ftm_rtc *rtc = dev;
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
ftm_irq_acknowledge(rtc);
ftm_irq_disable(rtc);
ftm_clean_alarm(rtc);
@@ -242,7 +245,6 @@ static const struct rtc_class_ops ftm_rtc_ops = {
static int ftm_rtc_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
int irq;
int ret;
struct ftm_rtc *rtc;
@@ -265,10 +267,10 @@ static int ftm_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc->base);
}
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "unable to get IRQ from DT, %d\n", irq);
- return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "can't get irq number\n");
+ return irq;
}
ret = devm_request_irq(&pdev->dev, irq, ftm_rtc_alarm_interrupt,
@@ -278,7 +280,9 @@ static int ftm_rtc_probe(struct platform_device *pdev)
return ret;
}
- rtc->big_endian = of_property_read_bool(np, "big-endian");
+ rtc->big_endian =
+ device_property_read_bool(&pdev->dev, "big-endian");
+
rtc->alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV;
rtc->rtc_dev->ops = &ftm_rtc_ops;
@@ -305,11 +309,18 @@ static const struct of_device_id ftm_rtc_match[] = {
{ },
};
+static const struct acpi_device_id ftm_imx_acpi_ids[] = {
+ {"NXP0011",},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ftm_imx_acpi_ids);
+
static struct platform_driver ftm_rtc_driver = {
.probe = ftm_rtc_probe,
.driver = {
.name = "ftm-alarm",
.of_match_table = ftm_rtc_match,
+ .acpi_match_table = ACPI_PTR(ftm_imx_acpi_ids),
},
};
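The fsl-ftm changes above are a textbook DT-to-firmware-agnostic conversion: platform_get_irq() instead of irq_of_parse_and_map(), device_property_read_bool() instead of of_property_read_bool(), plus an acpi_match_table. A minimal sketch of a driver probing identically under DT and ACPI; baz_* and the ACPI HID are hypothetical:

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int baz_probe(struct platform_device *pdev)
{
	/* resolves via DT or ACPI _DSD, whichever enumerated the device */
	bool be = device_property_read_bool(&pdev->dev, "big-endian");

	dev_info(&pdev->dev, "big-endian: %d\n", be);
	return 0;
}

static const struct of_device_id baz_of_ids[] = {
	{ .compatible = "vendor,baz" },
	{ }
};

static const struct acpi_device_id baz_acpi_ids[] = {
	{ "HYPO0001" },		/* hypothetical HID */
	{ }
};

static struct platform_driver baz_driver = {
	.probe = baz_probe,
	.driver = {
		.name = "baz",
		.of_match_table = baz_of_ids,
		.acpi_match_table = ACPI_PTR(baz_acpi_ids),
	},
};
module_platform_driver(baz_driver);

MODULE_LICENSE("GPL");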
diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c
index cf2c12107f2b..a5f59e6f862e 100644
--- a/drivers/rtc/rtc-imx-sc.c
+++ b/drivers/rtc/rtc-imx-sc.c
@@ -37,7 +37,7 @@ struct imx_sc_msg_timer_rtc_set_alarm {
u8 hour;
u8 min;
u8 sec;
-} __packed;
+} __packed __aligned(4);
static int imx_sc_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
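The one-liner above for imx-sc is subtle: tagging the SCU message struct __packed alone lowers its alignment to 1, while the firmware IPC layer copies these messages as 32-bit words; __packed __aligned(4) keeps the byte-exact layout and restores the word alignment the transport expects (that is my reading of the change). The difference, shown on a simplified stand-in struct:

#include <stdint.h>

struct msg_packed {
	uint32_t hdr;
	uint32_t time;
	uint8_t day, hour, min, sec;
} __attribute__((packed));

struct msg_packed_aligned {
	uint32_t hdr;
	uint32_t time;
	uint8_t day, hour, min, sec;
} __attribute__((packed, aligned(4)));

/* identical byte layout either way ... */
_Static_assert(sizeof(struct msg_packed) == 12, "no padding");
_Static_assert(sizeof(struct msg_packed_aligned) == 12, "no padding");
/* ... but only the second keeps 4-byte alignment for word copies */
_Static_assert(_Alignof(struct msg_packed) == 1, "packed drops alignment");
_Static_assert(_Alignof(struct msg_packed_aligned) == 4, "restored");

int main(void) { return 0; }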
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 18023e472cbc..e4c719085c31 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -46,6 +46,7 @@
enum jz4740_rtc_type {
ID_JZ4740,
+ ID_JZ4760,
ID_JZ4780,
};
@@ -106,7 +107,7 @@ static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg,
{
int ret = 0;
- if (rtc->type >= ID_JZ4780)
+ if (rtc->type >= ID_JZ4760)
ret = jz4780_rtc_enable_write(rtc);
if (ret == 0)
ret = jz4740_rtc_wait_write_ready(rtc);
@@ -298,6 +299,7 @@ static void jz4740_rtc_power_off(void)
static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
+ { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
{},
};
@@ -372,13 +374,14 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (!pm_power_off) {
/* Default: 60ms */
rtc->reset_pin_assert_time = 60;
- of_property_read_u32(np, "reset-pin-assert-time-ms",
+ of_property_read_u32(np,
+ "ingenic,reset-pin-assert-time-ms",
&rtc->reset_pin_assert_time);
/* Default: 100ms */
rtc->min_wakeup_pin_assert_time = 100;
of_property_read_u32(np,
- "min-wakeup-pin-assert-time-ms",
+ "ingenic,min-wakeup-pin-assert-time-ms",
&rtc->min_wakeup_pin_assert_time);
dev_for_power_off = &pdev->dev;
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index e8194f1f01a8..92f19bf997b2 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -160,15 +160,10 @@ static int m48t35_probe(struct platform_device *pdev)
return -ENOMEM;
priv->size = resource_size(res);
- /*
- * kludge: remove the #ifndef after ioc3 resource
- * conflicts are resolved
- */
-#ifndef CONFIG_SGI_IP27
if (!devm_request_mem_region(&pdev->dev, res->start, priv->size,
pdev->name))
return -EBUSY;
-#endif
+
priv->baseaddr = res->start;
priv->reg = devm_ioremap(&pdev->dev, priv->baseaddr, priv->size);
if (!priv->reg)
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 15a9d0278778..3040844129ce 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -111,7 +111,7 @@ static int mpc5121_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
now = in_be32(&regs->actual_time) + in_be32(&regs->target_time);
- rtc_time_to_tm(now, tm);
+ rtc_time64_to_tm(now, tm);
/*
* update second minute hour registers
@@ -126,16 +126,14 @@ static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
- int ret;
unsigned long now;
/*
* The actual_time register is read only so we write the offset
* between it and linux time to the target_time register.
*/
- ret = rtc_tm_to_time(tm, &now);
- if (ret == 0)
- out_be32(&regs->target_time, now - in_be32(&regs->actual_time));
+ now = rtc_tm_to_time64(tm);
+ out_be32(&regs->target_time, now - in_be32(&regs->actual_time));
/*
* update second minute hour registers
@@ -315,8 +313,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
if (!rtc)
return -ENOMEM;
- rtc->regs = of_iomap(op->dev.of_node, 0);
- if (!rtc->regs) {
+ rtc->regs = devm_platform_ioremap_resource(op, 0);
+ if (IS_ERR(rtc->regs)) {
dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
return -ENOSYS;
}
@@ -326,8 +324,8 @@ static int mpc5121_rtc_probe(struct platform_device *op)
platform_set_drvdata(op, rtc);
rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1);
- err = request_irq(rtc->irq, mpc5121_rtc_handler, 0,
- "mpc5121-rtc", &op->dev);
+ err = devm_request_irq(&op->dev, rtc->irq, mpc5121_rtc_handler, 0,
+ "mpc5121-rtc", &op->dev);
if (err) {
dev_err(&op->dev, "%s: could not request irq: %i\n",
__func__, rtc->irq);
@@ -335,14 +333,26 @@ static int mpc5121_rtc_probe(struct platform_device *op)
}
rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0);
- err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd,
- 0, "mpc5121-rtc_upd", &op->dev);
+ err = devm_request_irq(&op->dev, rtc->irq_periodic,
+ mpc5121_rtc_handler_upd, 0, "mpc5121-rtc_upd",
+ &op->dev);
if (err) {
dev_err(&op->dev, "%s: could not request irq: %i\n",
__func__, rtc->irq_periodic);
goto out_dispose2;
}
+ rtc->rtc = devm_rtc_allocate_device(&op->dev);
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto out_dispose2;
+ }
+
+ rtc->rtc->ops = &mpc5200_rtc_ops;
+ rtc->rtc->uie_unsupported = 1;
+ rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */
+
if (of_device_is_compatible(op->dev.of_node, "fsl,mpc5121-rtc")) {
u32 ka;
ka = in_be32(&rtc->regs->keep_alive);
@@ -351,30 +361,26 @@ static int mpc5121_rtc_probe(struct platform_device *op)
"mpc5121-rtc: Battery or oscillator failure!\n");
out_be32(&rtc->regs->keep_alive, ka);
}
-
- rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5121-rtc",
- &mpc5121_rtc_ops, THIS_MODULE);
- } else {
- rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5200-rtc",
- &mpc5200_rtc_ops, THIS_MODULE);
+ rtc->rtc->ops = &mpc5121_rtc_ops;
+ /*
+ * This is a limitation of the driver, which abuses the target
+ * time register; the actual maximum year for the mpc5121 is
+ * also 4052.
+ */
+ rtc->rtc->range_min = 0;
+ rtc->rtc->range_max = U32_MAX;
}
- if (IS_ERR(rtc->rtc)) {
- err = PTR_ERR(rtc->rtc);
- goto out_free_irq;
- }
- rtc->rtc->uie_unsupported = 1;
+ err = rtc_register_device(rtc->rtc);
+ if (err)
+ goto out_dispose2;
return 0;
-out_free_irq:
- free_irq(rtc->irq_periodic, &op->dev);
out_dispose2:
irq_dispose_mapping(rtc->irq_periodic);
- free_irq(rtc->irq, &op->dev);
out_dispose:
irq_dispose_mapping(rtc->irq);
- iounmap(rtc->regs);
return err;
}
@@ -388,9 +394,6 @@ static int mpc5121_rtc_remove(struct platform_device *op)
out_8(&regs->alm_enable, 0);
out_8(&regs->int_enable, in_8(&regs->int_enable) & ~0x1);
- iounmap(rtc->regs);
- free_irq(rtc->irq, &op->dev);
- free_irq(rtc->irq_periodic, &op->dev);
irq_dispose_mapping(rtc->irq);
irq_dispose_mapping(rtc->irq_periodic);
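The mpc5121 comment above ("abuses the target time register") deserves a concrete model: actual_time is a read-only free-running counter, so set_time stores the difference to wall time in target_time and read_time adds the two back together. A plain C model of that trick; the reg_* variables stand in for the hardware registers:

#include <stdio.h>

static unsigned int reg_actual = 1000;	/* free-running, read-only */
static unsigned int reg_target;		/* writable offset register */

static unsigned long read_time(void)
{
	return (unsigned long)reg_actual + reg_target;
}

static void set_time(unsigned long now)
{
	reg_target = now - reg_actual;	/* store the offset, not the time */
}

int main(void)
{
	set_time(5000);
	reg_actual += 7;		/* hardware keeps ticking */
	printf("%lu\n", read_time());	/* 5007: the offset survives */
	return 0;
}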
diff --git a/drivers/rtc/rtc-mt2712.c b/drivers/rtc/rtc-mt2712.c
new file mode 100644
index 000000000000..581b8731fb8a
--- /dev/null
+++ b/drivers/rtc/rtc-mt2712.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Ran Bi <ran.bi@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define MT2712_BBPU 0x0000
+#define MT2712_BBPU_CLRPKY BIT(4)
+#define MT2712_BBPU_RELOAD BIT(5)
+#define MT2712_BBPU_CBUSY BIT(6)
+#define MT2712_BBPU_KEY (0x43 << 8)
+
+#define MT2712_IRQ_STA 0x0004
+#define MT2712_IRQ_STA_AL BIT(0)
+#define MT2712_IRQ_STA_TC BIT(1)
+
+#define MT2712_IRQ_EN 0x0008
+#define MT2712_IRQ_EN_AL BIT(0)
+#define MT2712_IRQ_EN_TC BIT(1)
+#define MT2712_IRQ_EN_ONESHOT BIT(2)
+
+#define MT2712_CII_EN 0x000c
+
+#define MT2712_AL_MASK 0x0010
+#define MT2712_AL_MASK_DOW BIT(4)
+
+#define MT2712_TC_SEC 0x0014
+#define MT2712_TC_MIN 0x0018
+#define MT2712_TC_HOU 0x001c
+#define MT2712_TC_DOM 0x0020
+#define MT2712_TC_DOW 0x0024
+#define MT2712_TC_MTH 0x0028
+#define MT2712_TC_YEA 0x002c
+
+#define MT2712_AL_SEC 0x0030
+#define MT2712_AL_MIN 0x0034
+#define MT2712_AL_HOU 0x0038
+#define MT2712_AL_DOM 0x003c
+#define MT2712_AL_DOW 0x0040
+#define MT2712_AL_MTH 0x0044
+#define MT2712_AL_YEA 0x0048
+
+#define MT2712_SEC_MASK 0x003f
+#define MT2712_MIN_MASK 0x003f
+#define MT2712_HOU_MASK 0x001f
+#define MT2712_DOM_MASK 0x001f
+#define MT2712_DOW_MASK 0x0007
+#define MT2712_MTH_MASK 0x000f
+#define MT2712_YEA_MASK 0x007f
+
+#define MT2712_POWERKEY1 0x004c
+#define MT2712_POWERKEY2 0x0050
+#define MT2712_POWERKEY1_KEY 0xa357
+#define MT2712_POWERKEY2_KEY 0x67d2
+
+#define MT2712_CON0 0x005c
+#define MT2712_CON1 0x0060
+
+#define MT2712_PROT 0x0070
+#define MT2712_PROT_UNLOCK1 0x9136
+#define MT2712_PROT_UNLOCK2 0x586a
+
+#define MT2712_WRTGR 0x0078
+
+#define MT2712_RTC_TIMESTAMP_END_2127 4985971199LL
+
+struct mt2712_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ int irq;
+ u8 irq_wake_enabled;
+ u8 powerlost;
+};
+
+static inline u32 mt2712_readl(struct mt2712_rtc *mt2712_rtc, u32 reg)
+{
+ return readl(mt2712_rtc->base + reg);
+}
+
+static inline void mt2712_writel(struct mt2712_rtc *mt2712_rtc,
+ u32 reg, u32 val)
+{
+ writel(val, mt2712_rtc->base + reg);
+}
+
+static void mt2712_rtc_write_trigger(struct mt2712_rtc *mt2712_rtc)
+{
+ unsigned long timeout = jiffies + HZ / 10;
+
+ mt2712_writel(mt2712_rtc, MT2712_WRTGR, 1);
+ while (1) {
+ if (!(mt2712_readl(mt2712_rtc, MT2712_BBPU)
+ & MT2712_BBPU_CBUSY))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&mt2712_rtc->rtc->dev,
+ "%s time out!\n", __func__);
+ break;
+ }
+ cpu_relax();
+ }
+}
+
+static void mt2712_rtc_writeif_unlock(struct mt2712_rtc *mt2712_rtc)
+{
+ mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK1);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+ mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK2);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+}
+
+static irqreturn_t rtc_irq_handler_thread(int irq, void *data)
+{
+ struct mt2712_rtc *mt2712_rtc = data;
+ u16 irqsta;
+
+ /* Clear interrupt */
+ irqsta = mt2712_readl(mt2712_rtc, MT2712_IRQ_STA);
+ if (irqsta & MT2712_IRQ_STA_AL) {
+ rtc_update_irq(mt2712_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void __mt2712_rtc_read_time(struct mt2712_rtc *mt2712_rtc,
+ struct rtc_time *tm, int *sec)
+{
+ tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC)
+ & MT2712_SEC_MASK;
+ tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_TC_MIN)
+ & MT2712_MIN_MASK;
+ tm->tm_hour = mt2712_readl(mt2712_rtc, MT2712_TC_HOU)
+ & MT2712_HOU_MASK;
+ tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_TC_DOM)
+ & MT2712_DOM_MASK;
+ tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_TC_MTH) - 1)
+ & MT2712_MTH_MASK;
+ tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_TC_YEA)
+ & MT2712_YEA_MASK) + 100;
+
+ *sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC) & MT2712_SEC_MASK;
+}
+
+static int mt2712_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ int sec;
+
+ if (mt2712_rtc->powerlost)
+ return -EINVAL;
+
+ do {
+ __mt2712_rtc_read_time(mt2712_rtc, tm, &sec);
+ } while (sec < tm->tm_sec); /* SEC has carried */
+
+ return 0;
+}
+
+static int mt2712_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+
+ mt2712_writel(mt2712_rtc, MT2712_TC_SEC, tm->tm_sec & MT2712_SEC_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_MIN, tm->tm_min & MT2712_MIN_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_HOU, tm->tm_hour & MT2712_HOU_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_DOM, tm->tm_mday & MT2712_DOM_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_MTH,
+ (tm->tm_mon + 1) & MT2712_MTH_MASK);
+ mt2712_writel(mt2712_rtc, MT2712_TC_YEA,
+ (tm->tm_year - 100) & MT2712_YEA_MASK);
+
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ mt2712_rtc->powerlost = false;
+
+ return 0;
+}
+
+static int mt2712_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alm->time;
+ u16 irqen;
+
+ irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN);
+ alm->enabled = !!(irqen & MT2712_IRQ_EN_AL);
+
+ tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_AL_SEC) & MT2712_SEC_MASK;
+ tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_AL_MIN) & MT2712_MIN_MASK;
+ tm->tm_hour = mt2712_readl(mt2712_rtc, MT2712_AL_HOU) & MT2712_HOU_MASK;
+ tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_AL_DOM) & MT2712_DOM_MASK;
+ tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_AL_MTH) - 1)
+ & MT2712_MTH_MASK;
+ tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_AL_YEA)
+ & MT2712_YEA_MASK) + 100;
+
+ return 0;
+}
+
+static int mt2712_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ u16 irqen;
+
+ irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN);
+ if (enabled)
+ irqen |= MT2712_IRQ_EN_AL;
+ else
+ irqen &= ~MT2712_IRQ_EN_AL;
+ mt2712_writel(mt2712_rtc, MT2712_IRQ_EN, irqen);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ return 0;
+}
+
+static int mt2712_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alm->time;
+
+ dev_dbg(&mt2712_rtc->rtc->dev, "set al time: %ptR, alm en: %d\n",
+ tm, alm->enabled);
+
+ mt2712_writel(mt2712_rtc, MT2712_AL_SEC,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_SEC)
+ & ~(MT2712_SEC_MASK)) | (tm->tm_sec & MT2712_SEC_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_MIN,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_MIN)
+ & ~(MT2712_MIN_MASK)) | (tm->tm_min & MT2712_MIN_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_HOU,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_HOU)
+ & ~(MT2712_HOU_MASK)) | (tm->tm_hour & MT2712_HOU_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_DOM,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_DOM)
+ & ~(MT2712_DOM_MASK)) | (tm->tm_mday & MT2712_DOM_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_MTH,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_MTH)
+ & ~(MT2712_MTH_MASK))
+ | ((tm->tm_mon + 1) & MT2712_MTH_MASK));
+ mt2712_writel(mt2712_rtc, MT2712_AL_YEA,
+ (mt2712_readl(mt2712_rtc, MT2712_AL_YEA)
+ & ~(MT2712_YEA_MASK))
+ | ((tm->tm_year - 100) & MT2712_YEA_MASK));
+
+ /* mask day of week */
+ mt2712_writel(mt2712_rtc, MT2712_AL_MASK, MT2712_AL_MASK_DOW);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ mt2712_rtc_alarm_irq_enable(dev, alm->enabled);
+
+ return 0;
+}
+
+/* Init RTC register */
+static void mt2712_rtc_hw_init(struct mt2712_rtc *mt2712_rtc)
+{
+ u32 p1, p2;
+
+ mt2712_writel(mt2712_rtc, MT2712_BBPU,
+ MT2712_BBPU_KEY | MT2712_BBPU_RELOAD);
+
+ mt2712_writel(mt2712_rtc, MT2712_CII_EN, 0);
+ mt2712_writel(mt2712_rtc, MT2712_AL_MASK, 0);
+ /* necessary before setting MT2712_POWERKEY1/2 */
+ mt2712_writel(mt2712_rtc, MT2712_CON0, 0x4848);
+ mt2712_writel(mt2712_rtc, MT2712_CON1, 0x0048);
+
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ p1 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY1);
+ p2 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY2);
+ if (p1 != MT2712_POWERKEY1_KEY || p2 != MT2712_POWERKEY2_KEY) {
+ mt2712_rtc->powerlost = true;
+ dev_dbg(&mt2712_rtc->rtc->dev,
+ "powerkey not set (lost power)\n");
+ } else {
+ mt2712_rtc->powerlost = false;
+ }
+
+ /* the RTC needs POWERKEY1/2 to match before entering normal work mode */
+ mt2712_writel(mt2712_rtc, MT2712_POWERKEY1, MT2712_POWERKEY1_KEY);
+ mt2712_writel(mt2712_rtc, MT2712_POWERKEY2, MT2712_POWERKEY2_KEY);
+ mt2712_rtc_write_trigger(mt2712_rtc);
+
+ mt2712_rtc_writeif_unlock(mt2712_rtc);
+}
+
+static const struct rtc_class_ops mt2712_rtc_ops = {
+ .read_time = mt2712_rtc_read_time,
+ .set_time = mt2712_rtc_set_time,
+ .read_alarm = mt2712_rtc_read_alarm,
+ .set_alarm = mt2712_rtc_set_alarm,
+ .alarm_irq_enable = mt2712_rtc_alarm_irq_enable,
+};
+
+static int mt2712_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct mt2712_rtc *mt2712_rtc;
+ int ret;
+
+ mt2712_rtc = devm_kzalloc(&pdev->dev,
+ sizeof(struct mt2712_rtc), GFP_KERNEL);
+ if (!mt2712_rtc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mt2712_rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mt2712_rtc->base))
+ return PTR_ERR(mt2712_rtc->base);
+
+ /* rtc hw init */
+ mt2712_rtc_hw_init(mt2712_rtc);
+
+ mt2712_rtc->irq = platform_get_irq(pdev, 0);
+ if (mt2712_rtc->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return mt2712_rtc->irq;
+ }
+
+ platform_set_drvdata(pdev, mt2712_rtc);
+
+ mt2712_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(mt2712_rtc->rtc))
+ return PTR_ERR(mt2712_rtc->rtc);
+
+ ret = devm_request_threaded_irq(&pdev->dev, mt2712_rtc->irq, NULL,
+ rtc_irq_handler_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(&mt2712_rtc->rtc->dev),
+ mt2712_rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ mt2712_rtc->irq, ret);
+ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+
+ mt2712_rtc->rtc->ops = &mt2712_rtc_ops;
+ mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127;
+
+ ret = rtc_register_device(mt2712_rtc->rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "register rtc device failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mt2712_rtc_suspend(struct device *dev)
+{
+ int wake_status = 0;
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev)) {
+ wake_status = enable_irq_wake(mt2712_rtc->irq);
+ if (!wake_status)
+ mt2712_rtc->irq_wake_enabled = true;
+ }
+
+ return 0;
+}
+
+static int mt2712_rtc_resume(struct device *dev)
+{
+ int wake_status = 0;
+ struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev) && mt2712_rtc->irq_wake_enabled) {
+ wake_status = disable_irq_wake(mt2712_rtc->irq);
+ if (!wake_status)
+ mt2712_rtc->irq_wake_enabled = false;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mt2712_pm_ops, mt2712_rtc_suspend,
+ mt2712_rtc_resume);
+#endif
+
+static const struct of_device_id mt2712_rtc_of_match[] = {
+ { .compatible = "mediatek,mt2712-rtc", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, mt2712_rtc_of_match);
+
+static struct platform_driver mt2712_rtc_driver = {
+ .driver = {
+ .name = "mt2712-rtc",
+ .of_match_table = mt2712_rtc_of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mt2712_pm_ops,
+#endif
+ },
+ .probe = mt2712_rtc_probe,
+};
+
+module_platform_driver(mt2712_rtc_driver);
+
+MODULE_DESCRIPTION("MediaTek MT2712 SoC based RTC Driver");
+MODULE_AUTHOR("Ran Bi <ran.bi@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 902d57dcd0d4..a8cfbde048f4 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -307,6 +307,14 @@ static const struct rtc_class_ops mxc_rtc_ops = {
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
};
+static void mxc_rtc_action(void *p)
+{
+ struct rtc_plat_data *pdata = p;
+
+ clk_disable_unprepare(pdata->clk_ref);
+ clk_disable_unprepare(pdata->clk_ipg);
+}
+
static int mxc_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
@@ -366,14 +374,20 @@ static int mxc_rtc_probe(struct platform_device *pdev)
pdata->clk_ref = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(pdata->clk_ref)) {
+ clk_disable_unprepare(pdata->clk_ipg);
dev_err(&pdev->dev, "unable to get ref clock!\n");
- ret = PTR_ERR(pdata->clk_ref);
- goto exit_put_clk_ipg;
+ return PTR_ERR(pdata->clk_ref);
}
ret = clk_prepare_enable(pdata->clk_ref);
+ if (ret) {
+ clk_disable_unprepare(pdata->clk_ipg);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, mxc_rtc_action, pdata);
if (ret)
- goto exit_put_clk_ipg;
+ return ret;
rate = clk_get_rate(pdata->clk_ref);
@@ -385,16 +399,14 @@ static int mxc_rtc_probe(struct platform_device *pdev)
reg = RTC_INPUT_CLK_38400HZ;
else {
dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
- ret = -EINVAL;
- goto exit_put_clk_ref;
+ return -EINVAL;
}
reg |= RTC_ENABLE_BIT;
writew(reg, (pdata->ioaddr + RTC_RTCCTL));
if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
dev_err(&pdev->dev, "hardware module can't be enabled!\n");
- ret = -EIO;
- goto exit_put_clk_ref;
+ return -EIO;
}
platform_set_drvdata(pdev, pdata);
@@ -417,29 +429,10 @@ static int mxc_rtc_probe(struct platform_device *pdev)
}
ret = rtc_register_device(rtc);
- if (ret)
- goto exit_put_clk_ref;
-
- return 0;
-
-exit_put_clk_ref:
- clk_disable_unprepare(pdata->clk_ref);
-exit_put_clk_ipg:
- clk_disable_unprepare(pdata->clk_ipg);
return ret;
}
-static int mxc_rtc_remove(struct platform_device *pdev)
-{
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(pdata->clk_ref);
- clk_disable_unprepare(pdata->clk_ipg);
-
- return 0;
-}
-
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
@@ -447,7 +440,6 @@ static struct platform_driver mxc_rtc_driver = {
},
.id_table = imx_rtc_devtype,
.probe = mxc_rtc_probe,
- .remove = mxc_rtc_remove,
};
module_platform_driver(mxc_rtc_driver)
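The mxc rework above (and the snvs one further down) is the devm_add_action_or_reset() idiom: instead of mirroring every clk_prepare_enable() with a goto label in probe and a matching line in .remove, a single devm action undoes it on both probe failure and unbind. Reduced to its essentials; the bar_* names are placeholders:

#include <linux/clk.h>
#include <linux/device.h>

static void bar_clk_off(void *data)
{
	clk_disable_unprepare(data);
}

static int bar_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;
	/* on failure the action runs immediately; otherwise it runs
	 * automatically on unbind, so .remove can be dropped */
	return devm_add_action_or_reset(dev, bar_clk_off, clk);
}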
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index d4ed20fb3194..c20fc7937dfa 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -9,7 +9,6 @@
* Copyright (C) 2014 Johan Hovold <johan@kernel.org>
*/
-#include <dt-bindings/gpio/gpio.h>
#include <linux/bcd.h>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 1db17ba1fc64..7a87f461bec8 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -9,6 +9,7 @@
* Copyright (C) 2019 Micro Crystal AG
* Author: Alexandre Belloni <alexandre.belloni@bootlin.com>
*/
+#include <linux/clk-provider.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
@@ -44,6 +45,10 @@
#define PCF85063_OFFSET_STEP0 4340
#define PCF85063_OFFSET_STEP1 4069
+#define PCF85063_REG_CLKO_F_MASK 0x07 /* frequency mask */
+#define PCF85063_REG_CLKO_F_32768HZ 0x00
+#define PCF85063_REG_CLKO_F_OFF 0x07
+
#define PCF85063_REG_RAM 0x03
#define PCF85063_REG_SC 0x04 /* datetime */
@@ -61,6 +66,9 @@ struct pcf85063_config {
struct pcf85063 {
struct rtc_device *rtc;
struct regmap *regmap;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
};
static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -357,6 +365,150 @@ static int pcf85063_load_capacitance(struct pcf85063 *pcf85063,
PCF85063_REG_CTRL1_CAP_SEL, reg);
}
+#ifdef CONFIG_COMMON_CLK
+/*
+ * Handling of the clkout
+ */
+
+#define clkout_hw_to_pcf85063(_hw) container_of(_hw, struct pcf85063, clkout_hw)
+
+static int clkout_rates[] = {
+ 32768,
+ 16384,
+ 8192,
+ 4096,
+ 2048,
+ 1024,
+ 1,
+ 0
+};
+
+static unsigned long pcf85063_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ unsigned int buf;
+ int ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf);
+
+ if (ret < 0)
+ return 0;
+
+ buf &= PCF85063_REG_CLKO_F_MASK;
+ return clkout_rates[buf];
+}
+
+static long pcf85063_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int pcf85063_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate)
+ return regmap_update_bits(pcf85063->regmap,
+ PCF85063_REG_CTRL2,
+ PCF85063_REG_CLKO_F_MASK, i);
+
+ return -EINVAL;
+}
+
+static int pcf85063_clkout_control(struct clk_hw *hw, bool enable)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ unsigned int buf;
+ int ret;
+
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf);
+ if (ret < 0)
+ return ret;
+ buf &= PCF85063_REG_CLKO_F_MASK;
+
+ if (enable) {
+ if (buf == PCF85063_REG_CLKO_F_OFF)
+ buf = PCF85063_REG_CLKO_F_32768HZ;
+ else
+ return 0;
+ } else {
+ if (buf != PCF85063_REG_CLKO_F_OFF)
+ buf = PCF85063_REG_CLKO_F_OFF;
+ else
+ return 0;
+ }
+
+ return regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL2,
+ PCF85063_REG_CLKO_F_MASK, buf);
+}
+
+static int pcf85063_clkout_prepare(struct clk_hw *hw)
+{
+ return pcf85063_clkout_control(hw, 1);
+}
+
+static void pcf85063_clkout_unprepare(struct clk_hw *hw)
+{
+ pcf85063_clkout_control(hw, 0);
+}
+
+static int pcf85063_clkout_is_prepared(struct clk_hw *hw)
+{
+ struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw);
+ unsigned int buf;
+ int ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf);
+
+ if (ret < 0)
+ return 0;
+
+ return (buf & PCF85063_REG_CLKO_F_MASK) != PCF85063_REG_CLKO_F_OFF;
+}
+
+static const struct clk_ops pcf85063_clkout_ops = {
+ .prepare = pcf85063_clkout_prepare,
+ .unprepare = pcf85063_clkout_unprepare,
+ .is_prepared = pcf85063_clkout_is_prepared,
+ .recalc_rate = pcf85063_clkout_recalc_rate,
+ .round_rate = pcf85063_clkout_round_rate,
+ .set_rate = pcf85063_clkout_set_rate,
+};
+
+static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = "pcf85063-clkout";
+ init.ops = &pcf85063_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ pcf85063->clkout_hw.init = &init;
+
+ /* optional override of the clock name */
+ of_property_read_string(pcf85063->rtc->dev.of_node,
+ "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(pcf85063->rtc->dev.of_node,
+ of_clk_src_simple_get, clk);
+
+ return clk;
+}
+#endif
+
static const struct pcf85063_config pcf85063a_config = {
.regmap = {
.reg_bits = 8,
@@ -457,6 +609,11 @@ static int pcf85063_probe(struct i2c_client *client)
nvmem_cfg.priv = pcf85063->regmap;
rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg);
+#ifdef CONFIG_COMMON_CLK
+ /* register clk in common clk framework */
+ pcf85063_clkout_register_clk(pcf85063);
+#endif
+
return rtc_register_device(pcf85063->rtc);
}
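The pcf85063 clkout above keeps its rates in a descending table, and round_rate simply returns the first entry not above the request, i.e. it always rounds down. A standalone check of that selection rule:

#include <stdio.h>

static const long rates[] = { 32768, 16384, 8192, 4096, 2048, 1024, 1, 0 };

static long round_rate(long rate)
{
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i] <= rate)
			return rates[i];	/* first entry <= request */
	return 0;
}

int main(void)
{
	printf("%ld\n", round_rate(20000));	/* 16384 */
	printf("%ld\n", round_rate(500));	/* 1 */
	return 0;
}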
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index d4a5f8afafbc..ebe03eba8f5f 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -36,32 +36,24 @@ static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(rtc->base + RTC_MR), &alrm->time);
+ rtc_time64_to_tm(readl(rtc->base + RTC_MR), &alrm->time);
return 0;
}
static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- /*
- * At the moment, we can only deal with non-wildcarded alarm times.
- */
- ret = rtc_valid_tm(&alrm->time);
- if (ret == 0)
- ret = rtc_tm_to_time(&alrm->time, &time);
- if (ret == 0)
- writel(time, rtc->base + RTC_MR);
- return ret;
+ writel(rtc_tm_to_time64(&alrm->time), rtc->base + RTC_MR);
+
+ return 0;
}
static int pl030_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(rtc->base + RTC_DR), tm);
+ rtc_time64_to_tm(readl(rtc->base + RTC_DR), tm);
return 0;
}
@@ -77,14 +69,10 @@ static int pl030_read_time(struct device *dev, struct rtc_time *tm)
static int pl030_set_time(struct device *dev, struct rtc_time *tm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- ret = rtc_tm_to_time(tm, &time);
- if (ret == 0)
- writel(time + 1, rtc->base + RTC_LR);
+ writel(rtc_tm_to_time64(tm) + 1, rtc->base + RTC_LR);
- return ret;
+ return 0;
}
static const struct rtc_class_ops pl030_ops = {
@@ -116,6 +104,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
}
rtc->rtc->ops = &pl030_ops;
+ rtc->rtc->range_max = U32_MAX;
rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!rtc->base) {
ret = -ENOMEM;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 180caebbd355..40d7450a1ce4 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -80,6 +80,8 @@ struct pl031_vendor_data {
bool clockwatch;
bool st_weekday;
unsigned long irqflags;
+ time64_t range_min;
+ timeu64_t range_max;
};
struct pl031_local {
@@ -123,11 +125,9 @@ static int pl031_stv2_tm_to_time(struct device *dev,
return -EINVAL;
} else if (wday == -1) {
/* wday is not provided, calculate it here */
- unsigned long time;
struct rtc_time calc_tm;
- rtc_tm_to_time(tm, &time);
- rtc_time_to_tm(time, &calc_tm);
+ rtc_time64_to_tm(rtc_tm_to_time64(tm), &calc_tm);
wday = calc_tm.tm_wday;
}
@@ -210,17 +210,13 @@ static int pl031_stv2_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
unsigned long bcd_year;
int ret;
- /* At the moment, we can only deal with non-wildcarded alarm times. */
- ret = rtc_valid_tm(&alarm->time);
+ ret = pl031_stv2_tm_to_time(dev, &alarm->time,
+ &time, &bcd_year);
if (ret == 0) {
- ret = pl031_stv2_tm_to_time(dev, &alarm->time,
- &time, &bcd_year);
- if (ret == 0) {
- writel(bcd_year, ldata->base + RTC_YMR);
- writel(time, ldata->base + RTC_MR);
+ writel(bcd_year, ldata->base + RTC_YMR);
+ writel(time, ldata->base + RTC_MR);
- pl031_alarm_irq_enable(dev, alarm->enabled);
- }
+ pl031_alarm_irq_enable(dev, alarm->enabled);
}
return ret;
@@ -248,30 +244,25 @@ static int pl031_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(ldata->base + RTC_DR), tm);
+ rtc_time64_to_tm(readl(ldata->base + RTC_DR), tm);
return 0;
}
static int pl031_set_time(struct device *dev, struct rtc_time *tm)
{
- unsigned long time;
struct pl031_local *ldata = dev_get_drvdata(dev);
- int ret;
- ret = rtc_tm_to_time(tm, &time);
+ writel(rtc_tm_to_time64(tm), ldata->base + RTC_LR);
- if (ret == 0)
- writel(time, ldata->base + RTC_LR);
-
- return ret;
+ return 0;
}
static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
+ rtc_time64_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
alarm->pending = readl(ldata->base + RTC_RIS) & RTC_BIT_AI;
alarm->enabled = readl(ldata->base + RTC_IMSC) & RTC_BIT_AI;
@@ -282,20 +273,10 @@ static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- /* At the moment, we can only deal with non-wildcarded alarm times. */
- ret = rtc_valid_tm(&alarm->time);
- if (ret == 0) {
- ret = rtc_tm_to_time(&alarm->time, &time);
- if (ret == 0) {
- writel(time, ldata->base + RTC_MR);
- pl031_alarm_irq_enable(dev, alarm->enabled);
- }
- }
+ writel(rtc_tm_to_time64(&alarm->time), ldata->base + RTC_MR);
- return ret;
+ return 0;
}
static int pl031_remove(struct amba_device *adev)
@@ -383,6 +364,8 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(ldata->rtc);
ldata->rtc->ops = ops;
+ ldata->rtc->range_min = vendor->range_min;
+ ldata->rtc->range_max = vendor->range_max;
ret = rtc_register_device(ldata->rtc);
if (ret)
@@ -413,6 +396,7 @@ static struct pl031_vendor_data arm_pl031 = {
.set_alarm = pl031_set_alarm,
.alarm_irq_enable = pl031_alarm_irq_enable,
},
+ .range_max = U32_MAX,
};
/* The First ST derivative */
@@ -426,6 +410,7 @@ static struct pl031_vendor_data stv1_pl031 = {
},
.clockwatch = true,
.st_weekday = true,
+ .range_max = U32_MAX,
};
/* And the second ST derivative */
@@ -446,6 +431,8 @@ static struct pl031_vendor_data stv2_pl031 = {
* remove IRQF_COND_SUSPEND
*/
.irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
+ .range_min = RTC_TIMESTAMP_BEGIN_0000,
+ .range_max = RTC_TIMESTAMP_END_9999,
};
static const struct amba_id pl031_ids[] = {
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 07ea1be3abb9..b45ee2cb2c04 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -84,7 +84,7 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
if (!rtc_dd->allow_set_time)
return -EACCES;
- rtc_tm_to_time(tm, &secs);
+ secs = rtc_tm_to_time64(tm);
dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
@@ -208,7 +208,7 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
secs = value[0] | (value[1] << 8) | (value[2] << 16) |
((unsigned long)value[3] << 24);
- rtc_time_to_tm(secs, tm);
+ rtc_time64_to_tm(secs, tm);
dev_dbg(dev, "secs = %lu, h:m:s == %ptRt, y-m-d = %ptRdr\n", secs, tm, tm);
@@ -224,7 +224,7 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
- rtc_tm_to_time(&alarm->time, &secs);
+ secs = rtc_tm_to_time64(&alarm->time);
for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
value[i] = secs & 0xFF;
@@ -280,13 +280,7 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
secs = value[0] | (value[1] << 8) | (value[2] << 16) |
((unsigned long)value[3] << 24);
- rtc_time_to_tm(secs, &alarm->time);
-
- rc = rtc_valid_tm(&alarm->time);
- if (rc < 0) {
- dev_err(dev, "Invalid alarm time read from RTC\n");
- return rc;
- }
+ rtc_time64_to_tm(secs, &alarm->time);
dev_dbg(dev, "Alarm set for - h:m:s=%ptRt, y-m-d=%ptRdr\n",
&alarm->time, &alarm->time);
@@ -301,6 +295,7 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
unsigned int ctrl_reg;
+ u8 value[NUM_8_BIT_RTC_REGS] = {0};
spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
@@ -319,6 +314,16 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
goto rtc_rw_fail;
}
+ /* Clear Alarm register */
+ if (!enable) {
+ rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
+ sizeof(value));
+ if (rc) {
+ dev_err(dev, "Clear RTC ALARM register failed\n");
+ goto rtc_rw_fail;
+ }
+ }
+
rtc_rw_fail:
spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
return rc;
@@ -486,13 +491,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
/* Register the RTC device */
- rtc_dd->rtc = devm_rtc_device_register(&pdev->dev, "pm8xxx_rtc",
- &pm8xxx_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc_dd->rtc)) {
- dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n",
- __func__, PTR_ERR(rtc_dd->rtc));
+ rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc_dd->rtc))
return PTR_ERR(rtc_dd->rtc);
- }
+
+ rtc_dd->rtc->ops = &pm8xxx_rtc_ops;
+ rtc_dd->rtc->range_max = U32_MAX;
/* Request the alarm IRQ */
rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->rtc_alarm_irq,
@@ -504,9 +508,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
return rc;
}
- dev_dbg(&pdev->dev, "Probe success !!\n");
-
- return 0;
+ return rtc_register_device(rtc_dd->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 89ff713163dd..954b88d2485f 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -85,7 +85,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
/* Time read/write */
static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
- rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
+ rtc_time64_to_tm(readl(RTC_RCNR), rtc_tm);
dev_dbg(dev, "read time %ptRr\n", rtc_tm);
@@ -94,12 +94,9 @@ static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm)
{
- unsigned long rtc_count = 0;
-
dev_dbg(dev, "set time %ptRr\n", tm);
- rtc_tm_to_time(tm, &rtc_count);
- writel(rtc_count, RTC_RCNR);
+ writel(rtc_tm_to_time64(tm), RTC_RCNR);
return 0;
}
@@ -108,7 +105,7 @@ static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *alm_tm = &alrm->time;
- rtc_time_to_tm(readl(RTC_RTAR), alm_tm);
+ rtc_time64_to_tm(readl(RTC_RTAR), alm_tm);
alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
@@ -120,12 +117,10 @@ static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *tm = &alrm->time;
- unsigned long rtcalarm_count = 0;
dev_dbg(dev, "set alarm: %d, %ptRr\n", alrm->enabled, tm);
- rtc_tm_to_time(tm, &rtcalarm_count);
- writel(rtcalarm_count, RTC_RTAR);
+ writel(rtc_tm_to_time64(tm), RTC_RTAR);
puv3_rtc_setaie(dev, alrm->enabled);
@@ -234,6 +229,7 @@ static int puv3_rtc_probe(struct platform_device *pdev)
/* register RTC and exit */
rtc->ops = &puv3_rtcops;
+ rtc->range_max = U32_MAX;
ret = rtc_register_device(rtc);
if (ret)
goto err_nortc;
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index d37893f6eaee..9ccc97cf5e09 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -111,20 +111,17 @@ static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
- rtc_time_to_tm(readl_relaxed(info->rcnr), tm);
+ rtc_time64_to_tm(readl_relaxed(info->rcnr), tm);
return 0;
}
static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
- ret = rtc_tm_to_time(tm, &time);
- if (ret == 0)
- writel_relaxed(time, info->rcnr);
- return ret;
+ writel_relaxed(rtc_tm_to_time64(tm), info->rcnr);
+
+ return 0;
}
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -141,24 +138,18 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sa1100_rtc *info = dev_get_drvdata(dev);
- unsigned long time;
- int ret;
spin_lock_irq(&info->lock);
- ret = rtc_tm_to_time(&alrm->time, &time);
- if (ret != 0)
- goto out;
writel_relaxed(readl_relaxed(info->rtsr) &
(RTSR_HZE | RTSR_ALE | RTSR_AL), info->rtsr);
- writel_relaxed(time, info->rtar);
+ writel_relaxed(rtc_tm_to_time64(&alrm->time), info->rtar);
if (alrm->enabled)
writel_relaxed(readl_relaxed(info->rtsr) | RTSR_ALE, info->rtsr);
else
writel_relaxed(readl_relaxed(info->rtsr) & ~RTSR_ALE, info->rtsr);
-out:
spin_unlock_irq(&info->lock);
- return ret;
+ return 0;
}
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -182,7 +173,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
{
- struct rtc_device *rtc;
int ret;
spin_lock_init(&info->lock);
@@ -211,15 +201,15 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
writel_relaxed(0, info->rcnr);
}
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(rtc)) {
+ info->rtc->ops = &sa1100_rtc_ops;
+ info->rtc->max_user_freq = RTC_FREQ;
+ info->rtc->range_max = U32_MAX;
+
+ ret = rtc_register_device(info->rtc);
+ if (ret) {
clk_disable_unprepare(info->clk);
- return PTR_ERR(rtc);
+ return ret;
}
- info->rtc = rtc;
-
- rtc->max_user_freq = RTC_FREQ;
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
@@ -267,6 +257,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
info->irq_1hz = irq_1hz;
info->irq_alarm = irq_alarm;
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(info->rtc))
+ return PTR_ERR(info->rtc);
+
ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0,
"rtc 1Hz", &pdev->dev);
if (ret) {
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index feb1f8e52c00..9167b48014a1 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -504,8 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc->res))
return -EBUSY;
- rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start,
- rtc->regsize);
+ rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, rtc->regsize);
if (unlikely(!rtc->regbase))
return -EINVAL;
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index a2c9c55667cd..abf19435dbad 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -90,13 +90,13 @@ static int sirfsoc_rtc_read_alarm(struct device *dev,
*/
/* if alarm is in next overflow cycle */
if (rtc_count > rtc_alarm)
- rtc_time_to_tm((rtcdrv->overflow_rtc + 1)
- << (BITS_PER_LONG - RTC_SHIFT)
- | rtc_alarm >> RTC_SHIFT, &(alrm->time));
+ rtc_time64_to_tm((rtcdrv->overflow_rtc + 1)
+ << (BITS_PER_LONG - RTC_SHIFT)
+ | rtc_alarm >> RTC_SHIFT, &alrm->time);
else
- rtc_time_to_tm(rtcdrv->overflow_rtc
- << (BITS_PER_LONG - RTC_SHIFT)
- | rtc_alarm >> RTC_SHIFT, &(alrm->time));
+ rtc_time64_to_tm(rtcdrv->overflow_rtc
+ << (BITS_PER_LONG - RTC_SHIFT)
+ | rtc_alarm >> RTC_SHIFT, &alrm->time);
if (sirfsoc_rtc_readl(rtcdrv, RTC_STATUS) & SIRFSOC_RTC_AL0E)
alrm->enabled = 1;
@@ -113,7 +113,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev,
rtcdrv = dev_get_drvdata(dev);
if (alrm->enabled) {
- rtc_tm_to_time(&(alrm->time), &rtc_alarm);
+ rtc_alarm = rtc_tm_to_time64(&alrm->time);
spin_lock_irq(&rtcdrv->lock);
@@ -181,8 +181,8 @@ static int sirfsoc_rtc_read_time(struct device *dev,
cpu_relax();
} while (tmp_rtc != sirfsoc_rtc_readl(rtcdrv, RTC_CN));
- rtc_time_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT) |
- tmp_rtc >> RTC_SHIFT, tm);
+ rtc_time64_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT)
+ | tmp_rtc >> RTC_SHIFT, tm);
return 0;
}
@@ -193,7 +193,7 @@ static int sirfsoc_rtc_set_time(struct device *dev,
struct sirfsoc_rtc_drv *rtcdrv;
rtcdrv = dev_get_drvdata(dev);
- rtc_tm_to_time(tm, &rtc_time);
+ rtc_time = rtc_tm_to_time64(tm);
rtcdrv->overflow_rtc = rtc_time >> (BITS_PER_LONG - RTC_SHIFT);
@@ -341,28 +341,22 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
rtcdrv->overflow_rtc =
sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE);
- rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &sirfsoc_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtcdrv->rtc)) {
- err = PTR_ERR(rtcdrv->rtc);
- dev_err(&pdev->dev, "can't register RTC device\n");
- return err;
- }
+ rtcdrv->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtcdrv->rtc))
+ return PTR_ERR(rtcdrv->rtc);
+
+ rtcdrv->rtc->ops = &sirfsoc_rtc_ops;
+ rtcdrv->rtc->range_max = (1ULL << 60) - 1;
rtcdrv->irq = platform_get_irq(pdev, 0);
- err = devm_request_irq(
- &pdev->dev,
- rtcdrv->irq,
- sirfsoc_rtc_irq_handler,
- IRQF_SHARED,
- pdev->name,
- rtcdrv);
+ err = devm_request_irq(&pdev->dev, rtcdrv->irq, sirfsoc_rtc_irq_handler,
+ IRQF_SHARED, pdev->name, rtcdrv);
if (err) {
dev_err(&pdev->dev, "Unable to register for the SiRF SOC RTC IRQ\n");
return err;
}
- return 0;
+ return rtc_register_device(rtcdrv->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 757f4daa7181..35ee08aa7584 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/rtc.h>
@@ -264,6 +263,12 @@ static const struct regmap_config snvs_rtc_config = {
.reg_stride = 4,
};
+static void snvs_rtc_action(void *data)
+{
+ if (data)
+ clk_disable_unprepare(data);
+}
+
static int snvs_rtc_probe(struct platform_device *pdev)
{
struct snvs_rtc_data *data;
@@ -314,6 +319,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
}
}
+ ret = devm_add_action_or_reset(&pdev->dev, snvs_rtc_action, data->clk);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, data);
/* Initialize glitch detect */
@@ -326,7 +335,7 @@ static int snvs_rtc_probe(struct platform_device *pdev)
ret = snvs_rtc_enable(data, true);
if (ret) {
dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
- goto error_rtc_device_register;
+ return ret;
}
device_init_wakeup(&pdev->dev, true);
@@ -339,24 +348,13 @@ static int snvs_rtc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request irq %d: %d\n",
data->irq, ret);
- goto error_rtc_device_register;
+ return ret;
}
data->rtc->ops = &snvs_rtc_ops;
data->rtc->range_max = U32_MAX;
- ret = rtc_register_device(data->rtc);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
- goto error_rtc_device_register;
- }
-
- return 0;
-error_rtc_device_register:
- if (data->clk)
- clk_disable_unprepare(data->clk);
-
- return ret;
+ return rtc_register_device(data->rtc);
}
static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev)
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index a7d49329d626..37a26279e107 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -27,7 +27,7 @@ static u32 starfire_get_time(void)
static int starfire_read_time(struct device *dev, struct rtc_time *tm)
{
- rtc_time_to_tm(starfire_get_time(), tm);
+ rtc_time64_to_tm(starfire_get_time(), tm);
return 0;
}
@@ -39,14 +39,16 @@ static int __init starfire_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- rtc = devm_rtc_device_register(&pdev->dev, "starfire",
- &starfire_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &starfire_rtc_ops;
+ rtc->range_max = U32_MAX;
+
platform_set_drvdata(pdev, rtc);
- return 0;
+ return rtc_register_device(rtc);
}
static struct platform_driver starfire_rtc_driver = {
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 852f5f3b3592..e2b8b150bcb4 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -108,7 +108,6 @@
* driver, even though it is somewhat limited.
*/
#define SUN6I_YEAR_MIN 1970
-#define SUN6I_YEAR_MAX 2033
#define SUN6I_YEAR_OFF (SUN6I_YEAR_MIN - 1900)
/*
@@ -250,19 +249,17 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
}
- /* Switch to the external, more precise, oscillator */
- reg |= SUN6I_LOSC_CTRL_EXT_OSC;
- if (rtc->data->has_losc_en)
- reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN;
+ /* Switch to the external, more precise, oscillator, if present */
+ if (of_get_property(node, "clocks", NULL)) {
+ reg |= SUN6I_LOSC_CTRL_EXT_OSC;
+ if (rtc->data->has_losc_en)
+ reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN;
+ }
writel(reg, rtc->base + SUN6I_LOSC_CTRL);
/* Yes, I know, this is ugly. */
sun6i_rtc = rtc;
- /* Deal with old DTs */
- if (!of_get_property(node, "clocks", NULL))
- goto err;
-
/* Only read IOSC name from device tree if it is exported */
if (rtc->data->export_iosc)
of_property_read_string_index(node, "clock-output-names", 2,
@@ -279,11 +276,13 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
}
parents[0] = clk_hw_get_name(rtc->int_osc);
+ /* If there is no external oscillator, this will be NULL and ... */
parents[1] = of_clk_get_parent_name(node, 0);
rtc->hw.init = &init;
init.parent_names = parents;
+ /* ... number of clock parents will be 1. */
init.num_parents = of_clk_get_parent_count(node) + 1;
of_property_read_string_index(node, "clock-output-names", 0,
&init.name);
@@ -499,7 +498,7 @@ static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN);
wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN);
- rtc_time_to_tm(chip->alarm, &wkalrm->time);
+ rtc_time64_to_tm(chip->alarm, &wkalrm->time);
return 0;
}
@@ -520,8 +519,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
return -EINVAL;
}
- rtc_tm_to_time(alrm_tm, &time_set);
- rtc_tm_to_time(&tm_now, &time_now);
+ time_set = rtc_tm_to_time64(alrm_tm);
+ time_now = rtc_tm_to_time64(&tm_now);
if (time_set <= time_now) {
dev_err(dev, "Date to set in the past\n");
return -EINVAL;
@@ -569,14 +568,6 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
u32 date = 0;
u32 time = 0;
- int year;
-
- year = rtc_tm->tm_year + 1900;
- if (year < SUN6I_YEAR_MIN || year > SUN6I_YEAR_MAX) {
- dev_err(dev, "rtc only supports year in range %d - %d\n",
- SUN6I_YEAR_MIN, SUN6I_YEAR_MAX);
- return -EINVAL;
- }
rtc_tm->tm_year -= SUN6I_YEAR_OFF;
rtc_tm->tm_mon += 1;
@@ -585,7 +576,7 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
- if (is_leap_year(year))
+ if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN))
date |= SUN6I_LEAP_SET_VALUE(1);
time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
@@ -726,12 +717,16 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-sun6i",
- &sun6i_rtc_ops, THIS_MODULE);
- if (IS_ERR(chip->rtc)) {
- dev_err(&pdev->dev, "unable to register device\n");
+ chip->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
- }
+
+ chip->rtc->ops = &sun6i_rtc_ops;
+ chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
+
+ ret = rtc_register_device(chip->rtc);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "RTC enabled\n");
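The magic range_max can be checked by hand: the 64 years 1970-2033 contain 16 leap days, giving 64 x 365 + 16 = 23376 days, i.e. 23376 x 86400 = 2019686400 seconds; the last representable second, 2033-12-31 23:59:59, is one less, 2019686399. With range_max set, the core performs this bound check, which is why the open-coded SUN6I_YEAR_MAX validation in sun6i_rtc_settime() can be dropped.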
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 5786866c09e9..4b1077e2f826 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -38,6 +38,8 @@
#define RTC_CALIB_DEF 0x198233
#define RTC_CALIB_MASK 0x1FFFFF
+#define RTC_ALRM_MASK BIT(1)
+#define RTC_MSEC 1000
struct xlnx_rtc_dev {
struct rtc_device *rtc;
@@ -123,11 +125,28 @@ static int xlnx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int xlnx_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
{
struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+ unsigned int status;
+ ulong timeout;
+
+ timeout = jiffies + msecs_to_jiffies(RTC_MSEC);
+
+ if (enabled) {
+ while (1) {
+ status = readl(xrtcdev->reg_base + RTC_INT_STS);
+ if (!((status & RTC_ALRM_MASK) == RTC_ALRM_MASK))
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+ dev_err(dev, "Time out occur, while clearing alarm status bit\n");
+ return -ETIMEDOUT;
+ }
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS);
+ }
- if (enabled)
writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN);
- else
+ } else {
writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
+ }
return 0;
}
@@ -183,8 +202,8 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
if (!(status & (RTC_INT_SEC | RTC_INT_ALRM)))
return IRQ_NONE;
- /* Clear RTC_INT_ALRM interrupt only */
- writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS);
+ /* Disable RTC_INT_ALRM interrupt only */
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS);
if (status & RTC_INT_ALRM)
rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_AF);
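The new alarm-enable path is an instance of the usual bounded register-polling pattern; abstracted with hypothetical register names:

	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (readl(base + REG_STATUS) & PENDING_MASK) {
		if (time_after_eq(jiffies, timeout))
			return -ETIMEDOUT;
		/* write-one-to-clear, then re-read */
		writel(PENDING_MASK, base + REG_STATUS);
	}

Clearing any stale pending bit before writing RTC_INT_EN keeps a previously latched alarm from firing the moment the interrupt is re-enabled.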
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index b7ca7d79fb28..950fac0d41ff 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -279,7 +279,7 @@ static bool rtc_does_wakealarm(struct rtc_device *rtc)
static umode_t rtc_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct rtc_device *rtc = to_rtc_device(dev);
umode_t mode = attr->mode;
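kobj_to_dev() is the helper from <linux/device.h> for exactly this cast; in essence:

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}

Using it instead of an open-coded container_of() changes nothing at runtime, but it documents intent and is the form treewide cleanups converge on.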
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index ee73b0607e47..4691a3c35d72 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1987,7 +1987,7 @@ dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
* DASD_3990_ERP_COMPOUND_CONFIG
*
* DESCRIPTION
- * Handles the compound ERP action for configruation
+ * Handles the compound ERP action for configuration
* dependent error.
* Note: duplex handling is not implemented (yet).
*
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 8d4971645cf1..facb588d09e4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -58,7 +58,7 @@ struct dasd_diag_private {
struct dasd_diag_req {
unsigned int block_count;
- struct dasd_diag_bio bio[0];
+ struct dasd_diag_bio bio[];
};
static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
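The [0] -> [] changes throughout this series replace the old GCC zero-length-array extension with a C99 flexible array member, which lets compilers and fortified memory helpers see the real object bounds. Allocation is unchanged in shape; a sketch with hypothetical names, using the real struct_size() helper from <linux/overflow.h>:

struct foo_req {
	unsigned int count;
	struct foo_bio bio[];	/* must be the last member */
};

	req = kzalloc(struct_size(req, bio, nr_bios), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->count = nr_bios;

struct_size() computes sizeof(*req) + nr_bios * sizeof(req->bio[0]) with overflow checking.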
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 6943508d0f1d..ca24a78a256e 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -220,7 +220,7 @@ struct LRE_eckd_data {
__u8 imbedded_count;
__u8 extended_operation;
__u16 extended_parameter_length;
- __u8 extended_parameter[0];
+ __u8 extended_parameter[];
} __attribute__ ((packed));
/* Prefix data for format 0x00 and 0x01 */
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index e7cf0a1d4f71..92757f9bd010 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -398,7 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
}
if (dstat == 0x08)
break;
- /* else, fall through */
+ fallthrough;
case 0x04:
/* Device end interrupt. */
if ((raw = req->info) == NULL)
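The comment-to-keyword conversions here and below swap an annotation only some tools could parse for the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((__fallthrough__)) on compilers that support it. Shape of the result:

	switch (cmd) {
	case CMD_PREP:
		prepare();
		fallthrough;	/* deliberate: CMD_PREP also runs CMD_RUN */
	case CMD_RUN:
		run();
		break;
	}

Unlike the comment form, the keyword satisfies -Wimplicit-fallthrough on both GCC and Clang, independent of comment style or preprocessing.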
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 0e70397d6e04..37ee8f698c3b 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -137,7 +137,7 @@ static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
while ((*cmd != '\0') && !iscntrl(*cmd))
++cmd;
ftp->fname = start;
- /* fall through */
+ fallthrough;
default:
*cmd = '\0';
break;
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 3afaa35f7351..08f36e973b43 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -211,7 +211,7 @@ struct string
struct list_head update;
unsigned long size;
unsigned long len;
- char string[0];
+ char string[];
} __attribute__ ((aligned(8)));
static inline struct string *
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 37d42de06079..a864b21af602 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -406,7 +406,7 @@ static void __init add_memory_merged(u16 rn)
if (!size)
goto skip_add;
for (addr = start; addr < start + size; addr += block_size)
- add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
+ add_memory(0, addr, block_size);
skip_add:
first_rn = rn;
num = 1;
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index 995e9196852e..a3e5a5fb0c1e 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -39,7 +39,7 @@ struct err_notify_evbuf {
u8 atype;
u32 fh;
u32 fid;
- u8 data[0];
+ u8 data[];
} __packed;
struct err_notify_sccb {
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 13f97fd73aca..644b61013679 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -214,7 +214,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
break;
case SDIAS_EVSTATE_NO_DATA:
TRACE("no data\n");
- /* fall through */
+ fallthrough;
default:
pr_err("Error from SCLP while copying hsa. Event status = %x\n",
sdias_evbuf.event_status);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 3e0b2f63a9d2..380e6a67719c 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -677,7 +677,7 @@ tape_generic_remove(struct ccw_device *cdev)
switch (device->tape_state) {
case TS_INIT:
tape_state_set(device, TS_NOT_OPER);
- /* fallthrough */
+ fallthrough;
case TS_NOT_OPER:
/*
* Nothing to do.
@@ -950,7 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
break;
if (device->tape_state == TS_UNUSED)
break;
- /* fallthrough */
+ fallthrough;
default:
if (device->tape_state == TS_BLKUSE)
break;
@@ -1118,7 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case -ETIMEDOUT:
DBF_LH(1, "(%08x): Request timed out\n",
device->cdev_id);
- /* fallthrough */
+ fallthrough;
case -EIO:
__tape_end_request(device, request, -EIO);
break;
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 427b2e24a8ce..cb466ed7eb5e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -105,16 +105,12 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-static struct irqaction airq_interrupt = {
- .name = "AIO",
- .handler = do_airq_interrupt,
-};
-
void __init init_airq_interrupts(void)
{
irq_set_chip_and_handler(THIN_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
- setup_irq(THIN_INTERRUPT, &airq_interrupt);
+ if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL))
+ panic("Failed to register AIO interrupt\n");
}
static inline unsigned long iv_size(unsigned long bits)
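setup_irq() with a static struct irqaction was the legacy interface for IRQs requested before the allocators are up; these s390 conversions (here and in cio.c below) show it replaced by plain request_irq(), which allocates the irqaction internally. The conversion shape:

	/*
	 * before: static struct irqaction aio = { .name = "AIO",
	 *         .handler = do_airq_interrupt }; setup_irq(irq, &aio);
	 */
	if (request_irq(irq, do_airq_interrupt, 0, "AIO", NULL))
		panic("Failed to register AIO interrupt\n");

A NULL dev_id is acceptable only because these interrupts are never freed and not shared.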
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b42a93736668..483a9ecfcbb1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -485,79 +485,10 @@ static void ccwgroup_shutdown(struct device *dev)
gdrv->shutdown(gdev);
}
-static int ccwgroup_pm_prepare(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- /* Fail while device is being set online/offline. */
- if (atomic_read(&gdev->onoff))
- return -EAGAIN;
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->prepare ? gdrv->prepare(gdev) : 0;
-}
-
-static void ccwgroup_pm_complete(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return;
-
- if (gdrv->complete)
- gdrv->complete(gdev);
-}
-
-static int ccwgroup_pm_freeze(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->freeze ? gdrv->freeze(gdev) : 0;
-}
-
-static int ccwgroup_pm_thaw(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->thaw ? gdrv->thaw(gdev) : 0;
-}
-
-static int ccwgroup_pm_restore(struct device *dev)
-{
- struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
- struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
-
- if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
- return 0;
-
- return gdrv->restore ? gdrv->restore(gdev) : 0;
-}
-
-static const struct dev_pm_ops ccwgroup_pm_ops = {
- .prepare = ccwgroup_pm_prepare,
- .complete = ccwgroup_pm_complete,
- .freeze = ccwgroup_pm_freeze,
- .thaw = ccwgroup_pm_thaw,
- .restore = ccwgroup_pm_restore,
-};
-
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
- .pm = &ccwgroup_pm_ops,
};
bool dev_is_ccwgroup(struct device *dev)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6392a1b95b02..1ca73c2e5a8f 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -180,11 +180,12 @@ EXPORT_SYMBOL_GPL(chsc_ssqd);
* @scssc: request and response block for SADC
* @summary_indicator_addr: summary indicator address
* @subchannel_indicator_addr: subchannel indicator address
+ * @isc: Interruption Subclass for this subchannel
*
* Returns 0 on success.
*/
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
{
memset(scssc, 0, sizeof(*scssc));
scssc->request.length = 0x0fe0;
@@ -196,7 +197,7 @@ int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
scssc->ks = PAGE_DEFAULT_KEY >> 4;
scssc->kc = PAGE_DEFAULT_KEY >> 4;
- scssc->isc = QDIO_AIRQ_ISC;
+ scssc->isc = isc;
scssc->schid = schid;
/* enable the time delay disablement facility */
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index e57d68e325a3..34de6d77442c 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -163,7 +163,8 @@ void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr,
+ u8 isc);
int chsc_sgib(u32 origin);
int chsc_error_from_response(int response);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 18f5458f90e8..6d716db2a46a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -563,16 +563,12 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-static struct irqaction io_interrupt = {
- .name = "I/O",
- .handler = do_cio_interrupt,
-};
-
void __init init_cio_interrupts(void)
{
irq_set_chip_and_handler(IO_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
- setup_irq(IO_INTERRUPT, &io_interrupt);
+ if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL))
+ panic("Failed to register I/O interrupt\n");
}
#ifdef CONFIG_CCW_CONSOLE
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 0c6245fc7706..50007cb9be5b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1262,7 +1262,7 @@ static int recovery_check(struct device *dev, void *data)
sch = to_subchannel(cdev->dev.parent);
if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
break;
- /* fall through */
+ fallthrough;
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
@@ -2091,7 +2091,7 @@ static void ccw_device_todo(struct work_struct *work)
case CDEV_TODO_UNREG_EVAL:
if (!sch_is_pseudo_sch(sch))
css_schedule_eval(sch->schid);
- /* fall-through */
+ fallthrough;
case CDEV_TODO_UNREG:
if (sch_is_pseudo_sch(sch))
ccw_device_unregister(cdev);
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 835de44dbbcc..77d0ea7b381b 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -13,7 +13,7 @@
struct idset {
int num_ssid;
int num_id;
- unsigned long bitmap[0];
+ unsigned long bitmap[];
};
static inline unsigned long bitmap_size(int num_ssid, int num_id)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f72f961cc78f..b0beafc43d37 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -250,7 +250,6 @@ struct qdio_q {
/* upper-layer program handler */
qdio_handler_t (*handler);
- struct dentry *debugfs_q;
struct qdio_irq *irq_ptr;
struct sl *sl;
/*
@@ -266,7 +265,6 @@ struct qdio_irq {
struct ccw_device *cdev;
struct list_head entry; /* list of thinint devices */
struct dentry *debugfs_dev;
- struct dentry *debugfs_perf;
unsigned long int_parm;
struct subchannel_id schid;
@@ -391,12 +389,9 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
struct subchannel_id *schid,
struct qdio_ssqd_desc *data);
-int qdio_setup_irq(struct qdio_initialize *init_data);
-void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev);
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
void qdio_release_memory(struct qdio_irq *irq_ptr);
-int qdio_setup_create_sysfs(struct ccw_device *cdev);
-void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
int qdio_enable_async_operation(struct qdio_output_q *q);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 00244607c8c0..5a3d9ee90a7f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -81,7 +81,7 @@ int qdio_allocate_dbf(struct qdio_initialize *init_data,
/* allocate trace view for the interface */
snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
- dev_name(&init_data->cdev->dev));
+ dev_name(&irq_ptr->cdev->dev));
irq_ptr->debug_area = qdio_get_dbf_entry(text);
if (irq_ptr->debug_area)
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
@@ -190,6 +190,23 @@ static int qstat_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(qstat);
+static int ssqd_show(struct seq_file *m, void *v)
+{
+ struct ccw_device *cdev = m->private;
+ struct qdio_ssqd_desc ssqd;
+ int rc;
+
+ rc = qdio_get_ssqd_desc(cdev, &ssqd);
+ if (rc)
+ return rc;
+
+ seq_hex_dump(m, "", DUMP_PREFIX_NONE, 16, 4, &ssqd, sizeof(ssqd),
+ false);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(ssqd);
+
static char *qperf_names[] = {
"Assumed adapter interrupts",
"QDIO interrupts",
@@ -284,53 +301,37 @@ static const struct file_operations debugfs_perf_fops = {
.release = single_release,
};
-static void setup_debugfs_entry(struct qdio_q *q)
+static void setup_debugfs_entry(struct dentry *parent, struct qdio_q *q)
{
char name[QDIO_DEBUGFS_NAME_LEN];
snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
- q->debugfs_q = debugfs_create_file(name, 0444,
- q->irq_ptr->debugfs_dev, q, &qstat_fops);
- if (IS_ERR(q->debugfs_q))
- q->debugfs_q = NULL;
+ debugfs_create_file(name, 0444, parent, q, &qstat_fops);
}
-void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
- irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+ irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&irq_ptr->cdev->dev),
debugfs_root);
- if (IS_ERR(irq_ptr->debugfs_dev))
- irq_ptr->debugfs_dev = NULL;
-
- irq_ptr->debugfs_perf = debugfs_create_file("statistics",
- S_IFREG | S_IRUGO | S_IWUSR,
- irq_ptr->debugfs_dev, irq_ptr,
- &debugfs_perf_fops);
- if (IS_ERR(irq_ptr->debugfs_perf))
- irq_ptr->debugfs_perf = NULL;
+ debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr, &debugfs_perf_fops);
+ debugfs_create_file("ssqd", 0444, irq_ptr->debugfs_dev, irq_ptr->cdev,
+ &ssqd_fops);
for_each_input_queue(irq_ptr, q, i)
- setup_debugfs_entry(q);
+ setup_debugfs_entry(irq_ptr->debugfs_dev, q);
for_each_output_queue(irq_ptr, q, i)
- setup_debugfs_entry(q);
+ setup_debugfs_entry(irq_ptr->debugfs_dev, q);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
{
- struct qdio_q *q;
- int i;
-
- for_each_input_queue(irq_ptr, q, i)
- debugfs_remove(q->debugfs_q);
- for_each_output_queue(irq_ptr, q, i)
- debugfs_remove(q->debugfs_q);
- debugfs_remove(irq_ptr->debugfs_perf);
- debugfs_remove(irq_ptr->debugfs_dev);
+ debugfs_remove_recursive(irq_ptr->debugfs_dev);
}
int __init qdio_debug_init(void)
@@ -352,7 +353,7 @@ int __init qdio_debug_init(void)
void qdio_debug_exit(void)
{
qdio_clear_dbf_list();
- debugfs_remove(debugfs_root);
+ debugfs_remove_recursive(debugfs_root);
debug_unregister(qdio_dbf_setup);
debug_unregister(qdio_dbf_error);
}
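DEFINE_SHOW_ATTRIBUTE(name), used for both qstat and the new ssqd file, generates a name##_open() wrapper around single_open() plus a complete name##_fops, so a read-only seq_file needs nothing but its show function. Minimal usage sketch with a hypothetical foo:

static int foo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);	/* provides foo_fops */

	debugfs_create_file("foo", 0444, parent, NULL, &foo_fops);

The rest of the cleanup above leans on debugfs conventions: creation failures may simply be ignored, and debugfs_remove_recursive() on the directory dentry tears down every file beneath it, so per-file dentries no longer need to be stored.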
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index f85f5fa7cefc..122450ba6b90 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -66,8 +66,7 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
int qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr);
-void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
int qdio_debug_init(void);
void qdio_debug_exit(void);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 02ced5949287..c890848064fe 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1100,9 +1100,8 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-static void qdio_shutdown_queues(struct ccw_device *cdev)
+static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
int i;
@@ -1150,7 +1149,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
tiqdio_remove_device(irq_ptr);
- qdio_shutdown_queues(cdev);
+ qdio_shutdown_queues(irq_ptr);
qdio_shutdown_debug_entries(irq_ptr);
/* cleanup subchannel */
@@ -1225,10 +1224,11 @@ EXPORT_SYMBOL_GPL(qdio_free);
*/
int qdio_allocate(struct qdio_initialize *init_data)
{
+ struct ccw_device *cdev = init_data->cdev;
struct subchannel_id schid;
struct qdio_irq *irq_ptr;
- ccw_device_get_schid(init_data->cdev, &schid);
+ ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qallocate:%4x", schid.sch_no);
if ((init_data->no_input_qs && !init_data->input_handler) ||
@@ -1248,6 +1248,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
if (!irq_ptr)
goto out_err;
+ irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
if (qdio_allocate_dbf(init_data, irq_ptr))
goto out_rel;
@@ -1272,7 +1273,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
goto out_rel;
INIT_LIST_HEAD(&irq_ptr->entry);
- init_data->cdev->private->qdio_data = irq_ptr;
+ cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
out_rel:
@@ -1311,19 +1312,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
int qdio_establish(struct qdio_initialize *init_data)
{
struct ccw_device *cdev = init_data->cdev;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- struct qdio_irq *irq_ptr;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qestablish:%4x", schid.sch_no);
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
mutex_lock(&irq_ptr->setup_mutex);
- qdio_setup_irq(init_data);
+ qdio_setup_irq(irq_ptr, init_data);
rc = qdio_establish_thinint(irq_ptr);
if (rc) {
@@ -1369,8 +1369,8 @@ int qdio_establish(struct qdio_initialize *init_data)
qdio_init_buf_states(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_print_subchannel_info(irq_ptr, cdev);
- qdio_setup_debug_entries(irq_ptr, cdev);
+ qdio_print_subchannel_info(irq_ptr);
+ qdio_setup_debug_entries(irq_ptr);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
@@ -1381,14 +1381,13 @@ EXPORT_SYMBOL_GPL(qdio_establish);
*/
int qdio_activate(struct ccw_device *cdev)
{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- struct qdio_irq *irq_ptr;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qactivate:%4x", schid.sch_no);
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 7b831bb4e229..bbbefc9f9e04 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -449,10 +449,10 @@ static void setup_qib(struct qdio_irq *irq_ptr,
memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
-int qdio_setup_irq(struct qdio_initialize *init_data)
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
{
+ struct ccw_device *cdev = irq_ptr->cdev;
struct ciw *ciw;
- struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -460,8 +460,9 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
- irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
- irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+ irq_ptr->debugfs_dev = NULL;
+ irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
+ irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
/* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
@@ -469,9 +470,8 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
- irq_ptr->cdev = init_data->cdev;
irq_ptr->scan_threshold = init_data->scan_threshold;
- ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
+ ccw_device_get_schid(cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
if (init_data->irq_poll) {
@@ -494,14 +494,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
/* get qdio commands */
- ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
return -EINVAL;
}
irq_ptr->equeue = *ciw;
- ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
return -EINVAL;
@@ -509,21 +509,20 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->aqueue = *ciw;
/* set new interrupt handler */
- spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
- irq_ptr->orig_handler = init_data->cdev->handler;
- init_data->cdev->handler = qdio_int_handler;
- spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ irq_ptr->orig_handler = cdev->handler;
+ cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
return 0;
}
-void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
- struct ccw_device *cdev)
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
{
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
"AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
- dev_name(&cdev->dev),
+ dev_name(&irq_ptr->cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
irq_ptr->schid.sch_no,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8f315c53de23..ea09df7209f0 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -207,7 +207,7 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
}
rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
- subchannel_indicator_addr);
+ subchannel_indicator_addr, tiqdio_airq.isc);
if (rc) {
DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
scssc->response.code);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5256e3ce84e5..35064443e748 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -18,13 +18,13 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <linux/suspend.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
@@ -103,16 +103,9 @@ static struct hrtimer ap_poll_timer;
*/
static unsigned long long poll_timeout = 250000;
-/* Suspend flag */
-static int ap_suspend_flag;
/* Maximum domain id */
static int ap_max_domain_id;
-/*
- * Flag to check if domain was set through module parameter domain=. This is
- * important when supsend and resume is done in a z/VM environment where the
- * domain might change.
- */
-static int user_set_domain;
+
static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
@@ -360,7 +353,7 @@ void ap_wait(enum ap_wait wait)
wake_up(&ap_poll_wait);
break;
}
- /* Fall through */
+ fallthrough;
case AP_WAIT_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
@@ -386,8 +379,6 @@ void ap_request_timeout(struct timer_list *t)
{
struct ap_queue *aq = from_timer(aq, t, timeout);
- if (ap_suspend_flag)
- return;
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
spin_unlock_bh(&aq->lock);
@@ -401,8 +392,7 @@ void ap_request_timeout(struct timer_list *t)
*/
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
- if (!ap_suspend_flag)
- tasklet_schedule(&ap_tasklet);
+ tasklet_schedule(&ap_tasklet);
return HRTIMER_NORESTART;
}
@@ -413,8 +403,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
{
inc_irq_stat(IRQIO_APB);
- if (!ap_suspend_flag)
- tasklet_schedule(&ap_tasklet);
+ tasklet_schedule(&ap_tasklet);
}
/**
@@ -486,7 +475,7 @@ static int ap_poll_thread(void *data)
while (!kthread_should_stop()) {
add_wait_queue(&ap_poll_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- if (ap_suspend_flag || !ap_pending_requests()) {
+ if (!ap_pending_requests()) {
schedule();
try_to_freeze();
}
@@ -587,51 +576,6 @@ static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
-static int ap_dev_suspend(struct device *dev)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
-
- if (ap_dev->drv && ap_dev->drv->suspend)
- ap_dev->drv->suspend(ap_dev);
- return 0;
-}
-
-static int ap_dev_resume(struct device *dev)
-{
- struct ap_device *ap_dev = to_ap_dev(dev);
-
- if (ap_dev->drv && ap_dev->drv->resume)
- ap_dev->drv->resume(ap_dev);
- return 0;
-}
-
-static void ap_bus_suspend(void)
-{
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
- ap_suspend_flag = 1;
- /*
- * Disable scanning for devices, thus we do not want to scan
- * for them after removing.
- */
- flush_work(&ap_scan_work);
- tasklet_disable(&ap_tasklet);
-}
-
-static int __ap_card_devices_unregister(struct device *dev, void *dummy)
-{
- if (is_card_dev(dev))
- device_unregister(dev);
- return 0;
-}
-
-static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
-{
- if (is_queue_dev(dev))
- device_unregister(dev);
- return 0;
-}
-
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
if (is_queue_dev(dev) &&
@@ -640,67 +584,10 @@ static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
return 0;
}
-static void ap_bus_resume(void)
-{
- int rc;
-
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
- /* remove all queue devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_queue_devices_unregister);
- /* remove all card devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_card_devices_unregister);
-
- /* Reset thin interrupt setting */
- if (ap_interrupts_available() && !ap_using_interrupts()) {
- rc = register_adapter_interrupt(&ap_airq);
- ap_airq_flag = (rc == 0);
- }
- if (!ap_interrupts_available() && ap_using_interrupts()) {
- unregister_adapter_interrupt(&ap_airq);
- ap_airq_flag = 0;
- }
- /* Reset domain */
- if (!user_set_domain)
- ap_domain_index = -1;
- /* Get things going again */
- ap_suspend_flag = 0;
- if (ap_airq_flag)
- xchg(ap_airq.lsi_ptr, 0);
- tasklet_enable(&ap_tasklet);
- queue_work(system_long_wq, &ap_scan_work);
-}
-
-static int ap_power_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- switch (event) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- ap_bus_suspend();
- break;
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- ap_bus_resume();
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-static struct notifier_block ap_power_notifier = {
- .notifier_call = ap_power_event,
-};
-
-static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
-
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
- .pm = &ap_bus_pm_ops,
};
static int __ap_revise_reserved(struct device *dev, void *dummy)
@@ -873,8 +760,6 @@ EXPORT_SYMBOL(ap_driver_unregister);
void ap_bus_force_rescan(void)
{
- if (ap_suspend_flag)
- return;
/* processing an asynchronous bus rescan */
del_timer(&ap_config_timer);
queue_work(system_long_wq, &ap_scan_work);
@@ -1021,7 +906,7 @@ EXPORT_SYMBOL(ap_parse_mask_str);
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}
static ssize_t ap_domain_store(struct bus_type *bus,
@@ -1047,14 +932,14 @@ static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->adm[0], ap_configuration->adm[1],
- ap_configuration->adm[2], ap_configuration->adm[3],
- ap_configuration->adm[4], ap_configuration->adm[5],
- ap_configuration->adm[6], ap_configuration->adm[7]);
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->adm[0], ap_configuration->adm[1],
+ ap_configuration->adm[2], ap_configuration->adm[3],
+ ap_configuration->adm[4], ap_configuration->adm[5],
+ ap_configuration->adm[6], ap_configuration->adm[7]);
}
static BUS_ATTR_RO(ap_control_domain_mask);
@@ -1062,14 +947,14 @@ static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->aqm[0], ap_configuration->aqm[1],
- ap_configuration->aqm[2], ap_configuration->aqm[3],
- ap_configuration->aqm[4], ap_configuration->aqm[5],
- ap_configuration->aqm[6], ap_configuration->aqm[7]);
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->aqm[0], ap_configuration->aqm[1],
+ ap_configuration->aqm[2], ap_configuration->aqm[3],
+ ap_configuration->aqm[4], ap_configuration->aqm[5],
+ ap_configuration->aqm[6], ap_configuration->aqm[7]);
}
static BUS_ATTR_RO(ap_usage_domain_mask);
@@ -1077,29 +962,29 @@ static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->apm[0], ap_configuration->apm[1],
- ap_configuration->apm[2], ap_configuration->apm[3],
- ap_configuration->apm[4], ap_configuration->apm[5],
- ap_configuration->apm[6], ap_configuration->apm[7]);
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->apm[0], ap_configuration->apm[1],
+ ap_configuration->apm[2], ap_configuration->apm[3],
+ ap_configuration->apm[4], ap_configuration->apm[5],
+ ap_configuration->apm[6], ap_configuration->apm[7]);
}
static BUS_ATTR_RO(ap_adapter_mask);
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- ap_using_interrupts() ? 1 : 0);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ ap_using_interrupts() ? 1 : 0);
}
static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}
static ssize_t config_time_store(struct bus_type *bus,
@@ -1118,7 +1003,7 @@ static BUS_ATTR_RW(config_time);
static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}
static ssize_t poll_thread_store(struct bus_type *bus,
@@ -1141,7 +1026,7 @@ static BUS_ATTR_RW(poll_thread);
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
@@ -1176,7 +1061,7 @@ static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
max_domain_id = ap_max_domain_id ? : -1;
else
max_domain_id = 15;
- return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
}
static BUS_ATTR_RO(ap_max_domain_id);
@@ -1187,10 +1072,10 @@ static ssize_t apmask_show(struct bus_type *bus, char *buf)
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- rc = snprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.apm[0], ap_perms.apm[1],
- ap_perms.apm[2], ap_perms.apm[3]);
+ rc = scnprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.apm[0], ap_perms.apm[1],
+ ap_perms.apm[2], ap_perms.apm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
@@ -1218,10 +1103,10 @@ static ssize_t aqmask_show(struct bus_type *bus, char *buf)
if (mutex_lock_interruptible(&ap_perms_mutex))
return -ERESTARTSYS;
- rc = snprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.aqm[0], ap_perms.aqm[1],
- ap_perms.aqm[2], ap_perms.aqm[3]);
+ rc = scnprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.aqm[0], ap_perms.aqm[1],
+ ap_perms.aqm[2], ap_perms.aqm[3]);
mutex_unlock(&ap_perms_mutex);
return rc;
@@ -1567,8 +1452,6 @@ static void ap_scan_bus(struct work_struct *unused)
static void ap_config_timeout(struct timer_list *unused)
{
- if (ap_suspend_flag)
- return;
queue_work(system_long_wq, &ap_scan_work);
}
@@ -1641,11 +1524,6 @@ static int __init ap_module_init(void)
ap_domain_index);
ap_domain_index = -1;
}
- /* In resume callback we need to know if the user had set the domain.
- * If so, we can not just reset it.
- */
- if (ap_domain_index >= 0)
- user_set_domain = 1;
if (ap_interrupts_available()) {
rc = register_adapter_interrupt(&ap_airq);
@@ -1688,17 +1566,11 @@ static int __init ap_module_init(void)
goto out_work;
}
- rc = register_pm_notifier(&ap_power_notifier);
- if (rc)
- goto out_pm;
-
queue_work(system_long_wq, &ap_scan_work);
initialised = true;
return 0;
-out_pm:
- ap_poll_thread_stop();
out_work:
hrtimer_cancel(&ap_poll_timer);
root_device_unregister(ap_root_device);
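The pervasive snprintf() -> scnprintf() swap in the crypto sysfs code matters because the two differ in return value: snprintf() reports the length the output *would* have had, which can exceed the buffer, while scnprintf() reports the bytes actually written (excluding the NUL) and therefore never exceeds it. A contrast sketch:

	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "hello world\n");	/* n == 12 */
	n = scnprintf(buf, sizeof(buf), "hello world\n");	/* n == 7  */

For a sysfs show routine that hands n back to the VFS, the snprintf variant could claim more bytes than the PAGE_SIZE buffer holds; scnprintf makes the return value safe to use directly, including in the chained buf + n, PAGE_SIZE - n calls above.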
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4348fdff1c61..8e8e37b6c0ee 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,7 +91,6 @@ enum ap_state {
AP_STATE_IDLE,
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
- AP_STATE_SUSPEND_WAIT,
AP_STATE_REMOVE, /* about to be removed from driver */
AP_STATE_UNBOUND, /* momentary not bound to a driver */
AP_STATE_BORKED, /* broken */
@@ -136,8 +135,6 @@ struct ap_driver {
int (*probe)(struct ap_device *);
void (*remove)(struct ap_device *);
- void (*suspend)(struct ap_device *);
- void (*resume)(struct ap_device *);
};
#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -259,8 +256,6 @@ void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
-void ap_queue_suspend(struct ap_device *ap_dev);
-void ap_queue_resume(struct ap_device *ap_dev);
void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index e85bfca1ed16..0a39dfdb6a1d 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -23,7 +23,7 @@ static ssize_t hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
}
static DEVICE_ATTR_RO(hwtype);
@@ -33,7 +33,7 @@ static ssize_t raw_hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
}
static DEVICE_ATTR_RO(raw_hwtype);
@@ -43,7 +43,7 @@ static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
}
static DEVICE_ATTR_RO(depth);
@@ -53,7 +53,7 @@ static ssize_t ap_functions_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+ return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
}
static DEVICE_ATTR_RO(ap_functions);
@@ -69,7 +69,7 @@ static ssize_t request_count_show(struct device *dev,
spin_lock_bh(&ap_list_lock);
req_cnt = atomic64_read(&ac->total_request_count);
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -102,7 +102,7 @@ static ssize_t requestq_count_show(struct device *dev,
for_each_ap_queue(aq, ac)
reqq_cnt += aq->requestq_count;
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -119,7 +119,7 @@ static ssize_t pendingq_count_show(struct device *dev,
for_each_ap_queue(aq, ac)
penq_cnt += aq->pendingq_count;
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -127,7 +127,8 @@ static DEVICE_ATTR_RO(pendingq_count);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+ return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n",
+ to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR_RO(modalias);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index a317ab484932..0eaf1d04e8df 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -152,7 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
ap_msg->receive(aq, ap_msg, aq->reply);
break;
}
- /* fall through */
+ fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
@@ -201,31 +201,6 @@ static enum ap_wait ap_sm_read(struct ap_queue *aq)
}
/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @aq: pointer to the AP queue
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
-{
- struct ap_queue_status status;
-
- if (!aq->reply)
- return AP_WAIT_NONE;
- status = ap_sm_recv(aq);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (aq->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fall through */
- default:
- return AP_WAIT_NONE;
- }
-}
-
-/**
* ap_sm_write(): Send messages from the request queue to an AP queue.
* @aq: pointer to the AP queue
*
@@ -254,7 +229,7 @@ static enum ap_wait ap_sm_write(struct ap_queue *aq)
aq->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
- /* fall through */
+ fallthrough;
case AP_RESPONSE_Q_FULL:
aq->state = AP_STATE_QUEUE_FULL;
return AP_WAIT_INTERRUPT;
@@ -380,7 +355,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
return AP_WAIT_AGAIN;
- /* fallthrough */
+ fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
return AP_WAIT_TIMEOUT;
default:
@@ -417,10 +392,6 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_POLL] = ap_sm_read,
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
- [AP_STATE_SUSPEND_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_suspend_read,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
[AP_STATE_REMOVE] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -450,28 +421,6 @@ enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
}
/*
- * Power management for queue devices
- */
-void ap_queue_suspend(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-
- /* Poll on the device until all requests are finished. */
- spin_lock_bh(&aq->lock);
- aq->state = AP_STATE_SUSPEND_WAIT;
- while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
- ;
- aq->state = AP_STATE_BORKED;
- spin_unlock_bh(&aq->lock);
-}
-EXPORT_SYMBOL(ap_queue_suspend);
-
-void ap_queue_resume(struct ap_device *ap_dev)
-{
-}
-EXPORT_SYMBOL(ap_queue_resume);
-
-/*
* AP queue related attributes.
*/
static ssize_t request_count_show(struct device *dev,
@@ -484,7 +433,7 @@ static ssize_t request_count_show(struct device *dev,
spin_lock_bh(&aq->lock);
req_cnt = aq->total_request_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -511,7 +460,7 @@ static ssize_t requestq_count_show(struct device *dev,
spin_lock_bh(&aq->lock);
reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -525,7 +474,7 @@ static ssize_t pendingq_count_show(struct device *dev,
spin_lock_bh(&aq->lock);
penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -540,14 +489,14 @@ static ssize_t reset_show(struct device *dev,
switch (aq->state) {
case AP_STATE_RESET_START:
case AP_STATE_RESET_WAIT:
- rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
break;
case AP_STATE_WORKING:
case AP_STATE_QUEUE_FULL:
- rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
break;
default:
- rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
}
spin_unlock_bh(&aq->lock);
return rc;
@@ -581,11 +530,11 @@ static ssize_t interrupt_show(struct device *dev,
spin_lock_bh(&aq->lock);
if (aq->state == AP_STATE_SETIRQ_WAIT)
- rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
else if (aq->interrupt == AP_INTR_ENABLED)
- rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
else
- rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+ rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
spin_unlock_bh(&aq->lock);
return rc;
}
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 2f33c5fcf676..74e63ec49068 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -80,7 +80,7 @@ struct clearaeskeytoken {
u8 res1[3];
u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
u32 len; /* bytes actually stored in clearkey[] */
- u8 clearkey[0]; /* clear key value */
+ u8 clearkey[]; /* clear key value */
} __packed;
/*
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 5c0f53c6dde7..e0bde8518745 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -90,7 +90,7 @@ static void vfio_ap_wait_for_irqclear(int apqn)
case AP_RESPONSE_RESET_IN_PROGRESS:
if (!status.irq_enabled)
return;
- /* Fall through */
+ fallthrough;
case AP_RESPONSE_BUSY:
msleep(20);
break;
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index d4f35a183c15..c53cab4b0c9e 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -41,7 +41,7 @@ static ssize_t type_show(struct device *dev,
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
}
static DEVICE_ATTR_RO(type);
@@ -52,7 +52,7 @@ static ssize_t online_show(struct device *dev,
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", zc->online);
}
static ssize_t online_store(struct device *dev,
@@ -86,7 +86,7 @@ static ssize_t load_show(struct device *dev,
{
struct zcrypt_card *zc = to_ap_card(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
}
static DEVICE_ATTR_RO(load);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 110fe9d0cb91..1b835398feec 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -592,7 +592,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
u8 pad2[1];
u8 vptype;
u8 vp[32]; /* verification pattern */
- } keyblock;
+ } ckb;
} lv3;
} __packed * prepparm;
@@ -650,15 +650,16 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
/* check the returned keyblock */
- if (prepparm->lv3.keyblock.version != 0x01) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n",
- __func__, (int) prepparm->lv3.keyblock.version);
+ if (prepparm->lv3.ckb.version != 0x01 &&
+ prepparm->lv3.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int) prepparm->lv3.ckb.version);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
- switch (prepparm->lv3.keyblock.len) {
+ switch (prepparm->lv3.ckb.len) {
case 16+32:
/* AES 128 protected key */
if (protkeytype)
@@ -676,13 +677,13 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
break;
default:
DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->lv3.keyblock.len);
+ __func__, prepparm->lv3.ckb.len);
rc = -EIO;
goto out;
}
- memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len);
+ memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len);
if (protkeylen)
- *protkeylen = prepparm->lv3.keyblock.len;
+ *protkeylen = prepparm->lv3.ckb.len;
out:
free_cprbmem(mem, PARMBSIZE, 0);
@@ -1260,10 +1261,10 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
/* check the returned keyblock */
- if (prepparm->vud.ckb.version != 0x01) {
- DEBUG_ERR(
- "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
- __func__, (int) prepparm->vud.ckb.version);
+ if (prepparm->vud.ckb.version != 0x01 &&
+ prepparm->vud.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int) prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
@@ -1568,9 +1569,9 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
return -EINVAL;
/* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
@@ -1640,7 +1641,7 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
} else
rc = -ENODEV;
- kfree(device_status);
+ kvfree(device_status);
return rc;
}
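kvmalloc_array(), now used for the MAX_ZDEV_ENTRIES_EXT status array, first attempts a kmalloc and transparently falls back to vmalloc when the request is too large for a contiguous allocation; the matching kvfree() dispatches to the correct deallocator. The pairing, in outline:

	p = kvmalloc_array(n, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	/* ... use p ... */
	kvfree(p);	/* correct for either backing allocator */

The one constraint is that the GFP flags must be vmalloc-compatible (essentially GFP_KERNEL), which holds here.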
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 3a9876d5ab0e..8b7a641671c9 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -90,7 +90,7 @@ struct cipherkeytoken {
u16 kmf1; /* key management field 1 */
u16 kmf2; /* key management field 2 */
u16 kmf3; /* key management field 3 */
- u8 vdata[0]; /* variable part data follows */
+ u8 vdata[]; /* variable part data follows */
} __packed;
/* Some defines for the CCA AES cipherkeytoken kmf1 field */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 7cbb384ec535..b447f3e9e4a2 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -204,8 +204,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
static struct ap_driver zcrypt_cex2a_queue_driver = {
.probe = zcrypt_cex2a_queue_probe,
.remove = zcrypt_cex2a_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
.ids = zcrypt_cex2a_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index c78c0d119806..266440168bb7 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -260,8 +260,6 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
static struct ap_driver zcrypt_cex2c_queue_driver = {
.probe = zcrypt_cex2c_queue_probe,
.remove = zcrypt_cex2c_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
.ids = zcrypt_cex2c_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 9a9d02e19774..cdaa8348ad04 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -87,7 +87,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
if (ap_domain_index >= 0)
cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
- return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
}
static struct device_attribute dev_attr_cca_serialnr =
@@ -122,22 +122,24 @@ static ssize_t cca_mkvps_show(struct device *dev,
&ci, zq->online);
if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
- n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_mk_state - '1'], ci.new_mkvp);
else
- n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
- n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n",
- cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
else
- n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
- n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n",
- cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
else
- n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
return n;
}
@@ -170,9 +172,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.API_ord_nr > 0)
- return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static struct device_attribute dev_attr_ep11_api_ordinalnr =
@@ -191,11 +193,11 @@ static ssize_t ep11_fw_version_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.FW_version > 0)
- return snprintf(buf, PAGE_SIZE, "%d.%d\n",
- (int)(ci.FW_version >> 8),
- (int)(ci.FW_version & 0xFF));
+ return scnprintf(buf, PAGE_SIZE, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static struct device_attribute dev_attr_ep11_fw_version =
@@ -214,9 +216,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
if (ci.serial[0])
- return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+ return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ return scnprintf(buf, PAGE_SIZE, "\n");
}
static struct device_attribute dev_attr_ep11_serialnr =
@@ -251,11 +253,11 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
- n += snprintf(buf + n, PAGE_SIZE - n,
- "%s", ep11_op_modes[i].mode_txt);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
}
}
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
return n;
}
@@ -298,28 +300,28 @@ static ssize_t ep11_mkvps_show(struct device *dev,
&di);
if (di.cur_wk_state == '0') {
- n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
- cwk_state[di.cur_wk_state - '0']);
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
} else if (di.cur_wk_state == '1') {
- n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
- cwk_state[di.cur_wk_state - '0']);
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
n += 2 * sizeof(di.cur_wkvp);
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
} else
- n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
if (di.new_wk_state == '0') {
- n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
- nwk_state[di.new_wk_state - '0']);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
} else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
- n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
- nwk_state[di.new_wk_state - '0']);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
n += 2 * sizeof(di.new_wkvp);
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
} else
- n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
return n;
}
@@ -346,11 +348,11 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
- n += snprintf(buf + n, PAGE_SIZE - n,
- "%s", ep11_op_modes[i].mode_txt);
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
}
}
- n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
return n;
}
@@ -654,8 +656,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
static struct ap_driver zcrypt_cex4_queue_driver = {
.probe = zcrypt_cex4_queue_probe,
.remove = zcrypt_cex4_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
.ids = zcrypt_cex4_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
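
The point of the snprintf() to scnprintf() conversions in this file (and in zcrypt_queue.c below) is the different return value: snprintf() returns the length the output would have had without truncation, so a chained "n += snprintf(buf + n, PAGE_SIZE - n, ...)" can push n past PAGE_SIZE and hand the next call a wrapped-around size. scnprintf() returns the number of bytes actually written, excluding the terminating NUL, which keeps the running offset valid. Illustrative fragment only, assuming buf is the PAGE_SIZE sysfs buffer:

        int n;

        n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
        n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
        /* n can never exceed PAGE_SIZE - 1, even after truncation */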
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 2afe2153b34e..004ce022fc78 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -1217,9 +1217,9 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
struct ep11_card_info eci;
/* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
@@ -1227,7 +1227,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
/* allocate 1k space for up to 256 apqns */
_apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
if (!_apqns) {
- kfree(device_status);
+ kvfree(device_status);
return -ENOMEM;
}
@@ -1282,7 +1282,7 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
rc = 0;
}
- kfree(device_status);
+ kvfree(device_status);
return rc;
}
EXPORT_SYMBOL(ep11_findcard2);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index a36251d138fb..fd1cbb2d6b3f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -590,7 +590,7 @@ struct type86x_reply {
struct CPRBX cprbx;
unsigned char pad[4]; /* 4 byte function code/rules block ? */
unsigned short length;
- char text[0];
+ char text[];
} __packed;
struct type86_ep11_reply {
@@ -801,7 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
- /* fall through - wrong cprb version is an unknown response */
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -834,7 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zq, reply, xcRB);
- /* fall through - wrong cprb version is an unknown response */
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
@@ -864,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
return convert_type86_ep11_xcrb(zq, reply, xcRB);
- /* fall through - wrong cprb version is an unknown resp */
+ fallthrough; /* wrong cprb version is an unknown resp */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -894,7 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
- /* fall through - wrong cprb version is an unknown response */
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
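
These hunks replace the /* fall through */ comment convention with the fallthrough pseudo-keyword, which <linux/compiler_attributes.h> maps to __attribute__((__fallthrough__)) on compilers that support it and to an empty statement otherwise, so -Wimplicit-fallthrough stays effective without relying on comment parsing. A minimal illustration with hypothetical states:

        switch (state) {
        case STATE_SETUP:
                do_setup();
                fallthrough;    /* deliberate: setup continues into run */
        case STATE_RUN:
                do_run();
                break;
        default:
                break;
        }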
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 522c4bc69a08..b7d9fa567880 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -42,7 +42,7 @@ static ssize_t online_show(struct device *dev,
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", zq->online);
}
static ssize_t online_store(struct device *dev,
@@ -78,7 +78,7 @@ static ssize_t load_show(struct device *dev,
{
struct zcrypt_queue *zq = to_ap_queue(dev)->private;
- return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
}
static DEVICE_ATTR_RO(load);
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 4fc2056bd227..c75112ee7b97 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -567,31 +567,11 @@ static void ism_remove(struct pci_dev *pdev)
kfree(ism);
}
-static int ism_suspend(struct device *dev)
-{
- struct ism_dev *ism = dev_get_drvdata(dev);
-
- ism_dev_exit(ism);
- return 0;
-}
-
-static int ism_resume(struct device *dev)
-{
- struct ism_dev *ism = dev_get_drvdata(dev);
-
- return ism_dev_init(ism);
-}
-
-static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
-
static struct pci_driver ism_driver = {
.name = DRV_NAME,
.id_table = ism_device_table,
.probe = ism_probe,
.remove = ism_remove,
- .driver = {
- .pm = &ism_pm_ops,
- },
};
static int __init ism_init(void)
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 1234294700c4..673e42defb91 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -104,6 +104,48 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
}
/**
+ * zfcp_dbf_hba_fsf_fces - trace event for fsf responses related to
+ * FC Endpoint Security (FCES)
+ * @tag: tag indicating which kind of FC Endpoint Security event has occurred
+ * @req: request for which a response was received
+ * @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
+ * @fc_security_old: old FC Endpoint Security of FCP device or connection
+ * @fc_security_new: new FC Endpoint Security of FCP device or connection
+ */
+void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
+ u32 fc_security_old, u32 fc_security_new)
+{
+ struct zfcp_dbf *dbf = req->adapter->dbf;
+ struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+ struct fsf_qtcb_header *q_head = &req->qtcb->header;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+ static int const level = 3;
+ unsigned long flags;
+
+ if (unlikely(!debug_level_enabled(dbf->hba, level)))
+ return;
+
+ spin_lock_irqsave(&dbf->hba_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_FCES;
+ rec->fsf_req_id = req->req_id;
+ rec->fsf_req_status = req->status;
+ rec->fsf_cmd = q_head->fsf_command;
+ rec->fsf_seq_no = q_pref->req_seq_no;
+ rec->u.fces.req_issued = req->issued;
+ rec->u.fces.fsf_status = q_head->fsf_status;
+ rec->u.fces.port_handle = q_head->port_handle;
+ rec->u.fces.wwpn = wwpn;
+ rec->u.fces.fc_security_old = fc_security_old;
+ rec->u.fces.fc_security_new = fc_security_new;
+
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
+/**
* zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request providing the unsolicited status
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 900c779cc39b..4d1435c573bc 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -3,7 +3,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2017
+ * Copyright IBM Corp. 2008, 2020
*/
#ifndef ZFCP_DBF_H
@@ -16,6 +16,7 @@
#define ZFCP_DBF_TAG_LEN 7
+#define ZFCP_DBF_INVALID_WWPN 0x0000000000000000ull
#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
enum zfcp_dbf_pseudo_erp_act_type {
@@ -158,17 +159,38 @@ struct zfcp_dbf_hba_uss {
} __packed;
/**
+ * struct zfcp_dbf_hba_fces - trace record for FC Endpoint Security
+ * @req_issued: timestamp when request was issued
+ * @fsf_status: fsf status
+ * @port_handle: handle for port
+ * @wwpn: remote FC port WWPN
+ * @fc_security_old: old FC Endpoint Security
+ * @fc_security_new: new FC Endpoint Security
+ */
+struct zfcp_dbf_hba_fces {
+ u64 req_issued;
+ u32 fsf_status;
+ u32 port_handle;
+ u64 wwpn;
+ u32 fc_security_old;
+ u32 fc_security_new;
+} __packed;
+
+/**
* enum zfcp_dbf_hba_id - HBA trace record identifier
* @ZFCP_DBF_HBA_RES: response trace record
* @ZFCP_DBF_HBA_USS: unsolicited status trace record
* @ZFCP_DBF_HBA_BIT: bit error trace record
* @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
+ * @ZFCP_DBF_HBA_FCES: FC Endpoint Security trace record
*/
enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
ZFCP_DBF_HBA_USS = 2,
ZFCP_DBF_HBA_BIT = 3,
ZFCP_DBF_HBA_BASIC = 4,
+ ZFCP_DBF_HBA_FCES = 5,
};
/**
@@ -181,9 +203,10 @@ enum zfcp_dbf_hba_id {
* @fsf_seq_no: fsf sequence number
* @pl_len: length of payload stored as zfcp_dbf_pay
* @u: record type specific data
- * @u.res: data for fsf responses
- * @u.uss: data for unsolicited status buffer
- * @u.be: data for bit error unsolicited status buffer
+ * @u.res: data for fsf responses
+ * @u.uss: data for unsolicited status buffer
+ * @u.be: data for bit error unsolicited status buffer
+ * @u.fces: data for FC Endpoint Security
*/
struct zfcp_dbf_hba {
u8 id;
@@ -197,6 +220,7 @@ struct zfcp_dbf_hba {
struct zfcp_dbf_hba_res res;
struct zfcp_dbf_hba_uss uss;
struct fsf_bit_error_payload be;
+ struct zfcp_dbf_hba_fces fces;
} u;
} __packed;
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 8cc0eefe4ccc..da8a5ceb615c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#ifndef ZFCP_DEF_H
@@ -158,6 +158,8 @@ struct zfcp_adapter {
u32 adapter_features; /* FCP channel features */
u32 connection_features; /* host connection features */
u32 hardware_version; /* of FCP channel */
+ u32 fc_security_algorithms; /* of FCP channel */
+ u32 fc_security_algorithms_old; /* of FCP channel */
u16 timer_ticks; /* time int for a tick */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
struct list_head port_list; /* remote port list */
@@ -218,6 +220,8 @@ struct zfcp_port {
atomic_t erp_counter;
u32 maxframe_size;
u32 supported_classes;
+ u32 connection_info;
+ u32 connection_info_old;
struct work_struct gid_pn_work;
struct work_struct test_link_work;
struct work_struct rport_work;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 93655b85b73f..18a6751299f9 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -725,7 +725,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
adapter->peer_d_id);
if (IS_ERR(port)) /* error or port already attached */
return;
- _zfcp_erp_port_reopen(port, 0, "ereptp1");
+ zfcp_erp_port_reopen(port, 0, "ereptp1");
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c8556787cfdc..88294ca0e2ea 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#ifndef ZFCP_EXT_H
@@ -44,6 +44,9 @@ extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req,
+ u64 wwpn, u32 fc_security_old,
+ u32 fc_security_new);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
@@ -135,6 +138,13 @@ extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
+enum zfcp_fsf_print_fmt {
+ ZFCP_FSF_PRINT_FMT_LIST,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM,
+};
+extern ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size,
+ u32 fc_security,
+ enum zfcp_fsf_print_fmt fmt);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index cae9b7ff79b0..7c603e5b5b19 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4,7 +4,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -120,6 +120,23 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
+static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+{
+ struct Scsi_Host *shost = adapter->scsi_host;
+
+ fc_host_port_id(shost) = 0;
+ fc_host_fabric_name(shost) = 0;
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+ adapter->hydra_version = 0;
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
+ memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
+
+ adapter->peer_wwpn = 0;
+ adapter->peer_wwnn = 0;
+ adapter->peer_d_id = 0;
+}
+
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
struct fsf_link_down_info *link_down)
{
@@ -132,6 +149,8 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
zfcp_scsi_schedule_rports_block(adapter);
+ zfcp_fsf_fc_host_link_down(adapter);
+
if (!link_down)
goto out;
@@ -502,6 +521,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
+ snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+ "IBM");
fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
@@ -510,9 +531,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
- if (fc_host_permanent_port_name(shost) == -1)
- fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
-
zfcp_scsi_set_prot(adapter);
/* no error return above here, otherwise must fix call chains */
@@ -525,6 +543,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
adapter->hydra_version = bottom->adapter_type;
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+ bottom->adapter_type);
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
@@ -532,8 +552,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ fc_host_fabric_name(shost) = 0;
break;
case FSF_TOPO_FABRIC:
+ fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
else
@@ -541,8 +563,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ fc_host_fabric_name(shost) = 0;
/* fall through */
default:
+ fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
@@ -565,6 +589,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
+ snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->lic_version);
adapter->fsf_lic_version = bottom->lic_version;
adapter->adapter_features = bottom->adapter_features;
adapter->connection_features = bottom->connection_features;
@@ -598,13 +624,6 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
zfcp_diag_update_xdata(diag_hdr, bottom, true);
req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
- fc_host_node_name(shost) = 0;
- fc_host_port_name(shost) = 0;
- fc_host_port_id(shost) = 0;
- fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
- fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
- adapter->hydra_version = 0;
-
/* avoids adapter shutdown to be able to recognize
* events such as LINK UP */
atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -621,6 +640,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
adapter->hardware_version = bottom->hardware_version;
+ snprintf(fc_host_hardware_version(shost),
+ FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->hardware_version);
memcpy(fc_host_serial_number(shost), bottom->serial_number,
min(FC_SERIAL_NUMBER_SIZE, 17));
EBCASC(fc_host_serial_number(shost),
@@ -642,6 +664,99 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
}
}
+/*
+ * Mapping of FC Endpoint Security flag masks to mnemonics
+ *
+ * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
+ * changes.
+ */
+static const struct {
+ u32 mask;
+ char *name;
+} zfcp_fsf_fc_security_mnemonics[] = {
+ { FSF_FC_SECURITY_AUTH, "Authentication" },
+ { FSF_FC_SECURITY_ENC_FCSP2 |
+ FSF_FC_SECURITY_ENC_ERAS, "Encryption" },
+};
+
+/* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
+#define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
+
+/**
+ * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
+ * mnemonics and place in a buffer
+ * @buf : the buffer to place the translated FC Endpoint Security flag(s)
+ * into
+ * @size : the size of the buffer, including the trailing null space
+ * @fc_security: one or more FC Endpoint Security flags, or zero
+ * @fmt : specifies whether a list or a single item is to be put into the
+ * buffer
+ *
+ * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
+ * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
+ *
+ * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
+ * a comma followed by a space into the buffer. If one or more FC Endpoint
+ * Security flags cannot be translated into a mnemonic, as they are undefined
+ * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
+ * representation is placed into the buffer.
+ *
+ * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
+ * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
+ * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
+ * representation is placed into the buffer. If more than one FC Endpoint
+ * Security flag was specified, their value in hexadecimal representation is
+ * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
+ * can be used to define a buffer that is large enough to hold one mnemonic.
+ *
+ * Return: The number of characters written into buf not including the trailing
+ * '\0'. If size == 0, the function returns 0.
+ */
+ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
+ enum zfcp_fsf_print_fmt fmt)
+{
+ const char *prefix = "";
+ ssize_t len = 0;
+ int i;
+
+ if (fc_security == 0)
+ return scnprintf(buf, size, "none");
+ if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
+ return scnprintf(buf, size, "0x%08x", fc_security);
+
+ for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
+ if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
+ continue;
+
+ len += scnprintf(buf + len, size - len, "%s%s", prefix,
+ zfcp_fsf_fc_security_mnemonics[i].name);
+ prefix = ", ";
+ fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
+ }
+
+ if (fc_security != 0)
+ len += scnprintf(buf + len, size - len, "%s0x%08x",
+ prefix, fc_security);
+
+ return len;
+}
+
+static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
+ struct zfcp_fsf_req *req)
+{
+ if (adapter->fc_security_algorithms ==
+ adapter->fc_security_algorithms_old) {
+ /* no change, no trace */
+ return;
+ }
+
+ zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
+ adapter->fc_security_algorithms_old,
+ adapter->fc_security_algorithms);
+
+ adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
+}
+
static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
@@ -651,10 +766,7 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
- fc_host_permanent_port_name(shost) = bottom->wwpn;
- } else
- fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) =
zfcp_fsf_convert_portspeed(bottom->supported_speed);
@@ -662,6 +774,12 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
FC_FC4_LIST_SIZE);
memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
FC_FC4_LIST_SIZE);
+ if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
+ adapter->fc_security_algorithms =
+ bottom->fc_security_algorithms;
+ else
+ adapter->fc_security_algorithms = 0;
+ zfcp_fsf_dbf_adapter_fc_security(adapter, req);
}
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -688,9 +806,9 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
zfcp_diag_update_xdata(diag_hdr, bottom, true);
req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
- zfcp_fsf_exchange_port_evaluate(req);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+ zfcp_fsf_exchange_port_evaluate(req);
break;
}
}
@@ -1287,7 +1405,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT |
- FSF_FEATURE_REQUEST_SFP_DATA;
+ FSF_FEATURE_REQUEST_SFP_DATA |
+ FSF_FEATURE_FC_SECURITY;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req_id = req->req_id;
@@ -1341,7 +1460,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
FSF_FEATURE_UPDATE_ALERT |
- FSF_FEATURE_REQUEST_SFP_DATA;
+ FSF_FEATURE_REQUEST_SFP_DATA |
+ FSF_FEATURE_FC_SECURITY;
if (data)
req->data = data;
@@ -1478,10 +1598,117 @@ out_unlock:
return retval;
}
+static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
+ struct zfcp_fsf_req *req)
+{
+ char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
+ char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
+
+ if (port->connection_info == port->connection_info_old) {
+ /* no change, no log nor trace */
+ return;
+ }
+
+ zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
+ port->connection_info_old,
+ port->connection_info);
+
+ zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
+ port->connection_info_old,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+ zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
+ port->connection_info,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+
+ if (strncmp(mnemonic_old, mnemonic_new,
+ ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
+ /* no change in string representation, no log */
+ goto out;
+ }
+
+ if (port->connection_info_old == 0) {
+ /* activation */
+ dev_info(&port->adapter->ccw_device->dev,
+ "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
+ port->wwpn, mnemonic_new);
+ } else if (port->connection_info == 0) {
+ /* deactivation */
+ dev_warn(&port->adapter->ccw_device->dev,
+ "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
+ port->wwpn, mnemonic_old);
+ } else {
+ /* change */
+ dev_warn(&port->adapter->ccw_device->dev,
+ "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
+ port->wwpn, mnemonic_old, mnemonic_new);
+ }
+
+out:
+ port->connection_info_old = port->connection_info;
+}
+
+static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
+ u64 wwpn)
+{
+ switch (fsf_sqw0) {
+
+ /*
+ * Open Port command error codes
+ */
+
+ case FSF_SQ_SECURITY_REQUIRED:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_TIMEOUT:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_KM_UNAVAILABLE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
+ wwpn);
+ break;
+ case FSF_SQ_SECURITY_AUTH_FAILURE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
+ wwpn);
+ break;
+
+ /*
+ * Send FCP command error codes
+ */
+
+ case FSF_SQ_SECURITY_ENC_FAILURE:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
+ wwpn);
+ break;
+
+ /*
+ * Unknown error codes
+ */
+
+ default:
+ dev_warn_ratelimited(dev,
+ "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
+ fsf_sqw0, wwpn);
+ }
+}
+
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
+ struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port = req->data;
struct fsf_qtcb_header *header = &req->qtcb->header;
+ struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
struct fc_els_flogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -1491,7 +1718,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
case FSF_PORT_ALREADY_OPEN:
break;
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
- dev_warn(&req->adapter->ccw_device->dev,
+ dev_warn(&adapter->ccw_device->dev,
"Not enough FCP adapter resources to open "
"remote port 0x%016Lx\n",
(unsigned long long)port->wwpn);
@@ -1499,6 +1726,12 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
ZFCP_STATUS_COMMON_ERP_FAILED);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
+ case FSF_SECURITY_ERROR:
+ zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
+ header->fsf_status_qual.word[0],
+ port->wwpn);
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
@@ -1512,6 +1745,11 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
break;
case FSF_GOOD:
port->handle = header->port_handle;
+ if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
+ port->connection_info = bottom->connection_info;
+ else
+ port->connection_info = 0;
+ zfcp_fsf_log_port_fc_security(port, req);
atomic_or(ZFCP_STATUS_COMMON_OPEN |
ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
@@ -1531,10 +1769,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
* another GID_PN straight after a port has been opened.
* Alternately, an ADISC/PDISC ELS should suffice, as well.
*/
- plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
- if (req->qtcb->bottom.support.els1_length >=
- FSF_PLOGI_MIN_LEN)
- zfcp_fc_plogi_evaluate(port, plogi);
+ plogi = (struct fc_els_flogi *) bottom->els;
+ if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
+ zfcp_fc_plogi_evaluate(port, plogi);
break;
case FSF_UNKNOWN_OP_SUBTYPE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -2225,6 +2462,13 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
zfcp_fc_test_link(zfcp_sdev->port);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
+ case FSF_SECURITY_ERROR:
+ zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
+ header->fsf_status_qual.word[0],
+ zfcp_sdev->port->wwpn);
+ zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
}
}
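
To make the helper's two output formats concrete, here is a hypothetical caller (the flag and format constants are the ones introduced by this patch; the buffers and call sites are illustrative):

        char one[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
        char list[64];

        /* single flag, single-item format -> "Authentication" */
        zfcp_fsf_scnprint_fc_security(one, sizeof(one), FSF_FC_SECURITY_AUTH,
                                      ZFCP_FSF_PRINT_FMT_SINGLEITEM);

        /* multiple flags, list format -> "Authentication, Encryption" */
        zfcp_fsf_scnprint_fc_security(list, sizeof(list),
                                      FSF_FC_SECURITY_AUTH |
                                      FSF_FC_SECURITY_ENC_FCSP2,
                                      ZFCP_FSF_PRINT_FMT_LIST);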
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 4bfb79f20588..09d73d0061ef 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -4,7 +4,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#ifndef FSF_H
@@ -78,6 +78,7 @@
#define FSF_BLOCK_GUARD_CHECK_FAILURE 0x00000081
#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
+#define FSF_SECURITY_ERROR 0x00000090
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
@@ -110,6 +111,14 @@
#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
+/* FSF status qualifier, security error */
+#define FSF_SQ_SECURITY_REQUIRED 0x00000001
+#define FSF_SQ_SECURITY_TIMEOUT 0x00000002
+#define FSF_SQ_SECURITY_KM_UNAVAILABLE 0x00000003
+#define FSF_SQ_SECURITY_RKM_UNAVAILABLE 0x00000004
+#define FSF_SQ_SECURITY_AUTH_FAILURE 0x00000005
+#define FSF_SQ_SECURITY_ENC_FAILURE 0x00000010
+
/* payload size in status read buffer */
#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
@@ -165,6 +174,7 @@
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
#define FSF_FEATURE_REQUEST_SFP_DATA 0x00000200
#define FSF_FEATURE_REPORT_SFP_DATA 0x00000800
+#define FSF_FEATURE_FC_SECURITY 0x00001000
#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
@@ -174,6 +184,11 @@
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
+/* FC security algorithms */
+#define FSF_FC_SECURITY_AUTH 0x00000001
+#define FSF_FC_SECURITY_ENC_FCSP2 0x00000002
+#define FSF_FC_SECURITY_ENC_ERAS 0x00000004
+
struct fsf_queue_designator {
u8 cssid;
u8 chpid;
@@ -338,7 +353,8 @@ struct fsf_qtcb_bottom_support {
u8 res3[3];
u8 timeout;
u32 lun_access_info;
- u8 res4[180];
+ u32 connection_info;
+ u8 res4[176];
u32 els1_length;
u32 els2_length;
u32 req_buf_length;
@@ -426,7 +442,8 @@ struct fsf_qtcb_bottom_port {
u16 port_tx_type :4;
};
} sfp_flags;
- u8 res3[240];
+ u32 fc_security_algorithms;
+ u8 res3[236];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 3910d529c15a..13d873f806e4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -856,6 +856,10 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
+ .show_host_manufacturer = 1,
+ .show_host_model = 1,
+ .show_host_hardware_version = 1,
+ .show_host_firmware_version = 1,
.get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
.reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
.set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
@@ -871,5 +875,6 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
+ .show_host_fabric_name = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index a711a0d15100..7ec30ded0169 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -4,7 +4,7 @@
*
* sysfs attributes.
*
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -370,6 +370,42 @@ static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
zfcp_sysfs_adapter_diag_max_age_show,
zfcp_sysfs_adapter_diag_max_age_store);
+static ssize_t zfcp_sysfs_adapter_fc_security_show(
+ struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ unsigned int status;
+ int i;
+
+ if (!adapter)
+ return -ENODEV;
+
+ /*
+ * Adapter status COMMON_OPEN implies that xconf data and xport
+ * data exchange has completed. The adapter's FC Endpoint Security
+ * capability remains unchanged in case of COMMON_ERP_FAILED
+ * (e.g. due to local link down).
+ */
+ status = atomic_read(&adapter->status);
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN))
+ i = sprintf(buf, "unknown\n");
+ else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
+ i = sprintf(buf, "unsupported\n");
+ else {
+ i = zfcp_fsf_scnprint_fc_security(
+ buf, PAGE_SIZE - 1, adapter->fc_security_algorithms,
+ ZFCP_FSF_PRINT_FMT_LIST);
+ i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+ }
+
+ zfcp_ccw_adapter_put(adapter);
+ return i;
+}
+static ZFCP_DEV_ATTR(adapter, fc_security, S_IRUGO,
+ zfcp_sysfs_adapter_fc_security_show,
+ NULL);
+
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
@@ -383,6 +419,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
&dev_attr_adapter_diag_max_age.attr,
+ &dev_attr_adapter_fc_security.attr,
NULL
};
@@ -426,6 +463,36 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
+static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+ struct zfcp_adapter *adapter = port->adapter;
+ unsigned int status = atomic_read(&port->status);
+ int i;
+
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+ 0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+ i = sprintf(buf, "unknown\n");
+ else if (!(adapter->adapter_features & FSF_FEATURE_FC_SECURITY))
+ i = sprintf(buf, "unsupported\n");
+ else {
+ i = zfcp_fsf_scnprint_fc_security(
+ buf, PAGE_SIZE - 1, port->connection_info,
+ ZFCP_FSF_PRINT_FMT_SINGLEITEM);
+ i += scnprintf(buf + i, PAGE_SIZE - i, "\n");
+ }
+
+ return i;
+}
+static ZFCP_DEV_ATTR(port, fc_security, S_IRUGO,
+ zfcp_sysfs_port_fc_security_show,
+ NULL);
+
static struct attribute *zfcp_port_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
@@ -433,6 +500,7 @@ static struct attribute *zfcp_port_attrs[] = {
&dev_attr_port_in_recovery.attr,
&dev_attr_port_status.attr,
&dev_attr_port_access_denied.attr,
+ &dev_attr_port_fc_security.attr,
NULL
};
static struct attribute_group zfcp_port_attr_group = {
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 12d66aa61ede..843e830b5f87 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -37,8 +37,6 @@
#define DRIVER_NAME "envctrl"
#define PFX DRIVER_NAME ": "
-#define ENVCTRL_MINOR 162
-
#define PCF8584_ADDRESS 0x55
#define CONTROL_PIN 0x80
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index e85a05aca4d6..4147d22fd448 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -31,8 +31,6 @@ static struct {
unsigned long busy; /* In use? */
} flash;
-#define FLASH_MINOR 152
-
static int
flash_mmap(struct file *file, struct vm_area_struct *vma)
{
@@ -157,7 +155,7 @@ static const struct file_operations flash_fops = {
.release = flash_release,
};
-static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
+static struct miscdevice flash_dev = { SBUS_FLASH_MINOR, "flash", &flash_fops };
static int flash_probe(struct platform_device *op)
{
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 7173a2e4e8cf..37d252f2548d 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -23,8 +23,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>
-#define UCTRL_MINOR 174
-
#define DEBUG 1
#ifdef DEBUG
#define dprintk(x) printk x
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index e2956741fbd1..5f65cb75f534 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
53c700_d.h
scsi_devinfo_tbl.c
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 186259417449..b5b3154e2c28 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3654,7 +3654,7 @@ static bool __init blogic_parse(char **str, char *keyword)
selected host adapter.
The BusLogic Driver Probing Options are described in
- <file:Documentation/scsi/BusLogic.txt>.
+ <file:Documentation/scsi/BusLogic.rst>.
*/
static int __init blogic_parseopts(char *options)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 1b6eaf8da5fa..17feff174f57 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -33,7 +33,7 @@ config SCSI
Channel, and FireWire storage.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called scsi_mod.
However, do not compile this as a module if your root file system
@@ -79,7 +79,7 @@ config BLK_DEV_SD
CD-ROMs.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called sd_mod.
Do not compile this driver as a module if your root file system
@@ -94,11 +94,11 @@ config CHR_DEV_ST
If you want to use a SCSI tape drive under Linux, say Y and read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, and
- <file:Documentation/scsi/st.txt> in the kernel source. This is NOT
+ <file:Documentation/scsi/st.rst> in the kernel source. This is NOT
for SCSI CD-ROMs.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>. The module will be called st.
+ <file:Documentation/scsi/scsi.rst>. The module will be called st.
config BLK_DEV_SR
tristate "SCSI CDROM support"
@@ -112,18 +112,9 @@ config BLK_DEV_SR
Make sure to say Y or M to "ISO 9660 CD-ROM file system support".
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called sr_mod.
-config BLK_DEV_SR_VENDOR
- bool "Enable vendor-specific extensions (for SCSI CDROM)"
- depends on BLK_DEV_SR
- help
- This enables the usage of vendor specific SCSI commands. This is
- required to support multisession CDs with old NEC/TOSHIBA cdrom
- drives (and HP Writers). If you have such a drive and get the first
- session only, try saying Y here; everybody else says N.
-
config CHR_DEV_SG
tristate "SCSI generic support"
depends on SCSI
@@ -142,10 +133,10 @@ config CHR_DEV_SG
quality digital reader of audio CDs (<http://www.xiph.org/paranoia/>).
For other devices, it's possible that you'll have to write the
driver software yourself. Please read the file
- <file:Documentation/scsi/scsi-generic.txt> for more information.
+ <file:Documentation/scsi/scsi-generic.rst> for more information.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>. The module will be called sg.
+ <file:Documentation/scsi/scsi.rst>. The module will be called sg.
If unsure, say N.
@@ -158,12 +149,12 @@ config CHR_DEV_SCH
don't need this for those tiny 6-slot cdrom changers. Media
changers are listed as "Type: Medium Changer" in /proc/scsi/scsi.
If you have such hardware and want to use it with linux, say Y
- here. Check <file:Documentation/scsi/scsi-changer.txt> for details.
+ here. Check <file:Documentation/scsi/scsi-changer.rst> for details.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.rst> and
- <file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
+ <file:Documentation/scsi/scsi.rst>. The module will be called ch.o.
If unsure, say N.
config SCSI_ENCLOSURE
@@ -392,7 +383,7 @@ config SCSI_AHA152X
It is explained in section 3.3 of the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. You might also want to
- read the file <file:Documentation/scsi/aha152x.txt>.
+ read the file <file:Documentation/scsi/aha152x.rst>.
To compile this driver as a module, choose M here: the
module will be called aha152x.
@@ -430,7 +421,7 @@ config SCSI_AACRAID
help
This driver supports a variety of Dell, HP, Adaptec, IBM and
ICP storage products. For a list of supported products, refer
- to <file:Documentation/scsi/aacraid.txt>.
+ to <file:Documentation/scsi/aacraid.rst>.
To compile this driver as a module, choose M here: the module
will be called aacraid.
@@ -457,7 +448,7 @@ config SCSI_DPT_I2O
help
This driver supports all of Adaptec's I2O based RAID controllers as
well as the DPT SmartRaid V cards. This is an Adaptec maintained
- driver by Deanna Bonds. See <file:Documentation/scsi/dpti.txt>.
+ driver by Deanna Bonds. See <file:Documentation/scsi/dpti.rst>.
To compile this driver as a module, choose M here: the
module will be called dpt_i2o.
@@ -511,8 +502,8 @@ config SCSI_BUSLOGIC
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>, and the files
- <file:Documentation/scsi/BusLogic.txt> and
- <file:Documentation/scsi/FlashPoint.txt> for more information.
+ <file:Documentation/scsi/BusLogic.rst> and
+ <file:Documentation/scsi/FlashPoint.rst> for more information.
Note that support for FlashPoint is only available for 32-bit
x86 configurations.
@@ -613,7 +604,7 @@ config FCOE_FNIC
This is support for the Cisco PCI-Express FCoE HBA.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called fnic.
config SCSI_SNIC
@@ -623,7 +614,7 @@ config SCSI_SNIC
This is support for the Cisco PCI-Express SCSI HBA.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/scsi.txt>.
+ <file:Documentation/scsi/scsi.rst>.
The module will be called snic.
config SCSI_SNIC_DEBUG_FS
@@ -813,7 +804,7 @@ config SCSI_PPA
newer drives)", below.
For more information about this driver and how to use it you should
- read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ read the file <file:Documentation/scsi/ppa.rst>. You should also read
the SCSI-HOWTO, which is available from
<http://www.tldp.org/docs.html#howto>. If you use this driver,
you will still be able to use the parallel port for other tasks,
@@ -840,7 +831,7 @@ config SCSI_IMM
here and Y to "IOMEGA Parallel Port (ppa - older drives)", above.
For more information about this driver and how to use it you should
- read the file <file:Documentation/scsi/ppa.txt>. You should also read
+ read the file <file:Documentation/scsi/ppa.rst>. You should also read
the SCSI-HOWTO, which is available from
<http://www.tldp.org/docs.html#howto>. If you use this driver,
you will still be able to use the parallel port for other tasks,
@@ -930,7 +921,7 @@ config SCSI_SYM53C8XX_2
language. It does not support LSI53C10XX Ultra-320 PCI-X SCSI
controllers; you need to use the Fusion MPT driver for that.
- Please read <file:Documentation/scsi/sym53c8xx_2.txt> for more
+ Please read <file:Documentation/scsi/sym53c8xx_2.rst> for more
information.
config SCSI_SYM53C8XX_DMA_ADDRESSING_MODE
@@ -1127,7 +1118,7 @@ config SCSI_QLOGIC_FAS
SCSI support"), below.
Information about this driver is contained in
- <file:Documentation/scsi/qlogicfas.txt>. You should also read the
+ <file:Documentation/scsi/qlogicfas.rst>. You should also read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
@@ -1197,7 +1188,7 @@ config SCSI_DC395x
This driver works, but is still in experimental status. So better
have a bootable disk and a backup in case of emergency.
- Documentation can be found in <file:Documentation/scsi/dc395x.txt>.
+ Documentation can be found in <file:Documentation/scsi/dc395x.rst>.
To compile this driver as a module, choose M here: the
module will be called dc395x.
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 33dbc051bff9..eb72ac8136c3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -798,6 +798,11 @@ static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
return 0;
}
+static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
+{
+ aac_probe_container_callback1(scsi_cmnd);
+}
+
int aac_probe_container(struct aac_dev *dev, int cid)
{
struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
@@ -810,7 +815,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
return -ENOMEM;
}
scsicmd->list.next = NULL;
- scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
+ scsicmd->scsi_done = aac_probe_container_scsi_done;
scsicmd->device = scsidev;
scsidev->sdev_state = 0;
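
The new aac_probe_container_scsi_done() wrapper replaces a call through a cast function pointer: aac_probe_container_callback1() returns int, and forcing it into the void (*)(struct scsi_cmnd *) slot of scsi_done is undefined behaviour in C and breaks Control Flow Integrity enforcement. A thin wrapper with the exact expected signature, sketched here with hypothetical names, is the portable fix:

        static int real_callback(struct scsi_cmnd *cmd);  /* returns status */

        static void real_callback_done(struct scsi_cmnd *cmd)
        {
                real_callback(cmd);  /* return value intentionally dropped */
        }

        /* ... later: cmd->scsi_done = real_callback_done; no cast needed */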
@@ -2601,9 +2606,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
static void synchronize_callback(void *context, struct fib *fibptr)
{
struct aac_synchronize_reply *synchronizereply;
- struct scsi_cmnd *cmd;
-
- cmd = context;
+ struct scsi_cmnd *cmd = context;
if (!aac_valid_context(cmd, fibptr))
return;
@@ -2644,77 +2647,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
int status;
struct fib *cmd_fibcontext;
struct aac_synchronize *synchronizecmd;
- struct scsi_cmnd *cmd;
struct scsi_device *sdev = scsicmd->device;
- int active = 0;
struct aac_dev *aac;
- u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
- (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
- u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
- unsigned long flags;
-
- /*
- * Wait for all outstanding queued commands to complete to this
- * specific target (block).
- */
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(cmd, &sdev->cmd_list, list)
- if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
- u64 cmnd_lba;
- u32 cmnd_count;
-
- if (cmd->cmnd[0] == WRITE_6) {
- cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
- (cmd->cmnd[2] << 8) |
- cmd->cmnd[3];
- cmnd_count = cmd->cmnd[4];
- if (cmnd_count == 0)
- cmnd_count = 256;
- } else if (cmd->cmnd[0] == WRITE_16) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
- ((u64)cmd->cmnd[3] << 48) |
- ((u64)cmd->cmnd[4] << 40) |
- ((u64)cmd->cmnd[5] << 32) |
- ((u64)cmd->cmnd[6] << 24) |
- (cmd->cmnd[7] << 16) |
- (cmd->cmnd[8] << 8) |
- cmd->cmnd[9];
- cmnd_count = (cmd->cmnd[10] << 24) |
- (cmd->cmnd[11] << 16) |
- (cmd->cmnd[12] << 8) |
- cmd->cmnd[13];
- } else if (cmd->cmnd[0] == WRITE_12) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
- (cmd->cmnd[3] << 16) |
- (cmd->cmnd[4] << 8) |
- cmd->cmnd[5];
- cmnd_count = (cmd->cmnd[6] << 24) |
- (cmd->cmnd[7] << 16) |
- (cmd->cmnd[8] << 8) |
- cmd->cmnd[9];
- } else if (cmd->cmnd[0] == WRITE_10) {
- cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
- (cmd->cmnd[3] << 16) |
- (cmd->cmnd[4] << 8) |
- cmd->cmnd[5];
- cmnd_count = (cmd->cmnd[7] << 8) |
- cmd->cmnd[8];
- } else
- continue;
- if (((cmnd_lba + cmnd_count) < lba) ||
- (count && ((lba + count) < cmnd_lba)))
- continue;
- ++active;
- break;
- }
-
- spin_unlock_irqrestore(&sdev->list_lock, flags);
-
- /*
- * Yield the processor (requeue for later)
- */
- if (active)
- return SCSI_MLQUEUE_DEVICE_BUSY;
aac = (struct aac_dev *)sdev->host->hostdata;
if (aac->in_reset)
@@ -2723,8 +2657,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
/*
* Allocate and initialize a Fib
*/
- if (!(cmd_fibcontext = aac_fib_alloc(aac)))
- return SCSI_MLQUEUE_HOST_BUSY;
+ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
aac_fib_init(cmd_fibcontext);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index f75878d773cf..355b16f0b145 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -272,36 +272,35 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
+static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd)
+{
+ int *active = data;
+
+ if (cmd->SCp.phase == AAC_OWNER_FIRMWARE)
+ *active = *active + 1;
+ return true;
+}
static void aac_wait_for_io_completion(struct aac_dev *aac)
{
- unsigned long flagv = 0;
- int i = 0;
+ int i = 0, active;
for (i = 60; i; --i) {
- struct scsi_device *dev;
- struct scsi_cmnd *command;
- int active = 0;
-
- __shost_for_each_device(dev, aac->scsi_host_ptr) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
- }
+ active = 0;
+ scsi_host_busy_iter(aac->scsi_host_ptr,
+ wait_for_io_iter, &active);
/*
* We can exit if all the commands are complete
*/
if (active == 0)
break;
+ dev_info(&aac->pdev->dev,
+ "Wait for %d commands to complete\n", active);
ssleep(1);
}
+ if (active)
+ dev_err(&aac->pdev->dev,
+ "%d outstanding commands during shutdown\n", active);
}
/**
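
The rewrite of aac_wait_for_io_completion() above (and of get_num_of_incomplete_fibs() in linit.c below) moves the traversal of outstanding commands into the SCSI midlayer: scsi_host_busy_iter() invokes the callback once for every busy command of the host, and the callback returns true to keep iterating. That removes the driver's own walk over sdev->cmd_list and the list_lock handling around it. A counting sketch under the same callback signature:

        static bool count_busy_iter(struct scsi_cmnd *cmd, void *data,
                                    bool reserved)
        {
                unsigned int *count = data;

                (*count)++;
                return true;            /* continue the iteration */
        }

        static unsigned int count_busy(struct Scsi_Host *shost)
        {
                unsigned int busy = 0;

                scsi_host_busy_iter(shost, count_busy_iter, &busy);
                return busy;
        }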
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 5a8a999606ea..4725e4c763cf 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -729,7 +729,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
hbacmd->request_id =
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
+ } else
return -EINVAL;
@@ -1476,10 +1476,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
int retval;
- struct Scsi_Host *host;
- struct scsi_device *dev;
- struct scsi_cmnd *command;
- struct scsi_cmnd *command_list;
+ struct Scsi_Host *host = aac->scsi_host_ptr;
int jafo = 0;
int bled;
u64 dmamask;
@@ -1495,8 +1492,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* - The card is dead, or will be very shortly ;-/ so no new
* commands are completing in the interrupt service.
*/
- host = aac->scsi_host_ptr;
- scsi_block_requests(host);
aac_adapter_disable_int(aac);
if (aac->thread && aac->thread->pid != current->pid) {
spin_unlock_irq(host->host_lock);
@@ -1607,39 +1602,11 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* This is where the assumption that the Adapter is quiesced
* is important.
*/
- command_list = NULL;
- __shost_for_each_device(dev, host) {
- unsigned long flags;
- spin_lock_irqsave(&dev->list_lock, flags);
- list_for_each_entry(command, &dev->cmd_list, list)
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- command->SCp.buffer = (struct scatterlist *)command_list;
- command_list = command;
- }
- spin_unlock_irqrestore(&dev->list_lock, flags);
- }
- while ((command = command_list)) {
- command_list = (struct scsi_cmnd *)command->SCp.buffer;
- command->SCp.buffer = NULL;
- command->result = DID_OK << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_TASK_SET_FULL;
- command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
- command->scsi_done(command);
- }
- /*
- * Any Device that was already marked offline needs to be marked
- * running
- */
- __shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev))
- scsi_device_set_state(dev, SDEV_RUNNING);
- }
- retval = 0;
+ scsi_host_complete_all_commands(host, DID_RESET);
+ retval = 0;
out:
aac->in_reset = 0;
- scsi_unblock_requests(host);
/*
* Issue bus rescan to catch any configuration that might have
@@ -1660,7 +1627,7 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
unsigned long flagv = 0;
int retval;
- struct Scsi_Host * host;
+ struct Scsi_Host *host = aac->scsi_host_ptr;
int bled;
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1678,8 +1645,7 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* target (block maximum 60 seconds). Although not necessary,
* it does make us a good storage citizen.
*/
- host = aac->scsi_host_ptr;
- scsi_block_requests(host);
+ scsi_host_block(host);
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1690,6 +1656,8 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
retval = _aac_reset_adapter(aac, bled, reset_type);
spin_unlock_irqrestore(host->host_lock, flagv);
+ retval = scsi_host_unblock(host, SDEV_RUNNING);
+
if ((forced < 2) && (retval == -ENODEV)) {
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
struct fib * fibctx = aac_fib_alloc(aac);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0443b74390cf..83a60b0a8cd8 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -623,54 +623,56 @@ static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
return aac_do_ioctl(dev, cmd, arg);
}
-static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+struct fib_count_data {
+ int mlcnt;
+ int llcnt;
+ int ehcnt;
+ int fwcnt;
+ int krlcnt;
+};
+
+static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
{
+ struct fib_count_data *fib_count = data;
- unsigned long flags;
- struct scsi_device *sdev = NULL;
+ switch (scmnd->SCp.phase) {
+ case AAC_OWNER_FIRMWARE:
+ fib_count->fwcnt++;
+ break;
+ case AAC_OWNER_ERROR_HANDLER:
+ fib_count->ehcnt++;
+ break;
+ case AAC_OWNER_LOWLEVEL:
+ fib_count->llcnt++;
+ break;
+ case AAC_OWNER_MIDLEVEL:
+ fib_count->mlcnt++;
+ break;
+ default:
+ fib_count->krlcnt++;
+ break;
+ }
+ return true;
+}
+
+/* Called during SCSI EH, so we don't need to block requests */
+static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+{
struct Scsi_Host *shost = aac->scsi_host_ptr;
- struct scsi_cmnd *scmnd = NULL;
struct device *ctrl_dev;
+ struct fib_count_data fcnt = { };
- int mlcnt = 0;
- int llcnt = 0;
- int ehcnt = 0;
- int fwcnt = 0;
- int krlcnt = 0;
-
- __shost_for_each_device(sdev, shost) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(scmnd, &sdev->cmd_list, list) {
- switch (scmnd->SCp.phase) {
- case AAC_OWNER_FIRMWARE:
- fwcnt++;
- break;
- case AAC_OWNER_ERROR_HANDLER:
- ehcnt++;
- break;
- case AAC_OWNER_LOWLEVEL:
- llcnt++;
- break;
- case AAC_OWNER_MIDLEVEL:
- mlcnt++;
- break;
- default:
- krlcnt++;
- break;
- }
- }
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
+ scsi_host_busy_iter(shost, fib_count_iter, &fcnt);
ctrl_dev = &aac->pdev->dev;
- dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
- dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
- dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
- dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
- dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", fcnt.mlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", fcnt.llcnt);
+ dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", fcnt.ehcnt);
+ dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fcnt.fwcnt);
+ dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", fcnt.krlcnt);
- return mlcnt + llcnt + ehcnt + fwcnt;
+ return fcnt.mlcnt + fcnt.llcnt + fcnt.ehcnt + fcnt.fwcnt;
}
static int aac_eh_abort(struct scsi_cmnd* cmd)
@@ -732,7 +734,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
(fib_callback) aac_hba_callback,
(void *) cmd);
-
+ if (status != -EINPROGRESS) {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 secs for completion */
for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
@@ -911,11 +917,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host device reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -930,7 +936,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
status = aac_hba_send(command, fib,
(fib_callback) aac_tmf_callback,
(void *) info);
-
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state == 0) {
@@ -969,11 +980,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
- info->reset_state > 0)
+ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
+ !(info->reset_state > 0)))
return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+ pr_err("%s: Host target reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
fib = aac_fib_alloc(aac);
@@ -990,6 +1001,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
(fib_callback) aac_tmf_callback,
(void *) info);
+ if (status != -EINPROGRESS) {
+ info->reset_state = 0;
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return ret;
+ }
+
/* Wait up to 15 seconds for completion */
for (count = 0; count < 15; ++count) {
if (info->reset_state <= 0) {
@@ -1042,7 +1060,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
}
}
- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
/*
* Check the health of the controller
@@ -1270,20 +1288,21 @@ static ssize_t aac_show_flags(struct device *cdev,
if (nblank(dprintk(x)))
len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
- len += snprintf(buf + len, PAGE_SIZE - len,
- "AAC_DETAILED_STATUS_INFO\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "AAC_DETAILED_STATUS_INFO\n");
#endif
if (dev->raw_io_interface && dev->raw_io_64)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "SAI_READ_CAPACITY_16\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SAI_READ_CAPACITY_16\n");
if (dev->jbod)
- len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_JBOD\n");
if (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "SUPPORTED_POWER_MANAGEMENT\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_POWER_MANAGEMENT\n");
if (dev->msi)
- len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
return len;
}
@@ -1676,7 +1695,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->irq = pdev->irq;
shost->unique_id = unique_id;
shost->max_cmd_len = 16;
- shost->use_cmd_list = 1;
if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
aac_init_char();
@@ -1895,7 +1913,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- scsi_block_requests(shost);
+ scsi_host_block(shost);
aac_cancel_rescan_worker(aac);
aac_send_shutdown(aac);
@@ -1931,7 +1949,7 @@ static int aac_resume(struct pci_dev *pdev)
* aac_send_shutdown() to block ioctls from the upper layer
*/
aac->adapter_shutdown = 0;
- scsi_unblock_requests(shost);
+ scsi_host_unblock(shost, SDEV_RUNNING);
return 0;
@@ -1946,7 +1964,8 @@ fail_device:
static void aac_shutdown(struct pci_dev *dev)
{
struct Scsi_Host *shost = pci_get_drvdata(dev);
- scsi_block_requests(shost);
+
+ scsi_host_block(shost);
__aac_shutdown((struct aac_dev *)shost->hostdata);
}
@@ -1978,26 +1997,6 @@ static void aac_remove_one(struct pci_dev *pdev)
}
}
-static void aac_flush_ios(struct aac_dev *aac)
-{
- int i;
- struct scsi_cmnd *cmd;
-
- for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
- cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
- if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
- scsi_dma_unmap(cmd);
-
- if (aac->handle_pci_error)
- cmd->result = DID_NO_CONNECT << 16;
- else
- cmd->result = DID_RESET << 16;
-
- cmd->scsi_done(cmd);
- }
- }
-}
-
static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
enum pci_channel_state error)
{
@@ -2012,9 +2011,9 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_frozen:
aac->handle_pci_error = 1;
- scsi_block_requests(aac->scsi_host_ptr);
+ scsi_host_block(shost);
aac_cancel_rescan_worker(aac);
- aac_flush_ios(aac);
+ scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
aac_release_resources(aac);
pci_disable_pcie_error_reporting(pdev);
@@ -2024,7 +2023,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
aac->handle_pci_error = 1;
- aac_flush_ios(aac);
+ scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2065,7 +2064,6 @@ fail_device:
static void aac_pci_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct scsi_device *sdev = NULL;
struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
if (aac_adapter_ioremap(aac, aac->base_size)) {
@@ -2092,10 +2090,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
aac->adapter_shutdown = 0;
aac->handle_pci_error = 0;
- shost_for_each_device(sdev, shost)
- if (sdev->sdev_state == SDEV_OFFLINE)
- sdev->sdev_state = SDEV_RUNNING;
- scsi_unblock_requests(aac->scsi_host_ptr);
+ scsi_host_unblock(shost, SDEV_RUNNING);
aac_scan_host(aac);
pci_save_state(pdev);
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index a242a62caaa1..c2c7850ff7b4 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -316,7 +316,7 @@ typedef struct asc_sg_head {
ushort queue_cnt;
ushort entry_to_copy;
ushort res;
- ASC_SG_LIST sg_list[0];
+ ASC_SG_LIST sg_list[];
} ASC_SG_HEAD;
typedef struct asc_scsi_q {
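The [0]-to-[] change here, repeated for vnic_devcmd_provinfo in the fnic hunk below, converts a GNU zero-length array into a C99 flexible array member, which compilers and fortified memory helpers can bounds-check. A sketch of the allocation idiom such structs rely on; the struct name and nents count are illustrative:

struct sg_head_example {
	unsigned short entry_cnt;
	ASC_SG_LIST sg_list[];	/* flexible array member: contributes no size */
};

static struct sg_head_example *alloc_sg_head(unsigned short nents)
{
	struct sg_head_example *h;

	/* sizeof(*h) covers only the fixed header, so the trailing array
	 * space is allocated explicitly; struct_size() guards the multiply. */
	h = kmalloc(struct_size(h, sg_list, nents), GFP_KERNEL);
	if (h)
		h->entry_cnt = nents;
	return h;
}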
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index eb466c2e1839..90f97df1c42a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -220,7 +220,7 @@
*
**************************************************************************
- see Documentation/scsi/aha152x.txt for configuration details
+ see Documentation/scsi/aha152x.rst for configuration details
**************************************************************************/
@@ -1249,7 +1249,7 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
"aha152x: unable to verify geometry for disk with >1GB.\n"
" Using default translation. Please verify yourself.\n"
" Perhaps you need to enable extended translation in the driver.\n"
- " See Documentation/scsi/aha152x.txt for details.\n");
+ " See Documentation/scsi/aha152x.rst for details.\n");
}
} else {
info_array[0] = info[0];
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index da4150c17781..5a227c03895f 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -592,7 +592,6 @@ static int aha1740_probe (struct device *dev)
DMA_BIDIRECTIONAL);
if (!host->ecb_dma_addr) {
printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
- scsi_host_put (shpnt);
goto err_host_put;
}
diff --git a/drivers/scsi/aic7xxx/.gitignore b/drivers/scsi/aic7xxx/.gitignore
index b8ee24d5748a..9aa780221718 100644
--- a/drivers/scsi/aic7xxx/.gitignore
+++ b/drivers/scsi/aic7xxx/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
aic79xx_reg.h
aic79xx_reg_print.c
aic79xx_seq.h
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 16743fb9eead..d4c50b8fce29 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -32,7 +32,7 @@ config AIC79XX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic79xx.txt for details.
+ "tag_info" option. See Documentation/scsi/aic79xx.rst for details.
config AIC79XX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 3546b8cc401f..9d027549d698 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -37,7 +37,7 @@ config AIC7XXX_CMDS_PER_DEVICE
on some devices. The upper bound is 253. 0 disables tagged queueing.
Per device tag depth can be controlled via the kernel command line
- "tag_info" option. See Documentation/scsi/aic7xxx.txt for details.
+ "tag_info" option. See Documentation/scsi/aic7xxx.rst for details.
config AIC7XXX_RESET_DELAY_MS
int "Initial bus reset delay in milli-seconds"
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 7e5044bf05c0..a336a458c978 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3107,19 +3107,6 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printerror = 0;
} else if (ahd_sent_msg(ahd, AHDMSG_1B,
MSG_BUS_DEV_RESET, TRUE)) {
-#ifdef __FreeBSD__
- /*
- * Don't mark the user's request for this BDR
- * as completing with CAM_BDR_SENT. CAM3
- * specifies CAM_REQ_CMP.
- */
- if (scb != NULL
- && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
- && ahd_match_scb(ahd, scb, target, 'A',
- CAM_LUN_WILDCARD, SCB_LIST_NULL,
- ROLE_INITIATOR))
- ahd_set_transaction_status(scb, CAM_REQ_CMP);
-#endif
ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
CAM_BDR_SENT, "Bus Device Reset",
/*verbose_level*/0);
@@ -6067,22 +6054,17 @@ ahd_alloc(void *platform_arg, char *name)
{
struct ahd_softc *ahd;
-#ifndef __FreeBSD__
ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
if (!ahd) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
-#else
- ahd = device_get_softc((device_t)platform_arg);
-#endif
+
memset(ahd, 0, sizeof(*ahd));
ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
if (ahd->seep_config == NULL) {
-#ifndef __FreeBSD__
kfree(ahd);
-#endif
kfree(name);
return (NULL);
}
@@ -6206,9 +6188,7 @@ ahd_free(struct ahd_softc *ahd)
kfree(ahd->seep_config);
if (ahd->saved_stack != NULL)
kfree(ahd->saved_stack);
-#ifndef __FreeBSD__
kfree(ahd);
-#endif
return;
}
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 259d9c20bf25..57be9609d504 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -41,7 +41,7 @@
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
-** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/
#include <linux/module.h>
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index c2c79a37a9ef..30914c8f29cc 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -41,7 +41,7 @@
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
-** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
+** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/
#include <linux/module.h>
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index d4febaadfaa3..a2d69b287c7b 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1178,12 +1178,12 @@ beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num,
- (total_cids - avlbl_cids));
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ (total_cids - avlbl_cids));
} else
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num, 0);
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
}
return len;
@@ -1208,12 +1208,12 @@ beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num,
- BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num,
+ BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
else
- len += snprintf(buf+len, PAGE_SIZE - len,
- "ULP%d : %d\n", ulp_num, 0);
+ len += scnprintf(buf+len, PAGE_SIZE - len,
+ "ULP%d : %d\n", ulp_num, 0);
}
return len;
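These conversions, matching those in the aacraid hunk above and the fnic, gdth, and ipr hunks below, fix a latent overflow in the accumulate-into-len idiom: snprintf() returns the length the output would have needed, so after one truncated call len can exceed PAGE_SIZE and the next size argument wraps negative. scnprintf() returns the bytes actually stored. A minimal illustration of the safe idiom, as the body of a hypothetical sysfs show routine:

static ssize_t show_example(char *buf)
{
	ssize_t len = 0;

	/* scnprintf() never reports more than the space it was given,
	 * so len can never exceed PAGE_SIZE, however much is dropped. */
	len += scnprintf(buf + len, PAGE_SIZE - len, "first: %d\n", 1);
	len += scnprintf(buf + len, PAGE_SIZE - len, "second: %d\n", 2);
	return len;
}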
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index ed5f4a6ae270..cb74ab1ae5a4 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -44,7 +44,6 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER);
-static DEFINE_MUTEX(ch_mutex);
static int init = 1;
module_param(init, int, 0444);
MODULE_PARM_DESC(init, \
@@ -569,6 +568,7 @@ static void ch_destroy(struct kref *ref)
{
scsi_changer *ch = container_of(ref, scsi_changer, ref);
+ ch->device = NULL;
kfree(ch->dt);
kfree(ch);
}
@@ -590,20 +590,22 @@ ch_open(struct inode *inode, struct file *file)
scsi_changer *ch;
int minor = iminor(inode);
- mutex_lock(&ch_mutex);
spin_lock(&ch_index_lock);
ch = idr_find(&ch_index_idr, minor);
- if (NULL == ch || scsi_device_get(ch->device)) {
+ if (ch == NULL || !kref_get_unless_zero(&ch->ref)) {
spin_unlock(&ch_index_lock);
- mutex_unlock(&ch_mutex);
return -ENXIO;
}
- kref_get(&ch->ref);
spin_unlock(&ch_index_lock);
-
+ if (scsi_device_get(ch->device)) {
+ kref_put(&ch->ref, ch_destroy);
+ return -ENXIO;
+ }
+ /* Synchronize with ch_probe() */
+ mutex_lock(&ch->lock);
file->private_data = ch;
- mutex_unlock(&ch_mutex);
+ mutex_unlock(&ch->lock);
return 0;
}
@@ -938,7 +940,16 @@ static int ch_probe(struct device *dev)
ch->minor = ret;
sprintf(ch->name,"ch%d",ch->minor);
+ ret = scsi_device_get(sd);
+ if (ret) {
+ sdev_printk(KERN_WARNING, sd, "ch%d: failed to get device\n",
+ ch->minor);
+ goto remove_idr;
+ }
+ mutex_init(&ch->lock);
+ kref_init(&ch->ref);
+ ch->device = sd;
class_dev = device_create(ch_sysfs_class, dev,
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
@@ -946,24 +957,27 @@ static int ch_probe(struct device *dev)
sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n",
ch->minor);
ret = PTR_ERR(class_dev);
- goto remove_idr;
+ goto put_device;
}
- mutex_init(&ch->lock);
- kref_init(&ch->ref);
- ch->device = sd;
+ mutex_lock(&ch->lock);
ret = ch_readconfig(ch);
- if (ret)
+ if (ret) {
+ mutex_unlock(&ch->lock);
goto destroy_dev;
+ }
if (init)
ch_init_elem(ch);
+ mutex_unlock(&ch->lock);
dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
return 0;
destroy_dev:
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR, ch->minor));
+put_device:
+ scsi_device_put(sd);
remove_idr:
idr_remove(&ch_index_idr, ch->minor);
free_ch:
@@ -977,9 +991,11 @@ static int ch_remove(struct device *dev)
spin_lock(&ch_index_lock);
idr_remove(&ch_index_idr, ch->minor);
+ dev_set_drvdata(dev, NULL);
spin_unlock(&ch_index_lock);
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
+ scsi_device_put(ch->device);
kref_put(&ch->ref, ch_destroy);
return 0;
}
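The ch_open() rework above closes a lookup-versus-release race: the old code bumped the kref after idr_find() with no guarantee the count had not already hit zero on the release side. Reduced to its core, with illustrative object, lock, and idr names, the lookup pattern is:

static int example_open(int minor)
{
	struct example_obj *obj;

	spin_lock(&index_lock);
	obj = idr_find(&index_idr, minor);
	/* Never resurrect an object whose release is already in flight:
	 * kref_get_unless_zero() fails rather than bump 0 back to 1. */
	if (!obj || !kref_get_unless_zero(&obj->ref)) {
		spin_unlock(&index_lock);
		return -ENXIO;
	}
	spin_unlock(&index_lock);
	return 0;
}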
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 13fbb2eab842..e95f5b3bef4d 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -61,7 +61,6 @@
#include <asm/io.h>
#include <scsi/scsi.h>
-#include <scsi/scsicam.h> /* needed for scsicam_bios_param */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -1053,38 +1052,6 @@ complete:
static DEF_SCSI_QCMD(dc395x_queue_command)
-/*
- * Return the disk geometry for the given SCSI device.
- */
-static int dc395x_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *info)
-{
-#ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
- int heads, sectors, cylinders;
- struct AdapterCtlBlk *acb;
- int size = capacity;
-
- dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
- acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
- heads = 64;
- sectors = 32;
- cylinders = size / (heads * sectors);
-
- if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
- heads = 255;
- sectors = 63;
- cylinders = size / (heads * sectors);
- }
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
- return 0;
-#else
- return scsicam_bios_param(bdev, capacity, info);
-#endif
-}
-
-
static void dump_register_info(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
@@ -4622,7 +4589,6 @@ static struct scsi_host_template dc395x_driver_template = {
.show_info = dc395x_show_info,
.name = DC395X_BANNER " " DC395X_VERSION,
.queuecommand = dc395x_queue_command,
- .bios_param = dc395x_bios_param,
.slave_alloc = dc395x_slave_alloc,
.slave_destroy = dc395x_slave_destroy,
.can_queue = DC395x_MAX_CAN_QUEUE,
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index 6bc33f4f020d..25e9251f8c78 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -5,7 +5,7 @@
begin : Thu Sep 7 2000
copyright : (C) 2001 by Adaptec
- See Documentation/scsi/dpti.txt for history, notes, license info
+ See Documentation/scsi/dpti.rst for history, notes, license info
and credits
***************************************************************************/
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index abc74fd474dc..02dff3a684e0 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -8,7 +8,7 @@
July 30, 2001 First version being submitted
for inclusion in the kernel. V2.4
- See Documentation/scsi/dpti.txt for history, notes, license info
+ See Documentation/scsi/dpti.rst for history, notes, license info
and credits
***************************************************************************/
@@ -817,7 +817,7 @@ static int adpt_hba_reset(adpt_hba* pHba)
}
pHba->state &= ~DPTI_STATE_RESET;
- adpt_fail_posted_scbs(pHba);
+ scsi_host_complete_all_commands(pHba->host, DID_RESET);
return 0; /* return success */
}
@@ -2173,7 +2173,7 @@ static irqreturn_t adpt_isr(int irq, void *dev_id)
readl(reply + 12) - 1);
if(cmd != NULL){
scsi_dma_unmap(cmd);
- adpt_i2o_to_scsi(reply, cmd);
+ adpt_i2o_scsi_complete(reply, cmd);
}
}
writel(m, pHba->reply_port);
@@ -2335,13 +2335,12 @@ static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
host->unique_id = (u32)sys_tbl_pa + pHba->unit;
host->sg_tablesize = pHba->sg_tablesize;
host->can_queue = pHba->post_fifo_size;
- host->use_cmd_list = 1;
return 0;
}
-static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
+static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
{
adpt_hba* pHba;
u32 hba_status;
@@ -2459,7 +2458,6 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
if(cmd->scsi_done != NULL){
cmd->scsi_done(cmd);
}
- return cmd->result;
}
@@ -2647,23 +2645,6 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
return 0;
}
-static void adpt_fail_posted_scbs(adpt_hba* pHba)
-{
- struct scsi_cmnd* cmd = NULL;
- struct scsi_device* d = NULL;
-
- shost_for_each_device(d, pHba->host) {
- unsigned long flags;
- spin_lock_irqsave(&d->list_lock, flags);
- list_for_each_entry(cmd, &d->cmd_list, list) {
- cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
- cmd->scsi_done(cmd);
- }
- spin_unlock_irqrestore(&d->list_lock, flags);
- }
-}
-
-
/*============================================================================
* Routines from i2o subsystem
*============================================================================
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 42b1e28b5884..8a079e8d7f65 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -5,7 +5,7 @@
begin : Thu Sep 7 2000
copyright : (C) 2001 by Adaptec
- See Documentation/scsi/dpti.txt for history, notes, license info
+ See Documentation/scsi/dpti.rst for history, notes, license info
and credits
***************************************************************************/
@@ -286,7 +286,7 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba);
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
-static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
+static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd);
static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
static s32 adpt_hba_reset(adpt_hba* pHba);
static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
@@ -295,7 +295,6 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
static void adpt_i2o_delete_hba(adpt_hba* pHba);
static void adpt_inquiry(adpt_hba* pHba);
-static void adpt_fail_posted_scbs(adpt_hba* pHba);
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
static int adpt_i2o_online_hba(adpt_hba* pHba);
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index a0d01aea28f7..9d52d83161ed 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -138,7 +138,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
@@ -180,7 +180,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
@@ -220,12 +220,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
struct timespec64 val1, val2;
ktime_get_real_ts64(&val1);
- len = snprintf(debug->debug_buffer + len, buf_size - len,
+ len = scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Current time : [%lld:%ld]\n"
"Last stats reset time: [%lld:%09ld]\n"
"Last stats read time: [%lld:%ld]\n"
@@ -243,11 +243,11 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
stats->stats_timestamps.last_read_time = val1;
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
"Number of IOs: %lld\nNumber of IO Completions: %lld\n"
"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
@@ -280,16 +280,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\nCurrent Max IO time : %lld\n",
(u64)atomic64_read(&stats->io_stats.current_max_io_time));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tAbort Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Aborts: %lld\n"
"Number of Abort Failures: %lld\n"
"Number of Abort Driver Timeouts: %lld\n"
@@ -318,12 +318,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tTerminate Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Terminates: %lld\n"
"Maximum Terminates: %lld\n"
"Number of Terminate Driver Timeouts: %lld\n"
@@ -337,12 +337,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
(u64)atomic64_read(&stats->term_stats.terminate_failures));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tReset Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Device Resets: %lld\n"
"Number of Device Reset Failures: %lld\n"
"Number of Device Reset Aborts: %lld\n"
@@ -368,12 +368,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
&stats->reset_stats.fnic_reset_completions),
(u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tFirmware Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active FW Requests %lld\n"
"Maximum FW Requests: %lld\n"
"Number of FW out of resources: %lld\n"
@@ -383,12 +383,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
(u64)atomic64_read(&stats->fw_stats.io_fw_errs));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tVlan Discovery Statistics\n"
"------------------------------------------\n");
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Vlan Discovery Requests Sent %lld\n"
"Vlan Response Received with no FCF VLAN ID: %lld\n"
"No solicitations recvd after vlan set, expiry count: %lld\n"
@@ -398,7 +398,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
(u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
@@ -406,7 +406,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Last ISR time: %llu (%8llu.%09lu)\n"
"Last ACK time: %llu (%8llu.%09lu)\n"
"Max ISR jiffies: %llu\n"
@@ -452,7 +452,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
(u64)atomic64_read(&stats->misc_stats.frame_errors));
- len += snprintf(debug->debug_buffer + len, buf_size - len,
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Firmware reported port speed: %llu\n",
(u64)atomic64_read(
&stats->misc_stats.current_port_speed));
@@ -742,7 +742,7 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
rd_idx = fc_trace_entries.rd_idx;
wr_idx = fc_trace_entries.wr_idx;
if (rdata_flag == 0) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"Time Stamp (UTC)\t\t"
"Host No: F Type: len: FCoE_FRAME:\n");
@@ -762,11 +762,11 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
} else {
fc_trace = (char *)tdata;
for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
- len, "%02x", fc_trace[j] & 0xff);
} /* for loop */
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"\n");
}
@@ -810,7 +810,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len,
fmt,
tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
@@ -823,25 +823,25 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
for (j = 0; j < min_t(u8, tdata->frame_len,
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
if (tdata->frame_type == FNIC_FC_LE) {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%c", fc_trace[j]);
} else {
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%02x", fc_trace[j] & 0xff);
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, " ");
if (j == ethhdr_len ||
j == ethhdr_len + fcoehdr_len ||
j == ethhdr_len + fcoehdr_len + fchdr_len ||
(i > 3 && j%fchdr_len == 0)) {
- len += snprintf(fnic_dbgfs_prt->buffer
+ len += scnprintf(fnic_dbgfs_prt->buffer
+ len, max_size - len,
"\n\t\t\t\t\t\t\t\t");
i++;
}
} /* end of else*/
} /* End of for loop*/
- len += snprintf(fnic_dbgfs_prt->buffer + len,
+ len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "\n");
*orig_len = len;
}
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index c5dde556dc7c..c20d30e36dfc 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -442,7 +442,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/*
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 2ab774e62e40..2cc676e3df6a 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -20,7 +20,7 @@
* Added ISAPNP support for DTC436 adapters,
* Thomas Sailer, sailer@ife.ee.ethz.ch
*
- * See Documentation/scsi/g_NCR5380.txt for more info.
+ * See Documentation/scsi/g_NCR5380.rst for more info.
*/
#include <asm/io.h>
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 381d849726ac..c764312f9ba0 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -193,7 +193,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
for (i = 1; i < MAX_RES_ARGS; i++) {
if (reserve_list[i] == 0xff)
break;
- hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
+ hlen += scnprintf(hrec + hlen, 161 - hlen, ",%d", reserve_list[i]);
}
}
seq_printf(m,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a2debe0c8185..374885aa8d77 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2938,6 +2938,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
{
u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
HISI_SAS_IOST_ITCT_CACHE_NUM;
+ struct device *dev = hisi_hba->dev;
u32 *buf = cache;
u32 i, val;
@@ -2950,7 +2951,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
}
if (val != 0xffffffff) {
- pr_err("Issue occur when reading IOST/ITCT cache!\n");
+ dev_err(dev, "Issue occurred in reading IOST/ITCT cache!\n");
return;
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 1d669e47b692..7ec91c3a66ca 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -650,3 +650,68 @@ void scsi_flush_work(struct Scsi_Host *shost)
flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
+
+static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
+{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ int status = *(int *)data;
+
+ scsi_dma_unmap(scmd);
+ scmd->result = status << 16;
+ scmd->scsi_done(scmd);
+ return true;
+}
+
+/**
+ * scsi_host_complete_all_commands - Terminate all running commands
+ * @shost: Scsi Host on which commands should be terminated
+ * @status: Status to be set for the terminated commands
+ *
+ * There is no protection against modification of the number
+ * of outstanding commands. It is the responsibility of the
+ * caller to ensure that concurrent I/O submission and/or
+ * completion is stopped when calling this function.
+ */
+void scsi_host_complete_all_commands(struct Scsi_Host *shost, int status)
+{
+ blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
+ &status);
+}
+EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
+
+struct scsi_host_busy_iter_data {
+ bool (*fn)(struct scsi_cmnd *, void *, bool);
+ void *priv;
+};
+
+static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
+ bool reserved)
+{
+ struct scsi_host_busy_iter_data *iter_data = priv;
+ struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
+
+ return iter_data->fn(sc, iter_data->priv, reserved);
+}
+
+/**
+ * scsi_host_busy_iter - Iterate over all busy commands
+ * @shost: Pointer to Scsi_Host.
+ * @fn: Function to call on each busy command
+ * @priv: Data pointer passed to @fn
+ *
+ * If locking against concurrent command completions is required,
+ * it has to be provided by the caller.
+ */
+void scsi_host_busy_iter(struct Scsi_Host *shost,
+ bool (*fn)(struct scsi_cmnd *, void *, bool),
+ void *priv)
+{
+ struct scsi_host_busy_iter_data iter_data = {
+ .fn = fn,
+ .priv = priv,
+ };
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, __scsi_host_busy_iter_fn,
+ &iter_data);
+}
+EXPORT_SYMBOL_GPL(scsi_host_busy_iter);
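Together with scsi_host_block()/scsi_host_unblock(), these helpers give the call sites converted earlier in this diff (the aacraid suspend, shutdown, and PCI-error paths, and the dpt_i2o reset path) a common quiesce-and-fail sequence. A hedged sketch of that sequence; the wrapper function names are illustrative:

static void example_fail_all_io(struct Scsi_Host *shost)
{
	/* Stop new submissions first: as noted above, the iterator has no
	 * protection against concurrent submission or completion. */
	scsi_host_block(shost);
	scsi_host_complete_all_commands(shost, DID_NO_CONNECT);
}

static void example_resume_io(struct Scsi_Host *shost)
{
	/* Unlike scsi_unblock_requests(), this also returns offline
	 * scsi_devices to the requested state, replacing the hand-rolled
	 * loop removed from aac_pci_resume(). */
	scsi_host_unblock(shost, SDEV_RUNNING);
}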
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1a4ddfacb458..1e9302e99d05 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -504,6 +504,12 @@ static ssize_t host_store_rescan(struct device *dev,
return count;
}
+static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
+{
+ device->offload_enabled = 0;
+ device->offload_to_be_enabled = 0;
+}
+
static ssize_t host_show_firmware_revision(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1738,8 +1744,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
__func__,
h->scsi_host->host_no, logical_drive->bus,
logical_drive->target, logical_drive->lun);
- logical_drive->offload_enabled = 0;
- logical_drive->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(logical_drive);
logical_drive->queue_depth = 8;
}
}
@@ -2499,8 +2504,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
IOACCEL2_SERV_RESPONSE_FAILURE) {
if (c2->error_data.status ==
IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
- dev->offload_enabled = 0;
- dev->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(dev);
}
if (dev->in_reset) {
@@ -3670,10 +3674,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_to_be_enabled =
+ bool offload_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
- if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_to_be_enabled = 0;
+ /*
+ * Check to see if offload can be enabled.
+ */
+ if (offload_enabled) {
+ rc = hpsa_get_raid_map(h, scsi3addr, this_device);
+ if (rc) /* could not load raid_map */
+ goto out;
+ this_device->offload_to_be_enabled = 1;
+ }
}
out:
@@ -3996,8 +4007,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
- this_device->offload_enabled = 0;
- this_device->offload_to_be_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(this_device);
this_device->hba_ioaccel_enabled = 0;
this_device->volume_offline = 0;
this_device->queue_depth = h->nr_cmds;
@@ -5230,8 +5240,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
/* Handles load balance across RAID 1 members.
* (2-drive R1 and R10 with even # of drives.)
* Appropriate for SSDs, not optimal for HDDs
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
+ if (le16_to_cpu(map->layout_map_count) != 2) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
if (dev->offload_to_mirror)
map_index += le16_to_cpu(map->data_disks_per_row);
dev->offload_to_mirror = !dev->offload_to_mirror;
@@ -5239,8 +5253,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
case HPSA_RAID_ADM:
/* Handles N-way mirrors (R1-ADM)
* and R10 with # of drives divisible by 3.)
+ * Ensure we have the correct raid_map.
*/
- BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
+ if (le16_to_cpu(map->layout_map_count) != 3) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
offload_to_mirror = dev->offload_to_mirror;
raid_map_helper(map, offload_to_mirror,
@@ -5265,7 +5283,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
r5or6_blocks_per_row =
le16_to_cpu(map->strip_size) *
le16_to_cpu(map->data_disks_per_row);
- BUG_ON(r5or6_blocks_per_row == 0);
+ if (r5or6_blocks_per_row == 0) {
+ hpsa_turn_off_ioaccel_for_device(dev);
+ return IO_ACCEL_INELIGIBLE;
+ }
stripesize = r5or6_blocks_per_row *
le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
@@ -8285,7 +8306,7 @@ static int detect_controller_lockup(struct ctlr_info *h)
*
* Called from monitor controller worker (hpsa_event_monitor_worker)
*
- * A Volume (or Volumes that comprise an Array set may be undergoing a
+ * A Volume (or Volumes that comprise an Array set) may be undergoing a
* transformation, so we will be turning off ioaccel for all volumes that
* make up the Array.
*/
@@ -8308,6 +8329,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
* Run through current device list used during I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
+ int offload_to_be_enabled = 0;
+ int offload_config = 0;
+
device = h->dev[i];
if (!device)
@@ -8325,25 +8349,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
continue;
ioaccel_status = buf[IOACCEL_STATUS_BYTE];
- device->offload_config =
+
+ /*
+ * Check if offload is still configured on
+ */
+ offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
- if (device->offload_config)
- device->offload_to_be_enabled =
+ /*
+ * If offload is configured on, check to see if ioaccel
+ * needs to be enabled.
+ */
+ if (offload_config)
+ offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
/*
+ * If ioaccel is to be re-enabled, re-enable later during the
+ * scan operation so the driver can get a fresh raidmap
+ * before turning ioaccel back on.
+ */
+ if (offload_to_be_enabled)
+ continue;
+
+ /*
* Immediately turn off ioaccel for any volume the
* controller tells us to. Some of the reasons could be:
* transformation - change to the LVs of an Array.
* degraded volume - component failure
- *
- * If ioaccel is to be re-enabled, re-enable later during the
- * scan operation so the driver can get a fresh raidmap
- * before turning ioaccel back on.
- *
*/
- if (!device->offload_to_be_enabled)
- device->offload_enabled = 0;
+ hpsa_turn_off_ioaccel_for_device(device);
}
kfree(buf);
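The three BUG_ON() removals above share one rationale: an inconsistent RAID map reported by the controller is recoverable, so rather than crash the kernel the driver disables ioaccel for the volume and returns IO_ACCEL_INELIGIBLE, letting the command fall back to the regular RAID path. Schematically, with an illustrative consistency check:

	if (!raid_map_is_sane(map)) {
		hpsa_turn_off_ioaccel_for_device(dev);
		return IO_ACCEL_INELIGIBLE;	/* caller retries via RAID path */
	}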
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index df897df5cafe..7da9e060b270 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -133,6 +133,7 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static const char *unknown_error = "unknown error";
@@ -413,22 +414,44 @@ static const char *ibmvfc_get_fc_type(u16 status)
* @tgt: ibmvfc target struct
* @action: action to perform
*
+ * Returns:
+ * 0 if action changed / non-zero if not changed
**/
-static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
enum ibmvfc_target_action action)
{
+ int rc = -EINVAL;
+
switch (tgt->action) {
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
+ action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
case IBMVFC_TGT_ACTION_DEL_RPORT:
- if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
+ if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
tgt->action = action;
+ rc = 0;
+ }
case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
- if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
+ if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
tgt->add_rport = 0;
tgt->action = action;
+ rc = 0;
break;
}
+
+ return rc;
}
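The reworked ibmvfc_set_tgt_action() turns tgt->action into a forward-only state machine; a transition is accepted (rc = 0) only if it advances the target. Summarizing the switch above, with the two LOGOUT states added to the enum at the end of this file's diff:

	LOGOUT_RPORT      -> LOGOUT_RPORT_WAIT or DEL_RPORT
	LOGOUT_RPORT_WAIT -> DEL_RPORT
	DEL_RPORT         -> DELETED_RPORT
	DELETED_RPORT     -> terminal, nothing accepted
	any other state   -> any action (add_rport cleared for logout/delete)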
/**
@@ -537,6 +560,19 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_del_tgt - Schedule cleanup and removal of the target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
+{
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
+ tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
+ wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
* ibmvfc_link_down - Handle a link down event from the adapter
* @vhost: ibmvfc host struct
* @state: ibmvfc host state to enter
@@ -550,7 +586,7 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
ENTER;
scsi_block_requests(vhost->host);
list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_set_host_state(vhost, state);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
@@ -583,7 +619,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
vhost->async_crq.cur = 0;
list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -1500,7 +1536,7 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
list_for_each_entry(tgt, &vhost->targets, queue) {
if (rport == tgt->rport) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
}
}
@@ -2686,7 +2722,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
tgt->logo_rcvd = 1;
if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_reinit_host(vhost);
}
}
@@ -3220,8 +3256,8 @@ static void ibmvfc_tasklet(void *data)
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
- tgt->job_step = job_step;
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
+ tgt->job_step = job_step;
wake_up(&tgt->vhost->work_wait_q);
}
@@ -3237,7 +3273,7 @@ static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
wake_up(&tgt->vhost->work_wait_q);
return 0;
} else
@@ -3312,13 +3348,13 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
tgt->add_rport = 1;
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else if (prli_rsp[index].retry)
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3335,7 +3371,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3434,7 +3470,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3515,33 +3551,28 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
break;
}
- if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
- else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
- tgt->scsi_id != tgt->new_scsi_id)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
}
/**
- * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
+ * @tgt: ibmvfc target struct
+ * @done: completion routine to call when the event completes
*
+ * Returns:
+ * Allocated and initialized ibmvfc_event struct
**/
-static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
+ void (*done) (struct ibmvfc_event *))
{
struct ibmvfc_implicit_logout *mad;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
- if (vhost->discovery_threads >= disc_threads)
- return;
-
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
- vhost->discovery_threads++;
- ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+ ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
mad = &evt->iu.implicit_logout;
memset(mad, 0, sizeof(*mad));
@@ -3549,6 +3580,25 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
mad->common.length = cpu_to_be16(sizeof(*mad));
mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ return evt;
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_done);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -3560,6 +3610,53 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
+ u32 status = be16_to_cpu(mad->common.status);
+
+ vhost->discovery_threads--;
+ ibmvfc_free_event(evt);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_and_del_done);
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
@@ -3600,13 +3697,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3799,9 +3896,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Query Target succeeded\n");
- tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id);
if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ ibmvfc_del_tgt(tgt);
else
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
@@ -3815,11 +3911,11 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
@@ -3896,7 +3992,6 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
- tgt->new_scsi_id = scsi_id;
tgt->vhost = vhost;
tgt->need_login = 1;
tgt->cancel_key = vhost->task_set++;
@@ -4189,6 +4284,25 @@ static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_dev_logo_to_do - Is there target logout work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
+ tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* __ibmvfc_work_to_do - Is there task level work to do? (no locking)
* @vhost: ibmvfc host struct
*
@@ -4217,11 +4331,20 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 0;
return 1;
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ if (vhost->discovery_threads == disc_threads)
+ return 0;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ return 1;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 0;
+ return 1;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
@@ -4391,6 +4514,18 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
+ if (ibmvfc_dev_logo_to_do(vhost)) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
rport = tgt->rport;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 7da89f4d26b2..907889f1fa9d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -596,6 +596,8 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
IBMVFC_TGT_ACTION_INIT,
IBMVFC_TGT_ACTION_INIT_WAIT,
+ IBMVFC_TGT_ACTION_LOGOUT_RPORT,
+ IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT,
IBMVFC_TGT_ACTION_DEL_RPORT,
IBMVFC_TGT_ACTION_DELETED_RPORT,
};
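
The two new actions extend the per-target state machine so an rport is implicitly logged out before it is deleted. Inferred from the ibmvfc.c hunks above (ibmvfc_del_tgt itself is introduced earlier in this patch), the intended progression for a stale target is:

	/* ibmvfc_del_tgt():                     -> LOGOUT_RPORT
	 * ibmvfc_tgt_implicit_logout_and_del(): -> LOGOUT_RPORT_WAIT
	 * ..._logout_and_del_done():            -> DEL_RPORT
	 * ibmvfc_do_work() removes the rport:   -> DELETED_RPORT
	 */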
@@ -604,7 +606,6 @@ struct ibmvfc_target {
struct list_head queue;
struct ibmvfc_host *vhost;
u64 scsi_id;
- u64 new_scsi_id;
struct fc_rport *rport;
int target_id;
enum ibmvfc_target_action action;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cd8db1349871..d48a8fa997b9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1299,9 +1299,9 @@ static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
char *p = buffer;
*p = '\0';
- p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
+ p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
- p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
+ p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
return buffer;
}
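
The snprintf() to scnprintf() conversion matters with this pointer-advancing idiom: snprintf() returns the length that would have been written, so on truncation p can move past the end of the buffer, while scnprintf() returns the length actually written. A minimal standalone sketch (not code from this patch):

	char buf[4];
	char *p = buf;

	*p = '\0';
	/* Writes "AB"; scnprintf() returns 2, so p -> buf + 2. */
	p += scnprintf(p, buf + sizeof(buf) - p, "%02X", 0xAB);
	/* Only "-" fits in the 2 bytes left; scnprintf() returns the
	 * truncated count (1), keeping p inside buf.  snprintf() would
	 * have returned 3 and pushed p past the end of the buffer.
	 */
	p += scnprintf(p, buf + sizeof(buf) - p, "-%02X", 0xCD);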
@@ -1322,7 +1322,7 @@ static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
char *p = buffer;
*p = '\0';
- p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
__ipr_format_res_path(res_path, p, len - (buffer - p));
return buffer;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b97aa9ac2ffe..9a0d3d729320 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -451,12 +451,12 @@ struct ipr_config_table_hdr64 {
struct ipr_config_table {
struct ipr_config_table_hdr hdr;
- struct ipr_config_table_entry dev[0];
+ struct ipr_config_table_entry dev[];
}__attribute__((packed, aligned (4)));
struct ipr_config_table64 {
struct ipr_config_table_hdr64 hdr64;
- struct ipr_config_table_entry64 dev[0];
+ struct ipr_config_table_entry64 dev[];
}__attribute__((packed, aligned (8)));
struct ipr_config_table_entry_wrapper {
@@ -792,7 +792,7 @@ struct ipr_mode_page28 {
struct ipr_mode_page_hdr hdr;
u8 num_entries;
u8 entry_length;
- struct ipr_dev_bus_entry bus[0];
+ struct ipr_dev_bus_entry bus[];
}__attribute__((packed));
struct ipr_mode_page24 {
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
index dc26b4aea99e..15d8f3631ab7 100644
--- a/drivers/scsi/isci/sas.h
+++ b/drivers/scsi/isci/sas.h
@@ -201,7 +201,7 @@ struct smp_req {
u8 func; /* byte 1 */
u8 alloc_resp_len; /* byte 2 */
u8 req_len; /* byte 3 */
- u8 req_data[0];
+ u8 req_data[];
} __packed;
/*
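
These [0] to [] conversions replace the old GNU zero-length-array idiom with C99 flexible array members; sizeof() on the containing struct still covers only the fixed header, and the trailing entries are sized at allocation time. A hedged sketch of the usual allocation pattern (illustrative only; n_entries is a stand-in, not a field from these drivers):

	struct ipr_config_table *ct;

	/* struct_size() (from linux/overflow.h) computes
	 * sizeof(*ct) + n_entries * sizeof(ct->dev[0]) with
	 * overflow checking; sizeof(*ct) alone excludes dev[].
	 */
	ct = kzalloc(struct_size(ct, dev, n_entries), GFP_KERNEL);
	if (!ct)
		return -ENOMEM;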
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 70b99c0e2e67..874dd4beed10 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2771,7 +2771,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
"must be a power of 2.\n", total_cmds);
total_cmds = rounddown_pow_of_two(total_cmds);
if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
- return NULL;
+ goto dec_session_count;
printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
total_cmds);
}
@@ -3153,13 +3153,18 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
+ cls_conn->state = ISCSI_CONN_FAILED;
+ break;
case STOP_CONN_TERM:
- iscsi_start_session_recovery(session, conn, flag);
+ cls_conn->state = ISCSI_CONN_DOWN;
break;
default:
iscsi_conn_printk(KERN_ERR, conn,
"invalid stop flag %d\n", flag);
+ return;
}
+
+ iscsi_start_session_recovery(session, conn, flag);
}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);
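
Flattened out, the reworked stop path is (sketch of the resulting control flow, matching the hunk above):

	/* STOP_CONN_RECOVER: cls_conn->state = ISCSI_CONN_FAILED;
	 * STOP_CONN_TERM:    cls_conn->state = ISCSI_CONN_DOWN;
	 * both flags then fall through to
	 *     iscsi_start_session_recovery(session, conn, flag);
	 * an invalid flag logs an error and returns without recovery.
	 */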
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 04d73e2be373..357fdec06bae 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -262,7 +262,6 @@ struct lpfc_stats {
uint32_t elsRcvPRLI;
uint32_t elsRcvLIRR;
uint32_t elsRcvRLS;
- uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsRcvRRQ;
uint32_t elsRcvRTV;
@@ -749,6 +748,7 @@ struct lpfc_hba {
* capability
*/
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
+#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -1353,3 +1353,32 @@ lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
eq->q_mode = delay;
}
+
+
+/*
+ * Macro that declares tables and a routine to perform enum type to
+ * ascii string lookup.
+ *
+ * Defines a <key,value> table for an enum. Uses xxx_INIT defines for
+ * the enum to populate the table. Macro defines a routine (named
+ * by caller) that will search all elements of the table for the key
+ * and return the name string if found or "Unrecognized" if not found.
+ */
+#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init) \
+static struct { \
+ enum enum_name value; \
+ char *name; \
+} fc_##enum_name##_e2str_names[] = enum_init; \
+static const char *routine(enum enum_name table_key) \
+{ \
+ int i; \
+ char *name = "Unrecognized"; \
+ \
+ for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\
+ if (fc_##enum_name##_e2str_names[i].value == table_key) {\
+ name = fc_##enum_name##_e2str_names[i].name; \
+ break; \
+ } \
+ } \
+ return name; \
+}
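+
As a usage sketch, instantiating the macro for a two-value enum produces a table named fc_<enum>_e2str_names plus the lookup routine (the enum and initializer below are hypothetical stand-ins for the FC_LS_TLV_DTAG_INIT-style defines it is written against):

	enum demo_evt { DEMO_EVT_UP, DEMO_EVT_DOWN };
	#define DEMO_EVT_INIT {				\
		{ DEMO_EVT_UP,   "Link Up" },		\
		{ DEMO_EVT_DOWN, "Link Down" },		\
	}

	DECLARE_ENUM2STR_LOOKUP(demo_evt_nm, demo_evt, DEMO_EVT_INIT);

	/* demo_evt_nm(DEMO_EVT_UP) returns "Link Up"; any key not in
	 * the table returns "Unrecognized".
	 */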
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 46f56f30f77e..be3b0ccbac78 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -3869,9 +3869,6 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,512]. Default value is 30.
-# If this parameter value is greater than 1/8th the maximum number of exchanges
-# supported by the HBA port, then the lun queue depth will be reduced to
-# 1/8th the maximum number of exchanges.
*/
LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
"Max number of FCP commands we can queue to a specific LUN");
@@ -4783,7 +4780,7 @@ static DEVICE_ATTR_RW(lpfc_aer_support);
* Description:
* If the @buf contains 1 and the device currently has the AER support
* enabled, then invokes the kernel AER helper routine
- * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
* error status register.
*
* Notes:
@@ -4809,7 +4806,7 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (phba->hba_flag & HBA_AER_ENABLED)
- rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+ rc = pci_aer_clear_nonfatal_status(phba->pcidev);
if (rc == 0)
return strlen(buf);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 25d3dd39bc05..a450477a7e00 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -140,9 +140,10 @@ int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
-int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
+int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 58b35a1442c1..2aa578d20f8c 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2073,8 +2073,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
/* This string MUST be consistent with other FC platforms
* supported by Broadcom.
@@ -2115,8 +2115,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->SerialNumber,
sizeof(ae->un.AttrString));
@@ -2137,8 +2137,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -2158,8 +2158,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelDesc,
sizeof(ae->un.AttrString));
@@ -2181,8 +2181,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t i, j, incr, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
/* Convert JEDEC ID to ascii for hardware version */
incr = vp->rev.biuRev;
@@ -2211,8 +2211,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, lpfc_release_version,
sizeof(ae->un.AttrString));
@@ -2233,8 +2233,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
@@ -2258,8 +2258,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
len = strnlen(ae->un.AttrString,
@@ -2278,8 +2278,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
init_utsname()->sysname,
@@ -2301,7 +2301,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
size = FOURBYTES + sizeof(uint32_t);
@@ -2317,8 +2317,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_node_name(vport,
ae->un.AttrString, 256);
@@ -2336,7 +2336,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Nothing is defined for this currently */
ae->un.AttrInt = cpu_to_be32(0);
@@ -2353,7 +2353,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Each driver instance corresponds to a single port */
ae->un.AttrInt = cpu_to_be32(1);
@@ -2370,8 +2370,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
sizeof(struct lpfc_name));
@@ -2389,8 +2389,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strlcat(ae->un.AttrString, phba->BIOSVersion,
sizeof(ae->un.AttrString));
@@ -2410,7 +2410,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Driver doesn't have access to this information */
ae->un.AttrInt = cpu_to_be32(0);
@@ -2427,8 +2427,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "EMULEX",
sizeof(ae->un.AttrString));
@@ -2450,10 +2450,9 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
- ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
@@ -2476,7 +2475,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -2530,7 +2529,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
@@ -2600,7 +2599,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
hsp = (struct serv_parm *)&vport->fc_sparam;
ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
@@ -2620,8 +2619,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
"/sys/class/scsi_host/host%d", shost->host_no);
@@ -2641,8 +2640,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
vport->phba->os_host_name);
@@ -2662,8 +2661,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -2680,8 +2679,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -2698,8 +2697,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -2717,7 +2716,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
else
@@ -2735,7 +2734,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2750,8 +2749,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, sizeof(struct lpfc_name));
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
sizeof(struct lpfc_name));
@@ -2768,10 +2767,9 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 32);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
- ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
@@ -2792,7 +2790,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* Link Up - operational */
ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
size = FOURBYTES + sizeof(uint32_t);
@@ -2808,7 +2806,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
size = FOURBYTES + sizeof(uint32_t);
@@ -2824,7 +2822,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2839,8 +2837,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Initiator",
sizeof(ae->un.AttrString));
@@ -2860,8 +2858,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
@@ -2881,8 +2879,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
sizeof(ae->un.AttrString));
@@ -2903,8 +2901,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
+ ae = &ad->AttrValue;
+ memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName,
sizeof(ae->un.AttrString));
@@ -2923,7 +2921,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
/* SRIOV (type 3) is not supported */
if (vport->vpi)
@@ -2943,7 +2941,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(0);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -2958,7 +2956,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
struct lpfc_fdmi_attr_entry *ae;
uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+ ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(1);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
@@ -3106,7 +3104,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Registered Port List */
/* One entry (port) per adapter */
rh->rpl.EntryCnt = cpu_to_be32(1);
- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
+ memcpy(&rh->rpl.pe.PortName,
+ &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
/* point to the HBA attribute block */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 42a2bf38eaea..80d1e661b0d4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -3008,10 +3008,9 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* This routine is a generic completion callback function for ELS commands.
* Specifically, it is the callback function which does not need to perform
* any command specific operations. It is currently used by the ELS command
- * issuing routines for the ELS State Change Request (SCR),
- * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
- * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
- * certain debug loggings, this callback function simply invokes the
+ * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
+ * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
+ * Other than certain debug loggings, this callback function simply invokes the
* lpfc_els_chk_latt() routine to check whether link went down during the
* discovery process.
**/
@@ -3025,14 +3024,117 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ELS cmd cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
+ irsp->ulpIoTag, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout);
+
+ /* Check to see if link went down during discovery */
+ lpfc_els_chk_latt(vport);
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for Discovery ELS cmd.
+ * Currently used by the ELS command issuing routines for the ELS State Change
+ * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
+ * These commands will be retried once only for ELS timeout errors.
+ **/
+static void
+lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+ struct lpfc_els_rdf_rsp *prdf;
+ struct lpfc_dmabuf *pcmd, *prsp;
+ u32 *pdata;
+ u32 cmd;
+
+ irsp = &rspiocb->iocb;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->un.elsreq64.remoteID);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
+ "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
+ "x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ irsp->un.ulpWord[4], irsp->ulpTimeout,
+ cmdiocb->retry);
+
+ pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ if (!pcmd)
+ goto out;
+
+ pdata = (u32 *)pcmd->virt;
+ if (!pdata)
+ goto out;
+ cmd = *pdata;
+
+ /* Only 1 retry for ELS Timeout only */
+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT)) {
+ cmdiocb->retry++;
+ if (cmdiocb->retry <= 1) {
+ switch (cmd) {
+ case ELS_CMD_SCR:
+ lpfc_issue_els_scr(vport, cmdiocb->retry);
+ break;
+ case ELS_CMD_RDF:
+ cmdiocb->context1 = NULL; /* save ndlp refcnt */
+ lpfc_issue_els_rdf(vport, cmdiocb->retry);
+ break;
+ }
+ goto out;
+ }
+ phba->fc_stat.elsRetryExceeded++;
+ }
+ if (irsp->ulpStatus) {
+ /* ELS discovery cmd completes with error */
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "4203 ELS cmd x%x error: x%x x%X\n", cmd,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto out;
+ }
+
+ /* The RDF response doesn't have any impact on the running driver
+ * but the notification descriptors are dumped here for support.
+ */
+ if (cmd == ELS_CMD_RDF) {
+ int i;
+
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
+ if (!prdf)
+ goto out;
+
+ for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
+ i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "4677 Fabric RDF Notification Grant Data: "
+ "0x%08x\n",
+ be32_to_cpu(
+ prdf->reg_d1.desc_tags[i]));
+ }
+
+out:
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
lpfc_els_free_iocb(phba, cmdiocb);
@@ -3042,11 +3144,10 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
* lpfc_issue_els_scr - Issue a scr to an node on a vport
* @vport: pointer to a host virtual N_Port data structure.
- * @nportid: N_Port identifier to the remote node.
- * @retry: number of retries to the command IOCB.
+ * @retry: retry counter for the command IOCB.
*
* This routine issues a State Change Request (SCR) to a fabric node
- * on a @vport. The remote node @nportid is passed into the function. It
+ * on a @vport. The remote node is the Fabric Controller (0xfffffd). It
 * first searches the @vport node list to find the matching ndlp. If no such
* ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
* IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
@@ -3062,7 +3163,7 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* 1 - Failed to issue scr command
**/
int
-lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
@@ -3072,9 +3173,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
- ndlp = lpfc_findnode_did(vport, nportid);
+ ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
if (!ndlp) {
- ndlp = lpfc_nlp_init(vport, nportid);
+ ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
if (!ndlp)
return 1;
lpfc_enqueue_node(vport, ndlp);
@@ -3109,7 +3210,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitSCR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
/* The additional lpfc_nlp_put will cause the following
@@ -3339,6 +3440,102 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of the node.
*/
+ /* Don't release reference count as RDF is likely outstanding */
+ return 0;
+}
+
+/**
+ * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @retry: retry counter for the command IOCB.
+ *
+ * This routine issues an ELS RDF to the Fabric Controller to register
+ * for diagnostic functions.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RDF ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued rdf command
+ * 1 - Failed to issue rdf command
+ **/
+int
+lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_els_rdf_req *prdf;
+ struct lpfc_nodelist *ndlp;
+ uint16_t cmdsize;
+
+ cmdsize = sizeof(*prdf);
+
+ ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
+ if (!ndlp) {
+ ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
+ if (!ndlp)
+ return -ENODEV;
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return -ENODEV;
+ }
+
+ /* RDF ELS is not required on an NPIV VN_Port. */
+ if (vport->port_type == LPFC_NPIV_PORT) {
+ lpfc_nlp_put(ndlp);
+ return -EACCES;
+ }
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_RDF);
+ if (!elsiocb) {
+ /* This will trigger the release of the node just
+ * allocated
+ */
+ lpfc_nlp_put(ndlp);
+ return -ENOMEM;
+ }
+
+ /* Configure the payload for the supported FPIN events. */
+ prdf = (struct lpfc_els_rdf_req *)
+ (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ memset(prdf, 0, cmdsize);
+ prdf->rdf.fpin_cmd = ELS_RDF;
+ prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
+ sizeof(struct fc_els_rdf));
+ prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
+ prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
+ prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
+ prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue RDF: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6444 Xmit RDF to remote NPORT x%x\n",
+ ndlp->nlp_DID);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+ IOCB_ERROR) {
+ /* The additional lpfc_nlp_put will cause the following
+ * lpfc_els_free_iocb routine to trigger the release of
+ * the node.
+ */
+ lpfc_nlp_put(ndlp);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return -EIO;
+ }
+
+ /* An RDF was issued - this put ensures the ndlp is cleaned up
+ * when the RDF completes.
+ */
lpfc_nlp_put(ndlp);
return 0;
}
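
Both discovery registrations are fire-and-forget at the call site (return values ignored); the lpfc_hbadisc.c hunk later in this diff issues them back to back:

	/* Issue SCR just before NameServer GID_FT Query */
	lpfc_issue_els_scr(vport, 0);

	/* Then register with the Fabric Controller for FPIN delivery */
	lpfc_issue_els_rdf(vport, 0);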
@@ -7135,108 +7332,12 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/**
- * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
- * @phba: pointer to lpfc hba data structure.
- * @pmb: pointer to the driver internal queue element for mailbox command.
- *
- * This routine is the completion callback function for the MBX_READ_LNK_STAT
- * mailbox command. This callback function is to actually send the Accept
- * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
- * collects the link statistics from the completion of the MBX_READ_LNK_STAT
- * mailbox command, constructs the RPS response with the link statistics
- * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
- * response to the RPS.
- *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
- *
- **/
-static void
-lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
-{
- MAILBOX_t *mb;
- IOCB_t *icmd;
- RPS_RSP *rps_rsp;
- uint8_t *pcmd;
- struct lpfc_iocbq *elsiocb;
- struct lpfc_nodelist *ndlp;
- uint16_t status;
- uint16_t oxid;
- uint16_t rxid;
- uint32_t cmdsize;
-
- mb = &pmb->u.mb;
-
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
- oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
- pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
-
- if (mb->mbxStatus) {
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
- }
-
- cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
- mempool_free(pmb, phba->mbox_mem_pool);
- elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
- lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_ACC);
-
- /* Decrement the ndlp reference count from previous mbox command */
- lpfc_nlp_put(ndlp);
-
- if (!elsiocb)
- return;
-
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rxid;
- icmd->unsli3.rcvsli3.ox_id = oxid;
-
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof(uint32_t); /* Skip past command */
- rps_rsp = (RPS_RSP *)pcmd;
-
- if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
- status = 0x10;
- else
- status = 0x8;
- if (phba->pport->fc_flag & FC_FABRIC)
- status |= 0x4;
-
- rps_rsp->rsvd1 = 0;
- rps_rsp->portStatus = cpu_to_be16(status);
- rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
- rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
- rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
- rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
- rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
- rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
- /* Xmit ELS RPS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
- "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- phba->fc_stat.elsXmitACC++;
- if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
- lpfc_els_free_iocb(phba, elsiocb);
- return;
-}
-
-/**
* lpfc_els_rcv_rls - Process an unsolicited rls iocb
* @vport: pointer to a host virtual N_Port data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
*
- * This routine processes Read Port Status (RPL) IOCB received as an
+ * This routine processes Read Link Status (RLS) IOCB received as an
* ELS unsolicited event. It first checks the remote port state. If the
* remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
* state, it invokes the lpfc_els_rsl_reject() routine to send the reject
@@ -7258,7 +7359,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
+ /* reject the unsolicited RLS request and done with it */
goto reject_out;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
@@ -7306,7 +7407,7 @@ reject_out:
* Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
* will be incremented by 1 for holding the ndlp and the reference to ndlp
* will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
+ * callback function to the RTV Accept Response ELS IOCB command.
*
* Return codes
* 0 - Successfully processed rtv iocb (currently always return 0)
@@ -7325,7 +7426,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
+ /* reject the unsolicited RTV request and done with it */
goto reject_out;
cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
@@ -7378,84 +7479,7 @@ reject_out:
return 0;
}
-/* lpfc_els_rcv_rps - Process an unsolicited rps iocb
- * @vport: pointer to a host virtual N_Port data structure.
- * @cmdiocb: pointer to lpfc command iocb data structure.
- * @ndlp: pointer to a node-list data structure.
- *
- * This routine processes Read Port Status (RPS) IOCB received as an
- * ELS unsolicited event. It first checks the remote port state. If the
- * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
- * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
- * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
- * for reading the HBA link statistics. It is for the callback function,
- * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
- * to actually sending out RPS Accept (ACC) response.
- *
- * Return codes
- * 0 - Successfully processed rps iocb (currently always return 0)
- **/
-static int
-lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
- struct lpfc_nodelist *ndlp)
-{
- struct lpfc_hba *phba = vport->phba;
- uint32_t *lp;
- uint8_t flag;
- LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *pcmd;
- RPS *rps;
- struct ls_rjt stat;
-
- if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
- /* reject the unsolicited RPS request and done with it */
- goto reject_out;
-
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- flag = (be32_to_cpu(*lp++) & 0xf);
- rps = (RPS *) lp;
-
- if ((flag == 0) ||
- ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
- ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
- sizeof(struct lpfc_name)) == 0))) {
-
- printk("Fix me....\n");
- dump_stack();
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
- if (mbox) {
- lpfc_read_lnk_stat(phba, mbox);
- mbox->ctx_buf = (void *)((unsigned long)
- ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
- cmdiocb->iocb.ulpContext)); /* rx_id */
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->vport = vport;
- mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
- if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
- != MBX_NOT_FINISHED)
- /* Mbox completion will send ELS Response */
- return 0;
- /* Decrement reference count used for the failed mbox
- * command.
- */
- lpfc_nlp_put(ndlp);
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- }
-
-reject_out:
- /* issue rejection response */
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
- stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
- return 0;
-}
-
-/* lpfc_issue_els_rrq - Process an unsolicited rps iocb
+/* lpfc_issue_els_rrq - Process an unsolicited rrq iocb
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
* @did: DID of the target.
@@ -8310,6 +8334,90 @@ lpfc_send_els_event(struct lpfc_vport *vport,
}
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
+ FC_LS_TLV_DTAG_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
+ FC_FPIN_LI_EVT_TYPES_INIT);
+
+/**
+ * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
+ * @vport: Pointer to vport object.
+ * @tlv: Pointer to the Link Integrity Notification Descriptor.
+ *
+ * This function processes a link integrity FPIN event by
+ * logging a message.
+ **/
+static void
+lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv)
+{
+ struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
+ const char *li_evt_str;
+ u32 li_evt;
+
+ li_evt = be16_to_cpu(li->event_type);
+ li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "4680 FPIN Link Integrity %s (x%x) "
+ "Detecting PN x%016llx Attached PN x%016llx "
+ "Duration %d mSecs Count %d Port Cnt %d\n",
+ li_evt_str, li_evt,
+ be64_to_cpu(li->detecting_wwpn),
+ be64_to_cpu(li->attached_wwpn),
+ be32_to_cpu(li->event_threshold),
+ be32_to_cpu(li->event_count),
+ be32_to_cpu(li->pname_count));
+}
+
+static void
+lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
+ u32 fpin_length)
+{
+ struct fc_tlv_desc *tlv;
+ const char *dtag_nm;
+ uint32_t desc_cnt = 0, bytes_remain;
+ u32 dtag;
+
+ /* FPINs handled only if we are in the right discovery state */
+ if (vport->port_state < LPFC_DISC_AUTH)
+ return;
+
+ /* make sure there is the full fpin header */
+ if (fpin_length < sizeof(struct fc_els_fpin))
+ return;
+
+ tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+ bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
+ bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
+
+ /* process each descriptor */
+ while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
+ bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
+
+ dtag = be32_to_cpu(tlv->desc_tag);
+ switch (dtag) {
+ case ELS_DTAG_LNK_INTEGRITY:
+ lpfc_els_rcv_fpin_li(vport, tlv);
+ break;
+ default:
+ dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "4678 skipped FPIN descriptor[%d]: "
+ "tag x%x (%s)\n",
+ desc_cnt, dtag, dtag_nm);
+ break;
+ }
+
+ desc_cnt++;
+ bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ }
+
+ fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length,
+ (char *)fpin);
+}
+
/**
* lpfc_els_unsol_buffer - Process an unsolicited event data buffer
* @phba: pointer to lpfc hba data structure.
@@ -8331,7 +8439,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
- uint32_t *payload;
+ uint32_t *payload, payload_len;
uint32_t cmd, did, newnode;
uint8_t rjt_exp, rjt_err = 0, init_link = 0;
IOCB_t *icmd = &elsiocb->iocb;
@@ -8342,6 +8450,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
newnode = 0;
payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
lpfc_post_buffer(phba, pring, 1);
@@ -8632,16 +8741,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
- case ELS_CMD_RPS:
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
- "RCV RPS: did:x%x/ste:x%x flg:x%x",
- did, vport->port_state, ndlp->nlp_flag);
-
- phba->fc_stat.elsRcvRPS++;
- lpfc_els_rcv_rps(vport, elsiocb, ndlp);
- if (newnode)
- lpfc_nlp_put(ndlp);
- break;
case ELS_CMD_RPL:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RPL: did:x%x/ste:x%x flg:x%x",
@@ -8697,12 +8796,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_INVALID_OX_RX;
break;
case ELS_CMD_FPIN:
- /*
- * Received FPIN from fabric - pass it to the
- * transport FPIN handler.
- */
- fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len,
- (char *)payload);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FPIN: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
+ payload_len);
+
+ /* There are no replies, so no rjt codes */
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index dcc8999c6a68..789eecbf32eb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1163,13 +1163,16 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Start discovery by sending a FLOGI. port_state is identically
- * LPFC_FLOGI while waiting for FLOGI cmpl
+ * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
+ * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
*/
- if (vport->port_state != LPFC_FLOGI)
- lpfc_initial_flogi(vport);
- else if (vport->fc_flag & FC_PT2PT)
- lpfc_disc_start(vport);
-
+ if (vport->port_state != LPFC_FLOGI) {
+ if (!(phba->hba_flag & HBA_DEFER_FLOGI))
+ lpfc_initial_flogi(vport);
+ } else {
+ if (vport->fc_flag & FC_PT2PT)
+ lpfc_disc_start(vport);
+ }
return;
out:
@@ -3094,6 +3097,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Check if sending the FLOGI is being deferred to after we get
+ * up to date CSPs from MBX_READ_SPARAM.
+ */
+ if (phba->hba_flag & HBA_DEFER_FLOGI) {
+ lpfc_initial_flogi(vport);
+ phba->hba_flag &= ~HBA_DEFER_FLOGI;
+ }
return;
out:
@@ -3224,6 +3235,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
lpfc_linkup(phba);
+ sparam_mbox = NULL;
+
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!cfglink_mbox)
+ goto out;
+ vport->port_state = LPFC_LOCAL_CFG_LINK;
+ lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->vport = vport;
+ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
+
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
goto out;
@@ -3244,20 +3272,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!cfglink_mbox)
- goto out;
- vport->port_state = LPFC_LOCAL_CFG_LINK;
- lpfc_config_link(phba, cfglink_mbox);
- cfglink_mbox->vport = vport;
- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
- goto out;
- }
- } else {
+ if (phba->hba_flag & HBA_FCOE_MODE) {
vport->port_state = LPFC_VPORT_UNKNOWN;
/*
* Add the driver's default FCF record at FCF index 0 now. This
@@ -3314,6 +3329,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
+ } else {
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+ !(phba->link_flag & LS_LOOPBACK_MODE))
+ phba->hba_flag |= HBA_DEFER_FLOGI;
}
/* Prepare for LINK up registrations */
@@ -4070,7 +4089,9 @@ out:
FC_TYPE_NVME);
/* Issue SCR just before NameServer GID_FT Query */
- lpfc_issue_els_scr(vport, SCR_DID, 0);
+ lpfc_issue_els_scr(vport, 0);
+
+ lpfc_issue_els_rdf(vport, 0);
}
vport->fc_ns_retry = 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 436cdc8c5ef4..ae51c0dbba0a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -22,7 +22,7 @@
#define FDMI_DID 0xfffffaU
#define NameServer_DID 0xfffffcU
-#define SCR_DID 0xfffffdU
+#define Fabric_Cntl_DID 0xfffffdU
#define Fabric_DID 0xfffffeU
#define Bcast_DID 0xffffffU
#define Mask_DID 0xffffffU
@@ -588,6 +588,7 @@ struct fc_vft_header {
#define ELS_CMD_RRQ 0x12000000
#define ELS_CMD_REC 0x13000000
#define ELS_CMD_RDP 0x18000000
+#define ELS_CMD_RDF 0x19000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_NVMEPRLI 0x20140018
#define ELS_CMD_PRLO 0x21100014
@@ -597,7 +598,6 @@ struct fc_vft_header {
#define ELS_CMD_ADISC 0x52000000
#define ELS_CMD_FARP 0x54000000
#define ELS_CMD_FARPR 0x55000000
-#define ELS_CMD_RPS 0x56000000
#define ELS_CMD_RPL 0x57000000
#define ELS_CMD_FAN 0x60000000
#define ELS_CMD_RSCN 0x61040000
@@ -630,6 +630,7 @@ struct fc_vft_header {
#define ELS_CMD_RRQ 0x12
#define ELS_CMD_REC 0x13
#define ELS_CMD_RDP 0x18
+#define ELS_CMD_RDF 0x19
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_NVMEPRLI 0x18001420
#define ELS_CMD_PRLO 0x14001021
@@ -639,7 +640,6 @@ struct fc_vft_header {
#define ELS_CMD_ADISC 0x52
#define ELS_CMD_FARP 0x54
#define ELS_CMD_FARPR 0x55
-#define ELS_CMD_RPS 0x56
#define ELS_CMD_RPL 0x57
#define ELS_CMD_FAN 0x60
#define ELS_CMD_RSCN 0x0461
@@ -919,24 +919,6 @@ typedef struct _RNID { /* Structure is in Big Endian format */
} un;
} __packed RNID;
-typedef struct _RPS { /* Structure is in Big Endian format */
- union {
- uint32_t portNum;
- struct lpfc_name portName;
- } un;
-} RPS;
-
-typedef struct _RPS_RSP { /* Structure is in Big Endian format */
- uint16_t rsvd1;
- uint16_t portStatus;
- uint32_t linkFailureCnt;
- uint32_t lossSyncCnt;
- uint32_t lossSignalCnt;
- uint32_t primSeqErrCnt;
- uint32_t invalidXmitWord;
- uint32_t crcCnt;
-} RPS_RSP;
-
struct RLS { /* Structure is in Big Endian format */
uint32_t rls;
#define rls_rsvd_SHIFT 24
@@ -1340,25 +1322,8 @@ struct fc_rdp_res_frame {
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
#define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
-/*
- * Registered Port List Format
- */
-struct lpfc_fdmi_reg_port_list {
- uint32_t EntryCnt;
- uint32_t pe; /* Variable-length array */
-};
-
-
/* Definitions for HBA / Port attribute entries */
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
-};
-
-
/* Attribute Entry */
struct lpfc_fdmi_attr_entry {
union {
@@ -1369,7 +1334,13 @@ struct lpfc_fdmi_attr_entry {
} un;
};
-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
+struct lpfc_fdmi_attr_def { /* Defined in TLV format */
+ /* Structure is in Big Endian format */
+ uint32_t AttrType:16;
+ uint32_t AttrLen:16;
+ /* Marks start of Value (ATTRIBUTE_ENTRY) */
+ struct lpfc_fdmi_attr_entry AttrValue;
+} __packed;
/*
* HBA Attribute Block
@@ -1394,12 +1365,19 @@ struct lpfc_fdmi_hba_ident {
};
/*
+ * Registered Port List Format
+ */
+struct lpfc_fdmi_reg_port_list {
+ uint32_t EntryCnt;
+ struct lpfc_fdmi_port_entry pe;
+} __packed;
+
+/*
* Register HBA(RHBA)
*/
struct lpfc_fdmi_reg_hba {
struct lpfc_fdmi_hba_ident hi;
- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
-/* struct lpfc_fdmi_attr_block ab; */
+ struct lpfc_fdmi_reg_port_list rpl;
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 9a064b96e570..10c5d1c3122e 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -20,6 +20,8 @@
* included with this package. *
*******************************************************************/
+#include <uapi/scsi/fc/fc_els.h>
+
/* Macros to deal with bit fields. Each bit field must have 3 #defines
* associated with it (_SHIFT, _MASK, and _WORD).
* EG. For a bit field that is in the 7th bit of the "field4" field of a
@@ -4795,6 +4797,23 @@ struct send_frame_wqe {
uint32_t fc_hdr_wd5; /* word 15 */
};
+#define ELS_RDF_REG_TAG_CNT 1
+struct lpfc_els_rdf_reg_desc {
+ struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
+ __be32 desc_tags[ELS_RDF_REG_TAG_CNT];
+ /* tags in reg_desc */
+};
+
+struct lpfc_els_rdf_req {
+ struct fc_els_rdf rdf; /* hdr up to descriptors */
+ struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
+};
+
+struct lpfc_els_rdf_rsp {
+ struct fc_els_rdf_resp rdf_resp; /* hdr up to descriptors */
+ struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
+};
+
union lpfc_wqe {
uint32_t words[16];
struct lpfc_wqe_generic generic;
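The RDF (Register Diagnostic Functions) structures added above wrap the generic ELS definitions pulled in by the new `#include <uapi/scsi/fc/fc_els.h>`. A sketch of how such a request might be populated for FPIN link-integrity registration, assuming the uapi field names (`fpin_cmd`/`desc_len` on `struct fc_els_rdf`; `desc_tag`/`desc_len`/`count` on `struct fc_df_desc_fpin_reg`) and the `ELS_RDF`, `ELS_DTAG_*` and `FC_TLV_DESC_LENGTH_FROM_SZ()` helpers from that header:

    struct lpfc_els_rdf_req *req = vaddr;    /* hypothetical DMA buffer */

    memset(req, 0, sizeof(*req));
    req->rdf.fpin_cmd = ELS_RDF;
    req->rdf.desc_len = cpu_to_be32(sizeof(*req) - sizeof(req->rdf));
    req->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
    req->reg_d1.reg_desc.desc_len =
            cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(req->reg_d1));
    req->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
    req->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);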
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5a605773dd0a..9d03e9b71efb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -512,21 +512,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_sli_read_link_ste(phba);
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- i = (mb->un.varRdConfig.max_xri + 1);
- if (phba->cfg_hba_queue_depth > i) {
+ if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"3359 HBA queue depth changed from %d to %d\n",
- phba->cfg_hba_queue_depth, i);
- phba->cfg_hba_queue_depth = i;
- }
-
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- i = (mb->un.varRdConfig.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > i) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3360 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, i);
- phba->pport->cfg_lun_queue_depth = i;
+ phba->cfg_hba_queue_depth,
+ mb->un.varRdConfig.max_xri);
+ phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
}
phba->lmt = mb->un.varRdConfig.lmt;
@@ -9235,6 +9226,7 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
/* Free the CQ/WQ corresponding to the Hardware Queue */
lpfc_sli4_queue_free(hdwq[idx].io_cq);
lpfc_sli4_queue_free(hdwq[idx].io_wq);
+ hdwq[idx].hba_eq = NULL;
hdwq[idx].io_cq = NULL;
hdwq[idx].io_wq = NULL;
if (phba->cfg_xpsgl && !phba->nvmet_support)
@@ -11105,15 +11097,19 @@ found_any:
* @cpu: cpu going offline
* @eqlist:
*/
-static void
+static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
struct list_head *eqlist)
{
const struct cpumask *maskp;
struct lpfc_queue *eq;
- cpumask_t tmp;
+ struct cpumask *tmp;
u16 idx;
+ tmp = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
maskp = pci_irq_get_affinity(phba->pcidev, idx);
if (!maskp)
@@ -11123,7 +11119,7 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
* then we don't need to poll the eq attached
* to it.
*/
- if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
continue;
/* get the cpus that are online and are affini-
* tized to this irq vector. If the count is
@@ -11131,8 +11127,8 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
* down this vector. Since this cpu has not
* gone offline yet, we need >1.
*/
- cpumask_and(&tmp, maskp, cpu_online_mask);
- if (cpumask_weight(&tmp) > 1)
+ cpumask_and(tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(tmp) > 1)
continue;
/* Now that we have an irq to shutdown, get the eq
@@ -11143,6 +11139,8 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
list_add(&eq->_poll_list, eqlist);
}
+ kfree(tmp);
+ return 0;
}
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
@@ -11313,7 +11311,9 @@ static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
lpfc_irq_rebalance(phba, cpu, true);
- lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ if (retval)
+ return retval;
/* start polling on these eq's */
list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
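`lpfc_cpuhp_get_eq()` above trades an on-stack `cpumask_t` for a heap allocation: with large `NR_CPUS` (and especially `CONFIG_CPUMASK_OFFSTACK` configurations) a full cpumask is too big to keep on a CPU-hotplug callback's stack, which is why the function now allocates the mask and must report `-ENOMEM`. The same pattern via the dedicated cpumask helpers, as a sketch (the patch open-codes it with `kzalloc(cpumask_size(), GFP_KERNEL)`):

    cpumask_var_t tmp;  /* array or pointer, per CONFIG_CPUMASK_OFFSTACK */

    if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
            return -ENOMEM;
    /* ... cpumask_and(tmp, maskp, ...) as in the hunk above ... */
    free_cpumask_var(tmp);
    return 0;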
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2c7e0b22db2f..0fc9a242bc65 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -671,8 +671,10 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_cmd->prot_data_type = 0;
#endif
tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
- if (!tmp)
+ if (!tmp) {
+ lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
return NULL;
+ }
lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 64002b0cb02d..0b26b5c0527e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -7371,15 +7371,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3362 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, rc);
- phba->pport->cfg_lun_queue_depth = rc;
- }
-
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
@@ -9468,6 +9459,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_RDF ||
*pcmd == ELS_CMD_RSCN_XMT ||
*pcmd == ELS_CMD_FDISC ||
*pcmd == ELS_CMD_LOGO ||
@@ -17950,6 +17942,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
+ /* Free the sequence's header buffer */
+ if (!first_iocbq)
+ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
+
return first_iocbq;
}
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9563c49f36ab..c4ab006e6ecc 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.6.0.3"
+#define LPFC_DRIVER_VERSION "12.6.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index fd4b5ac6ac5b..babe85d7b537 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2987,9 +2987,10 @@ megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
u32 __iomem *reg = (u32 __iomem *)reg_set;
for (i = 0; i < sz / sizeof(u32); i++) {
- bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE,
- "%08x: %08x\n", (i * 4),
- readl(&reg[i]));
+ bytes_wrote += scnprintf(loc + bytes_wrote,
+ PAGE_SIZE - bytes_wrote,
+ "%08x: %08x\n", (i * 4),
+ readl(&reg[i]));
}
return bytes_wrote;
}
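The `snprintf()` → `scnprintf()` change in `megasas_dump_sys_regs()` above is a real fix, not style: `snprintf()` returns the number of bytes that *would* have been written given unlimited space, so accumulating its return value while passing a fixed `PAGE_SIZE` lets later iterations write past the end of the sysfs page. `scnprintf()` returns the bytes actually stored, and the remaining space shrinks each pass. The safe accumulation pattern, as a sketch (`n` and `vals` are illustrative):

    size_t off = 0;
    int i;

    for (i = 0; i < n; i++)
            off += scnprintf(buf + off, PAGE_SIZE - off,
                             "%08x: %08x\n", i * 4, vals[i]);
    return off;     /* always stays below PAGE_SIZE */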
@@ -8224,8 +8225,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
cmd->cmd_status_drv);
- error = -EBUSY;
- goto out;
+ error = -EBUSY;
+ goto out;
}
cmd->sync_cmd = 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c597d544eb39..778d5e6ce385 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -207,7 +207,7 @@ struct fw_event_work {
u8 ignore;
u16 event;
struct kref refcount;
- char event_data[0] __aligned(4);
+ char event_data[] __aligned(4);
};
static void fw_event_work_free(struct kref *r)
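`char event_data[0]` is the old GNU zero-length-array idiom; `char event_data[]` is the C99 flexible array member the kernel has been converting to tree-wide, which lets the compiler and fortify checks reject misuse (a flexible member must be last, and `sizeof` on it is an error rather than 0). The identical `[0]` → `[]` conversions in the mvsas, mvumi and pmcraid hunks below follow the same rule. Allocation is unchanged since the member contributes no size; a sketch with the overflow-safe `struct_size()` helper from `<linux/overflow.h>` (`data_len` is illustrative):

    struct fw_event_work *fw_event;

    /* sizeof(*fw_event) + data_len bytes, with overflow checking */
    fw_event = kzalloc(struct_size(fw_event, event_data, data_len),
                       GFP_ATOMIC);
    if (!fw_event)
            return NULL;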
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 519edc796691..327fdd5ee962 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -394,7 +394,7 @@ struct mvs_info {
dma_addr_t bulk_buffer_dma1;
#define TRASH_BUCKET_SIZE 0x20000
void *dma_pool;
- struct mvs_slot_info slot_info[0];
+ struct mvs_slot_info slot_info[];
};
struct mvs_prv_info{
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index ec8cc2207536..60d5691fc4ab 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -130,7 +130,7 @@ enum {
struct mvumi_hotplug_event {
u16 size;
u8 dummy[2];
- u8 bitmap[0];
+ u8 bitmap[];
};
struct mvumi_driver_event {
@@ -290,7 +290,7 @@ struct mvumi_rsp_frame {
struct mvumi_ob_data {
struct list_head list;
- unsigned char data[0];
+ unsigned char data[];
};
struct version_info {
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 11a2cb844ecb..f88adab3f913 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -2203,7 +2203,7 @@ static struct script script0 __initdata = {
** Possible data corruption during Memory Write and Invalidate.
** This work-around resets the addressing logic prior to the
** start of the first MOVE of a DATA IN phase.
- ** (See Documentation/scsi/ncr53c8xx.txt for more information)
+ ** (See Documentation/scsi/ncr53c8xx.rst for more information)
*/
SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
20,
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index dc9b74c9348a..9696b6b5591f 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -36,7 +36,7 @@ config PCMCIA_NINJA_SCSI
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
- <file:Documentation/scsi/NinjaSCSI.txt>.
+ <file:Documentation/scsi/NinjaSCSI.rst>.
Supported cards:
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7c6be2ec110d..3c9f42779dd0 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -463,7 +463,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
pm8001_ha->nvmd_completion = &completion;
payload.minor_function = 7;
payload.offset = 0;
- payload.length = 4096;
+ payload.rd_length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
@@ -554,6 +554,49 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+/**
+ ** non_fatal_log_show - non-fatal error logging
+ ** @cdev: pointer to embedded class device
+ ** @buf: the buffer returned
+ **
+ ** A sysfs 'read-only' shost attribute.
+ **/
+static ssize_t non_fatal_log_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 count;
+
+ count = pm80xx_get_non_fatal_dump(cdev, attr, buf);
+ return count;
+}
+static DEVICE_ATTR_RO(non_fatal_log);
+
+static ssize_t non_fatal_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%08x",
+ pm8001_ha->non_fatal_count);
+}
+
+static ssize_t non_fatal_count_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ int val = 0;
+
+ if (kstrtoint(buf, 16, &val) != 0)
+ return -EINVAL;
+
+ pm8001_ha->non_fatal_count = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(non_fatal_count);
/**
** pm8001_ctl_gsm_log_show - gsm dump collection
@@ -631,7 +674,7 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
pm8001_ha->fw_image->size);
- payload->length = pm8001_ha->fw_image->size;
+ payload->wr_length = pm8001_ha->fw_image->size;
payload->id = 0;
payload->minor_function = 0x1;
pm8001_ha->nvmd_completion = &completion;
@@ -677,7 +720,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
IOCTL_BUF_SIZE);
for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
- payload->length = 1024*16;
+ payload->wr_length = 1024*16;
payload->id = 0;
fwControl =
(struct fw_control_info *)&payload->func_specific;
@@ -829,6 +872,8 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_aap_log,
&dev_attr_iop_log,
&dev_attr_fatal_log,
+ &dev_attr_non_fatal_log,
+ &dev_attr_non_fatal_count,
&dev_attr_gsm_log,
&dev_attr_max_out_io,
&dev_attr_max_devices,
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 48e0624ecc68..1c7f15fd69ce 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -75,7 +75,7 @@ enum port_type {
};
/* driver compile-time configuration */
-#define PM8001_MAX_CCB 512 /* max ccbs supported */
+#define PM8001_MAX_CCB 256 /* max ccbs supported */
#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
#define PM8001_MAX_INB_NUM 1
#define PM8001_MAX_OUTB_NUM 1
@@ -99,7 +99,8 @@ enum port_type {
#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
-#define PM8001_MAX_DMA_SG SG_ALL
+#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528
+#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG
enum memory_region_num {
AAP1 = 0x0, /* application acceleration processor */
IOP, /* IO processor */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 2328ff1349ac..fb9848e1d481 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4793,7 +4793,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
- fw_control_context->len = ioctl_payload->length;
+ fw_control_context->len = ioctl_payload->rd_length;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4814,7 +4814,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
twi_page_size << 8 | TWI_DEVICE);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4823,7 +4823,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case C_SEEPROM: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4832,7 +4832,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case VPD_FLASH: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4841,7 +4841,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case EXPAN_ROM: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4850,7 +4850,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case IOP_RDUMP: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->rd_length);
nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4890,7 +4890,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
&ioctl_payload->func_specific,
- ioctl_payload->length);
+ ioctl_payload->wr_length);
memset(&nvmd_req, 0, sizeof(nvmd_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc) {
@@ -4909,7 +4909,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
twi_page_size << 8 | TWI_DEVICE);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
@@ -4918,7 +4918,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
}
case C_SEEPROM:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4927,7 +4927,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
case VPD_FLASH:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
@@ -4936,7 +4936,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
case EXPAN_ROM:
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
- nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+ nvmd_req.resp_len = cpu_to_le32(ioctl_payload->wr_length);
nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3c6076e4c6d2..a8f5344fdfda 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -95,7 +95,7 @@ static struct scsi_host_template pm8001_sht = {
.bios_param = sas_bios_param,
.can_queue = 1,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = PM8001_MAX_DMA_SG,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
@@ -251,6 +251,9 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
return ret;
}
+static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
+static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
+
/**
* pm8001_alloc - initiate our hba structure and 6 DMAs area.
* @pm8001_ha:our hba structure.
@@ -483,6 +486,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = logging_level;
+ pm8001_ha->non_fatal_count = 0;
if (link_rate >= 1 && link_rate <= 15)
pm8001_ha->link_rate = (link_rate << 8);
else {
@@ -635,22 +639,22 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
if (pm8001_ha->chip_id == chip_8001) {
if (deviceid == 0x8081 || deviceid == 0x0042) {
payload.minor_function = 4;
- payload.length = 4096;
+ payload.rd_length = 4096;
} else {
payload.minor_function = 0;
- payload.length = 128;
+ payload.rd_length = 128;
}
} else if ((pm8001_ha->chip_id == chip_8070 ||
pm8001_ha->chip_id == chip_8072) &&
pm8001_ha->pdev->subsystem_vendor == PCI_VENDOR_ID_ATTO) {
payload.minor_function = 4;
- payload.length = 4096;
+ payload.rd_length = 4096;
} else {
payload.minor_function = 1;
- payload.length = 4096;
+ payload.rd_length = 4096;
}
payload.offset = 0;
- payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
+ payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
if (!payload.func_specific) {
PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n"));
return;
@@ -720,7 +724,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
/* SAS ADDRESS read from flash / EEPROM */
payload.minor_function = 6;
payload.offset = 0;
- payload.length = 4096;
+ payload.rd_length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
@@ -893,9 +897,7 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
- u32 i = 0, j = 0;
u32 number_of_intr;
- int flag = 0;
int rc;
/* SPCv controllers supports 64 msi-x */
@@ -903,11 +905,11 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
number_of_intr = 1;
} else {
number_of_intr = PM8001_MAX_MSIX_VEC;
- flag &= ~IRQF_SHARED;
}
rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
number_of_intr, PCI_IRQ_MSIX);
+ number_of_intr = rc;
if (rc < 0)
return rc;
pm8001_ha->number_of_intr = number_of_intr;
@@ -915,8 +917,22 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
"pci_alloc_irq_vectors request ret:%d no of intr %d\n",
rc, pm8001_ha->number_of_intr));
+ return 0;
+}
- for (i = 0; i < number_of_intr; i++) {
+static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
+{
+ u32 i = 0, j = 0;
+ int flag = 0, rc = 0;
+
+ if (pm8001_ha->chip_id != chip_8001)
+ flag &= ~IRQF_SHARED;
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("pci_enable_msix request number of intr %d\n",
+ pm8001_ha->number_of_intr));
+
+ for (i = 0; i < pm8001_ha->number_of_intr; i++) {
snprintf(pm8001_ha->intr_drvname[i],
sizeof(pm8001_ha->intr_drvname[0]),
"%s-%d", pm8001_ha->name, i);
@@ -941,6 +957,21 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
}
#endif
+static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
+{
+ struct pci_dev *pdev;
+
+ pdev = pm8001_ha->pdev;
+
+#ifdef PM8001_USE_MSIX
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ return pm8001_setup_msix(pm8001_ha);
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MSIX not supported!!!\n"));
+#endif
+ return 0;
+}
+
/**
* pm8001_request_irq - register interrupt
* @chip_info: our ha struct.
@@ -954,7 +985,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
#ifdef PM8001_USE_MSIX
if (pdev->msix_cap && pci_msi_enabled())
- return pm8001_setup_msix(pm8001_ha);
+ return pm8001_request_msix(pm8001_ha);
else {
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("MSIX not supported!!!\n"));
@@ -989,6 +1020,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
struct pm8001_hba_info *pm8001_ha;
struct Scsi_Host *shost = NULL;
const struct pm8001_chip_info *chip;
+ struct sas_ha_struct *sha;
dev_printk(KERN_INFO, &pdev->dev,
"pm80xx: driver version %s\n", DRV_VERSION);
@@ -1017,12 +1049,12 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_regions;
}
chip = &pm8001_chips[ent->driver_data];
- SHOST_TO_SAS_HA(shost) =
- kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
- if (!SHOST_TO_SAS_HA(shost)) {
+ sha = kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
+ if (!sha) {
rc = -ENOMEM;
goto err_out_free_host;
}
+ SHOST_TO_SAS_HA(shost) = sha;
rc = pm8001_prep_sas_ha_init(shost, chip);
if (rc) {
@@ -1036,7 +1068,14 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
rc = -ENOMEM;
goto err_out_free;
}
- list_add_tail(&pm8001_ha->list, &hba_list);
+ /* Setup Interrupt */
+ rc = pm8001_setup_irq(pm8001_ha);
+ if (rc) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "pm8001_setup_irq failed [ret: %d]\n", rc));
+ goto err_out_shost;
+ }
+
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc) {
@@ -1048,6 +1087,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
+ /* Request Interrupt */
rc = pm8001_request_irq(pm8001_ha);
if (rc) {
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
@@ -1070,8 +1110,12 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
- if (rc)
+ if (rc) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "sas_register_ha failed [ret: %d]\n", rc));
goto err_out_shost;
+ }
+ list_add_tail(&pm8001_ha->list, &hba_list);
scsi_scan_host(pm8001_ha->shost);
pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
@@ -1081,7 +1125,7 @@ err_out_shost:
err_out_ha_free:
pm8001_free(pm8001_ha);
err_out_free:
- kfree(SHOST_TO_SAS_HA(shost));
+ kfree(sha);
err_out_free_host:
scsi_host_put(shost);
err_out_regions:
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 93438c8f67da..ae7ba9b3c4bc 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -137,10 +137,11 @@ struct pm8001_ioctl_payload {
u32 signature;
u16 major_function;
u16 minor_function;
- u16 length;
u16 status;
u16 offset;
u16 id;
+ u32 wr_length;
+ u32 rd_length;
u8 *func_specific;
};
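Splitting the single 16-bit `length` into 32-bit `wr_length`/`rd_length` makes the transfer direction explicit at each call site (and matches the get/set NVMD hunks above, where reads use `rd_length` and writes `wr_length`) while removing the 64 KiB ceiling. A read-side sketch mirroring `pm8001_ctl_bios_version_show()` (values illustrative):

    struct pm8001_ioctl_payload payload = { };

    payload.minor_function = 7;              /* BIOS version page */
    payload.offset = 0;
    payload.rd_length = 4096;                /* bytes to read back */
    payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
    if (!payload.func_specific)
            return -ENOMEM;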
@@ -558,6 +559,8 @@ struct pm8001_hba_info {
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
u32 reset_in_progress;
+ u32 non_fatal_count;
+ u32 non_fatal_read_length;
};
struct pm8001_work {
@@ -741,6 +744,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf);
+ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf);
ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index d1d95f1a2c6a..4d205ebaee87 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -393,6 +393,136 @@ moreData:
(char *)buf;
}
+/* pm80xx_get_non_fatal_dump - dump the non-fatal debug data that the
+ * firmware DMAs to the host buffer.
+ */
+ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ void __iomem *nonfatal_table_address = pm8001_ha->fatal_tbl_addr;
+ u32 accum_len = 0;
+ u32 total_len = 0;
+ u32 reg_val = 0;
+ u32 *temp = NULL;
+ u32 index = 0;
+ u32 output_length;
+ unsigned long start = 0;
+ char *buf_copy = buf;
+
+ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+ if (++pm8001_ha->non_fatal_count == 1) {
+ if (pm8001_ha->chip_id == chip_8001) {
+ snprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+ PAGE_SIZE, "Not supported for SPC controller");
+ return 0;
+ }
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("forensic_info TYPE_NON_FATAL...\n"));
+ /*
+ * Step 1: Write the host buffer parameters in the MPI Fatal and
+	 * Non-Fatal Error Dump Capture Table. This is the buffer
+ * where debug data will be DMAed to.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LO_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_lo);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HI_OFFSET,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].phys_addr_hi);
+
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_LENGTH, SYSFS_OFFSET);
+
+ /* Optionally, set the DUMPCTRL bit to 1 if the host
+ * keeps sending active I/Os while capturing the non-fatal
+	 * debug data. Otherwise, leave this bit set to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+ /*
+ * Step 2: Clear Accumulative Length of Debug Data Transferred
+ * [ACCDDLEN] field in the MPI Fatal and Non-Fatal Error Dump
+ * Capture Table to zero.
+ */
+ pm8001_mw32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN, 0);
+
+	/* initialize previous accumulated length to 0 */
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
+ pm8001_ha->non_fatal_read_length = 0;
+ }
+
+ total_len = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_TOTAL_LEN);
+ /*
+	 * Step 3: Clear Fatal/Non-Fatal Debug Data Transfer Status [FDDTSTAT]
+ * field and then request that the SPCv controller transfer the debug
+ * data by setting bit 7 of the Inbound Doorbell Set Register.
+ */
+ pm8001_mw32(nonfatal_table_address, MPI_FATAL_EDUMP_TABLE_STATUS, 0);
+ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET,
+ SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP);
+
+ /*
+ * Step 4.1: Read back the Inbound Doorbell Set Register (by polling for
+ * 2 seconds) until register bit 7 is cleared.
+ * This step only indicates the request is accepted by the controller.
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+ do {
+ reg_val = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) &
+ SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP;
+ } while ((reg_val != 0) && time_before(jiffies, start));
+
+ /* Step 4.2: To check the completion of the transfer, poll the Fatal/Non
+ * Fatal Debug Data Transfer Status [FDDTSTAT] field for 2 seconds in
+ * the MPI Fatal and Non-Fatal Error Dump Capture Table.
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+ do {
+ reg_val = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS);
+ } while ((!reg_val) && time_before(jiffies, start));
+
+ if ((reg_val == 0x00) ||
+ (reg_val == MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED) ||
+ (reg_val > MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE)) {
+ pm8001_ha->non_fatal_read_length = 0;
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 0xFFFFFFFF);
+ pm8001_ha->non_fatal_count = 0;
+ return (buf_copy - buf);
+ } else if (reg_val ==
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA) {
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 2);
+ } else if ((reg_val == MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) ||
+ (pm8001_ha->non_fatal_read_length >= total_len)) {
+ pm8001_ha->non_fatal_read_length = 0;
+ buf_copy += snprintf(buf_copy, PAGE_SIZE, "%08x ", 4);
+ pm8001_ha->non_fatal_count = 0;
+ }
+ accum_len = pm8001_mr32(nonfatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+ output_length = accum_len -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+
+ for (index = 0; index < output_length/4; index++)
+ buf_copy += snprintf(buf_copy, PAGE_SIZE,
+ "%08x ", *(temp+index));
+
+ pm8001_ha->non_fatal_read_length += output_length;
+
+ /* store current accumulated length to use in next iteration as
+ * the previous accumulated length
+ */
+ pm8001_ha->forensic_preserved_accumulated_transfer = accum_len;
+ return (buf_copy - buf);
+}
+
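Both 2-second polls in `pm80xx_get_non_fatal_dump()` use the standard jiffies deadline idiom; note that the variable named `start` actually holds the deadline. The pattern, as a sketch (`addr` is illustrative):

    unsigned long deadline = jiffies + 2 * HZ;
    u32 val;

    do {
            val = pm8001_mr32(addr, MPI_FATAL_EDUMP_TABLE_STATUS);
    } while (!val && time_before(jiffies, deadline));
    /* val still 0 here means the controller never responded in time */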
/**
* read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information
@@ -1438,11 +1568,18 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (!pm8001_ha->controller_fatal_error) {
/* Check if MPI is in ready state to reset */
if (mpi_uninit_check(pm8001_ha) != 0) {
- regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ u32 r0 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+ u32 r1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ u32 r2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ u32 r3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "MPI state is not ready scratch1 :0x%x\n",
- regval));
- return -1;
+ "MPI state is not ready scratch: %x:%x:%x:%x\n",
+ r0, r1, r2, r3));
+ /* if things aren't ready but the bootloader is ok then
+ * try the reset anyway.
+ */
+ if (r1 & SCRATCH_PAD1_BOOTSTATE_MASK)
+ return -1;
}
}
/* checked for reset register normal state; 0x0 */
@@ -3708,28 +3845,32 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
+ u32 tag;
u8 page_code;
+ int rc = 0;
struct set_phy_profile_resp *pPayload =
(struct set_phy_profile_resp *)(piomb + 4);
u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
u32 status = le32_to_cpu(pPayload->status);
+ tag = le32_to_cpu(pPayload->tag);
page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
if (status) {
/* status is FAILED */
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("PhyProfile command failed with status "
"0x%08X \n", status));
- return -1;
+ rc = -1;
} else {
if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Invalid page code 0x%X\n",
page_code));
- return -1;
+ rc = -1;
}
}
- return 0;
+ pm8001_tag_free(pm8001_ha, tag);
+ return rc;
}
/**
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index a4f7eb8f50a3..15c962108075 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -623,7 +623,7 @@ struct pmcraid_aen_msg {
u32 hostno;
u32 length;
u8 reserved[8];
- u8 data[0];
+ u8 data[];
};
/* Controller state event message type */
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 9513fd320ffd..9498279ae80d 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -36,6 +36,7 @@ struct qedi_endpoint;
*/
#define QEDI_MODE_NORMAL 0
#define QEDI_MODE_RECOVERY 1
+#define QEDI_MODE_SHUTDOWN 2
#define ISCSI_WQE_SET_PTU_INVALIDATE 1
#define QEDI_MAX_ISCSI_TASK 4096
@@ -278,6 +279,7 @@ struct qedi_ctx {
#define QEDI_IOTHREAD_WAKE 2
#define QEDI_IN_RECOVERY 5
#define QEDI_IN_OFFLINE 6
+#define QEDI_IN_SHUTDOWN 7
u8 mac[ETH_ALEN];
u32 src_ip[4];
@@ -331,6 +333,7 @@ struct qedi_ctx {
u16 ll2_mtu;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
spinlock_t task_idx_lock; /* To protect gbl context */
s32 last_tidx_alloc;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8ba7c771ce4d..116645c08c71 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -73,5 +73,6 @@ void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
void qedi_clearsq(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn,
struct iscsi_task *task);
+void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess);
#endif
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8829880a54c3..1f4a5fb00a05 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -392,6 +392,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
+ qedi_conn->iscsi_ep = ep;
qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
qedi_conn->fw_cid = qedi_ep->fw_cid;
qedi_conn->cmd_cleanup_req = 0;
@@ -782,6 +783,9 @@ static int qedi_task_xmit(struct iscsi_task *task)
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
+ return -ENODEV;
+
cmd->state = 0;
cmd->task = NULL;
cmd->use_slowpath = false;
@@ -1596,6 +1600,20 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep,
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
+void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *session = cls_sess->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ if (iscsi_is_session_online(cls_sess))
+ qedi_ep_disconnect(qedi_conn->iscsi_ep);
+
+ qedi_conn_destroy(qedi_conn->cls_conn);
+
+ qedi_session_destroy(cls_sess);
+}
+
void qedi_process_tcp_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 67c3b7349271..39dc27c85e3c 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -149,6 +149,7 @@ struct qedi_conn {
struct iscsi_cls_conn *cls_conn;
struct qedi_ctx *qedi;
struct qedi_endpoint *ep;
+ struct iscsi_endpoint *iscsi_ep;
struct list_head active_cmd_list;
spinlock_t list_lock; /* internal conn lock */
u32 active_cmd_count;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index acb930b8c6a6..b995b19865ca 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -58,6 +58,7 @@ static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
+static void qedi_recovery_handler(struct work_struct *work);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -1113,6 +1114,20 @@ exit_get_data:
return;
}
+static void qedi_schedule_recovery_handler(void *dev)
+{
+ struct qedi_ctx *qedi = dev;
+
+ QEDI_ERR(&qedi->dbg_ctx, "Recovery handler scheduled.\n");
+
+ if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags))
+ return;
+
+ atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+ schedule_delayed_work(&qedi->recovery_work, 0);
+}
+
static void qedi_link_update(void *dev, struct qed_link_output *link)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@@ -1130,6 +1145,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
static struct qed_iscsi_cb_ops qedi_cb_ops = {
{
.link_update = qedi_link_update,
+ .schedule_recovery_handler = qedi_schedule_recovery_handler,
.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
.get_generic_tlv_data = qedi_get_generic_tlv_data,
}
@@ -2328,16 +2344,22 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
int rval;
- if (qedi->tmf_thread) {
- flush_workqueue(qedi->tmf_thread);
- destroy_workqueue(qedi->tmf_thread);
- qedi->tmf_thread = NULL;
- }
+ if (mode == QEDI_MODE_SHUTDOWN)
+ iscsi_host_for_each_session(qedi->shost,
+ qedi_clear_session_ctx);
+
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
+ if (qedi->tmf_thread) {
+ flush_workqueue(qedi->tmf_thread);
+ destroy_workqueue(qedi->tmf_thread);
+ qedi->tmf_thread = NULL;
+ }
- if (qedi->offload_thread) {
- flush_workqueue(qedi->offload_thread);
- destroy_workqueue(qedi->offload_thread);
- qedi->offload_thread = NULL;
+ if (qedi->offload_thread) {
+ flush_workqueue(qedi->offload_thread);
+ destroy_workqueue(qedi->offload_thread);
+ qedi->offload_thread = NULL;
+ }
}
#ifdef CONFIG_DEBUG_FS
@@ -2353,8 +2375,7 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_ops->ll2->stop(qedi->cdev);
}
- if (mode == QEDI_MODE_NORMAL)
- qedi_free_iscsi_pf_param(qedi);
+ qedi_free_iscsi_pf_param(qedi);
rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
if (rval)
@@ -2367,15 +2388,12 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_destroy_fp(qedi);
- if (mode == QEDI_MODE_NORMAL) {
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
qedi_release_cid_que(qedi);
qedi_cm_free_mem(qedi);
qedi_free_uio(qedi->udev);
qedi_free_itt(qedi);
- iscsi_host_remove(qedi->shost);
- iscsi_host_free(qedi->shost);
-
if (qedi->ll2_recv_thread) {
kthread_stop(qedi->ll2_recv_thread);
qedi->ll2_recv_thread = NULL;
@@ -2384,9 +2402,22 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
if (qedi->boot_kset)
iscsi_boot_destroy_kset(qedi->boot_kset);
+
+ iscsi_host_remove(qedi->shost);
+ iscsi_host_free(qedi->shost);
}
}
+static void qedi_shutdown(struct pci_dev *pdev)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: Shutdown qedi\n", __func__);
+ if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
+ return;
+ __qedi_remove(pdev, QEDI_MODE_SHUTDOWN);
+}
+
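`qedi_shutdown()` is wired as the PCI `.shutdown` hook in the `qedi_pci_driver` hunk below, so reboot or kexec logs out iSCSI sessions cleanly; `test_and_set_bit(QEDI_IN_SHUTDOWN, ...)` makes it run at most once, and the same flag gates new I/O in `qedi_task_xmit()` above. The teardown it drives, as a call-flow sketch:

    /* qedi_shutdown()
     *   -> __qedi_remove(pdev, QEDI_MODE_SHUTDOWN)
     *        iscsi_host_for_each_session(shost, qedi_clear_session_ctx)
     *          -> qedi_ep_disconnect() / qedi_conn_destroy() /
     *             qedi_session_destroy() per live session
     *        then the workqueue/uio/host teardown shared with
     *        QEDI_MODE_NORMAL
     */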
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
@@ -2435,14 +2466,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi->dev_info.common.num_hwfns,
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
- if (mode != QEDI_MODE_RECOVERY) {
- rc = qedi_set_iscsi_pf_param(qedi);
- if (rc) {
- rc = -ENOMEM;
- QEDI_ERR(&qedi->dbg_ctx,
- "Set iSCSI pf param fail\n");
- goto free_host;
- }
+ rc = qedi_set_iscsi_pf_param(qedi);
+ if (rc) {
+ rc = -ENOMEM;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Set iSCSI pf param fail\n");
+ goto free_host;
}
qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
@@ -2633,6 +2662,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
goto free_cid_que;
}
+ INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
+
/* F/w needs 1st task context memory entry for performance */
set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
atomic_set(&qedi->num_offloads, 0);
@@ -2673,6 +2704,32 @@ exit_probe:
return rc;
}
+static void qedi_mark_conn_recovery(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ iscsi_conn_failure(qedi_conn->cls_conn->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+static void qedi_recovery_handler(struct work_struct *work)
+{
+ struct qedi_ctx *qedi =
+ container_of(work, struct qedi_ctx, recovery_work.work);
+
+ iscsi_host_for_each_session(qedi->shost, qedi_mark_conn_recovery);
+
+ /* Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qedi_ops->common->recovery_prolog(qedi->cdev);
+
+ __qedi_remove(qedi->pdev, QEDI_MODE_RECOVERY);
+ __qedi_probe(qedi->pdev, QEDI_MODE_RECOVERY);
+ clear_bit(QEDI_IN_RECOVERY, &qedi->flags);
+}
+
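The recovery path added above is driven by the new `.schedule_recovery_handler` callback. Its `test_and_set_bit()` is the key detail: the test-and-set is atomic, so multiple MFW events collapse into a single queued recovery, and the handler reopens the gate only after the re-probe finishes. The idiom, as a sketch of the pieces above:

    /* scheduler side (may run from notifier context): */
    if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags))
            return;                 /* a recovery is already queued */
    schedule_delayed_work(&qedi->recovery_work, 0);

    /* worker side, once remove + probe have completed: */
    clear_bit(QEDI_IN_RECOVERY, &qedi->flags);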
static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
return __qedi_probe(pdev, QEDI_MODE_NORMAL);
@@ -2697,6 +2754,7 @@ static struct pci_driver qedi_pci_driver = {
.id_table = qedi_pci_tbl,
.probe = qedi_probe,
.remove = qedi_remove,
+ .shutdown = qedi_shutdown,
};
static int __init qedi_init(void)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d7e7043f9eab..97cabd7e0014 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1324,6 +1324,79 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t led[3] = { 0 };
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ if (ql26xx_led_config(vha, 0, led))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
+ led[0], led[1], led[2]);
+}
+
+static ssize_t
+qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t options = BIT_0;
+ uint16_t led[3] = { 0 };
+ uint16_t word[4];
+ int n;
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
+ if (n == 4) {
+ if (word[0] == 3) {
+ options |= BIT_3|BIT_2|BIT_1;
+ led[0] = word[1];
+ led[1] = word[2];
+ led[2] = word[3];
+ goto write;
+ }
+ return -EINVAL;
+ }
+
+ if (n == 2) {
+ /* check led index */
+ if (word[0] == 0) {
+ options |= BIT_2;
+ led[0] = word[1];
+ goto write;
+ }
+ if (word[0] == 1) {
+ options |= BIT_3;
+ led[1] = word[1];
+ goto write;
+ }
+ if (word[0] == 2) {
+ options |= BIT_1;
+ led[2] = word[1];
+ goto write;
+ }
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+
+write:
+ if (ql26xx_led_config(vha, options, led))
+ return -EFAULT;
+
+ return count;
+}
+
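`qla2x00_beacon_config_store()` distinguishes its two input shapes purely by how many hex words `sscanf()` consumed. The accepted grammar and the resulting mailbox option bits, as a sketch:

    /* "3 <led0> <led1> <led2>" -> options |= BIT_3|BIT_2|BIT_1 (all LEDs)
     * "0 <val>"                -> options |= BIT_2 (LED 0 only)
     * "1 <val>"                -> options |= BIT_3 (LED 1 only)
     * "2 <val>"                -> options |= BIT_1 (LED 2 only)
     * anything else            -> -EINVAL
     */
    n = sscanf(buf, "%hx %hx %hx %hx", word, word + 1, word + 2, word + 3);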
+static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2250,6 +2323,26 @@ qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
}
+static ssize_t
+qla2x00_dport_diagnostics_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+ !IS_QLA28XX(vha->hw))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ if (!*vha->dport_data)
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
+ vha->dport_data[0], vha->dport_data[1],
+ vha->dport_data[2], vha->dport_data[3]);
+}
+static DEVICE_ATTR(dport_diagnostics, 0444,
+ qla2x00_dport_diagnostics_show, NULL);
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -2264,6 +2357,8 @@ static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
qla2x00_beacon_store);
+static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
+ qla2x00_beacon_config_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
@@ -2327,6 +2422,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_zio,
&dev_attr_zio_timer,
&dev_attr_beacon,
+ &dev_attr_beacon_config,
&dev_attr_optrom_bios_version,
&dev_attr_optrom_efi_version,
&dev_attr_optrom_fcode_version,
@@ -2355,6 +2451,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_port_speed,
&dev_attr_port_no,
&dev_attr_fw_attr,
+ &dev_attr_dport_diagnostics,
NULL, /* reserve for qlini_mode */
NULL, /* reserve for ql2xiniexchg */
NULL, /* reserve for ql2xexchoffld */
@@ -2648,22 +2745,28 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (rval != QLA_SUCCESS)
goto done_free;
- p->link_failure_count = stats->link_fail_cnt;
- p->loss_of_sync_count = stats->loss_sync_cnt;
- p->loss_of_signal_count = stats->loss_sig_cnt;
- p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
- p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
- p->invalid_crc_count = stats->inval_crc_cnt;
+ p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
+ p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
+ p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
+ p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
+ p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
+ p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
if (IS_FWI2_CAPABLE(ha)) {
- p->lip_count = stats->lip_cnt;
- p->tx_frames = stats->tx_frames;
- p->rx_frames = stats->rx_frames;
- p->dumped_frames = stats->discarded_frames;
- p->nos_count = stats->nos_rcvd;
+ p->lip_count = le32_to_cpu(stats->lip_cnt);
+ p->tx_frames = le32_to_cpu(stats->tx_frames);
+ p->rx_frames = le32_to_cpu(stats->rx_frames);
+ p->dumped_frames = le32_to_cpu(stats->discarded_frames);
+ p->nos_count = le32_to_cpu(stats->nos_rcvd);
p->error_frames =
- stats->dropped_frames + stats->discarded_frames;
- p->rx_words = vha->qla_stats.input_bytes;
- p->tx_words = vha->qla_stats.output_bytes;
+ le32_to_cpu(stats->dropped_frames) +
+ le32_to_cpu(stats->discarded_frames);
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
+ p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
+ } else {
+ p->rx_words = vha->qla_stats.input_bytes;
+ p->tx_words = vha->qla_stats.output_bytes;
+ }
}
p->fcp_control_requests = vha->qla_stats.control_requests;
p->fcp_input_requests = vha->qla_stats.input_requests;
@@ -2671,7 +2774,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
p->seconds_since_last_reset =
- get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+ get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
do_div(p->seconds_since_last_reset, HZ);
done_free:
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index d7169e43f5e1..97b51c477972 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,6 +11,14 @@
#include <linux/delay.h>
#include <linux/bsg-lib.h>
+static void qla2xxx_free_fcport_work(struct work_struct *work)
+{
+ struct fc_port *fcport = container_of(work, typeof(*fcport),
+ free_work);
+
+ qla2x00_free_fcport(fcport);
+}
+
/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
@@ -53,8 +61,10 @@ void qla2x00_bsg_sp_free(srb_t *sp)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_FXIOCB_BCMD ||
- sp->type == SRB_ELS_CMD_HST)
- qla2x00_free_fcport(sp->fcport);
+ sp->type == SRB_ELS_CMD_HST) {
+ INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
+ queue_work(ha->wq, &sp->fcport->free_work);
+ }
qla2x00_rel_sp(sp);
}
@@ -718,7 +728,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
uint16_t response[MAILBOX_REGISTER_COUNT];
uint16_t config[4], new_config[4];
uint8_t *fw_sts_ptr;
- uint8_t *req_data = NULL;
+ void *req_data = NULL;
dma_addr_t req_data_dma;
uint32_t req_data_len;
uint8_t *rsp_data = NULL;
@@ -796,10 +806,11 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
bsg_request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
- (ha->current_topology == ISP_CFG_F ||
- (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
- req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
- elreq.options == EXTERNAL_LOOPBACK) {
+ ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
+ get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
+ req_data_len == MAX_ELS_FRAME_PAYLOAD &&
+ elreq.options == EXTERNAL_LOOPBACK))) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
ql_dbg(ql_dbg_user, vha, 0x701e,
"BSG request type: %s.\n", type);
@@ -1506,10 +1517,15 @@ qla2x00_update_optrom(struct bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
- ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
- bsg_reply->result = DID_OK;
+ if (rval) {
+ bsg_reply->result = -EINVAL;
+ rval = -EINVAL;
+ } else {
+ bsg_reply->result = DID_OK;
+ }
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
ha->optrom_state = QLA_SWAITING;
@@ -2404,7 +2420,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
regions.global_image = active_regions.global;
if (IS_QLA28XX(ha)) {
- qla27xx_get_active_image(vha, &active_regions);
+ qla28xx_get_aux_images(vha, &active_regions);
regions.board_config = active_regions.aux.board_config;
regions.vpd_nvram = active_regions.aux.vpd_nvram;
regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 88a56e8480f7..f301a8048b2f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -73,6 +73,8 @@
#include "qla_def.h"
#include <linux/delay.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/qla.h>
static uint32_t ql_dbg_offset = 0x800;
@@ -2537,15 +2539,30 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
{
va_list va;
struct va_format vaf;
-
- if (!ql_mask_match(level))
- return;
+ char pbuf[64];
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
+ if (!ql_mask_match(level)) {
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <msg-id>:<host> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id,
+ vha->host_no);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+ trace_ql_dbg_log(pbuf, &vaf);
+ va_end(va);
+ return;
+ }
+
if (vha != NULL) {
const struct pci_dev *pdev = vha->hw->pdev;
/* <module-name> <pci-name> <msg-id>:<host> Message */
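`CREATE_TRACE_POINTS` follows the standard tracepoint rule: exactly one translation unit defines it before including the event header, so the tracepoint bodies are emitted once; every other file just includes the header. With that in place, messages filtered out by `ql_mask_match()` still reach the `ql_dbg_log` tracepoint instead of vanishing. As a sketch:

    /* qla_dbg.c (once per module): */
    #define CREATE_TRACE_POINTS
    #include <trace/events/qla.h>

    /* any other user: */
    #include <trace/events/qla.h>

    trace_ql_dbg_log(pbuf, &vaf);   /* the filtered branch of ql_dbg() */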
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ed32e9715794..47c7a56438b5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -119,7 +119,10 @@ typedef struct {
#define LSD(x) ((uint32_t)((uint64_t)(x)))
#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
-#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
+static inline uint32_t make_handle(uint16_t x, uint16_t y)
+{
+ return ((uint32_t)x << 16) | y;
+}
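Replacing the `MAKE_HANDLE()` macro with a `static inline` gives the two halves real types: each argument is evaluated exactly once and converted to `uint16_t` at a defined point, instead of being pasted textually into the shift/or expression. A call-site sketch (the names are hypothetical):

    uint16_t que_id = 3, index = 42;
    uint32_t handle = make_handle(que_id, index);   /* 0x0003002a */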
/*
* I/O register
@@ -414,7 +417,7 @@ struct els_logo_payload {
struct els_plogi_payload {
uint8_t opcode;
uint8_t rsvd[3];
- uint8_t data[112];
+ __be32 data[112 / 4];
};
struct ct_arg {
@@ -597,9 +600,6 @@ typedef struct srb {
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
- unsigned int abort:1;
- unsigned int aborted:1;
- unsigned int completed:1;
uint32_t handle;
uint16_t flags;
@@ -1049,6 +1049,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */
#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
#define MBA_TRANS_INSERT 0x8130 /* Transceiver Insertion */
+#define MBA_TRANS_REMOVE 0x8131 /* Transceiver Removal */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
@@ -1134,6 +1135,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */
#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/
#define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */
+#define MBC_SET_GET_FC_LED_CONFIG 0x3b /* Set/Get FC LED config */
#define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */
#define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */
#define MBC_GET_RESOURCE_COUNTS 0x42 /* Get Resource Counts. */
@@ -1260,10 +1262,15 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_ELS_CMD 0x5
#define RNID_TYPE_PORT_LOGIN 0x7
+#define RNID_BUFFER_CREDITS 0x8
#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
+#define ELS_CMD_MAP_SIZE 32
+#define ELS_COMMAND_RDP 0x18
+
/*
* Firmware state codes from get firmware state mailbox command
*/
@@ -1474,47 +1481,44 @@ typedef struct {
#define GLSO_USE_DID BIT_3
struct link_statistics {
- uint32_t link_fail_cnt;
- uint32_t loss_sync_cnt;
- uint32_t loss_sig_cnt;
- uint32_t prim_seq_err_cnt;
- uint32_t inval_xmit_word_cnt;
- uint32_t inval_crc_cnt;
- uint32_t lip_cnt;
- uint32_t link_up_cnt;
- uint32_t link_down_loop_init_tmo;
- uint32_t link_down_los;
- uint32_t link_down_loss_rcv_clk;
+ __le32 link_fail_cnt;
+ __le32 loss_sync_cnt;
+ __le32 loss_sig_cnt;
+ __le32 prim_seq_err_cnt;
+ __le32 inval_xmit_word_cnt;
+ __le32 inval_crc_cnt;
+ __le32 lip_cnt;
+ __le32 link_up_cnt;
+ __le32 link_down_loop_init_tmo;
+ __le32 link_down_los;
+ __le32 link_down_loss_rcv_clk;
uint32_t reserved0[5];
- uint32_t port_cfg_chg;
+ __le32 port_cfg_chg;
uint32_t reserved1[11];
- uint32_t rsp_q_full;
- uint32_t atio_q_full;
- uint32_t drop_ae;
- uint32_t els_proto_err;
- uint32_t reserved2;
- uint32_t tx_frames;
- uint32_t rx_frames;
- uint32_t discarded_frames;
- uint32_t dropped_frames;
+ __le32 rsp_q_full;
+ __le32 atio_q_full;
+ __le32 drop_ae;
+ __le32 els_proto_err;
+ __le32 reserved2;
+ __le32 tx_frames;
+ __le32 rx_frames;
+ __le32 discarded_frames;
+ __le32 dropped_frames;
uint32_t reserved3;
- uint32_t nos_rcvd;
+ __le32 nos_rcvd;
uint32_t reserved4[4];
- uint32_t tx_prjt;
- uint32_t rcv_exfail;
- uint32_t rcv_abts;
- uint32_t seq_frm_miss;
- uint32_t corr_err;
- uint32_t mb_rqst;
- uint32_t nport_full;
- uint32_t eofa;
+ __le32 tx_prjt;
+ __le32 rcv_exfail;
+ __le32 rcv_abts;
+ __le32 seq_frm_miss;
+ __le32 corr_err;
+ __le32 mb_rqst;
+ __le32 nport_full;
+ __le32 eofa;
uint32_t reserved5;
- uint32_t fpm_recv_word_cnt_lo;
- uint32_t fpm_recv_word_cnt_hi;
- uint32_t fpm_disc_word_cnt_lo;
- uint32_t fpm_disc_word_cnt_hi;
- uint32_t fpm_xmit_word_cnt_lo;
- uint32_t fpm_xmit_word_cnt_hi;
+ __le64 fpm_recv_word_cnt;
+ __le64 fpm_disc_word_cnt;
+ __le64 fpm_xmit_word_cnt;
uint32_t reserved6[70];
};
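With the endianness annotations, sparse can flag missing byte-order conversions; a consumer of the structure would now read the counters roughly like this (a sketch, variable names assumed):

	/* sketch: explicit conversion at each use after the __le32/__le64
	 * annotations; the old lo/hi word pairs collapse into single
	 * __le64 reads */
	u32 link_failures = le32_to_cpu(stats->link_fail_cnt);
	u64 rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);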
@@ -2624,10 +2628,11 @@ static const char * const port_dstate_str[] = {
#define GFF_ID_RSP_SIZE (16 + 128)
/*
- * HBA attribute types.
+ * FDMI HBA attribute types.
*/
-#define FDMI_HBA_ATTR_COUNT 9
-#define FDMIV2_HBA_ATTR_COUNT 17
+#define FDMI1_HBA_ATTR_COUNT 9
+#define FDMI2_HBA_ATTR_COUNT 17
+
#define FDMI_HBA_NODE_NAME 0x1
#define FDMI_HBA_MANUFACTURER 0x2
#define FDMI_HBA_SERIAL_NUMBER 0x3
@@ -2639,12 +2644,13 @@ static const char * const port_dstate_str[] = {
#define FDMI_HBA_FIRMWARE_VERSION 0x9
#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
+
#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc
-#define FDMI_HBA_VENDOR_ID 0xd
+#define FDMI_HBA_VENDOR_SPECIFIC_INFO 0xd
#define FDMI_HBA_NUM_PORTS 0xe
#define FDMI_HBA_FABRIC_NAME 0xf
#define FDMI_HBA_BOOT_BIOS_NAME 0x10
-#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0
+#define FDMI_HBA_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
uint16_t type;
@@ -2661,31 +2667,9 @@ struct ct_fdmi_hba_attr {
uint8_t fw_version[32];
uint8_t os_version[128];
uint32_t max_ct_len;
- } a;
-};
-
-struct ct_fdmi_hba_attributes {
- uint32_t count;
- struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
-};
-struct ct_fdmiv2_hba_attr {
- uint16_t type;
- uint16_t len;
- union {
- uint8_t node_name[WWN_SIZE];
- uint8_t manufacturer[64];
- uint8_t serial_num[32];
- uint8_t model[16+1];
- uint8_t model_desc[80];
- uint8_t hw_version[16];
- uint8_t driver_version[32];
- uint8_t orom_version[16];
- uint8_t fw_version[32];
- uint8_t os_version[128];
- uint32_t max_ct_len;
uint8_t sym_name[256];
- uint32_t vendor_id;
+ uint32_t vendor_specific_info;
uint32_t num_ports;
uint8_t fabric_name[WWN_SIZE];
uint8_t bios_name[32];
@@ -2693,22 +2677,30 @@ struct ct_fdmiv2_hba_attr {
} a;
};
-struct ct_fdmiv2_hba_attributes {
+struct ct_fdmi1_hba_attributes {
uint32_t count;
- struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+ struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT];
+};
+
+struct ct_fdmi2_hba_attributes {
+ uint32_t count;
+ struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT];
};
/*
- * Port attribute types.
+ * FDMI Port attribute types.
*/
-#define FDMI_PORT_ATTR_COUNT 6
-#define FDMIV2_PORT_ATTR_COUNT 16
+#define FDMI1_PORT_ATTR_COUNT 6
+#define FDMI2_PORT_ATTR_COUNT 16
+#define FDMI2_SMARTSAN_PORT_ATTR_COUNT 23
+
#define FDMI_PORT_FC4_TYPES 0x1
#define FDMI_PORT_SUPPORT_SPEED 0x2
#define FDMI_PORT_CURRENT_SPEED 0x3
#define FDMI_PORT_MAX_FRAME_SIZE 0x4
#define FDMI_PORT_OS_DEVICE_NAME 0x5
#define FDMI_PORT_HOST_NAME 0x6
+
#define FDMI_PORT_NODE_NAME 0x7
#define FDMI_PORT_NAME 0x8
#define FDMI_PORT_SYM_NAME 0x9
@@ -2718,7 +2710,15 @@ struct ct_fdmiv2_hba_attributes {
#define FDMI_PORT_FC4_TYPE 0xd
#define FDMI_PORT_STATE 0x101
#define FDMI_PORT_COUNT 0x102
-#define FDMI_PORT_ID 0x103
+#define FDMI_PORT_IDENTIFIER 0x103
+
+#define FDMI_SMARTSAN_SERVICE 0xF100
+#define FDMI_SMARTSAN_GUID 0xF101
+#define FDMI_SMARTSAN_VERSION 0xF102
+#define FDMI_SMARTSAN_PROD_NAME 0xF103
+#define FDMI_SMARTSAN_PORT_INFO 0xF104
+#define FDMI_SMARTSAN_QOS_SUPPORT 0xF105
+#define FDMI_SMARTSAN_SECURITY_SUPPORT 0xF106
#define FDMI_PORT_SPEED_1GB 0x1
#define FDMI_PORT_SPEED_2GB 0x2
@@ -2734,7 +2734,7 @@ struct ct_fdmiv2_hba_attributes {
#define FC_CLASS_3 0x08
#define FC_CLASS_2_3 0x0C
-struct ct_fdmiv2_port_attr {
+struct ct_fdmi_port_attr {
uint16_t type;
uint16_t len;
union {
@@ -2744,6 +2744,7 @@ struct ct_fdmiv2_port_attr {
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[256];
+
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
@@ -2754,35 +2755,38 @@ struct ct_fdmiv2_port_attr {
uint32_t port_state;
uint32_t num_ports;
uint32_t port_id;
+
+ uint8_t smartsan_service[24];
+ uint8_t smartsan_guid[16];
+ uint8_t smartsan_version[24];
+ uint8_t smartsan_prod_name[16];
+ uint32_t smartsan_port_info;
+ uint32_t smartsan_qos_support;
+ uint32_t smartsan_security_support;
} a;
};
-/*
- * Port Attribute Block.
- */
-struct ct_fdmiv2_port_attributes {
+struct ct_fdmi1_port_attributes {
uint32_t count;
- struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
-};
-
-struct ct_fdmi_port_attr {
- uint16_t type;
- uint16_t len;
- union {
- uint8_t fc4_types[32];
- uint32_t sup_speed;
- uint32_t cur_speed;
- uint32_t max_frame_size;
- uint8_t os_dev_name[32];
- uint8_t host_name[256];
- } a;
+ struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT];
};
-struct ct_fdmi_port_attributes {
+struct ct_fdmi2_port_attributes {
uint32_t count;
- struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
+ struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT];
};
+#define FDMI_ATTR_TYPELEN(obj) \
+ (sizeof((obj)->type) + sizeof((obj)->len))
+
+#define FDMI_ATTR_ALIGNMENT(len) \
+ (4 - ((len) & 3))
+
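A worked example of the length arithmetic these helpers centralize (matching the open-coded `alen += 4 - (alen & 3)` idiom they replace, which pads a full word even when the length is already aligned):

	/* e.g. for the "QLGC" vendor identifier attribute:
	 *   alen  = strlen("QLGC");             alen = 4
	 *   alen += FDMI_ATTR_ALIGNMENT(alen);   alen = 8  (full pad word
	 *                                        when already 4-aligned)
	 *   alen += FDMI_ATTR_TYPELEN(eiter);    alen = 12 (u16 type + u16 len)
	 */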
+/* FDMI register call options */
+#define CALLOPT_FDMI1 0
+#define CALLOPT_FDMI2 1
+#define CALLOPT_FDMI2_SMARTSAN 2
+
/* FDMI definitions. */
#define GRHL_CMD 0x100
#define GHAT_CMD 0x101
@@ -2793,10 +2797,13 @@ struct ct_fdmi_port_attributes {
#define RHBA_RSP_SIZE 16
#define RHAT_CMD 0x201
+
#define RPRT_CMD 0x210
+#define RPRT_RSP_SIZE 24
#define RPA_CMD 0x211
#define RPA_RSP_SIZE 16
+#define SMARTSAN_RPA_RSP_SIZE 24
#define DHBA_CMD 0x300
#define DHBA_REQ_SIZE (16 + 8)
@@ -2879,30 +2886,24 @@ struct ct_sns_req {
uint8_t hba_identifier[8];
uint32_t entry_count;
uint8_t port_name[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi2_hba_attributes attrs;
} rhba;
struct {
uint8_t hba_identifier[8];
- uint32_t entry_count;
- uint8_t port_name[8];
- struct ct_fdmiv2_hba_attributes attrs;
- } rhba2;
-
- struct {
- uint8_t hba_identifier[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi1_hba_attributes attrs;
} rhat;
struct {
uint8_t port_name[8];
- struct ct_fdmi_port_attributes attrs;
+ struct ct_fdmi2_port_attributes attrs;
} rpa;
struct {
+ uint8_t hba_identifier[8];
uint8_t port_name[8];
- struct ct_fdmiv2_port_attributes attrs;
- } rpa2;
+ struct ct_fdmi2_port_attributes attrs;
+ } rprt;
struct {
uint8_t port_name[8];
@@ -3016,7 +3017,7 @@ struct ct_sns_rsp {
struct {
uint32_t entry_count;
uint8_t port_name[8];
- struct ct_fdmi_hba_attributes attrs;
+ struct ct_fdmi1_hba_attributes attrs;
} ghat;
struct {
@@ -3250,6 +3251,7 @@ struct isp_operations {
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
@@ -3562,6 +3564,134 @@ struct qlfc_fw {
uint32_t len;
};
+struct rdp_req_payload {
+ uint32_t els_request;
+ uint32_t desc_list_len;
+
+ /* NPIV descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t reserved;
+ uint8_t nport_id[3];
+ } npiv_desc;
+};
+
+struct rdp_rsp_payload {
+ struct {
+ uint32_t cmd;
+ uint32_t len;
+ } hdr;
+
+ /* LS Request Info descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t req_payload_word_0;
+ } ls_req_info_desc;
+
+ /* LS Request Info descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t req_payload_word_0;
+ } ls_req_info_desc2;
+
+ /* SFP diagnostic param descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint16_t temperature;
+ uint16_t vcc;
+ uint16_t tx_bias;
+ uint16_t tx_power;
+ uint16_t rx_power;
+ uint16_t sfp_flags;
+ } sfp_diag_desc;
+
+ /* Port Speed Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint16_t speed_capab;
+ uint16_t operating_speed;
+ } port_speed_desc;
+
+ /* Link Error Status Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t link_fail_cnt;
+ uint32_t loss_sync_cnt;
+ uint32_t loss_sig_cnt;
+ uint32_t prim_seq_err_cnt;
+ uint32_t inval_xmit_word_cnt;
+ uint32_t inval_crc_cnt;
+ uint8_t pn_port_phy_type;
+ uint8_t reserved[3];
+ } ls_err_desc;
+
+ /* Port name description with diag param */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t WWNN[WWN_SIZE];
+ uint8_t WWPN[WWN_SIZE];
+ } port_name_diag_desc;
+
+ /* Port Name desc for Direct attached Fx_Port or Nx_Port */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t WWNN[WWN_SIZE];
+ uint8_t WWPN[WWN_SIZE];
+ } port_name_direct_desc;
+
+ /* Buffer Credit descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint32_t fcport_b2b;
+ uint32_t attached_fcport_b2b;
+ uint32_t fcport_rtt;
+ } buffer_credit_desc;
+
+ /* Optical Element Data Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint16_t high_alarm;
+ uint16_t low_alarm;
+ uint16_t high_warn;
+ uint16_t low_warn;
+ uint32_t element_flags;
+ } optical_elmt_desc[5];
+
+ /* Optical Product Data Descriptor */
+ struct {
+ uint32_t desc_tag;
+ uint32_t desc_len;
+ uint8_t vendor_name[16];
+ uint8_t part_number[16];
+ uint8_t serial_number[16];
+ uint8_t revision[4];
+ uint8_t date[8];
+ } optical_prod_desc;
+};
+
+#define RDP_DESC_LEN(obj) \
+ (sizeof(obj) - sizeof((obj).desc_tag) - sizeof((obj).desc_len))
+
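The macro lets each descriptor report its payload length without the tag and length words; assumed usage against the structure above (`rdp` is hypothetical here):

	/* sketch: rdp points at a struct rdp_rsp_payload being filled in */
	rdp->port_speed_desc.desc_len =
		cpu_to_be32(RDP_DESC_LEN(rdp->port_speed_desc));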
+#define RDP_PORT_SPEED_1GB BIT_15
+#define RDP_PORT_SPEED_2GB BIT_14
+#define RDP_PORT_SPEED_4GB BIT_13
+#define RDP_PORT_SPEED_10GB BIT_12
+#define RDP_PORT_SPEED_8GB BIT_11
+#define RDP_PORT_SPEED_16GB BIT_10
+#define RDP_PORT_SPEED_32GB BIT_9
+#define RDP_PORT_SPEED_64GB BIT_8
+#define RDP_PORT_SPEED_UNKNOWN BIT_0
+
struct scsi_qlt_host {
void *target_lport_ptr;
struct mutex tgt_mutex;
@@ -3673,8 +3803,8 @@ struct qla_hw_data {
uint32_t fw_started:1;
uint32_t fw_init_done:1;
- uint32_t detected_lr_sfp:1;
- uint32_t using_lr_setting:1;
+ uint32_t lr_detected:1;
+
uint32_t rida_fmt2:1;
uint32_t purge_mbox:1;
uint32_t n2n_bigger:1;
@@ -3683,7 +3813,7 @@ struct qla_hw_data {
} flags;
uint16_t max_exchg;
- uint16_t long_range_distance; /* 32G & above */
+ uint16_t lr_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3965,6 +4095,8 @@ struct qla_hw_data {
#define SFP_DEV_SIZE 512
#define SFP_BLOCK_SIZE 64
+#define SFP_RTDI_LEN SFP_BLOCK_SIZE
+
void *sfp_data;
dma_addr_t sfp_data_dma;
@@ -4344,6 +4476,15 @@ struct active_regions {
#define QLA_SET_DATA_RATE_NOLR 1
#define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */
+struct purex_item {
+ struct list_head list;
+ struct scsi_qla_host *vha;
+ void (*process_item)(struct scsi_qla_host *vha, void *pkt);
+ struct {
+ uint8_t iocb[64];
+ } iocb;
+};
+
/*
* Qlogic scsi host structure
*/
@@ -4424,6 +4565,8 @@ typedef struct scsi_qla_host {
#define ISP_ABORT_TO_ROM 33
#define VPORT_DELETE 34
+#define PROCESS_PUREX_IOCB 63
+
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
@@ -4461,6 +4604,7 @@ typedef struct scsi_qla_host {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t fabric_node_name[WWN_SIZE];
+ uint8_t fabric_port_name[WWN_SIZE];
struct nvme_fc_local_port *nvme_local_port;
struct completion nvme_del_done;
@@ -4531,6 +4675,11 @@ typedef struct scsi_qla_host {
uint16_t ql2xexchoffld;
uint16_t ql2xiniexchg;
+ struct purex_list {
+ struct list_head head;
+ spinlock_t lock;
+ } purex_list;
+
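A sketch of the producer side this list implies (helper name hypothetical): the interrupt path queues the raw IOCB and kicks the DPC thread, which later drains the list via PROCESS_PUREX_IOCB:

	static void qla24xx_queue_purex_item(scsi_qla_host_t *vha,
					     struct purex_item *item)
	{
		unsigned long flags;

		spin_lock_irqsave(&vha->purex_list.lock, flags);
		list_add_tail(&item->list, &vha->purex_list.head);
		spin_unlock_irqrestore(&vha->purex_list.lock, flags);

		set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}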
struct name_list_extended gnl;
/* Count of active session/fcport */
int fcport_count;
@@ -4540,6 +4689,7 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ __le16 dport_data[4];
struct list_head gpnid_list;
struct fab_scan scan;
@@ -4822,11 +4972,14 @@ struct sff_8247_a0 {
u8 resv2[128];
};
-#define AUTO_DETECT_SFP_SUPPORT(_vha)\
- (ql2xautodetectsfp && !_vha->vp_idx && \
- (IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
- IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \
- IS_QLA28XX(_vha->hw)))
+/* BPM -- Buffer Plus Management support. */
+#define IS_BPM_CAPABLE(ha) \
+ (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BPM_RANGE_CAPABLE(ha) \
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BPM_ENABLED(vha) \
+ (ql2xautodetectsfp && !vha->vp_idx && IS_BPM_CAPABLE(vha->hw))
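Call sites that previously tested AUTO_DETECT_SFP_SUPPORT() would presumably become:

	/* sketch: capability is now per-HBA, enablement per-vha */
	if (IS_BPM_ENABLED(vha))
		qla24xx_detect_sfp(vha);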
#define FLASH_SEMAPHORE_REGISTER_ADDR 0x00101016
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0a6fb359f4d5..e62b2115235e 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -134,11 +134,11 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
} else {
seq_puts(s, "FW Resource count\n\n");
seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
- seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
- seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
- seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
- seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
- seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
seq_printf(s, "MAX VP count[%d]\n", mb[11]);
seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
@@ -149,7 +149,6 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
mb[22]);
seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
mb[23]);
-
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d641918cdd46..f9bad5bd7198 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -31,6 +31,9 @@
#define PDO_FORCE_ADISC BIT_1
#define PDO_FORCE_PLOGI BIT_0
+struct buffer_credit_24xx {
+ u32 parameter[28];
+};
#define PORT_DATABASE_24XX_SIZE 64
struct port_database_24xx {
@@ -721,6 +724,48 @@ struct ct_entry_24xx {
};
/*
+ * ISP queue - PUREX IOCB entry structure definition
+ */
+#define PUREX_IOCB_TYPE 0x51 /* PUREX IOCB entry */
+struct purex_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint16_t reserved1;
+ uint8_t vp_idx;
+ uint8_t reserved2;
+
+ uint16_t status_flags;
+ uint16_t nport_handle;
+
+ uint16_t frame_size;
+ uint16_t trunc_frame_size;
+
+ uint32_t rx_xchg_addr;
+
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+
+ uint8_t f_ctl[3];
+ uint8_t type;
+
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t param;
+
+ uint8_t els_frame_payload[20];
+};
+
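A simplified sketch of how the response-queue handler might dispatch this entry (handler name hypothetical; the opcode byte index assumes the firmware's 32-bit word swapping of the received frame payload):

	static void qla24xx_handle_purex(struct scsi_qla_host *vha, void *pkt)
	{
		struct purex_entry_24xx *purex = pkt;

		if (purex->els_frame_payload[3] == ELS_COMMAND_RDP)
			qla24xx_process_purex_rdp(vha, purex);
	}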
+/*
* ISP queue - ELS Pass-Through entry structure definition.
*/
#define ELS_IOCB_TYPE 0x53 /* ELS Pass-Through IOCB entry */
@@ -732,9 +777,8 @@ struct els_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t reserved_1;
-
- uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t comp_status; /* response only */
+ uint16_t nport_handle;
uint16_t tx_dsd_count;
@@ -749,7 +793,7 @@ struct els_entry_24xx {
uint8_t opcode;
uint8_t reserved_2;
- uint8_t port_id[3];
+ uint8_t d_id[3];
uint8_t s_id[3];
uint16_t control_flags; /* Control flags. */
@@ -761,13 +805,24 @@ struct els_entry_24xx {
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
- __le32 rx_byte_count;
- __le32 tx_byte_count;
+ union {
+ struct {
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
- __le64 tx_address __packed; /* Data segment 0 address. */
- __le32 tx_len; /* Data segment 0 length. */
- __le64 rx_address __packed; /* Data segment 1 address. */
- __le32 rx_len; /* Data segment 1 length. */
+ __le64 tx_address __packed; /* DSD 0 address. */
+ __le32 tx_len; /* DSD 0 length. */
+
+ __le64 rx_address __packed; /* DSD 1 address. */
+ __le32 rx_len; /* DSD 1 length. */
+ };
+ struct {
+ uint32_t total_byte_count;
+ uint32_t error_subcode_1;
+ uint32_t error_subcode_2;
+ uint32_t error_subcode_3;
+ };
+ };
};
struct els_sts_entry_24xx {
@@ -793,15 +848,16 @@ struct els_sts_entry_24xx {
uint8_t opcode;
uint8_t reserved_3;
- uint8_t port_id[3];
- uint8_t reserved_4;
-
- uint16_t reserved_5;
+ uint8_t d_id[3];
+ uint8_t s_id[3];
uint16_t control_flags; /* Control flags. */
uint32_t total_byte_count;
uint32_t error_subcode_1;
uint32_t error_subcode_2;
+ uint32_t error_subcode_3;
+
+ uint32_t reserved_4[4];
};
/*
* ISP queue - Mailbox Command entry structure definition.
@@ -942,6 +998,91 @@ struct abort_entry_24xx {
uint8_t reserved_2[12];
};
+#define ABTS_RCV_TYPE 0x54
+#define ABTS_RSP_TYPE 0x55
+struct abts_entry_24xx {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t handle_count;
+ uint8_t entry_status;
+
+ uint32_t handle; /* type 0x55 only */
+
+ uint16_t comp_status; /* type 0x55 only */
+ uint16_t nport_handle; /* type 0x54 only */
+
+ uint16_t control_flags; /* type 0x55 only */
+ uint8_t vp_idx;
+ uint8_t sof_type; /* sof_type is upper nibble */
+
+ uint32_t rx_xch_addr;
+
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+
+ uint8_t f_ctl[3];
+ uint8_t type;
+
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+
+ uint16_t rx_id;
+ uint16_t ox_id;
+
+ uint32_t param;
+
+ union {
+ struct {
+ uint32_t subcode3;
+ uint32_t rsvd;
+ uint32_t subcode1;
+ uint32_t subcode2;
+ } error;
+ struct {
+ uint16_t rsrvd1;
+ uint8_t last_seq_id;
+ uint8_t seq_id_valid;
+ uint16_t aborted_rx_id;
+ uint16_t aborted_ox_id;
+ uint16_t high_seq_cnt;
+ uint16_t low_seq_cnt;
+ } ba_acc;
+ struct {
+ uint8_t vendor_unique;
+ uint8_t explanation;
+ uint8_t reason;
+ } ba_rjt;
+ } payload;
+
+ uint32_t rx_xch_addr_to_abort;
+} __packed;
+
+/* ABTS payload explanation values */
+#define BA_RJT_EXP_NO_ADDITIONAL 0
+#define BA_RJT_EXP_INV_OX_RX_ID 3
+#define BA_RJT_EXP_SEQ_ABORTED 5
+
+/* ABTS payload reason values */
+#define BA_RJT_RSN_INV_CMD_CODE 1
+#define BA_RJT_RSN_LOGICAL_ERROR 3
+#define BA_RJT_RSN_LOGICAL_BUSY 5
+#define BA_RJT_RSN_PROTOCOL_ERROR 7
+#define BA_RJT_RSN_UNABLE_TO_PERFORM 9
+#define BA_RJT_RSN_VENDOR_SPECIFIC 0xff
+
+/* FC_F values */
+#define FC_TYPE_BLD 0x000 /* Basic link data */
+#define FC_F_CTL_RSP_CNTXT 0x800000 /* Responder of exchange */
+#define FC_F_CTL_LAST_SEQ 0x100000 /* Last sequence */
+#define FC_F_CTL_END_SEQ 0x80000 /* End sequence */
+#define FC_F_CTL_SEQ_INIT 0x010000 /* Sequence initiative */
+#define FC_ROUTING_BLD 0x80 /* Basic link data frame */
+#define FC_R_CTL_BLD_BA_ACC 0x04 /* BA_ACC (basic accept) */
+
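Assumed usage of the FC_F values when building a BA_ACC reply: the 24-bit F_CTL is packed byte-wise into the entry (a sketch, helper name hypothetical):

	static void qla_fill_ba_acc_ctl(struct abts_entry_24xx *rsp)
	{
		uint32_t f_ctl = FC_F_CTL_RSP_CNTXT | FC_F_CTL_LAST_SEQ |
				 FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;

		rsp->f_ctl[0] = (f_ctl >> 16) & 0xff;
		rsp->f_ctl[1] = (f_ctl >> 8) & 0xff;
		rsp->f_ctl[2] = f_ctl & 0xff;
		rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	}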
/*
* ISP I/O Register Set structure definitions.
*/
@@ -1726,9 +1867,8 @@ struct access_chip_rsp_84xx {
/* LR Distance bit positions */
#define LR_DIST_NV_POS 2
+#define LR_DIST_NV_MASK 0xf
#define LR_DIST_FW_POS 12
-#define LR_DIST_FW_SHIFT (LR_DIST_FW_POS - LR_DIST_NV_POS)
-#define LR_DIST_FW_FIELD(x) ((x) << LR_DIST_FW_SHIFT & 0xf000)
/* FAC semaphore defines */
#define FAC_SEMAPHORE_UNLOCK 0
@@ -1883,6 +2023,7 @@ struct nvram_81xx {
* BIT 6-15 = Unused
*/
uint16_t enhanced_features;
+
uint16_t reserved_24[4];
/* Offset 416. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2a64729a2bc5..1b93f5b4d77d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -31,7 +31,7 @@ extern int qla24xx_nvram_config(struct scsi_qla_host *);
extern int qla81xx_nvram_config(struct scsi_qla_host *);
extern void qla2x00_update_fw_options(struct scsi_qla_host *);
extern void qla24xx_update_fw_options(scsi_qla_host_t *);
-extern void qla81xx_update_fw_options(scsi_qla_host_t *);
+
extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
@@ -109,7 +109,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_detect_sfp(scsi_qla_host_t *vha);
+int qla24xx_detect_sfp(scsi_qla_host_t *);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
extern void qla28xx_get_aux_images(struct scsi_qla_host *,
@@ -142,6 +142,8 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xrdpenable;
+extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
@@ -226,6 +228,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
+void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt);
/*
* Global Functions in qla_mid.c source file.
@@ -354,6 +357,9 @@ extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
+qla24xx_get_port_database(scsi_qla_host_t *, u16, struct port_database_24xx *);
+
+extern int
qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *);
extern int
@@ -452,6 +458,13 @@ extern int
qla25xx_set_driver_version(scsi_qla_host_t *, char *);
extern int
+qla25xx_set_els_cmds_supported(scsi_qla_host_t *);
+
+extern int
+qla24xx_get_buffer_credits(scsi_qla_host_t *, struct buffer_credit_24xx *,
+ dma_addr_t);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -552,6 +565,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
extern irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id);
+extern irqreturn_t
+qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
@@ -656,7 +671,7 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
-extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
@@ -844,6 +859,7 @@ extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int);
+extern int ql26xx_led_config(scsi_qla_host_t *, uint16_t, uint16_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct bsg_job *);
@@ -913,6 +929,7 @@ void qlt_remove_target_resources(struct qla_hw_data *);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
+extern void qla24xx_process_purex_list(struct purex_list *);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index aaa4a5bbf2ff..42c3ad27f1cb 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -19,6 +19,8 @@ static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
static int qla_async_rsnn_nn(scsi_qla_host_t *);
+
+
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
* @vha: HA context
@@ -844,19 +846,18 @@ done:
return rval;
}
-void
+size_t
qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
{
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(ha))
- snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
- ha->mr.fw_version, qla2x00_version_str);
- else
- snprintf(snn, size,
- "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
- ha->fw_major_version, ha->fw_minor_version,
- ha->fw_subminor_version, qla2x00_version_str);
+ return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
+ ha->model_number, ha->mr.fw_version, qla2x00_version_str);
+
+ return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
+ ha->model_number, ha->fw_major_version, ha->fw_minor_version,
+ ha->fw_subminor_version, qla2x00_version_str);
}
/**
@@ -1501,747 +1502,732 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
return &p->p.req;
}
+static uint
+qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
+{
+ if (IS_CNA_CAPABLE(ha))
+ return FDMI_PORT_SPEED_10GB;
+ if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
+ uint speeds = 0;
+
+ if (ha->max_supported_speed == 2) {
+ if (ha->min_supported_speed <= 6)
+ speeds |= FDMI_PORT_SPEED_64GB;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1) {
+ if (ha->min_supported_speed <= 5)
+ speeds |= FDMI_PORT_SPEED_32GB;
+ }
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 4)
+ speeds |= FDMI_PORT_SPEED_16GB;
+ }
+ if (ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 3)
+ speeds |= FDMI_PORT_SPEED_8GB;
+ }
+ if (ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 2)
+ speeds |= FDMI_PORT_SPEED_4GB;
+ }
+ return speeds;
+ }
+ if (IS_QLA2031(ha))
+ return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
+ FDMI_PORT_SPEED_4GB;
+ if (IS_QLA25XX(ha))
+ return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
+ FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
+ if (IS_QLA24XX_TYPE(ha))
+ return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_1GB;
+ if (IS_QLA23XX(ha))
+ return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
+ return FDMI_PORT_SPEED_1GB;
+}
+static uint
+qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
+{
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ return FDMI_PORT_SPEED_1GB;
+ case PORT_SPEED_2GB:
+ return FDMI_PORT_SPEED_2GB;
+ case PORT_SPEED_4GB:
+ return FDMI_PORT_SPEED_4GB;
+ case PORT_SPEED_8GB:
+ return FDMI_PORT_SPEED_8GB;
+ case PORT_SPEED_10GB:
+ return FDMI_PORT_SPEED_10GB;
+ case PORT_SPEED_16GB:
+ return FDMI_PORT_SPEED_16GB;
+ case PORT_SPEED_32GB:
+ return FDMI_PORT_SPEED_32GB;
+ case PORT_SPEED_64GB:
+ return FDMI_PORT_SPEED_64GB;
+ default:
+ return FDMI_PORT_SPEED_UNKNOWN;
+ }
+}
+
/**
- * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * qla2x00_hba_attributes() - build the FDMI HBA attribute block
* @vha: HA context
+ * @entries: buffer in which the attribute block is built
+ * @callopt: FDMI call option selecting standard (FDMI-1) or
+ * extended (FDMI-2) registration
*
* Returns 0 on success.
*/
-static int
-qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
+static unsigned long
+qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
+ unsigned int callopt)
{
- int rval, alen;
- uint32_t size, sn;
-
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
- struct ct_fdmi_hba_attr *eiter;
struct qla_hw_data *ha = vha->hw;
-
- /* Issue RHBA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
- ct_req->req.rhba.entry_count = cpu_to_be32(1);
- memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
- size = 2 * WWN_SIZE + 4 + 4;
-
- /* Attributes */
- ct_req->req.rhba.attrs.count =
- cpu_to_be32(FDMI_HBA_ATTR_COUNT);
- entries = &ct_req->req;
+ struct init_cb_24xx *icb24 = (void *)ha->init_cb;
+ struct new_utsname *p_sysid = utsname();
+ struct ct_fdmi_hba_attr *eiter;
+ uint16_t alen;
+ unsigned long size = 0;
/* Nodename. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x2025,
- "NodeName = %8phN.\n", eiter->a.node_name);
-
+ memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
+ alen = sizeof(eiter->a.node_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a0,
+ "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Manufacturer. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
- alen = strlen(QLA2XXX_MANUFACTURER);
- snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2026,
- "Manufacturer = %s.\n", eiter->a.manufacturer);
-
+ alen = scnprintf(
+ eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+ "%s", "QLogic Corporation");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a1,
+ "MANUFACTURER = %s.\n", eiter->a.manufacturer);
/* Serial number. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- if (IS_FWI2_CAPABLE(ha))
- qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
- sizeof(eiter->a.serial_num));
- else {
- sn = ((ha->serial0 & 0x1f) << 16) |
+ alen = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ alen = qla2xxx_get_vpd_field(vha, "SN",
+ eiter->a.serial_num, sizeof(eiter->a.serial_num));
+ }
+ if (!alen) {
+ uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
(ha->serial2 << 8) | ha->serial1;
- snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
- "%c%05d", 'A' + sn / 100000, sn % 100000);
+ alen = scnprintf(
+ eiter->a.serial_num, sizeof(eiter->a.serial_num),
+ "%c%05d", 'A' + sn / 100000, sn % 100000);
}
- alen = strlen(eiter->a.serial_num);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2027,
- "Serial no. = %s.\n", eiter->a.serial_num);
-
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a2,
+ "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
/* Model name. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
- snprintf(eiter->a.model, sizeof(eiter->a.model),
- "%s", ha->model_number);
- alen = strlen(eiter->a.model);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2028,
- "Model Name = %s.\n", eiter->a.model);
-
+ alen = scnprintf(
+ eiter->a.model, sizeof(eiter->a.model),
+ "%s", ha->model_number);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
+ "MODEL NAME = %s.\n", eiter->a.model);
/* Model description. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
- "%s", ha->model_desc);
- alen = strlen(eiter->a.model_desc);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x2029,
- "Model Desc = %s.\n", eiter->a.model_desc);
-
+ alen = scnprintf(
+ eiter->a.model_desc, sizeof(eiter->a.model_desc),
+ "%s", ha->model_desc);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a4,
+ "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
/* Hardware version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- if (!IS_FWI2_CAPABLE(ha)) {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
+ alen = 0;
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (!alen) {
+ alen = qla2xxx_get_vpd_field(vha, "MN",
+ eiter->a.hw_version, sizeof(eiter->a.hw_version));
+ }
+ if (!alen) {
+ alen = qla2xxx_get_vpd_field(vha, "EC",
+ eiter->a.hw_version, sizeof(eiter->a.hw_version));
+ }
}
- alen = strlen(eiter->a.hw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202a,
- "Hardware ver = %s.\n", eiter->a.hw_version);
-
+ if (!alen) {
+ alen = scnprintf(
+ eiter->a.hw_version, sizeof(eiter->a.hw_version),
+ "HW:%s", ha->adapter_id);
+ }
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a5,
+ "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
/* Driver version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
- "%s", qla2x00_version_str);
- alen = strlen(eiter->a.driver_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202b,
- "Driver ver = %s.\n", eiter->a.driver_version);
-
+ alen = scnprintf(
+ eiter->a.driver_version, sizeof(eiter->a.driver_version),
+ "%s", qla2x00_version_str);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a6,
+ "DRIVER VERSION = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
- "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.orom_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha , 0x202c,
- "Optrom vers = %s.\n", eiter->a.orom_version);
+ alen = scnprintf(
+ eiter->a.orom_version, sizeof(eiter->a.orom_version),
+ "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a7,
+ "OPTROM VERSION = %d.%02d.\n",
+ eiter->a.orom_version[1], eiter->a.orom_version[0]);
/* Firmware version */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
sizeof(eiter->a.fw_version));
- alen = strlen(eiter->a.fw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x202d,
- "Firmware vers = %s.\n", eiter->a.fw_version);
-
- /* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(vha, size + 16);
-
- ql_dbg(ql_dbg_disc, vha, 0x202e,
- "RHBA identifier = %8phN size=%d.\n",
- ct_req->req.rhba.hba_identifier, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
- entries, size);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2030,
- "RHBA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
- ct_rsp->header.explanation_code ==
- CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x2034,
- "HBA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20ad,
- "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
- }
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2035,
- "RHBA exiting normally.\n");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a8,
+ "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
+ if (callopt == CALLOPT_FDMI1)
+ goto done;
+ /* OS Name and Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+ alen = 0;
+ if (p_sysid) {
+ alen = scnprintf(
+ eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s %s",
+ p_sysid->sysname, p_sysid->release, p_sysid->machine);
}
-
- return rval;
+ if (!alen) {
+ alen = scnprintf(
+ eiter->a.os_version, sizeof(eiter->a.os_version),
+ "%s %s",
+ "Linux", fc_host_system_hostname(vha->host));
+ }
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20a9,
+ "OS VERSION = %s.\n", eiter->a.os_version);
+ /* MAX CT Payload Length */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+ eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
+ icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ alen = sizeof(eiter->a.max_ct_len);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20aa,
+ "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
+ /* Node Symbolic Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+ alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+ sizeof(eiter->a.sym_name));
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ab,
+ "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
+ /* Vendor Specific information */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
+ eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
+ alen = sizeof(eiter->a.vendor_specific_info);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ac,
+ "VENDOR SPECIFIC INFO = 0x%x.\n",
+ be32_to_cpu(eiter->a.vendor_specific_info));
+ /* Num Ports */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+ eiter->a.num_ports = cpu_to_be32(1);
+ alen = sizeof(eiter->a.num_ports);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ad,
+ "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
+ /* Fabric Name */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name,
+ sizeof(eiter->a.fabric_name));
+ alen = sizeof(eiter->a.fabric_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ae,
+ "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ /* BIOS Version */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+ alen = scnprintf(
+ eiter->a.bios_name, sizeof(eiter->a.bios_name),
+ "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20af,
+ "BIOS NAME = %s\n", eiter->a.bios_name);
+ /* Vendor Identifier */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
+ alen = scnprintf(
+ eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
+ "%s", "QLGC");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20b0,
+ "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
+done:
+ return size;
}
/**
- * qla2x00_fdmi_rpa() - perform RPA registration
+ * qla2x00_port_attributes() - build the FDMI port attribute block
* @vha: HA context
+ * @entries: buffer in which the attribute block is built
+ * @callopt: FDMI call option selecting standard (FDMI-1), extended
+ * (FDMI-2), or Smart SAN registration
*
* Returns 0 on success.
*/
-static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+static unsigned long
+qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
+ unsigned int callopt)
{
- int rval, alen;
- uint32_t size;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
+ struct init_cb_24xx *icb24 = (void *)ha->init_cb;
+ struct new_utsname *p_sysid = utsname();
+ char *hostname = p_sysid ?
+ p_sysid->nodename : fc_host_system_hostname(vha->host);
struct ct_fdmi_port_attr *eiter;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RPA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
- RPA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
- size = WWN_SIZE + 4;
-
- /* Attributes */
- ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
- entries = &ct_req->req;
+ uint16_t alen;
+ unsigned long size = 0;
/* FC4 types. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = cpu_to_be16(4 + 32);
+ eiter->a.fc4_types[0] = 0x00;
+ eiter->a.fc4_types[1] = 0x00;
eiter->a.fc4_types[2] = 0x01;
- size += 4 + 32;
-
- ql_dbg(ql_dbg_disc, vha, 0x2039,
- "FC4_TYPES=%02x %02x.\n",
- eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]);
-
+ eiter->a.fc4_types[3] = 0x00;
+ alen = sizeof(eiter->a.fc4_types);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c0,
+ "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
+ if (vha->flags.nvme_enabled) {
+ eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
+ ql_dbg(ql_dbg_disc, vha, 0x211f,
+ "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+ eiter->a.fc4_types[6]);
+ }
/* Supported speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_32GB|
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB);
- else if (IS_QLA2031(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB);
- else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA23XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_1GB);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203a,
- "Supported_Speed=%x.\n", eiter->a.sup_speed);
-
+ eiter->a.sup_speed = cpu_to_be32(
+ qla25xx_fdmi_port_speed_capability(ha));
+ alen = sizeof(eiter->a.sup_speed);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c1,
+ "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
/* Current speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_1GB);
- break;
- case PORT_SPEED_2GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_2GB);
- break;
- case PORT_SPEED_4GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_4GB);
- break;
- case PORT_SPEED_8GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_8GB);
- break;
- case PORT_SPEED_10GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_10GB);
- break;
- case PORT_SPEED_16GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_16GB);
- break;
- case PORT_SPEED_32GB:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_32GB);
- break;
- default:
- eiter->a.cur_speed =
- cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
- break;
- }
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203b,
- "Current_Speed=%x.\n", eiter->a.cur_speed);
-
+ eiter->a.cur_speed = cpu_to_be32(
+ qla25xx_fdmi_port_speed_currently(ha));
+ alen = sizeof(eiter->a.cur_speed);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c2,
+ "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = cpu_to_be16(4 + 4);
- eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x203c,
- "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
-
+ eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
+ icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ alen = sizeof(eiter->a.max_frame_size);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c3,
+ "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
/* OS device name. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
- "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
- alen = strlen(eiter->a.os_dev_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x204b,
- "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
-
+ alen = scnprintf(
+ eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+ "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c4,
+ "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
/* Hostname. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", p_sysid->nodename);
- } else {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", fc_host_system_hostname(vha->host));
- }
- alen = strlen(eiter->a.host_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
-
- /* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(vha, size + 16);
-
- ql_dbg(ql_dbg_disc, vha, 0x203e,
- "RPA portname %016llx, size = %d.\n",
- wwn_to_u64(ct_req->req.rpa.port_name), size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
- entries, size);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2040,
- "RPA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
- ct_rsp->header.explanation_code ==
- CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20cd,
- "RPA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- }
-
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2041,
- "RPA exiting normally.\n");
- }
-
- return rval;
-}
-
-/**
- * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
- * @vha: HA context
- *
- * Returns 0 on success.
- */
-static int
-qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
-{
- int rval, alen;
- uint32_t size, sn;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- void *entries;
- struct ct_fdmiv2_hba_attr *eiter;
- struct qla_hw_data *ha = vha->hw;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RHBA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
-
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
- RHBA_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
- ct_req->req.rhba2.entry_count = cpu_to_be32(1);
- memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
- size = 2 * WWN_SIZE + 4 + 4;
+ if (!*hostname || !strncmp(hostname, "(none)", 6))
+ hostname = "Linux-default";
+ alen = scnprintf(
+ eiter->a.host_name, sizeof(eiter->a.host_name),
+ "%s", hostname);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c5,
+ "HOSTNAME = %s.\n", eiter->a.host_name);
- /* Attributes */
- ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
- entries = &ct_req->req;
+ if (callopt == CALLOPT_FDMI1)
+ goto done;
- /* Nodename. */
+ /* Node Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x207d,
- "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+ eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+ memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
+ alen = sizeof(eiter->a.node_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c6,
+ "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
- /* Manufacturer. */
+ /* Port Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
- snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
- eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
- alen = strlen(eiter->a.manufacturer);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a5,
- "Manufacturer = %s.\n", eiter->a.manufacturer);
+ eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+ memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
+ alen = sizeof(eiter->a.port_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c7,
+ "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
- /* Serial number. */
+ /* Port Symbolic Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
- if (IS_FWI2_CAPABLE(ha))
- qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
- sizeof(eiter->a.serial_num));
- else {
- sn = ((ha->serial0 & 0x1f) << 16) |
- (ha->serial2 << 8) | ha->serial1;
- snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
- "%c%05d", 'A' + sn / 100000, sn % 100000);
- }
- alen = strlen(eiter->a.serial_num);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a6,
- "Serial no. = %s.\n", eiter->a.serial_num);
+ eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+ alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+ sizeof(eiter->a.port_sym_name));
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
- /* Model name. */
+ /* Port Type */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
- snprintf(eiter->a.model, sizeof(eiter->a.model),
- "%s", ha->model_number);
- alen = strlen(eiter->a.model);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a7,
- "Model Name = %s.\n", eiter->a.model);
-
- /* Model description. */
+ eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+ eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+ alen = sizeof(eiter->a.port_type);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20c9,
+ "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
+
+ /* Supported Class of Service */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
- snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
- "%s", ha->model_desc);
- alen = strlen(eiter->a.model_desc);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a8,
- "Model Desc = %s.\n", eiter->a.model_desc);
+ eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+ eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+ alen = sizeof(eiter->a.port_supported_cos);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ca,
+ "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
- /* Hardware version. */
+ /* Port Fabric Name */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
- if (!IS_FWI2_CAPABLE(ha)) {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
- sizeof(eiter->a.hw_version))) {
- ;
- } else {
- snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
- "HW:%s", ha->adapter_id);
- }
- alen = strlen(eiter->a.hw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a9,
- "Hardware ver = %s.\n", eiter->a.hw_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+ memcpy(eiter->a.fabric_name, vha->fabric_node_name,
+ sizeof(eiter->a.fabric_name));
+ alen = sizeof(eiter->a.fabric_name);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cb,
+ "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
- /* Driver version. */
+ /* FC4_type */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
- snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
- "%s", qla2x00_version_str);
- alen = strlen(eiter->a.driver_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20aa,
- "Driver ver = %s.\n", eiter->a.driver_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
+ eiter->a.port_fc4_type[0] = 0x00;
+ eiter->a.port_fc4_type[1] = 0x00;
+ eiter->a.port_fc4_type[2] = 0x01;
+ eiter->a.port_fc4_type[3] = 0x00;
+ alen = sizeof(eiter->a.port_fc4_type);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cc,
+ "PORT ACTIVE FC4 TYPE = %016llx.\n",
+ *(uint64_t *)eiter->a.port_fc4_type);
- /* Option ROM version. */
+ /* Port State */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
- snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
- "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.orom_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha , 0x20ab,
- "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
- eiter->a.orom_version[0]);
+ eiter->type = cpu_to_be16(FDMI_PORT_STATE);
+ eiter->a.port_state = cpu_to_be32(2);
+ alen = sizeof(eiter->a.port_state);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cd,
+ "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
- /* Firmware version */
+ /* Number of Ports */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
- ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
- sizeof(eiter->a.fw_version));
- alen = strlen(eiter->a.fw_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ac,
- "Firmware vers = %s.\n", eiter->a.fw_version);
-
- /* OS Name and Version */
+ eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+ eiter->a.num_ports = cpu_to_be32(1);
+ alen = sizeof(eiter->a.num_ports);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20ce,
+ "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
+
+ /* Port Identifier */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
- "%s %s %s",
- p_sysid->sysname, p_sysid->release, p_sysid->version);
- } else {
- snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
- "%s %s", "Linux", fc_host_system_hostname(vha->host));
- }
- alen = strlen(eiter->a.os_version);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ae,
- "OS Name and Version = %s.\n", eiter->a.os_version);
+ eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
+ eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+ alen = sizeof(eiter->a.port_id);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20cf,
+ "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
+
+ if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
+ goto done;
- /* MAX CT Payload Length */
+ /* Smart SAN Service Category (Populate Smart SAN Initiator) */
eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
- eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
+ alen = scnprintf(
+ eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
+ "%s", "Smart SAN Initiator");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d0,
+ "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
+
+ /* Smart SAN GUID (NWWN+PWWN) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
+ memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
+ memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
+ alen = sizeof(eiter->a.smartsan_guid);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d1,
+ "Smart SAN GUID = %016llx-%016llx\n",
+ wwn_to_u64(eiter->a.smartsan_guid),
+ wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
+
+ /* Smart SAN Version (populate "Smart SAN Version 2.0") */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
+ alen = scnprintf(
+ eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
+ "%s", "Smart SAN Version 2.0");
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
+
+ /* Smart SAN Product Name (Specify Adapter Model No) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
+ alen = scnprintf(eiter->a.smartsan_prod_name,
+ sizeof(eiter->a.smartsan_prod_name),
+ "ISP%04x", ha->pdev->device);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
+
+ /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
+ eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
+ alen = sizeof(eiter->a.smartsan_port_info);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d4,
+ "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info);
+
+ /* Smart SAN Security Support */
+ eiter = entries + size;
+ eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
+ eiter->a.smartsan_security_support = cpu_to_be32(1);
+ alen = sizeof(eiter->a.smartsan_security_support);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+ size += alen;
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "SMARTSAN SECURITY SUPPORT = %d\n",
+ be32_to_cpu(eiter->a.smartsan_security_support));
- ql_dbg(ql_dbg_disc, vha, 0x20af,
- "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+done:
+ return size;
+}
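
The helper above packs every FDMI attribute as a type/length/value triple: a big-endian 16-bit type, a big-endian 16-bit length covering the 4-byte header plus the padded value, and the value itself rounded up to a 4-byte boundary. The FDMI_ATTR_ALIGNMENT()/FDMI_ATTR_TYPELEN() macros are defined elsewhere in the series; a minimal userspace sketch, assuming they behave as the size arithmetic above implies:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* Assumed definitions, consistent with the arithmetic in the hunks above. */
#define FDMI_ATTR_TYPELEN(obj)   (sizeof((obj)->type) + sizeof((obj)->len))
#define FDMI_ATTR_ALIGNMENT(len) (4 - ((len) & 3))

struct fdmi_attr {
	uint16_t type;     /* big-endian attribute type */
	uint16_t len;      /* big-endian length of the whole TLV */
	uint8_t value[256];
};

/* Pack one string attribute; returns bytes consumed in the block.
 * Assumes strlen(s) < sizeof(a->value) - 4. */
static size_t pack_string_attr(struct fdmi_attr *a, uint16_t type,
			       const char *s)
{
	size_t alen = strlen(s);

	memcpy(a->value, s, alen);
	alen += FDMI_ATTR_ALIGNMENT(alen); /* pad value (always 1-4 bytes) */
	alen += FDMI_ATTR_TYPELEN(a);      /* account for the 4-byte header */
	a->type = htons(type);
	a->len = htons((uint16_t)alen);
	return alen;                       /* caller advances 'size' by this */
}

Fixed-size attributes (port type, class of service, port id, ...) skip the alignment step and use sizeof(value) directly, exactly as above.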
- /* Node Sybolic Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
- qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
- sizeof(eiter->a.sym_name));
- alen = strlen(eiter->a.sym_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+/**
+ * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * @vha: HA context
+ * @callopt: Option to issue FDMI registration
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long size = 0;
+ unsigned int rval, count;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
- ql_dbg(ql_dbg_disc, vha, 0x20b0,
- "Symbolic Name = %s.\n", eiter->a.sym_name);
+ count = callopt != CALLOPT_FDMI1 ?
+ FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
- /* Vendor Id */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
- eiter->a.vendor_id = cpu_to_be32(0x1077);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ size = RHBA_RSP_SIZE;
- ql_dbg(ql_dbg_disc, vha, 0x20b1,
- "Vendor Id = %x.\n", eiter->a.vendor_id);
+ ql_dbg(ql_dbg_disc, vha, 0x20e0,
+ "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
- /* Num Ports */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
- eiter->a.num_ports = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
- ql_dbg(ql_dbg_disc, vha, 0x20b2,
- "Port Num = %x.\n", eiter->a.num_ports);
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
+ ct_rsp = &ha->ct_sns->p.rsp;
- /* Fabric Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
- memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
+ /* Prepare FDMI command entries */
+ memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
+ sizeof(ct_req->req.rhba.hba_identifier));
+ size += sizeof(ct_req->req.rhba.hba_identifier);
- ql_dbg(ql_dbg_disc, vha, 0x20b3,
- "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ ct_req->req.rhba.entry_count = cpu_to_be32(1);
+ size += sizeof(ct_req->req.rhba.entry_count);
- /* BIOS Version */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
- snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
- "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
- alen = strlen(eiter->a.bios_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+ memcpy(ct_req->req.rhba.port_name, vha->port_name,
+ sizeof(ct_req->req.rhba.port_name));
+ size += sizeof(ct_req->req.rhba.port_name);
- ql_dbg(ql_dbg_disc, vha, 0x20b4,
- "BIOS Name = %s\n", eiter->a.bios_name);
+ /* Attribute count */
+ ct_req->req.rhba.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rhba.attrs.count);
- /* Vendor Identifier */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
- snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
- "%s", "QLGC");
- alen = strlen(eiter->a.vendor_identifier);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
+ /* Attribute block */
+ entries = &ct_req->req.rhba.attrs.entry;
- ql_dbg(ql_dbg_disc, vha, 0x201b,
- "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
+ size += qla2x00_hba_attributes(vha, entries, callopt);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x20b5,
- "RHBA identifier = %016llx.\n",
- wwn_to_u64(ct_req->req.rhba2.hba_identifier));
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+ ql_dbg(ql_dbg_disc, vha, 0x20e1,
+ "RHBA %016llx %016llx.\n",
+ wwn_to_u64(ct_req->req.rhba.hba_identifier),
+ wwn_to_u64(ct_req->req.rhba.port_name));
+
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x20b7,
- "RHBA issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "RHBA iocb failed (%d).\n", rval);
+ return rval;
+ }
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
+ if (rval) {
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20b8,
- "HBA already registered.\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2016,
- "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
+ ql_dbg(ql_dbg_disc, vha, 0x20e4,
+ "RHBA already registered.\n");
+ return QLA_ALREADY_REGISTERED;
}
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20b9,
- "RHBA FDMI V2 exiting normally.\n");
+
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
+ ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
return rval;
}
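
On the wire, the RHBA body assembled above is the physical port's WWPN as HBA identifier, a one-entry registered-port list, then the counted attribute block. A sketch of the implied layout, with field widths taken from the memcpy/size arithmetic (the driver spells this out in struct ct_sns_req; this is only an illustration):

#include <stdint.h>

#define WWN_SIZE 8

/* Sketch of the RHBA request body; all multi-byte fields big-endian. */
struct rhba_body {
	uint8_t  hba_identifier[WWN_SIZE]; /* WWPN of the physical port */
	uint32_t entry_count;              /* number of registered ports: 1 */
	uint8_t  port_name[WWN_SIZE];      /* WWPN being registered */
	uint32_t attr_count;               /* number of TLV attributes */
	/* attr_count packed attribute TLVs follow */
} __attribute__((packed));

The "size + 16" passed to qla2x00_update_ms_fdmi_iocb() accounts for the 16-byte CT IU preamble that precedes this body.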
-/**
- * qla2x00_fdmi_dhba() -
- * @vha: HA context
- *
- * Returns 0 on success.
- */
+
static int
qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
{
@@ -2250,22 +2236,17 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
-
/* Issue RPA */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
-
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
-
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
-
ql_dbg(ql_dbg_disc, vha, 0x2036,
"DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
-
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
@@ -2280,337 +2261,178 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_disc, vha, 0x2038,
"DHBA exiting normally.\n");
}
-
return rval;
}
/**
- * qla2x00_fdmiv2_rpa() -
+ * qla2x00_fdmi_rprt() - perform RPRT registration
* @vha: HA context
+ * @callopt: Option to issue extended or standard FDMI
+ * command parameter
*
* Returns 0 on success.
*/
static int
-qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
+qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
{
- int rval, alen;
- uint32_t size;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
struct qla_hw_data *ha = vha->hw;
+ ulong size = 0;
+ uint rval, count;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
void *entries;
- struct ct_fdmiv2_port_attr *eiter;
- struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
- struct new_utsname *p_sysid = NULL;
-
- /* Issue RPA */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
-
+ count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
+ FDMI2_SMARTSAN_PORT_ATTR_COUNT :
+ callopt != CALLOPT_FDMI1 ?
+ FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
+
+ size = RPRT_RSP_SIZE;
+ ql_dbg(ql_dbg_disc, vha, 0x20e8,
+ "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
- size = WWN_SIZE + 4;
-
- /* Attributes */
- ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
- entries = &ct_req->req;
-
- /* FC4 types. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
- eiter->len = cpu_to_be16(4 + 32);
- eiter->a.fc4_types[2] = 0x01;
- size += 4 + 32;
-
- ql_dbg(ql_dbg_disc, vha, 0x20ba,
- "FC4_TYPES=%02x %02x.\n",
- eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]);
-
- if (vha->flags.nvme_enabled) {
- eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
- ql_dbg(ql_dbg_disc, vha, 0x211f,
- "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
- eiter->a.fc4_types[6]);
- }
-
- /* Supported speed. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- if (IS_CNA_CAPABLE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_10GB);
- else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_32GB|
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB);
- else if (IS_QLA2031(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_16GB|
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB);
- else if (IS_QLA25XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_8GB|
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA24XX_TYPE(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_4GB|
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else if (IS_QLA23XX(ha))
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_2GB|
- FDMI_PORT_SPEED_1GB);
- else
- eiter->a.sup_speed = cpu_to_be32(
- FDMI_PORT_SPEED_1GB);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20bb,
- "Supported Port Speed = %x.\n", eiter->a.sup_speed);
-
- /* Current speed. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
- eiter->len = cpu_to_be16(4 + 4);
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
- break;
- case PORT_SPEED_2GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
- break;
- case PORT_SPEED_4GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
- break;
- case PORT_SPEED_8GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
- break;
- case PORT_SPEED_10GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
- break;
- case PORT_SPEED_16GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
- break;
- case PORT_SPEED_32GB:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
- break;
- default:
- eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
- break;
+ /* Prepare FDMI command entries */
+ memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
+ sizeof(ct_req->req.rprt.hba_identifier));
+ size += sizeof(ct_req->req.rprt.hba_identifier);
+ memcpy(ct_req->req.rprt.port_name, vha->port_name,
+ sizeof(ct_req->req.rprt.port_name));
+ size += sizeof(ct_req->req.rprt.port_name);
+ /* Attribute count */
+ ct_req->req.rprt.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rprt.attrs.count);
+ /* Attribute block */
+ entries = ct_req->req.rprt.attrs.entry;
+ size += qla2x00_port_attributes(vha, entries, callopt);
+ /* Update MS request size. */
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+ ql_dbg(ql_dbg_disc, vha, 0x20e9,
+ "RPRT %016llx %016llx.\n",
+ wwn_to_u64(ct_req->req.rprt.hba_identifier),
+ wwn_to_u64(ct_req->req.rprt.port_name));
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
+ entries, size);
+ /* Execute MS IOCB */
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "RPRT iocb failed (%d).\n", rval);
+ return rval;
}
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x2017,
- "Current_Speed = %x.\n", eiter->a.cur_speed);
-
- /* Max frame size. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->len = cpu_to_be16(4 + 4);
- eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
- le16_to_cpu(icb24->frame_payload_size) :
- le16_to_cpu(ha->init_cb->frame_payload_size);
- eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20bc,
- "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
-
- /* OS device name. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- alen = strlen(QLA2XXX_DRIVER_NAME);
- snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
- "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20be,
- "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
+ if (rval) {
+ if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+ ct_rsp->header.explanation_code ==
+ CT_EXPL_ALREADY_REGISTERED) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "RPRT already registered.\n");
+ return QLA_ALREADY_REGISTERED;
+ }
- /* Hostname. */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
- p_sysid = utsname();
- if (p_sysid) {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", p_sysid->nodename);
- } else {
- snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", fc_host_system_hostname(vha->host));
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
- alen = strlen(eiter->a.host_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x201a,
- "HostName=%s.\n", eiter->a.host_name);
-
- /* Node Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
- memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c0,
- "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
-
- /* Port Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_NAME);
- memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c1,
- "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
-
- /* Port Symbolic Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
- qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
- sizeof(eiter->a.port_sym_name));
- alen = strlen(eiter->a.port_sym_name);
- alen += 4 - (alen & 3);
- eiter->len = cpu_to_be16(4 + alen);
- size += 4 + alen;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c2,
- "port symbolic name = %s\n", eiter->a.port_sym_name);
-
- /* Port Type */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
- eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c3,
- "Port Type = %x.\n", eiter->a.port_type);
-
- /* Class of Service */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
- eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c4,
- "Supported COS = %08x\n", eiter->a.port_supported_cos);
+ ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
+ return rval;
+}
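
RPRT exists for exactly the base_vha usage above: unlike RPA, which registers attributes for the port that owns the HBA record, RPRT names the owning HBA explicitly, so an NPIV vport can attach its own WWPN to the physical port's registration. A minimal sketch of the choice, using this driver's vp_idx convention (0 means physical port):

#include <stdint.h>

/* Sketch: which FDMI port registration a host issues. */
enum fdmi_port_reg { FDMI_REG_RPA, FDMI_REG_RPRT };

static enum fdmi_port_reg fdmi_port_reg_for(uint16_t vp_idx)
{
	/* The physical port (vp_idx == 0) owns the HBA record and uses
	 * RPA; vports register under the physical port's HBA identity. */
	return vp_idx ? FDMI_REG_RPRT : FDMI_REG_RPA;
}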
- /* Port Fabric Name */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
- memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
- eiter->len = cpu_to_be16(4 + WWN_SIZE);
- size += 4 + WWN_SIZE;
+/**
+ * qla2x00_fdmi_rpa() - perform RPA registration
+ * @vha: HA context
+ * @callopt: Option to issue FDMI registration
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ulong size = 0;
+ uint rval, count;
+ ms_iocb_entry_t *ms_pkt;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_rsp *ct_rsp;
+ void *entries;
- ql_dbg(ql_dbg_disc, vha, 0x20c5,
- "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+ count =
+ callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
+ FDMI2_SMARTSAN_PORT_ATTR_COUNT :
+ callopt != CALLOPT_FDMI1 ?
+ FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
- /* FC4_type */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
- eiter->a.port_fc4_type[0] = 0;
- eiter->a.port_fc4_type[1] = 0;
- eiter->a.port_fc4_type[2] = 1;
- eiter->a.port_fc4_type[3] = 0;
- eiter->len = cpu_to_be16(4 + 32);
- size += 4 + 32;
+ size =
+ callopt != CALLOPT_FDMI1 ?
+ SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
- ql_dbg(ql_dbg_disc, vha, 0x20c6,
- "Port Active FC4 Type = %02x %02x.\n",
- eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
- if (vha->flags.nvme_enabled) {
- eiter->a.port_fc4_type[4] = 0;
- eiter->a.port_fc4_type[5] = 0;
- eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
- ql_dbg(ql_dbg_disc, vha, 0x2120,
- "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
- eiter->a.port_fc4_type[6]);
- }
+ /* Request size adjusted after CT preparation */
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
- /* Port State */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_STATE);
- eiter->a.port_state = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
+ ct_rsp = &ha->ct_sns->p.rsp;
- ql_dbg(ql_dbg_disc, vha, 0x20c7,
- "Port State = %x.\n", eiter->a.port_state);
+ /* Prepare FDMI command entries. */
+ memcpy(ct_req->req.rpa.port_name, vha->port_name,
+ sizeof(ct_req->req.rpa.port_name));
+ size += sizeof(ct_req->req.rpa.port_name);
- /* Number of Ports */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
- eiter->a.num_ports = cpu_to_be32(1);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c8,
- "Number of ports = %x.\n", eiter->a.num_ports);
+ /* Attribute count */
+ ct_req->req.rpa.attrs.count = cpu_to_be32(count);
+ size += sizeof(ct_req->req.rpa.attrs.count);
- /* Port Id */
- eiter = entries + size;
- eiter->type = cpu_to_be16(FDMI_PORT_ID);
- eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
- eiter->len = cpu_to_be16(4 + 4);
- size += 4 + 4;
+ /* Attribute block */
+ entries = ct_req->req.rpa.attrs.entry;
- ql_dbg(ql_dbg_disc, vha, 0x201c,
- "Port Id = %x.\n", eiter->a.port_id);
+ size += qla2x00_port_attributes(vha, entries, callopt);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x2018,
- "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
- ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
+ ql_dbg(ql_dbg_disc, vha, 0x20f1,
+ "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
+
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x20cb,
- "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ sizeof(*ha->ms_iocb));
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f3,
+ "RPA iocb failed (%d).\n", rval);
+ return rval;
+ }
+
+ rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
+ if (rval) {
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- ql_dbg(ql_dbg_disc, vha, 0x20ce,
- "RPA FDMI v2 already registered\n");
- rval = QLA_ALREADY_REGISTERED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2020,
- "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
- ct_rsp->header.reason_code,
- ct_rsp->header.explanation_code);
+ ql_dbg(ql_dbg_disc, vha, 0x20f4,
+ "RPA already registered.\n");
+ return QLA_ALREADY_REGISTERED;
}
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cc,
- "RPA FDMI V2 exiting normally.\n");
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f5,
+ "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
+ ct_rsp->header.reason_code,
+ ct_rsp->header.explanation_code);
+ return rval;
}
+ ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
return rval;
}
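
RHBA, RPRT and RPA now share one status triage: a failed CT response whose reason/explanation pair means "already registered" is surfaced as QLA_ALREADY_REGISTERED so the caller can re-login to the management server and retry, while any other failure propagates as-is. A condensed sketch; the constant values here are placeholders for illustration, not the driver's definitions:

#include <stdint.h>

/* Placeholder values for illustration only. */
#define CT_REASON_CANNOT_PERFORM   0x09
#define CT_EXPL_ALREADY_REGISTERED 0x10
#define QLA_FUNCTION_FAILED        1
#define QLA_ALREADY_REGISTERED     2

static int triage_ct_failure(uint8_t reason, uint8_t explanation)
{
	if (reason == CT_REASON_CANNOT_PERFORM &&
	    explanation == CT_EXPL_ALREADY_REGISTERED)
		return QLA_ALREADY_REGISTERED; /* caller: re-login, retry */
	return QLA_FUNCTION_FAILED;
}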
@@ -2623,18 +2445,31 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
int
qla2x00_fdmi_register(scsi_qla_host_t *vha)
{
- int rval = QLA_FUNCTION_FAILED;
+ int rval = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
IS_QLAFX00(ha))
- return QLA_FUNCTION_FAILED;
+ return rval;
rval = qla2x00_mgmt_svr_login(vha);
if (rval)
return rval;
- rval = qla2x00_fdmiv2_rhba(vha);
+ /* For NPIV/vport, send RPRT only */
+ if (vha->vp_idx) {
+ if (ql2xsmartsan)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
+ if (rval || !ql2xsmartsan)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
+ if (rval)
+ rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
+
+ return rval;
+ }
+
+ /* Try FDMI2 first; if it fails, fall back to FDMI1 */
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
goto try_fdmi;
@@ -2643,18 +2478,22 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
if (rval)
goto try_fdmi;
- rval = qla2x00_fdmiv2_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
if (rval)
goto try_fdmi;
}
- rval = qla2x00_fdmiv2_rpa(vha);
+
+ if (ql2xsmartsan)
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
+ if (rval || !ql2xsmartsan)
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
if (rval)
goto try_fdmi;
- goto out;
+ return rval;
try_fdmi:
- rval = qla2x00_fdmi_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
return rval;
@@ -2663,12 +2502,13 @@ try_fdmi:
if (rval)
return rval;
- rval = qla2x00_fdmi_rhba(vha);
+ rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
if (rval)
return rval;
}
- rval = qla2x00_fdmi_rpa(vha);
-out:
+
+ rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
+
return rval;
}
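
The register path now degrades through three protocol levels instead of the old v2-then-v1 pair. A compact model of the ladder for the RPA leg, with a trivial stub standing in for the real registration call (0 means success):

enum callopt { CALLOPT_FDMI1, CALLOPT_FDMI2, CALLOPT_FDMI2_SMARTSAN };

/* Stub standing in for qla2x00_fdmi_rpa()/_rprt(); 0 on success. */
static int do_register(enum callopt opt) { (void)opt; return 0; }

static int register_port_with_fallback(int smartsan)
{
	int rval = 1; /* "not yet succeeded" */

	if (smartsan)
		rval = do_register(CALLOPT_FDMI2_SMARTSAN);
	if (rval || !smartsan)
		rval = do_register(CALLOPT_FDMI2);
	if (rval)
		rval = do_register(CALLOPT_FDMI1);
	return rval;
}

The RHBA leg follows the same shape, with a management-server re-login inserted between attempts when the fabric answers "already registered".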
@@ -2893,7 +2733,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Set default FC4 Type as UNKNOWN so the default is to
* Process this port */
- list[i].fc4_type = FC4_TYPE_UNKNOWN;
+ list[i].fc4_type = 0;
/* Do not attempt GFF_ID if we are not FWI_2 capable */
if (!IS_FWI2_CAPABLE(ha))
@@ -3243,7 +3083,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post new sess\n",
__func__, __LINE__, ea->port_name);
qla24xx_post_newsess_work(vha, &ea->id,
- ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ ea->port_name, NULL, NULL, 0);
}
}
}
@@ -3647,6 +3487,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->last_rscn_gen = fcport->rscn_gen;
found = true;
/*
* If device was not a fabric device before.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9e6b56527b25..5b2deaa730bf 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1043,7 +1043,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
__func__, __LINE__, (u8 *)&wwn, id.b24);
wwnn = wwn_to_u64(e->node_name);
qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
- (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ (u8 *)&wwnn, NULL, 0);
}
}
@@ -2219,10 +2219,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Check for secure flash support */
if (IS_QLA28XX(ha)) {
- if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
- ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
+ if (RD_REG_DWORD(&reg->mailbox12) & BIT_0)
ha->flags.secure_adapter = 1;
- }
+ ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
+ (ha->flags.secure_adapter) ? "Yes" : "No");
}
@@ -2270,6 +2270,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x0078,
"Verifying loaded RISC code...\n");
+ /* If SmartSAN is enabled, FDMI and RDP must be enabled as well */
+ if (ql2xsmartsan) {
+ ql2xfdmienable = 1;
+ ql2xrdpenable = 1;
+ }
+
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
rval = ha->isp_ops->chip_diag(vha);
if (rval)
@@ -3544,53 +3550,77 @@ static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
}
-/*
- * Return Code:
- * QLA_SUCCESS: no action
- * QLA_INTERFACE_ERROR: SFP is not there.
- * QLA_FUNCTION_FAILED: detected New SFP
+/**
+ * qla24xx_detect_sfp() - detect SFP type and seed long-range settings
+ *
+ * @vha: adapter state pointer.
+ *
+ * Return:
+ * 0 -- Configure firmware to use short-range settings -- normal
+ * buffer-to-buffer credits.
+ *
+ * 1 -- Configure firmware to use long-range settings -- extra
+ * buffer-to-buffer credits should be allocated with
+ * ha->lr_distance containing distance settings from NVRAM or SFP
+ * (if supported).
*/
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
- int rc = QLA_SUCCESS;
+ int rc, used_nvram;
struct sff_8247_a0 *a;
struct qla_hw_data *ha = vha->hw;
-
- if (!AUTO_DETECT_SFP_SUPPORT(vha))
+ struct nvram_81xx *nv = ha->nvram;
+#define LR_DISTANCE_UNKNOWN 2
+ static const char * const types[] = { "Short", "Long" };
+ static const char * const lengths[] = { "(10km)", "(5km)", "" };
+ u8 ll = 0;
+
+ /* Seed with NVRAM settings. */
+ used_nvram = 0;
+ ha->flags.lr_detected = 0;
+ if (IS_BPM_RANGE_CAPABLE(ha) &&
+ (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
+ used_nvram = 1;
+ ha->flags.lr_detected = 1;
+ ha->lr_distance =
+ (nv->enhanced_features >> LR_DIST_NV_POS)
+ & LR_DIST_NV_MASK;
+ }
+
+ if (!IS_BPM_ENABLED(vha))
goto out;
-
+ /* Determine SR/LR capabilities of SFP/Transceiver. */
rc = qla2x00_read_sfp_dev(vha, NULL, 0);
if (rc)
goto out;
+ used_nvram = 0;
a = (struct sff_8247_a0 *)vha->hw->sfp_data;
qla2xxx_print_sfp_info(vha);
- if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
- /* long range */
- ha->flags.detected_lr_sfp = 1;
+ ha->flags.lr_detected = 0;
+ ll = a->fc_ll_cc7;
+ if (ll & FC_LL_VL || ll & FC_LL_L) {
+ /* Long range, track length. */
+ ha->flags.lr_detected = 1;
if (a->length_km > 5 || a->length_100m > 50)
- ha->long_range_distance = LR_DISTANCE_10K;
+ ha->lr_distance = LR_DISTANCE_10K;
else
- ha->long_range_distance = LR_DISTANCE_5K;
-
- if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
- ql_dbg(ql_dbg_async, vha, 0x507b,
- "Detected Long Range SFP.\n");
- } else {
- /* short range */
- ha->flags.detected_lr_sfp = 0;
- if (ha->flags.using_lr_setting)
- ql_dbg(ql_dbg_async, vha, 0x5084,
- "Detected Short Range SFP.\n");
+ ha->lr_distance = LR_DISTANCE_5K;
}
if (!vha->flags.init_done)
rc = QLA_SUCCESS;
out:
- return rc;
+ ql_dbg(ql_dbg_async, vha, 0x507b,
+ "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
+ types[ha->flags.lr_detected],
+ ha->flags.lr_detected ? lengths[ha->lr_distance] :
+ lengths[LR_DISTANCE_UNKNOWN],
+ used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
+ return ha->flags.lr_detected;
}
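
The reworked detection seeds the long-range decision from NVRAM first and only lets a readable SFP override it, and the return value now directly answers "configure long-range buffer-to-buffer credits?". A sketch of the NVRAM extraction; the constant values are assumed placeholders, the driver defines the real NEF_/LR_DIST_ values:

#include <stdint.h>

/* Assumed placeholder values; the driver defines the real ones. */
#define NEF_LR_DIST_ENABLE 0x01
#define LR_DIST_NV_POS     2
#define LR_DIST_NV_MASK    0x3

/* Returns 1 (long range) and fills *distance, else 0 (short range). */
static int lr_from_nvram(uint16_t enhanced_features, uint8_t *distance)
{
	if (!(enhanced_features & NEF_LR_DIST_ENABLE))
		return 0;
	*distance = (enhanced_features >> LR_DIST_NV_POS) & LR_DIST_NV_MASK;
	return 1;
}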
/**
@@ -3608,6 +3638,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
uint16_t fw_major_version;
+ int done_once = 0;
if (IS_P3P_TYPE(ha)) {
rval = ha->isp_ops->load_risc(vha, &srisc_address);
@@ -3628,6 +3659,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
qla81xx_mpi_sync(vha);
+execute_fw_with_lr:
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
@@ -3649,7 +3681,15 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
- qla24xx_detect_sfp(vha);
+ /* Enable BPM support? */
+ if (!done_once++ && qla24xx_detect_sfp(vha)) {
+ ql_dbg(ql_dbg_init, vha, 0x00ca,
+ "Re-starting firmware -- BPM.\n");
+ /* Best-effort - re-init. */
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->chip_diag(vha);
+ goto execute_fw_with_lr;
+ }
if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) &&
@@ -3708,6 +3748,10 @@ enable_82xx_npiv:
"ISP Firmware failed checksum.\n");
goto failed;
}
+
+ /* Enable PUREX PASSTHRU */
+ if (ql2xrdpenable)
+ qla25xx_set_els_cmds_supported(vha);
} else
goto failed;
@@ -3919,6 +3963,13 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_8;
}
+ if (ql2xrdpenable)
+ ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
+
+ /* Enable Async 8130/8131 events -- transceiver insertion/removal */
+ if (IS_BPM_RANGE_CAPABLE(ha))
+ ha->fw_options[3] |= BIT_10;
+
ql_dbg(ql_dbg_init, vha, 0x00e8,
"%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
__func__, ha->fw_options[1], ha->fw_options[2],
@@ -5060,7 +5111,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (N2N_TOPO(ha)) {
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
/* borrowing */
- u32 *bp, i, sz;
+ u32 *bp, sz;
memset(ha->init_cb, 0, ha->init_cb_size);
sz = min_t(int, sizeof(struct els_plogi_payload),
@@ -5068,13 +5119,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
rval = qla24xx_get_port_login_templ(vha,
ha->init_cb_dma, (void *)ha->init_cb, sz);
if (rval == QLA_SUCCESS) {
+ __be32 *q = &ha->plogi_els_payld.data[0];
+
bp = (uint32_t *)ha->init_cb;
- for (i = 0; i < sz/4 ; i++, bp++)
- *bp = cpu_to_be32(*bp);
+ cpu_to_be32_array(q, bp, sz / 4);
- memcpy(&ha->plogi_els_payld.data,
- (void *)ha->init_cb,
- sizeof(ha->plogi_els_payld.data));
+ memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
} else {
ql_dbg(ql_dbg_init, vha, 0x00d1,
"PLOGI ELS param read fail.\n");
@@ -5097,6 +5147,7 @@ skip_login:
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
+ return QLA_FUNCTION_FAILED;
}
found_devs = 0;
@@ -5541,24 +5592,22 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
vha->device_flags |= SWITCH_FOUND;
+ rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
+ if (rval != QLA_SUCCESS)
+ ql_dbg(ql_dbg_disc, vha, 0x20ff,
+ "Failed to get Fabric Port Name\n");
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
rval = qla2x00_send_change_request(vha, 0x3, 0);
if (rval != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x121,
- "Failed to enable receiving of RSCN requests: 0x%x.\n",
- rval);
+ "Failed to enable receiving of RSCN requests: 0x%x.\n",
+ rval);
}
-
do {
qla2x00_mgmt_svr_login(vha);
- /* FDMI support. */
- if (ql2xfdmienable &&
- test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
- qla2x00_fdmi_register(vha);
-
/* Ensure we are logged into the SNS. */
loop_id = NPH_SNS_LID(ha);
rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
@@ -5570,6 +5619,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return rval;
}
+
+ /* FDMI support. */
+ if (ql2xfdmienable &&
+ test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
+ qla2x00_fdmi_register(vha);
+
if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
if (qla2x00_rft_id(vha)) {
/* EMPTY */
@@ -5812,7 +5867,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
if (ql2xgffidenable &&
(!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
- new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
+ new_fcport->fc4_type != 0))
continue;
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -6656,7 +6711,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
- ha->flags.fw_started = 0;
+ QLA_FW_STOPPED(ha);
ha->flags.fw_init_done = 0;
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
@@ -8663,61 +8718,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
return status;
}
-void
-qla81xx_update_fw_options(scsi_qla_host_t *vha)
-{
- struct qla_hw_data *ha = vha->hw;
-
- /* Hold status IOCBs until ABTS response received. */
- if (ql2xfwholdabts)
- ha->fw_options[3] |= BIT_12;
-
- /* Set Retry FLOGI in case of P2P connection */
- if (ha->operating_mode == P2P) {
- ha->fw_options[2] |= BIT_3;
- ql_dbg(ql_dbg_disc, vha, 0x2103,
- "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
- __func__, ha->fw_options[2]);
- }
-
- /* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio) {
- if (qla_tgt_mode_enabled(vha) ||
- qla_dual_mode_enabled(vha))
- ha->fw_options[2] |= BIT_11;
- else
- ha->fw_options[2] &= ~BIT_11;
- }
-
- if (qla_tgt_mode_enabled(vha) ||
- qla_dual_mode_enabled(vha)) {
- /* FW auto send SCSI status during */
- ha->fw_options[1] |= BIT_8;
- ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
-
- /* FW perform Exchange validation */
- ha->fw_options[2] |= BIT_4;
- } else {
- ha->fw_options[1] &= ~BIT_8;
- ha->fw_options[10] &= 0x00ff;
-
- ha->fw_options[2] &= ~BIT_4;
- }
-
- if (ql2xetsenable) {
- /* Enable ETS Burst. */
- memset(ha->fw_options, 0, sizeof(ha->fw_options));
- ha->fw_options[2] |= BIT_9;
- }
-
- ql_dbg(ql_dbg_init, vha, 0x00e9,
- "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
- __func__, ha->fw_options[1], ha->fw_options[2],
- ha->fw_options[3], vha->host->active_mode);
-
- qla2x00_set_fw_options(vha, ha->fw_options);
-}
-
/*
* qla24xx_get_fcp_prio
* Gets the fcp cmd priority value for the logged in port.
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 47bf60a9490a..182bd68c79ac 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -530,7 +530,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
- mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
+ mrk24->handle = make_handle(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16((uint16_t)lun);
@@ -1655,7 +1655,7 @@ qla24xx_start_scsi(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1843,7 +1843,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Fill-in common area */
cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -1975,7 +1975,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -2178,7 +2178,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Fill-in common area */
cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
clr_ptr = (uint32_t *)cmd_pkt + 2;
memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -2362,6 +2362,8 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
struct srb_iocb *lio = &sp->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+
if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
} else {
@@ -2489,7 +2491,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->entry_type = TSK_MGMT_IOCB_TYPE;
tsk->entry_count = 1;
- tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+ tsk->handle = make_handle(req->id, tsk->handle);
tsk->nport_handle = cpu_to_le16(fcport->loop_id);
tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->control_flags = cpu_to_le32(flags);
@@ -2684,9 +2686,9 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_dsd_count = 0;
els_iocb->opcode = elsio->u.els_logo.els_cmd;
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
/* For SID the byte order is different than DID */
els_iocb->s_id[1] = vha->d_id.b.al_pa;
els_iocb->s_id[2] = vha->d_id.b.area;
@@ -2939,7 +2941,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->fcport = fcport;
elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
- init_completion(&elsio->u.els_plogi.comp);
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
@@ -2949,7 +2950,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
if (!elsio->u.els_plogi.els_plogi_pyld) {
@@ -2958,7 +2959,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
resp_ptr = elsio->u.els_plogi.els_resp_pyld =
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
if (!elsio->u.els_plogi.els_resp_pyld) {
@@ -2982,6 +2983,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
sizeof(*elsio->u.els_plogi.els_plogi_pyld));
+ init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -3030,9 +3032,9 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->type == SRB_ELS_CMD_RPT ?
bsg_request->rqst_data.r_els.els_code :
bsg_request->rqst_data.h_els.command_code;
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
els_iocb->control_flags = 0;
els_iocb->rx_byte_count =
cpu_to_le32(bsg_job->reply_payload.payload_len);
@@ -3358,7 +3360,7 @@ sufficient_dsds:
}
cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -3429,7 +3431,7 @@ sufficient_dsds:
goto queuing_error;
cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
@@ -3534,7 +3536,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle));
if (sp->fcport) {
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3542,7 +3544,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
}
abt_iocb->handle_to_abort =
- cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ cpu_to_le32(make_handle(aio->u.abt.req_que_no,
aio->u.abt.cmd_hndl));
abt_iocb->vp_index = vha->vp_idx;
abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
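
The MAKE_HANDLE() macro is replaced throughout by a make_handle() helper. Its definition is not part of this hunk; from the call sites (request-queue id plus per-queue command handle, stored in a 32-bit IOCB field) an inline of this shape is assumed:

#include <stdint.h>

/* Assumed shape of the helper replacing MAKE_HANDLE(): request-queue
 * id in the upper 16 bits, command handle in the lower 16. */
static inline uint32_t make_handle(uint32_t req_id, uint32_t handle)
{
	return ((req_id & 0xffffu) << 16) | (handle & 0xffffu);
}

Over the old macro, the inline gains type checking and single evaluation of its arguments.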
@@ -3905,7 +3907,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
}
cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e40705d38cea..8d7a905f6247 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -31,6 +31,144 @@ const char *const port_state_str[] = {
"ONLINE"
};
+static void qla24xx_purex_iocb(scsi_qla_host_t *vha, void *pkt,
+ void (*process_item)(struct scsi_qla_host *vha, void *pkt))
+{
+ struct purex_list *list = &vha->purex_list;
+ struct purex_item *item;
+ ulong flags;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ ql_log(ql_log_warn, vha, 0x5092,
+ ">> Failed allocate purex list item.\n");
+ return;
+ }
+
+ item->vha = vha;
+ item->process_item = process_item;
+ memcpy(&item->iocb, pkt, sizeof(item->iocb));
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_add_tail(&item->list, &list->head);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
+}
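
qla24xx_purex_iocb() is deliberately minimal for IRQ context: copy the IOCB, queue it, set PROCESS_PUREX_IOCB, and let the DPC thread do the real work. A sketch of the matching process-context drain; the field names come from the producer above, but the drain shape itself is an assumption and the driver's actual DPC hook may differ:

/* Sketch of a DPC-side consumer for the purex list built above. */
static void qla_drain_purex_list(struct scsi_qla_host *vha)
{
	struct purex_list *list = &vha->purex_list;
	struct purex_item *item, *next;
	ulong flags;
	LIST_HEAD(todo);

	/* Detach everything queued so far; new items keep accumulating. */
	spin_lock_irqsave(&list->lock, flags);
	list_splice_init(&list->head, &todo);
	spin_unlock_irqrestore(&list->lock, flags);

	list_for_each_entry_safe(item, next, &todo, list) {
		list_del(&item->list);
		item->process_item(item->vha, &item->iocb);
		kfree(item);
	}
}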
+
+static void
+qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
+{
+ struct abts_entry_24xx *abts = pkt;
+ struct qla_hw_data *ha = vha->hw;
+ struct els_entry_24xx *rsp_els;
+ struct abts_entry_24xx *abts_rsp;
+ dma_addr_t dma;
+ uint32_t fctl;
+ int rval;
+
+ ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
+
+ ql_log(ql_log_warn, vha, 0x0287,
+ "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
+ abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
+ abts->seq_id, abts->seq_cnt);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
+ "-------- ABTS RCV -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
+ (uint8_t *)abts, sizeof(*abts));
+
+ rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
+ GFP_KERNEL);
+ if (!rsp_els) {
+ ql_log(ql_log_warn, vha, 0x0287,
+ "Failed allocate dma buffer ABTS/ELS RSP.\n");
+ return;
+ }
+
+ /* terminate exchange */
+ memset(rsp_els, 0, sizeof(*rsp_els));
+ rsp_els->entry_type = ELS_IOCB_TYPE;
+ rsp_els->entry_count = 1;
+ rsp_els->nport_handle = ~0;
+ rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
+ rsp_els->control_flags = EPD_RX_XCHG;
+ ql_dbg(ql_dbg_init, vha, 0x0283,
+ "Sending ELS Response to terminate exchange %#x...\n",
+ abts->rx_xch_addr_to_abort);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
+ (uint8_t *)rsp_els, sizeof(*rsp_els));
+ rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0288,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (rsp_els->comp_status) {
+ ql_log(ql_log_warn, vha, 0x0289,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, rsp_els->comp_status,
+ rsp_els->error_subcode_1, rsp_els->error_subcode_2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x028a,
+ "%s: abort exchange done.\n", __func__);
+ }
+
+ /* send ABTS response */
+ abts_rsp = (void *)rsp_els;
+ memset(abts_rsp, 0, sizeof(*abts_rsp));
+ abts_rsp->entry_type = ABTS_RSP_TYPE;
+ abts_rsp->entry_count = 1;
+ abts_rsp->nport_handle = abts->nport_handle;
+ abts_rsp->vp_idx = abts->vp_idx;
+ abts_rsp->sof_type = abts->sof_type & 0xf0;
+ abts_rsp->rx_xch_addr = abts->rx_xch_addr;
+ abts_rsp->d_id[0] = abts->s_id[0];
+ abts_rsp->d_id[1] = abts->s_id[1];
+ abts_rsp->d_id[2] = abts->s_id[2];
+ abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
+ abts_rsp->s_id[0] = abts->d_id[0];
+ abts_rsp->s_id[1] = abts->d_id[1];
+ abts_rsp->s_id[2] = abts->d_id[2];
+ abts_rsp->cs_ctl = abts->cs_ctl;
+ /* flip bit 23 (Exchange Context) in the returned fctl */
+ fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
+ FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
+ abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
+ abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
+ abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
+ abts_rsp->type = FC_TYPE_BLD;
+ abts_rsp->rx_id = abts->rx_id;
+ abts_rsp->ox_id = abts->ox_id;
+ abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
+ abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
+ abts_rsp->payload.ba_acc.high_seq_cnt = ~0;
+ abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
+ ql_dbg(ql_dbg_init, vha, 0x028b,
+ "Sending BA ACC response to ABTS %#x...\n",
+ abts->rx_xch_addr_to_abort);
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
+ (uint8_t *)abts_rsp, sizeof(*abts_rsp));
+ rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x028c,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (abts_rsp->comp_status) {
+ ql_log(ql_log_warn, vha, 0x028d,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, abts_rsp->comp_status,
+ abts_rsp->payload.error.subcode1,
+ abts_rsp->payload.error.subcode2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x028e,
+ "%s: done.\n", __func__);
+ }
+
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
+}
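
The F_CTL arithmetic above is compact enough to deserve unpacking: f_ctl[2] carries bits 23..16 of the 24-bit F_CTL field, and bit 23 is Exchange Context. OR-ing with 0x7F and complementing zeroes bits 22..16 while inverting bit 23, so the BA_ACC is marked as flowing in the opposite direction of the aborted exchange; the OR-ed low bits force end-of-sequence semantics. An equivalent, more explicit form:

#include <stdint.h>

/* Sketch of the fctl computation in qla24xx_process_abts() above. */
static uint32_t ba_acc_fctl(uint8_t rx_fctl_hi, uint32_t forced_low_bits)
{
	/* ~(x | 0x7F) keeps only bit 7 -- F_CTL bit 23 -- inverted. */
	uint8_t flipped = (uint8_t)~(rx_fctl_hi | 0x7F); /* 0x80 or 0x00 */

	return ((uint32_t)flipped << 16) | forced_low_bits;
}

Here forced_low_bits would be FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT, as in the hunk.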
+
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
* @irq: interrupt number
@@ -716,12 +854,24 @@ skip_rio:
break;
case MBA_SYSTEM_ERR: /* System Error */
- mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) ?
- RD_REG_WORD(&reg24->mailbox7) : 0;
- ql_log(ql_log_warn, vha, 0x5003,
- "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
- "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
+ mbx = 0;
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+ IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ u16 m[4];
+
+ m[0] = RD_REG_WORD(&reg24->mailbox4);
+ m[1] = RD_REG_WORD(&reg24->mailbox5);
+ m[2] = RD_REG_WORD(&reg24->mailbox6);
+ mbx = m[3] = RD_REG_WORD(&reg24->mailbox7);
+
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
+ mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
+ } else {
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
+ mb[1], mb[2], mb[3]);
+ }
+
ha->fw_dump_mpi =
(IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
RD_REG_WORD(&reg24->mailbox7) & BIT_8;
@@ -813,13 +963,15 @@ skip_rio:
"LOOP UP detected (%s Gbps).\n",
qla2x00_get_link_speed_str(ha, ha->link_data_rate));
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mb[2] & BIT_0)
+ ql_log(ql_log_info, vha, 0x11a0,
+ "FEC=enabled (link up).\n");
+ }
+
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
- if (AUTO_DETECT_SFP_SUPPORT(vha)) {
- set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
@@ -1254,6 +1406,7 @@ global_port_update:
ql_dbg(ql_dbg_async, vha, 0x5052,
"D-Port Diagnostics: %04x %04x %04x %04x\n",
mb[0], mb[1], mb[2], mb[3]);
+ memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
static char *results[] = {
"start", "done(pass)", "done(error)", "undefined" };
@@ -1291,6 +1444,11 @@ global_port_update:
case MBA_TRANS_INSERT:
ql_dbg(ql_dbg_async, vha, 0x5091,
"Transceiver Insertion: %04x\n", mb[1]);
+ set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
+ break;
+
+ case MBA_TRANS_REMOVE:
+ ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
break;
default:
@@ -1754,11 +1912,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
- ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
- "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
- "iop0=%x.\n", type, fcport->port_name, sp->handle,
- fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ ql_dbg(ql_dbg_async, sp->vha, 0x5036,
+ "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
le32_to_cpu(logio->io_parameter[0]));
vha->hw->exch_starvation = 0;
@@ -1837,11 +1993,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
- "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n", type, fcport->port_name,
- sp->handle, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ ql_dbg(ql_dbg_async, sp->vha, 0x5037,
+ "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
le32_to_cpu(logio->io_parameter[1]));
@@ -1910,6 +2064,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct nvmefc_fcp_req *fd;
uint16_t ret = QLA_SUCCESS;
uint16_t comp_status = le16_to_cpu(sts->comp_status);
+ int logit = 0;
iocb = &sp->u.iocb_cmd;
fcport = sp->fcport;
@@ -1920,6 +2075,12 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (unlikely(iocb->u.nvme.aen_op))
atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
+ if (unlikely(comp_status != CS_COMPLETE))
+ logit = 1;
+
+ fd->transferred_length = fd->payload_length -
+ le32_to_cpu(sts->residual_len);
+
/*
* State flags: Bit 6 and 0.
* If 0 is set, we don't care about 6.
@@ -1930,8 +2091,20 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
*/
if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
iocb->u.nvme.rsp_pyld_len = 0;
- } else if ((state_flags & SF_FCP_RSP_DMA)) {
+ } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
+ (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
+ /* Response already DMA'd to fd->rspaddr. */
iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ } else if ((state_flags & SF_FCP_RSP_DMA)) {
+ /*
+ * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
+ * as an error.
+ */
+ iocb->u.nvme.rsp_pyld_len = 0;
+ fd->transferred_length = 0;
+ ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
+ "Unexpected values in NVMe_RSP IU.\n");
+ logit = 1;
} else if (state_flags & SF_NVME_ERSP) {
uint32_t *inbuf, *outbuf;
uint16_t iter;
@@ -1954,16 +2127,28 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iter = iocb->u.nvme.rsp_pyld_len >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
- } else { /* unhandled case */
- ql_log(ql_log_warn, fcport->vha, 0x503a,
- "NVME-%s error. Unhandled state_flags of %x\n",
- sp->name, state_flags);
}
- fd->transferred_length = fd->payload_length -
- le32_to_cpu(sts->residual_len);
+ if (state_flags & SF_NVME_ERSP) {
+ struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
+ u32 tgt_xfer_len;
- if (unlikely(comp_status != CS_COMPLETE))
+ tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
+ if (fd->transferred_length != tgt_xfer_len) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3079,
+ "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
+ tgt_xfer_len, fd->transferred_length);
+ logit = 1;
+ } else if (comp_status == CS_DATA_UNDERRUN) {
+ /*
+ * Do not log if this is just an underflow and there
+ * is no data loss.
+ */
+ logit = 0;
+ }
+ }
+
+ if (unlikely(logit))
ql_log(ql_log_warn, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
@@ -2516,11 +2701,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
- if (sp->abort)
- sp->aborted = 1;
- else
- sp->completed = 1;
-
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
ql_dbg(ql_dbg_io, vha, 0x3015,
@@ -3083,6 +3263,11 @@ process_err:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
+ if (qla_ini_mode_enabled(vha)) {
+ qla24xx_purex_iocb(vha, pkt,
+ qla24xx_process_abts);
+ break;
+ }
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) {
/* ensure that the ATIO queue is empty */
@@ -3127,6 +3312,19 @@ process_err:
qla_ctrlvp_completed(vha, rsp->req,
(struct vp_ctrl_entry_24xx *)pkt);
break;
+ case PUREX_IOCB_TYPE:
+ {
+ struct purex_entry_24xx *purex = (void *)pkt;
+
+ if (purex->els_frame_payload[3] != ELS_COMMAND_RDP) {
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "Discarding ELS Request opcode %#x...\n",
+ purex->els_frame_payload[3]);
+ break;
+ }
+ qla24xx_purex_iocb(vha, pkt, qla24xx_process_purex_rdp);
+ break;
+ }
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -3442,6 +3640,25 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
+
+ qpair = dev_id;
+ if (!qpair) {
+ ql_log(ql_log_info, NULL, 0x505b,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = qpair->hw;
+
+ queue_work(ha->wq, &qpair->q_work);
+
+ return IRQ_HANDLED;
+}
+
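+/*
+ * Handshake variant of qla2xxx_msix_rsp_q(): also acks the interrupt
+ * by clearing the RISC interrupt in HCCR before queueing the qpair
+ * work. Selected when the MSI-X handshake is enabled.
+ */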
+irqreturn_t
+qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
struct device_reg_24xx __iomem *reg;
unsigned long flags;
@@ -3453,13 +3670,10 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
}
ha = qpair->hw;
- /* Clear the interrupt, if enabled, for this response queue */
- if (unlikely(!ha->flags.disable_msix_handshake)) {
- reg = &ha->iobase->isp24;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
queue_work(ha->wq, &qpair->q_work);
@@ -3478,6 +3692,7 @@ static const struct qla_init_msix_entry msix_entries[] = {
{ "rsp_q", qla24xx_msix_rsp_q },
{ "atio_q", qla83xx_msix_atio_q },
{ "qpair_multiq", qla2xxx_msix_rsp_q },
+ { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9e09964f5c0e..9fd83d1bffe0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -117,10 +117,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
- if (ha->pdev->error_state > pci_channel_io_frozen) {
+ if (ha->pdev->error_state == pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x1001,
- "error_state is greater than pci_channel_io_frozen, "
- "exiting.\n");
+ "PCI channel failed permanently, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -643,30 +642,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
return rval;
}
-#define EXTENDED_BB_CREDITS BIT_0
#define NVME_ENABLE_FLAG BIT_3
-static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
-{
- uint16_t mb4 = BIT_0;
-
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
- mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
-
- return mb4;
-}
-
-static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
-{
- uint16_t mb4 = BIT_0;
-
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- struct nvram_81xx *nv = ha->nvram;
-
- mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
- }
-
- return mb4;
-}
/*
* qla2x00_execute_fw
@@ -690,10 +666,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
struct qla_hw_data *ha = vha->hw;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ u8 semaphore = 0;
+#define EXE_FW_FORCE_SEMAPHORE BIT_7
+ u8 retry = 3;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
"Entered %s.\n", __func__);
+again:
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
@@ -703,25 +683,13 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[3] = 0;
mcp->mb[4] = 0;
mcp->mb[11] = 0;
- ha->flags.using_lr_setting = 0;
- if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
- IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ql2xautodetectsfp) {
- if (ha->flags.detected_lr_sfp) {
- mcp->mb[4] |=
- qla25xx_set_sfp_lr_dist(ha);
- ha->flags.using_lr_setting = 1;
- }
- } else {
- struct nvram_81xx *nv = ha->nvram;
- /* set LR distance if specified in nvram */
- if (nv->enhanced_features &
- NEF_LR_DIST_ENABLE) {
- mcp->mb[4] |=
- qla25xx_set_nvr_lr_dist(ha);
- ha->flags.using_lr_setting = 1;
- }
- }
+
+ /* Enable BPM? */
+ if (ha->flags.lr_detected) {
+ mcp->mb[4] = BIT_0;
+ if (IS_BPM_RANGE_CAPABLE(ha))
+ mcp->mb[4] |=
+ ha->lr_distance << LR_DIST_FW_POS;
}
if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
@@ -747,6 +715,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
if (ha->flags.exchoffld_enabled)
mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
+ if (semaphore)
+ mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
+
mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
} else {
@@ -763,6 +734,15 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
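+ /*
+ * On ISP28xx, EXECUTE FIRMWARE may fail with mb[1] == 0x27
+ * (assumed here to mean the firmware semaphore is held
+ * elsewhere); retry up to three times with the
+ * force-semaphore bit set in mb[11].
+ */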
+ if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
+ mcp->mb[1] == 0x27 && retry) {
+ semaphore = 1;
+ retry--;
+ ql_dbg(ql_dbg_async, vha, 0x1026,
+ "Exe FW: force semaphore.\n");
+ goto again;
+ }
+
ql_dbg(ql_dbg_mbx, vha, 0x1026,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
return rval;
@@ -1137,11 +1117,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
if (IS_QLA28XX(ha)) {
- if (mcp->mb[16] & BIT_10) {
- ql_log(ql_log_info, vha, 0xffff,
- "FW support secure flash updates\n");
+ if (mcp->mb[16] & BIT_10)
ha->flags.secure_fw = 1;
- }
+
+ ql_log(ql_log_info, vha, 0xffff,
+ "Secure Flash Update in FW: %s\n",
+ (ha->flags.secure_fw) ? "Supported" :
+ "Not Supported");
}
}
@@ -1405,17 +1387,20 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ if (!vha->hw->flags.fw_started)
+ return QLA_INVALID_COMMAND;
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
"Entered %s.\n", __func__);
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
- mcp->mb[2] = MSW(phys_addr);
- mcp->mb[3] = LSW(phys_addr);
+ mcp->mb[2] = MSW(LSD(phys_addr));
+ mcp->mb[3] = LSW(LSD(phys_addr));
mcp->mb[6] = MSW(MSD(phys_addr));
mcp->mb[7] = LSW(MSD(phys_addr));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_2|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = tov;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1424,13 +1409,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
} else {
- sts_entry_t *sts_entry = (sts_entry_t *) buffer;
+ sts_entry_t *sts_entry = buffer;
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
- "Done %s.\n", __func__);
+ "Done %s (status=%x).\n", __func__,
+ sts_entry->entry_status);
}
return rval;
@@ -1475,7 +1461,7 @@ qla2x00_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
"Entered %s.\n", __func__);
- if (vha->flags.qpairs_available && sp->qpair)
+ if (sp->qpair)
req = sp->qpair->req;
else
req = vha->req;
@@ -2045,6 +2031,57 @@ gpd_error_out:
return rval;
}
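+
+/*
+ * Read the firmware port database entry for @nport_handle into @pdb
+ * via a GET PORT DATABASE mailbox command; @pdb is streaming-DMA
+ * mapped for the duration of the command.
+ */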
+int
+qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
+ struct port_database_24xx *pdb)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ dma_addr_t pdb_dma;
+ int rval;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
+ "Entered %s.\n", __func__);
+
+ memset(pdb, 0, sizeof(*pdb));
+
+ pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
+ sizeof(*pdb), DMA_FROM_DEVICE);
+ if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) {
+ ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ mcp->mb[0] = MBC_GET_PORT_DATABASE;
+ mcp->mb[1] = nport_handle;
+ mcp->mb[2] = MSW(LSD(pdb_dma));
+ mcp->mb[3] = LSW(LSD(pdb_dma));
+ mcp->mb[6] = MSW(MSD(pdb_dma));
+ mcp->mb[7] = LSW(MSD(pdb_dma));
+ mcp->mb[9] = 0;
+ mcp->mb[10] = 0;
+ mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = sizeof(*pdb);
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = vha->hw->login_timeout * 2;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x111a,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
+ sizeof(*pdb), DMA_FROM_DEVICE);
+
+ return rval;
+}
+
/*
* qla2x00_get_firmware_state
* Get adapter firmware state.
@@ -2384,7 +2421,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
- lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->handle = make_handle(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
if (opt & BIT_0)
@@ -2654,7 +2691,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
- lg->handle = MAKE_HANDLE(req->id, lg->handle);
+ lg->handle = make_handle(req->id, lg->handle);
lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
@@ -3060,18 +3097,19 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter, dwords;
+ uint32_t *iter = (void *)stats;
+ ushort dwords = sizeof(*stats)/sizeof(*iter);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
- mc.mb[2] = MSW(stats_dma);
- mc.mb[3] = LSW(stats_dma);
+ mc.mb[2] = MSW(LSD(stats_dma));
+ mc.mb[3] = LSW(LSD(stats_dma));
mc.mb[6] = MSW(MSD(stats_dma));
mc.mb[7] = LSW(MSD(stats_dma));
- mc.mb[8] = sizeof(struct link_statistics) / 4;
+ mc.mb[8] = dwords;
mc.mb[9] = cpu_to_le16(vha->vp_idx);
mc.mb[10] = cpu_to_le16(options);
@@ -3086,8 +3124,6 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
"Done %s.\n", __func__);
/* Re-endianize - firmware data is le32. */
- dwords = sizeof(struct link_statistics) / 4;
- iter = &stats->link_fail_cnt;
for ( ; dwords--; iter++)
le32_to_cpus(iter);
}
@@ -3145,9 +3181,9 @@ qla24xx_abort_command(srb_t *sp)
abt->entry_type = ABORT_IOCB_TYPE;
abt->entry_count = 1;
- abt->handle = MAKE_HANDLE(req->id, abt->handle);
+ abt->handle = make_handle(req->id, abt->handle);
abt->nport_handle = cpu_to_le16(fcport->loop_id);
- abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
+ abt->handle_to_abort = make_handle(req->id, handle);
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
@@ -3224,7 +3260,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk->p.tsk.entry_count = 1;
- tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
+ tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -3888,11 +3924,29 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_SCAN;
fcport->n2n_flag = 0;
}
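+ /*
+ * N2N: the port with the numerically larger WWPN assigns
+ * itself AL_PA 1 and the peer AL_PA 2 and initiates login;
+ * otherwise wait for the remote port to log in.
+ */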
+ id.b24 = 0;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ vha->d_id.b24 = 0;
+ vha->d_id.b.al_pa = 1;
+ ha->flags.n2n_bigger = 1;
+
+ id.b.al_pa = 2;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: assign local id %x remote id %x\n",
+ vha->d_id.b24, id.b24);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ ha->flags.n2n_bigger = 0;
+ }
fcport = qla2x00_find_fcport_by_wwpn(vha,
rptid_entry->u.f1.port_name, 1);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
if (fcport) {
fcport->plogi_nack_done_deadline = jiffies + HZ;
fcport->dm_login_expire = jiffies + 2*HZ;
@@ -3903,6 +3957,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vha->flags.nvme_enabled)
fcport->fc4_type |= FS_FC4TYPE_NVME;
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(fcport->port_name)) {
+ fcport->d_id = id;
+ }
+
switch (fcport->disc_state) {
case DSC_DELETED:
set_bit(RELOGIN_NEEDED,
@@ -3915,25 +3974,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
break;
}
} else {
- id.b24 = 0;
- if (wwn_to_u64(vha->port_name) >
- wwn_to_u64(rptid_entry->u.f1.port_name)) {
- vha->d_id.b24 = 0;
- vha->d_id.b.al_pa = 1;
- ha->flags.n2n_bigger = 1;
- ha->flags.n2n_ae = 0;
-
- id.b.al_pa = 2;
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: assign local id %x remote id %x\n",
- vha->d_id.b24, id.b24);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x5075,
- "Format 1: Remote login - Waiting for WWPN %8phC.\n",
- rptid_entry->u.f1.port_name);
- ha->flags.n2n_bigger = 0;
- ha->flags.n2n_ae = 1;
- }
qla24xx_post_newsess_work(vha, &id,
rptid_entry->u.f1.port_name,
rptid_entry->u.f1.node_name,
@@ -4827,6 +4867,103 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
return rval;
}
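+
+/*
+ * Tell the firmware which ELS opcodes to deliver to the driver as
+ * PUREX IOCBs. Only the RDP opcode is enabled here; the remaining
+ * ELS commands are presumably still handled by the firmware.
+ */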
+int
+qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint8_t *els_cmd_map;
+ dma_addr_t els_cmd_map_dma;
+ uint cmd_opcode = ELS_COMMAND_RDP;
+ uint index = cmd_opcode / 8;
+ uint bit = cmd_opcode % 8;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
+ return QLA_SUCCESS;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
+ "Entered %s.\n", __func__);
+
+ els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
+ &els_cmd_map_dma, GFP_KERNEL);
+ if (!els_cmd_map) {
+ ql_log(ql_log_warn, vha, 0x7101,
+ "Failed to allocate RDP els command param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE);
+
+ els_cmd_map[index] |= 1 << bit;
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
+ mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
+ mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
+ mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
+ mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->buf_size = ELS_CMD_MAP_SIZE;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x118d,
+ "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
+ "Done %s.\n", __func__);
+ }
+
+ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
+ els_cmd_map, els_cmd_map_dma);
+
+ return rval;
+}
+
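+/*
+ * Read the adapter's buffer-to-buffer credit parameters
+ * (MBC_GET_RNID_PARAMS / RNID_BUFFER_CREDITS) into @bbc, for use in
+ * the RDP Buffer Credit descriptor.
+ */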
+int
+qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc,
+ dma_addr_t bbc_dma)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_BUFFER_CREDITS << 8;
+ mcp->mb[2] = MSW(LSD(bbc_dma));
+ mcp->mb[3] = LSW(LSD(bbc_dma));
+ mcp->mb[6] = MSW(MSD(bbc_dma));
+ mcp->mb[7] = LSW(MSD(bbc_dma));
+ mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter);
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = sizeof(*bbc);
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x118f,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
@@ -4880,8 +5017,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mcp->mb[0] = MBC_READ_SFP;
mcp->mb[1] = dev;
- mcp->mb[2] = MSW(sfp_dma);
- mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[2] = MSW(LSD(sfp_dma));
+ mcp->mb[3] = LSW(LSD(sfp_dma));
mcp->mb[6] = MSW(MSD(sfp_dma));
mcp->mb[7] = LSW(MSD(sfp_dma));
mcp->mb[8] = len;
@@ -4934,8 +5071,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mcp->mb[0] = MBC_WRITE_SFP;
mcp->mb[1] = dev;
- mcp->mb[2] = MSW(sfp_dma);
- mcp->mb[3] = LSW(sfp_dma);
+ mcp->mb[2] = MSW(LSD(sfp_dma));
+ mcp->mb[3] = LSW(LSD(sfp_dma));
mcp->mb[6] = MSW(MSD(sfp_dma));
mcp->mb[7] = LSW(MSD(sfp_dma));
mcp->mb[8] = len;
@@ -5170,10 +5307,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
- IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+ IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
mcp->in_mb |= MBX_1;
- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+ IS_QLA28XX(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -5407,6 +5545,15 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1107,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
+ if (mcp->mb[1] != 0x7)
+ ha->link_data_rate = mcp->mb[1];
+
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mcp->mb[4] & BIT_0)
+ ql_log(ql_log_info, vha, 0x11a2,
+ "FEC=enabled (data rate).\n");
+ }
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
"Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
@@ -6688,3 +6835,60 @@ int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
return rval;
}
+
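+/*
+ * Set (options BIT_0) or get the FC LED configuration via
+ * MBC_SET_GET_FC_LED_CONFIG. On a set, BIT_1..BIT_3 select which of
+ * the three LED values in @led are written; on a get, all three are
+ * returned.
+ */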
+int
+ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval;
+
+ if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
+ __func__, options);
+
+ mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
+ mcp->mb[1] = options;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ if (options & BIT_0) {
+ if (options & BIT_1) {
+ mcp->mb[2] = led[2];
+ mcp->out_mb |= MBX_2;
+ }
+ if (options & BIT_2) {
+ mcp->mb[3] = led[0];
+ mcp->out_mb |= MBX_3;
+ }
+ if (options & BIT_3) {
+ mcp->mb[4] = led[1];
+ mcp->out_mb |= MBX_4;
+ }
+ } else {
+ mcp->in_mb |= MBX_4|MBX_3|MBX_2;
+ }
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval) {
+ ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
+ __func__, rval, mcp->mb[0], mcp->mb[1]);
+ return rval;
+ }
+
+ if (options & BIT_0) {
+ ha->beacon_blink_led = 0;
+ ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
+ } else {
+ led[2] = mcp->mb[2];
+ led[0] = mcp->mb[3];
+ led[1] = mcp->mb[4];
+ ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
+ __func__, led[0], led[1], led[2]);
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8ae639d089d1..d82e92da529a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -361,6 +361,13 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
+ if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
+ if (atomic_read(&vha->loop_state) == LOOP_READY) {
+ qla24xx_process_purex_list(&vha->purex_list);
+ clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
+ }
+ }
+
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, vha, 0x4016,
"FCPort update scheduled.\n");
@@ -509,6 +516,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
vha->dpc_flags = 0L;
+ ha->dpc_active = 0;
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
/*
* To fix the issue of processing a parent's RSCN for the vport before
@@ -886,7 +896,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->rsp_q_out);
ret = qla25xx_request_irq(ha, qpair, qpair->msix,
- QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
+ ha->flags.disable_msix_handshake ?
+ QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
if (ret)
goto que_failed;
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index cad1fc2a1b28..df99911b8bb9 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -53,10 +53,9 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- if (ha->pdev->error_state > pci_channel_io_frozen) {
+ if (ha->pdev->error_state == pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x115c,
- "error_state is greater than pci_channel_io_frozen, "
- "exiting.\n");
+ "PCI channel failed permanently, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -3136,7 +3135,7 @@ qlafx00_start_scsi(srb_t *sp)
memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
- lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+ lcmd_pkt.handle = make_handle(req->id, sp->handle);
lcmd_pkt.reserved_0 = 0;
lcmd_pkt.port_path_ctrl = 0;
lcmd_pkt.reserved_1 = 0;
@@ -3206,7 +3205,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
- tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ tm_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3232,9 +3231,9 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1;
- abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
abt_iocb.abort_handle =
- cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+ cpu_to_le32(make_handle(req->id, fxio->u.abt.cmd_hndl));
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index bfcd02fdf2b8..84e2a980dea0 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -413,7 +413,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7a94e1171c72..d190db5ea7d9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -113,7 +113,8 @@ module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
- "0 - no FDMI. Default is 1 - perform FDMI.");
+ "0 - no FDMI registrations. "
+ "1 - provide FDMI registrations (default).");
#define MAX_Q_DEPTH 64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -122,11 +123,7 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 64.");
-#if (IS_ENABLED(CONFIG_NVME_FC))
-int ql2xenabledif;
-#else
int ql2xenabledif = 2;
-#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
@@ -306,6 +303,22 @@ MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
"0 (Default). Based on check.\n"
"1 Force using internal buffers\n");
+int ql2xsmartsan;
+module_param(ql2xsmartsan, int, 0444);
+module_param_named(smartsan, ql2xsmartsan, int, 0444);
+MODULE_PARM_DESC(ql2xsmartsan,
+ "Send SmartSAN Management Attributes for FDMI Registration."
+ " Default is 0 - No SmartSAN registration,"
+ " 1 - Register SmartSAN Management Attributes.");
+
+int ql2xrdpenable;
+module_param(ql2xrdpenable, int, 0444);
+module_param_named(rdpenable, ql2xrdpenable, int, 0444);
+MODULE_PARM_DESC(ql2xrdpenable,
+ "Enables RDP responses. "
+ "0 - no RDP responses (default). "
+ "1 - provide RDP responses.");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
@@ -583,6 +596,9 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
case 3:
speed_str = "8.0GT/s";
break;
+ case 4:
+ speed_str = "16.0GT/s";
+ break;
default:
speed_str = "<unknown>";
break;
@@ -1253,17 +1269,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return SUCCESS;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (sp->completed) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return SUCCESS;
- }
-
- if (sp->abort || sp->aborted) {
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return FAILED;
- }
-
- sp->abort = 1;
sp->comp = &comp;
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -1688,6 +1693,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
unsigned long *flags)
__releases(qp->qp_lock_ptr)
@@ -1696,10 +1705,13 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
DECLARE_COMPLETION_ONSTACK(comp);
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
int rval;
bool ret_cmd;
uint32_t ratov_j;
+ lockdep_assert_held(qp->qp_lock_ptr);
+
if (qla2x00_chip_is_down(vha)) {
sp->done(sp, res);
return;
@@ -1715,7 +1727,6 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
sp->comp = &comp;
- sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
rval = ha->isp_ops->abort_command(sp);
@@ -1739,13 +1750,17 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- if (ret_cmd && (!sp->completed || !sp->aborted))
+ if (ret_cmd && blk_mq_request_started(cmd->request))
sp->done(sp, res);
} else {
sp->done(sp, res);
}
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
@@ -1792,6 +1807,10 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}
+/*
+ * The caller must ensure that no completion interrupts will happen
+ * while this function is in progress.
+ */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
@@ -2285,7 +2304,7 @@ static struct isp_operations qla81xx_isp_ops = {
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
- .update_fw_options = qla81xx_update_fw_options,
+ .update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
@@ -2402,7 +2421,7 @@ static struct isp_operations qla83xx_isp_ops = {
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
- .update_fw_options = qla81xx_update_fw_options,
+ .update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
@@ -3439,13 +3458,6 @@ skip_dpc:
if (test_bit(UNLOADING, &base_vha->dpc_flags))
return -ENODEV;
- if (ha->flags.detected_lr_sfp) {
- ql_log(ql_log_info, base_vha, 0xffff,
- "Reset chip to pick up LR SFP setting\n");
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- qla2xxx_wake_dpc(base_vha);
- }
-
return 0;
probe_failed:
@@ -3806,6 +3818,20 @@ qla2x00_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
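+
+/* Discard any PUREX IOCBs still queued when the host is torn down. */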
+static inline void
+qla24xx_free_purex_list(struct purex_list *list)
+{
+ struct list_head *item, *next;
+ ulong flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_for_each_safe(item, next, &list->head) {
+ list_del(item);
+ kfree(list_entry(item, struct purex_item, list));
+ }
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
@@ -3838,6 +3864,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
}
+ qla24xx_free_purex_list(&vha->purex_list);
+
qla2x00_mem_free(ha);
qla82xx_md_free(vha);
@@ -3907,19 +3935,6 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
-/*
- * qla2x00_mark_all_devices_lost
- * Updates fcport state when device goes offline.
- *
- * Input:
- * ha = adapter block pointer.
- * fcport = port structure pointer.
- *
- * Return:
- * None.
- *
- * Context:
- */
void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
{
@@ -3931,16 +3946,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
qlt_schedule_sess_for_deletion(fcport);
-
- if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
- continue;
-
- /*
- * No point in marking the device as lost, if the device is
- * already DEAD.
- */
- if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
- continue;
}
}
@@ -4811,6 +4816,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->gpnid_list);
INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
+ INIT_LIST_HEAD(&vha->purex_list.head);
+ spin_lock_init(&vha->purex_list.lock);
+
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
@@ -5168,9 +5176,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->n2n_flag = 1;
}
fcport->fw_login_state = 0;
- /*
- * wait link init done before sending login
- */
+
+ schedule_delayed_work(&vha->scan.scan_work, 5);
} else {
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -5731,6 +5738,583 @@ retry_lock:
return;
}
+static bool
+qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
+ struct purex_entry_24xx *purex)
+{
+ char fwstr[16];
+ u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
+ struct port_database_24xx *pdb;
+
+ /* Domain Controller is always logged-out. */
+ /* if RDP request is not from Domain Controller: */
+ if (sid != 0xfffc01)
+ return false;
+
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
+
+ pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
+ if (!pdb) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Failed allocate pdb\n", __func__);
+ } else if (qla24xx_get_port_database(vha, purex->nport_handle, pdb)) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Failed get pdb sid=%x\n", __func__, sid);
+ } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
+ pdb->current_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "%s: Port not logged in sid=%#x\n", __func__, sid);
+ } else {
+ /* RDP request is from logged in port */
+ kfree(pdb);
+ return false;
+ }
+ kfree(pdb);
+
+ vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
+ fwstr[strcspn(fwstr, " ")] = 0;
+ /* If the FW version allows an RDP response length of up to 2048 bytes: */
+ if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
+ return false;
+
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
+
+ /* RDP response length must be reduced to a maximum of 256 bytes */
+ return true;
+}
+
+static uint
+qla25xx_rdp_port_speed_capability(struct qla_hw_data *ha)
+{
+ if (IS_CNA_CAPABLE(ha))
+ return RDP_PORT_SPEED_10GB;
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ unsigned int speeds = 0;
+
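+ /*
+ * Derived from the checks below: max_supported_speed
+ * 0 = 16Gb, 1 = 32Gb, 2 = 64Gb; min_supported_speed
+ * 2 = 4Gb up through 6 = 64Gb.
+ */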
+ if (ha->max_supported_speed == 2) {
+ if (ha->min_supported_speed <= 6)
+ speeds |= RDP_PORT_SPEED_64GB;
+ }
+
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1) {
+ if (ha->min_supported_speed <= 5)
+ speeds |= RDP_PORT_SPEED_32GB;
+ }
+
+ if (ha->max_supported_speed == 2 ||
+ ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 4)
+ speeds |= RDP_PORT_SPEED_16GB;
+ }
+
+ if (ha->max_supported_speed == 1 ||
+ ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 3)
+ speeds |= RDP_PORT_SPEED_8GB;
+ }
+
+ if (ha->max_supported_speed == 0) {
+ if (ha->min_supported_speed <= 2)
+ speeds |= RDP_PORT_SPEED_4GB;
+ }
+
+ return speeds;
+ }
+
+ if (IS_QLA2031(ha))
+ return RDP_PORT_SPEED_16GB|RDP_PORT_SPEED_8GB|
+ RDP_PORT_SPEED_4GB;
+
+ if (IS_QLA25XX(ha))
+ return RDP_PORT_SPEED_8GB|RDP_PORT_SPEED_4GB|
+ RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
+
+ if (IS_QLA24XX_TYPE(ha))
+ return RDP_PORT_SPEED_4GB|RDP_PORT_SPEED_2GB|
+ RDP_PORT_SPEED_1GB;
+
+ if (IS_QLA23XX(ha))
+ return RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
+
+ return RDP_PORT_SPEED_1GB;
+}
+
+static uint
+qla25xx_rdp_port_speed_currently(struct qla_hw_data *ha)
+{
+ switch (ha->link_data_rate) {
+ case PORT_SPEED_1GB:
+ return RDP_PORT_SPEED_1GB;
+
+ case PORT_SPEED_2GB:
+ return RDP_PORT_SPEED_2GB;
+
+ case PORT_SPEED_4GB:
+ return RDP_PORT_SPEED_4GB;
+
+ case PORT_SPEED_8GB:
+ return RDP_PORT_SPEED_8GB;
+
+ case PORT_SPEED_10GB:
+ return RDP_PORT_SPEED_10GB;
+
+ case PORT_SPEED_16GB:
+ return RDP_PORT_SPEED_16GB;
+
+ case PORT_SPEED_32GB:
+ return RDP_PORT_SPEED_32GB;
+
+ case PORT_SPEED_64GB:
+ return RDP_PORT_SPEED_64GB;
+
+ default:
+ return RDP_PORT_SPEED_UNKNOWN;
+ }
+}
+
+/*
+ * Function Name: qla24xx_process_purex_rdp
+ *
+ * Description:
+ * Prepare an RDP response and send it to the Fabric switch
+ *
+ * PARAMETERS:
+ * vha: SCSI qla host
+ * purex: RDP request received by the HBA
+ */
+void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct purex_entry_24xx *purex = pkt;
+ dma_addr_t rsp_els_dma;
+ dma_addr_t rsp_payload_dma;
+ dma_addr_t stat_dma;
+ dma_addr_t bbc_dma;
+ dma_addr_t sfp_dma;
+ struct els_entry_24xx *rsp_els = NULL;
+ struct rdp_rsp_payload *rsp_payload = NULL;
+ struct link_statistics *stat = NULL;
+ struct buffer_credit_24xx *bbc = NULL;
+ uint8_t *sfp = NULL;
+ uint16_t sfp_flags = 0;
+ uint rsp_payload_length = sizeof(*rsp_payload);
+ int rval;
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
+ "%s: Enter\n", __func__);
+
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
+ "-------- ELS REQ -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
+ (void *)purex, sizeof(*purex));
+
+ if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
+ rsp_payload_length =
+ offsetof(typeof(*rsp_payload), optical_elmt_desc);
+ ql_dbg(ql_dbg_init, vha, 0x0181,
+ "Reducing RSP payload length to %u bytes...\n",
+ rsp_payload_length);
+ }
+
+ rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
+ &rsp_els_dma, GFP_KERNEL);
+ if (!rsp_els) {
+ ql_log(ql_log_warn, vha, 0x0183,
+ "Failed allocate dma buffer ELS RSP.\n");
+ goto dealloc;
+ }
+
+ rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
+ &rsp_payload_dma, GFP_KERNEL);
+ if (!rsp_payload) {
+ ql_log(ql_log_warn, vha, 0x0184,
+ "Failed allocate dma buffer ELS RSP payload.\n");
+ goto dealloc;
+ }
+
+ sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
+ &sfp_dma, GFP_KERNEL);
+
+ stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
+ &stat_dma, GFP_KERNEL);
+
+ bbc = dma_alloc_coherent(&ha->pdev->dev, sizeof(*bbc),
+ &bbc_dma, GFP_KERNEL);
+
+ /* Prepare Response IOCB */
+ rsp_els->entry_type = ELS_IOCB_TYPE;
+ rsp_els->entry_count = 1;
+ rsp_els->sys_define = 0;
+ rsp_els->entry_status = 0;
+ rsp_els->handle = 0;
+ rsp_els->nport_handle = purex->nport_handle;
+ rsp_els->tx_dsd_count = 1;
+ rsp_els->vp_index = purex->vp_idx;
+ rsp_els->sof_type = EST_SOFI3;
+ rsp_els->rx_xchg_address = purex->rx_xchg_addr;
+ rsp_els->rx_dsd_count = 0;
+ rsp_els->opcode = purex->els_frame_payload[0];
+
+ rsp_els->d_id[0] = purex->s_id[0];
+ rsp_els->d_id[1] = purex->s_id[1];
+ rsp_els->d_id[2] = purex->s_id[2];
+
+ rsp_els->control_flags = EPD_ELS_ACC;
+ rsp_els->rx_byte_count = 0;
+ rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
+
+ put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
+ rsp_els->tx_len = rsp_els->tx_byte_count;
+
+ rsp_els->rx_address = 0;
+ rsp_els->rx_len = 0;
+
+ /* Prepare Response Payload */
+ rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
+ rsp_payload->hdr.len = cpu_to_be32(
+ rsp_els->tx_byte_count - sizeof(rsp_payload->hdr));
+
+ /* Link service Request Info Descriptor */
+ rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
+ rsp_payload->ls_req_info_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
+ rsp_payload->ls_req_info_desc.req_payload_word_0 =
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
+
+ /* Link service Request Info Descriptor 2 */
+ rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
+ rsp_payload->ls_req_info_desc2.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
+ rsp_payload->ls_req_info_desc2.req_payload_word_0 =
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
+
+ /* SFP Diagnostic Parameters Descriptor */
+ rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
+ rsp_payload->sfp_diag_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
+
+ if (sfp) {
+ /* SFP Flags */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
+ if (!rval) {
+ /* SFP Flags bits 3-0: Port Tx Laser Type */
+ if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
+ sfp_flags |= BIT_0; /* short wave */
+ else if (sfp[0] & BIT_1)
+ sfp_flags |= BIT_1; /* long wave 1310nm */
+ else if (sfp[1] & BIT_4)
+ sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
+ }
+
+ /* SFP Type */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
+ if (!rval) {
+ sfp_flags |= BIT_4; /* optical */
+ if (sfp[0] == 0x3)
+ sfp_flags |= BIT_6; /* sfp+ */
+ }
+
+ rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
+
+ /* SFP Diagnostics */
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
+ if (!rval) {
+ uint16_t *trx = (void *)sfp; /* already be16 */
+ rsp_payload->sfp_diag_desc.temperature = trx[0];
+ rsp_payload->sfp_diag_desc.vcc = trx[1];
+ rsp_payload->sfp_diag_desc.tx_bias = trx[2];
+ rsp_payload->sfp_diag_desc.tx_power = trx[3];
+ rsp_payload->sfp_diag_desc.rx_power = trx[4];
+ }
+ }
+
+ /* Port Speed Descriptor */
+ rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
+ rsp_payload->port_speed_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
+ rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
+ qla25xx_rdp_port_speed_capability(ha));
+ rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
+ qla25xx_rdp_port_speed_currently(ha));
+
+ /* Link Error Status Descriptor */
+ rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
+ rsp_payload->ls_err_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
+
+ if (stat) {
+ rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
+ if (!rval) {
+ rsp_payload->ls_err_desc.link_fail_cnt =
+ cpu_to_be32(stat->link_fail_cnt);
+ rsp_payload->ls_err_desc.loss_sync_cnt =
+ cpu_to_be32(stat->loss_sync_cnt);
+ rsp_payload->ls_err_desc.loss_sig_cnt =
+ cpu_to_be32(stat->loss_sig_cnt);
+ rsp_payload->ls_err_desc.prim_seq_err_cnt =
+ cpu_to_be32(stat->prim_seq_err_cnt);
+ rsp_payload->ls_err_desc.inval_xmit_word_cnt =
+ cpu_to_be32(stat->inval_xmit_word_cnt);
+ rsp_payload->ls_err_desc.inval_crc_cnt =
+ cpu_to_be32(stat->inval_crc_cnt);
+ rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
+ }
+ }
+
+ /* Portname Descriptor */
+ rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
+ rsp_payload->port_name_diag_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
+ memcpy(rsp_payload->port_name_diag_desc.WWNN,
+ vha->node_name,
+ sizeof(rsp_payload->port_name_diag_desc.WWNN));
+ memcpy(rsp_payload->port_name_diag_desc.WWPN,
+ vha->port_name,
+ sizeof(rsp_payload->port_name_diag_desc.WWPN));
+
+ /* F-Port Portname Descriptor */
+ rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
+ rsp_payload->port_name_direct_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
+ memcpy(rsp_payload->port_name_direct_desc.WWNN,
+ vha->fabric_node_name,
+ sizeof(rsp_payload->port_name_direct_desc.WWNN));
+ memcpy(rsp_payload->port_name_direct_desc.WWPN,
+ vha->fabric_port_name,
+ sizeof(rsp_payload->port_name_direct_desc.WWPN));
+
+ /* Buffer Credit Descriptor */
+ rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
+ rsp_payload->buffer_credit_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
+ rsp_payload->buffer_credit_desc.fcport_b2b = 0;
+ rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
+ rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
+
+ if (bbc) {
+ memset(bbc, 0, sizeof(*bbc));
+ rval = qla24xx_get_buffer_credits(vha, bbc, bbc_dma);
+ if (!rval) {
+ rsp_payload->buffer_credit_desc.fcport_b2b =
+ cpu_to_be32(LSW(bbc->parameter[0]));
+ }
+ }
+
+ if (rsp_payload_length < sizeof(*rsp_payload))
+ goto send;
+
+ /* Optical Element Descriptor, Temperature */
+ rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[0].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Voltage */
+ rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[1].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Tx Bias Current */
+ rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[2].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Tx Power */
+ rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[3].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+ /* Optical Element Descriptor, Rx Power */
+ rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
+ rsp_payload->optical_elmt_desc[4].desc_len =
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+
+ if (sfp) {
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
+ if (!rval) {
+ uint16_t *trx = (void *)sfp; /* already be16 */
+
+ /* Optical Element Descriptor, Temperature */
+ rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
+ rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
+ rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
+ rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
+ rsp_payload->optical_elmt_desc[0].element_flags =
+ cpu_to_be32(1 << 28);
+
+ /* Optical Element Descriptor, Voltage */
+ rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
+ rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
+ rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
+ rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
+ rsp_payload->optical_elmt_desc[1].element_flags =
+ cpu_to_be32(2 << 28);
+
+ /* Optical Element Descriptor, Tx Bias Current */
+ rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
+ rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
+ rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
+ rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
+ rsp_payload->optical_elmt_desc[2].element_flags =
+ cpu_to_be32(3 << 28);
+
+ /* Optical Element Descriptor, Tx Power */
+ rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
+ rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
+ rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
+ rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
+ rsp_payload->optical_elmt_desc[3].element_flags =
+ cpu_to_be32(4 << 28);
+
+ /* Optical Element Descriptor, Rx Power */
+ rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
+ rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
+ rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
+ rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
+ rsp_payload->optical_elmt_desc[4].element_flags =
+ cpu_to_be32(5 << 28);
+ }
+
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
+ if (!rval) {
+ /* Temperature high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[0].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 7 & 1) << 3 |
+ (sfp[0] >> 6 & 1) << 2 |
+ (sfp[4] >> 7 & 1) << 1 |
+ (sfp[4] >> 6 & 1) << 0);
+
+ /* Voltage high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[1].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 5 & 1) << 3 |
+ (sfp[0] >> 4 & 1) << 2 |
+ (sfp[4] >> 5 & 1) << 1 |
+ (sfp[4] >> 4 & 1) << 0);
+
+ /* Tx Bias Current high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[2].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 3 & 1) << 3 |
+ (sfp[0] >> 2 & 1) << 2 |
+ (sfp[4] >> 3 & 1) << 1 |
+ (sfp[4] >> 2 & 1) << 0);
+
+ /* Tx Power high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[3].element_flags |=
+ cpu_to_be32(
+ (sfp[0] >> 1 & 1) << 3 |
+ (sfp[0] >> 0 & 1) << 2 |
+ (sfp[4] >> 1 & 1) << 1 |
+ (sfp[4] >> 0 & 1) << 0);
+
+ /* Rx Power high/low alarm/warning */
+ rsp_payload->optical_elmt_desc[4].element_flags |=
+ cpu_to_be32(
+ (sfp[1] >> 7 & 1) << 3 |
+ (sfp[1] >> 6 & 1) << 2 |
+ (sfp[5] >> 7 & 1) << 1 |
+ (sfp[5] >> 6 & 1) << 0);
+ }
+ }
+
+ /* Optical Product Data Descriptor */
+ rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
+ rsp_payload->optical_prod_desc.desc_len =
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
+
+ if (sfp) {
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
+ if (!rval) {
+ memcpy(rsp_payload->optical_prod_desc.vendor_name,
+ sfp + 0,
+ sizeof(rsp_payload->optical_prod_desc.vendor_name));
+ memcpy(rsp_payload->optical_prod_desc.part_number,
+ sfp + 20,
+ sizeof(rsp_payload->optical_prod_desc.part_number));
+ memcpy(rsp_payload->optical_prod_desc.revision,
+ sfp + 36,
+ sizeof(rsp_payload->optical_prod_desc.revision));
+ memcpy(rsp_payload->optical_prod_desc.serial_number,
+ sfp + 48,
+ sizeof(rsp_payload->optical_prod_desc.serial_number));
+ }
+
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
+ if (!rval) {
+ memcpy(rsp_payload->optical_prod_desc.date,
+ sfp + 0,
+ sizeof(rsp_payload->optical_prod_desc.date));
+ }
+ }
+
+send:
+ ql_dbg(ql_dbg_init, vha, 0x0183,
+ "Sending ELS Response to RDP Request...\n");
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
+ (void *)rsp_els, sizeof(*rsp_els));
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
+ "-------- ELS RSP PAYLOAD -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
+ (void *)rsp_payload, rsp_payload_length);
+
+ rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0188,
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
+ } else if (rsp_els->comp_status) {
+ ql_log(ql_log_warn, vha, 0x0189,
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
+ __func__, rsp_els->comp_status,
+ rsp_els->error_subcode_1, rsp_els->error_subcode_2);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
+ }
+
+dealloc:
+ if (bbc)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*bbc),
+ bbc, bbc_dma);
+ if (stat)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
+ stat, stat_dma);
+ if (sfp)
+ dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
+ sfp, sfp_dma);
+ if (rsp_payload)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
+ rsp_payload, rsp_payload_dma);
+ if (rsp_els)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
+ rsp_els, rsp_els_dma);
+}
+
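+/*
+ * Splice the pending PUREX list under its lock, then run each item's
+ * handler (e.g. the RDP responder above) without the lock held and
+ * free the item.
+ */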
+void qla24xx_process_purex_list(struct purex_list *list)
+{
+ struct list_head head = LIST_HEAD_INIT(head);
+ struct purex_item *item, *next;
+ ulong flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ list_splice_init(&list->head, &head);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ list_for_each_entry_safe(item, next, &head, list) {
+ list_del(&item->list);
+ item->process_item(item->vha, &item->iocb);
+ kfree(item);
+ }
+}
+
void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
@@ -6254,13 +6838,14 @@ qla2x00_do_dpc(void *data)
}
if (test_and_clear_bit(DETECT_SFP_CHANGE,
- &base_vha->dpc_flags) &&
- !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
- qla24xx_detect_sfp(base_vha);
-
- if (ha->flags.detected_lr_sfp !=
- ha->flags.using_lr_setting)
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ &base_vha->dpc_flags)) {
+ /* Semantics:
+ * - NO-OP -- await the next ISP-ABORT. Preferred method,
+ * as it minimizes the disruption a forced
+ * chip-reset would cause.
+ * - Force -- schedule an ISP-ABORT now.
+ */
+ /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
}
if (test_and_clear_bit
@@ -6301,6 +6886,15 @@ qla2x00_do_dpc(void *data)
}
}
+ if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
+ if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
+ qla24xx_process_purex_list
+ (&base_vha->purex_list);
+ clear_bit(PROCESS_PUREX_IOCB,
+ &base_vha->dpc_flags);
+ }
+ }
+
if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
&base_vha->dpc_flags)) {
qla2x00_update_fcports(base_vha);
@@ -6692,7 +7286,8 @@ qla2x00_timer(struct timer_list *t)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -6705,12 +7300,13 @@ qla2x00_timer(struct timer_list *t)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d.\n",
+ "relogin_needed=%d, Process_purex_iocb=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 76a38bf86cbc..3da79ee1d88e 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2683,7 +2683,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t sec_mask, rest_addr, fdata;
void *optrom = NULL;
dma_addr_t optrom_dma;
- int rval;
+ int rval, ret;
struct secure_flash_update_block *sfub;
dma_addr_t sfub_dma;
uint32_t offset = faddr << 2;
@@ -2939,11 +2939,12 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
write_protect:
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
"Protect flash...\n");
- rval = qla24xx_protect_flash(vha);
- if (rval) {
+ ret = qla24xx_protect_flash(vha);
+ if (ret) {
qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
ql_log(ql_log_warn, vha, 0x7099,
"Failed protect flash\n");
+ rval = QLA_COMMAND_ERROR;
}
if (reset_to_rom == true) {
@@ -2951,10 +2952,12 @@ write_protect:
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- rval = qla2x00_wait_for_hba_online(vha);
- if (rval != QLA_SUCCESS)
+ ret = qla2x00_wait_for_hba_online(vha);
+ if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xffff,
"Adapter did not come out of reset\n");
+ rval = QLA_COMMAND_ERROR;
+ }
}
done:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 70081b395fb2..622e7337affc 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -27,8 +27,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
-#include <target/target_core_base.h>
-#include <target/target_core_fabric.h>
#include "qla_def.h"
#include "qla_target.h"
@@ -1760,7 +1758,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
}
- resp->handle = MAKE_HANDLE(qpair->req->id, h);
+ resp->handle = make_handle(qpair->req->id, h);
resp->entry_type = ABTS_RESP_24XX;
resp->entry_count = 1;
resp->nport_handle = abts->nport_handle;
@@ -2582,7 +2580,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
} else
qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle = make_handle(qpair->req->id, h);
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
@@ -3095,7 +3093,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
} else
qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle = make_handle(qpair->req->id, h);
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
@@ -3816,7 +3814,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
return;
}
cmd->jiffies_at_free = get_jiffies_64();
- target_free_tag(sess->se_sess, &cmd->se_cmd);
+ cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
@@ -4150,7 +4148,7 @@ out_term:
qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
- target_free_tag(sess->se_sess, &cmd->se_cmd);
+ cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
ha->tgt.tgt_ops->put_sess(sess);
@@ -4277,24 +4275,18 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
{
- struct se_session *se_sess = sess->se_sess;
struct qla_tgt_cmd *cmd;
- int tag, cpu;
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
- if (tag < 0)
+ cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
+ if (!cmd)
return NULL;
- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
- memset(cmd, 0, sizeof(struct qla_tgt_cmd));
cmd->cmd_type = TYPE_TGT_CMD;
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
qlt_incr_num_pend_cmds(vha);
cmd->vha = vha;
- cmd->se_cmd.map_tag = tag;
- cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
@@ -4747,11 +4739,11 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
qla24xx_post_newsess_work(vha, &port_id,
iocb->u.isp24.port_name,
iocb->u.isp24.u.plogi.node_name,
- pla, FC4_TYPE_UNKNOWN);
+ pla, 0);
else
qla24xx_post_newsess_work(vha, &port_id,
iocb->u.isp24.port_name, NULL,
- pla, FC4_TYPE_UNKNOWN);
+ pla, 0);
goto out;
}
@@ -5352,9 +5344,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
- struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
- int tag, cpu;
unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
@@ -5384,10 +5374,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
if (!sess)
return;
- se_sess = sess->se_sess;
-
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
- if (tag < 0) {
+ cmd = ha->tgt.tgt_ops->get_cmd(sess);
+ if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3009,
"qla_target(%d): %s: Allocation of cmd failed\n",
vha->vp_idx, __func__);
@@ -5402,9 +5390,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
return;
}
- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
- memset(cmd, 0, sizeof(struct qla_tgt_cmd));
-
qlt_incr_num_pend_cmds(vha);
INIT_LIST_HEAD(&cmd->cmd_list);
memcpy(&cmd->atio, atio, sizeof(*atio));
@@ -5414,7 +5399,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
cmd->qpair = ha->base_qpair;
- cmd->se_cmd.map_cpu = cpu;
if (qfull) {
cmd->q_full = 1;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 6539499e9e95..3cf8590feeac 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -671,6 +671,8 @@ struct qla_tgt_func_tmpl {
void (*handle_data)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
+ struct qla_tgt_cmd *(*get_cmd)(struct fc_port *);
+ void (*rel_cmd)(struct qla_tgt_cmd *);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
void (*free_session)(struct fc_port *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 5b0c057def2b..6aeb1c3fb7a8 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -870,7 +870,7 @@ bailout:
static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
- tmp->capture_timestamp = jiffies;
+ tmp->capture_timestamp = cpu_to_le32(jiffies);
}
static void
@@ -882,9 +882,10 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
"%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
v+0, v+1, v+2, v+3, v+4, v+5) != 6);
- tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
- tmp->driver_info[1] = v[5] << 8 | v[4];
- tmp->driver_info[2] = 0x12345678;
+ tmp->driver_info[0] = cpu_to_le32(
+ v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
+ tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]);
+ tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678);
}
static void
@@ -894,10 +895,10 @@ qla27xx_firmware_info(struct scsi_qla_host *vha,
tmp->firmware_version[0] = vha->hw->fw_major_version;
tmp->firmware_version[1] = vha->hw->fw_minor_version;
tmp->firmware_version[2] = vha->hw->fw_subminor_version;
- tmp->firmware_version[3] =
- vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
- tmp->firmware_version[4] =
- vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+ tmp->firmware_version[3] = cpu_to_le32(
+ vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
+ tmp->firmware_version[4] = cpu_to_le32(
+ vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]);
}
static void
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index d2a0014e8b21..bba8dc90acfb 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -18,11 +18,11 @@ struct __packed qla27xx_fwdt_template {
__le32 entry_count;
uint32_t template_version;
- uint32_t capture_timestamp;
+ __le32 capture_timestamp;
uint32_t template_checksum;
uint32_t reserved_2;
- uint32_t driver_info[3];
+ __le32 driver_info[3];
uint32_t saved_state[16];
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index bb03c022e023..8ccd9ba1ddef 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.22-k"
+#define QLA2XXX_VERSION "10.01.00.25-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index abe7f79bb789..1f0a185b2a95 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -268,6 +268,29 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
+static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct qla_tgt_cmd *cmd;
+ int tag, cpu;
+
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ return NULL;
+
+ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+ cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
+
+ return cmd;
+}
+
+static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
+{
+ target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
+}
+
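tcm_qla2xxx_get_cmd() and tcm_qla2xxx_rel_cmd() above capture the session tag-pool pattern that used to be open-coded in qla_target.c: grab a free tag, index a preallocated command array, zero the slot. A minimal userspace sketch of the same shape; the kernel's sbitmap_queue is per-CPU aware and lock-free, this toy single-word bitmap is neither.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define POOL_SIZE 64

struct demo_cmd { int tag; char payload[32]; };

static struct demo_cmd cmd_map[POOL_SIZE]; /* like se_sess->sess_cmd_map */
static uint64_t tag_bits;                  /* stand-in for sess_tag_pool */

/* Grab a free tag, index the preallocated array, zero the slot, and
 * remember the tag so the release side can clear the right bit. */
static struct demo_cmd *demo_get_cmd(void)
{
	for (int tag = 0; tag < POOL_SIZE; tag++) {
		if (tag_bits & (1ULL << tag))
			continue;
		tag_bits |= 1ULL << tag;
		memset(&cmd_map[tag], 0, sizeof(cmd_map[tag]));
		cmd_map[tag].tag = tag;
		return &cmd_map[tag];
	}
	return NULL; /* pool exhausted, like sbitmap_queue_get() < 0 */
}

static void demo_rel_cmd(struct demo_cmd *cmd)
{
	tag_bits &= ~(1ULL << cmd->tag);
}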
/*
* Called from qla_target_template->free_cmd(), and will call
* tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
@@ -1549,6 +1572,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr,
+ .get_cmd = tcm_qla2xxx_get_cmd,
+ .rel_cmd = tcm_qla2xxx_rel_cmd,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
.free_session = tcm_qla2xxx_free_session,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 930e4803d888..56c24a73e0c7 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -94,20 +94,6 @@ EXPORT_SYMBOL(scsi_logging_level);
ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
EXPORT_SYMBOL(scsi_sd_pm_domain);
-/**
- * scsi_put_command - Free a scsi command block
- * @cmd: command block to free
- *
- * Returns: Nothing.
- *
- * Notes: The command must not belong to any lists.
- */
-void scsi_put_command(struct scsi_cmnd *cmd)
-{
- scsi_del_cmd_from_list(cmd);
- BUG_ON(delayed_work_pending(&cmd->abort_work));
-}
-
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
@@ -764,10 +750,6 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
-/* This should go away in the future, it doesn't do anything anymore */
-bool scsi_use_blk_mq = true;
-module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-
static int __init init_scsi(void)
{
int error;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ae2fa170f6ad..978be1602f71 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2412,7 +2412,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
wake_up(&shost->host_wait);
scsi_run_host_queues(shost);
- scsi_put_command(scmd);
kfree(rq);
out_put_autopm_host:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 610ee41fa54c..47835c4b4ee0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -562,7 +562,6 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
- scsi_del_cmd_from_list(cmd);
}
/* Returns false when no more bytes to process, true if there are more */
@@ -1098,36 +1097,7 @@ static void scsi_cleanup_rq(struct request *rq)
}
}
-/* Add a command to the list used by the aacraid and dpt_i2o drivers */
-void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
-{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
- if (shost->use_cmd_list) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_add_tail(&cmd->list, &sdev->cmd_list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
-}
-
-/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
-void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
-{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
- if (shost->use_cmd_list) {
- spin_lock_irqsave(&sdev->list_lock, flags);
- BUG_ON(list_empty(&cmd->list));
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
-}
-
-/* Called after a request has been started. */
+/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
void *buf = cmd->sense_buffer;
@@ -1135,7 +1105,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
struct request *rq = blk_mq_rq_from_pdu(cmd);
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
- int retries;
+ int retries, to_clear;
bool in_flight;
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
@@ -1146,9 +1116,15 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
jiffies_at_alloc = cmd->jiffies_at_alloc;
retries = cmd->retries;
in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /* zero out the cmd, except for the embedded scsi_request */
- memset((char *)cmd + sizeof(cmd->req), 0,
- sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
+ /*
+ * Zero out the cmd, except for the embedded scsi_request. Only clear
+ * the driver-private command data if the LLD does not supply a
+ * function to initialize that data.
+ */
+ to_clear = sizeof(*cmd) - sizeof(cmd->req);
+ if (!dev->host->hostt->init_cmd_priv)
+ to_clear += dev->host->hostt->cmd_size;
+ memset((char *)cmd + sizeof(cmd->req), 0, to_clear);
cmd->device = dev;
cmd->sense_buffer = buf;
@@ -1160,7 +1136,6 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- scsi_add_cmd_to_list(cmd);
}
static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
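The scsi_init_command() hunk above narrows the memset: the driver-private area is wiped only when the LLD does not provide init_cmd_priv(). The save/memset/restore shape itself is worth isolating; a runnable sketch with hypothetical field names:

#include <stdio.h>
#include <string.h>

struct demo_cmd {
	int req;                    /* embedded request: must survive */
	unsigned long jiffies_at_alloc;
	int retries;
	char scratch[32];           /* per-command state to wipe */
};

/* Save the survivors, wipe everything after the embedded request,
 * restore: the save/memset/restore shape of scsi_init_command(). */
static void reinit_cmd(struct demo_cmd *cmd)
{
	unsigned long jiffies_at_alloc = cmd->jiffies_at_alloc;
	int retries = cmd->retries;

	memset((char *)cmd + sizeof(cmd->req), 0,
	       sizeof(*cmd) - sizeof(cmd->req));

	cmd->jiffies_at_alloc = jiffies_at_alloc;
	cmd->retries = retries;
}

int main(void)
{
	struct demo_cmd c = { .req = 7, .jiffies_at_alloc = 1000,
			      .retries = 3, .scratch = "dirty" };

	reinit_cmd(&c);
	printf("req=%d retries=%d scratch[0]=%d\n",
	       c.req, c.retries, c.scratch[0]); /* 7, 3, 0 */
	return 0;
}

In the kernel version, the cleared region additionally extends past sizeof(*cmd) into the blk-mq-allocated driver-private area (hostt->cmd_size bytes) when no init_cmd_priv() is supplied.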
@@ -1240,8 +1215,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* commands. The device must be brought online
* before trying any recovery commands.
*/
- sdev_printk(KERN_ERR, sdev,
- "rejecting I/O to offline device\n");
+ if (!sdev->offline_already) {
+ sdev->offline_already = true;
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ }
return BLK_STS_IOERR;
case SDEV_DEL:
/*
@@ -1742,6 +1720,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
struct scatterlist *sg;
+ int ret = 0;
if (unchecked_isa_dma)
cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
@@ -1757,14 +1736,24 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
}
- return 0;
+ if (shost->hostt->init_cmd_priv) {
+ ret = shost->hostt->init_cmd_priv(shost, cmd);
+ if (ret < 0)
+ scsi_free_sense_buffer(unchecked_isa_dma,
+ cmd->sense_buffer);
+ }
+
+ return ret;
}
static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
+ struct Scsi_Host *shost = set->driver_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ if (shost->hostt->exit_cmd_priv)
+ shost->hostt->exit_cmd_priv(shost, cmd);
scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
cmd->sense_buffer);
}
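With init_cmd_priv()/exit_cmd_priv() wired into request init/exit above, an LLD can keep state in its per-command area that persists across requests. A hedged sketch of how a driver might use the hooks; every example_* name is hypothetical, and whether a given driver allocates DMA memory here is an assumption.

#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#define EXAMPLE_BUF_SZ 4096

struct example_cmd_priv {
	void *dma_buf;
	dma_addr_t dma_handle;
};

/* Runs once per request when the tag set is created; because the LLD
 * supplies init_cmd_priv(), the core no longer zeroes this area before
 * every command, so the buffer survives across I/Os. */
static int example_init_cmd_priv(struct Scsi_Host *shost,
				 struct scsi_cmnd *cmd)
{
	struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

	priv->dma_buf = dma_alloc_coherent(shost->dma_dev, EXAMPLE_BUF_SZ,
					   &priv->dma_handle, GFP_KERNEL);
	return priv->dma_buf ? 0 : -ENOMEM;
}

static void example_exit_cmd_priv(struct Scsi_Host *shost,
				  struct scsi_cmnd *cmd)
{
	struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

	dma_free_coherent(shost->dma_dev, EXAMPLE_BUF_SZ,
			  priv->dma_buf, priv->dma_handle);
}

static struct scsi_host_template example_sht = {
	.cmd_size	= sizeof(struct example_cmd_priv),
	.init_cmd_priv	= example_init_cmd_priv,
	.exit_cmd_priv	= example_exit_cmd_priv,
	/* ... */
};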
@@ -2340,6 +2329,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
break;
}
+ sdev->offline_already = false;
sdev->sdev_state = state;
return 0;
@@ -2845,6 +2835,36 @@ scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
+int
+scsi_host_block(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+ int ret = 0;
+
+ shost_for_each_device(sdev, shost) {
+ ret = scsi_internal_device_block(sdev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_host_block);
+
+int
+scsi_host_unblock(struct Scsi_Host *shost, int new_state)
+{
+ struct scsi_device *sdev;
+ int ret = 0;
+
+ shost_for_each_device(sdev, shost) {
+ ret = scsi_internal_device_unblock(sdev, new_state);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_host_unblock);
+
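A hedged sketch of the intended use of the two helpers exported above: quiesce every device on a host around a controller-wide operation, then resume I/O. example_reset_controller() is hypothetical; scsi_host_block(), scsi_host_unblock(), and SDEV_RUNNING are the real interfaces.

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int example_reset_controller(struct Scsi_Host *shost); /* hypothetical */

static int example_host_reset(struct Scsi_Host *shost)
{
	int ret;

	ret = scsi_host_block(shost);	/* all sdevs -> SDEV_BLOCK */
	if (ret)
		return ret;

	ret = example_reset_controller(shost);

	/* restart the request queues regardless of the reset outcome */
	scsi_host_unblock(shost, SDEV_RUNNING);
	return ret;
}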
/**
* scsi_kmap_atomic_sg - find and atomically map an sg-element
* @sgl: scatter-gather list
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3bff9f7aa684..22b6585e28b4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -29,7 +29,6 @@ extern int scsi_init_hosts(void);
extern void scsi_exit_hosts(void);
/* scsi.c */
-extern bool scsi_use_blk_mq;
int scsi_init_sense_cache(struct Scsi_Host *shost);
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
@@ -84,8 +83,6 @@ int scsi_eh_get_sense(struct list_head *work_q,
int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
-extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
-extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 058079f915f1..f2437a7570ce 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -236,7 +236,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_state = SDEV_CREATED;
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
- INIT_LIST_HEAD(&sdev->cmd_list);
INIT_LIST_HEAD(&sdev->starved_entry);
INIT_LIST_HEAD(&sdev->event_list);
spin_lock_init(&sdev->list_lock);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 677b5c5403d2..163dbcb741c1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -856,7 +856,7 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \
struct bin_attribute *bin_attr, \
char *buf, loff_t off, size_t count) \
{ \
- struct device *dev = container_of(kobj, struct device, kobj); \
+ struct device *dev = kobj_to_dev(kobj); \
struct scsi_device *sdev = to_scsi_device(dev); \
struct scsi_vpd *vpd_page; \
int ret = -EINVAL; \
@@ -884,7 +884,7 @@ static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
if (!sdev->inquiry)
@@ -1045,14 +1045,14 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
name = sdev_bflags_name[i];
if (name)
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%s%s", len ? " " : "", name);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%s%s", len ? " " : "", name);
else
- len += snprintf(buf + len, PAGE_SIZE - len,
- "%sINVALID_BIT(%d)", len ? " " : "", i);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%sINVALID_BIT(%d)", len ? " " : "", i);
}
if (len)
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL);
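These snprintf() to scnprintf() conversions are not cosmetic: snprintf() returns the length that would have been written without truncation, so accumulating its return value can push the write offset past the end of the buffer, while scnprintf() returns the bytes actually written. A small userspace demonstration of the failure mode:

#include <stdio.h>

int main(void)
{
	char buf[8];
	int len = 0;

	/* snprintf() reports the untruncated length: len becomes 10
	 * even though only 7 chars (plus NUL) fit, so a subsequent
	 * "buf + len" would point past the buffer. The kernel's
	 * scnprintf() would return 7 here instead. */
	len += snprintf(buf + len, sizeof(buf) - len, "%s", "0123456789");
	printf("len = %d, buffer holds \"%s\" (size %zu)\n",
	       len, buf, sizeof(buf));
	return 0;
}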
@@ -1181,7 +1181,7 @@ static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
@@ -1207,7 +1207,7 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct scsi_device *sdev = to_scsi_device(dev);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index ac35c301c792..41a950075913 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -18,11 +18,9 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- u32 lba = 0, txlen;
+ u32 lba, txlen;
- lba |= ((cdb[1] & 0x1F) << 16);
- lba |= (cdb[2] << 8);
- lba |= cdb[3];
+ lba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
/*
* From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be read (READ(6)) or written (WRITE(6)).
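The open-coded shift/or sequence is replaced by get_unaligned_be24() plus a mask that drops the three non-LBA bits at the top of byte 1, since READ(6) carries only a 21-bit LBA. A self-contained illustration, with a local stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's get_unaligned_be24(). */
static uint32_t be24(const uint8_t *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

int main(void)
{
	/* READ(6): opcode, then a 21-bit LBA in bytes 1..3; the top
	 * three bits of byte 1 are not part of the LBA. */
	uint8_t cdb[6] = { 0x08, 0xE1, 0x23, 0x45, 0x10, 0x00 };
	uint32_t lba = be24(&cdb[1]) & 0x1fffff;

	printf("lba = 0x%06x\n", (unsigned)lba); /* 0x012345 */
	return 0;
}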
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index dfc726fa34e3..0ec1b31c75a9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,9 +86,17 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+/* Worker that performs connection failure handling for unresponsive
+ * connections entirely in kernel space.
+ */
+static void stop_conn_work_fn(struct work_struct *work);
+static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_eh_timer_workq;
+static struct workqueue_struct *iscsi_destroy_workq;
+
static DEFINE_IDA(iscsi_sess_ida);
/*
* list of registered transports and lock that must
@@ -1609,8 +1617,10 @@ static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
static LIST_HEAD(sesslist);
+static LIST_HEAD(sessdestroylist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
+static LIST_HEAD(connlist_err);
static DEFINE_SPINLOCK(connlock);
static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn)
@@ -2028,6 +2038,14 @@ static void __iscsi_unbind_session(struct work_struct *work)
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}
+static void __iscsi_destroy_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, destroy_work);
+
+ session->transport->destroy_session(session);
+}
+
struct iscsi_cls_session *
iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
int dd_size)
@@ -2050,6 +2068,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
INIT_WORK(&session->block_work, __iscsi_block_session);
INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
INIT_WORK(&session->scan_work, iscsi_scan_session);
+ INIT_WORK(&session->destroy_work, __iscsi_destroy_session);
spin_lock_init(&session->lock);
/* this is released in the dev's release function */
@@ -2254,8 +2273,10 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_list_err);
conn->transport = transport;
conn->cid = cid;
+ conn->state = ISCSI_CONN_DOWN;
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2307,6 +2328,7 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
spin_lock_irqsave(&connlock, flags);
list_del(&conn->conn_list);
+ list_del(&conn->conn_list_err);
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
@@ -2421,6 +2443,51 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+static void stop_conn_work_fn(struct work_struct *work)
+{
+ struct iscsi_cls_conn *conn, *tmp;
+ unsigned long flags;
+ LIST_HEAD(recovery_list);
+
+ spin_lock_irqsave(&connlock, flags);
+ if (list_empty(&connlist_err)) {
+ spin_unlock_irqrestore(&connlock, flags);
+ return;
+ }
+ list_splice_init(&connlist_err, &recovery_list);
+ spin_unlock_irqrestore(&connlock, flags);
+
+ list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
+ uint32_t sid = iscsi_conn_get_sid(conn);
+ struct iscsi_cls_session *session;
+
+ mutex_lock(&rx_queue_mutex);
+
+ session = iscsi_session_lookup(sid);
+ if (session) {
+ if (system_state != SYSTEM_RUNNING) {
+ session->recovery_tmo = 0;
+ conn->transport->stop_conn(conn,
+ STOP_CONN_TERM);
+ } else {
+ conn->transport->stop_conn(conn,
+ STOP_CONN_RECOVER);
+ }
+ }
+
+ list_del_init(&conn->conn_list_err);
+
+ mutex_unlock(&rx_queue_mutex);
+
+ /* we don't want to hold rx_queue_mutex for too long,
+ * for instance if many conns failed at the same time,
+ * since this stalls other iscsi maintenance operations.
+ * Give other users a chance to proceed.
+ */
+ cond_resched();
+ }
+}
+
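stop_conn_work_fn() above uses the standard splice-and-drain shape: empty the shared list in O(1) under the lock, then walk the private copy with the lock dropped. A stripped-down sketch of just that pattern (kernel list API, not standalone-runnable):

static void drain_failed_conns(void)
{
	LIST_HEAD(todo);
	struct iscsi_cls_conn *conn, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&connlock, flags);
	list_splice_init(&connlist_err, &todo); /* connlist_err now empty */
	spin_unlock_irqrestore(&connlock, flags);

	/* _safe variant: each entry is unlinked while iterating */
	list_for_each_entry_safe(conn, tmp, &todo, conn_list_err) {
		list_del_init(&conn->conn_list_err);
		/* ... stop/recover conn without holding connlock ... */
	}
}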
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
@@ -2428,6 +2495,12 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_add(&conn->conn_list_err, &connlist_err);
+ spin_unlock_irqrestore(&connlock, flags);
+ queue_work(system_unbound_wq, &stop_conn_work);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2757,11 +2830,19 @@ static int
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
struct iscsi_cls_conn *conn;
+ unsigned long flags;
conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
if (!conn)
return -EINVAL;
+ spin_lock_irqsave(&connlock, flags);
+ if (!list_empty(&conn->conn_list_err)) {
+ spin_unlock_irqrestore(&connlock, flags);
+ return -EAGAIN;
+ }
+ spin_unlock_irqrestore(&connlock, flags);
+
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
if (transport->destroy_conn)
transport->destroy_conn(conn);
@@ -3563,6 +3644,23 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
else
transport->destroy_session(session);
break;
+ case ISCSI_UEVENT_DESTROY_SESSION_ASYNC:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+ if (!session)
+ err = -EINVAL;
+ else if (iscsi_session_has_conns(ev->u.d_session.sid))
+ err = -EBUSY;
+ else {
+ unsigned long flags;
+
+ /* Prevent this session from being found again */
+ spin_lock_irqsave(&sesslock, flags);
+ list_move(&session->sess_list, &sessdestroylist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ queue_work(iscsi_destroy_workq, &session->destroy_work);
+ }
+ break;
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
if (session)
@@ -3612,8 +3710,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
case ISCSI_UEVENT_START_CONN:
conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
- if (conn)
+ if (conn) {
ev->r.retcode = transport->start_conn(conn);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_UP;
+ }
else
err = -EINVAL;
break;
@@ -3810,6 +3911,26 @@ iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
+static const char *const connection_state_names[] = {
+ [ISCSI_CONN_UP] = "up",
+ [ISCSI_CONN_DOWN] = "down",
+ [ISCSI_CONN_FAILED] = "failed"
+};
+
+static ssize_t show_conn_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
+ const char *state = "unknown";
+
+ if (conn->state >= 0 &&
+ conn->state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn->state];
+
+ return sprintf(buf, "%s\n", state);
+}
+static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state,
+ NULL);
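The bounds check in show_conn_state() guards both out-of-range values and any holes a designated-initializer table might contain. The same defensive enum-to-string shape, runnable in isolation:

#include <stdio.h>

enum conn_state { CONN_UP, CONN_DOWN, CONN_FAILED };

static const char *const names[] = {
	[CONN_UP]	= "up",
	[CONN_DOWN]	= "down",
	[CONN_FAILED]	= "failed",
};

/* Bounds-check and NULL-check before dereferencing the table. */
static const char *state_name(int state)
{
	if (state < 0 ||
	    state >= (int)(sizeof(names) / sizeof(names[0])) ||
	    !names[state])
		return "unknown";
	return names[state];
}

int main(void)
{
	printf("%s\n", state_name(CONN_FAILED)); /* failed */
	printf("%s\n", state_name(42));          /* unknown */
	return 0;
}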
#define iscsi_conn_ep_attr_show(param) \
static ssize_t show_conn_ep_param_##param(struct device *dev, \
@@ -3879,6 +4000,7 @@ static struct attribute *iscsi_conn_attrs[] = {
&dev_attr_conn_tcp_xmit_wsf.attr,
&dev_attr_conn_tcp_recv_wsf.attr,
&dev_attr_conn_local_ipaddr.attr,
+ &dev_attr_conn_state.attr,
NULL,
};
@@ -3950,6 +4072,8 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_TCP_RECV_WSF;
else if (attr == &dev_attr_conn_local_ipaddr.attr)
param = ISCSI_PARAM_LOCAL_IPADDR;
+ else if (attr == &dev_attr_conn_state.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid conn attr");
return 0;
@@ -4608,8 +4732,16 @@ static __init int iscsi_transport_init(void)
goto release_nls;
}
+ iscsi_destroy_workq = create_singlethread_workqueue("iscsi_destroy");
+ if (!iscsi_destroy_workq) {
+ err = -ENOMEM;
+ goto destroy_wq;
+ }
+
return 0;
+destroy_wq:
+ destroy_workqueue(iscsi_eh_timer_workq);
release_nls:
netlink_kernel_release(nls);
unregister_flashnode_bus:
@@ -4631,6 +4763,7 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
+ destroy_workqueue(iscsi_destroy_workq);
destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index bc6506884e3b..d3311c014863 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -53,4 +53,4 @@ config SCSI_SMARTPQI
Note: the aacraid driver will not manage a smartpqi
controller. You need to enable smartpqi for smartpqi
controllers. For more information, please see
- Documentation/scsi/smartpqi.txt
+ Documentation/scsi/smartpqi.rst
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b7492568e02f..cd157f11eb22 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1614,28 +1614,28 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
if (device->target_lun_valid)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"%d:%d",
device->target,
device->lun);
else
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"-:-");
if (pqi_is_logical_device(device))
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %08x%08x",
*((u32 *)&device->scsi3addr),
*((u32 *)&device->scsi3addr[4]));
else
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %016llx", device->sas_address);
- count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+ count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
pqi_device_type(device),
device->vendor,
@@ -1643,19 +1643,19 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
if (pqi_is_logical_device(device)) {
if (device->devtype == TYPE_DISK)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"SSDSmartPathCap%c En%c %-12s",
device->raid_bypass_configured ? '+' : '-',
device->raid_bypass_enabled ? '+' : '-',
pqi_raid_level_to_string(device->raid_level));
} else {
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"AIO%c", device->aio_enabled ? '+' : '-');
if (device->devtype == TYPE_DISK ||
device->devtype == TYPE_ZBC)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" qd=%-6d", device->queue_depth);
}
@@ -6191,14 +6191,14 @@ static ssize_t pqi_lockup_action_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
if (pqi_lockup_actions[i].action == pqi_lockup_action)
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"[%s] ", pqi_lockup_actions[i].name);
else
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"%s ", pqi_lockup_actions[i].name);
}
- count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
+ count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
return count;
}
diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h
index d81b4f0ceaaa..0e0fa38f8d90 100644
--- a/drivers/scsi/snic/vnic_devcmd.h
+++ b/drivers/scsi/snic/vnic_devcmd.h
@@ -208,7 +208,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/*
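The data[0] to data[] change here (and in stex.c below) is part of the treewide move to C99 flexible array members, which lets the compiler diagnose misuse and keeps sizeof() honest. Allocation then covers header plus payload in one block; in the kernel, struct_size() from <linux/overflow.h> computes this size with overflow checking. A runnable userspace sketch:

#include <stdlib.h>
#include <string.h>

struct provinfo {
	unsigned char oui[3];
	unsigned char type;
	unsigned char data[];	/* flexible array member */
};

int main(void)
{
	size_t n = 16;
	/* One allocation covers the header and n payload bytes;
	 * sizeof(*p) correctly excludes the flexible array. */
	struct provinfo *p = malloc(sizeof(*p) + n);

	if (!p)
		return 1;
	memset(p->data, 0, n);
	free(p);
	return 0;
}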
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e4240e4ae8bb..1c270e6034d5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -79,7 +79,6 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \
CDC_MRW|CDC_MRW_W|CDC_RAM)
-static DEFINE_MUTEX(sr_mutex);
static int sr_probe(struct device *);
static int sr_remove(struct device *);
static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt);
@@ -536,9 +535,9 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
scsi_autopm_get_device(sdev);
check_disk_change(bdev);
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = cdrom_open(&cd->cdi, bdev, mode);
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
scsi_autopm_put_device(sdev);
if (ret)
@@ -551,10 +550,10 @@ out:
static void sr_block_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_cd *cd = scsi_cd(disk);
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
cdrom_release(&cd->cdi, mode);
scsi_cd_put(cd);
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -565,7 +564,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
void __user *argp = (void __user *)arg;
int ret;
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
@@ -595,7 +594,7 @@ put:
scsi_autopm_put_device(sdev);
out:
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
return ret;
}
@@ -608,7 +607,7 @@ static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsign
void __user *argp = compat_ptr(arg);
int ret;
- mutex_lock(&sr_mutex);
+ mutex_lock(&cd->lock);
ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
@@ -638,7 +637,7 @@ put:
scsi_autopm_put_device(sdev);
out:
- mutex_unlock(&sr_mutex);
+ mutex_unlock(&cd->lock);
return ret;
}
@@ -745,6 +744,7 @@ static int sr_probe(struct device *dev)
disk = alloc_disk(1);
if (!disk)
goto fail_free;
+ mutex_init(&cd->lock);
spin_lock(&sr_index_lock);
minor = find_first_zero_bit(sr_index_bits, SR_DISKS);
@@ -1055,6 +1055,8 @@ static void sr_kref_release(struct kref *kref)
put_disk(disk);
+ mutex_destroy(&cd->lock);
+
kfree(cd);
}
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index a2bb7b8bace5..339c624e04d8 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -20,6 +20,7 @@
#include <linux/genhd.h>
#include <linux/kref.h>
+#include <linux/mutex.h>
#define MAX_RETRIES 3
#define SR_TIMEOUT (30 * HZ)
@@ -51,6 +52,7 @@ typedef struct scsi_cd {
bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */
struct cdrom_device_info cdi;
+ struct mutex lock;
/* We hold gendisk and scsi_device references on probe and use
* the refs on this kref to decide when to release them */
struct kref kref;
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 17a56c87d383..1f988a1b9166 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -67,9 +67,6 @@
void sr_vendor_init(Scsi_CD *cd)
{
-#ifndef CONFIG_BLK_DEV_SR_VENDOR
- cd->vendor = VENDOR_SCSI3;
-#else
const char *vendor = cd->device->vendor;
const char *model = cd->device->model;
@@ -118,7 +115,6 @@ void sr_vendor_init(Scsi_CD *cd)
CDC_PLAY_AUDIO
);
}
-#endif
}
@@ -132,10 +128,8 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
struct ccs_modesel_head *modesel;
int rc, density = 0;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
-#endif
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
@@ -223,7 +217,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
}
break;
-#ifdef CONFIG_BLK_DEV_SR_VENDOR
case VENDOR_NEC:{
unsigned long min, sec, frame;
cgc.cmd[0] = 0xde;
@@ -316,7 +309,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
sector = buffer[11] + (buffer[10] << 8) +
(buffer[9] << 16) + (buffer[8] << 24);
break;
-#endif /* CONFIG_BLK_DEV_SR_VENDOR */
default:
/* should not happen */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 393f3019ccac..c5f9b348b438 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
- file Documentation/scsi/st.txt for more information.
+ file Documentation/scsi/st.rst for more information.
History:
Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
@@ -45,6 +45,7 @@ static const char *verstr = "20160209";
#include <linux/uaccess.h>
#include <asm/dma.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
@@ -2680,8 +2681,7 @@ static void deb_space_print(struct scsi_tape *STp, int direction, char *units, u
if (!debugging)
return;
- sc = cmd[2] & 0x80 ? 0xff000000 : 0;
- sc |= (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
+ sc = sign_extend32(get_unaligned_be24(&cmd[2]), 23);
if (direction)
sc = -sc;
st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n",
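The SPACE(6) count is a signed 24-bit big-endian field; the two open-coded lines above collapse into sign_extend32(get_unaligned_be24(...), 23). The same arithmetic, runnable in userspace:

#include <stdint.h>
#include <stdio.h>

static uint32_t be24(const uint8_t *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

/* Userspace equivalent of the kernel's sign_extend32(v, 23): treat
 * bit 23 as the sign bit of a 24-bit two's-complement value. */
static int32_t sign_extend24(uint32_t v)
{
	return (int32_t)(v << 8) >> 8;
}

int main(void)
{
	uint8_t cmd[5] = { 0x11, 0x00, 0xff, 0xff, 0xfe }; /* count -2 */
	int32_t sc = sign_extend24(be24(&cmd[2]));

	printf("sc = %d\n", (int)sc); /* -2 */
	return 0;
}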
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 33287b6bdf0e..d4f10c0d813c 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -236,7 +236,7 @@ struct req_msg {
u8 data_dir;
u8 payload_sz; /* payload size in 4-byte, not used */
u8 cdb[STEX_CDB_LENGTH];
- u32 variable[0];
+ u32 variable[];
};
struct status_msg {
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index d14c2243e02a..e2005aeddc2d 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -46,7 +46,7 @@ config SCSI_UFSHCD
The module will be called ufshcd.
To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/ufs.txt>.
+ <file:Documentation/scsi/ufs.rst>.
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a UFS device.
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 56a6a1ed5ec2..da065a259f6e 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -192,7 +192,7 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
* and device TX LCC are disabled once link startup is
* completed.
*/
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+ ufshcd_disable_host_tx_lcc(hba);
/*
* Disabling Autohibern8 feature in cadence UFS
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 5d6487350a6c..074a6a055a4c 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -235,7 +235,7 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
/* Unipro PA_Local_TX_LCC_Enable */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
+ ufshcd_disable_host_tx_lcc(hba);
/* close Unipro VS_Mk2ExtnSupport */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 53eae5fe2ade..40a66b31b31f 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -66,6 +66,21 @@ static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
}
}
+static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE) {
+ if (host->unipro_lpm)
+ hba->hba_enable_delay_us = 0;
+ else
+ hba->hba_enable_delay_us = 600;
+ }
+
+ return 0;
+}
+
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -107,6 +122,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
if (on) {
ufs_mtk_ref_clk_notify(on, res);
+ ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
@@ -132,12 +148,40 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
out:
host->ref_clk_enabled = on;
- if (!on)
+ if (!on) {
+ ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
ufs_mtk_ref_clk_notify(on, res);
+ }
return 0;
}
+static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
+ u16 gating_us, u16 ungating_us)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (hba->dev_info.clk_gating_wait_us) {
+ host->ref_clk_gating_wait_us =
+ hba->dev_info.clk_gating_wait_us;
+ } else {
+ host->ref_clk_gating_wait_us = gating_us;
+ }
+
+ host->ref_clk_ungating_wait_us = ungating_us;
+}
+
+static u32 ufs_mtk_link_get_state(struct ufs_hba *hba)
+{
+ u32 val;
+
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ val = ufshcd_readl(hba, REG_UFS_PROBE);
+ val = val >> 28;
+
+ return val;
+}
+
/**
 * ufs_mtk_setup_clocks - enables/disables clocks
* @hba: host controller instance
@@ -150,7 +194,7 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- int ret = -EINVAL;
+ int ret = 0;
/*
* In case ufs_mtk_init() is not yet done, simply ignore.
@@ -160,19 +204,24 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
- switch (status) {
- case PRE_CHANGE:
- if (!on) {
+ if (!on && status == PRE_CHANGE) {
+ if (!ufshcd_is_link_active(hba)) {
ufs_mtk_setup_ref_clk(hba, on);
ret = phy_power_off(host->mphy);
+ } else {
+ /*
+ * Gate the ref-clk if the link is in Hibern8
+ * state entered via Auto-Hibern8.
+ */
+ if (!ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_auto_hibern8_enabled(hba) &&
+ ufs_mtk_link_get_state(hba) ==
+ VS_LINK_HIBERN8)
+ ufs_mtk_setup_ref_clk(hba, on);
}
- break;
- case POST_CHANGE:
- if (on) {
- ret = phy_power_on(host->mphy);
- ufs_mtk_setup_ref_clk(hba, on);
- }
- break;
+ } else if (on && status == POST_CHANGE) {
+ ret = phy_power_on(host->mphy);
+ ufs_mtk_setup_ref_clk(hba, on);
}
return ret;
@@ -285,11 +334,36 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
return ret;
}
+static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
+{
+ int ret;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ ret = ufshcd_dme_set(hba,
+ UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
+ lpm);
+ if (!ret)
+ host->unipro_lpm = lpm;
+
+ return ret;
+}
+
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
int ret;
u32 tmp;
+ ufs_mtk_unipro_set_pm(hba, 0);
+
+ /*
+ * Set PA_Local_TX_LCC_Enable to 0 before link startup
+ * to make sure that both host and device TX LCC are
+ * disabled once link startup is completed.
+ */
+ ret = ufshcd_disable_host_tx_lcc(hba);
+ if (ret)
+ return ret;
+
/* disable deep stall */
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
if (ret)
@@ -321,9 +395,6 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
- /* disable device LCC */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
-
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
@@ -390,9 +461,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
if (err)
return err;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 0);
+ err = ufs_mtk_unipro_set_pm(hba, 0);
if (err)
return err;
@@ -413,14 +482,10 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 1);
+ err = ufs_mtk_unipro_set_pm(hba, 1);
if (err) {
/* Resume UniPro state for following error recovery */
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- 0);
+ ufs_mtk_unipro_set_pm(hba, 0);
return err;
}
@@ -436,10 +501,11 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
err = ufs_mtk_link_set_lpm(hba);
if (err)
return -EAGAIN;
- phy_power_off(host->mphy);
- ufs_mtk_setup_ref_clk(hba, false);
}
+ if (!ufshcd_is_link_active(hba))
+ phy_power_off(host->mphy);
+
return 0;
}
@@ -448,9 +514,10 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
int err;
- if (ufshcd_is_link_hibern8(hba)) {
- ufs_mtk_setup_ref_clk(hba, true);
+ if (!ufshcd_is_link_active(hba))
phy_power_on(host->mphy);
+
+ if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
return err;
@@ -477,9 +544,24 @@ static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 mid = dev_info->wmanufacturerid;
- if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG)
+ if (mid == UFS_VENDOR_SAMSUNG) {
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
+ }
+
+ /*
+ * Decide how long to wait before gating and after
+ * ungating the reference clock, according to each
+ * vendor's requirements.
+ */
+ if (mid == UFS_VENDOR_SAMSUNG)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
+ else if (mid == UFS_VENDOR_SKHYNIX)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
+ else if (mid == UFS_VENDOR_TOSHIBA)
+ ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
return 0;
}
@@ -494,6 +576,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.init = ufs_mtk_init,
.setup_clocks = ufs_mtk_setup_clocks,
+ .hce_enable_notify = ufs_mtk_hce_enable_notify,
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index fccdd979d6fb..5bbd3e9cbae2 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -54,6 +54,18 @@
#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
/*
+ * Vendor specific link state
+ */
+enum {
+ VS_LINK_DISABLED = 0,
+ VS_LINK_DOWN = 1,
+ VS_LINK_UP = 2,
+ VS_LINK_HIBERN8 = 3,
+ VS_LINK_LOST = 4,
+ VS_LINK_CFG = 5,
+};
+
+/*
* SiP commands
*/
#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
@@ -79,7 +91,10 @@ enum {
struct ufs_mtk_host {
struct ufs_hba *hba;
struct phy *mphy;
+ bool unipro_lpm;
bool ref_clk_enabled;
+ u16 ref_clk_ungating_wait_us;
+ u16 ref_clk_gating_wait_us;
};
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index c69c29a1ceb9..19aa5c44e0da 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -10,6 +10,7 @@
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
+#include <linux/devfreq.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -38,7 +39,6 @@ enum {
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
@@ -554,9 +554,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
* completed.
*/
if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
- 0);
+ err = ufshcd_disable_host_tx_lcc(hba);
break;
case POST_CHANGE:
@@ -674,7 +672,7 @@ static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
}
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
int err = 0;
@@ -705,7 +703,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
vote = ufs_qcom_get_bus_vote(host, mode);
if (vote >= 0)
- err = ufs_qcom_set_bus_vote(host, vote);
+ err = __ufs_qcom_set_bus_vote(host, vote);
else
err = vote;
@@ -716,6 +714,35 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return err;
}
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int vote, err;
+
+ /*
+ * In case ufs_qcom_init() is not yet done, simply ignore.
+ * ufs_qcom_set_bus_vote() will be called again from
+ * ufs_qcom_init() once initialization is done.
+ */
+ if (!host)
+ return 0;
+
+ if (on) {
+ vote = host->bus_vote.saved_vote;
+ if (vote == host->bus_vote.min_bw_vote)
+ ufs_qcom_update_bus_bw_vote(host);
+ } else {
+ vote = host->bus_vote.min_bw_vote;
+ }
+
+ err = __ufs_qcom_set_bus_vote(host, vote);
+ if (err)
+ dev_err(hba->dev, "%s: set bus vote failed %d\n",
+ __func__, err);
+
+ return err;
+}
+
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -792,7 +819,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return 0;
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int ufs_qcom_set_bus_vote(struct ufs_hba *host, bool on)
{
return 0;
}
@@ -817,11 +844,27 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
/*
* If we are here to disable this clock it might be immediately
* after entering into hibern8 in which case we need to make
- * sure that device ref_clk is active at least 1us after the
+ * sure that device ref_clk is active for specific time after
* hibern8 enter.
*/
- if (!enable)
- udelay(1);
+ if (!enable) {
+ unsigned long gating_wait;
+
+ gating_wait = host->hba->dev_info.clk_gating_wait_us;
+ if (!gating_wait) {
+ udelay(1);
+ } else {
+ /*
+ * bRefClkGatingWaitTime defines the minimum
+ * time for which the reference clock is
+ * required by the device during the transition
+ * from HS-MODE to LS-MODE or HIBERN8 state.
+ * Add some extra delay to be on the safe side.
+ */
+ gating_wait += 10;
+ usleep_range(gating_wait, gating_wait + 10);
+ }
+ }
writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
@@ -898,6 +941,20 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, true);
+
+ if (host->hw_ver.major >= 0x4) {
+ if (dev_req_params->gear_tx == UFS_HS_G4) {
+ /* INITIAL ADAPT */
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_INITIAL_ADAPT);
+ } else {
+ /* NO ADAPT */
+ ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_NO_ADAPT);
+ }
+ }
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@@ -956,6 +1013,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+ if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
+ hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+
return err;
}
@@ -1030,8 +1090,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- int vote = 0;
+ int err = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1041,28 +1100,28 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
- if (on && (status == POST_CHANGE)) {
- /* enable the device ref clock for HS mode*/
- if (ufshcd_is_hs_mode(&hba->pwr_info))
- ufs_qcom_dev_ref_clk_ctrl(host, true);
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
-
- } else if (!on && (status == PRE_CHANGE)) {
- if (!ufs_qcom_is_link_active(hba)) {
- /* disable device ref_clk */
- ufs_qcom_dev_ref_clk_ctrl(host, false);
+ switch (status) {
+ case PRE_CHANGE:
+ if (on) {
+ err = ufs_qcom_set_bus_vote(hba, true);
+ } else {
+ if (!ufs_qcom_is_link_active(hba)) {
+ /* disable device ref_clk */
+ ufs_qcom_dev_ref_clk_ctrl(host, false);
+ }
}
-
- vote = host->bus_vote.min_bw_vote;
+ break;
+ case POST_CHANGE:
+ if (on) {
+ /* enable the device ref clock for HS mode */
+ if (ufshcd_is_hs_mode(&hba->pwr_info))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
+ } else {
+ err = ufs_qcom_set_bus_vote(hba, false);
+ }
+ break;
}
- err = ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
return err;
}
@@ -1238,6 +1297,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
+ ufs_qcom_set_bus_vote(hba, true);
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
@@ -1630,6 +1690,29 @@ static void ufs_qcom_device_reset(struct ufs_hba *hba)
usleep_range(10, 15);
}
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *p,
+ void *data)
+{
+ static struct devfreq_simple_ondemand_data *d;
+
+ if (!data)
+ return;
+
+ d = (struct devfreq_simple_ondemand_data *)data;
+ p->polling_ms = 60;
+ d->upthreshold = 70;
+ d->downdifferential = 5;
+}
+#else
+static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *p,
+ void *data)
+{
+}
+#endif
+
/**
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1651,6 +1734,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
.device_reset = ufs_qcom_device_reset,
+ .config_scaling_param = ufs_qcom_config_scaling_param,
};
/**
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index dbdf8b01abed..92a63eebdca9 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -210,8 +210,10 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
if (param_size > 8)
return -EINVAL;
+ pm_runtime_get_sync(hba->dev);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
+ pm_runtime_put_sync(hba->dev);
if (ret)
return -EINVAL;
switch (param_size) {
@@ -558,6 +560,7 @@ static ssize_t _name##_show(struct device *dev, \
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
if (!desc_buf) \
return -ENOMEM; \
+ pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
0, 0, desc_buf, &desc_len); \
@@ -574,6 +577,7 @@ static ssize_t _name##_show(struct device *dev, \
goto out; \
ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \
out: \
+ pm_runtime_put_sync(hba->dev); \
kfree(desc_buf); \
return ret; \
} \
@@ -604,9 +608,13 @@ static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
bool flag; \
+ int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
- if (ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, &flag)) \
+ pm_runtime_get_sync(hba->dev); \
+ ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
+ QUERY_FLAG_IDN##_uname, &flag); \
+ pm_runtime_put_sync(hba->dev); \
+ if (ret) \
return -EINVAL; \
return sprintf(buf, "%s\n", flag ? "true" : "false"); \
} \
@@ -644,8 +652,12 @@ static ssize_t _name##_show(struct device *dev, \
{ \
struct ufs_hba *hba = dev_get_drvdata(dev); \
u32 value; \
- if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, 0, 0, &value)) \
+ int ret; \
+ pm_runtime_get_sync(hba->dev); \
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
+ QUERY_ATTR_IDN##_uname, 0, 0, &value); \
+ pm_runtime_put_sync(hba->dev); \
+ if (ret) \
return -EINVAL; \
return sprintf(buf, "0x%08X\n", value); \
} \
@@ -766,9 +778,13 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba = shost_priv(sdev->host);
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
+ int ret;
- if (ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value))
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
+ pm_runtime_put_sync(hba->dev);
+ if (ret)
return -EINVAL;
return sprintf(buf, "0x%08X\n", value);
}
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index cfe380348bf0..990cb48e2403 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -167,6 +167,7 @@ enum attr_idn {
QUERY_ATTR_IDN_FFU_STATUS = 0x14,
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
};
/* Descriptor idn for Query requests */
@@ -534,6 +535,8 @@ struct ufs_dev_info {
u16 wmanufacturerid;
/*UFS device Product Name */
u8 *model;
+ u16 wspecversion;
+ u32 clk_gating_wait_us;
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index d0ab147f98d3..df7a1e6805a3 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -15,6 +15,7 @@
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
#define UFS_VENDOR_SKHYNIX 0x1AD
+#define UFS_VENDOR_WDC 0x145
/**
* ufs_dev_fix - ufs device quirk info
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 3b19de3ae9a3..8f78a8151499 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -44,7 +44,7 @@ static int ufs_intel_disable_lcc(struct ufs_hba *hba)
ufshcd_dme_get(hba, attr, &lcc_enable);
if (lcc_enable)
- ufshcd_dme_set(hba, attr, 0);
+ ufshcd_disable_host_tx_lcc(hba);
return 0;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2d705694636c..e04e8b8bdca6 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -42,6 +42,7 @@
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/blk-pm.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -91,6 +92,9 @@
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
+/* Default value of wait time before gating device ref clock */
+#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -532,6 +536,18 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
+{
+ if (!us)
+ return;
+
+ if (us < 10)
+ udelay(us);
+ else
+ usleep_range(us, us + tolerance);
+}
+EXPORT_SYMBOL_GPL(ufshcd_delay_us);
+
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
@@ -642,11 +658,7 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos),
- REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**
@@ -656,10 +668,7 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
*/
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
+ ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
@@ -1191,6 +1200,9 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ /* Override with the closest supported frequency */
+ *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@@ -1205,8 +1217,11 @@ static int ufshcd_devfreq_target(struct device *dev,
goto out;
}
- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ /* Decide based on the rounded-off frequency and update */
scale_up = (*freq == clki->max_freq) ? true : false;
+ if (!scale_up)
+ *freq = clki->min_freq;
+ /* Update the frequency */
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
@@ -1254,6 +1269,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;
+ struct list_head *clk_list = &hba->clk_list_head;
+ struct ufs_clk_info *clki;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
@@ -1264,6 +1281,13 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
if (!scaling->window_start_t)
goto start_window;
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
+ /*
+ * If current frequency is 0, the ondemand governor assumes that no
+ * initial frequency has been set and always requests the maximum
+ * frequency.
+ */
+ stat->current_frequency = clki->curr_freq;
if (scaling->is_busy_started)
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
@@ -1292,6 +1316,17 @@ static struct devfreq_dev_profile ufs_devfreq_profile = {
.get_dev_status = ufshcd_devfreq_get_dev_status,
};
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
+ .upthreshold = 70,
+ .downdifferential = 5,
+};
+
+static void *gov_data = &ufs_ondemand_data;
+#else
+static void *gov_data; /* NULL */
+#endif
+
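
The simple_ondemand governor scales up when the measured load exceeds
upthreshold (70% here) and only scales back down once load falls below
upthreshold - downdifferential (65%), a 5% hysteresis band. The governor
proper computes a proportional target below the threshold, but since this
driver registers only two OPPs (min_freq and max_freq, via dev_pm_opp_add()
below) and ufshcd_devfreq_target() rounds through clk_round_rate(), the
effective behaviour is a two-point toggle. A rough sketch of that decision,
assuming load is expressed in percent:

	/*
	 * Rough sketch of the effective decision for these thresholds; the
	 * real governor is drivers/devfreq/governor_simpleondemand.c.
	 */
	static unsigned long ufs_pick_freq(unsigned int load_pct,
					   unsigned long min_freq,
					   unsigned long max_freq,
					   unsigned long cur_freq)
	{
		if (load_pct > 70)		/* upthreshold: go to max */
			return max_freq;
		if (load_pct < 70 - 5)		/* below the hysteresis band */
			return min_freq;
		return cur_freq;		/* inside the band: hold */
	}
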
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
@@ -1307,10 +1342,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0);
+ ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
+ gov_data);
devfreq = devfreq_add_device(hba->dev,
&ufs_devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
- NULL);
+ gov_data);
if (IS_ERR(devfreq)) {
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
@@ -1518,6 +1555,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2093,13 +2135,8 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16)(sg_segments *
- sizeof(struct ufshcd_sg_entry)));
- else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)sg_segments);
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
@@ -2363,6 +2400,27 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
+{
+ struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
+ dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
+ i * sizeof(struct utp_transfer_cmd_desc);
+ u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
+ response_upiu);
+ u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+ lrb->utr_descriptor_ptr = utrdlp + i;
+ lrb->utrd_dma_addr = hba->utrdl_dma_addr +
+ i * sizeof(struct utp_transfer_req_desc);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+ lrb->ucd_req_dma_addr = cmd_desc_element_addr;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
+}
+
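
ufshcd_init_lrb() derives every per-tag pointer from two base allocations;
the response UPIU and PRD table are carved out of each command descriptor
at fixed offsets. Roughly, for tag i:

	/*
	 * Sketch of the per-tag layout consolidated by this helper:
	 *
	 *   utrdl_base_addr[i]                  -> utr_descriptor_ptr
	 *   ucdl_base_addr[i] + 0               -> ucd_req_ptr  (command UPIU)
	 *   ucdl_base_addr[i] + response_offset -> ucd_rsp_ptr  (response UPIU)
	 *   ucdl_base_addr[i] + prdt_offset     -> ucd_prdt_ptr (scatter list)
	 *
	 * The matching *_dma_addr fields apply the same arithmetic on the
	 * DMA side, starting from utrdl_dma_addr/ucdl_dma_addr.
	 */
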
/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @host: SCSI host pointer
@@ -2452,7 +2510,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_vops_setup_xfer_req(hba, tag, true);
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2639,7 +2697,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
+ ufshcd_vops_setup_xfer_req(hba, tag, false);
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -3276,6 +3334,31 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
param_offset, param_read_buf, param_size);
}
+static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
+{
+ int err = 0;
+ u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
+
+ if (hba->dev_info.wspecversion >= 0x300) {
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
+ &gating_wait);
+ if (err)
+ dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
+ err, gating_wait);
+
+ if (gating_wait == 0) {
+ gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
+ dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
+ gating_wait);
+ }
+
+ hba->dev_info.clk_gating_wait_us = gating_wait;
+ }
+
+ return err;
+}
+
/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
@@ -3373,7 +3456,6 @@ out:
*/
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
- struct utp_transfer_cmd_desc *cmd_descp;
struct utp_transfer_req_desc *utrdlp;
dma_addr_t cmd_desc_dma_addr;
dma_addr_t cmd_desc_element_addr;
@@ -3383,7 +3465,6 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
int i;
utrdlp = hba->utrdl_base_addr;
- cmd_descp = hba->ucdl_base_addr;
response_offset =
offsetof(struct utp_transfer_cmd_desc, response_upiu);
@@ -3403,36 +3484,13 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16(response_offset);
- utrdlp[i].prd_table_offset =
- cpu_to_le16(prdt_offset);
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE);
- } else {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
- cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
- }
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset >> 2);
+ utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
- hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
- hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
- (i * sizeof(struct utp_transfer_req_desc));
- hba->lrb[i].ucd_req_ptr =
- (struct utp_upiu_req *)(cmd_descp + i);
- hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
- hba->lrb[i].ucd_rsp_ptr =
- (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
- hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
- response_offset;
- hba->lrb[i].ucd_prdt_ptr =
- (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
- hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
- prdt_offset;
+ ufshcd_init_lrb(hba, &hba->lrb[i], i);
}
}
@@ -3460,52 +3518,6 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
"dme-link-startup: error code %d\n", ret);
return ret;
}
-/**
- * ufshcd_dme_reset - UIC command for DME_RESET
- * @hba: per adapter instance
- *
- * DME_RESET command is issued in order to reset UniPro stack.
- * This function now deal with cold reset.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_reset(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_RESET;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
-
- return ret;
-}
-
-/**
- * ufshcd_dme_enable - UIC command for DME_ENABLE
- * @hba: per adapter instance
- *
- * DME_ENABLE command is issued in order to enable UniPro stack.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_enable(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_ENABLE;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
-
- return ret;
-}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -4224,7 +4236,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
}
/**
- * ufshcd_hba_execute_hce - initialize the controller
+ * ufshcd_hba_enable - initialize the controller
* @hba: per adapter instance
*
* The controller resets itself and controller firmware initialization
@@ -4233,7 +4245,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
*
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
{
int retry;
@@ -4259,10 +4271,10 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
* instruction might be read back.
* This delay can be changed based on the controller.
*/
- usleep_range(1000, 1100);
+ ufshcd_delay_us(hba->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
- retry = 10;
+ retry = 50;
while (ufshcd_is_hba_active(hba)) {
if (retry) {
retry--;
@@ -4271,7 +4283,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
"Controller enable failed\n");
return -EIO;
}
- usleep_range(5000, 5100);
+ usleep_range(1000, 1100);
}
/* enable UIC related interrupts */
@@ -4281,37 +4293,11 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
return 0;
}
-
-int ufshcd_hba_enable(struct ufs_hba *hba)
-{
- int ret;
-
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
- ufshcd_set_link_off(hba);
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
-
- /* enable UIC related interrupts */
- ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
- ret = ufshcd_dme_reset(hba);
- if (!ret) {
- ret = ufshcd_dme_enable(hba);
- if (!ret)
- ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
- if (ret)
- dev_err(hba->dev,
- "Host controller enable failed with non-hce\n");
- }
- } else {
- ret = ufshcd_hba_execute_hce(hba);
- }
-
- return ret;
-}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
- int tx_lanes, i, err = 0;
+ int tx_lanes = 0, i, err = 0;
if (!peer)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
@@ -4737,8 +4723,15 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* UFS device needs urgent BKOPs.
*/
if (!hba->pm_op_in_progress &&
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
- schedule_work(&hba->eeh_work);
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
+ schedule_work(&hba->eeh_work)) {
+ /*
+ * Prevent suspend once eeh_work is scheduled
+ * to avoid a deadlock between ufshcd_suspend
+ * and the exception event handler.
+ */
+ pm_runtime_get_noresume(hba->dev);
+ }
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -4876,8 +4869,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
* false interrupt if device completes another request after resetting
* aggregation and before reading the DB.
*/
- if (ufshcd_is_intr_aggr_allowed(hba) &&
- !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
+ if (ufshcd_is_intr_aggr_allowed(hba))
ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -5191,7 +5183,14 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
out:
ufshcd_scsi_unblock_requests(hba);
- pm_runtime_put_sync(hba->dev);
+ /*
+ * pm_runtime_get_noresume is called while scheduling
+ * eeh_work to avoid suspend racing with exception work.
+ * Hence decrement usage counter using pm_runtime_put_noidle
+ * to allow suspend on completion of exception event handler.
+ */
+ pm_runtime_put_noidle(hba->dev);
+ pm_runtime_put(hba->dev);
return;
}
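
The put_noidle/put pair balances two separate references: the
pm_runtime_get_noresume() taken in the completion path when eeh_work was
scheduled, and the reference the handler itself takes at entry (the
original pm_runtime_put_sync() removed above implies a matching get there).
A pairing sketch, under that assumption:

	/*
	 * Sketch of the reference pairing (assuming the handler takes its
	 * own runtime-PM reference at entry, as the put here implies):
	 *
	 *   completion path: pm_runtime_get_noresume(dev);
	 *                    schedule_work(&eeh_work);
	 *   handler entry:   pm_runtime_get_sync(dev);
	 *   handler exit:    pm_runtime_put_noidle(dev);  balances get_noresume()
	 *                    pm_runtime_put(dev);         balances get_sync()
	 */
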
@@ -5486,7 +5485,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
u32 intr_mask)
{
- if (!ufshcd_is_auto_hibern8_supported(hba))
+ if (!ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_auto_hibern8_enabled(hba))
return false;
if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
@@ -6482,11 +6482,12 @@ out:
return icc_level;
}
-static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
int buff_len = hba->desc_size.pwr_desc;
u8 *desc_buf;
+ u32 icc_level;
desc_buf = kmalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
@@ -6501,25 +6502,32 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
goto out;
}
- hba->init_prefetch_data.icc_level =
- ufshcd_find_max_sup_active_icc_level(hba,
- desc_buf, buff_len);
- dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
- __func__, hba->init_prefetch_data.icc_level);
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
+ buff_len);
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
if (ret)
dev_err(hba->dev,
"%s: Failed configuring bActiveICCLevel = %d ret = %d",
- __func__, hba->init_prefetch_data.icc_level , ret);
+ __func__, icc_level, ret);
out:
kfree(desc_buf);
}
+static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
+{
+ scsi_autopm_get_device(sdev);
+ blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
+ if (sdev->rpm_autosuspend)
+ pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
+ RPM_AUTOSUSPEND_DELAY_MS);
+ scsi_autopm_put_device(sdev);
+}
+
/**
* ufshcd_scsi_add_wlus - Adds required W-LUs
* @hba: per-adapter instance
@@ -6559,6 +6567,7 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
hba->sdev_ufs_device = NULL;
goto out;
}
+ ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
scsi_device_put(hba->sdev_ufs_device);
sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
@@ -6567,14 +6576,17 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
ret = PTR_ERR(sdev_rpmb);
goto remove_sdev_ufs_device;
}
+ ufshcd_blk_pm_runtime_init(sdev_rpmb);
scsi_device_put(sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
- if (IS_ERR(sdev_boot))
+ if (IS_ERR(sdev_boot)) {
dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
- else
+ } else {
+ ufshcd_blk_pm_runtime_init(sdev_boot);
scsi_device_put(sdev_boot);
+ }
goto out;
remove_sdev_ufs_device:
@@ -6614,6 +6626,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+ /* getting Specification Version in big endian format */
+ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+
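
wSpecVersion is BCD-coded, which is why ufshcd_get_ref_clk_gating_wait()
can gate on a plain >= 0x300 comparison: a UFS 3.0 device reports 0x0300
and a UFS 2.1 device 0x0210. A hypothetical helper, not part of this
patch, making the decoding explicit:

	/* Hypothetical: split a BCD-coded wSpecVersion such as 0x0310. */
	static inline void ufs_spec_version(u16 wspecversion,
					    unsigned int *major,
					    unsigned int *minor)
	{
		*major = (wspecversion >> 8) & 0xff;	/* 0x03 for UFS 3.x */
		*minor = (wspecversion >> 4) & 0x0f;	/* 0x01 for UFS x.1 */
	}
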
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
@@ -6811,14 +6827,14 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
ufshcd_tune_pa_hibern8time(hba);
}
+ ufshcd_vops_apply_dev_quirks(hba);
+
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
-
- ufshcd_vops_apply_dev_quirks(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -6991,6 +7007,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
goto out;
}
+ ufshcd_get_ref_clk_gating_wait(hba);
+
ufs_fixup_device_setup(hba);
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
@@ -7014,8 +7032,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
{
int ret;
- ufshcd_init_icc_levels(hba);
-
/* Add required well known logical units to scsi mid layer */
ret = ufshcd_scsi_add_wlus(hba);
if (ret)
@@ -7113,6 +7129,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
}
}
+ /*
+ * bActiveICCLevel is volatile for the UFS device (as per the latest
+ * v2.1 spec) and for removable UFS cards as well, hence always set
+ * the parameter.
+ * Note: the error handler may issue a device reset, resetting
+ * bActiveICCLevel as well, so it is always safe to set this here.
+ */
+ ufshcd_set_active_icc_lvl(hba);
+
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -7241,6 +7265,11 @@ static int ufshcd_config_vreg(struct device *dev,
name = vreg->name;
if (regulator_count_voltages(reg) > 0) {
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+
if (vreg->min_uV && vreg->max_uV) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
@@ -7251,11 +7280,6 @@ static int ufshcd_config_vreg(struct device *dev,
goto out;
}
}
-
- uA_load = on ? vreg->max_uA : 0;
- ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
- if (ret)
- goto out;
}
out:
return ret;
@@ -7395,16 +7419,9 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (list_empty(head))
goto out;
- /*
- * vendor specific setup_clocks ops may depend on clocks managed by
- * this standard driver hence call the vendor specific setup_clocks
- * before disabling the clocks managed here.
- */
- if (!on) {
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
- if (ret)
- return ret;
- }
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
@@ -7428,16 +7445,9 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- /*
- * vendor specific setup_clocks ops may depend on clocks managed by
- * this standard driver hence call the vendor specific setup_clocks
- * after enabling the clocks managed here.
- */
- if (on) {
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
- if (ret)
- return ret;
- }
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
out:
if (ret) {
@@ -7931,6 +7941,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto enable_gating;
}
+ flush_work(&hba->eeh_work);
ret = ufshcd_link_state_transition(hba, req_link_state, 1);
if (ret)
goto set_dev_active;
@@ -8402,6 +8413,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
+ hba->hba_enable_delay_us = 1000;
err = ufshcd_hba_init(hba);
if (err)
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2ae6c7c8528c..dd1ee277069a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -55,6 +55,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -326,6 +328,9 @@ struct ufs_hba_variant_ops {
void (*dbg_register_dump)(struct ufs_hba *hba);
int (*phy_initialization)(struct ufs_hba *);
void (*device_reset)(struct ufs_hba *hba);
+ void (*config_scaling_param)(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ void *data);
};
/* clock gating state */
@@ -403,15 +408,6 @@ struct ufs_clk_scaling {
bool is_suspended;
};
-/**
- * struct ufs_init_prefetch - contains data that is pre-fetched once during
- * initialization
- * @icc_level: icc level which was read during initialization
- */
-struct ufs_init_prefetch {
- u32 icc_level;
-};
-
#define UFS_ERR_REG_HIST_LENGTH 8
/**
* struct ufs_err_reg_hist - keeps history of errors
@@ -469,6 +465,85 @@ struct ufs_stats {
struct ufs_err_reg_hist task_abort;
};
+enum ufshcd_quirks {
+ /* Interrupt aggregation support is broken */
+ UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
+
+ /*
+ * A delay before each DME command is required, as the UniPro
+ * layer has shown instabilities.
+ */
+ UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
+
+ /*
+ * If the UFS host controller has issues processing LCC (Line
+ * Control Command) coming from the device, then enable this quirk.
+ * When this quirk is enabled, the host controller driver should
+ * disable LCC transmission on the UFS device (by clearing the
+ * TX_LCC_ENABLE attribute of the device to 0).
+ */
+ UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
+
+ /*
+ * The attribute PA_RXHSUNTERMCAP specifies whether or not the
+ * inbound Link supports unterminated line in HS mode. Setting this
+ * attribute to 1 fixes moving to HS gear.
+ */
+ UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
+
+ /*
+ * This quirk needs to be enabled if the host controller only allows
+ * accessing the peer dme attributes in AUTO mode (FAST AUTO or
+ * SLOW AUTO).
+ */
+ UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
+
+ /*
+ * This quirk needs to be enabled if the host controller doesn't
+ * advertise the correct version in UFS_VER register. If this quirk
+ * is enabled, standard UFS host driver will call the vendor specific
+ * ops (get_ufs_hci_version) to get the correct version.
+ */
+ UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
+};
+
+enum ufshcd_caps {
+ /* Allow dynamic clk gating */
+ UFSHCD_CAP_CLK_GATING = 1 << 0,
+
+ /* Allow hibern8 with clk gating */
+ UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
+
+ /* Allow dynamic clk scaling */
+ UFSHCD_CAP_CLK_SCALING = 1 << 2,
+
+ /* Allow auto bkops to be enabled during runtime suspend */
+ UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
+
+ /*
+ * This capability allows the host controller driver to use the UFS HCI's
+ * interrupt aggregation capability.
+ * CAUTION: Enabling this might reduce overall UFS throughput.
+ */
+ UFSHCD_CAP_INTR_AGGR = 1 << 4,
+
+ /*
+ * This capability allows the device auto-bkops to be always enabled
+ * except during suspend (both runtime and system suspend).
+ * Enabling this capability means that the device will always be allowed
+ * to do background operations when it's active, but this might degrade
+ * the performance of ongoing read/write operations.
+ */
+ UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
+
+ /*
+ * This capability allows the host controller driver to automatically
+ * enable runtime power management by itself instead of waiting
+ * for userspace to control the power management.
+ */
+ UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+};
+
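
Moving the quirk and capability bits out of struct ufs_hba into named
enums changes nothing binary-wise (the values are identical) but lets
them show up in debuggers and kernel-doc. A host driver combines them
exactly as before; an illustrative fragment, not from this patch:

	/* Illustrative only: a vendor init hook enabling common caps. */
	hba->caps |= UFSHCD_CAP_CLK_GATING |
		     UFSHCD_CAP_HIBERN8_WITH_CLK_GATING |
		     UFSHCD_CAP_RPM_AUTOSUSPEND;
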
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -501,7 +576,6 @@ struct ufs_stats {
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
- * @init_prefetch_data: data pre-fetched during initialization
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
@@ -572,68 +646,6 @@ struct ufs_hba {
bool is_irq_enabled;
enum ufs_ref_clk_freq dev_ref_clk_freq;
- /* Interrupt aggregation support is broken */
- #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
-
- /*
- * delay before each dme command is required as the unipro
- * layer has shown instabilities
- */
- #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2
-
- /*
- * If UFS host controller is having issue in processing LCC (Line
- * Control Command) coming from device then enable this quirk.
- * When this quirk is enabled, host controller driver should disable
- * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
- * attribute of device to 0).
- */
- #define UFSHCD_QUIRK_BROKEN_LCC 0x4
-
- /*
- * The attribute PA_RXHSUNTERMCAP specifies whether or not the
- * inbound Link supports unterminated line in HS mode. Setting this
- * attribute to 1 fixes moving to HS gear.
- */
- #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8
-
- /*
- * This quirk needs to be enabled if the host contoller only allows
- * accessing the peer dme attributes in AUTO mode (FAST AUTO or
- * SLOW AUTO).
- */
- #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10
-
- /*
- * This quirk needs to be enabled if the host contoller doesn't
- * advertise the correct version in UFS_VER register. If this quirk
- * is enabled, standard UFS host driver will call the vendor specific
- * ops (get_ufs_hci_version) to get the correct version.
- */
- #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20
-
- /*
- * This quirk needs to be enabled if the host contoller regards
- * resolution of the values of PRDTO and PRDTL in UTRD as byte.
- */
- #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
-
- /*
- * Clear handling for transfer/task request list is just opposite.
- */
- #define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR 0x100
-
- /*
- * This quirk needs to be enabled if host controller doesn't allow
- * that the interrupt aggregation timer and counter are reset by s/w.
- */
- #define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR 0x200
-
- /*
- * This quirks needs to be enabled if host controller cannot be
- * enabled via HCE register.
- */
- #define UFSHCI_QUIRK_BROKEN_HCE 0x400
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
@@ -650,8 +662,8 @@ struct ufs_hba {
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
+ u16 hba_enable_delay_us;
bool is_powered;
- struct ufs_init_prefetch init_prefetch_data;
/* Work Queues */
struct work_struct eh_work;
@@ -688,34 +700,6 @@ struct ufs_hba {
struct ufs_clk_gating clk_gating;
/* Control to enable/disable host capabilities */
u32 caps;
- /* Allow dynamic clk gating */
-#define UFSHCD_CAP_CLK_GATING (1 << 0)
- /* Allow hiberb8 with clk gating */
-#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
- /* Allow dynamic clk scaling */
-#define UFSHCD_CAP_CLK_SCALING (1 << 2)
- /* Allow auto bkops to enabled during runtime suspend */
-#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
- /*
- * This capability allows host controller driver to use the UFS HCI's
- * interrupt aggregation capability.
- * CAUTION: Enabling this might reduce overall UFS throughput.
- */
-#define UFSHCD_CAP_INTR_AGGR (1 << 4)
- /*
- * This capability allows the device auto-bkops to be always enabled
- * except during suspend (both runtime and suspend).
- * Enabling this capability means that device will always be allowed
- * to do background operation when it's active but it might degrade
- * the performance of ongoing read/write operations.
- */
-#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
- /*
- * This capability allows host controller driver to automatically
- * enable runtime power management by itself instead of waiting
- * for userspace to control the power management.
- */
-#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -773,6 +757,11 @@ static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
}
+static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
+{
+ return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -802,6 +791,7 @@ int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
@@ -908,6 +898,11 @@ static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
pwr_info->pwr_tx == FASTAUTO_MODE);
}
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+}
+
/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -1092,6 +1087,14 @@ static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
}
}
+static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile
+ *profile, void *data)
+{
+ if (hba->vops && hba->vops->config_scaling_param)
+ hba->vops->config_scaling_param(hba, profile, data);
+}
+
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/*
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 3dc4d8b76509..766d551df3fc 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -146,6 +146,12 @@
#define PA_SLEEPNOCONFIGTIME 0x15A2
#define PA_STALLNOCONFIGTIME 0x15A3
#define PA_SAVECONFIGTIME 0x15A4
+#define PA_TXHSADAPTTYPE 0x15D4
+
+/* Adapt type for PA_TXHSADAPTTYPE attribute */
+#define PA_REFRESH_ADAPT 0x00
+#define PA_INITIAL_ADAPT 0x01
+#define PA_NO_ADAPT 0x03
#define PA_TACTIVATE_TIME_UNIT_US 10
#define PA_HIBERN8_TIME_UNIT_US 100
@@ -203,6 +209,7 @@ enum ufs_hs_gear_tag {
UFS_HS_G1, /* HS Gear 1 (default for reset) */
UFS_HS_G2, /* HS Gear 2 */
UFS_HS_G3, /* HS Gear 3 */
+ UFS_HS_G4, /* HS Gear 4 */
};
enum ufs_unipro_ver {
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index bfec84aacd90..0e0910c5b942 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -742,7 +742,6 @@ static struct scsi_host_template virtscsi_host_template = {
.dma_boundary = UINT_MAX,
.map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
- .force_blk_mq = 1,
};
#define virtscsi_config_get(vdev, fld) \
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bdd82e497d5f..c6727bcbc2e3 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -801,8 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
/* additional setup required for Fastlane */
if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
/* map full address space up to ESP base for DMA */
- zep->board_base = ioremap(board,
- FASTLANE_ESP_ADDR-1);
+ zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1);
if (!zep->board_base) {
pr_err("Cannot allocate board address space\n");
err = -ENOMEM;
@@ -843,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
* dma_registers size if adding any more
*/
esp->dma_regs = ioremap(dmaaddr,
- sizeof(struct fastlane_dma_registers));
+ sizeof(struct fastlane_dma_registers));
} else
/* ZorroII address space remapped nocache by early startup */
esp->dma_regs = ZTWO_VADDR(dmaaddr);
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 8b49d782a1ab..a39f17cea376 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
-obj-$(CONFIG_ARCH_MXC) += imx/
+obj-y += imx/
obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index bc2c912949bd..321c5e26a268 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -48,6 +48,19 @@ config MESON_EE_PM_DOMAINS
Say yes to expose Amlogic Meson Everything-Else Power Domains as
Generic Power Domains.
+config MESON_SECURE_PM_DOMAINS
+ bool "Amlogic Meson Secure Power Domains driver"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ depends on PM && OF
+ depends on HAVE_ARM_SMCCC
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Support for the power controller on Amlogic A1/C1 series.
+ Say yes to expose Amlogic Meson Secure Power Domains as Generic
+ Power Domains.
+
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
index de79d044b545..7b8c5d323f5c 100644
--- a/drivers/soc/amlogic/Makefile
+++ b/drivers/soc/amlogic/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
+obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
new file mode 100644
index 000000000000..5fb29a475879
--- /dev/null
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Amlogic, Inc.
+ * Author: Jianxin Pan <jianxin.pan@amlogic.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <dt-bindings/power/meson-a1-power.h>
+#include <linux/arm-smccc.h>
+#include <linux/firmware/meson/meson_sm.h>
+
+#define PWRC_ON 1
+#define PWRC_OFF 0
+
+struct meson_secure_pwrc_domain {
+ struct generic_pm_domain base;
+ unsigned int index;
+ struct meson_secure_pwrc *pwrc;
+};
+
+struct meson_secure_pwrc {
+ struct meson_secure_pwrc_domain *domains;
+ struct genpd_onecell_data xlate;
+ struct meson_sm_firmware *fw;
+};
+
+struct meson_secure_pwrc_domain_desc {
+ unsigned int index;
+ unsigned int flags;
+ char *name;
+ bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain);
+};
+
+struct meson_secure_pwrc_domain_data {
+ unsigned int count;
+ struct meson_secure_pwrc_domain_desc *domains;
+};
+
+static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain)
+{
+ int is_off = 1;
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off,
+ pwrc_domain->index, 0, 0, 0, 0) < 0)
+ pr_err("failed to get power domain status\n");
+
+ return is_off;
+}
+
+static int meson_secure_pwrc_off(struct generic_pm_domain *domain)
+{
+ int ret = 0;
+ struct meson_secure_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_secure_pwrc_domain, base);
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
+ pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) {
+ pr_err("failed to set power domain off\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int meson_secure_pwrc_on(struct generic_pm_domain *domain)
+{
+ int ret = 0;
+ struct meson_secure_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_secure_pwrc_domain, base);
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
+ pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) {
+ pr_err("failed to set power domain on\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+#define SEC_PD(__name, __flag) \
+[PWRC_##__name##_ID] = \
+{ \
+ .name = #__name, \
+ .index = PWRC_##__name##_ID, \
+ .is_off = pwrc_secure_is_off, \
+ .flags = __flag, \
+}
+
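
SEC_PD() builds designated initializers keyed by the dt-binding IDs from
dt-bindings/power/meson-a1-power.h, so the table below stays correct even
if entries are sparse. For example, SEC_PD(UART, GENPD_FLAG_ALWAYS_ON)
expands to roughly:

	[PWRC_UART_ID] = {
		.name = "UART",
		.index = PWRC_UART_ID,
		.is_off = pwrc_secure_is_off,
		.flags = GENPD_FLAG_ALWAYS_ON,
	},
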
+static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
+ SEC_PD(DSPA, 0),
+ SEC_PD(DSPB, 0),
+ /* UART should keep working in ATF after suspend and before resume */
+ SEC_PD(UART, GENPD_FLAG_ALWAYS_ON),
+ /* DMC is for DDR PHY ana/dig and DMC, and should be always on */
+ SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(I2C, 0),
+ SEC_PD(PSRAM, 0),
+ SEC_PD(ACODEC, 0),
+ SEC_PD(AUDIO, 0),
+ SEC_PD(OTP, 0),
+ SEC_PD(DMA, 0),
+ SEC_PD(SD_EMMC, 0),
+ SEC_PD(RAMA, 0),
+ /* SRAMB is used as ATF runtime memory, and should be always on */
+ SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(IR, 0),
+ SEC_PD(SPICC, 0),
+ SEC_PD(SPIFC, 0),
+ SEC_PD(USB, 0),
+ /* NIC is for the Arm NIC-400 interconnect, and should be always on */
+ SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(PDMIN, 0),
+ SEC_PD(RSA, 0),
+};
+
+static int meson_secure_pwrc_probe(struct platform_device *pdev)
+{
+ int i;
+ struct device_node *sm_np;
+ struct meson_secure_pwrc *pwrc;
+ const struct meson_secure_pwrc_domain_data *match;
+
+ match = of_device_get_match_data(&pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
+ sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm");
+ if (!sm_np) {
+ dev_err(&pdev->dev, "no secure-monitor node\n");
+ return -ENODEV;
+ }
+
+ pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
+ if (!pwrc)
+ return -ENOMEM;
+
+ pwrc->fw = meson_sm_get(sm_np);
+ of_node_put(sm_np);
+ if (!pwrc->fw)
+ return -EPROBE_DEFER;
+
+ pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->xlate.domains),
+ GFP_KERNEL);
+ if (!pwrc->xlate.domains)
+ return -ENOMEM;
+
+ pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->domains), GFP_KERNEL);
+ if (!pwrc->domains)
+ return -ENOMEM;
+
+ pwrc->xlate.num_domains = match->count;
+ platform_set_drvdata(pdev, pwrc);
+
+ for (i = 0 ; i < match->count ; ++i) {
+ struct meson_secure_pwrc_domain *dom = &pwrc->domains[i];
+
+ if (!match->domains[i].index)
+ continue;
+
+ dom->pwrc = pwrc;
+ dom->index = match->domains[i].index;
+ dom->base.name = match->domains[i].name;
+ dom->base.flags = match->domains[i].flags;
+ dom->base.power_on = meson_secure_pwrc_on;
+ dom->base.power_off = meson_secure_pwrc_off;
+
+ pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom));
+
+ pwrc->xlate.domains[i] = &dom->base;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
+}
+
+static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
+ .domains = a1_pwrc_domains,
+ .count = ARRAY_SIZE(a1_pwrc_domains),
+};
+
+static const struct of_device_id meson_secure_pwrc_match_table[] = {
+ {
+ .compatible = "amlogic,meson-a1-pwrc",
+ .data = &meson_secure_a1_pwrc_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson_secure_pwrc_driver = {
+ .probe = meson_secure_pwrc_probe,
+ .driver = {
+ .name = "meson_secure_pwrc",
+ .of_match_table = meson_secure_pwrc_match_table,
+ },
+};
+builtin_platform_driver(meson_secure_pwrc_driver);
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 518a8e081b49..cd4f6410e8c2 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <linux/types.h>
@@ -433,6 +433,69 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
/**
+ * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
+ * to a frame queue using one fqid.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued (which may be fewer than @nb when
+ * the enqueue ring is nearly full), or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
+
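
Since the call reports how many frames were actually enqueued, callers
normally loop over any shortfall. A hedged usage sketch; enqueue_all() is
hypothetical and not part of this patch:

	/* Hypothetical caller: push nb frames, retrying on partial enqueue. */
	static int enqueue_all(struct dpaa2_io *d, u32 fqid,
			       const struct dpaa2_fd *fds, int nb)
	{
		int done = 0, ret;

		while (done < nb) {
			ret = dpaa2_io_service_enqueue_multiple_fq(d, fqid,
								   fds + done,
								   nb - done);
			if (ret < 0)
				return ret;	/* e.g. -ENODEV */
			if (ret == 0)
				cpu_relax();	/* ring full; a real caller would back off */
			done += ret;
		}
		return 0;
	}
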
+/**
+ * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
+ * to different frame queues using a list of fqids.
+ * @d: the given DPIO service.
+ * @fqid: the given list of frame queue ids.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued (which may be fewer than @nb when
+ * the enqueue ring is nearly full), or -ENODEV if there is no dpio service.
+ * Note that @nb must not exceed 32, the size of the on-stack descriptor
+ * table used below.
+ */
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+ u32 *fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ int i;
+ struct qbman_eq_desc ed[32];
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ for (i = 0; i < nb; i++) {
+ qbman_eq_desc_clear(&ed[i]);
+ qbman_eq_desc_set_no_orp(&ed[i], 0);
+ qbman_eq_desc_set_fq(&ed[i], fqid[i]);
+ }
+
+ return qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
+
+/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
@@ -526,7 +589,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
/**
* dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
- * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
+ * @max_frames: the maximum number of dequeued result for frames, must be <= 32.
* @dev: the device to allow mapping/unmapping the DMAable region.
*
* The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
@@ -541,7 +604,7 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
struct dpaa2_io_store *ret;
size_t size;
- if (!max_frames || (max_frames > 16))
+ if (!max_frames || (max_frames > 32))
return NULL;
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index c66f5b73777c..d1f49caa5b13 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -1,24 +1,18 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>
#include "qbman-portal.h"
-#define QMAN_REV_4000 0x04000000
-#define QMAN_REV_4100 0x04010000
-#define QMAN_REV_4101 0x04010001
-#define QMAN_REV_5000 0x05000000
-
-#define QMAN_REV_MASK 0xffff0000
-
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)
@@ -28,6 +22,7 @@
/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
@@ -51,6 +46,8 @@
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
@@ -78,6 +75,12 @@
/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN 0xbb
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+#define EQ_DESC_SIZE_WITHOUT_FD 29
+#define EQ_DESC_SIZE_FD_START 32
+
enum qbman_sdqcr_dct {
qbman_sdqcr_dct_null = 0,
qbman_sdqcr_dct_prio_ics,
@@ -90,6 +93,82 @@ enum qbman_sdqcr_fc {
qbman_sdqcr_fc_up_to_3 = 1
};
+/* Internal Function declaration */
+static int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Function pointers */
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+ = qbman_swp_enqueue_direct;
+
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_direct;
+
+int
+(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_desc_direct;
+
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
+ = qbman_swp_pull_direct;
+
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+ = qbman_swp_dqrr_next_direct;
+
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+ = qbman_swp_release_direct;
+
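
Routing the hot-path entry points through file-scope function pointers
lets qbman_swp_init() select the direct or memory-backed implementation
once, based on QMAN_REV, instead of testing the revision on every call.
The exported names then become thin wrappers; a sketch of the shape (the
actual wrappers are expected to live in qbman-portal.h):

	/* Sketch: dispatch through the pointer chosen at portal-init time. */
	static inline int qbman_swp_enqueue(struct qbman_swp *s,
					    const struct qbman_eq_desc *d,
					    const struct dpaa2_fd *fd)
	{
		return qbman_swp_enqueue_ptr(s, d, fd);
	}

Note that the pointers are global rather than per-portal, which assumes
every portal in the system shares one QMan revision.
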
/* Portal Access */
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
@@ -146,6 +225,15 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
#define QMAN_RT_MODE 0x00000100
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ else
+ return (2 * ringsize) - (first - last);
+}
+
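
Because the producer/consumer indices carry the valid (wrap) bit, they
count modulo 2*ringsize, and qm_cyc_diff() subtracts in that doubled
space. Two worked cases for ringsize = 32 (indices 0..63):

	/* Worked examples for ringsize = 32, i.e. indices modulo 64: */
	qm_cyc_diff(32, 10, 17);	/* == 7: no wrap, 17 - 10 */
	qm_cyc_diff(32, 60, 3);		/* == 7: wrapped, (2 * 32) - (60 - 3) */
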
/**
* qbman_swp_init() - Create a functional object representing the given
* QBMan portal descriptor.
@@ -156,11 +244,16 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
*/
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
- struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
u32 reg;
+ u32 mask_size;
+ u32 eqcr_pi;
if (!p)
return NULL;
+
+ spin_lock_init(&p->access_spinlock);
+
p->desc = d;
p->mc.valid_bit = QB_VALID_BIT;
p->sdq = 0;
@@ -186,25 +279,38 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar;
- if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
- memset(p->addr_cena, 0, 64 * 1024);
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
- 1, /* Writes Non-cacheable */
- 0, /* EQCR_CI stashing threshold */
- 3, /* RPM: Valid bit mode, RCR in array mode */
- 2, /* DCM: Discrete consumption ack mode */
- 3, /* EPM: Valid bit mode, EQCR in array mode */
- 1, /* mem stashing drop enable == TRUE */
- 1, /* mem stashing priority == TRUE */
- 1, /* mem stashing enable == TRUE */
- 1, /* dequeue stashing priority == TRUE */
- 0, /* dequeue stashing enable == FALSE */
- 0); /* EQCR_CI stashing priority == FALSE */
- if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 2, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
+ } else {
+ memset(p->addr_cena, 0, 64 * 1024);
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 1, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 0, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+ }
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@@ -225,6 +331,30 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
* applied when dequeues from a specific channel are enabled.
*/
qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ p->eqcr.pi_ring_size = 32;
+ qbman_swp_enqueue_ptr =
+ qbman_swp_enqueue_mem_back;
+ qbman_swp_enqueue_multiple_ptr =
+ qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_desc_ptr =
+ qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+ qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+ qbman_swp_release_ptr = qbman_swp_release_mem_back;
+ }
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
+ eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
+ & p->eqcr.pi_ci_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size;
+
return p;
}
@@ -378,6 +508,7 @@ enum qb_enqueue_commands {
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
/**
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
@@ -453,8 +584,9 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
QMAN_RT_MODE);
}
+#define QB_RT_BIT ((u32)0x100)
/**
- * qbman_swp_enqueue() - Issue an enqueue command
+ * qbman_swp_enqueue_direct() - Issue an enqueue command
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: the frame descriptor to be enqueued
@@ -464,30 +596,351 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
*
* Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
- const struct dpaa2_fd *fd)
+static
+int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
{
- struct qbman_eq_desc *p;
- u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
- if (!EQAR_SUCCESS(eqar))
- return -EBUSY;
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qbman_swp_enqueue_mem_back() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
- memcpy(&p->dca, &d->dca, 31);
- memcpy(&p->fd, fd, sizeof(*fd));
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- /* Set the verb byte, have to substitute in the valid-bit */
- dma_wmb();
- p->verb = d->verb | EQAR_VB(eqar);
- } else {
- p->verb = d->verb | EQAR_VB(eqar);
- dma_wmb();
- qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+/**
+ * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)d;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
+
+ spin_lock(&s->access_spinlock);
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock(&s->access_spinlock);
+ return 0;
+ }
}
- return 0;
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cacheline without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (size_t)s->addr_cena;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+ spin_unlock(&s->access_spinlock);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ unsigned long irq_flags;
+
+ spin_lock(&s->access_spinlock);
+ local_irq_save(irq_flags);
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+ s->eqcr.ci = __raw_readl(p) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ local_irq_restore(irq_flags);
+ spin_unlock(&s->access_spinlock);
+ return 0;
+ }
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ local_irq_restore(irq_flags);
+ spin_unlock(&s->access_spinlock);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Advance the stored producer index over the new entries */
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+ s->eqcr.ci = __raw_readl(p) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+
+ return num_enqueued;
}
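The two-pass fill in all four variants relies on the EQCR valid-bit protocol: each entry
is written without its verb byte first, and the verb (carrying the current valid-bit
polarity) is only stored after the barrier, so hardware never consumes a partially
written entry. The polarity flips on every ring wrap. A small self-contained model of
that index arithmetic (plain C, not driver code; the ring depth is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define QB_VALID_BIT ((uint32_t)0x80)

    int main(void)
    {
            uint32_t ring_size = 8;                 /* illustrative EQCR depth */
            uint32_t full_mask = 2 * ring_size - 1; /* pi spans two laps of the ring */
            uint32_t half_mask = full_mask >> 1;    /* actual ring-slot mask */
            uint32_t pi = 0, vb = QB_VALID_BIT;
            int i;

            for (i = 0; i < 20; i++) {
                    printf("entry %2d -> slot %u, verb valid-bit 0x%02x\n",
                           i, pi & half_mask, vb);
                    pi++;
                    if (!(pi & half_mask))  /* wrapped the ring: flip polarity */
                            vb ^= QB_VALID_BIT;
            }
            return 0;
    }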
/* Static (push) dequeue */
@@ -645,7 +1098,7 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
}
/**
- * qbman_swp_pull() - Issue the pull dequeue command
+ * qbman_swp_pull_direct() - Issue the pull dequeue command
* @s: the software portal object
* @d: the software portal descriptor which has been configured with
* the set of qbman_pull_desc_set_*() calls
@@ -653,7 +1106,8 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
* Return 0 for success, and -EBUSY if the software portal is not ready
* to do pull dequeue.
*/
-int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+static
+int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
struct qbman_pull_desc *p;
@@ -671,18 +1125,48 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
p->dq_src = d->dq_src;
p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt;
+ dma_wmb();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- dma_wmb();
- /* Set the verb byte, have to substitute in the valid-bit */
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
- } else {
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
- dma_wmb();
- qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+ return 0;
+}
+
+/**
+ * qbman_swp_pull_mem_back() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
}
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
return 0;
}
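A pull (volatile) dequeue is issued against caller-provided storage and retried while
the portal still has an outstanding volatile dequeue. A hedged sketch of a caller,
assuming the response storage has been DMA-mapped elsewhere (swp, dq_storage,
dq_storage_phys and my_fqid are illustrative names):

    struct qbman_pull_desc pd;
    int err;

    qbman_pull_desc_clear(&pd);
    qbman_pull_desc_set_storage(&pd, dq_storage, dq_storage_phys, 1);
    qbman_pull_desc_set_numframes(&pd, 16);
    qbman_pull_desc_set_fq(&pd, my_fqid);

    do {
            err = qbman_swp_pull(swp, &pd);
    } while (err == -EBUSY);  /* a previous volatile dequeue is still pending */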
@@ -690,14 +1174,14 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
#define QMAN_DQRR_PI_MASK 0xf
/**
- * qbman_swp_dqrr_next() - Get an valid DQRR entry
+ * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
* @s: the software portal object
*
* Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
u32 verb;
u32 response_verb;
@@ -740,10 +1224,99 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
- else
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
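The DQRR entry returned here stays valid only until it is consumed, so a polling loop
pairs qbman_swp_dqrr_next() with qbman_swp_dqrr_consume(). A minimal sketch (swp and
handle_fd are illustrative; dpaa2_dq_fd() extracts the frame descriptor from a dequeue
result):

    const struct dpaa2_dq *dq;

    while ((dq = qbman_swp_dqrr_next(swp))) {
            /* process the dequeue result, e.g. hand off its FD */
            handle_fd(dpaa2_dq_fd(dq));
            qbman_swp_dqrr_consume(swp, dq);
    }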
+/**
+ * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb;
/*
@@ -872,7 +1445,7 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
/**
- * qbman_swp_release() - Issue a buffer release command
+ * qbman_swp_release_direct() - Issue a buffer release command
* @s: the software portal object
* @d: the release descriptor
* @buffers: a pointer pointing to the buffer address to be released
@@ -880,8 +1453,9 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
*
* Return 0 for success, -EBUSY if the release command ring is not ready.
*/
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const u64 *buffers, unsigned int num_buffers)
+int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
{
int i;
struct qbman_release_desc *p;
@@ -895,28 +1469,59 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
return -EBUSY;
/* Start the release command */
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
- else
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
/* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- /*
- * Set the verb byte, have to substitute in the valid-bit
- * and the number of buffers.
- */
- dma_wmb();
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
- } else {
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
- dma_wmb();
- qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
- RAR_IDX(rar) * 4, QMAN_RT_MODE);
- }
+ /*
+ * Set the verb byte, have to substitute in the valid-bit
+ * and the number of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_release_mem_back() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: pointer to the array of buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
return 0;
}
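Buffer releases can transiently fail with -EBUSY while the release command ring drains,
so callers usually spin a bounded number of times. A sketch, assuming up to seven buffer
addresses in buf_array (swp, bpid, buf_array and num_bufs are illustrative names):

    struct qbman_release_desc rd;
    int retries = 1000;
    int err;

    qbman_release_desc_clear(&rd);
    qbman_release_desc_set_bpid(&rd, bpid);

    do {
            err = qbman_swp_release(swp, &rd, buf_array, num_bufs);
    } while (err == -EBUSY && --retries);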
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index f3ec5d2044fb..c7c2225b7d91 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -9,6 +9,13 @@
#include <soc/fsl/dpaa2-fd.h>
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+
+#define QMAN_REV_MASK 0xffff0000
+
struct dpaa2_dq;
struct qbman_swp;
@@ -81,6 +88,10 @@ struct qbman_eq_desc {
u8 wae;
u8 rspid;
__le64 rsp_addr;
+};
+
+struct qbman_eq_desc_with_fd {
+ struct qbman_eq_desc desc;
u8 fd[32];
};
@@ -132,8 +143,48 @@ struct qbman_swp {
u8 dqrr_size;
int reset_bug; /* indicates dqrr reset workaround is needed */
} dqrr;
+
+ struct {
+ u32 pi;
+ u32 pi_vb;
+ u32 pi_ring_size;
+ u32 pi_ci_mask;
+ u32 ci;
+ int available;
+ u32 pend;
+ u32 no_pfdr;
+ } eqcr;
+
+ spinlock_t access_spinlock;
};
+/* Function pointers */
+extern
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+extern
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+extern
+int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+extern
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
+extern
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
+extern
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
@@ -158,9 +209,6 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
enum qbman_pull_type_e dct);
-int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
-
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
@@ -172,15 +220,11 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
u32 qd_bin, u32 qd_prio);
-int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
- const struct dpaa2_fd *fd);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
@@ -194,6 +238,61 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static inline int
+qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ return qbman_swp_enqueue_ptr(s, d, fd);
+}
+
+/**
+ * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
+
+/**
* qbman_result_is_DQ() - check if the dequeue result is a dequeue response
* @dq: the dequeue result to be checked
*
@@ -504,4 +603,49 @@ int qbman_bp_query(struct qbman_swp *s, u16 bpid,
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: pointer to the array of buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+static inline int qbman_swp_release(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static inline int qbman_swp_pull(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+{
+ return qbman_swp_pull_ptr(s, d);
+}
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ return qbman_swp_dqrr_next_ptr(s);
+}
+
#endif /* __FSL_QBMAN_PORTAL_H */
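These inline wrappers leave the direct/mem-back choice to function pointers that the
portal init code is expected to set once, based on the QMAN revision. That init-time
wiring is not part of this hunk; a hedged sketch of what it presumably looks like
(qbman_swp_enqueue_direct and qbman_swp_enqueue_mem_back are the single-enqueue
variants defined above the excerpt):

    /* Sketch only: the actual assignments belong in qbman_swp_init() */
    if ((d->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
            qbman_swp_enqueue_ptr = qbman_swp_enqueue_direct;
            qbman_swp_pull_ptr = qbman_swp_pull_direct;
            qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_direct;
            qbman_swp_release_ptr = qbman_swp_release_direct;
    } else {
            qbman_swp_enqueue_ptr = qbman_swp_enqueue_mem_back;
            qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
            qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
            qbman_swp_release_ptr = qbman_swp_release_mem_back;
    }
    /* ...and similarly for the _multiple and _multiple_desc variants. */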
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 96c2057d8d8e..447146861c2c 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -423,7 +423,7 @@ static void qe_upload_microcode(const void *base,
qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
- qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
+ qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
/*
@@ -525,7 +525,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
*/
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
- qe_firmware_info.extended_modes = firmware->extended_modes;
+ qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
sizeof(firmware->vtraps));
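Both qe.c fixes follow the same endianness rule: qe_iowrite32be() performs the
CPU-to-big-endian swap itself, so it must be handed CPU-endian values, while
big-endian fields read from the firmware blob must be converted with
be64_to_cpu()/be32_to_cpu() before being stored in CPU-endian structures.
Schematically:

    /* before: be32_to_cpu() cancels the swap done inside qe_iowrite32be(),
     * so little-endian hosts wrote the value in raw CPU byte order */
    qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);

    /* after: qe_iowrite32be() does the one required swap */
    qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);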
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index a81a1a79f1ca..75075591f630 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -46,7 +46,7 @@ int cpm_muram_init(void)
{
struct device_node *np;
struct resource r;
- u32 zero[OF_MAX_ADDR_CELLS] = {};
+ __be32 zero[OF_MAX_ADDR_CELLS] = {};
resource_size_t max = 0;
int i = 0;
int ret = 0;
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 0dd5bdb04a14..0390af999900 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -44,7 +44,7 @@
struct qe_ic {
/* Control registers offset */
- u32 __iomem *regs;
+ __be32 __iomem *regs;
/* The remapper for this QEIC */
struct irq_domain *irqhost;
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 90157acc5ba6..d6c93970df4d 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -632,7 +632,7 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
{
int source;
u32 shift;
- struct qe_mux *qe_mux_reg;
+ struct qe_mux __iomem *qe_mux_reg;
qe_mux_reg = &qe_immr->qmx;
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
index 274d34449846..7e11be41ab62 100644
--- a/drivers/soc/fsl/qe/ucc_slow.c
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(ucc_slow_restart_tx);
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
- struct ucc_slow *us_regs;
+ struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
@@ -93,7 +93,7 @@ EXPORT_SYMBOL(ucc_slow_enable);
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
- struct ucc_slow *us_regs;
+ struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
@@ -122,7 +122,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
u32 i;
struct ucc_slow __iomem *us_regs;
u32 gumr;
- struct qe_bd *bd;
+ struct qe_bd __iomem *bd;
u32 id;
u32 command;
int ret = 0;
@@ -168,16 +168,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return -ENOMEM;
}
- uccs->saved_uccm = 0;
- uccs->p_rx_frame = 0;
us_regs = uccs->us_regs;
- uccs->p_ucce = (u16 *) & (us_regs->ucce);
- uccs->p_uccm = (u16 *) & (us_regs->uccm);
-#ifdef STATISTICS
- uccs->rx_frames = 0;
- uccs->tx_frames = 0;
- uccs->rx_discarded = 0;
-#endif /* STATISTICS */
+ uccs->p_ucce = &us_regs->ucce;
+ uccs->p_uccm = &us_regs->uccm;
/* Get PRAM base */
uccs->us_pram_offset =
@@ -231,24 +224,24 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
/* set bd status and length */
- qe_iowrite32be(0, (u32 *)bd);
+ qe_iowrite32be(0, (u32 __iomem *)bd);
bd++;
}
/* for last BD set Wrap bit */
qe_iowrite32be(0, &bd->buf);
- qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
+ qe_iowrite32be(T_W, (u32 __iomem *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
- qe_iowrite32be(0, (u32 *)bd);
+ qe_iowrite32be(0, (u32 __iomem *)bd);
/* clear bd buffer */
qe_iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
- qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
+ qe_iowrite32be(R_W, (u32 __iomem *)bd);
qe_iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
@@ -273,8 +266,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
qe_iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
- gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
- us_info->diag | us_info->mode;
+ gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
+ (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
if (us_info->tci)
gumr |= UCC_SLOW_GUMR_L_TCI;
if (us_info->rinv)
@@ -289,8 +282,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* if the data is in cachable memory, the 'global' */
/* in the function code should be set. */
- uccs->us_pram->tbmr = UCC_BMR_BO_BE;
- uccs->us_pram->rbmr = UCC_BMR_BO_BE;
+ qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+ qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
/* rbase, tbase are offsets from MURAM base */
qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 0281ef9a1800..67aa94b2481b 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -10,11 +10,20 @@ config IMX_GPCV2_PM_DOMAINS
config IMX_SCU_SOC
bool "i.MX System Controller Unit SoC info support"
- depends on IMX_SCU || COMPILE_TEST
+ depends on IMX_SCU
select SOC_BUS
help
If you say yes here you get support for the NXP i.MX System
Controller Unit SoC info module, it will provide the SoC info
like SoC family, ID and revision etc.
+config SOC_IMX8M
+ bool "i.MX8M SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ help
+ If you say yes here you get support for the NXP i.MX8M SoC
+ family. It will provide SoC info such as the SoC family,
+ ID and revision.
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index cf9ca42ff739..103e2c93c342 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
-obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
+obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 98b9d9a902ae..90a8b2c0676f 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
{
struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
- int i, ret, sw, sw2iso;
- u32 val;
+ int i, ret;
+ u32 val, req;
if (pd->supply) {
ret = regulator_enable(pd->supply);
@@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
0x1, 0x1);
- /* Read ISO and ISO2SW power up delays */
- regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
- sw = val & 0x3f;
- sw2iso = (val >> 8) & 0x3f;
-
/* Request GPC to power up domain */
- val = BIT(pd->cntr_pdn_bit + 1);
- regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
+ req = BIT(pd->cntr_pdn_bit + 1);
+ regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
- /* Wait ISO + ISO2SW IPG clock cycles */
- udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz));
+ /* Wait for the PGC to handle the request */
+ ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
+ 1, 50);
+ if (ret)
+ pr_err("powerup request on domain %s timed out\n", genpd->name);
+
+ /* Wait for reset to propagate through peripherals */
+ usleep_range(5, 10);
/* Disable reset clocks for all devices in the domain */
for (i = 0; i < pd->num_clks; i++)
@@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = {
.rd_table = &access_table,
.wr_table = &access_table,
.max_register = 0x2ac,
+ .fast_io = true,
};
static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index b0dffb06c05d..6cf8a7a412bd 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -14,6 +14,7 @@
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8m.c
index 719e1f189ebf..719e1f189ebf 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8m.c
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index de20e6cba83b..db37144ae98c 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -78,6 +78,7 @@ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
client->pkt_cnt = 0;
client->client.dev = dev;
client->client.tx_block = false;
+ client->client.knows_txdone = true;
client->chan = mbox_request_channel(&client->client, index);
if (IS_ERR(client->chan)) {
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index c725315cf6a8..5d34e8b9c988 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -111,6 +111,28 @@ enum dew_regs {
PWRAP_RG_SPI_CON13,
PWRAP_SPISLV_KEY,
+ /* MT6359 only regs */
+ PWRAP_DEW_CRC_SWRST,
+ PWRAP_DEW_RG_EN_RECORD,
+ PWRAP_DEW_RECORD_CMD0,
+ PWRAP_DEW_RECORD_CMD1,
+ PWRAP_DEW_RECORD_CMD2,
+ PWRAP_DEW_RECORD_CMD3,
+ PWRAP_DEW_RECORD_CMD4,
+ PWRAP_DEW_RECORD_CMD5,
+ PWRAP_DEW_RECORD_WDATA0,
+ PWRAP_DEW_RECORD_WDATA1,
+ PWRAP_DEW_RECORD_WDATA2,
+ PWRAP_DEW_RECORD_WDATA3,
+ PWRAP_DEW_RECORD_WDATA4,
+ PWRAP_DEW_RECORD_WDATA5,
+ PWRAP_DEW_RG_ADDR_TARGET,
+ PWRAP_DEW_RG_ADDR_MASK,
+ PWRAP_DEW_RG_WDATA_TARGET,
+ PWRAP_DEW_RG_WDATA_MASK,
+ PWRAP_DEW_RG_SPI_RECORD_CLR,
+ PWRAP_DEW_RG_CMD_ALERT_CLR,
+
/* MT6397 only regs */
PWRAP_DEW_EVENT_OUT_EN,
PWRAP_DEW_EVENT_SRC_EN,
@@ -197,6 +219,42 @@ static const u32 mt6358_regs[] = {
[PWRAP_SPISLV_KEY] = 0x044a,
};
+static const u32 mt6359_regs[] = {
+ [PWRAP_DEW_RG_EN_RECORD] = 0x040a,
+ [PWRAP_DEW_DIO_EN] = 0x040c,
+ [PWRAP_DEW_READ_TEST] = 0x040e,
+ [PWRAP_DEW_WRITE_TEST] = 0x0410,
+ [PWRAP_DEW_CRC_SWRST] = 0x0412,
+ [PWRAP_DEW_CRC_EN] = 0x0414,
+ [PWRAP_DEW_CRC_VAL] = 0x0416,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0418,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x041a,
+ [PWRAP_DEW_CIPHER_EN] = 0x041c,
+ [PWRAP_DEW_CIPHER_RDY] = 0x041e,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0420,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x0422,
+ [PWRAP_DEW_RDDMY_NO] = 0x0424,
+ [PWRAP_DEW_RECORD_CMD0] = 0x0428,
+ [PWRAP_DEW_RECORD_CMD1] = 0x042a,
+ [PWRAP_DEW_RECORD_CMD2] = 0x042c,
+ [PWRAP_DEW_RECORD_CMD3] = 0x042e,
+ [PWRAP_DEW_RECORD_CMD4] = 0x0430,
+ [PWRAP_DEW_RECORD_CMD5] = 0x0432,
+ [PWRAP_DEW_RECORD_WDATA0] = 0x0434,
+ [PWRAP_DEW_RECORD_WDATA1] = 0x0436,
+ [PWRAP_DEW_RECORD_WDATA2] = 0x0438,
+ [PWRAP_DEW_RECORD_WDATA3] = 0x043a,
+ [PWRAP_DEW_RECORD_WDATA4] = 0x043c,
+ [PWRAP_DEW_RECORD_WDATA5] = 0x043e,
+ [PWRAP_DEW_RG_ADDR_TARGET] = 0x0440,
+ [PWRAP_DEW_RG_ADDR_MASK] = 0x0442,
+ [PWRAP_DEW_RG_WDATA_TARGET] = 0x0444,
+ [PWRAP_DEW_RG_WDATA_MASK] = 0x0446,
+ [PWRAP_DEW_RG_SPI_RECORD_CLR] = 0x0448,
+ [PWRAP_DEW_RG_CMD_ALERT_CLR] = 0x0448,
+ [PWRAP_SPISLV_KEY] = 0x044a,
+};
+
static const u32 mt6397_regs[] = {
[PWRAP_DEW_BASE] = 0xbc00,
[PWRAP_DEW_EVENT_OUT_EN] = 0xbc00,
@@ -497,6 +555,45 @@ static int mt6765_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x1E0,
};
+static int mt6779_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_RDDMY] = 0x20,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_CSLEXT_WRITE] = 0x2C,
+ [PWRAP_CSLEXT_READ] = 0x30,
+ [PWRAP_EXT_CK_WRITE] = 0x34,
+ [PWRAP_STAUPD_CTRL] = 0x3C,
+ [PWRAP_STAUPD_GRPEN] = 0x40,
+ [PWRAP_EINT_STA0_ADR] = 0x44,
+ [PWRAP_HARB_HPRIO] = 0x68,
+ [PWRAP_HIPRIO_ARB_EN] = 0x6C,
+ [PWRAP_MAN_EN] = 0x7C,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_WACS0_EN] = 0x8C,
+ [PWRAP_INIT_DONE0] = 0x90,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_WACS2_EN] = 0x9C,
+ [PWRAP_INIT_DONE1] = 0x98,
+ [PWRAP_INIT_DONE2] = 0xA0,
+ [PWRAP_INT_EN] = 0xBC,
+ [PWRAP_INT_FLG_RAW] = 0xC0,
+ [PWRAP_INT_FLG] = 0xC4,
+ [PWRAP_INT_CLR] = 0xC8,
+ [PWRAP_INT1_EN] = 0xCC,
+ [PWRAP_INT1_FLG] = 0xD4,
+ [PWRAP_INT1_CLR] = 0xD8,
+ [PWRAP_TIMER_EN] = 0xF0,
+ [PWRAP_WDT_UNIT] = 0xF8,
+ [PWRAP_WDT_SRC_EN] = 0xFC,
+ [PWRAP_WDT_SRC_EN_1] = 0x100,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+};
+
static int mt6797_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -938,6 +1035,7 @@ enum pmic_type {
PMIC_MT6351,
PMIC_MT6357,
PMIC_MT6358,
+ PMIC_MT6359,
PMIC_MT6380,
PMIC_MT6397,
};
@@ -945,6 +1043,7 @@ enum pmic_type {
enum pwrap_type {
PWRAP_MT2701,
PWRAP_MT6765,
+ PWRAP_MT6779,
PWRAP_MT6797,
PWRAP_MT7622,
PWRAP_MT8135,
@@ -1377,6 +1476,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
break;
case PWRAP_MT2701:
case PWRAP_MT6765:
+ case PWRAP_MT6779:
case PWRAP_MT6797:
case PWRAP_MT8173:
case PWRAP_MT8516:
@@ -1711,6 +1811,15 @@ static const struct pwrap_slv_type pmic_mt6358 = {
.pwrap_write = pwrap_write16,
};
+static const struct pwrap_slv_type pmic_mt6359 = {
+ .dew_regs = mt6359_regs,
+ .type = PMIC_MT6359,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_DUALIO,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
static const struct pwrap_slv_type pmic_mt6380 = {
.dew_regs = NULL,
.type = PMIC_MT6380,
@@ -1744,6 +1853,9 @@ static const struct of_device_id of_slave_match_tbl[] = {
.compatible = "mediatek,mt6358",
.data = &pmic_mt6358,
}, {
+ .compatible = "mediatek,mt6359",
+ .data = &pmic_mt6359,
+ }, {
/* The MT6380 PMIC only implements a regulator, so we bind it
* directly instead of using a MFD.
*/
@@ -1783,6 +1895,19 @@ static const struct pmic_wrapper_type pwrap_mt6765 = {
.init_soc_specific = NULL,
};
+static const struct pmic_wrapper_type pwrap_mt6779 = {
+ .regs = mt6779_regs,
+ .type = PWRAP_MT6779,
+ .arb_en_all = 0xfbb7f,
+ .int_en_all = 0xfffffffe,
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct pmic_wrapper_type pwrap_mt6797 = {
.regs = mt6797_regs,
.type = PWRAP_MT6797,
@@ -1868,6 +1993,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt6765-pwrap",
.data = &pwrap_mt6765,
}, {
+ .compatible = "mediatek,mt6779-pwrap",
+ .data = &pwrap_mt6779,
+ }, {
.compatible = "mediatek,mt6797-pwrap",
.data = &pwrap_mt6797,
}, {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index d0a73e76d563..bf42a17a45de 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -76,6 +76,10 @@ config QCOM_OCMEM
requirements. This is typically used by the GPU, camera/video, and
audio components on some Snapdragon SoCs.
+config QCOM_PDR_HELPERS
+ tristate
+ select QCOM_QMI_HELPERS
+
config QCOM_PM
bool "Qualcomm Power Management"
depends on ARCH_QCOM && !ARM64
@@ -88,7 +92,6 @@ config QCOM_PM
config QCOM_QMI_HELPERS
tristate
- depends on ARCH_QCOM || COMPILE_TEST
depends on NET
config QCOM_RMTFS_MEM
@@ -197,6 +200,8 @@ config QCOM_APR
tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
+ depends on NET
+ select QCOM_PDR_HELPERS
help
Enable APR IPC protocol support between
application processor and QDSP6. APR is
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9fb35c8a495e..5d6b83dc58e8 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
+obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 4fcc32420c47..1f35b097c635 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
+#include <linux/soc/qcom/pdr.h>
#include <linux/rpmsg.h>
#include <linux/of.h>
@@ -21,6 +22,7 @@ struct apr {
spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
+ struct pdr_handle *pdr;
struct workqueue_struct *rxwq;
struct work_struct rx_work;
struct list_head rx_list;
@@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np,
id->svc_id + 1, GFP_ATOMIC);
spin_unlock(&apr->svcs_lock);
+ of_property_read_string_index(np, "qcom,protection-domain",
+ 1, &adev->service_path);
+
dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
ret = device_register(&adev->dev);
@@ -300,14 +305,75 @@ static int apr_add_device(struct device *dev, struct device_node *np,
return ret;
}
-static void of_register_apr_devices(struct device *dev)
+static int of_apr_add_pd_lookups(struct device *dev)
+{
+ const char *service_name, *service_path;
+ struct apr *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+ struct pdr_service *pds;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 0, &service_name);
+ if (ret < 0)
+ continue;
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (ret < 0) {
+ dev_err(dev, "pdr service path missing: %d\n", ret);
+ return ret;
+ }
+
+ pds = pdr_add_lookup(apr->pdr, service_name, service_path);
+ if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
+ dev_err(dev, "pdr add lookup failed: %d\n", ret);
+ return PTR_ERR(pds);
+ }
+ }
+
+ return 0;
+}
+
+static void of_register_apr_devices(struct device *dev, const char *svc_path)
{
struct apr *apr = dev_get_drvdata(dev);
struct device_node *node;
+ const char *service_path;
+ int ret;
for_each_child_of_node(dev->of_node, node) {
struct apr_device_id id = { {0} };
+ /*
+ * This function is called with svc_path NULL during
+ * apr_probe(), in which case we register any apr devices
+ * without a qcom,protection-domain specified.
+ *
+ * Then, as protection domains become available (if applicable),
+ * this function is called again, with svc_path naming the
+ * service that has just come up. In
+ * this case we register any apr devices with a matching
+ * qcom,protection-domain.
+ */
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (svc_path) {
+ /* skip APR services that are PD independent */
+ if (ret)
+ continue;
+
+ /* skip APR services whose PD paths don't match */
+ if (strcmp(service_path, svc_path))
+ continue;
+ } else {
+ /* skip APR services whose PD lookups are registered */
+ if (ret == 0)
+ continue;
+ }
+
if (of_property_read_u32(node, "reg", &id.svc_id))
continue;
@@ -318,6 +384,34 @@ static void of_register_apr_devices(struct device *dev)
}
}
+static int apr_remove_device(struct device *dev, void *svc_path)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ if (svc_path && adev->service_path) {
+ if (!strcmp(adev->service_path, (char *)svc_path))
+ device_unregister(&adev->dev);
+ } else {
+ device_unregister(&adev->dev);
+ }
+
+ return 0;
+}
+
+static void apr_pd_status(int state, char *svc_path, void *priv)
+{
+ struct apr *apr = (struct apr *)priv;
+
+ switch (state) {
+ case SERVREG_SERVICE_STATE_UP:
+ of_register_apr_devices(apr->dev, svc_path);
+ break;
+ case SERVREG_SERVICE_STATE_DOWN:
+ device_for_each_child(apr->dev, svc_path, apr_remove_device);
+ break;
+ }
+}
+
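of_register_apr_devices() is therefore called in two modes, and the skip logic is easy
to get wrong. A stand-alone illustration of the matching rules (plain C, not driver
code; node_pd_path stands for the second string of a child node's
qcom,protection-domain property, or NULL if the property is absent, and the sample
path below is illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Mirrors the skip logic in of_register_apr_devices() */
    static int should_register(const char *node_pd_path, const char *svc_path)
    {
            if (svc_path)   /* a PD just came up: only its own devices */
                    return node_pd_path && strcmp(node_pd_path, svc_path) == 0;
            return !node_pd_path;  /* probe time: only PD-independent devices */
    }

    int main(void)
    {
            printf("%d\n", should_register(NULL, NULL));                 /* 1 */
            printf("%d\n", should_register("msm/adsp/audio_pd", NULL));  /* 0 */
            printf("%d\n", should_register("msm/adsp/audio_pd",
                                           "msm/adsp/audio_pd"));        /* 1 */
            return 0;
    }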
static int apr_probe(struct rpmsg_device *rpdev)
{
struct device *dev = &rpdev->dev;
@@ -343,28 +437,39 @@ static int apr_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
}
INIT_WORK(&apr->rx_work, apr_rxwq);
+
+ apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
+ if (IS_ERR(apr->pdr)) {
+ dev_err(dev, "Failed to init PDR handle\n");
+ ret = PTR_ERR(apr->pdr);
+ goto destroy_wq;
+ }
+
INIT_LIST_HEAD(&apr->rx_list);
spin_lock_init(&apr->rx_lock);
spin_lock_init(&apr->svcs_lock);
idr_init(&apr->svcs_idr);
- of_register_apr_devices(dev);
-
- return 0;
-}
-static int apr_remove_device(struct device *dev, void *null)
-{
- struct apr_device *adev = to_apr_device(dev);
+ ret = of_apr_add_pd_lookups(dev);
+ if (ret)
+ goto handle_release;
- device_unregister(&adev->dev);
+ of_register_apr_devices(dev, NULL);
return 0;
+
+handle_release:
+ pdr_handle_release(apr->pdr);
+destroy_wq:
+ destroy_workqueue(apr->rxwq);
+ return ret;
}
static void apr_remove(struct rpmsg_device *rpdev)
{
struct apr *apr = dev_get_drvdata(&rpdev->dev);
+ pdr_handle_release(apr->pdr);
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
flush_workqueue(apr->rxwq);
destroy_workqueue(apr->rxwq);
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
new file mode 100644
index 000000000000..17ad3b8698e1
--- /dev/null
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "pdr_internal.h"
+
+struct pdr_service {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ char service_path[SERVREG_NAME_LENGTH + 1];
+
+ struct sockaddr_qrtr addr;
+
+ unsigned int instance;
+ unsigned int service;
+ u8 service_data_valid;
+ u32 service_data;
+ int state;
+
+ bool need_notifier_register;
+ bool need_notifier_remove;
+ bool need_locator_lookup;
+ bool service_connected;
+
+ struct list_head node;
+};
+
+struct pdr_handle {
+ struct qmi_handle locator_hdl;
+ struct qmi_handle notifier_hdl;
+
+ struct sockaddr_qrtr locator_addr;
+
+ struct list_head lookups;
+ struct list_head indack_list;
+
+ /* control access to pdr lookup/indack lists */
+ struct mutex list_lock;
+
+ /* serialize pd status invocation */
+ struct mutex status_lock;
+
+ /* control access to the locator state */
+ struct mutex lock;
+
+ bool locator_init_complete;
+
+ struct work_struct locator_work;
+ struct work_struct notifier_work;
+ struct work_struct indack_work;
+
+ struct workqueue_struct *notifier_wq;
+ struct workqueue_struct *indack_wq;
+
+ void (*status)(int state, char *service_path, void *priv);
+ void *priv;
+};
+
+struct pdr_list_node {
+ enum servreg_service_state curr_state;
+ u16 transaction_id;
+ struct pdr_service *pds;
+ struct list_head node;
+};
+
+static int pdr_locator_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+ struct pdr_service *pds;
+
+ /* Create a local client port for QMI communication */
+ pdr->locator_addr.sq_family = AF_QIPCRTR;
+ pdr->locator_addr.sq_node = svc->node;
+ pdr->locator_addr.sq_port = svc->port;
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = true;
+ mutex_unlock(&pdr->lock);
+
+ /* Service pending lookup requests */
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->need_locator_lookup)
+ schedule_work(&pdr->locator_work);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_locator_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = false;
+ mutex_unlock(&pdr->lock);
+
+ pdr->locator_addr.sq_node = 0;
+ pdr->locator_addr.sq_port = 0;
+}
+
+static struct qmi_ops pdr_locator_ops = {
+ .new_server = pdr_locator_new_server,
+ .del_server = pdr_locator_del_server,
+};
+
+static int pdr_register_listener(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ bool enable)
+{
+ struct servreg_register_listener_resp resp;
+ struct servreg_register_listener_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_register_listener_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.enable = enable;
+ strcpy(req.service_path, pds->service_path);
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_REGISTER_LISTENER_REQ,
+ SERVREG_REGISTER_LISTENER_REQ_LEN,
+ servreg_register_listener_req_ei,
+ &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s register listener txn wait failed: %d\n",
+ pds->service_path, ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s register listener failed: 0x%x\n",
+ pds->service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX)
+ pr_err("PDR: %s notification state invalid: 0x%x\n",
+ pds->service_path, resp.curr_state);
+
+ pds->state = resp.curr_state;
+
+ return 0;
+}
+
+static void pdr_notifier_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ notifier_work);
+ struct pdr_service *pds;
+ int ret;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service_connected) {
+ if (!pds->need_notifier_register)
+ continue;
+
+ pds->need_notifier_register = false;
+ ret = pdr_register_listener(pdr, pds, true);
+ if (ret < 0)
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ } else {
+ if (!pds->need_notifier_remove)
+ continue;
+
+ pds->need_notifier_remove = false;
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static int pdr_notifier_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = true;
+ pds->need_notifier_register = true;
+ pds->addr.sq_family = AF_QIPCRTR;
+ pds->addr.sq_node = svc->node;
+ pds->addr.sq_port = svc->port;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_notifier_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = false;
+ pds->need_notifier_remove = true;
+ pds->addr.sq_node = 0;
+ pds->addr.sq_port = 0;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static struct qmi_ops pdr_notifier_ops = {
+ .new_server = pdr_notifier_new_server,
+ .del_server = pdr_notifier_del_server,
+};
+
+static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
+ u16 tid)
+{
+ struct servreg_set_ack_resp resp;
+ struct servreg_set_ack_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.transaction_id = tid;
+ strcpy(req.service_path, pds->service_path);
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_SET_ACK_REQ,
+ SERVREG_SET_ACK_REQ_LEN,
+ servreg_set_ack_req_ei,
+ &req);
+
+ /* Skip waiting for response */
+ qmi_txn_cancel(&txn);
+ return ret;
+}
+
+static void pdr_indack_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ indack_work);
+ struct pdr_list_node *ind, *tmp;
+ struct pdr_service *pds;
+
+ list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
+ pds = ind->pds;
+ pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+
+ mutex_lock(&pdr->status_lock);
+ pds->state = ind->curr_state;
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+
+ mutex_lock(&pdr->list_lock);
+ list_del(&ind->node);
+ mutex_unlock(&pdr->list_lock);
+
+ kfree(ind);
+ }
+}
+
+static void pdr_indication_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ const struct servreg_state_updated_ind *ind_msg = data;
+ struct pdr_list_node *ind;
+ struct pdr_service *pds;
+ bool found = false;
+
+ if (!ind_msg || !ind_msg->service_path[0] ||
+ strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (strcmp(pds->service_path, ind_msg->service_path))
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!found)
+ return;
+
+ pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
+ ind_msg->service_path, ind_msg->curr_state,
+ ind_msg->transaction_id);
+
+ ind = kzalloc(sizeof(*ind), GFP_KERNEL);
+ if (!ind)
+ return;
+
+ ind->transaction_id = ind_msg->transaction_id;
+ ind->curr_state = ind_msg->curr_state;
+ ind->pds = pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_add_tail(&ind->node, &pdr->indack_list);
+ mutex_unlock(&pdr->list_lock);
+
+ queue_work(pdr->indack_wq, &pdr->indack_work);
+}
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SERVREG_STATE_UPDATED_IND_ID,
+ .ei = servreg_state_updated_ind_ei,
+ .decoded_size = sizeof(struct servreg_state_updated_ind),
+ .fn = pdr_indication_cb,
+ },
+ {}
+};
+
+static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+ struct servreg_get_domain_list_resp *resp,
+ struct pdr_handle *pdr)
+{
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->locator_hdl, &txn,
+ servreg_get_domain_list_resp_ei, resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->locator_hdl,
+ &pdr->locator_addr,
+ &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ servreg_get_domain_list_req_ei,
+ req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s get domain list txn wait failed: %d\n",
+ req->service_name, ret);
+ return ret;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s get domain list failed: 0x%x\n",
+ req->service_name, resp->resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_get_domain_list_resp *resp;
+ struct servreg_get_domain_list_req req;
+ struct servreg_location_entry *entry;
+ int domains_read = 0;
+ int ret, i;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Prepare req message */
+ strcpy(req.service_name, pds->service_name);
+ req.domain_offset_valid = true;
+ req.domain_offset = 0;
+
+ do {
+ req.domain_offset = domains_read;
+ ret = pdr_get_domain_list(&req, resp, pdr);
+ if (ret < 0)
+ goto out;
+
+ for (i = domains_read; i < resp->domain_list_len; i++) {
+ entry = &resp->domain_list[i];
+
+ if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+ continue;
+
+ if (!strcmp(entry->name, pds->service_path)) {
+ pds->service_data_valid = entry->service_data_valid;
+ pds->service_data = entry->service_data;
+ pds->instance = entry->instance;
+ goto out;
+ }
+ }
+
+ /* Update ret to indicate that the service is not yet found */
+ ret = -ENXIO;
+
+ /* Always read total_domains from the response msg */
+ if (resp->domain_list_len > resp->total_domains)
+ resp->domain_list_len = resp->total_domains;
+
+ domains_read += resp->domain_list_len;
+ } while (domains_read < resp->total_domains);
+out:
+ kfree(resp);
+ return ret;
+}
+
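The locator may return the domain list in chunks, so the loop above pages through it
with domain_offset until total_domains entries have been seen. A small stand-alone
model of that pagination (plain C; the totals and chunk size are illustrative):

    #include <stdio.h>

    int main(void)
    {
            int total_domains = 10;   /* as reported by the locator */
            int max_chunk = 4;        /* per-response list capacity */
            int domains_read = 0;

            while (domains_read < total_domains) {
                    int chunk = total_domains - domains_read;

                    if (chunk > max_chunk)
                            chunk = max_chunk;
                    printf("request offset %d -> %d entries\n",
                           domains_read, chunk);
                    domains_read += chunk;
            }
            return 0;
    }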
+static void pdr_notify_lookup_failure(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ int err)
+{
+ pr_err("PDR: service lookup for %s failed: %d\n",
+ pds->service_name, err);
+
+ if (err == -ENXIO)
+ return;
+
+ list_del(&pds->node);
+ pds->state = SERVREG_LOCATOR_ERR;
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ kfree(pds);
+}
+
+static void pdr_locator_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ locator_work);
+ struct pdr_service *pds, *tmp;
+ int ret = 0;
+
+ /* Bail out early if the SERVREG LOCATOR QMI service is not up */
+ mutex_lock(&pdr->lock);
+ if (!pdr->locator_init_complete) {
+ mutex_unlock(&pdr->lock);
+ pr_debug("PDR: SERVICE LOCATOR service not available\n");
+ return;
+ }
+ mutex_unlock(&pdr->lock);
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ if (!pds->need_locator_lookup)
+ continue;
+
+ ret = pdr_locate_service(pdr, pds);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1,
+ pds->instance);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ pds->need_locator_lookup = false;
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+/**
+ * pdr_add_lookup() - register a tracking request for a PD
+ * @pdr: PDR client handle
+ * @service_name: service name of the tracking request
+ * @service_path: service path of the tracking request
+ *
+ * Registering a pdr lookup allows for tracking the life cycle of the PD.
+ *
+ * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is
+ * returned if a lookup is already in progress for the given service path.
+ */
+struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
+ const char *service_name,
+ const char *service_path)
+{
+ struct pdr_service *pds, *tmp;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return ERR_PTR(-EINVAL);
+
+ if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
+ !service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
+ return ERR_PTR(-EINVAL);
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return ERR_PTR(-ENOMEM);
+
+ pds->service = SERVREG_NOTIFIER_SERVICE;
+ strcpy(pds->service_name, service_name);
+ strcpy(pds->service_path, service_path);
+ pds->need_locator_lookup = true;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (strcmp(tmp->service_path, service_path))
+ continue;
+
+ mutex_unlock(&pdr->list_lock);
+ ret = -EALREADY;
+ goto err;
+ }
+
+ list_add(&pds->node, &pdr->lookups);
+ mutex_unlock(&pdr->list_lock);
+
+ schedule_work(&pdr->locator_work);
+
+ return pds;
+err:
+ kfree(pds);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_add_lookup);
+
+/**
+ * pdr_restart_pd() - restart PD
+ * @pdr: PDR client handle
+ * @pds: PD service handle
+ *
+ * Restarts the PD tracked by the PDR client handle for a given service path.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_restart_pd_resp resp;
+ struct servreg_restart_pd_req req = { 0 };
+ struct sockaddr_qrtr addr;
+ struct pdr_service *tmp;
+ struct qmi_txn txn;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds))
+ return -EINVAL;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (tmp != pds)
+ continue;
+
+ if (!pds->service_connected)
+ break;
+
+ /* Prepare req message */
+ strcpy(req.service_path, pds->service_path);
+ addr = pds->addr;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!req.service_path[0])
+ return -EINVAL;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_restart_pd_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &addr,
+ &txn, SERVREG_RESTART_PD_REQ,
+ SERVREG_RESTART_PD_REQ_MAX_LEN,
+ servreg_restart_pd_req_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s PD restart txn wait failed: %d\n",
+ req.service_path, ret);
+ return ret;
+ }
+
+ /* Check whether the response indicates PD restart is disabled */
+ if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+ resp.resp.error == QMI_ERR_DISABLED_V01) {
+ pr_err("PDR: %s PD restart is disabled: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EOPNOTSUPP;
+ }
+
+ /* Check the response for other error cases */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s request for PD restart failed: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pdr_restart_pd);
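+
+/*
+ * Example (an illustrative sketch, not part of this patch): a client holding
+ * the pdr_service pointer returned by pdr_add_lookup() could request a PD
+ * restart after a fatal error report; the error handling shown here is
+ * hypothetical.
+ *
+ * ret = pdr_restart_pd(pdr, pds);
+ * if (ret == -EOPNOTSUPP)
+ * pr_warn("PDR: PD restart disabled by firmware\n");
+ * else if (ret < 0)
+ * pr_err("PDR: PD restart failed: %d\n", ret);
+ */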
+
+/**
+ * pdr_handle_alloc() - initialize the PDR client handle
+ * @status: function to be called on PD state change
+ * @priv: handle for client's use
+ *
+ * Initializes the PDR client handle to allow for tracking/restart of PDs.
+ *
+ * Return: pdr_handle object on success, ERR_PTR on failure.
+ */
+struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
+ char *service_path,
+ void *priv), void *priv)
+{
+ struct pdr_handle *pdr;
+ int ret;
+
+ if (!status)
+ return ERR_PTR(-EINVAL);
+
+ pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
+ if (!pdr)
+ return ERR_PTR(-ENOMEM);
+
+ pdr->status = status;
+ pdr->priv = priv;
+
+ mutex_init(&pdr->status_lock);
+ mutex_init(&pdr->list_lock);
+ mutex_init(&pdr->lock);
+
+ INIT_LIST_HEAD(&pdr->lookups);
+ INIT_LIST_HEAD(&pdr->indack_list);
+
+ INIT_WORK(&pdr->locator_work, pdr_locator_work);
+ INIT_WORK(&pdr->notifier_work, pdr_notifier_work);
+ INIT_WORK(&pdr->indack_work, pdr_indack_work);
+
+ pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
+ if (!pdr->notifier_wq) {
+ ret = -ENOMEM;
+ goto free_pdr_handle;
+ }
+
+ pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
+ if (!pdr->indack_wq) {
+ ret = -ENOMEM;
+ goto destroy_notifier;
+ }
+
+ ret = qmi_handle_init(&pdr->locator_hdl,
+ SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+ &pdr_locator_ops, NULL);
+ if (ret < 0)
+ goto destroy_indack;
+
+ ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ ret = qmi_handle_init(&pdr->notifier_hdl,
+ SERVREG_STATE_UPDATED_IND_MAX_LEN,
+ &pdr_notifier_ops,
+ qmi_indication_handler);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ return pdr;
+
+release_qmi_handle:
+ qmi_handle_release(&pdr->locator_hdl);
+destroy_indack:
+ destroy_workqueue(pdr->indack_wq);
+destroy_notifier:
+ destroy_workqueue(pdr->notifier_wq);
+free_pdr_handle:
+ kfree(pdr);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_handle_alloc);
+
+/**
+ * pdr_handle_release() - release the PDR client handle
+ * @pdr: PDR client handle
+ *
+ * Cleans up pending tracking requests and releases the underlying qmi handles.
+ */
+void pdr_handle_release(struct pdr_handle *pdr)
+{
+ struct pdr_service *pds, *tmp;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ list_del(&pds->node);
+ kfree(pds);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ cancel_work_sync(&pdr->locator_work);
+ cancel_work_sync(&pdr->notifier_work);
+ cancel_work_sync(&pdr->indack_work);
+
+ destroy_workqueue(pdr->notifier_wq);
+ destroy_workqueue(pdr->indack_wq);
+
+ qmi_handle_release(&pdr->locator_hdl);
+ qmi_handle_release(&pdr->notifier_hdl);
+
+ kfree(pdr);
+}
+EXPORT_SYMBOL(pdr_handle_release);
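+
+/*
+ * Typical client usage (an illustrative sketch only; the callback body and
+ * the "avs/audio"/"msm/adsp/audio_pd" strings are examples rather than
+ * anything mandated by this API):
+ *
+ * static void client_pdr_cb(int state, char *service_path, void *priv)
+ * {
+ * if (state == SERVREG_SERVICE_STATE_DOWN)
+ * pr_info("PDR: %s is down\n", service_path);
+ * else if (state == SERVREG_SERVICE_STATE_UP)
+ * pr_info("PDR: %s is up\n", service_path);
+ * }
+ *
+ * pdr = pdr_handle_alloc(client_pdr_cb, NULL);
+ * if (IS_ERR(pdr))
+ * return PTR_ERR(pdr);
+ *
+ * pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
+ * if (IS_ERR(pds)) {
+ * pdr_handle_release(pdr);
+ * return PTR_ERR(pds);
+ * }
+ */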
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
new file mode 100644
index 000000000000..15b5002e4127
--- /dev/null
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER_INTERNAL__
+#define __QCOM_PDR_HELPER_INTERNAL__
+
+#include <linux/soc/qcom/pdr.h>
+
+#define SERVREG_LOCATOR_SERVICE 0x40
+#define SERVREG_NOTIFIER_SERVICE 0x42
+
+#define SERVREG_REGISTER_LISTENER_REQ 0x20
+#define SERVREG_GET_DOMAIN_LIST_REQ 0x21
+#define SERVREG_STATE_UPDATED_IND_ID 0x22
+#define SERVREG_SET_ACK_REQ 0x23
+#define SERVREG_RESTART_PD_REQ 0x24
+
+#define SERVREG_DOMAIN_LIST_LENGTH 32
+#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
+#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
+#define SERVREG_SET_ACK_REQ_LEN 72
+#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
+#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
+#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389
+
+struct servreg_location_entry {
+ char name[SERVREG_NAME_LENGTH + 1];
+ u8 service_data_valid;
+ u32 service_data;
+ u32 instance;
+};
+
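+/*
+ * Note: each qmi_elem_info table below drives the generic QMI message
+ * encoder/decoder: every entry binds one TLV (tlv_type) to a struct
+ * field (offset), with data_type/elem_size/elem_len describing the
+ * on-wire representation.
+ */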
+struct qmi_elem_info servreg_location_entry_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ instance),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_req {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ u8 domain_offset_valid;
+ u32 domain_offset;
+};
+
+struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ service_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_resp {
+ struct qmi_response_type_v01 resp;
+ u8 total_domains_valid;
+ u16 total_domains;
+ u8 db_rev_count_valid;
+ u16 db_rev_count;
+ u8 domain_list_valid;
+ u32 domain_list_len;
+ struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
+};
+
+struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
+ .elem_size = sizeof(struct servreg_location_entry),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list),
+ .ei_array = servreg_location_entry_ei,
+ },
+ {}
+};
+
+struct servreg_register_listener_req {
+ u8 enable;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_register_listener_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_register_listener_req,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_register_listener_resp {
+ struct qmi_response_type_v01 resp;
+ u8 curr_state_valid;
+ enum servreg_service_state curr_state;
+};
+
+struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state),
+ },
+ {}
+};
+
+struct servreg_restart_pd_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_restart_pd_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_restart_pd_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_restart_pd_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct servreg_state_updated_ind {
+ enum servreg_service_state curr_state;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_set_ack_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_set_ack_req,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_req,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+#endif
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 006ac40c526a..f43a2e07ee83 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -200,7 +200,7 @@ static irqreturn_t qmp_intr(int irq, void *data)
{
struct qmp *qmp = data;
- wake_up_interruptible_all(&qmp->event);
+ wake_up_all(&qmp->event);
return IRQ_HANDLED;
}
@@ -225,6 +225,7 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
+ size_t tlen;
int ret;
if (WARN_ON(len + sizeof(u32) > qmp->size))
@@ -239,6 +240,9 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
data, len / sizeof(u32));
writel(len, qmp->msgram + qmp->offset);
+
+ /* Read back len to confirm the data was written to message RAM */
+ tlen = readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);
time_left = wait_event_interruptible_timeout(qmp->event,
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index a7bbbb67991c..6eec32b97f83 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -110,5 +110,6 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
int rpmh_rsc_invalidate(struct rsc_drv *drv);
void rpmh_tx_done(const struct tcs_request *msg, int r);
+int rpmh_flush(struct rpmh_ctrlr *ctrlr);
#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index e278fc11fe5c..b71822131f59 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -277,7 +277,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
- trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+ trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 035091fd44b8..eb0ded059d2e 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -23,7 +23,7 @@
#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
-#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
+#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
struct rpmh_request name = { \
.msg = { \
.state = s, \
@@ -33,7 +33,7 @@
}, \
.cmd = { { 0 } }, \
.completion = q, \
- .dev = dev, \
+ .dev = device, \
.needs_free = false, \
}
@@ -427,11 +427,10 @@ static int is_req_valid(struct cache_req *req)
req->sleep_val != req->wake_val);
}
-static int send_single(const struct device *dev, enum rpmh_state state,
+static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
u32 addr, u32 data)
{
- DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
@@ -445,7 +444,7 @@ static int send_single(const struct device *dev, enum rpmh_state state,
/**
* rpmh_flush: Flushes the buffered active and sleep sets to TCS
*
- * @dev: The device making the request
+ * @ctrlr: the controller making the request to flush cached data
*
* Return: -EBUSY if the controller is busy, probably waiting on a response
* to a RPMH request sent earlier.
@@ -454,10 +453,9 @@ static int send_single(const struct device *dev, enum rpmh_state state,
* that is powering down the entire system. Since no other RPMH API would be
* executing at this time, it is safe to run lockless.
*/
-int rpmh_flush(const struct device *dev)
+int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
int ret;
if (!ctrlr->dirty) {
@@ -480,11 +478,12 @@ int rpmh_flush(const struct device *dev)
__func__, p->addr, p->sleep_val, p->wake_val);
continue;
}
- ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+ ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
+ p->sleep_val);
if (ret)
return ret;
- ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
- p->addr, p->wake_val);
+ ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
+ p->wake_val);
if (ret)
return ret;
}
@@ -493,7 +492,6 @@ int rpmh_flush(const struct device *dev)
return 0;
}
-EXPORT_SYMBOL(rpmh_flush);
/**
* rpmh_invalidate: Invalidate all sleep and active sets
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 7864b75ce569..ebb49aee179b 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -277,7 +277,7 @@ static int show_image_##type(struct seq_file *seq, void *p) \
{ \
struct smem_image_version *image_version = seq->private; \
seq_puts(seq, image_version->type); \
- seq_puts(seq, "\n"); \
+ seq_putc(seq, '\n'); \
return 0; \
} \
static int open_image_##type(struct inode *inode, struct file *file) \
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index ba2b8b51d2d9..1982c7fb45fa 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -116,6 +116,7 @@ config ARCH_R8A7779
bool "R-Car H1 (R8A77790)"
select ARCH_RCAR_GEN1
select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select SYSC_R8A7779
@@ -163,6 +164,7 @@ config ARCH_SH73A0
bool "SH-Mobile AG5 (R8A73A00)"
select ARCH_RMOBILE
select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select RENESAS_INTC_IRQPIN
@@ -193,19 +195,19 @@ config ARCH_R8A774C0
This enables support for the Renesas RZ/G2E SoC.
config ARCH_R8A77950
- bool
+ bool "Renesas R-Car H3 ES1.x SoC Platform"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A7795
+ help
+ This enables support for the Renesas R-Car H3 SoC (revision 1.x).
config ARCH_R8A77951
- bool
-
-config ARCH_R8A7795
- bool "Renesas R-Car H3 SoC Platform"
- select ARCH_R8A77950
- select ARCH_R8A77951
+ bool "Renesas R-Car H3 ES2.0+ SoC Platform"
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
- This enables support for the Renesas R-Car H3 SoC.
+ This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
+ later).
config ARCH_R8A77960
bool "Renesas R-Car M3-W SoC Platform"
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 8d074489fba9..0fc3b119930a 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Renesas R-Car System Controller
*
* Copyright (C) 2016 Glider bvba
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 850f5733dc88..35dba8b8814e 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -259,7 +259,7 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7794
{ .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
#endif
-#ifdef CONFIG_ARCH_R8A7795
+#if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951)
{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 1699dda6b393..1c533a969f54 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3,7 +3,7 @@
* drivers/soc/tegra/pmc.c
*
* Copyright (c) 2010 Google, Inc
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -13,9 +13,13 @@
#include <linux/arm-smccc.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/clk-conf.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -48,6 +52,7 @@
#include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
+#include <dt-bindings/soc/tegra-pmc.h>
#define PMC_CNTRL 0x0
#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
@@ -57,12 +62,15 @@
#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
#define PMC_CNTRL_PWRREQ_POLARITY BIT(8)
+#define PMC_CNTRL_BLINK_EN 7
#define PMC_CNTRL_MAIN_RST BIT(4)
#define PMC_WAKE_MASK 0x0c
#define PMC_WAKE_LEVEL 0x10
#define PMC_WAKE_STATUS 0x14
#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK 20
#define DPD_SAMPLE 0x020
#define DPD_SAMPLE_ENABLE BIT(0)
@@ -75,6 +83,7 @@
#define PWRGATE_STATUS 0x38
+#define PMC_BLINK_TIMER 0x40
#define PMC_IMPL_E_33V_PWR 0x40
#define PMC_PWR_DET 0x48
@@ -100,6 +109,8 @@
#define PMC_WAKE2_STATUS 0x168
#define PMC_SW_WAKE2_STATUS 0x16c
+#define PMC_CLK_OUT_CNTRL 0x1a8
+#define PMC_CLK_OUT_MUX_MASK GENMASK(1, 0)
#define PMC_SENSOR_CTRL 0x1b0
#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
@@ -155,6 +166,71 @@
#define TEGRA_SMC_PMC_READ 0xaa
#define TEGRA_SMC_PMC_WRITE 0xbb
+struct pmc_clk {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 mux_shift;
+ u32 force_en_shift;
+};
+
+#define to_pmc_clk(_hw) container_of(_hw, struct pmc_clk, hw)
+
+struct pmc_clk_gate {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 shift;
+};
+
+#define to_pmc_clk_gate(_hw) container_of(_hw, struct pmc_clk_gate, hw)
+
+struct pmc_clk_init_data {
+ char *name;
+ const char *const *parents;
+ int num_parents;
+ int clk_id;
+ u8 mux_shift;
+ u8 force_en_shift;
+};
+
+static const char * const clk_out1_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern1",
+};
+
+static const char * const clk_out2_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern2",
+};
+
+static const char * const clk_out3_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern3",
+};
+
+static const struct pmc_clk_init_data tegra_pmc_clks_data[] = {
+ {
+ .name = "pmc_clk_out_1",
+ .parents = clk_out1_parents,
+ .num_parents = ARRAY_SIZE(clk_out1_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_1,
+ .mux_shift = 6,
+ .force_en_shift = 2,
+ },
+ {
+ .name = "pmc_clk_out_2",
+ .parents = clk_out2_parents,
+ .num_parents = ARRAY_SIZE(clk_out2_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_2,
+ .mux_shift = 14,
+ .force_en_shift = 10,
+ },
+ {
+ .name = "pmc_clk_out_3",
+ .parents = clk_out3_parents,
+ .num_parents = ARRAY_SIZE(clk_out3_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_3,
+ .mux_shift = 22,
+ .force_en_shift = 18,
+ },
+};
+
struct tegra_powergate {
struct generic_pm_domain genpd;
struct tegra_pmc *pmc;
@@ -254,6 +330,10 @@ struct tegra_pmc_soc {
*/
const struct tegra_wake_event *wake_events;
unsigned int num_wake_events;
+
+ const struct pmc_clk_init_data *pmc_clks_data;
+ unsigned int num_pmc_clks;
+ bool has_blink_output;
};
static const char * const tegra186_reset_sources[] = {
@@ -2163,6 +2243,258 @@ static int tegra_pmc_clk_notify_cb(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void pmc_clk_fence_udelay(u32 offset)
+{
+ tegra_pmc_readl(pmc, offset);
+ /* pmc clk propagation delay 2 us */
+ udelay(2);
+}
+
+static u8 pmc_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) >> clk->mux_shift;
+ val &= PMC_CLK_OUT_MUX_MASK;
+
+ return val;
+}
+
+static int pmc_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs);
+ val &= ~(PMC_CLK_OUT_MUX_MASK << clk->mux_shift);
+ val |= index << clk->mux_shift;
+ tegra_pmc_writel(pmc, val, clk->offs);
+ pmc_clk_fence_udelay(clk->offs);
+
+ return 0;
+}
+
+static int pmc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) & BIT(clk->force_en_shift);
+
+ return val ? 1 : 0;
+}
+
+static void pmc_clk_set_state(unsigned long offs, u32 shift, int state)
+{
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, offs);
+ val = state ? (val | BIT(shift)) : (val & ~BIT(shift));
+ tegra_pmc_writel(pmc, val, offs);
+ pmc_clk_fence_udelay(offs);
+}
+
+static int pmc_clk_enable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_disable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 0);
+}
+
+static const struct clk_ops pmc_clk_ops = {
+ .get_parent = pmc_clk_mux_get_parent,
+ .set_parent = pmc_clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+ .is_enabled = pmc_clk_is_enabled,
+ .enable = pmc_clk_enable,
+ .disable = pmc_clk_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_out_register(struct tegra_pmc *pmc,
+ const struct pmc_clk_init_data *data,
+ unsigned long offset)
+{
+ struct clk_init_data init;
+ struct pmc_clk *pmc_clk;
+
+ pmc_clk = devm_kzalloc(pmc->dev, sizeof(*pmc_clk), GFP_KERNEL);
+ if (!pmc_clk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = data->name;
+ init.ops = &pmc_clk_ops;
+ init.parent_names = data->parents;
+ init.num_parents = data->num_parents;
+ init.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT |
+ CLK_SET_PARENT_GATE;
+
+ pmc_clk->hw.init = &init;
+ pmc_clk->offs = offset;
+ pmc_clk->mux_shift = data->mux_shift;
+ pmc_clk->force_en_shift = data->force_en_shift;
+
+ return clk_register(NULL, &pmc_clk->hw);
+}
+
+static int pmc_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ return tegra_pmc_readl(pmc, gate->offs) & BIT(gate->shift) ? 1 : 0;
+}
+
+static int pmc_clk_gate_enable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_gate_disable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 0);
+}
+
+static const struct clk_ops pmc_clk_gate_ops = {
+ .is_enabled = pmc_clk_gate_is_enabled,
+ .enable = pmc_clk_gate_enable,
+ .disable = pmc_clk_gate_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_gate_register(struct tegra_pmc *pmc, const char *name,
+ const char *parent_name, unsigned long offset,
+ u32 shift)
+{
+ struct clk_init_data init;
+ struct pmc_clk_gate *gate;
+
+ gate = devm_kzalloc(pmc->dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pmc_clk_gate_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ gate->hw.init = &init;
+ gate->offs = offset;
+ gate->shift = shift;
+
+ return clk_register(NULL, &gate->hw);
+}
+
+static void tegra_pmc_clock_register(struct tegra_pmc *pmc,
+ struct device_node *np)
+{
+ struct clk *clk;
+ struct clk_onecell_data *clk_data;
+ unsigned int num_clks;
+ int i, err;
+
+ num_clks = pmc->soc->num_pmc_clks;
+ if (pmc->soc->has_blink_output)
+ num_clks += 1;
+
+ if (!num_clks)
+ return;
+
+ clk_data = devm_kmalloc(pmc->dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clks = devm_kcalloc(pmc->dev, TEGRA_PMC_CLK_MAX,
+ sizeof(*clk_data->clks), GFP_KERNEL);
+ if (!clk_data->clks)
+ return;
+
+ clk_data->clk_num = TEGRA_PMC_CLK_MAX;
+
+ for (i = 0; i < TEGRA_PMC_CLK_MAX; i++)
+ clk_data->clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < pmc->soc->num_pmc_clks; i++) {
+ const struct pmc_clk_init_data *data;
+
+ data = pmc->soc->pmc_clks_data + i;
+
+ clk = tegra_pmc_clk_out_register(pmc, data, PMC_CLK_OUT_CNTRL);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev, "unable to register clock %s: %d\n",
+ data->name, PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, data->name, NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register %s clock lookup: %d\n",
+ data->name, err);
+ return;
+ }
+
+ clk_data->clks[data->clk_id] = clk;
+ }
+
+ if (pmc->soc->has_blink_output) {
+ tegra_pmc_writel(pmc, 0x0, PMC_BLINK_TIMER);
+ clk = tegra_pmc_clk_gate_register(pmc,
+ "pmc_blink_override",
+ "clk_32k",
+ PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink_override: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ clk = tegra_pmc_clk_gate_register(pmc, "pmc_blink",
+ "pmc_blink_override",
+ PMC_CNTRL,
+ PMC_CNTRL_BLINK_EN);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, "pmc_blink", NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink lookup: %d\n",
+ err);
+ return;
+ }
+
+ clk_data->clks[TEGRA_PMC_CLK_BLINK] = clk;
+ }
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ if (err)
+ dev_warn(pmc->dev, "failed to add pmc clock provider: %d\n",
+ err);
+}
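+
+/*
+ * Consumer view (an illustrative sketch, not code added by this patch):
+ * with the clkdev lookup registered above, a board could enable one of
+ * the outputs by name. Parent selection would typically come from DT
+ * assigned-clocks properties; the consumer code itself is hypothetical.
+ *
+ * struct clk *out = clk_get(NULL, "pmc_clk_out_2");
+ *
+ * if (!IS_ERR(out))
+ * clk_prepare_enable(out);
+ */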
+
static int tegra_pmc_probe(struct platform_device *pdev)
{
void __iomem *base;
@@ -2281,6 +2613,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->base = base;
mutex_unlock(&pmc->powergates_lock);
+ tegra_pmc_clock_register(pmc, pdev->dev.of_node);
platform_set_drvdata(pdev, pmc);
return 0;
@@ -2422,6 +2755,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.num_reset_sources = 0,
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = true,
};
static const char * const tegra30_powergates[] = {
@@ -2469,6 +2805,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra114_powergates[] = {
@@ -2520,6 +2859,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra124_powergates[] = {
@@ -2569,38 +2911,38 @@ static const u8 tegra124_cpu_powergates[] = {
.name = (_name) \
})
-#define TEGRA124_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
- _pad(TEGRA_IO_PAD_BB, 15, UINT_MAX, "bb"), \
- _pad(TEGRA_IO_PAD_CAM, 36, UINT_MAX, "cam"), \
- _pad(TEGRA_IO_PAD_COMP, 22, UINT_MAX, "comp"), \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csb"), \
- _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "cse"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_HV, 38, UINT_MAX, "hv"), \
- _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_NAND, 13, UINT_MAX, "nand"), \
- _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC1, 33, UINT_MAX, "sdmmc1"), \
- _pad(TEGRA_IO_PAD_SDMMC3, 34, UINT_MAX, "sdmmc3"), \
- _pad(TEGRA_IO_PAD_SDMMC4, 35, UINT_MAX, "sdmmc4"), \
- _pad(TEGRA_IO_PAD_SYS_DDC, 58, UINT_MAX, "sys_ddc"), \
- _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb_bias")
+#define TEGRA124_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_BB, 15, UINT_MAX, "bb"), \
+ _pad(TEGRA_IO_PAD_CAM, 36, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_COMP, 22, UINT_MAX, "comp"), \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csb"), \
+ _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "cse"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_HV, 38, UINT_MAX, "hv"), \
+ _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_NAND, 13, UINT_MAX, "nand"), \
+ _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC1, 33, UINT_MAX, "sdmmc1"), \
+ _pad(TEGRA_IO_PAD_SDMMC3, 34, UINT_MAX, "sdmmc3"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 35, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_SYS_DDC, 58, UINT_MAX, "sys_ddc"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb_bias")
static const struct tegra_io_pad_soc tegra124_io_pads[] = {
TEGRA124_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2631,6 +2973,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra210_powergates[] = {
@@ -2667,46 +3012,46 @@ static const u8 tegra210_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
-#define TEGRA210_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_AUDIO, 17, 5, "audio"), \
- _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 18, "audio-hv"), \
- _pad(TEGRA_IO_PAD_CAM, 36, 10, "cam"), \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
- _pad(TEGRA_IO_PAD_CSIC, 42, UINT_MAX, "csic"), \
- _pad(TEGRA_IO_PAD_CSID, 43, UINT_MAX, "csid"), \
- _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "csie"), \
- _pad(TEGRA_IO_PAD_CSIF, 45, UINT_MAX, "csif"), \
- _pad(TEGRA_IO_PAD_DBG, 25, 19, "dbg"), \
- _pad(TEGRA_IO_PAD_DEBUG_NONAO, 26, UINT_MAX, "debug-nonao"), \
- _pad(TEGRA_IO_PAD_DMIC, 50, 20, "dmic"), \
- _pad(TEGRA_IO_PAD_DP, 51, UINT_MAX, "dp"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_EMMC, 35, UINT_MAX, "emmc"), \
- _pad(TEGRA_IO_PAD_EMMC2, 37, UINT_MAX, "emmc2"), \
- _pad(TEGRA_IO_PAD_GPIO, 27, 21, "gpio"), \
- _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, UINT_MAX, 11, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC1, 33, 12, "sdmmc1"), \
- _pad(TEGRA_IO_PAD_SDMMC3, 34, 13, "sdmmc3"), \
- _pad(TEGRA_IO_PAD_SPI, 46, 22, "spi"), \
- _pad(TEGRA_IO_PAD_SPI_HV, 47, 23, "spi-hv"), \
- _pad(TEGRA_IO_PAD_UART, 14, 2, "uart"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB3, 18, UINT_MAX, "usb3"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias")
+#define TEGRA210_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, 5, "audio"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 18, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_CAM, 36, 10, "cam"), \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_CSIC, 42, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 43, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 45, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, 19, "dbg"), \
+ _pad(TEGRA_IO_PAD_DEBUG_NONAO, 26, UINT_MAX, "debug-nonao"), \
+ _pad(TEGRA_IO_PAD_DMIC, 50, 20, "dmic"), \
+ _pad(TEGRA_IO_PAD_DP, 51, UINT_MAX, "dp"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_EMMC, 35, UINT_MAX, "emmc"), \
+ _pad(TEGRA_IO_PAD_EMMC2, 37, UINT_MAX, "emmc2"), \
+ _pad(TEGRA_IO_PAD_GPIO, 27, 21, "gpio"), \
+ _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, UINT_MAX, 11, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC1, 33, 12, "sdmmc1"), \
+ _pad(TEGRA_IO_PAD_SDMMC3, 34, 13, "sdmmc3"), \
+ _pad(TEGRA_IO_PAD_SPI, 46, 22, "spi"), \
+ _pad(TEGRA_IO_PAD_SPI_HV, 47, 23, "spi-hv"), \
+ _pad(TEGRA_IO_PAD_UART, 14, 2, "uart"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB3, 18, UINT_MAX, "usb3"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias")
static const struct tegra_io_pad_soc tegra210_io_pads[] = {
TEGRA210_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2745,48 +3090,51 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_reset_levels = 0,
.num_wake_events = ARRAY_SIZE(tegra210_wake_events),
.wake_events = tegra210_wake_events,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
-#define TEGRA186_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias"), \
- _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
- _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
- _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
- _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC2_HV, 34, 5, "sdmmc2-hv"), \
- _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
- _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
- _pad(TEGRA_IO_PAD_DSIB, 40, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 41, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 42, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
- _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
- _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
- _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
- _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
- _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
- _pad(TEGRA_IO_PAD_DMIC_HV, 52, 2, "dmic-hv"), \
- _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
- _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
- _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
- _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
- _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
- _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
+#define TEGRA186_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC2_HV, 34, 5, "sdmmc2-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_DSIB, 40, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 41, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 42, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
+ _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
+ _pad(TEGRA_IO_PAD_DMIC_HV, 52, 2, "dmic-hv"), \
+ _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
+ _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
+ _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
static const struct tegra_io_pad_soc tegra186_io_pads[] = {
TEGRA186_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2874,56 +3222,69 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra186_wake_events),
.wake_events = tegra186_wake_events,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
};
+#define TEGRA194_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_EQOS, 8, UINT_MAX, "eqos"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_2_BIAS, 9, UINT_MAX, "pex-clk-2-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_2, 10, UINT_MAX, "pex-clk-2"), \
+ _pad(TEGRA_IO_PAD_DAP3, 11, UINT_MAX, "dap3"), \
+ _pad(TEGRA_IO_PAD_DAP5, 12, UINT_MAX, "dap5"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_PWR_CTL, 15, UINT_MAX, "pwr-ctl"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO53, 16, UINT_MAX, "soc-gpio53"), \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_GP_PWM2, 18, UINT_MAX, "gp-pwm2"), \
+ _pad(TEGRA_IO_PAD_GP_PWM3, 19, UINT_MAX, "gp-pwm3"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO12, 20, UINT_MAX, "soc-gpio12"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO13, 21, UINT_MAX, "soc-gpio13"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO10, 22, UINT_MAX, "soc-gpio10"), \
+ _pad(TEGRA_IO_PAD_UART4, 23, UINT_MAX, "uart4"), \
+ _pad(TEGRA_IO_PAD_UART5, 24, UINT_MAX, "uart5"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP3, 26, UINT_MAX, "hdmi-dp3"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP2, 27, UINT_MAX, "hdmi-dp2"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_PEX_CTL2, 33, UINT_MAX, "pex-ctl2"), \
+ _pad(TEGRA_IO_PAD_PEX_L0_RST_N, 34, UINT_MAX, "pex-l0-rst"), \
+ _pad(TEGRA_IO_PAD_PEX_L1_RST_N, 35, UINT_MAX, "pex-l1-rst"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_PEX_L5_RST_N, 37, UINT_MAX, "pex-l5-rst"), \
+ _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
+ _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
+ _pad(TEGRA_IO_PAD_CSIG, 50, UINT_MAX, "csig"), \
+ _pad(TEGRA_IO_PAD_CSIH, 51, UINT_MAX, "csih"), \
+ _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
+ _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
+ _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
+
static const struct tegra_io_pad_soc tegra194_io_pads[] = {
- { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK_BIAS, .dpd = 4, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK3, .dpd = 5, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 7, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_EQOS, .dpd = 8, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2_BIAS, .dpd = 9, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 10, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DAP3, .dpd = 11, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DAP5, .dpd = 12, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PWR_CTL, .dpd = 15, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO53, .dpd = 16, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_GP_PWM2, .dpd = 18, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_GP_PWM3, .dpd = 19, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO12, .dpd = 20, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO13, .dpd = 21, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO10, .dpd = 22, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART4, .dpd = 23, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART5, .dpd = 24, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP3, .dpd = 26, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP2, .dpd = 27, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP0, .dpd = 28, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP1, .dpd = 29, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CTL2, .dpd = 33, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L0_RST_N, .dpd = 34, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L1_RST_N, .dpd = 35, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 36, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L5_RST_N, .dpd = 37, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIC, .dpd = 43, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSID, .dpd = 44, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIE, .dpd = 45, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIF, .dpd = 46, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SPI, .dpd = 47, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UFS, .dpd = 49, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIG, .dpd = 50, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIH, .dpd = 51, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_EDP, .dpd = 53, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC1_HV, .dpd = 55, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC3_HV, .dpd = 56, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CONN, .dpd = 60, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = UINT_MAX },
+ TEGRA194_IO_PAD_TABLE(TEGRA_IO_PAD)
+};
+
+static const struct pinctrl_pin_desc tegra194_pin_descs[] = {
+ TEGRA194_IO_PAD_TABLE(TEGRA_IO_PIN_DESC)
};
static const struct tegra_pmc_regs tegra194_pmc_regs = {
@@ -2976,10 +3337,12 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.has_tsense_reset = false,
.has_gpu_clamps = false,
.needs_mbist_war = false,
- .has_impl_33v_pwr = false,
+ .has_impl_33v_pwr = true,
.maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra194_io_pads),
.io_pads = tegra194_io_pads,
+ .num_pin_descs = ARRAY_SIZE(tegra194_pin_descs),
+ .pin_descs = tegra194_pin_descs,
.regs = &tegra194_pmc_regs,
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
@@ -2991,6 +3354,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra194_wake_events),
.wake_events = tegra194_wake_events,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
};
static const struct of_device_id tegra_pmc_match[] = {
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index ccc6d53fe788..de0123ec8ad6 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -130,6 +130,19 @@ static int am33xx_push_sram_idle(void)
return 0;
}
+static int am33xx_do_sram_idle(u32 wfi_flags)
+{
+ int ret = 0;
+
+ if (!m3_ipc || !pm_ops)
+ return 0;
+
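+ /* Notify the wkup_m3 co-processor before entering WFI, if requested */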
+ if (wfi_flags & WFI_FLAG_WAKE_M3)
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+
+ return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
+}
+
static int __init am43xx_map_gic(void)
{
gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
@@ -260,6 +273,8 @@ static int am33xx_pm_begin(suspend_state_t state)
rtc_only_idle = 0;
}
+ pm_ops->begin_suspend();
+
switch (state) {
case PM_SUSPEND_MEM:
ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
@@ -301,6 +316,8 @@ static void am33xx_pm_end(void)
}
rtc_only_idle = 0;
+
+ pm_ops->finish_suspend();
}
static int am33xx_pm_valid(suspend_state_t state)
@@ -503,7 +520,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
#endif /* CONFIG_SUSPEND */
- ret = pm_ops->init();
+ ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
ret = -ENODEV;
@@ -522,6 +539,8 @@ err_free_sram:
static int am33xx_pm_remove(struct platform_device *pdev)
{
+ if (pm_ops->deinit)
+ pm_ops->deinit();
suspend_set_ops(NULL);
wkup_m3_ipc_put(m3_ipc);
am33xx_pm_free_sram();
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 6106577fb3ed..488c3c9e4947 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -2,6 +2,7 @@
// Copyright(c) 2015-17 Intel Corporation.
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
@@ -113,6 +114,8 @@ static int sdw_delete_slave(struct device *dev, void *data)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
struct sdw_bus *bus = slave->bus;
+ pm_runtime_disable(dev);
+
sdw_slave_debugfs_exit(slave);
mutex_lock(&bus->bus_lock);
@@ -317,6 +320,92 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
return 0;
}
+/*
+ * Read/Write IO functions.
+ * The no_pm versions may only be called by the bus itself, e.g. while
+ * enumerating or handling suspend-resume sequences.
+ * All other clients need to use the PM versions.
+ */
+
+static int
+sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, slave, addr, count,
+ slave->dev_num, SDW_MSG_FLAG_READ, val);
+ if (ret < 0)
+ return ret;
+
+ return sdw_transfer(slave->bus, &msg);
+}
+
+static int
+sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, slave, addr, count,
+ slave->dev_num, SDW_MSG_FLAG_WRITE, val);
+ if (ret < 0)
+ return ret;
+
+ return sdw_transfer(slave->bus, &msg);
+}
+
+static int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ return sdw_nwrite_no_pm(slave, addr, 1, &value);
+}
+
+static int
+sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
+{
+ struct sdw_msg msg;
+ u8 buf;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
+ SDW_MSG_FLAG_READ, &buf);
+ if (ret)
+ return ret;
+
+ ret = sdw_transfer(bus, &msg);
+ if (ret < 0)
+ return ret;
+
+ return buf;
+}
+
+static int
+sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
+ SDW_MSG_FLAG_WRITE, &value);
+ if (ret)
+ return ret;
+
+ return sdw_transfer(bus, &msg);
+}
+
+static int
+sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+{
+ u8 buf;
+ int ret;
+
+ ret = sdw_nread_no_pm(slave, addr, 1, &buf);
+ if (ret < 0)
+ return ret;
+
+ return buf;
+}
+
/**
* sdw_nread() - Read "n" contiguous SDW Slave registers
* @slave: SDW Slave
@@ -326,19 +415,17 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
*/
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
- struct sdw_msg msg;
int ret;
- ret = sdw_fill_msg(&msg, slave, addr, count,
- slave->dev_num, SDW_MSG_FLAG_READ, val);
- if (ret < 0)
- return ret;
-
ret = pm_runtime_get_sync(slave->bus->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_noidle(slave->bus->dev);
return ret;
+ }
+
+ ret = sdw_nread_no_pm(slave, addr, count, val);
- ret = sdw_transfer(slave->bus, &msg);
+ pm_runtime_mark_last_busy(slave->bus->dev);
pm_runtime_put(slave->bus->dev);
return ret;
@@ -354,19 +441,17 @@ EXPORT_SYMBOL(sdw_nread);
*/
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
- struct sdw_msg msg;
int ret;
- ret = sdw_fill_msg(&msg, slave, addr, count,
- slave->dev_num, SDW_MSG_FLAG_WRITE, val);
- if (ret < 0)
- return ret;
-
ret = pm_runtime_get_sync(slave->bus->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_noidle(slave->bus->dev);
return ret;
+ }
+
+ ret = sdw_nwrite_no_pm(slave, addr, count, val);
- ret = sdw_transfer(slave->bus, &msg);
+ pm_runtime_mark_last_busy(slave->bus->dev);
pm_runtime_put(slave->bus->dev);
return ret;
@@ -486,7 +571,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
dev_num = slave->dev_num;
slave->dev_num = 0;
- ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
+ ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
if (ret < 0) {
dev_err(&slave->dev, "Program device_num %d failed: %d\n",
dev_num, ret);
@@ -504,22 +589,11 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
{
dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
- /*
- * Spec definition
- * Register Bit Contents
- * DevId_0 [7:4] 47:44 sdw_version
- * DevId_0 [3:0] 43:40 unique_id
- * DevId_1 39:32 mfg_id [15:8]
- * DevId_2 31:24 mfg_id [7:0]
- * DevId_3 23:16 part_id [15:8]
- * DevId_4 15:08 part_id [7:0]
- * DevId_5 07:00 class_id
- */
- id->sdw_version = (addr >> 44) & GENMASK(3, 0);
- id->unique_id = (addr >> 40) & GENMASK(3, 0);
- id->mfg_id = (addr >> 24) & GENMASK(15, 0);
- id->part_id = (addr >> 8) & GENMASK(15, 0);
- id->class_id = addr & GENMASK(7, 0);
+ id->sdw_version = SDW_VERSION(addr);
+ id->unique_id = SDW_UNIQUE_ID(addr);
+ id->mfg_id = SDW_MFG_ID(addr);
+ id->part_id = SDW_PART_ID(addr);
+ id->class_id = SDW_CLASS_ID(addr);
dev_dbg(bus->dev,
"SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x\n",
@@ -610,10 +684,320 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
mutex_lock(&slave->bus->bus_lock);
+
+ dev_vdbg(&slave->dev,
+ "%s: changing status slave %d status %d new status %d\n",
+ __func__, slave->dev_num, slave->status, status);
+
+ if (status == SDW_SLAVE_UNATTACHED) {
+ dev_dbg(&slave->dev,
+ "%s: initializing completion for Slave %d\n",
+ __func__, slave->dev_num);
+
+ init_completion(&slave->enumeration_complete);
+ init_completion(&slave->initialization_complete);
+
+ } else if ((status == SDW_SLAVE_ATTACHED) &&
+ (slave->status == SDW_SLAVE_UNATTACHED)) {
+ dev_dbg(&slave->dev,
+ "%s: signaling completion for Slave %d\n",
+ __func__, slave->dev_num);
+
+ complete(&slave->enumeration_complete);
+ }
slave->status = status;
mutex_unlock(&slave->bus->bus_lock);
}
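The two completions initialized on UNATTACHED and signaled on ATTACHED give other code a way to block until a Slave has re-enumerated. A hypothetical waiter, e.g. in a codec resume path (the 2s timeout is an assumption, not a value from this patch):

static int example_wait_for_enumeration(struct sdw_slave *slave)
{
        unsigned long time;

        time = wait_for_completion_timeout(&slave->enumeration_complete,
                                           msecs_to_jiffies(2000));
        if (!time) {
                dev_err(&slave->dev, "enumeration did not complete\n");
                return -ETIMEDOUT;
        }
        return 0;
}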
+static enum sdw_clk_stop_mode sdw_get_clk_stop_mode(struct sdw_slave *slave)
+{
+ enum sdw_clk_stop_mode mode;
+
+ /*
+ * Query for clock stop mode if Slave implements
+ * ops->get_clk_stop_mode, else read from property.
+ */
+ if (slave->ops && slave->ops->get_clk_stop_mode) {
+ mode = slave->ops->get_clk_stop_mode(slave);
+ } else {
+ if (slave->prop.clk_stop_mode1)
+ mode = SDW_CLK_STOP_MODE1;
+ else
+ mode = SDW_CLK_STOP_MODE0;
+ }
+
+ return mode;
+}
+
+static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
+ enum sdw_clk_stop_mode mode,
+ enum sdw_clk_stop_type type)
+{
+ int ret;
+
+ if (slave->ops && slave->ops->clk_stop) {
+ ret = slave->ops->clk_stop(slave, mode, type);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "Clk Stop type =%d failed: %d\n", type, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
+ enum sdw_clk_stop_mode mode,
+ bool prepare)
+{
+ bool wake_en;
+ u32 val = 0;
+ int ret;
+
+ wake_en = slave->prop.wake_capable;
+
+ if (prepare) {
+ val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
+
+ if (mode == SDW_CLK_STOP_MODE1)
+ val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
+
+ if (wake_en)
+ val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
+ } else {
+ val = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
+
+ val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
+ }
+
+ ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
+
+ if (ret != 0)
+ dev_err(&slave->dev,
+ "Clock Stop prepare failed for slave: %d", ret);
+
+ return ret;
+}
+
+static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
+{
+ int retry = bus->clk_stop_timeout;
+ int val;
+
+ do {
+ val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT) &
+ SDW_SCP_STAT_CLK_STP_NF;
+ if (!val) {
+ dev_info(bus->dev, "clock stop prep/de-prep done slave:%d",
+ dev_num);
+ return 0;
+ }
+
+ usleep_range(1000, 1500);
+ retry--;
+ } while (retry);
+
+ dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d",
+ dev_num);
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
+ *
+ * @bus: SDW bus instance
+ *
+ * Query Slave for clock stop mode and prepare for that mode.
+ */
+int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
+{
+ enum sdw_clk_stop_mode slave_mode;
+ bool simple_clk_stop = true;
+ struct sdw_slave *slave;
+ bool is_slave = false;
+ int ret = 0;
+
+ /*
+ * In order to save on transition time, prepare
+ * each Slave and then wait for all Slave(s) to be
+ * prepared for clock stop.
+ */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ slave_mode = sdw_get_clk_stop_mode(slave);
+ slave->curr_clk_stop_mode = slave_mode;
+
+ ret = sdw_slave_clk_stop_callback(slave, slave_mode,
+ SDW_CLK_PRE_PREPARE);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "pre-prepare failed:%d", ret);
+ return ret;
+ }
+
+ ret = sdw_slave_clk_stop_prepare(slave,
+ slave_mode, true);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "pre-prepare failed:%d", ret);
+ return ret;
+ }
+
+ if (slave_mode == SDW_CLK_STOP_MODE1)
+ simple_clk_stop = false;
+ }
+
+ if (is_slave && !simple_clk_stop) {
+ ret = sdw_bus_wait_for_clk_prep_deprep(bus,
+ SDW_BROADCAST_DEV_NUM);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Inform slaves that prep is done */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ slave_mode = slave->curr_clk_stop_mode;
+
+ if (slave_mode == SDW_CLK_STOP_MODE1) {
+ ret = sdw_slave_clk_stop_callback(slave,
+ slave_mode,
+ SDW_CLK_POST_PREPARE);
+
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "post-prepare failed:%d", ret);
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
+
+/**
+ * sdw_bus_clk_stop: stop bus clock
+ *
+ * @bus: SDW bus instance
+ *
+ * After preparing the Slaves for clock stop, stop the clock by broadcasting
+ * write to SCP_CTRL register.
+ */
+int sdw_bus_clk_stop(struct sdw_bus *bus)
+{
+ int ret;
+
+ /*
+ * broadcast clock stop now, attached Slaves will ACK this,
+ * unattached will ignore
+ */
+ ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
+ SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
+ if (ret < 0) {
+ if (ret == -ENODATA)
+ dev_dbg(bus->dev,
+ "ClockStopNow Broadcast msg ignored %d", ret);
+ else
+ dev_err(bus->dev,
+ "ClockStopNow Broadcast msg failed %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_bus_clk_stop);
+
+/**
+ * sdw_bus_exit_clk_stop: Exit clock stop mode
+ *
+ * @bus: SDW bus instance
+ *
+ * De-prepare the Slaves exiting Clock Stop Mode 0. Slaves exiting Clock
+ * Stop Mode 1 will be de-prepared after they enumerate back.
+ */
+int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
+{
+ enum sdw_clk_stop_mode mode;
+ bool simple_clk_stop = true;
+ struct sdw_slave *slave;
+ bool is_slave = false;
+ int ret;
+
+ /*
+ * In order to save on transition time, de-prepare
+ * each Slave and then wait for all Slave(s) to be
+ * de-prepared after clock resume.
+ */
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ mode = slave->curr_clk_stop_mode;
+
+ if (mode == SDW_CLK_STOP_MODE1) {
+ simple_clk_stop = false;
+ continue;
+ }
+
+ ret = sdw_slave_clk_stop_callback(slave, mode,
+ SDW_CLK_PRE_DEPREPARE);
+ if (ret < 0)
+ dev_warn(&slave->dev,
+ "clk stop deprep failed:%d", ret);
+
+ ret = sdw_slave_clk_stop_prepare(slave, mode,
+ false);
+
+ if (ret < 0)
+ dev_warn(&slave->dev,
+ "clk stop deprep failed:%d", ret);
+ }
+
+ if (is_slave && !simple_clk_stop)
+ sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (!slave->dev_num)
+ continue;
+
+ if (slave->status != SDW_SLAVE_ATTACHED &&
+ slave->status != SDW_SLAVE_ALERT)
+ continue;
+
+ mode = slave->curr_clk_stop_mode;
+ sdw_slave_clk_stop_callback(slave, mode,
+ SDW_CLK_POST_DEPREPARE);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
+
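Taken together, the three exported helpers are meant to be called in a fixed order by a Master driver, mirroring what the Cadence code later in this patch does. A hedged sketch of the expected suspend/resume flow (wrapper names are hypothetical):

static int example_master_suspend(struct sdw_bus *bus)
{
        int ret;

        ret = sdw_bus_prep_clk_stop(bus);       /* prepare all Slaves */
        if (ret < 0)
                return ret;

        return sdw_bus_clk_stop(bus);           /* broadcast ClockStopNow */
}

static int example_master_resume(struct sdw_bus *bus)
{
        /* de-prepares Mode 0 Slaves; Mode 1 Slaves re-enumerate first */
        return sdw_bus_exit_clk_stop(bus);
}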
int sdw_configure_dpn_intr(struct sdw_slave *slave,
int port, bool enable, int mask)
{
@@ -672,13 +1056,10 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
- if (ret < 0) {
+ if (ret < 0)
dev_err(slave->bus->dev,
"SDW_DP0_INTMASK read failed:%d\n", ret);
- return val;
- }
-
- return 0;
+ return ret;
}
static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
@@ -831,12 +1212,19 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
+ ret = pm_runtime_get_sync(&slave->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
+ pm_runtime_put_noidle(&slave->dev);
+ return ret;
+ }
+
/* Read Instat 1, Instat 2 and Instat 3 registers */
ret = sdw_read(slave, SDW_SCP_INT1);
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT1 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
buf = ret;
@@ -844,7 +1232,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT2/3 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
do {
@@ -924,7 +1312,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT1 write failed:%d\n", ret);
- return ret;
+ goto io_err;
}
/*
@@ -935,7 +1323,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT1 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
_buf = ret;
@@ -943,7 +1331,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (ret < 0) {
dev_err(slave->bus->dev,
"SDW_SCP_INT2/3 read failed:%d\n", ret);
- return ret;
+ goto io_err;
}
/* Make sure no interrupts are pending */
@@ -964,16 +1352,39 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
if (count == SDW_READ_INTR_CLEAR_RETRY)
dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read\n");
+io_err:
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put_autosuspend(&slave->dev);
+
return ret;
}
static int sdw_update_slave_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
- if (slave->ops && slave->ops->update_status)
- return slave->ops->update_status(slave, status);
+ unsigned long time;
- return 0;
+ if (!slave->probed) {
+ /*
+ * the slave status update is typically handled in an
+ * interrupt thread, which can race with the driver
+ * probe, e.g. when a module needs to be loaded.
+ *
+ * make sure the probe is complete before updating
+ * status.
+ */
+ time = wait_for_completion_timeout(&slave->probe_complete,
+ msecs_to_jiffies(DEFAULT_PROBE_TIMEOUT));
+ if (!time) {
+ dev_err(&slave->dev, "Probe not complete, timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (!slave->ops || !slave->ops->update_status)
+ return 0;
+
+ return slave->ops->update_status(slave, status);
}
/**
@@ -986,6 +1397,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
{
enum sdw_slave_status prev_status;
struct sdw_slave *slave;
+ bool attached_initializing;
int i, ret = 0;
/* first check if any Slaves fell off the bus */
@@ -1031,6 +1443,8 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (!slave)
continue;
+ attached_initializing = false;
+
switch (status[i]) {
case SDW_SLAVE_UNATTACHED:
if (slave->status == SDW_SLAVE_UNATTACHED)
@@ -1057,6 +1471,8 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (prev_status == SDW_SLAVE_ALERT)
break;
+ attached_initializing = true;
+
ret = sdw_initialize_slave(slave);
if (ret)
dev_err(bus->dev,
@@ -1075,8 +1491,37 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
if (ret)
dev_err(slave->bus->dev,
"Update Slave status failed:%d\n", ret);
+ if (attached_initializing)
+ complete(&slave->initialization_complete);
}
return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);
+
+void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
+{
+ struct sdw_slave *slave;
+ int i;
+
+ /* Check all non-zero devices */
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+ mutex_lock(&bus->bus_lock);
+ if (test_bit(i, bus->assigned) == false) {
+ mutex_unlock(&bus->bus_lock);
+ continue;
+ }
+ mutex_unlock(&bus->bus_lock);
+
+ slave = sdw_get_slave(bus, i);
+ if (!slave)
+ continue;
+
+ if (slave->status != SDW_SLAVE_UNATTACHED)
+ sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+
+ /* keep track of request, used in pm_runtime resume */
+ slave->unattach_request = request;
+ }
+}
+EXPORT_SYMBOL(sdw_clear_slave_status);
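A hypothetical caller in a Master resume path after a hardware reset, using the request flag defined in bus.h below (the wrapper name is made up):

static void example_master_reset_resume(struct sdw_bus *bus)
{
        /* Slaves will report as UNATTACHED until they re-enumerate */
        sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
}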
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index cb482da914da..204204a26db8 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -5,6 +5,7 @@
#define __SDW_BUS_H
#define DEFAULT_BANK_SWITCH_TIMEOUT 3000
+#define DEFAULT_PROBE_TIMEOUT 2000
#if IS_ENABLED(CONFIG_ACPI)
int sdw_acpi_find_slaves(struct sdw_bus *bus);
@@ -164,4 +165,12 @@ sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
return sdw_write(slave, addr, tmp);
}
+/*
+ * At the moment we only track Master-initiated hw_reset.
+ * Additional fields can be added as needed
+ */
+#define SDW_UNATTACH_REQUEST_MASTER_RESET BIT(0)
+
+void sdw_clear_slave_status(struct sdw_bus *bus, u32 request);
+
#endif /* __SDW_BUS_H */
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 4a465f55039f..17f096dd6806 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -110,6 +110,11 @@ static int sdw_drv_probe(struct device *dev)
slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout,
slave->prop.clk_stop_timeout);
+ slave->probed = true;
+ complete(&slave->probe_complete);
+
+ dev_dbg(dev, "probe complete\n");
+
return 0;
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 9bec270d0fa4..ecd357d1c63d 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -183,7 +183,6 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_PDI_CONFIG_PORT GENMASK(4, 0)
/* Driver defaults */
-#define CDNS_DEFAULT_SSP_INTERVAL 0x18
#define CDNS_TX_TIMEOUT 2000
#define CDNS_SCP_RX_FIFOLEVEL 0x2
@@ -211,34 +210,45 @@ static inline void cdns_updatel(struct sdw_cdns *cdns,
cdns_writel(cdns, offset, tmp);
}
-static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
+static int cdns_set_wait(struct sdw_cdns *cdns, int offset, u32 mask, u32 value)
{
int timeout = 10;
u32 reg_read;
- writel(value, cdns->registers + offset);
-
- /* Wait for bit to be self cleared */
+ /* Wait for bit to be set */
do {
reg_read = readl(cdns->registers + offset);
- if ((reg_read & value) == 0)
+ if ((reg_read & mask) == value)
return 0;
timeout--;
- udelay(50);
+ usleep_range(50, 100);
} while (timeout != 0);
- return -EAGAIN;
+ return -ETIMEDOUT;
+}
+
+static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
+{
+ writel(value, cdns->registers + offset);
+
+ /* Wait for bit to be self cleared */
+ return cdns_set_wait(cdns, offset, value, 0);
}
/*
* all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
* need to be confirmed with a write to MCP_CONFIG_UPDATE
*/
-static int cdns_update_config(struct sdw_cdns *cdns)
+static int cdns_config_update(struct sdw_cdns *cdns)
{
int ret;
+ if (sdw_cdns_is_clock_stop(cdns)) {
+ dev_err(cdns->dev, "Cannot program MCP_CONFIG_UPDATE in ClockStopMode\n");
+ return -EINVAL;
+ }
+
ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
CDNS_MCP_CONFIG_UPDATE_BIT);
if (ret < 0)
@@ -832,17 +842,36 @@ int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
CDNS_MCP_CONTROL_HW_RST,
CDNS_MCP_CONTROL_HW_RST);
- /* enable bus operations with clock and data */
- cdns_updatel(cdns, CDNS_MCP_CONFIG,
- CDNS_MCP_CONFIG_OP,
- CDNS_MCP_CONFIG_OP_NORMAL);
-
/* commit changes */
- return cdns_update_config(cdns);
+ cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+
+ /* don't wait here */
+ return 0;
}
EXPORT_SYMBOL(sdw_cdns_exit_reset);
/**
+ * cdns_enable_slave_interrupts() - Enable/disable SDW Slave interrupts
+ * @cdns: Cadence instance
+ * @state: true to enable Slave interrupts, false to disable them
+ */
+static void cdns_enable_slave_interrupts(struct sdw_cdns *cdns, bool state)
+{
+ u32 mask;
+
+ mask = cdns_readl(cdns, CDNS_MCP_INTMASK);
+ if (state)
+ mask |= CDNS_MCP_INT_SLAVE_MASK;
+ else
+ mask &= ~CDNS_MCP_INT_SLAVE_MASK;
+
+ cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
+}
+
+/**
* sdw_cdns_enable_interrupt() - Enable SDW interrupts
* @cdns: Cadence instance
* @state: True if we are trying to enable interrupt.
@@ -1014,26 +1043,13 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
return val;
}
-/**
- * sdw_cdns_init() - Cadence initialization
- * @cdns: Cadence instance
- */
-int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
+static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
u32 val;
+ u32 ssp_interval;
int divider;
- int ret;
-
- if (clock_stop_exit) {
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CLK_STOP_CLR);
- if (ret < 0) {
- dev_err(cdns->dev, "Couldn't exit from clock stop\n");
- return ret;
- }
- }
/* Set clock divider */
divider = (prop->mclk_freq / prop->max_clk_freq) - 1;
@@ -1052,8 +1068,23 @@ int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
cdns_writel(cdns, CDNS_MCP_FRAME_SHAPE_INIT, val);
/* Set SSP interval to default value */
- cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
- cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+ ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
+}
+
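As a worked example of the new SSP computation: assuming the common default_frame_rate of 48000 Hz (an assumption, not a value from this patch), the interval programmed into CDNS_MCP_SSP_CTRL0/1 would be:

/* assuming prop->default_frame_rate = 48000 Hz */
ssp_interval = 48000 / SDW_CADENCE_GSYNC_HZ;    /* 48000 / 4000 = 12 frames */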
+/**
+ * sdw_cdns_init() - Cadence initialization
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_init(struct sdw_cdns *cdns)
+{
+ u32 val;
+
+ cdns_init_clock_ctrl(cdns);
+
+ /* reset msg_count to default value of FIFOLEVEL */
+ cdns->msg_count = cdns_readl(cdns, CDNS_MCP_FIFOLEVEL);
/* flush command FIFOs */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
@@ -1066,25 +1097,31 @@ int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
/* Configure mcp config */
val = cdns_readl(cdns, CDNS_MCP_CONFIG);
- /* Set Max cmd retry to 15 */
- val |= CDNS_MCP_CONFIG_MCMD_RETRY;
+ /* enable bus operations with clock and data */
+ val &= ~CDNS_MCP_CONFIG_OP;
+ val |= CDNS_MCP_CONFIG_OP_NORMAL;
- /* Set frame delay between PREQ and ping frame to 15 frames */
- val |= 0xF << SDW_REG_SHIFT(CDNS_MCP_CONFIG_MPREQ_DELAY);
+ /* Set cmd mode for Tx and Rx cmds */
+ val &= ~CDNS_MCP_CONFIG_CMD;
+
+ /* Disable sniffer mode */
+ val &= ~CDNS_MCP_CONFIG_SNIFFER;
/* Disable auto bus release */
val &= ~CDNS_MCP_CONFIG_BUS_REL;
- /* Disable sniffer mode */
- val &= ~CDNS_MCP_CONFIG_SNIFFER;
+ if (cdns->bus.multi_link)
+ /* Set Multi-master mode to take gsync into account */
+ val |= CDNS_MCP_CONFIG_MMASTER;
- /* Set cmd mode for Tx and Rx cmds */
- val &= ~CDNS_MCP_CONFIG_CMD;
+ /* leave frame delay to hardware default of 0x1F */
+
+ /* leave command retry to hardware default of 0 */
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
- /* commit changes */
- return cdns_update_config(cdns);
+ /* changes will be committed later */
+ return 0;
}
EXPORT_SYMBOL(sdw_cdns_init);
@@ -1218,6 +1255,166 @@ static const struct sdw_master_port_ops cdns_port_ops = {
};
/**
+ * sdw_cdns_is_clock_stop: Check clock status
+ *
+ * @cdns: Cadence instance
+ */
+bool sdw_cdns_is_clock_stop(struct sdw_cdns *cdns)
+{
+ return !!(cdns_readl(cdns, CDNS_MCP_STAT) & CDNS_MCP_STAT_CLK_STOP);
+}
+EXPORT_SYMBOL(sdw_cdns_is_clock_stop);
+
+/**
+ * sdw_cdns_clock_stop: Cadence clock stop configuration routine
+ *
+ * @cdns: Cadence instance
+ * @block_wake: prevent wakes if required by the platform
+ */
+int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
+{
+ bool slave_present = false;
+ struct sdw_slave *slave;
+ int ret;
+
+ /* Check suspend status */
+ if (sdw_cdns_is_clock_stop(cdns)) {
+ dev_dbg(cdns->dev, "Clock is already stopped\n");
+ return 0;
+ }
+
+ /*
+ * Before entering clock stop we mask the Slave
+ * interrupts. This helps avoid having to deal with e.g. a
+ * Slave becoming UNATTACHED while the clock is being stopped
+ */
+ cdns_enable_slave_interrupts(cdns, false);
+
+ /*
+ * On some platforms the Master must be put into a state in
+ * which it ignores wake-up attempts while in clock stop
+ */
+ if (block_wake)
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_BLOCK_WAKEUP,
+ CDNS_MCP_CONTROL_BLOCK_WAKEUP);
+
+ list_for_each_entry(slave, &cdns->bus.slaves, node) {
+ if (slave->status == SDW_SLAVE_ATTACHED ||
+ slave->status == SDW_SLAVE_ALERT) {
+ slave_present = true;
+ break;
+ }
+ }
+
+ /*
+ * CMD_ACCEPT should be set when no devices are attached on the
+ * link while entering clock stop mode. If it is not set and a
+ * broadcast write is ignored, the ignored command would be
+ * treated as a failure.
+ */
+ if (!slave_present)
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CMD_ACCEPT,
+ CDNS_MCP_CONTROL_CMD_ACCEPT);
+ else
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CMD_ACCEPT, 0);
+
+ /* commit changes */
+ ret = cdns_config_update(cdns);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: config_update failed\n", __func__);
+ return ret;
+ }
+
+ /* Prepare slaves for clock stop */
+ ret = sdw_bus_prep_clk_stop(&cdns->bus);
+ if (ret < 0) {
+ dev_err(cdns->dev, "prepare clock stop failed %d", ret);
+ return ret;
+ }
+
+ /*
+ * Enter clock stop mode and only report errors if there are
+ * Slave devices present (ALERT or ATTACHED)
+ */
+ ret = sdw_bus_clk_stop(&cdns->bus);
+ if (ret < 0 && slave_present && ret != -ENODATA) {
+ dev_err(cdns->dev, "bus clock stop failed %d", ret);
+ return ret;
+ }
+
+ ret = cdns_set_wait(cdns, CDNS_MCP_STAT,
+ CDNS_MCP_STAT_CLK_STOP,
+ CDNS_MCP_STAT_CLK_STOP);
+ if (ret < 0)
+ dev_err(cdns->dev, "Clock stop failed %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_clock_stop);
+
+/**
+ * sdw_cdns_clock_restart: Cadence PM clock restart configuration routine
+ *
+ * @cdns: Cadence instance
+ * @bus_reset: set to true when context may be lost in low power modes and
+ * the bus requires a Severe Reset and re-enumeration after a wake.
+ */
+int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
+{
+ int ret;
+
+ /* unmask Slave interrupts that were masked when stopping the clock */
+ cdns_enable_slave_interrupts(cdns, true);
+
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ if (ret < 0) {
+ dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+ return ret;
+ }
+
+ ret = cdns_set_wait(cdns, CDNS_MCP_STAT, CDNS_MCP_STAT_CLK_STOP, 0);
+ if (ret < 0) {
+ dev_err(cdns->dev, "clock stop exit failed %d\n", ret);
+ return ret;
+ }
+
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_BLOCK_WAKEUP, 0);
+
+ /*
+ * clear CMD_ACCEPT so that an ignored command during a
+ * broadcast write is treated as a failure
+ */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, 0);
+
+ if (!bus_reset) {
+ /* enable bus operations with clock and data */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG,
+ CDNS_MCP_CONFIG_OP,
+ CDNS_MCP_CONFIG_OP_NORMAL);
+
+ ret = cdns_config_update(cdns);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: config_update failed\n", __func__);
+ return ret;
+ }
+
+ ret = sdw_bus_exit_clk_stop(&cdns->bus);
+ if (ret < 0)
+ dev_err(cdns->dev, "bus failed to exit clock stop %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_clock_restart);
+
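A hedged sketch of how a platform driver might pair these two helpers in its runtime-PM callbacks (the wrapper names and the policy choices shown in the arguments are assumptions):

static int example_cdns_runtime_suspend(struct sdw_cdns *cdns)
{
        /* block wake-ups only if the platform cannot handle them */
        return sdw_cdns_clock_stop(cdns, true);
}

static int example_cdns_runtime_resume(struct sdw_cdns *cdns)
{
        /* bus_reset = false: context retained, no re-enumeration needed */
        return sdw_cdns_clock_restart(cdns, false);
}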
+/**
* sdw_cdns_probe() - Cadence probe routine
* @cdns: Cadence instance
*/
@@ -1306,6 +1503,7 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val);
val = pdi->num;
+ val |= CDNS_PDI_CONFIG_SOFT_RESET;
val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 001457cbe5ad..b410656f8194 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -5,6 +5,9 @@
#ifndef __SDW_CADENCE_H
#define __SDW_CADENCE_H
+#define SDW_CADENCE_GSYNC_KHZ 4 /* 4 kHz */
+#define SDW_CADENCE_GSYNC_HZ (SDW_CADENCE_GSYNC_KHZ * 1000)
+
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
@@ -138,30 +141,26 @@ extern struct sdw_master_ops sdw_cdns_master_ops;
irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
-int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit);
+int sdw_cdns_init(struct sdw_cdns *cdns);
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config);
int sdw_cdns_exit_reset(struct sdw_cdns *cdns);
int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state);
+bool sdw_cdns_is_clock_stop(struct sdw_cdns *cdns);
+int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake);
+int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset);
+
#ifdef CONFIG_DEBUG_FS
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
#endif
-int sdw_cdns_get_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- u32 ch, u32 dir);
struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
u32 ch, u32 dir, int dai_id);
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
-int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
- void *stream, int direction);
-int sdw_cdns_pdm_set_stream(struct snd_soc_dai *dai,
- void *stream, int direction);
-
enum sdw_command_response
cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 06ef3a3ac080..3c83e76c6bf9 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -103,7 +103,7 @@ enum intel_pdi_type {
struct sdw_intel {
struct sdw_cdns cdns;
int instance;
- struct sdw_intel_link_res *res;
+ struct sdw_intel_link_res *link_res;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
@@ -193,8 +193,8 @@ static ssize_t intel_sprintf(void __iomem *mem, bool l,
static int intel_reg_show(struct seq_file *s_file, void *data)
{
struct sdw_intel *sdw = s_file->private;
- void __iomem *s = sdw->res->shim;
- void __iomem *a = sdw->res->alh;
+ void __iomem *s = sdw->link_res->shim;
+ void __iomem *a = sdw->link_res->alh;
char *buf;
ssize_t ret;
int i, j;
@@ -289,7 +289,7 @@ static void intel_debugfs_exit(struct sdw_intel *sdw) {}
static int intel_link_power_up(struct sdw_intel *sdw)
{
unsigned int link_id = sdw->instance;
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
int spa_mask, cpa_mask;
int link_control, ret;
@@ -309,7 +309,7 @@ static int intel_link_power_up(struct sdw_intel *sdw)
static int intel_shim_init(struct sdw_intel *sdw)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int sync_reg, ret;
u16 ioctl = 0, act = 0;
@@ -370,7 +370,7 @@ static int intel_shim_init(struct sdw_intel *sdw)
static void intel_pdi_init(struct sdw_intel *sdw,
struct sdw_cdns_stream_config *config)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int pcm_cap, pdm_cap;
@@ -404,7 +404,7 @@ static void intel_pdi_init(struct sdw_intel *sdw,
static int
intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int count;
@@ -476,7 +476,7 @@ static int intel_pdi_ch_update(struct sdw_intel *sdw)
static void
intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int pdi_conf = 0;
@@ -508,7 +508,7 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
static void
intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
- void __iomem *alh = sdw->res->alh;
+ void __iomem *alh = sdw->link_res->alh;
unsigned int link_id = sdw->instance;
unsigned int conf;
@@ -535,7 +535,7 @@ static int intel_params_stream(struct sdw_intel *sdw,
struct snd_pcm_hw_params *hw_params,
int link_id, int alh_stream_id)
{
- struct sdw_intel_link_res *res = sdw->res;
+ struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_intel_stream_params_data params_data;
params_data.substream = substream;
@@ -550,6 +550,25 @@ static int intel_params_stream(struct sdw_intel *sdw,
return -EIO;
}
+static int intel_free_stream(struct sdw_intel *sdw,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int link_id)
+{
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_intel_stream_free_data free_data;
+
+ free_data.substream = substream;
+ free_data.dai = dai;
+ free_data.link_id = link_id;
+
+ if (res->ops && res->ops->free_stream && res->dev)
+ return res->ops->free_stream(res->dev,
+ &free_data);
+
+ return 0;
+}
+
/*
* bank switch routines
*/
@@ -558,7 +577,7 @@ static int intel_pre_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
int sync_reg;
/* Write to register only for multi-link */
@@ -577,7 +596,7 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- void __iomem *shim = sdw->res->shim;
+ void __iomem *shim = sdw->link_res->shim;
int sync_reg, ret;
/* Write to register only for multi-link */
@@ -617,6 +636,68 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
+static int sdw_stream_setup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sdw_stream = NULL;
+ char *name;
+ int i, ret;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ name = kasprintf(GFP_KERNEL, "%s-Playback", dai->name);
+ else
+ name = kasprintf(GFP_KERNEL, "%s-Capture", dai->name);
+
+ if (!name)
+ return -ENOMEM;
+
+ sdw_stream = sdw_alloc_stream(name);
+ if (!sdw_stream) {
+ dev_err(dai->dev, "alloc stream failed for DAI %s", dai->name);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Set stream pointer on CPU DAI */
+ ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
+ if (ret < 0) {
+ dev_err(dai->dev, "failed to set stream pointer on cpu dai %s",
+ dai->name);
+ goto release_stream;
+ }
+
+ /* Set stream pointer on all CODEC DAIs */
+ for (i = 0; i < rtd->num_codecs; i++) {
+ ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sdw_stream,
+ substream->stream);
+ if (ret < 0) {
+ dev_err(dai->dev, "failed to set stream pointer on codec dai %s",
+ rtd->codec_dais[i]->name);
+ goto release_stream;
+ }
+ }
+
+ return 0;
+
+release_stream:
+ sdw_release_stream(sdw_stream);
+error:
+ kfree(name);
+ return ret;
+}
+
+static int intel_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ /*
+ * TODO: add pm_runtime support here, the startup callback
+ * will make sure the IP is 'active'
+ */
+
+ return sdw_stream_setup(substream, dai);
+}
+
static int intel_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -699,10 +780,63 @@ error:
return ret;
}
+static int intel_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns_dma_data *dma;
+
+ dma = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma) {
+ dev_err(dai->dev, "failed to get dma data in %s",
+ __func__);
+ return -EIO;
+ }
+
+ return sdw_prepare_stream(dma->stream);
+}
+
+static int intel_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns_dma_data *dma;
+ int ret;
+
+ dma = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma) {
+ dev_err(dai->dev, "failed to get dma data in %s", __func__);
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ ret = sdw_enable_stream(dma->stream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ ret = sdw_disable_stream(dma->stream);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ dev_err(dai->dev,
+ "%s trigger %d failed: %d",
+ __func__, cmd, ret);
+ return ret;
+}
+
static int
intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dma_data *dma;
int ret;
@@ -710,12 +844,29 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
if (!dma)
return -EIO;
+ ret = sdw_deprepare_stream(dma->stream);
+ if (ret) {
+ dev_err(dai->dev, "sdw_deprepare_stream: failed %d", ret);
+ return ret;
+ }
+
ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
dma->stream->name, ret);
+ return ret;
+ }
- return ret;
+ ret = intel_free_stream(sdw, substream, dai, sdw->instance);
+ if (ret < 0) {
+ dev_err(dai->dev, "intel_free_stream: failed %d", ret);
+ return ret;
+ }
+
+ kfree(dma->stream->name);
+ sdw_release_stream(dma->stream);
+
+ return 0;
}
static void intel_shutdown(struct snd_pcm_substream *substream,
@@ -744,14 +895,20 @@ static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
}
static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
+ .startup = intel_startup,
.hw_params = intel_hw_params,
+ .prepare = intel_prepare,
+ .trigger = intel_trigger,
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pcm_set_sdw_stream,
};
static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
+ .startup = intel_startup,
.hw_params = intel_hw_params,
+ .prepare = intel_prepare,
+ .trigger = intel_trigger,
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pdm_set_sdw_stream,
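With the new callbacks wired into both ops tables, the resulting ALSA DAI lifecycle maps onto the SoundWire stream API roughly as follows:

/*
 * startup      -> sdw_stream_setup() allocates and attaches the stream
 * hw_params    -> port/transport configuration
 * prepare      -> sdw_prepare_stream()
 * trigger      -> sdw_enable_stream() / sdw_disable_stream()
 * hw_free      -> sdw_deprepare_stream() + stream release
 * shutdown     -> DAI cleanup
 */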
@@ -920,7 +1077,7 @@ static int intel_init(struct sdw_intel *sdw)
intel_link_power_up(sdw);
intel_shim_init(sdw);
- return sdw_cdns_init(&sdw->cdns, false);
+ return sdw_cdns_init(&sdw->cdns);
}
/*
@@ -937,9 +1094,9 @@ static int intel_probe(struct platform_device *pdev)
return -ENOMEM;
sdw->instance = pdev->id;
- sdw->res = dev_get_platdata(&pdev->dev);
+ sdw->link_res = dev_get_platdata(&pdev->dev);
sdw->cdns.dev = &pdev->dev;
- sdw->cdns.registers = sdw->res->registers;
+ sdw->cdns.registers = sdw->link_res->registers;
sdw->cdns.instance = sdw->instance;
sdw->cdns.msg_count = 0;
sdw->cdns.bus.dev = &pdev->dev;
@@ -979,11 +1136,12 @@ static int intel_probe(struct platform_device *pdev)
intel_pdi_ch_update(sdw);
/* Acquire IRQ */
- ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq, sdw_cdns_thread,
+ ret = request_threaded_irq(sdw->link_res->irq,
+ sdw_cdns_irq, sdw_cdns_thread,
IRQF_SHARED, KBUILD_MODNAME, &sdw->cdns);
if (ret < 0) {
dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
- sdw->res->irq);
+ sdw->link_res->irq);
goto err_init;
}
@@ -1013,7 +1171,7 @@ static int intel_probe(struct platform_device *pdev)
err_interrupt:
sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->res->irq, sdw);
+ free_irq(sdw->link_res->irq, sdw);
err_init:
sdw_delete_bus_master(&sdw->cdns.bus);
return ret;
@@ -1028,7 +1186,7 @@ static int intel_remove(struct platform_device *pdev)
if (!sdw->cdns.bus.prop.hw_disabled) {
intel_debugfs_exit(sdw);
sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->res->irq, sdw);
+ free_irq(sdw->link_res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
sdw_delete_bus_master(&sdw->cdns.bus);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 1c6c6a2e0def..d6c9ad231873 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -588,12 +588,20 @@ static int qcom_swrm_set_sdw_stream(struct snd_soc_dai *dai,
return 0;
}
+static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+
+ return ctrl->sruntime[dai->id];
+}
+
static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sdw_stream_runtime *sruntime;
+ struct snd_soc_dai *codec_dai;
int ret, i;
sruntime = sdw_alloc_stream(dai->name);
@@ -602,12 +610,12 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
ctrl->sruntime[dai->id] = sruntime;
- for (i = 0; i < rtd->num_codecs; i++) {
- ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sruntime,
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime,
substream->stream);
if (ret < 0 && ret != -ENOTSUPP) {
dev_err(dai->dev, "Failed to set sdw stream on %s",
- rtd->codec_dais[i]->name);
+ codec_dai->name);
sdw_release_stream(sruntime);
return ret;
}
@@ -631,6 +639,7 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.startup = qcom_swrm_startup,
.shutdown = qcom_swrm_shutdown,
.set_sdw_stream = qcom_swrm_set_sdw_stream,
+ .get_sdw_stream = qcom_swrm_get_sdw_stream,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 19919975bb6d..aace57fae7f8 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -46,7 +46,11 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev.of_node = of_node_get(to_of_node(fwnode));
slave->bus = bus;
slave->status = SDW_SLAVE_UNATTACHED;
+ init_completion(&slave->enumeration_complete);
+ init_completion(&slave->initialization_complete);
slave->dev_num = 0;
+ init_completion(&slave->probe_complete);
+ slave->probed = false;
mutex_lock(&bus->bus_lock);
list_add_tail(&slave->node, &bus->slaves);
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 178ae92b8cc1..a9a72574b34a 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -167,13 +167,15 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
return ret;
}
- /* Program DPN_BlockCtrl1 register */
- ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1));
- if (ret < 0) {
- dev_err(&s_rt->slave->dev,
- "DPN_BlockCtrl1 register write failed for port %d\n",
- t_params->port_num);
- return ret;
+ if (!dpn_prop->read_only_wordlength) {
+ /* Program DPN_BlockCtrl1 register */
+ ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1));
+ if (ret < 0) {
+ dev_err(&s_rt->slave->dev,
+ "DPN_BlockCtrl1 register write failed for port %d\n",
+ t_params->port_num);
+ return ret;
+ }
}
/* Program DPN_SampleCtrl1 register */
@@ -313,9 +315,9 @@ static int sdw_enable_disable_slave_ports(struct sdw_bus *bus,
* it is safe to reset this register
*/
if (en)
- ret = sdw_update(s_rt->slave, addr, 0xFF, p_rt->ch_mask);
+ ret = sdw_write(s_rt->slave, addr, p_rt->ch_mask);
else
- ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0);
+ ret = sdw_write(s_rt->slave, addr, 0x0);
if (ret < 0)
dev_err(&s_rt->slave->dev,
@@ -464,10 +466,9 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
addr = SDW_DPN_PREPARECTRL(p_rt->num);
if (prep)
- ret = sdw_update(s_rt->slave, addr,
- 0xFF, p_rt->ch_mask);
+ ret = sdw_write(s_rt->slave, addr, p_rt->ch_mask);
else
- ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0);
+ ret = sdw_write(s_rt->slave, addr, 0x0);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
@@ -587,10 +588,11 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
if (slave->ops->bus_config) {
ret = slave->ops->bus_config(slave, &bus->params);
- if (ret < 0)
+ if (ret < 0) {
dev_err(bus->dev, "Notify Slave: %d failed\n",
slave->dev_num);
- return ret;
+ return ret;
+ }
}
}
@@ -602,13 +604,25 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
* and Slave(s)
*
* @bus: SDW bus instance
+ * @prepare: true if sdw_program_params() is called by _prepare.
*/
-static int sdw_program_params(struct sdw_bus *bus)
+static int sdw_program_params(struct sdw_bus *bus, bool prepare)
{
struct sdw_master_runtime *m_rt;
int ret = 0;
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+
+ /*
+ * this loop walks through all master runtimes for a
+ * bus, but the ports can only be configured while
+ * explicitly preparing a stream or re-applying the
+ * parameters of an already-prepared stream; skip
+ * streams that are only CONFIGURED otherwise.
+ */
+ if (!prepare &&
+ m_rt->stream->state == SDW_STREAM_CONFIGURED)
+ continue;
+
ret = sdw_program_port_params(m_rt);
if (ret < 0) {
dev_err(bus->dev,
@@ -1460,7 +1474,8 @@ static void sdw_release_bus_lock(struct sdw_stream_runtime *stream)
}
}
-static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
+static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
+ bool update_params)
{
struct sdw_master_runtime *m_rt;
struct sdw_bus *bus = NULL;
@@ -1480,6 +1495,9 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
+ if (!update_params)
+ goto program_params;
+
/* Increment cumulative bus bandwidth */
/* TODO: Update this during Device-Device support */
bus->params.bandwidth += m_rt->stream->params.rate *
@@ -1495,8 +1513,9 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
}
}
+program_params:
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, true);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
goto restore_params;
@@ -1544,7 +1563,8 @@ restore_params:
*/
int sdw_prepare_stream(struct sdw_stream_runtime *stream)
{
- int ret = 0;
+ bool update_params = true;
+ int ret;
if (!stream) {
pr_err("SoundWire: Handle not found for stream\n");
@@ -1553,8 +1573,32 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
- ret = _sdw_prepare_stream(stream);
+ if (stream->state == SDW_STREAM_PREPARED) {
+ ret = 0;
+ goto state_err;
+ }
+
+ if (stream->state != SDW_STREAM_CONFIGURED &&
+ stream->state != SDW_STREAM_DEPREPARED &&
+ stream->state != SDW_STREAM_DISABLED) {
+ pr_err("%s: %s: inconsistent state state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
+ /*
+ * when the stream is DISABLED, this means sdw_prepare_stream()
+ * is called as a result of an underflow or a resume operation.
+ * In this case, the bus parameters shall not be recomputed, but
+ * still need to be re-applied
+ */
+ if (stream->state == SDW_STREAM_DISABLED)
+ update_params = false;
+
+ ret = _sdw_prepare_stream(stream, update_params);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
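The state checks added to sdw_prepare_stream() and its siblings imply the following transitions (summarized from the checks in this patch):

/*
 * CONFIGURED / DEPREPARED / DISABLED  --prepare-->   PREPARED
 * PREPARED / DISABLED                 --enable-->    ENABLED
 * ENABLED                             --disable-->   DISABLED
 * PREPARED / DISABLED                 --deprepare--> DEPREPARED
 */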
@@ -1571,7 +1615,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
bus = m_rt->bus;
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
@@ -1619,8 +1663,17 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
+ if (stream->state != SDW_STREAM_PREPARED &&
+ stream->state != SDW_STREAM_DISABLED) {
+ pr_err("%s: %s: inconsistent state state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
ret = _sdw_enable_stream(stream);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
@@ -1647,7 +1700,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
struct sdw_bus *bus = m_rt->bus;
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
@@ -1693,8 +1746,16 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
sdw_acquire_bus_lock(stream);
+ if (stream->state != SDW_STREAM_ENABLED) {
+ pr_err("%s: %s: inconsistent state state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
ret = _sdw_disable_stream(stream);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
@@ -1721,7 +1782,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
m_rt->ch_count * m_rt->stream->params.bps;
/* Program params */
- ret = sdw_program_params(bus);
+ ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
return ret;
@@ -1749,8 +1810,18 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
}
sdw_acquire_bus_lock(stream);
+
+ if (stream->state != SDW_STREAM_PREPARED &&
+ stream->state != SDW_STREAM_DISABLED) {
+ pr_err("%s: %s: inconsistent state state %d\n",
+ __func__, stream->name, stream->state);
+ ret = -EINVAL;
+ goto state_err;
+ }
+
ret = _sdw_deprepare_stream(stream);
+state_err:
sdw_release_bus_lock(stream);
return ret;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index efce98e9844e..741b9140992a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -575,7 +575,7 @@ config SPI_PPC4xx
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
- depends on (ARCH_PXA || ARCH_MMP || PCI || ACPI)
+ depends on ARCH_PXA || ARCH_MMP || PCI || ACPI || COMPILE_TEST
select PXA_SSP if ARCH_PXA || ARCH_MMP
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
diff --git a/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore b/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore
index ef38008280a9..e3ebffcd900e 100644
--- a/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore
+++ b/drivers/staging/comedi/drivers/ni_routing/tools/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
comedi_h.py
*.pyc
ni_values.py
diff --git a/drivers/staging/greybus/tools/.gitignore b/drivers/staging/greybus/tools/.gitignore
index 023654c83068..1fd364aba774 100644
--- a/drivers/staging/greybus/tools/.gitignore
+++ b/drivers/staging/greybus/tools/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
loopback_test
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index d920256328c3..d30571663585 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -1,16 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
-#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
+#include <linux/miscdevice.h> /* for misc_register, and MISC_DYNAMIC_MINOR */
#include <linux/types.h>
#include <linux/uaccess.h>
#include "speakup.h"
#include "spk_priv.h"
-#ifndef SYNTH_MINOR
-#define SYNTH_MINOR 25
-#endif
-
static int misc_registered;
static int dev_opened;
@@ -67,7 +63,7 @@ static const struct file_operations synth_fops = {
};
static struct miscdevice synth_device = {
- .minor = SYNTH_MINOR,
+ .minor = MISC_DYNAMIC_MINOR,
.name = "synth",
.fops = &synth_fops,
};
@@ -81,7 +77,7 @@ void speakup_register_devsynth(void)
pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
} else {
pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
- MISC_MAJOR, SYNTH_MINOR);
+ MISC_MAJOR, synth_device.minor);
misc_registered = 1;
}
}
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 28cedaec6d8a..f591ec095582 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -10,7 +10,7 @@
*/
#include <linux/unistd.h>
-#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
+#include <linux/miscdevice.h> /* for misc_register, and MISC_DYNAMIC_MINOR */
#include <linux/poll.h> /* for poll_wait() */
/* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
@@ -20,8 +20,6 @@
#include "speakup.h"
#define DRV_VERSION "2.6"
-#define SOFTSYNTH_MINOR 26 /* might as well give it one more than /dev/synth */
-#define SOFTSYNTHU_MINOR 27 /* might as well give it one more than /dev/synth */
#define PROCSPEECH 0x0d
#define CLEAR_SYNTH 0x18
@@ -375,7 +373,7 @@ static int softsynth_probe(struct spk_synth *synth)
if (misc_registered != 0)
return 0;
memset(&synth_device, 0, sizeof(synth_device));
- synth_device.minor = SOFTSYNTH_MINOR;
+ synth_device.minor = MISC_DYNAMIC_MINOR;
synth_device.name = "softsynth";
synth_device.fops = &softsynth_fops;
if (misc_register(&synth_device)) {
@@ -384,7 +382,7 @@ static int softsynth_probe(struct spk_synth *synth)
}
memset(&synthu_device, 0, sizeof(synthu_device));
- synthu_device.minor = SOFTSYNTHU_MINOR;
+ synthu_device.minor = MISC_DYNAMIC_MINOR;
synthu_device.name = "softsynthu";
synthu_device.fops = &softsynthu_fops;
if (misc_register(&synthu_device)) {
@@ -393,8 +391,10 @@ static int softsynth_probe(struct spk_synth *synth)
}
misc_registered = 1;
- pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR 26)\n");
- pr_info("initialized device: /dev/softsynthu, node (MAJOR 10, MINOR 27)\n");
+ pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR %d)\n",
+ synth_device.minor);
+ pr_info("initialized device: /dev/softsynthu, node (MAJOR 10, MINOR %d)\n",
+ synthu_device.minor);
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 09e55ea0bf5d..59379d662626 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4301,30 +4301,37 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
iscsit_close_session(sess);
return 0;
} else if (atomic_read(&sess->session_logout)) {
pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
sess->session_state = TARG_SESS_STATE_FREE;
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
} else {
pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
sess->session_state = TARG_SESS_STATE_FAILED;
- if (!atomic_read(&sess->session_continuation)) {
- spin_unlock_bh(&sess->conn_lock);
+ if (!atomic_read(&sess->session_continuation))
iscsit_start_time2retain_handler(sess);
- } else
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
}
@@ -4368,8 +4375,7 @@ int iscsit_close_session(struct iscsi_session *sess)
* restart the timer and exit.
*/
if (!in_interrupt()) {
- if (iscsit_check_session_usage_count(sess) == 1)
- iscsit_stop_session(sess, 1, 1);
+ iscsit_check_session_usage_count(sess);
} else {
if (iscsit_check_session_usage_count(sess) == 2) {
atomic_set(&sess->session_logout, 0);
@@ -4430,9 +4436,9 @@ static void iscsit_logout_post_handler_closesession(
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
+ atomic_set(&sess->session_close, 1);
iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
}
static void iscsit_logout_post_handler_samecid(
@@ -4567,49 +4573,6 @@ void iscsit_fail_session(struct iscsi_session *sess)
sess->session_state = TARG_SESS_STATE_FAILED;
}
-int iscsit_free_session(struct iscsi_session *sess)
-{
- u16 conn_count = atomic_read(&sess->nconn);
- struct iscsi_conn *conn, *conn_tmp = NULL;
- int is_last;
-
- spin_lock_bh(&sess->conn_lock);
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
-
- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
- conn_list) {
- if (conn_count == 0)
- break;
-
- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
- is_last = 1;
- } else {
- iscsit_inc_conn_usage_count(conn_tmp);
- is_last = 0;
- }
- iscsit_inc_conn_usage_count(conn);
-
- spin_unlock_bh(&sess->conn_lock);
- iscsit_cause_connection_reinstatement(conn, 1);
- spin_lock_bh(&sess->conn_lock);
-
- iscsit_dec_conn_usage_count(conn);
- if (is_last == 0)
- iscsit_dec_conn_usage_count(conn_tmp);
-
- conn_count--;
- }
-
- if (atomic_read(&sess->nconn)) {
- spin_unlock_bh(&sess->conn_lock);
- wait_for_completion(&sess->session_wait_comp);
- } else
- spin_unlock_bh(&sess->conn_lock);
-
- iscsit_close_session(sess);
- return 0;
-}
-
void iscsit_stop_session(
struct iscsi_session *sess,
int session_sleep,
@@ -4620,8 +4583,6 @@ void iscsit_stop_session(
int is_last;
spin_lock_bh(&sess->conn_lock);
- if (session_sleep)
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
if (connection_sleep) {
list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
@@ -4679,12 +4640,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
continue;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
list_move_tail(&se_sess->sess_list, &free_list);
@@ -4694,7 +4658,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
- iscsit_free_session(sess);
+ list_del_init(&se_sess->sess_list);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
session_count++;
}
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index c95f56a3ce31..7409ce2a6607 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsi_conn *);
extern int iscsit_close_session(struct iscsi_session *);
extern void iscsit_fail_session(struct iscsi_session *);
-extern int iscsit_free_session(struct iscsi_session *);
extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 42b369fc415e..0fa1d57b26fa 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1476,20 +1476,23 @@ static void lio_tpg_close_session(struct se_session *se_sess)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
return;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
- iscsit_close_session(sess);
+ iscsit_dec_session_usage_count(sess);
}
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index f53330813207..731ee67fe914 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -156,6 +156,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess_p->conn_lock);
continue;
@@ -166,6 +167,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
atomic_set(&sess_p->session_fall_back_to_erl0, 1);
+ atomic_set(&sess_p->session_close, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -190,7 +192,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
@@ -198,7 +199,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
@@ -486,6 +486,7 @@ static int iscsi_login_non_zero_tsih_s2(
sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e6e175597860..ff82b21fdcce 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -684,7 +684,9 @@ static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
if (ret < 0)
return ret;
- if (val != 0 && val != 1 && val != 2) {
+ if (val != TARGET_UA_INTLCK_CTRL_CLEAR
+ && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
+ && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
pr_err("Illegal value %d\n", val);
return -EINVAL;
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 2d19f0e332b0..4cee1138284b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -767,7 +767,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.emulate_fua_write = 1;
dev->dev_attrib.emulate_fua_read = 1;
dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
@@ -829,7 +829,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
attrib->unmap_granularity = q->limits.discard_granularity / block_size;
attrib->unmap_granularity_alignment = q->limits.discard_alignment /
block_size;
- attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
+ attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 6d4cf2643c0a..ca5579ebc81d 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -847,8 +847,17 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* of the number of commands completed with one of those status codes.
*/
- p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
+ case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
+ p[4] = 0x30;
+ break;
+ case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
+ p[4] = 0x20;
+ break;
+ default: /* TARGET_UA_INTLCK_CTRL_CLEAR */
+ p[4] = 0x00;
+ break;
+ }
/*
* From spc4r17, section 7.4.6 Control mode Page
*
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index feeba3966617..afbd492c76a9 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -78,7 +78,7 @@ static int target_check_cdb_and_preempt(struct list_head *list,
}
static bool __target_check_io_state(struct se_cmd *se_cmd,
- struct se_session *tmr_sess, int tas)
+ struct se_session *tmr_sess, bool tas)
{
struct se_session *sess = se_cmd->se_sess;
@@ -251,7 +251,7 @@ static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
struct se_session *tmr_sess,
- int tas,
+ bool tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
@@ -334,7 +334,7 @@ int core_tmr_lun_reset(
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
struct se_session *tmr_sess = NULL;
- int tas;
+ bool tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0ae9e60fc4d5..594b724bbf79 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1898,7 +1898,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
- cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+ cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
+ == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
target_ua_allocate_lun(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 151b56002da5..4276690fb6cb 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -199,6 +199,8 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
+ bool dev_ua_intlck_clear = (dev->dev_attrib.emulate_ua_intlck_ctrl
+ == TARGET_UA_INTLCK_CTRL_CLEAR);
if (WARN_ON_ONCE(!sess))
return false;
@@ -229,7 +231,7 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
+ if (!dev_ua_intlck_clear) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -254,8 +256,8 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
" INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->fabric_name,
- (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
+ dev_ua_intlck_clear ? "Releasing" : "Reporting",
+ dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
return head == 0;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 37d22e39fd8d..6aec502c495c 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev)
kref_init(&ctx->refcount);
ctx->teedev = teedev;
- INIT_LIST_HEAD(&ctx->list_shm);
rc = teedev->desc->ops->open(ctx);
if (rc)
goto err;
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index f797171f0434..e55204df31ce 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -37,7 +37,8 @@ struct tee_shm_pool {
* @num_users: number of active users of this device
* @c_no_user: completion used when unregistering the device
* @mutex: mutex protecting @num_users and @idr
- * @idr: register of shared memory object allocated on this device
+ * @idr: register of user space shared memory objects allocated or
+ * registered on this device
* @pool: shared memory pool
*/
struct tee_device {
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 937ac5aaa6d8..bd679b72bd05 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -13,13 +13,13 @@
static void tee_shm_release(struct tee_shm *shm)
{
- struct tee_device *teedev = shm->teedev;
+ struct tee_device *teedev = shm->ctx->teedev;
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- if (shm->ctx)
- list_del(&shm->link);
- mutex_unlock(&teedev->mutex);
+ if (shm->flags & TEE_SHM_DMA_BUF) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
if (shm->flags & TEE_SHM_POOL) {
struct tee_shm_pool_mgr *poolm;
@@ -44,8 +44,7 @@ static void tee_shm_release(struct tee_shm *shm)
kfree(shm->pages);
}
- if (shm->ctx)
- teedev_ctx_put(shm->ctx);
+ teedev_ctx_put(shm->ctx);
kfree(shm);
@@ -77,7 +76,7 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
/* Refuse sharing shared memory provided by application */
- if (shm->flags & TEE_SHM_REGISTER)
+ if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL;
return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
@@ -91,20 +90,14 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
.mmap = tee_shm_op_mmap,
};
-static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
- struct tee_device *teedev,
- size_t size, u32 flags)
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
+ struct tee_device *teedev = ctx->teedev;
struct tee_shm_pool_mgr *poolm = NULL;
struct tee_shm *shm;
void *ret;
int rc;
- if (ctx && ctx->teedev != teedev) {
- dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
- return ERR_PTR(-EINVAL);
- }
-
if (!(flags & TEE_SHM_MAPPED)) {
dev_err(teedev->dev.parent,
"only mapped allocations supported\n");
@@ -132,7 +125,6 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
}
shm->flags = flags | TEE_SHM_POOL;
- shm->teedev = teedev;
shm->ctx = ctx;
if (flags & TEE_SHM_DMA_BUF)
poolm = teedev->pool->dma_buf_mgr;
@@ -145,17 +137,18 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
goto err_kfree;
}
- mutex_lock(&teedev->mutex);
- shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
- mutex_unlock(&teedev->mutex);
- if (shm->id < 0) {
- ret = ERR_PTR(shm->id);
- goto err_pool_free;
- }
if (flags & TEE_SHM_DMA_BUF) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (shm->id < 0) {
+ ret = ERR_PTR(shm->id);
+ goto err_pool_free;
+ }
+
exp_info.ops = &tee_shm_dma_buf_ops;
exp_info.size = shm->size;
exp_info.flags = O_RDWR;
@@ -168,18 +161,16 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
}
}
- if (ctx) {
+ if (ctx)
teedev_ctx_get(ctx);
- mutex_lock(&teedev->mutex);
- list_add_tail(&shm->link, &ctx->list_shm);
- mutex_unlock(&teedev->mutex);
- }
return shm;
err_rem:
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
+ if (flags & TEE_SHM_DMA_BUF) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
err_pool_free:
poolm->ops->free(poolm, shm);
err_kfree:
@@ -188,31 +179,8 @@ err_dev_put:
tee_device_put(teedev);
return ret;
}
-
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx: Context that allocates the shared memory
- * @size: Requested size of shared memory
- * @flags: Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
-{
- return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
-}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
-struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
-{
- return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
-}
-EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
-
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
size_t length, u32 flags)
{
@@ -245,7 +213,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
}
shm->flags = flags | TEE_SHM_REGISTER;
- shm->teedev = teedev;
shm->ctx = ctx;
shm->id = -1;
addr = untagged_addr(addr);
@@ -301,10 +268,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
}
}
- mutex_lock(&teedev->mutex);
- list_add_tail(&shm->link, &ctx->list_shm);
- mutex_unlock(&teedev->mutex);
-
return shm;
err:
if (shm) {
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index efae0c02d898..6cad15eb9cf4 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -369,8 +369,8 @@ static int int3400_thermal_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3400_thermal_match[] = {
- {"INT1040", 0},
{"INT3400", 0},
+ {"INTC1040", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index aeece1e136a5..f86cbb125e2f 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -282,8 +282,8 @@ static int int3403_remove(struct platform_device *pdev)
}
static const struct acpi_device_id int3403_device_ids[] = {
- {"INT1043", 0},
{"INT3403", 0},
+ {"INTC1043", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index a312cb33a99b..2dff93d7a501 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -137,7 +137,6 @@ config LEGACY_PTYS
security. This option enables these legacy devices; on most
systems, it is safe to say N.
-
config LEGACY_PTY_COUNT
int "Maximum number of legacy PTY in use"
depends on LEGACY_PTYS
@@ -151,6 +150,31 @@ config LEGACY_PTY_COUNT
When not in use, each legacy PTY occupies 12 bytes on 32-bit
architectures and 24 bytes on 64-bit architectures.
+config LDISC_AUTOLOAD
+ bool "Automatically load TTY Line Disciplines"
+ default y
+ help
+ Historically the kernel has always automatically loaded any
+ line discipline that is in a kernel module when a user asks
+ for it to be loaded with the TIOCSETD ioctl, or through other
+ means. This is not always the best thing to do on systems
+ where you know you will not be using some of the more
+ "ancient" line disciplines, so prevent the kernel from doing
+ this unless the request comes from a process with the
+ CAP_SYS_MODULE capability.
+
+ Say 'Y' here if you trust your userspace users to do the right
+ thing, or if you have only provided the line disciplines that
+ you know you will be using, or if you wish to continue to use
+ the traditional method of on-demand loading of these modules
+ by any user.
+
+ This functionality can be changed at runtime with the
+ dev.tty.ldisc_autoload sysctl; this configuration option only
+ sets the default value.
+
+source "drivers/tty/serial/Kconfig"
+
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
depends on HAS_IOMEM
@@ -270,16 +294,6 @@ config SYNCLINK_GT
synchronous and asynchronous serial adapters
manufactured by Microgate Systems, Ltd. (www.microgate.com)
-config NOZOMI
- tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
- depends on PCI
- help
- If you have a HSDPA driver Broadband Wireless Data Card -
- Globe Trotter PCMCIA card, say Y here.
-
- To compile this driver as a module, choose M here, the module
- will be called nozomi.
-
config ISI
tristate "Multi-Tech multiport card support"
depends on SERIAL_NONSTANDARD && PCI
@@ -302,43 +316,6 @@ config N_HDLC
The module will be called n_hdlc. If you want to do that, say M
here.
-config N_GSM
- tristate "GSM MUX line discipline support (EXPERIMENTAL)"
- depends on NET
- help
- This line discipline provides support for the GSM MUX protocol and
- presents the mux as a set of 61 individual tty devices.
-
-config TRACE_ROUTER
- tristate "Trace data router for MIPI P1149.7 cJTAG standard"
- depends on TRACE_SINK
- help
- The trace router uses the Linux tty line discipline framework to
- route trace data coming from a tty port (say UART for example) to
- the trace sink line discipline driver and to another tty port (say
- USB). This is part of a solution for the MIPI P1149.7, compact JTAG,
- standard, which is for debugging mobile devices. The PTI driver in
- drivers/misc/pti.c defines the majority of this MIPI solution.
-
- You should select this driver if the target kernel is meant for
- a mobile device containing a modem. Then you will need to select
- "Trace data sink for MIPI P1149.7 cJTAG standard" line discipline
- driver.
-
-config TRACE_SINK
- tristate "Trace data sink for MIPI P1149.7 cJTAG standard"
- help
- The trace sink uses the Linux line discipline framework to receive
- trace data coming from the trace router line discipline driver
- to a user-defined tty port target, like USB.
- This is to provide a way to extract modem trace data on
- devices that do not have a PTI HW module, or just need modem
- trace data to come out of a different HW output port.
- This is part of a solution for the P1149.7, compact JTAG, standard.
-
- If you select this option, you need to select
- "Trace data router for MIPI P1149.7 cJTAG standard".
-
config PPC_EPAPR_HV_BYTECHAN
bool "ePAPR hypervisor byte channel driver"
depends on PPC
@@ -374,20 +351,6 @@ config PPC_EARLY_DEBUG_EHV_BC_HANDLE
there simply will be no early console output. This is true also
if you don't boot under a hypervisor at all.
-config NULL_TTY
- tristate "NULL TTY driver"
- help
- Say Y here if you want a NULL TTY which simply discards messages.
-
- This is useful to allow userspace applications which expect a console
- device to work without modifications even when no console is
- available or desired.
-
- In order to use this driver, you should redirect the console to this
- TTY, or boot the kernel with console=ttynull.
-
- If unsure, say N.
-
config GOLDFISH_TTY
tristate "Goldfish TTY Driver"
depends on GOLDFISH
@@ -401,6 +364,23 @@ config GOLDFISH_TTY_EARLY_CONSOLE
default y if GOLDFISH_TTY=y
select SERIAL_EARLYCON
+config N_GSM
+ tristate "GSM MUX line discipline support (EXPERIMENTAL)"
+ depends on NET
+ help
+ This line discipline provides support for the GSM MUX protocol and
+ presents the mux as a set of 61 individual tty devices.
+
+config NOZOMI
+ tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
+ depends on PCI
+ help
+ If you have an HSDPA Broadband Wireless Data Card -
+ Globe Trotter PCMCIA card, say Y here.
+
+ To compile this driver as a module, choose M here, the module
+ will be called nozomi.
+
config MIPS_EJTAG_FDC_TTY
bool "MIPS EJTAG Fast Debug Channel TTY"
depends on MIPS_CDMM
@@ -448,33 +428,58 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
+config TRACE_ROUTER
+ tristate "Trace data router for MIPI P1149.7 cJTAG standard"
+ depends on TRACE_SINK
+ help
+ The trace router uses the Linux tty line discipline framework to
+ route trace data coming from a tty port (say UART for example) to
+ the trace sink line discipline driver and to another tty port (say
+ USB). This is part of a solution for the MIPI P1149.7, compact JTAG,
+ standard, which is for debugging mobile devices. The PTI driver in
+ drivers/misc/pti.c defines the majority of this MIPI solution.
+
+ You should select this driver if the target kernel is meant for
+ a mobile device containing a modem. Then you will need to select
+ "Trace data sink for MIPI P1149.7 cJTAG standard" line discipline
+ driver.
+
+config TRACE_SINK
+ tristate "Trace data sink for MIPI P1149.7 cJTAG standard"
+ help
+ The trace sink uses the Linux line discipline framework to receive
+ trace data coming from the trace router line discipline driver
+ to a user-defined tty port target, like USB.
+ This is to provide a way to extract modem trace data on
+ devices that do not have a PTI HW module, or just need modem
+ trace data to come out of a different HW output port.
+ This is part of a solution for the P1149.7, compact JTAG, standard.
+
+ If you select this option, you need to select
+ "Trace data router for MIPI P1149.7 cJTAG standard".
+
config VCC
tristate "Sun Virtual Console Concentrator"
depends on SUN_LDOMS
help
Support for Sun logical domain consoles.
-config LDISC_AUTOLOAD
- bool "Automatically load TTY Line Disciplines"
- default y
- help
- Historically the kernel has always automatically loaded any
- line discipline that is in a kernel module when a user asks
- for it to be loaded with the TIOCSETD ioctl, or through other
- means. This is not always the best thing to do on systems
- where you know you will not be using some of the more
- "ancient" line disciplines, so prevent the kernel from doing
- this unless the request is coming from a process with the
- CAP_SYS_MODULE permissions.
-
- Say 'Y' here if you trust your userspace users to do the right
- thing, or if you have only provided the line disciplines that
- you know you will be using, or if you wish to continue to use
- the traditional method of on-demand loading of these modules
- by any user.
-
- This functionality can be changed at runtime with the
- dev.tty.ldisc_autoload sysctl, this configuration option will
- only set the default value of this functionality.
+source "drivers/tty/hvc/Kconfig"
endif # TTY
+
+source "drivers/tty/serdev/Kconfig"
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 769e0a5d1dfc..3c6dd06ec5fb 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -136,6 +136,21 @@ static int find_console_handle(void)
return 1;
}
+static unsigned int local_ev_byte_channel_send(unsigned int handle,
+ unsigned int *count,
+ const char *p)
+{
+ char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
+ unsigned int c = *count;
+
+ /*
+ * The hypercall reads a full EV_BYTE_CHANNEL_MAX_BYTES from the
+ * source buffer regardless of how few bytes are being sent, so
+ * bounce short writes through a zero-padded local buffer rather
+ * than read past the end of the caller's data.
+ */
+ if (c < sizeof(buffer)) {
+ memcpy(buffer, p, c);
+ memset(&buffer[c], 0, sizeof(buffer) - c);
+ p = buffer;
+ }
+ return ev_byte_channel_send(handle, count, p);
+}
+
/*************************** EARLY CONSOLE DRIVER ***************************/
#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
@@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data)
do {
count = 1;
- ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
+ ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
&count, &data);
} while (ret == EV_EAGAIN);
}
@@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
while (count) {
len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
do {
- ret = ev_byte_channel_send(handle, &len, s);
+ ret = local_ev_byte_channel_send(handle, &len, s);
} while (ret == EV_EAGAIN);
count -= len;
s += len;
@@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
EV_BYTE_CHANNEL_MAX_BYTES);
- ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
+ ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
/* 'len' is valid only if the return code is 0 or EV_EAGAIN */
if (!ret || (ret == EV_EAGAIN))
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 6a3c97d345a0..31b7e1b03749 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-if TTY
config HVC_DRIVER
bool
@@ -113,5 +112,3 @@ config HVCS
will depend on arch specific APIs exported from hvcserver.ko
which will also be compiled when this driver is built as a
module.
-
-endif # TTY
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index d6644f3d81fc..0aea76cd67ff 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -3,8 +3,6 @@
# Serial device configuration
#
-if TTY
-
menu "Serial drivers"
depends on HAS_IOMEM
@@ -1568,5 +1566,3 @@ endmenu
config SERIAL_MCTRL_GPIO
tristate
-
-endif # TTY
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a57698985f9c..6e725c6c6256 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -996,6 +996,44 @@ err_device_create:
}
EXPORT_SYMBOL_GPL(__uio_register_device);
+static void devm_uio_unregister_device(struct device *dev, void *res)
+{
+ uio_unregister_device(*(struct uio_info **)res);
+}
+
+/**
+ * devm_uio_register_device - Resource managed uio_register_device()
+ * @owner: module that creates the new device
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
+int __devm_uio_register_device(struct module *owner,
+ struct device *parent,
+ struct uio_info *info)
+{
+ struct uio_info **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ *ptr = info;
+ ret = __uio_register_device(owner, parent, info);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ devres_add(parent, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__devm_uio_register_device);
+
/**
* uio_unregister_device - unregister a UIO device
* @info: UIO device capabilities
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index fc25ce90da3b..ae319ef3a832 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -99,6 +99,13 @@ static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
return 0;
}
+static void uio_pdrv_genirq_cleanup(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_disable(dev);
+}
+
static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
@@ -213,28 +220,16 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
*/
pm_runtime_enable(&pdev->dev);
- ret = uio_register_device(&pdev->dev, priv->uioinfo);
- if (ret) {
- dev_err(&pdev->dev, "unable to register uio device\n");
- pm_runtime_disable(&pdev->dev);
+ ret = devm_add_action_or_reset(&pdev->dev, uio_pdrv_genirq_cleanup,
+ &pdev->dev);
+ if (ret)
return ret;
- }
-
- platform_set_drvdata(pdev, priv);
- return 0;
-}
-
-static int uio_pdrv_genirq_remove(struct platform_device *pdev)
-{
- struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev);
- uio_unregister_device(priv->uioinfo);
- pm_runtime_disable(&pdev->dev);
-
- priv->uioinfo->handler = NULL;
- priv->uioinfo->irqcontrol = NULL;
+ ret = devm_uio_register_device(&pdev->dev, priv->uioinfo);
+ if (ret)
+ dev_err(&pdev->dev, "unable to register uio device\n");
- return 0;
+ return ret;
}
static int uio_pdrv_genirq_runtime_nop(struct device *dev)
@@ -271,7 +266,6 @@ MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio");
static struct platform_driver uio_pdrv_genirq = {
.probe = uio_pdrv_genirq_probe,
- .remove = uio_pdrv_genirq_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &uio_pdrv_genirq_dev_pm_ops,
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 7c96c4665178..950d2a85f098 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -216,6 +216,7 @@
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index e5e3a2553aaa..bdeb1e233fc9 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -172,11 +172,6 @@ enum data_direction {
DATA_DIR_NONE
};
-static inline u32 get_unaligned_be24(u8 *buf)
-{
- return 0xffffff & (u32) get_unaligned_be32(buf - 1);
-}
-
static inline struct fsg_lun *fsg_lun_from_dev(struct device *dev)
{
return container_of(dev, struct fsg_lun, dev);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 379a02c36e37..6c6b37b5c04e 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -9,7 +9,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/eventfd.h>
@@ -54,6 +53,12 @@ module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
"Disable using the PCI D3 low power state for idle, unused devices");
+static bool enable_sriov;
+#ifdef CONFIG_PCI_IOV
+module_param(enable_sriov, bool, 0644);
+MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver; enabling VFs without such support may result in non-functional VFs or PF.");
+#endif
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -466,6 +471,44 @@ out:
vfio_pci_set_power_state(vdev, PCI_D3hot);
}
+static struct pci_driver vfio_pci_driver;
+
+static struct vfio_pci_device *get_pf_vdev(struct vfio_pci_device *vdev,
+ struct vfio_device **pf_dev)
+{
+ struct pci_dev *physfn = pci_physfn(vdev->pdev);
+
+ if (!vdev->pdev->is_virtfn)
+ return NULL;
+
+ *pf_dev = vfio_device_get_from_dev(&physfn->dev);
+ if (!*pf_dev)
+ return NULL;
+
+ if (pci_dev_driver(physfn) != &vfio_pci_driver) {
+ vfio_device_put(*pf_dev);
+ return NULL;
+ }
+
+ return vfio_device_data(*pf_dev);
+}
+
+static void vfio_pci_vf_token_user_add(struct vfio_pci_device *vdev, int val)
+{
+ struct vfio_device *pf_dev;
+ struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);
+
+ if (!pf_vdev)
+ return;
+
+ mutex_lock(&pf_vdev->vf_token->lock);
+ pf_vdev->vf_token->users += val;
+ WARN_ON(pf_vdev->vf_token->users < 0);
+ mutex_unlock(&pf_vdev->vf_token->lock);
+
+ vfio_device_put(pf_dev);
+}
+
static void vfio_pci_release(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
@@ -473,6 +516,7 @@ static void vfio_pci_release(void *device_data)
mutex_lock(&vdev->reflck->lock);
if (!(--vdev->refcnt)) {
+ vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
}
@@ -498,6 +542,7 @@ static int vfio_pci_open(void *device_data)
goto error;
vfio_spapr_pci_eeh_open(vdev->pdev);
+ vfio_pci_vf_token_user_add(vdev, 1);
}
vdev->refcnt++;
error:
@@ -1140,6 +1185,65 @@ hot_reset_release:
return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
ioeventfd.data, count, ioeventfd.fd);
+ } else if (cmd == VFIO_DEVICE_FEATURE) {
+ struct vfio_device_feature feature;
+ uuid_t uuid;
+
+ minsz = offsetofend(struct vfio_device_feature, flags);
+
+ if (copy_from_user(&feature, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (feature.argsz < minsz)
+ return -EINVAL;
+
+ /* Check unknown flags */
+ if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK |
+ VFIO_DEVICE_FEATURE_SET |
+ VFIO_DEVICE_FEATURE_GET |
+ VFIO_DEVICE_FEATURE_PROBE))
+ return -EINVAL;
+
+ /* GET & SET are mutually exclusive except with PROBE */
+ if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
+ (feature.flags & VFIO_DEVICE_FEATURE_GET))
+ return -EINVAL;
+
+ switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
+ case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
+ if (!vdev->vf_token)
+ return -ENOTTY;
+
+ /*
+ * We do not support GET of the VF Token UUID as this
+ * could expose the token of the previous device user.
+ */
+ if (feature.flags & VFIO_DEVICE_FEATURE_GET)
+ return -EINVAL;
+
+ if (feature.flags & VFIO_DEVICE_FEATURE_PROBE)
+ return 0;
+
+ /* Don't SET unless told to do so */
+ if (!(feature.flags & VFIO_DEVICE_FEATURE_SET))
+ return -EINVAL;
+
+ if (feature.argsz < minsz + sizeof(uuid))
+ return -EINVAL;
+
+ if (copy_from_user(&uuid, (void __user *)(arg + minsz),
+ sizeof(uuid)))
+ return -EFAULT;
+
+ mutex_lock(&vdev->vf_token->lock);
+ uuid_copy(&vdev->vf_token->uuid, &uuid);
+ mutex_unlock(&vdev->vf_token->lock);
+
+ return 0;
+ default:
+ return -ENOTTY;
+ }
}
return -ENOTTY;
@@ -1278,6 +1382,150 @@ static void vfio_pci_request(void *device_data, unsigned int count)
mutex_unlock(&vdev->igate);
}
+static int vfio_pci_validate_vf_token(struct vfio_pci_device *vdev,
+ bool vf_token, uuid_t *uuid)
+{
+ /*
+ * There's always some degree of trust or collaboration between SR-IOV
+ * PF and VFs, even if just that the PF hosts the SR-IOV capability and
+ * can disrupt VFs with a reset, but often the PF has more explicit
+ * access to deny service to the VF or access data passed through the
+ * VF. We therefore require an opt-in via a shared VF token (UUID) to
+ * represent this trust. This both prevents a VF driver from assuming
+ * the PF driver is a trusted, in-kernel driver, and prevents a PF
+ * driver from being replaced by a rogue driver unknown to in-use VF
+ * drivers.
+ *
+ * Therefore when presented with a VF, if the PF is a vfio device and
+ * it is bound to the vfio-pci driver, the user needs to provide a VF
+ * token to access the device, in the form of appending a vf_token to
+ * the device name, for example:
+ *
+ * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
+ *
+ * When presented with a PF which has VFs in use, the user must also
+ * provide the current VF token to prove collaboration with existing
+ * VF users. If VFs are not in use, the VF token provided for the PF
+ * device will act to set the VF token.
+ *
+ * If the VF token is provided but unused, an error is generated.
+ */
+ if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
+ return 0; /* No VF token provided or required */
+
+ if (vdev->pdev->is_virtfn) {
+ struct vfio_device *pf_dev;
+ struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);
+ bool match;
+
+ if (!pf_vdev) {
+ if (!vf_token)
+ return 0; /* PF is not vfio-pci, no VF token */
+
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, PF not bound to vfio-pci\n");
+ return -EINVAL;
+ }
+
+ if (!vf_token) {
+ vfio_device_put(pf_dev);
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ mutex_lock(&pf_vdev->vf_token->lock);
+ match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
+ mutex_unlock(&pf_vdev->vf_token->lock);
+
+ vfio_device_put(pf_dev);
+
+ if (!match) {
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vdev->vf_token) {
+ mutex_lock(&vdev->vf_token->lock);
+ if (vdev->vf_token->users) {
+ if (!vf_token) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "VF token required to access device\n");
+ return -EACCES;
+ }
+
+ if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
+ mutex_unlock(&vdev->vf_token->lock);
+ pci_info_ratelimited(vdev->pdev,
+ "Incorrect VF token provided for device\n");
+ return -EACCES;
+ }
+ } else if (vf_token) {
+ uuid_copy(&vdev->vf_token->uuid, uuid);
+ }
+
+ mutex_unlock(&vdev->vf_token->lock);
+ } else if (vf_token) {
+ pci_info_ratelimited(vdev->pdev,
+ "VF token incorrectly provided, not a PF or VF\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define VF_TOKEN_ARG "vf_token="
+
+static int vfio_pci_match(void *device_data, char *buf)
+{
+ struct vfio_pci_device *vdev = device_data;
+ bool vf_token = false;
+ uuid_t uuid;
+ int ret;
+
+ if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
+ return 0; /* No match */
+
+ if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
+ buf += strlen(pci_name(vdev->pdev));
+
+ if (*buf != ' ')
+ return 0; /* No match: non-whitespace after name */
+
+ while (*buf) {
+ if (*buf == ' ') {
+ buf++;
+ continue;
+ }
+
+ if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
+ strlen(VF_TOKEN_ARG))) {
+ buf += strlen(VF_TOKEN_ARG);
+
+ if (strlen(buf) < UUID_STRING_LEN)
+ return -EINVAL;
+
+ ret = uuid_parse(buf, &uuid);
+ if (ret)
+ return ret;
+
+ vf_token = true;
+ buf += UUID_STRING_LEN;
+ } else {
+ /* Unknown/duplicate option */
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
+ if (ret)
+ return ret;
+
+ return 1; /* Match */
+}
+
static const struct vfio_device_ops vfio_pci_ops = {
.name = "vfio-pci",
.open = vfio_pci_open,
@@ -1287,10 +1535,40 @@ static const struct vfio_device_ops vfio_pci_ops = {
.write = vfio_pci_write,
.mmap = vfio_pci_mmap,
.request = vfio_pci_request,
+ .match = vfio_pci_match,
};
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
+static struct pci_driver vfio_pci_driver;
+
+static int vfio_pci_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct vfio_pci_device *vdev = container_of(nb,
+ struct vfio_pci_device, nb);
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *physfn = pci_physfn(pdev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
+ pci_name(pdev));
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ vfio_pci_ops.name);
+ } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
+ pdev->is_virtfn && physfn == vdev->pdev) {
+ struct pci_driver *drv = pci_dev_driver(pdev);
+
+ if (drv && drv != &vfio_pci_driver)
+ pci_warn(vdev->pdev,
+ "VF %s bound to driver %s while PF bound to vfio-pci\n",
+ pci_name(pdev), drv->name);
+ }
+
+ return 0;
+}
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -1302,12 +1580,12 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -EINVAL;
/*
- * Prevent binding to PFs with VFs enabled, this too easily allows
- * userspace instance with VFs and PFs from the same device, which
- * cannot work. Disabling SR-IOV here would initiate removing the
- * VFs, which would unbind the driver, which is prone to blocking
- * if that VF is also in use by vfio-pci. Just reject these PFs
- * and let the user sort it out.
+ * Prevent binding to PFs with VFs enabled, the VFs might be in use
+ * by the host or other users. We cannot capture the VFs if they
+ * already exist, nor can we track VF users. Disabling SR-IOV here
+ * would initiate removing the VFs, which would unbind the driver,
+ * which is prone to blocking if that VF is also in use by vfio-pci.
+ * Just reject these PFs and let the user sort it out.
*/
if (pci_num_vf(pdev)) {
pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
@@ -1320,8 +1598,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
- vfio_iommu_group_put(group, &pdev->dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_group_put;
}
vdev->pdev = pdev;
@@ -1332,18 +1610,27 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&vdev->ioeventfds_list);
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
- if (ret) {
- vfio_iommu_group_put(group, &pdev->dev);
- kfree(vdev);
- return ret;
- }
+ if (ret)
+ goto out_free;
ret = vfio_pci_reflck_attach(vdev);
- if (ret) {
- vfio_del_group_dev(&pdev->dev);
- vfio_iommu_group_put(group, &pdev->dev);
- kfree(vdev);
- return ret;
+ if (ret)
+ goto out_del_group_dev;
+
+ if (pdev->is_physfn) {
+ vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
+ if (!vdev->vf_token) {
+ ret = -ENOMEM;
+ goto out_reflck;
+ }
+
+ mutex_init(&vdev->vf_token->lock);
+ uuid_gen(&vdev->vf_token->uuid);
+
+ vdev->nb.notifier_call = vfio_pci_bus_notifier;
+ ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
+ if (ret)
+ goto out_vf_token;
}
if (vfio_pci_is_vga(pdev)) {
@@ -1369,16 +1656,39 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
return ret;
+
+out_vf_token:
+ kfree(vdev->vf_token);
+out_reflck:
+ vfio_pci_reflck_put(vdev->reflck);
+out_del_group_dev:
+ vfio_del_group_dev(&pdev->dev);
+out_free:
+ kfree(vdev);
+out_group_put:
+ vfio_iommu_group_put(group, &pdev->dev);
+ return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
struct vfio_pci_device *vdev;
+ pci_disable_sriov(pdev);
+
vdev = vfio_del_group_dev(&pdev->dev);
if (!vdev)
return;
+ if (vdev->vf_token) {
+ WARN_ON(vdev->vf_token->users);
+ mutex_destroy(&vdev->vf_token->lock);
+ kfree(vdev->vf_token);
+ }
+
+ if (vdev->nb.notifier_call)
+ bus_unregister_notifier(&pci_bus_type, &vdev->nb);
+
vfio_pci_reflck_put(vdev->reflck);
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
@@ -1427,16 +1737,48 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
+static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+{
+ struct vfio_pci_device *vdev;
+ struct vfio_device *device;
+ int ret = 0;
+
+ might_sleep();
+
+ if (!enable_sriov)
+ return -ENOENT;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return -ENODEV;
+
+ vdev = vfio_device_data(device);
+ if (!vdev) {
+ vfio_device_put(device);
+ return -ENODEV;
+ }
+
+ if (nr_virtfn == 0)
+ pci_disable_sriov(pdev);
+ else
+ ret = pci_enable_sriov(pdev, nr_virtfn);
+
+ vfio_device_put(device);
+
+ return ret < 0 ? ret : nr_virtfn;
+}
+
static const struct pci_error_handlers vfio_err_handlers = {
.error_detected = vfio_pci_aer_err_detected,
};
static struct pci_driver vfio_pci_driver = {
- .name = "vfio-pci",
- .id_table = NULL, /* only dynamic ids */
- .probe = vfio_pci_probe,
- .remove = vfio_pci_remove,
- .err_handler = &vfio_err_handlers,
+ .name = "vfio-pci",
+ .id_table = NULL, /* only dynamic ids */
+ .probe = vfio_pci_probe,
+ .remove = vfio_pci_remove,
+ .sriov_configure = vfio_pci_sriov_configure,
+ .err_handler = &vfio_err_handlers,
};
static DEFINE_MUTEX(reflck_lock);
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index df4d96038cd4..ed20d73cc27c 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -422,8 +422,14 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
&mmio_atsd)) {
- dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
- mmio_atsd = 0;
+ if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0,
+ &mmio_atsd)) {
+ dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
+ mmio_atsd = 0;
+ } else {
+ dev_warn(&vdev->pdev->dev,
+ "Using fallback ibm,mmio-atsd[0] for ATSD.\n");
+ }
}
if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8a2c7607d513..36ec69081ecd 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -12,6 +12,8 @@
#include <linux/pci.h>
#include <linux/irqbypass.h>
#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/notifier.h>
#ifndef VFIO_PCI_PRIVATE_H
#define VFIO_PCI_PRIVATE_H
@@ -84,6 +86,12 @@ struct vfio_pci_reflck {
struct mutex lock;
};
+struct vfio_pci_vf_token {
+ struct mutex lock;
+ uuid_t uuid;
+ int users;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_NUM_BARS];
@@ -122,6 +130,8 @@ struct vfio_pci_device {
struct list_head dummy_resources_list;
struct mutex ioeventfds_lock;
struct list_head ioeventfds_list;
+ struct vfio_pci_vf_token *vf_token;
+ struct notifier_block nb;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index ae1a5eb98620..1e2769010089 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -44,7 +44,7 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i)
{
struct platform_device *pdev = (struct platform_device *) vdev->opaque;
- return platform_get_irq(pdev, i);
+ return platform_get_irq_optional(pdev, i);
}
static int vfio_platform_probe(struct platform_device *pdev)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index c8482624ca34..765e0e5d83ed 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -875,11 +875,23 @@ EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
char *buf)
{
- struct vfio_device *it, *device = NULL;
+ struct vfio_device *it, *device = ERR_PTR(-ENODEV);
mutex_lock(&group->device_lock);
list_for_each_entry(it, &group->device_list, group_next) {
- if (!strcmp(dev_name(it->dev), buf)) {
+ int ret;
+
+ if (it->ops->match) {
+ ret = it->ops->match(it->device_data, buf);
+ if (ret < 0) {
+ device = ERR_PTR(ret);
+ break;
+ }
+ } else {
+ ret = !strcmp(dev_name(it->dev), buf);
+ }
+
+ if (ret) {
device = it;
vfio_device_get(device);
break;
@@ -1430,8 +1442,8 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
return -EPERM;
device = vfio_device_get_from_name(group, buf);
- if (!device)
- return -ENODEV;
+ if (IS_ERR(device))
+ return PTR_ERR(device);
ret = device->ops->open(device->device_data);
if (ret) {
@@ -1720,6 +1732,44 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+/**
+ * External user API, exported by symbols to be linked dynamically.
+ * The external user passes in a device pointer
+ * to verify that:
+ * - A VFIO group is associated with the device;
+ * - IOMMU is set for the group.
+ * If both checks pass, vfio_group_get_external_user_from_dev()
+ * increments the container user counter to prevent the VFIO group
+ * from disposal before the external user exits, and returns the pointer
+ * to the VFIO group.
+ *
+ * When the external user finishes using the VFIO group, it calls
+ * vfio_group_put_external_user() to release the VFIO group and
+ * decrement the container user counter.
+ *
+ * @dev [in] : device
+ * Return error PTR or pointer to VFIO group.
+ */
+
+struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
+{
+ struct vfio_group *group;
+ int ret;
+
+ group = vfio_group_get_from_dev(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ ret = vfio_group_add_container_user(group);
+ if (ret) {
+ vfio_group_put(group);
+ return ERR_PTR(ret);
+ }
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);
+
void vfio_group_put_external_user(struct vfio_group *group)
{
vfio_group_try_dissolve_container(group);
@@ -1961,6 +2011,146 @@ err_unpin_pages:
}
EXPORT_SYMBOL(vfio_unpin_pages);
+/*
+ * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
+ * VFIO group.
+ *
+ * The caller needs to call vfio_group_get_external_user() or
+ * vfio_group_get_external_user_from_dev() prior to calling this interface,
+ * so as to prevent the VFIO group from disposal in the middle of the call.
+ * But it can keep the reference to the VFIO group for several calls into
+ * this interface.
+ * After finishing use of the VFIO group, the caller needs to release the
+ * VFIO group by calling vfio_group_put_external_user().
+ *
+ * @group [in] : VFIO group
+ * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be pinned.
+ * @npage [in] : count of elements in user_iova_pfn array.
+ * This count should not be greater than
+ * VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @prot [in] : protection flags
+ * @phys_pfn [out] : array of host PFNs
+ * Return error or number of pages pinned.
+ */
+int vfio_group_pin_pages(struct vfio_group *group,
+ unsigned long *user_iova_pfn, int npage,
+ int prot, unsigned long *phys_pfn)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!group || !user_iova_pfn || !phys_pfn || !npage)
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->pin_pages))
+ ret = driver->ops->pin_pages(container->iommu_data,
+ user_iova_pfn, npage,
+ prot, phys_pfn);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_group_pin_pages);
+
+/*
+ * Unpin a set of guest IOVA PFNs for a VFIO group.
+ *
+ * The caller needs to call vfio_group_get_external_user() or
+ * vfio_group_get_external_user_from_dev() prior to calling this interface,
+ * so as to prevent the VFIO group from disposal in the middle of the call.
+ * But it can keep the reference to the VFIO group for several calls into
+ * this interface.
+ * After finishing use of the VFIO group, the caller needs to release the
+ * VFIO group by calling vfio_group_put_external_user().
+ *
+ * @group [in] : vfio group
+ * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be unpinned.
+ * @npage [in] : count of elements in user_iova_pfn array.
+ * This count should not be greater than
+ * VFIO_PIN_PAGES_MAX_ENTRIES.
+ * Return error or number of pages unpinned.
+ */
+int vfio_group_unpin_pages(struct vfio_group *group,
+ unsigned long *user_iova_pfn, int npage)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret;
+
+ if (!group || !user_iova_pfn || !npage)
+ return -EINVAL;
+
+ if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ return -E2BIG;
+
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->unpin_pages))
+ ret = driver->ops->unpin_pages(container->iommu_data,
+ user_iova_pfn, npage);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_group_unpin_pages);
+
+
+/*
+ * This interface allows the CPUs to perform a form of virtual DMA on
+ * behalf of the device.
+ *
+ * The CPUs read from or write into a range of IOVAs pointing at user
+ * space memory, copying the data into or out of a kernel buffer.
+ *
+ * As the read/write of user space memory is conducted via the CPUs and is
+ * not a real device DMA, it is not necessary to pin the user space memory.
+ *
+ * The caller needs to call vfio_group_get_external_user() or
+ * vfio_group_get_external_user_from_dev() prior to calling this interface,
+ * so as to prevent the VFIO group from disposal in the middle of the call.
+ * But it can keep the reference to the VFIO group for several calls into
+ * this interface.
+ * After finishing use of the VFIO group, the caller needs to release the
+ * VFIO group by calling vfio_group_put_external_user().
+ *
+ * @group [in] : VFIO group
+ * @user_iova [in] : base IOVA of a user space buffer
+ * @data [in] : pointer to kernel buffer
+ * @len [in] : kernel buffer length
+ * @write [in] : true for a write to user memory, false for a read
+ * Return error code on failure or 0 on success.
+ */
+int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
+ void *data, size_t len, bool write)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret = 0;
+
+ if (!group || !data || len <= 0)
+ return -EINVAL;
+
+ container = group->container;
+ driver = container->iommu_driver;
+
+ if (likely(driver && driver->ops->dma_rw))
+ ret = driver->ops->dma_rw(container->iommu_data,
+ user_iova, data, len, write);
+ else
+ ret = -ENOTTY;
+
+ return ret;
+}
+EXPORT_SYMBOL(vfio_dma_rw);
+
static int vfio_register_iommu_notifier(struct vfio_group *group,
unsigned long *events,
struct notifier_block *nb)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a177bf2c6683..85b32c325282 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -27,6 +27,7 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
@@ -1786,7 +1787,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (resv_msi) {
ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
- if (ret)
+ if (ret && ret != -ENODEV)
goto out_detach;
}
@@ -2305,6 +2306,80 @@ static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
return blocking_notifier_chain_unregister(&iommu->notifier, nb);
}
+static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
+ dma_addr_t user_iova, void *data,
+ size_t count, bool write,
+ size_t *copied)
+{
+ struct mm_struct *mm;
+ unsigned long vaddr;
+ struct vfio_dma *dma;
+ bool kthread = current->mm == NULL;
+ size_t offset;
+
+ *copied = 0;
+
+ dma = vfio_find_dma(iommu, user_iova, 1);
+ if (!dma)
+ return -EINVAL;
+
+ if ((write && !(dma->prot & IOMMU_WRITE)) ||
+ !(dma->prot & IOMMU_READ))
+ return -EPERM;
+
+ mm = get_task_mm(dma->task);
+
+ if (!mm)
+ return -EPERM;
+
+ /*
+ * A kthread may temporarily adopt the mapping task's mm; any
+ * other caller must already be the task that created the DMA
+ * mapping, otherwise bail out.
+ */
+ if (kthread)
+ use_mm(mm);
+ else if (current->mm != mm)
+ goto out;
+
+ offset = user_iova - dma->iova;
+
+ if (count > dma->size - offset)
+ count = dma->size - offset;
+
+ vaddr = dma->vaddr + offset;
+
+ if (write)
+ *copied = __copy_to_user((void __user *)vaddr, data,
+ count) ? 0 : count;
+ else
+ *copied = __copy_from_user(data, (void __user *)vaddr,
+ count) ? 0 : count;
+ if (kthread)
+ unuse_mm(mm);
+out:
+ mmput(mm);
+ return *copied ? 0 : -EFAULT;
+}
+
+static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
+ void *data, size_t count, bool write)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ int ret = 0;
+ size_t done;
+
+ mutex_lock(&iommu->lock);
+ while (count > 0) {
+ ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
+ count, write, &done);
+ if (ret)
+ break;
+
+ count -= done;
+ data += done;
+ user_iova += done;
+ }
+
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.name = "vfio-iommu-type1",
.owner = THIS_MODULE,
@@ -2317,6 +2392,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.unpin_pages = vfio_iommu_type1_unpin_pages,
.register_notifier = vfio_iommu_type1_register_notifier,
.unregister_notifier = vfio_iommu_type1_unregister_notifier,
+ .dma_rw = vfio_iommu_type1_dma_rw,
};
static int __init vfio_iommu_type1_init(void)
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0093bbd0d326..7d22d7377606 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -9,7 +9,7 @@ menu "Backlight & LCD device support"
# LCD
#
config LCD_CLASS_DEVICE
- tristate "Lowlevel LCD controls"
+ tristate "Lowlevel LCD controls"
help
This framework adds support for low-level control of LCD.
Some framebuffer devices connect to platform-specific LCD modules
@@ -141,10 +141,10 @@ endif # LCD_CLASS_DEVICE
# Backlight
#
config BACKLIGHT_CLASS_DEVICE
- tristate "Lowlevel Backlight controls"
+ tristate "Lowlevel Backlight controls"
help
This framework adds support for low-level control of the LCD
- backlight. This includes support for brightness and power.
+ backlight. This includes support for brightness and power.
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
@@ -272,7 +272,7 @@ config BACKLIGHT_APPLE
tristate "Apple Backlight Driver"
depends on X86 && ACPI
help
+ If you have an Intel-based Apple, say Y to enable a driver for its
+ If you have an Intel-based Apple say Y to enable a driver for its
backlight.
config BACKLIGHT_TOSA
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 70c10ea1c38b..3c01b0d2414f 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -27,7 +27,7 @@ config VGACON_SOFT_SCROLLBACK
depends on VGA_CONSOLE
default n
help
- The scrollback buffer of the standard VGA console is located in
+ The scrollback buffer of the standard VGA console is located in
the VGA RAM. The size of this RAM is fixed and is quite small.
If you require a larger scrollback buffer, this can be placed in
System RAM which is dynamically allocated during initialization.
@@ -84,36 +84,36 @@ config MDA_CONSOLE
If unsure, say N.
config SGI_NEWPORT_CONSOLE
- tristate "SGI Newport Console support"
+ tristate "SGI Newport Console support"
depends on SGI_IP22 && HAS_IOMEM
- select FONT_SUPPORT
- help
- Say Y here if you want the console on the Newport aka XL graphics
- card of your Indy. Most people say Y here.
+ select FONT_SUPPORT
+ help
+ Say Y here if you want the console on the Newport aka XL graphics
+ card of your Indy. Most people say Y here.
config DUMMY_CONSOLE
bool
default y
config DUMMY_CONSOLE_COLUMNS
- int "Initial number of console screen columns"
- depends on DUMMY_CONSOLE && !ARM
- default 160 if PARISC
- default 80
- help
- On PA-RISC, the default value is 160, which should fit a 1280x1024
- monitor.
- Select 80 if you use a 640x480 resolution by default.
+ int "Initial number of console screen columns"
+ depends on DUMMY_CONSOLE && !ARM
+ default 160 if PARISC
+ default 80
+ help
+ On PA-RISC, the default value is 160, which should fit a 1280x1024
+ monitor.
+ Select 80 if you use a 640x480 resolution by default.
config DUMMY_CONSOLE_ROWS
- int "Initial number of console screen rows"
- depends on DUMMY_CONSOLE && !ARM
- default 64 if PARISC
- default 25
- help
- On PA-RISC, the default value is 64, which should fit a 1280x1024
- monitor.
- Select 25 if you use a 640x480 resolution by default.
+ int "Initial number of console screen rows"
+ depends on DUMMY_CONSOLE && !ARM
+ default 64 if PARISC
+ default 25
+ help
+ On PA-RISC, the default value is 64, which should fit a 1280x1024
+ monitor.
+ Select 25 if you use a 640x480 resolution by default.
config FRAMEBUFFER_CONSOLE
bool "Framebuffer Console support"
@@ -129,11 +129,11 @@ config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
depends on FRAMEBUFFER_CONSOLE
default n
---help---
- If this option is selected, the framebuffer console will
- automatically select the primary display device (if the architecture
+ If this option is selected, the framebuffer console will
+ automatically select the primary display device (if the architecture
supports this feature). Otherwise, the framebuffer console will
- always select the first framebuffer driver that is loaded. The latter
- is the default behavior.
+ always select the first framebuffer driver that is loaded. The latter
+ is the default behavior.
You can always override the automatic selection of the primary device
by using the fbcon=map: boot option.
@@ -144,11 +144,11 @@ config FRAMEBUFFER_CONSOLE_ROTATION
bool "Framebuffer Console Rotation"
depends on FRAMEBUFFER_CONSOLE
help
- Enable display rotation for the framebuffer console. This is done
- in software and may be significantly slower than a normally oriented
- display. Note that the rotation is done at the console level only
- such that other users of the framebuffer will remain normally
- oriented.
+ Enable display rotation for the framebuffer console. This is done
+ in software and may be significantly slower than a normally oriented
+ display. Note that the rotation is done at the console level only
+ such that other users of the framebuffer will remain normally
+ oriented.
config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
bool "Framebuffer Console Deferred Takeover"
@@ -162,14 +162,14 @@ config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
black screen as soon as fbcon loads.
config STI_CONSOLE
- bool "STI text console"
+ bool "STI text console"
depends on PARISC && HAS_IOMEM
- select FONT_SUPPORT
- default y
- help
- The STI console is the builtin display/keyboard on HP-PARISC
- machines. Say Y here to build support for it into your kernel.
- The alternative is to use your primary serial port as a console.
+ select FONT_SUPPORT
+ default y
+ help
+ The STI console is the builtin display/keyboard on HP-PARISC
+ machines. Say Y here to build support for it into your kernel.
+ The alternative is to use your primary serial port as a console.
endmenu
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index f65991a67af2..91b0a719d221 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -435,7 +435,7 @@ config FB_FM2
config FB_ARC
tristate "Arc Monochrome LCD board support"
- depends on FB && X86
+ depends on FB && (X86 || COMPILE_TEST)
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -1639,7 +1639,7 @@ config FB_VT8500
config FB_WM8505
bool "Wondermedia WM8xxx-series frame buffer support"
- depends on (FB = y) && ARM && ARCH_VT8500
+ depends on (FB = y) && HAS_IOMEM && (ARCH_VT8500 || COMPILE_TEST)
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
@@ -1827,7 +1827,7 @@ config FB_FSL_DIU
config FB_W100
tristate "W100 frame buffer support"
- depends on FB && ARCH_PXA
+ depends on FB && HAS_IOMEM && (ARCH_PXA || COMPILE_TEST)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1844,7 +1844,8 @@ config FB_W100
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
- depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK
+ depends on FB && HAVE_CLK && HAS_IOMEM
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 27cb65fa2ba2..9c37e28fb78b 100644
--- a/drivers/video/fbdev/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
@@ -618,14 +618,13 @@ static int aty_var_to_pll_8398(const struct fb_info *info, u32 vclk_per,
u32 mhz100; /* in 0.01 MHz */
u32 program_bits;
/* u32 post_divider; */
- u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
+ u32 mach64MinFreq, mach64MaxFreq;
u16 m, n, k = 0, save_m, save_n, twoToKth;
/* Calculate the programming word */
mhz100 = 100000000 / vclk_per;
mach64MinFreq = MIN_FREQ_2595;
mach64MaxFreq = MAX_FREQ_2595;
- mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */
save_m = 0;
save_n = 0;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 3af00e3b965e..e116a3f9ad56 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -849,12 +849,6 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
case 9 ... 16:
v.bits_per_pixel = 16;
break;
- case 17 ... 24:
-#if 0 /* Doesn't seem to work */
- v.bits_per_pixel = 24;
- break;
-#endif
- return -EINVAL;
case 25 ... 32:
v.bits_per_pixel = 32;
break;
@@ -1650,14 +1644,14 @@ static int radeonfb_set_par(struct fb_info *info)
struct fb_var_screeninfo *mode = &info->var;
struct radeon_regs *newmode;
int hTotal, vTotal, hSyncStart, hSyncEnd,
- hSyncPol, vSyncStart, vSyncEnd, vSyncPol, cSync;
+ vSyncStart, vSyncEnd;
u8 hsync_adj_tab[] = {0, 0x12, 9, 9, 6, 5};
u8 hsync_fudge_fp[] = {2, 2, 0, 0, 5, 5};
u32 sync, h_sync_pol, v_sync_pol, dotClock, pixClock;
int i, freq;
int format = 0;
int nopllcalc = 0;
- int hsync_start, hsync_fudge, bytpp, hsync_wid, vsync_wid;
+ int hsync_start, hsync_fudge, hsync_wid, vsync_wid;
int primary_mon = PRIMARY_MONITOR(rinfo);
int depth = var_to_depth(mode);
int use_rmx = 0;
@@ -1730,13 +1724,7 @@ static int radeonfb_set_par(struct fb_info *info)
else if (vsync_wid > 0x1f) /* max */
vsync_wid = 0x1f;
- hSyncPol = mode->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
- vSyncPol = mode->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
- cSync = mode->sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
-
format = radeon_get_dstbpp(depth);
- bytpp = mode->bits_per_pixel >> 3;
if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD))
hsync_fudge = hsync_fudge_fp[format-1];
@@ -2548,16 +2536,6 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
if (rinfo->mon2_EDID)
sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr);
-#if 0
- /* restore original state
- *
- * Doesn't quite work yet, I suspect if we come from a legacy
- * VGA mode (or worse, text mode), we need to do some VGA black
- * magic here that I know nothing about. --BenH
- */
- radeon_write_mode (rinfo, &rinfo->init_state, 1);
- #endif
-
del_timer_sync(&rinfo->lvds_timer);
arch_phys_wc_del(rinfo->wc_cookie);
unregister_framebuffer(info);
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index a620b51cf7d0..6a745eb46ca1 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -509,8 +509,7 @@ static int cg14_probe(struct platform_device *op)
if (!par->regs || !par->clut || !par->cursor || !info->screen_base)
goto out_unmap_regs;
- is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) ==
- (8 * 1024 * 1024));
+ is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024));
BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));
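resource_size() is the canonical replacement for the open-coded
"end - start + 1" computation here and in the s1d13xxxfb hunks below; it
avoids off-by-one mistakes, since a struct resource spans [start, end]
inclusive. Its definition in include/linux/ioport.h is essentially:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}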
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index 37710316a680..26cbc965497c 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -16,7 +16,6 @@ fb-y += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
fbcon_ccw.o
endif
endif
-fb-objs := $(fb-y)
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bb6ae995c2e5..28335788e76e 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -873,7 +873,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
int oldidx = con2fb_map[unit];
struct fb_info *info = registered_fb[newidx];
struct fb_info *oldinfo = NULL;
- int found, err = 0;
+ int found, err = 0;
WARN_CONSOLE_UNLOCKED();
@@ -895,31 +895,30 @@ static int set_con2fb_map(int unit, int newidx, int user)
con2fb_map[unit] = newidx;
if (!err && !found)
- err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
-
+ err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
- if (!err && oldinfo && !search_fb_in_map(oldidx))
- err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
- found);
+ if (!err && oldinfo && !search_fb_in_map(oldidx))
+ err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
+ found);
- if (!err) {
- int show_logo = (fg_console == 0 && !user &&
- logo_shown != FBCON_LOGO_DONTSHOW);
+ if (!err) {
+ int show_logo = (fg_console == 0 && !user &&
+ logo_shown != FBCON_LOGO_DONTSHOW);
- if (!found)
- fbcon_add_cursor_timer(info);
- con2fb_map_boot[unit] = newidx;
- con2fb_init_display(vc, info, unit, show_logo);
+ if (!found)
+ fbcon_add_cursor_timer(info);
+ con2fb_map_boot[unit] = newidx;
+ con2fb_init_display(vc, info, unit, show_logo);
}
if (!search_fb_in_map(info_idx))
info_idx = newidx;
- return err;
+ return err;
}
/*
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index d04554959ea7..30e73ec4ad5c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -663,20 +663,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
fb_logo.depth = 1;
- if (fb_logo.depth > 4 && depth > 4) {
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- fb_logo.needs_truepalette = 1;
- break;
- case FB_VISUAL_DIRECTCOLOR:
- fb_logo.needs_directpalette = 1;
- fb_logo.needs_cmapreset = 1;
- break;
- case FB_VISUAL_PSEUDOCOLOR:
- fb_logo.needs_cmapreset = 1;
- break;
- }
- }
+ if (fb_logo.depth > 4 && depth > 4) {
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ fb_logo.needs_truepalette = 1;
+ break;
+ case FB_VISUAL_DIRECTCOLOR:
+ fb_logo.needs_directpalette = 1;
+ fb_logo.needs_cmapreset = 1;
+ break;
+ case FB_VISUAL_PSEUDOCOLOR:
+ fb_logo.needs_cmapreset = 1;
+ break;
+ }
+ }
height = fb_logo.logo->height;
if (fb_center_logo)
@@ -1065,19 +1065,19 @@ fb_blank(struct fb_info *info, int blank)
struct fb_event event;
int ret = -EINVAL;
- if (blank > FB_BLANK_POWERDOWN)
- blank = FB_BLANK_POWERDOWN;
+ if (blank > FB_BLANK_POWERDOWN)
+ blank = FB_BLANK_POWERDOWN;
event.info = info;
event.data = &blank;
if (info->fbops->fb_blank)
- ret = info->fbops->fb_blank(blank, info);
+ ret = info->fbops->fb_blank(blank, info);
if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
- return ret;
+ return ret;
}
EXPORT_SYMBOL(fb_blank);
@@ -1115,7 +1115,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
break;
case FBIOGET_FSCREENINFO:
lock_fb_info(info);
- fix = info->fix;
+ memcpy(&fix, &info->fix, sizeof(fix));
if (info->flags & FBINFO_HIDE_SMEM_START)
fix.smem_start = 0;
unlock_fb_info(info);
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index f47d50e560c0..e4c3c8b65da4 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -594,8 +594,8 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
if (!t) {
pr_err("Time out on waiting resolution response\n");
- ret = -ETIMEDOUT;
- goto out;
+ ret = -ETIMEDOUT;
+ goto out;
}
if (msg->resolution_resp.resolution_count == 0) {
diff --git a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
index 0aeeaa10708b..9fde0e3b69ec 100644
--- a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
@@ -331,7 +331,7 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
u32 ulScale;
u32 ulLeft, ulRight;
u32 ulSrcLeft, ulSrcRight;
- u32 ulScaleLeft, ulScaleRight;
+ u32 ulScaleLeft;
u32 ulhDecim;
u32 ulsVal;
u32 ulVertDecFactor;
@@ -470,7 +470,6 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
* round down the pixel pos to the nearest 8 pixels.
*/
ulScaleLeft = ulSrcLeft;
- ulScaleRight = ulSrcRight;
/* shift fxscale until it is in the range of the scaler */
ulhDecim = 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 36cc718b96ae..570439b32655 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1376,6 +1376,12 @@ static struct video_board vbG200 = {
.accelID = FB_ACCEL_MATROX_MGAG200,
.lowlevel = &matrox_G100
};
+static struct video_board vbG200eW = {
+ .maxvram = 0x800000,
+ .maxdisplayable = 0x800000,
+ .accelID = FB_ACCEL_MATROX_MGAG200,
+ .lowlevel = &matrox_G100
+};
/* from doc it looks like that accelerator can draw only to low 16MB :-( Direct accesses & displaying are OK for
whole 32MB */
static struct video_board vbG400 = {
@@ -1494,6 +1500,13 @@ static struct board {
MGA_G200,
&vbG200,
"MGA-G200 (PCI)"},
+ {PCI_VENDOR_ID_MATROX, 0x0532, 0xFF,
+ 0, 0,
+ DEVF_G200,
+ 250000,
+ MGA_G200,
+ &vbG200eW,
+ "MGA-G200eW (PCI)"},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_AGP, 0xFF,
PCI_SS_VENDOR_ID_MATROX, PCI_SS_ID_MATROX_GENERIC,
DEVF_G200,
@@ -2136,6 +2149,8 @@ static const struct pci_device_id matroxfb_devices[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_PCI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_MATROX, 0x0532,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_AGP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400,
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index 335d4983dc52..167585a889d3 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1406,7 +1406,7 @@ struct mmphw_ctrl {
/*pathes*/
int path_num;
- struct mmphw_path_plat path_plats[0];
+ struct mmphw_path_plat path_plats[];
};
static inline int overlay_is_vid(struct mmp_overlay *overlay)
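The [0] to [] conversions here and in ssd1307fb below switch the structs to
C99 flexible array members, letting the compiler and bounds checkers know
the trailing array has no declared size. Allocation typically pairs with
struct_size() from <linux/overflow.h>; a hedged sketch with an illustrative
count n:

	struct mmphw_ctrl *ctrl;

	/* space for the header plus n trailing path_plats entries */
	ctrl = kzalloc(struct_size(ctrl, path_plats, n), GFP_KERNEL);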
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index c583c018304d..c24de9107958 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -168,27 +168,26 @@ static int nvidia_panel_tweak(struct nvidia_par *par,
{
int tweak = 0;
- if (par->paneltweak) {
- tweak = par->paneltweak;
- } else {
- /* begin flat panel hacks */
- /* This is unfortunate, but some chips need this register
- tweaked or else you get artifacts where adjacent pixels are
- swapped. There are no hard rules for what to set here so all
- we can do is experiment and apply hacks. */
-
- if(((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
- /* At least one NV34 laptop needs this workaround. */
- tweak = -1;
- }
-
- if((par->Chipset & 0xfff0) == 0x0310) {
- tweak = 1;
- }
- /* end flat panel hacks */
- }
-
- return tweak;
+ if (par->paneltweak) {
+ tweak = par->paneltweak;
+ } else {
+ /* Begin flat panel hacks.
+ * This is unfortunate, but some chips need this register
+ * tweaked or else you get artifacts where adjacent pixels are
+ * swapped. There are no hard rules for what to set here so all
+ * we can do is experiment and apply hacks.
+ */
+ if (((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
+ /* At least one NV34 laptop needs this workaround. */
+ tweak = -1;
+ }
+
+ if ((par->Chipset & 0xfff0) == 0x0310)
+ tweak = 1;
+ /* end flat panel hacks */
+ }
+
+ return tweak;
}
static void nvidia_screen_off(struct nvidia_par *par, int on)
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 8dfa9158ba78..836e7b1639ce 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1154,16 +1154,12 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
r = fbdev->ctrl->setcolreg(regno, red, green, blue,
transp, update_hw_pal);
*/
- /* Fallthrough */
r = -EINVAL;
break;
case OMAPFB_COLOR_RGB565:
case OMAPFB_COLOR_RGB444:
case OMAPFB_COLOR_RGB24P:
case OMAPFB_COLOR_RGB24U:
- if (r != 0)
- break;
-
if (regno < 16) {
u32 pal;
pal = ((red >> (16 - var->red.length)) <<
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 9b9ec1468347..aef8a3042590 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -769,7 +769,7 @@ failed_free_fbmem:
dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, fbi->fb_start_dma);
failed_free_info:
- kfree(info);
+ framebuffer_release(info);
dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
return ret;
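The error path switches from kfree() to framebuffer_release() because an
fb_info obtained from framebuffer_alloc() carries extra state (the attached
par area and device references) that a bare kfree() would leak. The intended
pairing, sketched:

	info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
	if (!info)
		return -ENOMEM;
	/* ... on any later failure ... */
	framebuffer_release(info);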
@@ -779,7 +779,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
{
struct pxa168fb_info *fbi = platform_get_drvdata(pdev);
struct fb_info *info;
- int irq;
unsigned int data;
if (!fbi)
@@ -799,8 +798,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
- irq = platform_get_irq(pdev, 0);
-
dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, info->fix.smem_start);
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 74ffb446e00c..4279e13a3b58 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -36,7 +36,6 @@
#include "pxa3xx-gcu.h"
#define DRV_NAME "pxa3xx-gcu"
-#define MISCDEV_MINOR 197
#define REG_GCCR 0x00
#define GCCR_SYNC_CLR (1 << 9)
@@ -595,7 +594,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
* container_of(). This isn't really necessary as we have a fixed minor
* number anyway, but this is to avoid statics. */
- priv->misc_dev.minor = MISCDEV_MINOR,
+ priv->misc_dev.minor = PXA3XX_GCU_MINOR,
priv->misc_dev.name = DRV_NAME,
priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops;
@@ -638,7 +637,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
ret = misc_register(&priv->misc_dev);
if (ret < 0) {
dev_err(dev, "misc_register() for minor %d failed\n",
- MISCDEV_MINOR);
+ PXA3XX_GCU_MINOR);
goto err_free_dma;
}
@@ -714,7 +713,7 @@ module_platform_driver(pxa3xx_gcu_driver);
MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(MISCDEV_MINOR);
+MODULE_ALIAS_MISCDEV(PXA3XX_GCU_MINOR);
MODULE_AUTHOR("Janine Kropp <nin@directfb.org>, "
"Denis Oliver Kropp <dok@directfb.org>, "
"Daniel Mack <daniel@caiaq.de>");
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 8048499e398d..eaea8c373753 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -746,9 +746,9 @@ s1d13xxxfb_remove(struct platform_device *pdev)
}
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1);
+ resource_size(&pdev->resource[0]));
release_mem_region(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1);
+ resource_size(&pdev->resource[1]));
return 0;
}
@@ -788,14 +788,14 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
}
if (!request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1, "s1d13xxxfb mem")) {
+ resource_size(&pdev->resource[0]), "s1d13xxxfb mem")) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto bail;
}
if (!request_mem_region(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1, "s1d13xxxfb regs")) {
+ resource_size(&pdev->resource[1]), "s1d13xxxfb regs")) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto bail;
@@ -810,7 +810,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
default_par = info->par;
default_par->regs = ioremap(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1);
+ resource_size(&pdev->resource[1]));
if (!default_par->regs) {
printk(KERN_ERR PFX "unable to map registers\n");
ret = -ENOMEM;
@@ -819,7 +819,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
info->pseudo_palette = default_par->pseudo_palette;
info->screen_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1);
+ resource_size(&pdev->resource[0]));
if (!info->screen_base) {
printk(KERN_ERR PFX "unable to map framebuffer\n");
@@ -857,9 +857,9 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
info->fix = s1d13xxxfb_fix;
info->fix.mmio_start = pdev->resource[1].start;
- info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
+ info->fix.mmio_len = resource_size(&pdev->resource[1]);
info->fix.smem_start = pdev->resource[0].start;
- info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ info->fix.smem_len = resource_size(&pdev->resource[0]);
printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
default_par->regs, info->fix.smem_len / 1024, info->screen_base);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 5bb653db0cec..2d285cc384cf 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1053,7 +1053,7 @@ static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
}
/* Fake monspecs to fill in fbinfo structure */
-static struct fb_monspecs monspecs = {
+static const struct fb_monspecs monspecs = {
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 50,
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 4ea6f932b334..8a27d12e6ea8 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1572,7 +1572,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &sh_mobile_lcdc_overlay_ops;
info->device = priv->dev;
- info->screen_base = ovl->fb_mem;
+ info->screen_buffer = ovl->fb_mem;
info->par = ovl;
/* Initialize fixed screen information. Restrict pan to 2 lines steps
@@ -2056,7 +2056,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &sh_mobile_lcdc_ops;
info->device = priv->dev;
- info->screen_base = ch->fb_mem;
+ info->screen_buffer = ch->fb_mem;
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 142535267fec..12fa1050f3eb 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -89,7 +89,7 @@ struct ssd1307fb_par {
struct ssd1307fb_array {
u8 type;
- u8 data[0];
+ u8 data[];
};
static const struct fb_fix_screeninfo ssd1307fb_fix = {
@@ -791,6 +791,8 @@ static int ssd1307fb_remove(struct i2c_client *client)
pwm_disable(par->pwm);
pwm_put(par->pwm);
}
+ if (par->vbat_reg)
+ regulator_disable(par->vbat_reg);
fb_deferred_io_cleanup(info);
__free_pages(__va(info->fix.smem_start), get_order(info->fix.smem_len));
framebuffer_release(info);
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index ad26cbffbc6f..2d6e2738b792 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -61,9 +61,9 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
#define BITS_PER_PIXEL 16
/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
-static void *remapped_base;
-static void *remapped_regs;
-static void *remapped_fbuf;
+static void __iomem *remapped_base;
+static void __iomem *remapped_regs;
+static void __iomem *remapped_fbuf;
#define REMAPPED_FB_LEN 0x15ffff
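The __iomem annotation marks these pointers as referring to device memory,
so sparse (make C=1) warns about direct dereferences; such memory must go
through accessors like readw()/writew(), as the soft-reset hunk below does.
A minimal illustration with a hypothetical register block:

	void __iomem *regs = ioremap(phys_base, PAGE_SIZE);
	u16 status = readw(regs + 0x04);	/* not *(u16 *)(regs + 4) */
	iounmap(regs);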
@@ -635,7 +635,7 @@ static int w100fb_resume(struct platform_device *dev)
#endif
-int w100fb_probe(struct platform_device *pdev)
+static int w100fb_probe(struct platform_device *pdev)
{
int err = -EIO;
struct w100fb_mach_info *inf;
@@ -807,10 +807,11 @@ static int w100fb_remove(struct platform_device *pdev)
static void w100_soft_reset(void)
{
- u16 val = readw((u16 *) remapped_base + cfgSTATUS);
- writew(val | 0x08, (u16 *) remapped_base + cfgSTATUS);
+ u16 val = readw((u16 __iomem *)remapped_base + cfgSTATUS);
+
+ writew(val | 0x08, (u16 __iomem *)remapped_base + cfgSTATUS);
udelay(100);
- writew(0x00, (u16 *) remapped_base + cfgSTATUS);
+ writew(0x00, (u16 __iomem *)remapped_base + cfgSTATUS);
udelay(100);
}
@@ -1022,7 +1023,8 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
return pll_entry->pll_table;
pll_entry++;
} while (pll_entry->xtal_freq);
- return 0;
+
+ return NULL;
}
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index b656eff58c23..8f4d674fa0d0 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -339,7 +339,7 @@ static int wm8505fb_probe(struct platform_device *pdev)
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
- fbi->fb.screen_base = fb_mem_virt;
+ fbi->fb.screen_buffer = fb_mem_virt;
fbi->fb.screen_size = fb_mem_len;
fbi->contrast = 0x10;
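The screen_base to screen_buffer switches here and in sh_mobile_lcdcfb above
reflect that these framebuffers live in system RAM rather than I/O memory.
struct fb_info keeps the two views in a union, approximately (from
include/linux/fb.h):

	union {
		char __iomem *screen_base;	/* virtual address, I/O memory */
		char *screen_buffer;		/* virtual address, system RAM */
	};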
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 9c82e2a0a411..856a8c4e84a2 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -53,18 +53,14 @@ static void hdmi_infoframe_set_checksum(void *buffer, size_t size)
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
- *
- * Returns 0 on success or a negative error code on failure.
*/
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = HDMI_AVI_INFOFRAME_SIZE;
-
- return 0;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
@@ -1553,7 +1549,6 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
const void *buffer, size_t size)
{
const u8 *ptr = buffer;
- int ret;
if (size < HDMI_INFOFRAME_SIZE(AVI))
return -EINVAL;
@@ -1566,9 +1561,7 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AVI)) != 0)
return -EINVAL;
- ret = hdmi_avi_infoframe_init(frame);
- if (ret)
- return ret;
+ hdmi_avi_infoframe_init(frame);
ptr += HDMI_INFOFRAME_HEADER_SIZE;
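With hdmi_avi_infoframe_init() now returning void, callers drop the dead
error check, as the unpack hunk above shows. A converted caller reduces to
(sketch):

	struct hdmi_avi_infoframe frame;

	hdmi_avi_infoframe_init(&frame);	/* cannot fail anymore */
	/* ... fill in frame fields as before ... */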
diff --git a/drivers/video/logo/.gitignore b/drivers/video/logo/.gitignore
index 9dda1b26b2e4..5311d207c0b9 100644
--- a/drivers/video/logo/.gitignore
+++ b/drivers/video/logo/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
*_mono.c
*_vga16.c
*_clut224.c
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index d823d558c0c4..b690a8a4bf9e 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1553,8 +1553,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
#ifdef CONFIG_COMPAT
case VBG_IOCTL_HGCM_CALL_32(0):
f32bit = true;
+ fallthrough;
#endif
- /* Fall through */
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 50920b6fc319..7396187ee32a 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -311,7 +311,7 @@ static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
switch (type) {
default:
WARN_ON(1);
- /* Fall through */
+ fallthrough;
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;
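These hunks replace /* Fall through */ comments with the fallthrough
pseudo-keyword, a macro from <linux/compiler_attributes.h> expanding to
__attribute__((__fallthrough__)) where supported, so the compiler can verify
the fall-through is intentional. The pattern, with illustrative case labels:

	switch (type) {
	case FOO:
		handle_foo();
		fallthrough;	/* deliberate: FOO also runs the BAR code */
	case BAR:
		handle_bar();
		break;
	}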
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 078615cf2afc..4b2dd8259ff5 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -58,6 +58,7 @@ config VIRTIO_BALLOON
tristate "Virtio balloon driver"
depends on VIRTIO
select MEMORY_BALLOON
+ select PAGE_REPORTING
---help---
This driver supports increasing and decreasing the amount
of memory within a KVM guest.
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 341458fd95ca..0ef16566c3f3 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -14,11 +14,13 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
+#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pseudo_fs.h>
+#include <linux/page_reporting.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -27,7 +29,9 @@
*/
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
-#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
+/* Maximum number of (4k) pages to deflate on OOM notifications. */
+#define VIRTIO_BALLOON_OOM_NR_PAGES 256
+#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
@@ -47,6 +51,7 @@ enum virtio_balloon_vq {
VIRTIO_BALLOON_VQ_DEFLATE,
VIRTIO_BALLOON_VQ_STATS,
VIRTIO_BALLOON_VQ_FREE_PAGE,
+ VIRTIO_BALLOON_VQ_REPORTING,
VIRTIO_BALLOON_VQ_MAX
};
@@ -112,8 +117,15 @@ struct virtio_balloon {
/* Memory statistics */
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
- /* To register a shrinker to shrink memory upon memory pressure */
+ /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
struct shrinker shrinker;
+
+ /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
+ struct notifier_block oom_nb;
+
+ /* Free page reporting device */
+ struct virtqueue *reporting_vq;
+ struct page_reporting_dev_info pr_dev_info;
};
static struct virtio_device_id id_table[] = {
@@ -153,6 +165,33 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
}
+int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
+ struct scatterlist *sg, unsigned int nents)
+{
+ struct virtio_balloon *vb =
+ container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
+ struct virtqueue *vq = vb->reporting_vq;
+ unsigned int unused, err;
+
+ /* We should always be able to add these buffers to an empty queue. */
+ err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);
+
+ /*
+ * In the extremely unlikely case that an error occurs here, we
+ * simply display a warning and exit without actually processing
+ * the pages.
+ */
+ if (WARN_ON_ONCE(err))
+ return err;
+
+ virtqueue_kick(vq);
+
+ /* When host has read buffer, this completes via balloon_ack */
+ wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
+
+ return 0;
+}
+
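The report callback hands the host a scatterlist of free pages on the new
reporting virtqueue and sleeps until balloon_ack signals completion, so
reported pages are guaranteed processed before being reused. Wiring it up
follows the page_reporting API, as the probe hunk further down does
(condensed sketch):

	vb->pr_dev_info.report = virtballoon_free_page_report;
	err = page_reporting_register(&vb->pr_dev_info);
	/* ... and page_reporting_unregister(&vb->pr_dev_info) on remove */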
static void set_page_pfns(struct virtio_balloon *vb,
__virtio32 pfns[], struct page *page)
{
@@ -481,6 +520,7 @@ static int init_vqs(struct virtio_balloon *vb)
names[VIRTIO_BALLOON_VQ_STATS] = NULL;
callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+ names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
names[VIRTIO_BALLOON_VQ_STATS] = "stats";
@@ -492,6 +532,11 @@ static int init_vqs(struct virtio_balloon *vb)
callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
}
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
+ names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
+ callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
+ }
+
err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
vqs, callbacks, names, NULL, NULL);
if (err)
@@ -524,6 +569,9 @@ static int init_vqs(struct virtio_balloon *vb)
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
+ vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];
+
return 0;
}
@@ -788,50 +836,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}
-static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
- unsigned long pages_to_free)
-{
- return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
- VIRTIO_BALLOON_PAGES_PER_PAGE;
-}
-
-static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
- unsigned long pages_to_free)
-{
- unsigned long pages_freed = 0;
-
- /*
- * One invocation of leak_balloon can deflate at most
- * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
- * multiple times to deflate pages till reaching pages_to_free.
- */
- while (vb->num_pages && pages_freed < pages_to_free)
- pages_freed += leak_balloon_pages(vb,
- pages_to_free - pages_freed);
-
- update_balloon_size(vb);
-
- return pages_freed;
-}
-
static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
- unsigned long pages_to_free, pages_freed = 0;
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- pages_to_free = sc->nr_to_scan;
-
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
- pages_freed = shrink_free_pages(vb, pages_to_free);
-
- if (pages_freed >= pages_to_free)
- return pages_freed;
-
- pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed);
-
- return pages_freed;
+ return shrink_free_pages(vb, sc->nr_to_scan);
}
static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
@@ -839,12 +850,22 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
{
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- unsigned long count;
- count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
- count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
+ return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
+}
+
+static int virtio_balloon_oom_notify(struct notifier_block *nb,
+ unsigned long dummy, void *parm)
+{
+ struct virtio_balloon *vb = container_of(nb,
+ struct virtio_balloon, oom_nb);
+ unsigned long *freed = parm;
+
+ *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
+ VIRTIO_BALLOON_PAGES_PER_PAGE;
+ update_balloon_size(vb);
- return count;
+ return NOTIFY_OK;
}
static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
@@ -864,7 +885,6 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
- __u32 poison_val;
int err;
if (!vdev->config->get) {
@@ -930,27 +950,65 @@ static int virtballoon_probe(struct virtio_device *vdev)
VIRTIO_BALLOON_CMD_ID_STOP);
spin_lock_init(&vb->free_page_list_lock);
INIT_LIST_HEAD(&vb->free_page_list);
- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
- memset(&poison_val, PAGE_POISON, sizeof(poison_val));
- virtio_cwrite(vb->vdev, struct virtio_balloon_config,
- poison_val, &poison_val);
- }
- }
- /*
- * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
- * shrinker needs to be registered to relieve memory pressure.
- */
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
+ /*
+ * We're allowed to reuse any free pages, even if they are
+ * still to be processed by the host.
+ */
err = virtio_balloon_register_shrinker(vb);
if (err)
goto out_del_balloon_wq;
}
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
+ vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
+ vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
+ err = register_oom_notifier(&vb->oom_nb);
+ if (err < 0)
+ goto out_unregister_shrinker;
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
+ /* Start with poison val of 0 representing general init */
+ __u32 poison_val = 0;
+
+ /*
+ * Let the hypervisor know that we are expecting a
+ * specific value to be written back in balloon pages.
+ */
+ if (!want_init_on_free())
+ memset(&poison_val, PAGE_POISON, sizeof(poison_val));
+
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config,
+ poison_val, &poison_val);
+ }
+
+ vb->pr_dev_info.report = virtballoon_free_page_report;
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
+ unsigned int capacity;
+
+ capacity = virtqueue_get_vring_size(vb->reporting_vq);
+ if (capacity < PAGE_REPORTING_CAPACITY) {
+ err = -ENOSPC;
+ goto out_unregister_oom;
+ }
+
+ err = page_reporting_register(&vb->pr_dev_info);
+ if (err)
+ goto out_unregister_oom;
+ }
+
virtio_device_ready(vdev);
if (towards_target(vb))
virtballoon_changed(vdev);
return 0;
+out_unregister_oom:
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ unregister_oom_notifier(&vb->oom_nb);
+out_unregister_shrinker:
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
destroy_workqueue(vb->balloon_wq);
@@ -989,7 +1047,11 @@ static void virtballoon_remove(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
+ page_reporting_unregister(&vb->pr_dev_info);
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ unregister_oom_notifier(&vb->oom_nb);
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
virtio_balloon_unregister_shrinker(vb);
spin_lock_irq(&vb->stop_update_lock);
vb->stop_update = true;
@@ -1045,7 +1107,10 @@ static int virtballoon_restore(struct virtio_device *vdev)
static int virtballoon_validate(struct virtio_device *vdev)
{
- if (!page_poisoning_enabled())
+ /* Tell the host whether we care about poisoned pages. */
+ if (!want_init_on_free() &&
+ (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) ||
+ !page_poisoning_enabled()))
__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
@@ -1058,6 +1123,7 @@ static unsigned int features[] = {
VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
VIRTIO_BALLOON_F_FREE_PAGE_HINT,
VIRTIO_BALLOON_F_PAGE_POISON,
+ VIRTIO_BALLOON_F_REPORTING,
};
static struct virtio_driver virtio_balloon_driver = {
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index b20e43e148ce..da51a5d34e6e 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -320,7 +320,7 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
if (val & PCI_MSIX_FLAGS_ENABLE)
ret |= INTERRUPT_TYPE_MSIX;
}
- return ret;
+ return ret ?: INTERRUPT_TYPE_NONE;
}
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 28c45180a12e..5fe431c79f25 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -65,10 +65,10 @@ struct config_field_entry {
void *data;
};
-#define INTERRUPT_TYPE_NONE (1<<0)
-#define INTERRUPT_TYPE_INTX (1<<1)
-#define INTERRUPT_TYPE_MSI (1<<2)
-#define INTERRUPT_TYPE_MSIX (1<<3)
+#define INTERRUPT_TYPE_NONE (0)
+#define INTERRUPT_TYPE_INTX (1<<0)
+#define INTERRUPT_TYPE_MSI (1<<1)
+#define INTERRUPT_TYPE_MSIX (1<<2)
extern bool xen_pcibk_permissive;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e17ca8156171..31eb822ac313 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -517,6 +517,48 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
return err;
}
+/**
+ * xenbus_unmap_ring
+ * @dev: xenbus device
+ * @handles: grant handle array
+ * @nr_handles: number of handles in the array
+ * @vaddrs: addresses to unmap
+ *
+ * Unmap memory in this domain that was imported from another domain.
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
+ unsigned int nr_handles, unsigned long *vaddrs)
+{
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+ int i;
+ int err;
+
+ if (nr_handles > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ for (i = 0; i < nr_handles; i++)
+ gnttab_set_unmap_op(&unmap[i], vaddrs[i],
+ GNTMAP_host_map, handles[i]);
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
+ BUG();
+
+ err = GNTST_okay;
+ for (i = 0; i < nr_handles; i++) {
+ if (unmap[i].status != GNTST_okay) {
+ xenbus_dev_error(dev, unmap[i].status,
+ "unmapping page at handle %d error %d",
+ handles[i], unmap[i].status);
+ err = unmap[i].status;
+ break;
+ }
+ }
+
+ return err;
+}
+
struct map_ring_valloc_hvm
{
unsigned int idx;
@@ -608,45 +650,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
return err;
}
-
-/**
- * xenbus_map_ring
- * @dev: xenbus device
- * @gnt_refs: grant reference array
- * @nr_grefs: number of grant reference
- * @handles: pointer to grant handle to be filled
- * @vaddrs: addresses to be mapped to
- * @leaked: fail to clean up a failed map, caller should not free vaddr
- *
- * Map pages of memory into this domain from another domain's grant table.
- * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the pages to the specified address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
- * XenbusStateClosing and the first error message will be saved in XenStore.
- * Further more if we fail to map the ring, caller should check @leaked.
- * If @leaked is not zero it means xenbus_map_ring fails to clean up, caller
- * should not free the address space of @vaddr.
- */
-int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
- unsigned int nr_grefs, grant_handle_t *handles,
- unsigned long *vaddrs, bool *leaked)
-{
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- int i;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = (unsigned long)vaddrs[i];
-
- return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
- phys_addrs, GNTMAP_host_map, leaked);
-}
-EXPORT_SYMBOL_GPL(xenbus_map_ring);
-
-
/**
* xenbus_unmap_ring_vfree
* @dev: xenbus device
@@ -859,51 +862,6 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
}
/**
- * xenbus_unmap_ring
- * @dev: xenbus device
- * @handles: grant handle array
- * @nr_handles: number of handles in the array
- * @vaddrs: addresses to unmap
- *
- * Unmap memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t *handles, unsigned int nr_handles,
- unsigned long *vaddrs)
-{
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
- int i;
- int err;
-
- if (nr_handles > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- for (i = 0; i < nr_handles; i++)
- gnttab_set_unmap_op(&unmap[i], vaddrs[i],
- GNTMAP_host_map, handles[i]);
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
- BUG();
-
- err = GNTST_okay;
- for (i = 0; i < nr_handles; i++) {
- if (unmap[i].status != GNTST_okay) {
- xenbus_dev_error(dev, unmap[i].status,
- "unmapping page at handle %d error %d",
- handles[i], unmap[i].status);
- err = unmap[i].status;
- break;
- }
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
-
-
-/**
* xenbus_read_driver_state
* @path: path for driver
*
diff --git a/drivers/zorro/.gitignore b/drivers/zorro/.gitignore
index 34f980bd8ff6..acd6ffb8d77d 100644
--- a/drivers/zorro/.gitignore
+++ b/drivers/zorro/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
devlist.h
gen-devlist
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index ac2ec4543fe1..09fd4a185fd2 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -32,13 +32,13 @@ endif
config 9P_FS_SECURITY
- bool "9P Security Labels"
- depends on 9P_FS
- help
- Security labels support alternative access control models
- implemented by security modules like SELinux. This option
- enables an extended attribute handler for file security
- labels in the 9P filesystem.
-
- If you are not using a security module that requires using
- extended attributes for file security labels, say N.
+ bool "9P Security Labels"
+ depends on 9P_FS
+ help
+ Security labels support alternative access control models
+ implemented by security modules like SELinux. This option
+ enables an extended attribute handler for file security
+ labels in the 9P filesystem.
+
+ If you are not using a security module that requires using
+ extended attributes for file security labels, say N.
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index fe7f0bd2048e..92cd1d80218d 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -388,7 +388,10 @@ v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
iov_iter_count(to), iocb->ki_pos);
- ret = p9_client_read(fid, iocb->ki_pos, to, &err);
+ if (iocb->ki_filp->f_flags & O_NONBLOCK)
+ ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
+ else
+ ret = p9_client_read(fid, iocb->ki_pos, to, &err);
if (!ret)
return err;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index b82423a72f68..c9255d399917 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -143,7 +143,7 @@ static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses,
default:
p9_debug(P9_DEBUG_ERROR, "Unknown special type %c %s\n",
type, stat->extension);
- };
+ }
*rdev = MKDEV(major, minor);
} else
res |= S_IFREG;
diff --git a/fs/Kconfig b/fs/Kconfig
index 708ba336e689..f08fbbfafd9a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -140,9 +140,10 @@ endmenu
endif # BLOCK
if BLOCK
-menu "DOS/FAT/NT Filesystems"
+menu "DOS/FAT/EXFAT/NT Filesystems"
source "fs/fat/Kconfig"
+source "fs/exfat/Kconfig"
source "fs/ntfs/Kconfig"
endmenu
diff --git a/fs/Makefile b/fs/Makefile
index 505e51166973..2ce5112b02c8 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_HUGETLBFS) += hugetlbfs/
obj-$(CONFIG_CODA_FS) += coda/
obj-$(CONFIG_MINIX_FS) += minix/
obj-$(CONFIG_FAT_FS) += fat/
+obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_BFS_FS) += bfs/
obj-$(CONFIG_ISO9660_FS) += isofs/
obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index a3cdb0036c5d..f3a0f412b43b 100644
--- a/fs/autofs/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -186,7 +186,7 @@ static int find_autofs_mount(const char *pathname,
struct path path;
int err;
- err = kern_path_mountpoint(AT_FDCWD, pathname, &path, 0);
+ err = kern_path(pathname, LOOKUP_MOUNTPOINT, &path);
if (err)
return err;
err = -ENOENT;
@@ -519,8 +519,8 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
if (!fp || param->ioctlfd == -1) {
if (autofs_type_any(type))
- err = kern_path_mountpoint(AT_FDCWD,
- name, &path, LOOKUP_FOLLOW);
+ err = kern_path(name, LOOKUP_FOLLOW | LOOKUP_MOUNTPOINT,
+ &path);
else
err = find_autofs_mount(name, &path,
test_by_type, &type);
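kern_path_mountpoint() is being folded into kern_path() via the new
LOOKUP_MOUNTPOINT flag, so "resolve the path to its mount point" becomes a
flag rather than a separate entry point. The general shape of the
conversion:

	struct path path;
	int err;

	err = kern_path(name, LOOKUP_FOLLOW | LOOKUP_MOUNTPOINT, &path);
	if (!err)
		path_put(&path);	/* drop the reference when done */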
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f4713ea76e82..13f25e241ac4 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -27,6 +27,7 @@
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
+#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
@@ -698,19 +699,11 @@ static int load_elf_binary(struct linux_binprm *bprm)
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
- struct {
- struct elfhdr interp_elf_ex;
- } *loc;
+ struct elfhdr *interp_elf_ex = NULL;
struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
struct mm_struct *mm;
struct pt_regs *regs;
- loc = kmalloc(sizeof(*loc), GFP_KERNEL);
- if (!loc) {
- retval = -ENOMEM;
- goto out_ret;
- }
-
retval = -ENOEXEC;
/* First of all, some simple consistency checks */
if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
@@ -770,9 +763,15 @@ static int load_elf_binary(struct linux_binprm *bprm)
*/
would_dump(bprm, interpreter);
+ interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
+ if (!interp_elf_ex) {
+ retval = -ENOMEM;
+ goto out_free_ph;
+ }
+
/* Get the exec headers */
- retval = elf_read(interpreter, &loc->interp_elf_ex,
- sizeof(loc->interp_elf_ex), 0);
+ retval = elf_read(interpreter, interp_elf_ex,
+ sizeof(*interp_elf_ex), 0);
if (retval < 0)
goto out_free_dentry;
@@ -806,25 +805,25 @@ out_free_interp:
if (interpreter) {
retval = -ELIBBAD;
/* Not an ELF interpreter */
- if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+ if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
goto out_free_dentry;
/* Verify the interpreter has a valid arch */
- if (!elf_check_arch(&loc->interp_elf_ex) ||
- elf_check_fdpic(&loc->interp_elf_ex))
+ if (!elf_check_arch(interp_elf_ex) ||
+ elf_check_fdpic(interp_elf_ex))
goto out_free_dentry;
/* Load the interpreter program headers */
- interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
+ interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
interpreter);
if (!interp_elf_phdata)
goto out_free_dentry;
/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
elf_ppnt = interp_elf_phdata;
- for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
switch (elf_ppnt->p_type) {
case PT_LOPROC ... PT_HIPROC:
- retval = arch_elf_pt_proc(&loc->interp_elf_ex,
+ retval = arch_elf_pt_proc(interp_elf_ex,
elf_ppnt, interpreter,
true, &arch_state);
if (retval)
@@ -839,7 +838,7 @@ out_free_interp:
* the exec syscall.
*/
retval = arch_check_elf(elf_ex,
- !!interpreter, &loc->interp_elf_ex,
+ !!interpreter, interp_elf_ex,
&arch_state);
if (retval)
goto out_free_dentry;
@@ -1055,7 +1054,7 @@ out_free_interp:
}
if (interpreter) {
- elf_entry = load_elf_interp(&loc->interp_elf_ex,
+ elf_entry = load_elf_interp(interp_elf_ex,
interpreter,
load_bias, interp_elf_phdata);
if (!IS_ERR((void *)elf_entry)) {
@@ -1064,7 +1063,7 @@ out_free_interp:
* adjustment
*/
interp_load_addr = elf_entry;
- elf_entry += loc->interp_elf_ex.e_entry;
+ elf_entry += interp_elf_ex->e_entry;
}
if (BAD_ADDR(elf_entry)) {
retval = IS_ERR((void *)elf_entry) ?
@@ -1075,6 +1074,9 @@ out_free_interp:
allow_write_access(interpreter);
fput(interpreter);
+
+ kfree(interp_elf_ex);
+ kfree(interp_elf_phdata);
} else {
elf_entry = e_entry;
if (BAD_ADDR(elf_entry)) {
@@ -1083,7 +1085,6 @@ out_free_interp:
}
}
- kfree(interp_elf_phdata);
kfree(elf_phdata);
set_binfmt(&elf_format);
@@ -1153,12 +1154,11 @@ out_free_interp:
start_thread(regs, elf_entry, bprm->p);
retval = 0;
out:
- kfree(loc);
-out_ret:
return retval;
/* error cleanup */
out_free_dentry:
+ kfree(interp_elf_ex);
kfree(interp_elf_phdata);
allow_write_access(interpreter);
if (interpreter)
@@ -1317,7 +1317,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
}
/* Hugetlb memory check */
- if (vma->vm_flags & VM_HUGETLB) {
+ if (is_vm_hugetlb_page(vma)) {
if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
goto whole;
if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
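is_vm_hugetlb_page() is preferred over testing VM_HUGETLB directly because
it compiles to a constant false when CONFIG_HUGETLB_PAGE is disabled (hence
the new hugetlb.h include above). Its definition is approximately:

	#ifdef CONFIG_HUGETLB_PAGE
	static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return !!(vma->vm_flags & VM_HUGETLB);
	}
	#else
	static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return false;
	}
	#endif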
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9501880dff5e..52b6f646cdbd 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -34,6 +34,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/uaccess.h>
+#include <linux/suspend.h>
#include "internal.h"
struct bdev_inode {
@@ -2013,7 +2014,8 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (bdev_read_only(I_BDEV(bd_inode)))
return -EPERM;
- if (IS_SWAPFILE(bd_inode))
+ /* uswsusp needs write permission to the swap */
+ if (IS_SWAPFILE(bd_inode) && !hibernation_available())
return -ETXTBSY;
if (!iov_iter_count(from))
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index f34757e8f25f..2d357680094c 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <linux/security.h>
@@ -1070,7 +1071,14 @@ static int debugfs_show_regset32(struct seq_file *s, void *data)
{
struct debugfs_regset32 *regset = s->private;
+ if (regset->dev)
+ pm_runtime_get_sync(regset->dev);
+
debugfs_print_regs32(s, regset->regs, regset->nregs, regset->base, "");
+
+ if (regset->dev)
+ pm_runtime_put(regset->dev);
+
return 0;
}
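With the new dev field, a register dump can power the device up for the
duration of the read. A driver opts in roughly like this (hedged sketch;
assumes struct debugfs_regset32 gained the struct device *dev member this
hunk dereferences):

	regset->regs  = my_regs;		/* name/offset descriptors */
	regset->nregs = ARRAY_SIZE(my_regs);
	regset->base  = mmio_base;		/* ioremapped base address */
	regset->dev   = &pdev->dev;		/* enables runtime PM around dumps */
	debugfs_create_regset32("regdump", 0444, parent, regset);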
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index eee3c92a9ebf..8c596641a72b 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -218,13 +218,18 @@ struct eventpoll {
struct file *file;
/* used to optimize loop detection check */
- int visited;
struct list_head visited_list_link;
+ int visited;
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
unsigned int napi_id;
#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /* tracks wakeup nests for lockdep validation */
+ u8 nests;
+#endif
};
/* Wait structure used by the poll hooks */
@@ -545,30 +550,47 @@ out_unlock:
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static DEFINE_PER_CPU(int, wakeup_nest);
-
-static void ep_poll_safewake(wait_queue_head_t *wq)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
{
+ struct eventpoll *ep_src;
unsigned long flags;
- int subclass;
+ u8 nests = 0;
- local_irq_save(flags);
- preempt_disable();
- subclass = __this_cpu_read(wakeup_nest);
- spin_lock_nested(&wq->lock, subclass + 1);
- __this_cpu_inc(wakeup_nest);
- wake_up_locked_poll(wq, POLLIN);
- __this_cpu_dec(wakeup_nest);
- spin_unlock(&wq->lock);
- local_irq_restore(flags);
- preempt_enable();
+ /*
+ * To set the subclass or nesting level for spin_lock_irqsave_nested()
+ * it might be natural to create a per-cpu nest count. However, since
+ * we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
+ * schedule() in the -rt kernel, the per-cpu variables are no longer
+ * protected. Thus, we are introducing a per-eventpoll nest field.
+ * If we are not being called from ep_poll_callback(), epi is NULL and
+ * we are at the first level of nesting, 0. Otherwise, we are being
+ * called from ep_poll_callback() and if a previous wakeup source is
+ * not an epoll file itself, we are at depth 1 since the wakeup source
+ * is depth 0. If the wakeup source is a previous epoll file in the
+ * wakeup chain then we use its nests value and record ours as
+ * nests + 1. The previous epoll file's nests value is stable since it
+ * is already holding its own poll_wait.lock.
+ */
+ if (epi) {
+ if ((is_file_epoll(epi->ffd.file))) {
+ ep_src = epi->ffd.file->private_data;
+ nests = ep_src->nests;
+ } else {
+ nests = 1;
+ }
+ }
+ spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
+ ep->nests = nests + 1;
+ wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
+ ep->nests = 0;
+ spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}
#else
-static void ep_poll_safewake(wait_queue_head_t *wq)
+static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
{
- wake_up_poll(wq, EPOLLIN);
+ wake_up_poll(&ep->poll_wait, EPOLLIN);
}
#endif
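spin_lock_irqsave_nested() takes a subclass so lockdep can distinguish
deliberate recursion on the same lock class at different depths; here the
depth travels in ep->nests instead of a per-CPU counter. The generic
pattern:

	unsigned long flags;

	/* tell lockdep this inner acquisition is intentionally nested */
	spin_lock_irqsave_nested(&inner->lock, flags, SINGLE_DEPTH_NESTING);
	/* ... */
	spin_unlock_irqrestore(&inner->lock, flags);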
@@ -789,7 +811,7 @@ static void ep_free(struct eventpoll *ep)
/* We need to release all tasks waiting for these file */
if (waitqueue_active(&ep->poll_wait))
- ep_poll_safewake(&ep->poll_wait);
+ ep_poll_safewake(ep, NULL);
/*
* We need to lock this because we could be hit by
@@ -1258,7 +1280,7 @@ out_unlock:
/* We have to call this outside the lock */
if (pwake)
- ep_poll_safewake(&ep->poll_wait);
+ ep_poll_safewake(ep, epi);
if (!(epi->event.events & EPOLLEXCLUSIVE))
ewake = 1;
@@ -1562,7 +1584,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
/* We have to call this outside the lock */
if (pwake)
- ep_poll_safewake(&ep->poll_wait);
+ ep_poll_safewake(ep, NULL);
return 0;
@@ -1666,7 +1688,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
/* We have to call this outside the lock */
if (pwake)
- ep_poll_safewake(&ep->poll_wait);
+ ep_poll_safewake(ep, NULL);
return 0;
}
diff --git a/fs/exec.c b/fs/exec.c
index 688c824cdac8..06b4c550af5d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1036,16 +1036,26 @@ ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
}
EXPORT_SYMBOL(read_code);
+/*
+ * Install the mm_struct mm as the current task's mm.
+ * On success, this function returns with the mutex
+ * exec_update_mutex held.
+ */
static int exec_mmap(struct mm_struct *mm)
{
struct task_struct *tsk;
struct mm_struct *old_mm, *active_mm;
+ int ret;
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
+ ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
+ if (ret)
+ return ret;
+
if (old_mm) {
sync_mm_rss(old_mm);
/*
@@ -1057,9 +1067,11 @@ static int exec_mmap(struct mm_struct *mm)
down_read(&old_mm->mmap_sem);
if (unlikely(old_mm->core_state)) {
up_read(&old_mm->mmap_sem);
+ mutex_unlock(&tsk->signal->exec_update_mutex);
return -EINTR;
}
}
+
task_lock(tsk);
active_mm = tsk->active_mm;
membarrier_exec_mmap(mm);
@@ -1215,10 +1227,22 @@ no_thread_group:
/* we have changed execution domain */
tsk->exit_signal = SIGCHLD;
-#ifdef CONFIG_POSIX_TIMERS
- exit_itimers(sig);
- flush_itimer_signals();
-#endif
+ BUG_ON(!thread_group_leader(tsk));
+ return 0;
+
+killed:
+ /* protects against exit_notify() and __exit_signal() */
+ read_lock(&tasklist_lock);
+ sig->group_exit_task = NULL;
+ sig->notify_count = 0;
+ read_unlock(&tasklist_lock);
+ return -EAGAIN;
+}
+
+
+static int unshare_sighand(struct task_struct *me)
+{
+ struct sighand_struct *oldsighand = me->sighand;
if (refcount_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
@@ -1236,23 +1260,13 @@ no_thread_group:
write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
- rcu_assign_pointer(tsk->sighand, newsighand);
+ rcu_assign_pointer(me->sighand, newsighand);
spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock);
__cleanup_sighand(oldsighand);
}
-
- BUG_ON(!thread_group_leader(tsk));
return 0;
-
-killed:
- /* protects against exit_notify() and __exit_signal() */
- read_lock(&tasklist_lock);
- sig->group_exit_task = NULL;
- sig->notify_count = 0;
- read_unlock(&tasklist_lock);
- return -EAGAIN;
}
char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
@@ -1286,13 +1300,13 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
*/
int flush_old_exec(struct linux_binprm * bprm)
{
+ struct task_struct *me = current;
int retval;
/*
- * Make sure we have a private signal table and that
- * we are unassociated from the previous thread group.
+ * Make this the only thread in the thread group.
*/
- retval = de_thread(current);
+ retval = de_thread(me);
if (retval)
goto out;
@@ -1312,18 +1326,31 @@ int flush_old_exec(struct linux_binprm * bprm)
goto out;
/*
- * After clearing bprm->mm (to mark that current is using the
- * prepared mm now), we have nothing left of the original
+ * After setting bprm->called_exec_mmap (to mark that current is
+ * using the prepared mm now), we have nothing left of the original
* process. If anything from here on returns an error, the check
* in search_binary_handler() will SEGV current.
*/
+ bprm->called_exec_mmap = 1;
bprm->mm = NULL;
+#ifdef CONFIG_POSIX_TIMERS
+ exit_itimers(me->signal);
+ flush_itimer_signals();
+#endif
+
+ /*
+ * Make the signal table private.
+ */
+ retval = unshare_sighand(me);
+ if (retval)
+ goto out;
+
set_fs(USER_DS);
- current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
+ me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
PF_NOFREEZE | PF_NO_SETAFFINITY);
flush_thread();
- current->personality &= ~bprm->per_clear;
+ me->personality &= ~bprm->per_clear;
/*
* We have to apply CLOEXEC before we change whether the process is
@@ -1331,7 +1358,7 @@ int flush_old_exec(struct linux_binprm * bprm)
* trying to access the should-be-closed file descriptors of a process
* undergoing exec(2).
*/
- do_close_on_exec(current->files);
+ do_close_on_exec(me->files);
return 0;
out:
@@ -1412,7 +1439,7 @@ void setup_new_exec(struct linux_binprm * bprm)
/* An exec changes our domain. We are no longer part of the thread
group */
- current->self_exec_id++;
+ WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
flush_signal_handlers(current, 0);
}
EXPORT_SYMBOL(setup_new_exec);
@@ -1450,6 +1477,8 @@ static void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
+ if (bprm->called_exec_mmap)
+ mutex_unlock(&current->signal->exec_update_mutex);
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1499,6 +1528,7 @@ void install_exec_creds(struct linux_binprm *bprm)
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
+ mutex_unlock(&current->signal->exec_update_mutex);
mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
@@ -1690,7 +1720,7 @@ int search_binary_handler(struct linux_binprm *bprm)
read_lock(&binfmt_lock);
put_binfmt(fmt);
- if (retval < 0 && !bprm->mm) {
+ if (retval < 0 && bprm->called_exec_mmap) {
/* we got to flush_old_exec() and failed after it */
read_unlock(&binfmt_lock);
force_sigsegv(SIGSEGV);
diff --git a/fs/exfat/Kconfig b/fs/exfat/Kconfig
new file mode 100644
index 000000000000..2d3636dc5b8c
--- /dev/null
+++ b/fs/exfat/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config EXFAT_FS
+ tristate "exFAT filesystem support"
+ select NLS
+ help
+ This allows you to mount devices formatted with the exFAT file system.
+ exFAT is typically used on SD cards or USB sticks.
+
+ To compile this as a module, choose M here: the module will be called
+ exfat.
+
+config EXFAT_DEFAULT_IOCHARSET
+ string "Default iocharset for exFAT"
+ default "utf8"
+ depends on EXFAT_FS
+ help
+ Set this to the default input/output character set to use for
+ converting between the encoding used for user-visible filenames
+ and the UTF-16 encoding that the exFAT filesystem uses. It can be
+ overridden with the "iocharset" mount option for exFAT filesystems.
diff --git a/fs/exfat/Makefile b/fs/exfat/Makefile
new file mode 100644
index 000000000000..ed51926a4971
--- /dev/null
+++ b/fs/exfat/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for the linux exFAT filesystem support.
+#
+obj-$(CONFIG_EXFAT_FS) += exfat.o
+
+exfat-y := inode.o namei.o dir.o super.o fatent.o cache.o nls.o misc.o \
+ file.o balloc.o
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
new file mode 100644
index 000000000000..6a04cc02565a
--- /dev/null
+++ b/fs/exfat/balloc.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+static const unsigned char free_bit[] = {
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/* 0 ~ 19*/
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3,/* 20 ~ 39*/
+ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/* 40 ~ 59*/
+ 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/* 60 ~ 79*/
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2,/* 80 ~ 99*/
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3,/*100 ~ 119*/
+ 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*120 ~ 139*/
+ 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,/*140 ~ 159*/
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/*160 ~ 179*/
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3,/*180 ~ 199*/
+ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*200 ~ 219*/
+ 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/*220 ~ 239*/
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /*240 ~ 254*/
+};
+
+static const unsigned char used_bit[] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3,/* 0 ~ 19*/
+ 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4,/* 20 ~ 39*/
+ 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5,/* 40 ~ 59*/
+ 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,/* 60 ~ 79*/
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4,/* 80 ~ 99*/
+ 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6,/*100 ~ 119*/
+ 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,/*120 ~ 139*/
+ 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,/*140 ~ 159*/
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,/*160 ~ 179*/
+ 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5,/*180 ~ 199*/
+ 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6,/*200 ~ 219*/
+ 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,/*220 ~ 239*/
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /*240 ~ 255*/
+};
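
Both tables are precomputed per-byte lookups: free_bit[k] is the index of the lowest clear bit in byte k, and used_bit[k] is its population count. A hypothetical self-test (not part of the patch) cross-checking them against the compiler builtins:

	static void check_bit_tables(void)
	{
		unsigned int k;

		/* 0xff has no free bit, so free_bit[] only covers 0..254 */
		for (k = 0; k < 255; k++)
			WARN_ON(free_bit[k] != __builtin_ctz(~k & 0xff));
		for (k = 0; k < 256; k++)
			WARN_ON(used_bit[k] != __builtin_popcount(k));
	}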
+
+/*
+ * Allocation Bitmap Management Functions
+ */
+static int exfat_allocate_bitmap(struct super_block *sb,
+ struct exfat_dentry *ep)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ long long map_size;
+ unsigned int i, need_map_size;
+ sector_t sector;
+
+ sbi->map_clu = le32_to_cpu(ep->dentry.bitmap.start_clu);
+ map_size = le64_to_cpu(ep->dentry.bitmap.size);
+ need_map_size = ((EXFAT_DATA_CLUSTER_COUNT(sbi) - 1) / BITS_PER_BYTE)
+ + 1;
+ if (need_map_size != map_size) {
+ exfat_msg(sb, KERN_ERR,
+ "bogus allocation bitmap size(need : %u, cur : %lld)",
+ need_map_size, map_size);
+ /*
+ * Only allow the mismatch when the on-disk
+ * bitmap is larger than needed.
+ */
+ if (need_map_size > map_size)
+ return -EIO;
+ }
+ sbi->map_sectors = ((need_map_size - 1) >>
+ (sb->s_blocksize_bits)) + 1;
+ sbi->vol_amap = kmalloc_array(sbi->map_sectors,
+ sizeof(struct buffer_head *), GFP_KERNEL);
+ if (!sbi->vol_amap)
+ return -ENOMEM;
+
+ sector = exfat_cluster_to_sector(sbi, sbi->map_clu);
+ for (i = 0; i < sbi->map_sectors; i++) {
+ sbi->vol_amap[i] = sb_bread(sb, sector + i);
+ if (!sbi->vol_amap[i]) {
+ /* release all buffers and free vol_amap */
+ int j = 0;
+
+ while (j < i)
+ brelse(sbi->vol_amap[j++]);
+
+ kfree(sbi->vol_amap);
+ sbi->vol_amap = NULL;
+ return -EIO;
+ }
+ }
+
+ sbi->pbr_bh = NULL;
+ return 0;
+}
+
+int exfat_load_bitmap(struct super_block *sb)
+{
+ unsigned int i, type;
+ struct exfat_chain clu;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ exfat_chain_set(&clu, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ for (i = 0; i < sbi->dentries_per_clu; i++) {
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+
+ ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ if (!ep)
+ return -EIO;
+
+ type = exfat_get_entry_type(ep);
+ if (type == TYPE_UNUSED)
+ break;
+ if (type != TYPE_BITMAP)
+ continue;
+ if (ep->dentry.bitmap.flags == 0x0) {
+ int err;
+
+ err = exfat_allocate_bitmap(sb, ep);
+ brelse(bh);
+ return err;
+ }
+ brelse(bh);
+ }
+
+ if (exfat_get_next_cluster(sb, &clu.dir))
+ return -EIO;
+ }
+
+ return -EINVAL;
+}
+
+void exfat_free_bitmap(struct exfat_sb_info *sbi)
+{
+ int i;
+
+ brelse(sbi->pbr_bh);
+
+ for (i = 0; i < sbi->map_sectors; i++)
+ __brelse(sbi->vol_amap[i]);
+
+ kfree(sbi->vol_amap);
+}
+
+/*
+ * The bitmap entry for a cluster is its number minus EXFAT_RESERVED_CLUSTERS,
+ * so entry 0 corresponds to cluster 2, the first cluster of the cluster heap.
+ */
+int exfat_set_bitmap(struct inode *inode, unsigned int clu)
+{
+ int i, b;
+ unsigned int ent_idx;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ WARN_ON(clu < EXFAT_FIRST_CLUSTER);
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+
+ set_bit_le(b, sbi->vol_amap[i]->b_data);
+ exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode));
+ return 0;
+}
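
A worked example of the index math, assuming 512-byte blocks (so 4096 bitmap bits per sector) and EXFAT_RESERVED_CLUSTERS == 2:

	/*
	 * Cluster 4099 gives ent_idx = 4099 - 2 = 4097, hence
	 *   i = BITMAP_OFFSET_SECTOR_INDEX() = (4097 / 8) >> 9 = 1
	 *   b = BITMAP_OFFSET_BIT_IN_SECTOR() = 4097 & 4095    = 1
	 * i.e. the cluster is tracked by bit 1 of the second bitmap sector.
	 */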
+
+/*
+ * The bitmap entry for a cluster is its number minus EXFAT_RESERVED_CLUSTERS,
+ * so entry 0 corresponds to cluster 2, the first cluster of the cluster heap.
+ */
+void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
+{
+ int i, b;
+ unsigned int ent_idx;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+
+ WARN_ON(clu < EXFAT_FIRST_CLUSTER);
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+
+ clear_bit_le(b, sbi->vol_amap[i]->b_data);
+ exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode));
+
+ if (opts->discard) {
+ int ret_discard;
+
+ ret_discard = sb_issue_discard(sb,
+ exfat_cluster_to_sector(sbi, clu +
+ EXFAT_RESERVED_CLUSTERS),
+ (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
+
+ if (ret_discard == -EOPNOTSUPP) {
+ exfat_msg(sb, KERN_ERR,
+ "discard not supported by device, disabling");
+ opts->discard = 0;
+ }
+ }
+}
+
+/*
+ * The bitmap entry for a cluster is its number minus EXFAT_RESERVED_CLUSTERS,
+ * so entry 0 corresponds to cluster 2, the first cluster of the cluster heap.
+ */
+unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu)
+{
+ unsigned int i, map_i, map_b, ent_idx;
+ unsigned int clu_base, clu_free;
+ unsigned char k, clu_mask;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ WARN_ON(clu < EXFAT_FIRST_CLUSTER);
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ clu_base = BITMAP_ENT_TO_CLUSTER(ent_idx & ~(BITS_PER_BYTE_MASK));
+ clu_mask = IGNORED_BITS_REMAINED(clu, clu_base);
+
+ map_i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ map_b = BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent_idx);
+
+ for (i = EXFAT_FIRST_CLUSTER; i < sbi->num_clusters;
+ i += BITS_PER_BYTE) {
+ k = *(sbi->vol_amap[map_i]->b_data + map_b);
+ if (clu_mask > 0) {
+ k |= clu_mask;
+ clu_mask = 0;
+ }
+ if (k < 0xFF) {
+ clu_free = clu_base + free_bit[k];
+ if (clu_free < sbi->num_clusters)
+ return clu_free;
+ }
+ clu_base += BITS_PER_BYTE;
+
+ if (++map_b >= sb->s_blocksize ||
+ clu_base >= sbi->num_clusters) {
+ if (++map_i >= sbi->map_sectors) {
+ clu_base = EXFAT_FIRST_CLUSTER;
+ map_i = 0;
+ }
+ map_b = 0;
+ }
+ }
+
+ return EXFAT_EOF_CLUSTER;
+}
+
+int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned int count = 0;
+ unsigned int i, map_i = 0, map_b = 0;
+ unsigned int total_clus = EXFAT_DATA_CLUSTER_COUNT(sbi);
+ unsigned int last_mask = total_clus & BITS_PER_BYTE_MASK;
+ unsigned char clu_bits;
+ const unsigned char last_bit_mask[] = {0, 0b00000001, 0b00000011,
+ 0b00000111, 0b00001111, 0b00011111, 0b00111111, 0b01111111};
+
+ total_clus &= ~last_mask;
+ for (i = 0; i < total_clus; i += BITS_PER_BYTE) {
+ clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
+ count += used_bit[clu_bits];
+ if (++map_b >= (unsigned int)sb->s_blocksize) {
+ map_i++;
+ map_b = 0;
+ }
+ }
+
+ if (last_mask) {
+ clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
+ clu_bits &= last_bit_mask[last_mask];
+ count += used_bit[clu_bits];
+ }
+
+ *ret_count = count;
+ return 0;
+}
diff --git a/fs/exfat/cache.c b/fs/exfat/cache.c
new file mode 100644
index 000000000000..03d0824fc368
--- /dev/null
+++ b/fs/exfat/cache.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * linux/fs/fat/cache.c
+ *
+ * Written 1992,1993 by Werner Almesberger
+ *
+ * Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
+ * of inode number.
+ * May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/buffer_head.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+#define EXFAT_CACHE_VALID 0
+#define EXFAT_MAX_CACHE 16
+
+struct exfat_cache {
+ struct list_head cache_list;
+ unsigned int nr_contig; /* number of contiguous clusters */
+ unsigned int fcluster; /* cluster number in the file. */
+ unsigned int dcluster; /* cluster number on disk. */
+};
+
+struct exfat_cache_id {
+ unsigned int id;
+ unsigned int nr_contig;
+ unsigned int fcluster;
+ unsigned int dcluster;
+};
+
+static struct kmem_cache *exfat_cachep;
+
+static void exfat_cache_init_once(void *c)
+{
+ struct exfat_cache *cache = (struct exfat_cache *)c;
+
+ INIT_LIST_HEAD(&cache->cache_list);
+}
+
+int exfat_cache_init(void)
+{
+ exfat_cachep = kmem_cache_create("exfat_cache",
+ sizeof(struct exfat_cache),
+ 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+ exfat_cache_init_once);
+ if (!exfat_cachep)
+ return -ENOMEM;
+ return 0;
+}
+
+void exfat_cache_shutdown(void)
+{
+ if (!exfat_cachep)
+ return;
+ kmem_cache_destroy(exfat_cachep);
+}
+
+void exfat_cache_init_inode(struct inode *inode)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+ spin_lock_init(&ei->cache_lru_lock);
+ ei->nr_caches = 0;
+ ei->cache_valid_id = EXFAT_CACHE_VALID + 1;
+ INIT_LIST_HEAD(&ei->cache_lru);
+}
+
+static inline struct exfat_cache *exfat_cache_alloc(void)
+{
+ return kmem_cache_alloc(exfat_cachep, GFP_NOFS);
+}
+
+static inline void exfat_cache_free(struct exfat_cache *cache)
+{
+ WARN_ON(!list_empty(&cache->cache_list));
+ kmem_cache_free(exfat_cachep, cache);
+}
+
+static inline void exfat_cache_update_lru(struct inode *inode,
+ struct exfat_cache *cache)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+ if (ei->cache_lru.next != &cache->cache_list)
+ list_move(&cache->cache_list, &ei->cache_lru);
+}
+
+static unsigned int exfat_cache_lookup(struct inode *inode,
+ unsigned int fclus, struct exfat_cache_id *cid,
+ unsigned int *cached_fclus, unsigned int *cached_dclus)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ static struct exfat_cache nohit = { .fcluster = 0, };
+ struct exfat_cache *hit = &nohit, *p;
+ unsigned int offset = EXFAT_EOF_CLUSTER;
+
+ spin_lock(&ei->cache_lru_lock);
+ list_for_each_entry(p, &ei->cache_lru, cache_list) {
+ /* Find the cache of "fclus" or nearest cache. */
+ if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
+ hit = p;
+ if (hit->fcluster + hit->nr_contig < fclus) {
+ offset = hit->nr_contig;
+ } else {
+ offset = fclus - hit->fcluster;
+ break;
+ }
+ }
+ }
+ if (hit != &nohit) {
+ exfat_cache_update_lru(inode, hit);
+
+ cid->id = ei->cache_valid_id;
+ cid->nr_contig = hit->nr_contig;
+ cid->fcluster = hit->fcluster;
+ cid->dcluster = hit->dcluster;
+ *cached_fclus = cid->fcluster + offset;
+ *cached_dclus = cid->dcluster + offset;
+ }
+ spin_unlock(&ei->cache_lru_lock);
+
+ return offset;
+}
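
Concretely, an entry with fcluster = 10, dcluster = 100 and nr_contig = 3 records that file clusters 10..13 sit on disk clusters 100..103, so a lookup of fclus = 12 returns offset 2 without walking the FAT. A minimal sketch of that arithmetic (cached_dclus() is a hypothetical helper, not in the patch):

	static unsigned int cached_dclus(const struct exfat_cache *c,
					 unsigned int fclus)
	{
		unsigned int offset = fclus - c->fcluster;	/* 12 - 10 = 2 */

		WARN_ON(offset > c->nr_contig);		/* must lie in the run */
		return c->dcluster + offset;		/* 100 + 2 = 102 */
	}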
+
+static struct exfat_cache *exfat_cache_merge(struct inode *inode,
+ struct exfat_cache_id *new)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct exfat_cache *p;
+
+ list_for_each_entry(p, &ei->cache_lru, cache_list) {
+ /* Find the same part as "new" in cluster-chain. */
+ if (p->fcluster == new->fcluster) {
+ if (new->nr_contig > p->nr_contig)
+ p->nr_contig = new->nr_contig;
+ return p;
+ }
+ }
+ return NULL;
+}
+
+static void exfat_cache_add(struct inode *inode,
+ struct exfat_cache_id *new)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct exfat_cache *cache, *tmp;
+
+ if (new->fcluster == EXFAT_EOF_CLUSTER) /* dummy cache */
+ return;
+
+ spin_lock(&ei->cache_lru_lock);
+ if (new->id != EXFAT_CACHE_VALID &&
+ new->id != ei->cache_valid_id)
+ goto unlock; /* this cache was invalidated */
+
+ cache = exfat_cache_merge(inode, new);
+ if (cache == NULL) {
+ if (ei->nr_caches < EXFAT_MAX_CACHE) {
+ ei->nr_caches++;
+ spin_unlock(&ei->cache_lru_lock);
+
+ tmp = exfat_cache_alloc();
+ if (!tmp) {
+ spin_lock(&ei->cache_lru_lock);
+ ei->nr_caches--;
+ spin_unlock(&ei->cache_lru_lock);
+ return;
+ }
+
+ spin_lock(&ei->cache_lru_lock);
+ cache = exfat_cache_merge(inode, new);
+ if (cache != NULL) {
+ ei->nr_caches--;
+ exfat_cache_free(tmp);
+ goto out_update_lru;
+ }
+ cache = tmp;
+ } else {
+ struct list_head *p = ei->cache_lru.prev;
+
+ cache = list_entry(p,
+ struct exfat_cache, cache_list);
+ }
+ cache->fcluster = new->fcluster;
+ cache->dcluster = new->dcluster;
+ cache->nr_contig = new->nr_contig;
+ }
+out_update_lru:
+ exfat_cache_update_lru(inode, cache);
+unlock:
+ spin_unlock(&ei->cache_lru_lock);
+}
+
+/*
+ * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
+ * fixes itself after a while.
+ */
+static void __exfat_cache_inval_inode(struct inode *inode)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct exfat_cache *cache;
+
+ while (!list_empty(&ei->cache_lru)) {
+ cache = list_entry(ei->cache_lru.next,
+ struct exfat_cache, cache_list);
+ list_del_init(&cache->cache_list);
+ ei->nr_caches--;
+ exfat_cache_free(cache);
+ }
+ /* Bump the id: cached copies taken under the old id are discarded. */
+ ei->cache_valid_id++;
+ if (ei->cache_valid_id == EXFAT_CACHE_VALID)
+ ei->cache_valid_id++;
+}
+
+void exfat_cache_inval_inode(struct inode *inode)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+ spin_lock(&ei->cache_lru_lock);
+ __exfat_cache_inval_inode(inode);
+ spin_unlock(&ei->cache_lru_lock);
+}
+
+static inline int cache_contiguous(struct exfat_cache_id *cid,
+ unsigned int dclus)
+{
+ cid->nr_contig++;
+ return cid->dcluster + cid->nr_contig == dclus;
+}
+
+static inline void cache_init(struct exfat_cache_id *cid,
+ unsigned int fclus, unsigned int dclus)
+{
+ cid->id = EXFAT_CACHE_VALID;
+ cid->fcluster = fclus;
+ cid->dcluster = dclus;
+ cid->nr_contig = 0;
+}
+
+int exfat_get_cluster(struct inode *inode, unsigned int cluster,
+ unsigned int *fclus, unsigned int *dclus,
+ unsigned int *last_dclus, int allow_eof)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned int limit = sbi->num_clusters;
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct exfat_cache_id cid;
+ unsigned int content;
+
+ if (ei->start_clu == EXFAT_FREE_CLUSTER) {
+ exfat_fs_error(sb,
+ "invalid access to exfat cache (entry 0x%08x)",
+ ei->start_clu);
+ return -EIO;
+ }
+
+ *fclus = 0;
+ *dclus = ei->start_clu;
+ *last_dclus = *dclus;
+
+ /*
+ * Don't use the exfat cache for a zero offset or a non-cluster allocation
+ */
+ if (cluster == 0 || *dclus == EXFAT_EOF_CLUSTER)
+ return 0;
+
+ cache_init(&cid, EXFAT_EOF_CLUSTER, EXFAT_EOF_CLUSTER);
+
+ if (exfat_cache_lookup(inode, cluster, &cid, fclus, dclus) ==
+ EXFAT_EOF_CLUSTER) {
+ /*
+ * the lookup missed, so cid still holds the never-contiguous
+ * dummy; it is reinitialized by cache_init() later.
+ */
+ WARN_ON(cid.id != EXFAT_CACHE_VALID ||
+ cid.fcluster != EXFAT_EOF_CLUSTER ||
+ cid.dcluster != EXFAT_EOF_CLUSTER ||
+ cid.nr_contig != 0);
+ }
+
+ if (*fclus == cluster)
+ return 0;
+
+ while (*fclus < cluster) {
+ /* guard against an infinite loop in the cluster chain */
+ if (*fclus > limit) {
+ exfat_fs_error(sb,
+ "detected the cluster chain loop (i_pos %u)",
+ (*fclus));
+ return -EIO;
+ }
+
+ if (exfat_ent_get(sb, *dclus, &content))
+ return -EIO;
+
+ *last_dclus = *dclus;
+ *dclus = content;
+ (*fclus)++;
+
+ if (content == EXFAT_EOF_CLUSTER) {
+ if (!allow_eof) {
+ exfat_fs_error(sb,
+ "invalid cluster chain (i_pos %u, last_clus 0x%08x is EOF)",
+ *fclus, (*last_dclus));
+ return -EIO;
+ }
+
+ break;
+ }
+
+ if (!cache_contiguous(&cid, *dclus))
+ cache_init(&cid, *fclus, *dclus);
+ }
+
+ exfat_cache_add(inode, &cid);
+ return 0;
+}
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
new file mode 100644
index 000000000000..4b91afb0f051
--- /dev/null
+++ b/fs/exfat/dir.c
@@ -0,0 +1,1238 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+static int exfat_extract_uni_name(struct exfat_dentry *ep,
+ unsigned short *uniname)
+{
+ int i, len = 0;
+
+ for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) {
+ *uniname = le16_to_cpu(ep->dentry.name.unicode_0_14[i]);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+
+ *uniname = 0x0;
+ return len;
+
+}
+
+static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
+ struct exfat_chain *p_dir, int entry, unsigned short *uniname)
+{
+ int i;
+ struct exfat_dentry *ep;
+ struct exfat_entry_set_cache *es;
+
+ es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
+ if (!es)
+ return;
+
+ if (es->num_entries < 3)
+ goto free_es;
+
+ ep += 2;
+
+ /*
+ * First entry : file entry
+ * Second entry : stream-extension entry
+ * Third entry : first file-name entry
+ * So the first file-name dentry is at index 2.
+ */
+ for (i = 2; i < es->num_entries; i++, ep++) {
+ /* end of name entry */
+ if (exfat_get_entry_type(ep) != TYPE_EXTEND)
+ goto free_es;
+
+ exfat_extract_uni_name(ep, uniname);
+ uniname += EXFAT_FILE_NAME_LEN;
+ }
+
+free_es:
+ kfree(es);
+}
+
+/* read a directory entry from the opened directory */
+static int exfat_readdir(struct inode *inode, struct exfat_dir_entry *dir_entry)
+{
+ int i, dentries_per_clu, dentries_per_clu_bits = 0;
+ unsigned int type, clu_offset;
+ sector_t sector;
+ struct exfat_chain dir, clu;
+ struct exfat_uni_name uni_name;
+ struct exfat_dentry *ep;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ unsigned int dentry = ei->rwoffset & 0xFFFFFFFF;
+ struct buffer_head *bh;
+
+ /* check if the given file ID is opened */
+ if (ei->type != TYPE_DIR)
+ return -EPERM;
+
+ if (ei->entry == -1)
+ exfat_chain_set(&dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ else
+ exfat_chain_set(&dir, ei->start_clu,
+ EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags);
+
+ dentries_per_clu = sbi->dentries_per_clu;
+ dentries_per_clu_bits = ilog2(dentries_per_clu);
+
+ clu_offset = dentry >> dentries_per_clu_bits;
+ exfat_chain_dup(&clu, &dir);
+
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ clu.dir += clu_offset;
+ clu.size -= clu_offset;
+ } else {
+ /* hint_information */
+ if (clu_offset > 0 && ei->hint_bmap.off != EXFAT_EOF_CLUSTER &&
+ ei->hint_bmap.off > 0 && clu_offset >= ei->hint_bmap.off) {
+ clu_offset -= ei->hint_bmap.off;
+ clu.dir = ei->hint_bmap.clu;
+ }
+
+ while (clu_offset > 0) {
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+
+ clu_offset--;
+ }
+ }
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ i = dentry & (dentries_per_clu - 1);
+
+ for ( ; i < dentries_per_clu; i++, dentry++) {
+ ep = exfat_get_dentry(sb, &clu, i, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ type = exfat_get_entry_type(ep);
+ if (type == TYPE_UNUSED) {
+ brelse(bh);
+ break;
+ }
+
+ if (type != TYPE_FILE && type != TYPE_DIR) {
+ brelse(bh);
+ continue;
+ }
+
+ dir_entry->attr = le16_to_cpu(ep->dentry.file.attr);
+ exfat_get_entry_time(sbi, &dir_entry->crtime,
+ ep->dentry.file.create_tz,
+ ep->dentry.file.create_time,
+ ep->dentry.file.create_date,
+ ep->dentry.file.create_time_ms);
+ exfat_get_entry_time(sbi, &dir_entry->mtime,
+ ep->dentry.file.modify_tz,
+ ep->dentry.file.modify_time,
+ ep->dentry.file.modify_date,
+ ep->dentry.file.modify_time_ms);
+ exfat_get_entry_time(sbi, &dir_entry->atime,
+ ep->dentry.file.access_tz,
+ ep->dentry.file.access_time,
+ ep->dentry.file.access_date,
+ 0);
+
+ *uni_name.name = 0x0;
+ exfat_get_uniname_from_ext_entry(sb, &dir, dentry,
+ uni_name.name);
+ exfat_utf16_to_nls(sb, &uni_name,
+ dir_entry->namebuf.lfn,
+ dir_entry->namebuf.lfnbuf_len);
+ brelse(bh);
+
+ ep = exfat_get_dentry(sb, &clu, i + 1, &bh, NULL);
+ if (!ep)
+ return -EIO;
+ dir_entry->size =
+ le64_to_cpu(ep->dentry.stream.valid_size);
+ brelse(bh);
+
+ ei->hint_bmap.off = dentry >> dentries_per_clu_bits;
+ ei->hint_bmap.clu = clu.dir;
+
+ ei->rwoffset = ++dentry;
+ return 0;
+ }
+
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ if (--clu.size > 0)
+ clu.dir++;
+ else
+ clu.dir = EXFAT_EOF_CLUSTER;
+ } else {
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+ }
+ }
+
+ dir_entry->namebuf.lfn[0] = '\0';
+ ei->rwoffset = dentry;
+ return 0;
+}
+
+static void exfat_init_namebuf(struct exfat_dentry_namebuf *nb)
+{
+ nb->lfn = NULL;
+ nb->lfnbuf_len = 0;
+}
+
+static int exfat_alloc_namebuf(struct exfat_dentry_namebuf *nb)
+{
+ nb->lfn = __getname();
+ if (!nb->lfn)
+ return -ENOMEM;
+ nb->lfnbuf_len = MAX_VFSNAME_BUF_SIZE;
+ return 0;
+}
+
+static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb)
+{
+ if (!nb->lfn)
+ return;
+
+ __putname(nb->lfn);
+ exfat_init_namebuf(nb);
+}
+
+/* skip iterating emit_dots when dir is empty */
+#define ITER_POS_FILLED_DOTS (2)
+static int exfat_iterate(struct file *filp, struct dir_context *ctx)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+ struct inode *tmp;
+ struct exfat_dir_entry de;
+ struct exfat_dentry_namebuf *nb = &(de.namebuf);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ unsigned long inum;
+ loff_t cpos, i_pos;
+ int err = 0, fake_offset = 0;
+
+ exfat_init_namebuf(nb);
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
+
+ cpos = ctx->pos;
+ if (!dir_emit_dots(filp, ctx))
+ goto unlock;
+
+ if (ctx->pos == ITER_POS_FILLED_DOTS) {
+ cpos = 0;
+ fake_offset = 1;
+ }
+
+ if (cpos & (DENTRY_SIZE - 1)) {
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ /* name buffer should be allocated before use */
+ err = exfat_alloc_namebuf(nb);
+ if (err)
+ goto unlock;
+get_new:
+ ei->rwoffset = EXFAT_B_TO_DEN(cpos);
+
+ if (cpos >= i_size_read(inode))
+ goto end_of_dir;
+
+ err = exfat_readdir(inode, &de);
+ if (err) {
+ /*
+ * At least we tried to read a sector. Move cpos to next sector
+ * position (should be aligned).
+ */
+ if (err == -EIO) {
+ cpos += 1 << (sb->s_blocksize_bits);
+ cpos &= ~(sb->s_blocksize - 1);
+ }
+
+ err = -EIO;
+ goto end_of_dir;
+ }
+
+ cpos = EXFAT_DEN_TO_B(ei->rwoffset);
+
+ if (!nb->lfn[0])
+ goto end_of_dir;
+
+ i_pos = ((loff_t)ei->start_clu << 32) |
+ ((ei->rwoffset - 1) & 0xffffffff);
+ tmp = exfat_iget(sb, i_pos);
+ if (tmp) {
+ inum = tmp->i_ino;
+ iput(tmp);
+ } else {
+ inum = iunique(sb, EXFAT_ROOT_INO);
+ }
+
+ /*
+ * Release s_lock before calling dir_emit(): a page fault can occur
+ * in dir_emit() when the buffer supplied by the user is larger than
+ * one page.
+ */
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum,
+ (de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
+ goto out_unlocked;
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
+ ctx->pos = cpos;
+ goto get_new;
+
+end_of_dir:
+ if (!cpos && fake_offset)
+ cpos = ITER_POS_FILLED_DOTS;
+ ctx->pos = cpos;
+unlock:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+out_unlocked:
+ /*
+ * To improve performance, free the namebuf after unlocking s_lock.
+ * If the namebuf was not allocated, this call does nothing.
+ */
+ exfat_free_namebuf(nb);
+ return err;
+}
+
+const struct file_operations exfat_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate = exfat_iterate,
+ .fsync = generic_file_fsync,
+};
+
+int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
+{
+ int ret;
+
+ exfat_chain_set(clu, EXFAT_EOF_CLUSTER, 0, ALLOC_NO_FAT_CHAIN);
+
+ ret = exfat_alloc_cluster(inode, 1, clu);
+ if (ret)
+ return ret;
+
+ return exfat_zeroed_cluster(inode, clu->dir);
+}
+
+int exfat_calc_num_entries(struct exfat_uni_name *p_uniname)
+{
+ int len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return -EINVAL;
+
+ /* 1 file entry + 1 stream entry + name entries */
+ return ((len - 1) / EXFAT_FILE_NAME_LEN + 3);
+}
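
For example, a 16-character name needs (16 - 1) / 15 + 3 = 4 dentries, assuming a name entry carries at most EXFAT_FILE_NAME_LEN == 15 UTF-16 units:

	/*
	 *   1 file entry
	 * + 1 stream entry
	 * + 2 name entries (15 chars + 1 char)
	 * = 4 = exfat_calc_num_entries() for name_len == 16
	 */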
+
+unsigned int exfat_get_entry_type(struct exfat_dentry *ep)
+{
+ if (ep->type == EXFAT_UNUSED)
+ return TYPE_UNUSED;
+ if (IS_EXFAT_DELETED(ep->type))
+ return TYPE_DELETED;
+ if (ep->type == EXFAT_INVAL)
+ return TYPE_INVALID;
+ if (IS_EXFAT_CRITICAL_PRI(ep->type)) {
+ if (ep->type == EXFAT_BITMAP)
+ return TYPE_BITMAP;
+ if (ep->type == EXFAT_UPCASE)
+ return TYPE_UPCASE;
+ if (ep->type == EXFAT_VOLUME)
+ return TYPE_VOLUME;
+ if (ep->type == EXFAT_FILE) {
+ if (le16_to_cpu(ep->dentry.file.attr) & ATTR_SUBDIR)
+ return TYPE_DIR;
+ return TYPE_FILE;
+ }
+ return TYPE_CRITICAL_PRI;
+ }
+ if (IS_EXFAT_BENIGN_PRI(ep->type)) {
+ if (ep->type == EXFAT_GUID)
+ return TYPE_GUID;
+ if (ep->type == EXFAT_PADDING)
+ return TYPE_PADDING;
+ if (ep->type == EXFAT_ACLTAB)
+ return TYPE_ACLTAB;
+ return TYPE_BENIGN_PRI;
+ }
+ if (IS_EXFAT_CRITICAL_SEC(ep->type)) {
+ if (ep->type == EXFAT_STREAM)
+ return TYPE_STREAM;
+ if (ep->type == EXFAT_NAME)
+ return TYPE_EXTEND;
+ if (ep->type == EXFAT_ACL)
+ return TYPE_ACL;
+ return TYPE_CRITICAL_SEC;
+ }
+ return TYPE_BENIGN_SEC;
+}
+
+static void exfat_set_entry_type(struct exfat_dentry *ep, unsigned int type)
+{
+ if (type == TYPE_UNUSED) {
+ ep->type = EXFAT_UNUSED;
+ } else if (type == TYPE_DELETED) {
+ ep->type &= EXFAT_DELETE;
+ } else if (type == TYPE_STREAM) {
+ ep->type = EXFAT_STREAM;
+ } else if (type == TYPE_EXTEND) {
+ ep->type = EXFAT_NAME;
+ } else if (type == TYPE_BITMAP) {
+ ep->type = EXFAT_BITMAP;
+ } else if (type == TYPE_UPCASE) {
+ ep->type = EXFAT_UPCASE;
+ } else if (type == TYPE_VOLUME) {
+ ep->type = EXFAT_VOLUME;
+ } else if (type == TYPE_DIR) {
+ ep->type = EXFAT_FILE;
+ ep->dentry.file.attr = cpu_to_le16(ATTR_SUBDIR);
+ } else if (type == TYPE_FILE) {
+ ep->type = EXFAT_FILE;
+ ep->dentry.file.attr = cpu_to_le16(ATTR_ARCHIVE);
+ }
+}
+
+static void exfat_init_stream_entry(struct exfat_dentry *ep,
+ unsigned char flags, unsigned int start_clu,
+ unsigned long long size)
+{
+ exfat_set_entry_type(ep, TYPE_STREAM);
+ ep->dentry.stream.flags = flags;
+ ep->dentry.stream.start_clu = cpu_to_le32(start_clu);
+ ep->dentry.stream.valid_size = cpu_to_le64(size);
+ ep->dentry.stream.size = cpu_to_le64(size);
+}
+
+static void exfat_init_name_entry(struct exfat_dentry *ep,
+ unsigned short *uniname)
+{
+ int i;
+
+ exfat_set_entry_type(ep, TYPE_EXTEND);
+ ep->dentry.name.flags = 0x0;
+
+ for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) {
+ ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
+ if (*uniname == 0x0)
+ break;
+ uniname++;
+ }
+}
+
+int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
+ int entry, unsigned int type, unsigned int start_clu,
+ unsigned long long size)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct timespec64 ts = current_time(inode);
+ sector_t sector;
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+
+ /*
+ * We cannot use exfat_get_dentry_set here because file ep is not
+ * initialized yet.
+ */
+ ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ exfat_set_entry_type(ep, type);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.create_tz,
+ &ep->dentry.file.create_time,
+ &ep->dentry.file.create_date,
+ &ep->dentry.file.create_time_ms);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.modify_tz,
+ &ep->dentry.file.modify_time,
+ &ep->dentry.file.modify_date,
+ &ep->dentry.file.modify_time_ms);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.access_tz,
+ &ep->dentry.file.access_time,
+ &ep->dentry.file.access_date,
+ NULL);
+
+ exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+ brelse(bh);
+
+ ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ exfat_init_stream_entry(ep,
+ (type == TYPE_FILE) ? ALLOC_FAT_CHAIN : ALLOC_NO_FAT_CHAIN,
+ start_clu, size);
+ exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+ brelse(bh);
+
+ return 0;
+}
+
+int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
+ int entry)
+{
+ struct super_block *sb = inode->i_sb;
+ int ret = 0;
+ int i, num_entries;
+ sector_t sector;
+ unsigned short chksum;
+ struct exfat_dentry *ep, *fep;
+ struct buffer_head *fbh, *bh;
+
+ fep = exfat_get_dentry(sb, p_dir, entry, &fbh, &sector);
+ if (!fep)
+ return -EIO;
+
+ num_entries = fep->dentry.file.num_ext + 1;
+ chksum = exfat_calc_chksum_2byte(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
+
+ for (i = 1; i < num_entries; i++) {
+ ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, NULL);
+ if (!ep) {
+ ret = -EIO;
+ goto release_fbh;
+ }
+ chksum = exfat_calc_chksum_2byte(ep, DENTRY_SIZE, chksum,
+ CS_DEFAULT);
+ brelse(bh);
+ }
+
+ fep->dentry.file.checksum = cpu_to_le16(chksum);
+ exfat_update_bh(sb, fbh, IS_DIRSYNC(inode));
+release_fbh:
+ brelse(fbh);
+ return ret;
+}
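
The value computed here is the exFAT entry-set checksum: a 16-bit right-rotate accumulator over every byte of the set, skipping bytes 2 and 3 of the first entry (the checksum field itself). A self-contained reference sketch, under the assumption that it matches exfat_calc_chksum_2byte() with CS_DIR_ENTRY for the first entry and CS_DEFAULT for the rest:

	static u16 entry_set_chksum(const u8 *ents, int num_entries)
	{
		int i, bytes = num_entries * DENTRY_SIZE;  /* 32-byte dentries */
		u16 chksum = 0;

		for (i = 0; i < bytes; i++) {
			if (i == 2 || i == 3)	/* skip the checksum field */
				continue;
			chksum = ((chksum << 15) | (chksum >> 1)) + ents[i];
		}
		return chksum;
	}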
+
+int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
+ int entry, int num_entries, struct exfat_uni_name *p_uniname)
+{
+ struct super_block *sb = inode->i_sb;
+ int i;
+ sector_t sector;
+ unsigned short *uniname = p_uniname->name;
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+ int sync = IS_DIRSYNC(inode);
+
+ ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ ep->dentry.file.num_ext = (unsigned char)(num_entries - 1);
+ exfat_update_bh(sb, bh, sync);
+ brelse(bh);
+
+ ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ ep->dentry.stream.name_len = p_uniname->name_len;
+ ep->dentry.stream.name_hash = cpu_to_le16(p_uniname->name_hash);
+ exfat_update_bh(sb, bh, sync);
+ brelse(bh);
+
+ for (i = EXFAT_FIRST_CLUSTER; i < num_entries; i++) {
+ ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ exfat_init_name_entry(ep, uniname);
+ exfat_update_bh(sb, bh, sync);
+ brelse(bh);
+ uniname += EXFAT_FILE_NAME_LEN;
+ }
+
+ exfat_update_dir_chksum(inode, p_dir, entry);
+ return 0;
+}
+
+int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
+ int entry, int order, int num_entries)
+{
+ struct super_block *sb = inode->i_sb;
+ int i;
+ sector_t sector;
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+
+ for (i = order; i < num_entries; i++) {
+ ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ exfat_set_entry_type(ep, TYPE_DELETED);
+ exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+ brelse(bh);
+ }
+
+ return 0;
+}
+
+int exfat_update_dir_chksum_with_entry_set(struct super_block *sb,
+ struct exfat_entry_set_cache *es, int sync)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bh;
+ sector_t sec = es->sector;
+ unsigned int off = es->offset;
+ int chksum_type = CS_DIR_ENTRY, i, num_entries = es->num_entries;
+ unsigned int buf_off = (off - es->offset);
+ unsigned int remaining_byte_in_sector, copy_entries, clu;
+ unsigned short chksum = 0;
+
+ for (i = 0; i < num_entries; i++) {
+ chksum = exfat_calc_chksum_2byte(&es->entries[i], DENTRY_SIZE,
+ chksum, chksum_type);
+ chksum_type = CS_DEFAULT;
+ }
+
+ es->entries[0].dentry.file.checksum = cpu_to_le16(chksum);
+
+ while (num_entries) {
+ /* write back one sector at a time */
+ remaining_byte_in_sector = (1 << sb->s_blocksize_bits) - off;
+ copy_entries = min_t(int,
+ EXFAT_B_TO_DEN(remaining_byte_in_sector),
+ num_entries);
+ bh = sb_bread(sb, sec);
+ if (!bh)
+ goto err_out;
+ memcpy(bh->b_data + off,
+ (unsigned char *)&es->entries[0] + buf_off,
+ EXFAT_DEN_TO_B(copy_entries));
+ exfat_update_bh(sb, bh, sync);
+ brelse(bh);
+ num_entries -= copy_entries;
+
+ if (num_entries) {
+ /* get next sector */
+ if (exfat_is_last_sector_in_cluster(sbi, sec)) {
+ clu = exfat_sector_to_cluster(sbi, sec);
+ if (es->alloc_flag == ALLOC_NO_FAT_CHAIN)
+ clu++;
+ else if (exfat_get_next_cluster(sb, &clu))
+ goto err_out;
+ sec = exfat_cluster_to_sector(sbi, clu);
+ } else {
+ sec++;
+ }
+ off = 0;
+ buf_off += EXFAT_DEN_TO_B(copy_entries);
+ }
+ }
+
+ return 0;
+err_out:
+ return -EIO;
+}
+
+static int exfat_walk_fat_chain(struct super_block *sb,
+ struct exfat_chain *p_dir, unsigned int byte_offset,
+ unsigned int *clu)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned int clu_offset;
+ unsigned int cur_clu;
+
+ clu_offset = EXFAT_B_TO_CLU(byte_offset, sbi);
+ cur_clu = p_dir->dir;
+
+ if (p_dir->flags == ALLOC_NO_FAT_CHAIN) {
+ cur_clu += clu_offset;
+ } else {
+ while (clu_offset > 0) {
+ if (exfat_get_next_cluster(sb, &cur_clu))
+ return -EIO;
+ if (cur_clu == EXFAT_EOF_CLUSTER) {
+ exfat_fs_error(sb,
+ "invalid dentry access beyond EOF (clu : %u, eidx : %d)",
+ p_dir->dir,
+ EXFAT_B_TO_DEN(byte_offset));
+ return -EIO;
+ }
+ clu_offset--;
+ }
+ }
+
+ *clu = cur_clu;
+ return 0;
+}
+
+int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, sector_t *sector, int *offset)
+{
+ int ret;
+ unsigned int off, clu = 0;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ off = EXFAT_DEN_TO_B(entry);
+
+ ret = exfat_walk_fat_chain(sb, p_dir, off, &clu);
+ if (ret)
+ return ret;
+
+ /* byte offset in cluster */
+ off = EXFAT_CLU_OFFSET(off, sbi);
+
+ /* byte offset in sector */
+ *offset = EXFAT_BLK_OFFSET(off, sb);
+
+ /* sector offset in cluster */
+ *sector = EXFAT_B_TO_BLK(off, sb);
+ *sector += exfat_cluster_to_sector(sbi, clu);
+ return 0;
+}
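
A worked example of the decomposition, assuming 512-byte sectors and 4 KiB clusters:

	/*
	 * entry 200 starts at byte 200 * 32 = 6400 = 1 cluster + 2304 bytes:
	 *   *offset = 2304 & 511 = 256   (byte offset in sector)
	 *   *sector = 2304 >> 9  = 4     (sector within the cluster)
	 * plus the start sector of the cluster reached by the chain walk.
	 */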
+
+#define EXFAT_MAX_RA_SIZE (128*1024)
+static int exfat_dir_readahead(struct super_block *sb, sector_t sec)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bh;
+ unsigned int max_ra_count = EXFAT_MAX_RA_SIZE >> sb->s_blocksize_bits;
+ unsigned int page_ra_count = PAGE_SIZE >> sb->s_blocksize_bits;
+ unsigned int adj_ra_count = max(sbi->sect_per_clus, page_ra_count);
+ unsigned int ra_count = min(adj_ra_count, max_ra_count);
+
+ /* Read-ahead is not required */
+ if (sbi->sect_per_clus == 1)
+ return 0;
+
+ if (sec < sbi->data_start_sector) {
+ exfat_msg(sb, KERN_ERR,
+ "requested sector is invalid(sect:%llu, root:%llu)",
+ (unsigned long long)sec, sbi->data_start_sector);
+ return -EIO;
+ }
+
+ /* If not aligned to ra_count sectors, fall back to page-sized read-ahead */
+ if ((sec - sbi->data_start_sector) & (ra_count - 1))
+ ra_count = page_ra_count;
+
+ bh = sb_find_get_block(sb, sec);
+ if (!bh || !buffer_uptodate(bh)) {
+ unsigned int i;
+
+ for (i = 0; i < ra_count; i++)
+ sb_breadahead(sb, (sector_t)(sec + i));
+ }
+ brelse(bh);
+ return 0;
+}
+
+struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
+ struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
+ sector_t *sector)
+{
+ unsigned int dentries_per_page = EXFAT_B_TO_DEN(PAGE_SIZE);
+ int off;
+ sector_t sec;
+
+ if (p_dir->dir == DIR_DELETED) {
+ exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry\n");
+ return NULL;
+ }
+
+ if (exfat_find_location(sb, p_dir, entry, &sec, &off))
+ return NULL;
+
+ if (p_dir->dir != EXFAT_FREE_CLUSTER &&
+ !(entry & (dentries_per_page - 1)))
+ exfat_dir_readahead(sb, sec);
+
+ *bh = sb_bread(sb, sec);
+ if (!*bh)
+ return NULL;
+
+ if (sector)
+ *sector = sec;
+ return (struct exfat_dentry *)((*bh)->b_data + off);
+}
+
+enum exfat_validate_dentry_mode {
+ ES_MODE_STARTED,
+ ES_MODE_GET_FILE_ENTRY,
+ ES_MODE_GET_STRM_ENTRY,
+ ES_MODE_GET_NAME_ENTRY,
+ ES_MODE_GET_CRITICAL_SEC_ENTRY,
+};
+
+static bool exfat_validate_entry(unsigned int type,
+ enum exfat_validate_dentry_mode *mode)
+{
+ if (type == TYPE_UNUSED || type == TYPE_DELETED)
+ return false;
+
+ switch (*mode) {
+ case ES_MODE_STARTED:
+ if (type != TYPE_FILE && type != TYPE_DIR)
+ return false;
+ *mode = ES_MODE_GET_FILE_ENTRY;
+ return true;
+ case ES_MODE_GET_FILE_ENTRY:
+ if (type != TYPE_STREAM)
+ return false;
+ *mode = ES_MODE_GET_STRM_ENTRY;
+ return true;
+ case ES_MODE_GET_STRM_ENTRY:
+ if (type != TYPE_EXTEND)
+ return false;
+ *mode = ES_MODE_GET_NAME_ENTRY;
+ return true;
+ case ES_MODE_GET_NAME_ENTRY:
+ if (type == TYPE_STREAM)
+ return false;
+ if (type != TYPE_EXTEND) {
+ if (!(type & TYPE_CRITICAL_SEC))
+ return false;
+ *mode = ES_MODE_GET_CRITICAL_SEC_ENTRY;
+ }
+ return true;
+ case ES_MODE_GET_CRITICAL_SEC_ENTRY:
+ if (type == TYPE_EXTEND || type == TYPE_STREAM)
+ return false;
+ if ((type & TYPE_CRITICAL_SEC) != TYPE_CRITICAL_SEC)
+ return false;
+ return true;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+}
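
exfat_get_dentry_set() below drives this state machine over a candidate entry set; a minimal sketch of that usage (entry_set_valid() is illustrative, not part of the patch):

	static bool entry_set_valid(struct exfat_dentry *eps, int n)
	{
		enum exfat_validate_dentry_mode mode = ES_MODE_STARTED;
		int i;

		for (i = 0; i < n; i++)
			if (!exfat_validate_entry(exfat_get_entry_type(&eps[i]),
						  &mode))
				return false;	/* e.g. a stream entry after names */
		return true;	/* FILE, STREAM, NAME..., then secondaries */
	}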
+
+/*
+ * Returns a set of dentries for a file or dir.
+ *
+ * Note that this returns a copy (dump) of the dentries, so the caller
+ * must call write_entry_set() to apply any changes made to this entry
+ * set back to the real device.
+ *
+ * in:
+ *   sb+p_dir+entry: indicates a file/dir
+ *   type: specifies how many dentries should be included.
+ * out:
+ *   file_ep: on success, points at the first dentry (the file dentry)
+ * return:
+ *   a pointer to the entry set on success,
+ *   NULL on failure.
+ */
+struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
+ struct exfat_chain *p_dir, int entry, unsigned int type,
+ struct exfat_dentry **file_ep)
+{
+ int ret;
+ unsigned int off, byte_offset, clu = 0;
+ unsigned int entry_type;
+ sector_t sec;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_entry_set_cache *es;
+ struct exfat_dentry *ep, *pos;
+ unsigned char num_entries;
+ enum exfat_validate_dentry_mode mode = ES_MODE_STARTED;
+ struct buffer_head *bh;
+
+ if (p_dir->dir == DIR_DELETED) {
+ exfat_msg(sb, KERN_ERR, "access to deleted dentry\n");
+ return NULL;
+ }
+
+ byte_offset = EXFAT_DEN_TO_B(entry);
+ ret = exfat_walk_fat_chain(sb, p_dir, byte_offset, &clu);
+ if (ret)
+ return NULL;
+
+ /* byte offset in cluster */
+ byte_offset = EXFAT_CLU_OFFSET(byte_offset, sbi);
+
+ /* byte offset in sector */
+ off = EXFAT_BLK_OFFSET(byte_offset, sb);
+
+ /* sector offset in cluster */
+ sec = EXFAT_B_TO_BLK(byte_offset, sb);
+ sec += exfat_cluster_to_sector(sbi, clu);
+
+ bh = sb_bread(sb, sec);
+ if (!bh)
+ return NULL;
+
+ ep = (struct exfat_dentry *)(bh->b_data + off);
+ entry_type = exfat_get_entry_type(ep);
+
+ if (entry_type != TYPE_FILE && entry_type != TYPE_DIR)
+ goto release_bh;
+
+ num_entries = type == ES_ALL_ENTRIES ?
+ ep->dentry.file.num_ext + 1 : type;
+ es = kmalloc(struct_size(es, entries, num_entries), GFP_KERNEL);
+ if (!es)
+ goto release_bh;
+
+ es->num_entries = num_entries;
+ es->sector = sec;
+ es->offset = off;
+ es->alloc_flag = p_dir->flags;
+
+ pos = &es->entries[0];
+
+ while (num_entries) {
+ if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
+ goto free_es;
+
+ /* copy dentry */
+ memcpy(pos, ep, sizeof(struct exfat_dentry));
+
+ if (--num_entries == 0)
+ break;
+
+ if (((off + DENTRY_SIZE) & (sb->s_blocksize - 1)) <
+ (off & (sb->s_blocksize - 1))) {
+ /* get the next sector */
+ if (exfat_is_last_sector_in_cluster(sbi, sec)) {
+ if (es->alloc_flag == ALLOC_NO_FAT_CHAIN)
+ clu++;
+ else if (exfat_get_next_cluster(sb, &clu))
+ goto free_es;
+ sec = exfat_cluster_to_sector(sbi, clu);
+ } else {
+ sec++;
+ }
+
+ brelse(bh);
+ bh = sb_bread(sb, sec);
+ if (!bh)
+ goto free_es;
+ off = 0;
+ ep = (struct exfat_dentry *)bh->b_data;
+ } else {
+ ep++;
+ off += DENTRY_SIZE;
+ }
+ pos++;
+ }
+
+ if (file_ep)
+ *file_ep = &es->entries[0];
+ brelse(bh);
+ return es;
+
+free_es:
+ kfree(es);
+release_bh:
+ brelse(bh);
+ return NULL;
+}
+
+enum {
+ DIRENT_STEP_FILE,
+ DIRENT_STEP_STRM,
+ DIRENT_STEP_NAME,
+ DIRENT_STEP_SECD,
+};
+
+/*
+ * return values:
+ * >= 0 : the position of the dir entry with the name in dir
+ * -EEXIST : (root dir, ".") it is the root dir itself
+ * -ENOENT : entry with the name does not exist
+ * -EIO : I/O error
+ */
+int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
+ struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
+ int num_entries, unsigned int type)
+{
+ int i, rewind = 0, dentry = 0, end_eidx = 0, num_ext = 0, len;
+ int order, step, name_len = 0;
+ int dentries_per_clu, num_empty = 0;
+ unsigned int entry_type;
+ unsigned short *uniname = NULL;
+ struct exfat_chain clu;
+ struct exfat_hint *hint_stat = &ei->hint_stat;
+ struct exfat_hint_femp candi_empty;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ dentries_per_clu = sbi->dentries_per_clu;
+
+ exfat_chain_dup(&clu, p_dir);
+
+ if (hint_stat->eidx) {
+ clu.dir = hint_stat->clu;
+ dentry = hint_stat->eidx;
+ end_eidx = dentry;
+ }
+
+ candi_empty.eidx = EXFAT_HINT_NONE;
+rewind:
+ order = 0;
+ step = DIRENT_STEP_FILE;
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ i = dentry & (dentries_per_clu - 1);
+ for (; i < dentries_per_clu; i++, dentry++) {
+ struct exfat_dentry *ep;
+ struct buffer_head *bh;
+
+ if (rewind && dentry == end_eidx)
+ goto not_found;
+
+ ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ if (!ep)
+ return -EIO;
+
+ entry_type = exfat_get_entry_type(ep);
+
+ if (entry_type == TYPE_UNUSED ||
+ entry_type == TYPE_DELETED) {
+ step = DIRENT_STEP_FILE;
+
+ num_empty++;
+ if (candi_empty.eidx == EXFAT_HINT_NONE &&
+ num_empty == 1) {
+ exfat_chain_set(&candi_empty.cur,
+ clu.dir, clu.size, clu.flags);
+ }
+
+ if (candi_empty.eidx == EXFAT_HINT_NONE &&
+ num_empty >= num_entries) {
+ candi_empty.eidx =
+ dentry - (num_empty - 1);
+ WARN_ON(candi_empty.eidx < 0);
+ candi_empty.count = num_empty;
+
+ if (ei->hint_femp.eidx ==
+ EXFAT_HINT_NONE ||
+ candi_empty.eidx <=
+ ei->hint_femp.eidx) {
+ memcpy(&ei->hint_femp,
+ &candi_empty,
+ sizeof(candi_empty));
+ }
+ }
+
+ brelse(bh);
+ if (entry_type == TYPE_UNUSED)
+ goto not_found;
+ continue;
+ }
+
+ num_empty = 0;
+ candi_empty.eidx = EXFAT_HINT_NONE;
+
+ if (entry_type == TYPE_FILE || entry_type == TYPE_DIR) {
+ step = DIRENT_STEP_FILE;
+ if (type == TYPE_ALL || type == entry_type) {
+ num_ext = ep->dentry.file.num_ext;
+ step = DIRENT_STEP_STRM;
+ }
+ brelse(bh);
+ continue;
+ }
+
+ if (entry_type == TYPE_STREAM) {
+ unsigned short name_hash;
+
+ if (step != DIRENT_STEP_STRM) {
+ step = DIRENT_STEP_FILE;
+ brelse(bh);
+ continue;
+ }
+ step = DIRENT_STEP_FILE;
+ name_hash = le16_to_cpu(
+ ep->dentry.stream.name_hash);
+ if (p_uniname->name_hash == name_hash &&
+ p_uniname->name_len ==
+ ep->dentry.stream.name_len) {
+ step = DIRENT_STEP_NAME;
+ order = 1;
+ name_len = 0;
+ }
+ brelse(bh);
+ continue;
+ }
+
+ brelse(bh);
+ if (entry_type == TYPE_EXTEND) {
+ unsigned short entry_uniname[16], unichar;
+
+ if (step != DIRENT_STEP_NAME) {
+ step = DIRENT_STEP_FILE;
+ continue;
+ }
+
+ if (++order == 2)
+ uniname = p_uniname->name;
+ else
+ uniname += EXFAT_FILE_NAME_LEN;
+
+ len = exfat_extract_uni_name(ep, entry_uniname);
+ name_len += len;
+
+ unichar = *(uniname+len);
+ *(uniname+len) = 0x0;
+
+ if (exfat_uniname_ncmp(sb, uniname,
+ entry_uniname, len)) {
+ step = DIRENT_STEP_FILE;
+ } else if (p_uniname->name_len == name_len) {
+ if (order == num_ext)
+ goto found;
+ step = DIRENT_STEP_SECD;
+ }
+
+ *(uniname+len) = unichar;
+ continue;
+ }
+
+ if (entry_type &
+ (TYPE_CRITICAL_SEC | TYPE_BENIGN_SEC)) {
+ if (step == DIRENT_STEP_SECD) {
+ if (++order == num_ext)
+ goto found;
+ continue;
+ }
+ }
+ step = DIRENT_STEP_FILE;
+ }
+
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ if (--clu.size > 0)
+ clu.dir++;
+ else
+ clu.dir = EXFAT_EOF_CLUSTER;
+ } else {
+ if (exfat_get_next_cluster(sb, &clu.dir))
+ return -EIO;
+ }
+ }
+
+not_found:
+ /*
+ * We did not start at index 0, so retry the search from index 0
+ * up to the index we started at.
+ */
+ if (!rewind && end_eidx) {
+ rewind = 1;
+ dentry = 0;
+ clu.dir = p_dir->dir;
+ /* reset empty hint */
+ num_empty = 0;
+ candi_empty.eidx = EXFAT_HINT_NONE;
+ goto rewind;
+ }
+
+ /* reset hint_stat */
+ hint_stat->clu = p_dir->dir;
+ hint_stat->eidx = 0;
+ return -ENOENT;
+
+found:
+ /* next dentry we'll find is out of this cluster */
+ if (!((dentry + 1) & (dentries_per_clu - 1))) {
+ int ret = 0;
+
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ if (--clu.size > 0)
+ clu.dir++;
+ else
+ clu.dir = EXFAT_EOF_CLUSTER;
+ } else {
+ ret = exfat_get_next_cluster(sb, &clu.dir);
+ }
+
+ if (ret || clu.dir != EXFAT_EOF_CLUSTER) {
+ /* just reset hint_stat */
+ hint_stat->clu = p_dir->dir;
+ hint_stat->eidx = 0;
+ return (dentry - num_ext);
+ }
+ }
+
+ hint_stat->clu = clu.dir;
+ hint_stat->eidx = dentry + 1;
+ return dentry - num_ext;
+}
+
+int exfat_count_ext_entries(struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, struct exfat_dentry *ep)
+{
+ int i, count = 0;
+ unsigned int type;
+ struct exfat_dentry *ext_ep;
+ struct buffer_head *bh;
+
+ for (i = 0, entry++; i < ep->dentry.file.num_ext; i++, entry++) {
+ ext_ep = exfat_get_dentry(sb, p_dir, entry, &bh, NULL);
+ if (!ext_ep)
+ return -EIO;
+
+ type = exfat_get_entry_type(ext_ep);
+ brelse(bh);
+ if (type == TYPE_EXTEND || type == TYPE_STREAM)
+ count++;
+ else
+ break;
+ }
+ return count;
+}
+
+int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
+{
+ int i, count = 0;
+ int dentries_per_clu;
+ unsigned int entry_type;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bh;
+
+ dentries_per_clu = sbi->dentries_per_clu;
+
+ exfat_chain_dup(&clu, p_dir);
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ if (!ep)
+ return -EIO;
+ entry_type = exfat_get_entry_type(ep);
+ brelse(bh);
+
+ if (entry_type == TYPE_UNUSED)
+ return count;
+ if (entry_type != TYPE_DIR)
+ continue;
+ count++;
+ }
+
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ if (--clu.size > 0)
+ clu.dir++;
+ else
+ clu.dir = EXFAT_EOF_CLUSTER;
+ } else {
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+ }
+ }
+
+ return count;
+}
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
new file mode 100644
index 000000000000..67d4e46fb810
--- /dev/null
+++ b/fs/exfat/exfat_fs.h
@@ -0,0 +1,519 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef _EXFAT_FS_H
+#define _EXFAT_FS_H
+
+#include <linux/fs.h>
+#include <linux/ratelimit.h>
+#include <linux/nls.h>
+
+#define EXFAT_SUPER_MAGIC 0x2011BAB0UL
+#define EXFAT_ROOT_INO 1
+
+#define EXFAT_SB_DIRTY 0
+
+#define EXFAT_CLUSTERS_UNTRACKED (~0u)
+
+/*
+ * exfat error flags
+ */
+enum exfat_error_mode {
+ EXFAT_ERRORS_CONT, /* ignore error and continue */
+ EXFAT_ERRORS_PANIC, /* panic on error */
+ EXFAT_ERRORS_RO, /* remount r/o on error */
+};
+
+/*
+ * exfat nls lossy flag
+ */
+enum {
+ NLS_NAME_NO_LOSSY, /* no lossy */
+ NLS_NAME_LOSSY, /* just detected incorrect filename(s) */
+ NLS_NAME_OVERLEN, /* the length is over than its limit */
+};
+
+#define EXFAT_HASH_BITS 8
+#define EXFAT_HASH_SIZE (1UL << EXFAT_HASH_BITS)
+
+/*
+ * Type Definitions
+ */
+#define ES_2_ENTRIES 2
+#define ES_ALL_ENTRIES 0
+
+#define DIR_DELETED 0xFFFF0321
+
+/* type values */
+#define TYPE_UNUSED 0x0000
+#define TYPE_DELETED 0x0001
+#define TYPE_INVALID 0x0002
+#define TYPE_CRITICAL_PRI 0x0100
+#define TYPE_BITMAP 0x0101
+#define TYPE_UPCASE 0x0102
+#define TYPE_VOLUME 0x0103
+#define TYPE_DIR 0x0104
+#define TYPE_FILE 0x011F
+#define TYPE_CRITICAL_SEC 0x0200
+#define TYPE_STREAM 0x0201
+#define TYPE_EXTEND 0x0202
+#define TYPE_ACL 0x0203
+#define TYPE_BENIGN_PRI 0x0400
+#define TYPE_GUID 0x0401
+#define TYPE_PADDING 0x0402
+#define TYPE_ACLTAB 0x0403
+#define TYPE_BENIGN_SEC 0x0800
+#define TYPE_ALL 0x0FFF
+
+#define MAX_CHARSET_SIZE 6 /* max size of multi-byte character */
+#define MAX_NAME_LENGTH 255 /* max len of file name excluding NULL */
+#define MAX_VFSNAME_BUF_SIZE ((MAX_NAME_LENGTH + 1) * MAX_CHARSET_SIZE)
+
+#define FAT_CACHE_SIZE 128
+#define FAT_CACHE_HASH_SIZE 64
+#define BUF_CACHE_SIZE 256
+#define BUF_CACHE_HASH_SIZE 64
+
+#define EXFAT_HINT_NONE -1
+#define EXFAT_MIN_SUBDIR 2
+
+/*
+ * helpers for cluster size to byte conversion.
+ */
+#define EXFAT_CLU_TO_B(b, sbi) ((b) << (sbi)->cluster_size_bits)
+#define EXFAT_B_TO_CLU(b, sbi) ((b) >> (sbi)->cluster_size_bits)
+#define EXFAT_B_TO_CLU_ROUND_UP(b, sbi) \
+ (((b - 1) >> (sbi)->cluster_size_bits) + 1)
+#define EXFAT_CLU_OFFSET(off, sbi) ((off) & ((sbi)->cluster_size - 1))
+
+/*
+ * helpers for block size to byte conversion.
+ */
+#define EXFAT_BLK_TO_B(b, sb) ((b) << (sb)->s_blocksize_bits)
+#define EXFAT_B_TO_BLK(b, sb) ((b) >> (sb)->s_blocksize_bits)
+#define EXFAT_B_TO_BLK_ROUND_UP(b, sb) \
+ (((b - 1) >> (sb)->s_blocksize_bits) + 1)
+#define EXFAT_BLK_OFFSET(off, sb) ((off) & ((sb)->s_blocksize - 1))
+
+/*
+ * helpers for block size to dentry size conversion.
+ */
+#define EXFAT_B_TO_DEN_IDX(b, sbi) \
+ ((b) << ((sbi)->cluster_size_bits - DENTRY_SIZE_BITS))
+#define EXFAT_B_TO_DEN(b) ((b) >> DENTRY_SIZE_BITS)
+#define EXFAT_DEN_TO_B(b) ((b) << DENTRY_SIZE_BITS)
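
Worked examples of the conversions, assuming 4 KiB clusters (cluster_size_bits == 12) and 32-byte dentries (DENTRY_SIZE_BITS == 5):

	/*
	 *   EXFAT_B_TO_CLU(20480, sbi)          == 5
	 *   EXFAT_B_TO_CLU_ROUND_UP(20481, sbi) == 6
	 *   EXFAT_B_TO_DEN(1024)                == 32
	 *   EXFAT_DEN_TO_B(3)                   == 96
	 */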
+
+/*
+ * helpers for fat entry.
+ */
+#define FAT_ENT_SIZE (4)
+#define FAT_ENT_SIZE_BITS (2)
+#define FAT_ENT_OFFSET_SECTOR(sb, loc) (EXFAT_SB(sb)->FAT1_start_sector + \
+ (((u64)loc << FAT_ENT_SIZE_BITS) >> sb->s_blocksize_bits))
+#define FAT_ENT_OFFSET_BYTE_IN_SECTOR(sb, loc) \
+ ((loc << FAT_ENT_SIZE_BITS) & (sb->s_blocksize - 1))
+
+/*
+ * helpers for bitmap.
+ */
+#define CLUSTER_TO_BITMAP_ENT(clu) ((clu) - EXFAT_RESERVED_CLUSTERS)
+#define BITMAP_ENT_TO_CLUSTER(ent) ((ent) + EXFAT_RESERVED_CLUSTERS)
+#define BITS_PER_SECTOR(sb) ((sb)->s_blocksize * BITS_PER_BYTE)
+#define BITS_PER_SECTOR_MASK(sb) (BITS_PER_SECTOR(sb) - 1)
+#define BITMAP_OFFSET_SECTOR_INDEX(sb, ent) \
+ ((ent / BITS_PER_BYTE) >> (sb)->s_blocksize_bits)
+#define BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent) (ent & BITS_PER_SECTOR_MASK(sb))
+#define BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent) \
+ ((ent / BITS_PER_BYTE) & ((sb)->s_blocksize - 1))
+#define BITS_PER_BYTE_MASK 0x7
+#define IGNORED_BITS_REMAINED(clu, clu_base) ((1 << ((clu) - (clu_base))) - 1)
+
+struct exfat_dentry_namebuf {
+ char *lfn;
+ int lfnbuf_len; /* usually MAX_VFSNAME_BUF_SIZE */
+};
+
+/* unicode name structure */
+struct exfat_uni_name {
+ /* +3 for null and for converting */
+ unsigned short name[MAX_NAME_LENGTH + 3];
+ unsigned short name_hash;
+ unsigned char name_len;
+};
+
+/* directory structure */
+struct exfat_chain {
+ unsigned int dir;
+ unsigned int size;
+ unsigned char flags;
+};
+
+/* first empty entry hint information */
+struct exfat_hint_femp {
+ /* entry index of a directory */
+ int eidx;
+ /* count of contiguous empty entries */
+ int count;
+ /* the cluster that the first empty slot exists in */
+ struct exfat_chain cur;
+};
+
+/* hint structure */
+struct exfat_hint {
+ unsigned int clu;
+ union {
+ unsigned int off; /* cluster offset */
+ int eidx; /* entry index */
+ };
+};
+
+struct exfat_entry_set_cache {
+ /* sector number that contains file_entry */
+ sector_t sector;
+ /* byte offset in the sector */
+ unsigned int offset;
+ /* flag in the stream entry: 0x01 for a cluster chain, 0x03 for contiguous */
+ int alloc_flag;
+ unsigned int num_entries;
+ struct exfat_dentry entries[];
+};
+
+struct exfat_dir_entry {
+ struct exfat_chain dir;
+ int entry;
+ unsigned int type;
+ unsigned int start_clu;
+ unsigned char flags;
+ unsigned short attr;
+ loff_t size;
+ unsigned int num_subdirs;
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ struct timespec64 crtime;
+ struct exfat_dentry_namebuf namebuf;
+};
+
+/*
+ * exfat mount in-memory data
+ */
+struct exfat_mount_options {
+ kuid_t fs_uid;
+ kgid_t fs_gid;
+ unsigned short fs_fmask;
+ unsigned short fs_dmask;
+ /* permission for setting the [am]time */
+ unsigned short allow_utime;
+ /* charset for filename input/display */
+ char *iocharset;
+ /* on error: continue, panic, remount-ro */
+ enum exfat_error_mode errors;
+ unsigned utf8:1, /* Use of UTF-8 character set */
+ discard:1; /* Issue discard requests on deletions */
+ int time_offset; /* Offset of timestamps from UTC (in minutes) */
+};
+
+/*
+ * EXFAT file system superblock in-memory data
+ */
+struct exfat_sb_info {
+ unsigned long long num_sectors; /* num of sectors in volume */
+ unsigned int num_clusters; /* num of clusters in volume */
+ unsigned int cluster_size; /* cluster size in bytes */
+ unsigned int cluster_size_bits;
+ unsigned int sect_per_clus; /* cluster size in sectors */
+ unsigned int sect_per_clus_bits;
+ unsigned long long FAT1_start_sector; /* FAT1 start sector */
+ unsigned long long FAT2_start_sector; /* FAT2 start sector */
+ unsigned long long data_start_sector; /* data area start sector */
+ unsigned int num_FAT_sectors; /* num of FAT sectors */
+ unsigned int root_dir; /* root dir cluster */
+ unsigned int dentries_per_clu; /* num of dentries per cluster */
+ unsigned int vol_flag; /* volume dirty flag */
+ struct buffer_head *pbr_bh; /* buffer_head of PBR sector */
+
+ unsigned int map_clu; /* allocation bitmap start cluster */
+ unsigned int map_sectors; /* num of allocation bitmap sectors */
+ struct buffer_head **vol_amap; /* allocation bitmap */
+
+ unsigned short *vol_utbl; /* upcase table */
+
+ unsigned int clu_srch_ptr; /* cluster search pointer */
+ unsigned int used_clusters; /* number of used clusters */
+
+ unsigned long s_state;
+ struct mutex s_lock; /* superblock lock */
+ struct exfat_mount_options options;
+ struct nls_table *nls_io; /* Charset used for input and display */
+ struct ratelimit_state ratelimit;
+
+ spinlock_t inode_hash_lock;
+ struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
+
+ struct rcu_head rcu;
+};
+
+/*
+ * EXFAT file system inode in-memory data
+ */
+struct exfat_inode_info {
+ struct exfat_chain dir;
+ int entry;
+ unsigned int type;
+ unsigned short attr;
+ unsigned int start_clu;
+ unsigned char flags;
+ /*
+ * copy of the low 32 bits of i_version; used to check
+ * the validity of hint_stat.
+ */
+ unsigned int version;
+ /* file offset or dentry index for readdir */
+ loff_t rwoffset;
+
+ /* hint for cluster last accessed */
+ struct exfat_hint hint_bmap;
+ /* hint for entry index we try to lookup next time */
+ struct exfat_hint hint_stat;
+ /* hint for first empty entry */
+ struct exfat_hint_femp hint_femp;
+
+ spinlock_t cache_lru_lock;
+ struct list_head cache_lru;
+ int nr_caches;
+ /* for avoiding the race between alloc and free */
+ unsigned int cache_valid_id;
+
+ /*
+ * NOTE: i_size_ondisk is the physically allocated size; it is
+ * 64 bits, so ->inode_lock must be held to access it.
+ */
+ loff_t i_size_ondisk;
+ /* block-aligned i_size (used in cont_write_begin) */
+ loff_t i_size_aligned;
+ /* on-disk position of directory entry or 0 */
+ loff_t i_pos;
+ /* hash by i_location */
+ struct hlist_node i_hash_fat;
+ /* protect bmap against truncate */
+ struct rw_semaphore truncate_lock;
+ struct inode vfs_inode;
+ /* File creation time */
+ struct timespec64 i_crtime;
+};
+
+static inline struct exfat_sb_info *EXFAT_SB(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
+{
+ return container_of(inode, struct exfat_inode_info, vfs_inode);
+}
+
+/*
+ * If ->i_mode can't hold 0222 (i.e. ATTR_RO), we use ->i_attrs to
+ * save ATTR_RO instead of ->i_mode.
+ *
+ * If it's a directory and !sbi->options.rodir, ATTR_RO isn't a
+ * read-only bit; it's just used as a flag for applications.
+ */
+static inline int exfat_mode_can_hold_ro(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ if (S_ISDIR(inode->i_mode))
+ return 0;
+
+ if ((~sbi->options.fs_fmask) & 0222)
+ return 1;
+ return 0;
+}
+
+/* Convert attribute bits and a mask to the UNIX mode. */
+static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi,
+ unsigned short attr, mode_t mode)
+{
+ if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
+ mode &= ~0222;
+
+ if (attr & ATTR_SUBDIR)
+ return (mode & ~sbi->options.fs_dmask) | S_IFDIR;
+
+ return (mode & ~sbi->options.fs_fmask) | S_IFREG;
+}
+
+/* Return the FAT attribute byte for this inode */
+static inline unsigned short exfat_make_attr(struct inode *inode)
+{
+ unsigned short attr = EXFAT_I(inode)->attr;
+
+ if (S_ISDIR(inode->i_mode))
+ attr |= ATTR_SUBDIR;
+ if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222))
+ attr |= ATTR_READONLY;
+ return attr;
+}
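
A worked example of the attribute-to-mode mapping (assumed fs_fmask = fs_dmask = 0022; the helper below is a hypothetical userspace condensation of exfat_make_mode, not driver code):

    #include <stdio.h>

    #define ATTR_READONLY 0x0001
    #define ATTR_SUBDIR   0x0010

    /* Condensed mapping, assuming fmask = dmask = 0022 and mode = 0777. */
    static unsigned int make_mode(unsigned int attr)
    {
            unsigned int mode = 0777;

            if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
                    mode &= ~0222;                 /* strip the write bits */
            return mode & ~0022;                   /* apply the mask */
    }

    int main(void)
    {
            printf("%o\n", make_mode(ATTR_READONLY)); /* 555: read-only file */
            printf("%o\n", make_mode(ATTR_SUBDIR));   /* 755: directory */
            return 0;
    }
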
+
+static inline void exfat_save_attr(struct inode *inode, unsigned short attr)
+{
+ if (exfat_mode_can_hold_ro(inode))
+ EXFAT_I(inode)->attr = attr & (ATTR_RWMASK | ATTR_READONLY);
+ else
+ EXFAT_I(inode)->attr = attr & ATTR_RWMASK;
+}
+
+static inline bool exfat_is_last_sector_in_cluster(struct exfat_sb_info *sbi,
+ sector_t sec)
+{
+ return ((sec - sbi->data_start_sector + 1) &
+ ((1 << sbi->sect_per_clus_bits) - 1)) == 0;
+}
+
+static inline sector_t exfat_cluster_to_sector(struct exfat_sb_info *sbi,
+ unsigned int clus)
+{
+ return ((clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) +
+ sbi->data_start_sector;
+}
+
+static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
+ sector_t sec)
+{
+ return ((sec - sbi->data_start_sector) >> sbi->sect_per_clus_bits) +
+ EXFAT_RESERVED_CLUSTERS;
+}
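
The cluster/sector conversions anchor the cluster heap at data_start_sector, with cluster numbering starting at 2. Plugging in assumed values (data area at sector 4096, 8 sectors per cluster, so sect_per_clus_bits = 3):

    #include <assert.h>

    /* Assumed geometry, for illustration only. */
    enum { DATA_START = 4096, SPC_BITS = 3, RESERVED_CLUS = 2 };

    int main(void)
    {
            /* cluster 5 -> ((5 - 2) << 3) + 4096 = sector 4120 */
            assert((((5 - RESERVED_CLUS) << SPC_BITS) + DATA_START) == 4120);
            /* and back: ((4120 - 4096) >> 3) + 2 = cluster 5 */
            assert((((4120 - DATA_START) >> SPC_BITS) + RESERVED_CLUS) == 5);
            return 0;
    }
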
+
+/* super.c */
+int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag);
+
+/* fatent.c */
+#define exfat_get_next_cluster(sb, pclu) exfat_ent_get(sb, *(pclu), pclu)
+
+int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
+ struct exfat_chain *p_chain);
+int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain);
+int exfat_ent_get(struct super_block *sb, unsigned int loc,
+ unsigned int *content);
+int exfat_ent_set(struct super_block *sb, unsigned int loc,
+ unsigned int content);
+int exfat_count_ext_entries(struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, struct exfat_dentry *p_entry);
+int exfat_chain_cont_cluster(struct super_block *sb, unsigned int chain,
+ unsigned int len);
+int exfat_zeroed_cluster(struct inode *dir, unsigned int clu);
+int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain,
+ unsigned int *ret_clu);
+int exfat_count_num_clusters(struct super_block *sb,
+ struct exfat_chain *p_chain, unsigned int *ret_count);
+
+/* balloc.c */
+int exfat_load_bitmap(struct super_block *sb);
+void exfat_free_bitmap(struct exfat_sb_info *sbi);
+int exfat_set_bitmap(struct inode *inode, unsigned int clu);
+void exfat_clear_bitmap(struct inode *inode, unsigned int clu);
+unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
+int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);
+
+/* file.c */
+extern const struct file_operations exfat_file_operations;
+int __exfat_truncate(struct inode *inode, loff_t new_size);
+void exfat_truncate(struct inode *inode, loff_t size);
+int exfat_setattr(struct dentry *dentry, struct iattr *attr);
+int exfat_getattr(const struct path *path, struct kstat *stat,
+ unsigned int request_mask, unsigned int query_flags);
+
+/* namei.c */
+extern const struct dentry_operations exfat_dentry_ops;
+extern const struct dentry_operations exfat_utf8_dentry_ops;
+
+/* cache.c */
+int exfat_cache_init(void);
+void exfat_cache_shutdown(void);
+void exfat_cache_init_inode(struct inode *inode);
+void exfat_cache_inval_inode(struct inode *inode);
+int exfat_get_cluster(struct inode *inode, unsigned int cluster,
+ unsigned int *fclus, unsigned int *dclus,
+ unsigned int *last_dclus, int allow_eof);
+
+/* dir.c */
+extern const struct inode_operations exfat_dir_inode_operations;
+extern const struct file_operations exfat_dir_operations;
+unsigned int exfat_get_entry_type(struct exfat_dentry *p_entry);
+int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
+ int entry, unsigned int type, unsigned int start_clu,
+ unsigned long long size);
+int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
+ int entry, int num_entries, struct exfat_uni_name *p_uniname);
+int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
+ int entry, int order, int num_entries);
+int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
+ int entry);
+int exfat_update_dir_chksum_with_entry_set(struct super_block *sb,
+ struct exfat_entry_set_cache *es, int sync);
+int exfat_calc_num_entries(struct exfat_uni_name *p_uniname);
+int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
+ struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
+ int num_entries, unsigned int type);
+int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu);
+int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, sector_t *sector, int *offset);
+struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
+ struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
+ sector_t *sector);
+struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
+ struct exfat_chain *p_dir, int entry, unsigned int type,
+ struct exfat_dentry **file_ep);
+int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir);
+
+/* inode.c */
+extern const struct inode_operations exfat_file_inode_operations;
+void exfat_sync_inode(struct inode *inode);
+struct inode *exfat_build_inode(struct super_block *sb,
+ struct exfat_dir_entry *info, loff_t i_pos);
+void exfat_hash_inode(struct inode *inode, loff_t i_pos);
+void exfat_unhash_inode(struct inode *inode);
+struct inode *exfat_iget(struct super_block *sb, loff_t i_pos);
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc);
+void exfat_evict_inode(struct inode *inode);
+int exfat_block_truncate_page(struct inode *inode, loff_t from);
+
+/* exfat/nls.c */
+unsigned short exfat_toupper(struct super_block *sb, unsigned short a);
+int exfat_uniname_ncmp(struct super_block *sb, unsigned short *a,
+ unsigned short *b, unsigned int len);
+int exfat_utf16_to_nls(struct super_block *sb,
+ struct exfat_uni_name *uniname, unsigned char *p_cstring,
+ int len);
+int exfat_nls_to_utf16(struct super_block *sb,
+ const unsigned char *p_cstring, const int len,
+ struct exfat_uni_name *uniname, int *p_lossy);
+int exfat_create_upcase_table(struct super_block *sb);
+void exfat_free_upcase_table(struct exfat_sb_info *sbi);
+unsigned short exfat_high_surrogate(unicode_t u);
+unsigned short exfat_low_surrogate(unicode_t u);
+
+/* exfat/misc.c */
+void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
+ __printf(3, 4) __cold;
+#define exfat_fs_error(sb, fmt, args...) \
+ __exfat_fs_error(sb, 1, fmt, ## args)
+#define exfat_fs_error_ratelimit(sb, fmt, args...) \
+ __exfat_fs_error(sb, __ratelimit(&EXFAT_SB(sb)->ratelimit), \
+ fmt, ## args)
+void exfat_msg(struct super_block *sb, const char *lv, const char *fmt, ...)
+ __printf(3, 4) __cold;
+void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+ u8 tz, __le16 time, __le16 date, u8 time_ms);
+void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+ u8 *tz, __le16 *time, __le16 *date, u8 *time_ms);
+unsigned short exfat_calc_chksum_2byte(void *data, int len,
+ unsigned short chksum, int type);
+void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync);
+void exfat_chain_set(struct exfat_chain *ec, unsigned int dir,
+ unsigned int size, unsigned char flags);
+void exfat_chain_dup(struct exfat_chain *dup, struct exfat_chain *ec);
+
+#endif /* !_EXFAT_FS_H */
diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
new file mode 100644
index 000000000000..2a841010e649
--- /dev/null
+++ b/fs/exfat/exfat_raw.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef _EXFAT_RAW_H
+#define _EXFAT_RAW_H
+
+#include <linux/types.h>
+
+#define PBR_SIGNATURE 0xAA55
+
+#define EXFAT_MAX_FILE_LEN 255
+
+#define VOL_CLEAN 0x0000
+#define VOL_DIRTY 0x0002
+
+#define EXFAT_EOF_CLUSTER 0xFFFFFFFFu
+#define EXFAT_BAD_CLUSTER 0xFFFFFFF7u
+#define EXFAT_FREE_CLUSTER 0
+/* Clusters 0 and 1 are reserved; the first cluster of the heap is 2. */
+#define EXFAT_RESERVED_CLUSTERS 2
+#define EXFAT_FIRST_CLUSTER 2
+#define EXFAT_DATA_CLUSTER_COUNT(sbi) \
+ ((sbi)->num_clusters - EXFAT_RESERVED_CLUSTERS)
+
+/* AllocationPossible and NoFatChain fields in the GeneralSecondaryFlags field */
+#define ALLOC_FAT_CHAIN 0x01
+#define ALLOC_NO_FAT_CHAIN 0x03
+
+#define DENTRY_SIZE 32 /* directory entry size */
+#define DENTRY_SIZE_BITS 5
+/* exFAT allows up to 8388608 (256 MB of) directory entries */
+#define MAX_EXFAT_DENTRIES 8388608
+
+/* dentry types */
+#define EXFAT_UNUSED 0x00 /* end of directory */
+#define EXFAT_DELETE (~0x80)
+#define IS_EXFAT_DELETED(x) ((x) < 0x80) /* deleted file (0x01~0x7F) */
+#define EXFAT_INVAL 0x80 /* invalid value */
+#define EXFAT_BITMAP 0x81 /* allocation bitmap */
+#define EXFAT_UPCASE 0x82 /* upcase table */
+#define EXFAT_VOLUME 0x83 /* volume label */
+#define EXFAT_FILE 0x85 /* file or dir */
+#define EXFAT_GUID 0xA0
+#define EXFAT_PADDING 0xA1
+#define EXFAT_ACLTAB 0xA2
+#define EXFAT_STREAM 0xC0 /* stream entry */
+#define EXFAT_NAME 0xC1 /* file name entry */
+#define EXFAT_ACL 0xC2 /* ACL entry */
+
+#define IS_EXFAT_CRITICAL_PRI(x) (x < 0xA0)
+#define IS_EXFAT_BENIGN_PRI(x) (x < 0xC0)
+#define IS_EXFAT_CRITICAL_SEC(x) (x < 0xE0)
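
The type-code ranges above partition the byte into importance/category classes, and clearing bit 7 (EXFAT_DELETE) marks an entry deleted while preserving its type bits. A sketch of how a raw type byte can be bucketed (illustrative only, not driver code):

    static const char *classify(unsigned char t)
    {
            if (t == 0x00) return "end of directory";   /* EXFAT_UNUSED */
            if (t <  0x80) return "deleted";            /* IS_EXFAT_DELETED */
            if (t <  0xA0) return "critical primary";   /* file, bitmap, ... */
            if (t <  0xC0) return "benign primary";     /* GUID, padding, ... */
            if (t <  0xE0) return "critical secondary"; /* stream, name, ... */
            return "benign secondary";
    }

    /* classify(0x85) -> "critical primary" (a file entry);
     * classify(0x85 & ~0x80) == classify(0x05) -> "deleted" */
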
+
+/* checksum types */
+#define CS_DIR_ENTRY 0
+#define CS_PBR_SECTOR 1
+#define CS_DEFAULT 2
+
+/* file attributes */
+#define ATTR_READONLY 0x0001
+#define ATTR_HIDDEN 0x0002
+#define ATTR_SYSTEM 0x0004
+#define ATTR_VOLUME 0x0008
+#define ATTR_SUBDIR 0x0010
+#define ATTR_ARCHIVE 0x0020
+
+#define ATTR_RWMASK (ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME | \
+ ATTR_SUBDIR | ATTR_ARCHIVE)
+
+#define PBR64_JUMP_BOOT_LEN 3
+#define PBR64_OEM_NAME_LEN 8
+#define PBR64_RESERVED_LEN 53
+
+#define EXFAT_FILE_NAME_LEN 15
+
+/* EXFAT BIOS parameter block (64 bytes) */
+struct bpb64 {
+ __u8 jmp_boot[PBR64_JUMP_BOOT_LEN];
+ __u8 oem_name[PBR64_OEM_NAME_LEN];
+ __u8 res_zero[PBR64_RESERVED_LEN];
+} __packed;
+
+/* EXFAT EXTEND BIOS parameter block (56 bytes) */
+struct bsx64 {
+ __le64 vol_offset;
+ __le64 vol_length;
+ __le32 fat_offset;
+ __le32 fat_length;
+ __le32 clu_offset;
+ __le32 clu_count;
+ __le32 root_cluster;
+ __le32 vol_serial;
+ __u8 fs_version[2];
+ __le16 vol_flags;
+ __u8 sect_size_bits;
+ __u8 sect_per_clus_bits;
+ __u8 num_fats;
+ __u8 phy_drv_no;
+ __u8 perc_in_use;
+ __u8 reserved2[7];
+} __packed;
+
+/* EXFAT PBR[BPB+BSX] (120 bytes) */
+struct pbr64 {
+ struct bpb64 bpb;
+ struct bsx64 bsx;
+} __packed;
+
+/* Common PBR[Partition Boot Record] (512 bytes) */
+struct pbr {
+ union {
+ __u8 raw[64];
+ struct bpb64 f64;
+ } bpb;
+ union {
+ __u8 raw[56];
+ struct bsx64 f64;
+ } bsx;
+ __u8 boot_code[390];
+ __le16 signature;
+} __packed;
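
To make the layout concrete, here is a hypothetical userspace probe that reads sector 0 of an image and checks fields whose offsets follow from struct pbr: the signature at bytes 510-511 and, within bsx64 (which begins at byte 64), sect_size_bits at byte 108 and sect_per_clus_bits at byte 109. This is a sketch for exploration, not part of the driver:

    #include <stdio.h>

    int main(int argc, char **argv)
    {
            unsigned char sec[512];
            FILE *f;

            if (argc < 2 || !(f = fopen(argv[1], "rb")))
                    return 1;
            if (fread(sec, 1, sizeof(sec), f) != sizeof(sec))
                    return 1;
            /* little-endian signature, independent of host byte order */
            if ((sec[510] | (sec[511] << 8)) != 0xAA55) {
                    puts("no boot signature");
                    return 1;
            }
            printf("sect_size_bits=%u sect_per_clus_bits=%u\n",
                   (unsigned)sec[108], (unsigned)sec[109]);
            fclose(f);
            return 0;
    }
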
+
+struct exfat_dentry {
+ __u8 type;
+ union {
+ struct {
+ __u8 num_ext;
+ __le16 checksum;
+ __le16 attr;
+ __le16 reserved1;
+ __le16 create_time;
+ __le16 create_date;
+ __le16 modify_time;
+ __le16 modify_date;
+ __le16 access_time;
+ __le16 access_date;
+ __u8 create_time_ms;
+ __u8 modify_time_ms;
+ __u8 create_tz;
+ __u8 modify_tz;
+ __u8 access_tz;
+ __u8 reserved2[7];
+ } __packed file; /* file directory entry */
+ struct {
+ __u8 flags;
+ __u8 reserved1;
+ __u8 name_len;
+ __le16 name_hash;
+ __le16 reserved2;
+ __le64 valid_size;
+ __le32 reserved3;
+ __le32 start_clu;
+ __le64 size;
+ } __packed stream; /* stream extension directory entry */
+ struct {
+ __u8 flags;
+ __le16 unicode_0_14[EXFAT_FILE_NAME_LEN];
+ } __packed name; /* file name directory entry */
+ struct {
+ __u8 flags;
+ __u8 reserved[18];
+ __le32 start_clu;
+ __le64 size;
+ } __packed bitmap; /* allocation bitmap directory entry */
+ struct {
+ __u8 reserved1[3];
+ __le32 checksum;
+ __u8 reserved2[12];
+ __le32 start_clu;
+ __le64 size;
+ } __packed upcase; /* up-case table directory entry */
+ } __packed dentry;
+} __packed;
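
Every arm of the union above must occupy exactly 31 bytes after the type byte so that each directory entry stays DENTRY_SIZE (32) bytes. A compile-time check like the following (a hypothetical harness that assumes this header is in scope) catches packing mistakes:

    #include <stddef.h>

    _Static_assert(sizeof(struct exfat_dentry) == DENTRY_SIZE,
                   "directory entry must be 32 bytes");
    /* FirstCluster of the stream entry lives at byte 20 of the entry:
     * 1 (type) + 1 + 1 + 1 + 2 + 2 + 8 + 4 preceding bytes. */
    _Static_assert(offsetof(struct exfat_dentry, dentry.stream.start_clu) == 20,
                   "stream start_clu offset");
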
+
+#define EXFAT_TZ_VALID (1 << 7)
+
+/* Jan 1 GMT 00:00:00 1980 */
+#define EXFAT_MIN_TIMESTAMP_SECS 315532800LL
+/* Dec 31 GMT 23:59:59 2107 */
+#define EXFAT_MAX_TIMESTAMP_SECS 4354819199LL
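
The lower bound is easy to verify by hand: 1970-01-01 to 1980-01-01 spans 10 * 365 + 2 leap days (1972 and 1976) = 3652 days. A one-line compile-time check of that arithmetic:

    _Static_assert(3652LL * 86400 == 315532800LL /* EXFAT_MIN_TIMESTAMP_SECS */,
                   "1980 epoch offset in seconds");
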
+
+#endif /* !_EXFAT_RAW_H */
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
new file mode 100644
index 000000000000..a855b1769a96
--- /dev/null
+++ b/fs/exfat/fatent.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/buffer_head.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+static int exfat_mirror_bh(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh)
+{
+ struct buffer_head *c_bh;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ sector_t sec2;
+ int err = 0;
+
+ if (sbi->FAT2_start_sector != sbi->FAT1_start_sector) {
+ sec2 = sec - sbi->FAT1_start_sector + sbi->FAT2_start_sector;
+ c_bh = sb_getblk(sb, sec2);
+ if (!c_bh)
+ return -ENOMEM;
+ memcpy(c_bh->b_data, bh->b_data, sb->s_blocksize);
+ set_buffer_uptodate(c_bh);
+ mark_buffer_dirty(c_bh);
+ if (sb->s_flags & SB_SYNCHRONOUS)
+ err = sync_dirty_buffer(c_bh);
+ brelse(c_bh);
+ }
+
+ return err;
+}
+
+static int __exfat_ent_get(struct super_block *sb, unsigned int loc,
+ unsigned int *content)
+{
+ unsigned int off;
+ sector_t sec;
+ struct buffer_head *bh;
+
+ sec = FAT_ENT_OFFSET_SECTOR(sb, loc);
+ off = FAT_ENT_OFFSET_BYTE_IN_SECTOR(sb, loc);
+
+ bh = sb_bread(sb, sec);
+ if (!bh)
+ return -EIO;
+
+ *content = le32_to_cpu(*(__le32 *)(&bh->b_data[off]));
+
+ /* remap reserved clusters to simplify code */
+ if (*content > EXFAT_BAD_CLUSTER)
+ *content = EXFAT_EOF_CLUSTER;
+
+ brelse(bh);
+ return 0;
+}
+
+int exfat_ent_set(struct super_block *sb, unsigned int loc,
+ unsigned int content)
+{
+ unsigned int off;
+ sector_t sec;
+ __le32 *fat_entry;
+ struct buffer_head *bh;
+
+ sec = FAT_ENT_OFFSET_SECTOR(sb, loc);
+ off = FAT_ENT_OFFSET_BYTE_IN_SECTOR(sb, loc);
+
+ bh = sb_bread(sb, sec);
+ if (!bh)
+ return -EIO;
+
+ fat_entry = (__le32 *)&(bh->b_data[off]);
+ *fat_entry = cpu_to_le32(content);
+ exfat_update_bh(sb, bh, sb->s_flags & SB_SYNCHRONOUS);
+ exfat_mirror_bh(sb, sec, bh);
+ brelse(bh);
+ return 0;
+}
+
+static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
+ unsigned int clus)
+{
+ if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
+ return false;
+ return true;
+}
+
+int exfat_ent_get(struct super_block *sb, unsigned int loc,
+ unsigned int *content)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int err;
+
+ if (!is_valid_cluster(sbi, loc)) {
+ exfat_fs_error(sb, "invalid access to FAT (entry 0x%08x)",
+ loc);
+ return -EIO;
+ }
+
+ err = __exfat_ent_get(sb, loc, content);
+ if (err) {
+ exfat_fs_error(sb,
+ "failed to access to FAT (entry 0x%08x, err:%d)",
+ loc, err);
+ return err;
+ }
+
+ if (*content == EXFAT_FREE_CLUSTER) {
+ exfat_fs_error(sb,
+ "invalid access to FAT free cluster (entry 0x%08x)",
+ loc);
+ return -EIO;
+ }
+
+ if (*content == EXFAT_BAD_CLUSTER) {
+ exfat_fs_error(sb,
+ "invalid access to FAT bad cluster (entry 0x%08x)",
+ loc);
+ return -EIO;
+ }
+
+ if (*content != EXFAT_EOF_CLUSTER && !is_valid_cluster(sbi, *content)) {
+ exfat_fs_error(sb,
+ "invalid access to FAT (entry 0x%08x) bogus content (0x%08x)",
+ loc, *content);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int exfat_chain_cont_cluster(struct super_block *sb, unsigned int chain,
+ unsigned int len)
+{
+ if (!len)
+ return 0;
+
+ while (len > 1) {
+ if (exfat_ent_set(sb, chain, chain + 1))
+ return -EIO;
+ chain++;
+ len--;
+ }
+
+ if (exfat_ent_set(sb, chain, EXFAT_EOF_CLUSTER))
+ return -EIO;
+ return 0;
+}
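
The effect is to materialize a contiguous run as an explicit singly linked FAT chain. A toy in-memory model (illustrative only) of what exfat_chain_cont_cluster(sb, 10, 3) ends up writing:

    #include <assert.h>

    #define EOF_CLU 0xFFFFFFFFu

    static unsigned int fat[16];   /* toy FAT, indexed by cluster number */

    static void chain_cont(unsigned int chain, unsigned int len)
    {
            while (len > 1) {
                    fat[chain] = chain + 1;   /* link to the next cluster */
                    chain++;
                    len--;
            }
            fat[chain] = EOF_CLU;             /* terminate the chain */
    }

    int main(void)
    {
            chain_cont(10, 3);
            assert(fat[10] == 11 && fat[11] == 12 && fat[12] == EOF_CLU);
            return 0;
    }
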
+
+int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
+{
+ unsigned int num_clusters = 0;
+ unsigned int clu;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ /* invalid cluster number */
+ if (p_chain->dir == EXFAT_FREE_CLUSTER ||
+ p_chain->dir == EXFAT_EOF_CLUSTER ||
+ p_chain->dir < EXFAT_FIRST_CLUSTER)
+ return 0;
+
+ /* no cluster to truncate */
+ if (p_chain->size == 0)
+ return 0;
+
+ /* check cluster validity */
+ if (p_chain->dir < 2 || p_chain->dir >= sbi->num_clusters) {
+ exfat_msg(sb, KERN_ERR, "invalid start cluster (%u)",
+ p_chain->dir);
+ return -EIO;
+ }
+
+ set_bit(EXFAT_SB_DIRTY, &sbi->s_state);
+ clu = p_chain->dir;
+
+ if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ do {
+ exfat_clear_bitmap(inode, clu);
+ clu++;
+
+ num_clusters++;
+ } while (num_clusters < p_chain->size);
+ } else {
+ do {
+ exfat_clear_bitmap(inode, clu);
+
+ if (exfat_get_next_cluster(sb, &clu))
+ goto dec_used_clus;
+
+ num_clusters++;
+ } while (clu != EXFAT_EOF_CLUSTER);
+ }
+
+dec_used_clus:
+ sbi->used_clusters -= num_clusters;
+ return 0;
+}
+
+int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain,
+ unsigned int *ret_clu)
+{
+ unsigned int clu, next;
+ unsigned int count = 0;
+
+ next = p_chain->dir;
+ if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ *ret_clu = next + p_chain->size - 1;
+ return 0;
+ }
+
+ do {
+ count++;
+ clu = next;
+ if (exfat_ent_get(sb, clu, &next))
+ return -EIO;
+ } while (next != EXFAT_EOF_CLUSTER);
+
+ if (p_chain->size != count) {
+ exfat_fs_error(sb,
+ "bogus directory size (clus : ondisk(%d) != counted(%d))",
+ p_chain->size, count);
+ return -EIO;
+ }
+
+ *ret_clu = clu;
+ return 0;
+}
+
+static inline int exfat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
+{
+ int i, err = 0;
+
+ for (i = 0; i < nr_bhs; i++)
+ write_dirty_buffer(bhs[i], 0);
+
+ for (i = 0; i < nr_bhs; i++) {
+ wait_on_buffer(bhs[i]);
+ if (!err && !buffer_uptodate(bhs[i]))
+ err = -EIO;
+ }
+ return err;
+}
+
+int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
+{
+ struct super_block *sb = dir->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bhs[MAX_BUF_PER_PAGE];
+ int nr_bhs = MAX_BUF_PER_PAGE;
+ sector_t blknr, last_blknr;
+ int err, i, n;
+
+ blknr = exfat_cluster_to_sector(sbi, clu);
+ last_blknr = blknr + sbi->sect_per_clus;
+
+ if (last_blknr > sbi->num_sectors && sbi->num_sectors > 0) {
+ exfat_fs_error_ratelimit(sb,
+ "%s: out of range(sect:%llu len:%u)",
+ __func__, (unsigned long long)blknr,
+ sbi->sect_per_clus);
+ return -EIO;
+ }
+
+ /* Zeroing the unused blocks on this cluster */
+ n = 0;
+ while (blknr < last_blknr) {
+ bhs[n] = sb_getblk(sb, blknr);
+ if (!bhs[n]) {
+ err = -ENOMEM;
+ goto release_bhs;
+ }
+ memset(bhs[n]->b_data, 0, sb->s_blocksize);
+ exfat_update_bh(sb, bhs[n], 0);
+
+ n++;
+ blknr++;
+
+ if (n == nr_bhs) {
+ if (IS_DIRSYNC(dir)) {
+ err = exfat_sync_bhs(bhs, n);
+ if (err)
+ goto release_bhs;
+ }
+
+ for (i = 0; i < n; i++)
+ brelse(bhs[i]);
+ n = 0;
+ }
+ }
+
+ if (IS_DIRSYNC(dir)) {
+ err = exfat_sync_bhs(bhs, n);
+ if (err)
+ goto release_bhs;
+ }
+
+ for (i = 0; i < n; i++)
+ brelse(bhs[i]);
+
+ return 0;
+
+release_bhs:
+ exfat_msg(sb, KERN_ERR, "failed zeroed sect %llu\n",
+ (unsigned long long)blknr);
+ for (i = 0; i < n; i++)
+ bforget(bhs[i]);
+ return err;
+}
+
+int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
+ struct exfat_chain *p_chain)
+{
+ int ret = -ENOSPC;
+ unsigned int num_clusters = 0, total_cnt;
+ unsigned int hint_clu, new_clu, last_clu = EXFAT_EOF_CLUSTER;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ total_cnt = EXFAT_DATA_CLUSTER_COUNT(sbi);
+
+ if (unlikely(total_cnt < sbi->used_clusters)) {
+ exfat_fs_error_ratelimit(sb,
+ "%s: invalid used clusters(t:%u,u:%u)\n",
+ __func__, total_cnt, sbi->used_clusters);
+ return -EIO;
+ }
+
+ if (num_alloc > total_cnt - sbi->used_clusters)
+ return -ENOSPC;
+
+ hint_clu = p_chain->dir;
+ /* find new cluster */
+ if (hint_clu == EXFAT_EOF_CLUSTER) {
+ if (sbi->clu_srch_ptr < EXFAT_FIRST_CLUSTER) {
+ exfat_msg(sb, KERN_ERR,
+ "sbi->clu_srch_ptr is invalid (%u)\n",
+ sbi->clu_srch_ptr);
+ sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
+ }
+
+ hint_clu = exfat_find_free_bitmap(sb, sbi->clu_srch_ptr);
+ if (hint_clu == EXFAT_EOF_CLUSTER)
+ return -ENOSPC;
+ }
+
+ /* check cluster validity */
+ if (hint_clu < EXFAT_FIRST_CLUSTER || hint_clu >= sbi->num_clusters) {
+ exfat_msg(sb, KERN_ERR, "hint_cluster is invalid (%u)\n",
+ hint_clu);
+ hint_clu = EXFAT_FIRST_CLUSTER;
+ if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ if (exfat_chain_cont_cluster(sb, p_chain->dir,
+ num_clusters))
+ return -EIO;
+ p_chain->flags = ALLOC_FAT_CHAIN;
+ }
+ }
+
+ set_bit(EXFAT_SB_DIRTY, &sbi->s_state);
+
+ p_chain->dir = EXFAT_EOF_CLUSTER;
+
+ while ((new_clu = exfat_find_free_bitmap(sb, hint_clu)) !=
+ EXFAT_EOF_CLUSTER) {
+ if (new_clu != hint_clu &&
+ p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ if (exfat_chain_cont_cluster(sb, p_chain->dir,
+ num_clusters)) {
+ ret = -EIO;
+ goto free_cluster;
+ }
+ p_chain->flags = ALLOC_FAT_CHAIN;
+ }
+
+ /* update allocation bitmap */
+ if (exfat_set_bitmap(inode, new_clu)) {
+ ret = -EIO;
+ goto free_cluster;
+ }
+
+ num_clusters++;
+
+ /* update FAT table */
+ if (p_chain->flags == ALLOC_FAT_CHAIN) {
+ if (exfat_ent_set(sb, new_clu, EXFAT_EOF_CLUSTER)) {
+ ret = -EIO;
+ goto free_cluster;
+ }
+ }
+
+ if (p_chain->dir == EXFAT_EOF_CLUSTER) {
+ p_chain->dir = new_clu;
+ } else if (p_chain->flags == ALLOC_FAT_CHAIN) {
+ if (exfat_ent_set(sb, last_clu, new_clu)) {
+ ret = -EIO;
+ goto free_cluster;
+ }
+ }
+ last_clu = new_clu;
+
+ if (--num_alloc == 0) {
+ sbi->clu_srch_ptr = hint_clu;
+ sbi->used_clusters += num_clusters;
+
+ p_chain->size += num_clusters;
+ return 0;
+ }
+
+ hint_clu = new_clu + 1;
+ if (hint_clu >= sbi->num_clusters) {
+ hint_clu = EXFAT_FIRST_CLUSTER;
+
+ if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ if (exfat_chain_cont_cluster(sb, p_chain->dir,
+ num_clusters)) {
+ ret = -EIO;
+ goto free_cluster;
+ }
+ p_chain->flags = ALLOC_FAT_CHAIN;
+ }
+ }
+ }
+free_cluster:
+ if (num_clusters)
+ exfat_free_cluster(inode, p_chain);
+ return ret;
+}
+
+int exfat_count_num_clusters(struct super_block *sb,
+ struct exfat_chain *p_chain, unsigned int *ret_count)
+{
+ unsigned int i, count;
+ unsigned int clu;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ if (!p_chain->dir || p_chain->dir == EXFAT_EOF_CLUSTER) {
+ *ret_count = 0;
+ return 0;
+ }
+
+ if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ *ret_count = p_chain->size;
+ return 0;
+ }
+
+ clu = p_chain->dir;
+ count = 0;
+ for (i = EXFAT_FIRST_CLUSTER; i < sbi->num_clusters; i++) {
+ count++;
+ if (exfat_ent_get(sb, clu, &clu))
+ return -EIO;
+ if (clu == EXFAT_EOF_CLUSTER)
+ break;
+ }
+
+ *ret_count = count;
+ return 0;
+}
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
new file mode 100644
index 000000000000..483f683757aa
--- /dev/null
+++ b/fs/exfat/file.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/buffer_head.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+static int exfat_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ loff_t start = i_size_read(inode), count = size - i_size_read(inode);
+ int err, err2;
+
+ err = generic_cont_expand_simple(inode, size);
+ if (err)
+ return err;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ mark_inode_dirty(inode);
+
+ if (!IS_SYNC(inode))
+ return 0;
+
+ err = filemap_fdatawrite_range(mapping, start, start + count - 1);
+ err2 = sync_mapping_buffers(mapping);
+ if (!err)
+ err = err2;
+ err2 = write_inode_now(inode, 1);
+ if (!err)
+ err = err2;
+ if (err)
+ return err;
+
+ return filemap_fdatawait_range(mapping, start, start + count - 1);
+}
+
+static bool exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode)
+{
+ mode_t allow_utime = sbi->options.allow_utime;
+
+ if (!uid_eq(current_fsuid(), inode->i_uid)) {
+ if (in_group_p(inode->i_gid))
+ allow_utime >>= 3;
+ if (allow_utime & MAY_WRITE)
+ return true;
+ }
+
+ /* use a default check */
+ return false;
+}
+
+static int exfat_sanitize_mode(const struct exfat_sb_info *sbi,
+ struct inode *inode, umode_t *mode_ptr)
+{
+ mode_t i_mode, mask, perm;
+
+ i_mode = inode->i_mode;
+
+ mask = (S_ISREG(i_mode) || S_ISLNK(i_mode)) ?
+ sbi->options.fs_fmask : sbi->options.fs_dmask;
+ perm = *mode_ptr & ~(S_IFMT | mask);
+
+ /* Of the r and x bits, all (subject to umask) must be present. */
+ if ((perm & 0555) != (i_mode & 0555))
+ return -EPERM;
+
+ if (exfat_mode_can_hold_ro(inode)) {
+ /*
+ * Of the w bits, either all (subject to umask) or none must
+ * be present.
+ */
+ if ((perm & 0222) && ((perm & 0222) != (0222 & ~mask)))
+ return -EPERM;
+ } else {
+ /*
+ * If exfat_mode_can_hold_ro(inode) is false, can't change
+ * w bits.
+ */
+ if ((perm & 0222) != (0222 & ~mask))
+ return -EPERM;
+ }
+
+ *mode_ptr &= S_IFMT | perm;
+
+ return 0;
+}
+
+/* resize the file length */
+int __exfat_truncate(struct inode *inode, loff_t new_size)
+{
+ unsigned int num_clusters_new, num_clusters_phys;
+ unsigned int last_clu = EXFAT_FREE_CLUSTER;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep, *ep2;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct exfat_entry_set_cache *es = NULL;
+ int evict = (ei->dir.dir == DIR_DELETED) ? 1 : 0;
+
+ /* only regular files and directories can be truncated */
+ if (ei->type != TYPE_FILE && ei->type != TYPE_DIR)
+ return -EPERM;
+
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+
+ num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi);
+ num_clusters_phys =
+ EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk, sbi);
+
+ exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags);
+
+ if (new_size > 0) {
+ /*
+ * Truncate FAT chain num_clusters after the first cluster
+ * num_clusters = min(new, phys);
+ */
+ unsigned int num_clusters =
+ min(num_clusters_new, num_clusters_phys);
+
+ /*
+ * Follow FAT chain
+ * (defensive coding: works even with a corrupted FAT table)
+ */
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ clu.dir += num_clusters;
+ clu.size -= num_clusters;
+ } else {
+ while (num_clusters > 0) {
+ last_clu = clu.dir;
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+
+ num_clusters--;
+ clu.size--;
+ }
+ }
+ } else {
+ ei->flags = ALLOC_NO_FAT_CHAIN;
+ ei->start_clu = EXFAT_EOF_CLUSTER;
+ }
+
+ i_size_write(inode, new_size);
+
+ if (ei->type == TYPE_FILE)
+ ei->attr |= ATTR_ARCHIVE;
+
+ /* update the directory entry */
+ if (!evict) {
+ struct timespec64 ts;
+
+ es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ ep2 = ep + 1;
+
+ ts = current_time(inode);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.modify_tz,
+ &ep->dentry.file.modify_time,
+ &ep->dentry.file.modify_date,
+ &ep->dentry.file.modify_time_ms);
+ ep->dentry.file.attr = cpu_to_le16(ei->attr);
+
+ /* File size should be zero if there is no cluster allocated */
+ if (ei->start_clu == EXFAT_EOF_CLUSTER) {
+ ep->dentry.stream.valid_size = 0;
+ ep->dentry.stream.size = 0;
+ } else {
+ ep->dentry.stream.valid_size = cpu_to_le64(new_size);
+ ep->dentry.stream.size = ep->dentry.stream.valid_size;
+ }
+
+ if (new_size == 0) {
+ /* A directory can never be truncated to size zero */
+ WARN_ON(ei->type != TYPE_FILE);
+
+ ep2->dentry.stream.flags = ALLOC_FAT_CHAIN;
+ ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
+ }
+
+ if (exfat_update_dir_chksum_with_entry_set(sb, es,
+ inode_needs_sync(inode)))
+ return -EIO;
+ kfree(es);
+ }
+
+ /* cut off from the FAT chain */
+ if (ei->flags == ALLOC_FAT_CHAIN && last_clu != EXFAT_FREE_CLUSTER &&
+ last_clu != EXFAT_EOF_CLUSTER) {
+ if (exfat_ent_set(sb, last_clu, EXFAT_EOF_CLUSTER))
+ return -EIO;
+ }
+
+ /* invalidate the exfat cache */
+ exfat_cache_inval_inode(inode);
+
+ /* hint information */
+ ei->hint_bmap.off = EXFAT_EOF_CLUSTER;
+ ei->hint_bmap.clu = EXFAT_EOF_CLUSTER;
+ if (ei->rwoffset > new_size)
+ ei->rwoffset = new_size;
+
+ /* hint_stat will be used if this is directory. */
+ ei->hint_stat.eidx = 0;
+ ei->hint_stat.clu = ei->start_clu;
+ ei->hint_femp.eidx = EXFAT_HINT_NONE;
+
+ /* free the clusters */
+ if (exfat_free_cluster(inode, &clu))
+ return -EIO;
+
+ exfat_set_vol_flags(sb, VOL_CLEAN);
+
+ return 0;
+}
+
+void exfat_truncate(struct inode *inode, loff_t size)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned int blocksize = 1 << inode->i_blkbits;
+ loff_t aligned_size;
+ int err;
+
+ mutex_lock(&sbi->s_lock);
+ if (EXFAT_I(inode)->start_clu == 0) {
+ /*
+ * An empty file should have start_clu == ~0 (EXFAT_EOF_CLUSTER);
+ * a start_clu of 0 means the cluster was never allocated.
+ */
+ exfat_fs_error(sb, "tried to truncate zeroed cluster.");
+ goto write_size;
+ }
+
+ err = __exfat_truncate(inode, i_size_read(inode));
+ if (err)
+ goto write_size;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ if (IS_DIRSYNC(inode))
+ exfat_sync_inode(inode);
+ else
+ mark_inode_dirty(inode);
+
+ inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) &
+ ~(sbi->cluster_size - 1)) >> inode->i_blkbits;
+write_size:
+ aligned_size = i_size_read(inode);
+ if (aligned_size & (blocksize - 1)) {
+ aligned_size |= (blocksize - 1);
+ aligned_size++;
+ }
+
+ if (EXFAT_I(inode)->i_size_ondisk > i_size_read(inode))
+ EXFAT_I(inode)->i_size_ondisk = aligned_size;
+
+ if (EXFAT_I(inode)->i_size_aligned > i_size_read(inode))
+ EXFAT_I(inode)->i_size_aligned = aligned_size;
+ mutex_unlock(&sbi->s_lock);
+}
+
+int exfat_getattr(const struct path *path, struct kstat *stat,
+ unsigned int request_mask, unsigned int query_flags)
+{
+ struct inode *inode = d_backing_inode(path->dentry);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+ generic_fillattr(inode, stat);
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = ei->i_crtime.tv_sec;
+ stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
+ stat->blksize = EXFAT_SB(inode->i_sb)->cluster_size;
+ return 0;
+}
+
+int exfat_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb);
+ struct inode *inode = dentry->d_inode;
+ unsigned int ia_valid;
+ int error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size > i_size_read(inode)) {
+ error = exfat_cont_expand(inode, attr->ia_size);
+ if (error || attr->ia_valid == ATTR_SIZE)
+ return error;
+ attr->ia_valid &= ~ATTR_SIZE;
+ }
+
+ /* Check for setting the inode time. */
+ ia_valid = attr->ia_valid;
+ if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
+ exfat_allow_set_time(sbi, inode)) {
+ attr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET |
+ ATTR_TIMES_SET);
+ }
+
+ error = setattr_prepare(dentry, attr);
+ attr->ia_valid = ia_valid;
+ if (error)
+ goto out;
+
+ if (((attr->ia_valid & ATTR_UID) &&
+ !uid_eq(attr->ia_uid, sbi->options.fs_uid)) ||
+ ((attr->ia_valid & ATTR_GID) &&
+ !gid_eq(attr->ia_gid, sbi->options.fs_gid)) ||
+ ((attr->ia_valid & ATTR_MODE) &&
+ (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) {
+ error = -EPERM;
+ goto out;
+ }
+
+ /*
+ * We don't return -EPERM here. Yes, strange, but this is too
+ * old behavior.
+ */
+ if (attr->ia_valid & ATTR_MODE) {
+ if (exfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0)
+ attr->ia_valid &= ~ATTR_MODE;
+ }
+
+ if (attr->ia_valid & ATTR_SIZE) {
+ error = exfat_block_truncate_page(inode, attr->ia_size);
+ if (error)
+ goto out;
+
+ down_write(&EXFAT_I(inode)->truncate_lock);
+ truncate_setsize(inode, attr->ia_size);
+ exfat_truncate(inode, attr->ia_size);
+ up_write(&EXFAT_I(inode)->truncate_lock);
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
+out:
+ return error;
+}
+
+const struct file_operations exfat_file_operations = {
+ .llseek = generic_file_llseek,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+ .mmap = generic_file_mmap,
+ .fsync = generic_file_fsync,
+ .splice_read = generic_file_splice_read,
+};
+
+const struct inode_operations exfat_file_inode_operations = {
+ .setattr = exfat_setattr,
+ .getattr = exfat_getattr,
+};
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
new file mode 100644
index 000000000000..06887492f54b
--- /dev/null
+++ b/fs/exfat/inode.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/init.h>
+#include <linux/buffer_head.h>
+#include <linux/mpage.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/time.h>
+#include <linux/writeback.h>
+#include <linux/uio.h>
+#include <linux/random.h>
+#include <linux/iversion.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+static int __exfat_write_inode(struct inode *inode, int sync)
+{
+ int ret = -EIO;
+ unsigned long long on_disk_size;
+ struct exfat_dentry *ep, *ep2;
+ struct exfat_entry_set_cache *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ bool is_dir = (ei->type == TYPE_DIR) ? true : false;
+
+ if (inode->i_ino == EXFAT_ROOT_INO)
+ return 0;
+
+ /*
+ * If the inode has already been unlinked, there is no need to update it.
+ */
+ if (ei->dir.dir == DIR_DELETED)
+ return 0;
+
+ if (is_dir && ei->dir.dir == sbi->root_dir && ei->entry == -1)
+ return 0;
+
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+
+ /* get the directory entry of given file or directory */
+ es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES,
+ &ep);
+ if (!es)
+ return -EIO;
+ ep2 = ep + 1;
+
+ ep->dentry.file.attr = cpu_to_le16(exfat_make_attr(inode));
+
+ /* set FILE_INFO structure using the acquired struct exfat_dentry */
+ exfat_set_entry_time(sbi, &ei->i_crtime,
+ &ep->dentry.file.create_tz,
+ &ep->dentry.file.create_time,
+ &ep->dentry.file.create_date,
+ &ep->dentry.file.create_time_ms);
+ exfat_set_entry_time(sbi, &inode->i_mtime,
+ &ep->dentry.file.modify_tz,
+ &ep->dentry.file.modify_time,
+ &ep->dentry.file.modify_date,
+ &ep->dentry.file.modify_time_ms);
+ exfat_set_entry_time(sbi, &inode->i_atime,
+ &ep->dentry.file.access_tz,
+ &ep->dentry.file.access_time,
+ &ep->dentry.file.access_date,
+ NULL);
+
+ /* File size should be zero if there is no cluster allocated */
+ on_disk_size = i_size_read(inode);
+
+ if (ei->start_clu == EXFAT_EOF_CLUSTER)
+ on_disk_size = 0;
+
+ ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size);
+ ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
+
+ ret = exfat_update_dir_chksum_with_entry_set(sb, es, sync);
+ kfree(es);
+ return ret;
+}
+
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ int ret;
+
+ mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
+ ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+ mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
+
+ return ret;
+}
+
+void exfat_sync_inode(struct inode *inode)
+{
+ lockdep_assert_held(&EXFAT_SB(inode->i_sb)->s_lock);
+ __exfat_write_inode(inode, 1);
+}
+
+/*
+ * Input: inode, (logical) clu_offset, target allocation area
+ * Output: errcode, cluster number
+ * *clu = ~0 if a new cluster cannot be allocated
+ */
+static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
+ unsigned int *clu, int create)
+{
+ int ret, modified = false;
+ unsigned int last_clu;
+ struct exfat_chain new_clu;
+ struct exfat_dentry *ep;
+ struct exfat_entry_set_cache *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ unsigned int local_clu_offset = clu_offset;
+ unsigned int num_to_be_allocated = 0, num_clusters = 0;
+
+ ei->rwoffset = EXFAT_CLU_TO_B(clu_offset, sbi);
+
+ if (EXFAT_I(inode)->i_size_ondisk > 0)
+ num_clusters =
+ EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk,
+ sbi);
+
+ if (clu_offset >= num_clusters)
+ num_to_be_allocated = clu_offset - num_clusters + 1;
+
+ if (!create && (num_to_be_allocated > 0)) {
+ *clu = EXFAT_EOF_CLUSTER;
+ return 0;
+ }
+
+ *clu = last_clu = ei->start_clu;
+
+ if (ei->flags == ALLOC_NO_FAT_CHAIN) {
+ if (clu_offset > 0 && *clu != EXFAT_EOF_CLUSTER) {
+ last_clu += clu_offset - 1;
+
+ if (clu_offset == num_clusters)
+ *clu = EXFAT_EOF_CLUSTER;
+ else
+ *clu += clu_offset;
+ }
+ } else if (ei->type == TYPE_FILE) {
+ unsigned int fclus = 0;
+ int err = exfat_get_cluster(inode, clu_offset,
+ &fclus, clu, &last_clu, 1);
+ if (err)
+ return -EIO;
+
+ clu_offset -= fclus;
+ } else {
+ /* hint information */
+ if (clu_offset > 0 && ei->hint_bmap.off != EXFAT_EOF_CLUSTER &&
+ ei->hint_bmap.off > 0 && clu_offset >= ei->hint_bmap.off) {
+ clu_offset -= ei->hint_bmap.off;
+ /* hint_bmap.clu should be valid */
+ WARN_ON(ei->hint_bmap.clu < 2);
+ *clu = ei->hint_bmap.clu;
+ }
+
+ while (clu_offset > 0 && *clu != EXFAT_EOF_CLUSTER) {
+ last_clu = *clu;
+ if (exfat_get_next_cluster(sb, clu))
+ return -EIO;
+ clu_offset--;
+ }
+ }
+
+ if (*clu == EXFAT_EOF_CLUSTER) {
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+
+ new_clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ?
+ EXFAT_EOF_CLUSTER : last_clu + 1;
+ new_clu.size = 0;
+ new_clu.flags = ei->flags;
+
+ /* allocate a cluster */
+ if (num_to_be_allocated < 1) {
+ /* Broken FAT (i_size > allocated FAT) */
+ exfat_fs_error(sb, "broken FAT chain.");
+ return -EIO;
+ }
+
+ ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu);
+ if (ret)
+ return ret;
+
+ if (new_clu.dir == EXFAT_EOF_CLUSTER ||
+ new_clu.dir == EXFAT_FREE_CLUSTER) {
+ exfat_fs_error(sb,
+ "bogus cluster new allocated (last_clu : %u, new_clu : %u)",
+ last_clu, new_clu.dir);
+ return -EIO;
+ }
+
+ /* append to the FAT chain */
+ if (last_clu == EXFAT_EOF_CLUSTER) {
+ if (new_clu.flags == ALLOC_FAT_CHAIN)
+ ei->flags = ALLOC_FAT_CHAIN;
+ ei->start_clu = new_clu.dir;
+ modified = true;
+ } else {
+ if (new_clu.flags != ei->flags) {
+ /* the no-fat-chain bit is cleared,
+ * so the FAT chain must be kept in
+ * sync with the allocation bitmap
+ */
+ exfat_chain_cont_cluster(sb, ei->start_clu,
+ num_clusters);
+ ei->flags = ALLOC_FAT_CHAIN;
+ modified = true;
+ }
+ if (new_clu.flags == ALLOC_FAT_CHAIN)
+ if (exfat_ent_set(sb, last_clu, new_clu.dir))
+ return -EIO;
+ }
+
+ num_clusters += num_to_be_allocated;
+ *clu = new_clu.dir;
+
+ if (ei->dir.dir != DIR_DELETED) {
+ es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ /* get stream entry */
+ ep++;
+
+ /* update directory entry */
+ if (modified) {
+ if (ep->dentry.stream.flags != ei->flags)
+ ep->dentry.stream.flags = ei->flags;
+
+ if (le32_to_cpu(ep->dentry.stream.start_clu) !=
+ ei->start_clu)
+ ep->dentry.stream.start_clu =
+ cpu_to_le32(ei->start_clu);
+
+ ep->dentry.stream.valid_size =
+ cpu_to_le64(i_size_read(inode));
+ ep->dentry.stream.size =
+ ep->dentry.stream.valid_size;
+ }
+
+ if (exfat_update_dir_chksum_with_entry_set(sb, es,
+ inode_needs_sync(inode)))
+ return -EIO;
+ kfree(es);
+
+ } /* end of if != DIR_DELETED */
+
+ inode->i_blocks +=
+ num_to_be_allocated << sbi->sect_per_clus_bits;
+
+ /*
+ * Advance *clu along the FAT chain (minding holes) because the
+ * caller of this function expects *clu to be the last cluster.
+ * This only matters when num_to_be_allocated >= 2:
+ * *clu = (the first cluster of the allocated chain) =>
+ * (the last cluster of ...)
+ */
+ if (ei->flags == ALLOC_NO_FAT_CHAIN) {
+ *clu += num_to_be_allocated - 1;
+ } else {
+ while (num_to_be_allocated > 1) {
+ if (exfat_get_next_cluster(sb, clu))
+ return -EIO;
+ num_to_be_allocated--;
+ }
+ }
+
+ }
+
+ /* hint information */
+ ei->hint_bmap.off = local_clu_offset;
+ ei->hint_bmap.clu = *clu;
+
+ return 0;
+}
+
+static int exfat_map_new_buffer(struct exfat_inode_info *ei,
+ struct buffer_head *bh, loff_t pos)
+{
+ if (buffer_delay(bh) && pos > ei->i_size_aligned)
+ return -EIO;
+ set_buffer_new(bh);
+
+ /*
+ * Adjust i_size_aligned if i_size_ondisk is bigger than it.
+ */
+ if (ei->i_size_ondisk > ei->i_size_aligned)
+ ei->i_size_aligned = ei->i_size_ondisk;
+ return 0;
+}
+
+static int exfat_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+ int err = 0;
+ unsigned long mapped_blocks = 0;
+ unsigned int cluster, sec_offset;
+ sector_t last_block;
+ sector_t phys = 0;
+ loff_t pos;
+
+ mutex_lock(&sbi->s_lock);
+ last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
+ if (iblock >= last_block && !create)
+ goto done;
+
+ /* Is this block already allocated? */
+ err = exfat_map_cluster(inode, iblock >> sbi->sect_per_clus_bits,
+ &cluster, create);
+ if (err) {
+ if (err != -ENOSPC)
+ exfat_fs_error_ratelimit(sb,
+ "failed to bmap (inode : %p iblock : %llu, err : %d)",
+ inode, (unsigned long long)iblock, err);
+ goto unlock_ret;
+ }
+
+ if (cluster == EXFAT_EOF_CLUSTER)
+ goto done;
+
+ /* sector offset in cluster */
+ sec_offset = iblock & (sbi->sect_per_clus - 1);
+
+ phys = exfat_cluster_to_sector(sbi, cluster) + sec_offset;
+ mapped_blocks = sbi->sect_per_clus - sec_offset;
+ max_blocks = min(mapped_blocks, max_blocks);
+
+ /* Treat newly added block / cluster */
+ if (iblock < last_block)
+ create = 0;
+
+ if (create || buffer_delay(bh_result)) {
+ pos = EXFAT_BLK_TO_B((iblock + 1), sb);
+ if (ei->i_size_ondisk < pos)
+ ei->i_size_ondisk = pos;
+ }
+
+ if (create) {
+ err = exfat_map_new_buffer(ei, bh_result, pos);
+ if (err) {
+ exfat_fs_error(sb,
+ "requested for bmap out of range(pos : (%llu) > i_size_aligned(%llu)\n",
+ pos, ei->i_size_aligned);
+ goto unlock_ret;
+ }
+ }
+
+ if (buffer_delay(bh_result))
+ clear_buffer_delay(bh_result);
+ map_bh(bh_result, sb, phys);
+done:
+ bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
+unlock_ret:
+ mutex_unlock(&sbi->s_lock);
+ return err;
+}
+
+static int exfat_readpage(struct file *file, struct page *page)
+{
+ return mpage_readpage(page, exfat_get_block);
+}
+
+static int exfat_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
+{
+ return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
+}
+
+static int exfat_writepage(struct page *page, struct writeback_control *wbc)
+{
+ return block_write_full_page(page, exfat_get_block, wbc);
+}
+
+static int exfat_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return mpage_writepages(mapping, wbc, exfat_get_block);
+}
+
+static void exfat_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > i_size_read(inode)) {
+ truncate_pagecache(inode, i_size_read(inode));
+ exfat_truncate(inode, EXFAT_I(inode)->i_size_aligned);
+ }
+}
+
+static int exfat_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int flags,
+ struct page **pagep, void **fsdata)
+{
+ int ret;
+
+ *pagep = NULL;
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ exfat_get_block,
+ &EXFAT_I(mapping->host)->i_size_ondisk);
+
+ if (ret < 0)
+ exfat_write_failed(mapping, pos+len);
+
+ return ret;
+}
+
+static int exfat_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *pagep, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ int err;
+
+ err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+
+ if (EXFAT_I(inode)->i_size_aligned < i_size_read(inode)) {
+ exfat_fs_error(inode->i_sb,
+ "invalid size(size(%llu) > aligned(%llu)\n",
+ i_size_read(inode), EXFAT_I(inode)->i_size_aligned);
+ return -EIO;
+ }
+
+ if (err < len)
+ exfat_write_failed(mapping, pos+len);
+
+ if (!(err < 0) && !(ei->attr & ATTR_ARCHIVE)) {
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ ei->attr |= ATTR_ARCHIVE;
+ mark_inode_dirty(inode);
+ }
+
+ return err;
+}
+
+static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = mapping->host;
+ loff_t size = iocb->ki_pos + iov_iter_count(iter);
+ int rw = iov_iter_rw(iter);
+ ssize_t ret;
+
+ if (rw == WRITE) {
+ /*
+ * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
+ * so we would need to update ->i_size_aligned to the block
+ * boundary, and we would also have to fill the remaining area
+ * or hole with NUL bytes to keep ->i_size_aligned valid.
+ *
+ * Return 0 and fall back to a normal buffered write instead.
+ */
+ if (EXFAT_I(inode)->i_size_aligned < size)
+ return 0;
+ }
+
+ /*
+ * Need to use the DIO_LOCKING for avoiding the race
+ * condition of exfat_get_block() and ->truncate().
+ */
+ ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
+ if (ret < 0 && (rw & WRITE))
+ exfat_write_failed(mapping, size);
+ return ret;
+}
+
+static sector_t exfat_aop_bmap(struct address_space *mapping, sector_t block)
+{
+ sector_t blocknr;
+
+ /* exfat_get_cluster() assumes the requested blocknr isn't truncated. */
+ down_read(&EXFAT_I(mapping->host)->truncate_lock);
+ blocknr = generic_block_bmap(mapping, block, exfat_get_block);
+ up_read(&EXFAT_I(mapping->host)->truncate_lock);
+ return blocknr;
+}
+
+/*
+ * exfat_block_truncate_page() zeroes out a mapping from file offset `from'
+ * up to the end of the block which corresponds to `from'.
+ * This is required during truncate to physically zero out the tail end
+ * of that block so it doesn't yield old data if the file is later grown.
+ * It also avoids fsx failures caused by reading data past EOF.
+ */
+int exfat_block_truncate_page(struct inode *inode, loff_t from)
+{
+ return block_truncate_page(inode->i_mapping, from, exfat_get_block);
+}
+
+static const struct address_space_operations exfat_aops = {
+ .readpage = exfat_readpage,
+ .readpages = exfat_readpages,
+ .writepage = exfat_writepage,
+ .writepages = exfat_writepages,
+ .write_begin = exfat_write_begin,
+ .write_end = exfat_write_end,
+ .direct_IO = exfat_direct_IO,
+ .bmap = exfat_aop_bmap
+};
+
+static inline unsigned long exfat_hash(loff_t i_pos)
+{
+ return hash_32(i_pos, EXFAT_HASH_BITS);
+}
+
+void exfat_hash_inode(struct inode *inode, loff_t i_pos)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+ struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
+
+ spin_lock(&sbi->inode_hash_lock);
+ EXFAT_I(inode)->i_pos = i_pos;
+ hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head);
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
+void exfat_unhash_inode(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ spin_lock(&sbi->inode_hash_lock);
+ hlist_del_init(&EXFAT_I(inode)->i_hash_fat);
+ EXFAT_I(inode)->i_pos = 0;
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
+struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *info;
+ struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
+ struct inode *inode = NULL;
+
+ spin_lock(&sbi->inode_hash_lock);
+ hlist_for_each_entry(info, head, i_hash_fat) {
+ WARN_ON(info->vfs_inode.i_sb != sb);
+
+ if (i_pos != info->i_pos)
+ continue;
+ inode = igrab(&info->vfs_inode);
+ if (inode)
+ break;
+ }
+ spin_unlock(&sbi->inode_hash_lock);
+ return inode;
+}
+
+/* doesn't deal with root inode */
+static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ loff_t size = info->size;
+
+ memcpy(&ei->dir, &info->dir, sizeof(struct exfat_chain));
+ ei->entry = info->entry;
+ ei->attr = info->attr;
+ ei->start_clu = info->start_clu;
+ ei->flags = info->flags;
+ ei->type = info->type;
+
+ ei->version = 0;
+ ei->hint_stat.eidx = 0;
+ ei->hint_stat.clu = info->start_clu;
+ ei->hint_femp.eidx = EXFAT_HINT_NONE;
+ ei->rwoffset = 0;
+ ei->hint_bmap.off = EXFAT_EOF_CLUSTER;
+ ei->i_pos = 0;
+
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ inode_inc_iversion(inode);
+ inode->i_generation = prandom_u32();
+
+ if (info->attr & ATTR_SUBDIR) { /* directory */
+ inode->i_generation &= ~1;
+ inode->i_mode = exfat_make_mode(sbi, info->attr, 0777);
+ inode->i_op = &exfat_dir_inode_operations;
+ inode->i_fop = &exfat_dir_operations;
+ set_nlink(inode, info->num_subdirs);
+ } else { /* regular file */
+ inode->i_generation |= 1;
+ inode->i_mode = exfat_make_mode(sbi, info->attr, 0777);
+ inode->i_op = &exfat_file_inode_operations;
+ inode->i_fop = &exfat_file_operations;
+ inode->i_mapping->a_ops = &exfat_aops;
+ inode->i_mapping->nrpages = 0;
+ }
+
+ i_size_write(inode, size);
+
+ /* the ondisk and aligned sizes should be aligned to the block size */
+ if (size & (inode->i_sb->s_blocksize - 1)) {
+ size |= (inode->i_sb->s_blocksize - 1);
+ size++;
+ }
+
+ ei->i_size_aligned = size;
+ ei->i_size_ondisk = size;
+
+ exfat_save_attr(inode, info->attr);
+
+ inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) &
+ ~(sbi->cluster_size - 1)) >> inode->i_blkbits;
+ inode->i_mtime = info->mtime;
+ inode->i_ctime = info->mtime;
+ ei->i_crtime = info->crtime;
+ inode->i_atime = info->atime;
+
+ exfat_cache_init_inode(inode);
+
+ return 0;
+}
+
+struct inode *exfat_build_inode(struct super_block *sb,
+ struct exfat_dir_entry *info, loff_t i_pos)
+{
+ struct inode *inode;
+ int err;
+
+ inode = exfat_iget(sb, i_pos);
+ if (inode)
+ goto out;
+ inode = new_inode(sb);
+ if (!inode) {
+ inode = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
+ inode_set_iversion(inode, 1);
+ err = exfat_fill_inode(inode, info);
+ if (err) {
+ iput(inode);
+ inode = ERR_PTR(err);
+ goto out;
+ }
+ exfat_hash_inode(inode, i_pos);
+ insert_inode_hash(inode);
+out:
+ return inode;
+}
+
+void exfat_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+
+ if (!inode->i_nlink) {
+ i_size_write(inode, 0);
+ mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
+ __exfat_truncate(inode, 0);
+ mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
+ }
+
+ invalidate_inode_buffers(inode);
+ clear_inode(inode);
+ exfat_cache_inval_inode(inode);
+ exfat_unhash_inode(inode);
+}
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
new file mode 100644
index 000000000000..14a3300848f6
--- /dev/null
+++ b/fs/exfat/misc.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Written 1992,1993 by Werner Almesberger
+ * 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980
+ * and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru)
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+/*
+ * exfat_fs_error reports a file system problem that might indicate data
+ * corruption/inconsistency. Depending on the 'errors' mount option,
+ * either panic() is called, or an error message is printed and nothing
+ * else is done, or the filesystem is remounted read-only (the default).
+ * In case the file system is remounted read-only, it can be made writable
+ * again by remounting it.
+ */
+void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
+{
+ struct exfat_mount_options *opts = &EXFAT_SB(sb)->options;
+ va_list args;
+ struct va_format vaf;
+
+ if (report) {
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ exfat_msg(sb, KERN_ERR, "error, %pV\n", &vaf);
+ va_end(args);
+ }
+
+ if (opts->errors == EXFAT_ERRORS_PANIC) {
+ panic("exFAT-fs (%s): fs panic from previous error\n",
+ sb->s_id);
+ } else if (opts->errors == EXFAT_ERRORS_RO && !sb_rdonly(sb)) {
+ sb->s_flags |= SB_RDONLY;
+ exfat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
+ }
+}
+
+/*
+ * exfat_msg() - print preformatted exFAT-specific messages.
+ * All logs except those that use exfat_fs_error() should be written by
+ * exfat_msg().
+ */
+void exfat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+	/* level means the KERN_ facility level */
+ printk("%sexFAT-fs (%s): %pV\n", level, sb->s_id, &vaf);
+ va_end(args);
+}
+
+#define SECS_PER_MIN (60)
+#define TIMEZONE_SEC(x) ((x) * 15 * SECS_PER_MIN)
+
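+/*
+ * The on-disk timezone offset is a 7-bit two's complement count of
+ * 15-minute intervals from UTC: e.g. 0x04 is UTC+01:00 (subtract 3600s
+ * to normalize to UTC) and 0x7c is UTC-01:00 (add 3600s).
+ */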
+static void exfat_adjust_tz(struct timespec64 *ts, u8 tz_off)
+{
+ if (tz_off <= 0x3F)
+ ts->tv_sec -= TIMEZONE_SEC(tz_off);
+	else /* 0x40 <= (tz_off & 0x7F) <= 0x7F */
+ ts->tv_sec += TIMEZONE_SEC(0x80 - tz_off);
+}
+
+/* Convert an exFAT time/date pair to a UNIX date (seconds since 1970-01-01). */
+void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+ u8 tz, __le16 time, __le16 date, u8 time_ms)
+{
+ u16 t = le16_to_cpu(time);
+ u16 d = le16_to_cpu(date);
+
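+	/*
+	 * Bit layout: date has the year since 1980 in bits 15-9, the month
+	 * in bits 8-5 and the day in bits 4-0; time has hours in bits 15-11,
+	 * minutes in bits 10-5 and a two-second count in bits 4-0.
+	 */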
+ ts->tv_sec = mktime64(1980 + (d >> 9), d >> 5 & 0x000F, d & 0x001F,
+ t >> 11, (t >> 5) & 0x003F, (t & 0x001F) << 1);
+
+	/* the time_ms field represents 0 ~ 199 (i.e. up to 1990 ms) */
+ if (time_ms) {
+ ts->tv_sec += time_ms / 100;
+ ts->tv_nsec = (time_ms % 100) * 10 * NSEC_PER_MSEC;
+ }
+
+ if (tz & EXFAT_TZ_VALID)
+ /* Adjust timezone to UTC0. */
+ exfat_adjust_tz(ts, tz & ~EXFAT_TZ_VALID);
+ else
+ /* Convert from local time to UTC using time_offset. */
+ ts->tv_sec -= sbi->options.time_offset * SECS_PER_MIN;
+}
+
+/* Convert a linear UNIX date to an exFAT time/date pair. */
+void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+ u8 *tz, __le16 *time, __le16 *date, u8 *time_ms)
+{
+ struct tm tm;
+ u16 t, d;
+
+ time64_to_tm(ts->tv_sec, 0, &tm);
+ t = (tm.tm_hour << 11) | (tm.tm_min << 5) | (tm.tm_sec >> 1);
+ d = ((tm.tm_year - 80) << 9) | ((tm.tm_mon + 1) << 5) | tm.tm_mday;
+
+ *time = cpu_to_le16(t);
+ *date = cpu_to_le16(d);
+
+	/* the time_ms field represents 0 ~ 199 (i.e. up to 1990 ms) */
+ if (time_ms)
+ *time_ms = (tm.tm_sec & 1) * 100 +
+ ts->tv_nsec / (10 * NSEC_PER_MSEC);
+
+ /*
+	 * Record a 00h value for the OffsetFromUtc field and a 1 for
+	 * OffsetValid, to indicate that local time and UTC are the same.
+ */
+ *tz = EXFAT_TZ_VALID;
+}
+
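+/*
+ * Rotate-right checksum over an entry set: the 16-bit checksum is rotated
+ * right by one bit and each byte is added in turn. For CS_DIR_ENTRY, bytes
+ * 2 and 3 are skipped, since they hold the SetChecksum field itself.
+ */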
+unsigned short exfat_calc_chksum_2byte(void *data, int len,
+ unsigned short chksum, int type)
+{
+ int i;
+ unsigned char *c = (unsigned char *)data;
+
+ for (i = 0; i < len; i++, c++) {
+ if (((i == 2) || (i == 3)) && (type == CS_DIR_ENTRY))
+ continue;
+ chksum = (((chksum & 1) << 15) | ((chksum & 0xFFFE) >> 1)) +
+ (unsigned short)*c;
+ }
+ return chksum;
+}
+
+void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync)
+{
+ set_bit(EXFAT_SB_DIRTY, &EXFAT_SB(sb)->s_state);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+
+ if (sync)
+ sync_dirty_buffer(bh);
+}
+
+void exfat_chain_set(struct exfat_chain *ec, unsigned int dir,
+ unsigned int size, unsigned char flags)
+{
+ ec->dir = dir;
+ ec->size = size;
+ ec->flags = flags;
+}
+
+void exfat_chain_dup(struct exfat_chain *dup, struct exfat_chain *ec)
+{
+	exfat_chain_set(dup, ec->dir, ec->size, ec->flags);
+}
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
new file mode 100644
index 000000000000..a8681d91f569
--- /dev/null
+++ b/fs/exfat/namei.c
@@ -0,0 +1,1448 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/iversion.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/nls.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
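+/*
+ * A negative dentry caches, in d_fsdata, the parent directory's iversion at
+ * the time of the failed lookup; exfat_d_revalidate() below treats the
+ * dentry as stale once the parent has changed.
+ */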
+static inline unsigned long exfat_d_version(struct dentry *dentry)
+{
+ return (unsigned long) dentry->d_fsdata;
+}
+
+static inline void exfat_d_version_set(struct dentry *dentry,
+ unsigned long version)
+{
+ dentry->d_fsdata = (void *) version;
+}
+
+/*
+ * If a new entry was created in the parent, it could have created an 8.3
+ * alias (the short name of the long name). So the parent may hold a
+ * negative dentry that matches the created 8.3 alias.
+ *
+ * If that happened, the negative dentry isn't actually negative anymore,
+ * so drop it.
+ */
+static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ int ret;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ /*
+	 * This is not a negative dentry, so it is always valid.
+	 *
+	 * Note: rename() onto an existing directory entry will have
+	 * ->d_inode and will use the existing name, which is not the name
+	 * specified by the user.
+	 *
+	 * We might be able to drop this positive dentry here, but dropping
+	 * a positive dentry isn't a good idea. So cases like
+	 * rename("filename", "FILENAME") remain unsupported for now.
+ */
+ if (d_really_is_positive(dentry))
+ return 1;
+
+ /*
+ * Drop the negative dentry, in order to make sure to use the case
+ * sensitive name which is specified by user if this is for creation.
+ */
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+ return 0;
+
+ spin_lock(&dentry->d_lock);
+ ret = inode_eq_iversion(d_inode(dentry->d_parent),
+ exfat_d_version(dentry));
+ spin_unlock(&dentry->d_lock);
+ return ret;
+}
+
+/* returns the length of a struct qstr, ignoring trailing dots */
+static unsigned int exfat_striptail_len(unsigned int len, const char *name)
+{
+ while (len && name[len - 1] == '.')
+ len--;
+ return len;
+}
+
+/*
+ * Compute the hash for the exfat name corresponding to the dentry. If the name
+ * is invalid, we leave the hash code unchanged so that the existing dentry can
+ * be used. The exfat fs routines will return ENOENT or EINVAL as appropriate.
+ */
+static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct nls_table *t = EXFAT_SB(sb)->nls_io;
+ const unsigned char *name = qstr->name;
+ unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+ unsigned long hash = init_name_hash(dentry);
+ int i, charlen;
+ wchar_t c;
+
+ for (i = 0; i < len; i += charlen) {
+ charlen = t->char2uni(&name[i], len - i, &c);
+ if (charlen < 0)
+ return charlen;
+ hash = partial_name_hash(exfat_toupper(sb, c), hash);
+ }
+
+ qstr->hash = end_name_hash(hash);
+ return 0;
+}
+
+static int exfat_d_cmp(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct nls_table *t = EXFAT_SB(sb)->nls_io;
+ unsigned int alen = exfat_striptail_len(name->len, name->name);
+ unsigned int blen = exfat_striptail_len(len, str);
+ wchar_t c1, c2;
+ int charlen, i;
+
+ if (alen != blen)
+ return 1;
+
+	for (i = 0; i < alen; i += charlen) {
+ charlen = t->char2uni(&name->name[i], alen - i, &c1);
+ if (charlen < 0)
+ return 1;
+ if (charlen != t->char2uni(&str[i], blen - i, &c2))
+ return 1;
+
+ if (exfat_toupper(sb, c1) != exfat_toupper(sb, c2))
+ return 1;
+ }
+
+ return 0;
+}
+
+const struct dentry_operations exfat_dentry_ops = {
+ .d_revalidate = exfat_d_revalidate,
+ .d_hash = exfat_d_hash,
+ .d_compare = exfat_d_cmp,
+};
+
+static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr)
+{
+ struct super_block *sb = dentry->d_sb;
+ const unsigned char *name = qstr->name;
+ unsigned int len = exfat_striptail_len(qstr->len, qstr->name);
+ unsigned long hash = init_name_hash(dentry);
+ int i, charlen;
+ unicode_t u;
+
+ for (i = 0; i < len; i += charlen) {
+ charlen = utf8_to_utf32(&name[i], len - i, &u);
+ if (charlen < 0)
+ return charlen;
+
+ /*
+ * Convert to UTF-16: code points above U+FFFF are encoded as
+ * surrogate pairs.
+		 * exfat_toupper() works only for code points up to U+FFFF.
+ */
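+		/* e.g. U+10400 is hashed as the surrogate pair 0xd801, 0xdc00 */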
+ if (u > 0xFFFF) {
+ hash = partial_name_hash(exfat_high_surrogate(u), hash);
+ hash = partial_name_hash(exfat_low_surrogate(u), hash);
+ } else {
+ hash = partial_name_hash(exfat_toupper(sb, u), hash);
+ }
+ }
+
+ qstr->hash = end_name_hash(hash);
+ return 0;
+}
+
+static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ struct super_block *sb = dentry->d_sb;
+ unsigned int alen = exfat_striptail_len(name->len, name->name);
+ unsigned int blen = exfat_striptail_len(len, str);
+ unicode_t u_a, u_b;
+ int charlen, i;
+
+ if (alen != blen)
+ return 1;
+
+ for (i = 0; i < alen; i += charlen) {
+ charlen = utf8_to_utf32(&name->name[i], alen - i, &u_a);
+ if (charlen < 0)
+ return 1;
+ if (charlen != utf8_to_utf32(&str[i], blen - i, &u_b))
+ return 1;
+
+ if (u_a <= 0xFFFF && u_b <= 0xFFFF) {
+ if (exfat_toupper(sb, u_a) != exfat_toupper(sb, u_b))
+ return 1;
+ } else if (u_a > 0xFFFF && u_b > 0xFFFF) {
+ if (exfat_low_surrogate(u_a) !=
+ exfat_low_surrogate(u_b) ||
+ exfat_high_surrogate(u_a) !=
+ exfat_high_surrogate(u_b))
+ return 1;
+ } else {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+const struct dentry_operations exfat_utf8_dentry_ops = {
+ .d_revalidate = exfat_d_revalidate,
+ .d_hash = exfat_utf8_d_hash,
+ .d_compare = exfat_utf8_d_cmp,
+};
+
+/* used only in exfat_search_empty_slot() */
+#define CNT_UNUSED_NOHIT (-1)
+#define CNT_UNUSED_HIT (-2)
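+/*
+ * While scanning, hint_femp->count is CNT_UNUSED_NOHIT when the candidate
+ * run seen so far contains only deleted entries, and CNT_UNUSED_HIT once it
+ * has reached a truly unused entry, after which no in-use entry may follow.
+ */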
+/* search for "num_entries" consecutive empty entries */
+static int exfat_search_empty_slot(struct super_block *sb,
+ struct exfat_hint_femp *hint_femp, struct exfat_chain *p_dir,
+ int num_entries)
+{
+ int i, dentry, num_empty = 0;
+ int dentries_per_clu;
+ unsigned int type;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bh;
+
+ dentries_per_clu = sbi->dentries_per_clu;
+
+ if (hint_femp->eidx != EXFAT_HINT_NONE) {
+ dentry = hint_femp->eidx;
+ if (num_entries <= hint_femp->count) {
+ hint_femp->eidx = EXFAT_HINT_NONE;
+ return dentry;
+ }
+
+ exfat_chain_dup(&clu, &hint_femp->cur);
+ } else {
+ exfat_chain_dup(&clu, p_dir);
+ dentry = 0;
+ }
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ i = dentry & (dentries_per_clu - 1);
+
+ for (; i < dentries_per_clu; i++, dentry++) {
+ ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ if (!ep)
+ return -EIO;
+ type = exfat_get_entry_type(ep);
+ brelse(bh);
+
+ if (type == TYPE_UNUSED || type == TYPE_DELETED) {
+ num_empty++;
+ if (hint_femp->eidx == EXFAT_HINT_NONE) {
+ hint_femp->eidx = dentry;
+ hint_femp->count = CNT_UNUSED_NOHIT;
+ exfat_chain_set(&hint_femp->cur,
+ clu.dir, clu.size, clu.flags);
+ }
+
+ if (type == TYPE_UNUSED &&
+ hint_femp->count != CNT_UNUSED_HIT)
+ hint_femp->count = CNT_UNUSED_HIT;
+ } else {
+ if (hint_femp->eidx != EXFAT_HINT_NONE &&
+ hint_femp->count == CNT_UNUSED_HIT) {
+					/*
+					 * An "unused empty group" is an
+					 * empty group which includes an
+					 * unused dentry.
+					 */
+ exfat_fs_error(sb,
+ "found bogus dentry(%d) beyond unused empty group(%d) (start_clu : %u, cur_clu : %u)",
+ dentry, hint_femp->eidx,
+ p_dir->dir, clu.dir);
+ return -EIO;
+ }
+
+ num_empty = 0;
+ hint_femp->eidx = EXFAT_HINT_NONE;
+ }
+
+ if (num_empty >= num_entries) {
+ /* found and invalidate hint_femp */
+ hint_femp->eidx = EXFAT_HINT_NONE;
+ return (dentry - (num_entries - 1));
+ }
+ }
+
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ if (--clu.size > 0)
+ clu.dir++;
+ else
+ clu.dir = EXFAT_EOF_CLUSTER;
+ } else {
+ if (exfat_get_next_cluster(sb, &clu.dir))
+ return -EIO;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int exfat_check_max_dentries(struct inode *inode)
+{
+ if (EXFAT_B_TO_DEN(i_size_read(inode)) >= MAX_EXFAT_DENTRIES) {
+		/*
+		 * The exFAT spec allows a directory to grow up to
+		 * 8388608 (256 MB worth of) dentries.
+		 */
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+/*
+ * Find an empty directory entry slot.
+ * If there isn't any empty slot, expand the cluster chain.
+ */
+static int exfat_find_empty_entry(struct inode *inode,
+ struct exfat_chain *p_dir, int num_entries)
+{
+ int dentry;
+ unsigned int ret, last_clu;
+ sector_t sector;
+ loff_t size = 0;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct exfat_hint_femp hint_femp;
+
+ hint_femp.eidx = EXFAT_HINT_NONE;
+
+ if (ei->hint_femp.eidx != EXFAT_HINT_NONE) {
+ memcpy(&hint_femp, &ei->hint_femp,
+ sizeof(struct exfat_hint_femp));
+ ei->hint_femp.eidx = EXFAT_HINT_NONE;
+ }
+
+ while ((dentry = exfat_search_empty_slot(sb, &hint_femp, p_dir,
+ num_entries)) < 0) {
+ if (dentry == -EIO)
+ break;
+
+ if (exfat_check_max_dentries(inode))
+ return -ENOSPC;
+
+ /* we trust p_dir->size regardless of FAT type */
+ if (exfat_find_last_cluster(sb, p_dir, &last_clu))
+ return -EIO;
+
+		/* allocate a new cluster to this directory */
+		exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
+
+ ret = exfat_alloc_cluster(inode, 1, &clu);
+ if (ret)
+ return ret;
+
+ if (exfat_zeroed_cluster(inode, clu.dir))
+ return -EIO;
+
+ /* append to the FAT chain */
+ if (clu.flags != p_dir->flags) {
+			/*
+			 * The no-fat-chain bit is disabled, so the FAT chain
+			 * must be synced with the allocation bitmap.
+			 */
+ exfat_chain_cont_cluster(sb, p_dir->dir, p_dir->size);
+ p_dir->flags = ALLOC_FAT_CHAIN;
+ hint_femp.cur.flags = ALLOC_FAT_CHAIN;
+ }
+
+ if (clu.flags == ALLOC_FAT_CHAIN)
+ if (exfat_ent_set(sb, last_clu, clu.dir))
+ return -EIO;
+
+ if (hint_femp.eidx == EXFAT_HINT_NONE) {
+			/*
+			 * Special case: the new dentry must be allocated
+			 * from the start of the new cluster.
+			 */
+ hint_femp.eidx = EXFAT_B_TO_DEN_IDX(p_dir->size, sbi);
+ hint_femp.count = sbi->dentries_per_clu;
+
+ exfat_chain_set(&hint_femp.cur, clu.dir, 0, clu.flags);
+ }
+ hint_femp.cur.size++;
+ p_dir->size++;
+ size = EXFAT_CLU_TO_B(p_dir->size, sbi);
+
+ /* update the directory entry */
+ if (p_dir->dir != sbi->root_dir) {
+ struct buffer_head *bh;
+
+ ep = exfat_get_dentry(sb,
+ &(ei->dir), ei->entry + 1, &bh, &sector);
+ if (!ep)
+ return -EIO;
+
+ ep->dentry.stream.valid_size = cpu_to_le64(size);
+ ep->dentry.stream.size = ep->dentry.stream.valid_size;
+ ep->dentry.stream.flags = p_dir->flags;
+ exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+ brelse(bh);
+ if (exfat_update_dir_chksum(inode, &(ei->dir),
+ ei->entry))
+ return -EIO;
+ }
+
+		/* the directory inode should be updated here */
+ i_size_write(inode, size);
+ EXFAT_I(inode)->i_size_ondisk += sbi->cluster_size;
+ EXFAT_I(inode)->i_size_aligned += sbi->cluster_size;
+ EXFAT_I(inode)->flags = p_dir->flags;
+ inode->i_blocks += 1 << sbi->sect_per_clus_bits;
+ }
+
+ return dentry;
+}
+
+/*
+ * Name resolution functions:
+ * return zero on success; otherwise nonzero.
+ */
+static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
+ struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
+ int lookup)
+{
+ int namelen;
+ int lossy = NLS_NAME_NO_LOSSY;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+ /* strip all trailing periods */
+ namelen = exfat_striptail_len(strlen(path), path);
+ if (!namelen)
+ return -ENOENT;
+
+ if (strlen(path) > (MAX_NAME_LENGTH * MAX_CHARSET_SIZE))
+ return -ENAMETOOLONG;
+
+	/*
+	 * Do not strip leading spaces:
+	 * MS Windows 7 accepts leading spaces, so we skip this
+	 * preprocessing for compatibility.
+	 */
+
+	/*
+	 * File name conversion:
+	 * in the lookup case, we allow bad names for compatibility.
+	 */
+ namelen = exfat_nls_to_utf16(sb, path, namelen, p_uniname,
+ &lossy);
+ if (namelen < 0)
+ return namelen; /* return error value */
+
+ if ((lossy && !lookup) || !namelen)
+ return -EINVAL;
+
+ exfat_chain_set(p_dir, ei->start_clu,
+ EXFAT_B_TO_CLU(i_size_read(inode), sbi), ei->flags);
+
+ return 0;
+}
+
+static inline int exfat_resolve_path(struct inode *inode,
+ const unsigned char *path, struct exfat_chain *dir,
+ struct exfat_uni_name *uni)
+{
+ return __exfat_resolve_path(inode, path, dir, uni, 0);
+}
+
+static inline int exfat_resolve_path_for_lookup(struct inode *inode,
+ const unsigned char *path, struct exfat_chain *dir,
+ struct exfat_uni_name *uni)
+{
+ return __exfat_resolve_path(inode, path, dir, uni, 1);
+}
+
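+/*
+ * i_pos encodes where an inode's directory entry lives: the parent
+ * directory's starting cluster in the upper 32 bits and the entry index
+ * within it in the lower 32 bits.
+ */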
+static inline loff_t exfat_make_i_pos(struct exfat_dir_entry *info)
+{
+ return ((loff_t) info->dir.dir << 32) | (info->entry & 0xffffffff);
+}
+
+static int exfat_add_entry(struct inode *inode, const char *path,
+ struct exfat_chain *p_dir, unsigned int type,
+ struct exfat_dir_entry *info)
+{
+ int ret, dentry, num_entries;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_uni_name uniname;
+ struct exfat_chain clu;
+ int clu_size = 0;
+ unsigned int start_clu = EXFAT_FREE_CLUSTER;
+
+ ret = exfat_resolve_path(inode, path, p_dir, &uniname);
+ if (ret)
+ goto out;
+
+ num_entries = exfat_calc_num_entries(&uniname);
+ if (num_entries < 0) {
+ ret = num_entries;
+ goto out;
+ }
+
+	/* exfat_find_empty_entry() must be called before exfat_alloc_cluster() */
+ dentry = exfat_find_empty_entry(inode, p_dir, num_entries);
+ if (dentry < 0) {
+ ret = dentry; /* -EIO or -ENOSPC */
+ goto out;
+ }
+
+ if (type == TYPE_DIR) {
+ ret = exfat_alloc_new_dir(inode, &clu);
+ if (ret)
+ goto out;
+ start_clu = clu.dir;
+ clu_size = sbi->cluster_size;
+ }
+
+	/*
+	 * Update the directory entry: fill in the directory entry
+	 * information of the created file. For a regular file, the first
+	 * cluster is not determined yet (0).
+	 */
+ ret = exfat_init_dir_entry(inode, p_dir, dentry, type,
+ start_clu, clu_size);
+ if (ret)
+ goto out;
+
+ ret = exfat_init_ext_entry(inode, p_dir, dentry, num_entries, &uniname);
+ if (ret)
+ goto out;
+
+ memcpy(&info->dir, p_dir, sizeof(struct exfat_chain));
+ info->entry = dentry;
+ info->flags = ALLOC_NO_FAT_CHAIN;
+ info->type = type;
+
+ if (type == TYPE_FILE) {
+ info->attr = ATTR_ARCHIVE;
+ info->start_clu = EXFAT_EOF_CLUSTER;
+ info->size = 0;
+ info->num_subdirs = 0;
+ } else {
+ int count;
+ struct exfat_chain cdir;
+
+ info->attr = ATTR_SUBDIR;
+ info->start_clu = start_clu;
+ info->size = clu_size;
+
+ exfat_chain_set(&cdir, info->start_clu,
+ EXFAT_B_TO_CLU(info->size, sbi), info->flags);
+ count = exfat_count_dir_entries(sb, &cdir);
+ if (count < 0)
+ return -EIO;
+ info->num_subdirs = count + EXFAT_MIN_SUBDIR;
+ }
+ memset(&info->crtime, 0, sizeof(info->crtime));
+ memset(&info->mtime, 0, sizeof(info->mtime));
+ memset(&info->atime, 0, sizeof(info->atime));
+out:
+ return ret;
+}
+
+static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct exfat_chain cdir;
+ struct exfat_dir_entry info;
+ loff_t i_pos;
+ int err;
+
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
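+	/*
+	 * Mark the volume dirty across the on-disk update so that an
+	 * interrupted update is detectable on the next mount.
+	 */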
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+ err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
+ &info);
+ exfat_set_vol_flags(sb, VOL_CLEAN);
+ if (err)
+ goto unlock;
+
+ inode_inc_iversion(dir);
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ i_pos = exfat_make_i_pos(&info);
+ inode = exfat_build_inode(sb, &info, i_pos);
+ if (IS_ERR(inode))
+ goto unlock;
+
+ inode_inc_iversion(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime =
+ EXFAT_I(inode)->i_crtime = current_time(inode);
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ d_instantiate(dentry, inode);
+unlock:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ return err;
+}
+
+/* lookup a file */
+static int exfat_find(struct inode *dir, struct qstr *qname,
+ struct exfat_dir_entry *info)
+{
+ int ret, dentry, num_entries, count;
+ struct exfat_chain cdir;
+ struct exfat_uni_name uni_name;
+ struct exfat_dentry *ep, *ep2;
+ struct exfat_entry_set_cache *es = NULL;
+ struct super_block *sb = dir->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(dir);
+
+ if (qname->len == 0)
+ return -ENOENT;
+
+ /* check the validity of directory name in the given pathname */
+ ret = exfat_resolve_path_for_lookup(dir, qname->name, &cdir, &uni_name);
+ if (ret)
+ return ret;
+
+ num_entries = exfat_calc_num_entries(&uni_name);
+ if (num_entries < 0)
+ return num_entries;
+
+	/* check the validity of hint_stat and initialize it if required */
+ if (ei->version != (inode_peek_iversion_raw(dir) & 0xffffffff)) {
+ ei->hint_stat.clu = cdir.dir;
+ ei->hint_stat.eidx = 0;
+ ei->version = (inode_peek_iversion_raw(dir) & 0xffffffff);
+ ei->hint_femp.eidx = EXFAT_HINT_NONE;
+ }
+
+	/* search for the file name in the directory */
+ dentry = exfat_find_dir_entry(sb, ei, &cdir, &uni_name,
+ num_entries, TYPE_ALL);
+
+ if ((dentry < 0) && (dentry != -EEXIST))
+ return dentry; /* -error value */
+
+	memcpy(&info->dir, &cdir, sizeof(struct exfat_chain));
+ info->entry = dentry;
+ info->num_subdirs = 0;
+
+ /* root directory itself */
+ if (unlikely(dentry == -EEXIST)) {
+ int num_clu = 0;
+
+ info->type = TYPE_DIR;
+ info->attr = ATTR_SUBDIR;
+ info->flags = ALLOC_FAT_CHAIN;
+ info->start_clu = sbi->root_dir;
+ memset(&info->crtime, 0, sizeof(info->crtime));
+ memset(&info->mtime, 0, sizeof(info->mtime));
+ memset(&info->atime, 0, sizeof(info->atime));
+
+ exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ if (exfat_count_num_clusters(sb, &cdir, &num_clu))
+ return -EIO;
+ info->size = num_clu << sbi->cluster_size_bits;
+
+ count = exfat_count_dir_entries(sb, &cdir);
+ if (count < 0)
+ return -EIO;
+
+ info->num_subdirs = count;
+ } else {
+ es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES, &ep);
+ if (!es)
+ return -EIO;
+ ep2 = ep + 1;
+
+ info->type = exfat_get_entry_type(ep);
+ info->attr = le16_to_cpu(ep->dentry.file.attr);
+ info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+ if ((info->type == TYPE_FILE) && (info->size == 0)) {
+ info->flags = ALLOC_NO_FAT_CHAIN;
+ info->start_clu = EXFAT_EOF_CLUSTER;
+ } else {
+ info->flags = ep2->dentry.stream.flags;
+ info->start_clu =
+ le32_to_cpu(ep2->dentry.stream.start_clu);
+ }
+
+ if (ei->start_clu == EXFAT_FREE_CLUSTER) {
+ exfat_fs_error(sb,
+ "non-zero size file starts with zero cluster (size : %llu, p_dir : %u, entry : 0x%08x)",
+ i_size_read(dir), ei->dir.dir, ei->entry);
+ return -EIO;
+ }
+
+ exfat_get_entry_time(sbi, &info->crtime,
+ ep->dentry.file.create_tz,
+ ep->dentry.file.create_time,
+ ep->dentry.file.create_date,
+ ep->dentry.file.create_time_ms);
+ exfat_get_entry_time(sbi, &info->mtime,
+ ep->dentry.file.modify_tz,
+ ep->dentry.file.modify_time,
+ ep->dentry.file.modify_date,
+ ep->dentry.file.modify_time_ms);
+ exfat_get_entry_time(sbi, &info->atime,
+ ep->dentry.file.access_tz,
+ ep->dentry.file.access_time,
+ ep->dentry.file.access_date,
+ 0);
+ kfree(es);
+
+ if (info->type == TYPE_DIR) {
+ exfat_chain_set(&cdir, info->start_clu,
+ EXFAT_B_TO_CLU(info->size, sbi), info->flags);
+ count = exfat_count_dir_entries(sb, &cdir);
+ if (count < 0)
+ return -EIO;
+
+ info->num_subdirs = count + EXFAT_MIN_SUBDIR;
+ }
+ }
+ return 0;
+}
+
+static int exfat_d_anon_disconn(struct dentry *dentry)
+{
+ return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct dentry *alias;
+ struct exfat_dir_entry info;
+ int err;
+ loff_t i_pos;
+ mode_t i_mode;
+
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
+ err = exfat_find(dir, &dentry->d_name, &info);
+ if (err) {
+ if (err == -ENOENT) {
+ inode = NULL;
+ goto out;
+ }
+ goto unlock;
+ }
+
+ i_pos = exfat_make_i_pos(&info);
+ inode = exfat_build_inode(sb, &info, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto unlock;
+ }
+
+ i_mode = inode->i_mode;
+ alias = d_find_alias(inode);
+
+	/*
+	 * Check "alias->d_parent == dentry->d_parent" to make sure the FS is
+	 * not corrupted (especially a doubly-linked directory).
+	 */
+ if (alias && alias->d_parent == dentry->d_parent &&
+ !exfat_d_anon_disconn(alias)) {
+
+		/*
+		 * An unhashed alias can exist because of revalidate() called
+		 * by lookup_fast. This state is easy to produce by calling
+		 * create and lookup concurrently. In such a case, we reuse
+		 * the alias instead of a new dentry.
+		 */
+ if (d_unhashed(alias)) {
+ WARN_ON(alias->d_name.hash_len !=
+ dentry->d_name.hash_len);
+ exfat_msg(sb, KERN_INFO,
+ "rehashed a dentry(%p) in read lookup", alias);
+ d_drop(dentry);
+ d_rehash(alias);
+ } else if (!S_ISDIR(i_mode)) {
+			/*
+			 * This inode has a dentry that is neither anonymous
+			 * nor DCACHE_DISCONNECTED. This means the user did
+			 * ->lookup() by another name (long name vs its 8.3
+			 * alias) in the past.
+			 *
+			 * Switch to the new one for locality, if possible.
+			 */
+ d_move(alias, dentry);
+ }
+ iput(inode);
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ return alias;
+ }
+ dput(alias);
+out:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ if (!inode)
+ exfat_d_version_set(dentry, inode_query_iversion(dir));
+
+ return d_splice_alias(inode, dentry);
+unlock:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ return ERR_PTR(err);
+}
+
+/* remove an entry, BUT don't truncate */
+static int exfat_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct exfat_chain cdir;
+ struct exfat_dentry *ep;
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode = dentry->d_inode;
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct buffer_head *bh;
+ sector_t sector;
+ int num_entries, entry, err = 0;
+
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
+ exfat_chain_dup(&cdir, &ei->dir);
+ entry = ei->entry;
+ if (ei->dir.dir == DIR_DELETED) {
+ exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry");
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
+ if (!ep) {
+ err = -EIO;
+ goto unlock;
+ }
+ num_entries = exfat_count_ext_entries(sb, &cdir, entry, ep);
+ if (num_entries < 0) {
+ err = -EIO;
+ brelse(bh);
+ goto unlock;
+ }
+ num_entries++;
+ brelse(bh);
+
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+ /* update the directory entry */
+ if (exfat_remove_entries(dir, &cdir, entry, 0, num_entries)) {
+ err = -EIO;
+ goto unlock;
+ }
+
+	/* exfat_remove_entries() doesn't modify ei, so mark it deleted here */
+ ei->dir.dir = DIR_DELETED;
+ exfat_set_vol_flags(sb, VOL_CLEAN);
+
+ inode_inc_iversion(dir);
+ dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = current_time(inode);
+ exfat_unhash_inode(inode);
+ exfat_d_version_set(dentry, inode_query_iversion(dir));
+unlock:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ return err;
+}
+
+static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct exfat_dir_entry info;
+ struct exfat_chain cdir;
+ loff_t i_pos;
+ int err;
+
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+ err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
+ &info);
+ exfat_set_vol_flags(sb, VOL_CLEAN);
+ if (err)
+ goto unlock;
+
+ inode_inc_iversion(dir);
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ inc_nlink(dir);
+
+ i_pos = exfat_make_i_pos(&info);
+ inode = exfat_build_inode(sb, &info, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto unlock;
+ }
+
+ inode_inc_iversion(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime =
+ EXFAT_I(inode)->i_crtime = current_time(inode);
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ d_instantiate(dentry, inode);
+
+unlock:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ return err;
+}
+
+static int exfat_check_dir_empty(struct super_block *sb,
+ struct exfat_chain *p_dir)
+{
+ int i, dentries_per_clu;
+ unsigned int type;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bh;
+
+ dentries_per_clu = sbi->dentries_per_clu;
+
+ exfat_chain_dup(&clu, p_dir);
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ if (!ep)
+ return -EIO;
+ type = exfat_get_entry_type(ep);
+ brelse(bh);
+ if (type == TYPE_UNUSED)
+ return 0;
+
+ if (type != TYPE_FILE && type != TYPE_DIR)
+ continue;
+
+ return -ENOTEMPTY;
+ }
+
+ if (clu.flags == ALLOC_NO_FAT_CHAIN) {
+ if (--clu.size > 0)
+ clu.dir++;
+ else
+ clu.dir = EXFAT_EOF_CLUSTER;
+ } else {
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct exfat_dentry *ep;
+ struct exfat_chain cdir, clu_to_free;
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct buffer_head *bh;
+ sector_t sector;
+ int num_entries, entry, err;
+
+ mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
+
+ exfat_chain_dup(&cdir, &ei->dir);
+ entry = ei->entry;
+
+ if (ei->dir.dir == DIR_DELETED) {
+ exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry");
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+ exfat_chain_set(&clu_to_free, ei->start_clu,
+ EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi), ei->flags);
+
+ err = exfat_check_dir_empty(sb, &clu_to_free);
+ if (err) {
+ if (err == -EIO)
+ exfat_msg(sb, KERN_ERR,
+ "failed to exfat_check_dir_empty : err(%d)",
+ err);
+ goto unlock;
+ }
+
+ ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
+ if (!ep) {
+ err = -EIO;
+ goto unlock;
+ }
+
+ num_entries = exfat_count_ext_entries(sb, &cdir, entry, ep);
+ if (num_entries < 0) {
+ err = -EIO;
+ brelse(bh);
+ goto unlock;
+ }
+ num_entries++;
+ brelse(bh);
+
+ err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
+ if (err) {
+ exfat_msg(sb, KERN_ERR,
+ "failed to exfat_remove_entries : err(%d)",
+ err);
+ goto unlock;
+ }
+ ei->dir.dir = DIR_DELETED;
+ exfat_set_vol_flags(sb, VOL_CLEAN);
+
+ inode_inc_iversion(dir);
+ dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ drop_nlink(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = current_time(inode);
+ exfat_unhash_inode(inode);
+ exfat_d_version_set(dentry, inode_query_iversion(dir));
+unlock:
+ mutex_unlock(&EXFAT_SB(inode->i_sb)->s_lock);
+ return err;
+}
+
+static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
+ int oldentry, struct exfat_uni_name *p_uniname,
+ struct exfat_inode_info *ei)
+{
+ int ret, num_old_entries, num_new_entries;
+ sector_t sector_old, sector_new;
+ struct exfat_dentry *epold, *epnew;
+ struct super_block *sb = inode->i_sb;
+ struct buffer_head *new_bh, *old_bh;
+ int sync = IS_DIRSYNC(inode);
+
+ epold = exfat_get_dentry(sb, p_dir, oldentry, &old_bh, &sector_old);
+ if (!epold)
+ return -EIO;
+
+ num_old_entries = exfat_count_ext_entries(sb, p_dir, oldentry, epold);
+ if (num_old_entries < 0)
+ return -EIO;
+ num_old_entries++;
+
+ num_new_entries = exfat_calc_num_entries(p_uniname);
+ if (num_new_entries < 0)
+ return num_new_entries;
+
+ if (num_old_entries < num_new_entries) {
+ int newentry;
+
+ newentry =
+ exfat_find_empty_entry(inode, p_dir, num_new_entries);
+ if (newentry < 0)
+ return newentry; /* -EIO or -ENOSPC */
+
+ epnew = exfat_get_dentry(sb, p_dir, newentry, &new_bh,
+ &sector_new);
+ if (!epnew)
+ return -EIO;
+
+ memcpy(epnew, epold, DENTRY_SIZE);
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
+ ei->attr |= ATTR_ARCHIVE;
+ }
+ exfat_update_bh(sb, new_bh, sync);
+ brelse(old_bh);
+ brelse(new_bh);
+
+ epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
+ &sector_old);
+ epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
+ &sector_new);
+ if (!epold || !epnew)
+ return -EIO;
+
+ memcpy(epnew, epold, DENTRY_SIZE);
+ exfat_update_bh(sb, new_bh, sync);
+ brelse(old_bh);
+ brelse(new_bh);
+
+ ret = exfat_init_ext_entry(inode, p_dir, newentry,
+ num_new_entries, p_uniname);
+ if (ret)
+ return ret;
+
+ exfat_remove_entries(inode, p_dir, oldentry, 0,
+ num_old_entries);
+ ei->entry = newentry;
+ } else {
+ if (exfat_get_entry_type(epold) == TYPE_FILE) {
+ epold->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
+ ei->attr |= ATTR_ARCHIVE;
+ }
+ exfat_update_bh(sb, old_bh, sync);
+ brelse(old_bh);
+ ret = exfat_init_ext_entry(inode, p_dir, oldentry,
+ num_new_entries, p_uniname);
+ if (ret)
+ return ret;
+
+ exfat_remove_entries(inode, p_dir, oldentry, num_new_entries,
+ num_old_entries);
+ }
+ return 0;
+}
+
+static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
+ int oldentry, struct exfat_chain *p_newdir,
+ struct exfat_uni_name *p_uniname, struct exfat_inode_info *ei)
+{
+ int ret, newentry, num_new_entries, num_old_entries;
+ sector_t sector_mov, sector_new;
+ struct exfat_dentry *epmov, *epnew;
+ struct super_block *sb = inode->i_sb;
+ struct buffer_head *mov_bh, *new_bh;
+
+ epmov = exfat_get_dentry(sb, p_olddir, oldentry, &mov_bh, &sector_mov);
+ if (!epmov)
+ return -EIO;
+
+	/* check if the source and target directories are the same */
+ if (exfat_get_entry_type(epmov) == TYPE_DIR &&
+ le32_to_cpu(epmov->dentry.stream.start_clu) == p_newdir->dir)
+ return -EINVAL;
+
+ num_old_entries = exfat_count_ext_entries(sb, p_olddir, oldentry,
+ epmov);
+ if (num_old_entries < 0)
+ return -EIO;
+ num_old_entries++;
+
+ num_new_entries = exfat_calc_num_entries(p_uniname);
+ if (num_new_entries < 0)
+ return num_new_entries;
+
+ newentry = exfat_find_empty_entry(inode, p_newdir, num_new_entries);
+ if (newentry < 0)
+ return newentry; /* -EIO or -ENOSPC */
+
+ epnew = exfat_get_dentry(sb, p_newdir, newentry, &new_bh, &sector_new);
+ if (!epnew)
+ return -EIO;
+
+ memcpy(epnew, epmov, DENTRY_SIZE);
+ if (exfat_get_entry_type(epnew) == TYPE_FILE) {
+ epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
+ ei->attr |= ATTR_ARCHIVE;
+ }
+ exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
+ brelse(mov_bh);
+ brelse(new_bh);
+
+ epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
+ &sector_mov);
+ epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
+ &sector_new);
+ if (!epmov || !epnew)
+ return -EIO;
+
+ memcpy(epnew, epmov, DENTRY_SIZE);
+ exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
+ brelse(mov_bh);
+ brelse(new_bh);
+
+ ret = exfat_init_ext_entry(inode, p_newdir, newentry, num_new_entries,
+ p_uniname);
+ if (ret)
+ return ret;
+
+ exfat_remove_entries(inode, p_olddir, oldentry, 0, num_old_entries);
+
+ exfat_chain_set(&ei->dir, p_newdir->dir, p_newdir->size,
+ p_newdir->flags);
+
+ ei->entry = newentry;
+ return 0;
+}
+
+static void exfat_update_parent_info(struct exfat_inode_info *ei,
+ struct inode *parent_inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(parent_inode->i_sb);
+ struct exfat_inode_info *parent_ei = EXFAT_I(parent_inode);
+ loff_t parent_isize = i_size_read(parent_inode);
+
+	/*
+	 * struct exfat_inode_info can cache stale parent info.
+	 *
+	 * If the flags of ei->dir no longer match the parent's, the cluster
+	 * chain would be traversed abnormally, so resync the cached chain.
+	 */
+ if (unlikely(parent_ei->flags != ei->dir.flags ||
+ parent_isize != EXFAT_CLU_TO_B(ei->dir.size, sbi) ||
+ parent_ei->start_clu != ei->dir.dir)) {
+ exfat_chain_set(&ei->dir, parent_ei->start_clu,
+ EXFAT_B_TO_CLU_ROUND_UP(parent_isize, sbi),
+ parent_ei->flags);
+ }
+}
+
+/* rename or move an old file into a new file */
+static int __exfat_rename(struct inode *old_parent_inode,
+ struct exfat_inode_info *ei, struct inode *new_parent_inode,
+ struct dentry *new_dentry)
+{
+ int ret;
+ int dentry;
+ struct exfat_chain olddir, newdir;
+ struct exfat_chain *p_dir = NULL;
+ struct exfat_uni_name uni_name;
+ struct exfat_dentry *ep;
+ struct super_block *sb = old_parent_inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ const unsigned char *new_path = new_dentry->d_name.name;
+ struct inode *new_inode = new_dentry->d_inode;
+ int num_entries;
+ struct exfat_inode_info *new_ei = NULL;
+ unsigned int new_entry_type = TYPE_UNUSED;
+ int new_entry = 0;
+ struct buffer_head *old_bh, *new_bh = NULL;
+
+ /* check the validity of pointer parameters */
+ if (new_path == NULL || strlen(new_path) == 0)
+ return -EINVAL;
+
+ if (ei->dir.dir == DIR_DELETED) {
+ exfat_msg(sb, KERN_ERR,
+ "abnormal access to deleted source dentry");
+ return -ENOENT;
+ }
+
+ exfat_update_parent_info(ei, old_parent_inode);
+
+ exfat_chain_dup(&olddir, &ei->dir);
+ dentry = ei->entry;
+
+ ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh, NULL);
+ if (!ep) {
+ ret = -EIO;
+ goto out;
+ }
+ brelse(old_bh);
+
+	/* check whether the new entry exists and, if a directory, is empty */
+ if (new_inode) {
+ ret = -EIO;
+ new_ei = EXFAT_I(new_inode);
+
+ if (new_ei->dir.dir == DIR_DELETED) {
+ exfat_msg(sb, KERN_ERR,
+ "abnormal access to deleted target dentry");
+ goto out;
+ }
+
+ exfat_update_parent_info(new_ei, new_parent_inode);
+
+ p_dir = &(new_ei->dir);
+ new_entry = new_ei->entry;
+ ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
+ if (!ep)
+ goto out;
+
+ new_entry_type = exfat_get_entry_type(ep);
+ brelse(new_bh);
+
+ /* if new_inode exists, update ei */
+ if (new_entry_type == TYPE_DIR) {
+ struct exfat_chain new_clu;
+
+ new_clu.dir = new_ei->start_clu;
+ new_clu.size =
+ EXFAT_B_TO_CLU_ROUND_UP(i_size_read(new_inode),
+ sbi);
+ new_clu.flags = new_ei->flags;
+
+ ret = exfat_check_dir_empty(sb, &new_clu);
+ if (ret)
+ goto out;
+ }
+ }
+
+ /* check the validity of directory name in the given new pathname */
+ ret = exfat_resolve_path(new_parent_inode, new_path, &newdir,
+ &uni_name);
+ if (ret)
+ goto out;
+
+ exfat_set_vol_flags(sb, VOL_DIRTY);
+
+ if (olddir.dir == newdir.dir)
+ ret = exfat_rename_file(new_parent_inode, &olddir, dentry,
+ &uni_name, ei);
+ else
+ ret = exfat_move_file(new_parent_inode, &olddir, dentry,
+ &newdir, &uni_name, ei);
+
+ if (!ret && new_inode) {
+ /* delete entries of new_dir */
+ ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
+ if (!ep) {
+ ret = -EIO;
+ goto del_out;
+ }
+
+ num_entries = exfat_count_ext_entries(sb, p_dir, new_entry, ep);
+ if (num_entries < 0) {
+ ret = -EIO;
+ goto del_out;
+ }
+ brelse(new_bh);
+
+ if (exfat_remove_entries(new_inode, p_dir, new_entry, 0,
+ num_entries + 1)) {
+ ret = -EIO;
+ goto del_out;
+ }
+
+		/* free the clusters if new_inode is a dir (as exfat_rmdir() does) */
+		if (new_entry_type == TYPE_DIR) {
+			/* build the chain of new_inode's clusters from new_ei */
+ struct exfat_chain new_clu_to_free;
+
+ exfat_chain_set(&new_clu_to_free, new_ei->start_clu,
+ EXFAT_B_TO_CLU_ROUND_UP(i_size_read(new_inode),
+ sbi), new_ei->flags);
+
+ if (exfat_free_cluster(new_inode, &new_clu_to_free)) {
+				/* report the I/O error, but continue the cleanup */
+ ret = -EIO;
+ }
+
+ i_size_write(new_inode, 0);
+ new_ei->start_clu = EXFAT_EOF_CLUSTER;
+ new_ei->flags = ALLOC_NO_FAT_CHAIN;
+ }
+del_out:
+		/*
+		 * Update new_inode's ei to prevent syncing the removed
+		 * new_inode (new_ei was already initialized in the
+		 * "if (new_inode)" block above).
+		 */
+ new_ei->dir.dir = DIR_DELETED;
+ }
+ exfat_set_vol_flags(sb, VOL_CLEAN);
+out:
+ return ret;
+}
+
+static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct inode *old_inode, *new_inode;
+ struct super_block *sb = old_dir->i_sb;
+ loff_t i_pos;
+ int err;
+
+ /*
+ * The VFS already checks for existence, so for local filesystems
+ * the RENAME_NOREPLACE implementation is equivalent to plain rename.
+	 * Don't support any other flags.
+ */
+ if (flags & ~RENAME_NOREPLACE)
+ return -EINVAL;
+
+ mutex_lock(&EXFAT_SB(sb)->s_lock);
+ old_inode = old_dentry->d_inode;
+ new_inode = new_dentry->d_inode;
+
+ err = __exfat_rename(old_dir, EXFAT_I(old_inode), new_dir, new_dentry);
+ if (err)
+ goto unlock;
+
+ inode_inc_iversion(new_dir);
+ new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
+ EXFAT_I(new_dir)->i_crtime = current_time(new_dir);
+ if (IS_DIRSYNC(new_dir))
+ exfat_sync_inode(new_dir);
+ else
+ mark_inode_dirty(new_dir);
+
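+	/* same packing as exfat_make_i_pos() */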
+ i_pos = ((loff_t)EXFAT_I(old_inode)->dir.dir << 32) |
+ (EXFAT_I(old_inode)->entry & 0xffffffff);
+ exfat_unhash_inode(old_inode);
+ exfat_hash_inode(old_inode, i_pos);
+ if (IS_DIRSYNC(new_dir))
+ exfat_sync_inode(old_inode);
+ else
+ mark_inode_dirty(old_inode);
+
+ if (S_ISDIR(old_inode->i_mode) && old_dir != new_dir) {
+ drop_nlink(old_dir);
+ if (!new_inode)
+ inc_nlink(new_dir);
+ }
+
+ inode_inc_iversion(old_dir);
+ old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ if (IS_DIRSYNC(old_dir))
+ exfat_sync_inode(old_dir);
+ else
+ mark_inode_dirty(old_dir);
+
+ if (new_inode) {
+ exfat_unhash_inode(new_inode);
+
+		/* skip drop_nlink if new_inode has already been dropped */
+ if (new_inode->i_nlink) {
+ drop_nlink(new_inode);
+ if (S_ISDIR(new_inode->i_mode))
+ drop_nlink(new_inode);
+ } else {
+ exfat_msg(sb, KERN_WARNING,
+ "abnormal access to an inode dropped");
+ WARN_ON(new_inode->i_nlink == 0);
+ }
+ new_inode->i_ctime = EXFAT_I(new_inode)->i_crtime =
+ current_time(new_inode);
+ }
+
+unlock:
+ mutex_unlock(&EXFAT_SB(sb)->s_lock);
+ return err;
+}
+
+const struct inode_operations exfat_dir_inode_operations = {
+ .create = exfat_create,
+ .lookup = exfat_lookup,
+ .unlink = exfat_unlink,
+ .mkdir = exfat_mkdir,
+ .rmdir = exfat_rmdir,
+ .rename = exfat_rename,
+ .setattr = exfat_setattr,
+ .getattr = exfat_getattr,
+};
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
new file mode 100644
index 000000000000..6d1c3ae130ff
--- /dev/null
+++ b/fs/exfat/nls.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <asm/unaligned.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+/* Upcase table macros */
+#define EXFAT_NUM_UPCASE (2918)
+#define UTBL_COUNT (0x10000)
+
+/*
+ * Upcase table in compressed format (7.2.5.1 Recommended Up-case Table
+ * in the exFAT specification; see:
+ * https://docs.microsoft.com/en-us/windows/win32/fileio/exfat-specification).
+ */
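+/*
+ * In the compressed encoding, a 0xffff value is a marker: the word that
+ * follows is a count of identity-mapped code points to skip, so runs of
+ * characters that upper-case to themselves need not be stored verbatim.
+ */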
+static const unsigned short uni_def_upcase[EXFAT_NUM_UPCASE] = {
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
+ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
+ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
+ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
+ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
+ 0x0060, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+ 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
+ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+ 0x0058, 0x0059, 0x005a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
+ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+ 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f,
+ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+ 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f,
+ 0x00a0, 0x00a1, 0x00a2, 0x00a3, 0x00a4, 0x00a5, 0x00a6, 0x00a7,
+ 0x00a8, 0x00a9, 0x00aa, 0x00ab, 0x00ac, 0x00ad, 0x00ae, 0x00af,
+ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x00b4, 0x00b5, 0x00b6, 0x00b7,
+ 0x00b8, 0x00b9, 0x00ba, 0x00bb, 0x00bc, 0x00bd, 0x00be, 0x00bf,
+ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
+ 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
+ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00d7,
+ 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x00df,
+ 0x00c0, 0x00c1, 0x00c2, 0x00c3, 0x00c4, 0x00c5, 0x00c6, 0x00c7,
+ 0x00c8, 0x00c9, 0x00ca, 0x00cb, 0x00cc, 0x00cd, 0x00ce, 0x00cf,
+ 0x00d0, 0x00d1, 0x00d2, 0x00d3, 0x00d4, 0x00d5, 0x00d6, 0x00f7,
+ 0x00d8, 0x00d9, 0x00da, 0x00db, 0x00dc, 0x00dd, 0x00de, 0x0178,
+ 0x0100, 0x0100, 0x0102, 0x0102, 0x0104, 0x0104, 0x0106, 0x0106,
+ 0x0108, 0x0108, 0x010a, 0x010a, 0x010c, 0x010c, 0x010e, 0x010e,
+ 0x0110, 0x0110, 0x0112, 0x0112, 0x0114, 0x0114, 0x0116, 0x0116,
+ 0x0118, 0x0118, 0x011a, 0x011a, 0x011c, 0x011c, 0x011e, 0x011e,
+ 0x0120, 0x0120, 0x0122, 0x0122, 0x0124, 0x0124, 0x0126, 0x0126,
+ 0x0128, 0x0128, 0x012a, 0x012a, 0x012c, 0x012c, 0x012e, 0x012e,
+ 0x0130, 0x0131, 0x0132, 0x0132, 0x0134, 0x0134, 0x0136, 0x0136,
+ 0x0138, 0x0139, 0x0139, 0x013b, 0x013b, 0x013d, 0x013d, 0x013f,
+ 0x013f, 0x0141, 0x0141, 0x0143, 0x0143, 0x0145, 0x0145, 0x0147,
+ 0x0147, 0x0149, 0x014a, 0x014a, 0x014c, 0x014c, 0x014e, 0x014e,
+ 0x0150, 0x0150, 0x0152, 0x0152, 0x0154, 0x0154, 0x0156, 0x0156,
+ 0x0158, 0x0158, 0x015a, 0x015a, 0x015c, 0x015c, 0x015e, 0x015e,
+ 0x0160, 0x0160, 0x0162, 0x0162, 0x0164, 0x0164, 0x0166, 0x0166,
+ 0x0168, 0x0168, 0x016a, 0x016a, 0x016c, 0x016c, 0x016e, 0x016e,
+ 0x0170, 0x0170, 0x0172, 0x0172, 0x0174, 0x0174, 0x0176, 0x0176,
+ 0x0178, 0x0179, 0x0179, 0x017b, 0x017b, 0x017d, 0x017d, 0x017f,
+ 0x0243, 0x0181, 0x0182, 0x0182, 0x0184, 0x0184, 0x0186, 0x0187,
+ 0x0187, 0x0189, 0x018a, 0x018b, 0x018b, 0x018d, 0x018e, 0x018f,
+ 0x0190, 0x0191, 0x0191, 0x0193, 0x0194, 0x01f6, 0x0196, 0x0197,
+ 0x0198, 0x0198, 0x023d, 0x019b, 0x019c, 0x019d, 0x0220, 0x019f,
+ 0x01a0, 0x01a0, 0x01a2, 0x01a2, 0x01a4, 0x01a4, 0x01a6, 0x01a7,
+ 0x01a7, 0x01a9, 0x01aa, 0x01ab, 0x01ac, 0x01ac, 0x01ae, 0x01af,
+ 0x01af, 0x01b1, 0x01b2, 0x01b3, 0x01b3, 0x01b5, 0x01b5, 0x01b7,
+ 0x01b8, 0x01b8, 0x01ba, 0x01bb, 0x01bc, 0x01bc, 0x01be, 0x01f7,
+ 0x01c0, 0x01c1, 0x01c2, 0x01c3, 0x01c4, 0x01c5, 0x01c4, 0x01c7,
+ 0x01c8, 0x01c7, 0x01ca, 0x01cb, 0x01ca, 0x01cd, 0x01cd, 0x01cf,
+ 0x01cf, 0x01d1, 0x01d1, 0x01d3, 0x01d3, 0x01d5, 0x01d5, 0x01d7,
+ 0x01d7, 0x01d9, 0x01d9, 0x01db, 0x01db, 0x018e, 0x01de, 0x01de,
+ 0x01e0, 0x01e0, 0x01e2, 0x01e2, 0x01e4, 0x01e4, 0x01e6, 0x01e6,
+ 0x01e8, 0x01e8, 0x01ea, 0x01ea, 0x01ec, 0x01ec, 0x01ee, 0x01ee,
+ 0x01f0, 0x01f1, 0x01f2, 0x01f1, 0x01f4, 0x01f4, 0x01f6, 0x01f7,
+ 0x01f8, 0x01f8, 0x01fa, 0x01fa, 0x01fc, 0x01fc, 0x01fe, 0x01fe,
+ 0x0200, 0x0200, 0x0202, 0x0202, 0x0204, 0x0204, 0x0206, 0x0206,
+ 0x0208, 0x0208, 0x020a, 0x020a, 0x020c, 0x020c, 0x020e, 0x020e,
+ 0x0210, 0x0210, 0x0212, 0x0212, 0x0214, 0x0214, 0x0216, 0x0216,
+ 0x0218, 0x0218, 0x021a, 0x021a, 0x021c, 0x021c, 0x021e, 0x021e,
+ 0x0220, 0x0221, 0x0222, 0x0222, 0x0224, 0x0224, 0x0226, 0x0226,
+ 0x0228, 0x0228, 0x022a, 0x022a, 0x022c, 0x022c, 0x022e, 0x022e,
+ 0x0230, 0x0230, 0x0232, 0x0232, 0x0234, 0x0235, 0x0236, 0x0237,
+ 0x0238, 0x0239, 0x2c65, 0x023b, 0x023b, 0x023d, 0x2c66, 0x023f,
+ 0x0240, 0x0241, 0x0241, 0x0243, 0x0244, 0x0245, 0x0246, 0x0246,
+ 0x0248, 0x0248, 0x024a, 0x024a, 0x024c, 0x024c, 0x024e, 0x024e,
+ 0x0250, 0x0251, 0x0252, 0x0181, 0x0186, 0x0255, 0x0189, 0x018a,
+ 0x0258, 0x018f, 0x025a, 0x0190, 0x025c, 0x025d, 0x025e, 0x025f,
+ 0x0193, 0x0261, 0x0262, 0x0194, 0x0264, 0x0265, 0x0266, 0x0267,
+ 0x0197, 0x0196, 0x026a, 0x2c62, 0x026c, 0x026d, 0x026e, 0x019c,
+ 0x0270, 0x0271, 0x019d, 0x0273, 0x0274, 0x019f, 0x0276, 0x0277,
+ 0x0278, 0x0279, 0x027a, 0x027b, 0x027c, 0x2c64, 0x027e, 0x027f,
+ 0x01a6, 0x0281, 0x0282, 0x01a9, 0x0284, 0x0285, 0x0286, 0x0287,
+ 0x01ae, 0x0244, 0x01b1, 0x01b2, 0x0245, 0x028d, 0x028e, 0x028f,
+ 0x0290, 0x0291, 0x01b7, 0x0293, 0x0294, 0x0295, 0x0296, 0x0297,
+ 0x0298, 0x0299, 0x029a, 0x029b, 0x029c, 0x029d, 0x029e, 0x029f,
+ 0x02a0, 0x02a1, 0x02a2, 0x02a3, 0x02a4, 0x02a5, 0x02a6, 0x02a7,
+ 0x02a8, 0x02a9, 0x02aa, 0x02ab, 0x02ac, 0x02ad, 0x02ae, 0x02af,
+ 0x02b0, 0x02b1, 0x02b2, 0x02b3, 0x02b4, 0x02b5, 0x02b6, 0x02b7,
+ 0x02b8, 0x02b9, 0x02ba, 0x02bb, 0x02bc, 0x02bd, 0x02be, 0x02bf,
+ 0x02c0, 0x02c1, 0x02c2, 0x02c3, 0x02c4, 0x02c5, 0x02c6, 0x02c7,
+ 0x02c8, 0x02c9, 0x02ca, 0x02cb, 0x02cc, 0x02cd, 0x02ce, 0x02cf,
+ 0x02d0, 0x02d1, 0x02d2, 0x02d3, 0x02d4, 0x02d5, 0x02d6, 0x02d7,
+ 0x02d8, 0x02d9, 0x02da, 0x02db, 0x02dc, 0x02dd, 0x02de, 0x02df,
+ 0x02e0, 0x02e1, 0x02e2, 0x02e3, 0x02e4, 0x02e5, 0x02e6, 0x02e7,
+ 0x02e8, 0x02e9, 0x02ea, 0x02eb, 0x02ec, 0x02ed, 0x02ee, 0x02ef,
+ 0x02f0, 0x02f1, 0x02f2, 0x02f3, 0x02f4, 0x02f5, 0x02f6, 0x02f7,
+ 0x02f8, 0x02f9, 0x02fa, 0x02fb, 0x02fc, 0x02fd, 0x02fe, 0x02ff,
+ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307,
+ 0x0308, 0x0309, 0x030a, 0x030b, 0x030c, 0x030d, 0x030e, 0x030f,
+ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
+ 0x0318, 0x0319, 0x031a, 0x031b, 0x031c, 0x031d, 0x031e, 0x031f,
+ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327,
+ 0x0328, 0x0329, 0x032a, 0x032b, 0x032c, 0x032d, 0x032e, 0x032f,
+ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337,
+ 0x0338, 0x0339, 0x033a, 0x033b, 0x033c, 0x033d, 0x033e, 0x033f,
+ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347,
+ 0x0348, 0x0349, 0x034a, 0x034b, 0x034c, 0x034d, 0x034e, 0x034f,
+ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357,
+ 0x0358, 0x0359, 0x035a, 0x035b, 0x035c, 0x035d, 0x035e, 0x035f,
+ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367,
+ 0x0368, 0x0369, 0x036a, 0x036b, 0x036c, 0x036d, 0x036e, 0x036f,
+ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377,
+ 0x0378, 0x0379, 0x037a, 0x03fd, 0x03fe, 0x03ff, 0x037e, 0x037f,
+ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387,
+ 0x0388, 0x0389, 0x038a, 0x038b, 0x038c, 0x038d, 0x038e, 0x038f,
+ 0x0390, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397,
+ 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f,
+ 0x03a0, 0x03a1, 0x03a2, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7,
+ 0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x0386, 0x0388, 0x0389, 0x038a,
+ 0x03b0, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397,
+ 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f,
+ 0x03a0, 0x03a1, 0x03a3, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7,
+ 0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x038c, 0x038e, 0x038f, 0x03cf,
+ 0x03d0, 0x03d1, 0x03d2, 0x03d3, 0x03d4, 0x03d5, 0x03d6, 0x03d7,
+ 0x03d8, 0x03d8, 0x03da, 0x03da, 0x03dc, 0x03dc, 0x03de, 0x03de,
+ 0x03e0, 0x03e0, 0x03e2, 0x03e2, 0x03e4, 0x03e4, 0x03e6, 0x03e6,
+ 0x03e8, 0x03e8, 0x03ea, 0x03ea, 0x03ec, 0x03ec, 0x03ee, 0x03ee,
+ 0x03f0, 0x03f1, 0x03f9, 0x03f3, 0x03f4, 0x03f5, 0x03f6, 0x03f7,
+ 0x03f7, 0x03f9, 0x03fa, 0x03fa, 0x03fc, 0x03fd, 0x03fe, 0x03ff,
+ 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407,
+ 0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x040d, 0x040e, 0x040f,
+ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417,
+ 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f,
+ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427,
+ 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f,
+ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417,
+ 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f,
+ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427,
+ 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f,
+ 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407,
+ 0x0408, 0x0409, 0x040a, 0x040b, 0x040c, 0x040d, 0x040e, 0x040f,
+ 0x0460, 0x0460, 0x0462, 0x0462, 0x0464, 0x0464, 0x0466, 0x0466,
+ 0x0468, 0x0468, 0x046a, 0x046a, 0x046c, 0x046c, 0x046e, 0x046e,
+ 0x0470, 0x0470, 0x0472, 0x0472, 0x0474, 0x0474, 0x0476, 0x0476,
+ 0x0478, 0x0478, 0x047a, 0x047a, 0x047c, 0x047c, 0x047e, 0x047e,
+ 0x0480, 0x0480, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487,
+ 0x0488, 0x0489, 0x048a, 0x048a, 0x048c, 0x048c, 0x048e, 0x048e,
+ 0x0490, 0x0490, 0x0492, 0x0492, 0x0494, 0x0494, 0x0496, 0x0496,
+ 0x0498, 0x0498, 0x049a, 0x049a, 0x049c, 0x049c, 0x049e, 0x049e,
+ 0x04a0, 0x04a0, 0x04a2, 0x04a2, 0x04a4, 0x04a4, 0x04a6, 0x04a6,
+ 0x04a8, 0x04a8, 0x04aa, 0x04aa, 0x04ac, 0x04ac, 0x04ae, 0x04ae,
+ 0x04b0, 0x04b0, 0x04b2, 0x04b2, 0x04b4, 0x04b4, 0x04b6, 0x04b6,
+ 0x04b8, 0x04b8, 0x04ba, 0x04ba, 0x04bc, 0x04bc, 0x04be, 0x04be,
+ 0x04c0, 0x04c1, 0x04c1, 0x04c3, 0x04c3, 0x04c5, 0x04c5, 0x04c7,
+ 0x04c7, 0x04c9, 0x04c9, 0x04cb, 0x04cb, 0x04cd, 0x04cd, 0x04c0,
+ 0x04d0, 0x04d0, 0x04d2, 0x04d2, 0x04d4, 0x04d4, 0x04d6, 0x04d6,
+ 0x04d8, 0x04d8, 0x04da, 0x04da, 0x04dc, 0x04dc, 0x04de, 0x04de,
+ 0x04e0, 0x04e0, 0x04e2, 0x04e2, 0x04e4, 0x04e4, 0x04e6, 0x04e6,
+ 0x04e8, 0x04e8, 0x04ea, 0x04ea, 0x04ec, 0x04ec, 0x04ee, 0x04ee,
+ 0x04f0, 0x04f0, 0x04f2, 0x04f2, 0x04f4, 0x04f4, 0x04f6, 0x04f6,
+ 0x04f8, 0x04f8, 0x04fa, 0x04fa, 0x04fc, 0x04fc, 0x04fe, 0x04fe,
+ 0x0500, 0x0500, 0x0502, 0x0502, 0x0504, 0x0504, 0x0506, 0x0506,
+ 0x0508, 0x0508, 0x050a, 0x050a, 0x050c, 0x050c, 0x050e, 0x050e,
+ 0x0510, 0x0510, 0x0512, 0x0512, 0x0514, 0x0515, 0x0516, 0x0517,
+ 0x0518, 0x0519, 0x051a, 0x051b, 0x051c, 0x051d, 0x051e, 0x051f,
+ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527,
+ 0x0528, 0x0529, 0x052a, 0x052b, 0x052c, 0x052d, 0x052e, 0x052f,
+ 0x0530, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537,
+ 0x0538, 0x0539, 0x053a, 0x053b, 0x053c, 0x053d, 0x053e, 0x053f,
+ 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547,
+ 0x0548, 0x0549, 0x054a, 0x054b, 0x054c, 0x054d, 0x054e, 0x054f,
+ 0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0x0557,
+ 0x0558, 0x0559, 0x055a, 0x055b, 0x055c, 0x055d, 0x055e, 0x055f,
+ 0x0560, 0x0531, 0x0532, 0x0533, 0x0534, 0x0535, 0x0536, 0x0537,
+ 0x0538, 0x0539, 0x053a, 0x053b, 0x053c, 0x053d, 0x053e, 0x053f,
+ 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0546, 0x0547,
+ 0x0548, 0x0549, 0x054a, 0x054b, 0x054c, 0x054d, 0x054e, 0x054f,
+ 0x0550, 0x0551, 0x0552, 0x0553, 0x0554, 0x0555, 0x0556, 0xffff,
+ 0x17f6, 0x2c63, 0x1d7e, 0x1d7f, 0x1d80, 0x1d81, 0x1d82, 0x1d83,
+ 0x1d84, 0x1d85, 0x1d86, 0x1d87, 0x1d88, 0x1d89, 0x1d8a, 0x1d8b,
+ 0x1d8c, 0x1d8d, 0x1d8e, 0x1d8f, 0x1d90, 0x1d91, 0x1d92, 0x1d93,
+ 0x1d94, 0x1d95, 0x1d96, 0x1d97, 0x1d98, 0x1d99, 0x1d9a, 0x1d9b,
+ 0x1d9c, 0x1d9d, 0x1d9e, 0x1d9f, 0x1da0, 0x1da1, 0x1da2, 0x1da3,
+ 0x1da4, 0x1da5, 0x1da6, 0x1da7, 0x1da8, 0x1da9, 0x1daa, 0x1dab,
+ 0x1dac, 0x1dad, 0x1dae, 0x1daf, 0x1db0, 0x1db1, 0x1db2, 0x1db3,
+ 0x1db4, 0x1db5, 0x1db6, 0x1db7, 0x1db8, 0x1db9, 0x1dba, 0x1dbb,
+ 0x1dbc, 0x1dbd, 0x1dbe, 0x1dbf, 0x1dc0, 0x1dc1, 0x1dc2, 0x1dc3,
+ 0x1dc4, 0x1dc5, 0x1dc6, 0x1dc7, 0x1dc8, 0x1dc9, 0x1dca, 0x1dcb,
+ 0x1dcc, 0x1dcd, 0x1dce, 0x1dcf, 0x1dd0, 0x1dd1, 0x1dd2, 0x1dd3,
+ 0x1dd4, 0x1dd5, 0x1dd6, 0x1dd7, 0x1dd8, 0x1dd9, 0x1dda, 0x1ddb,
+ 0x1ddc, 0x1ddd, 0x1dde, 0x1ddf, 0x1de0, 0x1de1, 0x1de2, 0x1de3,
+ 0x1de4, 0x1de5, 0x1de6, 0x1de7, 0x1de8, 0x1de9, 0x1dea, 0x1deb,
+ 0x1dec, 0x1ded, 0x1dee, 0x1def, 0x1df0, 0x1df1, 0x1df2, 0x1df3,
+ 0x1df4, 0x1df5, 0x1df6, 0x1df7, 0x1df8, 0x1df9, 0x1dfa, 0x1dfb,
+ 0x1dfc, 0x1dfd, 0x1dfe, 0x1dff, 0x1e00, 0x1e00, 0x1e02, 0x1e02,
+ 0x1e04, 0x1e04, 0x1e06, 0x1e06, 0x1e08, 0x1e08, 0x1e0a, 0x1e0a,
+ 0x1e0c, 0x1e0c, 0x1e0e, 0x1e0e, 0x1e10, 0x1e10, 0x1e12, 0x1e12,
+ 0x1e14, 0x1e14, 0x1e16, 0x1e16, 0x1e18, 0x1e18, 0x1e1a, 0x1e1a,
+ 0x1e1c, 0x1e1c, 0x1e1e, 0x1e1e, 0x1e20, 0x1e20, 0x1e22, 0x1e22,
+ 0x1e24, 0x1e24, 0x1e26, 0x1e26, 0x1e28, 0x1e28, 0x1e2a, 0x1e2a,
+ 0x1e2c, 0x1e2c, 0x1e2e, 0x1e2e, 0x1e30, 0x1e30, 0x1e32, 0x1e32,
+ 0x1e34, 0x1e34, 0x1e36, 0x1e36, 0x1e38, 0x1e38, 0x1e3a, 0x1e3a,
+ 0x1e3c, 0x1e3c, 0x1e3e, 0x1e3e, 0x1e40, 0x1e40, 0x1e42, 0x1e42,
+ 0x1e44, 0x1e44, 0x1e46, 0x1e46, 0x1e48, 0x1e48, 0x1e4a, 0x1e4a,
+ 0x1e4c, 0x1e4c, 0x1e4e, 0x1e4e, 0x1e50, 0x1e50, 0x1e52, 0x1e52,
+ 0x1e54, 0x1e54, 0x1e56, 0x1e56, 0x1e58, 0x1e58, 0x1e5a, 0x1e5a,
+ 0x1e5c, 0x1e5c, 0x1e5e, 0x1e5e, 0x1e60, 0x1e60, 0x1e62, 0x1e62,
+ 0x1e64, 0x1e64, 0x1e66, 0x1e66, 0x1e68, 0x1e68, 0x1e6a, 0x1e6a,
+ 0x1e6c, 0x1e6c, 0x1e6e, 0x1e6e, 0x1e70, 0x1e70, 0x1e72, 0x1e72,
+ 0x1e74, 0x1e74, 0x1e76, 0x1e76, 0x1e78, 0x1e78, 0x1e7a, 0x1e7a,
+ 0x1e7c, 0x1e7c, 0x1e7e, 0x1e7e, 0x1e80, 0x1e80, 0x1e82, 0x1e82,
+ 0x1e84, 0x1e84, 0x1e86, 0x1e86, 0x1e88, 0x1e88, 0x1e8a, 0x1e8a,
+ 0x1e8c, 0x1e8c, 0x1e8e, 0x1e8e, 0x1e90, 0x1e90, 0x1e92, 0x1e92,
+ 0x1e94, 0x1e94, 0x1e96, 0x1e97, 0x1e98, 0x1e99, 0x1e9a, 0x1e9b,
+ 0x1e9c, 0x1e9d, 0x1e9e, 0x1e9f, 0x1ea0, 0x1ea0, 0x1ea2, 0x1ea2,
+ 0x1ea4, 0x1ea4, 0x1ea6, 0x1ea6, 0x1ea8, 0x1ea8, 0x1eaa, 0x1eaa,
+ 0x1eac, 0x1eac, 0x1eae, 0x1eae, 0x1eb0, 0x1eb0, 0x1eb2, 0x1eb2,
+ 0x1eb4, 0x1eb4, 0x1eb6, 0x1eb6, 0x1eb8, 0x1eb8, 0x1eba, 0x1eba,
+ 0x1ebc, 0x1ebc, 0x1ebe, 0x1ebe, 0x1ec0, 0x1ec0, 0x1ec2, 0x1ec2,
+ 0x1ec4, 0x1ec4, 0x1ec6, 0x1ec6, 0x1ec8, 0x1ec8, 0x1eca, 0x1eca,
+ 0x1ecc, 0x1ecc, 0x1ece, 0x1ece, 0x1ed0, 0x1ed0, 0x1ed2, 0x1ed2,
+ 0x1ed4, 0x1ed4, 0x1ed6, 0x1ed6, 0x1ed8, 0x1ed8, 0x1eda, 0x1eda,
+ 0x1edc, 0x1edc, 0x1ede, 0x1ede, 0x1ee0, 0x1ee0, 0x1ee2, 0x1ee2,
+ 0x1ee4, 0x1ee4, 0x1ee6, 0x1ee6, 0x1ee8, 0x1ee8, 0x1eea, 0x1eea,
+ 0x1eec, 0x1eec, 0x1eee, 0x1eee, 0x1ef0, 0x1ef0, 0x1ef2, 0x1ef2,
+ 0x1ef4, 0x1ef4, 0x1ef6, 0x1ef6, 0x1ef8, 0x1ef8, 0x1efa, 0x1efb,
+ 0x1efc, 0x1efd, 0x1efe, 0x1eff, 0x1f08, 0x1f09, 0x1f0a, 0x1f0b,
+ 0x1f0c, 0x1f0d, 0x1f0e, 0x1f0f, 0x1f08, 0x1f09, 0x1f0a, 0x1f0b,
+ 0x1f0c, 0x1f0d, 0x1f0e, 0x1f0f, 0x1f18, 0x1f19, 0x1f1a, 0x1f1b,
+ 0x1f1c, 0x1f1d, 0x1f16, 0x1f17, 0x1f18, 0x1f19, 0x1f1a, 0x1f1b,
+ 0x1f1c, 0x1f1d, 0x1f1e, 0x1f1f, 0x1f28, 0x1f29, 0x1f2a, 0x1f2b,
+ 0x1f2c, 0x1f2d, 0x1f2e, 0x1f2f, 0x1f28, 0x1f29, 0x1f2a, 0x1f2b,
+ 0x1f2c, 0x1f2d, 0x1f2e, 0x1f2f, 0x1f38, 0x1f39, 0x1f3a, 0x1f3b,
+ 0x1f3c, 0x1f3d, 0x1f3e, 0x1f3f, 0x1f38, 0x1f39, 0x1f3a, 0x1f3b,
+ 0x1f3c, 0x1f3d, 0x1f3e, 0x1f3f, 0x1f48, 0x1f49, 0x1f4a, 0x1f4b,
+ 0x1f4c, 0x1f4d, 0x1f46, 0x1f47, 0x1f48, 0x1f49, 0x1f4a, 0x1f4b,
+ 0x1f4c, 0x1f4d, 0x1f4e, 0x1f4f, 0x1f50, 0x1f59, 0x1f52, 0x1f5b,
+ 0x1f54, 0x1f5d, 0x1f56, 0x1f5f, 0x1f58, 0x1f59, 0x1f5a, 0x1f5b,
+ 0x1f5c, 0x1f5d, 0x1f5e, 0x1f5f, 0x1f68, 0x1f69, 0x1f6a, 0x1f6b,
+ 0x1f6c, 0x1f6d, 0x1f6e, 0x1f6f, 0x1f68, 0x1f69, 0x1f6a, 0x1f6b,
+ 0x1f6c, 0x1f6d, 0x1f6e, 0x1f6f, 0x1fba, 0x1fbb, 0x1fc8, 0x1fc9,
+ 0x1fca, 0x1fcb, 0x1fda, 0x1fdb, 0x1ff8, 0x1ff9, 0x1fea, 0x1feb,
+ 0x1ffa, 0x1ffb, 0x1f7e, 0x1f7f, 0x1f88, 0x1f89, 0x1f8a, 0x1f8b,
+ 0x1f8c, 0x1f8d, 0x1f8e, 0x1f8f, 0x1f88, 0x1f89, 0x1f8a, 0x1f8b,
+ 0x1f8c, 0x1f8d, 0x1f8e, 0x1f8f, 0x1f98, 0x1f99, 0x1f9a, 0x1f9b,
+ 0x1f9c, 0x1f9d, 0x1f9e, 0x1f9f, 0x1f98, 0x1f99, 0x1f9a, 0x1f9b,
+ 0x1f9c, 0x1f9d, 0x1f9e, 0x1f9f, 0x1fa8, 0x1fa9, 0x1faa, 0x1fab,
+ 0x1fac, 0x1fad, 0x1fae, 0x1faf, 0x1fa8, 0x1fa9, 0x1faa, 0x1fab,
+ 0x1fac, 0x1fad, 0x1fae, 0x1faf, 0x1fb8, 0x1fb9, 0x1fb2, 0x1fbc,
+ 0x1fb4, 0x1fb5, 0x1fb6, 0x1fb7, 0x1fb8, 0x1fb9, 0x1fba, 0x1fbb,
+ 0x1fbc, 0x1fbd, 0x1fbe, 0x1fbf, 0x1fc0, 0x1fc1, 0x1fc2, 0x1fc3,
+ 0x1fc4, 0x1fc5, 0x1fc6, 0x1fc7, 0x1fc8, 0x1fc9, 0x1fca, 0x1fcb,
+ 0x1fc3, 0x1fcd, 0x1fce, 0x1fcf, 0x1fd8, 0x1fd9, 0x1fd2, 0x1fd3,
+ 0x1fd4, 0x1fd5, 0x1fd6, 0x1fd7, 0x1fd8, 0x1fd9, 0x1fda, 0x1fdb,
+ 0x1fdc, 0x1fdd, 0x1fde, 0x1fdf, 0x1fe8, 0x1fe9, 0x1fe2, 0x1fe3,
+ 0x1fe4, 0x1fec, 0x1fe6, 0x1fe7, 0x1fe8, 0x1fe9, 0x1fea, 0x1feb,
+ 0x1fec, 0x1fed, 0x1fee, 0x1fef, 0x1ff0, 0x1ff1, 0x1ff2, 0x1ff3,
+ 0x1ff4, 0x1ff5, 0x1ff6, 0x1ff7, 0x1ff8, 0x1ff9, 0x1ffa, 0x1ffb,
+ 0x1ff3, 0x1ffd, 0x1ffe, 0x1fff, 0x2000, 0x2001, 0x2002, 0x2003,
+ 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200a, 0x200b,
+ 0x200c, 0x200d, 0x200e, 0x200f, 0x2010, 0x2011, 0x2012, 0x2013,
+ 0x2014, 0x2015, 0x2016, 0x2017, 0x2018, 0x2019, 0x201a, 0x201b,
+ 0x201c, 0x201d, 0x201e, 0x201f, 0x2020, 0x2021, 0x2022, 0x2023,
+ 0x2024, 0x2025, 0x2026, 0x2027, 0x2028, 0x2029, 0x202a, 0x202b,
+ 0x202c, 0x202d, 0x202e, 0x202f, 0x2030, 0x2031, 0x2032, 0x2033,
+ 0x2034, 0x2035, 0x2036, 0x2037, 0x2038, 0x2039, 0x203a, 0x203b,
+ 0x203c, 0x203d, 0x203e, 0x203f, 0x2040, 0x2041, 0x2042, 0x2043,
+ 0x2044, 0x2045, 0x2046, 0x2047, 0x2048, 0x2049, 0x204a, 0x204b,
+ 0x204c, 0x204d, 0x204e, 0x204f, 0x2050, 0x2051, 0x2052, 0x2053,
+ 0x2054, 0x2055, 0x2056, 0x2057, 0x2058, 0x2059, 0x205a, 0x205b,
+ 0x205c, 0x205d, 0x205e, 0x205f, 0x2060, 0x2061, 0x2062, 0x2063,
+ 0x2064, 0x2065, 0x2066, 0x2067, 0x2068, 0x2069, 0x206a, 0x206b,
+ 0x206c, 0x206d, 0x206e, 0x206f, 0x2070, 0x2071, 0x2072, 0x2073,
+ 0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x207a, 0x207b,
+ 0x207c, 0x207d, 0x207e, 0x207f, 0x2080, 0x2081, 0x2082, 0x2083,
+ 0x2084, 0x2085, 0x2086, 0x2087, 0x2088, 0x2089, 0x208a, 0x208b,
+ 0x208c, 0x208d, 0x208e, 0x208f, 0x2090, 0x2091, 0x2092, 0x2093,
+ 0x2094, 0x2095, 0x2096, 0x2097, 0x2098, 0x2099, 0x209a, 0x209b,
+ 0x209c, 0x209d, 0x209e, 0x209f, 0x20a0, 0x20a1, 0x20a2, 0x20a3,
+ 0x20a4, 0x20a5, 0x20a6, 0x20a7, 0x20a8, 0x20a9, 0x20aa, 0x20ab,
+ 0x20ac, 0x20ad, 0x20ae, 0x20af, 0x20b0, 0x20b1, 0x20b2, 0x20b3,
+ 0x20b4, 0x20b5, 0x20b6, 0x20b7, 0x20b8, 0x20b9, 0x20ba, 0x20bb,
+ 0x20bc, 0x20bd, 0x20be, 0x20bf, 0x20c0, 0x20c1, 0x20c2, 0x20c3,
+ 0x20c4, 0x20c5, 0x20c6, 0x20c7, 0x20c8, 0x20c9, 0x20ca, 0x20cb,
+ 0x20cc, 0x20cd, 0x20ce, 0x20cf, 0x20d0, 0x20d1, 0x20d2, 0x20d3,
+ 0x20d4, 0x20d5, 0x20d6, 0x20d7, 0x20d8, 0x20d9, 0x20da, 0x20db,
+ 0x20dc, 0x20dd, 0x20de, 0x20df, 0x20e0, 0x20e1, 0x20e2, 0x20e3,
+ 0x20e4, 0x20e5, 0x20e6, 0x20e7, 0x20e8, 0x20e9, 0x20ea, 0x20eb,
+ 0x20ec, 0x20ed, 0x20ee, 0x20ef, 0x20f0, 0x20f1, 0x20f2, 0x20f3,
+ 0x20f4, 0x20f5, 0x20f6, 0x20f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb,
+ 0x20fc, 0x20fd, 0x20fe, 0x20ff, 0x2100, 0x2101, 0x2102, 0x2103,
+ 0x2104, 0x2105, 0x2106, 0x2107, 0x2108, 0x2109, 0x210a, 0x210b,
+ 0x210c, 0x210d, 0x210e, 0x210f, 0x2110, 0x2111, 0x2112, 0x2113,
+ 0x2114, 0x2115, 0x2116, 0x2117, 0x2118, 0x2119, 0x211a, 0x211b,
+ 0x211c, 0x211d, 0x211e, 0x211f, 0x2120, 0x2121, 0x2122, 0x2123,
+ 0x2124, 0x2125, 0x2126, 0x2127, 0x2128, 0x2129, 0x212a, 0x212b,
+ 0x212c, 0x212d, 0x212e, 0x212f, 0x2130, 0x2131, 0x2132, 0x2133,
+ 0x2134, 0x2135, 0x2136, 0x2137, 0x2138, 0x2139, 0x213a, 0x213b,
+ 0x213c, 0x213d, 0x213e, 0x213f, 0x2140, 0x2141, 0x2142, 0x2143,
+ 0x2144, 0x2145, 0x2146, 0x2147, 0x2148, 0x2149, 0x214a, 0x214b,
+ 0x214c, 0x214d, 0x2132, 0x214f, 0x2150, 0x2151, 0x2152, 0x2153,
+ 0x2154, 0x2155, 0x2156, 0x2157, 0x2158, 0x2159, 0x215a, 0x215b,
+ 0x215c, 0x215d, 0x215e, 0x215f, 0x2160, 0x2161, 0x2162, 0x2163,
+ 0x2164, 0x2165, 0x2166, 0x2167, 0x2168, 0x2169, 0x216a, 0x216b,
+ 0x216c, 0x216d, 0x216e, 0x216f, 0x2160, 0x2161, 0x2162, 0x2163,
+ 0x2164, 0x2165, 0x2166, 0x2167, 0x2168, 0x2169, 0x216a, 0x216b,
+ 0x216c, 0x216d, 0x216e, 0x216f, 0x2180, 0x2181, 0x2182, 0x2183,
+ 0x2183, 0xffff, 0x034b, 0x24b6, 0x24b7, 0x24b8, 0x24b9, 0x24ba,
+ 0x24bb, 0x24bc, 0x24bd, 0x24be, 0x24bf, 0x24c0, 0x24c1, 0x24c2,
+ 0x24c3, 0x24c4, 0x24c5, 0x24c6, 0x24c7, 0x24c8, 0x24c9, 0x24ca,
+ 0x24cb, 0x24cc, 0x24cd, 0x24ce, 0x24cf, 0xffff, 0x0746, 0x2c00,
+ 0x2c01, 0x2c02, 0x2c03, 0x2c04, 0x2c05, 0x2c06, 0x2c07, 0x2c08,
+ 0x2c09, 0x2c0a, 0x2c0b, 0x2c0c, 0x2c0d, 0x2c0e, 0x2c0f, 0x2c10,
+ 0x2c11, 0x2c12, 0x2c13, 0x2c14, 0x2c15, 0x2c16, 0x2c17, 0x2c18,
+ 0x2c19, 0x2c1a, 0x2c1b, 0x2c1c, 0x2c1d, 0x2c1e, 0x2c1f, 0x2c20,
+ 0x2c21, 0x2c22, 0x2c23, 0x2c24, 0x2c25, 0x2c26, 0x2c27, 0x2c28,
+ 0x2c29, 0x2c2a, 0x2c2b, 0x2c2c, 0x2c2d, 0x2c2e, 0x2c5f, 0x2c60,
+ 0x2c60, 0x2c62, 0x2c63, 0x2c64, 0x2c65, 0x2c66, 0x2c67, 0x2c67,
+ 0x2c69, 0x2c69, 0x2c6b, 0x2c6b, 0x2c6d, 0x2c6e, 0x2c6f, 0x2c70,
+ 0x2c71, 0x2c72, 0x2c73, 0x2c74, 0x2c75, 0x2c75, 0x2c77, 0x2c78,
+ 0x2c79, 0x2c7a, 0x2c7b, 0x2c7c, 0x2c7d, 0x2c7e, 0x2c7f, 0x2c80,
+ 0x2c80, 0x2c82, 0x2c82, 0x2c84, 0x2c84, 0x2c86, 0x2c86, 0x2c88,
+ 0x2c88, 0x2c8a, 0x2c8a, 0x2c8c, 0x2c8c, 0x2c8e, 0x2c8e, 0x2c90,
+ 0x2c90, 0x2c92, 0x2c92, 0x2c94, 0x2c94, 0x2c96, 0x2c96, 0x2c98,
+ 0x2c98, 0x2c9a, 0x2c9a, 0x2c9c, 0x2c9c, 0x2c9e, 0x2c9e, 0x2ca0,
+ 0x2ca0, 0x2ca2, 0x2ca2, 0x2ca4, 0x2ca4, 0x2ca6, 0x2ca6, 0x2ca8,
+ 0x2ca8, 0x2caa, 0x2caa, 0x2cac, 0x2cac, 0x2cae, 0x2cae, 0x2cb0,
+ 0x2cb0, 0x2cb2, 0x2cb2, 0x2cb4, 0x2cb4, 0x2cb6, 0x2cb6, 0x2cb8,
+ 0x2cb8, 0x2cba, 0x2cba, 0x2cbc, 0x2cbc, 0x2cbe, 0x2cbe, 0x2cc0,
+ 0x2cc0, 0x2cc2, 0x2cc2, 0x2cc4, 0x2cc4, 0x2cc6, 0x2cc6, 0x2cc8,
+ 0x2cc8, 0x2cca, 0x2cca, 0x2ccc, 0x2ccc, 0x2cce, 0x2cce, 0x2cd0,
+ 0x2cd0, 0x2cd2, 0x2cd2, 0x2cd4, 0x2cd4, 0x2cd6, 0x2cd6, 0x2cd8,
+ 0x2cd8, 0x2cda, 0x2cda, 0x2cdc, 0x2cdc, 0x2cde, 0x2cde, 0x2ce0,
+ 0x2ce0, 0x2ce2, 0x2ce2, 0x2ce4, 0x2ce5, 0x2ce6, 0x2ce7, 0x2ce8,
+ 0x2ce9, 0x2cea, 0x2ceb, 0x2cec, 0x2ced, 0x2cee, 0x2cef, 0x2cf0,
+ 0x2cf1, 0x2cf2, 0x2cf3, 0x2cf4, 0x2cf5, 0x2cf6, 0x2cf7, 0x2cf8,
+ 0x2cf9, 0x2cfa, 0x2cfb, 0x2cfc, 0x2cfd, 0x2cfe, 0x2cff, 0x10a0,
+ 0x10a1, 0x10a2, 0x10a3, 0x10a4, 0x10a5, 0x10a6, 0x10a7, 0x10a8,
+ 0x10a9, 0x10aa, 0x10ab, 0x10ac, 0x10ad, 0x10ae, 0x10af, 0x10b0,
+ 0x10b1, 0x10b2, 0x10b3, 0x10b4, 0x10b5, 0x10b6, 0x10b7, 0x10b8,
+ 0x10b9, 0x10ba, 0x10bb, 0x10bc, 0x10bd, 0x10be, 0x10bf, 0x10c0,
+ 0x10c1, 0x10c2, 0x10c3, 0x10c4, 0x10c5, 0xffff, 0xd21b, 0xff21,
+ 0xff22, 0xff23, 0xff24, 0xff25, 0xff26, 0xff27, 0xff28, 0xff29,
+ 0xff2a, 0xff2b, 0xff2c, 0xff2d, 0xff2e, 0xff2f, 0xff30, 0xff31,
+ 0xff32, 0xff33, 0xff34, 0xff35, 0xff36, 0xff37, 0xff38, 0xff39,
+ 0xff3a, 0xff5b, 0xff5c, 0xff5d, 0xff5e, 0xff5f, 0xff60, 0xff61,
+ 0xff62, 0xff63, 0xff64, 0xff65, 0xff66, 0xff67, 0xff68, 0xff69,
+ 0xff6a, 0xff6b, 0xff6c, 0xff6d, 0xff6e, 0xff6f, 0xff70, 0xff71,
+ 0xff72, 0xff73, 0xff74, 0xff75, 0xff76, 0xff77, 0xff78, 0xff79,
+ 0xff7a, 0xff7b, 0xff7c, 0xff7d, 0xff7e, 0xff7f, 0xff80, 0xff81,
+ 0xff82, 0xff83, 0xff84, 0xff85, 0xff86, 0xff87, 0xff88, 0xff89,
+ 0xff8a, 0xff8b, 0xff8c, 0xff8d, 0xff8e, 0xff8f, 0xff90, 0xff91,
+ 0xff92, 0xff93, 0xff94, 0xff95, 0xff96, 0xff97, 0xff98, 0xff99,
+ 0xff9a, 0xff9b, 0xff9c, 0xff9d, 0xff9e, 0xff9f, 0xffa0, 0xffa1,
+ 0xffa2, 0xffa3, 0xffa4, 0xffa5, 0xffa6, 0xffa7, 0xffa8, 0xffa9,
+ 0xffaa, 0xffab, 0xffac, 0xffad, 0xffae, 0xffaf, 0xffb0, 0xffb1,
+ 0xffb2, 0xffb3, 0xffb4, 0xffb5, 0xffb6, 0xffb7, 0xffb8, 0xffb9,
+ 0xffba, 0xffbb, 0xffbc, 0xffbd, 0xffbe, 0xffbf, 0xffc0, 0xffc1,
+ 0xffc2, 0xffc3, 0xffc4, 0xffc5, 0xffc6, 0xffc7, 0xffc8, 0xffc9,
+ 0xffca, 0xffcb, 0xffcc, 0xffcd, 0xffce, 0xffcf, 0xffd0, 0xffd1,
+ 0xffd2, 0xffd3, 0xffd4, 0xffd5, 0xffd6, 0xffd7, 0xffd8, 0xffd9,
+ 0xffda, 0xffdb, 0xffdc, 0xffdd, 0xffde, 0xffdf, 0xffe0, 0xffe1,
+ 0xffe2, 0xffe3, 0xffe4, 0xffe5, 0xffe6, 0xffe7, 0xffe8, 0xffe9,
+ 0xffea, 0xffeb, 0xffec, 0xffed, 0xffee, 0xffef, 0xfff0, 0xfff1,
+ 0xfff2, 0xfff3, 0xfff4, 0xfff5, 0xfff6, 0xfff7, 0xfff8, 0xfff9,
+ 0xfffa, 0xfffb, 0xfffc, 0xfffd, 0xfffe, 0xffff,
+};
+
+/*
+ * Allow full-width illegal characters:
+ * MS Windows 7 accepts the full-width variants of characters that are
+ * invalid in names, so for compatibility only the half-width (ASCII)
+ * invalid name characters need to be checked:
+ *
+ *   " * / : < > ? \ |
+ */
+static unsigned short bad_uni_chars[] = {
+ 0x0022, 0x002A, 0x002F, 0x003A,
+ 0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
+ 0
+};
+
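For illustration, the check that this table feeds (used below in exfat_utf8_to_utf16() and exfat_nls_to_ucs2()) can be summarized as a small standalone helper; exfat_char_is_lossy() is an invented name and is not part of the patch:

```c
#include <stdbool.h>

/* hypothetical standalone sketch of the invalid-character test */
static bool exfat_char_is_lossy(unsigned short ucs2)
{
	static const unsigned short bad[] = {
		0x0022, 0x002A, 0x002F, 0x003A,
		0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
		0
	};
	const unsigned short *p;

	if (ucs2 < 0x0020)	/* control characters are always rejected */
		return true;
	for (p = bad; *p; p++)	/* reserved set: " * / : < > ? \ | */
		if (*p == ucs2)
			return true;
	return false;
}
```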
+static int exfat_convert_char_to_ucs2(struct nls_table *nls,
+ const unsigned char *ch, int ch_len, unsigned short *ucs2,
+ int *lossy)
+{
+ int len;
+
+ *ucs2 = 0x0;
+
+ if (ch[0] < 0x80) {
+ *ucs2 = ch[0];
+ return 1;
+ }
+
+ len = nls->char2uni(ch, ch_len, ucs2);
+ if (len < 0) {
+ /* conversion failed */
+ if (lossy != NULL)
+ *lossy |= NLS_NAME_LOSSY;
+ *ucs2 = '_';
+ return 1;
+ }
+ return len;
+}
+
+static int exfat_convert_ucs2_to_char(struct nls_table *nls,
+ unsigned short ucs2, unsigned char *ch, int *lossy)
+{
+ int len;
+
+ ch[0] = 0x0;
+
+ if (ucs2 < 0x0080) {
+ ch[0] = ucs2;
+ return 1;
+ }
+
+ len = nls->uni2char(ucs2, ch, MAX_CHARSET_SIZE);
+ if (len < 0) {
+ /* conversion failed */
+ if (lossy != NULL)
+ *lossy |= NLS_NAME_LOSSY;
+ ch[0] = '_';
+ return 1;
+ }
+ return len;
+}
+
+unsigned short exfat_toupper(struct super_block *sb, unsigned short a)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
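+	/* a zero entry (the kcalloc() default) means the character upcases to itself */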
+ return sbi->vol_utbl[a] ? sbi->vol_utbl[a] : a;
+}
+
+static unsigned short *exfat_wstrchr(unsigned short *str, unsigned short wchar)
+{
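+	/* note: returns the position just past the match; callers only test for NULL */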
+ while (*str) {
+ if (*(str++) == wchar)
+ return str;
+ }
+ return NULL;
+}
+
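+/* an equality test, not an ordering: returns 0 on a case-insensitive match, 1 otherwise */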
+int exfat_uniname_ncmp(struct super_block *sb, unsigned short *a,
+ unsigned short *b, unsigned int len)
+{
+	unsigned int i;
+
+ for (i = 0; i < len; i++, a++, b++)
+ if (exfat_toupper(sb, *a) != exfat_toupper(sb, *b))
+ return 1;
+ return 0;
+}
+
+static int exfat_utf16_to_utf8(struct super_block *sb,
+ struct exfat_uni_name *p_uniname, unsigned char *p_cstring,
+ int buflen)
+{
+ int len;
+ const unsigned short *uniname = p_uniname->name;
+
+	/* utf16s_to_utf8s() never returns a negative length */
+ len = utf16s_to_utf8s(uniname, MAX_NAME_LENGTH, UTF16_HOST_ENDIAN,
+ p_cstring, buflen);
+ p_cstring[len] = '\0';
+ return len;
+}
+
+static int exfat_utf8_to_utf16(struct super_block *sb,
+ const unsigned char *p_cstring, const int len,
+ struct exfat_uni_name *p_uniname, int *p_lossy)
+{
+ int i, unilen, lossy = NLS_NAME_NO_LOSSY;
+ unsigned short upname[MAX_NAME_LENGTH + 1];
+ unsigned short *uniname = p_uniname->name;
+
+ WARN_ON(!len);
+
+ unilen = utf8s_to_utf16s(p_cstring, len, UTF16_HOST_ENDIAN,
+ (wchar_t *)uniname, MAX_NAME_LENGTH + 2);
+ if (unilen < 0) {
+ exfat_msg(sb, KERN_ERR,
+ "failed to %s (err : %d) nls len : %d",
+ __func__, unilen, len);
+ return unilen;
+ }
+
+ if (unilen > MAX_NAME_LENGTH) {
+ exfat_msg(sb, KERN_ERR,
+ "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
+ __func__, len, unilen, MAX_NAME_LENGTH);
+ return -ENAMETOOLONG;
+ }
+
+ for (i = 0; i < unilen; i++) {
+ if (*uniname < 0x0020 ||
+ exfat_wstrchr(bad_uni_chars, *uniname))
+ lossy |= NLS_NAME_LOSSY;
+
+ upname[i] = exfat_toupper(sb, *uniname);
+ uniname++;
+ }
+
+ *uniname = '\0';
+ p_uniname->name_len = unilen;
+ p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0,
+ CS_DEFAULT);
+
+ if (p_lossy)
+ *p_lossy = lossy;
+ return unilen;
+}
+
+#define PLANE_SIZE 0x00010000
+#define SURROGATE_MASK 0xfffff800
+#define SURROGATE_PAIR 0x0000d800
+#define SURROGATE_LOW 0x00000400
+#define SURROGATE_BITS 0x000003ff
+
+unsigned short exfat_high_surrogate(unicode_t u)
+{
+ return ((u - PLANE_SIZE) >> 10) + SURROGATE_PAIR;
+}
+
+unsigned short exfat_low_surrogate(unicode_t u)
+{
+ return ((u - PLANE_SIZE) & SURROGATE_BITS) | SURROGATE_PAIR |
+ SURROGATE_LOW;
+}
+
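A quick sanity check of the arithmetic above, using plain constants in place of the macros; the values are the standard UTF-16 encoding of U+1F600, and surrogate_example() is an invented name:

```c
#include <assert.h>

/* U+1F600 encodes as the surrogate pair <0xD83D, 0xDE00> */
static void surrogate_example(void)
{
	unsigned int u = 0x1F600;

	/* high: ((u - 0x10000) >> 10) + 0xD800 */
	assert((((u - 0x10000) >> 10) + 0xD800) == 0xD83D);
	/* low: ((u - 0x10000) & 0x3FF) | 0xD800 | 0x400 */
	assert((((u - 0x10000) & 0x3FF) | 0xD800 | 0x400) == 0xDE00);
}
```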
+static int __exfat_utf16_to_nls(struct super_block *sb,
+ struct exfat_uni_name *p_uniname, unsigned char *p_cstring,
+ int buflen)
+{
+ int i, j, len, out_len = 0;
+ unsigned char buf[MAX_CHARSET_SIZE];
+ const unsigned short *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_io;
+
+ i = 0;
+ while (i < MAX_NAME_LENGTH && out_len < (buflen - 1)) {
+ if (*uniname == '\0')
+ break;
+ if ((*uniname & SURROGATE_MASK) != SURROGATE_PAIR) {
+ len = exfat_convert_ucs2_to_char(nls, *uniname, buf,
+ NULL);
+ } else {
+ /* Process UTF-16 surrogate pair as one character */
+ if (!(*uniname & SURROGATE_LOW) &&
+ i+1 < MAX_NAME_LENGTH &&
+ (*(uniname+1) & SURROGATE_MASK) == SURROGATE_PAIR &&
+ (*(uniname+1) & SURROGATE_LOW)) {
+ uniname++;
+ i++;
+ }
+
+			/*
+			 * A UTF-16 surrogate pair encodes a code point above
+			 * U+FFFF. Code points above U+FFFF are not supported
+			 * by the kernel NLS framework, so emit a replacement
+			 * character instead.
+			 */
+ len = 1;
+ buf[0] = '_';
+ }
+
+ if (out_len + len >= buflen)
+ len = buflen - 1 - out_len;
+ out_len += len;
+
+ if (len > 1) {
+ for (j = 0; j < len; j++)
+ *p_cstring++ = buf[j];
+ } else { /* len == 1 */
+ *p_cstring++ = *buf;
+ }
+
+ uniname++;
+ i++;
+ }
+
+ *p_cstring = '\0';
+ return out_len;
+}
+
+static int exfat_nls_to_ucs2(struct super_block *sb,
+ const unsigned char *p_cstring, const int len,
+ struct exfat_uni_name *p_uniname, int *p_lossy)
+{
+ int i = 0, unilen = 0, lossy = NLS_NAME_NO_LOSSY;
+ unsigned short upname[MAX_NAME_LENGTH + 1];
+ unsigned short *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_io;
+
+ WARN_ON(!len);
+
+ while (unilen < MAX_NAME_LENGTH && i < len) {
+ i += exfat_convert_char_to_ucs2(nls, p_cstring + i, len - i,
+ uniname, &lossy);
+
+ if (*uniname < 0x0020 ||
+ exfat_wstrchr(bad_uni_chars, *uniname))
+ lossy |= NLS_NAME_LOSSY;
+
+ upname[unilen] = exfat_toupper(sb, *uniname);
+ uniname++;
+ unilen++;
+ }
+
+ if (p_cstring[i] != '\0')
+ lossy |= NLS_NAME_OVERLEN;
+
+ *uniname = '\0';
+ p_uniname->name_len = unilen;
+ p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0,
+ CS_DEFAULT);
+
+ if (p_lossy)
+ *p_lossy = lossy;
+ return unilen;
+}
+
+int exfat_utf16_to_nls(struct super_block *sb, struct exfat_uni_name *uniname,
+ unsigned char *p_cstring, int buflen)
+{
+ if (EXFAT_SB(sb)->options.utf8)
+ return exfat_utf16_to_utf8(sb, uniname, p_cstring,
+ buflen);
+ return __exfat_utf16_to_nls(sb, uniname, p_cstring, buflen);
+}
+
+int exfat_nls_to_utf16(struct super_block *sb, const unsigned char *p_cstring,
+ const int len, struct exfat_uni_name *uniname, int *p_lossy)
+{
+ if (EXFAT_SB(sb)->options.utf8)
+ return exfat_utf8_to_utf16(sb, p_cstring, len,
+ uniname, p_lossy);
+ return exfat_nls_to_ucs2(sb, p_cstring, len, uniname, p_lossy);
+}
+
+static int exfat_load_upcase_table(struct super_block *sb,
+ sector_t sector, unsigned long long num_sectors,
+ unsigned int utbl_checksum)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned int sect_size = sb->s_blocksize;
+ unsigned int i, index = 0, checksum = 0;
+ int ret;
+ unsigned char skip = false;
+ unsigned short *upcase_table;
+
+ upcase_table = kcalloc(UTBL_COUNT, sizeof(unsigned short), GFP_KERNEL);
+ if (!upcase_table)
+ return -ENOMEM;
+
+ sbi->vol_utbl = upcase_table;
+ num_sectors += sector;
+
+ while (sector < num_sectors) {
+ struct buffer_head *bh;
+
+ bh = sb_bread(sb, sector);
+ if (!bh) {
+ exfat_msg(sb, KERN_ERR,
+ "failed to read sector(0x%llx)\n",
+ (unsigned long long)sector);
+ ret = -EIO;
+ goto free_table;
+ }
+ sector++;
+ for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) {
+ unsigned short uni = get_unaligned_le16(bh->b_data + i);
+
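+			/* i.e. checksum = ror32(checksum, 1) + next table byte */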
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) +
+ *(((unsigned char *)bh->b_data) + i);
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) +
+ *(((unsigned char *)bh->b_data) + (i + 1));
+
+ if (skip) {
+ index += uni;
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else { /* uni != index , uni != 0xFFFF */
+ upcase_table[index] = uni;
+ index++;
+ }
+ }
+ brelse(bh);
+ }
+
+ if (index >= 0xFFFF && utbl_checksum == checksum)
+ return 0;
+
+ exfat_msg(sb, KERN_ERR,
+ "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)\n",
+ index, checksum, utbl_checksum);
+ ret = -EINVAL;
+free_table:
+ exfat_free_upcase_table(sbi);
+ return ret;
+}
+
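The loop above decodes the run-length compression used by exFAT upcase tables: a value equal to the current index is an identity mapping, 0xFFFF announces that the next word is a count of identity entries to skip, and any other value is an explicit mapping. A hypothetical user-space decoder of the same format (decode_upcase() is an invented name; tbl must hold 0x10000 zero-initialized entries, since zero means identity here):

```c
#include <stddef.h>
#include <stdint.h>

static void decode_upcase(const uint16_t *in, size_t n, uint16_t *tbl)
{
	uint32_t index = 0;
	int skip = 0;
	size_t i;

	for (i = 0; i < n && index <= 0xFFFF; i++) {
		uint16_t uni = in[i];

		if (skip) {			/* previous word was 0xFFFF */
			index += uni;		/* skip a run of identity mappings */
			skip = 0;
		} else if (uni == index) {	/* explicit identity entry */
			index++;
		} else if (uni == 0xFFFF) {	/* next word is a skip count */
			skip = 1;
		} else {			/* explicit upcase mapping */
			tbl[index++] = uni;
		}
	}
}
```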
+static int exfat_load_default_upcase_table(struct super_block *sb)
+{
+ int i, ret = -EIO;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned char skip = false;
+ unsigned short uni = 0, *upcase_table;
+ unsigned int index = 0;
+
+ upcase_table = kcalloc(UTBL_COUNT, sizeof(unsigned short), GFP_KERNEL);
+ if (!upcase_table)
+ return -ENOMEM;
+
+ sbi->vol_utbl = upcase_table;
+
+ for (i = 0; index <= 0xFFFF && i < EXFAT_NUM_UPCASE; i++) {
+ uni = uni_def_upcase[i];
+ if (skip) {
+ index += uni;
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else {
+ upcase_table[index] = uni;
+ index++;
+ }
+ }
+
+ if (index >= 0xFFFF)
+ return 0;
+
+ /* FATAL error: default upcase table has error */
+ exfat_free_upcase_table(sbi);
+ return ret;
+}
+
+int exfat_create_upcase_table(struct super_block *sb)
+{
+ int i, ret;
+ unsigned int tbl_clu, type;
+ sector_t sector;
+ unsigned long long tbl_size, num_sectors;
+ unsigned char blksize_bits = sb->s_blocksize_bits;
+ struct exfat_chain clu;
+ struct exfat_dentry *ep;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bh;
+
+ clu.dir = sbi->root_dir;
+ clu.flags = ALLOC_FAT_CHAIN;
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+ for (i = 0; i < sbi->dentries_per_clu; i++) {
+ ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ if (!ep)
+ return -EIO;
+
+ type = exfat_get_entry_type(ep);
+ if (type == TYPE_UNUSED) {
+ brelse(bh);
+ break;
+ }
+
+ if (type != TYPE_UPCASE) {
+ brelse(bh);
+ continue;
+ }
+
+ tbl_clu = le32_to_cpu(ep->dentry.upcase.start_clu);
+ tbl_size = le64_to_cpu(ep->dentry.upcase.size);
+
+ sector = exfat_cluster_to_sector(sbi, tbl_clu);
+ num_sectors = ((tbl_size - 1) >> blksize_bits) + 1;
+ ret = exfat_load_upcase_table(sb, sector, num_sectors,
+ le32_to_cpu(ep->dentry.upcase.checksum));
+
+ brelse(bh);
+ if (ret && ret != -EIO)
+ goto load_default;
+
+			/* loaded successfully, or failed with -EIO, which we propagate */
+ return ret;
+ }
+
+ if (exfat_get_next_cluster(sb, &(clu.dir)))
+ return -EIO;
+ }
+
+load_default:
+ /* load default upcase table */
+ return exfat_load_default_upcase_table(sb);
+}
+
+void exfat_free_upcase_table(struct exfat_sb_info *sbi)
+{
+ kfree(sbi->vol_utbl);
+}
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
new file mode 100644
index 000000000000..16ed202ef527
--- /dev/null
+++ b/fs/exfat/super.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/mount.h>
+#include <linux/cred.h>
+#include <linux/statfs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <linux/fs_struct.h>
+#include <linux/iversion.h>
+#include <linux/nls.h>
+#include <linux/buffer_head.h>
+
+#include "exfat_raw.h"
+#include "exfat_fs.h"
+
+static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET;
+static struct kmem_cache *exfat_inode_cachep;
+
+static void exfat_free_iocharset(struct exfat_sb_info *sbi)
+{
+ if (sbi->options.iocharset != exfat_default_iocharset)
+ kfree(sbi->options.iocharset);
+}
+
+static void exfat_delayed_free(struct rcu_head *p)
+{
+ struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu);
+
+ unload_nls(sbi->nls_io);
+ exfat_free_iocharset(sbi);
+ exfat_free_upcase_table(sbi);
+ kfree(sbi);
+}
+
+static void exfat_put_super(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ mutex_lock(&sbi->s_lock);
+ if (test_and_clear_bit(EXFAT_SB_DIRTY, &sbi->s_state))
+ sync_blockdev(sb->s_bdev);
+ exfat_set_vol_flags(sb, VOL_CLEAN);
+ exfat_free_bitmap(sbi);
+ mutex_unlock(&sbi->s_lock);
+
+ call_rcu(&sbi->rcu, exfat_delayed_free);
+}
+
+static int exfat_sync_fs(struct super_block *sb, int wait)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int err = 0;
+
+	/* flush any dirty buffers cached in the bdev inode */
+ mutex_lock(&sbi->s_lock);
+ if (test_and_clear_bit(EXFAT_SB_DIRTY, &sbi->s_state)) {
+ sync_blockdev(sb->s_bdev);
+ if (exfat_set_vol_flags(sb, VOL_CLEAN))
+ err = -EIO;
+ }
+ mutex_unlock(&sbi->s_lock);
+ return err;
+}
+
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ unsigned long long id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+ if (sbi->used_clusters == EXFAT_CLUSTERS_UNTRACKED) {
+ mutex_lock(&sbi->s_lock);
+ if (exfat_count_used_clusters(sb, &sbi->used_clusters)) {
+ mutex_unlock(&sbi->s_lock);
+ return -EIO;
+ }
+ mutex_unlock(&sbi->s_lock);
+ }
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = sbi->cluster_size;
+ buf->f_blocks = sbi->num_clusters - 2; /* clu 0 & 1 */
+ buf->f_bfree = buf->f_blocks - sbi->used_clusters;
+ buf->f_bavail = buf->f_bfree;
+ buf->f_fsid.val[0] = (unsigned int)id;
+ buf->f_fsid.val[1] = (unsigned int)(id >> 32);
+	/* up to 255 UTF-16 characters, each up to NLS_MAX_CHARSET_SIZE bytes */
+ buf->f_namelen = EXFAT_MAX_FILE_LEN * NLS_MAX_CHARSET_SIZE;
+ return 0;
+}
+
+int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct pbr64 *bpb;
+	bool sync = false;
+
+ /* flags are not changed */
+ if (sbi->vol_flag == new_flag)
+ return 0;
+
+ sbi->vol_flag = new_flag;
+
+	/*
+	 * Skip updating the volume dirty flag if the volume has been
+	 * mounted read-only.
+	 */
+ if (sb_rdonly(sb))
+ return 0;
+
+ if (!sbi->pbr_bh) {
+ sbi->pbr_bh = sb_bread(sb, 0);
+ if (!sbi->pbr_bh) {
+ exfat_msg(sb, KERN_ERR, "failed to read boot sector");
+ return -ENOMEM;
+ }
+ }
+
+ bpb = (struct pbr64 *)sbi->pbr_bh->b_data;
+ bpb->bsx.vol_flags = cpu_to_le16(new_flag);
+
+ if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->pbr_bh))
+ sync = true;
+ else
+ sync = false;
+
+ set_buffer_uptodate(sbi->pbr_bh);
+ mark_buffer_dirty(sbi->pbr_bh);
+
+ if (sync)
+ sync_dirty_buffer(sbi->pbr_bh);
+ return 0;
+}
+
+static int exfat_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct super_block *sb = root->d_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+
+ /* Show partition info */
+ if (!uid_eq(opts->fs_uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, opts->fs_uid));
+ if (!gid_eq(opts->fs_gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, opts->fs_gid));
+ seq_printf(m, ",fmask=%04o,dmask=%04o", opts->fs_fmask, opts->fs_dmask);
+ if (opts->allow_utime)
+ seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
+ if (opts->utf8)
+ seq_puts(m, ",iocharset=utf8");
+ else if (sbi->nls_io)
+ seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
+ seq_printf(m, ",bps=%ld", sb->s_blocksize);
+ if (opts->errors == EXFAT_ERRORS_CONT)
+ seq_puts(m, ",errors=continue");
+ else if (opts->errors == EXFAT_ERRORS_PANIC)
+ seq_puts(m, ",errors=panic");
+ else
+ seq_puts(m, ",errors=remount-ro");
+ if (opts->discard)
+ seq_puts(m, ",discard");
+ if (opts->time_offset)
+ seq_printf(m, ",time_offset=%d", opts->time_offset);
+ return 0;
+}
+
+static struct inode *exfat_alloc_inode(struct super_block *sb)
+{
+ struct exfat_inode_info *ei;
+
+ ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS);
+ if (!ei)
+ return NULL;
+
+ init_rwsem(&ei->truncate_lock);
+ return &ei->vfs_inode;
+}
+
+static void exfat_free_inode(struct inode *inode)
+{
+ kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode));
+}
+
+static const struct super_operations exfat_sops = {
+ .alloc_inode = exfat_alloc_inode,
+ .free_inode = exfat_free_inode,
+ .write_inode = exfat_write_inode,
+ .evict_inode = exfat_evict_inode,
+ .put_super = exfat_put_super,
+ .sync_fs = exfat_sync_fs,
+ .statfs = exfat_statfs,
+ .show_options = exfat_show_options,
+};
+
+enum {
+ Opt_uid,
+ Opt_gid,
+ Opt_umask,
+ Opt_dmask,
+ Opt_fmask,
+ Opt_allow_utime,
+ Opt_charset,
+ Opt_errors,
+ Opt_discard,
+ Opt_time_offset,
+};
+
+static const struct constant_table exfat_param_enums[] = {
+ { "continue", EXFAT_ERRORS_CONT },
+ { "panic", EXFAT_ERRORS_PANIC },
+ { "remount-ro", EXFAT_ERRORS_RO },
+ {}
+};
+
+static const struct fs_parameter_spec exfat_parameters[] = {
+ fsparam_u32("uid", Opt_uid),
+ fsparam_u32("gid", Opt_gid),
+ fsparam_u32oct("umask", Opt_umask),
+ fsparam_u32oct("dmask", Opt_dmask),
+ fsparam_u32oct("fmask", Opt_fmask),
+ fsparam_u32oct("allow_utime", Opt_allow_utime),
+ fsparam_string("iocharset", Opt_charset),
+ fsparam_enum("errors", Opt_errors, exfat_param_enums),
+ fsparam_flag("discard", Opt_discard),
+ fsparam_s32("time_offset", Opt_time_offset),
+ {}
+};
+
+static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct exfat_sb_info *sbi = fc->s_fs_info;
+ struct exfat_mount_options *opts = &sbi->options;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, exfat_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ opts->fs_uid = make_kuid(current_user_ns(), result.uint_32);
+ break;
+ case Opt_gid:
+ opts->fs_gid = make_kgid(current_user_ns(), result.uint_32);
+ break;
+ case Opt_umask:
+ opts->fs_fmask = result.uint_32;
+ opts->fs_dmask = result.uint_32;
+ break;
+ case Opt_dmask:
+ opts->fs_dmask = result.uint_32;
+ break;
+ case Opt_fmask:
+ opts->fs_fmask = result.uint_32;
+ break;
+ case Opt_allow_utime:
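+		/* only the group/other write bits (0022) are meaningful */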
+ opts->allow_utime = result.uint_32 & 0022;
+ break;
+ case Opt_charset:
+ exfat_free_iocharset(sbi);
+ opts->iocharset = kstrdup(param->string, GFP_KERNEL);
+ if (!opts->iocharset)
+ return -ENOMEM;
+ break;
+ case Opt_errors:
+ opts->errors = result.uint_32;
+ break;
+ case Opt_discard:
+ opts->discard = 1;
+ break;
+ case Opt_time_offset:
+		/*
+		 * Make the limit 24 hours, just in case someone invents
+		 * something unusual.
+		 */
+ if (result.int_32 < -24 * 60 || result.int_32 > 24 * 60)
+ return -EINVAL;
+ opts->time_offset = result.int_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void exfat_hash_init(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int i;
+
+ spin_lock_init(&sbi->inode_hash_lock);
+ for (i = 0; i < EXFAT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
+}
+
+static int exfat_read_root(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+ struct exfat_chain cdir;
+ int num_subdirs, num_clu = 0;
+
+ exfat_chain_set(&ei->dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ ei->entry = -1;
+ ei->start_clu = sbi->root_dir;
+ ei->flags = ALLOC_FAT_CHAIN;
+ ei->type = TYPE_DIR;
+ ei->version = 0;
+ ei->rwoffset = 0;
+ ei->hint_bmap.off = EXFAT_EOF_CLUSTER;
+ ei->hint_stat.eidx = 0;
+ ei->hint_stat.clu = sbi->root_dir;
+ ei->hint_femp.eidx = EXFAT_HINT_NONE;
+
+ exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN);
+ if (exfat_count_num_clusters(sb, &cdir, &num_clu))
+ return -EIO;
+ i_size_write(inode, num_clu << sbi->cluster_size_bits);
+
+ num_subdirs = exfat_count_dir_entries(sb, &cdir);
+ if (num_subdirs < 0)
+ return -EIO;
+ set_nlink(inode, num_subdirs + EXFAT_MIN_SUBDIR);
+
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ inode_inc_iversion(inode);
+ inode->i_generation = 0;
+ inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, 0777);
+ inode->i_op = &exfat_dir_inode_operations;
+ inode->i_fop = &exfat_dir_operations;
+
+ inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1))
+ & ~(sbi->cluster_size - 1)) >> inode->i_blkbits;
+ EXFAT_I(inode)->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
+ EXFAT_I(inode)->i_size_aligned = i_size_read(inode);
+ EXFAT_I(inode)->i_size_ondisk = i_size_read(inode);
+
+ exfat_save_attr(inode, ATTR_SUBDIR);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
+ current_time(inode);
+ exfat_cache_init_inode(inode);
+ return 0;
+}
+
+static struct pbr *exfat_read_pbr_with_logical_sector(struct super_block *sb,
+ struct buffer_head **prev_bh)
+{
+ struct pbr *p_pbr = (struct pbr *) (*prev_bh)->b_data;
+ unsigned short logical_sect = 0;
+
+ logical_sect = 1 << p_pbr->bsx.f64.sect_size_bits;
+
+ if (!is_power_of_2(logical_sect) ||
+ logical_sect < 512 || logical_sect > 4096) {
+ exfat_msg(sb, KERN_ERR, "bogus logical sector size %u",
+ logical_sect);
+ return NULL;
+ }
+
+ if (logical_sect < sb->s_blocksize) {
+ exfat_msg(sb, KERN_ERR,
+ "logical sector size too small for device (logical sector size = %u)",
+ logical_sect);
+ return NULL;
+ }
+
+ if (logical_sect > sb->s_blocksize) {
+ struct buffer_head *bh = NULL;
+
+ __brelse(*prev_bh);
+ *prev_bh = NULL;
+
+ if (!sb_set_blocksize(sb, logical_sect)) {
+ exfat_msg(sb, KERN_ERR,
+ "unable to set blocksize %u", logical_sect);
+ return NULL;
+ }
+ bh = sb_bread(sb, 0);
+ if (!bh) {
+ exfat_msg(sb, KERN_ERR,
+ "unable to read boot sector (logical sector size = %lu)",
+ sb->s_blocksize);
+ return NULL;
+ }
+
+ *prev_bh = bh;
+ p_pbr = (struct pbr *) bh->b_data;
+ }
+ return p_pbr;
+}
+
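The boot sector stores the logical sector size as a power-of-two exponent rather than a byte count, which is why the function above first derives the size and then validates the 512..4096 range; when the logical sector is larger than the current block size, block 0 must be re-read so a single buffer covers the whole boot sector. A trivial sketch of the derivation (logical_sector_bytes() is an invented name):

```c
/* 9 -> 512, 10 -> 1024, 11 -> 2048, 12 -> 4096; anything else is rejected */
static unsigned int logical_sector_bytes(unsigned char sect_size_bits)
{
	return 1u << sect_size_bits;
}
```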
+/* mount the file system volume */
+static int __exfat_fill_super(struct super_block *sb)
+{
+ int ret;
+ struct pbr *p_pbr;
+ struct pbr64 *p_bpb;
+ struct buffer_head *bh;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ /* set block size to read super block */
+ sb_min_blocksize(sb, 512);
+
+ /* read boot sector */
+ bh = sb_bread(sb, 0);
+ if (!bh) {
+ exfat_msg(sb, KERN_ERR, "unable to read boot sector");
+ return -EIO;
+ }
+
+	/* the PBR has been read */
+ p_pbr = (struct pbr *)bh->b_data;
+
+	/* check the validity of the PBR */
+	if (le16_to_cpu(p_pbr->signature) != PBR_SIGNATURE) {
+ exfat_msg(sb, KERN_ERR, "invalid boot record signature");
+ ret = -EINVAL;
+ goto free_bh;
+ }
+
+ /* check logical sector size */
+ p_pbr = exfat_read_pbr_with_logical_sector(sb, &bh);
+ if (!p_pbr) {
+ ret = -EIO;
+ goto free_bh;
+ }
+
+	/*
+	 * The res_zero field must be all zero; a nonzero value means this is
+	 * actually a FAT volume, which must not be mounted as exFAT.
+	 */
+ if (memchr_inv(p_pbr->bpb.f64.res_zero, 0,
+ sizeof(p_pbr->bpb.f64.res_zero))) {
+ ret = -EINVAL;
+ goto free_bh;
+ }
+
+ p_bpb = (struct pbr64 *)p_pbr;
+ if (!p_bpb->bsx.num_fats) {
+		exfat_msg(sb, KERN_ERR, "bogus number of FAT structures");
+ ret = -EINVAL;
+ goto free_bh;
+ }
+
+ sbi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits;
+ sbi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits;
+ sbi->cluster_size_bits = sbi->sect_per_clus_bits + sb->s_blocksize_bits;
+ sbi->cluster_size = 1 << sbi->cluster_size_bits;
+ sbi->num_FAT_sectors = le32_to_cpu(p_bpb->bsx.fat_length);
+ sbi->FAT1_start_sector = le32_to_cpu(p_bpb->bsx.fat_offset);
+ sbi->FAT2_start_sector = p_bpb->bsx.num_fats == 1 ?
+ sbi->FAT1_start_sector :
+ sbi->FAT1_start_sector + sbi->num_FAT_sectors;
+ sbi->data_start_sector = le32_to_cpu(p_bpb->bsx.clu_offset);
+ sbi->num_sectors = le64_to_cpu(p_bpb->bsx.vol_length);
+	/* the cluster index starts at 2, so two reserved clusters are added */
+ sbi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) +
+ EXFAT_RESERVED_CLUSTERS;
+
+ sbi->root_dir = le32_to_cpu(p_bpb->bsx.root_cluster);
+ sbi->dentries_per_clu = 1 <<
+ (sbi->cluster_size_bits - DENTRY_SIZE_BITS);
+
+ sbi->vol_flag = le16_to_cpu(p_bpb->bsx.vol_flags);
+ sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
+ sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
+
+ if (le16_to_cpu(p_bpb->bsx.vol_flags) & VOL_DIRTY) {
+ sbi->vol_flag |= VOL_DIRTY;
+ exfat_msg(sb, KERN_WARNING,
+ "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
+ }
+
+	/* the exFAT file size is limited by the volume size */
+ sb->s_maxbytes = (u64)(sbi->num_clusters - EXFAT_RESERVED_CLUSTERS) <<
+ sbi->cluster_size_bits;
+
+ ret = exfat_create_upcase_table(sb);
+ if (ret) {
+ exfat_msg(sb, KERN_ERR, "failed to load upcase table");
+ goto free_bh;
+ }
+
+ ret = exfat_load_bitmap(sb);
+ if (ret) {
+ exfat_msg(sb, KERN_ERR, "failed to load alloc-bitmap");
+ goto free_upcase_table;
+ }
+
+ ret = exfat_count_used_clusters(sb, &sbi->used_clusters);
+ if (ret) {
+ exfat_msg(sb, KERN_ERR, "failed to scan clusters");
+ goto free_alloc_bitmap;
+ }
+
+ return 0;
+
+free_alloc_bitmap:
+ exfat_free_bitmap(sbi);
+free_upcase_table:
+ exfat_free_upcase_table(sbi);
+free_bh:
+ brelse(bh);
+ return ret;
+}
+
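A worked example of the geometry computed above, assuming a 512-byte block size (s_blocksize_bits = 9), sect_per_clus_bits = 3, and the 32-byte exFAT directory entry (DENTRY_SIZE_BITS = 5); exfat_geometry_example() is illustrative only:

```c
#include <assert.h>

static void exfat_geometry_example(void)
{
	unsigned char s_blocksize_bits = 9;	/* 512-byte sectors (assumed) */
	unsigned char sect_per_clus_bits = 3;	/* 8 sectors per cluster */
	unsigned char cluster_size_bits = sect_per_clus_bits + s_blocksize_bits;

	assert((1 << cluster_size_bits) == 4096);	/* cluster size in bytes */
	assert((1 << (cluster_size_bits - 5)) == 128);	/* 32-byte dentries per cluster */
}
```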
+static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+ struct exfat_mount_options *opts = &sbi->options;
+ struct inode *root_inode;
+ int err;
+
+ if (opts->allow_utime == (unsigned short)-1)
+ opts->allow_utime = ~opts->fs_dmask & 0022;
+
+	if (opts->discard) {
+		struct request_queue *q = bdev_get_queue(sb->s_bdev);
+
+		if (!blk_queue_discard(q)) {
+			exfat_msg(sb, KERN_WARNING,
+				"mounting with \"discard\" option, but the device does not support discard");
+			opts->discard = 0;
+		}
+	}
+
+ sb->s_flags |= SB_NODIRATIME;
+ sb->s_magic = EXFAT_SUPER_MAGIC;
+ sb->s_op = &exfat_sops;
+
+ sb->s_time_gran = 1;
+ sb->s_time_min = EXFAT_MIN_TIMESTAMP_SECS;
+ sb->s_time_max = EXFAT_MAX_TIMESTAMP_SECS;
+
+ err = __exfat_fill_super(sb);
+ if (err) {
+ exfat_msg(sb, KERN_ERR, "failed to recognize exfat type");
+ goto check_nls_io;
+ }
+
+ /* set up enough so that it can read an inode */
+ exfat_hash_init(sb);
+
+	if (!strcmp(sbi->options.iocharset, "utf8")) {
+		opts->utf8 = 1;
+	} else {
+ sbi->nls_io = load_nls(sbi->options.iocharset);
+ if (!sbi->nls_io) {
+ exfat_msg(sb, KERN_ERR, "IO charset %s not found",
+ sbi->options.iocharset);
+ err = -EINVAL;
+ goto free_table;
+ }
+ }
+
+ if (sbi->options.utf8)
+ sb->s_d_op = &exfat_utf8_dentry_ops;
+ else
+ sb->s_d_op = &exfat_dentry_ops;
+
+ root_inode = new_inode(sb);
+ if (!root_inode) {
+ exfat_msg(sb, KERN_ERR, "failed to allocate root inode.");
+ err = -ENOMEM;
+ goto free_table;
+ }
+
+ root_inode->i_ino = EXFAT_ROOT_INO;
+ inode_set_iversion(root_inode, 1);
+ err = exfat_read_root(root_inode);
+ if (err) {
+ exfat_msg(sb, KERN_ERR, "failed to initialize root inode.");
+ goto put_inode;
+ }
+
+ exfat_hash_inode(root_inode, EXFAT_I(root_inode)->i_pos);
+ insert_inode_hash(root_inode);
+
+ sb->s_root = d_make_root(root_inode);
+ if (!sb->s_root) {
+ exfat_msg(sb, KERN_ERR, "failed to get the root dentry");
+ err = -ENOMEM;
+ goto put_inode;
+ }
+
+ return 0;
+
+put_inode:
+ iput(root_inode);
+ sb->s_root = NULL;
+
+free_table:
+ exfat_free_upcase_table(sbi);
+ exfat_free_bitmap(sbi);
+
+check_nls_io:
+ unload_nls(sbi->nls_io);
+ exfat_free_iocharset(sbi);
+ sb->s_fs_info = NULL;
+ kfree(sbi);
+ return err;
+}
+
+static int exfat_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, exfat_fill_super);
+}
+
+static void exfat_free(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations exfat_context_ops = {
+ .parse_param = exfat_parse_param,
+ .get_tree = exfat_get_tree,
+ .free = exfat_free,
+};
+
+static int exfat_init_fs_context(struct fs_context *fc)
+{
+ struct exfat_sb_info *sbi;
+
+ sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ mutex_init(&sbi->s_lock);
+ ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ sbi->options.fs_uid = current_uid();
+ sbi->options.fs_gid = current_gid();
+ sbi->options.fs_fmask = current->fs->umask;
+ sbi->options.fs_dmask = current->fs->umask;
+ sbi->options.allow_utime = -1;
+ sbi->options.iocharset = exfat_default_iocharset;
+ sbi->options.errors = EXFAT_ERRORS_RO;
+
+ fc->s_fs_info = sbi;
+ fc->ops = &exfat_context_ops;
+ return 0;
+}
+
+static struct file_system_type exfat_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "exfat",
+ .init_fs_context = exfat_init_fs_context,
+ .parameters = exfat_parameters,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+static void exfat_inode_init_once(void *foo)
+{
+ struct exfat_inode_info *ei = (struct exfat_inode_info *)foo;
+
+ INIT_HLIST_NODE(&ei->i_hash_fat);
+ inode_init_once(&ei->vfs_inode);
+}
+
+static int __init init_exfat_fs(void)
+{
+ int err;
+
+ err = exfat_cache_init();
+ if (err)
+ return err;
+
+ exfat_inode_cachep = kmem_cache_create("exfat_inode_cache",
+ sizeof(struct exfat_inode_info),
+ 0, SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ exfat_inode_init_once);
+ if (!exfat_inode_cachep) {
+ err = -ENOMEM;
+ goto shutdown_cache;
+ }
+
+ err = register_filesystem(&exfat_fs_type);
+ if (err)
+ goto destroy_cache;
+
+ return 0;
+
+destroy_cache:
+ kmem_cache_destroy(exfat_inode_cachep);
+shutdown_cache:
+ exfat_cache_shutdown();
+ return err;
+}
+
+static void __exit exit_exfat_fs(void)
+{
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(exfat_inode_cachep);
+ unregister_filesystem(&exfat_fs_type);
+ exfat_cache_shutdown();
+}
+
+module_init(init_exfat_fs);
+module_exit(exit_exfat_fs);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("exFAT filesystem support");
+MODULE_AUTHOR("Samsung Electronics Co., Ltd.");
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 0456bc990b5e..943cc469f42f 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -56,6 +56,7 @@
#include <linux/buffer_head.h>
#include <linux/init.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
@@ -84,8 +85,8 @@
printk("\n"); \
} while (0)
#else
-# define ea_idebug(f...)
-# define ea_bdebug(f...)
+# define ea_idebug(inode, f...) no_printk(f)
+# define ea_bdebug(bh, f...) no_printk(f)
#endif
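Replacing the empty debug macros with no_printk() keeps the format string and its arguments type-checked even when debugging is compiled out, without emitting any code at runtime. The general pattern looks like this (dbg() is a made-up example macro, not the one in this file):

```c
#include <linux/printk.h>

#ifdef DEBUG
#define dbg(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
/* arguments are type-checked but the call is compiled away */
#define dbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
```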
static int ext2_xattr_set2(struct inode *, struct buffer_head *,
@@ -790,7 +791,15 @@ ext2_xattr_delete_inode(struct inode *inode)
struct buffer_head *bh = NULL;
struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
- down_write(&EXT2_I(inode)->xattr_sem);
+	/*
+	 * We are the only ones holding an inode reference, so xattr_sem had
+	 * better be unlocked! We could simply not take xattr_sem at all, but
+	 * taking it makes the code more futureproof. OTOH we need a trylock
+	 * here to avoid a false-positive lockdep warning about a reclaim
+	 * circular dependency.
+	 */
+ if (WARN_ON_ONCE(!down_write_trylock(&EXT2_I(inode)->xattr_sem)))
+ return;
if (!EXT2_I(inode)->i_file_acl)
goto cleanup;
@@ -864,8 +873,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
true);
if (error) {
if (error == -EBUSY) {
- ea_bdebug(bh, "already in cache (%d cache entries)",
- atomic_read(&ext2_xattr_cache->c_entry_count));
+ ea_bdebug(bh, "already in cache");
error = 0;
}
} else
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index cee888cdc235..16272e6ddcf4 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -39,7 +39,7 @@ struct ext2_xattr_entry {
__le32 e_value_block; /* disk block attribute is stored on (n/i) */
__le32 e_value_size; /* size of attribute value */
__le32 e_hash; /* hash value of name and value */
- char e_name[0]; /* attribute name */
+ char e_name[]; /* attribute name */
};
#define EXT2_XATTR_PAD_BITS 2
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 8fd0b3cdab4c..0e0a4d6209c7 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -516,10 +516,9 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
wait_on_buffer(bh);
ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
if (!buffer_uptodate(bh)) {
- ext4_set_errno(sb, EIO);
- ext4_error(sb, "Cannot read block bitmap - "
- "block_group = %u, block_bitmap = %llu",
- block_group, (unsigned long long) bh->b_blocknr);
+ ext4_error_err(sb, EIO, "Cannot read block bitmap - "
+ "block_group = %u, block_bitmap = %llu",
+ block_group, (unsigned long long) bh->b_blocknr);
ext4_mark_group_bitmap_corrupted(sb, block_group,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
return -EIO;
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 0a734ffb4310..16e9b2fda03a 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -166,10 +166,8 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
(start_blk + count < start_blk) ||
- (start_blk + count > ext4_blocks_count(sbi->s_es))) {
- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+ (start_blk + count > ext4_blocks_count(sbi->s_es)))
return 0;
- }
if (system_blks == NULL)
return 1;
@@ -181,10 +179,8 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
n = n->rb_left;
else if (start_blk >= (entry->start_blk + entry->count))
n = n->rb_right;
- else {
- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+ else
return 0;
- }
}
return 1;
}
@@ -220,10 +216,12 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
} else {
if (!ext4_data_block_valid_rcu(sbi, system_blks,
map.m_pblk, n)) {
- ext4_error(sb, "blocks %llu-%llu from inode %u "
- "overlap system zone", map.m_pblk,
- map.m_pblk + map.m_len - 1, ino);
err = -EFSCORRUPTED;
+ __ext4_error(sb, __func__, __LINE__, -err,
+ map.m_pblk, "blocks %llu-%llu "
+ "from inode %u overlap system zone",
+ map.m_pblk,
+ map.m_pblk + map.m_len - 1, ino);
break;
}
err = add_system_zone(system_blks, map.m_pblk, n);
@@ -365,7 +363,6 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
int ext4_check_blockref(const char *function, unsigned int line,
struct inode *inode, __le32 *p, unsigned int max)
{
- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
__le32 *bref = p;
unsigned int blk;
@@ -379,7 +376,6 @@ int ext4_check_blockref(const char *function, unsigned int line,
if (blk &&
unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
blk, 1))) {
- es->s_last_error_block = cpu_to_le64(blk);
ext4_error_inode(inode, function, line, blk,
"invalid block");
return -EFSCORRUPTED;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 9aa1f75409b0..c654205f648d 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -392,7 +392,7 @@ struct fname {
__u32 inode;
__u8 name_len;
__u8 file_type;
- char name[0];
+ char name[];
};
/*
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 61b37a052052..91eb4381cae5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -414,7 +414,7 @@ struct flex_groups {
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
#define EXT4_VERITY_FL 0x00100000 /* Verity protected inode */
#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
-#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
+/* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */
#define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
#define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */
@@ -487,7 +487,7 @@ enum {
EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
EXT4_INODE_VERITY = 20, /* Verity protected inode */
EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
- EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */
+/* 22 was formerly EXT4_INODE_EOFBLOCKS */
EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */
EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */
EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
@@ -533,7 +533,6 @@ static inline void ext4_check_flag_values(void)
CHECK_FLAG_VALUE(EXTENTS);
CHECK_FLAG_VALUE(VERITY);
CHECK_FLAG_VALUE(EA_INODE);
- CHECK_FLAG_VALUE(EOFBLOCKS);
CHECK_FLAG_VALUE(INLINE_DATA);
CHECK_FLAG_VALUE(PROJINHERIT);
CHECK_FLAG_VALUE(RESERVED);
@@ -2771,21 +2770,20 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
ext4_group_t block_group,
unsigned int flags);
-extern void ext4_set_errno(struct super_block *sb, int err);
-extern __printf(4, 5)
-void __ext4_error(struct super_block *, const char *, unsigned int,
+extern __printf(6, 7)
+void __ext4_error(struct super_block *, const char *, unsigned int, int, __u64,
const char *, ...);
-extern __printf(5, 6)
-void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t,
- const char *, ...);
+extern __printf(6, 7)
+void __ext4_error_inode(struct inode *, const char *, unsigned int,
+ ext4_fsblk_t, int, const char *, ...);
extern __printf(5, 6)
void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
const char *, ...);
extern void __ext4_std_error(struct super_block *, const char *,
unsigned int, int);
-extern __printf(4, 5)
-void __ext4_abort(struct super_block *, const char *, unsigned int,
+extern __printf(5, 6)
+void __ext4_abort(struct super_block *, const char *, unsigned int, int,
const char *, ...);
extern __printf(4, 5)
void __ext4_warning(struct super_block *, const char *, unsigned int,
@@ -2806,8 +2804,12 @@ void __ext4_grp_locked_error(const char *, unsigned int,
#define EXT4_ERROR_INODE(inode, fmt, a...) \
ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a)
-#define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \
- ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
+#define EXT4_ERROR_INODE_ERR(inode, err, fmt, a...) \
+ __ext4_error_inode((inode), __func__, __LINE__, 0, (err), (fmt), ## a)
+
+#define ext4_error_inode_block(inode, block, err, fmt, a...) \
+ __ext4_error_inode((inode), __func__, __LINE__, (block), (err), \
+ (fmt), ## a)
#define EXT4_ERROR_FILE(file, block, fmt, a...) \
ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
@@ -2815,13 +2817,18 @@ void __ext4_grp_locked_error(const char *, unsigned int,
#ifdef CONFIG_PRINTK
#define ext4_error_inode(inode, func, line, block, fmt, ...) \
- __ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__)
+ __ext4_error_inode(inode, func, line, block, 0, fmt, ##__VA_ARGS__)
+#define ext4_error_inode_err(inode, func, line, block, err, fmt, ...) \
+ __ext4_error_inode((inode), (func), (line), (block), \
+ (err), (fmt), ##__VA_ARGS__)
#define ext4_error_file(file, func, line, block, fmt, ...) \
__ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__)
#define ext4_error(sb, fmt, ...) \
- __ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
-#define ext4_abort(sb, fmt, ...) \
- __ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
+ __ext4_error((sb), __func__, __LINE__, 0, 0, (fmt), ##__VA_ARGS__)
+#define ext4_error_err(sb, err, fmt, ...) \
+ __ext4_error((sb), __func__, __LINE__, (err), 0, (fmt), ##__VA_ARGS__)
+#define ext4_abort(sb, err, fmt, ...) \
+ __ext4_abort((sb), __func__, __LINE__, (err), (fmt), ##__VA_ARGS__)
#define ext4_warning(sb, fmt, ...) \
__ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
#define ext4_warning_inode(inode, fmt, ...) \
@@ -2839,7 +2846,12 @@ void __ext4_grp_locked_error(const char *, unsigned int,
#define ext4_error_inode(inode, func, line, block, fmt, ...) \
do { \
no_printk(fmt, ##__VA_ARGS__); \
- __ext4_error_inode(inode, "", 0, block, " "); \
+ __ext4_error_inode(inode, "", 0, block, 0, " "); \
+} while (0)
+#define ext4_error_inode_err(inode, func, line, block, err, fmt, ...) \
+do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ __ext4_error_inode(inode, "", 0, block, err, " "); \
} while (0)
#define ext4_error_file(file, func, line, block, fmt, ...) \
do { \
@@ -2849,12 +2861,17 @@ do { \
#define ext4_error(sb, fmt, ...) \
do { \
no_printk(fmt, ##__VA_ARGS__); \
- __ext4_error(sb, "", 0, " "); \
+ __ext4_error(sb, "", 0, 0, 0, " "); \
+} while (0)
+#define ext4_error_err(sb, err, fmt, ...) \
+do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ __ext4_error(sb, "", 0, err, 0, " "); \
} while (0)
-#define ext4_abort(sb, fmt, ...) \
+#define ext4_abort(sb, err, fmt, ...) \
do { \
no_printk(fmt, ##__VA_ARGS__); \
- __ext4_abort(sb, "", 0, " "); \
+ __ext4_abort(sb, "", 0, err, " "); \
} while (0)
#define ext4_warning(sb, fmt, ...) \
do { \
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 1f53d64e42a5..7f16e1af8d5c 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -80,8 +80,7 @@ static int ext4_journal_check_start(struct super_block *sb)
* take the FS itself readonly cleanly.
*/
if (journal && is_journal_aborted(journal)) {
- ext4_set_errno(sb, -journal->j_errno);
- ext4_abort(sb, "Detected aborted journal");
+ ext4_abort(sb, -journal->j_errno, "Detected aborted journal");
return -EROFS;
}
return 0;
@@ -272,8 +271,7 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
if (err) {
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
- ext4_set_errno(inode->i_sb, -err);
- __ext4_abort(inode->i_sb, where, line,
+ __ext4_abort(inode->i_sb, where, line, -err,
"error %d when attempting revoke", err);
}
BUFFER_TRACE(bh, "exit");
@@ -332,6 +330,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
err);
}
} else {
+ set_buffer_uptodate(bh);
if (inode)
mark_buffer_dirty_inode(bh, inode);
else
@@ -342,11 +341,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
struct ext4_super_block *es;
es = EXT4_SB(inode->i_sb)->s_es;
- es->s_last_error_block =
- cpu_to_le64(bh->b_blocknr);
- ext4_set_errno(inode->i_sb, EIO);
- ext4_error_inode(inode, where, line,
- bh->b_blocknr,
+ ext4_error_inode_err(inode, where, line,
+ bh->b_blocknr, EIO,
"IO error syncing itable block");
err = -EIO;
}
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 7ea4f6fa173b..4b9002f0e84c 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -512,6 +512,9 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
return 0;
if (ext4_should_journal_data(inode))
return 0;
+ /* temporary fix to prevent generic/422 test failures */
+ if (!test_opt(inode->i_sb, DELALLOC))
+ return 0;
return 1;
}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 954013d6076b..031752cfb6f7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
+#include <linux/iomap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"
@@ -83,13 +84,6 @@ static void ext4_extent_block_csum_set(struct inode *inode,
et->et_checksum = ext4_extent_block_csum(inode, eh);
}
-static int ext4_split_extent(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path **ppath,
- struct ext4_map_blocks *map,
- int split_flag,
- int flags);
-
static int ext4_split_extent_at(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
@@ -97,9 +91,6 @@ static int ext4_split_extent_at(handle_t *handle,
int split_flag,
int flags);
-static int ext4_find_delayed_extent(struct inode *inode,
- struct extent_status *newes);
-
static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
/*
@@ -358,8 +349,8 @@ static int ext4_valid_extent_idx(struct inode *inode,
}
static int ext4_valid_extent_entries(struct inode *inode,
- struct ext4_extent_header *eh,
- int depth)
+ struct ext4_extent_header *eh,
+ ext4_fsblk_t *pblk, int depth)
{
unsigned short entries;
if (eh->eh_entries == 0)
@@ -370,8 +361,6 @@ static int ext4_valid_extent_entries(struct inode *inode,
if (depth == 0) {
/* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
- ext4_fsblk_t pblock = 0;
ext4_lblk_t lblock = 0;
ext4_lblk_t prev = 0;
int len = 0;
@@ -383,8 +372,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
lblock = le32_to_cpu(ext->ee_block);
len = ext4_ext_get_actual_len(ext);
if ((lblock <= prev) && prev) {
- pblock = ext4_ext_pblock(ext);
- es->s_last_error_block = cpu_to_le64(pblock);
+ *pblk = ext4_ext_pblock(ext);
return 0;
}
ext++;
@@ -431,7 +419,7 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid eh_entries";
goto corrupted;
}
- if (!ext4_valid_extent_entries(inode, eh, depth)) {
+ if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
}
@@ -449,14 +437,14 @@ static int __ext4_ext_check(const char *function, unsigned int line,
return 0;
corrupted:
- ext4_set_errno(inode->i_sb, -err);
- ext4_error_inode(inode, function, line, 0,
- "pblk %llu bad header/extent: %s - magic %x, "
- "entries %u, max %u(%u), depth %u(%u)",
- (unsigned long long) pblk, error_msg,
- le16_to_cpu(eh->eh_magic),
- le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
- max, le16_to_cpu(eh->eh_depth), depth);
+ ext4_error_inode_err(inode, function, line, 0, -err,
+ "pblk %llu bad header/extent: %s - magic %x, "
+ "entries %u, max %u(%u), depth %u(%u)",
+ (unsigned long long) pblk, error_msg,
+ le16_to_cpu(eh->eh_magic),
+ le16_to_cpu(eh->eh_entries),
+ le16_to_cpu(eh->eh_max),
+ max, le16_to_cpu(eh->eh_depth), depth);
return err;
}
@@ -556,6 +544,12 @@ int ext4_ext_precache(struct inode *inode)
down_read(&ei->i_data_sem);
depth = ext_depth(inode);
+ /* Don't cache anything if there are no external extent blocks */
+ if (!depth) {
+ up_read(&ei->i_data_sem);
+ return ret;
+ }
+
path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
GFP_NOFS);
if (path == NULL) {
@@ -563,9 +557,6 @@ int ext4_ext_precache(struct inode *inode)
return -ENOMEM;
}
- /* Don't cache anything if there are no external extent blocks */
- if (depth == 0)
- goto out;
path[0].p_hdr = ext_inode_hdr(inode);
ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
if (ret)
@@ -2134,155 +2125,6 @@ cleanup:
return err;
}
-static int ext4_fill_fiemap_extents(struct inode *inode,
- ext4_lblk_t block, ext4_lblk_t num,
- struct fiemap_extent_info *fieinfo)
-{
- struct ext4_ext_path *path = NULL;
- struct ext4_extent *ex;
- struct extent_status es;
- ext4_lblk_t next, next_del, start = 0, end = 0;
- ext4_lblk_t last = block + num;
- int exists, depth = 0, err = 0;
- unsigned int flags = 0;
- unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
-
- while (block < last && block != EXT_MAX_BLOCKS) {
- num = last - block;
- /* find extent for this block */
- down_read(&EXT4_I(inode)->i_data_sem);
-
- path = ext4_find_extent(inode, block, &path, 0);
- if (IS_ERR(path)) {
- up_read(&EXT4_I(inode)->i_data_sem);
- err = PTR_ERR(path);
- path = NULL;
- break;
- }
-
- depth = ext_depth(inode);
- if (unlikely(path[depth].p_hdr == NULL)) {
- up_read(&EXT4_I(inode)->i_data_sem);
- EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
- err = -EFSCORRUPTED;
- break;
- }
- ex = path[depth].p_ext;
- next = ext4_ext_next_allocated_block(path);
-
- flags = 0;
- exists = 0;
- if (!ex) {
- /* there is no extent yet, so try to allocate
- * all requested space */
- start = block;
- end = block + num;
- } else if (le32_to_cpu(ex->ee_block) > block) {
- /* need to allocate space before found extent */
- start = block;
- end = le32_to_cpu(ex->ee_block);
- if (block + num < end)
- end = block + num;
- } else if (block >= le32_to_cpu(ex->ee_block)
- + ext4_ext_get_actual_len(ex)) {
- /* need to allocate space after found extent */
- start = block;
- end = block + num;
- if (end >= next)
- end = next;
- } else if (block >= le32_to_cpu(ex->ee_block)) {
- /*
- * some part of requested space is covered
- * by found extent
- */
- start = block;
- end = le32_to_cpu(ex->ee_block)
- + ext4_ext_get_actual_len(ex);
- if (block + num < end)
- end = block + num;
- exists = 1;
- } else {
- BUG();
- }
- BUG_ON(end <= start);
-
- if (!exists) {
- es.es_lblk = start;
- es.es_len = end - start;
- es.es_pblk = 0;
- } else {
- es.es_lblk = le32_to_cpu(ex->ee_block);
- es.es_len = ext4_ext_get_actual_len(ex);
- es.es_pblk = ext4_ext_pblock(ex);
- if (ext4_ext_is_unwritten(ex))
- flags |= FIEMAP_EXTENT_UNWRITTEN;
- }
-
- /*
- * Find delayed extent and update es accordingly. We call
- * it even in !exists case to find out whether es is the
- * last existing extent or not.
- */
- next_del = ext4_find_delayed_extent(inode, &es);
- if (!exists && next_del) {
- exists = 1;
- flags |= (FIEMAP_EXTENT_DELALLOC |
- FIEMAP_EXTENT_UNKNOWN);
- }
- up_read(&EXT4_I(inode)->i_data_sem);
-
- if (unlikely(es.es_len == 0)) {
- EXT4_ERROR_INODE(inode, "es.es_len == 0");
- err = -EFSCORRUPTED;
- break;
- }
-
- /*
- * This is possible iff next == next_del == EXT_MAX_BLOCKS.
- * we need to check next == EXT_MAX_BLOCKS because it is
- * possible that an extent is with unwritten and delayed
- * status due to when an extent is delayed allocated and
- * is allocated by fallocate status tree will track both of
- * them in a extent.
- *
- * So we could return a unwritten and delayed extent, and
- * its block is equal to 'next'.
- */
- if (next == next_del && next == EXT_MAX_BLOCKS) {
- flags |= FIEMAP_EXTENT_LAST;
- if (unlikely(next_del != EXT_MAX_BLOCKS ||
- next != EXT_MAX_BLOCKS)) {
- EXT4_ERROR_INODE(inode,
- "next extent == %u, next "
- "delalloc extent = %u",
- next, next_del);
- err = -EFSCORRUPTED;
- break;
- }
- }
-
- if (exists) {
- err = fiemap_fill_next_extent(fieinfo,
- (__u64)es.es_lblk << blksize_bits,
- (__u64)es.es_pblk << blksize_bits,
- (__u64)es.es_len << blksize_bits,
- flags);
- if (err < 0)
- break;
- if (err == 1) {
- err = 0;
- break;
- }
- }
-
- block = es.es_lblk + es.es_len;
- }
-
- ext4_ext_drop_refs(path);
- kfree(path);
- return err;
-}
-
static int ext4_fill_es_cache_info(struct inode *inode,
ext4_lblk_t block, ext4_lblk_t num,
struct fiemap_extent_info *fieinfo)
@@ -3874,64 +3716,11 @@ out:
return err;
}
-/*
- * Handle EOFBLOCKS_FL flag, clearing it if necessary
- */
-static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
- ext4_lblk_t lblk,
- struct ext4_ext_path *path,
- unsigned int len)
-{
- int i, depth;
- struct ext4_extent_header *eh;
- struct ext4_extent *last_ex;
-
- if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
- return 0;
-
- depth = ext_depth(inode);
- eh = path[depth].p_hdr;
-
- /*
- * We're going to remove EOFBLOCKS_FL entirely in future so we
- * do not care for this case anymore. Simply remove the flag
- * if there are no extents.
- */
- if (unlikely(!eh->eh_entries))
- goto out;
- last_ex = EXT_LAST_EXTENT(eh);
- /*
- * We should clear the EOFBLOCKS_FL flag if we are writing the
- * last block in the last extent in the file. We test this by
- * first checking to see if the caller to
- * ext4_ext_get_blocks() was interested in the last block (or
- * a block beyond the last block) in the current extent. If
- * this turns out to be false, we can bail out from this
- * function immediately.
- */
- if (lblk + len < le32_to_cpu(last_ex->ee_block) +
- ext4_ext_get_actual_len(last_ex))
- return 0;
- /*
- * If the caller does appear to be planning to write at or
- * beyond the end of the current extent, we then test to see
- * if the current extent is the last extent in the file, by
- * checking to make sure it was reached via the rightmost node
- * at each level of the tree.
- */
- for (i = depth-1; i >= 0; i--)
- if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
- return 0;
-out:
- ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
- return ext4_mark_inode_dirty(handle, inode);
-}
-
static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath,
- unsigned int allocated)
+ unsigned int *allocated)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
@@ -3991,14 +3780,12 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
ext4_ext_show_leaf(inode, path);
ext4_update_inode_fsync_trans(handle, inode, 1);
- err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
- if (err)
- return err;
+
map->m_flags |= EXT4_MAP_UNWRITTEN;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
- return allocated;
+ if (*allocated > map->m_len)
+ *allocated = map->m_len;
+ map->m_len = *allocated;
+ return 0;
}
static int
@@ -4007,7 +3794,9 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock)
{
+#ifdef EXT_DEBUG
struct ext4_ext_path *path = *ppath;
+#endif
int ret = 0;
int err = 0;
@@ -4047,11 +3836,9 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
}
ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
ppath);
- if (ret >= 0) {
+ if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
- err = check_eofblocks_fl(handle, inode, map->m_lblk,
- path, map->m_len);
- } else
+ else
err = ret;
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock;
@@ -4100,12 +3887,6 @@ out:
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
- if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
- err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
- map->m_len);
- if (err < 0)
- goto out2;
- }
out1:
if (allocated > map->m_len)
allocated = map->m_len;
@@ -4244,12 +4025,11 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0;
- int free_on_err = 0, err = 0, depth, ret;
+ int err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
ext4_lblk_t cluster_offset;
- bool map_from_cluster = false;
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
@@ -4308,12 +4088,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
- allocated = convert_initialized_extent(
- handle, inode, map, &path,
- allocated);
+ err = convert_initialized_extent(handle,
+ inode, map, &path, &allocated);
goto out2;
- } else if (!ext4_ext_is_unwritten(ex))
+ } else if (!ext4_ext_is_unwritten(ex)) {
goto out;
+ }
ret = ext4_ext_handle_unwritten_extents(
handle, inode, map, &path, flags,
@@ -4364,7 +4144,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
- map_from_cluster = true;
goto got_allocated_blocks;
}
@@ -4385,7 +4164,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
ar.len = allocated = map->m_len;
newblock = map->m_pblk;
- map_from_cluster = true;
goto got_allocated_blocks;
}
@@ -4442,7 +4220,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
goto out2;
ext_debug("allocate new block: goal %llu, found %llu/%u\n",
ar.goal, newblock, allocated);
- free_on_err = 1;
allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset;
if (ar.len > allocated)
@@ -4453,28 +4230,28 @@ got_allocated_blocks:
ext4_ext_store_pblock(&newex, newblock + offset);
newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */
- if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
+ if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
ext4_ext_mark_unwritten(&newex);
map->m_flags |= EXT4_MAP_UNWRITTEN;
}
- err = 0;
- if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
- err = check_eofblocks_fl(handle, inode, map->m_lblk,
- path, ar.len);
- if (!err)
- err = ext4_ext_insert_extent(handle, inode, &path,
- &newex, flags);
-
- if (err && free_on_err) {
- int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
- EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
- /* free data blocks we just allocated */
- /* not a good idea to call discard here directly,
- * but otherwise we'd need to call it every free() */
- ext4_discard_preallocations(inode);
- ext4_free_blocks(handle, inode, NULL, newblock,
- EXT4_C2B(sbi, allocated_clusters), fb_flags);
+ err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
+ if (err) {
+ if (allocated_clusters) {
+ int fb_flags = 0;
+
+ /*
+ * Free data blocks we just allocated. Calling discard here
+ * directly is not a good idea, but otherwise we'd need to
+ * call it on every free().
+ */
+ ext4_discard_preallocations(inode);
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
+ ext4_free_blocks(handle, inode, NULL, newblock,
+ EXT4_C2B(sbi, allocated_clusters),
+ fb_flags);
+ }
goto out2;
}
@@ -4491,7 +4268,7 @@ got_allocated_blocks:
* clusters discovered to be delayed allocated. Once allocated, a
* cluster is not included in the reserved count.
*/
- if (test_opt(inode->i_sb, DELALLOC) && !map_from_cluster) {
+ if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
/*
* When allocating delayed allocated clusters, simply
@@ -4645,10 +4422,6 @@ retry:
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime;
- } else {
- if (epos > inode->i_size)
- ext4_set_inode_flag(inode,
- EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -4802,16 +4575,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
inode->i_mtime = inode->i_ctime = current_time(inode);
- if (new_size) {
+ if (new_size)
ext4_update_inode_size(inode, new_size);
- } else {
- /*
- * Mark that we allocate beyond EOF so the subsequent truncate
- * can proceed even if the new size is the same as i_size.
- */
- if (offset + len > inode->i_size)
- ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
- }
ext4_mark_inode_dirty(handle, inode);
/* Zero out partial block at the edges of the range */
@@ -5009,64 +4774,13 @@ int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
return ret < 0 ? ret : err;
}
-/*
- * If newes is not existing extent (newes->ec_pblk equals zero) find
- * delayed extent at start of newes and update newes accordingly and
- * return start of the next delayed extent.
- *
- * If newes is existing extent (newes->ec_pblk is not equal zero)
- * return start of next delayed extent or EXT_MAX_BLOCKS if no delayed
- * extent found. Leave newes unmodified.
- */
-static int ext4_find_delayed_extent(struct inode *inode,
- struct extent_status *newes)
-{
- struct extent_status es;
- ext4_lblk_t block, next_del;
-
- if (newes->es_pblk == 0) {
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
- newes->es_lblk,
- newes->es_lblk + newes->es_len - 1,
- &es);
-
- /*
- * No extent in extent-tree contains block @newes->es_pblk,
- * then the block may stay in 1)a hole or 2)delayed-extent.
- */
- if (es.es_len == 0)
- /* A hole found. */
- return 0;
-
- if (es.es_lblk > newes->es_lblk) {
- /* A hole found. */
- newes->es_len = min(es.es_lblk - newes->es_lblk,
- newes->es_len);
- return 0;
- }
-
- newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
- }
-
- block = newes->es_lblk + newes->es_len;
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block,
- EXT_MAX_BLOCKS, &es);
- if (es.es_len == 0)
- next_del = EXT_MAX_BLOCKS;
- else
- next_del = es.es_lblk;
-
- return next_del;
-}
-
-static int ext4_xattr_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo)
+static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
{
__u64 physical = 0;
- __u64 length;
- __u32 flags = FIEMAP_EXTENT_LAST;
+ __u64 length = 0;
int blockbits = inode->i_sb->s_blocksize_bits;
int error = 0;
+ u16 iomap_type;
/* in-inode? */
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
@@ -5081,40 +4795,49 @@ static int ext4_xattr_fiemap(struct inode *inode,
EXT4_I(inode)->i_extra_isize;
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
- flags |= FIEMAP_EXTENT_DATA_INLINE;
brelse(iloc.bh);
- } else { /* external block */
+ iomap_type = IOMAP_INLINE;
+ } else if (EXT4_I(inode)->i_file_acl) { /* external block */
physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
+ iomap_type = IOMAP_MAPPED;
+ } else {
+ /* no in-inode or external block for xattr, so return -ENOENT */
+ error = -ENOENT;
+ goto out;
}
- if (physical)
- error = fiemap_fill_next_extent(fieinfo, 0, physical,
- length, flags);
- return (error < 0 ? error : 0);
+ iomap->addr = physical;
+ iomap->offset = 0;
+ iomap->length = length;
+ iomap->type = iomap_type;
+ iomap->flags = 0;
+out:
+ return error;
}
-static int _ext4_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len,
- int (*fill)(struct inode *, ext4_lblk_t,
- ext4_lblk_t,
- struct fiemap_extent_info *))
+static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
+ loff_t length, unsigned flags,
+ struct iomap *iomap, struct iomap *srcmap)
{
- ext4_lblk_t start_blk;
- u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR;
+ int error;
- int error = 0;
-
- if (ext4_has_inline_data(inode)) {
- int has_inline = 1;
+ error = ext4_iomap_xattr_fiemap(inode, iomap);
+ if (error == 0 && (offset >= iomap->length))
+ error = -ENOENT;
+ return error;
+}
- error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
- start, len);
+static const struct iomap_ops ext4_iomap_xattr_ops = {
+ .iomap_begin = ext4_iomap_xattr_begin,
+};
- if (has_inline)
- return error;
- }
+static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len, bool from_es_cache)
+{
+ ext4_lblk_t start_blk;
+ u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR;
+ int error = 0;
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
error = ext4_ext_precache(inode);
@@ -5123,19 +4846,19 @@ static int _ext4_fiemap(struct inode *inode,
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
}
- /* fallback to generic here if not in extents fmt */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
- fill == ext4_fill_fiemap_extents)
- return generic_block_fiemap(inode, fieinfo, start, len,
- ext4_get_block);
-
- if (fill == ext4_fill_es_cache_info)
+ if (from_es_cache)
ext4_fiemap_flags &= FIEMAP_FLAG_XATTR;
+
if (fiemap_check_flags(fieinfo, ext4_fiemap_flags))
return -EBADR;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
- error = ext4_xattr_fiemap(inode, fieinfo);
+ fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
+ error = iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_xattr_ops);
+ } else if (!from_es_cache) {
+ error = iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_report_ops);
} else {
ext4_lblk_t len_blks;
__u64 last_blk;
@@ -5150,7 +4873,8 @@ static int _ext4_fiemap(struct inode *inode,
* Walk the extent tree gathering extent information
* and pushing extents back to the user.
*/
- error = fill(inode, start_blk, len_blks, fieinfo);
+ error = ext4_fill_es_cache_info(inode, start_blk, len_blks,
+ fieinfo);
}
return error;
}
@@ -5158,8 +4882,7 @@ static int _ext4_fiemap(struct inode *inode,
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
- return _ext4_fiemap(inode, fieinfo, start, len,
- ext4_fill_fiemap_extents);
+ return _ext4_fiemap(inode, fieinfo, start, len, false);
}
int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -5175,8 +4898,7 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
return 0;
}
- return _ext4_fiemap(inode, fieinfo, start, len,
- ext4_fill_es_cache_info);
+ return _ext4_fiemap(inode, fieinfo, start, len, true);
}
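
Note, not part of the patch: with ext4_fill_fiemap_extents() removed above, regular-file FIEMAP requests are now served through iomap_fiemap() and ext4_iomap_report_ops, and FIEMAP_FLAG_XATTR requests through the new ext4_iomap_xattr_ops. A minimal user-space sketch of the FS_IOC_FIEMAP ioctl these paths serve (the file path is a placeholder):

	/* User-space sketch: dump a file's extent map via FS_IOC_FIEMAP. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	int main(int argc, char **argv)
	{
		struct fiemap *fm;
		unsigned int i;
		int fd;

		fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* room for the header plus 32 extent records */
		fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
		fm->fm_start = 0;
		fm->fm_length = FIEMAP_MAX_OFFSET;
		fm->fm_flags = FIEMAP_FLAG_SYNC;
		fm->fm_extent_count = 32;
		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
			perror("FS_IOC_FIEMAP");
			return 1;
		}
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("logical %llu physical %llu length %llu flags %x\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_physical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);
		free(fm);
		close(fd);
		return 0;
	}
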
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5f225881176b..0d624250a62b 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -872,6 +872,7 @@ const struct file_operations ext4_file_operations = {
.llseek = ext4_llseek,
.read_iter = ext4_file_read_iter,
.write_iter = ext4_file_write_iter,
+ .iopoll = iomap_dio_iopoll,
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
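
Note, not part of the patch: the new .iopoll = iomap_dio_iopoll hook wires ext4 direct I/O into the polled-completion path. One way to drive it from user space is a synchronous O_DIRECT read with RWF_HIPRI; a hedged sketch, where the 4096-byte alignment and the file name are assumptions:

	/* User-space sketch: polled O_DIRECT read via preadv2(..., RWF_HIPRI). */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/uio.h>

	int main(void)
	{
		struct iovec iov;
		void *buf;
		ssize_t n;
		int fd;

		fd = open("testfile", O_RDONLY | O_DIRECT);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (posix_memalign(&buf, 4096, 4096))
			return 1;
		iov.iov_base = buf;
		iov.iov_len = 4096;
		/* RWF_HIPRI asks the kernel to poll for completion */
		n = preadv2(fd, &iov, 1, 0, RWF_HIPRI);
		if (n < 0)
			perror("preadv2");
		else
			printf("read %zd bytes\n", n);
		free(buf);
		close(fd);
		return 0;
	}
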
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f95ee99091e4..b420c9dc444d 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -196,10 +196,9 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
if (!buffer_uptodate(bh)) {
put_bh(bh);
- ext4_set_errno(sb, EIO);
- ext4_error(sb, "Cannot read inode bitmap - "
- "block_group = %u, inode_bitmap = %llu",
- block_group, bitmap_blk);
+ ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
+ "block_group = %u, inode_bitmap = %llu",
+ block_group, bitmap_blk);
ext4_mark_group_bitmap_corrupted(sb, block_group,
EXT4_GROUP_INFO_IBITMAP_CORRUPT);
return ERR_PTR(-EIO);
@@ -712,21 +711,34 @@ out:
static int find_inode_bit(struct super_block *sb, ext4_group_t group,
struct buffer_head *bitmap, unsigned long *ino)
{
+ bool check_recently_deleted = EXT4_SB(sb)->s_journal == NULL;
+ unsigned long recently_deleted_ino = EXT4_INODES_PER_GROUP(sb);
+
next:
*ino = ext4_find_next_zero_bit((unsigned long *)
bitmap->b_data,
EXT4_INODES_PER_GROUP(sb), *ino);
if (*ino >= EXT4_INODES_PER_GROUP(sb))
- return 0;
+ goto not_found;
- if ((EXT4_SB(sb)->s_journal == NULL) &&
- recently_deleted(sb, group, *ino)) {
+ if (check_recently_deleted && recently_deleted(sb, group, *ino)) {
+ recently_deleted_ino = *ino;
*ino = *ino + 1;
if (*ino < EXT4_INODES_PER_GROUP(sb))
goto next;
- return 0;
+ goto not_found;
}
-
+ return 1;
+not_found:
+ if (recently_deleted_ino >= EXT4_INODES_PER_GROUP(sb))
+ return 0;
+ /*
+ * Not reusing recently deleted inodes is mostly a preference. We don't
+ * want to report ENOSPC or skew allocation patterns because of that.
+ * So fall back to a recently deleted inode if we could not find a
+ * better one in the given range.
+ */
+ *ino = recently_deleted_ino;
return 1;
}
@@ -1231,9 +1243,9 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
- ext4_set_errno(sb, -err);
- ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
- ino, err);
+ ext4_error_err(sb, -err,
+ "couldn't read orphan inode %lu (err %d)",
+ ino, err);
return inode;
}
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 569fc68e8975..107f0043f67f 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1019,7 +1019,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
* (should be rare).
*/
if (!bh) {
- EXT4_ERROR_INODE_BLOCK(inode, nr,
+ ext4_error_inode_block(inode, nr, EIO,
"Read failure");
continue;
}
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index fad82d08fca5..f35e289e17aa 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -98,10 +98,9 @@ int ext4_get_max_inline_size(struct inode *inode)
error = ext4_get_inode_loc(inode, &iloc);
if (error) {
- ext4_set_errno(inode->i_sb, -error);
- ext4_error_inode(inode, __func__, __LINE__, 0,
- "can't get inode location %lu",
- inode->i_ino);
+ ext4_error_inode_err(inode, __func__, __LINE__, 0, -error,
+ "can't get inode location %lu",
+ inode->i_ino);
return 0;
}
@@ -1762,9 +1761,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
err = ext4_get_inode_loc(dir, &iloc);
if (err) {
- ext4_set_errno(dir->i_sb, -err);
- EXT4_ERROR_INODE(dir, "error %d getting inode %lu block",
- err, dir->i_ino);
+ EXT4_ERROR_INODE_ERR(dir, -err,
+ "error %d getting inode %lu block",
+ err, dir->i_ino);
return true;
}
@@ -1857,47 +1856,6 @@ out:
return error;
}
-int ext4_inline_data_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- int *has_inline, __u64 start, __u64 len)
-{
- __u64 physical = 0;
- __u64 inline_len;
- __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
- FIEMAP_EXTENT_LAST;
- int error = 0;
- struct ext4_iloc iloc;
-
- down_read(&EXT4_I(inode)->xattr_sem);
- if (!ext4_has_inline_data(inode)) {
- *has_inline = 0;
- goto out;
- }
- inline_len = min_t(size_t, ext4_get_inline_size(inode),
- i_size_read(inode));
- if (start >= inline_len)
- goto out;
- if (start + len < inline_len)
- inline_len = start + len;
- inline_len -= start;
-
- error = ext4_get_inode_loc(inode, &iloc);
- if (error)
- goto out;
-
- physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
- physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
- physical += offsetof(struct ext4_inode, i_block);
-
- brelse(iloc.bh);
-out:
- up_read(&EXT4_I(inode)->xattr_sem);
- if (physical)
- error = fiemap_fill_next_extent(fieinfo, start, physical,
- inline_len, flags);
- return (error < 0 ? error : 0);
-}
-
int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fa0ff78dc033..e416096fc081 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -269,10 +269,9 @@ void ext4_evict_inode(struct inode *inode)
if (inode->i_blocks) {
err = ext4_truncate(inode);
if (err) {
- ext4_set_errno(inode->i_sb, -err);
- ext4_error(inode->i_sb,
- "couldn't truncate inode %lu (err %d)",
- inode->i_ino, err);
+ ext4_error_err(inode->i_sb, -err,
+ "couldn't truncate inode %lu (err %d)",
+ inode->i_ino, err);
goto stop_handle;
}
}
@@ -2478,10 +2477,9 @@ update_disksize:
up_write(&EXT4_I(inode)->i_data_sem);
err2 = ext4_mark_inode_dirty(handle, inode);
if (err2) {
- ext4_set_errno(inode->i_sb, -err2);
- ext4_error(inode->i_sb,
- "Failed to mark inode %lu dirty",
- inode->i_ino);
+ ext4_error_err(inode->i_sb, -err2,
+ "Failed to mark inode %lu dirty",
+ inode->i_ino);
}
if (!err)
err = err2;
@@ -3212,7 +3210,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
return 0;
}
- return generic_block_bmap(mapping, block, ext4_get_block);
+ return iomap_bmap(mapping, block, &ext4_iomap_ops);
}
static int ext4_readpage(struct file *file, struct page *page)
@@ -3333,6 +3331,10 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
iomap->offset = (u64) map->m_lblk << blkbits;
iomap->length = (u64) map->m_len << blkbits;
+ if ((map->m_flags & EXT4_MAP_MAPPED) &&
+ !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ iomap->flags |= IOMAP_F_MERGED;
+
/*
* Flags passed to ext4_map_blocks() for direct I/O writes can result
* in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
@@ -3542,12 +3544,28 @@ static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
+ /*
+ * Fiemap callers may call for an offset beyond s_bitmap_maxbytes.
+ * Handle that here instead of querying ext4_map_blocks(),
+ * which would warn about it and return -EIO.
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+ if (offset >= sbi->s_bitmap_maxbytes) {
+ map.m_flags = 0;
+ goto set_iomap;
+ }
+ }
+
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret < 0)
return ret;
if (ret == 0)
delalloc = ext4_iomap_is_delalloc(inode, &map);
+set_iomap:
ext4_set_iomap(inode, iomap, &map, offset, length);
if (delalloc && iomap->type == IOMAP_HOLE)
iomap->type = IOMAP_DELALLOC;
@@ -4144,8 +4162,6 @@ int ext4_truncate(struct inode *inode)
if (!ext4_can_truncate(inode))
return 0;
- ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
-
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
@@ -4364,8 +4380,7 @@ make_io:
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
simulate_eio:
- ext4_set_errno(inode->i_sb, EIO);
- EXT4_ERROR_INODE_BLOCK(inode, block,
+ ext4_error_inode_block(inode, block, EIO,
"unable to read itable block");
brelse(bh);
return -EIO;
@@ -4517,7 +4532,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
(ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
if (flags & EXT4_IGET_HANDLE)
return ERR_PTR(-ESTALE);
- __ext4_error(sb, function, line,
+ __ext4_error(sb, function, line, EFSCORRUPTED, 0,
"inode #%lu: comm %s: iget: illegal inode #",
ino, current->comm);
return ERR_PTR(-EFSCORRUPTED);
@@ -4580,9 +4595,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
if (!ext4_inode_csum_verify(inode, raw_inode, ei) ||
ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) {
- ext4_set_errno(inode->i_sb, EFSBADCRC);
- ext4_error_inode(inode, function, line, 0,
- "iget: checksum invalid");
+ ext4_error_inode_err(inode, function, line, 0, EFSBADCRC,
+ "iget: checksum invalid");
ret = -EFSBADCRC;
goto bad_inode;
}
@@ -4812,7 +4826,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode_info *ei)
{
struct inode *inode = &(ei->vfs_inode);
- u64 i_blocks = inode->i_blocks;
+ u64 i_blocks = READ_ONCE(inode->i_blocks);
struct super_block *sb = inode->i_sb;
if (i_blocks <= ~0U) {
@@ -4982,7 +4996,7 @@ static int ext4_do_update_inode(handle_t *handle,
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
- if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
+ if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1;
}
@@ -5131,9 +5145,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
- ext4_set_errno(inode->i_sb, EIO);
- EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
- "IO error syncing inode");
+ ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
+ "IO error syncing inode");
err = -EIO;
}
brelse(iloc.bh);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0c1d1720cf1a..bfc1281fc4cb 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -327,18 +327,6 @@ static int ext4_ioctl_setflags(struct inode *inode,
if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
migrate = 1;
- if (flags & EXT4_EOFBLOCKS_FL) {
- /* we don't support adding EOFBLOCKS flag */
- if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
- err = -EOPNOTSUPP;
- goto flags_out;
- }
- } else if (oldflags & EXT4_EOFBLOCKS_FL) {
- err = ext4_truncate(inode);
- if (err)
- goto flags_out;
- }
-
if ((flags ^ oldflags) & EXT4_CASEFOLD_FL) {
if (!ext4_has_feature_casefold(sb)) {
err = -EOPNOTSUPP;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 51a78eb65f3c..87c85be4c12e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
BUG_ON(buddy == NULL);
k = mb_find_next_zero_bit(buddy, max, 0);
- BUG_ON(k >= max);
-
+ if (k >= max) {
+ ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
+ "%d free clusters of order %d. But found 0",
+ grp->bb_counters[i], i);
+ ext4_mark_group_bitmap_corrupted(ac->ac_sb,
+ e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ break;
+ }
ac->ac_found++;
ac->ac_b_ex.fe_len = 1 << i;
@@ -3914,9 +3921,9 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (IS_ERR(bitmap_bh)) {
err = PTR_ERR(bitmap_bh);
- ext4_set_errno(sb, -err);
- ext4_error(sb, "Error %d reading block bitmap for %u",
- err, group);
+ ext4_error_err(sb, -err,
+ "Error %d reading block bitmap for %u",
+ err, group);
return 0;
}
@@ -4083,18 +4090,16 @@ repeat:
err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
GFP_NOFS|__GFP_NOFAIL);
if (err) {
- ext4_set_errno(sb, -err);
- ext4_error(sb, "Error %d loading buddy information for %u",
- err, group);
+ ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (IS_ERR(bitmap_bh)) {
err = PTR_ERR(bitmap_bh);
- ext4_set_errno(sb, -err);
- ext4_error(sb, "Error %d reading block bitmap for %u",
- err, group);
+ ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
+ err, group);
ext4_mb_unload_buddy(&e4b);
continue;
}
@@ -4302,7 +4307,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
- pa_inode_list) {
+ pa_inode_list,
+ lockdep_is_held(&lg->lg_prealloc_lock)) {
spin_lock(&pa->pa_lock);
if (atomic_read(&pa->pa_count)) {
/*
@@ -4347,9 +4353,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
GFP_NOFS|__GFP_NOFAIL);
if (err) {
- ext4_set_errno(sb, -err);
- ext4_error(sb, "Error %d loading buddy information for %u",
- err, group);
+ ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
+ err, group);
continue;
}
ext4_lock_group(sb, group);
@@ -4386,7 +4391,8 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
/* Add the prealloc space to lg */
spin_lock(&lg->lg_prealloc_lock);
list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
- pa_inode_list) {
+ pa_inode_list,
+ lockdep_is_held(&lg->lg_prealloc_lock)) {
spin_lock(&tmp_pa->pa_lock);
if (tmp_pa->pa_deleted) {
spin_unlock(&tmp_pa->pa_lock);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 87f7551c5132..d34cb8c46655 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -175,8 +175,8 @@ static int kmmpd(void *data)
*/
if (retval) {
if ((failed_writes % 60) == 0) {
- ext4_set_errno(sb, -retval);
- ext4_error(sb, "Error writing to MMP block");
+ ext4_error_err(sb, -retval,
+ "Error writing to MMP block");
}
failed_writes++;
}
@@ -208,9 +208,9 @@ static int kmmpd(void *data)
retval = read_mmp_block(sb, &bh_check, mmp_block);
if (retval) {
- ext4_set_errno(sb, -retval);
- ext4_error(sb, "error reading MMP data: %d",
- retval);
+ ext4_error_err(sb, -retval,
+ "error reading MMP data: %d",
+ retval);
goto exit_thread;
}
@@ -222,8 +222,7 @@ static int kmmpd(void *data)
"Error while updating MMP info. "
"The filesystem seems to have been"
" multiply mounted.");
- ext4_set_errno(sb, EBUSY);
- ext4_error(sb, "abort");
+ ext4_error_err(sb, EBUSY, "abort");
put_bh(bh_check);
retval = -EBUSY;
goto exit_thread;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 30ce3dc69378..1ed86fb6c302 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -422,8 +422,8 @@ repair_branches:
block_len_in_page, 0, &err2);
ext4_double_up_write_data_sem(orig_inode, donor_inode);
if (replaced_count != block_len_in_page) {
- EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
- "Unable to copy data block,"
+ ext4_error_inode_block(orig_inode, (sector_t)(orig_blk_offset),
+ EIO, "Unable to copy data block,"
" data will be lost.");
*err = -EIO;
}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b05ea72f38fd..a8aca4772aaa 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -160,9 +160,9 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
!ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
set_buffer_verified(bh);
else {
- ext4_set_errno(inode->i_sb, EFSBADCRC);
- ext4_error_inode(inode, func, line, block,
- "Directory index failed checksum");
+ ext4_error_inode_err(inode, func, line, block,
+ EFSBADCRC,
+ "Directory index failed checksum");
brelse(bh);
return ERR_PTR(-EFSBADCRC);
}
@@ -172,9 +172,9 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
!ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
set_buffer_verified(bh);
else {
- ext4_set_errno(inode->i_sb, EFSBADCRC);
- ext4_error_inode(inode, func, line, block,
- "Directory block failed checksum");
+ ext4_error_inode_err(inode, func, line, block,
+ EFSBADCRC,
+ "Directory block failed checksum");
brelse(bh);
return ERR_PTR(-EFSBADCRC);
}
@@ -233,13 +233,13 @@ struct dx_root
u8 unused_flags;
}
info;
- struct dx_entry entries[0];
+ struct dx_entry entries[];
};
struct dx_node
{
struct fake_dirent fake;
- struct dx_entry entries[0];
+ struct dx_entry entries[];
};
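
Note, not part of the patch: the entries[0] to entries[] conversions here (and in xattr.h further down) replace the old zero-length-array idiom with C99 flexible array members, which compilers and fortify checks can bounds-check. A generic user-space sketch of the allocation pattern, with made-up names:

	/* User-space sketch: allocating a struct with a flexible array member. */
	#include <stdio.h>
	#include <stdlib.h>

	struct entry_list {
		unsigned int count;
		int entries[];	/* flexible array member, no [0] hack */
	};

	int main(void)
	{
		unsigned int i, n = 4;
		struct entry_list *list;

		list = malloc(sizeof(*list) + n * sizeof(list->entries[0]));
		if (!list)
			return 1;
		list->count = n;
		for (i = 0; i < n; i++)
			list->entries[i] = i * i;
		for (i = 0; i < list->count; i++)
			printf("entries[%u] = %d\n", i, list->entries[i]);
		free(list);
		return 0;
	}
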
@@ -1532,9 +1532,9 @@ restart:
goto next;
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
- ext4_set_errno(sb, EIO);
- EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
- (unsigned long) block);
+ EXT4_ERROR_INODE_ERR(dir, EIO,
+ "reading directory lblock %lu",
+ (unsigned long) block);
brelse(bh);
ret = ERR_PTR(-EIO);
goto cleanup_and_exit;
@@ -1543,9 +1543,9 @@ restart:
!is_dx_internal_node(dir, block,
(struct ext4_dir_entry *)bh->b_data) &&
!ext4_dirblock_csum_verify(dir, bh)) {
- ext4_set_errno(sb, EFSBADCRC);
- EXT4_ERROR_INODE(dir, "checksumming directory "
- "block %lu", (unsigned long)block);
+ EXT4_ERROR_INODE_ERR(dir, EFSBADCRC,
+ "checksumming directory "
+ "block %lu", (unsigned long)block);
brelse(bh);
ret = ERR_PTR(-EFSBADCRC);
goto cleanup_and_exit;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c8dff4c68141..9728e7b0e84f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -335,10 +335,12 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
#define ext4_get_tstamp(es, tstamp) \
__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
-static void __save_error_info(struct super_block *sb, const char *func,
- unsigned int line)
+static void __save_error_info(struct super_block *sb, int error,
+ __u32 ino, __u64 block,
+ const char *func, unsigned int line)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ int err;
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
if (bdev_read_only(sb->s_bdev))
@@ -347,8 +349,62 @@ static void __save_error_info(struct super_block *sb, const char *func,
ext4_update_tstamp(es, s_last_error_time);
strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
es->s_last_error_line = cpu_to_le32(line);
- if (es->s_last_error_errcode == 0)
- es->s_last_error_errcode = EXT4_ERR_EFSCORRUPTED;
+ es->s_last_error_ino = cpu_to_le32(ino);
+ es->s_last_error_block = cpu_to_le64(block);
+ switch (error) {
+ case EIO:
+ err = EXT4_ERR_EIO;
+ break;
+ case ENOMEM:
+ err = EXT4_ERR_ENOMEM;
+ break;
+ case EFSBADCRC:
+ err = EXT4_ERR_EFSBADCRC;
+ break;
+ case 0:
+ case EFSCORRUPTED:
+ err = EXT4_ERR_EFSCORRUPTED;
+ break;
+ case ENOSPC:
+ err = EXT4_ERR_ENOSPC;
+ break;
+ case ENOKEY:
+ err = EXT4_ERR_ENOKEY;
+ break;
+ case EROFS:
+ err = EXT4_ERR_EROFS;
+ break;
+ case EFBIG:
+ err = EXT4_ERR_EFBIG;
+ break;
+ case EEXIST:
+ err = EXT4_ERR_EEXIST;
+ break;
+ case ERANGE:
+ err = EXT4_ERR_ERANGE;
+ break;
+ case EOVERFLOW:
+ err = EXT4_ERR_EOVERFLOW;
+ break;
+ case EBUSY:
+ err = EXT4_ERR_EBUSY;
+ break;
+ case ENOTDIR:
+ err = EXT4_ERR_ENOTDIR;
+ break;
+ case ENOTEMPTY:
+ err = EXT4_ERR_ENOTEMPTY;
+ break;
+ case ESHUTDOWN:
+ err = EXT4_ERR_ESHUTDOWN;
+ break;
+ case EFAULT:
+ err = EXT4_ERR_EFAULT;
+ break;
+ default:
+ err = EXT4_ERR_UNKNOWN;
+ }
+ es->s_last_error_errcode = err;
if (!es->s_first_error_time) {
es->s_first_error_time = es->s_last_error_time;
es->s_first_error_time_hi = es->s_last_error_time_hi;
@@ -368,11 +424,13 @@ static void __save_error_info(struct super_block *sb, const char *func,
le32_add_cpu(&es->s_error_count, 1);
}
-static void save_error_info(struct super_block *sb, const char *func,
- unsigned int line)
+static void save_error_info(struct super_block *sb, int error,
+ __u32 ino, __u64 block,
+ const char *func, unsigned int line)
{
- __save_error_info(sb, func, line);
- ext4_commit_super(sb, 1);
+ __save_error_info(sb, error, ino, block, func, line);
+ if (!bdev_read_only(sb->s_bdev))
+ ext4_commit_super(sb, 1);
}
/*
@@ -477,7 +535,8 @@ static void ext4_handle_error(struct super_block *sb)
"EXT4-fs error")
void __ext4_error(struct super_block *sb, const char *function,
- unsigned int line, const char *fmt, ...)
+ unsigned int line, int error, __u64 block,
+ const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -495,24 +554,21 @@ void __ext4_error(struct super_block *sb, const char *function,
sb->s_id, function, line, current->comm, &vaf);
va_end(args);
}
- save_error_info(sb, function, line);
+ save_error_info(sb, error, 0, block, function, line);
ext4_handle_error(sb);
}
void __ext4_error_inode(struct inode *inode, const char *function,
- unsigned int line, ext4_fsblk_t block,
+ unsigned int line, ext4_fsblk_t block, int error,
const char *fmt, ...)
{
va_list args;
struct va_format vaf;
- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return;
trace_ext4_error(inode->i_sb, function, line);
- es->s_last_error_ino = cpu_to_le32(inode->i_ino);
- es->s_last_error_block = cpu_to_le64(block);
if (ext4_error_ratelimit(inode->i_sb)) {
va_start(args, fmt);
vaf.fmt = fmt;
@@ -529,7 +585,8 @@ void __ext4_error_inode(struct inode *inode, const char *function,
current->comm, &vaf);
va_end(args);
}
- save_error_info(inode->i_sb, function, line);
+ save_error_info(inode->i_sb, error, inode->i_ino, block,
+ function, line);
ext4_handle_error(inode->i_sb);
}
@@ -548,7 +605,6 @@ void __ext4_error_file(struct file *file, const char *function,
trace_ext4_error(inode->i_sb, function, line);
es = EXT4_SB(inode->i_sb)->s_es;
- es->s_last_error_ino = cpu_to_le32(inode->i_ino);
if (ext4_error_ratelimit(inode->i_sb)) {
path = file_path(file, pathname, sizeof(pathname));
if (IS_ERR(path))
@@ -570,7 +626,8 @@ void __ext4_error_file(struct file *file, const char *function,
current->comm, path, &vaf);
va_end(args);
}
- save_error_info(inode->i_sb, function, line);
+ save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
+ function, line);
ext4_handle_error(inode->i_sb);
}
@@ -614,66 +671,6 @@ const char *ext4_decode_error(struct super_block *sb, int errno,
return errstr;
}
-void ext4_set_errno(struct super_block *sb, int err)
-{
- if (err < 0)
- err = -err;
-
- switch (err) {
- case EIO:
- err = EXT4_ERR_EIO;
- break;
- case ENOMEM:
- err = EXT4_ERR_ENOMEM;
- break;
- case EFSBADCRC:
- err = EXT4_ERR_EFSBADCRC;
- break;
- case EFSCORRUPTED:
- err = EXT4_ERR_EFSCORRUPTED;
- break;
- case ENOSPC:
- err = EXT4_ERR_ENOSPC;
- break;
- case ENOKEY:
- err = EXT4_ERR_ENOKEY;
- break;
- case EROFS:
- err = EXT4_ERR_EROFS;
- break;
- case EFBIG:
- err = EXT4_ERR_EFBIG;
- break;
- case EEXIST:
- err = EXT4_ERR_EEXIST;
- break;
- case ERANGE:
- err = EXT4_ERR_ERANGE;
- break;
- case EOVERFLOW:
- err = EXT4_ERR_EOVERFLOW;
- break;
- case EBUSY:
- err = EXT4_ERR_EBUSY;
- break;
- case ENOTDIR:
- err = EXT4_ERR_ENOTDIR;
- break;
- case ENOTEMPTY:
- err = EXT4_ERR_ENOTEMPTY;
- break;
- case ESHUTDOWN:
- err = EXT4_ERR_ESHUTDOWN;
- break;
- case EFAULT:
- err = EXT4_ERR_EFAULT;
- break;
- default:
- err = EXT4_ERR_UNKNOWN;
- }
- EXT4_SB(sb)->s_es->s_last_error_errcode = err;
-}
-
/* __ext4_std_error decodes expected errors from journaling functions
* automatically and invokes the appropriate error response. */
@@ -698,8 +695,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
sb->s_id, function, line, errstr);
}
- ext4_set_errno(sb, -errno);
- save_error_info(sb, function, line);
+ save_error_info(sb, -errno, 0, 0, function, line);
ext4_handle_error(sb);
}
@@ -714,7 +710,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
*/
void __ext4_abort(struct super_block *sb, const char *function,
- unsigned int line, const char *fmt, ...)
+ unsigned int line, int error, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -722,7 +718,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
return;
- save_error_info(sb, function, line);
+ save_error_info(sb, error, 0, 0, function, line);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
@@ -741,7 +737,6 @@ void __ext4_abort(struct super_block *sb, const char *function,
sb->s_flags |= SB_RDONLY;
if (EXT4_SB(sb)->s_journal)
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
- save_error_info(sb, function, line);
}
if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
if (EXT4_SB(sb)->s_journal &&
@@ -815,15 +810,12 @@ __acquires(bitlock)
{
struct va_format vaf;
va_list args;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
return;
trace_ext4_error(sb, function, line);
- es->s_last_error_ino = cpu_to_le32(ino);
- es->s_last_error_block = cpu_to_le64(block);
- __save_error_info(sb, function, line);
+ __save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
if (ext4_error_ratelimit(sb)) {
va_start(args, fmt);
@@ -1024,17 +1016,22 @@ static void ext4_put_super(struct super_block *sb)
destroy_workqueue(sbi->rsv_conversion_wq);
+ /*
+ * Unregister sysfs before destroying the jbd2 journal, since the
+ * attr_journal_task attribute could still be accessed via sysfs
+ * while sbi->s_journal->j_task is already NULL.
+ */
+ ext4_unregister_sysfs(sb);
+
if (sbi->s_journal) {
aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
if ((err < 0) && !aborted) {
- ext4_set_errno(sb, -err);
- ext4_abort(sb, "Couldn't clean up the journal");
+ ext4_abort(sb, -err, "Couldn't clean up the journal");
}
}
- ext4_unregister_sysfs(sb);
ext4_es_unregister_shrinker(sbi);
del_timer_sync(&sbi->s_err_report);
ext4_release_system_zone(sb);
@@ -2180,6 +2177,14 @@ static int parse_options(char *options, struct super_block *sb,
}
}
#endif
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ int blocksize =
+ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+ if (blocksize < PAGE_SIZE)
+ ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
+ "experimental mount option 'dioread_nolock' "
+ "for blocksize < PAGE_SIZE");
+ }
return 1;
}
@@ -3609,7 +3614,8 @@ int ext4_calculate_overhead(struct super_block *sb)
*/
if (sbi->s_journal && !sbi->journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
- else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
+ else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
+ /* j_inum for internal journal is non-zero */
j_inode = ext4_get_journal_inode(sb, j_inum);
if (j_inode) {
j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
@@ -3785,7 +3791,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sb, NO_UID32);
/* xattr user namespace & acls are now defaulted on */
set_opt(sb, XATTR_USER);
- set_opt(sb, DIOREAD_NOLOCK);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
#endif
@@ -3835,6 +3840,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+
+ if (blocksize == PAGE_SIZE)
+ set_opt(sb, DIOREAD_NOLOCK);
+
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
@@ -4157,7 +4166,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
sbi->s_inodes_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
- sbi->s_blocks_per_group);
+ sbi->s_inodes_per_group);
goto failed_mount;
}
sbi->s_itb_per_group = sbi->s_inodes_per_group /
@@ -4286,9 +4295,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
EXT4_BLOCKS_PER_GROUP(sb) - 1);
do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
- ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
+ ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
"(block count %llu, first data block %u, "
- "blocks per group %lu)", sbi->s_groups_count,
+ "blocks per group %lu)", blocks_count,
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
@@ -5433,7 +5442,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
- ext4_abort(sb, "Abort forced by user");
+ ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
@@ -5622,10 +5631,8 @@ static int ext4_statfs_project(struct super_block *sb,
return PTR_ERR(dquot);
spin_lock(&dquot->dq_dqb_lock);
- limit = dquot->dq_dqb.dqb_bsoftlimit;
- if (dquot->dq_dqb.dqb_bhardlimit &&
- (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
- limit = dquot->dq_dqb.dqb_bhardlimit;
+ limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
+ dquot->dq_dqb.dqb_bhardlimit);
limit >>= sb->s_blocksize_bits;
if (limit && buf->f_blocks > limit) {
@@ -5637,11 +5644,8 @@ static int ext4_statfs_project(struct super_block *sb,
(buf->f_blocks - curblock) : 0;
}
- limit = dquot->dq_dqb.dqb_isoftlimit;
- if (dquot->dq_dqb.dqb_ihardlimit &&
- (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
- limit = dquot->dq_dqb.dqb_ihardlimit;
-
+ limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
+ dquot->dq_dqb.dqb_ihardlimit);
if (limit && buf->f_files > limit) {
buf->f_files = limit;
buf->f_ffree =
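
Note, not part of the patch: min_not_zero() picks the smaller of its two arguments while treating zero as "unset", which is exactly the soft/hard quota-limit combination that was open-coded before. A user-space sketch of the same semantics, with the helper re-implemented for illustration and made-up values:

	/* User-space sketch of min_not_zero() semantics. */
	#include <stdio.h>

	static unsigned long long min_not_zero(unsigned long long x,
					       unsigned long long y)
	{
		if (x == 0)
			return y;
		if (y == 0)
			return x;
		return x < y ? x : y;
	}

	int main(void)
	{
		/* soft=100 hard=50 -> 50; soft unset -> 50; both unset -> 0 */
		printf("%llu\n", min_not_zero(100, 50));
		printf("%llu\n", min_not_zero(0, 50));
		printf("%llu\n", min_not_zero(0, 0));
		return 0;
	}
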
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 8cac7d95c3ad..21df43a25328 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -245,7 +245,7 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
bh->b_data);
errout:
if (error)
- __ext4_error_inode(inode, function, line, 0,
+ __ext4_error_inode(inode, function, line, 0, -error,
"corrupted xattr block %llu",
(unsigned long long) bh->b_blocknr);
else
@@ -269,7 +269,7 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
errout:
if (error)
- __ext4_error_inode(inode, function, line, 0,
+ __ext4_error_inode(inode, function, line, 0, -error,
"corrupted in-inode xattr");
return error;
}
@@ -2880,9 +2880,9 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
if (IS_ERR(bh)) {
error = PTR_ERR(bh);
if (error == -EIO) {
- ext4_set_errno(inode->i_sb, EIO);
- EXT4_ERROR_INODE(inode, "block %llu read error",
- EXT4_I(inode)->i_file_acl);
+ EXT4_ERROR_INODE_ERR(inode, EIO,
+ "block %llu read error",
+ EXT4_I(inode)->i_file_acl);
}
bh = NULL;
goto cleanup;
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index f39cad2abe2a..ffe21ac77f78 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -48,7 +48,7 @@ struct ext4_xattr_entry {
__le32 e_value_inum; /* inode in which the value is stored */
__le32 e_value_size; /* size of attribute value */
__le32 e_hash; /* hash value of name and value */
- char e_name[0]; /* attribute name */
+ char e_name[]; /* attribute name */
};
#define EXT4_XATTR_PAD_BITS 2
@@ -118,7 +118,7 @@ struct ext4_xattr_ibody_find {
struct ext4_xattr_inode_array {
unsigned int count; /* # of used items in the array */
- struct inode *inodes[0];
+ struct inode *inodes[];
};
extern const struct xattr_handler ext4_xattr_user_handler;
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index f0faada30f30..bb68d21e1f8c 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -118,3 +118,12 @@ config F2FS_FS_LZ4
default y
help
Support LZ4 compress algorithm, if unsure, say Y.
+
+config F2FS_FS_ZSTD
+ bool "ZSTD compression support"
+ depends on F2FS_FS_COMPRESSION
+ select ZSTD_COMPRESS
+ select ZSTD_DECOMPRESS
+ default y
+ help
+ Support ZSTD compress algorithm, if unsure, say Y.
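
Note, not part of the patch: assuming the filesystem is mounted with a compress_algorithm= option and compression enabled, f2fs requests per-file compression through the standard FS_COMPR_FL attribute flag (what chattr +c sets). A hedged sketch of the ioctl sequence; the file path is a placeholder and the file should be empty when the flag is set:

	/* User-space sketch: request f2fs per-file compression via FS_COMPR_FL. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(void)
	{
		int fd, flags;

		fd = open("newfile", O_RDWR | O_CREAT, 0644);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
			perror("FS_IOC_GETFLAGS");
			return 1;
		}
		flags |= FS_COMPR_FL;
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
			perror("FS_IOC_SETFLAGS");
		close(fd);
		return 0;
	}
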
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 44e84ac5c941..852890b72d6a 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -50,9 +50,6 @@ repeat:
return page;
}
-/*
- * We guarantee no failure on the returned page.
- */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
bool is_meta)
{
@@ -206,7 +203,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
}
/*
- * Readahead CP/NAT/SIT/SSA pages
+ * Readahead CP/NAT/SIT/SSA/POR pages
*/
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync)
@@ -898,7 +895,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
return -ENOMEM;
/*
* Finding out valid cp block involves read both
- * sets( cp pack1 and cp pack 2)
+ * sets (cp pack 1 and cp pack 2)
*/
cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
@@ -1250,20 +1247,20 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
f2fs_unlock_all(sbi);
}
-void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
- if (!get_pages(sbi, F2FS_WB_CP_DATA))
+ if (!get_pages(sbi, type))
break;
if (unlikely(f2fs_cp_error(sbi)))
break;
- io_schedule_timeout(5*HZ);
+ io_schedule_timeout(DEFAULT_IO_TIMEOUT);
}
finish_wait(&sbi->cp_wait, &wait);
}
@@ -1301,10 +1298,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
else
__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
- is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
__set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
+ __set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
+
if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
else
@@ -1384,13 +1385,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Flush all the NAT/SIT pages */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
- f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
- !f2fs_cp_error(sbi));
- /*
- * modify checkpoint
- * version number is already updated
- */
+ /* start to update checkpoint; cp version was already updated */
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
@@ -1493,11 +1489,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Here, we have one bio having CP pack except cp pack 2 page */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
- f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
- !f2fs_cp_error(sbi));
+ /* Wait for all dirty meta pages to be submitted for IO */
+ f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
/* wait for previous submitted meta pages writeback */
- f2fs_wait_on_all_pages_writeback(sbi);
+ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
/* flush all device cache */
err = f2fs_flush_device_cache(sbi);
@@ -1506,7 +1502,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* barrier and flush checkpoint cp pack 2 page if it can */
commit_checkpoint(sbi, ckpt, start_blk);
- f2fs_wait_on_all_pages_writeback(sbi);
+ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
/*
* invalidate intermediate page cache borrowed from meta inode which are
@@ -1543,9 +1539,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}
-/*
- * We guarantee that this checkpoint procedure will not fail.
- */
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1613,7 +1606,6 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_sit_entries(sbi, cpc);
- /* unlock all the fs_lock[] in do_checkpoint() */
err = do_checkpoint(sbi, cpc);
if (err)
f2fs_release_discard_addrs(sbi);
@@ -1626,7 +1618,7 @@ stop:
if (cpc->reason & CP_RECOVERY)
f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
- /* do checkpoint periodically */
+ /* update CP_TIME to trigger checkpoint periodically */
f2fs_update_time(sbi, CP_TIME);
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index d8a64be90a50..df7b2d15eacd 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -11,6 +11,7 @@
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
+#include <linux/zstd.h>
#include "f2fs.h"
#include "node.h"
@@ -20,6 +21,8 @@ struct f2fs_compress_ops {
int (*init_compress_ctx)(struct compress_ctx *cc);
void (*destroy_compress_ctx)(struct compress_ctx *cc);
int (*compress_pages)(struct compress_ctx *cc);
+ int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
+ void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
int (*decompress_pages)(struct decompress_io_ctx *dic);
};
@@ -52,7 +55,7 @@ bool f2fs_is_compressed_page(struct page *page)
}
static void f2fs_set_compressed_page(struct page *page,
- struct inode *inode, pgoff_t index, void *data, refcount_t *r)
+ struct inode *inode, pgoff_t index, void *data)
{
SetPagePrivate(page);
set_page_private(page, (unsigned long)data);
@@ -60,8 +63,6 @@ static void f2fs_set_compressed_page(struct page *page,
/* i_crypto_info and iv index */
page->index = index;
page->mapping = inode->i_mapping;
- if (r)
- refcount_inc(r);
}
static void f2fs_put_compressed_page(struct page *page)
@@ -291,6 +292,165 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
};
#endif
+#ifdef CONFIG_F2FS_FS_ZSTD
+#define F2FS_ZSTD_DEFAULT_CLEVEL 1
+
+static int zstd_init_compress_ctx(struct compress_ctx *cc)
+{
+ ZSTD_parameters params;
+ ZSTD_CStream *stream;
+ void *workspace;
+ unsigned int workspace_size;
+
+ params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
+ workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
+
+ workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+ workspace_size, GFP_NOFS);
+ if (!workspace)
+ return -ENOMEM;
+
+ stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
+ if (!stream) {
+ printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
+ KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+ __func__);
+ kvfree(workspace);
+ return -EIO;
+ }
+
+ cc->private = workspace;
+ cc->private2 = stream;
+
+ cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
+ return 0;
+}
+
+static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
+{
+ kvfree(cc->private);
+ cc->private = NULL;
+ cc->private2 = NULL;
+}
+
+static int zstd_compress_pages(struct compress_ctx *cc)
+{
+ ZSTD_CStream *stream = cc->private2;
+ ZSTD_inBuffer inbuf;
+ ZSTD_outBuffer outbuf;
+ int src_size = cc->rlen;
+ int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
+ int ret;
+
+ inbuf.pos = 0;
+ inbuf.src = cc->rbuf;
+ inbuf.size = src_size;
+
+ outbuf.pos = 0;
+ outbuf.dst = cc->cbuf->cdata;
+ outbuf.size = dst_size;
+
+ ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
+ if (ZSTD_isError(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
+ KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+ __func__, ZSTD_getErrorCode(ret));
+ return -EIO;
+ }
+
+ ret = ZSTD_endStream(stream, &outbuf);
+ if (ZSTD_isError(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
+ KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+ __func__, ZSTD_getErrorCode(ret));
+ return -EIO;
+ }
+
+ cc->clen = outbuf.pos;
+ return 0;
+}
+
+static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+{
+ ZSTD_DStream *stream;
+ void *workspace;
+ unsigned int workspace_size;
+
+ workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);
+
+ workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
+ workspace_size, GFP_NOFS);
+ if (!workspace)
+ return -ENOMEM;
+
+ stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
+ workspace, workspace_size);
+ if (!stream) {
+ printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
+ KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
+ __func__);
+ kvfree(workspace);
+ return -EIO;
+ }
+
+ dic->private = workspace;
+ dic->private2 = stream;
+
+ return 0;
+}
+
+static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
+{
+ kvfree(dic->private);
+ dic->private = NULL;
+ dic->private2 = NULL;
+}
+
+static int zstd_decompress_pages(struct decompress_io_ctx *dic)
+{
+ ZSTD_DStream *stream = dic->private2;
+ ZSTD_inBuffer inbuf;
+ ZSTD_outBuffer outbuf;
+ int ret;
+
+ inbuf.pos = 0;
+ inbuf.src = dic->cbuf->cdata;
+ inbuf.size = dic->clen;
+
+ outbuf.pos = 0;
+ outbuf.dst = dic->rbuf;
+ outbuf.size = dic->rlen;
+
+ ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
+ if (ZSTD_isError(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
+ KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
+ __func__, ZSTD_getErrorCode(ret));
+ return -EIO;
+ }
+
+ if (dic->rlen != outbuf.pos) {
+ printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
+ "expected:%lu\n", KERN_ERR,
+ F2FS_I_SB(dic->inode)->sb->s_id,
+ __func__, dic->rlen,
+ PAGE_SIZE << dic->log_cluster_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct f2fs_compress_ops f2fs_zstd_ops = {
+ .init_compress_ctx = zstd_init_compress_ctx,
+ .destroy_compress_ctx = zstd_destroy_compress_ctx,
+ .compress_pages = zstd_compress_pages,
+ .init_decompress_ctx = zstd_init_decompress_ctx,
+ .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
+ .decompress_pages = zstd_decompress_pages,
+};
+#endif
+
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
&f2fs_lzo_ops,
@@ -302,6 +462,11 @@ static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#else
NULL,
#endif
+#ifdef CONFIG_F2FS_FS_ZSTD
+ &f2fs_zstd_ops,
+#else
+ NULL,
+#endif
};
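/*
 * Editor's note (illustrative, mirrors the call sites below): a backend
 * is picked by indexing this table with the per-inode algorithm id, and
 * the ctx hooks are now optional because lzo/lz4 leave their
 * decompress-ctx callbacks NULL:
 *
 *	const struct f2fs_compress_ops *cops =
 *		f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
 *
 *	if (cops->init_decompress_ctx)	// NULL for lzo/lz4
 *		ret = cops->init_decompress_ctx(dic);
 */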
bool f2fs_is_compress_backend_ready(struct inode *inode)
@@ -334,9 +499,11 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
cc->cluster_size, fi->i_compress_algorithm);
- ret = cops->init_compress_ctx(cc);
- if (ret)
- goto out;
+ if (cops->init_compress_ctx) {
+ ret = cops->init_compress_ctx(cc);
+ if (ret)
+ goto out;
+ }
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
@@ -380,21 +547,27 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
}
cc->cbuf->clen = cpu_to_le32(cc->clen);
- cc->cbuf->chksum = cpu_to_le32(0);
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);
+ nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+
+ /* zero out any unused part of the last page */
+ memset(&cc->cbuf->cdata[cc->clen], 0,
+ (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
+
vunmap(cc->cbuf);
vunmap(cc->rbuf);
- nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
-
for (i = nr_cpages; i < cc->nr_cpages; i++) {
f2fs_put_compressed_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
+ if (cops->destroy_compress_ctx)
+ cops->destroy_compress_ctx(cc);
+
cc->nr_cpages = nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
@@ -413,7 +586,8 @@ out_free_cpages:
kfree(cc->cpages);
cc->cpages = NULL;
destroy_compress_ctx:
- cops->destroy_compress_ctx(cc);
+ if (cops->destroy_compress_ctx)
+ cops->destroy_compress_ctx(cc);
out:
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -447,10 +621,16 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}
+ if (cops->init_decompress_ctx) {
+ ret = cops->init_decompress_ctx(dic);
+ if (ret)
+ goto out_free_dic;
+ }
+
dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
if (!dic->rbuf) {
ret = -ENOMEM;
- goto out_free_dic;
+ goto destroy_decompress_ctx;
}
dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
@@ -473,7 +653,12 @@ out_vunmap_cbuf:
vunmap(dic->cbuf);
out_vunmap_rbuf:
vunmap(dic->rbuf);
+destroy_decompress_ctx:
+ if (cops->destroy_decompress_ctx)
+ cops->destroy_decompress_ctx(dic);
out_free_dic:
+ if (verity)
+ refcount_set(&dic->ref, dic->nr_cpages);
if (!verity)
f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
ret, false);
@@ -532,8 +717,7 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
return true;
}
-/* return # of compressed block addresses */
-static int f2fs_compressed_blocks(struct compress_ctx *cc)
+static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
struct dnode_of_data dn;
int ret;
@@ -554,10 +738,15 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
- blkaddr = datablock_addr(dn.inode,
+ blkaddr = data_blkaddr(dn.inode,
dn.node_page, dn.ofs_in_node + i);
- if (blkaddr != NULL_ADDR)
- ret++;
+ if (compr) {
+ if (__is_valid_data_blkaddr(blkaddr))
+ ret++;
+ } else {
+ if (blkaddr != NULL_ADDR)
+ ret++;
+ }
}
}
fail:
@@ -565,6 +754,18 @@ fail:
return ret;
}
+/* return # of compressed blocks in compressed cluster */
+static int f2fs_compressed_blocks(struct compress_ctx *cc)
+{
+ return __f2fs_cluster_blocks(cc, true);
+}
+
+/* return # of valid blocks in compressed cluster */
+static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
+{
+ return __f2fs_cluster_blocks(cc, false);
+}
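/*
 * Editor's note: as written, f2fs_cluster_blocks() ignores its compr
 * argument and always passes false, i.e. it counts every non-NULL block
 * address in the cluster, while f2fs_compressed_blocks() counts only
 * addresses that pass __is_valid_data_blkaddr().
 */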
+
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
struct compress_ctx cc = {
@@ -574,7 +775,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
};
- return f2fs_compressed_blocks(&cc);
+ return f2fs_cluster_blocks(&cc, false);
}
static bool cluster_may_compress(struct compress_ctx *cc)
@@ -623,7 +824,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
bool prealloc;
retry:
- ret = f2fs_compressed_blocks(cc);
+ ret = f2fs_cluster_blocks(cc, false);
if (ret <= 0)
return ret;
@@ -653,7 +854,7 @@ retry:
struct bio *bio = NULL;
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
- &last_block_in_bio, false);
+ &last_block_in_bio, false, true);
f2fs_destroy_compress_ctx(cc);
if (ret)
goto release_pages;
@@ -772,7 +973,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
.encrypted_page = NULL,
.compressed_page = NULL,
.submitted = false,
- .need_lock = LOCK_RETRY,
.io_type = io_type,
.io_wbc = wbc,
.encrypted = f2fs_encrypted_file(cc->inode),
@@ -785,16 +985,17 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
- set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
+ if (!f2fs_trylock_op(sbi))
+ return -EAGAIN;
- f2fs_lock_op(sbi);
+ set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
if (err)
goto out_unlock_op;
for (i = 0; i < cc->cluster_size; i++) {
- if (datablock_addr(dn.inode, dn.node_page,
+ if (data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i) == NULL_ADDR)
goto out_put_dnode;
}
@@ -813,7 +1014,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
- refcount_set(&cic->ref, 1);
+ refcount_set(&cic->ref, cc->nr_cpages);
cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
cc->log_cluster_size, GFP_NOFS);
if (!cic->rpages)
@@ -823,8 +1024,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
for (i = 0; i < cc->nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
- cc->rpages[i + 1]->index,
- cic, i ? &cic->ref : NULL);
+ cc->rpages[i + 1]->index, cic);
fio.compressed_page = cc->cpages[i];
if (fio.encrypted) {
fio.page = cc->rpages[i + 1];
@@ -843,9 +1043,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
block_t blkaddr;
- blkaddr = datablock_addr(dn.inode, dn.node_page,
- dn.ofs_in_node);
- fio.page = cic->rpages[i];
+ blkaddr = f2fs_data_blkaddr(&dn);
+ fio.page = cc->rpages[i];
fio.old_blkaddr = blkaddr;
/* cluster header */
@@ -895,10 +1094,10 @@ unlock_continue:
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
- down_write(&fi->i_sem);
+ spin_lock(&fi->i_size_lock);
if (fi->last_disk_size < psize)
fi->last_disk_size = psize;
- up_write(&fi->i_sem);
+ spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
f2fs_destroy_compress_ctx(cc);
@@ -984,24 +1183,30 @@ retry_write:
unlock_page(cc->rpages[i]);
ret = 0;
} else if (ret == -EAGAIN) {
+ /*
+ * for quota files, just redirty the remaining pages to
+ * avoid a deadlock caused by a cluster update race with
+ * foreground operations.
+ */
+ if (IS_NOQUOTA(cc->inode)) {
+ err = 0;
+ goto out_err;
+ }
ret = 0;
cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC,
+ DEFAULT_IO_TIMEOUT);
lock_page(cc->rpages[i]);
clear_page_dirty_for_io(cc->rpages[i]);
goto retry_write;
}
err = ret;
- goto out_fail;
+ goto out_err;
}
*submitted += _submitted;
}
return 0;
-
-out_fail:
- /* TODO: revoke partially updated block addresses */
- BUG_ON(compr_blocks);
out_err:
for (++i; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
@@ -1069,7 +1274,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
dic->inode = cc->inode;
- refcount_set(&dic->ref, 1);
+ refcount_set(&dic->ref, cc->nr_cpages);
dic->cluster_idx = cc->cluster_idx;
dic->cluster_size = cc->cluster_size;
dic->log_cluster_size = cc->log_cluster_size;
@@ -1093,8 +1298,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
goto out_free;
f2fs_set_compressed_page(page, cc->inode,
- start_idx + i + 1,
- dic, i ? &dic->ref : NULL);
+ start_idx + i + 1, dic);
dic->cpages[i] = page;
}
@@ -1104,20 +1308,16 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
goto out_free;
for (i = 0; i < dic->cluster_size; i++) {
- if (cc->rpages[i])
+ if (cc->rpages[i]) {
+ dic->tpages[i] = cc->rpages[i];
continue;
+ }
dic->tpages[i] = f2fs_grab_page();
if (!dic->tpages[i])
goto out_free;
}
- for (i = 0; i < dic->cluster_size; i++) {
- if (dic->tpages[i])
- continue;
- dic->tpages[i] = cc->rpages[i];
- }
-
return dic;
out_free:
@@ -1133,7 +1333,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i])
continue;
- f2fs_put_page(dic->tpages[i], 1);
+ if (!dic->tpages[i])
+ continue;
+ unlock_page(dic->tpages[i]);
+ put_page(dic->tpages[i]);
}
kfree(dic->tpages);
}
@@ -1162,15 +1365,17 @@ void f2fs_decompress_end_io(struct page **rpages,
if (!rpage)
continue;
- if (err || PageError(rpage)) {
- ClearPageUptodate(rpage);
- ClearPageError(rpage);
- } else {
- if (!verity || fsverity_verify_page(rpage))
- SetPageUptodate(rpage);
- else
- SetPageError(rpage);
+ if (err || PageError(rpage))
+ goto clear_uptodate;
+
+ if (!verity || fsverity_verify_page(rpage)) {
+ SetPageUptodate(rpage);
+ goto unlock;
}
+clear_uptodate:
+ ClearPageUptodate(rpage);
+ ClearPageError(rpage);
+unlock:
unlock_page(rpage);
}
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b27b72107911..cdf2f626bea7 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -54,17 +54,13 @@ static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
}
-struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail)
+struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
{
- struct bio *bio;
-
- if (no_fail) {
+ if (noio) {
/* No failure on bio allocation */
- bio = __f2fs_bio_alloc(GFP_NOIO, npages);
- if (!bio)
- bio = __f2fs_bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
- return bio;
+ return __f2fs_bio_alloc(GFP_NOIO, npages);
}
+
if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
return NULL;
@@ -143,6 +139,8 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
f2fs_decompress_pages(bio, page, verity);
continue;
}
+ if (verity)
+ continue;
#endif
/* PG_error was set if any post_read step failed */
@@ -191,12 +189,38 @@ static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
static void f2fs_verify_bio(struct bio *bio)
{
- struct page *page = bio_first_page_all(bio);
- struct decompress_io_ctx *dic =
- (struct decompress_io_ctx *)page_private(page);
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
- f2fs_verify_pages(dic->rpages, dic->cluster_size);
- f2fs_free_dic(dic);
+ bio_for_each_segment_all(bv, bio, iter_all) {
+ struct page *page = bv->bv_page;
+ struct decompress_io_ctx *dic;
+
+ dic = (struct decompress_io_ctx *)page_private(page);
+
+ if (dic) {
+ if (refcount_dec_not_one(&dic->ref))
+ continue;
+ f2fs_verify_pages(dic->rpages,
+ dic->cluster_size);
+ f2fs_free_dic(dic);
+ continue;
+ }
+
+ if (bio->bi_status || PageError(page))
+ goto clear_uptodate;
+
+ if (fsverity_verify_page(page)) {
+ SetPageUptodate(page);
+ goto unlock;
+ }
+clear_uptodate:
+ ClearPageUptodate(page);
+ ClearPageError(page);
+unlock:
+ dec_page_count(F2FS_P_SB(page), __read_io_type(page));
+ unlock_page(page);
+ }
}
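/*
 * Editor's note: dic->ref is now initialized to nr_cpages (see the
 * f2fs_alloc_dic() hunk in compress.c above), so each compressed page
 * in this loop drops one reference and only the last holder, for which
 * refcount_dec_not_one() returns false, runs verification and frees
 * the dic.
 */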
#endif
@@ -364,9 +388,6 @@ static void f2fs_write_end_io(struct bio *bio)
bio_put(bio);
}
-/*
- * Return true, if pre_bio's bdev is same as its target device.
- */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
@@ -403,6 +424,9 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
return 0;
}
+/*
+ * Return true, if pre_bio's bdev is same as its target device.
+ */
static bool __same_bdev(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
@@ -410,9 +434,6 @@ static bool __same_bdev(struct f2fs_sb_info *sbi,
return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}
-/*
- * Low-level block read/write IO operations.
- */
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
struct f2fs_sb_info *sbi = fio->sbi;
@@ -445,7 +466,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
if (type != DATA && type != NODE)
goto submit_io;
- if (test_opt(sbi, LFS) && current->plug)
+ if (f2fs_lfs_mode(sbi) && current->plug)
blk_finish_plug(current->plug);
if (F2FS_IO_ALIGNED(sbi))
@@ -928,14 +949,15 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages, unsigned op_flag,
- pgoff_t first_idx)
+ pgoff_t first_idx, bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
struct bio_post_read_ctx *ctx;
unsigned int post_read_steps = 0;
- bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
+ bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
+ for_write);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
@@ -970,12 +992,12 @@ static void f2fs_release_read_bio(struct bio *bio)
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
- block_t blkaddr)
+ block_t blkaddr, bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
- bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index);
+ bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index, for_write);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -1047,8 +1069,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
for (; count > 0; dn->ofs_in_node++) {
- block_t blkaddr = datablock_addr(dn->inode,
- dn->node_page, dn->ofs_in_node);
+ block_t blkaddr = f2fs_data_blkaddr(dn);
if (blkaddr == NULL_ADDR) {
dn->data_blkaddr = NEW_ADDR;
__set_data_blkaddr(dn);
@@ -1162,7 +1183,7 @@ got_it:
return page;
}
- err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
+ err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write);
if (err)
goto put_err;
return page;
@@ -1300,8 +1321,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
if (err)
return err;
- dn->data_blkaddr = datablock_addr(dn->inode,
- dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = f2fs_data_blkaddr(dn);
if (dn->data_blkaddr != NULL_ADDR)
goto alloc;
@@ -1388,13 +1408,9 @@ void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
}
/*
- * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
- * f2fs_map_blocks structure.
- * If original data blocks are allocated, then give them to blockdev.
- * Otherwise,
- * a. preallocate requested block addresses
- * b. do not use extent cache for better performance
- * c. give the block addresses to blockdev
+ * f2fs_map_blocks() tries to find or build a mapping relationship which
+ * maps contiguous logical blocks to physical blocks, and returns such
+ * info via the f2fs_map_blocks structure.
*/
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag)
@@ -1422,7 +1438,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
end = pgofs + maxblocks;
if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
- if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+ if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
map->m_may_create)
goto next_dnode;
@@ -1467,7 +1483,7 @@ next_dnode:
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
next_block:
- blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
+ blkaddr = f2fs_data_blkaddr(&dn);
if (__is_valid_data_blkaddr(blkaddr) &&
!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
@@ -1477,7 +1493,7 @@ next_block:
if (__is_valid_data_blkaddr(blkaddr)) {
/* use out-of-place update for direct IO under LFS mode */
- if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+ if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
map->m_may_create) {
err = __allocate_data_block(&dn, map->m_seg_type);
if (err)
@@ -1980,7 +1996,8 @@ submit_and_realloc:
}
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
- is_readahead ? REQ_RAHEAD : 0, page->index);
+ is_readahead ? REQ_RAHEAD : 0, page->index,
+ false);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
bio = NULL;
@@ -2015,7 +2032,7 @@ out:
#ifdef CONFIG_F2FS_FS_COMPRESSION
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
- bool is_readahead)
+ bool is_readahead, bool for_write)
{
struct dnode_of_data dn;
struct inode *inode = cc->inode;
@@ -2031,7 +2048,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
- last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ last_block_in_file = (f2fs_readpage_limit(inode) +
+ blocksize - 1) >> blkbits;
/* get rid of pages beyond EOF */
for (i = 0; i < cc->cluster_size; i++) {
@@ -2067,7 +2085,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
- blkaddr = datablock_addr(dn.inode, dn.node_page,
+ blkaddr = data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -2096,7 +2114,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
struct page *page = dic->cpages[i];
block_t blkaddr;
- blkaddr = datablock_addr(dn.inode, dn.node_page,
+ blkaddr = data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i + 1);
if (bio && !page_is_mergeable(sbi, bio,
@@ -2109,7 +2127,7 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
is_readahead ? REQ_RAHEAD : 0,
- page->index);
+ page->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
bio = NULL;
@@ -2210,7 +2228,7 @@ int f2fs_mpage_readpages(struct address_space *mapping,
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
- is_readahead);
+ is_readahead, false);
f2fs_destroy_compress_ctx(&cc);
if (ret)
goto set_error_page;
@@ -2253,7 +2271,7 @@ next_page:
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
- is_readahead);
+ is_readahead, false);
f2fs_destroy_compress_ctx(&cc);
}
}
@@ -2326,7 +2344,7 @@ retry_encrypt:
/* flush pending IOs and wait for a while in the ENOMEM case */
if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
f2fs_flush_merged_writes(fio->sbi);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
}
@@ -2397,7 +2415,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- if (test_opt(sbi, LFS))
+ if (f2fs_lfs_mode(sbi))
return true;
if (S_ISDIR(inode->i_mode))
return true;
@@ -2647,10 +2665,10 @@ write:
if (err) {
file_set_keep_isize(inode);
} else {
- down_write(&F2FS_I(inode)->i_sem);
+ spin_lock(&F2FS_I(inode)->i_size_lock);
if (F2FS_I(inode)->last_disk_size < psize)
F2FS_I(inode)->last_disk_size = psize;
- up_write(&F2FS_I(inode)->i_sem);
+ spin_unlock(&F2FS_I(inode)->i_size_lock);
}
done:
@@ -2917,7 +2935,7 @@ result:
if (wbc->sync_mode == WB_SYNC_ALL) {
cond_resched();
congestion_wait(BLK_RW_ASYNC,
- HZ/50);
+ DEFAULT_IO_TIMEOUT);
goto retry_write;
}
goto next;
@@ -2973,15 +2991,17 @@ next:
static inline bool __should_serialize_io(struct inode *inode,
struct writeback_control *wbc)
{
+ /* avoid deadlock in the data flush path */
+ if (F2FS_I(inode)->cp_task)
+ return false;
+
if (!S_ISREG(inode->i_mode))
return false;
- if (f2fs_compressed_file(inode))
- return true;
if (IS_NOQUOTA(inode))
return false;
- /* to avoid deadlock in path of data flush */
- if (F2FS_I(inode)->cp_task)
- return false;
+
+ if (f2fs_compressed_file(inode))
+ return true;
if (wbc->sync_mode != WB_SYNC_ALL)
return true;
if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
@@ -3283,7 +3303,7 @@ repeat:
err = -EFSCORRUPTED;
goto fail;
}
- err = f2fs_submit_page_read(inode, page, blkaddr);
+ err = f2fs_submit_page_read(inode, page, blkaddr, true);
if (err)
goto fail;
@@ -3464,7 +3484,8 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, rw == WRITE ? get_data_block_dio_write :
get_data_block_dio, NULL, f2fs_dio_submit_bio,
- DIO_LOCKING | DIO_SKIP_HOLES);
+ rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
+ DIO_SKIP_HOLES);
if (do_opu)
up_read(&fi->i_gc_rwsem[READ]);
@@ -3861,7 +3882,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
int __init f2fs_init_bio_entry_cache(void)
{
- bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab",
+ bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
sizeof(struct bio_entry));
if (!bio_entry_slab)
return -ENOMEM;
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 6b89eae5e4ca..0dbcb0f9c019 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -301,6 +301,9 @@ static int stat_show(struct seq_file *s, void *v)
si->ssa_area_segs, si->main_area_segs);
seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
si->overp_segs, si->rsvd_segs);
+ seq_printf(s, "Current Time Sec: %llu / Mounted Time Sec: %llu\n\n",
+ ktime_get_boottime_seconds(),
+ SIT_I(si->sbi)->mounted_time);
if (test_opt(si->sbi, DISCARD))
seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
si->utilization, si->valid_count, si->discard_blks);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 27d0dd7a16d6..44bfc464df78 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -471,7 +471,6 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
struct page *dpage)
{
struct page *page;
- int dummy_encrypt = DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(dir));
int err;
if (is_inode_flag_set(inode, FI_NEW_INODE)) {
@@ -498,8 +497,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
- if ((IS_ENCRYPTED(dir) || dummy_encrypt) &&
- f2fs_may_encrypt(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = fscrypt_inherit_context(dir, inode, page, false);
if (err)
goto put_error;
@@ -850,12 +848,6 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
0);
set_page_dirty(page);
- dir->i_ctime = dir->i_mtime = current_time(dir);
- f2fs_mark_inode_dirty_sync(dir, false);
-
- if (inode)
- f2fs_drop_nlink(dir, inode);
-
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
f2fs_clear_page_cache_dirty_tag(page);
@@ -867,6 +859,12 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
f2fs_remove_dirty_inode(dir);
}
f2fs_put_page(page, 1);
+
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
+
+ if (inode)
+ f2fs_drop_nlink(dir, inode);
}
bool f2fs_empty_dir(struct inode *dir)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 088c3e7a1080..ba470d5687fe 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -75,7 +75,6 @@ extern const char *f2fs_fault_name[FAULT_MAX];
/*
* For mount options
*/
-#define F2FS_MOUNT_BG_GC 0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
#define F2FS_MOUNT_DISCARD 0x00000004
#define F2FS_MOUNT_NOHEAP 0x00000008
@@ -89,11 +88,8 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_NOBARRIER 0x00000800
#define F2FS_MOUNT_FASTBOOT 0x00001000
#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
-#define F2FS_MOUNT_FORCE_FG_GC 0x00004000
#define F2FS_MOUNT_DATA_FLUSH 0x00008000
#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
-#define F2FS_MOUNT_ADAPTIVE 0x00020000
-#define F2FS_MOUNT_LFS 0x00040000
#define F2FS_MOUNT_USRQUOTA 0x00080000
#define F2FS_MOUNT_GRPQUOTA 0x00100000
#define F2FS_MOUNT_PRJQUOTA 0x00200000
@@ -101,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
+#define F2FS_MOUNT_NORECOVERY 0x04000000
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -139,6 +136,8 @@ struct f2fs_mount_info {
int whint_mode;
int alloc_mode; /* segment allocation policy */
int fsync_mode; /* fsync policy */
+ int fs_mode; /* fs mode: LFS or ADAPTIVE */
+ int bggc_mode; /* bggc mode: off, on or sync */
bool test_dummy_encryption; /* test dummy encryption */
block_t unusable_cap; /* Amount of space allowed to be
* unusable when disabling checkpoint
@@ -332,8 +331,8 @@ struct discard_policy {
bool io_aware; /* issue discard in idle time */
bool sync; /* submit discard with REQ_SYNC flag */
bool ordered; /* issue discard by lba order */
+ bool timeout; /* discard timeout for put_super */
unsigned int granularity; /* discard granularity */
- int timeout; /* discard timeout for put_super */
};
struct discard_cmd_control {
@@ -428,6 +427,7 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
+#define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64)
#define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL
#define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL
@@ -560,6 +560,9 @@ enum {
#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */
+/* congestion wait timeout value, default: 20ms */
+#define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20))
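/*
 * Editor's note: the HZ/50 waits this constant replaces equal 20ms only
 * when HZ is a multiple of 50 and truncate to zero jiffies for HZ < 50,
 * whereas msecs_to_jiffies(20) is HZ-independent:
 *
 *	HZ=100  -> 2 jiffies
 *	HZ=250  -> 5 jiffies
 *	HZ=1000 -> 20 jiffies
 */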
+
/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
@@ -676,6 +679,44 @@ enum {
MAX_GC_FAILURE
};
+/* used for f2fs_inode_info->flags */
+enum {
+ FI_NEW_INODE, /* indicate newly allocated inode */
+ FI_DIRTY_INODE, /* indicate inode is dirty or not */
+ FI_AUTO_RECOVER, /* indicate inode is recoverable */
+ FI_DIRTY_DIR, /* indicate directory has dirty pages */
+ FI_INC_LINK, /* need to increment i_nlink */
+ FI_ACL_MODE, /* indicate acl mode */
+ FI_NO_ALLOC, /* should not allocate any blocks */
+ FI_FREE_NID, /* free allocated nid */
+ FI_NO_EXTENT, /* not to use the extent cache */
+ FI_INLINE_XATTR, /* used for inline xattr */
+ FI_INLINE_DATA, /* used for inline data */
+ FI_INLINE_DENTRY, /* used for inline dentry */
+ FI_APPEND_WRITE, /* inode has appended data */
+ FI_UPDATE_WRITE, /* inode has in-place-update data */
+ FI_NEED_IPU, /* used for ipu per file */
+ FI_ATOMIC_FILE, /* indicate atomic file */
+ FI_ATOMIC_COMMIT, /* indicate atomic commit is in progress */
+ FI_VOLATILE_FILE, /* indicate volatile file */
+ FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
+ FI_DROP_CACHE, /* drop dirty page cache */
+ FI_DATA_EXIST, /* indicate data exists */
+ FI_INLINE_DOTS, /* indicate inline dot dentries */
+ FI_DO_DEFRAG, /* indicate defragment is running */
+ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
+ FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
+ FI_HOT_DATA, /* indicate file is hot */
+ FI_EXTRA_ATTR, /* indicate file has extra attribute */
+ FI_PROJ_INHERIT, /* indicate file inherits projectid */
+ FI_PIN_FILE, /* indicate file should not be gced */
+ FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
+ FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
+ FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
+ FI_MMAP_FILE, /* indicate file was mmapped */
+ FI_MAX, /* max flag, never be used */
+};
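/*
 * Editor's note: terminating the enum with FI_MAX lets the per-inode
 * flag storage grow past BITS_PER_LONG; the field below becomes
 *
 *	unsigned long flags[BITS_TO_LONGS(FI_MAX)];
 *
 * i.e. DIV_ROUND_UP(FI_MAX, BITS_PER_LONG) longs (one long on 64-bit,
 * two on 32-bit for the 33 flags here), so the set_bit()/test_bit()
 * call sites work unchanged on the array form.
 */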
+
struct f2fs_inode_info {
struct inode vfs_inode; /* serve a vfs inode */
unsigned long i_flags; /* keep an inode flags for ioctl */
@@ -688,7 +729,7 @@ struct f2fs_inode_info {
umode_t i_acl_mode; /* keep file acl mode temporarily */
/* Use below internally in f2fs*/
- unsigned long flags; /* use to pass per-file flags */
+ unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* used to pass per-file flags */
struct rw_semaphore i_sem; /* protect fi info */
atomic_t dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */
@@ -697,6 +738,7 @@ struct f2fs_inode_info {
struct task_struct *cp_task; /* separate cp/wb IO stats*/
nid_t i_xattr_nid; /* node id that contains xattrs */
loff_t last_disk_size; /* lastly written file size */
+ spinlock_t i_size_lock; /* protect last_disk_size */
#ifdef CONFIG_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
@@ -1173,6 +1215,20 @@ enum {
};
enum {
+ BGGC_MODE_ON, /* background gc is on */
+ BGGC_MODE_OFF, /* background gc is off */
+ BGGC_MODE_SYNC, /*
+ * background gc is on, migrating blocks
+ * like foreground gc
+ */
+};
+
+enum {
+ FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
+ FS_MODE_LFS, /* use lfs allocation only */
+};
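/*
 * Editor's note: these two enums take over from the mount-flag bits
 * removed above (F2FS_MOUNT_BG_GC/FORCE_FG_GC and ADAPTIVE/LFS): old
 * test_opt(sbi, LFS) checks become f2fs_lfs_mode(sbi), and the GC
 * thread now tests F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC where
 * it used test_opt(sbi, FORCE_FG_GC).
 */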
+
+enum {
WHINT_MODE_OFF, /* not pass down write hints */
WHINT_MODE_USER, /* try to pass down hints given by users */
WHINT_MODE_FS, /* pass down hints with F2FS policy */
@@ -1212,13 +1268,13 @@ enum fsync_mode {
enum compress_algorithm_type {
COMPRESS_LZO,
COMPRESS_LZ4,
+ COMPRESS_ZSTD,
COMPRESS_MAX,
};
-#define COMPRESS_DATA_RESERVED_SIZE 4
+#define COMPRESS_DATA_RESERVED_SIZE 5
struct compress_data {
__le32 clen; /* compressed data size */
- __le32 chksum; /* checksum of compressed data */
__le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
u8 cdata[]; /* compressed data */
};
@@ -1242,6 +1298,7 @@ struct compress_ctx {
size_t rlen; /* valid data length in rbuf */
size_t clen; /* valid data length in cbuf */
void *private; /* payload buffer for specified compression algorithm */
+ void *private2; /* extra payload buffer */
};
/* compress context for write IO path */
@@ -1271,11 +1328,14 @@ struct decompress_io_ctx {
size_t clen; /* valid data length in cbuf */
refcount_t ref; /* reference count of compressed page */
bool failed; /* indicate IO error during decompression */
+ void *private; /* payload buffer for specified decompression algorithm */
+ void *private2; /* extra payload buffer */
};
#define NULL_CLUSTER ((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE 2
#define MAX_COMPRESS_LOG_SIZE 8
+#define MAX_COMPRESS_WINDOW_SIZE ((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE)
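/*
 * Editor's note: with 4KB pages this evaluates to 4096 << 8 = 1MB, the
 * window size handed to ZSTD_DStreamWorkspaceBound() and
 * ZSTD_initDStream() in zstd_init_decompress_ctx() regardless of a
 * file's actual cluster size.
 */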
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
@@ -1471,6 +1531,9 @@ struct f2fs_sb_info {
__u32 s_chksum_seed;
struct workqueue_struct *post_read_wq; /* post read workqueue */
+
+ struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
+ unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
};
struct f2fs_private_dio {
@@ -2211,7 +2274,7 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
dquot_free_inode(inode);
} else {
if (unlikely(inode->i_blocks == 0)) {
- f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
+ f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
inode->i_ino,
(unsigned long long)inode->i_blocks);
set_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -2379,7 +2442,7 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
}
static inline int f2fs_has_extra_attr(struct inode *inode);
-static inline block_t datablock_addr(struct inode *inode,
+static inline block_t data_blkaddr(struct inode *inode,
struct page *node_page, unsigned int offset)
{
struct f2fs_node *raw_node;
@@ -2389,9 +2452,9 @@ static inline block_t datablock_addr(struct inode *inode,
raw_node = F2FS_NODE(node_page);
- /* from GC path only */
if (is_inode) {
if (!inode)
+ /* from GC path only */
base = offset_in_addr(&raw_node->i);
else if (f2fs_has_extra_attr(inode))
base = get_extra_isize(inode);
@@ -2401,6 +2464,11 @@ static inline block_t datablock_addr(struct inode *inode,
return le32_to_cpu(addr_array[base + offset]);
}
+static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
+{
+ return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
+}
+
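/*
 * Editor's note: this helper is what the many datablock_addr() call
 * sites converted by this patch collapse into, e.g.
 *
 *	blkaddr = f2fs_data_blkaddr(&dn);
 *
 * instead of spelling out dn.inode, dn.node_page and dn.ofs_in_node.
 */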
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
int mask;
@@ -2498,43 +2566,6 @@ static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
return flags & F2FS_OTHER_FLMASK;
}
-/* used for f2fs_inode_info->flags */
-enum {
- FI_NEW_INODE, /* indicate newly allocated inode */
- FI_DIRTY_INODE, /* indicate inode is dirty or not */
- FI_AUTO_RECOVER, /* indicate inode is recoverable */
- FI_DIRTY_DIR, /* indicate directory has dirty pages */
- FI_INC_LINK, /* need to increment i_nlink */
- FI_ACL_MODE, /* indicate acl mode */
- FI_NO_ALLOC, /* should not allocate any blocks */
- FI_FREE_NID, /* free allocated nide */
- FI_NO_EXTENT, /* not to use the extent cache */
- FI_INLINE_XATTR, /* used for inline xattr */
- FI_INLINE_DATA, /* used for inline data*/
- FI_INLINE_DENTRY, /* used for inline dentry */
- FI_APPEND_WRITE, /* inode has appended data */
- FI_UPDATE_WRITE, /* inode has in-place-update data */
- FI_NEED_IPU, /* used for ipu per file */
- FI_ATOMIC_FILE, /* indicate atomic file */
- FI_ATOMIC_COMMIT, /* indicate the state of atomical committing */
- FI_VOLATILE_FILE, /* indicate volatile file */
- FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
- FI_DROP_CACHE, /* drop dirty page cache */
- FI_DATA_EXIST, /* indicate data exists */
- FI_INLINE_DOTS, /* indicate inline dot dentries */
- FI_DO_DEFRAG, /* indicate defragment is running */
- FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
- FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
- FI_HOT_DATA, /* indicate file is hot */
- FI_EXTRA_ATTR, /* indicate file has extra attribute */
- FI_PROJ_INHERIT, /* indicate file inherits projectid */
- FI_PIN_FILE, /* indicate file should not be gced */
- FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
- FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
- FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
- FI_MMAP_FILE, /* indicate file was mmapped */
-};
-
static inline void __mark_inode_dirty_flag(struct inode *inode,
int flag, bool set)
{
@@ -2549,27 +2580,24 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
case FI_PIN_FILE:
- case FI_COMPRESSED_FILE:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
static inline void set_inode_flag(struct inode *inode, int flag)
{
- if (!test_bit(flag, &F2FS_I(inode)->flags))
- set_bit(flag, &F2FS_I(inode)->flags);
+ test_and_set_bit(flag, F2FS_I(inode)->flags);
__mark_inode_dirty_flag(inode, flag, true);
}
static inline int is_inode_flag_set(struct inode *inode, int flag)
{
- return test_bit(flag, &F2FS_I(inode)->flags);
+ return test_bit(flag, F2FS_I(inode)->flags);
}
static inline void clear_inode_flag(struct inode *inode, int flag)
{
- if (test_bit(flag, &F2FS_I(inode)->flags))
- clear_bit(flag, &F2FS_I(inode)->flags);
+ test_and_clear_bit(flag, F2FS_I(inode)->flags);
__mark_inode_dirty_flag(inode, flag, false);
}
@@ -2660,19 +2688,19 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
struct f2fs_inode_info *fi = F2FS_I(inode);
if (ri->i_inline & F2FS_INLINE_XATTR)
- set_bit(FI_INLINE_XATTR, &fi->flags);
+ set_bit(FI_INLINE_XATTR, fi->flags);
if (ri->i_inline & F2FS_INLINE_DATA)
- set_bit(FI_INLINE_DATA, &fi->flags);
+ set_bit(FI_INLINE_DATA, fi->flags);
if (ri->i_inline & F2FS_INLINE_DENTRY)
- set_bit(FI_INLINE_DENTRY, &fi->flags);
+ set_bit(FI_INLINE_DENTRY, fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
- set_bit(FI_DATA_EXIST, &fi->flags);
+ set_bit(FI_DATA_EXIST, fi->flags);
if (ri->i_inline & F2FS_INLINE_DOTS)
- set_bit(FI_INLINE_DOTS, &fi->flags);
+ set_bit(FI_INLINE_DOTS, fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
- set_bit(FI_EXTRA_ATTR, &fi->flags);
+ set_bit(FI_EXTRA_ATTR, fi->flags);
if (ri->i_inline & F2FS_PIN_FILE)
- set_bit(FI_PIN_FILE, &fi->flags);
+ set_bit(FI_PIN_FILE, fi->flags);
}
static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
@@ -2857,9 +2885,9 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
if (!f2fs_is_time_consistent(inode))
return false;
- down_read(&F2FS_I(inode)->i_sem);
+ spin_lock(&F2FS_I(inode)->i_size_lock);
ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
- up_read(&F2FS_I(inode)->i_sem);
+ spin_unlock(&F2FS_I(inode)->i_size_lock);
return ret;
}
@@ -3213,7 +3241,7 @@ void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
-void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi);
+void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
@@ -3309,7 +3337,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
-void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
+void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
@@ -3320,7 +3348,7 @@ void f2fs_destroy_checkpoint_caches(void);
*/
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
-struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail);
+struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
@@ -3776,7 +3804,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
- bool is_readahead);
+ bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_free_dic(struct decompress_io_ctx *dic);
void f2fs_decompress_end_io(struct page **rpages,
@@ -3813,6 +3841,7 @@ static inline void set_compress_context(struct inode *inode)
F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
set_inode_flag(inode, FI_COMPRESSED_FILE);
stat_inc_compr_inode(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline u64 f2fs_disable_compressed_file(struct inode *inode)
@@ -3821,12 +3850,17 @@ static inline u64 f2fs_disable_compressed_file(struct inode *inode)
if (!f2fs_compressed_file(inode))
return 0;
- if (fi->i_compr_blocks)
- return fi->i_compr_blocks;
+ if (S_ISREG(inode->i_mode)) {
+ if (get_dirty_pages(inode))
+ return 1;
+ if (fi->i_compr_blocks)
+ return fi->i_compr_blocks;
+ }
fi->i_flags &= ~F2FS_COMPR_FL;
- clear_inode_flag(inode, FI_COMPRESSED_FILE);
stat_dec_compr_inode(inode);
+ clear_inode_flag(inode, FI_COMPRESSED_FILE);
+ f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
@@ -3903,31 +3937,25 @@ static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
return false;
}
-
-static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
+static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
- clear_opt(sbi, ADAPTIVE);
- clear_opt(sbi, LFS);
-
- switch (mt) {
- case F2FS_MOUNT_ADAPTIVE:
- set_opt(sbi, ADAPTIVE);
- break;
- case F2FS_MOUNT_LFS:
- set_opt(sbi, LFS);
- break;
- }
+ return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}
-static inline bool f2fs_may_encrypt(struct inode *inode)
+static inline bool f2fs_may_encrypt(struct inode *dir, struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
umode_t mode = inode->i_mode;
- return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
-#else
- return false;
+ /*
+ * If the directory is encrypted or dummy encryption is enabled,
+ * then we should encrypt the inode.
+ */
+ if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi))
+ return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#endif
+ return false;
}
static inline bool f2fs_may_compress(struct inode *inode)
@@ -3971,7 +3999,7 @@ static inline int allow_outplace_dio(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int rw = iov_iter_rw(iter);
- return (test_opt(sbi, LFS) && (rw == WRITE) &&
+ return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
!block_unaligned_IO(inode, iocb, iter));
}
@@ -3993,7 +4021,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
*/
if (f2fs_sb_has_blkzoned(sbi))
return true;
- if (test_opt(sbi, LFS) && (rw == WRITE)) {
+ if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
if (block_unaligned_IO(inode, iocb, iter))
return true;
if (F2FS_IO_ALIGNED(sbi))
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 351762f77840..6ab8f621a3c5 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -106,13 +106,20 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
err = f2fs_get_block(&dn, page->index);
f2fs_put_dnode(&dn);
__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
- if (err) {
- unlock_page(page);
- goto out_sem;
- }
}
- /* fill the page */
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (!need_alloc) {
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ f2fs_put_dnode(&dn);
+ }
+#endif
+ if (err) {
+ unlock_page(page);
+ goto out_sem;
+ }
+
f2fs_wait_on_page_writeback(page, DATA, false, true);
/* wait for GCed page writeback via META_MAPPING */
@@ -448,8 +455,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
block_t blkaddr;
- blkaddr = datablock_addr(dn.inode,
- dn.node_page, dn.ofs_in_node);
+ blkaddr = f2fs_data_blkaddr(&dn);
if (__is_valid_data_blkaddr(blkaddr) &&
!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
@@ -793,6 +799,8 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
}
flags = fi->i_flags;
+ if (flags & F2FS_COMPR_FL)
+ stat->attributes |= STATX_ATTR_COMPRESSED;
if (flags & F2FS_APPEND_FL)
stat->attributes |= STATX_ATTR_APPEND;
if (IS_ENCRYPTED(inode))
@@ -804,7 +812,8 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
if (IS_VERITY(inode))
stat->attributes |= STATX_ATTR_VERITY;
- stat->attributes_mask |= (STATX_ATTR_APPEND |
+ stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
+ STATX_ATTR_APPEND |
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP |
@@ -929,10 +938,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
- down_write(&F2FS_I(inode)->i_sem);
+ spin_lock(&F2FS_I(inode)->i_size_lock);
inode->i_mtime = inode->i_ctime = current_time(inode);
F2FS_I(inode)->last_disk_size = i_size_read(inode);
- up_write(&F2FS_I(inode)->i_sem);
+ spin_unlock(&F2FS_I(inode)->i_size_lock);
}
__setattr_copy(inode, attr);
@@ -1109,8 +1118,7 @@ next_dnode:
done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
dn.ofs_in_node, len);
for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
- *blkaddr = datablock_addr(dn.inode,
- dn.node_page, dn.ofs_in_node);
+ *blkaddr = f2fs_data_blkaddr(&dn);
if (__is_valid_data_blkaddr(*blkaddr) &&
!f2fs_is_valid_blkaddr(sbi, *blkaddr,
@@ -1121,7 +1129,7 @@ next_dnode:
if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
- if (test_opt(sbi, LFS)) {
+ if (f2fs_lfs_mode(sbi)) {
f2fs_put_dnode(&dn);
return -EOPNOTSUPP;
}
@@ -1199,8 +1207,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
ADDRS_PER_PAGE(dn.node_page, dst_inode) -
dn.ofs_in_node, len - i);
do {
- dn.data_blkaddr = datablock_addr(dn.inode,
- dn.node_page, dn.ofs_in_node);
+ dn.data_blkaddr = f2fs_data_blkaddr(&dn);
f2fs_truncate_data_blocks_range(&dn, 1);
if (do_replace[i]) {
@@ -1376,8 +1383,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
int ret;
for (; index < end; index++, dn->ofs_in_node++) {
- if (datablock_addr(dn->inode, dn->node_page,
- dn->ofs_in_node) == NULL_ADDR)
+ if (f2fs_data_blkaddr(dn) == NULL_ADDR)
count++;
}
@@ -1388,8 +1394,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
dn->ofs_in_node = ofs_in_node;
for (index = start; index < end; index++, dn->ofs_in_node++) {
- dn->data_blkaddr = datablock_addr(dn->inode,
- dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = f2fs_data_blkaddr(dn);
/*
* f2fs_reserve_new_blocks will not guarantee entire block
* allocation.
@@ -1787,12 +1792,15 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
+ u32 masked_flags = fi->i_flags & mask;
+
+ f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
return -EPERM;
- if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) {
+ if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
return -EOPNOTSUPP;
if (!f2fs_empty_dir(inode))
@@ -1806,27 +1814,22 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
return -EINVAL;
}
- if ((iflags ^ fi->i_flags) & F2FS_COMPR_FL) {
- if (S_ISREG(inode->i_mode) &&
- (fi->i_flags & F2FS_COMPR_FL || i_size_read(inode) ||
- F2FS_HAS_BLOCKS(inode)))
- return -EINVAL;
+ if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
+ if (masked_flags & F2FS_COMPR_FL) {
+ if (f2fs_disable_compressed_file(inode))
+ return -EINVAL;
+ }
if (iflags & F2FS_NOCOMP_FL)
return -EINVAL;
if (iflags & F2FS_COMPR_FL) {
- int err = f2fs_convert_inline_inode(inode);
-
- if (err)
- return err;
-
if (!f2fs_may_compress(inode))
return -EINVAL;
set_compress_context(inode);
}
}
- if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
- if (fi->i_flags & F2FS_COMPR_FL)
+ if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
+ if (masked_flags & F2FS_COMPR_FL)
return -EINVAL;
}
@@ -3401,6 +3404,21 @@ out:
return err;
}
+static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u64 blocks;
+
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+
+ if (!f2fs_compressed_file(inode))
+ return -EINVAL;
+
+ blocks = F2FS_I(inode)->i_compr_blocks;
+ return put_user(blocks, (u64 __user *)arg);
+}
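/*
 * Editor's note (hypothetical userspace sketch; the fd and path are
 * made up): the new ioctl reads back i_compr_blocks as a __u64:
 *
 *	__u64 blocks;
 *	int fd = open("/mnt/f2fs/file", O_RDONLY);
 *
 *	if (fd >= 0 &&
 *	    ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks) == 0)
 *		printf("compressed blocks: %llu\n",
 *		       (unsigned long long)blocks);
 */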
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -3481,6 +3499,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_get_volume_name(filp, arg);
case F2FS_IOC_SET_VOLUME_NAME:
return f2fs_set_volume_name(filp, arg);
+ case F2FS_IOC_GET_COMPRESS_BLOCKS:
+ return f2fs_get_compress_blocks(filp, arg);
default:
return -ENOTTY;
}
@@ -3508,8 +3528,10 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
- if (!f2fs_is_compress_backend_ready(inode))
- return -EOPNOTSUPP;
+ if (!f2fs_is_compress_backend_ready(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode)) {
@@ -3639,6 +3661,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC_MEASURE_VERITY:
case F2FS_IOC_GET_VOLUME_NAME:
case F2FS_IOC_SET_VOLUME_NAME:
+ case F2FS_IOC_GET_COMPRESS_BLOCKS:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index db8725d473b5..26248c8936db 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -31,6 +31,8 @@ static int gc_thread_func(void *data)
set_freezable();
do {
+ bool sync_mode;
+
wait_event_interruptible_timeout(*wq,
kthread_should_stop() || freezing(current) ||
gc_th->gc_wake,
@@ -101,15 +103,17 @@ static int gc_thread_func(void *data)
do_gc:
stat_inc_bggc_count(sbi->stat_info);
+ sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
+ if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
wait_ms = gc_th->no_gc_sleep_time;
trace_f2fs_background_gc(sbi->sb, wait_ms,
prefree_segments(sbi), free_segments(sbi));
/* balancing f2fs's metadata periodically */
- f2fs_balance_fs_bg(sbi);
+ f2fs_balance_fs_bg(sbi, true);
next:
sb_end_write(sbi->sb);
@@ -192,7 +196,10 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
p->ofs_unit = sbi->segs_per_sec;
}
- /* we need to check every dirty segments in the FG_GC case */
+ /*
+ * adjust the candidate range: all dirty segments should be
+ * selectable in the foreground GC and urgent GC cases.
+ */
if (gc_type != FG_GC &&
(sbi->gc_mode != GC_URGENT) &&
p->max_search > sbi->max_victim_search)
@@ -634,7 +641,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
*nofs = ofs_of_node(node_page);
- source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
+ source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
f2fs_put_page(node_page, 1);
if (source_blkaddr != blkaddr) {
@@ -762,7 +769,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
struct page *page, *mpage;
block_t newaddr;
int err = 0;
- bool lfs_mode = test_opt(fio.sbi, LFS);
+ bool lfs_mode = f2fs_lfs_mode(fio.sbi);
/* do not read out */
page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
@@ -970,7 +977,8 @@ retry:
if (err) {
clear_cold_data(page);
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC,
+ DEFAULT_IO_TIMEOUT);
goto retry;
}
if (is_dirty)
@@ -1018,8 +1026,8 @@ next_step:
* race condition along with SSR block allocation.
*/
if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
- get_valid_blocks(sbi, segno, false) ==
- sbi->blocks_per_seg)
+ get_valid_blocks(sbi, segno, true) ==
+ BLKS_PER_SEC(sbi))
return submitted;
if (check_valid_map(sbi, segno, off) == 0)
@@ -1203,7 +1211,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
if (get_valid_blocks(sbi, segno, false) == 0)
goto freed;
- if (__is_large_section(sbi) &&
+ if (gc_type == BG_GC && __is_large_section(sbi) &&
migrated >= sbi->migration_granularity)
goto skip;
if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
@@ -1233,12 +1241,12 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
segno, gc_type);
stat_inc_seg_count(sbi, type, gc_type);
+ migrated++;
freed:
if (gc_type == FG_GC &&
get_valid_blocks(sbi, segno, false) == 0)
seg_freed++;
- migrated++;
if (__is_large_section(sbi) && segno + 1 < end_segno)
sbi->next_victim_seg[gc_type] = segno + 1;
@@ -1434,12 +1442,19 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
- int section_count = le32_to_cpu(raw_sb->section_count);
- int segment_count = le32_to_cpu(raw_sb->segment_count);
- int segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
- long long block_count = le64_to_cpu(raw_sb->block_count);
+ int section_count;
+ int segment_count;
+ int segment_count_main;
+ long long block_count;
int segs = secs * sbi->segs_per_sec;
+ down_write(&sbi->sb_lock);
+
+ section_count = le32_to_cpu(raw_sb->section_count);
+ segment_count = le32_to_cpu(raw_sb->segment_count);
+ segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
+ block_count = le64_to_cpu(raw_sb->block_count);
+
raw_sb->section_count = cpu_to_le32(section_count + secs);
raw_sb->segment_count = cpu_to_le32(segment_count + segs);
raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
@@ -1453,6 +1468,8 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
raw_sb->devs[last_dev].total_segments =
cpu_to_le32(dev_segs + segs);
}
+
+ up_write(&sbi->sb_lock);
}
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
@@ -1570,11 +1587,17 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
goto out;
}
+ mutex_lock(&sbi->cp_mutex);
update_fs_metadata(sbi, -secs);
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+ mutex_unlock(&sbi->cp_mutex);
+
err = f2fs_sync_fs(sbi->sb, 1);
if (err) {
+ mutex_lock(&sbi->cp_mutex);
update_fs_metadata(sbi, secs);
+ mutex_unlock(&sbi->cp_mutex);
update_sb_metadata(sbi, secs);
f2fs_commit_super(sbi, false);
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 78c3f1d70f1d..44582a4db513 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -291,13 +291,30 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
fi->i_flags & F2FS_COMPR_FL &&
F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
i_log_cluster_size)) {
- if (ri->i_compress_algorithm >= COMPRESS_MAX)
+ if (ri->i_compress_algorithm >= COMPRESS_MAX) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+ "compress algorithm: %u, run fsck to fix",
+ __func__, inode->i_ino,
+ ri->i_compress_algorithm);
return false;
- if (le64_to_cpu(ri->i_compr_blocks) > inode->i_blocks)
+ }
+ if (le64_to_cpu(ri->i_compr_blocks) >
+ SECTOR_TO_BLOCK(inode->i_blocks)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
+ "i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
+ __func__, inode->i_ino,
+ le64_to_cpu(ri->i_compr_blocks),
+ SECTOR_TO_BLOCK(inode->i_blocks));
return false;
+ }
if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
- ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE)
+ ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+ "log cluster size: %u, run fsck to fix",
+ __func__, inode->i_ino,
+ ri->i_log_cluster_size);
return false;
+ }
}
return true;
@@ -345,7 +362,7 @@ static int do_read_inode(struct inode *inode)
fi->i_flags = le32_to_cpu(ri->i_flags);
if (S_ISREG(inode->i_mode))
fi->i_flags &= ~F2FS_PROJINHERIT_FL;
- fi->flags = 0;
+ bitmap_zero(fi->flags, FI_MAX);
fi->i_advise = ri->i_advise;
fi->i_pino = le32_to_cpu(ri->i_pino);
fi->i_dir_level = ri->i_dir_level;
@@ -518,7 +535,7 @@ retry:
inode = f2fs_iget(sb, ino);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
goto retry;
}
}
@@ -759,7 +776,7 @@ no_delete:
else
f2fs_inode_synced(inode);
- /* ino == 0, if f2fs_new_inode() was failed t*/
+ /* if f2fs_new_inode() failed, i_ino is zero; skip the invalidation */
if (inode->i_ino)
invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
inode->i_ino);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 2aa035422c0f..f54119da2217 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -75,9 +75,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
set_inode_flag(inode, FI_NEW_INODE);
- /* If the directory encrypted, then we should encrypt the inode. */
- if ((IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
- f2fs_may_encrypt(inode))
+ if (f2fs_may_encrypt(dir, inode))
f2fs_set_encrypted_inode(inode);
if (f2fs_sb_has_extra_attr(sbi)) {
@@ -177,7 +175,7 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub)
}
/*
- * Set multimedia files as cold files for hot/cold data separation
+ * Set file's temperature for hot/cold data separation
*/
static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode,
const unsigned char *name)
@@ -876,12 +874,6 @@ static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) {
- int err = fscrypt_get_encryption_info(dir);
- if (err)
- return err;
- }
-
return __f2fs_tmpfile(dir, dentry, mode, NULL);
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9d02cdcdbb07..ecbd6bd14a49 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -510,9 +510,6 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
return nr - nr_shrink;
}
-/*
- * This function always returns success
- */
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct node_info *ni)
{
@@ -716,8 +713,7 @@ got:
/*
* Caller should call f2fs_put_dnode(dn).
* Also, it should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op() only if ro is not set RDONLY_NODE.
- * In the case of RDONLY_NODE, we don't need to care about mutex.
+ * f2fs_unlock_op() only if mode is set to ALLOC_NODE.
*/
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
@@ -809,8 +805,7 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
dn->nid = nids[level];
dn->ofs_in_node = offset[level];
dn->node_page = npage[level];
- dn->data_blkaddr = datablock_addr(dn->inode,
- dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = f2fs_data_blkaddr(dn);
return 0;
release_pages:
@@ -1188,8 +1183,9 @@ int f2fs_remove_inode_page(struct inode *inode)
}
if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
- f2fs_warn(F2FS_I_SB(inode), "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
- inode->i_ino, (unsigned long long)inode->i_blocks);
+ f2fs_warn(F2FS_I_SB(inode),
+ "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
+ inode->i_ino, (unsigned long long)inode->i_blocks);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
}
@@ -1562,15 +1558,16 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
- set_page_writeback(page);
- ClearPageError(page);
-
+ /* should add to global list before clearing PAGECACHE status */
if (f2fs_in_warm_node_list(sbi, page)) {
seq = f2fs_add_fsync_node_entry(sbi, page);
if (seq_id)
*seq_id = seq;
}
+ set_page_writeback(page);
+ ClearPageError(page);
+
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
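The reordering above is subtle: the fsync node entry is now published before the page is marked under writeback. A hedged sketch of the window the old order left open (the interleaving is illustrative, not taken from a reproducer):

/*
 * old order                          concurrent fsync waiter
 * ---------                          -----------------------
 * set_page_writeback(page);
 *                                    observes writeback (or its
 *                                    completion), scans the global
 *                                    fsync list, finds no entry for
 *                                    this node -> wrongly assumes
 *                                    nothing is left to wait for
 * f2fs_add_fsync_node_entry(...);
 *
 * With the entry added first, anyone who sees the page under
 * writeback is guaranteed to also find it on the list.
 */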
@@ -1979,7 +1976,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
goto skip_write;
/* balancing f2fs's metadata in background */
- f2fs_balance_fs_bg(sbi);
+ f2fs_balance_fs_bg(sbi, true);
/* collect a number of dirty node pages and write together */
if (wbc->sync_mode != WB_SYNC_ALL &&
@@ -2602,7 +2599,7 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
retry:
ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
if (!ipage) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
goto retry;
}
@@ -3193,22 +3190,22 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
int __init f2fs_create_node_manager_caches(void)
{
- nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
+ nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
sizeof(struct nat_entry));
if (!nat_entry_slab)
goto fail;
- free_nid_slab = f2fs_kmem_cache_create("free_nid",
+ free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
sizeof(struct free_nid));
if (!free_nid_slab)
goto destroy_nat_entry;
- nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
+ nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
sizeof(struct nat_entry_set));
if (!nat_entry_set_slab)
goto destroy_free_nid;
- fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+ fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
sizeof(struct fsync_node_entry));
if (!fsync_node_entry_slab)
goto destroy_nat_entry_set;
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 763d5c0951d1..dd804c07eeb0 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -496,8 +496,7 @@ out:
return 0;
truncate_out:
- if (datablock_addr(tdn.inode, tdn.node_page,
- tdn.ofs_in_node) == blkaddr)
+ if (f2fs_data_blkaddr(&tdn) == blkaddr)
f2fs_truncate_data_blocks_range(&tdn, 1);
if (dn->inode->i_ino == nid && !dn->inode_page_locked)
unlock_page(dn->inode_page);
@@ -535,7 +534,7 @@ retry_dn:
err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
goto retry_dn;
}
goto out;
@@ -560,8 +559,8 @@ retry_dn:
for (; start < end; start++, dn.ofs_in_node++) {
block_t src, dest;
- src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
- dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
+ src = f2fs_data_blkaddr(&dn);
+ dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);
if (__is_valid_data_blkaddr(src) &&
!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
@@ -618,7 +617,8 @@ retry_prev:
err = check_index_in_prev_nodes(sbi, dest, &dn);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC,
+ DEFAULT_IO_TIMEOUT);
goto retry_prev;
}
goto err;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index cf0eb002cfd4..b7a9421472a7 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -172,7 +172,7 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
- if (test_opt(sbi, LFS))
+ if (f2fs_lfs_mode(sbi))
return false;
if (sbi->gc_mode == GC_URGENT)
return true;
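test_opt(sbi, LFS) is replaced throughout this series by f2fs_lfs_mode(), presumably a thin helper over the new fs_mode enum introduced in the super.c hunks below. A sketch of what such a helper would look like (the real definition is in f2fs.h and is not part of this diff):

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}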
@@ -245,7 +245,8 @@ retry:
LOOKUP_NODE);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC,
+ DEFAULT_IO_TIMEOUT);
cond_resched();
goto retry;
}
@@ -312,7 +313,7 @@ next:
skip:
iput(inode);
}
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
cond_resched();
if (gc_failure) {
if (++looped >= count)
@@ -415,7 +416,8 @@ retry:
err = f2fs_do_write_data_page(&fio);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC,
+ DEFAULT_IO_TIMEOUT);
cond_resched();
goto retry;
}
@@ -494,7 +496,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
/* balance_fs_bg is able to be pending */
if (need && excess_cached_nats(sbi))
- f2fs_balance_fs_bg(sbi);
+ f2fs_balance_fs_bg(sbi, false);
if (!f2fs_is_checkpoint_ready(sbi))
return;
@@ -509,7 +511,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
}
}
-void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return;
@@ -538,7 +540,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
excess_dirty_nats(sbi) ||
excess_dirty_nodes(sbi) ||
f2fs_time_over(sbi, CP_TIME)) {
- if (test_opt(sbi, DATA_FLUSH)) {
+ if (test_opt(sbi, DATA_FLUSH) && from_bg) {
struct blk_plug plug;
mutex_lock(&sbi->flush_lock);
@@ -1078,7 +1080,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
dpolicy->io_aware_gran = MAX_PLIST_NUM;
- dpolicy->timeout = 0;
+ dpolicy->timeout = false;
if (discard_type == DPOLICY_BG) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -1103,6 +1105,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
dpolicy->io_aware = false;
/* we need to issue all to keep CP_TRIMMED_FLAG */
dpolicy->granularity = 1;
+ dpolicy->timeout = true;
}
}
@@ -1471,12 +1474,12 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
int i, issued = 0;
bool io_interrupted = false;
- if (dpolicy->timeout != 0)
- f2fs_update_time(sbi, dpolicy->timeout);
+ if (dpolicy->timeout)
+ f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
- if (dpolicy->timeout != 0 &&
- f2fs_time_over(sbi, dpolicy->timeout))
+ if (dpolicy->timeout &&
+ f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
break;
if (i + 1 < dpolicy->granularity)
@@ -1497,8 +1500,8 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
- if (dpolicy->timeout != 0 &&
- f2fs_time_over(sbi, dpolicy->timeout))
+ if (dpolicy->timeout &&
+ f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
break;
if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
@@ -1677,7 +1680,6 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
dcc->discard_granularity);
- dpolicy.timeout = UMOUNT_DISCARD_TIMEOUT;
__issue_discard_cmd(sbi, &dpolicy);
dropped = __drop_discard_cmd(sbi);
@@ -1940,7 +1942,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason & CP_DISCARD);
- bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
+ bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
mutex_lock(&dirty_i->seglist_lock);
@@ -1972,7 +1974,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
(end - 1) <= cpc->trim_end)
continue;
- if (!test_opt(sbi, LFS) || !__is_large_section(sbi)) {
+ if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
continue;
@@ -2801,7 +2803,7 @@ next:
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
trimmed += __wait_all_discard_cmd(sbi, NULL);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
goto next;
}
skip:
@@ -2830,7 +2832,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
struct discard_policy dpolicy;
unsigned long long trimmed = 0;
int err = 0;
- bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
+ bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
@@ -3193,7 +3195,7 @@ static void update_device_state(struct f2fs_io_info *fio)
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
int type = __get_segment_type(fio);
- bool keep_order = (test_opt(fio->sbi, LFS) && type == CURSEG_COLD_DATA);
+ bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
if (keep_order)
down_read(&fio->sbi->io_order_lock);
@@ -4071,7 +4073,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_i->dirty_sentries = 0;
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
- sit_i->mounted_time = ktime_get_real_seconds();
+ sit_i->mounted_time = ktime_get_boottime_seconds();
init_rwsem(&sit_i->sentry_lock);
return 0;
}
@@ -4678,7 +4680,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
- if (!test_opt(sbi, LFS))
+ if (!f2fs_lfs_mode(sbi))
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
@@ -4830,22 +4832,22 @@ void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
int __init f2fs_create_segment_manager_caches(void)
{
- discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
+ discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
sizeof(struct discard_entry));
if (!discard_entry_slab)
goto fail;
- discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
+ discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
sizeof(struct discard_cmd));
if (!discard_cmd_slab)
goto destroy_discard_entry;
- sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
+ sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
sizeof(struct sit_entry_set));
if (!sit_entry_set_slab)
goto destroy_discard_cmd;
- inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
+ inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
sizeof(struct inmem_pages));
if (!inmem_entry_slab)
goto destroy_sit_entry_set;
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 459dc3901a57..7a83bd530812 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -756,7 +756,7 @@ static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
bool base_time)
{
struct sit_info *sit_i = SIT_I(sbi);
- time64_t diff, now = ktime_get_real_seconds();
+ time64_t diff, now = ktime_get_boottime_seconds();
if (now >= sit_i->mounted_time)
return sit_i->elapsed_time + now - sit_i->mounted_time;
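Switching mounted_time and get_mtime() from ktime_get_real_seconds() to ktime_get_boottime_seconds() matters because the realtime clock can jump backwards (settimeofday(), NTP steps), which would make "now < mounted_time" and corrupt segment age accounting; the boottime clock is monotonic and also advances across suspend. A minimal sketch of the computation under the new clock (the function name is illustrative):

#include <linux/timekeeping.h>

static time64_t elapsed_since_mount(time64_t mounted_time, time64_t base)
{
	time64_t now = ktime_get_boottime_seconds();	/* never goes back */

	return base + now - mounted_time;
}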
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index a467aca29cfe..d66de5999a26 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -58,7 +58,7 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
/* count extent cache entries */
count += __count_extent_cache(sbi);
- /* shrink clean nat cache entries */
+ /* count clean nat cache entries */
count += __count_nat_entries(sbi);
/* count free nids cache entries */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index d398b2d90c6c..f2dfc21c6abb 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -428,14 +428,11 @@ static int parse_options(struct super_block *sb, char *options)
if (!name)
return -ENOMEM;
if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
- set_opt(sbi, BG_GC);
- clear_opt(sbi, FORCE_FG_GC);
+ F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
- clear_opt(sbi, BG_GC);
- clear_opt(sbi, FORCE_FG_GC);
+ F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
- set_opt(sbi, BG_GC);
- set_opt(sbi, FORCE_FG_GC);
+ F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
} else {
kvfree(name);
return -EINVAL;
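The two independent flags BG_GC and FORCE_FG_GC encoded three legal states plus one meaningless one (FORCE_FG_GC without BG_GC); folding them into a single bggc_mode field makes the states mutually exclusive by construction. A sketch of the tri-state, with names taken from the diff and ordering assumed:

enum {
	BGGC_MODE_ON,	/* background gc thread runs */
	BGGC_MODE_OFF,	/* no background gc */
	BGGC_MODE_SYNC,	/* background gc work done synchronously */
};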
@@ -447,7 +444,7 @@ static int parse_options(struct super_block *sb, char *options)
break;
case Opt_norecovery:
/* this option mounts f2fs with ro */
- set_opt(sbi, DISABLE_ROLL_FORWARD);
+ set_opt(sbi, NORECOVERY);
if (!f2fs_readonly(sb))
return -EINVAL;
break;
@@ -601,10 +598,10 @@ static int parse_options(struct super_block *sb, char *options)
kvfree(name);
return -EINVAL;
}
- set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
} else if (strlen(name) == 3 &&
!strncmp(name, "lfs", 3)) {
- set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
} else {
kvfree(name);
return -EINVAL;
@@ -833,6 +830,10 @@ static int parse_options(struct super_block *sb, char *options)
!strcmp(name, "lz4")) {
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZ4;
+ } else if (strlen(name) == 4 &&
+ !strcmp(name, "zstd")) {
+ F2FS_OPTION(sbi).compress_algorithm =
+ COMPRESS_ZSTD;
} else {
kfree(name);
return -EINVAL;
@@ -905,7 +906,7 @@ static int parse_options(struct super_block *sb, char *options)
}
#endif
- if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
+ if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
F2FS_IO_SIZE_KB(sbi));
return -EINVAL;
@@ -935,7 +936,7 @@ static int parse_options(struct super_block *sb, char *options)
}
}
- if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
+ if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
return -EINVAL;
}
@@ -961,6 +962,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
init_rwsem(&fi->i_sem);
+ spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_ilist);
@@ -1173,7 +1175,7 @@ static void f2fs_put_super(struct super_block *sb)
/* our cp_error case, we can wait for any writeback page */
f2fs_flush_merged_writes(sbi);
- f2fs_wait_on_all_pages_writeback(sbi);
+ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
f2fs_bug_on(sbi, sbi->fsync_node_num);
@@ -1205,6 +1207,7 @@ static void f2fs_put_super(struct super_block *sb)
kvfree(sbi->raw_super);
destroy_device_list(sbi);
+ f2fs_destroy_xattr_caches(sbi);
mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
@@ -1421,6 +1424,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
case COMPRESS_LZ4:
algtype = "lz4";
break;
+ case COMPRESS_ZSTD:
+ algtype = "zstd";
+ break;
}
seq_printf(seq, ",compress_algorithm=%s", algtype);
@@ -1437,16 +1443,17 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
- if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
- if (test_opt(sbi, FORCE_FG_GC))
- seq_printf(seq, ",background_gc=%s", "sync");
- else
- seq_printf(seq, ",background_gc=%s", "on");
- } else {
+ if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
+ seq_printf(seq, ",background_gc=%s", "sync");
+ else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
+ seq_printf(seq, ",background_gc=%s", "on");
+ else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
seq_printf(seq, ",background_gc=%s", "off");
- }
+
if (test_opt(sbi, DISABLE_ROLL_FORWARD))
seq_puts(seq, ",disable_roll_forward");
+ if (test_opt(sbi, NORECOVERY))
+ seq_puts(seq, ",norecovery");
if (test_opt(sbi, DISCARD))
seq_puts(seq, ",discard");
else
@@ -1498,9 +1505,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",data_flush");
seq_puts(seq, ",mode=");
- if (test_opt(sbi, ADAPTIVE))
+ if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
seq_puts(seq, "adaptive");
- else if (test_opt(sbi, LFS))
+ else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
if (test_opt(sbi, RESERVE_ROOT))
@@ -1571,11 +1578,11 @@ static void default_options(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).test_dummy_encryption = false;
F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
- F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZO;
+ F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
F2FS_OPTION(sbi).compress_ext_cnt = 0;
+ F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
- set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
@@ -1587,9 +1594,9 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, FLUSH_MERGE);
set_opt(sbi, DISCARD);
if (f2fs_sb_has_blkzoned(sbi))
- set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
else
- set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -1658,7 +1665,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
out_unlock:
up_write(&sbi->gc_lock);
restore_flag:
- sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return err;
}
@@ -1781,7 +1788,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
* or if background_gc = off is passed in mount
* option. Also sync the filesystem.
*/
- if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
+ if ((*flags & SB_RDONLY) ||
+ F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) {
if (sbi->gc_thread) {
f2fs_stop_gc_thread(sbi);
need_restart_gc = true;
@@ -1886,7 +1894,8 @@ repeat:
page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
if (IS_ERR(page)) {
if (PTR_ERR(page) == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC,
+ DEFAULT_IO_TIMEOUT);
goto repeat;
}
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
@@ -1928,6 +1937,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
int offset = off & (sb->s_blocksize - 1);
size_t towrite = len;
struct page *page;
+ void *fsdata = NULL;
char *kaddr;
int err = 0;
int tocopy;
@@ -1937,10 +1947,11 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
towrite);
retry:
err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
- &page, NULL);
+ &page, &fsdata);
if (unlikely(err)) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ congestion_wait(BLK_RW_ASYNC,
+ DEFAULT_IO_TIMEOUT);
goto retry;
}
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
@@ -1953,7 +1964,7 @@ retry:
flush_dcache_page(page);
a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
- page, NULL);
+ page, fsdata);
offset = 0;
towrite -= tocopy;
off += tocopy;
@@ -3457,12 +3468,17 @@ try_onemore:
}
}
+ /* init per sbi slab cache */
+ err = f2fs_init_xattr_caches(sbi);
+ if (err)
+ goto free_io_dummy;
+
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
f2fs_err(sbi, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
- goto free_io_dummy;
+ goto free_xattr_cache;
}
err = f2fs_get_valid_checkpoint(sbi);
@@ -3590,7 +3606,7 @@ try_onemore:
f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
}
#endif
- /* if there are nt orphan nodes free them */
+ /* if there are any orphan inodes, free them */
err = f2fs_recover_orphan_inodes(sbi);
if (err)
goto free_meta;
@@ -3599,7 +3615,8 @@ try_onemore:
goto reset_checkpoint;
/* recover fsynced data */
- if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
+ if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
+ !test_opt(sbi, NORECOVERY)) {
/*
* mount should be failed, when device has readonly mode, and
* previous checkpoint was not done by clean system shutdown.
@@ -3665,7 +3682,7 @@ reset_checkpoint:
* If filesystem is not mounted as read-only then
* do start the gc_thread.
*/
- if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
+ if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) {
/* After POR, we can run background GC thread.*/
err = f2fs_start_gc_thread(sbi);
if (err)
@@ -3734,6 +3751,8 @@ free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
sbi->meta_inode = NULL;
+free_xattr_cache:
+ f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
mempool_destroy(sbi->write_io_dummy);
free_percpu:
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 91d649790b1b..e3bbbef9b4f0 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -109,47 +109,47 @@ static ssize_t features_show(struct f2fs_attr *a,
return sprintf(buf, "0\n");
if (f2fs_sb_has_encrypt(sbi))
- len += snprintf(buf, PAGE_SIZE - len, "%s",
+ len += scnprintf(buf, PAGE_SIZE - len, "%s",
"encryption");
if (f2fs_sb_has_blkzoned(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "blkzoned");
if (f2fs_sb_has_extra_attr(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "extra_attr");
if (f2fs_sb_has_project_quota(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "projquota");
if (f2fs_sb_has_inode_chksum(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "inode_checksum");
if (f2fs_sb_has_flexible_inline_xattr(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "flexible_inline_xattr");
if (f2fs_sb_has_quota_ino(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "quota_ino");
if (f2fs_sb_has_inode_crtime(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "inode_crtime");
if (f2fs_sb_has_lost_found(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "lost_found");
if (f2fs_sb_has_verity(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "verity");
if (f2fs_sb_has_sb_chksum(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "sb_checksum");
if (f2fs_sb_has_casefold(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "casefold");
if (f2fs_sb_has_compression(sbi))
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "compression");
- len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "pin_file");
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
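The wholesale snprintf() -> scnprintf() conversion above fixes a classic sysfs bug: snprintf() returns the length that would have been written, so once output is truncated, len overshoots and the next call's "buf + len" points past the buffer while "PAGE_SIZE - len" underflows. scnprintf() returns the bytes actually stored. A self-contained userspace demonstration (scnprintf is kernel-only, so it is emulated here):

#include <stdio.h>

/* emulate kernel scnprintf(): clamp the return to what was stored */
static int scnprintf_demo(char *buf, size_t size, const char *s)
{
	int n = snprintf(buf, size, "%s", s);

	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char buf[8];
	int len;

	len = snprintf(buf, sizeof(buf), "feature_one");
	printf("snprintf returned %d for an 8-byte buffer\n", len);	/* 11 */

	len = scnprintf_demo(buf, sizeof(buf), "feature_one");
	printf("scnprintf-style returned %d\n", len);			/* 7 */
	return 0;
}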
@@ -185,6 +185,12 @@ static ssize_t encoding_show(struct f2fs_attr *a,
return sprintf(buf, "(none)");
}
+static ssize_t mounted_time_sec_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sprintf(buf, "%llu", SIT_I(sbi)->mounted_time);
+}
+
#ifdef CONFIG_F2FS_STAT_FS
static ssize_t moved_blocks_foreground_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
@@ -233,16 +239,16 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
int hot_count = sbi->raw_super->hot_ext_count;
int len = 0, i;
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"cold file extension:\n");
for (i = 0; i < cold_count; i++)
- len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
extlist[i]);
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"hot file extension:\n");
for (i = cold_count; i < cold_count + hot_count; i++)
- len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
extlist[i]);
return len;
}
@@ -544,6 +550,7 @@ F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
F2FS_GENERAL_RO_ATTR(unusable);
F2FS_GENERAL_RO_ATTR(encoding);
+F2FS_GENERAL_RO_ATTR(mounted_time_sec);
#ifdef CONFIG_F2FS_STAT_FS
F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
@@ -573,7 +580,9 @@ F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
#endif
F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
+#endif
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -621,6 +630,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(reserved_blocks),
ATTR_LIST(current_reserved_blocks),
ATTR_LIST(encoding),
+ ATTR_LIST(mounted_time_sec),
#ifdef CONFIG_F2FS_STAT_FS
ATTR_LIST(cp_foreground_calls),
ATTR_LIST(cp_background_calls),
@@ -654,7 +664,9 @@ static struct attribute *f2fs_feat_attrs[] = {
#endif
ATTR_LIST(sb_checksum),
ATTR_LIST(casefold),
+#ifdef CONFIG_F2FS_FS_COMPRESSION
ATTR_LIST(compression),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(f2fs_feat);
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 296b3189448a..4f6582ef7ee3 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -23,6 +23,25 @@
#include "xattr.h"
#include "segment.h"
+static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline)
+{
+ if (likely(size == sbi->inline_xattr_slab_size)) {
+ *is_inline = true;
+ return kmem_cache_zalloc(sbi->inline_xattr_slab, GFP_NOFS);
+ }
+ *is_inline = false;
+ return f2fs_kzalloc(sbi, size, GFP_NOFS);
+}
+
+static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr,
+ bool is_inline)
+{
+ if (is_inline)
+ kmem_cache_free(sbi->inline_xattr_slab, xattr_addr);
+ else
+ kvfree(xattr_addr);
+}
+
static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
@@ -301,7 +320,8 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
unsigned int index, unsigned int len,
const char *name, struct f2fs_xattr_entry **xe,
- void **base_addr, int *base_size)
+ void **base_addr, int *base_size,
+ bool *is_inline)
{
void *cur_addr, *txattr_addr, *last_txattr_addr;
void *last_addr = NULL;
@@ -312,12 +332,12 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
if (!xnid && !inline_size)
return -ENODATA;
- *base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
- txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
+ *base_size = XATTR_SIZE(inode) + XATTR_PADDING_SIZE;
+ txattr_addr = xattr_alloc(F2FS_I_SB(inode), *base_size, is_inline);
if (!txattr_addr)
return -ENOMEM;
- last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
+ last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(inode);
/* read from inline xattr */
if (inline_size) {
@@ -362,7 +382,7 @@ check:
*base_addr = txattr_addr;
return 0;
out:
- kvfree(txattr_addr);
+ xattr_free(F2FS_I_SB(inode), txattr_addr, *is_inline);
return err;
}
@@ -499,6 +519,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
unsigned int size, len;
void *base_addr = NULL;
int base_size;
+ bool is_inline;
if (name == NULL)
return -EINVAL;
@@ -509,7 +530,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name,
- &entry, &base_addr, &base_size);
+ &entry, &base_addr, &base_size, &is_inline);
up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
@@ -532,14 +553,13 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
}
error = size;
out:
- kvfree(base_addr);
+ xattr_free(F2FS_I_SB(inode), base_addr, is_inline);
return error;
}
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = d_inode(dentry);
- nid_t xnid = F2FS_I(inode)->i_xattr_nid;
struct f2fs_xattr_entry *entry;
void *base_addr, *last_base_addr;
int error = 0;
@@ -551,7 +571,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
if (error)
return error;
- last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+ last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
@@ -609,7 +629,6 @@ static int __f2fs_setxattr(struct inode *inode, int index,
{
struct f2fs_xattr_entry *here, *last;
void *base_addr, *last_base_addr;
- nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int found, newsize;
size_t len;
__u32 new_hsize;
@@ -633,7 +652,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (error)
return error;
- last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+ last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
/* find entry with wanted name. */
here = __find_xattr(base_addr, last_base_addr, index, len, name);
@@ -758,14 +777,34 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
- /* protect xattr_ver */
- down_write(&F2FS_I(inode)->i_sem);
down_write(&F2FS_I(inode)->i_xattr_sem);
err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
up_write(&F2FS_I(inode)->i_xattr_sem);
- up_write(&F2FS_I(inode)->i_sem);
f2fs_unlock_op(sbi);
f2fs_update_time(sbi, REQ_TIME);
return err;
}
+
+int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi)
+{
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+ char slab_name[32];
+
+ sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev));
+
+ sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size *
+ sizeof(__le32) + XATTR_PADDING_SIZE;
+
+ sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name,
+ sbi->inline_xattr_slab_size);
+ if (!sbi->inline_xattr_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi)
+{
+ kmem_cache_destroy(sbi->inline_xattr_slab);
+}
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index de0c600b9cab..938fcd20565d 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -49,7 +49,7 @@ struct f2fs_xattr_entry {
__u8 e_name_index;
__u8 e_name_len;
__le16 e_value_size; /* size of attribute value */
- char e_name[0]; /* attribute name */
+ char e_name[]; /* attribute name */
};
#define XATTR_HDR(ptr) ((struct f2fs_xattr_header *)(ptr))
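"char e_name[0]" is the old GCC zero-length-array idiom; "char e_name[]" is the C99 flexible array member, which means the same thing but lets the compiler and fortified string routines diagnose misuse. A self-contained userspace sketch of allocating such a structure (in-kernel code would typically use struct_size() for the arithmetic):

#include <stdlib.h>
#include <string.h>

struct entry {
	unsigned char name_len;
	char name[];			/* flexible array member, last field */
};

static struct entry *entry_new(const char *s)
{
	size_t len = strlen(s);
	struct entry *e = malloc(sizeof(*e) + len + 1);

	if (!e)
		return NULL;
	e->name_len = (unsigned char)len;
	memcpy(e->name, s, len + 1);	/* storage sized by the caller */
	return e;
}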
@@ -73,7 +73,8 @@ struct f2fs_xattr_entry {
entry = XATTR_NEXT_ENTRY(entry))
#define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
#define XATTR_PADDING_SIZE (sizeof(__u32))
-#define XATTR_SIZE(x,i) (((x) ? VALID_XATTR_BLOCK_SIZE : 0) + \
+#define XATTR_SIZE(i) ((F2FS_I(i)->i_xattr_nid ? \
+ VALID_XATTR_BLOCK_SIZE : 0) + \
(inline_xattr_size(i)))
#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \
VALID_XATTR_BLOCK_SIZE)
@@ -130,6 +131,8 @@ extern int f2fs_setxattr(struct inode *, int, const char *,
extern int f2fs_getxattr(struct inode *, int, const char *, void *,
size_t, struct page *);
extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
+extern int f2fs_init_xattr_caches(struct f2fs_sb_info *);
+extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
#else
#define f2fs_xattr_handlers NULL
@@ -150,6 +153,8 @@ static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
{
return -EOPNOTSUPP;
}
+static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; }
+static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
#endif
#ifdef CONFIG_F2FS_FS_SECURITY
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 7e6fb43f9541..ab53e42a874a 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -368,8 +368,6 @@ bool fs_validate_description(const char *name,
const struct fs_parameter_spec *param, *p2;
bool good = true;
- pr_notice("*** VALIDATE %s ***\n", name);
-
for (param = desc; param->name; param++) {
/* Check for duplicate parameter names */
for (p2 = desc; p2 < param; p2++) {
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index e6b8c49076bb..c070c0d8e3e9 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -139,8 +139,8 @@ static char *inode_name(struct inode *ino)
static char *follow_link(char *link)
{
- int len, n;
char *name, *resolved, *end;
+ int n;
name = __getname();
if (!name) {
@@ -164,15 +164,13 @@ static char *follow_link(char *link)
return name;
*(end + 1) = '\0';
- len = strlen(link) + strlen(name) + 1;
- resolved = kmalloc(len, GFP_KERNEL);
+ resolved = kasprintf(GFP_KERNEL, "%s%s", link, name);
if (resolved == NULL) {
n = -ENOMEM;
goto out_free;
}
- sprintf(resolved, "%s%s", link, name);
__putname(name);
kfree(link);
return resolved;
@@ -921,18 +919,16 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sb->s_d_op = &simple_dentry_operations;
sb->s_maxbytes = MAX_LFS_FILESIZE;
- /* NULL is printed as <NULL> by sprintf: avoid that. */
+ /* NULL is printed as '(null)' by printf(): avoid that. */
if (req_root == NULL)
req_root = "";
err = -ENOMEM;
sb->s_fs_info = host_root_path =
- kmalloc(strlen(root_ino) + strlen(req_root) + 2, GFP_KERNEL);
+ kasprintf(GFP_KERNEL, "%s/%s", root_ino, req_root);
if (host_root_path == NULL)
goto out;
- sprintf(host_root_path, "%s/%s", root_ino, req_root);
-
root_inode = new_inode(sb);
if (!root_inode)
goto out;
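Both hostfs hunks collapse the kmalloc(strlen(a) + strlen(b) + n) + sprintf() pattern into kasprintf(), which measures, allocates, and formats in one call, so the length arithmetic (and its easy-to-miss "+ 2" for the separator and NUL) disappears. A minimal sketch of the idiom; the helper name is illustrative:

#include <linux/slab.h>
#include <linux/string.h>

static char *join_path(const char *root, const char *rel)
{
	/* returns NULL on allocation failure; caller must kfree() */
	return kasprintf(GFP_KERNEL, "%s/%s", root, rel);
}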
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index aff8642f0c2e..991c60c7ffe0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -393,10 +393,9 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
* In this case, we first scan the range and release found pages.
* After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
* maps and global counts. Page faults can not race with truncation
- * in this routine. hugetlb_no_page() prevents page faults in the
- * truncated range. It checks i_size before allocation, and again after
- * with the page table lock for the page held. The same lock must be
- * acquired to unmap a page.
+ * in this routine. hugetlb_no_page() holds i_mmap_rwsem and prevents
+ * page faults in the truncated range by checking i_size. i_size is
+ * modified while holding i_mmap_rwsem.
* hole punch is indicated if end is not LLONG_MAX
* In the hole punch case we scan the range and release found pages.
* Only when releasing a page is the associated region/reserv map
@@ -436,7 +435,15 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
index = page->index;
hash = hugetlb_fault_mutex_hash(mapping, index);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
+ if (!truncate_op) {
+ /*
+ * Only need to hold the fault mutex in the
+ * hole punch case. This prevents races with
+ * page faults. Races are not possible in the
+ * case of truncation.
+ */
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
+ }
/*
* If page is mapped, it was faulted in after being
@@ -450,7 +457,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
if (unlikely(page_mapped(page))) {
BUG_ON(truncate_op);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
i_mmap_lock_write(mapping);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
hugetlb_vmdelete_list(&mapping->i_mmap,
index * pages_per_huge_page(h),
(index + 1) * pages_per_huge_page(h));
@@ -477,7 +486,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
}
unlock_page(page);
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ if (!truncate_op)
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
huge_pagevec_release(&pvec);
cond_resched();
@@ -515,8 +525,8 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
BUG_ON(offset & ~huge_page_mask(h));
pgoff = offset >> PAGE_SHIFT;
- i_size_write(inode, offset);
i_mmap_lock_write(mapping);
+ i_size_write(inode, offset);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
i_mmap_unlock_write(mapping);
@@ -638,7 +648,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
/* addr is the offset within the file (zero based) */
addr = index * hpage_size;
- /* mutex taken here, fault path and hole punch */
+ /*
+ * fault mutex taken here, protects against fault path
+ * and hole punch. inode_lock previously taken protects
+ * against truncation.
+ */
hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
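Taken together, the hugetlbfs hunks establish a consistent lock order, which is why remove_inode_hugepages() now drops and retakes the fault mutex around i_mmap_lock_write() instead of acquiring them out of order. A sketch of the hierarchy as implied by this diff (inferred from the hunks, not quoted from documentation):

/*
 * inode_lock(inode)			truncate/fallocate entry point
 *   i_mmap_lock_write(mapping)		i_size is updated under this,
 *					so hugetlb_no_page() can check
 *					i_size race-free
 *     hugetlb_fault_mutex_table[hash]	per-index mutex; only needed
 *					for hole punch, where faults
 *					can still race
 */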
diff --git a/fs/internal.h b/fs/internal.h
index 4d37912a5587..aa5d45524e87 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -60,7 +60,6 @@ extern int finish_clean_context(struct fs_context *fc);
*/
extern int filename_lookup(int dfd, struct filename *name, unsigned flags,
struct path *path, struct path *root);
-extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct path *);
long do_mknodat(int dfd, const char __user *filename, umode_t mode,
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 7c84c4c027c4..f080f542911b 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -503,7 +503,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
- trace_iomap_releasepage(page->mapping->host, page, 0, 0);
+ trace_iomap_releasepage(page->mapping->host, page_offset(page),
+ PAGE_SIZE);
/*
* mm accommodates an old ext3 case where clean pages might not have had
@@ -520,7 +521,7 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
- trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
+ trace_iomap_invalidatepage(page->mapping->host, offset, len);
/*
* If we are invalidating the entire page, clear the dirty state from it
@@ -1519,7 +1520,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
u64 end_offset;
loff_t offset;
- trace_iomap_writepage(inode, page, 0, 0);
+ trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
/*
* Refuse to write the page out if we are called from reclaim context.
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 23837926c0c5..20dde5aadcdd 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -534,8 +534,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
/*
* We are about to drop our additional submission reference, which
- * might be the last reference to the dio. There are three three
- * different ways we can progress here:
+ * might be the last reference to the dio. There are three different
+ * ways we can progress here:
*
* (a) If this is the last reference we will always complete and free
* the dio ourselves.
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 6dc227b8c47e..4df19c66f597 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -41,14 +41,12 @@ DEFINE_EVENT(iomap_readpage_class, name, \
DEFINE_READPAGE_EVENT(iomap_readpage);
DEFINE_READPAGE_EVENT(iomap_readpages);
-DECLARE_EVENT_CLASS(iomap_page_class,
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
- unsigned int len),
- TP_ARGS(inode, page, off, len),
+DECLARE_EVENT_CLASS(iomap_range_class,
+ TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),
+ TP_ARGS(inode, off, len),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(u64, ino)
- __field(pgoff_t, pgoff)
__field(loff_t, size)
__field(unsigned long, offset)
__field(unsigned int, length)
@@ -56,29 +54,26 @@ DECLARE_EVENT_CLASS(iomap_page_class,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->pgoff = page_offset(page);
__entry->size = i_size_read(inode);
__entry->offset = off;
__entry->length = len;
),
- TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset %lx "
"length %x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
- __entry->pgoff,
__entry->size,
__entry->offset,
__entry->length)
)
-#define DEFINE_PAGE_EVENT(name) \
-DEFINE_EVENT(iomap_page_class, name, \
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
- unsigned int len), \
- TP_ARGS(inode, page, off, len))
-DEFINE_PAGE_EVENT(iomap_writepage);
-DEFINE_PAGE_EVENT(iomap_releasepage);
-DEFINE_PAGE_EVENT(iomap_invalidatepage);
+#define DEFINE_RANGE_EVENT(name) \
+DEFINE_EVENT(iomap_range_class, name, \
+ TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),\
+ TP_ARGS(inode, off, len))
+DEFINE_RANGE_EVENT(iomap_writepage);
+DEFINE_RANGE_EVENT(iomap_releasepage);
+DEFINE_RANGE_EVENT(iomap_invalidatepage);
#define IOMAP_TYPE_STRINGS \
{ IOMAP_HOLE, "HOLE" }, \
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 27373f5792a4..e855d8260433 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -997,9 +997,10 @@ restart_loop:
* journalled data) we need to unmap buffer and clear
* more bits. We also need to be careful about the check
* because the data page mapping can get cleared under
- * out hands, which alse need not to clear more bits
- * because the page and buffers will be freed and can
- * never be reused once we are done with them.
+ * our hands. Note that if mapping == NULL, we don't
+ * need to unmap the buffer because the page is already
+ * detached from the mapping and its buffers can never
+ * be reused.
*/
mapping = READ_ONCE(bh->b_page->mapping);
if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index d0f7a5abd9a9..fc2469a20fed 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -53,6 +53,8 @@ static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc)
kn->iattr->ia_ctime = kn->iattr->ia_atime;
simple_xattrs_init(&kn->iattr->xattrs);
+ atomic_set(&kn->iattr->nr_user_xattrs, 0);
+ atomic_set(&kn->iattr->user_xattr_size, 0);
out_unlock:
ret = kn->iattr;
mutex_unlock(&iattr_mutex);
@@ -303,7 +305,7 @@ int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
if (!attrs)
return -ENOMEM;
- return simple_xattr_set(&attrs->xattrs, name, value, size, flags);
+ return simple_xattr_set(&attrs->xattrs, name, value, size, flags, NULL);
}
static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
@@ -327,6 +329,86 @@ static int kernfs_vfs_xattr_set(const struct xattr_handler *handler,
return kernfs_xattr_set(kn, name, value, size, flags);
}
+static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
+ const char *full_name,
+ struct simple_xattrs *xattrs,
+ const void *value, size_t size, int flags)
+{
+ atomic_t *sz = &kn->iattr->user_xattr_size;
+ atomic_t *nr = &kn->iattr->nr_user_xattrs;
+ ssize_t removed_size;
+ int ret;
+
+ if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) {
+ ret = -ENOSPC;
+ goto dec_count_out;
+ }
+
+ if (atomic_add_return(size, sz) > KERNFS_USER_XATTR_SIZE_LIMIT) {
+ ret = -ENOSPC;
+ goto dec_size_out;
+ }
+
+ ret = simple_xattr_set(xattrs, full_name, value, size, flags,
+ &removed_size);
+
+ if (!ret && removed_size >= 0)
+ size = removed_size;
+ else if (!ret)
+ return 0;
+dec_size_out:
+ atomic_sub(size, sz);
+dec_count_out:
+ atomic_dec(nr);
+ return ret;
+}
+
+static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
+ const char *full_name,
+ struct simple_xattrs *xattrs,
+ const void *value, size_t size, int flags)
+{
+ atomic_t *sz = &kn->iattr->user_xattr_size;
+ atomic_t *nr = &kn->iattr->nr_user_xattrs;
+ ssize_t removed_size;
+ int ret;
+
+ ret = simple_xattr_set(xattrs, full_name, value, size, flags,
+ &removed_size);
+
+ if (removed_size >= 0) {
+ atomic_sub(removed_size, sz);
+ atomic_dec(nr);
+ }
+
+ return ret;
+}
+
+static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *suffix, const void *value,
+ size_t size, int flags)
+{
+ const char *full_name = xattr_full_name(handler, suffix);
+ struct kernfs_node *kn = inode->i_private;
+ struct kernfs_iattrs *attrs;
+
+ if (!(kernfs_root(kn)->flags & KERNFS_ROOT_SUPPORT_USER_XATTR))
+ return -EOPNOTSUPP;
+
+ attrs = kernfs_iattrs(kn);
+ if (!attrs)
+ return -ENOMEM;
+
+ if (value)
+ return kernfs_vfs_user_xattr_add(kn, full_name, &attrs->xattrs,
+ value, size, flags);
+ else
+ return kernfs_vfs_user_xattr_rm(kn, full_name, &attrs->xattrs,
+ value, size, flags);
+
+}
+
static const struct xattr_handler kernfs_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = kernfs_vfs_xattr_get,
@@ -339,8 +421,15 @@ static const struct xattr_handler kernfs_security_xattr_handler = {
.set = kernfs_vfs_xattr_set,
};
+static const struct xattr_handler kernfs_user_xattr_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .get = kernfs_vfs_xattr_get,
+ .set = kernfs_vfs_user_xattr_set,
+};
+
const struct xattr_handler *kernfs_xattr_handlers[] = {
&kernfs_trusted_xattr_handler,
&kernfs_security_xattr_handler,
+ &kernfs_user_xattr_handler,
NULL
};
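kernfs_vfs_user_xattr_add() above uses an optimistic charge-then-check pattern: bump the counter with atomic_inc_return()/atomic_add_return(), compare the post-increment value against the limit, and roll back on failure. That avoids a lock at the cost of a transient overshoot that concurrent callers may see as a spurious -ENOSPC. A minimal sketch of the pattern in isolation:

#include <linux/atomic.h>

static int charge_one(atomic_t *nr, int limit)
{
	if (atomic_inc_return(nr) > limit) {
		atomic_dec(nr);		/* undo the speculative charge */
		return -ENOSPC;
	}
	return 0;			/* caller must uncharge on removal */
}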
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 2f3c51d55261..7ee97ef59184 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -26,6 +26,8 @@ struct kernfs_iattrs {
struct timespec64 ia_ctime;
struct simple_xattrs xattrs;
+ atomic_t nr_user_xattrs;
+ atomic_t user_xattr_size;
};
/* +1 to avoid triggering overflow warning when negating it */
diff --git a/fs/namei.c b/fs/namei.c
index db6565c99825..a320371899cf 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -503,9 +503,10 @@ struct nameidata {
} *stack, internal[EMBEDDED_LEVELS];
struct filename *name;
struct nameidata *saved;
- struct inode *link_inode;
unsigned root_seq;
int dfd;
+ kuid_t dir_uid;
+ umode_t dir_mode;
} __randomize_layout;
static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
@@ -530,52 +531,34 @@ static void restore_nameidata(void)
kfree(now->stack);
}
-static int __nd_alloc_stack(struct nameidata *nd)
+static bool nd_alloc_stack(struct nameidata *nd)
{
struct saved *p;
- if (nd->flags & LOOKUP_RCU) {
- p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
- GFP_ATOMIC);
- if (unlikely(!p))
- return -ECHILD;
- } else {
- p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
- GFP_KERNEL);
- if (unlikely(!p))
- return -ENOMEM;
- }
+ p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
+ nd->flags & LOOKUP_RCU ? GFP_ATOMIC : GFP_KERNEL);
+ if (unlikely(!p))
+ return false;
memcpy(p, nd->internal, sizeof(nd->internal));
nd->stack = p;
- return 0;
+ return true;
}
/**
- * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
- * @path: nameidate to verify
+ * path_connected - Verify that a dentry is below mnt.mnt_root
*
* Rename can sometimes move a file or directory outside of a bind
* mount, path_connected allows those cases to be detected.
*/
-static bool path_connected(const struct path *path)
+static bool path_connected(struct vfsmount *mnt, struct dentry *dentry)
{
- struct vfsmount *mnt = path->mnt;
struct super_block *sb = mnt->mnt_sb;
/* Bind mounts and multi-root filesystems can have disconnected paths */
if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
return true;
- return is_subdir(path->dentry, mnt->mnt_root);
-}
-
-static inline int nd_alloc_stack(struct nameidata *nd)
-{
- if (likely(nd->depth != EMBEDDED_LEVELS))
- return 0;
- if (likely(nd->stack != nd->internal))
- return 0;
- return __nd_alloc_stack(nd);
+ return is_subdir(dentry, mnt->mnt_root);
}
static void drop_links(struct nameidata *nd)
@@ -608,10 +591,9 @@ static void terminate_walk(struct nameidata *nd)
}
/* path_put is needed afterwards regardless of success or failure */
-static bool legitimize_path(struct nameidata *nd,
- struct path *path, unsigned seq)
+static bool __legitimize_path(struct path *path, unsigned seq, unsigned mseq)
{
- int res = __legitimize_mnt(path->mnt, nd->m_seq);
+ int res = __legitimize_mnt(path->mnt, mseq);
if (unlikely(res)) {
if (res > 0)
path->mnt = NULL;
@@ -625,6 +607,12 @@ static bool legitimize_path(struct nameidata *nd,
return !read_seqcount_retry(&path->dentry->d_seq, seq);
}
+static inline bool legitimize_path(struct nameidata *nd,
+ struct path *path, unsigned seq)
+{
+ return __legitimize_path(path, seq, nd->m_seq);
+}
+
static bool legitimize_links(struct nameidata *nd)
{
int i;
@@ -858,25 +846,6 @@ static int set_root(struct nameidata *nd)
return 0;
}
-static void path_put_conditional(struct path *path, struct nameidata *nd)
-{
- dput(path->dentry);
- if (path->mnt != nd->path.mnt)
- mntput(path->mnt);
-}
-
-static inline void path_to_nameidata(const struct path *path,
- struct nameidata *nd)
-{
- if (!(nd->flags & LOOKUP_RCU)) {
- dput(nd->path.dentry);
- if (nd->path.mnt != path->mnt)
- mntput(nd->path.mnt);
- }
- nd->path.mnt = path->mnt;
- nd->path.dentry = path->dentry;
-}
-
static int nd_jump_root(struct nameidata *nd)
{
if (unlikely(nd->flags & LOOKUP_BENEATH))
@@ -969,28 +938,21 @@ int sysctl_protected_regular __read_mostly;
*
* Returns 0 if following the symlink is allowed, -ve on error.
*/
-static inline int may_follow_link(struct nameidata *nd)
+static inline int may_follow_link(struct nameidata *nd, const struct inode *inode)
{
- const struct inode *inode;
- const struct inode *parent;
- kuid_t puid;
-
if (!sysctl_protected_symlinks)
return 0;
/* Allowed if owner and follower match. */
- inode = nd->link_inode;
if (uid_eq(current_cred()->fsuid, inode->i_uid))
return 0;
/* Allowed if parent directory not sticky and world-writable. */
- parent = nd->inode;
- if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
+ if ((nd->dir_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
return 0;
/* Allowed if parent directory and link owner match. */
- puid = parent->i_uid;
- if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
+ if (uid_valid(nd->dir_uid) && uid_eq(nd->dir_uid, inode->i_uid))
return 0;
if (nd->flags & LOOKUP_RCU)
@@ -1113,63 +1075,6 @@ static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
return 0;
}
-static __always_inline
-const char *get_link(struct nameidata *nd)
-{
- struct saved *last = nd->stack + nd->depth - 1;
- struct dentry *dentry = last->link.dentry;
- struct inode *inode = nd->link_inode;
- int error;
- const char *res;
-
- if (unlikely(nd->flags & LOOKUP_NO_SYMLINKS))
- return ERR_PTR(-ELOOP);
-
- if (!(nd->flags & LOOKUP_RCU)) {
- touch_atime(&last->link);
- cond_resched();
- } else if (atime_needs_update(&last->link, inode)) {
- if (unlikely(unlazy_walk(nd)))
- return ERR_PTR(-ECHILD);
- touch_atime(&last->link);
- }
-
- error = security_inode_follow_link(dentry, inode,
- nd->flags & LOOKUP_RCU);
- if (unlikely(error))
- return ERR_PTR(error);
-
- nd->last_type = LAST_BIND;
- res = READ_ONCE(inode->i_link);
- if (!res) {
- const char * (*get)(struct dentry *, struct inode *,
- struct delayed_call *);
- get = inode->i_op->get_link;
- if (nd->flags & LOOKUP_RCU) {
- res = get(NULL, inode, &last->done);
- if (res == ERR_PTR(-ECHILD)) {
- if (unlikely(unlazy_walk(nd)))
- return ERR_PTR(-ECHILD);
- res = get(dentry, inode, &last->done);
- }
- } else {
- res = get(dentry, inode, &last->done);
- }
- if (IS_ERR_OR_NULL(res))
- return res;
- }
- if (*res == '/') {
- error = nd_jump_root(nd);
- if (unlikely(error))
- return ERR_PTR(error);
- while (unlikely(*++res == '/'))
- ;
- }
- if (!*res)
- res = NULL;
- return res;
-}
-
/*
* follow_up - Find the mountpoint of path's vfsmount
*
@@ -1203,19 +1108,59 @@ int follow_up(struct path *path)
}
EXPORT_SYMBOL(follow_up);
+static bool choose_mountpoint_rcu(struct mount *m, const struct path *root,
+ struct path *path, unsigned *seqp)
+{
+ while (mnt_has_parent(m)) {
+ struct dentry *mountpoint = m->mnt_mountpoint;
+
+ m = m->mnt_parent;
+ if (unlikely(root->dentry == mountpoint &&
+ root->mnt == &m->mnt))
+ break;
+ if (mountpoint != m->mnt.mnt_root) {
+ path->mnt = &m->mnt;
+ path->dentry = mountpoint;
+ *seqp = read_seqcount_begin(&mountpoint->d_seq);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool choose_mountpoint(struct mount *m, const struct path *root,
+ struct path *path)
+{
+ bool found;
+
+ rcu_read_lock();
+ while (1) {
+ unsigned seq, mseq = read_seqbegin(&mount_lock);
+
+ found = choose_mountpoint_rcu(m, root, path, &seq);
+ if (unlikely(!found)) {
+ if (!read_seqretry(&mount_lock, mseq))
+ break;
+ } else {
+ if (likely(__legitimize_path(path, seq, mseq)))
+ break;
+ rcu_read_unlock();
+ path_put(path);
+ rcu_read_lock();
+ }
+ }
+ rcu_read_unlock();
+ return found;
+}
+
/*
* Perform an automount
* - return -EISDIR to tell follow_managed() to stop and return the path we
* were called with.
*/
-static int follow_automount(struct path *path, struct nameidata *nd,
- bool *need_mntput)
+static int follow_automount(struct path *path, int *count, unsigned lookup_flags)
{
- struct vfsmount *mnt;
- int err;
-
- if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
- return -EREMOTE;
+ struct dentry *dentry = path->dentry;
/* We don't want to mount if someone's just doing a stat -
* unless they're stat'ing a directory and appended a '/' to
@@ -1228,138 +1173,91 @@ static int follow_automount(struct path *path, struct nameidata *nd,
* as being automount points. These will need the attentions
* of the daemon to instantiate them before they can be used.
*/
- if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+ if (!(lookup_flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
- path->dentry->d_inode)
+ dentry->d_inode)
return -EISDIR;
- nd->total_link_count++;
- if (nd->total_link_count >= 40)
+ if (count && (*count)++ >= MAXSYMLINKS)
return -ELOOP;
- mnt = path->dentry->d_op->d_automount(path);
- if (IS_ERR(mnt)) {
- /*
- * The filesystem is allowed to return -EISDIR here to indicate
- * it doesn't want to automount. For instance, autofs would do
- * this so that its userspace daemon can mount on this dentry.
- *
- * However, we can only permit this if it's a terminal point in
- * the path being looked up; if it wasn't then the remainder of
- * the path is inaccessible and we should say so.
- */
- if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
- return -EREMOTE;
- return PTR_ERR(mnt);
- }
-
- if (!mnt) /* mount collision */
- return 0;
-
- if (!*need_mntput) {
- /* lock_mount() may release path->mnt on error */
- mntget(path->mnt);
- *need_mntput = true;
- }
- err = finish_automount(mnt, path);
-
- switch (err) {
- case -EBUSY:
- /* Someone else made a mount here whilst we were busy */
- return 0;
- case 0:
- path_put(path);
- path->mnt = mnt;
- path->dentry = dget(mnt->mnt_root);
- return 0;
- default:
- return err;
- }
-
+ return finish_automount(dentry->d_op->d_automount(path), path);
}
/*
- * Handle a dentry that is managed in some way.
- * - Flagged for transit management (autofs)
- * - Flagged as mountpoint
- * - Flagged as automount point
- *
- * This may only be called in refwalk mode.
- * On success path->dentry is known positive.
- *
- * Serialization is taken care of in namespace.c
+ * mount traversal - out-of-line part. One note on ->d_flags accesses -
+ * dentries are pinned but not locked here, so negative dentry can go
+ * positive right under us. Use of smp_load_acquire() provides a barrier
+ * sufficient for ->d_inode and ->d_flags consistency.
*/
-static int follow_managed(struct path *path, struct nameidata *nd)
+static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
+ int *count, unsigned lookup_flags)
{
- struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
- unsigned flags;
+ struct vfsmount *mnt = path->mnt;
bool need_mntput = false;
int ret = 0;
- /* Given that we're not holding a lock here, we retain the value in a
- * local variable for each dentry as we look at it so that we don't see
- * the components of that value change under us */
- while (flags = smp_load_acquire(&path->dentry->d_flags),
- unlikely(flags & DCACHE_MANAGED_DENTRY)) {
+ while (flags & DCACHE_MANAGED_DENTRY) {
/* Allow the filesystem to manage the transit without i_mutex
* being held. */
if (flags & DCACHE_MANAGE_TRANSIT) {
- BUG_ON(!path->dentry->d_op);
- BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(path, false);
flags = smp_load_acquire(&path->dentry->d_flags);
if (ret < 0)
break;
}
- /* Transit to a mounted filesystem. */
- if (flags & DCACHE_MOUNTED) {
+ if (flags & DCACHE_MOUNTED) { // something's mounted on it..
struct vfsmount *mounted = lookup_mnt(path);
- if (mounted) {
+ if (mounted) { // ... in our namespace
dput(path->dentry);
if (need_mntput)
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
+ // here we know it's positive
+ flags = path->dentry->d_flags;
need_mntput = true;
continue;
}
-
- /* Something is mounted on this dentry in another
- * namespace and/or whatever was mounted there in this
- * namespace got unmounted before lookup_mnt() could
- * get it */
}
- /* Handle an automount point */
- if (flags & DCACHE_NEED_AUTOMOUNT) {
- ret = follow_automount(path, nd, &need_mntput);
- if (ret < 0)
- break;
- continue;
- }
+ if (!(flags & DCACHE_NEED_AUTOMOUNT))
+ break;
- /* We didn't change the current path point */
- break;
+ // uncovered automount point
+ ret = follow_automount(path, count, lookup_flags);
+ flags = smp_load_acquire(&path->dentry->d_flags);
+ if (ret < 0)
+ break;
}
- if (need_mntput) {
- if (path->mnt == mnt)
- mntput(path->mnt);
- if (unlikely(nd->flags & LOOKUP_NO_XDEV))
- ret = -EXDEV;
- else
- nd->flags |= LOOKUP_JUMPED;
- }
- if (ret == -EISDIR || !ret)
- ret = 1;
- if (ret > 0 && unlikely(d_flags_negative(flags)))
+ if (ret == -EISDIR)
+ ret = 0;
+ // possible if you race with several mount --move
+ if (need_mntput && path->mnt == mnt)
+ mntput(path->mnt);
+ if (!ret && unlikely(d_flags_negative(flags)))
ret = -ENOENT;
- if (unlikely(ret < 0))
- path_put_conditional(path, nd);
+ *jumped = need_mntput;
return ret;
}
+static inline int traverse_mounts(struct path *path, bool *jumped,
+ int *count, unsigned lookup_flags)
+{
+ unsigned flags = smp_load_acquire(&path->dentry->d_flags);
+
+ /* fastpath */
+ if (likely(!(flags & DCACHE_MANAGED_DENTRY))) {
+ *jumped = false;
+ if (unlikely(d_flags_negative(flags)))
+ return -ENOENT;
+ return 0;
+ }
+ return __traverse_mounts(path, flags, jumped, count, lookup_flags);
+}
+
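The comment above __traverse_mounts() leans on smp_load_acquire() pairing
with the store that turns a negative dentry positive. A hypothetical
userspace sketch of that publish/observe pattern, using C11 release/acquire
ordering and invented names:

#include <stdatomic.h>
#include <stdio.h>

struct obj { void *payload; atomic_uint flags; };
#define OBJ_POSITIVE 1u

static struct obj o;
static int inode;

static void publish(struct obj *obj, void *p)
{
	obj->payload = p;			/* ->d_inode analogue */
	atomic_store_explicit(&obj->flags, OBJ_POSITIVE,
			      memory_order_release);	/* ->d_flags analogue */
}

static void *lookup(struct obj *obj)
{
	unsigned f = atomic_load_explicit(&obj->flags, memory_order_acquire);

	if (!(f & OBJ_POSITIVE))
		return NULL;		/* negative: don't touch payload */
	return obj->payload;		/* ordered after the flags load */
}

int main(void)
{
	publish(&o, &inode);
	printf("%s\n", lookup(&o) ? "positive" : "negative");
	return 0;
}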
int follow_down_one(struct path *path)
{
struct vfsmount *mounted;
@@ -1376,11 +1274,22 @@ int follow_down_one(struct path *path)
}
EXPORT_SYMBOL(follow_down_one);
-static inline int managed_dentry_rcu(const struct path *path)
+/*
+ * Follow down to the covering mount currently visible to userspace. At each
+ * point, the filesystem owning that dentry may be queried as to whether the
+ * caller is permitted to proceed or not.
+ */
+int follow_down(struct path *path)
{
- return (path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
- path->dentry->d_op->d_manage(path, true) : 0;
+ struct vfsmount *mnt = path->mnt;
+ bool jumped;
+ int ret = traverse_mounts(path, &jumped, NULL, 0);
+
+ if (path->mnt != mnt)
+ mntput(mnt);
+ return ret;
}
+EXPORT_SYMBOL(follow_down);
/*
* Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
@@ -1389,204 +1298,88 @@ static inline int managed_dentry_rcu(const struct path *path)
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
struct inode **inode, unsigned *seqp)
{
+ struct dentry *dentry = path->dentry;
+ unsigned int flags = dentry->d_flags;
+
+ if (likely(!(flags & DCACHE_MANAGED_DENTRY)))
+ return true;
+
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ return false;
+
for (;;) {
- struct mount *mounted;
/*
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
*/
- switch (managed_dentry_rcu(path)) {
- case -ECHILD:
- default:
- return false;
- case -EISDIR:
- return true;
- case 0:
- break;
- }
-
- if (!d_mountpoint(path->dentry))
- return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
-
- mounted = __lookup_mnt(path->mnt, path->dentry);
- if (!mounted)
- break;
- if (unlikely(nd->flags & LOOKUP_NO_XDEV))
- return false;
- path->mnt = &mounted->mnt;
- path->dentry = mounted->mnt.mnt_root;
- nd->flags |= LOOKUP_JUMPED;
- *seqp = read_seqcount_begin(&path->dentry->d_seq);
- /*
- * Update the inode too. We don't need to re-check the
- * dentry sequence number here after this d_inode read,
- * because a mount-point is always pinned.
- */
- *inode = path->dentry->d_inode;
- }
- return !read_seqretry(&mount_lock, nd->m_seq) &&
- !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
-}
-
-static int follow_dotdot_rcu(struct nameidata *nd)
-{
- struct inode *inode = nd->inode;
-
- while (1) {
- if (path_equal(&nd->path, &nd->root)) {
- if (unlikely(nd->flags & LOOKUP_BENEATH))
- return -ECHILD;
- break;
+ if (unlikely(flags & DCACHE_MANAGE_TRANSIT)) {
+ int res = dentry->d_op->d_manage(path, true);
+ if (res)
+ return res == -EISDIR;
+ flags = dentry->d_flags;
}
- if (nd->path.dentry != nd->path.mnt->mnt_root) {
- struct dentry *old = nd->path.dentry;
- struct dentry *parent = old->d_parent;
- unsigned seq;
- inode = parent->d_inode;
- seq = read_seqcount_begin(&parent->d_seq);
- if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
- return -ECHILD;
- nd->path.dentry = parent;
- nd->seq = seq;
- if (unlikely(!path_connected(&nd->path)))
- return -ECHILD;
- break;
- } else {
- struct mount *mnt = real_mount(nd->path.mnt);
- struct mount *mparent = mnt->mnt_parent;
- struct dentry *mountpoint = mnt->mnt_mountpoint;
- struct inode *inode2 = mountpoint->d_inode;
- unsigned seq = read_seqcount_begin(&mountpoint->d_seq);
- if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
- return -ECHILD;
- if (&mparent->mnt == nd->path.mnt)
- break;
- if (unlikely(nd->flags & LOOKUP_NO_XDEV))
- return -ECHILD;
- /* we know that mountpoint was pinned */
- nd->path.dentry = mountpoint;
- nd->path.mnt = &mparent->mnt;
- inode = inode2;
- nd->seq = seq;
+ if (flags & DCACHE_MOUNTED) {
+ struct mount *mounted = __lookup_mnt(path->mnt, dentry);
+ if (mounted) {
+ path->mnt = &mounted->mnt;
+ dentry = path->dentry = mounted->mnt.mnt_root;
+ nd->flags |= LOOKUP_JUMPED;
+ *seqp = read_seqcount_begin(&dentry->d_seq);
+ *inode = dentry->d_inode;
+ /*
+ * We don't need to re-check ->d_seq after this
+ * ->d_inode read - there will be an RCU delay
+ * between mount hash removal and ->mnt_root
+ * becoming unpinned.
+ */
+ flags = dentry->d_flags;
+ continue;
+ }
+ if (read_seqretry(&mount_lock, nd->m_seq))
+ return false;
}
+ return !(flags & DCACHE_NEED_AUTOMOUNT);
}
- while (unlikely(d_mountpoint(nd->path.dentry))) {
- struct mount *mounted;
- mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
- if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
- return -ECHILD;
- if (!mounted)
- break;
- if (unlikely(nd->flags & LOOKUP_NO_XDEV))
- return -ECHILD;
- nd->path.mnt = &mounted->mnt;
- nd->path.dentry = mounted->mnt.mnt_root;
- inode = nd->path.dentry->d_inode;
- nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
- }
- nd->inode = inode;
- return 0;
}
-/*
- * Follow down to the covering mount currently visible to userspace. At each
- * point, the filesystem owning that dentry may be queried as to whether the
- * caller is permitted to proceed or not.
- */
-int follow_down(struct path *path)
+static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
+ struct path *path, struct inode **inode,
+ unsigned int *seqp)
{
- unsigned managed;
+ bool jumped;
int ret;
- while (managed = READ_ONCE(path->dentry->d_flags),
- unlikely(managed & DCACHE_MANAGED_DENTRY)) {
- /* Allow the filesystem to manage the transit without i_mutex
- * being held.
- *
- * We indicate to the filesystem if someone is trying to mount
- * something here. This gives autofs the chance to deny anyone
- * other than its daemon the right to mount on its
- * superstructure.
- *
- * The filesystem may sleep at this point.
- */
- if (managed & DCACHE_MANAGE_TRANSIT) {
- BUG_ON(!path->dentry->d_op);
- BUG_ON(!path->dentry->d_op->d_manage);
- ret = path->dentry->d_op->d_manage(path, false);
- if (ret < 0)
- return ret == -EISDIR ? 0 : ret;
- }
-
- /* Transit to a mounted filesystem. */
- if (managed & DCACHE_MOUNTED) {
- struct vfsmount *mounted = lookup_mnt(path);
- if (!mounted)
- break;
- dput(path->dentry);
- mntput(path->mnt);
- path->mnt = mounted;
- path->dentry = dget(mounted->mnt_root);
- continue;
- }
-
- /* Don't handle automount points here */
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(follow_down);
-
-/*
- * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
- */
-static void follow_mount(struct path *path)
-{
- while (d_mountpoint(path->dentry)) {
- struct vfsmount *mounted = lookup_mnt(path);
- if (!mounted)
- break;
- dput(path->dentry);
- mntput(path->mnt);
- path->mnt = mounted;
- path->dentry = dget(mounted->mnt_root);
+ path->mnt = nd->path.mnt;
+ path->dentry = dentry;
+ if (nd->flags & LOOKUP_RCU) {
+ unsigned int seq = *seqp;
+ if (unlikely(!*inode))
+ return -ENOENT;
+ if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
+ return 0;
+ if (unlazy_child(nd, dentry, seq))
+ return -ECHILD;
+ // *path might've been clobbered by __follow_mount_rcu()
+ path->mnt = nd->path.mnt;
+ path->dentry = dentry;
}
-}
-
-static int path_parent_directory(struct path *path)
-{
- struct dentry *old = path->dentry;
- /* rare case of legitimate dget_parent()... */
- path->dentry = dget_parent(path->dentry);
- dput(old);
- if (unlikely(!path_connected(path)))
- return -ENOENT;
- return 0;
-}
-
-static int follow_dotdot(struct nameidata *nd)
-{
- while (1) {
- if (path_equal(&nd->path, &nd->root)) {
- if (unlikely(nd->flags & LOOKUP_BENEATH))
- return -EXDEV;
- break;
- }
- if (nd->path.dentry != nd->path.mnt->mnt_root) {
- int ret = path_parent_directory(&nd->path);
- if (ret)
- return ret;
- break;
- }
- if (!follow_up(&nd->path))
- break;
+ ret = traverse_mounts(path, &jumped, &nd->total_link_count, nd->flags);
+ if (jumped) {
if (unlikely(nd->flags & LOOKUP_NO_XDEV))
- return -EXDEV;
+ ret = -EXDEV;
+ else
+ nd->flags |= LOOKUP_JUMPED;
}
- follow_mount(&nd->path);
- nd->inode = nd->path.dentry->d_inode;
- return 0;
+ if (unlikely(ret)) {
+ dput(path->dentry);
+ if (path->mnt != nd->path.mnt)
+ mntput(path->mnt);
+ } else {
+ *inode = d_backing_inode(path->dentry);
+ *seqp = 0; /* out of RCU mode, so the value doesn't matter */
+ }
+ return ret;
}
/*
@@ -1643,14 +1436,12 @@ static struct dentry *__lookup_hash(const struct qstr *name,
return dentry;
}
-static int lookup_fast(struct nameidata *nd,
- struct path *path, struct inode **inode,
- unsigned *seqp)
+static struct dentry *lookup_fast(struct nameidata *nd,
+ struct inode **inode,
+ unsigned *seqp)
{
- struct vfsmount *mnt = nd->path.mnt;
struct dentry *dentry, *parent = nd->path.dentry;
int status = 1;
- int err;
/*
* Rename seqlock is not required here because in the off chance
@@ -1659,12 +1450,11 @@ static int lookup_fast(struct nameidata *nd,
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
- bool negative;
dentry = __d_lookup_rcu(parent, &nd->last, &seq);
if (unlikely(!dentry)) {
if (unlazy_walk(nd))
- return -ECHILD;
- return 0;
+ return ERR_PTR(-ECHILD);
+ return NULL;
}
/*
@@ -1672,9 +1462,8 @@ static int lookup_fast(struct nameidata *nd,
* the dentry name information from lookup.
*/
*inode = d_backing_inode(dentry);
- negative = d_is_negative(dentry);
if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
- return -ECHILD;
+ return ERR_PTR(-ECHILD);
/*
* This sequence count validates that the parent had no
@@ -1684,46 +1473,30 @@ static int lookup_fast(struct nameidata *nd,
* enough, we can use __read_seqcount_retry here.
*/
if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
- return -ECHILD;
+ return ERR_PTR(-ECHILD);
*seqp = seq;
status = d_revalidate(dentry, nd->flags);
- if (likely(status > 0)) {
- /*
- * Note: do negative dentry check after revalidation in
- * case that drops it.
- */
- if (unlikely(negative))
- return -ENOENT;
- path->mnt = mnt;
- path->dentry = dentry;
- if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
- return 1;
- }
+ if (likely(status > 0))
+ return dentry;
if (unlazy_child(nd, dentry, seq))
- return -ECHILD;
+ return ERR_PTR(-ECHILD);
if (unlikely(status == -ECHILD))
/* we'd been told to redo it in non-rcu mode */
status = d_revalidate(dentry, nd->flags);
} else {
dentry = __d_lookup(parent, &nd->last);
if (unlikely(!dentry))
- return 0;
+ return NULL;
status = d_revalidate(dentry, nd->flags);
}
if (unlikely(status <= 0)) {
if (!status)
d_invalidate(dentry);
dput(dentry);
- return status;
+ return ERR_PTR(status);
}
-
- path->mnt = mnt;
- path->dentry = dentry;
- err = follow_managed(path, nd);
- if (likely(err > 0))
- *inode = d_backing_inode(path->dentry);
- return err;
+ return dentry;
}
/* Fast lookup failed, do it the slow way */
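lookup_fast() now folds three outcomes into a single pointer: a dentry on
success, NULL for a clean miss, and an errno encoded via ERR_PTR() on
failure. A self-contained userspace sketch of that encoding convention —
the helpers mirror the kernel's, but this is an illustration, not the
kernel implementation:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors live in the top 4095 values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *find(int key)
{
	static int slot = 42;

	if (key < 0)
		return ERR_PTR(-EINVAL);	/* hard failure */
	if (key == 0)
		return NULL;			/* clean miss */
	return &slot;				/* hit */
}

int main(void)
{
	void *p = find(-1);

	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p));
	else if (!p)
		printf("miss\n");
	else
		printf("hit\n");
	return 0;
}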
@@ -1788,81 +1561,107 @@ static inline int may_lookup(struct nameidata *nd)
return inode_permission(nd->inode, MAY_EXEC);
}
-static inline int handle_dots(struct nameidata *nd, int type)
+static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
{
- if (type == LAST_DOTDOT) {
- int error = 0;
+ if (unlikely(nd->total_link_count++ >= MAXSYMLINKS))
+ return -ELOOP;
- if (!nd->root.mnt) {
- error = set_root(nd);
- if (error)
- return error;
- }
- if (nd->flags & LOOKUP_RCU)
- error = follow_dotdot_rcu(nd);
- else
- error = follow_dotdot(nd);
- if (error)
- return error;
+ if (likely(nd->depth != EMBEDDED_LEVELS))
+ return 0;
+ if (likely(nd->stack != nd->internal))
+ return 0;
+ if (likely(nd_alloc_stack(nd)))
+ return 0;
- if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
- /*
- * If there was a racing rename or mount along our
- * path, then we can't be sure that ".." hasn't jumped
- * above nd->root (and so userspace should retry or use
- * some fallback).
- */
- smp_rmb();
- if (unlikely(__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq)))
- return -EAGAIN;
- if (unlikely(__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq)))
- return -EAGAIN;
- }
+ if (nd->flags & LOOKUP_RCU) {
+ // we need to grab link before we do unlazy. And we can't skip
+ // unlazy even if we fail to grab the link - cleanup needs it
+ bool grabbed_link = legitimize_path(nd, link, seq);
+
+ if (unlazy_walk(nd) != 0 || !grabbed_link)
+ return -ECHILD;
+
+ if (nd_alloc_stack(nd))
+ return 0;
}
- return 0;
+ return -ENOMEM;
}
-static int pick_link(struct nameidata *nd, struct path *link,
- struct inode *inode, unsigned seq)
+enum {WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4};
+
+static const char *pick_link(struct nameidata *nd, struct path *link,
+ struct inode *inode, unsigned seq, int flags)
{
- int error;
struct saved *last;
- if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
- path_to_nameidata(link, nd);
- return -ELOOP;
- }
- if (!(nd->flags & LOOKUP_RCU)) {
- if (link->mnt == nd->path.mnt)
- mntget(link->mnt);
- }
- error = nd_alloc_stack(nd);
+ const char *res;
+ int error = reserve_stack(nd, link, seq);
+
if (unlikely(error)) {
- if (error == -ECHILD) {
- if (unlikely(!legitimize_path(nd, link, seq))) {
- drop_links(nd);
- nd->depth = 0;
- nd->flags &= ~LOOKUP_RCU;
- nd->path.mnt = NULL;
- nd->path.dentry = NULL;
- rcu_read_unlock();
- } else if (likely(unlazy_walk(nd)) == 0)
- error = nd_alloc_stack(nd);
- }
- if (error) {
+ if (!(nd->flags & LOOKUP_RCU))
path_put(link);
- return error;
- }
+ return ERR_PTR(error);
}
-
last = nd->stack + nd->depth++;
last->link = *link;
clear_delayed_call(&last->done);
- nd->link_inode = inode;
last->seq = seq;
- return 1;
-}
-enum {WALK_FOLLOW = 1, WALK_MORE = 2};
+ if (flags & WALK_TRAILING) {
+ error = may_follow_link(nd, inode);
+ if (unlikely(error))
+ return ERR_PTR(error);
+ }
+
+ if (unlikely(nd->flags & LOOKUP_NO_SYMLINKS))
+ return ERR_PTR(-ELOOP);
+
+ if (!(nd->flags & LOOKUP_RCU)) {
+ touch_atime(&last->link);
+ cond_resched();
+ } else if (atime_needs_update(&last->link, inode)) {
+ if (unlikely(unlazy_walk(nd)))
+ return ERR_PTR(-ECHILD);
+ touch_atime(&last->link);
+ }
+
+ error = security_inode_follow_link(link->dentry, inode,
+ nd->flags & LOOKUP_RCU);
+ if (unlikely(error))
+ return ERR_PTR(error);
+
+ res = READ_ONCE(inode->i_link);
+ if (!res) {
+ const char * (*get)(struct dentry *, struct inode *,
+ struct delayed_call *);
+ get = inode->i_op->get_link;
+ if (nd->flags & LOOKUP_RCU) {
+ res = get(NULL, inode, &last->done);
+ if (res == ERR_PTR(-ECHILD)) {
+ if (unlikely(unlazy_walk(nd)))
+ return ERR_PTR(-ECHILD);
+ res = get(link->dentry, inode, &last->done);
+ }
+ } else {
+ res = get(link->dentry, inode, &last->done);
+ }
+ if (!res)
+ goto all_done;
+ if (IS_ERR(res))
+ return res;
+ }
+ if (*res == '/') {
+ error = nd_jump_root(nd);
+ if (unlikely(error))
+ return ERR_PTR(error);
+ while (unlikely(*++res == '/'))
+ ;
+ }
+ if (*res)
+ return res;
+all_done: // pure jump
+ put_link(nd);
+ return NULL;
+}
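pick_link() charges every symlink against nd->total_link_count and fails
with -ELOOP once MAXSYMLINKS (40) is reached. A rough userspace analogue
that chases a link chain by hand under the same cap — a sketch only: it
ignores relative-target handling and per-component resolution:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MAXSYMLINKS 40

static int chase(char *path, size_t size)
{
	char tmp[PATH_MAX];
	int depth;

	for (depth = 0; depth < MAXSYMLINKS; depth++) {
		ssize_t n = readlink(path, tmp, sizeof(tmp) - 1);
		if (n < 0)	/* EINVAL: not a symlink, chain ends here */
			return errno == EINVAL ? 0 : -errno;
		tmp[n] = '\0';
		strncpy(path, tmp, size - 1);
		path[size - 1] = '\0';
	}
	return -ELOOP;
}

int main(int argc, char **argv)
{
	char buf[PATH_MAX];

	if (argc < 2)
		return 1;
	strncpy(buf, argv[1], sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	if (chase(buf, sizeof(buf)) == -ELOOP)
		fprintf(stderr, "too many links\n");
	else
		printf("%s\n", buf);
	return 0;
}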
/*
* Do we need to follow links? We _really_ want to be able
@@ -1870,63 +1669,187 @@ enum {WALK_FOLLOW = 1, WALK_MORE = 2};
* so we keep a cache of "no, this doesn't need follow_link"
* for the common case.
*/
-static inline int step_into(struct nameidata *nd, struct path *path,
- int flags, struct inode *inode, unsigned seq)
+static const char *step_into(struct nameidata *nd, int flags,
+ struct dentry *dentry, struct inode *inode, unsigned seq)
{
- if (!(flags & WALK_MORE) && nd->depth)
- put_link(nd);
- if (likely(!d_is_symlink(path->dentry)) ||
- !(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) {
+ struct path path;
+ int err = handle_mounts(nd, dentry, &path, &inode, &seq);
+
+ if (err < 0)
+ return ERR_PTR(err);
+ if (likely(!d_is_symlink(path.dentry)) ||
+ ((flags & WALK_TRAILING) && !(nd->flags & LOOKUP_FOLLOW)) ||
+ (flags & WALK_NOFOLLOW)) {
/* not a symlink or should not follow */
- path_to_nameidata(path, nd);
+ if (!(nd->flags & LOOKUP_RCU)) {
+ dput(nd->path.dentry);
+ if (nd->path.mnt != path.mnt)
+ mntput(nd->path.mnt);
+ }
+ nd->path = path;
nd->inode = inode;
nd->seq = seq;
- return 0;
+ return NULL;
}
- /* make sure that d_is_symlink above matches inode */
if (nd->flags & LOOKUP_RCU) {
- if (read_seqcount_retry(&path->dentry->d_seq, seq))
- return -ECHILD;
+ /* make sure that d_is_symlink above matches inode */
+ if (read_seqcount_retry(&path.dentry->d_seq, seq))
+ return ERR_PTR(-ECHILD);
+ } else {
+ if (path.mnt == nd->path.mnt)
+ mntget(path.mnt);
}
- return pick_link(nd, path, inode, seq);
+ return pick_link(nd, &path, inode, seq, flags);
}
-static int walk_component(struct nameidata *nd, int flags)
+static struct dentry *follow_dotdot_rcu(struct nameidata *nd,
+ struct inode **inodep,
+ unsigned *seqp)
{
- struct path path;
+ struct dentry *parent, *old;
+
+ if (path_equal(&nd->path, &nd->root))
+ goto in_root;
+ if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
+ struct path path;
+ unsigned seq;
+ if (!choose_mountpoint_rcu(real_mount(nd->path.mnt),
+ &nd->root, &path, &seq))
+ goto in_root;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ return ERR_PTR(-ECHILD);
+ nd->path = path;
+ nd->inode = path.dentry->d_inode;
+ nd->seq = seq;
+ if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
+ return ERR_PTR(-ECHILD);
+ /* we know that mountpoint was pinned */
+ }
+ old = nd->path.dentry;
+ parent = old->d_parent;
+ *inodep = parent->d_inode;
+ *seqp = read_seqcount_begin(&parent->d_seq);
+ if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
+ return ERR_PTR(-ECHILD);
+ if (unlikely(!path_connected(nd->path.mnt, parent)))
+ return ERR_PTR(-ECHILD);
+ return parent;
+in_root:
+ if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
+ return ERR_PTR(-ECHILD);
+ if (unlikely(nd->flags & LOOKUP_BENEATH))
+ return ERR_PTR(-ECHILD);
+ return NULL;
+}
+
+static struct dentry *follow_dotdot(struct nameidata *nd,
+ struct inode **inodep,
+ unsigned *seqp)
+{
+ struct dentry *parent;
+
+ if (path_equal(&nd->path, &nd->root))
+ goto in_root;
+ if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
+ struct path path;
+
+ if (!choose_mountpoint(real_mount(nd->path.mnt),
+ &nd->root, &path))
+ goto in_root;
+ path_put(&nd->path);
+ nd->path = path;
+ nd->inode = path.dentry->d_inode;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ return ERR_PTR(-EXDEV);
+ }
+ /* rare case of legitimate dget_parent()... */
+ parent = dget_parent(nd->path.dentry);
+ if (unlikely(!path_connected(nd->path.mnt, parent))) {
+ dput(parent);
+ return ERR_PTR(-ENOENT);
+ }
+ *seqp = 0;
+ *inodep = parent->d_inode;
+ return parent;
+
+in_root:
+ if (unlikely(nd->flags & LOOKUP_BENEATH))
+ return ERR_PTR(-EXDEV);
+ dget(nd->path.dentry);
+ return NULL;
+}
+
+static const char *handle_dots(struct nameidata *nd, int type)
+{
+ if (type == LAST_DOTDOT) {
+ const char *error = NULL;
+ struct dentry *parent;
+ struct inode *inode;
+ unsigned seq;
+
+ if (!nd->root.mnt) {
+ error = ERR_PTR(set_root(nd));
+ if (error)
+ return error;
+ }
+ if (nd->flags & LOOKUP_RCU)
+ parent = follow_dotdot_rcu(nd, &inode, &seq);
+ else
+ parent = follow_dotdot(nd, &inode, &seq);
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ if (unlikely(!parent))
+ error = step_into(nd, WALK_NOFOLLOW,
+ nd->path.dentry, nd->inode, nd->seq);
+ else
+ error = step_into(nd, WALK_NOFOLLOW,
+ parent, inode, seq);
+ if (unlikely(error))
+ return error;
+
+ if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
+ /*
+ * If there was a racing rename or mount along our
+ * path, then we can't be sure that ".." hasn't jumped
+ * above nd->root (and so userspace should retry or use
+ * some fallback).
+ */
+ smp_rmb();
+ if (unlikely(__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq)))
+ return ERR_PTR(-EAGAIN);
+ if (unlikely(__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq)))
+ return ERR_PTR(-EAGAIN);
+ }
+ }
+ return NULL;
+}
+
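The -EAGAIN exits above are what scoped lookups surface to userspace: with
openat2(2) (kernel 5.6+), a racing rename or mount aborts the walk and the
caller is expected to retry. A sketch, assuming <linux/openat2.h> and a
libc that exposes SYS_openat2; the path is illustrative:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/openat2.h>

int main(void)
{
	struct open_how how;
	long fd;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_BENEATH;	/* ".." may not escape the dirfd */

	do {
		fd = syscall(SYS_openat2, AT_FDCWD, "subdir/file",
			     &how, sizeof(how));
	} while (fd < 0 && errno == EAGAIN);	/* racing rename/mount */

	if (fd < 0)
		perror("openat2");
	else
		close(fd);
	return 0;
}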
+static const char *walk_component(struct nameidata *nd, int flags)
+{
+ struct dentry *dentry;
struct inode *inode;
unsigned seq;
- int err;
/*
* "." and ".." are special - ".." especially so because it has
* to be able to know about the current root directory and
* parent relationships.
*/
if (unlikely(nd->last_type != LAST_NORM)) {
- err = handle_dots(nd, nd->last_type);
if (!(flags & WALK_MORE) && nd->depth)
put_link(nd);
- return err;
+ return handle_dots(nd, nd->last_type);
}
- err = lookup_fast(nd, &path, &inode, &seq);
- if (unlikely(err <= 0)) {
- if (err < 0)
- return err;
- path.dentry = lookup_slow(&nd->last, nd->path.dentry,
- nd->flags);
- if (IS_ERR(path.dentry))
- return PTR_ERR(path.dentry);
-
- path.mnt = nd->path.mnt;
- err = follow_managed(&path, nd);
- if (unlikely(err < 0))
- return err;
-
- seq = 0; /* we are already out of RCU mode */
- inode = d_backing_inode(path.dentry);
+ dentry = lookup_fast(nd, &inode, &seq);
+ if (IS_ERR(dentry))
+ return ERR_CAST(dentry);
+ if (unlikely(!dentry)) {
+ dentry = lookup_slow(&nd->last, nd->path.dentry, nd->flags);
+ if (IS_ERR(dentry))
+ return ERR_CAST(dentry);
}
-
- return step_into(nd, &path, flags, inode, seq);
+ if (!(flags & WALK_MORE) && nd->depth)
+ put_link(nd);
+ return step_into(nd, flags, dentry, inode, seq);
}
/*
@@ -2167,8 +2090,11 @@ static inline u64 hash_name(const void *salt, const char *name)
*/
static int link_path_walk(const char *name, struct nameidata *nd)
{
+ int depth = 0; // depth <= nd->depth
int err;
+ nd->last_type = LAST_ROOT;
+ nd->flags |= LOOKUP_PARENT;
if (IS_ERR(name))
return PTR_ERR(name);
while (*name=='/')
@@ -2178,6 +2104,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
/* At this point we know we have a real path component. */
for(;;) {
+ const char *link;
u64 hash_len;
int type;
@@ -2227,36 +2154,27 @@ static int link_path_walk(const char *name, struct nameidata *nd)
} while (unlikely(*name == '/'));
if (unlikely(!*name)) {
OK:
- /* pathname body, done */
- if (!nd->depth)
- return 0;
- name = nd->stack[nd->depth - 1].name;
- /* trailing symlink, done */
- if (!name)
+ /* pathname or trailing symlink, done */
+ if (!depth) {
+ nd->dir_uid = nd->inode->i_uid;
+ nd->dir_mode = nd->inode->i_mode;
+ nd->flags &= ~LOOKUP_PARENT;
return 0;
+ }
/* last component of nested symlink */
- err = walk_component(nd, WALK_FOLLOW);
+ name = nd->stack[--depth].name;
+ link = walk_component(nd, 0);
} else {
/* not the last component */
- err = walk_component(nd, WALK_FOLLOW | WALK_MORE);
+ link = walk_component(nd, WALK_MORE);
}
- if (err < 0)
- return err;
-
- if (err) {
- const char *s = get_link(nd);
-
- if (IS_ERR(s))
- return PTR_ERR(s);
- err = 0;
- if (unlikely(!s)) {
- /* jumped */
- put_link(nd);
- } else {
- nd->stack[nd->depth - 1].name = name;
- name = s;
- continue;
- }
+ if (unlikely(link)) {
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ /* a symlink to follow */
+ nd->stack[depth++].name = name;
+ name = link;
+ continue;
}
if (unlikely(!d_can_lookup(nd->path.dentry))) {
if (nd->flags & LOOKUP_RCU) {
@@ -2279,8 +2197,7 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
if (flags & LOOKUP_RCU)
rcu_read_lock();
- nd->last_type = LAST_ROOT; /* if there are only slashes... */
- nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
+ nd->flags = flags | LOOKUP_JUMPED;
nd->depth = 0;
nd->m_seq = __read_seqcount_begin(&mount_lock.seqcount);
@@ -2370,54 +2287,20 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
return s;
}
-static const char *trailing_symlink(struct nameidata *nd)
-{
- const char *s;
- int error = may_follow_link(nd);
- if (unlikely(error))
- return ERR_PTR(error);
- nd->flags |= LOOKUP_PARENT;
- nd->stack[0].name = NULL;
- s = get_link(nd);
- return s ? s : "";
-}
-
-static inline int lookup_last(struct nameidata *nd)
+static inline const char *lookup_last(struct nameidata *nd)
{
if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
- nd->flags &= ~LOOKUP_PARENT;
- return walk_component(nd, 0);
+ return walk_component(nd, WALK_TRAILING);
}
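lookup_last() keys off nd->last.name[nd->last.len]: a byte past the final
component means the caller wrote a trailing '/', which forces both
LOOKUP_FOLLOW and LOOKUP_DIRECTORY. The visible effect, demonstrated with
throwaway names:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* set up a symlink "link" pointing at a regular file */
	close(open("file", O_CREAT | O_WRONLY, 0644));
	symlink("file", "link");

	if (open("link/", O_RDONLY) < 0)
		perror("open(\"link/\")");	/* ENOTDIR: "/" demands a dir */
	return 0;
}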
static int handle_lookup_down(struct nameidata *nd)
{
- struct path path = nd->path;
- struct inode *inode = nd->inode;
- unsigned seq = nd->seq;
- int err;
-
- if (nd->flags & LOOKUP_RCU) {
- /*
- * don't bother with unlazy_walk on failure - we are
- * at the very beginning of walk, so we lose nothing
- * if we simply redo everything in non-RCU mode
- */
- if (unlikely(!__follow_mount_rcu(nd, &path, &inode, &seq)))
- return -ECHILD;
- } else {
- dget(path.dentry);
- err = follow_managed(&path, nd);
- if (unlikely(err < 0))
- return err;
- inode = d_backing_inode(path.dentry);
- seq = 0;
- }
- path_to_nameidata(&path, nd);
- nd->inode = inode;
- nd->seq = seq;
- return 0;
+ if (!(nd->flags & LOOKUP_RCU))
+ dget(nd->path.dentry);
+ return PTR_ERR(step_into(nd, WALK_NOFOLLOW,
+ nd->path.dentry, nd->inode, nd->seq));
}
/* Returns 0 and nd will be valid on success; Returns error otherwise. */
@@ -2432,16 +2315,19 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
s = ERR_PTR(err);
}
- while (!(err = link_path_walk(s, nd))
- && ((err = lookup_last(nd)) > 0)) {
- s = trailing_symlink(nd);
- }
+ while (!(err = link_path_walk(s, nd)) &&
+ (s = lookup_last(nd)) != NULL)
+ ;
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY)
if (!d_can_lookup(nd->path.dentry))
err = -ENOTDIR;
+ if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
+ err = handle_lookup_down(nd);
+ nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
+ }
if (!err) {
*path = nd->path;
nd->path.mnt = NULL;
@@ -2470,7 +2356,8 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
if (likely(!retval))
- audit_inode(name, path->dentry, 0);
+ audit_inode(name, path->dentry,
+ flags & LOOKUP_MOUNTPOINT ? AUDIT_INODE_NOEVAL : 0);
restore_nameidata();
putname(name);
return retval;
@@ -2718,24 +2605,23 @@ int path_pts(struct path *path)
/* Find something mounted on "pts" in the same directory as
* the input path.
*/
- struct dentry *child, *parent;
- struct qstr this;
- int ret;
-
- ret = path_parent_directory(path);
- if (ret)
- return ret;
+ struct dentry *parent = dget_parent(path->dentry);
+ struct dentry *child;
+ struct qstr this = QSTR_INIT("pts", 3);
- parent = path->dentry;
- this.name = "pts";
- this.len = 3;
+ if (unlikely(!path_connected(path->mnt, parent))) {
+ dput(parent);
+ return -ENOENT;
+ }
+ dput(path->dentry);
+ path->dentry = parent;
child = d_hash_and_lookup(parent, &this);
if (!child)
return -ENOENT;
path->dentry = child;
dput(parent);
- follow_mount(path);
+ follow_down(path);
return 0;
}
#endif
@@ -2748,88 +2634,6 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
}
EXPORT_SYMBOL(user_path_at_empty);
-/**
- * path_mountpoint - look up a path to be umounted
- * @nd: lookup context
- * @flags: lookup flags
- * @path: pointer to container for result
- *
- * Look up the given name, but don't attempt to revalidate the last component.
- * Returns 0 and "path" will be valid on success; Returns error otherwise.
- */
-static int
-path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
-{
- const char *s = path_init(nd, flags);
- int err;
-
- while (!(err = link_path_walk(s, nd)) &&
- (err = lookup_last(nd)) > 0) {
- s = trailing_symlink(nd);
- }
- if (!err && (nd->flags & LOOKUP_RCU))
- err = unlazy_walk(nd);
- if (!err)
- err = handle_lookup_down(nd);
- if (!err) {
- *path = nd->path;
- nd->path.mnt = NULL;
- nd->path.dentry = NULL;
- }
- terminate_walk(nd);
- return err;
-}
-
-static int
-filename_mountpoint(int dfd, struct filename *name, struct path *path,
- unsigned int flags)
-{
- struct nameidata nd;
- int error;
- if (IS_ERR(name))
- return PTR_ERR(name);
- set_nameidata(&nd, dfd, name);
- error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
- if (unlikely(error == -ECHILD))
- error = path_mountpoint(&nd, flags, path);
- if (unlikely(error == -ESTALE))
- error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
- if (likely(!error))
- audit_inode(name, path->dentry, AUDIT_INODE_NOEVAL);
- restore_nameidata();
- putname(name);
- return error;
-}
-
-/**
- * user_path_mountpoint_at - lookup a path from userland in order to umount it
- * @dfd: directory file descriptor
- * @name: pathname from userland
- * @flags: lookup flags
- * @path: pointer to container to hold result
- *
- * A umount is a special case for path walking. We're not actually interested
- * in the inode in this situation, and ESTALE errors can be a problem. We
- * simply want track down the dentry and vfsmount attached at the mountpoint
- * and avoid revalidating the last component.
- *
- * Returns 0 and populates "path" on success.
- */
-int
-user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
- struct path *path)
-{
- return filename_mountpoint(dfd, getname(name), path, flags);
-}
-
-int
-kern_path_mountpoint(int dfd, const char *name, struct path *path,
- unsigned int flags)
-{
- return filename_mountpoint(dfd, getname_kernel(name), path, flags);
-}
-EXPORT_SYMBOL(kern_path_mountpoint);
-
int __check_sticky(struct inode *dir, struct inode *inode)
{
kuid_t fsuid = current_fsuid();
@@ -3127,18 +2931,14 @@ static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t m
*
* Returns an error code otherwise.
*/
-static int atomic_open(struct nameidata *nd, struct dentry *dentry,
- struct path *path, struct file *file,
- const struct open_flags *op,
- int open_flag, umode_t mode)
+static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
+ struct file *file,
+ int open_flag, umode_t mode)
{
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
struct inode *dir = nd->path.dentry->d_inode;
int error;
- if (!(~open_flag & (O_EXCL | O_CREAT))) /* both O_EXCL and O_CREAT */
- open_flag &= ~O_TRUNC;
-
if (nd->flags & LOOKUP_DIRECTORY)
open_flag |= O_DIRECTORY;
@@ -3149,19 +2949,10 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
d_lookup_done(dentry);
if (!error) {
if (file->f_mode & FMODE_OPENED) {
- /*
- * We didn't have the inode before the open, so check open
- * permission here.
- */
- int acc_mode = op->acc_mode;
- if (file->f_mode & FMODE_CREATED) {
- WARN_ON(!(open_flag & O_CREAT));
- fsnotify_create(dir, dentry);
- acc_mode = 0;
+ if (unlikely(dentry != file->f_path.dentry)) {
+ dput(dentry);
+ dentry = dget(file->f_path.dentry);
}
- error = may_open(&file->f_path, acc_mode, open_flag);
- if (WARN_ON(error > 0))
- error = -EINVAL;
} else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
} else {
@@ -3169,19 +2960,15 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
dput(dentry);
dentry = file->f_path.dentry;
}
- if (file->f_mode & FMODE_CREATED)
- fsnotify_create(dir, dentry);
- if (unlikely(d_is_negative(dentry))) {
+ if (unlikely(d_is_negative(dentry)))
error = -ENOENT;
- } else {
- path->dentry = dentry;
- path->mnt = nd->path.mnt;
- return 0;
- }
}
}
- dput(dentry);
- return error;
+ if (error) {
+ dput(dentry);
+ dentry = ERR_PTR(error);
+ }
+ return dentry;
}
/*
@@ -3199,10 +2986,9 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
*
* An error code is returned on failure.
*/
-static int lookup_open(struct nameidata *nd, struct path *path,
- struct file *file,
- const struct open_flags *op,
- bool got_write)
+static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
+ const struct open_flags *op,
+ bool got_write)
{
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
@@ -3213,7 +2999,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
if (unlikely(IS_DEADDIR(dir_inode)))
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
file->f_mode &= ~FMODE_CREATED;
dentry = d_lookup(dir, &nd->last);
@@ -3221,7 +3007,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
if (!dentry) {
dentry = d_alloc_parallel(dir, &nd->last, &wq);
if (IS_ERR(dentry))
- return PTR_ERR(dentry);
+ return dentry;
}
if (d_in_lookup(dentry))
break;
@@ -3237,7 +3023,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
}
if (dentry->d_inode) {
/* Cached positive dentry: will open in f_op->open */
- goto out_no_open;
+ return dentry;
}
/*
@@ -3249,41 +3035,27 @@ static int lookup_open(struct nameidata *nd, struct path *path,
* Another problem is returning the "right" error value (e.g. for an
* O_EXCL open we want to return EEXIST not EROFS).
*/
+ if (unlikely(!got_write))
+ open_flag &= ~O_TRUNC;
if (open_flag & O_CREAT) {
+ if (open_flag & O_EXCL)
+ open_flag &= ~O_TRUNC;
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
- if (unlikely(!got_write)) {
- create_error = -EROFS;
- open_flag &= ~O_CREAT;
- if (open_flag & (O_EXCL | O_TRUNC))
- goto no_open;
- /* No side effects, safe to clear O_CREAT */
- } else {
+ if (likely(got_write))
create_error = may_o_create(&nd->path, dentry, mode);
- if (create_error) {
- open_flag &= ~O_CREAT;
- if (open_flag & O_EXCL)
- goto no_open;
- }
- }
- } else if ((open_flag & (O_TRUNC|O_WRONLY|O_RDWR)) &&
- unlikely(!got_write)) {
- /*
- * No O_CREATE -> atomicity not a requirement -> fall
- * back to lookup + open
- */
- goto no_open;
+ else
+ create_error = -EROFS;
}
-
+ if (create_error)
+ open_flag &= ~O_CREAT;
if (dir_inode->i_op->atomic_open) {
- error = atomic_open(nd, dentry, path, file, op, open_flag,
- mode);
- if (unlikely(error == -ENOENT) && create_error)
- error = create_error;
- return error;
+ dentry = atomic_open(nd, dentry, file, open_flag, mode);
+ if (unlikely(create_error) && dentry == ERR_PTR(-ENOENT))
+ dentry = ERR_PTR(create_error);
+ return dentry;
}
-no_open:
if (d_in_lookup(dentry)) {
struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry,
nd->flags);
@@ -3310,78 +3082,60 @@ no_open:
open_flag & O_EXCL);
if (error)
goto out_dput;
- fsnotify_create(dir_inode, dentry);
}
if (unlikely(create_error) && !dentry->d_inode) {
error = create_error;
goto out_dput;
}
-out_no_open:
- path->dentry = dentry;
- path->mnt = nd->path.mnt;
- return 0;
+ return dentry;
out_dput:
dput(dentry);
- return error;
+ return ERR_PTR(error);
}
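lookup_open() now drops O_TRUNC whenever O_CREAT|O_EXCL is set, since an
O_EXCL open can only succeed on the file it just created, which is empty
by construction. A quick demonstration (run it twice; the second run hits
EEXIST):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("new-file", O_CREAT | O_EXCL | O_TRUNC | O_WRONLY, 0644);

	if (fd >= 0) {
		printf("created (necessarily empty)\n");
		close(fd);
	} else {
		perror("open");		/* EEXIST once the file exists */
	}
	return 0;
}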
-/*
- * Handle the last step of open()
- */
-static int do_last(struct nameidata *nd,
+static const char *open_last_lookups(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
- kuid_t dir_uid = nd->inode->i_uid;
- umode_t dir_mode = nd->inode->i_mode;
int open_flag = op->open_flag;
- bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
- int acc_mode = op->acc_mode;
unsigned seq;
struct inode *inode;
- struct path path;
+ struct dentry *dentry;
+ const char *res;
int error;
- nd->flags &= ~LOOKUP_PARENT;
nd->flags |= op->intent;
if (nd->last_type != LAST_NORM) {
- error = handle_dots(nd, nd->last_type);
- if (unlikely(error))
- return error;
- goto finish_open;
+ if (nd->depth)
+ put_link(nd);
+ return handle_dots(nd, nd->last_type);
}
if (!(open_flag & O_CREAT)) {
if (nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
/* we _can_ be in RCU mode here */
- error = lookup_fast(nd, &path, &inode, &seq);
- if (likely(error > 0))
+ dentry = lookup_fast(nd, &inode, &seq);
+ if (IS_ERR(dentry))
+ return ERR_CAST(dentry);
+ if (likely(dentry))
goto finish_lookup;
- if (error < 0)
- return error;
-
- BUG_ON(nd->inode != dir->d_inode);
BUG_ON(nd->flags & LOOKUP_RCU);
} else {
/* create side of things */
- /*
- * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
- * has been cleared when we got to the last component we are
- * about to look up
- */
- error = complete_walk(nd);
- if (error)
- return error;
-
+ if (nd->flags & LOOKUP_RCU) {
+ error = unlazy_walk(nd);
+ if (unlikely(error))
+ return ERR_PTR(error);
+ }
audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
/* trailing slashes? */
if (unlikely(nd->last.name[nd->last.len]))
- return -EISDIR;
+ return ERR_PTR(-EISDIR);
}
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
@@ -3398,108 +3152,90 @@ static int do_last(struct nameidata *nd,
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
- error = lookup_open(nd, &path, file, op, got_write);
+ dentry = lookup_open(nd, file, op, got_write);
+ if (!IS_ERR(dentry) && (file->f_mode & FMODE_CREATED))
+ fsnotify_create(dir->d_inode, dentry);
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
inode_unlock_shared(dir->d_inode);
- if (error)
- goto out;
-
- if (file->f_mode & FMODE_OPENED) {
- if ((file->f_mode & FMODE_CREATED) ||
- !S_ISREG(file_inode(file)->i_mode))
- will_truncate = false;
-
- audit_inode(nd->name, file->f_path.dentry, 0);
- goto opened;
- }
+ if (got_write)
+ mnt_drop_write(nd->path.mnt);
- if (file->f_mode & FMODE_CREATED) {
- /* Don't check for write permission, don't truncate */
- open_flag &= ~O_TRUNC;
- will_truncate = false;
- acc_mode = 0;
- path_to_nameidata(&path, nd);
- goto finish_open_created;
- }
+ if (IS_ERR(dentry))
+ return ERR_CAST(dentry);
- /*
- * If atomic_open() acquired write access it is dropped now due to
- * possible mount and symlink following (this might be optimized away if
- * necessary...)
- */
- if (got_write) {
- mnt_drop_write(nd->path.mnt);
- got_write = false;
+ if (file->f_mode & (FMODE_OPENED | FMODE_CREATED)) {
+ dput(nd->path.dentry);
+ nd->path.dentry = dentry;
+ return NULL;
}
- error = follow_managed(&path, nd);
- if (unlikely(error < 0))
- return error;
+finish_lookup:
+ if (nd->depth)
+ put_link(nd);
+ res = step_into(nd, WALK_TRAILING, dentry, inode, seq);
+ if (unlikely(res))
+ nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
+ return res;
+}
- /*
- * create/update audit record if it already exists.
- */
- audit_inode(nd->name, path.dentry, 0);
+/*
+ * Handle the last step of open()
+ */
+static int do_open(struct nameidata *nd,
+ struct file *file, const struct open_flags *op)
+{
+ int open_flag = op->open_flag;
+ bool do_truncate;
+ int acc_mode;
+ int error;
- if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
- path_to_nameidata(&path, nd);
- return -EEXIST;
+ if (!(file->f_mode & (FMODE_OPENED | FMODE_CREATED))) {
+ error = complete_walk(nd);
+ if (error)
+ return error;
}
-
- seq = 0; /* out of RCU mode, so the value doesn't matter */
- inode = d_backing_inode(path.dentry);
-finish_lookup:
- error = step_into(nd, &path, 0, inode, seq);
- if (unlikely(error))
- return error;
-finish_open:
- /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
- error = complete_walk(nd);
- if (error)
- return error;
- audit_inode(nd->name, nd->path.dentry, 0);
+ if (!(file->f_mode & FMODE_CREATED))
+ audit_inode(nd->name, nd->path.dentry, 0);
if (open_flag & O_CREAT) {
- error = -EISDIR;
+ if ((open_flag & O_EXCL) && !(file->f_mode & FMODE_CREATED))
+ return -EEXIST;
if (d_is_dir(nd->path.dentry))
- goto out;
- error = may_create_in_sticky(dir_mode, dir_uid,
+ return -EISDIR;
+ error = may_create_in_sticky(nd->dir_mode, nd->dir_uid,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
- goto out;
+ return error;
}
- error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
- goto out;
- if (!d_is_reg(nd->path.dentry))
- will_truncate = false;
+ return -ENOTDIR;
- if (will_truncate) {
+ do_truncate = false;
+ acc_mode = op->acc_mode;
+ if (file->f_mode & FMODE_CREATED) {
+ /* Don't check for write permission, don't truncate */
+ open_flag &= ~O_TRUNC;
+ acc_mode = 0;
+ } else if (d_is_reg(nd->path.dentry) && open_flag & O_TRUNC) {
error = mnt_want_write(nd->path.mnt);
if (error)
- goto out;
- got_write = true;
+ return error;
+ do_truncate = true;
}
-finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
- if (error)
- goto out;
- BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
- error = vfs_open(&nd->path, file);
- if (error)
- goto out;
-opened:
- error = ima_file_check(file, op->acc_mode);
- if (!error && will_truncate)
+ if (!error && !(file->f_mode & FMODE_OPENED))
+ error = vfs_open(&nd->path, file);
+ if (!error)
+ error = ima_file_check(file, op->acc_mode);
+ if (!error && do_truncate)
error = handle_truncate(file);
-out:
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
}
- if (got_write)
+ if (do_truncate)
mnt_drop_write(nd->path.mnt);
return error;
}
@@ -3604,10 +3340,10 @@ static struct file *path_openat(struct nameidata *nd,
} else {
const char *s = path_init(nd, flags);
while (!(error = link_path_walk(s, nd)) &&
- (error = do_last(nd, file, op)) > 0) {
- nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
- s = trailing_symlink(nd);
- }
+ (s = open_last_lookups(nd, file, op)) != NULL)
+ ;
+ if (!error)
+ error = do_open(nd, file, op);
terminate_walk(nd);
}
if (likely(!error)) {
diff --git a/fs/namespace.c b/fs/namespace.c
index 85b5f7bea82e..a28e4db075ed 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1669,7 +1669,7 @@ int ksys_umount(char __user *name, int flags)
struct path path;
struct mount *mnt;
int retval;
- int lookup_flags = 0;
+ int lookup_flags = LOOKUP_MOUNTPOINT;
if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
return -EINVAL;
@@ -1680,7 +1680,7 @@ int ksys_umount(char __user *name, int flags)
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
- retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
+ retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
mnt = real_mount(path.mnt);
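The switch to LOOKUP_MOUNTPOINT keeps umount(2) semantics intact: the last
component is not revalidated and, with UMOUNT_NOFOLLOW, a trailing symlink
is not followed. The userspace call is unchanged (requires privilege; the
path is illustrative):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* don't follow a trailing symlink when resolving the mountpoint */
	if (umount2("/mnt/usb", UMOUNT_NOFOLLOW) < 0)
		perror("umount2");
	return 0;
}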
@@ -2697,45 +2697,32 @@ static int do_move_mount_old(struct path *path, const char *old_name)
/*
* add a mount into a namespace's mount tree
*/
-static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
+static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
+ struct path *path, int mnt_flags)
{
- struct mountpoint *mp;
- struct mount *parent;
- int err;
+ struct mount *parent = real_mount(path->mnt);
mnt_flags &= ~MNT_INTERNAL_FLAGS;
- mp = lock_mount(path);
- if (IS_ERR(mp))
- return PTR_ERR(mp);
-
- parent = real_mount(path->mnt);
- err = -EINVAL;
if (unlikely(!check_mnt(parent))) {
/* that's acceptable only for automounts done in private ns */
if (!(mnt_flags & MNT_SHRINKABLE))
- goto unlock;
+ return -EINVAL;
/* ... and for those we'd better have mountpoint still alive */
if (!parent->mnt_ns)
- goto unlock;
+ return -EINVAL;
}
/* Refuse the same filesystem on the same mount point */
- err = -EBUSY;
if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
path->mnt->mnt_root == path->dentry)
- goto unlock;
+ return -EBUSY;
- err = -EINVAL;
if (d_is_symlink(newmnt->mnt.mnt_root))
- goto unlock;
+ return -EINVAL;
newmnt->mnt.mnt_flags = mnt_flags;
- err = graft_tree(newmnt, parent, mp);
-
-unlock:
- unlock_mount(mp);
- return err;
+ return graft_tree(newmnt, parent, mp);
}
static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
@@ -2748,6 +2735,7 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
unsigned int mnt_flags)
{
struct vfsmount *mnt;
+ struct mountpoint *mp;
struct super_block *sb = fc->root->d_sb;
int error;
@@ -2768,7 +2756,13 @@ static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
mnt_warn_timestamp_expiry(mountpoint, mnt);
- error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags);
+ mp = lock_mount(mountpoint);
+ if (IS_ERR(mp)) {
+ mntput(mnt);
+ return PTR_ERR(mp);
+ }
+ error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
+ unlock_mount(mp);
if (error < 0)
mntput(mnt);
return error;
@@ -2829,23 +2823,63 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
int finish_automount(struct vfsmount *m, struct path *path)
{
- struct mount *mnt = real_mount(m);
+ struct dentry *dentry = path->dentry;
+ struct mountpoint *mp;
+ struct mount *mnt;
int err;
+
+ if (!m)
+ return 0;
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+
+ mnt = real_mount(m);
/* The new mount record should have at least 2 refs to prevent it being
* expired before we get a chance to add it
*/
BUG_ON(mnt_get_count(mnt) < 2);
if (m->mnt_sb == path->mnt->mnt_sb &&
- m->mnt_root == path->dentry) {
+ m->mnt_root == dentry) {
err = -ELOOP;
- goto fail;
+ goto discard;
}
- err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
- if (!err)
- return 0;
-fail:
+ /*
+ * we don't want to use lock_mount() - in this case finding something
+ * that overmounts our mountpoint means "quietly drop what
+ * we've got", not "try to mount it on top".
+ */
+ inode_lock(dentry->d_inode);
+ namespace_lock();
+ if (unlikely(cant_mount(dentry))) {
+ err = -ENOENT;
+ goto discard_locked;
+ }
+ rcu_read_lock();
+ if (unlikely(__lookup_mnt(path->mnt, dentry))) {
+ rcu_read_unlock();
+ err = 0;
+ goto discard_locked;
+ }
+ rcu_read_unlock();
+ mp = get_mountpoint(dentry);
+ if (IS_ERR(mp)) {
+ err = PTR_ERR(mp);
+ goto discard_locked;
+ }
+
+ err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
+ unlock_mount(mp);
+ if (unlikely(err))
+ goto discard;
+ mntput(m);
+ return 0;
+
+discard_locked:
+ namespace_unlock();
+ inode_unlock(dentry->d_inode);
+discard:
/* remove m from any expiration list it may be on */
if (!list_empty(&mnt->mnt_expire)) {
namespace_lock();
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 690221747b47..d1a0e2c8b1b4 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -476,7 +476,7 @@ static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
err = ext_tree_remove(bl, true, 0, LLONG_MAX);
WARN_ON(err);
- kfree(bl);
+ kfree_rcu(bl, bl_layout.plh_rcu);
}
static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
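Switching to kfree_rcu() defers the free until all current RCU readers are
done, which is what lets the callback code walk plh_layouts under
rcu_read_lock(). A loose userspace analogue with liburcu (memb flavor,
linked with -lurcu) — a sketch under that assumption; the kernel queues
the free asynchronously rather than blocking in synchronize_rcu():

#include <stdlib.h>
#include <urcu.h>

struct layout { int data; };
static struct layout *current_layout;

static void replace_layout(struct layout *newp)
{
	struct layout *old = current_layout;	/* updater-side read */

	rcu_assign_pointer(current_layout, newp);
	synchronize_rcu();	/* wait out readers that might still see old */
	free(old);		/* kfree_rcu() analogue */
}

int main(void)
{
	rcu_register_thread();
	replace_layout(calloc(1, sizeof(struct layout)));
	rcu_unregister_thread();
	return 0;
}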
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 549350259840..6a2033131c06 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -127,7 +127,9 @@ extern __be32 nfs4_callback_sequence(void *argp, void *resp,
#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9
#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
-#define RCA4_TYPE_MASK_ALL 0xf31f
+#define PNFS_FF_RCA4_TYPE_MASK_READ 16
+#define PNFS_FF_RCA4_TYPE_MASK_RW 17
+#define RCA4_TYPE_MASK_ALL 0x3f31f
struct cb_recallanyargs {
uint32_t craa_objs_to_keep;
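The widened RCA4_TYPE_MASK_ALL simply adds the two new flexfiles recall
bits to the old mask; a trivial standalone check of the arithmetic:

#include <assert.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned old_mask = 0xf31f;
	unsigned new_mask = old_mask | BIT(16) | BIT(17);	/* READ, RW */

	assert(new_mask == 0x3f31f);
	return 0;
}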
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index cd4c6bc81cae..e61dbc9b86ae 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -121,31 +121,31 @@ out:
*/
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
const nfs4_stateid *stateid)
+ __must_hold(RCU)
{
struct nfs_server *server;
struct inode *inode;
struct pnfs_layout_hdr *lo;
+ rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- list_for_each_entry(lo, &server->layouts, plh_layouts) {
+ list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
+ if (!pnfs_layout_is_valid(lo))
+ continue;
if (stateid != NULL &&
!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
continue;
+ if (!nfs_sb_active(server->super))
+ continue;
inode = igrab(lo->plh_inode);
- if (!inode)
- return ERR_PTR(-EAGAIN);
- if (!nfs_sb_active(inode->i_sb)) {
- rcu_read_unlock();
- spin_unlock(&clp->cl_lock);
- iput(inode);
- spin_lock(&clp->cl_lock);
- rcu_read_lock();
- return ERR_PTR(-EAGAIN);
- }
- return inode;
+ rcu_read_unlock();
+ if (inode)
+ return inode;
+ nfs_sb_deactive(server->super);
+ return ERR_PTR(-EAGAIN);
}
}
-
+ rcu_read_unlock();
return ERR_PTR(-ENOENT);
}
@@ -163,28 +163,25 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
struct inode *inode;
struct pnfs_layout_hdr *lo;
+ rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- list_for_each_entry(lo, &server->layouts, plh_layouts) {
+ list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
nfsi = NFS_I(lo->plh_inode);
if (nfs_compare_fh(fh, &nfsi->fh))
continue;
if (nfsi->layout != lo)
continue;
+ if (!nfs_sb_active(server->super))
+ continue;
inode = igrab(lo->plh_inode);
- if (!inode)
- return ERR_PTR(-EAGAIN);
- if (!nfs_sb_active(inode->i_sb)) {
- rcu_read_unlock();
- spin_unlock(&clp->cl_lock);
- iput(inode);
- spin_lock(&clp->cl_lock);
- rcu_read_lock();
- return ERR_PTR(-EAGAIN);
- }
- return inode;
+ rcu_read_unlock();
+ if (inode)
+ return inode;
+ nfs_sb_deactive(server->super);
+ return ERR_PTR(-EAGAIN);
}
}
-
+ rcu_read_unlock();
return ERR_PTR(-ENOENT);
}
@@ -194,14 +191,9 @@ static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
{
struct inode *inode;
- spin_lock(&clp->cl_lock);
- rcu_read_lock();
inode = nfs_layout_find_inode_by_stateid(clp, stateid);
if (inode == ERR_PTR(-ENOENT))
inode = nfs_layout_find_inode_by_fh(clp, fh);
- rcu_read_unlock();
- spin_unlock(&clp->cl_lock);
-
return inode;
}
@@ -280,7 +272,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
goto unlock;
}
- pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
+ pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
&args->cbl_range,
be32_to_cpu(args->cbl_stateid.seqid))) {
@@ -605,6 +597,7 @@ __be32 nfs4_callback_recallany(void *argp, void *resp,
struct cb_recallanyargs *args = argp;
__be32 status;
fmode_t flags = 0;
+ bool schedule_manager = false;
status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
if (!cps->clp) /* set in cb_sequence */
@@ -627,6 +620,18 @@ __be32 nfs4_callback_recallany(void *argp, void *resp,
if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
pnfs_recall_all_layouts(cps->clp);
+
+ if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
+ set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
+ schedule_manager = true;
+ }
+ if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
+ set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
+ schedule_manager = true;
+ }
+ if (schedule_manager)
+ nfs4_schedule_state_manager(cps->clp);
+
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1865322de142..816e1427f17e 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -378,6 +378,18 @@ nfs_inode_detach_delegation(struct inode *inode)
}
static void
+nfs_update_delegation_cred(struct nfs_delegation *delegation,
+ const struct cred *cred)
+{
+ const struct cred *old;
+
+ if (cred_fscmp(delegation->cred, cred) != 0) {
+ old = xchg(&delegation->cred, get_cred(cred));
+ put_cred(old);
+ }
+}
+
+static void
nfs_update_inplace_delegation(struct nfs_delegation *delegation,
const struct nfs_delegation *update)
{
@@ -385,8 +397,14 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
delegation->stateid.seqid = update->stateid.seqid;
smp_wmb();
delegation->type = update->type;
- if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+ delegation->pagemod_limit = update->pagemod_limit;
+ if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+ delegation->change_attr = update->change_attr;
+ nfs_update_delegation_cred(delegation, update->cred);
+ /* smp_mb__before_atomic() is implicit due to xchg() */
+ clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
atomic_long_inc(&nfs_active_delegations);
+ }
}
}
@@ -545,21 +563,11 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
return ret;
}
-/**
- * nfs_client_return_marked_delegations - return previously marked delegations
- * @clp: nfs_client to process
- *
- * Note that this function is designed to be called by the state
- * manager thread. For this reason, it cannot flush the dirty data,
- * since that could deadlock in case of a state recovery error.
- *
- * Returns zero on success, or a negative errno value.
- */
-int nfs_client_return_marked_delegations(struct nfs_client *clp)
+static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ void __always_unused *data)
{
struct nfs_delegation *delegation;
struct nfs_delegation *prev;
- struct nfs_server *server;
struct inode *inode;
struct inode *place_holder = NULL;
struct nfs_delegation *place_holder_deleg = NULL;
@@ -569,78 +577,79 @@ restart:
/*
* To avoid quadratic looping we hold a reference
* to an inode place_holder. Each time we restart, we
- * list nfs_servers from the server of that inode, and
- * delegation in the server from the delegations of that
- * inode.
+ * resume listing the server's delegations from
+ * the delegation of that inode.
* prev is an RCU-protected pointer to a delegation which
* wasn't marked for return and might be a good choice for
* the next place_holder.
*/
- rcu_read_lock();
prev = NULL;
+ delegation = NULL;
+ rcu_read_lock();
if (place_holder)
- server = NFS_SERVER(place_holder);
- else
- server = list_entry_rcu(clp->cl_superblocks.next,
- struct nfs_server, client_link);
- list_for_each_entry_from_rcu(server, &clp->cl_superblocks, client_link) {
- delegation = NULL;
- if (place_holder && server == NFS_SERVER(place_holder))
- delegation = rcu_dereference(NFS_I(place_holder)->delegation);
- if (!delegation || delegation != place_holder_deleg)
- delegation = list_entry_rcu(server->delegations.next,
- struct nfs_delegation, super_list);
- list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) {
- struct inode *to_put = NULL;
-
- if (!nfs_delegation_need_return(delegation)) {
+ delegation = rcu_dereference(NFS_I(place_holder)->delegation);
+ if (!delegation || delegation != place_holder_deleg)
+ delegation = list_entry_rcu(server->delegations.next,
+ struct nfs_delegation, super_list);
+ list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) {
+ struct inode *to_put = NULL;
+
+ if (test_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags))
+ continue;
+ if (!nfs_delegation_need_return(delegation)) {
+ if (nfs4_is_valid_delegation(delegation, 0))
prev = delegation;
- continue;
- }
- if (!nfs_sb_active(server->super))
- break; /* continue in outer loop */
-
- if (prev) {
- struct inode *tmp;
-
- tmp = nfs_delegation_grab_inode(prev);
- if (tmp) {
- to_put = place_holder;
- place_holder = tmp;
- place_holder_deleg = prev;
- }
- }
+ continue;
+ }
- inode = nfs_delegation_grab_inode(delegation);
- if (inode == NULL) {
- rcu_read_unlock();
- if (to_put)
- iput(to_put);
- nfs_sb_deactive(server->super);
- goto restart;
+ if (prev) {
+ struct inode *tmp = nfs_delegation_grab_inode(prev);
+ if (tmp) {
+ to_put = place_holder;
+ place_holder = tmp;
+ place_holder_deleg = prev;
}
- delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ }
+
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL) {
rcu_read_unlock();
+ iput(to_put);
+ goto restart;
+ }
+ delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ rcu_read_unlock();
- if (to_put)
- iput(to_put);
+ iput(to_put);
- err = nfs_end_delegation_return(inode, delegation, 0);
- iput(inode);
- nfs_sb_deactive(server->super);
- cond_resched();
- if (!err)
- goto restart;
- set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
- if (place_holder)
- iput(place_holder);
- return err;
- }
+ err = nfs_end_delegation_return(inode, delegation, 0);
+ iput(inode);
+ cond_resched();
+ if (!err)
+ goto restart;
+ set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ goto out;
}
rcu_read_unlock();
- if (place_holder)
- iput(place_holder);
- return 0;
+out:
+ iput(place_holder);
+ return err;
+}
+
+/**
+ * nfs_client_return_marked_delegations - return previously marked delegations
+ * @clp: nfs_client to process
+ *
+ * Note that this function is designed to be called by the state
+ * manager thread. For this reason, it cannot flush the dirty data,
+ * since that could deadlock in case of a state recovery error.
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+int nfs_client_return_marked_delegations(struct nfs_client *clp)
+{
+ return nfs_client_for_each_server(clp,
+ nfs_server_return_marked_delegations, NULL);
}
/**
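Note: all three delegation walkers in this file (marked-delegation return, unclaimed reaping, expiry testing) are rebuilt on top of nfs_client_for_each_server(), declared in the internal.h hunk further down; its body lies outside this section. A plausible sketch of such an iterator, pinning each superblock with nfs_sb_active() so the nfs_server cannot disappear while the sleepable callback runs:

static int nfs_client_for_each_server(struct nfs_client *clp,
				      int (*fn)(struct nfs_server *, void *),
				      void *data)
{
	struct nfs_server *server, *last = NULL;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!nfs_sb_active(server->super))
			continue;
		rcu_read_unlock();
		if (last)
			nfs_sb_deactive(last->super);
		last = server;		/* stays pinned so the walk can resume */
		ret = fn(server, data);
		if (ret)
			goto out;
		rcu_read_lock();
	}
	rcu_read_unlock();
out:
	if (last)
		nfs_sb_deactive(last->super);
	return ret;
}

Keeping the previous server pinned until the next one is claimed guarantees that list_for_each_entry_rcu() resumes from a node that is still linked into cl_superblocks.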
@@ -1083,53 +1092,51 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp)
rcu_read_unlock();
}
-/**
- * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
- * @clp: nfs_client to process
- *
- */
-void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
+static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
+ void __always_unused *data)
{
struct nfs_delegation *delegation;
- struct nfs_server *server;
struct inode *inode;
-
restart:
rcu_read_lock();
- list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- list_for_each_entry_rcu(delegation, &server->delegations,
- super_list) {
- if (test_bit(NFS_DELEGATION_INODE_FREEING,
- &delegation->flags) ||
- test_bit(NFS_DELEGATION_RETURNING,
- &delegation->flags) ||
- test_bit(NFS_DELEGATION_NEED_RECLAIM,
- &delegation->flags) == 0)
- continue;
- if (!nfs_sb_active(server->super))
- break; /* continue in outer loop */
- inode = nfs_delegation_grab_inode(delegation);
- if (inode == NULL) {
- rcu_read_unlock();
- nfs_sb_deactive(server->super);
- goto restart;
- }
- delegation = nfs_start_delegation_return_locked(NFS_I(inode));
- rcu_read_unlock();
- if (delegation != NULL) {
- if (nfs_detach_delegation(NFS_I(inode), delegation,
- server) != NULL)
- nfs_free_delegation(delegation);
- /* Match nfs_start_delegation_return_locked */
- nfs_put_delegation(delegation);
- }
- iput(inode);
- nfs_sb_deactive(server->super);
- cond_resched();
- goto restart;
+restart_locked:
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURNING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_NEED_RECLAIM,
+ &delegation->flags) == 0)
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+ goto restart_locked;
+ delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ rcu_read_unlock();
+ if (delegation != NULL) {
+ if (nfs_detach_delegation(NFS_I(inode), delegation,
+ server) != NULL)
+ nfs_free_delegation(delegation);
+ /* Match nfs_start_delegation_return_locked */
+ nfs_put_delegation(delegation);
}
+ iput(inode);
+ cond_resched();
+ goto restart;
}
rcu_read_unlock();
+ return 0;
+}
+
+/**
+ * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
+ * @clp: nfs_client to process
+ *
+ */
+void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
+{
+ nfs_client_for_each_server(clp, nfs_server_reap_unclaimed_delegations,
+ NULL);
}
static inline bool nfs4_server_rebooted(const struct nfs_client *clp)
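Note: the loop above restarts from the head of the per-server list whenever it fails to pin an inode, and does so without dropping rcu_read_lock() (the restart_locked label); rescanning is cheap because entries already handled no longer carry NFS_DELEGATION_NEED_RECLAIM. A minimal self-contained rendering of the idiom, with hypothetical item/process/release helpers:

#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/sched.h>

struct item {
	struct list_head link;
	unsigned long flags;		/* bit ITEM_PENDING: work outstanding */
	struct kref ref;
};
#define ITEM_PENDING	0

static void process_item(struct item *it);	/* may sleep */
static void release_item(struct kref *ref);

static void scan(struct list_head *head)
{
	struct item *it;

	rcu_read_lock();
restart_locked:
	list_for_each_entry_rcu(it, head, link) {
		if (!test_and_clear_bit(ITEM_PENDING, &it->flags))
			continue;		/* handled on an earlier pass */
		if (!kref_get_unless_zero(&it->ref))
			goto restart_locked;	/* raced with removal; rescan */
		rcu_read_unlock();
		process_item(it);		/* sleepable work */
		kref_put(&it->ref, release_item);
		cond_resched();
		rcu_read_lock();
		goto restart_locked;		/* list may have mutated */
	}
	rcu_read_unlock();
}

Each pass either clears one pending bit or reaches the end of the list, so the walk terminates.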
@@ -1215,62 +1222,61 @@ nfs_delegation_test_free_expired(struct inode *inode,
nfs_remove_bad_delegation(inode, stateid);
}
-/**
- * nfs_reap_expired_delegations - reap expired delegations
- * @clp: nfs_client to process
- *
- * Iterates through all the delegations associated with this server and
- * checks if they have may have been revoked. This function is usually
- * expected to be called in cases where the server may have lost its
- * lease.
- */
-void nfs_reap_expired_delegations(struct nfs_client *clp)
+static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ void __always_unused *data)
{
struct nfs_delegation *delegation;
- struct nfs_server *server;
struct inode *inode;
const struct cred *cred;
nfs4_stateid stateid;
-
restart:
rcu_read_lock();
- list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- list_for_each_entry_rcu(delegation, &server->delegations,
- super_list) {
- if (test_bit(NFS_DELEGATION_INODE_FREEING,
- &delegation->flags) ||
- test_bit(NFS_DELEGATION_RETURNING,
- &delegation->flags) ||
- test_bit(NFS_DELEGATION_TEST_EXPIRED,
- &delegation->flags) == 0)
- continue;
- if (!nfs_sb_active(server->super))
- break; /* continue in outer loop */
- inode = nfs_delegation_grab_inode(delegation);
- if (inode == NULL) {
- rcu_read_unlock();
- nfs_sb_deactive(server->super);
- goto restart;
- }
- cred = get_cred_rcu(delegation->cred);
- nfs4_stateid_copy(&stateid, &delegation->stateid);
- clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
- rcu_read_unlock();
- nfs_delegation_test_free_expired(inode, &stateid, cred);
- put_cred(cred);
- if (nfs4_server_rebooted(clp)) {
- nfs_inode_mark_test_expired_delegation(server,inode);
- iput(inode);
- nfs_sb_deactive(server->super);
- return;
- }
+restart_locked:
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURNING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_TEST_EXPIRED,
+ &delegation->flags) == 0)
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+ goto restart_locked;
+ spin_lock(&delegation->lock);
+ cred = get_cred_rcu(delegation->cred);
+ nfs4_stateid_copy(&stateid, &delegation->stateid);
+ spin_unlock(&delegation->lock);
+ clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+ rcu_read_unlock();
+ nfs_delegation_test_free_expired(inode, &stateid, cred);
+ put_cred(cred);
+ if (!nfs4_server_rebooted(server->nfs_client)) {
iput(inode);
- nfs_sb_deactive(server->super);
cond_resched();
goto restart;
}
+	nfs_inode_mark_test_expired_delegation(server, inode);
+ iput(inode);
+ return -EAGAIN;
}
rcu_read_unlock();
+ return 0;
+}
+
+/**
+ * nfs_reap_expired_delegations - reap expired delegations
+ * @clp: nfs_client to process
+ *
+ * Iterates through all the delegations associated with this server and
+ * checks if they may have been revoked. This function is usually
+ * expected to be called in cases where the server may have lost its
+ * lease.
+ */
+void nfs_reap_expired_delegations(struct nfs_client *clp)
+{
+ nfs_client_for_each_server(clp, nfs_server_reap_expired_delegations,
+ NULL);
}
void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
@@ -1359,11 +1365,14 @@ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
- bool ret;
+ bool ret = false;
flags &= FMODE_READ|FMODE_WRITE;
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
+ if (!delegation)
+ goto out;
+ spin_lock(&delegation->lock);
ret = nfs4_is_valid_delegation(delegation, flags);
if (ret) {
nfs4_stateid_copy(dst, &delegation->stateid);
@@ -1371,6 +1380,8 @@ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
if (cred)
*cred = get_cred(delegation->cred);
}
+ spin_unlock(&delegation->lock);
+out:
rcu_read_unlock();
return ret;
}
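Note: taking delegation->lock inside the RCU section is what turns the stateid/cred pair into a consistent snapshot; RCU only keeps the delegation structure allocated, it does not serialize field updates such as nfs_update_delegation_cred(). The same pattern is added to nfs41_check_delegation_stateid() in nfs4proc.c below. Condensed into a hypothetical helper:

static bool copy_delegation_snapshot(struct nfs_inode *nfsi,
				     nfs4_stateid *dst,
				     const struct cred **credp)
{
	struct nfs_delegation *delegation;
	bool ret = false;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation) {
		spin_lock(&delegation->lock);	/* atomic stateid + cred copy */
		nfs4_stateid_copy(dst, &delegation->stateid);
		*credp = get_cred(delegation->cred);
		spin_unlock(&delegation->lock);
		ret = true;
	}
	rcu_read_unlock();
	return ret;
}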
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d4b839b6cf89..5a331da5f55a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -141,10 +141,9 @@ struct nfs_cache_array {
int size;
int eof_index;
u64 last_cookie;
- struct nfs_cache_array_entry array[0];
+ struct nfs_cache_array_entry array[];
};
-typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool);
typedef struct {
struct file *file;
struct page *page;
@@ -153,7 +152,7 @@ typedef struct {
u64 *dir_cookie;
u64 last_cookie;
loff_t current_index;
- decode_dirent_t decode;
+ loff_t prev_index;
unsigned long dir_verifier;
unsigned long timestamp;
@@ -240,6 +239,25 @@ out:
return ret;
}
+static inline
+int is_32bit_api(void)
+{
+#ifdef CONFIG_COMPAT
+ return in_compat_syscall();
+#else
+ return (BITS_PER_LONG == 32);
+#endif
+}
+
+static
+bool nfs_readdir_use_cookie(const struct file *filp)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return false;
+ return true;
+}
+
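Note: with nfs_readdir_use_cookie(), callers that can handle 64-bit offsets see the raw NFS directory cookie in f_pos instead of a synthetic sequential index, while 32-bit and compat callers keep the old indexing (the FMODE_32BITHASH/FMODE_64BITHASH convention, cf. ext4's directory hashing). Either way the value is opaque to userspace, as in this illustration (mount path assumed):

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/mnt/nfs");	/* hypothetical NFS mount */

	if (!d)
		return 1;
	readdir(d);
	/* May now print a raw server cookie; only valid as input to seekdir(). */
	printf("telldir: %ld\n", telldir(d));
	closedir(d);
	return 0;
}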
static
int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
{
@@ -289,7 +307,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
!nfs_readdir_inode_mapping_valid(nfsi)) {
ctx->duped = 0;
ctx->attr_gencount = nfsi->attr_gencount;
- } else if (new_pos < desc->ctx->pos) {
+ } else if (new_pos < desc->prev_index) {
if (ctx->duped > 0
&& ctx->dup_cookie == *desc->dir_cookie) {
if (printk_ratelimit()) {
@@ -305,7 +323,11 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
ctx->dup_cookie = *desc->dir_cookie;
ctx->duped = -1;
}
- desc->ctx->pos = new_pos;
+ if (nfs_readdir_use_cookie(desc->file))
+ desc->ctx->pos = *desc->dir_cookie;
+ else
+ desc->ctx->pos = new_pos;
+ desc->prev_index = new_pos;
desc->cache_entry_index = i;
return 0;
}
@@ -376,9 +398,10 @@ error:
static int xdr_decode(nfs_readdir_descriptor_t *desc,
struct nfs_entry *entry, struct xdr_stream *xdr)
{
+ struct inode *inode = file_inode(desc->file);
int error;
- error = desc->decode(xdr, entry, desc->plus);
+ error = NFS_PROTO(inode)->decode_dirent(xdr, entry, desc->plus);
if (error)
return error;
entry->fattr->time_start = desc->timestamp;
@@ -756,6 +779,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
if (desc->page_index == 0) {
desc->current_index = 0;
+ desc->prev_index = 0;
desc->last_cookie = 0;
}
do {
@@ -786,11 +810,14 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
desc->eof = true;
break;
}
- desc->ctx->pos++;
if (i < (array->size-1))
*desc->dir_cookie = array->array[i+1].cookie;
else
*desc->dir_cookie = array->last_cookie;
+ if (nfs_readdir_use_cookie(file))
+ desc->ctx->pos = *desc->dir_cookie;
+ else
+ desc->ctx->pos++;
if (ctx->duped != 0)
ctx->duped = 1;
}
@@ -860,9 +887,14 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
- nfs_readdir_descriptor_t my_desc,
- *desc = &my_desc;
struct nfs_open_dir_context *dir_ctx = file->private_data;
+ nfs_readdir_descriptor_t my_desc = {
+ .file = file,
+ .ctx = ctx,
+ .dir_cookie = &dir_ctx->dir_cookie,
+ .plus = nfs_use_readdirplus(inode, ctx),
+ },
+ *desc = &my_desc;
int res = 0;
dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
@@ -875,14 +907,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
* to either find the entry with the appropriate number or
* revalidate the cookie.
*/
- memset(desc, 0, sizeof(*desc));
-
- desc->file = file;
- desc->ctx = ctx;
- desc->dir_cookie = &dir_ctx->dir_cookie;
- desc->decode = NFS_PROTO(inode)->decode_dirent;
- desc->plus = nfs_use_readdirplus(inode, ctx);
-
if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
res = nfs_revalidate_mapping(inode, file->f_mapping);
if (res < 0)
@@ -954,7 +978,10 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
}
if (offset != filp->f_pos) {
filp->f_pos = offset;
- dir_ctx->dir_cookie = 0;
+ if (nfs_readdir_use_cookie(filp))
+ dir_ctx->dir_cookie = offset;
+ else
+ dir_ctx->dir_cookie = 0;
dir_ctx->duped = 0;
}
inode_unlock(inode);
@@ -2282,7 +2309,7 @@ static DEFINE_SPINLOCK(nfs_access_lru_lock);
static LIST_HEAD(nfs_access_lru_list);
static atomic_long_t nfs_access_nr_entries;
-static unsigned long nfs_access_max_cachesize = ULONG_MAX;
+static unsigned long nfs_access_max_cachesize = 4*1024*1024;
module_param(nfs_access_max_cachesize, ulong, 0644);
MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length");
@@ -2642,9 +2669,10 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
status = NFS_PROTO(inode)->access(inode, &cache);
if (status != 0) {
if (status == -ESTALE) {
- nfs_zap_caches(inode);
if (!S_ISDIR(inode->i_mode))
- set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ nfs_set_inode_stale(inode);
+ else
+ nfs_zap_caches(inode);
}
goto out;
}
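Note: this hunk and the matching one in __nfs_revalidate_inode() (fs/nfs/inode.c below) encode the same policy: -ESTALE is terminal for a regular file, but a directory may simply have been replaced on the server, so only its caches are dropped and a later lookup can revalidate it. Expressed as a hypothetical helper:

static void nfs_handle_estale(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode))
		nfs_set_inode_stale(inode);	/* mark stale; lookup re-instantiates */
	else
		nfs_zap_caches(inode);		/* keep the dir, drop cached state */
}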
@@ -2732,14 +2760,7 @@ force_lookup:
if (!NFS_PROTO(inode)->access)
goto out_notsup;
- /* Always try fast lookups first */
- rcu_read_lock();
- res = nfs_do_access(inode, cred, mask|MAY_NOT_BLOCK);
- rcu_read_unlock();
- if (res == -ECHILD && !(mask & MAY_NOT_BLOCK)) {
- /* Fast lookup failed, try the slow way */
- res = nfs_do_access(inode, cred, mask);
- }
+ res = nfs_do_access(inode, cred, mask);
out:
if (!res && (mask & MAY_EXEC))
res = nfs_execute_ok(inode, mask);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index b768a0b42e82..a57e7c72c7f4 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -94,7 +94,7 @@ struct nfs_direct_req {
#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
/* for read */
#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
- struct nfs_writeverf verf; /* unstable write verifier */
+#define NFS_ODIRECT_DONE	INT_MAX	/* terminal: request completed or failed; no rescheduling */
};
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
@@ -151,106 +151,6 @@ nfs_direct_count_bytes(struct nfs_direct_req *dreq,
dreq->count = dreq_len;
}
-/*
- * nfs_direct_select_verf - select the right verifier
- * @dreq - direct request possibly spanning multiple servers
- * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
- * @commit_idx - commit bucket index for the DS
- *
- * returns the correct verifier to use given the role of the server
- */
-static struct nfs_writeverf *
-nfs_direct_select_verf(struct nfs_direct_req *dreq,
- struct nfs_client *ds_clp,
- int commit_idx)
-{
- struct nfs_writeverf *verfp = &dreq->verf;
-
-#ifdef CONFIG_NFS_V4_1
- /*
- * pNFS is in use, use the DS verf except commit_through_mds is set
- * for layout segment where nbuckets is zero.
- */
- if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
- if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
- verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
- else
- WARN_ON_ONCE(1);
- }
-#endif
- return verfp;
-}
-
-
-/*
- * nfs_direct_set_hdr_verf - set the write/commit verifier
- * @dreq - direct request possibly spanning multiple servers
- * @hdr - pageio header to validate against previously seen verfs
- *
- * Set the server's (MDS or DS) "seen" verifier
- */
-static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
- struct nfs_pgio_header *hdr)
-{
- struct nfs_writeverf *verfp;
-
- verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
- WARN_ON_ONCE(verfp->committed >= 0);
- memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
- WARN_ON_ONCE(verfp->committed < 0);
-}
-
-static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
- const struct nfs_writeverf *v2)
-{
- return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
-}
-
-/*
- * nfs_direct_cmp_hdr_verf - compare verifier for pgio header
- * @dreq - direct request possibly spanning multiple servers
- * @hdr - pageio header to validate against previously seen verf
- *
- * set the server's "seen" verf if not initialized.
- * returns result of comparison between @hdr->verf and the "seen"
- * verf of the server used by @hdr (DS or MDS)
- */
-static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
- struct nfs_pgio_header *hdr)
-{
- struct nfs_writeverf *verfp;
-
- verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
- if (verfp->committed < 0) {
- nfs_direct_set_hdr_verf(dreq, hdr);
- return 0;
- }
- return nfs_direct_cmp_verf(verfp, &hdr->verf);
-}
-
-/*
- * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
- * @dreq - direct request possibly spanning multiple servers
- * @data - commit data to validate against previously seen verf
- *
- * returns result of comparison between @data->verf and the verf of
- * the server used by @data (DS or MDS)
- */
-static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
- struct nfs_commit_data *data)
-{
- struct nfs_writeverf *verfp;
-
- verfp = nfs_direct_select_verf(dreq, data->ds_clp,
- data->ds_commit_index);
-
- /* verifier not set so always fail */
- if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
- return 1;
-
- return nfs_direct_cmp_verf(verfp, data->res.verf);
-}
-
/**
* nfs_direct_IO - NFS address space operation for direct I/O
* @iocb: target I/O control block
@@ -305,7 +205,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
kref_get(&dreq->kref);
init_completion(&dreq->completion);
INIT_LIST_HEAD(&dreq->mds_cinfo.list);
- dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */
+ pnfs_init_ds_commit_info(&dreq->ds_cinfo);
INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
spin_lock_init(&dreq->lock);
@@ -316,7 +216,7 @@ static void nfs_direct_req_free(struct kref *kref)
{
struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
- nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
+ pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
if (dreq->l_ctx != NULL)
nfs_put_lock_context(dreq->l_ctx);
if (dreq->ctx != NULL)
@@ -571,6 +471,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
result = PTR_ERR(l_ctx);
+ nfs_direct_req_release(dreq);
goto out_release;
}
dreq->l_ctx = l_ctx;
@@ -605,15 +506,30 @@ out:
}
static void
+nfs_direct_join_group(struct list_head *list, struct inode *inode)
+{
+ struct nfs_page *req, *next;
+
+ list_for_each_entry(req, list, wb_list) {
+ if (req->wb_head != req || req->wb_this_page == req)
+ continue;
+ for (next = req->wb_this_page;
+ next != req->wb_head;
+ next = next->wb_this_page) {
+ nfs_list_remove_request(next);
+ nfs_release_request(next);
+ }
+ nfs_join_page_group(req, inode);
+ }
+}
+
+static void
nfs_direct_write_scan_commit_list(struct inode *inode,
struct list_head *list,
struct nfs_commit_info *cinfo)
{
mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
-#ifdef CONFIG_NFS_V4_1
- if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
- NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
-#endif
+ pnfs_recover_commit_reqs(list, cinfo);
nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
@@ -629,11 +545,12 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
nfs_init_cinfo_from_dreq(&cinfo, dreq);
nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+ nfs_direct_join_group(&reqs, dreq->inode);
+
dreq->count = 0;
dreq->max_count = 0;
list_for_each_entry(req, &reqs, wb_list)
dreq->max_count += req->wb_bytes;
- dreq->verf.committed = NFS_INVALID_STABLE_HOW;
nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
get_dreq(dreq);
@@ -670,27 +587,35 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
+ const struct nfs_writeverf *verf = data->res.verf;
struct nfs_direct_req *dreq = data->dreq;
struct nfs_commit_info cinfo;
struct nfs_page *req;
int status = data->task.tk_status;
+ if (status < 0) {
+ /* Errors in commit are fatal */
+ dreq->error = status;
+ dreq->max_count = 0;
+ dreq->count = 0;
+ dreq->flags = NFS_ODIRECT_DONE;
+ } else if (dreq->flags == NFS_ODIRECT_DONE)
+ status = dreq->error;
+
nfs_init_cinfo_from_dreq(&cinfo, dreq);
- if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
- dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
- if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
+ if (status >= 0 && !nfs_write_match_verf(verf, req)) {
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
/*
* Despite the reboot, the write was successful,
* so reset wb_nio.
*/
req->wb_nio = 0;
- /* Note the rewrite will go through mds */
nfs_mark_request_commit(req, NULL, &cinfo, 0);
- } else
+ } else /* Error or match */
nfs_release_request(req);
nfs_unlock_and_release_request(req);
}
@@ -705,7 +630,8 @@ static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq = cinfo->dreq;
spin_lock(&dreq->lock);
- dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ if (dreq->flags != NFS_ODIRECT_DONE)
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
spin_unlock(&dreq->lock);
nfs_mark_request_commit(req, NULL, cinfo, 0);
}
@@ -728,6 +654,23 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
nfs_direct_write_reschedule(dreq);
}
+static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+{
+ struct nfs_commit_info cinfo;
+ struct nfs_page *req;
+ LIST_HEAD(reqs);
+
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+
+ while (!list_empty(&reqs)) {
+ req = nfs_list_entry(reqs.next);
+ nfs_list_remove_request(req);
+ nfs_release_request(req);
+ nfs_unlock_and_release_request(req);
+ }
+}
+
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
@@ -742,6 +685,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
nfs_direct_write_reschedule(dreq);
break;
default:
+ nfs_direct_write_clear_reqs(dreq);
nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
nfs_direct_complete(dreq);
}
@@ -768,20 +712,15 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
}
nfs_direct_count_bytes(dreq, hdr);
- if (hdr->good_bytes != 0) {
- if (nfs_write_need_commit(hdr)) {
- if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
- request_commit = true;
- else if (dreq->flags == 0) {
- nfs_direct_set_hdr_verf(dreq, hdr);
- request_commit = true;
- dreq->flags = NFS_ODIRECT_DO_COMMIT;
- } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
- request_commit = true;
- if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
- dreq->flags =
- NFS_ODIRECT_RESCHED_WRITES;
- }
+ if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
+ switch (dreq->flags) {
+ case 0:
+ dreq->flags = NFS_ODIRECT_DO_COMMIT;
+ request_commit = true;
+ break;
+ case NFS_ODIRECT_RESCHED_WRITES:
+ case NFS_ODIRECT_DO_COMMIT:
+ request_commit = true;
}
}
spin_unlock(&dreq->lock);
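Note: after this rework dreq->flags behaves as a small state machine instead of a set of verifier comparisons. A summary of the transitions, as read from this hunk and from nfs_direct_commit_complete() above:

/*
 *  0 --(unstable write completes)--> NFS_ODIRECT_DO_COMMIT
 *    --(commit verifier mismatch, or resched request)--> NFS_ODIRECT_RESCHED_WRITES
 *    --(fatal commit error)--> NFS_ODIRECT_DONE	(terminal)
 */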
@@ -990,11 +929,13 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
result = PTR_ERR(l_ctx);
+ nfs_direct_req_release(dreq);
goto out_release;
}
dreq->l_ctx = l_ctx;
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
+ pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
nfs_start_io_direct(inode);
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 89bd5581f317..963800037609 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -152,12 +152,13 @@ static int nfs_dns_upcall(struct cache_detail *cd,
struct cache_head *ch)
{
struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
- int ret;
- ret = nfs_cache_upcall(cd, key->hostname);
- if (ret)
- ret = sunrpc_cache_pipe_upcall(cd, ch);
- return ret;
+ if (test_and_set_bit(CACHE_PENDING, &ch->flags))
+ return 0;
+ if (!nfs_cache_upcall(cd, key->hostname))
+ return 0;
+ clear_bit(CACHE_PENDING, &ch->flags);
+ return sunrpc_cache_pipe_upcall_timeout(cd, ch);
}
static int nfs_dns_match(struct cache_head *ca,
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index c9b605f6c9cb..a13e69009f19 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -49,6 +49,7 @@ MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
MODULE_DESCRIPTION("The NFSv4 file layout driver");
#define FILELAYOUT_POLL_RETRY_MAX (15*HZ)
+static const struct pnfs_commit_ops filelayout_commit_ops;
static loff_t
filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
@@ -750,72 +751,17 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
/* This assumes a single RW lseg */
if (lseg->pls_range.iomode == IOMODE_RW) {
struct nfs4_filelayout *flo;
+ struct inode *inode;
flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
- flo->commit_info.nbuckets = 0;
- kfree(flo->commit_info.buckets);
- flo->commit_info.buckets = NULL;
+ inode = flo->generic_hdr.plh_inode;
+ spin_lock(&inode->i_lock);
+ pnfs_generic_ds_cinfo_release_lseg(&flo->commit_info, lseg);
+ spin_unlock(&inode->i_lock);
}
_filelayout_free_lseg(fl);
}
-static int
-filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo,
- gfp_t gfp_flags)
-{
- struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
- struct pnfs_commit_bucket *buckets;
- int size, i;
-
- if (fl->commit_through_mds)
- return 0;
-
- size = (fl->stripe_type == STRIPE_SPARSE) ?
- fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
-
- if (cinfo->ds->nbuckets >= size) {
- /* This assumes there is only one IOMODE_RW lseg. What
- * we really want to do is have a layout_hdr level
- * dictionary of <multipath_list4, fh> keys, each
- * associated with a struct list_head, populated by calls
- * to filelayout_write_pagelist().
- * */
- return 0;
- }
-
- buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
- gfp_flags);
- if (!buckets)
- return -ENOMEM;
- for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&buckets[i].written);
- INIT_LIST_HEAD(&buckets[i].committing);
- /* mark direct verifier as unset */
- buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
- }
-
- spin_lock(&cinfo->inode->i_lock);
- if (cinfo->ds->nbuckets >= size)
- goto out;
- for (i = 0; i < cinfo->ds->nbuckets; i++) {
- list_splice(&cinfo->ds->buckets[i].written,
- &buckets[i].written);
- list_splice(&cinfo->ds->buckets[i].committing,
- &buckets[i].committing);
- buckets[i].direct_verf.committed =
- cinfo->ds->buckets[i].direct_verf.committed;
- buckets[i].wlseg = cinfo->ds->buckets[i].wlseg;
- buckets[i].clseg = cinfo->ds->buckets[i].clseg;
- }
- swap(cinfo->ds->buckets, buckets);
- cinfo->ds->nbuckets = size;
-out:
- spin_unlock(&cinfo->inode->i_lock);
- kfree(buckets);
- return 0;
-}
-
static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
struct nfs4_layoutget_res *lgr,
@@ -938,9 +884,6 @@ static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
- struct nfs_commit_info cinfo;
- int status;
-
pnfs_generic_pg_check_layout(pgio);
if (!pgio->pg_lseg) {
pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
@@ -959,17 +902,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
- goto out_mds;
- nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
- status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
- if (status < 0) {
- pnfs_put_lseg(pgio->pg_lseg);
- pgio->pg_lseg = NULL;
- goto out_mds;
- }
- return;
-out_mds:
- nfs_pageio_reset_write_mds(pgio);
+ nfs_pageio_reset_write_mds(pgio);
}
static const struct nfs_pageio_ops filelayout_pg_read_ops = {
@@ -1078,36 +1011,6 @@ out_err:
return -EAGAIN;
}
-/* filelayout_search_commit_reqs - Search lists in @cinfo for the head reqest
- * for @page
- * @cinfo - commit info for current inode
- * @page - page to search for matching head request
- *
- * Returns a the head request if one is found, otherwise returns NULL.
- */
-static struct nfs_page *
-filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
-{
- struct nfs_page *freq, *t;
- struct pnfs_commit_bucket *b;
- int i;
-
- /* Linearly search the commit lists for each bucket until a matching
- * request is found */
- for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
- list_for_each_entry_safe(freq, t, &b->written, wb_list) {
- if (freq->wb_page == page)
- return freq->wb_head;
- }
- list_for_each_entry_safe(freq, t, &b->committing, wb_list) {
- if (freq->wb_page == page)
- return freq->wb_head;
- }
- }
-
- return NULL;
-}
-
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
int how, struct nfs_commit_info *cinfo)
@@ -1140,13 +1043,17 @@ filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
struct nfs4_filelayout *flo;
flo = kzalloc(sizeof(*flo), gfp_flags);
- return flo != NULL ? &flo->generic_hdr : NULL;
+ if (flo == NULL)
+ return NULL;
+ pnfs_init_ds_commit_info(&flo->commit_info);
+ flo->commit_info.ops = &filelayout_commit_ops;
+ return &flo->generic_hdr;
}
static void
filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
- kfree(FILELAYOUT_FROM_HDR(lo));
+ kfree_rcu(FILELAYOUT_FROM_HDR(lo), generic_hdr.plh_rcu);
}
static struct pnfs_ds_commit_info *
@@ -1160,6 +1067,46 @@ filelayout_get_ds_info(struct inode *inode)
return &FILELAYOUT_FROM_HDR(layout)->commit_info;
}
+static void
+filelayout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+ struct inode *inode = lseg->pls_layout->plh_inode;
+ struct pnfs_commit_array *array, *new;
+ unsigned int size = (fl->stripe_type == STRIPE_SPARSE) ?
+ fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+ new = pnfs_alloc_commit_array(size, GFP_NOIO);
+ if (new) {
+ spin_lock(&inode->i_lock);
+ array = pnfs_add_commit_array(fl_cinfo, new, lseg);
+ spin_unlock(&inode->i_lock);
+ if (array != new)
+ pnfs_free_commit_array(new);
+ }
+}
+
+static void
+filelayout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
+ struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ pnfs_generic_ds_cinfo_destroy(fl_cinfo);
+ spin_unlock(&inode->i_lock);
+}
+
+static const struct pnfs_commit_ops filelayout_commit_ops = {
+ .setup_ds_info = filelayout_setup_ds_info,
+ .release_ds_info = filelayout_release_ds_info,
+ .mark_request_commit = filelayout_mark_request_commit,
+ .clear_request_commit = pnfs_generic_clear_request_commit,
+ .scan_commit_lists = pnfs_generic_scan_commit_lists,
+ .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
+ .search_commit_reqs = pnfs_generic_search_commit_reqs,
+ .commit_pagelist = filelayout_commit_pagelist,
+};
+
static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
@@ -1173,12 +1120,6 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.pg_read_ops = &filelayout_pg_read_ops,
.pg_write_ops = &filelayout_pg_write_ops,
.get_ds_info = &filelayout_get_ds_info,
- .mark_request_commit = filelayout_mark_request_commit,
- .clear_request_commit = pnfs_generic_clear_request_commit,
- .scan_commit_lists = pnfs_generic_scan_commit_lists,
- .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
- .search_commit_reqs = filelayout_search_commit_reqs,
- .commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
.alloc_deviceid_node = filelayout_alloc_deviceid_node,
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index bb9148b83166..7d399f72ebbb 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -32,6 +32,7 @@
static unsigned short io_maxretrans;
+static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
@@ -48,9 +49,11 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
ffl = kzalloc(sizeof(*ffl), gfp_flags);
if (ffl) {
+ pnfs_init_ds_commit_info(&ffl->commit_info);
INIT_LIST_HEAD(&ffl->error_list);
INIT_LIST_HEAD(&ffl->mirrors);
ffl->last_report_time = ktime_get();
+ ffl->commit_info.ops = &ff_layout_commit_ops;
return &ffl->generic_hdr;
} else
return NULL;
@@ -59,14 +62,14 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
+ struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
struct nfs4_ff_layout_ds_err *err, *n;
- list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
- list) {
+ list_for_each_entry_safe(err, n, &ffl->error_list, list) {
list_del(&err->list);
kfree(err);
}
- kfree(FF_LAYOUT_FROM_HDR(lo));
+ kfree_rcu(ffl, generic_hdr.plh_rcu);
}
static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
@@ -248,36 +251,10 @@ static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
- int i;
-
- if (fls->mirror_array) {
- for (i = 0; i < fls->mirror_array_cnt; i++) {
- /* normally mirror_ds is freed in
- * .free_deviceid_node but we still do it here
- * for .alloc_lseg error path */
- ff_layout_put_mirror(fls->mirror_array[i]);
- }
- kfree(fls->mirror_array);
- fls->mirror_array = NULL;
- }
-}
-
-static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
-{
- int ret = 0;
+ u32 i;
- dprintk("--> %s\n", __func__);
-
- /* FIXME: remove this check when layout segment support is added */
- if (lgr->range.offset != 0 ||
- lgr->range.length != NFS4_MAX_UINT64) {
- dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
- __func__);
- ret = -EINVAL;
- }
-
- dprintk("--> %s returns %d\n", __func__, ret);
- return ret;
+ for (i = 0; i < fls->mirror_array_cnt; i++)
+ ff_layout_put_mirror(fls->mirror_array[i]);
}
static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
@@ -289,6 +266,23 @@ static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
}
static bool
+ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
+ struct pnfs_layout_segment *l2)
+{
+ const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
+	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
+ u32 i;
+
+ if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
+ return false;
+ for (i = 0; i < fl1->mirror_array_cnt; i++) {
+ if (fl1->mirror_array[i] != fl2->mirror_array[i])
+ return false;
+ }
+ return true;
+}
+
+static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
const struct pnfs_layout_range *l2)
{
@@ -323,6 +317,8 @@ ff_lseg_merge(struct pnfs_layout_segment *new,
new->pls_range.length);
if (new_end < old->pls_range.offset)
return false;
+ if (!ff_lseg_match_mirrors(new, old))
+ return false;
/* Mergeable: copy info from 'old' to 'new' */
if (new_end < old_end)
@@ -400,16 +396,13 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
goto out_err_free;
rc = -ENOMEM;
- fls = kzalloc(sizeof(*fls), gfp_flags);
+ fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
+ gfp_flags);
if (!fls)
goto out_err_free;
fls->mirror_array_cnt = mirror_array_cnt;
fls->stripe_unit = stripe_unit;
- fls->mirror_array = kcalloc(fls->mirror_array_cnt,
- sizeof(fls->mirror_array[0]), gfp_flags);
- if (fls->mirror_array == NULL)
- goto out_err_free;
for (i = 0; i < fls->mirror_array_cnt; i++) {
struct nfs4_ff_layout_mirror *mirror;
@@ -545,9 +538,6 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
out_sort_mirrors:
ff_layout_sort_mirrors(fls);
- rc = ff_layout_check_layout(lgr);
- if (rc)
- goto out_err_free;
ret = &fls->generic_hdr;
dprintk("<-- %s (success)\n", __func__);
out_free_page:
@@ -560,17 +550,6 @@ out_err_free:
goto out_free_page;
}
-static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
-{
- struct pnfs_layout_segment *lseg;
-
- list_for_each_entry(lseg, &layout->plh_segs, pls_list)
- if (lseg->pls_range.iomode == IOMODE_RW)
- return true;
-
- return false;
-}
-
static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
@@ -585,23 +564,12 @@ ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
inode = ffl->generic_hdr.plh_inode;
spin_lock(&inode->i_lock);
- if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
- ffl->commit_info.nbuckets = 0;
- kfree(ffl->commit_info.buckets);
- ffl->commit_info.buckets = NULL;
- }
+ pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
spin_unlock(&inode->i_lock);
}
_ff_layout_free_lseg(fls);
}
-/* Return 1 until we have multiple lsegs support */
-static int
-ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
-{
- return 1;
-}
-
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
@@ -746,52 +714,6 @@ nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
spin_unlock(&mirror->lock);
}
-static int
-ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo,
- gfp_t gfp_flags)
-{
- struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
- struct pnfs_commit_bucket *buckets;
- int size;
-
- if (cinfo->ds->nbuckets != 0) {
- /* This assumes there is only one RW lseg per file.
- * To support multiple lseg per file, we need to
- * change struct pnfs_commit_bucket to allow dynamic
- * increasing nbuckets.
- */
- return 0;
- }
-
- size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
-
- buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
- gfp_flags);
- if (!buckets)
- return -ENOMEM;
- else {
- int i;
-
- spin_lock(&cinfo->inode->i_lock);
- if (cinfo->ds->nbuckets != 0)
- kfree(buckets);
- else {
- cinfo->ds->buckets = buckets;
- cinfo->ds->nbuckets = size;
- for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&buckets[i].written);
- INIT_LIST_HEAD(&buckets[i].committing);
- /* mark direct verifier as unset */
- buckets[i].direct_verf.committed =
- NFS_INVALID_STABLE_HOW;
- }
- }
- spin_unlock(&cinfo->inode->i_lock);
- return 0;
- }
-}
-
static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
{
@@ -876,8 +798,8 @@ ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
nfs_req_openctx(req),
- 0,
- NFS4_MAX_UINT64,
+ req_offset(req),
+ req->wb_bytes,
IOMODE_READ,
strict_iomode,
GFP_KERNEL);
@@ -888,6 +810,14 @@ ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
}
static void
+ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ pnfs_generic_pg_check_layout(pgio);
+ pnfs_generic_pg_check_range(pgio, req);
+}
+
+static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
@@ -897,7 +827,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
int ds_idx;
retry:
- pnfs_generic_pg_check_layout(pgio);
+ ff_layout_pg_check_layout(pgio, req);
/* Use full layout for now */
if (!pgio->pg_lseg) {
ff_layout_pg_get_read(pgio, req, false);
@@ -953,18 +883,16 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
{
struct nfs4_ff_layout_mirror *mirror;
struct nfs_pgio_mirror *pgm;
- struct nfs_commit_info cinfo;
struct nfs4_pnfs_ds *ds;
int i;
- int status;
retry:
- pnfs_generic_pg_check_layout(pgio);
+ ff_layout_pg_check_layout(pgio, req);
if (!pgio->pg_lseg) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
nfs_req_openctx(req),
- 0,
- NFS4_MAX_UINT64,
+ req_offset(req),
+ req->wb_bytes,
IOMODE_RW,
false,
GFP_NOFS);
@@ -978,11 +906,6 @@ retry:
if (pgio->pg_lseg == NULL)
goto out_mds;
- nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
- status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
- if (status < 0)
- goto out_mds;
-
/* Use a direct mapping of ds_idx to pgio mirror_idx */
if (WARN_ON_ONCE(pgio->pg_mirror_count !=
FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
@@ -1297,21 +1220,23 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
}
}
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+ mirror, offset, length, status, opnum,
+ GFP_NOIO);
+
switch (status) {
case NFS4ERR_DELAY:
case NFS4ERR_GRACE:
- return;
- default:
break;
+ case NFS4ERR_NXIO:
+ ff_layout_mark_ds_unreachable(lseg, idx);
+ /* Fallthrough */
+ default:
+ pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
+ lseg);
}
- mirror = FF_LAYOUT_COMP(lseg, idx);
- err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
- mirror, offset, length, status, opnum,
- GFP_NOIO);
- if (status == NFS4ERR_NXIO)
- ff_layout_mark_ds_unreachable(lseg, idx);
- pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}
@@ -2012,6 +1937,33 @@ ff_layout_get_ds_info(struct inode *inode)
}
static void
+ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
+ struct inode *inode = lseg->pls_layout->plh_inode;
+ struct pnfs_commit_array *array, *new;
+
+ new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO);
+ if (new) {
+ spin_lock(&inode->i_lock);
+ array = pnfs_add_commit_array(fl_cinfo, new, lseg);
+ spin_unlock(&inode->i_lock);
+ if (array != new)
+ pnfs_free_commit_array(new);
+ }
+}
+
+static void
+ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
+ struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ pnfs_generic_ds_cinfo_destroy(fl_cinfo);
+ spin_unlock(&inode->i_lock);
+}
+
+static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
@@ -2496,6 +2448,16 @@ ff_layout_set_layoutdriver(struct nfs_server *server,
return 0;
}
+static const struct pnfs_commit_ops ff_layout_commit_ops = {
+ .setup_ds_info = ff_layout_setup_ds_info,
+ .release_ds_info = ff_layout_release_ds_info,
+ .mark_request_commit = pnfs_layout_mark_request_commit,
+ .clear_request_commit = pnfs_generic_clear_request_commit,
+ .scan_commit_lists = pnfs_generic_scan_commit_lists,
+ .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
+ .commit_pagelist = ff_layout_commit_pagelist,
+};
+
static struct pnfs_layoutdriver_type flexfilelayout_type = {
.id = LAYOUT_FLEX_FILES,
.name = "LAYOUT_FLEX_FILES",
@@ -2512,11 +2474,6 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
.pg_write_ops = &ff_layout_pg_write_ops,
.get_ds_info = ff_layout_get_ds_info,
.free_deviceid_node = ff_layout_free_deviceid_node,
- .mark_request_commit = pnfs_layout_mark_request_commit,
- .clear_request_commit = pnfs_generic_clear_request_commit,
- .scan_commit_lists = pnfs_generic_scan_commit_lists,
- .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
- .commit_pagelist = ff_layout_commit_pagelist,
.read_pagelist = ff_layout_read_pagelist,
.write_pagelist = ff_layout_write_pagelist,
.alloc_deviceid_node = ff_layout_alloc_deviceid_node,
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 2f369966abf7..354a031c69b1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -99,7 +99,7 @@ struct nfs4_ff_layout_segment {
u64 stripe_unit;
u32 flags;
u32 mirror_array_cnt;
- struct nfs4_ff_layout_mirror **mirror_array;
+ struct nfs4_ff_layout_mirror *mirror_array[];
};
struct nfs4_flexfile_layout {
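Note: mirror_array[] here, nfs_cache_array.array[] and nfs4_cached_acl.data[] earlier in this diff are all instances of the same kernel-wide conversion: replace a zero-length or pointer member with a C99 flexible array member and size the single allocation with struct_size(), which saturates rather than wraps on overflow. The shape of the idiom, with illustrative types:

#include <linux/overflow.h>
#include <linux/slab.h>

struct mirror;				/* stand-in element type */

struct example {
	u32 cnt;
	struct mirror *array[];		/* flexible array member, not [0] */
};

static struct example *example_alloc(u32 cnt, gfp_t gfp)
{
	/* struct_size() == sizeof(*e) + cnt * sizeof(e->array[0]), overflow-checked */
	struct example *e = kzalloc(struct_size(e, array, cnt), gfp);

	if (e)
		e->cnt = cnt;
	return e;
}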
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index e113fcb4bb4c..ccc88be88d6a 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -190,6 +190,7 @@ static const struct constant_table nfs_vers_tokens[] = {
{ "4.0", Opt_vers_4_0 },
{ "4.1", Opt_vers_4_1 },
{ "4.2", Opt_vers_4_2 },
+ {}
};
enum {
@@ -202,13 +203,14 @@ enum {
nr__Opt_xprt
};
-static const struct constant_table nfs_xprt_protocol_tokens[nr__Opt_xprt] = {
+static const struct constant_table nfs_xprt_protocol_tokens[] = {
{ "rdma", Opt_xprt_rdma },
{ "rdma6", Opt_xprt_rdma6 },
{ "tcp", Opt_xprt_tcp },
{ "tcp6", Opt_xprt_tcp6 },
{ "udp", Opt_xprt_udp },
{ "udp6", Opt_xprt_udp6 },
+ {}
};
enum {
@@ -239,6 +241,7 @@ static const struct constant_table nfs_secflavor_tokens[] = {
{ "spkm3i", Opt_sec_spkmi },
{ "spkm3p", Opt_sec_spkmp },
{ "sys", Opt_sec_sys },
+ {}
};
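Note: the empty initializers added to these three tables are not cosmetic. lookup_constant() walks a struct constant_table array until it finds an entry whose .name is NULL, so a table without the {} sentinel is read past its end (the xprt table previously masked this by being sized [nr__Opt_xprt] with no terminating entry). Minimal shape, with illustrative names:

#include <linux/fs_parser.h>

static const struct constant_table example_tokens[] = {
	{ "on",  1 },
	{ "off", 0 },
	{}	/* sentinel: lookup_constant() stops at .name == NULL */
};

/* e.g.: value = lookup_constant(example_tokens, param->string, -1); */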
/*
@@ -1135,7 +1138,7 @@ out_no_address:
return nfs_invalf(fc, "NFS4: mount program didn't pass remote address");
out_invalid_transport_udp:
- return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp");
+ return nfs_invalf(fc, "NFS: Unsupported transport protocol udp");
}
#endif
@@ -1257,7 +1260,7 @@ out_v4_not_compiled:
nfs_errorf(fc, "NFS: NFSv4 is not compiled into kernel");
return -EPROTONOSUPPORT;
out_invalid_transport_udp:
- return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp");
+ return nfs_invalf(fc, "NFS: Unsupported transport protocol udp");
out_no_address:
return nfs_invalf(fc, "NFS: mount program didn't pass remote address");
out_mountproto_mismatch:
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 11bf15800ac9..b9d0921cb4fe 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -62,7 +62,6 @@
/* Default is to see 64-bit inode numbers */
static bool enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED;
-static void nfs_invalidate_inode(struct inode *);
static int nfs_update_inode(struct inode *, struct nfs_fattr *);
static struct kmem_cache * nfs_inode_cachep;
@@ -284,10 +283,18 @@ EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
* Invalidate, but do not unhash, the inode.
* NB: must be called with inode->i_lock held!
*/
-static void nfs_invalidate_inode(struct inode *inode)
+static void nfs_set_inode_stale_locked(struct inode *inode)
{
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
nfs_zap_caches_locked(inode);
+ trace_nfs_set_inode_stale(inode);
+}
+
+void nfs_set_inode_stale(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ nfs_set_inode_stale_locked(inode);
+ spin_unlock(&inode->i_lock);
}
struct nfs_find_desc {
@@ -959,16 +966,16 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
struct file *filp)
{
struct nfs_open_context *ctx;
- const struct cred *cred = get_current_cred();
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- put_cred(cred);
+ if (!ctx)
return ERR_PTR(-ENOMEM);
- }
nfs_sb_active(dentry->d_sb);
ctx->dentry = dget(dentry);
- ctx->cred = cred;
+ if (filp)
+ ctx->cred = get_cred(filp->f_cred);
+ else
+ ctx->cred = get_current_cred();
ctx->ll_cred = NULL;
ctx->state = NULL;
ctx->mode = f_mode;
@@ -1163,9 +1170,10 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
status = 0;
break;
case -ESTALE:
- nfs_zap_caches(inode);
if (!S_ISDIR(inode->i_mode))
- set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ nfs_set_inode_stale(inode);
+ else
+ nfs_zap_caches(inode);
}
goto err_out;
}
@@ -2064,7 +2072,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
* lookup validation will know that the inode is bad.
* (But we fall through to invalidate the caches.)
*/
- nfs_invalidate_inode(inode);
+ nfs_set_inode_stale_locked(inode);
return -ESTALE;
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index f80c47d5ff27..1f32a9fbfdaf 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -274,12 +274,6 @@ void nfs_free_request(struct nfs_page *req);
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc);
-static inline bool nfs_pgio_has_mirroring(struct nfs_pageio_descriptor *desc)
-{
- WARN_ON_ONCE(desc->pg_mirror_count < 1);
- return desc->pg_mirror_count > 1;
-}
-
static inline bool nfs_match_open_context(const struct nfs_open_context *ctx1,
const struct nfs_open_context *ctx2)
{
@@ -417,7 +411,9 @@ extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
-
+extern int nfs_client_for_each_server(struct nfs_client *clp,
+ int (*fn)(struct nfs_server *, void *),
+ void *data);
/* io.c */
extern void nfs_start_io_read(struct inode *inode);
extern void nfs_end_io_read(struct inode *inode);
@@ -515,13 +511,25 @@ int nfs_filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
#ifdef CONFIG_NFS_V4_1
+static inline void
+pnfs_bucket_clear_pnfs_ds_commit_verifiers(struct pnfs_commit_bucket *buckets,
+ unsigned int nbuckets)
+{
+ unsigned int i;
+
+ for (i = 0; i < nbuckets; i++)
+ buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
+}
static inline
void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
{
- int i;
+ struct pnfs_commit_array *array;
- for (i = 0; i < cinfo->nbuckets; i++)
- cinfo->buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
+ rcu_read_lock();
+ list_for_each_entry_rcu(array, &cinfo->commits, cinfo_list)
+ pnfs_bucket_clear_pnfs_ds_commit_verifiers(array->buckets,
+ array->nbuckets);
+ rcu_read_unlock();
}
#else
static inline
@@ -542,6 +550,14 @@ nfs_write_verifier_cmp(const struct nfs_write_verifier *v1,
return memcmp(v1->data, v2->data, sizeof(v1->data));
}
+static inline bool
+nfs_write_match_verf(const struct nfs_writeverf *verf,
+ struct nfs_page *req)
+{
+ return verf->committed > NFS_UNSTABLE &&
+ !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier);
+}
+
/* unlink.c */
extern struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index f3ece8ed3203..6b063227e34e 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -145,6 +145,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
struct nfs_client *client = server->nfs_client;
+ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
int ret;
if (IS_ROOT(path->dentry))
@@ -190,12 +191,12 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (IS_ERR(mnt))
goto out_fc;
- if (nfs_mountpoint_expiry_timeout < 0)
+ mntget(mnt); /* prevent immediate expiration */
+ if (timeout <= 0)
goto out_fc;
- mntget(mnt); /* prevent immediate expiration */
mnt_set_expiry(mnt, &nfs_automount_list);
- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+ schedule_delayed_work(&nfs_automount_task, timeout);
out_fc:
put_fs_context(fc);
@@ -233,10 +234,11 @@ const struct inode_operations nfs_referral_inode_operations = {
static void nfs_expire_automounts(struct work_struct *work)
{
struct list_head *list = &nfs_automount_list;
+ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
mark_mounts_for_expiry(list);
- if (!list_empty(list))
- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+ if (!list_empty(list) && timeout > 0)
+ schedule_delayed_work(&nfs_automount_task, timeout);
}
void nfs_release_automount_timer(void)
@@ -247,10 +249,7 @@ void nfs_release_automount_timer(void)
/**
* nfs_do_submount - set up mountpoint when crossing a filesystem boundary
- * @dentry: parent directory
- * @fh: filehandle for new root dentry
- * @fattr: attributes for new root inode
- * @authflavor: security flavor to use when performing the mount
+ * @fc: pointer to struct nfs_fs_context
*
*/
int nfs_do_submount(struct fs_context *fc)
@@ -312,3 +311,53 @@ int nfs_submount(struct fs_context *fc, struct nfs_server *server)
return nfs_do_submount(fc);
}
EXPORT_SYMBOL_GPL(nfs_submount);
+
+static int param_set_nfs_timeout(const char *val, const struct kernel_param *kp)
+{
+ long num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+ ret = kstrtol(val, 0, &num);
+ if (ret)
+ return -EINVAL;
+ if (num > 0) {
+ if (num >= INT_MAX / HZ)
+ num = INT_MAX;
+ else
+ num *= HZ;
+ *((int *)kp->arg) = num;
+ if (!list_empty(&nfs_automount_list))
+ mod_delayed_work(system_wq, &nfs_automount_task, num);
+ } else {
+ *((int *)kp->arg) = -1*HZ;
+ cancel_delayed_work(&nfs_automount_task);
+ }
+ return 0;
+}
+
+static int param_get_nfs_timeout(char *buffer, const struct kernel_param *kp)
+{
+ long num = *((int *)kp->arg);
+
+ if (num > 0) {
+ if (num >= INT_MAX - (HZ - 1))
+ num = INT_MAX / HZ;
+ else
+ num = (num + (HZ - 1)) / HZ;
+ } else
+ num = -1;
+ return scnprintf(buffer, PAGE_SIZE, "%li\n", num);
+}
+
+static const struct kernel_param_ops param_ops_nfs_timeout = {
+ .set = param_set_nfs_timeout,
+ .get = param_get_nfs_timeout,
+};
+#define param_check_nfs_timeout(name, p) __param_check(name, p, int);
+
+module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644);
+MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout,
+ "Set the NFS automounted mountpoint timeout value (seconds)."
+ "Values <= 0 turn expiration off.");
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8be1ba7c62bb..2b7f6dcd2eb8 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -42,7 +42,9 @@ enum nfs4_client_state {
NFS4CLNT_LEASE_MOVED,
NFS4CLNT_DELEGATION_EXPIRED,
NFS4CLNT_RUN_MANAGER,
- NFS4CLNT_DELEGRETURN_RUNNING,
+ NFS4CLNT_RECALL_RUNNING,
+ NFS4CLNT_RECALL_ANY_LAYOUT_READ,
+ NFS4CLNT_RECALL_ANY_LAYOUT_RW,
};
#define NFS4_RENEW_TIMEOUT 0x01
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 1297919e0fce..8e5d6223ddd3 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -252,6 +252,9 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
if (remap_flags & ~REMAP_FILE_ADVISORY)
return -EINVAL;
+ if (IS_SWAPFILE(dst_inode) || IS_SWAPFILE(src_inode))
+ return -ETXTBSY;
+
/* check alignment w.r.t. clone_blksize */
ret = -EINVAL;
if (bs) {
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 84026e7b8a5f..a3ab6e219061 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -354,7 +354,7 @@ static int try_location(struct fs_context *fc,
/**
* nfs_follow_referral - set up mountpoint when hitting a referral on moved error
- * @dentry: parent directory
+ * @fc: pointer to struct nfs_fs_context
* @locations: array of NFSv4 server location information
*
*/
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cb34e840e4fb..512afb1c7867 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2346,7 +2346,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
.callback_ops = &nfs4_open_confirm_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
int status;
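Note: RPC_TASK_CRED_NOREF, added to several rpc_task_setup flag fields in this file, tells the RPC layer that the caller guarantees the credential remains valid for the task's whole lifetime, allowing it to skip its own get_cred()/put_cred() pair; these call sites qualify because the open, close, lock and layout state they operate on already holds a credential reference.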
@@ -2511,7 +2511,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
.callback_ops = &nfs4_open_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
int status;
@@ -2790,16 +2790,19 @@ static int nfs41_check_delegation_stateid(struct nfs4_state *state)
return NFS_OK;
}
+ spin_lock(&delegation->lock);
nfs4_stateid_copy(&stateid, &delegation->stateid);
if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
&delegation->flags)) {
+ spin_unlock(&delegation->lock);
rcu_read_unlock();
return NFS_OK;
}
if (delegation->cred)
cred = get_cred(delegation->cred);
+ spin_unlock(&delegation->lock);
rcu_read_unlock();
status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
trace_nfs4_test_delegation_stateid(state, NULL, status);
@@ -3651,7 +3654,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
.rpc_message = &msg,
.callback_ops = &nfs4_close_ops,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
int status = -ENOMEM;
@@ -5544,7 +5547,7 @@ unwind:
struct nfs4_cached_acl {
int cached;
size_t len;
- char data[0];
+ char data[];
};
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
@@ -6253,6 +6256,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
/* Fallthrough */
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_STALE_STATEID:
+ case -ETIMEDOUT:
task->tk_status = 0;
break;
case -NFS4ERR_OLD_STATEID:
@@ -6343,7 +6347,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_delegreturn_ops,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | RPC_TASK_TIMEOUT,
};
int status = 0;
@@ -6926,7 +6930,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
.rpc_message = &msg,
.callback_ops = &nfs4_lock_ops,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
int ret;
@@ -9170,7 +9174,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
.rpc_message = &msg,
.callback_ops = &nfs4_layoutget_call_ops,
.callback_data = lgp,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
struct pnfs_layout_segment *lseg = NULL;
struct nfs4_exception exception = {
@@ -9287,6 +9291,7 @@ static void nfs4_layoutreturn_release(void *calldata)
lrp->ld_private.ops->free(&lrp->ld_private);
pnfs_put_layout_hdr(lrp->args.layout);
nfs_iput_and_deactive(lrp->inode);
+ put_cred(lrp->cred);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
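
One nfs4proc.c hunk converts struct nfs4_cached_acl from the GCC zero-length-array extension to a C99 flexible array member. A hedged sketch of the idiom, using the kernel's struct_size() helper for the matching allocation (names hypothetical):

struct example_acl {
	int	cached;
	size_t	len;
	char	data[];		/* flexible array member, must be last */
};

static struct example_acl *example_acl_alloc(size_t len)
{
	/* one allocation covers the header plus 'len' payload bytes,
	 * with overflow checking done by struct_size() */
	struct example_acl *acl = kmalloc(struct_size(acl, data, len),
					  GFP_KERNEL);

	if (acl)
		acl->len = len;
	return acl;
}
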
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f7723d221945..ac93715c05a4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2524,6 +2524,21 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
}
return 0;
}
+
+static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
+{
+ int iomode = 0;
+
+ if (test_and_clear_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &clp->cl_state))
+ iomode += IOMODE_READ;
+ if (test_and_clear_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &clp->cl_state))
+ iomode += IOMODE_RW;
+ /* Note: IOMODE_READ + IOMODE_RW == IOMODE_ANY */
+ if (iomode) {
+ pnfs_layout_return_unused_byclid(clp, iomode);
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+ }
+}
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
@@ -2531,6 +2546,10 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
return 0;
}
+
+static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
+{
+}
#endif /* CONFIG_NFS_V4_1 */
static void nfs4_state_manager(struct nfs_client *clp)
@@ -2635,12 +2654,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
- if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
+ if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
}
- clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state);
+ nfs4_layoutreturn_any_run(clp);
+ clear_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state);
}
/* Did we race with an attempt to give us more work? */
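
nfs4_layoutreturn_any_run() above builds the iomode by adding the two recall flags together, which works because of how the pnfs iomode values are encoded (mainline values shown; the comment is added here for illustration):

enum pnfs_iomode {
	IOMODE_READ	= 1,
	IOMODE_RW	= 2,
	IOMODE_ANY	= 3,	/* IOMODE_READ + IOMODE_RW == IOMODE_ANY */
};
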
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 1e97e5e04cb4..543541173a3d 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -584,7 +584,9 @@ TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
-TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_RUNNING);
+TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_RUNNING);
+TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_READ);
+TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_RW);
#define show_nfs4_clp_state(state) \
__print_flags(state, "|", \
@@ -605,7 +607,9 @@ TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_RUNNING);
{ NFS4CLNT_LEASE_MOVED, "LEASE_MOVED" }, \
{ NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \
{ NFS4CLNT_RUN_MANAGER, "RUN_MANAGER" }, \
- { NFS4CLNT_DELEGRETURN_RUNNING, "DELEGRETURN_RUNNING" })
+ { NFS4CLNT_RECALL_RUNNING, "RECALL_RUNNING" }, \
+ { NFS4CLNT_RECALL_ANY_LAYOUT_READ, "RECALL_ANY_LAYOUT_READ" }, \
+ { NFS4CLNT_RECALL_ANY_LAYOUT_RW, "RECALL_ANY_LAYOUT_RW" })
TRACE_EVENT(nfs4_state_mgr,
TP_PROTO(
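
The nfs4trace.h hunk keeps two tables in lockstep: TRACE_DEFINE_ENUM() exports each flag's value so trace parsers can resolve it, and the __print_flags() list renders it symbolically. A generic sketch of the pairing, with hypothetical names:

TRACE_DEFINE_ENUM(EXAMPLE_FLAG);	/* make the value visible to tracefs */

#define show_example_flags(state)			\
	__print_flags(state, "|",			\
		{ BIT(EXAMPLE_FLAG), "EXAMPLE_FLAG" })
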
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index effaa4247b91..8d3278805602 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -88,7 +88,7 @@
#define NFS_ROOT "/tftpboot/%s"
/* Default NFSROOT mount options. */
-#define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096"
+#define NFS_DEF_OPTIONS "vers=2,tcp,rsize=4096,wsize=4096"
/* Parameters passed from the kernel command line */
static char nfs_root_parms[NFS_MAXPATHLEN + 1] __initdata = "";
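
The nfsroot.c change flips the built-in NFSROOT transport default from UDP to TCP. Systems that still need the old behaviour can spell the options out on the kernel command line; per the kernel's nfsroot documentation the parameter has the rough shape

    nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]

so, for example (address and path hypothetical):

    nfsroot=192.168.1.1:/srv/rootfs,vers=2,udp,rsize=4096,wsize=4096
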
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index a9588d19a5ae..7e7a97ae21ed 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -181,6 +181,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
int error \
), \
TP_ARGS(inode, error))
+DEFINE_NFS_INODE_EVENT(nfs_set_inode_stale);
DEFINE_NFS_INODE_EVENT(nfs_refresh_inode_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_refresh_inode_exit);
DEFINE_NFS_INODE_EVENT(nfs_revalidate_inode_enter);
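
The new nfs_set_inode_stale tracepoint added in nfstrace.h can be watched through tracefs like any other event in the nfs group; an illustrative session (paths assume tracefs is mounted in the usual location):

    echo 1 > /sys/kernel/tracing/events/nfs/nfs_set_inode_stale/enable
    cat /sys/kernel/tracing/trace_pipe
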
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 20b3717cd7ca..f61f96603df7 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -33,9 +33,7 @@ static const struct rpc_call_ops nfs_pgio_common_ops;
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
- return nfs_pgio_has_mirroring(desc) ?
- &desc->pg_mirrors[desc->pg_mirror_idx] :
- &desc->pg_mirrors[0];
+ return &desc->pg_mirrors[desc->pg_mirror_idx];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
@@ -133,47 +131,166 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
/*
- * nfs_page_group_lock - lock the head of the page group
- * @req - request in group that is to be locked
+ * nfs_page_group_lock_head - lock the head of the page group
+ * @req: any member of the page group
+ */
+struct nfs_page *
+nfs_page_group_lock_head(struct nfs_page *req)
+{
+ struct nfs_page *head = req->wb_head;
+
+ while (!nfs_lock_request(head)) {
+ int ret = nfs_wait_on_request(head);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+ if (head != req)
+ kref_get(&head->wb_kref);
+ return head;
+}
+
+/*
+ * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
+ * @head: head request of page group, must be holding head lock
+ * @req: request that couldn't lock and needs to wait on the req bit lock
*
- * this lock must be held when traversing or modifying the page
- * group list
+ * This is a helper function for nfs_lock_and_join_requests.
+ */
+static void
+nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
+{
+ struct nfs_page *tmp;
+
+ /* relinquish all the locks successfully grabbed this run */
+ for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
+ if (!kref_read(&tmp->wb_kref))
+ continue;
+ nfs_unlock_and_release_request(tmp);
+ }
+}
+
+/*
+ * nfs_page_group_lock_subreq - try to lock a subrequest
+ * @head: head request of page group
+ * @subreq: request to lock
*
- * return 0 on success, < 0 on error
+ * This is a helper function for nfs_lock_and_join_requests which
+ * must be called with the head request and page group both locked.
+ * On error, it returns with the page group unlocked.
*/
-int
-nfs_page_group_lock(struct nfs_page *req)
+static int
+nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
- struct nfs_page *head = req->wb_head;
+ int ret;
+
+ if (!kref_get_unless_zero(&subreq->wb_kref))
+ return 0;
+ while (!nfs_lock_request(subreq)) {
+ nfs_page_group_unlock(head);
+ ret = nfs_wait_on_request(subreq);
+ if (!ret)
+ ret = nfs_page_group_lock(head);
+ if (ret < 0) {
+ nfs_unroll_locks(head, subreq);
+ nfs_release_request(subreq);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/*
+ * nfs_page_group_lock_subrequests - try to lock the subrequests
+ * @head: head request of page group
+ *
+ * This is a helper function for nfs_lock_and_join_requests which
+ * must be called with the head request locked.
+ */
+int nfs_page_group_lock_subrequests(struct nfs_page *head)
+{
+ struct nfs_page *subreq;
+ int ret;
- WARN_ON_ONCE(head != head->wb_head);
+ ret = nfs_page_group_lock(head);
+ if (ret < 0)
+ return ret;
+ /* lock each request in the page group */
+ for (subreq = head->wb_this_page; subreq != head;
+ subreq = subreq->wb_this_page) {
+ ret = nfs_page_group_lock_subreq(head, subreq);
+ if (ret < 0)
+ return ret;
+ }
+ nfs_page_group_unlock(head);
+ return 0;
+}
- if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+/*
+ * nfs_page_set_headlock - set the request PG_HEADLOCK
+ * @req: request that is to be locked
+ *
+ * this lock must be held when modifying req->wb_head
+ *
+ * return 0 on success, < 0 on error
+ */
+int
+nfs_page_set_headlock(struct nfs_page *req)
+{
+ if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
return 0;
- set_bit(PG_CONTENDED1, &head->wb_flags);
+ set_bit(PG_CONTENDED1, &req->wb_flags);
smp_mb__after_atomic();
- return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+ return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
}
/*
- * nfs_page_group_unlock - unlock the head of the page group
- * @req - request in group that is to be unlocked
+ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
+ * @req: request that is to be unlocked
*/
void
-nfs_page_group_unlock(struct nfs_page *req)
+nfs_page_clear_headlock(struct nfs_page *req)
{
- struct nfs_page *head = req->wb_head;
-
- WARN_ON_ONCE(head != head->wb_head);
-
smp_mb__before_atomic();
- clear_bit(PG_HEADLOCK, &head->wb_flags);
+ clear_bit(PG_HEADLOCK, &req->wb_flags);
smp_mb__after_atomic();
- if (!test_bit(PG_CONTENDED1, &head->wb_flags))
+ if (!test_bit(PG_CONTENDED1, &req->wb_flags))
return;
- wake_up_bit(&head->wb_flags, PG_HEADLOCK);
+ wake_up_bit(&req->wb_flags, PG_HEADLOCK);
+}
+
+/*
+ * nfs_page_group_lock - lock the head of the page group
+ * @req: request in group that is to be locked
+ *
+ * this lock must be held when traversing or modifying the page
+ * group list
+ *
+ * return 0 on success, < 0 on error
+ */
+int
+nfs_page_group_lock(struct nfs_page *req)
+{
+ int ret;
+
+ ret = nfs_page_set_headlock(req);
+ if (ret || req->wb_head == req)
+ return ret;
+ return nfs_page_set_headlock(req->wb_head);
+}
+
+/*
+ * nfs_page_group_unlock - unlock the head of the page group
+ * @req: request in group that is to be unlocked
+ */
+void
+nfs_page_group_unlock(struct nfs_page *req)
+{
+ if (req != req->wb_head)
+ nfs_page_clear_headlock(req->wb_head);
+ nfs_page_clear_headlock(req);
}
/*
@@ -359,15 +476,23 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
}
static struct nfs_page *
-nfs_create_subreq(struct nfs_page *req, struct nfs_page *last,
- unsigned int pgbase, unsigned int offset,
+nfs_create_subreq(struct nfs_page *req,
+ unsigned int pgbase,
+ unsigned int offset,
unsigned int count)
{
+ struct nfs_page *last;
struct nfs_page *ret;
ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
pgbase, offset, count);
if (!IS_ERR(ret)) {
+ /* find the last request */
+ for (last = req->wb_head;
+ last->wb_this_page != req->wb_head;
+ last = last->wb_this_page)
+ ;
+
nfs_lock_request(ret);
ret->wb_index = req->wb_index;
nfs_page_group_init(ret, last);
@@ -627,9 +752,8 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
.callback_ops = call_ops,
.callback_data = hdr,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC | flags,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | flags,
};
- int ret = 0;
hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
@@ -641,18 +765,10 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
(unsigned long long)hdr->args.offset);
task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- ret = PTR_ERR(task);
- goto out;
- }
- if (how & FLUSH_SYNC) {
- ret = rpc_wait_for_completion_task(task);
- if (ret == 0)
- ret = task->tk_status;
- }
+ if (IS_ERR(task))
+ return PTR_ERR(task);
rpc_put_task(task);
-out:
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
@@ -886,15 +1002,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
pgio->pg_mirror_count = mirror_count;
}
-/*
- * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
- */
-void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
-{
- pgio->pg_mirror_count = 1;
- pgio->pg_mirror_idx = 0;
-}
-
static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
pgio->pg_mirror_count = 1;
@@ -911,7 +1018,7 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
}
/**
- * nfs_can_coalesce_requests - test two requests for compatibility
+ * nfs_coalesce_size - test two requests for compatibility
* @prev: pointer to nfs_page
* @req: pointer to nfs_page
* @pgio: pointer to nfs_pageio_descriptor
@@ -920,41 +1027,36 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
* page data area they describe is contiguous, and that their RPC
* credentials, NFSv4 open state, and lockowners are the same.
*
- * Return 'true' if this is the case, else return 'false'.
+ * Returns the size of the request that can be coalesced
*/
-static bool nfs_can_coalesce_requests(struct nfs_page *prev,
+static unsigned int nfs_coalesce_size(struct nfs_page *prev,
struct nfs_page *req,
struct nfs_pageio_descriptor *pgio)
{
- size_t size;
struct file_lock_context *flctx;
if (prev) {
if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
- return false;
+ return 0;
flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
if (flctx != NULL &&
!(list_empty_careful(&flctx->flc_posix) &&
list_empty_careful(&flctx->flc_flock)) &&
!nfs_match_lock_context(req->wb_lock_context,
prev->wb_lock_context))
- return false;
+ return 0;
if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
- return false;
+ return 0;
if (req->wb_page == prev->wb_page) {
if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
- return false;
+ return 0;
} else {
if (req->wb_pgbase != 0 ||
prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
- return false;
+ return 0;
}
}
- size = pgio->pg_ops->pg_test(pgio, prev, req);
- WARN_ON_ONCE(size > req->wb_bytes);
- if (size && size < req->wb_bytes)
- req->wb_bytes = size;
- return size > 0;
+ return pgio->pg_ops->pg_test(pgio, prev, req);
}
/**
@@ -962,15 +1064,16 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
* @desc: destination io descriptor
* @req: request
*
- * Returns true if the request 'req' was successfully coalesced into the
- * existing list of pages 'desc'.
+ * If the request 'req' was successfully coalesced into the existing list
+ * of pages 'desc', it returns the size of req.
*/
-static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
- struct nfs_page *req)
+static unsigned int
+nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
{
struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
struct nfs_page *prev = NULL;
+ unsigned int size;
if (mirror->pg_count != 0) {
prev = nfs_list_entry(mirror->pg_list.prev);
@@ -990,11 +1093,12 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
return 0;
}
- if (!nfs_can_coalesce_requests(prev, req, desc))
- return 0;
+ size = nfs_coalesce_size(prev, req, desc);
+ if (size < req->wb_bytes)
+ return size;
nfs_list_move_request(req, &mirror->pg_list);
mirror->pg_count += req->wb_bytes;
- return 1;
+ return req->wb_bytes;
}
/*
@@ -1034,7 +1138,8 @@ nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
* @req: request
*
* This may split a request into subrequests which are all part of the
- * same page group.
+ * same page group. If so, it will submit @req as the last one, to ensure
+ * the pointer to @req is still valid in case of failure.
*
* Returns true if the request 'req' was successfully coalesced into the
* existing list of pages 'desc'.
@@ -1043,51 +1148,50 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
-
struct nfs_page *subreq;
- unsigned int bytes_left = 0;
- unsigned int offset, pgbase;
+ unsigned int size, subreq_size;
nfs_page_group_lock(req);
subreq = req;
- bytes_left = subreq->wb_bytes;
- offset = subreq->wb_offset;
- pgbase = subreq->wb_pgbase;
-
- do {
- if (!nfs_pageio_do_add_request(desc, subreq)) {
- /* make sure pg_test call(s) did nothing */
- WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
- WARN_ON_ONCE(subreq->wb_offset != offset);
- WARN_ON_ONCE(subreq->wb_pgbase != pgbase);
-
+ subreq_size = subreq->wb_bytes;
+ for (;;) {
+ size = nfs_pageio_do_add_request(desc, subreq);
+ if (size == subreq_size) {
+ /* We successfully submitted a request */
+ if (subreq == req)
+ break;
+ req->wb_pgbase += size;
+ req->wb_bytes -= size;
+ req->wb_offset += size;
+ subreq_size = req->wb_bytes;
+ subreq = req;
+ continue;
+ }
+ if (WARN_ON_ONCE(subreq != req)) {
+ nfs_page_group_unlock(req);
+ nfs_pageio_cleanup_request(desc, subreq);
+ subreq = req;
+ subreq_size = req->wb_bytes;
+ nfs_page_group_lock(req);
+ }
+ if (!size) {
+ /* Can't coalesce any more, so do I/O */
nfs_page_group_unlock(req);
desc->pg_moreio = 1;
nfs_pageio_doio(desc);
if (desc->pg_error < 0 || mirror->pg_recoalesce)
- goto out_cleanup_subreq;
+ return 0;
/* retry add_request for this subreq */
nfs_page_group_lock(req);
continue;
}
-
- /* check for buggy pg_test call(s) */
- WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
- WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
- WARN_ON_ONCE(subreq->wb_bytes == 0);
-
- bytes_left -= subreq->wb_bytes;
- offset += subreq->wb_bytes;
- pgbase += subreq->wb_bytes;
-
- if (bytes_left) {
- subreq = nfs_create_subreq(req, subreq, pgbase,
- offset, bytes_left);
- if (IS_ERR(subreq))
- goto err_ptr;
- }
- } while (bytes_left > 0);
+ subreq = nfs_create_subreq(req, req->wb_pgbase,
+ req->wb_offset, size);
+ if (IS_ERR(subreq))
+ goto err_ptr;
+ subreq_size = size;
+ }
nfs_page_group_unlock(req);
return 1;
@@ -1095,10 +1199,6 @@ err_ptr:
desc->pg_error = PTR_ERR(subreq);
nfs_page_group_unlock(req);
return 0;
-out_cleanup_subreq:
- if (req != subreq)
- nfs_pageio_cleanup_request(desc, subreq);
- return 0;
}
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
@@ -1167,7 +1267,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
{
u32 midx;
unsigned int pgbase, offset, bytes;
- struct nfs_page *dupreq, *lastreq;
+ struct nfs_page *dupreq;
pgbase = req->wb_pgbase;
offset = req->wb_offset;
@@ -1177,38 +1277,32 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
if (desc->pg_error < 0)
goto out_failed;
- for (midx = 0; midx < desc->pg_mirror_count; midx++) {
- if (midx) {
- nfs_page_group_lock(req);
+ /* Create the mirror instances first, and fire them off */
+ for (midx = 1; midx < desc->pg_mirror_count; midx++) {
+ nfs_page_group_lock(req);
- /* find the last request */
- for (lastreq = req->wb_head;
- lastreq->wb_this_page != req->wb_head;
- lastreq = lastreq->wb_this_page)
- ;
+ dupreq = nfs_create_subreq(req,
+ pgbase, offset, bytes);
- dupreq = nfs_create_subreq(req, lastreq,
- pgbase, offset, bytes);
-
- nfs_page_group_unlock(req);
- if (IS_ERR(dupreq)) {
- desc->pg_error = PTR_ERR(dupreq);
- goto out_failed;
- }
- } else
- dupreq = req;
+ nfs_page_group_unlock(req);
+ if (IS_ERR(dupreq)) {
+ desc->pg_error = PTR_ERR(dupreq);
+ goto out_failed;
+ }
- if (nfs_pgio_has_mirroring(desc))
- desc->pg_mirror_idx = midx;
+ desc->pg_mirror_idx = midx;
if (!nfs_pageio_add_request_mirror(desc, dupreq))
goto out_cleanup_subreq;
}
+ desc->pg_mirror_idx = 0;
+ if (!nfs_pageio_add_request_mirror(desc, req))
+ goto out_failed;
+
return 1;
out_cleanup_subreq:
- if (req != dupreq)
- nfs_pageio_cleanup_request(desc, dupreq);
+ nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
nfs_pageio_error_cleanup(desc);
return 0;
@@ -1226,8 +1320,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
u32 restore_idx = desc->pg_mirror_idx;
- if (nfs_pgio_has_mirroring(desc))
- desc->pg_mirror_idx = mirror_idx;
+ desc->pg_mirror_idx = mirror_idx;
for (;;) {
nfs_pageio_doio(desc);
if (desc->pg_error < 0 || !mirror->pg_recoalesce)
@@ -1320,6 +1413,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
}
}
+/*
+ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
+ */
+void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
+{
+ nfs_pageio_complete(pgio);
+}
+
int __init nfs_init_nfspagecache(void)
{
nfs_page_cachep = kmem_cache_create("nfs_page",
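
Much of the pagelist.c rework above revolves around the PG_HEADLOCK bit lock. The underlying idiom pairs a lock bit with a "contended" bit so the unlock path only pays for wake_up_bit() when a waiter has advertised itself. A generic sketch with hypothetical flag names:

#define EX_LOCK		0
#define EX_CONTENDED	1

static int example_bitlock(unsigned long *flags)
{
	if (!test_and_set_bit(EX_LOCK, flags))
		return 0;			/* uncontended fast path */
	set_bit(EX_CONTENDED, flags);		/* advertise that we sleep */
	smp_mb__after_atomic();
	return wait_on_bit_lock(flags, EX_LOCK, TASK_UNINTERRUPTIBLE);
}

static void example_bitunlock(unsigned long *flags)
{
	smp_mb__before_atomic();
	clear_bit(EX_LOCK, flags);
	smp_mb__after_atomic();
	if (test_bit(EX_CONTENDED, flags))	/* wake only when needed */
		wake_up_bit(flags, EX_LOCK);
}
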
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 542ea8dfd1bc..f2dc35c22964 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -268,11 +268,11 @@ pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
struct nfs_server *server = NFS_SERVER(lo->plh_inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
- if (!list_empty(&lo->plh_layouts)) {
+ if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
struct nfs_client *clp = server->nfs_client;
spin_lock(&clp->cl_lock);
- list_del_init(&lo->plh_layouts);
+ list_del_rcu(&lo->plh_layouts);
spin_unlock(&clp->cl_lock);
}
put_cred(lo->plh_lc_cred);
@@ -309,6 +309,16 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
}
}
+static struct inode *
+pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct inode *inode = igrab(lo->plh_inode);
+ if (inode)
+ return inode;
+ set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
+ return NULL;
+}
+
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
u32 seq)
@@ -496,6 +506,7 @@ pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
{
INIT_LIST_HEAD(&lseg->pls_list);
INIT_LIST_HEAD(&lseg->pls_lc_list);
+ INIT_LIST_HEAD(&lseg->pls_commits);
refcount_set(&lseg->pls_refcount, 1);
set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
lseg->pls_layout = lo;
@@ -782,9 +793,10 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
/* If the sb is being destroyed, just bail */
if (!nfs_sb_active(server->super))
break;
- inode = igrab(lo->plh_inode);
+ inode = pnfs_grab_inode_layout_hdr(lo);
if (inode != NULL) {
- list_del_init(&lo->plh_layouts);
+ if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
+ list_del_rcu(&lo->plh_layouts);
if (pnfs_layout_add_bulk_destroy_list(inode,
layout_list))
continue;
@@ -794,7 +806,6 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
} else {
rcu_read_unlock();
spin_unlock(&clp->cl_lock);
- set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
}
nfs_sb_deactive(server->super);
spin_lock(&clp->cl_lock);
@@ -903,10 +914,21 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
pnfs_destroy_layouts_byclid(clp, false);
}
+static void
+pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
+{
+ const struct cred *old;
+
+ if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) {
+ old = xchg(&lo->plh_lc_cred, get_cred(cred));
+ put_cred(old);
+ }
+}
+
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
- bool update_barrier)
+ const struct cred *cred, bool update_barrier)
{
u32 oldseq, newseq, new_barrier = 0;
@@ -914,6 +936,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
newseq = be32_to_cpu(new->seqid);
if (!pnfs_layout_is_valid(lo)) {
+ pnfs_set_layout_cred(lo, cred);
nfs4_stateid_copy(&lo->plh_stateid, new);
lo->plh_barrier = newseq;
pnfs_clear_layoutreturn_info(lo);
@@ -1061,7 +1084,7 @@ pnfs_alloc_init_layoutget_args(struct inode *ino,
lgp->args.ctx = get_nfs_open_context(ctx);
nfs4_stateid_copy(&lgp->args.stateid, stateid);
lgp->gfp_flags = gfp_flags;
- lgp->cred = get_cred(ctx->cred);
+ lgp->cred = ctx->cred;
return lgp;
}
@@ -1072,7 +1095,6 @@ void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
nfs4_free_pages(lgp->args.layout.pages, max_pages);
if (lgp->args.inode)
pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
- put_cred(lgp->cred);
put_nfs_open_context(lgp->args.ctx);
kfree(lgp);
}
@@ -1109,7 +1131,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
pnfs_free_returned_lsegs(lo, &freeme, range, seq);
- pnfs_set_layout_stateid(lo, stateid, true);
+ pnfs_set_layout_stateid(lo, stateid, NULL, true);
} else
pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
@@ -1122,6 +1144,7 @@ out_unlock:
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
nfs4_stateid *stateid,
+ const struct cred **cred,
enum pnfs_iomode *iomode)
{
/* Serialise LAYOUTGET/LAYOUTRETURN */
@@ -1132,18 +1155,17 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
pnfs_get_layout_hdr(lo);
if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
- if (stateid != NULL) {
- nfs4_stateid_copy(stateid, &lo->plh_stateid);
- if (lo->plh_return_seq != 0)
- stateid->seqid = cpu_to_be32(lo->plh_return_seq);
- }
+ nfs4_stateid_copy(stateid, &lo->plh_stateid);
+ *cred = get_cred(lo->plh_lc_cred);
+ if (lo->plh_return_seq != 0)
+ stateid->seqid = cpu_to_be32(lo->plh_return_seq);
if (iomode != NULL)
*iomode = lo->plh_return_iomode;
pnfs_clear_layoutreturn_info(lo);
return true;
}
- if (stateid != NULL)
- nfs4_stateid_copy(stateid, &lo->plh_stateid);
+ nfs4_stateid_copy(stateid, &lo->plh_stateid);
+ *cred = get_cred(lo->plh_lc_cred);
if (iomode != NULL)
*iomode = IOMODE_ANY;
return true;
@@ -1167,20 +1189,26 @@ pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
}
static int
-pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
- enum pnfs_iomode iomode, bool sync)
+pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *stateid,
+ const struct cred **pcred,
+ enum pnfs_iomode iomode,
+ bool sync)
{
struct inode *ino = lo->plh_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
struct nfs4_layoutreturn *lrp;
+ const struct cred *cred = *pcred;
int status = 0;
+ *pcred = NULL;
lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
if (unlikely(lrp == NULL)) {
status = -ENOMEM;
spin_lock(&ino->i_lock);
pnfs_clear_layoutreturn_waitbit(lo);
spin_unlock(&ino->i_lock);
+ put_cred(cred);
pnfs_put_layout_hdr(lo);
goto out;
}
@@ -1188,7 +1216,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
lrp->args.ld_private = &lrp->ld_private;
lrp->clp = NFS_SERVER(ino)->nfs_client;
- lrp->cred = lo->plh_lc_cred;
+ lrp->cred = cred;
if (ld->prepare_layoutreturn)
ld->prepare_layoutreturn(&lrp->args);
@@ -1233,15 +1261,16 @@ static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
return;
spin_lock(&inode->i_lock);
if (pnfs_layout_need_return(lo)) {
+ const struct cred *cred;
nfs4_stateid stateid;
enum pnfs_iomode iomode;
bool send;
- send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+ send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
spin_unlock(&inode->i_lock);
if (send) {
/* Send an async layoutreturn so we dont deadlock */
- pnfs_send_layoutreturn(lo, &stateid, iomode, false);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
}
} else
spin_unlock(&inode->i_lock);
@@ -1261,6 +1290,7 @@ _pnfs_return_layout(struct inode *ino)
struct pnfs_layout_hdr *lo = NULL;
struct nfs_inode *nfsi = NFS_I(ino);
LIST_HEAD(tmp_list);
+ const struct cred *cred;
nfs4_stateid stateid;
int status = 0;
bool send, valid_layout;
@@ -1305,10 +1335,10 @@ _pnfs_return_layout(struct inode *ino)
goto out_put_layout_hdr;
}
- send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
+ send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
spin_unlock(&ino->i_lock);
if (send)
- status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
+ status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
out_put_layout_hdr:
pnfs_free_lseg_list(&tmp_list);
pnfs_put_layout_hdr(lo);
@@ -1354,6 +1384,7 @@ bool pnfs_roc(struct inode *ino,
struct nfs4_state *state;
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg, *next;
+ const struct cred *lc_cred;
nfs4_stateid stateid;
enum pnfs_iomode iomode = 0;
bool layoutreturn = false, roc = false;
@@ -1423,16 +1454,20 @@ retry:
* 2. we don't send layoutreturn
*/
/* lo ref dropped in pnfs_roc_release() */
- layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+ layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
/* If the creds don't match, we can't compound the layoutreturn */
- if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
+ if (!layoutreturn)
goto out_noroc;
+ if (cred_fscmp(cred, lc_cred) != 0)
+ goto out_noroc_put_cred;
roc = layoutreturn;
pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
res->lrs_present = 0;
layoutreturn = false;
+out_noroc_put_cred:
+ put_cred(lc_cred);
out_noroc:
spin_unlock(&ino->i_lock);
rcu_read_unlock();
@@ -1445,7 +1480,7 @@ out_noroc:
return true;
}
if (layoutreturn)
- pnfs_send_layoutreturn(lo, &stateid, iomode, true);
+ pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
pnfs_put_layout_hdr(lo);
return false;
}
@@ -1859,15 +1894,14 @@ static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
static void _add_to_server_list(struct pnfs_layout_hdr *lo,
struct nfs_server *server)
{
- if (list_empty(&lo->plh_layouts)) {
+ if (!test_and_set_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
struct nfs_client *clp = server->nfs_client;
/* The lo must be on the clp list if there is any
* chance of a CB_LAYOUTRECALL(FILE) coming in.
*/
spin_lock(&clp->cl_lock);
- if (list_empty(&lo->plh_layouts))
- list_add_tail(&lo->plh_layouts, &server->layouts);
+ list_add_tail_rcu(&lo->plh_layouts, &server->layouts);
spin_unlock(&clp->cl_lock);
}
}
@@ -2323,14 +2357,14 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
if (!pnfs_layout_is_valid(lo)) {
/* We have a completely new layout */
- pnfs_set_layout_stateid(lo, &res->stateid, true);
+ pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
} else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
/* existing state ID, make sure the sequence number matches. */
if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
dprintk("%s forget reply due to sequence\n", __func__);
goto out_forget;
}
- pnfs_set_layout_stateid(lo, &res->stateid, false);
+ pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
} else {
/*
* We got an entirely new state ID. Mark all segments for the
@@ -2423,43 +2457,159 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
return -ENOENT;
}
-void pnfs_error_mark_layout_for_return(struct inode *inode,
- struct pnfs_layout_segment *lseg)
+static void
+pnfs_mark_layout_for_return(struct inode *inode,
+ const struct pnfs_layout_range *range)
{
- struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
- struct pnfs_layout_range range = {
- .iomode = lseg->pls_range.iomode,
- .offset = 0,
- .length = NFS4_MAX_UINT64,
- };
+ struct pnfs_layout_hdr *lo;
bool return_now = false;
spin_lock(&inode->i_lock);
+ lo = NFS_I(inode)->layout;
if (!pnfs_layout_is_valid(lo)) {
spin_unlock(&inode->i_lock);
return;
}
- pnfs_set_plh_return_info(lo, range.iomode, 0);
+ pnfs_set_plh_return_info(lo, range->iomode, 0);
/*
* mark all matching lsegs so that we are sure to have no live
* segments at hand when sending layoutreturn. See pnfs_put_lseg()
* for how it works.
*/
- if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) {
+ if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
+ const struct cred *cred;
nfs4_stateid stateid;
enum pnfs_iomode iomode;
- return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+ return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
spin_unlock(&inode->i_lock);
if (return_now)
- pnfs_send_layoutreturn(lo, &stateid, iomode, false);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
} else {
spin_unlock(&inode->i_lock);
nfs_commit_inode(inode, 0);
}
}
+
+void pnfs_error_mark_layout_for_return(struct inode *inode,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_range range = {
+ .iomode = lseg->pls_range.iomode,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+
+ pnfs_mark_layout_for_return(inode, &range);
+}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
+static bool
+pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
+{
+ return pnfs_layout_is_valid(lo) &&
+ !test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) &&
+ !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
+}
+
+static struct pnfs_layout_segment *
+pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
+ const struct pnfs_layout_range *range,
+ enum pnfs_iomode iomode)
+{
+ struct pnfs_layout_segment *lseg;
+
+ list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+ if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+ continue;
+ if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+ continue;
+ if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY)
+ continue;
+ if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
+ return lseg;
+ }
+ return NULL;
+}
+
+/* Find open file states whose mode matches that of the range */
+static bool
+pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
+ const struct pnfs_layout_range *range)
+{
+ struct list_head *head;
+ struct nfs_open_context *ctx;
+ fmode_t mode = 0;
+
+ if (!pnfs_layout_can_be_returned(lo) ||
+ !pnfs_find_first_lseg(lo, range, range->iomode))
+ return false;
+
+ head = &NFS_I(lo->plh_inode)->open_files;
+ list_for_each_entry_rcu(ctx, head, list) {
+ if (ctx->state)
+ mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE);
+ }
+
+ switch (range->iomode) {
+ default:
+ break;
+ case IOMODE_READ:
+ mode &= ~FMODE_WRITE;
+ break;
+ case IOMODE_RW:
+ if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
+ mode &= ~FMODE_READ;
+ }
+ return mode == 0;
+}
+
+static int
+pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
+{
+ const struct pnfs_layout_range *range = data;
+ struct pnfs_layout_hdr *lo;
+ struct inode *inode;
+restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
+ if (!pnfs_layout_can_be_returned(lo) ||
+ test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+ continue;
+ inode = lo->plh_inode;
+ spin_lock(&inode->i_lock);
+ if (!pnfs_should_return_unused_layout(lo, range)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ spin_unlock(&inode->i_lock);
+ inode = pnfs_grab_inode_layout_hdr(lo);
+ if (!inode)
+ continue;
+ rcu_read_unlock();
+ pnfs_mark_layout_for_return(inode, range);
+ iput(inode);
+ cond_resched();
+ goto restart;
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+void
+pnfs_layout_return_unused_byclid(struct nfs_client *clp,
+ enum pnfs_iomode iomode)
+{
+ struct pnfs_layout_range range = {
+ .iomode = iomode,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+
+ nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver,
+ &range);
+}
+
void
pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
{
@@ -2475,7 +2625,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
* Check for any intersection between the request and the pgio->pg_lseg,
* and if none, put this pgio->pg_lseg away.
*/
-static void
+void
pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
@@ -2483,6 +2633,7 @@ pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page
pgio->pg_lseg = NULL;
}
}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
@@ -3000,10 +3151,10 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
end_pos = nfsi->layout->plh_lwb;
nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
+ data->cred = get_cred(nfsi->layout->plh_lc_cred);
spin_unlock(&inode->i_lock);
data->args.inode = inode;
- data->cred = get_cred(nfsi->layout->plh_lc_cred);
nfs_fattr_init(&data->fattr);
data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
data->res.fattr = &data->fattr;
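
A recurring move in the pnfs.c hunks is replacing list_empty() probes with an explicit NFS_LAYOUT_HASHED bit, making insertion into and removal from the RCU-protected server list idempotent and race-free. The generic shape of that pattern, with hypothetical names:

#define EXAMPLE_HASHED	0

struct example_obj {
	struct list_head node;
	unsigned long flags;
};

static void example_hash(struct example_obj *o, struct list_head *head,
			 spinlock_t *lock)
{
	if (!test_and_set_bit(EXAMPLE_HASHED, &o->flags)) {
		spin_lock(lock);
		list_add_tail_rcu(&o->node, head);
		spin_unlock(lock);
	}
}

static void example_unhash(struct example_obj *o, spinlock_t *lock)
{
	if (test_and_clear_bit(EXAMPLE_HASHED, &o->flags)) {
		spin_lock(lock);
		list_del_rcu(&o->node);
		spin_unlock(lock);
	}
}
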
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 0fafdadc9c8d..8e0ada581b92 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -66,6 +66,7 @@ struct nfs4_pnfs_ds {
struct pnfs_layout_segment {
struct list_head pls_list;
struct list_head pls_lc_list;
+ struct list_head pls_commits;
struct pnfs_layout_range pls_range;
refcount_t pls_refcount;
u32 pls_seq;
@@ -105,6 +106,7 @@ enum {
NFS_LAYOUT_INVALID_STID, /* layout stateid is invalid */
NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */
NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */
+ NFS_LAYOUT_HASHED, /* The layout is hashed into the server list */
};
enum layoutdriver_policy_flags {
@@ -148,22 +150,6 @@ struct pnfs_layoutdriver_type {
const struct nfs_pageio_ops *pg_write_ops;
struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
- void (*mark_request_commit) (struct nfs_page *req,
- struct pnfs_layout_segment *lseg,
- struct nfs_commit_info *cinfo,
- u32 ds_commit_idx);
- void (*clear_request_commit) (struct nfs_page *req,
- struct nfs_commit_info *cinfo);
- int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
- int max);
- void (*recover_commit_reqs) (struct list_head *list,
- struct nfs_commit_info *cinfo);
- struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo,
- struct page *page);
- int (*commit_pagelist)(struct inode *inode,
- struct list_head *mds_pages,
- int how,
- struct nfs_commit_info *cinfo);
int (*sync)(struct inode *inode, bool datasync);
@@ -186,6 +172,29 @@ struct pnfs_layoutdriver_type {
int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args);
};
+struct pnfs_commit_ops {
+ void (*setup_ds_info)(struct pnfs_ds_commit_info *,
+ struct pnfs_layout_segment *);
+ void (*release_ds_info)(struct pnfs_ds_commit_info *,
+ struct inode *inode);
+ int (*commit_pagelist)(struct inode *inode,
+ struct list_head *mds_pages,
+ int how,
+ struct nfs_commit_info *cinfo);
+ void (*mark_request_commit) (struct nfs_page *req,
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo,
+ u32 ds_commit_idx);
+ void (*clear_request_commit) (struct nfs_page *req,
+ struct nfs_commit_info *cinfo);
+ int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
+ int max);
+ void (*recover_commit_reqs) (struct list_head *list,
+ struct nfs_commit_info *cinfo);
+ struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo,
+ struct page *page);
+};
+
struct pnfs_layout_hdr {
refcount_t plh_refcount;
atomic_t plh_outstanding; /* number of RPCs out */
@@ -203,6 +212,7 @@ struct pnfs_layout_hdr {
loff_t plh_lwb; /* last write byte for layoutcommit */
const struct cred *plh_lc_cred; /* layoutcommit cred */
struct inode *plh_inode;
+ struct rcu_head plh_rcu;
};
struct pnfs_device {
@@ -242,6 +252,7 @@ void pnfs_put_lseg(struct pnfs_layout_segment *lseg);
void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *);
void unset_pnfs_layoutdriver(struct nfs_server *);
void pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio);
+void pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req);
void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *);
int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
@@ -267,6 +278,7 @@ bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
const nfs4_stateid *new,
+ const struct cred *cred,
bool update_barrier);
int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
@@ -326,6 +338,9 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
void pnfs_error_mark_layout_for_return(struct inode *inode,
struct pnfs_layout_segment *lseg);
+void pnfs_layout_return_unused_byclid(struct nfs_client *clp,
+ enum pnfs_iomode iomode);
+
/* nfs4_deviceid_flags */
enum {
NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */
@@ -360,6 +375,16 @@ bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node);
void nfs4_deviceid_purge_client(const struct nfs_client *);
/* pnfs_nfs.c */
+struct pnfs_commit_array *pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags);
+void pnfs_free_commit_array(struct pnfs_commit_array *p);
+struct pnfs_commit_array *pnfs_add_commit_array(struct pnfs_ds_commit_info *,
+ struct pnfs_commit_array *,
+ struct pnfs_layout_segment *);
+
+void pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo,
+ struct pnfs_layout_segment *lseg);
+void pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo);
+
void pnfs_generic_clear_request_commit(struct nfs_page *req,
struct nfs_commit_info *cinfo);
void pnfs_generic_commit_release(void *calldata);
@@ -367,6 +392,8 @@ void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data);
void pnfs_generic_rw_release(void *data);
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
struct nfs_commit_info *cinfo);
+struct nfs_page *pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo,
+ struct page *page);
int pnfs_generic_commit_pagelist(struct inode *inode,
struct list_head *mds_pages,
int how,
@@ -438,9 +465,11 @@ static inline int
pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
struct nfs_commit_info *cinfo)
{
- if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0)
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+
+ if (fl_cinfo == NULL || fl_cinfo->ncommitting == 0)
return PNFS_NOT_ATTEMPTED;
- return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo);
+ return fl_cinfo->ops->commit_pagelist(inode, mds_pages, how, cinfo);
}
static inline struct pnfs_ds_commit_info *
@@ -454,6 +483,28 @@ pnfs_get_ds_info(struct inode *inode)
}
static inline void
+pnfs_init_ds_commit_info_ops(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode)
+{
+ struct pnfs_ds_commit_info *inode_cinfo = pnfs_get_ds_info(inode);
+ if (inode_cinfo != NULL)
+ fl_cinfo->ops = inode_cinfo->ops;
+}
+
+static inline void
+pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo)
+{
+ INIT_LIST_HEAD(&fl_cinfo->commits);
+ fl_cinfo->ops = NULL;
+}
+
+static inline void
+pnfs_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode)
+{
+ if (fl_cinfo->ops != NULL && fl_cinfo->ops->release_ds_info != NULL)
+ fl_cinfo->ops->release_ds_info(fl_cinfo, inode);
+}
+
+static inline void
pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node)
{
set_bit(NFS_DEVICEID_INVALID, &node->flags);
@@ -463,24 +514,22 @@ static inline bool
pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
- struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
- struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
- if (lseg == NULL || ld->mark_request_commit == NULL)
+ if (!lseg || !fl_cinfo->ops->mark_request_commit)
return false;
- ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx);
+ fl_cinfo->ops->mark_request_commit(req, lseg, cinfo, ds_commit_idx);
return true;
}
static inline bool
pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
- struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
- struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
- if (ld == NULL || ld->clear_request_commit == NULL)
+ if (!fl_cinfo || !fl_cinfo->ops || !fl_cinfo->ops->clear_request_commit)
return false;
- ld->clear_request_commit(req, cinfo);
+ fl_cinfo->ops->clear_request_commit(req, cinfo);
return true;
}
@@ -488,21 +537,31 @@ static inline int
pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
int max)
{
- if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+
+ if (!fl_cinfo || fl_cinfo->nwritten == 0)
return 0;
- else
- return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
+ return fl_cinfo->ops->scan_commit_lists(cinfo, max);
+}
+
+static inline void
+pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo)
+{
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+
+ if (fl_cinfo && fl_cinfo->nwritten != 0)
+ fl_cinfo->ops->recover_commit_reqs(head, cinfo);
}
static inline struct nfs_page *
pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
struct page *page)
{
- struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
- if (ld == NULL || ld->search_commit_reqs == NULL)
+ if (!fl_cinfo->ops || !fl_cinfo->ops->search_commit_reqs)
return NULL;
- return ld->search_commit_reqs(cinfo, page);
+ return fl_cinfo->ops->search_commit_reqs(cinfo, page);
}
/* Should the pNFS client commit and return the layout upon a setattr */
@@ -750,6 +809,21 @@ pnfs_get_ds_info(struct inode *inode)
return NULL;
}
+static inline void
+pnfs_init_ds_commit_info_ops(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode)
+{
+}
+
+static inline void
+pnfs_init_ds_commit_info(struct pnfs_ds_commit_info *fl_cinfo)
+{
+}
+
+static inline void
+pnfs_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, struct inode *inode)
+{
+}
+
static inline bool
pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
struct nfs_commit_info *cinfo, u32 ds_commit_idx)
@@ -770,6 +844,11 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
return 0;
}
+static inline void
+pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo)
+{
+}
+
static inline struct nfs_page *
pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
struct page *page)
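
pnfs.h moves the commit-path callbacks out of pnfs_layoutdriver_type into a dedicated struct pnfs_commit_ops hanging off pnfs_ds_commit_info. A layout driver would now publish a table along these lines (the example_* symbols are hypothetical; the pnfs_generic_* helpers are the ones exported elsewhere in this patch):

static void example_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
				  struct pnfs_layout_segment *lseg);
static void example_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
				    struct inode *inode);

static const struct pnfs_commit_ops example_commit_ops = {
	.setup_ds_info		= example_setup_ds_info,
	.release_ds_info	= example_release_ds_info,
	.commit_pagelist	= pnfs_generic_commit_pagelist,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.search_commit_reqs	= pnfs_generic_search_commit_reqs,
};
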
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 8b37e7f8e789..25f135572fc8 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -59,6 +59,17 @@ void pnfs_generic_commit_release(void *calldata)
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
+static struct pnfs_layout_segment *
+pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)
+{
+ if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {
+ struct pnfs_layout_segment *freeme = bucket->lseg;
+ bucket->lseg = NULL;
+ return freeme;
+ }
+ return NULL;
+}
+
/* The generic layer is about to remove the req from the commit list.
* If this will make the bucket empty, it will need to put the lseg reference.
* Note this must be called holding nfsi->commit_mutex
@@ -78,8 +89,7 @@ pnfs_generic_clear_request_commit(struct nfs_page *req,
bucket = list_first_entry(&req->wb_list,
struct pnfs_commit_bucket,
written);
- freeme = bucket->wlseg;
- bucket->wlseg = NULL;
+ freeme = pnfs_free_bucket_lseg(bucket);
}
out:
nfs_request_remove_commit_list(req, cinfo);
@@ -87,10 +97,154 @@ out:
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
+struct pnfs_commit_array *
+pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags)
+{
+ struct pnfs_commit_array *p;
+ struct pnfs_commit_bucket *b;
+
+ p = kmalloc(struct_size(p, buckets, n), gfp_flags);
+ if (!p)
+ return NULL;
+ p->nbuckets = n;
+ INIT_LIST_HEAD(&p->cinfo_list);
+ INIT_LIST_HEAD(&p->lseg_list);
+ p->lseg = NULL;
+ for (b = &p->buckets[0]; n != 0; b++, n--) {
+ INIT_LIST_HEAD(&b->written);
+ INIT_LIST_HEAD(&b->committing);
+ b->lseg = NULL;
+ b->direct_verf.committed = NFS_INVALID_STABLE_HOW;
+ }
+ return p;
+}
+EXPORT_SYMBOL_GPL(pnfs_alloc_commit_array);
+
+void
+pnfs_free_commit_array(struct pnfs_commit_array *p)
+{
+ kfree_rcu(p, rcu);
+}
+EXPORT_SYMBOL_GPL(pnfs_free_commit_array);
+
+static struct pnfs_commit_array *
+pnfs_find_commit_array_by_lseg(struct pnfs_ds_commit_info *fl_cinfo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_commit_array *array;
+
+ list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
+ if (array->lseg == lseg)
+ return array;
+ }
+ return NULL;
+}
+
+struct pnfs_commit_array *
+pnfs_add_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
+ struct pnfs_commit_array *new,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_commit_array *array;
+
+ array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
+ if (array)
+ return array;
+ new->lseg = lseg;
+ refcount_set(&new->refcount, 1);
+ list_add_rcu(&new->cinfo_list, &fl_cinfo->commits);
+ list_add(&new->lseg_list, &lseg->pls_commits);
+ return new;
+}
+EXPORT_SYMBOL_GPL(pnfs_add_commit_array);
+
+static struct pnfs_commit_array *
+pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_commit_array *array;
+
+ rcu_read_lock();
+ array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
+ if (!array) {
+ rcu_read_unlock();
+ fl_cinfo->ops->setup_ds_info(fl_cinfo, lseg);
+ rcu_read_lock();
+ array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
+ }
+ rcu_read_unlock();
+ return array;
+}
+
+static void
+pnfs_release_commit_array_locked(struct pnfs_commit_array *array)
+{
+ list_del_rcu(&array->cinfo_list);
+ list_del(&array->lseg_list);
+ pnfs_free_commit_array(array);
+}
+
+static void
+pnfs_put_commit_array_locked(struct pnfs_commit_array *array)
+{
+ if (refcount_dec_and_test(&array->refcount))
+ pnfs_release_commit_array_locked(array);
+}
+
+static void
+pnfs_put_commit_array(struct pnfs_commit_array *array, struct inode *inode)
+{
+ if (refcount_dec_and_lock(&array->refcount, &inode->i_lock)) {
+ pnfs_release_commit_array_locked(array);
+ spin_unlock(&inode->i_lock);
+ }
+}
+
+static struct pnfs_commit_array *
+pnfs_get_commit_array(struct pnfs_commit_array *array)
+{
+ if (refcount_inc_not_zero(&array->refcount))
+ return array;
+ return NULL;
+}
+
+static void
+pnfs_remove_and_free_commit_array(struct pnfs_commit_array *array)
+{
+ array->lseg = NULL;
+ list_del_init(&array->lseg_list);
+ pnfs_put_commit_array_locked(array);
+}
+
+void
+pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_commit_array *array, *tmp;
+
+ list_for_each_entry_safe(array, tmp, &lseg->pls_commits, lseg_list)
+ pnfs_remove_and_free_commit_array(array);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_release_lseg);
+
+void
+pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo)
+{
+ struct pnfs_commit_array *array, *tmp;
+
+ list_for_each_entry_safe(array, tmp, &fl_cinfo->commits, cinfo_list)
+ pnfs_remove_and_free_commit_array(array);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_destroy);
+
+/*
+ * Locks the nfs_page requests for commit and moves them to
+ * @bucket->committing.
+ */
static int
-pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
- struct nfs_commit_info *cinfo,
- int max)
+pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo,
+ int max)
{
struct list_head *src = &bucket->written;
struct list_head *dst = &bucket->committing;
@@ -101,158 +255,253 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
if (ret) {
cinfo->ds->nwritten -= ret;
cinfo->ds->ncommitting += ret;
- if (bucket->clseg == NULL)
- bucket->clseg = pnfs_get_lseg(bucket->wlseg);
- if (list_empty(src)) {
- pnfs_put_lseg(bucket->wlseg);
- bucket->wlseg = NULL;
- }
}
return ret;
}
+static int pnfs_bucket_scan_array(struct nfs_commit_info *cinfo,
+ struct pnfs_commit_bucket *buckets,
+ unsigned int nbuckets,
+ int max)
+{
+ unsigned int i;
+ int rv = 0, cnt;
+
+ for (i = 0; i < nbuckets && max != 0; i++) {
+ cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max);
+ rv += cnt;
+ max -= cnt;
+ }
+ return rv;
+}
+
/* Move reqs from written to committing lists, returning count
* of number moved.
*/
-int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
- int max)
+int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max)
{
- int i, rv = 0, cnt;
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+ struct pnfs_commit_array *array;
+ int rv = 0, cnt;
- lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
- for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
- cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
- cinfo, max);
- max -= cnt;
+ rcu_read_lock();
+ list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
+ if (!array->lseg || !pnfs_get_commit_array(array))
+ continue;
+ rcu_read_unlock();
+ cnt = pnfs_bucket_scan_array(cinfo, array->buckets,
+ array->nbuckets, max);
+ rcu_read_lock();
+ pnfs_put_commit_array(array, cinfo->inode);
rv += cnt;
+ max -= cnt;
+ if (!max)
+ break;
}
+ rcu_read_unlock();
return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);
-/* Pull everything off the committing lists and dump into @dst. */
-void pnfs_generic_recover_commit_reqs(struct list_head *dst,
- struct nfs_commit_info *cinfo)
+static unsigned int
+pnfs_bucket_recover_commit_reqs(struct list_head *dst,
+ struct pnfs_commit_bucket *buckets,
+ unsigned int nbuckets,
+ struct nfs_commit_info *cinfo)
{
struct pnfs_commit_bucket *b;
struct pnfs_layout_segment *freeme;
- int nwritten;
- int i;
+ unsigned int nwritten, ret = 0;
+ unsigned int i;
- lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
restart:
- for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+ for (i = 0, b = buckets; i < nbuckets; i++, b++) {
nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
if (!nwritten)
continue;
- cinfo->ds->nwritten -= nwritten;
- if (list_empty(&b->written)) {
- freeme = b->wlseg;
- b->wlseg = NULL;
+ ret += nwritten;
+ freeme = pnfs_free_bucket_lseg(b);
+ if (freeme) {
pnfs_put_lseg(freeme);
goto restart;
}
}
+ return ret;
+}
+
+/* Pull everything off the committing lists and dump into @dst. */
+void pnfs_generic_recover_commit_reqs(struct list_head *dst,
+ struct nfs_commit_info *cinfo)
+{
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+ struct pnfs_commit_array *array;
+ unsigned int nwritten;
+
+ lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
+ if (!array->lseg || !pnfs_get_commit_array(array))
+ continue;
+ rcu_read_unlock();
+ nwritten = pnfs_bucket_recover_commit_reqs(dst,
+ array->buckets,
+ array->nbuckets,
+ cinfo);
+ rcu_read_lock();
+ pnfs_put_commit_array(array, cinfo->inode);
+ fl_cinfo->nwritten -= nwritten;
+ }
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
-static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
+static struct nfs_page *
+pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
+ unsigned int nbuckets, struct page *page)
+{
+ struct nfs_page *req;
+ struct pnfs_commit_bucket *b;
+ unsigned int i;
+
+ /* Linearly search the commit lists for each bucket until a matching
+ * request is found */
+ for (i = 0, b = buckets; i < nbuckets; i++, b++) {
+ list_for_each_entry(req, &b->written, wb_list) {
+ if (req->wb_page == page)
+ return req->wb_head;
+ }
+ list_for_each_entry(req, &b->committing, wb_list) {
+ if (req->wb_page == page)
+ return req->wb_head;
+ }
+ }
+ return NULL;
+}
+
+/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head request
+ * for @page
+ * @cinfo - commit info for current inode
+ * @page - page to search for matching head request
+ *
+ * Returns the head request if one is found, otherwise returns NULL.
+ */
+struct nfs_page *
+pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
{
struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+ struct pnfs_commit_array *array;
+ struct nfs_page *req;
+
+ list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) {
+ req = pnfs_bucket_search_commit_reqs(array->buckets,
+ array->nbuckets, page);
+ if (req)
+ return req;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_search_commit_reqs);
+
+static struct pnfs_layout_segment *
+pnfs_bucket_get_committing(struct list_head *head,
+ struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo)
+{
+ struct list_head *pos;
+
+ list_for_each(pos, &bucket->committing)
+ cinfo->ds->ncommitting--;
+ list_splice_init(&bucket->committing, head);
+ return pnfs_free_bucket_lseg(bucket);
+}
+
+static struct nfs_commit_data *
+pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo)
+{
+ struct nfs_commit_data *data = nfs_commitdata_alloc(false);
+
+ if (!data)
+ return NULL;
+ data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
+ if (!data->lseg)
+ data->lseg = pnfs_get_lseg(bucket->lseg);
+ return data;
+}
+
+static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets,
+ unsigned int nbuckets,
+ struct nfs_commit_info *cinfo,
+ unsigned int idx)
+{
struct pnfs_commit_bucket *bucket;
struct pnfs_layout_segment *freeme;
- struct list_head *pos;
LIST_HEAD(pages);
- int i;
- mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
- for (i = idx; i < fl_cinfo->nbuckets; i++) {
- bucket = &fl_cinfo->buckets[i];
+ for (bucket = buckets; idx < nbuckets; bucket++, idx++) {
if (list_empty(&bucket->committing))
continue;
- freeme = bucket->clseg;
- bucket->clseg = NULL;
- list_for_each(pos, &bucket->committing)
- cinfo->ds->ncommitting--;
- list_splice_init(&bucket->committing, &pages);
+ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
+ freeme = pnfs_bucket_get_committing(&pages, bucket, cinfo);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
- nfs_retry_commit(&pages, freeme, cinfo, i);
+ nfs_retry_commit(&pages, freeme, cinfo, idx);
pnfs_put_lseg(freeme);
- mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
}
- mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
static unsigned int
-pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
- struct list_head *list)
+pnfs_bucket_alloc_ds_commits(struct list_head *list,
+ struct pnfs_commit_bucket *buckets,
+ unsigned int nbuckets,
+ struct nfs_commit_info *cinfo)
{
- struct pnfs_ds_commit_info *fl_cinfo;
struct pnfs_commit_bucket *bucket;
struct nfs_commit_data *data;
- int i;
+ unsigned int i;
unsigned int nreq = 0;
- fl_cinfo = cinfo->ds;
- bucket = fl_cinfo->buckets;
- for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
+ for (i = 0, bucket = buckets; i < nbuckets; i++, bucket++) {
if (list_empty(&bucket->committing))
continue;
- data = nfs_commitdata_alloc(false);
- if (!data)
- break;
- data->ds_commit_index = i;
- list_add(&data->pages, list);
- nreq++;
+ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
+ if (!list_empty(&bucket->committing)) {
+ data = pnfs_bucket_fetch_commitdata(bucket, cinfo);
+ if (!data)
+ goto out_error;
+ data->ds_commit_index = i;
+ list_add_tail(&data->list, list);
+ atomic_inc(&cinfo->mds->rpcs_out);
+ nreq++;
+ }
+ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
-
+ return nreq;
+out_error:
+ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
/* Clean up on error */
- pnfs_generic_retry_commit(cinfo, i);
+ pnfs_generic_retry_commit(buckets, nbuckets, cinfo, i);
return nreq;
}
-static inline
-void pnfs_fetch_commit_bucket_list(struct list_head *pages,
- struct nfs_commit_data *data,
- struct nfs_commit_info *cinfo)
+static unsigned int
+pnfs_alloc_ds_commits_list(struct list_head *list,
+ struct pnfs_ds_commit_info *fl_cinfo,
+ struct nfs_commit_info *cinfo)
{
- struct pnfs_commit_bucket *bucket;
- struct list_head *pos;
-
- bucket = &cinfo->ds->buckets[data->ds_commit_index];
- mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
- list_for_each(pos, &bucket->committing)
- cinfo->ds->ncommitting--;
- list_splice_init(&bucket->committing, pages);
- data->lseg = bucket->clseg;
- bucket->clseg = NULL;
- mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
-
-}
+ struct pnfs_commit_array *array;
+ unsigned int ret = 0;
-/* Helper function for pnfs_generic_commit_pagelist to catch an empty
- * page list. This can happen when two commits race.
- *
- * This must be called instead of nfs_init_commit - call one or the other, but
- * not both!
- */
-static bool
-pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
- struct nfs_commit_data *data,
- struct nfs_commit_info *cinfo)
-{
- if (list_empty(pages)) {
- if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
- wake_up_var(&cinfo->mds->rpcs_out);
- /* don't call nfs_commitdata_release - it tries to put
- * the open_context which is not acquired until nfs_init_commit
- * which has not been called on @data */
- WARN_ON_ONCE(data->context);
- nfs_commit_free(data);
- return true;
+ rcu_read_lock();
+ list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
+ if (!array->lseg || !pnfs_get_commit_array(array))
+ continue;
+ rcu_read_unlock();
+ ret += pnfs_bucket_alloc_ds_commits(list, array->buckets,
+ array->nbuckets, cinfo);
+ rcu_read_lock();
+ pnfs_put_commit_array(array, cinfo->inode);
}
-
- return false;
+ return ret;
}
/* This follows nfs_commit_list pretty closely */
@@ -262,6 +511,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
int (*initiate_commit)(struct nfs_commit_data *data,
int how))
{
+ struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
struct nfs_commit_data *data, *tmp;
LIST_HEAD(list);
unsigned int nreq = 0;
@@ -269,40 +519,25 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
if (!list_empty(mds_pages)) {
data = nfs_commitdata_alloc(true);
data->ds_commit_index = -1;
- list_add(&data->pages, &list);
+ list_splice_init(mds_pages, &data->pages);
+ list_add_tail(&data->list, &list);
+ atomic_inc(&cinfo->mds->rpcs_out);
nreq++;
}
- nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);
-
+ nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);
if (nreq == 0)
goto out;
- atomic_add(nreq, &cinfo->mds->rpcs_out);
-
- list_for_each_entry_safe(data, tmp, &list, pages) {
- list_del_init(&data->pages);
+ list_for_each_entry_safe(data, tmp, &list, list) {
+ list_del(&data->list);
if (data->ds_commit_index < 0) {
- /* another commit raced with us */
- if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
- data, cinfo))
- continue;
-
- nfs_init_commit(data, mds_pages, NULL, cinfo);
+ nfs_init_commit(data, NULL, NULL, cinfo);
nfs_initiate_commit(NFS_CLIENT(inode), data,
NFS_PROTO(data->inode),
data->mds_ops, how, 0);
} else {
- LIST_HEAD(pages);
-
- pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
-
- /* another commit raced with us */
- if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
- data, cinfo))
- continue;
-
- nfs_init_commit(data, &pages, data->lseg, cinfo);
+ nfs_init_commit(data, NULL, data->lseg, cinfo);
initiate_commit(data, how);
}
}
@@ -930,32 +1165,33 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
u32 ds_commit_idx)
{
struct list_head *list;
- struct pnfs_commit_bucket *buckets;
+ struct pnfs_commit_array *array;
+ struct pnfs_commit_bucket *bucket;
mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
- buckets = cinfo->ds->buckets;
- list = &buckets[ds_commit_idx].written;
- if (list_empty(list)) {
- if (!pnfs_is_valid_lseg(lseg)) {
- mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
- cinfo->completion_ops->resched_write(cinfo, req);
- return;
- }
- /* Non-empty buckets hold a reference on the lseg. That ref
- * is normally transferred to the COMMIT call and released
- * there. It could also be released if the last req is pulled
- * off due to a rewrite, in which case it will be done in
- * pnfs_common_clear_request_commit
- */
- WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
- buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
- }
+ array = pnfs_lookup_commit_array(cinfo->ds, lseg);
+ if (!array || !pnfs_is_valid_lseg(lseg))
+ goto out_resched;
+ bucket = &array->buckets[ds_commit_idx];
+ list = &bucket->written;
+ /* Non-empty buckets hold a reference on the lseg. That ref
+ * is normally transferred to the COMMIT call and released
+ * there. It could also be released if the last req is pulled
+ * off due to a rewrite, in which case it will be done in
+ * pnfs_common_clear_request_commit
+ */
+ if (!bucket->lseg)
+ bucket->lseg = pnfs_get_lseg(lseg);
set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
cinfo->ds->nwritten++;
nfs_request_add_commit_list_locked(req, list, cinfo);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
nfs_mark_page_unstable(req->wb_page, cinfo);
+ return;
+out_resched:
+ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
+ cinfo->completion_ops->resched_write(cinfo, req);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 34bb9add2302..13b22e898116 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -250,7 +250,7 @@ static int nfs_readpage_done(struct rpc_task *task,
trace_nfs_readpage_done(task, hdr);
if (task->tk_status == -ESTALE) {
- set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ nfs_set_inode_stale(inode);
nfs_mark_for_revalidate(inode);
}
return 0;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index bb14bede6da5..59ef3b13ccca 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -176,6 +176,41 @@ void nfs_sb_deactive(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(nfs_sb_deactive);
+static int __nfs_list_for_each_server(struct list_head *head,
+ int (*fn)(struct nfs_server *, void *),
+ void *data)
+{
+ struct nfs_server *server, *last = NULL;
+ int ret = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, head, client_link) {
+ if (!nfs_sb_active(server->super))
+ continue;
+ rcu_read_unlock();
+ if (last)
+ nfs_sb_deactive(last->super);
+ last = server;
+ ret = fn(server, data);
+ if (ret)
+ goto out;
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+out:
+ if (last)
+ nfs_sb_deactive(last->super);
+ return ret;
+}
+
+int nfs_client_for_each_server(struct nfs_client *clp,
+ int (*fn)(struct nfs_server *, void *),
+ void *data)
+{
+ return __nfs_list_for_each_server(&clp->cl_superblocks, fn, data);
+}
+EXPORT_SYMBOL_GPL(nfs_client_for_each_server);
+
/*
* Deliver file system statistics to userspace
*/
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 0effeee28352..b27ebdccef70 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -98,7 +98,7 @@ static void nfs_do_call_unlink(struct inode *inode, struct nfs_unlinkdata *data)
.callback_ops = &nfs_unlink_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
struct rpc_task *task;
struct inode *dir = d_inode(data->dentry->d_parent);
@@ -341,7 +341,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
.callback_ops = &nfs_rename_ops,
.workqueue = nfsiod_workqueue,
.rpc_client = NFS_CLIENT(old_dir),
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
};
data = kzalloc(sizeof(*data), GFP_KERNEL);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c478b772cc49..df4b87c30ac9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -149,6 +149,31 @@ static void nfs_io_completion_put(struct nfs_io_completion *ioc)
kref_put(&ioc->refcount, nfs_io_completion_release);
}
+static void
+nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
+{
+ if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
+ kref_get(&req->wb_kref);
+ atomic_long_inc(&NFS_I(inode)->nrequests);
+ }
+}
+
+static int
+nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+{
+ int ret;
+
+ if (!test_bit(PG_REMOVE, &req->wb_flags))
+ return 0;
+ ret = nfs_page_group_lock(req);
+ if (ret)
+ return ret;
+ if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+ nfs_page_set_inode_ref(req, inode);
+ nfs_page_group_unlock(req);
+ return 0;
+}
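nfs_page_set_inode_ref() above leans on test_and_set_bit() to make the extra reference idempotent: of any number of racing callers, only the one that actually flips the bit takes the reference. A small userspace sketch of the same idiom with C11 atomics; the flag name and take_ref_once() are hypothetical:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define PG_INODE_REF	0UL

	static atomic_ulong wb_flags;

	/* Returns the previous bit state, like the kernel's test_and_set_bit(). */
	static bool test_and_set_flag(atomic_ulong *flags, unsigned long bit)
	{
		unsigned long mask = 1UL << bit;

		return atomic_fetch_or(flags, mask) & mask;
	}

	static void take_ref_once(void)
	{
		if (!test_and_set_flag(&wb_flags, PG_INODE_REF)) {
			/* the kref_get() + nrequests++ pair would go here */
		}
	}

	int main(void)
	{
		take_ref_once();	/* takes the reference */
		take_ref_once();	/* no-op: bit already set */
		return 0;
	}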
+
static struct nfs_page *
nfs_page_private_request(struct page *page)
{
@@ -218,6 +243,36 @@ static struct nfs_page *nfs_page_find_head_request(struct page *page)
return req;
}
+static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
+{
+ struct inode *inode = page_file_mapping(page)->host;
+ struct nfs_page *req, *head;
+ int ret;
+
+ for (;;) {
+ req = nfs_page_find_head_request(page);
+ if (!req)
+ return req;
+ head = nfs_page_group_lock_head(req);
+ if (head != req)
+ nfs_release_request(req);
+ if (IS_ERR(head))
+ return head;
+ ret = nfs_cancel_remove_inode(head, inode);
+ if (ret < 0) {
+ nfs_unlock_and_release_request(head);
+ return ERR_PTR(ret);
+ }
+ /* Ensure that nobody removed the request before we locked it */
+ if (head == nfs_page_private_request(page))
+ break;
+ if (PageSwapCache(page))
+ break;
+ nfs_unlock_and_release_request(head);
+ }
+ return head;
+}
+
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
@@ -380,34 +435,6 @@ static void nfs_end_page_writeback(struct nfs_page *req)
}
/*
- * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
- *
- * this is a helper function for nfs_lock_and_join_requests
- *
- * @inode - inode associated with request page group, must be holding inode lock
- * @head - head request of page group, must be holding head lock
- * @req - request that couldn't lock and needs to wait on the req bit lock
- *
- * NOTE: this must be called holding page_group bit lock
- * which will be released before returning.
- *
- * returns 0 on success, < 0 on error.
- */
-static void
-nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
- struct nfs_page *req)
-{
- struct nfs_page *tmp;
-
- /* relinquish all the locks successfully grabbed this run */
- for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
- if (!kref_read(&tmp->wb_kref))
- continue;
- nfs_unlock_and_release_request(tmp);
- }
-}
-
-/*
* nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
*
* @destroy_list - request list (using wb_this_page) terminated by @old_head
@@ -428,22 +455,29 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
destroy_list = (subreq->wb_this_page == old_head) ?
NULL : subreq->wb_this_page;
+ /* Note: lock subreq in order to change subreq->wb_head */
+ nfs_page_set_headlock(subreq);
WARN_ON_ONCE(old_head != subreq->wb_head);
/* make sure old group is not used */
subreq->wb_this_page = subreq;
+ subreq->wb_head = subreq;
clear_bit(PG_REMOVE, &subreq->wb_flags);
/* Note: races with nfs_page_group_destroy() */
if (!kref_read(&subreq->wb_kref)) {
/* Check if we raced with nfs_page_group_destroy() */
- if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
+ if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+ nfs_page_clear_headlock(subreq);
nfs_free_request(subreq);
+ } else
+ nfs_page_clear_headlock(subreq);
continue;
}
+ nfs_page_clear_headlock(subreq);
- subreq->wb_head = subreq;
+ nfs_release_request(old_head);
if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
nfs_release_request(subreq);
@@ -457,105 +491,43 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
}
/*
- * nfs_lock_and_join_requests - join all subreqs to the head req and return
- * a locked reference, cancelling any pending
- * operations for this page.
- *
- * @page - the page used to lookup the "page group" of nfs_page structures
+ * nfs_join_page_group - destroy subrequests of the head req
+ * @head: the page used to lookup the "page group" of nfs_page structures
+ * @inode: Inode to which the request belongs.
*
* This function joins all sub requests to the head request by first
* locking all requests in the group, cancelling any pending operations
* and finally updating the head request to cover the whole range covered by
* the (former) group. All subrequests are removed from any write or commit
* lists, unlinked from the group and destroyed.
- *
- * Returns a locked, referenced pointer to the head request - which after
- * this call is guaranteed to be the only request associated with the page.
- * Returns NULL if no requests are found for @page, or a ERR_PTR if an
- * error was encountered.
*/
-static struct nfs_page *
-nfs_lock_and_join_requests(struct page *page)
+void
+nfs_join_page_group(struct nfs_page *head, struct inode *inode)
{
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_page *head, *subreq;
+ struct nfs_page *subreq;
struct nfs_page *destroy_list = NULL;
- unsigned int total_bytes;
- int ret;
+ unsigned int pgbase, off, bytes;
-try_again:
- /*
- * A reference is taken only on the head request which acts as a
- * reference to the whole page group - the group will not be destroyed
- * until the head reference is released.
- */
- head = nfs_page_find_head_request(page);
- if (!head)
- return NULL;
-
- /* lock the page head first in order to avoid an ABBA inefficiency */
- if (!nfs_lock_request(head)) {
- ret = nfs_wait_on_request(head);
- nfs_release_request(head);
- if (ret < 0)
- return ERR_PTR(ret);
- goto try_again;
- }
-
- /* Ensure that nobody removed the request before we locked it */
- if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
- nfs_unlock_and_release_request(head);
- goto try_again;
- }
-
- ret = nfs_page_group_lock(head);
- if (ret < 0)
- goto release_request;
-
- /* lock each request in the page group */
- total_bytes = head->wb_bytes;
+ pgbase = head->wb_pgbase;
+ bytes = head->wb_bytes;
+ off = head->wb_offset;
for (subreq = head->wb_this_page; subreq != head;
subreq = subreq->wb_this_page) {
-
- if (!kref_get_unless_zero(&subreq->wb_kref)) {
- if (subreq->wb_offset == head->wb_offset + total_bytes)
- total_bytes += subreq->wb_bytes;
- continue;
- }
-
- while (!nfs_lock_request(subreq)) {
- /*
- * Unlock page to allow nfs_page_group_sync_on_bit()
- * to succeed
- */
- nfs_page_group_unlock(head);
- ret = nfs_wait_on_request(subreq);
- if (!ret)
- ret = nfs_page_group_lock(head);
- if (ret < 0) {
- nfs_unroll_locks(inode, head, subreq);
- nfs_release_request(subreq);
- goto release_request;
- }
- }
- /*
- * Subrequests are always contiguous, non overlapping
- * and in order - but may be repeated (mirrored writes).
- */
- if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
- /* keep track of how many bytes this group covers */
- total_bytes += subreq->wb_bytes;
- } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
- ((subreq->wb_offset + subreq->wb_bytes) >
- (head->wb_offset + total_bytes)))) {
- nfs_page_group_unlock(head);
- nfs_unroll_locks(inode, head, subreq);
- nfs_unlock_and_release_request(subreq);
- ret = -EIO;
- goto release_request;
+ /* Subrequests should always form a contiguous range */
+ if (pgbase > subreq->wb_pgbase) {
+ off -= pgbase - subreq->wb_pgbase;
+ bytes += pgbase - subreq->wb_pgbase;
+ pgbase = subreq->wb_pgbase;
}
+ bytes = max(subreq->wb_pgbase + subreq->wb_bytes
+ - pgbase, bytes);
}
+ /* Set the head request's range to cover the former page group */
+ head->wb_pgbase = pgbase;
+ head->wb_bytes = bytes;
+ head->wb_offset = off;
+
/* Now that all requests are locked, make sure they aren't on any list.
* Commit list removal accounting is done after locks are dropped */
subreq = head;
@@ -569,36 +541,52 @@ try_again:
/* destroy list will be terminated by head */
destroy_list = head->wb_this_page;
head->wb_this_page = head;
-
- /* change head request to cover whole range that
- * the former page group covered */
- head->wb_bytes = total_bytes;
}
- /* Postpone destruction of this request */
- if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
- set_bit(PG_INODE_REF, &head->wb_flags);
- kref_get(&head->wb_kref);
- atomic_long_inc(&NFS_I(inode)->nrequests);
- }
+ nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
+}
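The merge arithmetic in nfs_join_page_group() can be checked in isolation: the head's range grows downward when a subrequest starts earlier in the page (pgbase shrinks, and bytes and the file offset adjust by the same delta) and grows upward to the furthest end seen. A standalone sketch with made-up request values; the field names mirror the kernel's but nothing else is shared:

	#include <stdio.h>

	struct req {
		unsigned int wb_pgbase;		/* start offset within the page */
		unsigned int wb_bytes;		/* length */
		unsigned long long wb_offset;	/* offset within the file */
	};

	#define MAX(a, b) ((a) > (b) ? (a) : (b))

	static void join(struct req *head, const struct req *sub, int nsub)
	{
		unsigned int pgbase = head->wb_pgbase;
		unsigned int bytes = head->wb_bytes;
		unsigned long long off = head->wb_offset;
		int i;

		for (i = 0; i < nsub; i++) {
			if (pgbase > sub[i].wb_pgbase) {
				off -= pgbase - sub[i].wb_pgbase;
				bytes += pgbase - sub[i].wb_pgbase;
				pgbase = sub[i].wb_pgbase;
			}
			bytes = MAX(sub[i].wb_pgbase + sub[i].wb_bytes - pgbase,
				    bytes);
		}
		head->wb_pgbase = pgbase;
		head->wb_bytes = bytes;
		head->wb_offset = off;
	}

	int main(void)
	{
		struct req head = { .wb_pgbase = 1024, .wb_bytes = 512,
				    .wb_offset = 9216 };
		struct req subs[] = {
			{ .wb_pgbase = 512,  .wb_bytes = 512,  .wb_offset = 8704 },
			{ .wb_pgbase = 1536, .wb_bytes = 1024, .wb_offset = 9728 },
		};

		join(&head, subs, 2);
		/* prints pgbase=512 bytes=2048 off=8704 */
		printf("pgbase=%u bytes=%u off=%llu\n",
		       head.wb_pgbase, head.wb_bytes, head.wb_offset);
		return 0;
	}

The joined head covers page bytes [512, 2560) and file bytes [8704, 10752), exactly the union of the three contiguous requests.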
- nfs_page_group_unlock(head);
+/*
+ * nfs_lock_and_join_requests - join all subreqs to the head req
+ * @page: the page used to lookup the "page group" of nfs_page structures
+ *
+ * This function joins all sub requests to the head request by first
+ * locking all requests in the group, cancelling any pending operations
+ * and finally updating the head request to cover the whole range covered by
+ * the (former) group. All subrequests are removed from any write or commit
+ * lists, unlinked from the group and destroyed.
+ *
+ * Returns a locked, referenced pointer to the head request - which after
+ * this call is guaranteed to be the only request associated with the page.
+ * Returns NULL if no requests are found for @page, or a ERR_PTR if an
+ * error was encountered.
+ */
+static struct nfs_page *
+nfs_lock_and_join_requests(struct page *page)
+{
+ struct inode *inode = page_file_mapping(page)->host;
+ struct nfs_page *head;
+ int ret;
- nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
+ /*
+ * A reference is taken only on the head request which acts as a
+ * reference to the whole page group - the group will not be destroyed
+ * until the head reference is released.
+ */
+ head = nfs_find_and_lock_page_request(page);
+ if (IS_ERR_OR_NULL(head))
+ return head;
- /* Did we lose a race with nfs_inode_remove_request()? */
- if (!(PagePrivate(page) || PageSwapCache(page))) {
+ /* lock each request in the page group */
+ ret = nfs_page_group_lock_subrequests(head);
+ if (ret < 0) {
nfs_unlock_and_release_request(head);
- return NULL;
+ return ERR_PTR(ret);
}
- /* still holds ref on head from nfs_page_find_head_request
- * and still has lock on head from lock loop */
- return head;
+ nfs_join_page_group(head, inode);
-release_request:
- nfs_unlock_and_release_request(head);
- return ERR_PTR(ret);
+ return head;
}
static void nfs_write_error(struct nfs_page *req, int error)
@@ -1707,7 +1695,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC | flags,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | flags,
.priority = priority,
};
/* Set up the initial task struct. */
@@ -1746,14 +1734,19 @@ void nfs_init_commit(struct nfs_commit_data *data,
struct pnfs_layout_segment *lseg,
struct nfs_commit_info *cinfo)
{
- struct nfs_page *first = nfs_list_entry(head->next);
- struct nfs_open_context *ctx = nfs_req_openctx(first);
- struct inode *inode = d_inode(ctx->dentry);
+ struct nfs_page *first;
+ struct nfs_open_context *ctx;
+ struct inode *inode;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
- list_splice_init(head, &data->pages);
+ if (head)
+ list_splice_init(head, &data->pages);
+
+ first = nfs_list_entry(data->pages.next);
+ ctx = nfs_req_openctx(first);
+ inode = d_inode(ctx->dentry);
data->inode = inode;
data->cred = ctx->cred;
@@ -1869,8 +1862,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* Okay, COMMIT succeeded, apparently. Check the verifier
* returned by the server against all stored verfs. */
- if (verf->committed > NFS_UNSTABLE &&
- !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
+ if (nfs_write_match_verf(verf, req)) {
/* We have a match */
if (req->wb_page)
nfs_inode_remove_request(req);
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index f368f3215f88..99d2cae91bd6 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -136,7 +136,7 @@ config NFSD_FLEXFILELAYOUT
config NFSD_V4_2_INTER_SSC
bool "NFSv4.2 inter server to server COPY"
- depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2
+ depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2 && NFS_FS=y
help
This option enables support for NFSv4.2 inter server to
server copy where the destination server calls the NFSv4.2
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 15422c951fd1..cb777fe82988 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -23,6 +23,7 @@
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
@@ -50,6 +51,11 @@ static void expkey_put(struct kref *ref)
kfree_rcu(key, ek_rcu);
}
+static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h);
+}
+
static void expkey_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
@@ -140,7 +146,9 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
if (len == 0) {
set_bit(CACHE_NEGATIVE, &key.h.flags);
ek = svc_expkey_update(cd, &key, ek);
- if (!ek)
+ if (ek)
+ trace_nfsd_expkey_update(ek, NULL);
+ else
err = -ENOMEM;
} else {
err = kern_path(buf, 0, &key.ek_path);
@@ -150,7 +158,9 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
dprintk("Found the path %s\n", buf);
ek = svc_expkey_update(cd, &key, ek);
- if (!ek)
+ if (ek)
+ trace_nfsd_expkey_update(ek, buf);
+ else
err = -ENOMEM;
path_put(&key.ek_path);
}
@@ -249,6 +259,7 @@ static const struct cache_detail svc_expkey_cache_template = {
.hash_size = EXPKEY_HASHMAX,
.name = "nfsd.fh",
.cache_put = expkey_put,
+ .cache_upcall = expkey_upcall,
.cache_request = expkey_request,
.cache_parse = expkey_parse,
.cache_show = expkey_show,
@@ -330,6 +341,11 @@ static void svc_export_put(struct kref *ref)
kfree_rcu(exp, ex_rcu);
}
+static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h);
+}
+
static void svc_export_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
@@ -643,15 +659,17 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
}
expp = svc_export_lookup(&exp);
- if (expp)
- expp = svc_export_update(&exp, expp);
- else
- err = -ENOMEM;
- cache_flush();
- if (expp == NULL)
+ if (!expp) {
err = -ENOMEM;
- else
+ goto out4;
+ }
+ expp = svc_export_update(&exp, expp);
+ if (expp) {
+ trace_nfsd_export_update(expp);
+ cache_flush();
exp_put(expp);
+ } else
+ err = -ENOMEM;
out4:
nfsd4_fslocs_free(&exp.ex_fslocs);
kfree(exp.ex_uuid);
@@ -767,6 +785,7 @@ static const struct cache_detail svc_export_cache_template = {
.hash_size = EXPORT_HASHMAX,
.name = "nfsd.export",
.cache_put = svc_export_put,
+ .cache_upcall = svc_export_upcall,
.cache_request = svc_export_request,
.cache_parse = svc_export_parse,
.cache_show = svc_export_show,
@@ -832,8 +851,10 @@ exp_find_key(struct cache_detail *cd, struct auth_domain *clp, int fsid_type,
if (ek == NULL)
return ERR_PTR(-ENOMEM);
err = cache_check(cd, &ek->h, reqp);
- if (err)
+ if (err) {
+ trace_nfsd_exp_find_key(&key, err);
return ERR_PTR(err);
+ }
return ek;
}
@@ -855,8 +876,10 @@ exp_get_by_name(struct cache_detail *cd, struct auth_domain *clp,
if (exp == NULL)
return ERR_PTR(-ENOMEM);
err = cache_check(cd, &exp->h, reqp);
- if (err)
+ if (err) {
+ trace_nfsd_exp_get_by_name(&key, err);
return ERR_PTR(err);
+ }
return exp;
}
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 22e77ede9f14..82198d747c4c 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -890,7 +890,7 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
- nf_node) {
+ nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
if ((need & nf->nf_may) != need)
continue;
if (nf->nf_inode != inode)
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 2baf32311e00..09aa545825bd 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -172,6 +172,8 @@ struct nfsd_net {
unsigned int longest_chain_cachesize;
struct shrinker nfsd_reply_cache_shrinker;
+ /* utsname taken from the process that starts the server */
+ char nfsd_name[UNX_MAXNODENAME+1];
};
/* Simple check to find out if a given net was properly initialized */
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index d1f285245af8..9460be8a8321 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -122,6 +122,12 @@ idtoname_hash(struct ent *ent)
return hash;
}
+static int
+idtoname_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall_timeout(cd, h);
+}
+
static void
idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
int *blen)
@@ -184,6 +190,7 @@ static const struct cache_detail idtoname_cache_template = {
.hash_size = ENT_HASHMAX,
.name = "nfs4.idtoname",
.cache_put = ent_put,
+ .cache_upcall = idtoname_upcall,
.cache_request = idtoname_request,
.cache_parse = idtoname_parse,
.cache_show = idtoname_show,
@@ -295,6 +302,12 @@ nametoid_hash(struct ent *ent)
return hash_str(ent->name, ENT_HASHBITS);
}
+static int
+nametoid_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall_timeout(cd, h);
+}
+
static void
nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
int *blen)
@@ -347,6 +360,7 @@ static const struct cache_detail nametoid_cache_template = {
.hash_size = ENT_HASHMAX,
.name = "nfs4.nametoid",
.cache_put = ent_put,
+ .cache_upcall = nametoid_upcall,
.cache_request = nametoid_request,
.cache_parse = nametoid_parse,
.cache_show = nametoid_show,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 65cfe9ab47be..e32ecedece0f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -494,6 +494,8 @@ find_any_file(struct nfs4_file *f)
{
struct nfsd_file *ret;
+ if (!f)
+ return NULL;
spin_lock(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDWR);
if (!ret) {
@@ -1309,6 +1311,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
nfs4_free_stateowner(sop);
}
+static bool
+nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
+{
+ return list_empty(&stp->st_perfile);
+}
+
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_file *fp = stp->st_stid.sc_file;
@@ -1379,9 +1387,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+ if (!unhash_ol_stateid(stp))
+ return false;
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
- return unhash_ol_stateid(stp);
+ return true;
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -1446,13 +1456,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
struct list_head *reaplist)
{
- bool unhashed;
-
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
- unhashed = unhash_ol_stateid(stp);
+ if (!unhash_ol_stateid(stp))
+ return false;
release_open_stateid_locks(stp, reaplist);
- return unhashed;
+ return true;
}
static void release_open_stateid(struct nfs4_ol_stateid *stp)
@@ -2636,7 +2645,7 @@ static const struct file_operations client_ctl_fops = {
static const struct tree_descr client_files[] = {
[0] = {"info", &client_info_fops, S_IRUSR},
[1] = {"states", &client_states_fops, S_IRUSR},
- [2] = {"ctl", &client_ctl_fops, S_IRUSR|S_IWUSR},
+ [2] = {"ctl", &client_ctl_fops, S_IWUSR},
[3] = {""},
};
@@ -4343,7 +4352,8 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
struct nfs4_file *fp;
- hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
+ hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
+ lockdep_is_held(&state_lock)) {
if (fh_match(&fp->fi_fhandle, fh)) {
if (refcount_inc_not_zero(&fp->fi_ref))
return fp;
@@ -5521,15 +5531,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
- /* Client debugging aid. */
- if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
- char addr_str[INET6_ADDRSTRLEN];
- rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
- sizeof(addr_str));
- pr_warn_ratelimited("NFSD: client %s testing state ID "
- "with incorrect client ID\n", addr_str);
+ if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
return status;
- }
spin_lock(&cl->cl_lock);
s = find_stateid_locked(cl, stateid);
if (!s)
@@ -6393,21 +6396,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
}
static struct nfs4_ol_stateid *
-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
+find_lock_stateid(const struct nfs4_lockowner *lo,
+ const struct nfs4_ol_stateid *ost)
{
struct nfs4_ol_stateid *lst;
- struct nfs4_client *clp = lo->lo_owner.so_client;
- lockdep_assert_held(&clp->cl_lock);
+ lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
- if (lst->st_stid.sc_type != NFS4_LOCK_STID)
- continue;
- if (lst->st_stid.sc_file == fp) {
- refcount_inc(&lst->st_stid.sc_count);
- return lst;
+ /* If ost is not hashed, ost->st_locks will not be valid */
+ if (!nfs4_ol_stateid_unhashed(ost))
+ list_for_each_entry(lst, &ost->st_locks, st_locks) {
+ if (lst->st_stateowner == &lo->lo_owner) {
+ refcount_inc(&lst->st_stid.sc_count);
+ return lst;
+ }
}
- }
return NULL;
}
@@ -6423,11 +6426,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
spin_lock(&clp->cl_lock);
- spin_lock(&fp->fi_lock);
- retstp = find_lock_stateid(lo, fp);
+ if (nfs4_ol_stateid_unhashed(open_stp))
+ goto out_close;
+ retstp = find_lock_stateid(lo, open_stp);
if (retstp)
- goto out_unlock;
-
+ goto out_found;
refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
@@ -6436,22 +6439,26 @@ retry:
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
+ spin_lock(&fp->fi_lock);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
list_add(&stp->st_perfile, &fp->fi_stateids);
-out_unlock:
spin_unlock(&fp->fi_lock);
spin_unlock(&clp->cl_lock);
- if (retstp) {
- if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
- nfs4_put_stid(&retstp->st_stid);
- goto retry;
- }
- /* To keep mutex tracking happy */
- mutex_unlock(&stp->st_mutex);
- stp = retstp;
- }
return stp;
+out_found:
+ spin_unlock(&clp->cl_lock);
+ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+ nfs4_put_stid(&retstp->st_stid);
+ goto retry;
+ }
+ /* To keep mutex tracking happy */
+ mutex_unlock(&stp->st_mutex);
+ return retstp;
+out_close:
+ spin_unlock(&clp->cl_lock);
+ mutex_unlock(&stp->st_mutex);
+ return NULL;
}
static struct nfs4_ol_stateid *
@@ -6466,7 +6473,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
*new = false;
spin_lock(&clp->cl_lock);
- lst = find_lock_stateid(lo, fi);
+ lst = find_lock_stateid(lo, ost);
spin_unlock(&clp->cl_lock);
if (lst != NULL) {
if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 9761512674a0..996ac01ee977 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3591,23 +3591,22 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
__be32 nfserr;
__be32 tmp;
__be32 *p;
- u32 zzz = 0;
int pad;
+ /*
+ * svcrdma requires every READ payload to start somewhere
+ * in xdr->pages.
+ */
+ if (xdr->iov == xdr->buf->head) {
+ xdr->iov = NULL;
+ xdr->end = xdr->p;
+ }
+
len = maxcount;
v = 0;
-
- thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p));
- p = xdr_reserve_space(xdr, (thislen+3)&~3);
- WARN_ON_ONCE(!p);
- resp->rqstp->rq_vec[v].iov_base = p;
- resp->rqstp->rq_vec[v].iov_len = thislen;
- v++;
- len -= thislen;
-
while (len) {
thislen = min_t(long, len, PAGE_SIZE);
- p = xdr_reserve_space(xdr, (thislen+3)&~3);
+ p = xdr_reserve_space(xdr, thislen);
WARN_ON_ONCE(!p);
resp->rqstp->rq_vec[v].iov_base = p;
resp->rqstp->rq_vec[v].iov_len = thislen;
@@ -3616,23 +3615,25 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
}
read->rd_vlen = v;
- len = maxcount;
nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset,
resp->rqstp->rq_vec, read->rd_vlen, &maxcount,
&eof);
read->rd_length = maxcount;
if (nfserr)
return nfserr;
- xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3));
+ if (svc_encode_read_payload(resp->rqstp, starting_len + 8, maxcount))
+ return nfserr_io;
+ xdr_truncate_encode(xdr, starting_len + 8 + xdr_align_size(maxcount));
tmp = htonl(eof);
write_bytes_to_xdr_buf(xdr->buf, starting_len , &tmp, 4);
tmp = htonl(maxcount);
write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
+ tmp = xdr_zero;
pad = (maxcount&3) ? 4 - (maxcount&3) : 0;
write_bytes_to_xdr_buf(xdr->buf, starting_len + 8 + maxcount,
- &zzz, pad);
+ &tmp, pad);
return 0;
}
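The tail of nfsd4_encode_readv() rounds the READ payload to XDR's 4-byte quad alignment: xdr_align_size() rounds maxcount up, and pad counts the zero bytes written after the data. Both are plain bit arithmetic, shown here standalone (the kernel's xdr_align_size() is, to the best of my reading, the same (len + 3) & ~3 computation; the demo values are arbitrary):

	#include <stdio.h>

	/* Round a length up to XDR's 4-byte quad alignment. */
	static unsigned int xdr_align_size(unsigned int len)
	{
		return (len + 3) & ~3u;
	}

	/* Zero bytes needed after a payload of 'len' bytes. */
	static unsigned int xdr_pad_size(unsigned int len)
	{
		return (len & 3) ? 4 - (len & 3) : 0;
	}

	int main(void)
	{
		unsigned int maxcount;

		for (maxcount = 5; maxcount <= 8; maxcount++)
			printf("len=%u aligned=%u pad=%u\n", maxcount,
			       xdr_align_size(maxcount), xdr_pad_size(maxcount));
		/* len=5 aligned=8 pad=3 ... len=8 aligned=8 pad=0 */
		return 0;
	}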
@@ -4005,11 +4006,12 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
int major_id_sz;
int server_scope_sz;
uint64_t minor_id = 0;
+ struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
- major_id = utsname()->nodename;
- major_id_sz = strlen(major_id);
- server_scope = utsname()->nodename;
- server_scope_sz = strlen(server_scope);
+ major_id = nn->nfsd_name;
+ major_id_sz = strlen(nn->nfsd_name);
+ server_scope = nn->nfsd_name;
+ server_scope_sz = strlen(nn->nfsd_name);
p = xdr_reserve_space(xdr,
8 /* eir_clientid */ +
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index e109a1007704..3bb2db947d29 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1333,6 +1333,7 @@ void nfsd_client_rmdir(struct dentry *dentry)
dget(dentry);
ret = simple_rmdir(dir, dentry);
WARN_ON_ONCE(ret);
+ fsnotify_rmdir(dir, dentry);
d_delete(dentry);
inode_unlock(dir);
}
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index b319080288c3..37bc8f5f4514 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -14,6 +14,7 @@
#include "nfsd.h"
#include "vfs.h"
#include "auth.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_FH
@@ -209,11 +210,14 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
}
error = nfserr_stale;
- if (PTR_ERR(exp) == -ENOENT)
- return error;
+ if (IS_ERR(exp)) {
+ trace_nfsd_set_fh_dentry_badexport(rqstp, fhp, PTR_ERR(exp));
+
+ if (PTR_ERR(exp) == -ENOENT)
+ return error;
- if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
+ }
if (exp->ex_flags & NFSEXP_NOSUBTREECHECK) {
/* Elevate privileges so that the lack of 'r' or 'x'
@@ -267,6 +271,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
dentry = exportfs_decode_fh(exp->ex_path.mnt, fid,
data_left, fileid_type,
nfsd_acceptable, exp);
+ if (IS_ERR_OR_NULL(dentry))
+ trace_nfsd_set_fh_dentry_badhandle(rqstp, fhp,
+ dentry ? PTR_ERR(dentry) : -ESTALE);
}
if (dentry == NULL)
goto out;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 3b77b904212d..ca9fd348548b 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -749,6 +749,9 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
if (nrservs == 0 && nn->nfsd_serv == NULL)
goto out;
+ strlcpy(nn->nfsd_name, utsname()->nodename,
+ sizeof(nn->nfsd_name));
+
error = nfsd_create_serv(net);
if (error)
goto out;
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 06dd0d337049..78c574251c60 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -9,6 +9,7 @@
#define _NFSD_TRACE_H
#include <linux/tracepoint.h>
+#include "export.h"
#include "nfsfh.h"
TRACE_EVENT(nfsd_compound,
@@ -50,6 +51,127 @@ TRACE_EVENT(nfsd_compound_status,
__get_str(name), __entry->status)
)
+DECLARE_EVENT_CLASS(nfsd_fh_err_class,
+ TP_PROTO(struct svc_rqst *rqstp,
+ struct svc_fh *fhp,
+ int status),
+ TP_ARGS(rqstp, fhp, status),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, fh_hash)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
+ __entry->status = status;
+ ),
+ TP_printk("xid=0x%08x fh_hash=0x%08x status=%d",
+ __entry->xid, __entry->fh_hash,
+ __entry->status)
+)
+
+#define DEFINE_NFSD_FH_ERR_EVENT(name) \
+DEFINE_EVENT(nfsd_fh_err_class, nfsd_##name, \
+ TP_PROTO(struct svc_rqst *rqstp, \
+ struct svc_fh *fhp, \
+ int status), \
+ TP_ARGS(rqstp, fhp, status))
+
+DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badexport);
+DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badhandle);
+
+TRACE_EVENT(nfsd_exp_find_key,
+ TP_PROTO(const struct svc_expkey *key,
+ int status),
+ TP_ARGS(key, status),
+ TP_STRUCT__entry(
+ __field(int, fsidtype)
+ __array(u32, fsid, 6)
+ __string(auth_domain, key->ek_client->name)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->fsidtype = key->ek_fsidtype;
+ memcpy(__entry->fsid, key->ek_fsid, 4*6);
+ __assign_str(auth_domain, key->ek_client->name);
+ __entry->status = status;
+ ),
+ TP_printk("fsid=%x::%s domain=%s status=%d",
+ __entry->fsidtype,
+ __print_array(__entry->fsid, 6, 4),
+ __get_str(auth_domain),
+ __entry->status
+ )
+);
+
+TRACE_EVENT(nfsd_expkey_update,
+ TP_PROTO(const struct svc_expkey *key, const char *exp_path),
+ TP_ARGS(key, exp_path),
+ TP_STRUCT__entry(
+ __field(int, fsidtype)
+ __array(u32, fsid, 6)
+ __string(auth_domain, key->ek_client->name)
+ __string(path, exp_path)
+ __field(bool, cache)
+ ),
+ TP_fast_assign(
+ __entry->fsidtype = key->ek_fsidtype;
+ memcpy(__entry->fsid, key->ek_fsid, 4*6);
+ __assign_str(auth_domain, key->ek_client->name);
+ __assign_str(path, exp_path);
+ __entry->cache = !test_bit(CACHE_NEGATIVE, &key->h.flags);
+ ),
+ TP_printk("fsid=%x::%s domain=%s path=%s cache=%s",
+ __entry->fsidtype,
+ __print_array(__entry->fsid, 6, 4),
+ __get_str(auth_domain),
+ __get_str(path),
+ __entry->cache ? "pos" : "neg"
+ )
+);
+
+TRACE_EVENT(nfsd_exp_get_by_name,
+ TP_PROTO(const struct svc_export *key,
+ int status),
+ TP_ARGS(key, status),
+ TP_STRUCT__entry(
+ __string(path, key->ex_path.dentry->d_name.name)
+ __string(auth_domain, key->ex_client->name)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __assign_str(path, key->ex_path.dentry->d_name.name);
+ __assign_str(auth_domain, key->ex_client->name);
+ __entry->status = status;
+ ),
+ TP_printk("path=%s domain=%s status=%d",
+ __get_str(path),
+ __get_str(auth_domain),
+ __entry->status
+ )
+);
+
+TRACE_EVENT(nfsd_export_update,
+ TP_PROTO(const struct svc_export *key),
+ TP_ARGS(key),
+ TP_STRUCT__entry(
+ __string(path, key->ex_path.dentry->d_name.name)
+ __string(auth_domain, key->ex_client->name)
+ __field(bool, cache)
+ ),
+ TP_fast_assign(
+ __assign_str(path, key->ex_path.dentry->d_name.name);
+ __assign_str(auth_domain, key->ex_client->name);
+ __entry->cache = !test_bit(CACHE_NEGATIVE, &key->h.flags);
+ ),
+ TP_printk("path=%s domain=%s cache=%s",
+ __get_str(path),
+ __get_str(auth_domain),
+ __entry->cache ? "pos" : "neg"
+ )
+);
+
DECLARE_EVENT_CLASS(nfsd_io_class,
TP_PROTO(struct svc_rqst *rqstp,
struct svc_fh *fhp,
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 5778d1347b35..5435a40f82be 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -17,6 +17,59 @@
#include "fanotify.h"
+static bool fanotify_path_equal(struct path *p1, struct path *p2)
+{
+ return p1->mnt == p2->mnt && p1->dentry == p2->dentry;
+}
+
+static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1,
+ __kernel_fsid_t *fsid2)
+{
+ return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1];
+}
+
+static bool fanotify_fh_equal(struct fanotify_fh *fh1,
+ struct fanotify_fh *fh2)
+{
+ if (fh1->type != fh2->type || fh1->len != fh2->len)
+ return false;
+
+ /* Do not merge events if we failed to encode fh */
+ if (fh1->type == FILEID_INVALID)
+ return false;
+
+ return !fh1->len ||
+ !memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len);
+}
+
+static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1,
+ struct fanotify_fid_event *ffe2)
+{
+ /* Do not merge fid events without object fh */
+ if (!ffe1->object_fh.len)
+ return false;
+
+ return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) &&
+ fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh);
+}
+
+static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
+ struct fanotify_name_event *fne2)
+{
+ /*
+ * Do not merge name events without dir fh.
+ * FAN_DIR_MODIFY does not encode object fh, so it may be empty.
+ */
+ if (!fne1->dir_fh.len)
+ return false;
+
+ if (fne1->name_len != fne2->name_len ||
+ !fanotify_fh_equal(&fne1->dir_fh, &fne2->dir_fh))
+ return false;
+
+ return !memcmp(fne1->name, fne2->name, fne1->name_len);
+}
+
static bool should_merge(struct fsnotify_event *old_fsn,
struct fsnotify_event *new_fsn)
{
@@ -26,14 +79,15 @@ static bool should_merge(struct fsnotify_event *old_fsn,
old = FANOTIFY_E(old_fsn);
new = FANOTIFY_E(new_fsn);
- if (old_fsn->inode != new_fsn->inode || old->pid != new->pid ||
- old->fh_type != new->fh_type || old->fh_len != new->fh_len)
+ if (old_fsn->objectid != new_fsn->objectid ||
+ old->type != new->type || old->pid != new->pid)
return false;
- if (fanotify_event_has_path(old)) {
- return old->path.mnt == new->path.mnt &&
- old->path.dentry == new->path.dentry;
- } else if (fanotify_event_has_fid(old)) {
+ switch (old->type) {
+ case FANOTIFY_EVENT_TYPE_PATH:
+ return fanotify_path_equal(fanotify_event_path(old),
+ fanotify_event_path(new));
+ case FANOTIFY_EVENT_TYPE_FID:
/*
* We want to merge many dirent events in the same dir (i.e.
* creates/unlinks/renames), but we do not want to merge dirent
@@ -42,11 +96,18 @@ static bool should_merge(struct fsnotify_event *old_fsn,
* mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+
* unlink pair or rmdir+create pair of events.
*/
- return (old->mask & FS_ISDIR) == (new->mask & FS_ISDIR) &&
- fanotify_fid_equal(&old->fid, &new->fid, old->fh_len);
+ if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
+ return false;
+
+ return fanotify_fid_event_equal(FANOTIFY_FE(old),
+ FANOTIFY_FE(new));
+ case FANOTIFY_EVENT_TYPE_FID_NAME:
+ return fanotify_name_event_equal(FANOTIFY_NE(old),
+ FANOTIFY_NE(new));
+ default:
+ WARN_ON_ONCE(1);
}
- /* Do not merge events if we failed to encode fid */
return false;
}
@@ -151,7 +212,7 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
{
__u32 marks_mask = 0, marks_ignored_mask = 0;
__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS;
- const struct path *path = data;
+ const struct path *path = fsnotify_data_path(data, data_type);
struct fsnotify_mark *mark;
int type;
@@ -160,7 +221,7 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
/* Do we have path to open a file descriptor? */
- if (data_type != FSNOTIFY_EVENT_PATH)
+ if (!path)
return 0;
/* Path type events are only relevant for files and dirs */
if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
@@ -172,6 +233,13 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
continue;
mark = iter_info->marks[type];
/*
+ * If the event is on dir and this mark doesn't care about
+ * events on dir, don't send it!
+ */
+ if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
+ continue;
+
+ /*
* If the event is for a child and this mark doesn't care about
* events on a child, don't send it!
*/
@@ -187,9 +255,9 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
test_mask = event_mask & marks_mask & ~marks_ignored_mask;
/*
- * dirent modification events (create/delete/move) do not carry the
- * child entry name/inode information. Instead, we report FAN_ONDIR
- * for mkdir/rmdir so user can differentiate them from creat/unlink.
+ * For dirent modification events (create/delete/move) that do not carry
+ * the child entry name information, we report FAN_ONDIR for mkdir/rmdir
+ * so user can differentiate them from creat/unlink.
*
* For backward compatibility and consistency, do not report FAN_ONDIR
* to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
@@ -203,22 +271,20 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
user_mask &= ~FAN_ONDIR;
}
- if (event_mask & FS_ISDIR &&
- !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
- return 0;
-
return test_mask & user_mask;
}
-static int fanotify_encode_fid(struct fanotify_event *event,
- struct inode *inode, gfp_t gfp,
- __kernel_fsid_t *fsid)
+static void fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
+ gfp_t gfp)
{
- struct fanotify_fid *fid = &event->fid;
- int dwords, bytes = 0;
- int err, type;
+ int dwords, type, bytes = 0;
+ char *ext_buf = NULL;
+ void *buf = fh->buf;
+ int err;
+
+ if (!inode)
+ goto out;
- fid->ext_fh = NULL;
dwords = 0;
err = -ENOENT;
type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
@@ -229,31 +295,33 @@ static int fanotify_encode_fid(struct fanotify_event *event,
if (bytes > FANOTIFY_INLINE_FH_LEN) {
/* Treat failure to allocate fh as failure to allocate event */
err = -ENOMEM;
- fid->ext_fh = kmalloc(bytes, gfp);
- if (!fid->ext_fh)
+ ext_buf = kmalloc(bytes, gfp);
+ if (!ext_buf)
goto out_err;
+
+ *fanotify_fh_ext_buf_ptr(fh) = ext_buf;
+ buf = ext_buf;
}
- type = exportfs_encode_inode_fh(inode, fanotify_fid_fh(fid, bytes),
- &dwords, NULL);
+ type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL);
err = -EINVAL;
if (!type || type == FILEID_INVALID || bytes != dwords << 2)
goto out_err;
- fid->fsid = *fsid;
- event->fh_len = bytes;
+ fh->type = type;
+ fh->len = bytes;
- return type;
+ return;
out_err:
- pr_warn_ratelimited("fanotify: failed to encode fid (fsid=%x.%x, "
- "type=%d, bytes=%d, err=%i)\n",
- fsid->val[0], fsid->val[1], type, bytes, err);
- kfree(fid->ext_fh);
- fid->ext_fh = NULL;
- event->fh_len = 0;
-
- return FILEID_INVALID;
+ pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n",
+ type, bytes, err);
+ kfree(ext_buf);
+ *fanotify_fh_ext_buf_ptr(fh) = NULL;
+out:
+ /* Report the event without a file identifier on encode error */
+ fh->type = FILEID_INVALID;
+ fh->len = 0;
}
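fanotify_encode_fh() above calls exportfs_encode_inode_fh() twice: the first call with a NULL buffer only reports the handle size in dwords, so the caller can choose between the inline buffer and a kmalloc'd one before encoding for real. The same measure-then-fill idiom exists in plain C with snprintf(); the sketch below is a loose analogy only, and format_id() is a made-up helper:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Format into a caller-provided inline buffer when the result fits,
	 * otherwise into a freshly allocated one -- mirroring the inline
	 * fh buf vs. ext_buf split above. */
	static char *format_id(char *inline_buf, size_t inline_len,
			       unsigned long ino, unsigned int gen)
	{
		/* Pass 1: NULL buffer, just measure (snprintf reports the
		 * would-be length, as the NULL-buffer encode reports dwords). */
		int need = snprintf(NULL, 0, "%lx:%x", ino, gen);
		char *buf = inline_buf;

		if (need < 0)
			return NULL;
		if ((size_t)need + 1 > inline_len) {
			buf = malloc(need + 1);
			if (!buf)
				return NULL;
		}
		/* Pass 2: encode for real into the chosen buffer. */
		snprintf(buf, need + 1, "%lx:%x", ino, gen);
		return buf;
	}

	int main(void)
	{
		char inline_buf[12];
		char *id = format_id(inline_buf, sizeof(inline_buf),
				     0xdeadbeefUL, 7);

		printf("%s\n", id);	/* deadbeef:7, fits inline */
		if (id != inline_buf)
			free(id);
		return 0;
	}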
/*
@@ -269,21 +337,22 @@ static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask,
{
if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
return to_tell;
- else if (data_type == FSNOTIFY_EVENT_INODE)
- return (struct inode *)data;
- else if (data_type == FSNOTIFY_EVENT_PATH)
- return d_inode(((struct path *)data)->dentry);
- return NULL;
+
+ return (struct inode *)fsnotify_data_inode(data, data_type);
}
struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
struct inode *inode, u32 mask,
const void *data, int data_type,
+ const struct qstr *file_name,
__kernel_fsid_t *fsid)
{
struct fanotify_event *event = NULL;
+ struct fanotify_fid_event *ffe = NULL;
+ struct fanotify_name_event *fne = NULL;
gfp_t gfp = GFP_KERNEL_ACCOUNT;
struct inode *id = fanotify_fid_inode(inode, mask, data, data_type);
+ const struct path *path = fsnotify_data_path(data, data_type);
/*
* For queues with unlimited length lost events are not expected and
@@ -305,33 +374,81 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
if (!pevent)
goto out;
+
event = &pevent->fae;
+ event->type = FANOTIFY_EVENT_TYPE_PATH_PERM;
pevent->response = 0;
pevent->state = FAN_EVENT_INIT;
goto init;
}
- event = kmem_cache_alloc(fanotify_event_cachep, gfp);
- if (!event)
- goto out;
-init: __maybe_unused
- fsnotify_init_event(&event->fse, inode);
+
+ /*
+ * For FAN_DIR_MODIFY event, we report the fid of the directory and
+ * the name of the modified entry.
+ * Allocate an fanotify_name_event struct and copy the name.
+ */
+ if (mask & FAN_DIR_MODIFY && !(WARN_ON_ONCE(!file_name))) {
+ fne = kmalloc(sizeof(*fne) + file_name->len + 1, gfp);
+ if (!fne)
+ goto out;
+
+ event = &fne->fae;
+ event->type = FANOTIFY_EVENT_TYPE_FID_NAME;
+ fne->name_len = file_name->len;
+ strcpy(fne->name, file_name->name);
+ goto init;
+ }
+
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
+ ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp);
+ if (!ffe)
+ goto out;
+
+ event = &ffe->fae;
+ event->type = FANOTIFY_EVENT_TYPE_FID;
+ } else {
+ struct fanotify_path_event *pevent;
+
+ pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp);
+ if (!pevent)
+ goto out;
+
+ event = &pevent->fae;
+ event->type = FANOTIFY_EVENT_TYPE_PATH;
+ }
+
+init:
+ /*
+ * Use the victim inode instead of the watching inode as the id for
+ * event queue, so event reported on parent is merged with event
+ * reported on child when both directory and child watches exist.
+ */
+ fsnotify_init_event(&event->fse, (unsigned long)id);
event->mask = mask;
if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
event->pid = get_pid(task_pid(current));
else
event->pid = get_pid(task_tgid(current));
- event->fh_len = 0;
- if (id && FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
- /* Report the event without a file identifier on encode error */
- event->fh_type = fanotify_encode_fid(event, id, gfp, fsid);
- } else if (data_type == FSNOTIFY_EVENT_PATH) {
- event->fh_type = FILEID_ROOT;
- event->path = *((struct path *)data);
- path_get(&event->path);
- } else {
- event->fh_type = FILEID_INVALID;
- event->path.mnt = NULL;
- event->path.dentry = NULL;
+
+ if (fsid && fanotify_event_fsid(event))
+ *fanotify_event_fsid(event) = *fsid;
+
+ if (fanotify_event_object_fh(event))
+ fanotify_encode_fh(fanotify_event_object_fh(event), id, gfp);
+
+ if (fanotify_event_dir_fh(event))
+ fanotify_encode_fh(fanotify_event_dir_fh(event), id, gfp);
+
+ if (fanotify_event_has_path(event)) {
+ struct path *p = fanotify_event_path(event);
+
+ if (path) {
+ *p = *path;
+ path_get(path);
+ } else {
+ p->mnt = NULL;
+ p->dentry = NULL;
+ }
}
out:
memalloc_unuse_memcg();
@@ -392,6 +509,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
+ BUILD_BUG_ON(FAN_DIR_MODIFY != FS_DIR_MODIFY);
BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
@@ -402,7 +520,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 20);
mask = fanotify_group_event_mask(group, iter_info, mask, data,
data_type);
@@ -429,7 +547,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
}
event = fanotify_alloc_event(group, inode, mask, data, data_type,
- &fsid);
+ file_name, &fsid);
ret = -ENOMEM;
if (unlikely(!event)) {
/*
@@ -451,7 +569,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
ret = 0;
} else if (fanotify_is_perm_event(mask)) {
- ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
+ ret = fanotify_get_response(group, FANOTIFY_PERM(event),
iter_info);
}
finish:
@@ -470,22 +588,58 @@ static void fanotify_free_group_priv(struct fsnotify_group *group)
free_uid(user);
}
+static void fanotify_free_path_event(struct fanotify_event *event)
+{
+ path_put(fanotify_event_path(event));
+ kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event));
+}
+
+static void fanotify_free_perm_event(struct fanotify_event *event)
+{
+ path_put(fanotify_event_path(event));
+ kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event));
+}
+
+static void fanotify_free_fid_event(struct fanotify_event *event)
+{
+ struct fanotify_fid_event *ffe = FANOTIFY_FE(event);
+
+ if (fanotify_fh_has_ext_buf(&ffe->object_fh))
+ kfree(fanotify_fh_ext_buf(&ffe->object_fh));
+ kmem_cache_free(fanotify_fid_event_cachep, ffe);
+}
+
+static void fanotify_free_name_event(struct fanotify_event *event)
+{
+ struct fanotify_name_event *fne = FANOTIFY_NE(event);
+
+ if (fanotify_fh_has_ext_buf(&fne->dir_fh))
+ kfree(fanotify_fh_ext_buf(&fne->dir_fh));
+ kfree(fne);
+}
+
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
struct fanotify_event *event;
event = FANOTIFY_E(fsn_event);
- if (fanotify_event_has_path(event))
- path_put(&event->path);
- else if (fanotify_event_has_ext_fh(event))
- kfree(event->fid.ext_fh);
put_pid(event->pid);
- if (fanotify_is_perm_event(event->mask)) {
- kmem_cache_free(fanotify_perm_event_cachep,
- FANOTIFY_PE(fsn_event));
- return;
+ switch (event->type) {
+ case FANOTIFY_EVENT_TYPE_PATH:
+ fanotify_free_path_event(event);
+ break;
+ case FANOTIFY_EVENT_TYPE_PATH_PERM:
+ fanotify_free_perm_event(event);
+ break;
+ case FANOTIFY_EVENT_TYPE_FID:
+ fanotify_free_fid_event(event);
+ break;
+ case FANOTIFY_EVENT_TYPE_FID_NAME:
+ fanotify_free_name_event(event);
+ break;
+ default:
+ WARN_ON_ONCE(1);
}
- kmem_cache_free(fanotify_event_cachep, event);
}
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index 68b30504284c..35bfbf4a7aac 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -5,7 +5,8 @@
#include <linux/exportfs.h>
extern struct kmem_cache *fanotify_mark_cache;
-extern struct kmem_cache *fanotify_event_cachep;
+extern struct kmem_cache *fanotify_fid_event_cachep;
+extern struct kmem_cache *fanotify_path_event_cachep;
extern struct kmem_cache *fanotify_perm_event_cachep;
/* Possible states of the permission event */
@@ -18,94 +19,140 @@ enum {
/*
* 3 dwords are sufficient for most local fs (64bit ino, 32bit generation).
- * For 32bit arch, fid increases the size of fanotify_event by 12 bytes and
- * fh_* fields increase the size of fanotify_event by another 4 bytes.
- * For 64bit arch, fid increases the size of fanotify_fid by 8 bytes and
- * fh_* fields are packed in a hole after mask.
+ * fh buf should be dword aligned. On 64bit arch, the ext_buf pointer is
+ * stored in either the first or last 2 dwords.
*/
-#if BITS_PER_LONG == 32
#define FANOTIFY_INLINE_FH_LEN (3 << 2)
-#else
-#define FANOTIFY_INLINE_FH_LEN (4 << 2)
-#endif
-struct fanotify_fid {
- __kernel_fsid_t fsid;
- union {
- unsigned char fh[FANOTIFY_INLINE_FH_LEN];
- unsigned char *ext_fh;
- };
-};
+struct fanotify_fh {
+ unsigned char buf[FANOTIFY_INLINE_FH_LEN];
+ u8 type;
+ u8 len;
+} __aligned(4);
+
+static inline bool fanotify_fh_has_ext_buf(struct fanotify_fh *fh)
+{
+ return fh->len > FANOTIFY_INLINE_FH_LEN;
+}
+
+static inline char **fanotify_fh_ext_buf_ptr(struct fanotify_fh *fh)
+{
+ BUILD_BUG_ON(__alignof__(char *) - 4 + sizeof(char *) >
+ FANOTIFY_INLINE_FH_LEN);
+ return (char **)ALIGN((unsigned long)(fh->buf), __alignof__(char *));
+}
-static inline void *fanotify_fid_fh(struct fanotify_fid *fid,
- unsigned int fh_len)
+static inline void *fanotify_fh_ext_buf(struct fanotify_fh *fh)
{
- return fh_len <= FANOTIFY_INLINE_FH_LEN ? fid->fh : fid->ext_fh;
+ return *fanotify_fh_ext_buf_ptr(fh);
}
-static inline bool fanotify_fid_equal(struct fanotify_fid *fid1,
- struct fanotify_fid *fid2,
- unsigned int fh_len)
+static inline void *fanotify_fh_buf(struct fanotify_fh *fh)
{
- return fid1->fsid.val[0] == fid2->fsid.val[0] &&
- fid1->fsid.val[1] == fid2->fsid.val[1] &&
- !memcmp(fanotify_fid_fh(fid1, fh_len),
- fanotify_fid_fh(fid2, fh_len), fh_len);
+ return fanotify_fh_has_ext_buf(fh) ? fanotify_fh_ext_buf(fh) : fh->buf;
}
/*
- * Structure for normal fanotify events. It gets allocated in
+ * Common structure for fanotify events. Concrete structs are allocated in
* fanotify_handle_event() and freed when the information is retrieved by
- * userspace
+ * userspace. The type of event determines how it was allocated, how it will
+ * be freed and which concrete struct it may be cast to.
*/
+enum fanotify_event_type {
+ FANOTIFY_EVENT_TYPE_FID, /* fixed length */
+ FANOTIFY_EVENT_TYPE_FID_NAME, /* variable length */
+ FANOTIFY_EVENT_TYPE_PATH,
+ FANOTIFY_EVENT_TYPE_PATH_PERM,
+};
+
struct fanotify_event {
struct fsnotify_event fse;
u32 mask;
- /*
- * Those fields are outside fanotify_fid to pack fanotify_event nicely
- * on 64bit arch and to use fh_type as an indication of whether path
- * or fid are used in the union:
- * FILEID_ROOT (0) for path, > 0 for fid, FILEID_INVALID for neither.
- */
- u8 fh_type;
- u8 fh_len;
- u16 pad;
- union {
- /*
- * We hold ref to this path so it may be dereferenced at any
- * point during this object's lifetime
- */
- struct path path;
- /*
- * With FAN_REPORT_FID, we do not hold any reference on the
- * victim object. Instead we store its NFS file handle and its
- * filesystem's fsid as a unique identifier.
- */
- struct fanotify_fid fid;
- };
+ enum fanotify_event_type type;
struct pid *pid;
};
-static inline bool fanotify_event_has_path(struct fanotify_event *event)
+struct fanotify_fid_event {
+ struct fanotify_event fae;
+ __kernel_fsid_t fsid;
+ struct fanotify_fh object_fh;
+};
+
+static inline struct fanotify_fid_event *
+FANOTIFY_FE(struct fanotify_event *event)
{
- return event->fh_type == FILEID_ROOT;
+ return container_of(event, struct fanotify_fid_event, fae);
}
-static inline bool fanotify_event_has_fid(struct fanotify_event *event)
+struct fanotify_name_event {
+ struct fanotify_event fae;
+ __kernel_fsid_t fsid;
+ struct fanotify_fh dir_fh;
+ u8 name_len;
+ char name[0];
+};
+
+static inline struct fanotify_name_event *
+FANOTIFY_NE(struct fanotify_event *event)
{
- return event->fh_type != FILEID_ROOT &&
- event->fh_type != FILEID_INVALID;
+ return container_of(event, struct fanotify_name_event, fae);
}
-static inline bool fanotify_event_has_ext_fh(struct fanotify_event *event)
+static inline __kernel_fsid_t *fanotify_event_fsid(struct fanotify_event *event)
{
- return fanotify_event_has_fid(event) &&
- event->fh_len > FANOTIFY_INLINE_FH_LEN;
+ if (event->type == FANOTIFY_EVENT_TYPE_FID)
+ return &FANOTIFY_FE(event)->fsid;
+ else if (event->type == FANOTIFY_EVENT_TYPE_FID_NAME)
+ return &FANOTIFY_NE(event)->fsid;
+ else
+ return NULL;
}
-static inline void *fanotify_event_fh(struct fanotify_event *event)
+static inline struct fanotify_fh *fanotify_event_object_fh(
+ struct fanotify_event *event)
{
- return fanotify_fid_fh(&event->fid, event->fh_len);
+ if (event->type == FANOTIFY_EVENT_TYPE_FID)
+ return &FANOTIFY_FE(event)->object_fh;
+ else
+ return NULL;
+}
+
+static inline struct fanotify_fh *fanotify_event_dir_fh(
+ struct fanotify_event *event)
+{
+ if (event->type == FANOTIFY_EVENT_TYPE_FID_NAME)
+ return &FANOTIFY_NE(event)->dir_fh;
+ else
+ return NULL;
+}
+
+static inline int fanotify_event_object_fh_len(struct fanotify_event *event)
+{
+ struct fanotify_fh *fh = fanotify_event_object_fh(event);
+
+ return fh ? fh->len : 0;
+}
+
+static inline bool fanotify_event_has_name(struct fanotify_event *event)
+{
+ return event->type == FANOTIFY_EVENT_TYPE_FID_NAME;
+}
+
+static inline int fanotify_event_name_len(struct fanotify_event *event)
+{
+ return fanotify_event_has_name(event) ?
+ FANOTIFY_NE(event)->name_len : 0;
+}
+
+struct fanotify_path_event {
+ struct fanotify_event fae;
+ struct path path;
+};
+
+static inline struct fanotify_path_event *
+FANOTIFY_PE(struct fanotify_event *event)
+{
+ return container_of(event, struct fanotify_path_event, fae);
}
/*
@@ -117,15 +164,16 @@ static inline void *fanotify_event_fh(struct fanotify_event *event)
*/
struct fanotify_perm_event {
struct fanotify_event fae;
+ struct path path;
unsigned short response; /* userspace answer to the event */
unsigned short state; /* state of the event */
int fd; /* fd we passed to userspace for this event */
};
static inline struct fanotify_perm_event *
-FANOTIFY_PE(struct fsnotify_event *fse)
+FANOTIFY_PERM(struct fanotify_event *event)
{
- return container_of(fse, struct fanotify_perm_event, fae.fse);
+ return container_of(event, struct fanotify_perm_event, fae);
}
static inline bool fanotify_is_perm_event(u32 mask)
@@ -139,7 +187,24 @@ static inline struct fanotify_event *FANOTIFY_E(struct fsnotify_event *fse)
return container_of(fse, struct fanotify_event, fse);
}
+static inline bool fanotify_event_has_path(struct fanotify_event *event)
+{
+ return event->type == FANOTIFY_EVENT_TYPE_PATH ||
+ event->type == FANOTIFY_EVENT_TYPE_PATH_PERM;
+}
+
+static inline struct path *fanotify_event_path(struct fanotify_event *event)
+{
+ if (event->type == FANOTIFY_EVENT_TYPE_PATH)
+ return &FANOTIFY_PE(event)->path;
+ else if (event->type == FANOTIFY_EVENT_TYPE_PATH_PERM)
+ return &FANOTIFY_PERM(event)->path;
+ else
+ return NULL;
+}
+
struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
struct inode *inode, u32 mask,
const void *data, int data_type,
+ const struct qstr *file_name,
__kernel_fsid_t *fsid);
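/*
 * Illustrative userspace sketch (not part of the patch) of how the new
 * fanotify_fh stows a pointer to an externally allocated handle inside
 * its 12-byte inline buffer: ALIGN() finds the first pointer-aligned
 * offset within buf, and the BUILD_BUG_ON above proves that offset still
 * leaves room for a full pointer (on 64-bit: 8 - 4 + 8 = 12 <= 12).
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct fh { unsigned char buf[12]; } __attribute__((aligned(4)));

static char **ext_buf_ptr(struct fh *fh)
{
	return (char **)ALIGN((uintptr_t)fh->buf, _Alignof(char *));
}

int main(void)
{
	static char ext[32];
	struct fh fh;

	*ext_buf_ptr(&fh) = ext;	/* pointer lands at offset 0 or 4 */
	printf("pointer stored at offset %zu\n",
	       (size_t)((unsigned char *)ext_buf_ptr(&fh) - fh.buf));
	return 0;
}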
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 0aa362b88550..42cb794c62ac 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -46,32 +46,53 @@
extern const struct fsnotify_ops fanotify_fsnotify_ops;
struct kmem_cache *fanotify_mark_cache __read_mostly;
-struct kmem_cache *fanotify_event_cachep __read_mostly;
+struct kmem_cache *fanotify_fid_event_cachep __read_mostly;
+struct kmem_cache *fanotify_path_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
#define FANOTIFY_EVENT_ALIGN 4
+#define FANOTIFY_INFO_HDR_LEN \
+ (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle))
+
+static int fanotify_fid_info_len(int fh_len, int name_len)
+{
+ int info_len = fh_len;
+
+ if (name_len)
+ info_len += name_len + 1;
+
+ return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN);
+}
static int fanotify_event_info_len(struct fanotify_event *event)
{
- if (!fanotify_event_has_fid(event))
- return 0;
+ int info_len = 0;
+ int fh_len = fanotify_event_object_fh_len(event);
+
+ if (fh_len)
+ info_len += fanotify_fid_info_len(fh_len, 0);
- return roundup(sizeof(struct fanotify_event_info_fid) +
- sizeof(struct file_handle) + event->fh_len,
- FANOTIFY_EVENT_ALIGN);
+ if (fanotify_event_name_len(event)) {
+ struct fanotify_name_event *fne = FANOTIFY_NE(event);
+
+ info_len += fanotify_fid_info_len(fne->dir_fh.len,
+ fne->name_len);
+ }
+
+ return info_len;
}
/*
- * Get an fsnotify notification event if one exists and is small
+ * Get a fanotify notification event if one exists and is small
* enough to fit in "count". Return an error pointer if the count
* is not large enough. When permission event is dequeued, its state is
* updated accordingly.
*/
-static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
+static struct fanotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
size_t event_size = FAN_EVENT_METADATA_LEN;
- struct fsnotify_event *fsn_event = NULL;
+ struct fanotify_event *event = NULL;
pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
@@ -85,26 +106,23 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
}
if (event_size > count) {
- fsn_event = ERR_PTR(-EINVAL);
+ event = ERR_PTR(-EINVAL);
goto out;
}
- fsn_event = fsnotify_remove_first_event(group);
- if (fanotify_is_perm_event(FANOTIFY_E(fsn_event)->mask))
- FANOTIFY_PE(fsn_event)->state = FAN_EVENT_REPORTED;
+ event = FANOTIFY_E(fsnotify_remove_first_event(group));
+ if (fanotify_is_perm_event(event->mask))
+ FANOTIFY_PERM(event)->state = FAN_EVENT_REPORTED;
out:
spin_unlock(&group->notification_lock);
- return fsn_event;
+ return event;
}
-static int create_fd(struct fsnotify_group *group,
- struct fanotify_event *event,
+static int create_fd(struct fsnotify_group *group, struct path *path,
struct file **file)
{
int client_fd;
struct file *new_file;
- pr_debug("%s: group=%p event=%p\n", __func__, group, event);
-
client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
if (client_fd < 0)
return client_fd;
@@ -113,14 +131,9 @@ static int create_fd(struct fsnotify_group *group,
* we need a new file handle for the userspace program so it can read even if it was
* originally opened O_WRONLY.
*/
- /* it's possible this event was an overflow event. in that case dentry and mnt
- * are NULL; That's fine, just don't call dentry open */
- if (event->path.dentry && event->path.mnt)
- new_file = dentry_open(&event->path,
- group->fanotify_data.f_flags | FMODE_NONOTIFY,
- current_cred());
- else
- new_file = ERR_PTR(-EOVERFLOW);
+ new_file = dentry_open(path,
+ group->fanotify_data.f_flags | FMODE_NONOTIFY,
+ current_cred());
if (IS_ERR(new_file)) {
/*
* we still send an event even if we can't open the file. this
@@ -204,83 +217,111 @@ static int process_access_response(struct fsnotify_group *group,
return -ENOENT;
}
-static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
+static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
+ const char *name, size_t name_len,
+ char __user *buf, size_t count)
{
struct fanotify_event_info_fid info = { };
struct file_handle handle = { };
- unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh;
- size_t fh_len = event->fh_len;
- size_t len = fanotify_event_info_len(event);
+ unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh_buf;
+ size_t fh_len = fh ? fh->len : 0;
+ size_t info_len = fanotify_fid_info_len(fh_len, name_len);
+ size_t len = info_len;
+
+ pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n",
+ __func__, fh_len, name_len, info_len, count);
- if (!len)
+ if (!fh_len || (name && !name_len))
return 0;
- if (WARN_ON_ONCE(len < sizeof(info) + sizeof(handle) + fh_len))
+ if (WARN_ON_ONCE(len < sizeof(info) || len > count))
return -EFAULT;
- /* Copy event info fid header followed by vaiable sized file handle */
- info.hdr.info_type = FAN_EVENT_INFO_TYPE_FID;
+ /*
+ * Copy event info fid header followed by variable sized file handle
+ * and optionally followed by variable sized filename.
+ */
+ info.hdr.info_type = name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
+ FAN_EVENT_INFO_TYPE_FID;
info.hdr.len = len;
- info.fsid = event->fid.fsid;
+ info.fsid = *fsid;
if (copy_to_user(buf, &info, sizeof(info)))
return -EFAULT;
buf += sizeof(info);
len -= sizeof(info);
- handle.handle_type = event->fh_type;
+ if (WARN_ON_ONCE(len < sizeof(handle)))
+ return -EFAULT;
+
+ handle.handle_type = fh->type;
handle.handle_bytes = fh_len;
if (copy_to_user(buf, &handle, sizeof(handle)))
return -EFAULT;
buf += sizeof(handle);
len -= sizeof(handle);
+ if (WARN_ON_ONCE(len < fh_len))
+ return -EFAULT;
+
/*
- * For an inline fh, copy through stack to exclude the copy from
- * usercopy hardening protections.
+ * For an inline fh and inline file name, copy through stack to exclude
+ * the copy from usercopy hardening protections.
*/
- fh = fanotify_event_fh(event);
+ fh_buf = fanotify_fh_buf(fh);
if (fh_len <= FANOTIFY_INLINE_FH_LEN) {
- memcpy(bounce, fh, fh_len);
- fh = bounce;
+ memcpy(bounce, fh_buf, fh_len);
+ fh_buf = bounce;
}
- if (copy_to_user(buf, fh, fh_len))
+ if (copy_to_user(buf, fh_buf, fh_len))
return -EFAULT;
- /* Pad with 0's */
buf += fh_len;
len -= fh_len;
+
+ if (name_len) {
+ /* Copy the filename with terminating null */
+ name_len++;
+ if (WARN_ON_ONCE(len < name_len))
+ return -EFAULT;
+
+ if (copy_to_user(buf, name, name_len))
+ return -EFAULT;
+
+ buf += name_len;
+ len -= name_len;
+ }
+
+ /* Pad with 0's */
WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN);
if (len > 0 && clear_user(buf, len))
return -EFAULT;
- return 0;
+ return info_len;
}
static ssize_t copy_event_to_user(struct fsnotify_group *group,
- struct fsnotify_event *fsn_event,
+ struct fanotify_event *event,
char __user *buf, size_t count)
{
struct fanotify_event_metadata metadata;
- struct fanotify_event *event;
+ struct path *path = fanotify_event_path(event);
struct file *f = NULL;
int ret, fd = FAN_NOFD;
- pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
- event = container_of(fsn_event, struct fanotify_event, fse);
- metadata.event_len = FAN_EVENT_METADATA_LEN;
+ metadata.event_len = FAN_EVENT_METADATA_LEN +
+ fanotify_event_info_len(event);
metadata.metadata_len = FAN_EVENT_METADATA_LEN;
metadata.vers = FANOTIFY_METADATA_VERSION;
metadata.reserved = 0;
metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS;
metadata.pid = pid_vnr(event->pid);
- if (fanotify_event_has_path(event)) {
- fd = create_fd(group, event, &f);
+ if (path && path->mnt && path->dentry) {
+ fd = create_fd(group, path, &f);
if (fd < 0)
return fd;
- } else if (fanotify_event_has_fid(event)) {
- metadata.event_len += fanotify_event_info_len(event);
}
metadata.fd = fd;
@@ -295,15 +336,39 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN))
goto out_close_fd;
+ buf += FAN_EVENT_METADATA_LEN;
+ count -= FAN_EVENT_METADATA_LEN;
+
if (fanotify_is_perm_event(event->mask))
- FANOTIFY_PE(fsn_event)->fd = fd;
+ FANOTIFY_PERM(event)->fd = fd;
- if (fanotify_event_has_path(event)) {
+ if (f)
fd_install(fd, f);
- } else if (fanotify_event_has_fid(event)) {
- ret = copy_fid_to_user(event, buf + FAN_EVENT_METADATA_LEN);
+
+ /* Event info records order is: dir fid + name, child fid */
+ if (fanotify_event_name_len(event)) {
+ struct fanotify_name_event *fne = FANOTIFY_NE(event);
+
+ ret = copy_info_to_user(fanotify_event_fsid(event),
+ fanotify_event_dir_fh(event),
+ fne->name, fne->name_len,
+ buf, count);
+ if (ret < 0)
+ return ret;
+
+ buf += ret;
+ count -= ret;
+ }
+
+ if (fanotify_event_object_fh_len(event)) {
+ ret = copy_info_to_user(fanotify_event_fsid(event),
+ fanotify_event_object_fh(event),
+ NULL, 0, buf, count);
if (ret < 0)
return ret;
+
+ buf += ret;
+ count -= ret;
}
return metadata.event_len;
@@ -335,7 +400,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct fsnotify_group *group;
- struct fsnotify_event *kevent;
+ struct fanotify_event *event;
char __user *start;
int ret;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -347,13 +412,13 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- kevent = get_one_event(group, count);
- if (IS_ERR(kevent)) {
- ret = PTR_ERR(kevent);
+ event = get_one_event(group, count);
+ if (IS_ERR(event)) {
+ ret = PTR_ERR(event);
break;
}
- if (!kevent) {
+ if (!event) {
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
@@ -369,7 +434,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
continue;
}
- ret = copy_event_to_user(group, kevent, buf, count);
+ ret = copy_event_to_user(group, event, buf, count);
if (unlikely(ret == -EOPENSTALE)) {
/*
* We cannot report events with stale fd so drop it.
@@ -384,17 +449,17 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
* Permission events get queued to wait for response. Other
* events can be destroyed now.
*/
- if (!fanotify_is_perm_event(FANOTIFY_E(kevent)->mask)) {
- fsnotify_destroy_event(group, kevent);
+ if (!fanotify_is_perm_event(event->mask)) {
+ fsnotify_destroy_event(group, &event->fse);
} else {
if (ret <= 0) {
spin_lock(&group->notification_lock);
finish_permission_event(group,
- FANOTIFY_PE(kevent), FAN_DENY);
+ FANOTIFY_PERM(event), FAN_DENY);
wake_up(&group->fanotify_data.access_waitq);
} else {
spin_lock(&group->notification_lock);
- list_add_tail(&kevent->list,
+ list_add_tail(&event->fse.list,
&group->fanotify_data.access_list);
spin_unlock(&group->notification_lock);
}
@@ -440,8 +505,6 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t
static int fanotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
- struct fanotify_perm_event *event;
- struct fsnotify_event *fsn_event;
/*
* Stop new events from arriving in the notification queue. since
@@ -456,6 +519,8 @@ static int fanotify_release(struct inode *ignored, struct file *file)
*/
spin_lock(&group->notification_lock);
while (!list_empty(&group->fanotify_data.access_list)) {
+ struct fanotify_perm_event *event;
+
event = list_first_entry(&group->fanotify_data.access_list,
struct fanotify_perm_event, fae.fse.list);
list_del_init(&event->fae.fse.list);
@@ -469,12 +534,14 @@ static int fanotify_release(struct inode *ignored, struct file *file)
* response is consumed and fanotify_get_response() returns.
*/
while (!fsnotify_notify_queue_is_empty(group)) {
- fsn_event = fsnotify_remove_first_event(group);
- if (!(FANOTIFY_E(fsn_event)->mask & FANOTIFY_PERM_EVENTS)) {
+ struct fanotify_event *event;
+
+ event = FANOTIFY_E(fsnotify_remove_first_event(group));
+ if (!(event->mask & FANOTIFY_PERM_EVENTS)) {
spin_unlock(&group->notification_lock);
- fsnotify_destroy_event(group, fsn_event);
+ fsnotify_destroy_event(group, &event->fse);
} else {
- finish_permission_event(group, FANOTIFY_PE(fsn_event),
+ finish_permission_event(group, FANOTIFY_PERM(event),
FAN_ALLOW);
}
spin_lock(&group->notification_lock);
@@ -824,7 +891,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->memcg = get_mem_cgroup_from_mm(current->mm);
oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL,
- FSNOTIFY_EVENT_NONE, NULL);
+ FSNOTIFY_EVENT_NONE, NULL, NULL);
if (unlikely(!oevent)) {
fd = -ENOMEM;
goto out_destroy_group;
@@ -1139,7 +1206,10 @@ static int __init fanotify_user_setup(void)
fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
SLAB_PANIC|SLAB_ACCOUNT);
- fanotify_event_cachep = KMEM_CACHE(fanotify_event, SLAB_PANIC);
+ fanotify_fid_event_cachep = KMEM_CACHE(fanotify_fid_event,
+ SLAB_PANIC);
+ fanotify_path_event_cachep = KMEM_CACHE(fanotify_path_event,
+ SLAB_PANIC);
if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
fanotify_perm_event_cachep =
KMEM_CACHE(fanotify_perm_event, SLAB_PANIC);
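/*
 * Illustrative sketch (not part of the patch) of the record-length
 * arithmetic in fanotify_fid_info_len() above: header + handle bytes +
 * optional name and its NUL, rounded up to the 4-byte event alignment.
 * The 20-byte header below is an assumed value standing in for the UAPI
 * struct sizes.
 */
#include <stdio.h>

#define HDR_LEN		20	/* assumed: info header + file_handle header */
#define EVENT_ALIGN	4
#define ROUNDUP(x, a)	(((x) + (a) - 1) / (a) * (a))

static int fid_info_len(int fh_len, int name_len)
{
	int info_len = fh_len;

	if (name_len)
		info_len += name_len + 1;	/* trailing NUL */

	return ROUNDUP(HDR_LEN + info_len, EVENT_ALIGN);
}

int main(void)
{
	/* 12-byte handle, 5-char name: 20 + 12 + 6 = 38, padded to 40 */
	printf("%d\n", fid_info_len(12, 5));
	return 0;
}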
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 46f225580009..72d332ce8e12 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -143,15 +143,13 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
}
/* Notify this dentry's parent about a child's events. */
-int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ int data_type)
{
struct dentry *parent;
struct inode *p_inode;
int ret = 0;
- if (!dentry)
- dentry = path->dentry;
-
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return 0;
@@ -168,12 +166,7 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
mask |= FS_EVENT_ON_CHILD;
take_dentry_name_snapshot(&name, dentry);
- if (path)
- ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
- &name.name, 0);
- else
- ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
- &name.name, 0);
+ ret = fsnotify(p_inode, mask, data, data_type, &name.name, 0);
release_dentry_name_snapshot(&name);
}
@@ -181,7 +174,7 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
return ret;
}
-EXPORT_SYMBOL_GPL(__fsnotify_parent);
+EXPORT_SYMBOL_GPL(fsnotify_parent);
static int send_to_group(struct inode *to_tell,
__u32 mask, const void *data,
@@ -318,6 +311,7 @@ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info)
int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
const struct qstr *file_name, u32 cookie)
{
+ const struct path *path = fsnotify_data_path(data, data_is);
struct fsnotify_iter_info iter_info = {};
struct super_block *sb = to_tell->i_sb;
struct mount *mnt = NULL;
@@ -325,8 +319,8 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
int ret = 0;
__u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
- if (data_is == FSNOTIFY_EVENT_PATH) {
- mnt = real_mount(((const struct path *)data)->mnt);
+ if (path) {
+ mnt = real_mount(path->mnt);
mnt_or_sb_mask |= mnt->mnt_fsnotify_mask;
}
/* An event "on child" is not intended for a mount/sb mark */
@@ -389,7 +383,7 @@ static __init int fsnotify_init(void)
{
int ret;
- BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 25);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 26);
ret = init_srcu_struct(&fsnotify_mark_srcu);
if (ret)
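/*
 * Illustrative userspace toy (not part of the patch): fsnotify_data_path()
 * reduces the repeated "if (data_type == FSNOTIFY_EVENT_PATH)" casts to a
 * single helper that yields the path only when the payload really is one.
 * Names below are made up for the example.
 */
#include <stddef.h>

enum data_type { EVENT_NONE, EVENT_PATH, EVENT_INODE };

struct path { void *mnt, *dentry; };

static const struct path *data_path(const void *data, int data_type)
{
	return data_type == EVENT_PATH ? data : NULL;
}

int main(void)
{
	struct path p = { 0 };

	return data_path(&p, EVENT_PATH) == &p ? 0 : 1;
}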
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index d510223d302c..2ebc89047153 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn,
if (old->mask & FS_IN_IGNORED)
return false;
if ((old->mask == new->mask) &&
- (old_fsn->inode == new_fsn->inode) &&
+ (old_fsn->objectid == new_fsn->objectid) &&
(old->name_len == new->name_len) &&
(!old->name_len || !strcmp(old->name, new->name)))
return true;
@@ -61,6 +61,7 @@ int inotify_handle_event(struct fsnotify_group *group,
const struct qstr *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info)
{
+ const struct path *path = fsnotify_data_path(data, data_type);
struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
struct inotify_inode_mark *i_mark;
struct inotify_event_info *event;
@@ -73,12 +74,9 @@ int inotify_handle_event(struct fsnotify_group *group,
return 0;
if ((inode_mark->mask & FS_EXCL_UNLINK) &&
- (data_type == FSNOTIFY_EVENT_PATH)) {
- const struct path *path = data;
+ path && d_unlinked(path->dentry))
+ return 0;
- if (d_unlinked(path->dentry))
- return 0;
- }
if (file_name) {
len = file_name->len;
alloc_len += len + 1;
@@ -118,7 +116,7 @@ int inotify_handle_event(struct fsnotify_group *group,
mask &= ~IN_ISDIR;
fsn_event = &event->fse;
- fsnotify_init_event(fsn_event, inode);
+ fsnotify_init_event(fsn_event, (unsigned long)inode);
event->mask = mask;
event->wd = i_mark->wd;
event->sync_cookie = cookie;
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 107537a543fd..81ffc8629fc4 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
return ERR_PTR(-ENOMEM);
}
group->overflow_event = &oevent->fse;
- fsnotify_init_event(group->overflow_event, NULL);
+ fsnotify_init_event(group->overflow_event, 0);
oevent->mask = FS_Q_OVERFLOW;
oevent->wd = -1;
oevent->sync_cookie = 0;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 88534eb0e7c2..65b3abbcce4e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1060,7 +1060,6 @@ bail:
brelse(bhs[i]);
bhs[i] = NULL;
}
- mlog_errno(status);
}
return status;
}
@@ -3942,7 +3941,7 @@ rotate:
* above.
*
* This leaf needs to have space, either by the empty 1st
- * extent record, or by virtue of an l_next_rec < l_count.
+ * extent record, or by virtue of an l_next_free_rec < l_count.
*/
ocfs2_rotate_leaf(el, insert_rec);
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a368350d4c27..89d13e0705fe 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -101,8 +101,6 @@ static struct o2hb_callback {
static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
-#define O2HB_DEFAULT_BLOCK_BITS 9
-
enum o2hb_heartbeat_modes {
O2HB_HEARTBEAT_LOCAL = 0,
O2HB_HEARTBEAT_GLOBAL,
@@ -1309,7 +1307,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
case O2HB_DB_TYPE_REGION_NUMBER:
reg = (struct o2hb_region *)db->db_data;
- out += snprintf(buf + out, PAGE_SIZE - out, "%d\n",
+ out += scnprintf(buf + out, PAGE_SIZE - out, "%d\n",
reg->hr_region_num);
goto done;
@@ -1319,12 +1317,12 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
/* If 0, it has never been set before */
if (lts)
lts = jiffies_to_msecs(jiffies - lts);
- out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
+ out += scnprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
goto done;
case O2HB_DB_TYPE_REGION_PINNED:
reg = (struct o2hb_region *)db->db_data;
- out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
+ out += scnprintf(buf + out, PAGE_SIZE - out, "%u\n",
!!reg->hr_item_pinned);
goto done;
@@ -1333,8 +1331,8 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
}
while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len)
- out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
- out += snprintf(buf + out, PAGE_SIZE - out, "\n");
+ out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+ out += scnprintf(buf + out, PAGE_SIZE - out, "\n");
done:
i_size_write(inode, out);
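/*
 * Why the snprintf -> scnprintf conversions in this series matter:
 * snprintf() returns the length the output would have needed, so
 * "out += snprintf(buf + out, len - out, ...)" can advance out past len
 * and turn the next size argument into a huge unsigned value, while
 * scnprintf() returns the bytes actually stored. A userspace
 * approximation of the kernel semantics (illustrative, not the kernel
 * implementation):
 */
#include <stdarg.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (!size)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	/* clamp to what was stored, excluding the terminating NUL */
	return i < 0 ? 0 : (i >= (int)size ? (int)size - 1 : i);
}

int main(void)
{
	char buf[8];
	int out = 0;

	out += my_scnprintf(buf + out, sizeof(buf) - out, "%s", "0123456789");
	out += my_scnprintf(buf + out, sizeof(buf) - out, "%s", "abc");
	printf("out=%d buf=%s\n", out, buf);	/* out never exceeds 7 */
	return 0;
}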
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 02bf4a1774cc..667a5c5e1f66 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -443,8 +443,8 @@ static int o2net_fill_bitmap(char *buf, int len)
o2net_fill_node_map(map, sizeof(map));
while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
- out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
- out += snprintf(buf + out, PAGE_SIZE - out, "\n");
+ out += scnprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+ out += scnprintf(buf + out, PAGE_SIZE - out, "\n");
return out;
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 48a3398f0bf5..2c512b40a940 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1570,15 +1570,13 @@ static void o2net_start_connect(struct work_struct *work)
struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
int ret = 0, stop;
unsigned int timeout;
- unsigned int noio_flag;
+ unsigned int nofs_flag;
/*
- * sock_create allocates the sock with GFP_KERNEL. We must set
- * per-process flag PF_MEMALLOC_NOIO so that all allocations done
- * by this process are done as if GFP_NOIO was specified. So we
- * are not reentering filesystem while doing memory reclaim.
+ * sock_create allocates the sock with GFP_KERNEL. We must
+ * prevent the filesystem from being reentered by memory reclaim.
*/
- noio_flag = memalloc_noio_save();
+ nofs_flag = memalloc_nofs_save();
/* if we're greater we initiate tx, otherwise we accept */
if (o2nm_this_node() <= o2net_num_from_nn(nn))
goto out;
@@ -1683,7 +1681,7 @@ out:
if (mynode)
o2nm_node_put(mynode);
- memalloc_noio_restore(noio_flag);
+ memalloc_nofs_restore(nofs_flag);
return;
}
@@ -1810,15 +1808,13 @@ static int o2net_accept_one(struct socket *sock, int *more)
struct o2nm_node *local_node = NULL;
struct o2net_sock_container *sc = NULL;
struct o2net_node *nn;
- unsigned int noio_flag;
+ unsigned int nofs_flag;
/*
- * sock_create_lite allocates the sock with GFP_KERNEL. We must set
- * per-process flag PF_MEMALLOC_NOIO so that all allocations done
- * by this process are done as if GFP_NOIO was specified. So we
- * are not reentering filesystem while doing memory reclaim.
+ * sock_create_lite allocates the sock with GFP_KERNEL. We must
+ * prevent the filesystem from being reentered by memory reclaim.
*/
- noio_flag = memalloc_noio_save();
+ nofs_flag = memalloc_nofs_save();
BUG_ON(sock == NULL);
*more = 0;
@@ -1934,7 +1930,7 @@ out:
if (sc)
sc_put(sc);
- memalloc_noio_restore(noio_flag);
+ memalloc_nofs_restore(nofs_flag);
return ret;
}
@@ -1948,7 +1944,6 @@ static void o2net_accept_many(struct work_struct *work)
{
struct socket *sock = o2net_listen_sock;
int more;
- int err;
/*
* It is critical to note that due to interrupt moderation
@@ -1963,7 +1958,7 @@ static void o2net_accept_many(struct work_struct *work)
*/
for (;;) {
- err = o2net_accept_one(sock, &more);
+ o2net_accept_one(sock, &more);
if (!more)
break;
cond_resched();
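/*
 * Sketch of the allocation-scoping idiom adopted above; kernel-context
 * pseudocode, not runnable userspace. Between save and restore, every
 * allocation made by the task behaves as if GFP_NOFS were set, so direct
 * reclaim cannot re-enter the filesystem. NOIO was a stronger scope than
 * these socket paths need; NOFS is sufficient.
 */
static void socket_setup_example(void)
{
	unsigned int nofs_flag;

	nofs_flag = memalloc_nofs_save();
	/* sock_create() etc. allocate with GFP_KERNEL here, but reclaim
	 * triggered by those allocations will not recurse into the fs. */
	memalloc_nofs_restore(nofs_flag);
}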
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index de87cbffd175..736338f45c59 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -32,7 +32,7 @@ struct o2net_msg
__be32 status;
__be32 key;
__be32 msg_num;
- __u8 buf[0];
+ __u8 buf[];
};
typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data,
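/*
 * The buf[0] -> buf[] changes across this series replace the old GCC
 * zero-length-array extension with a C99 flexible array member.
 * Allocation and use are unchanged; the [] spelling lets the compiler
 * reject misuse such as sizeof on the member or a non-final position.
 * Illustrative userspace equivalent (names made up for the example):
 */
#include <stdlib.h>
#include <string.h>

struct msg {
	int len;
	char buf[];		/* flexible array member, must be last */
};

static struct msg *msg_alloc(const char *payload, int len)
{
	struct msg *m = malloc(sizeof(*m) + len);

	if (!m)
		return NULL;
	m->len = len;
	memcpy(m->buf, payload, len);
	return m;
}

int main(void)
{
	struct msg *m = msg_alloc("hi", 2);

	free(m);
	return 0;
}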
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index bdef72c0f099..5761060d2ba8 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -676,7 +676,7 @@ static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen,
int ra_ptr = 0; /* Current index into readahead
buffer */
int num = 0;
- int nblocks, i, err;
+ int nblocks, i;
sb = dir->i_sb;
@@ -708,7 +708,7 @@ restart:
num++;
bh = NULL;
- err = ocfs2_read_dir_block(dir, b++, &bh,
+ ocfs2_read_dir_block(dir, b++, &bh,
OCFS2_BH_READAHEAD);
bh_use[ra_max] = bh;
}
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 0463dce65bb2..c8a444622faa 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -564,7 +564,7 @@ struct dlm_migratable_lockres
// 48 bytes
u8 lvb[DLM_LVB_LEN];
// 112 bytes
- struct dlm_migratable_lock ml[0]; // 16 bytes each, begins at byte 112
+ struct dlm_migratable_lock ml[]; // 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN \
(sizeof(struct dlm_migratable_lockres) + \
@@ -601,7 +601,7 @@ struct dlm_convert_lock
u8 name[O2NM_MAX_NAME_LEN];
- s8 lvb[0];
+ s8 lvb[];
};
#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
@@ -616,7 +616,7 @@ struct dlm_unlock_lock
u8 name[O2NM_MAX_NAME_LEN];
- s8 lvb[0];
+ s8 lvb[];
};
#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
@@ -632,7 +632,7 @@ struct dlm_proxy_ast
u8 name[O2NM_MAX_NAME_LEN];
- s8 lvb[0];
+ s8 lvb[];
};
#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index c5c6efba7b5e..4b8b41d23e91 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -244,11 +244,11 @@ static int stringify_lockname(const char *lockname, int locklen, char *buf,
memcpy((__be64 *)&inode_blkno_be,
(char *)&lockname[OCFS2_DENTRY_LOCK_INO_START],
sizeof(__be64));
- out += snprintf(buf + out, len - out, "%.*s%08x",
+ out += scnprintf(buf + out, len - out, "%.*s%08x",
OCFS2_DENTRY_LOCK_INO_START - 1, lockname,
(unsigned int)be64_to_cpu(inode_blkno_be));
} else
- out += snprintf(buf + out, len - out, "%.*s",
+ out += scnprintf(buf + out, len - out, "%.*s",
locklen, lockname);
return out;
}
@@ -260,7 +260,7 @@ static int stringify_nodemap(unsigned long *nodemap, int maxnodes,
int i = -1;
while ((i = find_next_bit(nodemap, maxnodes, i + 1)) < maxnodes)
- out += snprintf(buf + out, len - out, "%d ", i);
+ out += scnprintf(buf + out, len - out, "%d ", i);
return out;
}
@@ -278,34 +278,34 @@ static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len)
mle_type = "MIG";
out += stringify_lockname(mle->mname, mle->mnamelen, buf + out, len - out);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n",
mle_type, mle->master, mle->new_master,
!list_empty(&mle->hb_events),
!!mle->inuse,
kref_read(&mle->mle_refs));
- out += snprintf(buf + out, len - out, "Maybe=");
+ out += scnprintf(buf + out, len - out, "Maybe=");
out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
- out += snprintf(buf + out, len - out, "Vote=");
+ out += scnprintf(buf + out, len - out, "Vote=");
out += stringify_nodemap(mle->vote_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
- out += snprintf(buf + out, len - out, "Response=");
+ out += scnprintf(buf + out, len - out, "Response=");
out += stringify_nodemap(mle->response_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
- out += snprintf(buf + out, len - out, "Node=");
+ out += scnprintf(buf + out, len - out, "Node=");
out += stringify_nodemap(mle->node_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
return out;
}
@@ -353,7 +353,7 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
int out = 0;
unsigned long total = 0;
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Dumping Purgelist for Domain: %s\n", dlm->name);
spin_lock(&dlm->spinlock);
@@ -365,13 +365,13 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
out += stringify_lockname(res->lockname.name,
res->lockname.len,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\t%ld\n",
+ out += scnprintf(buf + out, len - out, "\t%ld\n",
(jiffies - res->last_used)/HZ);
spin_unlock(&res->spinlock);
}
spin_unlock(&dlm->spinlock);
- out += snprintf(buf + out, len - out, "Total on list: %lu\n", total);
+ out += scnprintf(buf + out, len - out, "Total on list: %lu\n", total);
return out;
}
@@ -410,7 +410,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
int i, out = 0;
unsigned long total = 0, longest = 0, bucket_count = 0;
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Dumping MLEs for Domain: %s\n", dlm->name);
spin_lock(&dlm->master_lock);
@@ -428,7 +428,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
}
spin_unlock(&dlm->master_lock);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Total: %lu, Longest: %lu\n", total, longest);
return out;
}
@@ -467,7 +467,7 @@ static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len)
#define DEBUG_LOCK_VERSION 1
spin_lock(&lock->spinlock);
- out = snprintf(buf, len, "LOCK:%d,%d,%d,%d,%d,%d:%lld,%d,%d,%d,%d,%d,"
+ out = scnprintf(buf, len, "LOCK:%d,%d,%d,%d,%d,%d:%lld,%d,%d,%d,%d,%d,"
"%d,%d,%d,%d\n",
DEBUG_LOCK_VERSION,
list_type, lock->ml.type, lock->ml.convert_type,
@@ -491,13 +491,13 @@ static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len)
int i;
int out = 0;
- out += snprintf(buf + out, len - out, "NAME:");
+ out += scnprintf(buf + out, len - out, "NAME:");
out += stringify_lockname(res->lockname.name, res->lockname.len,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
#define DEBUG_LRES_VERSION 1
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"LRES:%d,%d,%d,%ld,%d,%d,%d,%d,%d,%d,%d\n",
DEBUG_LRES_VERSION,
res->owner, res->state, res->last_used,
@@ -509,17 +509,17 @@ static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len)
kref_read(&res->refs));
/* refmap */
- out += snprintf(buf + out, len - out, "RMAP:");
+ out += scnprintf(buf + out, len - out, "RMAP:");
out += stringify_nodemap(res->refmap, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
/* lvb */
- out += snprintf(buf + out, len - out, "LVBX:");
+ out += scnprintf(buf + out, len - out, "LVBX:");
for (i = 0; i < DLM_LVB_LEN; i++)
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%02x", (unsigned char)res->lvb[i]);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
/* granted */
list_for_each_entry(lock, &res->granted, list)
@@ -533,7 +533,7 @@ static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len)
list_for_each_entry(lock, &res->blocked, list)
out += dump_lock(lock, 2, buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
return out;
}
@@ -683,41 +683,41 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
}
/* Domain: xxxxxxxxxx Key: 0xdfbac769 */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Domain: %s Key: 0x%08x Protocol: %d.%d\n",
dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor);
/* Thread Pid: xxx Node: xxx State: xxxxx */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Thread Pid: %d Node: %d State: %s\n",
task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
/* Number of Joins: xxx Joining Node: xxx */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Number of Joins: %d Joining Node: %d\n",
dlm->num_joins, dlm->joining_node);
/* Domain Map: xx xx xx */
- out += snprintf(buf + out, len - out, "Domain Map: ");
+ out += scnprintf(buf + out, len - out, "Domain Map: ");
out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
/* Exit Domain Map: xx xx xx */
- out += snprintf(buf + out, len - out, "Exit Domain Map: ");
+ out += scnprintf(buf + out, len - out, "Exit Domain Map: ");
out += stringify_nodemap(dlm->exit_domain_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
/* Live Map: xx xx xx */
- out += snprintf(buf + out, len - out, "Live Map: ");
+ out += scnprintf(buf + out, len - out, "Live Map: ");
out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
/* Lock Resources: xxx (xxx) */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Lock Resources: %d (%d)\n",
atomic_read(&dlm->res_cur_count),
atomic_read(&dlm->res_tot_count));
@@ -729,29 +729,29 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
cur_mles += atomic_read(&dlm->mle_cur_count[i]);
/* MLEs: xxx (xxx) */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"MLEs: %d (%d)\n", cur_mles, tot_mles);
/* Blocking: xxx (xxx) */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
" Blocking: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
/* Mastery: xxx (xxx) */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
" Mastery: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
/* Migration: xxx (xxx) */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
" Migration: %d (%d)\n",
atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
/* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Lists: Dirty=%s Purge=%s PendingASTs=%s "
"PendingBASTs=%s\n",
(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
@@ -760,12 +760,12 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
/* Purge Count: xxx Refs: xxx */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Purge Count: %d Refs: %d\n", dlm->purge_count,
kref_read(&dlm->dlm_refs));
/* Dead Node: xxx */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Dead Node: %d\n", dlm->reco.dead_node);
/* What about DLM_RECO_STATE_FINALIZE? */
@@ -775,19 +775,19 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
state = "INACTIVE";
/* Recovery Pid: xxxx Master: xxx State: xxxx */
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"Recovery Pid: %d Master: %d State: %s\n",
task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.new_master, state);
/* Recovery Map: xx xx */
- out += snprintf(buf + out, len - out, "Recovery Map: ");
+ out += scnprintf(buf + out, len - out, "Recovery Map: ");
out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
buf + out, len - out);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
/* Recovery Node State: */
- out += snprintf(buf + out, len - out, "Recovery Node State:\n");
+ out += scnprintf(buf + out, len - out, "Recovery Node State:\n");
list_for_each_entry(node, &dlm->reco.node_data, list) {
switch (node->state) {
case DLM_RECO_NODE_DATA_INIT:
@@ -815,7 +815,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
state = "BAD";
break;
}
- out += snprintf(buf + out, len - out, "\t%u - %s\n",
+ out += scnprintf(buf + out, len - out, "\t%u - %s\n",
node->node_num, state);
}
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 900f7e466d11..55a6512e9fde 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2749,8 +2749,6 @@ leave:
return ret;
}
-#define DLM_MIGRATION_RETRY_MS 100
-
/*
* Should be called only after beginning the domain leave process.
* There should not be any remaining locks on nonlocal lock resources,
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index fd40c17cd022..5ccc4ff0b82a 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -39,8 +39,6 @@
static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);
-#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num)
-
/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
@@ -680,7 +678,6 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY 100
-#define DLM_THREAD_MAX_ASTS 10
static int dlm_thread(void *data)
{
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index cb9e6a73bea9..152a0fc4e905 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2133,7 +2133,7 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
}
#define OCFS2_SEC_BITS 34
-#define OCFS2_SEC_SHIFT (64 - 34)
+#define OCFS2_SEC_SHIFT (64 - OCFS2_SEC_BITS)
#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
/* LVB only has room for 64 bits of time here so we pack it for
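/*
 * Illustrative userspace sketch (not part of the patch) of the packing
 * the OCFS2_SEC_* macros describe: 34 bits of seconds and 30 bits of
 * nanoseconds share one 64-bit LVB word, and deriving the shift from
 * OCFS2_SEC_BITS (the change above) keeps the two macros consistent by
 * construction.
 */
#include <stdint.h>
#include <stdio.h>

#define SEC_BITS	34
#define SEC_SHIFT	(64 - SEC_BITS)			/* 30 */
#define NSEC_MASK	((1ULL << SEC_SHIFT) - 1)	/* nsec < 2^30 */

static uint64_t pack_time(uint64_t sec, uint64_t nsec)
{
	return (sec << SEC_SHIFT) | (nsec & NSEC_MASK);
}

int main(void)
{
	uint64_t v = pack_time(1585699200ULL, 999999999ULL);

	printf("sec=%llu nsec=%llu\n",
	       (unsigned long long)(v >> SEC_SHIFT),
	       (unsigned long long)(v & NSEC_MASK));
	return 0;
}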
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 68ba354cf361..b425f0b01dce 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -91,7 +91,7 @@ enum ocfs2_replay_state {
struct ocfs2_replay_map {
unsigned int rm_slots;
enum ocfs2_replay_state rm_state;
- unsigned char rm_replay_slots[0];
+ unsigned char rm_replay_slots[];
};
static void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index da65251ef815..5381020aaa9a 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -406,7 +406,7 @@ static int ocfs2_mknod(struct inode *dir,
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto roll_back;
}
if (si.enable) {
@@ -414,7 +414,7 @@ static int ocfs2_mknod(struct inode *dir,
meta_ac, data_ac);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto roll_back;
}
}
@@ -427,7 +427,7 @@ static int ocfs2_mknod(struct inode *dir,
OCFS2_I(dir)->ip_blkno);
if (status) {
mlog_errno(status);
- goto leave;
+ goto roll_back;
}
dl = dentry->d_fsdata;
@@ -437,12 +437,19 @@ static int ocfs2_mknod(struct inode *dir,
&lookup);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ goto roll_back;
}
insert_inode_hash(inode);
d_instantiate(dentry, inode);
status = 0;
+
+roll_back:
+ if (status < 0 && S_ISDIR(mode)) {
+ ocfs2_add_links_count(dirfe, -1);
+ drop_nlink(dir);
+ }
+
leave:
if (status < 0 && did_quota_inode)
dquot_free_inode(inode);
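/*
 * Sketch (not part of the patch) of the staged-unwind idiom the new
 * roll_back label adds to ocfs2_mknod(): once the directory link count
 * has been bumped, every later failure jumps to roll_back, which undoes
 * the bump and then falls through to the ordinary leave cleanup.
 */
#include <stdio.h>

static int do_op(int fail_at)
{
	int status = 0;

	/* stage 1: bump a counter */
	if (fail_at == 1) { status = -1; goto leave; }

	/* stage 2: work that must undo stage 1 if it fails */
	if (fail_at == 2) { status = -1; goto roll_back; }

roll_back:
	if (status < 0)
		printf("undoing stage 1\n");	/* cf. drop_nlink(dir) */
leave:
	return status;
}

int main(void)
{
	return do_op(2) ? 0 : 1;
}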
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 0db4a7ec58a2..0dd8c41bafd4 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -470,7 +470,7 @@ struct ocfs2_extent_list {
__le16 l_reserved1;
__le64 l_reserved2; /* Pad to
sizeof(ocfs2_extent_rec) */
-/*10*/ struct ocfs2_extent_rec l_recs[0]; /* Extent records */
+/*10*/ struct ocfs2_extent_rec l_recs[]; /* Extent records */
};
/*
@@ -484,7 +484,7 @@ struct ocfs2_chain_list {
__le16 cl_count; /* Total chains in this list */
__le16 cl_next_free_rec; /* Next unused chain slot */
__le64 cl_reserved1;
-/*10*/ struct ocfs2_chain_rec cl_recs[0]; /* Chain records */
+/*10*/ struct ocfs2_chain_rec cl_recs[]; /* Chain records */
};
/*
@@ -496,7 +496,7 @@ struct ocfs2_truncate_log {
/*00*/ __le16 tl_count; /* Total records in this log */
__le16 tl_used; /* Number of records in use */
__le32 tl_reserved1;
-/*08*/ struct ocfs2_truncate_rec tl_recs[0]; /* Truncate records */
+/*08*/ struct ocfs2_truncate_rec tl_recs[]; /* Truncate records */
};
/*
@@ -640,7 +640,7 @@ struct ocfs2_local_alloc
__le16 la_size; /* Size of included bitmap, in bytes */
__le16 la_reserved1;
__le64 la_reserved2;
-/*10*/ __u8 la_bitmap[0];
+/*10*/ __u8 la_bitmap[];
};
/*
@@ -653,7 +653,7 @@ struct ocfs2_inline_data
* for data, starting at id_data */
__le16 id_reserved0;
__le32 id_reserved1;
- __u8 id_data[0]; /* Start of user data */
+ __u8 id_data[]; /* Start of user data */
};
/*
@@ -798,7 +798,7 @@ struct ocfs2_dx_entry_list {
* possible in de_entries */
__le16 de_num_used; /* Current number of
* de_entries entries */
- struct ocfs2_dx_entry de_entries[0]; /* Indexed dir entries
+ struct ocfs2_dx_entry de_entries[]; /* Indexed dir entries
* in a packed array of
* length de_num_used */
};
@@ -935,7 +935,7 @@ struct ocfs2_refcount_list {
__le16 rl_used; /* Current number of used records */
__le32 rl_reserved2;
__le64 rl_reserved1; /* Pad to sizeof(ocfs2_refcount_record) */
-/*10*/ struct ocfs2_refcount_rec rl_recs[0]; /* Refcount records */
+/*10*/ struct ocfs2_refcount_rec rl_recs[]; /* Refcount records */
};
@@ -1021,7 +1021,7 @@ struct ocfs2_xattr_header {
buckets. A block uses
xb_check and sets
this field to zero.) */
- struct ocfs2_xattr_entry xh_entries[0]; /* xattr entry list. */
+ struct ocfs2_xattr_entry xh_entries[]; /* xattr entry list. */
};
/*
@@ -1207,7 +1207,7 @@ struct ocfs2_local_disk_dqinfo {
/* Header of one chunk of a quota file */
struct ocfs2_local_disk_chunk {
__le32 dqc_free; /* Number of free entries in the bitmap */
- __u8 dqc_bitmap[0]; /* Bitmap of entries in the corresponding
+ __u8 dqc_bitmap[]; /* Bitmap of entries in the corresponding
* chunk of quota file */
};
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index ee43e51188be..cfb77f70c888 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -154,6 +154,7 @@ ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
}
static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
+__acquires(&rf->rf_lock)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
@@ -161,6 +162,7 @@ static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
}
static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
+__releases(&rf->rf_lock)
{
struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 0249e8ca1028..bf3842e34fb9 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -33,9 +33,6 @@
static DEFINE_SPINLOCK(resv_lock);
-#define OCFS2_MIN_RESV_WINDOW_BITS 8
-#define OCFS2_MAX_RESV_WINDOW_BITS 1024
-
int ocfs2_dir_resv_allowed(struct ocfs2_super *osb)
{
return (osb->osb_resv_level && osb->osb_dir_resv_level);
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 8aa6a667860c..a191094694c6 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -656,8 +656,6 @@ error:
* and easier to preserve the name.
*/
-#define FS_OCFS2_NM 1
-
static struct ctl_table ocfs2_nm_table[] = {
{
.procname = "hb_ctl_path",
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 939df99d2dec..4836becb7578 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -2509,9 +2509,6 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
bail:
brelse(group_bh);
-
- if (status)
- mlog_errno(status);
return status;
}
@@ -2582,8 +2579,6 @@ static int _ocfs2_free_clusters(handle_t *handle,
num_clusters);
out:
- if (status)
- mlog_errno(status);
return status;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 05dd68ade293..ac61eeaf3837 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -220,31 +220,31 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
int i, out = 0;
unsigned long flags;
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => Id: %-s Uuid: %-s Gen: 0x%X Label: %-s\n",
"Device", osb->dev_str, osb->uuid_str,
osb->fs_generation, osb->vol_label);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => State: %d Flags: 0x%lX\n", "Volume",
atomic_read(&osb->vol_state), osb->osb_flags);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => Block: %lu Cluster: %d\n", "Sizes",
osb->sb->s_blocksize, osb->s_clustersize);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => Compat: 0x%X Incompat: 0x%X "
"ROcompat: 0x%X\n",
"Features", osb->s_feature_compat,
osb->s_feature_incompat, osb->s_feature_ro_compat);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => Opts: 0x%lX AtimeQuanta: %u\n", "Mount",
osb->s_mount_opt, osb->s_atime_quantum);
if (cconn) {
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => Stack: %s Name: %*s "
"Version: %d.%d\n", "Cluster",
(*osb->osb_cluster_stack == '\0' ?
@@ -255,7 +255,7 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
}
spin_lock_irqsave(&osb->dc_task_lock, flags);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => Pid: %d Count: %lu WakeSeq: %lu "
"WorkSeq: %lu\n", "DownCnvt",
(osb->dc_task ? task_pid_nr(osb->dc_task) : -1),
@@ -264,32 +264,32 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
spin_unlock_irqrestore(&osb->dc_task_lock, flags);
spin_lock(&osb->osb_lock);
- out += snprintf(buf + out, len - out, "%10s => Pid: %d Nodes:",
+ out += scnprintf(buf + out, len - out, "%10s => Pid: %d Nodes:",
"Recovery",
(osb->recovery_thread_task ?
task_pid_nr(osb->recovery_thread_task) : -1));
if (rm->rm_used == 0)
- out += snprintf(buf + out, len - out, " None\n");
+ out += scnprintf(buf + out, len - out, " None\n");
else {
for (i = 0; i < rm->rm_used; i++)
- out += snprintf(buf + out, len - out, " %d",
+ out += scnprintf(buf + out, len - out, " %d",
rm->rm_entries[i]);
- out += snprintf(buf + out, len - out, "\n");
+ out += scnprintf(buf + out, len - out, "\n");
}
spin_unlock(&osb->osb_lock);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => Pid: %d Interval: %lu\n", "Commit",
(osb->commit_task ? task_pid_nr(osb->commit_task) : -1),
osb->osb_commit_interval);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => State: %d TxnId: %lu NumTxns: %d\n",
"Journal", osb->journal->j_state,
osb->journal->j_trans_id,
atomic_read(&osb->journal->j_num_trans));
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => GlobalAllocs: %d LocalAllocs: %d "
"SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
"Stats",
@@ -299,7 +299,7 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
atomic_read(&osb->alloc_stats.moves),
atomic_read(&osb->alloc_stats.bg_extends));
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => State: %u Descriptor: %llu Size: %u bits "
"Default: %u bits\n",
"LocalAlloc", osb->local_alloc_state,
@@ -307,7 +307,7 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
osb->local_alloc_bits, osb->local_alloc_default_bits);
spin_lock(&osb->osb_lock);
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s => InodeSlot: %d StolenInodes: %d, "
"MetaSlot: %d StolenMeta: %d\n", "Steal",
osb->s_inode_steal_slot,
@@ -316,20 +316,20 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
atomic_read(&osb->s_num_meta_stolen));
spin_unlock(&osb->osb_lock);
- out += snprintf(buf + out, len - out, "OrphanScan => ");
- out += snprintf(buf + out, len - out, "Local: %u Global: %u ",
+ out += scnprintf(buf + out, len - out, "OrphanScan => ");
+ out += scnprintf(buf + out, len - out, "Local: %u Global: %u ",
os->os_count, os->os_seqno);
- out += snprintf(buf + out, len - out, " Last Scan: ");
+ out += scnprintf(buf + out, len - out, " Last Scan: ");
if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
- out += snprintf(buf + out, len - out, "Disabled\n");
+ out += scnprintf(buf + out, len - out, "Disabled\n");
else
- out += snprintf(buf + out, len - out, "%lu seconds ago\n",
+ out += scnprintf(buf + out, len - out, "%lu seconds ago\n",
(unsigned long)(ktime_get_seconds() - os->os_scantime));
- out += snprintf(buf + out, len - out, "%10s => %3s %10s\n",
+ out += scnprintf(buf + out, len - out, "%10s => %3s %10s\n",
"Slots", "Num", "RecoGen");
for (i = 0; i < osb->max_slots; ++i) {
- out += snprintf(buf + out, len - out,
+ out += scnprintf(buf + out, len - out,
"%10s %c %3d %10d\n",
" ",
(i == osb->slot_num ? '*' : ' '),
diff --git a/fs/open.c b/fs/open.c
index b69d6eed67e6..719b320ede52 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1046,8 +1046,10 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
if (flags & O_CREAT) {
op->intent |= LOOKUP_CREATE;
- if (flags & O_EXCL)
+ if (flags & O_EXCL) {
op->intent |= LOOKUP_EXCL;
+ flags |= O_NOFOLLOW;
+ }
}
if (flags & O_DIRECTORY)
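/*
 * With the hunk above, an exclusive create behaves as if O_NOFOLLOW were
 * also given, so the final path component is never resolved through a
 * symlink. From userspace nothing changes in the happy path
 * (illustration only):
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* fails with EEXIST if "target" already exists, even as a symlink */
	int fd = open("target", O_CREAT | O_EXCL | O_WRONLY, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	close(fd);
	return 0;
}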
diff --git a/fs/pipe.c b/fs/pipe.c
index 2144507447c5..16fb72e9abf7 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -146,7 +146,7 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
struct page *page = buf->page;
if (page_count(page) == 1) {
- memcg_kmem_uncharge(page, 0);
+ memcg_kmem_uncharge_page(page, 0);
__SetPageLocked(page);
return 0;
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 5efaf3708ec6..8e16f14bb05a 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -635,28 +635,35 @@ int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
struct mm_struct *mm = get_task_mm(task);
if (mm) {
+ unsigned long size;
+ unsigned long resident = 0;
+ unsigned long shared = 0;
+ unsigned long text = 0;
+ unsigned long data = 0;
+
size = task_statm(mm, &shared, &text, &data, &resident);
mmput(mm);
- }
- /*
- * For quick read, open code by putting numbers directly
- * expected format is
- * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
- * size, resident, shared, text, data);
- */
- seq_put_decimal_ull(m, "", size);
- seq_put_decimal_ull(m, " ", resident);
- seq_put_decimal_ull(m, " ", shared);
- seq_put_decimal_ull(m, " ", text);
- seq_put_decimal_ull(m, " ", 0);
- seq_put_decimal_ull(m, " ", data);
- seq_put_decimal_ull(m, " ", 0);
- seq_putc(m, '\n');
+ /*
+ * For quick read, open code by putting numbers directly
+ * expected format is
+ * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
+ * size, resident, shared, text, data);
+ */
+ seq_put_decimal_ull(m, "", size);
+ seq_put_decimal_ull(m, " ", resident);
+ seq_put_decimal_ull(m, " ", shared);
+ seq_put_decimal_ull(m, " ", text);
+ seq_put_decimal_ull(m, " ", 0);
+ seq_put_decimal_ull(m, " ", data);
+ seq_put_decimal_ull(m, " ", 0);
+ seq_putc(m, '\n');
+ } else {
+ seq_write(m, "0 0 0 0 0 0 0\n", 14);
+ }
return 0;
}
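/*
 * After the restructuring above, tasks without an mm (kernel threads)
 * get the literal "0 0 0 0 0 0 0" line instead of seven individually
 * formatted zeros. The field order is unchanged and can be inspected
 * directly (illustration only):
 */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/statm", "r");

	if (f && fgets(line, sizeof(line), f))
		/* size resident shared text lib(0) data dirty(0) */
		fputs(line, stdout);
	if (f)
		fclose(f);
	return 0;
}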
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c7c64272b0fa..74f948a6b621 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -405,11 +405,11 @@ print0:
static int lock_trace(struct task_struct *task)
{
- int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ int err = mutex_lock_killable(&task->signal->exec_update_mutex);
if (err)
return err;
if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
- mutex_unlock(&task->signal->cred_guard_mutex);
+ mutex_unlock(&task->signal->exec_update_mutex);
return -EPERM;
}
return 0;
@@ -417,7 +417,7 @@ static int lock_trace(struct task_struct *task)
static void unlock_trace(struct task_struct *task)
{
- mutex_unlock(&task->signal->cred_guard_mutex);
+ mutex_unlock(&task->signal->exec_update_mutex);
}
#ifdef CONFIG_STACKTRACE
@@ -1834,11 +1834,25 @@ void task_dump_owner(struct task_struct *task, umode_t mode,
*rgid = gid;
}
+void proc_pid_evict_inode(struct proc_inode *ei)
+{
+ struct pid *pid = ei->pid;
+
+ if (S_ISDIR(ei->vfs_inode.i_mode)) {
+ spin_lock(&pid->wait_pidfd.lock);
+ hlist_del_init_rcu(&ei->sibling_inodes);
+ spin_unlock(&pid->wait_pidfd.lock);
+ }
+
+ put_pid(pid);
+}
+
struct inode *proc_pid_make_inode(struct super_block * sb,
struct task_struct *task, umode_t mode)
{
struct inode * inode;
struct proc_inode *ei;
+ struct pid *pid;
/* We need a new inode */
@@ -1856,10 +1870,18 @@ struct inode *proc_pid_make_inode(struct super_block * sb,
/*
* grab the reference to task.
*/
- ei->pid = get_task_pid(task, PIDTYPE_PID);
- if (!ei->pid)
+ pid = get_task_pid(task, PIDTYPE_PID);
+ if (!pid)
goto out_unlock;
+ /* Let the pid remember us for quick removal */
+ ei->pid = pid;
+ if (S_ISDIR(mode)) {
+ spin_lock(&pid->wait_pidfd.lock);
+ hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes);
+ spin_unlock(&pid->wait_pidfd.lock);
+ }
+
task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
security_task_to_inode(task, inode);
@@ -2861,7 +2883,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
unsigned long flags;
int result;
- result = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ result = mutex_lock_killable(&task->signal->exec_update_mutex);
if (result)
return result;
@@ -2897,7 +2919,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
result = 0;
out_unlock:
- mutex_unlock(&task->signal->cred_guard_mutex);
+ mutex_unlock(&task->signal->exec_update_mutex);
return result;
}
@@ -3230,90 +3252,29 @@ static const struct inode_operations proc_tgid_base_inode_operations = {
.permission = proc_pid_permission,
};
-static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
-{
- struct dentry *dentry, *leader, *dir;
- char buf[10 + 1];
- struct qstr name;
-
- name.name = buf;
- name.len = snprintf(buf, sizeof(buf), "%u", pid);
- /* no ->d_hash() rejects on procfs */
- dentry = d_hash_and_lookup(mnt->mnt_root, &name);
- if (dentry) {
- d_invalidate(dentry);
- dput(dentry);
- }
-
- if (pid == tgid)
- return;
-
- name.name = buf;
- name.len = snprintf(buf, sizeof(buf), "%u", tgid);
- leader = d_hash_and_lookup(mnt->mnt_root, &name);
- if (!leader)
- goto out;
-
- name.name = "task";
- name.len = strlen(name.name);
- dir = d_hash_and_lookup(leader, &name);
- if (!dir)
- goto out_put_leader;
-
- name.name = buf;
- name.len = snprintf(buf, sizeof(buf), "%u", pid);
- dentry = d_hash_and_lookup(dir, &name);
- if (dentry) {
- d_invalidate(dentry);
- dput(dentry);
- }
-
- dput(dir);
-out_put_leader:
- dput(leader);
-out:
- return;
-}
-
/**
- * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
- * @task: task that should be flushed.
+ * proc_flush_pid - Remove dcache entries for @pid from the /proc dcache.
+ * @pid: pid that should be flushed.
*
- * When flushing dentries from proc, one needs to flush them from global
- * proc (proc_mnt) and from all the namespaces' procs this task was seen
- * in. This call is supposed to do all of this job.
- *
- * Looks in the dcache for
- * /proc/@pid
- * /proc/@tgid/task/@pid
- * if either directory is present flushes it and all of it'ts children
- * from the dcache.
+ * This function walks a list of inodes (that belong to any proc
+ * filesystem) that are attached to the pid and flushes them from
+ * the dentry cache.
*
* It is safe and reasonable to cache /proc entries for a task until
* that task exits. After that they just clog up the dcache with
* useless entries, possibly causing useful dcache entries to be
- * flushed instead. This routine is proved to flush those useless
- * dcache entries at process exit time.
+ * flushed instead. This routine is provided to flush those useless
+ * dcache entries when a process is reaped.
*
* NOTE: This routine is just an optimization so it does not guarantee
- * that no dcache entries will exist at process exit time it
- * just makes it very unlikely that any will persist.
+ * that no dcache entries will exist after a process is reaped;
+ * it just makes it very unlikely that any will persist.
*/
-void proc_flush_task(struct task_struct *task)
+void proc_flush_pid(struct pid *pid)
{
- int i;
- struct pid *pid, *tgid;
- struct upid *upid;
-
- pid = task_pid(task);
- tgid = task_tgid(task);
-
- for (i = 0; i <= pid->level; i++) {
- upid = &pid->numbers[i];
- proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
- tgid->numbers[i].nr);
- }
+ proc_invalidate_siblings_dcache(&pid->inodes, &pid->wait_pidfd.lock);
+ put_pid(pid);
}
static struct dentry *proc_pid_instantiate(struct dentry * dentry,
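proc_flush_pid() consumes a struct pid reference (note the final put_pid()), so the caller must pin the pid before the task is unhashed. A hedged sketch of the expected caller side; the exact call site in kernel/exit.c is an assumption here:

    /* sketch: pin the pid, reap the task, then hand the
     * reference to proc_flush_pid(), which drops it. */
    static void reap_example(struct task_struct *p)
    {
            struct pid *thread_pid = get_pid(task_pid(p));

            /* ... unhash the task (e.g. __exit_signal()) ... */
            proc_flush_pid(thread_pid);  /* reference dropped via put_pid() */
    }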
diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c
index c1dea9b8222e..d0989a443c77 100644
--- a/fs/proc/cpuinfo.c
+++ b/fs/proc/cpuinfo.c
@@ -17,6 +17,7 @@ static int cpuinfo_open(struct inode *inode, struct file *file)
}
static const struct proc_ops cpuinfo_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = cpuinfo_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 3faed94e4b65..4ed6dabdf6ff 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -531,6 +531,12 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
return p;
}
+static inline void pde_set_flags(struct proc_dir_entry *pde)
+{
+ if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
+ pde->flags |= PROC_ENTRY_PERMANENT;
+}
+
struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
struct proc_dir_entry *parent,
const struct proc_ops *proc_ops, void *data)
@@ -541,6 +547,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
if (!p)
return NULL;
p->proc_ops = proc_ops;
+ pde_set_flags(p);
return proc_register(parent, p);
}
EXPORT_SYMBOL(proc_create_data);
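With pde_set_flags() copying PROC_ENTRY_PERMANENT from the proc_ops into the entry, a built-in file opts into the permanent fast path purely declaratively, as the cpuinfo, kmsg and stat conversions below do. A minimal sketch (the entry name and show function are illustrative, and the flag is only safe for entries that are genuinely never removed):

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct proc_ops example_proc_ops = {
            .proc_flags   = PROC_ENTRY_PERMANENT, /* never removed */
            .proc_open    = example_open,
            .proc_read    = seq_read,
            .proc_lseek   = seq_lseek,
            .proc_release = single_release,
    };

    /* proc_create("example", 0, NULL, &example_proc_ops); */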
@@ -572,6 +579,7 @@ static int proc_seq_release(struct inode *inode, struct file *file)
}
static const struct proc_ops proc_seq_ops = {
+ /* not permanent -- can call into arbitrary seq_operations */
.proc_open = proc_seq_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
@@ -602,6 +610,7 @@ static int proc_single_open(struct inode *inode, struct file *file)
}
static const struct proc_ops proc_single_ops = {
+ /* not permanent -- can call into arbitrary ->single_show */
.proc_open = proc_single_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
@@ -662,9 +671,13 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
de = pde_subdir_find(parent, fn, len);
if (de) {
- rb_erase(&de->subdir_node, &parent->subdir);
- if (S_ISDIR(de->mode)) {
- parent->nlink--;
+ if (unlikely(pde_is_permanent(de))) {
+ WARN(1, "removing permanent /proc entry '%s'", de->name);
+ de = NULL;
+ } else {
+ rb_erase(&de->subdir_node, &parent->subdir);
+ if (S_ISDIR(de->mode))
+ parent->nlink--;
}
}
write_unlock(&proc_subdir_lock);
@@ -700,12 +713,24 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
write_unlock(&proc_subdir_lock);
return -ENOENT;
}
+ if (unlikely(pde_is_permanent(root))) {
+ write_unlock(&proc_subdir_lock);
+ WARN(1, "removing permanent /proc entry '%s/%s'",
+ root->parent->name, root->name);
+ return -EINVAL;
+ }
rb_erase(&root->subdir_node, &parent->subdir);
de = root;
while (1) {
next = pde_subdir_first(de);
if (next) {
+ if (unlikely(pde_is_permanent(root))) {
+ write_unlock(&proc_subdir_lock);
+ WARN(1, "removing permanent /proc entry '%s/%s'",
+ next->parent->name, next->name);
+ return -EINVAL;
+ }
rb_erase(&next->subdir_node, &de->subdir);
de = next;
continue;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 6da18316d209..fb4cace9ea41 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -33,21 +33,27 @@ static void proc_evict_inode(struct inode *inode)
{
struct proc_dir_entry *de;
struct ctl_table_header *head;
+ struct proc_inode *ei = PROC_I(inode);
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
/* Stop tracking associated processes */
- put_pid(PROC_I(inode)->pid);
+ if (ei->pid) {
+ proc_pid_evict_inode(ei);
+ ei->pid = NULL;
+ }
/* Let go of any associated proc directory entry */
- de = PDE(inode);
- if (de)
+ de = ei->pde;
+ if (de) {
pde_put(de);
+ ei->pde = NULL;
+ }
- head = PROC_I(inode)->sysctl;
+ head = ei->sysctl;
if (head) {
- RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
+ RCU_INIT_POINTER(ei->sysctl, NULL);
proc_sys_evict_inode(inode, head);
}
}
@@ -68,6 +74,7 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
ei->pde = NULL;
ei->sysctl = NULL;
ei->sysctl_entry = NULL;
+ INIT_HLIST_NODE(&ei->sibling_inodes);
ei->ns_ops = NULL;
return &ei->vfs_inode;
}
@@ -102,6 +109,62 @@ void __init proc_init_kmemcache(void)
BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
}
+void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock)
+{
+ struct inode *inode;
+ struct proc_inode *ei;
+ struct hlist_node *node;
+ struct super_block *old_sb = NULL;
+
+ rcu_read_lock();
+ for (;;) {
+ struct super_block *sb;
+ node = hlist_first_rcu(inodes);
+ if (!node)
+ break;
+ ei = hlist_entry(node, struct proc_inode, sibling_inodes);
+ spin_lock(lock);
+ hlist_del_init_rcu(&ei->sibling_inodes);
+ spin_unlock(lock);
+
+ inode = &ei->vfs_inode;
+ sb = inode->i_sb;
+ if ((sb != old_sb) && !atomic_inc_not_zero(&sb->s_active))
+ continue;
+ inode = igrab(inode);
+ rcu_read_unlock();
+ if (sb != old_sb) {
+ if (old_sb)
+ deactivate_super(old_sb);
+ old_sb = sb;
+ }
+ if (unlikely(!inode)) {
+ rcu_read_lock();
+ continue;
+ }
+
+ if (S_ISDIR(inode->i_mode)) {
+ struct dentry *dir = d_find_any_alias(inode);
+ if (dir) {
+ d_invalidate(dir);
+ dput(dir);
+ }
+ } else {
+ struct dentry *dentry;
+ while ((dentry = d_find_alias(inode))) {
+ d_invalidate(dentry);
+ dput(dentry);
+ }
+ }
+ iput(inode);
+
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+ if (old_sb)
+ deactivate_super(old_sb);
+}
+
static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
struct super_block *sb = root->d_sb;
@@ -139,6 +202,7 @@ static void unuse_pde(struct proc_dir_entry *pde)
/* pde is locked on entry, unlocked on exit */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
+ __releases(&pde->pde_unload_lock)
{
/*
* close() (proc_reg_release()) can't delete an entry and proceed:
@@ -195,135 +259,204 @@ void proc_entry_rundown(struct proc_dir_entry *de)
spin_unlock(&de->pde_unload_lock);
}
+static loff_t pde_lseek(struct proc_dir_entry *pde, struct file *file, loff_t offset, int whence)
+{
+ typeof_member(struct proc_ops, proc_lseek) lseek;
+
+ lseek = pde->proc_ops->proc_lseek;
+ if (!lseek)
+ lseek = default_llseek;
+ return lseek(file, offset, whence);
+}
+
static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
loff_t rv = -EINVAL;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_lseek) lseek;
- lseek = pde->proc_ops->proc_lseek;
- if (!lseek)
- lseek = default_llseek;
- rv = lseek(file, offset, whence);
+ if (pde_is_permanent(pde)) {
+ return pde_lseek(pde, file, offset, whence);
+ } else if (use_pde(pde)) {
+ rv = pde_lseek(pde, file, offset, whence);
unuse_pde(pde);
}
return rv;
}
+static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ typeof_member(struct proc_ops, proc_read) read;
+
+ read = pde->proc_ops->proc_read;
+ if (read)
+ return read(file, buf, count, ppos);
+ return -EIO;
+}
+
static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_read) read;
- read = pde->proc_ops->proc_read;
- if (read)
- rv = read(file, buf, count, ppos);
+ if (pde_is_permanent(pde)) {
+ return pde_read(pde, file, buf, count, ppos);
+ } else if (use_pde(pde)) {
+ rv = pde_read(pde, file, buf, count, ppos);
unuse_pde(pde);
}
return rv;
}
+static ssize_t pde_write(struct proc_dir_entry *pde, struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ typeof_member(struct proc_ops, proc_write) write;
+
+ write = pde->proc_ops->proc_write;
+ if (write)
+ return write(file, buf, count, ppos);
+ return -EIO;
+}
+
static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_write) write;
- write = pde->proc_ops->proc_write;
- if (write)
- rv = write(file, buf, count, ppos);
+ if (pde_is_permanent(pde)) {
+ return pde_write(pde, file, buf, count, ppos);
+ } else if (use_pde(pde)) {
+ rv = pde_write(pde, file, buf, count, ppos);
unuse_pde(pde);
}
return rv;
}
+static __poll_t pde_poll(struct proc_dir_entry *pde, struct file *file, struct poll_table_struct *pts)
+{
+ typeof_member(struct proc_ops, proc_poll) poll;
+
+ poll = pde->proc_ops->proc_poll;
+ if (poll)
+ return poll(file, pts);
+ return DEFAULT_POLLMASK;
+}
+
static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
__poll_t rv = DEFAULT_POLLMASK;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_poll) poll;
- poll = pde->proc_ops->proc_poll;
- if (poll)
- rv = poll(file, pts);
+ if (pde_is_permanent(pde)) {
+ return pde_poll(pde, file, pts);
+ } else if (use_pde(pde)) {
+ rv = pde_poll(pde, file, pts);
unuse_pde(pde);
}
return rv;
}
+static long pde_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ typeof_member(struct proc_ops, proc_ioctl) ioctl;
+
+ ioctl = pde->proc_ops->proc_ioctl;
+ if (ioctl)
+ return ioctl(file, cmd, arg);
+ return -ENOTTY;
+}
+
static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_ioctl) ioctl;
- ioctl = pde->proc_ops->proc_ioctl;
- if (ioctl)
- rv = ioctl(file, cmd, arg);
+ if (pde_is_permanent(pde)) {
+ return pde_ioctl(pde, file, cmd, arg);
+ } else if (use_pde(pde)) {
+ rv = pde_ioctl(pde, file, cmd, arg);
unuse_pde(pde);
}
return rv;
}
#ifdef CONFIG_COMPAT
+static long pde_compat_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
+
+ compat_ioctl = pde->proc_ops->proc_compat_ioctl;
+ if (compat_ioctl)
+ return compat_ioctl(file, cmd, arg);
+ return -ENOTTY;
+}
+
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
-
- compat_ioctl = pde->proc_ops->proc_compat_ioctl;
- if (compat_ioctl)
- rv = compat_ioctl(file, cmd, arg);
+ if (pde_is_permanent(pde)) {
+ return pde_compat_ioctl(pde, file, cmd, arg);
+ } else if (use_pde(pde)) {
+ rv = pde_compat_ioctl(pde, file, cmd, arg);
unuse_pde(pde);
}
return rv;
}
#endif
+static int pde_mmap(struct proc_dir_entry *pde, struct file *file, struct vm_area_struct *vma)
+{
+ typeof_member(struct proc_ops, proc_mmap) mmap;
+
+ mmap = pde->proc_ops->proc_mmap;
+ if (mmap)
+ return mmap(file, vma);
+ return -EIO;
+}
+
static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
int rv = -EIO;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_mmap) mmap;
- mmap = pde->proc_ops->proc_mmap;
- if (mmap)
- rv = mmap(file, vma);
+ if (pde_is_permanent(pde)) {
+ return pde_mmap(pde, file, vma);
+ } else if (use_pde(pde)) {
+ rv = pde_mmap(pde, file, vma);
unuse_pde(pde);
}
return rv;
}
static unsigned long
-proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
+pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned long orig_addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- struct proc_dir_entry *pde = PDE(file_inode(file));
- unsigned long rv = -EIO;
+ typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;
- if (use_pde(pde)) {
- typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;
-
- get_area = pde->proc_ops->proc_get_unmapped_area;
+ get_area = pde->proc_ops->proc_get_unmapped_area;
#ifdef CONFIG_MMU
- if (!get_area)
- get_area = current->mm->get_unmapped_area;
+ if (!get_area)
+ get_area = current->mm->get_unmapped_area;
#endif
+ if (get_area)
+ return get_area(file, orig_addr, len, pgoff, flags);
+ return orig_addr;
+}
- if (get_area)
- rv = get_area(file, orig_addr, len, pgoff, flags);
- else
- rv = orig_addr;
+static unsigned long
+proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct proc_dir_entry *pde = PDE(file_inode(file));
+ unsigned long rv = -EIO;
+
+ if (pde_is_permanent(pde)) {
+ return pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
+ } else if (use_pde(pde)) {
+ rv = pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
unuse_pde(pde);
}
return rv;
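Each proc_reg_* method above follows the same transformation: the old body moves into a pde_*() helper, and permanent entries call it directly, skipping the use_pde()/unuse_pde() rundown accounting, because a permanent entry's proc_ops can never go away. Schematically (pde_op and args are placeholders, not real symbols):

    if (pde_is_permanent(pde))
            return pde_op(pde, args);   /* no rundown protection needed */
    if (use_pde(pde)) {                 /* pin a removable entry */
            rv = pde_op(pde, args);
            unuse_pde(pde);
    }
    return rv;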
@@ -337,6 +470,13 @@ static int proc_reg_open(struct inode *inode, struct file *file)
typeof_member(struct proc_ops, proc_release) release;
struct pde_opener *pdeo;
+ if (pde_is_permanent(pde)) {
+ open = pde->proc_ops->proc_open;
+ if (open)
+ rv = open(inode, file);
+ return rv;
+ }
+
/*
* Ensure that
* 1) PDE's ->release hook will be called no matter what
@@ -386,6 +526,17 @@ static int proc_reg_release(struct inode *inode, struct file *file)
{
struct proc_dir_entry *pde = PDE(inode);
struct pde_opener *pdeo;
+
+ if (pde_is_permanent(pde)) {
+ typeof_member(struct proc_ops, proc_release) release;
+
+ release = pde->proc_ops->proc_release;
+ if (release) {
+ return release(inode, file);
+ }
+ return 0;
+ }
+
spin_lock(&pde->pde_unload_lock);
list_for_each_entry(pdeo, &pde->pde_openers, lh) {
if (pdeo->file == file) {
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 41587276798e..917cc85e3466 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -61,6 +61,7 @@ struct proc_dir_entry {
struct rb_node subdir_node;
char *name;
umode_t mode;
+ u8 flags;
u8 namelen;
char inline_name[];
} __randomize_layout;
@@ -73,6 +74,11 @@ struct proc_dir_entry {
0)
#define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct proc_dir_entry))
+static inline bool pde_is_permanent(const struct proc_dir_entry *pde)
+{
+ return pde->flags & PROC_ENTRY_PERMANENT;
+}
+
extern struct kmem_cache *proc_dir_entry_cache;
void pde_free(struct proc_dir_entry *pde);
@@ -91,7 +97,7 @@ struct proc_inode {
struct proc_dir_entry *pde;
struct ctl_table_header *sysctl;
struct ctl_table *sysctl_entry;
- struct hlist_node sysctl_inodes;
+ struct hlist_node sibling_inodes;
const struct proc_ns_operations *ns_ops;
struct inode vfs_inode;
} __randomize_layout;
@@ -158,6 +164,7 @@ extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
extern const struct dentry_operations pid_dentry_operations;
extern int pid_getattr(const struct path *, struct kstat *, u32, unsigned int);
extern int proc_setattr(struct dentry *, struct iattr *);
+extern void proc_pid_evict_inode(struct proc_inode *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
extern void pid_update_inode(struct task_struct *, struct inode *);
extern int pid_delete_dentry(const struct dentry *);
@@ -210,6 +217,7 @@ extern const struct inode_operations proc_pid_link_inode_operations;
extern const struct super_operations proc_sops;
void proc_init_kmemcache(void);
+void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock);
void set_proc_pid_nlink(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
extern void proc_entry_rundown(struct proc_dir_entry *);
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index ec1b7d2fb773..b38ad552887f 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -50,6 +50,7 @@ static __poll_t kmsg_poll(struct file *file, poll_table *wait)
static const struct proc_ops kmsg_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_read = kmsg_read,
.proc_poll = kmsg_poll,
.proc_open = kmsg_open,
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index c75bb4632ed1..b6f5d459b087 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -267,42 +267,9 @@ static void unuse_table(struct ctl_table_header *p)
complete(p->unregistering);
}
-static void proc_sys_prune_dcache(struct ctl_table_header *head)
+static void proc_sys_invalidate_dcache(struct ctl_table_header *head)
{
- struct inode *inode;
- struct proc_inode *ei;
- struct hlist_node *node;
- struct super_block *sb;
-
- rcu_read_lock();
- for (;;) {
- node = hlist_first_rcu(&head->inodes);
- if (!node)
- break;
- ei = hlist_entry(node, struct proc_inode, sysctl_inodes);
- spin_lock(&sysctl_lock);
- hlist_del_init_rcu(&ei->sysctl_inodes);
- spin_unlock(&sysctl_lock);
-
- inode = &ei->vfs_inode;
- sb = inode->i_sb;
- if (!atomic_inc_not_zero(&sb->s_active))
- continue;
- inode = igrab(inode);
- rcu_read_unlock();
- if (unlikely(!inode)) {
- deactivate_super(sb);
- rcu_read_lock();
- continue;
- }
-
- d_prune_aliases(inode);
- iput(inode);
- deactivate_super(sb);
-
- rcu_read_lock();
- }
- rcu_read_unlock();
+ proc_invalidate_siblings_dcache(&head->inodes, &sysctl_lock);
}
/* called under sysctl_lock, will reacquire if has to wait */
@@ -324,10 +291,10 @@ static void start_unregistering(struct ctl_table_header *p)
spin_unlock(&sysctl_lock);
}
/*
- * Prune dentries for unregistered sysctls: namespaced sysctls
+ * Invalidate dentries for unregistered sysctls: namespaced sysctls
* can have duplicate names and contaminate dcache very badly.
*/
- proc_sys_prune_dcache(p);
+ proc_sys_invalidate_dcache(p);
/*
* do not remove from the list until nobody holds it; walking the
* list in do_sysctl() relies on that.
@@ -483,7 +450,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
}
ei->sysctl = head;
ei->sysctl_entry = table;
- hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes);
+ hlist_add_head_rcu(&ei->sibling_inodes, &head->inodes);
head->count++;
spin_unlock(&sysctl_lock);
@@ -514,7 +481,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head)
{
spin_lock(&sysctl_lock);
- hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes);
+ hlist_del_init_rcu(&PROC_I(inode)->sibling_inodes);
if (!--head->count)
kfree_rcu(head, rcu);
spin_unlock(&sysctl_lock);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 608233dfd29c..2633f10446c3 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -292,39 +292,3 @@ struct proc_dir_entry proc_root = {
.subdir = RB_ROOT,
.name = "/proc",
};
-
-int pid_ns_prepare_proc(struct pid_namespace *ns)
-{
- struct proc_fs_context *ctx;
- struct fs_context *fc;
- struct vfsmount *mnt;
-
- fc = fs_context_for_mount(&proc_fs_type, SB_KERNMOUNT);
- if (IS_ERR(fc))
- return PTR_ERR(fc);
-
- if (fc->user_ns != ns->user_ns) {
- put_user_ns(fc->user_ns);
- fc->user_ns = get_user_ns(ns->user_ns);
- }
-
- ctx = fc->fs_private;
- if (ctx->pid_ns != ns) {
- put_pid_ns(ctx->pid_ns);
- get_pid_ns(ns);
- ctx->pid_ns = ns;
- }
-
- mnt = fc_mount(fc);
- put_fs_context(fc);
- if (IS_ERR(mnt))
- return PTR_ERR(mnt);
-
- ns->proc_mnt = mnt;
- return 0;
-}
-
-void pid_ns_release_proc(struct pid_namespace *ns)
-{
- kern_unmount(ns->proc_mnt);
-}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 0449edf460f5..46b3293015fe 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -224,6 +224,7 @@ static int stat_open(struct inode *inode, struct file *file)
}
static const struct proc_ops stat_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = stat_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3ba9ae83bff5..8d382d4ec067 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -123,38 +123,14 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
}
#endif
-static void vma_stop(struct proc_maps_private *priv)
-{
- struct mm_struct *mm = priv->mm;
-
- release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
- mmput(mm);
-}
-
-static struct vm_area_struct *
-m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
-{
- if (vma == priv->tail_vma)
- return NULL;
- return vma->vm_next ?: priv->tail_vma;
-}
-
-static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
-{
- if (m->count < m->size) /* vma is copied successfully */
- m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
-}
-
static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
- unsigned long last_addr = m->version;
+ unsigned long last_addr = *ppos;
struct mm_struct *mm;
struct vm_area_struct *vma;
- unsigned int pos = *ppos;
- /* See m_cache_vma(). Zero at the start or after lseek. */
+ /* See m_next(). Zero at the start or after lseek. */
if (last_addr == -1UL)
return NULL;
@@ -163,64 +139,59 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
return ERR_PTR(-ESRCH);
mm = priv->mm;
- if (!mm || !mmget_not_zero(mm))
+ if (!mm || !mmget_not_zero(mm)) {
+ put_task_struct(priv->task);
+ priv->task = NULL;
return NULL;
+ }
if (down_read_killable(&mm->mmap_sem)) {
mmput(mm);
+ put_task_struct(priv->task);
+ priv->task = NULL;
return ERR_PTR(-EINTR);
}
hold_task_mempolicy(priv);
priv->tail_vma = get_gate_vma(mm);
- if (last_addr) {
- vma = find_vma(mm, last_addr - 1);
- if (vma && vma->vm_start <= last_addr)
- vma = m_next_vma(priv, vma);
- if (vma)
- return vma;
- }
-
- m->version = 0;
- if (pos < mm->map_count) {
- for (vma = mm->mmap; pos; pos--) {
- m->version = vma->vm_start;
- vma = vma->vm_next;
- }
+ vma = find_vma(mm, last_addr);
+ if (vma)
return vma;
- }
-
- /* we do not bother to update m->version in this case */
- if (pos == mm->map_count && priv->tail_vma)
- return priv->tail_vma;
- vma_stop(priv);
- return NULL;
+ return priv->tail_vma;
}
-static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
- struct vm_area_struct *next;
+ struct vm_area_struct *next, *vma = v;
+
+ if (vma == priv->tail_vma)
+ next = NULL;
+ else if (vma->vm_next)
+ next = vma->vm_next;
+ else
+ next = priv->tail_vma;
+
+ *ppos = next ? next->vm_start : -1UL;
- (*pos)++;
- next = m_next_vma(priv, v);
- if (!next)
- vma_stop(priv);
return next;
}
static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
+ struct mm_struct *mm = priv->mm;
- if (!IS_ERR_OR_NULL(v))
- vma_stop(priv);
- if (priv->task) {
- put_task_struct(priv->task);
- priv->task = NULL;
- }
+ if (!priv->task)
+ return;
+
+ release_task_mempolicy(priv);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ put_task_struct(priv->task);
+ priv->task = NULL;
}
static int proc_maps_open(struct inode *inode, struct file *file,
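With m->version gone, the resume point lives entirely in *ppos: m_next() stores the next VMA's start address (or -1UL at the end) and m_start() resumes with find_vma(). A hedged sketch of this general seq_file pattern with a hypothetical keyed table (the ex_* names are placeholders):

    /* store an opaque resume cookie in *ppos instead of a record index */
    static void *ex_start(struct seq_file *m, loff_t *ppos)
    {
            unsigned long cookie = *ppos;

            if (cookie == -1UL)
                    return NULL;              /* previous pass hit the end */
            return ex_lookup_from(cookie);    /* resume at the stored key */
    }

    static void *ex_next(struct seq_file *m, void *v, loff_t *ppos)
    {
            void *next = ex_following(v);

            *ppos = next ? ex_key(next) : -1UL;
            return next;
    }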
@@ -363,7 +334,6 @@ done:
static int show_map(struct seq_file *m, void *v)
{
show_map_vma(m, v);
- m_cache_vma(m, v);
return 0;
}
@@ -847,8 +817,6 @@ static int show_smap(struct seq_file *m, void *v)
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
show_smap_vma_flags(m, vma);
- m_cache_vma(m, vma);
-
return 0;
}
@@ -1887,7 +1855,6 @@ static int show_numa_map(struct seq_file *m, void *v)
seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
seq_putc(m, '\n');
- m_cache_vma(m, vma);
return 0;
}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 4075e41408b4..5129efc6f2e6 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -842,7 +842,7 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb,
struct item_head *pasted;
struct buffer_info bi;
- buffer_info_init_right(tb, &bi);
+ buffer_info_init_right(tb, &bi);
leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
/* append item in R[0] */
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 45e1a5d11af3..adb21bea3d60 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -184,11 +184,12 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
}
/* we need to make sure nobody is changing the file size beneath us */
-{
- int depth = reiserfs_write_unlock_nested(inode->i_sb);
- inode_lock(inode);
- reiserfs_write_lock_nested(inode->i_sb, depth);
-}
+ {
+ int depth = reiserfs_write_unlock_nested(inode->i_sb);
+
+ inode_lock(inode);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
+ }
reiserfs_write_lock(inode->i_sb);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 959a066b7bb0..1594687582f0 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -838,10 +838,10 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
*/
INC_DIR_INODE_NLINK(dir)
- retval = reiserfs_new_inode(&th, dir, mode, NULL /*symlink */ ,
- old_format_only(dir->i_sb) ?
- EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
- dentry, inode, &security);
+ retval = reiserfs_new_inode(&th, dir, mode, NULL /*symlink */,
+ old_format_only(dir->i_sb) ?
+ EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
+ dentry, inode, &security);
if (retval) {
DEC_DIR_INODE_NLINK(dir)
goto out_failed;
@@ -967,7 +967,7 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
reiserfs_update_sd(&th, inode);
DEC_DIR_INODE_NLINK(dir)
- dir->i_size -= (DEH_SIZE + de.de_entrylen);
+ dir->i_size -= (DEH_SIZE + de.de_entrylen);
reiserfs_update_sd(&th, dir);
/* prevent empty directory from getting lost */
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 1600034a929b..79781ebd2145 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -68,13 +68,6 @@ int seq_open(struct file *file, const struct seq_operations *op)
p->file = file;
/*
- * Wrappers around seq_open(e.g. swaps_open) need to be
- * aware of this. If they set f_version themselves, they
- * should call seq_open first and then set f_version.
- */
- file->f_version = 0;
-
- /*
* seq_files support lseek() and pread(). They do not implement
* write() at all, but we clear FMODE_PWRITE here for historical
* reasons.
@@ -94,7 +87,6 @@ static int traverse(struct seq_file *m, loff_t offset)
int error = 0;
void *p;
- m->version = 0;
m->index = 0;
m->count = m->from = 0;
if (!offset)
@@ -161,25 +153,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
mutex_lock(&m->lock);
/*
- * seq_file->op->..m_start/m_stop/m_next may do special actions
- * or optimisations based on the file->f_version, so we want to
- * pass the file->f_version to those methods.
- *
- * seq_file->version is just copy of f_version, and seq_file
- * methods can treat it simply as file version.
- * It is copied in first and copied out after all operations.
- * It is convenient to have it as part of structure to avoid the
- * need of passing another argument to all the seq_file methods.
- */
- m->version = file->f_version;
-
- /*
* if request is to read from zero offset, reset iterator to first
* record as it might have been already advanced by previous requests
*/
if (*ppos == 0) {
m->index = 0;
- m->version = 0;
m->count = 0;
}
@@ -190,7 +168,6 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
if (err) {
/* With prejudice... */
m->read_pos = 0;
- m->version = 0;
m->index = 0;
m->count = 0;
goto Done;
@@ -243,7 +220,6 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
m->buf = seq_buf_alloc(m->size <<= 1);
if (!m->buf)
goto Enomem;
- m->version = 0;
p = m->op->start(m, &m->index);
}
m->op->stop(m, p);
@@ -287,7 +263,6 @@ Done:
*ppos += copied;
m->read_pos += copied;
}
- file->f_version = m->version;
mutex_unlock(&m->lock);
return copied;
Enomem:
@@ -313,7 +288,6 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
loff_t retval = -EINVAL;
mutex_lock(&m->lock);
- m->version = file->f_version;
switch (whence) {
case SEEK_CUR:
offset += file->f_pos;
@@ -329,7 +303,6 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
/* with extreme prejudice... */
file->f_pos = 0;
m->read_pos = 0;
- m->version = 0;
m->index = 0;
m->count = 0;
} else {
@@ -340,7 +313,6 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
file->f_pos = offset;
}
}
- file->f_version = m->version;
mutex_unlock(&m->lock);
return retval;
}
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 5afe0e7ff7cd..64e6a6698935 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -416,15 +416,18 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group);
/**
- * __compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing
+ * compat_only_sysfs_link_entry_to_kobj - add a symlink to a kobject pointing
* to a group or an attribute
* @kobj: The kobject containing the group.
* @target_kobj: The target kobject.
* @target_name: The name of the target group or attribute.
+ * @symlink_name: The name of the symlink file (target_name will be
+ * used if symlink_name is NULL).
*/
-int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
- struct kobject *target_kobj,
- const char *target_name)
+int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name,
+ const char *symlink_name)
{
struct kernfs_node *target;
struct kernfs_node *entry;
@@ -449,15 +452,18 @@ int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
return -ENOENT;
}
- link = kernfs_create_link(kobj->sd, target_name, entry);
+ if (!symlink_name)
+ symlink_name = target_name;
+
+ link = kernfs_create_link(kobj->sd, symlink_name, entry);
if (PTR_ERR(link) == -EEXIST)
- sysfs_warn_dup(kobj->sd, target_name);
+ sysfs_warn_dup(kobj->sd, symlink_name);
kernfs_put(entry);
kernfs_put(target);
return PTR_ERR_OR_ZERO(link);
}
-EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj);
+EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj);
static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
const struct attribute_group *grp,
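Existing callers keep the old behaviour by passing NULL, in which case the symlink takes the target entry's name; a separate name is only needed when the two must differ. Illustrative calls (the names are made up):

    /* symlink named after the target entry, as before */
    ret = compat_only_sysfs_link_entry_to_kobj(kobj, tgt, "power", NULL);

    /* symlink under a different name */
    ret = compat_only_sysfs_link_entry_to_kobj(kobj, tgt, "power", "legacy");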
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 8ceb51478800..7e4bfaf2871f 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -225,7 +225,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
int offs, int quiet, int must_chk_crc)
{
- int err = -EINVAL, type, node_len;
+ int err = -EINVAL, type, node_len, dump_node = 1;
uint32_t crc, node_crc, magic;
const struct ubifs_ch *ch = buf;
@@ -278,10 +278,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
out_len:
if (!quiet)
ubifs_err(c, "bad node length %d", node_len);
+ if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ)
+ dump_node = 0;
out:
if (!quiet) {
ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
- ubifs_dump_node(c, buf);
+ if (dump_node) {
+ ubifs_dump_node(c, buf);
+ } else {
+ int safe_len = min3(node_len, c->leb_size - offs,
+ (int)UBIFS_MAX_DATA_NODE_SZ);
+ pr_err("\tprevent out-of-bounds memory access\n");
+ pr_err("\ttruncated data node length %d\n", safe_len);
+ pr_err("\tcorrupted data node:\n");
+ print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
+ buf, safe_len, 0);
+ }
dump_stack();
}
return err;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 3bf8b1fda9d7..e5ec1afe1c66 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -905,6 +905,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
ubifs_err(c, "dead directory entry '%s', error %d",
xent->name, err);
ubifs_ro_mode(c, err);
+ kfree(xent);
goto out_release;
}
ubifs_assert(c, ubifs_inode(xino)->xattr);
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index edf43ddd7dce..283f9eb48410 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -157,7 +157,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
int err = 0;
ino_t xattr_inum;
union ubifs_key key;
- struct ubifs_dent_node *xent;
+ struct ubifs_dent_node *xent, *pxent = NULL;
struct fscrypt_name nm = {0};
struct ubifs_orphan *xattr_orphan;
struct ubifs_orphan *orphan;
@@ -181,11 +181,16 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
xattr_inum = le64_to_cpu(xent->inum);
xattr_orphan = orphan_add(c, xattr_inum, orphan);
- if (IS_ERR(xattr_orphan))
+ if (IS_ERR(xattr_orphan)) {
+ kfree(xent);
return PTR_ERR(xattr_orphan);
+ }
+ kfree(pxent);
+ pxent = xent;
key_read(c, &xent->key, &key);
}
+ kfree(pxent);
return 0;
}
@@ -688,14 +693,14 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ino_key_init(c, &key1, inum);
err = ubifs_tnc_lookup(c, &key1, ino);
- if (err)
+ if (err && err != -ENOENT)
goto out_free;
/*
* Check whether an inode can really get deleted.
* linkat() with O_TMPFILE allows rebirth of an inode.
*/
- if (ino->nlink == 0) {
+ if (err == 0 && ino->nlink == 0) {
dbg_rcvry("deleting orphaned inode %lu",
(unsigned long)inum);
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 3d83be54c474..758efe557a19 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -83,7 +83,7 @@ struct udf_virtual_data {
struct udf_bitmap {
__u32 s_extPosition;
int s_nr_groups;
- struct buffer_head *s_block_bitmap[0];
+ struct buffer_head *s_block_bitmap[];
};
struct udf_part_map {
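Converting the one-element array to a C99 flexible array member lets allocations use struct_size() and gives compiler bounds checks an honest view of the trailing array. A hedged sketch of the allocation side (assuming the caller knows nr_groups):

    struct udf_bitmap *bitmap;

    bitmap = kzalloc(struct_size(bitmap, s_block_bitmap, nr_groups),
                     GFP_KERNEL);
    if (!bitmap)
            return NULL;
    bitmap->s_nr_groups = nr_groups;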
diff --git a/fs/unicode/.gitignore b/fs/unicode/.gitignore
index 0381e2221480..9b2467e77b2d 100644
--- a/fs/unicode/.gitignore
+++ b/fs/unicode/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
mkutf8data
utf8data.h
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 37df7c9eedb1..e39fdec8a0b0 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -314,8 +314,11 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
if (!pmd_present(_pmd))
goto out;
- if (pmd_trans_huge(_pmd))
+ if (pmd_trans_huge(_pmd)) {
+ if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
+ ret = true;
goto out;
+ }
/*
* the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
@@ -328,12 +331,38 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
*/
if (pte_none(*pte))
ret = true;
+ if (!pte_write(*pte) && (reason & VM_UFFD_WP))
+ ret = true;
pte_unmap(pte);
out:
return ret;
}
+/* Should pair with userfaultfd_signal_pending() */
+static inline long userfaultfd_get_blocking_state(unsigned int flags)
+{
+ if (flags & FAULT_FLAG_INTERRUPTIBLE)
+ return TASK_INTERRUPTIBLE;
+
+ if (flags & FAULT_FLAG_KILLABLE)
+ return TASK_KILLABLE;
+
+ return TASK_UNINTERRUPTIBLE;
+}
+
+/* Should pair with userfaultfd_get_blocking_state() */
+static inline bool userfaultfd_signal_pending(unsigned int flags)
+{
+ if (flags & FAULT_FLAG_INTERRUPTIBLE)
+ return signal_pending(current);
+
+ if (flags & FAULT_FLAG_KILLABLE)
+ return fatal_signal_pending(current);
+
+ return false;
+}
+
/*
* The locking rules involved in returning VM_FAULT_RETRY depending on
* FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
@@ -355,7 +384,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue uwq;
vm_fault_t ret = VM_FAULT_SIGBUS;
- bool must_wait, return_to_userland;
+ bool must_wait;
long blocking_state;
/*
@@ -462,11 +491,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
uwq.ctx = ctx;
uwq.waken = false;
- return_to_userland =
- (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
- (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
- blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
- TASK_KILLABLE;
+ blocking_state = userfaultfd_get_blocking_state(vmf->flags);
spin_lock_irq(&ctx->fault_pending_wqh.lock);
/*
@@ -492,8 +517,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
up_read(&mm->mmap_sem);
if (likely(must_wait && !READ_ONCE(ctx->released) &&
- (return_to_userland ? !signal_pending(current) :
- !fatal_signal_pending(current)))) {
+ !userfaultfd_signal_pending(vmf->flags))) {
wake_up_poll(&ctx->fd_wqh, EPOLLIN);
schedule();
ret |= VM_FAULT_MAJOR;
@@ -515,8 +539,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
set_current_state(blocking_state);
if (READ_ONCE(uwq.waken) ||
READ_ONCE(ctx->released) ||
- (return_to_userland ? signal_pending(current) :
- fatal_signal_pending(current)))
+ userfaultfd_signal_pending(vmf->flags))
break;
schedule();
}
@@ -524,30 +547,6 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
__set_current_state(TASK_RUNNING);
- if (return_to_userland) {
- if (signal_pending(current) &&
- !fatal_signal_pending(current)) {
- /*
- * If we got a SIGSTOP or SIGCONT and this is
- * a normal userland page fault, just let
- * userland return so the signal will be
- * handled and gdb debugging works. The page
- * fault code immediately after we return from
- * this function is going to release the
- * mmap_sem and it's not depending on it
- * (unlike gup would if we were not to return
- * VM_FAULT_RETRY).
- *
- * If a fatal signal is pending we still take
- * the streamlined VM_FAULT_RETRY failure path
- * and there's no need to retake the mmap_sem
- * in such case.
- */
- down_read(&mm->mmap_sem);
- ret = VM_FAULT_NOPAGE;
- }
- }
-
/*
* Here we race with the list_del; list_add in
* userfaultfd_ctx_read(), however because we don't ever run
@@ -1293,10 +1292,13 @@ static __always_inline int validate_range(struct mm_struct *mm,
return 0;
}
-static inline bool vma_can_userfault(struct vm_area_struct *vma)
+static inline bool vma_can_userfault(struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
- return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
- vma_is_shmem(vma);
+ /* FIXME: add WP support to hugetlbfs and shmem */
+ return vma_is_anonymous(vma) ||
+ ((is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) &&
+ !(vm_flags & VM_UFFD_WP));
}
static int userfaultfd_register(struct userfaultfd_ctx *ctx,
@@ -1328,15 +1330,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vm_flags = 0;
if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
vm_flags |= VM_UFFD_MISSING;
- if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
+ if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)
vm_flags |= VM_UFFD_WP;
- /*
- * FIXME: remove the below error constraint by
- * implementing the wprotect tracking mode.
- */
- ret = -EINVAL;
- goto out;
- }
ret = validate_range(mm, &uffdio_register.range.start,
uffdio_register.range.len);
@@ -1386,7 +1381,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
/* check not compatible vmas */
ret = -EINVAL;
- if (!vma_can_userfault(cur))
+ if (!vma_can_userfault(cur, vm_flags))
goto out_unlock;
/*
@@ -1414,6 +1409,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
if (end & (vma_hpagesize - 1))
goto out_unlock;
}
+ if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
+ goto out_unlock;
/*
* Check that this vma isn't already owned by a
@@ -1443,7 +1440,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
do {
cond_resched();
- BUG_ON(!vma_can_userfault(vma));
+ BUG_ON(!vma_can_userfault(vma, vm_flags));
BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
vma->vm_userfaultfd_ctx.ctx != ctx);
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
@@ -1498,14 +1495,24 @@ out_unlock:
up_write(&mm->mmap_sem);
mmput(mm);
if (!ret) {
+ __u64 ioctls_out;
+
+ ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
+ UFFD_API_RANGE_IOCTLS;
+
+ /*
+ * Declare the WP ioctl only if the WP mode is
+ * specified and all checks passed with the range
+ */
+ if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
+ ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
+
/*
* Now that we scanned all vmas we can already tell
* userland which ioctls methods are guaranteed to
* succeed on this range.
*/
- if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
- UFFD_API_RANGE_IOCTLS,
- &user_uffdio_register->ioctls))
+ if (put_user(ioctls_out, &user_uffdio_register->ioctls))
ret = -EFAULT;
}
out:
@@ -1581,7 +1588,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
* provides for more strict behavior to notice
* unregistration errors.
*/
- if (!vma_can_userfault(cur))
+ if (!vma_can_userfault(cur, cur->vm_flags))
goto out_unlock;
found = true;
@@ -1595,7 +1602,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
do {
cond_resched();
- BUG_ON(!vma_can_userfault(vma));
+ BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
/*
* Nothing to do: this vma is already registered into this
@@ -1730,11 +1737,12 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
ret = -EINVAL;
if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
goto out;
- if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
+ if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
goto out;
if (mmget_not_zero(ctx->mm)) {
ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
- uffdio_copy.len, &ctx->mmap_changing);
+ uffdio_copy.len, &ctx->mmap_changing,
+ uffdio_copy.mode);
mmput(ctx->mm);
} else {
return -ESRCH;
@@ -1807,6 +1815,53 @@ out:
return ret;
}
+static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
+ unsigned long arg)
+{
+ int ret;
+ struct uffdio_writeprotect uffdio_wp;
+ struct uffdio_writeprotect __user *user_uffdio_wp;
+ struct userfaultfd_wake_range range;
+ bool mode_wp, mode_dontwake;
+
+ if (READ_ONCE(ctx->mmap_changing))
+ return -EAGAIN;
+
+ user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
+
+ if (copy_from_user(&uffdio_wp, user_uffdio_wp,
+ sizeof(struct uffdio_writeprotect)))
+ return -EFAULT;
+
+ ret = validate_range(ctx->mm, &uffdio_wp.range.start,
+ uffdio_wp.range.len);
+ if (ret)
+ return ret;
+
+ if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
+ UFFDIO_WRITEPROTECT_MODE_WP))
+ return -EINVAL;
+
+ mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
+ mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
+
+ if (mode_wp && mode_dontwake)
+ return -EINVAL;
+
+ ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
+ uffdio_wp.range.len, mode_wp,
+ &ctx->mmap_changing);
+ if (ret)
+ return ret;
+
+ if (!mode_wp && !mode_dontwake) {
+ range.start = uffdio_wp.range.start;
+ range.len = uffdio_wp.range.len;
+ wake_userfault(ctx, &range);
+ }
+ return ret;
+}
+
static inline unsigned int uffd_ctx_features(__u64 user_features)
{
/*
@@ -1888,6 +1943,9 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
case UFFDIO_ZEROPAGE:
ret = userfaultfd_zeropage(ctx, arg);
break;
+ case UFFDIO_WRITEPROTECT:
+ ret = userfaultfd_writeprotect(ctx, arg);
+ break;
}
return ret;
}
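From userspace the new ioctl is driven with a struct uffdio_writeprotect. A hedged sketch of write-protecting a registered range and later lifting the protection (uffd setup, headers and error handling omitted; addr and len are illustrative):

    struct uffdio_writeprotect wp;

    /* write-protect [addr, addr + len) on a WP-registered range */
    wp.range.start = addr;
    wp.range.len   = len;
    wp.mode        = UFFDIO_WRITEPROTECT_MODE_WP;
    ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

    /* later: remove the protection and wake the blocked faulters */
    wp.mode = 0;
    ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);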
diff --git a/fs/xattr.c b/fs/xattr.c
index 90dd78f0eb27..e13265e65871 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -817,7 +817,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
if (len < sizeof(*new_xattr))
return NULL;
- new_xattr = kmalloc(len, GFP_KERNEL);
+ new_xattr = kvmalloc(len, GFP_KERNEL);
if (!new_xattr)
return NULL;
@@ -860,6 +860,7 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
* @value: value of the xattr. If %NULL, will remove the attribute.
* @size: size of the new xattr
* @flags: %XATTR_{CREATE|REPLACE}
+ * @removed_size: returns size of the removed xattr, -1 if none removed
*
* %XATTR_CREATE is set, the xattr shouldn't exist already; otherwise fails
* with -EEXIST. If %XATTR_REPLACE is set, the xattr should exist;
@@ -868,7 +869,8 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
* Returns 0 on success, -errno on failure.
*/
int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
- const void *value, size_t size, int flags)
+ const void *value, size_t size, int flags,
+ ssize_t *removed_size)
{
struct simple_xattr *xattr;
struct simple_xattr *new_xattr = NULL;
@@ -882,7 +884,7 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
new_xattr->name = kstrdup(name, GFP_KERNEL);
if (!new_xattr->name) {
- kfree(new_xattr);
+ kvfree(new_xattr);
return -ENOMEM;
}
}
@@ -895,8 +897,12 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
err = -EEXIST;
} else if (new_xattr) {
list_replace(&xattr->list, &new_xattr->list);
+ if (removed_size)
+ *removed_size = xattr->size;
} else {
list_del(&xattr->list);
+ if (removed_size)
+ *removed_size = xattr->size;
}
goto out;
}
@@ -908,11 +914,14 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
list_add(&new_xattr->list, &xattrs->head);
xattr = NULL;
}
+
+ if (removed_size)
+ *removed_size = -1;
out:
spin_unlock(&xattrs->lock);
if (xattr) {
kfree(xattr->name);
- kfree(xattr);
+ kvfree(xattr);
}
return err;
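A caller that tracks space usage can now learn how many bytes a replace or delete released; a hedged sketch (the uncharge helper is hypothetical):

    ssize_t removed_size;
    int err;

    err = simple_xattr_set(xattrs, name, value, size, flags,
                           &removed_size);
    if (!err && removed_size >= 0)
            example_uncharge_bytes(removed_size);  /* hypothetical helper */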
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index aceca2f9a3db..4f95df476181 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -26,6 +26,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_bmap.o \
xfs_bmap_btree.o \
xfs_btree.o \
+ xfs_btree_staging.o \
xfs_da_btree.o \
xfs_defer.o \
xfs_dir2.o \
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 08d6beb54f8c..9d84007a5c65 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -231,7 +231,7 @@ xfs_sbblock_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
- struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
+ struct xfs_dsb *dsb = bp->b_addr;
xfs_sb_to_disk(dsb, &mp->m_sb);
dsb->sb_inprogress = 1;
@@ -243,7 +243,7 @@ xfs_agfblock_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
+ struct xfs_agf *agf = bp->b_addr;
xfs_extlen_t tmpsize;
agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
@@ -301,7 +301,7 @@ xfs_agflblock_init(
uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
}
- agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
+ agfl_bno = xfs_buf_to_agfl_bno(bp);
for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}
@@ -312,7 +312,7 @@ xfs_agiblock_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(bp);
+ struct xfs_agi *agi = bp->b_addr;
int bucket;
agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
@@ -502,7 +502,7 @@ xfs_ag_extend_space(
if (error)
return error;
- agi = XFS_BUF_TO_AGI(bp);
+ agi = bp->b_addr;
be32_add_cpu(&agi->agi_length, len);
ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
@@ -515,7 +515,7 @@ xfs_ag_extend_space(
if (error)
return error;
- agf = XFS_BUF_TO_AGF(bp);
+ agf = bp->b_addr;
be32_add_cpu(&agf->agf_length, len);
ASSERT(agf->agf_length == agi->agi_length);
xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
@@ -569,11 +569,11 @@ xfs_ag_get_geometry(
memset(ageo, 0, sizeof(*ageo));
ageo->ag_number = agno;
- agi = XFS_BUF_TO_AGI(agi_bp);
+ agi = agi_bp->b_addr;
ageo->ag_icount = be32_to_cpu(agi->agi_count);
ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);
- agf = XFS_BUF_TO_AGF(agf_bp);
+ agf = agf_bp->b_addr;
ageo->ag_length = be32_to_cpu(agf->agf_length);
freeblks = pag->pagf_freeblks +
pag->pagf_flcount +
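These conversions drop the typed XFS_BUF_TO_*() accessors in favour of reading bp->b_addr directly. The retired macros were, approximately, thin casts of this shape (a reconstruction, not the verbatim definitions):

    #define XFS_BUF_TO_SBP(bp)   ((struct xfs_dsb *)((bp)->b_addr))
    #define XFS_BUF_TO_AGF(bp)   ((struct xfs_agf *)((bp)->b_addr))
    #define XFS_BUF_TO_AGI(bp)   ((struct xfs_agi *)((bp)->b_addr))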
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index d8053bc96c4d..203e74fa64aa 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -151,7 +151,7 @@ xfs_alloc_lookup_eq(
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
- cur->bc_private.a.priv.abt.active = (*stat == 1);
+ cur->bc_ag.abt.active = (*stat == 1);
return error;
}
@@ -171,7 +171,7 @@ xfs_alloc_lookup_ge(
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
- cur->bc_private.a.priv.abt.active = (*stat == 1);
+ cur->bc_ag.abt.active = (*stat == 1);
return error;
}
@@ -190,7 +190,7 @@ xfs_alloc_lookup_le(
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
- cur->bc_private.a.priv.abt.active = (*stat == 1);
+ cur->bc_ag.abt.active = (*stat == 1);
return error;
}
@@ -198,7 +198,7 @@ static inline bool
xfs_alloc_cur_active(
struct xfs_btree_cur *cur)
{
- return cur && cur->bc_private.a.priv.abt.active;
+ return cur && cur->bc_ag.abt.active;
}
/*
@@ -230,7 +230,7 @@ xfs_alloc_get_rec(
int *stat) /* output: success/failure */
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_private.a.agno;
+ xfs_agnumber_t agno = cur->bc_ag.agno;
union xfs_btree_rec *rec;
int error;
@@ -589,6 +589,7 @@ xfs_agfl_verify(
{
struct xfs_mount *mp = bp->b_mount;
struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
+ __be32 *agfl_bno = xfs_buf_to_agfl_bno(bp);
int i;
/*
@@ -614,8 +615,8 @@ xfs_agfl_verify(
return __this_address;
for (i = 0; i < xfs_agfl_size(mp); i++) {
- if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
- be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
+ if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
+ be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
return __this_address;
}
@@ -713,7 +714,7 @@ xfs_alloc_update_counters(
struct xfs_buf *agbp,
long len)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_agf *agf = agbp->b_addr;
pag->pagf_freeblks += len;
be32_add_cpu(&agf->agf_freeblks, len);
@@ -721,7 +722,7 @@ xfs_alloc_update_counters(
xfs_trans_agblocks_delta(tp, len);
if (unlikely(be32_to_cpu(agf->agf_freeblks) >
be32_to_cpu(agf->agf_length))) {
- xfs_buf_corruption_error(agbp);
+ xfs_buf_mark_corrupt(agbp);
return -EFSCORRUPTED;
}
@@ -907,7 +908,7 @@ xfs_alloc_cur_check(
deactivate = true;
out:
if (deactivate)
- cur->bc_private.a.priv.abt.active = false;
+ cur->bc_ag.abt.active = false;
trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
*new);
return 0;
@@ -922,13 +923,13 @@ xfs_alloc_cur_finish(
struct xfs_alloc_arg *args,
struct xfs_alloc_cur *acur)
{
+ struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
int error;
ASSERT(acur->cnt && acur->bnolt);
ASSERT(acur->bno >= acur->rec_bno);
ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
- ASSERT(acur->rec_bno + acur->rec_len <=
- be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+ ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
acur->rec_len, acur->bno, acur->len, 0);
@@ -1026,6 +1027,7 @@ xfs_alloc_ag_vextent_small(
xfs_extlen_t *flenp, /* result length */
int *stat) /* status: 0-freelist, 1-normal/none */
{
+ struct xfs_agf *agf = args->agbp->b_addr;
int error = 0;
xfs_agblock_t fbno = NULLAGBLOCK;
xfs_extlen_t flen = 0;
@@ -1054,8 +1056,7 @@ xfs_alloc_ag_vextent_small(
if (args->minlen != 1 || args->alignment != 1 ||
args->resv == XFS_AG_RESV_AGFL ||
- (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) <=
- args->minleft))
+ be32_to_cpu(agf->agf_flcount) <= args->minleft)
goto out;
error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
@@ -1079,9 +1080,7 @@ xfs_alloc_ag_vextent_small(
}
*fbnop = args->agbno = fbno;
*flenp = args->len = 1;
- if (XFS_IS_CORRUPT(args->mp,
- fbno >= be32_to_cpu(
- XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+ if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
error = -EFSCORRUPTED;
goto error;
}
@@ -1203,6 +1202,7 @@ STATIC int /* error */
xfs_alloc_ag_vextent_exact(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
+ struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
int error;
@@ -1281,8 +1281,7 @@ xfs_alloc_ag_vextent_exact(
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->agno, XFS_BTNUM_CNT);
- ASSERT(args->agbno + args->len <=
- be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+ ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
args->len, XFSA_FIXUP_BNO_OK);
if (error) {
@@ -1353,7 +1352,7 @@ xfs_alloc_walk_iter(
if (error)
return error;
if (i == 0)
- cur->bc_private.a.priv.abt.active = false;
+ cur->bc_ag.abt.active = false;
if (count > 0)
count--;
@@ -1468,7 +1467,7 @@ xfs_alloc_ag_vextent_locality(
if (error)
return error;
if (i) {
- acur->cnt->bc_private.a.priv.abt.active = true;
+ acur->cnt->bc_ag.abt.active = true;
fbcur = acur->cnt;
fbinc = false;
}
@@ -1515,7 +1514,7 @@ xfs_alloc_ag_vextent_lastblock(
* maxlen, go to the start of this block, and skip all those smaller
* than minlen.
*/
- if (len || args->alignment > 1) {
+ if (*len || args->alignment > 1) {
acur->cnt->bc_ptrs[0] = 1;
do {
error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
@@ -1661,6 +1660,7 @@ STATIC int /* error */
xfs_alloc_ag_vextent_size(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
+ struct xfs_agf *agf = args->agbp->b_addr;
xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
int error; /* error result */
@@ -1851,8 +1851,7 @@ restart:
args->agbno = rbno;
if (XFS_IS_CORRUPT(args->mp,
args->agbno + args->len >
- be32_to_cpu(
- XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+ be32_to_cpu(agf->agf_length))) {
error = -EFSCORRUPTED;
goto error0;
}
@@ -2424,7 +2423,7 @@ xfs_agfl_reset(
struct xfs_perag *pag)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_agf *agf = agbp->b_addr;
ASSERT(pag->pagf_agflreset);
trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
@@ -2655,7 +2654,7 @@ xfs_alloc_get_freelist(
xfs_agblock_t *bnop, /* block address retrieved from freelist */
int btreeblk) /* destination is an AGF btree */
{
- xfs_agf_t *agf; /* a.g. freespace structure */
+ struct xfs_agf *agf = agbp->b_addr;
xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
xfs_agblock_t bno; /* block number returned */
__be32 *agfl_bno;
@@ -2667,7 +2666,6 @@ xfs_alloc_get_freelist(
/*
* Freelist is empty, give up.
*/
- agf = XFS_BUF_TO_AGF(agbp);
if (!agf->agf_flcount) {
*bnop = NULLAGBLOCK;
return 0;
@@ -2684,7 +2682,7 @@ xfs_alloc_get_freelist(
/*
* Get the block number and update the data structures.
*/
- agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+ agfl_bno = xfs_buf_to_agfl_bno(agflbp);
bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
be32_add_cpu(&agf->agf_flfirst, 1);
xfs_trans_brelse(tp, agflbp);
@@ -2745,7 +2743,7 @@ xfs_alloc_log_agf(
sizeof(xfs_agf_t)
};
- trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
+ trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
@@ -2783,18 +2781,15 @@ xfs_alloc_put_freelist(
xfs_agblock_t bno, /* block being freed */
int btreeblk) /* block came from an AGF btree */
{
- xfs_agf_t *agf; /* a.g. freespace structure */
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_agf *agf = agbp->b_addr;
__be32 *blockp;/* pointer to array entry */
int error;
int logflags;
- xfs_mount_t *mp; /* mount structure */
xfs_perag_t *pag; /* per allocation group data */
__be32 *agfl_bno;
int startoff;
- agf = XFS_BUF_TO_AGF(agbp);
- mp = tp->t_mountp;
-
if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
be32_to_cpu(agf->agf_seqno), &agflbp)))
return error;
@@ -2820,7 +2815,7 @@ xfs_alloc_put_freelist(
ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
- agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+ agfl_bno = xfs_buf_to_agfl_bno(agflbp);
blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
*blockp = cpu_to_be32(bno);
startoff = (char *)blockp - (char *)agflbp->b_addr;
@@ -2838,13 +2833,12 @@ xfs_agf_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_mount;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
+ struct xfs_agf *agf = bp->b_addr;
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
- if (!xfs_log_check_lsn(mp,
- be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
+ if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
return __this_address;
}
@@ -2858,6 +2852,13 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
return __this_address;
+ if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
+ return __this_address;
+
+ if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
+ be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
+ return __this_address;
+
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
@@ -2869,6 +2870,10 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
return __this_address;
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
+ be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
+ return __this_address;
+
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
@@ -2883,6 +2888,11 @@ xfs_agf_verify(
return __this_address;
if (xfs_sb_version_hasreflink(&mp->m_sb) &&
+ be32_to_cpu(agf->agf_refcount_blocks) >
+ be32_to_cpu(agf->agf_length))
+ return __this_address;
+
+ if (xfs_sb_version_hasreflink(&mp->m_sb) &&
(be32_to_cpu(agf->agf_refcount_level) < 1 ||
be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
return __this_address;
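
The checks added to xfs_agf_verify() above pin down the AGF header geometry: the AG length may not exceed the filesystem size, the free-block count must bracket the longest free extent while fitting inside the AG, and the rmap/refcount btree block counts may not exceed the AG length (the kernel additionally gates the last two on the rmapbt/reflink features). A minimal standalone sketch of these invariants, using a simplified host-endian struct rather than the on-disk big-endian xfs_agf:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the on-disk AGF header (host-endian). */
struct agf_sketch {
	uint32_t length;           /* size of this AG in blocks */
	uint32_t freeblks;         /* free blocks in the AG */
	uint32_t longest;          /* longest free extent */
	uint32_t rmap_blocks;      /* blocks used by the rmap btree */
	uint32_t refcount_blocks;  /* blocks used by the refcount btree */
};

static bool agf_geometry_ok(const struct agf_sketch *agf, uint64_t fs_dblocks)
{
	if (agf->length > fs_dblocks)        /* AG must fit in the fs */
		return false;
	if (agf->freeblks < agf->longest ||  /* longest extent is free space */
	    agf->freeblks > agf->length)     /* free space fits in the AG */
		return false;
	if (agf->rmap_blocks > agf->length)  /* btree blocks live in the AG */
		return false;
	if (agf->refcount_blocks > agf->length)
		return false;
	return true;
}
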
@@ -2914,6 +2924,7 @@ xfs_agf_write_verify(
{
struct xfs_mount *mp = bp->b_mount;
struct xfs_buf_log_item *bip = bp->b_log_item;
+ struct xfs_agf *agf = bp->b_addr;
xfs_failaddr_t fa;
fa = xfs_agf_verify(bp);
@@ -2926,7 +2937,7 @@ xfs_agf_write_verify(
return;
if (bip)
- XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
}
@@ -2994,7 +3005,7 @@ xfs_alloc_read_agf(
return error;
ASSERT(!(*bpp)->b_error);
- agf = XFS_BUF_TO_AGF(*bpp);
+ agf = (*bpp)->b_addr;
pag = xfs_perag_get(mp, agno);
if (!pag->pagf_init) {
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
@@ -3275,6 +3286,7 @@ __xfs_free_extent(
struct xfs_buf *agbp;
xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
+ struct xfs_agf *agf;
int error;
unsigned int busy_flags = 0;
@@ -3288,6 +3300,7 @@ __xfs_free_extent(
error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
if (error)
return error;
+ agf = agbp->b_addr;
if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
error = -EFSCORRUPTED;
@@ -3295,9 +3308,7 @@ __xfs_free_extent(
}
/* validate the extent size is legal now we have the agf locked */
- if (XFS_IS_CORRUPT(mp,
- agbno + len >
- be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length))) {
+ if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
error = -EFSCORRUPTED;
goto err;
}
@@ -3408,7 +3419,7 @@ xfs_agfl_walk(
unsigned int i;
int error;
- agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+ agfl_bno = xfs_buf_to_agfl_bno(agflbp);
i = be32_to_cpu(agf->agf_flfirst);
/* Nothing to walk in an empty AGFL. */
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 7380fbe4a3ff..a851bf77f17b 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -236,4 +236,13 @@ typedef int (*xfs_agfl_walk_fn)(struct xfs_mount *mp, xfs_agblock_t bno,
int xfs_agfl_walk(struct xfs_mount *mp, struct xfs_agf *agf,
struct xfs_buf *agflbp, xfs_agfl_walk_fn walk_fn, void *priv);
+static inline __be32 *
+xfs_buf_to_agfl_bno(
+ struct xfs_buf *bp)
+{
+ if (xfs_sb_version_hascrc(&bp->b_mount->m_sb))
+ return bp->b_addr + sizeof(struct xfs_agfl);
+ return bp->b_addr;
+}
+
#endif /* __XFS_ALLOC_H__ */
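
The new xfs_buf_to_agfl_bno() helper centralizes a layout detail that the converted call sites above (xfs_alloc_get_freelist, xfs_alloc_put_freelist, xfs_agfl_walk) previously reached through the XFS_BUF_TO_AGFL_BNO macro: on v5 (CRC-enabled) filesystems the AGFL block carries a struct xfs_agfl header before the __be32 block-number array, while v4 filesystems store the bare array at offset zero. A standalone sketch of the same decision, with a simplified stand-in header:

#include <stdint.h>

/* Illustrative stand-in for the v5 AGFL block header. */
struct agfl_hdr_sketch {
	uint32_t magic;
	uint32_t seqno;
	/* ... UUID, LSN, CRC in the real struct xfs_agfl ... */
};

static uint32_t *agfl_bno_array(void *block_addr, int fs_has_crc)
{
	if (fs_has_crc)  /* v5: array starts after the header */
		return (uint32_t *)((char *)block_addr +
				    sizeof(struct agfl_hdr_sketch));
	return block_addr;  /* v4: bare array at offset zero */
}
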
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 279694d73e4e..60c453cb3ee3 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -12,6 +12,7 @@
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
@@ -25,7 +26,7 @@ xfs_allocbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno,
+ cur->bc_ag.agbp, cur->bc_ag.agno,
cur->bc_btnum);
}
@@ -35,8 +36,8 @@ xfs_allocbt_set_root(
union xfs_btree_ptr *ptr,
int inc)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int btnum = cur->bc_btnum;
struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
@@ -62,7 +63,7 @@ xfs_allocbt_alloc_block(
xfs_agblock_t bno;
/* Allocate the new block from the freelist. If we can't, give up. */
- error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+ error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
&bno, 1);
if (error)
return error;
@@ -72,7 +73,7 @@ xfs_allocbt_alloc_block(
return 0;
}
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
xfs_trans_agbtree_delta(cur->bc_tp, 1);
new->s = cpu_to_be32(bno);
@@ -86,8 +87,8 @@ xfs_allocbt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_agblock_t bno;
int error;
@@ -113,7 +114,7 @@ xfs_allocbt_update_lastrec(
int ptr,
int reason)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+ struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
struct xfs_perag *pag;
__be32 len;
@@ -162,7 +163,7 @@ xfs_allocbt_update_lastrec(
pag = xfs_perag_get(cur->bc_mp, seqno);
pag->pagf_longest = be32_to_cpu(len);
xfs_perag_put(pag);
- xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
+ xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}
STATIC int
@@ -226,9 +227,9 @@ xfs_allocbt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+ struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
}
@@ -471,18 +472,14 @@ static const struct xfs_btree_ops xfs_cntbt_ops = {
.recs_inorder = xfs_cntbt_recs_inorder,
};
-/*
- * Allocate a new allocation btree cursor.
- */
-struct xfs_btree_cur * /* new alloc btree cursor */
-xfs_allocbt_init_cursor(
- struct xfs_mount *mp, /* file system mount point */
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer for agf structure */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_btnum_t btnum) /* btree identifier */
+/* Allocate most of a new allocation btree cursor. */
+STATIC struct xfs_btree_cur *
+xfs_allocbt_init_common(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_btnum_t btnum)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
@@ -495,19 +492,16 @@ xfs_allocbt_init_cursor(
cur->bc_blocklog = mp->m_sb.sb_blocklog;
if (btnum == XFS_BTNUM_CNT) {
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
cur->bc_ops = &xfs_cntbt_ops;
- cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
+ cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
} else {
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
cur->bc_ops = &xfs_bnobt_ops;
- cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
+ cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
}
- cur->bc_private.a.agbp = agbp;
- cur->bc_private.a.agno = agno;
- cur->bc_private.a.priv.abt.active = false;
+ cur->bc_ag.agno = agno;
+ cur->bc_ag.abt.active = false;
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
@@ -516,6 +510,73 @@ xfs_allocbt_init_cursor(
}
/*
+ * Allocate a new allocation btree cursor.
+ */
+struct xfs_btree_cur * /* new alloc btree cursor */
+xfs_allocbt_init_cursor(
+ struct xfs_mount *mp, /* file system mount point */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_buf *agbp, /* buffer for agf structure */
+ xfs_agnumber_t agno, /* allocation group number */
+ xfs_btnum_t btnum) /* btree identifier */
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
+ if (btnum == XFS_BTNUM_CNT)
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
+ else
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
+
+ cur->bc_ag.agbp = agbp;
+
+ return cur;
+}
+
+/* Create a free space btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_allocbt_stage_cursor(
+ struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake,
+ xfs_agnumber_t agno,
+ xfs_btnum_t btnum)
+{
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
+ xfs_btree_stage_afakeroot(cur, afake);
+ return cur;
+}
+
+/*
+ * Install a new free space btree root. Caller is responsible for invalidating
+ * and freeing the old btree blocks.
+ */
+void
+xfs_allocbt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+ agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+
+ if (cur->bc_btnum == XFS_BTNUM_BNO) {
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
+ } else {
+ cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
+ }
+}
+
+/*
* Calculate number of records in an alloc btree block.
*/
int
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.h b/fs/xfs/libxfs/xfs_alloc_btree.h
index c9305ebb69f6..047f09f0be3c 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.h
+++ b/fs/xfs/libxfs/xfs_alloc_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xbtree_afakeroot;
/*
* Btree block header size depends on a superblock flag.
@@ -48,8 +49,14 @@ struct xfs_mount;
extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_buf *,
xfs_agnumber_t, xfs_btnum_t);
+struct xfs_btree_cur *xfs_allocbt_stage_cursor(struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+ xfs_btnum_t btnum);
extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
extern xfs_extlen_t xfs_allocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
+void xfs_allocbt_commit_staged_btree(struct xfs_btree_cur *cur,
+ struct xfs_trans *tp, struct xfs_buf *agbp);
+
#endif /* __XFS_ALLOC_BTREE_H__ */
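
The split of xfs_allocbt_init_cursor() into a common helper plus the new xfs_allocbt_stage_cursor()/xfs_allocbt_commit_staged_btree() pair introduces the btree staging pattern: a rebuild proceeds against a fake root that merely tracks the would-be root block and height, then atomically publishes that root into the AGF and flips the cursor over to the live ops table. A minimal sketch of the commit step, with stand-in types rather than the kernel's xbtree_afakeroot and xfs_agf:

#include <stdint.h>

struct fakeroot_sketch {
	uint32_t root;    /* staged root block number */
	uint32_t levels;  /* staged tree height */
};

struct ag_hdr_sketch {
	uint32_t btree_roots[2];   /* per-btree root blocks */
	uint32_t btree_levels[2];  /* per-btree heights */
};

/* Publish the staged root and height for btree 'btnum'; the kernel
 * code then logs the header and swaps the cursor's ops table. */
static void commit_staged_sketch(struct ag_hdr_sketch *agh, int btnum,
				 const struct fakeroot_sketch *afake)
{
	agh->btree_roots[btnum] = afake->root;
	agh->btree_levels[btnum] = afake->levels;
}
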
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index e6149720ce02..e4fe3dca9883 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -56,33 +56,6 @@ STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
-
-STATIC int
-xfs_attr_args_init(
- struct xfs_da_args *args,
- struct xfs_inode *dp,
- const unsigned char *name,
- size_t namelen,
- int flags)
-{
-
- if (!name)
- return -EINVAL;
-
- memset(args, 0, sizeof(*args));
- args->geo = dp->i_mount->m_attr_geo;
- args->whichfork = XFS_ATTR_FORK;
- args->dp = dp;
- args->flags = flags;
- args->name = name;
- args->namelen = namelen;
- if (args->namelen >= MAXNAMELEN)
- return -EFAULT; /* match IRIX behaviour */
-
- args->hashval = xfs_da_hashname(args->name, args->namelen);
- return 0;
-}
-
int
xfs_inode_hasattr(
struct xfs_inode *ip)
@@ -104,85 +77,60 @@ xfs_inode_hasattr(
*/
int
xfs_attr_get_ilocked(
- struct xfs_inode *ip,
struct xfs_da_args *args)
{
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+ ASSERT(xfs_isilocked(args->dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
- if (!xfs_inode_hasattr(ip))
+ if (!xfs_inode_hasattr(args->dp))
return -ENOATTR;
- else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
+
+ if (args->dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
return xfs_attr_shortform_getvalue(args);
- else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK))
+ if (xfs_bmap_one_block(args->dp, XFS_ATTR_FORK))
return xfs_attr_leaf_get(args);
- else
- return xfs_attr_node_get(args);
+ return xfs_attr_node_get(args);
}
/*
* Retrieve an extended attribute by name, and its value if requested.
*
- * If ATTR_KERNOVAL is set in @flags, then the caller does not want the value,
- * just an indication whether the attribute exists and the size of the value if
- * it exists. The size is returned in @valuelenp,
+ * If args->valuelen is zero, then the caller does not want the value, just an
+ * indication whether the attribute exists and the size of the value if it
+ * exists. The size is returned in args->valuelen.
*
- * If the attribute is found, but exceeds the size limit set by the caller in
- * @valuelenp, return -ERANGE with the size of the attribute that was found in
- * @valuelenp.
+ * If args->value is NULL but args->valuelen is non-zero, allocate the buffer
+ * for the value after existence of the attribute has been determined. The
+ * caller always has to free args->value if it is set, no matter if this
+ * function was successful or not.
*
- * If ATTR_ALLOC is set in @flags, allocate the buffer for the value after
- * existence of the attribute has been determined. On success, return that
- * buffer to the caller and leave them to free it. On failure, free any
- * allocated buffer and ensure the buffer pointer returned to the caller is
- * null.
+ * If the attribute is found, but exceeds the size limit set by the caller in
+ * args->valuelen, return -ERANGE with the size of the attribute that was found
+ * in args->valuelen.
*/
int
xfs_attr_get(
- struct xfs_inode *ip,
- const unsigned char *name,
- size_t namelen,
- unsigned char **value,
- int *valuelenp,
- int flags)
+ struct xfs_da_args *args)
{
- struct xfs_da_args args;
uint lock_mode;
int error;
- ASSERT((flags & (ATTR_ALLOC | ATTR_KERNOVAL)) || *value);
-
- XFS_STATS_INC(ip->i_mount, xs_attr_get);
+ XFS_STATS_INC(args->dp->i_mount, xs_attr_get);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ if (XFS_FORCED_SHUTDOWN(args->dp->i_mount))
return -EIO;
- error = xfs_attr_args_init(&args, ip, name, namelen, flags);
- if (error)
- return error;
+ args->geo = args->dp->i_mount->m_attr_geo;
+ args->whichfork = XFS_ATTR_FORK;
+ args->hashval = xfs_da_hashname(args->name, args->namelen);
/* Entirely possible to look up a name which doesn't exist */
- args.op_flags = XFS_DA_OP_OKNOENT;
- if (flags & ATTR_ALLOC)
- args.op_flags |= XFS_DA_OP_ALLOCVAL;
- else
- args.value = *value;
- args.valuelen = *valuelenp;
+ args->op_flags = XFS_DA_OP_OKNOENT;
- lock_mode = xfs_ilock_attr_map_shared(ip);
- error = xfs_attr_get_ilocked(ip, &args);
- xfs_iunlock(ip, lock_mode);
- *valuelenp = args.valuelen;
+ lock_mode = xfs_ilock_attr_map_shared(args->dp);
+ error = xfs_attr_get_ilocked(args);
+ xfs_iunlock(args->dp, lock_mode);
- /* on error, we have to clean up allocated value buffers */
- if (error) {
- if (flags & ATTR_ALLOC) {
- kmem_free(args.value);
- *value = NULL;
- }
- return error;
- }
- *value = args.value;
- return 0;
+ return error;
}
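
With the argument-struct interface, a caller expresses all three lookup modes through the args fields alone: valuelen == 0 probes for existence and size, a preset value buffer bounds the copy, and value == NULL with a nonzero valuelen asks the kernel to allocate. A hedged kernel-context sketch of the allocating mode (example_get_xattr() and the attribute name are hypothetical; the usual xfs headers are assumed in scope):

static int example_get_xattr(struct xfs_inode *ip)
{
	struct xfs_da_args args = {
		.dp       = ip,
		.name     = (const uint8_t *)"user.comment",
		.namelen  = sizeof("user.comment") - 1,
		.value    = NULL,    /* NULL + nonzero valuelen: allocate */
		.valuelen = 65536,   /* 0 here would mean size-probe only */
	};
	int error = xfs_attr_get(&args);

	/* per the new contract, free args.value whether or not the
	 * call succeeded */
	kmem_free(args.value);
	return error;
}
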
/*
@@ -238,7 +186,7 @@ xfs_attr_try_sf_addname(
* Commit the shortform mods, and we're done.
* NOTE: this is also the error path (EEXIST, etc).
*/
- if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+ if (!error && !(args->op_flags & XFS_DA_OP_NOTIME))
xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
if (mp->m_flags & XFS_MOUNT_WSYNC)
@@ -336,188 +284,127 @@ xfs_attr_remove_args(
return error;
}
+/*
+ * Note: If args->value is NULL the attribute will be removed, just like the
+ * Linux ->setattr API.
+ */
int
xfs_attr_set(
- struct xfs_inode *dp,
- const unsigned char *name,
- size_t namelen,
- unsigned char *value,
- int valuelen,
- int flags)
+ struct xfs_da_args *args)
{
+ struct xfs_inode *dp = args->dp;
struct xfs_mount *mp = dp->i_mount;
- struct xfs_da_args args;
struct xfs_trans_res tres;
- int rsvd = (flags & ATTR_ROOT) != 0;
+ bool rsvd = (args->attr_filter & XFS_ATTR_ROOT);
int error, local;
-
- XFS_STATS_INC(mp, xs_attr_set);
+ unsigned int total;
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return -EIO;
- error = xfs_attr_args_init(&args, dp, name, namelen, flags);
- if (error)
- return error;
-
- args.value = value;
- args.valuelen = valuelen;
- args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
- args.total = xfs_attr_calc_size(&args, &local);
-
error = xfs_qm_dqattach(dp);
if (error)
return error;
- /*
- * If the inode doesn't have an attribute fork, add one.
- * (inode must not be locked when we call this routine)
- */
- if (XFS_IFORK_Q(dp) == 0) {
- int sf_size = sizeof(xfs_attr_sf_hdr_t) +
- XFS_ATTR_SF_ENTSIZE_BYNAME(args.namelen, valuelen);
-
- error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
- if (error)
- return error;
- }
-
- tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
- M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
- tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
- tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
-
- /*
- * Root fork attributes can use reserved data blocks for this
- * operation if necessary
- */
- error = xfs_trans_alloc(mp, &tres, args.total, 0,
- rsvd ? XFS_TRANS_RESERVE : 0, &args.trans);
- if (error)
- return error;
-
- xfs_ilock(dp, XFS_ILOCK_EXCL);
- error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
- rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
- XFS_QMOPT_RES_REGBLKS);
- if (error)
- goto out_trans_cancel;
-
- xfs_trans_ijoin(args.trans, dp, 0);
- error = xfs_attr_set_args(&args);
- if (error)
- goto out_trans_cancel;
- if (!args.trans) {
- /* shortform attribute has already been committed */
- goto out_unlock;
- }
-
- /*
- * If this is a synchronous mount, make sure that the
- * transaction goes to disk before returning to the user.
- */
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(args.trans);
-
- if ((flags & ATTR_KERNOTIME) == 0)
- xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
+ args->geo = mp->m_attr_geo;
+ args->whichfork = XFS_ATTR_FORK;
+ args->hashval = xfs_da_hashname(args->name, args->namelen);
/*
- * Commit the last in the sequence of transactions.
+ * We have no control over the attribute names that userspace passes us
+ * to remove, so we have to allow the name lookup prior to attribute
+ * removal to fail as well.
*/
- xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
- error = xfs_trans_commit(args.trans);
-out_unlock:
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return error;
+ args->op_flags = XFS_DA_OP_OKNOENT;
-out_trans_cancel:
- if (args.trans)
- xfs_trans_cancel(args.trans);
- goto out_unlock;
-}
+ if (args->value) {
+ XFS_STATS_INC(mp, xs_attr_set);
-/*
- * Generic handler routine to remove a name from an attribute list.
- * Transitions attribute list from Btree to shortform as necessary.
- */
-int
-xfs_attr_remove(
- struct xfs_inode *dp,
- const unsigned char *name,
- size_t namelen,
- int flags)
-{
- struct xfs_mount *mp = dp->i_mount;
- struct xfs_da_args args;
- int error;
-
- XFS_STATS_INC(mp, xs_attr_remove);
+ args->op_flags |= XFS_DA_OP_ADDNAME;
+ args->total = xfs_attr_calc_size(args, &local);
- if (XFS_FORCED_SHUTDOWN(dp->i_mount))
- return -EIO;
+ /*
+ * If the inode doesn't have an attribute fork, add one.
+ * (inode must not be locked when we call this routine)
+ */
+ if (XFS_IFORK_Q(dp) == 0) {
+ int sf_size = sizeof(struct xfs_attr_sf_hdr) +
+ XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen,
+ args->valuelen);
- error = xfs_attr_args_init(&args, dp, name, namelen, flags);
- if (error)
- return error;
+ error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
+ if (error)
+ return error;
+ }
- /*
- * we have no control over the attribute names that userspace passes us
- * to remove, so we have to allow the name lookup prior to attribute
- * removal to fail.
- */
- args.op_flags = XFS_DA_OP_OKNOENT;
+ tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+ M_RES(mp)->tr_attrsetrt.tr_logres *
+ args->total;
+ tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
+ tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+ total = args->total;
+ } else {
+ XFS_STATS_INC(mp, xs_attr_remove);
- error = xfs_qm_dqattach(dp);
- if (error)
- return error;
+ tres = M_RES(mp)->tr_attrrm;
+ total = XFS_ATTRRM_SPACE_RES(mp);
+ }
/*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrrm,
- XFS_ATTRRM_SPACE_RES(mp), 0,
- (flags & ATTR_ROOT) ? XFS_TRANS_RESERVE : 0,
- &args.trans);
+ error = xfs_trans_alloc(mp, &tres, total, 0,
+ rsvd ? XFS_TRANS_RESERVE : 0, &args->trans);
if (error)
return error;
xfs_ilock(dp, XFS_ILOCK_EXCL);
- /*
- * No need to make quota reservations here. We expect to release some
- * blocks not allocate in the common case.
- */
- xfs_trans_ijoin(args.trans, dp, 0);
-
- error = xfs_attr_remove_args(&args);
- if (error)
- goto out;
+ xfs_trans_ijoin(args->trans, dp, 0);
+ if (args->value) {
+ unsigned int quota_flags = XFS_QMOPT_RES_REGBLKS;
+
+ if (rsvd)
+ quota_flags |= XFS_QMOPT_FORCE_RES;
+ error = xfs_trans_reserve_quota_nblks(args->trans, dp,
+ args->total, 0, quota_flags);
+ if (error)
+ goto out_trans_cancel;
+ error = xfs_attr_set_args(args);
+ if (error)
+ goto out_trans_cancel;
+ /* shortform attribute has already been committed */
+ if (!args->trans)
+ goto out_unlock;
+ } else {
+ error = xfs_attr_remove_args(args);
+ if (error)
+ goto out_trans_cancel;
+ }
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(args.trans);
+ xfs_trans_set_sync(args->trans);
- if ((flags & ATTR_KERNOTIME) == 0)
- xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
+ if (!(args->op_flags & XFS_DA_OP_NOTIME))
+ xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
/*
* Commit the last in the sequence of transactions.
*/
- xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
- error = xfs_trans_commit(args.trans);
+ xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
+ error = xfs_trans_commit(args->trans);
+out_unlock:
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
return error;
-out:
- if (args.trans)
- xfs_trans_cancel(args.trans);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return error;
+out_trans_cancel:
+ if (args->trans)
+ xfs_trans_cancel(args->trans);
+ goto out_unlock;
}
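
The merged function keeps one transaction setup for both directions and lets args->value choose the path: a set sizes a permanent attrsetm/attrsetrt reservation from the computed args->total, while a removal takes the fixed attrrm reservation and skips the quota reservation entirely. A standalone sketch of that selection (constants and field names are stand-ins for the M_RES(mp) resources):

#include <stdbool.h>
#include <stdint.h>

struct trans_res_sketch { uint32_t logres, logcount, logflags; };

static struct trans_res_sketch pick_attr_reservation(bool is_set,
		uint32_t total_blocks, uint32_t res_attrsetm,
		uint32_t res_attrsetrt, struct trans_res_sketch res_attrrm)
{
	if (is_set) {  /* args->value != NULL */
		struct trans_res_sketch t = {
			.logres   = res_attrsetm +
				    res_attrsetrt * total_blocks,
			.logcount = 3,  /* stand-in for XFS_ATTRSET_LOG_COUNT */
			.logflags = 1,  /* permanent log reservation */
		};
		return t;
	}
	return res_attrrm;  /* args->value == NULL: removal */
}
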
/*========================================================================
@@ -536,10 +423,10 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
trace_xfs_attr_sf_addname(args);
retval = xfs_attr_shortform_lookup(args);
- if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
+ if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
return retval;
- } else if (retval == -EEXIST) {
- if (args->flags & ATTR_CREATE)
+ if (retval == -EEXIST) {
+ if (args->attr_flags & XATTR_CREATE)
return retval;
retval = xfs_attr_shortform_remove(args);
if (retval)
@@ -549,7 +436,7 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
* that the leaf format add routine won't trip over the attr
* not being around.
*/
- args->flags &= ~ATTR_REPLACE;
+ args->attr_flags &= ~XATTR_REPLACE;
}
if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
@@ -602,14 +489,11 @@ xfs_attr_leaf_addname(
* the given flags produce an error or call for an atomic rename.
*/
retval = xfs_attr3_leaf_lookup_int(bp, args);
- if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
- xfs_trans_brelse(args->trans, bp);
- return retval;
- } else if (retval == -EEXIST) {
- if (args->flags & ATTR_CREATE) { /* pure create op */
- xfs_trans_brelse(args->trans, bp);
- return retval;
- }
+ if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
+ goto out_brelse;
+ if (retval == -EEXIST) {
+ if (args->attr_flags & XATTR_CREATE)
+ goto out_brelse;
trace_xfs_attr_leaf_replace(args);
@@ -750,6 +634,9 @@ xfs_attr_leaf_addname(
error = xfs_attr3_leaf_clearflag(args);
}
return error;
+out_brelse:
+ xfs_trans_brelse(args->trans, bp);
+ return retval;
}
/*
@@ -876,10 +763,10 @@ restart:
goto out;
blk = &state->path.blk[ state->path.active-1 ];
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
- if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
+ if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
goto out;
- } else if (retval == -EEXIST) {
- if (args->flags & ATTR_CREATE)
+ if (retval == -EEXIST) {
+ if (args->attr_flags & XATTR_CREATE)
goto out;
trace_xfs_attr_node_replace(args);
@@ -1011,7 +898,7 @@ restart:
* The INCOMPLETE flag means that we will find the "old"
* attr, not the "new" one.
*/
- args->op_flags |= XFS_DA_OP_INCOMPLETE;
+ args->attr_filter |= XFS_ATTR_INCOMPLETE;
state = xfs_da_state_alloc();
state->args = args;
state->mp = mp;
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 4243b2272642..0d2d05908537 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -21,39 +21,6 @@ struct xfs_attr_list_context;
* as possible so as to fit into the literal area of the inode.
*/
-/*========================================================================
- * External interfaces
- *========================================================================*/
-
-
-#define ATTR_DONTFOLLOW 0x0001 /* -- ignored, from IRIX -- */
-#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
-#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
-#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
-#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
-#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
-
-#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
-#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
-
-#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
-#define ATTR_ALLOC 0x8000 /* [kernel] allocate xattr buffer on demand */
-
-#define ATTR_KERNEL_FLAGS \
- (ATTR_KERNOTIME | ATTR_KERNOVAL | ATTR_INCOMPLETE | ATTR_ALLOC)
-
-#define XFS_ATTR_FLAGS \
- { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
- { ATTR_ROOT, "ROOT" }, \
- { ATTR_TRUST, "TRUST" }, \
- { ATTR_SECURE, "SECURE" }, \
- { ATTR_CREATE, "CREATE" }, \
- { ATTR_REPLACE, "REPLACE" }, \
- { ATTR_KERNOTIME, "KERNOTIME" }, \
- { ATTR_KERNOVAL, "KERNOVAL" }, \
- { ATTR_INCOMPLETE, "INCOMPLETE" }, \
- { ATTR_ALLOC, "ALLOC" }
-
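
For reference, each retired interface flag has a direct replacement, reconstructed here from the call-site conversions elsewhere in this diff:

ATTR_CREATE     -> args->attr_flags & XATTR_CREATE (VFS flag)
ATTR_REPLACE    -> args->attr_flags & XATTR_REPLACE (VFS flag)
ATTR_ROOT       -> args->attr_filter & XFS_ATTR_ROOT (on-disk namespace bit)
ATTR_SECURE     -> args->attr_filter & XFS_ATTR_SECURE (on-disk namespace bit)
ATTR_KERNOTIME  -> args->op_flags & XFS_DA_OP_NOTIME
ATTR_KERNOVAL   -> args->valuelen == 0
ATTR_ALLOC      -> args->value == NULL with a nonzero args->valuelen
ATTR_INCOMPLETE -> args->attr_filter & XFS_ATTR_INCOMPLETE
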
/*
* The maximum size (into the kernel or returned from the kernel) of an
* attribute value or the buffer used for an attr_list() call. Larger
@@ -62,45 +29,16 @@ struct xfs_attr_list_context;
#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */
/*
- * Define how lists of attribute names are returned to the user from
- * the attr_list() call. A large, 32bit aligned, buffer is passed in
- * along with its size. We put an array of offsets at the top that each
- * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom.
- */
-typedef struct attrlist {
- __s32 al_count; /* number of entries in attrlist */
- __s32 al_more; /* T/F: more attrs (do call again) */
- __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */
-} attrlist_t;
-
-/*
- * Show the interesting info about one attribute. This is what the
- * al_offset[i] entry points to.
- */
-typedef struct attrlist_ent { /* data from attr_list() */
- __u32 a_valuelen; /* number bytes in value of attr */
- char a_name[1]; /* attr name (NULL terminated) */
-} attrlist_ent_t;
-
-/*
- * Given a pointer to the (char*) buffer containing the attr_list() result,
- * and an index, return a pointer to the indicated attribute in the buffer.
- */
-#define ATTR_ENTRY(buffer, index) \
- ((attrlist_ent_t *) \
- &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
-
-/*
* Kernel-internal version of the attrlist cursor.
*/
-typedef struct attrlist_cursor_kern {
+struct xfs_attrlist_cursor_kern {
__u32 hashval; /* hash value of next entry to add */
__u32 blkno; /* block containing entry (suggestion) */
__u32 offset; /* offset in list of equal-hashvals */
__u16 pad1; /* padding to match user-level */
__u8 pad2; /* padding to match user-level */
__u8 initted; /* T/F: cursor has been initialized */
-} attrlist_cursor_kern_t;
+};
/*========================================================================
@@ -112,27 +50,28 @@ typedef struct attrlist_cursor_kern {
typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
unsigned char *, int, int);
-typedef struct xfs_attr_list_context {
- struct xfs_trans *tp;
- struct xfs_inode *dp; /* inode */
- struct attrlist_cursor_kern *cursor; /* position in list */
- char *alist; /* output buffer */
+struct xfs_attr_list_context {
+ struct xfs_trans *tp;
+ struct xfs_inode *dp; /* inode */
+ struct xfs_attrlist_cursor_kern cursor; /* position in list */
+ void *buffer; /* output buffer */
/*
* Abort attribute list iteration if non-zero. Can be used to pass
* error values to the xfs_attr_list caller.
*/
- int seen_enough;
+ int seen_enough;
+ bool allow_incomplete;
- ssize_t count; /* num used entries */
- int dupcnt; /* count dup hashvals seen */
- int bufsize; /* total buffer size */
- int firstu; /* first used byte in buffer */
- int flags; /* from VOP call */
- int resynch; /* T/F: resynch with cursor */
- put_listent_func_t put_listent; /* list output fmt function */
- int index; /* index into output buffer */
-} xfs_attr_list_context_t;
+ ssize_t count; /* num used entries */
+ int dupcnt; /* count dup hashvals seen */
+ int bufsize; /* total buffer size */
+ int firstu; /* first used byte in buffer */
+ unsigned int attr_filter; /* XFS_ATTR_{ROOT,SECURE} */
+ int resynch; /* T/F: resynch with cursor */
+ put_listent_func_t put_listent; /* list output fmt function */
+ int index; /* index into output buffer */
+};
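
The reworked context embeds the cursor by value, takes an untyped output buffer, and replaces the old VOP flags word with an explicit attr_filter plus allow_incomplete. A hedged kernel-context sketch of initializing one, assuming ip, user_buf, and user_len are in scope (put_one_entry is a hypothetical callback):

struct xfs_attr_list_context ctx = {
	.dp          = ip,            /* inode being listed */
	.buffer      = user_buf,      /* was 'char *alist' */
	.bufsize     = user_len,
	.firstu      = user_len,      /* first used byte, counts down */
	.attr_filter = 0,             /* or XFS_ATTR_ROOT / XFS_ATTR_SECURE */
	.put_listent = put_one_entry, /* hypothetical callback */
};
/* ctx.cursor is now embedded and starts zeroed; callers no longer
 * pass a separate attrlist_cursor_kern pointer. */
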
/*========================================================================
@@ -143,21 +82,14 @@ typedef struct xfs_attr_list_context {
* Overall external interface routines.
*/
int xfs_attr_inactive(struct xfs_inode *dp);
-int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *);
-int xfs_attr_list_int(struct xfs_attr_list_context *);
+int xfs_attr_list_ilocked(struct xfs_attr_list_context *);
+int xfs_attr_list(struct xfs_attr_list_context *);
int xfs_inode_hasattr(struct xfs_inode *ip);
-int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
-int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
- size_t namelen, unsigned char **value, int *valuelenp,
- int flags);
-int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
- size_t namelen, unsigned char *value, int valuelen, int flags);
+int xfs_attr_get_ilocked(struct xfs_da_args *args);
+int xfs_attr_get(struct xfs_da_args *args);
+int xfs_attr_set(struct xfs_da_args *args);
int xfs_attr_set_args(struct xfs_da_args *args);
-int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name,
- size_t namelen, int flags);
int xfs_attr_remove_args(struct xfs_da_args *args);
-int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
- int flags, struct attrlist_cursor_kern *cursor);
bool xfs_attr_namecheck(const void *name, size_t length);
#endif /* __XFS_ATTR_H__ */
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index fed537a4353d..863444e2dda7 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -445,14 +445,25 @@ xfs_attr3_leaf_read(
* Namespace helper routines
*========================================================================*/
-/*
- * If namespace bits don't match return 0.
- * If all match then return 1.
- */
-STATIC int
-xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
+static bool
+xfs_attr_match(
+ struct xfs_da_args *args,
+ uint8_t namelen,
+ unsigned char *name,
+ int flags)
{
- return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
+ if (args->namelen != namelen)
+ return false;
+ if (memcmp(args->name, name, namelen) != 0)
+ return false;
+ /*
+ * If we are looking for incomplete entries, show only those, else only
+ * show complete entries.
+ */
+ if (args->attr_filter !=
+ (flags & (XFS_ATTR_NSP_ONDISK_MASK | XFS_ATTR_INCOMPLETE)))
+ return false;
+ return true;
}
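
The new helper folds the three per-call-site comparisons (name length, name bytes, namespace flags) into one predicate and makes the incomplete-entry filtering explicit: the on-disk flags, masked to the namespace bits plus XFS_ATTR_INCOMPLETE, must equal args->attr_filter exactly. A standalone sketch with illustrative flag values:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NSP_MASK_SKETCH    0x06  /* stand-in for XFS_ATTR_NSP_ONDISK_MASK */
#define INCOMPLETE_SKETCH  0x80  /* stand-in for XFS_ATTR_INCOMPLETE */

static bool attr_match_sketch(const uint8_t *want, size_t want_len,
			      unsigned int filter, const uint8_t *name,
			      uint8_t namelen, unsigned int ondisk_flags)
{
	if (want_len != namelen)
		return false;
	if (memcmp(want, name, namelen) != 0)
		return false;
	/* namespace and completeness must both agree with the filter */
	return filter == (ondisk_flags &
			  (NSP_MASK_SKETCH | INCOMPLETE_SKETCH));
}
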
static int
@@ -464,7 +475,7 @@ xfs_attr_copy_value(
/*
* No copy if all we have to do is get the length
*/
- if (args->flags & ATTR_KERNOVAL) {
+ if (!args->valuelen) {
args->valuelen = valuelen;
return 0;
}
@@ -477,7 +488,7 @@ xfs_attr_copy_value(
return -ERANGE;
}
- if (args->op_flags & XFS_DA_OP_ALLOCVAL) {
+ if (!args->value) {
args->value = kmem_alloc_large(valuelen, 0);
if (!args->value)
return -ENOMEM;
@@ -526,7 +537,7 @@ xfs_attr_shortform_bytesfit(
int offset;
/* rounded down */
- offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
+ offset = (XFS_LITINO(mp) - bytes) >> 3;
if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) {
minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
@@ -593,8 +604,7 @@ xfs_attr_shortform_bytesfit(
minforkoff = roundup(minforkoff, 8) >> 3;
/* attr fork btree root can have at least this many key/ptr pairs */
- maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) -
- XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
maxforkoff = maxforkoff >> 3; /* rounded down */
if (offset >= maxforkoff)
@@ -678,15 +688,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
-#ifdef DEBUG
- if (sfe->namelen != args->namelen)
- continue;
- if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
- continue;
- if (!xfs_attr_namesp_match(args->flags, sfe->flags))
- continue;
- ASSERT(0);
-#endif
+ ASSERT(!xfs_attr_match(args, sfe->namelen, sfe->nameval,
+ sfe->flags));
}
offset = (char *)sfe - (char *)sf;
@@ -697,7 +700,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
sfe->namelen = args->namelen;
sfe->valuelen = args->valuelen;
- sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
+ sfe->flags = args->attr_filter;
memcpy(sfe->nameval, args->name, args->namelen);
memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
sf->hdr.count++;
@@ -749,13 +752,9 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
base += size, i++) {
size = XFS_ATTR_SF_ENTSIZE(sfe);
- if (sfe->namelen != args->namelen)
- continue;
- if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
- continue;
- if (!xfs_attr_namesp_match(args->flags, sfe->flags))
- continue;
- break;
+ if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
+ sfe->flags))
+ break;
}
if (i == end)
return -ENOATTR;
@@ -816,13 +815,9 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
- if (sfe->namelen != args->namelen)
- continue;
- if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
- continue;
- if (!xfs_attr_namesp_match(args->flags, sfe->flags))
- continue;
- return -EEXIST;
+ if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
+ sfe->flags))
+ return -EEXIST;
}
return -ENOATTR;
}
@@ -830,9 +825,9 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
/*
* Retrieve the attribute value and length.
*
- * If ATTR_KERNOVAL is specified, only the length needs to be returned.
- * Unlike a lookup, we only return an error if the attribute does not
- * exist or we can't retrieve the value.
+ * If args->valuelen is zero, only the length needs to be returned. Unlike a
+ * lookup, we only return an error if the attribute does not exist or we can't
+ * retrieve the value.
*/
int
xfs_attr_shortform_getvalue(
@@ -847,14 +842,10 @@ xfs_attr_shortform_getvalue(
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
- if (sfe->namelen != args->namelen)
- continue;
- if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
- continue;
- if (!xfs_attr_namesp_match(args->flags, sfe->flags))
- continue;
- return xfs_attr_copy_value(args, &sfe->nameval[args->namelen],
- sfe->valuelen);
+ if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
+ sfe->flags))
+ return xfs_attr_copy_value(args,
+ &sfe->nameval[args->namelen], sfe->valuelen);
}
return -ENOATTR;
}
@@ -918,7 +909,7 @@ xfs_attr_shortform_to_leaf(
nargs.valuelen = sfe->valuelen;
nargs.hashval = xfs_da_hashname(sfe->nameval,
sfe->namelen);
- nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
+ nargs.attr_filter = sfe->flags & XFS_ATTR_NSP_ONDISK_MASK;
error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
ASSERT(error == -ENOATTR);
error = xfs_attr3_leaf_add(bp, &nargs);
@@ -1124,7 +1115,7 @@ xfs_attr3_leaf_to_shortform(
nargs.value = &name_loc->nameval[nargs.namelen];
nargs.valuelen = be16_to_cpu(name_loc->valuelen);
nargs.hashval = be32_to_cpu(entry->hashval);
- nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
+ nargs.attr_filter = entry->flags & XFS_ATTR_NSP_ONDISK_MASK;
xfs_attr_shortform_add(&nargs, forkoff);
}
error = 0;
@@ -1449,8 +1440,9 @@ xfs_attr3_leaf_add_work(
entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
ichdr->freemap[mapindex].size);
entry->hashval = cpu_to_be32(args->hashval);
- entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
- entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
+ entry->flags = args->attr_filter;
+ if (tmp)
+ entry->flags |= XFS_ATTR_LOCAL;
if (args->op_flags & XFS_DA_OP_RENAME) {
entry->flags |= XFS_ATTR_INCOMPLETE;
if ((args->blkno2 == args->blkno) &&
@@ -2346,7 +2338,7 @@ xfs_attr3_leaf_lookup_int(
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
if (ichdr.count >= args->geo->blksize / 8) {
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
}
@@ -2365,11 +2357,11 @@ xfs_attr3_leaf_lookup_int(
break;
}
if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) {
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
}
if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) {
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
}
@@ -2399,33 +2391,17 @@ xfs_attr3_leaf_lookup_int(
/*
* GROT: Add code to remove incomplete entries.
*/
- /*
- * If we are looking for INCOMPLETE entries, show only those.
- * If we are looking for complete entries, show only those.
- */
- if (!!(args->op_flags & XFS_DA_OP_INCOMPLETE) !=
- !!(entry->flags & XFS_ATTR_INCOMPLETE)) {
- continue;
- }
if (entry->flags & XFS_ATTR_LOCAL) {
name_loc = xfs_attr3_leaf_name_local(leaf, probe);
- if (name_loc->namelen != args->namelen)
- continue;
- if (memcmp(args->name, name_loc->nameval,
- args->namelen) != 0)
- continue;
- if (!xfs_attr_namesp_match(args->flags, entry->flags))
+ if (!xfs_attr_match(args, name_loc->namelen,
+ name_loc->nameval, entry->flags))
continue;
args->index = probe;
return -EEXIST;
} else {
name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
- if (name_rmt->namelen != args->namelen)
- continue;
- if (memcmp(args->name, name_rmt->name,
- args->namelen) != 0)
- continue;
- if (!xfs_attr_namesp_match(args->flags, entry->flags))
+ if (!xfs_attr_match(args, name_rmt->namelen,
+ name_rmt->name, entry->flags))
continue;
args->index = probe;
args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
@@ -2444,9 +2420,9 @@ xfs_attr3_leaf_lookup_int(
* Get the value associated with an attribute name from a leaf attribute
* list structure.
*
- * If ATTR_KERNOVAL is specified, only the length needs to be returned.
- * Unlike a lookup, we only return an error if the attribute does not
- * exist or we can't retrieve the value.
+ * If args->valuelen is zero, only the length needs to be returned. Unlike a
+ * lookup, we only return an error if the attribute does not exist or we can't
+ * retrieve the value.
*/
int
xfs_attr3_leaf_getvalue(
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 73615b1dd1a8..6dd2d937a42a 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -8,7 +8,6 @@
#define __XFS_ATTR_LEAF_H__
struct attrlist;
-struct attrlist_cursor_kern;
struct xfs_attr_list_context;
struct xfs_da_args;
struct xfs_da_state;
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 8b7f74b3bea2..01ad7f353e08 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -397,7 +397,7 @@ xfs_attr_rmtval_get(
trace_xfs_attr_rmtval_get(args);
- ASSERT(!(args->flags & ATTR_KERNOVAL));
+ ASSERT(args->valuelen != 0);
ASSERT(args->rmtvaluelen == args->valuelen);
valuelen = args->rmtvaluelen;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 9a6d7a84689a..fda13cd7add0 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -193,14 +193,12 @@ xfs_default_attroffset(
struct xfs_mount *mp = ip->i_mount;
uint offset;
- if (mp->m_sb.sb_inodesize == 256) {
- offset = XFS_LITINO(mp, ip->i_d.di_version) -
- XFS_BMDR_SPACE_CALC(MINABTPTRS);
- } else {
+ if (mp->m_sb.sb_inodesize == 256)
+ offset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ else
offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
- }
- ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
+ ASSERT(offset < XFS_LITINO(mp));
return offset;
}
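
XFS_LITINO() losing its di_version argument (here and in xfs_attr_shortform_bytesfit above) reflects that the inode literal area, the space left for the data and attr forks, now depends only on the filesystem rather than on the individual inode: every inode on a v5 filesystem uses the v3 core. A standalone sketch of the computation (core sizes are illustrative):

#include <stdint.h>

static uint32_t litino_sketch(uint16_t inodesize, int fs_has_crc)
{
	/* v3 dinode core (v5 fs) vs older core; sizes illustrative */
	uint32_t core = fs_has_crc ? 176 : 96;

	return inodesize - core;
}
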
@@ -690,7 +688,7 @@ xfs_bmap_extents_to_btree(
* Need a cursor. Can't allocate until bb_level is filled in.
*/
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
+ cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
/*
* Convert to a btree with two levels, one record in root.
*/
@@ -727,7 +725,7 @@ xfs_bmap_extents_to_btree(
ASSERT(tp->t_firstblock == NULLFSBLOCK ||
args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
tp->t_firstblock = args.fsbno;
- cur->bc_private.b.allocated++;
+ cur->bc_ino.allocated++;
ip->i_d.di_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
@@ -953,7 +951,7 @@ xfs_bmap_add_attrfork_btree(
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
return -ENOSPC;
}
- cur->bc_private.b.allocated = 0;
+ cur->bc_ino.allocated = 0;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
}
return 0;
@@ -980,7 +978,7 @@ xfs_bmap_add_attrfork_extents(
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
XFS_DATA_FORK);
if (cur) {
- cur->bc_private.b.allocated = 0;
+ cur->bc_ino.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
return error;
@@ -1178,13 +1176,13 @@ xfs_iread_bmbt_block(
{
struct xfs_iread_state *ir = priv;
struct xfs_mount *mp = cur->bc_mp;
- struct xfs_inode *ip = cur->bc_private.b.ip;
+ struct xfs_inode *ip = cur->bc_ino.ip;
struct xfs_btree_block *block;
struct xfs_buf *bp;
struct xfs_bmbt_rec *frp;
xfs_extnum_t num_recs;
xfs_extnum_t j;
- int whichfork = cur->bc_private.b.whichfork;
+ int whichfork = cur->bc_ino.whichfork;
block = xfs_btree_get_block(cur, level, &bp);
@@ -1528,7 +1526,7 @@ xfs_bmap_add_extent_delay_real(
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!bma->cur ||
- (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+ (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
@@ -1818,7 +1816,7 @@ xfs_bmap_add_extent_delay_real(
temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
- (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ (bma->cur ? bma->cur->bc_ino.allocated : 0));
PREV.br_startoff = new_endoff;
PREV.br_blockcount = temp;
@@ -1904,7 +1902,7 @@ xfs_bmap_add_extent_delay_real(
temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
- (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ (bma->cur ? bma->cur->bc_ino.allocated : 0));
PREV.br_startblock = nullstartblock(da_new);
PREV.br_blockcount = temp;
@@ -2025,8 +2023,8 @@ xfs_bmap_add_extent_delay_real(
xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
if (bma->cur) {
- da_new += bma->cur->bc_private.b.allocated;
- bma->cur->bc_private.b.allocated = 0;
+ da_new += bma->cur->bc_ino.allocated;
+ bma->cur->bc_ino.allocated = 0;
}
/* adjust for changes in reserved delayed indirect blocks */
@@ -2573,7 +2571,7 @@ xfs_bmap_add_extent_unwritten_real(
/* clear out the allocated field, done with it now in any case. */
if (cur) {
- cur->bc_private.b.allocated = 0;
+ cur->bc_ino.allocated = 0;
*curp = cur;
}
@@ -2752,7 +2750,7 @@ xfs_bmap_add_extent_hole_real(
struct xfs_bmbt_irec old;
ASSERT(!isnullstartblock(new->br_startblock));
- ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+ ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
@@ -2955,7 +2953,7 @@ xfs_bmap_add_extent_hole_real(
/* clear out the allocated field, done with it now in any case. */
if (cur)
- cur->bc_private.b.allocated = 0;
+ cur->bc_ino.allocated = 0;
xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
@@ -4187,8 +4185,8 @@ xfs_bmapi_allocate(
bma->nallocs++;
if (bma->cur)
- bma->cur->bc_private.b.flags =
- bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
+ bma->cur->bc_ino.flags =
+ bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
bma->got.br_startoff = bma->offset;
bma->got.br_startblock = bma->blkno;
@@ -4709,7 +4707,7 @@ xfs_bmapi_remap(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.flags = 0;
+ cur->bc_ino.flags = 0;
}
got.br_startoff = bno;
@@ -5364,7 +5362,7 @@ __xfs_bunmapi(
if (ifp->if_flags & XFS_IFBROOT) {
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.flags = 0;
+ cur->bc_ino.flags = 0;
} else
cur = NULL;
@@ -5620,7 +5618,7 @@ error0:
xfs_trans_log_inode(tp, ip, logflags);
if (cur) {
if (!error)
- cur->bc_private.b.allocated = 0;
+ cur->bc_ino.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
return error;
@@ -5839,7 +5837,7 @@ xfs_bmap_collapse_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.flags = 0;
+ cur->bc_ino.flags = 0;
}
if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
@@ -5956,7 +5954,7 @@ xfs_bmap_insert_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.flags = 0;
+ cur->bc_ino.flags = 0;
}
if (*next_fsb == NULLFSBLOCK) {
@@ -6025,8 +6023,8 @@ del_cursor:
* @split_fsb is a block where the extent is split. If split_fsb lies in a
* hole or the first block of extents, just return 0.
*/
-STATIC int
-xfs_bmap_split_extent_at(
+int
+xfs_bmap_split_extent(
struct xfs_trans *tp,
struct xfs_inode *ip,
xfs_fileoff_t split_fsb)
@@ -6074,7 +6072,7 @@ xfs_bmap_split_extent_at(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.flags = 0;
+ cur->bc_ino.flags = 0;
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
@@ -6133,7 +6131,7 @@ xfs_bmap_split_extent_at(
del_cursor:
if (cur) {
- cur->bc_private.b.allocated = 0;
+ cur->bc_ino.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
@@ -6142,34 +6140,6 @@ del_cursor:
return error;
}
-int
-xfs_bmap_split_extent(
- struct xfs_inode *ip,
- xfs_fileoff_t split_fsb)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
- int error;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
- XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
- if (error)
- goto out;
-
- return xfs_trans_commit(tp);
-
-out:
- xfs_trans_cancel(tp);
- return error;
-}
-
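
With the transaction-allocating wrapper deleted, xfs_bmap_split_extent() (the renamed _at variant above) takes the caller's transaction, so a caller now owns the whole setup/commit cycle. A kernel-context sketch mirroring the deleted wrapper body (example_split() is a hypothetical name; the ILOCK is handed to the transaction by xfs_trans_ijoin and released at commit/cancel):

static int example_split(struct xfs_inode *ip, xfs_fileoff_t split_fsb)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_trans *tp;
	int error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_bmap_split_extent(tp, ip, split_fsb);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}
	return xfs_trans_commit(tp);
}
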
/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 14d25e0b7d9c..f3259ad5c22c 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -222,7 +222,8 @@ int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fileoff_t stop_fsb);
-int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
+int xfs_bmap_split_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ xfs_fileoff_t split_offset);
int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index ffe608d2a2d9..295a59cf8840 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -166,13 +166,13 @@ xfs_bmbt_dup_cursor(
struct xfs_btree_cur *new;
new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.b.ip, cur->bc_private.b.whichfork);
+ cur->bc_ino.ip, cur->bc_ino.whichfork);
/*
* Copy the firstblock, dfops, and flags values,
* since init cursor doesn't get them.
*/
- new->bc_private.b.flags = cur->bc_private.b.flags;
+ new->bc_ino.flags = cur->bc_ino.flags;
return new;
}
@@ -183,12 +183,12 @@ xfs_bmbt_update_cursor(
struct xfs_btree_cur *dst)
{
ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
- (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
+ (dst->bc_ino.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
- dst->bc_private.b.allocated += src->bc_private.b.allocated;
+ dst->bc_ino.allocated += src->bc_ino.allocated;
dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;
- src->bc_private.b.allocated = 0;
+ src->bc_ino.allocated = 0;
}
STATIC int
@@ -205,8 +205,8 @@ xfs_bmbt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.fsbno = cur->bc_tp->t_firstblock;
- xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
- cur->bc_private.b.whichfork);
+ xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
+ cur->bc_ino.whichfork);
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = be64_to_cpu(start->l);
@@ -230,7 +230,7 @@ xfs_bmbt_alloc_block(
}
args.minlen = args.maxlen = args.prod = 1;
- args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
+ args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
if (!args.wasdel && args.tp->t_blk_res == 0) {
error = -ENOSPC;
goto error0;
@@ -259,10 +259,10 @@ xfs_bmbt_alloc_block(
ASSERT(args.len == 1);
cur->bc_tp->t_firstblock = args.fsbno;
- cur->bc_private.b.allocated++;
- cur->bc_private.b.ip->i_d.di_nblocks++;
- xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
- xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
+ cur->bc_ino.allocated++;
+ cur->bc_ino.ip->i_d.di_nblocks++;
+ xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
+ xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
XFS_TRANS_DQ_BCOUNT, 1L);
new->l = cpu_to_be64(args.fsbno);
@@ -280,12 +280,12 @@ xfs_bmbt_free_block(
struct xfs_buf *bp)
{
struct xfs_mount *mp = cur->bc_mp;
- struct xfs_inode *ip = cur->bc_private.b.ip;
+ struct xfs_inode *ip = cur->bc_ino.ip;
struct xfs_trans *tp = cur->bc_tp;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
struct xfs_owner_info oinfo;
- xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
ip->i_d.di_nblocks--;
@@ -302,8 +302,8 @@ xfs_bmbt_get_minrecs(
if (level == cur->bc_nlevels - 1) {
struct xfs_ifork *ifp;
- ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
- cur->bc_private.b.whichfork);
+ ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
+ cur->bc_ino.whichfork);
return xfs_bmbt_maxrecs(cur->bc_mp,
ifp->if_broot_bytes, level == 0) / 2;
@@ -320,8 +320,8 @@ xfs_bmbt_get_maxrecs(
if (level == cur->bc_nlevels - 1) {
struct xfs_ifork *ifp;
- ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
- cur->bc_private.b.whichfork);
+ ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
+ cur->bc_ino.whichfork);
return xfs_bmbt_maxrecs(cur->bc_mp,
ifp->if_broot_bytes, level == 0);
@@ -347,7 +347,7 @@ xfs_bmbt_get_dmaxrecs(
{
if (level != cur->bc_nlevels - 1)
return cur->bc_mp->m_bmap_dmxr[level != 0];
- return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
+ return xfs_bmdr_maxrecs(cur->bc_ino.forksize, level == 0);
}
STATIC void
@@ -566,11 +566,11 @@ xfs_bmbt_init_cursor(
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
- cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
- cur->bc_private.b.ip = ip;
- cur->bc_private.b.allocated = 0;
- cur->bc_private.b.flags = 0;
- cur->bc_private.b.whichfork = whichfork;
+ cur->bc_ino.forksize = XFS_IFORK_SIZE(ip, whichfork);
+ cur->bc_ino.ip = ip;
+ cur->bc_ino.allocated = 0;
+ cur->bc_ino.flags = 0;
+ cur->bc_ino.whichfork = whichfork;
return cur;
}
@@ -644,7 +644,7 @@ xfs_bmbt_change_owner(
cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
if (!cur)
return -ENOMEM;
- cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;
+ cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;
error = xfs_btree_change_owner(cur, new_owner, buffer_list);
xfs_btree_del_cursor(cur, error);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index fd300dc93ca4..2d25bab68764 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -20,6 +20,7 @@
#include "xfs_trace.h"
#include "xfs_alloc.h"
#include "xfs_log.h"
+#include "xfs_btree_staging.h"
/*
* Cursor allocation zone.
@@ -214,7 +215,7 @@ xfs_btree_check_sptr(
{
if (level <= 0)
return false;
- return xfs_verify_agbno(cur->bc_mp, cur->bc_private.a.agno, agbno);
+ return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.agno, agbno);
}
/*
@@ -234,8 +235,8 @@ xfs_btree_check_ptr(
return 0;
xfs_err(cur->bc_mp,
"Inode %llu fork %d: Corrupt btree %d pointer at level %d index %d.",
- cur->bc_private.b.ip->i_ino,
- cur->bc_private.b.whichfork, cur->bc_btnum,
+ cur->bc_ino.ip->i_ino,
+ cur->bc_ino.whichfork, cur->bc_btnum,
level, index);
} else {
if (xfs_btree_check_sptr(cur, be32_to_cpu((&ptr->s)[index]),
@@ -243,7 +244,7 @@ xfs_btree_check_ptr(
return 0;
xfs_err(cur->bc_mp,
"AG %u: Corrupt btree %d pointer at level %d index %d.",
- cur->bc_private.a.agno, cur->bc_btnum,
+ cur->bc_ag.agno, cur->bc_btnum,
level, index);
}
@@ -378,10 +379,12 @@ xfs_btree_del_cursor(
* allocated indirect blocks' accounting.
*/
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
- cur->bc_private.b.allocated == 0);
+ cur->bc_ino.allocated == 0);
/*
* Free the cursor.
*/
+ if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
+ kmem_free((void *)cur->bc_ops);
kmem_cache_free(xfs_btree_cur_zone, cur);
}
@@ -642,6 +645,17 @@ xfs_btree_ptr_addr(
((char *)block + xfs_btree_ptr_offset(cur, n, level));
}
+struct xfs_ifork *
+xfs_btree_ifork_ptr(
+ struct xfs_btree_cur *cur)
+{
+ ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+
+ if (cur->bc_flags & XFS_BTREE_STAGING)
+ return cur->bc_ino.ifake->if_fork;
+ return XFS_IFORK_PTR(cur->bc_ino.ip, cur->bc_ino.whichfork);
+}
+
/*
* Get the root block which is stored in the inode.
*
@@ -652,9 +666,8 @@ STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
struct xfs_btree_cur *cur)
{
- struct xfs_ifork *ifp;
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
- ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
return (struct xfs_btree_block *)ifp->if_broot;
}
@@ -881,13 +894,13 @@ xfs_btree_readahead_sblock(
if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
left, 1, cur->bc_ops->buf_ops);
rval++;
}
if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
right, 1, cur->bc_ops->buf_ops);
rval++;
}
@@ -945,7 +958,7 @@ xfs_btree_ptr_to_daddr(
*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
} else {
agbno = be32_to_cpu(ptr->s);
- *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
+ *daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.agno,
agbno);
}
@@ -1014,7 +1027,7 @@ xfs_btree_ptr_is_null(
return ptr->s == cpu_to_be32(NULLAGBLOCK);
}
-STATIC void
+void
xfs_btree_set_ptr_null(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
@@ -1050,7 +1063,7 @@ xfs_btree_get_sibling(
}
}
-STATIC void
+void
xfs_btree_set_sibling(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
@@ -1128,7 +1141,7 @@ xfs_btree_init_block(
btnum, level, numrecs, owner, 0);
}
-STATIC void
+void
xfs_btree_init_block_cur(
struct xfs_btree_cur *cur,
struct xfs_buf *bp,
@@ -1144,9 +1157,9 @@ xfs_btree_init_block_cur(
* code.
*/
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- owner = cur->bc_private.b.ip->i_ino;
+ owner = cur->bc_ino.ip->i_ino;
else
- owner = cur->bc_private.a.agno;
+ owner = cur->bc_ag.agno;
xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
cur->bc_btnum, level, numrecs,
@@ -1220,7 +1233,7 @@ xfs_btree_set_refs(
}
}
-STATIC int
+int
xfs_btree_get_buf_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr,
@@ -1280,7 +1293,7 @@ xfs_btree_read_buf_block(
/*
* Copy keys from one btree block to another.
*/
-STATIC void
+void
xfs_btree_copy_keys(
struct xfs_btree_cur *cur,
union xfs_btree_key *dst_key,
@@ -1308,11 +1321,11 @@ xfs_btree_copy_recs(
/*
* Copy block pointers from one btree block to another.
*/
-STATIC void
+void
xfs_btree_copy_ptrs(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *dst_ptr,
- union xfs_btree_ptr *src_ptr,
+ const union xfs_btree_ptr *src_ptr,
int numptrs)
{
ASSERT(numptrs >= 0);
@@ -1393,8 +1406,8 @@ xfs_btree_log_keys(
xfs_btree_key_offset(cur, first),
xfs_btree_key_offset(cur, last + 1) - 1);
} else {
- xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
- xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
+ xfs_ilog_fbroot(cur->bc_ino.whichfork));
}
}
@@ -1436,8 +1449,8 @@ xfs_btree_log_ptrs(
xfs_btree_ptr_offset(cur, first, level),
xfs_btree_ptr_offset(cur, last + 1, level) - 1);
} else {
- xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
- xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
+ xfs_ilog_fbroot(cur->bc_ino.whichfork));
}
}
@@ -1505,8 +1518,8 @@ xfs_btree_log_block(
xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
xfs_trans_log_buf(cur->bc_tp, bp, first, last);
} else {
- xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
- xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
+ xfs_ilog_fbroot(cur->bc_ino.whichfork));
}
}
@@ -1743,10 +1756,10 @@ xfs_btree_lookup_get_block(
/* Check the inode owner since the verifiers don't. */
if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
- !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_INVALID_OWNER) &&
+ !(cur->bc_ino.flags & XFS_BTCUR_BMBT_INVALID_OWNER) &&
(cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
- cur->bc_private.b.ip->i_ino)
+ cur->bc_ino.ip->i_ino)
goto out_bad;
/* Did we get the level we were looking for? */
@@ -1762,7 +1775,7 @@ xfs_btree_lookup_get_block(
out_bad:
*blkp = NULL;
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(cur->bc_tp, bp);
return -EFSCORRUPTED;
}
@@ -2938,9 +2951,9 @@ xfs_btree_new_iroot(
xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
- xfs_iroot_realloc(cur->bc_private.b.ip,
+ xfs_iroot_realloc(cur->bc_ino.ip,
1 - xfs_btree_get_numrecs(cblock),
- cur->bc_private.b.whichfork);
+ cur->bc_ino.whichfork);
xfs_btree_setbuf(cur, level, cbp);
@@ -2953,7 +2966,7 @@ xfs_btree_new_iroot(
xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
*logflags |=
- XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
+ XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
*stat = 1;
return 0;
error0:
@@ -3105,11 +3118,11 @@ xfs_btree_make_block_unfull(
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1) {
- struct xfs_inode *ip = cur->bc_private.b.ip;
+ struct xfs_inode *ip = cur->bc_ino.ip;
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
/* A root block that can be made bigger. */
- xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
+ xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork);
*stat = 1;
} else {
/* A root block that needs replacing */
@@ -3455,8 +3468,8 @@ STATIC int
xfs_btree_kill_iroot(
struct xfs_btree_cur *cur)
{
- int whichfork = cur->bc_private.b.whichfork;
- struct xfs_inode *ip = cur->bc_private.b.ip;
+ int whichfork = cur->bc_ino.whichfork;
+ struct xfs_inode *ip = cur->bc_ino.ip;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_btree_block *block;
struct xfs_btree_block *cblock;
@@ -3514,8 +3527,8 @@ xfs_btree_kill_iroot(
index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
if (index) {
- xfs_iroot_realloc(cur->bc_private.b.ip, index,
- cur->bc_private.b.whichfork);
+ xfs_iroot_realloc(cur->bc_ino.ip, index,
+ cur->bc_ino.whichfork);
block = ifp->if_broot;
}
@@ -3544,7 +3557,7 @@ xfs_btree_kill_iroot(
cur->bc_bufs[level - 1] = NULL;
be16_add_cpu(&block->bb_level, -1);
xfs_trans_log_inode(cur->bc_tp, ip,
- XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork));
cur->bc_nlevels--;
out0:
return 0;
@@ -3712,8 +3725,8 @@ xfs_btree_delrec(
*/
if (level == cur->bc_nlevels - 1) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
- xfs_iroot_realloc(cur->bc_private.b.ip, -1,
- cur->bc_private.b.whichfork);
+ xfs_iroot_realloc(cur->bc_ino.ip, -1,
+ cur->bc_ino.whichfork);
error = xfs_btree_kill_iroot(cur);
if (error)
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 3eff7c321d43..8626c5a81aad 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -10,6 +10,7 @@ struct xfs_buf;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
+struct xfs_ifork;
extern kmem_zone_t *xfs_btree_cur_zone;
@@ -177,15 +178,37 @@ union xfs_btree_irec {
struct xfs_refcount_irec rc;
};
-/* Per-AG btree private information. */
-union xfs_btree_cur_private {
- struct {
- unsigned long nr_ops; /* # record updates */
- int shape_changes; /* # of extent splits */
- } refc;
- struct {
- bool active; /* allocation cursor state */
- } abt;
+/* Per-AG btree information. */
+struct xfs_btree_cur_ag {
+ union {
+ struct xfs_buf *agbp;
+ struct xbtree_afakeroot *afake; /* for staging cursor */
+ };
+ xfs_agnumber_t agno;
+ union {
+ struct {
+ unsigned long nr_ops; /* # record updates */
+ int shape_changes; /* # of extent splits */
+ } refc;
+ struct {
+ bool active; /* allocation cursor state */
+ } abt;
+ };
+};
+
+/* Btree-in-inode cursor information */
+struct xfs_btree_cur_ino {
+ struct xfs_inode *ip;
+ struct xbtree_ifakeroot *ifake; /* for staging cursor */
+ int allocated;
+ short forksize;
+ char whichfork;
+ char flags;
+/* We are converting a delalloc reservation */
+#define XFS_BTCUR_BMBT_WASDEL (1 << 0)
+
+/* For extent swap, ignore owner check in verifier */
+#define XFS_BTCUR_BMBT_INVALID_OWNER (1 << 1)
};
/*
@@ -209,21 +232,9 @@ typedef struct xfs_btree_cur
xfs_btnum_t bc_btnum; /* identifies which btree type */
int bc_statoff; /* offset of btree stats array */
union {
- struct { /* needed for BNO, CNT, INO */
- struct xfs_buf *agbp; /* agf/agi buffer pointer */
- xfs_agnumber_t agno; /* ag number */
- union xfs_btree_cur_private priv;
- } a;
- struct { /* needed for BMAP */
- struct xfs_inode *ip; /* pointer to our inode */
- int allocated; /* count of alloced */
- short forksize; /* fork's inode space */
- char whichfork; /* data or attr fork */
- char flags; /* flags */
-#define XFS_BTCUR_BPRV_WASDEL (1<<0) /* was delayed */
-#define XFS_BTCUR_BPRV_INVALID_OWNER (1<<1) /* for ext swap */
- } b;
- } bc_private; /* per-btree type data */
+ struct xfs_btree_cur_ag bc_ag;
+ struct xfs_btree_cur_ino bc_ino;
+ };
} xfs_btree_cur_t;
/* cursor flags */
@@ -232,6 +243,12 @@ typedef struct xfs_btree_cur
#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS (1<<3) /* uses extended btree blocks */
#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
+/*
+ * The root of this btree is a fakeroot structure so that we can stage a btree
+ * rebuild without leaving it accessible via primary metadata. The ops struct
+ * is dynamically allocated and must be freed when the cursor is deleted.
+ */
+#define XFS_BTREE_STAGING (1<<5)
#define XFS_BTREE_NOERROR 0
@@ -494,6 +511,7 @@ union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
int xfs_btree_has_record(struct xfs_btree_cur *cur, union xfs_btree_irec *low,
union xfs_btree_irec *high, bool *exists);
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
+struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);
/* Does this cursor point to the last block in the given level? */
static inline bool
@@ -512,4 +530,20 @@ xfs_btree_islastblock(
return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}
+void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr);
+int xfs_btree_get_buf_block(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr,
+ struct xfs_btree_block **block, struct xfs_buf **bpp);
+void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block, union xfs_btree_ptr *ptr,
+ int lr);
+void xfs_btree_init_block_cur(struct xfs_btree_cur *cur,
+ struct xfs_buf *bp, int level, int numrecs);
+void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *dst_ptr,
+ const union xfs_btree_ptr *src_ptr, int numptrs);
+void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
+ union xfs_btree_key *dst_key, union xfs_btree_key *src_key,
+ int numkeys);
+
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_btree_staging.c b/fs/xfs/libxfs/xfs_btree_staging.c
new file mode 100644
index 000000000000..f464a7c7cf22
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_btree_staging.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_btree.h"
+#include "xfs_trace.h"
+#include "xfs_btree_staging.h"
+
+/*
+ * Staging Cursors and Fake Roots for Btrees
+ * =========================================
+ *
+ * A staging btree cursor is a special type of btree cursor that callers must
+ * use to construct a new btree index using the btree bulk loader code. The
+ * bulk loading code uses the staging btree cursor to abstract the details of
+ * initializing new btree blocks and filling them with records or key/ptr
+ * pairs. Regular btree operations (e.g. queries and modifications) are not
+ * supported with staging cursors, and callers must not invoke them.
+ *
+ * Fake root structures contain all the information about a btree that is under
+ * construction by the bulk loading code. Staging btree cursors point to fake
+ * root structures instead of the usual AG header or inode structure.
+ *
+ * Callers are expected to initialize a fake root structure and pass it into
+ * the _stage_cursor function for a specific btree type. When bulk loading is
+ * complete, callers should call the _commit_staged_btree function for that
+ * specific btree type to commit the new btree into the filesystem.
+ */
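For illustration, the caller-visible lifecycle sketched above might read as follows; xfs_foobt_stage_cursor() and xfs_foobt_commit_staged_btree() are hypothetical stand-ins for the per-btree-type helpers, not functions added by this patch:

    struct xbtree_afakeroot	afake = { 0 };
    struct xfs_btree_cur	*cur;

    /* Stage a cursor whose root points at the zeroed fake root. */
    cur = xfs_foobt_stage_cursor(mp, &afake, agno);	/* hypothetical */

    /* ... bulk load records into the new btree here ... */

    /* Log the new root, then swap the staging ops for the real ones. */
    xfs_foobt_commit_staged_btree(cur, tp, agbp);	/* hypothetical */
    xfs_btree_del_cursor(cur, 0);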
+
+/*
+ * Don't allow staging cursors to be duplicated because they're supposed to be
+ * kept private to a single thread.
+ */
+STATIC struct xfs_btree_cur *
+xfs_btree_fakeroot_dup_cursor(
+ struct xfs_btree_cur *cur)
+{
+ ASSERT(0);
+ return NULL;
+}
+
+/*
+ * Don't allow block allocation for a staging cursor, because staging cursors
+ * do not support regular btree modifications.
+ *
+ * Bulk loading uses a separate callback to obtain new blocks from a
+ * preallocated list, which prevents ENOSPC failures during loading.
+ */
+STATIC int
+xfs_btree_fakeroot_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start_bno,
+ union xfs_btree_ptr *new_bno,
+ int *stat)
+{
+ ASSERT(0);
+ return -EFSCORRUPTED;
+}
+
+/*
+ * Don't allow block freeing for a staging cursor, because staging cursors
+ * do not support regular btree modifications.
+ */
+STATIC int
+xfs_btree_fakeroot_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ ASSERT(0);
+ return -EFSCORRUPTED;
+}
+
+/* Initialize a pointer to the root block from the fakeroot. */
+STATIC void
+xfs_btree_fakeroot_init_ptr_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ struct xbtree_afakeroot *afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ afake = cur->bc_ag.afake;
+ ptr->s = cpu_to_be32(afake->af_root);
+}
+
+/*
+ * Bulk Loading for AG Btrees
+ * ==========================
+ *
+ * For a btree rooted in an AG header, pass a xbtree_afakeroot structure to the
+ * staging cursor. Callers should initialize this to zero.
+ *
+ * The _stage_cursor() function for a specific btree type should call
+ * xfs_btree_stage_afakeroot to set up the in-memory cursor as a staging
+ * cursor. The corresponding _commit_staged_btree() function should log the
+ * new root and call xfs_btree_commit_afakeroot() to transform the staging
+ * cursor into a regular btree cursor.
+ */
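As a sketch of that division of labor, a hypothetical AG btree type's staging constructor could be built from an ordinary cursor-init helper plus xfs_btree_stage_afakeroot(); only the xfs_btree_* calls below come from this patch:

    STATIC struct xfs_btree_cur *
    xfs_foobt_stage_cursor(
    	struct xfs_mount	*mp,
    	struct xbtree_afakeroot	*afake,
    	xfs_agnumber_t		agno)
    {
    	struct xfs_btree_cur	*cur;

    	/* Common cursor init, but with no AG buffer and no transaction. */
    	cur = xfs_foobt_init_common(mp, NULL, agno);	/* hypothetical */
    	/* Point the cursor at the fake root and override its ops. */
    	xfs_btree_stage_afakeroot(cur, afake);
    	return cur;
    }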
+
+/* Update the btree root information for a per-AG fake root. */
+STATIC void
+xfs_btree_afakeroot_set_root(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int inc)
+{
+ struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+ afake->af_root = be32_to_cpu(ptr->s);
+ afake->af_levels += inc;
+}
+
+/*
+ * Initialize an AG-rooted btree cursor with the given AG btree fake root.
+ * The btree cursor's bc_ops will be overridden as needed to make the staging
+ * functionality work.
+ */
+void
+xfs_btree_stage_afakeroot(
+ struct xfs_btree_cur *cur,
+ struct xbtree_afakeroot *afake)
+{
+ struct xfs_btree_ops *nops;
+
+ ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
+ ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE));
+ ASSERT(cur->bc_tp == NULL);
+
+ nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
+ memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
+ nops->alloc_block = xfs_btree_fakeroot_alloc_block;
+ nops->free_block = xfs_btree_fakeroot_free_block;
+ nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
+ nops->set_root = xfs_btree_afakeroot_set_root;
+ nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;
+
+ cur->bc_ag.afake = afake;
+ cur->bc_nlevels = afake->af_levels;
+ cur->bc_ops = nops;
+ cur->bc_flags |= XFS_BTREE_STAGING;
+}
+
+/*
+ * Transform an AG-rooted staging btree cursor back into a regular cursor by
+ * substituting a real btree root for the fake one and restoring normal btree
+ * cursor ops. The caller must log the btree root change prior to calling
+ * this.
+ */
+void
+xfs_btree_commit_afakeroot(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ const struct xfs_btree_ops *ops)
+{
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+ ASSERT(cur->bc_tp == NULL);
+
+ trace_xfs_btree_commit_afakeroot(cur);
+
+ kmem_free((void *)cur->bc_ops);
+ cur->bc_ag.agbp = agbp;
+ cur->bc_ops = ops;
+ cur->bc_flags &= ~XFS_BTREE_STAGING;
+ cur->bc_tp = tp;
+}
+
+/*
+ * Bulk Loading for Inode-Rooted Btrees
+ * ====================================
+ *
+ * For a btree rooted in an inode fork, pass a xbtree_ifakeroot structure to
+ * the staging cursor. This structure should be initialized as follows:
+ *
+ * - if_fork_size field should be set to the number of bytes available to the
+ * fork in the inode.
+ *
+ * - if_fork should point to a freshly allocated struct xfs_ifork.
+ *
+ * - if_format should be set to the appropriate fork type (e.g.
+ * XFS_DINODE_FMT_BTREE).
+ *
+ * All other fields must be zero.
+ *
+ * The _stage_cursor() function for a specific btree type should call
+ * xfs_btree_stage_ifakeroot to set up the in-memory cursor as a staging
+ * cursor. The corresponding _commit_staged_btree() function should log the
+ * new root and call xfs_btree_commit_ifakeroot() to transform the staging
+ * cursor into a regular btree cursor.
+ */
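A minimal sketch of staging an inode-rooted cursor under the initialization rules above; the xfs_foobt_* names and ops are illustrative assumptions:

    struct xbtree_ifakeroot	ifake = {
    	.if_fork	= kmem_zalloc(sizeof(struct xfs_ifork), 0),
    	.if_fork_size	= XFS_IFORK_SIZE(ip, whichfork),
    	.if_format	= XFS_DINODE_FMT_BTREE,
    };
    struct xfs_btree_ops	*ops;

    cur = xfs_foobt_init_common(mp, ip, whichfork);	/* hypothetical */
    xfs_btree_stage_ifakeroot(cur, &ifake, &ops);
    /* ... bulk load, then log the new root ... */
    xfs_btree_commit_ifakeroot(cur, tp, whichfork, &xfs_foobt_ops);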
+
+/*
+ * Initialize an inode-rooted btree cursor with the given inode btree fake
+ * root. The btree cursor's bc_ops will be overridden as needed to make the
+ * staging functionality work. If new_ops is not NULL, these new ops will be
+ * passed out to the caller for further overriding.
+ */
+void
+xfs_btree_stage_ifakeroot(
+ struct xfs_btree_cur *cur,
+ struct xbtree_ifakeroot *ifake,
+ struct xfs_btree_ops **new_ops)
+{
+ struct xfs_btree_ops *nops;
+
+ ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
+ ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+ ASSERT(cur->bc_tp == NULL);
+
+ nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
+ memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
+ nops->alloc_block = xfs_btree_fakeroot_alloc_block;
+ nops->free_block = xfs_btree_fakeroot_free_block;
+ nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
+ nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;
+
+ cur->bc_ino.ifake = ifake;
+ cur->bc_nlevels = ifake->if_levels;
+ cur->bc_ops = nops;
+ cur->bc_flags |= XFS_BTREE_STAGING;
+
+ if (new_ops)
+ *new_ops = nops;
+}
+
+/*
+ * Transform an inode-rooted staging btree cursor back into a regular cursor by
+ * substituting a real btree root for the fake one and restoring normal btree
+ * cursor ops. The caller must log the btree root change prior to calling
+ * this.
+ */
+void
+xfs_btree_commit_ifakeroot(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ int whichfork,
+ const struct xfs_btree_ops *ops)
+{
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+ ASSERT(cur->bc_tp == NULL);
+
+ trace_xfs_btree_commit_ifakeroot(cur);
+
+ kmem_free((void *)cur->bc_ops);
+ cur->bc_ino.ifake = NULL;
+ cur->bc_ino.whichfork = whichfork;
+ cur->bc_ops = ops;
+ cur->bc_flags &= ~XFS_BTREE_STAGING;
+ cur->bc_tp = tp;
+}
+
+/*
+ * Bulk Loading of Staged Btrees
+ * =============================
+ *
+ * This interface is used with a staged btree cursor to create a totally new
+ * btree with a large number of records (i.e. more than what would fit in a
+ * single root block). When the creation is complete, the new root can be
+ * linked atomically into the filesystem by committing the staged cursor.
+ *
+ * Creation of a new btree proceeds roughly as follows:
+ *
+ * The first step is to initialize an appropriate fake btree root structure and
+ * then construct a staged btree cursor. Refer to the block comments about
+ * "Bulk Loading for AG Btrees" and "Bulk Loading for Inode-Rooted Btrees" for
+ * more information about how to do this.
+ *
+ * The second step is to initialize a struct xfs_btree_bload context as
+ * documented in the structure definition.
+ *
+ * The third step is to call xfs_btree_bload_compute_geometry to compute the
+ * height of and the number of blocks needed to construct the btree. See the
+ * section "Computing the Geometry of the New Btree" for details about this
+ * computation.
+ *
+ * In step four, the caller must allocate xfs_btree_bload.nr_blocks blocks and
+ * save them for later use by ->claim_block(). Bulk loading requires all
+ * blocks to be allocated beforehand to avoid ENOSPC failures midway through a
+ * rebuild, and to minimize seek distances of the new btree.
+ *
+ * Step five is to call xfs_btree_bload() to start constructing the btree.
+ *
+ * The final step is to commit the staging btree cursor, which logs the new
+ * btree root and turns the staging cursor into a regular cursor. The caller
+ * is responsible for cleaning up the previous btree blocks, if any.
+ *
+ * Computing the Geometry of the New Btree
+ * =======================================
+ *
+ * The number of items placed in each btree block is computed via the following
+ * algorithm: For leaf levels, the number of items for the level is nr_records
+ * in the bload structure. For node levels, the number of items for the level
+ * is the number of blocks in the next lower level of the tree. For each
+ * level, the desired number of items per block is defined as:
+ *
+ * desired = max(minrecs, maxrecs - slack factor)
+ *
+ * The number of blocks for the level is defined to be:
+ *
+ * blocks = floor(nr_items / desired)
+ *
+ * Note this is rounded down so that the npb calculation below will never fall
+ * below minrecs. The number of items that will actually be loaded into each
+ * btree block is defined as:
+ *
+ * npb = nr_items / blocks
+ *
+ * Some of the leftmost blocks in the level will contain one extra record as
+ * needed to handle uneven division. If the number of records in any block
+ * would exceed maxrecs for that level, blocks is incremented and npb is
+ * recalculated.
+ *
+ * In other words, we compute the number of blocks needed to satisfy a given
+ * loading level, then spread the items as evenly as possible.
+ *
+ * The height and number of fs blocks required to create the btree are computed
+ * and returned via btree_height and nr_blocks.
+ */
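Running illustrative numbers through the algorithm above makes the rounding behavior easier to see:

    /*
     * Worked example (made-up numbers): nr_items = 1000 at a level with
     * maxrecs = 100, minrecs = 50, and the default slack of 25:
     *
     *	desired	= max(50, 100 - 25)	= 75
     *	blocks	= floor(1000 / 75)	= 13
     *	npb	= 1000 / 13		= 76, remainder 12
     *
     * The 12 leftmost blocks each hold 77 items and the remaining block
     * holds 76; since 77 <= maxrecs, no correction pass is needed.
     */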
+
+/*
+ * Put a btree block that we're loading onto the ordered list and release it.
+ * The btree blocks will be written to disk when bulk loading is finished.
+ */
+static void
+xfs_btree_bload_drop_buf(
+ struct list_head *buffers_list,
+ struct xfs_buf **bpp)
+{
+ if (*bpp == NULL)
+ return;
+
+ if (!xfs_buf_delwri_queue(*bpp, buffers_list))
+ ASSERT(0);
+
+ xfs_buf_relse(*bpp);
+ *bpp = NULL;
+}
+
+/*
+ * Allocate and initialize one btree block for bulk loading.
+ *
+ * The new btree block will have its level and numrecs fields set to the values
+ * of the level and nr_this_block parameters, respectively.
+ *
+ * The caller should ensure that ptrp, bpp, and blockp refer to the left
+ * sibling of the new block, if there is any. On exit, ptrp, bpp, and blockp
+ * will all point to the new block.
+ */
+STATIC int
+xfs_btree_bload_prep_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_bload *bbl,
+ struct list_head *buffers_list,
+ unsigned int level,
+ unsigned int nr_this_block,
+ union xfs_btree_ptr *ptrp, /* in/out */
+ struct xfs_buf **bpp, /* in/out */
+ struct xfs_btree_block **blockp, /* in/out */
+ void *priv)
+{
+ union xfs_btree_ptr new_ptr;
+ struct xfs_buf *new_bp;
+ struct xfs_btree_block *new_block;
+ int ret;
+
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
+ size_t new_size;
+
+ ASSERT(*bpp == NULL);
+
+ /* Allocate a new incore btree root block. */
+ new_size = bbl->iroot_size(cur, nr_this_block, priv);
+ ifp->if_broot = kmem_zalloc(new_size, 0);
+ ifp->if_broot_bytes = (int)new_size;
+ ifp->if_flags |= XFS_IFBROOT;
+
+ /* Initialize it and send it out. */
+ xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot,
+ XFS_BUF_DADDR_NULL, cur->bc_btnum, level,
+ nr_this_block, cur->bc_ino.ip->i_ino,
+ cur->bc_flags);
+
+ *bpp = NULL;
+ *blockp = ifp->if_broot;
+ xfs_btree_set_ptr_null(cur, ptrp);
+ return 0;
+ }
+
+ /* Claim one of the caller's preallocated blocks. */
+ xfs_btree_set_ptr_null(cur, &new_ptr);
+ ret = bbl->claim_block(cur, &new_ptr, priv);
+ if (ret)
+ return ret;
+
+ ASSERT(!xfs_btree_ptr_is_null(cur, &new_ptr));
+
+ ret = xfs_btree_get_buf_block(cur, &new_ptr, &new_block, &new_bp);
+ if (ret)
+ return ret;
+
+ /*
+ * The previous block (if any) is the left sibling of the new block,
+ * so set its right sibling pointer to the new block and drop it.
+ */
+ if (*blockp)
+ xfs_btree_set_sibling(cur, *blockp, &new_ptr, XFS_BB_RIGHTSIB);
+ xfs_btree_bload_drop_buf(buffers_list, bpp);
+
+ /* Initialize the new btree block. */
+ xfs_btree_init_block_cur(cur, new_bp, level, nr_this_block);
+ xfs_btree_set_sibling(cur, new_block, ptrp, XFS_BB_LEFTSIB);
+
+ /* Set the out parameters. */
+ *bpp = new_bp;
+ *blockp = new_block;
+ xfs_btree_copy_ptrs(cur, ptrp, &new_ptr, 1);
+ return 0;
+}
+
+/* Load one leaf block. */
+STATIC int
+xfs_btree_bload_leaf(
+ struct xfs_btree_cur *cur,
+ unsigned int recs_this_block,
+ xfs_btree_bload_get_record_fn get_record,
+ struct xfs_btree_block *block,
+ void *priv)
+{
+ unsigned int j;
+ int ret;
+
+ /* Fill the leaf block with records. */
+ for (j = 1; j <= recs_this_block; j++) {
+ union xfs_btree_rec *block_rec;
+
+ ret = get_record(cur, priv);
+ if (ret)
+ return ret;
+ block_rec = xfs_btree_rec_addr(cur, j, block);
+ cur->bc_ops->init_rec_from_cur(cur, block_rec);
+ }
+
+ return 0;
+}
+
+/*
+ * Load one node block with key/ptr pairs.
+ *
+ * child_ptr must point to a block within the next level down in the tree. A
+ * key/ptr entry will be created in the new node block to the block pointed to
+ * by child_ptr. On exit, child_ptr points to the next block on the child
+ * level that needs processing.
+ */
+STATIC int
+xfs_btree_bload_node(
+ struct xfs_btree_cur *cur,
+ unsigned int recs_this_block,
+ union xfs_btree_ptr *child_ptr,
+ struct xfs_btree_block *block)
+{
+ unsigned int j;
+ int ret;
+
+ /* Fill the node block with keys and pointers. */
+ for (j = 1; j <= recs_this_block; j++) {
+ union xfs_btree_key child_key;
+ union xfs_btree_ptr *block_ptr;
+ union xfs_btree_key *block_key;
+ struct xfs_btree_block *child_block;
+ struct xfs_buf *child_bp;
+
+ ASSERT(!xfs_btree_ptr_is_null(cur, child_ptr));
+
+ ret = xfs_btree_get_buf_block(cur, child_ptr, &child_block,
+ &child_bp);
+ if (ret)
+ return ret;
+
+ block_ptr = xfs_btree_ptr_addr(cur, j, block);
+ xfs_btree_copy_ptrs(cur, block_ptr, child_ptr, 1);
+
+ block_key = xfs_btree_key_addr(cur, j, block);
+ xfs_btree_get_keys(cur, child_block, &child_key);
+ xfs_btree_copy_keys(cur, block_key, &child_key, 1);
+
+ xfs_btree_get_sibling(cur, child_block, child_ptr,
+ XFS_BB_RIGHTSIB);
+ xfs_buf_relse(child_bp);
+ }
+
+ return 0;
+}
+
+/*
+ * Compute the maximum number of records (or keyptrs) per block that we want to
+ * install at this level in the btree. Caller is responsible for having set
+ * @cur->bc_ino.forksize to the desired fork size, if appropriate.
+ */
+STATIC unsigned int
+xfs_btree_bload_max_npb(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_bload *bbl,
+ unsigned int level)
+{
+ unsigned int ret;
+
+ if (level == cur->bc_nlevels - 1 && cur->bc_ops->get_dmaxrecs)
+ return cur->bc_ops->get_dmaxrecs(cur, level);
+
+ ret = cur->bc_ops->get_maxrecs(cur, level);
+ if (level == 0)
+ ret -= bbl->leaf_slack;
+ else
+ ret -= bbl->node_slack;
+ return ret;
+}
+
+/*
+ * Compute the desired number of records (or keyptrs) per block that we want to
+ * install at this level in the btree, which must be somewhere between minrecs
+ * and max_npb. The caller is free to install fewer records per block.
+ */
+STATIC unsigned int
+xfs_btree_bload_desired_npb(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_bload *bbl,
+ unsigned int level)
+{
+ unsigned int npb = xfs_btree_bload_max_npb(cur, bbl, level);
+
+ /* Root blocks are not subject to minrecs rules. */
+ if (level == cur->bc_nlevels - 1)
+ return max(1U, npb);
+
+ return max_t(unsigned int, cur->bc_ops->get_minrecs(cur, level), npb);
+}
+
+/*
+ * Compute the number of records to be stored in each block at this level and
+ * the number of blocks for this level. For leaf levels, we must populate an
+ * empty root block even if there are no records, so we have to have at least
+ * one block.
+ */
+STATIC void
+xfs_btree_bload_level_geometry(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_bload *bbl,
+ unsigned int level,
+ uint64_t nr_this_level,
+ unsigned int *avg_per_block,
+ uint64_t *blocks,
+ uint64_t *blocks_with_extra)
+{
+ uint64_t npb;
+ uint64_t dontcare;
+ unsigned int desired_npb;
+ unsigned int maxnr;
+
+ maxnr = cur->bc_ops->get_maxrecs(cur, level);
+
+ /*
+ * Compute the number of blocks we need to fill each block with the
+ * desired number of records/keyptrs per block. Because desired_npb
+ * could be minrecs, we use regular integer division (which rounds
+ * the block count down) so that in the next step the effective # of
+ * items per block will never be less than desired_npb.
+ */
+ desired_npb = xfs_btree_bload_desired_npb(cur, bbl, level);
+ *blocks = div64_u64_rem(nr_this_level, desired_npb, &dontcare);
+ *blocks = max(1ULL, *blocks);
+
+ /*
+ * Compute the number of records that we will actually put in each
+ * block, assuming that we want to spread the records evenly between
+ * the blocks. Take care that the effective # of items per block (npb)
+ * won't exceed maxrecs even for the blocks that get an extra record,
+ * since desired_npb could be maxrecs, and in the previous step we
+ * rounded the block count down.
+ */
+ npb = div64_u64_rem(nr_this_level, *blocks, blocks_with_extra);
+ if (npb > maxnr || (npb == maxnr && *blocks_with_extra > 0)) {
+ (*blocks)++;
+ npb = div64_u64_rem(nr_this_level, *blocks, blocks_with_extra);
+ }
+
+ *avg_per_block = min_t(uint64_t, npb, nr_this_level);
+
+ trace_xfs_btree_bload_level_geometry(cur, level, nr_this_level,
+ *avg_per_block, desired_npb, *blocks,
+ *blocks_with_extra);
+}
+
+/*
+ * Ensure a slack value is appropriate for the btree.
+ *
+ * If the slack value is negative, set slack so that we fill the block to
+ * halfway between minrecs and maxrecs. Make sure the slack is never so large
+ * that we can underflow minrecs.
+ */
+static void
+xfs_btree_bload_ensure_slack(
+ struct xfs_btree_cur *cur,
+ int *slack,
+ int level)
+{
+ int maxr;
+ int minr;
+
+ maxr = cur->bc_ops->get_maxrecs(cur, level);
+ minr = cur->bc_ops->get_minrecs(cur, level);
+
+ /*
+ * If slack is negative, automatically set slack so that we load the
+ * btree block approximately halfway between minrecs and maxrecs.
+ * Generally, this will net us 75% loading.
+ */
+ if (*slack < 0)
+ *slack = maxr - ((maxr + minr) >> 1);
+
+ *slack = min(*slack, maxr - minr);
+}
+
+/*
+ * Prepare a btree cursor for a bulk load operation by computing the geometry
+ * fields in bbl. Caller must ensure that the btree cursor is a staging
+ * cursor. This function can be called multiple times.
+ */
+int
+xfs_btree_bload_compute_geometry(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_bload *bbl,
+ uint64_t nr_records)
+{
+ uint64_t nr_blocks = 0;
+ uint64_t nr_this_level;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ /*
+ * Make sure that the slack values make sense for traditional leaf and
+ * node blocks. Inode-rooted btrees will return different minrecs and
+ * maxrecs values for the root block (level == bc_nlevels - 1). We're
+ * checking levels 0 and 1 here, so set bc_nlevels such that the btree
+ * code doesn't interpret either as the root level.
+ */
+ cur->bc_nlevels = XFS_BTREE_MAXLEVELS - 1;
+ xfs_btree_bload_ensure_slack(cur, &bbl->leaf_slack, 0);
+ xfs_btree_bload_ensure_slack(cur, &bbl->node_slack, 1);
+
+ bbl->nr_records = nr_this_level = nr_records;
+ for (cur->bc_nlevels = 1; cur->bc_nlevels < XFS_BTREE_MAXLEVELS;) {
+ uint64_t level_blocks;
+ uint64_t dontcare64;
+ unsigned int level = cur->bc_nlevels - 1;
+ unsigned int avg_per_block;
+
+ xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
+ &avg_per_block, &level_blocks, &dontcare64);
+
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
+ /*
+ * If all the items we want to store at this level
+ * would fit in the inode root block, then we have our
+ * btree root and are done.
+ *
+ * Note that bmap btrees forbid records in the root.
+ */
+ if (level != 0 && nr_this_level <= avg_per_block) {
+ nr_blocks++;
+ break;
+ }
+
+ /*
+ * Otherwise, we have to store all the items for this
+ * level in traditional btree blocks and therefore need
+ * another level of btree to point to those blocks.
+ *
+ * We have to re-compute the geometry for each level of
+ * an inode-rooted btree because the geometry differs
+ * between a btree root in an inode fork and a
+ * traditional btree block.
+ *
+ * This distinction is made in the btree code based on
+ * whether level == bc_nlevels - 1. Based on the
+ * previous root block size check against the root
+ * block geometry, we know that we aren't yet ready to
+ * populate the root. Increment bc_nlevels and
+ * recalculate the geometry for a traditional
+ * block-based btree level.
+ */
+ cur->bc_nlevels++;
+ xfs_btree_bload_level_geometry(cur, bbl, level,
+ nr_this_level, &avg_per_block,
+ &level_blocks, &dontcare64);
+ } else {
+ /*
+ * If all the items we want to store at this level
+ * would fit in a single root block, we're done.
+ */
+ if (nr_this_level <= avg_per_block) {
+ nr_blocks++;
+ break;
+ }
+
+ /* Otherwise, we need another level of btree. */
+ cur->bc_nlevels++;
+ }
+
+ nr_blocks += level_blocks;
+ nr_this_level = level_blocks;
+ }
+
+ if (cur->bc_nlevels == XFS_BTREE_MAXLEVELS)
+ return -EOVERFLOW;
+
+ bbl->btree_height = cur->bc_nlevels;
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+ bbl->nr_blocks = nr_blocks - 1;
+ else
+ bbl->nr_blocks = nr_blocks;
+ return 0;
+}
+
+/* Bulk load a btree given the parameters and geometry established in bbl. */
+int
+xfs_btree_bload(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_bload *bbl,
+ void *priv)
+{
+ struct list_head buffers_list;
+ union xfs_btree_ptr child_ptr;
+ union xfs_btree_ptr ptr;
+ struct xfs_buf *bp = NULL;
+ struct xfs_btree_block *block = NULL;
+ uint64_t nr_this_level = bbl->nr_records;
+ uint64_t blocks;
+ uint64_t i;
+ uint64_t blocks_with_extra;
+ uint64_t total_blocks = 0;
+ unsigned int avg_per_block;
+ unsigned int level = 0;
+ int ret;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ INIT_LIST_HEAD(&buffers_list);
+ cur->bc_nlevels = bbl->btree_height;
+ xfs_btree_set_ptr_null(cur, &child_ptr);
+ xfs_btree_set_ptr_null(cur, &ptr);
+
+ xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
+ &avg_per_block, &blocks, &blocks_with_extra);
+
+ /* Load each leaf block. */
+ for (i = 0; i < blocks; i++) {
+ unsigned int nr_this_block = avg_per_block;
+
+ /*
+ * Due to rounding, btree blocks will not be evenly populated
+ * in most cases. blocks_with_extra tells us how many blocks
+ * will receive an extra record to distribute the excess across
+ * the current level as evenly as possible.
+ */
+ if (i < blocks_with_extra)
+ nr_this_block++;
+
+ ret = xfs_btree_bload_prep_block(cur, bbl, &buffers_list, level,
+ nr_this_block, &ptr, &bp, &block, priv);
+ if (ret)
+ goto out;
+
+ trace_xfs_btree_bload_block(cur, level, i, blocks, &ptr,
+ nr_this_block);
+
+ ret = xfs_btree_bload_leaf(cur, nr_this_block, bbl->get_record,
+ block, priv);
+ if (ret)
+ goto out;
+
+ /*
+ * Record the leftmost leaf pointer so we know where to start
+ * with the first node level.
+ */
+ if (i == 0)
+ xfs_btree_copy_ptrs(cur, &child_ptr, &ptr, 1);
+ }
+ total_blocks += blocks;
+ xfs_btree_bload_drop_buf(&buffers_list, &bp);
+
+ /* Populate the internal btree nodes. */
+ for (level = 1; level < cur->bc_nlevels; level++) {
+ union xfs_btree_ptr first_ptr;
+
+ nr_this_level = blocks;
+ block = NULL;
+ xfs_btree_set_ptr_null(cur, &ptr);
+
+ xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
+ &avg_per_block, &blocks, &blocks_with_extra);
+
+ /* Load each node block. */
+ for (i = 0; i < blocks; i++) {
+ unsigned int nr_this_block = avg_per_block;
+
+ if (i < blocks_with_extra)
+ nr_this_block++;
+
+ ret = xfs_btree_bload_prep_block(cur, bbl,
+ &buffers_list, level, nr_this_block,
+ &ptr, &bp, &block, priv);
+ if (ret)
+ goto out;
+
+ trace_xfs_btree_bload_block(cur, level, i, blocks,
+ &ptr, nr_this_block);
+
+ ret = xfs_btree_bload_node(cur, nr_this_block,
+ &child_ptr, block);
+ if (ret)
+ goto out;
+
+ /*
+ * Record the leftmost node pointer so that we know
+ * where to start the next node level above this one.
+ */
+ if (i == 0)
+ xfs_btree_copy_ptrs(cur, &first_ptr, &ptr, 1);
+ }
+ total_blocks += blocks;
+ xfs_btree_bload_drop_buf(&buffers_list, &bp);
+ xfs_btree_copy_ptrs(cur, &child_ptr, &first_ptr, 1);
+ }
+
+ /* Initialize the new root. */
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
+ ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
+ cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
+ cur->bc_ino.ifake->if_blocks = total_blocks - 1;
+ } else {
+ cur->bc_ag.afake->af_root = be32_to_cpu(ptr.s);
+ cur->bc_ag.afake->af_levels = cur->bc_nlevels;
+ cur->bc_ag.afake->af_blocks = total_blocks;
+ }
+
+ /*
+ * Write the new blocks to disk. If the ordered list isn't empty after
+ * that, then something went wrong and we have to fail. This should
+ * never happen, but we'll check anyway.
+ */
+ ret = xfs_buf_delwri_submit(&buffers_list);
+ if (ret)
+ goto out;
+ if (!list_empty(&buffers_list)) {
+ ASSERT(list_empty(&buffers_list));
+ ret = -EIO;
+ }
+
+out:
+ xfs_buf_delwri_cancel(&buffers_list);
+ if (bp)
+ xfs_buf_relse(bp);
+ return ret;
+}
diff --git a/fs/xfs/libxfs/xfs_btree_staging.h b/fs/xfs/libxfs/xfs_btree_staging.h
new file mode 100644
index 000000000000..643f0f9b2994
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_btree_staging.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#ifndef __XFS_BTREE_STAGING_H__
+#define __XFS_BTREE_STAGING_H__
+
+/* Fake root for an AG-rooted btree. */
+struct xbtree_afakeroot {
+ /* AG block number of the new btree root. */
+ xfs_agblock_t af_root;
+
+ /* Height of the new btree. */
+ unsigned int af_levels;
+
+ /* Number of blocks used by the btree. */
+ unsigned int af_blocks;
+};
+
+/* Cursor interactions with fake roots for AG-rooted btrees. */
+void xfs_btree_stage_afakeroot(struct xfs_btree_cur *cur,
+ struct xbtree_afakeroot *afake);
+void xfs_btree_commit_afakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
+ struct xfs_buf *agbp, const struct xfs_btree_ops *ops);
+
+/* Fake root for an inode-rooted btree. */
+struct xbtree_ifakeroot {
+ /* Fake inode fork. */
+ struct xfs_ifork *if_fork;
+
+ /* Number of blocks used by the btree. */
+ int64_t if_blocks;
+
+ /* Height of the new btree. */
+ unsigned int if_levels;
+
+ /* Number of bytes available for this fork in the inode. */
+ unsigned int if_fork_size;
+
+ /* Fork format. */
+ unsigned int if_format;
+
+ /* Number of records. */
+ unsigned int if_extents;
+};
+
+/* Cursor interactions with fake roots for inode-rooted btrees. */
+void xfs_btree_stage_ifakeroot(struct xfs_btree_cur *cur,
+ struct xbtree_ifakeroot *ifake,
+ struct xfs_btree_ops **new_ops);
+void xfs_btree_commit_ifakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
+ int whichfork, const struct xfs_btree_ops *ops);
+
+/* Bulk loading of staged btrees. */
+typedef int (*xfs_btree_bload_get_record_fn)(struct xfs_btree_cur *cur, void *priv);
+typedef int (*xfs_btree_bload_claim_block_fn)(struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr, void *priv);
+typedef size_t (*xfs_btree_bload_iroot_size_fn)(struct xfs_btree_cur *cur,
+ unsigned int nr_this_level, void *priv);
+
+struct xfs_btree_bload {
+ /*
+ * This function will be called nr_records times to load records into
+ * the btree. The function does this by setting the cursor's bc_rec
+ * field in in-core format. Records must be returned in sort order.
+ */
+ xfs_btree_bload_get_record_fn get_record;
+
+ /*
+ * This function will be called nr_blocks times to obtain a pointer
+ * to a new btree block on disk. Callers must preallocate all space
+ * for the new btree before calling xfs_btree_bload, and this function
+ * is what claims that reservation.
+ */
+ xfs_btree_bload_claim_block_fn claim_block;
+
+ /*
+ * This function should return the size of the in-core btree root
+ * block. It is only necessary for XFS_BTREE_ROOT_IN_INODE btree
+ * types.
+ */
+ xfs_btree_bload_iroot_size_fn iroot_size;
+
+ /*
+ * The caller should set this to the number of records that will be
+ * stored in the new btree.
+ */
+ uint64_t nr_records;
+
+ /*
+ * Number of free records to leave in each leaf block. If the caller
+ * sets this to -1, the slack value will be calculated to be halfway
+ * between maxrecs and minrecs. This typically leaves the block 75%
+ * full. Note that slack values are not enforced on inode root blocks.
+ */
+ int leaf_slack;
+
+ /*
+ * Number of free key/ptr pairs to leave in each node block. This
+ * field has the same semantics as leaf_slack.
+ */
+ int node_slack;
+
+ /*
+ * The xfs_btree_bload_compute_geometry function will set this to the
+ * number of btree blocks needed to store nr_records records.
+ */
+ uint64_t nr_blocks;
+
+ /*
+ * The xfs_btree_bload_compute_geometry function will set this to the
+ * height of the new btree.
+ */
+ unsigned int btree_height;
+};
+
+int xfs_btree_bload_compute_geometry(struct xfs_btree_cur *cur,
+ struct xfs_btree_bload *bbl, uint64_t nr_records);
+int xfs_btree_bload(struct xfs_btree_cur *cur, struct xfs_btree_bload *bbl,
+ void *priv);
+
+#endif /* __XFS_BTREE_STAGING_H__ */
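Pulling the pieces together, a caller might wire up the bload structure roughly as below; the xfs_foobt_* functions and the rebuild context are hypothetical, and only the xfs_btree_bload* calls come from this patch:

    /* Hypothetical rebuild context holding presorted records. */
    struct xfs_foobt_rebuild {
    	struct xfs_alloc_rec_incore	*records;
    	uint64_t			next;
    };

    static int
    xfs_foobt_get_record(
    	struct xfs_btree_cur	*cur,
    	void			*priv)
    {
    	struct xfs_foobt_rebuild	*rb = priv;

    	/* Hand the next sorted record to the loader via bc_rec. */
    	cur->bc_rec.a = rb->records[rb->next++];
    	return 0;
    }

    struct xfs_btree_bload	bbl = {
    	.get_record	= xfs_foobt_get_record,
    	.claim_block	= xfs_foobt_claim_block,	/* hypothetical */
    	.leaf_slack	= -1,	/* take the ~75%-full default */
    	.node_slack	= -1,
    };

    error = xfs_btree_bload_compute_geometry(cur, &bbl, nr_records);
    if (error)
    	return error;
    /* ... preallocate bbl.nr_blocks blocks for ->claim_block() ... */
    error = xfs_btree_bload(cur, &bbl, &rb);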
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 875e04f82541..897749c41f36 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -590,7 +590,7 @@ xfs_da3_split(
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
- xfs_buf_corruption_error(oldblk->bp);
+ xfs_buf_mark_corrupt(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
@@ -603,7 +603,7 @@ xfs_da3_split(
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
- xfs_buf_corruption_error(oldblk->bp);
+ xfs_buf_mark_corrupt(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
@@ -1624,7 +1624,7 @@ xfs_da3_node_lookup_int(
}
if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
- xfs_buf_corruption_error(blk->bp);
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
}
@@ -1639,7 +1639,7 @@ xfs_da3_node_lookup_int(
/* Tree taller than we can handle; bail out! */
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
- xfs_buf_corruption_error(blk->bp);
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
}
@@ -1647,7 +1647,7 @@ xfs_da3_node_lookup_int(
if (blkno == args->geo->leafblk)
expected_level = nodehdr.level - 1;
else if (expected_level != nodehdr.level) {
- xfs_buf_corruption_error(blk->bp);
+ xfs_buf_mark_corrupt(blk->bp);
return -EFSCORRUPTED;
} else
expected_level--;
@@ -1986,7 +1986,8 @@ xfs_da3_path_shift(
ASSERT(path != NULL);
ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
level = (path->active-1) - 1; /* skip bottom layer in path */
- for (blk = &path->blk[level]; level >= 0; blk--, level--) {
+ for (; level >= 0; level--) {
+ blk = &path->blk[level];
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
blk->bp->b_addr);
@@ -2520,8 +2521,10 @@ xfs_dabuf_map(
*/
if (nirecs > 1) {
map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
- if (!map)
+ if (!map) {
+ error = -ENOMEM;
goto out_free_irecs;
+ }
*mapp = map;
}
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 0f4fbb0889ff..53e503b6f186 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -57,9 +57,10 @@ typedef struct xfs_da_args {
const uint8_t *name; /* string (maybe not NULL terminated) */
int namelen; /* length of string (maybe no NULL) */
uint8_t filetype; /* filetype of inode for directories */
- uint8_t *value; /* set of bytes (maybe contain NULLs) */
+ void *value; /* set of bytes (maybe contain NULLs) */
int valuelen; /* length of value */
- int flags; /* argument flags (eg: ATTR_NOCREATE) */
+ unsigned int attr_filter; /* XFS_ATTR_{ROOT,SECURE,INCOMPLETE} */
+ unsigned int attr_flags; /* XATTR_{CREATE,REPLACE} */
xfs_dahash_t hashval; /* hash value of name */
xfs_ino_t inumber; /* input/output inode number */
struct xfs_inode *dp; /* directory inode to manipulate */
@@ -88,8 +89,7 @@ typedef struct xfs_da_args {
#define XFS_DA_OP_ADDNAME 0x0004 /* this is an add operation */
#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
-#define XFS_DA_OP_ALLOCVAL 0x0020 /* lookup to alloc buffer if found */
-#define XFS_DA_OP_INCOMPLETE 0x0040 /* lookup INCOMPLETE attr keys */
+#define XFS_DA_OP_NOTIME 0x0020 /* don't update inode timestamps */
#define XFS_DA_OP_FLAGS \
{ XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
@@ -97,8 +97,7 @@ typedef struct xfs_da_args {
{ XFS_DA_OP_ADDNAME, "ADDNAME" }, \
{ XFS_DA_OP_OKNOENT, "OKNOENT" }, \
{ XFS_DA_OP_CILOOKUP, "CILOOKUP" }, \
- { XFS_DA_OP_ALLOCVAL, "ALLOCVAL" }, \
- { XFS_DA_OP_INCOMPLETE, "INCOMPLETE" }
+ { XFS_DA_OP_NOTIME, "NOTIME" }
/*
* Storage for holding state during Btree searches and split/join ops.
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 734837a9b51a..08c0a4d98b89 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -692,19 +692,7 @@ struct xfs_attr3_leafblock {
#define XFS_ATTR_ROOT (1 << XFS_ATTR_ROOT_BIT)
#define XFS_ATTR_SECURE (1 << XFS_ATTR_SECURE_BIT)
#define XFS_ATTR_INCOMPLETE (1 << XFS_ATTR_INCOMPLETE_BIT)
-
-/*
- * Conversion macros for converting namespace bits from argument flags
- * to ondisk flags.
- */
-#define XFS_ATTR_NSP_ARGS_MASK (ATTR_ROOT | ATTR_SECURE)
#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK(flags) ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
-#define XFS_ATTR_NSP_ARGS(flags) ((flags) & XFS_ATTR_NSP_ARGS_MASK)
-#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
- ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
-#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
- ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
/*
* Alignment for namelist and valuelist entries (since they are mixed
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index d6ced59b9567..1dbf2f980a26 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -114,6 +114,23 @@ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
.verify_struct = xfs_dir3_block_verify,
};
+static xfs_failaddr_t
+xfs_dir3_block_header_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = dp->i_mount;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+
+ if (be64_to_cpu(hdr3->owner) != dp->i_ino)
+ return __this_address;
+ }
+
+ return NULL;
+}
+
int
xfs_dir3_block_read(
struct xfs_trans *tp,
@@ -121,12 +138,24 @@ xfs_dir3_block_read(
struct xfs_buf **bpp)
{
struct xfs_mount *mp = dp->i_mount;
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, 0, bpp,
XFS_DATA_FORK, &xfs_dir3_block_buf_ops);
- if (!err && tp && *bpp)
- xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
+ if (err || !*bpp)
+ return err;
+
+ /* Check things that we can't do in the verifier. */
+ fa = xfs_dir3_block_header_check(dp, *bpp);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
return err;
}
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index b9eba8213180..375b3edb2ad2 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -394,6 +394,22 @@ static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
.verify_write = xfs_dir3_data_write_verify,
};
+static xfs_failaddr_t
+xfs_dir3_data_header_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = dp->i_mount;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_data_hdr *hdr3 = bp->b_addr;
+
+ if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
+ return __this_address;
+ }
+
+ return NULL;
+}
int
xfs_dir3_data_read(
@@ -403,12 +419,24 @@ xfs_dir3_data_read(
unsigned int flags,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, bno, flags, bpp, XFS_DATA_FORK,
&xfs_dir3_data_buf_ops);
- if (!err && tp && *bpp)
- xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
+ if (err || !*bpp)
+ return err;
+
+ /* Check things that we can't do in the verifier. */
+ fa = xfs_dir3_data_header_check(dp, *bpp);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
return err;
}
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index a131b520aac7..95d2a3f92d75 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -1383,7 +1383,7 @@ xfs_dir2_leaf_removename(
ltp = xfs_dir2_leaf_tail_p(geo, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
if (be16_to_cpu(bestsp[db]) != oldbest) {
- xfs_buf_corruption_error(lbp);
+ xfs_buf_mark_corrupt(lbp);
return -EFSCORRUPTED;
}
/*
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index a0cc5e240306..6ac4aad98cd7 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -194,6 +194,8 @@ xfs_dir3_free_header_check(
return __this_address;
if (be32_to_cpu(hdr3->nvalid) < be32_to_cpu(hdr3->nused))
return __this_address;
+ if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
+ return __this_address;
} else {
struct xfs_dir2_free_hdr *hdr = bp->b_addr;
@@ -226,8 +228,9 @@ __xfs_dir3_free_read(
/* Check things that we can't do in the verifier. */
fa = xfs_dir3_free_header_check(dp, fbno, *bpp);
if (fa) {
- xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
+ __xfs_buf_mark_corrupt(*bpp, fa);
xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
return -EFSCORRUPTED;
}
@@ -439,7 +442,7 @@ xfs_dir2_leaf_to_node(
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
if (be32_to_cpu(ltp->bestcount) >
(uint)dp->i_d.di_size / args->geo->blksize) {
- xfs_buf_corruption_error(lbp);
+ xfs_buf_mark_corrupt(lbp);
return -EFSCORRUPTED;
}
@@ -513,7 +516,7 @@ xfs_dir2_leafn_add(
* into other peoples memory
*/
if (index < 0) {
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
}
@@ -800,7 +803,7 @@ xfs_dir2_leafn_lookup_for_entry(
xfs_dir3_leaf_check(dp, bp);
if (leafhdr.count <= 0) {
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
return -EFSCORRUPTED;
}
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 77e9fa385980..045556e78ee2 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -497,6 +497,23 @@ static inline bool xfs_sb_version_hascrc(struct xfs_sb *sbp)
return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
}
+/*
+ * v5 file systems support v3 inodes only; earlier file systems support
+ * v1 and v2 inodes.
+ */
+static inline bool xfs_sb_version_has_v3inode(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
+}
+
+static inline bool xfs_dinode_good_version(struct xfs_sb *sbp,
+ uint8_t version)
+{
+ if (xfs_sb_version_has_v3inode(sbp))
+ return version == 3;
+ return version == 1 || version == 2;
+}
+
static inline bool xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
{
return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
@@ -560,7 +577,6 @@ xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */
#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
-#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr))
#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
#define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \
@@ -707,7 +723,6 @@ typedef struct xfs_agf {
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
-#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr))
/*
* Size of the unlinked inode hash table in the agi.
@@ -775,7 +790,6 @@ typedef struct xfs_agi {
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
-#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr))
/*
* The third a.g. block contains the a.g. freelist, an array
@@ -783,21 +797,15 @@ typedef struct xfs_agi {
*/
#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
-#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr))
+#define XFS_BUF_TO_AGFL(bp) ((struct xfs_agfl *)((bp)->b_addr))
-#define XFS_BUF_TO_AGFL_BNO(mp, bp) \
- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
- &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
- (__be32 *)(bp)->b_addr)
-
-typedef struct xfs_agfl {
+struct xfs_agfl {
__be32 agfl_magicnum;
__be32 agfl_seqno;
uuid_t agfl_uuid;
__be64 agfl_lsn;
__be32 agfl_crc;
- __be32 agfl_bno[]; /* actually xfs_agfl_size(mp) */
-} __attribute__((packed)) xfs_agfl_t;
+} __attribute__((packed));
#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
@@ -946,8 +954,12 @@ enum xfs_dinode_fmt {
/*
* Inode size for given fs.
*/
-#define XFS_LITINO(mp, version) \
- ((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
+#define XFS_DINODE_SIZE(sbp) \
+ (xfs_sb_version_has_v3inode(sbp) ? \
+ sizeof(struct xfs_dinode) : \
+ offsetof(struct xfs_dinode, di_crc))
+#define XFS_LITINO(mp) \
+ ((mp)->m_sb.sb_inodesize - XFS_DINODE_SIZE(&(mp)->m_sb))
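As a sanity check on the new form, assuming the usual dinode core sizes (96 bytes up to di_crc, 176 bytes for a full v3 dinode; these numbers are not part of this patch) and 512-byte inodes:

    /*
     * Worked example, 512-byte inodes:
     *   v5: XFS_DINODE_SIZE = sizeof(struct xfs_dinode)           = 176
     *       XFS_LITINO      = 512 - 176                           = 336
     *   v4: XFS_DINODE_SIZE = offsetof(struct xfs_dinode, di_crc) =  96
     *       XFS_LITINO      = 512 - 96                            = 416
     */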
/*
* Inode data & attribute fork sizes, per inode.
@@ -956,13 +968,9 @@ enum xfs_dinode_fmt {
#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3))
#define XFS_DFORK_DSIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? \
- XFS_DFORK_BOFF(dip) : \
- XFS_LITINO(mp, (dip)->di_version))
+ (XFS_DFORK_Q(dip) ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp))
#define XFS_DFORK_ASIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? \
- XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \
- 0)
+ (XFS_DFORK_Q(dip) ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0)
#define XFS_DFORK_SIZE(dip,mp,w) \
((w) == XFS_DATA_FORK ? \
XFS_DFORK_DSIZE(dip, mp) : \
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index ef95ca07d084..245188e4f6d3 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -568,10 +568,40 @@ typedef struct xfs_fsop_setdm_handlereq {
struct fsdmidata __user *data; /* DMAPI data */
} xfs_fsop_setdm_handlereq_t;
+/*
+ * Flags passed in xfs_attr_multiop.am_flags for the attr ioctl interface.
+ *
+ * NOTE: Must match the values declared in libattr without the XFS_IOC_ prefix.
+ */
+#define XFS_IOC_ATTR_ROOT 0x0002 /* use attrs in root namespace */
+#define XFS_IOC_ATTR_SECURE 0x0008 /* use attrs in security namespace */
+#define XFS_IOC_ATTR_CREATE 0x0010 /* fail if attr already exists */
+#define XFS_IOC_ATTR_REPLACE 0x0020 /* fail if attr does not exist */
+
typedef struct xfs_attrlist_cursor {
__u32 opaque[4];
} xfs_attrlist_cursor_t;
+/*
+ * Define how lists of attribute names are returned to userspace from the
+ * XFS_IOC_ATTRLIST_BY_HANDLE ioctl. struct xfs_attrlist is the header at the
+ * beginning of the returned buffer, and each entry in al_offset contains the
+ * relative offset of an xfs_attrlist_ent containing the actual entry.
+ *
+ * NOTE: struct xfs_attrlist must match struct attrlist defined in libattr, and
+ * struct xfs_attrlist_ent must match struct attrlist_ent defined in libattr.
+ */
+struct xfs_attrlist {
+ __s32 al_count; /* number of entries in attrlist */
+ __s32 al_more; /* T/F: more attrs (do call again) */
+ __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */
+};
+
+struct xfs_attrlist_ent { /* data from attr_list() */
+ __u32 a_valuelen; /* number of bytes in value of attr */
+ char a_name[1]; /* attr name (NULL terminated) */
+};
+
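A sketch of how userspace might walk a buffer laid out this way, assuming buf holds the data returned by XFS_IOC_ATTRLIST_BY_HANDLE (hypothetical helper, not part of the patch; error handling elided):

	#include <stdio.h>

	static void sketch_print_attrs(const char *buf)
	{
		const struct xfs_attrlist *al = (const struct xfs_attrlist *)buf;
		__s32 i;

		for (i = 0; i < al->al_count; i++) {
			/* al_offset[] holds byte offsets from the buffer start */
			const struct xfs_attrlist_ent *ent =
				(const struct xfs_attrlist_ent *)
					(buf + al->al_offset[i]);

			printf("%s: %u bytes\n", ent->a_name,
					(unsigned int)ent->a_valuelen);
		}
		if (al->al_more)
			/* re-issue the ioctl with the saved cursor */ ;
	}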
typedef struct xfs_fsop_attrlist_handlereq {
struct xfs_fsop_handlereq hreq; /* handle interface structure */
struct xfs_attrlist_cursor pos; /* opaque cookie, list offset */
@@ -589,7 +619,7 @@ typedef struct xfs_attr_multiop {
void __user *am_attrname;
void __user *am_attrvalue;
__u32 am_length;
- __u32 am_flags;
+ __u32 am_flags; /* XFS_IOC_ATTR_* */
} xfs_attr_multiop_t;
typedef struct xfs_fsop_attrmulti_handlereq {
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index bf161e930f1d..7fcf62b324b0 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -105,7 +105,7 @@ xfs_inobt_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_private.a.agno;
+ xfs_agnumber_t agno = cur->bc_ag.agno;
union xfs_btree_rec *rec;
int error;
uint64_t realfree;
@@ -177,7 +177,7 @@ xfs_inobt_insert(
xfs_btnum_t btnum)
{
struct xfs_btree_cur *cur;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_agi *agi = agbp->b_addr;
xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agino_t thisino;
int i;
@@ -304,7 +304,7 @@ xfs_ialloc_inode_init(
 * That means for v3 inodes we log the entire buffer rather than just the
* inode cores.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
version = 3;
ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
@@ -339,7 +339,7 @@ xfs_ialloc_inode_init(
xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
int ioffset = i << mp->m_sb.sb_inodelog;
- uint isize = xfs_dinode_size(version);
+ uint isize = XFS_DINODE_SIZE(&mp->m_sb);
free = xfs_make_iptr(mp, fbuf, i);
free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
@@ -525,7 +525,7 @@ xfs_inobt_insert_sprec(
bool merge) /* merge or replace */
{
struct xfs_btree_cur *cur;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_agi *agi = agbp->b_addr;
xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
int error;
int i;
@@ -658,7 +658,7 @@ xfs_ialloc_ag_alloc(
* chunk of inodes. If the filesystem is striped, this will fill
* an entire stripe unit with inodes.
*/
- agi = XFS_BUF_TO_AGI(agbp);
+ agi = agbp->b_addr;
newino = be32_to_cpu(agi->agi_newino);
agno = be32_to_cpu(agi->agi_seqno);
args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
@@ -1130,7 +1130,7 @@ xfs_dialloc_ag_inobt(
xfs_ino_t *inop)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_agi *agi = agbp->b_addr;
xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
@@ -1583,7 +1583,7 @@ xfs_dialloc_ag(
xfs_ino_t *inop)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_agi *agi = agbp->b_addr;
xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
@@ -1943,7 +1943,7 @@ xfs_difree_inobt(
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_agi *agi = agbp->b_addr;
xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
struct xfs_perag *pag;
struct xfs_btree_cur *cur;
@@ -2079,7 +2079,7 @@ xfs_difree_finobt(
xfs_agino_t agino,
struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_agi *agi = agbp->b_addr;
xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
struct xfs_btree_cur *cur;
struct xfs_inobt_rec_incore rec;
@@ -2489,9 +2489,8 @@ xfs_ialloc_log_agi(
sizeof(xfs_agi_t)
};
#ifdef DEBUG
- xfs_agi_t *agi; /* allocation group header */
+ struct xfs_agi *agi = bp->b_addr;
- agi = XFS_BUF_TO_AGI(bp);
ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
#endif
@@ -2523,14 +2522,13 @@ xfs_agi_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_mount;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(bp);
+ struct xfs_agi *agi = bp->b_addr;
int i;
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
- if (!xfs_log_check_lsn(mp,
- be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
+ if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
return __this_address;
}
@@ -2593,6 +2591,7 @@ xfs_agi_write_verify(
{
struct xfs_mount *mp = bp->b_mount;
struct xfs_buf_log_item *bip = bp->b_log_item;
+ struct xfs_agi *agi = bp->b_addr;
xfs_failaddr_t fa;
fa = xfs_agi_verify(bp);
@@ -2605,7 +2604,7 @@ xfs_agi_write_verify(
return;
if (bip)
- XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
}
@@ -2661,7 +2660,7 @@ xfs_ialloc_read_agi(
if (error)
return error;
- agi = XFS_BUF_TO_AGI(*bpp);
+ agi = (*bpp)->b_addr;
pag = xfs_perag_get(mp, agno);
if (!pag->pagi_init) {
pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
@@ -2873,7 +2872,7 @@ xfs_ialloc_setup_geometry(
* cannot change the behavior.
*/
igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
int new_size = igeo->inode_cluster_size_raw;
new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
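For context, the scaling in this hunk worked through with assumed constants (XFS_INODE_BIG_CLUSTER_SIZE = 8192 and XFS_DINODE_MIN_SIZE = 256 are the conventional values, not taken from this patch):

	/*
	 *   v5 fs, 512-byte inodes:  8192 * (512 / 256)  = 16384
	 *   v5 fs, 2048-byte inodes: 8192 * (2048 / 256) = 65536
	 *   v4 fs, any inode size:   8192 (no scaling)
	 */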
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index b82992f795aa..b2c122ad8f0e 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -12,6 +12,7 @@
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
@@ -20,7 +21,6 @@
#include "xfs_trans.h"
#include "xfs_rmap.h"
-
STATIC int
xfs_inobt_get_minrecs(
struct xfs_btree_cur *cur,
@@ -34,7 +34,7 @@ xfs_inobt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno,
+ cur->bc_ag.agbp, cur->bc_ag.agno,
cur->bc_btnum);
}
@@ -44,8 +44,8 @@ xfs_inobt_set_root(
union xfs_btree_ptr *nptr,
int inc) /* level change */
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agi *agi = agbp->b_addr;
agi->agi_root = nptr->s;
be32_add_cpu(&agi->agi_level, inc);
@@ -58,8 +58,8 @@ xfs_finobt_set_root(
union xfs_btree_ptr *nptr,
int inc) /* level change */
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agi *agi = agbp->b_addr;
agi->agi_free_root = nptr->s;
be32_add_cpu(&agi->agi_free_level, inc);
@@ -83,7 +83,7 @@ __xfs_inobt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.oinfo = XFS_RMAP_OINFO_INOBT;
- args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.agno, sbno);
args.minlen = 1;
args.maxlen = 1;
args.prod = 1;
@@ -212,9 +212,9 @@ xfs_inobt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
+ struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
+ ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_root;
}
@@ -224,9 +224,9 @@ xfs_finobt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
+ struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
+ ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
ptr->s = agi->agi_free_root;
}
@@ -400,32 +400,27 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
};
/*
- * Allocate a new inode btree cursor.
+ * Initialize a new inode btree cursor.
*/
-struct xfs_btree_cur * /* new inode btree cursor */
-xfs_inobt_init_cursor(
+static struct xfs_btree_cur *
+xfs_inobt_init_common(
struct xfs_mount *mp, /* file system mount point */
struct xfs_trans *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* buffer for agi structure */
xfs_agnumber_t agno, /* allocation group number */
xfs_btnum_t btnum) /* ialloc or free ino btree */
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
struct xfs_btree_cur *cur;
cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
-
cur->bc_tp = tp;
cur->bc_mp = mp;
cur->bc_btnum = btnum;
if (btnum == XFS_BTNUM_INO) {
- cur->bc_nlevels = be32_to_cpu(agi->agi_level);
- cur->bc_ops = &xfs_inobt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
+ cur->bc_ops = &xfs_inobt_ops;
} else {
- cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
- cur->bc_ops = &xfs_finobt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
+ cur->bc_ops = &xfs_finobt_ops;
}
cur->bc_blocklog = mp->m_sb.sb_blocklog;
@@ -433,12 +428,75 @@ xfs_inobt_init_cursor(
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
- cur->bc_private.a.agbp = agbp;
- cur->bc_private.a.agno = agno;
+ cur->bc_ag.agno = agno;
+ return cur;
+}
+
+/* Create an inode btree cursor. */
+struct xfs_btree_cur *
+xfs_inobt_init_cursor(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_btnum_t btnum)
+{
+ struct xfs_btree_cur *cur;
+ struct xfs_agi *agi = agbp->b_addr;
+ cur = xfs_inobt_init_common(mp, tp, agno, btnum);
+ if (btnum == XFS_BTNUM_INO)
+ cur->bc_nlevels = be32_to_cpu(agi->agi_level);
+ else
+ cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
+ cur->bc_ag.agbp = agbp;
return cur;
}
+/* Create an inode btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_inobt_stage_cursor(
+ struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake,
+ xfs_agnumber_t agno,
+ xfs_btnum_t btnum)
+{
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_inobt_init_common(mp, NULL, agno, btnum);
+ xfs_btree_stage_afakeroot(cur, afake);
+ return cur;
+}
+
+/*
+ * Install a new inobt btree root. Caller is responsible for invalidating
+ * and freeing the old btree blocks.
+ */
+void
+xfs_inobt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp)
+{
+ struct xfs_agi *agi = agbp->b_addr;
+ struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ if (cur->bc_btnum == XFS_BTNUM_INO) {
+ agi->agi_root = cpu_to_be32(afake->af_root);
+ agi->agi_level = cpu_to_be32(afake->af_levels);
+ xfs_ialloc_log_agi(tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
+ } else {
+ agi->agi_free_root = cpu_to_be32(afake->af_root);
+ agi->agi_free_level = cpu_to_be32(afake->af_levels);
+ xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREE_ROOT |
+ XFS_AGI_FREE_LEVEL);
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
+ }
+}
+
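Taken together with xfs_inobt_stage_cursor() above, the intended lifecycle looks roughly like the sketch below. The bulk-load step in the middle comes from xfs_btree_staging.h and is only hinted at here; everything else uses functions introduced or kept by this patch:

	struct xbtree_afakeroot	afake = { 0 };
	struct xfs_btree_cur	*cur;

	cur = xfs_inobt_stage_cursor(mp, &afake, agno, XFS_BTNUM_INO);
	/* ... bulk-load the new btree blocks, filling in afake ... */
	xfs_inobt_commit_staged_btree(cur, tp, agbp);	/* install af_root */
	xfs_btree_del_cursor(cur, 0);			/* now a regular cursor */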
/*
* Calculate number of records in an inobt btree block.
*/
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index 951305ecaae1..35bbd978c272 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -48,6 +48,9 @@ struct xfs_mount;
extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t,
xfs_btnum_t);
+struct xfs_btree_cur *xfs_inobt_stage_cursor(struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+ xfs_btnum_t btnum);
extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
/* ir_holemask to inode allocation bitmap conversion */
@@ -68,4 +71,7 @@ int xfs_inobt_cur(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_btnum_t btnum,
struct xfs_btree_cur **curpp, struct xfs_buf **agi_bpp);
+void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
+ struct xfs_trans *tp, struct xfs_buf *agbp);
+
#endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 8afacfe4be0a..39c5a6e24915 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -44,17 +44,6 @@ xfs_inobp_check(
}
#endif
-bool
-xfs_dinode_good_version(
- struct xfs_mount *mp,
- __u8 version)
-{
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return version == 3;
-
- return version == 1 || version == 2;
-}
-
/*
* If we are doing readahead on an inode buffer, we might be in log recovery
* reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -93,7 +82,7 @@ xfs_inode_buf_verify(
dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
- xfs_dinode_good_version(mp, dip->di_version) &&
+ xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
xfs_verify_agino_or_null(mp, agno, unlinked_ino);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP))) {
@@ -205,16 +194,14 @@ xfs_inode_from_disk(
struct xfs_icdinode *to = &ip->i_d;
struct inode *inode = VFS_I(ip);
-
/*
* Convert v1 inodes immediately to v2 inode format as this is the
* minimum inode version format we support in the rest of the code.
+ * They will also be unconditionally written back to disk as v2 inodes.
*/
- to->di_version = from->di_version;
- if (to->di_version == 1) {
+ if (unlikely(from->di_version == 1)) {
set_nlink(inode, be16_to_cpu(from->di_onlink));
to->di_projid = 0;
- to->di_version = 2;
} else {
set_nlink(inode, be32_to_cpu(from->di_nlink));
to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
@@ -222,8 +209,8 @@ xfs_inode_from_disk(
}
to->di_format = from->di_format;
- to->di_uid = be32_to_cpu(from->di_uid);
- to->di_gid = be32_to_cpu(from->di_gid);
+ i_uid_write(inode, be32_to_cpu(from->di_uid));
+ i_gid_write(inode, be32_to_cpu(from->di_gid));
to->di_flushiter = be16_to_cpu(from->di_flushiter);
/*
@@ -252,7 +239,7 @@ xfs_inode_from_disk(
to->di_dmstate = be16_to_cpu(from->di_dmstate);
to->di_flags = be16_to_cpu(from->di_flags);
- if (to->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
inode_set_iversion_queried(inode,
be64_to_cpu(from->di_changecount));
to->di_crtime.tv_sec = be32_to_cpu(from->di_crtime.t_sec);
@@ -274,10 +261,9 @@ xfs_inode_to_disk(
to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
to->di_onlink = 0;
- to->di_version = from->di_version;
to->di_format = from->di_format;
- to->di_uid = cpu_to_be32(from->di_uid);
- to->di_gid = cpu_to_be32(from->di_gid);
+ to->di_uid = cpu_to_be32(i_uid_read(inode));
+ to->di_gid = cpu_to_be32(i_gid_read(inode));
to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);
@@ -303,7 +289,8 @@ xfs_inode_to_disk(
to->di_dmstate = cpu_to_be16(from->di_dmstate);
to->di_flags = cpu_to_be16(from->di_flags);
- if (from->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ to->di_version = 3;
to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.tv_sec);
to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.tv_nsec);
@@ -315,6 +302,7 @@ xfs_inode_to_disk(
uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
to->di_flushiter = 0;
} else {
+ to->di_version = 2;
to->di_flushiter = cpu_to_be16(from->di_flushiter);
}
}
@@ -428,7 +416,7 @@ xfs_dinode_verify_forkoff(
case XFS_DINODE_FMT_LOCAL: /* fall through ... */
case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
case XFS_DINODE_FMT_BTREE:
- if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
+ if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
return __this_address;
break;
default:
@@ -454,7 +442,7 @@ xfs_dinode_verify(
/* Verify v3 integrity information first */
if (dip->di_version >= 3) {
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_sb_version_has_v3inode(&mp->m_sb))
return __this_address;
if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
XFS_DINODE_CRC_OFF))
@@ -629,10 +617,9 @@ xfs_iread(
/* shortcut IO on inode allocation if possible */
if ((iget_flags & XFS_IGET_CREATE) &&
- xfs_sb_version_hascrc(&mp->m_sb) &&
+ xfs_sb_version_has_v3inode(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_IKEEP)) {
VFS_I(ip)->i_generation = prandom_u32();
- ip->i_d.di_version = 3;
return 0;
}
@@ -674,7 +661,6 @@ xfs_iread(
* Partial initialisation of the in-core inode. Just the bits
* that xfs_ialloc won't overwrite or relies on being correct.
*/
- ip->i_d.di_version = dip->di_version;
VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
@@ -688,7 +674,6 @@ xfs_iread(
VFS_I(ip)->i_mode = 0;
}
- ASSERT(ip->i_d.di_version >= 2);
ip->i_delayed_blks = 0;
/*
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index fd94b1078722..9b373dcf9e34 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -16,11 +16,8 @@ struct xfs_dinode;
* format specific structures at the appropriate time.
*/
struct xfs_icdinode {
- int8_t di_version; /* inode version */
int8_t di_format; /* format of di_c data */
uint16_t di_flushiter; /* incremented on flush */
- uint32_t di_uid; /* owner's user id */
- uint32_t di_gid; /* owner's group id */
uint32_t di_projid; /* owner's project id */
xfs_fsize_t di_size; /* number of bytes in file */
xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */
@@ -61,8 +58,6 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
void xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
struct xfs_dinode *to);
-bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
-
#if defined(DEBUG)
void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#else
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index ad2b9c313fd2..518c6f0ec3a6 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -183,7 +183,7 @@ xfs_iformat_local(
*/
if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
xfs_warn(ip->i_mount,
- "corrupt inode %Lu (bad size %d for local fork, size = %d).",
+ "corrupt inode %Lu (bad size %d for local fork, size = %zd).",
(unsigned long long) ip->i_ino, size,
XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
xfs_inode_verifier_error(ip, -EFSCORRUPTED,
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 500333d0101e..668ee942be22 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -46,14 +46,9 @@ struct xfs_ifork {
(ip)->i_afp : \
(ip)->i_cowfp))
#define XFS_IFORK_DSIZE(ip) \
- (XFS_IFORK_Q(ip) ? \
- XFS_IFORK_BOFF(ip) : \
- XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version))
+ (XFS_IFORK_Q(ip) ? XFS_IFORK_BOFF(ip) : XFS_LITINO((ip)->i_mount))
#define XFS_IFORK_ASIZE(ip) \
- (XFS_IFORK_Q(ip) ? \
- XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version) - \
- XFS_IFORK_BOFF(ip) : \
- 0)
+ (XFS_IFORK_Q(ip) ? XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : 0)
#define XFS_IFORK_SIZE(ip,w) \
((w) == XFS_DATA_FORK ? \
XFS_IFORK_DSIZE(ip) : \
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 9bac0d2e56dc..e3400c9c71cd 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -424,12 +424,10 @@ struct xfs_log_dinode {
/* structure must be padded to 64 bit alignment */
};
-static inline uint xfs_log_dinode_size(int version)
-{
- if (version == 3)
- return sizeof(struct xfs_log_dinode);
- return offsetof(struct xfs_log_dinode, di_next_unlinked);
-}
+#define xfs_log_dinode_size(mp) \
+ (xfs_sb_version_has_v3inode(&(mp)->m_sb) ? \
+ sizeof(struct xfs_log_dinode) : \
+ offsetof(struct xfs_log_dinode, di_next_unlinked))
/*
* Buffer Log Format definitions
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 6e1665f2cb67..2076627243b0 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -46,7 +46,7 @@ xfs_refcount_lookup_le(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -63,7 +63,7 @@ xfs_refcount_lookup_ge(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
XFS_LOOKUP_GE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -80,7 +80,7 @@ xfs_refcount_lookup_eq(
xfs_agblock_t bno,
int *stat)
{
- trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+ trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
XFS_LOOKUP_LE);
cur->bc_rec.rc.rc_startblock = bno;
cur->bc_rec.rc.rc_blockcount = 0;
@@ -108,7 +108,7 @@ xfs_refcount_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_private.a.agno;
+ xfs_agnumber_t agno = cur->bc_ag.agno;
union xfs_btree_rec *rec;
int error;
xfs_agblock_t realstart;
@@ -119,7 +119,7 @@ xfs_refcount_get_rec(
xfs_refcount_btrec_to_irec(rec, irec);
- agno = cur->bc_private.a.agno;
+ agno = cur->bc_ag.agno;
if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
goto out_bad_rec;
@@ -144,7 +144,7 @@ xfs_refcount_get_rec(
if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
goto out_bad_rec;
- trace_xfs_refcount_get(cur->bc_mp, cur->bc_private.a.agno, irec);
+ trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.agno, irec);
return 0;
out_bad_rec:
@@ -169,14 +169,14 @@ xfs_refcount_update(
union xfs_btree_rec rec;
int error;
- trace_xfs_refcount_update(cur->bc_mp, cur->bc_private.a.agno, irec);
+ trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.agno, irec);
rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
error = xfs_btree_update(cur, &rec);
if (error)
trace_xfs_refcount_update_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -193,7 +193,7 @@ xfs_refcount_insert(
{
int error;
- trace_xfs_refcount_insert(cur->bc_mp, cur->bc_private.a.agno, irec);
+ trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.agno, irec);
cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
@@ -208,7 +208,7 @@ xfs_refcount_insert(
out_error:
if (error)
trace_xfs_refcount_insert_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -234,7 +234,7 @@ xfs_refcount_delete(
error = -EFSCORRUPTED;
goto out_error;
}
- trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
+ trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.agno, &irec);
error = xfs_btree_delete(cur, i);
if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
error = -EFSCORRUPTED;
@@ -246,7 +246,7 @@ xfs_refcount_delete(
out_error:
if (error)
trace_xfs_refcount_delete_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -366,7 +366,7 @@ xfs_refcount_split_extent(
return 0;
*shape_changed = true;
- trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.agno,
&rcext, agbno);
/* Establish the right extent. */
@@ -391,7 +391,7 @@ xfs_refcount_split_extent(
out_error:
trace_xfs_refcount_split_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -411,7 +411,7 @@ xfs_refcount_merge_center_extents(
int found_rec;
trace_xfs_refcount_merge_center_extents(cur->bc_mp,
- cur->bc_private.a.agno, left, center, right);
+ cur->bc_ag.agno, left, center, right);
/*
* Make sure the center and right extents are not in the btree.
@@ -468,7 +468,7 @@ xfs_refcount_merge_center_extents(
out_error:
trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -487,7 +487,7 @@ xfs_refcount_merge_left_extent(
int found_rec;
trace_xfs_refcount_merge_left_extent(cur->bc_mp,
- cur->bc_private.a.agno, left, cleft);
+ cur->bc_ag.agno, left, cleft);
/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
if (cleft->rc_refcount > 1) {
@@ -530,7 +530,7 @@ xfs_refcount_merge_left_extent(
out_error:
trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -548,7 +548,7 @@ xfs_refcount_merge_right_extent(
int found_rec;
trace_xfs_refcount_merge_right_extent(cur->bc_mp,
- cur->bc_private.a.agno, cright, right);
+ cur->bc_ag.agno, cright, right);
/*
* If the extent ending at agbno+aglen (cright) wasn't synthesized,
@@ -594,7 +594,7 @@ xfs_refcount_merge_right_extent(
out_error:
trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -679,13 +679,13 @@ xfs_refcount_find_left_extents(
cleft->rc_blockcount = aglen;
cleft->rc_refcount = 1;
}
- trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.agno,
left, cleft, agbno);
return error;
out_error:
trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -768,13 +768,13 @@ xfs_refcount_find_right_extents(
cright->rc_blockcount = aglen;
cright->rc_refcount = 1;
}
- trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.agno,
cright, right, agbno + aglen);
return error;
out_error:
trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -883,7 +883,7 @@ xfs_refcount_still_have_space(
{
unsigned long overhead;
- overhead = cur->bc_private.a.priv.refc.shape_changes *
+ overhead = cur->bc_ag.refc.shape_changes *
xfs_allocfree_log_count(cur->bc_mp, 1);
overhead *= cur->bc_mp->m_sb.sb_blocksize;
@@ -891,17 +891,17 @@ xfs_refcount_still_have_space(
* Only allow 2 refcount extent updates per transaction if the
* refcount continue update "error" has been injected.
*/
- if (cur->bc_private.a.priv.refc.nr_ops > 2 &&
+ if (cur->bc_ag.refc.nr_ops > 2 &&
XFS_TEST_ERROR(false, cur->bc_mp,
XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
return false;
- if (cur->bc_private.a.priv.refc.nr_ops == 0)
+ if (cur->bc_ag.refc.nr_ops == 0)
return true;
else if (overhead > cur->bc_tp->t_log_res)
return false;
return cur->bc_tp->t_log_res - overhead >
- cur->bc_private.a.priv.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
+ cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}
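The renamed nr_ops/shape_changes fields feed the same heuristic as before; one illustrative evaluation, with every number made up for the example:

	/*
	 *   shape_changes = 2, xfs_allocfree_log_count(mp, 1) = 10,
	 *   sb_blocksize = 4096:
	 *       overhead = 2 * 10 * 4096 = 81920 bytes
	 *
	 *   t_log_res = 200000, nr_ops = 5, XFS_REFCOUNT_ITEM_OVERHEAD = 32:
	 *       200000 - 81920 = 118080 > 5 * 32 = 160  =>  keep going
	 */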
/*
@@ -952,7 +952,7 @@ xfs_refcount_adjust_extents(
ext.rc_startblock - *agbno);
tmp.rc_refcount = 1 + adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &tmp);
+ cur->bc_ag.agno, &tmp);
/*
* Either cover the hole (increment) or
@@ -968,10 +968,10 @@ xfs_refcount_adjust_extents(
error = -EFSCORRUPTED;
goto out_error;
}
- cur->bc_private.a.priv.refc.nr_ops++;
+ cur->bc_ag.refc.nr_ops++;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_private.a.agno,
+ cur->bc_ag.agno,
tmp.rc_startblock);
xfs_bmap_add_free(cur->bc_tp, fsbno,
tmp.rc_blockcount, oinfo);
@@ -998,12 +998,12 @@ xfs_refcount_adjust_extents(
goto skip;
ext.rc_refcount += adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &ext);
+ cur->bc_ag.agno, &ext);
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
if (error)
goto out_error;
- cur->bc_private.a.priv.refc.nr_ops++;
+ cur->bc_ag.refc.nr_ops++;
} else if (ext.rc_refcount == 1) {
error = xfs_refcount_delete(cur, &found_rec);
if (error)
@@ -1012,11 +1012,11 @@ xfs_refcount_adjust_extents(
error = -EFSCORRUPTED;
goto out_error;
}
- cur->bc_private.a.priv.refc.nr_ops++;
+ cur->bc_ag.refc.nr_ops++;
goto advloop;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
- cur->bc_private.a.agno,
+ cur->bc_ag.agno,
ext.rc_startblock);
xfs_bmap_add_free(cur->bc_tp, fsbno, ext.rc_blockcount,
oinfo);
@@ -1035,7 +1035,7 @@ advloop:
return error;
out_error:
trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -1057,10 +1057,10 @@ xfs_refcount_adjust(
*new_agbno = agbno;
*new_aglen = aglen;
if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
- trace_xfs_refcount_increase(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.agno,
agbno, aglen);
else
- trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.agno,
agbno, aglen);
/*
@@ -1088,7 +1088,7 @@ xfs_refcount_adjust(
if (shape_changed)
shape_changes++;
if (shape_changes)
- cur->bc_private.a.priv.refc.shape_changes++;
+ cur->bc_ag.refc.shape_changes++;
/* Now that we've taken care of the ends, adjust the middle extents */
error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
@@ -1099,7 +1099,7 @@ xfs_refcount_adjust(
return 0;
out_error:
- trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.agno,
error, _RET_IP_);
return error;
}
@@ -1115,7 +1115,7 @@ xfs_refcount_finish_one_cleanup(
if (rcur == NULL)
return;
- agbp = rcur->bc_private.a.agbp;
+ agbp = rcur->bc_ag.agbp;
xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
@@ -1165,9 +1165,9 @@ xfs_refcount_finish_one(
* the startblock, get one now.
*/
rcur = *pcur;
- if (rcur != NULL && rcur->bc_private.a.agno != agno) {
- nr_ops = rcur->bc_private.a.priv.refc.nr_ops;
- shape_changes = rcur->bc_private.a.priv.refc.shape_changes;
+ if (rcur != NULL && rcur->bc_ag.agno != agno) {
+ nr_ops = rcur->bc_ag.refc.nr_ops;
+ shape_changes = rcur->bc_ag.refc.shape_changes;
xfs_refcount_finish_one_cleanup(tp, rcur, 0);
rcur = NULL;
*pcur = NULL;
@@ -1183,8 +1183,8 @@ xfs_refcount_finish_one(
error = -ENOMEM;
goto out_cur;
}
- rcur->bc_private.a.priv.refc.nr_ops = nr_ops;
- rcur->bc_private.a.priv.refc.shape_changes = shape_changes;
+ rcur->bc_ag.refc.nr_ops = nr_ops;
+ rcur->bc_ag.refc.shape_changes = shape_changes;
}
*pcur = rcur;
@@ -1303,7 +1303,7 @@ xfs_refcount_find_shared(
int have;
int error;
- trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.agno,
agbno, aglen);
/* By default, skip the whole range */
@@ -1383,12 +1383,12 @@ xfs_refcount_find_shared(
done:
trace_xfs_refcount_find_shared_result(cur->bc_mp,
- cur->bc_private.a.agno, *fbno, *flen);
+ cur->bc_ag.agno, *fbno, *flen);
out_error:
if (error)
trace_xfs_refcount_find_shared_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -1485,7 +1485,7 @@ xfs_refcount_adjust_cow_extents(
tmp.rc_blockcount = aglen;
tmp.rc_refcount = 1;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &tmp);
+ cur->bc_ag.agno, &tmp);
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
@@ -1513,7 +1513,7 @@ xfs_refcount_adjust_cow_extents(
ext.rc_refcount = 0;
trace_xfs_refcount_modify_extent(cur->bc_mp,
- cur->bc_private.a.agno, &ext);
+ cur->bc_ag.agno, &ext);
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
@@ -1529,7 +1529,7 @@ xfs_refcount_adjust_cow_extents(
return error;
out_error:
trace_xfs_refcount_modify_extent_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -1575,7 +1575,7 @@ xfs_refcount_adjust_cow(
return 0;
out_error:
- trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.agno,
error, _RET_IP_);
return error;
}
@@ -1589,7 +1589,7 @@ __xfs_refcount_cow_alloc(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
+ trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.agno,
agbno, aglen);
/* Add refcount btree reservation */
@@ -1606,7 +1606,7 @@ __xfs_refcount_cow_free(
xfs_agblock_t agbno,
xfs_extlen_t aglen)
{
- trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
+ trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.agno,
agbno, aglen);
/* Remove refcount btree reservation */
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 38529dbacd55..7fd6044a4f78 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -12,6 +12,7 @@
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
@@ -25,7 +26,7 @@ xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno);
+ cur->bc_ag.agbp, cur->bc_ag.agno);
}
STATIC void
@@ -34,8 +35,8 @@ xfs_refcountbt_set_root(
union xfs_btree_ptr *ptr,
int inc)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
@@ -57,8 +58,8 @@ xfs_refcountbt_alloc_block(
union xfs_btree_ptr *new,
int *stat)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
struct xfs_alloc_arg args; /* block allocation args */
int error; /* error return value */
@@ -66,7 +67,7 @@ xfs_refcountbt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+ args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
xfs_refc_block(args.mp));
args.oinfo = XFS_RMAP_OINFO_REFC;
args.minlen = args.maxlen = args.prod = 1;
@@ -75,13 +76,13 @@ xfs_refcountbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto out_error;
- trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
args.agbno, 1);
if (args.fsbno == NULLFSBLOCK) {
*stat = 0;
return 0;
}
- ASSERT(args.agno == cur->bc_private.a.agno);
+ ASSERT(args.agno == cur->bc_ag.agno);
ASSERT(args.len == 1);
new->s = cpu_to_be32(args.agbno);
@@ -101,12 +102,12 @@ xfs_refcountbt_free_block(
struct xfs_buf *bp)
{
struct xfs_mount *mp = cur->bc_mp;
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
int error;
- trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
@@ -169,9 +170,9 @@ xfs_refcountbt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+ struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_refcount_root;
}
@@ -311,41 +312,90 @@ static const struct xfs_btree_ops xfs_refcountbt_ops = {
};
/*
- * Allocate a new refcount btree cursor.
+ * Initialize a new refcount btree cursor.
*/
-struct xfs_btree_cur *
-xfs_refcountbt_init_cursor(
+static struct xfs_btree_cur *
+xfs_refcountbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- struct xfs_buf *agbp,
xfs_agnumber_t agno)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
ASSERT(agno != NULLAGNUMBER);
ASSERT(agno < mp->m_sb.sb_agcount);
- cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
+ cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
cur->bc_tp = tp;
cur->bc_mp = mp;
cur->bc_btnum = XFS_BTNUM_REFC;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
- cur->bc_ops = &xfs_refcountbt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
- cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
-
- cur->bc_private.a.agbp = agbp;
- cur->bc_private.a.agno = agno;
+ cur->bc_ag.agno = agno;
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
- cur->bc_private.a.priv.refc.nr_ops = 0;
- cur->bc_private.a.priv.refc.shape_changes = 0;
+ cur->bc_ag.refc.nr_ops = 0;
+ cur->bc_ag.refc.shape_changes = 0;
+ cur->bc_ops = &xfs_refcountbt_ops;
+ return cur;
+}
+/* Create a btree cursor. */
+struct xfs_btree_cur *
+xfs_refcountbt_init_cursor(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_refcountbt_init_common(mp, tp, agno);
+ cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
+ cur->bc_ag.agbp = agbp;
return cur;
}
+/* Create a btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_refcountbt_stage_cursor(
+ struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake,
+ xfs_agnumber_t agno)
+{
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_refcountbt_init_common(mp, NULL, agno);
+ xfs_btree_stage_afakeroot(cur, afake);
+ return cur;
+}
+
+/*
+ * Swap in the new btree root. Once we pass this point, the newly rebuilt btree
+ * is in place and we have to kill off all the old btree blocks.
+ */
+void
+xfs_refcountbt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ agf->agf_refcount_root = cpu_to_be32(afake->af_root);
+ agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
+ agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
+ XFS_AGF_REFCOUNT_ROOT |
+ XFS_AGF_REFCOUNT_LEVEL);
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
+}
+
/*
* Calculate the number of records in a refcount btree block.
*/
@@ -420,7 +470,7 @@ xfs_refcountbt_calc_reserves(
if (error)
return error;
- agf = XFS_BUF_TO_AGF(agbp);
+ agf = agbp->b_addr;
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_refcount_blocks);
xfs_trans_brelse(tp, agbp);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index ba416f71c824..69dc515db671 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -13,6 +13,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xbtree_afakeroot;
/*
* Btree block header size
@@ -46,6 +47,8 @@ struct xfs_mount;
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp,
xfs_agnumber_t agno);
+struct xfs_btree_cur *xfs_refcountbt_stage_cursor(struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
@@ -58,4 +61,7 @@ extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
struct xfs_trans *tp, xfs_agnumber_t agno, xfs_extlen_t *ask,
xfs_extlen_t *used);
+void xfs_refcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
+ struct xfs_trans *tp, struct xfs_buf *agbp);
+
#endif /* __XFS_REFCOUNT_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index ff9412f113c4..27c39268c31f 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -79,7 +79,7 @@ xfs_rmap_update(
union xfs_btree_rec rec;
int error;
- trace_xfs_rmap_update(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.agno,
irec->rm_startblock, irec->rm_blockcount,
irec->rm_owner, irec->rm_offset, irec->rm_flags);
@@ -91,7 +91,7 @@ xfs_rmap_update(
error = xfs_btree_update(cur, &rec);
if (error)
trace_xfs_rmap_update_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -107,7 +107,7 @@ xfs_rmap_insert(
int i;
int error;
- trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
+ trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.agno, agbno,
len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -133,7 +133,7 @@ xfs_rmap_insert(
done:
if (error)
trace_xfs_rmap_insert_error(rcur->bc_mp,
- rcur->bc_private.a.agno, error, _RET_IP_);
+ rcur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -149,7 +149,7 @@ xfs_rmap_delete(
int i;
int error;
- trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
+ trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.agno, agbno,
len, owner, offset, flags);
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -170,7 +170,7 @@ xfs_rmap_delete(
done:
if (error)
trace_xfs_rmap_delete_error(rcur->bc_mp,
- rcur->bc_private.a.agno, error, _RET_IP_);
+ rcur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -197,7 +197,7 @@ xfs_rmap_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- xfs_agnumber_t agno = cur->bc_private.a.agno;
+ xfs_agnumber_t agno = cur->bc_ag.agno;
union xfs_btree_rec *rec;
int error;
@@ -260,7 +260,7 @@ xfs_rmap_find_left_neighbor_helper(
struct xfs_find_left_neighbor_info *info = priv;
trace_xfs_rmap_find_left_neighbor_candidate(cur->bc_mp,
- cur->bc_private.a.agno, rec->rm_startblock,
+ cur->bc_ag.agno, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -312,7 +312,7 @@ xfs_rmap_find_left_neighbor(
info.stat = stat;
trace_xfs_rmap_find_left_neighbor_query(cur->bc_mp,
- cur->bc_private.a.agno, bno, 0, owner, offset, flags);
+ cur->bc_ag.agno, bno, 0, owner, offset, flags);
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_find_left_neighbor_helper, &info);
@@ -320,7 +320,7 @@ xfs_rmap_find_left_neighbor(
error = 0;
if (*stat)
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_private.a.agno, irec->rm_startblock,
+ cur->bc_ag.agno, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner,
irec->rm_offset, irec->rm_flags);
return error;
@@ -336,7 +336,7 @@ xfs_rmap_lookup_le_range_helper(
struct xfs_find_left_neighbor_info *info = priv;
trace_xfs_rmap_lookup_le_range_candidate(cur->bc_mp,
- cur->bc_private.a.agno, rec->rm_startblock,
+ cur->bc_ag.agno, rec->rm_startblock,
rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
rec->rm_flags);
@@ -385,14 +385,14 @@ xfs_rmap_lookup_le_range(
info.stat = stat;
trace_xfs_rmap_lookup_le_range(cur->bc_mp,
- cur->bc_private.a.agno, bno, 0, owner, offset, flags);
+ cur->bc_ag.agno, bno, 0, owner, offset, flags);
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_lookup_le_range_helper, &info);
if (error == -ECANCELED)
error = 0;
if (*stat)
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_private.a.agno, irec->rm_startblock,
+ cur->bc_ag.agno, irec->rm_startblock,
irec->rm_blockcount, irec->rm_owner,
irec->rm_offset, irec->rm_flags);
return error;
@@ -498,7 +498,7 @@ xfs_rmap_unmap(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
/*
@@ -522,7 +522,7 @@ xfs_rmap_unmap(
goto out_error;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_private.a.agno, ltrec.rm_startblock,
+ cur->bc_ag.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
ltoff = ltrec.rm_offset;
@@ -588,7 +588,7 @@ xfs_rmap_unmap(
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
/* exact match, simply remove the record from rmap tree */
- trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
ltrec.rm_startblock, ltrec.rm_blockcount,
ltrec.rm_owner, ltrec.rm_offset,
ltrec.rm_flags);
@@ -666,7 +666,7 @@ xfs_rmap_unmap(
else
cur->bc_rec.r.rm_offset = offset + len;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
cur->bc_rec.r.rm_startblock,
cur->bc_rec.r.rm_blockcount,
cur->bc_rec.r.rm_owner,
@@ -678,11 +678,11 @@ xfs_rmap_unmap(
}
out_done:
- trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_unmap_error(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_unmap_error(mp, cur->bc_ag.agno,
error, _RET_IP_);
return error;
}
@@ -773,7 +773,7 @@ xfs_rmap_map(
(flags & XFS_RMAP_BMBT_BLOCK);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
@@ -795,7 +795,7 @@ xfs_rmap_map(
goto out_error;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_private.a.agno, ltrec.rm_startblock,
+ cur->bc_ag.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
@@ -831,7 +831,7 @@ xfs_rmap_map(
goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_private.a.agno, gtrec.rm_startblock,
+ cur->bc_ag.agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
gtrec.rm_offset, gtrec.rm_flags);
if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
@@ -870,7 +870,7 @@ xfs_rmap_map(
* result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
*/
ltrec.rm_blockcount += gtrec.rm_blockcount;
- trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
gtrec.rm_startblock,
gtrec.rm_blockcount,
gtrec.rm_owner,
@@ -921,7 +921,7 @@ xfs_rmap_map(
cur->bc_rec.r.rm_owner = owner;
cur->bc_rec.r.rm_offset = offset;
cur->bc_rec.r.rm_flags = flags;
- trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
owner, offset, flags);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -932,11 +932,11 @@ xfs_rmap_map(
}
}
- trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
- trace_xfs_rmap_map_error(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_map_error(mp, cur->bc_ag.agno,
error, _RET_IP_);
return error;
}
@@ -1010,7 +1010,7 @@ xfs_rmap_convert(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
/*
@@ -1034,7 +1034,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_private.a.agno, PREV.rm_startblock,
+ cur->bc_ag.agno, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
PREV.rm_offset, PREV.rm_flags);
@@ -1076,7 +1076,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
- cur->bc_private.a.agno, LEFT.rm_startblock,
+ cur->bc_ag.agno, LEFT.rm_startblock,
LEFT.rm_blockcount, LEFT.rm_owner,
LEFT.rm_offset, LEFT.rm_flags);
if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
@@ -1114,7 +1114,7 @@ xfs_rmap_convert(
goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_private.a.agno, RIGHT.rm_startblock,
+ cur->bc_ag.agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
if (bno + len == RIGHT.rm_startblock &&
@@ -1132,7 +1132,7 @@ xfs_rmap_convert(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
+ trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
_RET_IP_);
/* reset the cursor back to PREV */
@@ -1162,7 +1162,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags);
@@ -1180,7 +1180,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
PREV.rm_flags);
@@ -1210,7 +1210,7 @@ xfs_rmap_convert(
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
PREV.rm_flags);
@@ -1247,7 +1247,7 @@ xfs_rmap_convert(
error = -EFSCORRUPTED;
goto done;
}
- trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags);
@@ -1326,7 +1326,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1383,7 +1383,7 @@ xfs_rmap_convert(
NEW.rm_blockcount = len;
NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
len, owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1414,7 +1414,7 @@ xfs_rmap_convert(
NEW = PREV;
NEW.rm_blockcount = offset - PREV.rm_offset;
cur->bc_rec.r = NEW;
- trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
NEW.rm_startblock, NEW.rm_blockcount,
NEW.rm_owner, NEW.rm_offset,
NEW.rm_flags);
@@ -1441,7 +1441,7 @@ xfs_rmap_convert(
/* new middle extent - newext */
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
cur->bc_rec.r.rm_flags |= newext;
- trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
owner, offset, newext);
error = xfs_btree_insert(cur, &i);
if (error)
@@ -1465,12 +1465,12 @@ xfs_rmap_convert(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
done:
if (error)
trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -1506,7 +1506,7 @@ xfs_rmap_convert_shared(
(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
new_endoff = offset + len;
- trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
/*
@@ -1573,7 +1573,7 @@ xfs_rmap_convert_shared(
goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_private.a.agno, RIGHT.rm_startblock,
+ cur->bc_ag.agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
@@ -1589,7 +1589,7 @@ xfs_rmap_convert_shared(
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
- trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
+ trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
_RET_IP_);
/*
* Switch out based on the FILLING and CONTIG state bits.
@@ -1880,12 +1880,12 @@ xfs_rmap_convert_shared(
ASSERT(0);
}
- trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
done:
if (error)
trace_xfs_rmap_convert_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -1923,7 +1923,7 @@ xfs_rmap_unmap_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
/*
@@ -2072,12 +2072,12 @@ xfs_rmap_unmap_shared(
goto out_error;
}
- trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
trace_xfs_rmap_unmap_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -2112,7 +2112,7 @@ xfs_rmap_map_shared(
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
flags |= XFS_RMAP_UNWRITTEN;
- trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
/* Is there a left record that abuts our range? */
@@ -2138,7 +2138,7 @@ xfs_rmap_map_shared(
goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
- cur->bc_private.a.agno, gtrec.rm_startblock,
+ cur->bc_ag.agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
gtrec.rm_offset, gtrec.rm_flags);
@@ -2231,12 +2231,12 @@ xfs_rmap_map_shared(
goto out_error;
}
- trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
+ trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
unwritten, oinfo);
out_error:
if (error)
trace_xfs_rmap_map_error(cur->bc_mp,
- cur->bc_private.a.agno, error, _RET_IP_);
+ cur->bc_ag.agno, error, _RET_IP_);
return error;
}
@@ -2336,7 +2336,7 @@ xfs_rmap_finish_one_cleanup(
if (rcur == NULL)
return;
- agbp = rcur->bc_private.a.agbp;
+ agbp = rcur->bc_ag.agbp;
xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
@@ -2386,7 +2386,7 @@ xfs_rmap_finish_one(
* the startblock, get one now.
*/
rcur = *pcur;
- if (rcur != NULL && rcur->bc_private.a.agno != agno) {
+ if (rcur != NULL && rcur->bc_ag.agno != agno) {
xfs_rmap_finish_one_cleanup(tp, rcur, 0);
rcur = NULL;
*pcur = NULL;
@@ -2694,7 +2694,6 @@ struct xfs_rmap_key_state {
uint64_t owner;
uint64_t offset;
unsigned int flags;
- bool has_rmap;
};
/* For each rmap given, figure out if it doesn't match the key we want. */
@@ -2709,7 +2708,6 @@ xfs_rmap_has_other_keys_helper(
if (rks->owner == rec->rm_owner && rks->offset == rec->rm_offset &&
((rks->flags & rec->rm_flags) & XFS_RMAP_KEY_FLAGS) == rks->flags)
return 0;
- rks->has_rmap = true;
return -ECANCELED;
}
@@ -2731,7 +2729,7 @@ xfs_rmap_has_other_keys(
int error;
xfs_owner_info_unpack(oinfo, &rks.owner, &rks.offset, &rks.flags);
- rks.has_rmap = false;
+ *has_rmap = false;
low.rm_startblock = bno;
memset(&high, 0xFF, sizeof(high));
@@ -2739,11 +2737,12 @@ xfs_rmap_has_other_keys(
error = xfs_rmap_query_range(cur, &low, &high,
xfs_rmap_has_other_keys_helper, &rks);
- if (error < 0)
- return error;
+ if (error == -ECANCELED) {
+ *has_rmap = true;
+ return 0;
+ }
- *has_rmap = rks.has_rmap;
- return 0;
+ return error;
}
const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE = {
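The xfs_rmap_has_other_keys() rework above drops the has_rmap field from the helper struct: the query-range callback already aborts the walk by returning -ECANCELED, so the caller can translate that abort directly into *has_rmap = true and pass any other error through unchanged. A runnable userspace sketch of the same early-exit convention follows; the iterator and all names in it are invented for illustration, not kernel APIs.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int for_each_rec(const int *recs, int n,
			int (*fn)(int rec, void *priv), void *priv)
{
	for (int i = 0; i < n; i++) {
		int error = fn(recs[i], priv);

		if (error)
			return error;	/* callback aborted the walk */
	}
	return 0;
}

/* Return -ECANCELED on the first record that is not the key we want. */
static int has_other_keys_helper(int rec, void *priv)
{
	int key = *(int *)priv;

	if (rec == key)
		return 0;
	return -ECANCELED;		/* found something else: stop early */
}

static int has_other_keys(const int *recs, int n, int key, bool *has_other)
{
	int error;

	*has_other = false;
	error = for_each_rec(recs, n, has_other_keys_helper, &key);
	if (error == -ECANCELED) {
		*has_other = true;	/* early exit means a hit */
		return 0;
	}
	return error;			/* real errors pass through */
}

int main(void)
{
	int recs[] = { 7, 7, 3, 7 };
	bool other;

	has_other_keys(recs, 4, 7, &other);
	printf("other keys: %s\n", other ? "yes" : "no");
	return 0;
}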
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index fc78efa52c94..b7c05314d07c 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -14,6 +14,7 @@
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
@@ -51,7 +52,7 @@ xfs_rmapbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno);
+ cur->bc_ag.agbp, cur->bc_ag.agno);
}
STATIC void
@@ -60,8 +61,8 @@ xfs_rmapbt_set_root(
union xfs_btree_ptr *ptr,
int inc)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int btnum = cur->bc_btnum;
struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
@@ -83,25 +84,25 @@ xfs_rmapbt_alloc_block(
union xfs_btree_ptr *new,
int *stat)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
int error;
xfs_agblock_t bno;
/* Allocate the new block from the freelist. If we can't, give up. */
- error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+ error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
&bno, 1);
if (error)
return error;
- trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
bno, 1);
if (bno == NULLAGBLOCK) {
*stat = 0;
return 0;
}
- xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1,
false);
xfs_trans_agbtree_delta(cur->bc_tp, 1);
@@ -109,7 +110,7 @@ xfs_rmapbt_alloc_block(
be32_add_cpu(&agf->agf_rmap_blocks, 1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
- xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_private.a.agno);
+ xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno);
*stat = 1;
return 0;
@@ -120,13 +121,13 @@ xfs_rmapbt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_agblock_t bno;
int error;
bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
- trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno,
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
@@ -138,7 +139,7 @@ xfs_rmapbt_free_block(
XFS_EXTENT_BUSY_SKIP_DISCARD);
xfs_trans_agbtree_delta(cur->bc_tp, -1);
- xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_private.a.agno);
+ xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_ag.agno);
return 0;
}
@@ -215,9 +216,9 @@ xfs_rmapbt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+ struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_roots[cur->bc_btnum];
}
@@ -448,17 +449,12 @@ static const struct xfs_btree_ops xfs_rmapbt_ops = {
.recs_inorder = xfs_rmapbt_recs_inorder,
};
-/*
- * Allocate a new allocation btree cursor.
- */
-struct xfs_btree_cur *
-xfs_rmapbt_init_cursor(
+static struct xfs_btree_cur *
+xfs_rmapbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- struct xfs_buf *agbp,
xfs_agnumber_t agno)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
@@ -468,17 +464,68 @@ xfs_rmapbt_init_cursor(
cur->bc_btnum = XFS_BTNUM_RMAP;
cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
+ cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
+ cur->bc_ag.agno = agno;
cur->bc_ops = &xfs_rmapbt_ops;
+
+ return cur;
+}
+
+/* Create a new reverse mapping btree cursor. */
+struct xfs_btree_cur *
+xfs_rmapbt_init_cursor(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_rmapbt_init_common(mp, tp, agno);
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
+ cur->bc_ag.agbp = agbp;
+ return cur;
+}
- cur->bc_private.a.agbp = agbp;
- cur->bc_private.a.agno = agno;
+/* Create a new reverse mapping btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_rmapbt_stage_cursor(
+ struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake,
+ xfs_agnumber_t agno)
+{
+ struct xfs_btree_cur *cur;
+ cur = xfs_rmapbt_init_common(mp, NULL, agno);
+ xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
/*
+ * Install a new reverse mapping btree root. Caller is responsible for
+ * invalidating and freeing the old btree blocks.
+ */
+void
+xfs_rmapbt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+ agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+ agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
+ XFS_AGF_RMAP_BLOCKS);
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
+}
+
+/*
* Calculate number of records in an rmap btree block.
*/
int
@@ -569,7 +616,7 @@ xfs_rmapbt_calc_reserves(
if (error)
return error;
- agf = XFS_BUF_TO_AGF(agbp);
+ agf = agbp->b_addr;
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_rmap_blocks);
xfs_trans_brelse(tp, agbp);
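The split of xfs_rmapbt_init_cursor() above factors the common cursor setup into xfs_rmapbt_init_common() so that xfs_rmapbt_stage_cursor() can build a cursor against an xbtree_afakeroot instead of a live AGF; only xfs_rmapbt_commit_staged_btree() copies the staged root, level count, and block count into the AGF and logs them. A toy userspace sketch of that stage-then-commit shape, with structures invented purely for illustration:

#include <stdio.h>

struct afakeroot_analog {
	unsigned int root;	/* staged root block */
	unsigned int levels;	/* staged tree height */
	unsigned int blocks;	/* staged block count */
};

struct agf_analog {
	unsigned int root;
	unsigned int levels;
	unsigned int rmap_blocks;
};

/* Repair builds the replacement tree against the fake root only... */
static void build_staged_tree(struct afakeroot_analog *afake)
{
	afake->root = 42;
	afake->levels = 2;
	afake->blocks = 9;
}

/* ...and a successful commit is what makes it visible in the header. */
static void commit_staged_tree(struct agf_analog *agf,
			       const struct afakeroot_analog *afake)
{
	agf->root = afake->root;
	agf->levels = afake->levels;
	agf->rmap_blocks = afake->blocks;
}

int main(void)
{
	struct agf_analog agf = { 0 };
	struct afakeroot_analog afake = { 0 };

	build_staged_tree(&afake);
	commit_staged_tree(&agf, &afake);
	printf("root=%u levels=%u blocks=%u\n",
	       agf.root, agf.levels, agf.rmap_blocks);
	return 0;
}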
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 820d668b063d..115c3455a734 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -9,6 +9,7 @@
struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
+struct xbtree_afakeroot;
/* rmaps only exist on crc enabled filesystems */
#define XFS_RMAP_BLOCK_LEN XFS_BTREE_SBLOCK_CRC_LEN
@@ -43,6 +44,10 @@ struct xfs_mount;
struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
xfs_agnumber_t agno);
+struct xfs_btree_cur *xfs_rmapbt_stage_cursor(struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
+void xfs_rmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
+ struct xfs_trans *tp, struct xfs_buf *agbp);
int xfs_rmapbt_maxrecs(int blocklen, int leaf);
extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 2f60fc3c99a0..00266de58954 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -220,7 +220,7 @@ xfs_validate_sb_common(
struct xfs_buf *bp,
struct xfs_sb *sbp)
{
- struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
+ struct xfs_dsb *dsb = bp->b_addr;
uint32_t agcount = 0;
uint32_t rem;
@@ -681,7 +681,7 @@ xfs_sb_read_verify(
{
struct xfs_sb sb;
struct xfs_mount *mp = bp->b_mount;
- struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
+ struct xfs_dsb *dsb = bp->b_addr;
int error;
/*
@@ -707,7 +707,7 @@ xfs_sb_read_verify(
* Check all the superblock fields. Don't byteswap the xquota flags
* because _verify_common checks the on-disk values.
*/
- __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+ __xfs_sb_from_disk(&sb, dsb, false);
error = xfs_validate_sb_common(mp, bp, &sb);
if (error)
goto out_error;
@@ -730,7 +730,7 @@ static void
xfs_sb_quiet_read_verify(
struct xfs_buf *bp)
{
- struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
+ struct xfs_dsb *dsb = bp->b_addr;
if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
/* XFS filesystem, verify noisily! */
@@ -748,13 +748,14 @@ xfs_sb_write_verify(
struct xfs_sb sb;
struct xfs_mount *mp = bp->b_mount;
struct xfs_buf_log_item *bip = bp->b_log_item;
+ struct xfs_dsb *dsb = bp->b_addr;
int error;
/*
* Check all the superblock fields. Don't byteswap the xquota flags
* because _verify_common checks the on-disk values.
*/
- __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+ __xfs_sb_from_disk(&sb, dsb, false);
error = xfs_validate_sb_common(mp, bp, &sb);
if (error)
goto out_error;
@@ -766,7 +767,7 @@ xfs_sb_write_verify(
return;
if (bip)
- XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ dsb->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
return;
@@ -927,7 +928,7 @@ xfs_log_sb(
mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
- xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+ xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
}
@@ -1007,7 +1008,7 @@ xfs_update_secondary_sbs(
bp->b_ops = &xfs_sb_buf_ops;
xfs_buf_oneshot(bp);
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
- xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+ xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
xfs_buf_delwri_queue(bp, &buffer_list);
xfs_buf_relse(bp);
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 7a9c04920505..d1a0848cb52e 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -187,7 +187,7 @@ xfs_calc_inode_chunk_res(
XFS_FSB_TO_B(mp, 1));
if (alloc) {
/* icreate tx uses ordered buffers */
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_sb_version_has_v3inode(&mp->m_sb))
return res;
size = XFS_FSB_TO_B(mp, 1);
}
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index ba0f747c82e8..e9bcf1faa183 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -92,7 +92,7 @@ xchk_superblock(
if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
return error;
- sb = XFS_BUF_TO_SBP(bp);
+ sb = bp->b_addr;
/*
* Verify the geometries match. Fields that are permanently
@@ -358,7 +358,7 @@ static inline void
xchk_agf_xref_freeblks(
struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
xfs_extlen_t blocks = 0;
int error;
@@ -378,7 +378,7 @@ static inline void
xchk_agf_xref_cntbt(
struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
xfs_agblock_t agbno;
xfs_extlen_t blocks;
int have;
@@ -410,7 +410,7 @@ STATIC void
xchk_agf_xref_btreeblks(
struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
struct xfs_mount *mp = sc->mp;
xfs_agblock_t blocks;
xfs_agblock_t btreeblks;
@@ -456,7 +456,7 @@ static inline void
xchk_agf_xref_refcblks(
struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
xfs_agblock_t blocks;
int error;
@@ -525,7 +525,7 @@ xchk_agf(
goto out;
xchk_buffer_recheck(sc, sc->sa.agf_bp);
- agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ agf = sc->sa.agf_bp->b_addr;
/* Check the AG length */
eoag = be32_to_cpu(agf->agf_length);
@@ -711,7 +711,7 @@ xchk_agfl(
goto out;
/* Allocate buffer to ensure uniqueness of AGFL entries. */
- agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ agf = sc->sa.agf_bp->b_addr;
agflcount = be32_to_cpu(agf->agf_flcount);
if (agflcount > xfs_agfl_size(sc->mp)) {
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
@@ -728,7 +728,7 @@ xchk_agfl(
}
/* Check the blocks in the AGFL. */
- error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
+ error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
sc->sa.agfl_bp, xchk_agfl_block, &sai);
if (error == -ECANCELED) {
error = 0;
@@ -765,7 +765,7 @@ static inline void
xchk_agi_xref_icounts(
struct xfs_scrub *sc)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
+ struct xfs_agi *agi = sc->sa.agi_bp->b_addr;
xfs_agino_t icount;
xfs_agino_t freecount;
int error;
@@ -834,7 +834,7 @@ xchk_agi(
goto out;
xchk_buffer_recheck(sc, sc->sa.agi_bp);
- agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
+ agi = sc->sa.agi_bp->b_addr;
/* Check the AG length */
eoag = be32_to_cpu(agi->agi_length);
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index d5e6db9af434..bca2ab1d4be9 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -49,7 +49,7 @@ xrep_superblock(
/* Copy AG 0's superblock to this one. */
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
- xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+ xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
/* Write this to disk. */
xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
@@ -140,7 +140,7 @@ xrep_agf_find_btrees(
struct xrep_find_ag_btree *fab,
struct xfs_buf *agfl_bp)
{
- struct xfs_agf *old_agf = XFS_BUF_TO_AGF(agf_bp);
+ struct xfs_agf *old_agf = agf_bp->b_addr;
int error;
/* Go find the root data. */
@@ -181,7 +181,7 @@ xrep_agf_init_header(
struct xfs_agf *old_agf)
{
struct xfs_mount *mp = sc->mp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+ struct xfs_agf *agf = agf_bp->b_addr;
memcpy(old_agf, agf, sizeof(*old_agf));
memset(agf, 0, BBTOB(agf_bp->b_length));
@@ -238,7 +238,7 @@ xrep_agf_calc_from_btrees(
{
struct xrep_agf_allocbt raa = { .sc = sc };
struct xfs_btree_cur *cur = NULL;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+ struct xfs_agf *agf = agf_bp->b_addr;
struct xfs_mount *mp = sc->mp;
xfs_agblock_t btreeblks;
xfs_agblock_t blocks;
@@ -302,7 +302,7 @@ xrep_agf_commit_new(
struct xfs_buf *agf_bp)
{
struct xfs_perag *pag;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+ struct xfs_agf *agf = agf_bp->b_addr;
/* Trigger fdblocks recalculation */
xfs_force_summary_recalc(sc->mp);
@@ -376,7 +376,7 @@ xrep_agf(
if (error)
return error;
agf_bp->b_ops = &xfs_agf_buf_ops;
- agf = XFS_BUF_TO_AGF(agf_bp);
+ agf = agf_bp->b_addr;
/*
* Load the AGFL so that we can screen out OWN_AG blocks that are on
@@ -395,7 +395,7 @@ xrep_agf(
* Spot-check the AGFL blocks; if they're obviously corrupt then
* there's nothing we can do but bail out.
*/
- error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(agf_bp), agfl_bp,
+ error = xfs_agfl_walk(sc->mp, agf_bp->b_addr, agfl_bp,
xrep_agf_check_agfl_block, sc);
if (error)
return error;
@@ -429,10 +429,10 @@ out_revert:
struct xrep_agfl {
/* Bitmap of other OWN_AG metadata blocks. */
- struct xfs_bitmap agmetablocks;
+ struct xbitmap agmetablocks;
/* Bitmap of free space. */
- struct xfs_bitmap *freesp;
+ struct xbitmap *freesp;
struct xfs_scrub *sc;
};
@@ -453,14 +453,14 @@ xrep_agfl_walk_rmap(
/* Record all the OWN_AG blocks. */
if (rec->rm_owner == XFS_RMAP_OWN_AG) {
- fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+ fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
rec->rm_startblock);
- error = xfs_bitmap_set(ra->freesp, fsb, rec->rm_blockcount);
+ error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
if (error)
return error;
}
- return xfs_bitmap_set_btcur_path(&ra->agmetablocks, cur);
+ return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
}
/*
@@ -476,19 +476,17 @@ STATIC int
xrep_agfl_collect_blocks(
struct xfs_scrub *sc,
struct xfs_buf *agf_bp,
- struct xfs_bitmap *agfl_extents,
+ struct xbitmap *agfl_extents,
xfs_agblock_t *flcount)
{
struct xrep_agfl ra;
struct xfs_mount *mp = sc->mp;
struct xfs_btree_cur *cur;
- struct xfs_bitmap_range *br;
- struct xfs_bitmap_range *n;
int error;
ra.sc = sc;
ra.freesp = agfl_extents;
- xfs_bitmap_init(&ra.agmetablocks);
+ xbitmap_init(&ra.agmetablocks);
/* Find all space used by the free space btrees & rmapbt. */
cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
@@ -500,7 +498,7 @@ xrep_agfl_collect_blocks(
/* Find all blocks currently being used by the bnobt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
XFS_BTNUM_BNO);
- error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+ error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
if (error)
goto err;
xfs_btree_del_cursor(cur, error);
@@ -508,7 +506,7 @@ xrep_agfl_collect_blocks(
/* Find all blocks currently being used by the cntbt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
XFS_BTNUM_CNT);
- error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+ error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
if (error)
goto err;
@@ -518,8 +516,8 @@ xrep_agfl_collect_blocks(
* Drop the freesp meta blocks that are in use by btrees.
* The remaining blocks /should/ be AGFL blocks.
*/
- error = xfs_bitmap_disunion(agfl_extents, &ra.agmetablocks);
- xfs_bitmap_destroy(&ra.agmetablocks);
+ error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
+ xbitmap_destroy(&ra.agmetablocks);
if (error)
return error;
@@ -527,18 +525,12 @@ xrep_agfl_collect_blocks(
* Calculate the new AGFL size. If we found more blocks than fit in
* the AGFL we'll free them later.
*/
- *flcount = 0;
- for_each_xfs_bitmap_extent(br, n, agfl_extents) {
- *flcount += br->len;
- if (*flcount > xfs_agfl_size(mp))
- break;
- }
- if (*flcount > xfs_agfl_size(mp))
- *flcount = xfs_agfl_size(mp);
+ *flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
+ xfs_agfl_size(mp));
return 0;
err:
- xfs_bitmap_destroy(&ra.agmetablocks);
+ xbitmap_destroy(&ra.agmetablocks);
xfs_btree_del_cursor(cur, error);
return error;
}
@@ -550,7 +542,7 @@ xrep_agfl_update_agf(
struct xfs_buf *agf_bp,
xfs_agblock_t flcount)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+ struct xfs_agf *agf = agf_bp->b_addr;
ASSERT(flcount <= xfs_agfl_size(sc->mp));
@@ -573,13 +565,13 @@ STATIC void
xrep_agfl_init_header(
struct xfs_scrub *sc,
struct xfs_buf *agfl_bp,
- struct xfs_bitmap *agfl_extents,
+ struct xbitmap *agfl_extents,
xfs_agblock_t flcount)
{
struct xfs_mount *mp = sc->mp;
__be32 *agfl_bno;
- struct xfs_bitmap_range *br;
- struct xfs_bitmap_range *n;
+ struct xbitmap_range *br;
+ struct xbitmap_range *n;
struct xfs_agfl *agfl;
xfs_agblock_t agbno;
unsigned int fl_off;
@@ -602,8 +594,8 @@ xrep_agfl_init_header(
* step.
*/
fl_off = 0;
- agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
- for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+ agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
+ for_each_xbitmap_extent(br, n, agfl_extents) {
agbno = XFS_FSB_TO_AGBNO(mp, br->start);
trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
@@ -637,7 +629,7 @@ int
xrep_agfl(
struct xfs_scrub *sc)
{
- struct xfs_bitmap agfl_extents;
+ struct xbitmap agfl_extents;
struct xfs_mount *mp = sc->mp;
struct xfs_buf *agf_bp;
struct xfs_buf *agfl_bp;
@@ -649,7 +641,7 @@ xrep_agfl(
return -EOPNOTSUPP;
xchk_perag_get(sc->mp, &sc->sa);
- xfs_bitmap_init(&agfl_extents);
+ xbitmap_init(&agfl_extents);
/*
* Read the AGF so that we can query the rmapbt. We hope that there's
@@ -696,10 +688,10 @@ xrep_agfl(
goto err;
/* Dump any AGFL overflow. */
- return xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
+ error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
XFS_AG_RESV_AGFL);
err:
- xfs_bitmap_destroy(&agfl_extents);
+ xbitmap_destroy(&agfl_extents);
return error;
}
@@ -761,7 +753,7 @@ xrep_agi_init_header(
struct xfs_buf *agi_bp,
struct xfs_agi *old_agi)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+ struct xfs_agi *agi = agi_bp->b_addr;
struct xfs_mount *mp = sc->mp;
memcpy(old_agi, agi, sizeof(*old_agi));
@@ -807,7 +799,7 @@ xrep_agi_calc_from_btrees(
struct xfs_buf *agi_bp)
{
struct xfs_btree_cur *cur;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+ struct xfs_agi *agi = agi_bp->b_addr;
struct xfs_mount *mp = sc->mp;
xfs_agino_t count;
xfs_agino_t freecount;
@@ -835,7 +827,7 @@ xrep_agi_commit_new(
struct xfs_buf *agi_bp)
{
struct xfs_perag *pag;
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+ struct xfs_agi *agi = agi_bp->b_addr;
/* Trigger inode count recalculation */
xfs_force_summary_recalc(sc->mp);
@@ -892,7 +884,7 @@ xrep_agi(
if (error)
return error;
agi_bp->b_ops = &xfs_agi_buf_ops;
- agi = XFS_BUF_TO_AGI(agi_bp);
+ agi = agi_bp->b_addr;
/* Find the AGI btree roots. */
error = xrep_agi_find_btrees(sc, fab);
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 5533e48e605d..73d924e47565 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -94,7 +94,7 @@ xchk_allocbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.agno;
xfs_agblock_t bno;
xfs_extlen_t len;
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index d9f0dd444b80..9faddb334a2c 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -98,7 +98,7 @@ struct xchk_xattr {
/*
* Check that an extended attribute key can be looked up by hash.
*
- * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
+ * We use the XFS attribute list iterator (i.e. xfs_attr_list_ilocked)
* to call this function for every attribute key in an inode. Once
* we're here, we load the attribute value to see if any errors happen,
* or if we get more or less data than we expected.
@@ -147,11 +147,8 @@ xchk_xattr_listent(
return;
}
- args.flags = ATTR_KERNOTIME;
- if (flags & XFS_ATTR_ROOT)
- args.flags |= ATTR_ROOT;
- else if (flags & XFS_ATTR_SECURE)
- args.flags |= ATTR_SECURE;
+ args.op_flags = XFS_DA_OP_NOTIME;
+ args.attr_filter = flags & XFS_ATTR_NSP_ONDISK_MASK;
args.geo = context->dp->i_mount->m_attr_geo;
args.whichfork = XFS_ATTR_FORK;
args.dp = context->dp;
@@ -162,7 +159,10 @@ xchk_xattr_listent(
args.value = xchk_xattr_valuebuf(sx->sc);
args.valuelen = valuelen;
- error = xfs_attr_get_ilocked(context->dp, &args);
+ error = xfs_attr_get_ilocked(&args);
+ /* ENODATA means the hash lookup failed and the attr is bad */
+ if (error == -ENODATA)
+ error = -EFSCORRUPTED;
if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
&error))
goto fail_xref;
@@ -474,7 +474,6 @@ xchk_xattr(
struct xfs_scrub *sc)
{
struct xchk_xattr sx;
- struct attrlist_cursor_kern cursor = { 0 };
xfs_dablk_t last_checked = -1U;
int error = 0;
@@ -493,11 +492,10 @@ xchk_xattr(
/* Check that every attr key can also be looked up by hash. */
sx.context.dp = sc->ip;
- sx.context.cursor = &cursor;
sx.context.resynch = 1;
sx.context.put_listent = xchk_xattr_listent;
sx.context.tp = sc->tp;
- sx.context.flags = ATTR_INCOMPLETE;
+ sx.context.allow_incomplete = true;
sx.sc = sc;
/*
@@ -516,7 +514,7 @@ xchk_xattr(
* iteration, which doesn't really follow the usual buffer
* locking order.
*/
- error = xfs_attr_list_int_ilocked(&sx.context);
+ error = xfs_attr_list_ilocked(&sx.context);
if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
goto out;
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 18a684e18a69..f88694f22d05 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -18,14 +18,14 @@
* This is the logical equivalent of bitmap |= mask(start, len).
*/
int
-xfs_bitmap_set(
- struct xfs_bitmap *bitmap,
+xbitmap_set(
+ struct xbitmap *bitmap,
uint64_t start,
uint64_t len)
{
- struct xfs_bitmap_range *bmr;
+ struct xbitmap_range *bmr;
- bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
+ bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
if (!bmr)
return -ENOMEM;
@@ -39,13 +39,13 @@ xfs_bitmap_set(
/* Free everything related to this bitmap. */
void
-xfs_bitmap_destroy(
- struct xfs_bitmap *bitmap)
+xbitmap_destroy(
+ struct xbitmap *bitmap)
{
- struct xfs_bitmap_range *bmr;
- struct xfs_bitmap_range *n;
+ struct xbitmap_range *bmr;
+ struct xbitmap_range *n;
- for_each_xfs_bitmap_extent(bmr, n, bitmap) {
+ for_each_xbitmap_extent(bmr, n, bitmap) {
list_del(&bmr->list);
kmem_free(bmr);
}
@@ -53,24 +53,24 @@ xfs_bitmap_destroy(
/* Set up a per-AG block bitmap. */
void
-xfs_bitmap_init(
- struct xfs_bitmap *bitmap)
+xbitmap_init(
+ struct xbitmap *bitmap)
{
INIT_LIST_HEAD(&bitmap->list);
}
/* Compare two btree extents. */
static int
-xfs_bitmap_range_cmp(
+xbitmap_range_cmp(
void *priv,
struct list_head *a,
struct list_head *b)
{
- struct xfs_bitmap_range *ap;
- struct xfs_bitmap_range *bp;
+ struct xbitmap_range *ap;
+ struct xbitmap_range *bp;
- ap = container_of(a, struct xfs_bitmap_range, list);
- bp = container_of(b, struct xfs_bitmap_range, list);
+ ap = container_of(a, struct xbitmap_range, list);
+ bp = container_of(b, struct xbitmap_range, list);
if (ap->start > bp->start)
return 1;
@@ -96,14 +96,14 @@ xfs_bitmap_range_cmp(
#define LEFT_ALIGNED (1 << 0)
#define RIGHT_ALIGNED (1 << 1)
int
-xfs_bitmap_disunion(
- struct xfs_bitmap *bitmap,
- struct xfs_bitmap *sub)
+xbitmap_disunion(
+ struct xbitmap *bitmap,
+ struct xbitmap *sub)
{
struct list_head *lp;
- struct xfs_bitmap_range *br;
- struct xfs_bitmap_range *new_br;
- struct xfs_bitmap_range *sub_br;
+ struct xbitmap_range *br;
+ struct xbitmap_range *new_br;
+ struct xbitmap_range *sub_br;
uint64_t sub_start;
uint64_t sub_len;
int state;
@@ -113,8 +113,8 @@ xfs_bitmap_disunion(
return 0;
ASSERT(!list_empty(&sub->list));
- list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
- list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
+ list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
+ list_sort(NULL, &sub->list, xbitmap_range_cmp);
/*
* Now that we've sorted both lists, we iterate bitmap once, rolling
@@ -124,11 +124,11 @@ xfs_bitmap_disunion(
* list traversal is similar to merge sort, but we're deleting
* instead. In this manner we avoid O(n^2) operations.
*/
- sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
+ sub_br = list_first_entry(&sub->list, struct xbitmap_range,
list);
lp = bitmap->list.next;
while (lp != &bitmap->list) {
- br = list_entry(lp, struct xfs_bitmap_range, list);
+ br = list_entry(lp, struct xbitmap_range, list);
/*
* Advance sub_br and/or br until we find a pair that
@@ -181,7 +181,7 @@ xfs_bitmap_disunion(
* Deleting from the middle: add the new right extent
* and then shrink the left extent.
*/
- new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
+ new_br = kmem_alloc(sizeof(struct xbitmap_range),
KM_MAYFAIL);
if (!new_br) {
error = -ENOMEM;
@@ -247,8 +247,8 @@ out:
* blocks going from the leaf towards the root.
*/
int
-xfs_bitmap_set_btcur_path(
- struct xfs_bitmap *bitmap,
+xbitmap_set_btcur_path(
+ struct xbitmap *bitmap,
struct xfs_btree_cur *cur)
{
struct xfs_buf *bp;
@@ -261,7 +261,7 @@ xfs_bitmap_set_btcur_path(
if (!bp)
continue;
fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
- error = xfs_bitmap_set(bitmap, fsb, 1);
+ error = xbitmap_set(bitmap, fsb, 1);
if (error)
return error;
}
@@ -271,12 +271,12 @@ xfs_bitmap_set_btcur_path(
/* Collect a btree's block in the bitmap. */
STATIC int
-xfs_bitmap_collect_btblock(
+xbitmap_collect_btblock(
struct xfs_btree_cur *cur,
int level,
void *priv)
{
- struct xfs_bitmap *bitmap = priv;
+ struct xbitmap *bitmap = priv;
struct xfs_buf *bp;
xfs_fsblock_t fsbno;
@@ -285,15 +285,30 @@ xfs_bitmap_collect_btblock(
return 0;
fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
- return xfs_bitmap_set(bitmap, fsbno, 1);
+ return xbitmap_set(bitmap, fsbno, 1);
}
/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
-xfs_bitmap_set_btblocks(
- struct xfs_bitmap *bitmap,
+xbitmap_set_btblocks(
+ struct xbitmap *bitmap,
struct xfs_btree_cur *cur)
{
- return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock,
+ return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
XFS_BTREE_VISIT_ALL, bitmap);
}
+
+/* How many bits are set in this bitmap? */
+uint64_t
+xbitmap_hweight(
+ struct xbitmap *bitmap)
+{
+ struct xbitmap_range *bmr;
+ struct xbitmap_range *n;
+ uint64_t ret = 0;
+
+ for_each_xbitmap_extent(bmr, n, bitmap)
+ ret += bmr->len;
+
+ return ret;
+}
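The new xbitmap_hweight() above lets xrep_agfl_collect_blocks() replace its open-coded counting loop with a min_t() clamp against xfs_agfl_size(): because the bitmap is a list of extents, the population count is simply the sum of the extent lengths. A runnable userspace analog, with types and names invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct range_analog {
	uint64_t start;
	uint64_t len;
};

/* Sum of extent lengths == number of set bits in the bitmap. */
static uint64_t hweight_analog(const struct range_analog *r, int n)
{
	uint64_t ret = 0;

	for (int i = 0; i < n; i++)
		ret += r[i].len;
	return ret;
}

int main(void)
{
	struct range_analog map[] = { { 0, 4 }, { 10, 2 } };

	printf("%llu bits set\n",
	       (unsigned long long)hweight_analog(map, 2));
	return 0;
}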
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
index ae8ecbce6fa6..900646b72de1 100644
--- a/fs/xfs/scrub/bitmap.h
+++ b/fs/xfs/scrub/bitmap.h
@@ -6,31 +6,32 @@
#ifndef __XFS_SCRUB_BITMAP_H__
#define __XFS_SCRUB_BITMAP_H__
-struct xfs_bitmap_range {
+struct xbitmap_range {
struct list_head list;
uint64_t start;
uint64_t len;
};
-struct xfs_bitmap {
+struct xbitmap {
struct list_head list;
};
-void xfs_bitmap_init(struct xfs_bitmap *bitmap);
-void xfs_bitmap_destroy(struct xfs_bitmap *bitmap);
+void xbitmap_init(struct xbitmap *bitmap);
+void xbitmap_destroy(struct xbitmap *bitmap);
-#define for_each_xfs_bitmap_extent(bex, n, bitmap) \
+#define for_each_xbitmap_extent(bex, n, bitmap) \
list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
-#define for_each_xfs_bitmap_block(b, bex, n, bitmap) \
+#define for_each_xbitmap_block(b, bex, n, bitmap) \
list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
- for ((b) = bex->start; (b) < bex->start + bex->len; (b)++)
+ for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++)
-int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len);
-int xfs_bitmap_disunion(struct xfs_bitmap *bitmap, struct xfs_bitmap *sub);
-int xfs_bitmap_set_btcur_path(struct xfs_bitmap *bitmap,
+int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
+int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
+int xbitmap_set_btcur_path(struct xbitmap *bitmap,
struct xfs_btree_cur *cur);
-int xfs_bitmap_set_btblocks(struct xfs_bitmap *bitmap,
+int xbitmap_set_btblocks(struct xbitmap *bitmap,
struct xfs_btree_cur *cur);
+uint64_t xbitmap_hweight(struct xbitmap *bitmap);
#endif /* __XFS_SCRUB_BITMAP_H__ */
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index fa6ea6407992..add8598eacd5 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -374,7 +374,7 @@ xchk_bmapbt_rec(
struct xfs_bmbt_irec iext_irec;
struct xfs_iext_cursor icur;
struct xchk_bmap_info *info = bs->private;
- struct xfs_inode *ip = bs->cur->bc_private.b.ip;
+ struct xfs_inode *ip = bs->cur->bc_ino.ip;
struct xfs_buf *bp = NULL;
struct xfs_btree_block *block;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, info->whichfork);
@@ -501,7 +501,7 @@ xchk_bmap_check_rmap(
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
- cur->bc_private.a.agno, rec->rm_startblock))
+ cur->bc_ag.agno, rec->rm_startblock))
xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_blockcount > rec->rm_blockcount)
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 97a15b6f2865..9a2e27ac1300 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -219,19 +219,21 @@ xchk_da_btree_block_check_sibling(
int direction,
xfs_dablk_t sibling)
{
+ struct xfs_da_state_path *path = &ds->state->path;
+ struct xfs_da_state_path *altpath = &ds->state->altpath;
int retval;
+ int plevel;
int error;
- memcpy(&ds->state->altpath, &ds->state->path,
- sizeof(ds->state->altpath));
+ memcpy(altpath, path, sizeof(ds->state->altpath));
/*
* If the pointer is null, we shouldn't be able to move the upper
* level pointer anywhere.
*/
if (sibling == 0) {
- error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
- direction, false, &retval);
+ error = xfs_da3_path_shift(ds->state, altpath, direction,
+ false, &retval);
if (error == 0 && retval == 0)
xchk_da_set_corrupt(ds, level);
error = 0;
@@ -239,27 +241,33 @@ xchk_da_btree_block_check_sibling(
}
/* Move the alternate cursor one block in the direction given. */
- error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
- direction, false, &retval);
+ error = xfs_da3_path_shift(ds->state, altpath, direction, false,
+ &retval);
if (!xchk_da_process_error(ds, level, &error))
- return error;
+ goto out;
if (retval) {
xchk_da_set_corrupt(ds, level);
- return error;
+ goto out;
}
- if (ds->state->altpath.blk[level].bp)
- xchk_buffer_recheck(ds->sc,
- ds->state->altpath.blk[level].bp);
+ if (altpath->blk[level].bp)
+ xchk_buffer_recheck(ds->sc, altpath->blk[level].bp);
/* Compare upper level pointer to sibling pointer. */
- if (ds->state->altpath.blk[level].blkno != sibling)
+ if (altpath->blk[level].blkno != sibling)
xchk_da_set_corrupt(ds, level);
- if (ds->state->altpath.blk[level].bp) {
- xfs_trans_brelse(ds->dargs.trans,
- ds->state->altpath.blk[level].bp);
- ds->state->altpath.blk[level].bp = NULL;
- }
+
out:
+ /* Free all buffers in the altpath that aren't referenced from path. */
+ for (plevel = 0; plevel < altpath->active; plevel++) {
+ if (altpath->blk[plevel].bp == NULL ||
+ (plevel < path->active &&
+ altpath->blk[plevel].bp == path->blk[plevel].bp))
+ continue;
+
+ xfs_trans_brelse(ds->dargs.trans, altpath->blk[plevel].bp);
+ altpath->blk[plevel].bp = NULL;
+ }
+
return error;
}
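The rewritten sibling check above stops releasing altpath buffers level by level in the middle of the check and instead sweeps the whole alternate path on the way out, skipping any buffer the primary path still references; otherwise a buffer shared between the two paths would be released twice. A runnable userspace analog of that sweep, with invented names standing in for the xfs_da_state_path machinery:

#include <stdio.h>
#include <stdlib.h>

#define MAXDEPTH 4

struct path_analog {
	void	*blk_bp[MAXDEPTH];
	int	active;
};

static void release_altpath(struct path_analog *path,
			    struct path_analog *altpath)
{
	for (int plevel = 0; plevel < altpath->active; plevel++) {
		if (altpath->blk_bp[plevel] == NULL ||
		    (plevel < path->active &&
		     altpath->blk_bp[plevel] == path->blk_bp[plevel]))
			continue;	/* empty or shared: leave it alone */

		free(altpath->blk_bp[plevel]);
		altpath->blk_bp[plevel] = NULL;
	}
}

int main(void)
{
	struct path_analog path = { .active = 1 };
	struct path_analog alt = { .active = 2 };

	path.blk_bp[0] = alt.blk_bp[0] = malloc(16);	/* shared level */
	alt.blk_bp[1] = malloc(16);			/* alt-only level */

	release_altpath(&path, &alt);
	printf("alt[0]=%p alt[1]=%p\n", alt.blk_bp[0], alt.blk_bp[1]);
	free(path.blk_bp[0]);	/* shared buffer freed exactly once */
	return 0;
}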
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 266da4e4bde6..fe2a6e030c8a 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -155,6 +155,9 @@ xchk_dir_actor(
xname.type = XFS_DIR3_FT_UNKNOWN;
error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
+ /* ENOENT means the hash lookup failed and the dir is corrupt */
+ if (error == -ENOENT)
+ error = -EFSCORRUPTED;
if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
@@ -500,7 +503,7 @@ xchk_directory_leaf1_bestfree(
/* Read the free space block. */
error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, &bp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
- goto out;
+ return error;
xchk_buffer_recheck(sc, bp);
leaf = bp->b_addr;
@@ -565,9 +568,10 @@ xchk_directory_leaf1_bestfree(
xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- goto out;
+ break;
}
out:
+ xfs_trans_brelse(sc->tp, bp);
return error;
}
@@ -589,7 +593,7 @@ xchk_directory_free_bestfree(
/* Read the free space block */
error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
- goto out;
+ return error;
xchk_buffer_recheck(sc, bp);
if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
@@ -612,7 +616,7 @@ xchk_directory_free_bestfree(
0, &dbp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
- break;
+ goto out;
xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp);
}
@@ -620,6 +624,7 @@ xchk_directory_free_bestfree(
if (freehdr.nused + stale != freehdr.nvalid)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out:
+ xfs_trans_brelse(sc->tp, bp);
return error;
}
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 681758704fda..64c217eb06a7 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -104,7 +104,7 @@ xchk_iallocbt_chunk(
xfs_extlen_t len)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.agno;
xfs_agblock_t bno;
bno = XFS_AGINO_TO_AGBNO(mp, agino);
@@ -164,7 +164,7 @@ xchk_iallocbt_check_cluster_ifree(
* the record, compute which fs inode we're talking about.
*/
agino = irec->ir_startino + irec_ino;
- fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
+ fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.agno, agino);
irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
@@ -215,7 +215,7 @@ xchk_iallocbt_check_cluster(
struct xfs_dinode *dip;
struct xfs_buf *cluster_bp;
unsigned int nr_inodes;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.agno;
xfs_agblock_t agbno;
unsigned int cluster_index;
uint16_t cluster_mask = 0;
@@ -426,7 +426,7 @@ xchk_iallocbt_rec(
struct xchk_iallocbt *iabt = bs->private;
struct xfs_inobt_rec_incore irec;
uint64_t holes;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.agno;
xfs_agino_t agino;
xfs_extlen_t len;
int holecount;
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 0cab11a5d390..beaeb6fa3119 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -336,7 +336,7 @@ xchk_refcountbt_rec(
{
struct xfs_mount *mp = bs->cur->bc_mp;
xfs_agblock_t *cow_blocks = bs->private;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.agno;
xfs_agblock_t bno;
xfs_extlen_t len;
xfs_nlink_t refcount;
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index e489d7a8446a..db3cfd12803d 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -208,8 +208,10 @@ xrep_calc_ag_resblks(
/* Now grab the block counters from the AGF. */
error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
if (!error) {
- aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length);
- freelen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_freeblks);
+ struct xfs_agf *agf = bp->b_addr;
+
+ aglen = be32_to_cpu(agf->agf_length);
+ freelen = be32_to_cpu(agf->agf_freeblks);
usedlen = aglen - freelen;
xfs_buf_relse(bp);
}
@@ -434,10 +436,10 @@ xrep_init_btblock(
int
xrep_invalidate_blocks(
struct xfs_scrub *sc,
- struct xfs_bitmap *bitmap)
+ struct xbitmap *bitmap)
{
- struct xfs_bitmap_range *bmr;
- struct xfs_bitmap_range *n;
+ struct xbitmap_range *bmr;
+ struct xbitmap_range *n;
struct xfs_buf *bp;
xfs_fsblock_t fsbno;
@@ -449,7 +451,7 @@ xrep_invalidate_blocks(
* because we never own those; and if we can't TRYLOCK the buffer we
* assume it's owned by someone else.
*/
- for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
+ for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
/* Skip AG headers and post-EOFS blocks */
if (!xfs_verify_fsbno(sc->mp, fsbno))
continue;
@@ -595,18 +597,18 @@ out_free:
int
xrep_reap_extents(
struct xfs_scrub *sc,
- struct xfs_bitmap *bitmap,
+ struct xbitmap *bitmap,
const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type)
{
- struct xfs_bitmap_range *bmr;
- struct xfs_bitmap_range *n;
+ struct xbitmap_range *bmr;
+ struct xbitmap_range *n;
xfs_fsblock_t fsbno;
int error = 0;
ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
- for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
+ for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
ASSERT(sc->ip != NULL ||
XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
trace_xrep_dispose_btree_extent(sc->mp,
@@ -615,11 +617,9 @@ xrep_reap_extents(
error = xrep_reap_block(sc, fsbno, oinfo, type);
if (error)
- goto out;
+ break;
}
-out:
- xfs_bitmap_destroy(bitmap);
return error;
}
@@ -879,7 +879,7 @@ xrep_find_ag_btree_roots(
ri.sc = sc;
ri.btree_info = btree_info;
- ri.agf = XFS_BUF_TO_AGF(agf_bp);
+ ri.agf = agf_bp->b_addr;
ri.agfl_bp = agfl_bp;
for (fab = btree_info; fab->buf_ops; fab++) {
ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index c3422403b169..04a47d45605b 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -28,11 +28,11 @@ int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
struct xfs_buf **bpp, xfs_btnum_t btnum,
const struct xfs_buf_ops *ops);
-struct xfs_bitmap;
+struct xbitmap;
int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
-int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xfs_bitmap *btlist);
-int xrep_reap_extents(struct xfs_scrub *sc, struct xfs_bitmap *exlist,
+int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist);
+int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist,
const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
struct xrep_find_ag_btree {
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index 8d4cefd761c1..f4fcb4719f41 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -92,7 +92,7 @@ xchk_rmapbt_rec(
{
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_rmap_irec irec;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agnumber_t agno = bs->cur->bc_ag.agno;
bool non_inode;
bool is_unwritten;
bool is_bmbt;
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index f1775bb19313..8ebf35b115ce 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -168,6 +168,7 @@ xchk_teardown(
xfs_irele(sc->ip);
sc->ip = NULL;
}
+ sb_end_write(sc->mp->m_super);
if (sc->flags & XCHK_REAPING_DISABLED)
xchk_start_reaping(sc);
if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) {
@@ -490,6 +491,14 @@ xfs_scrub_metadata(
sc.ops = &meta_scrub_ops[sm->sm_type];
sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type);
retry_op:
+ /*
+ * If freeze runs concurrently with a scrub, the freeze can be delayed
+ * indefinitely as we walk the filesystem and iterate over metadata
+ * buffers. Freeze quiesces the log (which waits for the buffer LRU to
+ * be emptied) and that won't happen while checking is running.
+ */
+ sb_start_write(mp->m_super);
+
/* Set up for the operation. */
error = sc.ops->setup(&sc, ip);
if (error)
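The sb_start_write()/sb_end_write() pair added above holds a superblock write reference for the whole scrub, so a concurrent freeze blocks until checking finishes rather than stalling forever behind pinned metadata buffers. A toy userspace analog of that interlock; the reference counter and every name here are invented for illustration only:

#include <stdio.h>

static int write_refs;	/* stands in for the sb_writers counter */

static void sb_start_write_analog(void) { write_refs++; }
static void sb_end_write_analog(void)   { write_refs--; }

/* A real freeze would block; here we just report whether it could run. */
static int freeze_analog(void)
{
	return write_refs ? -1 : 0;
}

int main(void)
{
	sb_start_write_analog();	/* scrub setup */
	printf("freeze during scrub: %d\n", freeze_analog());
	sb_end_write_analog();		/* scrub teardown */
	printf("freeze after scrub:  %d\n", freeze_analog());
	return 0;
}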
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 9eaab2eb5ed3..2c6c248be823 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -24,9 +24,9 @@ xchk_btree_cur_fsbno(
return XFS_DADDR_TO_FSB(cur->bc_mp, cur->bc_bufs[level]->b_bn);
else if (level == cur->bc_nlevels - 1 &&
cur->bc_flags & XFS_BTREE_LONG_PTRS)
- return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_private.b.ip->i_ino);
+ return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
else if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS))
- return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno, 0);
+ return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno, 0);
return NULLFSBLOCK;
}
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 096203119934..e46f5cef90da 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -379,7 +379,7 @@ TRACE_EVENT(xchk_ifork_btree_op_error,
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino;
- __entry->whichfork = cur->bc_private.b.whichfork;
+ __entry->whichfork = cur->bc_ino.whichfork;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
__entry->level = level;
@@ -459,7 +459,7 @@ TRACE_EVENT(xchk_ifork_btree_error,
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino;
- __entry->whichfork = cur->bc_private.b.whichfork;
+ __entry->whichfork = cur->bc_ino.whichfork;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
__entry->level = level;
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index cd743fad8478..d4c687b5cd06 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -14,6 +14,8 @@
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_acl.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
#include <linux/posix_acl_xattr.h>
@@ -67,10 +69,12 @@ xfs_acl_from_disk(
switch (acl_e->e_tag) {
case ACL_USER:
- acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
+ acl_e->e_uid = make_kuid(&init_user_ns,
+ be32_to_cpu(ace->ae_id));
break;
case ACL_GROUP:
- acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
+ acl_e->e_gid = make_kgid(&init_user_ns,
+ be32_to_cpu(ace->ae_id));
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
@@ -103,10 +107,12 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
ace->ae_tag = cpu_to_be32(acl_e->e_tag);
switch (acl_e->e_tag) {
case ACL_USER:
- ace->ae_id = cpu_to_be32(xfs_kuid_to_uid(acl_e->e_uid));
+ ace->ae_id = cpu_to_be32(
+ from_kuid(&init_user_ns, acl_e->e_uid));
break;
case ACL_GROUP:
- ace->ae_id = cpu_to_be32(xfs_kgid_to_gid(acl_e->e_gid));
+ ace->ae_id = cpu_to_be32(
+ from_kgid(&init_user_ns, acl_e->e_gid));
break;
default:
ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
@@ -120,102 +126,86 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
struct posix_acl *
xfs_get_acl(struct inode *inode, int type)
{
- struct xfs_inode *ip = XFS_I(inode);
- struct posix_acl *acl = NULL;
- struct xfs_acl *xfs_acl = NULL;
- unsigned char *ea_name;
- int error;
- int len;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct posix_acl *acl = NULL;
+ struct xfs_da_args args = {
+ .dp = ip,
+ .attr_filter = XFS_ATTR_ROOT,
+ .valuelen = XFS_ACL_MAX_SIZE(mp),
+ };
+ int error;
trace_xfs_get_acl(ip);
switch (type) {
case ACL_TYPE_ACCESS:
- ea_name = SGI_ACL_FILE;
+ args.name = SGI_ACL_FILE;
break;
case ACL_TYPE_DEFAULT:
- ea_name = SGI_ACL_DEFAULT;
+ args.name = SGI_ACL_DEFAULT;
break;
default:
BUG();
}
+ args.namelen = strlen(args.name);
/*
- * If we have a cached ACLs value just return it, not need to
- * go out to the disk.
+ * If the attribute doesn't exist, make sure we have a negative cache
+ * entry; for any other error assume it is transient.
*/
- len = XFS_ACL_MAX_SIZE(ip->i_mount);
- error = xfs_attr_get(ip, ea_name, strlen(ea_name),
- (unsigned char **)&xfs_acl, &len,
- ATTR_ALLOC | ATTR_ROOT);
- if (error) {
- /*
- * If the attribute doesn't exist make sure we have a negative
- * cache entry, for any other error assume it is transient.
- */
- if (error != -ENOATTR)
- acl = ERR_PTR(error);
- } else {
- acl = xfs_acl_from_disk(ip->i_mount, xfs_acl, len,
- XFS_ACL_MAX_ENTRIES(ip->i_mount));
- kmem_free(xfs_acl);
+ error = xfs_attr_get(&args);
+ if (!error) {
+ acl = xfs_acl_from_disk(mp, args.value, args.valuelen,
+ XFS_ACL_MAX_ENTRIES(mp));
+ } else if (error != -ENOATTR) {
+ acl = ERR_PTR(error);
}
+
+ kmem_free(args.value);
return acl;
}
int
__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
- struct xfs_inode *ip = XFS_I(inode);
- unsigned char *ea_name;
- int error;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_da_args args = {
+ .dp = ip,
+ .attr_filter = XFS_ATTR_ROOT,
+ };
+ int error;
switch (type) {
case ACL_TYPE_ACCESS:
- ea_name = SGI_ACL_FILE;
+ args.name = SGI_ACL_FILE;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
- ea_name = SGI_ACL_DEFAULT;
+ args.name = SGI_ACL_DEFAULT;
break;
default:
return -EINVAL;
}
+ args.namelen = strlen(args.name);
if (acl) {
- struct xfs_acl *xfs_acl;
- int len = XFS_ACL_MAX_SIZE(ip->i_mount);
-
- xfs_acl = kmem_zalloc_large(len, 0);
- if (!xfs_acl)
+ args.valuelen = XFS_ACL_SIZE(acl->a_count);
+ args.value = kmem_zalloc_large(args.valuelen, 0);
+ if (!args.value)
return -ENOMEM;
-
- xfs_acl_to_disk(xfs_acl, acl);
-
- /* subtract away the unused acl entries */
- len -= sizeof(struct xfs_acl_entry) *
- (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
-
- error = xfs_attr_set(ip, ea_name, strlen(ea_name),
- (unsigned char *)xfs_acl, len, ATTR_ROOT);
-
- kmem_free(xfs_acl);
- } else {
- /*
- * A NULL ACL argument means we want to remove the ACL.
- */
- error = xfs_attr_remove(ip, ea_name,
- strlen(ea_name),
- ATTR_ROOT);
-
- /*
- * If the attribute didn't exist to start with that's fine.
- */
- if (error == -ENOATTR)
- error = 0;
+ xfs_acl_to_disk(args.value, acl);
}
+ error = xfs_attr_set(&args);
+ kmem_free(args.value);
+
+ /*
+ * If the attribute didn't exist to start with, that's fine.
+ */
+ if (!acl && error == -ENOATTR)
+ error = 0;
if (!error)
set_cached_acl(inode, type, acl);
return error;
@@ -275,3 +265,19 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
return error;
}
+
+/*
+ * Invalidate any cached ACLs if the user has bypassed the ACL interface.
+ * We don't validate the content whatsoever, so it is the caller's
+ * responsibility to provide data in a valid format and to ensure that
+ * i_mode is consistent.
+ */
+void
+xfs_forget_acl(
+ struct inode *inode,
+ const char *name)
+{
+ if (!strcmp(name, SGI_ACL_FILE))
+ forget_cached_acl(inode, ACL_TYPE_ACCESS);
+ else if (!strcmp(name, SGI_ACL_DEFAULT))
+ forget_cached_acl(inode, ACL_TYPE_DEFAULT);
+}
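xfs_get_acl() above now packs the lookup into a struct xfs_da_args with designated initializers and keys the negative-cache decision off -ENOATTR: a hit is decoded and cached, -ENOATTR leaves a negative cache entry, and anything else is treated as transient. A reduced userspace sketch of that error-to-cache mapping; the lookup stub, the error constant, and all names are illustrative, not real APIs:

#include <stdio.h>

#define ENOATTR_ANALOG 93	/* stand-in for the kernel's ENOATTR */

/* Pretend attr lookup: found (0), absent (-ENOATTR), or transient error. */
static int attr_get_analog(const char *name, int *value)
{
	(void)name;
	(void)value;
	return -ENOATTR_ANALOG;
}

int main(void)
{
	int value;
	int error = attr_get_analog("SGI_ACL_FILE", &value);

	if (!error)
		printf("got ACL: decode and cache it\n");
	else if (error != -ENOATTR_ANALOG)
		printf("transient error %d: propagate it\n", error);
	else
		printf("no ACL: keep a negative cache entry\n");
	return 0;
}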
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 94615e34bc86..c042c0868016 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -13,14 +13,16 @@ struct posix_acl;
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
extern int __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+void xfs_forget_acl(struct inode *inode, const char *name);
#else
static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type)
{
return NULL;
}
# define xfs_set_acl NULL
+static inline void xfs_forget_acl(struct inode *inode, const char *name)
+{
+}
#endif /* CONFIG_XFS_POSIX_ACL */
-extern void xfs_forget_acl(struct inode *inode, const char *name, int xflags);
-
#endif /* __XFS_ACL_H__ */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 58e937be24ce..9d9cebf18726 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -539,7 +539,7 @@ xfs_discard_page(
if (XFS_FORCED_SHUTDOWN(mp))
goto out_invalidate;
- xfs_alert(mp,
+ xfs_alert_ratelimited(mp,
"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
page, ip->i_ino, offset);
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index bbfa6ba84dcd..c42f90e16b4f 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -145,8 +145,8 @@ xfs_attr3_node_inactive(
* Since this code is recursive (gasp!) we must protect ourselves.
*/
if (level > XFS_DA_NODE_MAXDEPTH) {
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
- xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
}
@@ -194,7 +194,7 @@ xfs_attr3_node_inactive(
error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
break;
default:
- xfs_buf_corruption_error(child_bp);
+ xfs_buf_mark_corrupt(child_bp);
xfs_trans_brelse(*trans, child_bp);
error = -EFSCORRUPTED;
break;
@@ -289,7 +289,7 @@ xfs_attr3_root_inactive(
break;
default:
error = -EFSCORRUPTED;
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(*trans, bp);
break;
}
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index d37743bdf274..5ff1d929d3b5 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -52,24 +52,19 @@ static int
xfs_attr_shortform_list(
struct xfs_attr_list_context *context)
{
- struct attrlist_cursor_kern *cursor;
+ struct xfs_attrlist_cursor_kern *cursor = &context->cursor;
+ struct xfs_inode *dp = context->dp;
struct xfs_attr_sf_sort *sbuf, *sbp;
struct xfs_attr_shortform *sf;
struct xfs_attr_sf_entry *sfe;
- struct xfs_inode *dp;
int sbsize, nsbuf, count, i;
int error = 0;
- ASSERT(context != NULL);
- dp = context->dp;
- ASSERT(dp != NULL);
ASSERT(dp->i_afp != NULL);
sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
ASSERT(sf != NULL);
if (!sf->hdr.count)
return 0;
- cursor = context->cursor;
- ASSERT(cursor != NULL);
trace_xfs_attr_list_sf(context);
@@ -205,7 +200,7 @@ out:
STATIC int
xfs_attr_node_list_lookup(
struct xfs_attr_list_context *context,
- struct attrlist_cursor_kern *cursor,
+ struct xfs_attrlist_cursor_kern *cursor,
struct xfs_buf **pbp)
{
struct xfs_da3_icnode_hdr nodehdr;
@@ -279,7 +274,7 @@ xfs_attr_node_list_lookup(
return 0;
out_corruptbuf:
- xfs_buf_corruption_error(bp);
+ xfs_buf_mark_corrupt(bp);
xfs_trans_brelse(tp, bp);
return -EFSCORRUPTED;
}
@@ -288,8 +283,8 @@ STATIC int
xfs_attr_node_list(
struct xfs_attr_list_context *context)
{
+ struct xfs_attrlist_cursor_kern *cursor = &context->cursor;
struct xfs_attr3_icleaf_hdr leafhdr;
- struct attrlist_cursor_kern *cursor;
struct xfs_attr_leafblock *leaf;
struct xfs_da_intnode *node;
struct xfs_buf *bp;
@@ -299,7 +294,6 @@ xfs_attr_node_list(
trace_xfs_attr_node_list(context);
- cursor = context->cursor;
cursor->initted = 1;
/*
@@ -394,7 +388,7 @@ xfs_attr3_leaf_list_int(
struct xfs_buf *bp,
struct xfs_attr_list_context *context)
{
- struct attrlist_cursor_kern *cursor;
+ struct xfs_attrlist_cursor_kern *cursor = &context->cursor;
struct xfs_attr_leafblock *leaf;
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_attr_leaf_entry *entries;
@@ -408,7 +402,6 @@ xfs_attr3_leaf_list_int(
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
- cursor = context->cursor;
cursor->initted = 1;
/*
@@ -452,8 +445,8 @@ xfs_attr3_leaf_list_int(
}
if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
- !(context->flags & ATTR_INCOMPLETE))
- continue; /* skip incomplete entries */
+ !context->allow_incomplete)
+ continue;
if (entry->flags & XFS_ATTR_LOCAL) {
xfs_attr_leaf_name_local_t *name_loc;
@@ -488,14 +481,15 @@ xfs_attr3_leaf_list_int(
* Copy out attribute entries for attr_list(), for leaf attribute lists.
*/
STATIC int
-xfs_attr_leaf_list(xfs_attr_list_context_t *context)
+xfs_attr_leaf_list(
+ struct xfs_attr_list_context *context)
{
- int error;
- struct xfs_buf *bp;
+ struct xfs_buf *bp;
+ int error;
trace_xfs_attr_leaf_list(context);
- context->cursor->blkno = 0;
+ context->cursor.blkno = 0;
error = xfs_attr3_leaf_read(context->tp, context->dp, 0, &bp);
if (error)
return error;
@@ -506,7 +500,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
}
int
-xfs_attr_list_int_ilocked(
+xfs_attr_list_ilocked(
struct xfs_attr_list_context *context)
{
struct xfs_inode *dp = context->dp;
@@ -526,12 +520,12 @@ xfs_attr_list_int_ilocked(
}
int
-xfs_attr_list_int(
- xfs_attr_list_context_t *context)
+xfs_attr_list(
+ struct xfs_attr_list_context *context)
{
- int error;
- xfs_inode_t *dp = context->dp;
- uint lock_mode;
+ struct xfs_inode *dp = context->dp;
+ uint lock_mode;
+ int error;
XFS_STATS_INC(dp->i_mount, xs_attr_list);
@@ -539,130 +533,7 @@ xfs_attr_list_int(
return -EIO;
lock_mode = xfs_ilock_attr_map_shared(dp);
- error = xfs_attr_list_int_ilocked(context);
+ error = xfs_attr_list_ilocked(context);
xfs_iunlock(dp, lock_mode);
return error;
}
-
-#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \
- (((struct attrlist_ent *) 0)->a_name - (char *) 0)
-#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \
- ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(uint32_t)-1) \
- & ~(sizeof(uint32_t)-1))
-
-/*
- * Format an attribute and copy it out to the user's buffer.
- * Take care to check values and protect against them changing later,
- * we may be reading them directly out of a user buffer.
- */
-STATIC void
-xfs_attr_put_listent(
- xfs_attr_list_context_t *context,
- int flags,
- unsigned char *name,
- int namelen,
- int valuelen)
-{
- struct attrlist *alist = (struct attrlist *)context->alist;
- attrlist_ent_t *aep;
- int arraytop;
-
- ASSERT(!context->seen_enough);
- ASSERT(!(context->flags & ATTR_KERNOVAL));
- ASSERT(context->count >= 0);
- ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
- ASSERT(context->firstu >= sizeof(*alist));
- ASSERT(context->firstu <= context->bufsize);
-
- /*
- * Only list entries in the right namespace.
- */
- if (((context->flags & ATTR_SECURE) == 0) !=
- ((flags & XFS_ATTR_SECURE) == 0))
- return;
- if (((context->flags & ATTR_ROOT) == 0) !=
- ((flags & XFS_ATTR_ROOT) == 0))
- return;
-
- arraytop = sizeof(*alist) +
- context->count * sizeof(alist->al_offset[0]);
- context->firstu -= ATTR_ENTSIZE(namelen);
- if (context->firstu < arraytop) {
- trace_xfs_attr_list_full(context);
- alist->al_more = 1;
- context->seen_enough = 1;
- return;
- }
-
- aep = (attrlist_ent_t *)&context->alist[context->firstu];
- aep->a_valuelen = valuelen;
- memcpy(aep->a_name, name, namelen);
- aep->a_name[namelen] = 0;
- alist->al_offset[context->count++] = context->firstu;
- alist->al_count = context->count;
- trace_xfs_attr_list_add(context);
- return;
-}
-
-/*
- * Generate a list of extended attribute names and optionally
- * also value lengths. Positive return value follows the XFS
- * convention of being an error, zero or negative return code
- * is the length of the buffer returned (negated), indicating
- * success.
- */
-int
-xfs_attr_list(
- xfs_inode_t *dp,
- char *buffer,
- int bufsize,
- int flags,
- attrlist_cursor_kern_t *cursor)
-{
- xfs_attr_list_context_t context;
- struct attrlist *alist;
- int error;
-
- /*
- * Validate the cursor.
- */
- if (cursor->pad1 || cursor->pad2)
- return -EINVAL;
- if ((cursor->initted == 0) &&
- (cursor->hashval || cursor->blkno || cursor->offset))
- return -EINVAL;
-
- /* Only internal consumers can retrieve incomplete attrs. */
- if (flags & ATTR_INCOMPLETE)
- return -EINVAL;
-
- /*
- * Check for a properly aligned buffer.
- */
- if (((long)buffer) & (sizeof(int)-1))
- return -EFAULT;
- if (flags & ATTR_KERNOVAL)
- bufsize = 0;
-
- /*
- * Initialize the output buffer.
- */
- memset(&context, 0, sizeof(context));
- context.dp = dp;
- context.cursor = cursor;
- context.resynch = 1;
- context.flags = flags;
- context.alist = buffer;
- context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */
- context.firstu = context.bufsize;
- context.put_listent = xfs_attr_put_listent;
-
- alist = (struct attrlist *)context.alist;
- alist->al_count = 0;
- alist->al_more = 0;
- alist->al_offset[0] = context.bufsize;
-
- error = xfs_attr_list_int(&context);
- ASSERT(error <= 0);
- return error;
-}
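
Note: the removed ATTR_ENTBASESIZE macro computes the a_name offset with the old null-pointer idiom; the replacement logic in xfs_ioc_attr_put_listent() (later in this patch) expresses the same arithmetic with offsetof() plus round_up(). A small stand-alone demonstration that the two forms agree; the struct here is a simplified stand-in for illustration, not the kernel definition:

/*
 * Stand-alone demo (not kernel code): old macro vs. offsetof() form.
 * The struct layout is an illustrative stand-in only.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct attrlist_ent { uint32_t a_valuelen; char a_name[1]; };

/* old form: null-pointer offsetof idiom, as removed above */
#define ATTR_ENTBASESIZE (((struct attrlist_ent *)0)->a_name - (char *)0)
#define ATTR_ENTSIZE(namelen) \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(uint32_t)-1) \
	 & ~(sizeof(uint32_t)-1))

/* new form: offsetof() + name + NUL, rounded up to a uint32_t boundary */
static size_t attr_entsize(int namelen)
{
	size_t raw = offsetof(struct attrlist_ent, a_name) + namelen + 1;

	return (raw + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1);
}

int main(void)
{
	for (int n = 1; n < 256; n++)
		assert(ATTR_ENTSIZE(n) == attr_entsize(n));
	return 0;
}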
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index e62fb5216341..4f800f7fe888 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1062,7 +1062,6 @@ xfs_collapse_file_space(
int error;
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
- uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
bool done = false;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
@@ -1078,32 +1077,34 @@ xfs_collapse_file_space(
if (error)
return error;
- while (!error && !done) {
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
- &tp);
- if (error)
- break;
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
+ if (error)
+ return error;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
- ip->i_gdquot, ip->i_pdquot, resblks, 0,
- XFS_QMOPT_RES_REGBLKS);
- if (error)
- goto out_trans_cancel;
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+ while (!done) {
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
&done);
if (error)
goto out_trans_cancel;
+ if (done)
+ break;
- error = xfs_trans_commit(tp);
+ /* finish any deferred frees and roll the transaction */
+ error = xfs_defer_finish(&tp);
+ if (error)
+ goto out_trans_cancel;
}
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
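
Note: the hunk above is easier to follow reassembled from its added lines. xfs_collapse_file_space() now allocates a single transaction with no block reservation (the quota reservation is gone as well), joins the inode once, and relies on xfs_defer_finish() to finish the deferred extent frees and roll to a fresh transaction on each pass, so the inode stays locked and joined throughout. A sketch of the resulting flow:

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);	/* joined once; survives the rolls */

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
				shift_fsb, &done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;

		/* finish deferred frees; rolls tp to a fresh transaction */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);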
@@ -1146,35 +1147,41 @@ xfs_insert_file_space(
if (error)
return error;
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
+ XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
/*
* The extent shifting code works on extent granularity. So, if stop_fsb
* is not the starting block of extent, we need to split the extent at
* stop_fsb.
*/
- error = xfs_bmap_split_extent(ip, stop_fsb);
+ error = xfs_bmap_split_extent(tp, ip, stop_fsb);
if (error)
- return error;
+ goto out_trans_cancel;
- while (!error && !done) {
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
- &tp);
+ do {
+ error = xfs_trans_roll_inode(&tp, ip);
if (error)
- break;
+ goto out_trans_cancel;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
&done, stop_fsb);
if (error)
goto out_trans_cancel;
+ } while (!done);
- error = xfs_trans_commit(tp);
- }
-
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
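
Note: xfs_insert_file_space() gets the same restructuring via xfs_trans_roll_inode(), which commits the current transaction, allocates its successor, and re-joins the still-locked inode; note also that xfs_bmap_split_extent() now runs inside the caller's transaction rather than allocating its own. The resulting loop, reassembled from the added lines:

	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
	if (error)
		goto out_trans_cancel;

	do {
		/* commit tp, allocate a successor, re-join ip; ILOCK held */
		error = xfs_trans_roll_inode(&tp, ip);
		if (error)
			goto out_trans_cancel;

		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;
	} while (!done);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);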
@@ -1442,12 +1449,12 @@ xfs_swap_extent_forks(
* event of a crash. Set the owner change log flags now and leave the
* bmbt scan as the last step.
*/
- if (ip->i_d.di_version == 3 &&
- ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
- (*target_log_flags) |= XFS_ILOG_DOWNER;
- if (tip->i_d.di_version == 3 &&
- tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
- (*src_log_flags) |= XFS_ILOG_DOWNER;
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ (*target_log_flags) |= XFS_ILOG_DOWNER;
+ if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ (*src_log_flags) |= XFS_ILOG_DOWNER;
+ }
/*
* Swap the data forks of the inodes
@@ -1482,7 +1489,7 @@ xfs_swap_extent_forks(
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
- ASSERT(ip->i_d.di_version < 3 ||
+ ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
(*src_log_flags & XFS_ILOG_DOWNER));
(*src_log_flags) |= XFS_ILOG_DBROOT;
break;
@@ -1494,7 +1501,7 @@ xfs_swap_extent_forks(
break;
case XFS_DINODE_FMT_BTREE:
(*target_log_flags) |= XFS_ILOG_DBROOT;
- ASSERT(tip->i_d.di_version < 3 ||
+ ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
(*target_log_flags & XFS_ILOG_DOWNER));
break;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 217e4f82a44a..f880141a2268 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -727,8 +727,9 @@ found:
if (!bp->b_addr) {
error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
- xfs_warn(target->bt_mount,
- "%s: failed to map pages\n", __func__);
+ xfs_warn_ratelimited(target->bt_mount,
+ "%s: failed to map %u pages", __func__,
+ bp->b_page_count);
xfs_buf_relse(bp);
return error;
}
@@ -1238,7 +1239,7 @@ xfs_buf_ioerror_alert(
struct xfs_buf *bp,
xfs_failaddr_t func)
{
- xfs_alert(bp->b_mount,
+ xfs_alert_ratelimited(bp->b_mount,
"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
-bp->b_error);
@@ -1573,6 +1574,28 @@ xfs_buf_zero(
}
/*
+ * Log a message about and stale a buffer that a caller has decided is corrupt.
+ *
+ * This function should be called for the kinds of metadata corruption that
+ * cannot be detected by a verifier, such as incorrect inter-block relationship
+ * data. Do /not/ call this function from a verifier function.
+ *
+ * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
+ * be marked stale, but b_error will not be set. The caller is responsible for
+ * releasing the buffer or fixing it.
+ */
+void
+__xfs_buf_mark_corrupt(
+ struct xfs_buf *bp,
+ xfs_failaddr_t fa)
+{
+ ASSERT(bp->b_flags & XBF_DONE);
+
+ xfs_buf_corruption_error(bp, fa);
+ xfs_buf_stale(bp);
+}
+
+/*
* Handling of buffer targets (buftargs).
*/
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index d79a1fe5d738..9a04c53c2488 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -272,6 +272,8 @@ static inline int xfs_buf_submit(struct xfs_buf *bp)
}
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
+void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
+#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)
/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
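
Note: a hypothetical call site for the new helper, modeled on the xfs_iunlink() conversions later in this patch; the check name is illustrative, not a real function. A consistency test that no verifier could perform fails, the buffer is staled with the caller's address recorded, and the caller returns -EFSCORRUPTED itself, since b_error is left unset:

	if (!relationship_is_sane(mp, agbp)) {	/* beyond what verifiers see */
		xfs_buf_mark_corrupt(agbp);	/* logs __this_address, stales bp */
		return -EFSCORRUPTED;		/* b_error unset: caller reports */
	}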
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 663810e6cd59..1545657c3ca0 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -345,7 +345,7 @@ xfs_buf_item_format(
* occurs during recovery.
*/
if (bip->bli_flags & XFS_BLI_INODE_BUF) {
- if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
+ if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
xfs_log_item_in_current_chkpt(lip)))
bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 0d3b640cf1cc..871ec22c9aee 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -147,7 +147,7 @@ xfs_dir2_block_getdents(
xfs_off_t cook;
struct xfs_da_geometry *geo = args->geo;
int lock_mode;
- unsigned int offset;
+ unsigned int offset, next_offset;
unsigned int end;
/*
@@ -173,9 +173,10 @@ xfs_dir2_block_getdents(
* Loop over the data portion of the block.
* Each object is a real entry (dep) or an unused one (dup).
*/
- offset = geo->data_entry_offset;
end = xfs_dir3_data_end_offset(geo, bp->b_addr);
- while (offset < end) {
+ for (offset = geo->data_entry_offset;
+ offset < end;
+ offset = next_offset) {
struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
uint8_t filetype;
@@ -184,14 +185,15 @@ xfs_dir2_block_getdents(
* Unused, skip it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- offset += be16_to_cpu(dup->length);
+ next_offset = offset + be16_to_cpu(dup->length);
continue;
}
/*
* Bump pointer for the next iteration.
*/
- offset += xfs_dir2_data_entsize(dp->i_mount, dep->namelen);
+ next_offset = offset +
+ xfs_dir2_data_entsize(dp->i_mount, dep->namelen);
/*
* The entry is before the desired starting point, skip it.
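
Note: the point of next_offset is that the loop body may continue from more than one place (unused space, entries before the cursor) while the emit path still needs offset to address the current entry; computing the stride up front keeps those two roles separate. The loop shape after this hunk, reassembled:

	for (offset = geo->data_entry_offset;
	     offset < end;
	     offset = next_offset) {
		struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
		struct xfs_dir2_data_entry *dep = bp->b_addr + offset;

		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			next_offset = offset + be16_to_cpu(dup->length);
			continue;		/* unused space, skip it */
		}

		next_offset = offset +
			xfs_dir2_data_entsize(dp->i_mount, dep->namelen);
		/* ... filter and emit the entry still addressed by offset ... */
	}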
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 0b8350e84d28..f979d0d7e6cd 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -31,6 +31,7 @@ xfs_trim_extents(
struct block_device *bdev = mp->m_ddev_targp->bt_bdev;
struct xfs_btree_cur *cur;
struct xfs_buf *agbp;
+ struct xfs_agf *agf;
struct xfs_perag *pag;
int error;
int i;
@@ -47,14 +48,14 @@ xfs_trim_extents(
error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
if (error)
goto out_put_perag;
+ agf = agbp->b_addr;
cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
/*
* Look up the longest btree in the AGF and start with it.
*/
- error = xfs_alloc_lookup_ge(cur, 0,
- be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
+ error = xfs_alloc_lookup_ge(cur, 0, be32_to_cpu(agf->agf_longest), &i);
if (error)
goto out_del_cursor;
@@ -75,7 +76,7 @@ xfs_trim_extents(
error = -EFSCORRUPTED;
goto out_del_cursor;
}
- ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
+ ASSERT(flen <= be32_to_cpu(agf->agf_longest));
/*
* use daddr format for all range/len calculations as that is
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index d223e1ae90a6..711376ca269f 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -829,9 +829,9 @@ xfs_qm_id_for_quotatype(
{
switch (type) {
case XFS_DQ_USER:
- return ip->i_d.di_uid;
+ return i_uid_read(VFS_I(ip));
case XFS_DQ_GROUP:
- return ip->i_d.di_gid;
+ return i_gid_read(VFS_I(ip));
case XFS_DQ_PROJ:
return ip->i_d.di_projid;
}
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index d60647d7197b..cf65e2e43c6e 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -307,36 +307,62 @@ xfs_qm_qoffend_logitem_committed(
{
struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip);
struct xfs_qoff_logitem *qfs = qfe->qql_start_lip;
- struct xfs_ail *ailp = qfs->qql_item.li_ailp;
- /*
- * Delete the qoff-start logitem from the AIL.
- * xfs_trans_ail_delete() drops the AIL lock.
- */
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_qm_qoff_logitem_relse(qfs);
- kmem_free(qfs->qql_item.li_lv_shadow);
kmem_free(lip->li_lv_shadow);
- kmem_free(qfs);
kmem_free(qfe);
return (xfs_lsn_t)-1;
}
+STATIC void
+xfs_qm_qoff_logitem_release(
+ struct xfs_log_item *lip)
+{
+ struct xfs_qoff_logitem *qoff = QOFF_ITEM(lip);
+
+ if (test_bit(XFS_LI_ABORTED, &lip->li_flags)) {
+ if (qoff->qql_start_lip)
+ xfs_qm_qoff_logitem_relse(qoff->qql_start_lip);
+ xfs_qm_qoff_logitem_relse(qoff);
+ }
+}
+
static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_size = xfs_qm_qoff_logitem_size,
.iop_format = xfs_qm_qoff_logitem_format,
.iop_committed = xfs_qm_qoffend_logitem_committed,
.iop_push = xfs_qm_qoff_logitem_push,
+ .iop_release = xfs_qm_qoff_logitem_release,
};
static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_size = xfs_qm_qoff_logitem_size,
.iop_format = xfs_qm_qoff_logitem_format,
.iop_push = xfs_qm_qoff_logitem_push,
+ .iop_release = xfs_qm_qoff_logitem_release,
};
/*
+ * Delete the quotaoff intent from the AIL and free it. On success,
+ * this should only be called for the start item; it can be used on
+ * either shutdown or abort.
+ */
+void
+xfs_qm_qoff_logitem_relse(
+ struct xfs_qoff_logitem *qoff)
+{
+ struct xfs_log_item *lip = &qoff->qql_item;
+
+ ASSERT(test_bit(XFS_LI_IN_AIL, &lip->li_flags) ||
+ test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
+ XFS_FORCED_SHUTDOWN(lip->li_mountp));
+ xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+ kmem_free(lip->li_lv_shadow);
+ kmem_free(qoff);
+}
+
+/*
* Allocate and initialize a quotaoff item of the correct quota type(s).
*/
struct xfs_qoff_logitem *
diff --git a/fs/xfs/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
index 3bb19e556ade..2b86a43d7ce2 100644
--- a/fs/xfs/xfs_dquot_item.h
+++ b/fs/xfs/xfs_dquot_item.h
@@ -28,6 +28,7 @@ void xfs_qm_dquot_logitem_init(struct xfs_dquot *dqp);
struct xfs_qoff_logitem *xfs_qm_qoff_logitem_init(struct xfs_mount *mp,
struct xfs_qoff_logitem *start,
uint flags);
+void xfs_qm_qoff_logitem_relse(struct xfs_qoff_logitem *);
struct xfs_qoff_logitem *xfs_trans_get_qoff_item(struct xfs_trans *tp,
struct xfs_qoff_logitem *startqoff,
uint flags);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 331765afc53e..a21e9cc6516a 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -345,16 +345,19 @@ xfs_corruption_error(
* Complain about the kinds of metadata corruption that we can't detect with a
* verifier, such as incorrect inter-block relationship data. Does not set
* bp->b_error.
+ *
+ * Call xfs_buf_mark_corrupt, not this function.
*/
void
xfs_buf_corruption_error(
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ xfs_failaddr_t fa)
{
struct xfs_mount *mp = bp->b_mount;
xfs_alert_tag(mp, XFS_PTAG_VERIFIER_ERROR,
"Metadata corruption detected at %pS, %s block 0x%llx",
- __return_address, bp->b_ops->name, bp->b_bn);
+ fa, bp->b_ops->name, bp->b_bn);
xfs_alert(mp, "Unmount and run xfs_repair");
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 31a5d321ba9a..1717b7508356 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -15,7 +15,7 @@ extern void xfs_corruption_error(const char *tag, int level,
struct xfs_mount *mp, const void *buf, size_t bufsize,
const char *filename, int linenum,
xfs_failaddr_t failaddr);
-void xfs_buf_corruption_error(struct xfs_buf *bp);
+void xfs_buf_corruption_error(struct xfs_buf *bp, xfs_failaddr_t fa);
extern void xfs_buf_verifier_error(struct xfs_buf *bp, int error,
const char *name, const void *buf, size_t bufsz,
xfs_failaddr_t failaddr);
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 918456ca29e1..4eebcec4aae6 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -344,7 +344,7 @@ xfs_getfsmap_datadev_helper(
xfs_fsblock_t fsb;
xfs_daddr_t rec_daddr;
- fsb = XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, rec->rm_startblock);
+ fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.agno, rec->rm_startblock);
rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);
return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
@@ -362,7 +362,7 @@ xfs_getfsmap_datadev_bnobt_helper(
struct xfs_rmap_irec irec;
xfs_daddr_t rec_daddr;
- rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_private.a.agno,
+ rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.agno,
rec->ar_startblock);
irec.rm_startblock = rec->ar_startblock;
@@ -896,6 +896,14 @@ xfs_getfsmap(
info.format_arg = arg;
info.head = head;
+ /*
+ * If fsmap runs concurrently with a scrub, the freeze can be delayed
+ * indefinitely as we walk the rmapbt and iterate over metadata
+ * buffers. Freeze quiesces the log (which waits for the buffer LRU to
+ * be emptied) and that won't happen while we're reading buffers.
+ */
+ sb_start_write(mp->m_super);
+
/* For each device we support... */
for (i = 0; i < XFS_GETFSMAP_DEVS; i++) {
/* Is this device within the range the user asked for? */
@@ -935,6 +943,7 @@ xfs_getfsmap(
if (tp)
xfs_trans_cancel(tp);
+ sb_end_write(mp->m_super);
head->fmh_oflags = FMH_OF_DEV_T;
return error;
}
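
Note: the pairing works like a shared lock against freeze. sb_start_write() takes a freeze-protection reference, so a freeze that starts mid-walk waits for it to drain rather than deadlocking on pinned buffers, and fsmap calls arriving after a freeze has begun block until thaw. A minimal sketch of the discipline; the walk helper name is illustrative only:

	sb_start_write(mp->m_super);		/* hold off freeze for the walk */
	error = walk_all_rmap_records(mp);	/* illustrative long-running walk */
	sb_end_write(mp->m_super);		/* must run on every exit path */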
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 8dc2e5414276..a7be7a9e5c1a 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -289,6 +289,8 @@ xfs_reinit_inode(
uint64_t version = inode_peek_iversion(inode);
umode_t mode = inode->i_mode;
dev_t dev = inode->i_rdev;
+ kuid_t uid = inode->i_uid;
+ kgid_t gid = inode->i_gid;
error = inode_init_always(mp->m_super, inode);
@@ -297,6 +299,8 @@ xfs_reinit_inode(
inode_set_iversion_queried(inode, version);
inode->i_mode = mode;
inode->i_rdev = dev;
+ inode->i_uid = uid;
+ inode->i_gid = gid;
return error;
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c5077e6326c7..14b922f2a6db 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -801,26 +801,18 @@ xfs_ialloc(
return error;
ASSERT(ip != NULL);
inode = VFS_I(ip);
-
- /*
- * We always convert v1 inodes to v2 now - we only support filesystems
- * with >= v2 inode capability, so there is no reason for ever leaving
- * an inode in v1 format.
- */
- if (ip->i_d.di_version == 1)
- ip->i_d.di_version = 2;
-
inode->i_mode = mode;
set_nlink(inode, nlink);
- ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
- ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
+ inode->i_uid = current_fsuid();
inode->i_rdev = rdev;
ip->i_d.di_projid = prid;
if (pip && XFS_INHERIT_GID(pip)) {
- ip->i_d.di_gid = pip->i_d.di_gid;
+ inode->i_gid = VFS_I(pip)->i_gid;
if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
inode->i_mode |= S_ISGID;
+ } else {
+ inode->i_gid = current_fsgid();
}
/*
@@ -828,9 +820,8 @@ xfs_ialloc(
* ID or one of the supplementary group IDs, the S_ISGID bit is cleared
* (and only if the irix_sgid_inherit compatibility variable is set).
*/
- if ((irix_sgid_inherit) &&
- (inode->i_mode & S_ISGID) &&
- (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
+ if (irix_sgid_inherit &&
+ (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
inode->i_mode &= ~S_ISGID;
ip->i_d.di_size = 0;
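
Note: with di_uid/di_gid gone, ownership is now initialized purely through the VFS inode. Reassembled from the added lines, the inheritance rules in xfs_ialloc() read:

	inode->i_uid = current_fsuid();
	if (pip && XFS_INHERIT_GID(pip)) {
		/* grpid semantics: the child takes the parent's group ... */
		inode->i_gid = VFS_I(pip)->i_gid;
		/* ... and a new directory inherits the parent's setgid bit */
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	/* irix_sgid_inherit: drop setgid if the creator isn't in the group */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
		inode->i_mode &= ~S_ISGID;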
@@ -847,14 +838,13 @@ xfs_ialloc(
ip->i_d.di_dmstate = 0;
ip->i_d.di_flags = 0;
- if (ip->i_d.di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
inode_set_iversion(inode, 1);
ip->i_d.di_flags2 = 0;
ip->i_d.di_cowextsize = 0;
ip->i_d.di_crtime = tv;
}
-
flags = XFS_ILOG_CORE;
switch (mode & S_IFMT) {
case S_IFIFO:
@@ -907,20 +897,13 @@ xfs_ialloc(
ip->i_d.di_flags |= di_flags;
}
- if (pip &&
- (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
- pip->i_d.di_version == 3 &&
- ip->i_d.di_version == 3) {
- uint64_t di_flags2 = 0;
-
+ if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
- di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+ ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
}
if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
- di_flags2 |= XFS_DIFLAG2_DAX;
-
- ip->i_d.di_flags2 |= di_flags2;
+ ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
}
/* FALLTHROUGH */
case S_IFLNK:
@@ -1122,7 +1105,6 @@ xfs_bumplink(
{
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
- ASSERT(ip->i_d.di_version > 1);
inc_nlink(VFS_I(ip));
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
@@ -1158,8 +1140,7 @@ xfs_create(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
- xfs_kgid_to_gid(current_fsgid()), prid,
+ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
@@ -1309,8 +1290,7 @@ xfs_create_tmpfile(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
- xfs_kgid_to_gid(current_fsgid()), prid,
+ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
@@ -2119,7 +2099,7 @@ xfs_iunlink_update_bucket(
unsigned int bucket_index,
xfs_agino_t new_agino)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
+ struct xfs_agi *agi = agibp->b_addr;
xfs_agino_t old_value;
int offset;
@@ -2135,7 +2115,7 @@ xfs_iunlink_update_bucket(
* head of the list.
*/
if (old_value == new_agino) {
- xfs_buf_corruption_error(agibp);
+ xfs_buf_mark_corrupt(agibp);
return -EFSCORRUPTED;
}
@@ -2259,7 +2239,7 @@ xfs_iunlink(
error = xfs_read_agi(mp, tp, agno, &agibp);
if (error)
return error;
- agi = XFS_BUF_TO_AGI(agibp);
+ agi = agibp->b_addr;
/*
* Get the index into the agi hash table for the list this inode will
@@ -2269,7 +2249,7 @@ xfs_iunlink(
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
if (next_agino == agino ||
!xfs_verify_agino_or_null(mp, agno, next_agino)) {
- xfs_buf_corruption_error(agibp);
+ xfs_buf_mark_corrupt(agibp);
return -EFSCORRUPTED;
}
@@ -2443,7 +2423,7 @@ xfs_iunlink_remove(
error = xfs_read_agi(mp, tp, agno, &agibp);
if (error)
return error;
- agi = XFS_BUF_TO_AGI(agibp);
+ agi = agibp->b_addr;
/*
* Get the index into the agi hash table for the list this inode will
@@ -3807,7 +3787,6 @@ xfs_iflush_int(
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
ASSERT(iip != NULL && iip->ili_fields != 0);
- ASSERT(ip->i_d.di_version > 1);
/* set *dip = inode's place in the buffer */
dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -3868,7 +3847,7 @@ xfs_iflush_int(
* backwards compatibility with old kernels that predate logging all
* inode changes.
*/
- if (ip->i_d.di_version < 3)
+ if (!xfs_sb_version_has_v3inode(&mp->m_sb))
ip->i_d.di_flushiter++;
/* Check the inline fork data before we write out. */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 8bd5d0de6321..4a3d13d4a022 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -125,7 +125,7 @@ xfs_inode_item_size(
*nvecs += 2;
*nbytes += sizeof(struct xfs_inode_log_format) +
- xfs_log_dinode_size(ip->i_d.di_version);
+ xfs_log_dinode_size(ip->i_mount);
xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
if (XFS_IFORK_Q(ip))
@@ -305,11 +305,9 @@ xfs_inode_to_log_dinode(
struct inode *inode = VFS_I(ip);
to->di_magic = XFS_DINODE_MAGIC;
-
- to->di_version = from->di_version;
to->di_format = from->di_format;
- to->di_uid = from->di_uid;
- to->di_gid = from->di_gid;
+ to->di_uid = i_uid_read(inode);
+ to->di_gid = i_gid_read(inode);
to->di_projid_lo = from->di_projid & 0xffff;
to->di_projid_hi = from->di_projid >> 16;
@@ -339,7 +337,8 @@ xfs_inode_to_log_dinode(
/* log a dummy value to ensure log structure is fully initialised */
to->di_next_unlinked = NULLAGINO;
- if (from->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ to->di_version = 3;
to->di_changecount = inode_peek_iversion(inode);
to->di_crtime.t_sec = from->di_crtime.tv_sec;
to->di_crtime.t_nsec = from->di_crtime.tv_nsec;
@@ -351,6 +350,7 @@ xfs_inode_to_log_dinode(
uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
to->di_flushiter = 0;
} else {
+ to->di_version = 2;
to->di_flushiter = from->di_flushiter;
}
}
@@ -370,7 +370,7 @@ xfs_inode_item_format_core(
dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
- xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_d.di_version));
+ xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}
/*
@@ -395,8 +395,6 @@ xfs_inode_item_format(
struct xfs_log_iovec *vecp = NULL;
struct xfs_inode_log_format *ilf;
- ASSERT(ip->i_d.di_version > 1);
-
ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
ilf->ilf_type = XFS_LI_INODE;
ilf->ilf_ino = ip->i_ino;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d42de92cb283..cdfb3cd9a25b 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -35,6 +35,8 @@
#include "xfs_health.h"
#include "xfs_reflink.h"
#include "xfs_ioctl.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
#include <linux/mount.h>
#include <linux/namei.h>
@@ -292,62 +294,173 @@ xfs_readlink_by_handle(
return error;
}
-STATIC int
-xfs_attrlist_by_handle(
- struct file *parfilp,
- void __user *arg)
+/*
+ * Format an attribute and copy it out to the user's buffer.
+ * Take care to check values and protect against them changing later;
+ * we may be reading them directly out of a user buffer.
+ */
+static void
+xfs_ioc_attr_put_listent(
+ struct xfs_attr_list_context *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen)
{
- int error = -ENOMEM;
- attrlist_cursor_kern_t *cursor;
- struct xfs_fsop_attrlist_handlereq __user *p = arg;
- xfs_fsop_attrlist_handlereq_t al_hreq;
- struct dentry *dentry;
- char *kbuf;
+ struct xfs_attrlist *alist = context->buffer;
+ struct xfs_attrlist_ent *aep;
+ int arraytop;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
- return -EFAULT;
- if (al_hreq.buflen < sizeof(struct attrlist) ||
- al_hreq.buflen > XFS_XATTR_LIST_MAX)
+ ASSERT(!context->seen_enough);
+ ASSERT(context->count >= 0);
+ ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
+ ASSERT(context->firstu >= sizeof(*alist));
+ ASSERT(context->firstu <= context->bufsize);
+
+ /*
+ * Only list entries in the right namespace.
+ */
+ if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
+ return;
+
+ arraytop = sizeof(*alist) +
+ context->count * sizeof(alist->al_offset[0]);
+
+ /* decrement by the actual bytes used by the attr */
+ context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
+ namelen + 1, sizeof(uint32_t));
+ if (context->firstu < arraytop) {
+ trace_xfs_attr_list_full(context);
+ alist->al_more = 1;
+ context->seen_enough = 1;
+ return;
+ }
+
+ aep = context->buffer + context->firstu;
+ aep->a_valuelen = valuelen;
+ memcpy(aep->a_name, name, namelen);
+ aep->a_name[namelen] = 0;
+ alist->al_offset[context->count++] = context->firstu;
+ alist->al_count = context->count;
+ trace_xfs_attr_list_add(context);
+}
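
Note: the listing buffer is filled from both ends. The xfs_attrlist header and its growing al_offset[] array sit at the front, entries are packed downward from the tail, and firstu tracks the low-water mark; the listing stops (al_more = 1) when the two would collide. A userspace-style consumer sketch under that layout, assuming xfs_fs.h-style definitions of xfs_attrlist and xfs_attrlist_ent:

	/* Sketch: walk a filled buffer; stdio is for illustration only. */
	#include <stdio.h>

	static void dump_attrlist(void *buffer)
	{
		struct xfs_attrlist *alist = buffer;
		int i;

		for (i = 0; i < alist->al_count; i++) {
			struct xfs_attrlist_ent *aep =
				(struct xfs_attrlist_ent *)
				((char *)buffer + alist->al_offset[i]);

			printf("%s (value %u bytes)\n", aep->a_name,
					(unsigned int)aep->a_valuelen);
		}
		if (alist->al_more)
			puts("more entries: repeat with the returned cursor");
	}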
+
+static unsigned int
+xfs_attr_filter(
+ u32 ioc_flags)
+{
+ if (ioc_flags & XFS_IOC_ATTR_ROOT)
+ return XFS_ATTR_ROOT;
+ if (ioc_flags & XFS_IOC_ATTR_SECURE)
+ return XFS_ATTR_SECURE;
+ return 0;
+}
+
+static unsigned int
+xfs_attr_flags(
+ u32 ioc_flags)
+{
+ if (ioc_flags & XFS_IOC_ATTR_CREATE)
+ return XATTR_CREATE;
+ if (ioc_flags & XFS_IOC_ATTR_REPLACE)
+ return XATTR_REPLACE;
+ return 0;
+}
+
+int
+xfs_ioc_attr_list(
+ struct xfs_inode *dp,
+ void __user *ubuf,
+ int bufsize,
+ int flags,
+ struct xfs_attrlist_cursor __user *ucursor)
+{
+ struct xfs_attr_list_context context = { };
+ struct xfs_attrlist *alist;
+ void *buffer;
+ int error;
+
+ if (bufsize < sizeof(struct xfs_attrlist) ||
+ bufsize > XFS_XATTR_LIST_MAX)
return -EINVAL;
/*
* Reject flags, only allow namespaces.
*/
- if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
+ if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
+ return -EINVAL;
+ if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
return -EINVAL;
- dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
+ /*
+ * Validate the cursor.
+ */
+ if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
+ return -EFAULT;
+ if (context.cursor.pad1 || context.cursor.pad2)
+ return -EINVAL;
+ if (!context.cursor.initted &&
+ (context.cursor.hashval || context.cursor.blkno ||
+ context.cursor.offset))
+ return -EINVAL;
- kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
- if (!kbuf)
- goto out_dput;
+ buffer = kmem_zalloc_large(bufsize, 0);
+ if (!buffer)
+ return -ENOMEM;
- cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
- al_hreq.flags, cursor);
+ /*
+ * Initialize the output buffer.
+ */
+ context.dp = dp;
+ context.resynch = 1;
+ context.attr_filter = xfs_attr_filter(flags);
+ context.buffer = buffer;
+ context.bufsize = round_down(bufsize, sizeof(uint32_t));
+ context.firstu = context.bufsize;
+ context.put_listent = xfs_ioc_attr_put_listent;
+
+ alist = context.buffer;
+ alist->al_count = 0;
+ alist->al_more = 0;
+ alist->al_offset[0] = context.bufsize;
+
+ error = xfs_attr_list(&context);
if (error)
- goto out_kfree;
+ goto out_free;
- if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
+ if (copy_to_user(ubuf, buffer, bufsize) ||
+ copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
error = -EFAULT;
- goto out_kfree;
- }
+out_free:
+ kmem_free(buffer);
+ return error;
+}
- if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
- error = -EFAULT;
+STATIC int
+xfs_attrlist_by_handle(
+ struct file *parfilp,
+ struct xfs_fsop_attrlist_handlereq __user *p)
+{
+ struct xfs_fsop_attrlist_handlereq al_hreq;
+ struct dentry *dentry;
+ int error = -ENOMEM;
-out_kfree:
- kmem_free(kbuf);
-out_dput:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
+ return -EFAULT;
+
+ dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
+ al_hreq.buflen, al_hreq.flags, &p->pos);
dput(dentry);
return error;
}
-int
+static int
xfs_attrmulti_attr_get(
struct inode *inode,
unsigned char *name,
@@ -355,31 +468,33 @@ xfs_attrmulti_attr_get(
uint32_t *len,
uint32_t flags)
{
- unsigned char *kbuf;
- int error = -EFAULT;
- size_t namelen;
+ struct xfs_da_args args = {
+ .dp = XFS_I(inode),
+ .attr_filter = xfs_attr_filter(flags),
+ .attr_flags = xfs_attr_flags(flags),
+ .name = name,
+ .namelen = strlen(name),
+ .valuelen = *len,
+ };
+ int error;
if (*len > XFS_XATTR_SIZE_MAX)
return -EINVAL;
- kbuf = kmem_zalloc_large(*len, 0);
- if (!kbuf)
- return -ENOMEM;
- namelen = strlen(name);
- error = xfs_attr_get(XFS_I(inode), name, namelen, &kbuf, (int *)len,
- flags);
+ error = xfs_attr_get(&args);
if (error)
goto out_kfree;
- if (copy_to_user(ubuf, kbuf, *len))
+ *len = args.valuelen;
+ if (copy_to_user(ubuf, args.value, args.valuelen))
error = -EFAULT;
out_kfree:
- kmem_free(kbuf);
+ kmem_free(args.value);
return error;
}
-int
+static int
xfs_attrmulti_attr_set(
struct inode *inode,
unsigned char *name,
@@ -387,42 +502,75 @@ xfs_attrmulti_attr_set(
uint32_t len,
uint32_t flags)
{
- unsigned char *kbuf;
+ struct xfs_da_args args = {
+ .dp = XFS_I(inode),
+ .attr_filter = xfs_attr_filter(flags),
+ .attr_flags = xfs_attr_flags(flags),
+ .name = name,
+ .namelen = strlen(name),
+ };
int error;
- size_t namelen;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
- if (len > XFS_XATTR_SIZE_MAX)
- return -EINVAL;
- kbuf = memdup_user(ubuf, len);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
+ if (ubuf) {
+ if (len > XFS_XATTR_SIZE_MAX)
+ return -EINVAL;
+ args.value = memdup_user(ubuf, len);
+ if (IS_ERR(args.value))
+ return PTR_ERR(args.value);
+ args.valuelen = len;
+ }
- namelen = strlen(name);
- error = xfs_attr_set(XFS_I(inode), name, namelen, kbuf, len, flags);
- if (!error)
- xfs_forget_acl(inode, name, flags);
- kfree(kbuf);
+ error = xfs_attr_set(&args);
+ if (!error && (flags & XFS_IOC_ATTR_ROOT))
+ xfs_forget_acl(inode, name);
+ kfree(args.value);
return error;
}
int
-xfs_attrmulti_attr_remove(
+xfs_ioc_attrmulti_one(
+ struct file *parfilp,
struct inode *inode,
- unsigned char *name,
+ uint32_t opcode,
+ void __user *uname,
+ void __user *value,
+ uint32_t *len,
uint32_t flags)
{
+ unsigned char *name;
int error;
- size_t namelen;
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
- namelen = strlen(name);
- error = xfs_attr_remove(XFS_I(inode), name, namelen, flags);
- if (!error)
- xfs_forget_acl(inode, name, flags);
+ if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
+ return -EINVAL;
+
+ name = strndup_user(uname, MAXNAMELEN);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ switch (opcode) {
+ case ATTR_OP_GET:
+ error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
+ break;
+ case ATTR_OP_REMOVE:
+ value = NULL;
+ *len = 0;
+ /* fall through */
+ case ATTR_OP_SET:
+ error = mnt_want_write_file(parfilp);
+ if (error)
+ break;
+ error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
+ mnt_drop_write_file(parfilp);
+ break;
+ default:
+ error = -EINVAL;
+ break;
+ }
+
+ kfree(name);
return error;
}
@@ -436,7 +584,6 @@ xfs_attrmulti_by_handle(
xfs_fsop_attrmulti_handlereq_t am_hreq;
struct dentry *dentry;
unsigned int i, size;
- unsigned char *attr_name;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -462,63 +609,17 @@ xfs_attrmulti_by_handle(
goto out_dput;
}
- error = -ENOMEM;
- attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
- if (!attr_name)
- goto out_kfree_ops;
-
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
- if ((ops[i].am_flags & ATTR_ROOT) &&
- (ops[i].am_flags & ATTR_SECURE)) {
- ops[i].am_error = -EINVAL;
- continue;
- }
- ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
-
- ops[i].am_error = strncpy_from_user((char *)attr_name,
- ops[i].am_attrname, MAXNAMELEN);
- if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
- error = -ERANGE;
- if (ops[i].am_error < 0)
- break;
-
- switch (ops[i].am_opcode) {
- case ATTR_OP_GET:
- ops[i].am_error = xfs_attrmulti_attr_get(
- d_inode(dentry), attr_name,
- ops[i].am_attrvalue, &ops[i].am_length,
- ops[i].am_flags);
- break;
- case ATTR_OP_SET:
- ops[i].am_error = mnt_want_write_file(parfilp);
- if (ops[i].am_error)
- break;
- ops[i].am_error = xfs_attrmulti_attr_set(
- d_inode(dentry), attr_name,
- ops[i].am_attrvalue, ops[i].am_length,
- ops[i].am_flags);
- mnt_drop_write_file(parfilp);
- break;
- case ATTR_OP_REMOVE:
- ops[i].am_error = mnt_want_write_file(parfilp);
- if (ops[i].am_error)
- break;
- ops[i].am_error = xfs_attrmulti_attr_remove(
- d_inode(dentry), attr_name,
- ops[i].am_flags);
- mnt_drop_write_file(parfilp);
- break;
- default:
- ops[i].am_error = -EINVAL;
- }
+ ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
+ d_inode(dentry), ops[i].am_opcode,
+ ops[i].am_attrname, ops[i].am_attrvalue,
+ &ops[i].am_length, ops[i].am_flags);
}
if (copy_to_user(am_hreq.ops, ops, size))
error = -EFAULT;
- kfree(attr_name);
- out_kfree_ops:
kfree(ops);
out_dput:
dput(dentry);
@@ -1162,7 +1263,7 @@ xfs_ioctl_setattr_xflags(
/* diflags2 only valid for v3 inodes. */
di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
- if (di_flags2 && ip->i_d.di_version < 3)
+ if (di_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
return -EINVAL;
ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
@@ -1372,8 +1473,7 @@ xfs_ioctl_setattr_check_cowextsize(
if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
return 0;
- if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
- ip->i_d.di_version != 3)
+ if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
return -EINVAL;
if (fa->fsx_cowextsize == 0)
@@ -1434,9 +1534,9 @@ xfs_ioctl_setattr(
* because the i_*dquot fields will get updated anyway.
*/
if (XFS_IS_QUOTA_ON(mp)) {
- code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
- ip->i_d.di_gid, fa->fsx_projid,
- XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
+ code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
+ VFS_I(ip)->i_gid, fa->fsx_projid,
+ XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
if (code)
return code;
}
@@ -1501,7 +1601,6 @@ xfs_ioctl_setattr(
olddquot = xfs_qm_vop_chown(tp, ip,
&ip->i_pdquot, pdqp);
}
- ASSERT(ip->i_d.di_version > 1);
ip->i_d.di_projid = fa->fsx_projid;
}
@@ -1514,7 +1613,7 @@ xfs_ioctl_setattr(
ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
else
ip->i_d.di_extsize = 0;
- if (ip->i_d.di_version == 3 &&
+ if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
(ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
mp->m_sb.sb_blocklog;
diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index 420bd95dc326..bab6a5a92407 100644
--- a/fs/xfs/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
@@ -6,6 +6,11 @@
#ifndef __XFS_IOCTL_H__
#define __XFS_IOCTL_H__
+struct xfs_bstat;
+struct xfs_ibulk;
+struct xfs_inogrp;
+
+
extern int
xfs_ioc_space(
struct file *filp,
@@ -30,27 +35,11 @@ xfs_readlink_by_handle(
struct file *parfilp,
xfs_fsop_handlereq_t *hreq);
-extern int
-xfs_attrmulti_attr_get(
- struct inode *inode,
- unsigned char *name,
- unsigned char __user *ubuf,
- uint32_t *len,
- uint32_t flags);
-
-extern int
-xfs_attrmulti_attr_set(
- struct inode *inode,
- unsigned char *name,
- const unsigned char __user *ubuf,
- uint32_t len,
- uint32_t flags);
-
-extern int
-xfs_attrmulti_attr_remove(
- struct inode *inode,
- unsigned char *name,
- uint32_t flags);
+int xfs_ioc_attrmulti_one(struct file *parfilp, struct inode *inode,
+ uint32_t opcode, void __user *uname, void __user *value,
+ uint32_t *len, uint32_t flags);
+int xfs_ioc_attr_list(struct xfs_inode *dp, void __user *ubuf, int bufsize,
+ int flags, struct xfs_attrlist_cursor __user *ucursor);
extern struct dentry *
xfs_handle_to_dentry(
@@ -70,10 +59,6 @@ xfs_file_compat_ioctl(
unsigned int cmd,
unsigned long arg);
-struct xfs_ibulk;
-struct xfs_bstat;
-struct xfs_inogrp;
-
int xfs_fsbulkstat_one_fmt(struct xfs_ibulk *breq,
const struct xfs_bulkstat *bstat);
int xfs_fsinumbers_fmt(struct xfs_ibulk *breq, const struct xfs_inumbers *igrp);
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 769581a79c58..c1771e728117 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -352,56 +352,24 @@ xfs_compat_handlereq_to_dentry(
STATIC int
xfs_compat_attrlist_by_handle(
struct file *parfilp,
- void __user *arg)
+ compat_xfs_fsop_attrlist_handlereq_t __user *p)
{
- int error;
- attrlist_cursor_kern_t *cursor;
- compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
compat_xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
- char *kbuf;
+ int error;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (copy_from_user(&al_hreq, arg,
- sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
+ if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
return -EFAULT;
- if (al_hreq.buflen < sizeof(struct attrlist) ||
- al_hreq.buflen > XFS_XATTR_LIST_MAX)
- return -EINVAL;
-
- /*
- * Reject flags, only allow namespaces.
- */
- if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
- return -EINVAL;
dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- error = -ENOMEM;
- kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
- if (!kbuf)
- goto out_dput;
-
- cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
- al_hreq.flags, cursor);
- if (error)
- goto out_kfree;
-
- if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
- error = -EFAULT;
- goto out_kfree;
- }
-
- if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
- error = -EFAULT;
-
-out_kfree:
- kmem_free(kbuf);
-out_dput:
+ error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)),
+ compat_ptr(al_hreq.buffer), al_hreq.buflen,
+ al_hreq.flags, &p->pos);
dput(dentry);
return error;
}
@@ -416,7 +384,6 @@ xfs_compat_attrmulti_by_handle(
compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
struct dentry *dentry;
unsigned int i, size;
- unsigned char *attr_name;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -443,64 +410,18 @@ xfs_compat_attrmulti_by_handle(
goto out_dput;
}
- error = -ENOMEM;
- attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
- if (!attr_name)
- goto out_kfree_ops;
-
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
- if ((ops[i].am_flags & ATTR_ROOT) &&
- (ops[i].am_flags & ATTR_SECURE)) {
- ops[i].am_error = -EINVAL;
- continue;
- }
- ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
-
- ops[i].am_error = strncpy_from_user((char *)attr_name,
+ ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
+ d_inode(dentry), ops[i].am_opcode,
compat_ptr(ops[i].am_attrname),
- MAXNAMELEN);
- if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
- error = -ERANGE;
- if (ops[i].am_error < 0)
- break;
-
- switch (ops[i].am_opcode) {
- case ATTR_OP_GET:
- ops[i].am_error = xfs_attrmulti_attr_get(
- d_inode(dentry), attr_name,
- compat_ptr(ops[i].am_attrvalue),
- &ops[i].am_length, ops[i].am_flags);
- break;
- case ATTR_OP_SET:
- ops[i].am_error = mnt_want_write_file(parfilp);
- if (ops[i].am_error)
- break;
- ops[i].am_error = xfs_attrmulti_attr_set(
- d_inode(dentry), attr_name,
- compat_ptr(ops[i].am_attrvalue),
- ops[i].am_length, ops[i].am_flags);
- mnt_drop_write_file(parfilp);
- break;
- case ATTR_OP_REMOVE:
- ops[i].am_error = mnt_want_write_file(parfilp);
- if (ops[i].am_error)
- break;
- ops[i].am_error = xfs_attrmulti_attr_remove(
- d_inode(dentry), attr_name,
- ops[i].am_flags);
- mnt_drop_write_file(parfilp);
- break;
- default:
- ops[i].am_error = -EINVAL;
- }
+ compat_ptr(ops[i].am_attrvalue),
+ &ops[i].am_length, ops[i].am_flags);
}
if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
error = -EFAULT;
- kfree(attr_name);
- out_kfree_ops:
kfree(ops);
out_dput:
dput(dentry);
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 81f2f93caec0..f7a99b3bbcf7 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -22,7 +22,6 @@
#include "xfs_iomap.h"
#include "xfs_error.h"
-#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/iversion.h>
@@ -50,10 +49,15 @@ xfs_initxattrs(
int error = 0;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- error = xfs_attr_set(ip, xattr->name,
- strlen(xattr->name),
- xattr->value, xattr->value_len,
- ATTR_SECURE);
+ struct xfs_da_args args = {
+ .dp = ip,
+ .attr_filter = XFS_ATTR_SECURE,
+ .name = xattr->name,
+ .namelen = strlen(xattr->name),
+ .value = xattr->value,
+ .valuelen = xattr->value_len,
+ };
+ error = xfs_attr_set(&args);
if (error < 0)
break;
}
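
Note: struct xfs_da_args is now the single carrier for get, set and remove; as the ATTR_OP_REMOVE handling earlier in this patch shows, removal is simply a set with no value. A sketch of removing a security xattr through the same interface (variable names illustrative):

	struct xfs_da_args args = {
		.dp		= ip,
		.attr_filter	= XFS_ATTR_SECURE,
		.name		= xattr_name,		/* illustrative */
		.namelen	= strlen(xattr_name),
		.value		= NULL,			/* no value == remove */
		.valuelen	= 0,
	};

	error = xfs_attr_set(&args);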
@@ -553,7 +557,7 @@ xfs_vn_getattr(
stat->blocks =
XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
- if (ip->i_d.di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
stat->btime = ip->i_d.di_crtime;
@@ -692,9 +696,7 @@ xfs_setattr_nonsize(
*/
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
- error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
- xfs_kgid_to_gid(gid),
- ip->i_d.di_projid,
+ error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
qflags, &udqp, &gdqp, NULL);
if (error)
return error;
@@ -763,7 +765,6 @@ xfs_setattr_nonsize(
olddquot1 = xfs_qm_vop_chown(tp, ip,
&ip->i_udquot, udqp);
}
- ip->i_d.di_uid = xfs_kuid_to_uid(uid);
inode->i_uid = uid;
}
if (!gid_eq(igid, gid)) {
@@ -775,7 +776,6 @@ xfs_setattr_nonsize(
olddquot2 = xfs_qm_vop_chown(tp, ip,
&ip->i_gdquot, gdqp);
}
- ip->i_d.di_gid = xfs_kgid_to_gid(gid);
inode->i_gid = gid;
}
}
@@ -1304,9 +1304,6 @@ xfs_setup_inode(
/* make the inode look hashed for the writeback code */
inode_fake_hash(inode);
- inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
- inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
-
i_size_write(inode, ip->i_d.di_size);
xfs_diflags_to_iflags(inode, ip);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 4b31c29b7e6b..ff2da28fed90 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -86,8 +86,8 @@ xfs_bulkstat_one_int(
*/
buf->bs_projectid = ip->i_d.di_projid;
buf->bs_ino = ino;
- buf->bs_uid = dic->di_uid;
- buf->bs_gid = dic->di_gid;
+ buf->bs_uid = i_uid_read(inode);
+ buf->bs_gid = i_gid_read(inode);
buf->bs_size = dic->di_size;
buf->bs_nlink = inode->i_nlink;
@@ -110,7 +110,7 @@ xfs_bulkstat_one_int(
buf->bs_forkoff = XFS_IFORK_BOFF(ip);
buf->bs_version = XFS_BULKSTAT_VERSION_V5;
- if (dic->di_version == 3) {
+ if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
buf->bs_cowextsize_blks = dic->di_cowextsize;
}
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 8738bb03f253..9f70d2f68e05 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -60,6 +60,7 @@ typedef __u32 xfs_nlink_t;
#include <linux/list_sort.h>
#include <linux/ratelimit.h>
#include <linux/rhashtable.h>
+#include <linux/xattr.h>
#include <asm/page.h>
#include <asm/div64.h>
@@ -163,32 +164,6 @@ struct xstats {
extern struct xstats xfsstats;
-/* Kernel uid/gid conversion. These are used to convert to/from the on disk
- * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally.
- * The conversion here is type only, the value will remain the same since we
- * are converting to the init_user_ns. The uid is later mapped to a particular
- * user namespace value when crossing the kernel/user boundary.
- */
-static inline uint32_t xfs_kuid_to_uid(kuid_t uid)
-{
- return from_kuid(&init_user_ns, uid);
-}
-
-static inline kuid_t xfs_uid_to_kuid(uint32_t uid)
-{
- return make_kuid(&init_user_ns, uid);
-}
-
-static inline uint32_t xfs_kgid_to_gid(kgid_t gid)
-{
- return from_kgid(&init_user_ns, gid);
-}
-
-static inline kgid_t xfs_gid_to_kgid(uint32_t gid)
-{
- return make_kgid(&init_user_ns, gid);
-}
-
static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
{
return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f6006d94a581..4a53768c5397 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -47,8 +47,7 @@ xlog_dealloc_log(
/* local state machine functions */
STATIC void xlog_state_done_syncing(
- struct xlog_in_core *iclog,
- bool aborted);
+ struct xlog_in_core *iclog);
STATIC int
xlog_state_get_iclog_space(
struct xlog *log,
@@ -63,11 +62,6 @@ xlog_state_switch_iclogs(
struct xlog_in_core *iclog,
int eventual_size);
STATIC void
-xlog_state_want_sync(
- struct xlog *log,
- struct xlog_in_core *iclog);
-
-STATIC void
xlog_grant_push_ail(
struct xlog *log,
int need_bytes);
@@ -597,26 +591,21 @@ xlog_state_release_iclog(
return 0;
}
-int
+void
xfs_log_release_iclog(
- struct xfs_mount *mp,
struct xlog_in_core *iclog)
{
- struct xlog *log = mp->m_log;
- bool sync;
-
- if (iclog->ic_state == XLOG_STATE_IOERROR) {
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- return -EIO;
- }
+ struct xlog *log = iclog->ic_log;
+ bool sync = false;
if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
- sync = __xlog_state_release_iclog(log, iclog);
+ if (iclog->ic_state != XLOG_STATE_IOERROR)
+ sync = __xlog_state_release_iclog(log, iclog);
spin_unlock(&log->l_icloglock);
- if (sync)
- xlog_sync(log, iclog);
}
- return 0;
+
+ if (sync)
+ xlog_sync(log, iclog);
}
/*
@@ -855,6 +844,31 @@ xfs_log_mount_cancel(
}
/*
+ * Wait for the iclog to be written to disk, or return an error if the log has been
+ * shut down.
+ */
+static int
+xlog_wait_on_iclog(
+ struct xlog_in_core *iclog)
+ __releases(iclog->ic_log->l_icloglock)
+{
+ struct xlog *log = iclog->ic_log;
+
+ if (!XLOG_FORCED_SHUTDOWN(log) &&
+ iclog->ic_state != XLOG_STATE_ACTIVE &&
+ iclog->ic_state != XLOG_STATE_DIRTY) {
+ XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
+ xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+ } else {
+ spin_unlock(&log->l_icloglock);
+ }
+
+ if (XLOG_FORCED_SHUTDOWN(log))
+ return -EIO;
+ return 0;
+}
+
+/*
* Final log writes as part of unmount.
*
* Mark the filesystem clean as unmount happens. Note that during relocation
@@ -919,20 +933,13 @@ out_err:
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
atomic_inc(&iclog->ic_refcnt);
- xlog_state_want_sync(log, iclog);
+ if (iclog->ic_state == XLOG_STATE_ACTIVE)
+ xlog_state_switch_iclogs(log, iclog, 0);
+ else
+ ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+ iclog->ic_state == XLOG_STATE_IOERROR);
error = xlog_state_release_iclog(log, iclog);
- switch (iclog->ic_state) {
- default:
- if (!XLOG_FORCED_SHUTDOWN(log)) {
- xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- break;
- }
- /* fall through */
- case XLOG_STATE_ACTIVE:
- case XLOG_STATE_DIRTY:
- spin_unlock(&log->l_icloglock);
- break;
- }
+ xlog_wait_on_iclog(iclog);
if (tic) {
trace_xfs_log_umount_write(log, tic);
@@ -941,6 +948,18 @@ out_err:
}
}
+static void
+xfs_log_unmount_verify_iclog(
+ struct xlog *log)
+{
+ struct xlog_in_core *iclog = log->l_iclog;
+
+ do {
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+ ASSERT(iclog->ic_offset == 0);
+ } while ((iclog = iclog->ic_next) != log->l_iclog);
+}
+
/*
* Unmount record used to have a string "Unmount filesystem--" in the
* data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
@@ -948,16 +967,11 @@ out_err:
* currently architecture converted and "Unmount" is a bit foo.
* As far as I know, there weren't any dependencies on the old behaviour.
*/
-
-static int
-xfs_log_unmount_write(xfs_mount_t *mp)
+static void
+xfs_log_unmount_write(
+ struct xfs_mount *mp)
{
- struct xlog *log = mp->m_log;
- xlog_in_core_t *iclog;
-#ifdef DEBUG
- xlog_in_core_t *first_iclog;
-#endif
- int error;
+ struct xlog *log = mp->m_log;
/*
* Don't write out unmount record on norecovery mounts or ro devices.
@@ -966,57 +980,16 @@ xfs_log_unmount_write(xfs_mount_t *mp)
if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
xfs_readonly_buftarg(log->l_targ)) {
ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
- return 0;
+ return;
}
- error = xfs_log_force(mp, XFS_LOG_SYNC);
- ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
-
-#ifdef DEBUG
- first_iclog = iclog = log->l_iclog;
- do {
- if (iclog->ic_state != XLOG_STATE_IOERROR) {
- ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
- ASSERT(iclog->ic_offset == 0);
- }
- iclog = iclog->ic_next;
- } while (iclog != first_iclog);
-#endif
- if (! (XLOG_FORCED_SHUTDOWN(log))) {
- xfs_log_write_unmount_record(mp);
- } else {
- /*
- * We're already in forced_shutdown mode, couldn't
- * even attempt to write out the unmount transaction.
- *
- * Go through the motions of sync'ing and releasing
- * the iclog, even though no I/O will actually happen,
- * we need to wait for other log I/Os that may already
- * be in progress. Do this as a separate section of
- * code so we'll know if we ever get stuck here that
- * we're in this odd situation of trying to unmount
- * a file system that went into forced_shutdown as
- * the result of an unmount..
- */
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
- atomic_inc(&iclog->ic_refcnt);
- xlog_state_want_sync(log, iclog);
- error = xlog_state_release_iclog(log, iclog);
- switch (iclog->ic_state) {
- case XLOG_STATE_ACTIVE:
- case XLOG_STATE_DIRTY:
- case XLOG_STATE_IOERROR:
- spin_unlock(&log->l_icloglock);
- break;
- default:
- xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- break;
- }
- }
+ xfs_log_force(mp, XFS_LOG_SYNC);
- return error;
-} /* xfs_log_unmount_write */
+ if (XLOG_FORCED_SHUTDOWN(log))
+ return;
+ xfs_log_unmount_verify_iclog(log);
+ xfs_log_write_unmount_record(mp);
+}
/*
* Empty the log for unmount/freeze.
@@ -1279,7 +1252,6 @@ xlog_ioend_work(
struct xlog_in_core *iclog =
container_of(work, struct xlog_in_core, ic_end_io_work);
struct xlog *log = iclog->ic_log;
- bool aborted = false;
int error;
error = blk_status_to_errno(iclog->ic_bio.bi_status);
@@ -1295,17 +1267,9 @@ xlog_ioend_work(
if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
xfs_alert(log->l_mp, "log I/O error %d", error);
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
- /*
- * This flag will be propagated to the trans-committed
- * callback routines to let them know that the log-commit
- * didn't succeed.
- */
- aborted = true;
- } else if (iclog->ic_state == XLOG_STATE_IOERROR) {
- aborted = true;
}
- xlog_state_done_syncing(iclog, aborted);
+ xlog_state_done_syncing(iclog);
bio_uninit(&iclog->ic_bio);
/*
@@ -1739,7 +1703,7 @@ xlog_bio_end_io(
&iclog->ic_end_io_work);
}
-static void
+static int
xlog_map_iclog_data(
struct bio *bio,
void *data,
@@ -1750,11 +1714,14 @@ xlog_map_iclog_data(
unsigned int off = offset_in_page(data);
size_t len = min_t(size_t, count, PAGE_SIZE - off);
- WARN_ON_ONCE(bio_add_page(bio, page, len, off) != len);
+ if (bio_add_page(bio, page, len, off) != len)
+ return -EIO;
data += len;
count -= len;
} while (count);
+
+ return 0;
}
STATIC void
@@ -1784,7 +1751,7 @@ xlog_write_iclog(
* the buffer manually, the code needs to be kept in sync
* with the I/O completion path.
*/
- xlog_state_done_syncing(iclog, true);
+ xlog_state_done_syncing(iclog);
up(&iclog->ic_sema);
return;
}
@@ -1798,7 +1765,10 @@ xlog_write_iclog(
if (need_flush)
iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
- xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count);
+ if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
+ xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+ return;
+ }
if (is_vmalloc_addr(iclog->ic_data))
flush_kernel_vmap_range(iclog->ic_data, count);
@@ -2328,7 +2298,11 @@ xlog_write_copy_finish(
*record_cnt = 0;
*data_cnt = 0;
- xlog_state_want_sync(log, iclog);
+ if (iclog->ic_state == XLOG_STATE_ACTIVE)
+ xlog_state_switch_iclogs(log, iclog, 0);
+ else
+ ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+ iclog->ic_state == XLOG_STATE_IOERROR);
if (!commit_iclog)
goto release_iclog;
spin_unlock(&log->l_icloglock);
@@ -2575,111 +2549,106 @@ next_lv:
*****************************************************************************
*/
+static void
+xlog_state_activate_iclog(
+ struct xlog_in_core *iclog,
+ int *iclogs_changed)
+{
+ ASSERT(list_empty_careful(&iclog->ic_callbacks));
+
+ /*
+ * If the number of ops in this iclog indicates it just contains the
+ * dummy transaction, we can change state into IDLE (the second time
+ * around). Otherwise we should change the state to indicate that a
+ * dummy is still NEEDed; we don't need to cover the dummy itself.
+ */
+ if (*iclogs_changed == 0 &&
+ iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
+ *iclogs_changed = 1;
+ } else {
+ /*
+ * We have two dirty iclogs, so start over. The op count could
+ * also indicate that this is not the dummy going out.
+ */
+ *iclogs_changed = 2;
+ }
+
+ iclog->ic_state = XLOG_STATE_ACTIVE;
+ iclog->ic_offset = 0;
+ iclog->ic_header.h_num_logops = 0;
+ memset(iclog->ic_header.h_cycle_data, 0,
+ sizeof(iclog->ic_header.h_cycle_data));
+ iclog->ic_header.h_lsn = 0;
+}
+
/*
- * An iclog has just finished IO completion processing, so we need to update
- * the iclog state and propagate that up into the overall log state. Hence we
- * prepare the iclog for cleaning, and then clean all the pending dirty iclogs
- * starting from the head, and then wake up any threads that are waiting for the
- * iclog to be marked clean.
- *
- * The ordering of marking iclogs ACTIVE must be maintained, so an iclog
- * doesn't become ACTIVE beyond one that is SYNCING. This is also required to
- * maintain the notion that we use a ordered wait queue to hold off would be
- * writers to the log when every iclog is trying to sync to disk.
- *
- * Caller must hold the icloglock before calling us.
- *
- * State Change: !IOERROR -> DIRTY -> ACTIVE
+ * Loop through all iclogs and mark all iclogs currently marked DIRTY as
+ * ACTIVE after iclog I/O has completed.
*/
-STATIC void
-xlog_state_clean_iclog(
+static void
+xlog_state_activate_iclogs(
struct xlog *log,
- struct xlog_in_core *dirty_iclog)
+ int *iclogs_changed)
{
- struct xlog_in_core *iclog;
- int changed = 0;
-
- /* Prepare the completed iclog. */
- if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
- dirty_iclog->ic_state = XLOG_STATE_DIRTY;
+ struct xlog_in_core *iclog = log->l_iclog;
- /* Walk all the iclogs to update the ordered active state. */
- iclog = log->l_iclog;
do {
- if (iclog->ic_state == XLOG_STATE_DIRTY) {
- iclog->ic_state = XLOG_STATE_ACTIVE;
- iclog->ic_offset = 0;
- ASSERT(list_empty_careful(&iclog->ic_callbacks));
- /*
- * If the number of ops in this iclog indicate it just
- * contains the dummy transaction, we can
- * change state into IDLE (the second time around).
- * Otherwise we should change the state into
- * NEED a dummy.
- * We don't need to cover the dummy.
- */
- if (!changed &&
- (be32_to_cpu(iclog->ic_header.h_num_logops) ==
- XLOG_COVER_OPS)) {
- changed = 1;
- } else {
- /*
- * We have two dirty iclogs so start over
- * This could also be num of ops indicates
- * this is not the dummy going out.
- */
- changed = 2;
- }
- iclog->ic_header.h_num_logops = 0;
- memset(iclog->ic_header.h_cycle_data, 0,
- sizeof(iclog->ic_header.h_cycle_data));
- iclog->ic_header.h_lsn = 0;
- } else if (iclog->ic_state == XLOG_STATE_ACTIVE)
- /* do nothing */;
- else
- break; /* stop cleaning */
- iclog = iclog->ic_next;
- } while (iclog != log->l_iclog);
-
+ if (iclog->ic_state == XLOG_STATE_DIRTY)
+ xlog_state_activate_iclog(iclog, iclogs_changed);
+ /*
+ * The ordering of marking iclogs ACTIVE must be maintained, so
+ * an iclog doesn't become ACTIVE beyond one that is SYNCING.
+ */
+ else if (iclog->ic_state != XLOG_STATE_ACTIVE)
+ break;
+ } while ((iclog = iclog->ic_next) != log->l_iclog);
+}
+static int
+xlog_covered_state(
+ int prev_state,
+ int iclogs_changed)
+{
/*
- * Wake up threads waiting in xfs_log_force() for the dirty iclog
- * to be cleaned.
+ * We usually go to NEED. But we go to NEED2 if iclogs_changed indicates
+ * we are done writing the dummy record. If we are done with the second
+ * dummy record (DONE2), then we go to IDLE.
*/
- wake_up_all(&dirty_iclog->ic_force_wait);
+ switch (prev_state) {
+ case XLOG_STATE_COVER_IDLE:
+ case XLOG_STATE_COVER_NEED:
+ case XLOG_STATE_COVER_NEED2:
+ break;
+ case XLOG_STATE_COVER_DONE:
+ if (iclogs_changed == 1)
+ return XLOG_STATE_COVER_NEED2;
+ break;
+ case XLOG_STATE_COVER_DONE2:
+ if (iclogs_changed == 1)
+ return XLOG_STATE_COVER_IDLE;
+ break;
+ default:
+ ASSERT(0);
+ }
- /*
- * Change state for the dummy log recording.
- * We usually go to NEED. But we go to NEED2 if the changed indicates
- * we are done writing the dummy record.
- * If we are done with the second dummy recored (DONE2), then
- * we go to IDLE.
- */
- if (changed) {
- switch (log->l_covered_state) {
- case XLOG_STATE_COVER_IDLE:
- case XLOG_STATE_COVER_NEED:
- case XLOG_STATE_COVER_NEED2:
- log->l_covered_state = XLOG_STATE_COVER_NEED;
- break;
+ return XLOG_STATE_COVER_NEED;
+}
- case XLOG_STATE_COVER_DONE:
- if (changed == 1)
- log->l_covered_state = XLOG_STATE_COVER_NEED2;
- else
- log->l_covered_state = XLOG_STATE_COVER_NEED;
- break;
+STATIC void
+xlog_state_clean_iclog(
+ struct xlog *log,
+ struct xlog_in_core *dirty_iclog)
+{
+ int iclogs_changed = 0;
- case XLOG_STATE_COVER_DONE2:
- if (changed == 1)
- log->l_covered_state = XLOG_STATE_COVER_IDLE;
- else
- log->l_covered_state = XLOG_STATE_COVER_NEED;
- break;
+ dirty_iclog->ic_state = XLOG_STATE_DIRTY;
- default:
- ASSERT(0);
- }
+ xlog_state_activate_iclogs(log, &iclogs_changed);
+ wake_up_all(&dirty_iclog->ic_force_wait);
+
+ if (iclogs_changed) {
+ log->l_covered_state = xlog_covered_state(log->l_covered_state,
+ iclogs_changed);
}
}
@@ -2808,8 +2777,7 @@ xlog_state_iodone_process_iclog(
static void
xlog_state_do_iclog_callbacks(
struct xlog *log,
- struct xlog_in_core *iclog,
- bool aborted)
+ struct xlog_in_core *iclog)
__releases(&log->l_icloglock)
__acquires(&log->l_icloglock)
{
@@ -2821,7 +2789,7 @@ xlog_state_do_iclog_callbacks(
list_splice_init(&iclog->ic_callbacks, &tmp);
spin_unlock(&iclog->ic_callback_lock);
- xlog_cil_process_committed(&tmp, aborted);
+ xlog_cil_process_committed(&tmp);
spin_lock(&iclog->ic_callback_lock);
}
@@ -2836,8 +2804,7 @@ xlog_state_do_iclog_callbacks(
STATIC void
xlog_state_do_callback(
- struct xlog *log,
- bool aborted)
+ struct xlog *log)
{
struct xlog_in_core *iclog;
struct xlog_in_core *first_iclog;
@@ -2878,9 +2845,11 @@ xlog_state_do_callback(
* we'll have to run at least one more complete loop.
*/
cycled_icloglock = true;
- xlog_state_do_iclog_callbacks(log, iclog, aborted);
-
- xlog_state_clean_iclog(log, iclog);
+ xlog_state_do_iclog_callbacks(log, iclog);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ wake_up_all(&iclog->ic_force_wait);
+ else
+ xlog_state_clean_iclog(log, iclog);
iclog = iclog->ic_next;
} while (first_iclog != iclog);
@@ -2916,25 +2885,22 @@ xlog_state_do_callback(
*/
STATIC void
xlog_state_done_syncing(
- struct xlog_in_core *iclog,
- bool aborted)
+ struct xlog_in_core *iclog)
{
struct xlog *log = iclog->ic_log;
spin_lock(&log->l_icloglock);
-
ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
/*
* If we got an error, either on the first buffer, or in the case of
- * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
- * and none should ever be attempted to be written to disk
- * again.
+ * split log writes, on the second, we shut down the file system and
+ * no further iclog writes should ever be attempted.
*/
- if (iclog->ic_state == XLOG_STATE_SYNCING)
+ if (!XLOG_FORCED_SHUTDOWN(log)) {
+ ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
iclog->ic_state = XLOG_STATE_DONE_SYNC;
- else
- ASSERT(iclog->ic_state == XLOG_STATE_IOERROR);
+ }
/*
* Someone could be sleeping prior to writing out the next
@@ -2943,9 +2909,8 @@ xlog_state_done_syncing(
*/
wake_up_all(&iclog->ic_write_wait);
spin_unlock(&log->l_icloglock);
- xlog_state_do_callback(log, aborted); /* also cleans log */
-} /* xlog_state_done_syncing */
-
+ xlog_state_do_callback(log); /* also cleans log */
+}
/*
* If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
@@ -3152,11 +3117,12 @@ xlog_ungrant_log_space(
}
/*
- * This routine will mark the current iclog in the ring as WANT_SYNC
- * and move the current iclog pointer to the next iclog in the ring.
- * When this routine is called from xlog_state_get_iclog_space(), the
- * exact size of the iclog has not yet been determined. All we know is
- * that every data block. We have run out of space in this log record.
+ * Mark the current iclog in the ring as WANT_SYNC and move the current iclog
+ * pointer to the next iclog in the ring.
+ *
+ * When called from xlog_state_get_iclog_space(), the exact size of the iclog
+ * has not yet been determined, all we know is that we have run out of space in
+ * the current iclog.
*/
STATIC void
xlog_state_switch_iclogs(
@@ -3165,6 +3131,8 @@ xlog_state_switch_iclogs(
int eventual_size)
{
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+ assert_spin_locked(&log->l_icloglock);
+
if (!eventual_size)
eventual_size = iclog->ic_offset;
iclog->ic_state = XLOG_STATE_WANT_SYNC;
@@ -3259,9 +3227,6 @@ xfs_log_force(
* previous iclog and go to sleep.
*/
iclog = iclog->ic_prev;
- if (iclog->ic_state == XLOG_STATE_ACTIVE ||
- iclog->ic_state == XLOG_STATE_DIRTY)
- goto out_unlock;
} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
if (atomic_read(&iclog->ic_refcnt) == 0) {
/*
@@ -3277,8 +3242,7 @@ xfs_log_force(
if (xlog_state_release_iclog(log, iclog))
goto out_error;
- if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
- iclog->ic_state == XLOG_STATE_DIRTY)
+ if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
goto out_unlock;
} else {
/*
@@ -3298,17 +3262,8 @@ xfs_log_force(
;
}
- if (!(flags & XFS_LOG_SYNC))
- goto out_unlock;
-
- if (iclog->ic_state == XLOG_STATE_IOERROR)
- goto out_error;
- XFS_STATS_INC(mp, xs_log_force_sleep);
- xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- if (iclog->ic_state == XLOG_STATE_IOERROR)
- return -EIO;
- return 0;
-
+ if (flags & XFS_LOG_SYNC)
+ return xlog_wait_on_iclog(iclog);
out_unlock:
spin_unlock(&log->l_icloglock);
return 0;
@@ -3339,9 +3294,6 @@ __xfs_log_force_lsn(
goto out_unlock;
}
- if (iclog->ic_state == XLOG_STATE_DIRTY)
- goto out_unlock;
-
if (iclog->ic_state == XLOG_STATE_ACTIVE) {
/*
* We sleep here if we haven't already slept (e.g. this is the
@@ -3375,20 +3327,8 @@ __xfs_log_force_lsn(
*log_flushed = 1;
}
- if (!(flags & XFS_LOG_SYNC) ||
- (iclog->ic_state == XLOG_STATE_ACTIVE ||
- iclog->ic_state == XLOG_STATE_DIRTY))
- goto out_unlock;
-
- if (iclog->ic_state == XLOG_STATE_IOERROR)
- goto out_error;
-
- XFS_STATS_INC(mp, xs_log_force_sleep);
- xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- if (iclog->ic_state == XLOG_STATE_IOERROR)
- return -EIO;
- return 0;
-
+ if (flags & XFS_LOG_SYNC)
+ return xlog_wait_on_iclog(iclog);
out_unlock:
spin_unlock(&log->l_icloglock);
return 0;
@@ -3434,26 +3374,6 @@ xfs_log_force_lsn(
return ret;
}
-/*
- * Called when we want to mark the current iclog as being ready to sync to
- * disk.
- */
-STATIC void
-xlog_state_want_sync(
- struct xlog *log,
- struct xlog_in_core *iclog)
-{
- assert_spin_locked(&log->l_icloglock);
-
- if (iclog->ic_state == XLOG_STATE_ACTIVE) {
- xlog_state_switch_iclogs(log, iclog, 0);
- } else {
- ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
- iclog->ic_state == XLOG_STATE_IOERROR);
- }
-}
-
-
/*****************************************************************************
*
* TICKET functions
@@ -3937,7 +3857,7 @@ xfs_log_force_umount(
spin_lock(&log->l_cilp->xc_push_lock);
wake_up_all(&log->l_cilp->xc_commit_wait);
spin_unlock(&log->l_cilp->xc_push_lock);
- xlog_state_do_callback(log, true);
+ xlog_state_do_callback(log);
/* return non-zero if log IOERROR transition had already happened */
return retval;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 84e06805160f..cc77cc36560a 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -121,8 +121,7 @@ void xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void xfs_log_space_wake(struct xfs_mount *mp);
-int xfs_log_release_iclog(struct xfs_mount *mp,
- struct xlog_in_core *iclog);
+void xfs_log_release_iclog(struct xlog_in_core *iclog);
int xfs_log_reserve(struct xfs_mount *mp,
int length,
int count,
@@ -138,7 +137,7 @@ void xfs_log_ticket_put(struct xlog_ticket *ticket);
void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_lsn_t *commit_lsn, bool regrant);
-void xlog_cil_process_committed(struct list_head *list, bool aborted);
+void xlog_cil_process_committed(struct list_head *list);
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
void xfs_log_work_queue(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 48435cf2aa16..64cc0bf2ab3b 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -574,10 +574,10 @@ xlog_discard_busy_extents(
*/
static void
xlog_cil_committed(
- struct xfs_cil_ctx *ctx,
- bool abort)
+ struct xfs_cil_ctx *ctx)
{
struct xfs_mount *mp = ctx->cil->xc_log->l_mp;
+ bool abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);
/*
* If the I/O failed, we're aborting the commit and already shutdown.
@@ -613,37 +613,38 @@ xlog_cil_committed(
void
xlog_cil_process_committed(
- struct list_head *list,
- bool aborted)
+ struct list_head *list)
{
struct xfs_cil_ctx *ctx;
while ((ctx = list_first_entry_or_null(list,
struct xfs_cil_ctx, iclog_entry))) {
list_del(&ctx->iclog_entry);
- xlog_cil_committed(ctx, aborted);
+ xlog_cil_committed(ctx);
}
}
/*
- * Push the Committed Item List to the log. If @push_seq flag is zero, then it
- * is a background flush and so we can chose to ignore it. Otherwise, if the
- * current sequence is the same as @push_seq we need to do a flush. If
- * @push_seq is less than the current sequence, then it has already been
+ * Push the Committed Item List to the log.
+ *
+ * If the current sequence is the same as xc_push_seq we need to do a flush. If
+ * xc_push_seq is less than the current sequence, then it has already been
* flushed and we don't need to do anything - the caller will wait for it to
* complete if necessary.
*
- * @push_seq is a value rather than a flag because that allows us to do an
- * unlocked check of the sequence number for a match. Hence we can allows log
- * forces to run racily and not issue pushes for the same sequence twice. If we
- * get a race between multiple pushes for the same sequence they will block on
- * the first one and then abort, hence avoiding needless pushes.
+ * xc_push_seq is checked unlocked against the sequence number for a match.
+ * Hence we can allow log forces to run racily and not issue pushes for the
+ * same sequence twice. If we get a race between multiple pushes for the same
+ * sequence they will block on the first one and then abort, hence avoiding
+ * needless pushes.
*/
-STATIC int
-xlog_cil_push(
- struct xlog *log)
+static void
+xlog_cil_push_work(
+ struct work_struct *work)
{
- struct xfs_cil *cil = log->l_cilp;
+ struct xfs_cil *cil =
+ container_of(work, struct xfs_cil, xc_push_work);
+ struct xlog *log = cil->xc_log;
struct xfs_log_vec *lv;
struct xfs_cil_ctx *ctx;
struct xfs_cil_ctx *new_ctx;
@@ -657,9 +658,6 @@ xlog_cil_push(
xfs_lsn_t commit_lsn;
xfs_lsn_t push_seq;
- if (!cil)
- return 0;
-
new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
new_ctx->ticket = xlog_cil_ticket_alloc(log);
@@ -867,28 +865,20 @@ restart:
spin_unlock(&cil->xc_push_lock);
/* release the hounds! */
- return xfs_log_release_iclog(log->l_mp, commit_iclog);
+ xfs_log_release_iclog(commit_iclog);
+ return;
out_skip:
up_write(&cil->xc_ctx_lock);
xfs_log_ticket_put(new_ctx->ticket);
kmem_free(new_ctx);
- return 0;
+ return;
out_abort_free_ticket:
xfs_log_ticket_put(tic);
out_abort:
- xlog_cil_committed(ctx, true);
- return -EIO;
-}
-
-static void
-xlog_cil_push_work(
- struct work_struct *work)
-{
- struct xfs_cil *cil = container_of(work, struct xfs_cil,
- xc_push_work);
- xlog_cil_push(cil->xc_log);
+ ASSERT(XLOG_FORCED_SHUTDOWN(log));
+ xlog_cil_committed(ctx);
}
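
The unlocked xc_push_seq check described in the comment above is an instance of a common double-checked pattern: a racy read filters out sequences that were clearly already pushed, and only survivors take the lock for the authoritative test. A hedged, self-contained sketch of that idiom (struct my_cil and its fields are made up, not the real CIL layout):

    struct my_cil {
            spinlock_t      lock;
            u64             pushed_seq;     /* highest sequence already pushed */
    };

    static bool push_already_done(struct my_cil *cil, u64 seq)
    {
            /* Racy unlocked read: a stale answer only costs a re-check. */
            if (READ_ONCE(cil->pushed_seq) >= seq)
                    return true;

            /* Authoritative re-check under the lock before doing work. */
            spin_lock(&cil->lock);
            if (cil->pushed_seq >= seq) {
                    spin_unlock(&cil->lock);
                    return true;
            }
            cil->pushed_seq = seq;          /* we own this push now */
            spin_unlock(&cil->lock);
            return false;
    }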
/*
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index b192c5a9f9fd..2b0aec37e73e 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -402,7 +402,8 @@ struct xlog {
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
-#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
+#define XLOG_FORCED_SHUTDOWN(log) \
+ (unlikely((log)->l_flags & XLOG_IO_ERROR))
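Wrapping the flag test in unlikely() is purely a code-layout hint. In the kernel it expands to __builtin_expect (see include/linux/compiler.h), telling the compiler that the shutdown case is cold so the common path stays a straight fall-through:

    #define unlikely(x)     __builtin_expect(!!(x), 0)      /* as in compiler.h */

    if (unlikely(XLOG_FORCED_SHUTDOWN(log)))
            return -EIO;    /* cold error path, laid out out of line */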
/* common routines */
extern int
@@ -525,12 +526,6 @@ xlog_cil_force(struct xlog *log)
}
/*
- * Unmount record type is used as a pseudo transaction type for the ticket.
- * It's value must be outside the range of XFS_TRANS_* values.
- */
-#define XLOG_UNMOUNT_REC_TYPE (-1U)
-
-/*
* Wrapper function for waiting on a wait queue serialised against wakeups
* by a spinlock. This matches the semantics of all the wait queues used in the
* log code.
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 25cfc85dbaa7..11c3502b07b1 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2869,8 +2869,8 @@ xfs_recover_inode_owner_change(
return -ENOMEM;
/* instantiate the inode */
+ ASSERT(dip->di_version >= 3);
xfs_inode_from_disk(ip, dip);
- ASSERT(ip->i_d.di_version >= 3);
error = xfs_iformat_fork(ip, dip);
if (error)
@@ -2997,7 +2997,7 @@ xlog_recover_inode_pass2(
* superblock flag to determine whether we need to look at di_flushiter
* to skip replay when the on disk inode is newer than the log one
*/
- if (!xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (!xfs_sb_version_has_v3inode(&mp->m_sb) &&
ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
/*
* Deal with the wrap case, DI_MAX_FLUSH is less
@@ -3068,7 +3068,7 @@ xlog_recover_inode_pass2(
error = -EFSCORRUPTED;
goto out_release;
}
- isize = xfs_log_dinode_size(ldip->di_version);
+ isize = xfs_log_dinode_size(mp);
if (unlikely(item->ri_buf[1].i_len > isize)) {
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
XFS_ERRLEVEL_LOW, mp, ldip,
@@ -4947,7 +4947,7 @@ xlog_recover_clear_agi_bucket(
if (error)
goto out_abort;
- agi = XFS_BUF_TO_AGI(agibp);
+ agi = agibp->b_addr;
agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket);
@@ -5083,7 +5083,7 @@ xlog_recover_process_iunlinks(
* buffer reference though, so that it stays pinned in memory
* while we need the buffer.
*/
- agi = XFS_BUF_TO_AGI(agibp);
+ agi = agibp->b_addr;
xfs_buf_unlock(agibp);
for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
@@ -5636,7 +5636,7 @@ xlog_do_recover(
/* Convert superblock from on-disk format */
sbp = &mp->m_sb;
- xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
+ xfs_sb_from_disk(sbp, bp->b_addr);
xfs_buf_relse(bp);
/* re-initialise in-core superblock and geometry structures */
@@ -5809,7 +5809,6 @@ xlog_recover_check_summary(
struct xlog *log)
{
xfs_mount_t *mp;
- xfs_agf_t *agfp;
xfs_buf_t *agfbp;
xfs_buf_t *agibp;
xfs_agnumber_t agno;
@@ -5829,7 +5828,8 @@ xlog_recover_check_summary(
xfs_alert(mp, "%s agf read failed agno %d error %d",
__func__, agno, error);
} else {
- agfp = XFS_BUF_TO_AGF(agfbp);
+ struct xfs_agf *agfp = agfbp->b_addr;
+
freeblks += be32_to_cpu(agfp->agf_freeblks) +
be32_to_cpu(agfp->agf_flcount);
xfs_buf_relse(agfbp);
@@ -5840,7 +5840,7 @@ xlog_recover_check_summary(
xfs_alert(mp, "%s agi read failed agno %d error %d",
__func__, agno, error);
} else {
- struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
+ struct xfs_agi *agi = agibp->b_addr;
itotal += be32_to_cpu(agi->agi_count);
ifree += be32_to_cpu(agi->agi_freecount);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 56efe140c923..c5513e5a226a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -310,7 +310,7 @@ reread:
/*
* Initialize the mount structure from the superblock.
*/
- xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
+ xfs_sb_from_disk(sbp, bp->b_addr);
/*
* If we haven't validated the superblock, do so now before we try
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 0b0909657bad..cabdb755adae 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -326,16 +326,16 @@ xfs_qm_dqattach_locked(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
- error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
- doalloc, &ip->i_udquot);
+ error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
+ XFS_DQ_USER, doalloc, &ip->i_udquot);
if (error)
goto done;
ASSERT(ip->i_udquot);
}
if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
- error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
- doalloc, &ip->i_gdquot);
+ error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
+ XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
if (error)
goto done;
ASSERT(ip->i_gdquot);
@@ -871,12 +871,20 @@ xfs_qm_reset_dqcounts(
ddq->d_bcount = 0;
ddq->d_icount = 0;
ddq->d_rtbcount = 0;
- ddq->d_btimer = 0;
- ddq->d_itimer = 0;
- ddq->d_rtbtimer = 0;
- ddq->d_bwarns = 0;
- ddq->d_iwarns = 0;
- ddq->d_rtbwarns = 0;
+
+ /*
+ * dquot id 0 stores the default grace period and the maximum
+ * warning limit that were set by the administrator, so we
+ * should not reset them.
+ */
+ if (ddq->d_id != 0) {
+ ddq->d_btimer = 0;
+ ddq->d_itimer = 0;
+ ddq->d_rtbtimer = 0;
+ ddq->d_bwarns = 0;
+ ddq->d_iwarns = 0;
+ ddq->d_rtbwarns = 0;
+ }
if (xfs_sb_version_hascrc(&mp->m_sb)) {
xfs_update_cksum((char *)&dqb[j],
@@ -1613,8 +1621,8 @@ xfs_qm_dqfree_one(
int
xfs_qm_vop_dqalloc(
struct xfs_inode *ip,
- xfs_dqid_t uid,
- xfs_dqid_t gid,
+ kuid_t uid,
+ kgid_t gid,
prid_t prid,
uint flags,
struct xfs_dquot **O_udqpp,
@@ -1622,6 +1630,8 @@ xfs_qm_vop_dqalloc(
struct xfs_dquot **O_pdqpp)
{
struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ struct user_namespace *user_ns = inode->i_sb->s_user_ns;
struct xfs_dquot *uq = NULL;
struct xfs_dquot *gq = NULL;
struct xfs_dquot *pq = NULL;
@@ -1635,7 +1645,7 @@ xfs_qm_vop_dqalloc(
xfs_ilock(ip, lockflags);
if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
- gid = ip->i_d.di_gid;
+ gid = inode->i_gid;
/*
* Attach the dquot(s) to this inode, doing a dquot allocation
@@ -1650,7 +1660,7 @@ xfs_qm_vop_dqalloc(
}
if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
- if (ip->i_d.di_uid != uid) {
+ if (!uid_eq(inode->i_uid, uid)) {
/*
* What we need is the dquot that has this uid, and
* if we send the inode to dqget, the uid of the inode
@@ -1661,7 +1671,8 @@ xfs_qm_vop_dqalloc(
* holding ilock.
*/
xfs_iunlock(ip, lockflags);
- error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq);
+ error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
+ XFS_DQ_USER, true, &uq);
if (error) {
ASSERT(error != -ENOENT);
return error;
@@ -1682,9 +1693,10 @@ xfs_qm_vop_dqalloc(
}
}
if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
- if (ip->i_d.di_gid != gid) {
+ if (!gid_eq(inode->i_gid, gid)) {
xfs_iunlock(ip, lockflags);
- error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq);
+ error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
+ XFS_DQ_GROUP, true, &gq);
if (error) {
ASSERT(error != -ENOENT);
goto error_rele;
@@ -1810,7 +1822,7 @@ xfs_qm_vop_chown_reserve(
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
if (XFS_IS_UQUOTA_ON(mp) && udqp &&
- ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
+ i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) {
udq_delblks = udqp;
/*
* If there are delayed allocation blocks, then we have to
@@ -1823,7 +1835,7 @@ xfs_qm_vop_chown_reserve(
}
}
if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
- ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
+ i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) {
gdq_delblks = gdqp;
if (delblks) {
ASSERT(ip->i_gdquot);
@@ -1920,14 +1932,15 @@ xfs_qm_vop_create_dqattach(
if (udqp && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
+ ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));
ip->i_udquot = xfs_qm_dqhold(udqp);
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
- ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
+ ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));
+
ip->i_gdquot = xfs_qm_dqhold(gdqp);
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
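
The quota conversion above moves from raw on-disk ids to kernel-internal kuid_t/kgid_t, mapping back to numeric ids only at the dqget/on-disk boundary. A sketch of the helpers involved, assuming an inode pointer in scope:

    struct user_namespace *user_ns = inode->i_sb->s_user_ns;

    /* kuid_t is the kernel-internal, namespace-mapped form of a uid. */
    kuid_t kuid = inode->i_uid;

    /* Map out to a raw id only at the on-disk / quota boundary. */
    uid_t disk_id = from_kuid(user_ns, kuid);

    /* Compare in kuid space; raw ids from different namespaces
     * are not directly comparable. */
    bool same_owner = uid_eq(kuid, current_fsuid());

    /* i_uid_read() is the init_user_ns shorthand used in the asserts. */
    uid_t raw = i_uid_read(inode);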
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 1ea82764bf89..5d5ac65aa1cc 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -29,8 +29,6 @@ xfs_qm_log_quotaoff(
int error;
struct xfs_qoff_logitem *qoffi;
- *qoffstartp = NULL;
-
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
if (error)
goto out;
@@ -62,7 +60,7 @@ out:
STATIC int
xfs_qm_log_quotaoff_end(
struct xfs_mount *mp,
- struct xfs_qoff_logitem *startqoff,
+ struct xfs_qoff_logitem **startqoff,
uint flags)
{
struct xfs_trans *tp;
@@ -73,9 +71,10 @@ xfs_qm_log_quotaoff_end(
if (error)
return error;
- qoffi = xfs_trans_get_qoff_item(tp, startqoff,
+ qoffi = xfs_trans_get_qoff_item(tp, *startqoff,
flags & XFS_ALL_QUOTA_ACCT);
xfs_trans_log_quotaoff_item(tp, qoffi);
+ *startqoff = NULL;
/*
* We have to make sure that the transaction is secure on disk before we
@@ -103,7 +102,7 @@ xfs_qm_scall_quotaoff(
uint dqtype;
int error;
uint inactivate_flags;
- struct xfs_qoff_logitem *qoffstart;
+ struct xfs_qoff_logitem *qoffstart = NULL;
/*
* No file system can have quotas enabled on disk but not in core.
@@ -228,7 +227,7 @@ xfs_qm_scall_quotaoff(
* So, we have QUOTAOFF start and end logitems; the start
* logitem won't get overwritten until the end logitem appears...
*/
- error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
+ error = xfs_qm_log_quotaoff_end(mp, &qoffstart, flags);
if (error) {
/* We're screwed now. Shutdown is the only option. */
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
@@ -261,6 +260,8 @@ xfs_qm_scall_quotaoff(
}
out_unlock:
+ if (error && qoffstart)
+ xfs_qm_qoff_logitem_relse(qoffstart);
mutex_unlock(&q->qi_quotaofflock);
return error;
}
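
The quotaoff change above is an ownership-transfer fix: the end-record helper now takes the start item by double pointer and clears it once consumed, so the caller's unwind path can tell whether it must still release the item. The idiom in isolation (all names in this sketch are made up):

    struct qoff_item;                               /* opaque for this sketch */
    int write_end_record(struct qoff_item *item);   /* made-up helpers */
    void release_item(struct qoff_item *item);

    static int consume_item(struct qoff_item **itemp)
    {
            int error = write_end_record(*itemp);

            if (!error)
                    *itemp = NULL;  /* reference now owned by the record */
            return error;
    }

    static void unwind(struct qoff_item *start, int error)
    {
            if (error && start)
                    release_item(start);    /* still ours: drop it */
    }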
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index efe42ae7a2f3..aa8fc1f55fbd 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -86,7 +86,7 @@ extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
struct xfs_dquot *, struct xfs_dquot *, int64_t, long, uint);
-extern int xfs_qm_vop_dqalloc(struct xfs_inode *, xfs_dqid_t, xfs_dqid_t,
+extern int xfs_qm_vop_dqalloc(struct xfs_inode *, kuid_t, kgid_t,
prid_t, uint, struct xfs_dquot **, struct xfs_dquot **,
struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
@@ -109,7 +109,7 @@ extern void xfs_qm_unmount_quotas(struct xfs_mount *);
#else
static inline int
-xfs_qm_vop_dqalloc(struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid,
+xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid,
prid_t prid, uint flags, struct xfs_dquot **udqp,
struct xfs_dquot **gdqp, struct xfs_dquot **pdqp)
{
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 113883c4f202..f70f1255220b 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -57,13 +57,13 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
/* Loop over all stats groups */
for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
- len += snprintf(buf + len, PATH_MAX - len, "%s",
+ len += scnprintf(buf + len, PATH_MAX - len, "%s",
xstats[i].desc);
/* inner loop does each group */
for (; j < xstats[i].endpoint; j++)
- len += snprintf(buf + len, PATH_MAX - len, " %u",
+ len += scnprintf(buf + len, PATH_MAX - len, " %u",
counter_val(stats, j));
- len += snprintf(buf + len, PATH_MAX - len, "\n");
+ len += scnprintf(buf + len, PATH_MAX - len, "\n");
}
/* extra precision counters */
for_each_possible_cpu(i) {
@@ -72,9 +72,9 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
}
- len += snprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
+ len += scnprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
- len += snprintf(buf + len, PATH_MAX-len, "debug %u\n",
+ len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
1);
#else
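The snprintf-to-scnprintf switch above matters for exactly this accumulation pattern. A short sketch (not from the patch) of why: snprintf() returns the length that *would* have been written, so after truncation "len" can exceed the buffer size and the next "buf + len" points past the end; scnprintf() returns the bytes actually stored, so "PATH_MAX - len" can never go negative.

    static int format_stats(char *buf)
    {
            int len = 0;

            /* Each call caps itself at the space actually remaining. */
            len += scnprintf(buf + len, PATH_MAX - len, "%s", "header");
            len += scnprintf(buf + len, PATH_MAX - len, " %u", 42U);
            len += scnprintf(buf + len, PATH_MAX - len, "\n");
            return len;
    }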
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index d762d42ed0ff..fa0fa3c70f1a 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -182,9 +182,7 @@ xfs_symlink(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp,
- xfs_kuid_to_uid(current_fsuid()),
- xfs_kgid_to_gid(current_fsgid()), prid,
+ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
@@ -194,7 +192,7 @@ xfs_symlink(
* The symlink will fit into the inode data fork?
* There can't be any attributes so we get the whole variable part.
*/
- if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
+ if (pathlen <= XFS_LITINO(mp))
fs_blocks = 0;
else
fs_blocks = xfs_symlink_blocks(mp, pathlen);
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index bc85b89f88ca..120398a37c2a 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -6,6 +6,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
+#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
@@ -27,6 +28,7 @@
#include "xfs_log_recover.h"
#include "xfs_filestream.h"
#include "xfs_fsmap.h"
+#include "xfs_btree_staging.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index e242988f57fb..efc7751550d9 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -35,6 +35,12 @@ struct xfs_icreate_log;
struct xfs_owner_info;
struct xfs_trans_res;
struct xfs_inobt_rec_incore;
+union xfs_btree_ptr;
+
+#define XFS_ATTR_FILTER_FLAGS \
+ { XFS_ATTR_ROOT, "ROOT" }, \
+ { XFS_ATTR_SECURE, "SECURE" }, \
+ { XFS_ATTR_INCOMPLETE, "INCOMPLETE" }
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -45,39 +51,39 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
__field(u32, hashval)
__field(u32, blkno)
__field(u32, offset)
- __field(void *, alist)
+ __field(void *, buffer)
__field(int, bufsize)
__field(int, count)
__field(int, firstu)
__field(int, dupcnt)
- __field(int, flags)
+ __field(unsigned int, attr_filter)
),
TP_fast_assign(
__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
__entry->ino = ctx->dp->i_ino;
- __entry->hashval = ctx->cursor->hashval;
- __entry->blkno = ctx->cursor->blkno;
- __entry->offset = ctx->cursor->offset;
- __entry->alist = ctx->alist;
+ __entry->hashval = ctx->cursor.hashval;
+ __entry->blkno = ctx->cursor.blkno;
+ __entry->offset = ctx->cursor.offset;
+ __entry->buffer = ctx->buffer;
__entry->bufsize = ctx->bufsize;
__entry->count = ctx->count;
__entry->firstu = ctx->firstu;
- __entry->flags = ctx->flags;
+ __entry->attr_filter = ctx->attr_filter;
),
TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
- "alist %p size %u count %u firstu %u flags %d %s",
+ "buffer %p size %u count %u firstu %u filter %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->hashval,
__entry->blkno,
__entry->offset,
__entry->dupcnt,
- __entry->alist,
+ __entry->buffer,
__entry->bufsize,
__entry->count,
__entry->firstu,
- __entry->flags,
- __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
+ __print_flags(__entry->attr_filter, "|",
+ XFS_ATTR_FILTER_FLAGS)
)
)
@@ -169,31 +175,31 @@ TRACE_EVENT(xfs_attr_list_node_descend,
__field(u32, hashval)
__field(u32, blkno)
__field(u32, offset)
- __field(void *, alist)
+ __field(void *, buffer)
__field(int, bufsize)
__field(int, count)
__field(int, firstu)
__field(int, dupcnt)
- __field(int, flags)
+ __field(unsigned int, attr_filter)
__field(u32, bt_hashval)
__field(u32, bt_before)
),
TP_fast_assign(
__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
__entry->ino = ctx->dp->i_ino;
- __entry->hashval = ctx->cursor->hashval;
- __entry->blkno = ctx->cursor->blkno;
- __entry->offset = ctx->cursor->offset;
- __entry->alist = ctx->alist;
+ __entry->hashval = ctx->cursor.hashval;
+ __entry->blkno = ctx->cursor.blkno;
+ __entry->offset = ctx->cursor.offset;
+ __entry->buffer = ctx->buffer;
__entry->bufsize = ctx->bufsize;
__entry->count = ctx->count;
__entry->firstu = ctx->firstu;
- __entry->flags = ctx->flags;
+ __entry->attr_filter = ctx->attr_filter;
__entry->bt_hashval = be32_to_cpu(btree->hashval);
__entry->bt_before = be32_to_cpu(btree->before);
),
TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
- "alist %p size %u count %u firstu %u flags %d %s "
+ "buffer %p size %u count %u firstu %u filter %s "
"node hashval %u, node before %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
@@ -201,12 +207,12 @@ TRACE_EVENT(xfs_attr_list_node_descend,
__entry->blkno,
__entry->offset,
__entry->dupcnt,
- __entry->alist,
+ __entry->buffer,
__entry->bufsize,
__entry->count,
__entry->firstu,
- __entry->flags,
- __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
+ __print_flags(__entry->attr_filter, "|",
+ XFS_ATTR_FILTER_FLAGS),
__entry->bt_hashval,
__entry->bt_before)
);
@@ -1701,7 +1707,8 @@ DECLARE_EVENT_CLASS(xfs_attr_class,
__field(int, namelen)
__field(int, valuelen)
__field(xfs_dahash_t, hashval)
- __field(int, flags)
+ __field(unsigned int, attr_filter)
+ __field(unsigned int, attr_flags)
__field(int, op_flags)
),
TP_fast_assign(
@@ -1712,11 +1719,12 @@ DECLARE_EVENT_CLASS(xfs_attr_class,
__entry->namelen = args->namelen;
__entry->valuelen = args->valuelen;
__entry->hashval = args->hashval;
- __entry->flags = args->flags;
+ __entry->attr_filter = args->attr_filter;
+ __entry->attr_flags = args->attr_flags;
__entry->op_flags = args->op_flags;
),
TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d valuelen %d "
- "hashval 0x%x flags %s op_flags %s",
+ "hashval 0x%x filter %s flags %s op_flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->namelen,
@@ -1724,7 +1732,11 @@ DECLARE_EVENT_CLASS(xfs_attr_class,
__entry->namelen,
__entry->valuelen,
__entry->hashval,
- __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
+ __print_flags(__entry->attr_filter, "|",
+ XFS_ATTR_FILTER_FLAGS),
+ __print_flags(__entry->attr_flags, "|",
+ { XATTR_CREATE, "CREATE" },
+ { XATTR_REPLACE, "REPLACE" }),
__print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)
@@ -3594,6 +3606,151 @@ TRACE_EVENT(xfs_check_new_dalign,
__entry->calc_rootino)
)
+TRACE_EVENT(xfs_btree_commit_afakeroot,
+ TP_PROTO(struct xfs_btree_cur *cur),
+ TP_ARGS(cur),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(unsigned int, levels)
+ __field(unsigned int, blocks)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->btnum = cur->bc_btnum;
+ __entry->agno = cur->bc_ag.agno;
+ __entry->agbno = cur->bc_ag.afake->af_root;
+ __entry->levels = cur->bc_ag.afake->af_levels;
+ __entry->blocks = cur->bc_ag.afake->af_blocks;
+ ),
+ TP_printk("dev %d:%d btree %s ag %u levels %u blocks %u root %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+ __entry->agno,
+ __entry->levels,
+ __entry->blocks,
+ __entry->agbno)
+)
+
+TRACE_EVENT(xfs_btree_commit_ifakeroot,
+ TP_PROTO(struct xfs_btree_cur *cur),
+ TP_ARGS(cur),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, agino)
+ __field(unsigned int, levels)
+ __field(unsigned int, blocks)
+ __field(int, whichfork)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->btnum = cur->bc_btnum;
+ __entry->agno = XFS_INO_TO_AGNO(cur->bc_mp,
+ cur->bc_ino.ip->i_ino);
+ __entry->agino = XFS_INO_TO_AGINO(cur->bc_mp,
+ cur->bc_ino.ip->i_ino);
+ __entry->levels = cur->bc_ino.ifake->if_levels;
+ __entry->blocks = cur->bc_ino.ifake->if_blocks;
+ __entry->whichfork = cur->bc_ino.whichfork;
+ ),
+ TP_printk("dev %d:%d btree %s ag %u agino %u whichfork %s levels %u blocks %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+ __entry->agno,
+ __entry->agino,
+ __entry->whichfork == XFS_ATTR_FORK ? "attr" : "data",
+ __entry->levels,
+ __entry->blocks)
+)
+
+TRACE_EVENT(xfs_btree_bload_level_geometry,
+ TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
+ uint64_t nr_this_level, unsigned int nr_per_block,
+ unsigned int desired_npb, uint64_t blocks,
+ uint64_t blocks_with_extra),
+ TP_ARGS(cur, level, nr_this_level, nr_per_block, desired_npb, blocks,
+ blocks_with_extra),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(unsigned int, level)
+ __field(unsigned int, nlevels)
+ __field(uint64_t, nr_this_level)
+ __field(unsigned int, nr_per_block)
+ __field(unsigned int, desired_npb)
+ __field(unsigned long long, blocks)
+ __field(unsigned long long, blocks_with_extra)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->nlevels = cur->bc_nlevels;
+ __entry->nr_this_level = nr_this_level;
+ __entry->nr_per_block = nr_per_block;
+ __entry->desired_npb = desired_npb;
+ __entry->blocks = blocks;
+ __entry->blocks_with_extra = blocks_with_extra;
+ ),
+ TP_printk("dev %d:%d btree %s level %u/%u nr_this_level %llu nr_per_block %u desired_npb %u blocks %llu blocks_with_extra %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+ __entry->level,
+ __entry->nlevels,
+ __entry->nr_this_level,
+ __entry->nr_per_block,
+ __entry->desired_npb,
+ __entry->blocks,
+ __entry->blocks_with_extra)
+)
+
+TRACE_EVENT(xfs_btree_bload_block,
+ TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
+ uint64_t block_idx, uint64_t nr_blocks,
+ union xfs_btree_ptr *ptr, unsigned int nr_records),
+ TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(unsigned int, level)
+ __field(unsigned long long, block_idx)
+ __field(unsigned long long, nr_blocks)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(unsigned int, nr_records)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->block_idx = block_idx;
+ __entry->nr_blocks = nr_blocks;
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+ xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
+
+ __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
+ __entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
+ } else {
+ __entry->agno = cur->bc_ag.agno;
+ __entry->agbno = be32_to_cpu(ptr->s);
+ }
+ __entry->nr_records = nr_records;
+ ),
+ TP_printk("dev %d:%d btree %s level %u block %llu/%llu fsb (%u/%u) recs %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+ __entry->level,
+ __entry->block_idx,
+ __entry->nr_blocks,
+ __entry->agno,
+ __entry->agbno,
+ __entry->nr_records)
+)
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 3b208f9a865c..1adc6bc53a56 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -306,6 +306,11 @@ xfs_trans_alloc(
*
* Note the zero-length reservation; this transaction MUST be cancelled
* without any dirty data.
+ *
+ * Callers should obtain freeze protection to avoid two conflicts with fs
+ * freezing: (1) having active transactions trip the m_active_trans ASSERTs;
+ * and (2) grabbing buffers at the same time that freeze is trying to drain
+ * the buffer LRU list.
*/
int
xfs_trans_alloc_empty(
@@ -450,7 +455,7 @@ xfs_trans_apply_sb_deltas(
int whole = 0;
bp = xfs_trans_getsb(tp, tp->t_mountp);
- sbp = XFS_BUF_TO_SBP(bp);
+ sbp = bp->b_addr;
/*
* Check that superblock mods match the mods made to AGF counters.
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 00cc5b8734be..2ef0dfbfb303 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -32,6 +32,7 @@ STATIC void
xfs_ail_check(
struct xfs_ail *ailp,
struct xfs_log_item *lip)
+ __must_hold(&ailp->ail_lock)
{
struct xfs_log_item *prev_lip;
struct xfs_log_item *next_lip;
@@ -529,8 +530,9 @@ xfsaild(
{
struct xfs_ail *ailp = data;
long tout = 0; /* milliseconds */
+ unsigned int noreclaim_flag;
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
set_freezable();
while (1) {
@@ -601,6 +603,7 @@ xfsaild(
tout = xfsaild_push(ailp);
}
+ memalloc_noreclaim_restore(noreclaim_flag);
return 0;
}
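
The xfsaild change above replaces an open-coded "current->flags |= PF_MEMALLOC" with the scoped API, which nests correctly and restores whatever state the task already had. The pattern in a minimal sketch (my_daemon and its helper are made up):

    static int my_daemon(void *data)
    {
            unsigned int noreclaim_flag;

            noreclaim_flag = memalloc_noreclaim_save();

            /* Allocations here will not recurse into direct reclaim. */
            do_reclaim_sensitive_work(data);        /* made-up helper */

            memalloc_noreclaim_restore(noreclaim_flag);
            return 0;
    }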
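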
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index b0fedb543f97..fc5d7276026e 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -12,53 +12,30 @@
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_acl.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
#include <linux/posix_acl_xattr.h>
-#include <linux/xattr.h>
static int
xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
struct inode *inode, const char *name, void *value, size_t size)
{
- int xflags = handler->flags;
- struct xfs_inode *ip = XFS_I(inode);
- int error, asize = size;
- size_t namelen = strlen(name);
-
- /* Convert Linux syscall to XFS internal ATTR flags */
- if (!size) {
- xflags |= ATTR_KERNOVAL;
- value = NULL;
- }
+ struct xfs_da_args args = {
+ .dp = XFS_I(inode),
+ .attr_filter = handler->flags,
+ .name = name,
+ .namelen = strlen(name),
+ .value = value,
+ .valuelen = size,
+ };
+ int error;
- error = xfs_attr_get(ip, name, namelen, (unsigned char **)&value,
- &asize, xflags);
+ error = xfs_attr_get(&args);
if (error)
return error;
- return asize;
-}
-
-void
-xfs_forget_acl(
- struct inode *inode,
- const char *name,
- int xflags)
-{
- /*
- * Invalidate any cached ACLs if the user has bypassed the ACL
- * interface. We don't validate the content whatsoever so it is caller
- * responsibility to provide data in valid format and ensure i_mode is
- * consistent.
- */
- if (xflags & ATTR_ROOT) {
-#ifdef CONFIG_XFS_POSIX_ACL
- if (!strcmp(name, SGI_ACL_FILE))
- forget_cached_acl(inode, ACL_TYPE_ACCESS);
- else if (!strcmp(name, SGI_ACL_DEFAULT))
- forget_cached_acl(inode, ACL_TYPE_DEFAULT);
-#endif
- }
+ return args.valuelen;
}
static int
@@ -66,25 +43,20 @@ xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
struct inode *inode, const char *name, const void *value,
size_t size, int flags)
{
- int xflags = handler->flags;
- struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_da_args args = {
+ .dp = XFS_I(inode),
+ .attr_filter = handler->flags,
+ .attr_flags = flags,
+ .name = name,
+ .namelen = strlen(name),
+ .value = (void *)value,
+ .valuelen = size,
+ };
int error;
- size_t namelen = strlen(name);
-
- /* Convert Linux syscall to XFS internal ATTR flags */
- if (flags & XATTR_CREATE)
- xflags |= ATTR_CREATE;
- if (flags & XATTR_REPLACE)
- xflags |= ATTR_REPLACE;
-
- if (value)
- error = xfs_attr_set(ip, name, namelen, (void *)value, size,
- xflags);
- else
- error = xfs_attr_remove(ip, name, namelen, xflags);
- if (!error)
- xfs_forget_acl(inode, name, xflags);
+ error = xfs_attr_set(&args);
+ if (!error && (handler->flags & XFS_ATTR_ROOT))
+ xfs_forget_acl(inode, name);
return error;
}
@@ -97,14 +69,14 @@ static const struct xattr_handler xfs_xattr_user_handler = {
static const struct xattr_handler xfs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
- .flags = ATTR_ROOT,
+ .flags = XFS_ATTR_ROOT,
.get = xfs_xattr_get,
.set = xfs_xattr_set,
};
static const struct xattr_handler xfs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
- .flags = ATTR_SECURE,
+ .flags = XFS_ATTR_SECURE,
.get = xfs_xattr_get,
.set = xfs_xattr_set,
};
@@ -134,7 +106,7 @@ __xfs_xattr_put_listent(
if (context->count < 0 || context->seen_enough)
return;
- if (!context->alist)
+ if (!context->buffer)
goto compute_size;
arraytop = context->count + prefix_len + namelen + 1;
@@ -143,7 +115,7 @@ __xfs_xattr_put_listent(
context->seen_enough = 1;
return;
}
- offset = (char *)context->alist + context->count;
+ offset = context->buffer + context->count;
strncpy(offset, prefix, prefix_len);
offset += prefix_len;
strncpy(offset, (char *)name, namelen); /* real name */
@@ -218,7 +190,6 @@ xfs_vn_listxattr(
size_t size)
{
struct xfs_attr_list_context context;
- struct attrlist_cursor_kern cursor = { 0 };
struct inode *inode = d_inode(dentry);
int error;
@@ -227,14 +198,13 @@ xfs_vn_listxattr(
*/
memset(&context, 0, sizeof(context));
context.dp = XFS_I(inode);
- context.cursor = &cursor;
context.resynch = 1;
- context.alist = size ? data : NULL;
+ context.buffer = size ? data : NULL;
context.bufsize = size;
context.firstu = context.bufsize;
context.put_listent = xfs_xattr_put_listent;
- error = xfs_attr_list_int(&context);
+ error = xfs_attr_list(&context);
if (error)
return error;
if (context.count < 0)
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 87fc14e97d2b..49b519f36b69 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20200214
+#define ACPI_CA_VERSION 0x20200326
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index b818ba60e19d..ec66779cb193 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -43,6 +43,7 @@
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
+#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -274,7 +275,8 @@ struct acpi_ivrs_header {
/* Values for subtable Type above */
enum acpi_ivrs_type {
- ACPI_IVRS_TYPE_HARDWARE = 0x10,
+ ACPI_IVRS_TYPE_HARDWARE1 = 0x10,
+ ACPI_IVRS_TYPE_HARDWARE2 = 0x11,
ACPI_IVRS_TYPE_MEMORY1 = 0x20,
ACPI_IVRS_TYPE_MEMORY2 = 0x21,
ACPI_IVRS_TYPE_MEMORY3 = 0x22
@@ -301,13 +303,26 @@ enum acpi_ivrs_type {
/* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */
-struct acpi_ivrs_hardware {
+struct acpi_ivrs_hardware_10 {
struct acpi_ivrs_header header;
u16 capability_offset; /* Offset for IOMMU control fields */
u64 base_address; /* IOMMU control registers */
u16 pci_segment_group;
u16 info; /* MSI number and unit ID */
- u32 reserved;
+ u32 feature_reporting;
+};
+
+/* 0x11: I/O Virtualization Hardware Definition Block (IVHD) */
+
+struct acpi_ivrs_hardware_11 {
+ struct acpi_ivrs_header header;
+ u16 capability_offset; /* Offset for IOMMU control fields */
+ u64 base_address; /* IOMMU control registers */
+ u16 pci_segment_group;
+ u16 info; /* MSI number and unit ID */
+ u32 attributes;
+ u64 efr_register_image;
+ u64 reserved;
};
/* Masks for Info field above */
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 2bf3baf819bb..b0b163b9efc6 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -39,7 +39,7 @@
#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
-#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Migrations Table */
+#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Mitigations Table */
#define ACPI_SIG_XENV "XENV" /* Xen Environment table */
#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */
@@ -673,10 +673,10 @@ struct acpi_table_wpbt {
/*******************************************************************************
*
- * WSMT - Windows SMM Security Migrations Table
+ * WSMT - Windows SMM Security Mitigations Table
* Version 1
*
- * Conforms to "Windows SMM Security Migrations Table",
+ * Conforms to "Windows SMM Security Mitigations Table",
* Version 1.0, April 18, 2016
*
******************************************************************************/
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 9dd4689a39cf..9e1367b19069 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -57,4 +57,4 @@
#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
-#endif /* __AUUID_H__ */
+#endif /* __ACUUID_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 47805172e73d..683e124ad517 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -297,6 +297,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
}
#endif
+static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
+ bool direct)
+{
+ if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
+ return fn(arg);
+ return work_on_cpu(cpu, fn, arg);
+}
+
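A hypothetical usage sketch of the new helper: fn runs inline when the caller is already a percpu thread on the target CPU (or when direct is set), and is bounced through the workqueue API otherwise. read_package_temp and query_thermal_sensor below are made up:

    static long read_package_temp(void *arg)
    {
            return query_thermal_sensor(arg);       /* made-up helper */
    }

    long temp = call_on_cpu(cpu, read_package_temp, sensor, false);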
/* in processor_perflib.c */
#ifdef CONFIG_CPU_FREQ
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index cd17d50697cc..36341dfded70 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -4,6 +4,58 @@
# (This file is not included when SRCARCH=um since UML borrows several
# asm headers from the host architecture.)
+mandatory-y += atomic.h
+mandatory-y += barrier.h
+mandatory-y += bitops.h
+mandatory-y += bug.h
+mandatory-y += bugs.h
+mandatory-y += cacheflush.h
+mandatory-y += checksum.h
+mandatory-y += compat.h
+mandatory-y += current.h
+mandatory-y += delay.h
+mandatory-y += device.h
+mandatory-y += div64.h
mandatory-y += dma-contiguous.h
+mandatory-y += dma-mapping.h
+mandatory-y += dma.h
+mandatory-y += emergency-restart.h
+mandatory-y += exec.h
+mandatory-y += fb.h
+mandatory-y += ftrace.h
+mandatory-y += futex.h
+mandatory-y += hardirq.h
+mandatory-y += hw_irq.h
+mandatory-y += io.h
+mandatory-y += irq.h
+mandatory-y += irq_regs.h
+mandatory-y += irq_work.h
+mandatory-y += kdebug.h
+mandatory-y += kmap_types.h
+mandatory-y += kprobes.h
+mandatory-y += linkage.h
+mandatory-y += local.h
+mandatory-y += mm-arch-hooks.h
+mandatory-y += mmiowb.h
+mandatory-y += mmu.h
+mandatory-y += mmu_context.h
+mandatory-y += module.h
mandatory-y += msi.h
+mandatory-y += pci.h
+mandatory-y += percpu.h
+mandatory-y += pgalloc.h
+mandatory-y += preempt.h
+mandatory-y += sections.h
+mandatory-y += serial.h
+mandatory-y += shmparam.h
mandatory-y += simd.h
+mandatory-y += switch_to.h
+mandatory-y += timex.h
+mandatory-y += tlbflush.h
+mandatory-y += topology.h
+mandatory-y += trace_clock.h
+mandatory-y += uaccess.h
+mandatory-y += unaligned.h
+mandatory-y += vga.h
+mandatory-y += word-at-a-time.h
+mandatory-y += xor.h
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 19eadac415c4..aea9aee1f3e9 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -2,10 +2,8 @@
#ifndef _ASM_GENERIC_GPIO_H
#define _ASM_GENERIC_GPIO_H
-#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/of.h>
#ifdef CONFIG_GPIOLIB
@@ -140,6 +138,8 @@ static inline void gpio_unexport(unsigned gpio)
#else /* !CONFIG_GPIOLIB */
+#include <linux/kernel.h>
+
static inline bool gpio_is_valid(int number)
{
/* only non-negative numbers are valid */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e2e2bef07dd2..329b8c8ca703 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -10,6 +10,7 @@
#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
+#include <asm-generic/pgtable_uffd.h>
#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h
new file mode 100644
index 000000000000..828966d4c281
--- /dev/null
+++ b/include/asm-generic/pgtable_uffd.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_GENERIC_PGTABLE_UFFD_H
+#define _ASM_GENERIC_PGTABLE_UFFD_H
+
+#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static __always_inline int pte_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline int pmd_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_swp_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline int pte_swp_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
+#endif /* _ASM_GENERIC_PGTABLE_UFFD_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index f391f6b500b4..3f1649a8cf55 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -13,6 +13,7 @@
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
+#include <linux/hugetlb_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -398,7 +399,7 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
* We rely on tlb_end_vma() to issue a flush, such that when we reset
* these values the batch is empty.
*/
- tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+ tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1b3ebe8593c0..62c68550aab6 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -43,27 +43,33 @@
*
* Memory Structure:
*
- * To support the needs of the most prominent user of AEAD ciphers, namely
- * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
- * to.
- *
- * The scatter list pointing to the input data must contain:
- *
- * * for RFC4106 ciphers, the concatenation of
- * associated authentication data || IV || plaintext or ciphertext. Note, the
- * same IV (buffer) is also set with the aead_request_set_crypt call. Note,
- * the API call of aead_request_set_ad must provide the length of the AAD and
- * the IV. The API call of aead_request_set_crypt only points to the size of
- * the input plaintext or ciphertext.
- *
- * * for "normal" AEAD ciphers, the concatenation of
- * associated authentication data || plaintext or ciphertext.
- *
- * It is important to note that if multiple scatter gather list entries form
- * the input data mentioned above, the first entry must not point to a NULL
- * buffer. If there is any potential where the AAD buffer can be NULL, the
- * calling code must contain a precaution to ensure that this does not result
- * in the first scatter gather list entry pointing to a NULL buffer.
+ * The source scatterlist must contain the concatenation of
+ * associated data || plaintext or ciphertext.
+ *
+ * The destination scatterlist has the same layout, except that the plaintext
+ * (resp. ciphertext) will grow (resp. shrink) by the authentication tag size
+ * during encryption (resp. decryption).
+ *
+ * In-place encryption/decryption is enabled by using the same scatterlist
+ * pointer for both the source and destination.
+ *
+ * Even in the out-of-place case, space must be reserved in the destination for
+ * the associated data, even though it won't be written to. This makes the
+ * in-place and out-of-place cases more consistent. It is permissible for the
+ * "destination" associated data to alias the "source" associated data.
+ *
+ * As with the other scatterlist crypto APIs, zero-length scatterlist elements
+ * are not allowed in the used part of the scatterlist. Thus, if there is no
+ * associated data, the first element must point to the plaintext/ciphertext.
+ *
+ * To meet the needs of IPsec, a special quirk applies to rfc4106, rfc4309,
+ * rfc4543, and rfc7539esp ciphers. For these ciphers, the final 'ivsize' bytes
+ * of the associated data buffer must contain a second copy of the IV. This is
+ * in addition to the copy passed to aead_request_set_crypt(). These two IV
+ * copies must not differ; different implementations of the same algorithm may
+ * behave differently in that case. Note that the algorithm might not actually
+ * treat the IV as associated data; nevertheless the length passed to
+ * aead_request_set_ad() must include it.
*/
struct crypto_aead;
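
A hedged sketch of the layout rules above for an in-place AES-GCM encryption. One buffer holds AD || plaintext with room after the plaintext for the 16-byte tag; key, iv, buf, assoclen and ptlen are assumed to be in scope, and async completion handling plus most error checking is omitted:

    struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
    struct aead_request *req;
    struct scatterlist sg;
    int err;

    crypto_aead_setkey(tfm, key, 16);
    crypto_aead_setauthsize(tfm, 16);

    req = aead_request_alloc(tfm, GFP_KERNEL);

    /* AD || plaintext, plus space for the tag the ciphertext grows by. */
    sg_init_one(&sg, buf, assoclen + ptlen + 16);

    aead_request_set_callback(req, 0, NULL, NULL);
    aead_request_set_crypt(req, &sg, &sg, ptlen, iv);  /* src == dst: in place */
    aead_request_set_ad(req, assoclen);

    err = crypto_aead_encrypt(req);     /* tag lands after the plaintext */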
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 24cfa96f98ea..56527c85d122 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -66,7 +66,7 @@ struct af_alg_sgl {
struct af_alg_tsgl {
struct list_head list;
unsigned int cur; /* Last processed SG entry */
- struct scatterlist sg[0]; /* Array of SGs forming the SGL */
+ struct scatterlist sg[]; /* Array of SGs forming the SGL */
};
#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \
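
The sg[0] form was a GNU zero-length-array extension; the C99 flexible array member lets the compiler and fortify machinery bounds-check accesses. The idiomatic allocation pairing is struct_size(), which computes header-plus-elements with overflow checking (a sketch):

    struct af_alg_tsgl *sgl;

    sgl = kzalloc(struct_size(sgl, sg, MAX_SGL_ENTS), GFP_KERNEL);
    if (!sgl)
            return -ENOMEM;
    sg_init_table(sgl->sg, MAX_SGL_ENTS);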
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 9d4d5cc47969..0b34a12c4a1c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -129,6 +129,7 @@ struct dw_hdmi_plat_data {
unsigned long input_bus_format;
unsigned long input_bus_encoding;
bool use_drm_infoframe;
+ bool ycbcr_420_allowed;
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index 1cc77bf38324..d96626a0e3fa 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -327,13 +327,13 @@ struct mhl_burst_bits_per_pixel_fmt {
struct {
u8 stream_id;
u8 pixel_format;
- } __packed desc[0];
+ } __packed desc[];
} __packed;
struct mhl_burst_emsc_support {
struct mhl3_burst_header hdr;
u8 num_entries;
- __be16 burst_id[0];
+ __be16 burst_id[];
} __packed;
struct mhl_burst_audio_descr {
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 951dfb15c27b..7b6cb4774e7d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -670,6 +670,9 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
}
int __must_check
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int __must_check
@@ -992,4 +995,77 @@ drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
return state->active || state->self_refresh_active;
}
+/**
+ * struct drm_bus_cfg - bus configuration
+ *
+ * This structure stores the configuration of a physical bus between two
+ * components in an output pipeline, usually between two bridges, an encoder
+ * and a bridge, or a bridge and a connector.
+ *
+ * The bus configuration is stored in &drm_bridge_state separately for the
+ * input and output buses, as seen from the point of view of each bridge. The
+ * bus configuration of a bridge output is usually identical to the
+ * configuration of the next bridge's input, but may differ if the signals are
+ * modified between the two bridges, for instance by an inverter on the board.
+ * The input and output configurations of a bridge may differ if the bridge
+ * modifies the signals internally, for instance by performing format
+ * conversion, or modifying signals polarities.
+ */
+struct drm_bus_cfg {
+ /**
+ * @format: format used on this bus (one of the MEDIA_BUS_FMT_* format)
+ *
+ * This field should not be directly modified by drivers
+ * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus
+ * format negotiation).
+ */
+ u32 format;
+
+ /**
+ * @flags: DRM_BUS_* flags used on this bus
+ */
+ u32 flags;
+};
+
+/**
+ * struct drm_bridge_state - Atomic bridge state object
+ */
+struct drm_bridge_state {
+ /**
+ * @base: inherit from &drm_private_state
+ */
+ struct drm_private_state base;
+
+ /**
+ * @bridge: the bridge this state refers to
+ */
+ struct drm_bridge *bridge;
+
+ /**
+ * @input_bus_cfg: input bus configuration
+ */
+ struct drm_bus_cfg input_bus_cfg;
+
+ /**
+	 * @output_bus_cfg: output bus configuration
+ */
+ struct drm_bus_cfg output_bus_cfg;
+};
+
+static inline struct drm_bridge_state *
+drm_priv_to_bridge_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct drm_bridge_state, base);
+}
+
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+
#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9db3cac48f4f..b268180c97eb 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -224,4 +224,12 @@ drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state,
return old_plane_state->crtc && !new_plane_state->crtc;
}
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
#endif /* DRM_ATOMIC_HELPER_H_ */
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 8171dea4cc22..3f8f1d627f7c 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,8 @@
#include <linux/types.h>
+struct drm_bridge;
+struct drm_bridge_state;
struct drm_crtc;
struct drm_crtc_state;
struct drm_plane;
@@ -80,3 +82,14 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
struct drm_private_state *state);
+
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge);
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge);
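Bridges carrying driver-specific state can subclass &drm_bridge_state and build their reset/duplicate hooks on the double-underscore variants. A sketch under that assumption (struct foo_bridge_state and its dual_link field are hypothetical):

struct foo_bridge_state {
	struct drm_bridge_state base;
	bool dual_link;			/* hypothetical driver field */
};

static struct drm_bridge_state *foo_bridge_reset(struct drm_bridge *bridge)
{
	struct foo_bridge_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return ERR_PTR(-ENOMEM);

	__drm_atomic_helper_bridge_reset(bridge, &state->base);
	state->dual_link = false;
	return &state->base;
}

static struct drm_bridge_state *
foo_bridge_duplicate_state(struct drm_bridge *bridge)
{
	struct foo_bridge_state *old =
		container_of(drm_priv_to_bridge_state(bridge->base.state),
			     struct foo_bridge_state, base);
	struct foo_bridge_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
	state->dual_link = old->dual_link;	/* carry over driver state */
	return &state->base;
}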
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 694e153a7531..ea2aa5ebae34 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -23,15 +23,32 @@
#ifndef __DRM_BRIDGE_H__
#define __DRM_BRIDGE_H__
-#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_atomic.h>
#include <drm/drm_encoder.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
struct drm_bridge;
struct drm_bridge_timings;
+struct drm_connector;
struct drm_panel;
+struct edid;
+struct i2c_adapter;
+
+/**
+ * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach
+ */
+enum drm_bridge_attach_flags {
+ /**
+ * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge
+ * shall not create a drm_connector.
+ */
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0),
+};
/**
* struct drm_bridge_funcs - drm_bridge control functions
@@ -41,7 +58,8 @@ struct drm_bridge_funcs {
* @attach:
*
* This callback is invoked whenever our bridge is being attached to a
- * &drm_encoder.
+ * &drm_encoder. The flags argument tunes the behaviour of the attach
+ * operation (see DRM_BRIDGE_ATTACH_*).
*
* The @attach callback is optional.
*
@@ -49,7 +67,8 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge);
+ int (*attach)(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags);
/**
* @detach:
@@ -109,7 +128,9 @@ struct drm_bridge_funcs {
* this function passes all other callbacks must succeed for this
* configuration.
*
- * The @mode_fixup callback is optional.
+ * The mode_fixup callback is optional. &drm_bridge_funcs.mode_fixup()
+ * is not called when &drm_bridge_funcs.atomic_check() is implemented,
+ * so only one of them should be provided.
*
* NOTE:
*
@@ -263,7 +284,7 @@ struct drm_bridge_funcs {
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_enable:
@@ -288,7 +309,7 @@ struct drm_bridge_funcs {
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_disable:
*
@@ -311,7 +332,7 @@ struct drm_bridge_funcs {
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_post_disable:
@@ -337,7 +358,275 @@ struct drm_bridge_funcs {
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
+
+ /**
+ * @atomic_duplicate_state:
+ *
+ * Duplicate the current bridge state object (which is guaranteed to be
+ * non-NULL).
+ *
+ * The atomic_duplicate_state hook is mandatory if the bridge
+ * implements any of the atomic hooks, and should be left unassigned
+ * otherwise. For bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_duplicate_state() helper function shall be
+ * used to implement this hook.
+ *
+ * RETURNS:
+ * A valid drm_bridge_state object or NULL if the allocation fails.
+ */
+ struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge);
+
+ /**
+ * @atomic_destroy_state:
+ *
+ * Destroy a bridge state object previously allocated by
+ * &drm_bridge_funcs.atomic_duplicate_state().
+ *
+ * The atomic_destroy_state hook is mandatory if the bridge implements
+ * any of the atomic hooks, and should be left unassigned otherwise.
+ * For bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_destroy_state() helper function shall be
+ * used to implement this hook.
+ */
+ void (*atomic_destroy_state)(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+
+ /**
+ * @atomic_get_output_bus_fmts:
+ *
+ * Return the supported bus formats on the output end of a bridge.
+ * The returned array must be allocated with kmalloc() and will be
+ * freed by the caller. If the allocation fails, NULL should be
+ * returned. num_output_fmts must be set to the returned array size.
+ * Formats listed in the returned array should be listed in decreasing
+ * preference order (the core will try all formats until it finds one
+ * that works).
+ *
+ * This method is only called on the last element of the bridge chain
+ * as part of the bus format negotiation process that happens in
+ * &drm_atomic_bridge_chain_select_bus_fmts().
+ * This method is optional. When not implemented, the core will
+ * fall back to &drm_connector.display_info.bus_formats[0] if
+ * &drm_connector.display_info.num_bus_formats > 0,
+ * or to MEDIA_BUS_FMT_FIXED otherwise.
+ */
+ u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts);
+
+ /**
+ * @atomic_get_input_bus_fmts:
+ *
+ * Return the supported bus formats on the input end of a bridge for
+ * a specific output bus format.
+ *
+ * The returned array must be allocated with kmalloc() and will be
+ * freed by the caller. If the allocation fails, NULL should be
+ * returned. num_input_fmts must be set to the returned array size.
+ * Formats listed in the returned array should be listed in decreasing
+ * preference order (the core will try all formats until it finds one
+ * that works). When the format is not supported NULL should be
+ * returned and num_input_fmts should be set to 0.
+ *
+ * This method is called on all elements of the bridge chain as part of
+ * the bus format negotiation process that happens in
+ * drm_atomic_bridge_chain_select_bus_fmts().
+ * This method is optional. When not implemented, the core will bypass
+ * bus format negotiation on this element of the bridge without
+ * failing, and the previous element in the chain will be passed
+ * MEDIA_BUS_FMT_FIXED as its output bus format.
+ *
+ * Bridge drivers that need to support being linked to bridges that are
+ * not supporting bus format negotiation should handle the
+ * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a
+ * sensible default value or extracting this information from somewhere
+ * else (FW property, &drm_display_mode, &drm_display_info, ...)
+ *
+ * Note: Even if input format selection on the first bridge has no
+ * impact on the negotiation process (bus format negotiation stops once
+ * we reach the first element of the chain), drivers are expected to
+ * return accurate input formats as the input format may be used to
+ * configure the CRTC output appropriately.
+ */
+ u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+ /**
+ * @atomic_check:
+ *
+ * This method is responsible for checking bridge state correctness.
+ * It can also check the state of the surrounding components in chain
+ * to make sure the whole pipeline can work properly.
+ *
+ * &drm_bridge_funcs.atomic_check() hooks are called in reverse
+ * order (from the last to the first bridge).
+ *
+ * This method is optional. &drm_bridge_funcs.mode_fixup() is not
+ * called when &drm_bridge_funcs.atomic_check() is implemented, so only
+ * one of them should be provided.
+ *
+ * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or
+ * &drm_bridge_state.output_bus_cfg.flags, it should happen in
+ * this function. By default the &drm_bridge_state.output_bus_cfg.flags
+ * field is set to the next bridge's
+ * &drm_bridge_state.input_bus_cfg.flags value, or
+ * &drm_connector.display_info.bus_flags if the bridge is the last
+ * element in the chain.
+ *
+ * RETURNS:
+ * zero if the check passed, a negative error code otherwise.
+ */
+ int (*atomic_check)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+
+ /**
+ * @atomic_reset:
+ *
+ * Reset the bridge to a predefined state (or retrieve its current
+ * state) and return a &drm_bridge_state object matching this state.
+ * This function is called at attach time.
+ *
+ * The atomic_reset hook is mandatory if the bridge implements any of
+ * the atomic hooks, and should be left unassigned otherwise. For
+ * bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_reset() helper function shall be used to
+ * implement this hook.
+ *
+ * Note that the atomic_reset() semantics do not exactly match the
+ * reset() semantics found on other components (connector, plane, ...):
+ *
+ * 1. The reset operation happens when the bridge is attached, not when
+ * drm_mode_config_reset() is called
+ * 2. It's meant to be used exclusively on bridges that have been
+ * converted to the ATOMIC API
+ *
+ * RETURNS:
+ * A valid drm_bridge_state object in case of success, an ERR_PTR()
+ * giving the reason of the failure otherwise.
+ */
+ struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge);
+
+ /**
+ * @detect:
+ *
+ * Check if anything is attached to the bridge output.
+ *
+ * This callback is optional; if not implemented, the bridge will be
+ * considered to always have a component attached to its output.
+ * Bridges that implement this callback shall set the
+ * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops.
+ *
+ * RETURNS:
+ *
+ * drm_connector_status indicating the bridge output status.
+ */
+ enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+
+ /**
+ * @get_modes:
+ *
+ * Fill all modes currently valid for the sink into the &drm_connector
+ * with drm_mode_probed_add().
+ *
+ * The @get_modes callback is mostly intended to support non-probeable
+ * displays such as many fixed panels. Bridges that support reading
+ * EDID shall leave @get_modes unimplemented and implement the
+ * &drm_bridge_funcs->get_edid callback instead.
+ *
+ * This callback is optional. Bridges that implement it shall set the
+ * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
+ *
+ * The connector parameter shall be used for the sole purpose of
+ * filling modes, and shall not be stored internally by bridge drivers
+ * for future usage.
+ *
+ * RETURNS:
+ *
+ * The number of modes added by calling drm_mode_probed_add().
+ */
+ int (*get_modes)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @get_edid:
+ *
+ * Read and parse the EDID data of the connected display.
+ *
+ * The @get_edid callback is the preferred way of reporting mode
+ * information for a display connected to the bridge output. Bridges
+ * that support reading EDID shall implement this callback and leave
+ * the @get_modes callback unimplemented.
+ *
+ * The caller of this operation shall first verify the output
+ * connection status and refrain from reading EDID from a disconnected
+ * output.
+ *
+ * This callback is optional. Bridges that implement it shall set the
+ * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
+ *
+ * The connector parameter shall be used for the sole purpose of EDID
+ * retrieval and parsing, and shall not be stored internally by bridge
+ * drivers for future usage.
+ *
+ * RETURNS:
+ *
+ * An edid structure newly allocated with kmalloc() (or similar) on
+ * success, or NULL otherwise. The caller is responsible for freeing
+ * the returned edid structure with kfree().
+ */
+ struct edid *(*get_edid)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hpd_notify:
+ *
+ * Notify the bridge of hot plug detection.
+ *
+ * This callback is optional, it may be implemented by bridges that
+ * need to be notified of display connection or disconnection for
+ * internal reasons. One use case is to reset the internal state of CEC
+ * controllers for HDMI bridges.
+ */
+ void (*hpd_notify)(struct drm_bridge *bridge,
+ enum drm_connector_status status);
+
+ /**
+ * @hpd_enable:
+ *
+ * Enable hot plug detection. From now on the bridge shall call
+ * drm_bridge_hpd_notify() each time a change is detected in the output
+ * connection status, until hot plug detection gets disabled with
+ * @hpd_disable.
+ *
+ * This callback is optional and shall only be implemented by bridges
+ * that support hot-plug notification without polling. Bridges that
+ * implement it shall also implement the @hpd_disable callback and set
+ * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+ */
+ void (*hpd_enable)(struct drm_bridge *bridge);
+
+ /**
+ * @hpd_disable:
+ *
+ * Disable hot plug detection. Once this function returns the bridge
+ * shall not call drm_bridge_hpd_notify() when a change in the output
+ * connection status occurs.
+ *
+ * This callback is optional and shall only be implemented by bridges
+ * that support hot-plug notification without polling. Bridges that
+ * implement it shall also implement the @hpd_enable callback and set
+ * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+ */
+ void (*hpd_disable)(struct drm_bridge *bridge);
};
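For instance, a bridge that exposes a DDC bus through @ddc (see &drm_bridge below) could implement @get_edid with the standard EDID helper. A minimal sketch with a hypothetical foo_ prefix:

static struct edid *foo_bridge_get_edid(struct drm_bridge *bridge,
					struct drm_connector *connector)
{
	/* Read the EDID over the bridge's DDC/I2C adapter. */
	return drm_get_edid(connector, bridge->ddc);
}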
/**
@@ -377,9 +666,44 @@ struct drm_bridge_timings {
};
/**
+ * enum drm_bridge_ops - Bitmask of operations supported by the bridge
+ */
+enum drm_bridge_ops {
+ /**
+ * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to
+ * its output. Bridges that set this flag shall implement the
+ * &drm_bridge_funcs->detect callback.
+ */
+ DRM_BRIDGE_OP_DETECT = BIT(0),
+ /**
+ * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
+ * connected to its output. Bridges that set this flag shall implement
+ * the &drm_bridge_funcs->get_edid callback.
+ */
+ DRM_BRIDGE_OP_EDID = BIT(1),
+ /**
+ * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug
+ * without requiring polling. Bridges that set this flag shall
+ * implement the &drm_bridge_funcs->hpd_enable and
+ * &drm_bridge_funcs->hpd_disable callbacks if they support enabling
+ * and disabling hot-plug detection dynamically.
+ */
+ DRM_BRIDGE_OP_HPD = BIT(2),
+ /**
+ * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported
+ * by the display at its output. This does not include reading EDID
+ * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set
+ * this flag shall implement the &drm_bridge_funcs->get_modes callback.
+ */
+ DRM_BRIDGE_OP_MODES = BIT(3),
+};
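A bridge driver advertises these capabilities before registration. A hypothetical probe-time sketch (struct foo_bridge is an assumption, and foo_bridge_funcs is assumed defined elsewhere):

struct foo_bridge {
	struct drm_bridge bridge;
};

static int foo_bridge_probe(struct device *dev, struct i2c_adapter *ddc)
{
	struct foo_bridge *foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	foo->bridge.funcs = &foo_bridge_funcs;
	foo->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
	foo->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
	foo->bridge.ddc = ddc;

	drm_bridge_add(&foo->bridge);
	return 0;
}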
+
+/**
* struct drm_bridge - central DRM bridge control structure
*/
struct drm_bridge {
+ /** @base: inherit from &drm_private_obj */
+ struct drm_private_obj base;
/** @dev: DRM device this bridge belongs to */
struct drm_device *dev;
/** @encoder: encoder to which this bridge is connected */
@@ -402,13 +726,52 @@ struct drm_bridge {
const struct drm_bridge_funcs *funcs;
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
+ /** @ops: bitmask of operations supported by the bridge */
+ enum drm_bridge_ops ops;
+ /**
+ * @type: Type of the connection at the bridge output
+ * (DRM_MODE_CONNECTOR_*). For bridges at the end of this chain this
+ * identifies the type of connected display.
+ */
+ int type;
+ /**
+ * @interlace_allowed: Indicate that the bridge can handle interlaced
+ * modes.
+ */
+ bool interlace_allowed;
+ /**
+ * @ddc: Associated I2C adapter for DDC access, if any.
+ */
+ struct i2c_adapter *ddc;
+ /** private: */
+ /**
+ * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
+ */
+ struct mutex hpd_mutex;
+ /**
+ * @hpd_cb: Hot plug detection callback, registered with
+ * drm_bridge_hpd_enable().
+ */
+ void (*hpd_cb)(void *data, enum drm_connector_status status);
+ /**
+ * @hpd_data: Private data passed to the Hot plug detection callback
+ * @hpd_cb.
+ */
+ void *hpd_data;
};
+static inline struct drm_bridge *
+drm_priv_to_bridge(struct drm_private_obj *priv)
+{
+ return container_of(priv, struct drm_bridge, base);
+}
+
void drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
- struct drm_bridge *previous);
+ struct drm_bridge *previous,
+ enum drm_bridge_attach_flags flags);
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
@@ -482,6 +845,9 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
void drm_bridge_chain_enable(struct drm_bridge *bridge);
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
@@ -491,6 +857,27 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+ void (*cb)(void *data,
+ enum drm_connector_status status),
+ void *data);
+void drm_bridge_hpd_disable(struct drm_bridge *bridge);
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status);
+
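Bridges that set DRM_BRIDGE_OP_HPD report status changes through drm_bridge_hpd_notify(). A sketch of a threaded IRQ handler, reusing the hypothetical foo_bridge from the earlier sketch, with foo_bridge_hpd_asserted() standing in for a driver-specific register read:

static irqreturn_t foo_bridge_hpd_irq(int irq, void *arg)
{
	struct foo_bridge *foo = arg;
	enum drm_connector_status status;

	status = foo_bridge_hpd_asserted(foo) ? connector_status_connected
					      : connector_status_disconnected;

	/* Forwarded to the callback registered by drm_bridge_hpd_enable(). */
	drm_bridge_hpd_notify(&foo->bridge, status);

	return IRQ_HANDLED;
}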
#ifdef CONFIG_DRM_PANEL_BRIDGE
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
new file mode 100644
index 000000000000..33f6c3bbdb4a
--- /dev/null
+++ b/include/drm/drm_bridge_connector.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __DRM_BRIDGE_CONNECTOR_H__
+#define __DRM_BRIDGE_CONNECTOR_H__
+
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder);
+
+#endif /* __DRM_BRIDGE_CONNECTOR_H__ */
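Display controller drivers that opt out of bridge-created connectors combine DRM_BRIDGE_ATTACH_NO_CONNECTOR with this helper. A sketch (foo_ name hypothetical):

static int foo_attach_bridge(struct drm_device *drm,
			     struct drm_encoder *encoder,
			     struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	/* The bridge chain must not create its own connector. */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* Create one connector covering the whole bridge chain instead. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}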
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 5cf2c5dd8b1e..3ed5dee899fd 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -44,6 +44,11 @@ struct drm_client_funcs {
* returns zero gets the privilege to restore and no more clients are
* called. This callback is not called after @unregister has been called.
*
+ * Note that the core does not guarantee exclusion against concurrent
+ * drm_open(). Clients need to ensure this themselves, for example by
+ * using drm_master_internal_acquire() and
+ * drm_master_internal_release().
+ *
* This callback is optional.
*/
int (*restore)(struct drm_client_dev *client);
@@ -156,7 +161,7 @@ int drm_client_modeset_create(struct drm_client_dev *client);
void drm_client_modeset_free(struct drm_client_dev *client);
int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height);
bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation);
-int drm_client_modeset_commit_force(struct drm_client_dev *client);
+int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 221910948b37..19ae6bb5c85b 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -254,6 +254,23 @@ enum drm_panel_orientation {
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+/**
+ * struct drm_monitor_range_info - Panel's Monitor range in EDID for
+ * &drm_display_info
+ *
+ * This struct is used to store a frequency range supported by panel
+ * as parsed from EDID's detailed monitor range descriptor block.
+ *
+ * @min_vfreq: This is the min supported refresh rate in Hz from
+ * EDID's detailed monitor range.
+ * @max_vfreq: This is the max supported refresh rate in Hz from
+ * EDID's detailed monitor range.
+ */
+struct drm_monitor_range_info {
+ u8 min_vfreq;
+ u8 max_vfreq;
+};
+
/*
* This is a consolidated colorimetry list supported by HDMI and
* DP protocol standard. The respective connectors will register
@@ -435,6 +452,14 @@ struct drm_display_info {
bool dvi_dual;
/**
+ * @is_hdmi: True if the sink is an HDMI device.
+ *
+ * This field shall be used instead of calling
+ * drm_detect_hdmi_monitor() when possible.
+ */
+ bool is_hdmi;
+
+ /**
* @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
*/
bool has_hdmi_infoframe;
@@ -465,6 +490,11 @@ struct drm_display_info {
* @non_desktop: Non desktop display (HMD).
*/
bool non_desktop;
+
+ /**
+ * @monitor_range: Frequency range supported by monitor range descriptor
+ */
+ struct drm_monitor_range_info monitor_range;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -1357,6 +1387,12 @@ struct drm_connector {
* rev1.1 4.2.2.6
*/
bool edid_corrupt;
+ /**
+ * @real_edid_checksum: real edid checksum for corrupted edid block.
+ * Required in Displayport 1.4 compliance testing
+ * rev1.1 4.2.2.6
+ */
+ u8 real_edid_checksum;
/** @debugfs_entry: debugfs directory for this connector */
struct dentry *debugfs_entry;
@@ -1512,6 +1548,7 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
+const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
@@ -1552,8 +1589,13 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable);
-int drm_connector_init_panel_orientation_property(
- struct drm_connector *connector, int width, int height);
+int drm_connector_set_panel_orientation(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation);
+int drm_connector_set_panel_orientation_with_quirk(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation,
+ int width, int height);
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
int min, int max);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5e9b15a0e8c5..59b51a09cae6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -174,12 +174,25 @@ struct drm_crtc_state {
* @no_vblank:
*
* Reflects the ability of a CRTC to send VBLANK events. This state
- * usually depends on the pipeline configuration, and the main usuage
- * is CRTCs feeding a writeback connector operating in oneshot mode.
- * In this case the VBLANK event is only generated when a job is queued
- * to the writeback connector, and we want the core to fake VBLANK
- * events when this part of the pipeline hasn't changed but others had
- * or when the CRTC and connectors are being disabled.
+ * usually depends on the pipeline configuration. If set to true, DRM
+ * atomic helpers will send out a fake VBLANK event during display
+ * updates after all hardware changes have been committed. This is
+ * implemented in drm_atomic_helper_fake_vblank().
+ *
+ * One usage is for drivers and/or hardware without support for VBLANK
+ * interrupts. Such drivers typically do not initialize vblanking
+ * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs
+ * without initialized vblanking, this field is set to true in
+ * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be
+ * sent out on each update of the display pipeline by
+ * drm_atomic_helper_fake_vblank().
+ *
+ * Another usage is CRTCs feeding a writeback connector operating in
+ * oneshot mode. In this case the fake VBLANK event is only generated
+ * when a job is queued to the writeback connector, and we want the
+ * core to fake VBLANK events when this part of the pipeline hasn't
+ * changed but others had or when the CRTC and connectors are being
+ * disabled.
*
* __drm_atomic_helper_crtc_duplicate_state() will not reset the value
* from the current state, the CRTC driver is then responsible for
@@ -335,7 +348,14 @@ struct drm_crtc_state {
* - Events for disabled CRTCs are not allowed, and drivers can ignore
* that case.
*
- * This can be handled by the drm_crtc_send_vblank_event() function,
+ * For very simple hardware without VBLANK interrupt, enabling
+ * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers
+ * send a fake VBLANK event at the end of the display update after all
+ * hardware changes have been applied. See
+ * drm_atomic_helper_fake_vblank().
+ *
+ * For more complex hardware this
+ * can be handled by the drm_crtc_send_vblank_event() function,
* which the driver should call on the provided event upon completion of
* the atomic commit. Note that if the driver supports vblank signalling
* and timestamping the vblank counters and timestamps must agree with
@@ -867,6 +887,47 @@ struct drm_crtc_funcs {
* new drivers as the replacement of &drm_driver.disable_vblank hook.
*/
void (*disable_vblank)(struct drm_crtc *crtc);
+
+ /**
+ * @get_vblank_timestamp:
+ *
+ * Called by drm_get_last_vbltimestamp(). Should return a precise
+ * timestamp when the most recent vblank interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of vblank will start scanning out,
+ * the time immediately after end of the vblank interval. If the
+ * @crtc is currently inside vblank, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * Parameters:
+ *
+ * crtc:
+ * CRTC for which timestamp should be returned.
+ * max_error:
+ * Maximum allowable timestamp error in nanoseconds.
+ * Implementations should strive to provide a timestamp
+ * with an error of at most max_error nanoseconds.
+ * The true upper bound on the error is returned through
+ * this parameter.
+ * vblank_time:
+ * Target location for returned vblank timestamp.
+ * in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if flag is set.
+ *
+ * Returns:
+ *
+ * True on success, false on failure, which means the core should
+ * fallback to a simple timestamp taken in drm_crtc_handle_vblank().
+ */
+ bool (*get_vblank_timestamp)(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
};
/**
@@ -974,11 +1035,12 @@ struct drm_crtc {
* Programmed mode in hw, after adjustments for encoders, crtc, panel
* scaling etc. Should only be used by legacy drivers, for high
* precision vblank timestamps in
- * drm_calc_vbltimestamp_from_scanoutpos().
+ * drm_crtc_vblank_helper_get_vblank_timestamp().
*
* Note that atomic drivers should not use this, but instead use
* &drm_crtc_state.adjusted_mode. And for high-precision timestamps
- * drm_calc_vbltimestamp_from_scanoutpos() used &drm_vblank_crtc.hwmode,
+ * drm_crtc_vblank_helper_get_vblank_timestamp() used
+ * &drm_vblank_crtc.hwmode,
* which is filled out by calling drm_calc_timestamping_constants().
*/
struct drm_display_mode hwmode;
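With the hook moved to &drm_crtc_funcs, drivers that relied on scanout-position-based timestamping wire it up per CRTC. A sketch assuming a driver-provided foo_crtc_get_scanout_position():

static bool foo_crtc_get_scanout_position(struct drm_crtc *crtc,
					  bool in_vblank_irq, int *vpos,
					  int *hpos, ktime_t *stime,
					  ktime_t *etime,
					  const struct drm_display_mode *mode);

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	/* Used by drm_crtc_vblank_helper_get_vblank_timestamp() below. */
	.get_scanout_position = foo_crtc_get_scanout_position,
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};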
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 1acfc3bbd3fb..bb60a949f416 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -144,7 +144,7 @@ struct drm_device {
* Usage counter for outstanding files open,
* protected by drm_global_mutex
*/
- int open_count;
+ atomic_t open_count;
/** @filelist_mutex: Protects @filelist. */
struct mutex filelist_mutex;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index bc04467f7c3a..c6119e4c169a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -392,6 +392,8 @@
# define DP_DS_12BPC 2
# define DP_DS_16BPC 3
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+
/* DP Forward error Correction Registers */
#define DP_FEC_CAPABILITY 0x090 /* 1.4 */
# define DP_FEC_CAPABLE (1 << 0)
@@ -1457,6 +1459,9 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum);
+
int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4]);
int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -1493,13 +1498,16 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+u32 drm_dp_get_edid_quirks(const struct edid *edid);
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
* Display Port sink and branch devices in the wild have a variety of bugs, try
* to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them.
+ * implement workarounds for them. Note that because some devices have
+ * unreliable OUIs, the EDID of sinks should also be checked for quirks using
+ * drm_dp_get_edid_quirks().
*/
enum drm_dp_quirk {
/**
@@ -1530,19 +1538,31 @@ enum drm_dp_quirk {
* The DSC caps can be read from the physical aux instead.
*/
DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+ /**
+ * @DP_QUIRK_FORCE_DPCD_BACKLIGHT:
+ *
+ * The device is telling the truth when it says that it uses DPCD
+ * backlight controls, even if the system's firmware disagrees. This
+ * quirk should be checked against both the ident and panel EDID.
+ * When present, the driver should honor the DPCD backlight
+ * capabilities advertised.
+ */
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT,
};
/**
* drm_dp_has_quirk() - does the DP device have a specific quirk
 * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
* @quirk: Quirk to query for
*
* Return true if DP device identified by @desc has @quirk.
*/
static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks,
+ enum drm_dp_quirk quirk)
{
- return desc->quirks & BIT(quirk);
+ return (desc->quirks | edid_quirks) & BIT(quirk);
}
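A caller therefore checks both sources before honoring a quirk; a sketch with a hypothetical foo_ wrapper:

static bool foo_use_dpcd_backlight(struct drm_dp_aux *aux,
				   const struct edid *edid,
				   const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	struct drm_dp_desc desc;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)))
		return false;

	/* The ident-based and EDID-based quirk masks are ORed together. */
	return drm_dp_has_quirk(&desc, drm_dp_get_edid_quirks(edid),
				DP_QUIRK_FORCE_DPCD_BACKLIGHT);
}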
#ifdef CONFIG_DRM_DP_CEC
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 41725d88d27e..3cde42b333c3 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -479,7 +479,6 @@ struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
- void (*register_connector)(struct drm_connector *connector);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
};
@@ -591,6 +590,11 @@ struct drm_dp_mst_topology_mgr {
bool payload_id_table_cleared : 1;
/**
+ * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
+ */
+ bool is_waiting_for_dwn_reply : 1;
+
+ /**
* @mst_primary: Pointer to the primary/first branch device.
*/
struct drm_dp_mst_branch *mst_primary;
@@ -620,11 +624,6 @@ struct drm_dp_mst_topology_mgr {
struct mutex qlock;
/**
- * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
- */
- bool is_waiting_for_dwn_reply;
-
- /**
* @tx_msg_downq: List of pending down replies.
*/
struct list_head tx_msg_downq;
@@ -635,11 +634,13 @@ struct drm_dp_mst_topology_mgr {
struct mutex payload_lock;
/**
* @proposed_vcpis: Array of pointers for the new VCPI allocation. The
- * VCPI structure itself is &drm_dp_mst_port.vcpi.
+ * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
+ * this array is determined by @max_payloads.
*/
struct drm_dp_vcpi **proposed_vcpis;
/**
- * @payloads: Array of payloads.
+ * @payloads: Array of payloads. The size of this array is determined
+ * by @max_payloads.
*/
struct drm_dp_payload *payloads;
/**
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index cf13470810a5..97109df5beac 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -269,159 +269,6 @@ struct drm_driver {
void (*release) (struct drm_device *);
/**
- * @get_vblank_counter:
- *
- * Driver callback for fetching a raw hardware vblank counter for the
- * CRTC specified with the pipe argument. If a device doesn't have a
- * hardware counter, the driver can simply leave the hook as NULL.
- * The DRM core will account for missed vblank events while interrupts
- * where disabled based on system timestamps.
- *
- * Wraparound handling and loss of events due to modesetting is dealt
- * with in the DRM core code, as long as drivers call
- * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
- * enabling a CRTC.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.get_vblank_counter instead.
- *
- * Returns:
- *
- * Raw vblank counter value.
- */
- u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @enable_vblank:
- *
- * Enable vblank interrupts for the CRTC specified with the pipe
- * argument.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.enable_vblank instead.
- *
- * Returns:
- *
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
- int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @disable_vblank:
- *
- * Disable vblank interrupts for the CRTC specified with the pipe
- * argument.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.disable_vblank instead.
- */
- void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @get_scanout_position:
- *
- * Called by vblank timestamping code.
- *
- * Returns the current display scanout position from a crtc, and an
- * optional accurate ktime_get() timestamp of when position was
- * measured. Note that this is a helper callback which is only used if a
- * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the
- * @get_vblank_timestamp callback.
- *
- * Parameters:
- *
- * dev:
- * DRM device.
- * pipe:
- * Id of the crtc to query.
- * in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- * vpos:
- * Target location for current vertical scanout position.
- * hpos:
- * Target location for current horizontal scanout position.
- * stime:
- * Target location for timestamp taken immediately before
- * scanout position query. Can be NULL to skip timestamp.
- * etime:
- * Target location for timestamp taken immediately after
- * scanout position query. Can be NULL to skip timestamp.
- * mode:
- * Current display timings.
- *
- * Returns vpos as a positive number while in active scanout area.
- * Returns vpos as a negative number inside vblank, counting the number
- * of scanlines to go until end of vblank, e.g., -1 means "one scanline
- * until start of active scanout / end of vblank."
- *
- * Returns:
- *
- * True on success, false if a reliable scanout position counter could
- * not be read out.
- *
- * FIXME:
- *
- * Since this is a helper to implement @get_vblank_timestamp, we should
- * move it to &struct drm_crtc_helper_funcs, like all the other
- * helper-internal hooks.
- */
- bool (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
- /**
- * @get_vblank_timestamp:
- *
- * Called by drm_get_last_vbltimestamp(). Should return a precise
- * timestamp when the most recent VBLANK interval ended or will end.
- *
- * Specifically, the timestamp in @vblank_time should correspond as
- * closely as possible to the time when the first video scanline of
- * the video frame after the end of VBLANK will start scanning out,
- * the time immediately after end of the VBLANK interval. If the
- * @crtc is currently inside VBLANK, this will be a time in the future.
- * If the @crtc is currently scanning out a frame, this will be the
- * past start time of the current scanout. This is meant to adhere
- * to the OpenML OML_sync_control extension specification.
- *
- * Paramters:
- *
- * dev:
- * dev DRM device handle.
- * pipe:
- * crtc for which timestamp should be returned.
- * max_error:
- * Maximum allowable timestamp error in nanoseconds.
- * Implementation should strive to provide timestamp
- * with an error of at most max_error nanoseconds.
- * Returns true upper bound on error for timestamp.
- * vblank_time:
- * Target location for returned vblank timestamp.
- * in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- *
- * Returns:
- *
- * True on success, false on failure, which means the core should
- * fallback to a simple timestamp taken in drm_crtc_handle_vblank().
- *
- * FIXME:
- *
- * We should move this hook to &struct drm_crtc_funcs like all the other
- * vblank hooks.
- */
- bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
- int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq);
-
- /**
* @irq_handler:
*
* Interrupt handler called when using drm_irq_install(). Not used by
@@ -458,20 +305,6 @@ struct drm_driver {
void (*irq_uninstall) (struct drm_device *dev);
/**
- * @master_create:
- *
- * Called whenever a new master is created. Only used by vmwgfx.
- */
- int (*master_create)(struct drm_device *dev, struct drm_master *master);
-
- /**
- * @master_destroy:
- *
- * Called whenever a master is destroyed. Only used by vmwgfx.
- */
- void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
-
- /**
* @master_set:
*
* Called whenever the minor master is set. Only used by vmwgfx.
@@ -775,6 +608,9 @@ struct drm_driver {
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
+ u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
+ int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
+ void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
int dev_priv_size;
};
@@ -824,6 +660,25 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
}
/**
+ * drm_core_check_all_features - check driver feature flags mask
+ * @dev: DRM device to check
+ * @features: feature flag(s) mask
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
+ *
+ * Returns true if all features in the @features mask are supported, false
+ * otherwise.
+ */
+static inline bool drm_core_check_all_features(const struct drm_device *dev,
+ u32 features)
+{
+ u32 supported = dev->driver->driver_features & dev->driver_features;
+
+ return features && (supported & features) == features;
+}
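Usage mirrors drm_core_check_feature(), but with a mask; a short hypothetical snippet:

static int foo_require_atomic(struct drm_device *dev)
{
	if (!drm_core_check_all_features(dev, DRIVER_MODESET | DRIVER_ATOMIC))
		return -EOPNOTSUPP;

	return 0;
}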
+
+/**
* drm_core_check_feature - check driver feature flags
* @dev: DRM device to check
* @feature: feature flag
@@ -833,9 +688,10 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
*
* Returns true if the @feature is supported, false otherwise.
*/
-static inline bool drm_core_check_feature(const struct drm_device *dev, u32 feature)
+static inline bool drm_core_check_feature(const struct drm_device *dev,
+ enum drm_driver_feature feature)
{
- return dev->driver->driver_features & dev->driver_features & feature;
+ return drm_core_check_all_features(dev, feature);
}
/**
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index f0b03d401c27..34b15e3d070c 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -91,6 +91,11 @@ struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
+#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00
+#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01
+#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
+#define DRM_EDID_CVT_SUPPORT_FLAG 0x04
+
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 5623994b6e9e..4370e039c015 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -174,7 +174,8 @@ struct drm_encoder {
struct drm_crtc *crtc;
/**
- * @bridge_chain: Bridges attached to this encoder.
+ * @bridge_chain: Bridges attached to this encoder. Drivers shall not
+ * access this field directly.
*/
struct list_head bridge_chain;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 1c6633da0f91..208dbf87afa3 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -213,8 +213,7 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
-int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper, int max_conn);
+int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
@@ -279,8 +278,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev,
}
static inline int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper,
- int max_conn)
+ struct drm_fb_helper *helper)
{
/* So drivers can use it to free the struct */
helper->dev = dev;
@@ -453,27 +451,6 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
#endif
-/* TODO: There's a todo entry to remove these three */
-static inline int
-drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
/**
* drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
* @a: memory range, users of which are to be removed
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 8b099b347817..5aaf1c4593a9 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -374,6 +374,7 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
+int drm_release_noglobal(struct inode *inode, struct file *filp);
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
@@ -390,4 +391,13 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
+#ifdef CONFIG_MMU
+struct drm_vma_offset_manager;
+unsigned long drm_get_unmapped_area(struct file *file,
+ unsigned long uaddr, unsigned long len,
+ unsigned long pgoff, unsigned long flags,
+ struct drm_vma_offset_manager *mgr);
+#endif /* CONFIG_MMU */
+
#endif /* _DRM_FILE_H_ */
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 573e9fd109bf..0f6e47213d8d 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -6,6 +6,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_modes.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -205,4 +206,12 @@ struct drm_vram_mm *drm_vram_helper_alloc_mm(
struct drm_device *dev, uint64_t vram_base, size_t vram_size);
void drm_vram_helper_release_mm(struct drm_device *dev);
+/*
+ * Mode-config helpers
+ */
+
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+
#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 06a11202a097..c6bab4986a65 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -276,7 +276,7 @@ void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
#define DRM_HDCP_2_VRL_LENGTH_SIZE 3
#define DRM_HDCP_2_DCP_SIG_SIZE 384
#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4
-#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC) >> 6)
+#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6)
struct hdcp_srm_header {
u8 srm_id;
@@ -288,8 +288,8 @@ struct hdcp_srm_header {
struct drm_device;
struct drm_connector;
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
- u8 *ksvs, u32 ksv_count);
+int drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
+ u8 *ksvs, u32 ksv_count);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector, bool hdcp_content_type);
void drm_hdcp_update_content_protection(struct drm_connector *connector,
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 5745710453c8..dcef3598f49e 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -194,17 +194,11 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock);
#ifdef CONFIG_PCI
-void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
#else
-static inline void __drm_legacy_pci_free(struct drm_device *dev,
- drm_dma_handle_t *dmah)
-{
-}
-
static inline int drm_legacy_pci_init(struct drm_driver *driver,
struct pci_driver *pdriver)
{
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 67c66f5ee591..33f325f5af2b 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -110,6 +110,18 @@ struct mipi_dbi_dev {
unsigned int rotation;
/**
+ * @left_offset: Horizontal offset of the display relative to the
+ * controller's driver array
+ */
+ unsigned int left_offset;
+
+ /**
+ * @top_offset: Vertical offset of the display relative to the
+ * controller's driver array
+ */
+ unsigned int top_offset;
+
+ /**
* @backlight: backlight device (optional)
*/
struct backlight_device *backlight;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index d7939c054259..ee8b0e80ca90 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -272,7 +272,7 @@ static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
*/
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
- return mm->hole_stack.next;
+ return READ_ONCE(mm->hole_stack.next);
}
/**
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index e946e20c61d8..99134d4f35eb 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -371,20 +371,13 @@ struct drm_display_mode {
int crtc_vtotal;
/**
- * @private:
+ * @private_flags:
*
- * Pointer for driver private data. This can only be used for mode
+ * Driver private flags. private_flags can only be used for mode
* objects passed to drivers in modeset operations. It shouldn't be used
* by atomic drivers since they can store any additional data by
* subclassing state structures.
*/
- int *private;
-
- /**
- * @private_flags:
- *
- * Similar to @private, but just an integer.
- */
int private_flags;
/**
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 5a87f1bd7a3f..7c20b1c8b6a7 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -450,6 +450,53 @@ struct drm_crtc_helper_funcs {
*/
void (*atomic_disable)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
+
+ /**
+ * @get_scanout_position:
+ *
+ * Called by vblank timestamping code.
+ *
+ * Returns the current display scanout position from a CRTC and an
+ * optional accurate ktime_get() timestamp of when the position was
+ * measured. Note that this is a helper callback which is only used
+ * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp()
+ * for the @drm_crtc_funcs.get_vblank_timestamp callback.
+ *
+ * Parameters:
+ *
+ * crtc:
+ * The CRTC.
+ * in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq
+ * quirks if the flag is set.
+ * vpos:
+ * Target location for current vertical scanout position.
+ * hpos:
+ * Target location for current horizontal scanout position.
+ * stime:
+ * Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * etime:
+ * Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
+ * mode:
+ * Current display timings.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * Returns:
+ *
+ * True on success, false if a reliable scanout position counter could
+ * not be read out.
+ */
+ bool (*get_scanout_position)(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
};
/**
@@ -646,22 +693,6 @@ struct drm_encoder_helper_funcs {
struct drm_connector_state *conn_state);
/**
- * @get_crtc:
- *
- * This callback is used by the legacy CRTC helpers to work around
- * deficiencies in its own book-keeping.
- *
- * Do not use, use atomic helpers instead, which get the book keeping
- * right.
- *
- * FIXME:
- *
- * Currently only nouveau is using this, and as soon as nouveau is
- * atomic we can ditch this hook.
- */
- struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
-
- /**
* @detect:
*
* This callback can be used by drivers who want to do detection on the
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 121f7aabccd1..6193cb555acc 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -198,7 +198,8 @@ static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
}
#endif
-#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
int drm_panel_of_backlight(struct drm_panel *panel);
#else
static inline int drm_panel_of_backlight(struct drm_panel *panel)
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 9031e217b506..3941b0255ecf 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -45,10 +45,6 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
-
#else
static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
@@ -62,13 +58,6 @@ static inline void drm_pci_free(struct drm_device *dev,
{
}
-static inline int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- return -ENOSYS;
-}
-
#endif
#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 8f99d389792d..ca7cee8e728a 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -382,42 +382,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \
-})
-
-/**
- * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
- *
- * @dev: device pointer
- * @fmt: printf() like format string.
- */
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
- _DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
- fmt, ##__VA_ARGS__)
/*
* struct drm_device based logging
@@ -541,16 +505,42 @@ void __drm_err(const char *format, ...);
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
-#define DRM_DEBUG_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__); \
+})
+
+/*
+ * struct drm_device based WARNs
+ *
+ * drm_WARN*() acts like WARN*(), but with the key difference of
+ * using device-specific information so that we know which device
+ * the warning is originating from.
+ *
+ * Prefer drm_device based drm_WARN* over regular WARN*
+ */
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* Helper for struct drm_device based WARNs */
+#define drm_WARN(drm, condition, format, arg...) \
+ WARN(condition, "%s %s: " format, \
+ dev_driver_string((drm)->dev), \
+ dev_name((drm)->dev), ## arg)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define drm_WARN_ONCE(drm, condition, format, arg...) \
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string((drm)->dev), \
+ dev_name((drm)->dev), ## arg)
+
+#define drm_WARN_ON(drm, x) \
+ drm_WARN((drm), (x), "%s", \
+ "drm_WARN_ON(" __stringify(x) ")")
-#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define drm_WARN_ON_ONCE(drm, x) \
+ drm_WARN_ONCE((drm), (x), "%s", \
+ "drm_WARN_ON_ONCE(" __stringify(x) ")")
#endif /* DRM_PRINT_H_ */
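
The drm_print.h change above folds the last user of the removed rate-limited family into a self-contained DRM_DEBUG_KMS_RATELIMITED and adds drm_device based WARN wrappers. A minimal sketch of how a driver might use both, assuming a hypothetical example_crtc_check() hook (all example_* names are illustrative, not part of the patch):

#include <drm/drm_crtc.h>
#include <drm/drm_print.h>

static int example_crtc_check(struct drm_crtc *crtc,
			      struct drm_crtc_state *state)
{
	struct drm_device *drm = crtc->dev;

	/* Prefixes the splat with "<driver> <device>:" so we know
	 * which device the warning originates from. */
	if (drm_WARN_ON(drm, !state))
		return -EINVAL;

	/* Rate limited via __ratelimit(); safe on hot paths. */
	DRM_DEBUG_KMS_RATELIMITED("checking [CRTC:%u]\n", crtc->base.id);

	return 0;
}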
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 15afee9cf049..a026375464ff 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -100,8 +100,11 @@ struct drm_simple_display_pipe_funcs {
* This is the function drivers should submit the
* &drm_pending_vblank_event from. Using either
* drm_crtc_arm_vblank_event(), when the driver supports vblank
- * interrupt handling, or drm_crtc_send_vblank_event() directly in case
- * the hardware lacks vblank support entirely.
+ * interrupt handling, or drm_crtc_send_vblank_event() for the more
+ * complex cases. In case the hardware lacks vblank support entirely,
+ * drivers can set &struct drm_crtc_state.no_vblank in
+ * &struct drm_simple_display_pipe_funcs.check and let DRM's
+ * atomic helper fake a vblank event.
*/
void (*update)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state);
@@ -178,4 +181,8 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
const uint64_t *format_modifiers,
struct drm_connector *connector);
+int drm_simple_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ int encoder_type);
+
#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
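
For the no_vblank path documented in the update() kerneldoc above, hardware without a vblank interrupt only needs to flag the CRTC state from its check() hook; the atomic helpers then complete the pending event on their own. A sketch under that assumption (example_* names are hypothetical):

#include <drm/drm_simple_kms_helper.h>

static int example_pipe_check(struct drm_simple_display_pipe *pipe,
			      struct drm_plane_state *plane_state,
			      struct drm_crtc_state *crtc_state)
{
	/* No vblank interrupt: let DRM's atomic helper fake the
	 * vblank event when the commit completes. */
	crtc_state->no_vblank = true;
	return 0;
}

static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
	.check	= example_pipe_check,
	/* .update no longer has to send the vblank event itself */
};

The new drm_simple_encoder_init() declared above serves the same boilerplate-trimming goal, e.g. drm_simple_encoder_init(dev, &encoder, DRM_MODE_ENCODER_NONE) for an encoder with no callbacks of its own.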
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c16c44052b3d..dd9f5b9e56e4 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -174,13 +174,13 @@ struct drm_vblank_crtc {
unsigned int pipe;
/**
* @framedur_ns: Frame/Field duration in ns, used by
- * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
* drm_calc_timestamping_constants().
*/
int framedur_ns;
/**
* @linedur_ns: Line duration in ns, used by
- * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
* drm_calc_timestamping_constants().
*/
int linedur_ns;
@@ -190,8 +190,8 @@ struct drm_vblank_crtc {
*
* Cache of the current hardware display mode. Only valid when @enabled
* is set. This is used by helpers like
- * drm_calc_vbltimestamp_from_scanoutpos(). We can't just access the
- * hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). We can't just access
+ * the hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
* because that one is really hard to get from interrupt context.
*/
struct drm_display_mode hwmode;
@@ -206,6 +206,7 @@ struct drm_vblank_crtc {
};
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime);
@@ -229,13 +230,32 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count);
+
+/*
+ * Helpers for struct drm_crtc_funcs
+ */
+
+typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime,
+ ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq,
+ drm_vblank_get_scanout_position_func get_scanout_position);
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+
#endif
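
With drm_calc_vbltimestamp_from_scanoutpos() dropped, timestamping moves from a (dev, pipe) entry point to the per-CRTC helper declared above, which plugs directly into &struct drm_crtc_funcs. A sketch of the wiring (illustrative; it assumes the driver also provides a scanout-position hook for the helper to query):

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static const struct drm_crtc_funcs example_crtc_funcs = {
	/* ...modeset and atomic-state hooks elided... */

	/* The helper computes the timestamp from framedur_ns,
	 * linedur_ns and the current scanout position. */
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

Drivers with unusual timing sources can call the _internal variant instead and pass their own drm_vblank_get_scanout_position_func.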
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 589be851f8a1..26b04ff62676 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -262,7 +262,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
- * @score: score to help loadbalancer pick a idle sched
+ * @num_jobs: the number of jobs queued in the scheduler
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
*
@@ -283,8 +283,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
- bool ready;
+ atomic_t num_jobs;
+ bool ready;
bool free_guilty;
};
@@ -297,6 +297,10 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
@@ -337,5 +341,8 @@ void drm_sched_fence_finished(struct drm_sched_fence *fence);
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
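
The two new scheduler exports pair naturally: drm_sched_pick_best() returns the least-loaded scheduler from a list, and drm_sched_entity_modify_sched() retargets an existing entity at it. A minimal sketch of that pattern, with illustrative names:

#include <drm/gpu_scheduler.h>

static void example_rebalance(struct drm_sched_entity *entity,
			      struct drm_gpu_scheduler **sched_list,
			      unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *best;

	best = drm_sched_pick_best(sched_list, num_sched_list);
	if (best)
		/* Future submissions go to the least-loaded scheduler;
		 * jobs already queued are not migrated. */
		drm_sched_entity_modify_sched(entity, &best, 1);
}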
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
index 4d48de8890ca..702f613243bb 100644
--- a/include/drm/i915_mei_hdcp_interface.h
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -12,7 +12,6 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
/**
* enum hdcp_port_type - HDCP port implementation type defined by ME FW
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 66ca49db9633..0a9d042e075a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -135,18 +135,14 @@ struct ttm_tt;
* @num_pages: Actual number of pages.
* @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
- * zero, the object is put on the delayed delete list.
- * @list_kref: List reference count of this buffer object. This member is
- * used to avoid destruction while the buffer object is still on a list.
- * Lru lists may keep one refcount, the delayed delete list, and kref != 0
- * keeps one refcount. When this refcount reaches zero,
- * the object is destroyed.
+ * zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
+ * @deleted: True if the object is only a zombie and already deleted.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
@@ -183,9 +179,7 @@ struct ttm_buffer_object {
/**
* Members not needing protection.
*/
-
struct kref kref;
- struct kref list_kref;
/**
* Members protected by the bo::resv::reserved lock.
@@ -195,6 +189,7 @@ struct ttm_buffer_object {
struct file *persistent_swap_storage;
struct ttm_tt *ttm;
bool evicted;
+ bool deleted;
/**
* Members protected by the bdev::lru_lock.
@@ -732,7 +727,8 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
- pgoff_t num_prefault);
+ pgoff_t num_prefault,
+ pgoff_t fault_page_size);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
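
The ttm_bo_api.h hunk collapses the old double refcount: @list_kref is gone, LRU lists no longer hold their own reference, and a single @kref plus the new @deleted flag decide between immediate destruction and the delayed-delete path. In usage terms (a sketch; example_use_bo() is hypothetical):

#include <drm/ttm/ttm_bo_api.h>

static void example_use_bo(struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);		/* the one remaining reference type */

	/* ...map, move or validate the buffer object... */

	/* The final put either destroys the object outright or, if it
	 * is still busy, marks it deleted and queues it on ddestroy. */
	ttm_bo_put(bo);
}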
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cac7a8a0825a..c9e0fd09f4b2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -210,8 +210,6 @@ struct ttm_mem_type_manager {
* struct ttm_bo_driver
*
* @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @invalidate_caches: Callback to invalidate read caches when a buffer object
- * has been evicted.
* @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
* structure.
* @evict_flags: Callback to obtain placement flags when a buffer is evicted.
@@ -256,19 +254,6 @@ struct ttm_bo_driver {
*/
void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
- /**
- * struct ttm_bo_driver member invalidate_caches
- *
- * @bdev: the buffer object device.
- * @flags: new placement of the rebound buffer object.
- *
- * A previosly evicted buffer has been rebound in a
- * potentially new location. Tell the driver that it might
- * consider invalidating read (texture) caches on the next command
- * submission as a consequence.
- */
-
- int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man);
diff --git a/include/dt-bindings/arm/coresight-cti-dt.h b/include/dt-bindings/arm/coresight-cti-dt.h
new file mode 100644
index 000000000000..61e7bdf8ea6e
--- /dev/null
+++ b/include/dt-bindings/arm/coresight-cti-dt.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for the defined trigger signal
+ * types on CoreSight CTI.
+ */
+
+#ifndef _DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H
+#define _DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H
+
+#define GEN_IO 0
+#define GEN_INTREQ 1
+#define GEN_INTACK 2
+#define GEN_HALTREQ 3
+#define GEN_RESTARTREQ 4
+#define PE_EDBGREQ 5
+#define PE_DBGRESTART 6
+#define PE_CTIIRQ 7
+#define PE_PMUIRQ 8
+#define PE_DBGTRIGGER 9
+#define ETM_EXTOUT 10
+#define ETM_EXTIN 11
+#define SNK_FULL 12
+#define SNK_ACQCOMP 13
+#define SNK_FLUSHCOMP 14
+#define SNK_FLUSHIN 15
+#define SNK_TRIGIN 16
+#define STM_ASYNCOUT 17
+#define STM_TOUT_SPTE 18
+#define STM_TOUT_SW 19
+#define STM_TOUT_HETE 20
+#define STM_HWEVENT 21
+#define ELA_TSTART 22
+#define ELA_TSTOP 23
+#define ELA_DBGREQ 24
+#define CTI_TRIG_MAX 25
+
+#endif /*_DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H */
diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h
index babd08a1d226..76b07826ed05 100644
--- a/include/dt-bindings/bus/ti-sysc.h
+++ b/include/dt-bindings/bus/ti-sysc.h
@@ -18,6 +18,10 @@
#define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4)
+/* PRUSS sysc found on AM33xx/AM43xx/AM57xx */
+#define SYSC_PRUSS_SUB_MWAIT (1 << 5)
+#define SYSC_PRUSS_STANDBY_INIT (1 << 4)
+
/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
#define SYSC_IDLE_FORCE 0
#define SYSC_IDLE_NO 1
diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h
index f0f04e0a249e..33b8826d936b 100644
--- a/include/dt-bindings/clock/dm814.h
+++ b/include/dt-bindings/clock/dm814.h
@@ -34,4 +34,9 @@
#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220)
#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224)
+/* alwon_ethernet clocks */
+#define DM814_ETHERNET_CLKCTRL_OFFSET 0x1d4
+#define DM814_ETHERNET_CLKCTRL_INDEX(offset) ((offset) - DM814_ETHERNET_CLKCTRL_OFFSET)
+#define DM814_ETHERNET_CPGMAC0_CLKCTRL DM814_ETHERNET_CLKCTRL_INDEX(0x1d4)
+
#endif
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index 0837c1a7ae49..b0d65d73db96 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -143,5 +143,7 @@
#define CLKID_CPU1_CLK 253
#define CLKID_CPU2_CLK 254
#define CLKID_CPU3_CLK 255
+#define CLKID_SPICC0_SCLK 258
+#define CLKID_SPICC1_SCLK 261
#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index db0763e96173..4073eb7a9da1 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -146,5 +146,6 @@
#define CLKID_CTS_VDAC 201
#define CLKID_HDMI_TX 202
#define CLKID_HDMI 205
+#define CLKID_ACODEC 206
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index e6a670e1a3f8..1d4c0dfe0202 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -451,5 +451,6 @@
#define IMX7D_SNVS_CLK 442
#define IMX7D_CAAM_CLK 443
#define IMX7D_KPP_ROOT_CLK 444
-#define IMX7D_CLK_END 445
+#define IMX7D_PXP_CLK 445
+#define IMX7D_CLK_END 446
#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index edeece2289f0..e63a5530aed7 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -265,6 +265,15 @@
#define IMX8MM_SYS_PLL2_333M_CG 244
#define IMX8MM_SYS_PLL2_500M_CG 245
-#define IMX8MM_CLK_END 246
+#define IMX8MM_CLK_M4_CORE 246
+#define IMX8MM_CLK_VPU_CORE 247
+#define IMX8MM_CLK_GPU3D_CORE 248
+#define IMX8MM_CLK_GPU2D_CORE 249
+
+#define IMX8MM_CLK_CLKO2 250
+
+#define IMX8MM_CLK_A53_CORE 251
+
+#define IMX8MM_CLK_END 252
#endif
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index 65ac6eb6c733..621ea0e87c67 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -228,6 +228,12 @@
#define IMX8MN_SYS_PLL2_333M_CG 209
#define IMX8MN_SYS_PLL2_500M_CG 210
-#define IMX8MN_CLK_END 211
+#define IMX8MN_CLK_SNVS_ROOT 211
+#define IMX8MN_CLK_GPU_CORE 212
+#define IMX8MN_CLK_GPU_SHADER 213
+
+#define IMX8MN_CLK_A53_CORE 214
+
+#define IMX8MN_CLK_END 215
#endif
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 2fab63186bca..47ab082238b4 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -173,7 +173,7 @@
#define IMX8MP_CLK_IPP_DO_CLKO1 164
#define IMX8MP_CLK_IPP_DO_CLKO2 165
#define IMX8MP_CLK_HDMI_FDCC_TST 166
-#define IMX8MP_CLK_HDMI_27M 167
+#define IMX8MP_CLK_HDMI_24M 167
#define IMX8MP_CLK_HDMI_REF_266M 168
#define IMX8MP_CLK_USDHC3 169
#define IMX8MP_CLK_MEDIA_CAM1_PIX 170
@@ -294,7 +294,8 @@
#define IMX8MP_CLK_DRAM_ALT_ROOT 285
#define IMX8MP_CLK_DRAM_CORE 286
#define IMX8MP_CLK_ARM 287
+#define IMX8MP_CLK_A53_CORE 288
-#define IMX8MP_CLK_END 288
+#define IMX8MP_CLK_END 289
#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 3bab9b21c8d7..9b8045d75b8b 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -424,6 +424,13 @@
#define IMX8MQ_SYS2_PLL_500M_CG 283
#define IMX8MQ_SYS2_PLL_1000M_CG 284
-#define IMX8MQ_CLK_END 285
+#define IMX8MQ_CLK_GPU_CORE 285
+#define IMX8MQ_CLK_GPU_SHADER 286
+#define IMX8MQ_CLK_M4_CORE 287
+#define IMX8MQ_CLK_VPU_CORE 288
+
+#define IMX8MQ_CLK_A53_CORE 289
+
+#define IMX8MQ_CLK_END 290
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 4b1a7724f20d..06bb7fe4c62f 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -26,6 +26,9 @@
#define MMP2_CLK_VCTCXO_4 25
#define MMP2_CLK_UART_PLL 26
#define MMP2_CLK_USB_PLL 27
+#define MMP3_CLK_PLL1_P 28
+#define MMP3_CLK_PLL2_P 29
+#define MMP3_CLK_PLL3 30
/* apb peripherals */
#define MMP2_CLK_TWSI0 60
@@ -50,6 +53,10 @@
#define MMP2_CLK_SSP2 79
#define MMP2_CLK_SSP3 80
#define MMP2_CLK_TIMER 81
+#define MMP2_CLK_THERMAL0 82
+#define MMP3_CLK_THERMAL1 83
+#define MMP3_CLK_THERMAL2 84
+#define MMP3_CLK_THERMAL3 85
/* axi peripherals */
#define MMP2_CLK_SDH0 101
@@ -74,6 +81,12 @@
#define MMP2_CLK_DISP0_LCDC 120
#define MMP2_CLK_USBHSIC0 121
#define MMP2_CLK_USBHSIC1 122
+#define MMP2_CLK_GPU_BUS 123
+#define MMP3_CLK_GPU_BUS MMP2_CLK_GPU_BUS
+#define MMP2_CLK_GPU_3D 124
+#define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D
+#define MMP3_CLK_GPU_2D 125
+#define MMP3_CLK_SDH4 126
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h
index e8029b2e92d7..1258fd05db68 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc7180.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7180_H
@@ -132,6 +132,11 @@
#define GCC_VIDEO_GPLL0_DIV_CLK_SRC 122
#define GCC_VIDEO_THROTTLE_AXI_CLK 123
#define GCC_VIDEO_XO_CLK 124
+#define GCC_MSS_CFG_AHB_CLK 125
+#define GCC_MSS_MFAB_AXIS_CLK 126
+#define GCC_MSS_NAV_AXI_CLK 127
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 128
+#define GCC_MSS_SNOC_AXI_CLK 129
/* GCC resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h
index 90d60ef94c64..3e1a91876610 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h
@@ -240,4 +240,8 @@
#define GCC_USB30_SEC_BCR 27
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28
+/* GCC GDSCRs */
+#define USB30_PRIM_GDSC 4
+#define USB30_SEC_GDSC 5
+
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8250.h b/include/dt-bindings/clock/qcom,gcc-sm8250.h
new file mode 100644
index 000000000000..7b7abe327e37
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8250.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8250_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL4 2
+#define GPLL9 3
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 4
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 5
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 6
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 7
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 8
+#define GCC_BOOT_ROM_AHB_CLK 9
+#define GCC_CAMERA_AHB_CLK 10
+#define GCC_CAMERA_HF_AXI_CLK 11
+#define GCC_CAMERA_SF_AXI_CLK 12
+#define GCC_CAMERA_XO_CLK 13
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 14
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 15
+#define GCC_CPUSS_AHB_CLK 16
+#define GCC_CPUSS_AHB_CLK_SRC 17
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 18
+#define GCC_CPUSS_DVM_BUS_CLK 19
+#define GCC_CPUSS_RBCPR_CLK 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 22
+#define GCC_DISP_AHB_CLK 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_SF_AXI_CLK 25
+#define GCC_DISP_XO_CLK 26
+#define GCC_GP1_CLK 27
+#define GCC_GP1_CLK_SRC 28
+#define GCC_GP2_CLK 29
+#define GCC_GP2_CLK_SRC 30
+#define GCC_GP3_CLK 31
+#define GCC_GP3_CLK_SRC 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_GPU_IREF_EN 36
+#define GCC_GPU_MEMNOC_GFX_CLK 37
+#define GCC_GPU_SNOC_DVM_GFX_CLK 38
+#define GCC_NPU_AXI_CLK 39
+#define GCC_NPU_BWMON_AXI_CLK 40
+#define GCC_NPU_BWMON_CFG_AHB_CLK 41
+#define GCC_NPU_CFG_AHB_CLK 42
+#define GCC_NPU_DMA_CLK 43
+#define GCC_NPU_GPLL0_CLK_SRC 44
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 45
+#define GCC_PCIE0_PHY_REFGEN_CLK 46
+#define GCC_PCIE1_PHY_REFGEN_CLK 47
+#define GCC_PCIE2_PHY_REFGEN_CLK 48
+#define GCC_PCIE_0_AUX_CLK 49
+#define GCC_PCIE_0_AUX_CLK_SRC 50
+#define GCC_PCIE_0_CFG_AHB_CLK 51
+#define GCC_PCIE_0_MSTR_AXI_CLK 52
+#define GCC_PCIE_0_PIPE_CLK 53
+#define GCC_PCIE_0_SLV_AXI_CLK 54
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 55
+#define GCC_PCIE_1_AUX_CLK 56
+#define GCC_PCIE_1_AUX_CLK_SRC 57
+#define GCC_PCIE_1_CFG_AHB_CLK 58
+#define GCC_PCIE_1_MSTR_AXI_CLK 59
+#define GCC_PCIE_1_PIPE_CLK 60
+#define GCC_PCIE_1_SLV_AXI_CLK 61
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 62
+#define GCC_PCIE_2_AUX_CLK 63
+#define GCC_PCIE_2_AUX_CLK_SRC 64
+#define GCC_PCIE_2_CFG_AHB_CLK 65
+#define GCC_PCIE_2_MSTR_AXI_CLK 66
+#define GCC_PCIE_2_PIPE_CLK 67
+#define GCC_PCIE_2_SLV_AXI_CLK 68
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 69
+#define GCC_PCIE_MDM_CLKREF_EN 70
+#define GCC_PCIE_PHY_AUX_CLK 71
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 72
+#define GCC_PCIE_WIFI_CLKREF_EN 73
+#define GCC_PCIE_WIGIG_CLKREF_EN 74
+#define GCC_PDM2_CLK 75
+#define GCC_PDM2_CLK_SRC 76
+#define GCC_PDM_AHB_CLK 77
+#define GCC_PDM_XO4_CLK 78
+#define GCC_PRNG_AHB_CLK 79
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 80
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 81
+#define GCC_QMIP_DISP_AHB_CLK 82
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 83
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 84
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 85
+#define GCC_QUPV3_WRAP0_CORE_CLK 86
+#define GCC_QUPV3_WRAP0_S0_CLK 87
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 88
+#define GCC_QUPV3_WRAP0_S1_CLK 89
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 90
+#define GCC_QUPV3_WRAP0_S2_CLK 91
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 92
+#define GCC_QUPV3_WRAP0_S3_CLK 93
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 94
+#define GCC_QUPV3_WRAP0_S4_CLK 95
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 96
+#define GCC_QUPV3_WRAP0_S5_CLK 97
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 98
+#define GCC_QUPV3_WRAP0_S6_CLK 99
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 100
+#define GCC_QUPV3_WRAP0_S7_CLK 101
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 103
+#define GCC_QUPV3_WRAP1_CORE_CLK 104
+#define GCC_QUPV3_WRAP1_S0_CLK 105
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S1_CLK 107
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 108
+#define GCC_QUPV3_WRAP1_S2_CLK 109
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S3_CLK 111
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 112
+#define GCC_QUPV3_WRAP1_S4_CLK 113
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 114
+#define GCC_QUPV3_WRAP1_S5_CLK 115
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 117
+#define GCC_QUPV3_WRAP2_CORE_CLK 118
+#define GCC_QUPV3_WRAP2_S0_CLK 119
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 120
+#define GCC_QUPV3_WRAP2_S1_CLK 121
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 122
+#define GCC_QUPV3_WRAP2_S2_CLK 123
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 124
+#define GCC_QUPV3_WRAP2_S3_CLK 125
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 126
+#define GCC_QUPV3_WRAP2_S4_CLK 127
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 128
+#define GCC_QUPV3_WRAP2_S5_CLK 129
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 130
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 131
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 132
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 133
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 134
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 135
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 136
+#define GCC_SDCC2_AHB_CLK 137
+#define GCC_SDCC2_APPS_CLK 138
+#define GCC_SDCC2_APPS_CLK_SRC 139
+#define GCC_SDCC4_AHB_CLK 140
+#define GCC_SDCC4_APPS_CLK 141
+#define GCC_SDCC4_APPS_CLK_SRC 142
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 143
+#define GCC_TSIF_AHB_CLK 144
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 145
+#define GCC_TSIF_REF_CLK 146
+#define GCC_TSIF_REF_CLK_SRC 147
+#define GCC_UFS_1X_CLKREF_EN 148
+#define GCC_UFS_CARD_AHB_CLK 149
+#define GCC_UFS_CARD_AXI_CLK 150
+#define GCC_UFS_CARD_AXI_CLK_SRC 151
+#define GCC_UFS_CARD_ICE_CORE_CLK 152
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 153
+#define GCC_UFS_CARD_PHY_AUX_CLK 154
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 155
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 156
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 157
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 158
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 159
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 160
+#define GCC_UFS_PHY_AHB_CLK 161
+#define GCC_UFS_PHY_AXI_CLK 162
+#define GCC_UFS_PHY_AXI_CLK_SRC 163
+#define GCC_UFS_PHY_ICE_CORE_CLK 164
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 165
+#define GCC_UFS_PHY_PHY_AUX_CLK 166
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 167
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 169
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 171
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177
+#define GCC_USB30_PRIM_SLEEP_CLK 178
+#define GCC_USB30_SEC_MASTER_CLK 179
+#define GCC_USB30_SEC_MASTER_CLK_SRC 180
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 181
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 182
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 183
+#define GCC_USB30_SEC_SLEEP_CLK 184
+#define GCC_USB3_PRIM_PHY_AUX_CLK 185
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 186
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 187
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 188
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 189
+#define GCC_USB3_SEC_CLKREF_EN 190
+#define GCC_USB3_SEC_PHY_AUX_CLK 191
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 192
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 193
+#define GCC_USB3_SEC_PHY_PIPE_CLK 194
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 195
+#define GCC_VIDEO_AHB_CLK 196
+#define GCC_VIDEO_AXI0_CLK 197
+#define GCC_VIDEO_AXI1_CLK 198
+#define GCC_VIDEO_XO_CLK 199
+
+/* GCC resets */
+#define GCC_GPU_BCR 0
+#define GCC_MMSS_BCR 1
+#define GCC_NPU_BWMON_BCR 2
+#define GCC_NPU_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_2_BCR 14
+#define GCC_PCIE_2_LINK_DOWN_BCR 15
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2_PHY_BCR 17
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 18
+#define GCC_PCIE_PHY_BCR 19
+#define GCC_PCIE_PHY_CFG_AHB_BCR 20
+#define GCC_PCIE_PHY_COM_BCR 21
+#define GCC_PDM_BCR 22
+#define GCC_PRNG_BCR 23
+#define GCC_QUPV3_WRAPPER_0_BCR 24
+#define GCC_QUPV3_WRAPPER_1_BCR 25
+#define GCC_QUPV3_WRAPPER_2_BCR 26
+#define GCC_QUSB2PHY_PRIM_BCR 27
+#define GCC_QUSB2PHY_SEC_BCR 28
+#define GCC_SDCC2_BCR 29
+#define GCC_SDCC4_BCR 30
+#define GCC_TSIF_BCR 31
+#define GCC_UFS_CARD_BCR 32
+#define GCC_UFS_PHY_BCR 33
+#define GCC_USB30_PRIM_BCR 34
+#define GCC_USB30_SEC_BCR 35
+#define GCC_USB3_DP_PHY_PRIM_BCR 36
+#define GCC_USB3_DP_PHY_SEC_BCR 37
+#define GCC_USB3_PHY_PRIM_BCR 38
+#define GCC_USB3_PHY_SEC_BCR 39
+#define GCC_USB3PHY_PHY_PRIM_BCR 40
+#define GCC_USB3PHY_PHY_SEC_BCR 41
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 42
+#define GCC_VIDEO_AXI0_CLK_ARES 43
+#define GCC_VIDEO_AXI1_CLK_ARES 44
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define PCIE_2_GDSC 2
+#define UFS_CARD_GDSC 3
+#define UFS_PHY_GDSC 4
+#define USB30_PRIM_GDSC 5
+#define USB30_SEC_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 7
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7180.h b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
index 0e4643b08b49..65e706d7d9c6 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sc7180.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
@@ -15,7 +15,8 @@
#define GPU_CC_CXO_CLK 6
#define GPU_CC_GMU_CLK_SRC 7
-/* CAM_CC GDSCRs */
+/* GPU_CC GDSCRs */
#define CX_GDSC 0
+#define GX_GDSC 1
#endif
diff --git a/include/dt-bindings/clock/qcom,mss-sc7180.h b/include/dt-bindings/clock/qcom,mss-sc7180.h
new file mode 100644
index 000000000000..f15a9ded2961
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mss-sc7180.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H
+
+#define MSS_AXI_CRYPTO_CLK 0
+#define MSS_AXI_NAV_CLK 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 8e3095720552..ae74c43c485d 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -37,6 +37,10 @@
#define RPM_XO_A0 27
#define RPM_XO_A1 28
#define RPM_XO_A2 29
+#define RPM_NSS_FABRIC_0_CLK 30
+#define RPM_NSS_FABRIC_0_A_CLK 31
+#define RPM_NSS_FABRIC_1_CLK 32
+#define RPM_NSS_FABRIC_1_A_CLK 33
/* SMD RPM clocks */
#define RPM_SMD_XO_CLK_SRC 0
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index edcab3f7b7d3..2e6c54e65455 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. */
#ifndef _DT_BINDINGS_CLK_MSM_RPMH_H
@@ -19,5 +19,7 @@
#define RPMH_RF_CLK3 10
#define RPMH_RF_CLK3_A 11
#define RPMH_IPA_CLK 12
+#define RPMH_LN_BB_CLK1 13
+#define RPMH_LN_BB_CLK1_A 14
#endif
diff --git a/include/dt-bindings/clock/sprd,sc9863a-clk.h b/include/dt-bindings/clock/sprd,sc9863a-clk.h
new file mode 100644
index 000000000000..901ba59676c2
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,sc9863a-clk.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Unisoc SC9863A platform clocks
+ *
+ * Copyright (C) 2019, Unisoc Communications Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SC9863A_H_
+#define _DT_BINDINGS_CLK_SC9863A_H_
+
+#define CLK_MPLL0_GATE 0
+#define CLK_DPLL0_GATE 1
+#define CLK_LPLL_GATE 2
+#define CLK_GPLL_GATE 3
+#define CLK_DPLL1_GATE 4
+#define CLK_MPLL1_GATE 5
+#define CLK_MPLL2_GATE 6
+#define CLK_ISPPLL_GATE 7
+#define CLK_PMU_APB_NUM (CLK_ISPPLL_GATE + 1)
+
+#define CLK_AUDIO_GATE 0
+#define CLK_RPLL 1
+#define CLK_RPLL_390M 2
+#define CLK_RPLL_260M 3
+#define CLK_RPLL_195M 4
+#define CLK_RPLL_26M 5
+#define CLK_ANLG_PHY_G5_NUM (CLK_RPLL_26M + 1)
+
+#define CLK_TWPLL 0
+#define CLK_TWPLL_768M 1
+#define CLK_TWPLL_384M 2
+#define CLK_TWPLL_192M 3
+#define CLK_TWPLL_96M 4
+#define CLK_TWPLL_48M 5
+#define CLK_TWPLL_24M 6
+#define CLK_TWPLL_12M 7
+#define CLK_TWPLL_512M 8
+#define CLK_TWPLL_256M 9
+#define CLK_TWPLL_128M 10
+#define CLK_TWPLL_64M 11
+#define CLK_TWPLL_307M2 12
+#define CLK_TWPLL_219M4 13
+#define CLK_TWPLL_170M6 14
+#define CLK_TWPLL_153M6 15
+#define CLK_TWPLL_76M8 16
+#define CLK_TWPLL_51M2 17
+#define CLK_TWPLL_38M4 18
+#define CLK_TWPLL_19M2 19
+#define CLK_LPLL 20
+#define CLK_LPLL_409M6 21
+#define CLK_LPLL_245M76 22
+#define CLK_GPLL 23
+#define CLK_ISPPLL 24
+#define CLK_ISPPLL_468M 25
+#define CLK_ANLG_PHY_G1_NUM (CLK_ISPPLL_468M + 1)
+
+#define CLK_DPLL0 0
+#define CLK_DPLL1 1
+#define CLK_DPLL0_933M 2
+#define CLK_DPLL0_622M3 3
+#define CLK_DPLL0_400M 4
+#define CLK_DPLL0_266M7 5
+#define CLK_DPLL0_123M1 6
+#define CLK_DPLL0_50M 7
+#define CLK_ANLG_PHY_G7_NUM (CLK_DPLL0_50M + 1)
+
+#define CLK_MPLL0 0
+#define CLK_MPLL1 1
+#define CLK_MPLL2 2
+#define CLK_MPLL2_675M 3
+#define CLK_ANLG_PHY_G4_NUM (CLK_MPLL2_675M + 1)
+
+#define CLK_AP_APB 0
+#define CLK_AP_CE 1
+#define CLK_NANDC_ECC 2
+#define CLK_NANDC_26M 3
+#define CLK_EMMC_32K 4
+#define CLK_SDIO0_32K 5
+#define CLK_SDIO1_32K 6
+#define CLK_SDIO2_32K 7
+#define CLK_OTG_UTMI 8
+#define CLK_AP_UART0 9
+#define CLK_AP_UART1 10
+#define CLK_AP_UART2 11
+#define CLK_AP_UART3 12
+#define CLK_AP_UART4 13
+#define CLK_AP_I2C0 14
+#define CLK_AP_I2C1 15
+#define CLK_AP_I2C2 16
+#define CLK_AP_I2C3 17
+#define CLK_AP_I2C4 18
+#define CLK_AP_I2C5 19
+#define CLK_AP_I2C6 20
+#define CLK_AP_SPI0 21
+#define CLK_AP_SPI1 22
+#define CLK_AP_SPI2 23
+#define CLK_AP_SPI3 24
+#define CLK_AP_IIS0 25
+#define CLK_AP_IIS1 26
+#define CLK_AP_IIS2 27
+#define CLK_SIM0 28
+#define CLK_SIM0_32K 29
+#define CLK_AP_CLK_NUM (CLK_SIM0_32K + 1)
+
+#define CLK_13M 0
+#define CLK_6M5 1
+#define CLK_4M3 2
+#define CLK_2M 3
+#define CLK_250K 4
+#define CLK_RCO_25M 5
+#define CLK_RCO_4M 6
+#define CLK_RCO_2M 7
+#define CLK_EMC 8
+#define CLK_AON_APB 9
+#define CLK_ADI 10
+#define CLK_AUX0 11
+#define CLK_AUX1 12
+#define CLK_AUX2 13
+#define CLK_PROBE 14
+#define CLK_PWM0 15
+#define CLK_PWM1 16
+#define CLK_PWM2 17
+#define CLK_AON_THM 18
+#define CLK_AUDIF 19
+#define CLK_CPU_DAP 20
+#define CLK_CPU_TS 21
+#define CLK_DJTAG_TCK 22
+#define CLK_EMC_REF 23
+#define CLK_CSSYS 24
+#define CLK_AON_PMU 25
+#define CLK_PMU_26M 26
+#define CLK_AON_TMR 27
+#define CLK_POWER_CPU 28
+#define CLK_AP_AXI 29
+#define CLK_SDIO0_2X 30
+#define CLK_SDIO1_2X 31
+#define CLK_SDIO2_2X 32
+#define CLK_EMMC_2X 33
+#define CLK_DPU 34
+#define CLK_DPU_DPI 35
+#define CLK_OTG_REF 36
+#define CLK_SDPHY_APB 37
+#define CLK_ALG_IO_APB 38
+#define CLK_GPU_CORE 39
+#define CLK_GPU_SOC 40
+#define CLK_MM_EMC 41
+#define CLK_MM_AHB 42
+#define CLK_BPC 43
+#define CLK_DCAM_IF 44
+#define CLK_ISP 45
+#define CLK_JPG 46
+#define CLK_CPP 47
+#define CLK_SENSOR0 48
+#define CLK_SENSOR1 49
+#define CLK_SENSOR2 50
+#define CLK_MM_VEMC 51
+#define CLK_MM_VAHB 52
+#define CLK_VSP 53
+#define CLK_CORE0 54
+#define CLK_CORE1 55
+#define CLK_CORE2 56
+#define CLK_CORE3 57
+#define CLK_CORE4 58
+#define CLK_CORE5 59
+#define CLK_CORE6 60
+#define CLK_CORE7 61
+#define CLK_SCU 62
+#define CLK_ACE 63
+#define CLK_AXI_PERIPH 64
+#define CLK_AXI_ACP 65
+#define CLK_ATB 66
+#define CLK_DEBUG_APB 67
+#define CLK_GIC 68
+#define CLK_PERIPH 69
+#define CLK_AON_CLK_NUM (CLK_VSP + 1)
+
+#define CLK_OTG_EB 0
+#define CLK_DMA_EB 1
+#define CLK_CE_EB 2
+#define CLK_NANDC_EB 3
+#define CLK_SDIO0_EB 4
+#define CLK_SDIO1_EB 5
+#define CLK_SDIO2_EB 6
+#define CLK_EMMC_EB 7
+#define CLK_EMMC_32K_EB 8
+#define CLK_SDIO0_32K_EB 9
+#define CLK_SDIO1_32K_EB 10
+#define CLK_SDIO2_32K_EB 11
+#define CLK_NANDC_26M_EB 12
+#define CLK_DMA_EB2 13
+#define CLK_CE_EB2 14
+#define CLK_AP_AHB_GATE_NUM (CLK_CE_EB2 + 1)
+
+#define CLK_GPIO_EB 0
+#define CLK_PWM0_EB 1
+#define CLK_PWM1_EB 2
+#define CLK_PWM2_EB 3
+#define CLK_PWM3_EB 4
+#define CLK_KPD_EB 5
+#define CLK_AON_SYST_EB 6
+#define CLK_AP_SYST_EB 7
+#define CLK_AON_TMR_EB 8
+#define CLK_EFUSE_EB 9
+#define CLK_EIC_EB 10
+#define CLK_INTC_EB 11
+#define CLK_ADI_EB 12
+#define CLK_AUDIF_EB 13
+#define CLK_AUD_EB 14
+#define CLK_VBC_EB 15
+#define CLK_PIN_EB 16
+#define CLK_AP_WDG_EB 17
+#define CLK_MM_EB 18
+#define CLK_AON_APB_CKG_EB 19
+#define CLK_CA53_TS0_EB 20
+#define CLK_CA53_TS1_EB 21
+#define CLK_CS53_DAP_EB 22
+#define CLK_PMU_EB 23
+#define CLK_THM_EB 24
+#define CLK_AUX0_EB 25
+#define CLK_AUX1_EB 26
+#define CLK_AUX2_EB 27
+#define CLK_PROBE_EB 28
+#define CLK_EMC_REF_EB 29
+#define CLK_CA53_WDG_EB 30
+#define CLK_AP_TMR1_EB 31
+#define CLK_AP_TMR2_EB 32
+#define CLK_DISP_EMC_EB 33
+#define CLK_ZIP_EMC_EB 34
+#define CLK_GSP_EMC_EB 35
+#define CLK_MM_VSP_EB 36
+#define CLK_MDAR_EB 37
+#define CLK_RTC4M0_CAL_EB 38
+#define CLK_RTC4M1_CAL_EB 39
+#define CLK_DJTAG_EB 40
+#define CLK_MBOX_EB 41
+#define CLK_AON_DMA_EB 42
+#define CLK_AON_APB_DEF_EB 43
+#define CLK_CA5_TS0_EB 44
+#define CLK_DBG_EB 45
+#define CLK_DBG_EMC_EB 46
+#define CLK_CROSS_TRIG_EB 47
+#define CLK_SERDES_DPHY_EB 48
+#define CLK_ARCH_RTC_EB 49
+#define CLK_KPD_RTC_EB 50
+#define CLK_AON_SYST_RTC_EB 51
+#define CLK_AP_SYST_RTC_EB 52
+#define CLK_AON_TMR_RTC_EB 53
+#define CLK_AP_TMR0_RTC_EB 54
+#define CLK_EIC_RTC_EB 55
+#define CLK_EIC_RTCDV5_EB 56
+#define CLK_AP_WDG_RTC_EB 57
+#define CLK_CA53_WDG_RTC_EB 58
+#define CLK_THM_RTC_EB 59
+#define CLK_ATHMA_RTC_EB 60
+#define CLK_GTHMA_RTC_EB 61
+#define CLK_ATHMA_RTC_A_EB 62
+#define CLK_GTHMA_RTC_A_EB 63
+#define CLK_AP_TMR1_RTC_EB 64
+#define CLK_AP_TMR2_RTC_EB 65
+#define CLK_DXCO_LC_RTC_EB 66
+#define CLK_BB_CAL_RTC_EB 67
+#define CLK_GNU_EB 68
+#define CLK_DISP_EB 69
+#define CLK_MM_EMC_EB 70
+#define CLK_POWER_CPU_EB 71
+#define CLK_HW_I2C_EB 72
+#define CLK_MM_VSP_EMC_EB 73
+#define CLK_VSP_EB 74
+#define CLK_CSSYS_EB 75
+#define CLK_DMC_EB 76
+#define CLK_ROSC_EB 77
+#define CLK_S_D_CFG_EB 78
+#define CLK_S_D_REF_EB 79
+#define CLK_B_DMA_EB 80
+#define CLK_ANLG_EB 81
+#define CLK_ANLG_APB_EB 82
+#define CLK_BSMTMR_EB 83
+#define CLK_AP_AXI_EB 84
+#define CLK_AP_INTC0_EB 85
+#define CLK_AP_INTC1_EB 86
+#define CLK_AP_INTC2_EB 87
+#define CLK_AP_INTC3_EB 88
+#define CLK_AP_INTC4_EB 89
+#define CLK_AP_INTC5_EB 90
+#define CLK_SCC_EB 91
+#define CLK_DPHY_CFG_EB 92
+#define CLK_DPHY_REF_EB 93
+#define CLK_CPHY_CFG_EB 94
+#define CLK_OTG_REF_EB 95
+#define CLK_SERDES_EB 96
+#define CLK_AON_AP_EMC_EB 97
+#define CLK_AON_APB_GATE_NUM (CLK_AON_AP_EMC_EB + 1)
+
+#define CLK_MAHB_CKG_EB 0
+#define CLK_MDCAM_EB 1
+#define CLK_MISP_EB 2
+#define CLK_MAHBCSI_EB 3
+#define CLK_MCSI_S_EB 4
+#define CLK_MCSI_T_EB 5
+#define CLK_DCAM_AXI_EB 6
+#define CLK_ISP_AXI_EB 7
+#define CLK_MCSI_EB 8
+#define CLK_MCSI_S_CKG_EB 9
+#define CLK_MCSI_T_CKG_EB 10
+#define CLK_SENSOR0_EB 11
+#define CLK_SENSOR1_EB 12
+#define CLK_SENSOR2_EB 13
+#define CLK_MCPHY_CFG_EB 14
+#define CLK_MM_GATE_NUM (CLK_MCPHY_CFG_EB + 1)
+
+#define CLK_SIM0_EB 0
+#define CLK_IIS0_EB 1
+#define CLK_IIS1_EB 2
+#define CLK_IIS2_EB 3
+#define CLK_SPI0_EB 4
+#define CLK_SPI1_EB 5
+#define CLK_SPI2_EB 6
+#define CLK_I2C0_EB 7
+#define CLK_I2C1_EB 8
+#define CLK_I2C2_EB 9
+#define CLK_I2C3_EB 10
+#define CLK_I2C4_EB 11
+#define CLK_UART0_EB 12
+#define CLK_UART1_EB 13
+#define CLK_UART2_EB 14
+#define CLK_UART3_EB 15
+#define CLK_UART4_EB 16
+#define CLK_SIM0_32K_EB 17
+#define CLK_SPI3_EB 18
+#define CLK_I2C5_EB 19
+#define CLK_I2C6_EB 20
+#define CLK_AP_APB_GATE_NUM (CLK_I2C6_EB + 1)
+
+#endif /* _DT_BINDINGS_CLK_SC9863A_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index e512a1c9b0fc..318eb15c414c 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -131,7 +131,7 @@
#define CLK_AVS 109
#define CLK_HDMI 110
#define CLK_HDMI_DDC 111
-
+#define CLK_MBUS 112
#define CLK_DSI_DPHY 113
#define CLK_GPU 114
diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h
index bb5c2c999c05..df59aaf5bf34 100644
--- a/include/dt-bindings/clock/tegra114-car.h
+++ b/include/dt-bindings/clock/tegra114-car.h
@@ -228,6 +228,8 @@
#define TEGRA114_CLK_CLK_M 201
#define TEGRA114_CLK_CLK_M_DIV2 202
#define TEGRA114_CLK_CLK_M_DIV4 203
+#define TEGRA114_CLK_OSC_DIV2 202
+#define TEGRA114_CLK_OSC_DIV4 203
#define TEGRA114_CLK_PLL_REF 204
#define TEGRA114_CLK_PLL_C 205
#define TEGRA114_CLK_PLL_C_OUT1 206
@@ -274,7 +276,7 @@
#define TEGRA114_CLK_CLK_OUT_2 246
#define TEGRA114_CLK_CLK_OUT_3 247
#define TEGRA114_CLK_BLINK 248
-/* 249 */
+#define TEGRA114_CLK_OSC 249
/* 250 */
/* 251 */
#define TEGRA114_CLK_XUSB_HOST_SRC 252
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index 0c4f5be0a742..2a9acd592bff 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -227,6 +227,8 @@
#define TEGRA124_CLK_CLK_M 201
#define TEGRA124_CLK_CLK_M_DIV2 202
#define TEGRA124_CLK_CLK_M_DIV4 203
+#define TEGRA124_CLK_OSC_DIV2 202
+#define TEGRA124_CLK_OSC_DIV4 203
#define TEGRA124_CLK_PLL_REF 204
#define TEGRA124_CLK_PLL_C 205
#define TEGRA124_CLK_PLL_C_OUT1 206
@@ -273,7 +275,7 @@
#define TEGRA124_CLK_CLK_OUT_2 246
#define TEGRA124_CLK_CLK_OUT_3 247
#define TEGRA124_CLK_BLINK 248
-/* 249 */
+#define TEGRA124_CLK_OSC 249
/* 250 */
/* 251 */
#define TEGRA124_CLK_XUSB_HOST_SRC 252
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 44f60623f99b..7a8f10b9a66d 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -262,6 +262,8 @@
#define TEGRA210_CLK_CLK_M 233
#define TEGRA210_CLK_CLK_M_DIV2 234
#define TEGRA210_CLK_CLK_M_DIV4 235
+#define TEGRA210_CLK_OSC_DIV2 234
+#define TEGRA210_CLK_OSC_DIV4 235
#define TEGRA210_CLK_PLL_REF 236
#define TEGRA210_CLK_PLL_C 237
#define TEGRA210_CLK_PLL_C_OUT1 238
@@ -355,7 +357,7 @@
#define TEGRA210_CLK_PLL_A_OUT_ADSP 323
#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324
/* 325 */
-/* 326 */
+#define TEGRA210_CLK_OSC 326
/* 327 */
/* 328 */
/* 329 */
diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h
index 3c90f1535551..7b542c10fc27 100644
--- a/include/dt-bindings/clock/tegra30-car.h
+++ b/include/dt-bindings/clock/tegra30-car.h
@@ -196,6 +196,8 @@
#define TEGRA30_CLK_CLK_M 171
#define TEGRA30_CLK_CLK_M_DIV2 172
#define TEGRA30_CLK_CLK_M_DIV4 173
+#define TEGRA30_CLK_OSC_DIV2 172
+#define TEGRA30_CLK_OSC_DIV4 173
#define TEGRA30_CLK_PLL_REF 174
#define TEGRA30_CLK_PLL_C 175
#define TEGRA30_CLK_PLL_C_OUT1 176
@@ -243,7 +245,7 @@
#define TEGRA30_CLK_HCLK 217
#define TEGRA30_CLK_PCLK 218
/* 219 */
-/* 220 */
+#define TEGRA30_CLK_OSC 220
/* 221 */
/* 222 */
/* 223 */
diff --git a/include/dt-bindings/interconnect/qcom,osm-l3.h b/include/dt-bindings/interconnect/qcom,osm-l3.h
new file mode 100644
index 000000000000..54858ff7674d
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,osm-l3.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_OSM_L3_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_OSM_L3_H
+
+#define MASTER_OSM_L3_APPS 0
+#define SLAVE_OSM_L3 1
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc7180.h b/include/dt-bindings/interconnect/qcom,sc7180.h
new file mode 100644
index 000000000000..f9970f6032eb
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc7180.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SC7180 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7180_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7180_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QSPI 1
+#define MASTER_QUP_0 2
+#define MASTER_SDCC_2 3
+#define MASTER_EMMC 4
+#define MASTER_UFS_MEM 5
+#define SLAVE_A1NOC_SNOC 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_1 2
+#define MASTER_USB3 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_QDSS_ETR 6
+#define SLAVE_A2NOC_SNOC 7
+#define SLAVE_SERVICE_A2NOC 8
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_HF1_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define SLAVE_CAMNOC_UNCOMP 3
+
+#define MASTER_NPU 0
+#define MASTER_NPU_PROC 1
+#define SLAVE_CDSP_GEM_NOC 2
+
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AHB2PHY_SOUTH 4
+#define SLAVE_AHB2PHY_CENTER 5
+#define SLAVE_AOP 6
+#define SLAVE_AOSS 7
+#define SLAVE_BOOT_ROM 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 10
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 11
+#define SLAVE_CLK_CTL 12
+#define SLAVE_RBCPR_CX_CFG 13
+#define SLAVE_RBCPR_MX_CFG 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_DCC_CFG 16
+#define SLAVE_CNOC_DDRSS 17
+#define SLAVE_DISPLAY_CFG 18
+#define SLAVE_DISPLAY_RT_THROTTLE_CFG 19
+#define SLAVE_DISPLAY_THROTTLE_CFG 20
+#define SLAVE_EMMC_CFG 21
+#define SLAVE_GLM 22
+#define SLAVE_GFX3D_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MNOC_CFG 26
+#define SLAVE_CNOC_MSS 27
+#define SLAVE_NPU_CFG 28
+#define SLAVE_NPU_DMA_BWMON_CFG 29
+#define SLAVE_NPU_PROC_BWMON_CFG 30
+#define SLAVE_PDM 31
+#define SLAVE_PIMEM_CFG 32
+#define SLAVE_PRNG 33
+#define SLAVE_QDSS_CFG 34
+#define SLAVE_QM_CFG 35
+#define SLAVE_QM_MPU_CFG 36
+#define SLAVE_QSPI_0 37
+#define SLAVE_QUP_0 38
+#define SLAVE_QUP_1 39
+#define SLAVE_SDCC_2 40
+#define SLAVE_SECURITY 41
+#define SLAVE_SNOC_CFG 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM_WEST 44
+#define SLAVE_TLMM_NORTH 45
+#define SLAVE_TLMM_SOUTH 46
+#define SLAVE_UFS_MEM_CFG 47
+#define SLAVE_USB3 48
+#define SLAVE_VENUS_CFG 49
+#define SLAVE_VENUS_THROTTLE_CFG 50
+#define SLAVE_VSENSE_CTRL_CFG 51
+#define SLAVE_SERVICE_CNOC 52
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_APPSS_PROC 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_SNOC_GC_MEM_NOC 6
+#define MASTER_SNOC_SF_MEM_NOC 7
+#define MASTER_GFX3D 8
+#define SLAVE_MSS_PROC_MS_MPU_CFG 9
+#define SLAVE_GEM_NOC_SNOC 10
+#define SLAVE_LLCC 11
+#define SLAVE_SERVICE_GEM_NOC 12
+
+#define MASTER_IPA_CORE 0
+#define SLAVE_IPA_CORE 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP0 4
+#define MASTER_ROTATOR 5
+#define MASTER_VIDEO_P0 6
+#define MASTER_VIDEO_PROC 7
+#define SLAVE_MNOC_HF_MEM_NOC 8
+#define SLAVE_MNOC_SF_MEM_NOC 9
+#define SLAVE_SERVICE_MNOC 10
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_NOC_CFG 1
+#define SLAVE_NPU_CAL_DP0 2
+#define SLAVE_NPU_CP 3
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 4
+#define SLAVE_NPU_DPM 5
+#define SLAVE_ISENSE_CFG 6
+#define SLAVE_NPU_LLM_CFG 7
+#define SLAVE_NPU_TCM 8
+#define SLAVE_NPU_COMPUTE_NOC 9
+#define SLAVE_SERVICE_NPU_NOC 10
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_SNOC_CFG 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define SLAVE_APPSS 5
+#define SLAVE_SNOC_CNOC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_IMEM 9
+#define SLAVE_PIMEM 10
+#define SLAVE_SERVICE_SNOC 11
+#define SLAVE_QDSS_STM 12
+#define SLAVE_TCU 13
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h
index 7b2393be7361..290be38f40e6 100644
--- a/include/dt-bindings/interconnect/qcom,sdm845.h
+++ b/include/dt-bindings/interconnect/qcom,sdm845.h
@@ -10,134 +10,139 @@
#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H
#define MASTER_A1NOC_CFG 0
-#define MASTER_BLSP_1 1
-#define MASTER_TSIF 2
-#define MASTER_SDCC_2 3
-#define MASTER_SDCC_4 4
-#define MASTER_UFS_CARD 5
-#define MASTER_UFS_MEM 6
-#define MASTER_PCIE_0 7
-#define MASTER_A2NOC_CFG 8
-#define MASTER_QDSS_BAM 9
-#define MASTER_BLSP_2 10
-#define MASTER_CNOC_A2NOC 11
-#define MASTER_CRYPTO 12
-#define MASTER_IPA 13
-#define MASTER_PCIE_1 14
-#define MASTER_QDSS_ETR 15
-#define MASTER_USB3_0 16
-#define MASTER_USB3_1 17
-#define MASTER_CAMNOC_HF0_UNCOMP 18
-#define MASTER_CAMNOC_HF1_UNCOMP 19
-#define MASTER_CAMNOC_SF_UNCOMP 20
-#define MASTER_SPDM 21
-#define MASTER_TIC 22
-#define MASTER_SNOC_CNOC 23
-#define MASTER_QDSS_DAP 24
-#define MASTER_CNOC_DC_NOC 25
-#define MASTER_APPSS_PROC 26
-#define MASTER_GNOC_CFG 27
-#define MASTER_LLCC 28
-#define MASTER_TCU_0 29
-#define MASTER_MEM_NOC_CFG 30
-#define MASTER_GNOC_MEM_NOC 31
-#define MASTER_MNOC_HF_MEM_NOC 32
-#define MASTER_MNOC_SF_MEM_NOC 33
-#define MASTER_SNOC_GC_MEM_NOC 34
-#define MASTER_SNOC_SF_MEM_NOC 35
-#define MASTER_GFX3D 36
-#define MASTER_CNOC_MNOC_CFG 37
-#define MASTER_CAMNOC_HF0 38
-#define MASTER_CAMNOC_HF1 39
-#define MASTER_CAMNOC_SF 40
-#define MASTER_MDP0 41
-#define MASTER_MDP1 42
-#define MASTER_ROTATOR 43
-#define MASTER_VIDEO_P0 44
-#define MASTER_VIDEO_P1 45
-#define MASTER_VIDEO_PROC 46
-#define MASTER_SNOC_CFG 47
-#define MASTER_A1NOC_SNOC 48
-#define MASTER_A2NOC_SNOC 49
-#define MASTER_GNOC_SNOC 50
-#define MASTER_MEM_NOC_SNOC 51
-#define MASTER_ANOC_PCIE_SNOC 52
-#define MASTER_PIMEM 53
-#define MASTER_GIC 54
-#define SLAVE_A1NOC_SNOC 55
-#define SLAVE_SERVICE_A1NOC 56
-#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57
-#define SLAVE_A2NOC_SNOC 58
-#define SLAVE_ANOC_PCIE_SNOC 59
-#define SLAVE_SERVICE_A2NOC 60
-#define SLAVE_CAMNOC_UNCOMP 61
-#define SLAVE_A1NOC_CFG 62
-#define SLAVE_A2NOC_CFG 63
-#define SLAVE_AOP 64
-#define SLAVE_AOSS 65
-#define SLAVE_CAMERA_CFG 66
-#define SLAVE_CLK_CTL 67
-#define SLAVE_CDSP_CFG 68
-#define SLAVE_RBCPR_CX_CFG 69
-#define SLAVE_CRYPTO_0_CFG 70
-#define SLAVE_DCC_CFG 71
-#define SLAVE_CNOC_DDRSS 72
-#define SLAVE_DISPLAY_CFG 73
-#define SLAVE_GLM 74
-#define SLAVE_GFX3D_CFG 75
-#define SLAVE_IMEM_CFG 76
-#define SLAVE_IPA_CFG 77
-#define SLAVE_CNOC_MNOC_CFG 78
-#define SLAVE_PCIE_0_CFG 79
-#define SLAVE_PCIE_1_CFG 80
-#define SLAVE_PDM 81
-#define SLAVE_SOUTH_PHY_CFG 82
-#define SLAVE_PIMEM_CFG 83
-#define SLAVE_PRNG 84
-#define SLAVE_QDSS_CFG 85
-#define SLAVE_BLSP_2 86
-#define SLAVE_BLSP_1 87
-#define SLAVE_SDCC_2 88
-#define SLAVE_SDCC_4 89
-#define SLAVE_SNOC_CFG 90
-#define SLAVE_SPDM_WRAPPER 91
-#define SLAVE_SPSS_CFG 92
-#define SLAVE_TCSR 93
-#define SLAVE_TLMM_NORTH 94
-#define SLAVE_TLMM_SOUTH 95
-#define SLAVE_TSIF 96
-#define SLAVE_UFS_CARD_CFG 97
-#define SLAVE_UFS_MEM_CFG 98
-#define SLAVE_USB3_0 99
-#define SLAVE_USB3_1 100
-#define SLAVE_VENUS_CFG 101
-#define SLAVE_VSENSE_CTRL_CFG 102
-#define SLAVE_CNOC_A2NOC 103
-#define SLAVE_SERVICE_CNOC 104
-#define SLAVE_LLCC_CFG 105
-#define SLAVE_MEM_NOC_CFG 106
-#define SLAVE_GNOC_SNOC 107
-#define SLAVE_GNOC_MEM_NOC 108
-#define SLAVE_SERVICE_GNOC 109
-#define SLAVE_EBI1 110
-#define SLAVE_MSS_PROC_MS_MPU_CFG 111
-#define SLAVE_MEM_NOC_GNOC 112
-#define SLAVE_LLCC 113
-#define SLAVE_MEM_NOC_SNOC 114
-#define SLAVE_SERVICE_MEM_NOC 115
-#define SLAVE_MNOC_SF_MEM_NOC 116
-#define SLAVE_MNOC_HF_MEM_NOC 117
-#define SLAVE_SERVICE_MNOC 118
-#define SLAVE_APPSS 119
-#define SLAVE_SNOC_CNOC 120
-#define SLAVE_SNOC_MEM_NOC_GC 121
-#define SLAVE_SNOC_MEM_NOC_SF 122
-#define SLAVE_IMEM 123
-#define SLAVE_PCIE_0 124
-#define SLAVE_PCIE_1 125
-#define SLAVE_PIMEM 126
-#define SLAVE_SERVICE_SNOC 127
-#define SLAVE_QDSS_STM 128
-#define SLAVE_TCU 129
+#define MASTER_TSIF 1
+#define MASTER_SDCC_2 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_CARD 4
+#define MASTER_UFS_MEM 5
+#define MASTER_PCIE_0 6
+#define SLAVE_A1NOC_SNOC 7
+#define SLAVE_SERVICE_A1NOC 8
+#define SLAVE_ANOC_PCIE_A1NOC_SNOC 9
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_CNOC_A2NOC 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_PCIE_1 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_USB3_0 7
+#define MASTER_USB3_1 8
+#define SLAVE_A2NOC_SNOC 9
+#define SLAVE_ANOC_PCIE_SNOC 10
+#define SLAVE_SERVICE_A2NOC 11
+
+#define MASTER_SPDM 0
+#define MASTER_TIC 1
+#define MASTER_SNOC_CNOC 2
+#define MASTER_QDSS_DAP 3
+#define SLAVE_A1NOC_CFG 4
+#define SLAVE_A2NOC_CFG 5
+#define SLAVE_AOP 6
+#define SLAVE_AOSS 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_CDSP_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_DCC_CFG 13
+#define SLAVE_CNOC_DDRSS 14
+#define SLAVE_DISPLAY_CFG 15
+#define SLAVE_GLM 16
+#define SLAVE_GFX3D_CFG 17
+#define SLAVE_IMEM_CFG 18
+#define SLAVE_IPA_CFG 19
+#define SLAVE_CNOC_MNOC_CFG 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_SOUTH_PHY_CFG 24
+#define SLAVE_PIMEM_CFG 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_BLSP_2 28
+#define SLAVE_BLSP_1 29
+#define SLAVE_SDCC_2 30
+#define SLAVE_SDCC_4 31
+#define SLAVE_SNOC_CFG 32
+#define SLAVE_SPDM_WRAPPER 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM_NORTH 36
+#define SLAVE_TLMM_SOUTH 37
+#define SLAVE_TSIF 38
+#define SLAVE_UFS_CARD_CFG 39
+#define SLAVE_UFS_MEM_CFG 40
+#define SLAVE_USB3_0 41
+#define SLAVE_USB3_1 42
+#define SLAVE_VENUS_CFG 43
+#define SLAVE_VSENSE_CTRL_CFG 44
+#define SLAVE_CNOC_A2NOC 45
+#define SLAVE_SERVICE_CNOC 46
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_MEM_NOC_CFG 2
+
+#define MASTER_APPSS_PROC 0
+#define MASTER_GNOC_CFG 1
+#define SLAVE_GNOC_SNOC 2
+#define SLAVE_GNOC_MEM_NOC 3
+#define SLAVE_SERVICE_GNOC 4
+
+#define MASTER_TCU_0 0
+#define MASTER_MEM_NOC_CFG 1
+#define MASTER_GNOC_MEM_NOC 2
+#define MASTER_MNOC_HF_MEM_NOC 3
+#define MASTER_MNOC_SF_MEM_NOC 4
+#define MASTER_SNOC_GC_MEM_NOC 5
+#define MASTER_SNOC_SF_MEM_NOC 6
+#define MASTER_GFX3D 7
+#define SLAVE_MSS_PROC_MS_MPU_CFG 8
+#define SLAVE_MEM_NOC_GNOC 9
+#define SLAVE_LLCC 10
+#define SLAVE_MEM_NOC_SNOC 11
+#define SLAVE_SERVICE_MEM_NOC 12
+#define MASTER_LLCC 13
+#define SLAVE_EBI1 14
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP0 4
+#define MASTER_MDP1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_CAMNOC_HF0_UNCOMP 13
+#define MASTER_CAMNOC_HF1_UNCOMP 14
+#define MASTER_CAMNOC_SF_UNCOMP 15
+#define SLAVE_CAMNOC_UNCOMP 16
+
+#define MASTER_SNOC_CFG 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GNOC_SNOC 3
+#define MASTER_MEM_NOC_SNOC 4
+#define MASTER_ANOC_PCIE_SNOC 5
+#define MASTER_PIMEM 6
+#define MASTER_GIC 7
+#define SLAVE_APPSS 8
+#define SLAVE_SNOC_CNOC 9
+#define SLAVE_SNOC_MEM_NOC_GC 10
+#define SLAVE_SNOC_MEM_NOC_SF 11
+#define SLAVE_IMEM 12
+#define SLAVE_PCIE_0 13
+#define SLAVE_PCIE_1 14
+#define SLAVE_PIMEM 15
+#define SLAVE_SERVICE_SNOC 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
#endif
diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h
new file mode 100644
index 000000000000..6cf50bfb8ccf
--- /dev/null
+++ b/include/dt-bindings/power/meson-a1-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Amlogic, Inc.
+ * Author: Jianxin Pan <jianxin.pan@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_A1_POWER_H
+#define _DT_BINDINGS_MESON_A1_POWER_H
+
+#define PWRC_DSPA_ID 8
+#define PWRC_DSPB_ID 9
+#define PWRC_UART_ID 10
+#define PWRC_DMC_ID 11
+#define PWRC_I2C_ID 12
+#define PWRC_PSRAM_ID 13
+#define PWRC_ACODEC_ID 14
+#define PWRC_AUDIO_ID 15
+#define PWRC_OTP_ID 16
+#define PWRC_DMA_ID 17
+#define PWRC_SD_EMMC_ID 18
+#define PWRC_RAMA_ID 19
+#define PWRC_RAMB_ID 20
+#define PWRC_IR_ID 21
+#define PWRC_SPICC_ID 22
+#define PWRC_SPIFC_ID 23
+#define PWRC_USB_ID 24
+#define PWRC_NIC_ID 25
+#define PWRC_PDMIN_ID 26
+#define PWRC_RSA_ID 27
+#define PWRC_MAX_ID 28
+
+#endif
diff --git a/include/dt-bindings/soc/tegra-pmc.h b/include/dt-bindings/soc/tegra-pmc.h
new file mode 100644
index 000000000000..a99a457471ee
--- /dev/null
+++ b/include/dt-bindings/soc/tegra-pmc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_SOC_TEGRA_PMC_H
+#define _DT_BINDINGS_SOC_TEGRA_PMC_H
+
+#define TEGRA_PMC_CLK_OUT_1 0
+#define TEGRA_PMC_CLK_OUT_2 1
+#define TEGRA_PMC_CLK_OUT_3 2
+#define TEGRA_PMC_CLK_BLINK 3
+
+#define TEGRA_PMC_CLK_MAX 4
+
+#endif /* _DT_BINDINGS_SOC_TEGRA_PMC_H */
diff --git a/include/dt-bindings/sound/meson-aiu.h b/include/dt-bindings/sound/meson-aiu.h
new file mode 100644
index 000000000000..1051b8af298b
--- /dev/null
+++ b/include/dt-bindings/sound/meson-aiu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_MESON_AIU_H
+#define __DT_MESON_AIU_H
+
+#define AIU_CPU 0
+#define AIU_HDMI 1
+#define AIU_ACODEC 2
+
+#define CPU_I2S_FIFO 0
+#define CPU_SPDIF_FIFO 1
+#define CPU_I2S_ENCODER 2
+#define CPU_SPDIF_ENCODER 3
+
+#define CTRL_I2S 0
+#define CTRL_PCM 1
+#define CTRL_OUT 2
+
+#endif /* __DT_MESON_AIU_H */
diff --git a/include/dt-bindings/sound/meson-g12a-toacodec.h b/include/dt-bindings/sound/meson-g12a-toacodec.h
new file mode 100644
index 000000000000..69d7a75592a2
--- /dev/null
+++ b/include/dt-bindings/sound/meson-g12a-toacodec.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_MESON_G12A_TOACODEC_H
+#define __DT_MESON_G12A_TOACODEC_H
+
+#define TOACODEC_IN_A 0
+#define TOACODEC_IN_B 1
+#define TOACODEC_IN_C 2
+#define TOACODEC_OUT 3
+
+#endif /* __DT_MESON_G12A_TOACODEC_H */
diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
index f6a7ba4dccd4..3fee04f81439 100644
--- a/include/keys/big_key-type.h
+++ b/include/keys/big_key-type.h
@@ -17,6 +17,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep);
extern void big_key_revoke(struct key *key);
extern void big_key_destroy(struct key *key);
extern void big_key_describe(const struct key *big_key, struct seq_file *m);
-extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen);
+extern long big_key_read(const struct key *key, char *buffer, size_t buflen);
#endif /* _KEYS_BIG_KEY_TYPE_H */
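A minimal sketch of what the new ->read() signature means for a key type implementation; the payload struct and its fields below are hypothetical, not part of this patch. The buffer is now a kernel pointer, so the copy is a plain memcpy() rather than copy_to_user():

	static long my_key_read(const struct key *key, char *buffer, size_t buflen)
	{
		const struct my_payload *p = key->payload.data[0]; /* hypothetical */

		if (buffer && buflen)
			memcpy(buffer, p->data, min_t(size_t, buflen, p->datalen));
		return p->datalen;
	}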
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index d5e73266a81a..be61fcddc02a 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -41,8 +41,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep);
extern void user_revoke(struct key *key);
extern void user_destroy(struct key *key);
extern void user_describe(const struct key *user, struct seq_file *m);
-extern long user_read(const struct key *key,
- char __user *buffer, size_t buflen);
+extern long user_read(const struct key *key, char *buffer, size_t buflen);
static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key)
{
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 2dfb550c6723..9b0c46a6ca1f 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -81,6 +81,17 @@ struct kunit_resource {
struct kunit;
+/* Size of log associated with test. */
+#define KUNIT_LOG_SIZE 512
+
+/*
+ * TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a
+ * sub-subtest. See the "Subtests" section in
+ * https://node-tap.org/tap-protocol/
+ */
+#define KUNIT_SUBTEST_INDENT " "
+#define KUNIT_SUBSUBTEST_INDENT " "
+
/**
* struct kunit_case - represents an individual test case.
*
@@ -123,8 +134,14 @@ struct kunit_case {
/* private: internal use only. */
bool success;
+ char *log;
};
+static inline char *kunit_status_to_string(bool status)
+{
+ return status ? "ok" : "not ok";
+}
+
/**
* KUNIT_CASE - A helper for creating a &struct kunit_case
*
@@ -157,6 +174,10 @@ struct kunit_suite {
int (*init)(struct kunit *test);
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
+
+ /* private: internal use only. */
+ struct dentry *debugfs;
+ char *log;
};
/**
@@ -175,6 +196,7 @@ struct kunit {
/* private: internal use only. */
const char *name; /* Read only after initialization! */
+ char *log; /* Points at case log after initialization */
struct kunit_try_catch try_catch;
/*
* success starts as true, and may only be set to false during a
@@ -193,10 +215,19 @@ struct kunit {
struct list_head resources; /* Protected by lock. */
};
-void kunit_init_test(struct kunit *test, const char *name);
+void kunit_init_test(struct kunit *test, const char *name, char *log);
int kunit_run_tests(struct kunit_suite *suite);
+size_t kunit_suite_num_test_cases(struct kunit_suite *suite);
+
+unsigned int kunit_test_case_num(struct kunit_suite *suite,
+ struct kunit_case *test_case);
+
+int __kunit_test_suites_init(struct kunit_suite **suites);
+
+void __kunit_test_suites_exit(struct kunit_suite **suites);
+
/**
* kunit_test_suites() - used to register one or more &struct kunit_suite
* with KUnit.
@@ -226,20 +257,22 @@ int kunit_run_tests(struct kunit_suite *suite);
static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \
static int kunit_test_suites_init(void) \
{ \
- unsigned int i; \
- for (i = 0; suites[i] != NULL; i++) \
- kunit_run_tests(suites[i]); \
- return 0; \
+ return __kunit_test_suites_init(suites); \
} \
late_initcall(kunit_test_suites_init); \
static void __exit kunit_test_suites_exit(void) \
{ \
- return; \
+ return __kunit_test_suites_exit(suites); \
} \
module_exit(kunit_test_suites_exit)
#define kunit_test_suite(suite) kunit_test_suites(&suite)
+#define kunit_suite_for_each_test_case(suite, test_case) \
+ for (test_case = suite->test_cases; test_case->run_case; test_case++)
+
+bool kunit_suite_has_succeeded(struct kunit_suite *suite);
+
/*
* Like kunit_alloc_resource() below, but returns the struct kunit_resource
* object that contains the allocation. This is mostly for testing purposes.
@@ -356,8 +389,22 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
void kunit_cleanup(struct kunit *test);
-#define kunit_printk(lvl, test, fmt, ...) \
- printk(lvl "\t# %s: " fmt, (test)->name, ##__VA_ARGS__)
+void kunit_log_append(char *log, const char *fmt, ...);
+
+/*
+ * printk and log to per-test or per-suite log buffer. Logging only done
+ * if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used.
+ */
+#define kunit_log(lvl, test_or_suite, fmt, ...) \
+ do { \
+ printk(lvl fmt, ##__VA_ARGS__); \
+ kunit_log_append((test_or_suite)->log, fmt "\n", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define kunit_printk(lvl, test, fmt, ...) \
+ kunit_log(lvl, test, KUNIT_SUBTEST_INDENT "# %s: " fmt, \
+ (test)->name, ##__VA_ARGS__)
/**
* kunit_info() - Prints an INFO level message associated with @test.
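For context, a minimal KUnit test (hypothetical, not part of this patch) showing where the new logging lands: kunit_info() now routes through kunit_log(), so the message is printed with TAP subtest indentation and also appended to the per-test log exposed via debugfs:

	#include <kunit/test.h>

	static void example_log_test(struct kunit *test)
	{
		kunit_info(test, "log buffer size is %d", KUNIT_LOG_SIZE);
		KUNIT_EXPECT_EQ(test, 1 + 1, 2);
	}

	static struct kunit_case example_cases[] = {
		KUNIT_CASE(example_log_test),
		{}
	};

	static struct kunit_suite example_suite = {
		.name = "example",
		.test_cases = example_cases,
	};
	kunit_test_suite(example_suite);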
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 63457908c9c4..69f4164d6477 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -231,6 +231,9 @@ struct vgic_dist {
/* distributor enabled */
bool enabled;
+ /* Wants SGIs without active state */
+ bool nassgireq;
+
struct vgic_irq *spis;
struct vgic_io_device dist_iodev;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0f24d701fbdc..9f70b7807d53 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -488,6 +488,11 @@ void __init acpi_nvs_nosave_s3(void);
void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
+int acpi_register_wakeup_handler(
+ int wake_irq, bool (*wakeup)(void *context), void *context);
+void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context);
+
struct acpi_osc_context {
char *uuid_str; /* UUID string */
int rev;
@@ -530,8 +535,9 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
#define OSC_PCI_MSI_SUPPORT 0x00000010
+#define OSC_PCI_EDR_SUPPORT 0x00000080
#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
-#define OSC_PCI_SUPPORT_MASKS 0x0000011f
+#define OSC_PCI_SUPPORT_MASKS 0x0000019f
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -540,7 +546,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
-#define OSC_PCI_CONTROL_MASKS 0x0000003f
+#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
+#define OSC_PCI_CONTROL_MASKS 0x000000bf
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
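A sketch of how a driver might use the new wakeup-handler hooks; the my_dev structure and my_dev_wake_pending() helper are illustrative assumptions, not part of this patch:

	static bool my_wakeup_check(void *context)
	{
		struct my_dev *md = context;

		/* Report true only if our device actually caused the wake. */
		return my_dev_wake_pending(md); /* hypothetical helper */
	}

	/* probe: */
	ret = acpi_register_wakeup_handler(md->wake_irq, my_wakeup_check, md);
	/* remove: */
	acpi_unregister_wakeup_handler(my_wakeup_check, md);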
diff --git a/include/linux/aer.h b/include/linux/aer.h
index fa19e01f418a..97f64ba1b34a 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -44,8 +44,7 @@ struct aer_capability_regs {
/* PCIe port driver needs this function to enable AER */
int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
-int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
-int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
void pci_save_aer_state(struct pci_dev *dev);
void pci_restore_aer_state(struct pci_dev *dev);
#else
@@ -57,11 +56,7 @@ static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
-static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
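Callers migrate by renaming; for example, a hypothetical driver's slot_reset handler (pci_ers_result_t and PCI_ERS_RESULT_RECOVERED come from <linux/pci.h>):

	static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
	{
		/* After the link reset, clear any non-fatal AER status
		 * the device logged. */
		pci_aer_clear_nonfatal_status(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}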
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index b40fc633f3be..a345d9fed3d8 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -44,7 +44,13 @@ struct linux_binprm {
* exec has happened. Used to sanitize execution environment
* and to set AT_SECURE auxv for glibc.
*/
- secureexec:1;
+ secureexec:1,
+ /*
+ * Set by flush_old_exec, when exec_mmap has been called.
+ * This is past the point of no return, when the
+ * exec_update_mutex has been taken.
+ */
+ called_exec_mmap:1;
#ifdef __alpha__
unsigned int taso:1;
#endif
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e52ceb1a73d3..99058eb81042 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -50,7 +50,13 @@
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
- * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
+ * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region
+ * bitmap_next_set_region(map, &start, &end, nbits) Find next set region
+ * bitmap_for_each_clear_region(map, rs, re, start, end)
+ * Iterate over all clear regions
+ * bitmap_for_each_set_region(map, rs, re, start, end)
+ * Iterate over all set regions
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
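A short usage sketch of the new region iterators, assuming nothing beyond the signatures documented above:

	DECLARE_BITMAP(map, 64);
	unsigned int rs, re;

	bitmap_zero(map, 64);
	bitmap_set(map, 4, 8);				/* set bits 4..11 */
	bitmap_for_each_set_region(map, rs, re, 0, 64)
		pr_info("set region [%u, %u)\n", rs, re);	/* one region: [4, 12) */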
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 47f54b459c26..9acf654f0b19 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -162,7 +162,7 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
*
* This is safe to use for 16- and 8-bit types as well.
*/
-static inline __s32 sign_extend32(__u32 value, int index)
+static __always_inline __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
@@ -173,7 +173,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
-static inline __s64 sign_extend64(__u64 value, int index)
+static __always_inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
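A worked example of what sign_extend32() computes, using only the function above:

	u32 raw = 0x800;			/* 12-bit two's-complement field */
	s32 val = sign_extend32(raw, 11);	/* bit 11 is the sign bit */
	/* shift = 31 - 11 = 20; 0x800 << 20 = 0x80000000; the arithmetic
	 * shift right by 20 yields 0xfffff800, so val == -2048 */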
diff --git a/include/linux/bits.h b/include/linux/bits.h
index a740bbcf3cd2..4671fbf28842 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -18,12 +18,30 @@
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
-#define GENMASK(h, l) \
+#if !defined(__ASSEMBLY__) && \
+ (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#include <linux/build_bug.h>
+#define GENMASK_INPUT_CHECK(h, l) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+ __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+#else
+/*
+ * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+ * disable the input check if that is the case.
+ */
+#define GENMASK_INPUT_CHECK(h, l) 0
+#endif
+
+#define __GENMASK(h, l) \
(((~UL(0)) - (UL(1) << (l)) + 1) & \
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
+#define __GENMASK_ULL(h, l) \
(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
(~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
#endif /* __LINUX_BITS_H */
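The effect of the added input check, illustrated:

	u32 mask = GENMASK(15, 8);	/* 0x0000ff00: bits 8..15 inclusive */
	u64 big = GENMASK_ULL(39, 21);	/* 0x000000ffffe00000, per the comment above */

	/* GENMASK(8, 15), with the bounds swapped, now fails the build via
	 * BUILD_BUG_ON_ZERO when both arguments are compile-time constants,
	 * instead of silently producing a wrong mask. */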
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index d11e183fcb54..9903088891fa 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -216,7 +216,8 @@ static inline int __init xbc_node_compose_key(struct xbc_node *node,
}
/* XBC node initializer */
-int __init xbc_init(char *buf);
+int __init xbc_init(char *buf, const char **emsg, int *epos);
+
/* XBC cleanup data structures */
void __init xbc_destroy_all(void);
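A sketch of the updated calling convention; 'buf' stands in for the bootconfig text and is an assumption of this example:

	const char *emsg;
	int epos;
	int ret = xbc_init(buf, &emsg, &epos);

	if (ret < 0)
		pr_err("bootconfig parse error: %s at offset %d\n", emsg, epos);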
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 63097cb243cb..52661155f85f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -94,6 +94,11 @@ enum {
* Enable legacy local memory.events.
*/
CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
+
+ /*
+ * Enable recursive subtree protection
+ */
+ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6),
};
/* cftype->flags */
@@ -628,8 +633,9 @@ struct cgroup_subsys {
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
void (*post_attach)(void);
- int (*can_fork)(struct task_struct *task);
- void (*cancel_fork)(struct task_struct *task);
+ int (*can_fork)(struct task_struct *task,
+ struct css_set *cset);
+ void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
void (*fork)(struct task_struct *task);
void (*exit)(struct task_struct *task);
void (*release)(struct task_struct *task);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e75d2191226b..4598e4da6b1b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -27,6 +27,8 @@
#include <linux/cgroup-defs.h>
+struct kernel_clone_args;
+
#ifdef CONFIG_CGROUPS
/*
@@ -58,9 +60,6 @@ struct css_task_iter {
struct list_head *tcset_head;
struct list_head *task_pos;
- struct list_head *tasks_head;
- struct list_head *mg_tasks_head;
- struct list_head *dying_tasks_head;
struct list_head *cur_tasks_head;
struct css_set *cur_cset;
@@ -122,9 +121,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
-extern int cgroup_can_fork(struct task_struct *p);
-extern void cgroup_cancel_fork(struct task_struct *p);
-extern void cgroup_post_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);
@@ -708,9 +710,12 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
-static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
-static inline void cgroup_cancel_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) { return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 390437887b46..49a53a137610 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -12,6 +12,9 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
+#define AT91_PMC_V1 (1) /* PMC version 1 */
+#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */
+
#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
@@ -30,16 +33,34 @@
#define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */
#define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */
+#define AT91_PMC_PLL_CTRL0 0x0C /* PLL Control Register 0 [for SAM9X60] */
+#define AT91_PMC_PLL_CTRL0_ENPLL (1 << 28) /* Enable PLL */
+#define AT91_PMC_PLL_CTRL0_ENPLLCK (1 << 29) /* Enable PLL clock for PMC */
+#define AT91_PMC_PLL_CTRL0_ENLOCK (1 << 31) /* Enable PLL lock */
+
+#define AT91_PMC_PLL_CTRL1 0x10 /* PLL Control Register 1 [for SAM9X60] */
+
#define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */
#define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
+#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
+#define AT91_PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL /* Default PLL ACR value for UPLL */
+#define AT91_PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL /* Default PLL ACR value for PLLA */
+#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
+#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
+
#define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */
#define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */
#define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */
#define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */
#define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */
+#define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */
+#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */
+#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */
+#define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */
+
#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
@@ -180,6 +201,8 @@
#define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */
#define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */
+#define AT91_PMC_PLL_ISR0 0xEC /* PLL Interrupt Status Register 0 [SAM9X60 only] */
+
#define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only] */
#define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Disable Register 1 */
#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Status Register 1 */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5e88e7e33abe..034b0a644efc 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -347,7 +347,7 @@ static inline void *offset_to_ptr(const int *off)
* compiler has support to do so.
*/
#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 72393a8c1a6c..e970f97a7fcb 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -129,22 +129,13 @@ struct ftrace_likely_data {
#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
/*
- * Force always-inline if the user requests it so via the .config.
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
* semantics rather than c99. This prevents multiple symbol definition errors
* of extern inline functions at link time.
* A lot of inline functions can cause havoc with function tracing.
- * Do not use __always_inline here, since currently it expands to inline again
- * (which would break users of __always_inline).
*/
-#if !defined(CONFIG_OPTIMIZE_INLINING)
-#define inline inline __attribute__((__always_inline__)) __gnu_inline \
- __inline_maybe_unused notrace
-#else
-#define inline inline __gnu_inline \
- __inline_maybe_unused notrace
-#endif
+#define inline inline __gnu_inline __inline_maybe_unused notrace
/*
* gcc provides both __inline__ and __inline as alternate spellings of
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 44e552de419c..193cc9dbf448 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -41,6 +41,7 @@ enum coresight_dev_type {
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
CORESIGHT_DEV_TYPE_HELPER,
+ CORESIGHT_DEV_TYPE_ECT,
};
enum coresight_dev_subtype_sink {
@@ -68,6 +69,12 @@ enum coresight_dev_subtype_helper {
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
};
+/* Embedded Cross Trigger (ECT) sub-types */
+enum coresight_dev_subtype_ect {
+ CORESIGHT_DEV_SUBTYPE_ECT_NONE,
+ CORESIGHT_DEV_SUBTYPE_ECT_CTI,
+};
+
/**
* union coresight_dev_subtype - further characterisation of a type
* @sink_subtype: type of sink this component is, as defined
@@ -78,6 +85,8 @@ enum coresight_dev_subtype_helper {
* by @coresight_dev_subtype_source.
* @helper_subtype: type of helper this component is, as defined
* by @coresight_dev_subtype_helper.
+ * @ect_subtype: type of cross trigger this component is, as
+ * defined by @coresight_dev_subtype_ect
*/
union coresight_dev_subtype {
/* We have some devices which act as LINK and SINK */
@@ -87,6 +96,7 @@ union coresight_dev_subtype {
};
enum coresight_dev_subtype_source source_subtype;
enum coresight_dev_subtype_helper helper_subtype;
+ enum coresight_dev_subtype_ect ect_subtype;
};
/**
@@ -153,6 +163,8 @@ struct coresight_connection {
* activated but not yet enabled. Enabling for a _sink_
* happens when a source has been selected for it.
* @ea: Device attribute for sink representation under PMU directory.
+ * @ect_dev: Associated cross trigger device. Not part of the trace data
+ * path or connections.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
@@ -166,6 +178,8 @@ struct coresight_device {
/* sink specific fields */
bool activated; /* true only if a sink is part of a path */
struct dev_ext_attribute *ea;
+ /* cross trigger handling */
+ struct coresight_device *ect_dev;
};
/*
@@ -196,6 +210,7 @@ static struct coresight_dev_list (var) = { \
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
#define helper_ops(csdev) csdev->ops->helper_ops
+#define ect_ops(csdev) csdev->ops->ect_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -262,11 +277,23 @@ struct coresight_ops_helper {
int (*disable)(struct coresight_device *csdev, void *data);
};
+/**
+ * struct coresight_ops_ect - Ops for an embedded cross trigger device
+ *
+ * @enable : Enable the device
+ * @disable : Disable the device
+ */
+struct coresight_ops_ect {
+ int (*enable)(struct coresight_device *csdev);
+ int (*disable)(struct coresight_device *csdev);
+};
+
struct coresight_ops {
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
+ const struct coresight_ops_ect *ect_ops;
};
#ifdef CONFIG_CORESIGHT
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index d672b7db0cfc..a274d95fa66e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -35,6 +35,7 @@ struct debugfs_regset32 {
const struct debugfs_reg32 *regs;
int nregs;
void __iomem *base;
+ struct device *dev; /* Optional device for Runtime PM */
};
extern struct dentry *arch_debugfs_dir;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index abf5459a5b9d..1ade486fc2bb 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -43,18 +43,6 @@ struct dma_buf_ops {
bool cache_sgt_mapping;
/**
- * @dynamic_mapping:
- *
- * If true the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called with the dma_resv object locked.
- *
- * If false the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called without the dma_resv object locked.
- * Mutual exclusive with @cache_sgt_mapping.
- */
- bool dynamic_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -94,13 +82,42 @@ struct dma_buf_ops {
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
/**
+ * @pin:
+ *
+ * This is called by dma_buf_pin and lets the exporter know that the
+ * DMA-buf can't be moved any more.
+ *
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional and should only be used in limited use
+ * cases like scanout and not for temporary pin operations.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+ int (*pin)(struct dma_buf_attachment *attach);
+
+ /**
+ * @unpin:
+ *
+ * This is called by dma_buf_unpin and lets the exporter know that the
+ * DMA-buf can be moved again.
+ *
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct dma_buf_attachment *attach);
+
+ /**
* @map_dma_buf:
*
* This is called by dma_buf_map_attachment() and is used to map a
* shared &dma_buf into device address space, and it is mandatory. It
- * can only be called if @attach has been called successfully. This
- * essentially pins the DMA buffer into place, and it cannot be moved
- * any more
+ * can only be called if @attach has been called successfully.
*
* This call may sleep, e.g. when the backing storage first needs to be
* allocated, or moved to a location suitable for all currently attached
@@ -141,9 +158,8 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * It should also unpin the backing storage if this is the last mapping
- * of the DMA buffer, it the exporter supports backing storage
- * migration.
+ * For static dma_buf handling this might also unpin the backing
+ * storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *,
@@ -312,6 +328,34 @@ struct dma_buf {
};
/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ * @move_notify: [optional] notification that the DMA-buf is moving
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+ /**
+ * @move_notify
+ *
+ * If this callback is provided, the framework can avoid pinning the
+ * backing store while mappings exist.
+ *
+ * This callback is called with the lock of the reservation object
+ * associated with the dma_buf held and the mapping function must be
+ * called with this lock held as well. This makes sure that no mapping
+ * is created concurrently with an ongoing move operation.
+ *
+ * Mappings stay valid and are not directly affected by this callback.
+ * But the DMA-buf can now be in a different physical location, so all
+ * mappings should be destroyed and re-created as soon as possible.
+ *
+ * New mappings can be created after this callback returns, and will
+ * point to the new location of the DMA-buf.
+ */
+ void (*move_notify)(struct dma_buf_attachment *attach);
+};
+
+/**
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
@@ -319,8 +363,9 @@ struct dma_buf {
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
- * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
- * dma_resv lock held.
+ * @importer_ops: importer operations for this attachment; if provided,
+ * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -337,7 +382,8 @@ struct dma_buf_attachment {
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
- bool dynamic_mapping;
+ const struct dma_buf_attach_ops *importer_ops;
+ void *importer_priv;
void *priv;
};
@@ -399,7 +445,7 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
*/
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
- return dmabuf->ops->dynamic_mapping;
+ return !!dmabuf->ops->pin;
}
/**
@@ -413,16 +459,19 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
- return attach->dynamic_mapping;
+ return !!attach->importer_ops;
}
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
- bool dynamic_mapping);
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
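To show how the pieces fit together, a minimal dynamic-importer sketch; my_invalidate_mappings() and the surrounding driver variables (dmabuf, dev, my_priv) are hypothetical:

	static void my_move_notify(struct dma_buf_attachment *attach)
	{
		/* Cached mappings now point at the old location: tear them
		 * down and re-map later, under the same dma_resv lock. */
		my_invalidate_mappings(attach->importer_priv);
	}

	static const struct dma_buf_attach_ops my_importer_ops = {
		.move_notify = my_move_notify,
	};

	/* Attach dynamically; unless the importer later calls dma_buf_pin(),
	 * the exporter remains free to move the buffer between mappings. */
	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, my_priv);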
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index ca9b5770caee..b59f1b6be3e9 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -108,7 +108,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
-void *uncached_kernel_address(void *addr);
-void *cached_kernel_address(void *addr);
+void *arch_dma_set_uncached(void *addr, size_t size);
+void arch_dma_clear_uncached(void *addr, size_t size);
#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 64461fc64e1b..21065c04c4ac 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -300,6 +300,8 @@ struct dma_router {
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
* @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
@@ -318,6 +320,9 @@ struct dma_chan {
int chan_id;
struct dma_chan_dev *dev;
const char *name;
+#ifdef CONFIG_DEBUG_FS
+ char *dbg_client_name;
+#endif
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
@@ -618,10 +623,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
- if (tx->unmap) {
- dmaengine_unmap_put(tx->unmap);
- tx->unmap = NULL;
- }
+ if (!tx->unmap)
+ return;
+
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
}
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -805,7 +811,9 @@ struct dma_filter {
* called and there are no further references to this structure. This
* must be implemented to free resources however many existing drivers
* do not and are therefore not safe to unbind while in use.
- *
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra,
* controller-specific information.
*/
struct dma_device {
struct kref ref;
@@ -891,6 +899,11 @@ struct dma_device {
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
void (*device_release)(struct dma_device *dev);
+ /* debugfs support */
+#ifdef CONFIG_DEBUG_FS
+ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+ struct dentry *dbg_dev_root;
+#endif
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -1155,14 +1168,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
static inline bool dmaengine_check_align(enum dmaengine_alignment align,
size_t off1, size_t off2, size_t len)
{
- size_t mask;
-
- if (!align)
- return true;
- mask = (1 << align) - 1;
- if (mask & (off1 | off2 | len))
- return false;
- return true;
+ return !(((1 << align) - 1) & (off1 | off2 | len));
}
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
@@ -1236,9 +1242,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
return dma_dev_to_maxpq(dma);
- else if (dmaf_p_disabled_continue(flags))
+ if (dmaf_p_disabled_continue(flags))
return dma_dev_to_maxpq(dma) - 1;
- else if (dmaf_continue(flags))
+ if (dmaf_continue(flags))
return dma_dev_to_maxpq(dma) - 3;
BUG();
}
@@ -1249,7 +1255,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
if (inc) {
if (dir_icg)
return dir_icg;
- else if (sgl)
+ if (sgl)
return icg;
}
@@ -1415,11 +1421,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
- if (st) {
- st->last = last;
- st->used = used;
- st->residue = residue;
- }
+ if (!st)
+ return;
+
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
}
#ifdef CONFIG_DMA_ENGINE
@@ -1496,12 +1503,11 @@ static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
if (ret)
return ret;
- if (caps.descriptor_reuse) {
- tx->flags |= DMA_CTRL_REUSE;
- return 0;
- } else {
+ if (!caps.descriptor_reuse)
return -EPERM;
- }
+
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
}
static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
@@ -1517,10 +1523,10 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
/* this is supported for reusable desc, so check that */
- if (dmaengine_desc_test_reuse(desc))
- return desc->desc_free(desc);
- else
+ if (!dmaengine_desc_test_reuse(desc))
return -EPERM;
+
+ return desc->desc_free(desc);
}
/* --- DMA device --- */
@@ -1566,9 +1572,7 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir)
case DMA_DEV_TO_DEV:
return "DEV_TO_DEV";
default:
- break;
+ return "invalid";
}
-
- return "invalid";
}
#endif /* DMAENGINE_H */
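A sketch of a controller opting into the new debugfs summary hook; the mydma_* names are illustrative, not from this patch:

	#ifdef CONFIG_DEBUG_FS
	static void mydma_dbg_summary_show(struct seq_file *s,
					   struct dma_device *dma_dev)
	{
		seq_printf(s, "mydma: %u channels\n", dma_dev->chancnt);
	}
	#endif

	/* at device registration: */
	#ifdef CONFIG_DEBUG_FS
		dma_dev->dbg_summary_show = mydma_dbg_summary_show;
	#endif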
diff --git a/include/linux/err.h b/include/linux/err.h
index 87be24350e91..a139c64aef2a 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -62,9 +62,6 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
return 0;
}
-/* Deprecated */
-#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
-
#endif
#endif /* _LINUX_ERR_H */
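Remaining callers convert mechanically:

	return PTR_RET(ptr);		/* before (removed) */
	return PTR_ERR_OR_ZERO(ptr);	/* after: 0 for a valid pointer,
					 * the -errno for an ERR_PTR() */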
diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h
index 1c143d200caa..fa70945f4e6b 100644
--- a/include/linux/extcon-provider.h
+++ b/include/linux/extcon-provider.h
@@ -17,30 +17,30 @@ struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
/* Following APIs register/unregister the extcon device. */
-extern int extcon_dev_register(struct extcon_dev *edev);
-extern void extcon_dev_unregister(struct extcon_dev *edev);
-extern int devm_extcon_dev_register(struct device *dev,
+int extcon_dev_register(struct extcon_dev *edev);
+void extcon_dev_unregister(struct extcon_dev *edev);
+int devm_extcon_dev_register(struct device *dev,
struct extcon_dev *edev);
-extern void devm_extcon_dev_unregister(struct device *dev,
+void devm_extcon_dev_unregister(struct device *dev,
struct extcon_dev *edev);
/* Following APIs allocate/free the memory of the extcon device. */
-extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
-extern void extcon_dev_free(struct extcon_dev *edev);
-extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
+void extcon_dev_free(struct extcon_dev *edev);
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
const unsigned int *cable);
-extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
/* Synchronize the state and property value for each external connector. */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+int extcon_sync(struct extcon_dev *edev, unsigned int id);
/*
* Following APIs set the connected state of each external connector.
* The 'id' argument indicates the defined external connector.
*/
-extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+int extcon_set_state(struct extcon_dev *edev, unsigned int id,
bool state);
-extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
bool state);
/*
@@ -52,13 +52,13 @@ extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
* for each external connector. They are used to set the capability of the
* property of each external connector based on the id and property.
*/
-extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+int extcon_set_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val);
-extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val);
-extern int extcon_set_property_capability(struct extcon_dev *edev,
+int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
#else /* CONFIG_EXTCON */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 1b1d77ec2114..fd183fb9c20f 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -286,6 +286,11 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
{
return ERR_PTR(-ENODEV);
}
+
+static inline const char *extcon_get_edev_name(struct extcon_dev *edev)
+{
+ return NULL;
+}
#endif /* CONFIG_EXTCON */
/*
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index ac3f4888b3df..3c383ddd92dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -125,6 +125,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_RESIZEFS_FLAG 0x00004000
#define CP_DISABLED_QUICK_FLAG 0x00002000
#define CP_DISABLED_FLAG 0x00001000
#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index b79fa9bb7359..3049a6c06d9e 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -47,7 +47,8 @@
* Directory entry modification events - reported only to directory
* where entry is modified and not to a watching parent.
*/
-#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
+ FAN_DIR_MODIFY)
/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
index 6312c8cb084a..891057434858 100644
--- a/include/linux/firmware/imx/ipc.h
+++ b/include/linux/firmware/imx/ipc.h
@@ -25,7 +25,6 @@ enum imx_sc_rpc_svc {
IMX_SC_RPC_SVC_PAD = 6,
IMX_SC_RPC_SVC_MISC = 7,
IMX_SC_RPC_SVC_IRQ = 8,
- IMX_SC_RPC_SVC_ABORT = 9
};
struct imx_sc_rpc_msg {
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 6669e2a1d5fd..95b0da2326a9 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -12,6 +12,8 @@ enum {
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
SM_GET_CHIP_ID,
+ SM_A1_PWRC_SET,
+ SM_A1_PWRC_GET,
};
struct meson_sm_firmware;
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index c8c42214e7fb..8efa5ac22d7e 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -82,6 +82,7 @@ enum pm_api_id {
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
+ PM_SECURE_AES = 47,
PM_FEATURE_CHECK = 63,
PM_API_MAX,
};
@@ -322,12 +323,13 @@ struct zynqmp_eemi_ops {
const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
+ int (*aes)(const u64 address, u32 *out);
};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload);
-#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
#else
static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3d69de600494..4f6f59b4f22a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -526,6 +526,11 @@ static inline void i_mmap_lock_write(struct address_space *mapping)
down_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_write(struct address_space *mapping)
+{
+ return down_write_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_unlock_write(struct address_space *mapping)
{
up_write(&mapping->i_mmap_rwsem);
@@ -3394,7 +3399,7 @@ static inline bool io_is_direct(struct file *filp)
return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
}
-static inline bool vma_is_dax(struct vm_area_struct *vma)
+static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 54d9436600c7..2b5f8366dbe1 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -381,6 +381,22 @@ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
+/**
+ * struct fsl_mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct fsl_mc_version {
+ u32 major;
+ u32 minor;
+ u32 revision;
+};
+
+struct fsl_mc_version *fsl_mc_get_version(void);
+
int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
u16 mc_io_flags,
struct fsl_mc_io **new_mc_io);
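A usage sketch for the new version query; the 10.1 threshold and the gated feature are illustrative assumptions:

	struct fsl_mc_version *ver = fsl_mc_get_version();

	if (ver && (ver->major > 10 || (ver->major == 10 && ver->minor >= 1)))
		enable_new_mc_feature();	/* hypothetical */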
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index a2d5d175d3c1..5ab28f6c7d26 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -18,39 +18,63 @@
#include <linux/bug.h>
/*
- * Notify this @dir inode about a change in the directory entry @dentry.
+ * Notify this @dir inode about a change in a child directory entry.
+ * The directory entry may have turned positive or negative or its inode may
+ * have changed (i.e. renamed over).
*
* Unlike fsnotify_parent(), the event will be reported regardless of the
* FS_EVENT_ON_CHILD mask on the parent inode.
*/
-static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry,
- __u32 mask)
+static inline void fsnotify_name(struct inode *dir, __u32 mask,
+ struct inode *child,
+ const struct qstr *name, u32 cookie)
{
- return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
- &dentry->d_name, 0);
+ fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie);
+ /*
+ * Send another flavor of the event without child inode data and
+ * without the specific event type (e.g. FS_CREATE|FS_ISDIR).
+ * The name is relative to the dir inode the event is reported to.
+ */
+ fsnotify(dir, FS_DIR_MODIFY, dir, FSNOTIFY_EVENT_INODE, name, 0);
}
-/* Notify this dentry's parent about a child's events. */
-static inline int fsnotify_parent(const struct path *path,
- struct dentry *dentry, __u32 mask)
+static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
+ __u32 mask)
{
- if (!dentry)
- dentry = path->dentry;
-
- return __fsnotify_parent(path, dentry, mask);
+ fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0);
}
/*
- * Simple wrapper to consolidate calls fsnotify_parent()/fsnotify() when
- * an event is on a path.
+ * Simple wrappers to consolidate calls to fsnotify_parent()/fsnotify() when
+ * an event is on a file/dentry.
*/
-static inline int fsnotify_path(struct inode *inode, const struct path *path,
- __u32 mask)
+static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+static inline int fsnotify_file(struct file *file, __u32 mask)
{
- int ret = fsnotify_parent(path, NULL, mask);
+ const struct path *path = &file->f_path;
+ struct inode *inode = file_inode(file);
+ int ret;
+
+ if (file->f_mode & FMODE_NONOTIFY)
+ return 0;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+ ret = fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
if (ret)
return ret;
+
return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
@@ -58,19 +82,16 @@ static inline int fsnotify_path(struct inode *inode, const struct path *path,
static inline int fsnotify_perm(struct file *file, int mask)
{
int ret;
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
__u32 fsnotify_mask = 0;
- if (file->f_mode & FMODE_NONOTIFY)
- return 0;
if (!(mask & (MAY_READ | MAY_OPEN)))
return 0;
+
if (mask & MAY_OPEN) {
fsnotify_mask = FS_OPEN_PERM;
if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM);
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
if (ret)
return ret;
@@ -79,10 +100,7 @@ static inline int fsnotify_perm(struct file *file, int mask)
fsnotify_mask = FS_ACCESS_PERM;
}
- if (S_ISDIR(inode->i_mode))
- fsnotify_mask |= FS_ISDIR;
-
- return fsnotify_path(inode, path, fsnotify_mask);
+ return fsnotify_file(file, fsnotify_mask);
}
/*
@@ -122,10 +140,8 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
mask |= FS_ISDIR;
}
- fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
- fs_cookie);
- fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
- fs_cookie);
+ fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie);
+ fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
@@ -180,12 +196,13 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
* Note: We have to pass also the linked inode ptr as some filesystems leave
* new_dentry->d_inode NULL and instantiate inode pointer later
*/
-static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
+static inline void fsnotify_link(struct inode *dir, struct inode *inode,
+ struct dentry *new_dentry)
{
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0);
+ fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0);
}
/*
@@ -229,15 +246,7 @@ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
*/
static inline void fsnotify_access(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
- __u32 mask = FS_ACCESS;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY))
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, FS_ACCESS);
}
/*
@@ -245,15 +254,7 @@ static inline void fsnotify_access(struct file *file)
*/
static inline void fsnotify_modify(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
- __u32 mask = FS_MODIFY;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY))
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, FS_MODIFY);
}
/*
@@ -261,16 +262,12 @@ static inline void fsnotify_modify(struct file *file)
*/
static inline void fsnotify_open(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
__u32 mask = FS_OPEN;
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
if (file->f_flags & __FMODE_EXEC)
mask |= FS_OPEN_EXEC;
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, mask);
}
/*
@@ -278,16 +275,10 @@ static inline void fsnotify_open(struct file *file)
*/
static inline void fsnotify_close(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
- fmode_t mode = file->f_mode;
- __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
+ __u32 mask = (file->f_mode & FMODE_WRITE) ? FS_CLOSE_WRITE :
+ FS_CLOSE_NOWRITE;
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY))
- fsnotify_path(inode, path, mask);
+ fsnotify_file(file, mask);
}
/*
@@ -295,14 +286,7 @@ static inline void fsnotify_close(struct file *file)
*/
static inline void fsnotify_xattr(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
- __u32 mask = FS_ATTRIB;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_dentry(dentry, FS_ATTRIB);
}
/*
@@ -311,7 +295,6 @@ static inline void fsnotify_xattr(struct dentry *dentry)
*/
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
- struct inode *inode = dentry->d_inode;
__u32 mask = 0;
if (ia_valid & ATTR_UID)
@@ -332,13 +315,8 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
if (ia_valid & ATTR_MODE)
mask |= FS_ATTRIB;
- if (mask) {
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
- }
+ if (mask)
+ fsnotify_dentry(dentry, mask);
}
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 1915bdba2fad..f0c506405b54 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -47,18 +47,18 @@
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permission hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
+#define FS_DIR_MODIFY 0x00080000 /* Directory entry was modified */
#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
-#define FS_ISDIR 0x40000000 /* event occurred against dir */
-#define FS_IN_ONESHOT 0x80000000 /* only send event once */
-
-#define FS_DN_RENAME 0x10000000 /* file renamed */
-#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
-
/* This inode cares about things that happen to its children. Always set for
* dnotify and inotify. */
#define FS_EVENT_ON_CHILD 0x08000000
+#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
+#define FS_ISDIR 0x40000000 /* event occurred against dir */
+#define FS_IN_ONESHOT 0x80000000 /* only send event once */
+
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
/*
@@ -67,7 +67,8 @@
* The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
* when a directory entry inside a child subdir changes.
*/
-#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE)
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | \
+ FS_DIR_MODIFY)
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
FS_OPEN_EXEC_PERM)
@@ -133,8 +134,7 @@ struct fsnotify_ops {
*/
struct fsnotify_event {
struct list_head list;
- /* inode may ONLY be dereferenced during handle_event(). */
- struct inode *inode; /* either the inode the event happened to or its parent */
+ unsigned long objectid; /* identifier for queue merges */
};
/*
@@ -213,10 +213,36 @@ struct fsnotify_group {
};
};
-/* when calling fsnotify tell it if the data is a path or inode */
-#define FSNOTIFY_EVENT_NONE 0
-#define FSNOTIFY_EVENT_PATH 1
-#define FSNOTIFY_EVENT_INODE 2
+/* When calling fsnotify tell it if the data is a path or inode */
+enum fsnotify_data_type {
+ FSNOTIFY_EVENT_NONE,
+ FSNOTIFY_EVENT_PATH,
+ FSNOTIFY_EVENT_INODE,
+};
+
+static inline const struct inode *fsnotify_data_inode(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return data;
+ case FSNOTIFY_EVENT_PATH:
+ return d_inode(((const struct path *)data)->dentry);
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct path *fsnotify_data_path(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_PATH:
+ return data;
+ default:
+ return NULL;
+ }
+}
enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
@@ -351,9 +377,10 @@ struct fsnotify_mark {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const struct qstr *name, u32 cookie);
-extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
+extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data,
+ int data_type, const struct qstr *name, u32 cookie);
+extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ int data_type);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
@@ -500,21 +527,22 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
static inline void fsnotify_init_event(struct fsnotify_event *event,
- struct inode *inode)
+ unsigned long objectid)
{
INIT_LIST_HEAD(&event->list);
- event->inode = inode;
+ event->objectid = objectid;
}
#else
-static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const struct qstr *name, u32 cookie)
+static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data,
+ int data_type, const struct qstr *name, u32 cookie)
{
return 0;
}
-static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
{
return 0;
}
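Inside a backend's event handler, the new accessors replace open-coded checks on data_type; a minimal sketch:

	const struct inode *inode = fsnotify_data_inode(data, data_type);
	const struct path *path = fsnotify_data_path(data, data_type);

	/* path is non-NULL only for FSNOTIFY_EVENT_PATH events; inode is
	 * resolved for both INODE and PATH events and NULL for EVENT_NONE. */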
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e5b817cb86e7..4aba4c86c626 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -124,6 +124,8 @@ struct vm_area_struct;
*
* Reclaim modifiers
* ~~~~~~~~~~~~~~~~~
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
*
* %__GFP_IO can start physical IO.
*
@@ -485,6 +487,12 @@ static inline void arch_free_page(struct page *page, int order) { }
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
+#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+static inline int arch_make_page_accessible(struct page *page)
+{
+ return 0;
+}
+#endif
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 2157717c2136..008ad3ee56b7 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -102,11 +102,9 @@ void devm_gpio_free(struct device *dev, unsigned int gpio);
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bug.h>
-#include <linux/pinctrl/pinctrl.h>
struct device;
struct gpio_chip;
-struct pinctrl_dev;
static inline bool gpio_is_valid(int number)
{
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index bf2d017dd7b7..901aab89d025 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -2,9 +2,10 @@
#ifndef __LINUX_GPIO_CONSUMER_H
#define __LINUX_GPIO_CONSUMER_H
+#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/compiler_types.h>
#include <linux/err.h>
-#include <linux/kernel.h>
struct device;
@@ -156,6 +157,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_array *array_info,
unsigned long *value_bitmap);
+int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_toggle_active_low(struct gpio_desc *desc);
@@ -189,6 +191,8 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
#else /* CONFIG_GPIOLIB */
+#include <linux/kernel.h>
+
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
@@ -470,6 +474,13 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
return 0;
}
+static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
/* GPIO can never have been requested */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 6ef05bccc0a6..b8fc92c177eb 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -87,7 +87,7 @@ struct gpio_irq_chip {
* @need_valid_mask to make these GPIO lines unavailable for
* translation.
*/
- int (*child_to_parent_hwirq)(struct gpio_chip *chip,
+ int (*child_to_parent_hwirq)(struct gpio_chip *gc,
unsigned int child_hwirq,
unsigned int child_type,
unsigned int *parent_hwirq,
@@ -102,7 +102,7 @@ struct gpio_irq_chip {
* variant named &gpiochip_populate_parent_fwspec_fourcell is also
* available.
*/
- void *(*populate_parent_alloc_arg)(struct gpio_chip *chip,
+ void *(*populate_parent_alloc_arg)(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type);
@@ -114,7 +114,7 @@ struct gpio_irq_chip {
* callback. If this is not specified, then a default callback will be
* provided that returns the line offset.
*/
- unsigned int (*child_offset_to_irq)(struct gpio_chip *chip,
+ unsigned int (*child_offset_to_irq)(struct gpio_chip *gc,
unsigned int pin);
/**
@@ -209,7 +209,7 @@ struct gpio_irq_chip {
* a particular driver wants to clear IRQ related registers
* in order to avoid undesired events.
*/
- int (*init_hw)(struct gpio_chip *chip);
+ int (*init_hw)(struct gpio_chip *gc);
/**
* @init_valid_mask: optional routine to initialize @valid_mask, to be
@@ -220,7 +220,7 @@ struct gpio_irq_chip {
* then directly set some bits to "0" if they cannot be used for
* interrupts.
*/
- void (*init_valid_mask)(struct gpio_chip *chip,
+ void (*init_valid_mask)(struct gpio_chip *gc,
unsigned long *valid_mask,
unsigned int ngpios);
@@ -348,40 +348,40 @@ struct gpio_chip {
struct device *parent;
struct module *owner;
- int (*request)(struct gpio_chip *chip,
+ int (*request)(struct gpio_chip *gc,
unsigned offset);
- void (*free)(struct gpio_chip *chip,
+ void (*free)(struct gpio_chip *gc,
unsigned offset);
- int (*get_direction)(struct gpio_chip *chip,
+ int (*get_direction)(struct gpio_chip *gc,
unsigned offset);
- int (*direction_input)(struct gpio_chip *chip,
+ int (*direction_input)(struct gpio_chip *gc,
unsigned offset);
- int (*direction_output)(struct gpio_chip *chip,
+ int (*direction_output)(struct gpio_chip *gc,
unsigned offset, int value);
- int (*get)(struct gpio_chip *chip,
+ int (*get)(struct gpio_chip *gc,
unsigned offset);
- int (*get_multiple)(struct gpio_chip *chip,
+ int (*get_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- void (*set)(struct gpio_chip *chip,
+ void (*set)(struct gpio_chip *gc,
unsigned offset, int value);
- void (*set_multiple)(struct gpio_chip *chip,
+ void (*set_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- int (*set_config)(struct gpio_chip *chip,
+ int (*set_config)(struct gpio_chip *gc,
unsigned offset,
unsigned long config);
- int (*to_irq)(struct gpio_chip *chip,
+ int (*to_irq)(struct gpio_chip *gc,
unsigned offset);
void (*dbg_show)(struct seq_file *s,
- struct gpio_chip *chip);
+ struct gpio_chip *gc);
- int (*init_valid_mask)(struct gpio_chip *chip,
+ int (*init_valid_mask)(struct gpio_chip *gc,
unsigned long *valid_mask,
unsigned int ngpios);
- int (*add_pin_ranges)(struct gpio_chip *chip);
+ int (*add_pin_ranges)(struct gpio_chip *gc);
int base;
u16 ngpio;
@@ -458,11 +458,11 @@ struct gpio_chip {
#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *chip,
+extern const char *gpiochip_is_requested(struct gpio_chip *gc,
unsigned offset);
/* add/remove chips */
-extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
@@ -490,43 +490,43 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
* Otherwise it returns zero as a success code.
*/
#ifdef CONFIG_LOCKDEP
-#define gpiochip_add_data(chip, data) ({ \
+#define gpiochip_add_data(gc, data) ({ \
static struct lock_class_key lock_key; \
static struct lock_class_key request_key; \
- gpiochip_add_data_with_key(chip, data, &lock_key, \
+ gpiochip_add_data_with_key(gc, data, &lock_key, \
&request_key); \
})
#else
-#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
+#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL)
#endif /* CONFIG_LOCKDEP */
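Illustration (not part of the patch): a minimal registration sketch against the renamed parameters; struct my_gpio and my_chip_probe() are hypothetical, and under CONFIG_LOCKDEP the macro supplies the lock classes itself:

#include <linux/gpio/driver.h>

struct my_gpio {
        struct gpio_chip gc;
        /* driver private state would live here */
};

static int my_chip_probe(struct device *dev, struct my_gpio *mg)
{
        mg->gc.label = "my-gpio";
        mg->gc.parent = dev;
        mg->gc.base = -1;       /* dynamic base allocation */
        mg->gc.ngpio = 8;

        /* Driver data is retrieved later via gpiochip_get_data(gc). */
        return gpiochip_add_data(&mg->gc, mg);
}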
-static inline int gpiochip_add(struct gpio_chip *chip)
+static inline int gpiochip_add(struct gpio_chip *gc)
{
- return gpiochip_add_data(chip, NULL);
+ return gpiochip_add_data(gc, NULL);
}
-extern void gpiochip_remove(struct gpio_chip *chip);
-extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
+extern void gpiochip_remove(struct gpio_chip *gc);
+extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc,
void *data);
extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *chip, void *data));
+ int (*match)(struct gpio_chip *gc, void *data));
-bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
-int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset);
/* Line status inquiry for drivers */
-bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
/* Sleep persistence inquiry for drivers */
-bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset);
/* get driver data */
-void *gpiochip_get_data(struct gpio_chip *chip);
+void *gpiochip_get_data(struct gpio_chip *gc);
struct bgpio_pdata {
const char *label;
@@ -536,23 +536,23 @@ struct bgpio_pdata {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type);
-void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type);
#else
-static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type)
{
return NULL;
}
-static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
unsigned int parent_hwirq,
unsigned int parent_type)
{
@@ -572,6 +572,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
+#define BGPIOF_NO_SET_ON_INPUT BIT(6)
int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq);
@@ -582,11 +583,11 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain,
void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *data);
-void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
+void gpiochip_set_nested_irqchip(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int parent_irq);
-int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
@@ -595,7 +596,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
-bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
unsigned int offset);
#ifdef CONFIG_LOCKDEP
@@ -606,7 +607,7 @@ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
* boilerplate static inlines provides such a key for each
* unique instance.
*/
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+static inline int gpiochip_irqchip_add(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
@@ -615,12 +616,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
static struct lock_class_key lock_key;
static struct lock_class_key request_key;
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
handler, type, false,
&lock_key, &request_key);
}
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
@@ -630,35 +631,35 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
static struct lock_class_key lock_key;
static struct lock_class_key request_key;
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
handler, type, true,
&lock_key, &request_key);
}
#else /* ! CONFIG_LOCKDEP */
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+static inline int gpiochip_irqchip_add(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
handler, type, false, NULL, NULL);
}
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ return gpiochip_irqchip_add_key(gc, irqchip, first_irq,
handler, type, true, NULL, NULL);
}
#endif /* CONFIG_LOCKDEP */
-int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
-void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
-int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset);
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset);
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset,
unsigned long config);
/**
@@ -675,25 +676,25 @@ struct gpio_pin_range {
#ifdef CONFIG_PINCTRL
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins);
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+void gpiochip_remove_pin_ranges(struct gpio_chip *gc);
#else /* ! CONFIG_PINCTRL */
static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
+gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
@@ -701,27 +702,27 @@ gpiochip_add_pingroup_range(struct gpio_chip *chip,
}
static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+gpiochip_remove_pin_ranges(struct gpio_chip *gc)
{
}
#endif /* CONFIG_PINCTRL */
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
-void devprop_gpiochip_set_names(struct gpio_chip *chip,
+void devprop_gpiochip_set_names(struct gpio_chip *gc,
const struct fwnode_handle *fwnode);
#ifdef CONFIG_GPIOLIB
/* lock/unlock as IRQ */
-int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
@@ -735,14 +736,14 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
-static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
+static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
WARN_ON(1);
return -EINVAL;
}
-static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
WARN_ON(1);
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 417d2c4bc60d..78b6ea5fa8ba 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -145,7 +145,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
@@ -197,7 +197,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 9918a6c910c5..9613d796cfb1 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -207,7 +207,7 @@ struct hdmi_drm_infoframe {
u16 max_fall;
};
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index ddf9f7144c43..7475051100c7 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -3,58 +3,8 @@
* Copyright 2013 Red Hat Inc.
*
* Authors: Jérôme Glisse <jglisse@redhat.com>
- */
-/*
- * Heterogeneous Memory Management (HMM)
- *
- * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it
- * is for. Here we focus on the HMM API description, with some explanation of
- * the underlying implementation.
- *
- * Short description: HMM provides a set of helpers to share a virtual address
- * space between CPU and a device, so that the device can access any valid
- * address of the process (while still obeying memory protection). HMM also
- * provides helpers to migrate process memory to device memory, and back. Each
- * set of functionality (address space mirroring, and migration to and from
- * device memory) can be used independently of the other.
- *
- *
- * HMM address space mirroring API:
- *
- * Use HMM address space mirroring if you want to mirror a range of the CPU
- * page tables of a process into a device page table. Here, "mirror" means "keep
- * synchronized". Prerequisites: the device must provide the ability to write-
- * protect its page tables (at PAGE_SIZE granularity), and must be able to
- * recover from the resulting potential page faults.
- *
- * HMM guarantees that at any point in time, a given virtual address points to
- * either the same memory in both CPU and device page tables (that is: CPU and
- * device page tables each point to the same pages), or that one page table (CPU
- * or device) points to no entry, while the other still points to the old page
- * for the address. The latter case happens when the CPU page table update
- * happens first, and then the update is mirrored over to the device page table.
- * This does not cause any issue, because the CPU page table cannot start
- * pointing to a new page until the device page table is invalidated.
- *
- * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
- * updates to each device driver that has registered a mirror. It also provides
- * some API calls to help with taking a snapshot of the CPU page table, and to
- * synchronize with any updates that might happen concurrently.
*
- *
- * HMM migration to and from device memory:
- *
- * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
- * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
- * of the device memory, and allows the device driver to manage its memory
- * using those struct pages. Having struct pages for device memory makes
- * migration easier. Because that memory is not addressable by the CPU it must
- * never be pinned to the device; in other words, any CPU page fault can always
- * cause the device memory to be migrated (copied/moved) back to regular memory.
- *
- * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
- * allows use of a device DMA engine to perform the copy operation between
- * regular system memory and device memory.
+ * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
*/
#ifndef LINUX_HMM_H
#define LINUX_HMM_H
@@ -74,7 +24,6 @@
* Flags:
* HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
* HMM_PFN_WRITE: CPU page table has write permission set
- * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
*
* The driver provides a flags array for mapping page protections to device
* PTE bits. If the driver valid bit for an entry is bit 3,
@@ -86,7 +35,6 @@
enum hmm_pfn_flag_e {
HMM_PFN_VALID = 0,
HMM_PFN_WRITE,
- HMM_PFN_DEVICE_PRIVATE,
HMM_PFN_FLAG_MAX
};
@@ -122,9 +70,6 @@ enum hmm_pfn_value_e {
*
* @notifier: a mmu_interval_notifier that includes the start/end
* @notifier_seq: result of mmu_interval_read_begin()
- * @hmm: the core HMM structure this range is active against
- * @vma: the vm area struct for the range
- * @list: all range lock are on a list
* @start: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
* @pfns: array of pfns (big enough for the range)
@@ -132,8 +77,8 @@ enum hmm_pfn_value_e {
* @values: pfn value for some special case (none, special, error, ...)
* @default_flags: default flags for the range (write, read, ... see hmm doc)
* @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
- * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT)
- * @valid: pfns array did not change since it has been fill by an HMM function
+ * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
+ * @dev_private_owner: owner of device private pages
*/
struct hmm_range {
struct mmu_interval_notifier *notifier;
@@ -146,6 +91,7 @@ struct hmm_range {
uint64_t default_flags;
uint64_t pfn_flags_mask;
uint8_t pfn_shift;
+ void *dev_private_owner;
};
/*
@@ -172,70 +118,9 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang
}
/*
- * hmm_device_entry_to_pfn() - return pfn value store in a device entry
- * @range: range use to decode device entry value
- * @entry: device entry to extract pfn from
- * Return: pfn value if device entry is valid, -1UL otherwise
- */
-static inline unsigned long
-hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
-{
- if (pfn == range->values[HMM_PFN_NONE])
- return -1UL;
- if (pfn == range->values[HMM_PFN_ERROR])
- return -1UL;
- if (pfn == range->values[HMM_PFN_SPECIAL])
- return -1UL;
- if (!(pfn & range->flags[HMM_PFN_VALID]))
- return -1UL;
- return (pfn >> range->pfn_shift);
-}
-
-/*
- * hmm_device_entry_from_page() - create a valid device entry for a page
- * @range: range use to encode HMM pfn value
- * @page: page for which to create the device entry
- * Return: valid device entry for the page
- */
-static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
- struct page *page)
-{
- return (page_to_pfn(page) << range->pfn_shift) |
- range->flags[HMM_PFN_VALID];
-}
-
-/*
- * hmm_device_entry_from_pfn() - create a valid device entry value from pfn
- * @range: range use to encode HMM pfn value
- * @pfn: pfn value for which to create the device entry
- * Return: valid device entry for the pfn
- */
-static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
- unsigned long pfn)
-{
- return (pfn << range->pfn_shift) |
- range->flags[HMM_PFN_VALID];
-}
-
-/*
- * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
- */
-#define HMM_FAULT_ALLOW_RETRY (1 << 0)
-
-/* Don't fault in missing PTEs, just snapshot the current state. */
-#define HMM_FAULT_SNAPSHOT (1 << 1)
-
-#ifdef CONFIG_HMM_MIRROR
-/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
-long hmm_range_fault(struct hmm_range *range, unsigned int flags);
-#else
-static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
-{
- return -EOPNOTSUPP;
-}
-#endif
+long hmm_range_fault(struct hmm_range *range);
/*
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
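Illustration (not part of the patch): with the flags argument gone, hmm_range_fault() always faults in missing pages and is wrapped in the usual notifier retry loop. A hedged sketch of that pattern for this kernel era (range field setup elided; fault_and_snapshot() is hypothetical):

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static long fault_and_snapshot(struct mm_struct *mm, struct hmm_range *range)
{
        long ret;

again:
        range->notifier_seq = mmu_interval_read_begin(range->notifier);
        down_read(&mm->mmap_sem);
        ret = hmm_range_fault(range);
        up_read(&mm->mmap_sem);
        if (ret == -EBUSY)
                goto again;
        if (ret < 0)
                return ret;

        /*
         * The snapshot is only usable while the notifier sequence is
         * unchanged; callers typically take their device page table lock
         * and recheck with mmu_interval_read_retry() before committing.
         */
        if (mmu_interval_read_retry(range->notifier, range->notifier_seq))
                goto again;
        return ret;
}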
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 5aca3d1bdb32..cfbb0a87c5f0 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -46,9 +46,46 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
- int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+ unsigned long cp_flags);
+vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
+ pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pmd - insert a pmd size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
+ bool write)
+{
+ return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
+vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
+ pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pud - insert a pud size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
+ bool write)
+{
+ return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
+
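Illustration (not part of the patch): the _prot variants let a driver override the protection while the inline wrappers keep the old behaviour. A hedged sketch; my_huge_fault() and its uncached flag are hypothetical, and pgprot_noncached() is only an example override:

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

static vm_fault_t my_huge_fault(struct vm_fault *vmf, pfn_t pfn, bool uncached)
{
        bool write = vmf->flags & FAULT_FLAG_WRITE;

        if (!uncached)
                /* Same as before the split: vma->vm_page_prot is used. */
                return vmf_insert_pfn_pmd(vmf, pfn, write);

        /* Override the protection, e.g. for uncached device memory. */
        return vmf_insert_pfn_pmd_prot(vmf, pfn,
                        pgprot_noncached(vmf->vma->vm_page_prot), write);
}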
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
@@ -87,8 +124,6 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
-extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
extern unsigned long transparent_hugepage_flags;
/*
@@ -100,7 +135,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
if (vma->vm_flags & VM_NOHUGEPAGE)
return false;
- if (is_vma_temporary_stack(vma))
+ if (vma_is_temporary_stack(vma))
return false;
if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
@@ -289,7 +324,11 @@ static inline struct list_head *page_deferred_list(struct page *page)
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-#define hpage_nr_pages(x) 1
+static inline int hpage_nr_pages(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ return 1;
+}
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1e897e4168ac..5ea05879a0a9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -46,7 +46,52 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On private mappings, the counter to uncharge reservations is stored
+ * here. If these fields are 0, then either the mapping is shared, or
+ * cgroup accounting is disabled for this resv_map.
+ */
+ struct page_counter *reservation_counter;
+ unsigned long pages_per_hpage;
+ struct cgroup_subsys_state *css;
+#endif
+};
+
+/*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+ *
+ * The region data structures are embedded into a resv_map and protected
+ * by a resv_map's lock. The set of regions within the resv_map represent
+ * reservations for huge pages, or huge pages that have already been
+ * instantiated within the map. The from and to elements are huge page
+ * indices into the associated mapping. from indicates the starting index
+ * of the region. to represents the first index past the end of the region.
+ *
+ * For example, a file region structure with from == 0 and to == 4 represents
+ * four huge pages in a mapping. It is important to note that the to element
+ * represents the first element past the end of the region. This is used in
+ * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
+ *
+ * Interval notation of the form [from, to) will be used to indicate that
+ * the endpoint from is inclusive and to is exclusive.
+ */
+struct file_region {
+ struct list_head link;
+ long from;
+ long to;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On shared mappings, each reserved region appears as a struct
+ * file_region in resv_map. These fields hold the info needed to
+ * uncharge each reservation.
+ */
+ struct page_counter *reservation_counter;
+ struct cgroup_subsys_state *css;
+#endif
};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -109,6 +154,8 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
@@ -151,6 +198,12 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
+static inline struct address_space *hugetlb_page_mapping_lock_write(
+ struct page *hpage)
+{
+ return NULL;
+}
+
static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
pte_t *ptep)
{
@@ -390,7 +443,10 @@ static inline bool is_file_hugepages(struct file *file)
return is_file_shm_hugepages(file);
}
-
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return HUGETLBFS_SB(i->i_sb)->hstate;
+}
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) false
@@ -402,6 +458,10 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
return ERR_PTR(-ENOSYS);
}
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return NULL;
+}
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -432,8 +492,8 @@ struct hstate {
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
- struct cftype cgroup_files_dfl[5];
- struct cftype cgroup_files_legacy[5];
+ struct cftype cgroup_files_dfl[7];
+ struct cftype cgroup_files_legacy[9];
#endif
char name[HSTATE_NAME_LEN];
};
@@ -472,11 +532,6 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
-static inline struct hstate *hstate_inode(struct inode *i)
-{
- return HUGETLBFS_SB(i->i_sb)->hstate;
-}
-
static inline struct hstate *hstate_file(struct file *f)
{
return hstate_inode(file_inode(f));
@@ -729,11 +784,6 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return NULL;
}
-static inline struct hstate *hstate_inode(struct inode *i)
-{
- return NULL;
-}
-
static inline struct hstate *page_hstate(struct page *page)
{
return NULL;
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 063962f6dfc6..2ad6e92f124a 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -18,34 +18,96 @@
#include <linux/mmdebug.h>
struct hugetlb_cgroup;
+struct resv_map;
+struct file_region;
+
/*
* Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
+ * At least 4 pages are necessary for all the tracking information.
+ * The second tail page (hpage[2]) is the fault usage cgroup.
+ * The third tail page (hpage[3]) is the reservation usage cgroup.
*/
#define HUGETLB_CGROUP_MIN_ORDER 2
#ifdef CONFIG_CGROUP_HUGETLB
+enum hugetlb_memory_event {
+ HUGETLB_MAX,
+ HUGETLB_NR_MEMORY_EVENTS,
+};
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+struct hugetlb_cgroup {
+ struct cgroup_subsys_state css;
+
+ /*
+ * the counter to account for hugepages from hugetlb.
+ */
+ struct page_counter hugepage[HUGE_MAX_HSTATE];
+
+ /*
+ * the counter to account for hugepage reservations from hugetlb.
+ */
+ struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];
+
+ atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+ atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+
+ /* Handle for "hugetlb.events" */
+ struct cgroup_file events_file[HUGE_MAX_HSTATE];
+
+ /* Handle for "hugetlb.events.local" */
+ struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
+};
+
+static inline struct hugetlb_cgroup *
+__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
- return (struct hugetlb_cgroup *)page[2].private;
+ if (rsvd)
+ return (struct hugetlb_cgroup *)page[3].private;
+ else
+ return (struct hugetlb_cgroup *)page[2].private;
+}
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+ return __hugetlb_cgroup_from_page(page, false);
+}
+
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_rsvd(struct page *page)
+{
+ return __hugetlb_cgroup_from_page(page, true);
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline int __set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return -1;
- page[2].private = (unsigned long)h_cg;
+ if (rsvd)
+ page[3].private = (unsigned long)h_cg;
+ else
+ page[2].private = (unsigned long)h_cg;
return 0;
}
+static inline int set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ return __set_hugetlb_cgroup(page, h_cg, false);
+}
+
+static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ return __set_hugetlb_cgroup(page, h_cg, true);
+}
+
static inline bool hugetlb_cgroup_disabled(void)
{
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
@@ -53,25 +115,67 @@ static inline bool hugetlb_cgroup_disabled(void)
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
+extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page);
+extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
struct page *page);
+extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
+ struct page *page);
+
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end);
+
+extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages);
+
extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
struct page *newhpage);
#else
+static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages)
+{
+}
+
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
return NULL;
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_resv(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_rsvd(struct page *page)
+{
+ return NULL;
+}
+
+static inline int set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ return 0;
+}
+
+static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+ struct hugetlb_cgroup *h_cg)
{
return 0;
}
@@ -81,28 +185,57 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
-static inline int
-hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup **ptr)
+static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
{
return 0;
}
-static inline void
-hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg,
- struct page *page)
+static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
{
}
static inline void
-hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+ unsigned long nr_pages,
+ struct page *page)
+{
+}
+static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
{
}
static inline void
-hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg)
+hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end)
{
}
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 585ad6fc3847..8c5459034f92 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -15,24 +15,19 @@
/**
* i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
- * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
- * triggered
* @irq: IRQ number, if the smbus_alert driver should take care of interrupt
* handling
*
* If irq is not specified, the smbus_alert driver doesn't take care of
* interrupt handling. In that case it is up to the I2C bus driver to either
* handle the interrupts or to poll for alerts.
- *
- * If irq is specified then it it crucial that alert_edge_triggered is
- * properly set.
*/
struct i2c_smbus_alert_setup {
int irq;
};
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup);
+struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f6b942150631..456fc17ecb1c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -39,6 +39,14 @@ enum i2c_slave_event;
typedef int (*i2c_slave_cb_t)(struct i2c_client *client,
enum i2c_slave_event event, u8 *val);
+/* I2C Frequency Modes */
+#define I2C_MAX_STANDARD_MODE_FREQ 100000
+#define I2C_MAX_FAST_MODE_FREQ 400000
+#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000
+#define I2C_MAX_TURBO_MODE_FREQ 1400000
+#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000
+#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000
+
struct module;
struct property_entry;
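Illustration (not part of the patch): a small hedged sketch of how an adapter driver might use the new mode constants to validate a requested bus clock; my_validate_speed() and the supported ceiling are hypothetical:

#include <linux/i2c.h>

static int my_validate_speed(u32 bus_freq_hz)
{
        /* The controller in this example supports up to Fast-mode Plus. */
        if (bus_freq_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
                return -EINVAL;
        if (bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)
                pr_info("using Fast-mode Plus timings\n");
        return 0;
}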
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 1659217e9b60..aefe758f4466 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -30,8 +30,7 @@ extern void ima_kexec_cmdline(const void *buf, int size);
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
-#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390) \
- || defined(CONFIG_PPC_SECURE_BOOT)
+#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
extern bool arch_ima_get_secureboot(void);
extern const char * const *arch_get_ima_policy(void);
#else
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index d6e2ab538ef2..8f2820c5e69e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -24,7 +24,7 @@ struct pt_regs;
* @handle_irq: highlevel irq-events handler
* @preflow_handler: handler called before the flow handler (currently used by sparc)
* @action: the irq action chain
- * @status: status information
+ * @status_use_accessors: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index ceca42de4438..61a9ced3aa50 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -58,16 +58,21 @@ do { \
} while (0)
# define lockdep_hrtimer_enter(__hrtimer) \
- do { \
- if (!__hrtimer->is_hard) \
- current->irq_config = 1; \
- } while (0)
-
-# define lockdep_hrtimer_exit(__hrtimer) \
- do { \
- if (!__hrtimer->is_hard) \
+({ \
+ bool __expires_hardirq = true; \
+ \
+ if (!__hrtimer->is_hard) { \
+ current->irq_config = 1; \
+ __expires_hardirq = false; \
+ } \
+ __expires_hardirq; \
+})
+
+# define lockdep_hrtimer_exit(__expires_hardirq) \
+ do { \
+ if (!__expires_hardirq) \
current->irq_config = 0; \
- } while (0)
+ } while (0)
# define lockdep_posixtimer_enter() \
do { \
@@ -102,8 +107,8 @@ do { \
# define lockdep_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
-# define lockdep_hrtimer_enter(__hrtimer) do { } while (0)
-# define lockdep_hrtimer_exit(__hrtimer) do { } while (0)
+# define lockdep_hrtimer_enter(__hrtimer) false
+# define lockdep_hrtimer_exit(__context) do { } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
# define lockdep_posixtimer_exit() do { } while (0)
# define lockdep_irq_work_enter(__work) do { } while (0)
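Illustration (not part of the patch): lockdep_hrtimer_enter() now evaluates to a token that must be passed back to lockdep_hrtimer_exit(). A hedged sketch of the call-site shape; run_one_timer() is hypothetical and the expiry logic is elided:

#include <linux/hrtimer.h>
#include <linux/irqflags.h>

static void run_one_timer(struct hrtimer *timer)
{
        bool expires_hardirq = lockdep_hrtimer_enter(timer);

        /* ... restart = timer->function(timer) ... */

        lockdep_hrtimer_exit(expires_hardirq);
}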
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5cde9e7c2664..31314ca7c635 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -190,7 +190,7 @@ void kasan_init_tags(void);
void *kasan_reset_tag(const void *addr);
-void kasan_report(unsigned long addr, size_t size,
+bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
#else /* CONFIG_KASAN_SW_TAGS */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index dded2e5a9f42..89f6a4214a70 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -37,8 +37,10 @@ enum kernfs_node_type {
KERNFS_LINK = 0x0004,
};
-#define KERNFS_TYPE_MASK 0x000f
-#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_TYPE_MASK 0x000f
+#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_MAX_USER_XATTRS 128
+#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10)
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
@@ -78,6 +80,11 @@ enum kernfs_root_flag {
* fhandle to access nodes of the fs.
*/
KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004,
+
+ /*
+ * Support user xattrs to be written to nodes rooted at this root.
+ */
+ KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
};
/* type-specific structures for kernfs_node union members */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 4ded94bcf274..2ab2d6d6aeab 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -127,7 +127,7 @@ struct key_type {
* much is copied into the buffer
* - shouldn't do the copy if the buffer is NULL
*/
- long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+ long (*read)(const struct key *key, char *buffer, size_t buflen);
/* handle request_key() for this type instead of invoking
* /sbin/request-key (optional)
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index fc4b0b10210f..86249476b57f 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -247,6 +247,37 @@ __kfifo_int_must_check_helper(int val)
})
/**
+ * kfifo_is_empty_spinlocked - returns true if the fifo is empty using
+ * a spinlock for locking
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked(fifo, lock) \
+({ \
+ unsigned long __flags; \
+ bool __ret; \
+ spin_lock_irqsave(lock, __flags); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock_irqrestore(lock, __flags); \
+ __ret; \
+})
+
+/**
+ * kfifo_is_empty_spinlocked_noirqsave - returns true if the fifo is empty
+ * using a spinlock for locking, doesn't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked_noirqsave(fifo, lock) \
+({ \
+ bool __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
+/**
* kfifo_is_full - returns true if the fifo is full
* @fifo: address of the fifo to be used
*/
@@ -517,6 +548,26 @@ __kfifo_uint_must_check_helper( \
__ret; \
})
+/**
+ * kfifo_in_spinlocked_noirqsave - put data into fifo using a spinlock for
+ * locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: the data to be added
+ * @n: number of elements to be added
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_in_spinlocked() which uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_in_spinlocked_noirqsave(fifo, buf, n, lock) \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_in(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
/* alias for kfifo_in_spinlocked, will be removed in a future release */
#define kfifo_in_locked(fifo, buf, n, lock) \
kfifo_in_spinlocked(fifo, buf, n, lock)
@@ -569,6 +620,28 @@ __kfifo_uint_must_check_helper( \
}) \
)
+/**
+ * kfifo_out_spinlocked_noirqsave - get data from the fifo using a spinlock
+ * for locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: pointer to the storage buffer
+ * @n: max. number of elements to get
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_out_spinlocked() which uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_out_spinlocked_noirqsave(fifo, buf, n, lock) \
+__kfifo_uint_must_check_helper( \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_out(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+}) \
+)
+
/* alias for kfifo_out_spinlocked, will be removed in a future release */
#define kfifo_out_locked(fifo, buf, n, lock) \
kfifo_out_spinlocked(fifo, buf, n, lock)
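Illustration (not part of the patch): the _noirqsave variants fit where the caller already runs with interrupts disabled, such as a hard IRQ handler, and so can skip the irqsave/irqrestore round trip. A hedged sketch with illustrative names:

#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_KFIFO(my_fifo, unsigned char, 128);
static DEFINE_SPINLOCK(my_lock);

/* Called from hard IRQ context: interrupts are already off. */
static void producer_in_irq(const unsigned char *buf, unsigned int n)
{
        kfifo_in_spinlocked_noirqsave(&my_fifo, buf, n, &my_lock);
}

/* Called from process context: must disable interrupts itself. */
static unsigned int consumer(unsigned char *buf, unsigned int n)
{
        return kfifo_out_spinlocked(&my_fifo, buf, n, &my_lock);
}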
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 0f9da966934e..8bbcaad7ef0f 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -165,7 +165,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
do { \
kthread_init_work(&(dwork)->work, (fn)); \
timer_setup(&(dwork)->timer, \
- kthread_delayed_work_timer_fn, 0); \
+ kthread_delayed_work_timer_fn, \
+ TIMER_IRQSAFE); \
} while (0)
int kthread_worker_fn(void *worker_ptr);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bcb9b2ac0791..6d58beb65454 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -360,6 +360,10 @@ static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *mem
return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}
+#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
+#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
+#endif
+
struct kvm_s390_adapter_int {
u64 ind_addr;
u64 summary_addr;
@@ -431,11 +435,11 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
*/
struct kvm_memslots {
u64 generation;
- struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
/* The mapping table from slot id to the index in memslots[]. */
short id_to_index[KVM_MEM_SLOTS_NUM];
atomic_t lru_slot;
int used_slots;
+ struct kvm_memory_slot memslots[];
};
struct kvm {
@@ -493,7 +497,7 @@ struct kvm {
#endif
long tlbs_dirty;
struct list_head devices;
- bool manual_dirty_log_protect;
+ u64 manual_dirty_log_protect;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
@@ -527,6 +531,11 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
+{
+ return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
+}
+
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
@@ -572,10 +581,11 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
return vcpu->vcpu_idx;
}
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
- memslot++)
+#define kvm_for_each_memslot(memslot, slots) \
+ for (memslot = &slots->memslots[0]; \
+ memslot < slots->memslots + slots->used_slots; memslot++) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -635,12 +645,15 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
-static inline struct kvm_memory_slot *
-id_to_memslot(struct kvm_memslots *slots, int id)
+static inline
+struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
+ if (index < 0)
+ return NULL;
+
slot = &slots->memslots[index];
WARN_ON(slot->id != id);
@@ -669,10 +682,7 @@ int kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages);
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -680,11 +690,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
-bool kvm_largepages_enabled(void);
-void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
@@ -704,7 +712,6 @@ void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
-kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
@@ -819,23 +826,20 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
-int kvm_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log, int *is_dirty);
-
-int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *flush);
-int kvm_clear_dirty_log_protect(struct kvm *kvm,
- struct kvm_clear_dirty_log *log, bool *flush);
-
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
unsigned long mask);
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log);
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
- struct kvm_clear_dirty_log *log);
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
+
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ int *is_dirty, struct kvm_memory_slot **memslot);
+#endif
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
@@ -882,9 +886,9 @@ void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void);
+int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
-int kvm_arch_check_processor_compat(void);
+int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
@@ -1018,6 +1022,8 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
* used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
* gfn_to_memslot() itself isn't here as an inline because that would
* bloat other code too much.
+ *
+ * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
*/
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
@@ -1026,6 +1032,9 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
int slot = atomic_read(&slots->lru_slot);
struct kvm_memory_slot *memslots = slots->memslots;
+ if (unlikely(!slots->used_slots))
+ return NULL;
+
if (gfn >= memslots[slot].base_gfn &&
gfn < memslots[slot].base_gfn + memslots[slot].npages)
return &memslots[slot];
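Illustration (not part of the patch): both lookup helpers can now return NULL, which callers must check. A hedged sketch; touch_slot() is hypothetical:

#include <linux/kvm_host.h>

static int touch_slot(struct kvm_memslots *slots, int id, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        slot = id_to_memslot(slots, id);
        if (!slot)      /* id_to_index[id] < 0: slot was never created */
                return -ENOENT;

        slot = search_memslots(slots, gfn);
        if (!slot)      /* empty memslot array (or gfn not backed) */
                return -ENOENT;

        return 0;
}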
diff --git a/include/linux/list.h b/include/linux/list.h
index 884216db3246..aff44d34f4e4 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -989,7 +989,7 @@ static inline void hlist_move_list(struct hlist_head *old,
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
- * @n: another &struct hlist_node to use as temporary storage
+ * @n: a &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e9ba01336d4e..1b4150ff64be 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1367,12 +1367,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
#ifdef CONFIG_MEMCG_KMEM
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void __memcg_kmem_uncharge(struct page *page, int order);
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg);
-void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
- unsigned int nr_pages);
+int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned int nr_pages);
+void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
+int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge_page(struct page *page, int order);
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1394,32 +1393,33 @@ static inline bool memcg_kmem_enabled(void)
return static_branch_unlikely(&memcg_kmem_enabled_key);
}
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
{
if (memcg_kmem_enabled())
- return __memcg_kmem_charge(page, gfp, order);
+ return __memcg_kmem_charge_page(page, gfp, order);
return 0;
}
-static inline void memcg_kmem_uncharge(struct page *page, int order)
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
if (memcg_kmem_enabled())
- __memcg_kmem_uncharge(page, order);
+ __memcg_kmem_uncharge_page(page, order);
}
-static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
- int order, struct mem_cgroup *memcg)
+static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned int nr_pages)
{
if (memcg_kmem_enabled())
- return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ return __memcg_kmem_charge(memcg, gfp, nr_pages);
return 0;
}
-static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
- struct mem_cgroup *memcg)
+static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
+ unsigned int nr_pages)
{
if (memcg_kmem_enabled())
- __memcg_kmem_uncharge_memcg(memcg, 1 << order);
+ __memcg_kmem_uncharge(memcg, nr_pages);
}
/*
@@ -1436,21 +1436,23 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p);
#else
-static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
{
return 0;
}
-static inline void memcg_kmem_uncharge(struct page *page, int order)
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}
-static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
{
return 0;
}
-static inline void __memcg_kmem_uncharge(struct page *page, int order)
+static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}
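Illustration (not part of the patch): after the rename, the page-based and memcg-based entry points are distinct. A hedged sketch of the page-based pair, with error handling condensed and hypothetical helper names:

#include <linux/gfp.h>
#include <linux/memcontrol.h>

static struct page *alloc_accounted(gfp_t gfp, int order)
{
        struct page *page = alloc_pages(gfp, order);

        if (page && memcg_kmem_charge_page(page, gfp, order)) {
                __free_pages(page, order);
                return NULL;
        }
        return page;
}

static void free_accounted(struct page *page, int order)
{
        memcg_kmem_uncharge_page(page, order);
        __free_pages(page, order);
}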
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 0b8d791b6669..439a89e758d8 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -26,7 +26,6 @@
struct memory_block {
unsigned long start_section_nr;
unsigned long state; /* serialized by the dev->lock */
- int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
int phys_device; /* to which fru does this belong? */
struct device dev;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index f4d59155f3d4..ef55115320fb 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -47,9 +47,13 @@ enum {
/* Types for control the zone type of onlined and offlined memory */
enum {
- MMOP_OFFLINE = -1,
- MMOP_ONLINE_KEEP,
+ /* Offline the memory. */
+ MMOP_OFFLINE = 0,
+ /* Online the memory. The zone depends on context; see default_zone_for_pfn(). */
+ MMOP_ONLINE,
+ /* Online the memory to ZONE_NORMAL. */
MMOP_ONLINE_KERNEL,
+ /* Online the memory to ZONE_MOVABLE. */
MMOP_ONLINE_MOVABLE,
};
@@ -113,7 +117,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions);
extern u64 max_mem_size;
-extern bool memhp_auto_online;
+extern int memhp_online_type_from_str(const char *str);
+
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int memhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
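
A hedged sketch of how the string helper and the new default online type can be wired to a boot parameter, mirroring the memhp_default_state= handling and assuming memhp_online_type_from_str() returns an MMOP_* value or a negative errno; the demo_* names are hypothetical:

#include <linux/init.h>
#include <linux/memory_hotplug.h>

static int __init demo_parse_online_type(char *str)
{
	const int online_type = memhp_online_type_from_str(str);

	if (online_type >= 0)
		memhp_default_online_type = online_type;

	return 1;	/* __setup() handlers return 1 when consumed */
}
__setup("demo_default_state=", demo_parse_online_type);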
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af416..8165278c348a 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -173,34 +173,7 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
-static inline bool vma_migratable(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- return false;
-
- /*
- * DAX device mappings require predictable access latency, so avoid
- * incurring periodic faults.
- */
- if (vma_is_dax(vma))
- return false;
-
-#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
- if (vma->vm_flags & VM_HUGETLB)
- return false;
-#endif
-
- /*
- * Migration allocates pages in the highest zone. If we cannot
- * do so then migration (at least from node to node) is not
- * possible.
- */
- if (vma->vm_file &&
- gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
- < policy_zone)
- return false;
- return true;
-}
+extern bool vma_migratable(struct vm_area_struct *vma);
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 6fefb09af7c3..8b37c4c9222c 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -98,11 +98,12 @@ struct dev_pagemap_ops {
* @ref: reference count that pins the devm_memremap_pages() mapping
* @internal_ref: internal reference if @ref is not provided by the caller
* @done: completion for @internal_ref
- * @dev: host device of the mapping for debug
- * @data: private data pointer for page_free()
* @type: memory type: see MEMORY_* in memory_hotplug.h
* @flags: PGMAP_* flags to specify detailed behavior
* @ops: method table
+ * @owner: an opaque pointer identifying the entity that manages this
+ * instance. Used by various helpers to make sure that no
+ * foreign ZONE_DEVICE memory is accessed.
*/
struct dev_pagemap {
struct vmem_altmap altmap;
@@ -113,6 +114,7 @@ struct dev_pagemap {
enum memory_type type;
unsigned int flags;
const struct dev_pagemap_ops *ops;
+ void *owner;
};
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
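
A hedged sketch of the new @owner cookie: the driver stores a pointer only it knows (here its own drvdata) so that later lookups and migrations can filter out foreign ZONE_DEVICE memory. demo_memremap() is hypothetical, and the @res/@ops setup is elided:

#include <linux/err.h>
#include <linux/memremap.h>

static int demo_memremap(struct device *dev, struct dev_pagemap *pgmap,
			 void *drvdata)
{
	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->owner = drvdata;	/* matched against migrate_vma.src_owner */

	return PTR_ERR_OR_ZERO(devm_memremap_pages(dev, pgmap));
}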
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 81e7dcbd94df..6e2962ef5b81 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -33,7 +33,7 @@ struct wm8994_ldo_pdata {
* DRC configurations are specified with a label and a set of register
* values to write (the enable bits will be ignored). At runtime an
* enumerated control will be presented for each DRC block allowing
- * the user to choose the configration to use.
+ * the user to choose the configuration to use.
*
* Configurations may be generated by hand or by using the DRC control
* panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
new file mode 100644
index 000000000000..ad1996001965
--- /dev/null
+++ b/include/linux/mhi.h
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+#ifndef _MHI_H_
+#define _MHI_H_
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/mutex.h>
+#include <linux/rwlock_types.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct mhi_buf_info;
+
+/**
+ * enum mhi_callback - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
+ * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
+ * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
+ */
+enum mhi_callback {
+ MHI_CB_IDLE,
+ MHI_CB_PENDING_DATA,
+ MHI_CB_LPM_ENTER,
+ MHI_CB_LPM_EXIT,
+ MHI_CB_EE_RDDM,
+ MHI_CB_EE_MISSION_MODE,
+ MHI_CB_SYS_ERROR,
+ MHI_CB_FATAL_ERROR,
+ MHI_CB_BW_REQ,
+};
+
+/**
+ * enum mhi_flags - Transfer flags
+ * @MHI_EOB: End of buffer for bulk transfer
+ * @MHI_EOT: End of transfer
+ * @MHI_CHAIN: Linked transfer
+ */
+enum mhi_flags {
+ MHI_EOB,
+ MHI_EOT,
+ MHI_CHAIN,
+};
+
+/**
+ * enum mhi_device_type - Device types
+ * @MHI_DEVICE_XFER: Handles data transfer
+ * @MHI_DEVICE_CONTROLLER: Control device
+ */
+enum mhi_device_type {
+ MHI_DEVICE_XFER,
+ MHI_DEVICE_CONTROLLER,
+};
+
+/**
+ * enum mhi_ch_type - Channel types
+ * @MHI_CH_TYPE_INVALID: Invalid channel type
+ * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
+ * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
+ * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
+ * multiple packets and send them as a single
+ * large packet to reduce CPU consumption
+ */
+enum mhi_ch_type {
+ MHI_CH_TYPE_INVALID = 0,
+ MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
+ MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
+ MHI_CH_TYPE_INBOUND_COALESCED = 3,
+};
+
+/**
+ * struct image_info - Firmware and RDDM table
+ * @mhi_buf: Buffer for firmware and RDDM table
+ * @entries: # of entries in table
+ */
+struct image_info {
+ struct mhi_buf *mhi_buf;
+ struct bhi_vec_entry *bhi_vec;
+ u32 entries;
+};
+
+/**
+ * struct mhi_link_info - BW requirement
+ * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
+ * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
+ */
+struct mhi_link_info {
+ unsigned int target_link_speed;
+ unsigned int target_link_width;
+};
+
+/**
+ * enum mhi_ee_type - Execution environment types
+ * @MHI_EE_PBL: Primary Bootloader
+ * @MHI_EE_SBL: Secondary Bootloader
+ * @MHI_EE_AMSS: Modem, aka the primary runtime EE
+ * @MHI_EE_RDDM: Ram dump download mode
+ * @MHI_EE_WFW: WLAN firmware mode
+ * @MHI_EE_PTHRU: Passthrough
+ * @MHI_EE_EDL: Embedded downloader
+ */
+enum mhi_ee_type {
+ MHI_EE_PBL,
+ MHI_EE_SBL,
+ MHI_EE_AMSS,
+ MHI_EE_RDDM,
+ MHI_EE_WFW,
+ MHI_EE_PTHRU,
+ MHI_EE_EDL,
+ MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
+ MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+ MHI_EE_NOT_SUPPORTED,
+ MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_state - MHI states
+ * @MHI_STATE_RESET: Reset state
+ * @MHI_STATE_READY: Ready state
+ * @MHI_STATE_M0: M0 state
+ * @MHI_STATE_M1: M1 state
+ * @MHI_STATE_M2: M2 state
+ * @MHI_STATE_M3: M3 state
+ * @MHI_STATE_M3_FAST: M3 Fast state
+ * @MHI_STATE_BHI: BHI state
+ * @MHI_STATE_SYS_ERR: System Error state
+ */
+enum mhi_state {
+ MHI_STATE_RESET = 0x0,
+ MHI_STATE_READY = 0x1,
+ MHI_STATE_M0 = 0x2,
+ MHI_STATE_M1 = 0x3,
+ MHI_STATE_M2 = 0x4,
+ MHI_STATE_M3 = 0x5,
+ MHI_STATE_M3_FAST = 0x6,
+ MHI_STATE_BHI = 0x7,
+ MHI_STATE_SYS_ERR = 0xFF,
+ MHI_STATE_MAX,
+};
+
+/**
+ * enum mhi_ch_ee_mask - Execution environment mask for channel
+ * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
+ * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
+ * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
+ * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
+ * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
+ * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
+ * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
+ */
+enum mhi_ch_ee_mask {
+ MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
+ MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
+ MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
+ MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
+ MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
+ MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
+ MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
+};
+
+/**
+ * enum mhi_er_data_type - Event ring data types
+ * @MHI_ER_DATA: Only client data over this ring
+ * @MHI_ER_CTRL: MHI control data and client data
+ */
+enum mhi_er_data_type {
+ MHI_ER_DATA,
+ MHI_ER_CTRL,
+};
+
+/**
+ * enum mhi_db_brst_mode - Doorbell mode
+ * @MHI_DB_BRST_DISABLE: Burst mode disable
+ * @MHI_DB_BRST_ENABLE: Burst mode enable
+ */
+enum mhi_db_brst_mode {
+ MHI_DB_BRST_DISABLE = 0x2,
+ MHI_DB_BRST_ENABLE = 0x3,
+};
+
+/**
+ * struct mhi_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @local_elements: The local ring length of the channel
+ * @event_ring: The event ring index that services this channel
+ * @dir: Direction that data may flow on this channel
+ * @type: Channel type
+ * @ee_mask: Execution Environment mask for this channel
+ * @pollcfg: Polling configuration for burst mode. 0 is default. Milliseconds
+ *           for UL channels, multiple of 8 ring elements for DL channels
+ * @doorbell: Doorbell mode
+ * @lpm_notify: The channel master requires low power mode notifications
+ * @offload_channel: The client manages the channel completely
+ * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
+ * @auto_queue: Framework will automatically queue buffers for DL traffic
+ * @auto_start: Automatically start (open) this channel
+ * @wake_capable: Channel capable of waking up the system
+ */
+struct mhi_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ u32 local_elements;
+ u32 event_ring;
+ enum dma_data_direction dir;
+ enum mhi_ch_type type;
+ u32 ee_mask;
+ u32 pollcfg;
+ enum mhi_db_brst_mode doorbell;
+ bool lpm_notify;
+ bool offload_channel;
+ bool doorbell_mode_switch;
+ bool auto_queue;
+ bool auto_start;
+ bool wake_capable;
+};
+
+/**
+ * struct mhi_event_config - Event ring configuration structure for controller
+ * @num_elements: The number of elements that can be queued to this ring
+ * @irq_moderation_ms: Delay irq for additional events to be aggregated
+ * @irq: IRQ associated with this ring
+ * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
+ * @priority: Priority of this ring. Use 1 for now
+ * @mode: Doorbell mode
+ * @data_type: Type of data this ring will process
+ * @hardware_event: This ring is associated with hardware channels
+ * @client_managed: This ring is client managed
+ * @offload_channel: This ring is associated with an offloaded channel
+ */
+struct mhi_event_config {
+ u32 num_elements;
+ u32 irq_moderation_ms;
+ u32 irq;
+ u32 channel;
+ u32 priority;
+ enum mhi_db_brst_mode mode;
+ enum mhi_er_data_type data_type;
+ bool hardware_event;
+ bool client_managed;
+ bool offload_channel;
+};
+
+/**
+ * struct mhi_controller_config - Root MHI controller configuration
+ * @max_channels: Maximum number of channels supported
+ * @timeout_ms: Timeout value for operations. 0 means use default
+ * @buf_len: Size of automatically allocated buffers. 0 means use default
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ * @num_events: Number of event rings defined in @event_cfg
+ * @event_cfg: Array of defined event rings
+ * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
+ * @m2_no_db: Host is not allowed to ring DB in M2 state
+ */
+struct mhi_controller_config {
+ u32 max_channels;
+ u32 timeout_ms;
+ u32 buf_len;
+ u32 num_channels;
+ struct mhi_channel_config *ch_cfg;
+ u32 num_events;
+ struct mhi_event_config *event_cfg;
+ bool use_bounce_buf;
+ bool m2_no_db;
+};
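
A hedged configuration sketch tying the three structures together: one UL/DL channel pair sharing a single control event ring. All names, channel numbers, and sizes are illustrative only:

#include <linux/kernel.h>
#include <linux/mhi.h>

static struct mhi_channel_config demo_channels[] = {
	{
		.name = "DEMO_OUT",
		.num = 0,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_TO_DEVICE,
		.ee_mask = MHI_CH_EE_AMSS,
		.doorbell = MHI_DB_BRST_DISABLE,
	},
	{
		.name = "DEMO_IN",
		.num = 1,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = MHI_CH_EE_AMSS,
		.doorbell = MHI_DB_BRST_DISABLE,
		.auto_queue = true,	/* core replenishes DL buffers */
	},
};

static struct mhi_event_config demo_events[] = {
	{
		.num_elements = 128,
		.irq_moderation_ms = 1,
		.irq = 1,
		.channel = U32_MAX,	/* not dedicated to one channel */
		.priority = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
	},
};

static struct mhi_controller_config demo_config = {
	.max_channels = 128,
	.timeout_ms = 2000,
	.num_channels = ARRAY_SIZE(demo_channels),
	.ch_cfg = demo_channels,
	.num_events = ARRAY_SIZE(demo_events),
	.event_cfg = demo_events,
};

A controller driver would then pass &demo_config as the second argument of mhi_register_controller(), declared further down.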
+
+/**
+ * struct mhi_controller - Master MHI controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * controller (required)
+ * @mhi_dev: MHI device instance for the controller
+ * @regs: Base address of MHI MMIO register space (required)
+ * @bhi: Points to base of MHI BHI register space
+ * @bhie: Points to base of MHI BHIe register space
+ * @wake_db: MHI WAKE doorbell register address
+ * @iova_start: IOMMU starting address for data (required)
+ * @iova_stop: IOMMU stop address for data (required)
+ * @fw_image: Firmware image name for normal booting (required)
+ * @edl_image: Firmware image name for emergency download mode (optional)
+ * @rddm_size: RAM dump size that the host should allocate for debugging purposes
+ * @sbl_size: SBL image size downloaded through BHIe (optional)
+ * @seg_len: BHIe vector size (optional)
+ * @fbc_image: Points to firmware image buffer
+ * @rddm_image: Points to RAM dump buffer
+ * @mhi_chan: Points to the channel configuration table
+ * @lpm_chans: List of channels that require LPM notifications
+ * @irq: base irq # to request (required)
+ * @max_chan: Maximum number of channels the controller supports
+ * @total_ev_rings: Total # of event rings allocated
+ * @hw_ev_rings: Number of hardware event rings
+ * @sw_ev_rings: Number of software event rings
+ * @nr_irqs_req: Number of IRQs required to operate (optional)
+ * @nr_irqs: Number of IRQ allocated by bus master (required)
+ * @family_number: MHI controller family number
+ * @device_number: MHI controller device number
+ * @major_version: MHI controller major revision number
+ * @minor_version: MHI controller minor revision number
+ * @mhi_event: MHI event ring configurations table
+ * @mhi_cmd: MHI command ring configurations table
+ * @mhi_ctxt: MHI device context, shared memory between host and device
+ * @pm_mutex: Mutex for suspend/resume operation
+ * @pm_lock: Lock for protecting MHI power management state
+ * @timeout_ms: Timeout in ms for state transitions
+ * @pm_state: MHI power management state
+ * @db_access: DB access states
+ * @ee: MHI device execution environment
+ * @dev_state: MHI device state
+ * @dev_wake: Device wakeup count
+ * @pending_pkts: Pending packets for the controller
+ * @transition_list: List of MHI state transitions
+ * @transition_lock: Lock for protecting MHI state transition list
+ * @wlock: Lock for protecting device wakeup
+ * @mhi_link_info: Device bandwidth info
+ * @st_worker: State transition worker
+ * @fw_worker: Firmware download worker
+ * @syserr_worker: System error worker
+ * @state_event: State change event
+ * @status_cb: CB function to notify power states of the device (required)
+ * @link_status: CB function to query link status of the device (required)
+ * @wake_get: CB function to assert device wake (optional)
+ * @wake_put: CB function to de-assert device wake (optional)
+ * @wake_toggle: CB function to assert and de-assert device wake (optional)
+ * @runtime_get: CB function to request controller runtime resume (required)
+ * @runtime_put: CB function to decrement pm usage (required)
+ * @map_single: CB function to create TRE buffer
+ * @unmap_single: CB function to destroy TRE buffer
+ * @buffer_len: Bounce buffer length
+ * @bounce_buf: Use of bounce buffer
+ * @fbc_download: MHI host needs to do complete image transfer (optional)
+ * @pre_init: MHI host needs to do pre-initialization before power up
+ * @wake_set: Device wakeup set flag
+ *
+ * Fields marked as (required) need to be populated by the controller driver
+ * before calling mhi_register_controller(). The fields marked as (optional)
+ * can be populated depending on the use case.
+ *
+ * The following fields are present for the purpose of implementing any device
+ * specific quirks or customizations for specific MHI revisions used in the
+ * device by the controller drivers. The MHI stack will just populate these
+ * fields during mhi_register_controller():
+ * family_number
+ * device_number
+ * major_version
+ * minor_version
+ */
+struct mhi_controller {
+ struct device *cntrl_dev;
+ struct mhi_device *mhi_dev;
+ void __iomem *regs;
+ void __iomem *bhi;
+ void __iomem *bhie;
+ void __iomem *wake_db;
+
+ dma_addr_t iova_start;
+ dma_addr_t iova_stop;
+ const char *fw_image;
+ const char *edl_image;
+ size_t rddm_size;
+ size_t sbl_size;
+ size_t seg_len;
+ struct image_info *fbc_image;
+ struct image_info *rddm_image;
+ struct mhi_chan *mhi_chan;
+ struct list_head lpm_chans;
+ int *irq;
+ u32 max_chan;
+ u32 total_ev_rings;
+ u32 hw_ev_rings;
+ u32 sw_ev_rings;
+ u32 nr_irqs_req;
+ u32 nr_irqs;
+ u32 family_number;
+ u32 device_number;
+ u32 major_version;
+ u32 minor_version;
+
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_ctxt *mhi_ctxt;
+
+ struct mutex pm_mutex;
+ rwlock_t pm_lock;
+ u32 timeout_ms;
+ u32 pm_state;
+ u32 db_access;
+ enum mhi_ee_type ee;
+ enum mhi_state dev_state;
+ atomic_t dev_wake;
+ atomic_t pending_pkts;
+ struct list_head transition_list;
+ spinlock_t transition_lock;
+ spinlock_t wlock;
+ struct mhi_link_info mhi_link_info;
+ struct work_struct st_worker;
+ struct work_struct fw_worker;
+ struct work_struct syserr_worker;
+ wait_queue_head_t state_event;
+
+ void (*status_cb)(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb);
+ int (*link_status)(struct mhi_controller *mhi_cntrl);
+ void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
+ int (*runtime_get)(struct mhi_controller *mhi_cntrl);
+ void (*runtime_put)(struct mhi_controller *mhi_cntrl);
+ int (*map_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ void (*unmap_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+
+ size_t buffer_len;
+ bool bounce_buf;
+ bool fbc_download;
+ bool pre_init;
+ bool wake_set;
+};
+
+/**
+ * struct mhi_device - Structure representing a MHI device which binds
+ * to channels
+ * @id: Pointer to MHI device ID struct
+ * @chan_name: Name of the channel to which the device binds
+ * @mhi_cntrl: Controller the device belongs to
+ * @ul_chan: UL channel for the device
+ * @dl_chan: DL channel for the device
+ * @dev: Driver model device node for the MHI device
+ * @dev_type: MHI device type
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @dev_wake: Device wakeup counter
+ */
+struct mhi_device {
+ const struct mhi_device_id *id;
+ const char *chan_name;
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *ul_chan;
+ struct mhi_chan *dl_chan;
+ struct device dev;
+ enum mhi_device_type dev_type;
+ int ul_chan_id;
+ int dl_chan_id;
+ u32 dev_wake;
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @bytes_xferd: # of bytes transferred
+ * @dir: Channel direction
+ * @transaction_status: Status of last transaction
+ */
+struct mhi_result {
+ void *buf_addr;
+ size_t bytes_xferd;
+ enum dma_data_direction dir;
+ int transaction_status;
+};
+
+/**
+ * struct mhi_buf - MHI Buffer description
+ * @buf: Virtual address of the buffer
+ * @name: Buffer label. For an offload channel, the configuration name must be:
+ * ECA - Event context array data
+ * CCA - Channel context array data
+ * @dma_addr: IOMMU address of the buffer
+ * @len: # of bytes
+ */
+struct mhi_buf {
+ void *buf;
+ const char *name;
+ dma_addr_t dma_addr;
+ size_t len;
+};
+
+/**
+ * struct mhi_driver - Structure representing a MHI client driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL data transfer
+ * @dl_xfer_cb: CB function for DL data transfer
+ * @status_cb: CB functions for asynchronous status
+ * @driver: Device driver model driver
+ */
+struct mhi_driver {
+ const struct mhi_device_id *id_table;
+ int (*probe)(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_device *mhi_dev);
+ void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
+ struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+/**
+ * mhi_register_controller - Register MHI controller
+ * @mhi_cntrl: MHI controller to register
+ * @config: Configuration to use for the controller
+ */
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ struct mhi_controller_config *config);
+
+/**
+ * mhi_unregister_controller - Unregister MHI controller
+ * @mhi_cntrl: MHI controller to unregister
+ */
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
+
+/*
+ * module_mhi_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_driver_register() and
+ * mhi_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_driver_register, \
+ mhi_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_driver_register(mhi_drv) \
+ __mhi_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: Driver associated with the device
+ * @owner: The module owner
+ */
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
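
A hedged skeleton of a client driver bound to a hypothetical "DEMO" channel pair, showing how the registration pieces above compose; every demo_* identifier is illustrative:

#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static int demo_probe(struct mhi_device *mhi_dev,
		      const struct mhi_device_id *id)
{
	/* Start the channels before any mhi_queue_*() call. */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void demo_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

static void demo_ul_xfer_cb(struct mhi_device *mhi_dev,
			    struct mhi_result *result)
{
	/* UL buffer consumed; check result->transaction_status */
}

static void demo_dl_xfer_cb(struct mhi_device *mhi_dev,
			    struct mhi_result *result)
{
	/* result->bytes_xferd bytes of DL data at result->buf_addr */
}

static const struct mhi_device_id demo_mhi_match[] = {
	{ .chan = "DEMO" },
	{}
};
MODULE_DEVICE_TABLE(mhi, demo_mhi_match);

static struct mhi_driver demo_mhi_driver = {
	.id_table = demo_mhi_match,
	.probe = demo_probe,
	.remove = demo_remove,
	.ul_xfer_cb = demo_ul_xfer_cb,
	.dl_xfer_cb = demo_dl_xfer_cb,
	.driver = {
		.name = "demo_mhi",
	},
};
module_mhi_driver(demo_mhi_driver);
MODULE_LICENSE("GPL v2");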
+
+/**
+ * mhi_set_mhi_state - Set MHI device state
+ * @mhi_cntrl: MHI controller
+ * @state: State to set
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_state state);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up.
+ * This is optional; call this before power up if
+ * the controller does not want the bus framework to
+ * automatically free any allocated memory during the
+ * shutdown process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Start MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_sync_power_up - Start MHI power up sequence and wait till the device
+ * enters a valid EE state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_power_down - Start MHI power down sequence
+ * @mhi_cntrl: MHI controller
+ * @graceful: Link is still accessible, so do a graceful shutdown process
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - Free any allocated memory after power down
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_download_rddm_img - Download ramdump image from device for
+ * debugging purposes.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: Download rddm image during kernel panic
+ */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force device into rddm mode
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_mhi_state - Get MHI state of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_device_get - Disable device low power mode
+ * @mhi_dev: Device associated with the channel
+ */
+void mhi_device_get(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_get_sync - Disable device low power mode. Synchronously
+ * take the controller out of suspended state
+ * @mhi_dev: Device associated with the channel
+ */
+int mhi_device_get_sync(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_put - Re-enable device low power mode
+ * @mhi_dev: Device associated with the channel
+ */
+void mhi_device_put(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer - Setup channel for data transfer
+ * @mhi_dev: Device associated with the channels
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer - Unprepare the channels
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_poll - Poll for any available data in DL direction
+ * @mhi_dev: Device associated with the channels
+ * @budget: # of events to process
+ */
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
+
+/**
+ * mhi_queue_dma - Send or receive DMA mapped buffers from client device
+ * over MHI channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @mhi_buf: Buffer for holding the DMA mapped data
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_buf - Send or receive raw buffers from client device over MHI
+ * channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @buf: Buffer for holding the data
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @skb: Buffer for holding SKBs
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags);
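
A hedged one-liner showing the UL side of the queue API: ownership of @buf passes to the core until the matching ul_xfer_cb() fires with the same buf_addr; demo_send() is hypothetical:

static int demo_send(struct mhi_device *mhi_dev, void *buf, size_t len)
{
	return mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
}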
+
+#endif /* _MHI_H_ */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 72120061b7d4..3e546cbf03dd 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -196,6 +196,14 @@ struct migrate_vma {
unsigned long npages;
unsigned long start;
unsigned long end;
+
+ /*
+ * Set to the owner value also stored in page->pgmap->owner for
+ * migrating out of device private memory. If set only device
+ * private pages with this owner are migrated. If not set
+ * device private pages are not migrated at all.
+ */
+ void *src_owner;
};
int migrate_vma_setup(struct migrate_vma *args);
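
A hedged sketch of a device driver migrating its own device-private pages back to system RAM; only pages whose pgmap->owner matches .src_owner are collected, everything else is skipped. demo_* names are hypothetical and destination-page allocation is elided:

#include <linux/migrate.h>

static int demo_migrate_to_ram(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end,
			       unsigned long *src, unsigned long *dst,
			       void *drvdata)
{
	struct migrate_vma args = {
		.vma		= vma,
		.src		= src,
		.dst		= dst,
		.start		= start,
		.end		= end,
		.src_owner	= drvdata,	/* == pgmap->owner */
	};
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* ... allocate system pages into dst[] and copy the data ... */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}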
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index becde6981a95..c7a93002a3c1 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -25,20 +25,31 @@
#define TEMP_MINOR 131 /* Temperature Sensor */
#define APM_MINOR_DEV 134
#define RTC_MINOR 135
-#define EFI_RTC_MINOR 136 /* EFI Time services */
+/*#define EFI_RTC_MINOR 136 was EFI Time services */
#define VHCI_MINOR 137
#define SUN_OPENPROM_MINOR 139
#define DMAPI_MINOR 140 /* unused */
#define NVRAM_MINOR 144
+#define SBUS_FLASH_MINOR 152
#define SGI_MMTIMER 153
+#define PMU_MINOR 154
#define STORE_QUEUE_MINOR 155 /* unused */
+#define LCD_MINOR 156
+#define AC_MINOR 157
+#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */
+#define NWFLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
+#define ENVCTRL_MINOR 162
#define I2O_MINOR 166
+#define UCTRL_MINOR 174
#define AGPGART_MINOR 175
+#define TOSH_MINOR_DEV 181
#define HWRNG_MINOR 183
#define MICROCODE_MINOR 184
+#define KEYPAD_MINOR 185
#define IRNET_MINOR 187
#define D7S_MINOR 193
#define VFIO_MINOR 196
+#define PXA3XX_GCU_MINOR 197
#define TUN_MINOR 200
#define CUSE_MINOR 203
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
@@ -49,6 +60,7 @@
#define MISC_MCELOG_MINOR 227
#define HPET_MINOR 228
#define FUSE_MINOR 229
+#define SNAPSHOT_MINOR 231
#define KVM_MINOR 232
#define BTRFS_MINOR 234
#define AUTOFS_MINOR 235
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0e62c3db45e5..2b90097a6cf9 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1211,6 +1211,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index d143b8bd55c9..6f8f79ef829b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -213,23 +213,6 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-struct mlx5_bfreg_info {
- u32 *sys_pages;
- int num_low_latency_bfregs;
- unsigned int *count;
-
- /*
- * protect bfreg allocation data structs
- */
- struct mutex lock;
- u32 ver;
- bool lib_uar_4k;
- u32 num_sys_pages;
- u32 num_static_sys_pages;
- u32 total_num_bfregs;
- u32 num_dyn_bfregs;
-};
-
struct mlx5_cmd_first {
__be32 data[4];
};
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index a5cf5c76f348..e2d13e074067 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -77,6 +77,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_EGRESS,
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
+ MLX5_FLOW_NAMESPACE_RDMA_TX,
};
enum {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cc55cee3b53c..69b27c7dfc3e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -709,7 +709,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
- u8 reserved_at_a00[0x200];
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
@@ -879,7 +879,11 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp_csum[0x1];
u8 swp_lso[0x1];
u8 cqe_checksum_full[0x1];
- u8 reserved_at_24[0x5];
+ u8 tunnel_stateless_geneve_tx[0x1];
+ u8 tunnel_stateless_mpls_over_udp[0x1];
+ u8 tunnel_stateless_mpls_over_gre[0x1];
+ u8 tunnel_stateless_vxlan_gpe[0x1];
+ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
u8 tunnel_stateless_ip_over_ip[0x1];
u8 reserved_at_2a[0x6];
u8 max_vxlan_udp_ports[0x8];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c54fb96cb1e6..e2f938c5a9d8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -27,6 +27,7 @@
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
+#include <linux/sched.h>
struct mempolicy;
struct anon_vma;
@@ -356,10 +357,12 @@ extern unsigned int kobjsize(const void *objp);
/*
* Special vmas that are non-mergable, non-mlock()able.
- * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+/* This mask prevents VMA from being scanned with khugepaged */
+#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
+
/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
@@ -378,15 +381,75 @@ extern unsigned int kobjsize(const void *objp);
*/
extern pgprot_t protection_map[16];
-#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
-#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
-#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
-#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
-#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
-#define FAULT_FLAG_TRIED 0x20 /* Second try */
-#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
-#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
-#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
+/**
+ * Fault flag definitions.
+ *
+ * @FAULT_FLAG_WRITE: Fault was a write fault.
+ * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
+ * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying.
+ * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
+ * @FAULT_FLAG_TRIED: The fault has been tried once.
+ * @FAULT_FLAG_USER: The fault originated in userspace.
+ * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
+ * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
+ * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
+ *
+ * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
+ * whether we would allow page faults to retry by specifying these two
+ * fault flags correctly. Currently there can be three legal combinations:
+ *
+ * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
+ * this is the first try
+ *
+ * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
+ * we've already tried at least once
+ *
+ * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
+ *
+ * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
+ * be used. Note that page faults can be allowed to retry for multiple times,
+ * in which case we'll have an initial fault with flags (a) then later on
+ * continuous faults with flags (b). We should always try to detect pending
+ * signals before a retry to make sure the continuous page faults can still be
+ * interrupted if necessary.
+ */
+#define FAULT_FLAG_WRITE 0x01
+#define FAULT_FLAG_MKWRITE 0x02
+#define FAULT_FLAG_ALLOW_RETRY 0x04
+#define FAULT_FLAG_RETRY_NOWAIT 0x08
+#define FAULT_FLAG_KILLABLE 0x10
+#define FAULT_FLAG_TRIED 0x20
+#define FAULT_FLAG_USER 0x40
+#define FAULT_FLAG_REMOTE 0x80
+#define FAULT_FLAG_INSTRUCTION 0x100
+#define FAULT_FLAG_INTERRUPTIBLE 0x200
+
+/*
+ * The default fault flags that should be used by most of the
+ * arch-specific page fault handlers.
+ */
+#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \
+ FAULT_FLAG_KILLABLE | \
+ FAULT_FLAG_INTERRUPTIBLE)
+
+/**
+ * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
+ *
+ * This is mostly used for places where we want to try to avoid taking
+ * the mmap_sem for too long a time when waiting for another condition
+ * to change, in which case we can try to be polite and release the
+ * mmap_sem in the first round to avoid potential starvation of other
+ * processes that would also want the mmap_sem.
+ *
+ * Return: true if the page fault allows retry and this is the first
+ * attempt of the fault handling; false otherwise.
+ */
+static inline bool fault_flag_allow_retry_first(unsigned int flags)
+{
+ return (flags & FAULT_FLAG_ALLOW_RETRY) &&
+ (!(flags & FAULT_FLAG_TRIED));
+}
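
A hedged sketch of the retry pattern the legal combinations above describe, loosely modeled on the arch fault handlers; demo_do_fault() is hypothetical and error paths are trimmed:

#include <linux/mm.h>
#include <linux/sched/signal.h>

static vm_fault_t demo_do_fault(struct mm_struct *mm, unsigned long addr)
{
	unsigned int flags = FAULT_FLAG_DEFAULT;	/* combination (a) */
	struct vm_area_struct *vma;
	vm_fault_t fault;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (!vma) {
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;
	}

	fault = handle_mm_fault(vma, addr, flags);
	if (fault & VM_FAULT_RETRY) {
		/* The core already dropped mmap_sem for us. */
		if (fault_flag_allow_retry_first(flags) &&
		    !fatal_signal_pending(current)) {
			flags |= FAULT_FLAG_TRIED;	/* combination (b) */
			goto retry;
		}
		return fault;
	}

	up_read(&mm->mmap_sem);
	return fault;
}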
#define FAULT_FLAG_TRACE \
{ FAULT_FLAG_WRITE, "WRITE" }, \
@@ -397,7 +460,8 @@ extern pgprot_t protection_map[16];
{ FAULT_FLAG_TRIED, "TRIED" }, \
{ FAULT_FLAG_USER, "USER" }, \
{ FAULT_FLAG_REMOTE, "REMOTE" }, \
- { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
+ { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
+ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
@@ -541,6 +605,36 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
return !vma->vm_ops;
}
+static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+{
+ int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+ if (!maybe_stack)
+ return false;
+
+ if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+ VM_STACK_INCOMPLETE_SETUP)
+ return true;
+
+ return false;
+}
+
+static inline bool vma_is_foreign(struct vm_area_struct *vma)
+{
+ if (!current->mm)
+ return true;
+
+ if (current->mm != vma->vm_mm)
+ return true;
+
+ return false;
+}
+
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+}
+
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
@@ -770,6 +864,24 @@ static inline unsigned int compound_order(struct page *page)
return page[1].compound_order;
}
+static inline bool hpage_pincount_available(struct page *page)
+{
+ /*
+ * Can the page->hpage_pinned_refcount field be used? That field is in
+ * the 3rd page of the compound page, so the smallest (2-page) compound
+ * pages cannot support it.
+ */
+ page = compound_head(page);
+ return PageCompound(page) && compound_order(page) > 1;
+}
+
+static inline int compound_pincount(struct page *page)
+{
+ VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
+ page = compound_head(page);
+ return atomic_read(compound_pincount_ptr(page));
+}
+
static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
@@ -1001,6 +1113,8 @@ static inline void get_page(struct page *page)
page_ref_inc(page);
}
+bool __must_check try_grab_page(struct page *page, unsigned int flags);
+
static inline __must_check bool try_get_page(struct page *page)
{
page = compound_head(page);
@@ -1029,29 +1143,87 @@ static inline void put_page(struct page *page)
__put_page(page);
}
-/**
- * unpin_user_page() - release a gup-pinned page
- * @page: pointer to page to be released
+/*
+ * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
+ * the page's refcount so that two separate items are tracked: the original page
+ * reference count, and also a new count of how many pin_user_pages() calls were
+ * made against the page. ("gup-pinned" is another term for the latter).
*
- * Pages that were pinned via pin_user_pages*() must be released via either
- * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
- * that eventually such pages can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special handling.
+ * With this scheme, pin_user_pages() becomes special: such pages are marked as
+ * distinct from normal pages. As such, the unpin_user_page() call (and its
+ * variants) must be used in order to release gup-pinned pages.
*
- * unpin_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. unpin_user_page() calls must
- * be perfectly matched up with pin*() calls.
+ * Choice of value:
+ *
+ * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
+ * counts with respect to pin_user_pages() and unpin_user_page() becomes
+ * simpler, due to the fact that adding an even power of two to the page
+ * refcount has the effect of using only the upper N bits, for the code that
+ * counts up using the bias value. This means that the lower bits are left for
+ * the exclusive use of the original code that increments and decrements by one
+ * (or at least, by much smaller values than the bias value).
+ *
+ * Of course, once the lower bits overflow into the upper bits (and this is
+ * OK, because subtraction recovers the original values), then visual inspection
+ * no longer suffices to directly view the separate counts. However, for normal
+ * applications that don't have huge page reference counts, this won't be an
+ * issue.
+ *
+ * Locking: the lockless algorithm described in page_cache_get_speculative()
+ * and page_cache_gup_pin_speculative() provides safe operation for
+ * get_user_pages and page_mkclean and other calls that race to set up page
+ * table entries.
*/
-static inline void unpin_user_page(struct page *page)
-{
- put_page(page);
-}
+#define GUP_PIN_COUNTING_BIAS (1U << 10)
+void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
-
void unpin_user_pages(struct page **pages, unsigned long npages);
+/**
+ * page_maybe_dma_pinned() - report if a page is pinned for DMA.
+ *
+ * This function checks if a page has been pinned via a call to
+ * pin_user_pages*().
+ *
+ * For non-huge pages, the return value is partially fuzzy: false is not fuzzy,
+ * because it means "definitely not pinned for DMA", but true means "probably
+ * pinned for DMA, but possibly a false positive due to having at least
+ * GUP_PIN_COUNTING_BIAS worth of normal page references".
+ *
+ * False positives are OK, because: a) it's unlikely for a page to get that many
+ * refcounts, and b) all the callers of this routine are expected to be able to
+ * deal gracefully with a false positive.
+ *
+ * For huge pages, the result will be exactly correct. That's because we have
+ * more tracking data available: the 3rd struct page in the compound page is
+ * used to track the pincount (instead of using the GUP_PIN_COUNTING_BIAS
+ * scheme).
+ *
+ * For more information, please see Documentation/vm/pin_user_pages.rst.
+ *
+ * @page: pointer to page to be queried.
+ * Return: True, if it is likely that the page has been "dma-pinned".
+ *         False, if the page is definitely not dma-pinned.
+ */
+static inline bool page_maybe_dma_pinned(struct page *page)
+{
+ if (hpage_pincount_available(page))
+ return compound_pincount(page) > 0;
+
+ /*
+ * page_ref_count() is signed. If that refcount overflows, then
+ * page_ref_count() returns a negative value, and callers will avoid
+ * further incrementing the refcount.
+ *
+ * Here, for that overflow case, use the signed bit to count a little
+ * bit higher via unsigned math, and thus still get an accurate result.
+ */
+ return ((unsigned int)page_ref_count(compound_head(page))) >=
+ GUP_PIN_COUNTING_BIAS;
+}
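
A hedged sketch of the intended pairing: FOLL_PIN acquisition through pin_user_pages_fast(), a page_maybe_dma_pinned() sanity check, and release through the dirty-marking unpin helper; demo_dma_to_user() is hypothetical:

#include <linux/mm.h>

static int demo_dma_to_user(unsigned long uaddr, int nr_pages,
			    struct page **pages)
{
	int pinned;

	pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	WARN_ON_ONCE(!page_maybe_dma_pinned(pages[0]));

	/* ... map for DMA, run the transfer, unmap ... */

	unpin_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}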
+
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
@@ -1599,9 +1771,26 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
bool need_rmap_locks);
+
+/*
+ * Flags used by change_protection(). For now we make it a bitmap so
+ * that we can pass in multiple flags just like parameters. However,
+ * for now, all the callers only use one of the flags at a time.
+ */
+/* Whether we should allow dirty bit accounting */
+#define MM_CP_DIRTY_ACCT (1UL << 0)
+/* Whether this protection change is for NUMA hints */
+#define MM_CP_PROT_NUMA (1UL << 1)
+/* Whether this change is for write protecting */
+#define MM_CP_UFFD_WP (1UL << 2) /* do wp */
+#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */
+#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
+ MM_CP_UFFD_WP_RESOLVE)
+
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa);
+ unsigned long cp_flags);
extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
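
A hedged sketch of how the two removed int parameters translate onto the bitmap; vma/start/end/newprot/dirty_accountable are assumed to be in scope, as in the mprotect.c and mempolicy.c callers:

	/* mprotect path (dirty bit accounting allowed or not): */
	pages = change_protection(vma, start, end, newprot,
				  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);

	/* NUMA hinting path: */
	pages = change_protection(vma, start, end, PAGE_NONE,
				  MM_CP_PROT_NUMA);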
@@ -2364,26 +2553,7 @@ struct vm_unmapped_area_info {
unsigned long align_offset;
};
-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
-
-/*
- * Search for an unmapped address range.
- *
- * We are looking for a range that:
- * - does not intersect with any VMA;
- * - is contained within the [low_limit, high_limit) interval;
- * - is at least the desired size.
- * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
- */
-static inline unsigned long
-vm_unmapped_area(struct vm_unmapped_area_info *info)
-{
- if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
- return unmapped_area_topdown(info);
- else
- return unmapped_area(info);
-}
+extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
@@ -2867,6 +3037,23 @@ extern long copy_huge_page_from_user(struct page *dst_page,
const void __user *usr_src,
unsigned int pages_per_huge_page,
bool allow_pagefault);
+
+/**
+ * vma_is_special_huge - Are transhuge page-table entries considered special?
+ * @vma: Pointer to the struct vm_area_struct to consider
+ *
+ * Whether transhuge page-table entries are considered "special" following
+ * the definition in vm_normal_page().
+ *
+ * Return: true if transhuge page-table entries should be considered special,
+ * false otherwise.
+ */
+static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
+{
+ return vma_is_dax(vma) || (vma->vm_file &&
+ (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+}
+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 6f2fef7b0784..219bef41d87c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -6,19 +6,20 @@
#include <linux/swap.h>
/**
- * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * page_is_file_lru - should the page be on a file LRU or anon LRU?
* @page: the page to test
*
- * Returns 1 if @page is page cache page backed by a regular filesystem,
- * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
- * Used by functions that manipulate the LRU lists, to sort a page
- * onto the right LRU list.
+ * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
+ * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
+ * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
+ * functions that manipulate the LRU lists, to sort a page onto the right LRU
+ * list.
*
* We would like to get this info without a page flag, but the state
* needs to survive until the page is last deleted from the LRU, which
* could be as far down as __page_cache_release.
*/
-static inline int page_is_file_cache(struct page *page)
+static inline int page_is_file_lru(struct page *page)
{
return !PageSwapBacked(page);
}
@@ -75,7 +76,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
*/
static inline enum lru_list page_lru_base_type(struct page *page)
{
- if (page_is_file_cache(page))
+ if (page_is_file_lru(page))
return LRU_INACTIVE_FILE;
return LRU_INACTIVE_ANON;
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c28911c3afa8..4aba6c0c2ba8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -137,7 +137,7 @@ struct page {
};
struct { /* Second tail page of compound page */
unsigned long _compound_pad_1; /* compound_head */
- unsigned long _compound_pad_2;
+ atomic_t hpage_pinned_refcount;
/* For both global and memcg */
struct list_head deferred_list;
};
@@ -226,6 +226,11 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page)
return &page[1].compound_mapcount;
}
+static inline atomic_t *compound_pincount_ptr(struct page *page)
+{
+ return &page[2].hpage_pinned_refcount;
+}
+
/*
* Used for sizing the vmemmap region on some architectures
*/
@@ -284,8 +289,8 @@ struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
/*
- * This struct defines a memory VMM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
+ * This struct describes a virtual memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
*/
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 462f6873905a..e9892bf9eba9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -100,41 +100,6 @@ struct free_area {
unsigned long nr_free;
};
-/* Used for pages not on another list */
-static inline void add_to_free_area(struct page *page, struct free_area *area,
- int migratetype)
-{
- list_add(&page->lru, &area->free_list[migratetype]);
- area->nr_free++;
-}
-
-/* Used for pages not on another list */
-static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
- int migratetype)
-{
- list_add_tail(&page->lru, &area->free_list[migratetype]);
- area->nr_free++;
-}
-
-#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
-/* Used to preserve page allocation order entropy */
-void add_to_free_area_random(struct page *page, struct free_area *area,
- int migratetype);
-#else
-static inline void add_to_free_area_random(struct page *page,
- struct free_area *area, int migratetype)
-{
- add_to_free_area(page, area, migratetype);
-}
-#endif
-
-/* Used for pages which are on another list */
-static inline void move_to_free_area(struct page *page, struct free_area *area,
- int migratetype)
-{
- list_move(&page->lru, &area->free_list[migratetype]);
-}
-
static inline struct page *get_page_from_free_area(struct free_area *area,
int migratetype)
{
@@ -142,15 +107,6 @@ static inline struct page *get_page_from_free_area(struct free_area *area,
struct page, lru);
}
-static inline void del_page_from_free_area(struct page *page,
- struct free_area *area)
-{
- list_del(&page->lru);
- __ClearPageBuddy(page);
- set_page_private(page, 0);
- area->nr_free--;
-}
-
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
return list_empty(&area->free_list[migratetype]);
@@ -243,6 +199,8 @@ enum node_stat_item {
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
+ NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
+ NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
NR_VM_NODE_STAT_ITEMS
};
@@ -706,7 +664,6 @@ struct deferred_split {
* Memory statistics and page replacement data structures are maintained on a
* per-zone basis.
*/
-struct bootmem_data;
typedef struct pglist_data {
struct zone node_zones[MAX_NR_ZONES];
struct zonelist node_zonelists[MAX_ZONELISTS];
@@ -1185,7 +1142,9 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+#endif
/* See declaration of similar field in struct zone */
unsigned long pageblock_flags[0];
};
@@ -1372,7 +1331,7 @@ static inline int pfn_valid(unsigned long pfn)
}
#endif
-static inline int pfn_present(unsigned long pfn)
+static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
@@ -1409,7 +1368,7 @@ void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
-#define pfn_present pfn_valid
+#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index f8b66d43acf6..4c2ddd0941a7 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -819,4 +819,17 @@ struct wmi_device_id {
const void *context;
};
+#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
+#define MHI_NAME_SIZE 32
+
+/**
+ * struct mhi_device_id - MHI device identification
+ * @chan: MHI channel name
+ * @driver_data: driver data
+ */
+struct mhi_device_id {
+ const char chan[MHI_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 249e8d9bfbcd..2d1f4a61f4ac 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/uio.h>
+#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/of.h>
@@ -194,10 +195,43 @@ struct mtd_debug_info {
const char *partid;
};
+/**
+ * struct mtd_part - MTD partition specific fields
+ *
+ * @node: list node used to add an MTD partition to the parent partition list
+ * @offset: offset of the partition relatively to the parent offset
+ * @flags: original flags (before the mtdpart logic decided to tweak them based
+ * on flash constraints, like eraseblock/pagesize alignment)
+ *
+ * This struct is embedded in mtd_info and contains partition-specific
+ * properties/fields.
+ */
+struct mtd_part {
+ struct list_head node;
+ u64 offset;
+ u32 flags;
+};
+
+/**
+ * struct mtd_master - MTD master specific fields
+ *
+ * @partitions_lock: lock protecting accesses to the partition list. Protects
+ * not only the master partition list, but also all
+ * sub-partitions.
+ * @suspended: set to 1 when the device is suspended, 0 otherwise
+ *
+ * This struct is embedded in mtd_info and contains master-specific
+ * properties/fields. The master is the root MTD device from the MTD partition
+ * point of view.
+ */
+struct mtd_master {
+ struct mutex partitions_lock;
+ unsigned int suspended : 1;
+};
+
struct mtd_info {
u_char type;
uint32_t flags;
- uint32_t orig_flags; /* Flags as before running mtd checks */
uint64_t size; // Total size of the MTD
/* "Major" erase size for the device. Naïve users may take this
@@ -339,8 +373,52 @@ struct mtd_info {
int usecount;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
+
+ /*
+ * Parent device from the MTD partition point of view.
+ *
+ * MTD masters do not have any parent, MTD partitions do. The parent
+ * MTD device can itself be a partition.
+ */
+ struct mtd_info *parent;
+
+ /* List of partitions attached to this MTD device */
+ struct list_head partitions;
+
+ union {
+ struct mtd_part part;
+ struct mtd_master master;
+ };
};
+static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
+{
+ while (mtd->parent)
+ mtd = mtd->parent;
+
+ return mtd;
+}
+
+static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
+{
+ while (mtd->parent) {
+ ofs += mtd->part.offset;
+ mtd = mtd->parent;
+ }
+
+ return ofs;
+}
+
+static inline bool mtd_is_partition(const struct mtd_info *mtd)
+{
+ return mtd->parent;
+}
+
+static inline bool mtd_has_partitions(const struct mtd_info *mtd)
+{
+ return !list_empty(&mtd->partitions);
+}
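
A hedged sketch of the pattern the hunks below apply throughout the accessors: resolve the master once, translate the partition-relative offset, then call the master's hook; demo_block_isbad() is hypothetical:

#include <linux/mtd/mtd.h>

static int demo_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_block_isbad)
		return 0;

	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}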
+
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
@@ -392,13 +470,16 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
loff_t ofs, size_t len)
{
- if (!mtd->_max_bad_blocks)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_max_bad_blocks)
return -ENOTSUPP;
if (mtd->size < (len + ofs) || ofs < 0)
return -EINVAL;
- return mtd->_max_bad_blocks(mtd, ofs, len);
+ return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
+ len);
}
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
@@ -439,8 +520,10 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
static inline void mtd_sync(struct mtd_info *mtd)
{
- if (mtd->_sync)
- mtd->_sync(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (master->_sync)
+ master->_sync(master);
}
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
@@ -452,13 +535,31 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
static inline int mtd_suspend(struct mtd_info *mtd)
{
- return mtd->_suspend ? mtd->_suspend(mtd) : 0;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (master->master.suspended)
+ return 0;
+
+ ret = master->_suspend ? master->_suspend(master) : 0;
+ if (ret)
+ return ret;
+
+ master->master.suspended = 1;
+ return 0;
}
static inline void mtd_resume(struct mtd_info *mtd)
{
- if (mtd->_resume)
- mtd->_resume(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->master.suspended)
+ return;
+
+ if (master->_resume)
+ master->_resume(master);
+
+ master->master.suspended = 0;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -538,7 +639,9 @@ static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
- return mtd->_read_oob && mtd->_write_oob;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return master->_read_oob && master->_write_oob;
}
static inline int mtd_type_is_nand(const struct mtd_info *mtd)
@@ -548,7 +651,9 @@ static inline int mtd_type_is_nand(const struct mtd_info *mtd)
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
- return !!mtd->_block_isbad;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return !!master->_block_isbad;
}
/* Kernel-side ioctl definitions */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 11cb0c50cd84..e545c050d3e8 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -105,7 +105,6 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser);
module_driver(__mtd_part_parser, register_mtd_parser, \
deregister_mtd_parser)
-int mtd_is_partition(const struct mtd_info *mtd);
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 4ab9bccfcde0..1e76196f9829 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1064,6 +1064,8 @@ struct nand_legacy {
* @lock: lock protecting the suspended field. Also used to
* serialize accesses to the NAND device.
* @suspended: set to 1 when the device is suspended, 0 when it's not.
+ * @suspend: [REPLACEABLE] specific NAND device suspend operation
+ * @resume: [REPLACEABLE] specific NAND device resume operation
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -1077,6 +1079,8 @@ struct nand_legacy {
* @manufacturer: [INTERN] Contains manufacturer information
* @manufacturer.desc: [INTERN] Contains manufacturer's description
* @manufacturer.priv: [INTERN] Contains manufacturer private information
+ * @lock_area: [REPLACEABLE] specific NAND chip lock operation
+ * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation
*/
struct nand_chip {
@@ -1117,6 +1121,8 @@ struct nand_chip {
struct mutex lock;
unsigned int suspended : 1;
+ int (*suspend)(struct nand_chip *chip);
+ void (*resume)(struct nand_chip *chip);
uint8_t *oob_poi;
struct nand_controller *controller;
@@ -1136,6 +1142,9 @@ struct nand_chip {
const struct nand_manufacturer *desc;
void *priv;
} manufacturer;
+
+ int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
};
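
A hedged sketch of how a manufacturer driver might populate the new hooks from its init path; every my_* symbol below is hypothetical and only illustrates the intended wiring.

static int my_suspend(struct nand_chip *chip) { return 0; }
static void my_resume(struct nand_chip *chip) { }
static int my_lock(struct nand_chip *chip, loff_t ofs, uint64_t len) { return 0; }
static int my_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len) { return 0; }

static int my_vendor_init(struct nand_chip *chip)
{
	chip->suspend = my_suspend;		/* vendor-specific suspend */
	chip->resume = my_resume;		/* vendor-specific resume */
	chip->lock_area = my_lock;		/* vendor-specific locking */
	chip->unlock_area = my_unlock;
	return 0;
}
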
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
@@ -1215,7 +1224,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
* struct nand_flash_dev - NAND Flash Device ID Structure
* @name: a human-readable name of the NAND chip
* @dev_id: the device ID (the second byte of the full chip ID array)
- * @mfr_id: manufecturer ID part of the full chip ID array (refers the same
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers the same
* memory address as ``id[0]``)
* @dev_id: device ID part of the full chip ID array (refers the same memory
* address as ``id[1]``)
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 5abd91cc6dfa..1e2af0ec1f03 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -12,23 +12,6 @@
#include <linux/spi/spi-mem.h>
/*
- * Manufacturer IDs
- *
- * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
- * Sometimes these are the same as CFI IDs, but sometimes they aren't.
- */
-#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
-#define SNOR_MFR_GIGADEVICE 0xc8
-#define SNOR_MFR_INTEL CFI_MFR_INTEL
-#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
-#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
-#define SNOR_MFR_ISSI CFI_MFR_PMC
-#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
-#define SNOR_MFR_SPANSION CFI_MFR_AMD
-#define SNOR_MFR_SST CFI_MFR_SST
-#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */
-
-/*
* Note on opcode nomenclature: some opcodes have a format like
* SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
* of I/O lines used for the opcode, address, and data (respectively). The
@@ -128,7 +111,9 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_BP3 BIT(5) /* Block protect 3 */
#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
+#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */
#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
/* Spansion/Cypress specific status bits */
@@ -137,6 +122,8 @@
#define SR1_QUAD_EN_BIT6 BIT(6)
+#define SR_BP_SHIFT 2
+
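
A minimal sketch, assuming a flash whose fourth block-protect bit sits at BIT(5); parts that place it at BIT(6) would test SR_BP3_BIT6 instead. my_get_bp() is an invented helper.

static u8 my_get_bp(u8 sr)
{
	/* collapse the scattered BP bits into a contiguous protection level */
	u8 bp = (sr & (SR_BP0 | SR_BP1 | SR_BP2)) >> SR_BP_SHIFT;

	if (sr & SR_BP3)
		bp |= BIT(3);
	return bp;
}
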
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
@@ -225,110 +212,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
-enum spi_nor_option_flags {
- SNOR_F_USE_FSR = BIT(0),
- SNOR_F_HAS_SR_TB = BIT(1),
- SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
- SNOR_F_READY_XSR_RDY = BIT(3),
- SNOR_F_USE_CLSR = BIT(4),
- SNOR_F_BROKEN_RESET = BIT(5),
- SNOR_F_4B_OPCODES = BIT(6),
- SNOR_F_HAS_4BAIT = BIT(7),
- SNOR_F_HAS_LOCK = BIT(8),
- SNOR_F_HAS_16BIT_SR = BIT(9),
- SNOR_F_NO_READ_CR = BIT(10),
- SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
-
-};
-
-/**
- * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
- * @size: the size of the sector/block erased by the erase type.
- * JEDEC JESD216B imposes erase sizes to be a power of 2.
- * @size_shift: @size is a power of 2, the shift is stored in
- * @size_shift.
- * @size_mask: the size mask based on @size_shift.
- * @opcode: the SPI command op code to erase the sector/block.
- * @idx: Erase Type index as sorted in the Basic Flash Parameter
- * Table. It will be used to synchronize the supported
- * Erase Types with the ones identified in the SFDP
- * optional tables.
- */
-struct spi_nor_erase_type {
- u32 size;
- u32 size_shift;
- u32 size_mask;
- u8 opcode;
- u8 idx;
-};
-
-/**
- * struct spi_nor_erase_command - Used for non-uniform erases
- * The structure is used to describe a list of erase commands to be executed
- * once we validate that the erase can be performed. The elements in the list
- * are run-length encoded.
- * @list: for inclusion into the list of erase commands.
- * @count: how many times the same erase command should be
- * consecutively used.
- * @size: the size of the sector/block erased by the command.
- * @opcode: the SPI command op code to erase the sector/block.
- */
-struct spi_nor_erase_command {
- struct list_head list;
- u32 count;
- u32 size;
- u8 opcode;
-};
-
-/**
- * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
- * @offset: the offset in the data array of erase region start.
- * LSB bits are used as a bitmask encoding flags to
- * determine if this region is overlaid, if this region is
- * the last in the SPI NOR flash memory and to indicate
- * all the supported erase commands inside this region.
- * The erase types are sorted in ascending order with the
- * smallest Erase Type size being at BIT(0).
- * @size: the size of the region in bytes.
- */
-struct spi_nor_erase_region {
- u64 offset;
- u64 size;
-};
-
-#define SNOR_ERASE_TYPE_MAX 4
-#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
-
-#define SNOR_LAST_REGION BIT(4)
-#define SNOR_OVERLAID_REGION BIT(5)
-
-#define SNOR_ERASE_FLAGS_MAX 6
-#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
-
-/**
- * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
- * @regions: array of erase regions. The regions are consecutive in
- * address space. Walking through the regions is done
- * incrementally.
- * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
- * sector size (legacy implementation).
- * @erase_type: an array of erase types shared by all the regions.
- * The erase types are sorted in ascending order, with the
- * smallest Erase Type size being the first member in the
- * erase_type array.
- * @uniform_erase_type: bitmask encoding erase types that can erase the
- * entire memory. This member is completed at init by
- * uniform and non-uniform SPI NOR flash memories if they
- * support at least one erase type that can erase the
- * entire memory.
- */
-struct spi_nor_erase_map {
- struct spi_nor_erase_region *regions;
- struct spi_nor_erase_region uniform_region;
- struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
- u8 uniform_erase_type;
-};
-
/**
 * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
* supported by the SPI controller (bus master).
@@ -404,61 +287,7 @@ struct spi_nor_hwcaps {
#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \
SNOR_HWCAPS_PP_MASK)
-struct spi_nor_read_command {
- u8 num_mode_clocks;
- u8 num_wait_states;
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-struct spi_nor_pp_command {
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-enum spi_nor_read_command_index {
- SNOR_CMD_READ,
- SNOR_CMD_READ_FAST,
- SNOR_CMD_READ_1_1_1_DTR,
-
- /* Dual SPI */
- SNOR_CMD_READ_1_1_2,
- SNOR_CMD_READ_1_2_2,
- SNOR_CMD_READ_2_2_2,
- SNOR_CMD_READ_1_2_2_DTR,
-
- /* Quad SPI */
- SNOR_CMD_READ_1_1_4,
- SNOR_CMD_READ_1_4_4,
- SNOR_CMD_READ_4_4_4,
- SNOR_CMD_READ_1_4_4_DTR,
-
- /* Octal SPI */
- SNOR_CMD_READ_1_1_8,
- SNOR_CMD_READ_1_8_8,
- SNOR_CMD_READ_8_8_8,
- SNOR_CMD_READ_1_8_8_DTR,
-
- SNOR_CMD_READ_MAX
-};
-
-enum spi_nor_pp_command_index {
- SNOR_CMD_PP,
-
- /* Quad SPI */
- SNOR_CMD_PP_1_1_4,
- SNOR_CMD_PP_1_4_4,
- SNOR_CMD_PP_4_4_4,
-
- /* Octal SPI */
- SNOR_CMD_PP_1_1_8,
- SNOR_CMD_PP_1_8_8,
- SNOR_CMD_PP_8_8_8,
-
- SNOR_CMD_PP_MAX
-};
-
-/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */
+/* Forward declaration that is used in 'struct spi_nor_controller_ops' */
struct spi_nor;
/**
@@ -489,68 +318,13 @@ struct spi_nor_controller_ops {
int (*erase)(struct spi_nor *nor, loff_t offs);
};
-/**
- * struct spi_nor_locking_ops - SPI NOR locking methods
- * @lock: lock a region of the SPI NOR.
- * @unlock: unlock a region of the SPI NOR.
- * @is_locked: check if a region of the SPI NOR is completely locked
- */
-struct spi_nor_locking_ops {
- int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
-};
-
-/**
- * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings.
- * Includes legacy flash parameters and settings that can be overwritten
- * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216
- * Serial Flash Discoverable Parameters (SFDP) tables.
- *
- * @size: the flash memory density in bytes.
- * @page_size: the page size of the SPI NOR flash memory.
- * @hwcaps: describes the read and page program hardware
- * capabilities.
- * @reads: read capabilities ordered by priority: the higher index
- * in the array, the higher priority.
- * @page_programs: page program capabilities ordered by priority: the
- * higher index in the array, the higher priority.
- * @erase_map: the erase map parsed from the SFDP Sector Map Parameter
- * Table.
- * @quad_enable: enables SPI NOR quad mode.
- * @set_4byte: puts the SPI NOR in 4 byte addressing mode.
- * @convert_addr: converts an absolute address into something the flash
- * will understand. Particularly useful when pagesize is
- * not a power-of-2.
- * @setup: configures the SPI NOR memory. Useful for SPI NOR
- * flashes that have peculiarities to the SPI NOR standard
- * e.g. different opcodes, specific address calculation,
- * page size, etc.
- * @locking_ops: SPI NOR locking methods.
- */
-struct spi_nor_flash_parameter {
- u64 size;
- u32 page_size;
-
- struct spi_nor_hwcaps hwcaps;
- struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
- struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
-
- struct spi_nor_erase_map erase_map;
-
- int (*quad_enable)(struct spi_nor *nor);
- int (*set_4byte)(struct spi_nor *nor, bool enable);
- u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
- int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
-
- const struct spi_nor_locking_ops *locking_ops;
-};
-
-/**
- * struct flash_info - Forward declaration of a structure used internally by
- * spi_nor_scan()
+/*
+ * Forward declarations that are used internally by the core and manufacturer
+ * drivers.
*/
struct flash_info;
+struct spi_nor_manufacturer;
+struct spi_nor_flash_parameter;
/**
 * struct spi_nor - Structure for defining the SPI NOR layer
@@ -562,6 +336,7 @@ struct flash_info;
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
 * @info: spi-nor part JEDEC MFR id and other info
+ * @manufacturer: spi-nor manufacturer
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -578,6 +353,7 @@ struct flash_info;
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
* hooks, or dynamically when parsing the SFDP tables.
+ * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
* @priv: the private data
*/
struct spi_nor {
@@ -588,6 +364,7 @@ struct spi_nor {
u8 *bouncebuf;
size_t bouncebuf_size;
const struct flash_info *info;
+ const struct spi_nor_manufacturer *manufacturer;
u32 page_size;
u8 addr_width;
u8 erase_opcode;
@@ -602,40 +379,16 @@ struct spi_nor {
const struct spi_nor_controller_ops *controller_ops;
- struct spi_nor_flash_parameter params;
+ struct spi_nor_flash_parameter *params;
+
+ struct {
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc;
+ } dirmap;
void *priv;
};
-static u64 __maybe_unused
-spi_nor_region_is_last(const struct spi_nor_erase_region *region)
-{
- return region->offset & SNOR_LAST_REGION;
-}
-
-static u64 __maybe_unused
-spi_nor_region_end(const struct spi_nor_erase_region *region)
-{
- return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
-}
-
-static void __maybe_unused
-spi_nor_region_mark_end(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_LAST_REGION;
-}
-
-static void __maybe_unused
-spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
-{
- region->offset |= SNOR_OVERLAID_REGION;
-}
-
-static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
-{
- return !!nor->params.erase_map.uniform_erase_type;
-}
-
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
struct device_node *np)
{
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 4ea558bd3c46..1077c45721ff 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -32,9 +32,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_READID_OP(ndummy, buf, len) \
+#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
- SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_ADDR(naddr, 0, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
@@ -176,37 +176,46 @@ struct spinand_device;
* @data: buffer containing the id bytes. Currently 4 bytes large, but can
* be extended if required
* @len: ID length
- *
- * struct_spinand_id->data contains all bytes returned after a READ_ID command,
- * including dummy bytes if the chip does not emit ID bytes right after the
- * READ_ID command. The responsibility to extract real ID bytes is left to
- * struct_manufacurer_ops->detect().
*/
struct spinand_id {
u8 data[SPINAND_MAX_ID_LEN];
int len;
};
+enum spinand_readid_method {
+ SPINAND_READID_METHOD_OPCODE,
+ SPINAND_READID_METHOD_OPCODE_ADDR,
+ SPINAND_READID_METHOD_OPCODE_DUMMY,
+};
+
+/**
+ * struct spinand_devid - SPI NAND device id structure
+ * @id: device id of current chip
+ * @len: number of bytes in device id
+ * @method: method to read chip id
+ * There are 3 possible variants:
+ * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
+ * after read_id opcode.
+ * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
+ * read_id opcode + 1-byte address.
+ * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
+ * read_id opcode + 1 dummy byte.
+ */
+struct spinand_devid {
+ const u8 *id;
+ const u8 len;
+ const enum spinand_readid_method method;
+};
+
/**
 * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
- * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
- * the core calls the struct_manufacurer_ops->detect() hook of each
- * registered manufacturer until one of them return 1. Note that
- * the first thing to check in this hook is that the manufacturer ID
- * in struct_spinand_device->id matches the manufacturer whose
- * ->detect() hook has been called. Should return 1 if there's a
- * match, 0 if the manufacturer ID does not match and a negative
- * error code otherwise. When true is returned, the core assumes
- * that properties of the NAND chip (spinand->base.memorg and
- * spinand->base.eccreq) have been filled
* @init: initialize a SPI NAND device
* @cleanup: cleanup a SPI NAND device
*
* Each SPI NAND manufacturer driver should implement this interface so that
- * NAND chips coming from this vendor can be detected and initialized properly.
+ * NAND chips coming from this vendor can be initialized properly.
*/
struct spinand_manufacturer_ops {
- int (*detect)(struct spinand_device *spinand);
int (*init)(struct spinand_device *spinand);
void (*cleanup)(struct spinand_device *spinand);
};
@@ -215,11 +224,16 @@ struct spinand_manufacturer_ops {
* struct spinand_manufacturer - SPI NAND manufacturer instance
* @id: manufacturer ID
* @name: manufacturer name
+ * @chips: supported SPI NANDs under current manufacturer
+ * @nchips: number of SPI NANDs available in chips array
* @ops: manufacturer operations
*/
struct spinand_manufacturer {
u8 id;
char *name;
+ const struct spinand_info *chips;
+ const size_t nchips;
const struct spinand_manufacturer_ops *ops;
};
@@ -270,6 +284,7 @@ struct spinand_ecc_info {
};
#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
/**
* struct spinand_info - Structure used to describe SPI NAND chips
@@ -291,7 +306,7 @@ struct spinand_ecc_info {
*/
struct spinand_info {
const char *model;
- u16 devid;
+ struct spinand_devid devid;
u32 flags;
struct nand_memory_organization memorg;
struct nand_ecc_req eccreq;
@@ -305,6 +320,13 @@ struct spinand_info {
unsigned int target);
};
+#define SPINAND_ID(__method, ...) \
+ { \
+ .id = (const u8[]){ __VA_ARGS__ }, \
+ .len = sizeof((u8[]){ __VA_ARGS__ }), \
+ .method = __method, \
+ }
+
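
A hedged sketch of how a chips[] entry might carry its ID after this change; the two ID bytes and the variable name are invented.

/* Two raw ID bytes, returned after the opcode plus one dummy byte. */
static const struct spinand_devid my_devid =
	SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x12);
/* expands to .id = (const u8[]){ 0xab, 0x12 }, .len = 2, .method = ... */
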
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
{ \
.read_cache = __read, \
@@ -451,9 +473,10 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
nanddev_set_of_node(&spinand->base, np);
}
-int spinand_match_and_init(struct spinand_device *dev,
+int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
- unsigned int table_size, u16 devid);
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 0dd980d7318f..a4bb992623c4 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -15,7 +15,7 @@ enum { MAX_NESTED_LINKS = 8 };
/*
* Type of the last component on LOOKUP_PARENT
*/
-enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
+enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
/* pathwalk mode */
#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */
@@ -23,6 +23,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */
#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */
#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */
+#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */
#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */
#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */
@@ -64,7 +65,6 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
-extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 5d5b91e54f73..73eda45f1cfd 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -354,6 +354,7 @@ static inline unsigned long nfs_save_change_attribute(struct inode *dir)
extern int nfs_sync_mapping(struct address_space *mapping);
extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
extern void nfs_zap_caches(struct inode *);
+extern void nfs_set_inode_stale(struct inode *inode);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
struct nfs_fattr *, struct nfs4_label *);
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 0bbd587fac6a..c32c15216da3 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -139,9 +139,14 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
+extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
+extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern int nfs_page_set_headlock(struct nfs_page *req);
+extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
/*
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 6838c149f335..440230488025 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1266,16 +1266,25 @@ struct nfstime4 {
struct pnfs_commit_bucket {
struct list_head written;
struct list_head committing;
- struct pnfs_layout_segment *wlseg;
- struct pnfs_layout_segment *clseg;
+ struct pnfs_layout_segment *lseg;
struct nfs_writeverf direct_verf;
};
+struct pnfs_commit_array {
+ struct list_head cinfo_list;
+ struct list_head lseg_list;
+ struct pnfs_layout_segment *lseg;
+ struct rcu_head rcu;
+ refcount_t refcount;
+ unsigned int nbuckets;
+ struct pnfs_commit_bucket buckets[];
+};
+
struct pnfs_ds_commit_info {
- int nwritten;
- int ncommitting;
- int nbuckets;
- struct pnfs_commit_bucket *buckets;
+ struct list_head commits;
+ unsigned int nwritten;
+ unsigned int ncommitting;
+ const struct pnfs_commit_ops *ops;
};
struct nfs41_state_protection {
@@ -1386,22 +1395,11 @@ struct nfs41_free_stateid_res {
unsigned int status;
};
-static inline void
-nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
-{
- kfree(cinfo->buckets);
-}
-
#else
struct pnfs_ds_commit_info {
};
-static inline void
-nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
-{
-}
-
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index d3776be48c53..1b311d27c9b8 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -63,6 +63,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
+int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val);
/* direct nvmem device read/write interface */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
@@ -138,6 +139,12 @@ static inline int nvmem_cell_read_u32(struct device *dev,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_u64(struct device *dev,
+ const char *cell_id, u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
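
Usage mirrors the existing u16/u32 helpers; a hedged example with an invented cell name:

#include <linux/nvmem-consumer.h>

static int my_read_serial(struct device *dev, u64 *serial)
{
	/* "serial-number" is a hypothetical nvmem cell identifier */
	return nvmem_cell_read_u64(dev, "serial-number", serial);
}
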
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index eac7ab109df4..763022ed3456 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -10,21 +10,27 @@ struct of_pci_range_parser {
struct device_node *node;
const __be32 *range;
const __be32 *end;
- int np;
+ int na;
+ int ns;
int pna;
bool dma;
};
+#define of_range_parser of_pci_range_parser
struct of_pci_range {
- u32 pci_space;
- u64 pci_addr;
+ union {
+ u64 pci_addr;
+ u64 bus_addr;
+ };
u64 cpu_addr;
u64 size;
u32 flags;
};
+#define of_range of_pci_range
#define for_each_of_pci_range(parser, range) \
for (; of_pci_range_parser_one(parser, range);)
+#define for_each_of_range for_each_of_pci_range
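
A sketch of the bus-agnostic spelling these aliases enable, reusing the existing of_pci_range_parser_init()/of_pci_range_parser_one() entry points; my_dump_ranges() is hypothetical.

#include <linux/of_address.h>

static void my_dump_ranges(struct device_node *np)
{
	struct of_range_parser parser;	/* alias of of_pci_range_parser */
	struct of_range range;		/* alias of of_pci_range */

	if (of_pci_range_parser_init(&parser, np))
		return;
	for_each_of_range(&parser, &range)
		pr_info("bus 0x%llx -> cpu 0x%llx (0x%llx bytes)\n",
			range.bus_addr, range.cpu_addr, range.size);
}
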
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
@@ -143,4 +149,3 @@ static inline int of_pci_range_to_resource(struct of_pci_range *range,
#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
-
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 16967390a3fe..f821095218b0 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -11,9 +11,8 @@
#define __LINUX_OF_GPIO_H
#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio.h> /* FIXME: Shouldn't be here */
#include <linux/of.h>
struct device_node;
@@ -34,6 +33,8 @@ enum of_gpio_flags {
#ifdef CONFIG_OF_GPIO
+#include <linux/kernel.h>
+
/*
* OF GPIO chip for memory mapped banks
*/
@@ -63,6 +64,8 @@ extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
#else /* CONFIG_OF_GPIO */
+#include <linux/errno.h>
+
/* Drivers may not strictly depend on the GPIO support, so let them link. */
static inline int of_get_named_gpio_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 77de28bfefb0..222f6f7b2bb3 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -63,6 +63,11 @@
* page_waitqueue(page) is a wait queue of all tasks waiting for the page
* to become unlocked.
*
+ * PG_swapbacked is set when a page uses swap as backing storage. These are
+ * usually PageAnon or shmem pages, but note that even anonymous pages might
+ * lose their PG_swapbacked flag when they can simply be dropped (e.g. as a
+ * result of MADV_FREE).
+ *
* PG_uptodate tells whether the page's contents is valid. When a read
* completes, the page becomes uptodate, unless a disk I/O error happened.
*
@@ -163,6 +168,9 @@ enum pageflags {
/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
+
+ /* Only valid for buddy pages. Used to track pages that are reported */
+ PG_reported = PG_uptodate,
};
#ifndef __GENERATING_BOUNDS_H
@@ -432,6 +440,14 @@ PAGEFLAG(Idle, idle, PF_ANY)
#endif
/*
+ * PageReported() is used to track reported free pages within the Buddy
+ * allocator. We can use the non-atomic version of the test and set
+ * operations as both should be shielded with the zone lock to prevent
+ * any possible races on the setting or clearing of the bit.
+ */
+__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
+
+/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 14d14beb1f7f..d27701199a4d 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -102,6 +102,15 @@ static inline void page_ref_sub(struct page *page, int nr)
__page_ref_mod(page, -nr);
}
+static inline int page_ref_sub_return(struct page *page, int nr)
+{
+ int ret = atomic_sub_return(nr, &page->_refcount);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, -nr, ret);
+ return ret;
+}
+
static inline void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
new file mode 100644
index 000000000000..3b99e0ec24f2
--- /dev/null
+++ b/include/linux/page_reporting.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PAGE_REPORTING_H
+#define _LINUX_PAGE_REPORTING_H
+
+#include <linux/mmzone.h>
+#include <linux/scatterlist.h>
+
+/* This value should always be a power of 2, see page_reporting_cycle() */
+#define PAGE_REPORTING_CAPACITY 32
+
+struct page_reporting_dev_info {
+ /* function that alters pages to make them "reported" */
+ int (*report)(struct page_reporting_dev_info *prdev,
+ struct scatterlist *sg, unsigned int nents);
+
+ /* work struct for processing reports */
+ struct delayed_work work;
+
+ /* Current state of page reporting */
+ atomic_t state;
+};
+
+/* Tear-down and bring-up for page reporting devices */
+void page_reporting_unregister(struct page_reporting_dev_info *prdev);
+int page_reporting_register(struct page_reporting_dev_info *prdev);
+#endif /*_LINUX_PAGE_REPORTING_H */
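
A hedged sketch of a reporting backend: the callback receives scatterlists of up to PAGE_REPORTING_CAPACITY free-page ranges and returns 0 once they have been handed to the hypervisor. my_report() and the registration comments are illustrative only.

static int my_report(struct page_reporting_dev_info *prdev,
		     struct scatterlist *sg, unsigned int nents)
{
	/* tell the hypervisor about each free range described by sg */
	return 0;	/* 0 lets the core mark the pages reported */
}

static struct page_reporting_dev_info my_prdev = {
	.report = my_report,
};

/* probe:  page_reporting_register(&my_prdev);
 * remove: page_reporting_unregister(&my_prdev); */
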
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ccb14b6a16b5..a8f7bd8ea1c6 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -33,8 +33,8 @@ enum mapping_flags {
/**
* mapping_set_error - record a writeback error in the address_space
- * @mapping - the mapping in which an error should be set
- * @error - the error to set in the mapping
+ * @mapping: the mapping in which an error should be set
+ * @error: the error to set in the mapping
*
* When writeback fails in some way, we must record that error so that
* userspace can be informed when fsync and the like are called. We endeavor
@@ -70,11 +70,9 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline int mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(struct address_space *mapping)
{
- if (mapping)
- return test_bit(AS_UNEVICTABLE, &mapping->flags);
- return !!mapping;
+ return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
static inline void mapping_set_exiting(struct address_space *mapping)
@@ -305,9 +303,9 @@ static inline struct page *find_lock_page(struct address_space *mapping,
* atomic allocation!
*/
static inline struct page *find_or_create_page(struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask)
+ pgoff_t index, gfp_t gfp_mask)
{
- return pagecache_get_page(mapping, offset,
+ return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
gfp_mask);
}
@@ -333,14 +331,17 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
-static inline struct page *find_subpage(struct page *page, pgoff_t offset)
+/*
+ * Given the page we found in the page cache, return the page corresponding
+ * to this index in the file
+ */
+static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
- if (PageHuge(page))
- return page;
-
- VM_BUG_ON_PAGE(PageTail(page), page);
+ /* HugeTLBfs wants the head page regardless */
+ if (PageHuge(head))
+ return head;
- return page + (offset & (compound_nr(page) - 1));
+ return head + (index & (hpage_nr_pages(head) - 1));
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 62b7fdcc661c..2d155bfb8fbf 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -112,6 +112,14 @@ extern const guid_t pci_acpi_dsm_guid;
#define RESET_DELAY_DSM 0x08
#define FUNCTION_DELAY_DSM 0x09
+#ifdef CONFIG_PCIE_EDR
+void pci_acpi_add_edr_notifier(struct pci_dev *pdev);
+void pci_acpi_remove_edr_notifier(struct pci_dev *pdev);
+#else
+static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { }
+static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { }
+#endif /* CONFIG_PCIE_EDR */
+
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 56f1846b9d39..e0ed9d01f6e5 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -53,7 +53,8 @@ struct pci_epc_ops {
phys_addr_t addr);
int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
int (*get_msi)(struct pci_epc *epc, u8 func_no);
- int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno, u32 offset);
int (*get_msix)(struct pci_epc *epc, u8 func_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
@@ -71,6 +72,7 @@ struct pci_epc_ops {
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
* @page_size: size of each page
+ * @lock: mutex to protect bitmap
*/
struct pci_epc_mem {
phys_addr_t phys_base;
@@ -78,6 +80,8 @@ struct pci_epc_mem {
unsigned long *bitmap;
size_t page_size;
int pages;
+	/* mutex to protect against concurrent memory allocations */
+ struct mutex lock;
};
/**
@@ -88,7 +92,9 @@ struct pci_epc_mem {
* @mem: address space of the endpoint controller
* @max_functions: max number of functions that can be configured in this EPC
* @group: configfs group representing the PCI EPC device
- * @lock: spinlock to protect pci_epc ops
+ * @lock: mutex to protect pci_epc ops
+ * @function_num_map: bitmap to manage physical function number
+ * @notifier: used to notify EPF of any EPC events (like linkup)
*/
struct pci_epc {
struct device dev;
@@ -97,8 +103,10 @@ struct pci_epc {
struct pci_epc_mem *mem;
u8 max_functions;
struct config_group *group;
- /* spinlock to protect against concurrent access of EP controller */
- spinlock_t lock;
+ /* mutex to protect against concurrent access of EP controller */
+ struct mutex lock;
+ unsigned long function_num_map;
+ struct atomic_notifier_head notifier;
};
/**
@@ -113,6 +121,7 @@ struct pci_epc {
*/
struct pci_epc_features {
unsigned int linkup_notifier : 1;
+ unsigned int core_init_notifier : 1;
unsigned int msi_capable : 1;
unsigned int msix_capable : 1;
u8 reserved_bar;
@@ -141,6 +150,12 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
return dev_get_drvdata(&epc->dev);
}
+static inline int
+pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&epc->notifier, nb);
+}
+
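
A consumer-side sketch, assuming the CORE_INIT/LINK_UP events that pci-epf.h defines below; my_epf_notifier() is hypothetical.

static int my_epf_notifier(struct notifier_block *nb, unsigned long val,
			   void *data)
{
	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);

	switch (val) {
	case CORE_INIT:		/* (re)program BARs, MSI/MSI-X, ... */
		break;
	case LINK_UP:		/* start servicing the host */
		break;
	}
	return NOTIFY_OK;
}

/* in bind(): epf->nb.notifier_call = my_epf_notifier;
 *            pci_epc_register_notifier(epf->epc, &epf->nb); */
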
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
@@ -151,6 +166,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_init_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
@@ -165,7 +181,8 @@ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr);
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+ enum pci_barno, u32 offset);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 2d6f07556682..6644ff3b0702 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -15,6 +15,11 @@
struct pci_epf;
+enum pci_notify_event {
+ CORE_INIT,
+ LINK_UP,
+};
+
enum pci_barno {
BAR_0,
BAR_1,
@@ -55,13 +60,10 @@ struct pci_epf_header {
* @bind: ops to perform when a EPC device has been bound to EPF device
* @unbind: ops to perform when a binding has been lost between a EPC device
* and EPF device
- * @linkup: ops to perform when the EPC device has established a connection with
- * a host system
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
void (*unbind)(struct pci_epf *epf);
- void (*linkup)(struct pci_epf *epf);
};
/**
@@ -92,10 +94,12 @@ struct pci_epf_driver {
/**
* struct pci_epf_bar - represents the BAR of EPF device
* @phys_addr: physical address that should be mapped to the BAR
+ * @addr: virtual address corresponding to the @phys_addr
* @size: the size of the address space present in BAR
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
+ void *addr;
size_t size;
enum pci_barno barno;
int flags;
@@ -112,6 +116,8 @@ struct pci_epf_bar {
* @epc: the EPC device to which this EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ * @nb: notifier block to notify EPF of any EPC events (like linkup)
+ * @lock: mutex to protect pci_epf_ops
*/
struct pci_epf {
struct device dev;
@@ -125,6 +131,22 @@ struct pci_epf {
struct pci_epc *epc;
struct pci_epf_driver *driver;
struct list_head list;
+ struct notifier_block nb;
+ /* mutex to protect against concurrent access of pci_epf_ops */
+ struct mutex lock;
+};
+
+/**
+ * struct pci_epf_msix_tbl - represents the MSIX table entry structure
+ * @msg_addr: Writes to this address will trigger an MSIX interrupt in the host
+ * @msg_data: Data that should be written to @msg_addr to trigger the MSIX interrupt
+ * @vector_ctrl: Identifies if the function is prohibited from sending a message
+ * using this MSIX table entry
+ */
+struct pci_epf_msix_tbl {
+ u64 msg_addr;
+ u32 msg_data;
+ u32 vector_ctrl;
};
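
A small hedged helper over the new layout; per the PCI specification, bit 0 of @vector_ctrl masks the entry. my_msix_masked() is an invented name.

static bool my_msix_masked(const struct pci_epf_msix_tbl *tbl, u32 vector)
{
	return tbl[vector].vector_ctrl & 0x1;	/* 1 = entry masked */
}
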
#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
@@ -154,5 +176,4 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
-void pci_epf_linkup(struct pci_epf *epf);
#endif /* __LINUX_PCI_EPF_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a048fba311d2..83ce1cdf5676 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -243,7 +243,7 @@ enum pcie_link_width {
PCIE_LNK_WIDTH_UNKNOWN = 0xff,
};
-/* Based on the PCI Hotplug Spec, but some values are made up by us */
+/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
PCI_SPEED_33MHz = 0x00,
PCI_SPEED_66MHz = 0x01,
@@ -451,6 +451,11 @@ struct pci_dev {
const struct attribute_group **msi_irq_groups;
#endif
struct pci_vpd *vpd;
+#ifdef CONFIG_PCIE_DPC
+ u16 dpc_cap;
+ unsigned int dpc_rp_extensions:1;
+ u8 dpc_rp_log_size;
+#endif
#ifdef CONFIG_PCI_ATS
union {
struct pci_sriov *sriov; /* PF: SR-IOV info */
@@ -517,7 +522,9 @@ struct pci_host_bridge {
unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
unsigned int native_ltr:1; /* OS may use PCIe LTR */
+ unsigned int native_dpc:1; /* OS may use PCIe DPC */
unsigned int preserve_config:1; /* Preserve FW resource setup */
+ unsigned int size_windows:1; /* Enable root bus sizing */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
@@ -1224,7 +1231,6 @@ int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 977e66875a96..1dfc4e1dcb94 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2585,6 +2585,8 @@
#define PCI_VENDOR_ID_AMAZON 0x1d0f
+#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
+
#define PCI_VENDOR_ID_HYGON 0x1d94
#define PCI_VENDOR_ID_HXT 0x1dbf
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 4f052496cdfd..0a4f54dd4737 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
*/
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
- s64 ret = fbc->count;
+ /* Prevent reloads of fbc->count */
+ s64 ret = READ_ONCE(fbc->count);
- barrier(); /* Prevent reloads of fbc->count */
if (ret >= 0)
return ret;
return 0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8768a39b5258..9c3e7619c929 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1020,6 +1020,7 @@ struct perf_sample_data {
u64 stack_user_size;
u64 phys_addr;
+ u64 cgroup;
} ____cacheline_aligned;
/* default value for data source */
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 998ae7d24450..01a0d4e28506 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -62,6 +62,7 @@ struct pid
unsigned int level;
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
+ struct hlist_head inodes;
/* wait queue for pidfd notifications */
wait_queue_head_t wait_pidfd;
struct rcu_head rcu;
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 2ed6af88794b..4956e362e55e 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -33,7 +33,6 @@ struct pid_namespace {
unsigned int level;
struct pid_namespace *parent;
#ifdef CONFIG_PROC_FS
- struct vfsmount *proc_mnt;
struct dentry *proc_self;
struct dentry *proc_thread_self;
#endif
@@ -42,7 +41,6 @@ struct pid_namespace {
#endif
struct user_namespace *user_ns;
struct ucounts *ucounts;
- struct work_struct proc_work;
kgid_t pid_gid;
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 7ce23450a1cb..2aef59df93d7 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -186,7 +186,7 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
const char *pin_group, const unsigned **pins,
unsigned *num_pins);
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
#else
static inline
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
index dd5971937a64..644af1d89cfa 100644
--- a/include/linux/platform_data/pm33xx.h
+++ b/include/linux/platform_data/pm33xx.h
@@ -46,9 +46,13 @@ struct am33xx_pm_sram_addr {
};
struct am33xx_pm_platform_data {
- int (*init)(void);
+ int (*init)(int (*idle)(u32 wfi_flags));
+ int (*deinit)(void);
int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long),
unsigned long args);
+ int (*cpu_suspend)(int (*fn)(unsigned long), unsigned long args);
+ void (*begin_suspend)(void);
+ void (*finish_suspend)(void);
struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
void __iomem *(*get_rtc_base_addr)(void);
void (*save_context)(void);
diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h
deleted file mode 100644
index 7e3a16097672..000000000000
--- a/include/linux/platform_data/remoteproc-omap.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Remote Processor - omap-specific bits
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- * Copyright (C) 2011 Google, Inc.
- */
-
-#ifndef _PLAT_REMOTEPROC_H
-#define _PLAT_REMOTEPROC_H
-
-struct rproc_ops;
-struct platform_device;
-
-/*
- * struct omap_rproc_pdata - omap remoteproc's platform data
- * @name: the remoteproc's name
- * @oh_name: omap hwmod device
- * @oh_name_opt: optional, secondary omap hwmod device
- * @firmware: name of firmware file to load
- * @mbox_name: name of omap mailbox device to use with this rproc
- * @ops: start/stop rproc handlers
- * @device_enable: omap-specific handler for enabling a device
- * @device_shutdown: omap-specific handler for shutting down a device
- * @set_bootaddr: omap-specific handler for setting the rproc boot address
- */
-struct omap_rproc_pdata {
- const char *name;
- const char *oh_name;
- const char *oh_name_opt;
- const char *firmware;
- const char *mbox_name;
- const struct rproc_ops *ops;
- int (*device_enable)(struct platform_device *pdev);
- int (*device_shutdown)(struct platform_device *pdev);
- void (*set_bootaddr)(u32);
-};
-
-#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE)
-
-void __init omap_rproc_reserve_cma(void);
-
-#else
-
-static inline void __init omap_rproc_reserve_cma(void)
-{
-}
-
-#endif
-
-#endif /* _PLAT_REMOTEPROC_H */
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index 4f733a411d18..ca8337695c2a 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -10,7 +10,7 @@
#include <drm/drm_fourcc.h>
#include <linux/fb.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
/* format array, use it to initialize a "struct simplefb_format" array */
#define SIMPLEFB_FORMATS \
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 2cbde6542849..c59999ce044e 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -17,6 +17,7 @@ enum ti_sysc_module_type {
TI_SYSC_OMAP4_MCASP,
TI_SYSC_OMAP4_USB_HOST_FS,
TI_SYSC_DRA7_MCAN,
+ TI_SYSC_PRUSS,
};
struct ti_sysc_cookie {
@@ -49,6 +50,9 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_PRUSS BIT(24)
+#define SYSC_MODULE_QUIRK_DSS_RESET BIT(23)
+#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22)
#define SYSC_QUIRK_CLKDM_NOAUTO BIT(21)
#define SYSC_QUIRK_FORCE_MSTANDBY BIT(20)
#define SYSC_MODULE_QUIRK_AESS BIT(19)
@@ -141,6 +145,7 @@ struct clk;
struct ti_sysc_platform_data {
struct of_dev_auxdata *auxdata;
+ bool (*soc_type_gp)(void);
int (*init_clockdomain)(struct device *dev, struct clk *fck,
struct clk *ick, struct ti_sysc_cookie *cookie);
void (*clkdm_deny_idle)(struct device *dev,
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index ad19e68e1fc3..ae94dcebd936 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -248,7 +248,7 @@ struct charger_manager {
u64 charging_end_time;
};
-#ifdef CONFIG_CHARGER_MANAGER
+#if IS_ENABLED(CONFIG_CHARGER_MANAGER)
extern void cm_notify_event(struct power_supply *psy,
enum cm_event_types type, char *msg);
#else
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 3dfa92633af3..45c05fd9c99d 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -5,6 +5,7 @@
#ifndef _LINUX_PROC_FS_H
#define _LINUX_PROC_FS_H
+#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/fs.h>
@@ -12,7 +13,21 @@ struct proc_dir_entry;
struct seq_file;
struct seq_operations;
+enum {
+ /*
+ * All /proc entries using this ->proc_ops instance are never removed.
+ *
+ * If in doubt, ignore this flag.
+ */
+#ifdef MODULE
+ PROC_ENTRY_PERMANENT = 0U,
+#else
+ PROC_ENTRY_PERMANENT = 1U << 0,
+#endif
+};
+
struct proc_ops {
+ unsigned int proc_flags;
int (*proc_open)(struct inode *, struct file *);
ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *);
ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *);
@@ -25,14 +40,14 @@ struct proc_ops {
#endif
int (*proc_mmap)(struct file *, struct vm_area_struct *);
unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-};
+} __randomize_layout;
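
A hedged sketch of opting a never-removed /proc entry into the new fast path; my_open()/my_read() are placeholder handlers, and in modules the flag correctly degrades to 0.

static int my_open(struct inode *inode, struct file *file);
static ssize_t my_read(struct file *file, char __user *buf, size_t count,
		       loff_t *ppos);

static const struct proc_ops my_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,	/* entry is never removed */
	.proc_open  = my_open,
	.proc_read  = my_read,
};
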
#ifdef CONFIG_PROC_FS
typedef int (*proc_write_t)(struct file *, char *, size_t);
extern void proc_root_init(void);
-extern void proc_flush_task(struct task_struct *);
+extern void proc_flush_pid(struct pid *);
extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
@@ -105,7 +120,7 @@ static inline void proc_root_init(void)
{
}
-static inline void proc_flush_task(struct task_struct *task)
+static inline void proc_flush_pid(struct pid *pid)
{
}
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index adff08bfecf9..6abe85c34681 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -50,16 +50,11 @@ enum {
#ifdef CONFIG_PROC_FS
-extern int pid_ns_prepare_proc(struct pid_namespace *ns);
-extern void pid_ns_release_proc(struct pid_namespace *ns);
extern int proc_alloc_inum(unsigned int *pino);
extern void proc_free_inum(unsigned int inum);
#else /* CONFIG_PROC_FS */
-static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
-static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
-
static inline int proc_alloc_inum(unsigned int *inum)
{
*inum = 1;
diff --git a/include/linux/random.h b/include/linux/random.h
index d319f9a1e429..45e1f8fa742b 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -7,6 +7,8 @@
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>
@@ -185,6 +187,26 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
}
#endif
+/*
+ * Called from the boot CPU during startup; not valid to call once
+ * secondary CPUs are up and preemption is possible.
+ */
+#ifndef arch_get_random_seed_long_early
+static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
+{
+ WARN_ON(system_state != SYSTEM_BOOTING);
+ return arch_get_random_seed_long(v);
+}
+#endif
+
+#ifndef arch_get_random_long_early
+static inline bool __init arch_get_random_long_early(unsigned long *v)
+{
+ WARN_ON(system_state != SYSTEM_BOOTING);
+ return arch_get_random_long(v);
+}
+#endif
+
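
A sketch of the intended boot-time call site, before secondary CPUs come up; my_seed_pool() and the mixing step are placeholders.

static void __init my_seed_pool(void)
{
	unsigned long seed;

	if (!arch_get_random_seed_long_early(&seed) &&
	    !arch_get_random_long_early(&seed))
		return;		/* no arch RNG available */

	/* mix "seed" into the entropy pool here */
}
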
/* Pseudo random number generator from numerical recipes. */
static inline u32 next_pseudo_random32(u32 seed)
{
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 16ad66683ad0..9c07d7958c53 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -329,7 +329,7 @@ struct rproc;
struct rproc_mem_entry {
void *va;
dma_addr_t dma;
- int len;
+ size_t len;
u32 da;
void *priv;
char name[32];
@@ -369,12 +369,14 @@ enum rsc_handling_status {
* expects to find it
* @sanity_check: sanity check the fw image
* @get_boot_addr: get boot address to entry point specified in firmware
+ * @panic:	optional callback to react to a system panic; the core will
+ *		delay the panic by at least the returned number of milliseconds
*/
struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
- void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
+ void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc,
int offset, int avail);
@@ -382,7 +384,8 @@ struct rproc_ops {
struct rproc *rproc, const struct firmware *fw);
int (*load)(struct rproc *rproc, const struct firmware *fw);
int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
- u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
+ u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
+ unsigned long (*panic)(struct rproc *rproc);
};
/**
@@ -498,7 +501,7 @@ struct rproc {
int num_traces;
struct list_head carveouts;
struct list_head mappings;
- u32 bootaddr;
+ u64 bootaddr;
struct list_head rvdevs;
struct list_head subdevs;
struct idr notifyids;
@@ -514,6 +517,7 @@ struct rproc {
bool auto_boot;
struct list_head dump_segments;
int nb_vdev;
+ u8 elf_class;
};
/**
@@ -599,13 +603,13 @@ void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
- void *va, dma_addr_t dma, int len, u32 da,
+ void *va, dma_addr_t dma, size_t len, u32 da,
int (*alloc)(struct rproc *, struct rproc_mem_entry *),
int (*release)(struct rproc *, struct rproc_mem_entry *),
const char *name, ...);
struct rproc_mem_entry *
-rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...);
int rproc_boot(struct rproc *rproc);
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index df0124eabece..c76b2f3b3ac4 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -135,10 +135,10 @@ void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
+bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 23990bd29040..bba3db3f7efa 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -34,18 +34,6 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
}
-static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
-{
- rtc_time64_to_tm(time, tm);
-}
-
-static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
-{
- *time = rtc_tm_to_time64(tm);
-
- return 0;
-}
-
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cdev.h>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d0d2fc018e9..4418f5cb8324 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -945,8 +945,8 @@ struct task_struct {
struct seccomp seccomp;
/* Thread group tracking: */
- u32 parent_exec_id;
- u32 self_exec_id;
+ u64 parent_exec_id;
+ u64 self_exec_id;
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 88050259c466..3e5b090c16d4 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -10,6 +10,8 @@
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
+#include <linux/mm_types.h>
+#include <asm/ptrace.h>
/*
* Types defining task->signal and task->sighand and APIs using them:
@@ -224,7 +226,14 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
- * (notably. ptrace) */
+						   * (notably, ptrace)
+						   * Deprecated; do not use in new code.
+ * Use exec_update_mutex instead.
+ */
+ struct mutex exec_update_mutex; /* Held while task_struct is being
+ * updated during exec, and may have
+ * inconsistent permissions.
+ */
} __randomize_layout;
/*
@@ -370,6 +379,20 @@ static inline int signal_pending_state(long state, struct task_struct *p)
}
/*
+ * This should only be used in fault handlers to decide whether we
+ * should stop the current fault routine to handle the signals
+ * instead, especially in the case where we've been interrupted with
+ * a VM_FAULT_RETRY.
+ */
+static inline bool fault_signal_pending(vm_fault_t fault_flags,
+ struct pt_regs *regs)
+{
+ return unlikely((fault_flags & VM_FAULT_RETRY) &&
+ (fatal_signal_pending(current) ||
+ (user_mode(regs) && signal_pending(current))));
+}
+
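
The pattern this helper centralizes, sketched inside a hypothetical arch fault handler; the local names mirror a typical do_page_fault().

static void my_handle_fault(struct pt_regs *regs, struct vm_area_struct *vma,
			    unsigned long address, unsigned int flags)
{
	vm_fault_t fault = handle_mm_fault(vma, address, flags);

	if (fault_signal_pending(fault, regs))
		return;		/* a signal (possibly fatal) interrupted us */

	/* ... normal VM_FAULT_* handling continues here ... */
}
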
+/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
* This is required every time the blocked sigset_t changes.
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index f1879884238e..38359071236a 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -13,6 +13,7 @@
struct task_struct;
struct rusage;
union thread_union;
+struct css_set;
/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL
@@ -29,6 +30,9 @@ struct kernel_clone_args {
pid_t *set_tid;
/* Number of elements in *set_tid */
size_t set_tid_size;
+ int cgroup;
+ struct cgroup *cgrp;
+ struct css_set *cset;
};
/*
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 770c2bf3aa43..1672cf6f7614 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -21,7 +21,6 @@ struct seq_file {
size_t pad_until;
loff_t index;
loff_t read_pos;
- u64 version;
struct mutex lock;
const struct seq_operations *op;
int poll_event;
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index d56fefef8905..7a35a6901221 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -78,6 +78,7 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
extern int shmem_unuse(unsigned int type, bool frontswap,
unsigned long *fs_pages_to_unuse);
+extern bool shmem_huge_enabled(struct vm_area_struct *vma);
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
@@ -114,15 +115,6 @@ static inline bool shmem_file(struct file *file)
extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
-#else
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
-{
- return false;
-}
-#endif
-
#ifdef CONFIG_SHMEM
extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 28b1a2b4459e..3a2ac7072dbb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -47,8 +47,8 @@
* A. IP checksum related features
*
* Drivers advertise checksum offload capabilities in the features of a device.
- * From the stack's point of view these are capabilities offered by the driver,
- * a driver typically only advertises features that it is capable of offloading
+ * From the stack's point of view these are capabilities offered by the driver.
+ * A driver typically only advertises features that it is capable of offloading
* to its device.
*
* The checksum related features are:
@@ -63,7 +63,7 @@
* TCP or UDP packets over IPv4. These are specifically
* unencapsulated packets of the form IPv4|TCP or
* IPv4|UDP where the Protocol field in the IPv4 header
- * is TCP or UDP. The IPv4 header may contain IP options
+ * is TCP or UDP. The IPv4 header may contain IP options.
* This feature cannot be set in features for a device
* with NETIF_F_HW_CSUM also set. This feature is being
* DEPRECATED (see below).
@@ -79,13 +79,13 @@
* DEPRECATED (see below).
*
* NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- * This flag is used only used to disable the RX checksum
+ * This flag is only used to disable the RX checksum
* feature for a device. The stack will accept receive
* checksum indication in packets received on a device
* regardless of whether NETIF_F_RXCSUM is set.
*
* B. Checksumming of received packets by device. Indication of checksum
- * verification is in set skb->ip_summed. Possible values are:
+ * verification is set in skb->ip_summed. Possible values are:
*
* CHECKSUM_NONE:
*
@@ -115,16 +115,16 @@
* the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
* For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
* and a device is able to verify the checksums for UDP (possibly zero),
- * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ * GRE (checksum flag is set) and TCP, skb->csum_level would be set to
* two. If the device were only able to verify the UDP checksum and not
- * GRE, either because it doesn't support GRE checksum of because GRE
+ * GRE, either because it doesn't support GRE checksum or because GRE
* checksum is bad, skb->csum_level would be set to zero (TCP checksum is
* not considered in this case).
*
* CHECKSUM_COMPLETE:
*
* This is the most generic way. The device supplied checksum of the _whole_
- * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+ * packet as seen by netif_rx() and fills in skb->csum. This means the
* hardware doesn't need to parse L3/L4 headers to implement this.
*
* Notes:
@@ -153,8 +153,8 @@
* from skb->csum_start up to the end, and to record/write the checksum at
* offset skb->csum_start + skb->csum_offset. A driver may verify that the
* csum_start and csum_offset values are valid values given the length and
- * offset of the packet, however they should not attempt to validate that the
- * checksum refers to a legitimate transport layer checksum-- it is the
+ * offset of the packet, but it should not attempt to validate that the
+ * checksum refers to a legitimate transport layer checksum -- it is the
* purview of the stack to validate that csum_start and csum_offset are set
* correctly.
*
@@ -178,18 +178,18 @@
*
* CHECKSUM_UNNECESSARY:
*
- * This has the same meaning on as CHECKSUM_NONE for checksum offload on
+ * This has the same meaning as CHECKSUM_NONE for checksum offload on
* output.
*
* CHECKSUM_COMPLETE:
* Not used in checksum output. If a driver observes a packet with this value
- * set in skbuff, if should treat as CHECKSUM_NONE being set.
+ * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set.
*
* D. Non-IP checksum (CRC) offloads
*
* NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
* offloading the SCTP CRC in a packet. To perform this offload the stack
- * will set set csum_start and csum_offset accordingly, set ip_summed to
+ * will set csum_start and csum_offset accordingly, set ip_summed to
* CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
* the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
* A driver that supports both IP checksum offload and SCTP CRC32c offload
@@ -200,10 +200,10 @@
* NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
* offloading the FCOE CRC in a packet. To perform this offload the stack
* will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- * accordingly. Note the there is no indication in the skbuff that the
- * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
+ * accordingly. Note that there is no indication in the skbuff that the
+ * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
* both IP checksum offload and FCOE CRC offload must verify which offload
- * is configured for a packet presumably by inspecting packet headers.
+ * is configured for a packet, presumably by inspecting packet headers.
*
* E. Checksumming on output with GSO.
*
@@ -211,9 +211,9 @@
* is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
* gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
* part of the GSO operation is implied. If a checksum is being offloaded
- * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
- * are set to refer to the outermost checksum being offload (two offloaded
- * checksums are possible with UDP encapsulation).
+ * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
+ * csum_offset are set to refer to the outermost checksum being offloaded
+ * (two offloaded checksums are possible with UDP encapsulation).
*/
/* Don't change this without changing skb_csum_unnecessary! */
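
For reference, a sketch of the software fallback implied by the CHECKSUM_PARTIAL rules above (checksum from csum_start to the end, store at csum_start + csum_offset); real drivers normally just call skb_checksum_help():

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int start = skb_checksum_start_offset(skb);
		__wsum csum = skb_checksum(skb, start, skb->len - start, 0);

		*(__sum16 *)(skb->data + start + skb->csum_offset) =
			csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
	}
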
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index c5d52e2cb275..7f0bc3cf4d61 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -85,6 +85,7 @@ struct apr_device {
uint16_t domain_id;
uint32_t version;
char name[APR_NAME_SIZE];
+ const char *service_path;
spinlock_t lock;
struct list_head node;
};
diff --git a/include/linux/soc/qcom/pdr.h b/include/linux/soc/qcom/pdr.h
new file mode 100644
index 000000000000..83a8ea612e69
--- /dev/null
+++ b/include/linux/soc/qcom/pdr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER__
+#define __QCOM_PDR_HELPER__
+
+#include <linux/soc/qcom/qmi.h>
+
+#define SERVREG_NAME_LENGTH 64
+
+struct pdr_service;
+struct pdr_handle;
+
+enum servreg_service_state {
+ SERVREG_LOCATOR_ERR = 0x1,
+ SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF,
+ SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF,
+ SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF,
+ SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF,
+};
+
+struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
+ char *service_path,
+ void *priv), void *priv);
+struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
+ const char *service_name,
+ const char *service_path);
+int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds);
+void pdr_handle_release(struct pdr_handle *pdr);
+
+#endif
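
A sketch of the intended call flow for the new PDR helper, with illustrative service names and assuming the usual ERR_PTR convention for the allocators:

	static void pd_status(int state, char *service_path, void *priv)
	{
		if (state == SERVREG_SERVICE_STATE_DOWN)
			pr_info("%s went down\n", service_path);
	}

	/* ... */
	struct pdr_handle *pdr = pdr_handle_alloc(pd_status, NULL);
	struct pdr_service *pds;

	if (IS_ERR(pdr))
		return PTR_ERR(pdr);

	pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
	if (IS_ERR(pds)) {
		pdr_handle_release(pdr);
		return PTR_ERR(pds);
	}
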
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index 5efa2b67fa55..e712f94b89fc 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -88,6 +88,7 @@ struct qmi_elem_info {
#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5
#define QMI_ERR_INVALID_ID_V01 41
#define QMI_ERR_ENCODING_V01 58
+#define QMI_ERR_DISABLED_V01 69
#define QMI_ERR_INCOMPATIBLE_STATE_V01 90
#define QMI_ERR_NOT_SUPPORTED_V01 94
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index b451bb622335..00f5826092e3 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -80,6 +80,21 @@ enum sdw_slave_status {
};
/**
+ * enum sdw_clk_stop_type - clock stop operations
+ *
+ * @SDW_CLK_PRE_PREPARE: pre clock stop prepare
+ * @SDW_CLK_POST_PREPARE: post clock stop prepare
+ * @SDW_CLK_PRE_DEPREPARE: pre clock stop de-prepare
+ * @SDW_CLK_POST_DEPREPARE: post clock stop de-prepare
+ */
+enum sdw_clk_stop_type {
+ SDW_CLK_PRE_PREPARE = 0,
+ SDW_CLK_POST_PREPARE,
+ SDW_CLK_PRE_DEPREPARE,
+ SDW_CLK_POST_DEPREPARE,
+};
+
+/**
* enum sdw_command_response - Command response as defined by SDW spec
* @SDW_CMD_OK: cmd was successful
* @SDW_CMD_IGNORED: cmd was ignored
@@ -284,6 +299,7 @@ struct sdw_dpn_audio_mode {
* @max_async_buffer: Number of samples that this port can buffer in
* asynchronous modes
* @block_pack_mode: Type of block port mode supported
+ * @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register
* @port_encoding: Payload Channel Sample encoding schemes supported
* @audio_modes: Audio modes supported
*/
@@ -307,6 +323,7 @@ struct sdw_dpn_prop {
u32 modes;
u32 max_async_buffer;
bool block_pack_mode;
+ bool read_only_wordlength;
u32 port_encoding;
struct sdw_dpn_audio_mode *audio_modes;
};
@@ -424,6 +441,29 @@ struct sdw_slave_id {
__u8 sdw_version:4;
};
+/*
+ * Helper macros to extract the MIPI-defined IDs
+ *
+ * Spec definition
+ * Register Bit Contents
+ * DevId_0 [7:4] 47:44 sdw_version
+ * DevId_0 [3:0] 43:40 unique_id
+ * DevId_1 39:32 mfg_id [15:8]
+ * DevId_2 31:24 mfg_id [7:0]
+ * DevId_3 23:16 part_id [15:8]
+ * DevId_4 15:08 part_id [7:0]
+ * DevId_5 07:00 class_id
+ *
+ * The MIPI DisCo for SoundWire defines in addition the link_id as bits 51:48
+ */
+
+#define SDW_DISCO_LINK_ID(adr) (((adr) >> 48) & GENMASK(3, 0))
+#define SDW_VERSION(adr) (((adr) >> 44) & GENMASK(3, 0))
+#define SDW_UNIQUE_ID(adr) (((adr) >> 40) & GENMASK(3, 0))
+#define SDW_MFG_ID(adr) (((adr) >> 24) & GENMASK(15, 0))
+#define SDW_PART_ID(adr) (((adr) >> 8) & GENMASK(15, 0))
+#define SDW_CLASS_ID(adr) ((adr) & GENMASK(7, 0))
+
/**
* struct sdw_slave_intr_status - Slave interrupt status
* @control_port: control port status
@@ -533,6 +573,11 @@ struct sdw_slave_ops {
int (*port_prep)(struct sdw_slave *slave,
struct sdw_prepare_ch *prepare_ch,
enum sdw_port_prep_ops pre_ops);
+ int (*get_clk_stop_mode)(struct sdw_slave *slave);
+ int (*clk_stop)(struct sdw_slave *slave,
+ enum sdw_clk_stop_mode mode,
+ enum sdw_clk_stop_type type);
+
};
/**
@@ -575,6 +620,7 @@ struct sdw_slave {
#endif
struct list_head node;
struct completion *port_ready;
+ enum sdw_clk_stop_mode curr_clk_stop_mode;
u16 dev_num;
u16 dev_num_sticky;
bool probed;
@@ -892,6 +938,9 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream);
int sdw_enable_stream(struct sdw_stream_runtime *stream);
int sdw_disable_stream(struct sdw_stream_runtime *stream);
int sdw_deprepare_stream(struct sdw_stream_runtime *stream);
+int sdw_bus_prep_clk_stop(struct sdw_bus *bus);
+int sdw_bus_clk_stop(struct sdw_bus *bus);
+int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
/* messaging and data APIs */
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 3efa97d482cb..24d49c732341 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -19,4 +19,6 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
+unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+
#endif
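
A sketch of the intended pairing with stack_depot_save(), so interrupt frames do not defeat deduplication:

	unsigned long entries[64];
	unsigned int nr_entries;
	depot_stack_handle_t handle;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
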
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 0f64de7caa39..10891b70fc7b 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -179,6 +179,9 @@ sunrpc_cache_update(struct cache_detail *detail,
extern int
sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h);
+extern int
+sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
+ struct cache_head *h);
extern void cache_clean_deferred(void *owner);
@@ -206,11 +209,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
+ if (h->expiry_time < seconds_since_boot())
+ return true;
if (!test_bit(CACHE_VALID, &h->flags))
return false;
-
- return (h->expiry_time < seconds_since_boot()) ||
- (detail->flush_time >= h->last_refresh);
+ return detail->flush_time >= h->last_refresh;
}
extern int cache_check(struct cache_detail *detail,
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 92d182fd8e3b..320c672d84de 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -58,7 +58,8 @@ enum {
enum {
rpcrdma_fixed_maxsz = 4,
rpcrdma_segment_maxsz = 4,
- rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz,
+ rpcrdma_readseg_maxsz = 1 + rpcrdma_segment_maxsz,
+ rpcrdma_readchunk_maxsz = 1 + rpcrdma_readseg_maxsz,
};
/*
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index a6ef35184ef1..df696efdd675 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -132,6 +132,7 @@ struct rpc_task_setup {
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
#define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */
+#define RPC_TASK_CRED_NOREF 0x8000 /* No refcount on the credential */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 1afe38eb33f7..fd390894a584 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -380,7 +380,7 @@ struct svc_deferred_req {
struct cache_deferred_req handle;
size_t xprt_hlen;
int argslen;
- __be32 args[0];
+ __be32 args[];
};
struct svc_process_info {
@@ -517,6 +517,9 @@ void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
+int svc_encode_read_payload(struct svc_rqst *rqstp,
+ unsigned int offset,
+ unsigned int length);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
struct page **pages,
struct kvec *first, size_t total);
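
The args[0] to args[] conversions in this series switch to C99 flexible array members, which pair naturally with struct_size() from <linux/overflow.h> at allocation time. A minimal sketch:

	struct svc_deferred_req *dr;

	/* argslen counts __be32 words, per the struct definition above */
	dr = kmalloc(struct_size(dr, args, argslen), GFP_KERNEL);
	if (!dr)
		return NULL;
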
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 40f65888dd38..78fe2ac6dc6c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -52,6 +52,7 @@
/* Default and maximum inline threshold sizes */
enum {
+ RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1,
RPCRDMA_DEF_INLINE_THRESH = 4096,
RPCRDMA_MAX_INLINE_THRESH = 65536
};
@@ -132,11 +133,16 @@ struct svc_rdma_recv_ctxt {
struct ib_sge rc_recv_sge;
void *rc_recv_buf;
struct xdr_buf rc_arg;
+ struct xdr_stream rc_stream;
bool rc_temp;
u32 rc_byte_len;
unsigned int rc_page_count;
unsigned int rc_hdr_count;
u32 rc_inv_rkey;
+ __be32 *rc_write_list;
+ __be32 *rc_reply_chunk;
+ unsigned int rc_read_payload_offset;
+ unsigned int rc_read_payload_length;
struct page *rc_pages[RPCSVC_MAXPAGES];
};
@@ -144,6 +150,8 @@ struct svc_rdma_send_ctxt {
struct list_head sc_list;
struct ib_send_wr sc_send_wr;
struct ib_cqe sc_cqe;
+ struct xdr_buf sc_hdrbuf;
+ struct xdr_stream sc_stream;
void *sc_xprt_buf;
int sc_page_count;
int sc_cur_sge_no;
@@ -170,9 +178,11 @@ extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
struct svc_rdma_recv_ctxt *head, __be32 *p);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
- __be32 *wr_ch, struct xdr_buf *xdr);
+ __be32 *wr_ch, struct xdr_buf *xdr,
+ unsigned int offset,
+ unsigned long length);
extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
- __be32 *rp_ch, bool writelist,
+ const struct svc_rdma_recv_ctxt *rctxt,
struct xdr_buf *xdr);
/* svc_rdma_sendto.c */
@@ -182,13 +192,13 @@ extern struct svc_rdma_send_ctxt *
extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr);
-extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt,
- unsigned int len);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt,
- struct xdr_buf *xdr, __be32 *wr_lst);
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ struct xdr_buf *xdr);
extern int svc_rdma_sendto(struct svc_rqst *);
+extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
+ unsigned int length);
/* svc_rdma_transport.c */
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ea6f46be9cb7..9e1e046de176 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -21,6 +21,8 @@ struct svc_xprt_ops {
int (*xpo_has_wspace)(struct svc_xprt *);
int (*xpo_recvfrom)(struct svc_rqst *);
int (*xpo_sendto)(struct svc_rqst *);
+ int (*xpo_read_payload)(struct svc_rqst *, unsigned int,
+ unsigned int);
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index b41f34977995..01bb41908c93 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -184,24 +184,9 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
-extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
-/*
- * Helper structure for copying from an sk_buff.
- */
-struct xdr_skb_reader {
- struct sk_buff *skb;
- unsigned int offset;
- size_t count;
- __wsum csum;
-};
-
-typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
-
-extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
-
extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
@@ -301,6 +286,59 @@ xdr_align_size(size_t n)
}
/**
+ * xdr_pad_size - Calculate size of an object's pad
+ * @n: Size of an object being XDR encoded (in bytes)
+ *
+ * This implementation avoids the need for conditional
+ * branches or modulo division.
+ *
+ * Return value:
+ * Size (in bytes) of the needed XDR pad
+ */
+static inline size_t xdr_pad_size(size_t n)
+{
+ return xdr_align_size(n) - n;
+}
+
+/**
+ * xdr_stream_encode_item_present - Encode a "present" list item
+ * @xdr: pointer to xdr_stream
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
+{
+ const size_t len = sizeof(__be32);
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p = xdr_one;
+ return len;
+}
+
+/**
+ * xdr_stream_encode_item_absent - Encode a "not present" list item
+ * @xdr: pointer to xdr_stream
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t xdr_stream_encode_item_absent(struct xdr_stream *xdr)
+{
+ const size_t len = sizeof(__be32);
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p = xdr_zero;
+ return len;
+}
+
+/**
* xdr_stream_encode_u32 - Encode a 32-bit integer
* @xdr: pointer to xdr_stream
* @n: integer to encode
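
Taken together, a sketch of how an encoder might use the new helpers; chunk is a hypothetical optional field:

	ssize_t ret;

	ret = chunk ? xdr_stream_encode_item_present(xdr) :
		      xdr_stream_encode_item_absent(xdr);
	if (ret < 0)
		return ret;

	/* and for opaques: xdr_pad_size(5) == 3, since 5 rounds up to 8 */
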
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 2b2055b035ee..4fcc6fd0cbd6 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -566,38 +566,4 @@ static inline void queue_up_suspend_work(void) {}
#endif /* !CONFIG_PM_AUTOSLEEP */
-#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
-/*
- * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
- * to save/restore additional information to/from the array of page
- * frame numbers in the hibernation image. For s390 this is used to
- * save and restore the storage key for each page that is included
- * in the hibernation image.
- */
-unsigned long page_key_additional_pages(unsigned long pages);
-int page_key_alloc(unsigned long pages);
-void page_key_free(void);
-void page_key_read(unsigned long *pfn);
-void page_key_memorize(unsigned long *pfn);
-void page_key_write(void *address);
-
-#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
-
-static inline unsigned long page_key_additional_pages(unsigned long pages)
-{
- return 0;
-}
-
-static inline int page_key_alloc(unsigned long pages)
-{
- return 0;
-}
-
-static inline void page_key_free(void) {}
-static inline void page_key_read(unsigned long *pfn) {}
-static inline void page_key_memorize(unsigned long *pfn) {}
-static inline void page_key_write(void *address) {}
-
-#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
-
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1e99f7ac1d7e..b835d8dbea0e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -374,7 +374,6 @@ extern int sysctl_min_slab_ratio;
#define node_reclaim_mode 0
#endif
-extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct pagevec *pvec);
extern int kswapd_run(int nid);
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 877fd239b6ff..d9b7c9132c2f 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -68,6 +68,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
if (pte_swp_soft_dirty(pte))
pte = pte_swp_clear_soft_dirty(pte);
+ if (pte_swp_uffd_wp(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
@@ -348,7 +350,8 @@ static inline void num_poisoned_pages_inc(void)
}
#endif
-#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
+#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \
+ defined(CONFIG_DEVICE_PRIVATE)
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 4beb51009b62..80bb865b3a33 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -297,9 +297,10 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name);
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name);
-int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
- struct kobject *target_kobj,
- const char *target_name);
+int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name,
+ const char *symlink_name);
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
@@ -512,10 +513,10 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj,
{
}
-static inline int __compat_only_sysfs_link_entry_to_kobj(
- struct kobject *kobj,
- struct kobject *target_kobj,
- const char *target_name)
+static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
+ struct kobject *target_kobj,
+ const char *target_name,
+ const char *symlink_name)
{
return 0;
}
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 7a03f68fb982..1412e9cc79ce 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -49,7 +49,6 @@ struct tee_shm_pool;
*/
struct tee_context {
struct tee_device *teedev;
- struct list_head list_shm;
void *data;
struct kref refcount;
bool releasing;
@@ -168,9 +167,7 @@ void tee_device_unregister(struct tee_device *teedev);
/**
* struct tee_shm - shared memory object
- * @teedev: device used to allocate the object
- * @ctx: context using the object, if NULL the context is gone
- * @link link element
+ * @ctx: context using the object
* @paddr: physical address of the shared memory
* @kaddr: virtual address of the shared memory
* @size: size of shared memory
@@ -185,9 +182,7 @@ void tee_device_unregister(struct tee_device *teedev);
* subsystem and from drivers that implements their own shm pool manager.
*/
struct tee_shm {
- struct tee_device *teedev;
struct tee_context *ctx;
- struct list_head link;
phys_addr_t paddr;
void *kaddr;
size_t size;
@@ -319,18 +314,6 @@ void *tee_get_drvdata(struct tee_device *teedev);
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
/**
- * tee_shm_priv_alloc() - Allocate shared memory privately
- * @dev: Device that allocates the shared memory
- * @size: Requested size of shared memory
- *
- * Allocates shared memory buffer that is not associated with any client
- * context. Such buffers are owned by TEE driver and used for internal calls.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
-
-/**
* tee_shm_register() - Register shared memory buffer
* @ctx: Context that registers the shared memory
* @addr: Address is userspace of the shared buffer
diff --git a/include/linux/topology.h b/include/linux/topology.h
index eb2fe6edd73c..608fa4aadf0e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -130,20 +130,11 @@ static inline int numa_node_id(void)
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
*/
DECLARE_PER_CPU(int, _numa_mem_);
-extern int _node_numa_mem_[MAX_NUMNODES];
#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
this_cpu_write(_numa_mem_, node);
- _node_numa_mem_[numa_node_id()] = node;
-}
-#endif
-
-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
- return _node_numa_mem_[node];
}
#endif
@@ -166,7 +157,6 @@ static inline int cpu_to_mem(int cpu)
static inline void set_cpu_numa_mem(int cpu, int node)
{
per_cpu(_numa_mem_, cpu) = node;
- _node_numa_mem_[cpu_to_node(cpu)] = node;
}
#endif
@@ -180,13 +170,6 @@ static inline int numa_mem_id(void)
}
#endif
-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
- return node;
-}
-#endif
-
#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 6c7a10a6d71e..5c6943354049 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -85,6 +85,8 @@ struct trace_iterator {
struct mutex mutex;
struct ring_buffer_iter **buffer_iter;
unsigned long iter_flags;
+ void *temp; /* temporary holder for a copied event */
+ unsigned int temp_size;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
new file mode 100644
index 000000000000..0e215e6d0534
--- /dev/null
+++ b/include/linux/uacce.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_UACCE_H
+#define _LINUX_UACCE_H
+
+#include <linux/cdev.h>
+#include <uapi/misc/uacce/uacce.h>
+
+#define UACCE_NAME "uacce"
+#define UACCE_MAX_REGION 2
+#define UACCE_MAX_NAME_SIZE 64
+
+struct uacce_queue;
+struct uacce_device;
+
+/**
+ * struct uacce_qfile_region - structure of queue file region
+ * @type: type of the region
+ */
+struct uacce_qfile_region {
+ enum uacce_qfrt type;
+};
+
+/**
+ * struct uacce_ops - uacce device operations
+ * @get_available_instances: get available instances left of the device
+ * @get_queue: get a queue from the device
+ * @put_queue: free a queue to the device
+ * @start_queue: make the queue start work after get_queue
+ * @stop_queue: make the queue stop work before put_queue
+ * @is_q_updated: check whether the task is finished
+ * @mmap: mmap addresses of queue to user space
+ * @ioctl: ioctl for user space users of the queue
+ */
+struct uacce_ops {
+ int (*get_available_instances)(struct uacce_device *uacce);
+ int (*get_queue)(struct uacce_device *uacce, unsigned long arg,
+ struct uacce_queue *q);
+ void (*put_queue)(struct uacce_queue *q);
+ int (*start_queue)(struct uacce_queue *q);
+ void (*stop_queue)(struct uacce_queue *q);
+ int (*is_q_updated)(struct uacce_queue *q);
+ int (*mmap)(struct uacce_queue *q, struct vm_area_struct *vma,
+ struct uacce_qfile_region *qfr);
+ long (*ioctl)(struct uacce_queue *q, unsigned int cmd,
+ unsigned long arg);
+};
+
+/**
+ * struct uacce_interface - interface required for uacce_register()
+ * @name: the uacce device name. Will show up in sysfs
+ * @flags: uacce device attributes
+ * @ops: pointer to the struct uacce_ops
+ */
+struct uacce_interface {
+ char name[UACCE_MAX_NAME_SIZE];
+ unsigned int flags;
+ const struct uacce_ops *ops;
+};
+
+enum uacce_q_state {
+ UACCE_Q_ZOMBIE = 0,
+ UACCE_Q_INIT,
+ UACCE_Q_STARTED,
+};
+
+/**
+ * struct uacce_queue
+ * @uacce: pointer to uacce
+ * @priv: private pointer
+ * @wait: wait queue head
+ * @list: link in the uacce_mm queues list
+ * @uacce_mm: the corresponding mm
+ * @qfrs: pointer of qfr regions
+ * @state: queue state machine
+ */
+struct uacce_queue {
+ struct uacce_device *uacce;
+ void *priv;
+ wait_queue_head_t wait;
+ struct list_head list;
+ struct uacce_mm *uacce_mm;
+ struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
+ enum uacce_q_state state;
+};
+
+/**
+ * struct uacce_device
+ * @algs: supported algorithms
+ * @api_ver: api version
+ * @ops: pointer to the struct uacce_ops
+ * @qf_pg_num: page numbers of the queue file regions
+ * @parent: pointer to the parent device
+ * @is_vf: whether virtual function
+ * @flags: uacce attributes
+ * @dev_id: id of the uacce device
+ * @cdev: cdev of the uacce
+ * @dev: dev of the uacce
+ * @priv: private pointer of the uacce
+ * @mm_list: list head of uacce_mm->list
+ * @mm_lock: lock for mm_list
+ * @inode: core vfs
+ */
+struct uacce_device {
+ const char *algs;
+ const char *api_ver;
+ const struct uacce_ops *ops;
+ unsigned long qf_pg_num[UACCE_MAX_REGION];
+ struct device *parent;
+ bool is_vf;
+ u32 flags;
+ u32 dev_id;
+ struct cdev *cdev;
+ struct device dev;
+ void *priv;
+ struct list_head mm_list;
+ struct mutex mm_lock;
+ struct inode *inode;
+};
+
+/**
+ * struct uacce_mm - keep track of queues bound to a process
+ * @list: link in the uacce_device mm_list
+ * @queues: list of queues
+ * @mm: the mm struct
+ * @lock: protects the list of queues
+ * @pasid: pasid of the uacce_mm
+ * @handle: iommu_sva handle returned from iommu_sva_bind_device
+ */
+struct uacce_mm {
+ struct list_head list;
+ struct list_head queues;
+ struct mm_struct *mm;
+ struct mutex lock;
+ int pasid;
+ struct iommu_sva *handle;
+};
+
+#if IS_ENABLED(CONFIG_UACCE)
+
+struct uacce_device *uacce_alloc(struct device *parent,
+ struct uacce_interface *interface);
+int uacce_register(struct uacce_device *uacce);
+void uacce_remove(struct uacce_device *uacce);
+
+#else /* CONFIG_UACCE */
+
+static inline
+struct uacce_device *uacce_alloc(struct device *parent,
+ struct uacce_interface *interface)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int uacce_register(struct uacce_device *uacce)
+{
+ return -EINVAL;
+}
+
+static inline void uacce_remove(struct uacce_device *uacce) {}
+
+#endif /* CONFIG_UACCE */
+
+#endif /* _LINUX_UACCE_H */
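
A sketch of how a parent driver might register itself with the new framework; example_uacce_ops is a hypothetical ops table:

	static int example_probe(struct platform_device *pdev)
	{
		struct uacce_interface interface = {
			.name = "example",
			.ops = &example_uacce_ops,	/* hypothetical ops table */
		};
		struct uacce_device *uacce;
		int ret;

		uacce = uacce_alloc(&pdev->dev, &interface);
		if (IS_ERR(uacce))
			return PTR_ERR(uacce);

		ret = uacce_register(uacce);
		if (ret)
			uacce_remove(uacce);
		return ret;
	}
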
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 01081c4726c0..54bf6b118401 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -24,10 +24,10 @@ struct uio_map;
* struct uio_mem - description of a UIO memory region
* @name: name of the memory region for identification
* @addr: address of the device's memory rounded to page
- * size (phys_addr is used since addr can be
- * logical, virtual, or physical & phys_addr_t
- * should always be large enough to handle any of
- * the address types)
+ * size (phys_addr is used since addr can be
+ * logical, virtual, or physical & phys_addr_t
+ * should always be large enough to handle any of
+ * the address types)
* @offs: offset of device memory within the page
* @size: size of IO (multiple of page size)
* @memtype: type of memory addr points to
@@ -67,16 +67,16 @@ struct uio_port {
#define MAX_UIO_PORT_REGIONS 5
struct uio_device {
- struct module *owner;
+ struct module *owner;
struct device dev;
- int minor;
- atomic_t event;
- struct fasync_struct *async_queue;
- wait_queue_head_t wait;
- struct uio_info *info;
+ int minor;
+ atomic_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+ struct uio_info *info;
struct mutex info_lock;
- struct kobject *map_dir;
- struct kobject *portio_dir;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
};
/**
@@ -123,6 +123,15 @@ extern int __must_check
extern void uio_unregister_device(struct uio_info *info);
extern void uio_event_notify(struct uio_info *info);
+extern int __must_check
+ __devm_uio_register_device(struct module *owner,
+ struct device *parent,
+ struct uio_info *info);
+
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define devm_uio_register_device(parent, info) \
+ __devm_uio_register_device(THIS_MODULE, parent, info)
+
/* defines for uio_info->irq */
#define UIO_IRQ_CUSTOM -1
#define UIO_IRQ_NONE 0
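
A sketch of the new managed registration path in a probe routine; the name and version strings are illustrative:

	static int example_probe(struct platform_device *pdev)
	{
		struct uio_info *info;

		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->name = "example-uio";
		info->version = "0.1";
		info->irq = UIO_IRQ_NONE;

		return devm_uio_register_device(&pdev->dev, info);
	}
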
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
index 8bdb8fa01bd4..c43ff5918c8a 100644
--- a/include/linux/unaligned/be_byteshift.h
+++ b/include/linux/unaligned/be_byteshift.h
@@ -40,17 +40,17 @@ static inline void __put_unaligned_be64(u64 val, u8 *p)
static inline u16 get_unaligned_be16(const void *p)
{
- return __get_unaligned_be16((const u8 *)p);
+ return __get_unaligned_be16(p);
}
static inline u32 get_unaligned_be32(const void *p)
{
- return __get_unaligned_be32((const u8 *)p);
+ return __get_unaligned_be32(p);
}
static inline u64 get_unaligned_be64(const void *p)
{
- return __get_unaligned_be64((const u8 *)p);
+ return __get_unaligned_be64(p);
}
static inline void put_unaligned_be16(u16 val, void *p)
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
index 57d3114656e5..303289492859 100644
--- a/include/linux/unaligned/generic.h
+++ b/include/linux/unaligned/generic.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_UNALIGNED_GENERIC_H
#define _LINUX_UNALIGNED_GENERIC_H
+#include <linux/types.h>
+
/*
* Cause a link-time error if we try an unaligned access other than
* 1,2,4 or 8 bytes long
@@ -66,4 +68,48 @@ extern void __bad_unaligned_access_size(void);
} \
(void)0; })
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
+
#endif /* _LINUX_UNALIGNED_GENERIC_H */
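
A sketch of round-tripping a 3-byte big-endian field with the new 24-bit helpers:

	const u8 hdr[4] = { 0x01, 0x00, 0x02, 0x00 };
	u8 out[3];

	u32 len = get_unaligned_be24(hdr + 1);	/* 0x000200 == 512 */
	put_unaligned_be24(len, out);		/* out = { 0x00, 0x02, 0x00 } */
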
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
index 1628b75866f0..2248dcb0df76 100644
--- a/include/linux/unaligned/le_byteshift.h
+++ b/include/linux/unaligned/le_byteshift.h
@@ -40,17 +40,17 @@ static inline void __put_unaligned_le64(u64 val, u8 *p)
static inline u16 get_unaligned_le16(const void *p)
{
- return __get_unaligned_le16((const u8 *)p);
+ return __get_unaligned_le16(p);
}
static inline u32 get_unaligned_le32(const void *p)
{
- return __get_unaligned_le32((const u8 *)p);
+ return __get_unaligned_le32(p);
}
static inline u64 get_unaligned_le64(const void *p)
{
- return __get_unaligned_le64((const u8 *)p);
+ return __get_unaligned_le64(p);
}
static inline void put_unaligned_le16(u16 val, void *p)
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index 5e31740c7e40..ead8c9a47c6a 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -156,6 +156,18 @@ struct uac2_feature_unit_descriptor {
__u8 bmaControls[]; /* variable length */
} __attribute__((packed));
+/* 4.7.2.10 Effect Unit Descriptor */
+
+struct uac2_effect_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __le16 wEffectType;
+ __u8 bSourceID;
+ __u8 bmaControls[]; /* variable length */
+} __attribute__((packed));
+
/* 4.9.2 Class-Specific AS Interface Descriptor */
struct uac2_as_header_descriptor {
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index ac9d71e24b81..a8e5f3ea9bb2 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -14,6 +14,8 @@
#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <asm-generic/pgtable_uffd.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -34,11 +36,14 @@ extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
- bool *mmap_changing);
+ bool *mmap_changing, __u64 mode);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long len,
bool *mmap_changing);
+extern int mwriteprotect_range(struct mm_struct *dst_mm,
+ unsigned long start, unsigned long len,
+ bool enable_wp, bool *mmap_changing);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
@@ -52,6 +57,23 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma)
return vma->vm_flags & VM_UFFD_MISSING;
}
+static inline bool userfaultfd_wp(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_UFFD_WP;
+}
+
+static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
+ pte_t pte)
+{
+ return userfaultfd_wp(vma) && pte_uffd_wp(pte);
+}
+
+static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+ return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
+}
+
static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
@@ -96,6 +118,24 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma)
return false;
}
+static inline bool userfaultfd_wp(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
+ pte_t pte)
+{
+ return false;
+}
+
+static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+ return false;
+}
+
+
static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
return false;
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e42a711a2800..5d92ee15d098 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -26,6 +26,9 @@
* operations documented below
* @mmap: Perform mmap(2) on a region of the device file descriptor
* @request: Request for the bus driver to release the device
+ * @match: Optional device name match callback (return: 0 for no match, >0 for
+ * match, -errno for abort, e.g. a match with insufficient or incorrect
+ * additional args)
*/
struct vfio_device_ops {
char *name;
@@ -39,6 +42,7 @@ struct vfio_device_ops {
unsigned long arg);
int (*mmap)(void *device_data, struct vm_area_struct *vma);
void (*request)(void *device_data, unsigned int count);
+ int (*match)(void *device_data, char *buf);
};
extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
@@ -82,6 +86,8 @@ struct vfio_iommu_driver_ops {
struct notifier_block *nb);
int (*unregister_notifier)(void *iommu_data,
struct notifier_block *nb);
+ int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
+ void *data, size_t count, bool write);
};
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
@@ -94,6 +100,8 @@ extern void vfio_unregister_iommu_driver(
*/
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
+extern struct vfio_group *
+vfio_group_get_external_user_from_dev(struct device *dev);
extern bool vfio_external_group_match_file(struct vfio_group *group,
struct file *filep);
extern int vfio_external_user_iommu_id(struct vfio_group *group);
@@ -107,6 +115,15 @@ extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
int npage);
+extern int vfio_group_pin_pages(struct vfio_group *group,
+ unsigned long *user_iova_pfn, int npage,
+ int prot, unsigned long *phys_pfn);
+extern int vfio_group_unpin_pages(struct vfio_group *group,
+ unsigned long *user_iova_pfn, int npage);
+
+extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
+ void *data, size_t len, bool write);
+
/* each type has independent events */
enum vfio_notify_type {
VFIO_IOMMU_NOTIFY = 0,
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 47a3441cf4c4..ffef0f279747 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -73,9 +73,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
THP_FAULT_ALLOC,
THP_FAULT_FALLBACK,
+ THP_FAULT_FALLBACK_CHARGE,
THP_COLLAPSE_ALLOC,
THP_COLLAPSE_ALLOC_FAILED,
THP_FILE_ALLOC,
+ THP_FILE_FALLBACK,
+ THP_FILE_FALLBACK_CHARGE,
THP_FILE_MAPPED,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
@@ -115,6 +118,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; })
+#define THP_FILE_FALLBACK ({ BUILD_BUG(); 0; })
+#define THP_FILE_FALLBACK_CHARGE ({ BUILD_BUG(); 0; })
#define THP_FILE_MAPPED ({ BUILD_BUG(); 0; })
#endif
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index e48554e6526c..8b505d22fc0e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -665,7 +665,7 @@ int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif
-int __init workqueue_init_early(void);
-int __init workqueue_init(void);
+void __init workqueue_init_early(void);
+void __init workqueue_init(void);
#endif
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f73e1775ded0..14c893433139 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -32,8 +32,8 @@
* The following internal entries have a special meaning:
*
* 0-62: Sibling entries
- * 256: Zero entry
- * 257: Retry entry
+ * 256: Retry entry
+ * 257: Zero entry
*
* Errors are also represented as internal entries, but use the negative
* space (-4094 to -2). They're never stored in the slots array; only
@@ -1648,6 +1648,7 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
+ void *entry;
unsigned int offset;
if (unlikely(xas_not_node(node) || node->shift))
@@ -1659,7 +1660,10 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
return NULL;
if (offset == XA_CHUNK_SIZE)
return xas_find_marked(xas, max, mark);
- return xa_entry(xas->xa, node, offset);
+ entry = xa_entry(xas->xa, node, offset);
+ if (!entry)
+ return xas_find_marked(xas, max, mark);
+ return entry;
}
/*
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 6dad031be3c2..4cf6e11f4a3c 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -102,7 +102,8 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size);
int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
- const void *value, size_t size, int flags);
+ const void *value, size_t size, int flags,
+ ssize_t *removed_size);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer,
size_t size);
void simple_xattr_list_add(struct simple_xattrs *xattrs,
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index acc60d8a3b3b..dd5b5bd781a4 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -73,7 +73,6 @@ enum p9_req_status_t {
* @wq: wait_queue for the client to block on for this request
* @tc: the request fcall structure
* @rc: the response fcall structure
- * @aux: transport specific data (provided for trans_fd migration)
* @req_list: link for higher level objects to chain requests
*/
struct p9_req_t {
@@ -83,7 +82,6 @@ struct p9_req_t {
wait_queue_head_t wq;
struct p9_fcall tc;
struct p9_fcall rc;
- void *aux;
struct list_head req_list;
};
@@ -200,6 +198,8 @@ int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
+int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ int *err);
int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f7543c095b33..9947eb1e9eb6 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -254,6 +254,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
return rt->rt6i_flags & RTF_ANYCAST ||
(rt->rt6i_dst.plen < 127 &&
+ !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 6eb627b3c99b..4ff7c81e6717 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -901,7 +901,7 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
{
struct nft_expr *expr;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
+ if (__nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
expr = nft_set_ext_expr(ext);
expr->ops->eval(expr, regs, pkt);
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 6d84784d33fa..3e8c6d4b4b59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2553,9 +2553,9 @@ sk_is_refcounted(struct sock *sk)
}
/**
- * skb_steal_sock
- * @skb to steal the socket from
- * @refcounted is set to true if the socket is reference-counted
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
*/
static inline struct sock *
skb_steal_sock(struct sk_buff *skb, bool *refcounted)
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 870b5e6c06db..e06d13388ae7 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -39,6 +39,7 @@
int rdma_query_gid(struct ib_device *device, u8 port_num, int index,
union ib_gid *gid);
+void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr);
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 8ec482e391aa..058cfbc2b37f 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -360,7 +360,6 @@ struct ib_cm_req_param {
u32 starting_psn;
const void *private_data;
u8 private_data_len;
- u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
u8 remote_cm_response_timeout;
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index f8982e4e9702..2fd9bfb6d648 100644
--- a/include/rdma/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -73,7 +73,7 @@ struct ib_pool_fmr {
int remap_count;
u64 io_virtual_address;
int page_list_len;
- u64 page_list[0];
+ u64 page_list[];
};
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1f779fad3a1e..bbc5cfb57cd2 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1876,7 +1876,7 @@ struct ib_flow_eth_filter {
__be16 ether_type;
__be16 vlan_tag;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_eth {
@@ -1890,7 +1890,7 @@ struct ib_flow_ib_filter {
__be16 dlid;
__u8 sl;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_ib {
@@ -1915,7 +1915,7 @@ struct ib_flow_ipv4_filter {
u8 ttl;
u8 flags;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_ipv4 {
@@ -1933,7 +1933,7 @@ struct ib_flow_ipv6_filter {
u8 traffic_class;
u8 hop_limit;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_ipv6 {
@@ -1947,7 +1947,7 @@ struct ib_flow_tcp_udp_filter {
__be16 dst_port;
__be16 src_port;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_tcp_udp {
@@ -1959,7 +1959,7 @@ struct ib_flow_spec_tcp_udp {
struct ib_flow_tunnel_filter {
__be32 tunnel_id;
- u8 real_sz[0];
+ u8 real_sz[];
};
/* ib_flow_spec_tunnel describes the Vxlan tunnel
@@ -1976,7 +1976,7 @@ struct ib_flow_esp_filter {
__be32 spi;
__be32 seq;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_esp {
@@ -1991,7 +1991,7 @@ struct ib_flow_gre_filter {
__be16 protocol;
__be32 key;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_gre {
@@ -2004,7 +2004,7 @@ struct ib_flow_spec_gre {
struct ib_flow_mpls_filter {
__be32 tag;
/* Must be last */
- u8 real_sz[0];
+ u8 real_sz[];
};
struct ib_flow_spec_mpls {
@@ -3627,35 +3627,8 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
bad_recv_wr ? : &dummy);
}
-/**
- * ib_create_qp_user - Creates a QP associated with the specified protection
- * domain.
- * @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the
- * QP. If QP creation succeeds, then the attributes are updated to
- * the actual capabilities of the created QP.
- * @udata: Valid user data or NULL for kernel objects
- */
-struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata);
-
-/**
- * ib_create_qp - Creates a kernel QP associated with the specified protection
- * domain.
- * @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the
- * QP. If QP creation succeeds, then the attributes are updated to
- * the actual capabilities of the created QP.
- * @udata: Valid user data or NULL for kernel objects
- *
- * NOTE: for user qp use ib_create_qp_user with valid udata!
- */
-static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr)
-{
- return ib_create_qp_user(pd, qp_init_attr, NULL);
-}
+struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr);
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
index 0c07a70bd7f6..e90b149fe92a 100644
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -75,7 +75,7 @@
struct opa_vnic_rdma_netdev {
struct rdma_netdev rn; /* keep this first */
/* followed by device private data */
- char *dev_priv[0];
+ char *dev_priv[];
};
static inline void *opa_vnic_priv(const struct net_device *dev)
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 72a3856d4057..ce6c888f7fe7 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -85,7 +85,7 @@ struct rvt_mregion {
u8 lkey_published; /* in global table */
struct percpu_ref refcount;
struct completion comp; /* complete when refcount goes to zero */
- struct rvt_segarray *map[0]; /* the segments */
+ struct rvt_segarray *map[]; /* the segments */
};
#define RVT_MAX_LKEY_TABLE_BITS 23
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0d5c70e2d8ab..5fc10108703a 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -191,7 +191,7 @@ struct rvt_swqe {
u32 ssn; /* send sequence number */
u32 length; /* total length of data in sg_list */
void *priv; /* driver dependent field */
- struct rvt_sge sg_list[0];
+ struct rvt_sge sg_list[];
};
/**
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 28570ac2b6a0..9f3b1e004046 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -173,7 +173,7 @@ enum uapi_radix_data {
UVERBS_API_OBJ_KEY_BITS = 5,
UVERBS_API_OBJ_KEY_SHIFT =
UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT,
- UVERBS_API_OBJ_KEY_NUM_CORE = 24,
+ UVERBS_API_OBJ_KEY_NUM_CORE = 20,
UVERBS_API_OBJ_KEY_NUM_DRIVER =
(1 << UVERBS_API_OBJ_KEY_BITS) - UVERBS_API_OBJ_KEY_NUM_CORE,
UVERBS_API_OBJ_KEY_MASK = GENMASK(31, UVERBS_API_OBJ_KEY_SHIFT),
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 92b11c7e0b4f..5225a23f2d0e 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -60,6 +60,7 @@ enum iscsi_uevent_e {
ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30,
ISCSI_UEVENT_SET_CHAP = UEVENT_BASE + 31,
ISCSI_UEVENT_GET_HOST_STATS = UEVENT_BASE + 32,
+ ISCSI_UEVENT_DESTROY_SESSION_ASYNC = UEVENT_BASE + 33,
/* up events */
ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -311,7 +312,7 @@ enum iscsi_param_type {
struct iscsi_param_info {
uint32_t len; /* Actual length of the param value */
uint16_t param; /* iscsi param */
- uint8_t value[0]; /* length sized value follows */
+ uint8_t value[]; /* length sized value follows */
} __packed;
struct iscsi_iface_param_info {
@@ -320,7 +321,7 @@ struct iscsi_iface_param_info {
uint16_t param; /* iscsi param value */
uint8_t iface_type; /* IPv4 or IPv6 */
uint8_t param_type; /* iscsi_param_type */
- uint8_t value[0]; /* length sized value follows */
+ uint8_t value[]; /* length sized value follows */
} __packed;
/*
@@ -697,7 +698,7 @@ enum iscsi_flashnode_param {
struct iscsi_flashnode_param_info {
uint32_t len; /* Actual length of the param */
uint16_t param; /* iscsi param value */
- uint8_t value[0]; /* length sized value follows */
+ uint8_t value[]; /* length sized value follows */
} __packed;
enum iscsi_discovery_parent_type {
@@ -815,7 +816,7 @@ struct iscsi_stats {
* up to ISCSI_STATS_CUSTOM_MAX
*/
uint32_t custom_length;
- struct iscsi_stats_custom custom[0]
+ struct iscsi_stats_custom custom[]
__attribute__ ((aligned (sizeof(uint64_t))));
};
@@ -946,7 +947,7 @@ struct iscsi_offload_host_stats {
* up to ISCSI_HOST_STATS_CUSTOM_MAX
*/
uint32_t custom_length;
- struct iscsi_host_stats_custom custom[0]
+ struct iscsi_host_stats_custom custom[]
__aligned(sizeof(uint64_t));
};
diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h
index fa0c820a1663..6b8128005af8 100644
--- a/include/scsi/scsi_bsg_iscsi.h
+++ b/include/scsi/scsi_bsg_iscsi.h
@@ -52,7 +52,7 @@ struct iscsi_bsg_host_vendor {
uint64_t vendor_id;
/* start of vendor command area */
- uint32_t vendor_cmd[0];
+ uint32_t vendor_cmd[];
};
/* Response:
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a2849bb9cd19..80ac89e47b47 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -159,7 +159,6 @@ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
}
-extern void scsi_put_command(struct scsi_cmnd *);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index f8312a3e5b42..c3cba2aaf934 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -110,7 +110,6 @@ struct scsi_device {
atomic_t device_blocked; /* Device returned QUEUE_FULL. */
spinlock_t list_lock;
- struct list_head cmd_list; /* queue of in use SCSI Command structures */
struct list_head starved_entry;
unsigned short queue_depth; /* How deep of a queue we want */
unsigned short max_queue_depth; /* max queue depth */
@@ -204,6 +203,9 @@ struct scsi_device {
unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
* creation time */
+
+ bool offline_already; /* Device offline message logged */
+
atomic_t disk_events_disable_depth; /* disable depth for disk events */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
@@ -231,7 +233,7 @@ struct scsi_device {
struct mutex state_mutex;
enum scsi_device_state sdev_state;
struct task_struct *quiesced_by;
- unsigned long sdev_data[0];
+ unsigned long sdev_data[];
} __attribute__((aligned(sizeof(unsigned long))));
#define to_scsi_device(d) \
@@ -315,7 +317,7 @@ struct scsi_target {
char scsi_level;
enum scsi_target_state state;
void *hostdata; /* available to low-level driver */
- unsigned long starget_data[0]; /* for the transport */
+ unsigned long starget_data[]; /* for the transport */
/* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index f577647bf5f2..822e8cda8d9b 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -62,6 +62,9 @@ struct scsi_host_template {
void __user *arg);
#endif
+ int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+ int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+
/*
* The queuecommand function is used to queue up a scsi
* command block to the LLDD. When the driver finished
@@ -426,9 +429,6 @@ struct scsi_host_template {
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
- /* True if the low-level driver supports blk-mq only */
- unsigned force_blk_mq:1;
-
/*
* Countdown for host blocking with no commands outstanding.
*/
@@ -627,8 +627,6 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
- unsigned use_cmd_list:1;
-
/* Host responded with short (<36 bytes) INQUIRY result */
unsigned short_inquiry:1;
@@ -685,7 +683,7 @@ struct Scsi_Host {
* and also because some compilers (m68k) don't automatically force
* alignment to a long boundary.
*/
- unsigned long hostdata[0] /* Used for storage of host specific stuff */
+ unsigned long hostdata[] /* Used for storage of host specific stuff */
__attribute__ ((aligned (sizeof(unsigned long))));
};
@@ -735,6 +733,8 @@ extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
+extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+ int status);
static inline int __must_check scsi_add_host(struct Scsi_Host *host,
struct device *dev)
@@ -759,6 +759,11 @@ static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
+extern int scsi_host_block(struct Scsi_Host *shost);
+extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
+
+void scsi_host_busy_iter(struct Scsi_Host *,
+ bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);
struct class_container;
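The new scsi_host_busy_iter() walks every busy command on a host. A hedged sketch of a counting callback (the function and counter names are illustrative, and returning true is taken to continue the walk, matching the blk-mq busy-iter convention):

/* Hedged sketch: count busy commands via scsi_host_busy_iter(). */
static bool count_busy_cmd(struct scsi_cmnd *cmd, void *data, bool reserved)
{
	unsigned int *busy = data;

	(*busy)++;
	return true;	/* keep iterating; false would stop early */
}

static unsigned int count_busy(struct Scsi_Host *shost)
{
	unsigned int busy = 0;

	scsi_host_busy_iter(shost, count_busy_cmd, &busy);
	return busy;
}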
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index 4fe69d863b5d..b465799f4d2d 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -27,7 +27,7 @@ struct scsi_device;
typedef struct scsi_ioctl_command {
unsigned int inlen;
unsigned int outlen;
- unsigned char data[0];
+ unsigned char data[];
} Scsi_Ioctl_Command;
typedef struct scsi_idlun {
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 325ae731d9ad..bdcb6d69d154 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -188,8 +188,16 @@ extern void iscsi_ping_comp_event(uint32_t host_no,
uint32_t status, uint32_t pid,
uint32_t data_size, uint8_t *data);
+/* iscsi class connection state */
+enum iscsi_connection_state {
+ ISCSI_CONN_UP = 0,
+ ISCSI_CONN_DOWN,
+ ISCSI_CONN_FAILED,
+};
+
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
+ struct list_head conn_list_err; /* item in connlist_err */
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
uint32_t cid; /* connection id */
@@ -197,6 +205,7 @@ struct iscsi_cls_conn {
struct iscsi_endpoint *ep;
struct device dev; /* sysfs transport/container device */
+ enum iscsi_connection_state state;
};
#define iscsi_dev_to_conn(_dev) \
@@ -225,6 +234,7 @@ struct iscsi_cls_session {
struct work_struct unblock_work;
struct work_struct scan_work;
struct work_struct unbind_work;
+ struct work_struct destroy_work;
/* recovery fields */
int recovery_tmo;
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 29c7ad04d2e2..7327e12f3373 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -24,7 +24,7 @@
* http://sg.danny.cz/sg [alternatively check the MAINTAINERS file]
* The documentation for the sg version 3 driver can be found at:
* http://sg.danny.cz/sg/p/sg_v3_ho.html
- * Also see: <kernel_source>/Documentation/scsi/scsi-generic.txt
+ * Also see: <kernel_source>/Documentation/scsi/scsi-generic.rst
*
* For utility and test programs see: http://sg.danny.cz/sg/sg3_utils.html
*/
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 9220758d5087..177d8026e96f 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -109,7 +109,7 @@ struct srp_direct_buf {
struct srp_indirect_buf {
struct srp_direct_buf table_desc;
__be32 len;
- struct srp_direct_buf desc_list[0];
+ struct srp_direct_buf desc_list[];
} __attribute__((packed));
/* Immediate data buffer descriptor as defined in SRP2. */
@@ -244,7 +244,7 @@ struct srp_cmd {
u8 reserved4;
u8 add_cdb_len;
u8 cdb[16];
- u8 add_data[0];
+ u8 add_data[];
};
enum {
@@ -274,7 +274,7 @@ struct srp_rsp {
__be32 data_in_res_cnt;
__be32 sense_data_len;
__be32 resp_data_len;
- u8 data[0];
+ u8 data[];
} __attribute__((packed));
struct srp_cred_req {
@@ -306,7 +306,7 @@ struct srp_aer_req {
struct scsi_lun lun;
__be32 sense_data_len;
u32 reserved3;
- u8 sense_data[0];
+ u8 sense_data[];
} __attribute__((packed));
struct srp_aer_rsp {
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index 672cfb58046f..c9d849924f89 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright NXP
+ * Copyright 2017-2019 NXP
*
*/
#ifndef __FSL_DPAA2_IO_H
@@ -109,6 +109,10 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
const struct dpaa2_fd *fd);
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, u32 fqid,
+ const struct dpaa2_fd *fd, int number_of_frame);
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, u32 *fqid,
+ const struct dpaa2_fd *fd, int number_of_frame);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,
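The two new multi-frame enqueue helpers batch frame descriptors in one portal operation. A hedged usage sketch, assuming the return value is the number of frames accepted (or a negative errno) as batch enqueue APIs conventionally report:

/* Hedged sketch: push a burst of pre-built frames to one FQ. */
static int enqueue_burst(struct dpaa2_io *io, u32 fqid,
			 const struct dpaa2_fd *fds, int nframes)
{
	int sent = 0;

	while (sent < nframes) {
		int ret = dpaa2_io_service_enqueue_multiple_fq(io, fqid,
							       fds + sent,
							       nframes - sent);
		if (ret < 0)
			return ret;	/* e.g. portal busy */
		sent += ret;
	}
	return sent;
}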
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index ba0e838f962a..dc4e79468094 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -178,10 +178,10 @@ struct ucc_fast_info {
struct ucc_fast_private {
struct ucc_fast_info *uf_info;
struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
- u32 __iomem *p_ucce; /* a pointer to the event register in memory. */
- u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */
+ __be32 __iomem *p_ucce; /* a pointer to the event register in memory. */
+ __be32 __iomem *p_uccm; /* a pointer to the mask register in memory. */
#ifdef CONFIG_UGETH_TX_ON_DEMAND
- u16 __iomem *p_utodr; /* pointer to the transmit on demand register */
+ __be16 __iomem *p_utodr;/* pointer to the transmit on demand register */
#endif
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h
index d187a6be83bc..11a216e4e919 100644
--- a/include/soc/fsl/qe/ucc_slow.h
+++ b/include/soc/fsl/qe/ucc_slow.h
@@ -184,7 +184,7 @@ struct ucc_slow_info {
struct ucc_slow_private {
struct ucc_slow_info *us_info;
struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
- struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */
+ struct ucc_slow_pram __iomem *us_pram; /* a pointer to the parameter RAM */
s32 us_pram_offset;
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
@@ -196,13 +196,12 @@ struct ucc_slow_private {
and length for first BD in a frame */
s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
- struct qe_bd *confBd; /* next BD for confirm after Tx */
- struct qe_bd *tx_bd; /* next BD for new Tx request */
- struct qe_bd *rx_bd; /* next BD to collect after Rx */
+ struct qe_bd __iomem *confBd; /* next BD for confirm after Tx */
+ struct qe_bd __iomem *tx_bd; /* next BD for new Tx request */
+ struct qe_bd __iomem *rx_bd; /* next BD to collect after Rx */
void *p_rx_frame; /* accumulating receive frame */
- u16 *p_ucce; /* a pointer to the event register in memory.
- */
- u16 *p_uccm; /* a pointer to the mask register in memory */
+ __be16 __iomem *p_ucce; /* a pointer to the event register in memory */
+ __be16 __iomem *p_uccm; /* a pointer to the mask register in memory */
u16 saved_uccm; /* a saved mask for the RX Interrupt bits */
#ifdef STATISTICS
u32 tx_frames; /* Transmitted frames counters */
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
index 619e07c75da9..f9ec353d24a5 100644
--- a/include/soc/qcom/rpmh.h
+++ b/include/soc/qcom/rpmh.h
@@ -20,8 +20,6 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state,
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n);
-int rpmh_flush(const struct device *dev);
-
int rpmh_invalidate(const struct device *dev);
#else
@@ -40,9 +38,6 @@ static inline int rpmh_write_batch(const struct device *dev,
const struct tcs_cmd *cmd, u32 *n)
{ return -ENODEV; }
-static inline int rpmh_flush(const struct device *dev)
-{ return -ENODEV; }
-
static inline int rpmh_invalidate(const struct device *dev)
{ return -ENODEV; }
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index cac6f610b3fe..8f8e73e5cd45 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef _ABI_BPMP_ABI_H_
@@ -2119,6 +2119,7 @@ enum {
CMD_UPHY_PCIE_LANE_MARGIN_STATUS = 2,
CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3,
CMD_UPHY_PCIE_CONTROLLER_STATE = 4,
+ CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF = 5,
CMD_UPHY_MAX,
};
@@ -2151,6 +2152,11 @@ struct cmd_uphy_pcie_controller_state_request {
uint8_t enable;
} __ABI_PACKED;
+struct cmd_uphy_ep_controller_pll_off_request {
+ /** @brief EP controller number, valid: 0, 4, 5 */
+ uint8_t ep_controller;
+} __ABI_PACKED;
+
/**
* @ingroup UPHY
* @brief Request with #MRQ_UPHY
@@ -2165,6 +2171,7 @@ struct cmd_uphy_pcie_controller_state_request {
* |CMD_UPHY_PCIE_LANE_MARGIN_STATUS | |
* |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request |
* |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request |
+ * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF |cmd_uphy_ep_controller_pll_off_request |
*
*/
@@ -2178,6 +2185,7 @@ struct mrq_uphy_request {
struct cmd_uphy_margin_control_request uphy_set_margin_control;
struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
struct cmd_uphy_pcie_controller_state_request controller_state;
+ struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
} __UNION_ANON;
} __ABI_PACKED;
diff --git a/include/soc/tegra/cpuidle.h b/include/soc/tegra/cpuidle.h
index 029ba1f4b2cc..5665975015d8 100644
--- a/include/soc/tegra/cpuidle.h
+++ b/include/soc/tegra/cpuidle.h
@@ -6,7 +6,7 @@
#ifndef __SOC_TEGRA_CPUIDLE_H__
#define __SOC_TEGRA_CPUIDLE_H__
-#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_CPU_IDLE)
+#ifdef CONFIG_ARM_TEGRA_CPUIDLE
void tegra_cpuidle_pcie_irqs_in_use(void);
#else
static inline void tegra_cpuidle_pcie_irqs_in_use(void)
diff --git a/arch/arm/mach-tegra/irq.h b/include/soc/tegra/irq.h
index 7a94cf121448..8eb11a7109e4 100644
--- a/arch/arm/mach-tegra/irq.h
+++ b/include/soc/tegra/irq.h
@@ -3,9 +3,11 @@
* Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
*/
-#ifndef __TEGRA_IRQ_H
-#define __TEGRA_IRQ_H
+#ifndef __SOC_TEGRA_IRQ_H
+#define __SOC_TEGRA_IRQ_H
+#if defined(CONFIG_ARM)
bool tegra_pending_sgi(void);
-
#endif
+
+#endif /* __SOC_TEGRA_IRQ_H */
diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h
index 951fcd738d55..08477d7bfab9 100644
--- a/include/soc/tegra/pm.h
+++ b/include/soc/tegra/pm.h
@@ -6,6 +6,8 @@
#ifndef __SOC_TEGRA_PM_H__
#define __SOC_TEGRA_PM_H__
+#include <linux/errno.h>
+
enum tegra_suspend_mode {
TEGRA_SUSPEND_NONE = 0,
TEGRA_SUSPEND_LP2, /* CPU voltage off */
@@ -20,6 +22,12 @@ tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode);
/* low-level resume entry point */
void tegra_resume(void);
+
+int tegra30_pm_secondary_cpu_suspend(unsigned long arg);
+void tegra_pm_clear_cpu_in_lp2(void);
+void tegra_pm_set_cpu_in_lp2(void);
+int tegra_pm_enter_lp2(void);
+int tegra_pm_park_secondary_cpu(unsigned long cpu);
#else
static inline enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode)
@@ -30,6 +38,29 @@ tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode)
static inline void tegra_resume(void)
{
}
+
+static inline int tegra30_pm_secondary_cpu_suspend(unsigned long arg)
+{
+ return -ENOTSUPP;
+}
+
+static inline void tegra_pm_clear_cpu_in_lp2(void)
+{
+}
+
+static inline void tegra_pm_set_cpu_in_lp2(void)
+{
+}
+
+static inline int tegra_pm_enter_lp2(void)
+{
+ return -ENOTSUPP;
+}
+
+static inline int tegra_pm_park_secondary_cpu(unsigned long cpu)
+{
+ return -ENOTSUPP;
+}
#endif /* CONFIG_PM_SLEEP */
#endif /* __SOC_TEGRA_PM_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 57e58faf660b..0dd52b0a5c1b 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -113,8 +113,9 @@ enum tegra_io_pad {
TEGRA_IO_PAD_PEX_CLK_BIAS,
TEGRA_IO_PAD_PEX_CLK1,
TEGRA_IO_PAD_PEX_CLK2,
- TEGRA_IO_PAD_PEX_CLK2_BIAS,
TEGRA_IO_PAD_PEX_CLK3,
+ TEGRA_IO_PAD_PEX_CLK_2_BIAS,
+ TEGRA_IO_PAD_PEX_CLK_2,
TEGRA_IO_PAD_PEX_CNTRL,
TEGRA_IO_PAD_PEX_CTL2,
TEGRA_IO_PAD_PEX_L0_RST_N,
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index bc88d6f964da..6ce8effa0b12 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -23,7 +23,6 @@ struct snd_compr_ops;
* struct snd_compr_runtime: runtime stream description
* @state: stream state
* @ops: pointer to DSP callbacks
- * @dma_buffer_p: runtime dma buffer pointer
* @buffer: pointer to kernel buffer, valid only when not in mmap mode or
* DSP doesn't implement copy
* @buffer_size: size of the above buffer
@@ -34,11 +33,14 @@ struct snd_compr_ops;
* @total_bytes_transferred: cumulative bytes transferred by offload DSP
* @sleep: poll sleep
* @private_data: driver private data pointer
+ * @dma_area: virtual buffer address
+ * @dma_addr: physical buffer address (not accessible from main CPU)
+ * @dma_bytes: size of DMA area
+ * @dma_buffer_p: runtime dma buffer pointer
*/
struct snd_compr_runtime {
snd_pcm_state_t state;
struct snd_compr_ops *ops;
- struct snd_dma_buffer *dma_buffer_p;
void *buffer;
u64 buffer_size;
u32 fragment_size;
@@ -47,6 +49,11 @@ struct snd_compr_runtime {
u64 total_bytes_transferred;
wait_queue_head_t sleep;
void *private_data;
+
+ unsigned char *dma_area;
+ dma_addr_t dma_addr;
+ size_t dma_bytes;
+ struct snd_dma_buffer *dma_buffer_p;
};
/**
@@ -60,6 +67,7 @@ struct snd_compr_runtime {
* @metadata_set: metadata set flag, true when set
* @next_track: has userspace signal next track transition, true when set
* @private_data: pointer to DSP private data
+ * @dma_buffer: allocated buffer if any
*/
struct snd_compr_stream {
const char *name;
@@ -71,6 +79,7 @@ struct snd_compr_stream {
bool metadata_set;
bool next_track;
void *private_data;
+ struct snd_dma_buffer dma_buffer;
};
/**
@@ -180,21 +189,34 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
/**
* snd_compr_set_runtime_buffer - Set the Compress runtime buffer
- * @substream: compress substream to set
+ * @stream: compress stream to set
* @bufp: the buffer information, NULL to clear
*
* Copy the buffer information to runtime buffer when @bufp is non-NULL.
* Otherwise it clears the current buffer information.
*/
-static inline void snd_compr_set_runtime_buffer(
- struct snd_compr_stream *substream,
- struct snd_dma_buffer *bufp)
+static inline void
+snd_compr_set_runtime_buffer(struct snd_compr_stream *stream,
+ struct snd_dma_buffer *bufp)
{
- struct snd_compr_runtime *runtime = substream->runtime;
-
- runtime->dma_buffer_p = bufp;
+ struct snd_compr_runtime *runtime = stream->runtime;
+
+ if (bufp) {
+ runtime->dma_buffer_p = bufp;
+ runtime->dma_area = bufp->area;
+ runtime->dma_addr = bufp->addr;
+ runtime->dma_bytes = bufp->bytes;
+ } else {
+ runtime->dma_buffer_p = NULL;
+ runtime->dma_area = NULL;
+ runtime->dma_addr = 0;
+ runtime->dma_bytes = 0;
+ }
}
+int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size);
+int snd_compr_free_pages(struct snd_compr_stream *stream);
+
int snd_compr_stop_error(struct snd_compr_stream *stream,
snd_pcm_state_t state);
diff --git a/include/sound/core.h b/include/sound/core.h
index ac8b692b69b4..381a010a1bd4 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -266,6 +266,7 @@ void snd_device_disconnect(struct snd_card *card, void *device_data);
void snd_device_disconnect_all(struct snd_card *card);
void snd_device_free(struct snd_card *card, void *device_data);
void snd_device_free_all(struct snd_card *card);
+int snd_device_get_state(struct snd_card *card, void *device_data);
/* isadma.c */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index d4299e146d95..affedc2801c4 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -513,6 +513,7 @@ struct hdac_stream {
struct snd_pcm_substream *substream; /* assigned substream,
* set in PCM open
*/
+ struct snd_compr_stream *cstream;
unsigned int format_val; /* format value to be set in the
* controller and the codec
*/
@@ -527,6 +528,7 @@ struct hdac_stream {
bool locked:1;
bool stripe:1; /* apply stripe control */
+ u64 curr_pos;
/* timestamp */
unsigned long start_wallclk; /* start + minimum wallclk */
unsigned long period_wallclk; /* wallclk for period */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index f657ff08f317..2ba5df2c9e23 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -644,6 +644,11 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
#define snd_pcm_group_for_each_entry(s, substream) \
list_for_each_entry(s, &substream->group->substreams, link_list)
+#define for_each_pcm_streams(stream) \
+ for (stream = SNDRV_PCM_STREAM_PLAYBACK; \
+ stream <= SNDRV_PCM_STREAM_LAST; \
+ stream++)
+
/**
* snd_pcm_running - Check whether the substream is in a running state
* @substream: substream to check
@@ -1122,7 +1127,14 @@ snd_pcm_kernel_readv(struct snd_pcm_substream *substream,
return __snd_pcm_lib_xfer(substream, bufs, false, frames, true);
}
-int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime);
+int snd_pcm_hw_limit_rates(struct snd_pcm_hardware *hw);
+
+static inline int
+snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime)
+{
+ return snd_pcm_hw_limit_rates(&runtime->hw);
+}
+
unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate);
unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
@@ -1415,6 +1427,15 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
return 1ULL << (__force int) pcm_format;
}
+/**
+ * pcm_for_each_format - helper to iterate for each format type
+ * @f: the iterator variable in snd_pcm_format_t type
+ */
+#define pcm_for_each_format(f) \
+ for ((f) = SNDRV_PCM_FORMAT_FIRST; \
+ (__force int)(f) <= (__force int)SNDRV_PCM_FORMAT_LAST; \
+ (f) = (__force snd_pcm_format_t)((__force int)(f) + 1))
+
/* printk helpers */
#define pcm_err(pcm, fmt, args...) \
dev_err((pcm)->card->dev, fmt, ##args)
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 661450a2095b..36f94735d23d 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -133,6 +133,13 @@ static inline int snd_mask_test(const struct snd_mask *mask, unsigned int val)
return mask->bits[MASK_OFS(val)] & MASK_BIT(val);
}
+/* Most drivers need only this one */
+static inline int snd_mask_test_format(const struct snd_mask *mask,
+ snd_pcm_format_t format)
+{
+ return snd_mask_test(mask, (__force unsigned int)format);
+}
+
static inline int snd_mask_single(const struct snd_mask *mask)
{
int i, c = 0;
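Together with pcm_for_each_format() added to pcm.h above, the new snd_mask_test_format() lets constraint code walk format masks without scattering __force casts. A hedged sketch (the loop body is illustrative):

/* Hedged sketch: scan a hw_params format mask with the new helpers. */
static void dump_formats(const struct snd_mask *fmask)
{
	snd_pcm_format_t f;

	pcm_for_each_format(f) {
		if (snd_mask_test_format(fmask, f))
			pr_debug("format %d supported\n", (__force int)f);
	}
}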
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index bc2c31734df1..e1f790561ac1 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -24,6 +24,12 @@ enum rt5682_jd_src {
RT5682_JD1,
};
+enum rt5682_dai_clks {
+ RT5682_DAI_WCLK_IDX,
+ RT5682_DAI_BCLK_IDX,
+ RT5682_DAI_NUM_CLKS,
+};
+
struct rt5682_platform_data {
int ldo1_en; /* GPIO for LDO1_EN */
@@ -32,6 +38,10 @@ struct rt5682_platform_data {
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
unsigned int btndet_delay;
+ unsigned int dmic_clk_rate;
+ unsigned int dmic_delay;
+
+ const char *dai_clk_names[RT5682_DAI_NUM_CLKS];
};
#endif
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index a217a87cae86..392e953d561e 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -75,18 +75,45 @@ struct snd_soc_acpi_mach_params {
};
/**
- * snd_soc_acpi_link_adr: ACPI-based list of _ADR, with a variable
- * number of devices per link
- *
+ * snd_soc_acpi_endpoint - endpoint descriptor
+ * @num: endpoint number (mandatory, unique per device)
+ * @aggregated: 0 (independent) or 1 (logically grouped)
+ * @group_position: zero-based order (only when @aggregated is 1)
+ * @group_id: platform-unique group identifier (only when @aggregated is 1)
+ */
+struct snd_soc_acpi_endpoint {
+ u8 num;
+ u8 aggregated;
+ u8 group_position;
+ u8 group_id;
+};
+
+/**
+ * snd_soc_acpi_adr_device - descriptor for _ADR-enumerated device
+ * @adr: 64 bit ACPI _ADR value
+ * @num_endpoints: number of endpoints for this device
+ * @endpoints: array of endpoints
+ */
+struct snd_soc_acpi_adr_device {
+ const u64 adr;
+ const u8 num_endpoints;
+ const struct snd_soc_acpi_endpoint *endpoints;
+};
+
+/**
+ * snd_soc_acpi_link_adr - ACPI-based list of _ADR enumerated devices
* @mask: one bit set indicates the link this list applies to
- * @num_adr: ARRAY_SIZE of adr
- * @adr: array of _ADR (represented as u64).
+ * @num_adr: ARRAY_SIZE of devices
+ * @adr_d: array of devices
+ *
+ * The number of devices per link can be more than 1, e.g. in SoundWire
+ * multi-drop configurations.
*/
struct snd_soc_acpi_link_adr {
const u32 mask;
const u32 num_adr;
- const u64 *adr;
+ const struct snd_soc_acpi_adr_device *adr_d;
};
/**
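With _ADR entries now carried in snd_soc_acpi_adr_device, machine-driver tables can describe per-endpoint grouping. A hedged sketch of a single non-aggregated device on one SoundWire link (the _ADR value and table names are made up for illustration):

/* Hedged sketch: one device, one endpoint, on link 0. */
static const struct snd_soc_acpi_endpoint single_endpoint = {
	.num = 0,
	.aggregated = 0,
	.group_position = 0,
	.group_id = 0,
};

static const struct snd_soc_acpi_adr_device example_link0_adr[] = {
	{
		.adr = 0x000010025D130800,	/* illustrative _ADR */
		.num_endpoints = 1,
		.endpoints = &single_endpoint,
	},
};

static const struct snd_soc_acpi_link_adr example_links[] = {
	{
		.mask = BIT(0),			/* link 0 */
		.num_adr = ARRAY_SIZE(example_link0_adr),
		.adr_d = example_link0_adr,
	},
	{}
};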
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index eaaeb00e9e84..78bac995db15 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -202,6 +202,8 @@ struct snd_soc_dai_ops {
int (*set_sdw_stream)(struct snd_soc_dai *dai,
void *stream, int direction);
+ void *(*get_sdw_stream)(struct snd_soc_dai *dai, int direction);
+
/*
* DAI digital mute - optional.
* Called by soc-core to minimise any pops.
@@ -322,9 +324,7 @@ struct snd_soc_dai {
struct snd_soc_dai_driver *driver;
/* DAI runtime info */
- unsigned int capture_active; /* stream usage count */
- unsigned int playback_active; /* stream usage count */
- unsigned int probed:1;
+ unsigned int stream_active[SNDRV_PCM_STREAM_LAST + 1]; /* usage count */
unsigned int active;
@@ -348,8 +348,27 @@ struct snd_soc_dai {
unsigned int rx_mask;
struct list_head list;
+
+ /* bit field */
+ unsigned int probed:1;
+ unsigned int started:1;
};
+static inline struct snd_soc_pcm_stream *
+snd_soc_dai_get_pcm_stream(const struct snd_soc_dai *dai, int stream)
+{
+ return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ &dai->driver->playback : &dai->driver->capture;
+}
+
+static inline
+struct snd_soc_dapm_widget *snd_soc_dai_get_widget(
+ struct snd_soc_dai *dai, int stream)
+{
+ return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ dai->playback_widget : dai->capture_widget;
+}
+
static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai,
const struct snd_pcm_substream *ss)
{
@@ -406,4 +425,23 @@ static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai,
return -ENOTSUPP;
}
+/**
+ * snd_soc_dai_get_sdw_stream() - Retrieves SDW stream from DAI
+ * @dai: DAI
+ * @direction: Stream direction (Playback/Capture)
+ *
+ * This routine only retrieves a stream that was previously configured
+ * with snd_soc_dai_set_sdw_stream().
+ *
+ * Returns a pointer to the stream, or ERR_PTR(-ENOTSUPP) if the callback
+ * is not supported.
+ */
+static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai,
+ int direction)
+{
+ if (dai->driver->ops->get_sdw_stream)
+ return dai->driver->ops->get_sdw_stream(dai, direction);
+ else
+ return ERR_PTR(-ENOTSUPP);
+}
+
#endif
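A hedged sketch of the retrieval side in a DAI consumer (the hw_params wrapper is illustrative; the error path follows the ERR_PTR convention of the helper above):

/* Hedged sketch: fetch the SoundWire stream a DAI was configured with. */
static int my_hw_params(struct snd_pcm_substream *substream,
			struct snd_pcm_hw_params *params,
			struct snd_soc_dai *dai)
{
	void *sruntime = snd_soc_dai_get_sdw_stream(dai, substream->stream);

	if (IS_ERR(sruntime))
		return PTR_ERR(sruntime);	/* callback not supported */

	/* ... program the stream ... */
	return 0;
}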
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 1b6afbc1a4ed..08495f8d86dc 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -482,6 +482,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list,
bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
enum snd_soc_dapm_direction));
+void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list);
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
@@ -691,6 +692,11 @@ struct snd_soc_dapm_widget_list {
struct snd_soc_dapm_widget *widgets[0];
};
+#define for_each_dapm_widgets(list, i, widget) \
+ for ((i) = 0; \
+ (i) < list->num_widgets && (widget = list->widgets[i]); \
+ (i)++)
+
struct snd_soc_dapm_stats {
int power_checks;
int path_checks;
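The new for_each_dapm_widgets() iterator pairs with snd_soc_dapm_dai_get_connected_widgets() and the matching free helper above. A hedged sketch of walking a widget list (the debug print is illustrative):

/* Hedged sketch: iterate a DAPM widget list with the new helper. */
static void print_widgets(struct snd_soc_dapm_widget_list *list)
{
	struct snd_soc_dapm_widget *w;
	int i;

	for_each_dapm_widgets(list, i, w)
		pr_debug("widget %d: %s\n", i, w->name);
}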
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index b654ebfc8766..0f6c50b17bba 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -132,17 +132,8 @@ int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe,
struct snd_pcm_substream *
snd_soc_dpcm_get_substream(struct snd_soc_pcm_runtime *be, int stream);
-/* get the BE runtime state */
-enum snd_soc_dpcm_state
- snd_soc_dpcm_be_get_state(struct snd_soc_pcm_runtime *be, int stream);
-
-/* set the BE runtime state */
-void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream,
- enum snd_soc_dpcm_state state);
-
-/* internal use only */
-int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
-int soc_dpcm_runtime_update(struct snd_soc_card *);
+/* update audio routing between PCMs and any DAI links */
+int snd_soc_dpcm_runtime_update(struct snd_soc_card *card);
#ifdef CONFIG_DEBUG_FS
void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
@@ -154,6 +145,7 @@ static inline void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list_);
+void dpcm_path_put(struct snd_soc_dapm_widget_list **list);
int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list, int new);
int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream);
@@ -167,10 +159,4 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
int event);
-static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
-{
- kfree(*list);
-}
-
-
#endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 8a2266676b2d..13458e4fbb13 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -471,6 +471,9 @@ bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd);
void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream);
void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
+int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hardware *hw, int stream);
+
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt);
@@ -855,6 +858,11 @@ struct snd_soc_dai_link {
((platform) = &link->platforms[i]); \
(i)++)
+#define for_each_link_cpus(link, i, cpu) \
+ for ((i) = 0; \
+ ((i) < link->num_cpus) && ((cpu) = &link->cpus[i]); \
+ (i)++)
+
/*
* Sample 1 : Single CPU/Codec/Platform
*
@@ -1058,6 +1066,7 @@ struct snd_soc_card {
const struct snd_soc_dapm_route *of_dapm_routes;
int num_of_dapm_routes;
bool fully_routed;
+ bool disable_route_checks;
/* lists of probed devices belonging to this card */
struct list_head component_dev_list;
@@ -1109,6 +1118,14 @@ struct snd_soc_card {
#define for_each_card_components(card, component) \
list_for_each_entry(component, &(card)->component_dev_list, card_list)
+#define for_each_card_dapms(card, dapm) \
+ list_for_each_entry(dapm, &card->dapm_list, list)
+
+#define for_each_card_widgets(card, w)\
+ list_for_each_entry(w, &card->widgets, list)
+#define for_each_card_widgets_safe(card, w, _w) \
+ list_for_each_entry_safe(w, _w, &card->widgets, list)
+
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
struct device *dev;
@@ -1128,10 +1145,14 @@ struct snd_soc_pcm_runtime {
struct snd_compr *compr;
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai;
+ struct snd_soc_dai **dais;
struct snd_soc_dai **codec_dais;
unsigned int num_codecs;
+ struct snd_soc_dai **cpu_dais;
+ unsigned int num_cpus;
+
struct delayed_work delayed_work;
void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd);
#ifdef CONFIG_DEBUG_FS
@@ -1148,16 +1169,31 @@ struct snd_soc_pcm_runtime {
int num_components;
struct snd_soc_component *components[0]; /* CPU/Codec/Platform */
};
+/* see soc_new_pcm_runtime() */
+#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
+#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus]
+
#define for_each_rtd_components(rtd, i, component) \
for ((i) = 0; \
((i) < rtd->num_components) && ((component) = rtd->components[i]);\
(i)++)
-#define for_each_rtd_codec_dai(rtd, i, dai)\
- for ((i) = 0; \
- ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
+#define for_each_rtd_cpu_dais(rtd, i, dai) \
+ for ((i) = 0; \
+ ((i) < rtd->num_cpus) && ((dai) = rtd->cpu_dais[i]); \
(i)++)
-#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
+#define for_each_rtd_cpu_dais_rollback(rtd, i, dai) \
+ for (; (--(i) >= 0) && ((dai) = rtd->cpu_dais[i]);)
+#define for_each_rtd_codec_dais(rtd, i, dai) \
+ for ((i) = 0; \
+ ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
+ (i)++)
+#define for_each_rtd_codec_dais_rollback(rtd, i, dai) \
for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+#define for_each_rtd_dais(rtd, i, dai) \
+ for ((i) = 0; \
+ ((i) < (rtd)->num_cpus + (rtd)->num_codecs) && \
+ ((dai) = (rtd)->dais[i]); \
+ (i)++)
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
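The reworked runtime stores CPU and codec DAIs in one dais[] array, addressed through asoc_rtd_to_cpu()/asoc_rtd_to_codec(). A hedged sketch of iterating both halves in one pass (the mute operation is illustrative):

/* Hedged sketch: walk every DAI on a runtime, CPUs first, then codecs. */
static void mute_all_dais(struct snd_soc_pcm_runtime *rtd, int mute)
{
	struct snd_soc_dai *dai;
	int i;

	for_each_rtd_dais(rtd, i, dai)
		snd_soc_dai_digital_mute(dai, mute,
					 SNDRV_PCM_STREAM_PLAYBACK);
}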
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
index 5f1ef5565be6..04e48227f542 100644
--- a/include/sound/sof/dai-intel.h
+++ b/include/sound/sof/dai-intel.h
@@ -87,6 +87,15 @@ struct sof_ipc_dai_hda_params {
uint32_t link_dma_ch;
} __packed;
+/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */
+struct sof_ipc_dai_alh_params {
+ struct sof_ipc_hdr hdr;
+ uint32_t stream_id;
+
+ /* reserved for future use */
+ uint32_t reserved[15];
+} __packed;
+
/* DMIC Configuration Request - SOF_IPC_DAI_DMIC_CONFIG */
/* This struct is defined per 2ch PDM controller available in the platform.
@@ -179,13 +188,4 @@ struct sof_ipc_dai_dmic_params {
struct sof_ipc_dai_dmic_pdm_ctrl pdm[0];
} __packed;
-/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */
-struct sof_ipc_dai_alh_params {
- struct sof_ipc_hdr hdr;
- uint32_t stream_id;
-
- /* reserved for future use */
- uint32_t reserved[15];
-} __packed;
-
#endif
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index bf3edd9c08b4..b79479575cc8 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -51,6 +51,7 @@
#define SOF_IPC_GLB_TRACE_MSG SOF_GLB_TYPE(0x9U)
#define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU)
#define SOF_IPC_GLB_TEST_MSG SOF_GLB_TYPE(0xBU)
+#define SOF_IPC_GLB_PROBE SOF_GLB_TYPE(0xCU)
/*
* DSP Command Message Types
@@ -102,6 +103,16 @@
#define SOF_IPC_STREAM_VORBIS_PARAMS SOF_CMD_TYPE(0x010)
#define SOF_IPC_STREAM_VORBIS_FREE SOF_CMD_TYPE(0x011)
+/* probe */
+#define SOF_IPC_PROBE_INIT SOF_CMD_TYPE(0x001)
+#define SOF_IPC_PROBE_DEINIT SOF_CMD_TYPE(0x002)
+#define SOF_IPC_PROBE_DMA_ADD SOF_CMD_TYPE(0x003)
+#define SOF_IPC_PROBE_DMA_INFO SOF_CMD_TYPE(0x004)
+#define SOF_IPC_PROBE_DMA_REMOVE SOF_CMD_TYPE(0x005)
+#define SOF_IPC_PROBE_POINT_ADD SOF_CMD_TYPE(0x006)
+#define SOF_IPC_PROBE_POINT_INFO SOF_CMD_TYPE(0x007)
+#define SOF_IPC_PROBE_POINT_REMOVE SOF_CMD_TYPE(0x008)
+
/* trace */
#define SOF_IPC_TRACE_DMA_PARAMS SOF_CMD_TYPE(0x001)
#define SOF_IPC_TRACE_DMA_POSITION SOF_CMD_TYPE(0x002)
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 1c560144996c..438a11fcf272 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -28,9 +28,9 @@
/* extended data types that can be appended onto end of sof_ipc_fw_ready */
enum sof_ipc_ext_data {
- SOF_IPC_EXT_DMA_BUFFER = 0,
- SOF_IPC_EXT_WINDOW,
- SOF_IPC_EXT_CC_INFO,
+ SOF_IPC_EXT_UNUSED = 0,
+ SOF_IPC_EXT_WINDOW = 1,
+ SOF_IPC_EXT_CC_INFO = 2,
};
/* FW version - SOF_IPC_GLB_VERSION */
@@ -83,22 +83,6 @@ struct sof_ipc_ext_data_hdr {
uint32_t type; /**< SOF_IPC_EXT_ */
} __packed;
-struct sof_ipc_dma_buffer_elem {
- struct sof_ipc_hdr hdr;
- uint32_t type; /**< SOF_IPC_REGION_ */
- uint32_t id; /**< platform specific - used to map to host memory */
- struct sof_ipc_host_buffer buffer;
-} __packed;
-
-/* extended data DMA buffers for IPC, trace and debug */
-struct sof_ipc_dma_buffer_data {
- struct sof_ipc_ext_data_hdr ext_hdr;
- uint32_t num_buffers;
-
- /* host files in buffer[n].buffer */
- struct sof_ipc_dma_buffer_elem buffer[];
-} __packed;
-
struct sof_ipc_window_elem {
struct sof_ipc_hdr hdr;
uint32_t type; /**< SOF_IPC_REGION_ */
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 8e76178fedf0..402e0250c508 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -53,9 +53,10 @@ struct sof_ipc_comp {
uint32_t id;
enum sof_comp_type type;
uint32_t pipeline_id;
+ uint32_t core;
/* reserved for future use */
- uint32_t reserved[2];
+ uint32_t reserved[1];
} __packed;
/*
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index a49d37140a64..591cd9e4692c 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -676,7 +676,7 @@ struct iscsi_session {
atomic_t session_logout;
atomic_t session_reinstatement;
atomic_t session_stop_active;
- atomic_t sleep_on_sess_wait_comp;
+ atomic_t session_close;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 51b6f50eabee..1b752d8ea529 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -116,10 +116,4 @@ static inline bool target_dev_configured(struct se_device *se_dev)
return !!(se_dev->dev_flags & DF_CONFIGURED);
}
-/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
-static inline uint32_t get_unaligned_be24(const uint8_t *const p)
-{
- return get_unaligned_be32(p - 1) & 0xffffffU;
-}
-
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 1728e883b7b2..6d4a694f6ea7 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -74,8 +74,6 @@
#define DA_EMULATE_MODEL_ALIAS 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
-/* Emulation for UNIT ATTENTION Interlock Control */
-#define DA_EMULATE_UA_INTLLCK_CTRL 0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS 1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
@@ -433,6 +431,13 @@ enum target_prot_type {
TARGET_DIF_TYPE3_PROT,
};
+/* Emulation for UNIT ATTENTION Interlock Control */
+enum target_ua_intlck_ctrl {
+ TARGET_UA_INTLCK_CTRL_CLEAR = 0,
+ TARGET_UA_INTLCK_CTRL_NO_CLEAR = 1,
+ TARGET_UA_INTLCK_CTRL_ESTABLISH_UA = 2,
+};
+
enum target_core_dif_check {
TARGET_DIF_CHECK_GUARD = 0x1 << 0,
TARGET_DIF_CHECK_APPTAG = 0x1 << 1,
@@ -663,26 +668,26 @@ struct se_dev_entry {
};
struct se_dev_attrib {
- int emulate_model_alias;
- int emulate_dpo;
- int emulate_fua_write;
- int emulate_fua_read;
- int emulate_write_cache;
- int emulate_ua_intlck_ctrl;
- int emulate_tas;
- int emulate_tpu;
- int emulate_tpws;
- int emulate_caw;
- int emulate_3pc;
- int emulate_pr;
+ bool emulate_model_alias;
+ bool emulate_dpo; /* deprecated */
+ bool emulate_fua_write;
+ bool emulate_fua_read; /* deprecated */
+ bool emulate_write_cache;
+ enum target_ua_intlck_ctrl emulate_ua_intlck_ctrl;
+ bool emulate_tas;
+ bool emulate_tpu;
+ bool emulate_tpws;
+ bool emulate_caw;
+ bool emulate_3pc;
+ bool emulate_pr;
enum target_prot_type pi_prot_type;
enum target_prot_type hw_pi_prot_type;
- int pi_prot_verify;
- int enforce_pr_isids;
- int force_pr_aptpl;
- int is_nonrot;
- int emulate_rest_reord;
- int unmap_zeroes_data;
+ bool pi_prot_verify;
+ bool enforce_pr_isids;
+ bool force_pr_aptpl;
+ bool is_nonrot;
+ bool emulate_rest_reord;
+ bool unmap_zeroes_data;
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 67a97838c2a0..d97adfc327f0 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -153,7 +153,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
#define show_compress_algorithm(type) \
__print_symbolic(type, \
{ COMPRESS_LZO, "LZO" }, \
- { COMPRESS_LZ4, "LZ4" })
+ { COMPRESS_LZ4, "LZ4" }, \
+ { COMPRESS_ZSTD, "ZSTD" })
struct f2fs_sb_info;
struct f2fs_io_info;
diff --git a/include/trace/events/gpu_mem.h b/include/trace/events/gpu_mem.h
new file mode 100644
index 000000000000..1897822a9150
--- /dev/null
+++ b/include/trace/events/gpu_mem.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GPU memory trace points
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_mem
+
+#if !defined(_TRACE_GPU_MEM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPU_MEM_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * The gpu_mem_total event indicates that there is an update to either the
+ * global or per-process total GPU memory counters.
+ *
+ * This event should be emitted whenever the kernel device driver allocates,
+ * frees, imports, or unimports memory in the GPU addressable space.
+ *
+ * @gpu_id: This is the gpu id.
+ *
+ * @pid: Use 0 for the global total; a positive pid reports a per-process total.
+ *
+ * @size: Virtual size of the allocation in bytes.
+ *
+ */
+TRACE_EVENT(gpu_mem_total,
+
+ TP_PROTO(uint32_t gpu_id, uint32_t pid, uint64_t size),
+
+ TP_ARGS(gpu_id, pid, size),
+
+ TP_STRUCT__entry(
+ __field(uint32_t, gpu_id)
+ __field(uint32_t, pid)
+ __field(uint64_t, size)
+ ),
+
+ TP_fast_assign(
+ __entry->gpu_id = gpu_id;
+ __entry->pid = pid;
+ __entry->size = size;
+ ),
+
+ TP_printk("gpu_id=%u pid=%u size=%llu",
+ __entry->gpu_id,
+ __entry->pid,
+ __entry->size)
+);
+
+#endif /* _TRACE_GPU_MEM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
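Drivers would emit this from their allocation bookkeeping. A hedged sketch (the wrapper name is illustrative, and one compilation unit must define CREATE_TRACE_POINTS before including the header):

/* Hedged sketch: report an updated per-process GPU memory total.
 * A pid of 0 reports the global counter instead.
 */
#include <trace/events/gpu_mem.h>

static void report_gpu_usage(u32 gpu_id, u32 pid, u64 total_bytes)
{
	trace_gpu_mem_total(gpu_id, pid, total_bytes);
}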
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index d82a0f4e824d..70e32ff096ec 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -13,6 +13,7 @@
EM( SCAN_PMD_NULL, "pmd_null") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
+ EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
new file mode 100644
index 000000000000..4661f7ba07c0
--- /dev/null
+++ b/include/trace/events/mmap.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmap
+
+#if !defined(_TRACE_MMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMAP_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vm_unmapped_area,
+
+ TP_PROTO(unsigned long addr, struct vm_unmapped_area_info *info),
+
+ TP_ARGS(addr, info),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, total_vm)
+ __field(unsigned long, flags)
+ __field(unsigned long, length)
+ __field(unsigned long, low_limit)
+ __field(unsigned long, high_limit)
+ __field(unsigned long, align_mask)
+ __field(unsigned long, align_offset)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->total_vm = current->mm->total_vm;
+ __entry->flags = info->flags;
+ __entry->length = info->length;
+ __entry->low_limit = info->low_limit;
+ __entry->high_limit = info->high_limit;
+ __entry->align_mask = info->align_mask;
+ __entry->align_offset = info->align_offset;
+ ),
+
+ TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+ IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
+ IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
+ __entry->total_vm, __entry->flags, __entry->length,
+ __entry->low_limit, __entry->high_limit, __entry->align_mask,
+ __entry->align_offset)
+);
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
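The tracepoint fires where the search result is known; a hedged sketch mirroring how mm code would call it (the wrapper is illustrative):

/* Hedged sketch: trace the outcome of an unmapped-area search.
 * addr is either the chosen address or an error value; the event's
 * TP_printk splits the two cases with IS_ERR_VALUE().
 */
static unsigned long traced_search(struct vm_unmapped_area_info *info)
{
	unsigned long addr = vm_unmapped_area(info);

	trace_vm_unmapped_area(addr, info);
	return addr;
}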
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a1675d43777e..5fb752034386 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -154,6 +154,7 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
{VM_ACCOUNT, "account" }, \
{VM_NORESERVE, "noreserve" }, \
{VM_HUGETLB, "hugetlb" }, \
+ {VM_SYNC, "sync" }, \
__VM_ARCH_SPECIFIC_1 , \
{VM_WIPEONFORK, "wipeonfork" }, \
{VM_DONTDUMP, "dontdump" }, \
diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h
new file mode 100644
index 000000000000..b71f680968eb
--- /dev/null
+++ b/include/trace/events/qla.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_TRACE_QLA_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_QLA_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qla
+
+#define QLA_MSG_MAX 256
+
+DECLARE_EVENT_CLASS(qla_log_event,
+ TP_PROTO(const char *buf,
+ struct va_format *vaf),
+
+ TP_ARGS(buf, vaf),
+
+ TP_STRUCT__entry(
+ __string(buf, buf)
+ __dynamic_array(char, msg, QLA_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(buf, buf);
+ vsnprintf(__get_str(msg), QLA_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+
+ TP_printk("%s %s", __get_str(buf), __get_str(msg))
+);
+
+DEFINE_EVENT(qla_log_event, ql_dbg_log,
+ TP_PROTO(const char *buf, struct va_format *vaf),
+ TP_ARGS(buf, vaf)
+);
+
+#endif /* _TRACE_QLA_H_ */
+
+#define TRACE_INCLUDE_FILE qla
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 9827f535f032..32d88c4fb063 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -126,7 +126,7 @@ DEFINE_GSSAPI_EVENT(verify_mic);
DEFINE_GSSAPI_EVENT(wrap);
DEFINE_GSSAPI_EVENT(unwrap);
-TRACE_EVENT(rpcgss_accept_upcall,
+TRACE_EVENT(rpcgss_svc_accept_upcall,
TP_PROTO(
__be32 xid,
u32 major_status,
@@ -154,6 +154,29 @@ TRACE_EVENT(rpcgss_accept_upcall,
)
);
+TRACE_EVENT(rpcgss_svc_accept,
+ TP_PROTO(
+ __be32 xid,
+ size_t len
+ ),
+
+ TP_ARGS(xid, len),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(size_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(xid);
+ __entry->len = len;
+ ),
+
+ TP_printk("xid=0x%08x len=%zu",
+ __entry->xid, __entry->len
+ )
+);
+
/**
** GSS auth unwrap failures
@@ -268,6 +291,40 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->ret ? "" : "un")
);
+DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
+ TP_PROTO(
+ __be32 xid,
+ u32 seqno
+ ),
+
+ TP_ARGS(xid, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(xid);
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("xid=0x%08x seqno=%u, request discarded",
+ __entry->xid, __entry->seqno)
+);
+
+#define DEFINE_SVC_SEQNO_EVENT(name) \
+ DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_##name, \
+ TP_PROTO( \
+ __be32 xid, \
+ u32 seqno \
+ ), \
+ TP_ARGS(xid, seqno))
+
+DEFINE_SVC_SEQNO_EVENT(large_seqno);
+DEFINE_SVC_SEQNO_EVENT(old_seqno);
+
/**
** gssd upcall related trace events
**/
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index c0e4c93324f5..051f26fedc4d 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -104,12 +104,12 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class,
TP_fast_assign(
__entry->r_xprt = r_xprt;
__entry->rc = rc;
- __entry->connect_status = r_xprt->rx_ep.rep_connected;
+ __entry->connect_status = r_xprt->rx_ep->re_connect_status;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connect status=%d",
+ TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
__get_str(addr), __get_str(port), __entry->r_xprt,
__entry->rc, __entry->connect_status
)
@@ -228,20 +228,20 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
TP_ARGS(wc, frwr),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
__field(unsigned int, status)
__field(unsigned int, vendor_err)
),
TP_fast_assign(
- __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ __entry->mr_id = frwr->fr_mr->res.id;
__entry->status = wc->status;
__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
),
TP_printk(
- "mr=%p: %s (%u/0x%x)",
- __entry->mr, rdma_show_wc_status(__entry->status),
+ "mr.id=%u: %s (%u/0x%x)",
+ __entry->mr_id, rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
);
@@ -274,7 +274,8 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
TP_ARGS(mr),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
+ __field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
@@ -282,15 +283,16 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
),
TP_fast_assign(
- __entry->mr = mr;
+ __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
__entry->dir = mr->mr_dir;
),
- TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
- __entry->mr, __entry->length,
+ TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
+ __entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir)
)
@@ -340,68 +342,37 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event,
** Connection events
**/
-TRACE_EVENT(xprtrdma_cm_event,
- TP_PROTO(
- const struct rpcrdma_xprt *r_xprt,
- struct rdma_cm_event *event
- ),
-
- TP_ARGS(r_xprt, event),
-
- TP_STRUCT__entry(
- __field(const void *, r_xprt)
- __field(unsigned int, event)
- __field(int, status)
- __string(addr, rpcrdma_addrstr(r_xprt))
- __string(port, rpcrdma_portstr(r_xprt))
- ),
-
- TP_fast_assign(
- __entry->r_xprt = r_xprt;
- __entry->event = event->event;
- __entry->status = event->status;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
- ),
-
- TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
- __get_str(addr), __get_str(port),
- __entry->r_xprt, rdma_show_cm_event(__entry->event),
- __entry->event, __entry->status
- )
-);
-
TRACE_EVENT(xprtrdma_inline_thresh,
TP_PROTO(
- const struct rpcrdma_xprt *r_xprt
+ const struct rpcrdma_ep *ep
),
- TP_ARGS(r_xprt),
+ TP_ARGS(ep),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
__field(unsigned int, inline_send)
__field(unsigned int, inline_recv)
__field(unsigned int, max_send)
__field(unsigned int, max_recv)
- __string(addr, rpcrdma_addrstr(r_xprt))
- __string(port, rpcrdma_portstr(r_xprt))
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
- const struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ const struct rdma_cm_id *id = ep->re_id;
- __entry->r_xprt = r_xprt;
- __entry->inline_send = ep->rep_inline_send;
- __entry->inline_recv = ep->rep_inline_recv;
- __entry->max_send = ep->rep_max_inline_send;
- __entry->max_recv = ep->rep_max_inline_recv;
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ __entry->inline_send = ep->re_inline_send;
+ __entry->inline_recv = ep->re_inline_recv;
+ __entry->max_send = ep->re_max_inline_send;
+ __entry->max_recv = ep->re_max_inline_recv;
+ memcpy(__entry->srcaddr, &id->route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
),
- TP_printk("peer=[%s]:%s r_xprt=%p neg send/recv=%u/%u, calc send/recv=%u/%u",
- __get_str(addr), __get_str(port), __entry->r_xprt,
+ TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
+ __entry->srcaddr, __entry->dstaddr,
__entry->inline_send, __entry->inline_recv,
__entry->max_send, __entry->max_recv
)
@@ -409,11 +380,10 @@ TRACE_EVENT(xprtrdma_inline_thresh,
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
+DEFINE_CONN_EVENT(flush_dct);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
-DEFINE_RXPRT_EVENT(xprtrdma_remove);
-DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
@@ -480,32 +450,33 @@ TRACE_EVENT(xprtrdma_op_set_cto,
TRACE_EVENT(xprtrdma_qp_event,
TP_PROTO(
- const struct rpcrdma_xprt *r_xprt,
+ const struct rpcrdma_ep *ep,
const struct ib_event *event
),
- TP_ARGS(r_xprt, event),
+ TP_ARGS(ep, event),
TP_STRUCT__entry(
- __field(const void *, r_xprt)
- __field(unsigned int, event)
+ __field(unsigned long, event)
__string(name, event->device->name)
- __string(addr, rpcrdma_addrstr(r_xprt))
- __string(port, rpcrdma_portstr(r_xprt))
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
- __entry->r_xprt = r_xprt;
+ const struct rdma_cm_id *id = ep->re_id;
+
__entry->event = event->event;
__assign_str(name, event->device->name);
- __assign_str(addr, rpcrdma_addrstr(r_xprt));
- __assign_str(port, rpcrdma_portstr(r_xprt));
+ memcpy(__entry->srcaddr, &id->route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
- __get_str(addr), __get_str(port), __entry->r_xprt,
- __get_str(name), rdma_show_ib_event(__entry->event),
- __entry->event
+ TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
+ __entry->srcaddr, __entry->dstaddr, __get_str(name),
+ rdma_show_ib_event(__entry->event), __entry->event
)
);
@@ -801,7 +772,7 @@ TRACE_EVENT(xprtrdma_post_recvs,
__entry->r_xprt = r_xprt;
__entry->count = count;
__entry->status = status;
- __entry->posted = r_xprt->rx_ep.rep_receive_count;
+ __entry->posted = r_xprt->rx_ep->re_receive_count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
@@ -920,17 +891,17 @@ TRACE_EVENT(xprtrdma_frwr_alloc,
TP_ARGS(mr, rc),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
__field(int, rc)
),
TP_fast_assign(
- __entry->mr = mr;
- __entry->rc = rc;
+ __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->rc = rc;
),
- TP_printk("mr=%p: rc=%d",
- __entry->mr, __entry->rc
+ TP_printk("mr.id=%u: rc=%d",
+ __entry->mr_id, __entry->rc
)
);
@@ -943,7 +914,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg,
TP_ARGS(mr, rc),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
+ __field(int, nents)
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
@@ -952,7 +924,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg,
),
TP_fast_assign(
- __entry->mr = mr;
+ __entry->mr_id = mr->frwr.fr_mr->res.id;
+ __entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
@@ -960,8 +933,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg,
__entry->rc = rc;
),
- TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
- __entry->mr, __entry->length,
+ TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
+ __entry->mr_id, __entry->nents, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir),
__entry->rc
@@ -977,21 +950,21 @@ TRACE_EVENT(xprtrdma_frwr_sgerr,
TP_ARGS(mr, sg_nents),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
__field(u64, addr)
__field(u32, dir)
__field(int, nents)
),
TP_fast_assign(
- __entry->mr = mr;
+ __entry->mr_id = mr->frwr.fr_mr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->nents = sg_nents;
),
- TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
- __entry->mr, __entry->addr,
+ TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
+ __entry->mr_id, __entry->addr,
xprtrdma_show_direction(__entry->dir),
__entry->nents
)
@@ -1006,7 +979,7 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
TP_ARGS(mr, num_mapped),
TP_STRUCT__entry(
- __field(const void *, mr)
+ __field(u32, mr_id)
__field(u64, addr)
__field(u32, dir)
__field(int, num_mapped)
@@ -1014,15 +987,15 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
),
TP_fast_assign(
- __entry->mr = mr;
+ __entry->mr_id = mr->frwr.fr_mr->res.id;
__entry->addr = mr->mr_sg->dma_address;
__entry->dir = mr->mr_dir;
__entry->num_mapped = num_mapped;
__entry->nents = mr->mr_nents;
),
- TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
- __entry->mr, __entry->addr,
+ TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
+ __entry->mr_id, __entry->addr,
xprtrdma_show_direction(__entry->dir),
__entry->num_mapped, __entry->nents
)
@@ -1031,7 +1004,7 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
-DEFINE_MR_EVENT(remoteinv);
+DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(recycle);
TRACE_EVENT(xprtrdma_dma_maperr,
@@ -1469,7 +1442,7 @@ DECLARE_EVENT_CLASS(svcrdma_segment_event,
);
#define DEFINE_SEGMENT_EVENT(name) \
- DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
+ DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
TP_PROTO( \
u32 handle, \
u32 length, \
@@ -1477,8 +1450,11 @@ DECLARE_EVENT_CLASS(svcrdma_segment_event,
), \
TP_ARGS(handle, length, offset))
-DEFINE_SEGMENT_EVENT(rseg);
-DEFINE_SEGMENT_EVENT(wseg);
+DEFINE_SEGMENT_EVENT(decode_wseg);
+DEFINE_SEGMENT_EVENT(encode_rseg);
+DEFINE_SEGMENT_EVENT(send_rseg);
+DEFINE_SEGMENT_EVENT(encode_wseg);
+DEFINE_SEGMENT_EVENT(send_wseg);
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
TP_PROTO(
@@ -1501,17 +1477,19 @@ DECLARE_EVENT_CLASS(svcrdma_chunk_event,
);
#define DEFINE_CHUNK_EVENT(name) \
- DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
+ DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name, \
TP_PROTO( \
u32 length \
), \
TP_ARGS(length))
-DEFINE_CHUNK_EVENT(pzr);
-DEFINE_CHUNK_EVENT(write);
-DEFINE_CHUNK_EVENT(reply);
+DEFINE_CHUNK_EVENT(send_pzr);
+DEFINE_CHUNK_EVENT(encode_write_chunk);
+DEFINE_CHUNK_EVENT(send_write_chunk);
+DEFINE_CHUNK_EVENT(encode_read_chunk);
+DEFINE_CHUNK_EVENT(send_reply_chunk);
-TRACE_EVENT(svcrdma_encode_read,
+TRACE_EVENT(svcrdma_send_read_chunk,
TP_PROTO(
u32 length,
u32 position
@@ -1634,6 +1612,24 @@ TRACE_EVENT(svcrdma_dma_map_rwctx,
)
);
+TRACE_EVENT(svcrdma_send_pullup,
+ TP_PROTO(
+ unsigned int len
+ ),
+
+ TP_ARGS(len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->len = len;
+ ),
+
+ TP_printk("len=%u", __entry->len)
+);
+
TRACE_EVENT(svcrdma_send_failed,
TP_PROTO(
const struct svc_rqst *rqst,
@@ -1813,34 +1809,6 @@ TRACE_EVENT(svcrdma_post_rw,
DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);
-TRACE_EVENT(svcrdma_cm_event,
- TP_PROTO(
- const struct rdma_cm_event *event,
- const struct sockaddr *sap
- ),
-
- TP_ARGS(event, sap),
-
- TP_STRUCT__entry(
- __field(unsigned int, event)
- __field(int, status)
- __array(__u8, addr, INET6_ADDRSTRLEN + 10)
- ),
-
- TP_fast_assign(
- __entry->event = event->event;
- __entry->status = event->status;
- snprintf(__entry->addr, sizeof(__entry->addr) - 1,
- "%pISpc", sap);
- ),
-
- TP_printk("addr=%s event=%s (%u/%d)",
- __entry->addr,
- rdma_show_cm_event(__entry->event),
- __entry->event, __entry->status
- )
-);
-
TRACE_EVENT(svcrdma_qp_error,
TP_PROTO(
const struct ib_event *event,
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ee993575d2fa..ffd2215950dc 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,6 +14,49 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
+DECLARE_EVENT_CLASS(xdr_buf_class,
+ TP_PROTO(
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(xdr),
+
+ TP_STRUCT__entry(
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __entry->head_base = xdr->head[0].iov_base;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk("head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->head_base, __entry->head_len, __entry->page_len,
+ __entry->tail_base, __entry->tail_len, __entry->msg_len
+ )
+);
+
+#define DEFINE_XDRBUF_EVENT(name) \
+ DEFINE_EVENT(xdr_buf_class, name, \
+ TP_PROTO( \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(xdr))
+
+DEFINE_XDRBUF_EVENT(xprt_sendto);
+DEFINE_XDRBUF_EVENT(xprt_recvfrom);
+DEFINE_XDRBUF_EVENT(svc_recvfrom);
+DEFINE_XDRBUF_EVENT(svc_sendto);
+
TRACE_DEFINE_ENUM(RPC_AUTH_OK);
TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED);
TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDCRED);
@@ -1292,6 +1335,39 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
DEFINE_SVC_DEFERRED_EVENT(drop);
DEFINE_SVC_DEFERRED_EVENT(revisit);
+DECLARE_EVENT_CLASS(cache_event,
+ TP_PROTO(
+ const struct cache_detail *cd,
+ const struct cache_head *h
+ ),
+
+ TP_ARGS(cd, h),
+
+ TP_STRUCT__entry(
+ __field(const struct cache_head *, h)
+ __string(name, cd->name)
+ ),
+
+ TP_fast_assign(
+ __entry->h = h;
+ __assign_str(name, cd->name);
+ ),
+
+ TP_printk("cache=%s entry=%p", __get_str(name), __entry->h)
+);
+#define DEFINE_CACHE_EVENT(name) \
+ DEFINE_EVENT(cache_event, name, \
+ TP_PROTO( \
+ const struct cache_detail *cd, \
+ const struct cache_head *h \
+ ), \
+ TP_ARGS(cd, h))
+DEFINE_CACHE_EVENT(cache_entry_expired);
+DEFINE_CACHE_EVENT(cache_entry_upcall);
+DEFINE_CACHE_EVENT(cache_entry_update);
+DEFINE_CACHE_EVENT(cache_entry_make_negative);
+DEFINE_CACHE_EVENT(cache_entry_no_listener);
+
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 914a872dd343..77408edd29d2 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -137,6 +137,7 @@ TRACE_EVENT(target_sequencer_start,
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
+ __field( unsigned long long, tag )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
@@ -146,6 +147,7 @@ TRACE_EVENT(target_sequencer_start,
TP_fast_assign(
__entry->unpacked_lun = cmd->orig_fe_lun;
+ __entry->tag = cmd->tag;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
@@ -153,9 +155,9 @@ TRACE_EVENT(target_sequencer_start,
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
- TP_printk("%s -> LUN %03u %s data_length %6u CDB %s (TA:%s C:%02x)",
+ TP_printk("%s -> LUN %03u tag %#llx %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
- show_opcode_name(__entry->opcode),
+ __entry->tag, show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
scsi_command_size(__entry->cdb) <= 16 ?
@@ -172,6 +174,7 @@ TRACE_EVENT(target_cmd_complete,
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
+ __field( unsigned long long, tag )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
@@ -184,6 +187,7 @@ TRACE_EVENT(target_cmd_complete,
TP_fast_assign(
__entry->unpacked_lun = cmd->orig_fe_lun;
+ __entry->tag = cmd->tag;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
@@ -195,8 +199,9 @@ TRACE_EVENT(target_cmd_complete,
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
- TP_printk("%s <- LUN %03u status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)",
+ TP_printk("%s <- LUN %03u tag %#llx status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
+ __entry->tag,
show_scsi_status_name(__entry->scsi_status),
__entry->sense_length, __entry->sense_length ? " / " : "",
__print_hex(__entry->sense_data, __entry->sense_length),
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index a5ab2973e8dc..74bb594ccb25 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -323,7 +323,7 @@ TRACE_EVENT(mm_vmscan_writepage,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
__entry->reclaim_flags = trace_reclaim_flags(
- page_is_file_cache(page));
+ page_is_file_lru(page));
),
TP_printk("page=%p pfn=%lu flags=%s",
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index ac3879829bb5..65f69723cbdc 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -125,9 +125,10 @@ extern "C" {
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
- * for the second page onward should be set to NC.
+ * for the second page onward should be set to NC. It should never
+ * be used by user space applications.
*/
-#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
+#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
* releasing the memory
*/
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 868bf7996c0f..808b48a93330 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -948,6 +948,8 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 829c0a48577f..2813e579b480 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1619,6 +1619,27 @@ struct drm_i915_gem_context_param {
* By default, new contexts allow persistence.
*/
#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/*
+ * I915_CONTEXT_PARAM_RINGSIZE:
+ *
+ * Sets the size of the CS ringbuffer to use for logical ring contexts. This
+ * applies a limit on how many batches can be queued to HW before the caller
+ * is blocked due to lack of space for more commands.
+ *
+ * This can only reliably be set prior to first use, i.e. during
+ * construction. At any later point, the current execution must be flushed as
+ * the ring can only be changed while the context is idle. Note, the ringsize
+ * can be specified as a constructor property, see
+ * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
+ *
+ * Only applies to the current set of engines and is lost when those
+ * engines are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
+ *
+ * Must be between 4 and 512 KiB, in intervals of page size [4 KiB].
+ * Default is 16 KiB.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE 0xc
/* Must be kept compact -- no holes and well documented */
__u64 value;
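
A minimal user-space sketch of how this parameter could be set, assuming an
already-open i915 DRM fd; the context id and error handling are illustrative,
not part of this patch:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Set the CS ring size of a context via the context-param ioctl. */
static int set_ring_size(int drm_fd, uint32_t ctx_id, uint64_t bytes)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_RINGSIZE;
	p.value = bytes;	/* 4 KiB..512 KiB, in page-size steps */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
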
diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
index 95a00fb867e6..1ec58d652a5a 100644
--- a/include/uapi/drm/lima_drm.h
+++ b/include/uapi/drm/lima_drm.h
@@ -32,12 +32,19 @@ struct drm_lima_get_param {
__u64 value; /* out, parameter value */
};
+/*
+ * A heap buffer dynamically increases its backup memory size when a GP
+ * task fails due to lack of heap memory. The size field of a heap buffer
+ * is an upper bound on the backup memory, which can be set to a fairly
+ * large value.
+ */
+#define LIMA_BO_FLAG_HEAP (1 << 0)
+
/**
 * create a buffer for use by the GPU
*/
struct drm_lima_gem_create {
__u32 size; /* in, buffer size */
- __u32 flags; /* in, currently no flags, must be zero */
+ __u32 flags; /* in, buffer flags */
__u32 handle; /* out, GEM buffer handle */
__u32 pad; /* pad, must be zero */
};
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index fcb741e3068f..02e917507479 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -86,6 +86,9 @@ extern "C" {
*
* DRM_VMW_PARAM_SM4_1
* SM4_1 support is enabled.
+ *
+ * DRM_VMW_PARAM_SM5
+ * SM5 support is enabled.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -103,6 +106,7 @@ extern "C" {
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
+#define DRM_VMW_PARAM_SM5 15
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -1133,7 +1137,7 @@ struct drm_vmw_handle_close_arg {
* svga3d surface flags split into 2, upper half and lower half.
*/
enum drm_vmw_surface_version {
- drm_vmw_gb_surface_v1
+ drm_vmw_gb_surface_v1,
};
/**
@@ -1144,6 +1148,7 @@ enum drm_vmw_surface_version {
* @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
* @multisample_pattern: Multisampling pattern when msaa is supported.
* @quality_level: Precision settings for each sample.
+ * @buffer_byte_stride: Buffer byte stride.
* @must_be_zero: Reserved for future usage.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
@@ -1152,10 +1157,11 @@ enum drm_vmw_surface_version {
struct drm_vmw_gb_surface_create_ext_req {
struct drm_vmw_gb_surface_create_req base;
enum drm_vmw_surface_version version;
- uint32_t svga3d_flags_upper_32_bits;
- SVGA3dMSPattern multisample_pattern;
- SVGA3dMSQualityLevel quality_level;
- uint64_t must_be_zero;
+ __u32 svga3d_flags_upper_32_bits;
+ __u32 multisample_pattern;
+ __u32 quality_level;
+ __u32 buffer_byte_stride;
+ __u32 must_be_zero;
};
/**
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
index aac550a52f80..8847dbf24151 100644
--- a/include/uapi/linux/coresight-stm.h
+++ b/include/uapi/linux/coresight-stm.h
@@ -2,8 +2,10 @@
#ifndef __UAPI_CORESIGHT_STM_H_
#define __UAPI_CORESIGHT_STM_H_
-#define STM_FLAG_TIMESTAMPED BIT(3)
-#define STM_FLAG_GUARANTEED BIT(7)
+#include <linux/const.h>
+
+#define STM_FLAG_TIMESTAMPED _BITUL(3)
+#define STM_FLAG_GUARANTEED _BITUL(7)
/*
* The CoreSight STM supports guaranteed and invariant timing
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index b9effa6f8503..a88c7c6d0692 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -24,10 +24,11 @@
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */
+#define FAN_DIR_MODIFY 0x00080000 /* Directory entry was modified */
-#define FAN_ONDIR 0x40000000 /* event occurred against dir */
+#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */
-#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */
+#define FAN_ONDIR 0x40000000 /* Event occurred against dir */
/* helper events */
#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */
@@ -116,6 +117,7 @@ struct fanotify_event_metadata {
};
#define FAN_EVENT_INFO_TYPE_FID 1
+#define FAN_EVENT_INFO_TYPE_DFID_NAME 2
/* Variable length info record following event metadata */
struct fanotify_event_info_header {
@@ -124,7 +126,12 @@ struct fanotify_event_info_header {
__u16 len;
};
-/* Unique file identifier info record */
+/*
+ * Unique file identifier info record. This is used both for
+ * FAN_EVENT_INFO_TYPE_FID records and for FAN_EVENT_INFO_TYPE_DFID_NAME
+ * records. For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null
+ * terminated name immediately after the file handle.
+ */
struct fanotify_event_info_fid {
struct fanotify_event_info_header hdr;
__kernel_fsid_t fsid;
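
A hedged sketch of consuming the new record type: walk the info records that
follow the event metadata and print the null-terminated name that trails the
file handle in a FAN_EVENT_INFO_TYPE_DFID_NAME record. The cast of the handle
field to struct file_handle follows the usual fanotify convention; length
validation is abbreviated here:

#define _GNU_SOURCE
#include <fcntl.h>		/* struct file_handle */
#include <stdio.h>
#include <sys/fanotify.h>

static void print_dfid_names(const struct fanotify_event_metadata *md)
{
	const char *rec = (const char *)md + md->metadata_len;
	const char *end = (const char *)md + md->event_len;

	while (rec + sizeof(struct fanotify_event_info_header) <= end) {
		const struct fanotify_event_info_fid *fid = (const void *)rec;
		const struct file_handle *fh = (const void *)fid->handle;

		if (fid->hdr.info_type == FAN_EVENT_INFO_TYPE_DFID_NAME)
			printf("name=%s\n",
			       (const char *)fh->f_handle + fh->handle_bytes);
		rec += fid->hdr.len;
	}
}
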
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 799cf823d493..0206383c0383 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -18,7 +18,7 @@
* struct gpiochip_info - Information about a certain GPIO chip
* @name: the Linux kernel name of this GPIO chip
* @label: a functional name for this GPIO chip, such as a product
- * number, may be NULL
+ * number, may be empty
* @lines: number of GPIO lines on this chip
*/
struct gpiochip_info {
@@ -44,10 +44,10 @@ struct gpiochip_info {
* @flags: various flags for this line
* @name: the name of this GPIO line, such as the output pin of the line on the
* chip, a rail or a pin header name on a board, as specified by the gpio
- * chip, may be NULL
+ * chip, may be empty
* @consumer: a functional name for the consumer of this GPIO line as set by
- * whatever is using it, will be NULL if there is no current user but may
- * also be NULL if the consumer doesn't set this up
+ * whatever is using it, will be empty if there is no current user but may
+ * also be empty if the consumer doesn't set this up
*/
struct gpioline_info {
__u32 line_offset;
@@ -59,6 +59,34 @@ struct gpioline_info {
/* Maximum number of requested handles */
#define GPIOHANDLES_MAX 64
+/* Possible line status change events */
+enum {
+ GPIOLINE_CHANGED_REQUESTED = 1,
+ GPIOLINE_CHANGED_RELEASED,
+ GPIOLINE_CHANGED_CONFIG,
+};
+
+/**
+ * struct gpioline_info_changed - Information about a change in status
+ * of a GPIO line
+ * @info: updated line information
+ * @timestamp: estimate of time of status change occurrence, in nanoseconds
+ * @event_type: one of GPIOLINE_CHANGED_REQUESTED, GPIOLINE_CHANGED_RELEASED
+ * and GPIOLINE_CHANGED_CONFIG
+ *
+ * Note: struct gpioline_info embedded here has 32-bit alignment on its own,
+ * but it works fine with 64-bit alignment too. With its 72 byte size, we can
+ * guarantee there are no implicit holes between it and subsequent members.
+ * The 20-byte padding at the end makes sure we don't add any implicit padding
+ * at the end of the structure on 64-bit architectures.
+ */
+struct gpioline_info_changed {
+ struct gpioline_info info;
+ __u64 timestamp;
+ __u32 event_type;
+ __u32 padding[5]; /* for future use */
+};
+
/* Linerequest flags */
#define GPIOHANDLE_REQUEST_INPUT (1UL << 0)
#define GPIOHANDLE_REQUEST_OUTPUT (1UL << 1)
@@ -176,6 +204,8 @@ struct gpioevent_data {
#define GPIO_GET_CHIPINFO_IOCTL _IOR(0xB4, 0x01, struct gpiochip_info)
#define GPIO_GET_LINEINFO_IOCTL _IOWR(0xB4, 0x02, struct gpioline_info)
+#define GPIO_GET_LINEINFO_WATCH_IOCTL _IOWR(0xB4, 0x0b, struct gpioline_info)
+#define GPIO_GET_LINEINFO_UNWATCH_IOCTL _IOWR(0xB4, 0x0c, __u32)
#define GPIO_GET_LINEHANDLE_IOCTL _IOWR(0xB4, 0x03, struct gpiohandle_request)
#define GPIO_GET_LINEEVENT_IOCTL _IOWR(0xB4, 0x04, struct gpioevent_request)
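
A minimal sketch of the new watch interface: request a watch on one line and
read gpioline_info_changed events from the chip fd. The device path and line
offset are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpioline_info_changed ev;
	struct gpioline_info info;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&info, 0, sizeof(info));
	info.line_offset = 5;
	if (ioctl(fd, GPIO_GET_LINEINFO_WATCH_IOCTL, &info) < 0)
		return 1;
	/* each read returns one status-change event for a watched line */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("line %u: event %u at %llu ns\n",
		       ev.info.line_offset, ev.event_type,
		       (unsigned long long)ev.timestamp);
	return 0;
}
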
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index 849ef1515d04..1f412fbf561b 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -83,21 +83,6 @@ enum dsa_completion_status {
#define DSA_COMP_STATUS_MASK 0x7f
#define DSA_COMP_STATUS_WRITE 0x80
-struct dsa_batch_desc {
- uint32_t pasid:20;
- uint32_t rsvd:11;
- uint32_t priv:1;
- uint32_t flags:24;
- uint32_t opcode:8;
- uint64_t completion_addr;
- uint64_t desc_list_addr;
- uint64_t rsvd1;
- uint32_t desc_count;
- uint16_t interrupt_handle;
- uint16_t rsvd2;
- uint8_t rsvd3[24];
-} __attribute__((packed));
-
struct dsa_hw_desc {
uint32_t pasid:20;
uint32_t rsvd:11;
@@ -109,6 +94,7 @@ struct dsa_hw_desc {
uint64_t src_addr;
uint64_t rdback_addr;
uint64_t pattern;
+ uint64_t desc_list_addr;
};
union {
uint64_t dst_addr;
@@ -116,7 +102,10 @@ struct dsa_hw_desc {
uint64_t src2_addr;
uint64_t comp_pattern;
};
- uint32_t xfer_size;
+ union {
+ uint32_t xfer_size;
+ uint32_t desc_count;
+ };
uint16_t int_handle;
uint16_t rsvd1;
union {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4b95f9a31a2f..428c7dde6b4b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -474,12 +474,17 @@ struct kvm_s390_mem_op {
__u32 size; /* amount of bytes */
__u32 op; /* type of operation */
__u64 buf; /* buffer in userspace */
- __u8 ar; /* the access register number */
- __u8 reserved[31]; /* should be set to 0 */
+ union {
+ __u8 ar; /* the access register number */
+ __u32 sida_offset; /* offset into the sida */
+ __u8 reserved[32]; /* should be set to 0 */
+ };
};
/* types for kvm_s390_mem_op->op */
#define KVM_S390_MEMOP_LOGICAL_READ 0
#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+#define KVM_S390_MEMOP_SIDA_READ 2
+#define KVM_S390_MEMOP_SIDA_WRITE 3
/* flags for kvm_s390_mem_op->flags */
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
@@ -1010,6 +1015,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_NISV_TO_USER 177
#define KVM_CAP_ARM_INJECT_EXT_DABT 178
#define KVM_CAP_S390_VCPU_RESETS 179
+#define KVM_CAP_S390_PROTECTED 180
+#define KVM_CAP_PPC_SECURE_GUEST 181
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1478,6 +1485,39 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
+struct kvm_s390_pv_sec_parm {
+ __u64 origin;
+ __u64 length;
+};
+
+struct kvm_s390_pv_unp {
+ __u64 addr;
+ __u64 size;
+ __u64 tweak;
+};
+
+enum pv_cmd_id {
+ KVM_PV_ENABLE,
+ KVM_PV_DISABLE,
+ KVM_PV_SET_SEC_PARMS,
+ KVM_PV_UNPACK,
+ KVM_PV_VERIFY,
+ KVM_PV_PREP_RESET,
+ KVM_PV_UNSHARE_ALL,
+};
+
+struct kvm_pv_cmd {
+ __u32 cmd; /* Command to be executed */
+ __u16 rc; /* Ultravisor return code */
+ __u16 rrc; /* Ultravisor return reason code */
+ __u64 data; /* Data or address */
+ __u32 flags; /* flags for future extensions. Must be 0 for now */
+ __u32 reserved[3];
+};
+
+/* Available with KVM_CAP_S390_PROTECTED */
+#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
@@ -1628,4 +1668,7 @@ struct kvm_hyperv_eventfd {
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
+#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
+
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index fc1a64c3447b..923cc162609c 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -5,8 +5,9 @@
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
-#define MREMAP_MAYMOVE 1
-#define MREMAP_FIXED 2
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+#define MREMAP_DONTUNMAP 4
#define OVERCOMMIT_GUESS 0
#define OVERCOMMIT_ALWAYS 1
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 30f2a87270dc..4565456c0ef4 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -276,6 +276,7 @@ enum nft_rule_compat_attributes {
* @NFT_SET_TIMEOUT: set uses timeouts
* @NFT_SET_EVAL: set can be updated from the evaluation path
* @NFT_SET_OBJECT: set contains stateful objects
+ * @NFT_SET_CONCAT: set contains a concatenation
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
@@ -285,6 +286,7 @@ enum nft_set_flags {
NFT_SET_TIMEOUT = 0x10,
NFT_SET_EVAL = 0x20,
NFT_SET_OBJECT = 0x40,
+ NFT_SET_CONCAT = 0x80,
};
/**
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 434e6506abaa..49ddcdc61c09 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -48,6 +48,7 @@ struct idletimer_tg_info_v1 {
char label[MAX_IDLETIMER_LABEL_SIZE];
+ __u8 send_nl_msg; /* unused: for compatibility with Android */
__u8 timer_type;
/* for kernel module internal use only */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 5437690483cd..f9701410d3b5 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -605,6 +605,7 @@
#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
+#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */
#define PCI_EXP_SLTSTA 26 /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
@@ -680,6 +681,7 @@
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
+#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index cbf422e56696..c3ab4c826297 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -19,5 +19,13 @@
#define PCITEST_MSIX _IOW('P', 0x7, int)
#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
+#define PCITEST_CLEAR_IRQ _IO('P', 0x10)
+
+#define PCITEST_FLAGS_USE_DMA 0x00000001
+
+struct pci_endpoint_test_xfer_param {
+ unsigned long size;
+ unsigned char flags;
+};
#endif /* __UAPI_LINUX_PCITEST_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 397cfd65b3fe..7b2d6fc9e6ed 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -142,8 +142,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_AUX = 1U << 20,
+ PERF_SAMPLE_CGROUP = 1U << 21,
- PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 22, /* non-ABI */
__PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
@@ -381,7 +382,8 @@ struct perf_event_attr {
ksymbol : 1, /* include ksymbol events */
bpf_event : 1, /* include bpf events */
aux_output : 1, /* generate AUX records instead of events */
- __reserved_1 : 32;
+ cgroup : 1, /* include cgroup events */
+ __reserved_1 : 31;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -1012,6 +1014,16 @@ enum perf_event_type {
*/
PERF_RECORD_BPF_EVENT = 18,
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * char path[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_CGROUP = 19,
+
PERF_RECORD_MAX, /* non-ABI */
};
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index 095af360326a..83bba58d47f4 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -12,6 +12,9 @@
#ifndef _UAPI_LINUX_RTC_H_
#define _UAPI_LINUX_RTC_H_
+#include <linux/const.h>
+#include <linux/ioctl.h>
+
/*
* The struct used to pass data via the following ioctl. Similar to the
* struct tm in <time.h>, but it needs to be here so that the kernel
@@ -92,10 +95,10 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
-#define RTC_VL_DATA_INVALID BIT(0) /* Voltage too low, RTC data is invalid */
-#define RTC_VL_BACKUP_LOW BIT(1) /* Backup voltage is low */
-#define RTC_VL_BACKUP_EMPTY BIT(2) /* Backup empty or not present */
-#define RTC_VL_ACCURACY_LOW BIT(3) /* Voltage is low, RTC accuracy is reduced */
+#define RTC_VL_DATA_INVALID _BITUL(0) /* Voltage too low, RTC data is invalid */
+#define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */
+#define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */
+#define RTC_VL_ACCURACY_LOW _BITUL(3) /* Voltage is low, RTC accuracy is reduced */
#define RTC_VL_READ _IOR('p', 0x13, unsigned int) /* Voltage low detection */
#define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 2e3bc22c6f20..3bac0a8ceab2 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -35,6 +35,7 @@
/* Flags for the clone3() syscall. */
#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */
/*
* cloning flags intersect with CSIGNAL so can be used with unshare and clone3
@@ -81,6 +82,8 @@
* @set_tid_size: This defines the size of the array referenced
* in @set_tid. This cannot be larger than the
* kernel's limit of nested PID namespaces.
+ * @cgroup: If CLONE_INTO_CGROUP is specified set this to
+ * a file descriptor for the cgroup.
*
* The structure is versioned by size and thus extensible.
* New struct members must go at the end of the struct and
@@ -97,11 +100,13 @@ struct clone_args {
__aligned_u64 tls;
__aligned_u64 set_tid;
__aligned_u64 set_tid_size;
+ __aligned_u64 cgroup;
};
#endif
#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
+#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
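
A hedged sketch of cloning a child directly into a target cgroup with the new
flag; the cgroup path is illustrative, and the raw syscall is used since
clone3() has no libc wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>

static pid_t clone_into_cgroup(const char *cgrp_path)
{
	struct clone_args args;
	int cgrp_fd = open(cgrp_path, O_RDONLY | O_DIRECTORY);

	if (cgrp_fd < 0)
		return -1;
	memset(&args, 0, sizeof(args));
	args.flags = CLONE_INTO_CGROUP;
	args.exit_signal = SIGCHLD;
	args.cgroup = cgrp_fd;	/* fd of the target cgroup directory */
	return syscall(__NR_clone3, &args, sizeof(args)); /* VER2 size */
}
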
/*
* Scheduling policies
diff --git a/include/uapi/linux/um_timetravel.h b/include/uapi/linux/um_timetravel.h
new file mode 100644
index 000000000000..ca3238222b6d
--- /dev/null
+++ b/include/uapi/linux/um_timetravel.h
@@ -0,0 +1,128 @@
+/*
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ */
+#ifndef _UAPI_LINUX_UM_TIMETRAVEL_H
+#define _UAPI_LINUX_UM_TIMETRAVEL_H
+#include <linux/types.h>
+
+/**
+ * struct um_timetravel_msg - UM time travel message
+ *
+ * This is the basic message type, going in both directions.
+ *
+ * This is the message passed between the host (user-mode Linux instance)
+ * and the calendar (the application on the other side of the socket) in
+ * order to implement common scheduling.
+ *
+ * Whenever UML has an event it will request runtime for it from the
+ * calendar, and then wait for its turn until it can run, etc. Note
+ * that it will only ever request the single next runtime, i.e. multiple
+ * REQUEST messages override each other.
+ */
+struct um_timetravel_msg {
+ /**
+ * @op: operation value from &enum um_timetravel_ops
+ */
+ __u32 op;
+
+ /**
+ * @seq: sequence number for the message - shall be reflected in
+ * the ACK response, and should be checked while processing
+ * the response to see if it matches
+ */
+ __u32 seq;
+
+ /**
+ * @time: time in nanoseconds
+ */
+ __u64 time;
+};
+
+/**
+ * enum um_timetravel_ops - Operation codes
+ */
+enum um_timetravel_ops {
+ /**
+ * @UM_TIMETRAVEL_ACK: response (ACK) to any previous message,
+ * this usually doesn't carry any data in the 'time' field
+ * unless otherwise specified below
+ */
+ UM_TIMETRAVEL_ACK = 0,
+
+ /**
+ * @UM_TIMETRAVEL_START: initialize the connection, the time
+ * field contains an (arbitrary) ID to possibly be able
+ * to distinguish the connections.
+ */
+ UM_TIMETRAVEL_START = 1,
+
+ /**
+ * @UM_TIMETRAVEL_REQUEST: request to run at the given time
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_REQUEST = 2,
+
+ /**
+ * @UM_TIMETRAVEL_WAIT: Indicate waiting for the previously requested
+ * runtime, new requests may be made while waiting (e.g. due to
+ * interrupts); the time field is ignored. The calendar must process
+ * this message and later send a %UM_TIMETRAVEL_RUN message when
+ * the host can run again.
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_WAIT = 3,
+
+ /**
+ * @UM_TIMETRAVEL_GET: return the current time from the calendar in the
+ * ACK message, the time in the request message is ignored
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_GET = 4,
+
+ /**
+ * @UM_TIMETRAVEL_UPDATE: time update to the calendar, must be sent e.g.
+ * before kicking an interrupt to another calendar
+ * (host -> calendar)
+ */
+ UM_TIMETRAVEL_UPDATE = 5,
+
+ /**
+ * @UM_TIMETRAVEL_RUN: run time request granted, current time is in
+ * the time field
+ * (calendar -> host)
+ */
+ UM_TIMETRAVEL_RUN = 6,
+
+ /**
+ * @UM_TIMETRAVEL_FREE_UNTIL: Enable free-running until the given time,
+ * this is a message from the calendar telling the host that it can
+ * freely do its own scheduling for anything before the indicated
+ * time.
+ * Note that if a calendar sends this message once, the host may
+ * assume that it will also do so in the future, if it implements
+ * wraparound semantics for the time field.
+ * (calendar -> host)
+ */
+ UM_TIMETRAVEL_FREE_UNTIL = 7,
+
+ /**
+ * @UM_TIMETRAVEL_GET_TOD: Return time of day, typically used once at
+ * boot by the virtual machines to get a synchronized time from
+ * the simulation.
+ */
+ UM_TIMETRAVEL_GET_TOD = 8,
+};
+
+#endif /* _UAPI_LINUX_UM_TIMETRAVEL_H */
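
A minimal host-side sketch of the message flow described above: send a
REQUEST for runtime at a simulated time, then block until the calendar
answers with RUN. Socket setup, ACK matching against @seq, and error handling
are abbreviated:

#include <unistd.h>
#include <linux/um_timetravel.h>

static int request_and_wait(int sock, __u64 when, __u32 *seq)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_REQUEST,
		.seq = (*seq)++,
		.time = when,
	};

	if (write(sock, &msg, sizeof(msg)) != sizeof(msg))
		return -1;
	/* a later REQUEST would override this one; here we just wait */
	do {
		if (read(sock, &msg, sizeof(msg)) != sizeof(msg))
			return -1;
	} while (msg.op != UM_TIMETRAVEL_RUN);
	return 0;
}
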
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 48f1a7c2f1f0..e7e98bde221f 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -19,7 +19,8 @@
* means the userland is reading).
*/
#define UFFD_API ((__u64)0xAA)
-#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \
+#define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
+ UFFD_FEATURE_EVENT_FORK | \
UFFD_FEATURE_EVENT_REMAP | \
UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
@@ -34,7 +35,8 @@
#define UFFD_API_RANGE_IOCTLS \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
- (__u64)1 << _UFFDIO_ZEROPAGE)
+ (__u64)1 << _UFFDIO_ZEROPAGE | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY)
@@ -52,6 +54,7 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_API (0x3F)
/* userfaultfd ioctl ids */
@@ -68,6 +71,8 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
+#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
+ struct uffdio_writeprotect)
/* read() structure */
struct uffd_msg {
@@ -203,13 +208,14 @@ struct uffdio_copy {
__u64 dst;
__u64 src;
__u64 len;
+#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
/*
- * There will be a wrprotection flag later that allows to map
- * pages wrprotected on the fly. And such a flag will be
- * available if the wrprotection ioctl are implemented for the
- * range according to the uffdio_register.ioctls.
+ * UFFDIO_COPY_MODE_WP will map the page write protected on
+ * the fly. UFFDIO_COPY_MODE_WP is available only if the
+ * write protected ioctl is implemented for the range
+ * according to the uffdio_register.ioctls.
*/
-#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_COPY_MODE_WP ((__u64)1<<1)
__u64 mode;
/*
@@ -231,4 +237,24 @@ struct uffdio_zeropage {
__s64 zeropage;
};
+struct uffdio_writeprotect {
+ struct uffdio_range range;
+/*
+ * UFFDIO_WRITEPROTECT_MODE_WP: set the flag to write protect a range,
+ * unset the flag to undo protection of a range which was previously
+ * write protected.
+ *
+ * UFFDIO_WRITEPROTECT_MODE_DONTWAKE: set the flag to avoid waking up
+ * any wait thread after the operation succeeds.
+ *
+ * NOTE: Write protecting a region (WP=1) is unrelated to page faults,
+ * therefore the DONTWAKE flag is meaningless with WP=1. Removing write
+ * protection (WP=0) in response to a page fault wakes the faulting
+ * task unless DONTWAKE is set.
+ */
+#define UFFDIO_WRITEPROTECT_MODE_WP ((__u64)1<<0)
+#define UFFDIO_WRITEPROTECT_MODE_DONTWAKE ((__u64)1<<1)
+ __u64 mode;
+};
+
#endif /* _LINUX_USERFAULTFD_H */
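
A minimal sketch of the new ioctl, assuming a userfaultfd already registered
with UFFDIO_REGISTER_MODE_WP over [addr, addr + len): set WP to protect the
range, clear it (without DONTWAKE) to lift the protection and wake any
faulting task:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int wp_range(int uffd, unsigned long addr, unsigned long len,
		    int protect)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = addr, .len = len },
		.mode = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}
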
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 9e843a147ead..015516bcfaa3 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -707,6 +707,43 @@ struct vfio_device_ioeventfd {
#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
+/**
+ * VFIO_DEVICE_FEATURE - _IORW(VFIO_TYPE, VFIO_BASE + 17,
+ * struct vfio_device_feature)
+ *
+ * Get, set, or probe feature data of the device. The feature is selected
+ * using the FEATURE_MASK portion of the flags field. Support for a feature
+ * can be probed by setting both the FEATURE_MASK and PROBE bits. A probe
+ * may optionally include the GET and/or SET bits to determine read vs write
+ * access of the feature respectively. Probing a feature will return success
+ * if the feature is supported and all of the optionally indicated GET/SET
+ * methods are supported. The format of the data portion of the structure is
+ * specific to the given feature. The data portion is not required for
+ * probing. GET and SET are mutually exclusive, except for use with PROBE.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+struct vfio_device_feature {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DEVICE_FEATURE_MASK (0xffff) /* 16-bit feature index */
+#define VFIO_DEVICE_FEATURE_GET (1 << 16) /* Get feature into data[] */
+#define VFIO_DEVICE_FEATURE_SET (1 << 17) /* Set feature from data[] */
+#define VFIO_DEVICE_FEATURE_PROBE (1 << 18) /* Probe feature support */
+ __u8 data[];
+};
+
+#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17)
+
+/*
+ * Provide support for setting a PCI VF Token, which is used as a shared
+ * secret between PF and VF drivers. This feature may only be set on a
+ * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
+ * open VFs. Data provided when setting this feature is a 16-byte array
+ * (__u8 b[16]), representing a UUID.
+ */
+#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0)
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
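
A hedged sketch of the probe-then-set pattern for the VF token feature on a
PF device fd; the UUID bytes are supplied by the caller and the error paths
are abbreviated:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int set_vf_token(int device_fd, const __u8 uuid[16])
{
	char buf[sizeof(struct vfio_device_feature) + 16];
	struct vfio_device_feature *f = (void *)buf;

	f->argsz = sizeof(buf);
	f->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
		   VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_SET;
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, f))
		return -1;	/* setting this feature is unsupported */
	f->flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
		   VFIO_DEVICE_FEATURE_SET;
	memcpy(f->data, uuid, 16);	/* 16-byte UUID payload */
	return ioctl(device_fd, VFIO_DEVICE_FEATURE, f);
}
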
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index a1966cd7b677..19974392d324 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -36,6 +36,7 @@
#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
#define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */
#define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */
+#define VIRTIO_BALLOON_F_REPORTING 5 /* Page reporting virtqueue */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h
new file mode 100644
index 000000000000..6435f0bcb556
--- /dev/null
+++ b/include/uapi/misc/uacce/hisi_qm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_HISI_QM_H
+#define _UAPI_HISI_QM_H
+
+#include <linux/types.h>
+
+/**
+ * struct hisi_qp_ctx - User data for hisi qp.
+ * @id: qp_index return to user space
+ * @qc_type: Accelerator algorithm type
+ */
+struct hisi_qp_ctx {
+ __u16 id;
+ __u16 qc_type;
+};
+
+#define HISI_QM_API_VER_BASE "hisi_qm_v1"
+#define HISI_QM_API_VER2_BASE "hisi_qm_v2"
+
+/* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */
+#define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx)
+
+#endif
diff --git a/include/uapi/misc/uacce/uacce.h b/include/uapi/misc/uacce/uacce.h
new file mode 100644
index 000000000000..cc7185678f47
--- /dev/null
+++ b/include/uapi/misc/uacce/uacce.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPIUUACCE_H
+#define _UAPIUUACCE_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * UACCE_CMD_START_Q: Start queue
+ */
+#define UACCE_CMD_START_Q _IO('W', 0)
+
+/*
+ * UACCE_CMD_PUT_Q:
+ * The user actively stops the queue and frees queue resources
+ * immediately. This is an optimization, since closing the fd may
+ * be delayed.
+ */
+#define UACCE_CMD_PUT_Q _IO('W', 1)
+
+/*
+ * UACCE Device flags:
+ * UACCE_DEV_SVA: Shared Virtual Addresses
+ * Support PASID
+ * Support device page faults (PCI PRI or SMMU Stall)
+ */
+#define UACCE_DEV_SVA BIT(0)
+
+/**
+ * enum uacce_qfrt: queue file region type
+ * @UACCE_QFRT_MMIO: device mmio region
+ * @UACCE_QFRT_DUS: device user share region
+ */
+enum uacce_qfrt {
+ UACCE_QFRT_MMIO = 0,
+ UACCE_QFRT_DUS = 1,
+};
+
+#endif
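
A hedged sketch tying the two new headers together: open a queue chardev,
bind the algorithm type with the hisi_qm helper ioctl, then start the queue.
The device node path and qc_type value are hypothetical:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/uacce/uacce.h>
#include <misc/uacce/hisi_qm.h>

static int open_queue(void)
{
	struct hisi_qp_ctx ctx = { .qc_type = 0 };	/* assumed alg type */
	int fd = open("/dev/hisi_zip-0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return -1;
	if (ioctl(fd, UACCE_CMD_QM_SET_QP_CTX, &ctx) < 0 ||
	    ioctl(fd, UACCE_CMD_START_Q) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* ctx.id now holds the qp index */
}
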
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 624f5b53eb1f..df1cc3641bda 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -49,6 +49,7 @@ enum {
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
+ MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
};
enum {
@@ -78,6 +79,7 @@ struct mlx5_ib_alloc_ucontext_req {
enum mlx5_lib_caps {
MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
+ MLX5_LIB_CAP_DYN_UAR = (__u64)1 << 1,
};
enum mlx5_ib_alloc_uctx_v2_flags {
@@ -266,6 +268,7 @@ struct mlx5_ib_query_device_resp {
enum mlx5_ib_create_cq_flags {
MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
+ MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1,
};
struct mlx5_ib_create_cq {
@@ -275,6 +278,9 @@ struct mlx5_ib_create_cq {
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
__u16 flags;
+ __u16 uar_page_index;
+ __u16 reserved0;
+ __u32 reserved1;
};
struct mlx5_ib_create_cq_resp {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index afe7da6f2b8e..24f3388c3182 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -131,6 +131,23 @@ enum mlx5_ib_var_obj_methods {
MLX5_IB_METHOD_VAR_OBJ_DESTROY,
};
+enum mlx5_ib_uar_alloc_attrs {
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+};
+
+enum mlx5_ib_uar_obj_destroy_attrs {
+ MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_uar_obj_methods {
+ MLX5_IB_METHOD_UAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_UAR_OBJ_DESTROY,
+};
+
enum mlx5_ib_devx_umem_reg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
@@ -143,6 +160,22 @@ enum mlx5_ib_devx_umem_dereg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
};
+enum mlx5_ib_pp_obj_methods {
+ MLX5_IB_METHOD_PP_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_PP_OBJ_DESTROY,
+};
+
+enum mlx5_ib_pp_alloc_attrs {
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+};
+
+enum mlx5_ib_pp_obj_destroy_attrs {
+ MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
enum mlx5_ib_devx_umem_methods {
MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_METHOD_DEVX_UMEM_DEREG,
@@ -173,6 +206,8 @@ enum mlx5_ib_objects {
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
MLX5_IB_OBJECT_VAR,
+ MLX5_IB_OBJECT_PP,
+ MLX5_IB_OBJECT_UAR,
};
enum mlx5_ib_flow_matcher_create_attrs {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 88b6ca70c2fe..56b26eaea083 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -44,6 +44,7 @@ enum mlx5_ib_uapi_flow_table_type {
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX = 0x4,
};
enum mlx5_ib_uapi_flow_action_packet_reformat_type {
@@ -73,5 +74,14 @@ struct mlx5_ib_uapi_devx_async_event_hdr {
__u8 out_data[];
};
+enum mlx5_ib_uapi_pp_alloc_flags {
+ MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX = 1 << 0,
+};
+
+enum mlx5_ib_uapi_uar_alloc_type {
+ MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF = 0x0,
+ MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC = 0x1,
+};
+
#endif
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 76f627f0d13b..66318c44acd7 100644
--- a/include/uapi/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
@@ -9,6 +9,7 @@
#define _FC_ELS_H_
#include <linux/types.h>
+#include <asm/byteorder.h>
/*
* Fibre Channel Switch - Enhanced Link Services definitions.
@@ -40,6 +41,7 @@ enum fc_els_cmd {
ELS_REC = 0x13, /* read exchange concise */
ELS_SRR = 0x14, /* sequence retransmission request */
ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */
+ ELS_RDF = 0x19, /* Register Diagnostic Functions */
ELS_PRLI = 0x20, /* process login */
ELS_PRLO = 0x21, /* process logout */
ELS_SCN = 0x22, /* state change notification */
@@ -108,6 +110,7 @@ enum fc_els_cmd {
[ELS_REC] = "REC", \
[ELS_SRR] = "SRR", \
[ELS_FPIN] = "FPIN", \
+ [ELS_RDF] = "RDF", \
[ELS_PRLI] = "PRLI", \
[ELS_PRLO] = "PRLO", \
[ELS_SCN] = "SCN", \
@@ -208,6 +211,99 @@ enum fc_els_rjt_explan {
};
/*
+ * Link Service TLV Descriptor Tag Values
+ */
+enum fc_ls_tlv_dtag {
+ ELS_DTAG_LS_REQ_INFO = 0x00000001,
+ /* Link Service Request Information Descriptor */
+ ELS_DTAG_LNK_INTEGRITY = 0x00020001,
+ /* Link Integrity Notification Descriptor */
+ ELS_DTAG_DELIVERY = 0x00020002,
+ /* Delivery Notification Descriptor */
+ ELS_DTAG_PEER_CONGEST = 0x00020003,
+ /* Peer Congestion Notification Descriptor */
+ ELS_DTAG_CONGESTION = 0x00020004,
+ /* Congestion Notification Descriptor */
+ ELS_DTAG_FPIN_REGISTER = 0x00030001,
+ /* FPIN Registration Descriptor */
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_LS_TLV_DTAG_INIT { \
+ { ELS_DTAG_LS_REQ_INFO, "Link Service Request Information" }, \
+ { ELS_DTAG_LNK_INTEGRITY, "Link Integrity Notification" }, \
+ { ELS_DTAG_DELIVERY, "Delivery Notification Present" }, \
+ { ELS_DTAG_PEER_CONGEST, "Peer Congestion Notification" }, \
+ { ELS_DTAG_CONGESTION, "Congestion Notification" }, \
+ { ELS_DTAG_FPIN_REGISTER, "FPIN Registration" }, \
+}
+
+
+/*
+ * Generic Link Service TLV Descriptor format
+ *
+ * This structure, as it defines no payload, will also be referred to
+ * as the "tlv header" - which contains the tag and len fields.
+ */
+struct fc_tlv_desc {
+ __be32 desc_tag; /* Notification Descriptor Tag */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __u8 desc_value[0]; /* Descriptor Value */
+};
+
+/* Descriptor tag and len fields are considered the mandatory header
+ * for a descriptor
+ */
+#define FC_TLV_DESC_HDR_SZ sizeof(struct fc_tlv_desc)
+
+/*
+ * Macro, used when initializing payloads, to return the descriptor length.
+ * Length is size of descriptor minus the tag and len fields.
+ */
+#define FC_TLV_DESC_LENGTH_FROM_SZ(desc) \
+ (sizeof(desc) - FC_TLV_DESC_HDR_SZ)
+
+/* Macro, used on received payloads, to return the descriptor length */
+#define FC_TLV_DESC_SZ_FROM_LENGTH(tlv) \
+ (__be32_to_cpu((tlv)->desc_len) + FC_TLV_DESC_HDR_SZ)
+
+/*
+ * This helper is used to walk descriptors in a descriptor list.
+ * Given the address of the current descriptor, which minimally contains a
+ * tag and len field, calculate the address of the next descriptor based
+ * on the len field.
+ */
+static inline void *fc_tlv_next_desc(void *desc)
+{
+ struct fc_tlv_desc *tlv = desc;
+
+ return (desc + FC_TLV_DESC_SZ_FROM_LENGTH(tlv));
+}
+
+
+/*
+ * Link Service Request Information Descriptor
+ */
+struct fc_els_lsri_desc {
+ __be32 desc_tag; /* descriptor tag (0x0000 0001) */
+ __be32 desc_len; /* Length of Descriptor (in bytes) (4).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ struct {
+ __u8 cmd; /* ELS cmd byte */
+ __u8 bytes[3]; /* bytes 1..3 */
+ } rqst_w0; /* Request word 0 */
+};
+
+
+/*
* Common service parameters (N ports).
*/
struct fc_els_csp {
@@ -819,24 +915,61 @@ enum fc_els_clid_ic {
};
+enum fc_fpin_li_event_types {
+ FPIN_LI_UNKNOWN = 0x0,
+ FPIN_LI_LINK_FAILURE = 0x1,
+ FPIN_LI_LOSS_OF_SYNC = 0x2,
+ FPIN_LI_LOSS_OF_SIG = 0x3,
+ FPIN_LI_PRIM_SEQ_ERR = 0x4,
+ FPIN_LI_INVALID_TX_WD = 0x5,
+ FPIN_LI_INVALID_CRC = 0x6,
+ FPIN_LI_DEVICE_SPEC = 0xF,
+};
+
/*
- * Fabric Notification Descriptor Tag values
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
*/
-enum fc_fn_dtag {
- ELS_FN_DTAG_LNK_INTEGRITY = 0x00020001, /* Link Integrity */
- ELS_FN_DTAG_PEER_CONGEST = 0x00020003, /* Peer Congestion */
- ELS_FN_DTAG_CONGESTION = 0x00020004, /* Congestion */
-};
+#define FC_FPIN_LI_EVT_TYPES_INIT { \
+ { FPIN_LI_UNKNOWN, "Unknown" }, \
+ { FPIN_LI_LINK_FAILURE, "Link Failure" }, \
+ { FPIN_LI_LOSS_OF_SYNC, "Loss of Synchronization" }, \
+ { FPIN_LI_LOSS_OF_SIG, "Loss of Signal" }, \
+ { FPIN_LI_PRIM_SEQ_ERR, "Primitive Sequence Protocol Error" }, \
+ { FPIN_LI_INVALID_TX_WD, "Invalid Transmission Word" }, \
+ { FPIN_LI_INVALID_CRC, "Invalid CRC" }, \
+ { FPIN_LI_DEVICE_SPEC, "Device Specific" }, \
+}
+
/*
- * Fabric Notification Descriptor
+ * Link Integrity Notification Descriptor
*/
-struct fc_fn_desc {
- __be32 fn_desc_tag; /* Notification Descriptor Tag */
- __be32 fn_desc_value_len; /* Length of Descriptor Value field
- * (in bytes)
- */
- __u8 fn_desc_value[0]; /* Descriptor Value */
+struct fc_fn_li_desc {
+ __be32 desc_tag; /* Descriptor Tag (0x00020001) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be64 detecting_wwpn; /* Port Name that detected event */
+ __be64 attached_wwpn; /* Port Name of device attached to
+ * detecting Port Name
+ */
+ __be16 event_type; /* see enum fc_fpin_li_event_types */
+ __be16 event_modifier; /* Implementation specific value
+ * describing the event type
+ */
+ __be32 event_threshold;/* duration in ms of the link
+ * integrity detection cycle
+ */
+ __be32 event_count; /* minimum number of event
+ * occurrences during the event
+ * threshold to cause the LI event
+ */
+ __be32 pname_count; /* number of portname_list elements */
+ __be64 pname_list[0]; /* list of N_Port_Names accessible
+ * through the attached port
+ */
};
/*
@@ -845,8 +978,56 @@ struct fc_fn_desc {
struct fc_els_fpin {
__u8 fpin_cmd; /* command (0x16) */
__u8 fpin_zero[3]; /* specified as zero - part of cmd */
- __be32 fpin_desc_cnt; /* count of descriptors */
- struct fc_fn_desc fpin_desc[0]; /* Descriptor list */
+ __be32 desc_len; /* Length of Descriptor List (in bytes).
+ * Size of ELS excluding fpin_cmd,
+ * fpin_zero and desc_len fields.
+ */
+ struct fc_tlv_desc fpin_desc[0]; /* Descriptor list */
+};
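
To illustrate how the TLV helper and length macros compose, a hedged sketch
that counts the descriptors in a received FPIN; real code must additionally
bound-check desc_len against the received frame size:

static unsigned int fpin_count_descs(struct fc_els_fpin *fpin)
{
	void *desc = &fpin->fpin_desc[0];
	__u32 list_len = __be32_to_cpu(fpin->desc_len);
	__u32 consumed = 0;
	unsigned int count = 0;

	while (consumed < list_len) {
		struct fc_tlv_desc *tlv = desc;

		/* advance by the tag + len header plus the descriptor value */
		consumed += FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		desc = fc_tlv_next_desc(desc);
		count++;
	}
	return count;
}
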
+
+/* Diagnostic Function Descriptor - FPIN Registration */
+struct fc_df_desc_fpin_reg {
+ __be32 desc_tag; /* FPIN Registration (0x00030001) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be32 count; /* Number of desc_tags elements */
+ __be32 desc_tags[0]; /* Array of Descriptor Tags.
+ * Each tag indicates a function
+ * supported by the N_Port (request)
+ * or by the N_Port and Fabric
+ * Controller (reply; may be a subset
+ * of the request).
+ * See ELS_FN_DTAG_xxx for tag values.
+ */
};
+/*
+ * ELS_RDF - Register Diagnostic Functions
+ */
+struct fc_els_rdf {
+ __u8 fpin_cmd; /* command (0x19) */
+ __u8 fpin_zero[3]; /* specified as zero - part of cmd */
+ __be32 desc_len; /* Length of Descriptor List (in bytes).
+ * Size of ELS excluding fpin_cmd,
+ * fpin_zero and desc_len fields.
+ */
+ struct fc_tlv_desc desc[0]; /* Descriptor list */
+};
+
+/*
+ * ELS RDF LS_ACC Response.
+ */
+struct fc_els_rdf_resp {
+ struct fc_els_ls_acc acc_hdr;
+ __be32 desc_list_len; /* Length of response (in
+ * bytes). Excludes acc_hdr
+ * and desc_list_len fields.
+ */
+ struct fc_els_lsri_desc lsri;
+ struct fc_tlv_desc desc[0]; /* Supported Descriptor list */
+};
+
+
#endif /* _FC_ELS_H_ */
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 3ae65e93235c..7f5930801f72 100644
--- a/include/uapi/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
@@ -209,7 +209,7 @@ struct fc_bsg_host_vendor {
__u64 vendor_id;
/* start of vendor command area */
- __u32 vendor_cmd[0];
+ __u32 vendor_cmd[];
};
/* Response:
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 6048553c119d..a74ca232f1fc 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -17,6 +17,7 @@
#define __LINUX_UAPI_SND_ASOC_H
#include <linux/types.h>
+#include <sound/asound.h>
/*
* Maximum number of channels topology kcontrol can represent.
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 56d95673ce0f..7184265c0b0d 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -31,7 +31,7 @@
#include <sound/compress_params.h>
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 2, 0)
/**
* struct snd_compressed_buffer - compressed buffer
* @fragment_size: size of buffer fragment in bytes
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 9c96fb0e4d90..79b14389ae41 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -75,7 +75,9 @@
#define SND_AUDIOCODEC_G723_1 ((__u32) 0x0000000C)
#define SND_AUDIOCODEC_G729 ((__u32) 0x0000000D)
#define SND_AUDIOCODEC_BESPOKE ((__u32) 0x0000000E)
-#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_BESPOKE
+#define SND_AUDIOCODEC_ALAC ((__u32) 0x0000000F)
+#define SND_AUDIOCODEC_APE ((__u32) 0x00000010)
+#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_APE
/*
* Profile and modes are listed with bit masks. This allows for a
@@ -142,6 +144,9 @@
#define SND_AUDIOPROFILE_WMA8 ((__u32) 0x00000002)
#define SND_AUDIOPROFILE_WMA9 ((__u32) 0x00000004)
#define SND_AUDIOPROFILE_WMA10 ((__u32) 0x00000008)
+#define SND_AUDIOPROFILE_WMA9_PRO ((__u32) 0x00000010)
+#define SND_AUDIOPROFILE_WMA9_LOSSLESS ((__u32) 0x00000020)
+#define SND_AUDIOPROFILE_WMA10_LOSSLESS ((__u32) 0x00000040)
#define SND_AUDIOMODE_WMA_LEVEL1 ((__u32) 0x00000001)
#define SND_AUDIOMODE_WMA_LEVEL2 ((__u32) 0x00000002)
@@ -326,6 +331,33 @@ struct snd_dec_flac {
__u16 reserved;
} __attribute__((packed, aligned(4)));
+struct snd_dec_wma {
+ __u32 encoder_option;
+ __u32 adv_encoder_option;
+ __u32 adv_encoder_option2;
+ __u32 reserved;
+} __attribute__((packed, aligned(4)));
+
+struct snd_dec_alac {
+ __u32 frame_length;
+ __u8 compatible_version;
+ __u8 pb;
+ __u8 mb;
+ __u8 kb;
+ __u32 max_run;
+ __u32 max_frame_bytes;
+} __attribute__((packed, aligned(4)));
+
+struct snd_dec_ape {
+ __u16 compatible_version;
+ __u16 compression_level;
+ __u32 format_flags;
+ __u32 blocks_per_frame;
+ __u32 final_frame_blocks;
+ __u32 total_frames;
+ __u32 seek_table_present;
+} __attribute__((packed, aligned(4)));
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
@@ -333,6 +365,9 @@ union snd_codec_options {
struct snd_enc_flac flac;
struct snd_enc_generic generic;
struct snd_dec_flac flac_d;
+ struct snd_dec_wma wma_d;
+ struct snd_dec_alac alac_d;
+ struct snd_dec_ape ape_d;
} __attribute__((packed, aligned(4)));
/** struct snd_codec_desc - description of codec capabilities
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index c0ef1643c753..5995b79d6df1 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -26,7 +26,7 @@
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 12
+#define SOF_ABI_MINOR 13
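
A minimal sketch of driving the new command on a VM fd, assuming
KVM_CAP_S390_PROTECTED has been checked; on failure the ultravisor codes in
rc/rrc explain why:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pv_enable(int vm_fd)
{
	struct kvm_pv_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = KVM_PV_ENABLE;	/* transition VM to protected mode */
	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) < 0) {
		fprintf(stderr, "rc=0x%hx rrc=0x%hx\n", cmd.rc, cmd.rrc);
		return -1;
	}
	return 0;
}
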
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 1f9bc133e230..77252cb46361 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
/* layers */
int overlay_num;
- struct mmp_overlay overlays[0];
+ struct mmp_overlay overlays[];
};
extern struct mmp_path *mmp_get_path(const char *name);
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index b6571c3cfa31..c4a93ce1de48 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -10,7 +10,7 @@
*
* This is the register set for the fimd and new style framebuffer interface
* found from the S3C2443 onwards into the S3C2416, S3C2450, the
- * S3C64XX series such as the S3C6400 and S3C6410, and EXYNOS series.
+ * S3C64XX series such as the S3C6400 and S3C6410, and Exynos series.
*/
/* VIDCON0 */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 850a43bd69d3..8c0d1edc121c 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -209,15 +209,8 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
unsigned int nr_pages, grant_ref_t *grefs);
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr);
-int xenbus_map_ring(struct xenbus_device *dev,
- grant_ref_t *gnt_refs, unsigned int nr_grefs,
- grant_handle_t *handles, unsigned long *vaddrs,
- bool *leaked);
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
-int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t *handles, unsigned int nr_handles,
- unsigned long *vaddrs);
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
int xenbus_free_evtchn(struct xenbus_device *dev, int port);
diff --git a/init/Kconfig b/init/Kconfig
index f095ec64bb91..574b7215aa6a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -872,7 +872,7 @@ config BLK_CGROUP
This option only enables generic Block IO controller infrastructure.
One needs to also enable actual IO controlling logic/policy. For
enabling proportional weight division of disk bandwidth in CFQ, set
- CONFIG_CFQ_GROUP_IOSCHED=y; for enabling throttling policy, set
+ CONFIG_BFQ_GROUP_IOSCHED=y; for enabling throttling policy, set
CONFIG_BLK_DEV_THROTTLING=y.
See Documentation/admin-guide/cgroup-v1/blkio-controller.rst for more information.
@@ -1029,7 +1029,8 @@ config CGROUP_PERF
help
This option extends the perf per-cpu mode to restrict monitoring
to threads which belong to the cgroup specified and run on the
- designated cpu.
+ designated cpu. It can also be used to include the cgroup ID in
+ samples so that performance events can be monitored per cgroup.
Say N if unsure.
@@ -1537,7 +1538,6 @@ config AIO
config IO_URING
bool "Enable IO uring support" if EXPERT
- select ANON_INODES
select IO_WQ
default y
help
@@ -1555,6 +1555,11 @@ config ADVISE_SYSCALLS
applications use these syscalls, you can disable this option to save
space.
+config HAVE_ARCH_USERFAULTFD_WP
+ bool
+ help
+ Arch has userfaultfd write protection support
+
config MEMBARRIER
bool "Enable membarrier() system call" if EXPERT
default y
diff --git a/init/init_task.c b/init/init_task.c
index 9e5cbe5eab7b..bd403ed3e418 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -26,6 +26,7 @@ static struct signal_struct init_signals = {
.multiprocess = HLIST_HEAD_INIT,
.rlim = INIT_RLIMITS,
.cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
+ .exec_update_mutex = __MUTEX_INITIALIZER(init_signals.exec_update_mutex),
#ifdef CONFIG_POSIX_TIMERS
.posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
.cputimer = {
diff --git a/init/main.c b/init/main.c
index ee4947af823f..e488213857e2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -353,6 +353,8 @@ static int __init bootconfig_params(char *param, char *val,
static void __init setup_boot_config(const char *cmdline)
{
static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
+ const char *msg;
+ int pos;
u32 size, csum;
char *data, *copy;
u32 *hdr;
@@ -400,10 +402,14 @@ static void __init setup_boot_config(const char *cmdline)
memcpy(copy, data, size);
copy[size] = '\0';
- ret = xbc_init(copy);
- if (ret < 0)
- pr_err("Failed to parse bootconfig\n");
- else {
+ ret = xbc_init(copy, &msg, &pos);
+ if (ret < 0) {
+ if (pos < 0)
+ pr_err("Failed to init bootconfig: %s.\n", msg);
+ else
+ pr_err("Failed to parse bootconfig: %s at %d.\n",
+ msg, pos);
+ } else {
pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
/* keys starting with "kernel." are passed via cmdline */
extra_command_line = xbc_make_cmdline("kernel");
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 49a05ba3000d..dc8307bf2d74 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -239,11 +239,10 @@ static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
info->msg_tree_rightmost = rb_prev(node);
rb_erase(node, &info->msg_tree);
- if (info->node_cache) {
+ if (info->node_cache)
kfree(leaf);
- } else {
+ else
info->node_cache = leaf;
- }
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
diff --git a/ipc/shm.c b/ipc/shm.c
index ce1ca9f7c6e9..0ba6add05b35 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1332,7 +1332,7 @@ static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
}
}
-long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
+static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version)
{
struct ipc_namespace *ns;
struct shmid64_ds sem64;
diff --git a/ipc/util.c b/ipc/util.c
index fe61df53775a..97638eb2d7cb 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -885,6 +885,7 @@ static int sysvipc_proc_release(struct inode *inode, struct file *file)
}
static const struct proc_ops sysvipc_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = sysvipc_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
diff --git a/kernel/.gitignore b/kernel/.gitignore
index 34d1e77ee9df..78701ea37c97 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
kheaders.md5
timeconst.h
hz.bc
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index f0d243318452..3596448bfdab 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -160,23 +160,14 @@ static int audit_mark_handle_event(struct fsnotify_group *group,
{
struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
struct audit_fsnotify_mark *audit_mark;
- const struct inode *inode = NULL;
+ const struct inode *inode = fsnotify_data_inode(data, data_type);
audit_mark = container_of(inode_mark, struct audit_fsnotify_mark, mark);
BUG_ON(group != audit_fsnotify_group);
- switch (data_type) {
- case (FSNOTIFY_EVENT_PATH):
- inode = ((const struct path *)data)->dentry->d_inode;
- break;
- case (FSNOTIFY_EVENT_INODE):
- inode = (const struct inode *)data;
- break;
- default:
- BUG();
+ if (WARN_ON(!inode))
return 0;
- }
if (mask & (FS_CREATE|FS_MOVED_TO|FS_DELETE|FS_MOVED_FROM)) {
if (audit_compare_dname_path(dname, audit_mark->path, AUDIT_NAME_FULL))
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 8a8fd732ff6d..e09c551ae52d 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -471,25 +471,13 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
struct fsnotify_iter_info *iter_info)
{
struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
- const struct inode *inode;
+ const struct inode *inode = fsnotify_data_inode(data, data_type);
struct audit_parent *parent;
parent = container_of(inode_mark, struct audit_parent, mark);
BUG_ON(group != audit_watch_group);
-
- switch (data_type) {
- case (FSNOTIFY_EVENT_PATH):
- inode = d_backing_inode(((const struct path *)data)->dentry);
- break;
- case (FSNOTIFY_EVENT_INODE):
- inode = (const struct inode *)data;
- break;
- default:
- BUG();
- inode = NULL;
- break;
- }
+ WARN_ON(!inode);
if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0);
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index f2d7cea86ffe..191c329e482a 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -38,10 +38,7 @@ static bool cgroup_no_v1_named;
*/
static struct workqueue_struct *cgroup_pidlist_destroy_wq;
-/*
- * Protects cgroup_subsys->release_agent_path. Modifying it also requires
- * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
- */
+/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);
bool cgroup1_ssid_disabled(int ssid)
@@ -775,22 +772,29 @@ void cgroup1_release_agent(struct work_struct *work)
{
struct cgroup *cgrp =
container_of(work, struct cgroup, release_agent_work);
- char *pathbuf = NULL, *agentbuf = NULL;
+ char *pathbuf, *agentbuf;
char *argv[3], *envp[3];
int ret;
- mutex_lock(&cgroup_mutex);
+ /* snoop agent path and exit early if empty */
+ if (!cgrp->root->release_agent_path[0])
+ return;
+ /* prepare argument buffers */
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
- agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
- if (!pathbuf || !agentbuf || !strlen(agentbuf))
- goto out;
+ agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!pathbuf || !agentbuf)
+ goto out_free;
- spin_lock_irq(&css_set_lock);
- ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
- spin_unlock_irq(&css_set_lock);
+ spin_lock(&release_agent_path_lock);
+ strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
+ spin_unlock(&release_agent_path_lock);
+ if (!agentbuf[0])
+ goto out_free;
+
+ ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
if (ret < 0 || ret >= PATH_MAX)
- goto out;
+ goto out_free;
argv[0] = agentbuf;
argv[1] = pathbuf;
@@ -801,11 +805,7 @@ void cgroup1_release_agent(struct work_struct *work)
envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[2] = NULL;
- mutex_unlock(&cgroup_mutex);
call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
- goto out_free;
-out:
- mutex_unlock(&cgroup_mutex);
out_free:
kfree(agentbuf);
kfree(pathbuf);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 915dda3f7f19..06b5ea9d899d 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1813,12 +1813,14 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
enum cgroup2_param {
Opt_nsdelegate,
Opt_memory_localevents,
+ Opt_memory_recursiveprot,
nr__cgroup2_params
};
static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
fsparam_flag("nsdelegate", Opt_nsdelegate),
fsparam_flag("memory_localevents", Opt_memory_localevents),
+ fsparam_flag("memory_recursiveprot", Opt_memory_recursiveprot),
{}
};
@@ -1839,6 +1841,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
case Opt_memory_localevents:
ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
return 0;
+ case Opt_memory_recursiveprot:
+ ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
+ return 0;
}
return -EINVAL;
}
@@ -1855,6 +1860,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
+
+ if (root_flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
+ cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;
+ else
+ cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT;
}
}
@@ -1864,6 +1874,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
seq_puts(seq, ",nsdelegate");
if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
seq_puts(seq, ",memory_localevents");
+ if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)
+ seq_puts(seq, ",memory_recursiveprot");
return 0;
}
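From userspace the new knob is just another cgroup2 mount option; a hedged sketch of enabling it (paths illustrative):

	/* equivalent to: mount -t cgroup2 -o memory_recursiveprot none /sys/fs/cgroup */
	mount("none", "/sys/fs/cgroup", "cgroup2", 0, "memory_recursiveprot");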
@@ -1954,7 +1966,8 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
root->kf_root = kernfs_create_root(kf_sops,
KERNFS_ROOT_CREATE_DEACTIVATED |
- KERNFS_ROOT_SUPPORT_EXPORTOP,
+ KERNFS_ROOT_SUPPORT_EXPORTOP |
+ KERNFS_ROOT_SUPPORT_USER_XATTR,
root_cgrp);
if (IS_ERR(root->kf_root)) {
ret = PTR_ERR(root->kf_root);
@@ -2714,11 +2727,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
{
DEFINE_CGROUP_MGCTX(mgctx);
struct task_struct *task;
- int ret;
-
- ret = cgroup_migrate_vet_dst(dst_cgrp);
- if (ret)
- return ret;
+ int ret = 0;
/* look up all src csets */
spin_lock_irq(&css_set_lock);
@@ -4148,7 +4157,8 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
} else if (likely(!(pos->flags & CSS_RELEASED))) {
next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
} else {
- list_for_each_entry_rcu(next, &parent->children, sibling)
+ list_for_each_entry_rcu(next, &parent->children, sibling,
+ lockdep_is_held(&cgroup_mutex))
if (next->serial_nr > pos->serial_nr)
break;
}
@@ -4391,29 +4401,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
lockdep_assert_held(&css_set_lock);
- /* Advance to the next non-empty css_set */
- do {
- cset = css_task_iter_next_css_set(it);
- if (!cset) {
- it->task_pos = NULL;
- return;
+ /* Advance to the next non-empty css_set and find the first non-empty tasks list */
+ while ((cset = css_task_iter_next_css_set(it))) {
+ if (!list_empty(&cset->tasks)) {
+ it->cur_tasks_head = &cset->tasks;
+ break;
+ } else if (!list_empty(&cset->mg_tasks)) {
+ it->cur_tasks_head = &cset->mg_tasks;
+ break;
+ } else if (!list_empty(&cset->dying_tasks)) {
+ it->cur_tasks_head = &cset->dying_tasks;
+ break;
}
- } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
-
- if (!list_empty(&cset->tasks)) {
- it->task_pos = cset->tasks.next;
- it->cur_tasks_head = &cset->tasks;
- } else if (!list_empty(&cset->mg_tasks)) {
- it->task_pos = cset->mg_tasks.next;
- it->cur_tasks_head = &cset->mg_tasks;
- } else {
- it->task_pos = cset->dying_tasks.next;
- it->cur_tasks_head = &cset->dying_tasks;
}
-
- it->tasks_head = &cset->tasks;
- it->mg_tasks_head = &cset->mg_tasks;
- it->dying_tasks_head = &cset->dying_tasks;
+ if (!cset) {
+ it->task_pos = NULL;
+ return;
+ }
+ it->task_pos = it->cur_tasks_head->next;
/*
* We don't keep css_sets locked across iteration steps and thus
@@ -4458,24 +4463,24 @@ static void css_task_iter_advance(struct css_task_iter *it)
repeat:
if (it->task_pos) {
/*
- * Advance iterator to find next entry. cset->tasks is
- * consumed first and then ->mg_tasks. After ->mg_tasks,
- * we move onto the next cset.
+ * Advance the iterator to find the next entry. We go through the
+ * cset's tasks, mg_tasks and dying_tasks lists; once these are
+ * consumed we move on to the next cset.
*/
if (it->flags & CSS_TASK_ITER_SKIPPED)
it->flags &= ~CSS_TASK_ITER_SKIPPED;
else
it->task_pos = it->task_pos->next;
- if (it->task_pos == it->tasks_head) {
- it->task_pos = it->mg_tasks_head->next;
- it->cur_tasks_head = it->mg_tasks_head;
+ if (it->task_pos == &it->cur_cset->tasks) {
+ it->cur_tasks_head = &it->cur_cset->mg_tasks;
+ it->task_pos = it->cur_tasks_head->next;
}
- if (it->task_pos == it->mg_tasks_head) {
- it->task_pos = it->dying_tasks_head->next;
- it->cur_tasks_head = it->dying_tasks_head;
+ if (it->task_pos == &it->cur_cset->mg_tasks) {
+ it->cur_tasks_head = &it->cur_cset->dying_tasks;
+ it->task_pos = it->cur_tasks_head->next;
}
- if (it->task_pos == it->dying_tasks_head)
+ if (it->task_pos == &it->cur_cset->dying_tasks)
css_task_iter_advance_css_set(it);
} else {
/* called from start, proceed to the first cset */
@@ -4493,12 +4498,12 @@ repeat:
goto repeat;
/* and dying leaders w/o live member threads */
- if (it->cur_tasks_head == it->dying_tasks_head &&
+ if (it->cur_tasks_head == &it->cur_cset->dying_tasks &&
!atomic_read(&task->signal->live))
goto repeat;
} else {
/* skip all dying ones */
- if (it->cur_tasks_head == it->dying_tasks_head)
+ if (it->cur_tasks_head == &it->cur_cset->dying_tasks)
goto repeat;
}
}
@@ -4662,13 +4667,28 @@ static int cgroup_procs_show(struct seq_file *s, void *v)
return 0;
}
+static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
+{
+ int ret;
+ struct inode *inode;
+
+ lockdep_assert_held(&cgroup_mutex);
+
+ inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
+ if (!inode)
+ return -ENOMEM;
+
+ ret = inode_permission(inode, MAY_WRITE);
+ iput(inode);
+ return ret;
+}
+
static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
struct cgroup *dst_cgrp,
struct super_block *sb)
{
struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
struct cgroup *com_cgrp = src_cgrp;
- struct inode *inode;
int ret;
lockdep_assert_held(&cgroup_mutex);
@@ -4678,12 +4698,7 @@ static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
com_cgrp = cgroup_parent(com_cgrp);
/* %current should be authorized to migrate to the common ancestor */
- inode = kernfs_get_inode(sb, com_cgrp->procs_file.kn);
- if (!inode)
- return -ENOMEM;
-
- ret = inode_permission(inode, MAY_WRITE);
- iput(inode);
+ ret = cgroup_may_write(com_cgrp, sb);
if (ret)
return ret;
@@ -4699,6 +4714,26 @@ static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
return 0;
}
+static int cgroup_attach_permissions(struct cgroup *src_cgrp,
+ struct cgroup *dst_cgrp,
+ struct super_block *sb, bool threadgroup)
+{
+ int ret = 0;
+
+ ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb);
+ if (ret)
+ return ret;
+
+ ret = cgroup_migrate_vet_dst(dst_cgrp);
+ if (ret)
+ return ret;
+
+ if (!threadgroup && (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp))
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
@@ -4721,8 +4756,8 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
spin_unlock_irq(&css_set_lock);
- ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp,
- of->file->f_path.dentry->d_sb);
+ ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
+ of->file->f_path.dentry->d_sb, true);
if (ret)
goto out_finish;
@@ -4766,16 +4801,11 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of,
spin_unlock_irq(&css_set_lock);
/* thread migrations follow the cgroup.procs delegation rule */
- ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp,
- of->file->f_path.dentry->d_sb);
+ ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
+ of->file->f_path.dentry->d_sb, false);
if (ret)
goto out_finish;
- /* and must be contained in the same domain */
- ret = -EOPNOTSUPP;
- if (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp)
- goto out_finish;
-
ret = cgroup_attach_task(dst_cgrp, task, false);
out_finish:
@@ -5864,8 +5894,7 @@ out:
* @child: pointer to task_struct of forking parent process.
*
* A task is associated with the init_css_set until cgroup_post_fork()
- * attaches it to the parent's css_set. Empty cg_list indicates that
- * @child isn't holding reference to its css_set.
+ * attaches it to the target css_set.
*/
void cgroup_fork(struct task_struct *child)
{
@@ -5873,21 +5902,172 @@ void cgroup_fork(struct task_struct *child)
INIT_LIST_HEAD(&child->cg_list);
}
+static struct cgroup *cgroup_get_from_file(struct file *f)
+{
+ struct cgroup_subsys_state *css;
+ struct cgroup *cgrp;
+
+ css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
+ if (IS_ERR(css))
+ return ERR_CAST(css);
+
+ cgrp = css->cgroup;
+ if (!cgroup_on_dfl(cgrp)) {
+ cgroup_put(cgrp);
+ return ERR_PTR(-EBADF);
+ }
+
+ return cgrp;
+}
+
+/**
+ * cgroup_css_set_fork - find or create a css_set for a child process
+ * @kargs: the arguments passed to create the child process
+ *
+ * This function finds or creates a new css_set which the child
+ * process will be attached to in cgroup_post_fork(). By default,
+ * the child process will be given the same css_set as its parent.
+ *
+ * If CLONE_INTO_CGROUP is specified, this function will try to find an
+ * existing css_set which includes the requested cgroup and, if none
+ * exists, create a new css_set that the child will be attached to
+ * later. If this function succeeds it will hold cgroup_threadgroup_rwsem
+ * on return. If CLONE_INTO_CGROUP is requested, this function will also
+ * grab cgroup_mutex before grabbing cgroup_threadgroup_rwsem and will
+ * hold a reference to the target cgroup.
+ */
+static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+ __acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem)
+{
+ int ret;
+ struct cgroup *dst_cgrp = NULL;
+ struct css_set *cset;
+ struct super_block *sb;
+ struct file *f;
+
+ if (kargs->flags & CLONE_INTO_CGROUP)
+ mutex_lock(&cgroup_mutex);
+
+ cgroup_threadgroup_change_begin(current);
+
+ spin_lock_irq(&css_set_lock);
+ cset = task_css_set(current);
+ get_css_set(cset);
+ spin_unlock_irq(&css_set_lock);
+
+ if (!(kargs->flags & CLONE_INTO_CGROUP)) {
+ kargs->cset = cset;
+ return 0;
+ }
+
+ f = fget_raw(kargs->cgroup);
+ if (!f) {
+ ret = -EBADF;
+ goto err;
+ }
+ sb = f->f_path.dentry->d_sb;
+
+ dst_cgrp = cgroup_get_from_file(f);
+ if (IS_ERR(dst_cgrp)) {
+ ret = PTR_ERR(dst_cgrp);
+ dst_cgrp = NULL;
+ goto err;
+ }
+
+ if (cgroup_is_dead(dst_cgrp)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /*
+ * Verify that the target cgroup is writable for us. This is
+ * usually done by the vfs layer but since we're not going through
+ * the vfs layer here we need to do it "manually".
+ */
+ ret = cgroup_may_write(dst_cgrp, sb);
+ if (ret)
+ goto err;
+
+ ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
+ !(kargs->flags & CLONE_THREAD));
+ if (ret)
+ goto err;
+
+ kargs->cset = find_css_set(cset, dst_cgrp);
+ if (!kargs->cset) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ put_css_set(cset);
+ fput(f);
+ kargs->cgrp = dst_cgrp;
+ return ret;
+
+err:
+ cgroup_threadgroup_change_end(current);
+ mutex_unlock(&cgroup_mutex);
+ if (f)
+ fput(f);
+ if (dst_cgrp)
+ cgroup_put(dst_cgrp);
+ put_css_set(cset);
+ if (kargs->cset)
+ put_css_set(kargs->cset);
+ return ret;
+}
+
+/**
+ * cgroup_css_set_put_fork - drop references we took during fork
+ * @kargs: the arguments passed to create the child process
+ *
+ * Drop references to the prepared css_set and target cgroup if
+ * CLONE_INTO_CGROUP was requested.
+ */
+static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
+ __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
+{
+ cgroup_threadgroup_change_end(current);
+
+ if (kargs->flags & CLONE_INTO_CGROUP) {
+ struct cgroup *cgrp = kargs->cgrp;
+ struct css_set *cset = kargs->cset;
+
+ mutex_unlock(&cgroup_mutex);
+
+ if (cset) {
+ put_css_set(cset);
+ kargs->cset = NULL;
+ }
+
+ if (cgrp) {
+ cgroup_put(cgrp);
+ kargs->cgrp = NULL;
+ }
+ }
+}
+
/**
* cgroup_can_fork - called on a new task before the process is exposed
- * @child: the task in question.
+ * @child: the child process
*
- * This calls the subsystem can_fork() callbacks. If the can_fork() callback
- * returns an error, the fork aborts with that error code. This allows for
- * a cgroup subsystem to conditionally allow or deny new forks.
+ * This prepares a new css_set to which the child will be attached
+ * in cgroup_post_fork().
+ * This calls the subsystem can_fork() callbacks. If a can_fork()
+ * callback returns an error, the fork aborts with that error code. This
+ * allows for a cgroup subsystem to conditionally allow or deny new forks.
*/
-int cgroup_can_fork(struct task_struct *child)
+int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs)
{
struct cgroup_subsys *ss;
int i, j, ret;
+ ret = cgroup_css_set_fork(kargs);
+ if (ret)
+ return ret;
+
do_each_subsys_mask(ss, i, have_canfork_callback) {
- ret = ss->can_fork(child);
+ ret = ss->can_fork(child, kargs->cset);
if (ret)
goto out_revert;
} while_each_subsys_mask();
@@ -5899,54 +6079,64 @@ out_revert:
if (j >= i)
break;
if (ss->cancel_fork)
- ss->cancel_fork(child);
+ ss->cancel_fork(child, kargs->cset);
}
+ cgroup_css_set_put_fork(kargs);
+
return ret;
}
/**
* cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
- * @child: the task in question
+ * @child: the child process
+ * @kargs: the arguments passed to create the child process
*
* This calls the cancel_fork() callbacks if a fork failed *after*
- * cgroup_can_fork() succeded.
+ * cgroup_can_fork() succeeded and cleans up references we took to
+ * prepare a new css_set for the child process in cgroup_can_fork().
*/
-void cgroup_cancel_fork(struct task_struct *child)
+void cgroup_cancel_fork(struct task_struct *child,
+ struct kernel_clone_args *kargs)
{
struct cgroup_subsys *ss;
int i;
for_each_subsys(ss, i)
if (ss->cancel_fork)
- ss->cancel_fork(child);
+ ss->cancel_fork(child, kargs->cset);
+
+ cgroup_css_set_put_fork(kargs);
}
/**
- * cgroup_post_fork - called on a new task after adding it to the task list
- * @child: the task in question
- *
- * Adds the task to the list running through its css_set if necessary and
- * call the subsystem fork() callbacks. Has to be after the task is
- * visible on the task list in case we race with the first call to
- * cgroup_task_iter_start() - to guarantee that the new task ends up on its
- * list.
+ * cgroup_post_fork - finalize cgroup setup for the child process
+ * @child: the child process
+ *
+ * Attach the child process to its css_set and call the subsystem
+ * fork() callbacks.
*/
-void cgroup_post_fork(struct task_struct *child)
+void cgroup_post_fork(struct task_struct *child,
+ struct kernel_clone_args *kargs)
+ __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
{
struct cgroup_subsys *ss;
struct css_set *cset;
int i;
+ cset = kargs->cset;
+ kargs->cset = NULL;
+
spin_lock_irq(&css_set_lock);
/* init tasks are special, only link regular threads */
if (likely(child->pid)) {
WARN_ON_ONCE(!list_empty(&child->cg_list));
- cset = task_css_set(current); /* current is @child's parent */
- get_css_set(cset);
cset->nr_tasks++;
css_set_move_task(child, NULL, cset, false);
+ } else {
+ put_css_set(cset);
+ cset = NULL;
}
/*
@@ -5978,6 +6168,17 @@ void cgroup_post_fork(struct task_struct *child)
do_each_subsys_mask(ss, i, have_fork_callback) {
ss->fork(child);
} while_each_subsys_mask();
+
+ /* Make the new cset the root_cset of the new cgroup namespace. */
+ if (kargs->flags & CLONE_NEWCGROUP) {
+ struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset;
+
+ get_css_set(cset);
+ child->nsproxy->cgroup_ns->root_cset = cset;
+ put_css_set(rcset);
+ }
+
+ cgroup_css_set_put_fork(kargs);
}
/**
@@ -6164,7 +6365,6 @@ EXPORT_SYMBOL_GPL(cgroup_get_from_path);
*/
struct cgroup *cgroup_get_from_fd(int fd)
{
- struct cgroup_subsys_state *css;
struct cgroup *cgrp;
struct file *f;
@@ -6172,17 +6372,8 @@ struct cgroup *cgroup_get_from_fd(int fd)
if (!f)
return ERR_PTR(-EBADF);
- css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
+ cgrp = cgroup_get_from_file(f);
fput(f);
- if (IS_ERR(css))
- return ERR_CAST(css);
-
- cgrp = css->cgroup;
- if (!cgroup_on_dfl(cgrp)) {
- cgroup_put(cgrp);
- return ERR_PTR(-EBADF);
- }
-
return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
@@ -6412,7 +6603,10 @@ static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "nsdelegate\nmemory_localevents\n");
+ return snprintf(buf, PAGE_SIZE,
+ "nsdelegate\n"
+ "memory_localevents\n"
+ "memory_recursiveprot\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 58f5073acff7..729d3a5c772e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -358,8 +358,12 @@ static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
/*
- * Cgroup v2 behavior is used when on default hierarchy or the
- * cgroup_v2_mode flag is set.
+ * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
+ * on the default hierarchy, or when the cpuset_v2_mode flag is set by mounting
+ * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
+ * With v2 behavior, "cpus" and "mems" are always what the users have
+ * requested and won't be changed by hotplug events. Only the effective
+ * cpus or mems will be affected.
*/
static inline bool is_in_v2_mode(void)
{
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index 138059eb730d..511af87f685e 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -33,6 +33,7 @@
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
+#include <linux/sched/task.h>
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"
@@ -214,13 +215,16 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
* task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
* on cgroup_threadgroup_change_begin() held by the copy_process().
*/
-static int pids_can_fork(struct task_struct *task)
+static int pids_can_fork(struct task_struct *task, struct css_set *cset)
{
struct cgroup_subsys_state *css;
struct pids_cgroup *pids;
int err;
- css = task_css_check(current, pids_cgrp_id, true);
+ if (cset)
+ css = cset->subsys[pids_cgrp_id];
+ else
+ css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);
err = pids_try_charge(pids, 1);
if (err) {
@@ -235,12 +239,15 @@ static int pids_can_fork(struct task_struct *task)
return err;
}
-static void pids_cancel_fork(struct task_struct *task)
+static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)
{
struct cgroup_subsys_state *css;
struct pids_cgroup *pids;
- css = task_css_check(current, pids_cgrp_id, true);
+ if (cset)
+ css = cset->subsys[pids_cgrp_id];
+ else
+ css = task_css_check(current, pids_cgrp_id, true);
pids = css_pids(css);
pids_uncharge(pids, 1);
}
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
index 7fa0c4ae6394..8a44b93da0f3 100644
--- a/kernel/configs/tiny.config
+++ b/kernel/configs/tiny.config
@@ -6,7 +6,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KERNEL_XZ=y
# CONFIG_KERNEL_LZO is not set
# CONFIG_KERNEL_LZ4 is not set
-CONFIG_OPTIMIZE_INLINING=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
diff --git a/kernel/cred.c b/kernel/cred.c
index 809a985b1793..71a792616917 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -675,8 +675,6 @@ void __init cred_init(void)
* The caller may change these controls afterwards if desired.
*
* Returns the new credentials or NULL if out of memory.
- *
- * Does not take, and does not return holding current->cred_replace_mutex.
*/
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
diff --git a/kernel/debug/kdb/.gitignore b/kernel/debug/kdb/.gitignore
index 396d12eda9e8..df259542a236 100644
--- a/kernel/debug/kdb/.gitignore
+++ b/kernel/debug/kdb/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
gen-kdb_cmds.c
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index ba12e9f4661e..515379cbf209 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -399,6 +399,13 @@ int kdb_set(int argc, const char **argv)
return KDB_ARGCOUNT;
/*
+ * Censor sensitive variables
+ */
+ if (strcmp(argv[1], "PROMPT") == 0 &&
+ !kdb_check_flags(KDB_ENABLE_MEM_READ, kdb_cmd_enabled, false))
+ return KDB_NOPERM;
+
+ /*
* Check for internal variables
*/
if (strcmp(argv[1], "KDBDEBUG") == 0) {
@@ -1102,12 +1109,12 @@ static int handle_ctrl_cmd(char *cmd)
case CTRL_P:
if (cmdptr != cmd_tail)
cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
- strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ strscpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
case CTRL_N:
if (cmdptr != cmd_head)
cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
- strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ strscpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
}
return 0;
@@ -1298,12 +1305,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
*(cmd_hist[cmd_head]) = '\0';
do_full_getstr:
-#if defined(CONFIG_SMP)
+ /* PROMPT can only be set if we have MEM_READ permission. */
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
raw_smp_processor_id());
-#else
- snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
-#endif
if (defcmd_in_progress)
strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
@@ -1314,7 +1318,7 @@ do_full_getstr:
if (*cmdbuf != '\n') {
if (*cmdbuf < 32) {
if (cmdptr == cmd_head) {
- strncpy(cmd_hist[cmd_head], cmd_cur,
+ strscpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
*(cmd_hist[cmd_head] +
strlen(cmd_hist[cmd_head])-1) = '\0';
@@ -1324,7 +1328,7 @@ do_full_getstr:
cmdbuf = cmd_cur;
goto do_full_getstr;
} else {
- strncpy(cmd_hist[cmd_head], cmd_cur,
+ strscpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
}
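The strncpy() -> strscpy() conversions here matter because strncpy() leaves the destination unterminated when the source fills the buffer, while strscpy() always NUL-terminates and reports truncation. A small illustration (values made up):

	char buf[8];
	ssize_t n;

	n = strscpy(buf, "0123456789", sizeof(buf));
	/* buf now holds "0123456" plus a NUL; n == -E2BIG signals truncation */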
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 551b0eb7028a..2a0c4985f38e 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -134,7 +134,7 @@ static void *__dma_alloc_from_coherent(struct device *dev,
spin_lock_irqsave(&mem->spinlock, flags);
- if (unlikely(size > (mem->size << PAGE_SHIFT)))
+ if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
goto err;
pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
@@ -144,8 +144,9 @@ static void *__dma_alloc_from_coherent(struct device *dev,
/*
* Memory was found in the coherent area.
*/
- *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
- ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ *dma_handle = dma_get_device_base(dev, mem) +
+ ((dma_addr_t)pageno << PAGE_SHIFT);
+ ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
spin_unlock_irqrestore(&mem->spinlock, flags);
memset(ret, 0, size);
return ret;
@@ -194,7 +195,7 @@ static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
int order, void *vaddr)
{
if (mem && vaddr >= mem->virt_base && vaddr <
- (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
unsigned long flags;
@@ -238,10 +239,10 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
if (mem && vaddr >= mem->virt_base && vaddr + size <=
- (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
- int user_count = vma_pages(vma);
+ unsigned long user_count = vma_pages(vma);
int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
*ret = -ENXIO;
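The (dma_addr_t) casts are the real fix in these hunks: mem->size counts pages in an int, so the shift was previously done in 32-bit arithmetic and overflowed for pools of 4 GiB or more. A worked illustration, assuming 4 KiB pages:

	int pages = 0x100000;	/* 1M pages == a 4 GiB pool */
	dma_addr_t wrong = pages << PAGE_SHIFT;			/* shifted as int: overflows */
	dma_addr_t right = (dma_addr_t)pages << PAGE_SHIFT;	/* 0x100000000 */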
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index ac7956c38f69..a8560052a915 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -157,11 +157,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
dma_pgprot(dev, PAGE_KERNEL, attrs),
__builtin_return_address(0));
- if (!ret) {
- dma_free_contiguous(dev, page, size);
- return ret;
- }
-
+ if (!ret)
+ goto out_free_pages;
memset(ret, 0, size);
goto done;
}
@@ -174,8 +171,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
* so log an error and fail.
*/
dev_info(dev, "Rejecting highmem page from CMA.\n");
- dma_free_contiguous(dev, page, size);
- return NULL;
+ goto out_free_pages;
}
ret = page_address(page);
@@ -184,10 +180,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
memset(ret, 0, size);
- if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
dma_alloc_need_uncached(dev, attrs)) {
arch_dma_prep_coherent(page, size);
- ret = uncached_kernel_address(ret);
+ ret = arch_dma_set_uncached(ret, size);
+ if (IS_ERR(ret))
+ goto out_free_pages;
}
done:
if (force_dma_unencrypted(dev))
@@ -195,6 +193,9 @@ done:
else
*dma_handle = phys_to_dma(dev, page_to_phys(page));
return ret;
+out_free_pages:
+ dma_free_contiguous(dev, page, size);
+ return NULL;
}
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
@@ -218,6 +219,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
vunmap(cpu_addr);
+ else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
+ arch_dma_clear_uncached(cpu_addr, size);
dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}
@@ -225,7 +228,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_alloc_need_uncached(dev, attrs))
return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
@@ -235,7 +238,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
- if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_alloc_need_uncached(dev, attrs))
arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 318435c5bf0b..55e44417f66d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
+#include <linux/hugetlb.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
@@ -387,6 +388,7 @@ static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
+static atomic_t nr_cgroup_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
@@ -1295,7 +1297,7 @@ static void put_ctx(struct perf_event_context *ctx)
* function.
*
* Lock order:
- * cred_guard_mutex
+ * exec_update_mutex
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event::child_mutex;
@@ -1861,6 +1863,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
size += sizeof(data->phys_addr);
+ if (sample_type & PERF_SAMPLE_CGROUP)
+ size += sizeof(data->cgroup);
+
event->header_size = size;
}
@@ -4608,6 +4613,8 @@ static void unaccount_event(struct perf_event *event)
atomic_dec(&nr_comm_events);
if (event->attr.namespaces)
atomic_dec(&nr_namespaces_events);
+ if (event->attr.cgroup)
+ atomic_dec(&nr_cgroup_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.freq)
@@ -6864,6 +6871,9 @@ void perf_output_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
perf_output_put(handle, data->phys_addr);
+ if (sample_type & PERF_SAMPLE_CGROUP)
+ perf_output_put(handle, data->cgroup);
+
if (sample_type & PERF_SAMPLE_AUX) {
perf_output_put(handle, data->aux_size);
@@ -7063,6 +7073,16 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_PHYS_ADDR)
data->phys_addr = perf_virt_to_phys(data->addr);
+#ifdef CONFIG_CGROUP_PERF
+ if (sample_type & PERF_SAMPLE_CGROUP) {
+ struct cgroup *cgrp;
+
+ /* protected by RCU */
+ cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
+ data->cgroup = cgroup_id(cgrp);
+ }
+#endif
+
if (sample_type & PERF_SAMPLE_AUX) {
u64 size;
@@ -7736,6 +7756,105 @@ void perf_event_namespaces(struct task_struct *task)
}
/*
+ * cgroup tracking
+ */
+#ifdef CONFIG_CGROUP_PERF
+
+struct perf_cgroup_event {
+ char *path;
+ int path_size;
+ struct {
+ struct perf_event_header header;
+ u64 id;
+ char path[];
+ } event_id;
+};
+
+static int perf_event_cgroup_match(struct perf_event *event)
+{
+ return event->attr.cgroup;
+}
+
+static void perf_event_cgroup_output(struct perf_event *event, void *data)
+{
+ struct perf_cgroup_event *cgroup_event = data;
+ struct perf_output_handle handle;
+ struct perf_sample_data sample;
+ u16 header_size = cgroup_event->event_id.header.size;
+ int ret;
+
+ if (!perf_event_cgroup_match(event))
+ return;
+
+ perf_event_header__init_id(&cgroup_event->event_id.header,
+ &sample, event);
+ ret = perf_output_begin(&handle, event,
+ cgroup_event->event_id.header.size);
+ if (ret)
+ goto out;
+
+ perf_output_put(&handle, cgroup_event->event_id);
+ __output_copy(&handle, cgroup_event->path, cgroup_event->path_size);
+
+ perf_event__output_id_sample(event, &handle, &sample);
+
+ perf_output_end(&handle);
+out:
+ cgroup_event->event_id.header.size = header_size;
+}
+
+static void perf_event_cgroup(struct cgroup *cgrp)
+{
+ struct perf_cgroup_event cgroup_event;
+ char path_enomem[16] = "//enomem";
+ char *pathname;
+ size_t size;
+
+ if (!atomic_read(&nr_cgroup_events))
+ return;
+
+ cgroup_event = (struct perf_cgroup_event){
+ .event_id = {
+ .header = {
+ .type = PERF_RECORD_CGROUP,
+ .misc = 0,
+ .size = sizeof(cgroup_event.event_id),
+ },
+ .id = cgroup_id(cgrp),
+ },
+ };
+
+ pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (pathname == NULL) {
+ cgroup_event.path = path_enomem;
+ } else {
+ /* just to make sure there is enough space for alignment */
+ cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64));
+ cgroup_event.path = pathname;
+ }
+
+ /*
+ * Since our buffer works in 8 byte units we need to align our string
+ * size to a multiple of 8. However, we must guarantee the tail end is
+ * zero'd out to avoid leaking random bits to userspace.
+ */
+ size = strlen(cgroup_event.path) + 1;
+ while (!IS_ALIGNED(size, sizeof(u64)))
+ cgroup_event.path[size++] = '\0';
+
+ cgroup_event.event_id.header.size += size;
+ cgroup_event.path_size = size;
+
+ perf_iterate_sb(perf_event_cgroup_output,
+ &cgroup_event,
+ NULL);
+
+ kfree(pathname);
+}
+
+#endif
+
+/*
* mmap tracking
*/
@@ -7855,7 +7974,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
flags |= MAP_EXECUTABLE;
if (vma->vm_flags & VM_LOCKED)
flags |= MAP_LOCKED;
- if (vma->vm_flags & VM_HUGETLB)
+ if (is_vm_hugetlb_page(vma))
flags |= MAP_HUGETLB;
if (file) {
@@ -10778,6 +10897,8 @@ static void account_event(struct perf_event *event)
atomic_inc(&nr_comm_events);
if (event->attr.namespaces)
atomic_inc(&nr_namespaces_events);
+ if (event->attr.cgroup)
+ atomic_inc(&nr_cgroup_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
if (event->attr.freq)
@@ -11157,6 +11278,12 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
ret = perf_reg_validate(attr->sample_regs_intr);
+
+#ifndef CONFIG_CGROUP_PERF
+ if (attr->sample_type & PERF_SAMPLE_CGROUP)
+ return -EINVAL;
+#endif
+
out:
return ret;
@@ -11425,14 +11552,14 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (task) {
- err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+ err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
if (err)
goto err_task;
/*
* Reuse ptrace permission checks for now.
*
- * We must hold cred_guard_mutex across this and any potential
+ * We must hold exec_update_mutex across this and any potential
* perf_install_in_context() call for this new event to
* serialize against exec() altering our credentials (and the
* perf_event_exit_task() that could imply).
@@ -11721,7 +11848,7 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_unlock(&ctx->mutex);
if (task) {
- mutex_unlock(&task->signal->cred_guard_mutex);
+ mutex_unlock(&task->signal->exec_update_mutex);
put_task_struct(task);
}
@@ -11757,7 +11884,7 @@ err_alloc:
free_event(event);
err_cred:
if (task)
- mutex_unlock(&task->signal->cred_guard_mutex);
+ mutex_unlock(&task->signal->exec_update_mutex);
err_task:
if (task)
put_task_struct(task);
@@ -12062,7 +12189,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
/*
* When a child task exits, feed back event values to parent events.
*
- * Can be called with cred_guard_mutex held when called from
+ * Can be called with exec_update_mutex held when called from
* install_exec_creds().
*/
void perf_event_exit_task(struct task_struct *child)
@@ -12754,6 +12881,12 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
kfree(jc);
}
+static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+ perf_event_cgroup(css->cgroup);
+ return 0;
+}
+
static int __perf_cgroup_move(void *info)
{
struct task_struct *task = info;
@@ -12775,6 +12908,7 @@ static void perf_cgroup_attach(struct cgroup_taskset *tset)
struct cgroup_subsys perf_event_cgrp_subsys = {
.css_alloc = perf_cgroup_css_alloc,
.css_free = perf_cgroup_css_free,
+ .css_online = perf_cgroup_css_online,
.attach = perf_cgroup_attach,
/*
* Implicitly enable on dfl hierarchy so that perf events can
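On the userspace side, the new attribute bit and sample type travel together: attr.cgroup requests PERF_RECORD_CGROUP side-band events, and PERF_SAMPLE_CGROUP adds the cgroup ID to each sample. A hedged sketch of an attr set up this way (the counter choice is illustrative):

	struct perf_event_attr attr = { 0 };

	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CGROUP;
	attr.cgroup = 1;	/* emit PERF_RECORD_CGROUP for new cgroups */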
diff --git a/kernel/exit.c b/kernel/exit.c
index d70d47159640..389a88cb3081 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -182,6 +182,7 @@ void put_task_struct_rcu_user(struct task_struct *task)
void release_task(struct task_struct *p)
{
struct task_struct *leader;
+ struct pid *thread_pid;
int zap_leader;
repeat:
/* don't need to get the RCU readlock here - the process is dead and
@@ -190,11 +191,11 @@ repeat:
atomic_dec(&__task_cred(p)->user->processes);
rcu_read_unlock();
- proc_flush_task(p);
cgroup_release(p);
write_lock_irq(&tasklist_lock);
ptrace_release_task(p);
+ thread_pid = get_pid(p->thread_pid);
__exit_signal(p);
/*
@@ -217,6 +218,7 @@ repeat:
}
write_unlock_irq(&tasklist_lock);
+ proc_flush_pid(thread_pid);
release_thread(p);
put_task_struct_rcu_user(p);
diff --git a/kernel/extable.c b/kernel/extable.c
index 7681f87e89dd..b0ea5eb0c3b4 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -34,7 +34,8 @@ u32 __initdata __visible main_extable_sort_needed = 1;
/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
- if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
+ if (main_extable_sort_needed &&
+ &__stop___ex_table > &__start___ex_table) {
pr_notice("Sorting __ex_table...\n");
sort_extable(__start___ex_table, __stop___ex_table);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index d90af13431c7..4385f3d639f2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -281,7 +281,7 @@ static inline void free_thread_stack(struct task_struct *tsk)
MEMCG_KERNEL_STACK_KB,
-(int)(PAGE_SIZE / 1024));
- memcg_kmem_uncharge(vm->pages[i], 0);
+ memcg_kmem_uncharge_page(vm->pages[i], 0);
}
for (i = 0; i < NR_CACHED_STACKS; i++) {
@@ -361,6 +361,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
if (new) {
*new = *orig;
INIT_LIST_HEAD(&new->anon_vma_chain);
+ new->vm_next = new->vm_prev = NULL;
}
return new;
}
@@ -413,12 +414,13 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk)
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
/*
- * If memcg_kmem_charge() fails, page->mem_cgroup
- * pointer is NULL, and both memcg_kmem_uncharge()
+ * If memcg_kmem_charge_page() fails, page->mem_cgroup
+ * pointer is NULL, and both memcg_kmem_uncharge_page()
* and mod_memcg_page_state() in free_thread_stack()
* will ignore this page. So it's safe.
*/
- ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
+ ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL,
+ 0);
if (ret)
return ret;
@@ -552,14 +554,15 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
if (retval)
goto fail_nomem_anon_vma_fork;
if (tmp->vm_flags & VM_WIPEONFORK) {
- /* VM_WIPEONFORK gets a clean slate in the child. */
+ /*
+ * VM_WIPEONFORK gets a clean slate in the child.
+ * Don't prepare anon_vma until fault time, since we don't
+ * copy pages for the current vma.
+ */
tmp->anon_vma = NULL;
- if (anon_vma_prepare(tmp))
- goto fail_nomem_anon_vma_fork;
} else if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
- tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {
struct inode *inode = file_inode(file);
@@ -1224,7 +1227,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
struct mm_struct *mm;
int err;
- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ err = mutex_lock_killable(&task->signal->exec_update_mutex);
if (err)
return ERR_PTR(err);
@@ -1234,7 +1237,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
mmput(mm);
mm = ERR_PTR(-EACCES);
}
- mutex_unlock(&task->signal->cred_guard_mutex);
+ mutex_unlock(&task->signal->exec_update_mutex);
return mm;
}
@@ -1594,6 +1597,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->oom_score_adj_min = current->signal->oom_score_adj_min;
mutex_init(&sig->cred_guard_mutex);
+ mutex_init(&sig->exec_update_mutex);
return 0;
}
@@ -2174,16 +2178,15 @@ static __latent_entropy struct task_struct *copy_process(
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
- cgroup_threadgroup_change_begin(current);
/*
* Ensure that the cgroup subsystem policies allow the new process to be
* forked. It should be noted that the new process's css_set can be changed
* between here and cgroup_post_fork() if an organisation operation is in
* progress.
*/
- retval = cgroup_can_fork(p);
+ retval = cgroup_can_fork(p, args);
if (retval)
- goto bad_fork_cgroup_threadgroup_change_end;
+ goto bad_fork_put_pidfd;
/*
* From this point on we must avoid any synchronous user-space
@@ -2288,8 +2291,7 @@ static __latent_entropy struct task_struct *copy_process(
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
- cgroup_post_fork(p);
- cgroup_threadgroup_change_end(current);
+ cgroup_post_fork(p, args);
perf_event_fork(p);
trace_task_newtask(p, clone_flags);
@@ -2300,9 +2302,7 @@ static __latent_entropy struct task_struct *copy_process(
bad_fork_cancel_cgroup:
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
- cgroup_cancel_fork(p);
-bad_fork_cgroup_threadgroup_change_end:
- cgroup_threadgroup_change_end(current);
+ cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
if (clone_flags & CLONE_PIDFD) {
fput(pidfile);
@@ -2631,6 +2631,9 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
!valid_signal(args.exit_signal)))
return -EINVAL;
+ if ((args.flags & CLONE_INTO_CGROUP) && args.cgroup < 0)
+ return -EINVAL;
+
*kargs = (struct kernel_clone_args){
.flags = args.flags,
.pidfd = u64_to_user_ptr(args.pidfd),
@@ -2641,6 +2644,7 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
.stack_size = args.stack_size,
.tls = args.tls,
.set_tid_size = args.set_tid_size,
+ .cgroup = args.cgroup,
};
if (args.set_tid &&
@@ -2684,7 +2688,8 @@ static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
/* Verify that no unknown flags are passed along. */
- if (kargs->flags & ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND))
+ if (kargs->flags &
+ ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
return false;
/*
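Taken together with the CLONE_INTO_CGROUP plumbing above, userspace opts in by passing the fd of a cgroup2 directory in clone_args. A hypothetical caller, error handling elided; target_cgroup_fd is assumed to be an open, writable cgroup2 directory:

	struct clone_args args = {
		.flags       = CLONE_INTO_CGROUP,
		.exit_signal = SIGCHLD,
		.cgroup      = (__u64)target_cgroup_fd,
	};
	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));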
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index e5eb5ea7ea59..5e891c3c2d93 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -58,7 +58,7 @@ struct gcov_node {
struct dentry *dentry;
struct dentry **links;
int num_loaded;
- char name[0];
+ char name[];
};
static const char objtree[] = OBJTREE;
diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c
index 801ee4b0b969..acb83558e5df 100644
--- a/kernel/gcov/gcc_3_4.c
+++ b/kernel/gcov/gcc_3_4.c
@@ -38,7 +38,7 @@ static struct gcov_info *gcov_info_head;
struct gcov_fn_info {
unsigned int ident;
unsigned int checksum;
- unsigned int n_ctrs[0];
+ unsigned int n_ctrs[];
};
/**
@@ -78,7 +78,7 @@ struct gcov_info {
unsigned int n_functions;
const struct gcov_fn_info *functions;
unsigned int ctr_mask;
- struct gcov_ctr_info counts[0];
+ struct gcov_ctr_info counts[];
};
/**
@@ -352,7 +352,7 @@ struct gcov_iterator {
unsigned int count;
int num_types;
- struct type_info type_info[0];
+ struct type_info type_info[];
};
static struct gcov_fn_info *get_func(struct gcov_iterator *iter)
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index ec37563674d6..908fdf5098c3 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -68,7 +68,7 @@ struct gcov_fn_info {
unsigned int ident;
unsigned int lineno_checksum;
unsigned int cfg_checksum;
- struct gcov_ctr_info ctrs[0];
+ struct gcov_ctr_info ctrs[];
};
/**
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index a9b3f660dee7..16c8c605f4b0 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -175,7 +175,6 @@ unsigned long kallsyms_lookup_name(const char *name)
}
return module_kallsyms_lookup_name(name);
}
-EXPORT_SYMBOL_GPL(kallsyms_lookup_name);
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
unsigned long),
@@ -194,7 +193,6 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
}
return module_kallsyms_on_each_symbol(fn, data);
}
-EXPORT_SYMBOL_GPL(kallsyms_on_each_symbol);
static unsigned long get_symbol_pos(unsigned long addr,
unsigned long *symbolsize,
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index a0e3d7a0e8b8..b3ff9288c6cc 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
/*
* One should have enough rights to inspect task details.
*/
- ret = kcmp_lock(&task1->signal->cred_guard_mutex,
- &task2->signal->cred_guard_mutex);
+ ret = kcmp_lock(&task1->signal->exec_update_mutex,
+ &task2->signal->exec_update_mutex);
if (ret)
goto err;
if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
@@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
}
err_unlock:
- kcmp_unlock(&task1->signal->cred_guard_mutex,
- &task2->signal->cred_guard_mutex);
+ kcmp_unlock(&task1->signal->exec_update_mutex,
+ &task2->signal->exec_update_mutex);
err:
put_task_struct(task1);
put_task_struct(task2);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bc6addd9152b..8b2b311afa95 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -35,7 +35,7 @@
* (u64) THREAD_SIZE * 8UL);
*
* Needing less than 50 threads would mean we're dealing with systems
- * smaller than 3200 pages. This assuems you are capable of having ~13M memory,
+ * smaller than 3200 pages. This assumes you are capable of having ~13M memory,
* and this would only be an upper limit, after which the OOM killer
* would take effect. Systems like these are very unlikely if modules are
* enabled.
diff --git a/kernel/module.c b/kernel/module.c
index 33569a01d6e1..3447f3b74870 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4355,6 +4355,7 @@ static int modules_open(struct inode *inode, struct file *file)
}
static const struct proc_ops modules_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = modules_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
diff --git a/kernel/padata.c b/kernel/padata.c
index 72777c10bb9c..a6afa12fb75e 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -512,7 +512,7 @@ static int padata_replace_one(struct padata_shell *ps)
static int padata_replace(struct padata_instance *pinst)
{
struct padata_shell *ps;
- int err;
+ int err = 0;
pinst->flags |= PADATA_RESET;
@@ -1038,12 +1038,13 @@ EXPORT_SYMBOL(padata_alloc_shell);
*/
void padata_free_shell(struct padata_shell *ps)
{
- struct padata_instance *pinst = ps->pinst;
+ if (!ps)
+ return;
- mutex_lock(&pinst->lock);
+ mutex_lock(&ps->pinst->lock);
list_del(&ps->list);
padata_free_pd(rcu_dereference_protected(ps->pd, 1));
- mutex_unlock(&pinst->lock);
+ mutex_unlock(&ps->pinst->lock);
kfree(ps);
}
diff --git a/kernel/pid.c b/kernel/pid.c
index 647b4bb457b5..bc21c0fb26d8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -144,9 +144,6 @@ void free_pid(struct pid *pid)
/* Handle a fork failure of the first process */
WARN_ON(ns->child_reaper);
ns->pid_allocated = 0;
- /* fall through */
- case 0:
- schedule_work(&ns->proc_work);
break;
}
@@ -257,17 +254,13 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
*/
retval = -ENOMEM;
- if (unlikely(is_child_reaper(pid))) {
- if (pid_ns_prepare_proc(ns))
- goto out_free;
- }
-
get_pid_ns(ns);
refcount_set(&pid->count, 1);
for (type = 0; type < PIDTYPE_MAX; ++type)
INIT_HLIST_HEAD(&pid->tasks[type]);
init_waitqueue_head(&pid->wait_pidfd);
+ INIT_HLIST_HEAD(&pid->inodes);
upid = pid->numbers + ns->level;
spin_lock_irq(&pidmap_lock);
@@ -594,7 +587,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd)
struct file *file;
int ret;
- ret = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ ret = mutex_lock_killable(&task->signal->exec_update_mutex);
if (ret)
return ERR_PTR(ret);
@@ -603,7 +596,7 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd)
else
file = ERR_PTR(-EPERM);
- mutex_unlock(&task->signal->cred_guard_mutex);
+ mutex_unlock(&task->signal->exec_update_mutex);
return file ?: ERR_PTR(-EBADF);
}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index d40017e79ebe..01f8ba32cc0c 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -57,12 +57,6 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
return READ_ONCE(*pkc);
}
-static void proc_cleanup_work(struct work_struct *work)
-{
- struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
- pid_ns_release_proc(ns);
-}
-
static struct ucounts *inc_pid_namespaces(struct user_namespace *ns)
{
return inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
@@ -114,7 +108,6 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
ns->user_ns = get_user_ns(user_ns);
ns->ucounts = ucounts;
ns->pid_allocated = PIDNS_ADDING;
- INIT_WORK(&ns->proc_work, proc_cleanup_work);
return ns;
@@ -231,20 +224,27 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
} while (rc != -ECHILD);
/*
- * kernel_wait4() above can't reap the EXIT_DEAD children but we do not
- * really care, we could reparent them to the global init. We could
- * exit and reap ->child_reaper even if it is not the last thread in
- * this pid_ns, free_pid(pid_allocated == 0) calls proc_cleanup_work(),
- * pid_ns can not go away until proc_kill_sb() drops the reference.
+ * kernel_wait4() misses EXIT_DEAD children, and EXIT_ZOMBIE
+ * processes whose parent processes are outside of the pid
+ * namespace. Such processes are created with setns()+fork().
+ *
+ * If those EXIT_ZOMBIE processes are not reaped by their
+ * parents before their parents exit, they will be reparented
+ * to pid_ns->child_reaper. Thus pidns->child_reaper needs to
+ * stay valid until they all go away.
+ *
+ * The code relies on the pid_ns->child_reaper ignoring
+ * SIGCHLD to cause those EXIT_ZOMBIE processes to be
+ * autoreaped if reparented.
*
- * But this ns can also have other tasks injected by setns()+fork().
- * Again, ignoring the user visible semantics we do not really need
- * to wait until they are all reaped, but they can be reparented to
- * us and thus we need to ensure that pid->child_reaper stays valid
- * until they all go away. See free_pid()->wake_up_process().
+ * Semantically it is also desirable to wait for EXIT_ZOMBIE
+ * processes before allowing the child_reaper to be reaped, as
+ * that gives the invariant that when the init process of a
+ * pid namespace is reaped all of the processes in the pid
+ * namespace are gone.
*
- * We rely on ignored SIGCHLD, an injected zombie must be autoreaped
- * if reparented.
+ * Once all of the other tasks are gone from the pid_namespace
+ * free_pid() will awaken this task.
*/
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
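The loop opened by these context lines continues as in the existing function; a minimal sketch of the sleep/wake pairing the rewritten comment describes (free_pid() wakes this task once only the init-level pids remain):

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pid_ns->pid_allocated == init_pids)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);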
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 7cbfbeacd68a..c208566c844b 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -80,9 +80,6 @@ config HIBERNATION
For more information take a look at <file:Documentation/power/swsusp.rst>.
-config ARCH_SAVE_PAGE_KEYS
- bool
-
config PM_STD_PARTITION
string "Default resume partition"
depends on HIBERNATION
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 6dbeedb7354c..86aba8706b16 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -678,7 +678,7 @@ static int load_image_and_restore(void)
error = swsusp_read(&flags);
swsusp_close(FMODE_READ);
if (!error)
- hibernation_restore(flags & SF_PLATFORM_MODE);
+ error = hibernation_restore(flags & SF_PLATFORM_MODE);
pr_err("Failed to load image, recovering.\n");
swsusp_free();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 69b7a8aeca3b..40f86ec4ab30 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -535,6 +535,13 @@ static ssize_t pm_debug_messages_store(struct kobject *kobj,
power_attr(pm_debug_messages);
+static int __init pm_debug_messages_setup(char *str)
+{
+ pm_debug_messages_on = true;
+ return 1;
+}
+__setup("pm_debug_messages", pm_debug_messages_setup);
+
/**
* __pm_pr_dbg - Print a suspend debug message to the kernel log.
* @defer: Whether or not to use printk_deferred() to print the message.
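With the __setup() hook added above, PM debug messages can now also be enabled at boot by appending pm_debug_messages to the kernel command line, complementing the runtime toggle that power_attr(pm_debug_messages) exposes as /sys/power/pm_debug_messages.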
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index d82b7b88d616..659800157b17 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1744,9 +1744,6 @@ int hibernate_preallocate_memory(void)
count += highmem;
count -= totalreserve_pages;
- /* Add number of pages required for page keys (s390 only). */
- size += page_key_additional_pages(saveable);
-
/* Compute the maximum number of saveable pages to leave in memory. */
max_size = (count - (size + PAGES_FOR_IO)) / 2
- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
@@ -2075,8 +2072,6 @@ static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
buf[j] = memory_bm_next_pfn(bm);
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
- /* Save page key for data page (s390 only). */
- page_key_read(buf + j);
}
}
@@ -2226,9 +2221,6 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
- /* Extract and buffer page key for data page (s390 only). */
- page_key_memorize(buf + j);
-
if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
memory_bm_set_bit(bm, buf[j]);
else
@@ -2623,11 +2615,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
if (error)
return error;
- /* Allocate buffer for page keys. */
- error = page_key_alloc(nr_copy_pages);
- if (error)
- return error;
-
hibernate_restore_protection_begin();
} else if (handle->cur <= nr_meta_pages + 1) {
error = unpack_orig_pfns(buffer, &copy_bm);
@@ -2649,8 +2636,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
}
} else {
copy_last_highmem_page();
- /* Restore page key for data page (s390 only). */
- page_key_write(handle->buffer);
hibernate_restore_protect_page(handle->buffer);
handle->buffer = get_buffer(&orig_bm, &ca);
if (IS_ERR(handle->buffer))
@@ -2673,9 +2658,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
void snapshot_write_finalize(struct snapshot_handle *handle)
{
copy_last_highmem_page();
- /* Restore page key for data page (s390 only). */
- page_key_write(handle->buffer);
- page_key_free();
hibernate_restore_protect_page(handle->buffer);
/* Do that only if we have loaded the image entirely */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 58ed9478787f..ef90eb1fb86e 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -27,8 +27,6 @@
#include "power.h"
-#define SNAPSHOT_MINOR 231
-
static struct snapshot_data {
struct snapshot_handle handle;
int swap;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d7fb20adabeb..1ea3dddafe69 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2799,7 +2799,7 @@ static void task_numa_work(struct callback_head *work)
* Skip inaccessible VMAs to avoid any confusion between
* PROT_NONE and NUMA hinting ptes
*/
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ if (!vma_is_accessible(vma))
continue;
do {
diff --git a/kernel/signal.c b/kernel/signal.c
index 5b2396350dd1..e58a6c619824 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1931,7 +1931,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
* This is only possible if parent == real_parent.
* Check if it has changed security domain.
*/
- if (tsk->parent_exec_id != tsk->parent->self_exec_id)
+ if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
sig = SIGCHLD;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 94638f695e60..8a176d8727a3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -212,6 +212,11 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
+#ifdef CONFIG_COMPACTION
+static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+#endif
#endif
#ifdef CONFIG_PRINTK
@@ -1467,7 +1472,7 @@ static struct ctl_table vm_table[] = {
.data = &sysctl_compact_unevictable_allowed,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax_warn_RT_change,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
@@ -2555,6 +2560,28 @@ int proc_dointvec(struct ctl_table *table, int write,
return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
}
+#ifdef CONFIG_COMPACTION
+static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret, old;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write)
+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ old = *(int *)table->data;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+ if (old != *(int *)table->data)
+ pr_warn_once("sysctl attribute %s changed by %s[%d]\n",
+ table->procname, current->comm,
+ task_pid_nr(current));
+ return ret;
+}
+#endif
+
/**
* proc_douintvec - read a vector of unsigned integers
* @table: the sysctl table
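A sketch of the user-visible effect (assuming a PREEMPT_RT kernel, where the handler above is wired to vm.compact_unevictable_allowed): flipping the sysctl from user space logs a one-time warning built from the pr_warn_once() format in the hunk. The snippet below is illustrative, not part of the patch:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* procfs path mirrors the sysctl name vm.compact_unevictable_allowed */
		int fd = open("/proc/sys/vm/compact_unevictable_allowed", O_WRONLY);

		if (fd < 0)
			return 1;
		/* on PREEMPT_RT this logs "sysctl attribute ... changed by ..." once */
		write(fd, "0", 1);
		close(fd);
		return 0;
	}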
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d0a5ba37aff4..d89da1c7e005 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1480,6 +1480,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
unsigned long flags) __must_hold(&cpu_base->lock)
{
enum hrtimer_restart (*fn)(struct hrtimer *);
+ bool expires_in_hardirq;
int restart;
lockdep_assert_held(&cpu_base->lock);
@@ -1514,11 +1515,11 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
*/
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
trace_hrtimer_expire_entry(timer, now);
- lockdep_hrtimer_enter(timer);
+ expires_in_hardirq = lockdep_hrtimer_enter(timer);
restart = fn(timer);
- lockdep_hrtimer_exit(timer);
+ lockdep_hrtimer_exit(expires_in_hardirq);
trace_hrtimer_expire_exit(timer);
raw_spin_lock_irq(&cpu_base->lock);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fd81c7de77a7..041694a1eb74 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -102,7 +102,7 @@ static bool ftrace_pids_enabled(struct ftrace_ops *ops)
tr = ops->private;
- return tr->function_pids != NULL;
+ return tr->function_pids != NULL || tr->function_no_pids != NULL;
}
static void ftrace_update_trampoline(struct ftrace_ops *ops);
@@ -139,13 +139,23 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
#endif
}
+#define FTRACE_PID_IGNORE -1
+#define FTRACE_PID_TRACE -2
+
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs)
{
struct trace_array *tr = op->private;
+ int pid;
- if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
- return;
+ if (tr) {
+ pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+ if (pid == FTRACE_PID_IGNORE)
+ return;
+ if (pid != FTRACE_PID_TRACE &&
+ pid != current->pid)
+ return;
+ }
op->saved_func(ip, parent_ip, op, regs);
}
@@ -6923,11 +6933,17 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
+ struct trace_pid_list *no_pid_list;
pid_list = rcu_dereference_sched(tr->function_pids);
+ no_pid_list = rcu_dereference_sched(tr->function_no_pids);
- this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
- trace_ignore_this_task(pid_list, next));
+ if (trace_ignore_this_task(pid_list, no_pid_list, next))
+ this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+ FTRACE_PID_IGNORE);
+ else
+ this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+ next->pid);
}
static void
@@ -6940,6 +6956,9 @@ ftrace_pid_follow_sched_process_fork(void *data,
pid_list = rcu_dereference_sched(tr->function_pids);
trace_filter_add_remove_task(pid_list, self, task);
+
+ pid_list = rcu_dereference_sched(tr->function_no_pids);
+ trace_filter_add_remove_task(pid_list, self, task);
}
static void
@@ -6950,6 +6969,9 @@ ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
pid_list = rcu_dereference_sched(tr->function_pids);
trace_filter_add_remove_task(pid_list, NULL, task);
+
+ pid_list = rcu_dereference_sched(tr->function_no_pids);
+ trace_filter_add_remove_task(pid_list, NULL, task);
}
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
@@ -6967,42 +6989,57 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
}
}
-static void clear_ftrace_pids(struct trace_array *tr)
+static void clear_ftrace_pids(struct trace_array *tr, int type)
{
struct trace_pid_list *pid_list;
+ struct trace_pid_list *no_pid_list;
int cpu;
pid_list = rcu_dereference_protected(tr->function_pids,
lockdep_is_held(&ftrace_lock));
- if (!pid_list)
+ no_pid_list = rcu_dereference_protected(tr->function_no_pids,
+ lockdep_is_held(&ftrace_lock));
+
+ /* Make sure there's something to do */
+ if (!pid_type_enabled(type, pid_list, no_pid_list))
return;
- unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+ /* See if the pids still need to be checked after this */
+ if (!still_need_pid_events(type, pid_list, no_pid_list)) {
+ unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
+ }
- for_each_possible_cpu(cpu)
- per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
+ if (type & TRACE_PIDS)
+ rcu_assign_pointer(tr->function_pids, NULL);
- rcu_assign_pointer(tr->function_pids, NULL);
+ if (type & TRACE_NO_PIDS)
+ rcu_assign_pointer(tr->function_no_pids, NULL);
/* Wait till all users are no longer using pid filtering */
synchronize_rcu();
- trace_free_pid_list(pid_list);
+ if ((type & TRACE_PIDS) && pid_list)
+ trace_free_pid_list(pid_list);
+
+ if ((type & TRACE_NO_PIDS) && no_pid_list)
+ trace_free_pid_list(no_pid_list);
}
void ftrace_clear_pids(struct trace_array *tr)
{
mutex_lock(&ftrace_lock);
- clear_ftrace_pids(tr);
+ clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
mutex_unlock(&ftrace_lock);
}
-static void ftrace_pid_reset(struct trace_array *tr)
+static void ftrace_pid_reset(struct trace_array *tr, int type)
{
mutex_lock(&ftrace_lock);
- clear_ftrace_pids(tr);
+ clear_ftrace_pids(tr, type);
ftrace_update_pid_func();
ftrace_startup_all(0);
@@ -7066,9 +7103,45 @@ static const struct seq_operations ftrace_pid_sops = {
.show = fpid_show,
};
-static int
-ftrace_pid_open(struct inode *inode, struct file *file)
+static void *fnpid_start(struct seq_file *m, loff_t *pos)
+ __acquires(RCU)
+{
+ struct trace_pid_list *pid_list;
+ struct trace_array *tr = m->private;
+
+ mutex_lock(&ftrace_lock);
+ rcu_read_lock_sched();
+
+ pid_list = rcu_dereference_sched(tr->function_no_pids);
+
+ if (!pid_list)
+ return !(*pos) ? FTRACE_NO_PIDS : NULL;
+
+ return trace_pid_start(pid_list, pos);
+}
+
+static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
{
+ struct trace_array *tr = m->private;
+ struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
+
+ if (v == FTRACE_NO_PIDS) {
+ (*pos)++;
+ return NULL;
+ }
+ return trace_pid_next(pid_list, v, pos);
+}
+
+static const struct seq_operations ftrace_no_pid_sops = {
+ .start = fnpid_start,
+ .next = fnpid_next,
+ .stop = fpid_stop,
+ .show = fpid_show,
+};
+
+static int pid_open(struct inode *inode, struct file *file, int type)
+{
+ const struct seq_operations *seq_ops;
struct trace_array *tr = inode->i_private;
struct seq_file *m;
int ret = 0;
@@ -7079,9 +7152,18 @@ ftrace_pid_open(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
- ftrace_pid_reset(tr);
+ ftrace_pid_reset(tr, type);
+
+ switch (type) {
+ case TRACE_PIDS:
+ seq_ops = &ftrace_pid_sops;
+ break;
+ case TRACE_NO_PIDS:
+ seq_ops = &ftrace_no_pid_sops;
+ break;
+ }
- ret = seq_open(file, &ftrace_pid_sops);
+ ret = seq_open(file, seq_ops);
if (ret < 0) {
trace_array_put(tr);
} else {
@@ -7093,10 +7175,23 @@ ftrace_pid_open(struct inode *inode, struct file *file)
return ret;
}
+static int
+ftrace_pid_open(struct inode *inode, struct file *file)
+{
+ return pid_open(inode, file, TRACE_PIDS);
+}
+
+static int
+ftrace_no_pid_open(struct inode *inode, struct file *file)
+{
+ return pid_open(inode, file, TRACE_NO_PIDS);
+}
+
static void ignore_task_cpu(void *data)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
+ struct trace_pid_list *no_pid_list;
/*
* This function is called by on_each_cpu() while the
@@ -7104,18 +7199,25 @@ static void ignore_task_cpu(void *data)
*/
pid_list = rcu_dereference_protected(tr->function_pids,
mutex_is_locked(&ftrace_lock));
+ no_pid_list = rcu_dereference_protected(tr->function_no_pids,
+ mutex_is_locked(&ftrace_lock));
- this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
- trace_ignore_this_task(pid_list, current));
+ if (trace_ignore_this_task(pid_list, no_pid_list, current))
+ this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+ FTRACE_PID_IGNORE);
+ else
+ this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+ current->pid);
}
static ssize_t
-ftrace_pid_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+pid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, int type)
{
struct seq_file *m = filp->private_data;
struct trace_array *tr = m->private;
- struct trace_pid_list *filtered_pids = NULL;
+ struct trace_pid_list *filtered_pids;
+ struct trace_pid_list *other_pids;
struct trace_pid_list *pid_list;
ssize_t ret;
@@ -7124,19 +7226,39 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
mutex_lock(&ftrace_lock);
- filtered_pids = rcu_dereference_protected(tr->function_pids,
+ switch (type) {
+ case TRACE_PIDS:
+ filtered_pids = rcu_dereference_protected(tr->function_pids,
+ lockdep_is_held(&ftrace_lock));
+ other_pids = rcu_dereference_protected(tr->function_no_pids,
+ lockdep_is_held(&ftrace_lock));
+ break;
+ case TRACE_NO_PIDS:
+ filtered_pids = rcu_dereference_protected(tr->function_no_pids,
+ lockdep_is_held(&ftrace_lock));
+ other_pids = rcu_dereference_protected(tr->function_pids,
lockdep_is_held(&ftrace_lock));
+ break;
+ }
ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
if (ret < 0)
goto out;
- rcu_assign_pointer(tr->function_pids, pid_list);
+ switch (type) {
+ case TRACE_PIDS:
+ rcu_assign_pointer(tr->function_pids, pid_list);
+ break;
+ case TRACE_NO_PIDS:
+ rcu_assign_pointer(tr->function_no_pids, pid_list);
+ break;
+ }
+
if (filtered_pids) {
synchronize_rcu();
trace_free_pid_list(filtered_pids);
- } else if (pid_list) {
+ } else if (pid_list && !other_pids) {
/* Register a probe to set whether to ignore the tracing of a task */
register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
}
@@ -7159,6 +7281,20 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
return ret;
}
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
+}
+
+static ssize_t
+ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
+}
+
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
@@ -7177,10 +7313,20 @@ static const struct file_operations ftrace_pid_fops = {
.release = ftrace_pid_release,
};
+static const struct file_operations ftrace_no_pid_fops = {
+ .open = ftrace_no_pid_open,
+ .write = ftrace_no_pid_write,
+ .read = seq_read,
+ .llseek = tracing_lseek,
+ .release = ftrace_pid_release,
+};
+
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
trace_create_file("set_ftrace_pid", 0644, d_tracer,
tr, &ftrace_pid_fops);
+ trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer,
+ tr, &ftrace_no_pid_fops);
}
void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
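A minimal user-space sketch of the new set_ftrace_notrace_pid file (assuming tracefs is mounted at /sys/kernel/tracing; the self-exclusion use case is illustrative, not taken from the patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[32];
		int fd, len;

		/* exclude this process from the function tracer */
		fd = open("/sys/kernel/tracing/set_ftrace_notrace_pid", O_WRONLY);
		if (fd < 0)
			return 1;
		len = snprintf(buf, sizeof(buf), "%d\n", getpid());
		write(fd, buf, len);
		close(fd);
		return 0;
	}

Once a pid is listed, the sched_switch probe caches FTRACE_PID_IGNORE for that task and ftrace_pid_func() drops its events, while all other tasks remain traced.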
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 61f0e92ace99..6f0b42ceeb00 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -441,6 +441,7 @@ enum {
struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
+ atomic_t resize_disabled;
struct trace_buffer *buffer;
raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
@@ -484,7 +485,6 @@ struct trace_buffer {
unsigned flags;
int cpus;
atomic_t record_disabled;
- atomic_t resize_disabled;
cpumask_var_t cpumask;
struct lock_class_key *reader_lock_key;
@@ -503,10 +503,14 @@ struct trace_buffer {
struct ring_buffer_iter {
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long head;
+ unsigned long next_event;
struct buffer_page *head_page;
struct buffer_page *cache_reader_page;
unsigned long cache_read;
u64 read_stamp;
+ u64 page_stamp;
+ struct ring_buffer_event *event;
+ int missed_events;
};
/**
@@ -1737,18 +1741,24 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
size = nr_pages * BUF_PAGE_SIZE;
- /*
- * Don't succeed if resizing is disabled, as a reader might be
- * manipulating the ring buffer and is expecting a sane state while
- * this is true.
- */
- if (atomic_read(&buffer->resize_disabled))
- return -EBUSY;
-
/* prevent another thread from changing buffer sizes */
mutex_lock(&buffer->mutex);
+
if (cpu_id == RING_BUFFER_ALL_CPUS) {
+ /*
+ * Don't succeed if resizing is disabled, as a reader might be
+ * manipulating the ring buffer and is expecting a sane state while
+ * this is true.
+ */
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+ if (atomic_read(&cpu_buffer->resize_disabled)) {
+ err = -EBUSY;
+ goto out_err_unlock;
+ }
+ }
+
/* calculate the pages to update */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
@@ -1816,6 +1826,16 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
if (nr_pages == cpu_buffer->nr_pages)
goto out;
+ /*
+ * Don't succeed if resizing is disabled, as a reader might be
+ * manipulating the ring buffer and is expecting a sane state while
+ * this is true.
+ */
+ if (atomic_read(&cpu_buffer->resize_disabled)) {
+ err = -EBUSY;
+ goto out_err_unlock;
+ }
+
cpu_buffer->nr_pages_to_update = nr_pages -
cpu_buffer->nr_pages;
@@ -1885,6 +1905,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
free_buffer_page(bpage);
}
}
+ out_err_unlock:
mutex_unlock(&buffer->mutex);
return err;
}
@@ -1913,15 +1934,63 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->read);
}
-static __always_inline struct ring_buffer_event *
-rb_iter_head_event(struct ring_buffer_iter *iter)
+static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
{
- return __rb_page_index(iter->head_page, iter->head);
+ return local_read(&bpage->page->commit);
}
-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
+static struct ring_buffer_event *
+rb_iter_head_event(struct ring_buffer_iter *iter)
{
- return local_read(&bpage->page->commit);
+ struct ring_buffer_event *event;
+ struct buffer_page *iter_head_page = iter->head_page;
+ unsigned long commit;
+ unsigned length;
+
+ if (iter->head != iter->next_event)
+ return iter->event;
+
+ /*
+ * When the writer goes across pages, it issues a cmpxchg which
+ * is a mb(), which will synchronize with the rmb here.
+ * (see rb_tail_page_update() and __rb_reserve_next())
+ */
+ commit = rb_page_commit(iter_head_page);
+ smp_rmb();
+ event = __rb_page_index(iter_head_page, iter->head);
+ length = rb_event_length(event);
+
+ /*
+ * READ_ONCE() doesn't work on functions and we don't want the
+ * compiler doing any crazy optimizations with length.
+ */
+ barrier();
+
+ if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
+ /* Writer corrupted the read? */
+ goto reset;
+
+ memcpy(iter->event, event, length);
+ /*
+ * If the page stamp is still the same after this rmb() then the
+ * event was safely copied without the writer entering the page.
+ */
+ smp_rmb();
+
+ /* Make sure the page didn't change since we read this */
+ if (iter->page_stamp != iter_head_page->page->time_stamp ||
+ commit > rb_page_commit(iter_head_page))
+ goto reset;
+
+ iter->next_event = iter->head + length;
+ return iter->event;
+ reset:
+ /* Reset to the beginning */
+ iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
+ iter->head = 0;
+ iter->next_event = 0;
+ iter->missed_events = 1;
+ return NULL;
}
/* Size is determined by what has been committed */
@@ -1959,8 +2028,9 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
else
rb_inc_page(cpu_buffer, &iter->head_page);
- iter->read_stamp = iter->head_page->page->time_stamp;
+ iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
iter->head = 0;
+ iter->next_event = 0;
}
/*
@@ -3547,14 +3617,18 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
/* Iterator usage is expected to have record disabled */
iter->head_page = cpu_buffer->reader_page;
iter->head = cpu_buffer->reader_page->read;
+ iter->next_event = iter->head;
iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;
- if (iter->head)
+ if (iter->head) {
iter->read_stamp = cpu_buffer->read_stamp;
- else
+ iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
+ } else {
iter->read_stamp = iter->head_page->page->time_stamp;
+ iter->page_stamp = iter->read_stamp;
+ }
}
/**
@@ -3590,17 +3664,38 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
struct buffer_page *reader;
struct buffer_page *head_page;
struct buffer_page *commit_page;
+ struct buffer_page *curr_commit_page;
unsigned commit;
+ u64 curr_commit_ts;
+ u64 commit_ts;
cpu_buffer = iter->cpu_buffer;
-
- /* Remember, trace recording is off when iterator is in use */
reader = cpu_buffer->reader_page;
head_page = cpu_buffer->head_page;
commit_page = cpu_buffer->commit_page;
+ commit_ts = commit_page->page->time_stamp;
+
+ /*
+ * When the writer goes across pages, it issues a cmpxchg which
+ * is a mb(), which will synchronize with the rmb here.
+ * (see rb_tail_page_update())
+ */
+ smp_rmb();
commit = rb_page_commit(commit_page);
+ /* We want to make sure that the commit page doesn't change */
+ smp_rmb();
- return ((iter->head_page == commit_page && iter->head == commit) ||
+ /* Make sure commit page didn't change */
+ curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
+ curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
+
+ /* If the commit page changed, then there's more data */
+ if (curr_commit_page != commit_page ||
+ curr_commit_ts != commit_ts)
+ return 0;
+
+ /* Still racy, as it may return a false positive, but that's OK */
+ return ((iter->head_page == commit_page && iter->head >= commit) ||
(iter->head_page == reader && commit_page == head_page &&
head_page->read == commit &&
iter->head == rb_page_commit(cpu_buffer->reader_page)));
@@ -3828,15 +3923,22 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct ring_buffer_event *event;
- unsigned length;
cpu_buffer = iter->cpu_buffer;
+ /* If head == next_event then we need to jump to the next event */
+ if (iter->head == iter->next_event) {
+ /* If the event gets overwritten again, there's nothing to do */
+ if (rb_iter_head_event(iter) == NULL)
+ return;
+ }
+
+ iter->head = iter->next_event;
+
/*
* Check if we are at the end of the buffer.
*/
- if (iter->head >= rb_page_size(iter->head_page)) {
+ if (iter->next_event >= rb_page_size(iter->head_page)) {
/* discarded commits can make the page empty */
if (iter->head_page == cpu_buffer->commit_page)
return;
@@ -3844,27 +3946,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
return;
}
- event = rb_iter_head_event(iter);
-
- length = rb_event_length(event);
-
- /*
- * This should not be called to advance the header if we are
- * at the tail of the buffer.
- */
- if (RB_WARN_ON(cpu_buffer,
- (iter->head_page == cpu_buffer->commit_page) &&
- (iter->head + length > rb_commit_index(cpu_buffer))))
- return;
-
- rb_update_iter_read_stamp(iter, event);
-
- iter->head += length;
-
- /* check for end of page padding */
- if ((iter->head >= rb_page_size(iter->head_page)) &&
- (iter->head_page != cpu_buffer->commit_page))
- rb_inc_iter(iter);
+ rb_update_iter_read_stamp(iter, iter->event);
}
static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
@@ -3952,6 +4034,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
int nr_loops = 0;
+ bool failed = false;
if (ts)
*ts = 0;
@@ -3978,10 +4061,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
* to a data event, we should never loop more than three times.
* Once for going to next page, once on time extend, and
* finally once to get the event.
- * (We never hit the following condition more than thrice).
+ * We should never hit the following condition more than thrice,
+ * unless the buffer is very small, and there's a writer
+ * that is causing the reader to fail getting an event.
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
+ if (++nr_loops > 3) {
+ RB_WARN_ON(cpu_buffer, !failed);
return NULL;
+ }
if (rb_per_cpu_empty(cpu_buffer))
return NULL;
@@ -3992,6 +4079,10 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
}
event = rb_iter_head_event(iter);
+ if (!event) {
+ failed = true;
+ goto again;
+ }
switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
@@ -4102,6 +4193,20 @@ ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
return event;
}
+/**
+ * ring_buffer_iter_dropped - report if there are dropped events
+ * @iter: The ring buffer iterator
+ *
+ * Returns true if there were dropped events since the last peek.
+ */
+bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
+{
+ bool ret = iter->missed_events != 0;
+
+ iter->missed_events = 0;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
+
/**
* ring_buffer_iter_peek - peek at the next event to be read
* @iter: The ring buffer iterator
@@ -4208,16 +4313,21 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
- iter = kmalloc(sizeof(*iter), flags);
+ iter = kzalloc(sizeof(*iter), flags);
if (!iter)
return NULL;
+ iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
+ if (!iter->event) {
+ kfree(iter);
+ return NULL;
+ }
+
cpu_buffer = buffer->buffers[cpu];
iter->cpu_buffer = cpu_buffer;
- atomic_inc(&buffer->resize_disabled);
- atomic_inc(&cpu_buffer->record_disabled);
+ atomic_inc(&cpu_buffer->resize_disabled);
return iter;
}
@@ -4290,42 +4400,31 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
rb_check_pages(cpu_buffer);
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- atomic_dec(&cpu_buffer->record_disabled);
- atomic_dec(&cpu_buffer->buffer->resize_disabled);
+ atomic_dec(&cpu_buffer->resize_disabled);
+ kfree(iter->event);
kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
/**
- * ring_buffer_read - read the next item in the ring buffer by the iterator
+ * ring_buffer_iter_advance - advance the iterator to the next location
* @iter: The ring buffer iterator
- * @ts: The time stamp of the event read.
*
- * This reads the next event in the ring buffer and increments the iterator.
+ * Advance the iterator so that the next read picks up the event
+ * following the one just read.
*/
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
{
- struct ring_buffer_event *event;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- again:
- event = rb_iter_peek(iter, ts);
- if (!event)
- goto out;
-
- if (event->type_len == RINGBUF_TYPE_PADDING)
- goto again;
rb_advance_iter(iter);
- out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- return event;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
-EXPORT_SYMBOL_GPL(ring_buffer_read);
+EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
/**
* ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -4406,7 +4505,7 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
- atomic_inc(&buffer->resize_disabled);
+ atomic_inc(&cpu_buffer->resize_disabled);
atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */
@@ -4427,7 +4526,7 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
- atomic_dec(&buffer->resize_disabled);
+ atomic_dec(&cpu_buffer->resize_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
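The heart of the reworked reader is the copy-then-revalidate pattern in rb_iter_head_event(): read the commit, copy the event into the iterator's private buffer, then re-check the page timestamp to detect a writer that recycled the page mid-copy. A stand-alone user-space analogue, sketched with C11 atomics (the kernel code instead pairs its smp_rmb() with the cmpxchg in rb_tail_page_update()):

	#include <stdatomic.h>
	#include <stddef.h>
	#include <string.h>

	struct rb_page {
		_Atomic unsigned long stamp;	/* bumped by the writer when the page is reused */
		char data[4096];
	};

	/* Returns 0 if dst holds a consistent copy, -1 if the writer raced
	 * us and the caller must reset its iterator (cf. the reset: label). */
	static int read_event(struct rb_page *p, char *dst, size_t off, size_t len)
	{
		unsigned long stamp = atomic_load_explicit(&p->stamp, memory_order_acquire);

		memcpy(dst, p->data + off, len);
		atomic_thread_fence(memory_order_acquire);
		return atomic_load_explicit(&p->stamp, memory_order_relaxed) == stamp ? 0 : -1;
	}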
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6b11e4e2150c..8d2b98812625 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -386,16 +386,22 @@ trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
* Returns false if @task should be traced.
*/
bool
-trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
+trace_ignore_this_task(struct trace_pid_list *filtered_pids,
+ struct trace_pid_list *filtered_no_pids,
+ struct task_struct *task)
{
/*
- * Return false, because if filtered_pids does not exist,
- * all pids are good to trace.
+ * If filtered_no_pids is not empty, and the task's pid is listed
+ * in filtered_no_pids, then return true.
+ * Otherwise, if filtered_pids is empty, that means we can
+ * trace all tasks. If it has content, then only trace pids
+ * within filtered_pids.
*/
- if (!filtered_pids)
- return false;
- return !trace_find_filtered_pid(filtered_pids, task->pid);
+ return (filtered_pids &&
+ !trace_find_filtered_pid(filtered_pids, task->pid)) ||
+ (filtered_no_pids &&
+ trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
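The combined predicate reads more easily as a stand-alone sketch, with plain set membership standing in for trace_find_filtered_pid() (names are illustrative):

	#include <stdbool.h>

	static bool in_list(const int *list, int n, int pid)
	{
		while (n--)
			if (list[n] == pid)
				return true;
		return false;
	}

	/* True when the task should NOT be traced: a "pids" list exists and
	 * the task is absent from it, or a "no_pids" list exists and the
	 * task is present in it. */
	static bool ignore(const int *pids, int np,
			   const int *no_pids, int nn, int pid)
	{
		return (np && !in_list(pids, np, pid)) ||
		       (nn && in_list(no_pids, nn, pid));
	}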
@@ -3378,7 +3384,7 @@ static void trace_iterator_increment(struct trace_iterator *iter)
iter->idx++;
if (buf_iter)
- ring_buffer_read(buf_iter, NULL);
+ ring_buffer_iter_advance(buf_iter);
}
static struct trace_entry *
@@ -3388,11 +3394,15 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
- if (buf_iter)
+ if (buf_iter) {
event = ring_buffer_iter_peek(buf_iter, ts);
- else
+ if (lost_events)
+ *lost_events = ring_buffer_iter_dropped(buf_iter) ?
+ (unsigned long)-1 : 0;
+ } else {
event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
lost_events);
+ }
if (event) {
iter->ent_size = ring_buffer_event_length(event);
@@ -3462,11 +3472,51 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
return next;
}
+#define STATIC_TEMP_BUF_SIZE 128
+static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
+
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts)
{
- return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
+ /* __find_next_entry will reset ent_size */
+ int ent_size = iter->ent_size;
+ struct trace_entry *entry;
+
+ /*
+ * If called from ftrace_dump(), then the iter->temp buffer
+ * will be the static_temp_buf and not created from kmalloc.
+ * If the entry size is greater than the buffer, we cannot
+ * save it. Just return NULL in that case. This is only
+ * used to add markers when two consecutive events' time
+ * stamps have a large delta. See trace_print_lat_context().
+ */
+ if (iter->temp == static_temp_buf &&
+ STATIC_TEMP_BUF_SIZE < ent_size)
+ return NULL;
+
+ /*
+ * The __find_next_entry() may call peek_next_entry(), which may
+ * call ring_buffer_peek() that may make the contents of iter->ent
+ * undefined. Need to copy iter->ent now.
+ */
+ if (iter->ent && iter->ent != iter->temp) {
+ if ((!iter->temp || iter->temp_size < iter->ent_size) &&
+ !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
+ kfree(iter->temp);
+ iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
+ if (!iter->temp)
+ return NULL;
+ }
+ memcpy(iter->temp, iter->ent, iter->ent_size);
+ iter->temp_size = iter->ent_size;
+ iter->ent = iter->temp;
+ }
+ entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
+ /* Put back the original ent_size */
+ iter->ent_size = ent_size;
+
+ return entry;
}
/* Find the next real entry, and increment the iterator to the next entry */
@@ -3538,7 +3588,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
if (ts >= iter->array_buffer->time_start)
break;
entries++;
- ring_buffer_read(buf_iter, NULL);
+ ring_buffer_iter_advance(buf_iter);
}
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
@@ -3981,8 +4031,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
enum print_line_t ret;
if (iter->lost_events) {
- trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
- iter->cpu, iter->lost_events);
+ if (iter->lost_events == (unsigned long)-1)
+ trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
+ iter->cpu);
+ else
+ trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+ iter->cpu, iter->lost_events);
if (trace_seq_has_overflowed(&iter->seq))
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -4198,6 +4252,18 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
goto release;
/*
+ * trace_find_next_entry() may need to save off iter->ent.
+ * It will place it into the iter->temp buffer. As most
+ * events are less than 128, allocate a buffer of that size.
+ * If one is greater, then trace_find_next_entry() will
+ * allocate a new buffer to adjust for the bigger iter->ent.
+ * It's not critical if it fails to get allocated here.
+ */
+ iter->temp = kmalloc(128, GFP_KERNEL);
+ if (iter->temp)
+ iter->temp_size = 128;
+
+ /*
* We make a copy of the current tracer to avoid concurrent
* changes on it while we are reading.
*/
@@ -4237,8 +4303,11 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
- /* stop the trace while dumping if we are not opening "snapshot" */
- if (!iter->snapshot)
+ /*
+ * If pause-on-trace is enabled, then stop the trace while
+ * dumping, unless this is the "snapshot" file
+ */
+ if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
tracing_stop_tr(tr);
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
@@ -4269,6 +4338,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
+ kfree(iter->temp);
kfree(iter->buffer_iter);
release:
seq_release_private(inode, file);
@@ -4334,7 +4404,7 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
- if (!iter->snapshot)
+ if (!iter->snapshot && tr->stop_count)
/* reenable tracing if it was previously enabled */
tracing_start_tr(tr);
@@ -4344,6 +4414,7 @@ static int tracing_release(struct inode *inode, struct file *file)
mutex_destroy(&iter->mutex);
free_cpumask_var(iter->started);
+ kfree(iter->temp);
kfree(iter->trace);
kfree(iter->buffer_iter);
seq_release_private(inode, file);
@@ -4964,6 +5035,8 @@ static const char readme_msg[] =
#ifdef CONFIG_FUNCTION_TRACER
" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
"\t\t (function)\n"
+ " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
+ "\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
@@ -9146,6 +9219,9 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
/* Simulate the iterator */
trace_init_global_iter(&iter);
+ /* Cannot use kmalloc for iter.temp */
+ iter.temp = static_temp_buf;
+ iter.temp_size = STATIC_TEMP_BUF_SIZE;
for_each_tracing_cpu(cpu) {
atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
@@ -9334,7 +9410,7 @@ __init static int tracer_alloc_buffers(void)
goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */
- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
/* Must be called before global_trace.buffer is allocated */
trace_printk_init_buffers();
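One behavioural consequence of the __tracing_open()/tracing_release() hunks above: opening the trace file no longer stops the tracer by default. The old semantics are now opt-in through the new pause-on-trace option, i.e. (assuming the usual tracefs mount point) writing 1 to /sys/kernel/tracing/options/pause-on-trace.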
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 99372dd7d168..4eb1d004d5f2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -178,10 +178,10 @@ struct trace_array_cpu {
kuid_t uid;
char comm[TASK_COMM_LEN];
- bool ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
- bool ftrace_ignore_pid;
+ int ftrace_ignore_pid;
#endif
+ bool ignore_pid;
};
struct tracer;
@@ -207,6 +207,30 @@ struct trace_pid_list {
unsigned long *pids;
};
+enum {
+ TRACE_PIDS = BIT(0),
+ TRACE_NO_PIDS = BIT(1),
+};
+
+static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
+ struct trace_pid_list *no_pid_list)
+{
+ /* Return true if the pid list in type has pids */
+ return ((type & TRACE_PIDS) && pid_list) ||
+ ((type & TRACE_NO_PIDS) && no_pid_list);
+}
+
+static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
+ struct trace_pid_list *no_pid_list)
+{
+ /*
+ * Turning off what is in @type, return true if the "other"
+ * pid list still has pids in it.
+ */
+ return (!(type & TRACE_PIDS) && pid_list) ||
+ (!(type & TRACE_NO_PIDS) && no_pid_list);
+}
+
typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
/**
@@ -285,6 +309,7 @@ struct trace_array {
#endif
#endif
struct trace_pid_list __rcu *filtered_pids;
+ struct trace_pid_list __rcu *filtered_no_pids;
/*
* max_lock is used to protect the swapping of buffers
* when taking a max snapshot. The buffers themselves are
@@ -331,6 +356,7 @@ struct trace_array {
#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops *ops;
struct trace_pid_list __rcu *function_pids;
+ struct trace_pid_list __rcu *function_no_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
/* All of these are protected by the ftrace_lock */
struct list_head func_probes;
@@ -557,12 +583,7 @@ struct tracer {
* caller, and we can skip the current check.
*/
enum {
- TRACE_BUFFER_BIT,
- TRACE_BUFFER_NMI_BIT,
- TRACE_BUFFER_IRQ_BIT,
- TRACE_BUFFER_SIRQ_BIT,
-
- /* Start of function recursion bits */
+ /* Function recursion bits */
TRACE_FTRACE_BIT,
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
@@ -787,6 +808,7 @@ extern int pid_max;
bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
+ struct trace_pid_list *filtered_no_pids,
struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
struct task_struct *self,
@@ -1307,6 +1329,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
C(IRQ_INFO, "irq-info"), \
C(MARKERS, "markers"), \
C(EVENT_FORK, "event-fork"), \
+ C(PAUSE_ON_TRACE, "pause-on-trace"), \
FUNCTION_FLAGS \
FGRAPH_FLAGS \
STACK_FLAGS \
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index f22746f3c132..a523da0dae0a 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -325,14 +325,16 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
__field_desc( long, timestamp, tv_nsec )
__field( unsigned int, nmi_count )
__field( unsigned int, seqnum )
+ __field( unsigned int, count )
),
- F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
+ F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tcount:%d\tnmi-ts:%llu\tnmi-count:%u\n",
__entry->seqnum,
__entry->tv_sec,
__entry->tv_nsec,
__entry->duration,
__entry->outer_duration,
+ __entry->count,
__entry->nmi_total_ts,
__entry->nmi_count)
);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f38234ecea18..242f59e7f17d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,13 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
struct trace_array *tr = trace_file->tr;
struct trace_array_cpu *data;
+ struct trace_pid_list *no_pid_list;
struct trace_pid_list *pid_list;
pid_list = rcu_dereference_raw(tr->filtered_pids);
- if (!pid_list)
+ no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);
+
+ if (!pid_list && !no_pid_list)
return false;
data = this_cpu_ptr(tr->array_buffer.data);
@@ -510,6 +513,9 @@ event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
pid_list = rcu_dereference_raw(tr->filtered_pids);
trace_filter_add_remove_task(pid_list, NULL, task);
+
+ pid_list = rcu_dereference_raw(tr->filtered_no_pids);
+ trace_filter_add_remove_task(pid_list, NULL, task);
}
static void
@@ -522,6 +528,9 @@ event_filter_pid_sched_process_fork(void *data,
pid_list = rcu_dereference_sched(tr->filtered_pids);
trace_filter_add_remove_task(pid_list, self, task);
+
+ pid_list = rcu_dereference_sched(tr->filtered_no_pids);
+ trace_filter_add_remove_task(pid_list, self, task);
}
void trace_event_follow_fork(struct trace_array *tr, bool enable)
@@ -544,13 +553,23 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array *tr = data;
+ struct trace_pid_list *no_pid_list;
struct trace_pid_list *pid_list;
+ bool ret;
pid_list = rcu_dereference_sched(tr->filtered_pids);
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
- this_cpu_write(tr->array_buffer.data->ignore_pid,
- trace_ignore_this_task(pid_list, prev) &&
- trace_ignore_this_task(pid_list, next));
+ /*
+ * Sched switch is funny, as we only want to ignore it
+ * in the notrace case if both prev and next should be ignored.
+ */
+ ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
+ trace_ignore_this_task(NULL, no_pid_list, next);
+
+ this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
+ (trace_ignore_this_task(pid_list, NULL, prev) &&
+ trace_ignore_this_task(pid_list, NULL, next)));
}
static void
@@ -558,18 +577,21 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
struct trace_array *tr = data;
+ struct trace_pid_list *no_pid_list;
struct trace_pid_list *pid_list;
pid_list = rcu_dereference_sched(tr->filtered_pids);
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
this_cpu_write(tr->array_buffer.data->ignore_pid,
- trace_ignore_this_task(pid_list, next));
+ trace_ignore_this_task(pid_list, no_pid_list, next));
}
static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
struct trace_array *tr = data;
+ struct trace_pid_list *no_pid_list;
struct trace_pid_list *pid_list;
/* Nothing to do if we are already tracing */
@@ -577,15 +599,17 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
this_cpu_write(tr->array_buffer.data->ignore_pid,
- trace_ignore_this_task(pid_list, task));
+ trace_ignore_this_task(pid_list, no_pid_list, task));
}
static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
struct trace_array *tr = data;
+ struct trace_pid_list *no_pid_list;
struct trace_pid_list *pid_list;
/* Nothing to do if we are not tracing */
@@ -593,23 +617,15 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
return;
pid_list = rcu_dereference_sched(tr->filtered_pids);
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
/* Set tracing if current is enabled */
this_cpu_write(tr->array_buffer.data->ignore_pid,
- trace_ignore_this_task(pid_list, current));
+ trace_ignore_this_task(pid_list, no_pid_list, current));
}
-static void __ftrace_clear_event_pids(struct trace_array *tr)
+static void unregister_pid_events(struct trace_array *tr)
{
- struct trace_pid_list *pid_list;
- struct trace_event_file *file;
- int cpu;
-
- pid_list = rcu_dereference_protected(tr->filtered_pids,
- lockdep_is_held(&event_mutex));
- if (!pid_list)
- return;
-
unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
@@ -621,26 +637,55 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
+}
- list_for_each_entry(file, &tr->events, list) {
- clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
+{
+ struct trace_pid_list *pid_list;
+ struct trace_pid_list *no_pid_list;
+ struct trace_event_file *file;
+ int cpu;
+
+ pid_list = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+ lockdep_is_held(&event_mutex));
+
+ /* Make sure there's something to do */
+ if (!pid_type_enabled(type, pid_list, no_pid_list))
+ return;
+
+ if (!still_need_pid_events(type, pid_list, no_pid_list)) {
+ unregister_pid_events(tr);
+
+ list_for_each_entry(file, &tr->events, list) {
+ clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+ }
+
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
}
- for_each_possible_cpu(cpu)
- per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
+ if (type & TRACE_PIDS)
+ rcu_assign_pointer(tr->filtered_pids, NULL);
- rcu_assign_pointer(tr->filtered_pids, NULL);
+ if (type & TRACE_NO_PIDS)
+ rcu_assign_pointer(tr->filtered_no_pids, NULL);
/* Wait till all users are no longer using pid filtering */
tracepoint_synchronize_unregister();
- trace_free_pid_list(pid_list);
+ if ((type & TRACE_PIDS) && pid_list)
+ trace_free_pid_list(pid_list);
+
+ if ((type & TRACE_NO_PIDS) && no_pid_list)
+ trace_free_pid_list(no_pid_list);
}
-static void ftrace_clear_event_pids(struct trace_array *tr)
+static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
mutex_lock(&event_mutex);
- __ftrace_clear_event_pids(tr);
+ __ftrace_clear_event_pids(tr, type);
mutex_unlock(&event_mutex);
}
@@ -1013,15 +1058,32 @@ static void t_stop(struct seq_file *m, void *p)
}
static void *
-p_next(struct seq_file *m, void *v, loff_t *pos)
+__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
struct trace_array *tr = m->private;
- struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
+ struct trace_pid_list *pid_list;
+
+ if (type == TRACE_PIDS)
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+ else
+ pid_list = rcu_dereference_sched(tr->filtered_no_pids);
return trace_pid_next(pid_list, v, pos);
}
-static void *p_start(struct seq_file *m, loff_t *pos)
+static void *
+p_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return __next(m, v, pos, TRACE_PIDS);
+}
+
+static void *
+np_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return __next(m, v, pos, TRACE_NO_PIDS);
+}
+
+static void *__start(struct seq_file *m, loff_t *pos, int type)
__acquires(RCU)
{
struct trace_pid_list *pid_list;
@@ -1036,7 +1098,10 @@ static void *p_start(struct seq_file *m, loff_t *pos)
mutex_lock(&event_mutex);
rcu_read_lock_sched();
- pid_list = rcu_dereference_sched(tr->filtered_pids);
+ if (type == TRACE_PIDS)
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+ else
+ pid_list = rcu_dereference_sched(tr->filtered_no_pids);
if (!pid_list)
return NULL;
@@ -1044,6 +1109,18 @@ static void *p_start(struct seq_file *m, loff_t *pos)
return trace_pid_start(pid_list, pos);
}
+static void *p_start(struct seq_file *m, loff_t *pos)
+ __acquires(RCU)
+{
+ return __start(m, pos, TRACE_PIDS);
+}
+
+static void *np_start(struct seq_file *m, loff_t *pos)
+ __acquires(RCU)
+{
+ return __start(m, pos, TRACE_NO_PIDS);
+}
+
static void p_stop(struct seq_file *m, void *p)
__releases(RCU)
{
@@ -1588,6 +1665,7 @@ static void ignore_task_cpu(void *data)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
+ struct trace_pid_list *no_pid_list;
/*
* This function is called by on_each_cpu() while the
@@ -1595,18 +1673,50 @@ static void ignore_task_cpu(void *data)
*/
pid_list = rcu_dereference_protected(tr->filtered_pids,
mutex_is_locked(&event_mutex));
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+ mutex_is_locked(&event_mutex));
this_cpu_write(tr->array_buffer.data->ignore_pid,
- trace_ignore_this_task(pid_list, current));
+ trace_ignore_this_task(pid_list, no_pid_list, current));
+}
+
+static void register_pid_events(struct trace_array *tr)
+{
+ /*
+ * Register a probe that is called before all other probes
+ * to set ignore_pid if next or prev do not match.
+ * Register a probe that is called after all other probes
+ * to only keep ignore_pid set if next pid matches.
+ */
+ register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
+ tr, INT_MAX);
+ register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
+ tr, 0);
+
+ register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
+ tr, INT_MAX);
+ register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
+ tr, 0);
+
+ register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
+ tr, INT_MAX);
+ register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
+ tr, 0);
+
+ register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
+ tr, INT_MAX);
+ register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
+ tr, 0);
}
static ssize_t
-ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+event_pid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, int type)
{
struct seq_file *m = filp->private_data;
struct trace_array *tr = m->private;
struct trace_pid_list *filtered_pids = NULL;
+ struct trace_pid_list *other_pids = NULL;
struct trace_pid_list *pid_list;
struct trace_event_file *file;
ssize_t ret;
@@ -1620,14 +1730,26 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
mutex_lock(&event_mutex);
- filtered_pids = rcu_dereference_protected(tr->filtered_pids,
- lockdep_is_held(&event_mutex));
+ if (type == TRACE_PIDS) {
+ filtered_pids = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+ other_pids = rcu_dereference_protected(tr->filtered_no_pids,
+ lockdep_is_held(&event_mutex));
+ } else {
+ filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
+ lockdep_is_held(&event_mutex));
+ other_pids = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+ }
ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
if (ret < 0)
goto out;
- rcu_assign_pointer(tr->filtered_pids, pid_list);
+ if (type == TRACE_PIDS)
+ rcu_assign_pointer(tr->filtered_pids, pid_list);
+ else
+ rcu_assign_pointer(tr->filtered_no_pids, pid_list);
list_for_each_entry(file, &tr->events, list) {
set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
@@ -1636,32 +1758,8 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
if (filtered_pids) {
tracepoint_synchronize_unregister();
trace_free_pid_list(filtered_pids);
- } else if (pid_list) {
- /*
- * Register a probe that is called before all other probes
- * to set ignore_pid if next or prev do not match.
- * Register a probe this is called after all other probes
- * to only keep ignore_pid set if next pid matches.
- */
- register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
- tr, INT_MAX);
- register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
- tr, 0);
-
- register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
- tr, INT_MAX);
- register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
- tr, 0);
-
- register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
- tr, INT_MAX);
- register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
- tr, 0);
-
- register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
- tr, INT_MAX);
- register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
- tr, 0);
+ } else if (pid_list && !other_pids) {
+ register_pid_events(tr);
}
/*
@@ -1680,9 +1778,24 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
return ret;
}
+static ssize_t
+ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
+}
+
+static ssize_t
+ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
+}
+
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);
static const struct seq_operations show_event_seq_ops = {
@@ -1706,6 +1819,13 @@ static const struct seq_operations show_set_pid_seq_ops = {
.stop = p_stop,
};
+static const struct seq_operations show_set_no_pid_seq_ops = {
+ .start = np_start,
+ .next = np_next,
+ .show = trace_pid_show,
+ .stop = p_stop,
+};
+
static const struct file_operations ftrace_avail_fops = {
.open = ftrace_event_avail_open,
.read = seq_read,
@@ -1729,6 +1849,14 @@ static const struct file_operations ftrace_set_event_pid_fops = {
.release = ftrace_event_release,
};
+static const struct file_operations ftrace_set_event_notrace_pid_fops = {
+ .open = ftrace_event_set_npid_open,
+ .read = seq_read,
+ .write = ftrace_event_npid_write,
+ .llseek = seq_lseek,
+ .release = ftrace_event_release,
+};
+
static const struct file_operations ftrace_enable_fops = {
.open = tracing_open_generic,
.read = event_enable_read,
@@ -1858,7 +1986,28 @@ ftrace_event_set_pid_open(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
- ftrace_clear_event_pids(tr);
+ ftrace_clear_event_pids(tr, TRACE_PIDS);
+
+ ret = ftrace_event_open(inode, file, seq_ops);
+ if (ret < 0)
+ trace_array_put(tr);
+ return ret;
+}
+
+static int
+ftrace_event_set_npid_open(struct inode *inode, struct file *file)
+{
+ const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
+ struct trace_array *tr = inode->i_private;
+ int ret;
+
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
+
+ if ((file->f_mode & FMODE_WRITE) &&
+ (file->f_flags & O_TRUNC))
+ ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
ret = ftrace_event_open(inode, file, seq_ops);
if (ret < 0)
@@ -3075,6 +3224,11 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
if (!entry)
pr_warn("Could not create tracefs 'set_event_pid' entry\n");
+ entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
+ tr, &ftrace_set_event_notrace_pid_fops);
+ if (!entry)
+ pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
+
/* ring buffer internal formats */
entry = trace_create_file("header_page", 0444, d_events,
ring_buffer_print_page_header,
@@ -3158,7 +3312,7 @@ int event_trace_del_tracer(struct trace_array *tr)
clear_event_triggers(tr);
/* Clear the pid list */
- __ftrace_clear_event_pids(tr);
+ __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
/* Disable any running events */
__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 7d71546ba00a..4a9c49c08ec9 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -482,7 +482,7 @@ get_return_for_leaf(struct trace_iterator *iter,
/* this is a leaf, now advance the iterator */
if (ring_iter)
- ring_buffer_read(ring_iter, NULL);
+ ring_buffer_iter_advance(ring_iter);
return next;
}
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index a48808c43249..e2be7bb7ef7e 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -83,6 +83,7 @@ struct hwlat_sample {
u64 nmi_total_ts; /* Total time spent in NMIs */
struct timespec64 timestamp; /* wall time */
int nmi_count; /* # NMIs during this sample */
+ int count; /* # of iterations over thresh */
};
/* keep the global state somewhere. */
@@ -124,6 +125,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
entry->timestamp = sample->timestamp;
entry->nmi_total_ts = sample->nmi_total_ts;
entry->nmi_count = sample->nmi_count;
+ entry->count = sample->count;
if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit_nostack(buffer, event);
@@ -167,12 +169,14 @@ void trace_hwlat_callback(bool enter)
static int get_sample(void)
{
struct trace_array *tr = hwlat_trace;
+ struct hwlat_sample s;
time_type start, t1, t2, last_t2;
- s64 diff, total, last_total = 0;
+ s64 diff, outer_diff, total, last_total = 0;
u64 sample = 0;
u64 thresh = tracing_thresh;
u64 outer_sample = 0;
int ret = -1;
+ unsigned int count = 0;
do_div(thresh, NSEC_PER_USEC); /* modifies interval value */
@@ -186,6 +190,7 @@ static int get_sample(void)
init_time(last_t2, 0);
start = time_get(); /* start timestamp */
+ outer_diff = 0;
do {
@@ -194,14 +199,14 @@ static int get_sample(void)
if (time_u64(last_t2)) {
/* Check the delta from outer loop (t2 to next t1) */
- diff = time_to_us(time_sub(t1, last_t2));
+ outer_diff = time_to_us(time_sub(t1, last_t2));
/* This shouldn't happen */
- if (diff < 0) {
+ if (outer_diff < 0) {
pr_err(BANNER "time running backwards\n");
goto out;
}
- if (diff > outer_sample)
- outer_sample = diff;
+ if (outer_diff > outer_sample)
+ outer_sample = outer_diff;
}
last_t2 = t2;
@@ -217,6 +222,12 @@ static int get_sample(void)
/* This checks the inner loop (t1 to t2) */
diff = time_to_us(time_sub(t2, t1)); /* current diff */
+ if (diff > thresh || outer_diff > thresh) {
+ if (!count)
+ ktime_get_real_ts64(&s.timestamp);
+ count++;
+ }
+
/* This shouldn't happen */
if (diff < 0) {
pr_err(BANNER "time running backwards\n");
@@ -236,7 +247,6 @@ static int get_sample(void)
/* If we exceed the threshold value, we have found a hardware latency */
if (sample > thresh || outer_sample > thresh) {
- struct hwlat_sample s;
u64 latency;
ret = 1;
@@ -249,9 +259,9 @@ static int get_sample(void)
s.seqnum = hwlat_data.count;
s.duration = sample;
s.outer_duration = outer_sample;
- ktime_get_real_ts64(&s.timestamp);
s.nmi_total_ts = nmi_total_ts;
s.nmi_count = nmi_count;
+ s.count = count;
trace_hwlat_sample(&s);
latency = max(sample, outer_sample);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 362cca52f5de..d0568af4a0ef 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1078,6 +1078,8 @@ static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
int i;
seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
+ if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
+ seq_printf(m, "%d", tk->rp.maxactive);
seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
trace_probe_name(&tk->tp));
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index b4909082f6a4..9a121e147102 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -617,22 +617,19 @@ int trace_print_context(struct trace_iterator *iter)
int trace_print_lat_context(struct trace_iterator *iter)
{
+ struct trace_entry *entry, *next_entry;
struct trace_array *tr = iter->tr;
- /* trace_find_next_entry will reset ent_size */
- int ent_size = iter->ent_size;
struct trace_seq *s = &iter->seq;
- u64 next_ts;
- struct trace_entry *entry = iter->ent,
- *next_entry = trace_find_next_entry(iter, NULL,
- &next_ts);
unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
+ u64 next_ts;
- /* Restore the original ent_size */
- iter->ent_size = ent_size;
-
+ next_entry = trace_find_next_entry(iter, NULL, &next_ts);
if (!next_entry)
next_ts = iter->ts;
+ /* trace_find_next_entry() may change iter->ent */
+ entry = iter->ent;
+
if (verbose) {
char comm[TASK_COMM_LEN];
@@ -1158,12 +1155,12 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,
trace_assign_type(field, entry);
- trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld",
+ trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
field->seqnum,
field->duration,
field->outer_duration,
(long long)field->timestamp.tv_sec,
- field->timestamp.tv_nsec);
+ field->timestamp.tv_nsec, field->count);
if (field->nmi_count) {
/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4e01c448b4b4..3816a18c251e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2834,7 +2834,7 @@ void flush_workqueue(struct workqueue_struct *wq)
* First flushers are responsible for cascading flushes and
* handling overflow. Non-first flushers can simply return.
*/
- if (wq->first_flusher != &this_flusher)
+ if (READ_ONCE(wq->first_flusher) != &this_flusher)
return;
mutex_lock(&wq->mutex);
@@ -2843,7 +2843,7 @@ void flush_workqueue(struct workqueue_struct *wq)
if (wq->first_flusher != &this_flusher)
goto out_unlock;
- wq->first_flusher = NULL;
+ WRITE_ONCE(wq->first_flusher, NULL);
WARN_ON_ONCE(!list_empty(&this_flusher.list));
WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
@@ -5898,7 +5898,7 @@ static void __init wq_numa_init(void)
* items. Actual work item execution starts only after kthreads can be
* created and scheduled right before early initcalls.
*/
-int __init workqueue_init_early(void)
+void __init workqueue_init_early(void)
{
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
@@ -5965,8 +5965,6 @@ int __init workqueue_init_early(void)
!system_unbound_wq || !system_freezable_wq ||
!system_power_efficient_wq ||
!system_freezable_power_efficient_wq);
-
- return 0;
}
/**
@@ -5978,7 +5976,7 @@ int __init workqueue_init_early(void)
* are no kworkers executing the work items yet. Populate the worker pools
* with the initial workers and enable future kworker creations.
*/
-int __init workqueue_init(void)
+void __init workqueue_init(void)
{
struct workqueue_struct *wq;
struct worker_pool *pool;
@@ -6025,6 +6023,4 @@ int __init workqueue_init(void)
wq_online = true;
wq_watchdog_init();
-
- return 0;
}
diff --git a/lib/.gitignore b/lib/.gitignore
index f2a39c9e5485..327cb2c7f2c9 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
gen_crc32table
gen_crc64table
crc32table.h
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4cb4671b1d9e..21d9c5f6e7ec 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -98,7 +98,7 @@ config DYNAMIC_DEBUG
bool "Enable dynamic printk() support"
default n
depends on PRINTK
- depends on DEBUG_FS
+ depends on (DEBUG_FS || PROC_FS)
help
Compiles debug level messages into the kernel, which would not
@@ -116,8 +116,9 @@ config DYNAMIC_DEBUG
Usage:
Dynamic debugging is controlled via the 'dynamic_debug/control' file,
- which is contained in the 'debugfs' filesystem. Thus, the debugfs
- filesystem must first be mounted before making use of this feature.
+ which is contained in the 'debugfs' filesystem or procfs.
+ Thus, the debugfs or procfs filesystem must first be mounted before
+ making use of this feature.
We refer to the control file as: <debugfs>/dynamic_debug/control. This
file contains a list of the debug statements that can be enabled. The
format for each line of the file is:
@@ -306,18 +307,6 @@ config HEADERS_INSTALL
user-space program samples. It is also needed by some features such
as uapi header sanity checks.
-config OPTIMIZE_INLINING
- def_bool y
- help
- This option determines if the kernel forces gcc to inline the functions
- developers have marked 'inline'. Doing so takes away freedom from gcc to
- do what it thinks is best, which is desirable for the gcc 3.x series of
- compilers. The gcc 4.x series have a rewritten inlining algorithm and
- enabling this option will generate a smaller kernel there. Hopefully
- this algorithm is so good that allowing gcc 4.x and above to make the
- decision will become the default in the future. Until then this option
- is there to test gcc for this.
-
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
help
@@ -989,6 +978,18 @@ config WQ_WATCHDOG
state. This can be configured through kernel parameter
"workqueue.watchdog_thresh" and its sysfs counterpart.
+config TEST_LOCKUP
+ tristate "Test module to generate lockups"
+ help
+ This builds the "test_lockup" module that helps to make sure
+ that watchdogs and lockup detectors are working properly.
+
+ Depending on module parameters it could emulate soft or hard
+ lockup, "hung task", or locking arbitrary lock for a long time.
+ Also it could generate series of lockups with cooling-down periods.
+
+ If unsure, say N.
+
endmenu # "Debug lockups and hangs"
menu "Scheduler Debugging"
@@ -1656,7 +1657,7 @@ config FAILSLAB
Provide fault-injection capability for kmalloc.
config FAIL_PAGE_ALLOC
- bool "Fault-injection capabilitiy for alloc_pages()"
+ bool "Fault-injection capability for alloc_pages()"
depends on FAULT_INJECTION
help
Provide fault-injection capability for alloc_pages().
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 0e04fcb3ab3d..48469c95d78e 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -2,18 +2,50 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool
-config UBSAN
+menuconfig UBSAN
bool "Undefined behaviour sanity checker"
help
- This option enables undefined behaviour sanity checker
+ This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
- behaviours in runtime. Various types of checks may be enabled
- via boot parameter ubsan_handle
- (see: Documentation/dev-tools/ubsan.rst).
+ behaviours at runtime. For more details, see:
+ Documentation/dev-tools/ubsan.rst
+
+if UBSAN
+
+config UBSAN_TRAP
+ bool "On Sanitizer warnings, abort the running kernel code"
+ depends on $(cc-option, -fsanitize-undefined-trap-on-error)
+ help
+ Building kernels with Sanitizer features enabled tends to grow
+ the kernel size by around 5%, due to adding all the debugging
+ text on failure paths. To avoid this, Sanitizer instrumentation
+ can just issue a trap. This reduces the kernel size overhead but
+ turns all warnings (including potentially harmless conditions)
+ into full exceptions that abort the running kernel code
+ (regardless of context, locks held, etc), which may destabilize
+ the system. For some system builders this is an acceptable
+ trade-off.
+
+config UBSAN_BOUNDS
+ bool "Perform array index bounds checking"
+ default UBSAN
+ help
+ This option enables detection of directly indexed out of bounds
+ array accesses, where the array size is known at compile time.
+ Note that this does not protect array overflows via bad calls
+ to the {str,mem}*cpy() family of functions (that is addressed
+ by CONFIG_FORTIFY_SOURCE).
+
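For orientation, a minimal example (not from this patch) of the bug class UBSAN_BOUNDS instruments: a direct index into an array whose size is known at compile time.

	/* Under UBSAN_BOUNDS, calling this with idx >= 4 (or idx < 0)
	 * triggers a bounds-check report at runtime. */
	static int bounds_example(int idx)
	{
		int a[4] = { 1, 2, 3, 4 };

		return a[idx];
	}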
+config UBSAN_MISC
+ bool "Enable all other Undefined Behavior sanity checks"
+ default UBSAN
+ help
+ This option enables all sanity checks that don't have their
+ own Kconfig options. Disable this if you only want to have
+ individually selected checks.
config UBSAN_SANITIZE_ALL
bool "Enable instrumentation for the entire kernel"
- depends on UBSAN
depends on ARCH_HAS_UBSAN_SANITIZE_ALL
# We build with -Wno-maybe-uninitialized, but we still want to
@@ -30,7 +62,6 @@ config UBSAN_SANITIZE_ALL
config UBSAN_NO_ALIGNMENT
bool "Disable checking of pointers alignment"
- depends on UBSAN
default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
help
This option disables the check of unaligned memory accesses.
@@ -43,7 +74,9 @@ config UBSAN_ALIGNMENT
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
- depends on m && UBSAN
+ depends on m
help
This is a test module for UBSAN.
It triggers various undefined behavior and detects it.
+
+endif # if UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index 09a8acb0cf92..685aee60de1d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -87,9 +87,11 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
+CFLAGS_test_stackinit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
+obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
@@ -221,6 +223,10 @@ obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+# stackdepot.c should not be instrumented or call instrumented functions.
+# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
+# file.
+CFLAGS_stackdepot.o += -fno-builtin
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
@@ -280,7 +286,9 @@ quiet_cmd_build_OID_registry = GEN $@
clean-files += oid_registry_data.c
obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+ifneq ($(CONFIG_UBSAN_TRAP),y)
obj-$(CONFIG_UBSAN) += ubsan.o
+endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
diff --git a/lib/bch.c b/lib/bch.c
index 5db6d3a4c8a6..052d3fb753a0 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -102,7 +102,7 @@
*/
struct gf_poly {
unsigned int deg; /* polynomial degree */
- unsigned int c[0]; /* polynomial terms */
+ unsigned int c[]; /* polynomial terms */
};
/* given its degree, compute a polynomial size in bytes */
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index ec3ce7fd299f..912ef4921398 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -29,12 +29,14 @@ static int xbc_node_num __initdata;
static char *xbc_data __initdata;
static size_t xbc_data_size __initdata;
static struct xbc_node *last_parent __initdata;
+static const char *xbc_err_msg __initdata;
+static int xbc_err_pos __initdata;
static int __init xbc_parse_error(const char *msg, const char *p)
{
- int pos = p - xbc_data;
+ xbc_err_msg = msg;
+ xbc_err_pos = (int)(p - xbc_data);
- pr_err("Parse error at pos %d: %s\n", pos, msg);
return -EINVAL;
}
@@ -738,33 +740,44 @@ void __init xbc_destroy_all(void)
/**
* xbc_init() - Parse given XBC file and build XBC internal tree
* @buf: boot config text
+ * @emsg: Pointer to a const char * where the error message is stored
+ * @epos: Pointer to an int where the error position is stored
*
* This parses the boot config text in @buf. @buf must be a
* null terminated string and smaller than XBC_DATA_MAX.
* Returns the number of stored nodes (>0) on success, or -errno
* if there is any error.
+ * In error cases, @emsg is updated with an error message and @epos with
+ * the error position, which is the byte offset into @buf. If the error
+ * is not a parser error, @epos will be -1.
*/
-int __init xbc_init(char *buf)
+int __init xbc_init(char *buf, const char **emsg, int *epos)
{
char *p, *q;
int ret, c;
+ if (epos)
+ *epos = -1;
+
if (xbc_data) {
- pr_err("Error: bootconfig is already initialized.\n");
+ if (emsg)
+ *emsg = "Bootconfig is already initialized";
return -EBUSY;
}
ret = strlen(buf);
if (ret > XBC_DATA_MAX - 1 || ret == 0) {
- pr_err("Error: Config data is %s.\n",
- ret ? "too big" : "empty");
+ if (emsg)
+ *emsg = ret ? "Config data is too big" :
+ "Config data is empty";
return -ERANGE;
}
xbc_nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX,
SMP_CACHE_BYTES);
if (!xbc_nodes) {
- pr_err("Failed to allocate memory for bootconfig nodes.\n");
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig nodes";
return -ENOMEM;
}
memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
@@ -814,9 +827,13 @@ int __init xbc_init(char *buf)
if (!ret)
ret = xbc_verify_tree();
- if (ret < 0)
+ if (ret < 0) {
+ if (epos)
+ *epos = xbc_err_pos;
+ if (emsg)
+ *emsg = xbc_err_msg;
xbc_destroy_all();
- else
+ } else
ret = xbc_node_num;
return ret;
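A hedged caller sketch for the reworked xbc_init() interface; error reporting is now the caller's responsibility via @emsg/@epos (bootconfig_buf is a hypothetical buffer name):

	const char *msg;
	int pos, ret;

	ret = xbc_init(bootconfig_buf, &msg, &pos);
	if (ret < 0) {
		if (pos >= 0)
			pr_err("Parse error at pos %d: %s\n", pos, msg);
		else
			pr_err("Invalid bootconfig: %s\n", msg);
	}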
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c60409138e13..8f199f403ab5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -876,6 +876,14 @@ static const struct file_operations ddebug_proc_fops = {
.write = ddebug_proc_write
};
+static const struct proc_ops proc_fops = {
+ .proc_open = ddebug_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
+ .proc_write = ddebug_proc_write
+};
+
/*
* Allocate a new ddebug_table for the given module
* and add it to the global list.
@@ -991,15 +999,25 @@ static void ddebug_remove_all_tables(void)
static __initdata int ddebug_init_success;
-static int __init dynamic_debug_init_debugfs(void)
+static int __init dynamic_debug_init_control(void)
{
- struct dentry *dir;
+ struct proc_dir_entry *procfs_dir;
+ struct dentry *debugfs_dir;
if (!ddebug_init_success)
return -ENODEV;
- dir = debugfs_create_dir("dynamic_debug", NULL);
- debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
+ /* Create the control file in debugfs if it is enabled */
+ if (debugfs_initialized()) {
+ debugfs_dir = debugfs_create_dir("dynamic_debug", NULL);
+ debugfs_create_file("control", 0644, debugfs_dir, NULL,
+ &ddebug_proc_fops);
+ }
+
+ /* Also create the control file in procfs */
+ procfs_dir = proc_mkdir("dynamic_debug", NULL);
+ if (procfs_dir)
+ proc_create("control", 0644, procfs_dir, &proc_fops);
return 0;
}
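For orientation, a minimal sketch (illustrative code, not part of this patch) of the call-site side: each pr_debug() in a CONFIG_DYNAMIC_DEBUG build gets a _ddebug record and can now be toggled through either <debugfs>/dynamic_debug/control or /proc/dynamic_debug/control.

	/* Off by default; enabled at runtime by writing a matching
	 * query plus "+p" to the control file. */
	static void example_probe(int id)
	{
		pr_debug("probing device %d\n", id);
	}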
@@ -1013,7 +1031,7 @@ static int __init dynamic_debug_init(void)
int n = 0, entries = 0, modct = 0;
int verbose_bytes = 0;
- if (__start___verbose == __stop___verbose) {
+ if (&__start___verbose == &__stop___verbose) {
pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
return 1;
}
@@ -1077,4 +1095,4 @@ out_err:
early_initcall(dynamic_debug_init);
/* Debugfs setup must be done later */
-fs_initcall(dynamic_debug_init_debugfs);
+fs_initcall(dynamic_debug_init_control);
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 065aa16f448b..95d12e3d6d95 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -14,6 +14,14 @@ menuconfig KUNIT
if KUNIT
+config KUNIT_DEBUGFS
+ bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation"
+ help
+ Enable debugfs representation for kunit. Currently this consists
+ of /sys/kernel/debug/kunit/<test_suite>/results files for each
+ test suite, which let users see the results of that suite's most
+ recent run.
+
config KUNIT_TEST
tristate "KUnit test for KUnit"
help
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index fab55649b69a..724b94311ca3 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -5,6 +5,10 @@ kunit-objs += test.o \
assert.o \
try-catch.o
+ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
+kunit-objs += debugfs.o
+endif
+
obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
# string-stream-test compiles built-in only.
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index b24bebca052d..33acdaa28a7d 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -6,6 +6,7 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
#include <kunit/assert.h>
+#include <kunit/test.h>
#include "string-stream.h"
@@ -53,12 +54,12 @@ void kunit_unary_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
if (unary_assert->expected_true)
string_stream_add(stream,
- "\tExpected %s to be true, but is false\n",
- unary_assert->condition);
+ KUNIT_SUBTEST_INDENT "Expected %s to be true, but is false\n",
+ unary_assert->condition);
else
string_stream_add(stream,
- "\tExpected %s to be false, but is true\n",
- unary_assert->condition);
+ KUNIT_SUBTEST_INDENT "Expected %s to be false, but is true\n",
+ unary_assert->condition);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
@@ -72,13 +73,13 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
if (!ptr_assert->value) {
string_stream_add(stream,
- "\tExpected %s is not null, but is\n",
- ptr_assert->text);
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ ptr_assert->text);
} else if (IS_ERR(ptr_assert->value)) {
string_stream_add(stream,
- "\tExpected %s is not error, but is: %ld\n",
- ptr_assert->text,
- PTR_ERR(ptr_assert->value));
+ KUNIT_SUBTEST_INDENT "Expected %s is not error, but is: %ld\n",
+ ptr_assert->text,
+ PTR_ERR(ptr_assert->value));
}
kunit_assert_print_msg(assert, stream);
}
@@ -92,16 +93,16 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %lld\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %lld",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
@@ -114,16 +115,16 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %pK\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %pK",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
@@ -136,16 +137,16 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
kunit_base_assert_format(assert, stream);
string_stream_add(stream,
- "\tExpected %s %s %s, but\n",
- binary_assert->left_text,
- binary_assert->operation,
- binary_assert->right_text);
- string_stream_add(stream, "\t\t%s == %s\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, "\t\t%s == %s",
- binary_assert->right_text,
- binary_assert->right_value);
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->left_text,
+ binary_assert->operation,
+ binary_assert->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %s",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
new file mode 100644
index 000000000000..9214c493d8b7
--- /dev/null
+++ b/lib/kunit/debugfs.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ * Author: Alan Maguire <alan.maguire@oracle.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include <kunit/test.h>
+
+#include "string-stream.h"
+
+#define KUNIT_DEBUGFS_ROOT "kunit"
+#define KUNIT_DEBUGFS_RESULTS "results"
+
+/*
+ * Create a debugfs representation of test suites:
+ *
+ * Path Semantics
+ * /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
+ * testsuite
+ *
+ */
+
+static struct dentry *debugfs_rootdir;
+
+void kunit_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(debugfs_rootdir);
+}
+
+void kunit_debugfs_init(void)
+{
+ if (!debugfs_rootdir)
+ debugfs_rootdir = debugfs_create_dir(KUNIT_DEBUGFS_ROOT, NULL);
+}
+
+static void debugfs_print_result(struct seq_file *seq,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ if (!test_case || !test_case->log)
+ return;
+
+ seq_printf(seq, "%s", test_case->log);
+}
+
+/*
+ * /sys/kernel/debug/kunit/<testsuite>/results shows all results for testsuite.
+ */
+static int debugfs_print_results(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+ bool success = kunit_suite_has_succeeded(suite);
+ struct kunit_case *test_case;
+
+ if (!suite || !suite->log)
+ return 0;
+
+ seq_printf(seq, "%s", suite->log);
+
+ kunit_suite_for_each_test_case(suite, test_case)
+ debugfs_print_result(seq, suite, test_case);
+
+ seq_printf(seq, "%s %d - %s\n",
+ kunit_status_to_string(success), 1, suite->name);
+ return 0;
+}
+
+static int debugfs_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static int debugfs_results_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_results, suite);
+}
+
+static const struct file_operations debugfs_results_fops = {
+ .open = debugfs_results_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ /* Allocate logs before creating debugfs representation. */
+ suite->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+ kunit_suite_for_each_test_case(suite, test_case)
+ test_case->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+
+ suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
+
+ debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
+ suite->debugfs,
+ suite, &debugfs_results_fops);
+}
+
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ debugfs_remove_recursive(suite->debugfs);
+ kfree(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ kfree(test_case->log);
+}
diff --git a/lib/kunit/debugfs.h b/lib/kunit/debugfs.h
new file mode 100644
index 000000000000..dcc7d7556107
--- /dev/null
+++ b/lib/kunit/debugfs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef _KUNIT_DEBUGFS_H
+#define _KUNIT_DEBUGFS_H
+
+#include <kunit/test.h>
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite);
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite);
+void kunit_debugfs_init(void);
+void kunit_debugfs_cleanup(void);
+
+#else
+
+static inline void kunit_debugfs_create_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_destroy_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_init(void) { }
+
+static inline void kunit_debugfs_cleanup(void) { }
+
+#endif /* CONFIG_KUNIT_DEBUGFS */
+
+#endif /* _KUNIT_DEBUGFS_H */
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index ccb8d2e332f7..4f3d36a72f8f 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -134,7 +134,7 @@ static void kunit_resource_test_init_resources(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
- kunit_init_test(&ctx->test, "testing_test_init_test");
+ kunit_init_test(&ctx->test, "testing_test_init_test", NULL);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
@@ -301,7 +301,7 @@ static int kunit_resource_test_init(struct kunit *test)
test->priv = ctx;
- kunit_init_test(&ctx->test, "test_test_context");
+ kunit_init_test(&ctx->test, "test_test_context", NULL);
return 0;
}
@@ -329,6 +329,44 @@ static struct kunit_suite kunit_resource_test_suite = {
.exit = kunit_resource_test_exit,
.test_cases = kunit_resource_test_cases,
};
-kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite);
+
+static void kunit_log_test(struct kunit *test);
+
+static struct kunit_case kunit_log_test_cases[] = {
+ KUNIT_CASE(kunit_log_test),
+ {}
+};
+
+static struct kunit_suite kunit_log_test_suite = {
+ .name = "kunit-log-test",
+ .test_cases = kunit_log_test_cases,
+};
+
+static void kunit_log_test(struct kunit *test)
+{
+ struct kunit_suite *suite = &kunit_log_test_suite;
+
+ kunit_log(KERN_INFO, test, "put this in log.");
+ kunit_log(KERN_INFO, test, "this too.");
+ kunit_log(KERN_INFO, suite, "add to suite log.");
+ kunit_log(KERN_INFO, suite, "along with this.");
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "put this in log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "this too."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite->log, "add to suite log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite->log, "along with this."));
+#else
+ KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
+ KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
+#endif
+}
+
+kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
+ &kunit_log_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 9242f932896c..7a6430a7fca0 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/sched/debug.h>
+#include "debugfs.h"
#include "string-stream.h"
#include "try-catch-impl.h"
@@ -28,73 +29,117 @@ static void kunit_print_tap_version(void)
}
}
-static size_t kunit_test_cases_len(struct kunit_case *test_cases)
+/*
+ * Append formatted message to log, size of which is limited to
+ * KUNIT_LOG_SIZE bytes (including null terminating byte).
+ */
+void kunit_log_append(char *log, const char *fmt, ...)
+{
+ char line[KUNIT_LOG_SIZE];
+ va_list args;
+ int len_left;
+
+ if (!log)
+ return;
+
+ len_left = KUNIT_LOG_SIZE - strlen(log) - 1;
+ if (len_left <= 0)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(line, sizeof(line), fmt, args);
+ va_end(args);
+
+ strncat(log, line, len_left);
+}
+EXPORT_SYMBOL_GPL(kunit_log_append);
+
+size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
{
struct kunit_case *test_case;
size_t len = 0;
- for (test_case = test_cases; test_case->run_case; test_case++)
+ kunit_suite_for_each_test_case(suite, test_case)
len++;
return len;
}
+EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_subtest_start(struct kunit_suite *suite)
{
kunit_print_tap_version();
- pr_info("\t# Subtest: %s\n", suite->name);
- pr_info("\t1..%zd\n", kunit_test_cases_len(suite->test_cases));
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
+ suite->name);
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
+ kunit_suite_num_test_cases(suite));
}
-static void kunit_print_ok_not_ok(bool should_indent,
+static void kunit_print_ok_not_ok(void *test_or_suite,
+ bool is_test,
bool is_ok,
size_t test_number,
const char *description)
{
- const char *indent, *ok_not_ok;
-
- if (should_indent)
- indent = "\t";
- else
- indent = "";
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit *test = is_test ? test_or_suite : NULL;
- if (is_ok)
- ok_not_ok = "ok";
+ /*
+ * We do not log the suite-level result line: if we did, the
+ * debugfs output would show the suite description and status
+ * ahead of the individual test results. Hence printk() the
+ * suite status directly here, and seq_printf() it separately
+ * for the debugfs representation.
+ */
+ if (suite)
+ pr_info("%s %zd - %s",
+ kunit_status_to_string(is_ok),
+ test_number, description);
else
- ok_not_ok = "not ok";
-
- pr_info("%s%s %zd - %s\n", indent, ok_not_ok, test_number, description);
+ kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
+ kunit_status_to_string(is_ok),
+ test_number, description);
}
-static bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+bool kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
- for (test_case = suite->test_cases; test_case->run_case; test_case++)
+ kunit_suite_for_each_test_case(suite, test_case) {
if (!test_case->success)
return false;
+ }
return true;
}
+EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
static void kunit_print_subtest_end(struct kunit_suite *suite)
{
static size_t kunit_suite_counter = 1;
- kunit_print_ok_not_ok(false,
+ kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
suite->name);
}
-static void kunit_print_test_case_ok_not_ok(struct kunit_case *test_case,
- size_t test_number)
+unsigned int kunit_test_case_num(struct kunit_suite *suite,
+ struct kunit_case *test_case)
{
- kunit_print_ok_not_ok(true,
- test_case->success,
- test_number,
- test_case->name);
+ struct kunit_case *tc;
+ unsigned int i = 1;
+
+ kunit_suite_for_each_test_case(suite, tc) {
+ if (tc == test_case)
+ return i;
+ i++;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(kunit_test_case_num);
static void kunit_print_string_stream(struct kunit *test,
struct string_stream *stream)
@@ -102,6 +147,9 @@ static void kunit_print_string_stream(struct kunit *test,
struct string_stream_fragment *fragment;
char *buf;
+ if (string_stream_is_empty(stream))
+ return;
+
buf = string_stream_get_string(stream);
if (!buf) {
kunit_err(test,
@@ -175,11 +223,14 @@ void kunit_do_assertion(struct kunit *test,
}
EXPORT_SYMBOL_GPL(kunit_do_assertion);
-void kunit_init_test(struct kunit *test, const char *name)
+void kunit_init_test(struct kunit *test, const char *name, char *log)
{
spin_lock_init(&test->lock);
INIT_LIST_HEAD(&test->resources);
test->name = name;
+ test->log = log;
+ if (test->log)
+ test->log[0] = '\0';
test->success = true;
}
EXPORT_SYMBOL_GPL(kunit_init_test);
@@ -290,7 +341,7 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
struct kunit_try_catch *try_catch;
struct kunit test;
- kunit_init_test(&test, test_case->name);
+ kunit_init_test(&test, test_case->name, test_case->log);
try_catch = &test.try_catch;
kunit_try_catch_init(try_catch,
@@ -303,19 +354,20 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
kunit_try_catch_run(try_catch, &context);
test_case->success = test.success;
+
+ kunit_print_ok_not_ok(&test, true, test_case->success,
+ kunit_test_case_num(suite, test_case),
+ test_case->name);
}
int kunit_run_tests(struct kunit_suite *suite)
{
struct kunit_case *test_case;
- size_t test_case_count = 1;
kunit_print_subtest_start(suite);
- for (test_case = suite->test_cases; test_case->run_case; test_case++) {
+ kunit_suite_for_each_test_case(suite, test_case)
kunit_run_case_catch_errors(suite, test_case);
- kunit_print_test_case_ok_not_ok(test_case, test_case_count++);
- }
kunit_print_subtest_end(suite);
@@ -323,6 +375,37 @@ int kunit_run_tests(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_run_tests);
+static void kunit_init_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_create_suite(suite);
+}
+
+int __kunit_test_suites_init(struct kunit_suite **suites)
+{
+ unsigned int i;
+
+ for (i = 0; suites[i] != NULL; i++) {
+ kunit_init_suite(suites[i]);
+ kunit_run_tests(suites[i]);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
+
+static void kunit_exit_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_destroy_suite(suite);
+}
+
+void __kunit_test_suites_exit(struct kunit_suite **suites)
+{
+ unsigned int i;
+
+ for (i = 0; suites[i] != NULL; i++)
+ kunit_exit_suite(suites[i]);
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
+
struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
kunit_resource_init_t init,
kunit_resource_free_t free,
@@ -489,12 +572,15 @@ EXPORT_SYMBOL_GPL(kunit_cleanup);
static int __init kunit_init(void)
{
+ kunit_debugfs_init();
+
return 0;
}
late_initcall(kunit_init);
static void __exit kunit_exit(void)
{
+ kunit_debugfs_cleanup();
}
module_exit(kunit_exit);
diff --git a/lib/list-test.c b/lib/list-test.c
index 76babb1df889..ee09505df16f 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -659,7 +659,7 @@ static void list_test_list_for_each_prev_safe(struct kunit *test)
static void list_test_list_for_each_entry(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
- static LIST_HEAD(list);
+ LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
@@ -680,7 +680,7 @@ static void list_test_list_for_each_entry(struct kunit *test)
static void list_test_list_for_each_entry_reverse(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
- static LIST_HEAD(list);
+ LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 4f6c6ebbbbde..8d092609928e 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -50,9 +50,10 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
* @flags: PERCPU_REF_INIT_* flags
* @gfp: allocation mask to use
*
- * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
- * refcount of 1; analagous to atomic_long_set(ref, 1). See the
- * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
+ * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
+ * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
+ * change the start state to atomic with the latter setting the initial refcount
+ * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c8fa1d274530..2ee6ae3b0ade 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -56,14 +56,6 @@ struct kmem_cache *radix_tree_node_cachep;
#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
/*
- * The IDA is even shorter since it uses a bitmap at the last level.
- */
-#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
-#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
-
-/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index 3de0d8921286..6be57745afd1 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mktables
altivec*.c
int*.c
diff --git a/lib/rbtree.c b/lib/rbtree.c
index abc86c6a3177..8545872e61db 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -503,7 +503,7 @@ struct rb_node *rb_next(const struct rb_node *node)
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
- node=node->rb_left;
+ node = node->rb_left;
return (struct rb_node *)node;
}
@@ -535,7 +535,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
- node=node->rb_right;
+ node = node->rb_right;
return (struct rb_node *)node;
}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5813072bc589..5d63a8857f36 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL(sg_miter_stop);
* @buflen: The number of bytes to copy
* @skip: Number of bytes to skip before copying
* @to_buffer: transfer direction (true == from an sg list to a
- * buffer, false == from a buffer to an sg list
+ * buffer, false == from a buffer to an sg list)
*
* Returns the number of copied bytes.
*
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 81c69c08d1d1..2caffc64e4c8 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -20,6 +20,7 @@
*/
#include <linux/gfp.h>
+#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -202,9 +203,20 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
- void *slab = stack_slabs[parts.slabindex];
+ void *slab;
size_t offset = parts.offset << STACK_ALLOC_ALIGN;
- struct stack_record *stack = slab + offset;
+ struct stack_record *stack;
+
+ *entries = NULL;
+ if (parts.slabindex > depot_index) {
+ WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
+ parts.slabindex, depot_index, handle);
+ return 0;
+ }
+ slab = stack_slabs[parts.slabindex];
+ if (!slab)
+ return 0;
+ stack = slab + offset;
*entries = stack->entries;
return stack->size;
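With the added sanity checks, a corrupted or out-of-range handle now yields zero entries and a NULL *entries rather than a wild pointer; a hedged caller sketch:

	unsigned long *entries;
	unsigned int nr;

	nr = stack_depot_fetch(handle, &entries);
	if (nr)		/* nr == 0 on a bad handle */
		stack_trace_print(entries, nr, 0);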
@@ -305,3 +317,26 @@ fast_exit:
return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+
+static inline int in_irqentry_text(unsigned long ptr)
+{
+ return (ptr >= (unsigned long)&__irqentry_text_start &&
+ ptr < (unsigned long)&__irqentry_text_end) ||
+ (ptr >= (unsigned long)&__softirqentry_text_start &&
+ ptr < (unsigned long)&__softirqentry_text_end);
+}
+
+unsigned int filter_irq_stacks(unsigned long *entries,
+ unsigned int nr_entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_entries; i++) {
+ if (in_irqentry_text(entries[i])) {
+ /* Include the irqentry function into the stack. */
+ return i + 1;
+ }
+ }
+ return nr_entries;
+}
+EXPORT_SYMBOL_GPL(filter_irq_stacks);
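A hedged usage sketch (the caller code here is assumed, not part of this patch): trimming a captured trace at the irq entry point before depositing it keeps otherwise-identical stacks from becoming distinct depot entries merely because of what they interrupted.

	unsigned long entries[64];
	unsigned int nr;
	depot_stack_handle_t handle;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr = filter_irq_stacks(entries, nr);
	handle = stack_depot_save(entries, nr, GFP_ATOMIC);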
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 61ed71c1daba..6b13150667f5 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -278,6 +278,8 @@ static void __init test_replace(void)
unsigned int nlongs = DIV_ROUND_UP(nbits, BITS_PER_LONG);
DECLARE_BITMAP(bmap, 1024);
+ BUILD_BUG_ON(EXP2_IN_BITS < nbits * 2);
+
bitmap_zero(bmap, 1024);
bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
expect_eq_bitmap(bmap, exp3_0_1, nbits);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 3872d250ed2c..e3087d90e00d 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -285,6 +285,24 @@ static noinline void __init kmalloc_oob_in_memset(void)
kfree(ptr);
}
+static noinline void __init kmalloc_memmove_invalid_size(void)
+{
+ char *ptr;
+ size_t size = 64;
+ volatile size_t invalid_size = -2;
+
+ pr_info("invalid size in memmove\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ memset((char *)ptr, 0, 64);
+ memmove((char *)ptr, (char *)ptr + 4, invalid_size);
+ kfree(ptr);
+}
+
static noinline void __init kmalloc_uaf(void)
{
char *ptr;
@@ -799,6 +817,7 @@ static int __init kmalloc_tests_init(void)
kmalloc_oob_memset_4();
kmalloc_oob_memset_8();
kmalloc_oob_memset_16();
+ kmalloc_memmove_invalid_size();
kmalloc_uaf();
kmalloc_uaf_memset();
kmalloc_uaf2();
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 9cf77628fc91..e651c37d56db 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -204,7 +204,7 @@ static void test_kmod_put_module(struct kmod_test_device_info *info)
case TEST_KMOD_DRIVER:
break;
case TEST_KMOD_FS_TYPE:
- if (info && info->fs_sync && info->fs_sync->owner)
+ if (info->fs_sync && info->fs_sync->owner)
module_put(info->fs_sync->owner);
break;
default:
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
new file mode 100644
index 000000000000..ea09ca335b21
--- /dev/null
+++ b/lib/test_lockup.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module to generate lockups
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/clock.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+
+static unsigned int time_secs;
+module_param(time_secs, uint, 0600);
+MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");
+
+static unsigned int time_nsecs;
+module_param(time_nsecs, uint, 0600);
+MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");
+
+static unsigned int cooldown_secs;
+module_param(cooldown_secs, uint, 0600);
+MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");
+
+static unsigned int cooldown_nsecs;
+module_param(cooldown_nsecs, uint, 0600);
+MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");
+
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0600);
+MODULE_PARM_DESC(iterations, "lockup iterations, default 1");
+
+static bool all_cpus;
+module_param(all_cpus, bool, 0400);
+MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");
+
+static int wait_state;
+static char *state = "R";
+module_param(state, charp, 0400);
+MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");
+
+static bool use_hrtimer;
+module_param(use_hrtimer, bool, 0400);
+MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");
+
+static bool iowait;
+module_param(iowait, bool, 0400);
+MODULE_PARM_DESC(iowait, "account sleep time as iowait");
+
+static bool lock_read;
+module_param(lock_read, bool, 0400);
+MODULE_PARM_DESC(lock_read, "lock read-write locks for read");
+
+static bool lock_single;
+module_param(lock_single, bool, 0400);
+MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");
+
+static bool reacquire_locks;
+module_param(reacquire_locks, bool, 0400);
+MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");
+
+static bool touch_softlockup;
+module_param(touch_softlockup, bool, 0600);
+MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");
+
+static bool touch_hardlockup;
+module_param(touch_hardlockup, bool, 0600);
+MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");
+
+static bool call_cond_resched;
+module_param(call_cond_resched, bool, 0600);
+MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");
+
+static bool measure_lock_wait;
+module_param(measure_lock_wait, bool, 0400);
+MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
+
+static unsigned long lock_wait_threshold = ULONG_MAX;
+module_param(lock_wait_threshold, ulong, 0400);
+MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");
+
+static bool test_disable_irq;
+module_param_named(disable_irq, test_disable_irq, bool, 0400);
+MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");
+
+static bool disable_softirq;
+module_param(disable_softirq, bool, 0400);
+MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");
+
+static bool disable_preempt;
+module_param(disable_preempt, bool, 0400);
+MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");
+
+static bool lock_rcu;
+module_param(lock_rcu, bool, 0400);
+MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");
+
+static bool lock_mmap_sem;
+module_param(lock_mmap_sem, bool, 0400);
+MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_sem: block procfs interfaces");
+
+static unsigned long lock_rwsem_ptr;
+module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");
+
+static unsigned long lock_mutex_ptr;
+module_param_unsafe(lock_mutex_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");
+
+static unsigned long lock_spinlock_ptr;
+module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");
+
+static unsigned long lock_rwlock_ptr;
+module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");
+
+static unsigned int alloc_pages_nr;
+module_param_unsafe(alloc_pages_nr, uint, 0600);
+MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");
+
+static unsigned int alloc_pages_order;
+module_param(alloc_pages_order, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");
+
+static gfp_t alloc_pages_gfp = GFP_KERNEL;
+module_param_unsafe(alloc_pages_gfp, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");
+
+static bool alloc_pages_atomic;
+module_param(alloc_pages_atomic, bool, 0400);
+MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");
+
+static bool reallocate_pages;
+module_param(reallocate_pages, bool, 0400);
+MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
+
+struct file *test_file;
+struct inode *test_inode;
+static char test_file_path[256];
+module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
+MODULE_PARM_DESC(file_path, "file path to test");
+
+static bool test_lock_inode;
+module_param_named(lock_inode, test_lock_inode, bool, 0400);
+MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");
+
+static bool test_lock_mapping;
+module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
+MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");
+
+static bool test_lock_sb_umount;
+module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
+MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
+
+static atomic_t alloc_pages_failed = ATOMIC_INIT(0);
+
+static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
+
+static struct task_struct *main_task;
+static int master_cpu;
+
+static void test_lock(bool master, bool verbose)
+{
+ u64 uninitialized_var(wait_start);
+
+ if (measure_lock_wait)
+ wait_start = local_clock();
+
+ if (lock_mutex_ptr && master) {
+ if (verbose)
+ pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
+ mutex_lock((struct mutex *)lock_mutex_ptr);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (verbose)
+ pr_notice("lock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ if (lock_read)
+ down_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ down_write((struct rw_semaphore *)lock_rwsem_ptr);
+ }
+
+ if (lock_mmap_sem && master) {
+ if (verbose)
+ pr_notice("lock mmap_sem pid=%d\n", main_task->pid);
+ if (lock_read)
+ down_read(&main_task->mm->mmap_sem);
+ else
+ down_write(&main_task->mm->mmap_sem);
+ }
+
+ if (test_disable_irq)
+ local_irq_disable();
+
+ if (disable_softirq)
+ local_bh_disable();
+
+ if (disable_preempt)
+ preempt_disable();
+
+ if (lock_rcu)
+ rcu_read_lock();
+
+ if (lock_spinlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ spin_lock((spinlock_t *)lock_spinlock_ptr);
+ }
+
+ if (lock_rwlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ if (lock_read)
+ read_lock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_lock((rwlock_t *)lock_rwlock_ptr);
+ }
+
+ if (measure_lock_wait) {
+ s64 cur_wait = local_clock() - wait_start;
+ s64 max_wait = atomic64_read(&max_lock_wait);
+
+ do {
+ if (cur_wait < max_wait)
+ break;
+ max_wait = atomic64_cmpxchg(&max_lock_wait,
+ max_wait, cur_wait);
+ } while (max_wait != cur_wait);
+
+ if (cur_wait > lock_wait_threshold)
+ pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
+ }
+}
+
+static void test_unlock(bool master, bool verbose)
+{
+ if (lock_rwlock_ptr && master) {
+ if (lock_read)
+ read_unlock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_unlock((rwlock_t *)lock_rwlock_ptr);
+ if (verbose)
+ pr_notice("unlock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ }
+
+ if (lock_spinlock_ptr && master) {
+ spin_unlock((spinlock_t *)lock_spinlock_ptr);
+ if (verbose)
+ pr_notice("unlock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ }
+
+ if (lock_rcu)
+ rcu_read_unlock();
+
+ if (disable_preempt)
+ preempt_enable();
+
+ if (disable_softirq)
+ local_bh_enable();
+
+ if (test_disable_irq)
+ local_irq_enable();
+
+ if (lock_mmap_sem && master) {
+ if (lock_read)
+ up_read(&main_task->mm->mmap_sem);
+ else
+ up_write(&main_task->mm->mmap_sem);
+ if (verbose)
+ pr_notice("unlock mmap_sem pid=%d\n", main_task->pid);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (lock_read)
+ up_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ up_write((struct rw_semaphore *)lock_rwsem_ptr);
+ if (verbose)
+ pr_notice("unlock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ }
+
+ if (lock_mutex_ptr && master) {
+ mutex_unlock((struct mutex *)lock_mutex_ptr);
+ if (verbose)
+ pr_notice("unlock mutex %ps\n",
+ (void *)lock_mutex_ptr);
+ }
+}
+
+static void test_alloc_pages(struct list_head *pages)
+{
+ struct page *page;
+ unsigned int i;
+
+ for (i = 0; i < alloc_pages_nr; i++) {
+ page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
+ if (!page) {
+ atomic_inc(&alloc_pages_failed);
+ break;
+ }
+ list_add(&page->lru, pages);
+ }
+}
+
+static void test_free_pages(struct list_head *pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, pages, lru)
+ __free_pages(page, alloc_pages_order);
+ INIT_LIST_HEAD(pages);
+}
+
+static void test_wait(unsigned int secs, unsigned int nsecs)
+{
+ if (wait_state == TASK_RUNNING) {
+ if (secs)
+ mdelay(secs * MSEC_PER_SEC);
+ if (nsecs)
+ ndelay(nsecs);
+ return;
+ }
+
+ __set_current_state(wait_state);
+ if (use_hrtimer) {
+ ktime_t time;
+
+ time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
+ schedule_hrtimeout(&time, HRTIMER_MODE_REL);
+ } else {
+ schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
+ }
+}
+
+static void test_lockup(bool master)
+{
+ u64 lockup_start = local_clock();
+ unsigned int iter = 0;
+ LIST_HEAD(pages);
+
+ pr_notice("Start on CPU%d\n", raw_smp_processor_id());
+
+ test_lock(master, true);
+
+ test_alloc_pages(&pages);
+
+ while (iter++ < iterations && !signal_pending(main_task)) {
+
+ if (iowait)
+ current->in_iowait = 1;
+
+ test_wait(time_secs, time_nsecs);
+
+ if (iowait)
+ current->in_iowait = 0;
+
+ if (reallocate_pages)
+ test_free_pages(&pages);
+
+ if (reacquire_locks)
+ test_unlock(master, false);
+
+ if (touch_softlockup)
+ touch_softlockup_watchdog();
+
+ if (touch_hardlockup)
+ touch_nmi_watchdog();
+
+ if (call_cond_resched)
+ cond_resched();
+
+ test_wait(cooldown_secs, cooldown_nsecs);
+
+ if (reacquire_locks)
+ test_lock(master, false);
+
+ if (reallocate_pages)
+ test_alloc_pages(&pages);
+ }
+
+ pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
+ local_clock() - lockup_start);
+
+ test_free_pages(&pages);
+
+ test_unlock(master, true);
+}
+
+DEFINE_PER_CPU(struct work_struct, test_works);
+
+static void test_work_fn(struct work_struct *work)
+{
+ test_lockup(!lock_single ||
+ work == per_cpu_ptr(&test_works, master_cpu));
+}
+
+static bool test_kernel_ptr(unsigned long addr, int size)
+{
+ void *ptr = (void *)addr;
+ char buf;
+
+ if (!addr)
+ return false;
+
+ /* should be at least readable kernel address */
+ if (access_ok(ptr, 1) ||
+ access_ok(ptr + size - 1, 1) ||
+ probe_kernel_address(ptr, buf) ||
+ probe_kernel_address(ptr + size - 1, buf)) {
+ pr_err("invalid kernel ptr: %#lx\n", addr);
+ return true;
+ }
+
+ return false;
+}
+
+static bool __maybe_unused test_magic(unsigned long addr, int offset,
+ unsigned int expected)
+{
+ void *ptr = (void *)addr + offset;
+ unsigned int magic = 0;
+
+ if (!addr)
+ return false;
+
+ if (probe_kernel_address(ptr, magic) || magic != expected) {
+ pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
+ addr, offset, magic, expected);
+ return true;
+ }
+
+ return false;
+}
+
+static int __init test_lockup_init(void)
+{
+ u64 test_start = local_clock();
+
+ main_task = current;
+
+ switch (state[0]) {
+ case 'S':
+ wait_state = TASK_INTERRUPTIBLE;
+ break;
+ case 'D':
+ wait_state = TASK_UNINTERRUPTIBLE;
+ break;
+ case 'K':
+ wait_state = TASK_KILLABLE;
+ break;
+ case 'R':
+ wait_state = TASK_RUNNING;
+ break;
+ default:
+ pr_err("unknown state=%s\n", state);
+ return -EINVAL;
+ }
+
+ if (alloc_pages_atomic)
+ alloc_pages_gfp = GFP_ATOMIC;
+
+ if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
+ test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
+ test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
+ test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
+ return -EINVAL;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, magic),
+ RWLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, wait_lock.rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#endif
+
+ if ((wait_state != TASK_RUNNING ||
+ (call_cond_resched && !reacquire_locks) ||
+ (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
+ (test_disable_irq || disable_softirq || disable_preempt ||
+ lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
+ pr_err("refuse to sleep in atomic context\n");
+ return -EINVAL;
+ }
+
+ if (lock_mmap_sem && !main_task->mm) {
+ pr_err("no mm to lock mmap_sem\n");
+ return -EINVAL;
+ }
+
+ if (test_file_path[0]) {
+ test_file = filp_open(test_file_path, O_RDONLY, 0);
+ if (IS_ERR(test_file)) {
+ pr_err("cannot find file_path\n");
+ return -EINVAL;
+ }
+ test_inode = file_inode(test_file);
+ } else if (test_lock_inode ||
+ test_lock_mapping ||
+ test_lock_sb_umount) {
+ pr_err("no file to lock\n");
+ return -EINVAL;
+ }
+
+ if (test_lock_inode && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;
+
+ if (test_lock_mapping && test_file && test_file->f_mapping)
+ lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;
+
+ if (test_lock_sb_umount && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;
+
+ pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
+ main_task->pid, time_secs, time_nsecs,
+ cooldown_secs, cooldown_nsecs, iterations, state,
+ all_cpus ? "all_cpus " : "",
+ iowait ? "iowait " : "",
+ test_disable_irq ? "disable_irq " : "",
+ disable_softirq ? "disable_softirq " : "",
+ disable_preempt ? "disable_preempt " : "",
+ lock_rcu ? "lock_rcu " : "",
+ lock_read ? "lock_read " : "",
+ touch_softlockup ? "touch_softlockup " : "",
+ touch_hardlockup ? "touch_hardlockup " : "",
+ call_cond_resched ? "call_cond_resched " : "",
+ reacquire_locks ? "reacquire_locks " : "");
+
+ if (alloc_pages_nr)
+ pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
+ alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
+ reallocate_pages ? "reallocate_pages " : "");
+
+ if (all_cpus) {
+ unsigned int cpu;
+
+ cpus_read_lock();
+
+ preempt_disable();
+ master_cpu = smp_processor_id();
+ for_each_online_cpu(cpu) {
+ INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&test_works, cpu));
+ }
+ preempt_enable();
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(&test_works, cpu));
+
+ cpus_read_unlock();
+ } else {
+ test_lockup(true);
+ }
+
+ if (measure_lock_wait)
+ pr_notice("Maximum lock wait: %lld ns\n",
+ atomic64_read(&max_lock_wait));
+
+ if (alloc_pages_nr)
+ pr_notice("Page allocation failed %u times\n",
+ atomic_read(&alloc_pages_failed));
+
+ pr_notice("FINISH in %llu ns\n", local_clock() - test_start);
+
+ if (test_file)
+ fput(test_file);
+
+ if (signal_pending(main_task))
+ return -EINTR;
+
+ return -EAGAIN;
+}
+module_init(test_lockup_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
+MODULE_DESCRIPTION("Test module to generate lockups");
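All of the knobs referenced above (time_secs, iterations, lock_mutex_ptr, touch_softlockup and so on) are set at insmod time; their declarations fall outside this hunk, but they presumably follow the standard module_param() pattern, roughly:

static unsigned int time_secs;
module_param(time_secs, uint, 0400);
MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");

static bool touch_softlockup;
module_param(touch_softlockup, bool, 0400);
MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");

So a ten-second uninterruptible lockup might be provoked with something like "modprobe test_lockup time_secs=10 state=D".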
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
index 2d7d257a430e..f93b1e145ada 100644
--- a/lib/test_stackinit.c
+++ b/lib/test_stackinit.c
@@ -92,8 +92,9 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
* @var_type: type to be tested for zeroing initialization
* @which: is this a SCALAR, STRING, or STRUCT type?
* @init_level: what kind of initialization is performed
+ * @xfail: is this test expected to fail?
*/
-#define DEFINE_TEST_DRIVER(name, var_type, which) \
+#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
/* Returns 0 on success, 1 on failure. */ \
static noinline __init int test_ ## name (void) \
{ \
@@ -139,13 +140,14 @@ static noinline __init int test_ ## name (void) \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] == 0xFF); \
\
- if (sum == 0) \
+ if (sum == 0) { \
pr_info(#name " ok\n"); \
- else \
- pr_warn(#name " FAIL (uninit bytes: %d)\n", \
- sum); \
- \
- return (sum != 0); \
+ return 0; \
+ } else { \
+ pr_warn(#name " %sFAIL (uninit bytes: %d)\n", \
+ (xfail) ? "X" : "", sum); \
+ return (xfail) ? 0 : 1; \
+ } \
}
#define DEFINE_TEST(name, var_type, which, init_level) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
@@ -189,7 +191,7 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
\
return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
} \
-DEFINE_TEST_DRIVER(name, var_type, which)
+DEFINE_TEST_DRIVER(name, var_type, which, 0)
/* Structure with no padding. */
struct test_packed {
@@ -326,8 +328,14 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
return __leaf_switch_none(2, fill);
}
-DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR);
-DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR);
+/*
+ * These are expected to fail for most configurations because neither
+ * GCC nor Clang have a way to perform initialization of variables in
+ * non-code areas (i.e. in a switch statement before the first "case").
+ * https://bugs.llvm.org/show_bug.cgi?id=44916
+ */
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, 1);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, 1);
static int __init test_stackinit_init(void)
{
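The "non-code area" mentioned in the comment above is the span between the switch head and its first case label: a declaration there is legal C, but any initializer on it is unreachable, which is why neither compiler can auto-initialize it and the two tests are marked as expected failures. A minimal standalone illustration (not part of the patch):

#include <stdint.h>

int probe(int selector)
{
	switch (selector) {
		uint64_t var;		/* declared in the "dead zone" before the first case */
	case 1:
		return (int)var;	/* var is never initialized: this reads garbage */
	default:
		return 0;
	}
}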
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 55c14e8c8859..d4f97925dbd8 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -12,6 +12,9 @@
static unsigned int tests_run;
static unsigned int tests_passed;
+static const unsigned int order_limit =
+ IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
+
#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
@@ -959,6 +962,20 @@ static noinline void check_multi_find_2(struct xarray *xa)
}
}
+static noinline void check_multi_find_3(struct xarray *xa)
+{
+ unsigned int order;
+
+ for (order = 5; order < order_limit; order++) {
+ unsigned long index = 1UL << (order - 5);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
+ xa_erase_index(xa, 0);
+ }
+}
+
static noinline void check_find_1(struct xarray *xa)
{
unsigned long i, j, k;
@@ -1081,6 +1098,7 @@ static noinline void check_find(struct xarray *xa)
for (i = 2; i < 10; i++)
check_multi_find_1(xa, i);
check_multi_find_2(xa);
+ check_multi_find_3(xa);
}
/* See find_swap_entry() in mm/shmem.c */
@@ -1138,6 +1156,42 @@ static noinline void check_find_entry(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_pause(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned int order;
+ unsigned long index = 1;
+ unsigned int count = 0;
+
+ for (order = 0; order < order_limit; order++) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ xa_destroy(xa);
+}
+
static noinline void check_move_tiny(struct xarray *xa)
{
XA_STATE(xas, xa, 0);
@@ -1646,6 +1700,7 @@ static int xarray_checks(void)
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
+ check_pause(&array);
check_account(&array);
check_destroy(&array);
check_move(&array);
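The new check_pause() test pins down the xas_pause() contract: pausing after every entry and resuming must walk exactly the same sequence of multi-order entries as an uninterrupted pass. A condensed sketch of the usage pattern it validates, assuming <linux/xarray.h>, an already-populated XArray and a hypothetical process() consumer:

XA_STATE(xas, &array, 0);
void *entry;

rcu_read_lock();
xas_for_each(&xas, entry, ULONG_MAX) {
	process(entry);		/* hypothetical consumer */
	xas_pause(&xas);	/* forget walk state; the next iteration restarts cleanly */
}
rcu_read_unlock();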
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index b352903c50e3..277cb4417ac2 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -52,7 +52,7 @@ struct ts_bm
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
- unsigned int good_shift[0];
+ unsigned int good_shift[];
};
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 9c873cadab7c..ab749ec10ab5 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -32,7 +32,7 @@
struct ts_fsm
{
unsigned int ntokens;
- struct ts_fsm_token tokens[0];
+ struct ts_fsm_token tokens[];
};
/* other values derived from ctype.h */
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 94617e014b3a..c77a3d537f24 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -36,7 +36,7 @@ struct ts_kmp
{
u8 * pattern;
unsigned int pattern_len;
- unsigned int prefix_tbl[0];
+ unsigned int prefix_tbl[];
};
static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
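The ts_bm, ts_fsm and ts_kmp hunks are the same mechanical conversion from the old GNU zero-length-array idiom to a C99 flexible array member; sizing and allocation are unaffected. The usual allocation pattern for such a structure, sketched with the kernel's struct_size() helper:

#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct ts_kmp *kmp;

/* one prefix_tbl[] slot per pattern byte, overflow-checked by struct_size() */
kmp = kzalloc(struct_size(kmp, prefix_tbl, pattern_len), GFP_KERNEL);
if (!kmp)
	return ERR_PTR(-ENOMEM);
kmp->pattern_len = pattern_len;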
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 7b9b58aee72c..f8c0ccf35f29 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -45,13 +45,6 @@ static bool was_reported(struct source_location *location)
return test_and_set_bit(REPORTED_BIT, &location->reported);
}
-static void print_source_location(const char *prefix,
- struct source_location *loc)
-{
- pr_err("%s %s:%d:%d\n", prefix, loc->file_name,
- loc->line & LINE_MASK, loc->column & COLUMN_MASK);
-}
-
static bool suppress_report(struct source_location *loc)
{
return current->in_ubsan || was_reported(loc);
@@ -140,13 +133,14 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}
-static void ubsan_prologue(struct source_location *location)
+static void ubsan_prologue(struct source_location *loc, const char *reason)
{
current->in_ubsan++;
pr_err("========================================"
"========================================\n");
- print_source_location("UBSAN: Undefined behaviour in", location);
+ pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
+ loc->line & LINE_MASK, loc->column & COLUMN_MASK);
}
static void ubsan_epilogue(void)
@@ -156,6 +150,17 @@ static void ubsan_epilogue(void)
"========================================\n");
current->in_ubsan--;
+
+ if (panic_on_warn) {
+ /*
+ * This thread may hit another WARN() in the panic path.
+ * Resetting this prevents additional WARN() from panicking the
+ * system on this thread. Other threads are blocked by the
+ * panic_mutex in panic().
+ */
+ panic_on_warn = 0;
+ panic("panic_on_warn set ...\n");
+ }
}
static void handle_overflow(struct overflow_data *data, void *lhs,
@@ -169,12 +174,12 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, type_is_signed(type) ?
+ "signed-integer-overflow" :
+ "unsigned-integer-overflow");
val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
- pr_err("%s integer overflow:\n",
- type_is_signed(type) ? "signed" : "unsigned");
pr_err("%s %c %s cannot be represented in type %s\n",
lhs_val_str,
op,
@@ -214,7 +219,7 @@ void __ubsan_handle_negate_overflow(struct overflow_data *data,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "negation-overflow");
val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
@@ -234,7 +239,7 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "division-overflow");
val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
@@ -253,7 +258,7 @@ static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "null-ptr-deref");
pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
@@ -268,7 +273,7 @@ static void handle_misaligned_access(struct type_mismatch_data_common *data,
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "misaligned-access");
pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
@@ -284,7 +289,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location);
+ ubsan_prologue(data->location, "object-size-mismatch");
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
@@ -343,7 +348,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "array-index-out-of-bounds");
val_to_string(index_str, sizeof(index_str), data->index_type, index);
pr_err("index %s is out of range for type %s\n", index_str,
@@ -364,7 +369,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
if (suppress_report(&data->location))
goto out;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "shift-out-of-bounds");
val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -396,7 +401,7 @@ EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
{
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "unreachable");
pr_err("calling __builtin_unreachable()\n");
ubsan_epilogue();
panic("can't return from __builtin_unreachable()");
@@ -411,7 +416,7 @@ void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location);
+ ubsan_prologue(&data->location, "invalid-load");
val_to_string(val_str, sizeof(val_str), data->type, val);
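With the reason string folded into ubsan_prologue(), the class of undefined behaviour now appears on the banner line itself ("UBSAN: signed-integer-overflow in file:line:col") instead of as a separate message under a generic header. Any instrumented UB will do as a trigger; a contrived overflow, for instance:

int trigger(int x)
{
	/* with x == INT_MAX, x + 1 is signed overflow: UBSAN reports it */
	return x + 1;
}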
diff --git a/lib/xarray.c b/lib/xarray.c
index 1d9fab7db8da..e9e641d3c0c3 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas)
xas->xa_node = XAS_RESTART;
if (node) {
- unsigned int offset = xas->xa_offset;
+ unsigned long offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
@@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1836,10 +1838,11 @@ static bool xas_sibling(struct xa_state *xas)
struct xa_node *node = xas->xa_node;
unsigned long mask;
- if (!node)
+ if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
return false;
mask = (XA_CHUNK_SIZE << node->shift) - 1;
- return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+ return (xas->xa_index & mask) >
+ ((unsigned long)xas->xa_offset << node->shift);
}
/**
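The xas_sibling() change fixes an integer-promotion hazard: xa_offset is a narrow type that promotes to a 32-bit int, so xa_offset << node->shift was evaluated in 32-bit arithmetic and could wrap before the 64-bit comparison. The added cast widens the operand first; schematically:

unsigned int off = 40, shift = 30;

unsigned long wrong = off << shift;			/* shifted in 32 bits: wraps */
unsigned long right = (unsigned long)off << shift;	/* widened first: 40UL << 30 as intended */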
diff --git a/mm/Kconfig b/mm/Kconfig
index ab80933be65f..691021492e78 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -237,6 +237,17 @@ config COMPACTION
linux-mm@kvack.org.
#
+# support for free page reporting
+config PAGE_REPORTING
+ bool "Free page reporting"
+ def_bool n
+ help
+ Free page reporting allows for the incremental acquisition of
+ free pages from the buddy allocator for the purpose of reporting
+ those pages to another entity, such as a hypervisor, so that the
+ memory can be freed within the host for other uses.
+
+#
# support for page migration
#
config MIGRATION
@@ -420,10 +431,6 @@ config THP_SWAP
For selection by architectures with reasonable THP sizes.
-config TRANSPARENT_HUGE_PAGECACHE
- def_bool y
- depends on TRANSPARENT_HUGEPAGE
-
#
# UP and nommu archs use km based percpu allocator
#
@@ -526,7 +533,6 @@ config MEM_SOFT_DIRTY
config ZSWAP
bool "Compressed cache for swap pages (EXPERIMENTAL)"
depends on FRONTSWAP && CRYPTO=y
- select CRYPTO_LZO
select ZPOOL
help
A lightweight compressed cache for swap pages. It takes
@@ -542,6 +548,123 @@ config ZSWAP
they have not been fully explored on the large set of potential
configurations and workloads that exist.
+choice
+ prompt "Compressed cache for swap pages default compressor"
+ depends on ZSWAP
+ default ZSWAP_COMPRESSOR_DEFAULT_LZO
+ help
+ Selects the default compression algorithm for the compressed cache
+ for swap pages.
+
+ For an overview of what kind of performance can be expected from
+ a particular compression algorithm, please refer to the benchmarks
+ available at the following LWN page:
+ https://lwn.net/Articles/751795/
+
+ If in doubt, select 'LZO'.
+
+ The selection made here can be overridden by using the kernel
+ command line 'zswap.compressor=' option.
+
+config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
+ bool "Deflate"
+ select CRYPTO_DEFLATE
+ help
+ Use the Deflate algorithm as the default compression algorithm.
+
+config ZSWAP_COMPRESSOR_DEFAULT_LZO
+ bool "LZO"
+ select CRYPTO_LZO
+ help
+ Use the LZO algorithm as the default compression algorithm.
+
+config ZSWAP_COMPRESSOR_DEFAULT_842
+ bool "842"
+ select CRYPTO_842
+ help
+ Use the 842 algorithm as the default compression algorithm.
+
+config ZSWAP_COMPRESSOR_DEFAULT_LZ4
+ bool "LZ4"
+ select CRYPTO_LZ4
+ help
+ Use the LZ4 algorithm as the default compression algorithm.
+
+config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
+ bool "LZ4HC"
+ select CRYPTO_LZ4HC
+ help
+ Use the LZ4HC algorithm as the default compression algorithm.
+
+config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
+ bool "zstd"
+ select CRYPTO_ZSTD
+ help
+ Use the zstd algorithm as the default compression algorithm.
+endchoice
+
+config ZSWAP_COMPRESSOR_DEFAULT
+ string
+ depends on ZSWAP
+ default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
+ default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
+ default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
+ default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
+ default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
+ default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
+ default ""
+
+choice
+ prompt "Compressed cache for swap pages default allocator"
+ depends on ZSWAP
+ default ZSWAP_ZPOOL_DEFAULT_ZBUD
+ help
+ Selects the default allocator for the compressed cache for
+ swap pages.
+ The default is 'zbud' for compatibility; however, please read
+ the description of each of the allocators below before making
+ your choice.
+
+ The selection made here can be overridden by using the kernel
+ command line 'zswap.zpool=' option.
+
+config ZSWAP_ZPOOL_DEFAULT_ZBUD
+ bool "zbud"
+ select ZBUD
+ help
+ Use the zbud allocator as the default allocator.
+
+config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+ bool "z3fold"
+ select Z3FOLD
+ help
+ Use the z3fold allocator as the default allocator.
+
+config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+ bool "zsmalloc"
+ select ZSMALLOC
+ help
+ Use the zsmalloc allocator as the default allocator.
+endchoice
+
+config ZSWAP_ZPOOL_DEFAULT
+ string
+ depends on ZSWAP
+ default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
+ default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+ default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+ default ""
+
+config ZSWAP_DEFAULT_ON
+ bool "Enable the compressed cache for swap pages by default"
+ depends on ZSWAP
+ help
+ If selected, the compressed cache for swap pages will be enabled
+ at boot, otherwise it will be disabled.
+
+ The selection made here can be overridden by using the kernel
+ command line 'zswap.enabled=' option.
+
config ZPOOL
tristate "Common API for compressed memory storage"
help
@@ -714,7 +837,7 @@ config GUP_GET_PTE_LOW_HIGH
config READ_ONLY_THP_FOR_FS
bool "Read-only THP for filesystems (EXPERIMENTAL)"
- depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM
+ depends on TRANSPARENT_HUGEPAGE && SHMEM
help
Allow khugepaged to put read-only file-backed pages in THP.
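The ZSWAP_COMPRESSOR_DEFAULT and ZSWAP_ZPOOL_DEFAULT string symbols added earlier in this Kconfig hunk exist so that the C side can adopt the Kconfig choice as an ordinary module-parameter default; mm/zswap.c presumably consumes them along these lines (a sketch, not the verbatim code):

static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
module_param_named(compressor, zswap_compressor, charp, 0644);

static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
module_param_named(zpool, zswap_zpool_type, charp, 0644);

static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
module_param_named(enabled, zswap_enabled, bool, 0644);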
diff --git a/mm/Makefile b/mm/Makefile
index 272e66039e70..fccd3756b25f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -6,6 +6,7 @@
KASAN_SANITIZE_slab_common.o := n
KASAN_SANITIZE_slab.o := n
KASAN_SANITIZE_slub.o := n
+KCSAN_SANITIZE_kmemleak.o := n
# These files are disabled because they produce non-interesting and/or
# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
@@ -110,3 +111,4 @@ obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
+obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 672d3c78c6ab..46f0fcc93081 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -481,6 +481,7 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page,
*/
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
struct compact_control *cc)
+ __acquires(lock)
{
/* Track if the lock is contended in async mode */
if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
@@ -894,12 +895,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/*
* Regardless of being on LRU, compound pages such as THP and
- * hugetlbfs are not to be compacted. We can potentially save
- * a lot of iterations if we skip them at once. The check is
- * racy, but we can consider only valid values and the only
- * danger is skipping too much.
+ * hugetlbfs are not to be compacted unless we are attempting
+ * an allocation much larger than the huge page size (eg CMA).
+ * We can potentially save a lot of iterations if we skip them
+ * at once. The check is racy, but we can consider only valid
+ * values and the only danger is skipping too much.
*/
- if (PageCompound(page)) {
+ if (PageCompound(page) && !cc->alloc_contig) {
const unsigned int order = compound_order(page);
if (likely(order < MAX_ORDER))
@@ -969,7 +971,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* and it's on LRU. It can only be a THP so the order
* is safe to read and it's 0 for tail pages.
*/
- if (unlikely(PageCompound(page))) {
+ if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
low_pfn += compound_nr(page) - 1;
goto isolate_fail;
}
@@ -981,12 +983,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (__isolate_lru_page(page, isolate_mode) != 0)
goto isolate_fail;
- VM_BUG_ON_PAGE(PageCompound(page), page);
+ /* The whole page is taken off the LRU; skip the tail pages. */
+ if (PageCompound(page))
+ low_pfn += compound_nr(page) - 1;
/* Successfully isolated */
del_page_from_lru_list(page, lruvec, page_lru(page));
- inc_node_page_state(page,
- NR_ISOLATED_ANON + page_is_file_cache(page));
+ mod_node_page_state(page_pgdat(page),
+ NR_ISOLATED_ANON + page_is_file_lru(page),
+ hpage_nr_pages(page));
isolate_success:
list_add(&page->lru, &cc->migratepages);
@@ -1590,7 +1595,11 @@ typedef enum {
* Allow userspace to control policy on scanning the unevictable LRU for
* compactable pages.
*/
+#ifdef CONFIG_PREEMPT_RT
+int sysctl_compact_unevictable_allowed __read_mostly = 0;
+#else
int sysctl_compact_unevictable_allowed __read_mostly = 1;
+#endif
static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
@@ -2174,7 +2183,6 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
ret = COMPACT_CONTENDED;
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
- last_migrated_pfn = 0;
goto out;
case ISOLATE_NONE:
if (update_cached) {
@@ -2310,8 +2318,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.page = NULL,
};
- if (capture)
- current->capture_control = &capc;
+ current->capture_control = &capc;
ret = compact_zone(&cc, &capc);
@@ -2333,6 +2340,7 @@ int sysctl_extfrag_threshold = 500;
* @alloc_flags: The allocation flags of the current allocation
* @ac: The context of current allocation
* @prio: Determines how hard direct compaction should try to succeed
+ * @capture: Where a pointer to the free page created by compaction is stored
*
* This is the main entry point for direct page compaction.
*/
diff --git a/mm/debug.c b/mm/debug.c
index ecccd9f17801..2189357f0987 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -44,8 +44,10 @@ const struct trace_print_flags vmaflag_names[] = {
void __dump_page(struct page *page, const char *reason)
{
+ struct page *head = compound_head(page);
struct address_space *mapping;
bool page_poisoned = PagePoisoned(page);
+ bool compound = PageCompound(page);
/*
* Accessing the pageblock without the zone lock. It could change to
* "isolate" again in the meantime, but since we are just dumping the
@@ -66,25 +68,43 @@ void __dump_page(struct page *page, const char *reason)
goto hex_only;
}
- mapping = page_mapping(page);
+ if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
+ /* Corrupt page, cannot call page_mapping */
+ mapping = page->mapping;
+ head = page;
+ compound = false;
+ } else {
+ mapping = page_mapping(page);
+ }
/*
* Avoid VM_BUG_ON() in page_mapcount().
* page->_mapcount space in struct page is used by sl[aou]b pages to
* encode own info.
*/
- mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+ mapcount = PageSlab(head) ? 0 : page_mapcount(page);
- if (PageCompound(page))
- pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
- "index:%#lx compound_mapcount: %d\n",
- page, page_ref_count(page), mapcount,
- page->mapping, page_to_pgoff(page),
- compound_mapcount(page));
+ if (compound)
+ if (hpage_pincount_available(page)) {
+ pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
+ "index:%#lx head:%px order:%u "
+ "compound_mapcount:%d compound_pincount:%d\n",
+ page, page_ref_count(head), mapcount,
+ mapping, page_to_pgoff(page), head,
+ compound_order(head), compound_mapcount(page),
+ compound_pincount(page));
+ } else {
+ pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
+ "index:%#lx head:%px order:%u "
+ "compound_mapcount:%d\n",
+ page, page_ref_count(head), mapcount,
+ mapping, page_to_pgoff(page), head,
+ compound_order(head), compound_mapcount(page));
+ }
else
- pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
+ pr_warn("page:%px refcount:%d mapcount:%d mapping:%p index:%#lx\n",
page, page_ref_count(page), mapcount,
- page->mapping, page_to_pgoff(page));
+ mapping, page_to_pgoff(page));
if (PageKsm(page))
type = "ksm ";
else if (PageAnon(page))
@@ -106,6 +126,10 @@ hex_only:
print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
sizeof(struct page), false);
+ if (head != page)
+ print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
+ sizeof(unsigned long), head,
+ sizeof(struct page), false);
if (reason)
pr_warn("page dumped because: %s\n", reason);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index fe5d33060415..f9fb9bbd733e 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -144,9 +144,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
else if (size < 4)
size = 4;
- if ((size % align) != 0)
- size = ALIGN(size, align);
-
+ size = ALIGN(size, align);
allocation = max_t(size_t, size, PAGE_SIZE);
if (!boundary)
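The dmapool simplification relies on ALIGN() being a no-op for already-aligned sizes: rounding up to a multiple of align leaves an exact multiple untouched, so the (size % align) guard added nothing. With align == 8, for example:

size = ALIGN(24, 8);	/* 24 is already a multiple of 8: stays 24 */
size = ALIGN(25, 8);	/* rounded up to the next multiple: becomes 32 */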
diff --git a/mm/filemap.c b/mm/filemap.c
index 1784478270e1..23a051a7ef0f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1386,7 +1386,7 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault_flag_allow_retry_first(flags)) {
/*
* CAUTION! In this case, mmap_sem is not released
* even though return 0.
@@ -1536,7 +1536,6 @@ out:
return page;
}
-EXPORT_SYMBOL(find_get_entry);
/**
* find_lock_entry - locate, pin and lock a page cache entry
@@ -1575,42 +1574,39 @@ repeat:
EXPORT_SYMBOL(find_lock_entry);
/**
- * pagecache_get_page - find and get a page reference
- * @mapping: the address_space to search
- * @offset: the page index
- * @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use for the page cache data page allocation
- *
- * Looks up the page cache slot at @mapping & @offset.
+ * pagecache_get_page - Find and get a reference to a page.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ * @fgp_flags: %FGP flags modify how the page is returned.
+ * @gfp_mask: Memory allocation flags to use if %FGP_CREAT is specified.
*
- * PCG flags modify how the page is returned.
+ * Looks up the page cache entry at @mapping & @index.
*
- * @fgp_flags can be:
+ * @fgp_flags can be zero or more of these flags:
*
- * - FGP_ACCESSED: the page will be marked accessed
- * - FGP_LOCK: Page is return locked
- * - FGP_CREAT: If page is not present then a new page is allocated using
- * @gfp_mask and added to the page cache and the VM's LRU
- * list. The page is returned locked and with an increased
- * refcount.
- * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
- * its own locking dance if the page is already in cache, or unlock the page
- * before returning if we had to add the page to pagecache.
+ * * %FGP_ACCESSED - The page will be marked accessed.
+ * * %FGP_LOCK - The page is returned locked.
+ * * %FGP_CREAT - If no page is present then a new page is allocated using
+ * @gfp_mask and added to the page cache and the VM's LRU list.
+ * The page is returned locked and with an increased refcount.
+ * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
+ * page is already in cache. If the page was allocated, unlock it before
+ * returning so the caller can do the same dance.
*
- * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
- * if the GFP flags specified for FGP_CREAT are atomic.
+ * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
+ * if the %GFP flags specified for %FGP_CREAT are atomic.
*
* If there is a page cache page, it is returned with an increased refcount.
*
- * Return: the found page or %NULL otherwise.
+ * Return: The found page or %NULL otherwise.
*/
-struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t gfp_mask)
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp_mask)
{
struct page *page;
repeat:
- page = find_get_entry(mapping, offset);
+ page = find_get_entry(mapping, index);
if (xa_is_value(page))
page = NULL;
if (!page)
@@ -1632,7 +1628,7 @@ repeat:
put_page(page);
goto repeat;
}
- VM_BUG_ON_PAGE(page->index != offset, page);
+ VM_BUG_ON_PAGE(page->index != index, page);
}
if (fgp_flags & FGP_ACCESSED)
@@ -1657,7 +1653,7 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
- err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
+ err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
if (unlikely(err)) {
put_page(page);
page = NULL;
@@ -1697,6 +1693,11 @@ EXPORT_SYMBOL(pagecache_get_page);
* Any shadow entries of evicted pages, or swap entries from
* shmem/tmpfs, are included in the returned array.
*
+ * If it finds a Transparent Huge Page, head or tail, find_get_entries()
+ * stops at that page: the caller is likely to have a better way to handle
+ * the compound page as a whole, and then skip its extent, than repeatedly
+ * calling find_get_entries() to return all its tails.
+ *
* Return: the number of pages and shadow entries which were found.
*/
unsigned find_get_entries(struct address_space *mapping,
@@ -1728,8 +1729,15 @@ unsigned find_get_entries(struct address_space *mapping,
/* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
- page = find_subpage(page, xas.xa_index);
+ /*
+ * Terminate early on finding a THP, to allow the caller to
+ * handle it all at once; but continue if this is hugetlbfs.
+ */
+ if (PageTransHuge(page) && !PageHuge(page)) {
+ page = find_subpage(page, xas.xa_index);
+ nr_entries = ret + 1;
+ }
export:
indices[ret] = xas.xa_index;
entries[ret] = page;
@@ -1962,8 +1970,7 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
*
* It is going insane. Fix it by quickly scaling down the readahead size.
*/
-static void shrink_readahead_size_eio(struct file *filp,
- struct file_ra_state *ra)
+static void shrink_readahead_size_eio(struct file_ra_state *ra)
{
ra->ra_pages /= 4;
}
@@ -2188,7 +2195,7 @@ readpage:
goto find_page;
}
unlock_page(page);
- shrink_readahead_size_eio(filp, ra);
+ shrink_readahead_size_eio(ra);
error = -EIO;
goto readpage_error;
}
@@ -2416,7 +2423,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
pgoff_t offset = vmf->pgoff;
/* If we don't want any read-ahead, don't bother */
- if (vmf->vma->vm_flags & VM_RAND_READ)
+ if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
return fpin;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
@@ -2491,7 +2498,7 @@ retry_find:
if (!page) {
if (fpin)
goto out_retry;
- return vmf_error(-ENOMEM);
+ return VM_FAULT_OOM;
}
}
@@ -2560,7 +2567,7 @@ page_not_uptodate:
goto retry_find;
/* Things didn't work out. Return zero to tell the mm layer so. */
- shrink_readahead_size_eio(file, ra);
+ shrink_readahead_size_eio(ra);
return VM_FAULT_SIGBUS;
out_retry:
@@ -2823,6 +2830,14 @@ filler:
unlock_page(page);
goto out;
}
+
+ /*
+ * A previous I/O error may have been due to a temporary
+ * failure. Clear the page error before the actual read;
+ * PG_error will be set again if the read fails.
+ */
+ ClearPageError(page);
goto filler;
out:
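The rewritten pagecache_get_page() kernel-doc is easiest to read against a typical call; a sketch of the lookup-or-create pattern the FGP flags describe:

struct page *page;

page = pagecache_get_page(mapping, index,
			  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			  mapping_gfp_mask(mapping));
if (!page)
	return -ENOMEM;

/* page comes back locked and with an elevated refcount */
unlock_page(page);
put_page(page);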
diff --git a/mm/gup.c b/mm/gup.c
index 1b521e0ac1de..a21230569520 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,6 +29,22 @@ struct follow_page_context {
unsigned int page_mask;
};
+static void hpage_pincount_add(struct page *page, int refs)
+{
+ VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
+ VM_BUG_ON_PAGE(page != compound_head(page), page);
+
+ atomic_add(refs, compound_pincount_ptr(page));
+}
+
+static void hpage_pincount_sub(struct page *page, int refs)
+{
+ VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
+ VM_BUG_ON_PAGE(page != compound_head(page), page);
+
+ atomic_sub(refs, compound_pincount_ptr(page));
+}
+
/*
* Return the compound head page with ref appropriately incremented,
* or NULL if that failed.
@@ -44,6 +60,195 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
return head;
}
+/*
+ * try_grab_compound_head() - attempt to elevate a page's refcount, by a
+ * flags-dependent amount.
+ *
+ * "grab" names in this file mean, "look at flags to decide whether to use
+ * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
+ *
+ * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
+ * same time. (That's true throughout the get_user_pages*() and
+ * pin_user_pages*() APIs.) Cases:
+ *
+ * FOLL_GET: page's refcount will be incremented by 1.
+ * FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
+ *
+ * Return: head page (with refcount appropriately incremented) for success, or
+ * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
+ * considered failure, and furthermore, a likely bug in the caller, so a warning
+ * is also emitted.
+ */
+static __maybe_unused struct page *try_grab_compound_head(struct page *page,
+ int refs,
+ unsigned int flags)
+{
+ if (flags & FOLL_GET)
+ return try_get_compound_head(page, refs);
+ else if (flags & FOLL_PIN) {
+ int orig_refs = refs;
+
+ /*
+ * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
+ * path, so fail and let the caller fall back to the slow path.
+ */
+ if (unlikely(flags & FOLL_LONGTERM) &&
+ is_migrate_cma_page(page))
+ return NULL;
+
+ /*
+ * When pinning a compound page of order > 1 (which is what
+ * hpage_pincount_available() checks for), use an exact count to
+ * track it, via hpage_pincount_add/_sub().
+ *
+ * However, be sure to *also* increment the normal page refcount
+ * field at least once, so that the page really is pinned.
+ */
+ if (!hpage_pincount_available(page))
+ refs *= GUP_PIN_COUNTING_BIAS;
+
+ page = try_get_compound_head(page, refs);
+ if (!page)
+ return NULL;
+
+ if (hpage_pincount_available(page))
+ hpage_pincount_add(page, refs);
+
+ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
+ orig_refs);
+
+ return page;
+ }
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+/**
+ * try_grab_page() - elevate a page's refcount by a flag-dependent amount
+ *
+ * This might not do anything at all, depending on the flags argument.
+ *
+ * "grab" names in this file mean, "look at flags to decide whether to use
+ * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
+ *
+ * @page: pointer to page to be grabbed
+ * @flags: gup flags: these are the FOLL_* flag values.
+ *
+ * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
+ * time. Cases:
+ *
+ * FOLL_GET: page's refcount will be incremented by 1.
+ * FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
+ *
+ * Return: true for success, or if no action was required (if neither FOLL_PIN
+ * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
+ * FOLL_PIN was set, but the page could not be grabbed.
+ */
+bool __must_check try_grab_page(struct page *page, unsigned int flags)
+{
+ WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
+
+ if (flags & FOLL_GET)
+ return try_get_page(page);
+ else if (flags & FOLL_PIN) {
+ int refs = 1;
+
+ page = compound_head(page);
+
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
+
+ if (hpage_pincount_available(page))
+ hpage_pincount_add(page, 1);
+ else
+ refs = GUP_PIN_COUNTING_BIAS;
+
+ /*
+ * Similar to try_grab_compound_head(): even if using the
+ * hpage_pincount_add/_sub() routines, be sure to
+ * *also* increment the normal page refcount field at least
+ * once, so that the page really is pinned.
+ */
+ page_ref_add(page, refs);
+
+ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
+ }
+
+ return true;
+}
+
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+static bool __unpin_devmap_managed_user_page(struct page *page)
+{
+ int count, refs = 1;
+
+ if (!page_is_devmap_managed(page))
+ return false;
+
+ if (hpage_pincount_available(page))
+ hpage_pincount_sub(page, 1);
+ else
+ refs = GUP_PIN_COUNTING_BIAS;
+
+ count = page_ref_sub_return(page, refs);
+
+ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1);
+ /*
+ * devmap page refcounts are 1-based, rather than 0-based: if
+ * refcount is 1, then the page is free and the refcount is
+ * stable because nobody holds a reference on the page.
+ */
+ if (count == 1)
+ free_devmap_managed_page(page);
+ else if (!count)
+ __put_page(page);
+
+ return true;
+}
+#else
+static bool __unpin_devmap_managed_user_page(struct page *page)
+{
+ return false;
+}
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
+
+/**
+ * unpin_user_page() - release a dma-pinned page
+ * @page: pointer to page to be released
+ *
+ * Pages that were pinned via pin_user_pages*() must be released via either
+ * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
+ * that such pages can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special handling.
+ */
+void unpin_user_page(struct page *page)
+{
+ int refs = 1;
+
+ page = compound_head(page);
+
+ /*
+ * For devmap managed pages we need to catch refcount transition from
+ * GUP_PIN_COUNTING_BIAS to 1: when the refcount reaches one, the
+ * page is free and we need to inform the device driver through the
+ * callback. See include/linux/memremap.h and HMM for details.
+ */
+ if (__unpin_devmap_managed_user_page(page))
+ return;
+
+ if (hpage_pincount_available(page))
+ hpage_pincount_sub(page, 1);
+ else
+ refs = GUP_PIN_COUNTING_BIAS;
+
+ if (page_ref_sub_and_test(page, refs))
+ __put_page(page);
+
+ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1);
+}
+EXPORT_SYMBOL(unpin_user_page);
+
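The accounting that try_grab_page() and unpin_user_page() implement can be summarized numerically: on an order-0 page a FOLL_PIN moves the refcount by GUP_PIN_COUNTING_BIAS (1024) rather than 1, so later code can infer "probably DMA-pinned" from the count alone. A worked sketch, assuming a page whose refcount starts at 1:

/* refcount == 1: only the owner's reference */
try_grab_page(page, FOLL_GET);	/* ordinary get: refcount == 2 */
try_grab_page(page, FOLL_PIN);	/* pin: refcount == 2 + 1024 == 1026 */

unpin_user_page(page);		/* drop the pin: refcount == 2 */
put_page(page);			/* drop the get: refcount == 1 */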
/**
* unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
* @pages: array of pages to be maybe marked dirty, and definitely released.
@@ -146,7 +351,8 @@ static struct page *no_page_table(struct vm_area_struct *vma,
* But we can only make this optimization where a hole would surely
* be zero-filled if handle_mm_fault() actually did handle it.
*/
- if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
+ if ((flags & FOLL_DUMP) &&
+ (vma_is_anonymous(vma) || !vma->vm_ops->fault))
return ERR_PTR(-EFAULT);
return NULL;
}
@@ -193,6 +399,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
struct page *page;
spinlock_t *ptl;
pte_t *ptep, pte;
+ int ret;
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
@@ -230,10 +437,11 @@ retry:
}
page = vm_normal_page(vma, address, pte);
- if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
+ if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
/*
- * Only return device mapping pages in the FOLL_GET case since
- * they are only valid while holding the pgmap reference.
+ * Only return device mapping pages in the FOLL_GET or FOLL_PIN
+ * case since they are only valid while holding the pgmap
+ * reference.
*/
*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
if (*pgmap)
@@ -250,8 +458,6 @@ retry:
if (is_zero_pfn(pte_pfn(pte))) {
page = pte_page(pte);
} else {
- int ret;
-
ret = follow_pfn_pte(vma, address, ptep, flags);
page = ERR_PTR(ret);
goto out;
@@ -259,7 +465,6 @@ retry:
}
if (flags & FOLL_SPLIT && PageTransCompound(page)) {
- int ret;
get_page(page);
pte_unmap_unlock(ptep, ptl);
lock_page(page);
@@ -271,9 +476,21 @@ retry:
goto retry;
}
- if (flags & FOLL_GET) {
- if (unlikely(!try_get_page(page))) {
- page = ERR_PTR(-ENOMEM);
+ /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
+ if (unlikely(!try_grab_page(page, flags))) {
+ page = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ /*
+ * We need to make the page accessible if and only if we are going
+ * to access its content (the FOLL_PIN case). Please see
+ * Documentation/core-api/pin_user_pages.rst for details.
+ */
+ if (flags & FOLL_PIN) {
+ ret = arch_make_page_accessible(page);
+ if (ret) {
+ unpin_user_page(page);
+ page = ERR_PTR(ret);
goto out;
}
}
@@ -537,7 +754,7 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
/* make this handle hugepd */
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
- BUG_ON(flags & FOLL_GET);
+ WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
return page;
}
@@ -630,12 +847,12 @@ unmap:
}
/*
- * mmap_sem must be held on entry. If @nonblocking != NULL and
- * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
- * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
+ * mmap_sem must be held on entry. If @locked != NULL and *@flags
+ * does not include FOLL_NOWAIT, the mmap_sem may be released. If it
+ * is, *@locked will be set to 0 and -EBUSY returned.
*/
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
- unsigned long address, unsigned int *flags, int *nonblocking)
+ unsigned long address, unsigned int *flags, int *locked)
{
unsigned int fault_flags = 0;
vm_fault_t ret;
@@ -647,12 +864,15 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
- if (nonblocking)
- fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+ if (locked)
+ fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (*flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
if (*flags & FOLL_TRIED) {
- VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
+ /*
+ * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
+ * can co-exist
+ */
fault_flags |= FAULT_FLAG_TRIED;
}
@@ -673,8 +893,8 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
}
if (ret & VM_FAULT_RETRY) {
- if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
- *nonblocking = 0;
+ if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
+ *locked = 0;
return -EBUSY;
}
@@ -751,7 +971,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
- * @nonblocking: whether waiting for disk IO or mmap_sem contention
+ * @locked: whether the mmap_sem is still held
*
* Returns either number of pages pinned (which may be less than the
* number requested), or an error. Details about the return value:
@@ -786,13 +1006,11 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* appropriate) must be called after the page is finished with, and
* before put_page is called.
*
- * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
- * or mmap_sem contention, and if waiting is needed to pin all pages,
- * *@nonblocking will be set to 0. Further, if @gup_flags does not
- * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
- * this case.
+ * If @locked != NULL, *@locked will be set to 0 when mmap_sem is
+ * released by an up_read(). That can happen if @gup_flags does not
+ * have FOLL_NOWAIT.
*
- * A caller using such a combination of @nonblocking and @gup_flags
+ * A caller using such a combination of @locked and @gup_flags
* must therefore hold the mmap_sem for reading only, and recognize
* when it's been released. Otherwise, it must be held for either
* reading or writing and will not be released.
@@ -804,7 +1022,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking)
+ struct vm_area_struct **vmas, int *locked)
{
long ret = 0, i = 0;
struct vm_area_struct *vma = NULL;
@@ -850,7 +1068,17 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
- gup_flags, nonblocking);
+ gup_flags, locked);
+ if (locked && *locked == 0) {
+ /*
+ * We've got a VM_FAULT_RETRY
+ * and we've lost mmap_sem.
+ * We must stop here.
+ */
+ BUG_ON(gup_flags & FOLL_NOWAIT);
+ BUG_ON(ret != 0);
+ goto out;
+ }
continue;
}
}
@@ -868,13 +1096,13 @@ retry:
page = follow_page_mask(vma, start, foll_flags, &ctx);
if (!page) {
ret = faultin_page(tsk, vma, start, &foll_flags,
- nonblocking);
+ locked);
switch (ret) {
case 0:
goto retry;
case -EBUSY:
ret = 0;
- /* FALLTHRU */
+ fallthrough;
case -EFAULT:
case -ENOMEM:
case -EHWPOISON:
@@ -980,7 +1208,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
address = untagged_addr(address);
if (unlocked)
- fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+ fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
retry:
vma = find_extend_vma(mm, address);
@@ -1004,7 +1232,6 @@ retry:
down_read(&mm->mmap_sem);
if (!(fault_flags & FAULT_FLAG_TRIED)) {
*unlocked = true;
- fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
fault_flags |= FAULT_FLAG_TRIED;
goto retry;
}
@@ -1088,17 +1315,36 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
if (likely(pages))
pages += ret;
start += ret << PAGE_SHIFT;
+ lock_dropped = true;
+retry:
/*
* Repeat on the address that fired VM_FAULT_RETRY
- * without FAULT_FLAG_ALLOW_RETRY but with
- * FAULT_FLAG_TRIED.
+ * with both FAULT_FLAG_ALLOW_RETRY and
+ * FAULT_FLAG_TRIED. Note that GUP can be interrupted
+ * by fatal signals, so we need to check it before we
+ * start trying again, otherwise it can loop forever.
*/
+
+ if (fatal_signal_pending(current))
+ break;
+
*locked = 1;
- lock_dropped = true;
- down_read(&mm->mmap_sem);
+ ret = down_read_killable(&mm->mmap_sem);
+ if (ret) {
+ BUG_ON(ret > 0);
+ if (!pages_done)
+ pages_done = ret;
+ break;
+ }
+
ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
- pages, NULL, NULL);
+ pages, NULL, locked);
+ if (!*locked) {
+ /* Continue to retry until we succeed */
+ BUG_ON(ret != 0);
+ goto retry;
+ }
if (ret != 1) {
BUG_ON(ret > 1);
if (!pages_done)
@@ -1129,7 +1375,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
* @vma: target vma
* @start: start address
* @end: end address
- * @nonblocking:
+ * @locked: whether the mmap_sem is still held
*
* This takes care of mlocking the pages too if VM_LOCKED is set.
*
@@ -1137,14 +1383,14 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
*
* vma->vm_mm->mmap_sem must be held.
*
- * If @nonblocking is NULL, it may be held for read or write and will
+ * If @locked is NULL, it may be held for read or write and will
* be unperturbed.
*
- * If @nonblocking is non-NULL, it must held for read only and may be
- * released. If it's released, *@nonblocking will be set to 0.
+ * If @locked is non-NULL, it must be held for read only and may be
+ * released. If it's released, *@locked will be set to 0.
*/
long populate_vma_page_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end, int *nonblocking)
+ unsigned long start, unsigned long end, int *locked)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
@@ -1171,7 +1417,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
* We want mlock to succeed for regions that have any permissions
* other than PROT_NONE.
*/
- if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+ if (vma_is_accessible(vma))
gup_flags |= FOLL_FORCE;
/*
@@ -1179,7 +1425,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
* not result in a stack expansion that recurses back here.
*/
return __get_user_pages(current, mm, start, nr_pages, gup_flags,
- NULL, NULL, nonblocking);
+ NULL, NULL, locked);
}
/*
@@ -1431,7 +1677,7 @@ check_again:
list_add_tail(&head->lru, &cma_page_list);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON +
- page_is_file_cache(head),
+ page_is_file_lru(head),
hpage_nr_pages(head));
}
}
@@ -1557,6 +1803,37 @@ static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
}
#endif /* CONFIG_FS_DAX || CONFIG_CMA */
+#ifdef CONFIG_MMU
+static long __get_user_pages_remote(struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas, int *locked)
+{
+ /*
+ * Parts of FOLL_LONGTERM behavior are incompatible with
+ * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
+ * vmas. However, this only comes up if locked is set, and there are
+ * callers that do request FOLL_LONGTERM, but do not set locked. So,
+ * allow what we can.
+ */
+ if (gup_flags & FOLL_LONGTERM) {
+ if (WARN_ON_ONCE(locked))
+ return -EINVAL;
+ /*
+ * This will check the vmas (even if our vmas arg is NULL)
+ * and return -ENOTSUPP if DAX isn't allowed in this case:
+ */
+ return __gup_longterm_locked(tsk, mm, start, nr_pages, pages,
+ vmas, gup_flags | FOLL_TOUCH |
+ FOLL_REMOTE);
+ }
+
+ return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+ locked,
+ gup_flags | FOLL_TOUCH | FOLL_REMOTE);
+}
+
/*
* get_user_pages_remote() - pin user pages in memory
* @tsk: the task_struct to use for page fault accounting, or
@@ -1619,7 +1896,6 @@ static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
* should use get_user_pages because it cannot pass
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
-#ifdef CONFIG_MMU
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
@@ -1632,28 +1908,8 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
return -EINVAL;
- /*
- * Parts of FOLL_LONGTERM behavior are incompatible with
- * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
- * vmas. However, this only comes up if locked is set, and there are
- * callers that do request FOLL_LONGTERM, but do not set locked. So,
- * allow what we can.
- */
- if (gup_flags & FOLL_LONGTERM) {
- if (WARN_ON_ONCE(locked))
- return -EINVAL;
- /*
- * This will check the vmas (even if our vmas arg is NULL)
- * and return -ENOTSUPP if DAX isn't allowed in this case:
- */
- return __gup_longterm_locked(tsk, mm, start, nr_pages, pages,
- vmas, gup_flags | FOLL_TOUCH |
- FOLL_REMOTE);
- }
-
- return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
- locked,
- gup_flags | FOLL_TOUCH | FOLL_REMOTE);
+ return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
+ pages, vmas, locked);
}
EXPORT_SYMBOL(get_user_pages_remote);
@@ -1665,6 +1921,15 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
{
return 0;
}
+
+static long __get_user_pages_remote(struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas, int *locked)
+{
+ return 0;
+}
#endif /* !CONFIG_MMU */
/*
@@ -1804,7 +2069,31 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* This code is based heavily on the PowerPC implementation by Nick Piggin.
*/
#ifdef CONFIG_HAVE_FAST_GUP
+
+static void put_compound_head(struct page *page, int refs, unsigned int flags)
+{
+ if (flags & FOLL_PIN) {
+ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
+ refs);
+
+ if (hpage_pincount_available(page))
+ hpage_pincount_sub(page, refs);
+ else
+ refs *= GUP_PIN_COUNTING_BIAS;
+ }
+
+ VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
+ /*
+ * Calling put_page() for each ref is unnecessarily slow. Only the last
+ * ref needs a put_page().
+ */
+ if (refs > 1)
+ page_ref_sub(page, refs - 1);
+ put_page(page);
+}
+
#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
+
/*
* WARNING: only to be used in the get_user_pages_fast() implementation.
*
@@ -1860,13 +2149,17 @@ static inline pte_t gup_get_pte(pte_t *ptep)
#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+ unsigned int flags,
struct page **pages)
{
while ((*nr) - nr_start) {
struct page *page = pages[--(*nr)];
ClearPageReferenced(page);
- put_page(page);
+ if (flags & FOLL_PIN)
+ unpin_user_page(page);
+ else
+ put_page(page);
}
}
@@ -1899,7 +2192,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
if (unlikely(!pgmap)) {
- undo_dev_pagemap(nr, nr_start, pages);
+ undo_dev_pagemap(nr, nr_start, flags, pages);
goto pte_unmap;
}
} else if (pte_special(pte))
@@ -1908,17 +2201,30 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- head = try_get_compound_head(page, 1);
+ head = try_grab_compound_head(page, 1, flags);
if (!head)
goto pte_unmap;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(head);
+ put_compound_head(head, 1, flags);
goto pte_unmap;
}
VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ /*
+ * We need to make the page accessible if and only if we are
+ * going to access its content (the FOLL_PIN case). Please
+ * see Documentation/core-api/pin_user_pages.rst for
+ * details.
+ */
+ if (flags & FOLL_PIN) {
+ ret = arch_make_page_accessible(page);
+ if (ret) {
+ unpin_user_page(page);
+ goto pte_unmap;
+ }
+ }
SetPageReferenced(page);
pages[*nr] = page;
(*nr)++;
@@ -1953,7 +2259,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
int nr_start = *nr;
struct dev_pagemap *pgmap = NULL;
@@ -1963,12 +2270,15 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
pgmap = get_dev_pagemap(pfn, pgmap);
if (unlikely(!pgmap)) {
- undo_dev_pagemap(nr, nr_start, pages);
+ undo_dev_pagemap(nr, nr_start, flags, pages);
return 0;
}
SetPageReferenced(page);
pages[*nr] = page;
- get_page(page);
+ if (unlikely(!try_grab_page(page, flags))) {
+ undo_dev_pagemap(nr, nr_start, flags, pages);
+ return 0;
+ }
(*nr)++;
pfn++;
} while (addr += PAGE_SIZE, addr != end);
@@ -1979,48 +2289,52 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
}
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+ if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
return 0;
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
- undo_dev_pagemap(nr, nr_start, pages);
+ undo_dev_pagemap(nr, nr_start, flags, pages);
return 0;
}
return 1;
}
static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
unsigned long fault_pfn;
int nr_start = *nr;
fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
+ if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
return 0;
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
- undo_dev_pagemap(nr, nr_start, pages);
+ undo_dev_pagemap(nr, nr_start, flags, pages);
return 0;
}
return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
}
static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
BUILD_BUG();
return 0;
@@ -2038,18 +2352,6 @@ static int record_subpages(struct page *page, unsigned long addr,
return nr;
}
-static void put_compound_head(struct page *page, int refs)
-{
- VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
- /*
- * Calling put_page() for each ref is unnecessarily slow. Only the last
- * ref needs a put_page().
- */
- if (refs > 1)
- page_ref_sub(page, refs - 1);
- put_page(page);
-}
-
#ifdef CONFIG_ARCH_HAS_HUGEPD
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
unsigned long sz)
@@ -2083,12 +2385,12 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
- head = try_get_compound_head(head, refs);
+ head = try_grab_compound_head(head, refs, flags);
if (!head)
return 0;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_compound_head(head, refs);
+ put_compound_head(head, refs, flags);
return 0;
}
@@ -2136,18 +2438,19 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
if (pmd_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
- return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
+ return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
+ pages, nr);
}
page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
- head = try_get_compound_head(pmd_page(orig), refs);
+ head = try_grab_compound_head(pmd_page(orig), refs, flags);
if (!head)
return 0;
if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
- put_compound_head(head, refs);
+ put_compound_head(head, refs, flags);
return 0;
}
@@ -2157,7 +2460,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
- unsigned long end, unsigned int flags, struct page **pages, int *nr)
+ unsigned long end, unsigned int flags,
+ struct page **pages, int *nr)
{
struct page *head, *page;
int refs;
@@ -2168,18 +2472,19 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
if (pud_devmap(orig)) {
if (unlikely(flags & FOLL_LONGTERM))
return 0;
- return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
+ return __gup_device_huge_pud(orig, pudp, addr, end, flags,
+ pages, nr);
}
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
- head = try_get_compound_head(pud_page(orig), refs);
+ head = try_grab_compound_head(pud_page(orig), refs, flags);
if (!head)
return 0;
if (unlikely(pud_val(orig) != pud_val(*pudp))) {
- put_compound_head(head, refs);
+ put_compound_head(head, refs, flags);
return 0;
}
@@ -2203,12 +2508,12 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
refs = record_subpages(page, addr, end, pages + *nr);
- head = try_get_compound_head(pgd_page(orig), refs);
+ head = try_grab_compound_head(pgd_page(orig), refs, flags);
if (!head)
return 0;
if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
- put_compound_head(head, refs);
+ put_compound_head(head, refs, flags);
return 0;
}
@@ -2370,7 +2675,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
{
unsigned long len, end;
unsigned long flags;
- int nr = 0;
+ int nr_pinned = 0;
+ /*
+ * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
+ * because gup fast is always a "pin with a +1 page refcount" request.
+ */
+ unsigned int gup_flags = FOLL_GET;
+
+ if (write)
+ gup_flags |= FOLL_WRITE;
start = untagged_addr(start) & PAGE_MASK;
len = (unsigned long) nr_pages << PAGE_SHIFT;
@@ -2396,11 +2709,11 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
gup_fast_permitted(start, end)) {
local_irq_save(flags);
- gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
+ gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
local_irq_restore(flags);
}
- return nr;
+ return nr_pinned;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
@@ -2432,10 +2745,10 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
struct page **pages)
{
unsigned long addr, len, end;
- int nr = 0, ret = 0;
+ int nr_pinned = 0, ret = 0;
if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
- FOLL_FORCE | FOLL_PIN)))
+ FOLL_FORCE | FOLL_PIN | FOLL_GET)))
return -EINVAL;
start = untagged_addr(start) & PAGE_MASK;
@@ -2451,25 +2764,25 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
gup_fast_permitted(start, end)) {
local_irq_disable();
- gup_pgd_range(addr, end, gup_flags, pages, &nr);
+ gup_pgd_range(addr, end, gup_flags, pages, &nr_pinned);
local_irq_enable();
- ret = nr;
+ ret = nr_pinned;
}
- if (nr < nr_pages) {
+ if (nr_pinned < nr_pages) {
/* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
+ start += nr_pinned << PAGE_SHIFT;
+ pages += nr_pinned;
- ret = __gup_longterm_unlocked(start, nr_pages - nr,
+ ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned,
gup_flags, pages);
/* Have to be a bit careful with return values */
- if (nr > 0) {
+ if (nr_pinned > 0) {
if (ret < 0)
- ret = nr;
+ ret = nr_pinned;
else
- ret += nr;
+ ret += nr_pinned;
}
}
@@ -2478,11 +2791,11 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
/**
* get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @gup_flags: flags modifying pin behaviour
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @gup_flags: flags modifying pin behaviour
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
@@ -2502,6 +2815,13 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
return -EINVAL;
+ /*
+ * The caller may or may not have explicitly set FOLL_GET; either way is
+ * OK. However, internally (within mm/gup.c), gup fast variants must set
+ * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
+ * request.
+ */
+ gup_flags |= FOLL_GET;
return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
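To illustrate the caller-side contract established above: get_user_pages_fast() now implies FOLL_GET and rejects FOLL_PIN, and every page it returns still carries a plain +1 refcount. A minimal, hypothetical kernel caller (an illustration only, not part of this patch) might look like:

static int example_read_user_buffer(unsigned long uaddr, int nr)
{
	struct page **pages;
	int i, pinned;

	pages = kmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* FOLL_GET is implied; passing FOLL_PIN here would get -EINVAL. */
	pinned = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

	for (i = 0; i < pinned; i++) {
		/* ... use the page contents ... */
		put_page(pages[i]);	/* FOLL_GET pages: put_page() */
	}
	kfree(pages);
	return pinned < 0 ? pinned : 0;
}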
@@ -2509,9 +2829,18 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
/**
* pin_user_pages_fast() - pin user pages in memory without taking locks
*
- * For now, this is a placeholder function, until various call sites are
- * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
- * this is identical to get_user_pages_fast().
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @gup_flags: flags modifying pin behaviour
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
+ * get_user_pages_fast() for documentation on the function arguments, because
+ * the arguments here are identical.
+ *
+ * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
+ * see Documentation/core-api/pin_user_pages.rst for further details.
*
* This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
* is NOT intended for Case 2 (RDMA: long-term pins).
@@ -2519,21 +2848,39 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
{
- /*
- * This is a placeholder, until the pin functionality is activated.
- * Until then, just behave like the corresponding get_user_pages*()
- * routine.
- */
- return get_user_pages_fast(start, nr_pages, gup_flags, pages);
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE(gup_flags & FOLL_GET))
+ return -EINVAL;
+
+ gup_flags |= FOLL_PIN;
+ return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(pin_user_pages_fast);
/**
* pin_user_pages_remote() - pin pages of a remote process (task != current)
*
- * For now, this is a placeholder function, until various call sites are
- * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
- * this is identical to get_user_pages_remote().
+ * @tsk: the task_struct to use for page fault accounting, or
+ * NULL if faults are not to be recorded.
+ * @mm: mm_struct of target mm
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @gup_flags: flags modifying lookup behaviour
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long. Or NULL, if caller
+ * only intends to ensure the pages are faulted in.
+ * @vmas: array of pointers to vmas corresponding to each page.
+ * Or NULL if the caller does not require them.
+ * @locked: pointer to lock flag indicating whether lock is held and
+ * subsequently whether VM_FAULT_RETRY functionality can be
+ * utilised. Lock must initially be held.
+ *
+ * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
+ * get_user_pages_remote() for documentation on the function arguments, because
+ * the arguments here are identical.
+ *
+ * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
+ * see Documentation/core-api/pin_user_pages.rst for details.
*
* This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
* is NOT intended for Case 2 (RDMA: long-term pins).
@@ -2543,22 +2890,33 @@ long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked)
{
- /*
- * This is a placeholder, until the pin functionality is activated.
- * Until then, just behave like the corresponding get_user_pages*()
- * routine.
- */
- return get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, pages,
- vmas, locked);
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE(gup_flags & FOLL_GET))
+ return -EINVAL;
+
+ gup_flags |= FOLL_PIN;
+ return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
+ pages, vmas, locked);
}
EXPORT_SYMBOL(pin_user_pages_remote);
/**
* pin_user_pages() - pin user pages in memory for use by other devices
*
- * For now, this is a placeholder function, until various call sites are
- * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
- * this is identical to get_user_pages().
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @gup_flags: flags modifying lookup behaviour
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long. Or NULL, if caller
+ * only intends to ensure the pages are faulted in.
+ * @vmas: array of pointers to vmas corresponding to each page.
+ * Or NULL if the caller does not require them.
+ *
+ * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
+ * FOLL_PIN is set.
+ *
+ * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
+ * see Documentation/core-api/pin_user_pages.rst for details.
*
* This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
* is NOT intended for Case 2 (RDMA: long-term pins).
@@ -2567,11 +2925,12 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- /*
- * This is a placeholder, until the pin functionality is activated.
- * Until then, just behave like the corresponding get_user_pages*()
- * routine.
- */
- return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE(gup_flags & FOLL_GET))
+ return -EINVAL;
+
+ gup_flags |= FOLL_PIN;
+ return __gup_longterm_locked(current, current->mm, start, nr_pages,
+ pages, vmas, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages);
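With pin_user_pages() and friends now activated, the matching release discipline is unpin_user_page()/unpin_user_pages(), never put_page(). A hedged sketch of the intended Case 1 (DIO) pattern, using a hypothetical driver fragment (not part of this patch):

static int example_dio_transfer(unsigned long uaddr, int nr,
				struct page **pages)
{
	int pinned;

	pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;
	if (pinned != nr) {
		unpin_user_pages(pages, pinned);
		return -EFAULT;
	}

	/* ... run the short-lived DMA against these pages ... */

	unpin_user_pages(pages, pinned);	/* FOLL_PIN pages: unpin */
	return 0;
}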
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 8dba38e79a9f..be690fa66a46 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -8,6 +8,8 @@
#define GUP_FAST_BENCHMARK _IOWR('g', 1, struct gup_benchmark)
#define GUP_LONGTERM_BENCHMARK _IOWR('g', 2, struct gup_benchmark)
#define GUP_BENCHMARK _IOWR('g', 3, struct gup_benchmark)
+#define PIN_FAST_BENCHMARK _IOWR('g', 4, struct gup_benchmark)
+#define PIN_BENCHMARK _IOWR('g', 5, struct gup_benchmark)
struct gup_benchmark {
__u64 get_delta_usec;
@@ -19,6 +21,48 @@ struct gup_benchmark {
__u64 expansion[10]; /* For future use */
};
+static void put_back_pages(unsigned int cmd, struct page **pages,
+ unsigned long nr_pages)
+{
+ unsigned long i;
+
+ switch (cmd) {
+ case GUP_FAST_BENCHMARK:
+ case GUP_LONGTERM_BENCHMARK:
+ case GUP_BENCHMARK:
+ for (i = 0; i < nr_pages; i++)
+ put_page(pages[i]);
+ break;
+
+ case PIN_FAST_BENCHMARK:
+ case PIN_BENCHMARK:
+ unpin_user_pages(pages, nr_pages);
+ break;
+ }
+}
+
+static void verify_dma_pinned(unsigned int cmd, struct page **pages,
+ unsigned long nr_pages)
+{
+ unsigned long i;
+ struct page *page;
+
+ switch (cmd) {
+ case PIN_FAST_BENCHMARK:
+ case PIN_BENCHMARK:
+ for (i = 0; i < nr_pages; i++) {
+ page = pages[i];
+ if (WARN(!page_maybe_dma_pinned(page),
+ "pages[%lu] is NOT dma-pinned\n", i)) {
+
+ dump_page(page, "gup_benchmark failure");
+ break;
+ }
+ }
+ break;
+ }
+}
+
static int __gup_benchmark_ioctl(unsigned int cmd,
struct gup_benchmark *gup)
{
@@ -66,6 +110,14 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
nr = get_user_pages(addr, nr, gup->flags, pages + i,
NULL);
break;
+ case PIN_FAST_BENCHMARK:
+ nr = pin_user_pages_fast(addr, nr, gup->flags,
+ pages + i);
+ break;
+ case PIN_BENCHMARK:
+ nr = pin_user_pages(addr, nr, gup->flags, pages + i,
+ NULL);
+ break;
default:
kvfree(pages);
ret = -EINVAL;
@@ -78,15 +130,22 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
}
end_time = ktime_get();
+ /* Shifting the meaning of nr_pages: now it is the actual number pinned: */
+ nr_pages = i;
+
gup->get_delta_usec = ktime_us_delta(end_time, start_time);
gup->size = addr - gup->addr;
+ /*
+ * Take a moment, outside of the benchmark timing, to verify the DMA
+ * pinned state: print a warning if any non-dma-pinned pages are found.
+ */
+ verify_dma_pinned(cmd, pages, nr_pages);
+
start_time = ktime_get();
- for (i = 0; i < nr_pages; i++) {
- if (!pages[i])
- break;
- put_page(pages[i]);
- }
+
+ put_back_pages(cmd, pages, nr_pages);
+
end_time = ktime_get();
gup->put_delta_usec = ktime_us_delta(end_time, start_time);
@@ -105,6 +164,8 @@ static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
case GUP_FAST_BENCHMARK:
case GUP_LONGTERM_BENCHMARK:
case GUP_BENCHMARK:
+ case PIN_FAST_BENCHMARK:
+ case PIN_BENCHMARK:
break;
default:
return -EINVAL;
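A userspace sketch that exercises the new PIN_FAST_BENCHMARK ioctl could look as follows. Since gup_benchmark has no uapi header, the struct layout and the nr_pages_per_call field are assumptions mirrored from the existing gup_benchmark selftest; only the ioctl numbers and the fields used in this patch are confirmed by it.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/types.h>

/* Assumed to mirror the kernel-side struct; there is no uapi header. */
struct gup_benchmark {
	__u64 get_delta_usec;
	__u64 put_delta_usec;
	__u64 addr;
	__u64 size;
	__u32 nr_pages_per_call;
	__u32 flags;
	__u64 expansion[10];
};
#define PIN_FAST_BENCHMARK	_IOWR('g', 4, struct gup_benchmark)

int main(void)
{
	struct gup_benchmark gup = { .nr_pages_per_call = 1024 };
	size_t size = 128UL << 20;
	void *p;
	int fd;

	fd = open("/sys/kernel/debug/gup_benchmark", O_RDWR);
	if (fd < 0)
		return 1;
	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	gup.addr = (__u64)(unsigned long)p;
	gup.size = size;
	if (ioctl(fd, PIN_FAST_BENCHMARK, &gup))
		return 1;
	printf("pin: %llu us, unpin: %llu us\n",
	       (unsigned long long)gup.get_delta_usec,
	       (unsigned long long)gup.put_delta_usec);
	return 0;
}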
diff --git a/mm/hmm.c b/mm/hmm.c
index 72e5a6d9a417..280585833adf 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -28,41 +28,25 @@
struct hmm_vma_walk {
struct hmm_range *range;
- struct dev_pagemap *pgmap;
unsigned long last;
- unsigned int flags;
};
-static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
- bool write_fault, uint64_t *pfn)
-{
- unsigned int flags = FAULT_FLAG_REMOTE;
- struct hmm_vma_walk *hmm_vma_walk = walk->private;
- struct hmm_range *range = hmm_vma_walk->range;
- struct vm_area_struct *vma = walk->vma;
- vm_fault_t ret;
-
- if (!vma)
- goto err;
-
- if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
- flags |= FAULT_FLAG_ALLOW_RETRY;
- if (write_fault)
- flags |= FAULT_FLAG_WRITE;
-
- ret = handle_mm_fault(vma, addr, flags);
- if (ret & VM_FAULT_RETRY) {
- /* Note, handle_mm_fault did up_read(&mm->mmap_sem)) */
- return -EAGAIN;
- }
- if (ret & VM_FAULT_ERROR)
- goto err;
-
- return -EBUSY;
+enum {
+ HMM_NEED_FAULT = 1 << 0,
+ HMM_NEED_WRITE_FAULT = 1 << 1,
+ HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
+};
-err:
- *pfn = range->values[HMM_PFN_ERROR];
- return -EFAULT;
+/*
+ * hmm_device_entry_from_pfn() - create a valid device entry value from pfn
+ * @range: range used to encode HMM pfn value
+ * @pfn: pfn value for which to create the device entry
+ * Return: valid device entry for the pfn
+ */
+static uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
+ unsigned long pfn)
+{
+ return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID];
}
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
@@ -79,56 +63,43 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end,
}
/*
- * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
+ * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
* @addr: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
- * @fault: should we fault or not ?
- * @write_fault: write fault ?
+ * @required_fault: HMM_NEED_* flags
* @walk: mm_walk structure
- * Return: 0 on success, -EBUSY after page fault, or page fault error
+ * Return: -EBUSY after page fault, or page fault error
*
* This function will be called whenever pmd_none() or pte_none() returns true,
* or whenever there is no page directory covering the virtual address range.
*/
-static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
- bool fault, bool write_fault,
- struct mm_walk *walk)
+static int hmm_vma_fault(unsigned long addr, unsigned long end,
+ unsigned int required_fault, struct mm_walk *walk)
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
- struct hmm_range *range = hmm_vma_walk->range;
- uint64_t *pfns = range->pfns;
- unsigned long i;
+ struct vm_area_struct *vma = walk->vma;
+ unsigned int fault_flags = FAULT_FLAG_REMOTE;
+ WARN_ON_ONCE(!required_fault);
hmm_vma_walk->last = addr;
- i = (addr - range->start) >> PAGE_SHIFT;
-
- if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
- return -EPERM;
-
- for (; addr < end; addr += PAGE_SIZE, i++) {
- pfns[i] = range->values[HMM_PFN_NONE];
- if (fault || write_fault) {
- int ret;
- ret = hmm_vma_do_fault(walk, addr, write_fault,
- &pfns[i]);
- if (ret != -EBUSY)
- return ret;
- }
+ if (required_fault & HMM_NEED_WRITE_FAULT) {
+ if (!(vma->vm_flags & VM_WRITE))
+ return -EPERM;
+ fault_flags |= FAULT_FLAG_WRITE;
}
- return (fault || write_fault) ? -EBUSY : 0;
+ for (; addr < end; addr += PAGE_SIZE)
+ if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
+ return -EFAULT;
+ return -EBUSY;
}
-static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
- uint64_t pfns, uint64_t cpu_flags,
- bool *fault, bool *write_fault)
+static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
+ uint64_t pfns, uint64_t cpu_flags)
{
struct hmm_range *range = hmm_vma_walk->range;
- if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
- return;
-
/*
* So we not only consider the individual per-page request, we also
* consider the default flags requested for the range. The API can
@@ -143,46 +114,44 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
/* We aren't asked to do anything ... */
if (!(pfns & range->flags[HMM_PFN_VALID]))
- return;
- /* If this is device memory then only fault if explicitly requested */
- if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
- /* Do we fault on device memory ? */
- if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
- *write_fault = pfns & range->flags[HMM_PFN_WRITE];
- *fault = true;
- }
- return;
- }
+ return 0;
- /* If CPU page table is not valid then we need to fault */
- *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
/* Need to write fault ? */
if ((pfns & range->flags[HMM_PFN_WRITE]) &&
- !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
- *write_fault = true;
- *fault = true;
- }
+ !(cpu_flags & range->flags[HMM_PFN_WRITE]))
+ return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;
+
+ /* If CPU page table is not valid then we need to fault */
+ if (!(cpu_flags & range->flags[HMM_PFN_VALID]))
+ return HMM_NEED_FAULT;
+ return 0;
}
-static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
- const uint64_t *pfns, unsigned long npages,
- uint64_t cpu_flags, bool *fault,
- bool *write_fault)
+static unsigned int
+hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
+ const uint64_t *pfns, unsigned long npages,
+ uint64_t cpu_flags)
{
+ struct hmm_range *range = hmm_vma_walk->range;
+ unsigned int required_fault = 0;
unsigned long i;
- if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
- *fault = *write_fault = false;
- return;
- }
+ /*
+ * If the default flags do not request to fault pages, and the mask does
+ * not allow for individual pages to be faulted, then
+ * hmm_pte_need_fault() will always return 0.
+ */
+ if (!((range->default_flags | range->pfn_flags_mask) &
+ range->flags[HMM_PFN_VALID]))
+ return 0;
- *fault = *write_fault = false;
for (i = 0; i < npages; ++i) {
- hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
- fault, write_fault);
- if ((*write_fault))
- return;
+ required_fault |=
+ hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags);
+ if (required_fault == HMM_NEED_ALL_BITS)
+ return required_fault;
}
+ return required_fault;
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
@@ -190,16 +159,23 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
- bool fault, write_fault;
+ unsigned int required_fault;
unsigned long i, npages;
uint64_t *pfns;
i = (addr - range->start) >> PAGE_SHIFT;
npages = (end - addr) >> PAGE_SHIFT;
pfns = &range->pfns[i];
- hmm_range_need_fault(hmm_vma_walk, pfns, npages,
- 0, &fault, &write_fault);
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+ required_fault = hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0);
+ if (!walk->vma) {
+ if (required_fault)
+ return -EFAULT;
+ return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
+ }
+ if (required_fault)
+ return hmm_vma_fault(addr, end, required_fault, walk);
+ hmm_vma_walk->last = addr;
+ return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
@@ -218,31 +194,19 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
unsigned long pfn, npages, i;
- bool fault, write_fault;
+ unsigned int required_fault;
uint64_t cpu_flags;
npages = (end - addr) >> PAGE_SHIFT;
cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
- hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
- &fault, &write_fault);
-
- if (pmd_protnone(pmd) || fault || write_fault)
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+ required_fault =
+ hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
+ if (required_fault)
+ return hmm_vma_fault(addr, end, required_fault, walk);
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
- if (pmd_devmap(pmd)) {
- hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
- hmm_vma_walk->pgmap);
- if (unlikely(!hmm_vma_walk->pgmap))
- return -EBUSY;
- }
+ for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
- }
- if (hmm_vma_walk->pgmap) {
- put_dev_pagemap(hmm_vma_walk->pgmap);
- hmm_vma_walk->pgmap = NULL;
- }
hmm_vma_walk->last = end;
return 0;
}
@@ -252,6 +216,14 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool hmm_is_device_private_entry(struct hmm_range *range,
+ swp_entry_t entry)
+{
+ return is_device_private_entry(entry) &&
+ device_private_entry_to_page(entry)->pgmap->owner ==
+ range->dev_private_owner;
+}
+
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
@@ -267,102 +239,81 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
- bool fault, write_fault;
+ unsigned int required_fault;
uint64_t cpu_flags;
pte_t pte = *ptep;
uint64_t orig_pfn = *pfn;
- *pfn = range->values[HMM_PFN_NONE];
- fault = write_fault = false;
-
if (pte_none(pte)) {
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
- &fault, &write_fault);
- if (fault || write_fault)
+ required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
+ if (required_fault)
goto fault;
+ *pfn = range->values[HMM_PFN_NONE];
return 0;
}
if (!pte_present(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
- if (!non_swap_entry(entry)) {
- cpu_flags = pte_to_hmm_pfn_flags(range, pte);
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);
- if (fault || write_fault)
- goto fault;
- return 0;
- }
-
/*
- * This is a special swap entry, ignore migration, use
- * device and report anything else as error.
+ * Never fault in device private pages, but just report
+ * the PFN even if not present.
*/
- if (is_device_private_entry(entry)) {
- cpu_flags = range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_DEVICE_PRIVATE];
- cpu_flags |= is_write_device_private_entry(entry) ?
- range->flags[HMM_PFN_WRITE] : 0;
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);
- if (fault || write_fault)
- goto fault;
+ if (hmm_is_device_private_entry(range, entry)) {
*pfn = hmm_device_entry_from_pfn(range,
- swp_offset(entry));
- *pfn |= cpu_flags;
+ device_private_entry_to_pfn(entry));
+ *pfn |= range->flags[HMM_PFN_VALID];
+ if (is_write_device_private_entry(entry))
+ *pfn |= range->flags[HMM_PFN_WRITE];
return 0;
}
- if (is_migration_entry(entry)) {
- if (fault || write_fault) {
- pte_unmap(ptep);
- hmm_vma_walk->last = addr;
- migration_entry_wait(walk->mm, pmdp, addr);
- return -EBUSY;
- }
+ required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
+ if (!required_fault) {
+ *pfn = range->values[HMM_PFN_NONE];
return 0;
}
+ if (!non_swap_entry(entry))
+ goto fault;
+
+ if (is_migration_entry(entry)) {
+ pte_unmap(ptep);
+ hmm_vma_walk->last = addr;
+ migration_entry_wait(walk->mm, pmdp, addr);
+ return -EBUSY;
+ }
+
/* Report error for everything else */
- *pfn = range->values[HMM_PFN_ERROR];
+ pte_unmap(ptep);
return -EFAULT;
- } else {
- cpu_flags = pte_to_hmm_pfn_flags(range, pte);
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);
}
- if (fault || write_fault)
+ cpu_flags = pte_to_hmm_pfn_flags(range, pte);
+ required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
+ if (required_fault)
goto fault;
- if (pte_devmap(pte)) {
- hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
- hmm_vma_walk->pgmap);
- if (unlikely(!hmm_vma_walk->pgmap))
- return -EBUSY;
- } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
- if (!is_zero_pfn(pte_pfn(pte))) {
- *pfn = range->values[HMM_PFN_SPECIAL];
+ /*
+ * Since each architecture defines a struct page for the zero page, just
+ * fall through and treat it like a normal page.
+ */
+ if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
+ if (hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0)) {
+ pte_unmap(ptep);
return -EFAULT;
}
- /*
- * Since each architecture defines a struct page for the zero
- * page, just fall through and treat it like a normal page.
- */
+ *pfn = range->values[HMM_PFN_SPECIAL];
+ return 0;
}
*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
return 0;
fault:
- if (hmm_vma_walk->pgmap) {
- put_dev_pagemap(hmm_vma_walk->pgmap);
- hmm_vma_walk->pgmap = NULL;
- }
pte_unmap(ptep);
/* Fault any virtual address we were asked to fault */
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+ return hmm_vma_fault(addr, end, required_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
@@ -372,8 +323,9 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
- uint64_t *pfns = range->pfns;
- unsigned long addr = start, i;
+ uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
+ unsigned long npages = (end - start) >> PAGE_SHIFT;
+ unsigned long addr = start;
pte_t *ptep;
pmd_t pmd;
@@ -383,24 +335,19 @@ again:
return hmm_vma_walk_hole(start, end, -1, walk);
if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
- bool fault, write_fault;
- unsigned long npages;
- uint64_t *pfns;
-
- i = (addr - range->start) >> PAGE_SHIFT;
- npages = (end - addr) >> PAGE_SHIFT;
- pfns = &range->pfns[i];
-
- hmm_range_need_fault(hmm_vma_walk, pfns, npages,
- 0, &fault, &write_fault);
- if (fault || write_fault) {
+ if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0)) {
hmm_vma_walk->last = addr;
pmd_migration_entry_wait(walk->mm, pmdp);
return -EBUSY;
}
- return 0;
- } else if (!pmd_present(pmd))
+ return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+ }
+
+ if (!pmd_present(pmd)) {
+ if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+ return -EFAULT;
return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+ }
if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
@@ -417,8 +364,7 @@ again:
if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
goto again;
- i = (addr - range->start) >> PAGE_SHIFT;
- return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
+ return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
}
/*
@@ -427,31 +373,23 @@ again:
* entry pointing to pte directory or it is a bad pmd that will not
* recover.
*/
- if (pmd_bad(pmd))
+ if (pmd_bad(pmd)) {
+ if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
+ return -EFAULT;
return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+ }
ptep = pte_offset_map(pmdp, addr);
- i = (addr - range->start) >> PAGE_SHIFT;
- for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
+ for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
int r;
- r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
+ r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
if (r) {
- /* hmm_vma_handle_pte() did unmap pte directory */
+ /* hmm_vma_handle_pte() did pte_unmap() */
hmm_vma_walk->last = addr;
return r;
}
}
- if (hmm_vma_walk->pgmap) {
- /*
- * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
- * so that we can leverage get_dev_pagemap() optimization which
- * will not re-take a reference on a pgmap if we already have
- * one.
- */
- put_dev_pagemap(hmm_vma_walk->pgmap);
- hmm_vma_walk->pgmap = NULL;
- }
pte_unmap(ptep - 1);
hmm_vma_walk->last = addr;
@@ -487,18 +425,18 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
pud = READ_ONCE(*pudp);
if (pud_none(pud)) {
- ret = hmm_vma_walk_hole(start, end, -1, walk);
- goto out_unlock;
+ spin_unlock(ptl);
+ return hmm_vma_walk_hole(start, end, -1, walk);
}
if (pud_huge(pud) && pud_devmap(pud)) {
unsigned long i, npages, pfn;
+ unsigned int required_fault;
uint64_t *pfns, cpu_flags;
- bool fault, write_fault;
if (!pud_present(pud)) {
- ret = hmm_vma_walk_hole(start, end, -1, walk);
- goto out_unlock;
+ spin_unlock(ptl);
+ return hmm_vma_walk_hole(start, end, -1, walk);
}
i = (addr - range->start) >> PAGE_SHIFT;
@@ -506,29 +444,17 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
pfns = &range->pfns[i];
cpu_flags = pud_to_hmm_pfn_flags(range, pud);
- hmm_range_need_fault(hmm_vma_walk, pfns, npages,
- cpu_flags, &fault, &write_fault);
- if (fault || write_fault) {
- ret = hmm_vma_walk_hole_(addr, end, fault,
- write_fault, walk);
- goto out_unlock;
+ required_fault = hmm_range_need_fault(hmm_vma_walk, pfns,
+ npages, cpu_flags);
+ if (required_fault) {
+ spin_unlock(ptl);
+ return hmm_vma_fault(addr, end, required_fault, walk);
}
pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- for (i = 0; i < npages; ++i, ++pfn) {
- hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
- hmm_vma_walk->pgmap);
- if (unlikely(!hmm_vma_walk->pgmap)) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ for (i = 0; i < npages; ++i, ++pfn)
pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
cpu_flags;
- }
- if (hmm_vma_walk->pgmap) {
- put_dev_pagemap(hmm_vma_walk->pgmap);
- hmm_vma_walk->pgmap = NULL;
- }
hmm_vma_walk->last = end;
goto out_unlock;
}
@@ -554,24 +480,20 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
struct hmm_range *range = hmm_vma_walk->range;
struct vm_area_struct *vma = walk->vma;
uint64_t orig_pfn, cpu_flags;
- bool fault, write_fault;
+ unsigned int required_fault;
spinlock_t *ptl;
pte_t entry;
- int ret = 0;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
entry = huge_ptep_get(pte);
i = (start - range->start) >> PAGE_SHIFT;
orig_pfn = range->pfns[i];
- range->pfns[i] = range->values[HMM_PFN_NONE];
cpu_flags = pte_to_hmm_pfn_flags(range, entry);
- fault = write_fault = false;
- hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
- &fault, &write_fault);
- if (fault || write_fault) {
- ret = -ENOENT;
- goto unlock;
+ required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
+ if (required_fault) {
+ spin_unlock(ptl);
+ return hmm_vma_fault(addr, end, required_fault, walk);
}
pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
@@ -579,14 +501,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
cpu_flags;
hmm_vma_walk->last = end;
-
-unlock:
spin_unlock(ptl);
-
- if (ret == -ENOENT)
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
-
- return ret;
+ return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
@@ -599,40 +515,32 @@ static int hmm_vma_walk_test(unsigned long start, unsigned long end,
struct hmm_range *range = hmm_vma_walk->range;
struct vm_area_struct *vma = walk->vma;
- /*
- * Skip vma ranges that don't have struct page backing them or
- * map I/O devices directly.
- */
- if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
- return -EFAULT;
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
+ vma->vm_flags & VM_READ)
+ return 0;
/*
+ * vma ranges that don't have struct page backing them or map I/O
+ * devices directly cannot be handled by hmm_range_fault().
+ *
* If the vma does not allow read access, then assume that it does not
- * allow write access either. HMM does not support architectures
- * that allow write without read.
+ * allow write access either. HMM does not support architectures that
+ * allow write without read.
+ *
+ * If a fault is requested for an unsupported range then it is a hard
+ * failure.
*/
- if (!(vma->vm_flags & VM_READ)) {
- bool fault, write_fault;
-
- /*
- * Check to see if a fault is requested for any page in the
- * range.
- */
- hmm_range_need_fault(hmm_vma_walk, range->pfns +
- ((start - range->start) >> PAGE_SHIFT),
- (end - start) >> PAGE_SHIFT,
- 0, &fault, &write_fault);
- if (fault || write_fault)
- return -EFAULT;
-
- hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
- hmm_vma_walk->last = end;
+ if (hmm_range_need_fault(hmm_vma_walk,
+ range->pfns +
+ ((start - range->start) >> PAGE_SHIFT),
+ (end - start) >> PAGE_SHIFT, 0))
+ return -EFAULT;
- /* Skip this vma and continue processing the next vma. */
- return 1;
- }
+ hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+ hmm_vma_walk->last = end;
- return 0;
+ /* Skip this vma and continue processing the next vma. */
+ return 1;
}
static const struct mm_walk_ops hmm_walk_ops = {
@@ -645,8 +553,7 @@ static const struct mm_walk_ops hmm_walk_ops = {
/**
* hmm_range_fault - try to fault some address in a virtual address range
- * @range: range being faulted
- * @flags: HMM_FAULT_* flags
+ * @range: argument structure
*
* Return: the number of valid pages in range->pfns[] (from range start
* address), which may be zero. On error one of the following status codes
@@ -657,26 +564,19 @@ static const struct mm_walk_ops hmm_walk_ops = {
* -ENOMEM: Out of memory.
* -EPERM: Invalid permission (e.g., asking for write and range is read
* only).
- * -EAGAIN: A page fault needs to be retried and mmap_sem was dropped.
* -EBUSY: The range has been invalidated and the caller needs to wait for
* the invalidation to finish.
- * -EFAULT: Invalid (i.e., either no valid vma or it is illegal to access
- * that range) number of valid pages in range->pfns[] (from
- * range start address).
- *
- * This is similar to a regular CPU page fault except that it will not trigger
- * any memory migration if the memory being faulted is not accessible by CPUs
- * and caller does not ask for migration.
+ * -EFAULT: A page was requested to be valid and could not be made valid,
+ * i.e. it has no backing VMA or it is illegal to access.
*
- * On error, for one virtual address in the range, the function will mark the
- * corresponding HMM pfn entry with an error flag.
+ * This is similar to get_user_pages(), except that it can read the page tables
+ * without mutating them (i.e. without causing faults).
*/
-long hmm_range_fault(struct hmm_range *range, unsigned int flags)
+long hmm_range_fault(struct hmm_range *range)
{
struct hmm_vma_walk hmm_vma_walk = {
.range = range,
.last = range->start,
- .flags = flags,
};
struct mm_struct *mm = range->notifier->mm;
int ret;
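Under the reworked contract (a single argument, -EBUSY means "retry", -EFAULT is a hard failure), a driver is expected to wrap hmm_range_fault() in a retry loop driven by the interval notifier sequence count. The following is a sketch only; the driver lock is a placeholder, and the shape follows the documented usage pattern for hmm_range_fault() rather than code from this patch:

static long example_fault_and_snapshot(struct hmm_range *range,
				       struct mm_struct *mm,
				       struct mutex *driver_lock)
{
	long ret;

again:
	range->notifier_seq = mmu_interval_read_begin(range->notifier);
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(range);
	up_read(&mm->mmap_sem);
	if (ret < 0) {
		if (ret == -EBUSY)
			goto again;	/* invalidated while faulting */
		return ret;
	}

	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
		mutex_unlock(driver_lock);
		goto again;
	}
	/* ... consume range->pfns[] while holding driver_lock ... */
	mutex_unlock(driver_lock);
	return ret;
}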
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 24ad53b4dfc0..6ecd1045113b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -326,7 +326,7 @@ static struct attribute *hugepage_attr[] = {
&defrag_attr.attr,
&use_zero_page_attr.attr,
&hpage_pmd_size_attr.attr,
-#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+#ifdef CONFIG_SHMEM
&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
@@ -597,6 +597,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
put_page(page);
count_vm_event(THP_FAULT_FALLBACK);
+ count_vm_event(THP_FAULT_FALLBACK_CHARGE);
return VM_FAULT_FALLBACK;
}
@@ -824,11 +825,24 @@ out_unlock:
pte_free(mm, pgtable);
}
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
+/**
+ * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
+ * also consult the vmf_insert_mixed_prot() documentation when
+ * @pgprot != @vmf->vma->vm_page_prot.
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
+ pgprot_t pgprot, bool write)
{
unsigned long addr = vmf->address & PMD_MASK;
struct vm_area_struct *vma = vmf->vma;
- pgprot_t pgprot = vma->vm_page_prot;
pgtable_t pgtable = NULL;
/*
@@ -856,7 +870,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
return VM_FAULT_NOPAGE;
}
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
@@ -902,11 +916,24 @@ out_unlock:
spin_unlock(ptl);
}
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
+/**
+ * vmf_insert_pfn_pud_prot - insert a pud size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
+ * also consult the vmf_insert_mixed_prot() documentation when
+ * @pgprot != @vmf->vma->vm_page_prot.
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
+ pgprot_t pgprot, bool write)
{
unsigned long addr = vmf->address & PUD_MASK;
struct vm_area_struct *vma = vmf->vma;
- pgprot_t pgprot = vma->vm_page_prot;
/*
* If we had pud_special, we could avoid all these restrictions,
@@ -927,7 +954,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
return VM_FAULT_NOPAGE;
}
-EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -958,6 +985,11 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
*/
WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+ (FOLL_PIN | FOLL_GET)))
+ return NULL;
+
if (flags & FOLL_WRITE && !pmd_write(*pmd))
return NULL;
@@ -973,7 +1005,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
* device mapped pages can only be returned if the
* caller will manage the page reference count.
*/
- if (!(flags & FOLL_GET))
+ if (!(flags & (FOLL_GET | FOLL_PIN)))
return ERR_PTR(-EEXIST);
pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
@@ -981,7 +1013,8 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
if (!*pgmap)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
- get_page(page);
+ if (!try_grab_page(page, flags))
+ page = ERR_PTR(-ENOMEM);
return page;
}
@@ -1011,6 +1044,14 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = -EAGAIN;
pmd = *src_pmd;
+ /*
+ * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
+ * does not have VM_UFFD_WP set, which means that the uffd
+ * fork event is not enabled.
+ */
+ if (!(vma->vm_flags & VM_UFFD_WP))
+ pmd = pmd_clear_uffd_wp(pmd);
+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
if (unlikely(is_swap_pmd(pmd))) {
swp_entry_t entry = pmd_to_swp_entry(pmd);
@@ -1101,6 +1142,11 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
if (flags & FOLL_WRITE && !pud_write(*pud))
return NULL;
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+ (FOLL_PIN | FOLL_GET)))
+ return NULL;
+
if (pud_present(*pud) && pud_devmap(*pud))
/* pass */;
else
@@ -1112,8 +1158,10 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
/*
* device mapped pages can only be returned if the
* caller will manage the page reference count.
+ *
+ * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
*/
- if (!(flags & FOLL_GET))
+ if (!(flags & (FOLL_GET | FOLL_PIN)))
return ERR_PTR(-EEXIST);
pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
@@ -1121,7 +1169,8 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
if (!*pgmap)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
- get_page(page);
+ if (!try_grab_page(page, flags))
+ page = ERR_PTR(-ENOMEM);
return page;
}
@@ -1406,6 +1455,7 @@ alloc:
put_page(page);
ret |= VM_FAULT_FALLBACK;
count_vm_event(THP_FAULT_FALLBACK);
+ count_vm_event(THP_FAULT_FALLBACK_CHARGE);
goto out;
}
@@ -1497,8 +1547,13 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
page = pmd_page(*pmd);
VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+
+ if (!try_grab_page(page, flags))
+ return ERR_PTR(-ENOMEM);
+
if (flags & FOLL_TOUCH)
touch_pmd(vma, addr, pmd, flags);
+
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/*
* We don't mlock() pte-mapped THPs. This way we can avoid
@@ -1535,8 +1590,6 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
skip_mlock:
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
- if (flags & FOLL_GET)
- get_page(page);
out:
return page;
@@ -1802,7 +1855,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
tlb->fullmm);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
- if (vma_is_dax(vma)) {
+ if (vma_is_special_huge(vma)) {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
@@ -1934,13 +1987,16 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
* - HPAGE_PMD_NR is protections changed and TLB flush necessary
*/
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot, int prot_numa)
+ unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
pmd_t entry;
bool preserve_write;
int ret;
+ bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
+ bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+ bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
ptl = __pmd_trans_huge_lock(pmd, vma);
if (!ptl)
@@ -2007,6 +2063,17 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
entry = pmd_modify(entry, newprot);
if (preserve_write)
entry = pmd_mk_savedwrite(entry);
+ if (uffd_wp) {
+ entry = pmd_wrprotect(entry);
+ entry = pmd_mkuffd_wp(entry);
+ } else if (uffd_wp_resolve) {
+ /*
+ * Leave the write bit to be handled by the page
+ * fault handler, so that things like COW can be
+ * handled properly.
+ */
+ entry = pmd_clear_uffd_wp(entry);
+ }
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
@@ -2066,7 +2133,7 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
*/
pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
- if (vma_is_dax(vma)) {
+ if (vma_is_special_huge(vma)) {
spin_unlock(ptl);
/* No zero page support yet */
} else {
@@ -2155,7 +2222,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
struct page *page;
pgtable_t pgtable;
pmd_t old_pmd, _pmd;
- bool young, write, soft_dirty, pmd_migration = false;
+ bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
unsigned long addr;
int i;
@@ -2175,7 +2242,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (arch_needs_pgtable_deposit())
zap_deposited_table(mm, pmd);
- if (vma_is_dax(vma))
+ if (vma_is_special_huge(vma))
return;
page = pmd_page(_pmd);
if (!PageDirty(page) && pmd_dirty(_pmd))
@@ -2230,6 +2297,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
write = is_write_migration_entry(entry);
young = false;
soft_dirty = pmd_swp_soft_dirty(old_pmd);
+ uffd_wp = pmd_swp_uffd_wp(old_pmd);
} else {
page = pmd_page(old_pmd);
if (pmd_dirty(old_pmd))
@@ -2237,6 +2305,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
write = pmd_write(old_pmd);
young = pmd_young(old_pmd);
soft_dirty = pmd_soft_dirty(old_pmd);
+ uffd_wp = pmd_uffd_wp(old_pmd);
}
VM_BUG_ON_PAGE(!page_count(page), page);
page_ref_add(page, HPAGE_PMD_NR - 1);
@@ -2261,6 +2330,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = swp_entry_to_pte(swp_entry);
if (soft_dirty)
entry = pte_swp_mksoft_dirty(entry);
+ if (uffd_wp)
+ entry = pte_swp_mkuffd_wp(entry);
} else {
entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
entry = maybe_mkwrite(entry, vma);
@@ -2270,6 +2341,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = pte_mkold(entry);
if (soft_dirty)
entry = pte_mksoft_dirty(entry);
+ if (uffd_wp)
+ entry = pte_mkuffd_wp(entry);
}
pte = pte_offset_map(&_pmd, addr);
BUG_ON(!pte_none(*pte));
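Since vmf_insert_pfn_pmd() and vmf_insert_pfn_pud() are converted into the *_prot() variants above, the old entry points presumably survive as header-side inline wrappers that pass the vma's own page protection. A sketch, assuming the companion include/linux/huge_mm.h change in this series takes this shape:

static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}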
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dd8737a94bec..f5fb53fdfa02 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -220,132 +220,303 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
return subpool_inode(file_inode(vma->vm_file));
}
-/*
- * Region tracking -- allows tracking of reservations and instantiated pages
- * across the pages in a mapping.
- *
- * The region data structures are embedded into a resv_map and protected
- * by a resv_map's lock. The set of regions within the resv_map represent
- * reservations for huge pages, or huge pages that have already been
- * instantiated within the map. The from and to elements are huge page
- * indicies into the associated mapping. from indicates the starting index
- * of the region. to represents the first index past the end of the region.
- *
- * For example, a file region structure with from == 0 and to == 4 represents
- * four huge pages in a mapping. It is important to note that the to element
- * represents the first element past the end of the region. This is used in
- * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
- *
- * Interval notation of the form [from, to) will be used to indicate that
- * the endpoint from is inclusive and to is exclusive.
+/* Helper that removes a struct file_region from the resv_map cache and returns
+ * it for use.
*/
-struct file_region {
- struct list_head link;
- long from;
- long to;
-};
+static struct file_region *
+get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
+{
+ struct file_region *nrg = NULL;
+
+ VM_BUG_ON(resv->region_cache_count <= 0);
+
+ resv->region_cache_count--;
+ nrg = list_first_entry(&resv->region_cache, struct file_region, link);
+ VM_BUG_ON(!nrg);
+ list_del(&nrg->link);
+
+ nrg->from = from;
+ nrg->to = to;
+
+ return nrg;
+}
+
+static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
+ struct file_region *rg)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+ nrg->reservation_counter = rg->reservation_counter;
+ nrg->css = rg->css;
+ if (rg->css)
+ css_get(rg->css);
+#endif
+}
+
+/* Helper that records hugetlb_cgroup uncharge info. */
+static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
+ struct hstate *h,
+ struct resv_map *resv,
+ struct file_region *nrg)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+ if (h_cg) {
+ nrg->reservation_counter =
+ &h_cg->rsvd_hugepage[hstate_index(h)];
+ nrg->css = &h_cg->css;
+ if (!resv->pages_per_hpage)
+ resv->pages_per_hpage = pages_per_huge_page(h);
+ /* pages_per_hpage should be the same for all entries in
+ * a resv_map.
+ */
+ VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
+ } else {
+ nrg->reservation_counter = NULL;
+ nrg->css = NULL;
+ }
+#endif
+}
+
+static bool has_same_uncharge_info(struct file_region *rg,
+ struct file_region *org)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+ return rg && org &&
+ rg->reservation_counter == org->reservation_counter &&
+ rg->css == org->css;
+
+#else
+ return true;
+#endif
+}
+
+static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
+{
+ struct file_region *nrg = NULL, *prg = NULL;
+
+ prg = list_prev_entry(rg, link);
+ if (&prg->link != &resv->regions && prg->to == rg->from &&
+ has_same_uncharge_info(prg, rg)) {
+ prg->to = rg->to;
+
+ list_del(&rg->link);
+ kfree(rg);
+
+ coalesce_file_region(resv, prg);
+ return;
+ }
+
+ nrg = list_next_entry(rg, link);
+ if (&nrg->link != &resv->regions && nrg->from == rg->to &&
+ has_same_uncharge_info(nrg, rg)) {
+ nrg->from = rg->from;
+
+ list_del(&rg->link);
+ kfree(rg);
+
+ coalesce_file_region(resv, nrg);
+ return;
+ }
+}
/* Must be called with resv->lock held. Calling this with count_only == true
* will count the number of pages to be added but will not modify the linked
- * list.
+ * list. If regions_needed != NULL and count_only == true, then regions_needed
+ * will indicate the number of file_regions needed in the cache to add the
+ * regions for this range.
*/
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
+ struct hugetlb_cgroup *h_cg,
+ struct hstate *h, long *regions_needed,
bool count_only)
{
- long chg = 0;
+ long add = 0;
struct list_head *head = &resv->regions;
+ long last_accounted_offset = f;
struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;
- /* Locate the region we are before or in. */
- list_for_each_entry(rg, head, link)
- if (f <= rg->to)
- break;
-
- /* Round our left edge to the current segment if it encloses us. */
- if (f > rg->from)
- f = rg->from;
+ if (regions_needed)
+ *regions_needed = 0;
- chg = t - f;
+ /* In this loop, we essentially handle an entry for the range
+ * [last_accounted_offset, rg->from) at every iteration, with some
+ * bounds checking.
+ */
+ list_for_each_entry_safe(rg, trg, head, link) {
+ /* Skip irrelevant regions that start before our range. */
+ if (rg->from < f) {
+ /* If this region ends after the last accounted offset,
+ * then we need to update last_accounted_offset.
+ */
+ if (rg->to > last_accounted_offset)
+ last_accounted_offset = rg->to;
+ continue;
+ }
- /* Check for and consume any regions we now overlap with. */
- nrg = rg;
- list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
- if (&rg->link == head)
- break;
+ /* When we find a region that starts beyond our range, we've
+ * finished.
+ */
if (rg->from > t)
break;
- /* We overlap with this area, if it extends further than
- * us then we must extend ourselves. Account for its
- * existing reservation.
+ /* Add an entry for last_accounted_offset -> rg->from, and
+ * update last_accounted_offset.
+ */
+ if (rg->from > last_accounted_offset) {
+ add += rg->from - last_accounted_offset;
+ if (!count_only) {
+ nrg = get_file_region_entry_from_cache(
+ resv, last_accounted_offset, rg->from);
+ record_hugetlb_cgroup_uncharge_info(h_cg, h,
+ resv, nrg);
+ list_add(&nrg->link, rg->link.prev);
+ coalesce_file_region(resv, nrg);
+ } else if (regions_needed)
+ *regions_needed += 1;
+ }
+
+ last_accounted_offset = rg->to;
+ }
+
+ /* Handle the case where our range extends beyond
+ * last_accounted_offset.
+ */
+ if (last_accounted_offset < t) {
+ add += t - last_accounted_offset;
+ if (!count_only) {
+ nrg = get_file_region_entry_from_cache(
+ resv, last_accounted_offset, t);
+ record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
+ list_add(&nrg->link, rg->link.prev);
+ coalesce_file_region(resv, nrg);
+ } else if (regions_needed)
+ *regions_needed += 1;
+ }
+
+ VM_BUG_ON(add < 0);
+ return add;
+}
+
+/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
+ */
+static int allocate_file_region_entries(struct resv_map *resv,
+ int regions_needed)
+ __must_hold(&resv->lock)
+{
+ struct list_head allocated_regions;
+ int to_allocate = 0, i = 0;
+ struct file_region *trg = NULL, *rg = NULL;
+
+ VM_BUG_ON(regions_needed < 0);
+
+ INIT_LIST_HEAD(&allocated_regions);
+
+ /*
+ * Check for sufficient descriptors in the cache to accommodate
+ * the number of in progress add operations plus regions_needed.
+ *
+ * This is a while loop because when we drop the lock, some other call
+ * to region_add or region_del may have consumed some region_entries,
+ * so we keep looping here until we finally have enough entries for
+ * (adds_in_progress + regions_needed).
+ */
+ while (resv->region_cache_count <
+ (resv->adds_in_progress + regions_needed)) {
+ to_allocate = resv->adds_in_progress + regions_needed -
+ resv->region_cache_count;
+
+ /* At this point, we should have enough entries in the cache
+ * for all the existing adds_in_progress. We should only need
+ * to allocate for regions_needed.
*/
- if (rg->to > t) {
- chg += rg->to - t;
- t = rg->to;
+ VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
+
+ spin_unlock(&resv->lock);
+ for (i = 0; i < to_allocate; i++) {
+ trg = kmalloc(sizeof(*trg), GFP_KERNEL);
+ if (!trg)
+ goto out_of_memory;
+ list_add(&trg->link, &allocated_regions);
}
- chg -= rg->to - rg->from;
- if (!count_only && rg != nrg) {
+ spin_lock(&resv->lock);
+
+ list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
list_del(&rg->link);
- kfree(rg);
+ list_add(&rg->link, &resv->region_cache);
+ resv->region_cache_count++;
}
}
- if (!count_only) {
- nrg->from = f;
- nrg->to = t;
- }
+ return 0;
- return chg;
+out_of_memory:
+ list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
+ list_del(&rg->link);
+ kfree(rg);
+ }
+ return -ENOMEM;
}
/*
* Add the huge page range represented by [f, t) to the reserve
- * map. Existing regions will be expanded to accommodate the specified
- * range, or a region will be taken from the cache. Sufficient regions
- * must exist in the cache due to the previous call to region_chg with
- * the same range.
+ * map. Regions will be taken from the cache to fill in this range.
+ * Sufficient regions should exist in the cache due to the previous
+ * call to region_chg with the same range, but in some cases the cache will not
+ * have sufficient entries due to races with other code doing region_add or
+ * region_del. The extra needed entries will be allocated.
*
- * Return the number of new huge pages added to the map. This
- * number is greater than or equal to zero.
+ * regions_needed is the out value provided by a previous call to region_chg.
+ *
+ * Return the number of new huge pages added to the map. This number is greater
+ * than or equal to zero. If file_region entries needed to be allocated for
+ * this operation and we were not able to allocate, it returns -ENOMEM.
+ * region_add of regions of length 1 never allocates file_regions and cannot
+ * fail; region_chg will always allocate at least 1 entry and a region_add for
+ * 1 page will only require at most 1 entry.
*/
-static long region_add(struct resv_map *resv, long f, long t)
+static long region_add(struct resv_map *resv, long f, long t,
+ long in_regions_needed, struct hstate *h,
+ struct hugetlb_cgroup *h_cg)
{
- struct list_head *head = &resv->regions;
- struct file_region *rg, *nrg;
- long add = 0;
+ long add = 0, actual_regions_needed = 0;
spin_lock(&resv->lock);
- /* Locate the region we are either in or before. */
- list_for_each_entry(rg, head, link)
- if (f <= rg->to)
- break;
+retry:
+
+ /* Count how many regions are actually needed to execute this add. */
+ add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed,
+ true);
/*
- * If no region exists which can be expanded to include the
- * specified range, pull a region descriptor from the cache
- * and use it for this range.
+ * Check for sufficient descriptors in the cache to accommodate
+ * this add operation. Note that actual_regions_needed may be greater
+ * than in_regions_needed, as the resv_map may have been modified since
+ * the region_chg call. In this case, we need to make sure that we
+ * allocate extra entries, such that we have enough for all the
+ * existing adds_in_progress, plus the excess needed for this
+ * operation.
*/
- if (&rg->link == head || t < rg->from) {
- VM_BUG_ON(resv->region_cache_count <= 0);
-
- resv->region_cache_count--;
- nrg = list_first_entry(&resv->region_cache, struct file_region,
- link);
- list_del(&nrg->link);
+ if (actual_regions_needed > in_regions_needed &&
+ resv->region_cache_count <
+ resv->adds_in_progress +
+ (actual_regions_needed - in_regions_needed)) {
+ /* region_add operation of range 1 should never need to
+ * allocate file_region entries.
+ */
+ VM_BUG_ON(t - f <= 1);
- nrg->from = f;
- nrg->to = t;
- list_add(&nrg->link, rg->link.prev);
+ if (allocate_file_region_entries(
+ resv, actual_regions_needed - in_regions_needed)) {
+ return -ENOMEM;
+ }
- add += t - f;
- goto out_locked;
+ goto retry;
}
- add = add_reservation_in_range(resv, f, t, false);
+ add = add_reservation_in_range(resv, f, t, h_cg, h, NULL, false);
+
+ resv->adds_in_progress -= in_regions_needed;
-out_locked:
- resv->adds_in_progress--;
spin_unlock(&resv->lock);
VM_BUG_ON(add < 0);
return add;
@@ -358,46 +529,37 @@ out_locked:
* call to region_add that will actually modify the reserve
* map to add the specified range [f, t). region_chg does
* not change the number of huge pages represented by the
- * map. A new file_region structure is added to the cache
- * as a placeholder, so that the subsequent region_add
- * call will have all the regions it needs and will not fail.
+ * map. A number of new file_region structures are added to the cache as
+ * placeholders, for the subsequent region_add call to use. At least 1
+ * file_region structure is added.
+ *
+ * out_regions_needed is the number of regions added to the
+ * resv->adds_in_progress. This value needs to be provided to a follow up call
+ * to region_add or region_abort for proper accounting.
*
* Returns the number of huge pages that need to be added to the existing
* reservation map for the range [f, t). This number is greater or equal to
* zero. -ENOMEM is returned if a new file_region structure or cache entry
* is needed and can not be allocated.
*/
-static long region_chg(struct resv_map *resv, long f, long t)
+static long region_chg(struct resv_map *resv, long f, long t,
+ long *out_regions_needed)
{
long chg = 0;
spin_lock(&resv->lock);
-retry_locked:
- resv->adds_in_progress++;
- /*
- * Check for sufficient descriptors in the cache to accommodate
- * the number of in progress add operations.
- */
- if (resv->adds_in_progress > resv->region_cache_count) {
- struct file_region *trg;
-
- VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
- /* Must drop lock to allocate a new descriptor. */
- resv->adds_in_progress--;
- spin_unlock(&resv->lock);
+ /* Count how many hugepages in this range are NOT represented. */
+ chg = add_reservation_in_range(resv, f, t, NULL, NULL,
+ out_regions_needed, true);
- trg = kmalloc(sizeof(*trg), GFP_KERNEL);
- if (!trg)
- return -ENOMEM;
+ if (*out_regions_needed == 0)
+ *out_regions_needed = 1;
- spin_lock(&resv->lock);
- list_add(&trg->link, &resv->region_cache);
- resv->region_cache_count++;
- goto retry_locked;
- }
+ if (allocate_file_region_entries(resv, *out_regions_needed))
+ return -ENOMEM;
- chg = add_reservation_in_range(resv, f, t, true);
+ resv->adds_in_progress += *out_regions_needed;
spin_unlock(&resv->lock);
return chg;
@@ -408,17 +570,20 @@ retry_locked:
* of the resv_map keeps track of the operations in progress between
* calls to region_chg and region_add. Operations are sometimes
* aborted after the call to region_chg. In such cases, region_abort
- * is called to decrement the adds_in_progress counter.
+ * is called to decrement the adds_in_progress counter. regions_needed
+ * is the value returned by the region_chg call, it is used to decrement
+ * the adds_in_progress counter.
*
* NOTE: The range arguments [f, t) are not needed or used in this
* routine. They are kept to make reading the calling code easier as
* arguments will match the associated region_chg call.
*/
-static void region_abort(struct resv_map *resv, long f, long t)
+static void region_abort(struct resv_map *resv, long f, long t,
+ long regions_needed)
{
spin_lock(&resv->lock);
VM_BUG_ON(!resv->region_cache_count);
- resv->adds_in_progress--;
+ resv->adds_in_progress -= regions_needed;
spin_unlock(&resv->lock);
}
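Taken together, the three primitives now follow a count / commit / abort protocol. A minimal sketch of a caller, assuming a resv_map *resv, range [f, t), an hstate *h and a hugetlb_cgroup *h_cg in scope (an illustration of the contract, not code from the patch):

	long regions_needed, chg, add;

	chg = region_chg(resv, f, t, &regions_needed);  /* count + precharge cache */
	if (chg < 0)
		return chg;                             /* -ENOMEM */

	if (subpool_or_accounting_failed) {             /* hypothetical condition */
		region_abort(resv, f, t, regions_needed);
		return -ENOSPC;
	}

	add = region_add(resv, f, t, regions_needed, h, h_cg);  /* commit */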
@@ -486,11 +651,17 @@ retry:
/* New entry for end of split region */
nrg->from = t;
nrg->to = rg->to;
+
+ copy_hugetlb_cgroup_uncharge_info(nrg, rg);
+
INIT_LIST_HEAD(&nrg->link);
/* Original entry is trimmed */
rg->to = f;
+ hugetlb_cgroup_uncharge_file_region(
+ resv, rg, t - f);
+
list_add(&nrg->link, &rg->link);
nrg = NULL;
break;
@@ -498,6 +669,8 @@ retry:
if (f <= rg->from && t >= rg->to) { /* Remove entire region */
del += rg->to - rg->from;
+ hugetlb_cgroup_uncharge_file_region(resv, rg,
+ rg->to - rg->from);
list_del(&rg->link);
kfree(rg);
continue;
@@ -506,9 +679,15 @@ retry:
if (f <= rg->from) { /* Trim beginning of region */
del += t - rg->from;
rg->from = t;
+
+ hugetlb_cgroup_uncharge_file_region(resv, rg,
+ t - rg->from);
} else { /* Trim end of region */
del += rg->to - f;
rg->to = f;
+
+ hugetlb_cgroup_uncharge_file_region(resv, rg,
+ rg->to - f);
}
}
@@ -650,6 +829,25 @@ static void set_vma_private_data(struct vm_area_struct *vma,
vma->vm_private_data = (void *)value;
}
+static void
+resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
+ struct hugetlb_cgroup *h_cg,
+ struct hstate *h)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+ if (!h_cg || !h) {
+ resv_map->reservation_counter = NULL;
+ resv_map->pages_per_hpage = 0;
+ resv_map->css = NULL;
+ } else {
+ resv_map->reservation_counter =
+ &h_cg->rsvd_hugepage[hstate_index(h)];
+ resv_map->pages_per_hpage = pages_per_huge_page(h);
+ resv_map->css = &h_cg->css;
+ }
+#endif
+}
+
struct resv_map *resv_map_alloc(void)
{
struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
@@ -666,6 +864,13 @@ struct resv_map *resv_map_alloc(void)
INIT_LIST_HEAD(&resv_map->regions);
resv_map->adds_in_progress = 0;
+ /*
+ * Initialize these to 0. On shared mappings, 0's here indicate these
+ * fields don't do cgroup accounting. On private mappings, these will be
+ * re-initialized to the proper values, to indicate that hugetlb cgroup
+ * reservations are to be un-charged from here.
+ */
+ resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
INIT_LIST_HEAD(&resv_map->region_cache);
list_add(&rg->link, &resv_map->region_cache);
@@ -1009,6 +1214,9 @@ static void destroy_compound_gigantic_page(struct page *page,
struct page *p = page + 1;
atomic_set(compound_mapcount_ptr(page), 0);
+ if (hpage_pincount_available(page))
+ atomic_set(compound_pincount_ptr(page), 0);
+
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
clear_compound_head(p);
set_page_refcounted(p);
@@ -1069,6 +1277,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
1 << PG_writeback);
}
VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
+ VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
set_page_refcounted(page);
if (hstate_is_gigantic(h)) {
@@ -1180,6 +1389,8 @@ static void __free_huge_page(struct page *page)
clear_page_huge_active(page);
hugetlb_cgroup_uncharge_page(hstate_index(h),
pages_per_huge_page(h), page);
+ hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
+ pages_per_huge_page(h), page);
if (restore_reserve)
h->resv_huge_pages++;
@@ -1254,6 +1465,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
spin_lock(&hugetlb_lock);
set_hugetlb_cgroup(page, NULL);
+ set_hugetlb_cgroup_rsvd(page, NULL);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
@@ -1287,6 +1499,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
set_compound_head(p, page);
}
atomic_set(compound_mapcount_ptr(page), -1);
+
+ if (hpage_pincount_available(page))
+ atomic_set(compound_pincount_ptr(page), 0);
}
/*
@@ -1313,7 +1528,107 @@ int PageHeadHuge(struct page *page_head)
if (!PageHead(page_head))
return 0;
- return get_compound_page_dtor(page_head) == free_huge_page;
+ return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
+}
+
+/*
+ * Find address_space associated with hugetlbfs page.
+ * Upon entry page is locked and page 'was' mapped although mapped state
+ * could change. If necessary, use anon_vma to find vma and associated
+ * address space. The returned mapping may be stale, but it cannot be
+ * invalid as page lock (which is held) is required to destroy mapping.
+ */
+static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
+{
+ struct anon_vma *anon_vma;
+ pgoff_t pgoff_start, pgoff_end;
+ struct anon_vma_chain *avc;
+ struct address_space *mapping = page_mapping(hpage);
+
+ /* Simple file based mapping */
+ if (mapping)
+ return mapping;
+
+ /*
+ * Even anonymous hugetlbfs mappings are associated with an
+ * underlying hugetlbfs file (see hugetlb_file_setup in mmap
+ * code). Find a vma associated with the anonymous vma, and
+ * use the file pointer to get address_space.
+ */
+ anon_vma = page_lock_anon_vma_read(hpage);
+ if (!anon_vma)
+ return mapping; /* NULL */
+
+ /* Use first found vma */
+ pgoff_start = page_to_pgoff(hpage);
+ pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1;
+ anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
+ pgoff_start, pgoff_end) {
+ struct vm_area_struct *vma = avc->vma;
+
+ mapping = vma->vm_file->f_mapping;
+ break;
+ }
+
+ anon_vma_unlock_read(anon_vma);
+ return mapping;
+}
+
+/*
+ * Find and lock address space (mapping) in write mode.
+ *
+ * Upon entry, the page is locked which allows us to find the mapping
+ * even in the case of an anon page. However, locking order dictates
+ * the i_mmap_rwsem be acquired BEFORE the page lock. This is hugetlbfs
+ * specific. So, we first try to lock the semaphore while still holding
+ * the page lock. If this works, great! If not, we need to drop the
+ * page lock, acquire i_mmap_rwsem, and then reacquire the page lock,
+ * revalidating state along the way.
+ */
+struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+{
+ struct address_space *mapping, *mapping2;
+
+ mapping = _get_hugetlb_page_mapping(hpage);
+retry:
+ if (!mapping)
+ return mapping;
+
+ /*
+ * If no contention, take lock and return
+ */
+ if (i_mmap_trylock_write(mapping))
+ return mapping;
+
+ /*
+ * Must drop the page lock and wait on the mapping semaphore.
+ * Note: Once page lock is dropped, mapping could become invalid.
+ * As a hack, increase map count until we lock page again.
+ */
+ atomic_inc(&hpage->_mapcount);
+ unlock_page(hpage);
+ i_mmap_lock_write(mapping);
+ lock_page(hpage);
+ atomic_add_negative(-1, &hpage->_mapcount);
+
+ /* verify page is still mapped */
+ if (!page_mapped(hpage)) {
+ i_mmap_unlock_write(mapping);
+ return NULL;
+ }
+
+ /*
+ * Get address space again and verify it is the same one
+ * we locked. If not, drop lock and retry.
+ */
+ mapping2 = _get_hugetlb_page_mapping(hpage);
+ if (mapping2 != mapping) {
+ i_mmap_unlock_write(mapping);
+ mapping = mapping2;
+ goto retry;
+ }
+
+ return mapping;
}
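A hedged sketch of the expected caller pattern (the page must already be locked on entry; the body comment is a placeholder, not code from this patch):

	lock_page(hpage);
	mapping = hugetlb_page_mapping_lock_write(hpage);
	if (mapping) {
		/* ... unmap / walk page tables under i_mmap_rwsem ... */
		i_mmap_unlock_write(mapping);
	}
	unlock_page(hpage);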
pgoff_t __basepage_index(struct page *page)
@@ -1695,6 +2010,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
* of size 'delta'.
*/
static int gather_surplus_pages(struct hstate *h, int delta)
+ __must_hold(&hugetlb_lock)
{
struct list_head surplus_list;
struct page *page, *tmp;
@@ -1870,6 +2186,7 @@ static long __vma_reservation_common(struct hstate *h,
struct resv_map *resv;
pgoff_t idx;
long ret;
+ long dummy_out_regions_needed;
resv = vma_resv_map(vma);
if (!resv)
@@ -1878,20 +2195,29 @@ static long __vma_reservation_common(struct hstate *h,
idx = vma_hugecache_offset(h, vma, addr);
switch (mode) {
case VMA_NEEDS_RESV:
- ret = region_chg(resv, idx, idx + 1);
+ ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
+ /* We assume that vma_reservation_* routines always operate on
+ * 1 page, and that adding a 1 page entry to the resv map can
+ * only ever require 1 region.
+ */
+ VM_BUG_ON(dummy_out_regions_needed != 1);
break;
case VMA_COMMIT_RESV:
- ret = region_add(resv, idx, idx + 1);
+ ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
+ /* region_add calls of range 1 should never fail. */
+ VM_BUG_ON(ret < 0);
break;
case VMA_END_RESV:
- region_abort(resv, idx, idx + 1);
+ region_abort(resv, idx, idx + 1, 1);
ret = 0;
break;
case VMA_ADD_RESV:
- if (vma->vm_flags & VM_MAYSHARE)
- ret = region_add(resv, idx, idx + 1);
- else {
- region_abort(resv, idx, idx + 1);
+ if (vma->vm_flags & VM_MAYSHARE) {
+ ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
+ /* region_add calls of range 1 should never fail. */
+ VM_BUG_ON(ret < 0);
+ } else {
+ region_abort(resv, idx, idx + 1, 1);
ret = region_del(resv, idx, idx + 1);
}
break;
@@ -2002,6 +2328,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
long gbl_chg;
int ret, idx;
struct hugetlb_cgroup *h_cg;
+ bool deferred_reserve;
idx = hstate_index(h);
/*
@@ -2039,9 +2366,19 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
gbl_chg = 1;
}
+ /* If this allocation is not consuming a reservation, charge it now.
+ */
+ deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
+ if (deferred_reserve) {
+ ret = hugetlb_cgroup_charge_cgroup_rsvd(
+ idx, pages_per_huge_page(h), &h_cg);
+ if (ret)
+ goto out_subpool_put;
+ }
+
ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
if (ret)
- goto out_subpool_put;
+ goto out_uncharge_cgroup_reservation;
spin_lock(&hugetlb_lock);
/*
@@ -2064,6 +2401,14 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
/* Fall through */
}
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+ /* If allocation is not consuming a reservation, also store the
+ * hugetlb_cgroup pointer on the page.
+ */
+ if (deferred_reserve) {
+ hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
+ h_cg, page);
+ }
+
spin_unlock(&hugetlb_lock);
set_page_private(page, (unsigned long)spool);
@@ -2088,6 +2433,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
out_uncharge_cgroup:
hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
+out_uncharge_cgroup_reservation:
+ if (deferred_reserve)
+ hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
+ h_cg);
out_subpool_put:
if (map_chg || avoid_reserve)
hugepage_subpool_put_pages(spool, 1);
@@ -3188,9 +3537,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
end = vma_hugecache_offset(h, vma, vma->vm_end);
reserve = (end - start) - region_count(resv, start, end);
-
- kref_put(&resv->refs, resv_map_release);
-
+ hugetlb_cgroup_uncharge_counter(resv, start, end);
if (reserve) {
/*
* Decrement reserve counts. The global reserve count may be
@@ -3199,6 +3546,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
hugetlb_acct_memory(h, -gbl_reserve);
}
+
+ kref_put(&resv->refs, resv_map_release);
}
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
@@ -3306,6 +3655,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
int cow;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
+ struct address_space *mapping = vma->vm_file->f_mapping;
struct mmu_notifier_range range;
int ret = 0;
@@ -3316,6 +3666,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
vma->vm_start,
vma->vm_end);
mmu_notifier_invalidate_range_start(&range);
+ } else {
+ /*
+ * For shared mappings i_mmap_rwsem must be held to call
+ * huge_pte_alloc, otherwise the returned ptep could go
+ * away if part of a shared pmd and another thread calls
+ * huge_pmd_unshare.
+ */
+ i_mmap_lock_read(mapping);
}
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
@@ -3393,6 +3751,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
if (cow)
mmu_notifier_invalidate_range_end(&range);
+ else
+ i_mmap_unlock_read(mapping);
return ret;
}
@@ -3812,16 +4172,17 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
}
/*
- * Use page lock to guard against racing truncation
- * before we get page_table_lock.
+ * We cannot race with truncation due to holding i_mmap_rwsem.
+ * i_size is modified when holding i_mmap_rwsem, so check here
+ * once for faults beyond end of file.
*/
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ if (idx >= size)
+ goto out;
+
retry:
page = find_lock_page(mapping, idx);
if (!page) {
- size = i_size_read(mapping->host) >> huge_page_shift(h);
- if (idx >= size)
- goto out;
-
/*
* Check for page in userfault range
*/
@@ -3841,13 +4202,15 @@ retry:
};
/*
- * hugetlb_fault_mutex must be dropped before
- * handling userfault. Reacquire after handling
- * fault to make calling code simpler.
+ * hugetlb_fault_mutex and i_mmap_rwsem must be
+ * dropped before handling userfault. Reacquire
+ * after handling fault to make calling code simpler.
*/
hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
+ i_mmap_lock_read(mapping);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
goto out;
}
@@ -3925,10 +4288,6 @@ retry:
}
ptl = huge_pte_lock(h, mm, ptep);
- size = i_size_read(mapping->host) >> huge_page_shift(h);
- if (idx >= size)
- goto backout;
-
ret = 0;
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
@@ -4012,6 +4371,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (ptep) {
+ /*
+ * Since we hold no locks, ptep could be stale. That is
+ * OK as we are only making decisions based on content and
+ * not actually modifying content here.
+ */
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
migration_entry_wait_huge(vma, mm, ptep);
@@ -4025,14 +4389,31 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
}
+ /*
+ * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
+ * until finished with ptep. This serves two purposes:
+ * 1) It prevents huge_pmd_unshare from being called elsewhere
+ * and making the ptep no longer valid.
+ * 2) It synchronizes us with i_size modifications during truncation.
+ *
+ * ptep could already have been assigned via huge_pte_offset. That
+ * is OK, as huge_pte_alloc will return the same value unless
+ * something has changed.
+ */
mapping = vma->vm_file->f_mapping;
- idx = vma_hugecache_offset(h, vma, haddr);
+ i_mmap_lock_read(mapping);
+ ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+ if (!ptep) {
+ i_mmap_unlock_read(mapping);
+ return VM_FAULT_OOM;
+ }
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
+ idx = vma_hugecache_offset(h, vma, haddr);
hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -4120,6 +4501,7 @@ out_ptl:
}
out_mutex:
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
/*
* Generally it's safe to hold refcount during waiting page lock. But
* here we just wait to defer the next page fault to avoid busy loop and
@@ -4266,7 +4648,7 @@ out_release_nounlock:
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, unsigned long *nr_pages,
- long i, unsigned int flags, int *nonblocking)
+ long i, unsigned int flags, int *locked)
{
unsigned long pfn_offset;
unsigned long vaddr = *position;
@@ -4337,14 +4719,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
spin_unlock(ptl);
if (flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
- if (nonblocking)
- fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+ if (locked)
+ fault_flags |= FAULT_FLAG_ALLOW_RETRY |
+ FAULT_FLAG_KILLABLE;
if (flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY |
FAULT_FLAG_RETRY_NOWAIT;
if (flags & FOLL_TRIED) {
- VM_WARN_ON_ONCE(fault_flags &
- FAULT_FLAG_ALLOW_RETRY);
+ /*
+ * Note: FAULT_FLAG_ALLOW_RETRY and
+ * FAULT_FLAG_TRIED can co-exist
+ */
fault_flags |= FAULT_FLAG_TRIED;
}
ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
@@ -4354,9 +4739,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
break;
}
if (ret & VM_FAULT_RETRY) {
- if (nonblocking &&
+ if (locked &&
!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
- *nonblocking = 0;
+ *locked = 0;
*nr_pages = 0;
/*
* VM_FAULT_RETRY must not return an
@@ -4376,19 +4761,6 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = pte_page(huge_ptep_get(pte));
/*
- * Instead of doing 'try_get_page()' below in the same_page
- * loop, just check the count once here.
- */
- if (unlikely(page_count(page) <= 0)) {
- if (pages) {
- spin_unlock(ptl);
- remainder = 0;
- err = -ENOMEM;
- break;
- }
- }
-
- /*
* If subpage information not requested, update counters
* and skip the same_page loop below.
*/
@@ -4405,7 +4777,22 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
- get_page(pages[i]);
+ /*
+ * try_grab_page() should always succeed here, because:
+ * a) we hold the ptl lock, and b) we've just checked
+ * that the huge page is present in the page tables. If
+ * the huge page is present, then the tail pages must
+ * also be present. The ptl prevents the head page and
+ * tail pages from being rearranged in any way. So this
+ * page must be available at this point, unless the page
+ * refcount overflowed:
+ */
+ if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
+ spin_unlock(ptl);
+ remainder = 0;
+ err = -ENOMEM;
+ break;
+ }
}
if (vmas)
@@ -4541,11 +4928,12 @@ int hugetlb_reserve_pages(struct inode *inode,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
- long ret, chg;
+ long ret, chg, add = -1;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
struct resv_map *resv_map;
- long gbl_reserve;
+ struct hugetlb_cgroup *h_cg = NULL;
+ long gbl_reserve, regions_needed = 0;
/* This should never happen */
if (from > to) {
@@ -4575,9 +4963,10 @@ int hugetlb_reserve_pages(struct inode *inode,
*/
resv_map = inode_resv_map(inode);
- chg = region_chg(resv_map, from, to);
+ chg = region_chg(resv_map, from, to, &regions_needed);
} else {
+ /* Private mapping. */
resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
@@ -4593,6 +4982,21 @@ int hugetlb_reserve_pages(struct inode *inode,
goto out_err;
}
+ ret = hugetlb_cgroup_charge_cgroup_rsvd(
+ hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
+
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
+ /* For private mappings, the hugetlb_cgroup uncharge info hangs
+ * off the resv_map.
+ */
+ resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
+ }
+
/*
* There must be enough pages in the subpool for the mapping. If
* the subpool has a minimum size, there may be some global
@@ -4601,7 +5005,7 @@ int hugetlb_reserve_pages(struct inode *inode,
gbl_reserve = hugepage_subpool_get_pages(spool, chg);
if (gbl_reserve < 0) {
ret = -ENOSPC;
- goto out_err;
+ goto out_uncharge_cgroup;
}
/*
@@ -4610,9 +5014,7 @@ int hugetlb_reserve_pages(struct inode *inode,
*/
ret = hugetlb_acct_memory(h, gbl_reserve);
if (ret < 0) {
- /* put back original number of pages, chg */
- (void)hugepage_subpool_put_pages(spool, chg);
- goto out_err;
+ goto out_put_pages;
}
/*
@@ -4627,9 +5029,12 @@ int hugetlb_reserve_pages(struct inode *inode,
* else has to be done for private mappings here
*/
if (!vma || vma->vm_flags & VM_MAYSHARE) {
- long add = region_add(resv_map, from, to);
+ add = region_add(resv_map, from, to, regions_needed, h, h_cg);
- if (unlikely(chg > add)) {
+ if (unlikely(add < 0)) {
+ hugetlb_acct_memory(h, -gbl_reserve);
+ goto out_put_pages;
+ } else if (unlikely(chg > add)) {
/*
* pages in this range were added to the reserve
* map between region_chg and region_add. This
@@ -4639,17 +5044,29 @@ int hugetlb_reserve_pages(struct inode *inode,
*/
long rsv_adjust;
+ hugetlb_cgroup_uncharge_cgroup_rsvd(
+ hstate_index(h),
+ (chg - add) * pages_per_huge_page(h), h_cg);
+
rsv_adjust = hugepage_subpool_put_pages(spool,
chg - add);
hugetlb_acct_memory(h, -rsv_adjust);
}
}
return 0;
+out_put_pages:
+ /* put back original number of pages, chg */
+ (void)hugepage_subpool_put_pages(spool, chg);
+out_uncharge_cgroup:
+ hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
+ chg * pages_per_huge_page(h), h_cg);
out_err:
if (!vma || vma->vm_flags & VM_MAYSHARE)
- /* Don't call region_abort if region_chg failed */
- if (chg >= 0)
- region_abort(resv_map, from, to);
+ /* Only call region_abort if the region_chg succeeded but the
+ * region_add failed or didn't run.
+ */
+ if (chg >= 0 && add < 0)
+ region_abort(resv_map, from, to, regions_needed);
if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
kref_put(&resv_map->refs, resv_map_release);
return ret;
@@ -4740,7 +5157,7 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
- unsigned long check_addr = *start;
+ unsigned long check_addr;
if (!(vma->vm_flags & VM_MAYSHARE))
return;
@@ -4765,10 +5182,12 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_rwsem section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
+ * code much cleaner.
+ *
+ * This routine must be called with i_mmap_rwsem held in at least read mode.
+ * For hugetlbfs, this prevents removal of any page table entries associated
+ * with the address space. This is important as we are setting up sharing
+ * based on existing page table entries (mappings).
*/
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
@@ -4785,7 +5204,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
if (!vma_shareable(vma, addr))
return (pte_t *)pmd_alloc(mm, pud, addr);
- i_mmap_lock_read(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
if (svma == vma)
continue;
@@ -4815,7 +5233,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
- i_mmap_unlock_read(mapping);
return pte;
}
@@ -4826,7 +5243,7 @@ out:
* indicated by page_count > 1, unmap is achieved by clearing pud and
* decrementing the ref count. If count == 1, the pte page is not shared.
*
- * called with page table lock held.
+ * Called with page table lock held and i_mmap_rwsem held in write mode.
*
* returns: 1 successfully unmapped a shared pte page
* 0 the underlying pte page is not shared, or it is the last user
@@ -4965,6 +5382,12 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
struct page *page = NULL;
spinlock_t *ptl;
pte_t pte;
+
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+ (FOLL_PIN | FOLL_GET)))
+ return NULL;
+
retry:
ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
@@ -4977,8 +5400,18 @@ retry:
pte = huge_ptep_get((pte_t *)pmd);
if (pte_present(pte)) {
page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
- if (flags & FOLL_GET)
- get_page(page);
+ /*
+ * try_grab_page() should always succeed here, because: a) we
+ * hold the pmd (ptl) lock, and b) we've just checked that the
+ * huge pmd (head) page is present in the page tables. The ptl
+ * prevents the head page and tail pages from being rearranged
+ * in any way. So this page must be available at this point,
+ * unless the page refcount overflowed:
+ */
+ if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
+ page = NULL;
+ goto out;
+ }
} else {
if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
@@ -4999,7 +5432,7 @@ struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags)
{
- if (flags & FOLL_GET)
+ if (flags & (FOLL_GET | FOLL_PIN))
return NULL;
return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
@@ -5008,7 +5441,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
struct page * __weak
follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
{
- if (flags & FOLL_GET)
+ if (flags & (FOLL_GET | FOLL_PIN))
return NULL;
return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 5280bcf459af..aabf65d4d91b 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -23,29 +23,6 @@
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
-enum hugetlb_memory_event {
- HUGETLB_MAX,
- HUGETLB_NR_MEMORY_EVENTS,
-};
-
-struct hugetlb_cgroup {
- struct cgroup_subsys_state css;
-
- /*
- * the counter to account for hugepages from hugetlb.
- */
- struct page_counter hugepage[HUGE_MAX_HSTATE];
-
- atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
- atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
-
- /* Handle for "hugetlb.events" */
- struct cgroup_file events_file[HUGE_MAX_HSTATE];
-
- /* Handle for "hugetlb.events.local" */
- struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
-};
-
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_IDX(val) (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
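These helpers pack an hstate index and a resource attribute into a single cftype->private word. A minimal illustration, using RES_LIMIT from the enum extended later in this patch (values chosen for demonstration):

	/* Pack hstate index 1 with attribute RES_LIMIT: */
	int private = MEMFILE_PRIVATE(1, RES_LIMIT); /* (1 << 16) | RES_LIMIT */

	MEMFILE_IDX(private);  /* -> 1, selects hstates[1] */
	MEMFILE_ATTR(private); /* -> RES_LIMIT, selects the counter field */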
@@ -55,6 +32,27 @@ struct hugetlb_cgroup {
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
+static inline struct page_counter *
+__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
+ bool rsvd)
+{
+ if (rsvd)
+ return &h_cg->rsvd_hugepage[idx];
+ return &h_cg->hugepage[idx];
+}
+
+static inline struct page_counter *
+hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
+{
+ return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
+}
+
+static inline struct page_counter *
+hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
+{
+ return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
+}
+
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
@@ -83,8 +81,12 @@ static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
int idx;
for (idx = 0; idx < hugetlb_max_hstate; idx++) {
- if (page_counter_read(&h_cg->hugepage[idx]))
+ if (page_counter_read(
+ hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) ||
+ page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd(
+ h_cg, idx))) {
return true;
+ }
}
return false;
}
@@ -95,18 +97,34 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
int idx;
for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
- struct page_counter *counter = &h_cgroup->hugepage[idx];
- struct page_counter *parent = NULL;
+ struct page_counter *fault_parent = NULL;
+ struct page_counter *rsvd_parent = NULL;
unsigned long limit;
int ret;
- if (parent_h_cgroup)
- parent = &parent_h_cgroup->hugepage[idx];
- page_counter_init(counter, parent);
+ if (parent_h_cgroup) {
+ fault_parent = hugetlb_cgroup_counter_from_cgroup(
+ parent_h_cgroup, idx);
+ rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
+ parent_h_cgroup, idx);
+ }
+ page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
+ idx),
+ fault_parent);
+ page_counter_init(
+ hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
+ rsvd_parent);
limit = round_down(PAGE_COUNTER_MAX,
1 << huge_page_order(&hstates[idx]));
- ret = page_counter_set_max(counter, limit);
+
+ ret = page_counter_set_max(
+ hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
+ limit);
+ VM_BUG_ON(ret);
+ ret = page_counter_set_max(
+ hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
+ limit);
VM_BUG_ON(ret);
}
}
@@ -136,7 +154,6 @@ static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
kfree(h_cgroup);
}
-
/*
* Should be called with hugetlb_lock held.
* Since we are holding hugetlb_lock, pages cannot get moved from
@@ -213,8 +230,9 @@ static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
!hugetlb_cgroup_is_root(hugetlb));
}
-int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup **ptr)
+static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr,
+ bool rsvd)
{
int ret = 0;
struct page_counter *counter;
@@ -237,50 +255,103 @@ again:
}
rcu_read_unlock();
- if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
- &counter)) {
+ if (!page_counter_try_charge(
+ __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
+ nr_pages, &counter)) {
ret = -ENOMEM;
hugetlb_event(h_cg, idx, HUGETLB_MAX);
+ css_put(&h_cg->css);
+ goto done;
}
- css_put(&h_cg->css);
+ /* Reservations take a reference to the css because they do not get
+ * reparented.
+ */
+ if (!rsvd)
+ css_put(&h_cg->css);
done:
*ptr = h_cg;
return ret;
}
+int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
+}
+
+int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
+}
+
/* Should be called with hugetlb_lock held */
-void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg,
- struct page *page)
+static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page, bool rsvd)
{
if (hugetlb_cgroup_disabled() || !h_cg)
return;
- set_hugetlb_cgroup(page, h_cg);
+ __set_hugetlb_cgroup(page, h_cg, rsvd);
return;
}
+void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+ __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
+}
+
+void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+ __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
+}
+
/*
* Should be called with hugetlb_lock held
*/
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page)
+static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page, bool rsvd)
{
struct hugetlb_cgroup *h_cg;
if (hugetlb_cgroup_disabled())
return;
lockdep_assert_held(&hugetlb_lock);
- h_cg = hugetlb_cgroup_from_page(page);
+ h_cg = __hugetlb_cgroup_from_page(page, rsvd);
if (unlikely(!h_cg))
return;
- set_hugetlb_cgroup(page, NULL);
- page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
+ __set_hugetlb_cgroup(page, NULL, rsvd);
+
+ page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
+ rsvd),
+ nr_pages);
+
+ if (rsvd)
+ css_put(&h_cg->css);
+
return;
}
-void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg)
+void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+ __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+}
+
+void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+ __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+}
+
+static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ bool rsvd)
{
if (hugetlb_cgroup_disabled() || !h_cg)
return;
@@ -288,34 +359,91 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
return;
- page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
- return;
+ page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
+ rsvd),
+ nr_pages);
+
+ if (rsvd)
+ css_put(&h_cg->css);
+}
+
+void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+ __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
+}
+
+void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+ __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
+}
+
+void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
+ unsigned long end)
+{
+ if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
+ !resv->css)
+ return;
+
+ page_counter_uncharge(resv->reservation_counter,
+ (end - start) * resv->pages_per_hpage);
+ css_put(resv->css);
+}
+
+void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages)
+{
+ if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
+ return;
+
+ if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&
+ !resv->reservation_counter) {
+ page_counter_uncharge(rg->reservation_counter,
+ nr_pages * resv->pages_per_hpage);
+ css_put(rg->css);
+ }
}
enum {
RES_USAGE,
+ RES_RSVD_USAGE,
RES_LIMIT,
+ RES_RSVD_LIMIT,
RES_MAX_USAGE,
+ RES_RSVD_MAX_USAGE,
RES_FAILCNT,
+ RES_RSVD_FAILCNT,
};
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
struct page_counter *counter;
+ struct page_counter *rsvd_counter;
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
+ rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];
switch (MEMFILE_ATTR(cft->private)) {
case RES_USAGE:
return (u64)page_counter_read(counter) * PAGE_SIZE;
+ case RES_RSVD_USAGE:
+ return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
case RES_LIMIT:
return (u64)counter->max * PAGE_SIZE;
+ case RES_RSVD_LIMIT:
+ return (u64)rsvd_counter->max * PAGE_SIZE;
case RES_MAX_USAGE:
return (u64)counter->watermark * PAGE_SIZE;
+ case RES_RSVD_MAX_USAGE:
+ return (u64)rsvd_counter->watermark * PAGE_SIZE;
case RES_FAILCNT:
return counter->failcnt;
+ case RES_RSVD_FAILCNT:
+ return rsvd_counter->failcnt;
default:
BUG();
}
@@ -337,10 +465,16 @@ static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
1 << huge_page_order(&hstates[idx]));
switch (MEMFILE_ATTR(cft->private)) {
+ case RES_RSVD_USAGE:
+ counter = &h_cg->rsvd_hugepage[idx];
+ fallthrough;
case RES_USAGE:
val = (u64)page_counter_read(counter);
seq_printf(seq, "%llu\n", val * PAGE_SIZE);
break;
+ case RES_RSVD_LIMIT:
+ counter = &h_cg->rsvd_hugepage[idx];
+ fallthrough;
case RES_LIMIT:
val = (u64)counter->max;
if (val == limit)
@@ -364,6 +498,7 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
int ret, idx;
unsigned long nr_pages;
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
+ bool rsvd = false;
if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
return -EINVAL;
@@ -377,9 +512,14 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));
switch (MEMFILE_ATTR(of_cft(of)->private)) {
+ case RES_RSVD_LIMIT:
+ rsvd = true;
+ fallthrough;
case RES_LIMIT:
mutex_lock(&hugetlb_limit_mutex);
- ret = page_counter_set_max(&h_cg->hugepage[idx], nr_pages);
+ ret = page_counter_set_max(
+ __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
+ nr_pages);
mutex_unlock(&hugetlb_limit_mutex);
break;
default:
@@ -405,18 +545,25 @@ static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
int ret = 0;
- struct page_counter *counter;
+ struct page_counter *counter, *rsvd_counter;
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
+ rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];
switch (MEMFILE_ATTR(of_cft(of)->private)) {
case RES_MAX_USAGE:
page_counter_reset_watermark(counter);
break;
+ case RES_RSVD_MAX_USAGE:
+ page_counter_reset_watermark(rsvd_counter);
+ break;
case RES_FAILCNT:
counter->failcnt = 0;
break;
+ case RES_RSVD_FAILCNT:
+ rsvd_counter->failcnt = 0;
+ break;
default:
ret = -EINVAL;
break;
@@ -471,7 +618,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
struct hstate *h = &hstates[idx];
/* format the size */
- mem_fmt(buf, 32, huge_page_size(h));
+ mem_fmt(buf, sizeof(buf), huge_page_size(h));
/* Add the limit file */
cft = &h->cgroup_files_dfl[0];
@@ -481,15 +628,30 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
cft->write = hugetlb_cgroup_write_dfl;
cft->flags = CFTYPE_NOT_ON_ROOT;
- /* Add the current usage file */
+ /* Add the reservation limit file */
cft = &h->cgroup_files_dfl[1];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
+ cft->seq_show = hugetlb_cgroup_read_u64_max;
+ cft->write = hugetlb_cgroup_write_dfl;
+ cft->flags = CFTYPE_NOT_ON_ROOT;
+
+ /* Add the current usage file */
+ cft = &h->cgroup_files_dfl[2];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
cft->seq_show = hugetlb_cgroup_read_u64_max;
cft->flags = CFTYPE_NOT_ON_ROOT;
+ /* Add the current reservation usage file */
+ cft = &h->cgroup_files_dfl[3];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
+ cft->seq_show = hugetlb_cgroup_read_u64_max;
+ cft->flags = CFTYPE_NOT_ON_ROOT;
+
/* Add the events file */
- cft = &h->cgroup_files_dfl[2];
+ cft = &h->cgroup_files_dfl[4];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
cft->private = MEMFILE_PRIVATE(idx, 0);
cft->seq_show = hugetlb_events_show;
@@ -497,7 +659,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
cft->flags = CFTYPE_NOT_ON_ROOT;
/* Add the events.local file */
- cft = &h->cgroup_files_dfl[3];
+ cft = &h->cgroup_files_dfl[5];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
cft->private = MEMFILE_PRIVATE(idx, 0);
cft->seq_show = hugetlb_events_local_show;
@@ -506,7 +668,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
cft->flags = CFTYPE_NOT_ON_ROOT;
/* NULL terminate the last cft */
- cft = &h->cgroup_files_dfl[4];
+ cft = &h->cgroup_files_dfl[6];
memset(cft, 0, sizeof(*cft));
WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
@@ -520,7 +682,7 @@ static void __init __hugetlb_cgroup_file_legacy_init(int idx)
struct hstate *h = &hstates[idx];
/* format the size */
- mem_fmt(buf, 32, huge_page_size(h));
+ mem_fmt(buf, sizeof(buf), huge_page_size(h));
/* Add the limit file */
cft = &h->cgroup_files_legacy[0];
@@ -529,28 +691,55 @@ static void __init __hugetlb_cgroup_file_legacy_init(int idx)
cft->read_u64 = hugetlb_cgroup_read_u64;
cft->write = hugetlb_cgroup_write_legacy;
- /* Add the usage file */
+ /* Add the reservation limit file */
cft = &h->cgroup_files_legacy[1];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
+ cft->read_u64 = hugetlb_cgroup_read_u64;
+ cft->write = hugetlb_cgroup_write_legacy;
+
+ /* Add the usage file */
+ cft = &h->cgroup_files_legacy[2];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
cft->read_u64 = hugetlb_cgroup_read_u64;
+ /* Add the reservation usage file */
+ cft = &h->cgroup_files_legacy[3];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
+ cft->read_u64 = hugetlb_cgroup_read_u64;
+
/* Add the MAX usage file */
- cft = &h->cgroup_files_legacy[2];
+ cft = &h->cgroup_files_legacy[4];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
cft->write = hugetlb_cgroup_reset;
cft->read_u64 = hugetlb_cgroup_read_u64;
+ /* Add the MAX reservation usage file */
+ cft = &h->cgroup_files_legacy[5];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
+ cft->write = hugetlb_cgroup_reset;
+ cft->read_u64 = hugetlb_cgroup_read_u64;
+
/* Add the failcntfile */
- cft = &h->cgroup_files_legacy[3];
+ cft = &h->cgroup_files_legacy[6];
snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
- cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
+ cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
+ cft->write = hugetlb_cgroup_reset;
+ cft->read_u64 = hugetlb_cgroup_read_u64;
+
+ /* Add the reservation failcntfile */
+ cft = &h->cgroup_files_legacy[7];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
cft->write = hugetlb_cgroup_reset;
cft->read_u64 = hugetlb_cgroup_read_u64;
/* NULL terminate the last cft */
- cft = &h->cgroup_files_legacy[4];
+ cft = &h->cgroup_files_legacy[8];
memset(cft, 0, sizeof(*cft));
WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
@@ -585,6 +774,7 @@ void __init hugetlb_cgroup_file_init(void)
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
struct hugetlb_cgroup *h_cg;
+ struct hugetlb_cgroup *h_cg_rsvd;
struct hstate *h = page_hstate(oldhpage);
if (hugetlb_cgroup_disabled())
@@ -593,10 +783,13 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
spin_lock(&hugetlb_lock);
h_cg = hugetlb_cgroup_from_page(oldhpage);
+ h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
set_hugetlb_cgroup(oldhpage, NULL);
+ set_hugetlb_cgroup_rsvd(oldhpage, NULL);
/* move the h_cg details to new cgroup */
set_hugetlb_cgroup(newhpage, h_cg);
+ set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
list_move(&newhpage->lru, &h->hugepage_activelist);
spin_unlock(&hugetlb_lock);
return;
diff --git a/mm/internal.h b/mm/internal.h
index 3cf20ab3ca01..b5634e78f01d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,6 +63,29 @@ static inline unsigned long ra_submit(struct file_ra_state *ra,
ra->start, ra->size, ra->async_size);
}
+/**
+ * page_evictable - test whether a page is evictable
+ * @page: the page to test
+ *
+ * Test whether a page is evictable, i.e. whether it should be placed on the
+ * active/inactive lists rather than on the unevictable list.
+ *
+ * Reasons page might not be evictable:
+ * (1) page's mapping marked unevictable
+ * (2) page is part of an mlocked VMA
+ *
+ */
+static inline bool page_evictable(struct page *page)
+{
+ bool ret;
+
+ /* Prevent address_space of inode and swap cache from being freed */
+ rcu_read_lock();
+ ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
+ rcu_read_unlock();
+ return ret;
+}
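A hedged usage sketch of the helper above (the list names here are hypothetical, not from this patch):

	/* Hypothetical reclaim-path placement decision: */
	if (page_evictable(page))
		list_move(&page->lru, &inactive_list);    /* assumed list */
	else
		list_move(&page->lru, &unevictable_list); /* assumed list */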
+
/*
* Turn a non-refcounted page (->_refcount == 0) into refcounted with
* a count of one.
@@ -157,6 +180,8 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
}
extern int __isolate_free_page(struct page *page, unsigned int order);
+extern void __putback_isolated_page(struct page *page, unsigned int order,
+ int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
@@ -206,6 +231,7 @@ struct compact_control {
bool whole_zone; /* Whole zone should/has been scanned */
bool contended; /* Signal lock or sched contention */
bool rescan; /* Rescanning the same pageblock */
+ bool alloc_contig; /* alloc_contig_range allocation */
};
/*
@@ -377,10 +403,10 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
/*
* FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
* anything, so we only pin the file and drop the mmap_sem if only
- * FAULT_FLAG_ALLOW_RETRY is set.
+ * FAULT_FLAG_ALLOW_RETRY is set and this is the first attempt.
*/
- if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
- FAULT_FLAG_ALLOW_RETRY) {
+ if (fault_flag_allow_retry_first(flags) &&
+ !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
fpin = get_file(vmf->vma->vm_file);
up_read(&vmf->vma->vm_mm->mmap_sem);
}
@@ -532,7 +558,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
#else
#define ALLOC_NOFRAGMENT 0x0
#endif
-#define ALLOC_KSWAPD 0x200 /* allow waking of kswapd */
+#define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
enum ttu_flags;
struct tlbflush_unmap_batch;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6aa51723b92b..2906358e42f0 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -15,7 +15,6 @@
*/
#include <linux/export.h>
-#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
@@ -42,28 +41,6 @@
#include "kasan.h"
#include "../slab.h"
-static inline int in_irqentry_text(unsigned long ptr)
-{
- return (ptr >= (unsigned long)&__irqentry_text_start &&
- ptr < (unsigned long)&__irqentry_text_end) ||
- (ptr >= (unsigned long)&__softirqentry_text_start &&
- ptr < (unsigned long)&__softirqentry_text_end);
-}
-
-static inline unsigned int filter_irq_stacks(unsigned long *entries,
- unsigned int nr_entries)
-{
- unsigned int i;
-
- for (i = 0; i < nr_entries; i++) {
- if (in_irqentry_text(entries[i])) {
- /* Include the irqentry function into the stack. */
- return i + 1;
- }
- }
- return nr_entries;
-}
-
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
@@ -105,7 +82,8 @@ EXPORT_SYMBOL(__kasan_check_write);
#undef memset
void *memset(void *addr, int c, size_t len)
{
- check_memory_region((unsigned long)addr, len, true, _RET_IP_);
+ if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
+ return NULL;
return __memset(addr, c, len);
}
@@ -114,8 +92,9 @@ void *memset(void *addr, int c, size_t len)
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
- check_memory_region((unsigned long)src, len, false, _RET_IP_);
- check_memory_region((unsigned long)dest, len, true, _RET_IP_);
+ if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
+ !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
return __memmove(dest, src, len);
}
@@ -124,8 +103,9 @@ void *memmove(void *dest, const void *src, size_t len)
#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
- check_memory_region((unsigned long)src, len, false, _RET_IP_);
- check_memory_region((unsigned long)dest, len, true, _RET_IP_);
+ if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
+ !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
return __memcpy(dest, src, len);
}
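The net effect of these three interceptor changes is that an invalid range now aborts the string operation instead of being carried out after the report. A hedged sketch of the new behaviour (buffer sizes and the reaction are chosen for illustration):

	char dst[8];
	char src[16] = { 0 };

	/* With this patch, KASAN reports the bad 16-byte write to 'dst' and
	 * memcpy() returns NULL without touching memory; previously the
	 * copy still proceeded after the report. */
	if (!memcpy(dst, src, sizeof(src)))
		pr_warn("copy rejected by KASAN\n"); /* hypothetical reaction */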
@@ -634,12 +614,21 @@ void kasan_free_shadow(const struct vm_struct *vm)
#endif
extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
+extern bool report_enabled(void);
-void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
+bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
unsigned long flags = user_access_save();
- __kasan_report(addr, size, is_write, ip);
+ bool ret = false;
+
+ if (likely(report_enabled())) {
+ __kasan_report(addr, size, is_write, ip);
+ ret = true;
+ }
+
user_access_restore(flags);
+
+ return ret;
}
#ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 616f9dd82d12..56ff8885fe2e 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -173,17 +173,18 @@ static __always_inline bool check_memory_region_inline(unsigned long addr,
if (unlikely(size == 0))
return true;
+ if (unlikely(addr + size < addr))
+ return !kasan_report(addr, size, write, ret_ip);
+
if (unlikely((void *)addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
- kasan_report(addr, size, write, ret_ip);
- return false;
+ return !kasan_report(addr, size, write, ret_ip);
}
if (likely(!memory_is_poisoned(addr, size)))
return true;
- kasan_report(addr, size, write, ret_ip);
- return false;
+ return !kasan_report(addr, size, write, ret_ip);
}
bool check_memory_region(unsigned long addr, size_t size, bool write,
diff --git a/mm/kasan/generic_report.c b/mm/kasan/generic_report.c
index 2d97efd4954f..e200acb2d292 100644
--- a/mm/kasan/generic_report.c
+++ b/mm/kasan/generic_report.c
@@ -110,6 +110,17 @@ static const char *get_wild_bug_type(struct kasan_access_info *info)
const char *get_bug_type(struct kasan_access_info *info)
{
+ /*
+ * If access_size is a negative number cast to size_t, it is
+ * reasonable to classify the access as out-of-bounds.
+ *
+ * A negative number cast to size_t becomes a huge value (larger
+ * than ULONG_MAX/2), so addr + size wraps around past addr and
+ * the access qualifies as out-of-bounds.
+ */
+ if (info->access_addr + info->access_size < info->access_addr)
+ return "out-of-bounds";
+
if (addr_has_shadow(info->access_addr))
return get_shadow_bug_type(info);
return get_wild_bug_type(info);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3a083274628e..e8f37199d885 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -153,7 +153,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
void *find_first_bad_addr(void *addr, size_t size);
const char *get_bug_type(struct kasan_access_info *info);
-void kasan_report(unsigned long addr, size_t size,
+bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 5ef9f24f566b..80f23c9da6b0 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -92,8 +92,16 @@ static void end_report(unsigned long *flags)
pr_err("==================================================================\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irqrestore(&report_lock, *flags);
- if (panic_on_warn)
+ if (panic_on_warn) {
+ /*
+ * This thread may hit another WARN() in the panic path.
+ * Resetting this prevents additional WARN() from panicking the
+ * system on this thread. Other threads are blocked by the
+ * panic_mutex in panic().
+ */
+ panic_on_warn = 0;
panic("panic_on_warn set ...\n");
+ }
kasan_enable_current();
}
@@ -446,7 +454,7 @@ static void print_shadow_for_address(const void *addr)
}
}
-static bool report_enabled(void)
+bool report_enabled(void)
{
if (current->kasan_depth)
return false;
@@ -478,9 +486,6 @@ void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned lon
void *untagged_addr;
unsigned long flags;
- if (likely(!report_enabled()))
- return;
-
disable_trace_on_warning();
tagged_addr = (void *)addr;
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 0e987c9ca052..25b7734e7013 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -86,6 +86,9 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
if (unlikely(size == 0))
return true;
+ if (unlikely(addr + size < addr))
+ return !kasan_report(addr, size, write, ret_ip);
+
tag = get_tag((const void *)addr);
/*
@@ -111,15 +114,13 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
untagged_addr = reset_tag((const void *)addr);
if (unlikely(untagged_addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
- kasan_report(addr, size, write, ret_ip);
- return false;
+ return !kasan_report(addr, size, write, ret_ip);
}
shadow_first = kasan_mem_to_shadow(untagged_addr);
shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
if (*shadow != tag) {
- kasan_report(addr, size, write, ret_ip);
- return false;
+ return !kasan_report(addr, size, write, ret_ip);
}
}
diff --git a/mm/kasan/tags_report.c b/mm/kasan/tags_report.c
index 969ae08f59d7..bee43717d6f0 100644
--- a/mm/kasan/tags_report.c
+++ b/mm/kasan/tags_report.c
@@ -60,6 +60,17 @@ const char *get_bug_type(struct kasan_access_info *info)
}
#endif
+ /*
+	 * If access_size is a negative number, it makes sense to report
+	 * it as an out-of-bounds bug.
+	 *
+	 * Casting a negative number to size_t produces a very large
+	 * value (larger than ULONG_MAX/2), so such an access qualifies
+	 * as out-of-bounds.
+ */
+ if (info->access_addr + info->access_size < info->access_addr)
+ return "out-of-bounds";
+
return "invalid-access";
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b679908743cb..99d77ffb79c2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -29,6 +29,7 @@ enum scan_result {
SCAN_PMD_NULL,
SCAN_EXCEED_NONE_PTE,
SCAN_PTE_NON_PRESENT,
+ SCAN_PTE_UFFD_WP,
SCAN_PAGE_RO,
SCAN_LACK_REFERENCED_PAGE,
SCAN_PAGE_NULL,
@@ -308,8 +309,6 @@ struct attribute_group khugepaged_attr_group = {
};
#endif /* CONFIG_SYSFS */
-#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
-
int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
@@ -416,14 +415,12 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
(IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
vma->vm_file &&
(vm_flags & VM_DENYWRITE))) {
- if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
- return false;
return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
HPAGE_PMD_NR);
}
if (!vma->anon_vma || vma->vm_ops)
return false;
- if (is_vma_temporary_stack(vma))
+ if (vma_is_temporary_stack(vma))
return false;
return !(vm_flags & VM_NO_KHUGEPAGED);
}
@@ -515,7 +512,7 @@ void __khugepaged_exit(struct mm_struct *mm)
static void release_pte_page(struct page *page)
{
- dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
+ dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_lru(page));
unlock_page(page);
putback_lru_page(page);
}
@@ -615,7 +612,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
goto out;
}
inc_node_page_state(page,
- NR_ISOLATED_ANON + page_is_file_cache(page));
+ NR_ISOLATED_ANON + page_is_file_lru(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -1141,6 +1138,15 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
pte_t pteval = *_pte;
if (is_swap_pte(pteval)) {
if (++unmapped <= khugepaged_max_ptes_swap) {
+ /*
+ * Always be strict with uffd-wp
+ * enabled swap entries. Please see
+ * comment below for pte_uffd_wp().
+ */
+ if (pte_swp_uffd_wp(pteval)) {
+ result = SCAN_PTE_UFFD_WP;
+ goto out_unmap;
+ }
continue;
} else {
result = SCAN_EXCEED_SWAP_PTE;
@@ -1160,6 +1166,19 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
result = SCAN_PTE_NON_PRESENT;
goto out_unmap;
}
+ if (pte_uffd_wp(pteval)) {
+ /*
+ * Don't collapse the page if any of the small
+ * PTEs are armed with uffd write protection.
+ * Here we can also mark the new huge pmd as
+ * write protected if any of the small ones is
+			 * marked, but that could bring unknown
+			 * userfault messages that fall outside of
+ * the registered range. So, just be simple.
+ */
+ result = SCAN_PTE_UFFD_WP;
+ goto out_unmap;
+ }
if (pte_write(pteval))
writable = true;
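
A toy model of the bail-out above (userspace C; PTES_PER_PMD and the result codes are assumptions mirroring the kernel names): one huge pmd cannot carry per-4k write-protect state, so a single armed entry vetoes the collapse.

#include <stdbool.h>
#include <stdio.h>

#define PTES_PER_PMD 512

enum scan_result { SCAN_SUCCEED, SCAN_PTE_UFFD_WP };

/* Refuse collapse if any small entry is uffd-wp armed. */
static enum scan_result scan(const bool uffd_wp[PTES_PER_PMD])
{
	for (int i = 0; i < PTES_PER_PMD; i++)
		if (uffd_wp[i])
			return SCAN_PTE_UFFD_WP;
	return SCAN_SUCCEED;
}

int main(void)
{
	bool ptes[PTES_PER_PMD] = { [7] = true };

	printf("%d\n", scan(ptes)); /* 1: refuse collapse */
	return 0;
}
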
@@ -1260,7 +1279,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
}
}
-#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+#ifdef CONFIG_SHMEM
/*
* Notify khugepaged that given addr of the mm is pte-mapped THP. Then
* khugepaged should try to collapse the page table.
@@ -1975,6 +1994,8 @@ skip:
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+ if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
+ goto skip;
while (khugepaged_scan.address < hend) {
int ret;
@@ -1986,14 +2007,10 @@ skip:
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
- struct file *file;
+ struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma,
khugepaged_scan.address);
- if (shmem_file(vma->vm_file)
- && !shmem_huge_enabled(vma))
- goto skip;
- file = get_file(vma->vm_file);
up_read(&mm->mmap_sem);
ret = 1;
khugepaged_scan_file(mm, file, pgoff, hpage);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 3a4259eeb5a0..e362dc3d2028 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1947,7 +1947,7 @@ void __init kmemleak_init(void)
create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
KMEMLEAK_GREY, GFP_ATOMIC);
/* only register .data..ro_after_init if not within .data */
- if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+ if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
create_object((unsigned long)__start_ro_after_init,
__end_ro_after_init - __start_ro_after_init,
KMEMLEAK_GREY, GFP_ATOMIC);
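
The added `&` matters only to the compiler: the section bounds are declared as arrays, and clang warns that comparing the bare array names is a tautological pointer comparison. Taking the address yields the same value while keeping the intent explicit. A hedged userspace sketch (start_marker is a stand-in for a real linker symbol):

#include <stdio.h>

char start_marker[1]; /* stand-in for a linker-provided section bound */

int main(void)
{
	/* Identical addresses; only the spelling differs. */
	printf("%d\n", (void *)start_marker == (void *)&start_marker);
	return 0;
}
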
diff --git a/mm/ksm.c b/mm/ksm.c
index d17c7d57d0d8..a558da9e7177 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -455,7 +455,7 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
/*
* We use break_ksm to break COW on a ksm page: it's a stripped down
*
- * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
+ * if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
* put_page(page);
*
* but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
@@ -2813,8 +2813,7 @@ static int ksm_memory_callback(struct notifier_block *self,
*/
ksm_check_stable_tree(mn->start_pfn,
mn->start_pfn + mn->nr_pages);
- /* fallthrough */
-
+ fallthrough;
case MEM_CANCEL_OFFLINE:
mutex_lock(&ksm_thread_mutex);
ksm_run &= ~KSM_RUN_OFFLINE;
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 0f1f6b06b7f3..4d5294c39bba 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -57,16 +57,6 @@ list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
return &nlru->lru;
}
-static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
-{
- struct page *page;
-
- if (!memcg_kmem_enabled())
- return NULL;
- page = virt_to_head_page(ptr);
- return memcg_from_slab_page(page);
-}
-
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
struct mem_cgroup **memcg_ptr)
@@ -77,7 +67,7 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
if (!nlru->memcg_lrus)
goto out;
- memcg = mem_cgroup_from_kmem(ptr);
+ memcg = mem_cgroup_from_obj(ptr);
if (!memcg)
goto out;
@@ -233,7 +223,7 @@ restart:
switch (ret) {
case LRU_REMOVED_RETRY:
assert_spin_locked(&nlru->lock);
- /* fall through */
+ fallthrough;
case LRU_REMOVED:
isolated++;
nlru->nr_items--;
diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
index 71070dda9643..2c7d03675903 100644
--- a/mm/mapping_dirty_helpers.c
+++ b/mm/mapping_dirty_helpers.c
@@ -111,26 +111,60 @@ static int clean_record_pte(pte_t *pte, unsigned long addr,
return 0;
}
-/* wp_clean_pmd_entry - The pagewalk pmd callback. */
+/*
+ * wp_clean_pmd_entry - The pagewalk pmd callback.
+ *
+ * Dirty-tracking should take place on the PTE level, so
+ * WARN() if encountering a dirty huge pmd.
+ * Furthermore, never split huge pmds, since that currently
+ * causes dirty info loss. The pagefault handler should do
+ * that if needed.
+ */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- /* Dirty-tracking should be handled on the pte level */
pmd_t pmdval = pmd_read_atomic(pmd);
+ if (!pmd_trans_unstable(&pmdval))
+ return 0;
+
+ if (pmd_none(pmdval)) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
+
+ /* Huge pmd, present or migrated */
+ walk->action = ACTION_CONTINUE;
if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
return 0;
}
-/* wp_clean_pud_entry - The pagewalk pud callback. */
+/*
+ * wp_clean_pud_entry - The pagewalk pud callback.
+ *
+ * Dirty-tracking should take place on the PTE level, so
+ * WARN() if encountering a dirty huge pud.
+ * Furthermore, never split huge puds, since that currently
+ * causes dirty info loss. The pagefault handler should do
+ * that if needed.
+ */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- /* Dirty-tracking should be handled on the pte level */
pud_t pudval = READ_ONCE(*pud);
+ if (!pud_trans_unstable(&pudval))
+ return 0;
+
+ if (pud_none(pudval)) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
+
+ /* Huge pud */
+ walk->action = ACTION_CONTINUE;
if (pud_trans_huge(pudval) || pud_devmap(pudval))
WARN_ON(pud_write(pudval) || pud_dirty(pudval));
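
Both callbacks now reduce to the same three-way decision; a sketch of that logic in plain C (names assumed, not the kernel API): a stable entry means descend to the PTE level, a none entry means the walk raced with a clear and must re-read, and a huge entry is skipped without splitting so no dirty information is lost.

#include <stdio.h>

enum action { ACTION_SUBTREE, ACTION_CONTINUE, ACTION_AGAIN };

static enum action classify(int stable, int none)
{
	if (stable)
		return ACTION_SUBTREE;   /* dirty-track on the PTE level */
	if (none)
		return ACTION_AGAIN;     /* entry changed under us: retry */
	return ACTION_CONTINUE;          /* huge entry: leave it intact */
}

int main(void)
{
	printf("%d %d %d\n", classify(1, 0), classify(0, 1), classify(0, 0));
	return 0;
}
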
diff --git a/mm/memblock.c b/mm/memblock.c
index eba94ee3de0b..4d06bbaded0f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1698,7 +1698,7 @@ static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
- phys_addr_t max_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr;
if (!limit)
return;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7ddf91c4295f..05b4ec2c6499 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -334,7 +334,7 @@ static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
if (!old)
return 0;
- new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
+ new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
if (!new)
return -ENOMEM;
@@ -378,7 +378,7 @@ static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
mutex_lock(&memcg_shrinker_map_mutex);
size = memcg_shrinker_map_size;
for_each_node(nid) {
- map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
+ map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
if (!map) {
memcg_free_shrinker_maps(memcg);
ret = -ENOMEM;
@@ -656,7 +656,7 @@ retry:
*/
__mem_cgroup_remove_exceeded(mz, mctz);
if (!soft_limit_excess(mz->memcg) ||
- !css_tryget_online(&mz->memcg->css))
+ !css_tryget(&mz->memcg->css))
goto retry;
done:
return mz;
@@ -759,13 +759,12 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
- struct page *page = virt_to_head_page(p);
- pg_data_t *pgdat = page_pgdat(page);
+ pg_data_t *pgdat = page_pgdat(virt_to_page(p));
struct mem_cgroup *memcg;
struct lruvec *lruvec;
rcu_read_lock();
- memcg = memcg_from_slab_page(page);
+ memcg = mem_cgroup_from_obj(p);
/* Untracked pages have no memcg, no lruvec. Update only the node */
if (!memcg || memcg == root_mem_cgroup) {
@@ -973,7 +972,8 @@ struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
return NULL;
rcu_read_lock();
- if (!memcg || !css_tryget_online(&memcg->css))
+	/* The page should not get uncharged and its memcg freed under us. */
+ if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
memcg = root_mem_cgroup;
rcu_read_unlock();
return memcg;
@@ -986,10 +986,13 @@ EXPORT_SYMBOL(get_mem_cgroup_from_page);
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
if (unlikely(current->active_memcg)) {
- struct mem_cgroup *memcg = root_mem_cgroup;
+ struct mem_cgroup *memcg;
rcu_read_lock();
- if (css_tryget_online(&current->active_memcg->css))
+ /* current->active_memcg must hold a ref. */
+ if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
+ memcg = root_mem_cgroup;
+ else
memcg = current->active_memcg;
rcu_read_unlock();
return memcg;
@@ -1518,11 +1521,11 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
- K((u64)memcg->memory.max), memcg->memory.failcnt);
+ K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->swap)),
- K((u64)memcg->swap.max), memcg->swap.failcnt);
+ K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
else {
pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memsw)),
@@ -1549,13 +1552,13 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
unsigned long max;
- max = memcg->memory.max;
+ max = READ_ONCE(memcg->memory.max);
if (mem_cgroup_swappiness(memcg)) {
unsigned long memsw_max;
unsigned long swap_max;
memsw_max = memcg->memsw.max;
- swap_max = memcg->swap.max;
+ swap_max = READ_ONCE(memcg->swap.max);
swap_max = min(swap_max, (unsigned long)total_swap_pages);
max = min(max + swap_max, memsw_max);
}
@@ -1928,6 +1931,14 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
goto out;
/*
+ * If the victim task has been asynchronously moved to a different
+ * memory cgroup, we might end up killing tasks outside oom_domain.
+ * In this case it's better to ignore memory.group.oom.
+ */
+ if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
+ goto out;
+
+ /*
* Traverse the memory cgroup hierarchy from the victim task's
* cgroup up to the OOMing cgroup (or root) to find the
* highest-level memory cgroup with oom.group set.
@@ -2239,11 +2250,12 @@ static void reclaim_high(struct mem_cgroup *memcg,
gfp_t gfp_mask)
{
do {
- if (page_counter_read(&memcg->memory) <= memcg->high)
+ if (page_counter_read(&memcg->memory) <= READ_ONCE(memcg->high))
continue;
memcg_memory_event(memcg, MEMCG_HIGH);
try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
- } while ((memcg = parent_mem_cgroup(memcg)));
+ } while ((memcg = parent_mem_cgroup(memcg)) &&
+ !mem_cgroup_is_root(memcg));
}
static void high_work_func(struct work_struct *work)
@@ -2579,7 +2591,7 @@ done_restock:
* reclaim, the cost of mismatch is negligible.
*/
do {
- if (page_counter_read(&memcg->memory) > memcg->high) {
+ if (page_counter_read(&memcg->memory) > READ_ONCE(memcg->high)) {
/* Don't bother a random interrupted task */
if (in_interrupt()) {
schedule_work(&memcg->high_work);
@@ -2882,18 +2894,16 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
}
/**
- * __memcg_kmem_charge_memcg: charge a kmem page
- * @page: page to charge
- * @gfp: reclaim mode
- * @order: allocation order
+ * __memcg_kmem_charge: charge a number of kernel pages to a memcg
* @memcg: memory cgroup to charge
+ * @gfp: reclaim mode
+ * @nr_pages: number of pages to charge
*
* Returns 0 on success, an error code on failure.
*/
-int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg)
+int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned int nr_pages)
{
- unsigned int nr_pages = 1 << order;
struct page_counter *counter;
int ret;
@@ -2920,14 +2930,29 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
}
/**
- * __memcg_kmem_charge: charge a kmem page to the current memory cgroup
+ * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
+ * @memcg: memcg to uncharge
+ * @nr_pages: number of pages to uncharge
+ */
+void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
+{
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ page_counter_uncharge(&memcg->kmem, nr_pages);
+
+ page_counter_uncharge(&memcg->memory, nr_pages);
+ if (do_memsw_account())
+ page_counter_uncharge(&memcg->memsw, nr_pages);
+}
+
+/**
+ * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
* @page: page to charge
* @gfp: reclaim mode
* @order: allocation order
*
* Returns 0 on success, an error code on failure.
*/
-int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
{
struct mem_cgroup *memcg;
int ret = 0;
@@ -2937,7 +2962,7 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
memcg = get_mem_cgroup_from_current();
if (!mem_cgroup_is_root(memcg)) {
- ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
if (!ret) {
page->mem_cgroup = memcg;
__SetPageKmemcg(page);
@@ -2948,26 +2973,11 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
}
/**
- * __memcg_kmem_uncharge_memcg: uncharge a kmem page
- * @memcg: memcg to uncharge
- * @nr_pages: number of pages to uncharge
- */
-void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
- unsigned int nr_pages)
-{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- page_counter_uncharge(&memcg->kmem, nr_pages);
-
- page_counter_uncharge(&memcg->memory, nr_pages);
- if (do_memsw_account())
- page_counter_uncharge(&memcg->memsw, nr_pages);
-}
-/**
- * __memcg_kmem_uncharge: uncharge a kmem page
+ * __memcg_kmem_uncharge_page: uncharge a kmem page
* @page: page to uncharge
* @order: allocation order
*/
-void __memcg_kmem_uncharge(struct page *page, int order)
+void __memcg_kmem_uncharge_page(struct page *page, int order)
{
struct mem_cgroup *memcg = page->mem_cgroup;
unsigned int nr_pages = 1 << order;
@@ -2976,7 +2986,7 @@ void __memcg_kmem_uncharge(struct page *page, int order)
return;
VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
- __memcg_kmem_uncharge_memcg(memcg, nr_pages);
+ __memcg_kmem_uncharge(memcg, nr_pages);
page->mem_cgroup = NULL;
/* slab pages do not have PageKmemcg flag set */
@@ -3067,7 +3077,7 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
* Make sure that the new limit (memsw or memory limit) doesn't
* break our basic invariant rule memory.max <= memsw.max.
*/
- limits_invariant = memsw ? max >= memcg->memory.max :
+ limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
max <= memcg->memsw.max;
if (!limits_invariant) {
mutex_unlock(&memcg_max_mutex);
@@ -3814,8 +3824,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
/* Hierarchical information */
memory = memsw = PAGE_COUNTER_MAX;
for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
- memory = min(memory, mi->memory.max);
- memsw = min(memsw, mi->memsw.max);
+ memory = min(memory, READ_ONCE(mi->memory.max));
+ memsw = min(memsw, READ_ONCE(mi->memsw.max));
}
seq_printf(m, "hierarchical_memory_limit %llu\n",
(u64)memory * PAGE_SIZE);
@@ -4324,7 +4334,8 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
*pheadroom = PAGE_COUNTER_MAX;
while ((parent = parent_mem_cgroup(memcg))) {
- unsigned long ceiling = min(memcg->memory.max, memcg->high);
+ unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
+ READ_ONCE(memcg->high));
unsigned long used = page_counter_read(&memcg->memory);
*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
@@ -4792,7 +4803,8 @@ static struct cftype mem_cgroup_legacy_files[] = {
.write = mem_cgroup_reset,
.read_u64 = mem_cgroup_read_u64,
},
-#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
+#if defined(CONFIG_MEMCG_KMEM) && \
+ (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
{
.name = "kmem.slabinfo",
.seq_start = memcg_slab_start,
@@ -4861,7 +4873,8 @@ static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
}
}
-static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
+static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
+ unsigned int n)
{
refcount_add(n, &memcg->id.ref);
}
@@ -5044,7 +5057,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (!memcg)
return ERR_PTR(error);
- memcg->high = PAGE_COUNTER_MAX;
+ WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
memcg->soft_limit = PAGE_COUNTER_MAX;
if (parent) {
memcg->swappiness = mem_cgroup_swappiness(parent);
@@ -5197,7 +5210,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
page_counter_set_min(&memcg->memory, 0);
page_counter_set_low(&memcg->memory, 0);
- memcg->high = PAGE_COUNTER_MAX;
+ WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
memcg->soft_limit = PAGE_COUNTER_MAX;
memcg_wb_domain_size_changed(memcg);
}
@@ -5800,7 +5813,7 @@ retry:
switch (get_mctgt_type(vma, addr, ptent, &target)) {
case MC_TARGET_DEVICE:
device = true;
- /* fall through */
+ fallthrough;
case MC_TARGET_PAGE:
page = target.page;
/*
@@ -6013,7 +6026,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
if (err)
return err;
- memcg->high = high;
+ WRITE_ONCE(memcg->high, high);
for (;;) {
unsigned long nr_pages = page_counter_read(&memcg->memory);
@@ -6236,6 +6249,117 @@ struct cgroup_subsys memory_cgrp_subsys = {
.early_init = 0,
};
+/*
+ * This function calculates an individual cgroup's effective
+ * protection which is derived from its own memory.min/low, its
+ * parent's and siblings' settings, as well as the actual memory
+ * distribution in the tree.
+ *
+ * The following rules apply to the effective protection values:
+ *
+ * 1. At the first level of reclaim, effective protection is equal to
+ * the declared protection in memory.min and memory.low.
+ *
+ * 2. To enable safe delegation of the protection configuration, at
+ * subsequent levels the effective protection is capped to the
+ * parent's effective protection.
+ *
+ * 3. To make complex and dynamic subtrees easier to configure, the
+ * user is allowed to overcommit the declared protection at a given
+ * level. If that is the case, the parent's effective protection is
+ * distributed to the children in proportion to how much protection
+ * they have declared and how much of it they are utilizing.
+ *
+ * This makes distribution proportional, but also work-conserving:
+ * if one cgroup claims much more protection than it uses memory,
+ * the unused remainder is available to its siblings.
+ *
+ * 4. Conversely, when the declared protection is undercommitted at a
+ * given level, the distribution of the larger parental protection
+ * budget is NOT proportional. A cgroup's protection from a sibling
+ * is capped to its own memory.min/low setting.
+ *
+ * 5. However, to allow protecting recursive subtrees from each other
+ * without having to declare each individual cgroup's fixed share
+ * of the ancestor's claim to protection, any unutilized -
+ * "floating" - protection from up the tree is distributed in
+ * proportion to each cgroup's *usage*. This makes the protection
+ * neutral wrt sibling cgroups and lets them compete freely over
+ * the shared parental protection budget, but it protects the
+ * subtree as a whole from neighboring subtrees.
+ *
+ * Note that 4. and 5. are not in conflict: 4. is about protecting
+ * against immediate siblings whereas 5. is about protecting against
+ * neighboring subtrees.
+ */
+static unsigned long effective_protection(unsigned long usage,
+ unsigned long parent_usage,
+ unsigned long setting,
+ unsigned long parent_effective,
+ unsigned long siblings_protected)
+{
+ unsigned long protected;
+ unsigned long ep;
+
+ protected = min(usage, setting);
+ /*
+ * If all cgroups at this level combined claim and use more
+	 * protection than what the parent affords them, distribute
+ * shares in proportion to utilization.
+ *
+ * We are using actual utilization rather than the statically
+ * claimed protection in order to be work-conserving: claimed
+ * but unused protection is available to siblings that would
+ * otherwise get a smaller chunk than what they claimed.
+ */
+ if (siblings_protected > parent_effective)
+ return protected * parent_effective / siblings_protected;
+
+ /*
+ * Ok, utilized protection of all children is within what the
+ * parent affords them, so we know whatever this child claims
+ * and utilizes is effectively protected.
+ *
+ * If there is unprotected usage beyond this value, reclaim
+ * will apply pressure in proportion to that amount.
+ *
+ * If there is unutilized protection, the cgroup will be fully
+ * shielded from reclaim, but we do return a smaller value for
+ * protection than what the group could enjoy in theory. This
+ * is okay. With the overcommit distribution above, effective
+ * protection is always dependent on how memory is actually
+ * consumed among the siblings anyway.
+ */
+ ep = protected;
+
+ /*
+ * If the children aren't claiming (all of) the protection
+ * afforded to them by the parent, distribute the remainder in
+ * proportion to the (unprotected) memory of each cgroup. That
+ * way, cgroups that aren't explicitly prioritized wrt each
+ * other compete freely over the allowance, but they are
+ * collectively protected from neighboring trees.
+ *
+ * We're using unprotected memory for the weight so that if
+ * some cgroups DO claim explicit protection, we don't protect
+ * the same bytes twice.
+ */
+ if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
+ return ep;
+
+ if (parent_effective > siblings_protected && usage > protected) {
+ unsigned long unclaimed;
+
+ unclaimed = parent_effective - siblings_protected;
+ unclaimed *= usage - protected;
+ unclaimed /= parent_usage - siblings_protected;
+
+ ep += unclaimed;
+ }
+
+ return ep;
+}
+
/**
* mem_cgroup_protected - check if memory consumption is in the normal range
* @root: the top ancestor of the sub-tree being checked
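
A worked example of the overcommit branch in the effective_protection() helper added above, as a userspace sketch with made-up numbers: the parent affords 10G of effective protection, two children each declare 8G and use 6G, so siblings_protected = 12G exceeds the parent budget and each child is scaled down proportionally to 6 * 10 / 12 = 5G. (The sketch omits the recursive "floating" distribution.)

#include <stdio.h>

static unsigned long effective(unsigned long usage, unsigned long setting,
			       unsigned long parent_effective,
			       unsigned long siblings_protected)
{
	unsigned long protected = usage < setting ? usage : setting;

	/* Overcommitted: scale shares by actual utilization. */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;
	return protected;
}

int main(void)
{
	/* values in gigabytes for readability */
	printf("%luG\n", effective(6, 8, 10, 12)); /* prints 5G */
	return 0;
}
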
@@ -6249,70 +6373,12 @@ struct cgroup_subsys memory_cgrp_subsys = {
* MEMCG_PROT_LOW: cgroup memory is protected as long as there is
* an unprotected supply of reclaimable memory from other cgroups.
* MEMCG_PROT_MIN: cgroup memory is protected
- *
- * @root is exclusive; it is never protected when looked at directly
- *
- * To provide a proper hierarchical behavior, effective memory.min/low values
- * are used. Below is the description of how effective memory.low is calculated.
- * Effective memory.min values is calculated in the same way.
- *
- * Effective memory.low is always equal or less than the original memory.low.
- * If there is no memory.low overcommittment (which is always true for
- * top-level memory cgroups), these two values are equal.
- * Otherwise, it's a part of parent's effective memory.low,
- * calculated as a cgroup's memory.low usage divided by sum of sibling's
- * memory.low usages, where memory.low usage is the size of actually
- * protected memory.
- *
- * low_usage
- * elow = min( memory.low, parent->elow * ------------------ ),
- * siblings_low_usage
- *
- * | memory.current, if memory.current < memory.low
- * low_usage = |
- * | 0, otherwise.
- *
- *
- * Such definition of the effective memory.low provides the expected
- * hierarchical behavior: parent's memory.low value is limiting
- * children, unprotected memory is reclaimed first and cgroups,
- * which are not using their guarantee do not affect actual memory
- * distribution.
- *
- * For example, if there are memcgs A, A/B, A/C, A/D and A/E:
- *
- * A A/memory.low = 2G, A/memory.current = 6G
- * //\\
- * BC DE B/memory.low = 3G B/memory.current = 2G
- * C/memory.low = 1G C/memory.current = 2G
- * D/memory.low = 0 D/memory.current = 2G
- * E/memory.low = 10G E/memory.current = 0
- *
- * and the memory pressure is applied, the following memory distribution
- * is expected (approximately):
- *
- * A/memory.current = 2G
- *
- * B/memory.current = 1.3G
- * C/memory.current = 0.6G
- * D/memory.current = 0
- * E/memory.current = 0
- *
- * These calculations require constant tracking of the actual low usages
- * (see propagate_protected_usage()), as well as recursive calculation of
- * effective memory.low values. But as we do call mem_cgroup_protected()
- * path for each memory cgroup top-down from the reclaim,
- * it's possible to optimize this part, and save calculated elow
- * for next usage. This part is intentionally racy, but it's ok,
- * as memory.low is a best-effort mechanism.
*/
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
struct mem_cgroup *memcg)
{
+ unsigned long usage, parent_usage;
struct mem_cgroup *parent;
- unsigned long emin, parent_emin;
- unsigned long elow, parent_elow;
- unsigned long usage;
if (mem_cgroup_disabled())
return MEMCG_PROT_NONE;
@@ -6326,52 +6392,32 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
if (!usage)
return MEMCG_PROT_NONE;
- emin = memcg->memory.min;
- elow = memcg->memory.low;
-
parent = parent_mem_cgroup(memcg);
/* No parent means a non-hierarchical mode on v1 memcg */
if (!parent)
return MEMCG_PROT_NONE;
- if (parent == root)
- goto exit;
-
- parent_emin = READ_ONCE(parent->memory.emin);
- emin = min(emin, parent_emin);
- if (emin && parent_emin) {
- unsigned long min_usage, siblings_min_usage;
-
- min_usage = min(usage, memcg->memory.min);
- siblings_min_usage = atomic_long_read(
- &parent->memory.children_min_usage);
-
- if (min_usage && siblings_min_usage)
- emin = min(emin, parent_emin * min_usage /
- siblings_min_usage);
+ if (parent == root) {
+ memcg->memory.emin = READ_ONCE(memcg->memory.min);
+ memcg->memory.elow = memcg->memory.low;
+ goto out;
}
- parent_elow = READ_ONCE(parent->memory.elow);
- elow = min(elow, parent_elow);
- if (elow && parent_elow) {
- unsigned long low_usage, siblings_low_usage;
+ parent_usage = page_counter_read(&parent->memory);
- low_usage = min(usage, memcg->memory.low);
- siblings_low_usage = atomic_long_read(
- &parent->memory.children_low_usage);
+ WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
+ READ_ONCE(memcg->memory.min),
+ READ_ONCE(parent->memory.emin),
+ atomic_long_read(&parent->memory.children_min_usage)));
- if (low_usage && siblings_low_usage)
- elow = min(elow, parent_elow * low_usage /
- siblings_low_usage);
- }
-
-exit:
- memcg->memory.emin = emin;
- memcg->memory.elow = elow;
+ WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
+ memcg->memory.low, READ_ONCE(parent->memory.elow),
+ atomic_long_read(&parent->memory.children_low_usage)));
- if (usage <= emin)
+out:
+ if (usage <= memcg->memory.emin)
return MEMCG_PROT_MIN;
- else if (usage <= elow)
+ else if (usage <= memcg->memory.elow)
return MEMCG_PROT_LOW;
else
return MEMCG_PROT_NONE;
@@ -6759,7 +6805,7 @@ void mem_cgroup_sk_alloc(struct sock *sk)
goto out;
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
goto out;
- if (css_tryget_online(&memcg->css))
+ if (css_tryget(&memcg->css))
sk->sk_memcg = memcg;
out:
rcu_read_unlock();
@@ -7080,7 +7126,8 @@ bool mem_cgroup_swap_full(struct page *page)
return false;
for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
- if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
+ if (page_counter_read(&memcg->swap) * 2 >=
+ READ_ONCE(memcg->swap.max))
return true;
return false;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 41c634f45d45..a96364be8ab4 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -954,7 +954,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
struct address_space *mapping;
LIST_HEAD(tokill);
- bool unmap_success;
+ bool unmap_success = true;
int kill = 1, forcekill;
struct page *hpage = *hpagep;
bool mlocked = PageMlocked(hpage);
@@ -1016,7 +1016,32 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (kill)
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
- unmap_success = try_to_unmap(hpage, ttu);
+ if (!PageHuge(hpage)) {
+ unmap_success = try_to_unmap(hpage, ttu);
+ } else {
+ /*
+ * For hugetlb pages, try_to_unmap could potentially call
+ * huge_pmd_unshare. Because of this, take semaphore in
+ * write mode here and set TTU_RMAP_LOCKED to indicate we
+	 * have taken the lock at this higher level.
+ *
+ * Note that the call to hugetlb_page_mapping_lock_write
+	 * is necessary even if mapping is already set. It handles the
+	 * ugliness of potentially having to drop the page lock to obtain
+ * i_mmap_rwsem.
+ */
+ mapping = hugetlb_page_mapping_lock_write(hpage);
+
+ if (mapping) {
+ unmap_success = try_to_unmap(hpage,
+ ttu|TTU_RMAP_LOCKED);
+ i_mmap_unlock_write(mapping);
+ } else {
+ pr_info("Memory failure: %#lx: could not find mapping for mapped huge page\n",
+ pfn);
+ unmap_success = false;
+ }
+ }
if (!unmap_success)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
@@ -1785,7 +1810,7 @@ static int __soft_offline_page(struct page *page, int flags)
*/
if (!__PageMovable(page))
inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
+ page_is_file_lru(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
diff --git a/mm/memory.c b/mm/memory.c
index e8bfdf0d9d1d..19874d133a66 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -733,6 +733,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(*src_pte))
pte = pte_swp_mksoft_dirty(pte);
+ if (pte_swp_uffd_wp(*src_pte))
+ pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
} else if (is_device_private_entry(entry)) {
@@ -762,6 +764,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
is_cow_mapping(vm_flags)) {
make_device_private_entry_read(&entry);
pte = swp_entry_to_pte(entry);
+ if (pte_swp_uffd_wp(*src_pte))
+ pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
}
@@ -785,6 +789,14 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
+ /*
+ * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
+ * does not have the VM_UFFD_WP, which means that the uffd
+ * fork event is not enabled.
+ */
+ if (!(vm_flags & VM_UFFD_WP))
+ pte = pte_clear_uffd_wp(pte);
+
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
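
The fork-time rule above in miniature (userspace sketch; the bit masks are assumptions): the uffd-wp bit survives the copy only when the child VMA has uffd-wp registered, otherwise it is cleared so no fault would wait forever for a resolver that does not exist.

#include <stdio.h>

#define PTE_UFFD_WP 0x1UL
#define VM_UFFD_WP  0x2UL

static unsigned long copy_pte(unsigned long pte, unsigned long vm_flags)
{
	/* No uffd-wp registration in the child: drop the marker. */
	if (!(vm_flags & VM_UFFD_WP))
		pte &= ~PTE_UFFD_WP;
	return pte;
}

int main(void)
{
	printf("%#lx %#lx\n", copy_pte(PTE_UFFD_WP, VM_UFFD_WP),
	       copy_pte(PTE_UFFD_WP, 0));
	return 0;
}
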
@@ -1939,8 +1951,8 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
* @addr: target user address to start at
- * @pfn: physical address of kernel memory
- * @size: size of map area
+ * @pfn: page frame number of kernel physical memory address
+ * @size: size of mapping area
* @prot: page protection flags for this mapping
*
* Note: this is only safe if the mm semaphore is held when called.
@@ -2009,7 +2021,7 @@ EXPORT_SYMBOL(remap_pfn_range);
/**
* vm_iomap_memory - remap memory to userspace
* @vma: user vma to map to
- * @start: start of area
+ * @start: start of the physical memory to be mapped
* @len: size of area
*
* This is a simplified io_remap_pfn_range() for common driver use. The
@@ -2752,6 +2764,11 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ if (userfaultfd_pte_wp(vma, *vmf->pte)) {
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ return handle_userfault(vmf, VM_UFFD_WP);
+ }
+
vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
if (!vmf->page) {
/*
@@ -3085,6 +3102,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
flush_icache_page(vma, page);
if (pte_swp_soft_dirty(vmf->orig_pte))
pte = pte_mksoft_dirty(pte);
+ if (pte_swp_uffd_wp(vmf->orig_pte)) {
+ pte = pte_mkuffd_wp(pte);
+ pte = pte_wrprotect(pte);
+ }
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
vmf->orig_pte = pte;
@@ -3373,7 +3394,7 @@ map_pte:
return 0;
}
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void deposit_prealloc_pte(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -3475,8 +3496,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
pte_t entry;
vm_fault_t ret;
- if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
- IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+ if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
/* THP on COW? */
VM_BUG_ON_PAGE(memcg, page);
@@ -3949,31 +3969,40 @@ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
/* `inline' is required to avoid gcc 4.1.2 build error */
static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
{
- if (vma_is_anonymous(vmf->vma))
+ if (vma_is_anonymous(vmf->vma)) {
+ if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
+ return handle_userfault(vmf, VM_UFFD_WP);
return do_huge_pmd_wp_page(vmf, orig_pmd);
- if (vmf->vma->vm_ops->huge_fault)
- return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
+ }
+ if (vmf->vma->vm_ops->huge_fault) {
+ vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
+
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
+ }
- /* COW handled on pte level: split pmd */
- VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
+ /* COW or write-notify handled on pte level: split pmd. */
__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
return VM_FAULT_FALLBACK;
}
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
-{
- return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
-}
-
static vm_fault_t create_huge_pud(struct vm_fault *vmf)
{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
/* No support for anonymous transparent PUD pages yet */
if (vma_is_anonymous(vmf->vma))
- return VM_FAULT_FALLBACK;
- if (vmf->vma->vm_ops->huge_fault)
- return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+ goto split;
+ if (vmf->vma->vm_ops->huge_fault) {
+ vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
+ }
+split:
+	/* COW or write-notify not handled on PUD level: split pud. */
+ __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
return VM_FAULT_FALLBACK;
}
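
Both rewritten helpers follow the same shape: try the driver's huge_fault handler first, and only on VM_FAULT_FALLBACK (or when there is no handler) split the huge entry and retry at PTE granularity. A compact sketch (userspace C, simplified flag values):

#include <stdio.h>

#define VM_FAULT_FALLBACK 0x1
#define VM_FAULT_DONE     0x2

static int handle_huge(int (*huge_fault)(void))
{
	if (huge_fault) {
		int ret = huge_fault();

		if (!(ret & VM_FAULT_FALLBACK))
			return ret;      /* handled at the huge level */
	}
	/* split the huge entry here, then retry at PTE granularity */
	return VM_FAULT_FALLBACK;
}

static int handler_ok(void) { return VM_FAULT_DONE; }

int main(void)
{
	printf("%d %d\n", handle_huge(handler_ok), handle_huge(NULL));
	return 0;
}
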
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 19389cdc16a5..635e8e286598 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -67,18 +67,17 @@ void put_online_mems(void)
bool movable_node_enabled = false;
#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
-bool memhp_auto_online;
+int memhp_default_online_type = MMOP_OFFLINE;
#else
-bool memhp_auto_online = true;
+int memhp_default_online_type = MMOP_ONLINE;
#endif
-EXPORT_SYMBOL_GPL(memhp_auto_online);
static int __init setup_memhp_default_state(char *str)
{
- if (!strcmp(str, "online"))
- memhp_auto_online = true;
- else if (!strcmp(str, "offline"))
- memhp_auto_online = false;
+ const int online_type = memhp_online_type_from_str(str);
+
+ if (online_type >= 0)
+ memhp_default_online_type = online_type;
return 1;
}
@@ -105,7 +104,13 @@ static struct resource *register_memory_resource(u64 start, u64 size)
unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
char *resource_name = "System RAM";
- if (start + size > max_mem_size)
+ /*
+	 * Make sure the value parsed from 'mem=' only restricts memory
+	 * adding while booting, so that memory hotplug won't be impacted.
+	 * Please refer to the documentation of 'mem=' in
+	 * kernel-parameters.txt for more details.
+ */
+ if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
return ERR_PTR(-E2BIG);
/*
@@ -301,8 +306,9 @@ static int check_hotplug_memory_addressable(unsigned long pfn,
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
struct mhp_restrictions *restrictions)
{
+ const unsigned long end_pfn = pfn + nr_pages;
+ unsigned long cur_nr_pages;
int err;
- unsigned long nr, start_sec, end_sec;
struct vmem_altmap *altmap = restrictions->altmap;
err = check_hotplug_memory_addressable(pfn, nr_pages);
@@ -325,18 +331,13 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
if (err)
return err;
- start_sec = pfn_to_section_nr(pfn);
- end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
- for (nr = start_sec; nr <= end_sec; nr++) {
- unsigned long pfns;
-
- pfns = min(nr_pages, PAGES_PER_SECTION
- - (pfn & ~PAGE_SECTION_MASK));
- err = sparse_add_section(nid, pfn, pfns, altmap);
+ for (; pfn < end_pfn; pfn += cur_nr_pages) {
+ /* Select all remaining pages up to the next section boundary */
+ cur_nr_pages = min(end_pfn - pfn,
+ SECTION_ALIGN_UP(pfn + 1) - pfn);
+ err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
if (err)
break;
- pfn += pfns;
- nr_pages -= pfns;
cond_resched();
}
vmemmap_populate_print_last();
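
The rewritten loop advances in section-aligned chunks: `SECTION_ALIGN_UP(pfn + 1) - pfn` is the distance to the next section boundary, so a range that starts mid-section first gets a short chunk up to the boundary, then whole sections. A runnable sketch (the PAGES_PER_SECTION value is assumed):

#include <stdio.h>

#define PAGES_PER_SECTION 0x8000UL
#define SECTION_ALIGN_UP(pfn) \
	(((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

int main(void)
{
	unsigned long pfn = 0x7ff0, end_pfn = 0x18000;

	while (pfn < end_pfn) {
		unsigned long cur = end_pfn - pfn;
		unsigned long to_boundary = SECTION_ALIGN_UP(pfn + 1) - pfn;

		if (to_boundary < cur)
			cur = to_boundary;
		/* short chunk [0x7ff0,0x8000), then whole sections */
		printf("add [%#lx, %#lx)\n", pfn, pfn + cur);
		pfn += cur;
	}
	return 0;
}
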
@@ -494,7 +495,7 @@ static void __remove_section(unsigned long pfn, unsigned long nr_pages,
unsigned long map_offset,
struct vmem_altmap *altmap)
{
- struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
+ struct mem_section *ms = __pfn_to_section(pfn);
if (WARN_ON_ONCE(!valid_section(ms)))
return;
@@ -528,7 +529,8 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages,
for (; pfn < end_pfn; pfn += cur_nr_pages) {
cond_resched();
/* Select all remaining pages up to the next section boundary */
- cur_nr_pages = min(end_pfn - pfn, -(pfn | PAGE_SECTION_MASK));
+ cur_nr_pages = min(end_pfn - pfn,
+ SECTION_ALIGN_UP(pfn + 1) - pfn);
__remove_section(pfn, cur_nr_pages, map_offset, altmap);
map_offset = 0;
}
@@ -988,6 +990,7 @@ static int check_hotplug_memory_range(u64 start, u64 size)
static int online_memory_block(struct memory_block *mem, void *arg)
{
+ mem->online_type = memhp_default_online_type;
return device_online(&mem->dev);
}
@@ -1060,7 +1063,7 @@ int __ref add_memory_resource(int nid, struct resource *res)
mem_hotplug_done();
/* online pages if requested */
- if (memhp_auto_online)
+ if (memhp_default_online_type != MMOP_OFFLINE)
walk_memory_blocks(start, size, NULL, online_memory_block);
return ret;
@@ -1317,7 +1320,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
list_add_tail(&page->lru, &source);
if (!__PageMovable(page))
inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
+ page_is_file_lru(page));
} else {
pr_warn("failed to isolate pfn %lx\n", pfn);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 977c641f78cf..b4a039d779b0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -442,6 +442,7 @@ static inline bool queue_pages_required(struct page *page,
*/
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
unsigned long end, struct mm_walk *walk)
+ __releases(ptl)
{
int ret = 0;
struct page *page;
@@ -557,9 +558,10 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
+ int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
struct queue_pages *qp = walk->private;
- unsigned long flags = qp->flags;
+ unsigned long flags = (qp->flags & MPOL_MF_VALID);
struct page *page;
spinlock_t *ptl;
pte_t entry;
@@ -571,16 +573,44 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
page = pte_page(entry);
if (!queue_pages_required(page, qp))
goto unlock;
+
+ if (flags == MPOL_MF_STRICT) {
+ /*
+		 * STRICT alone means only detecting misplaced pages and
+		 * no need to further check other vmas.
+ */
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (!vma_migratable(walk->vma)) {
+ /*
+		 * Must be STRICT with MOVE*, otherwise .test_walk() would
+		 * have stopped walking the current vma.
+		 * Detect the misplaced page, but allow migrating pages
+		 * which have been queued.
+ */
+ ret = 1;
+ goto unlock;
+ }
+
/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
if (flags & (MPOL_MF_MOVE_ALL) ||
- (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
- isolate_huge_page(page, qp->pagelist);
+ (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
+ if (!isolate_huge_page(page, qp->pagelist) &&
+ (flags & MPOL_MF_STRICT))
+ /*
+ * Failed to isolate page but allow migrating pages
+ * which have been queued.
+ */
+ ret = 1;
+ }
unlock:
spin_unlock(ptl);
#else
BUG();
#endif
- return 0;
+ return ret;
}
#ifdef CONFIG_NUMA_BALANCING
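
The callback's return contract after this change, sketched as a small decision function (userspace C; the inputs abstract the qp->flags and vma_migratable() tests): 0 keeps walking, 1 records a misplaced page but keeps walking so already-queued pages can still migrate, and -EIO aborts the walk for STRICT-only mode.

#include <errno.h>
#include <stdio.h>

static int visit(int misplaced, int strict_only, int migratable)
{
	if (!misplaced)
		return 0;
	if (strict_only)
		return -EIO;     /* abort: only detection was requested */
	if (!migratable)
		return 1;        /* note failure, keep walking */
	return 0;                /* queued for migration */
}

int main(void)
{
	printf("%d %d %d\n", visit(1, 1, 1), visit(1, 0, 0), visit(0, 0, 1));
	return 0;
}
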
@@ -598,7 +628,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
{
int nr_updated;
- nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
+ nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
if (nr_updated)
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
@@ -621,7 +651,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
unsigned long flags = qp->flags;
/* range check first */
- VM_BUG_ON((vma->vm_start > start) || (vma->vm_end < end));
+ VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);
if (!qp->first) {
qp->first = vma;
@@ -649,8 +679,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
if (flags & MPOL_MF_LAZY) {
/* Similar to task_numa_work, skip inaccessible VMAs */
- if (!is_vm_hugetlb_page(vma) &&
- (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
+ if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
!(vma->vm_flags & VM_MIXEDMAP))
change_prot_numa(vma, start, endvma);
return 1;
@@ -852,7 +881,6 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
switch (p->mode) {
case MPOL_BIND:
- /* Fall through */
case MPOL_INTERLEAVE:
*nodes = p->v.nodes;
break;
@@ -994,7 +1022,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
if (!isolate_lru_page(head)) {
list_add_tail(&head->lru, pagelist);
mod_node_page_state(page_pgdat(head),
- NR_ISOLATED_ANON + page_is_file_cache(head),
+ NR_ISOLATED_ANON + page_is_file_lru(head),
hpage_nr_pages(head));
} else if (flags & MPOL_MF_STRICT) {
/*
@@ -1714,6 +1742,34 @@ COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
#endif /* CONFIG_COMPAT */
+bool vma_migratable(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ return false;
+
+ /*
+ * DAX device mappings require predictable access latency, so avoid
+ * incurring periodic faults.
+ */
+ if (vma_is_dax(vma))
+ return false;
+
+ if (is_vm_hugetlb_page(vma) &&
+ !hugepage_migration_supported(hstate_vma(vma)))
+ return false;
+
+ /*
+ * Migration allocates pages in the highest zone. If we cannot
+ * do so then migration (at least from node to node) is not
+ * possible.
+ */
+ if (vma->vm_file &&
+ gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
+ < policy_zone)
+ return false;
+ return true;
+}
+
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
@@ -2009,7 +2065,6 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
break;
case MPOL_BIND:
- /* Fall through */
case MPOL_INTERLEAVE:
*mask = mempolicy->v.nodes;
break;
@@ -2276,7 +2331,6 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
switch (a->mode) {
case MPOL_BIND:
- /* Fall through */
case MPOL_INTERLEAVE:
return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
@@ -2841,7 +2895,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
switch (mode) {
case MPOL_PREFERRED:
/*
- * Insist on a nodelist of one node only
+	 * Insist on a nodelist of one node only; later we use
+	 * first_node(nodes) to grab the single node, so the
+	 * nodelist (and thus nodes) cannot be empty.
*/
if (nodelist) {
char *rest = nodelist;
@@ -2849,6 +2905,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
rest++;
if (*rest)
goto out;
+ if (nodes_empty(nodes))
+ goto out;
}
break;
case MPOL_INTERLEAVE:
diff --git a/mm/memremap.c b/mm/memremap.c
index 09b5b7adc773..9b2c97ceb775 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -181,6 +181,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
WARN(1, "Missing migrate_to_ram method\n");
return ERR_PTR(-EINVAL);
}
+ if (!pgmap->owner) {
+ WARN(1, "Missing owner\n");
+ return ERR_PTR(-EINVAL);
+ }
break;
case MEMORY_DEVICE_FS_DAX:
if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
diff --git a/mm/migrate.c b/mm/migrate.c
index b1092876e537..7160c1556f79 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
put_page(page);
} else {
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
- page_is_file_cache(page), -hpage_nr_pages(page));
+ page_is_file_lru(page), -hpage_nr_pages(page));
putback_lru_page(page);
}
}
@@ -243,11 +243,15 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
entry = pte_to_swp_entry(*pvmw.pte);
if (is_write_migration_entry(entry))
pte = maybe_mkwrite(pte, vma);
+ else if (pte_swp_uffd_wp(*pvmw.pte))
+ pte = pte_mkuffd_wp(pte);
if (unlikely(is_zone_device_page(new))) {
if (is_device_private_page(new)) {
entry = make_device_private_entry(new, pte_write(pte));
pte = swp_entry_to_pte(entry);
+ if (pte_swp_uffd_wp(*pvmw.pte))
+ pte = pte_mkuffd_wp(pte);
}
}
@@ -647,6 +651,14 @@ void migrate_page_states(struct page *newpage, struct page *page)
if (PageWriteback(newpage))
end_page_writeback(newpage);
+ /*
+ * PG_readahead shares the same bit with PG_reclaim. The above
+ * end_page_writeback() may clear PG_readahead mistakenly, so set the
+ * bit after that.
+ */
+ if (PageReadahead(page))
+ SetPageReadahead(newpage);
+
copy_page_owner(page, newpage);
mem_cgroup_migrate(page, newpage);
@@ -1211,7 +1223,7 @@ out:
*/
if (likely(!__PageMovable(page)))
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
- page_is_file_cache(page), -hpage_nr_pages(page));
+ page_is_file_lru(page), -hpage_nr_pages(page));
}
/*
@@ -1282,6 +1294,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
int page_was_mapped = 0;
struct page *new_hpage;
struct anon_vma *anon_vma = NULL;
+ struct address_space *mapping = NULL;
/*
* Migratability of hugepages depends on architectures and their size.
@@ -1329,18 +1342,36 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
goto put_anon;
if (page_mapped(hpage)) {
+ /*
+ * try_to_unmap could potentially call huge_pmd_unshare.
+ * Because of this, take semaphore in write mode here and
+ * set TTU_RMAP_LOCKED to let lower levels know we have
+ * taken the lock.
+ */
+ mapping = hugetlb_page_mapping_lock_write(hpage);
+ if (unlikely(!mapping))
+ goto unlock_put_anon;
+
try_to_unmap(hpage,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+ TTU_RMAP_LOCKED);
page_was_mapped = 1;
+ /*
+ * Leave mapping locked until after subsequent call to
+ * remove_migration_ptes()
+ */
}
if (!page_mapped(hpage))
rc = move_to_new_page(new_hpage, hpage, mode);
- if (page_was_mapped)
+ if (page_was_mapped) {
remove_migration_ptes(hpage,
- rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
+ rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, true);
+ i_mmap_unlock_write(mapping);
+ }
+unlock_put_anon:
unlock_page(new_hpage);
put_anon:
@@ -1499,9 +1530,6 @@ static int do_move_pages_to_node(struct mm_struct *mm,
{
int err;
- if (list_empty(pagelist))
- return 0;
-
err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
MIGRATE_SYNC, MR_SYSCALL);
if (err)
@@ -1568,7 +1596,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
err = 1;
list_add_tail(&head->lru, pagelist);
mod_node_page_state(page_pgdat(head),
- NR_ISOLATED_ANON + page_is_file_cache(head),
+ NR_ISOLATED_ANON + page_is_file_lru(head),
hpage_nr_pages(head));
}
out_putpage:
@@ -1583,6 +1611,32 @@ out:
return err;
}
+static int move_pages_and_store_status(struct mm_struct *mm, int node,
+ struct list_head *pagelist, int __user *status,
+ int start, int i, unsigned long nr_pages)
+{
+ int err;
+
+ if (list_empty(pagelist))
+ return 0;
+
+ err = do_move_pages_to_node(mm, pagelist, node);
+ if (err) {
+ /*
+		 * A positive err means the number of pages that failed
+		 * to migrate. Since we are going to abort and return
+		 * the number of non-migrated pages, we need to include
+		 * the rest of the nr_pages that have not been attempted
+		 * as well.
+ */
+ if (err > 0)
+ err += nr_pages - i - 1;
+ return err;
+ }
+ return store_status(status, start, node, i - start);
+}
+
/*
* Migrate an array of page addresses onto an array of nodes and fill
* the corresponding array of status.
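
The accounting in the new helper is simple arithmetic: if migration aborts at index i with `failed` pages unmigrated, everything after i was never attempted and counts as non-migrated too. Sketch with assumed numbers:

#include <stdio.h>

static long total_not_migrated(long failed, long i, long nr_pages)
{
	/* failed pages plus the pages never attempted after index i */
	return failed + (nr_pages - i - 1);
}

int main(void)
{
	/* 10 pages, abort at index 4 with 2 failures: 2 + 5 = 7 */
	printf("%ld\n", total_not_migrated(2, 4, 10));
	return 0;
}
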
@@ -1626,21 +1680,8 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
current_node = node;
start = i;
} else if (node != current_node) {
- err = do_move_pages_to_node(mm, &pagelist, current_node);
- if (err) {
- /*
- * Positive err means the number of failed
- * pages to migrate. Since we are going to
- * abort and return the number of non-migrated
- * pages, so need to incude the rest of the
- * nr_pages that have not been attempted as
- * well.
- */
- if (err > 0)
- err += nr_pages - i - 1;
- goto out;
- }
- err = store_status(status, start, current_node, i - start);
+ err = move_pages_and_store_status(mm, current_node,
+ &pagelist, status, start, i, nr_pages);
if (err)
goto out;
start = i;
@@ -1654,49 +1695,29 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
err = add_page_for_migration(mm, addr, current_node,
&pagelist, flags & MPOL_MF_MOVE_ALL);
- if (!err) {
- /* The page is already on the target node */
- err = store_status(status, i, current_node, 1);
- if (err)
- goto out_flush;
- continue;
- } else if (err > 0) {
+ if (err > 0) {
/* The page is successfully queued for migration */
continue;
}
- err = store_status(status, i, err, 1);
+ /*
+ * If the page is already on the target node (!err), store the
+	 * node; otherwise store the err.
+ */
+ err = store_status(status, i, err ? : current_node, 1);
if (err)
goto out_flush;
- err = do_move_pages_to_node(mm, &pagelist, current_node);
- if (err) {
- if (err > 0)
- err += nr_pages - i - 1;
+ err = move_pages_and_store_status(mm, current_node, &pagelist,
+ status, start, i, nr_pages);
+ if (err)
goto out;
- }
- if (i > start) {
- err = store_status(status, start, current_node, i - start);
- if (err)
- goto out;
- }
current_node = NUMA_NO_NODE;
}
out_flush:
- if (list_empty(&pagelist))
- return err;
-
/* Make sure we do not overwrite the existing error */
- err1 = do_move_pages_to_node(mm, &pagelist, current_node);
- /*
- * Don't have to report non-attempted pages here since:
- * - If the above loop is done gracefully all pages have been
- * attempted.
- * - If the above loop is aborted it means a fatal error
- * happened, should return ret.
- */
- if (!err1)
- err1 = store_status(status, start, current_node, i - start);
+ err1 = move_pages_and_store_status(mm, current_node, &pagelist,
+ status, start, i, nr_pages);
if (err >= 0)
err = err1;
out:
@@ -1938,7 +1959,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 0;
}
- page_lru = page_is_file_cache(page);
+ page_lru = page_is_file_lru(page);
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
hpage_nr_pages(page));
@@ -1974,7 +1995,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
* Don't migrate file pages that are mapped in multiple processes
* with execute permissions as they are probably shared libraries.
*/
- if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
+ if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
(vma->vm_flags & VM_EXEC))
goto out;
@@ -1982,7 +2003,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
* Also do not migrate dirty pages as not all filesystems can move
* dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
*/
- if (page_is_file_cache(page) && PageDirty(page))
+ if (page_is_file_lru(page) && PageDirty(page))
goto out;
isolated = numamigrate_isolate_page(pgdat, page);
@@ -1997,7 +2018,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
if (!list_empty(&migratepages)) {
list_del(&page->lru);
dec_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
+ page_is_file_lru(page));
putback_lru_page(page);
}
isolated = 0;
@@ -2027,7 +2048,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0;
struct page *new_page = NULL;
- int page_lru = page_is_file_cache(page);
+ int page_lru = page_is_file_lru(page);
unsigned long start = address & HPAGE_PMD_MASK;
new_page = alloc_pages_node(node,
@@ -2241,7 +2262,7 @@ again:
arch_enter_lazy_mmu_mode();
for (; addr < end; addr += PAGE_SIZE, ptep++) {
- unsigned long mpfn, pfn;
+ unsigned long mpfn = 0, pfn;
struct page *page;
swp_entry_t entry;
pte_t pte;
@@ -2255,8 +2276,6 @@ again:
}
if (!pte_present(pte)) {
- mpfn = 0;
-
/*
* Only care about unaddressable device page special
* page table entry. Other special swap entries are not
@@ -2267,11 +2286,16 @@ again:
goto next;
page = device_private_entry_to_page(entry);
+ if (page->pgmap->owner != migrate->src_owner)
+ goto next;
+
mpfn = migrate_pfn(page_to_pfn(page)) |
MIGRATE_PFN_MIGRATE;
if (is_write_device_private_entry(entry))
mpfn |= MIGRATE_PFN_WRITE;
} else {
+ if (migrate->src_owner)
+ goto next;
pfn = pte_pfn(pte);
if (is_zero_pfn(pfn)) {
mpfn = MIGRATE_PFN_MIGRATE;
@@ -2318,6 +2342,8 @@ again:
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pte))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pte))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, addr, ptep, swp_pte);
/*
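
The status/err contract that move_pages_and_store_status() preserves above is
visible from userspace through move_pages(2): a positive return value is the
number of pages left unmigrated (since Linux 4.17), and each status[] slot
holds either the page's node or a negative errno. A hedged sketch; the target
node and the minimal error handling are illustrative only:

#include <numaif.h>		/* move_pages(); link with -lnuma */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *pages[2];
	int nodes[2] = { 1, 1 };	/* assumed target node */
	int status[2];
	long rc;

	pages[0] = aligned_alloc(psz, psz);
	pages[1] = aligned_alloc(psz, psz);
	((char *)pages[0])[0] = ((char *)pages[1])[0] = 1;	/* fault in */

	rc = move_pages(0 /* self */, 2, pages, nodes, status, MPOL_MF_MOVE);
	/* rc > 0 is exactly the "non-migrated pages" count computed above */
	printf("rc=%ld status={%d,%d}\n", rc, status[0], status[1]);
	return 0;
}
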
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 5c918388de99..7da6991d9435 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -37,7 +37,7 @@ void __init mminit_verify_zonelist(void)
struct zonelist *zonelist;
int i, listid, zoneid;
- BUG_ON(MAX_ZONELISTS > 2);
+ BUILD_BUG_ON(MAX_ZONELISTS > 2);
for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
/* Identify the zone and nodelist */
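
The mm_init.c change swaps a runtime check for a compile-time one: BUG_ON()
panics when reached, while BUILD_BUG_ON() fails the build, which works here
because MAX_ZONELISTS is a compile-time constant. Outside the kernel the same
idea is a static assertion; a minimal sketch (C11, names illustrative):

#include <assert.h>

#define MAX_ZONELISTS 2	/* illustrative stand-in for the kernel constant */

/* rejected by the compiler, not discovered at boot */
static_assert(MAX_ZONELISTS <= 2,
	      "mminit_verify_zonelist() assumes at most two zonelists");
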
diff --git a/mm/mmap.c b/mm/mmap.c
index d681a20eb4ea..8d77dbbb80fe 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -53,6 +53,9 @@
#include <asm/tlb.h>
#include <asm/mmu_context.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmap.h>
+
#include "internal.h"
#ifndef arch_mmap_check
@@ -1457,7 +1460,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
* with MAP_SHARED to preserve backward compatibility.
*/
flags &= LEGACY_MAP_MASK;
- /* fall through */
+ fallthrough;
case MAP_SHARED_VALIDATE:
if (flags & ~flags_mask)
return -EOPNOTSUPP;
@@ -1484,8 +1487,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
-
- /* fall through */
+ fallthrough;
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
return -EACCES;
@@ -1848,7 +1850,7 @@ unacct_error:
return error;
}
-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
/*
* We implement the search by looking for an rbtree node that
@@ -1951,7 +1953,7 @@ found:
return gap_start;
}
-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -2050,6 +2052,27 @@ found_highest:
return gap_end;
}
+/*
+ * Search for an unmapped address range.
+ *
+ * We are looking for a range that:
+ * - does not intersect with any VMA;
+ * - is contained within the [low_limit, high_limit) interval;
+ * - is at least the desired size;
+ * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
+ */
+unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
+{
+ unsigned long addr;
+
+ if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
+ addr = unmapped_area_topdown(info);
+ else
+ addr = unmapped_area(info);
+
+ trace_vm_unmapped_area(addr, info);
+ return addr;
+}
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr) (TASK_SIZE)
@@ -2334,8 +2357,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
gap_addr = TASK_SIZE;
next = vma->vm_next;
- if (next && next->vm_start < gap_addr &&
- (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+ if (next && next->vm_start < gap_addr && vma_is_accessible(next)) {
if (!(next->vm_flags & VM_GROWSUP))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
@@ -2416,7 +2438,7 @@ int expand_downwards(struct vm_area_struct *vma,
prev = vma->vm_prev;
/* Check that both stack segments have the same anon_vma? */
if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
- (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+ vma_is_accessible(prev)) {
if (address - prev->vm_end < stack_guard_gap)
return -ENOMEM;
}
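
The new vm_unmapped_area() wrapper gives the bottom-up and topdown searches a
single entry point, so the trace_vm_unmapped_area tracepoint observes every
search result. Callers fill a struct vm_unmapped_area_info; a hedged sketch of
a bottom-up caller, loosely modeled on the generic arch_get_unmapped_area()
(field values are illustrative):

static unsigned long example_get_unmapped_area(unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;				/* bottom-up search */
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;			/* no alignment constraint */
	info.align_offset = 0;

	/* one call site; the tracepoint fires inside the wrapper */
	return vm_unmapped_area(&info);
}
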
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 311c0dadf71c..1d823b050329 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -37,12 +37,16 @@
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa)
+ unsigned long cp_flags)
{
pte_t *pte, oldpte;
spinlock_t *ptl;
unsigned long pages = 0;
int target_node = NUMA_NO_NODE;
+ bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
+ bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
+ bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+ bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
/*
* Can be called with only the mmap_sem for reading by
@@ -98,7 +102,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
* it cannot move them all from MIGRATE_ASYNC
* context.
*/
- if (page_is_file_cache(page) && PageDirty(page))
+ if (page_is_file_lru(page) && PageDirty(page))
continue;
/*
@@ -114,6 +118,19 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (preserve_write)
ptent = pte_mk_savedwrite(ptent);
+ if (uffd_wp) {
+ ptent = pte_wrprotect(ptent);
+ ptent = pte_mkuffd_wp(ptent);
+ } else if (uffd_wp_resolve) {
+ /*
+ * Leave the write bit to be handled
+ * by the page fault handler so that
+ * things like COW can be handled
+ * properly.
+ */
+ ptent = pte_clear_uffd_wp(ptent);
+ }
+
/* Avoid taking write faults for known dirty pages */
if (dirty_accountable && pte_dirty(ptent) &&
(pte_soft_dirty(ptent) ||
@@ -122,11 +139,11 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
}
ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
pages++;
- } else if (IS_ENABLED(CONFIG_MIGRATION)) {
+ } else if (is_swap_pte(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
+ pte_t newpte;
if (is_write_migration_entry(entry)) {
- pte_t newpte;
/*
* A protection check is difficult so
* just be safe and disable write
@@ -135,22 +152,28 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
newpte = swp_entry_to_pte(entry);
if (pte_swp_soft_dirty(oldpte))
newpte = pte_swp_mksoft_dirty(newpte);
- set_pte_at(vma->vm_mm, addr, pte, newpte);
-
- pages++;
- }
-
- if (is_write_device_private_entry(entry)) {
- pte_t newpte;
-
+ if (pte_swp_uffd_wp(oldpte))
+ newpte = pte_swp_mkuffd_wp(newpte);
+ } else if (is_write_device_private_entry(entry)) {
/*
* We do not preserve soft-dirtiness. See
* copy_one_pte() for explanation.
*/
make_device_private_entry_read(&entry);
newpte = swp_entry_to_pte(entry);
- set_pte_at(vma->vm_mm, addr, pte, newpte);
+ if (pte_swp_uffd_wp(oldpte))
+ newpte = pte_swp_mkuffd_wp(newpte);
+ } else {
+ newpte = oldpte;
+ }
+
+ if (uffd_wp)
+ newpte = pte_swp_mkuffd_wp(newpte);
+ else if (uffd_wp_resolve)
+ newpte = pte_swp_clear_uffd_wp(newpte);
+ if (!pte_same(oldpte, newpte)) {
+ set_pte_at(vma->vm_mm, addr, pte, newpte);
pages++;
}
}
@@ -188,7 +211,7 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
- pgprot_t newprot, int dirty_accountable, int prot_numa)
+ pgprot_t newprot, unsigned long cp_flags)
{
pmd_t *pmd;
unsigned long next;
@@ -229,7 +252,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
__split_huge_pmd(vma, pmd, addr, false, NULL);
} else {
int nr_ptes = change_huge_pmd(vma, pmd, addr,
- newprot, prot_numa);
+ newprot, cp_flags);
if (nr_ptes) {
if (nr_ptes == HPAGE_PMD_NR) {
@@ -244,7 +267,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
/* fall through, the trans huge pmd just split */
}
this_pages = change_pte_range(vma, pmd, addr, next, newprot,
- dirty_accountable, prot_numa);
+ cp_flags);
pages += this_pages;
next:
cond_resched();
@@ -260,7 +283,7 @@ next:
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
p4d_t *p4d, unsigned long addr, unsigned long end,
- pgprot_t newprot, int dirty_accountable, int prot_numa)
+ pgprot_t newprot, unsigned long cp_flags)
{
pud_t *pud;
unsigned long next;
@@ -272,7 +295,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
if (pud_none_or_clear_bad(pud))
continue;
pages += change_pmd_range(vma, pud, addr, next, newprot,
- dirty_accountable, prot_numa);
+ cp_flags);
} while (pud++, addr = next, addr != end);
return pages;
@@ -280,7 +303,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
pgd_t *pgd, unsigned long addr, unsigned long end,
- pgprot_t newprot, int dirty_accountable, int prot_numa)
+ pgprot_t newprot, unsigned long cp_flags)
{
p4d_t *p4d;
unsigned long next;
@@ -292,7 +315,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
if (p4d_none_or_clear_bad(p4d))
continue;
pages += change_pud_range(vma, p4d, addr, next, newprot,
- dirty_accountable, prot_numa);
+ cp_flags);
} while (p4d++, addr = next, addr != end);
return pages;
@@ -300,7 +323,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
static unsigned long change_protection_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa)
+ unsigned long cp_flags)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
@@ -317,7 +340,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
if (pgd_none_or_clear_bad(pgd))
continue;
pages += change_p4d_range(vma, pgd, addr, next, newprot,
- dirty_accountable, prot_numa);
+ cp_flags);
} while (pgd++, addr = next, addr != end);
/* Only flush the TLB if we actually modified any entries: */
@@ -330,14 +353,17 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa)
+ unsigned long cp_flags)
{
unsigned long pages;
+ BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
+
if (is_vm_hugetlb_page(vma))
pages = hugetlb_change_protection(vma, start, end, newprot);
else
- pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
+ pages = change_protection_range(vma, start, end, newprot,
+ cp_flags);
return pages;
}
@@ -459,7 +485,7 @@ success:
vma_set_page_prot(vma);
change_protection(vma, start, end, vma->vm_page_prot,
- dirty_accountable, 0);
+ dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
/*
* Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
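
With change_protection() taking a single cp_flags word, a new behaviour such
as the userfaultfd write-protect pair (MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE)
no longer has to widen every signature down the page-table walk. A hedged
sketch of the calling convention; the helper below is hypothetical, the real
uffd-wp caller lives in mm/userfaultfd.c:

static void example_uffd_wp_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  bool enable_wp)
{
	/*
	 * The two uffd-wp flags are mutually exclusive by design;
	 * change_protection() BUG()s if both are passed at once.
	 */
	unsigned long cp_flags = enable_wp ? MM_CP_UFFD_WP
					   : MM_CP_UFFD_WP_RESOLVE;

	change_protection(vma, start, end, vma->vm_page_prot, cp_flags);
}
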
diff --git a/mm/mremap.c b/mm/mremap.c
index d28f08a36b96..a7e282ead438 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -133,7 +133,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
* such races:
*
* - During exec() shift_arg_pages(), we use a specially tagged vma
- * which rmap call sites look for using is_vma_temporary_stack().
+ * which rmap call sites look for using vma_is_temporary_stack().
*
* - During mremap(), new_vma is often known to be placed after vma
* in rmap traversal order. This ensures rmap will always observe
@@ -318,8 +318,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
static unsigned long move_vma(struct vm_area_struct *vma,
unsigned long old_addr, unsigned long old_len,
unsigned long new_len, unsigned long new_addr,
- bool *locked, struct vm_userfaultfd_ctx *uf,
- struct list_head *uf_unmap)
+ bool *locked, unsigned long flags,
+ struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma;
@@ -408,11 +408,32 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (unlikely(vma->vm_flags & VM_PFNMAP))
untrack_pfn_moved(vma);
+ if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
+ if (vm_flags & VM_ACCOUNT) {
+ /* Always put back VM_ACCOUNT since we won't unmap */
+ vma->vm_flags |= VM_ACCOUNT;
+
+ vm_acct_memory(vma_pages(new_vma));
+ }
+
+ /* We always clear VM_LOCKED[ONFAULT] on the old vma */
+ vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+
+ /* Because we won't unmap we don't need to touch locked_vm */
+ goto out;
+ }
+
if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
/* OOM: unable to split vma, just get accounts right */
vm_unacct_memory(excess >> PAGE_SHIFT);
excess = 0;
}
+
+ if (vm_flags & VM_LOCKED) {
+ mm->locked_vm += new_len >> PAGE_SHIFT;
+ *locked = true;
+ }
+out:
mm->hiwater_vm = hiwater_vm;
/* Restore VM_ACCOUNT if one or two pieces of vma left */
@@ -422,16 +443,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
vma->vm_next->vm_flags |= VM_ACCOUNT;
}
- if (vm_flags & VM_LOCKED) {
- mm->locked_vm += new_len >> PAGE_SHIFT;
- *locked = true;
- }
-
return new_addr;
}
static struct vm_area_struct *vma_to_resize(unsigned long addr,
- unsigned long old_len, unsigned long new_len, unsigned long *p)
+ unsigned long old_len, unsigned long new_len, unsigned long flags,
+ unsigned long *p)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = find_vma(mm, addr);
@@ -453,6 +470,10 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
return ERR_PTR(-EINVAL);
}
+ if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
+ vma->vm_flags & VM_SHARED))
+ return ERR_PTR(-EINVAL);
+
if (is_vm_hugetlb_page(vma))
return ERR_PTR(-EINVAL);
@@ -497,7 +518,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
unsigned long new_addr, unsigned long new_len, bool *locked,
- struct vm_userfaultfd_ctx *uf,
+ unsigned long flags, struct vm_userfaultfd_ctx *uf,
struct list_head *uf_unmap_early,
struct list_head *uf_unmap)
{
@@ -505,7 +526,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
unsigned long charged = 0;
- unsigned long map_flags;
+ unsigned long map_flags = 0;
if (offset_in_page(new_addr))
goto out;
@@ -534,9 +555,11 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
return -ENOMEM;
- ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
- if (ret)
- goto out;
+ if (flags & MREMAP_FIXED) {
+ ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
+ if (ret)
+ goto out;
+ }
if (old_len >= new_len) {
ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
@@ -545,13 +568,22 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
old_len = new_len;
}
- vma = vma_to_resize(addr, old_len, new_len, &charged);
+ vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
- map_flags = MAP_FIXED;
+ /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
+ if (flags & MREMAP_DONTUNMAP &&
+ !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (flags & MREMAP_FIXED)
+ map_flags |= MAP_FIXED;
+
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
@@ -561,10 +593,16 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
if (IS_ERR_VALUE(ret))
goto out1;
- ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
+ /* We got a new mapping */
+ if (!(flags & MREMAP_FIXED))
+ new_addr = ret;
+
+ ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
uf_unmap);
+
if (!(offset_in_page(ret)))
goto out;
+
out1:
vm_unacct_memory(charged);
@@ -618,12 +656,21 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
*/
addr = untagged_addr(addr);
- if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
return ret;
if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
return ret;
+ /*
+ * MREMAP_DONTUNMAP is always a move and it does not allow resizing
+ * in the process.
+ */
+ if (flags & MREMAP_DONTUNMAP &&
+ (!(flags & MREMAP_MAYMOVE) || old_len != new_len))
+ return ret;
+
if (offset_in_page(addr))
return ret;
@@ -641,9 +688,10 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
if (down_write_killable(&current->mm->mmap_sem))
return -EINTR;
- if (flags & MREMAP_FIXED) {
+ if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
ret = mremap_to(addr, old_len, new_addr, new_len,
- &locked, &uf, &uf_unmap_early, &uf_unmap);
+ &locked, flags, &uf, &uf_unmap_early,
+ &uf_unmap);
goto out;
}
@@ -671,7 +719,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
/*
* Ok, we need to grow..
*/
- vma = vma_to_resize(addr, old_len, new_len, &charged);
+ vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
@@ -721,7 +769,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}
ret = move_vma(vma, addr, old_len, new_len, new_addr,
- &locked, &uf, &uf_unmap);
+ &locked, flags, &uf, &uf_unmap);
}
out:
if (offset_in_page(ret)) {
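
From userspace, MREMAP_DONTUNMAP moves the pages of a private anonymous
mapping to a new address while leaving the old range mapped but empty; per the
checks above it must be combined with MREMAP_MAYMOVE, old_len must equal
new_len, and shared or file-backed VMAs are rejected. A hedged sketch (the
flag is defined locally in case older libc headers lack it):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4	/* assumed value for older headers */
#endif

int main(void)
{
	size_t len = 4096;
	void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* a pure move: MAYMOVE required, old_len == new_len required */
	void *new = mremap(old, len, len,
			   MREMAP_MAYMOVE | MREMAP_DONTUNMAP);

	/* 'old' stays mapped; touching it faults in fresh zero pages */
	printf("old=%p new=%p\n", old, new);
	return 0;
}
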
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2caf780a42e7..7326b54ab728 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2182,12 +2182,12 @@ int write_cache_pages(struct address_space *mapping,
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
}
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
+ tag_pages_for_writeback(mapping, index, end);
tag = PAGECACHE_TAG_TOWRITE;
- else
+ } else {
tag = PAGECACHE_TAG_DIRTY;
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, index, end);
+ }
done_index = index;
while (!done && (index <= end)) {
int i;
@@ -2655,7 +2655,7 @@ int clear_page_dirty_for_io(struct page *page)
struct address_space *mapping = page_mapping(page);
int ret = 0;
- BUG_ON(!PageLocked(page));
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
if (mapping && mapping_cap_account_dirty(mapping)) {
struct inode *inode = mapping->host;
@@ -2764,7 +2764,7 @@ int test_clear_page_writeback(struct page *page)
int __test_set_page_writeback(struct page *page, bool keep_write)
{
struct address_space *mapping = page_mapping(page);
- int ret;
+ int ret, access_ret;
lock_page_memcg(page);
if (mapping && mapping_use_writeback_tags(mapping)) {
@@ -2807,6 +2807,13 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
}
unlock_page_memcg(page);
+ access_ret = arch_make_page_accessible(page);
+ /*
+ * If writeback has been triggered on a page that cannot be made
+ * accessible, it is too late to recover here.
+ */
+ VM_BUG_ON_PAGE(access_ret != 0, page);
+
return ret;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c4eb750a199..114c56c3685d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -74,6 +74,7 @@
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
+#include "page_reporting.h"
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
@@ -95,7 +96,6 @@ DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
*/
DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
-int _node_numa_mem_[MAX_NUMNODES];
#endif
/* work_structs for global per-cpu drains */
@@ -689,6 +689,8 @@ void prep_compound_page(struct page *page, unsigned int order)
set_compound_head(p, page);
}
atomic_set(compound_mapcount_ptr(page), -1);
+ if (hpage_pincount_available(page))
+ atomic_set(compound_pincount_ptr(page), 0);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -791,32 +793,25 @@ static inline void set_page_order(struct page *page, unsigned int order)
*
* For recording page's order, we use page_private(page).
*/
-static inline int page_is_buddy(struct page *page, struct page *buddy,
+static inline bool page_is_buddy(struct page *page, struct page *buddy,
unsigned int order)
{
- if (page_is_guard(buddy) && page_order(buddy) == order) {
- if (page_zone_id(page) != page_zone_id(buddy))
- return 0;
-
- VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+ if (!page_is_guard(buddy) && !PageBuddy(buddy))
+ return false;
- return 1;
- }
+ if (page_order(buddy) != order)
+ return false;
- if (PageBuddy(buddy) && page_order(buddy) == order) {
- /*
- * zone check is done late to avoid uselessly
- * calculating zone/node ids for pages that could
- * never merge.
- */
- if (page_zone_id(page) != page_zone_id(buddy))
- return 0;
+ /*
+ * zone check is done late to avoid uselessly calculating
+ * zone/node ids for pages that could never merge.
+ */
+ if (page_zone_id(page) != page_zone_id(buddy))
+ return false;
- VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
- return 1;
- }
- return 0;
+ return true;
}
#ifdef CONFIG_COMPACTION
@@ -870,6 +865,78 @@ compaction_capture(struct capture_control *capc, struct page *page,
}
#endif /* CONFIG_COMPACTION */
+/* Used for pages not on another list */
+static inline void add_to_free_list(struct page *page, struct zone *zone,
+ unsigned int order, int migratetype)
+{
+ struct free_area *area = &zone->free_area[order];
+
+ list_add(&page->lru, &area->free_list[migratetype]);
+ area->nr_free++;
+}
+
+/* Used for pages not on another list */
+static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
+ unsigned int order, int migratetype)
+{
+ struct free_area *area = &zone->free_area[order];
+
+ list_add_tail(&page->lru, &area->free_list[migratetype]);
+ area->nr_free++;
+}
+
+/* Used for pages which are on another list */
+static inline void move_to_free_list(struct page *page, struct zone *zone,
+ unsigned int order, int migratetype)
+{
+ struct free_area *area = &zone->free_area[order];
+
+ list_move(&page->lru, &area->free_list[migratetype]);
+}
+
+static inline void del_page_from_free_list(struct page *page, struct zone *zone,
+ unsigned int order)
+{
+ /* clear reported state and update reported page count */
+ if (page_reported(page))
+ __ClearPageReported(page);
+
+ list_del(&page->lru);
+ __ClearPageBuddy(page);
+ set_page_private(page, 0);
+ zone->free_area[order].nr_free--;
+}
+
+/*
+ * If this is not the largest possible page, check if the buddy
+ * of the next-highest order is free. If it is, it's possible
+ * that pages are being freed that will coalesce soon. If that is
+ * happening, add the free page to the tail of the list so it's less
+ * likely to be used soon and more likely to be merged as a higher
+ * order page.
+ */
+static inline bool
+buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
+ struct page *page, unsigned int order)
+{
+ struct page *higher_page, *higher_buddy;
+ unsigned long combined_pfn;
+
+ if (order >= MAX_ORDER - 2)
+ return false;
+
+ if (!pfn_valid_within(buddy_pfn))
+ return false;
+
+ combined_pfn = buddy_pfn & pfn;
+ higher_page = page + (combined_pfn - pfn);
+ buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
+ higher_buddy = higher_page + (buddy_pfn - combined_pfn);
+
+ return pfn_valid_within(buddy_pfn) &&
+ page_is_buddy(higher_page, higher_buddy, order + 1);
+}
+
/*
* Freeing function for a buddy system allocator.
*
@@ -897,13 +964,14 @@ compaction_capture(struct capture_control *capc, struct page *page,
static inline void __free_one_page(struct page *page,
unsigned long pfn,
struct zone *zone, unsigned int order,
- int migratetype)
+ int migratetype, bool report)
{
- unsigned long combined_pfn;
+ struct capture_control *capc = task_capc(zone);
unsigned long uninitialized_var(buddy_pfn);
- struct page *buddy;
+ unsigned long combined_pfn;
unsigned int max_order;
- struct capture_control *capc = task_capc(zone);
+ struct page *buddy;
+ bool to_tail;
max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
@@ -938,7 +1006,7 @@ continue_merging:
if (page_is_guard(buddy))
clear_page_guard(zone, buddy, order, migratetype);
else
- del_page_from_free_area(buddy, &zone->free_area[order]);
+ del_page_from_free_list(buddy, zone, order);
combined_pfn = buddy_pfn & pfn;
page = page + (combined_pfn - pfn);
pfn = combined_pfn;
@@ -972,35 +1040,19 @@ continue_merging:
done_merging:
set_page_order(page, order);
- /*
- * If this is not the largest possible page, check if the buddy
- * of the next-highest order is free. If it is, it's possible
- * that pages are being freed that will coalesce soon. In case,
- * that is happening, add the free page to the tail of the list
- * so it's less likely to be used soon and more likely to be merged
- * as a higher order page
- */
- if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
- && !is_shuffle_order(order)) {
- struct page *higher_page, *higher_buddy;
- combined_pfn = buddy_pfn & pfn;
- higher_page = page + (combined_pfn - pfn);
- buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
- higher_buddy = higher_page + (buddy_pfn - combined_pfn);
- if (pfn_valid_within(buddy_pfn) &&
- page_is_buddy(higher_page, higher_buddy, order + 1)) {
- add_to_free_area_tail(page, &zone->free_area[order],
- migratetype);
- return;
- }
- }
-
if (is_shuffle_order(order))
- add_to_free_area_random(page, &zone->free_area[order],
- migratetype);
+ to_tail = shuffle_pick_tail();
else
- add_to_free_area(page, &zone->free_area[order], migratetype);
+ to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
+ if (to_tail)
+ add_to_free_list_tail(page, zone, order, migratetype);
+ else
+ add_to_free_list(page, zone, order, migratetype);
+
+ /* Notify page reporting subsystem of freed page */
+ if (report)
+ page_reporting_notify_free(order);
}
/*
@@ -1152,7 +1204,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
if (PageMappingFlags(page))
page->mapping = NULL;
if (memcg_kmem_enabled() && PageKmemcg(page))
- __memcg_kmem_uncharge(page, order);
+ __memcg_kmem_uncharge_page(page, order);
if (check_free)
bad += free_pages_check(page);
if (bad)
@@ -1317,7 +1369,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
if (unlikely(isolated_pageblocks))
mt = get_pageblock_migratetype(page);
- __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+ __free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
trace_mm_page_pcpu_drain(page, 0, mt);
}
spin_unlock(&zone->lock);
@@ -1333,7 +1385,7 @@ static void free_one_page(struct zone *zone,
is_migrate_isolate(migratetype))) {
migratetype = get_pfnblock_migratetype(page, pfn);
}
- __free_one_page(page, pfn, zone, order, migratetype);
+ __free_one_page(page, pfn, zone, order, migratetype, true);
spin_unlock(&zone->lock);
}
@@ -2014,13 +2066,11 @@ void __init init_cma_reserved_pageblock(struct page *page)
* -- nyc
*/
static inline void expand(struct zone *zone, struct page *page,
- int low, int high, struct free_area *area,
- int migratetype)
+ int low, int high, int migratetype)
{
unsigned long size = 1 << high;
while (high > low) {
- area--;
high--;
size >>= 1;
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
@@ -2034,7 +2084,7 @@ static inline void expand(struct zone *zone, struct page *page,
if (set_page_guard(zone, &page[size], high, migratetype))
continue;
- add_to_free_area(&page[size], area, migratetype);
+ add_to_free_list(&page[size], zone, high, migratetype);
set_page_order(&page[size], high);
}
}
@@ -2192,8 +2242,8 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
page = get_page_from_free_area(area, migratetype);
if (!page)
continue;
- del_page_from_free_area(page, area);
- expand(zone, page, order, current_order, area, migratetype);
+ del_page_from_free_list(page, zone, current_order);
+ expand(zone, page, order, current_order, migratetype);
set_pcppage_migratetype(page, migratetype);
return page;
}
@@ -2267,7 +2317,7 @@ static int move_freepages(struct zone *zone,
VM_BUG_ON_PAGE(page_zone(page) != zone, page);
order = page_order(page);
- move_to_free_area(page, &zone->free_area[order], migratetype);
+ move_to_free_list(page, zone, order, migratetype);
page += 1 << order;
pages_moved += 1 << order;
}
@@ -2383,7 +2433,6 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
unsigned int alloc_flags, int start_type, bool whole_block)
{
unsigned int current_order = page_order(page);
- struct free_area *area;
int free_pages, movable_pages, alike_pages;
int old_block_type;
@@ -2454,8 +2503,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
return;
single_page:
- area = &zone->free_area[current_order];
- move_to_free_area(page, area, start_type);
+ move_to_free_list(page, zone, current_order, start_type);
}
/*
@@ -3126,7 +3174,6 @@ EXPORT_SYMBOL_GPL(split_page);
int __isolate_free_page(struct page *page, unsigned int order)
{
- struct free_area *area = &page_zone(page)->free_area[order];
unsigned long watermark;
struct zone *zone;
int mt;
@@ -3152,7 +3199,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
/* Remove page from free list */
- del_page_from_free_area(page, area);
+ del_page_from_free_list(page, zone, order);
/*
* Set the pageblock if the isolated page is at least half of a
@@ -3173,6 +3220,25 @@ int __isolate_free_page(struct page *page, unsigned int order)
return 1UL << order;
}
+/**
+ * __putback_isolated_page - Return a now-isolated page back where we got it
+ * @page: Page that was isolated
+ * @order: Order of the isolated page
+ * @mt: Migratetype of the pageblock the page is returned to
+ *
+ * This function is meant to return a page pulled from the free lists via
+ * __isolate_free_page back to the free list it was pulled from.
+ */
+void __putback_isolated_page(struct page *page, unsigned int order, int mt)
+{
+ struct zone *zone = page_zone(page);
+
+ /* zone lock should be held when this function is called */
+ lockdep_assert_held(&zone->lock);
+
+ /* Return isolated page to tail of freelist. */
+ __free_one_page(page, page_to_pfn(page), zone, order, mt, false);
+}
+
/*
* Update NUMA hit/miss statistics
*
@@ -3459,8 +3525,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return true;
}
#endif
- if (alloc_harder &&
- !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+ if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
return true;
}
return false;
@@ -3535,10 +3600,13 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
static inline unsigned int
alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
{
- unsigned int alloc_flags = 0;
+ unsigned int alloc_flags;
- if (gfp_mask & __GFP_KSWAPD_RECLAIM)
- alloc_flags |= ALLOC_KSWAPD;
+ /*
+ * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
+ * to save a branch.
+ */
+ alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
#ifdef CONFIG_ZONE_DMA32
if (!zone)
@@ -4174,8 +4242,13 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
{
unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
- /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
+ /*
+ * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
+ * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
+ * to save two branches.
+ */
BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
+ BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
/*
* The caller may dip into page reserves a bit more if the caller
@@ -4183,7 +4256,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
* policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
* set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
*/
- alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
+ alloc_flags |= (__force int)
+ (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
if (gfp_mask & __GFP_ATOMIC) {
/*
@@ -4200,9 +4274,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
} else if (unlikely(rt_task(current)) && !in_interrupt())
alloc_flags |= ALLOC_HARDER;
- if (gfp_mask & __GFP_KSWAPD_RECLAIM)
- alloc_flags |= ALLOC_KSWAPD;
-
#ifdef CONFIG_CMA
if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
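
The alloc_flags changes above lean on deliberate bit aliasing: __GFP_HIGH
shares its bit with ALLOC_HIGH and __GFP_KSWAPD_RECLAIM with ALLOC_KSWAPD, so
translating gfp bits into alloc flags becomes a mask instead of one branch per
flag, with BUILD_BUG_ON enforcing the dependency. A standalone hedged sketch
of the pattern (values illustrative, not the kernel's):

#include <assert.h>

#define EX_GFP_KSWAPD	0x400u	/* request-side bit */
#define EX_ALLOC_KSWAPD	0x400u	/* internal bit; must alias exactly */

static unsigned int ex_to_alloc_flags(unsigned int gfp_mask)
{
	static_assert(EX_GFP_KSWAPD == EX_ALLOC_KSWAPD,
		      "bit aliasing broken");
	return gfp_mask & EX_GFP_KSWAPD;	/* no conditional needed */
}
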
@@ -4745,14 +4816,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
* Restore the original nodemask if it was potentially replaced with
* &cpuset_current_mems_allowed to optimize the fast-path attempt.
*/
- if (unlikely(ac.nodemask != nodemask))
- ac.nodemask = nodemask;
+ ac.nodemask = nodemask;
page = __alloc_pages_slowpath(alloc_mask, order, &ac);
out:
if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
- unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+ unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
__free_pages(page, order);
page = NULL;
}
@@ -7867,8 +7937,8 @@ int __meminit init_per_zone_wmark_min(void)
min_free_kbytes = new_min_free_kbytes;
if (min_free_kbytes < 128)
min_free_kbytes = 128;
- if (min_free_kbytes > 65536)
- min_free_kbytes = 65536;
+ if (min_free_kbytes > 262144)
+ min_free_kbytes = 262144;
} else {
pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
new_min_free_kbytes, user_min_free_kbytes);
@@ -8253,15 +8323,20 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
/*
* Hugepages are not in LRU lists, but they're movable.
+ * THPs are on the LRU, but need to be counted as the number of small pages they span.
* We need not scan over tail pages because we don't
* handle each tail page individually in migration.
*/
- if (PageHuge(page)) {
+ if (PageHuge(page) || PageTransCompound(page)) {
struct page *head = compound_head(page);
unsigned int skip_pages;
- if (!hugepage_migration_supported(page_hstate(head)))
+ if (PageHuge(page)) {
+ if (!hugepage_migration_supported(page_hstate(head)))
+ return page;
+ } else if (!PageLRU(head) && !__PageMovable(head)) {
return page;
+ }
skip_pages = compound_nr(head) - (page - head);
iter += skip_pages - 1;
@@ -8402,6 +8477,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
.ignore_skip_hint = true,
.no_set_skip_hint = true,
.gfp_mask = current_gfp_context(gfp_mask),
+ .alloc_contig = true,
};
INIT_LIST_HEAD(&cc.migratepages);
@@ -8709,7 +8785,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
BUG_ON(!PageBuddy(page));
order = page_order(page);
offlined_pages += 1 << order;
- del_page_from_free_area(page, &zone->free_area[order]);
+ del_page_from_free_list(page, zone, order);
pfn += (1 << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
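
A note on the arithmetic that __free_one_page() and the new
buddy_merge_likely() above rely on: for a naturally aligned 2^order block, the
buddy's pfn differs only in bit 'order', and the merged block starts at the
pair's common prefix. A hedged standalone sketch:

/* buddy of a 2^order block: flip bit 'order' of the pfn */
static inline unsigned long ex_find_buddy_pfn(unsigned long pfn,
					      unsigned int order)
{
	return pfn ^ (1UL << order);
}

/* merged block: the common prefix, i.e. bit 'order' cleared */
static inline unsigned long ex_combined_pfn(unsigned long pfn,
					    unsigned long buddy_pfn)
{
	return pfn & buddy_pfn;
}
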
diff --git a/mm/page_counter.c b/mm/page_counter.c
index de31470655f6..c56db2d5e159 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -17,29 +17,24 @@ static void propagate_protected_usage(struct page_counter *c,
unsigned long usage)
{
unsigned long protected, old_protected;
+ unsigned long low, min;
long delta;
if (!c->parent)
return;
- if (c->min || atomic_long_read(&c->min_usage)) {
- if (usage <= c->min)
- protected = usage;
- else
- protected = 0;
-
+ min = READ_ONCE(c->min);
+ if (min || atomic_long_read(&c->min_usage)) {
+ protected = min(usage, min);
old_protected = atomic_long_xchg(&c->min_usage, protected);
delta = protected - old_protected;
if (delta)
atomic_long_add(delta, &c->parent->children_min_usage);
}
- if (c->low || atomic_long_read(&c->low_usage)) {
- if (usage <= c->low)
- protected = usage;
- else
- protected = 0;
-
+ low = READ_ONCE(c->low);
+ if (low || atomic_long_read(&c->low_usage)) {
+ protected = min(usage, low);
old_protected = atomic_long_xchg(&c->low_usage, protected);
delta = protected - old_protected;
if (delta)
@@ -213,7 +208,7 @@ void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
- counter->min = nr_pages;
+ WRITE_ONCE(counter->min, nr_pages);
for (c = counter; c; c = c->parent)
propagate_protected_usage(c, atomic_long_read(&c->usage));
@@ -230,7 +225,7 @@ void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
struct page_counter *c;
- counter->low = nr_pages;
+ WRITE_ONCE(counter->low, nr_pages);
for (c = counter; c; c = c->parent)
propagate_protected_usage(c, atomic_long_read(&c->usage));
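
The page_counter change is a data-race fix as much as a cleanup: c->min and
c->low are written with no lock that propagate_protected_usage() holds, so the
reader takes one READ_ONCE() snapshot and reuses it, and the writers pair that
with WRITE_ONCE(), keeping the compiler from tearing or re-reading the field
mid-computation. A hedged sketch of the pairing:

/* writer side: publish with a marked store */
static void ex_set_limit(unsigned long *limit, unsigned long val)
{
	WRITE_ONCE(*limit, val);
}

/* reader side: one marked load, then reuse the snapshot */
static unsigned long ex_protected(const unsigned long *limit,
				  unsigned long usage)
{
	unsigned long snap = READ_ONCE(*limit);

	return usage < snap ? usage : snap;	/* min(usage, snap) */
}
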
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 4ade843ff588..a3616f7a0e9e 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -303,11 +303,8 @@ static int __meminit online_page_ext(unsigned long start_pfn,
VM_BUG_ON(!node_state(nid, N_ONLINE));
}
- for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
- if (!pfn_present(pfn))
- continue;
+ for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
fail = init_section_page_ext(pfn, nid);
- }
if (!fail)
return 0;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index a9fd7c740c23..2c11a38d6e87 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -117,13 +117,11 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
__mod_zone_freepage_state(zone, nr_pages, migratetype);
}
set_pageblock_migratetype(page, migratetype);
+ if (isolated_page)
+ __putback_isolated_page(page, order, migratetype);
zone->nr_isolate_pageblock--;
out:
spin_unlock_irqrestore(&zone->lock, flags);
- if (isolated_page) {
- post_alloc_hook(page, order, __GFP_MOVABLE);
- __free_pages(page, order);
- }
}
static inline struct page *
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
new file mode 100644
index 000000000000..3bbd471cfc81
--- /dev/null
+++ b/mm/page_reporting.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/page_reporting.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+
+#include "page_reporting.h"
+#include "internal.h"
+
+#define PAGE_REPORTING_DELAY (2 * HZ)
+static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;
+
+enum {
+ PAGE_REPORTING_IDLE = 0,
+ PAGE_REPORTING_REQUESTED,
+ PAGE_REPORTING_ACTIVE
+};
+
+/* request page reporting */
+static void
+__page_reporting_request(struct page_reporting_dev_info *prdev)
+{
+ unsigned int state;
+
+ /* Check to see if we are in desired state */
+ state = atomic_read(&prdev->state);
+ if (state == PAGE_REPORTING_REQUESTED)
+ return;
+
+ /*
+ * If reporting is already active there is nothing we need to do.
+ * Test against 0 as that represents PAGE_REPORTING_IDLE.
+ */
+ state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
+ if (state != PAGE_REPORTING_IDLE)
+ return;
+
+ /*
+ * Delay the start of work to allow a sizable queue to build. For
+ * now we are limiting this to running no more than once every
+ * couple of seconds.
+ */
+ schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
+}
+
+/* notify prdev of free page reporting request */
+void __page_reporting_notify(void)
+{
+ struct page_reporting_dev_info *prdev;
+
+ /*
+ * We use RCU to protect the pr_dev_info pointer. In almost all
+ * cases this should be present, however in the unlikely case of
+ * a shutdown this will be NULL and we should exit.
+ */
+ rcu_read_lock();
+ prdev = rcu_dereference(pr_dev_info);
+ if (likely(prdev))
+ __page_reporting_request(prdev);
+
+ rcu_read_unlock();
+}
+
+static void
+page_reporting_drain(struct page_reporting_dev_info *prdev,
+ struct scatterlist *sgl, unsigned int nents, bool reported)
+{
+ struct scatterlist *sg = sgl;
+
+ /*
+ * Drain the now reported pages back into their respective
+ * free lists/areas. We assume at least one page is populated.
+ */
+ do {
+ struct page *page = sg_page(sg);
+ int mt = get_pageblock_migratetype(page);
+ unsigned int order = get_order(sg->length);
+
+ __putback_isolated_page(page, order, mt);
+
+ /* If the pages were not reported due to error skip flagging */
+ if (!reported)
+ continue;
+
+ /*
+ * If the page was not commingled with another page we can
+ * consider the result to be "reported" since the page
+ * hasn't been modified, otherwise we will need to
+ * report on the new larger page when we make our way
+ * up to that higher order.
+ */
+ if (PageBuddy(page) && page_order(page) == order)
+ __SetPageReported(page);
+ } while ((sg = sg_next(sg)));
+
+ /* reinitialize scatterlist now that it is empty */
+ sg_init_table(sgl, nents);
+}
+
+/*
+ * The page reporting cycle consists of 4 stages: fill, report, drain, and
+ * idle. We will cycle through the first 3 stages until we cannot obtain a
+ * full scatterlist of pages, in that case we will switch to idle.
+ */
+static int
+page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
+ unsigned int order, unsigned int mt,
+ struct scatterlist *sgl, unsigned int *offset)
+{
+ struct free_area *area = &zone->free_area[order];
+ struct list_head *list = &area->free_list[mt];
+ unsigned int page_len = PAGE_SIZE << order;
+ struct page *page, *next;
+ long budget;
+ int err = 0;
+
+ /*
+ * Perform early check, if free area is empty there is
+ * nothing to process so we can skip this free_list.
+ */
+ if (list_empty(list))
+ return err;
+
+ spin_lock_irq(&zone->lock);
+
+ /*
+ * Limit how many calls we will be making to the page reporting
+ * device for this list. By doing this we avoid processing any
+ * given list for too long.
+ *
+ * The current value used allows us enough calls to process over a
+ * sixteenth of the current list plus one additional call to handle
+ * any pages that may have already been present from the previously
+ * processed list. This should result in us reporting all pages on
+ * an idle system in about 30 seconds.
+ *
+ * The division here should be cheap since PAGE_REPORTING_CAPACITY
+ * should always be a power of 2.
+ */
+ budget = DIV_ROUND_UP(area->nr_free, PAGE_REPORTING_CAPACITY * 16);
+
+ /* loop through free list adding unreported pages to sg list */
+ list_for_each_entry_safe(page, next, list, lru) {
+ /* We are going to skip over the reported pages. */
+ if (PageReported(page))
+ continue;
+
+ /*
+ * If we fully consumed our budget then update our
+ * state to indicate that we are requesting additional
+ * processing and exit this list.
+ */
+ if (budget < 0) {
+ atomic_set(&prdev->state, PAGE_REPORTING_REQUESTED);
+ next = page;
+ break;
+ }
+
+ /* Attempt to pull page from list and place in scatterlist */
+ if (*offset) {
+ if (!__isolate_free_page(page, order)) {
+ next = page;
+ break;
+ }
+
+ /* Add page to scatter list */
+ --(*offset);
+ sg_set_page(&sgl[*offset], page, page_len, 0);
+
+ continue;
+ }
+
+ /*
+ * Make the first non-reported page in the free list
+ * the new head of the free list before we release the
+ * zone lock.
+ */
+ if (&page->lru != list && !list_is_first(&page->lru, list))
+ list_rotate_to_front(&page->lru, list);
+
+ /* release lock before waiting on report processing */
+ spin_unlock_irq(&zone->lock);
+
+ /* begin processing pages in local list */
+ err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY);
+
+ /* reset offset since the full list was reported */
+ *offset = PAGE_REPORTING_CAPACITY;
+
+ /* update budget to reflect call to report function */
+ budget--;
+
+ /* reacquire zone lock and resume processing */
+ spin_lock_irq(&zone->lock);
+
+ /* flush reported pages from the sg list */
+ page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err);
+
+ /*
+ * Reset next to first entry, the old next isn't valid
+ * since we dropped the lock to report the pages
+ */
+ next = list_first_entry(list, struct page, lru);
+
+ /* exit on error */
+ if (err)
+ break;
+ }
+
+ /* Rotate any leftover pages to the head of the freelist */
+ if (&next->lru != list && !list_is_first(&next->lru, list))
+ list_rotate_to_front(&next->lru, list);
+
+ spin_unlock_irq(&zone->lock);
+
+ return err;
+}
+
+static int
+page_reporting_process_zone(struct page_reporting_dev_info *prdev,
+ struct scatterlist *sgl, struct zone *zone)
+{
+ unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY;
+ unsigned long watermark;
+ int err = 0;
+
+ /* Generate minimum watermark to be able to guarantee progress */
+ watermark = low_wmark_pages(zone) +
+ (PAGE_REPORTING_CAPACITY << PAGE_REPORTING_MIN_ORDER);
+
+ /*
+ * Cancel the request if there is insufficient free memory to
+ * guarantee forward progress.
+ */
+ if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+ return err;
+
+ /* Process each free list starting from lowest order/mt */
+ for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) {
+ for (mt = 0; mt < MIGRATE_TYPES; mt++) {
+ /* We do not pull pages from the isolate free list */
+ if (is_migrate_isolate(mt))
+ continue;
+
+ err = page_reporting_cycle(prdev, zone, order, mt,
+ sgl, &offset);
+ if (err)
+ return err;
+ }
+ }
+
+ /* report the leftover pages before going idle */
+ leftover = PAGE_REPORTING_CAPACITY - offset;
+ if (leftover) {
+ sgl = &sgl[offset];
+ err = prdev->report(prdev, sgl, leftover);
+
+ /* flush any remaining pages out from the last report */
+ spin_lock_irq(&zone->lock);
+ page_reporting_drain(prdev, sgl, leftover, !err);
+ spin_unlock_irq(&zone->lock);
+ }
+
+ return err;
+}
+
+static void page_reporting_process(struct work_struct *work)
+{
+ struct delayed_work *d_work = to_delayed_work(work);
+ struct page_reporting_dev_info *prdev =
+ container_of(d_work, struct page_reporting_dev_info, work);
+ int err = 0, state = PAGE_REPORTING_ACTIVE;
+ struct scatterlist *sgl;
+ struct zone *zone;
+
+ /*
+ * Change the state to "Active" so that we can track whether anyone
+ * requests page reporting after we complete our pass. If
+ * the state is not altered by the end of the pass we will switch
+ * to idle and quit scheduling reporting runs.
+ */
+ atomic_set(&prdev->state, state);
+
+ /* allocate scatterlist to store pages being reported on */
+ sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL);
+ if (!sgl)
+ goto err_out;
+
+ sg_init_table(sgl, PAGE_REPORTING_CAPACITY);
+
+ for_each_zone(zone) {
+ err = page_reporting_process_zone(prdev, sgl, zone);
+ if (err)
+ break;
+ }
+
+ kfree(sgl);
+err_out:
+ /*
+ * If the state has reverted to requested, then there may be
+ * additional pages to be processed. We will defer for 2s to allow
+ * more pages to accumulate.
+ */
+ state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
+ if (state == PAGE_REPORTING_REQUESTED)
+ schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
+}
+
+static DEFINE_MUTEX(page_reporting_mutex);
+DEFINE_STATIC_KEY_FALSE(page_reporting_enabled);
+
+int page_reporting_register(struct page_reporting_dev_info *prdev)
+{
+ int err = 0;
+
+ mutex_lock(&page_reporting_mutex);
+
+ /* nothing to do if already in use */
+ if (rcu_access_pointer(pr_dev_info)) {
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ /* initialize state and work structures */
+ atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
+ INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);
+
+ /* Begin initial flush of zones */
+ __page_reporting_request(prdev);
+
+ /* Assign device to allow notifications */
+ rcu_assign_pointer(pr_dev_info, prdev);
+
+ /* enable page reporting notification */
+ if (!static_key_enabled(&page_reporting_enabled)) {
+ static_branch_enable(&page_reporting_enabled);
+ pr_info("Free page reporting enabled\n");
+ }
+err_out:
+ mutex_unlock(&page_reporting_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(page_reporting_register);
+
+void page_reporting_unregister(struct page_reporting_dev_info *prdev)
+{
+ mutex_lock(&page_reporting_mutex);
+
+ if (rcu_access_pointer(pr_dev_info) == prdev) {
+ /* Disable page reporting notification */
+ RCU_INIT_POINTER(pr_dev_info, NULL);
+ synchronize_rcu();
+
+ /* Flush any existing work, and lock it out */
+ cancel_delayed_work_sync(&prdev->work);
+ }
+
+ mutex_unlock(&page_reporting_mutex);
+}
+EXPORT_SYMBOL_GPL(page_reporting_unregister);
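
The consumer of this interface is a driver (in this series, virtio-balloon's
free page reporting) that supplies the report() callback: it receives a
scatterlist of isolated free pages and returns 0 once the device has processed
them, while a nonzero return makes page_reporting_drain() put the pages back
without the Reported flag. A hedged sketch of a minimal consumer; the
"hand to hypervisor" step is a stand-in, not a real API:

#include <linux/module.h>
#include <linux/page_reporting.h>
#include <linux/scatterlist.h>

static int demo_report(struct page_reporting_dev_info *prdev,
		       struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/*
		 * Hand (page_to_pfn(sg_page(sg)), sg->length) to the
		 * hypervisor here; this demo just accepts the pages.
		 */
	}

	return 0;	/* 0: pages may be flagged PageReported */
}

static struct page_reporting_dev_info demo_prdev = {
	.report = demo_report,
};

static int __init demo_init(void)
{
	/* fails with -EBUSY if another consumer is already registered */
	return page_reporting_register(&demo_prdev);
}
module_init(demo_init);
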
diff --git a/mm/page_reporting.h b/mm/page_reporting.h
new file mode 100644
index 000000000000..aa6d37f4dc22
--- /dev/null
+++ b/mm/page_reporting.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MM_PAGE_REPORTING_H
+#define _MM_PAGE_REPORTING_H
+
+#include <linux/mmzone.h>
+#include <linux/pageblock-flags.h>
+#include <linux/page-isolation.h>
+#include <linux/jump_label.h>
+#include <linux/slab.h>
+#include <asm/pgtable.h>
+#include <linux/scatterlist.h>
+
+#define PAGE_REPORTING_MIN_ORDER pageblock_order
+
+#ifdef CONFIG_PAGE_REPORTING
+DECLARE_STATIC_KEY_FALSE(page_reporting_enabled);
+void __page_reporting_notify(void);
+
+static inline bool page_reported(struct page *page)
+{
+ return static_branch_unlikely(&page_reporting_enabled) &&
+ PageReported(page);
+}
+
+/**
+ * page_reporting_notify_free - Free page notification to start page processing
+ *
+ * This function is meant to act as a screener for __page_reporting_notify
+ * which will determine if a given zone has crossed over the high-water mark
+ * that will justify us beginning page treatment. If we have crossed that
+ * threshold then it will start the process of pulling some pages and
+ * placing them in the batch list for treatment.
+ */
+static inline void page_reporting_notify_free(unsigned int order)
+{
+ /* Called from hot path in __free_one_page() */
+ if (!static_branch_unlikely(&page_reporting_enabled))
+ return;
+
+ /* Determine if we have crossed reporting threshold */
+ if (order < PAGE_REPORTING_MIN_ORDER)
+ return;
+
+ /* This will add a few cycles, but should be called infrequently */
+ __page_reporting_notify();
+}
+#else /* CONFIG_PAGE_REPORTING */
+#define page_reported(_page) false
+
+static inline void page_reporting_notify_free(unsigned int order)
+{
+}
+#endif /* CONFIG_PAGE_REPORTING */
+#endif /* _MM_PAGE_REPORTING_H */
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index a5a8b22816ff..32558063c3f9 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -3,7 +3,7 @@
* mm/percpu-debug.c
*
* Copyright (C) 2017 Facebook Inc.
- * Copyright (C) 2017 Dennis Zhou <dennisz@fb.com>
+ * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
*
* Prints statistics about the percpu allocator and backing chunks.
*/
diff --git a/mm/percpu.c b/mm/percpu.c
index e9844086b236..d7e3bc649f4e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -6,7 +6,7 @@
* Copyright (C) 2009 Tejun Heo <tj@kernel.org>
*
* Copyright (C) 2017 Facebook Inc.
- * Copyright (C) 2017 Dennis Zhou <dennisszhou@gmail.com>
+ * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
*
* The percpu allocator handles both static and dynamic areas. Percpu
* areas are allocated in chunks which are divided into units. There is
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index de41e830cdac..74e957e302fe 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -206,7 +206,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
/*
- * Explicitly map EACCES to EPERM as EPERM is a more a
+ * Explicitly map EACCES to EPERM as EPERM is a more
* appropriate error code for process_vw_readv/writev
*/
if (rc == -EACCES)
diff --git a/mm/rmap.c b/mm/rmap.c
index b3e381919835..f79a206b271a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -22,9 +22,10 @@
*
* inode->i_mutex (while writing or truncating, not reading or faulting)
* mm->mmap_sem
- * page->flags PG_locked (lock_page)
+ * page->flags PG_locked (lock_page) * (see hugetlbfs below)
* hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
* mapping->i_mmap_rwsem
+ * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
* pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -43,6 +44,11 @@
* anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
* ->tasklist_lock
* pte map lock
+ *
+ * * hugetlbfs PageHuge() pages take locks in this order:
+ * mapping->i_mmap_rwsem
+ * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
+ * page->flags PG_locked (lock_page)
*/
#include <linux/mm.h>
@@ -269,19 +275,6 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
- struct vm_area_struct *prev = dst->vm_prev, *pprev = src->vm_prev;
-
- /*
- * If parent share anon_vma with its vm_prev, keep this sharing in in
- * child.
- *
- * 1. Parent has vm_prev, which implies we have vm_prev.
- * 2. Parent and its vm_prev have the same anon_vma.
- */
- if (!dst->anon_vma && src->anon_vma &&
- pprev && pprev->anon_vma == src->anon_vma)
- dst->anon_vma = prev->anon_vma;
-
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma;
@@ -940,7 +933,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
set_pte_at(vma->vm_mm, address, pte, entry);
ret = 1;
} else {
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t *pmd = pvmw.pmd;
pmd_t entry;
@@ -1178,6 +1171,9 @@ void page_add_new_anon_rmap(struct page *page,
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
/* increment count (starts at -1) */
atomic_set(compound_mapcount_ptr(page), 0);
+ if (hpage_pincount_available(page))
+ atomic_set(compound_pincount_ptr(page), 0);
+
__inc_node_page_state(page, NR_ANON_THPS);
} else {
/* Anon THP always mapped first with PMD */
@@ -1376,7 +1372,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
struct page *subpage;
bool ret = true;
struct mmu_notifier_range range;
- enum ttu_flags flags = (enum ttu_flags)arg;
+ enum ttu_flags flags = (enum ttu_flags)(long)arg;
/* munlock has nothing to gain from examining un-locked vmas */
if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
@@ -1406,6 +1402,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/*
* If sharing is possible, start and end will be adjusted
* accordingly.
+ *
+ * If called for a huge page, caller must hold i_mmap_rwsem
+ * in write mode as it is possible to call huge_pmd_unshare.
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
@@ -1453,6 +1452,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
address = pvmw.address;
if (PageHuge(page)) {
+ /*
+ * To call huge_pmd_unshare, i_mmap_rwsem must be
+ * held in write mode. Caller needs to explicitly
+ * do this outside rmap routines.
+ */
+ VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
/*
* huge_pmd_unshare unmapped an entire PMD
@@ -1497,6 +1502,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
/*
* No need to invalidate here it will synchronize on
@@ -1596,6 +1603,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, address, pvmw.pte, swp_pte);
/*
* No need to invalidate here it will synchronize on
@@ -1662,6 +1671,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, address, pvmw.pte, swp_pte);
/* Invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
@@ -1696,23 +1707,9 @@ discard:
return ret;
}
-bool is_vma_temporary_stack(struct vm_area_struct *vma)
-{
- int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
-
- if (!maybe_stack)
- return false;
-
- if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
- VM_STACK_INCOMPLETE_SETUP)
- return true;
-
- return false;
-}
-
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
- return is_vma_temporary_stack(vma);
+ return vma_is_temporary_stack(vma);
}
static int page_mapcount_is_zero(struct page *page)
@@ -1974,6 +1971,9 @@ void hugepage_add_new_anon_rmap(struct page *page,
{
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
atomic_set(compound_mapcount_ptr(page), 0);
+ if (hpage_pincount_available(page))
+ atomic_set(compound_pincount_ptr(page), 0);
+
__page_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */
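/*
 * [Illustrative sketch, not part of the patch] The two hunks above reset
 * compound_pincount together with compound_mapcount when a new anon huge
 * page is mapped. A minimal model of the FOLL_PIN accounting this counter
 * supports; all names below are stand-ins, not kernel API:
 */
#include <stdbool.h>

#define GUP_PIN_COUNTING_BIAS (1U << 10)

struct page_sketch {
	unsigned int refcount;	/* ordinary references */
	unsigned int pincount;	/* exact pin count, big compound pages only */
	bool has_pincount;	/* hpage_pincount_available() analogue */
};

static void pin_page_sketch(struct page_sketch *p)
{
	if (p->has_pincount) {
		p->refcount += 1;	/* one reference per pin ... */
		p->pincount += 1;	/* ... plus an exact pin count */
	} else {
		/* small pages fold pins into the refcount via a bias */
		p->refcount += GUP_PIN_COUNTING_BIAS;
	}
}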
diff --git a/mm/shmem.c b/mm/shmem.c
index aad3ba74b0e9..d722eb830317 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -410,7 +410,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
#define SHMEM_HUGE_DENY (-1)
#define SHMEM_HUGE_FORCE (-2)
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */
static int shmem_huge __read_mostly;
@@ -580,7 +580,7 @@ static long shmem_unused_huge_count(struct super_block *sb,
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
return READ_ONCE(sbinfo->shrinklist_len);
}
-#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
#define shmem_huge SHMEM_HUGE_DENY
@@ -589,11 +589,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
{
return 0;
}
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
(shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
shmem_huge != SHMEM_HUGE_DENY)
return true;
@@ -789,6 +789,32 @@ void shmem_unlock_mapping(struct address_space *mapping)
}
/*
+ * Check whether a hole-punch or truncation needs to split a huge page,
+ * returning true if no split was required, or the split has been successful.
+ *
+ * Eviction (or truncation to 0 size) should never need to split a huge page;
+ * but in rare cases might do so, if shmem_undo_range() failed to trylock on
+ * head, and then succeeded to trylock on tail.
+ *
+ * A split can only succeed when there are no additional references on the
+ * huge page: so the split below relies upon find_get_entries() having stopped
+ * when it found a subpage of the huge page, without getting further references.
+ */
+static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
+{
+ if (!PageTransCompound(page))
+ return true;
+
+ /* Just proceed to delete a huge page wholly within the range punched */
+ if (PageHead(page) &&
+ page->index >= start && page->index + HPAGE_PMD_NR <= end)
+ return true;
+
+ /* Try to split huge page, so we can truly punch the hole or truncate */
+ return split_huge_page(page) >= 0;
+}
+
+/*
* Remove range of pages and swap entries from page cache, and free them.
* If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
*/
@@ -838,31 +864,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (!trylock_page(page))
continue;
- if (PageTransTail(page)) {
- /* Middle of THP: zero out the page */
- clear_highpage(page);
- unlock_page(page);
- continue;
- } else if (PageTransHuge(page)) {
- if (index == round_down(end, HPAGE_PMD_NR)) {
- /*
- * Range ends in the middle of THP:
- * zero out the page
- */
- clear_highpage(page);
- unlock_page(page);
- continue;
- }
- index += HPAGE_PMD_NR - 1;
- i += HPAGE_PMD_NR - 1;
- }
-
- if (!unfalloc || !PageUptodate(page)) {
- VM_BUG_ON_PAGE(PageTail(page), page);
- if (page_mapping(page) == mapping) {
- VM_BUG_ON_PAGE(PageWriteback(page), page);
+ if ((!unfalloc || !PageUptodate(page)) &&
+ page_mapping(page) == mapping) {
+ VM_BUG_ON_PAGE(PageWriteback(page), page);
+ if (shmem_punch_compound(page, start, end))
truncate_inode_page(mapping, page);
- }
}
unlock_page(page);
}
@@ -936,43 +942,25 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
lock_page(page);
- if (PageTransTail(page)) {
- /* Middle of THP: zero out the page */
- clear_highpage(page);
- unlock_page(page);
- /*
- * Partial thp truncate due 'start' in middle
- * of THP: don't need to look on these pages
- * again on !pvec.nr restart.
- */
- if (index != round_down(end, HPAGE_PMD_NR))
- start++;
- continue;
- } else if (PageTransHuge(page)) {
- if (index == round_down(end, HPAGE_PMD_NR)) {
- /*
- * Range ends in the middle of THP:
- * zero out the page
- */
- clear_highpage(page);
- unlock_page(page);
- continue;
- }
- index += HPAGE_PMD_NR - 1;
- i += HPAGE_PMD_NR - 1;
- }
-
if (!unfalloc || !PageUptodate(page)) {
- VM_BUG_ON_PAGE(PageTail(page), page);
- if (page_mapping(page) == mapping) {
- VM_BUG_ON_PAGE(PageWriteback(page), page);
- truncate_inode_page(mapping, page);
- } else {
+ if (page_mapping(page) != mapping) {
/* Page was replaced by swap: retry */
unlock_page(page);
index--;
break;
}
+ VM_BUG_ON_PAGE(PageWriteback(page), page);
+ if (shmem_punch_compound(page, start, end))
+ truncate_inode_page(mapping, page);
+ else {
+ /* Wipe the page and don't get stuck */
+ clear_highpage(page);
+ flush_dcache_page(page);
+ set_page_dirty(page);
+ if (index <
+ round_up(start, HPAGE_PMD_NR))
+ start = index + 1;
+ }
}
unlock_page(page);
}
@@ -1059,7 +1047,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
* Part of the huge page can be beyond i_size: subject
* to shrink under memory pressure.
*/
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
spin_lock(&sbinfo->shrinklist_lock);
/*
* _careful to defend against unlocked access to
@@ -1472,9 +1460,6 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
pgoff_t hindex;
struct page *page;
- if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
- return NULL;
-
hindex = round_down(index, HPAGE_PMD_NR);
if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
XA_PRESENT))
@@ -1486,6 +1471,8 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_destroy(&pvma);
if (page)
prep_transhuge_page(page);
+ else
+ count_vm_event(THP_FILE_FALLBACK);
return page;
}
@@ -1511,7 +1498,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
int nr;
int err = -ENOSPC;
- if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
huge = false;
nr = huge ? HPAGE_PMD_NR : 1;
@@ -1813,17 +1800,20 @@ repeat:
if (shmem_huge == SHMEM_HUGE_FORCE)
goto alloc_huge;
switch (sbinfo->huge) {
- loff_t i_size;
- pgoff_t off;
case SHMEM_HUGE_NEVER:
goto alloc_nohuge;
- case SHMEM_HUGE_WITHIN_SIZE:
+ case SHMEM_HUGE_WITHIN_SIZE: {
+ loff_t i_size;
+ pgoff_t off;
+
off = round_up(index, HPAGE_PMD_NR);
i_size = round_up(i_size_read(inode), PAGE_SIZE);
if (i_size >= HPAGE_PMD_SIZE &&
i_size >> PAGE_SHIFT >= off)
goto alloc_huge;
- /* fallthrough */
+
+ fallthrough;
+ }
case SHMEM_HUGE_ADVISE:
if (sgp_huge == SGP_HUGE)
goto alloc_huge;
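/*
 * [Illustrative sketch, not part of the patch] The hunk above swaps the
 * old fall-through comment for the fallthrough pseudo-keyword, so the
 * compiler rather than a comment parser verifies intent. Simplified from
 * include/linux/compiler_attributes.h:
 */
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif
#if __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0)	/* fallthrough */
#endif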
@@ -1871,8 +1861,13 @@ alloc_nohuge:
error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
PageTransHuge(page));
- if (error)
+ if (error) {
+ if (PageTransHuge(page)) {
+ count_vm_event(THP_FILE_FALLBACK);
+ count_vm_event(THP_FILE_FALLBACK_CHARGE);
+ }
goto unacct;
+ }
error = shmem_add_to_page_cache(page, mapping, hindex,
NULL, gfp & GFP_RECLAIM_MASK);
if (error) {
@@ -2089,7 +2084,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
get_area = current->mm->get_unmapped_area;
addr = get_area(file, uaddr, len, pgoff, flags);
- if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return addr;
if (IS_ERR_VALUE(addr))
return addr;
@@ -2228,7 +2223,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
(vma->vm_end & HPAGE_PMD_MASK)) {
khugepaged_enter(vma, vma->vm_flags);
@@ -3113,12 +3108,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
error = security_inode_init_security(inode, dir, &dentry->d_name,
shmem_initxattrs, NULL);
- if (error) {
- if (error != -EOPNOTSUPP) {
- iput(inode);
- return error;
- }
- error = 0;
+ if (error && error != -EOPNOTSUPP) {
+ iput(inode);
+ return error;
}
inode->i_size = len-1;
@@ -3243,7 +3235,7 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
struct shmem_inode_info *info = SHMEM_I(inode);
name = xattr_full_name(handler, name);
- return simple_xattr_set(&info->xattrs, name, value, size, flags);
+ return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
}
static const struct xattr_handler shmem_security_xattr_handler = {
@@ -3455,7 +3447,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
case Opt_huge:
ctx->huge = result.uint_32;
if (ctx->huge != SHMEM_HUGE_NEVER &&
- !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+ !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
has_transparent_hugepage()))
goto unsupported_parameter;
ctx->seen |= SHMEM_SEEN_HUGE;
@@ -3601,7 +3593,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
seq_printf(seq, ",gid=%u",
from_kgid_munged(&init_user_ns, sbinfo->gid));
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
if (sbinfo->huge)
seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
@@ -3846,7 +3838,7 @@ static const struct super_operations shmem_ops = {
.evict_inode = shmem_evict_inode,
.drop_inode = generic_delete_inode,
.put_super = shmem_put_super,
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
.nr_cached_objects = shmem_unused_huge_count,
.free_cached_objects = shmem_unused_huge_scan,
#endif
@@ -3908,7 +3900,7 @@ int __init shmem_init(void)
goto out1;
}
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
else
@@ -3924,7 +3916,7 @@ out2:
return error;
}
-#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -3976,9 +3968,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
struct kobj_attribute shmem_enabled_attr =
__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
@@ -4004,7 +3996,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
if (i_size >= HPAGE_PMD_SIZE &&
i_size >> PAGE_SHIFT >= off)
return true;
- /* fall through */
+ fallthrough;
case SHMEM_HUGE_ADVISE:
/* TODO: implement fadvise() hints */
return (vma->vm_flags & VM_HUGEPAGE);
@@ -4013,7 +4005,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
return false;
}
}
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_SHMEM */
@@ -4182,7 +4174,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
vma->vm_file = file;
vma->vm_ops = &shmem_vm_ops;
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
(vma->vm_end & HPAGE_PMD_MASK)) {
khugepaged_enter(vma, vma->vm_flags);
diff --git a/mm/shuffle.c b/mm/shuffle.c
index b3fe97fd6654..44406d9977c7 100644
--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ -72,7 +72,7 @@ static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
return NULL;
/* ...is the pfn in a present section or a hole? */
- if (!pfn_present(pfn))
+ if (!pfn_in_present_section(pfn))
return NULL;
/* ...is the page free and currently on a free_area list? */
@@ -183,11 +183,11 @@ void __meminit __shuffle_free_memory(pg_data_t *pgdat)
shuffle_zone(z);
}
-void add_to_free_area_random(struct page *page, struct free_area *area,
- int migratetype)
+bool shuffle_pick_tail(void)
{
static u64 rand;
static u8 rand_bits;
+ bool ret;
/*
* The lack of locking is deliberate. If 2 threads race to
@@ -198,10 +198,10 @@ void add_to_free_area_random(struct page *page, struct free_area *area,
rand = get_random_u64();
}
- if (rand & 1)
- add_to_free_area(page, area, migratetype);
- else
- add_to_free_area_tail(page, area, migratetype);
+ ret = rand & 1;
+
rand_bits--;
rand >>= 1;
+
+ return ret;
}
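/*
 * [Illustrative sketch, not part of the patch] The refactor above turns
 * the head-or-tail decision into a plain predicate while keeping the
 * trick of amortizing one 64-bit random draw over 64 decisions.
 * Userspace analogue, with random() standing in for get_random_u64():
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static bool pick_tail_sketch(void)
{
	static uint64_t pool;
	static unsigned int bits_left;
	bool ret;

	if (!bits_left) {
		/* random() yields 31 bits each; good enough for a sketch */
		pool = ((uint64_t)random() << 32) | (uint64_t)random();
		bits_left = 64;
	}
	ret = pool & 1;
	bits_left--;
	pool >>= 1;
	return ret;
}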
diff --git a/mm/shuffle.h b/mm/shuffle.h
index 777a257a0d2f..4d79f03b6658 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -22,6 +22,7 @@ enum mm_shuffle_ctl {
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
extern void page_alloc_shuffle(enum mm_shuffle_ctl ctl);
extern void __shuffle_free_memory(pg_data_t *pgdat);
+extern bool shuffle_pick_tail(void);
static inline void shuffle_free_memory(pg_data_t *pgdat)
{
if (!static_branch_unlikely(&page_alloc_shuffle_key))
@@ -44,6 +45,11 @@ static inline bool is_shuffle_order(int order)
return order >= SHUFFLE_ORDER;
}
#else
+static inline bool shuffle_pick_tail(void)
+{
+ return false;
+}
+
static inline void shuffle_free_memory(pg_data_t *pgdat)
{
}
diff --git a/mm/slab.h b/mm/slab.h
index 7e94700aa78c..207c83ef6e06 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -348,6 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
+ unsigned int nr_pages = 1 << order;
struct mem_cgroup *memcg;
struct lruvec *lruvec;
int ret;
@@ -360,21 +361,21 @@ static __always_inline int memcg_charge_slab(struct page *page,
if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
- (1 << order));
- percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+ nr_pages);
+ percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
return 0;
}
- ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ ret = memcg_kmem_charge(memcg, gfp, nr_pages);
if (ret)
goto out;
lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
- mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
+ mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages);
/* transfer try_charge() page references to kmem_cache */
- percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
- css_put_many(&memcg->css, 1 << order);
+ percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
+ css_put_many(&memcg->css, nr_pages);
out:
css_put(&memcg->css);
return ret;
@@ -387,6 +388,7 @@ out:
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
+ unsigned int nr_pages = 1 << order;
struct mem_cgroup *memcg;
struct lruvec *lruvec;
@@ -394,15 +396,15 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
memcg = READ_ONCE(s->memcg_params.memcg);
if (likely(!mem_cgroup_is_root(memcg))) {
lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
- mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
- memcg_kmem_uncharge_memcg(page, order, memcg);
+ mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages);
+ memcg_kmem_uncharge(memcg, nr_pages);
} else {
mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
- -(1 << order));
+ -nr_pages);
}
rcu_read_unlock();
- percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
+ percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages);
}
extern void slab_init_memcg_params(struct kmem_cache *);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1907cb2903c7..93ec4a574d8d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1521,7 +1521,7 @@ void dump_unreclaimable_slab(void)
mutex_unlock(&slab_mutex);
}
-#if defined(CONFIG_MEMCG)
+#if defined(CONFIG_MEMCG_KMEM)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
@@ -1581,6 +1581,7 @@ static int slabinfo_open(struct inode *inode, struct file *file)
}
static const struct proc_ops slabinfo_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = slabinfo_open,
.proc_read = seq_read,
.proc_write = slabinfo_write,
diff --git a/mm/slub.c b/mm/slub.c
index 6589b41d5a60..332d4b459a90 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -259,7 +259,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
* freepointer to be restored incorrectly.
*/
return (void *)((unsigned long)ptr ^ s->random ^
- (unsigned long)kasan_reset_tag((void *)ptr_addr));
+ swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
return ptr;
#endif
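/*
 * [Illustrative sketch, not part of the patch] Why swab() above: the
 * stored word is ptr ^ secret ^ storage_addr, and a freelist pointer
 * shares its high bits with the address it is stored at, so up top
 * ptr ^ addr is almost all zeroes and the secret's high bits show
 * through to anyone who can read one obfuscated entry. Byte-swapping
 * folds the address's high-entropy low bits over them. Standalone
 * model of the hardened encoding:
 */
#include <stdint.h>

static inline void *freelist_ptr_sketch(void *ptr, uintptr_t ptr_addr,
					uintptr_t secret)
{
	/* assumes a 64-bit build, as __builtin_bswap64 implies */
	return (void *)((uintptr_t)ptr ^ secret ^
			__builtin_bswap64(ptr_addr));
}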
@@ -449,6 +449,7 @@ static DEFINE_SPINLOCK(object_map_lock);
* not vanish from under us.
*/
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
+ __acquires(&object_map_lock)
{
void *p;
void *addr = page_address(page);
@@ -465,7 +466,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
return object_map;
}
-static void put_map(unsigned long *map)
+static void put_map(unsigned long *map) __releases(&object_map_lock)
{
VM_BUG_ON(map != object_map);
lockdep_assert_held(&object_map_lock);
@@ -2205,11 +2206,11 @@ static void unfreeze_partials(struct kmem_cache *s,
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
- while ((page = c->partial)) {
+ while ((page = slub_percpu_partial(c))) {
struct page new;
struct page old;
- c->partial = page->next;
+ slub_set_percpu_partial(c, page);
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
@@ -2282,7 +2283,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
if (oldpage) {
pobjects = oldpage->pobjects;
pages = oldpage->pages;
- if (drain && pobjects > s->cpu_partial) {
+ if (drain && pobjects > slub_cpu_partial(s)) {
unsigned long flags;
/*
* partial array is full. Move the existing
@@ -2307,7 +2308,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
!= oldpage);
- if (unlikely(!s->cpu_partial)) {
+ if (unlikely(!slub_cpu_partial(s))) {
unsigned long flags;
local_irq_save(flags);
@@ -3512,15 +3513,15 @@ static void set_cpu_partial(struct kmem_cache *s)
* 50% to keep some capacity around for frees.
*/
if (!kmem_cache_has_cpu_partial(s))
- s->cpu_partial = 0;
+ slub_set_cpu_partial(s, 0);
else if (s->size >= PAGE_SIZE)
- s->cpu_partial = 2;
+ slub_set_cpu_partial(s, 2);
else if (s->size >= 1024)
- s->cpu_partial = 6;
+ slub_set_cpu_partial(s, 6);
else if (s->size >= 256)
- s->cpu_partial = 13;
+ slub_set_cpu_partial(s, 13);
else
- s->cpu_partial = 30;
+ slub_set_cpu_partial(s, 30);
#endif
}
@@ -3581,6 +3582,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
s->offset = size;
size += sizeof(void *);
+ } else if (size > sizeof(void *)) {
+ /*
+ * Store freelist pointer near middle of object to keep
+ * it away from the edges of the object to avoid small
+ * sized over/underflows from neighboring allocations.
+ */
+ s->offset = ALIGN(size / 2, sizeof(void *));
}
#ifdef CONFIG_SLUB_DEBUG
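/*
 * [Illustrative sketch, not part of the patch] The new else-branch above
 * places the freelist pointer near the middle of the object:
 * ALIGN(size / 2, sizeof(void *)). Worked examples, assuming 8-byte
 * pointers:
 *
 *   size  96 -> ALIGN(48, 8) = 48
 *   size 100 -> ALIGN(50, 8) = 56
 *   size 192 -> ALIGN(96, 8) = 96
 */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int mid_freelist_offset(unsigned int size)
{
	return ALIGN_UP(size / 2, sizeof(void *));
}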
diff --git a/mm/sparse.c b/mm/sparse.c
index 65599e8bd636..1aee5a481571 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -209,6 +209,7 @@ static inline unsigned long first_present_section_nr(void)
return next_present_section_nr(-1);
}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
unsigned long nr_pages)
{
@@ -243,6 +244,11 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
nr_pages -= pfns;
}
}
+#else
+void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+{
+}
+#endif
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
@@ -660,39 +666,67 @@ static void free_map_bootmem(struct page *memmap)
vmemmap_free(start, end, NULL);
}
-#else
-struct page * __meminit populate_section_memmap(unsigned long pfn,
- unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
- struct page *page, *ret;
- unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
+ DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+ DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+ struct mem_section *ms = __pfn_to_section(pfn);
+ unsigned long *subsection_map = ms->usage
+ ? &ms->usage->subsection_map[0] : NULL;
- page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
- if (page)
- goto got_map_page;
+ subsection_mask_set(map, pfn, nr_pages);
+ if (subsection_map)
+ bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
- ret = vmalloc(memmap_size);
- if (ret)
- goto got_map_ptr;
+ if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+ "section already deactivated (%#lx + %ld)\n",
+ pfn, nr_pages))
+ return -EINVAL;
- return NULL;
-got_map_page:
- ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
+ bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+ return 0;
+}
- return ret;
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+ return bitmap_empty(&ms->usage->subsection_map[0],
+ SUBSECTIONS_PER_SECTION);
}
-static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap)
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
- struct page *memmap = pfn_to_page(pfn);
+ struct mem_section *ms = __pfn_to_section(pfn);
+ DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+ unsigned long *subsection_map;
+ int rc = 0;
- if (is_vmalloc_addr(memmap))
- vfree(memmap);
+ subsection_mask_set(map, pfn, nr_pages);
+
+ subsection_map = &ms->usage->subsection_map[0];
+
+ if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+ rc = -EINVAL;
+ else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+ rc = -EEXIST;
else
- free_pages((unsigned long)memmap,
- get_order(sizeof(struct page) * PAGES_PER_SECTION));
+ bitmap_or(subsection_map, map, subsection_map,
+ SUBSECTIONS_PER_SECTION);
+
+ return rc;
+}
+#else
+struct page * __meminit populate_section_memmap(unsigned long pfn,
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
+{
+ return kvmalloc_node(array_size(sizeof(struct page),
+ PAGES_PER_SECTION), GFP_KERNEL, nid);
+}
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap)
+{
+ kvfree(pfn_to_page(pfn));
}
static void free_map_bootmem(struct page *memmap)
@@ -724,48 +758,51 @@ static void free_map_bootmem(struct page *memmap)
put_page_bootmem(page);
}
}
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+ return true;
+}
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ return 0;
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+/*
+ * To deactivate a memory region, there are 3 cases to handle across
+ * two configurations (SPARSEMEM_VMEMMAP={y,n}):
+ *
+ * 1. deactivation of a partial hot-added section (only possible in
+ * the SPARSEMEM_VMEMMAP=y case).
+ * a) section was present at memory init.
+ * b) section was hot-added post memory init.
+ * 2. deactivation of a complete hot-added section.
+ * 3. deactivation of a complete section from memory init.
+ *
+ * For 1, when the subsection_map is not empty we will not be freeing
+ * the usage map, but still need to free the vmemmap range.
+ *
+ * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified
+ */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
struct vmem_altmap *altmap)
{
- DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
- DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
struct mem_section *ms = __pfn_to_section(pfn);
bool section_is_early = early_section(ms);
struct page *memmap = NULL;
bool empty;
- unsigned long *subsection_map = ms->usage
- ? &ms->usage->subsection_map[0] : NULL;
-
- subsection_mask_set(map, pfn, nr_pages);
- if (subsection_map)
- bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
- if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
- "section already deactivated (%#lx + %ld)\n",
- pfn, nr_pages))
+ if (clear_subsection_map(pfn, nr_pages))
return;
- /*
- * There are 3 cases to handle across two configurations
- * (SPARSEMEM_VMEMMAP={y,n}):
- *
- * 1/ deactivation of a partial hot-added section (only possible
- * in the SPARSEMEM_VMEMMAP=y case).
- * a/ section was present at memory init
- * b/ section was hot-added post memory init
- * 2/ deactivation of a complete hot-added section
- * 3/ deactivation of a complete section from memory init
- *
- * For 1/, when subsection_map does not empty we will not be
- * freeing the usage map, but still need to free the vmemmap
- * range.
- *
- * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
- */
- bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
- empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION);
+ empty = is_subsection_map_empty(ms);
if (empty) {
unsigned long section_nr = pfn_to_section_nr(pfn);
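/*
 * [Illustrative sketch, not part of the patch] The bookkeeping that
 * fill_subsection_map()/clear_subsection_map() factor out above,
 * modelled on a single 64-bit word (x86-64 has 64 subsections of 2 MiB
 * per 128 MiB section):
 */
#include <errno.h>
#include <stdint.h>

static int fill_map_sketch(uint64_t *active, uint64_t request)
{
	if (!request)
		return -EINVAL;		/* nothing asked for */
	if (*active & request)
		return -EEXIST;		/* overlaps a live subsection */
	*active |= request;
	return 0;
}

static int clear_map_sketch(uint64_t *active, uint64_t request)
{
	/* every requested subsection must currently be active */
	if ((*active & request) != request)
		return -EINVAL;		/* "section already deactivated" */
	*active ^= request;		/* the bitmap_xor() step above */
	return 0;
}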
@@ -801,31 +838,19 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
static struct page * __meminit section_activate(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap)
{
- DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
struct mem_section *ms = __pfn_to_section(pfn);
struct mem_section_usage *usage = NULL;
- unsigned long *subsection_map;
struct page *memmap;
int rc = 0;
- subsection_mask_set(map, pfn, nr_pages);
-
if (!ms->usage) {
usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
if (!usage)
return ERR_PTR(-ENOMEM);
ms->usage = usage;
}
- subsection_map = &ms->usage->subsection_map[0];
-
- if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
- rc = -EINVAL;
- else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
- rc = -EEXIST;
- else
- bitmap_or(subsection_map, map, subsection_map,
- SUBSECTIONS_PER_SECTION);
+ rc = fill_subsection_map(pfn, nr_pages);
if (rc) {
if (usage)
ms->usage = NULL;
@@ -861,6 +886,10 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
*
* This is only intended for hotplug.
*
+ * Note that only VMEMMAP supports sub-section aligned hotplug;
+ * the proper alignment and size are gated by check_pfn_span().
+ *
* Return:
* * 0 - On success.
* * -EEXIST - Section has been present.
@@ -894,7 +923,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
/* Align memmap to section boundary in the subsection case */
if (section_nr_to_pfn(section_nr) != start_pfn)
- memmap = pfn_to_kaddr(section_nr_to_pfn(section_nr));
+ memmap = pfn_to_page(section_nr_to_pfn(section_nr));
sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
return 0;
diff --git a/mm/swap.c b/mm/swap.c
index cf39d24ada2a..bf9a79fed62d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -276,7 +276,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
void *arg)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- int file = page_is_file_cache(page);
+ int file = page_is_file_lru(page);
int lru = page_lru_base_type(page);
del_page_from_lru_list(page, lruvec, lru);
@@ -394,7 +394,7 @@ void mark_page_accessed(struct page *page)
else
__lru_cache_activate_page(page);
ClearPageReferenced(page);
- if (page_is_file_cache(page))
+ if (page_is_file_lru(page))
workingset_activation(page);
}
if (page_is_idle(page))
@@ -515,7 +515,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
return;
active = PageActive(page);
- file = page_is_file_cache(page);
+ file = page_is_file_lru(page);
lru = page_lru_base_type(page);
del_page_from_lru_list(page, lruvec, lru + active);
@@ -548,7 +548,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
- int file = page_is_file_cache(page);
+ int file = page_is_file_lru(page);
int lru = page_lru_base_type(page);
del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
@@ -573,9 +573,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
ClearPageActive(page);
ClearPageReferenced(page);
/*
- * lazyfree pages are clean anonymous pages. They have
- * SwapBacked flag cleared to distinguish normal anonymous
- * pages
+ * Lazyfree pages are clean anonymous pages. They have
+ * PG_swapbacked flag cleared, to distinguish them from normal
+ * anonymous pages
*/
ClearPageSwapBacked(page);
add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
@@ -931,7 +931,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
VM_BUG_ON_PAGE(PageLRU(page), page);
- SetPageLRU(page);
/*
* Page becomes evictable in two ways:
* 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
@@ -958,11 +957,12 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
* looking at the same page) and the evictable page will be stranded
* in an unevictable LRU.
*/
- smp_mb();
+ SetPageLRU(page);
+ smp_mb__after_atomic();
if (page_evictable(page)) {
lru = page_lru(page);
- update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+ update_page_reclaim_stat(lruvec, page_is_file_lru(page),
PageActive(page));
if (was_unevictable)
count_vm_event(UNEVICTABLE_PGRESCUED);
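/*
 * [Illustrative sketch, not part of the patch] The ordering the comment
 * above demands is the classic store-buffering pattern: each side stores
 * its flag, then loads the other side's. C11 model, with the seq_cst
 * fence playing the role of smp_mb__after_atomic():
 */
#include <stdatomic.h>

static atomic_int pg_lru;
static atomic_int pg_mlocked = 1;

static int lru_add_side(void)		/* __pagevec_lru_add_fn() */
{
	atomic_store_explicit(&pg_lru, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&pg_mlocked, memory_order_relaxed);
}

static int munlock_side(void)		/* munlock_vma_page() */
{
	atomic_store_explicit(&pg_mlocked, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&pg_lru, memory_order_relaxed);
}
/* With both fences in place at least one side must observe the other's
 * store, so the page cannot be stranded on the unevictable LRU. */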
@@ -986,7 +986,6 @@ void __pagevec_lru_add(struct pagevec *pvec)
{
pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
-EXPORT_SYMBOL(__pagevec_lru_add);
/**
* pagevec_lookup_entries - gang pagecache lookup
@@ -1005,6 +1004,10 @@ EXPORT_SYMBOL(__pagevec_lru_add);
* ascending indexes. There may be holes in the indices due to
* not-present entries.
*
+ * Only one subpage of a Transparent Huge Page is returned in one call:
+ * allowing truncate_inode_pages_range() to evict the whole THP without
+ * cycling through a pagevec of extra references.
+ *
* pagevec_lookup_entries() returns the number of entries which were
* found.
*/
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 63a7b4563a57..0975adc72253 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -309,7 +309,7 @@ direct_free:
swp_entry_t get_swap_page(struct page *page)
{
- swp_entry_t entry, *pentry;
+ swp_entry_t entry;
struct swap_slots_cache *cache;
entry.val = 0;
@@ -336,13 +336,11 @@ swp_entry_t get_swap_page(struct page *page)
if (cache->slots) {
repeat:
if (cache->nr) {
- pentry = &cache->slots[cache->cur++];
- entry = *pentry;
- pentry->val = 0;
+ entry = cache->slots[cache->cur];
+ cache->slots[cache->cur++].val = 0;
cache->nr--;
- } else {
- if (refill_swap_slots_cache(cache))
- goto repeat;
+ } else if (refill_swap_slots_cache(cache)) {
+ goto repeat;
}
}
mutex_unlock(&cache->alloc_lock);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8e7ce9a9bc5e..ebed37bbf7a3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -116,7 +116,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
- unsigned long i, nr = compound_nr(page);
+ unsigned long i, nr = hpage_nr_pages(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index be33e6176cd9..5871a2aa86a5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2132,7 +2132,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
swp_entry_t entry;
unsigned int i;
- if (!si->inuse_pages)
+ if (!READ_ONCE(si->inuse_pages))
return 0;
if (!frontswap)
@@ -2148,7 +2148,7 @@ retry:
spin_lock(&mmlist_lock);
p = &init_mm.mmlist;
- while (si->inuse_pages &&
+ while (READ_ONCE(si->inuse_pages) &&
!signal_pending(current) &&
(p = p->next) != &init_mm.mmlist) {
@@ -2177,7 +2177,7 @@ retry:
mmput(prev_mm);
i = 0;
- while (si->inuse_pages &&
+ while (READ_ONCE(si->inuse_pages) &&
!signal_pending(current) &&
(i = find_next_to_unuse(si, i, frontswap)) != 0) {
@@ -2219,7 +2219,7 @@ retry:
* been preempted after get_swap_page(), temporarily hiding that swap.
* It's easy and robust (though cpu-intensive) just to keep retrying.
*/
- if (si->inuse_pages) {
+ if (READ_ONCE(si->inuse_pages)) {
if (!signal_pending(current))
goto retry;
retval = -EINTR;
@@ -2797,6 +2797,7 @@ static int swaps_open(struct inode *inode, struct file *file)
}
static const struct proc_ops swaps_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_open = swaps_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
@@ -3475,7 +3476,7 @@ int swap_duplicate(swp_entry_t entry)
*
* Called when allocating swap cache for an existing swap entry.
* This can return error codes. Returns 0 on success.
- * -EBUSY means there is a swap cache.
+ * -EEXIST means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
int swapcache_prepare(swp_entry_t entry)
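/*
 * [Illustrative sketch, not part of the patch] What the READ_ONCE()
 * conversions above buy: without them the compiler may hoist the load of
 * si->inuse_pages out of the retry loop and spin on a stale register
 * value. Simplified model of the kernel's definition:
 */
#define READ_ONCE_SKETCH(x)	(*(const volatile __typeof__(x) *)&(x))

static void wait_for_drain(unsigned int *inuse_pages)
{
	while (READ_ONCE_SKETCH(*inuse_pages))
		;	/* each iteration re-reads from memory */
}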
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 1b0d7abad1d4..512576e171ce 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -53,7 +53,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep)
+ struct page **pagep,
+ bool wp_copy)
{
struct mem_cgroup *memcg;
pte_t _dst_pte, *dst_pte;
@@ -99,9 +100,13 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
goto out_release;
- _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
- if (dst_vma->vm_flags & VM_WRITE)
- _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+ _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
+ if (dst_vma->vm_flags & VM_WRITE) {
+ if (wp_copy)
+ _dst_pte = pte_mkuffd_wp(_dst_pte);
+ else
+ _dst_pte = pte_mkwrite(_dst_pte);
+ }
dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
if (dst_vma->vm_file) {
@@ -276,10 +281,14 @@ retry:
BUG_ON(dst_addr >= dst_start + len);
/*
- * Serialize via hugetlb_fault_mutex
+ * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
+ * i_mmap_rwsem ensures the dst_pte remains valid even
+ * in the case of shared pmds. The fault mutex prevents
+ * races with other faulting threads.
*/
- idx = linear_page_index(dst_vma, dst_addr);
mapping = dst_vma->vm_file->f_mapping;
+ i_mmap_lock_read(mapping);
+ idx = linear_page_index(dst_vma, dst_addr);
hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -287,6 +296,7 @@ retry:
dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
if (!dst_pte) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
goto out_unlock;
}
@@ -294,6 +304,7 @@ retry:
dst_pteval = huge_ptep_get(dst_pte);
if (!huge_pte_none(dst_pteval)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
goto out_unlock;
}
@@ -301,6 +312,7 @@ retry:
dst_addr, src_addr, &page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ i_mmap_unlock_read(mapping);
vm_alloc_shared = vm_shared;
cond_resched();
@@ -408,7 +420,8 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
unsigned long dst_addr,
unsigned long src_addr,
struct page **page,
- bool zeropage)
+ bool zeropage,
+ bool wp_copy)
{
ssize_t err;
@@ -425,11 +438,13 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
if (!(dst_vma->vm_flags & VM_SHARED)) {
if (!zeropage)
err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
- dst_addr, src_addr, page);
+ dst_addr, src_addr, page,
+ wp_copy);
else
err = mfill_zeropage_pte(dst_mm, dst_pmd,
dst_vma, dst_addr);
} else {
+ VM_WARN_ON_ONCE(wp_copy);
if (!zeropage)
err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
dst_vma, dst_addr,
@@ -447,7 +462,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
unsigned long src_start,
unsigned long len,
bool zeropage,
- bool *mmap_changing)
+ bool *mmap_changing,
+ __u64 mode)
{
struct vm_area_struct *dst_vma;
ssize_t err;
@@ -455,6 +471,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
unsigned long src_addr, dst_addr;
long copied;
struct page *page;
+ bool wp_copy;
/*
* Sanitize the command parameters:
@@ -501,6 +518,14 @@ retry:
goto out_unlock;
/*
+ * validate 'mode' now that we know the dst_vma: don't allow
+ * a wrprotect copy if the userfaultfd didn't register as WP.
+ */
+ wp_copy = mode & UFFDIO_COPY_MODE_WP;
+ if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
+ goto out_unlock;
+
+ /*
* If this is a HUGETLB vma, pass off to appropriate routine
*/
if (is_vm_hugetlb_page(dst_vma))
@@ -555,7 +580,7 @@ retry:
BUG_ON(pmd_trans_huge(*dst_pmd));
err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
- src_addr, &page, zeropage);
+ src_addr, &page, zeropage, wp_copy);
cond_resched();
if (unlikely(err == -ENOENT)) {
@@ -602,14 +627,68 @@ out:
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
- bool *mmap_changing)
+ bool *mmap_changing, __u64 mode)
{
return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
- mmap_changing);
+ mmap_changing, mode);
}
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
unsigned long len, bool *mmap_changing)
{
- return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
+ return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
+}
+
+int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+ unsigned long len, bool enable_wp, bool *mmap_changing)
+{
+ struct vm_area_struct *dst_vma;
+ pgprot_t newprot;
+ int err;
+
+ /*
+ * Sanitize the command parameters:
+ */
+ BUG_ON(start & ~PAGE_MASK);
+ BUG_ON(len & ~PAGE_MASK);
+
+ /* Does the address range wrap, or is the span zero-sized? */
+ BUG_ON(start + len <= start);
+
+ down_read(&dst_mm->mmap_sem);
+
+ /*
+ * If memory mappings are changing because of a non-cooperative
+ * operation (e.g. mremap) running in parallel, bail out and
+ * request the user to retry later.
+ */
+ err = -EAGAIN;
+ if (mmap_changing && READ_ONCE(*mmap_changing))
+ goto out_unlock;
+
+ err = -ENOENT;
+ dst_vma = find_dst_vma(dst_mm, start, len);
+ /*
+ * Make sure the vma is not shared, and that the dst range is
+ * both valid and fully within a single existing vma.
+ */
+ if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+ goto out_unlock;
+ if (!userfaultfd_wp(dst_vma))
+ goto out_unlock;
+ if (!vma_is_anonymous(dst_vma))
+ goto out_unlock;
+
+ if (enable_wp)
+ newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+ else
+ newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+ change_protection(dst_vma, start, start + len, newprot,
+ enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+
+ err = 0;
+out_unlock:
+ up_read(&dst_mm->mmap_sem);
+ return err;
}
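/*
 * [Illustrative sketch, not part of the patch] The userspace view of
 * mwriteprotect_range(), assuming the UFFDIO_WRITEPROTECT uapi added by
 * this series; 'uffd' must already be registered with
 * UFFDIO_REGISTER_MODE_WP on the range.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int wp_range(int uffd, unsigned long addr, unsigned long len,
		    int enable)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = addr, .len = len },
		.mode  = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}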
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6b8eeb0ecee5..399f219544f7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3368,7 +3368,7 @@ retry:
goto overflow;
/*
- * If required width exeeds current VA block, move
+ * If required width exceeds current VA block, move
* base downwards and then recheck.
*/
if (base + end > va->va_end) {
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 4bac22fe1aa2..d69019fc3789 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -280,7 +280,7 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
enum vmpressure_levels level;
/* For now, no users for root-level efficiency */
- if (!memcg || memcg == root_mem_cgroup)
+ if (!memcg || mem_cgroup_is_root(memcg))
return;
spin_lock(&vmpr->sr_lock);
@@ -371,10 +371,8 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
int ret = 0;
spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
- if (!spec) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!spec)
+ return -ENOMEM;
/* Find required level */
token = strsep(&spec, ",");
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 876370565455..b06868fc4926 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -919,7 +919,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
* exceptional entries and shadow exceptional entries in the
* same address_space.
*/
- if (reclaimed && page_is_file_cache(page) &&
+ if (reclaimed && page_is_file_lru(page) &&
!mapping_exiting(mapping) && !dax_mapping(mapping))
shadow = workingset_eviction(page, target_memcg);
__delete_from_page_cache(page, shadow);
@@ -1043,7 +1043,7 @@ static void page_check_dirty_writeback(struct page *page,
* Anonymous pages are not handled by flushers and must be written
* from reclaim context. Do not stall reclaim based on them
*/
- if (!page_is_file_cache(page) ||
+ if (!page_is_file_lru(page) ||
(PageAnon(page) && !PageSwapBacked(page))) {
*dirty = false;
*writeback = false;
@@ -1084,9 +1084,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
while (!list_empty(page_list)) {
struct address_space *mapping;
struct page *page;
- int may_enter_fs;
enum page_references references = PAGEREF_RECLAIM;
- bool dirty, writeback;
+ bool dirty, writeback, may_enter_fs;
unsigned int nr_pages;
cond_resched();
@@ -1267,7 +1266,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto activate_locked_split;
}
- may_enter_fs = 1;
+ may_enter_fs = true;
/* Adding to swap updated mapping */
mapping = page_mapping(page);
@@ -1316,7 +1315,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* the rest of the LRU for clean pages and see
* the same dirty pages again (PageReclaim).
*/
- if (page_is_file_cache(page) &&
+ if (page_is_file_lru(page) &&
(!current_is_kswapd() || !PageReclaim(page) ||
!test_bit(PGDAT_DIRTY, &pgdat->flags))) {
/*
@@ -1460,7 +1459,7 @@ activate_locked:
try_to_free_swap(page);
VM_BUG_ON_PAGE(PageActive(page), page);
if (!PageMlocked(page)) {
- int type = page_is_file_cache(page);
+ int type = page_is_file_lru(page);
SetPageActive(page);
stat->nr_activate[type] += nr_pages;
count_memcg_page_event(page, PGACTIVATE);
@@ -1498,7 +1497,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
LIST_HEAD(clean_pages);
list_for_each_entry_safe(page, next, page_list, lru) {
- if (page_is_file_cache(page) && !PageDirty(page) &&
+ if (page_is_file_lru(page) && !PageDirty(page) &&
!__PageMovable(page) && !PageUnevictable(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
@@ -2054,7 +2053,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
* IO, plus JVM can create lots of anon VM_EXEC pages,
* so we ignore them here.
*/
- if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
+ if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
list_add(&page->lru, &l_active);
continue;
}
@@ -2096,7 +2095,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
unsigned long reclaim_pages(struct list_head *page_list)
{
- int nid = -1;
+ int nid = NUMA_NO_NODE;
unsigned long nr_reclaimed = 0;
LIST_HEAD(node_page_list);
struct reclaim_stat dummy_stat;
@@ -2111,7 +2110,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
while (!list_empty(page_list)) {
page = lru_to_page(page_list);
- if (nid == -1) {
+ if (nid == NUMA_NO_NODE) {
nid = page_to_nid(page);
INIT_LIST_HEAD(&node_page_list);
}
@@ -2132,7 +2131,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
putback_lru_page(page);
}
- nid = -1;
+ nid = NUMA_NO_NODE;
}
if (!list_empty(&node_page_list)) {
@@ -2427,10 +2426,8 @@ out:
case SCAN_FILE:
case SCAN_ANON:
/* Scan one type exclusively */
- if ((scan_balance == SCAN_FILE) != file) {
- lruvec_size = 0;
+ if ((scan_balance == SCAN_FILE) != file)
scan = 0;
- }
break;
default:
/* Look ma, no brain */
@@ -3096,7 +3093,6 @@ retry:
if (sc->memcg_low_skipped) {
sc->priority = initial_priority;
sc->force_deactivate = 0;
- sc->skipped_deactivate = 0;
sc->memcg_low_reclaim = 1;
sc->memcg_low_skipped = 0;
goto retry;
@@ -3136,8 +3132,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
/* kswapd must be awake if processes are being throttled */
if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
- pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
- (enum zone_type)ZONE_NORMAL);
+ if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL)
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL);
+
wake_up_interruptible(&pgdat->kswapd_wait);
}
@@ -3769,9 +3766,9 @@ out:
static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
enum zone_type prev_classzone_idx)
{
- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
- return prev_classzone_idx;
- return pgdat->kswapd_classzone_idx;
+ enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
+
+ return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx;
}
static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
@@ -3815,8 +3812,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o
* the previous request that slept prematurely.
*/
if (remaining) {
- pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
- pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
+ WRITE_ONCE(pgdat->kswapd_classzone_idx,
+ kswapd_classzone_idx(pgdat, classzone_idx));
+
+ if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
+ WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
}
finish_wait(&pgdat->kswapd_wait, &wait);
@@ -3893,12 +3893,12 @@ static int kswapd(void *p)
tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
set_freezable();
- pgdat->kswapd_order = 0;
- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
+ WRITE_ONCE(pgdat->kswapd_order, 0);
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
for ( ; ; ) {
bool ret;
- alloc_order = reclaim_order = pgdat->kswapd_order;
+ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
kswapd_try_sleep:
@@ -3906,10 +3906,10 @@ kswapd_try_sleep:
classzone_idx);
/* Read the new order and classzone_idx */
- alloc_order = reclaim_order = pgdat->kswapd_order;
+ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
- pgdat->kswapd_order = 0;
- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
+ WRITE_ONCE(pgdat->kswapd_order, 0);
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
ret = try_to_freeze();
if (kthread_should_stop())
@@ -3953,20 +3953,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
enum zone_type classzone_idx)
{
pg_data_t *pgdat;
+ enum zone_type curr_idx;
if (!managed_zone(zone))
return;
if (!cpuset_zone_allowed(zone, gfp_flags))
return;
+
pgdat = zone->zone_pgdat;
+ curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
+
+ if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx)
+ WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx);
+
+ if (READ_ONCE(pgdat->kswapd_order) < order)
+ WRITE_ONCE(pgdat->kswapd_order, order);
- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
- pgdat->kswapd_classzone_idx = classzone_idx;
- else
- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
- classzone_idx);
- pgdat->kswapd_order = max(pgdat->kswapd_order, order);
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
@@ -4030,27 +4033,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
}
#endif /* CONFIG_HIBERNATION */
-/* It's optimal to keep kswapds on the same CPUs as their memory, but
- not required for correctness. So if the last cpu in a node goes
- away, we get changed to run anywhere: as the first one comes back,
- restore their cpu bindings. */
-static int kswapd_cpu_online(unsigned int cpu)
-{
- int nid;
-
- for_each_node_state(nid, N_MEMORY) {
- pg_data_t *pgdat = NODE_DATA(nid);
- const struct cpumask *mask;
-
- mask = cpumask_of_node(pgdat->node_id);
-
- if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
- /* One of our CPUs online: restore mask */
- set_cpus_allowed_ptr(pgdat->kswapd, mask);
- }
- return 0;
-}
-
/*
* This kswapd start function will be called by init and node-hot-add.
* On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
@@ -4090,15 +4072,11 @@ void kswapd_stop(int nid)
static int __init kswapd_init(void)
{
- int nid, ret;
+ int nid;
swap_setup();
for_each_node_state(nid, N_MEMORY)
kswapd_run(nid);
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "mm/vmscan:online", kswapd_cpu_online,
- NULL);
- WARN_ON(ret < 0);
return 0;
}
@@ -4277,29 +4255,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
}
#endif
-/*
- * page_evictable - test whether a page is evictable
- * @page: the page to test
- *
- * Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list.
- *
- * Reasons page might not be evictable:
- * (1) page's mapping marked unevictable
- * (2) page is part of an mlocked VMA
- *
- */
-int page_evictable(struct page *page)
-{
- int ret;
-
- /* Prevent address_space of inode and swap cache from being freed */
- rcu_read_lock();
- ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
- rcu_read_unlock();
- return ret;
-}
-
/**
* check_move_unevictable_pages - check pages for evictability and move to
* appropriate zone lru list
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 78d53378db99..96d21a792b57 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1168,6 +1168,8 @@ const char * const vmstat_text[] = {
"nr_dirtied",
"nr_written",
"nr_kernel_misc_reclaimable",
+ "nr_foll_pin_acquired",
+ "nr_foll_pin_released",
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
@@ -1254,9 +1256,12 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"thp_fault_alloc",
"thp_fault_fallback",
+ "thp_fault_fallback_charge",
"thp_collapse_alloc",
"thp_collapse_alloc_failed",
"thp_file_alloc",
+ "thp_file_fallback",
+ "thp_file_fallback_charge",
"thp_file_mapped",
"thp_split_page",
"thp_split_page_failed",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 22d17ecfe7df..2f836a2b993f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -424,7 +424,7 @@ static void *zs_zpool_map(void *pool, unsigned long handle,
case ZPOOL_MM_WO:
zs_mm = ZS_MM_WO;
break;
- case ZPOOL_MM_RW: /* fall through */
+ case ZPOOL_MM_RW:
default:
zs_mm = ZS_MM_RW;
break;
@@ -891,12 +891,12 @@ static inline int trypin_tag(unsigned long handle)
return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
-static void pin_tag(unsigned long handle)
+static void pin_tag(unsigned long handle) __acquires(bitlock)
{
bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
-static void unpin_tag(unsigned long handle)
+static void unpin_tag(unsigned long handle) __releases(bitlock)
{
bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
@@ -1833,12 +1833,12 @@ static void migrate_lock_init(struct zspage *zspage)
rwlock_init(&zspage->lock);
}
-static void migrate_read_lock(struct zspage *zspage)
+static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
{
read_lock(&zspage->lock);
}
-static void migrate_read_unlock(struct zspage *zspage)
+static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
{
read_unlock(&zspage->lock);
}
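/*
 * [Illustrative sketch, not part of the patch] The __acquires()/
 * __releases() annotations added above only matter to the sparse static
 * checker, which tracks lock context; they compile away otherwise.
 * Simplified from include/linux/compiler_types.h:
 */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif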
diff --git a/mm/zswap.c b/mm/zswap.c
index 55094e63b72d..fbb782924ccc 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -77,8 +77,8 @@ static bool zswap_pool_reached_full;
#define ZSWAP_PARAM_UNSET ""
-/* Enable/disable zswap (disabled by default) */
-static bool zswap_enabled;
+/* Enable/disable zswap */
+static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
const struct kernel_param *);
static struct kernel_param_ops zswap_enabled_param_ops = {
@@ -88,8 +88,7 @@ static struct kernel_param_ops zswap_enabled_param_ops = {
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
/* Crypto compressor to use */
-#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
-static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
const struct kernel_param *);
static struct kernel_param_ops zswap_compressor_param_ops = {
@@ -101,8 +100,7 @@ module_param_cb(compressor, &zswap_compressor_param_ops,
&zswap_compressor, 0644);
/* Compressed storage zpool to use */
-#define ZSWAP_ZPOOL_DEFAULT "zbud"
-static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static struct kernel_param_ops zswap_zpool_param_ops = {
.set = zswap_zpool_param_set,
@@ -599,11 +597,12 @@ static __init struct zswap_pool *__zswap_pool_create_fallback(void)
bool has_comp, has_zpool;
has_comp = crypto_has_comp(zswap_compressor, 0, 0);
- if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
+ if (!has_comp && strcmp(zswap_compressor,
+ CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
pr_err("compressor %s not available, using default %s\n",
- zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
+ zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
param_free_charp(&zswap_compressor);
- zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+ zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
has_comp = crypto_has_comp(zswap_compressor, 0, 0);
}
if (!has_comp) {
@@ -614,11 +613,12 @@ static __init struct zswap_pool *__zswap_pool_create_fallback(void)
}
has_zpool = zpool_has_pool(zswap_zpool_type);
- if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
+ if (!has_zpool && strcmp(zswap_zpool_type,
+ CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
pr_err("zpool %s not available, using default %s\n",
- zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
+ zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
param_free_charp(&zswap_zpool_type);
- zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+ zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
has_zpool = zpool_has_pool(zswap_zpool_type);
}
if (!has_zpool) {
diff --git a/net/9p/client.c b/net/9p/client.c
index 1d48afc7033c..fc1f3635e5dd 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1549,82 +1549,94 @@ EXPORT_SYMBOL(p9_client_unlinkat);
int
p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
{
- struct p9_client *clnt = fid->clnt;
- struct p9_req_t *req;
int total = 0;
*err = 0;
+ while (iov_iter_count(to)) {
+ int count;
+
+ count = p9_client_read_once(fid, offset, to, err);
+ if (!count || *err)
+ break;
+ offset += count;
+ total += count;
+ }
+ return total;
+}
+EXPORT_SYMBOL(p9_client_read);
+
+int
+p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ int *err)
+{
+ struct p9_client *clnt = fid->clnt;
+ struct p9_req_t *req;
+ int count = iov_iter_count(to);
+ int rsize, non_zc = 0;
+ char *dataptr;
+
+ *err = 0;
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
- while (iov_iter_count(to)) {
- int count = iov_iter_count(to);
- int rsize, non_zc = 0;
- char *dataptr;
+ rsize = fid->iounit;
+ if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
+ rsize = clnt->msize - P9_IOHDRSZ;
- rsize = fid->iounit;
- if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
- rsize = clnt->msize - P9_IOHDRSZ;
+ if (count < rsize)
+ rsize = count;
- if (count < rsize)
- rsize = count;
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ /* response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
+ 0, 11, "dqd", fid->fid,
+ offset, rsize);
+ } else {
+ non_zc = 1;
+ req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
+ rsize);
+ }
+ if (IS_ERR(req)) {
+ *err = PTR_ERR(req);
+ return 0;
+ }
- /* Don't bother zerocopy for small IO (< 1024) */
- if (clnt->trans_mod->zc_request && rsize > 1024) {
- /*
- * response header len is 11
- * PDU Header(7) + IO Size (4)
- */
- req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
- 0, 11, "dqd", fid->fid,
- offset, rsize);
- } else {
- non_zc = 1;
- req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
- rsize);
- }
- if (IS_ERR(req)) {
- *err = PTR_ERR(req);
- break;
- }
+ *err = p9pdu_readf(&req->rc, clnt->proto_version,
+ "D", &count, &dataptr);
+ if (*err) {
+ trace_9p_protocol_dump(clnt, &req->rc);
+ p9_tag_remove(clnt, req);
+ return 0;
+ }
+ if (rsize < count) {
+ pr_err("bogus RREAD count (%d > %d)\n", count, rsize);
+ count = rsize;
+ }
- *err = p9pdu_readf(&req->rc, clnt->proto_version,
- "D", &count, &dataptr);
- if (*err) {
- trace_9p_protocol_dump(clnt, &req->rc);
- p9_tag_remove(clnt, req);
- break;
- }
- if (rsize < count) {
- pr_err("bogus RREAD count (%d > %d)\n", count, rsize);
- count = rsize;
- }
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+ if (!count) {
+ p9_tag_remove(clnt, req);
+ return 0;
+ }
- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
- if (!count) {
- p9_tag_remove(clnt, req);
- break;
- }
+ if (non_zc) {
+ int n = copy_to_iter(dataptr, count, to);
- if (non_zc) {
- int n = copy_to_iter(dataptr, count, to);
- total += n;
- offset += n;
- if (n != count) {
- *err = -EFAULT;
- p9_tag_remove(clnt, req);
- break;
- }
- } else {
- iov_iter_advance(to, count);
- total += count;
- offset += count;
+ if (n != count) {
+ *err = -EFAULT;
+ p9_tag_remove(clnt, req);
+ return n;
}
- p9_tag_remove(clnt, req);
+ } else {
+ iov_iter_advance(to, count);
}
- return total;
+ p9_tag_remove(clnt, req);
+ return count;
}
-EXPORT_SYMBOL(p9_client_read);
+EXPORT_SYMBOL(p9_client_read_once);
int
p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
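
The 9p change splits p9_client_read() into a thin accumulation loop over a new exported helper, p9_client_read_once(), which issues exactly one TREAD round trip; callers that want single-shot semantics can now call the helper directly. The same split in a minimal userspace analogue built on POSIX read():

    #include <unistd.h>
    #include <errno.h>

    /* One bounded read(); returns bytes read, 0 on EOF, sets *err on failure. */
    static ssize_t read_once(int fd, void *buf, size_t len, int *err)
    {
            ssize_t n = read(fd, buf, len);

            *err = 0;
            if (n < 0) {
                    *err = errno;
                    return 0;
            }
            return n;
    }

    /* The loop the patch keeps in the outer function: accumulate until EOF or error. */
    static size_t read_full(int fd, char *buf, size_t len, int *err)
    {
            size_t total = 0;

            *err = 0;
            while (total < len) {
                    ssize_t n = read_once(fd, buf + total, len - total, err);
                    if (!n || *err)
                            break;
                    total += n;
            }
            return total;
    }
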
diff --git a/net/bpfilter/.gitignore b/net/bpfilter/.gitignore
index e97084e3eea2..f34e85ee8204 100644
--- a/net/bpfilter/.gitignore
+++ b/net/bpfilter/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
bpfilter_umh
diff --git a/net/core/dev.c b/net/core/dev.c
index 9c9e763bfe0e..df8097b8e286 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4140,7 +4140,8 @@ EXPORT_SYMBOL(netdev_max_backlog);
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
-unsigned int __read_mostly netdev_budget_usecs = 2000;
+/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
+unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
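
The new initializer keeps the default meaningful on any clock rate: 2 * USEC_PER_SEC / HZ is the length of two scheduler ticks expressed in microseconds, which the comment requires so the 1-jiffy timeout derived from it cannot round down to zero. Worked out for common CONFIG_HZ values (USEC_PER_SEC = 1000000):

    HZ = 1000:  2 * 1000000 / 1000 =  2000 us   (matches the old fixed default)
    HZ =  250:  2 * 1000000 /  250 =  8000 us
    HZ =  100:  2 * 1000000 /  100 = 20000 us

On an HZ=100 kernel the old fixed 2000 us was only a fifth of a jiffy.
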
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5bf8d22a47ec..39d37d0ef575 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1065,11 +1065,12 @@ static void neigh_timer_handler(struct timer_list *t)
neigh->updated = jiffies;
atomic_set(&neigh->probes, 0);
notify = 1;
- next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+ next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+ HZ/100);
}
} else {
/* NUD_PROBE|NUD_INCOMPLETE */
- next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+ next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
}
if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
@@ -1125,7 +1126,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
- HZ/2);
+ HZ/100);
neigh_add_timer(neigh, next);
immediate_probe = true;
} else {
@@ -1427,7 +1428,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
neigh->nud_state = NUD_INCOMPLETE;
atomic_set(&neigh->probes, neigh_max_probes(neigh));
neigh_add_timer(neigh,
- jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+ jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+ HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
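
All three neighbour.c call sites apply the same guard, and the same floor is applied again in addrconf.c and ndisc.c below: the user-tunable RETRANS_TIME is clamped to at least HZ/100 (10 ms) before it is used to arm a timer, so a zero or tiny setting cannot make the timer re-fire constantly. A minimal sketch of the clamp, with HZ assumed to be 1000:

    #define HZ 1000   /* assumption: CONFIG_HZ=1000 */

    /* Never arm the retransmit timer for less than 10 ms (HZ/100 ticks). */
    static unsigned long clamped_retrans(unsigned long user_setting)
    {
            unsigned long floor = HZ / 100;

            return user_setting > floor ? user_setting : floor;
    }
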
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index cf0215734ceb..4773ad6ec111 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -80,7 +80,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
struct net_device *netdev = to_net_dev(dev);
struct net *net = dev_net(netdev);
unsigned long new;
- int ret = -EINVAL;
+ int ret;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
diff --git a/net/core/sock.c b/net/core/sock.c
index 0510826bf860..90509c37d291 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -574,7 +574,7 @@ static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
/* Sorry... */
ret = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_RAW))
+ if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
goto out;
ret = -EINVAL;
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 3e1a90669006..ad53eb31d40f 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -302,7 +302,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
* - the key's semaphore is read-locked
*/
static long dns_resolver_read(const struct key *key,
- char __user *buffer, size_t buflen)
+ char *buffer, size_t buflen)
{
int err = PTR_ERR(key->payload.data[dns_key_error]);
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 5465a395da04..1decb25f6764 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -69,10 +69,16 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
else
multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
- if (!data[IFLA_HSR_VERSION])
+ if (!data[IFLA_HSR_VERSION]) {
hsr_version = 0;
- else
+ } else {
hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+ if (hsr_version > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only versions 0..1 are supported");
+ return -EINVAL;
+ }
+ }
return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 30fa42f5997d..c0dd561aa190 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -614,12 +614,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
return NULL;
}
-static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
+static int ip_mc_autojoin_config(struct net *net, bool join,
+ const struct in_ifaddr *ifa)
{
+#if defined(CONFIG_IP_MULTICAST)
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ifa->ifa_address,
.imr_ifindex = ifa->ifa_dev->dev->ifindex,
};
+ struct sock *sk = net->ipv4.mc_autojoin_sk;
int ret;
ASSERT_RTNL();
@@ -632,6 +635,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
release_sock(sk);
return ret;
+#else
+ return -EOPNOTSUPP;
+#endif
}
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -675,7 +681,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
continue;
if (ipv4_is_multicast(ifa->ifa_address))
- ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
+ ip_mc_autojoin_config(net, false, ifa);
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
return 0;
}
@@ -940,8 +946,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
- int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
- true, ifa);
+ int ret = ip_mc_autojoin_config(net, true, ifa);
if (ret < 0) {
inet_free_ifa(ifa);
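
ip_mc_autojoin_config() now encapsulates both the autojoin socket lookup and the CONFIG_IP_MULTICAST dependency: with multicast compiled out it returns -EOPNOTSUPP, so callers no longer carry their own #ifdefs. The idiom in a self-contained sketch (CONFIG_FEATURE_X is a hypothetical build-system switch):

    #include <errno.h>

    /* #define CONFIG_FEATURE_X 1  -- set by the build system in the real tree */

    static int feature_x_configure(int enable)
    {
    #if defined(CONFIG_FEATURE_X)
            /* real work would go here */
            (void)enable;
            return 0;
    #else
            (void)enable;
            return -EOPNOTSUPP;   /* single point of fallback for every caller */
    #endif
    }
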
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 84a28b539c43..24e319dfb510 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1357,7 +1357,7 @@ retry:
regen_advance = idev->cnf.regen_max_retry *
idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+ max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
@@ -4121,7 +4121,8 @@ static void addrconf_dad_work(struct work_struct *w)
ifp->dad_probes--;
addrconf_mod_dad_work(ifp,
- NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
+ max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
+ HZ/100));
spin_unlock(&ifp->lock);
write_unlock_bh(&idev->lock);
@@ -4527,7 +4528,7 @@ restart:
!(ifp->flags&IFA_F_TENTATIVE)) {
unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
ifp->idev->cnf.dad_transmits *
- NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
+ max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
if (age >= ifp->prefered_lft - regen_advance) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 2688f3e82165..fc5000370030 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -229,6 +229,25 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
return res;
}
+static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
+ struct flowi6 *fl6)
+{
+ struct net *net = sock_net(sk);
+ struct dst_entry *dst;
+ bool res = false;
+
+ dst = ip6_route_output(net, sk, fl6);
+ if (!dst->error) {
+ struct rt6_info *rt = (struct rt6_info *)dst;
+ struct in6_addr prefsrc;
+
+ rt6_get_prefsrc(rt, &prefsrc);
+ res = !ipv6_addr_any(&prefsrc);
+ }
+ dst_release(dst);
+ return res;
+}
+
/*
* an inline helper for the "simple" if statement below
* checks if parameter problem report is caused by an
@@ -527,7 +546,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
saddr = force_saddr;
if (saddr) {
fl6.saddr = *saddr;
- } else {
+ } else if (!icmpv6_rt_has_prefsrc(sk, type, &fl6)) {
/* select a more meaningful saddr from input if */
struct net_device *in_netdev;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 6ffa153e5166..1ecd4e9b0bdf 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1359,8 +1359,8 @@ skip_defrtr:
if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/HZ) {
rtime = (rtime*HZ)/1000;
- if (rtime < HZ/10)
- rtime = HZ/10;
+ if (rtime < HZ/100)
+ rtime = HZ/100;
NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
in6_dev->tstamp = jiffies;
send_ifinfo_notify = true;
diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
index dc4f20e23bf7..d38b476fc7f2 100644
--- a/net/ipv6/rpl.c
+++ b/net/ipv6/rpl.c
@@ -48,7 +48,7 @@ void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
outhdr->cmpri = 0;
outhdr->cmpre = 0;
- for (i = 0; i <= n; i++)
+ for (i = 0; i < n; i++)
ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[i], daddr,
ipv6_rpl_segdata_pos(inhdr, i),
inhdr->cmpri);
@@ -66,7 +66,7 @@ static unsigned char ipv6_rpl_srh_calc_cmpri(const struct ipv6_rpl_sr_hdr *inhdr
int i;
for (plen = 0; plen < sizeof(*daddr); plen++) {
- for (i = 0; i <= n; i++) {
+ for (i = 0; i < n; i++) {
if (daddr->s6_addr[plen] !=
inhdr->rpl_segaddr[i].s6_addr[plen])
return plen;
@@ -114,7 +114,7 @@ void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
outhdr->cmpri = cmpri;
outhdr->cmpre = cmpre;
- for (i = 0; i <= n; i++)
+ for (i = 0; i < n; i++)
ipv6_rpl_addr_compress(ipv6_rpl_segdata_pos(outhdr, i),
&inhdr->rpl_segaddr[i], cmpri);
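
The three RPL fixes are the same off-by-one: with n addresses in rpl_segaddr[], valid indices are 0..n-1, so `i <= n` walked one element past the array on every pass (and, in the decompress case, wrote past it). A worked miniature:

    #include <stdio.h>

    int main(void)
    {
            int seg[4] = {1, 2, 3, 4};   /* n = 4 valid entries, indices 0..3 */
            int n = 4, i, sum = 0;

            for (i = 0; i < n; i++)      /* "i <= n" would read seg[4]: out of bounds */
                    sum += seg[i];
            printf("%d\n", sum);         /* 10 */
            return 0;
    }
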
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index f5a9bdc4980c..ebb381c3f1b9 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -920,51 +920,51 @@ static const struct genl_ops l2tp_nl_ops[] = {
.cmd = L2TP_CMD_TUNNEL_CREATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_tunnel_create,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_TUNNEL_DELETE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_tunnel_delete,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_TUNNEL_MODIFY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_tunnel_modify,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_TUNNEL_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_tunnel_get,
.dumpit = l2tp_nl_cmd_tunnel_dump,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_CREATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_session_create,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_DELETE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_session_delete,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_MODIFY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_session_modify,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = l2tp_nl_cmd_session_get,
.dumpit = l2tp_nl_cmd_session_dump,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
};
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index bd220ee4aac9..faf57585b892 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -4,6 +4,8 @@
* Copyright (c) 2017 - 2019, Intel Corporation.
*/
+#define pr_fmt(fmt) "MPTCP: " fmt
+
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 064639f72487..977d9c8b1453 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -3,6 +3,8 @@
*
* Copyright (c) 2019, Intel Corporation.
*/
+#define pr_fmt(fmt) "MPTCP: " fmt
+
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index a0ce7f324499..86d61ab34c7c 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -4,6 +4,8 @@
* Copyright (c) 2020, Red Hat, Inc.
*/
+#define pr_fmt(fmt) "MPTCP: " fmt
+
#include <linux/inet.h>
#include <linux/kernel.h>
#include <net/tcp.h>
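
The three MPTCP files gain an identical pair of lines: defining pr_fmt() before any include makes every pr_*() call in that file carry an "MPTCP: " prefix, because the kernel's printk.h expands pr_err(fmt, ...) to printk(KERN_ERR pr_fmt(fmt), ...). A userspace stand-in showing the macro composition:

    #include <stdio.h>

    #define pr_fmt(fmt) "MPTCP: " fmt
    /* userspace stand-in for the kernel's pr_err(); in the kernel, pr_fmt()
     * must be defined before printk.h is pulled in for the prefix to apply */
    #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_err("token %u not found\n", 42u);   /* "MPTCP: token 42 not found" */
            return 0;
    }
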
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 8dd17589217d..340cb955af25 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -86,7 +86,8 @@ find_set_type(const char *name, u8 family, u8 revision)
{
struct ip_set_type *type;
- list_for_each_entry_rcu(type, &ip_set_type_list, list)
+ list_for_each_entry_rcu(type, &ip_set_type_list, list,
+ lockdep_is_held(&ip_set_type_mutex))
if (STRNCMP(type->name, name) &&
(type->family == family ||
type->family == NFPROTO_UNSPEC) &&
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d0ab5ffa1e2c..9adfbc7e8ae7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3542,6 +3542,7 @@ cont:
continue;
if (!strcmp(set->name, i->name)) {
kfree(set->name);
+ set->name = NULL;
return -ENFILE;
}
}
@@ -3961,8 +3962,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
NFT_SET_MAP | NFT_SET_EVAL |
- NFT_SET_OBJECT))
- return -EINVAL;
+ NFT_SET_OBJECT | NFT_SET_CONCAT))
+ return -EOPNOTSUPP;
/* Only one of these operations is supported */
if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
(NFT_SET_MAP | NFT_SET_OBJECT))
@@ -4000,7 +4001,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
if (objtype == NFT_OBJECT_UNSPEC ||
objtype > NFT_OBJECT_MAX)
- return -EINVAL;
+ return -EOPNOTSUPP;
} else if (flags & NFT_SET_OBJECT)
return -EINVAL;
else
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 1e70359d633c..f1363b8aabba 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -29,7 +29,7 @@ void nft_lookup_eval(const struct nft_expr *expr,
{
const struct nft_lookup *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;
- const struct nft_set_ext *ext;
+ const struct nft_set_ext *ext = NULL;
bool found;
found = set->ops->lookup(nft_net(pkt), set, &regs->data[priv->sreg],
@@ -39,11 +39,13 @@ void nft_lookup_eval(const struct nft_expr *expr,
return;
}
- if (set->flags & NFT_SET_MAP)
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), set->dlen);
+ if (ext) {
+ if (set->flags & NFT_SET_MAP)
+ nft_data_copy(&regs->data[priv->dreg],
+ nft_set_ext_data(ext), set->dlen);
- nft_set_elem_update_expr(ext, regs, pkt);
+ nft_set_elem_update_expr(ext, regs, pkt);
+ }
}
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 32f0fc8be3a4..2a81ea421819 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -81,7 +81,6 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
u32 idx, off;
nft_bitmap_location(set, key, &idx, &off);
- *ext = NULL;
return nft_bitmap_active(priv->bitmap, idx, off, genmask);
}
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 3a5552e14f75..3ffef454d469 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -218,27 +218,26 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
/* Detect overlaps as we descend the tree. Set the flag in these cases:
*
- * a1. |__ _ _? >|__ _ _ (insert start after existing start)
- * a2. _ _ __>| ?_ _ __| (insert end before existing end)
- * a3. _ _ ___| ?_ _ _>| (insert end after existing end)
- * a4. >|__ _ _ _ _ __| (insert start before existing end)
+ * a1. _ _ __>| ?_ _ __| (insert end before existing end)
+ * a2. _ _ ___| ?_ _ _>| (insert end after existing end)
+ * a3. _ _ ___? >|_ _ __| (insert start before existing end)
*
* and clear it later on, as we eventually reach the points indicated by
* '?' above, in the cases described below. We'll always meet these
* later, locally, due to tree ordering, and overlaps for the intervals
* that are the closest together are always evaluated last.
*
- * b1. |__ _ _! >|__ _ _ (insert start after existing end)
- * b2. _ _ __>| !_ _ __| (insert end before existing start)
- * b3. !_____>| (insert end after existing start)
+ * b1. _ _ __>| !_ _ __| (insert end before existing start)
+ * b2. _ _ ___| !_ _ _>| (insert end after existing start)
+ * b3. _ _ ___! >|_ _ __| (insert start after existing end)
*
- * Case a4. resolves to b1.:
+ * Case a3. resolves to b3.:
* - if the inserted start element is the leftmost, because the '0'
* element in the tree serves as end element
* - otherwise, if an existing end is found. Note that end elements are
* always inserted after corresponding start elements.
*
- * For a new, rightmost pair of elements, we'll hit cases b1. and b3.,
+ * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
* in that order.
*
* The flag is also cleared in two special cases:
@@ -262,9 +261,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
p = &parent->rb_left;
if (nft_rbtree_interval_start(new)) {
- overlap = nft_rbtree_interval_start(rbe) &&
- nft_set_elem_active(&rbe->ext,
- genmask);
+ if (nft_rbtree_interval_end(rbe) &&
+ nft_set_elem_active(&rbe->ext, genmask))
+ overlap = false;
} else {
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 75bd0e5dd312..7b2f359bfce4 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -346,6 +346,9 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
pr_debug("checkentry targinfo%s\n", info->label);
+ if (info->send_nl_msg)
+ return -EOPNOTSUPP;
+
ret = idletimer_tg_helper((struct idletimer_tg_info *)info);
if(ret < 0)
{
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index fd8a01ca7a2d..2398d7238300 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -462,12 +462,14 @@ static void flow_table_copy_flows(struct table_instance *old,
struct hlist_head *head = &old->buckets[i];
if (ufid)
- hlist_for_each_entry(flow, head,
- ufid_table.node[old_ver])
+ hlist_for_each_entry_rcu(flow, head,
+ ufid_table.node[old_ver],
+ lockdep_ovsl_is_held())
ufid_table_instance_insert(new, flow);
else
- hlist_for_each_entry(flow, head,
- flow_table.node[old_ver])
+ hlist_for_each_entry_rcu(flow, head,
+ flow_table.node[old_ver],
+ lockdep_ovsl_is_held())
table_instance_insert(new, flow);
}
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index e22092e4a783..7ed31b5e77e4 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -906,20 +906,21 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
node = NULL;
if (addr->sq_node == QRTR_NODE_BCAST) {
- enqueue_fn = qrtr_bcast_enqueue;
- if (addr->sq_port != QRTR_PORT_CTRL) {
+ if (addr->sq_port != QRTR_PORT_CTRL &&
+ qrtr_local_nid != QRTR_NODE_BCAST) {
release_sock(sk);
return -ENOTCONN;
}
+ enqueue_fn = qrtr_bcast_enqueue;
} else if (addr->sq_node == ipc->us.sq_node) {
enqueue_fn = qrtr_local_enqueue;
} else {
- enqueue_fn = qrtr_node_enqueue;
node = qrtr_node_lookup(addr->sq_node);
if (!node) {
release_sock(sk);
return -ECONNRESET;
}
+ enqueue_fn = qrtr_node_enqueue;
}
plen = (len + 3) & ~3;
diff --git a/net/rds/message.c b/net/rds/message.c
index 50f13f1d4ae0..bbecb8cb873e 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2020 Oracle and/or its affiliates.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -162,12 +162,12 @@ static void rds_message_purge(struct rds_message *rm)
if (rm->rdma.op_active)
rds_rdma_free_op(&rm->rdma);
if (rm->rdma.op_rdma_mr)
- rds_mr_put(rm->rdma.op_rdma_mr);
+ kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);
if (rm->atomic.op_active)
rds_atomic_free_op(&rm->atomic);
if (rm->atomic.op_rdma_mr)
- rds_mr_put(rm->atomic.op_rdma_mr);
+ kref_put(&rm->atomic.op_rdma_mr->r_kref, __rds_put_mr_final);
}
void rds_message_put(struct rds_message *rm)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 585e6b3b69ce..113e442101ce 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -84,7 +84,7 @@ static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
if (insert) {
rb_link_node(&insert->r_rb_node, parent, p);
rb_insert_color(&insert->r_rb_node, root);
- refcount_inc(&insert->r_refcount);
+ kref_get(&insert->r_kref);
}
return NULL;
}
@@ -99,10 +99,7 @@ static void rds_destroy_mr(struct rds_mr *mr)
unsigned long flags;
rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
- mr->r_key, refcount_read(&mr->r_refcount));
-
- if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
- return;
+ mr->r_key, kref_read(&mr->r_kref));
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
if (!RB_EMPTY_NODE(&mr->r_rb_node))
@@ -115,8 +112,10 @@ static void rds_destroy_mr(struct rds_mr *mr)
mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
-void __rds_put_mr_final(struct rds_mr *mr)
+void __rds_put_mr_final(struct kref *kref)
{
+ struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);
+
rds_destroy_mr(mr);
kfree(mr);
}
@@ -140,8 +139,7 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
- rds_destroy_mr(mr);
- rds_mr_put(mr);
+ kref_put(&mr->r_kref, __rds_put_mr_final);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
@@ -242,7 +240,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
goto out;
}
- refcount_set(&mr->r_refcount, 1);
+ kref_init(&mr->r_kref);
RB_CLEAR_NODE(&mr->r_rb_node);
mr->r_trans = rs->rs_transport;
mr->r_sock = rs;
@@ -343,7 +341,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
if (mr_ret) {
- refcount_inc(&mr->r_refcount);
+ kref_get(&mr->r_kref);
*mr_ret = mr;
}
@@ -351,7 +349,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
out:
kfree(pages);
if (mr)
- rds_mr_put(mr);
+ kref_put(&mr->r_kref, __rds_put_mr_final);
return ret;
}
@@ -434,13 +432,7 @@ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
if (!mr)
return -EINVAL;
- /*
- * call rds_destroy_mr() ourselves so that we're sure it's done by the time
- * we return. If we let rds_mr_put() do it it might not happen until
- * someone else drops their ref.
- */
- rds_destroy_mr(mr);
- rds_mr_put(mr);
+ kref_put(&mr->r_kref, __rds_put_mr_final);
return 0;
}
@@ -464,6 +456,14 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
return;
}
+ /* Get a reference so that the MR won't go away before calling
+ * sync_mr() below.
+ */
+ kref_get(&mr->r_kref);
+
+ /* If it is going to be freed, remove it from the tree now so
+ * that no other thread can find it and free it.
+ */
if (mr->r_use_once || force) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
@@ -477,12 +477,13 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
if (mr->r_trans->sync_mr)
mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
+ /* Release the reference held above. */
+ kref_put(&mr->r_kref, __rds_put_mr_final);
+
/* If the MR was marked as invalidate, this will
* trigger an async flush. */
- if (zot_me) {
- rds_destroy_mr(mr);
- rds_mr_put(mr);
- }
+ if (zot_me)
+ kref_put(&mr->r_kref, __rds_put_mr_final);
}
void rds_rdma_free_op(struct rm_rdma_op *ro)
@@ -490,7 +491,7 @@ void rds_rdma_free_op(struct rm_rdma_op *ro)
unsigned int i;
if (ro->op_odp_mr) {
- rds_mr_put(ro->op_odp_mr);
+ kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
} else {
for (i = 0; i < ro->op_nents; i++) {
struct page *page = sg_page(&ro->op_sg[i]);
@@ -730,7 +731,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
goto out_pages;
}
RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
- refcount_set(&local_odp_mr->r_refcount, 1);
+ kref_init(&local_odp_mr->r_kref);
local_odp_mr->r_trans = rs->rs_transport;
local_odp_mr->r_sock = rs;
local_odp_mr->r_trans_private =
@@ -827,7 +828,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
if (!mr)
err = -EINVAL; /* invalid r_key */
else
- refcount_inc(&mr->r_refcount);
+ kref_get(&mr->r_kref);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (mr) {
diff --git a/net/rds/rds.h b/net/rds/rds.h
index e4a603523083..8e18cd2aec51 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -291,7 +291,7 @@ struct rds_incoming {
struct rds_mr {
struct rb_node r_rb_node;
- refcount_t r_refcount;
+ struct kref r_kref;
u32 r_key;
/* A copy of the creation flags */
@@ -299,19 +299,11 @@ struct rds_mr {
unsigned int r_invalidate:1;
unsigned int r_write:1;
- /* This is for RDS_MR_DEAD.
- * It would be nice & consistent to make this part of the above
- * bit field here, but we need to use test_and_set_bit.
- */
- unsigned long r_state;
struct rds_sock *r_sock; /* back pointer to the socket that owns us */
struct rds_transport *r_trans;
void *r_trans_private;
};
-/* Flags for mr->r_state */
-#define RDS_MR_DEAD 0
-
static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
return r_key | (((u64) offset) << 32);
@@ -946,12 +938,7 @@ void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
-void __rds_put_mr_final(struct rds_mr *mr);
-static inline void rds_mr_put(struct rds_mr *mr)
-{
- if (refcount_dec_and_test(&mr->r_refcount))
- __rds_put_mr_final(mr);
-}
+void __rds_put_mr_final(struct kref *kref);
static inline bool rds_destroy_pending(struct rds_connection *conn)
{
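
The rds_mr conversion (message.c, rdma.c and rds.h above) replaces an open-coded refcount_t plus the RDS_MR_DEAD guard flag with a struct kref, so teardown runs exactly once, from the final kref_put(), and the release callback recovers the object via container_of(). A rough userspace approximation of the shape, using C11 atomics (the real kref_put() has stronger memory-ordering guarantees than shown):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct kref { atomic_int refcount; };

    static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
    static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }
    static void kref_put(struct kref *k, void (*release)(struct kref *))
    {
            if (atomic_fetch_sub(&k->refcount, 1) == 1)
                    release(k);   /* last reference: run the destructor exactly once */
    }

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mr {
            struct kref r_kref;
            /* ... payload ... */
    };

    static void mr_final(struct kref *kref)
    {
            struct mr *mr = container_of(kref, struct mr, r_kref);

            free(mr);
    }
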
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 6c3f35fac42d..0c98313dd7a8 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -31,7 +31,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *);
static void rxrpc_destroy(struct key *);
static void rxrpc_destroy_s(struct key *);
static void rxrpc_describe(const struct key *, struct seq_file *);
-static long rxrpc_read(const struct key *, char __user *, size_t);
+static long rxrpc_read(const struct key *, char *, size_t);
/*
* rxrpc defined keys take an arbitrary string as the description and an
@@ -1042,12 +1042,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key);
* - this returns the result in XDR form
*/
static long rxrpc_read(const struct key *key,
- char __user *buffer, size_t buflen)
+ char *buffer, size_t buflen)
{
const struct rxrpc_key_token *token;
const struct krb5_principal *princ;
size_t size;
- __be32 __user *xdr, *oldxdr;
+ __be32 *xdr, *oldxdr;
u32 cnlen, toksize, ntoks, tok, zero;
u16 toksizes[AFSTOKEN_MAX];
int loop;
@@ -1124,30 +1124,25 @@ static long rxrpc_read(const struct key *key,
if (!buffer || buflen < size)
return size;
- xdr = (__be32 __user *) buffer;
+ xdr = (__be32 *)buffer;
zero = 0;
#define ENCODE(x) \
do { \
- __be32 y = htonl(x); \
- if (put_user(y, xdr++) < 0) \
- goto fault; \
+ *xdr++ = htonl(x); \
} while(0)
#define ENCODE_DATA(l, s) \
do { \
u32 _l = (l); \
ENCODE(l); \
- if (copy_to_user(xdr, (s), _l) != 0) \
- goto fault; \
- if (_l & 3 && \
- copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
- goto fault; \
+ memcpy(xdr, (s), _l); \
+ if (_l & 3) \
+ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \
xdr += (_l + 3) >> 2; \
} while(0)
#define ENCODE64(x) \
do { \
__be64 y = cpu_to_be64(x); \
- if (copy_to_user(xdr, &y, 8) != 0) \
- goto fault; \
+ memcpy(xdr, &y, 8); \
xdr += 8 >> 2; \
} while(0)
#define ENCODE_STR(s) \
@@ -1238,8 +1233,4 @@ static long rxrpc_read(const struct key *key,
ASSERTCMP((char __user *) xdr - buffer, ==, size);
_leave(" = %zu", size);
return size;
-
-fault:
- _leave(" = -EFAULT");
- return -EFAULT;
}
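
With the key type's ->read() now handed a kernel buffer, rxrpc's ENCODE_DATA reduces to memcpy plus zero padding; the layout is the standard XDR opaque: a 4-byte big-endian length word, the bytes themselves, then zero padding to the next 4-byte boundary. The same encoding as a self-contained helper (xdr_encode_opaque is a hypothetical name; for l = 5 it pads 3 zero bytes and advances 2 words):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htonl() */

    /* Encode an XDR opaque<>: 4-byte big-endian length, data, zero pad to 4. */
    static uint32_t *xdr_encode_opaque(uint32_t *xdr, const void *s, uint32_t l)
    {
            uint32_t zero = 0;

            *xdr++ = htonl(l);
            memcpy(xdr, s, l);
            if (l & 3)
                    memcpy((uint8_t *)xdr + l, &zero, 4 - (l & 3));
            return xdr + ((l + 3) >> 2);
    }
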
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f6a3b969ead0..55bd1429678f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1667,6 +1667,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
skb_ext_del(skb, TC_SKB_EXT);
tp = rcu_dereference_bh(fchain->filter_chain);
+ last_executed_chain = fchain->index;
}
ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 065345832a69..61e95029c18f 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -151,6 +151,7 @@ static int tcindex_init(struct tcf_proto *tp)
p->mask = 0xffff;
p->hash = DEFAULT_HASH_SIZE;
p->fall_through = 1;
+ refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
rcu_assign_pointer(tp->root, p);
return 0;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 24ca861815b1..25fbd8d9de74 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -20,6 +20,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
+#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
@@ -1050,7 +1051,7 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
goto err_put_mech;
auth = &gss_auth->rpc_auth;
auth->au_cslack = GSS_CRED_SLACK >> 2;
- auth->au_rslack = GSS_VERF_SLACK >> 2;
+ auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
auth->au_verfsize = GSS_VERF_SLACK >> 2;
auth->au_ralign = GSS_VERF_SLACK >> 2;
auth->au_flags = 0;
@@ -1724,8 +1725,9 @@ bad_mic:
goto out;
}
-static int gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- struct rpc_task *task, struct xdr_stream *xdr)
+static noinline_for_stack int
+gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_rqst *rqstp = task->tk_rqstp;
struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
@@ -1816,8 +1818,9 @@ out:
return -EAGAIN;
}
-static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
- struct rpc_task *task, struct xdr_stream *xdr)
+static noinline_for_stack int
+gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ struct rpc_task *task, struct xdr_stream *xdr)
{
struct rpc_rqst *rqstp = task->tk_rqstp;
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
@@ -1877,7 +1880,7 @@ static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
else
iov = snd_buf->head;
p = iov->iov_base + iov->iov_len;
- pad = 3 - ((snd_buf->len - offset - 1) & 3);
+ pad = xdr_pad_size(snd_buf->len - offset);
memset(p, 0, pad);
iov->iov_len += pad;
snd_buf->len += pad;
@@ -1934,35 +1937,69 @@ gss_unwrap_resp_auth(struct rpc_cred *cred)
return 0;
}
-static int
+/*
+ * RFC 2203, Section 5.3.2.2
+ *
+ * struct rpc_gss_integ_data {
+ * opaque databody_integ<>;
+ * opaque checksum<>;
+ * };
+ *
+ * struct rpc_gss_data_t {
+ * unsigned int seq_num;
+ * proc_req_arg_t arg;
+ * };
+ */
+static noinline_for_stack int
gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
struct xdr_stream *xdr)
{
- struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf;
- u32 data_offset, mic_offset, integ_len, maj_stat;
+ struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
struct rpc_auth *auth = cred->cr_auth;
+ u32 len, offset, seqno, maj_stat;
struct xdr_netobj mic;
- __be32 *p;
+ int ret;
- p = xdr_inline_decode(xdr, 2 * sizeof(*p));
- if (unlikely(!p))
+ ret = -EIO;
+ mic.data = NULL;
+
+ /* opaque databody_integ<>; */
+ if (xdr_stream_decode_u32(xdr, &len))
goto unwrap_failed;
- integ_len = be32_to_cpup(p++);
- if (integ_len & 3)
+ if (len & 3)
goto unwrap_failed;
- data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base;
- mic_offset = integ_len + data_offset;
- if (mic_offset > rcv_buf->len)
+ offset = rcv_buf->len - xdr_stream_remaining(xdr);
+ if (xdr_stream_decode_u32(xdr, &seqno))
goto unwrap_failed;
- if (be32_to_cpup(p) != rqstp->rq_seqno)
+ if (seqno != rqstp->rq_seqno)
goto bad_seqno;
+ if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
+ goto unwrap_failed;
- if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len))
+ /*
+ * The xdr_stream now points to the beginning of the
+ * upper layer payload, to be passed below to
+ * rpcauth_unwrap_resp_decode(). The checksum, which
+ * follows the upper layer payload in @rcv_buf, is
+ * located and parsed without updating the xdr_stream.
+ */
+
+ /* opaque checksum<>; */
+ offset += len;
+ if (xdr_decode_word(rcv_buf, offset, &len))
+ goto unwrap_failed;
+ offset += sizeof(__be32);
+ if (offset + len > rcv_buf->len)
goto unwrap_failed;
- if (xdr_buf_read_mic(rcv_buf, &mic, mic_offset))
+ mic.len = len;
+ mic.data = kmalloc(len, GFP_NOFS);
+ if (!mic.data)
+ goto unwrap_failed;
+ if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
goto unwrap_failed;
- maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
+
+ maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
if (maj_stat != GSS_S_COMPLETE)
@@ -1970,19 +2007,24 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
auth->au_ralign = auth->au_verfsize + 2;
- return 0;
+ ret = 0;
+
+out:
+ kfree(mic.data);
+ return ret;
+
unwrap_failed:
trace_rpcgss_unwrap_failed(task);
- return -EIO;
+ goto out;
bad_seqno:
- trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p));
- return -EIO;
+ trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
+ goto out;
bad_mic:
trace_rpcgss_verify_mic(task, maj_stat);
- return -EIO;
+ goto out;
}
-static int
+static noinline_for_stack int
gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
struct xdr_stream *xdr)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 65b67b257302..54ae5be62f6a 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -55,10 +55,6 @@
#include "gss_rpc_upcall.h"
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
/* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
* into replies.
*
@@ -184,6 +180,11 @@ static struct cache_head *rsi_alloc(void)
return NULL;
}
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall_timeout(cd, h);
+}
+
static void rsi_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
@@ -282,6 +283,7 @@ static const struct cache_detail rsi_cache_template = {
.hash_size = RSI_HASHMAX,
.name = "auth.rpcsec.init",
.cache_put = rsi_put,
+ .cache_upcall = rsi_upcall,
.cache_request = rsi_request,
.cache_parse = rsi_parse,
.match = rsi_match,
@@ -428,6 +430,11 @@ rsc_alloc(void)
return NULL;
}
+static int rsc_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return -EINVAL;
+}
+
static int rsc_parse(struct cache_detail *cd,
char *mesg, int mlen)
{
@@ -554,6 +561,7 @@ static const struct cache_detail rsc_cache_template = {
.hash_size = RSC_HASHMAX,
.name = "auth.rpcsec.context",
.cache_put = rsc_put,
+ .cache_upcall = rsc_upcall,
.cache_parse = rsc_parse,
.match = rsc_match,
.init = rsc_init,
@@ -713,14 +721,12 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
}
if (gc->gc_seq > MAXSEQ) {
- dprintk("RPC: svcauth_gss: discarding request with "
- "large sequence number %d\n", gc->gc_seq);
+ trace_rpcgss_svc_large_seqno(rqstp->rq_xid, gc->gc_seq);
*authp = rpcsec_gsserr_ctxproblem;
return SVC_DENIED;
}
if (!gss_check_seq_num(rsci, gc->gc_seq)) {
- dprintk("RPC: svcauth_gss: discarding request with "
- "old sequence number %d\n", gc->gc_seq);
+ trace_rpcgss_svc_old_seqno(rqstp->rq_xid, gc->gc_seq);
return SVC_DROP;
}
return SVC_OK;
@@ -961,7 +967,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
/* XXX: This is very inefficient. It would be better to either do
* this while we encrypt, or maybe in the receive code, if we can peek
* ahead and work out the service and mechanism there. */
- offset = buf->head[0].iov_len % 4;
+ offset = xdr_pad_size(buf->head[0].iov_len);
if (offset) {
buf->buflen = RPCSVC_MAXPAYLOAD;
xdr_shift_buf(buf, offset);
@@ -1245,7 +1251,6 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
if (!ud->found_creds) {
/* userspace seems buggy, we should always get at least a
* mapping to nobody */
- dprintk("RPC: No creds found!\n");
goto out;
} else {
struct timespec64 boot;
@@ -1311,8 +1316,8 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
if (status)
goto out;
- trace_rpcgss_accept_upcall(rqstp->rq_xid, ud.major_status,
- ud.minor_status);
+ trace_rpcgss_svc_accept_upcall(rqstp->rq_xid, ud.major_status,
+ ud.minor_status);
switch (ud.major_status) {
case GSS_S_CONTINUE_NEEDED:
@@ -1320,31 +1325,23 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
break;
case GSS_S_COMPLETE:
status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
- if (status) {
- pr_info("%s: gss_proxy_save_rsc failed (%d)\n",
- __func__, status);
+ if (status)
goto out;
- }
cli_handle.data = (u8 *)&handle;
cli_handle.len = sizeof(handle);
break;
default:
- ret = SVC_CLOSE;
goto out;
}
/* Got an answer to the upcall; use it: */
if (gss_write_init_verf(sn->rsc_cache, rqstp,
- &cli_handle, &ud.major_status)) {
- pr_info("%s: gss_write_init_verf failed\n", __func__);
+ &cli_handle, &ud.major_status))
goto out;
- }
if (gss_write_resv(resv, PAGE_SIZE,
&cli_handle, &ud.out_token,
- ud.major_status, ud.minor_status)) {
- pr_info("%s: gss_write_resv failed\n", __func__);
+ ud.major_status, ud.minor_status))
goto out;
- }
ret = SVC_COMPLETE;
out:
@@ -1495,8 +1492,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
int ret;
struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
- dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
- argv->iov_len);
+ trace_rpcgss_svc_accept(rqstp->rq_xid, argv->iov_len);
*authp = rpc_autherr_badcred;
if (!svcdata)
@@ -1680,7 +1676,8 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
goto out;
integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
integ_len = resbuf->len - integ_offset;
- BUG_ON(integ_len % 4);
+ if (integ_len & 3)
+ goto out;
*p++ = htonl(integ_len);
*p++ = htonl(gc->gc_seq);
if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) {
@@ -1704,7 +1701,8 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
resv->iov_len += XDR_QUADLEN(mic.len) << 2;
/* not strictly required: */
resbuf->len += XDR_QUADLEN(mic.len) << 2;
- BUG_ON(resv->iov_len > PAGE_SIZE);
+ if (resv->iov_len > PAGE_SIZE)
+ goto out_err;
out:
stat = 0;
out_err:
@@ -1740,9 +1738,11 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
* both the head and tail.
*/
if (resbuf->tail[0].iov_base) {
- BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
- + PAGE_SIZE);
- BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
+ if (resbuf->tail[0].iov_base >=
+ resbuf->head[0].iov_base + PAGE_SIZE)
+ return -EINVAL;
+ if (resbuf->tail[0].iov_base < resbuf->head[0].iov_base)
+ return -EINVAL;
if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
return -ENOMEM;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index bd843a81afa0..af0ddd28b081 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -32,13 +32,13 @@
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <trace/events/sunrpc.h>
#include "netns.h"
#define RPCDBG_FACILITY RPCDBG_CACHE
static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
-static bool cache_listeners_exist(struct cache_detail *detail);
static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
@@ -65,13 +65,14 @@ static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
rcu_read_lock();
hlist_for_each_entry_rcu(tmp, head, cache_list) {
- if (detail->match(tmp, key)) {
- if (cache_is_expired(detail, tmp))
- continue;
- tmp = cache_get_rcu(tmp);
- rcu_read_unlock();
- return tmp;
- }
+ if (!detail->match(tmp, key))
+ continue;
+ if (test_bit(CACHE_VALID, &tmp->flags) &&
+ cache_is_expired(detail, tmp))
+ continue;
+ tmp = cache_get_rcu(tmp);
+ rcu_read_unlock();
+ return tmp;
}
rcu_read_unlock();
return NULL;
@@ -113,18 +114,21 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
spin_lock(&detail->hash_lock);
/* check if entry appeared while we slept */
- hlist_for_each_entry_rcu(tmp, head, cache_list) {
- if (detail->match(tmp, key)) {
- if (cache_is_expired(detail, tmp)) {
- sunrpc_begin_cache_remove_entry(tmp, detail);
- freeme = tmp;
- break;
- }
- cache_get(tmp);
- spin_unlock(&detail->hash_lock);
- cache_put(new, detail);
- return tmp;
+ hlist_for_each_entry_rcu(tmp, head, cache_list,
+ lockdep_is_held(&detail->hash_lock)) {
+ if (!detail->match(tmp, key))
+ continue;
+ if (test_bit(CACHE_VALID, &tmp->flags) &&
+ cache_is_expired(detail, tmp)) {
+ sunrpc_begin_cache_remove_entry(tmp, detail);
+ trace_cache_entry_expired(detail, tmp);
+ freeme = tmp;
+ break;
}
+ cache_get(tmp);
+ spin_unlock(&detail->hash_lock);
+ cache_put(new, detail);
+ return tmp;
}
hlist_add_head_rcu(&new->cache_list, head);
@@ -174,6 +178,25 @@ static void cache_fresh_unlocked(struct cache_head *head,
}
}
+static void cache_make_negative(struct cache_detail *detail,
+ struct cache_head *h)
+{
+ set_bit(CACHE_NEGATIVE, &h->flags);
+ trace_cache_entry_make_negative(detail, h);
+}
+
+static void cache_entry_update(struct cache_detail *detail,
+ struct cache_head *h,
+ struct cache_head *new)
+{
+ if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
+ detail->update(h, new);
+ trace_cache_entry_update(detail, h);
+ } else {
+ cache_make_negative(detail, h);
+ }
+}
+
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *new, struct cache_head *old, int hash)
{
@@ -186,10 +209,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
if (!test_bit(CACHE_VALID, &old->flags)) {
spin_lock(&detail->hash_lock);
if (!test_bit(CACHE_VALID, &old->flags)) {
- if (test_bit(CACHE_NEGATIVE, &new->flags))
- set_bit(CACHE_NEGATIVE, &old->flags);
- else
- detail->update(old, new);
+ cache_entry_update(detail, old, new);
cache_fresh_locked(old, new->expiry_time, detail);
spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(old, detail);
@@ -207,10 +227,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
detail->init(tmp, old);
spin_lock(&detail->hash_lock);
- if (test_bit(CACHE_NEGATIVE, &new->flags))
- set_bit(CACHE_NEGATIVE, &tmp->flags);
- else
- detail->update(tmp, new);
+ cache_entry_update(detail, tmp, new);
hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
detail->entries++;
cache_get(tmp);
@@ -224,13 +241,6 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
-static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- if (cd->cache_upcall)
- return cd->cache_upcall(cd, h);
- return sunrpc_cache_pipe_upcall(cd, h);
-}
-
static inline int cache_is_valid(struct cache_head *h)
{
if (!test_bit(CACHE_VALID, &h->flags))
@@ -259,7 +269,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
spin_lock(&detail->hash_lock);
rv = cache_is_valid(h);
if (rv == -EAGAIN) {
- set_bit(CACHE_NEGATIVE, &h->flags);
+ cache_make_negative(detail, h);
cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
detail);
rv = -ENOENT;
@@ -303,17 +313,14 @@ int cache_check(struct cache_detail *detail,
(h->expiry_time != 0 && age > refresh_age/2)) {
dprintk("RPC: Want update, refage=%lld, age=%lld\n",
refresh_age, age);
- if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
- switch (cache_make_upcall(detail, h)) {
- case -EINVAL:
- rv = try_to_negate_entry(detail, h);
- break;
- case -EAGAIN:
- cache_fresh_unlocked(h, detail);
- break;
- }
- } else if (!cache_listeners_exist(detail))
+ switch (detail->cache_upcall(detail, h)) {
+ case -EINVAL:
rv = try_to_negate_entry(detail, h);
+ break;
+ case -EAGAIN:
+ cache_fresh_unlocked(h, detail);
+ break;
+ }
}
if (rv == -EAGAIN) {
@@ -468,6 +475,7 @@ static int cache_clean(void)
continue;
sunrpc_begin_cache_remove_entry(ch, current_detail);
+ trace_cache_entry_expired(current_detail, ch);
rv = 1;
break;
}
@@ -1195,20 +1203,12 @@ static bool cache_listeners_exist(struct cache_detail *detail)
*
* Each request is at most one page long.
*/
-int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
+static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
-
char *buf;
struct cache_request *crq;
int ret = 0;
- if (!detail->cache_request)
- return -EINVAL;
-
- if (!cache_listeners_exist(detail)) {
- warn_no_listener(detail);
- return -EINVAL;
- }
if (test_bit(CACHE_CLEANED, &h->flags))
/* Too late to make an upcall */
return -EAGAIN;
@@ -1231,6 +1231,7 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
if (test_bit(CACHE_PENDING, &h->flags)) {
crq->item = cache_get(h);
list_add_tail(&crq->q.list, &detail->queue);
+ trace_cache_entry_upcall(detail, h);
} else
/* Lost a race, no longer PENDING, so don't enqueue */
ret = -EAGAIN;
@@ -1242,8 +1243,27 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
}
return ret;
}
+
+int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
+{
+ if (test_and_set_bit(CACHE_PENDING, &h->flags))
+ return 0;
+ return cache_pipe_upcall(detail, h);
+}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
+int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
+ struct cache_head *h)
+{
+ if (!cache_listeners_exist(detail)) {
+ warn_no_listener(detail);
+ trace_cache_entry_no_listener(detail, h);
+ return -EINVAL;
+ }
+ return sunrpc_cache_pipe_upcall(detail, h);
+}
+EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
+
/*
* parse a message from user-space and pass it
* to an appropriate cache
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7324b21f923e..325a0858700f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1099,8 +1099,9 @@ rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
task->tk_msg.rpc_proc = msg->rpc_proc;
task->tk_msg.rpc_argp = msg->rpc_argp;
task->tk_msg.rpc_resp = msg->rpc_resp;
- if (msg->rpc_cred != NULL)
- task->tk_msg.rpc_cred = get_cred(msg->rpc_cred);
+ task->tk_msg.rpc_cred = msg->rpc_cred;
+ if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
+ get_cred(task->tk_msg.rpc_cred);
}
}
@@ -1126,6 +1127,9 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
task = rpc_new_task(task_setup_data);
+ if (!RPC_IS_ASYNC(task))
+ task->tk_flags |= RPC_TASK_CRED_NOREF;
+
rpc_task_set_client(task, task_setup_data->rpc_client);
rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
@@ -2509,6 +2513,7 @@ call_decode(struct rpc_task *task)
goto out;
req->rq_rcv_buf.len = req->rq_private_buf.len;
+ trace_xprt_recvfrom(&req->rq_rcv_buf);
/* Check that the softirq receive buffer is valid */
WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 55e900255b0c..7eba20a88438 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -204,10 +204,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
struct rpc_task *task,
unsigned char queue_priority)
{
- WARN_ON_ONCE(RPC_IS_QUEUED(task));
- if (RPC_IS_QUEUED(task))
- return;
-
INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
if (RPC_IS_PRIORITY(queue))
__rpc_add_wait_queue_priority(queue, task, queue_priority);
@@ -382,7 +378,7 @@ static void rpc_make_runnable(struct workqueue_struct *wq,
* NB: An RPC task will only receive interrupt-driven events as long
* as it's on a wait queue.
*/
-static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
struct rpc_task *task,
unsigned char queue_priority)
{
@@ -395,12 +391,23 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
}
+static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+ struct rpc_task *task,
+ unsigned char queue_priority)
+{
+ if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
+ return;
+ __rpc_do_sleep_on_priority(q, task, queue_priority);
+}
+
static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
struct rpc_task *task, unsigned long timeout,
unsigned char queue_priority)
{
+ if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
+ return;
if (time_is_after_jiffies(timeout)) {
- __rpc_sleep_on_priority(q, task, queue_priority);
+ __rpc_do_sleep_on_priority(q, task, queue_priority);
__rpc_add_timer(q, task, timeout);
} else
task->tk_status = -ETIMEDOUT;
@@ -1162,7 +1169,8 @@ static void rpc_release_resources_task(struct rpc_task *task)
{
xprt_release(task);
if (task->tk_msg.rpc_cred) {
- put_cred(task->tk_msg.rpc_cred);
+ if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
+ put_cred(task->tk_msg.rpc_cred);
task->tk_msg.rpc_cred = NULL;
}
rpc_task_release_client(task);
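
The clnt.c and sched.c hunks pair up: rpc_run_task() marks synchronous tasks RPC_TASK_CRED_NOREF, letting them borrow the caller's credential without get_cred(), and rpc_release_resources_task() skips put_cred() under the same flag -- safe because a synchronous caller outlives its own RPC. The conditional-ownership shape in a self-contained sketch (names hypothetical):

    struct cred { int refcount; };

    static struct cred *cred_get(struct cred *c) { c->refcount++; return c; }
    static void cred_put(struct cred *c)         { c->refcount--; }

    struct task {
            unsigned flags;
            struct cred *cred;
    };
    #define TASK_CRED_NOREF 0x1

    static void task_set_cred(struct task *t, struct cred *c)
    {
            t->cred = c;
            if (!(t->flags & TASK_CRED_NOREF))
                    cred_get(c);            /* async task: take our own reference */
    }

    static void task_release_cred(struct task *t)
    {
            if (t->cred) {
                    if (!(t->flags & TASK_CRED_NOREF))
                            cred_put(t->cred);   /* only drop what we took */
                    t->cred = NULL;
            }
    }
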
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 1a864f1ed119..3fc8af8bb961 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -14,9 +14,24 @@
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
+#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>
+#include "socklib.h"
+
+/*
+ * Helper structure for copying from an sk_buff.
+ */
+struct xdr_skb_reader {
+ struct sk_buff *skb;
+ unsigned int offset;
+ size_t count;
+ __wsum csum;
+};
+
+typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
+ size_t len);
/**
* xdr_skb_read_bits - copy some data bits from skb to internal buffer
@@ -186,3 +201,129 @@ no_checksum:
return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
+
+static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t seek)
+{
+ if (seek)
+ iov_iter_advance(&msg->msg_iter, seek);
+ return sock_sendmsg(sock, msg);
+}
+
+static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t seek)
+{
+ iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
+ return xprt_sendmsg(sock, msg, seek);
+}
+
+static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
+ struct xdr_buf *xdr, size_t base)
+{
+ int err;
+
+ err = xdr_alloc_bvec(xdr, GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
+ xdr->page_len + xdr->page_base);
+ return xprt_sendmsg(sock, msg, base + xdr->page_base);
+}
+
+/* Common case:
+ * - stream transport
+ * - sending from byte 0 of the message
+ * - the message is wholly contained in @xdr's head iovec
+ */
+static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
+ rpc_fraghdr marker, struct kvec *vec,
+ size_t base)
+{
+ struct kvec iov[2] = {
+ [0] = {
+ .iov_base = &marker,
+ .iov_len = sizeof(marker)
+ },
+ [1] = *vec,
+ };
+ size_t len = iov[0].iov_len + iov[1].iov_len;
+
+ iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
+ return xprt_sendmsg(sock, msg, base);
+}
+
+/**
+ * xprt_sock_sendmsg - write an xdr_buf directly to a socket
+ * @sock: open socket to send on
+ * @msg: socket message metadata
+ * @xdr: xdr_buf containing this request
+ * @base: starting position in the buffer
+ * @marker: stream record marker field
+ * @sent_p: return the total number of bytes successfully queued for sending
+ *
+ * Return values:
+ * On success, returns zero and fills in @sent_p.
+ * %-ENOTSOCK if @sock is not a struct socket.
+ */
+int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct xdr_buf *xdr, unsigned int base,
+ rpc_fraghdr marker, unsigned int *sent_p)
+{
+ unsigned int rmsize = marker ? sizeof(marker) : 0;
+ unsigned int remainder = rmsize + xdr->len - base;
+ unsigned int want;
+ int err = 0;
+
+ *sent_p = 0;
+
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ msg->msg_flags |= MSG_MORE;
+ want = xdr->head[0].iov_len + rmsize;
+ if (base < want) {
+ unsigned int len = want - base;
+
+ remainder -= len;
+ if (remainder == 0)
+ msg->msg_flags &= ~MSG_MORE;
+ if (rmsize)
+ err = xprt_send_rm_and_kvec(sock, msg, marker,
+ &xdr->head[0], base);
+ else
+ err = xprt_send_kvec(sock, msg, &xdr->head[0], base);
+ if (remainder == 0 || err != len)
+ goto out;
+ *sent_p += err;
+ base = 0;
+ } else {
+ base -= want;
+ }
+
+ if (base < xdr->page_len) {
+ unsigned int len = xdr->page_len - base;
+
+ remainder -= len;
+ if (remainder == 0)
+ msg->msg_flags &= ~MSG_MORE;
+ err = xprt_send_pagedata(sock, msg, xdr, base);
+ if (remainder == 0 || err != len)
+ goto out;
+ *sent_p += err;
+ base = 0;
+ } else {
+ base -= xdr->page_len;
+ }
+
+ if (base >= xdr->tail[0].iov_len)
+ return 0;
+ msg->msg_flags &= ~MSG_MORE;
+ err = xprt_send_kvec(sock, msg, &xdr->tail[0], base);
+out:
+ if (err > 0) {
+ *sent_p += err;
+ err = 0;
+ }
+ return err;
+}
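
The new xprt_sock_sendmsg() sends the three regions of an xdr_buf in order -- head kvec, page array, tail kvec -- leaving MSG_MORE set until the final region so the stack may coalesce the pieces. A hedged sketch of a stream-transport caller (illustrative only; usage follows the kernel-doc above, and resuming a partial send at offset @sent is the caller's job):

    /* sketch: send one complete RPC record over a connected stream socket */
    static int send_request(struct socket *sock, struct xdr_buf *xdr)
    {
            struct msghdr msg = { .msg_flags = 0 };
            unsigned int sent = 0;
            int err;
            /* stream record marker: high bit flags the last fragment,
             * low 31 bits carry the fragment length */
            rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | xdr->len);

            err = xprt_sock_sendmsg(sock, &msg, xdr, 0, marker, &sent);
            if (!err && sent < xdr->len + sizeof(marker))
                    err = -EAGAIN;   /* partial send: resume later at @sent */
            return err;
    }
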
diff --git a/net/sunrpc/socklib.h b/net/sunrpc/socklib.h
new file mode 100644
index 000000000000..c48114ad6f00
--- /dev/null
+++ b/net/sunrpc/socklib.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
+ * Copyright (C) 2020, Oracle.
+ */
+
+#ifndef _NET_SUNRPC_SOCKLIB_H_
+#define _NET_SUNRPC_SOCKLIB_H_
+
+int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb);
+int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct xdr_buf *xdr, unsigned int base,
+ rpc_fraghdr marker, unsigned int *sent_p);
+
+#endif /* _NET_SUNRPC_SOCKLIB_H_ */
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index c9bacb3c930f..47a756503d11 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -50,10 +50,6 @@ static inline int sock_is_loopback(struct sock *sk)
return loopback;
}
-int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
- struct page *headpage, unsigned long headoffset,
- struct page *tailpage, unsigned long tailoffset);
-
int rpc_clients_notifier_register(void);
void rpc_clients_notifier_unregister(void);
#endif /* _NET_SUNRPC_SUNRPC_H */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 187dd4e73d64..9ed3126600ce 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1529,10 +1529,6 @@ svc_process(struct svc_rqst *rqstp)
goto out_drop;
}
- /* Reserve space for the record marker */
- if (rqstp->rq_prot == IPPROTO_TCP)
- svc_putnl(resv, 0);
-
/* Returns 1 for send, 0 for drop */
if (likely(svc_process_common(rqstp, argv, resv)))
return svc_send(rqstp);
@@ -1637,6 +1633,22 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
EXPORT_SYMBOL_GPL(svc_max_payload);
/**
+ * svc_encode_read_payload - mark a range of bytes as a READ payload
+ * @rqstp: svc_rqst to operate on
+ * @offset: payload's byte offset in rqstp->rq_res
+ * @length: size of payload, in bytes
+ *
+ * Returns zero on success, or a negative errno if a permanent
+ * error occurred.
+ */
+int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
+ unsigned int length)
+{
+ return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
+}
+EXPORT_SYMBOL_GPL(svc_encode_read_payload);
+
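
As a usage sketch (assumed caller, not introduced by this series): a
server-side XDR encoder would invoke this helper right after placing a
READ result into rq_res, so that an RDMA transport can push the payload
with RDMA Write instead of copying it inline. The offset arithmetic and
error mapping below are hypothetical bookkeeping, not code from the patch:

        struct xdr_buf *buf = &rqstp->rq_res;
        /* Assume the payload of @count bytes was just encoded at the end. */
        unsigned int offset = buf->len - count;

        if (svc_encode_read_payload(rqstp, offset, count) < 0)
                return -EIO;    /* assumed error mapping */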
+/**
* svc_fill_write_vector - Construct data argument for VFS write call
* @rqstp: svc_rqst to operate on
* @pages: list of pages containing data payload
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index de3c077733a7..e27e3532ec75 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -104,8 +104,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
-/*
- * Format the transport list for printing
+/**
+ * svc_print_xprts - Format the transport list for printing
+ * @buf: target buffer for the formatted transport list
+ * @maxlen: length of target buffer
+ *
+ * Fills in @buf with a string containing a list of transport names, each name
+ * terminated with '\n'. If the buffer is too small, some entries may be
+ * missing, but it is guaranteed that all lines in the output buffer are
+ * complete.
+ *
+ * Returns positive length of the filled-in string.
*/
int svc_print_xprts(char *buf, int maxlen)
{
@@ -118,9 +127,9 @@ int svc_print_xprts(char *buf, int maxlen)
list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
int slen;
- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
- slen = strlen(tmpstr);
- if (len + slen > maxlen)
+ slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
+ xcl->xcl_name, xcl->xcl_max_payload);
+ if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
break;
len += slen;
strcat(buf, tmpstr);
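
The rewritten loop above is the standard truncation-safe idiom: snprintf()
reports the length the formatted string would have needed, so a return
value at or beyond sizeof(tmpstr) signals truncation, and an entry that no
longer fits in the output is dropped whole rather than emitted as a
partial line. In reduced form (variable names illustrative):

        int n = snprintf(tmp, sizeof(tmp), "%s %d\n", name, max_payload);
        if (n >= sizeof(tmp) || used + n >= buflen)
                break;          /* skip the entry; never emit a partial line */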
@@ -802,6 +811,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
len = svc_deferred_recv(rqstp);
else
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+ if (len > 0)
+ trace_svc_recvfrom(&rqstp->rq_arg);
rqstp->rq_stime = ktime_get();
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
@@ -905,6 +916,7 @@ int svc_send(struct svc_rqst *rqstp)
xb->len = xb->head[0].iov_len +
xb->page_len +
xb->tail[0].iov_len;
+ trace_svc_sendto(xb);
/* Grab mutex to serialize outgoing data. */
mutex_lock(&xprt->xpt_mutex);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 04aa80a2d752..6c8f802c4261 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -148,6 +148,11 @@ static struct cache_head *ip_map_alloc(void)
return NULL;
}
+static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h);
+}
+
static void ip_map_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
@@ -467,6 +472,11 @@ static struct cache_head *unix_gid_alloc(void)
return NULL;
}
+static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall_timeout(cd, h);
+}
+
static void unix_gid_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
@@ -584,6 +594,7 @@ static const struct cache_detail unix_gid_cache_template = {
.hash_size = GID_HASHMAX,
.name = "auth.unix.gid",
.cache_put = unix_gid_put,
+ .cache_upcall = unix_gid_upcall,
.cache_request = unix_gid_request,
.cache_parse = unix_gid_parse,
.cache_show = unix_gid_show,
@@ -881,6 +892,7 @@ static const struct cache_detail ip_map_cache_template = {
.hash_size = IP_HASHMAX,
.name = "auth.unix.ip",
.cache_put = ip_map_put,
+ .cache_upcall = ip_map_upcall,
.cache_request = ip_map_request,
.cache_parse = ip_map_parse,
.cache_show = ip_map_show,
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 2934dd711715..519cf9c4f8fd 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -55,6 +55,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/xprt.h>
+#include "socklib.h"
#include "sunrpc.h"
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -174,109 +175,10 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
}
}
-/*
- * send routine intended to be shared by the fore- and back-channel
- */
-int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
- struct page *headpage, unsigned long headoffset,
- struct page *tailpage, unsigned long tailoffset)
-{
- int result;
- int size;
- struct page **ppage = xdr->pages;
- size_t base = xdr->page_base;
- unsigned int pglen = xdr->page_len;
- unsigned int flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
- int slen;
- int len = 0;
-
- slen = xdr->len;
-
- /* send head */
- if (slen == xdr->head[0].iov_len)
- flags = 0;
- len = kernel_sendpage(sock, headpage, headoffset,
- xdr->head[0].iov_len, flags);
- if (len != xdr->head[0].iov_len)
- goto out;
- slen -= xdr->head[0].iov_len;
- if (slen == 0)
- goto out;
-
- /* send page data */
- size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
- while (pglen > 0) {
- if (slen == size)
- flags = 0;
- result = kernel_sendpage(sock, *ppage, base, size, flags);
- if (result > 0)
- len += result;
- if (result != size)
- goto out;
- slen -= size;
- pglen -= size;
- size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
- base = 0;
- ppage++;
- }
-
- /* send tail */
- if (xdr->tail[0].iov_len) {
- result = kernel_sendpage(sock, tailpage, tailoffset,
- xdr->tail[0].iov_len, 0);
- if (result > 0)
- len += result;
- }
-
-out:
- return len;
-}
-
-
-/*
- * Generic sendto routine
- */
-static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
+static int svc_sock_read_payload(struct svc_rqst *rqstp, unsigned int offset,
+ unsigned int length)
{
- struct svc_sock *svsk =
- container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
- struct socket *sock = svsk->sk_sock;
- union {
- struct cmsghdr hdr;
- long all[SVC_PKTINFO_SPACE / sizeof(long)];
- } buffer;
- struct cmsghdr *cmh = &buffer.hdr;
- int len = 0;
- unsigned long tailoff;
- unsigned long headoff;
- RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-
- if (rqstp->rq_prot == IPPROTO_UDP) {
- struct msghdr msg = {
- .msg_name = &rqstp->rq_addr,
- .msg_namelen = rqstp->rq_addrlen,
- .msg_control = cmh,
- .msg_controllen = sizeof(buffer),
- .msg_flags = MSG_MORE,
- };
-
- svc_set_cmsg_data(rqstp, cmh);
-
- if (sock_sendmsg(sock, &msg) < 0)
- goto out;
- }
-
- tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1);
- headoff = 0;
- len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff,
- rqstp->rq_respages[0], tailoff);
-
-out:
- dprintk("svc: socket %p sendto([%p %zu... ], %d) = %d (addr %s)\n",
- svsk, xdr->head[0].iov_base, xdr->head[0].iov_len,
- xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));
-
- return len;
+ return 0;
}
/*
@@ -600,17 +502,43 @@ out_free:
return 0;
}
-static int
-svc_udp_sendto(struct svc_rqst *rqstp)
+/**
+ * svc_udp_sendto - Send out a reply on a UDP socket
+ * @rqstp: completed svc_rqst
+ *
+ * Returns the number of bytes sent, or a negative errno.
+ */
+static int svc_udp_sendto(struct svc_rqst *rqstp)
{
- int error;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+ struct xdr_buf *xdr = &rqstp->rq_res;
+ union {
+ struct cmsghdr hdr;
+ long all[SVC_PKTINFO_SPACE / sizeof(long)];
+ } buffer;
+ struct cmsghdr *cmh = &buffer.hdr;
+ struct msghdr msg = {
+ .msg_name = &rqstp->rq_addr,
+ .msg_namelen = rqstp->rq_addrlen,
+ .msg_control = cmh,
+ .msg_controllen = sizeof(buffer),
+ };
+ unsigned int uninitialized_var(sent);
+ int err;
- error = svc_sendto(rqstp, &rqstp->rq_res);
- if (error == -ECONNREFUSED)
- /* ICMP error on earlier request. */
- error = svc_sendto(rqstp, &rqstp->rq_res);
+ svc_set_cmsg_data(rqstp, cmh);
- return error;
+ err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+ xdr_free_bvec(xdr);
+ if (err == -ECONNREFUSED) {
+ /* ICMP error on earlier request. */
+ err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+ xdr_free_bvec(xdr);
+ }
+ if (err < 0)
+ return err;
+ return sent;
}
static int svc_udp_has_wspace(struct svc_xprt *xprt)
@@ -653,6 +581,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
.xpo_create = svc_udp_create,
.xpo_recvfrom = svc_udp_recvfrom,
.xpo_sendto = svc_udp_sendto,
+ .xpo_read_payload = svc_sock_read_payload,
.xpo_release_rqst = svc_release_udp_skb,
.xpo_detach = svc_sock_detach,
.xpo_free = svc_sock_free,
@@ -1128,35 +1057,39 @@ err_noclose:
return 0; /* record not complete */
}
-/*
- * Send out data on TCP socket.
+/**
+ * svc_tcp_sendto - Send out a reply on a TCP socket
+ * @rqstp: completed svc_rqst
+ *
+ * Returns the number of bytes sent, or a negative errno.
*/
static int svc_tcp_sendto(struct svc_rqst *rqstp)
{
- struct xdr_buf *xbufp = &rqstp->rq_res;
- int sent;
- __be32 reclen;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+ struct xdr_buf *xdr = &rqstp->rq_res;
+ rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
+ (u32)xdr->len);
+ struct msghdr msg = {
+ .msg_flags = 0,
+ };
+ unsigned int uninitialized_var(sent);
+ int err;
- /* Set up the first element of the reply kvec.
- * Any other kvecs that may be in use have been taken
- * care of by the server implementation itself.
- */
- reclen = htonl(0x80000000|((xbufp->len ) - 4));
- memcpy(xbufp->head[0].iov_base, &reclen, 4);
-
- sent = svc_sendto(rqstp, &rqstp->rq_res);
- if (sent != xbufp->len) {
- printk(KERN_NOTICE
- "rpc-srv/tcp: %s: %s %d when sending %d bytes "
- "- shutting down socket\n",
- rqstp->rq_xprt->xpt_server->sv_name,
- (sent<0)?"got error":"sent only",
- sent, xbufp->len);
- set_bit(XPT_CLOSE, &rqstp->rq_xprt->xpt_flags);
- svc_xprt_enqueue(rqstp->rq_xprt);
- sent = -EAGAIN;
- }
+ err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
+ xdr_free_bvec(xdr);
+ if (err < 0 || sent != (xdr->len + sizeof(marker)))
+ goto out_close;
return sent;
+
+out_close:
+ pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
+ xprt->xpt_server->sv_name,
+ (err < 0) ? "got error" : "sent",
+ (err < 0) ? err : sent, xdr->len);
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ svc_xprt_enqueue(xprt);
+ return -EAGAIN;
}
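
To make the framing concrete: RPC_LAST_STREAM_FRAGMENT is the high bit of
the 4-byte record marker and the low 31 bits carry the fragment length, so
a 200-byte reply goes on the wire as the big-endian word 0x800000c8
(0x80000000 | 200) followed by the 200 reply bytes. That is exactly the
xdr->len + sizeof(marker) total that the short-send check above verifies.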
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
@@ -1171,6 +1104,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
.xpo_create = svc_tcp_create,
.xpo_recvfrom = svc_tcp_recvfrom,
.xpo_sendto = svc_tcp_sendto,
+ .xpo_read_payload = svc_sock_read_payload,
.xpo_release_rqst = svc_release_skb,
.xpo_detach = svc_tcp_sock_detach,
.xpo_free = svc_sock_free,
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index e5497dc2475b..15b58c5144f9 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1235,61 +1235,6 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
-/**
- * xdr_buf_read_mic() - obtain the address of the GSS mic from xdr buf
- * @buf: pointer to buffer containing a mic
- * @mic: on success, returns the address of the mic
- * @offset: the offset in buf where mic may be found
- *
- * This function may modify the xdr buf if the mic is found to be straddling
- * a boundary between head, pages, and tail. On success the mic can be read
- * from the address returned. There is no need to free the mic.
- *
- * Return: Success returns 0, otherwise an integer error.
- */
-int xdr_buf_read_mic(struct xdr_buf *buf, struct xdr_netobj *mic, unsigned int offset)
-{
- struct xdr_buf subbuf;
- unsigned int boundary;
-
- if (xdr_decode_word(buf, offset, &mic->len))
- return -EFAULT;
- offset += 4;
-
- /* Is the mic partially in the head? */
- boundary = buf->head[0].iov_len;
- if (offset < boundary && (offset + mic->len) > boundary)
- xdr_shift_buf(buf, boundary - offset);
-
- /* Is the mic partially in the pages? */
- boundary += buf->page_len;
- if (offset < boundary && (offset + mic->len) > boundary)
- xdr_shrink_pagelen(buf, boundary - offset);
-
- if (xdr_buf_subsegment(buf, &subbuf, offset, mic->len))
- return -EFAULT;
-
- /* Is the mic contained entirely in the head? */
- mic->data = subbuf.head[0].iov_base;
- if (subbuf.head[0].iov_len == mic->len)
- return 0;
- /* ..or is the mic contained entirely in the tail? */
- mic->data = subbuf.tail[0].iov_base;
- if (subbuf.tail[0].iov_len == mic->len)
- return 0;
-
- /* Find a contiguous area in @buf to hold all of @mic */
- if (mic->len > buf->buflen - buf->len)
- return -ENOMEM;
- if (buf->tail[0].iov_len != 0)
- mic->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
- else
- mic->data = buf->head[0].iov_base + buf->head[0].iov_len;
- __read_bytes_from_xdr_buf(&subbuf, mic->data, mic->len);
- return 0;
-}
-EXPORT_SYMBOL_GPL(xdr_buf_read_mic);
-
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 1aafe8d3f3f4..493a30a296fc 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1117,8 +1117,6 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
- task->tk_pid, ntohl(req->rq_xid), copied);
trace_xprt_complete_rqst(xprt, req->rq_xid, copied);
xprt->stat.recvs++;
@@ -1462,6 +1460,7 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
*/
req->rq_ntrans++;
+ trace_xprt_sendto(&req->rq_snd_buf);
connect_cookie = xprt->connect_cookie;
status = xprt->ops->send_request(req);
if (status != 0) {
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 1a0ae0c61353..c92c1aac270a 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -44,10 +44,10 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
size_t maxmsg;
- maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv);
+ maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
return maxmsg - RPCRDMA_HDRLEN_MIN;
}
@@ -115,7 +115,7 @@ int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
if (rc < 0)
goto failed_marshal;
- if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
+ if (rpcrdma_post_sends(r_xprt, req))
goto drop_connection;
return 0;
@@ -190,7 +190,7 @@ create_req:
if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
return NULL;
- size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE);
+ size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
if (!req)
return NULL;
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 125297c9aa3e..ef997880e17a 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -52,7 +52,7 @@
/**
* frwr_release_mr - Destroy one MR
- * @mr: MR allocated by frwr_init_mr
+ * @mr: MR allocated by frwr_mr_init
*
*/
void frwr_release_mr(struct rpcrdma_mr *mr)
@@ -74,7 +74,7 @@ static void frwr_mr_recycle(struct rpcrdma_mr *mr)
if (mr->mr_dir != DMA_NONE) {
trace_xprtrdma_mr_unmap(mr);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
+ ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
mr->mr_sg, mr->mr_nents, mr->mr_dir);
mr->mr_dir = DMA_NONE;
}
@@ -106,21 +106,22 @@ void frwr_reset(struct rpcrdma_req *req)
}
/**
- * frwr_init_mr - Initialize one MR
- * @ia: interface adapter
+ * frwr_mr_init - Initialize one MR
+ * @r_xprt: controlling transport instance
* @mr: generic MR to prepare for FRWR
*
* Returns zero if successful. Otherwise a negative errno
* is returned.
*/
-int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
+int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
- unsigned int depth = ia->ri_max_frwr_depth;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ unsigned int depth = ep->re_max_fr_depth;
struct scatterlist *sg;
struct ib_mr *frmr;
int rc;
- frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
+ frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
if (IS_ERR(frmr))
goto out_mr_err;
@@ -128,6 +129,7 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
if (!sg)
goto out_list_err;
+ mr->mr_xprt = r_xprt;
mr->frwr.fr_mr = frmr;
mr->mr_dir = DMA_NONE;
INIT_LIST_HEAD(&mr->mr_list);
@@ -149,29 +151,24 @@ out_list_err:
/**
* frwr_query_device - Prepare a transport for use with FRWR
- * @r_xprt: controlling transport instance
+ * @ep: endpoint to fill in
* @device: RDMA device to query
*
* On success, sets:
- * ep->rep_attr
- * ep->rep_max_requests
- * ia->ri_max_rdma_segs
- *
- * And these FRWR-related fields:
- * ia->ri_max_frwr_depth
- * ia->ri_mrtype
+ * ep->re_attr
+ * ep->re_max_requests
+ * ep->re_max_rdma_segs
+ * ep->re_max_fr_depth
+ * ep->re_mrtype
*
* Return values:
* On success, returns zero.
* %-EINVAL - the device does not support FRWR memory registration
* %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
*/
-int frwr_query_device(struct rpcrdma_xprt *r_xprt,
- const struct ib_device *device)
+int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
const struct ib_device_attr *attrs = &device->attrs;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
int max_qp_wr, depth, delta;
unsigned int max_sge;
@@ -188,23 +185,23 @@ int frwr_query_device(struct rpcrdma_xprt *r_xprt,
pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
return -ENOMEM;
}
- ep->rep_attr.cap.max_send_sge = max_sge;
- ep->rep_attr.cap.max_recv_sge = 1;
+ ep->re_attr.cap.max_send_sge = max_sge;
+ ep->re_attr.cap.max_recv_sge = 1;
- ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
+ ep->re_mrtype = IB_MR_TYPE_MEM_REG;
if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
- ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;
+ ep->re_mrtype = IB_MR_TYPE_SG_GAPS;
/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
* capability, but perform optimally when the MRs are not larger
* than a page.
*/
if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
- ia->ri_max_frwr_depth = attrs->max_sge_rd;
+ ep->re_max_fr_depth = attrs->max_sge_rd;
else
- ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
- if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
- ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
+ ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
+ if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
+ ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;
/* Add room for frwr register and invalidate WRs.
* 1. FRWR reg WR for head
@@ -220,11 +217,11 @@ int frwr_query_device(struct rpcrdma_xprt *r_xprt,
/* Calculate N if the device max FRWR depth is smaller than
* RPCRDMA_MAX_DATA_SEGS.
*/
- if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
- delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
+ if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
+ delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
do {
depth += 2; /* FRWR reg + invalidate */
- delta -= ia->ri_max_frwr_depth;
+ delta -= ep->re_max_fr_depth;
} while (delta > 0);
}
@@ -233,34 +230,34 @@ int frwr_query_device(struct rpcrdma_xprt *r_xprt,
max_qp_wr -= 1;
if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
return -ENOMEM;
- if (ep->rep_max_requests > max_qp_wr)
- ep->rep_max_requests = max_qp_wr;
- ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
- if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
- ep->rep_max_requests = max_qp_wr / depth;
- if (!ep->rep_max_requests)
+ if (ep->re_max_requests > max_qp_wr)
+ ep->re_max_requests = max_qp_wr;
+ ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
+ if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
+ ep->re_max_requests = max_qp_wr / depth;
+ if (!ep->re_max_requests)
return -ENOMEM;
- ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
+ ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
}
- ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
- ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
- ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
- ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
- ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
-
- ia->ri_max_rdma_segs =
- DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
+ ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
+ ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
+ ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
+ ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
+ ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
+
+ ep->re_max_rdma_segs =
+ DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
/* Reply chunks require segments for head and tail buffers */
- ia->ri_max_rdma_segs += 2;
- if (ia->ri_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
- ia->ri_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
+ ep->re_max_rdma_segs += 2;
+ if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
+ ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
/* Ensure the underlying device is capable of conveying the
* largest r/wsize NFS will ask for. This guarantees that
* failing over from one RDMA device to another will not
* break NFS I/O.
*/
- if ((ia->ri_max_rdma_segs * ia->ri_max_frwr_depth) < RPCRDMA_MAX_SEGS)
+ if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
return -ENOMEM;
return 0;
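
A worked example with hypothetical numbers: suppose the device caps FRWR
depth at 8 segments while the transport must cover a 64-segment payload
(the real maximum is PAGE_SIZE-dependent). Then delta = 64 - 8 = 56, the
loop above runs 56 / 8 = 7 times, and depth grows by 14 WRs (one FastReg
plus one LocalInv per additional MR). re_attr.cap.max_send_wr is then
re_max_requests * depth, clamped back down if it exceeds the device's
max_qp_wr.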
@@ -286,14 +283,14 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr *mr)
{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
struct ib_reg_wr *reg_wr;
int i, n, dma_nents;
struct ib_mr *ibmr;
u8 key;
- if (nsegs > ia->ri_max_frwr_depth)
- nsegs = ia->ri_max_frwr_depth;
+ if (nsegs > ep->re_max_fr_depth)
+ nsegs = ep->re_max_fr_depth;
for (i = 0; i < nsegs;) {
if (seg->mr_page)
sg_set_page(&mr->mr_sg[i],
@@ -306,7 +303,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
++seg;
++i;
- if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
+ if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
continue;
if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
@@ -315,7 +312,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
mr->mr_dir = rpcrdma_data_dir(writing);
mr->mr_nents = i;
- dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
+ dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
mr->mr_dir);
if (!dma_nents)
goto out_dmamap_err;
@@ -356,8 +353,8 @@ out_mapmr_err:
/**
* frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
- * @cq: completion queue (ignored)
- * @wc: completed WR
+ * @cq: completion queue
+ * @wc: WCE for a completed FastReg WR
*
*/
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
@@ -369,20 +366,25 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_fastreg(wc, frwr);
/* The MR will get recycled when the associated req is retransmitted */
+
+ rpcrdma_flush_disconnect(cq, wc);
}
/**
- * frwr_send - post Send WR containing the RPC Call message
- * @ia: interface adapter
- * @req: Prepared RPC Call
+ * frwr_send - post Send WRs containing the RPC Call message
+ * @r_xprt: controlling transport instance
+ * @req: prepared RPC Call
*
* For FRWR, chain any FastReg WRs to the Send WR. Only a
* single ib_post_send call is needed to register memory
* and then post the Send WR.
*
- * Returns the result of ib_post_send.
+ * Returns the return code from ib_post_send.
+ *
+ * Caller must hold the transport send lock to ensure that the
+ * pointers to the transport's rdma_cm_id and QP are stable.
*/
-int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
struct ib_send_wr *post_wr;
struct rpcrdma_mr *mr;
@@ -403,7 +405,7 @@ int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
post_wr = &frwr->fr_regwr.wr;
}
- return ib_post_send(ia->ri_id->qp, post_wr, NULL);
+ return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL);
}
/**
@@ -419,7 +421,7 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
list_for_each_entry(mr, mrs, mr_list)
if (mr->mr_handle == rep->rr_inv_rkey) {
list_del_init(&mr->mr_list);
- trace_xprtrdma_mr_remoteinv(mr);
+ trace_xprtrdma_mr_reminv(mr);
rpcrdma_mr_put(mr);
break; /* only one invalidated MR per RPC */
}
@@ -435,8 +437,8 @@ static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
/**
* frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
- * @cq: completion queue (ignored)
- * @wc: completed WR
+ * @cq: completion queue
+ * @wc: WCE for a completed LocalInv WR
*
*/
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
@@ -449,12 +451,14 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_li(wc, frwr);
__frwr_release_mr(wc, mr);
+
+ rpcrdma_flush_disconnect(cq, wc);
}
/**
* frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
- * @cq: completion queue (ignored)
- * @wc: completed WR
+ * @cq: completion queue
+ * @wc: WCE for a completed LocalInv WR
*
* Awaken anyone waiting for an MR to finish being fenced.
*/
@@ -469,6 +473,8 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
trace_xprtrdma_wc_li_wake(wc, frwr);
__frwr_release_mr(wc, mr);
complete(&frwr->fr_linv_done);
+
+ rpcrdma_flush_disconnect(cq, wc);
}
/**
@@ -526,10 +532,10 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
/* Transport disconnect drains the receive CQ before it
* replaces the QP. The RPC reply handler won't call us
- * unless ri_id->qp is a valid pointer.
+ * unless re_id->qp is a valid pointer.
*/
bad_wr = NULL;
- rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
+ rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
/* The final LOCAL_INV WR in the chain is supposed to
* do the wake. If it was never posted, the wake will
@@ -556,8 +562,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
/**
* frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
- * @cq: completion queue (ignored)
- * @wc: completed WR
+ * @cq: completion queue
+ * @wc: WCE for a completed LocalInv WR
*
*/
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -575,6 +581,8 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
/* Ensure @rep is generated before __frwr_release_mr */
smp_rmb();
rpcrdma_complete_rqst(rep);
+
+ rpcrdma_flush_disconnect(cq, wc);
}
/**
@@ -629,10 +637,10 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
/* Transport disconnect drains the receive CQ before it
* replaces the QP. The RPC reply handler won't call us
- * unless ri_id->qp is a valid pointer.
+ * unless re_id->qp is a valid pointer.
*/
bad_wr = NULL;
- rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
+ rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
if (!rc)
return;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 28020ec104d4..4a81e6995d3e 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -103,21 +103,20 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
/**
* rpcrdma_set_max_header_sizes - Initialize inline payload sizes
- * @r_xprt: transport instance to initialize
+ * @ep: endpoint to initialize
*
* The max_inline fields contain the maximum size of an RPC message
* so the marshaling code doesn't have to repeat this calculation
* for every RPC.
*/
-void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
+void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep)
{
- unsigned int maxsegs = r_xprt->rx_ia.ri_max_rdma_segs;
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ unsigned int maxsegs = ep->re_max_rdma_segs;
- ep->rep_max_inline_send =
- ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
- ep->rep_max_inline_recv =
- ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
+ ep->re_max_inline_send =
+ ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs);
+ ep->re_max_inline_recv =
+ ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}
/* The client can send a request inline as long as the RPCRDMA header
@@ -132,9 +131,10 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
struct rpc_rqst *rqst)
{
struct xdr_buf *xdr = &rqst->rq_snd_buf;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
unsigned int count, remaining, offset;
- if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
+ if (xdr->len > ep->re_max_inline_send)
return false;
if (xdr->page_len) {
@@ -145,7 +145,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
remaining -= min_t(unsigned int,
PAGE_SIZE - offset, remaining);
offset = 0;
- if (++count > r_xprt->rx_ep.rep_attr.cap.max_send_sge)
+ if (++count > ep->re_attr.cap.max_send_sge)
return false;
}
}
@@ -162,7 +162,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
struct rpc_rqst *rqst)
{
- return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
+ return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
}
/* The client is required to provide a Reply chunk if the maximum
@@ -176,7 +176,7 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
const struct xdr_buf *buf = &rqst->rq_rcv_buf;
return (buf->head[0].iov_len + buf->tail[0].iov_len) <
- r_xprt->rx_ep.rep_max_inline_recv;
+ r_xprt->rx_ep->re_max_inline_recv;
}
/* Split @vec on page boundaries into SGEs. FMR registers pages, not
@@ -255,7 +255,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
/* When encoding a Read chunk, the tail iovec contains an
* XDR pad and may be omitted.
*/
- if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
+ if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup)
goto out;
/* When encoding a Write chunk, some servers need to see an
@@ -263,7 +263,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
* layer provides space in the tail iovec that may be used
* for this purpose.
*/
- if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
+ if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
goto out;
if (xdrbuf->tail[0].iov_len)
@@ -275,32 +275,6 @@ out:
return n;
}
-static inline int
-encode_item_present(struct xdr_stream *xdr)
-{
- __be32 *p;
-
- p = xdr_reserve_space(xdr, sizeof(*p));
- if (unlikely(!p))
- return -EMSGSIZE;
-
- *p = xdr_one;
- return 0;
-}
-
-static inline int
-encode_item_not_present(struct xdr_stream *xdr)
-{
- __be32 *p;
-
- p = xdr_reserve_space(xdr, sizeof(*p));
- if (unlikely(!p))
- return -EMSGSIZE;
-
- *p = xdr_zero;
- return 0;
-}
-
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
@@ -414,7 +388,7 @@ static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
} while (nsegs);
done:
- return encode_item_not_present(xdr);
+ return xdr_stream_encode_item_absent(xdr);
}
/* Register and XDR encode the Write list. Supports encoding a list
@@ -453,7 +427,7 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
if (nsegs < 0)
return nsegs;
- if (encode_item_present(xdr) < 0)
+ if (xdr_stream_encode_item_present(xdr) < 0)
return -EMSGSIZE;
segcount = xdr_reserve_space(xdr, sizeof(*segcount));
if (unlikely(!segcount))
@@ -480,7 +454,7 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
*segcount = cpu_to_be32(nchunks);
done:
- return encode_item_not_present(xdr);
+ return xdr_stream_encode_item_absent(xdr);
}
/* Register and XDR encode the Reply chunk. Supports encoding an array
@@ -507,14 +481,14 @@ static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
__be32 *segcount;
if (wtype != rpcrdma_replych)
- return encode_item_not_present(xdr);
+ return xdr_stream_encode_item_absent(xdr);
seg = req->rl_segments;
nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
if (nsegs < 0)
return nsegs;
- if (encode_item_present(xdr) < 0)
+ if (xdr_stream_encode_item_present(xdr) < 0)
return -EMSGSIZE;
segcount = xdr_reserve_space(xdr, sizeof(*segcount));
if (unlikely(!segcount))
@@ -1476,8 +1450,8 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
if (credits == 0)
credits = 1; /* don't deadlock */
- else if (credits > r_xprt->rx_ep.rep_max_requests)
- credits = r_xprt->rx_ep.rep_max_requests;
+ else if (credits > r_xprt->rx_ep->re_max_requests)
+ credits = r_xprt->rx_ep->re_max_requests;
if (buf->rb_credits != credits)
rpcrdma_update_cwnd(r_xprt, credits);
rpcrdma_post_recvs(r_xprt, false);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 908e78bb87c6..d510a3a15d4b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -117,7 +117,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
{
int ret;
- ret = svc_rdma_map_reply_msg(rdma, ctxt, &rqst->rq_snd_buf, NULL);
+ ret = svc_rdma_map_reply_msg(rdma, ctxt, NULL, &rqst->rq_snd_buf);
if (ret < 0)
return -EIO;
@@ -181,7 +181,9 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
if (!ctxt)
goto drop_connection;
- p = ctxt->sc_xprt_buf;
+ p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_MIN);
+ if (!p)
+ goto put_ctxt;
*p++ = rqst->rq_xid;
*p++ = rpcrdma_version;
*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests);
@@ -189,7 +191,6 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
*p++ = xdr_zero;
*p++ = xdr_zero;
*p = xdr_zero;
- svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_MIN);
#ifdef SVCRDMA_BACKCHANNEL_DEBUG
pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
@@ -197,12 +198,13 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
rqst->rq_xtime = ktime_get();
rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
- if (rc) {
- svc_rdma_send_ctxt_put(rdma, ctxt);
- goto drop_connection;
- }
+ if (rc)
+ goto put_ctxt;
return 0;
+put_ctxt:
+ svc_rdma_send_ctxt_put(rdma, ctxt);
+
drop_connection:
dprintk("svcrdma: failed to send bc call\n");
return -ENOTCONN;
@@ -250,6 +252,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
+ xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 96bccd398469..54469b72b25f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -193,6 +193,7 @@ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
out:
ctxt->rc_page_count = 0;
+ ctxt->rc_read_payload_length = 0;
return ctxt;
out_empty:
@@ -357,15 +358,14 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
arg->len = ctxt->rc_byte_len;
}
-/* This accommodates the largest possible Write chunk,
- * in one segment.
+/* This accommodates the largest possible Write chunk.
*/
-#define MAX_BYTES_WRITE_SEG ((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))
+#define MAX_BYTES_WRITE_CHUNK ((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))
/* This accommodates the largest possible Position-Zero
- * Read chunk or Reply chunk, in one segment.
+ * Read chunk or Reply chunk.
*/
-#define MAX_BYTES_SPECIAL_SEG ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
+#define MAX_BYTES_SPECIAL_CHUNK ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
/* Sanity check the Read list.
*
@@ -373,7 +373,7 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
* - This implementation supports only one Read chunk.
*
* Sanity checks:
- * - Read list does not overflow buffer.
+ * - Read list does not overflow Receive buffer.
* - Segment size limited by largest NFS data payload.
*
* The segment count is limited to how many segments can
@@ -381,30 +381,44 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
* buffer. That's about 40 Read segments for a 1KB inline
* threshold.
*
- * Returns pointer to the following Write list.
+ * Return values:
+ * %true: Read list is valid. @rctxt's xdr_stream is updated
+ * to point to the first byte past the Read list.
+ * %false: Read list is corrupt. @rctxt's xdr_stream is left
+ * in an unknown state.
*/
-static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
+static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
- u32 position;
+ u32 position, len;
bool first;
+ __be32 *p;
+
+ p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
+ if (!p)
+ return false;
+ len = 0;
first = true;
- while (*p++ != xdr_zero) {
+ while (*p != xdr_zero) {
+ p = xdr_inline_decode(&rctxt->rc_stream,
+ rpcrdma_readseg_maxsz * sizeof(*p));
+ if (!p)
+ return false;
+
if (first) {
- position = be32_to_cpup(p++);
+ position = be32_to_cpup(p);
first = false;
- } else if (be32_to_cpup(p++) != position) {
- return NULL;
+ } else if (be32_to_cpup(p) != position) {
+ return false;
}
- p++; /* handle */
- if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
- return NULL;
- p += 2; /* offset */
+ p += 2;
+ len += be32_to_cpup(p);
- if (p > end)
- return NULL;
+ p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
+ if (!p)
+ return false;
}
- return p;
+ return len <= MAX_BYTES_SPECIAL_CHUNK;
}
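
For reference, each entry the loop consumes has this on-the-wire layout
(RPC-over-RDMA version 1, RFC 8166):

        1 word    list discriminator (non-zero means an entry follows)
        1 word    position: XDR offset the chunk data maps into
        1 word    RDMA handle (rkey)
        1 word    segment length in bytes
        2 words   segment offset (64 bits)

which is why rpcrdma_readseg_maxsz words are decoded per entry after the
discriminator, and why every entry in a single Read chunk must repeat the
same position.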
/* The segment count is limited to how many segments can
@@ -412,67 +426,100 @@ static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
* buffer. That's about 60 Write segments for a 1KB inline
* threshold.
*/
-static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
- u32 maxlen)
+static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt, u32 maxlen)
{
- u32 i, segcount;
+ u32 i, segcount, total;
+ __be32 *p;
+
+ p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
+ if (!p)
+ return false;
+ segcount = be32_to_cpup(p);
- segcount = be32_to_cpup(p++);
+ total = 0;
for (i = 0; i < segcount; i++) {
- p++; /* handle */
- if (be32_to_cpup(p++) > maxlen)
- return NULL;
- p += 2; /* offset */
+ u32 handle, length;
+ u64 offset;
- if (p > end)
- return NULL;
- }
+ p = xdr_inline_decode(&rctxt->rc_stream,
+ rpcrdma_segment_maxsz * sizeof(*p));
+ if (!p)
+ return false;
+
+ handle = be32_to_cpup(p++);
+ length = be32_to_cpup(p++);
+ xdr_decode_hyper(p, &offset);
+ trace_svcrdma_decode_wseg(handle, length, offset);
- return p;
+ total += length;
+ }
+ return total <= maxlen;
}
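
For reference, the Write chunk walked here is laid out as (RFC 8166):

        1 word          segment count N
        N x 4 words     segments: handle, length, 64-bit offset

so a chunk's payload capacity is the sum of its N segment lengths, which
this version accumulates into @total and bounds-checks against @maxlen as
a whole instead of checking each segment in isolation.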
/* Sanity check the Write list.
*
* Implementation limits:
- * - This implementation supports only one Write chunk.
+ * - This implementation currently supports only one Write chunk.
*
* Sanity checks:
- * - Write list does not overflow buffer.
- * - Segment size limited by largest NFS data payload.
- *
- * Returns pointer to the following Reply chunk.
+ * - Write list does not overflow Receive buffer.
+ * - Chunk size limited by largest NFS data payload.
+ *
+ * Return values:
+ * %true: Write list is valid. @rctxt's xdr_stream is updated
+ * to point to the first byte past the Write list.
+ * %false: Write list is corrupt. @rctxt's xdr_stream is left
+ * in an unknown state.
*/
-static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
+static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
- u32 chcount;
+ u32 chcount = 0;
+ __be32 *p;
- chcount = 0;
- while (*p++ != xdr_zero) {
- p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
+ p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
+ if (!p)
+ return false;
+ rctxt->rc_write_list = p;
+ while (*p != xdr_zero) {
+ if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK))
+ return false;
+ ++chcount;
+ p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
if (!p)
- return NULL;
- if (chcount++ > 1)
- return NULL;
+ return false;
}
- return p;
+ if (!chcount)
+ rctxt->rc_write_list = NULL;
+ return chcount < 2;
}
/* Sanity check the Reply chunk.
*
* Sanity checks:
- * - Reply chunk does not overflow buffer.
- * - Segment size limited by largest NFS data payload.
- *
- * Returns pointer to the following RPC header.
+ * - Reply chunk does not overflow Receive buffer.
+ * - Chunk size limited by largest NFS data payload.
+ *
+ * Return values:
+ * %true: Reply chunk is valid. @rctxt's xdr_stream is updated
+ * to point to the first byte past the Reply chunk.
+ * %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
+ * in an unknown state.
*/
-static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
+static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
- if (*p++ != xdr_zero) {
- p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
- if (!p)
- return NULL;
+ __be32 *p;
+
+ p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
+ if (!p)
+ return false;
+ rctxt->rc_reply_chunk = p;
+ if (*p != xdr_zero) {
+ if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK))
+ return false;
+ } else {
+ rctxt->rc_reply_chunk = NULL;
}
- return p;
+ return true;
}
/* RPC-over-RDMA Version One private extension: Remote Invalidation.
@@ -537,60 +584,61 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
}
-/* On entry, xdr->head[0].iov_base points to first byte in the
- * RPC-over-RDMA header.
+/**
+ * svc_rdma_xdr_decode_req - Decode the transport header
+ * @rq_arg: xdr_buf containing the ingress RPC/RDMA message
+ * @rctxt: receive context that will hold the decoding state
+ *
+ * On entry, xdr->head[0].iov_base points to first byte of the
+ * RPC-over-RDMA transport header.
*
* On successful exit, head[0] points to first byte past the
* RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
+ *
* The length of the RPC-over-RDMA header is returned.
*
* Assumptions:
* - The transport header is entirely contained in the head iovec.
*/
-static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
+static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
+ struct svc_rdma_recv_ctxt *rctxt)
{
- __be32 *p, *end, *rdma_argp;
+ __be32 *p, *rdma_argp;
unsigned int hdr_len;
- /* Verify that there's enough bytes for header + something */
- if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
- goto out_short;
-
rdma_argp = rq_arg->head[0].iov_base;
- if (*(rdma_argp + 1) != rpcrdma_version)
- goto out_version;
+ xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);
- switch (*(rdma_argp + 3)) {
+ p = xdr_inline_decode(&rctxt->rc_stream,
+ rpcrdma_fixed_maxsz * sizeof(*p));
+ if (unlikely(!p))
+ goto out_short;
+ p++;
+ if (*p != rpcrdma_version)
+ goto out_version;
+ p += 2;
+ switch (*p) {
case rdma_msg:
break;
case rdma_nomsg:
break;
-
case rdma_done:
goto out_drop;
-
case rdma_error:
goto out_drop;
-
default:
goto out_proc;
}
- end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
- p = xdr_check_read_list(rdma_argp + 4, end);
- if (!p)
- goto out_inval;
- p = xdr_check_write_list(p, end);
- if (!p)
+ if (!xdr_check_read_list(rctxt))
goto out_inval;
- p = xdr_check_reply_chunk(p, end);
- if (!p)
+ if (!xdr_check_write_list(rctxt))
goto out_inval;
- if (p > end)
+ if (!xdr_check_reply_chunk(rctxt))
goto out_inval;
- rq_arg->head[0].iov_base = p;
- hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
+ rq_arg->head[0].iov_base = rctxt->rc_stream.p;
+ hdr_len = xdr_stream_pos(&rctxt->rc_stream);
rq_arg->head[0].iov_len -= hdr_len;
rq_arg->len -= hdr_len;
trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
@@ -650,7 +698,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
__be32 *rdma_argp, int status)
{
struct svc_rdma_send_ctxt *ctxt;
- unsigned int length;
__be32 *p;
int ret;
@@ -658,29 +705,46 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
if (!ctxt)
return;
- p = ctxt->sc_xprt_buf;
+ p = xdr_reserve_space(&ctxt->sc_stream,
+ rpcrdma_fixed_maxsz * sizeof(*p));
+ if (!p)
+ goto put_ctxt;
+
*p++ = *rdma_argp;
*p++ = *(rdma_argp + 1);
*p++ = xprt->sc_fc_credits;
- *p++ = rdma_error;
+ *p = rdma_error;
+
switch (status) {
case -EPROTONOSUPPORT:
+ p = xdr_reserve_space(&ctxt->sc_stream, 3 * sizeof(*p));
+ if (!p)
+ goto put_ctxt;
+
*p++ = err_vers;
*p++ = rpcrdma_version;
- *p++ = rpcrdma_version;
+ *p = rpcrdma_version;
trace_svcrdma_err_vers(*rdma_argp);
break;
default:
- *p++ = err_chunk;
+ p = xdr_reserve_space(&ctxt->sc_stream, sizeof(*p));
+ if (!p)
+ goto put_ctxt;
+
+ *p = err_chunk;
trace_svcrdma_err_chunk(*rdma_argp);
}
- length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
- svc_rdma_sync_reply_hdr(xprt, ctxt, length);
+ ctxt->sc_send_wr.num_sge = 1;
ctxt->sc_send_wr.opcode = IB_WR_SEND;
+ ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len;
ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
if (ret)
- svc_rdma_send_ctxt_put(xprt, ctxt);
+ goto put_ctxt;
+ return;
+
+put_ctxt:
+ svc_rdma_send_ctxt_put(xprt, ctxt);
}
/* By convention, backchannel calls arrive via rdma_msg type
@@ -785,7 +849,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_next_page = rqstp->rq_respages;
p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
- ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
+ ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
if (ret < 0)
goto out_err;
if (ret == 0)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 48fe3b16b0d9..bd7c195d872e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -41,7 +41,7 @@ struct svc_rdma_rw_ctxt {
struct rdma_rw_ctx rw_ctx;
int rw_nents;
struct sg_table rw_sg_table;
- struct scatterlist rw_first_sgl[0];
+ struct scatterlist rw_first_sgl[];
};
static inline struct svc_rdma_rw_ctxt *
@@ -439,7 +439,8 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
if (ret < 0)
goto out_initerr;
- trace_svcrdma_encode_wseg(seg_handle, write_len, seg_offset);
+ trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);
+
list_add(&ctxt->rw_list, &cc->cc_rwctxts);
cc->cc_sqecount += ret;
if (write_len == seg_length - info->wi_seg_off) {
@@ -482,18 +483,19 @@ static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
vec->iov_len);
}
-/* Send an xdr_buf's page list by itself. A Write chunk is
- * just the page list. a Reply chunk is the head, page list,
- * and tail. This function is shared between the two types
- * of chunk.
+/* Send an xdr_buf's page list by itself. A Write chunk is just
+ * the page list. A Reply chunk is @xdr's head, page list, and
+ * tail. This function is shared between the two types of chunk.
*/
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
- struct xdr_buf *xdr)
+ struct xdr_buf *xdr,
+ unsigned int offset,
+ unsigned long length)
{
info->wi_xdr = xdr;
- info->wi_next_off = 0;
+ info->wi_next_off = offset - xdr->head[0].iov_len;
return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
- xdr->page_len);
+ length);
}
/**
@@ -501,6 +503,8 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
* @rdma: controlling RDMA transport
* @wr_ch: Write chunk provided by client
* @xdr: xdr_buf containing the data payload
+ * @offset: payload's byte offset in @xdr
+ * @length: size of payload, in bytes
*
* Returns a non-negative number of bytes the chunk consumed, or
* %-E2BIG if the payload was larger than the Write chunk,
@@ -510,19 +514,20 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
* %-EIO if rdma_rw initialization failed (DMA mapping, etc).
*/
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
- struct xdr_buf *xdr)
+ struct xdr_buf *xdr,
+ unsigned int offset, unsigned long length)
{
struct svc_rdma_write_info *info;
int ret;
- if (!xdr->page_len)
+ if (!length)
return 0;
info = svc_rdma_write_info_alloc(rdma, wr_ch);
if (!info)
return -ENOMEM;
- ret = svc_rdma_send_xdr_pagelist(info, xdr);
+ ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
if (ret < 0)
goto out_err;
@@ -530,8 +535,8 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
if (ret < 0)
goto out_err;
- trace_svcrdma_encode_write(xdr->page_len);
- return xdr->page_len;
+ trace_svcrdma_send_write_chunk(xdr->page_len);
+ return length;
out_err:
svc_rdma_write_info_free(info);
@@ -541,8 +546,7 @@ out_err:
/**
* svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
* @rdma: controlling RDMA transport
- * @rp_ch: Reply chunk provided by client
- * @writelist: true if client provided a Write list
+ * @rctxt: Write and Reply chunks from client
* @xdr: xdr_buf containing an RPC Reply
*
* Returns a non-negative number of bytes the chunk consumed, or
@@ -552,13 +556,14 @@ out_err:
* %-ENOTCONN if posting failed (connection is lost),
* %-EIO if rdma_rw initialization failed (DMA mapping, etc).
*/
-int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
- bool writelist, struct xdr_buf *xdr)
+int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ struct xdr_buf *xdr)
{
struct svc_rdma_write_info *info;
int consumed, ret;
- info = svc_rdma_write_info_alloc(rdma, rp_ch);
+ info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
if (!info)
return -ENOMEM;
@@ -570,8 +575,10 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
/* Send the page list in the Reply chunk only if the
* client did not provide Write chunks.
*/
- if (!writelist && xdr->page_len) {
- ret = svc_rdma_send_xdr_pagelist(info, xdr);
+ if (!rctxt->rc_write_list && xdr->page_len) {
+ ret = svc_rdma_send_xdr_pagelist(info, xdr,
+ xdr->head[0].iov_len,
+ xdr->page_len);
if (ret < 0)
goto out_err;
consumed += xdr->page_len;
@@ -588,7 +595,7 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
if (ret < 0)
goto out_err;
- trace_svcrdma_encode_reply(consumed);
+ trace_svcrdma_send_reply_chunk(consumed);
return consumed;
out_err:
@@ -691,7 +698,7 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
if (ret < 0)
break;
- trace_svcrdma_encode_rseg(rs_handle, rs_length, rs_offset);
+ trace_svcrdma_send_rseg(rs_handle, rs_length, rs_offset);
info->ri_chunklen += rs_length;
}
@@ -722,7 +729,7 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
if (ret < 0)
goto out;
- trace_svcrdma_encode_read(info->ri_chunklen, info->ri_position);
+ trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);
head->rc_hdr_count = 0;
@@ -778,7 +785,7 @@ static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
if (ret < 0)
goto out;
- trace_svcrdma_encode_pzr(info->ri_chunklen);
+ trace_svcrdma_send_pzr(info->ri_chunklen);
head->rc_arg.len += info->ri_chunklen;
head->rc_arg.buflen += info->ri_chunklen;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index f3f108090aa4..90cba3058f04 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -151,6 +151,8 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
ctxt->sc_cqe.done = svc_rdma_wc_send;
ctxt->sc_xprt_buf = buffer;
+ xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
+ rdma->sc_max_req_size);
ctxt->sc_sges[0].addr = addr;
for (i = 0; i < rdma->sc_max_send_sges; i++)
@@ -204,6 +206,10 @@ struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
spin_unlock(&rdma->sc_send_lock);
out:
+ rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
+ xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
+ ctxt->sc_xprt_buf, NULL);
+
ctxt->sc_send_wr.num_sge = 0;
ctxt->sc_cur_sge_no = 0;
ctxt->sc_page_count = 0;
@@ -295,6 +301,12 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
might_sleep();
+ /* Sync the transport header buffer */
+ ib_dma_sync_single_for_device(rdma->sc_pd->device,
+ wr->sg_list[0].addr,
+ wr->sg_list[0].length,
+ DMA_TO_DEVICE);
+
/* If the SQ is full, wait until an SQ entry is available */
while (1) {
if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
@@ -322,166 +334,173 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
return ret;
}
-static u32 xdr_padsize(u32 len)
+/**
+ * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
+ * @sctxt: Send context for the RPC Reply
+ *
+ * Return values:
+ * On success, returns length in bytes of the Reply XDR buffer
+ * that was consumed by the Reply Read list
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
- return (len & 3) ? (4 - (len & 3)) : 0;
+ /* RPC-over-RDMA version 1 replies never have a Read list. */
+ return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}
-/* Returns length of transport header, in bytes.
+/**
+ * svc_rdma_encode_write_segment - Encode one Write segment
+ * @src: matching Write chunk in the RPC Call header
+ * @sctxt: Send context for the RPC Reply
+ * @remaining: bytes of the payload still to be placed in the Write chunk
+ *
+ * Return values:
+ * On success, returns length in bytes of the Reply XDR buffer
+ * that was consumed by the Write segment
+ * %-EMSGSIZE on XDR buffer overflow
*/
-static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
+static ssize_t svc_rdma_encode_write_segment(__be32 *src,
+ struct svc_rdma_send_ctxt *sctxt,
+ unsigned int *remaining)
{
- unsigned int nsegs;
__be32 *p;
-
- p = rdma_resp;
-
- /* RPC-over-RDMA V1 replies never have a Read list. */
- p += rpcrdma_fixed_maxsz + 1;
-
- /* Skip Write list. */
- while (*p++ != xdr_zero) {
- nsegs = be32_to_cpup(p++);
- p += nsegs * rpcrdma_segment_maxsz;
- }
-
- /* Skip Reply chunk. */
- if (*p++ != xdr_zero) {
- nsegs = be32_to_cpup(p++);
- p += nsegs * rpcrdma_segment_maxsz;
+ const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
+ u32 handle, length;
+ u64 offset;
+
+ p = xdr_reserve_space(&sctxt->sc_stream, len);
+ if (!p)
+ return -EMSGSIZE;
+
+ handle = be32_to_cpup(src++);
+ length = be32_to_cpup(src++);
+ xdr_decode_hyper(src, &offset);
+
+ *p++ = cpu_to_be32(handle);
+ if (*remaining < length) {
+ /* segment only partly filled */
+ length = *remaining;
+ *remaining = 0;
+ } else {
+ /* entire segment was consumed */
+ *remaining -= length;
}
+ *p++ = cpu_to_be32(length);
+ xdr_encode_hyper(p, offset);
- return (unsigned long)p - (unsigned long)rdma_resp;
+ trace_svcrdma_encode_wseg(handle, length, offset);
+ return len;
}
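
A worked example of the partial-fill logic above, with hypothetical sizes:
the client offers a segment of 4096 bytes but only 1000 payload bytes
remain, so the reply encodes that segment with length 1000 and *remaining
drops to zero; every later segment in the chunk is then encoded with a
zero length, telling the client exactly how much of each offered segment
was actually written.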
-/* One Write chunk is copied from Call transport header to Reply
- * transport header. Each segment's length field is updated to
- * reflect number of bytes consumed in the segment.
- *
- * Returns number of segments in this chunk.
+/**
+ * svc_rdma_encode_write_chunk - Encode one Write chunk
+ * @src: matching Write chunk in the RPC Call header
+ * @sctxt: Send context for the RPC Reply
+ * @remaining: size in bytes of the payload in the Write chunk
+ *
+ * Copy a Write chunk from the Call transport header to the
+ * Reply transport header. Update each segment's length field
+ * to reflect the number of bytes written in that segment.
+ *
+ * Return values:
+ * On success, returns length in bytes of the Reply XDR buffer
+ * that was consumed by the Write chunk
+ * %-EMSGSIZE on XDR buffer overflow
*/
-static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
+static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
+ struct svc_rdma_send_ctxt *sctxt,
unsigned int remaining)
{
unsigned int i, nsegs;
- u32 seg_len;
+ ssize_t len, ret;
- /* Write list discriminator */
- *dst++ = *src++;
+ len = 0;
+ trace_svcrdma_encode_write_chunk(remaining);
- /* number of segments in this chunk */
- nsegs = be32_to_cpup(src);
- *dst++ = *src++;
+ src++;
+ ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
+ if (ret < 0)
+ return -EMSGSIZE;
+ len += ret;
- for (i = nsegs; i; i--) {
- /* segment's RDMA handle */
- *dst++ = *src++;
-
- /* bytes returned in this segment */
- seg_len = be32_to_cpu(*src);
- if (remaining >= seg_len) {
- /* entire segment was consumed */
- *dst = *src;
- remaining -= seg_len;
- } else {
- /* segment only partly filled */
- *dst = cpu_to_be32(remaining);
- remaining = 0;
- }
- dst++; src++;
+ nsegs = be32_to_cpup(src++);
+ ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
+ if (ret < 0)
+ return -EMSGSIZE;
+ len += ret;
- /* segment's RDMA offset */
- *dst++ = *src++;
- *dst++ = *src++;
+ for (i = nsegs; i; i--) {
+ ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
+ if (ret < 0)
+ return -EMSGSIZE;
+ src += rpcrdma_segment_maxsz;
+ len += ret;
}
- return nsegs;
+ return len;
}
-/* The client provided a Write list in the Call message. Fill in
- * the segments in the first Write chunk in the Reply's transport
+/**
+ * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
+ * @rctxt: Reply context with information about the RPC Call
+ * @sctxt: Send context for the RPC Reply
+ * @length: size in bytes of the payload in the first Write chunk
+ *
+ * The client provides a Write chunk list in the Call message. Fill
+ * in the segments in the first Write chunk in the Reply's transport
* header with the number of bytes consumed in each segment.
* Remaining chunks are returned unused.
*
* Assumptions:
* - Client has provided only one Write chunk
- */
-static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
- unsigned int consumed)
-{
- unsigned int nsegs;
- __be32 *p, *q;
-
- /* RPC-over-RDMA V1 replies never have a Read list. */
- p = rdma_resp + rpcrdma_fixed_maxsz + 1;
-
- q = wr_ch;
- while (*q != xdr_zero) {
- nsegs = xdr_encode_write_chunk(p, q, consumed);
- q += 2 + nsegs * rpcrdma_segment_maxsz;
- p += 2 + nsegs * rpcrdma_segment_maxsz;
- consumed = 0;
- }
-
- /* Terminate Write list */
- *p++ = xdr_zero;
-
- /* Reply chunk discriminator; may be replaced later */
- *p = xdr_zero;
-}
-
-/* The client provided a Reply chunk in the Call message. Fill in
- * the segments in the Reply chunk in the Reply message with the
- * number of bytes consumed in each segment.
*
- * Assumptions:
- * - Reply can always fit in the provided Reply chunk
+ * Return values:
+ * On success, returns length in bytes of the Reply XDR buffer
+ * that was consumed by the Reply's Write list
+ * %-EMSGSIZE on XDR buffer overflow
*/
-static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
- unsigned int consumed)
+static ssize_t
+svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
+ struct svc_rdma_send_ctxt *sctxt,
+ unsigned int length)
{
- __be32 *p;
+ ssize_t len, ret;
- /* Find the Reply chunk in the Reply's xprt header.
- * RPC-over-RDMA V1 replies never have a Read list.
- */
- p = rdma_resp + rpcrdma_fixed_maxsz + 1;
+ ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
+ if (ret < 0)
+ return ret;
+ len = ret;
- /* Skip past Write list */
- while (*p++ != xdr_zero)
- p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
+ /* Terminate the Write list */
+ ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
+ if (ret < 0)
+ return ret;
- xdr_encode_write_chunk(p, rp_ch, consumed);
+ return len + ret;
}
-/* Parse the RPC Call's transport header.
+/**
+ * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
+ * @rctxt: Reply context with information about the RPC Call
+ * @sctxt: Send context for the RPC Reply
+ * @length: size in bytes of the payload in the Reply chunk
+ *
+ * Assumptions:
+ * - Reply can always fit in the client-provided Reply chunk
+ *
+ * Return values:
+ * On success, returns length in bytes of the Reply XDR buffer
+ * that was consumed by the Reply's Reply chunk
+ * %-EMSGSIZE on XDR buffer overflow
*/
-static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
- __be32 **write, __be32 **reply)
+static ssize_t
+svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
+ struct svc_rdma_send_ctxt *sctxt,
+ unsigned int length)
{
- __be32 *p;
-
- p = rdma_argp + rpcrdma_fixed_maxsz;
-
- /* Read list */
- while (*p++ != xdr_zero)
- p += 5;
-
- /* Write list */
- if (*p != xdr_zero) {
- *write = p;
- while (*p++ != xdr_zero)
- p += 1 + be32_to_cpu(*p) * 4;
- } else {
- *write = NULL;
- p++;
- }
-
- /* Reply chunk */
- if (*p != xdr_zero)
- *reply = p;
- else
- *reply = NULL;
+ return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
+ length);
}
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
@@ -520,38 +539,36 @@ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
}
/**
- * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
+ * svc_rdma_pull_up_needed - Determine whether to use pull-up
* @rdma: controlling transport
- * @ctxt: send_ctxt for the Send WR
- * @len: length of transport header
+ * @sctxt: send_ctxt for the Send WR
+ * @rctxt: Write and Reply chunks provided by client
+ * @xdr: xdr_buf containing RPC message to transmit
*
- */
-void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt,
- unsigned int len)
-{
- ctxt->sc_sges[0].length = len;
- ctxt->sc_send_wr.num_sge++;
- ib_dma_sync_single_for_device(rdma->sc_pd->device,
- ctxt->sc_sges[0].addr, len,
- DMA_TO_DEVICE);
-}
-
-/* If the xdr_buf has more elements than the device can
- * transmit in a single RDMA Send, then the reply will
- * have to be copied into a bounce buffer.
+ * Returns:
+ * %true if pull-up must be used
+ * %false otherwise
*/
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
- struct xdr_buf *xdr,
- __be32 *wr_lst)
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ struct xdr_buf *xdr)
{
int elements;
+ /* For small messages, copying bytes is cheaper than DMA mapping.
+ */
+ if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
+ return true;
+
+ /* Check whether the xdr_buf has more elements than can
+ * fit in a single RDMA Send.
+ */
/* xdr->head */
elements = 1;
/* xdr->pages */
- if (!wr_lst) {
+ if (!rctxt || !rctxt->rc_write_list) {
unsigned int remaining;
unsigned long pageoff;
@@ -573,29 +590,36 @@ static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
return elements >= rdma->sc_max_send_sges;
}
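
The decision above folds two tests into one predicate: a size threshold under which copying beats DMA mapping, and an SGE budget check against the device limit. A user-space model of the same shape follows; PULLUP_THRESH, the struct, and the field names are stand-ins invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PULLUP_THRESH 256u	/* stand-in for RPCRDMA_PULLUP_THRESH */

struct xdr_model {
	unsigned int head_len;
	unsigned int page_base;	/* offset into the first page */
	unsigned int page_len;
	unsigned int tail_len;
};

static bool pull_up_needed(const struct xdr_model *xdr, unsigned int hdr_len,
			   bool has_write_list, unsigned int max_send_sges)
{
	unsigned int elements = 1;	/* head is always one SGE */

	/* Small messages: copying is cheaper than DMA mapping. */
	if (hdr_len + xdr->head_len + xdr->page_len + xdr->tail_len <
	    PULLUP_THRESH)
		return true;

	/* Page payload is sent inline only when no Write list exists. */
	if (!has_write_list) {
		unsigned int remaining = xdr->page_len;
		unsigned int pageoff = xdr->page_base & (PAGE_SIZE - 1);

		while (remaining) {
			unsigned int len = PAGE_SIZE - pageoff;

			remaining -= (remaining < len) ? remaining : len;
			elements++;
			pageoff = 0;
		}
	}

	if (xdr->tail_len)
		elements++;

	return elements >= max_send_sges;
}

int main(void)
{
	struct xdr_model xdr = { .head_len = 200, .page_base = 100,
				 .page_len = 3 * PAGE_SIZE, .tail_len = 4 };

	printf("pull-up? %s\n",
	       pull_up_needed(&xdr, 64, false, 4) ? "yes" : "no");
	return 0;
}
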
-/* The device is not capable of sending the reply directly.
- * Assemble the elements of @xdr into the transport header
- * buffer.
+/**
+ * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
+ * @rdma: controlling transport
+ * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
+ * @rctxt: Write and Reply chunks provided by client
+ * @xdr: prepared xdr_buf containing RPC message
+ *
+ * The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header buffer.
+ *
+ * Returns zero on success, or a negative errno on failure.
*/
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt,
- struct xdr_buf *xdr, __be32 *wr_lst)
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr)
{
unsigned char *dst, *tailbase;
unsigned int taillen;
- dst = ctxt->sc_xprt_buf;
- dst += ctxt->sc_sges[0].length;
-
+ dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
dst += xdr->head[0].iov_len;
tailbase = xdr->tail[0].iov_base;
taillen = xdr->tail[0].iov_len;
- if (wr_lst) {
+ if (rctxt && rctxt->rc_write_list) {
u32 xdrpad;
- xdrpad = xdr_padsize(xdr->page_len);
+ xdrpad = xdr_pad_size(xdr->page_len);
if (taillen && xdrpad) {
tailbase += xdrpad;
taillen -= xdrpad;
@@ -621,29 +645,26 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
if (taillen)
memcpy(dst, tailbase, taillen);
- ctxt->sc_sges[0].length += xdr->len;
- ib_dma_sync_single_for_device(rdma->sc_pd->device,
- ctxt->sc_sges[0].addr,
- ctxt->sc_sges[0].length,
- DMA_TO_DEVICE);
-
+ sctxt->sc_sges[0].length += xdr->len;
+ trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
return 0;
}
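
When pull-up is chosen, the message is flattened: head, optional page payload, and tail land back-to-back behind the transport header. The sketch below models just the copy, including the rule that a Write list means the page bytes are skipped and only the XDR pad at the front of the tail is dropped; the flat destination buffer is a simplification, not the kernel's sge handling.

#include <stdio.h>
#include <string.h>

static unsigned int xdr_pad_size(unsigned int len)
{
	return (4 - (len & 3)) & 3;	/* XDR pads to 4-byte multiples */
}

/* Returns total bytes placed in @dst. */
static size_t pull_up(char *dst,
		      const char *head, size_t head_len,
		      const char *pages, size_t page_len,
		      const char *tail, size_t tail_len,
		      int has_write_list)
{
	char *p = dst;

	memcpy(p, head, head_len);
	p += head_len;

	if (has_write_list) {
		/* Page payload went via RDMA Write; drop only the
		 * XDR pad the upper layer placed in the tail. */
		unsigned int pad = xdr_pad_size((unsigned int)page_len);

		if (tail_len && pad) {
			tail += pad;
			tail_len -= pad;
		}
	} else {
		memcpy(p, pages, page_len);
		p += page_len;
	}

	memcpy(p, tail, tail_len);
	return (size_t)(p + tail_len - dst);
}

int main(void)
{
	char dst[64];
	size_t n = pull_up(dst, "HEAD", 4, "PAGEDATA1", 9, "XXXtail", 7, 1);

	printf("%zu bytes: %.*s\n", n, (int)n, dst);	/* "HEADtail" */
	return 0;
}
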
-/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
+/* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
* @rdma: controlling transport
- * @ctxt: send_ctxt for the Send WR
+ * @sctxt: send_ctxt for the Send WR
+ * @rctxt: Write and Reply chunks provided by client
* @xdr: prepared xdr_buf containing RPC message
- * @wr_lst: pointer to Call header's Write list, or NULL
*
* Load the xdr_buf into the ctxt's sge array, and DMA map each
- * element as it is added.
+ * element as it is added. The Send WR's num_sge field is set.
*
* Returns zero on success, or a negative errno on failure.
*/
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt,
- struct xdr_buf *xdr, __be32 *wr_lst)
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ struct xdr_buf *xdr)
{
unsigned int len, remaining;
unsigned long page_off;
@@ -652,11 +673,24 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
u32 xdr_pad;
int ret;
- if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
- return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+ /* Set up the (persistently-mapped) transport header SGE. */
+ sctxt->sc_send_wr.num_sge = 1;
+ sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
- ++ctxt->sc_cur_sge_no;
- ret = svc_rdma_dma_map_buf(rdma, ctxt,
+ /* If there is a Reply chunk, nothing follows the transport
+ * header, and we're done here.
+ */
+ if (rctxt && rctxt->rc_reply_chunk)
+ return 0;
+
+ /* For pull-up, svc_rdma_send() will sync the transport header.
+ * No additional DMA mapping is necessary.
+ */
+ if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
+ return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);
+
+ ++sctxt->sc_cur_sge_no;
+ ret = svc_rdma_dma_map_buf(rdma, sctxt,
xdr->head[0].iov_base,
xdr->head[0].iov_len);
if (ret < 0)
@@ -667,10 +701,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
* have added XDR padding in the tail buffer, and that
* should not be included inline.
*/
- if (wr_lst) {
+ if (rctxt && rctxt->rc_write_list) {
base = xdr->tail[0].iov_base;
len = xdr->tail[0].iov_len;
- xdr_pad = xdr_padsize(xdr->page_len);
+ xdr_pad = xdr_pad_size(xdr->page_len);
if (len && xdr_pad) {
base += xdr_pad;
@@ -686,8 +720,8 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
while (remaining) {
len = min_t(u32, PAGE_SIZE - page_off, remaining);
- ++ctxt->sc_cur_sge_no;
- ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
+ ++sctxt->sc_cur_sge_no;
+ ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
page_off, len);
if (ret < 0)
return ret;
@@ -700,8 +734,8 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
len = xdr->tail[0].iov_len;
tail:
if (len) {
- ++ctxt->sc_cur_sge_no;
- ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
+ ++sctxt->sc_cur_sge_no;
+ ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
if (ret < 0)
return ret;
}
@@ -748,18 +782,14 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
*/
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
- struct svc_rdma_recv_ctxt *rctxt,
- struct svc_rqst *rqstp,
- __be32 *wr_lst, __be32 *rp_ch)
+ const struct svc_rdma_recv_ctxt *rctxt,
+ struct svc_rqst *rqstp)
{
int ret;
- if (!rp_ch) {
- ret = svc_rdma_map_reply_msg(rdma, sctxt,
- &rqstp->rq_res, wr_lst);
- if (ret < 0)
- return ret;
- }
+ ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
+ if (ret < 0)
+ return ret;
svc_rdma_save_io_pages(rqstp, sctxt);
@@ -769,8 +799,6 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
} else {
sctxt->sc_send_wr.opcode = IB_WR_SEND;
}
- dprintk("svcrdma: posting Send WR with %u sge(s)\n",
- sctxt->sc_send_wr.num_sge);
return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}
@@ -785,26 +813,31 @@ static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt,
struct svc_rqst *rqstp)
{
+ struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
+ __be32 *rdma_argp = rctxt->rc_recv_buf;
__be32 *p;
- int ret;
- p = ctxt->sc_xprt_buf;
- trace_svcrdma_err_chunk(*p);
- p += 3;
+ rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
+ xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
+ NULL);
+
+ p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_ERR);
+ if (!p)
+ return -ENOMSG;
+
+ *p++ = *rdma_argp;
+ *p++ = *(rdma_argp + 1);
+ *p++ = rdma->sc_fc_credits;
*p++ = rdma_error;
*p = err_chunk;
- svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);
+ trace_svcrdma_err_chunk(*rdma_argp);
svc_rdma_save_io_pages(rqstp, ctxt);
+ ctxt->sc_send_wr.num_sge = 1;
ctxt->sc_send_wr.opcode = IB_WR_SEND;
- ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
- if (ret) {
- svc_rdma_send_ctxt_put(rdma, ctxt);
- return ret;
- }
-
- return 0;
+ ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len;
+ return svc_rdma_send(rdma, &ctxt->sc_send_wr);
}
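
The error path now uses the same reserve-then-fill discipline as the normal encoder: ask the stream for a fixed number of words, bail out if that would overflow, then write XID, version, credits, and the two error codes. A toy version of that pattern, with invented constants and a hand-rolled stream in place of xdr_reserve_space(), looks like this.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define HDRLEN_ERR_WORDS 5	/* xid, vers, credits, rdma_error, code */

struct xdr_stream_model {
	uint32_t *p;
	uint32_t *end;
};

/* Hand out @nwords words, or NULL if the buffer would overflow. */
static uint32_t *reserve_words(struct xdr_stream_model *xdr,
			       unsigned int nwords)
{
	uint32_t *p = xdr->p;

	if (p + nwords > xdr->end)
		return NULL;
	xdr->p += nwords;
	return p;
}

int main(void)
{
	uint32_t buf[8];
	struct xdr_stream_model xdr = { buf, buf + 8 };
	uint32_t *p = reserve_words(&xdr, HDRLEN_ERR_WORDS);

	if (!p)
		return 1;		/* caller would return -ENOMSG */

	*p++ = htonl(0x01020304);	/* XID copied from the Call */
	*p++ = htonl(1);		/* RPC-over-RDMA version */
	*p++ = htonl(64);		/* credit grant */
	*p++ = htonl(4);		/* stand-in for rdma_error */
	*p   = htonl(1);		/* stand-in for err_chunk */

	printf("encoded %u error-header words\n", HDRLEN_ERR_WORDS);
	return 0;
}
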
/**
@@ -825,14 +858,14 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
struct svcxprt_rdma *rdma =
container_of(xprt, struct svcxprt_rdma, sc_xprt);
struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
- __be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
+ __be32 *rdma_argp = rctxt->rc_recv_buf;
+ __be32 *wr_lst = rctxt->rc_write_list;
+ __be32 *rp_ch = rctxt->rc_reply_chunk;
struct xdr_buf *xdr = &rqstp->rq_res;
struct svc_rdma_send_ctxt *sctxt;
+ __be32 *p;
int ret;
- rdma_argp = rctxt->rc_recv_buf;
- svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);
-
/* Create the RDMA response header. xprt->xpt_mutex,
* acquired in svc_send(), serializes RPC replies. The
* code path below that inserts the credit grant value
@@ -843,36 +876,52 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
sctxt = svc_rdma_send_ctxt_get(rdma);
if (!sctxt)
goto err0;
- rdma_resp = sctxt->sc_xprt_buf;
- p = rdma_resp;
+ p = xdr_reserve_space(&sctxt->sc_stream,
+ rpcrdma_fixed_maxsz * sizeof(*p));
+ if (!p)
+ goto err0;
*p++ = *rdma_argp;
*p++ = *(rdma_argp + 1);
*p++ = rdma->sc_fc_credits;
- *p++ = rp_ch ? rdma_nomsg : rdma_msg;
-
- /* Start with empty chunks */
- *p++ = xdr_zero;
- *p++ = xdr_zero;
- *p = xdr_zero;
+ *p = rp_ch ? rdma_nomsg : rdma_msg;
+ if (svc_rdma_encode_read_list(sctxt) < 0)
+ goto err0;
if (wr_lst) {
/* XXX: Presume the client sent only one Write chunk */
- ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
+ unsigned long offset;
+ unsigned int length;
+
+ if (rctxt->rc_read_payload_length) {
+ offset = rctxt->rc_read_payload_offset;
+ length = rctxt->rc_read_payload_length;
+ } else {
+ offset = xdr->head[0].iov_len;
+ length = xdr->page_len;
+ }
+ ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
+ length);
if (ret < 0)
goto err2;
- svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
+ if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
+ goto err0;
+ } else {
+ if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
+ goto err0;
}
if (rp_ch) {
- ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
+ ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
if (ret < 0)
goto err2;
- svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
+ if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
+ goto err0;
+ } else {
+ if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
+ goto err0;
}
- svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
- ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp,
- wr_lst, rp_ch);
+ ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
if (ret < 0)
goto err1;
ret = 0;
@@ -900,3 +949,30 @@ out:
ret = -ENOTCONN;
goto out;
}
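
After the four fixed header words, the Reply carries three XDR lists, and even an empty one costs a word: an absent item is the xdr_zero discriminator, a present item is 1, matching generic XDR optional-data encoding. A toy encoder for the discriminator bookkeeping in the sendto path above:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Append the "absent"/terminator discriminator; returns words used. */
static int encode_item_absent(uint32_t **p)
{
	*(*p)++ = htonl(0);
	return 1;
}

/* Append the "present" discriminator ahead of an item's body. */
static int encode_item_present(uint32_t **p)
{
	*(*p)++ = htonl(1);
	return 1;
}

int main(void)
{
	uint32_t buf[8], *p = buf;
	int words = 0;

	words += encode_item_absent(&p);	/* no Read list */
	words += encode_item_present(&p);	/* one Write chunk follows */
	/* ...the Write chunk's segments would be encoded here... */
	words += encode_item_absent(&p);	/* terminate the Write list */
	words += encode_item_absent(&p);	/* no Reply chunk */

	printf("chunk-list overhead: %d words\n", words);
	return 0;
}
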
+
+/**
+ * svc_rdma_read_payload - special processing for a READ payload
+ * @rqstp: svc_rqst to operate on
+ * @offset: payload's byte offset in @rqstp's rq_res xdr_buf
+ * @length: size of payload, in bytes
+ *
+ * Returns zero on success.
+ *
+ * For the moment, just record the xdr_buf location of the READ
+ * payload. svc_rdma_sendto will use that location later when
+ * we actually send the payload.
+ */
+int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
+ unsigned int length)
+{
+ struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
+
+ /* XXX: Just one READ payload slot for now, since our
+ * transport implementation currently supports only one
+ * Write chunk.
+ */
+ rctxt->rc_read_payload_offset = offset;
+ rctxt->rc_read_payload_length = length;
+
+ return 0;
+}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 145a3615c319..8bb99980ae85 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -82,6 +82,7 @@ static const struct svc_xprt_ops svc_rdma_ops = {
.xpo_create = svc_rdma_create,
.xpo_recvfrom = svc_rdma_recvfrom,
.xpo_sendto = svc_rdma_sendto,
+ .xpo_read_payload = svc_rdma_read_payload,
.xpo_release_rqst = svc_rdma_release_rqst,
.xpo_detach = svc_rdma_detach,
.xpo_free = svc_rdma_free,
@@ -240,10 +241,6 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
struct rdma_cm_event *event)
{
- struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;
-
- trace_svcrdma_cm_event(event, sap);
-
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
@@ -265,12 +262,9 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
struct rdma_cm_event *event)
{
- struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.dst_addr;
struct svcxprt_rdma *rdma = cma_id->context;
struct svc_xprt *xprt = &rdma->sc_xprt;
- trace_svcrdma_cm_event(event, sap);
-
switch (event->event) {
case RDMA_CM_EVENT_ESTABLISHED:
/* Accept complete */
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 3cfeba68ee9a..659da37020a4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -240,9 +240,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
int rc;
- rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
+ rc = rpcrdma_xprt_connect(r_xprt);
xprt_clear_connecting(xprt);
- if (r_xprt->rx_ep.rep_connected > 0) {
+ if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) {
+ xprt->connect_cookie++;
xprt->stat.connect_count++;
xprt->stat.connect_time += (long)jiffies -
xprt->stat.connect_start;
@@ -265,7 +266,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
trace_xprtrdma_op_inject_dsc(r_xprt);
- rdma_disconnect(r_xprt->rx_ia.ri_id);
+ rdma_disconnect(r_xprt->rx_ep->re_id);
}
/**
@@ -284,9 +285,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
- rpcrdma_ep_destroy(r_xprt);
+ rpcrdma_xprt_disconnect(r_xprt);
rpcrdma_buffer_destroy(&r_xprt->rx_buf);
- rpcrdma_ia_close(&r_xprt->rx_ia);
xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
@@ -316,10 +316,15 @@ xprt_setup_rdma(struct xprt_create *args)
if (args->addrlen > sizeof(xprt->addr))
return ERR_PTR(-EBADF);
+ if (!try_module_get(THIS_MODULE))
+ return ERR_PTR(-EIO);
+
xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
xprt_rdma_slot_table_entries);
- if (!xprt)
+ if (!xprt) {
+ module_put(THIS_MODULE);
return ERR_PTR(-ENOMEM);
+ }
xprt->timeout = &xprt_rdma_default_timeout;
xprt->connect_timeout = xprt->timeout->to_initval;
@@ -347,23 +352,17 @@ xprt_setup_rdma(struct xprt_create *args)
xprt_rdma_format_addresses(xprt, sap);
new_xprt = rpcx_to_rdmax(xprt);
- rc = rpcrdma_ia_open(new_xprt);
- if (rc)
- goto out1;
-
- rc = rpcrdma_ep_create(new_xprt);
- if (rc)
- goto out2;
-
rc = rpcrdma_buffer_create(new_xprt);
- if (rc)
- goto out3;
-
- if (!try_module_get(THIS_MODULE))
- goto out4;
+ if (rc) {
+ xprt_rdma_free_addresses(xprt);
+ xprt_free(xprt);
+ module_put(THIS_MODULE);
+ return ERR_PTR(rc);
+ }
INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
xprt_rdma_connect_worker);
+
xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
dprintk("RPC: %s: %s:%s\n", __func__,
@@ -371,19 +370,6 @@ xprt_setup_rdma(struct xprt_create *args)
xprt->address_strings[RPC_DISPLAY_PORT]);
trace_xprtrdma_create(new_xprt);
return xprt;
-
-out4:
- rpcrdma_buffer_destroy(&new_xprt->rx_buf);
- rc = -ENODEV;
-out3:
- rpcrdma_ep_destroy(new_xprt);
-out2:
- rpcrdma_ia_close(&new_xprt->rx_ia);
-out1:
- trace_xprtrdma_op_destroy(new_xprt);
- xprt_rdma_free_addresses(xprt);
- xprt_free(xprt);
- return ERR_PTR(rc);
}
/**
@@ -398,26 +384,11 @@ out1:
void xprt_rdma_close(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-
- might_sleep();
trace_xprtrdma_op_close(r_xprt);
- /* Prevent marshaling and sending of new requests */
- xprt_clear_connected(xprt);
-
- if (test_and_clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags)) {
- rpcrdma_ia_remove(ia);
- goto out;
- }
-
- if (ep->rep_connected == -ENODEV)
- return;
- rpcrdma_ep_disconnect(ep, ia);
+ rpcrdma_xprt_disconnect(r_xprt);
-out:
xprt->reestablish_timeout = 0;
++xprt->connect_cookie;
xprt_disconnect_done(xprt);
@@ -517,10 +488,11 @@ static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
unsigned long delay;
delay = 0;
- if (r_xprt->rx_ep.rep_connected != 0) {
+ if (ep && ep->re_connect_status != 0) {
delay = xprt_reconnect_delay(xprt);
xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
}
@@ -694,7 +666,7 @@ xprt_rdma_send_request(struct rpc_rqst *rqst)
goto drop_connection;
rqst->rq_xtime = ktime_get();
- if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
+ if (rpcrdma_post_sends(r_xprt, req))
goto drop_connection;
rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 353f61ac8d51..cdd84c09df10 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -84,6 +84,7 @@ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
+static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
gfp_t flags);
@@ -96,17 +97,17 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
*/
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rdma_cm_id *id = r_xprt->rx_ep->re_id;
/* Flush Receives, then wait for deferred Reply work
* to complete.
*/
- ib_drain_rq(ia->ri_id->qp);
+ ib_drain_rq(id->qp);
/* Deferred Reply processing might have scheduled
* local invalidations.
*/
- ib_drain_sq(ia->ri_id->qp);
+ ib_drain_sq(id->qp);
}
/**
@@ -115,26 +116,43 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
* @context: ep that owns QP where event occurred
*
* Called from the RDMA provider (device driver) possibly in an interrupt
- * context.
+ * context. The QP is always destroyed before the ID, so the ID will be
+ * reliably available when this handler is invoked.
*/
-static void
-rpcrdma_qp_event_handler(struct ib_event *event, void *context)
+static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
struct rpcrdma_ep *ep = context;
- struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
- rx_ep);
- trace_xprtrdma_qp_event(r_xprt, event);
+ trace_xprtrdma_qp_event(ep, event);
+}
+
+/**
+ * rpcrdma_flush_disconnect - Disconnect on flushed completion
+ * @cq: completion queue
+ * @wc: work completion entry
+ *
+ * Must be called in process context.
+ */
+void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rpcrdma_xprt *r_xprt = cq->cq_context;
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+
+ if (wc->status != IB_WC_SUCCESS &&
+ r_xprt->rx_ep->re_connect_status == 1) {
+ r_xprt->rx_ep->re_connect_status = -ECONNABORTED;
+ trace_xprtrdma_flush_dct(r_xprt, wc->status);
+ xprt_force_disconnect(xprt);
+ }
}
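
rpcrdma_flush_disconnect exists because a dead connection flushes every outstanding WR, and each flush invokes a completion handler; only the first one should trip the disconnect. A model with invented status values demonstrates the once-only behavior:

#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_FLUSH_ERR };

struct ep_model {
	int connect_status;	/* 1 == connected */
	int disconnects;	/* forced-disconnect count */
};

static void flush_disconnect(struct ep_model *ep, enum wc_status status)
{
	if (status != WC_SUCCESS && ep->connect_status == 1) {
		ep->connect_status = -103;	/* models -ECONNABORTED */
		ep->disconnects++;		/* xprt_force_disconnect() */
	}
}

int main(void)
{
	struct ep_model ep = { .connect_status = 1 };

	/* A burst of flushed Send/Receive completions... */
	for (int i = 0; i < 5; i++)
		flush_disconnect(&ep, WC_FLUSH_ERR);

	/* ...still produces exactly one disconnect. */
	printf("status=%d disconnects=%d\n",
	       ep.connect_status, ep.disconnects);
	return 0;
}
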
/**
* rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
* @cq: completion queue
- * @wc: completed WR
+ * @wc: WCE for a completed Send WR
*
*/
-static void
-rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
+static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_sendctx *sc =
@@ -143,25 +161,25 @@ rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_send(sc, wc);
rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
+ rpcrdma_flush_disconnect(cq, wc);
}
/**
* rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
- * @cq: completion queue (ignored)
- * @wc: completed WR
+ * @cq: completion queue
+ * @wc: WCE for a completed Receive WR
*
*/
-static void
-rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
+static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
rr_cqe);
- struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
+ struct rpcrdma_xprt *r_xprt = cq->cq_context;
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_receive(wc);
- --r_xprt->rx_ep.rep_receive_count;
+ --r_xprt->rx_ep->re_receive_count;
if (wc->status != IB_WC_SUCCESS)
goto out_flushed;
@@ -178,35 +196,35 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
return;
out_flushed:
+ rpcrdma_flush_disconnect(cq, wc);
rpcrdma_rep_destroy(rep);
}
-static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
+static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
struct rdma_conn_param *param)
{
const struct rpcrdma_connect_private *pmsg = param->private_data;
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
unsigned int rsize, wsize;
/* Default settings for RPC-over-RDMA Version One */
- r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
+ ep->re_implicit_roundup = xprt_rdma_pad_optimize;
rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
wsize = RPCRDMA_V1_DEF_INLINE_SIZE;
if (pmsg &&
pmsg->cp_magic == rpcrdma_cmp_magic &&
pmsg->cp_version == RPCRDMA_CMP_VERSION) {
- r_xprt->rx_ia.ri_implicit_roundup = true;
+ ep->re_implicit_roundup = true;
rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
}
- if (rsize < ep->rep_inline_recv)
- ep->rep_inline_recv = rsize;
- if (wsize < ep->rep_inline_send)
- ep->rep_inline_send = wsize;
+ if (rsize < ep->re_inline_recv)
+ ep->re_inline_recv = rsize;
+ if (wsize < ep->re_inline_send)
+ ep->re_inline_send = wsize;
- rpcrdma_set_max_header_sizes(r_xprt);
+ rpcrdma_set_max_header_sizes(ep);
}
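
Negotiation via the CM private message is strictly downward: peer-advertised sizes may shrink the local inline thresholds but never grow them, and the v1 defaults apply when no valid message arrived. A sketch of that clamp (the struct and default value are assumptions; the real decode of cp_send_size/cp_recv_size is not reproduced here):

#include <stdbool.h>
#include <stdio.h>

#define V1_DEF_INLINE 4096u	/* stand-in v1 default threshold */

struct ep_model {
	unsigned int inline_send;	/* negotiated */
	unsigned int inline_recv;
};

static void update_cm_private(struct ep_model *ep, bool valid_pmsg,
			      unsigned int peer_send, unsigned int peer_recv)
{
	unsigned int rsize = V1_DEF_INLINE;
	unsigned int wsize = V1_DEF_INLINE;

	if (valid_pmsg) {		/* magic and version checked out */
		rsize = peer_send;	/* peer's send bounds our recv */
		wsize = peer_recv;	/* peer's recv bounds our send */
	}

	/* Only shrink, never grow, the locally configured values. */
	if (rsize < ep->inline_recv)
		ep->inline_recv = rsize;
	if (wsize < ep->inline_send)
		ep->inline_send = wsize;
}

int main(void)
{
	struct ep_model ep = { .inline_send = 65536, .inline_recv = 65536 };

	update_cm_private(&ep, true, 8192, 16384);
	printf("send<=%u recv<=%u\n", ep.inline_send, ep.inline_recv);
	return 0;
}
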
/**
@@ -220,116 +238,103 @@ static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
- struct rpcrdma_xprt *r_xprt = id->context;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
- struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+ struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
+ struct rpcrdma_ep *ep = id->context;
+ struct rpc_xprt *xprt = ep->re_xprt;
might_sleep();
- trace_xprtrdma_cm_event(r_xprt, event);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- ia->ri_async_rc = 0;
- complete(&ia->ri_done);
+ ep->re_async_rc = 0;
+ complete(&ep->re_done);
return 0;
case RDMA_CM_EVENT_ADDR_ERROR:
- ia->ri_async_rc = -EPROTO;
- complete(&ia->ri_done);
+ ep->re_async_rc = -EPROTO;
+ complete(&ep->re_done);
return 0;
case RDMA_CM_EVENT_ROUTE_ERROR:
- ia->ri_async_rc = -ENETUNREACH;
- complete(&ia->ri_done);
+ ep->re_async_rc = -ENETUNREACH;
+ complete(&ep->re_done);
return 0;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- pr_info("rpcrdma: removing device %s for %s:%s\n",
- ia->ri_id->device->name,
- rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
-#endif
- init_completion(&ia->ri_remove_done);
- set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
- ep->rep_connected = -ENODEV;
+ pr_info("rpcrdma: removing device %s for %pISpc\n",
+ ep->re_id->device->name, sap);
+ /* fall through */
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ ep->re_connect_status = -ENODEV;
xprt_force_disconnect(xprt);
- wait_for_completion(&ia->ri_remove_done);
-
- ia->ri_id = NULL;
- /* Return 1 to ensure the core destroys the id. */
- return 1;
+ goto disconnected;
case RDMA_CM_EVENT_ESTABLISHED:
- ++xprt->connect_cookie;
- ep->rep_connected = 1;
- rpcrdma_update_cm_private(r_xprt, &event->param.conn);
- trace_xprtrdma_inline_thresh(r_xprt);
- wake_up_all(&ep->rep_connect_wait);
+ kref_get(&ep->re_kref);
+ ep->re_connect_status = 1;
+ rpcrdma_update_cm_private(ep, &event->param.conn);
+ trace_xprtrdma_inline_thresh(ep);
+ wake_up_all(&ep->re_connect_wait);
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
- ep->rep_connected = -ENOTCONN;
+ ep->re_connect_status = -ENOTCONN;
goto disconnected;
case RDMA_CM_EVENT_UNREACHABLE:
- ep->rep_connected = -ENETUNREACH;
+ ep->re_connect_status = -ENETUNREACH;
goto disconnected;
case RDMA_CM_EVENT_REJECTED:
- dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
- rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
- rdma_reject_msg(id, event->status));
- ep->rep_connected = -ECONNREFUSED;
+ dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
+ sap, rdma_reject_msg(id, event->status));
+ ep->re_connect_status = -ECONNREFUSED;
if (event->status == IB_CM_REJ_STALE_CONN)
- ep->rep_connected = -EAGAIN;
+ ep->re_connect_status = -EAGAIN;
goto disconnected;
case RDMA_CM_EVENT_DISCONNECTED:
- ep->rep_connected = -ECONNABORTED;
+ ep->re_connect_status = -ECONNABORTED;
disconnected:
- xprt_force_disconnect(xprt);
- wake_up_all(&ep->rep_connect_wait);
- break;
+ return rpcrdma_ep_destroy(ep);
default:
break;
}
- dprintk("RPC: %s: %s:%s on %s/frwr: %s\n", __func__,
- rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
- ia->ri_id->device->name, rdma_event_msg(event->event));
+ dprintk("RPC: %s: %pISpc on %s/frwr: %s\n", __func__, sap,
+ ep->re_id->device->name, rdma_event_msg(event->event));
return 0;
}
-static struct rdma_cm_id *
-rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
+static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_ep *ep)
{
unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct rdma_cm_id *id;
int rc;
- init_completion(&ia->ri_done);
+ init_completion(&ep->re_done);
- id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
- xprt, RDMA_PS_TCP, IB_QPT_RC);
+ id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id))
return id;
- ia->ri_async_rc = -ETIMEDOUT;
- rc = rdma_resolve_addr(id, NULL,
- (struct sockaddr *)&xprt->rx_xprt.addr,
+ ep->re_async_rc = -ETIMEDOUT;
+ rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
RDMA_RESOLVE_TIMEOUT);
if (rc)
goto out;
- rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
+ rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
if (rc < 0)
goto out;
- rc = ia->ri_async_rc;
+ rc = ep->re_async_rc;
if (rc)
goto out;
- ia->ri_async_rc = -ETIMEDOUT;
+ ep->re_async_rc = -ETIMEDOUT;
rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
if (rc)
goto out;
- rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
+ rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
if (rc < 0)
goto out;
- rc = ia->ri_async_rc;
+ rc = ep->re_async_rc;
if (rc)
goto out;
@@ -340,356 +345,181 @@ out:
return ERR_PTR(rc);
}
-/*
- * Exported functions.
- */
-
-/**
- * rpcrdma_ia_open - Open and initialize an Interface Adapter.
- * @xprt: transport with IA to (re)initialize
- *
- * Returns 0 on success, negative errno if an appropriate
- * Interface Adapter could not be found and opened.
- */
-int
-rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
+static void rpcrdma_ep_put(struct kref *kref)
{
- struct rpcrdma_ia *ia = &xprt->rx_ia;
- int rc;
+ struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);
- ia->ri_id = rpcrdma_create_id(xprt, ia);
- if (IS_ERR(ia->ri_id)) {
- rc = PTR_ERR(ia->ri_id);
- goto out_err;
+ if (ep->re_id->qp) {
+ rdma_destroy_qp(ep->re_id);
+ ep->re_id->qp = NULL;
}
- ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
- if (IS_ERR(ia->ri_pd)) {
- rc = PTR_ERR(ia->ri_pd);
- pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
- goto out_err;
- }
+ if (ep->re_attr.recv_cq)
+ ib_free_cq(ep->re_attr.recv_cq);
+ ep->re_attr.recv_cq = NULL;
+ if (ep->re_attr.send_cq)
+ ib_free_cq(ep->re_attr.send_cq);
+ ep->re_attr.send_cq = NULL;
- return 0;
+ if (ep->re_pd)
+ ib_dealloc_pd(ep->re_pd);
+ ep->re_pd = NULL;
-out_err:
- rpcrdma_ia_close(ia);
- return rc;
+ kfree(ep);
+ module_put(THIS_MODULE);
}
-/**
- * rpcrdma_ia_remove - Handle device driver unload
- * @ia: interface adapter being removed
- *
- * Divest transport H/W resources associated with this adapter,
- * but allow it to be restored later.
- *
- * Caller must hold the transport send lock.
+/* Returns:
+ * %0 if @ep still has a positive kref count, or
+ * %1 if @ep was destroyed successfully.
*/
-void
-rpcrdma_ia_remove(struct rpcrdma_ia *ia)
+static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep)
{
- struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
- rx_ia);
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-
- /* This is similar to rpcrdma_ep_destroy, but:
- * - Don't cancel the connect worker.
- * - Don't call rpcrdma_ep_disconnect, which waits
- * for another conn upcall, which will deadlock.
- * - rdma_disconnect is unneeded, the underlying
- * connection is already gone.
- */
- if (ia->ri_id->qp) {
- rpcrdma_xprt_drain(r_xprt);
- rdma_destroy_qp(ia->ri_id);
- ia->ri_id->qp = NULL;
- }
- ib_free_cq(ep->rep_attr.recv_cq);
- ep->rep_attr.recv_cq = NULL;
- ib_free_cq(ep->rep_attr.send_cq);
- ep->rep_attr.send_cq = NULL;
-
- /* The ULP is responsible for ensuring all DMA
- * mappings and MRs are gone.
- */
- rpcrdma_reps_unmap(r_xprt);
- rpcrdma_reqs_reset(r_xprt);
- rpcrdma_mrs_destroy(r_xprt);
- rpcrdma_sendctxs_destroy(r_xprt);
- ib_dealloc_pd(ia->ri_pd);
- ia->ri_pd = NULL;
-
- /* Allow waiters to continue */
- complete(&ia->ri_remove_done);
-
- trace_xprtrdma_remove(r_xprt);
-}
-
-/**
- * rpcrdma_ia_close - Clean up/close an IA.
- * @ia: interface adapter to close
- *
- */
-void
-rpcrdma_ia_close(struct rpcrdma_ia *ia)
-{
- if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
- if (ia->ri_id->qp)
- rdma_destroy_qp(ia->ri_id);
- rdma_destroy_id(ia->ri_id);
- }
- ia->ri_id = NULL;
-
- /* If the pd is still busy, xprtrdma missed freeing a resource */
- if (ia->ri_pd && !IS_ERR(ia->ri_pd))
- ib_dealloc_pd(ia->ri_pd);
- ia->ri_pd = NULL;
+ return kref_put(&ep->re_kref, rpcrdma_ep_put);
}
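
The kref turns teardown ordering into reference counting: the creator holds one reference, ESTABLISHED takes another for the CM, and whichever put reaches zero runs the release. A single-threaded user-space model (a plain counter in place of the atomic struct kref):

#include <stdio.h>
#include <stdlib.h>

struct ep_model {
	int refcount;
	/* ...QP, CQs, and PD would live here... */
};

static void ep_release(struct ep_model *ep)
{
	/* Mirrors rpcrdma_ep_put: QP, CQs, PD torn down, then freed. */
	printf("releasing endpoint resources\n");
	free(ep);
}

/* Returns 1 if this put destroyed the object, like kref_put(). */
static int ep_put(struct ep_model *ep)
{
	if (--ep->refcount == 0) {
		ep_release(ep);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct ep_model *ep = calloc(1, sizeof(*ep));

	if (!ep)
		return 1;
	ep->refcount = 1;	/* creation reference (kref_init) */
	ep->refcount++;		/* ESTABLISHED takes the CM's reference */

	ep_put(ep);		/* disconnect drops the CM reference */
	if (ep_put(ep))		/* final put frees everything */
		printf("endpoint destroyed\n");
	return 0;
}
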
-/**
- * rpcrdma_ep_create - Create unconnected endpoint
- * @r_xprt: transport to instantiate
- *
- * Returns zero on success, or a negative errno.
- */
-int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
+static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
- struct ib_cq *sendcq, *recvcq;
+ struct rpcrdma_connect_private *pmsg;
+ struct ib_device *device;
+ struct rdma_cm_id *id;
+ struct rpcrdma_ep *ep;
int rc;
- ep->rep_max_requests = r_xprt->rx_xprt.max_reqs;
- ep->rep_inline_send = xprt_rdma_max_inline_write;
- ep->rep_inline_recv = xprt_rdma_max_inline_read;
+ ep = kzalloc(sizeof(*ep), GFP_NOFS);
+ if (!ep)
+ return -EAGAIN;
+ ep->re_xprt = &r_xprt->rx_xprt;
+ kref_init(&ep->re_kref);
- rc = frwr_query_device(r_xprt, ia->ri_id->device);
+ id = rpcrdma_create_id(r_xprt, ep);
+ if (IS_ERR(id)) {
+ rc = PTR_ERR(id);
+ goto out_free;
+ }
+ __module_get(THIS_MODULE);
+ device = id->device;
+ ep->re_id = id;
+
+ ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
+ ep->re_inline_send = xprt_rdma_max_inline_write;
+ ep->re_inline_recv = xprt_rdma_max_inline_read;
+ rc = frwr_query_device(ep, device);
if (rc)
- return rc;
- r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->rep_max_requests);
+ goto out_destroy;
+
+ r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);
- ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
- ep->rep_attr.qp_context = ep;
- ep->rep_attr.srq = NULL;
- ep->rep_attr.cap.max_inline_data = 0;
- ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
- ep->rep_attr.qp_type = IB_QPT_RC;
- ep->rep_attr.port_num = ~0;
+ ep->re_attr.event_handler = rpcrdma_qp_event_handler;
+ ep->re_attr.qp_context = ep;
+ ep->re_attr.srq = NULL;
+ ep->re_attr.cap.max_inline_data = 0;
+ ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ ep->re_attr.qp_type = IB_QPT_RC;
+ ep->re_attr.port_num = ~0;
dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
"iovs: send %d recv %d\n",
__func__,
- ep->rep_attr.cap.max_send_wr,
- ep->rep_attr.cap.max_recv_wr,
- ep->rep_attr.cap.max_send_sge,
- ep->rep_attr.cap.max_recv_sge);
-
- ep->rep_send_batch = ep->rep_max_requests >> 3;
- ep->rep_send_count = ep->rep_send_batch;
- init_waitqueue_head(&ep->rep_connect_wait);
- ep->rep_receive_count = 0;
-
- sendcq = ib_alloc_cq_any(ia->ri_id->device, r_xprt,
- ep->rep_attr.cap.max_send_wr + 1,
- IB_POLL_WORKQUEUE);
- if (IS_ERR(sendcq)) {
- rc = PTR_ERR(sendcq);
- goto out1;
+ ep->re_attr.cap.max_send_wr,
+ ep->re_attr.cap.max_recv_wr,
+ ep->re_attr.cap.max_send_sge,
+ ep->re_attr.cap.max_recv_sge);
+
+ ep->re_send_batch = ep->re_max_requests >> 3;
+ ep->re_send_count = ep->re_send_batch;
+ init_waitqueue_head(&ep->re_connect_wait);
+
+ ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
+ ep->re_attr.cap.max_send_wr,
+ IB_POLL_WORKQUEUE);
+ if (IS_ERR(ep->re_attr.send_cq)) {
+ rc = PTR_ERR(ep->re_attr.send_cq);
+ goto out_destroy;
}
- recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
- ep->rep_attr.cap.max_recv_wr + 1,
- IB_POLL_WORKQUEUE);
- if (IS_ERR(recvcq)) {
- rc = PTR_ERR(recvcq);
- goto out2;
+ ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
+ ep->re_attr.cap.max_recv_wr,
+ IB_POLL_WORKQUEUE);
+ if (IS_ERR(ep->re_attr.recv_cq)) {
+ rc = PTR_ERR(ep->re_attr.recv_cq);
+ goto out_destroy;
}
-
- ep->rep_attr.send_cq = sendcq;
- ep->rep_attr.recv_cq = recvcq;
+ ep->re_receive_count = 0;
/* Initialize cma parameters */
- memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));
+ memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));
/* Prepare RDMA-CM private message */
+ pmsg = &ep->re_cm_private;
pmsg->cp_magic = rpcrdma_cmp_magic;
pmsg->cp_version = RPCRDMA_CMP_VERSION;
pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
- pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
- pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
- ep->rep_remote_cma.private_data = pmsg;
- ep->rep_remote_cma.private_data_len = sizeof(*pmsg);
+ pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
+ pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
+ ep->re_remote_cma.private_data = pmsg;
+ ep->re_remote_cma.private_data_len = sizeof(*pmsg);
/* Client offers RDMA Read but does not initiate */
- ep->rep_remote_cma.initiator_depth = 0;
- ep->rep_remote_cma.responder_resources =
- min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);
+ ep->re_remote_cma.initiator_depth = 0;
+ ep->re_remote_cma.responder_resources =
+ min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);
/* Limit transport retries so client can detect server
* GID changes quickly. RPC layer handles re-establishing
* transport connection and retransmission.
*/
- ep->rep_remote_cma.retry_count = 6;
+ ep->re_remote_cma.retry_count = 6;
/* RPC-over-RDMA handles its own flow control. In addition,
* make all RNR NAKs visible so we know that RPC-over-RDMA
* flow control is working correctly (no NAKs should be seen).
*/
- ep->rep_remote_cma.flow_control = 0;
- ep->rep_remote_cma.rnr_retry_count = 0;
+ ep->re_remote_cma.flow_control = 0;
+ ep->re_remote_cma.rnr_retry_count = 0;
- return 0;
-
-out2:
- ib_free_cq(sendcq);
-out1:
- return rc;
-}
-
-/**
- * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
- * @r_xprt: transport instance to shut down
- *
- */
-void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-
- if (ia->ri_id && ia->ri_id->qp) {
- rpcrdma_ep_disconnect(ep, ia);
- rdma_destroy_qp(ia->ri_id);
- ia->ri_id->qp = NULL;
- }
-
- if (ep->rep_attr.recv_cq)
- ib_free_cq(ep->rep_attr.recv_cq);
- if (ep->rep_attr.send_cq)
- ib_free_cq(ep->rep_attr.send_cq);
-}
-
-/* Re-establish a connection after a device removal event.
- * Unlike a normal reconnection, a fresh PD and a new set
- * of MRs and buffers is needed.
- */
-static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
- int rc, err;
-
- trace_xprtrdma_reinsert(r_xprt);
-
- rc = -EHOSTUNREACH;
- if (rpcrdma_ia_open(r_xprt))
- goto out1;
-
- rc = -ENOMEM;
- err = rpcrdma_ep_create(r_xprt);
- if (err) {
- pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
- goto out2;
- }
- memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr));
-
- rc = -ENETUNREACH;
- err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
- if (err) {
- pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
- goto out3;
- }
- return 0;
-
-out3:
- rpcrdma_ep_destroy(r_xprt);
-out2:
- rpcrdma_ia_close(ia);
-out1:
- return rc;
-}
-
-static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rdma_cm_id *id, *old;
- int err, rc;
-
- rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);
-
- rc = -EHOSTUNREACH;
- id = rpcrdma_create_id(r_xprt, ia);
- if (IS_ERR(id))
- goto out;
-
- /* As long as the new ID points to the same device as the
- * old ID, we can reuse the transport's existing PD and all
- * previously allocated MRs. Also, the same device means
- * the transport's previous DMA mappings are still valid.
- *
- * This is a sanity check only. There should be no way these
- * point to two different devices here.
- */
- old = id;
- rc = -ENETUNREACH;
- if (ia->ri_id->device != id->device) {
- pr_err("rpcrdma: can't reconnect on different device!\n");
+ ep->re_pd = ib_alloc_pd(device, 0);
+ if (IS_ERR(ep->re_pd)) {
+ rc = PTR_ERR(ep->re_pd);
goto out_destroy;
}
- err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
- if (err)
+ rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
+ if (rc)
goto out_destroy;
- /* Atomically replace the transport's ID and QP. */
- rc = 0;
- old = ia->ri_id;
- ia->ri_id = id;
- rdma_destroy_qp(old);
+ r_xprt->rx_ep = ep;
+ return 0;
out_destroy:
- rdma_destroy_id(old);
-out:
+ rpcrdma_ep_destroy(ep);
+ rdma_destroy_id(id);
+out_free:
+ kfree(ep);
+ r_xprt->rx_ep = NULL;
return rc;
}
-/*
- * Connect unconnected endpoint.
+/**
+ * rpcrdma_xprt_connect - Connect an unconnected transport
+ * @r_xprt: controlling transport instance
+ *
+ * Returns 0 on success or a negative errno.
*/
-int
-rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
+int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
{
- struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
- rx_ia);
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
- struct ib_qp_init_attr qp_init_attr;
+ struct rpcrdma_ep *ep;
int rc;
retry:
- memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
- switch (ep->rep_connected) {
- case 0:
- rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
- if (rc) {
- rc = -ENETUNREACH;
- goto out_noupdate;
- }
- break;
- case -ENODEV:
- rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr);
- if (rc)
- goto out_noupdate;
- break;
- default:
- rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr);
- if (rc)
- goto out;
- }
+ rpcrdma_xprt_disconnect(r_xprt);
+ rc = rpcrdma_ep_create(r_xprt);
+ if (rc)
+ return rc;
+ ep = r_xprt->rx_ep;
- ep->rep_connected = 0;
+ ep->re_connect_status = 0;
xprt_clear_connected(xprt);
rpcrdma_reset_cwnd(r_xprt);
@@ -699,64 +529,68 @@ retry:
if (rc)
goto out;
- rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
+ rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
if (rc)
goto out;
if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
- wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
- if (ep->rep_connected <= 0) {
- if (ep->rep_connected == -EAGAIN)
+ wait_event_interruptible(ep->re_connect_wait,
+ ep->re_connect_status != 0);
+ if (ep->re_connect_status <= 0) {
+ if (ep->re_connect_status == -EAGAIN)
goto retry;
- rc = ep->rep_connected;
+ rc = ep->re_connect_status;
goto out;
}
rc = rpcrdma_reqs_setup(r_xprt);
if (rc) {
- rpcrdma_ep_disconnect(ep, ia);
+ rpcrdma_xprt_disconnect(r_xprt);
goto out;
}
rpcrdma_mrs_create(r_xprt);
out:
if (rc)
- ep->rep_connected = rc;
-
-out_noupdate:
+ ep->re_connect_status = rc;
trace_xprtrdma_connect(r_xprt, rc);
return rc;
}
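
rpcrdma_xprt_connect now always builds a fresh endpoint, then sleeps until the CM event handler publishes a status; only a stale-connection rejection (-EAGAIN) restarts the loop. The control flow, with the handshake stubbed out and the errno value modeled:

#include <stdio.h>

#define STALE_CONN (-11)	/* models -EAGAIN from a stale connection */

/* Stub for the CM handshake: fail once as stale, then succeed.
 * The real code sleeps on re_connect_wait until the CM event
 * handler fills in re_connect_status. */
static int fake_handshake(void)
{
	static int calls;

	return (++calls == 1) ? STALE_CONN : 1;
}

static int xprt_connect(void)
{
	int status;

retry:
	/* (tear down any old endpoint, build a fresh one here) */
	status = fake_handshake();
	if (status <= 0) {
		if (status == STALE_CONN) {
			printf("stale connection, retrying\n");
			goto retry;
		}
		return status;		/* hard failure: return errno */
	}
	return 0;			/* connected */
}

int main(void)
{
	printf("connect rc=%d\n", xprt_connect());
	return 0;
}
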
/**
- * rpcrdma_ep_disconnect - Disconnect underlying transport
- * @ep: endpoint to disconnect
- * @ia: associated interface adapter
+ * rpcrdma_xprt_disconnect - Disconnect underlying transport
+ * @r_xprt: controlling transport instance
*
* Caller serializes. Either the transport send lock is held,
* or we're being called to destroy the transport.
+ *
+ * On return, @r_xprt is completely divested of all hardware
+ * resources and prepared for the next ->connect operation.
*/
-void
-rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
+void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
{
- struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
- rx_ep);
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ struct rdma_cm_id *id;
int rc;
- /* returns without wait if ID is not connected */
- rc = rdma_disconnect(ia->ri_id);
- if (!rc)
- wait_event_interruptible(ep->rep_connect_wait,
- ep->rep_connected != 1);
- else
- ep->rep_connected = rc;
+ if (!ep)
+ return;
+
+ id = ep->re_id;
+ rc = rdma_disconnect(id);
trace_xprtrdma_disconnect(r_xprt, rc);
rpcrdma_xprt_drain(r_xprt);
+ rpcrdma_reps_unmap(r_xprt);
rpcrdma_reqs_reset(r_xprt);
rpcrdma_mrs_destroy(r_xprt);
rpcrdma_sendctxs_destroy(r_xprt);
+
+ if (rpcrdma_ep_destroy(ep))
+ rdma_destroy_id(id);
+
+ r_xprt->rx_ep = NULL;
}
/* Fixed-size circular FIFO queue. This implementation is wait-free and
@@ -793,7 +627,7 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
{
struct rpcrdma_sendctx *sc;
- sc = kzalloc(struct_size(sc, sc_sges, ep->rep_attr.cap.max_send_sge),
+ sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
GFP_KERNEL);
if (!sc)
return NULL;
@@ -813,14 +647,14 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
* the ->send_request call to fail temporarily before too many
* Sends are posted.
*/
- i = r_xprt->rx_ep.rep_max_requests + RPCRDMA_MAX_BC_REQUESTS;
+ i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
if (!buf->rb_sc_ctxs)
return -ENOMEM;
buf->rb_sc_last = i - 1;
for (i = 0; i <= buf->rb_sc_last; i++) {
- sc = rpcrdma_sendctx_create(&r_xprt->rx_ep);
+ sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
if (!sc)
return -ENOMEM;
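
The send-context pool sized here feeds the fixed-size circular FIFO noted in the context above. The ring below models the idea with a generic single-producer/single-consumer scheme — the names and the empty/full tests are the sketch's own, not a copy of the kernel's rb_sc_head/rb_sc_tail logic:

#include <stdio.h>

#define NCTXS 4	/* a ring of NCTXS slots holds NCTXS - 1 items */

struct ring {
	unsigned long head;	/* next slot to produce into */
	unsigned long tail;	/* next slot to consume */
	int ctxs[NCTXS];
};

static unsigned long ring_next(unsigned long i)
{
	return i == NCTXS - 1 ? 0 : i + 1;	/* wrap without modulo */
}

/* Producer: returns 0 when the ring is full. */
static int ring_put(struct ring *r, int v)
{
	unsigned long next = ring_next(r->head);

	if (next == r->tail)
		return 0;
	r->ctxs[r->head] = v;
	r->head = next;
	return 1;
}

/* Consumer: returns 0 when the ring is empty. */
static int ring_get(struct ring *r, int *out)
{
	if (r->tail == r->head)
		return 0;
	*out = r->ctxs[r->tail];
	r->tail = ring_next(r->tail);
	return 1;
}

int main(void)
{
	struct ring r = { 0 };
	int v;

	for (int i = 1; ring_put(&r, i * 10); i++)
		;			/* fill: 10, 20, 30 */
	while (ring_get(&r, &v))
		printf("got %d\n", v);
	return 0;
}
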
@@ -924,10 +758,10 @@ static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
unsigned int count;
- for (count = 0; count < ia->ri_max_rdma_segs; count++) {
+ for (count = 0; count < ep->re_max_rdma_segs; count++) {
struct rpcrdma_mr *mr;
int rc;
@@ -935,14 +769,12 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
if (!mr)
break;
- rc = frwr_init_mr(ia, mr);
+ rc = frwr_mr_init(r_xprt, mr);
if (rc) {
kfree(mr);
break;
}
- mr->mr_xprt = r_xprt;
-
spin_lock(&buf->rb_lock);
rpcrdma_mr_push(mr, &buf->rb_mrs);
list_add(&mr->mr_all, &buf->rb_all_mrs);
@@ -973,12 +805,12 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
- /* If there is no underlying device, it's no use to
- * wake the refresh worker.
+ /* If there is no underlying connection, it's no use
+ * to wake the refresh worker.
*/
- if (ep->rep_connected != -ENODEV) {
+ if (ep->re_connect_status == 1) {
/* The work is scheduled on a WQ_MEM_RECLAIM
* workqueue in order to prevent MR allocation
* from recursing into NFS during direct reclaim.
@@ -1042,7 +874,7 @@ int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
/* Compute maximum header buffer size in bytes */
maxhdrsize = rpcrdma_fixed_maxsz + 3 +
- r_xprt->rx_ia.ri_max_rdma_segs * rpcrdma_readchunk_maxsz;
+ r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
maxhdrsize *= sizeof(__be32);
rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
DMA_TO_DEVICE, GFP_KERNEL);
@@ -1120,7 +952,7 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
if (rep == NULL)
goto out;
- rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv,
+ rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
DMA_FROM_DEVICE, GFP_KERNEL);
if (!rep->rr_rdmabuf)
goto out_free;
@@ -1345,7 +1177,7 @@ void rpcrdma_mr_put(struct rpcrdma_mr *mr)
if (mr->mr_dir != DMA_NONE) {
trace_xprtrdma_mr_unmap(mr);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
+ ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
mr->mr_sg, mr->mr_nents, mr->mr_dir);
mr->mr_dir = DMA_NONE;
}
@@ -1463,7 +1295,7 @@ bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_regbuf *rb)
{
- struct ib_device *device = r_xprt->rx_ia.ri_id->device;
+ struct ib_device *device = r_xprt->rx_ep->re_id->device;
if (rb->rg_direction == DMA_NONE)
return false;
@@ -1476,7 +1308,7 @@ bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
}
rb->rg_device = device;
- rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
+ rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
return true;
}
@@ -1502,31 +1334,28 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
}
/**
- * rpcrdma_ep_post - Post WRs to a transport's Send Queue
- * @ia: transport's device information
- * @ep: transport's RDMA endpoint information
+ * rpcrdma_post_sends - Post WRs to a transport's Send Queue
+ * @r_xprt: controlling transport instance
* @req: rpcrdma_req containing the Send WR to post
*
* Returns 0 if the post was successful, otherwise -ENOTCONN
* is returned.
*/
-int
-rpcrdma_ep_post(struct rpcrdma_ia *ia,
- struct rpcrdma_ep *ep,
- struct rpcrdma_req *req)
+int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
struct ib_send_wr *send_wr = &req->rl_wr;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
int rc;
- if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
+ if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) {
send_wr->send_flags |= IB_SEND_SIGNALED;
- ep->rep_send_count = ep->rep_send_batch;
+ ep->re_send_count = ep->re_send_batch;
} else {
send_wr->send_flags &= ~IB_SEND_SIGNALED;
- --ep->rep_send_count;
+ --ep->re_send_count;
}
- rc = frwr_send(ia, req);
+ rc = frwr_send(r_xprt, req);
trace_xprtrdma_post_send(req, rc);
if (rc)
return -ENOTCONN;
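
The batching in rpcrdma_post_sends keeps Send completions rare: only every re_send_batch-th Send is signaled, unless the request is still referenced elsewhere and must be fenced by a completion. The same counter logic, modeled in user space:

#include <stdbool.h>
#include <stdio.h>

struct ep_model {
	unsigned int send_count;	/* sends left in this batch */
	unsigned int send_batch;	/* e.g. max_requests >> 3 */
};

/* Returns true when this Send WR would be posted IB_SEND_SIGNALED. */
static bool post_send(struct ep_model *ep, bool req_still_referenced)
{
	if (!ep->send_count || req_still_referenced) {
		ep->send_count = ep->send_batch;	/* restart batch */
		return true;
	}
	--ep->send_count;
	return false;
}

int main(void)
{
	struct ep_model ep = { .send_count = 0, .send_batch = 4 };

	for (int i = 0; i < 10; i++)
		printf("send %d: %s\n", i,
		       post_send(&ep, false) ? "signaled" : "unsignaled");
	return 0;
}
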
@@ -1542,7 +1371,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
struct ib_recv_wr *wr, *bad_wr;
struct rpcrdma_rep *rep;
int needed, count, rc;
@@ -1551,9 +1380,9 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
count = 0;
needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
- if (likely(ep->rep_receive_count > needed))
+ if (likely(ep->re_receive_count > needed))
goto out;
- needed -= ep->rep_receive_count;
+ needed -= ep->re_receive_count;
if (!temp)
needed += RPCRDMA_MAX_RECV_BATCH;
@@ -1579,7 +1408,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
if (!wr)
goto out;
- rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
+ rc = ib_post_recv(ep->re_id->qp, wr,
(const struct ib_recv_wr **)&bad_wr);
out:
trace_xprtrdma_post_recvs(r_xprt, count, rc);
@@ -1593,6 +1422,6 @@ out:
--count;
}
}
- ep->rep_receive_count += count;
+ ep->re_receive_count += count;
return;
}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 37d5080c250b..0a16fdb09b2c 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -65,43 +65,33 @@
#define RPCRDMA_IDLE_DISC_TO (5U * 60 * HZ)
/*
- * Interface Adapter -- one per transport instance
+ * RDMA Endpoint -- connection endpoint details
*/
-struct rpcrdma_ia {
- struct rdma_cm_id *ri_id;
- struct ib_pd *ri_pd;
- int ri_async_rc;
- unsigned int ri_max_rdma_segs;
- unsigned int ri_max_frwr_depth;
- bool ri_implicit_roundup;
- enum ib_mr_type ri_mrtype;
- unsigned long ri_flags;
- struct completion ri_done;
- struct completion ri_remove_done;
-};
-
-enum {
- RPCRDMA_IAF_REMOVING = 0,
-};
-
-/*
- * RDMA Endpoint -- one per transport instance
- */
-
struct rpcrdma_ep {
- unsigned int rep_send_count;
- unsigned int rep_send_batch;
- unsigned int rep_max_inline_send;
- unsigned int rep_max_inline_recv;
- int rep_connected;
- struct ib_qp_init_attr rep_attr;
- wait_queue_head_t rep_connect_wait;
- struct rpcrdma_connect_private rep_cm_private;
- struct rdma_conn_param rep_remote_cma;
- unsigned int rep_max_requests; /* depends on device */
- unsigned int rep_inline_send; /* negotiated */
- unsigned int rep_inline_recv; /* negotiated */
- int rep_receive_count;
+ struct kref re_kref;
+ struct rdma_cm_id *re_id;
+ struct ib_pd *re_pd;
+ unsigned int re_max_rdma_segs;
+ unsigned int re_max_fr_depth;
+ bool re_implicit_roundup;
+ enum ib_mr_type re_mrtype;
+ struct completion re_done;
+ unsigned int re_send_count;
+ unsigned int re_send_batch;
+ unsigned int re_max_inline_send;
+ unsigned int re_max_inline_recv;
+ int re_async_rc;
+ int re_connect_status;
+ struct ib_qp_init_attr re_attr;
+ wait_queue_head_t re_connect_wait;
+ struct rpc_xprt *re_xprt;
+ struct rpcrdma_connect_private
+ re_cm_private;
+ struct rdma_conn_param re_remote_cma;
+ int re_receive_count;
+ unsigned int re_max_requests; /* depends on device */
+ unsigned int re_inline_send; /* negotiated */
+ unsigned int re_inline_recv; /* negotiated */
};
/* Pre-allocate extra Work Requests for handling backward receives
@@ -422,8 +412,7 @@ struct rpcrdma_stats {
*/
struct rpcrdma_xprt {
struct rpc_xprt rx_xprt;
- struct rpcrdma_ia rx_ia;
- struct rpcrdma_ep rx_ep;
+ struct rpcrdma_ep *rx_ep;
struct rpcrdma_buffer rx_buf;
struct delayed_work rx_connect_worker;
struct rpc_timeout rx_timeout;
@@ -455,22 +444,13 @@ extern int xprt_rdma_pad_optimize;
extern unsigned int xprt_rdma_memreg_strategy;
/*
- * Interface Adapter calls - xprtrdma/verbs.c
- */
-int rpcrdma_ia_open(struct rpcrdma_xprt *xprt);
-void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
-void rpcrdma_ia_close(struct rpcrdma_ia *);
-
-/*
* Endpoint calls - xprtrdma/verbs.c
*/
-int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt);
-void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt);
-int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
-void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc);
+int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
+void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
-int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
- struct rpcrdma_req *);
+int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
/*
@@ -536,15 +516,14 @@ rpcrdma_data_dir(bool writing)
/* Memory registration calls xprtrdma/frwr_ops.c
*/
void frwr_reset(struct rpcrdma_req *req);
-int frwr_query_device(struct rpcrdma_xprt *r_xprt,
- const struct ib_device *device);
-int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
+int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device);
+int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr);
void frwr_release_mr(struct rpcrdma_mr *mr);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, __be32 xid,
struct rpcrdma_mr *mr);
-int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
+int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
@@ -569,7 +548,7 @@ int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
enum rpcrdma_chunktype rtype);
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
-void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
+void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep);
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
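Note: the xprtrdma hunks above fold the old per-connection "interface adapter"
(struct rpcrdma_ia) into struct rpcrdma_ep, so the IA/EP call pairs collapse
into single per-transport entry points. A minimal sketch of the calling
pattern implied by the new prototypes (the surrounding transport code and the
error handling are assumed for illustration, not taken from the patch):

	struct rpcrdma_xprt *r_xprt = ...;	/* established elsewhere */
	int rc;

	rc = rpcrdma_xprt_connect(r_xprt);	/* replaces ia_open + ep_create + ep_connect */
	if (rc)
		return rc;
	rc = rpcrdma_post_sends(r_xprt, req);	/* was rpcrdma_ep_post(ia, ep, req) */
	...
	rpcrdma_xprt_disconnect(r_xprt);	/* replaces ep_disconnect + ep_destroy + ia_close */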
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d86c664ea6af..0bda8a73e8a8 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -54,6 +54,7 @@
#include <trace/events/sunrpc.h>
+#include "socklib.h"
#include "sunrpc.h"
static void xs_close(struct rpc_xprt *xprt);
@@ -749,125 +750,6 @@ xs_stream_start_connect(struct sock_xprt *transport)
#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
-static int xs_sendmsg(struct socket *sock, struct msghdr *msg, size_t seek)
-{
- if (seek)
- iov_iter_advance(&msg->msg_iter, seek);
- return sock_sendmsg(sock, msg);
-}
-
-static int xs_send_kvec(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t seek)
-{
- iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
- return xs_sendmsg(sock, msg, seek);
-}
-
-static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_buf *xdr, size_t base)
-{
- int err;
-
- err = xdr_alloc_bvec(xdr, GFP_KERNEL);
- if (err < 0)
- return err;
-
- iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec,
- xdr_buf_pagecount(xdr),
- xdr->page_len + xdr->page_base);
- return xs_sendmsg(sock, msg, base + xdr->page_base);
-}
-
-#define xs_record_marker_len() sizeof(rpc_fraghdr)
-
-/* Common case:
- * - stream transport
- * - sending from byte 0 of the message
- * - the message is wholly contained in @xdr's head iovec
- */
-static int xs_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
- rpc_fraghdr marker, struct kvec *vec, size_t base)
-{
- struct kvec iov[2] = {
- [0] = {
- .iov_base = &marker,
- .iov_len = sizeof(marker)
- },
- [1] = *vec,
- };
- size_t len = iov[0].iov_len + iov[1].iov_len;
-
- iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
- return xs_sendmsg(sock, msg, base);
-}
-
-/**
- * xs_sendpages - write pages directly to a socket
- * @sock: socket to send on
- * @addr: UDP only -- address of destination
- * @addrlen: UDP only -- length of destination address
- * @xdr: buffer containing this request
- * @base: starting position in the buffer
- * @rm: stream record marker field
- * @sent_p: return the total number of bytes successfully queued for sending
- *
- */
-static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, rpc_fraghdr rm, int *sent_p)
-{
- struct msghdr msg = {
- .msg_name = addr,
- .msg_namelen = addrlen,
- .msg_flags = XS_SENDMSG_FLAGS | MSG_MORE,
- };
- unsigned int rmsize = rm ? sizeof(rm) : 0;
- unsigned int remainder = rmsize + xdr->len - base;
- unsigned int want;
- int err = 0;
-
- if (unlikely(!sock))
- return -ENOTSOCK;
-
- want = xdr->head[0].iov_len + rmsize;
- if (base < want) {
- unsigned int len = want - base;
- remainder -= len;
- if (remainder == 0)
- msg.msg_flags &= ~MSG_MORE;
- if (rmsize)
- err = xs_send_rm_and_kvec(sock, &msg, rm,
- &xdr->head[0], base);
- else
- err = xs_send_kvec(sock, &msg, &xdr->head[0], base);
- if (remainder == 0 || err != len)
- goto out;
- *sent_p += err;
- base = 0;
- } else
- base -= want;
-
- if (base < xdr->page_len) {
- unsigned int len = xdr->page_len - base;
- remainder -= len;
- if (remainder == 0)
- msg.msg_flags &= ~MSG_MORE;
- err = xs_send_pagedata(sock, &msg, xdr, base);
- if (remainder == 0 || err != len)
- goto out;
- *sent_p += err;
- base = 0;
- } else
- base -= xdr->page_len;
-
- if (base >= xdr->tail[0].iov_len)
- return 0;
- msg.msg_flags &= ~MSG_MORE;
- err = xs_send_kvec(sock, &msg, &xdr->tail[0], base);
-out:
- if (err > 0) {
- *sent_p += err;
- err = 0;
- }
- return err;
-}
-
/**
* xs_nospace - handle transmit was incomplete
* @req: pointer to RPC request
@@ -959,8 +841,11 @@ static int xs_local_send_request(struct rpc_rqst *req)
struct xdr_buf *xdr = &req->rq_snd_buf;
rpc_fraghdr rm = xs_stream_record_marker(xdr);
unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
+ struct msghdr msg = {
+ .msg_flags = XS_SENDMSG_FLAGS,
+ };
+ unsigned int uninitialized_var(sent);
int status;
- int sent = 0;
/* Close the stream if the previous transmission was incomplete */
if (xs_send_request_was_aborted(transport, req)) {
@@ -972,8 +857,8 @@ static int xs_local_send_request(struct rpc_rqst *req)
req->rq_svec->iov_base, req->rq_svec->iov_len);
req->rq_xtime = ktime_get();
- status = xs_sendpages(transport->sock, NULL, 0, xdr,
- transport->xmit.offset, rm, &sent);
+ status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
+ transport->xmit.offset, rm, &sent);
dprintk("RPC: %s(%u) = %d\n",
__func__, xdr->len - transport->xmit.offset, status);
@@ -1025,7 +910,12 @@ static int xs_udp_send_request(struct rpc_rqst *req)
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *xdr = &req->rq_snd_buf;
- int sent = 0;
+ struct msghdr msg = {
+ .msg_name = xs_addr(xprt),
+ .msg_namelen = xprt->addrlen,
+ .msg_flags = XS_SENDMSG_FLAGS,
+ };
+ unsigned int uninitialized_var(sent);
int status;
xs_pktdump("packet data:",
@@ -1039,8 +929,7 @@ static int xs_udp_send_request(struct rpc_rqst *req)
return -EBADSLT;
req->rq_xtime = ktime_get();
- status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
- xdr, 0, 0, &sent);
+ status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
dprintk("RPC: xs_udp_send_request(%u) = %d\n",
xdr->len, status);
@@ -1106,9 +995,12 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
struct xdr_buf *xdr = &req->rq_snd_buf;
rpc_fraghdr rm = xs_stream_record_marker(xdr);
unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
+ struct msghdr msg = {
+ .msg_flags = XS_SENDMSG_FLAGS,
+ };
bool vm_wait = false;
+ unsigned int uninitialized_var(sent);
int status;
- int sent;
/* Close the stream if the previous transmission was incomplete */
if (xs_send_request_was_aborted(transport, req)) {
@@ -1129,9 +1021,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
* called sendmsg(). */
req->rq_xtime = ktime_get();
while (1) {
- sent = 0;
- status = xs_sendpages(transport->sock, NULL, 0, xdr,
- transport->xmit.offset, rm, &sent);
+ status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
+ transport->xmit.offset, rm, &sent);
dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
xdr->len - transport->xmit.offset, status);
@@ -1970,7 +1861,7 @@ static int xs_local_setup_socket(struct sock_xprt *transport)
struct rpc_xprt *xprt = &transport->xprt;
struct file *filp;
struct socket *sock;
- int status = -EIO;
+ int status;
status = __sock_create(xprt->xprt_net, AF_LOCAL,
SOCK_STREAM, 0, &sock, 1);
@@ -2636,46 +2527,25 @@ static void bc_free(struct rpc_task *task)
free_page((unsigned long)buf);
}
-/*
- * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
- * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
- */
static int bc_sendto(struct rpc_rqst *req)
{
- int len;
- struct xdr_buf *xbufp = &req->rq_snd_buf;
+ struct xdr_buf *xdr = &req->rq_snd_buf;
struct sock_xprt *transport =
container_of(req->rq_xprt, struct sock_xprt, xprt);
- unsigned long headoff;
- unsigned long tailoff;
- struct page *tailpage;
struct msghdr msg = {
- .msg_flags = MSG_MORE
+ .msg_flags = 0,
};
rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
- (u32)xbufp->len);
- struct kvec iov = {
- .iov_base = &marker,
- .iov_len = sizeof(marker),
- };
+ (u32)xdr->len);
+ unsigned int sent = 0;
+ int err;
req->rq_xtime = ktime_get();
-
- len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len);
- if (len != iov.iov_len)
+ err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
+ xdr_free_bvec(xdr);
+ if (err < 0 || sent != (xdr->len + sizeof(marker)))
return -EAGAIN;
-
- tailpage = NULL;
- if (xbufp->tail[0].iov_len)
- tailpage = virt_to_page(xbufp->tail[0].iov_base);
- tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
- headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
- len = svc_send_common(transport->sock, xbufp,
- virt_to_page(xbufp->head[0].iov_base), headoff,
- tailpage, tailoff);
- if (len != xbufp->len)
- return -EAGAIN;
- return len;
+ return sent;
}
/*
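Note: all three send paths above (AF_LOCAL, UDP, TCP) plus bc_sendto() now
funnel into xprt_sock_sendmsg() from the new net/sunrpc/socklib.c. Its
prototype is not part of this diff; the signature below is inferred from the
call sites, so treat it as a sketch:

	/* replaces the removed xs_sendpages(); @base resumes a partially
	 * transmitted message, @marker is the stream record marker (0 for
	 * UDP), and *sent_p accumulates the bytes queued for sending */
	int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			      struct xdr_buf *xdr, unsigned int base,
			      rpc_fraghdr marker, unsigned int *sent_p);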
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 156efce50dbd..0e989005bdc2 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -56,9 +56,9 @@ enum {
TLS_NUM_PROTS,
};
-static struct proto *saved_tcpv6_prot;
+static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
-static struct proto *saved_tcpv4_prot;
+static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
index 61cbc304a3d3..1a29cd69d6cf 100644
--- a/net/wireless/.gitignore
+++ b/net/wireless/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
shipped-certs.c
extra-certs.c
diff --git a/samples/auxdisplay/.gitignore b/samples/auxdisplay/.gitignore
index 7af222860a96..2ed744c0e741 100644
--- a/samples/auxdisplay/.gitignore
+++ b/samples/auxdisplay/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
cfag12864b-example
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 74d31fd3c99c..23837f2ed458 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
cpustat
fds_example
hbm
diff --git a/samples/connector/.gitignore b/samples/connector/.gitignore
index d2b9c32accd4..d86f2ff9c947 100644
--- a/samples/connector/.gitignore
+++ b/samples/connector/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
ucon
diff --git a/samples/hidraw/.gitignore b/samples/hidraw/.gitignore
index 05e51a685242..d7a6074ebcf9 100644
--- a/samples/hidraw/.gitignore
+++ b/samples/hidraw/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
hid-example
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
index c58504774118..418c46fe5ffc 100644
--- a/samples/hw_breakpoint/data_breakpoint.c
+++ b/samples/hw_breakpoint/data_breakpoint.c
@@ -23,7 +23,7 @@
struct perf_event * __percpu *sample_hbp;
-static char ksym_name[KSYM_NAME_LEN] = "pid_max";
+static char ksym_name[KSYM_NAME_LEN] = "jiffies";
module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
" write operations on the kernel symbol");
@@ -41,11 +41,15 @@ static int __init hw_break_module_init(void)
{
int ret;
struct perf_event_attr attr;
+ void *addr = __symbol_get(ksym_name);
+
+ if (!addr)
+ return -ENXIO;
hw_breakpoint_init(&attr);
- attr.bp_addr = kallsyms_lookup_name(ksym_name);
+ attr.bp_addr = (unsigned long)addr;
attr.bp_len = HW_BREAKPOINT_LEN_4;
- attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+ attr.bp_type = HW_BREAKPOINT_W;
sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL);
if (IS_ERR((void __force *)sample_hbp)) {
@@ -66,6 +70,7 @@ fail:
static void __exit hw_break_module_exit(void)
{
unregister_wide_hw_breakpoint(sample_hbp);
+ symbol_put(ksym_name);
printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name);
}
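Note: with kallsyms_lookup_name() replaced by __symbol_get(), the sample can
only watch symbols that are exported (jiffies is; the old default pid_max is
not), and the module now holds a symbol reference that symbol_put() releases
on unload. A typical invocation would be something like
"insmod data_breakpoint.ko ksym=jiffies", substituting any other exported
symbol name of interest.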
diff --git a/samples/mei/.gitignore b/samples/mei/.gitignore
index f356b81ca1ec..db5e802f041e 100644
--- a/samples/mei/.gitignore
+++ b/samples/mei/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
mei-amt-version
diff --git a/samples/mic/mpssd/.gitignore b/samples/mic/mpssd/.gitignore
index 8b7c72f07c92..aa03f1eb37a0 100644
--- a/samples/mic/mpssd/.gitignore
+++ b/samples/mic/mpssd/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
mpssd
diff --git a/samples/pidfd/.gitignore b/samples/pidfd/.gitignore
index be52b3ba6e4b..eea857fca736 100644
--- a/samples/pidfd/.gitignore
+++ b/samples/pidfd/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
pidfd-metadata
diff --git a/samples/seccomp/.gitignore b/samples/seccomp/.gitignore
index d1e2e817d556..4a5a5b7db30b 100644
--- a/samples/seccomp/.gitignore
+++ b/samples/seccomp/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
bpf-direct
bpf-fancy
dropper
diff --git a/samples/timers/.gitignore b/samples/timers/.gitignore
index c5c45d7ec0df..40510c33cf08 100644
--- a/samples/timers/.gitignore
+++ b/samples/timers/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
hpet_example
diff --git a/samples/vfs/.gitignore b/samples/vfs/.gitignore
index 0806eb0be62d..8fdabf7e5373 100644
--- a/samples/vfs/.gitignore
+++ b/samples/vfs/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
test-fsmount
test-statx
diff --git a/samples/watchdog/.gitignore b/samples/watchdog/.gitignore
index ff0ebb540333..74153b831244 100644
--- a/samples/watchdog/.gitignore
+++ b/samples/watchdog/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
watchdog-simple
diff --git a/scripts/.gitignore b/scripts/.gitignore
index ef45f96cd7a5..0d1c8e217cd7 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
bin2c
kallsyms
unifdef
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index 019771b845c5..5b15bc425ec9 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -1,16 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
ifdef CONFIG_UBSAN
+
+ifdef CONFIG_UBSAN_ALIGNMENT
+ CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
+endif
+
+ifdef CONFIG_UBSAN_BOUNDS
+ CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
+endif
+
+ifdef CONFIG_UBSAN_MISC
CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow)
- CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum)
+endif
-ifdef CONFIG_UBSAN_ALIGNMENT
- CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
+ifdef CONFIG_UBSAN_TRAP
+ CFLAGS_UBSAN += $(call cc-option, -fsanitize-undefined-trap-on-error)
endif
# -fsanitize=* options makes GCC less smart than usual and
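Note: the Makefile.ubsan change above splits the instrumentation into
separately selectable groups, so CONFIG_UBSAN_BOUNDS can be enabled without
the noisier CONFIG_UBSAN_MISC checks, and CONFIG_UBSAN_TRAP turns reports
into traps instead of verbose handlers. A minimal C illustration (my own
example, not from the patch) of what -fsanitize=bounds reports at runtime:

	int a[4];

	int read_elem(int i)
	{
		return a[i];	/* i == 4 triggers "index 4 out of bounds" */
	}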
diff --git a/scripts/basic/.gitignore b/scripts/basic/.gitignore
index a776371a3502..98ae1f509592 100644
--- a/scripts/basic/.gitignore
+++ b/scripts/basic/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
fixdep
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a63380c6b0d2..d64c67b67e3c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -64,6 +64,7 @@ my $color = "auto";
my $allow_c99_comments = 1; # Can be overridden by --ignore C99_COMMENT_TOLERANCE
# git output parsing needs US English output, so first set backtick child process LANGUAGE
my $git_command ='export LANGUAGE=en_US.UTF-8; git';
+my $tabsize = 8;
sub help {
my ($exitcode) = @_;
@@ -98,6 +99,7 @@ Options:
--show-types show the specific message type in the output
--max-line-length=n set the maximum line length, if exceeded, warn
--min-conf-desc-length=n set the min description length, if shorter, warn
+ --tab-size=n set the number of spaces for tab (default 8)
--root=PATH PATH to the kernel tree root
--no-summary suppress the per-file summary
--mailback only produce a report in case of warnings/errors
@@ -215,6 +217,7 @@ GetOptions(
'list-types!' => \$list_types,
'max-line-length=i' => \$max_line_length,
'min-conf-desc-length=i' => \$min_conf_desc_length,
+ 'tab-size=i' => \$tabsize,
'root=s' => \$root,
'summary!' => \$summary,
'mailback!' => \$mailback,
@@ -267,6 +270,9 @@ if ($color =~ /^[01]$/) {
die "Invalid color mode: $color\n";
}
+# skip TAB size 1 to avoid additional checks on $tabsize - 1
+die "Invalid TAB size: $tabsize\n" if ($tabsize < 2);
+
sub hash_save_array_words {
my ($hashRef, $arrayRef) = @_;
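Note: the new --tab-size option feeds the $tabsize variable used by the
indentation checks later in this diff; an invocation such as
"./scripts/checkpatch.pl --tab-size=4 --strict file.patch" (hypothetical
command line) makes the TABSTOP and SUSPECT_CODE_INDENT checks assume
4-column tabs. Values below 2 are rejected up front, as the die above shows.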
@@ -804,12 +810,12 @@ sub build_types {
}x;
$Type = qr{
$NonptrType
- (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+)?
+ (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+){0,4}
(?:\s+$Inline|\s+$Modifier)*
}x;
$TypeMisordered = qr{
$NonptrTypeMisordered
- (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+)?
+ (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*\s*(?:const\s*)?|\[\])+|(?:\s*\[\s*\])+){0,4}
(?:\s+$Inline|\s+$Modifier)*
}x;
$Declare = qr{(?:$Storage\s+(?:$Inline\s+)?)?$Type};
@@ -1118,6 +1124,7 @@ sub parse_email {
my ($formatted_email) = @_;
my $name = "";
+ my $name_comment = "";
my $address = "";
my $comment = "";
@@ -1150,6 +1157,10 @@ sub parse_email {
$name = trim($name);
$name =~ s/^\"|\"$//g;
+ $name =~ s/(\s*\([^\)]+\))\s*//;
+ if (defined($1)) {
+ $name_comment = trim($1);
+ }
$address = trim($address);
$address =~ s/^\<|\>$//g;
@@ -1158,7 +1169,7 @@ sub parse_email {
$name = "\"$name\"";
}
- return ($name, $address, $comment);
+ return ($name, $name_comment, $address, $comment);
}
sub format_email {
@@ -1184,6 +1195,23 @@ sub format_email {
return $formatted_email;
}
+sub reformat_email {
+ my ($email) = @_;
+
+ my ($email_name, $name_comment, $email_address, $comment) = parse_email($email);
+ return format_email($email_name, $email_address);
+}
+
+sub same_email_addresses {
+ my ($email1, $email2) = @_;
+
+ my ($email1_name, $name1_comment, $email1_address, $comment1) = parse_email($email1);
+ my ($email2_name, $name2_comment, $email2_address, $comment2) = parse_email($email2);
+
+ return $email1_name eq $email2_name &&
+ $email1_address eq $email2_address;
+}
+
sub which {
my ($bin) = @_;
@@ -1217,7 +1245,7 @@ sub expand_tabs {
if ($c eq "\t") {
$res .= ' ';
$n++;
- for (; ($n % 8) != 0; $n++) {
+ for (; ($n % $tabsize) != 0; $n++) {
$res .= ' ';
}
next;
@@ -2230,7 +2258,7 @@ sub string_find_replace {
sub tabify {
my ($leading) = @_;
- my $source_indent = 8;
+ my $source_indent = $tabsize;
my $max_spaces_before_tab = $source_indent - 1;
my $spaces_to_tab = " " x $source_indent;
@@ -2272,6 +2300,19 @@ sub pos_last_openparen {
return length(expand_tabs(substr($line, 0, $last_openparen))) + 1;
}
+sub get_raw_comment {
+ my ($line, $rawline) = @_;
+ my $comment = '';
+
+ for my $i (0 .. (length($line) - 1)) {
+ if (substr($line, $i, 1) eq "$;") {
+ $comment .= substr($rawline, $i, 1);
+ }
+ }
+
+ return $comment;
+}
+
sub process {
my $filename = shift;
@@ -2294,6 +2335,7 @@ sub process {
my $is_binding_patch = -1;
my $in_header_lines = $file ? 0 : 1;
my $in_commit_log = 0; #Scanning lines before patch
+ my $has_patch_separator = 0; #Found a --- line
my $has_commit_log = 0; #Encountered lines before patch
my $commit_log_lines = 0; #Number of commit log lines
my $commit_log_possible_stack_dump = 0;
@@ -2433,6 +2475,7 @@ sub process {
$sline =~ s/$;/ /g; #with comments as spaces
my $rawline = $rawlines[$linenr - 1];
+ my $raw_comment = get_raw_comment($line, $rawline);
# check if it's a mode change, rename or start of a patch
if (!$in_commit_log &&
@@ -2604,21 +2647,26 @@ sub process {
$author = $1;
$author = encode("utf8", $author) if ($line =~ /=\?utf-8\?/i);
$author =~ s/"//g;
+ $author = reformat_email($author);
}
# Check the patch for a signoff:
- if ($line =~ /^\s*signed-off-by:/i) {
+ if ($line =~ /^\s*signed-off-by:\s*(.*)/i) {
$signoff++;
$in_commit_log = 0;
if ($author ne '') {
- my $l = $line;
- $l =~ s/"//g;
- if ($l =~ /^\s*signed-off-by:\s*\Q$author\E/i) {
- $authorsignoff = 1;
+ if (same_email_addresses($1, $author)) {
+ $authorsignoff = 1;
}
}
}
+# Check for patch separator
+ if ($line =~ /^---$/) {
+ $has_patch_separator = 1;
+ $in_commit_log = 0;
+ }
+
# Check if MAINTAINERS is being updated. If so, there's probably no need to
# emit the "does MAINTAINERS need updating?" message on file add/move/delete
if ($line =~ /^\s*MAINTAINERS\s*\|/) {
@@ -2664,7 +2712,7 @@ sub process {
}
}
- my ($email_name, $email_address, $comment) = parse_email($email);
+ my ($email_name, $name_comment, $email_address, $comment) = parse_email($email);
my $suggested_email = format_email(($email_name, $email_address));
if ($suggested_email eq "") {
ERROR("BAD_SIGN_OFF",
@@ -2675,9 +2723,7 @@ sub process {
$dequoted =~ s/" </ </;
# Don't force email to have quotes
# Allow just an angle bracketed address
- if ("$dequoted$comment" ne $email &&
- "<$email_address>$comment" ne $email &&
- "$suggested_email$comment" ne $email) {
+ if (!same_email_addresses($email, $suggested_email)) {
WARN("BAD_SIGN_OFF",
"email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
}
@@ -2720,10 +2766,10 @@ sub process {
"A patch subject line should describe the change not the tool that found it\n" . $herecurr);
}
-# Check for unwanted Gerrit info
- if ($in_commit_log && $line =~ /^\s*change-id:/i) {
+# Check for Gerrit Change-Ids not in any patch context
+ if ($realfile eq '' && !$has_patch_separator && $line =~ /^\s*change-id:/i) {
ERROR("GERRIT_CHANGE_ID",
- "Remove Gerrit Change-Id's before submitting upstream.\n" . $herecurr);
+ "Remove Gerrit Change-Id's before submitting upstream\n" . $herecurr);
}
# Check if the commit log is in a possible stack dump
@@ -2761,7 +2807,7 @@ sub process {
# Check for git id commit length and improperly formed commit descriptions
if ($in_commit_log && !$commit_log_possible_stack_dump &&
- $line !~ /^\s*(?:Link|Patchwork|http|https|BugLink):/i &&
+ $line !~ /^\s*(?:Link|Patchwork|http|https|BugLink|base-commit):/i &&
$line !~ /^This reverts commit [0-9a-f]{7,40}/ &&
($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i &&
@@ -3087,7 +3133,7 @@ sub process {
$comment = '/*';
} elsif ($realfile =~ /\.(c|dts|dtsi)$/) {
$comment = '//';
- } elsif (($checklicenseline == 2) || $realfile =~ /\.(sh|pl|py|awk|tc)$/) {
+ } elsif (($checklicenseline == 2) || $realfile =~ /\.(sh|pl|py|awk|tc|yaml)$/) {
$comment = '#';
} elsif ($realfile =~ /\.rst$/) {
$comment = '..';
@@ -3111,6 +3157,17 @@ sub process {
WARN("SPDX_LICENSE_TAG",
"'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
}
+ if ($realfile =~ m@^Documentation/devicetree/bindings/@ &&
+ not $spdx_license =~ /GPL-2\.0.*BSD-2-Clause/) {
+ my $msg_level = \&WARN;
+ $msg_level = \&CHK if ($file);
+ if (&{$msg_level}("SPDX_LICENSE_TAG",
+ "DT binding documents should be licensed (GPL-2.0-only OR BSD-2-Clause)\n" . $herecurr) &&
+ $fix) {
+ $fixed[$fixlinenr] =~ s/SPDX-License-Identifier: .*/SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)/;
+ }
+ }
}
}
}
@@ -3198,7 +3255,7 @@ sub process {
next if ($realfile !~ /\.(h|c|pl|dtsi|dts)$/);
# at the beginning of a line any tabs must come first and anything
-# more than 8 must use tabs.
+# more than $tabsize must use tabs.
if ($rawline =~ /^\+\s* \t\s*\S/ ||
$rawline =~ /^\+\s* \s*/) {
my $herevet = "$here\n" . cat_vet($rawline) . "\n";
@@ -3217,7 +3274,7 @@ sub process {
"please, no space before tabs\n" . $herevet) &&
$fix) {
while ($fixed[$fixlinenr] =~
- s/(^\+.*) {8,8}\t/$1\t\t/) {}
+ s/(^\+.*) {$tabsize,$tabsize}\t/$1\t\t/) {}
while ($fixed[$fixlinenr] =~
s/(^\+.*) +\t/$1\t/) {}
}
@@ -3239,11 +3296,11 @@ sub process {
if ($perl_version_ok &&
$sline =~ /^\+\t+( +)(?:$c90_Keywords\b|\{\s*$|\}\s*(?:else\b|while\b|\s*$)|$Declare\s*$Ident\s*[;=])/) {
my $indent = length($1);
- if ($indent % 8) {
+ if ($indent % $tabsize) {
if (WARN("TABSTOP",
"Statements should start on a tabstop\n" . $herecurr) &&
$fix) {
- $fixed[$fixlinenr] =~ s@(^\+\t+) +@$1 . "\t" x ($indent/8)@e;
+ $fixed[$fixlinenr] =~ s@(^\+\t+) +@$1 . "\t" x ($indent/$tabsize)@e;
}
}
}
@@ -3261,8 +3318,8 @@ sub process {
my $newindent = $2;
my $goodtabindent = $oldindent .
- "\t" x ($pos / 8) .
- " " x ($pos % 8);
+ "\t" x ($pos / $tabsize) .
+ " " x ($pos % $tabsize);
my $goodspaceindent = $oldindent . " " x $pos;
if ($newindent ne $goodtabindent &&
@@ -3733,11 +3790,11 @@ sub process {
#print "line<$line> prevline<$prevline> indent<$indent> sindent<$sindent> check<$check> continuation<$continuation> s<$s> cond_lines<$cond_lines> stat_real<$stat_real> stat<$stat>\n";
if ($check && $s ne '' &&
- (($sindent % 8) != 0 ||
+ (($sindent % $tabsize) != 0 ||
($sindent < $indent) ||
($sindent == $indent &&
($s !~ /^\s*(?:\}|\{|else\b)/)) ||
- ($sindent > $indent + 8))) {
+ ($sindent > $indent + $tabsize))) {
WARN("SUSPECT_CODE_INDENT",
"suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n");
}
@@ -4014,7 +4071,7 @@ sub process {
}
# check for function declarations without arguments like "int foo()"
- if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
+ if ($line =~ /(\b$Type\s*$Ident)\s*\(\s*\)/) {
if (ERROR("FUNCTION_WITHOUT_ARGS",
"Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
$fix) {
@@ -4582,7 +4639,7 @@ sub process {
($op eq '>' &&
$ca =~ /<\S+\@\S+$/))
{
- $ok = 1;
+ $ok = 1;
}
# for asm volatile statements
@@ -4917,7 +4974,7 @@ sub process {
# conditional.
substr($s, 0, length($c), '');
$s =~ s/\n.*//g;
- $s =~ s/$;//g; # Remove any comments
+ $s =~ s/$;//g; # Remove any comments
if (length($c) && $s !~ /^\s*{?\s*\\*\s*$/ &&
$c !~ /}\s*while\s*/)
{
@@ -4956,7 +5013,7 @@ sub process {
# if and else should not have general statements after it
if ($line =~ /^.\s*(?:}\s*)?else\b(.*)/) {
my $s = $1;
- $s =~ s/$;//g; # Remove any comments
+ $s =~ s/$;//g; # Remove any comments
if ($s !~ /^\s*(?:\sif|(?:{|)\s*\\?\s*$)/) {
ERROR("TRAILING_STATEMENTS",
"trailing statements should be on next line\n" . $herecurr);
@@ -5132,7 +5189,7 @@ sub process {
{
}
- # Flatten any obvious string concatentation.
+ # Flatten any obvious string concatenation.
while ($dstat =~ s/($String)\s*$Ident/$1/ ||
$dstat =~ s/$Ident\s*($String)/$1/)
{
@@ -6230,13 +6287,17 @@ sub process {
}
# check for function declarations that have arguments without identifier names
+# while avoiding uninitialized_var(x)
if (defined $stat &&
- $stat =~ /^.\s*(?:extern\s+)?$Type\s*(?:$Ident|\(\s*\*\s*$Ident\s*\))\s*\(\s*([^{]+)\s*\)\s*;/s &&
- $1 ne "void") {
- my $args = trim($1);
+ $stat =~ /^.\s*(?:extern\s+)?$Type\s*(?:($Ident)|\(\s*\*\s*$Ident\s*\))\s*\(\s*([^{]+)\s*\)\s*;/s &&
+ (!defined($1) ||
+ (defined($1) && $1 ne "uninitialized_var")) &&
+ $2 ne "void") {
+ my $args = trim($2);
while ($args =~ m/\s*($Type\s*(?:$Ident|\(\s*\*\s*$Ident?\s*\)\s*$balanced_parens)?)/g) {
my $arg = trim($1);
- if ($arg =~ /^$Type$/ && $arg !~ /enum\s+$Ident$/) {
+ if ($arg =~ /^$Type$/ &&
+ $arg !~ /enum\s+$Ident$/) {
WARN("FUNCTION_ARGUMENTS",
"function definition argument '$arg' should also have an identifier name\n" . $herecurr);
}
@@ -6389,6 +6450,28 @@ sub process {
}
}
+# check for /* fallthrough */ like comment, prefer fallthrough;
+ my @fallthroughs = (
+ 'fallthrough',
+ '@fallthrough@',
+ 'lint -fallthrough[ \t]*',
+ 'intentional(?:ly)?[ \t]*fall(?:(?:s | |-)[Tt]|t)hr(?:ough|u|ew)',
+ '(?:else,?\s*)?FALL(?:S | |-)?THR(?:OUGH|U|EW)[ \t.!]*(?:-[^\n\r]*)?',
+ 'Fall(?:(?:s | |-)[Tt]|t)hr(?:ough|u|ew)[ \t.!]*(?:-[^\n\r]*)?',
+ 'fall(?:s | |-)?thr(?:ough|u|ew)[ \t.!]*(?:-[^\n\r]*)?',
+ );
+ if ($raw_comment ne '') {
+ foreach my $ft (@fallthroughs) {
+ if ($raw_comment =~ /$ft/) {
+ my $msg_level = \&WARN;
+ $msg_level = \&CHK if ($file);
+ &{$msg_level}("PREFER_FALLTHROUGH",
+ "Prefer 'fallthrough;' over fallthrough comment\n" . $herecurr);
+ last;
+ }
+ }
+ }
+
# check for switch/default statements without a break;
if ($perl_version_ok &&
defined $stat &&
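Note: the PREFER_FALLTHROUGH check added above matches the many comment
spellings listed in @fallthroughs and suggests the pseudo-keyword instead.
A small before/after illustration (my own example):

	case MODE_A:
		setup_a();
		/* fall through */	/* old style: triggers the warning */
	case MODE_B:

becomes

	case MODE_A:
		setup_a();
		fallthrough;		/* preferred; expands to a compiler
					 * fallthrough attribute */
	case MODE_B: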
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check
index 997202a18ddb..9a8cc10cffd0 100755
--- a/scripts/documentation-file-ref-check
+++ b/scripts/documentation-file-ref-check
@@ -12,7 +12,7 @@ use Getopt::Long qw(:config no_auto_abbrev);
# to mention a past documentation file, for example, to give credits for
# the original work.
my %false_positives = (
- "Documentation/scsi/scsi_mid_low_api.txt" => "Documentation/Configure.help",
+ "Documentation/scsi/scsi_mid_low_api.rst" => "Documentation/Configure.help",
"drivers/vhost/vhost.c" => "Documentation/virtual/lguest/lguest.c",
);
diff --git a/scripts/dtc/.gitignore b/scripts/dtc/.gitignore
index 2e6e60d64ede..b814e6076bdb 100644
--- a/scripts/dtc/.gitignore
+++ b/scripts/dtc/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
dtc
diff --git a/scripts/dtc/Makefile.dtc b/scripts/dtc/Makefile.dtc
deleted file mode 100644
index 9c467b096f03..000000000000
--- a/scripts/dtc/Makefile.dtc
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Makefile.dtc
-#
-# This is not a complete Makefile of itself. Instead, it is designed to
-# be easily embeddable into other systems of Makefiles.
-#
-DTC_SRCS = \
- checks.c \
- data.c \
- dtc.c \
- flattree.c \
- fstree.c \
- livetree.c \
- srcpos.c \
- treesource.c \
- util.c
-
-ifneq ($(NO_YAML),1)
-DTC_SRCS += yamltree.c
-endif
-
-DTC_GEN_SRCS = dtc-lexer.lex.c dtc-parser.tab.c
-DTC_OBJS = $(DTC_SRCS:%.c=%.o) $(DTC_GEN_SRCS:%.c=%.o)
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 756f0fa9203f..4b3c486f1399 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -352,7 +352,7 @@ static void check_unit_address_vs_reg(struct check *c, struct dt_info *dti,
FAIL(c, dti, node, "node has a reg or ranges property, but no unit name");
} else {
if (unitname[0])
- FAIL(c, dti, node, "node has a unit name, but no reg property");
+ FAIL(c, dti, node, "node has a unit name, but no reg or ranges property");
}
}
WARNING(unit_address_vs_reg, check_unit_address_vs_reg, NULL);
@@ -765,13 +765,15 @@ static void check_ranges_format(struct check *c, struct dt_info *dti,
{
struct property *prop;
int c_addr_cells, p_addr_cells, c_size_cells, p_size_cells, entrylen;
+ const char *ranges = c->data;
- prop = get_property(node, "ranges");
+ prop = get_property(node, ranges);
if (!prop)
return;
if (!node->parent) {
- FAIL_PROP(c, dti, node, prop, "Root node has a \"ranges\" property");
+ FAIL_PROP(c, dti, node, prop, "Root node has a \"%s\" property",
+ ranges);
return;
}
@@ -783,23 +785,24 @@ static void check_ranges_format(struct check *c, struct dt_info *dti,
if (prop->val.len == 0) {
if (p_addr_cells != c_addr_cells)
- FAIL_PROP(c, dti, node, prop, "empty \"ranges\" property but its "
+ FAIL_PROP(c, dti, node, prop, "empty \"%s\" property but its "
"#address-cells (%d) differs from %s (%d)",
- c_addr_cells, node->parent->fullpath,
+ ranges, c_addr_cells, node->parent->fullpath,
p_addr_cells);
if (p_size_cells != c_size_cells)
- FAIL_PROP(c, dti, node, prop, "empty \"ranges\" property but its "
+ FAIL_PROP(c, dti, node, prop, "empty \"%s\" property but its "
"#size-cells (%d) differs from %s (%d)",
- c_size_cells, node->parent->fullpath,
+ ranges, c_size_cells, node->parent->fullpath,
p_size_cells);
} else if ((prop->val.len % entrylen) != 0) {
- FAIL_PROP(c, dti, node, prop, "\"ranges\" property has invalid length (%d bytes) "
+ FAIL_PROP(c, dti, node, prop, "\"%s\" property has invalid length (%d bytes) "
"(parent #address-cells == %d, child #address-cells == %d, "
- "#size-cells == %d)", prop->val.len,
+ "#size-cells == %d)", ranges, prop->val.len,
p_addr_cells, c_addr_cells, c_size_cells);
}
}
-WARNING(ranges_format, check_ranges_format, NULL, &addr_size_cells);
+WARNING(ranges_format, check_ranges_format, "ranges", &addr_size_cells);
+WARNING(dma_ranges_format, check_ranges_format, "dma-ranges", &addr_size_cells);
static const struct bus_type pci_bus = {
.name = "PCI",
@@ -1780,7 +1783,7 @@ static struct check *check_table[] = {
&property_name_chars_strict,
&node_name_chars_strict,
- &addr_size_cells, &reg_format, &ranges_format,
+ &addr_size_cells, &reg_format, &ranges_format, &dma_ranges_format,
&unit_address_vs_reg,
&unit_address_format,
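Note: by parameterizing check_ranges_format() on the property name, the same
length and cells validation now also covers dma-ranges; a node carrying, say,
dma-ranges = <0x0 0x40000000 0x10000>; is checked against its parent's
#address-cells and #size-cells exactly as ranges always was.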
diff --git a/scripts/dtc/libfdt/Makefile.libfdt b/scripts/dtc/libfdt/Makefile.libfdt
deleted file mode 100644
index e54639738c8e..000000000000
--- a/scripts/dtc/libfdt/Makefile.libfdt
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
-# Makefile.libfdt
-#
-# This is not a complete Makefile of itself. Instead, it is designed to
-# be easily embeddable into other systems of Makefiles.
-#
-LIBFDT_soname = libfdt.$(SHAREDLIB_EXT).1
-LIBFDT_INCLUDES = fdt.h libfdt.h libfdt_env.h
-LIBFDT_VERSION = version.lds
-LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c fdt_empty_tree.c \
- fdt_addresses.c fdt_overlay.c
-LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)
-LIBFDT_LIB = libfdt-$(DTC_VERSION).$(SHAREDLIB_EXT)
-
-libfdt_clean:
- @$(VECHO) CLEAN "(libfdt)"
- rm -f $(STD_CLEANFILES:%=$(LIBFDT_dir)/%)
- rm -f $(LIBFDT_dir)/$(LIBFDT_soname)
diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
index d6ce7c052dc8..c28fcc115771 100644
--- a/scripts/dtc/libfdt/fdt.c
+++ b/scripts/dtc/libfdt/fdt.c
@@ -19,15 +19,21 @@ int32_t fdt_ro_probe_(const void *fdt)
{
uint32_t totalsize = fdt_totalsize(fdt);
+ if (can_assume(VALID_DTB))
+ return totalsize;
+
if (fdt_magic(fdt) == FDT_MAGIC) {
/* Complete tree */
- if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
- return -FDT_ERR_BADVERSION;
- if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)
- return -FDT_ERR_BADVERSION;
+ if (!can_assume(LATEST)) {
+ if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
+ return -FDT_ERR_BADVERSION;
+ if (fdt_last_comp_version(fdt) >
+ FDT_LAST_SUPPORTED_VERSION)
+ return -FDT_ERR_BADVERSION;
+ }
} else if (fdt_magic(fdt) == FDT_SW_MAGIC) {
/* Unfinished sequential-write blob */
- if (fdt_size_dt_struct(fdt) == 0)
+ if (!can_assume(VALID_INPUT) && fdt_size_dt_struct(fdt) == 0)
return -FDT_ERR_BADSTATE;
} else {
return -FDT_ERR_BADMAGIC;
@@ -70,44 +76,59 @@ size_t fdt_header_size_(uint32_t version)
return FDT_V17_SIZE;
}
+size_t fdt_header_size(const void *fdt)
+{
+ return can_assume(LATEST) ? FDT_V17_SIZE :
+ fdt_header_size_(fdt_version(fdt));
+}
+
int fdt_check_header(const void *fdt)
{
size_t hdrsize;
if (fdt_magic(fdt) != FDT_MAGIC)
return -FDT_ERR_BADMAGIC;
+ if (!can_assume(LATEST)) {
+ if ((fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
+ || (fdt_last_comp_version(fdt) >
+ FDT_LAST_SUPPORTED_VERSION))
+ return -FDT_ERR_BADVERSION;
+ if (fdt_version(fdt) < fdt_last_comp_version(fdt))
+ return -FDT_ERR_BADVERSION;
+ }
hdrsize = fdt_header_size(fdt);
- if ((fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
- || (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION))
- return -FDT_ERR_BADVERSION;
- if (fdt_version(fdt) < fdt_last_comp_version(fdt))
- return -FDT_ERR_BADVERSION;
-
- if ((fdt_totalsize(fdt) < hdrsize)
- || (fdt_totalsize(fdt) > INT_MAX))
- return -FDT_ERR_TRUNCATED;
+ if (!can_assume(VALID_DTB)) {
- /* Bounds check memrsv block */
- if (!check_off_(hdrsize, fdt_totalsize(fdt), fdt_off_mem_rsvmap(fdt)))
- return -FDT_ERR_TRUNCATED;
+ if ((fdt_totalsize(fdt) < hdrsize)
+ || (fdt_totalsize(fdt) > INT_MAX))
+ return -FDT_ERR_TRUNCATED;
- /* Bounds check structure block */
- if (fdt_version(fdt) < 17) {
+ /* Bounds check memrsv block */
if (!check_off_(hdrsize, fdt_totalsize(fdt),
- fdt_off_dt_struct(fdt)))
+ fdt_off_mem_rsvmap(fdt)))
return -FDT_ERR_TRUNCATED;
- } else {
+ }
+
+ if (!can_assume(VALID_DTB)) {
+ /* Bounds check structure block */
+ if (!can_assume(LATEST) && fdt_version(fdt) < 17) {
+ if (!check_off_(hdrsize, fdt_totalsize(fdt),
+ fdt_off_dt_struct(fdt)))
+ return -FDT_ERR_TRUNCATED;
+ } else {
+ if (!check_block_(hdrsize, fdt_totalsize(fdt),
+ fdt_off_dt_struct(fdt),
+ fdt_size_dt_struct(fdt)))
+ return -FDT_ERR_TRUNCATED;
+ }
+
+ /* Bounds check strings block */
if (!check_block_(hdrsize, fdt_totalsize(fdt),
- fdt_off_dt_struct(fdt),
- fdt_size_dt_struct(fdt)))
+ fdt_off_dt_strings(fdt),
+ fdt_size_dt_strings(fdt)))
return -FDT_ERR_TRUNCATED;
}
- /* Bounds check strings block */
- if (!check_block_(hdrsize, fdt_totalsize(fdt),
- fdt_off_dt_strings(fdt), fdt_size_dt_strings(fdt)))
- return -FDT_ERR_TRUNCATED;
-
return 0;
}
@@ -115,12 +136,13 @@ const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
{
unsigned absoffset = offset + fdt_off_dt_struct(fdt);
- if ((absoffset < offset)
- || ((absoffset + len) < absoffset)
- || (absoffset + len) > fdt_totalsize(fdt))
- return NULL;
+ if (!can_assume(VALID_INPUT))
+ if ((absoffset < offset)
+ || ((absoffset + len) < absoffset)
+ || (absoffset + len) > fdt_totalsize(fdt))
+ return NULL;
- if (fdt_version(fdt) >= 0x11)
+ if (can_assume(LATEST) || fdt_version(fdt) >= 0x11)
if (((offset + len) < offset)
|| ((offset + len) > fdt_size_dt_struct(fdt)))
return NULL;
@@ -137,7 +159,7 @@ uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
*nextoffset = -FDT_ERR_TRUNCATED;
tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
- if (!tagp)
+ if (!can_assume(VALID_DTB) && !tagp)
return FDT_END; /* premature end */
tag = fdt32_to_cpu(*tagp);
offset += FDT_TAGSIZE;
@@ -149,18 +171,19 @@ uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
do {
p = fdt_offset_ptr(fdt, offset++, 1);
} while (p && (*p != '\0'));
- if (!p)
+ if (!can_assume(VALID_DTB) && !p)
return FDT_END; /* premature end */
break;
case FDT_PROP:
lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
- if (!lenp)
+ if (!can_assume(VALID_DTB) && !lenp)
return FDT_END; /* premature end */
/* skip-name offset, length and value */
offset += sizeof(struct fdt_property) - FDT_TAGSIZE
+ fdt32_to_cpu(*lenp);
- if (fdt_version(fdt) < 0x10 && fdt32_to_cpu(*lenp) >= 8 &&
+ if (!can_assume(LATEST) &&
+ fdt_version(fdt) < 0x10 && fdt32_to_cpu(*lenp) >= 8 &&
((offset - fdt32_to_cpu(*lenp)) % 8) != 0)
offset += 4;
break;
@@ -183,6 +206,8 @@ uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
int fdt_check_node_offset_(const void *fdt, int offset)
{
+ if (can_assume(VALID_INPUT))
+ return offset;
if ((offset < 0) || (offset % FDT_TAGSIZE)
|| (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE))
return -FDT_ERR_BADOFFSET;
diff --git a/scripts/dtc/libfdt/fdt_ro.c b/scripts/dtc/libfdt/fdt_ro.c
index a5c2797cde65..e03570a56eb5 100644
--- a/scripts/dtc/libfdt/fdt_ro.c
+++ b/scripts/dtc/libfdt/fdt_ro.c
@@ -33,17 +33,26 @@ static int fdt_nodename_eq_(const void *fdt, int offset,
const char *fdt_get_string(const void *fdt, int stroffset, int *lenp)
{
- int32_t totalsize = fdt_ro_probe_(fdt);
- uint32_t absoffset = stroffset + fdt_off_dt_strings(fdt);
+ int32_t totalsize;
+ uint32_t absoffset;
size_t len;
int err;
const char *s, *n;
+ if (can_assume(VALID_INPUT)) {
+ s = (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
+
+ if (lenp)
+ *lenp = strlen(s);
+ return s;
+ }
+ totalsize = fdt_ro_probe_(fdt);
err = totalsize;
if (totalsize < 0)
goto fail;
err = -FDT_ERR_BADOFFSET;
+ absoffset = stroffset + fdt_off_dt_strings(fdt);
if (absoffset >= totalsize)
goto fail;
len = totalsize - absoffset;
@@ -51,7 +60,7 @@ const char *fdt_get_string(const void *fdt, int stroffset, int *lenp)
if (fdt_magic(fdt) == FDT_MAGIC) {
if (stroffset < 0)
goto fail;
- if (fdt_version(fdt) >= 17) {
+ if (can_assume(LATEST) || fdt_version(fdt) >= 17) {
if (stroffset >= fdt_size_dt_strings(fdt))
goto fail;
if ((fdt_size_dt_strings(fdt) - stroffset) < len)
@@ -151,10 +160,13 @@ static const struct fdt_reserve_entry *fdt_mem_rsv(const void *fdt, int n)
int offset = n * sizeof(struct fdt_reserve_entry);
int absoffset = fdt_off_mem_rsvmap(fdt) + offset;
- if (absoffset < fdt_off_mem_rsvmap(fdt))
- return NULL;
- if (absoffset > fdt_totalsize(fdt) - sizeof(struct fdt_reserve_entry))
- return NULL;
+ if (!can_assume(VALID_INPUT)) {
+ if (absoffset < fdt_off_mem_rsvmap(fdt))
+ return NULL;
+ if (absoffset > fdt_totalsize(fdt) -
+ sizeof(struct fdt_reserve_entry))
+ return NULL;
+ }
return fdt_mem_rsv_(fdt, n);
}
@@ -164,7 +176,7 @@ int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
FDT_RO_PROBE(fdt);
re = fdt_mem_rsv(fdt, n);
- if (!re)
+ if (!can_assume(VALID_INPUT) && !re)
return -FDT_ERR_BADOFFSET;
*address = fdt64_ld(&re->address);
@@ -295,7 +307,7 @@ const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
nameptr = nh->name;
- if (fdt_version(fdt) < 0x10) {
+ if (!can_assume(LATEST) && fdt_version(fdt) < 0x10) {
/*
* For old FDT versions, match the naming conventions of V16:
* give only the leaf name (after all /). The actual tree
@@ -346,7 +358,8 @@ static const struct fdt_property *fdt_get_property_by_offset_(const void *fdt,
int err;
const struct fdt_property *prop;
- if ((err = fdt_check_prop_offset_(fdt, offset)) < 0) {
+ if (!can_assume(VALID_INPUT) &&
+ (err = fdt_check_prop_offset_(fdt, offset)) < 0) {
if (lenp)
*lenp = err;
return NULL;
@@ -367,7 +380,7 @@ const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
/* Prior to version 16, properties may need realignment
* and this API does not work. fdt_getprop_*() will, however. */
- if (fdt_version(fdt) < 0x10) {
+ if (!can_assume(LATEST) && fdt_version(fdt) < 0x10) {
if (lenp)
*lenp = -FDT_ERR_BADVERSION;
return NULL;
@@ -388,7 +401,8 @@ static const struct fdt_property *fdt_get_property_namelen_(const void *fdt,
(offset = fdt_next_property_offset(fdt, offset))) {
const struct fdt_property *prop;
- if (!(prop = fdt_get_property_by_offset_(fdt, offset, lenp))) {
+ prop = fdt_get_property_by_offset_(fdt, offset, lenp);
+ if (!can_assume(LIBFDT_FLAWLESS) && !prop) {
offset = -FDT_ERR_INTERNAL;
break;
}
@@ -413,7 +427,7 @@ const struct fdt_property *fdt_get_property_namelen(const void *fdt,
{
/* Prior to version 16, properties may need realignment
* and this API does not work. fdt_getprop_*() will, however. */
- if (fdt_version(fdt) < 0x10) {
+ if (!can_assume(LATEST) && fdt_version(fdt) < 0x10) {
if (lenp)
*lenp = -FDT_ERR_BADVERSION;
return NULL;
@@ -444,8 +458,8 @@ const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
return NULL;
/* Handle realignment */
- if (fdt_version(fdt) < 0x10 && (poffset + sizeof(*prop)) % 8 &&
- fdt32_ld(&prop->len) >= 8)
+ if (!can_assume(LATEST) && fdt_version(fdt) < 0x10 &&
+ (poffset + sizeof(*prop)) % 8 && fdt32_ld(&prop->len) >= 8)
return prop->data + 4;
return prop->data;
}
@@ -461,19 +475,24 @@ const void *fdt_getprop_by_offset(const void *fdt, int offset,
if (namep) {
const char *name;
int namelen;
- name = fdt_get_string(fdt, fdt32_ld(&prop->nameoff),
- &namelen);
- if (!name) {
- if (lenp)
- *lenp = namelen;
- return NULL;
+
+ if (!can_assume(VALID_INPUT)) {
+ name = fdt_get_string(fdt, fdt32_ld(&prop->nameoff),
+ &namelen);
+ if (!name) {
+ if (lenp)
+ *lenp = namelen;
+ return NULL;
+ }
+ *namep = name;
+ } else {
+ *namep = fdt_string(fdt, fdt32_ld(&prop->nameoff));
}
- *namep = name;
}
/* Handle realignment */
- if (fdt_version(fdt) < 0x10 && (offset + sizeof(*prop)) % 8 &&
- fdt32_ld(&prop->len) >= 8)
+ if (!can_assume(LATEST) && fdt_version(fdt) < 0x10 &&
+ (offset + sizeof(*prop)) % 8 && fdt32_ld(&prop->len) >= 8)
return prop->data + 4;
return prop->data;
}
@@ -598,10 +617,12 @@ int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
}
}
- if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
- return -FDT_ERR_BADOFFSET;
- else if (offset == -FDT_ERR_BADOFFSET)
- return -FDT_ERR_BADSTRUCTURE;
+ if (!can_assume(VALID_INPUT)) {
+ if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
+ return -FDT_ERR_BADOFFSET;
+ else if (offset == -FDT_ERR_BADOFFSET)
+ return -FDT_ERR_BADSTRUCTURE;
+ }
return offset; /* error from fdt_next_node() */
}
@@ -613,7 +634,8 @@ int fdt_node_depth(const void *fdt, int nodeoffset)
err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
if (err)
- return (err < 0) ? err : -FDT_ERR_INTERNAL;
+ return (can_assume(LIBFDT_FLAWLESS) || err < 0) ? err :
+ -FDT_ERR_INTERNAL;
return nodedepth;
}
@@ -833,66 +855,3 @@ int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
return offset; /* error from fdt_next_node() */
}
-
-int fdt_check_full(const void *fdt, size_t bufsize)
-{
- int err;
- int num_memrsv;
- int offset, nextoffset = 0;
- uint32_t tag;
- unsigned depth = 0;
- const void *prop;
- const char *propname;
-
- if (bufsize < FDT_V1_SIZE)
- return -FDT_ERR_TRUNCATED;
- err = fdt_check_header(fdt);
- if (err != 0)
- return err;
- if (bufsize < fdt_totalsize(fdt))
- return -FDT_ERR_TRUNCATED;
-
- num_memrsv = fdt_num_mem_rsv(fdt);
- if (num_memrsv < 0)
- return num_memrsv;
-
- while (1) {
- offset = nextoffset;
- tag = fdt_next_tag(fdt, offset, &nextoffset);
-
- if (nextoffset < 0)
- return nextoffset;
-
- switch (tag) {
- case FDT_NOP:
- break;
-
- case FDT_END:
- if (depth != 0)
- return -FDT_ERR_BADSTRUCTURE;
- return 0;
-
- case FDT_BEGIN_NODE:
- depth++;
- if (depth > INT_MAX)
- return -FDT_ERR_BADSTRUCTURE;
- break;
-
- case FDT_END_NODE:
- if (depth == 0)
- return -FDT_ERR_BADSTRUCTURE;
- depth--;
- break;
-
- case FDT_PROP:
- prop = fdt_getprop_by_offset(fdt, offset, &propname,
- &err);
- if (!prop)
- return err;
- break;
-
- default:
- return -FDT_ERR_INTERNAL;
- }
- }
-}
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 8795947c00dd..524b520c8486 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -24,14 +24,16 @@ static int fdt_blocks_misordered_(const void *fdt,
static int fdt_rw_probe_(void *fdt)
{
+ if (can_assume(VALID_DTB))
+ return 0;
FDT_RO_PROBE(fdt);
- if (fdt_version(fdt) < 17)
+ if (!can_assume(LATEST) && fdt_version(fdt) < 17)
return -FDT_ERR_BADVERSION;
if (fdt_blocks_misordered_(fdt, sizeof(struct fdt_reserve_entry),
fdt_size_dt_struct(fdt)))
return -FDT_ERR_BADLAYOUT;
- if (fdt_version(fdt) > 17)
+ if (!can_assume(LATEST) && fdt_version(fdt) > 17)
fdt_set_version(fdt, 17);
return 0;
@@ -44,7 +46,7 @@ static int fdt_rw_probe_(void *fdt)
return err_; \
}
-static inline int fdt_data_size_(void *fdt)
+static inline unsigned int fdt_data_size_(void *fdt)
{
return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
}
@@ -52,15 +54,16 @@ static inline int fdt_data_size_(void *fdt)
static int fdt_splice_(void *fdt, void *splicepoint, int oldlen, int newlen)
{
char *p = splicepoint;
- char *end = (char *)fdt + fdt_data_size_(fdt);
+ unsigned int dsize = fdt_data_size_(fdt);
+ size_t soff = p - (char *)fdt;
- if (((p + oldlen) < p) || ((p + oldlen) > end))
+ if ((oldlen < 0) || (soff + oldlen < soff) || (soff + oldlen > dsize))
return -FDT_ERR_BADOFFSET;
- if ((p < (char *)fdt) || ((end - oldlen + newlen) < (char *)fdt))
+ if ((p < (char *)fdt) || (dsize + newlen < oldlen))
return -FDT_ERR_BADOFFSET;
- if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt)))
+ if (dsize - oldlen + newlen > fdt_totalsize(fdt))
return -FDT_ERR_NOSPACE;
- memmove(p + newlen, p + oldlen, end - p - oldlen);
+ memmove(p + newlen, p + oldlen, ((char *)fdt + dsize) - (p + oldlen));
return 0;
}
@@ -112,6 +115,15 @@ static int fdt_splice_string_(void *fdt, int newlen)
return 0;
}
+/**
+ * fdt_find_add_string_() - Find or allocate a string
+ *
+ * @fdt: pointer to the device tree to check/adjust
+ * @s: string to find/add
+ * @allocated: Set to 0 if the string was found, 1 if not found and so
+ * allocated. Ignored if can_assume(NO_ROLLBACK)
+ * @return offset of string in the string table (whether found or added)
+ */
static int fdt_find_add_string_(void *fdt, const char *s, int *allocated)
{
char *strtab = (char *)fdt + fdt_off_dt_strings(fdt);
@@ -120,7 +132,8 @@ static int fdt_find_add_string_(void *fdt, const char *s, int *allocated)
int len = strlen(s) + 1;
int err;
- *allocated = 0;
+ if (!can_assume(NO_ROLLBACK))
+ *allocated = 0;
p = fdt_find_string_(strtab, fdt_size_dt_strings(fdt), s);
if (p)
@@ -132,7 +145,8 @@ static int fdt_find_add_string_(void *fdt, const char *s, int *allocated)
if (err)
return err;
- *allocated = 1;
+ if (!can_assume(NO_ROLLBACK))
+ *allocated = 1;
memcpy(new, s, len);
return (new - strtab);
@@ -206,7 +220,8 @@ static int fdt_add_property_(void *fdt, int nodeoffset, const char *name,
err = fdt_splice_struct_(fdt, *prop, 0, proplen);
if (err) {
- if (allocated)
+ /* Delete the string if we failed to add it */
+ if (!can_assume(NO_ROLLBACK) && allocated)
fdt_del_last_string_(fdt, name);
return err;
}
@@ -411,7 +426,7 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
* sizeof(struct fdt_reserve_entry);
- if (fdt_version(fdt) >= 17) {
+ if (can_assume(LATEST) || fdt_version(fdt) >= 17) {
struct_size = fdt_size_dt_struct(fdt);
} else {
struct_size = 0;
@@ -421,7 +436,8 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
return struct_size;
}
- if (!fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) {
+ if (can_assume(LIBFDT_ORDER) |
+ !fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) {
/* no further work necessary */
err = fdt_move(fdt, buf, bufsize);
if (err)
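Note: the @allocated flag documented on fdt_find_add_string_() exists purely
for rollback. The pattern, paraphrased from the fdt_add_property_() hunk
above:

	int allocated;
	int nameoff = fdt_find_add_string_(fdt, name, &allocated);

	if (nameoff < 0)
		return nameoff;
	err = fdt_splice_struct_(fdt, *prop, 0, proplen);
	if (err) {
		/* undo the string-table growth, unless rollback is
		 * assumed away via ASSUME_NO_ROLLBACK */
		if (!can_assume(NO_ROLLBACK) && allocated)
			fdt_del_last_string_(fdt, name);
		return err;
	}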
diff --git a/scripts/dtc/libfdt/fdt_sw.c b/scripts/dtc/libfdt/fdt_sw.c
index 76bea22f734f..26759d5dfb8c 100644
--- a/scripts/dtc/libfdt/fdt_sw.c
+++ b/scripts/dtc/libfdt/fdt_sw.c
@@ -12,10 +12,13 @@
static int fdt_sw_probe_(void *fdt)
{
- if (fdt_magic(fdt) == FDT_MAGIC)
- return -FDT_ERR_BADSTATE;
- else if (fdt_magic(fdt) != FDT_SW_MAGIC)
- return -FDT_ERR_BADMAGIC;
+ if (!can_assume(VALID_INPUT)) {
+ if (fdt_magic(fdt) == FDT_MAGIC)
+ return -FDT_ERR_BADSTATE;
+ else if (fdt_magic(fdt) != FDT_SW_MAGIC)
+ return -FDT_ERR_BADMAGIC;
+ }
+
return 0;
}
@@ -38,7 +41,7 @@ static int fdt_sw_probe_memrsv_(void *fdt)
if (err)
return err;
- if (fdt_off_dt_strings(fdt) != 0)
+ if (!can_assume(VALID_INPUT) && fdt_off_dt_strings(fdt) != 0)
return -FDT_ERR_BADSTATE;
return 0;
}
@@ -64,7 +67,8 @@ static int fdt_sw_probe_struct_(void *fdt)
if (err)
return err;
- if (fdt_off_dt_strings(fdt) != fdt_totalsize(fdt))
+ if (!can_assume(VALID_INPUT) &&
+ fdt_off_dt_strings(fdt) != fdt_totalsize(fdt))
return -FDT_ERR_BADSTATE;
return 0;
}
@@ -151,7 +155,8 @@ int fdt_resize(void *fdt, void *buf, int bufsize)
headsize = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
tailsize = fdt_size_dt_strings(fdt);
- if ((headsize + tailsize) > fdt_totalsize(fdt))
+ if (!can_assume(VALID_DTB) &&
+ headsize + tailsize > fdt_totalsize(fdt))
return -FDT_ERR_INTERNAL;
if ((headsize + tailsize) > bufsize)
diff --git a/scripts/dtc/libfdt/libfdt.h b/scripts/dtc/libfdt/libfdt.h
index 8907b09b86cc..36fadcdea516 100644
--- a/scripts/dtc/libfdt/libfdt.h
+++ b/scripts/dtc/libfdt/libfdt.h
@@ -266,11 +266,12 @@ fdt_set_hdr_(size_dt_struct);
* fdt_header_size - return the size of the tree's header
* @fdt: pointer to a flattened device tree
*/
+size_t fdt_header_size(const void *fdt);
+
+/**
+ * fdt_header_size_ - internal function which takes a version number
+ */
size_t fdt_header_size_(uint32_t version);
-static inline size_t fdt_header_size(const void *fdt)
-{
- return fdt_header_size_(fdt_version(fdt));
-}
/**
* fdt_check_header - sanity check a device tree header
diff --git a/scripts/dtc/libfdt/libfdt_internal.h b/scripts/dtc/libfdt/libfdt_internal.h
index 058c7358d441..d4e0bd49c037 100644
--- a/scripts/dtc/libfdt/libfdt_internal.h
+++ b/scripts/dtc/libfdt/libfdt_internal.h
@@ -48,4 +48,126 @@ static inline struct fdt_reserve_entry *fdt_mem_rsv_w_(void *fdt, int n)
#define FDT_SW_MAGIC (~FDT_MAGIC)
+/**********************************************************************/
+/* Checking controls */
+/**********************************************************************/
+
+#ifndef FDT_ASSUME_MASK
+#define FDT_ASSUME_MASK 0
+#endif
+
+/*
+ * Defines assumptions which can be enabled. Each of these can be enabled
+ * individually. For maximum safety, don't enable any assumptions!
+ *
+ * For minimal code size and no safety, use ASSUME_PERFECT at your own risk.
+ * You should have another method of validating the device tree, such as a
+ * signature or hash check before using libfdt.
+ *
+ * For situations where security is not a concern it may be safe to enable
+ * ASSUME_SANE.
+ */
+enum {
+ /*
+ * This does essentially no checks. Only the latest device-tree
+ * version is correctly handled. Inconsistencies or errors in the device
+ * tree may cause undefined behaviour or crashes. Invalid parameters
+ * passed to libfdt may do the same.
+ *
+ * If an error occurs when modifying the tree it may leave the tree in
+ * an intermediate (but valid) state. As an example, adding a property
+ * where there is insufficient space may result in the property name
+ * being added to the string table even though the property itself is
+ * not added to the struct section.
+ *
+ * Only use this if you have a fully validated device tree with
+ * the latest supported version and wish to minimise code size.
+ */
+ ASSUME_PERFECT = 0xff,
+
+ /*
+ * This assumes that the device tree is sane. i.e. header metadata
+ * and basic hierarchy are correct.
+ *
+ * With this assumption enabled, normal device trees produced by libfdt
+ * and the compiler should be handled safely. Malicious device trees and
+ * complete garbage may cause libfdt to behave badly or crash. Truncated
+ * device trees (e.g. those only partially loaded) can also cause
+ * problems.
+ *
+ * Note: Only checks that relate exclusively to the device tree itself
+ * (not the parameters passed to libfdt) are disabled by this
+ * assumption. This includes checking headers, tags and the like.
+ */
+ ASSUME_VALID_DTB = 1 << 0,
+
+ /*
+ * This builds on ASSUME_VALID_DTB and further assumes that libfdt
+ * functions are called with valid parameters, i.e. not trigger
+ * FDT_ERR_BADOFFSET or offsets that are out of bounds. It disables any
+ * extensive checking of parameters and the device tree, making various
+ * assumptions about correctness.
+ *
+ * It doesn't make sense to enable this assumption unless
+ * ASSUME_VALID_DTB is also enabled.
+ */
+ ASSUME_VALID_INPUT = 1 << 1,
+
+ /*
+ * This disables checks for device-tree version and removes all code
+ * which handles older versions.
+ *
+ * Only enable this if you know you have a device tree with the latest
+ * version.
+ */
+ ASSUME_LATEST = 1 << 2,
+
+ /*
+ * This assumes that it is OK for a failed addition to the device tree,
+ * due to lack of space or some other problem, to skip any rollback
+ * steps (such as dropping the property name from the string table).
+ * This is safe to enable in most circumstances, even though it may
+ * leave the tree in a sub-optimal state.
+ */
+ ASSUME_NO_ROLLBACK = 1 << 3,
+
+ /*
+ * This assumes that the device tree components appear in a 'convenient'
+ * order, i.e. the memory reservation block first, then the structure
+ * block and finally the string block.
+ *
+ * This order is not specified by the device-tree specification,
+ * but is expected by libfdt. The device-tree compiler always created
+ * device trees with this order.
+ *
+ * This assumption disables a check in fdt_open_into() and removes the
+ * ability to fix the problem there. This is safe if you know that the
+ * device tree is correctly ordered. See fdt_blocks_misordered_().
+ */
+ ASSUME_LIBFDT_ORDER = 1 << 4,
+
+ /*
+ * This assumes that libfdt itself does not have any internal bugs. It
+ * drops certain checks that should never be needed unless libfdt has an
+ * undiscovered bug.
+ *
+ * This can generally be considered safe to enable.
+ */
+ ASSUME_LIBFDT_FLAWLESS = 1 << 5,
+};
+
+/**
+ * can_assume_() - check if a particular assumption is enabled
+ *
+ * @mask: Mask to check (ASSUME_...)
+ * @return true if that assumption is enabled, else false
+ */
+static inline bool can_assume_(int mask)
+{
+ return FDT_ASSUME_MASK & mask;
+}
+
+/** helper macros for checking assumptions */
+#define can_assume(_assume) can_assume_(ASSUME_ ## _assume)
+
#endif /* LIBFDT_INTERNAL_H */
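Note: FDT_ASSUME_MASK is meant to be set at build time; the kernel build
leaves it at the default of 0, keeping every check. A sketch of how a caller
that fully trusts its DTB might configure it (illustrative values only):

	/* e.g. via CFLAGS: -DFDT_ASSUME_MASK="(ASSUME_VALID_DTB | ASSUME_LATEST)" */
	#define FDT_ASSUME_MASK	(ASSUME_VALID_DTB | ASSUME_LATEST)

	...
	if (can_assume(VALID_DTB))	/* now a constant-true test, so the
					 * header re-validation drops out */
		return totalsize;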
diff --git a/scripts/dtc/update-dtc-source.sh b/scripts/dtc/update-dtc-source.sh
index 7dd29a0362b8..bc704e2a6a4a 100755
--- a/scripts/dtc/update-dtc-source.sh
+++ b/scripts/dtc/update-dtc-source.sh
@@ -32,9 +32,9 @@ DTC_UPSTREAM_PATH=`pwd`/../dtc
DTC_LINUX_PATH=`pwd`/scripts/dtc
DTC_SOURCE="checks.c data.c dtc.c dtc.h flattree.c fstree.c livetree.c srcpos.c \
- srcpos.h treesource.c util.c util.h version_gen.h yamltree.c Makefile.dtc \
+ srcpos.h treesource.c util.c util.h version_gen.h yamltree.c \
dtc-lexer.l dtc-parser.y"
-LIBFDT_SOURCE="Makefile.libfdt fdt.c fdt.h fdt_addresses.c fdt_empty_tree.c \
+LIBFDT_SOURCE="fdt.c fdt.h fdt_addresses.c fdt_empty_tree.c \
fdt_overlay.c fdt_ro.c fdt_rw.c fdt_strerror.c fdt_sw.c \
fdt_wip.c libfdt.h libfdt_env.h libfdt_internal.h"
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index 6dba95d23207..61dd7112d6e4 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.5.0-gc40aeb60"
+#define DTC_VERSION "DTC 1.6.0-g87a656ae"
diff --git a/scripts/gcc-plugins/.gitignore b/scripts/gcc-plugins/.gitignore
index de92ed9e3d83..b04e0f0f033e 100644
--- a/scripts/gcc-plugins/.gitignore
+++ b/scripts/gcc-plugins/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
randomize_layout_seed.h
diff --git a/scripts/gdb/linux/.gitignore b/scripts/gdb/linux/.gitignore
index 2573543842d0..43234cbcb529 100644
--- a/scripts/gdb/linux/.gitignore
+++ b/scripts/gdb/linux/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.pyc
*.pyo
constants.py
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore
index b119c7da2863..999af710f83d 100644
--- a/scripts/genksyms/.gitignore
+++ b/scripts/genksyms/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
genksyms
diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore
index b5bf92f66d11..12a67fdab541 100644
--- a/scripts/kconfig/.gitignore
+++ b/scripts/kconfig/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated files
-#
+# SPDX-License-Identifier: GPL-2.0-only
*.moc
*conf-cfg
diff --git a/scripts/mod/.gitignore b/scripts/mod/.gitignore
index 3bd11b603173..07e4a39f90a6 100644
--- a/scripts/mod/.gitignore
+++ b/scripts/mod/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
elfconfig.h
mk_elfconfig
modpost
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index d3c237b9b7c0..010be8ba2116 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -238,5 +238,8 @@ int main(void)
DEVID(wmi_device_id);
DEVID_FIELD(wmi_device_id, guid_string);
+ DEVID(mhi_device_id);
+ DEVID_FIELD(mhi_device_id, chan);
+
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index f81cbe021a47..02d5d79da284 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1353,6 +1353,15 @@ static int do_wmi_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/* Looks like: mhi:S */
+static int do_mhi_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD_ADDR(symval, mhi_device_id, chan);
+ sprintf(alias, MHI_DEVICE_MODALIAS_FMT, *chan);
+
+ return 1;
+}
+
/* Does namelen bytes of name exactly match the symbol? */
static bool sym_is(const char *name, unsigned namelen, const char *symbol)
{
@@ -1426,6 +1435,7 @@ static const struct devtable devtable[] = {
{"typec", SIZE_typec_device_id, do_typec_entry},
{"tee", SIZE_tee_client_device_id, do_tee_entry},
{"wmi", SIZE_wmi_device_id, do_wmi_entry},
+ {"mhi", SIZE_mhi_device_id, do_mhi_entry},
};
/* Create MODULE_ALIAS() statements.
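
A quick standalone sketch (not part of the patch) of the alias string do_mhi_entry() emits, assuming MHI_DEVICE_MODALIAS_FMT expands to "mhi:%s" as the "Looks like: mhi:S" comment suggests; the channel name is hypothetical:

#include <stdio.h>

#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"	/* assumed definition */

int main(void)
{
	char alias[64];
	const char *chan = "QMI";	/* hypothetical channel name */

	sprintf(alias, MHI_DEVICE_MODALIAS_FMT, chan);
	printf("%s\n", alias);		/* prints: mhi:QMI */
	return 0;
}
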
diff --git a/scripts/selinux/genheaders/.gitignore b/scripts/selinux/genheaders/.gitignore
index 4c0b646ff8d5..5fcadd307908 100644
--- a/scripts/selinux/genheaders/.gitignore
+++ b/scripts/selinux/genheaders/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
genheaders
diff --git a/scripts/selinux/mdp/.gitignore b/scripts/selinux/mdp/.gitignore
index 654546d8dffd..a7482287e77f 100644
--- a/scripts/selinux/mdp/.gitignore
+++ b/scripts/selinux/mdp/.gitignore
@@ -1,2 +1,2 @@
-# Generated file
+# SPDX-License-Identifier: GPL-2.0-only
mdp
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index ffa838f3a2b5..d9cd24cf0d40 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -177,7 +177,9 @@ atomatically||automatically
atomicly||atomically
atempt||attempt
attachement||attachment
+attatch||attach
attched||attached
+attemp||attempt
attemps||attempts
attemping||attempting
attepmpt||attempt
@@ -235,11 +237,13 @@ brievely||briefly
brigde||bridge
broadcase||broadcast
broadcat||broadcast
+bufer||buffer
bufufer||buffer
cacluated||calculated
caculate||calculate
caculation||calculation
cadidate||candidate
+cahces||caches
calender||calendar
calescing||coalescing
calle||called
@@ -338,7 +342,6 @@ conditon||condition
condtion||condition
conected||connected
conector||connector
-connecetd||connected
configration||configuration
configuartion||configuration
configuation||configuration
@@ -349,7 +352,9 @@ configuretion||configuration
configutation||configuration
conider||consider
conjuction||conjunction
+connecetd||connected
connectinos||connections
+connetor||connector
connnection||connection
connnections||connections
consistancy||consistency
@@ -469,6 +474,7 @@ difinition||definition
digial||digital
dimention||dimension
dimesions||dimensions
+disgest||digest
dispalying||displaying
diplay||display
directon||direction
@@ -553,6 +559,7 @@ etsablishment||establishment
etsbalishment||establishment
excecutable||executable
exceded||exceeded
+exceeed||exceed
excellant||excellent
execeeded||exceeded
execeeds||exceeds
@@ -742,6 +749,7 @@ initialzing||initializing
initilization||initialization
initilize||initialize
initliaze||initialize
+initilized||initialized
inofficial||unofficial
inrerface||interface
insititute||institute
@@ -802,6 +810,7 @@ irrelevent||irrelevant
isnt||isn't
isssue||issue
issus||issues
+iteraions||iterations
iternations||iterations
itertation||iteration
itslef||itself
@@ -828,6 +837,7 @@ libary||library
librairies||libraries
libraris||libraries
licenceing||licencing
+limted||limited
logaritmic||logarithmic
loggging||logging
loggin||login
@@ -924,6 +934,7 @@ nerver||never
nescessary||necessary
nessessary||necessary
noticable||noticeable
+notication||notification
notications||notifications
notifcations||notifications
notifed||notified
@@ -1007,6 +1018,7 @@ pendantic||pedantic
peprocessor||preprocessor
perfoming||performing
perfomring||performing
+periperal||peripheral
peripherial||peripheral
permissons||permissions
peroid||period
@@ -1043,6 +1055,7 @@ prefferably||preferably
premption||preemption
prepaired||prepared
preperation||preparation
+preprare||prepare
pressre||pressure
primative||primitive
princliple||principle
@@ -1064,6 +1077,7 @@ processsed||processed
processsing||processing
procteted||protected
prodecure||procedure
+progamming||programming
progams||programs
progess||progress
programers||programmers
@@ -1151,12 +1165,14 @@ replys||replies
reponse||response
representaion||representation
reqeust||request
+reqister||register
requestied||requested
requiere||require
requirment||requirement
requred||required
requried||required
requst||request
+requsted||requested
reregisteration||reregistration
reseting||resetting
reseved||reserved
@@ -1227,10 +1243,12 @@ seqeuncer||sequencer
seqeuencer||sequencer
sequece||sequence
sequencial||sequential
+serivce||service
serveral||several
servive||service
setts||sets
settting||setting
+shapshot||snapshot
shotdown||shutdown
shoud||should
shouldnt||shouldn't
@@ -1328,6 +1346,7 @@ swithcing||switching
swithed||switched
swithing||switching
swtich||switch
+syfs||sysfs
symetric||symmetric
synax||syntax
synchonized||synchronized
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 85005d6b7f10..0968a3070eff 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -14,6 +14,8 @@ BEGIN {
printf("\n")
vernum = "[0-9]+([.]?[0-9]+)+"
+ libc = "libc[.]so[.][0-9]+$"
+ libcpp = "(libg|stdc)[+]+[.]so[.][0-9]+$"
printversion("GNU C", version("gcc -dumpversion"))
printversion("GNU Make", version("make --version"))
@@ -35,26 +37,14 @@ BEGIN {
printversion("Bison", version("bison --version"))
printversion("Flex", version("flex --version"))
- while (getline <"/proc/self/maps" > 0) {
- if (/libc.*\.so$/) {
- n = split($0, procmaps, "/")
- if (match(procmaps[n], vernum)) {
- ver = substr(procmaps[n], RSTART, RLENGTH)
- printversion("Linux C Library", ver)
- break
- }
- }
+ while ("ldconfig -p 2>/dev/null" | getline > 0) {
+ if ($NF ~ libc && !seen[ver = version("readlink " $NF)]++)
+ printversion("Linux C Library", ver)
+ else if ($NF ~ libcpp && !seen[ver = version("readlink " $NF)]++)
+ printversion("Linux C++ Library", ver)
}
printversion("Dynamic linker (ldd)", version("ldd --version"))
-
- while ("ldconfig -p 2>/dev/null" | getline > 0) {
- if (/(libg|stdc)[+]+\.so/) {
- libcpp = $NF
- break
- }
- }
- printversion("Linux C++ Library", version("readlink " libcpp))
printversion("Procps", version("ps --version"))
printversion("Net-tools", version("ifconfig --version"))
printversion("Kbd", version("loadkeys -V"))
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index d5b291e94264..6d1eb1c15c18 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -1,6 +1,4 @@
-#
-# Generated include files
-#
+# SPDX-License-Identifier: GPL-2.0-only
net_names.h
capability_names.h
rlim_names.h
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index ea1aae3d07b3..e9cbadade74b 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -6,8 +6,6 @@
* Dmitry Kasatkin <dmitry.kasatkin@intel.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index 55aec161d0e1..4e0d6778277e 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -6,8 +6,6 @@
* Dmitry Kasatkin <dmitry.kasatkin@intel.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/err.h>
#include <linux/ratelimit.h>
#include <linux/key-type.h>
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index d485f6fc908e..35682852ddea 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -10,8 +10,6 @@
* Using root's kernel master key (kmk), calculate the HMAC
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/export.h>
#include <linux/crypto.h>
#include <linux/xattr.h>
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index f9a81b187fae..d361d7fdafc4 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -11,8 +11,6 @@
* evm_inode_removexattr, and evm_verifyxattr
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/audit.h>
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index c11c1f7b3ddd..39ad1038d45d 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -10,8 +10,6 @@
* - Get the key and enable EVM
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/init.h>
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 3f3ee4e2eb0d..edde88dbe576 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -327,3 +327,10 @@ config IMA_QUEUE_EARLY_BOOT_KEYS
depends on IMA_MEASURE_ASYMMETRIC_KEYS
depends on SYSTEM_TRUSTED_KEYRING
default y
+
+config IMA_SECURE_AND_OR_TRUSTED_BOOT
+ bool
+ depends on IMA_ARCH_POLICY
+ help
+ This option is selected by architectures to enable secure and/or
+ trusted boot based on IMA runtime policies.
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 064a256f8725..67dabca670e2 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -11,6 +11,6 @@ ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
ima-$(CONFIG_IMA_APPRAISE_MODSIG) += ima_modsig.o
ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
-obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
-obj-$(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) += ima_asymmetric_keys.o
-obj-$(CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS) += ima_queue_keys.o
+ima-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
+ima-$(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) += ima_asymmetric_keys.o
+ima-$(CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS) += ima_queue_keys.o
diff --git a/security/integrity/ima/ima_asymmetric_keys.c b/security/integrity/ima/ima_asymmetric_keys.c
index 7678f0e3e84d..aaae80c4e376 100644
--- a/security/integrity/ima/ima_asymmetric_keys.c
+++ b/security/integrity/ima/ima_asymmetric_keys.c
@@ -9,8 +9,6 @@
* create or update.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <keys/asymmetric-type.h>
#include "ima.h"
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 7967a6904851..423c84f95a14 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -10,8 +10,6 @@
* Calculates md5/sha1 file hash, template hash, boot-aggreate hash
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 2000e8df0301..a71e822a6e92 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -12,8 +12,6 @@
* current measurement list and IMA statistics
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 195cb4079b2b..567468188a61 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -11,8 +11,6 @@
* initialization and cleanup functions
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 9e94eca48b89..121de3e04af2 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -6,7 +6,6 @@
* Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
* Mimi Zohar <zohar@linux.vnet.ibm.com>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 9fe949c6a530..9d0abedeae77 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -15,8 +15,6 @@
* and ima_file_check.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/file.h>
#include <linux/binfmts.h>
@@ -757,6 +755,9 @@ void process_buffer_measurement(const void *buf, int size,
ima_free_template_entry(entry);
out:
+ if (ret < 0)
+ pr_devel("%s: failed, result: %d\n", __func__, ret);
+
return;
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 453427048999..c334e0dc6083 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -7,8 +7,6 @@
* - initialize default measure policy rules
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 1ce8b1701566..8753212ddb18 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -15,8 +15,6 @@
* ever removed or changed during the boot-cycle.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"
diff --git a/security/integrity/ima/ima_queue_keys.c b/security/integrity/ima/ima_queue_keys.c
index c87c72299191..cb3e3f501593 100644
--- a/security/integrity/ima/ima_queue_keys.c
+++ b/security/integrity/ima/ima_queue_keys.c
@@ -8,8 +8,6 @@
* Enables deferred processing of keys
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/workqueue.h>
#include <keys/asymmetric-type.h>
#include "ima.h"
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 6aa6408603e3..062d9ad49afb 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -9,8 +9,6 @@
* Helpers to manage template descriptors.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/rculist.h>
#include "ima.h"
#include "ima_template_lib.h"
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 32ae05d88257..9cd1e50f3ccc 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -9,8 +9,6 @@
* Library of supported template fields.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "ima_template_lib.h"
static bool ima_template_hash_algo_allowed(u8 algo)
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 73fc286834d7..298b73794d8b 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -6,6 +6,12 @@
* Mimi Zohar <zohar@us.ibm.com>
*/
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/integrity.h>
#include <crypto/sha.h>
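
The hunks above drop the per-file pr_fmt() defines because integrity.h now provides one for every includer. A rough standalone model of the mechanism, with printf standing in for printk (a sketch, not part of the patch):

#include <stdio.h>

#define KBUILD_MODNAME "integrity"	/* normally supplied by kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Every file including the header gets the same prefix for free. */
	pr_info("HMAC attrs: 0x%x\n", 1);
	return 0;
}
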
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 001abe530a0d..82008f900930 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -352,7 +352,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
* read the key data
* - the key's semaphore is read-locked
*/
-long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+long big_key_read(const struct key *key, char *buffer, size_t buflen)
{
size_t datalen = (size_t)key->payload.data[big_key_len];
long ret;
@@ -391,9 +391,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
ret = datalen;
- /* copy decrypted data to user */
- if (copy_to_user(buffer, buf->virt, datalen) != 0)
- ret = -EFAULT;
+ /* copy out decrypted data */
+ memcpy(buffer, buf->virt, datalen);
err_fput:
fput(file);
@@ -401,9 +400,7 @@ error:
big_key_free_buffer(buf);
} else {
ret = datalen;
- if (copy_to_user(buffer, key->payload.data[big_key_data],
- datalen) != 0)
- ret = -EFAULT;
+ memcpy(buffer, key->payload.data[big_key_data], datalen);
}
return ret;
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 60720f58cbe0..f6797ba44bf7 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -902,14 +902,14 @@ out:
}
/*
- * encrypted_read - format and copy the encrypted data to userspace
+ * encrypted_read - format and copy out the encrypted data
*
* The resulting datablob format is:
* <master-key name> <decrypted data length> <encrypted iv> <encrypted data>
*
* On success, return to userspace the encrypted key datablob size.
*/
-static long encrypted_read(const struct key *key, char __user *buffer,
+static long encrypted_read(const struct key *key, char *buffer,
size_t buflen)
{
struct encrypted_key_payload *epayload;
@@ -957,8 +957,7 @@ static long encrypted_read(const struct key *key, char __user *buffer,
key_put(mkey);
memzero_explicit(derived_key, sizeof(derived_key));
- if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
- ret = -EFAULT;
+ memcpy(buffer, ascii_buf, asciiblob_len);
kzfree(ascii_buf);
return asciiblob_len;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index ba3e2da14cef..6d0ca48ae9a5 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -16,6 +16,8 @@
#include <linux/keyctl.h>
#include <linux/refcount.h>
#include <linux/compat.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
struct iovec;
@@ -349,4 +351,14 @@ static inline void key_check(const struct key *key)
#endif
+/*
+ * Helper function to clear and free a kvmalloc'ed memory object.
+ */
+static inline void __kvzfree(const void *addr, size_t len)
+{
+ if (addr) {
+ memset((void *)addr, 0, len);
+ kvfree(addr);
+ }
+}
#endif /* _INTERNAL_H */
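
A minimal userspace model (not part of the patch) of what __kvzfree() provides: possibly sensitive bytes are wiped before the allocation is returned. malloc()/free() stand in for kvmalloc()/kvfree(); a real kernel version also depends on the compiler not eliding the memset():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void kvzfree_sketch(void *addr, size_t len)
{
	if (addr) {
		memset(addr, 0, len);	/* wipe potentially sensitive data */
		free(addr);
	}
}

int main(void)
{
	char *payload = malloc(32);

	if (!payload)
		return 1;
	strcpy(payload, "secret key bytes");
	kvzfree_sketch(payload, 32);	/* zeroed, then freed */
	puts("done");
	return 0;
}
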
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index d1a3dea58dee..5e01192e222a 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -339,7 +339,7 @@ long keyctl_update_key(key_serial_t id,
payload = NULL;
if (plen) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL);
+ payload = kvmalloc(plen, GFP_KERNEL);
if (!payload)
goto error;
@@ -360,7 +360,7 @@ long keyctl_update_key(key_serial_t id,
key_ref_put(key_ref);
error2:
- kzfree(payload);
+ __kvzfree(payload, plen);
error:
return ret;
}
@@ -798,6 +798,21 @@ error:
}
/*
+ * Call the read method
+ */
+static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen)
+{
+ long ret;
+
+ down_read(&key->sem);
+ ret = key_validate(key);
+ if (ret == 0)
+ ret = key->type->read(key, buffer, buflen);
+ up_read(&key->sem);
+ return ret;
+}
+
+/*
* Read a key's payload.
*
* The key must either grant the caller Read permission, or it must grant the
@@ -812,26 +827,28 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
struct key *key;
key_ref_t key_ref;
long ret;
+ char *key_data = NULL;
+ size_t key_data_len;
/* find the key first */
key_ref = lookup_user_key(keyid, 0, 0);
if (IS_ERR(key_ref)) {
ret = -ENOKEY;
- goto error;
+ goto out;
}
key = key_ref_to_ptr(key_ref);
ret = key_read_state(key);
if (ret < 0)
- goto error2; /* Negatively instantiated */
+ goto key_put_out; /* Negatively instantiated */
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
- goto error2;
+ goto key_put_out;
/* we can't; see if it's searchable from this process's keyrings
* - we automatically take account of the fact that it may be
@@ -839,26 +856,78 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
*/
if (!is_key_possessed(key_ref)) {
ret = -EACCES;
- goto error2;
+ goto key_put_out;
}
/* the key is probably readable - now try to read it */
can_read_key:
- ret = -EOPNOTSUPP;
- if (key->type->read) {
- /* Read the data with the semaphore held (since we might sleep)
- * to protect against the key being updated or revoked.
+ if (!key->type->read) {
+ ret = -EOPNOTSUPP;
+ goto key_put_out;
+ }
+
+ if (!buffer || !buflen) {
+ /* Get the key length from the read method */
+ ret = __keyctl_read_key(key, NULL, 0);
+ goto key_put_out;
+ }
+
+ /*
+ * Read the data with the semaphore held (since we might sleep)
+ * to protect against the key being updated or revoked.
+ *
+ * Allocate a temporary buffer to hold the key data before
+ * transferring it to the user buffer, to avoid a potential
+ * deadlock involving a page fault and mmap_sem.
+ *
+ * key_data_len = (buflen <= PAGE_SIZE)
+ * ? buflen : actual length of key data
+ *
+ * This prevents allocating an arbitrarily large buffer which can
+ * be much larger than the actual key length. In the latter case,
+ * at least two passes of this loop are required.
+ */
+ key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0;
+ for (;;) {
+ if (key_data_len) {
+ key_data = kvmalloc(key_data_len, GFP_KERNEL);
+ if (!key_data) {
+ ret = -ENOMEM;
+ goto key_put_out;
+ }
+ }
+
+ ret = __keyctl_read_key(key, key_data, key_data_len);
+
+ /*
+ * Read methods will just return the required length without
+ * any copying if the provided length isn't large enough.
+ */
+ if (ret <= 0 || ret > buflen)
+ break;
+
+ /*
+ * The key may change (unlikely) between two consecutive
+ * __keyctl_read_key() calls. In this case, we reallocate
+ * a larger buffer and redo the key read when
+ * key_data_len < ret <= buflen.
*/
- down_read(&key->sem);
- ret = key_validate(key);
- if (ret == 0)
- ret = key->type->read(key, buffer, buflen);
- up_read(&key->sem);
+ if (ret > key_data_len) {
+ if (unlikely(key_data))
+ __kvzfree(key_data, key_data_len);
+ key_data_len = ret;
+ continue; /* Allocate buffer */
+ }
+
+ if (copy_to_user(buffer, key_data, ret))
+ ret = -EFAULT;
+ break;
}
+ __kvzfree(key_data, key_data_len);
-error2:
+key_put_out:
key_put(key);
-error:
+out:
return ret;
}
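
The rewritten keyctl_read_key() above probes the required length first (a read with a NULL buffer), allocates a kernel-side buffer of that size, and retries if the key grew in between. A compact userspace model of the loop (a sketch, not part of the patch; read_key() stands in for __keyctl_read_key()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char key_payload[] = "example-payload";

/* Stand-in read method: always reports the required length, and
 * copies only when the supplied buffer is large enough. */
static long read_key(char *buffer, size_t buflen)
{
	long datalen = (long)sizeof(key_payload);

	if (buffer && buflen >= (size_t)datalen)
		memcpy(buffer, key_payload, datalen);
	return datalen;
}

int main(void)
{
	char *buf = NULL;
	size_t len = 0;
	long ret;

	for (;;) {
		ret = read_key(buf, len);
		if (ret <= 0 || (buf && (size_t)ret <= len))
			break;		/* success, or an error */
		free(buf);		/* too small: resize to ret and retry */
		len = (size_t)ret;
		buf = malloc(len);
		if (!buf)
			return 1;
	}
	if (ret > 0)
		printf("read %ld bytes: %s\n", ret, buf);
	free(buf);
	return 0;
}
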
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index febf36c6ddc5..5ca620d31cd3 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -459,7 +459,6 @@ static int keyring_read_iterator(const void *object, void *data)
{
struct keyring_read_iterator_context *ctx = data;
const struct key *key = keyring_ptr_to_key(object);
- int ret;
kenter("{%s,%d},,{%zu/%zu}",
key->type->name, key->serial, ctx->count, ctx->buflen);
@@ -467,10 +466,7 @@ static int keyring_read_iterator(const void *object, void *data)
if (ctx->count >= ctx->buflen)
return 1;
- ret = put_user(key->serial, ctx->buffer);
- if (ret < 0)
- return ret;
- ctx->buffer++;
+ *ctx->buffer++ = key->serial;
ctx->count += sizeof(key->serial);
return 0;
}
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index ecba39c93fd9..41e9735006d0 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -22,7 +22,7 @@ static int request_key_auth_instantiate(struct key *,
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_revoke(struct key *);
static void request_key_auth_destroy(struct key *);
-static long request_key_auth_read(const struct key *, char __user *, size_t);
+static long request_key_auth_read(const struct key *, char *, size_t);
/*
* The request-key authorisation key type definition.
@@ -80,7 +80,7 @@ static void request_key_auth_describe(const struct key *key,
* - the key's semaphore is read-locked
*/
static long request_key_auth_read(const struct key *key,
- char __user *buffer, size_t buflen)
+ char *buffer, size_t buflen)
{
struct request_key_auth *rka = dereference_key_locked(key);
size_t datalen;
@@ -97,8 +97,7 @@ static long request_key_auth_read(const struct key *key,
if (buflen > datalen)
buflen = datalen;
- if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
- ret = -EFAULT;
+ memcpy(buffer, rka->callout_info, buflen);
}
return ret;
diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
index d2c5ec1e040b..8001ab07e63b 100644
--- a/security/keys/trusted-keys/trusted_tpm1.c
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -1130,11 +1130,10 @@ out:
* trusted_read - copy the sealed blob data to userspace in hex.
* On success, return to userspace the trusted key datablob size.
*/
-static long trusted_read(const struct key *key, char __user *buffer,
+static long trusted_read(const struct key *key, char *buffer,
size_t buflen)
{
const struct trusted_key_payload *p;
- char *ascii_buf;
char *bufp;
int i;
@@ -1143,18 +1142,9 @@ static long trusted_read(const struct key *key, char __user *buffer,
return -EINVAL;
if (buffer && buflen >= 2 * p->blob_len) {
- ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL);
- if (!ascii_buf)
- return -ENOMEM;
-
- bufp = ascii_buf;
+ bufp = buffer;
for (i = 0; i < p->blob_len; i++)
bufp = hex_byte_pack(bufp, p->blob[i]);
- if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
- kzfree(ascii_buf);
- return -EFAULT;
- }
- kzfree(ascii_buf);
}
return 2 * p->blob_len;
}
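
trusted_read() above now hex-encodes straight into the caller's kernel buffer instead of bouncing through a temporary. A standalone sketch of the byte-to-hex packing (a local re-implementation for illustration, not the kernel's hex_byte_pack() itself):

#include <stdio.h>

static const char hex_asc[] = "0123456789abcdef";

/* Local stand-in for the kernel's hex_byte_pack() helper. */
static char *hex_byte_pack(char *buf, unsigned char byte)
{
	*buf++ = hex_asc[byte >> 4];
	*buf++ = hex_asc[byte & 0x0f];
	return buf;
}

int main(void)
{
	unsigned char blob[] = { 0xde, 0xad, 0xbe, 0xef };	/* toy blob */
	char out[2 * sizeof(blob) + 1];
	char *bufp = out;
	size_t i;

	for (i = 0; i < sizeof(blob); i++)
		bufp = hex_byte_pack(bufp, blob[i]);
	*bufp = '\0';
	printf("%s\n", out);	/* prints: deadbeef */
	return 0;
}
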
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 6f12de4ce549..07d4287e9084 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(user_describe);
* read the key data
* - the key's semaphore is read-locked
*/
-long user_read(const struct key *key, char __user *buffer, size_t buflen)
+long user_read(const struct key *key, char *buffer, size_t buflen)
{
const struct user_key_payload *upayload;
long ret;
@@ -181,8 +181,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
if (buflen > upayload->datalen)
buflen = upayload->datalen;
- if (copy_to_user(buffer, upayload->data, buflen) != 0)
- ret = -EFAULT;
+ memcpy(buffer, upayload->data, buflen);
}
return ret;
diff --git a/security/selinux/.gitignore b/security/selinux/.gitignore
index 2e5040a3d48b..168fae13ca5a 100644
--- a/security/selinux/.gitignore
+++ b/security/selinux/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
av_permissions.h
flask.h
diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore
index dc0f220a210b..9f300cdce362 100644
--- a/security/tomoyo/.gitignore
+++ b/security/tomoyo/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
builtin-policy.h
policy/*.conf
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index a86c95d89824..e81083e1bc68 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -38,7 +38,7 @@ int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct dma_slave_config config;
int ret;
- dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (!dma_params)
return 0;
@@ -47,7 +47,7 @@ int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
snd_dmaengine_pcm_set_config_from_dai_data(substream,
- snd_soc_dai_get_dma_data(rtd->cpu_dai, substream),
+ snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream),
&config);
ret = dmaengine_slave_config(chan, &config);
@@ -95,7 +95,7 @@ int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
runtime->hw = pxa2xx_pcm_hardware;
- dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (!dma_params)
return 0;
@@ -120,7 +120,7 @@ int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
return ret;
return snd_dmaengine_pcm_open(
- substream, dma_request_slave_channel(rtd->cpu_dai->dev,
+ substream, dma_request_slave_channel(asoc_rtd_to_cpu(rtd, 0)->dev,
dma_params->chan_name));
}
EXPORT_SYMBOL(pxa2xx_pcm_open);
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 9de1c9a0173e..509290f2efa8 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -488,6 +488,48 @@ out:
}
#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
+int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
+{
+ struct snd_dma_buffer *dmab;
+ int ret;
+
+ if (snd_BUG_ON(!stream || !stream->runtime))
+ return -EINVAL;
+ dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
+ if (!dmab)
+ return -ENOMEM;
+ dmab->dev = stream->dma_buffer.dev;
+ ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
+ if (ret < 0) {
+ kfree(dmab);
+ return ret;
+ }
+
+ snd_compr_set_runtime_buffer(stream, dmab);
+ stream->runtime->dma_bytes = size;
+ return 1;
+}
+EXPORT_SYMBOL(snd_compr_malloc_pages);
+
+int snd_compr_free_pages(struct snd_compr_stream *stream)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+
+ if (snd_BUG_ON(!stream || !stream->runtime))
+ return -EINVAL;
+ if (runtime->dma_area == NULL)
+ return 0;
+ if (runtime->dma_buffer_p != &stream->dma_buffer) {
+ /* It's a newly allocated buffer. Release it now. */
+ snd_dma_free_pages(runtime->dma_buffer_p);
+ kfree(runtime->dma_buffer_p);
+ }
+
+ snd_compr_set_runtime_buffer(stream, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(snd_compr_free_pages);
+
/* revisit this with snd_pcm_preallocate_xxx */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
struct snd_compr_params *params)
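
A hedged usage sketch for the two helpers exported above, pairing them in a compress driver's set_params/free path. This is an assumed driver fragment rather than a buildable driver; the callback names are hypothetical, and it presumes stream->dma_buffer.dev was initialized beforehand:

#include <sound/compress_driver.h>

static int my_compr_set_params(struct snd_compr_stream *stream,
			       struct snd_compr_params *params)
{
	size_t size = params->buffer.fragment_size * params->buffer.fragments;

	/* Returns 1 on a fresh allocation, a negative errno on failure. */
	return snd_compr_malloc_pages(stream, size);
}

static int my_compr_free(struct snd_compr_stream *stream)
{
	/* Safe to call even if nothing was allocated. */
	return snd_compr_free_pages(stream);
}
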
diff --git a/sound/core/device.c b/sound/core/device.c
index cdc5af526739..bf0b04a7ee79 100644
--- a/sound/core/device.c
+++ b/sound/core/device.c
@@ -237,3 +237,24 @@ void snd_device_free_all(struct snd_card *card)
list_for_each_entry_safe_reverse(dev, next, &card->devices, list)
__snd_device_free(dev);
}
+
+/**
+ * snd_device_get_state - Get the current state of the given device
+ * @card: the card instance
+ * @device_data: the data pointer of the device to look up
+ *
+ * Returns the current state of the given device object. For a valid
+ * device, either @SNDRV_DEV_BUILD, @SNDRV_DEV_REGISTERED or
+ * @SNDRV_DEV_DISCONNECTED is returned; for a non-existing device, -1
+ * is returned as an error.
+ */
+int snd_device_get_state(struct snd_card *card, void *device_data)
+{
+ struct snd_device *dev;
+
+ dev = look_for_dev(card, device_data);
+ if (dev)
+ return dev->state;
+ return -1;
+}
+EXPORT_SYMBOL_GPL(snd_device_get_state);
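
A brief hedged sketch of a possible caller of the new helper, checking whether a component already reached the registered state (the surrounding driver context and helper name are assumed):

#include <sound/core.h>

/* Hypothetical fragment: device_data is the object that was handed to
 * snd_device_new() earlier. */
static bool my_component_is_registered(struct snd_card *card,
				       void *device_data)
{
	return snd_device_get_state(card, device_data) == SNDRV_DEV_REGISTERED;
}
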
diff --git a/sound/core/info.c b/sound/core/info.c
index ca87ae4c30ba..8c6bc5241df5 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -604,7 +604,7 @@ int snd_info_card_free(struct snd_card *card)
*/
int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
{
- int c = -1;
+ int c;
if (snd_BUG_ON(!buffer || !buffer->buffer))
return 1;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 13db77771f0f..930def8201f4 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -884,20 +884,17 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
sformat = snd_pcm_plug_slave_format(format, sformat_mask);
if ((__force int)sformat < 0 ||
- !snd_mask_test(sformat_mask, (__force int)sformat)) {
- for (sformat = (__force snd_pcm_format_t)0;
- (__force int)sformat <= (__force int)SNDRV_PCM_FORMAT_LAST;
- sformat = (__force snd_pcm_format_t)((__force int)sformat + 1)) {
- if (snd_mask_test(sformat_mask, (__force int)sformat) &&
+ !snd_mask_test_format(sformat_mask, sformat)) {
+ pcm_for_each_format(sformat) {
+ if (snd_mask_test_format(sformat_mask, sformat) &&
snd_pcm_oss_format_to(sformat) >= 0)
- break;
- }
- if ((__force int)sformat > (__force int)SNDRV_PCM_FORMAT_LAST) {
- pcm_dbg(substream->pcm, "Cannot find a format!!!\n");
- err = -EINVAL;
- goto failure;
+ goto format_found;
}
+ pcm_dbg(substream->pcm, "Cannot find a format!!!\n");
+ err = -EINVAL;
+ goto failure;
}
+ format_found:
err = _snd_pcm_hw_param_set(sparams, SNDRV_PCM_HW_PARAM_FORMAT, (__force int)sformat, 0);
if (err < 0)
goto failure;
@@ -1220,8 +1217,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
if (ret < 0)
break;
}
+ mutex_unlock(&runtime->oss.params_lock);
ret = __snd_pcm_lib_xfer(substream, (void *)ptr, true,
frames, in_kernel);
+ mutex_lock(&runtime->oss.params_lock);
if (ret != -EPIPE && ret != -ESTRPIPE)
break;
/* test, if we can't store new data, because the stream */
@@ -1257,8 +1256,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
ret = snd_pcm_oss_capture_position_fixup(substream, &delay);
if (ret < 0)
break;
+ mutex_unlock(&runtime->oss.params_lock);
ret = __snd_pcm_lib_xfer(substream, (void *)ptr, true,
frames, in_kernel);
+ mutex_lock(&runtime->oss.params_lock);
if (ret == -EPIPE) {
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
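
The pcm_oss.c hunks above release params_lock around __snd_pcm_lib_xfer(), which may block, and retake it afterwards. A standalone model of that unlock/call/relock pattern, with pthreads standing in for the kernel mutex API (a sketch, not part of the patch):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __snd_pcm_lib_xfer(): may sleep, so it must not be
 * called with params_lock held. */
static long do_transfer(void)
{
	usleep(1000);
	return 0;
}

int main(void)
{
	long ret;

	pthread_mutex_lock(&params_lock);
	/* ... inspect and adjust parameters under the lock ... */
	pthread_mutex_unlock(&params_lock);
	ret = do_transfer();		/* blocking work with the lock dropped */
	pthread_mutex_lock(&params_lock);
	/* ... re-validate: state may have changed while unlocked ... */
	pthread_mutex_unlock(&params_lock);
	printf("xfer ret=%ld\n", ret);
	return 0;
}
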
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 752d078908e9..fbda4ebf38b3 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -196,82 +196,74 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
return 0;
}
-snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames)
+static snd_pcm_sframes_t calc_dst_frames(struct snd_pcm_substream *plug,
+ snd_pcm_sframes_t frames)
{
- struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
- int stream;
+ struct snd_pcm_plugin *plugin, *plugin_next;
- if (snd_BUG_ON(!plug))
- return -ENXIO;
- if (drv_frames == 0)
- return 0;
- stream = snd_pcm_plug_stream(plug);
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- plugin = snd_pcm_plug_last(plug);
- while (plugin && drv_frames > 0) {
- if (drv_frames > plugin->buf_frames)
- drv_frames = plugin->buf_frames;
- plugin_prev = plugin->prev;
- if (plugin->src_frames)
- drv_frames = plugin->src_frames(plugin, drv_frames);
- plugin = plugin_prev;
+ plugin = snd_pcm_plug_first(plug);
+ while (plugin && frames > 0) {
+ plugin_next = plugin->next;
+ if (plugin->dst_frames) {
+ frames = plugin->dst_frames(plugin, frames);
+ if (frames < 0)
+ return frames;
}
- } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
- plugin = snd_pcm_plug_first(plug);
- while (plugin && drv_frames > 0) {
- plugin_next = plugin->next;
- if (plugin->dst_frames)
- drv_frames = plugin->dst_frames(plugin, drv_frames);
- if (drv_frames > plugin->buf_frames)
- drv_frames = plugin->buf_frames;
- plugin = plugin_next;
+ if (frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
+ plugin = plugin_next;
+ }
+ return frames;
+}
+
+static snd_pcm_sframes_t calc_src_frames(struct snd_pcm_substream *plug,
+ snd_pcm_sframes_t frames)
+{
+ struct snd_pcm_plugin *plugin, *plugin_prev;
+
+ plugin = snd_pcm_plug_last(plug);
+ while (plugin && frames > 0) {
+ if (frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
+ plugin_prev = plugin->prev;
+ if (plugin->src_frames) {
+ frames = plugin->src_frames(plugin, frames);
+ if (frames < 0)
+ return frames;
}
- } else
+ plugin = plugin_prev;
+ }
+ return frames;
+}
+
+snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames)
+{
+ if (snd_BUG_ON(!plug))
+ return -ENXIO;
+ switch (snd_pcm_plug_stream(plug)) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ return calc_src_frames(plug, drv_frames);
+ case SNDRV_PCM_STREAM_CAPTURE:
+ return calc_dst_frames(plug, drv_frames);
+ default:
snd_BUG();
- return drv_frames;
+ return -EINVAL;
+ }
}
snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames)
{
- struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
- snd_pcm_sframes_t frames;
- int stream;
-
if (snd_BUG_ON(!plug))
return -ENXIO;
- if (clt_frames == 0)
- return 0;
- frames = clt_frames;
- stream = snd_pcm_plug_stream(plug);
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- plugin = snd_pcm_plug_first(plug);
- while (plugin && frames > 0) {
- plugin_next = plugin->next;
- if (plugin->dst_frames) {
- frames = plugin->dst_frames(plugin, frames);
- if (frames < 0)
- return frames;
- }
- if (frames > plugin->buf_frames)
- frames = plugin->buf_frames;
- plugin = plugin_next;
- }
- } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
- plugin = snd_pcm_plug_last(plug);
- while (plugin) {
- if (frames > plugin->buf_frames)
- frames = plugin->buf_frames;
- plugin_prev = plugin->prev;
- if (plugin->src_frames) {
- frames = plugin->src_frames(plugin, frames);
- if (frames < 0)
- return frames;
- }
- plugin = plugin_prev;
- }
- } else
+ switch (snd_pcm_plug_stream(plug)) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ return calc_dst_frames(plug, clt_frames);
+ case SNDRV_PCM_STREAM_CAPTURE:
+ return calc_src_frames(plug, clt_frames);
+ default:
snd_BUG();
- return frames;
+ return -EINVAL;
+ }
}
static int snd_pcm_plug_formats(const struct snd_mask *mask,
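
The refactor above folds four near-identical chain walks into calc_dst_frames()/calc_src_frames(). A standalone model of the forward walk (a sketch, not part of the patch): each stage may rescale the frame count and then clamps it to its buffer size; the 2:1 rate stage is hypothetical.

#include <stdio.h>

struct stage {
	long (*dst_frames)(long frames);	/* may be NULL */
	long buf_frames;
};

static long halve(long frames)
{
	return frames / 2;			/* e.g. a 96k -> 48k resampler */
}

static long calc_dst_frames(const struct stage *chain, int n, long frames)
{
	int i;

	for (i = 0; i < n && frames > 0; i++) {
		if (chain[i].dst_frames) {
			frames = chain[i].dst_frames(frames);
			if (frames < 0)
				return frames;	/* propagate errors */
		}
		if (frames > chain[i].buf_frames)
			frames = chain[i].buf_frames;
	}
	return frames;
}

int main(void)
{
	const struct stage chain[] = {
		{ halve, 4096 },	/* hypothetical rate converter */
		{ NULL,  1024 },	/* copy stage with a smaller buffer */
	};

	printf("%ld\n", calc_dst_frames(chain, 2, 8192));	/* prints 1024 */
	return 0;
}
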
diff --git a/sound/core/oss/rate.c b/sound/core/oss/rate.c
index 7cd09cef6961..d381f4c967c9 100644
--- a/sound/core/oss/rate.c
+++ b/sound/core/oss/rate.c
@@ -47,7 +47,7 @@ struct rate_priv {
unsigned int pos;
rate_f func;
snd_pcm_sframes_t old_src_frames, old_dst_frames;
- struct rate_channel channels[0];
+ struct rate_channel channels[];
};
static void rate_init(struct snd_pcm_plugin *plugin)
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index a141a301369f..b6d2331a82f7 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -1019,7 +1019,7 @@ static ssize_t show_pcm_class(struct device *dev,
str = "none";
else
str = strs[pcm->dev_class];
- return snprintf(buf, PAGE_SIZE, "%s\n", str);
+ return sprintf(buf, "%s\n", str);
}
static DEVICE_ATTR(pcm_class, 0444, show_pcm_class, NULL);
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 5749a8a49784..4d059ff2b2e4 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -240,6 +240,7 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
{
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct dma_tx_state state;
enum dma_status status;
unsigned int buf_size;
@@ -250,9 +251,12 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
buf_size = snd_pcm_lib_buffer_bytes(substream);
if (state.residue > 0 && state.residue <= buf_size)
pos = buf_size - state.residue;
+
+ runtime->delay = bytes_to_frames(runtime,
+ state.in_flight_bytes);
}
- return bytes_to_frames(substream->runtime, pos);
+ return bytes_to_frames(runtime, pos);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
@@ -426,7 +430,7 @@ int snd_dmaengine_pcm_refine_runtime_hwparams(
* default assumption is that it supports 1, 2 and 4 bytes
* widths.
*/
- for (i = SNDRV_PCM_FORMAT_FIRST; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+ pcm_for_each_format(i) {
int bits = snd_pcm_format_physical_width(i);
/*
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index a6a541511534..257d412eac5d 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -42,6 +42,11 @@ struct pcm_format_data {
/* we do lots of calculations on snd_pcm_format_t; shut up sparse */
#define INT __force int
+static bool valid_format(snd_pcm_format_t format)
+{
+ return (INT)format >= 0 && (INT)format <= (INT)SNDRV_PCM_FORMAT_LAST;
+}
+
static const struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
[SNDRV_PCM_FORMAT_S8] = {
.width = 8, .phys = 8, .le = -1, .signd = 1,
@@ -259,7 +264,7 @@ static const struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] =
int snd_pcm_format_signed(snd_pcm_format_t format)
{
int val;
- if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
+ if (!valid_format(format))
return -EINVAL;
if ((val = pcm_formats[(INT)format].signd) < 0)
return -EINVAL;
@@ -307,7 +312,7 @@ EXPORT_SYMBOL(snd_pcm_format_linear);
int snd_pcm_format_little_endian(snd_pcm_format_t format)
{
int val;
- if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
+ if (!valid_format(format))
return -EINVAL;
if ((val = pcm_formats[(INT)format].le) < 0)
return -EINVAL;
@@ -343,7 +348,7 @@ EXPORT_SYMBOL(snd_pcm_format_big_endian);
int snd_pcm_format_width(snd_pcm_format_t format)
{
int val;
- if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
+ if (!valid_format(format))
return -EINVAL;
if ((val = pcm_formats[(INT)format].width) == 0)
return -EINVAL;
@@ -361,7 +366,7 @@ EXPORT_SYMBOL(snd_pcm_format_width);
int snd_pcm_format_physical_width(snd_pcm_format_t format)
{
int val;
- if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
+ if (!valid_format(format))
return -EINVAL;
if ((val = pcm_formats[(INT)format].phys) == 0)
return -EINVAL;
@@ -394,7 +399,7 @@ EXPORT_SYMBOL(snd_pcm_format_size);
*/
const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format)
{
- if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
+ if (!valid_format(format))
return NULL;
if (! pcm_formats[(INT)format].phys)
return NULL;
@@ -418,7 +423,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
unsigned char *dst;
const unsigned char *pat;
- if ((INT)format < 0 || (INT)format > (INT)SNDRV_PCM_FORMAT_LAST)
+ if (!valid_format(format))
return -EINVAL;
if (samples == 0)
return 0;
@@ -474,32 +479,32 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
EXPORT_SYMBOL(snd_pcm_format_set_silence);
/**
- * snd_pcm_limit_hw_rates - determine rate_min/rate_max fields
- * @runtime: the runtime instance
+ * snd_pcm_hw_limit_rates - determine rate_min/rate_max fields
+ * @hw: the pcm hw instance
*
* Determines the rate_min and rate_max fields from the rates bits of
- * the given runtime->hw.
+ * the given hw.
*
* Return: Zero if successful.
*/
-int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime)
+int snd_pcm_hw_limit_rates(struct snd_pcm_hardware *hw)
{
int i;
for (i = 0; i < (int)snd_pcm_known_rates.count; i++) {
- if (runtime->hw.rates & (1 << i)) {
- runtime->hw.rate_min = snd_pcm_known_rates.list[i];
+ if (hw->rates & (1 << i)) {
+ hw->rate_min = snd_pcm_known_rates.list[i];
break;
}
}
for (i = (int)snd_pcm_known_rates.count - 1; i >= 0; i--) {
- if (runtime->hw.rates & (1 << i)) {
- runtime->hw.rate_max = snd_pcm_known_rates.list[i];
+ if (hw->rates & (1 << i)) {
+ hw->rate_max = snd_pcm_known_rates.list[i];
break;
}
}
return 0;
}
-EXPORT_SYMBOL(snd_pcm_limit_hw_rates);
+EXPORT_SYMBOL(snd_pcm_hw_limit_rates);
/**
* snd_pcm_rate_to_rate_bit - converts sample rate to SNDRV_PCM_RATE_xxx bit
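
The conversions above replace open-coded 0..SNDRV_PCM_FORMAT_LAST loops with pcm_for_each_format(). A simplified standalone sketch of the iteration-helper idea it is assumed to follow, keeping the __force casts a sparse __bitwise type needs inside one macro (not part of the patch):

#include <stdio.h>

#ifndef __force
#define __force			/* sparse annotation; a no-op for the compiler */
#endif

typedef int snd_pcm_format_t;	/* __bitwise in the real headers */

#define FORMAT_FIRST ((snd_pcm_format_t)0)
#define FORMAT_LAST  ((snd_pcm_format_t)3)	/* tiny stand-in range */

#define sketch_for_each_format(f) \
	for ((f) = FORMAT_FIRST; \
	     (__force int)(f) <= (__force int)FORMAT_LAST; \
	     (f) = (snd_pcm_format_t)((__force int)(f) + 1))

int main(void)
{
	snd_pcm_format_t f;

	sketch_for_each_format(f)
		printf("format %d\n", (__force int)f);
	return 0;
}
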
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index cbdf061667fa..aef860256278 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -228,6 +228,9 @@ int snd_pcm_info_user(struct snd_pcm_substream *substream,
return err;
}
+/* macro for a simplified __force cast */
+#define PARAM_MASK_BIT(b) (1U << (__force int)(b))
+
static bool hw_support_mmap(struct snd_pcm_substream *substream)
{
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
@@ -257,7 +260,7 @@ static int constrain_mask_params(struct snd_pcm_substream *substream,
return -EINVAL;
/* This parameter is not requested to change by a caller. */
- if (!(params->rmask & (1 << k)))
+ if (!(params->rmask & PARAM_MASK_BIT(k)))
continue;
if (trace_hw_mask_param_enabled())
@@ -271,7 +274,7 @@ static int constrain_mask_params(struct snd_pcm_substream *substream,
/* Set corresponding flag so that the caller gets it. */
trace_hw_mask_param(substream, k, 0, &old_mask, m);
- params->cmask |= 1 << k;
+ params->cmask |= PARAM_MASK_BIT(k);
}
return 0;
@@ -293,7 +296,7 @@ static int constrain_interval_params(struct snd_pcm_substream *substream,
return -EINVAL;
/* This parameter is not requested to change by a caller. */
- if (!(params->rmask & (1 << k)))
+ if (!(params->rmask & PARAM_MASK_BIT(k)))
continue;
if (trace_hw_interval_param_enabled())
@@ -307,7 +310,7 @@ static int constrain_interval_params(struct snd_pcm_substream *substream,
/* Set corresponding flag so that the caller gets it. */
trace_hw_interval_param(substream, k, 0, &old_interval, i);
- params->cmask |= 1 << k;
+ params->cmask |= PARAM_MASK_BIT(k);
}
return 0;
@@ -349,7 +352,7 @@ static int constrain_params_by_rules(struct snd_pcm_substream *substream,
* have 0 so that the parameters are never changed anymore.
*/
for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
- vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
+ vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;
/* Due to the above design, actual sequence number starts at 2. */
stamp = 2;
@@ -417,7 +420,7 @@ retry:
hw_param_interval(params, r->var));
}
- params->cmask |= (1 << r->var);
+ params->cmask |= PARAM_MASK_BIT(r->var);
vstamps[r->var] = stamp;
again = true;
}
@@ -486,9 +489,9 @@ int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
params->info = 0;
params->fifo_size = 0;
- if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
+ if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
params->msbits = 0;
- if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
+ if (params->rmask & PARAM_MASK_BIT(SNDRV_PCM_HW_PARAM_RATE)) {
params->rate_num = 0;
params->rate_den = 0;
}
@@ -2293,21 +2296,21 @@ static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
- unsigned int k;
+ snd_pcm_format_t k;
const struct snd_interval *i =
hw_param_interval_c(params, rule->deps[0]);
struct snd_mask m;
struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
snd_mask_any(&m);
- for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
+ pcm_for_each_format(k) {
int bits;
- if (! snd_mask_test(mask, k))
+ if (!snd_mask_test_format(mask, k))
continue;
bits = snd_pcm_format_physical_width(k);
if (bits <= 0)
continue; /* ignore invalid formats */
if ((unsigned)bits < i->min || (unsigned)bits > i->max)
- snd_mask_reset(&m, k);
+ snd_mask_reset(&m, (__force unsigned)k);
}
return snd_mask_refine(mask, &m);
}
@@ -2316,14 +2319,15 @@ static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval t;
- unsigned int k;
+ snd_pcm_format_t k;
+
t.min = UINT_MAX;
t.max = 0;
t.openmin = 0;
t.openmax = 0;
- for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
+ pcm_for_each_format(k) {
int bits;
- if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
+ if (!snd_mask_test_format(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
continue;
bits = snd_pcm_format_physical_width(k);
if (bits <= 0)
@@ -2505,16 +2509,16 @@ static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
unsigned int mask = 0;
if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_INTERLEAVED);
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
if (hw_support_mmap(substream)) {
if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
- mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED);
if (hw->info & SNDRV_PCM_INFO_COMPLEX)
- mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
+ mask |= PARAM_MASK_BIT(SNDRV_PCM_ACCESS_MMAP_COMPLEX);
}
err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
if (err < 0)
@@ -2524,7 +2528,8 @@ static int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
if (err < 0)
return err;
- err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
+ err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT,
+ PARAM_MASK_BIT(SNDRV_PCM_SUBFORMAT_STD));
if (err < 0)
return err;
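
PARAM_MASK_BIT() above keeps the 1U << cast for the rmask/cmask protocol in one place: userspace sets rmask bits for the parameters it wants refined, and the kernel sets the matching cmask bit for each one it actually changed. A toy model of that protocol (a sketch, not part of the patch):

#include <stdio.h>

#define PARAM_MASK_BIT(b) (1U << (int)(b))

enum { HW_PARAM_FORMAT, HW_PARAM_RATE, HW_PARAM_CHANNELS };

struct toy_params {
	unsigned int rmask;	/* in: parameters to refine */
	unsigned int cmask;	/* out: parameters that changed */
	unsigned int rate;
};

static void refine(struct toy_params *p)
{
	if ((p->rmask & PARAM_MASK_BIT(HW_PARAM_RATE)) && p->rate > 48000) {
		p->rate = 48000;	/* clamp to a hardware limit */
		p->cmask |= PARAM_MASK_BIT(HW_PARAM_RATE);
	}
}

int main(void)
{
	struct toy_params p = { .rmask = PARAM_MASK_BIT(HW_PARAM_RATE),
				.rate = 96000 };

	refine(&p);
	printf("rate=%u changed=%d\n", p.rate,
	       !!(p.cmask & PARAM_MASK_BIT(HW_PARAM_RATE)));
	return 0;
}
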
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index d78a27271d6d..251eaf1152e2 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -118,7 +118,7 @@ struct loopback_cable {
struct loopback_setup {
unsigned int notify: 1;
unsigned int rate_shift;
- unsigned int format;
+ snd_pcm_format_t format;
unsigned int rate;
unsigned int channels;
struct snd_ctl_elem_id active_id;
@@ -1432,7 +1432,7 @@ static int loopback_format_info(struct snd_kcontrol *kcontrol,
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
- uinfo->value.integer.max = SNDRV_PCM_FORMAT_LAST;
+ uinfo->value.integer.max = (__force int)SNDRV_PCM_FORMAT_LAST;
uinfo->value.integer.step = 1;
return 0;
}
@@ -1443,7 +1443,7 @@ static int loopback_format_get(struct snd_kcontrol *kcontrol,
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] =
- loopback->setup[kcontrol->id.subdevice]
+ (__force int)loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].format;
return 0;
}
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 02ac3f4e0c02..b5486de08b97 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -901,10 +901,10 @@ static int snd_card_dummy_new_mixer(struct snd_dummy *dummy)
static void print_formats(struct snd_dummy *dummy,
struct snd_info_buffer *buffer)
{
- int i;
+ snd_pcm_format_t i;
- for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
- if (dummy->pcm_hw.formats & (1ULL << i))
+ pcm_for_each_format(i) {
+ if (dummy->pcm_hw.formats & pcm_format_to_bits(i))
snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
}
}
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 976d8cb9a34f..2c8e3392a490 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -509,7 +509,7 @@ MODULE_DEVICE_TABLE(ieee1394, bebob_id_table);
static struct fw_driver bebob_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "snd-bebob",
+ .name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = bebob_probe,
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5fc0e7c024..c84b913a9fe0 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -192,7 +192,7 @@ MODULE_DEVICE_TABLE(ieee1394, snd_dg00x_id_table);
static struct fw_driver dg00x_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "snd-firewire-digi00x",
+ .name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = snd_dg00x_probe,
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index f5a016560eb8..b62a4fd22407 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(ieee1394, snd_ff_id_table);
static struct fw_driver ff_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "snd-fireface",
+ .name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = snd_ff_probe,
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 134fc9ee26b9..b1cc013a3540 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -362,7 +362,7 @@ MODULE_DEVICE_TABLE(ieee1394, efw_id_table);
static struct fw_driver efw_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "snd-fireworks",
+ .name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = efw_probe,
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index c29a97f6f638..6f38335fe10b 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -17,6 +17,7 @@
static long tscm_hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
long count, loff_t *offset)
+ __releases(&tscm->lock)
{
struct snd_firewire_event_lock_status event = {
.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
@@ -36,6 +37,7 @@ static long tscm_hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
static long tscm_hwdep_read_queue(struct snd_tscm *tscm, char __user *buf,
long remained, loff_t *offset)
+ __releases(&tscm->lock)
{
char __user *pos = buf;
unsigned int type = SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL;
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index addc464503bc..5dac0d9fc58e 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(ieee1394, snd_tscm_id_table);
static struct fw_driver tscm_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "snd-firewire-tascam",
+ .name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = snd_tscm_probe,
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 9a526aeef8da..e3119f5cb0d5 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_set_chip_name);
*/
int snd_hdac_codec_modalias(struct hdac_device *codec, char *buf, size_t size)
{
- return snprintf(buf, size, "hdaudio:v%08Xr%08Xa%02X\n",
+ return scnprintf(buf, size, "hdaudio:v%08Xr%08Xa%02X\n",
codec->vendor_id, codec->revision_id, codec->type);
}
EXPORT_SYMBOL_GPL(snd_hdac_codec_modalias);
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index e377ac93f37f..8e8257c574b0 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -435,7 +435,7 @@ enum {
#define LOOP_WRITE(rec, offset, _buf, count, mode) \
do { \
struct snd_emu8000 *emu = (rec)->emu; \
- unsigned short *buf = (unsigned short *)(_buf); \
+ unsigned short *buf = (__force unsigned short *)(_buf); \
snd_emu8000_write_wait(emu, 1); \
EMU8000_SMALW_WRITE(emu, offset); \
while (count > 0) { \
@@ -492,7 +492,7 @@ static int emu8k_pcm_silence(struct snd_pcm_substream *subs,
#define LOOP_WRITE(rec, pos, _buf, count, mode) \
do { \
struct snd_emu8000 *emu = rec->emu; \
- unsigned short *buf = (unsigned short *)(_buf); \
+ unsigned short *buf = (__force unsigned short *)(_buf); \
snd_emu8000_write_wait(emu, 1); \
EMU8000_SMALW_WRITE(emu, pos + rec->loop_start[0]); \
if (rec->voices > 1) \
diff --git a/sound/oss/.gitignore b/sound/oss/.gitignore
index 12a3920d6fb6..ac678430408b 100644
--- a/sound/oss/.gitignore
+++ b/sound/oss/.gitignore
@@ -1,3 +1,3 @@
-#Ignore generated files
+# SPDX-License-Identifier: GPL-2.0-only
pss_boot.h
trix_boot.h
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 4f524a9dbbca..4462375d2d82 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1070,7 +1070,7 @@ static int snd_ali_trigger(struct snd_pcm_substream *substream,
{
struct snd_ali *codec = snd_pcm_substream_chip(substream);
struct snd_pcm_substream *s;
- unsigned int what, whati, capture_flag;
+ unsigned int what, whati;
struct snd_ali_voice *pvoice, *evoice;
unsigned int val;
int do_start;
@@ -1088,7 +1088,7 @@ static int snd_ali_trigger(struct snd_pcm_substream *substream,
return -EINVAL;
}
- what = whati = capture_flag = 0;
+ what = whati = 0;
snd_pcm_group_for_each_entry(s, substream) {
if ((struct snd_ali *) snd_pcm_substream_chip(s) == codec) {
pvoice = s->runtime->private_data;
@@ -1110,8 +1110,6 @@ static int snd_ali_trigger(struct snd_pcm_substream *substream,
evoice->running = 0;
}
snd_pcm_trigger_done(s, substream);
- if (pvoice->mode)
- capture_flag = 1;
}
}
spin_lock(&codec->reg_lock);
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index a89a7e603ca8..6ff581733a19 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1789,6 +1789,7 @@ int snd_emu10k1_create(struct snd_card *card,
int idx, err;
int is_audigy;
size_t page_table_size;
+ __le32 *pgtbl;
unsigned int silent_page;
const struct snd_emu_chip_details *c;
static const struct snd_device_ops ops = {
@@ -2009,8 +2010,9 @@ int snd_emu10k1_create(struct snd_card *card,
/* Clear silent pages and set up pointers */
memset(emu->silent_page.area, 0, emu->silent_page.bytes);
silent_page = emu->silent_page.addr << emu->address_mode;
+ pgtbl = (__le32 *)emu->ptb_pages.area;
for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
- ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
+ pgtbl[idx] = cpu_to_le32(silent_page | idx);
/* set up voice indices */
for (idx = 0; idx < NUM_G; idx++) {
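The emu10k1 hunk above hoists the (__le32 *) cast out of the loop: with a correctly typed pgtbl pointer, sparse can verify that every page-table entry passes through cpu_to_le32() before it is handed to the DMA engine. A userspace sketch of the idiom, where to_le32() is an illustrative stand-in for the kernel's cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

/* to_le32() stands in for cpu_to_le32(): a no-op on little-endian hosts,
 * a byte swap on big-endian ones, so entries land in memory in the byte
 * order the device expects regardless of CPU endianness.
 */
static uint32_t to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

int main(void)
{
	uint32_t pgtbl[8];		/* stands in for emu->ptb_pages.area */
	uint32_t silent_page = 0x1000;
	unsigned int idx;

	/* One typed pointer, one conversion per store: harder to forget
	 * the byte swap on any single assignment site.
	 */
	for (idx = 0; idx < 8; idx++)
		pgtbl[idx] = to_le32(silent_page | idx);

	printf("entry 3 = 0x%08x\n", pgtbl[3]);
	return 0;
}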
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index bd48335d09d7..e1d3082a4fe9 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -184,6 +184,7 @@ comment "Set to Y if you want auto-loading the codec driver"
config SND_HDA_CODEC_CA0132_DSP
bool "Support new DSP code for CA0132 codec"
depends on SND_HDA_CODEC_CA0132
+ default y
select SND_HDA_DSP_LOADER
select FW_LOADER
help
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 53e7732ef752..a34a2c9f4bcf 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -88,7 +88,7 @@ struct hda_conn_list {
struct list_head list;
int len;
hda_nid_t nid;
- hda_nid_t conns[0];
+ hda_nid_t conns[];
};
/* look up the cached results */
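The conns[0] -> conns[] conversions in this patch replace GNU zero-length arrays with C99 flexible array members, which lets the compiler and fortification checks see the array as a trailing variable-length region rather than as an array of size zero. A self-contained sketch of how such a structure is allocated and filled:

#include <stdio.h>
#include <stdlib.h>

/* conn_list mirrors the shape of the patched structs: fixed header
 * fields followed by a C99 flexible array member.
 */
struct conn_list {
	int len;
	unsigned short conns[];		/* was conns[0] in the old style */
};

int main(void)
{
	int n = 4, i;
	struct conn_list *c = malloc(sizeof(*c) + n * sizeof(c->conns[0]));

	if (!c)
		return 1;
	c->len = n;
	for (i = 0; i < n; i++)
		c->conns[i] = (unsigned short)(0x10 + i);
	printf("last of %d entries: 0x%x\n", c->len, c->conns[c->len - 1]);
	free(c);
	return 0;
}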
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 2609e391ce54..9765652a73d7 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -373,7 +373,7 @@ static int azx_get_sync_time(ktime_t *device,
u32 wallclk_ctr, wallclk_cycles;
bool direction;
u32 dma_select;
- u32 timeout = 200;
+ u32 timeout;
u32 retry_count = 0;
runtime = substream->runtime;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index ded8bc07d755..34fe753a46fb 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1180,6 +1180,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
{}
@@ -2698,7 +2699,7 @@ struct dsp_image_seg {
u32 magic;
u32 chip_addr;
u32 count;
- u32 data[0];
+ u32 data[];
};
static const u32 g_magic_value = 0x4c46584d;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5119a9ae3d8a..bb287a916dae 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -154,7 +154,6 @@ struct hdmi_spec {
struct hda_multi_out multiout;
struct hda_pcm_stream pcm_playback;
- bool use_jack_detect; /* jack detection enabled */
bool use_acomp_notifier; /* use eld_notify callback for hotplug */
bool acomp_registered; /* audio component registered in this driver */
struct drm_audio_component_audio_ops drm_audio_ops;
@@ -753,7 +752,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
* Unsolicited events
*/
-static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
int dev_id)
@@ -764,8 +763,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
if (pin_idx < 0)
return;
mutex_lock(&spec->pcm_lock);
- if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
- snd_hda_jack_report_sync(codec);
+ hdmi_present_sense(get_pin(spec, pin_idx), 1);
mutex_unlock(&spec->pcm_lock);
}
@@ -779,21 +777,9 @@ static void jack_callback(struct hda_codec *codec,
check_presence_and_report(codec, jack->nid, jack->dev_id);
}
-static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res,
+ struct hda_jack_tbl *jack)
{
- int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
- struct hda_jack_tbl *jack;
-
- if (codec->dp_mst) {
- int dev_entry =
- (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
-
- jack = snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
- } else {
- jack = snd_hda_jack_tbl_get_from_tag(codec, tag, 0);
- }
- if (!jack)
- return;
jack->jack_dirty = 1;
codec_dbg(codec,
@@ -853,7 +839,7 @@ static void hdmi_unsol_event(struct hda_codec *codec, unsigned int res)
}
if (subtag == 0)
- hdmi_intrinsic_event(codec, res);
+ hdmi_intrinsic_event(codec, res, jack);
else
hdmi_non_intrinsic_event(codec, res);
}
@@ -1480,21 +1466,60 @@ static void hdmi_pcm_reset_pin(struct hdmi_spec *spec,
per_pin->channels = 0;
}
+static struct snd_jack *pin_idx_to_pcm_jack(struct hda_codec *codec,
+ struct hdmi_spec_per_pin *per_pin)
+{
+ struct hdmi_spec *spec = codec->spec;
+
+ if (per_pin->pcm_idx >= 0)
+ return spec->pcm_rec[per_pin->pcm_idx].jack;
+ else
+ return NULL;
+}
+
/* update per_pin ELD from the given new ELD;
* setup info frame and notification accordingly
+ * also notify ELD kctl and report jack status changes
*/
-static bool update_eld(struct hda_codec *codec,
+static void update_eld(struct hda_codec *codec,
struct hdmi_spec_per_pin *per_pin,
- struct hdmi_eld *eld)
+ struct hdmi_eld *eld,
+ int repoll)
{
struct hdmi_eld *pin_eld = &per_pin->sink_eld;
struct hdmi_spec *spec = codec->spec;
+ struct snd_jack *pcm_jack;
bool old_eld_valid = pin_eld->eld_valid;
bool eld_changed;
int pcm_idx;
+ if (eld->eld_valid) {
+ if (eld->eld_size <= 0 ||
+ snd_hdmi_parse_eld(codec, &eld->info, eld->eld_buffer,
+ eld->eld_size) < 0) {
+ eld->eld_valid = false;
+ if (repoll) {
+ schedule_delayed_work(&per_pin->work,
+ msecs_to_jiffies(300));
+ return;
+ }
+ }
+ }
+
+ if (!eld->eld_valid || eld->eld_size <= 0) {
+ eld->eld_valid = false;
+ eld->eld_size = 0;
+ }
+
/* for monitor disconnection, save pcm_idx firstly */
pcm_idx = per_pin->pcm_idx;
+
+ /*
+ * pcm_idx >=0 before update_eld() means it is in monitor
+ * disconnected event. Jack must be fetched before update_eld().
+ */
+ pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
+
if (spec->dyn_pcm_assign) {
if (eld->eld_valid) {
hdmi_attach_hda_pcm(spec, per_pin);
@@ -1509,6 +1534,8 @@ static bool update_eld(struct hda_codec *codec,
*/
if (pcm_idx == -1)
pcm_idx = per_pin->pcm_idx;
+ if (!pcm_jack)
+ pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
if (eld->eld_valid)
snd_hdmi_show_eld(codec, &eld->info);
@@ -1547,42 +1574,17 @@ static bool update_eld(struct hda_codec *codec,
SNDRV_CTL_EVENT_MASK_VALUE |
SNDRV_CTL_EVENT_MASK_INFO,
&get_hdmi_pcm(spec, pcm_idx)->eld_ctl->id);
- return eld_changed;
-}
-static struct snd_jack *pin_idx_to_pcm_jack(struct hda_codec *codec,
- struct hdmi_spec_per_pin *per_pin)
-{
- struct hdmi_spec *spec = codec->spec;
- struct snd_jack *jack = NULL;
- struct hda_jack_tbl *jack_tbl;
-
- /* if !dyn_pcm_assign, get jack from hda_jack_tbl
- * in !dyn_pcm_assign case, spec->pcm_rec[].jack is not
- * NULL even after snd_hda_jack_tbl_clear() is called to
- * free snd_jack. This may cause access invalid memory
- * when calling snd_jack_report
- */
- if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign) {
- jack = spec->pcm_rec[per_pin->pcm_idx].jack;
- } else if (!spec->dyn_pcm_assign) {
- /*
- * jack tbl doesn't support DP MST
- * DP MST will use dyn_pcm_assign,
- * so DP MST will never come here
- */
- jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
- per_pin->dev_id);
- if (jack_tbl)
- jack = jack_tbl->jack;
- }
- return jack;
+ if (eld_changed && pcm_jack)
+ snd_jack_report(pcm_jack,
+ (eld->monitor_present && eld->eld_valid) ?
+ SND_JACK_AVOUT : 0);
}
+
/* update ELD and jack state via HD-audio verbs */
-static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
+static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
int repoll)
{
- struct hda_jack_tbl *jack;
struct hda_codec *codec = per_pin->codec;
struct hdmi_spec *spec = codec->spec;
struct hdmi_eld *eld = &spec->temp_eld;
@@ -1597,9 +1599,11 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
* the unsolicited response to avoid custom WARs.
*/
int present;
- bool ret;
- bool do_repoll = false;
- struct snd_jack *pcm_jack = NULL;
+ int ret;
+
+ ret = snd_hda_power_up_pm(codec);
+ if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec)))
+ goto out;
present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
@@ -1618,62 +1622,12 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
if (spec->ops.pin_get_eld(codec, pin_nid, dev_id,
eld->eld_buffer, &eld->eld_size) < 0)
eld->eld_valid = false;
- else {
- if (snd_hdmi_parse_eld(codec, &eld->info, eld->eld_buffer,
- eld->eld_size) < 0)
- eld->eld_valid = false;
- }
- if (!eld->eld_valid && repoll)
- do_repoll = true;
}
- if (do_repoll) {
- schedule_delayed_work(&per_pin->work, msecs_to_jiffies(300));
- } else {
- /*
- * pcm_idx >=0 before update_eld() means it is in monitor
- * disconnected event. Jack must be fetched before
- * update_eld().
- */
- pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
- update_eld(codec, per_pin, eld);
- if (!pcm_jack)
- pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
- }
-
- ret = !repoll || !eld->monitor_present || eld->eld_valid;
-
- jack = snd_hda_jack_tbl_get_mst(codec, pin_nid, per_pin->dev_id);
- if (jack) {
- jack->block_report = !ret;
- jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
- AC_PINSENSE_PRESENCE : 0;
-
- if (spec->dyn_pcm_assign && pcm_jack && !do_repoll) {
- int state = 0;
-
- if (jack->pin_sense & AC_PINSENSE_PRESENCE)
- state = SND_JACK_AVOUT;
- snd_jack_report(pcm_jack, state);
- }
-
- /*
- * snd_hda_jack_pin_sense() call at the beginning of this
- * function, updates jack->pins_sense and clears
- * jack->jack_dirty, therefore snd_hda_jack_report_sync() will
- * not override the jack->pin_sense.
- *
- * snd_hda_jack_report_sync() is superfluous for dyn_pcm_assign
- * case. The jack->pin_sense update was already performed, and
- * hda_jack->jack is NULL for dyn_pcm_assign.
- *
- * Don't call snd_hda_jack_report_sync() for
- * dyn_pcm_assign.
- */
- ret = ret && !spec->dyn_pcm_assign;
- }
+ update_eld(codec, per_pin, eld, repoll);
mutex_unlock(&per_pin->lock);
- return ret;
+ out:
+ snd_hda_power_down_pm(codec);
}
/* update ELD and jack state via audio component */
@@ -1682,64 +1636,25 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
{
struct hdmi_spec *spec = codec->spec;
struct hdmi_eld *eld = &spec->temp_eld;
- struct snd_jack *jack = NULL;
- bool changed;
- int size;
mutex_lock(&per_pin->lock);
eld->monitor_present = false;
- size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
+ eld->eld_size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
per_pin->dev_id, &eld->monitor_present,
eld->eld_buffer, ELD_MAX_SIZE);
- if (size > 0) {
- size = min(size, ELD_MAX_SIZE);
- if (snd_hdmi_parse_eld(codec, &eld->info,
- eld->eld_buffer, size) < 0)
- size = -EINVAL;
- }
-
- if (size > 0) {
- eld->eld_valid = true;
- eld->eld_size = size;
- } else {
- eld->eld_valid = false;
- eld->eld_size = 0;
- }
-
- /* pcm_idx >=0 before update_eld() means it is in monitor
- * disconnected event. Jack must be fetched before update_eld()
- */
- jack = pin_idx_to_pcm_jack(codec, per_pin);
- changed = update_eld(codec, per_pin, eld);
- if (jack == NULL)
- jack = pin_idx_to_pcm_jack(codec, per_pin);
- if (changed && jack)
- snd_jack_report(jack,
- (eld->monitor_present && eld->eld_valid) ?
- SND_JACK_AVOUT : 0);
+ eld->eld_valid = (eld->eld_size > 0);
+ update_eld(codec, per_pin, eld, 0);
mutex_unlock(&per_pin->lock);
}
-static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
{
struct hda_codec *codec = per_pin->codec;
- int ret;
- /* no temporary power up/down needed for component notifier */
- if (!codec_has_acomp(codec)) {
- ret = snd_hda_power_up_pm(codec);
- if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) {
- snd_hda_power_down_pm(codec);
- return false;
- }
- ret = hdmi_present_sense_via_verbs(per_pin, repoll);
- snd_hda_power_down_pm(codec);
- } else {
+ if (!codec_has_acomp(codec))
+ hdmi_present_sense_via_verbs(per_pin, repoll);
+ else
sync_eld_via_acomp(codec, per_pin);
- ret = false; /* don't call snd_hda_jack_report_sync() */
- }
-
- return ret;
}
static void hdmi_repoll_eld(struct work_struct *work)
@@ -1759,8 +1674,7 @@ static void hdmi_repoll_eld(struct work_struct *work)
per_pin->repoll_count = 0;
mutex_lock(&spec->pcm_lock);
- if (hdmi_present_sense(per_pin, per_pin->repoll_count))
- snd_hda_jack_report_sync(per_pin->codec);
+ hdmi_present_sense(per_pin, per_pin->repoll_count);
mutex_unlock(&spec->pcm_lock);
}
@@ -2206,15 +2120,23 @@ static void free_hdmi_jack_priv(struct snd_jack *jack)
pcm->jack = NULL;
}
-static int add_hdmi_jack_kctl(struct hda_codec *codec,
- struct hdmi_spec *spec,
- int pcm_idx,
- const char *name)
+static int generic_hdmi_build_jack(struct hda_codec *codec, int pcm_idx)
{
+ char hdmi_str[32] = "HDMI/DP";
+ struct hdmi_spec *spec = codec->spec;
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pcm_idx);
struct snd_jack *jack;
+ int pcmdev = get_pcm_rec(spec, pcm_idx)->device;
int err;
- err = snd_jack_new(codec->card, name, SND_JACK_AVOUT, &jack,
+ if (pcmdev > 0)
+ sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
+ if (!spec->dyn_pcm_assign &&
+ !is_jack_detectable(codec, per_pin->pin_nid))
+ strncat(hdmi_str, " Phantom",
+ sizeof(hdmi_str) - strlen(hdmi_str) - 1);
+
+ err = snd_jack_new(codec->card, hdmi_str, SND_JACK_AVOUT, &jack,
true, false);
if (err < 0)
return err;
@@ -2225,48 +2147,6 @@ static int add_hdmi_jack_kctl(struct hda_codec *codec,
return 0;
}
-static int generic_hdmi_build_jack(struct hda_codec *codec, int pcm_idx)
-{
- char hdmi_str[32] = "HDMI/DP";
- struct hdmi_spec *spec = codec->spec;
- struct hdmi_spec_per_pin *per_pin;
- struct hda_jack_tbl *jack;
- int pcmdev = get_pcm_rec(spec, pcm_idx)->device;
- bool phantom_jack;
- int ret;
-
- if (pcmdev > 0)
- sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
-
- if (spec->dyn_pcm_assign)
- return add_hdmi_jack_kctl(codec, spec, pcm_idx, hdmi_str);
-
- /* for !dyn_pcm_assign, we still use hda_jack for compatibility */
- /* if !dyn_pcm_assign, it must be non-MST mode.
- * This means pcms and pins are statically mapped.
- * And pcm_idx is pin_idx.
- */
- per_pin = get_pin(spec, pcm_idx);
- phantom_jack = !is_jack_detectable(codec, per_pin->pin_nid);
- if (phantom_jack)
- strncat(hdmi_str, " Phantom",
- sizeof(hdmi_str) - strlen(hdmi_str) - 1);
- ret = snd_hda_jack_add_kctl_mst(codec, per_pin->pin_nid,
- per_pin->dev_id, hdmi_str, phantom_jack,
- 0, NULL);
- if (ret < 0)
- return ret;
- jack = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
- per_pin->dev_id);
- if (jack == NULL)
- return 0;
- /* assign jack->jack to pcm_rec[].jack to
- * align with dyn_pcm_assign mode
- */
- spec->pcm_rec[pcm_idx].jack = jack->jack;
- return 0;
-}
-
static int generic_hdmi_build_controls(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
@@ -2355,7 +2235,6 @@ static int generic_hdmi_init(struct hda_codec *codec)
int pin_idx;
mutex_lock(&spec->bind_lock);
- spec->use_jack_detect = !codec->jackpoll_interval;
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
hda_nid_t pin_nid = per_pin->pin_nid;
@@ -2365,12 +2244,8 @@ static int generic_hdmi_init(struct hda_codec *codec)
hdmi_init_pin(codec, pin_nid);
if (codec_has_acomp(codec))
continue;
- if (spec->use_jack_detect)
- snd_hda_jack_detect_enable(codec, pin_nid, dev_id);
- else
- snd_hda_jack_detect_enable_callback_mst(codec, pin_nid,
- dev_id,
- jack_callback);
+ snd_hda_jack_detect_enable_callback_mst(codec, pin_nid, dev_id,
+ jack_callback);
}
mutex_unlock(&spec->bind_lock);
return 0;
@@ -2532,12 +2407,6 @@ static void reprogram_jack_detect(struct hda_codec *codec, hda_nid_t nid,
unsigned int val = use_acomp ? 0 : (AC_USRSP_EN | tbl->tag);
snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_UNSOLICITED_ENABLE, val);
- } else {
- /* if no jack entry was defined beforehand, create a new one
- * at need (i.e. only when notifier is cleared)
- */
- if (!use_acomp)
- snd_hda_jack_detect_enable(codec, nid, dev_id);
}
}
@@ -2553,13 +2422,11 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
spec->use_acomp_notifier = use_acomp;
spec->codec->relaxed_resume = use_acomp;
/* reprogram each jack detection logic depending on the notifier */
- if (spec->use_jack_detect) {
- for (i = 0; i < spec->num_pins; i++)
- reprogram_jack_detect(spec->codec,
- get_pin(spec, i)->pin_nid,
- get_pin(spec, i)->dev_id,
- use_acomp);
- }
+ for (i = 0; i < spec->num_pins; i++)
+ reprogram_jack_detect(spec->codec,
+ get_pin(spec, i)->pin_nid,
+ get_pin(spec, i)->dev_id,
+ use_acomp);
mutex_unlock(&spec->bind_lock);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 63e1a56f705b..f66a48154a57 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -107,6 +107,7 @@ struct alc_spec {
unsigned int done_hp_init:1;
unsigned int no_shutup_pins:1;
unsigned int ultra_low_power:1;
+ unsigned int has_hs_key:1;
/* for PLL fix */
hda_nid_t pll_nid;
@@ -367,7 +368,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0215:
case 0x10ec0233:
case 0x10ec0235:
+ case 0x10ec0236:
case 0x10ec0255:
+ case 0x10ec0256:
case 0x10ec0257:
case 0x10ec0282:
case 0x10ec0283:
@@ -379,11 +382,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0300:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
- case 0x10ec0236:
- case 0x10ec0256:
- alc_write_coef_idx(codec, 0x36, 0x5757);
- alc_update_coef_idx(codec, 0x10, 1<<9, 0);
- break;
case 0x10ec0275:
alc_update_coef_idx(codec, 0xe, 0, 1<<0);
break;
@@ -2982,6 +2980,107 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
return alc_parse_auto_config(codec, alc269_ignore, ssids);
}
+static const struct hda_jack_keymap alc_headset_btn_keymap[] = {
+ { SND_JACK_BTN_0, KEY_PLAYPAUSE },
+ { SND_JACK_BTN_1, KEY_VOICECOMMAND },
+ { SND_JACK_BTN_2, KEY_VOLUMEUP },
+ { SND_JACK_BTN_3, KEY_VOLUMEDOWN },
+ {}
+};
+
+static void alc_headset_btn_callback(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ int report = 0;
+
+ if (jack->unsol_res & (7 << 13))
+ report |= SND_JACK_BTN_0;
+
+ if (jack->unsol_res & (1 << 16 | 3 << 8))
+ report |= SND_JACK_BTN_1;
+
+ /* Volume up key */
+ if (jack->unsol_res & (7 << 23))
+ report |= SND_JACK_BTN_2;
+
+ /* Volume down key */
+ if (jack->unsol_res & (7 << 10))
+ report |= SND_JACK_BTN_3;
+
+ jack->jack->button_state = report;
+}
+
+static void alc_disable_headset_jack_key(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (!spec->has_hs_key)
+ return;
+
+ switch (codec->core.vendor_id) {
+ case 0x10ec0215:
+ case 0x10ec0225:
+ case 0x10ec0285:
+ case 0x10ec0295:
+ case 0x10ec0289:
+ case 0x10ec0299:
+ alc_write_coef_idx(codec, 0x48, 0x0);
+ alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
+ alc_update_coef_idx(codec, 0x44, 0x0045 << 8, 0x0);
+ break;
+ case 0x10ec0236:
+ case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x48, 0x0);
+ alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
+ break;
+ }
+}
+
+static void alc_enable_headset_jack_key(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (!spec->has_hs_key)
+ return;
+
+ switch (codec->core.vendor_id) {
+ case 0x10ec0215:
+ case 0x10ec0225:
+ case 0x10ec0285:
+ case 0x10ec0295:
+ case 0x10ec0289:
+ case 0x10ec0299:
+ alc_write_coef_idx(codec, 0x48, 0xd011);
+ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+ alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
+ break;
+ case 0x10ec0236:
+ case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x48, 0xd011);
+ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+ break;
+ }
+}
+
+static void alc_fixup_headset_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->has_hs_key = 1;
+ snd_hda_jack_detect_enable_callback(codec, 0x55,
+ alc_headset_btn_callback);
+ snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
+ SND_JACK_HEADSET, alc_headset_btn_keymap);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ alc_enable_headset_jack_key(codec);
+ break;
+ }
+}
+
static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
{
alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0);
@@ -3269,7 +3368,13 @@ static void alc256_init(struct hda_codec *codec)
alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
- alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ /*
+ * Expose headphone mic (or possibly Line In on some machines) instead
+ * of PC Beep on 1Ah, and disable 1Ah loopback for all outputs. See
+ * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of
+ * this register.
+ */
+ alc_write_coef_idx(codec, 0x36, 0x5757);
}
static void alc256_shutup(struct hda_codec *codec)
@@ -3372,6 +3477,8 @@ static void alc225_shutup(struct hda_codec *codec)
if (!hp_pin)
hp_pin = 0x21;
+
+ alc_disable_headset_jack_key(codec);
/* 3k pull low control for Headset jack. */
alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
@@ -3411,6 +3518,9 @@ static void alc225_shutup(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4);
msleep(30);
}
+
+ alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+ alc_enable_headset_jack_key(codec);
}
static void alc_default_init(struct hda_codec *codec)
@@ -4008,6 +4118,12 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
alc_fixup_hp_gpio_led(codec, action, 0x08, 0x10);
}
+static void alc285_fixup_hp_gpio_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_hp_gpio_led(codec, action, 0x04, 0x00);
+}
+
static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -5375,17 +5491,6 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
}
}
-static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec,
- const struct hda_fixup *fix,
- int action)
-{
- if (action != HDA_FIXUP_ACT_PRE_PROBE)
- return;
-
- snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1);
- snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP);
-}
-
static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
@@ -5662,69 +5767,6 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
snd_hda_override_wcaps(codec, 0x03, 0);
}
-static const struct hda_jack_keymap alc_headset_btn_keymap[] = {
- { SND_JACK_BTN_0, KEY_PLAYPAUSE },
- { SND_JACK_BTN_1, KEY_VOICECOMMAND },
- { SND_JACK_BTN_2, KEY_VOLUMEUP },
- { SND_JACK_BTN_3, KEY_VOLUMEDOWN },
- {}
-};
-
-static void alc_headset_btn_callback(struct hda_codec *codec,
- struct hda_jack_callback *jack)
-{
- int report = 0;
-
- if (jack->unsol_res & (7 << 13))
- report |= SND_JACK_BTN_0;
-
- if (jack->unsol_res & (1 << 16 | 3 << 8))
- report |= SND_JACK_BTN_1;
-
- /* Volume up key */
- if (jack->unsol_res & (7 << 23))
- report |= SND_JACK_BTN_2;
-
- /* Volume down key */
- if (jack->unsol_res & (7 << 10))
- report |= SND_JACK_BTN_3;
-
- jack->jack->button_state = report;
-}
-
-static void alc_fixup_headset_jack(struct hda_codec *codec,
- const struct hda_fixup *fix, int action)
-{
-
- switch (action) {
- case HDA_FIXUP_ACT_PRE_PROBE:
- snd_hda_jack_detect_enable_callback(codec, 0x55,
- alc_headset_btn_callback);
- snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
- SND_JACK_HEADSET, alc_headset_btn_keymap);
- break;
- case HDA_FIXUP_ACT_INIT:
- switch (codec->core.vendor_id) {
- case 0x10ec0215:
- case 0x10ec0225:
- case 0x10ec0285:
- case 0x10ec0295:
- case 0x10ec0289:
- case 0x10ec0299:
- alc_write_coef_idx(codec, 0x48, 0xd011);
- alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
- alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
- break;
- case 0x10ec0236:
- case 0x10ec0256:
- alc_write_coef_idx(codec, 0x48, 0xd011);
- alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
- break;
- }
- break;
- }
-}
-
static void alc295_fixup_chromebook(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -5863,8 +5905,6 @@ enum {
ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
- ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
- ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
@@ -5923,6 +5963,7 @@ enum {
ALC294_FIXUP_ASUS_DUAL_SPK,
ALC285_FIXUP_THINKPAD_HEADSET_JACK,
ALC294_FIXUP_ASUS_HPE,
+ ALC285_FIXUP_HP_GPIO_LED,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6604,23 +6645,6 @@ static const struct hda_fixup alc269_fixups[] = {
{}
}
},
- [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
- .type = HDA_FIXUP_VERBS,
- .v.verbs = (const struct hda_verb[]) {
- /* Disable pass-through path for FRONT 14h */
- {0x20, AC_VERB_SET_COEF_INDEX, 0x36},
- {0x20, AC_VERB_SET_PROC_COEF, 0x1737},
- {}
- },
- .chained = true,
- .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
- },
- [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = alc256_fixup_dell_xps_13_headphone_noise2,
- .chained = true,
- .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE
- },
[ALC293_FIXUP_LENOVO_SPK_NOISE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_disable_aamix,
@@ -7061,6 +7085,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
},
+ [ALC285_FIXUP_HP_GPIO_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_gpio_led,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7114,17 +7142,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
- SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
@@ -7208,6 +7233,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -7477,7 +7503,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"},
{.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"},
{.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"},
- {.id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, .name = "alc256-dell-xps13"},
{.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
{.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
{.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 21ab9cc50c71..65a887b217ee 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -30,7 +30,7 @@
#if K1212_DEBUG_LEVEL > 0
#define K1212_DEBUG_PRINTK(fmt,args...) printk(KERN_DEBUG fmt,##args)
#else
-#define K1212_DEBUG_PRINTK(fmt,...)
+#define K1212_DEBUG_PRINTK(fmt,...) do { } while (0)
#endif
#if K1212_DEBUG_LEVEL > 1
#define K1212_DEBUG_PRINTK_VERBOSE(fmt,args...) printk(KERN_DEBUG fmt,##args)
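Defining the disabled K1212_DEBUG_PRINTK() as do { } while (0) rather than as nothing keeps every invocation a single well-formed statement: the trailing semicolon is consumed exactly as after a function call, so if/else chains stay intact and -Wempty-body stays quiet. A minimal sketch using the same GNU named-variadic style as the driver:

#include <stdio.h>

/* When VERBOSE is undefined, DBG() expands to do { } while (0): a single
 * statement that consumes the trailing semicolon, so the if/else below
 * stays well formed and no empty `;` body is left for the compiler to
 * warn about.
 */
#ifdef VERBOSE
#define DBG(fmt, args...) printf(fmt, ##args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif

int main(void)
{
	int err = -5;

	if (err)
		DBG("error path taken: %d\n", err);
	else
		printf("all good\n");
	return 0;
}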
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index cc06f0a1a7e4..227aece17e39 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -3353,7 +3353,8 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
return;
}
} else {
- int err = -EINVAL;
+ int err;
+
err = hdsp_request_fw_loader(hdsp);
if (err < 0) {
snd_iprintf(buffer,
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 799789c8eea9..8b03e2dc503f 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -414,6 +414,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
{
unsigned int i, idx, ofs, rest;
struct via82xx *chip = snd_pcm_substream_chip(substream);
+ __le32 *pgtbl;
if (dev->table.area == NULL) {
/* the start of each list must be aligned to 8 bytes,
@@ -435,6 +436,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
/* fill the entries */
idx = 0;
ofs = 0;
+ pgtbl = (__le32 *)dev->table.area;
for (i = 0; i < periods; i++) {
rest = fragsize;
/* fill descriptors for a period.
@@ -451,7 +453,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
return -EINVAL;
}
addr = snd_pcm_sgbuf_get_addr(substream, ofs);
- ((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr);
+ pgtbl[idx << 1] = cpu_to_le32(addr);
r = snd_pcm_sgbuf_get_chunk_size(substream, ofs, rest);
rest -= r;
if (! rest) {
@@ -466,7 +468,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
"tbl %d: at %d size %d (rest %d)\n",
idx, ofs, r, rest);
*/
- ((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag);
+ pgtbl[(idx<<1) + 1] = cpu_to_le32(r | flag);
dev->idx_table[idx].offset = ofs;
dev->idx_table[idx].size = r;
ofs += r;
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 84e589803e2e..607b7100db1c 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -267,6 +267,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
{
unsigned int i, idx, ofs, rest;
struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
+ __le32 *pgtbl;
if (dev->table.area == NULL) {
/* the start of each list must be aligned to 8 bytes,
@@ -288,6 +289,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
/* fill the entries */
idx = 0;
ofs = 0;
+ pgtbl = (__le32 *)dev->table.area;
for (i = 0; i < periods; i++) {
rest = fragsize;
/* fill descriptors for a period.
@@ -304,7 +306,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
return -EINVAL;
}
addr = snd_pcm_sgbuf_get_addr(substream, ofs);
- ((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr);
+ pgtbl[idx << 1] = cpu_to_le32(addr);
r = PAGE_SIZE - (ofs % PAGE_SIZE);
if (rest < r)
r = rest;
@@ -321,7 +323,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
"tbl %d: at %d size %d (rest %d)\n",
idx, ofs, r, rest);
*/
- ((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag);
+ pgtbl[(idx<<1) + 1] = cpu_to_le32(r | flag);
dev->idx_table[idx].offset = ofs;
dev->idx_table[idx].size = r;
ofs += r;
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 093806d735c6..9554a0c506af 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -40,6 +40,7 @@ static int keywest_probe(struct i2c_client *client,
static int keywest_attach_adapter(struct i2c_adapter *adapter)
{
struct i2c_board_info info;
+ struct i2c_client *client;
if (! keywest_ctx)
return -EINVAL;
@@ -50,9 +51,11 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "keywest", I2C_NAME_SIZE);
info.addr = keywest_ctx->addr;
- keywest_ctx->client = i2c_new_device(adapter, &info);
- if (!keywest_ctx->client)
- return -ENODEV;
+ client = i2c_new_client_device(adapter, &info);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+ keywest_ctx->client = client;
+
/*
* We know the driver is already loaded, so the device should be
* already bound. If not it means binding failed, and then there
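The keywest change also switches failure conventions: i2c_new_device() returned NULL on any error, whereas i2c_new_client_device() returns an ERR_PTR-encoded errno, which is why the new code checks IS_ERR() and propagates PTR_ERR(). A userspace sketch of that convention (the helpers below are simplified re-implementations for illustration, not the kernel's):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Error pointers occupy the top 4095 values of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int fake_device;			/* stands in for a real i2c_client */

static void *new_client_device(int fail)
{
	return fail ? ERR_PTR(-ENODEV) : (void *)&fake_device;
}

int main(void)
{
	void *client = new_client_device(1);

	if (IS_ERR(client)) {
		printf("propagating errno %ld\n", -PTR_ERR(client));
		return 1;
	}
	printf("device created\n");
	return 0;
}

The practical gain is that callers no longer collapse every failure mode to a single errno; the exact error from the lower layer travels up through the pointer itself.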
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index 5f40517717c4..bce4cee5cb54 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -26,3 +26,13 @@ config SND_SOC_AMD_ACP3x
depends on X86 && PCI
help
This option enables ACP v3.x I2S support on AMD platform
+
+config SND_SOC_AMD_RV_RT5682_MACH
+ tristate "AMD RV support for RT5682"
+ select SND_SOC_RT5682
+ select SND_SOC_MAX98357A
+ select SND_SOC_CROS_EC_CODEC
+ select I2C_CROS_EC_TUNNEL
+ depends on SND_SOC_AMD_ACP3x && I2C && CROS_EC
+ help
+ This option enables the machine driver for the RT5682 and MAX98357A.
diff --git a/sound/soc/amd/Makefile b/sound/soc/amd/Makefile
index c4ddc6adb6f0..e6f3d9b469f3 100644
--- a/sound/soc/amd/Makefile
+++ b/sound/soc/amd/Makefile
@@ -2,8 +2,10 @@
acp_audio_dma-objs := acp-pcm-dma.o
snd-soc-acp-da7219mx98357-mach-objs := acp-da7219-max98357a.o
snd-soc-acp-rt5645-mach-objs := acp-rt5645.o
+snd-soc-acp-rt5682-mach-objs := acp3x-rt5682-max9836.o
obj-$(CONFIG_SND_SOC_AMD_ACP) += acp_audio_dma.o
obj-$(CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH) += snd-soc-acp-da7219mx98357-mach.o
obj-$(CONFIG_SND_SOC_AMD_CZ_RT5645_MACH) += snd-soc-acp-rt5645-mach.o
obj-$(CONFIG_SND_SOC_AMD_ACP3x) += raven/
+obj-$(CONFIG_SND_SOC_AMD_RV_RT5682_MACH) += snd-soc-acp-rt5682-mach.o
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index 7a5621e5e233..9414d7269c4f 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -54,7 +54,7 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
struct snd_soc_card *card = rtd->card;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_component *component = codec_dai->component;
dev_info(rtd->dev, "codec dai name = %s\n", codec_dai->name);
diff --git a/sound/soc/amd/acp-rt5645.c b/sound/soc/amd/acp-rt5645.c
index 91abeb92b648..73b31f88a6b5 100644
--- a/sound/soc/amd/acp-rt5645.c
+++ b/sound/soc/amd/acp-rt5645.c
@@ -48,7 +48,7 @@ static int cz_aif1_hw_params(struct snd_pcm_substream *substream,
{
int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
ret = snd_soc_dai_set_pll(codec_dai, 0, RT5645_PLL1_S_MCLK,
CZ_PLAT_CLK, params_rate(params) * 512);
@@ -73,7 +73,7 @@ static int cz_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_card *card;
struct snd_soc_component *codec;
- codec = rtd->codec_dai->component;
+ codec = asoc_rtd_to_codec(rtd, 0)->component;
card = rtd->card;
ret = snd_soc_card_jack_new(card, "Headset Jack",
diff --git a/sound/soc/amd/acp3x-rt5682-max9836.c b/sound/soc/amd/acp3x-rt5682-max9836.c
new file mode 100644
index 000000000000..024a7ee54cd5
--- /dev/null
+++ b/sound/soc/amd/acp3x-rt5682-max9836.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Machine driver for AMD ACP Audio engine using RT5682 & MAX98357 codecs.
+//
+// Copyright 2016 Advanced Micro Devices, Inc.
+
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc-dapm.h>
+#include <sound/jack.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+
+#include "raven/acp3x.h"
+#include "../codecs/rt5682.h"
+
+#define PCO_PLAT_CLK 48000000
+#define RT5682_PLL_FREQ (48000 * 512)
+#define DUAL_CHANNEL 2
+
+static struct snd_soc_jack pco_jack;
+static struct clk *rt5682_dai_wclk;
+static struct clk *rt5682_dai_bclk;
+static struct gpio_desc *dmic_sel;
+
+static int acp3x_5682_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
+
+ dev_info(rtd->dev, "codec dai name = %s\n", codec_dai->name);
+
+ /* set rt5682 dai fmt */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0) {
+ dev_err(rtd->card->dev,
+ "Failed to set rt5682 dai fmt: %d\n", ret);
+ return ret;
+ }
+
+ /* set codec PLL */
+ ret = snd_soc_dai_set_pll(codec_dai, RT5682_PLL2, RT5682_PLL2_S_MCLK,
+ PCO_PLAT_CLK, RT5682_PLL_FREQ);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set rt5682 PLL: %d\n", ret);
+ return ret;
+ }
+
+ /* Set codec sysclk */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL2,
+ RT5682_PLL_FREQ, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev,
+ "Failed to set rt5682 SYSCLK: %d\n", ret);
+ return ret;
+ }
+
+ /* Set tdm/i2s1 master bclk ratio */
+ ret = snd_soc_dai_set_bclk_ratio(codec_dai, 64);
+ if (ret < 0) {
+ dev_err(rtd->dev,
+ "Failed to set rt5682 tdm bclk ratio: %d\n", ret);
+ return ret;
+ }
+
+ rt5682_dai_wclk = clk_get(component->dev, "rt5682-dai-wclk");
+ rt5682_dai_bclk = clk_get(component->dev, "rt5682-dai-bclk");
+
+ ret = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_LINEOUT |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &pco_jack, NULL, 0);
+ if (ret) {
+ dev_err(card->dev, "HP jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(pco_jack.jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ ret = snd_soc_component_set_jack(component, &pco_jack, NULL);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack call-back failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int rt5682_clk_enable(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ /* RT5682 will support only 48K output with 48M mclk */
+ clk_set_rate(rt5682_dai_wclk, 48000);
+ clk_set_rate(rt5682_dai_bclk, 48000 * 64);
+ ret = clk_prepare_enable(rt5682_dai_wclk);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't enable wclk %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void rt5682_clk_disable(void)
+{
+ clk_disable_unprepare(rt5682_dai_wclk);
+}
+
+static const unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int acp3x_5682_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct acp3x_platform_info *machine = snd_soc_card_get_drvdata(card);
+
+ machine->play_i2s_instance = I2S_SP_INSTANCE;
+ machine->cap_i2s_instance = I2S_SP_INSTANCE;
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ return rt5682_clk_enable(substream);
+}
+
+static int acp3x_max_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct acp3x_platform_info *machine = snd_soc_card_get_drvdata(card);
+
+ machine->play_i2s_instance = I2S_BT_INSTANCE;
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+ return rt5682_clk_enable(substream);
+}
+
+static int acp3x_ec_dmic0_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct acp3x_platform_info *machine = snd_soc_card_get_drvdata(card);
+
+ machine->cap_i2s_instance = I2S_BT_INSTANCE;
+ snd_soc_dai_set_bclk_ratio(codec_dai, 64);
+ if (dmic_sel)
+ gpiod_set_value(dmic_sel, 0);
+
+ return rt5682_clk_enable(substream);
+}
+
+static int acp3x_ec_dmic1_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct acp3x_platform_info *machine = snd_soc_card_get_drvdata(card);
+
+ machine->cap_i2s_instance = I2S_BT_INSTANCE;
+ snd_soc_dai_set_bclk_ratio(codec_dai, 64);
+ if (dmic_sel)
+ gpiod_set_value(dmic_sel, 1);
+
+ return rt5682_clk_enable(substream);
+}
+
+static void rt5682_shutdown(struct snd_pcm_substream *substream)
+{
+ rt5682_clk_disable();
+}
+
+static const struct snd_soc_ops acp3x_5682_ops = {
+ .startup = acp3x_5682_startup,
+ .shutdown = rt5682_shutdown,
+};
+
+static const struct snd_soc_ops acp3x_max_play_ops = {
+ .startup = acp3x_max_startup,
+ .shutdown = rt5682_shutdown,
+};
+
+static const struct snd_soc_ops acp3x_ec_cap0_ops = {
+ .startup = acp3x_ec_dmic0_startup,
+ .shutdown = rt5682_shutdown,
+};
+
+static const struct snd_soc_ops acp3x_ec_cap1_ops = {
+ .startup = acp3x_ec_dmic1_startup,
+ .shutdown = rt5682_shutdown,
+};
+
+SND_SOC_DAILINK_DEF(acp3x_i2s,
+ DAILINK_COMP_ARRAY(COMP_CPU("acp3x_i2s_playcap.0")));
+SND_SOC_DAILINK_DEF(acp3x_bt,
+ DAILINK_COMP_ARRAY(COMP_CPU("acp3x_i2s_playcap.2")));
+
+SND_SOC_DAILINK_DEF(rt5682,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-10EC5682:00", "rt5682-aif1")));
+SND_SOC_DAILINK_DEF(max,
+ DAILINK_COMP_ARRAY(COMP_CODEC("MX98357A:00", "HiFi")));
+SND_SOC_DAILINK_DEF(cros_ec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("GOOG0013:00", "EC Codec I2S RX")));
+
+SND_SOC_DAILINK_DEF(platform,
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("acp3x_rv_i2s_dma.0")));
+
+static struct snd_soc_dai_link acp3x_dai_5682_98357[] = {
+ {
+ .name = "acp3x-5682-play",
+ .stream_name = "Playback",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .init = acp3x_5682_init,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &acp3x_5682_ops,
+ SND_SOC_DAILINK_REG(acp3x_i2s, rt5682, platform),
+ },
+ {
+ .name = "acp3x-max98357-play",
+ .stream_name = "HiFi Playback",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .dpcm_playback = 1,
+ .ops = &acp3x_max_play_ops,
+ SND_SOC_DAILINK_REG(acp3x_bt, max, platform),
+ },
+ {
+ .name = "acp3x-ec-dmic0-capture",
+ .stream_name = "Capture DMIC0",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_capture = 1,
+ .ops = &acp3x_ec_cap0_ops,
+ SND_SOC_DAILINK_REG(acp3x_bt, cros_ec, platform),
+ },
+ {
+ .name = "acp3x-ec-dmic1-capture",
+ .stream_name = "Capture DMIC1",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_capture = 1,
+ .ops = &acp3x_ec_cap1_ops,
+ SND_SOC_DAILINK_REG(acp3x_bt, cros_ec, platform),
+ },
+};
+
+static const struct snd_soc_dapm_widget acp3x_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_SPK("Spk", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route acp3x_audio_route[] = {
+ {"Headphone Jack", NULL, "HPOL"},
+ {"Headphone Jack", NULL, "HPOR"},
+ {"IN1P", NULL, "Headset Mic"},
+ {"Spk", NULL, "Speaker"},
+};
+
+static const struct snd_kcontrol_new acp3x_mc_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Spk"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static struct snd_soc_card acp3x_card = {
+ .name = "acp3xalc5682m98357",
+ .owner = THIS_MODULE,
+ .dai_link = acp3x_dai_5682_98357,
+ .num_links = ARRAY_SIZE(acp3x_dai_5682_98357),
+ .dapm_widgets = acp3x_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(acp3x_widgets),
+ .dapm_routes = acp3x_audio_route,
+ .num_dapm_routes = ARRAY_SIZE(acp3x_audio_route),
+ .controls = acp3x_mc_controls,
+ .num_controls = ARRAY_SIZE(acp3x_mc_controls),
+};
+
+static int acp3x_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct snd_soc_card *card;
+ struct acp3x_platform_info *machine;
+
+ machine = devm_kzalloc(&pdev->dev, sizeof(*machine), GFP_KERNEL);
+ if (!machine)
+ return -ENOMEM;
+
+ card = &acp3x_card;
+ acp3x_card.dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+
+ dmic_sel = devm_gpiod_get(&pdev->dev, "dmic", GPIOD_OUT_LOW);
+ if (IS_ERR(dmic_sel)) {
+ dev_err(&pdev->dev, "DMIC gpio failed err=%ld\n",
+ PTR_ERR(dmic_sel));
+ return PTR_ERR(dmic_sel);
+ }
+
+ ret = devm_snd_soc_register_card(&pdev->dev, &acp3x_card);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "devm_snd_soc_register_card(%s) failed: %d\n",
+ acp3x_card.name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct acpi_device_id acp3x_audio_acpi_match[] = {
+ { "AMDI5682", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, acp3x_audio_acpi_match);
+
+static struct platform_driver acp3x_audio = {
+ .driver = {
+ .name = "acp3x-alc5682-max98357",
+ .acpi_match_table = ACPI_PTR(acp3x_audio_acpi_match),
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = acp3x_probe,
+};
+
+module_platform_driver(acp3x_audio);
+
+MODULE_AUTHOR("akshu.agrawal@amd.com");
+MODULE_DESCRIPTION("ALC5682 & MAX98357 audio support");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/raven/acp3x-i2s.c b/sound/soc/amd/raven/acp3x-i2s.c
index 91a388184e52..3a3c47e820ab 100644
--- a/sound/soc/amd/raven/acp3x-i2s.c
+++ b/sound/soc/amd/raven/acp3x-i2s.c
@@ -42,7 +42,7 @@ static int acp3x_i2s_set_tdm_slot(struct snd_soc_dai *cpu_dai,
u32 tx_mask, u32 rx_mask, int slots, int slot_width)
{
struct i2s_dev_data *adata;
- u32 val, reg_val, frmt_reg, frm_len;
+ u32 frm_len;
u16 slot_len;
adata = snd_soc_dai_get_drvdata(cpu_dai);
@@ -64,36 +64,7 @@ static int acp3x_i2s_set_tdm_slot(struct snd_soc_dai *cpu_dai,
default:
return -EINVAL;
}
-
- /* Enable I2S/BT channels TDM, respective TX/RX frame lengths.*/
-
frm_len = FRM_LEN | (slots << 15) | (slot_len << 18);
- if (adata->substream_type == SNDRV_PCM_STREAM_PLAYBACK) {
- switch (adata->i2s_instance) {
- case I2S_BT_INSTANCE:
- reg_val = mmACP_BTTDM_ITER;
- frmt_reg = mmACP_BTTDM_TXFRMT;
- break;
- case I2S_SP_INSTANCE:
- default:
- reg_val = mmACP_I2STDM_ITER;
- frmt_reg = mmACP_I2STDM_TXFRMT;
- }
- } else {
- switch (adata->i2s_instance) {
- case I2S_BT_INSTANCE:
- reg_val = mmACP_BTTDM_IRER;
- frmt_reg = mmACP_BTTDM_RXFRMT;
- break;
- case I2S_SP_INSTANCE:
- default:
- reg_val = mmACP_I2STDM_IRER;
- frmt_reg = mmACP_I2STDM_RXFRMT;
- }
- }
- val = rv_readl(adata->acp3x_base + reg_val);
- rv_writel(val | 0x2, adata->acp3x_base + reg_val);
- rv_writel(frm_len, adata->acp3x_base + frmt_reg);
adata->tdm_fmt = frm_len;
return 0;
}
@@ -105,12 +76,14 @@ static int acp3x_i2s_hwparams(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *prtd;
struct snd_soc_card *card;
struct acp3x_platform_info *pinfo;
+ struct i2s_dev_data *adata;
u32 val;
- u32 reg_val;
+ u32 reg_val, frmt_reg;
prtd = substream->private_data;
rtd = substream->runtime->private_data;
card = prtd->card;
+ adata = snd_soc_dai_get_drvdata(dai);
pinfo = snd_soc_card_get_drvdata(card);
if (pinfo) {
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -141,21 +114,30 @@ static int acp3x_i2s_hwparams(struct snd_pcm_substream *substream,
switch (rtd->i2s_instance) {
case I2S_BT_INSTANCE:
reg_val = mmACP_BTTDM_ITER;
+ frmt_reg = mmACP_BTTDM_TXFRMT;
break;
case I2S_SP_INSTANCE:
default:
reg_val = mmACP_I2STDM_ITER;
+ frmt_reg = mmACP_I2STDM_TXFRMT;
}
} else {
switch (rtd->i2s_instance) {
case I2S_BT_INSTANCE:
reg_val = mmACP_BTTDM_IRER;
+ frmt_reg = mmACP_BTTDM_RXFRMT;
break;
case I2S_SP_INSTANCE:
default:
reg_val = mmACP_I2STDM_IRER;
+ frmt_reg = mmACP_I2STDM_RXFRMT;
}
}
+ if (adata->tdm_mode) {
+ val = rv_readl(rtd->acp3x_base + reg_val);
+ rv_writel(val | 0x2, rtd->acp3x_base + reg_val);
+ rv_writel(adata->tdm_fmt, rtd->acp3x_base + frmt_reg);
+ }
val = rv_readl(rtd->acp3x_base + reg_val);
val = val | (rtd->xfer_resolution << 3);
rv_writel(val, rtd->acp3x_base + reg_val);
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index d62c0d90c41e..e362f0bc9e46 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -458,7 +458,8 @@ static int acp3x_resume(struct device *dev)
reg_val = mmACP_I2STDM_ITER;
frmt_val = mmACP_I2STDM_TXFRMT;
}
- rv_writel((rtd->xfer_resolution << 3), rtd->acp3x_base + reg_val);
+ rv_writel((rtd->xfer_resolution << 3),
+ rtd->acp3x_base + reg_val);
}
if (adata->capture_stream && adata->capture_stream->runtime) {
struct i2s_stream_instance *rtd =
@@ -474,7 +475,8 @@ static int acp3x_resume(struct device *dev)
reg_val = mmACP_I2STDM_IRER;
frmt_val = mmACP_I2STDM_RXFRMT;
}
- rv_writel((rtd->xfer_resolution << 3), rtd->acp3x_base + reg_val);
+ rv_writel((rtd->xfer_resolution << 3),
+ rtd->acp3x_base + reg_val);
}
if (adata->tdm_mode == TDM_ENABLE) {
rv_writel(adata->tdm_fmt, adata->acp3x_base + frmt_val);
diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
index da60e2ec5535..f25ce50f1a90 100644
--- a/sound/soc/amd/raven/pci-acp3x.c
+++ b/sound/soc/amd/raven/pci-acp3x.c
@@ -38,8 +38,13 @@ static int acp3x_power_on(void __iomem *acp3x_base)
timeout = 0;
while (++timeout < 500) {
val = rv_readl(acp3x_base + mmACP_PGFSM_STATUS);
- if (!val)
+ if (!val) {
+ /* Set PME_EN again, as it gets cleared
+ * after ACP is powered on
+ */
+ rv_writel(0x1, acp3x_base + mmACP_PME_EN);
return 0;
+ }
udelay(1);
}
return -ETIMEDOUT;
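acp3x_power_on() above uses the common bounded-poll pattern: spin on a status register with a hard iteration cap so that a wedged power-gating state machine produces -ETIMEDOUT instead of an unbounded busy loop. A compact sketch of the pattern, with read_status() as an invented stand-in for the rv_readl() of mmACP_PGFSM_STATUS:

#include <errno.h>
#include <stdio.h>

static int fake_status = 3;

/* Counts down to zero to simulate the power-gating state machine
 * settling after a few polls.
 */
static unsigned int read_status(void)
{
	return fake_status ? (unsigned int)fake_status-- : 0;
}

static int power_on(void)
{
	unsigned int val, timeout = 0;

	while (++timeout < 500) {
		val = read_status();
		if (!val)
			return 0;	/* settled: safe to re-arm PME_EN here */
		/* the driver udelay(1)s between polls; omitted in this sketch */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("power_on() = %d\n", power_on());
	return 0;
}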
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
index db67f5ba1e9a..cb03c4f7324c 100644
--- a/sound/soc/atmel/atmel-pcm-dma.c
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -56,7 +56,7 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct atmel_pcm_dma_params *prtd;
- prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ prtd = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (ssc_sr & prtd->mask->ssc_error) {
if (snd_pcm_running(substream))
@@ -83,7 +83,7 @@ static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
struct ssc_device *ssc;
int ret;
- prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ prtd = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
ssc = prtd->ssc;
ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
diff --git a/sound/soc/atmel/atmel-pcm-pdc.c b/sound/soc/atmel/atmel-pcm-pdc.c
index 59c1331a6984..a8daebcbf6c8 100644
--- a/sound/soc/atmel/atmel-pcm-pdc.c
+++ b/sound/soc/atmel/atmel-pcm-pdc.c
@@ -213,7 +213,7 @@ static int atmel_pcm_hw_params(struct snd_soc_component *component,
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
runtime->dma_bytes = params_buffer_bytes(params);
- prtd->params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ prtd->params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
prtd->params->dma_intr_handler = atmel_pcm_dma_irq;
prtd->dma_buffer = runtime->dma_addr;
diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
index 776b27d3686e..148c943cb538 100644
--- a/sound/soc/atmel/atmel_wm8904.c
+++ b/sound/soc/atmel/atmel_wm8904.c
@@ -27,7 +27,7 @@ static int atmel_asoc_wm8904_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_pll(codec_dai, WM8904_FLL_MCLK, WM8904_FLL_MCLK,
diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c
index befc2a3a05b0..3cb63886195f 100644
--- a/sound/soc/atmel/mchp-i2s-mcc.c
+++ b/sound/soc/atmel/mchp-i2s-mcc.c
@@ -239,10 +239,10 @@ struct mchp_i2s_mcc_dev {
unsigned int frame_length;
int tdm_slots;
int channels;
- int gclk_use:1;
- int gclk_running:1;
- int tx_rdy:1;
- int rx_rdy:1;
+ unsigned int gclk_use:1;
+ unsigned int gclk_running:1;
+ unsigned int tx_rdy:1;
+ unsigned int rx_rdy:1;
};
static irqreturn_t mchp_i2s_mcc_interrupt(int irq, void *dev_id)
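The mchp-i2s-mcc bitfield change fixes a subtle signedness bug: a plain `int x:1` field is signed on GCC and Clang (the standard leaves it implementation-defined), so it can hold only 0 and -1, and a test such as `x == 1` can never succeed. A short sketch:

#include <stdio.h>

/* A signed 1-bit field stores only 0 and -1, so `bad = 1` reads back as
 * -1 and the equality test fails; the unsigned field behaves as a flag.
 */
struct flags {
	int bad:1;
	unsigned int good:1;
};

int main(void)
{
	struct flags f = { 0, 0 };

	f.bad = 1;
	f.good = 1;
	printf("bad  == 1 ? %s (reads back as %d)\n",
	       f.bad == 1 ? "yes" : "no", f.bad);
	printf("good == 1 ? %s (reads back as %u)\n",
	       f.good == 1 ? "yes" : "no", (unsigned int)f.good);
	return 0;
}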
diff --git a/sound/soc/atmel/mikroe-proto.c b/sound/soc/atmel/mikroe-proto.c
index aa6d0d78566f..f9a85fd01b79 100644
--- a/sound/soc/atmel/mikroe-proto.c
+++ b/sound/soc/atmel/mikroe-proto.c
@@ -21,7 +21,7 @@
static int snd_proto_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* Set proto sysclk */
int ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index b1bef2bf142d..ed1f69b57024 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -96,7 +96,7 @@ static const struct snd_soc_dapm_route intercon[] = {
*/
static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct device *dev = rtd->dev;
int ret;
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 7822425d5e61..9fbc3c1113cc 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -40,7 +40,7 @@ struct sam9x5_drvdata {
*/
static int sam9x5_wm8731_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct device *dev = rtd->dev;
int ret;
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index d6b692fff29a..d649037bda9b 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -95,7 +95,7 @@ static struct snd_soc_card db1550_ac97_machine = {
static int db1200_i2s_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* WM8731 has its own 12MHz crystal */
snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 8f855644c6b4..e82bbf2d1eea 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -281,7 +281,7 @@ static int au1xpsc_pcm_open(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int stype = substream->stream, *dmaids;
- dmaids = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dmaids = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (!dmaids)
return -ENODEV; /* whoa, has ordering changed? */
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index c9a038a5e2d3..4e246c7e78f2 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -195,7 +195,7 @@ static int alchemy_pcm_open(struct snd_soc_component *component,
int *dmaids, s = substream->stream;
char *name;
- dmaids = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dmaids = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (!dmaids)
return -ENODEV; /* whoa, has ordering changed? */
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index 0227993c5da8..05eb36991f14 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -58,7 +58,7 @@ static struct au1xpsc_audio_data *au1xpsc_ac97_workdata;
static inline struct au1xpsc_audio_data *ac97_to_pscdata(struct snd_ac97 *x)
{
struct snd_soc_card *c = x->bus->card->private_data;
- return snd_soc_dai_get_drvdata(c->rtd->cpu_dai);
+ return snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(c->rtd, 0));
}
#else
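The psc-ac97 conversion above is the one spot in these hunks where the runtime pointer is itself a struct member, so the accessor has to wrap c->rtd rather than be spelled as a member access. Expanding the macro by hand under the model sketched earlier makes the shape clear:

    #define asoc_rtd_to_cpu(rtd, n) ((rtd)->dais[n])

    /*
     * asoc_rtd_to_cpu(c->rtd, 0) expands to c->rtd->dais[0], the first
     * (and only) CPU DAI of the card's cached AC97 runtime; a spelling
     * such as c->asoc_rtd_to_cpu(rtd, 0) could never compile, since
     * the macro is not a member of the card.
     */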
diff --git a/sound/soc/bcm/Kconfig b/sound/soc/bcm/Kconfig
index 0037e96aa228..4218057b0874 100644
--- a/sound/soc/bcm/Kconfig
+++ b/sound/soc/bcm/Kconfig
@@ -17,3 +17,12 @@ config SND_SOC_CYGNUS
Cygnus chips (bcm958300, bcm958305, bcm911360)
If you don't know what to do here, say N.
+
+config SND_BCM63XX_I2S_WHISTLER
+ tristate "SoC Audio support for the Broadcom BCM63XX I2S module"
+ select REGMAP_MMIO
+ help
+ Say Y if you want to add support for ASoC audio on Broadcom
+ DSL/PON chips (bcm63158, bcm63178)
+
+ If you don't know what to do here, say N.
diff --git a/sound/soc/bcm/Makefile b/sound/soc/bcm/Makefile
index b81fa421ec27..7c2d7899603b 100644
--- a/sound/soc/bcm/Makefile
+++ b/sound/soc/bcm/Makefile
@@ -9,3 +9,7 @@ snd-soc-cygnus-objs := cygnus-pcm.o cygnus-ssp.o
obj-$(CONFIG_SND_SOC_CYGNUS) += snd-soc-cygnus.o
+# BCM63XX Platform Support
+snd-soc-63xx-objs := bcm63xx-i2s-whistler.o bcm63xx-pcm-whistler.o
+
+obj-$(CONFIG_SND_BCM63XX_I2S_WHISTLER) += snd-soc-63xx.o
\ No newline at end of file
diff --git a/sound/soc/bcm/bcm63xx-i2s-whistler.c b/sound/soc/bcm/bcm63xx-i2s-whistler.c
new file mode 100644
index 000000000000..246a57ac6679
--- /dev/null
+++ b/sound/soc/bcm/bcm63xx-i2s-whistler.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// linux/sound/soc/bcm/bcm63xx-i2s-whistler.c
+// BCM63xx whistler i2s driver
+// Copyright (c) 2020 Broadcom Corporation
+// Author: Kevin-Ke Li <kevin-ke.li@broadcom.com>
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "bcm63xx-i2s.h"
+
+#define DRV_NAME "brcm-i2s"
+
+static bool brcm_i2s_wr_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case I2S_TX_CFG ... I2S_TX_DESC_IFF_LEN:
+ case I2S_TX_CFG_2 ... I2S_RX_DESC_IFF_LEN:
+ case I2S_RX_CFG_2 ... I2S_REG_MAX:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool brcm_i2s_rd_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case I2S_TX_CFG ... I2S_REG_MAX:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool brcm_i2s_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case I2S_TX_CFG:
+ case I2S_TX_IRQ_CTL:
+ case I2S_TX_DESC_IFF_ADDR:
+ case I2S_TX_DESC_IFF_LEN:
+ case I2S_TX_DESC_OFF_ADDR:
+ case I2S_TX_DESC_OFF_LEN:
+ case I2S_TX_CFG_2:
+ case I2S_RX_CFG:
+ case I2S_RX_IRQ_CTL:
+ case I2S_RX_DESC_OFF_ADDR:
+ case I2S_RX_DESC_OFF_LEN:
+ case I2S_RX_DESC_IFF_LEN:
+ case I2S_RX_DESC_IFF_ADDR:
+ case I2S_RX_CFG_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config brcm_i2s_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = I2S_REG_MAX,
+ .writeable_reg = brcm_i2s_wr_reg,
+ .readable_reg = brcm_i2s_rd_reg,
+ .volatile_reg = brcm_i2s_volatile_reg,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int bcm63xx_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int ret = 0;
+ struct bcm_i2s_priv *i2s_priv = snd_soc_dai_get_drvdata(dai);
+
+ ret = clk_set_rate(i2s_priv->i2s_clk, params_rate(params));
+ if (ret < 0)
+ dev_err(i2s_priv->dev,
+ "Can't set sample rate, err: %d\n", ret);
+
+ return ret;
+}
+
+static int bcm63xx_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ unsigned int slavemode;
+ struct bcm_i2s_priv *i2s_priv = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regmap_i2s = i2s_priv->regmap_i2s;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(regmap_i2s, I2S_TX_CFG,
+ I2S_TX_OUT_R | I2S_TX_DATA_ALIGNMENT |
+ I2S_TX_DATA_ENABLE | I2S_TX_CLOCK_ENABLE,
+ I2S_TX_OUT_R | I2S_TX_DATA_ALIGNMENT |
+ I2S_TX_DATA_ENABLE | I2S_TX_CLOCK_ENABLE);
+ regmap_write(regmap_i2s, I2S_TX_IRQ_CTL, 0);
+ regmap_write(regmap_i2s, I2S_TX_IRQ_IFF_THLD, 0);
+ regmap_write(regmap_i2s, I2S_TX_IRQ_OFF_THLD, 1);
+
+ /* TX and RX block each have an independent bit to indicate
+ * if it is generating the clock for the I2S bus. The bus
+ * clocks need to be generated from either the TX or RX block,
+ * but not both
+ */
+ regmap_read(regmap_i2s, I2S_RX_CFG_2, &slavemode);
+ if (slavemode & I2S_RX_SLAVE_MODE_MASK)
+ regmap_update_bits(regmap_i2s, I2S_TX_CFG_2,
+ I2S_TX_SLAVE_MODE_MASK,
+ I2S_TX_MASTER_MODE);
+ else
+ regmap_update_bits(regmap_i2s, I2S_TX_CFG_2,
+ I2S_TX_SLAVE_MODE_MASK,
+ I2S_TX_SLAVE_MODE);
+ } else {
+ regmap_update_bits(regmap_i2s, I2S_RX_CFG,
+ I2S_RX_IN_R | I2S_RX_DATA_ALIGNMENT |
+ I2S_RX_CLOCK_ENABLE,
+ I2S_RX_IN_R | I2S_RX_DATA_ALIGNMENT |
+ I2S_RX_CLOCK_ENABLE);
+ regmap_write(regmap_i2s, I2S_RX_IRQ_CTL, 0);
+ regmap_write(regmap_i2s, I2S_RX_IRQ_IFF_THLD, 0);
+ regmap_write(regmap_i2s, I2S_RX_IRQ_OFF_THLD, 1);
+
+ regmap_read(regmap_i2s, I2S_TX_CFG_2, &slavemode);
+ if (slavemode & I2S_TX_SLAVE_MODE_MASK)
+ regmap_update_bits(regmap_i2s, I2S_RX_CFG_2,
+ I2S_RX_SLAVE_MODE_MASK, 0);
+ else
+ regmap_update_bits(regmap_i2s, I2S_RX_CFG_2,
+ I2S_RX_SLAVE_MODE_MASK,
+ I2S_RX_SLAVE_MODE);
+ }
+ return 0;
+}
+
+static void bcm63xx_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ unsigned int enabled, slavemode;
+ struct bcm_i2s_priv *i2s_priv = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regmap_i2s = i2s_priv->regmap_i2s;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(regmap_i2s, I2S_TX_CFG,
+ I2S_TX_OUT_R | I2S_TX_DATA_ALIGNMENT |
+ I2S_TX_DATA_ENABLE | I2S_TX_CLOCK_ENABLE, 0);
+ regmap_write(regmap_i2s, I2S_TX_IRQ_CTL, 1);
+ regmap_write(regmap_i2s, I2S_TX_IRQ_IFF_THLD, 4);
+ regmap_write(regmap_i2s, I2S_TX_IRQ_OFF_THLD, 4);
+
+ regmap_read(regmap_i2s, I2S_TX_CFG_2, &slavemode);
+ slavemode = slavemode & I2S_TX_SLAVE_MODE_MASK;
+ if (!slavemode) {
+ regmap_read(regmap_i2s, I2S_RX_CFG, &enabled);
+ enabled = enabled & I2S_RX_ENABLE_MASK;
+ if (enabled)
+ regmap_update_bits(regmap_i2s, I2S_RX_CFG_2,
+ I2S_RX_SLAVE_MODE_MASK,
+ I2S_RX_MASTER_MODE);
+ }
+ regmap_update_bits(regmap_i2s, I2S_TX_CFG_2,
+ I2S_TX_SLAVE_MODE_MASK,
+ I2S_TX_SLAVE_MODE);
+ } else {
+ regmap_update_bits(regmap_i2s, I2S_RX_CFG,
+ I2S_RX_IN_R | I2S_RX_DATA_ALIGNMENT |
+ I2S_RX_CLOCK_ENABLE, 0);
+ regmap_write(regmap_i2s, I2S_RX_IRQ_CTL, 1);
+ regmap_write(regmap_i2s, I2S_RX_IRQ_IFF_THLD, 4);
+ regmap_write(regmap_i2s, I2S_RX_IRQ_OFF_THLD, 4);
+
+ regmap_read(regmap_i2s, I2S_RX_CFG_2, &slavemode);
+ slavemode = slavemode & I2S_RX_SLAVE_MODE_MASK;
+ if (!slavemode) {
+ regmap_read(regmap_i2s, I2S_TX_CFG, &enabled);
+ enabled = enabled & I2S_TX_ENABLE_MASK;
+ if (enabled)
+ regmap_update_bits(regmap_i2s, I2S_TX_CFG_2,
+ I2S_TX_SLAVE_MODE_MASK,
+ I2S_TX_MASTER_MODE);
+ }
+
+ regmap_update_bits(regmap_i2s, I2S_RX_CFG_2,
+ I2S_RX_SLAVE_MODE_MASK, I2S_RX_SLAVE_MODE);
+ }
+}
+
+static const struct snd_soc_dai_ops bcm63xx_i2s_dai_ops = {
+ .startup = bcm63xx_i2s_startup,
+ .shutdown = bcm63xx_i2s_shutdown,
+ .hw_params = bcm63xx_i2s_hw_params,
+};
+
+static struct snd_soc_dai_driver bcm63xx_i2s_dai = {
+ .name = DRV_NAME,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &bcm63xx_i2s_dai_ops,
+ .symmetric_rates = 1,
+ .symmetric_channels = 1,
+};
+
+static const struct snd_soc_component_driver bcm63xx_i2s_component = {
+ .name = "bcm63xx",
+};
+
+static int bcm63xx_i2s_dev_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ void __iomem *regs;
+ struct resource *r_mem;
+ struct bcm_i2s_priv *i2s_priv;
+ struct regmap *regmap_i2s;
+ struct clk *i2s_clk;
+
+ i2s_priv = devm_kzalloc(&pdev->dev, sizeof(*i2s_priv), GFP_KERNEL);
+ if (!i2s_priv)
+ return -ENOMEM;
+
+ i2s_clk = devm_clk_get(&pdev->dev, "i2sclk");
+ if (IS_ERR(i2s_clk)) {
+ dev_err(&pdev->dev, "%s: cannot get a brcm clock: %ld\n",
+ __func__, PTR_ERR(i2s_clk));
+ return PTR_ERR(i2s_clk);
+ }
+
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_mem) {
+ dev_err(&pdev->dev, "Unable to get register resource.\n");
+ return -ENODEV;
+ }
+
+ regs = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ return ret;
+ }
+
+ regmap_i2s = devm_regmap_init_mmio(&pdev->dev,
+ regs, &brcm_i2s_regmap_config);
+ if (IS_ERR(regmap_i2s))
+ return PTR_ERR(regmap_i2s);
+
+ regmap_update_bits(regmap_i2s, I2S_MISC_CFG,
+ I2S_PAD_LVL_LOOP_DIS_MASK,
+ I2S_PAD_LVL_LOOP_DIS_ENABLE);
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &bcm63xx_i2s_component,
+ &bcm63xx_i2s_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the dai\n");
+ return ret;
+ }
+
+ i2s_priv->dev = &pdev->dev;
+ i2s_priv->i2s_clk = i2s_clk;
+ i2s_priv->regmap_i2s = regmap_i2s;
+ dev_set_drvdata(&pdev->dev, i2s_priv);
+
+ ret = bcm63xx_soc_platform_probe(pdev, i2s_priv);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register the pcm\n");
+
+ return ret;
+}
+
+static int bcm63xx_i2s_dev_remove(struct platform_device *pdev)
+{
+ bcm63xx_soc_platform_remove(pdev);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id snd_soc_bcm_audio_match[] = {
+ {.compatible = "brcm,bcm63xx-i2s"},
+ { }
+};
+#endif
+
+static struct platform_driver bcm63xx_i2s_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(snd_soc_bcm_audio_match),
+ },
+ .probe = bcm63xx_i2s_dev_probe,
+ .remove = bcm63xx_i2s_dev_remove,
+};
+
+module_platform_driver(bcm63xx_i2s_driver);
+
+MODULE_AUTHOR("Kevin,Li <kevin-ke.li@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom DSL XPON ASOC I2S Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/bcm/bcm63xx-i2s.h b/sound/soc/bcm/bcm63xx-i2s.h
new file mode 100644
index 000000000000..edc328ba53d3
--- /dev/null
+++ b/sound/soc/bcm/bcm63xx-i2s.h
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// linux/sound/soc/bcm/bcm63xx-i2s.h
+// Copyright (c) 2020 Broadcom Corporation
+// Author: Kevin-Ke Li <kevin-ke.li@broadcom.com>
+
+#ifndef __BCM63XX_I2S_H
+#define __BCM63XX_I2S_H
+
+#define I2S_DESC_FIFO_DEPTH 8
+#define I2S_MISC_CFG (0x003C)
+#define I2S_PAD_LVL_LOOP_DIS_MASK (1 << 2)
+#define I2S_PAD_LVL_LOOP_DIS_ENABLE I2S_PAD_LVL_LOOP_DIS_MASK
+
+#define I2S_TX_ENABLE_MASK (1 << 31)
+#define I2S_TX_ENABLE I2S_TX_ENABLE_MASK
+#define I2S_TX_OUT_R (1 << 19)
+#define I2S_TX_DATA_ALIGNMENT (1 << 2)
+#define I2S_TX_DATA_ENABLE (1 << 1)
+#define I2S_TX_CLOCK_ENABLE (1 << 0)
+
+#define I2S_TX_DESC_OFF_LEVEL_SHIFT 12
+#define I2S_TX_DESC_OFF_LEVEL_MASK (0x0F << I2S_TX_DESC_OFF_LEVEL_SHIFT)
+#define I2S_TX_DESC_IFF_LEVEL_SHIFT 8
+#define I2S_TX_DESC_IFF_LEVEL_MASK (0x0F << I2S_TX_DESC_IFF_LEVEL_SHIFT)
+#define I2S_TX_DESC_OFF_INTR_EN_MSK (1 << 1)
+#define I2S_TX_DESC_OFF_INTR_EN I2S_TX_DESC_OFF_INTR_EN_MSK
+
+#define I2S_TX_CFG (0x0000)
+#define I2S_TX_IRQ_CTL (0x0004)
+#define I2S_TX_IRQ_EN (0x0008)
+#define I2S_TX_IRQ_IFF_THLD (0x000c)
+#define I2S_TX_IRQ_OFF_THLD (0x0010)
+#define I2S_TX_DESC_IFF_ADDR (0x0014)
+#define I2S_TX_DESC_IFF_LEN (0x0018)
+#define I2S_TX_DESC_OFF_ADDR (0x001C)
+#define I2S_TX_DESC_OFF_LEN (0x0020)
+#define I2S_TX_CFG_2 (0x0024)
+#define I2S_TX_SLAVE_MODE_SHIFT 13
+#define I2S_TX_SLAVE_MODE_MASK (1 << I2S_TX_SLAVE_MODE_SHIFT)
+#define I2S_TX_SLAVE_MODE I2S_TX_SLAVE_MODE_MASK
+#define I2S_TX_MASTER_MODE 0
+#define I2S_TX_INTR_MASK 0x0F
+
+#define I2S_RX_ENABLE_MASK (1 << 31)
+#define I2S_RX_ENABLE I2S_RX_ENABLE_MASK
+#define I2S_RX_IN_R (1 << 19)
+#define I2S_RX_DATA_ALIGNMENT (1 << 2)
+#define I2S_RX_CLOCK_ENABLE (1 << 0)
+
+#define I2S_RX_DESC_OFF_LEVEL_SHIFT 12
+#define I2S_RX_DESC_OFF_LEVEL_MASK (0x0F << I2S_RX_DESC_OFF_LEVEL_SHIFT)
+#define I2S_RX_DESC_IFF_LEVEL_SHIFT 8
+#define I2S_RX_DESC_IFF_LEVEL_MASK (0x0F << I2S_RX_DESC_IFF_LEVEL_SHIFT)
+#define I2S_RX_DESC_OFF_INTR_EN_MSK (1 << 1)
+#define I2S_RX_DESC_OFF_INTR_EN I2S_RX_DESC_OFF_INTR_EN_MSK
+
+#define I2S_RX_CFG (0x0040) /* 20c0 */
+#define I2S_RX_IRQ_CTL (0x0044)
+#define I2S_RX_IRQ_EN (0x0048)
+#define I2S_RX_IRQ_IFF_THLD (0x004C)
+#define I2S_RX_IRQ_OFF_THLD (0x0050)
+#define I2S_RX_DESC_IFF_ADDR (0x0054)
+#define I2S_RX_DESC_IFF_LEN (0x0058)
+#define I2S_RX_DESC_OFF_ADDR (0x005C)
+#define I2S_RX_DESC_OFF_LEN (0x0060)
+#define I2S_RX_CFG_2 (0x0064)
+#define I2S_RX_SLAVE_MODE_SHIFT 13
+#define I2S_RX_SLAVE_MODE_MASK (1 << I2S_RX_SLAVE_MODE_SHIFT)
+#define I2S_RX_SLAVE_MODE I2S_RX_SLAVE_MODE_MASK
+#define I2S_RX_MASTER_MODE 0
+#define I2S_RX_INTR_MASK 0x0F
+
+#define I2S_REG_MAX 0x007C
+
+struct bcm_i2s_priv {
+ struct device *dev;
+ struct resource *r_irq;
+ struct regmap *regmap_i2s;
+ struct clk *i2s_clk;
+ struct snd_pcm_substream *play_substream;
+ struct snd_pcm_substream *capture_substream;
+ struct i2s_dma_desc *play_dma_desc;
+ struct i2s_dma_desc *capture_dma_desc;
+};
+
+extern int bcm63xx_soc_platform_probe(struct platform_device *pdev,
+ struct bcm_i2s_priv *i2s_priv);
+extern int bcm63xx_soc_platform_remove(struct platform_device *pdev);
+
+#endif
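The IRQ control registers defined above pack the output-FIFO and input-FIFO descriptor counts into 4-bit fields; the interrupt handler later in this patch extracts them with these shift/mask pairs and refills up to I2S_DESC_FIFO_DEPTH (8) descriptors. A worked example with a hypothetical status word:

    #include <stdio.h>

    #define OFF_SHIFT 12
    #define OFF_MASK  (0x0F << OFF_SHIFT)
    #define IFF_SHIFT 8
    #define IFF_MASK  (0x0F << IFF_SHIFT)

    int main(void)
    {
            unsigned int status = 0x2300;   /* hypothetical IRQ status */
            unsigned int off = (status & OFF_MASK) >> OFF_SHIFT;
            unsigned int iff = (status & IFF_MASK) >> IFF_SHIFT;

            printf("off=%u iff=%u\n", off, iff);  /* off=2 iff=3 */
            return 0;                             /* 8 - 3 = 5 slots free */
    }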
diff --git a/sound/soc/bcm/bcm63xx-pcm-whistler.c b/sound/soc/bcm/bcm63xx-pcm-whistler.c
new file mode 100644
index 000000000000..e46c390683e7
--- /dev/null
+++ b/sound/soc/bcm/bcm63xx-pcm-whistler.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// linux/sound/soc/bcm/bcm63xx-pcm-whistler.c
+// BCM63xx whistler pcm interface
+// Copyright (c) 2020 Broadcom Corporation
+// Author: Kevin-Ke Li <kevin-ke.li@broadcom.com>
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <sound/soc.h>
+#include "bcm63xx-i2s.h"
+
+struct i2s_dma_desc {
+ unsigned char *dma_area;
+ dma_addr_t dma_addr;
+ unsigned int dma_len;
+};
+
+struct bcm63xx_runtime_data {
+ int dma_len;
+ dma_addr_t dma_addr;
+ dma_addr_t dma_addr_next;
+};
+
+static const struct snd_pcm_hardware bcm63xx_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE, /* support S32 only */
+ .period_bytes_max = 8192 - 32,
+ .periods_min = 1,
+ .periods_max = PAGE_SIZE/sizeof(struct i2s_dma_desc),
+ .buffer_bytes_max = 128 * 1024,
+ .fifo_size = 32,
+};
+
+static int bcm63xx_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct i2s_dma_desc *dma_desc;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+ runtime->dma_bytes = params_buffer_bytes(params);
+
+ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
+ if (!dma_desc)
+ return -ENOMEM;
+
+ snd_soc_dai_set_dma_data(asoc_rtd_to_cpu(rtd, 0), substream, dma_desc);
+
+ return 0;
+}
+
+static int bcm63xx_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct i2s_dma_desc *dma_desc;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ dma_desc = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
+ kfree(dma_desc);
+ snd_pcm_set_runtime_buffer(substream, NULL);
+
+ return 0;
+}
+
+static int bcm63xx_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd;
+ struct bcm_i2s_priv *i2s_priv;
+ struct regmap *regmap_i2s;
+
+ rtd = substream->private_data;
+ i2s_priv = dev_get_drvdata(asoc_rtd_to_cpu(rtd, 0)->dev);
+ regmap_i2s = i2s_priv->regmap_i2s;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ regmap_update_bits(regmap_i2s,
+ I2S_TX_IRQ_EN,
+ I2S_TX_DESC_OFF_INTR_EN,
+ I2S_TX_DESC_OFF_INTR_EN);
+ regmap_update_bits(regmap_i2s,
+ I2S_TX_CFG,
+ I2S_TX_ENABLE_MASK,
+ I2S_TX_ENABLE);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ regmap_write(regmap_i2s,
+ I2S_TX_IRQ_EN,
+ 0);
+ regmap_update_bits(regmap_i2s,
+ I2S_TX_CFG,
+ I2S_TX_ENABLE_MASK,
+ 0);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else {
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ regmap_update_bits(regmap_i2s,
+ I2S_RX_IRQ_EN,
+ I2S_RX_DESC_OFF_INTR_EN_MSK,
+ I2S_RX_DESC_OFF_INTR_EN);
+ regmap_update_bits(regmap_i2s,
+ I2S_RX_CFG,
+ I2S_RX_ENABLE_MASK,
+ I2S_RX_ENABLE);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ regmap_update_bits(regmap_i2s,
+ I2S_RX_IRQ_EN,
+ I2S_RX_DESC_OFF_INTR_EN_MSK,
+ 0);
+ regmap_update_bits(regmap_i2s,
+ I2S_RX_CFG,
+ I2S_RX_ENABLE_MASK,
+ 0);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+ return ret;
+}
+
+static int bcm63xx_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct i2s_dma_desc *dma_desc;
+ struct regmap *regmap_i2s;
+ struct bcm_i2s_priv *i2s_priv;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ uint32_t regaddr_desclen, regaddr_descaddr;
+
+ dma_desc = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
+ dma_desc->dma_len = snd_pcm_lib_period_bytes(substream);
+ dma_desc->dma_addr = runtime->dma_addr;
+ dma_desc->dma_area = runtime->dma_area;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regaddr_desclen = I2S_TX_DESC_IFF_LEN;
+ regaddr_descaddr = I2S_TX_DESC_IFF_ADDR;
+ } else {
+ regaddr_desclen = I2S_RX_DESC_IFF_LEN;
+ regaddr_descaddr = I2S_RX_DESC_IFF_ADDR;
+ }
+
+ i2s_priv = dev_get_drvdata(asoc_rtd_to_cpu(rtd, 0)->dev);
+ regmap_i2s = i2s_priv->regmap_i2s;
+
+ regmap_write(regmap_i2s, regaddr_desclen, dma_desc->dma_len);
+ regmap_write(regmap_i2s, regaddr_descaddr, dma_desc->dma_addr);
+
+ return 0;
+}
+
+static snd_pcm_uframes_t
+bcm63xx_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ snd_pcm_uframes_t x;
+ struct bcm63xx_runtime_data *prtd = substream->runtime->private_data;
+
+ if (!prtd->dma_addr_next)
+ prtd->dma_addr_next = substream->runtime->dma_addr;
+
+ x = bytes_to_frames(substream->runtime,
+ prtd->dma_addr_next - substream->runtime->dma_addr);
+
+ return x == substream->runtime->buffer_size ? 0 : x;
+}
+
+static int bcm63xx_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ return dma_mmap_wc(substream->pcm->card->dev, vma,
+ runtime->dma_area,
+ runtime->dma_addr,
+ runtime->dma_bytes);
+
+}
+
+static int bcm63xx_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm63xx_runtime_data *prtd;
+
+ runtime->hw = bcm63xx_pcm_hardware;
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+ if (ret)
+ goto out;
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+ if (ret)
+ goto out;
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ goto out;
+
+ ret = -ENOMEM;
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ goto out;
+
+ runtime->private_data = prtd;
+ return 0;
+out:
+ return ret;
+}
+
+static int bcm63xx_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm63xx_runtime_data *prtd = runtime->private_data;
+
+ kfree(prtd);
+ return 0;
+}
+
+static irqreturn_t i2s_dma_isr(int irq, void *bcm_i2s_priv)
+{
+ unsigned int availdepth, ifflevel, offlevel, int_status, val_1, val_2;
+ struct bcm63xx_runtime_data *prtd;
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
+ struct regmap *regmap_i2s;
+ struct i2s_dma_desc *dma_desc;
+ struct snd_soc_pcm_runtime *rtd;
+ struct bcm_i2s_priv *i2s_priv;
+
+ i2s_priv = (struct bcm_i2s_priv *)bcm_i2s_priv;
+ regmap_i2s = i2s_priv->regmap_i2s;
+
+ /* rx */
+ regmap_read(regmap_i2s, I2S_RX_IRQ_CTL, &int_status);
+
+ if (int_status & I2S_RX_DESC_OFF_INTR_EN_MSK) {
+ substream = i2s_priv->capture_substream;
+ runtime = substream->runtime;
+ rtd = substream->private_data;
+ prtd = runtime->private_data;
+ dma_desc = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
+
+ offlevel = (int_status & I2S_RX_DESC_OFF_LEVEL_MASK) >>
+ I2S_RX_DESC_OFF_LEVEL_SHIFT;
+ while (offlevel) {
+ regmap_read(regmap_i2s, I2S_RX_DESC_OFF_ADDR, &val_1);
+ regmap_read(regmap_i2s, I2S_RX_DESC_OFF_LEN, &val_2);
+ offlevel--;
+ }
+ prtd->dma_addr_next = val_1 + val_2;
+ ifflevel = (int_status & I2S_RX_DESC_IFF_LEVEL_MASK) >>
+ I2S_RX_DESC_IFF_LEVEL_SHIFT;
+
+ availdepth = I2S_DESC_FIFO_DEPTH - ifflevel;
+ while (availdepth) {
+ dma_desc->dma_addr +=
+ snd_pcm_lib_period_bytes(substream);
+ dma_desc->dma_area +=
+ snd_pcm_lib_period_bytes(substream);
+ if (dma_desc->dma_addr - runtime->dma_addr >=
+ runtime->dma_bytes) {
+ dma_desc->dma_addr = runtime->dma_addr;
+ dma_desc->dma_area = runtime->dma_area;
+ }
+
+ prtd->dma_addr = dma_desc->dma_addr;
+ regmap_write(regmap_i2s, I2S_RX_DESC_IFF_LEN,
+ snd_pcm_lib_period_bytes(substream));
+ regmap_write(regmap_i2s, I2S_RX_DESC_IFF_ADDR,
+ dma_desc->dma_addr);
+ availdepth--;
+ }
+
+ snd_pcm_period_elapsed(substream);
+
+ /* Clear interrupt by writing 0 */
+ regmap_update_bits(regmap_i2s, I2S_RX_IRQ_CTL,
+ I2S_RX_INTR_MASK, 0);
+ }
+
+ /* tx */
+ regmap_read(regmap_i2s, I2S_TX_IRQ_CTL, &int_status);
+
+ if (int_status & I2S_TX_DESC_OFF_INTR_EN_MSK) {
+ substream = i2s_priv->play_substream;
+ runtime = substream->runtime;
+ rtd = substream->private_data;
+ prtd = runtime->private_data;
+ dma_desc = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
+
+ offlevel = (int_status & I2S_TX_DESC_OFF_LEVEL_MASK) >>
+ I2S_TX_DESC_OFF_LEVEL_SHIFT;
+ while (offlevel) {
+ regmap_read(regmap_i2s, I2S_TX_DESC_OFF_ADDR, &val_1);
+ regmap_read(regmap_i2s, I2S_TX_DESC_OFF_LEN, &val_2);
+ prtd->dma_addr_next = val_1 + val_2;
+ offlevel--;
+ }
+
+ ifflevel = (int_status & I2S_TX_DESC_IFF_LEVEL_MASK) >>
+ I2S_TX_DESC_IFF_LEVEL_SHIFT;
+ availdepth = I2S_DESC_FIFO_DEPTH - ifflevel;
+
+ while (availdepth) {
+ dma_desc->dma_addr +=
+ snd_pcm_lib_period_bytes(substream);
+ dma_desc->dma_area +=
+ snd_pcm_lib_period_bytes(substream);
+
+ if (dma_desc->dma_addr - runtime->dma_addr >=
+ runtime->dma_bytes) {
+ dma_desc->dma_addr = runtime->dma_addr;
+ dma_desc->dma_area = runtime->dma_area;
+ }
+
+ prtd->dma_addr = dma_desc->dma_addr;
+ regmap_write(regmap_i2s, I2S_TX_DESC_IFF_LEN,
+ snd_pcm_lib_period_bytes(substream));
+ regmap_write(regmap_i2s, I2S_TX_DESC_IFF_ADDR,
+ dma_desc->dma_addr);
+ availdepth--;
+ }
+
+ snd_pcm_period_elapsed(substream);
+
+ /* Clear interrupt by writing 0 */
+ regmap_update_bits(regmap_i2s, I2S_TX_IRQ_CTL,
+ I2S_TX_INTR_MASK, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bcm63xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size = bcm63xx_pcm_hardware.buffer_bytes_max;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+
+ buf->area = dma_alloc_wc(pcm->card->dev,
+ size, &buf->addr,
+ GFP_KERNEL);
+ if (!buf->area)
+ return -ENOMEM;
+ buf->bytes = size;
+ return 0;
+}
+
+static int bcm63xx_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm = rtd->pcm;
+ struct bcm_i2s_priv *i2s_priv;
+ int ret;
+
+ i2s_priv = dev_get_drvdata(asoc_rtd_to_cpu(rtd, 0)->dev);
+
+ of_dma_configure(pcm->card->dev, pcm->card->dev->of_node, 1);
+
+ ret = dma_coerce_mask_and_coherent(pcm->card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ ret = bcm63xx_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret)
+ goto out;
+
+ i2s_priv->play_substream =
+ pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ }
+
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+ ret = bcm63xx_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (ret)
+ goto out;
+ i2s_priv->capture_substream =
+ pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ }
+
+out:
+ return ret;
+}
+
+static void bcm63xx_pcm_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
+{
+ int stream;
+ struct snd_dma_buffer *buf;
+ struct snd_pcm_substream *substream;
+
+ for (stream = 0; stream < 2; stream++) {
+ substream = pcm->streams[stream].substream;
+ if (!substream)
+ continue;
+ buf = &substream->dma_buffer;
+ if (!buf->area)
+ continue;
+ dma_free_wc(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+}
+
+static const struct snd_soc_component_driver bcm63xx_soc_platform = {
+ .open = bcm63xx_pcm_open,
+ .close = bcm63xx_pcm_close,
+ .hw_params = bcm63xx_pcm_hw_params,
+ .hw_free = bcm63xx_pcm_hw_free,
+ .prepare = bcm63xx_pcm_prepare,
+ .trigger = bcm63xx_pcm_trigger,
+ .pointer = bcm63xx_pcm_pointer,
+ .mmap = bcm63xx_pcm_mmap,
+ .pcm_construct = bcm63xx_soc_pcm_new,
+ .pcm_destruct = bcm63xx_pcm_free_dma_buffers,
+};
+
+int bcm63xx_soc_platform_probe(struct platform_device *pdev,
+ struct bcm_i2s_priv *i2s_priv)
+{
+ int ret;
+
+ i2s_priv->r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!i2s_priv->r_irq) {
+ dev_err(&pdev->dev, "Unable to get register irq resource.\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, i2s_priv->r_irq->start, i2s_dma_isr,
+ i2s_priv->r_irq->flags, "i2s_dma", (void *)i2s_priv);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "i2s_init: failed to request interrupt.ret=%d\n", ret);
+ return ret;
+ }
+
+ return devm_snd_soc_register_component(&pdev->dev,
+ &bcm63xx_soc_platform, NULL, 0);
+}
+
+int bcm63xx_soc_platform_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+MODULE_AUTHOR("Kevin,Li <kevin-ke.li@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom DSL XPON ASOC PCM Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/bcm/cygnus-pcm.c b/sound/soc/bcm/cygnus-pcm.c
index 3a80c613bc3f..f96d27c8b301 100644
--- a/sound/soc/bcm/cygnus-pcm.c
+++ b/sound/soc/bcm/cygnus-pcm.c
@@ -209,7 +209,7 @@ static struct cygnus_aio_port *cygnus_dai_get_dma_data(
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- return snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+ return snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(soc_runtime, 0), substream);
}
static void ringbuf_set_initial(void __iomem *audio_io,
@@ -359,7 +359,7 @@ static void disable_intr(struct snd_pcm_substream *substream)
aio = cygnus_dai_get_dma_data(substream);
- dev_dbg(rtd->cpu_dai->dev, "%s on port %d\n", __func__, aio->portnum);
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s on port %d\n", __func__, aio->portnum);
/* The port number maps to the bit position to be set */
set_mask = BIT(aio->portnum);
@@ -590,7 +590,7 @@ static int cygnus_pcm_open(struct snd_soc_component *component,
if (!aio)
return -ENODEV;
- dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s port %d\n", __func__, aio->portnum);
snd_soc_set_runtime_hwparams(substream, &cygnus_pcm_hw);
@@ -623,7 +623,7 @@ static int cygnus_pcm_close(struct snd_soc_component *component,
aio = cygnus_dai_get_dma_data(substream);
- dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s port %d\n", __func__, aio->portnum);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
aio->play_stream = NULL;
@@ -631,7 +631,7 @@ static int cygnus_pcm_close(struct snd_soc_component *component,
aio->capture_stream = NULL;
if (!aio->play_stream && !aio->capture_stream)
- dev_dbg(rtd->cpu_dai->dev, "freed port %d\n", aio->portnum);
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "freed port %d\n", aio->portnum);
return 0;
}
@@ -645,7 +645,7 @@ static int cygnus_pcm_hw_params(struct snd_soc_component *component,
struct cygnus_aio_port *aio;
aio = cygnus_dai_get_dma_data(substream);
- dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s port %d\n", __func__, aio->portnum);
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
runtime->dma_bytes = params_buffer_bytes(params);
@@ -660,7 +660,7 @@ static int cygnus_pcm_hw_free(struct snd_soc_component *component,
struct cygnus_aio_port *aio;
aio = cygnus_dai_get_dma_data(substream);
- dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s port %d\n", __func__, aio->portnum);
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
@@ -678,12 +678,12 @@ static int cygnus_pcm_prepare(struct snd_soc_component *component,
struct ringbuf_regs *p_rbuf = NULL;
aio = cygnus_dai_get_dma_data(substream);
- dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s port %d\n", __func__, aio->portnum);
bufsize = snd_pcm_lib_buffer_bytes(substream);
periodsize = snd_pcm_lib_period_bytes(substream);
- dev_dbg(rtd->cpu_dai->dev, "%s (buf_size %lu) (period_size %lu)\n",
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s (buf_size %lu) (period_size %lu)\n",
__func__, bufsize, periodsize);
configure_ringbuf_regs(substream);
@@ -745,11 +745,11 @@ static int cygnus_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
buf->area = dma_alloc_coherent(pcm->card->dev, size,
&buf->addr, GFP_KERNEL);
- dev_dbg(rtd->cpu_dai->dev, "%s: size 0x%zx @ %pK\n",
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "%s: size 0x%zx @ %pK\n",
__func__, size, buf->area);
if (!buf->area) {
- dev_err(rtd->cpu_dai->dev, "%s: dma_alloc failed\n", __func__);
+ dev_err(asoc_rtd_to_cpu(rtd, 0)->dev, "%s: dma_alloc failed\n", __func__);
return -ENOMEM;
}
buf->bytes = size;
diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
index 10961190068e..ccf65f087ea6 100644
--- a/sound/soc/cirrus/edb93xx.c
+++ b/sound/soc/cirrus/edb93xx.c
@@ -23,8 +23,8 @@ static int edb93xx_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int err;
unsigned int mclk_rate;
unsigned int rate = params_rate(params);
diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
index 70c2f3e08d6d..cb133e80b7c3 100644
--- a/sound/soc/cirrus/snappercl15.c
+++ b/sound/soc/cirrus/snappercl15.c
@@ -23,8 +23,8 @@ static int snappercl15_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int err;
err = snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK,
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index ea912439e446..e6a0c5d05fa5 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -14,262 +14,264 @@ menu "CODEC drivers"
config SND_SOC_ALL_CODECS
tristate "Build all ASoC CODEC drivers"
depends on COMPILE_TEST
- select SND_SOC_88PM860X if MFD_88PM860X
- select SND_SOC_L3
- select SND_SOC_AB8500_CODEC if ABX500_CORE
- select SND_SOC_AC97_CODEC
- select SND_SOC_AD1836 if SPI_MASTER
- select SND_SOC_AD193X_SPI if SPI_MASTER
- select SND_SOC_AD193X_I2C if I2C
- select SND_SOC_AD1980 if SND_SOC_AC97_BUS
- select SND_SOC_AD73311
- select SND_SOC_ADAU1373 if I2C
- select SND_SOC_ADAU1761_I2C if I2C
- select SND_SOC_ADAU1761_SPI if SPI
- select SND_SOC_ADAU1781_I2C if I2C
- select SND_SOC_ADAU1781_SPI if SPI
- select SND_SOC_ADAV801 if SPI_MASTER
- select SND_SOC_ADAV803 if I2C
- select SND_SOC_ADAU1977_SPI if SPI_MASTER
- select SND_SOC_ADAU1977_I2C if I2C
- select SND_SOC_ADAU1701 if I2C
- select SND_SOC_ADAU7002
- select SND_SOC_ADAU7118_I2C if I2C
- select SND_SOC_ADAU7118_HW
- select SND_SOC_ADS117X
- select SND_SOC_AK4104 if SPI_MASTER
- select SND_SOC_AK4118 if I2C
- select SND_SOC_AK4458 if I2C
- select SND_SOC_AK4535 if I2C
- select SND_SOC_AK4554
- select SND_SOC_AK4613 if I2C
- select SND_SOC_AK4641 if I2C
- select SND_SOC_AK4642 if I2C
- select SND_SOC_AK4671 if I2C
- select SND_SOC_AK5386
- select SND_SOC_AK5558 if I2C
- select SND_SOC_ALC5623 if I2C
- select SND_SOC_ALC5632 if I2C
- select SND_SOC_BT_SCO
- select SND_SOC_BD28623
- select SND_SOC_CQ0093VC
- select SND_SOC_CROS_EC_CODEC if CROS_EC
- select SND_SOC_CS35L32 if I2C
- select SND_SOC_CS35L33 if I2C
- select SND_SOC_CS35L34 if I2C
- select SND_SOC_CS35L35 if I2C
- select SND_SOC_CS35L36 if I2C
- select SND_SOC_CS42L42 if I2C
- select SND_SOC_CS42L51_I2C if I2C
- select SND_SOC_CS42L52 if I2C && INPUT
- select SND_SOC_CS42L56 if I2C && INPUT
- select SND_SOC_CS42L73 if I2C
- select SND_SOC_CS4265 if I2C
- select SND_SOC_CS4270 if I2C
- select SND_SOC_CS4271_I2C if I2C
- select SND_SOC_CS4271_SPI if SPI_MASTER
- select SND_SOC_CS42XX8_I2C if I2C
- select SND_SOC_CS43130 if I2C
- select SND_SOC_CS4341 if SND_SOC_I2C_AND_SPI
- select SND_SOC_CS4349 if I2C
- select SND_SOC_CS47L15 if MFD_CS47L15
- select SND_SOC_CS47L24 if MFD_CS47L24
- select SND_SOC_CS47L35 if MFD_CS47L35
- select SND_SOC_CS47L85 if MFD_CS47L85
- select SND_SOC_CS47L90 if MFD_CS47L90
- select SND_SOC_CS47L92 if MFD_CS47L92
- select SND_SOC_CS53L30 if I2C
- select SND_SOC_CX20442 if TTY
- select SND_SOC_CX2072X if I2C
- select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI
- select SND_SOC_DA7213 if I2C
- select SND_SOC_DA7218 if I2C
- select SND_SOC_DA7219 if I2C
- select SND_SOC_DA732X if I2C
- select SND_SOC_DA9055 if I2C
- select SND_SOC_DMIC if GPIOLIB
- select SND_SOC_ES8316 if I2C
- select SND_SOC_ES8328_SPI if SPI_MASTER
- select SND_SOC_ES8328_I2C if I2C
- select SND_SOC_ES7134
- select SND_SOC_ES7241
- select SND_SOC_GTM601
- select SND_SOC_HDAC_HDMI
- select SND_SOC_HDAC_HDA
- select SND_SOC_ICS43432
- select SND_SOC_INNO_RK3036
- select SND_SOC_ISABELLE if I2C
- select SND_SOC_JZ4740_CODEC
- select SND_SOC_JZ4725B_CODEC
- select SND_SOC_JZ4770_CODEC
- select SND_SOC_LM4857 if I2C
- select SND_SOC_LM49453 if I2C
- select SND_SOC_LOCHNAGAR_SC if MFD_LOCHNAGAR
- select SND_SOC_MAX98088 if I2C
- select SND_SOC_MAX98090 if I2C
- select SND_SOC_MAX98095 if I2C
- select SND_SOC_MAX98357A if GPIOLIB
- select SND_SOC_MAX98371 if I2C
- select SND_SOC_MAX98504 if I2C
- select SND_SOC_MAX9867 if I2C
- select SND_SOC_MAX98925 if I2C
- select SND_SOC_MAX98926 if I2C
- select SND_SOC_MAX98927 if I2C
- select SND_SOC_MAX98373 if I2C
- select SND_SOC_MAX9850 if I2C
- select SND_SOC_MAX9860 if I2C
- select SND_SOC_MAX9759
- select SND_SOC_MAX9768 if I2C
- select SND_SOC_MAX9877 if I2C
- select SND_SOC_MC13783 if MFD_MC13XXX
- select SND_SOC_ML26124 if I2C
- select SND_SOC_MT6351 if MTK_PMIC_WRAP
- select SND_SOC_MT6358 if MTK_PMIC_WRAP
- select SND_SOC_MT6660 if I2C
- select SND_SOC_NAU8540 if I2C
- select SND_SOC_NAU8810 if I2C
- select SND_SOC_NAU8822 if I2C
- select SND_SOC_NAU8824 if I2C
- select SND_SOC_NAU8825 if I2C
- select SND_SOC_HDMI_CODEC
- select SND_SOC_PCM1681 if I2C
- select SND_SOC_PCM1789_I2C if I2C
- select SND_SOC_PCM179X_I2C if I2C
- select SND_SOC_PCM179X_SPI if SPI_MASTER
- select SND_SOC_PCM186X_I2C if I2C
- select SND_SOC_PCM186X_SPI if SPI_MASTER
- select SND_SOC_PCM3008
- select SND_SOC_PCM3060_I2C if I2C
- select SND_SOC_PCM3060_SPI if SPI_MASTER
- select SND_SOC_PCM3168A_I2C if I2C
- select SND_SOC_PCM3168A_SPI if SPI_MASTER
- select SND_SOC_PCM5102A
- select SND_SOC_PCM512x_I2C if I2C
- select SND_SOC_PCM512x_SPI if SPI_MASTER
- select SND_SOC_RK3328
- select SND_SOC_RT274 if I2C
- select SND_SOC_RT286 if I2C
- select SND_SOC_RT298 if I2C
- select SND_SOC_RT1011 if I2C
- select SND_SOC_RT1015 if I2C
- select SND_SOC_RT1305 if I2C
- select SND_SOC_RT1308 if I2C
- select SND_SOC_RT5514 if I2C
- select SND_SOC_RT5616 if I2C
- select SND_SOC_RT5631 if I2C
- select SND_SOC_RT5640 if I2C
- select SND_SOC_RT5645 if I2C
- select SND_SOC_RT5651 if I2C
- select SND_SOC_RT5659 if I2C
- select SND_SOC_RT5660 if I2C
- select SND_SOC_RT5663 if I2C
- select SND_SOC_RT5665 if I2C
- select SND_SOC_RT5668 if I2C
- select SND_SOC_RT5670 if I2C
- select SND_SOC_RT5677 if I2C && SPI_MASTER
- select SND_SOC_RT5682 if I2C
- select SND_SOC_RT700_SDW if SOUNDWIRE
- select SND_SOC_RT711_SDW if SOUNDWIRE
- select SND_SOC_RT715_SDW if SOUNDWIRE
- select SND_SOC_RT1308_SDW if SOUNDWIRE
- select SND_SOC_SGTL5000 if I2C
- select SND_SOC_SI476X if MFD_SI476X_CORE
- select SND_SOC_SIMPLE_AMPLIFIER
- select SND_SOC_SIRF_AUDIO_CODEC
- select SND_SOC_SPDIF
- select SND_SOC_SSM2305
- select SND_SOC_SSM2518 if I2C
- select SND_SOC_SSM2602_SPI if SPI_MASTER
- select SND_SOC_SSM2602_I2C if I2C
- select SND_SOC_SSM4567 if I2C
- select SND_SOC_STA32X if I2C
- select SND_SOC_STA350 if I2C
- select SND_SOC_STA529 if I2C
- select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
- select SND_SOC_STI_SAS
- select SND_SOC_TAS2552 if I2C
- select SND_SOC_TAS2562 if I2C
- select SND_SOC_TAS2770 if I2C
- select SND_SOC_TAS5086 if I2C
- select SND_SOC_TAS571X if I2C
- select SND_SOC_TAS5720 if I2C
- select SND_SOC_TAS6424 if I2C
- select SND_SOC_TDA7419 if I2C
- select SND_SOC_TFA9879 if I2C
- select SND_SOC_TLV320AIC23_I2C if I2C
- select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
- select SND_SOC_TLV320AIC26 if SPI_MASTER
- select SND_SOC_TLV320AIC31XX if I2C
- select SND_SOC_TLV320AIC32X4_I2C if I2C && COMMON_CLK
- select SND_SOC_TLV320AIC32X4_SPI if SPI_MASTER && COMMON_CLK
- select SND_SOC_TLV320AIC3X if I2C
- select SND_SOC_TPA6130A2 if I2C
- select SND_SOC_TLV320DAC33 if I2C
- select SND_SOC_TSCS42XX if I2C
- select SND_SOC_TSCS454 if I2C
- select SND_SOC_TS3A227E if I2C
- select SND_SOC_TWL4030 if TWL4030_CORE
- select SND_SOC_TWL6040 if TWL6040_CORE
- select SND_SOC_UDA1334 if GPIOLIB
- select SND_SOC_UDA134X
- select SND_SOC_UDA1380 if I2C
- select SND_SOC_WCD9335 if SLIMBUS
- select SND_SOC_WCD934X if MFD_WCD934X && COMMON_CLK
- select SND_SOC_WL1273 if MFD_WL1273_CORE
- select SND_SOC_WM0010 if SPI_MASTER
- select SND_SOC_WM1250_EV1 if I2C
- select SND_SOC_WM2000 if I2C
- select SND_SOC_WM2200 if I2C
- select SND_SOC_WM5100 if I2C
- select SND_SOC_WM5102 if MFD_WM5102
- select SND_SOC_WM5110 if MFD_WM5110
- select SND_SOC_WM8350 if MFD_WM8350
- select SND_SOC_WM8400 if MFD_WM8400
- select SND_SOC_WM8510 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8523 if I2C
- select SND_SOC_WM8524 if GPIOLIB
- select SND_SOC_WM8580 if I2C
- select SND_SOC_WM8711 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8727
- select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8737 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8741 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8770 if SPI_MASTER
- select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8782
- select SND_SOC_WM8804_I2C if I2C
- select SND_SOC_WM8804_SPI if SPI_MASTER
- select SND_SOC_WM8900 if I2C
- select SND_SOC_WM8903 if I2C
- select SND_SOC_WM8904 if I2C
- select SND_SOC_WM8940 if I2C
- select SND_SOC_WM8955 if I2C
- select SND_SOC_WM8960 if I2C
- select SND_SOC_WM8961 if I2C
- select SND_SOC_WM8962 if I2C && INPUT
- select SND_SOC_WM8971 if I2C
- select SND_SOC_WM8974 if I2C
- select SND_SOC_WM8978 if I2C
- select SND_SOC_WM8983 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8985 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8988 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8990 if I2C
- select SND_SOC_WM8991 if I2C
- select SND_SOC_WM8993 if I2C
- select SND_SOC_WM8994 if MFD_WM8994
- select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI
- select SND_SOC_WM8996 if I2C
- select SND_SOC_WM8997 if MFD_WM8997
- select SND_SOC_WM8998 if MFD_WM8998
- select SND_SOC_WM9081 if I2C
- select SND_SOC_WM9090 if I2C
- select SND_SOC_WM9705 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
- select SND_SOC_WM9712 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
- select SND_SOC_WM9713 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
- select SND_SOC_WSA881X if SOUNDWIRE
+ imply SND_SOC_88PM860X
+ imply SND_SOC_L3
+ imply SND_SOC_AB8500_CODEC
+ imply SND_SOC_AC97_CODEC
+ imply SND_SOC_AD1836
+ imply SND_SOC_AD193X_SPI
+ imply SND_SOC_AD193X_I2C
+ imply SND_SOC_AD1980
+ imply SND_SOC_AD73311
+ imply SND_SOC_ADAU1373
+ imply SND_SOC_ADAU1761_I2C
+ imply SND_SOC_ADAU1761_SPI
+ imply SND_SOC_ADAU1781_I2C
+ imply SND_SOC_ADAU1781_SPI
+ imply SND_SOC_ADAV801
+ imply SND_SOC_ADAV803
+ imply SND_SOC_ADAU1977_SPI
+ imply SND_SOC_ADAU1977_I2C
+ imply SND_SOC_ADAU1701
+ imply SND_SOC_ADAU7002
+ imply SND_SOC_ADAU7118_I2C
+ imply SND_SOC_ADAU7118_HW
+ imply SND_SOC_ADS117X
+ imply SND_SOC_AK4104
+ imply SND_SOC_AK4118
+ imply SND_SOC_AK4458
+ imply SND_SOC_AK4535
+ imply SND_SOC_AK4554
+ imply SND_SOC_AK4613
+ imply SND_SOC_AK4641
+ imply SND_SOC_AK4642
+ imply SND_SOC_AK4671
+ imply SND_SOC_AK5386
+ imply SND_SOC_AK5558
+ imply SND_SOC_ALC5623
+ imply SND_SOC_ALC5632
+ imply SND_SOC_BT_SCO
+ imply SND_SOC_BD28623
+ imply SND_SOC_CQ0093VC
+ imply SND_SOC_CROS_EC_CODEC
+ imply SND_SOC_CS35L32
+ imply SND_SOC_CS35L33
+ imply SND_SOC_CS35L34
+ imply SND_SOC_CS35L35
+ imply SND_SOC_CS35L36
+ imply SND_SOC_CS42L42
+ imply SND_SOC_CS42L51_I2C
+ imply SND_SOC_CS42L52
+ imply SND_SOC_CS42L56
+ imply SND_SOC_CS42L73
+ imply SND_SOC_CS4265
+ imply SND_SOC_CS4270
+ imply SND_SOC_CS4271_I2C
+ imply SND_SOC_CS4271_SPI
+ imply SND_SOC_CS42XX8_I2C
+ imply SND_SOC_CS43130
+ imply SND_SOC_CS4341
+ imply SND_SOC_CS4349
+ imply SND_SOC_CS47L15
+ imply SND_SOC_CS47L24
+ imply SND_SOC_CS47L35
+ imply SND_SOC_CS47L85
+ imply SND_SOC_CS47L90
+ imply SND_SOC_CS47L92
+ imply SND_SOC_CS53L30
+ imply SND_SOC_CX20442
+ imply SND_SOC_CX2072X
+ imply SND_SOC_DA7210
+ imply SND_SOC_DA7213
+ imply SND_SOC_DA7218
+ imply SND_SOC_DA7219
+ imply SND_SOC_DA732X
+ imply SND_SOC_DA9055
+ imply SND_SOC_DMIC
+ imply SND_SOC_ES8316
+ imply SND_SOC_ES8328_SPI
+ imply SND_SOC_ES8328_I2C
+ imply SND_SOC_ES7134
+ imply SND_SOC_ES7241
+ imply SND_SOC_GTM601
+ imply SND_SOC_HDAC_HDMI
+ imply SND_SOC_HDAC_HDA
+ imply SND_SOC_ICS43432
+ imply SND_SOC_INNO_RK3036
+ imply SND_SOC_ISABELLE
+ imply SND_SOC_JZ4740_CODEC
+ imply SND_SOC_JZ4725B_CODEC
+ imply SND_SOC_JZ4770_CODEC
+ imply SND_SOC_LM4857
+ imply SND_SOC_LM49453
+ imply SND_SOC_LOCHNAGAR_SC
+ imply SND_SOC_MAX98088
+ imply SND_SOC_MAX98090
+ imply SND_SOC_MAX98095
+ imply SND_SOC_MAX98357A
+ imply SND_SOC_MAX98371
+ imply SND_SOC_MAX98504
+ imply SND_SOC_MAX9867
+ imply SND_SOC_MAX98925
+ imply SND_SOC_MAX98926
+ imply SND_SOC_MAX98927
+ imply SND_SOC_MAX98373
+ imply SND_SOC_MAX9850
+ imply SND_SOC_MAX9860
+ imply SND_SOC_MAX9759
+ imply SND_SOC_MAX9768
+ imply SND_SOC_MAX9877
+ imply SND_SOC_MC13783
+ imply SND_SOC_ML26124
+ imply SND_SOC_MT6351
+ imply SND_SOC_MT6358
+ imply SND_SOC_MT6660
+ imply SND_SOC_NAU8540
+ imply SND_SOC_NAU8810
+ imply SND_SOC_NAU8822
+ imply SND_SOC_NAU8824
+ imply SND_SOC_NAU8825
+ imply SND_SOC_HDMI_CODEC
+ imply SND_SOC_PCM1681
+ imply SND_SOC_PCM1789_I2C
+ imply SND_SOC_PCM179X_I2C
+ imply SND_SOC_PCM179X_SPI
+ imply SND_SOC_PCM186X_I2C
+ imply SND_SOC_PCM186X_SPI
+ imply SND_SOC_PCM3008
+ imply SND_SOC_PCM3060_I2C
+ imply SND_SOC_PCM3060_SPI
+ imply SND_SOC_PCM3168A_I2C
+ imply SND_SOC_PCM3168A_SPI
+ imply SND_SOC_PCM5102A
+ imply SND_SOC_PCM512x_I2C
+ imply SND_SOC_PCM512x_SPI
+ imply SND_SOC_RK3328
+ imply SND_SOC_RT274
+ imply SND_SOC_RT286
+ imply SND_SOC_RT298
+ imply SND_SOC_RT1011
+ imply SND_SOC_RT1015
+ imply SND_SOC_RT1305
+ imply SND_SOC_RT1308
+ imply SND_SOC_RT5514
+ imply SND_SOC_RT5616
+ imply SND_SOC_RT5631
+ imply SND_SOC_RT5640
+ imply SND_SOC_RT5645
+ imply SND_SOC_RT5651
+ imply SND_SOC_RT5659
+ imply SND_SOC_RT5660
+ imply SND_SOC_RT5663
+ imply SND_SOC_RT5665
+ imply SND_SOC_RT5668
+ imply SND_SOC_RT5670
+ imply SND_SOC_RT5677
+ imply SND_SOC_RT5682
+ imply SND_SOC_RT5682_SDW
+ imply SND_SOC_RT700_SDW
+ imply SND_SOC_RT711_SDW
+ imply SND_SOC_RT715_SDW
+ imply SND_SOC_RT1308_SDW
+ imply SND_SOC_SGTL5000
+ imply SND_SOC_SI476X
+ imply SND_SOC_SIMPLE_AMPLIFIER
+ imply SND_SOC_SIRF_AUDIO_CODEC
+ imply SND_SOC_SPDIF
+ imply SND_SOC_SSM2305
+ imply SND_SOC_SSM2518
+ imply SND_SOC_SSM2602_SPI
+ imply SND_SOC_SSM2602_I2C
+ imply SND_SOC_SSM4567
+ imply SND_SOC_STA32X
+ imply SND_SOC_STA350
+ imply SND_SOC_STA529
+ imply SND_SOC_STAC9766
+ imply SND_SOC_STI_SAS
+ imply SND_SOC_TAS2552
+ imply SND_SOC_TAS2562
+ imply SND_SOC_TAS2770
+ imply SND_SOC_TAS5086
+ imply SND_SOC_TAS571X
+ imply SND_SOC_TAS5720
+ imply SND_SOC_TAS6424
+ imply SND_SOC_TDA7419
+ imply SND_SOC_TFA9879
+ imply SND_SOC_TLV320ADCX140
+ imply SND_SOC_TLV320AIC23_I2C
+ imply SND_SOC_TLV320AIC23_SPI
+ imply SND_SOC_TLV320AIC26
+ imply SND_SOC_TLV320AIC31XX
+ imply SND_SOC_TLV320AIC32X4_I2C
+ imply SND_SOC_TLV320AIC32X4_SPI
+ imply SND_SOC_TLV320AIC3X
+ imply SND_SOC_TPA6130A2
+ imply SND_SOC_TLV320DAC33
+ imply SND_SOC_TSCS42XX
+ imply SND_SOC_TSCS454
+ imply SND_SOC_TS3A227E
+ imply SND_SOC_TWL4030
+ imply SND_SOC_TWL6040
+ imply SND_SOC_UDA1334
+ imply SND_SOC_UDA134X
+ imply SND_SOC_UDA1380
+ imply SND_SOC_WCD9335
+ imply SND_SOC_WCD934X
+ imply SND_SOC_WL1273
+ imply SND_SOC_WM0010
+ imply SND_SOC_WM1250_EV1
+ imply SND_SOC_WM2000
+ imply SND_SOC_WM2200
+ imply SND_SOC_WM5100
+ imply SND_SOC_WM5102
+ imply SND_SOC_WM5110
+ imply SND_SOC_WM8350
+ imply SND_SOC_WM8400
+ imply SND_SOC_WM8510
+ imply SND_SOC_WM8523
+ imply SND_SOC_WM8524
+ imply SND_SOC_WM8580
+ imply SND_SOC_WM8711
+ imply SND_SOC_WM8727
+ imply SND_SOC_WM8728
+ imply SND_SOC_WM8731
+ imply SND_SOC_WM8737
+ imply SND_SOC_WM8741
+ imply SND_SOC_WM8750
+ imply SND_SOC_WM8753
+ imply SND_SOC_WM8770
+ imply SND_SOC_WM8776
+ imply SND_SOC_WM8782
+ imply SND_SOC_WM8804_I2C
+ imply SND_SOC_WM8804_SPI
+ imply SND_SOC_WM8900
+ imply SND_SOC_WM8903
+ imply SND_SOC_WM8904
+ imply SND_SOC_WM8940
+ imply SND_SOC_WM8955
+ imply SND_SOC_WM8960
+ imply SND_SOC_WM8961
+ imply SND_SOC_WM8962
+ imply SND_SOC_WM8971
+ imply SND_SOC_WM8974
+ imply SND_SOC_WM8978
+ imply SND_SOC_WM8983
+ imply SND_SOC_WM8985
+ imply SND_SOC_WM8988
+ imply SND_SOC_WM8990
+ imply SND_SOC_WM8991
+ imply SND_SOC_WM8993
+ imply SND_SOC_WM8994
+ imply SND_SOC_WM8995
+ imply SND_SOC_WM8996
+ imply SND_SOC_WM8997
+ imply SND_SOC_WM8998
+ imply SND_SOC_WM9081
+ imply SND_SOC_WM9090
+ imply SND_SOC_WM9705
+ imply SND_SOC_WM9712
+ imply SND_SOC_WM9713
+ imply SND_SOC_WSA881X
help
Normally ASoC codec drivers are only built if a machine driver which
uses them is also built since they are only usable with a machine
@@ -283,6 +285,7 @@ config SND_SOC_ALL_CODECS
config SND_SOC_88PM860X
tristate
+ depends on MFD_88PM860X
config SND_SOC_ARIZONA
tristate
@@ -318,6 +321,7 @@ config SND_SOC_WM_ADSP
config SND_SOC_AB8500_CODEC
tristate
+ depends on ABX500_CORE
config SND_SOC_AC97_CODEC
tristate "Build generic ASoC AC97 CODEC driver"
@@ -326,21 +330,25 @@ config SND_SOC_AC97_CODEC
config SND_SOC_AD1836
tristate
+ depends on SPI_MASTER
config SND_SOC_AD193X
tristate
config SND_SOC_AD193X_SPI
tristate
+ depends on SPI_MASTER
select SND_SOC_AD193X
config SND_SOC_AD193X_I2C
tristate
+ depends on I2C
select SND_SOC_AD193X
config SND_SOC_AD1980
- select REGMAP_AC97
tristate
+ depends on SND_SOC_AC97_BUS
+ select REGMAP_AC97
config SND_SOC_AD73311
tristate
@@ -350,6 +358,7 @@ config SND_SOC_ADAU_UTILS
config SND_SOC_ADAU1373
tristate
+ depends on I2C
select SND_SOC_ADAU_UTILS
config SND_SOC_ADAU1701
@@ -384,11 +393,13 @@ config SND_SOC_ADAU1781
config SND_SOC_ADAU1781_I2C
tristate
+ depends on I2C
select SND_SOC_ADAU1781
select REGMAP_I2C
config SND_SOC_ADAU1781_SPI
tristate
+ depends on SPI_MASTER
select SND_SOC_ADAU1781
select REGMAP_SPI
@@ -397,11 +408,13 @@ config SND_SOC_ADAU1977
config SND_SOC_ADAU1977_SPI
tristate
+ depends on SPI_MASTER
select SND_SOC_ADAU1977
select REGMAP_SPI
config SND_SOC_ADAU1977_I2C
tristate
+ depends on I2C
select SND_SOC_ADAU1977
select REGMAP_I2C
@@ -440,10 +453,12 @@ config SND_SOC_ADAV80X
config SND_SOC_ADAV801
tristate
+ depends on SPI_MASTER
select SND_SOC_ADAV80X
config SND_SOC_ADAV803
tristate
+ depends on I2C
select SND_SOC_ADAV80X
config SND_SOC_ADS117X
@@ -465,6 +480,7 @@ config SND_SOC_AK4458
config SND_SOC_AK4535
tristate
+ depends on I2C
config SND_SOC_AK4554
tristate "AKM AK4554 CODEC"
@@ -475,6 +491,7 @@ config SND_SOC_AK4613
config SND_SOC_AK4641
tristate
+ depends on I2C
config SND_SOC_AK4642
tristate "AKM AK4642 CODEC"
@@ -482,6 +499,7 @@ config SND_SOC_AK4642
config SND_SOC_AK4671
tristate
+ depends on I2C
config SND_SOC_AK5386
tristate "AKM AK5638 CODEC"
@@ -497,6 +515,7 @@ config SND_SOC_ALC5623
config SND_SOC_ALC5632
tristate
+ depends on I2C
config SND_SOC_BD28623
tristate "ROHM BD28623 CODEC"
@@ -631,6 +650,7 @@ config SND_SOC_CS47L15
config SND_SOC_CS47L24
tristate
+ depends on MFD_CS47L24
config SND_SOC_CS47L35
tristate
@@ -697,6 +717,7 @@ config SND_SOC_L3
config SND_SOC_DA7210
tristate
+ depends on I2C
config SND_SOC_DA7213
tristate "Dialog DA7213 CODEC"
@@ -704,15 +725,19 @@ config SND_SOC_DA7213
config SND_SOC_DA7218
tristate
+ depends on I2C
config SND_SOC_DA7219
tristate
+ depends on I2C
config SND_SOC_DA732X
tristate
+ depends on I2C
config SND_SOC_DA9055
tristate
+ depends on I2C
config SND_SOC_DMIC
tristate "Generic Digital Microphone CODEC"
@@ -772,9 +797,11 @@ config SND_SOC_INNO_RK3036
config SND_SOC_ISABELLE
tristate
+ depends on I2C
config SND_SOC_LM49453
tristate
+ depends on I2C
config SND_SOC_LOCHNAGAR_SC
tristate "Lochnagar Sound Card"
@@ -801,17 +828,20 @@ config SND_SOC_MAX98088
depends on I2C
config SND_SOC_MAX98090
- tristate
+ tristate
+ depends on I2C
config SND_SOC_MAX98095
- tristate
+ tristate
+ depends on I2C
config SND_SOC_MAX98357A
tristate "Maxim MAX98357A CODEC"
depends on GPIOLIB
config SND_SOC_MAX98371
- tristate
+ tristate
+ depends on I2C
config SND_SOC_MAX98504
tristate "Maxim MAX98504 speaker amplifier"
@@ -822,10 +852,12 @@ config SND_SOC_MAX9867
depends on I2C
config SND_SOC_MAX98925
- tristate
+ tristate
+ depends on I2C
config SND_SOC_MAX98926
tristate
+ depends on I2C
config SND_SOC_MAX98927
tristate "Maxim Integrated MAX98927 Speaker Amplifier"
@@ -837,6 +869,7 @@ config SND_SOC_MAX98373
config SND_SOC_MAX9850
tristate
+ depends on I2C
config SND_SOC_MAX9860
tristate "Maxim MAX9860 Mono Audio Voice Codec"
@@ -1015,26 +1048,32 @@ config SND_SOC_RT298
config SND_SOC_RT1011
tristate
+ depends on I2C
config SND_SOC_RT1015
tristate
+ depends on I2C
config SND_SOC_RT1305
tristate
+ depends on I2C
config SND_SOC_RT1308
tristate
+ depends on I2C
config SND_SOC_RT1308_SDW
tristate "Realtek RT1308 Codec - SDW"
- depends on SOUNDWIRE
+ depends on I2C && SOUNDWIRE
select REGMAP_SOUNDWIRE
config SND_SOC_RT5514
tristate
+ depends on I2C
config SND_SOC_RT5514_SPI
tristate
+ depends on SPI_MASTER
config SND_SOC_RT5514_SPI_BUILTIN
bool # force RT5514_SPI to be built-in to avoid link errors
@@ -1050,33 +1089,43 @@ config SND_SOC_RT5631
config SND_SOC_RT5640
tristate
+ depends on I2C
config SND_SOC_RT5645
tristate
+ depends on I2C
config SND_SOC_RT5651
tristate
+ depends on I2C
config SND_SOC_RT5659
tristate
+ depends on I2C
config SND_SOC_RT5660
tristate
+ depends on I2C
config SND_SOC_RT5663
tristate
+ depends on I2C
config SND_SOC_RT5665
tristate
+ depends on I2C
config SND_SOC_RT5668
tristate
+ depends on I2C
config SND_SOC_RT5670
tristate
+ depends on I2C
config SND_SOC_RT5677
tristate
+ depends on I2C
select REGMAP_I2C
select REGMAP_IRQ
@@ -1086,6 +1135,13 @@ config SND_SOC_RT5677_SPI
config SND_SOC_RT5682
tristate
+ depends on I2C || SOUNDWIRE
+
+config SND_SOC_RT5682_SDW
+ tristate "Realtek RT5682 Codec - SDW"
+ depends on SOUNDWIRE
+ select SND_SOC_RT5682
+ select REGMAP_SOUNDWIRE
config SND_SOC_RT700
tristate
@@ -1153,6 +1209,7 @@ config SND_SOC_SSM2305
config SND_SOC_SSM2518
tristate
+ depends on I2C
config SND_SOC_SSM2602
tristate
@@ -1184,9 +1241,11 @@ config SND_SOC_STA350
config SND_SOC_STA529
tristate
+ depends on I2C
config SND_SOC_STAC9766
tristate
+ depends on SND_SOC_AC97_BUS
config SND_SOC_STI_SAS
tristate "codec Audio support for STI SAS codec"
@@ -1281,6 +1340,15 @@ config SND_SOC_TLV320AIC3X
config SND_SOC_TLV320DAC33
tristate
+ depends on I2C
+
+config SND_SOC_TLV320ADCX140
+ tristate "Texas Instruments TLV320ADCX140 CODEC family"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Add support for Texas Instruments tlv320adc3140, tlv320adc5140 and
+ tlv320adc6140 quad channel ADCs.
config SND_SOC_TS3A227E
tristate "TI Headset/Mic detect and keypress chip"
@@ -1301,11 +1369,13 @@ config SND_SOC_TSCS454
Add support for Tempo Semiconductor's TSCS454 audio CODEC.
config SND_SOC_TWL4030
- select MFD_TWL4030_AUDIO
tristate
+ depends on TWL4030_CORE
+ select MFD_TWL4030_AUDIO
config SND_SOC_TWL6040
tristate
+ depends on TWL6040_CORE
config SND_SOC_UDA1334
tristate "NXP UDA1334 DAC"
@@ -1345,30 +1415,40 @@ config SND_SOC_WL1273
config SND_SOC_WM0010
tristate
+ depends on SPI_MASTER
config SND_SOC_WM1250_EV1
tristate
+ depends on I2C
config SND_SOC_WM2000
tristate
+ depends on I2C
config SND_SOC_WM2200
tristate
+ depends on I2C
config SND_SOC_WM5100
tristate
+ depends on I2C
config SND_SOC_WM5102
tristate
+ depends on MFD_WM5102
config SND_SOC_WM5110
tristate
+ depends on MFD_WM5110
config SND_SOC_WM8350
tristate
+ depends on MFD_WM8350
config SND_SOC_WM8400
tristate
+ # FIXME nothing selects SND_SOC_WM8400??
+ depends on MFD_WM8400
config SND_SOC_WM8510
tristate "Wolfson Microelectronics WM8510 CODEC"
@@ -1456,9 +1536,11 @@ config SND_SOC_WM8904
config SND_SOC_WM8940
tristate
+ depends on I2C
config SND_SOC_WM8955
tristate
+ depends on I2C
config SND_SOC_WM8960
tristate "Wolfson Microelectronics WM8960 CODEC"
@@ -1466,6 +1548,7 @@ config SND_SOC_WM8960
config SND_SOC_WM8961
tristate
+ depends on I2C
config SND_SOC_WM8962
tristate "Wolfson Microelectronics WM8962 CODEC"
@@ -1473,6 +1556,7 @@ config SND_SOC_WM8962
config SND_SOC_WM8971
tristate
+ depends on I2C
config SND_SOC_WM8974
tristate "Wolfson Microelectronics WM8974 codec"
@@ -1484,6 +1568,7 @@ config SND_SOC_WM8978
config SND_SOC_WM8983
tristate
+ depends on I2C
config SND_SOC_WM8985
tristate "Wolfson Microelectronics WM8985 and WM8758 codec driver"
@@ -1494,12 +1579,15 @@ config SND_SOC_WM8988
config SND_SOC_WM8990
tristate
+ depends on I2C
config SND_SOC_WM8991
tristate
+ depends on I2C
config SND_SOC_WM8993
tristate
+ depends on I2C
config SND_SOC_WM8994
tristate
@@ -1509,12 +1597,15 @@ config SND_SOC_WM8995
config SND_SOC_WM8996
tristate
+ depends on I2C
config SND_SOC_WM8997
tristate
+ depends on MFD_WM8997
config SND_SOC_WM8998
tristate
+ depends on MFD_WM8998
config SND_SOC_WM9081
tristate
@@ -1522,19 +1613,23 @@ config SND_SOC_WM9081
config SND_SOC_WM9090
tristate
+ depends on I2C
config SND_SOC_WM9705
tristate
+ depends on SND_SOC_AC97_BUS
select REGMAP_AC97
select AC97_BUS_COMPAT if AC97_BUS_NEW
config SND_SOC_WM9712
tristate
+ depends on SND_SOC_AC97_BUS
select REGMAP_AC97
select AC97_BUS_COMPAT if AC97_BUS_NEW
config SND_SOC_WM9713
tristate
+ depends on SND_SOC_AC97_BUS
select REGMAP_AC97
select AC97_BUS_COMPAT if AC97_BUS_NEW
@@ -1555,6 +1650,7 @@ config SND_SOC_ZX_AUD96P22
# Amp
config SND_SOC_LM4857
tristate
+ depends on I2C
config SND_SOC_MAX9759
tristate "Maxim MAX9759 speaker Amplifier"
@@ -1562,15 +1658,19 @@ config SND_SOC_MAX9759
config SND_SOC_MAX9768
tristate
+ depends on I2C
config SND_SOC_MAX9877
tristate
+ depends on I2C
config SND_SOC_MC13783
tristate
+ depends on MFD_MC13XXX
config SND_SOC_ML26124
tristate
+ depends on I2C
config SND_SOC_MT6351
tristate "MediaTek MT6351 Codec"
@@ -1608,6 +1708,7 @@ config SND_SOC_NAU8824
config SND_SOC_NAU8825
tristate
+ depends on I2C
config SND_SOC_TPA6130A2
tristate "Texas Instruments TPA6130A2 headphone amplifier"
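
Every Kconfig hunk above follows the same pattern: codec symbols that machine
drivers normally pull in via `select` gain an explicit `depends on` naming the
bus or parent device they actually probe on (I2C, SPI_MASTER,
SND_SOC_AC97_BUS, or a specific MFD_* core). `select` ignores the selected
symbol's dependencies, so a randconfig that selected, say, SND_SOC_WM2000
without I2C used to yield an unbuildable codec; with the dependencies spelled
out, a catch-all option such as SND_SOC_ALL_CODECS can switch to `imply`,
which, unlike `select`, honours them — evidently the point of this series.
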
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index ba1b4b3fa2da..03533157cda6 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -177,6 +177,7 @@ snd-soc-rt5670-objs := rt5670.o
snd-soc-rt5677-objs := rt5677.o
snd-soc-rt5677-spi-objs := rt5677-spi.o
snd-soc-rt5682-objs := rt5682.o
+snd-soc-rt5682-sdw-objs := rt5682-sdw.o
snd-soc-rt700-objs := rt700.o rt700-sdw.o
snd-soc-rt711-objs := rt711.o rt711-sdw.o
snd-soc-rt715-objs := rt715.o rt715-sdw.o
@@ -218,6 +219,7 @@ snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
snd-soc-tlv320dac33-objs := tlv320dac33.o
+snd-soc-tlv320adcx140-objs := tlv320adcx140.o
snd-soc-tscs42xx-objs := tscs42xx.o
snd-soc-tscs454-objs := tscs454.o
snd-soc-ts3a227e-objs := ts3a227e.o
@@ -476,6 +478,7 @@ obj-$(CONFIG_SND_SOC_RT5670) += snd-soc-rt5670.o
obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o
obj-$(CONFIG_SND_SOC_RT5677_SPI) += snd-soc-rt5677-spi.o
obj-$(CONFIG_SND_SOC_RT5682) += snd-soc-rt5682.o
+obj-$(CONFIG_SND_SOC_RT5682_SDW) += snd-soc-rt5682-sdw.o
obj-$(CONFIG_SND_SOC_RT700) += snd-soc-rt700.o
obj-$(CONFIG_SND_SOC_RT711) += snd-soc-rt711.o
obj-$(CONFIG_SND_SOC_RT715) += snd-soc-rt715.o
@@ -516,6 +519,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C) += snd-soc-tlv320aic32x4-i2c.o
obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI) += snd-soc-tlv320aic32x4-spi.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
+obj-$(CONFIG_SND_SOC_TLV320ADCX140) += snd-soc-tlv320adcx140.o
obj-$(CONFIG_SND_SOC_TSCS42XX) += snd-soc-tscs42xx.o
obj-$(CONFIG_SND_SOC_TSCS454) += snd-soc-tscs454.o
obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o
diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c
index 6a24f570c5e8..d3dc42aa6825 100644
--- a/sound/soc/codecs/cros_ec_codec.c
+++ b/sound/soc/codecs/cros_ec_codec.c
@@ -45,6 +45,9 @@ struct cros_ec_codec_priv {
/* DMIC */
atomic_t dmic_probed;
+ /* I2S_RX */
+ uint32_t i2s_rx_bclk_ratio;
+
/* WoV */
bool wov_enabled;
uint8_t *wov_audio_shm_p;
@@ -259,6 +262,7 @@ static int i2s_rx_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_get_drvdata(component);
struct ec_param_ec_codec_i2s_rx p;
enum ec_codec_i2s_rx_sample_depth depth;
+ uint32_t bclk;
int ret;
if (params_rate(params) != 48000)
@@ -284,15 +288,29 @@ static int i2s_rx_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- dev_dbg(component->dev, "set bclk to %u\n",
- snd_soc_params_to_bclk(params));
+ if (priv->i2s_rx_bclk_ratio)
+ bclk = params_rate(params) * priv->i2s_rx_bclk_ratio;
+ else
+ bclk = snd_soc_params_to_bclk(params);
+
+ dev_dbg(component->dev, "set bclk to %u\n", bclk);
p.cmd = EC_CODEC_I2S_RX_SET_BCLK;
- p.set_bclk_param.bclk = snd_soc_params_to_bclk(params);
+ p.set_bclk_param.bclk = bclk;
return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
(uint8_t *)&p, sizeof(p), NULL, 0);
}
+static int i2s_rx_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_component *component = dai->component;
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+
+ priv->i2s_rx_bclk_ratio = ratio;
+ return 0;
+}
+
static int i2s_rx_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_component *component = dai->component;
@@ -340,6 +358,7 @@ static int i2s_rx_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
static const struct snd_soc_dai_ops i2s_rx_dai_ops = {
.hw_params = i2s_rx_hw_params,
.set_fmt = i2s_rx_set_fmt,
+ .set_bclk_ratio = i2s_rx_set_bclk_ratio,
};
static int i2s_rx_event(struct snd_soc_dapm_widget *w,
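
The new set_bclk_ratio callback lets a machine driver pin the bit clock to an
explicit multiple of the frame rate instead of whatever
snd_soc_params_to_bclk() derives from the sample format. A minimal sketch of
the caller side — the machine-driver name is invented, the DAI op plumbing is
the standard snd_soc_dai_set_bclk_ratio():

    /* Sketch: a machine driver forcing a 64fs BCLK on the EC codec DAI. */
    static int my_card_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params)
    {
            struct snd_soc_pcm_runtime *rtd = substream->private_data;
            struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);

            /* 48000 Hz * 64 = 3.072 MHz regardless of sample width;
             * i2s_rx_hw_params() above then uses rate * ratio instead
             * of snd_soc_params_to_bclk(params). */
            return snd_soc_dai_set_bclk_ratio(codec_dai, 64);
    }
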
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 04b86a51e055..62f412d6f9f2 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -356,9 +356,9 @@ static int cs4271_hw_params(struct snd_pcm_substream *substream,
*/
if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
- !dai->capture_active) ||
+ !dai->stream_active[SNDRV_PCM_STREAM_CAPTURE]) ||
(substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
- !dai->playback_active)) {
+ !dai->stream_active[SNDRV_PCM_STREAM_PLAYBACK])) {
ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
CS4271_MODE2_PDN,
CS4271_MODE2_PDN);
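
This is part of a tree-wide ASoC conversion: the separate playback_active and
capture_active counters on snd_soc_dai were folded into one stream_active[]
array indexed by the SNDRV_PCM_STREAM_* direction. The logic above is
unchanged — only power down when the opposite direction is idle:

    /* was: !dai->capture_active */
    bool capture_idle = !dai->stream_active[SNDRV_PCM_STREAM_CAPTURE];
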
diff --git a/sound/soc/codecs/cs47l15.c b/sound/soc/codecs/cs47l15.c
index e8840dc142ef..8d1869bf7f9c 100644
--- a/sound/soc/codecs/cs47l15.c
+++ b/sound/soc/codecs/cs47l15.c
@@ -1239,12 +1239,12 @@ static int cs47l15_open(struct snd_compr_stream *stream)
struct madera *madera = priv->madera;
int n_adsp;
- if (strcmp(rtd->codec_dai->name, "cs47l15-dsp-trace") == 0) {
+ if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l15-dsp-trace") == 0) {
n_adsp = 0;
} else {
dev_err(madera->dev,
"No suitable compressed stream for DAI '%s'\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
return -EINVAL;
}
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 25bffc2968f0..6b0570f59630 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1076,14 +1076,14 @@ static int cs47l24_open(struct snd_compr_stream *stream)
struct arizona *arizona = priv->core.arizona;
int n_adsp;
- if (strcmp(rtd->codec_dai->name, "cs47l24-dsp-voicectrl") == 0) {
+ if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l24-dsp-voicectrl") == 0) {
n_adsp = 2;
- } else if (strcmp(rtd->codec_dai->name, "cs47l24-dsp-trace") == 0) {
+ } else if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l24-dsp-trace") == 0) {
n_adsp = 1;
} else {
dev_err(arizona->dev,
"No suitable compressed stream for DAI '%s'\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
return -EINVAL;
}
diff --git a/sound/soc/codecs/cs47l35.c b/sound/soc/codecs/cs47l35.c
index 3d48a0d9ecc5..18839807c9d1 100644
--- a/sound/soc/codecs/cs47l35.c
+++ b/sound/soc/codecs/cs47l35.c
@@ -1514,14 +1514,14 @@ static int cs47l35_open(struct snd_compr_stream *stream)
struct madera *madera = priv->madera;
int n_adsp;
- if (strcmp(rtd->codec_dai->name, "cs47l35-dsp-voicectrl") == 0) {
+ if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l35-dsp-voicectrl") == 0) {
n_adsp = 2;
- } else if (strcmp(rtd->codec_dai->name, "cs47l35-dsp-trace") == 0) {
+ } else if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l35-dsp-trace") == 0) {
n_adsp = 0;
} else {
dev_err(madera->dev,
"No suitable compressed stream for DAI '%s'\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
return -EINVAL;
}
diff --git a/sound/soc/codecs/cs47l85.c b/sound/soc/codecs/cs47l85.c
index bef3471f482d..a575113207f0 100644
--- a/sound/soc/codecs/cs47l85.c
+++ b/sound/soc/codecs/cs47l85.c
@@ -2457,14 +2457,14 @@ static int cs47l85_open(struct snd_compr_stream *stream)
struct madera *madera = priv->madera;
int n_adsp;
- if (strcmp(rtd->codec_dai->name, "cs47l85-dsp-voicectrl") == 0) {
+ if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l85-dsp-voicectrl") == 0) {
n_adsp = 5;
- } else if (strcmp(rtd->codec_dai->name, "cs47l85-dsp-trace") == 0) {
+ } else if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l85-dsp-trace") == 0) {
n_adsp = 0;
} else {
dev_err(madera->dev,
"No suitable compressed stream for DAI '%s'\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
return -EINVAL;
}
diff --git a/sound/soc/codecs/cs47l90.c b/sound/soc/codecs/cs47l90.c
index 266eade82764..81a1311b14e6 100644
--- a/sound/soc/codecs/cs47l90.c
+++ b/sound/soc/codecs/cs47l90.c
@@ -2368,14 +2368,14 @@ static int cs47l90_open(struct snd_compr_stream *stream)
struct madera *madera = priv->madera;
int n_adsp;
- if (strcmp(rtd->codec_dai->name, "cs47l90-dsp-voicectrl") == 0) {
+ if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l90-dsp-voicectrl") == 0) {
n_adsp = 5;
- } else if (strcmp(rtd->codec_dai->name, "cs47l90-dsp-trace") == 0) {
+ } else if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l90-dsp-trace") == 0) {
n_adsp = 0;
} else {
dev_err(madera->dev,
"No suitable compressed stream for DAI '%s'\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
return -EINVAL;
}
diff --git a/sound/soc/codecs/cs47l92.c b/sound/soc/codecs/cs47l92.c
index 942040fd354f..15fc213d178d 100644
--- a/sound/soc/codecs/cs47l92.c
+++ b/sound/soc/codecs/cs47l92.c
@@ -1840,12 +1840,12 @@ static int cs47l92_open(struct snd_compr_stream *stream)
struct madera *madera = priv->madera;
int n_adsp;
- if (strcmp(rtd->codec_dai->name, "cs47l92-dsp-trace") == 0) {
+ if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "cs47l92-dsp-trace") == 0) {
n_adsp = 0;
} else {
dev_err(madera->dev,
"No suitable compressed stream for DAI '%s'\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
return -EINVAL;
}
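
All six cs47lxx compressed-stream open handlers get the same mechanical
conversion. rtd->codec_dai went away when snd_soc_pcm_runtime switched to a
single dais[] array holding the CPU DAIs first, then the codec DAIs; the
accessor reads roughly as follows (paraphrased from include/sound/soc.h at
the time of this series):

    #define asoc_rtd_to_cpu(rtd, n)   (rtd)->dais[n]
    #define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus]

so asoc_rtd_to_codec(rtd, 0) is exactly the old rtd->codec_dai.
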
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index e6558475e006..fba9b749839d 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1998,11 +1998,11 @@ static struct hdac_hdmi_drv_data intel_drv_data = {
static int hdac_hdmi_dev_probe(struct hdac_device *hdev)
{
- struct hdac_hdmi_priv *hdmi_priv = NULL;
+ struct hdac_hdmi_priv *hdmi_priv;
struct snd_soc_dai_driver *hdmi_dais = NULL;
- struct hdac_ext_link *hlink = NULL;
+ struct hdac_ext_link *hlink;
int num_dais = 0;
- int ret = 0;
+ int ret;
struct hdac_driver *hdrv = drv_to_hdac_driver(hdev->dev.driver);
const struct hda_device_id *hdac_id = hdac_get_device_id(hdev, hdrv);
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 16313b973eaa..a8bd793a7867 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -5,6 +5,7 @@
*/
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio.h>
@@ -24,26 +25,24 @@ struct max98357a_priv {
unsigned int sdmode_delay;
};
-static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
- int cmd, struct snd_soc_dai *dai)
+static int max98357a_sdmode_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
{
- struct max98357a_priv *max98357a = snd_soc_dai_get_drvdata(dai);
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct max98357a_priv *max98357a =
+ snd_soc_component_get_drvdata(component);
if (!max98357a->sdmode)
return 0;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- mdelay(max98357a->sdmode_delay);
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ msleep(max98357a->sdmode_delay);
gpiod_set_value(max98357a->sdmode, 1);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		dev_dbg(component->dev, "set sdmode to 1\n");
+ } else if (event & SND_SOC_DAPM_PRE_PMD) {
gpiod_set_value(max98357a->sdmode, 0);
- break;
+		dev_dbg(component->dev, "set sdmode to 0\n");
}
return 0;
@@ -51,10 +50,14 @@ static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
static const struct snd_soc_dapm_widget max98357a_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("Speaker"),
+ SND_SOC_DAPM_OUT_DRV_E("SD_MODE", SND_SOC_NOPM, 0, 0, NULL, 0,
+ max98357a_sdmode_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
};
static const struct snd_soc_dapm_route max98357a_dapm_routes[] = {
- {"Speaker", NULL, "HiFi Playback"},
+ {"SD_MODE", NULL, "HiFi Playback"},
+ {"Speaker", NULL, "SD_MODE"},
};
static const struct snd_soc_component_driver max98357a_component_driver = {
@@ -68,10 +71,6 @@ static const struct snd_soc_component_driver max98357a_component_driver = {
.non_legacy_dai_naming = 1,
};
-static const struct snd_soc_dai_ops max98357a_dai_ops = {
- .trigger = max98357a_daiops_trigger,
-};
-
static struct snd_soc_dai_driver max98357a_dai_driver = {
.name = "HiFi",
.playback = {
@@ -91,7 +90,6 @@ static struct snd_soc_dai_driver max98357a_dai_driver = {
.channels_min = 1,
.channels_max = 2,
},
- .ops = &max98357a_dai_ops,
};
static int max98357a_platform_probe(struct platform_device *pdev)
@@ -135,6 +133,7 @@ MODULE_DEVICE_TABLE(of, max98357a_device_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id max98357a_acpi_match[] = {
{ "MX98357A", 0 },
+ { "MX98360A", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, max98357a_acpi_match);
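
Moving the SD_MODE handling from the DAI .trigger op to a DAPM widget event is
more than cosmetic: PCM trigger callbacks can run in atomic context, which is
why the old code had to busy-wait with mdelay(), whereas DAPM events run in
process context, so the delay becomes a sleeping msleep() and the GPIO toggles
in step with the widget power sequence. Routing Speaker through the new
SD_MODE widget is what ties the event to stream start and stop. The general
shape of the pattern, with invented names:

    static int my_sdmode_event(struct snd_soc_dapm_widget *w,
                               struct snd_kcontrol *kcontrol, int event)
    {
            if (SND_SOC_DAPM_EVENT_ON(event))   /* PRE/POST_PMU */
                    msleep(5);                  /* sleeping is fine here */
            return 0;
    }
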
diff --git a/sound/soc/codecs/mt6660.c b/sound/soc/codecs/mt6660.c
index a36c416caad4..d1797003c83d 100644
--- a/sound/soc/codecs/mt6660.c
+++ b/sound/soc/codecs/mt6660.c
@@ -1,15 +1,13 @@
-// SPDX-License-Identifier: GPL-2.0 //
+// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 MediaTek Inc.
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
-#include <linux/debugfs.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include <sound/pcm_params.h>
@@ -225,14 +223,87 @@ static int _mt6660_chip_power_on(struct mt6660_chip *chip, int on_off)
0x01, on_off ? 0x00 : 0x01);
}
+struct reg_table {
+ uint32_t addr;
+ uint32_t mask;
+ uint32_t val;
+};
+
+static const struct reg_table mt6660_setting_table[] = {
+ { 0x20, 0x80, 0x00 },
+ { 0x30, 0x01, 0x00 },
+ { 0x50, 0x1c, 0x04 },
+ { 0xB1, 0x0c, 0x00 },
+ { 0xD3, 0x03, 0x03 },
+ { 0xE0, 0x01, 0x00 },
+ { 0x98, 0x44, 0x04 },
+ { 0xB9, 0xff, 0x82 },
+ { 0xB7, 0x7777, 0x7273 },
+ { 0xB6, 0x07, 0x03 },
+ { 0x6B, 0xe0, 0x20 },
+ { 0x07, 0xff, 0x70 },
+ { 0xBB, 0xff, 0x20 },
+ { 0x69, 0xff, 0x40 },
+ { 0xBD, 0xffff, 0x17f8 },
+ { 0x70, 0xff, 0x15 },
+ { 0x7C, 0xff, 0x00 },
+ { 0x46, 0xff, 0x1d },
+ { 0x1A, 0xffffffff, 0x7fdb7ffe },
+ { 0x1B, 0xffffffff, 0x7fdb7ffe },
+ { 0x51, 0xff, 0x58 },
+ { 0xA2, 0xff, 0xce },
+ { 0x33, 0xffff, 0x7fff },
+ { 0x4C, 0xffff, 0x0116 },
+ { 0x16, 0x1800, 0x0800 },
+ { 0x68, 0x1f, 0x07 },
+};
+
+static int mt6660_component_setting(struct snd_soc_component *component)
+{
+ struct mt6660_chip *chip = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+ size_t i = 0;
+
+ ret = _mt6660_chip_power_on(chip, 1);
+ if (ret < 0) {
+ dev_err(component->dev, "%s chip power on failed\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt6660_setting_table); i++) {
+ ret = snd_soc_component_update_bits(component,
+ mt6660_setting_table[i].addr,
+ mt6660_setting_table[i].mask,
+ mt6660_setting_table[i].val);
+ if (ret < 0) {
+ dev_err(component->dev, "%s update 0x%02x failed\n",
+ __func__, mt6660_setting_table[i].addr);
+ return ret;
+ }
+ }
+
+ ret = _mt6660_chip_power_on(chip, 0);
+ if (ret < 0) {
+ dev_err(component->dev, "%s chip power off failed\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
static int mt6660_component_probe(struct snd_soc_component *component)
{
struct mt6660_chip *chip = snd_soc_component_get_drvdata(component);
+ int ret;
dev_dbg(component->dev, "%s\n", __func__);
snd_soc_component_init_regmap(component, chip->regmap);
- return 0;
+ ret = mt6660_component_setting(component);
+ if (ret < 0)
+ dev_err(chip->dev, "mt6660 component setting failed\n");
+
+ return ret;
}
static void mt6660_component_remove(struct snd_soc_component *component)
@@ -506,4 +577,4 @@ module_i2c_driver(mt6660_i2c_driver);
MODULE_AUTHOR("Jeff Chang <jeff_chang@richtek.com>");
MODULE_DESCRIPTION("MT6660 SPKAMP Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.7_G");
+MODULE_VERSION("1.0.8_G");
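
The `ret < 0` test in the table loop above is deliberate:
snd_soc_component_update_bits() returns a negative errno on I/O failure, 0
when the register already held the value, and 1 when it actually changed, so
a plain `if (ret)` would abort on a harmless no-op write:

    ret = snd_soc_component_update_bits(component, reg, mask, val);
    if (ret < 0)            /* <0 error; 0 unchanged; 1 written */
            return ret;
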
diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c
index 287c962ba00d..115706a55577 100644
--- a/sound/soc/codecs/rk3328_codec.c
+++ b/sound/soc/codecs/rk3328_codec.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -31,7 +32,7 @@
struct rk3328_codec_priv {
struct regmap *regmap;
- struct regmap *grf;
+ struct gpio_desc *mute;
struct clk *mclk;
struct clk *pclk;
unsigned int sclk;
@@ -106,16 +107,6 @@ static int rk3328_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
-static void rk3328_analog_output(struct rk3328_codec_priv *rk3328, int mute)
-{
- unsigned int val = BIT(17);
-
- if (mute)
- val |= BIT(1);
-
- regmap_write(rk3328->grf, RK3328_GRF_SOC_CON10, val);
-}
-
static int rk3328_digital_mute(struct snd_soc_dai *dai, int mute)
{
struct rk3328_codec_priv *rk3328 =
@@ -205,7 +196,7 @@ static int rk3328_codec_open_playback(struct rk3328_codec_priv *rk3328)
}
msleep(rk3328->spk_depop_time);
- rk3328_analog_output(rk3328, 1);
+ gpiod_set_value(rk3328->mute, 0);
regmap_update_bits(rk3328->regmap, HPOUTL_GAIN_CTRL,
HPOUTL_GAIN_MASK, OUT_VOLUME);
@@ -246,7 +237,7 @@ static int rk3328_codec_close_playback(struct rk3328_codec_priv *rk3328)
{
size_t i;
- rk3328_analog_output(rk3328, 0);
+ gpiod_set_value(rk3328->mute, 1);
regmap_update_bits(rk3328->regmap, HPOUTL_GAIN_CTRL,
HPOUTL_GAIN_MASK, 0);
@@ -446,7 +437,6 @@ static int rk3328_platform_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "missing 'rockchip,grf'\n");
return PTR_ERR(grf);
}
- rk3328->grf = grf;
/* enable i2s_acodec_en */
regmap_write(grf, RK3328_GRF_SOC_CON2,
(BIT(14) << 16 | BIT(14)));
@@ -458,7 +448,18 @@ static int rk3328_platform_probe(struct platform_device *pdev)
rk3328->spk_depop_time = 200;
}
- rk3328_analog_output(rk3328, 0);
+ rk3328->mute = gpiod_get_optional(&pdev->dev, "mute", GPIOD_OUT_HIGH);
+ if (IS_ERR(rk3328->mute))
+ return PTR_ERR(rk3328->mute);
+ /*
+ * Rock64 is the only supported platform to have widely relied on
+ * this; if we do happen to come across an old DTB, just leave the
+ * external mute forced off.
+ */
+ if (!rk3328->mute && of_machine_is_compatible("pine64,rock64")) {
+ dev_warn(&pdev->dev, "assuming implicit control of GPIO_MUTE; update devicetree if possible\n");
+ regmap_write(grf, RK3328_GRF_SOC_CON10, BIT(17) | BIT(1));
+ }
rk3328->mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(rk3328->mclk))
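
gpiod_get_optional() is what makes the DTB fallback above work: it returns an
ERR_PTR only on a real lookup failure, and NULL when no "mute-gpios" property
exists at all, so the driver can tell an old devicetree apart from a broken
one. Requesting the line as GPIOD_OUT_HIGH also guarantees it starts asserted,
i.e. muted, before playback opens. The idiom:

    desc = gpiod_get_optional(dev, "mute", GPIOD_OUT_HIGH);
    if (IS_ERR(desc))
            return PTR_ERR(desc);   /* lookup/request actually failed */
    if (!desc)
            legacy_fallback();      /* property absent: old DTB */
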
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index a887d5ccb10d..d181c217d835 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -102,6 +102,7 @@ struct pll_calc_map {
static const struct pll_calc_map pll_preset_table[] = {
{19200000, 4096000, 23, 14, 1, false},
{19200000, 24576000, 3, 30, 3, false},
+ {3840000, 24576000, 3, 30, 0, true},
};
static unsigned int find_best_div(unsigned int in,
diff --git a/sound/soc/codecs/rl6231.h b/sound/soc/codecs/rl6231.h
index 31a9643b0afd..6d8ed0377296 100644
--- a/sound/soc/codecs/rl6231.h
+++ b/sound/soc/codecs/rl6231.h
@@ -10,7 +10,7 @@
#ifndef __RL6231_H__
#define __RL6231_H__
-#define RL6231_PLL_INP_MAX 40000000
+#define RL6231_PLL_INP_MAX 50000000
#define RL6231_PLL_INP_MIN 256000
#define RL6231_PLL_N_MAX 0x1ff
#define RL6231_PLL_K_MAX 0x1f
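
Both rl6231 changes appear to be groundwork for the rt5682 PLL2 support later
in this diff: the new preset maps exactly the suggested 3.84 MHz intermediate
clock to a 24.576 MHz system clock, and raising RL6231_PLL_INP_MAX to 50 MHz
admits the 48 MHz input the front PLL stage is fed. The preset checks out
against the relation used by rl6231_pll_calc(),
Fout = Fin * (n + 2) / ((m + 2) * (k + 2)), where the (m + 2) factor drops to
1 when the M divider is bypassed:

    /* {3840000, 24576000, k=3, n=30, m=0, bypass=true}:
     * 3840000 * (30 + 2) / (1 * (3 + 2)) = 3840000 * 32 / 5 = 24576000 */
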
diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
index 66eb55b4ffd4..bb310bc7febd 100644
--- a/sound/soc/codecs/rt1015.c
+++ b/sound/soc/codecs/rt1015.c
@@ -444,7 +444,7 @@ static int rt1015_boost_mode_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static int rt5518_bypass_boost_get(struct snd_kcontrol *kcontrol,
+static int rt1015_bypass_boost_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component =
@@ -457,7 +457,7 @@ static int rt5518_bypass_boost_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static int rt5518_bypass_boost_put(struct snd_kcontrol *kcontrol,
+static int rt1015_bypass_boost_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component =
@@ -497,7 +497,7 @@ static const struct snd_kcontrol_new rt1015_snd_controls[] = {
rt1015_boost_mode_get, rt1015_boost_mode_put),
SOC_ENUM("Mono LR Select", rt1015_mono_lr_sel),
SOC_SINGLE_EXT("Bypass Boost", SND_SOC_NOPM, 0, 1, 0,
- rt5518_bypass_boost_get, rt5518_bypass_boost_put),
+ rt1015_bypass_boost_get, rt1015_bypass_boost_put),
};
static int rt1015_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
@@ -841,12 +841,12 @@ static void rt1015_remove(struct snd_soc_component *component)
#define RT1015_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
-struct snd_soc_dai_ops rt1015_aif_dai_ops = {
+static struct snd_soc_dai_ops rt1015_aif_dai_ops = {
.hw_params = rt1015_hw_params,
.set_fmt = rt1015_set_dai_fmt,
};
-struct snd_soc_dai_driver rt1015_dai[] = {
+static struct snd_soc_dai_driver rt1015_dai[] = {
{
.name = "rt1015-aif",
.id = 0,
diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
index d930f60cb797..a5a7e46de246 100644
--- a/sound/soc/codecs/rt1308-sdw.c
+++ b/sound/soc/codecs/rt1308-sdw.c
@@ -507,6 +507,28 @@ static void rt1308_sdw_shutdown(struct snd_pcm_substream *substream,
kfree(stream);
}
+static int rt1308_sdw_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt1308_sdw_priv *rt1308 =
+ snd_soc_component_get_drvdata(component);
+
+ if (tx_mask)
+ return -EINVAL;
+
+ if (slots > 2)
+ return -EINVAL;
+
+ rt1308->rx_mask = rx_mask;
+ rt1308->slots = slots;
+ /* slot_width is not used since it's irrelevant for SoundWire */
+
+ return 0;
+}
+
static int rt1308_sdw_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
@@ -517,7 +539,7 @@ static int rt1308_sdw_hw_params(struct snd_pcm_substream *substream,
struct sdw_port_config port_config;
enum sdw_data_direction direction;
struct sdw_stream_data *stream;
- int retval, port, num_channels;
+ int retval, port, num_channels, ch_mask;
dev_dbg(dai->dev, "%s %s", __func__, dai->name);
stream = snd_soc_dai_get_dma_data(dai, substream);
@@ -537,13 +559,20 @@ static int rt1308_sdw_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ if (rt1308->slots) {
+ num_channels = rt1308->slots;
+ ch_mask = rt1308->rx_mask;
+ } else {
+ num_channels = params_channels(params);
+ ch_mask = (1 << num_channels) - 1;
+ }
+
stream_config.frame_rate = params_rate(params);
- stream_config.ch_count = params_channels(params);
+ stream_config.ch_count = num_channels;
stream_config.bps = snd_pcm_format_width(params_format(params));
stream_config.direction = direction;
- num_channels = params_channels(params);
- port_config.ch_mask = (1 << (num_channels)) - 1;
+ port_config.ch_mask = ch_mask;
port_config.num = port;
retval = sdw_stream_add_slave(rt1308->sdw_slave, &stream_config,
@@ -597,6 +626,7 @@ static const struct snd_soc_dai_ops rt1308_aif_dai_ops = {
.hw_free = rt1308_sdw_pcm_hw_free,
.set_sdw_stream = rt1308_set_sdw_stream,
.shutdown = rt1308_sdw_shutdown,
+ .set_tdm_slot = rt1308_sdw_set_tdm_slot,
};
#define RT1308_STEREO_RATES SNDRV_PCM_RATE_48000
diff --git a/sound/soc/codecs/rt1308-sdw.h b/sound/soc/codecs/rt1308-sdw.h
index c9341e70d6cf..c5ce75666dcc 100644
--- a/sound/soc/codecs/rt1308-sdw.h
+++ b/sound/soc/codecs/rt1308-sdw.h
@@ -160,6 +160,8 @@ struct rt1308_sdw_priv {
struct sdw_bus_params params;
bool hw_init;
bool first_hw_init;
+ int rx_mask;
+ int slots;
};
struct sdw_stream_data {
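
With set_tdm_slot wired up, a machine driver can now place the amplifier on
specific channels of a shared SoundWire stream instead of always taking the
first params_channels() slots. tx_mask must be 0 (the RT1308 is playback-only
here) and at most two slots are accepted. For example, to take the first two
slots of the stream via the standard ASoC call:

    /* rx_mask 0x3 = slots 0 and 1; slot_width is ignored on SoundWire */
    ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x3, 2, 32);
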
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index e66d08398f74..89e0f58512fa 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -1604,7 +1604,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
- int pd, idx = -EINVAL;
+ int pd, idx;
pd = rl6231_get_pre_div(rt5659->regmap,
RT5659_ADDA_CLK_1, RT5659_I2S_PD1_SFT);
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
new file mode 100644
index 000000000000..a2d1d3ae1e31
--- /dev/null
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// rt5682-sdw.c -- RT5682 ALSA SoC audio component driver
+//
+// Copyright 2019 Realtek Semiconductor Corp.
+// Author: Oder Chiou <oder_chiou@realtek.com>
+//
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rt5682.h"
+#include "rt5682-sdw.h"
+
+static bool rt5682_sdw_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x00e0:
+ case 0x00f0:
+ case 0x3000:
+ case 0x3001:
+ case 0x3004:
+ case 0x3005:
+ case 0x3008:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config rt5682_sdw_regmap = {
+ .name = "sdw",
+ .reg_bits = 32,
+ .val_bits = 8,
+ .max_register = RT5682_I2C_MODE,
+ .readable_reg = rt5682_sdw_readable_register,
+ .cache_type = REGCACHE_NONE,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int rt5682_update_status(struct sdw_slave *slave,
+ enum sdw_slave_status status)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(&slave->dev);
+
+ /* Update the status */
+ rt5682->status = status;
+
+ if (status == SDW_SLAVE_UNATTACHED)
+ rt5682->hw_init = false;
+
+ /*
+ * Perform initialization only if slave status is present and
+ * hw_init flag is false
+ */
+ if (rt5682->hw_init || rt5682->status != SDW_SLAVE_ATTACHED)
+ return 0;
+
+ /* perform I/O transfers required for Slave initialization */
+ return rt5682_io_init(&slave->dev, slave);
+}
+
+static int rt5682_read_prop(struct sdw_slave *slave)
+{
+ struct sdw_slave_prop *prop = &slave->prop;
+ int nval, i, num_of_ports = 1;
+ u32 bit;
+ unsigned long addr;
+ struct sdw_dpn_prop *dpn;
+
+ prop->paging_support = false;
+
+ /* first we need to allocate memory for set bits in port lists */
+ prop->source_ports = 0x4; /* BITMAP: 00000100 */
+ prop->sink_ports = 0x2; /* BITMAP: 00000010 */
+
+ nval = hweight32(prop->source_ports);
+ num_of_ports += nval;
+ prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
+ sizeof(*prop->src_dpn_prop),
+ GFP_KERNEL);
+ if (!prop->src_dpn_prop)
+ return -ENOMEM;
+
+ i = 0;
+ dpn = prop->src_dpn_prop;
+ addr = prop->source_ports;
+ for_each_set_bit(bit, &addr, 32) {
+ dpn[i].num = bit;
+ dpn[i].type = SDW_DPN_FULL;
+ dpn[i].simple_ch_prep_sm = true;
+ dpn[i].ch_prep_timeout = 10;
+ i++;
+ }
+
+ /* do this again for sink now */
+ nval = hweight32(prop->sink_ports);
+ num_of_ports += nval;
+ prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
+ sizeof(*prop->sink_dpn_prop),
+ GFP_KERNEL);
+ if (!prop->sink_dpn_prop)
+ return -ENOMEM;
+
+ i = 0;
+ dpn = prop->sink_dpn_prop;
+ addr = prop->sink_ports;
+ for_each_set_bit(bit, &addr, 32) {
+ dpn[i].num = bit;
+ dpn[i].type = SDW_DPN_FULL;
+ dpn[i].simple_ch_prep_sm = true;
+ dpn[i].ch_prep_timeout = 10;
+ i++;
+ }
+
+ /* Allocate port_ready based on num_of_ports */
+ slave->port_ready = devm_kcalloc(&slave->dev, num_of_ports,
+ sizeof(*slave->port_ready),
+ GFP_KERNEL);
+ if (!slave->port_ready)
+ return -ENOMEM;
+
+ /* Initialize completion */
+ for (i = 0; i < num_of_ports; i++)
+ init_completion(&slave->port_ready[i]);
+
+ /* set the timeout values */
+ prop->clk_stop_timeout = 20;
+
+ /* wake-up event */
+ prop->wake_capable = 1;
+
+ return 0;
+}
+
+/* Bus clock frequency */
+#define RT5682_CLK_FREQ_9600000HZ 9600000
+#define RT5682_CLK_FREQ_12000000HZ 12000000
+#define RT5682_CLK_FREQ_6000000HZ 6000000
+#define RT5682_CLK_FREQ_4800000HZ 4800000
+#define RT5682_CLK_FREQ_2400000HZ 2400000
+#define RT5682_CLK_FREQ_12288000HZ 12288000
+
+static int rt5682_clock_config(struct device *dev)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ unsigned int clk_freq, value;
+
+ clk_freq = (rt5682->params.curr_dr_freq >> 1);
+
+ switch (clk_freq) {
+ case RT5682_CLK_FREQ_12000000HZ:
+ value = 0x0;
+ break;
+ case RT5682_CLK_FREQ_6000000HZ:
+ value = 0x1;
+ break;
+ case RT5682_CLK_FREQ_9600000HZ:
+ value = 0x2;
+ break;
+ case RT5682_CLK_FREQ_4800000HZ:
+ value = 0x3;
+ break;
+ case RT5682_CLK_FREQ_2400000HZ:
+ value = 0x4;
+ break;
+ case RT5682_CLK_FREQ_12288000HZ:
+ value = 0x5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write(rt5682->sdw_regmap, 0xe0, value);
+ regmap_write(rt5682->sdw_regmap, 0xf0, value);
+
+ dev_dbg(dev, "%s complete, clk_freq=%d\n", __func__, clk_freq);
+
+ return 0;
+}
+
+static int rt5682_bus_config(struct sdw_slave *slave,
+ struct sdw_bus_params *params)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(&slave->dev);
+ int ret;
+
+ memcpy(&rt5682->params, params, sizeof(*params));
+
+ ret = rt5682_clock_config(&slave->dev);
+ if (ret < 0)
+		dev_err(&slave->dev, "Invalid clk config\n");
+
+ return ret;
+}
+
+static int rt5682_interrupt_callback(struct sdw_slave *slave,
+ struct sdw_slave_intr_status *status)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(&slave->dev);
+
+ dev_dbg(&slave->dev,
+ "%s control_port_stat=%x", __func__, status->control_port);
+
+ if (status->control_port & 0x4) {
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(250));
+ }
+
+ return 0;
+}
+
+static struct sdw_slave_ops rt5682_slave_ops = {
+ .read_prop = rt5682_read_prop,
+ .interrupt_callback = rt5682_interrupt_callback,
+ .update_status = rt5682_update_status,
+ .bus_config = rt5682_bus_config,
+};
+
+static int rt5682_sdw_probe(struct sdw_slave *slave,
+ const struct sdw_device_id *id)
+{
+ struct regmap *regmap;
+
+ /* Assign ops */
+ slave->ops = &rt5682_slave_ops;
+
+ /* Regmap Initialization */
+ regmap = devm_regmap_init_sdw(slave, &rt5682_sdw_regmap);
+ if (IS_ERR(regmap))
+ return -EINVAL;
+
+ rt5682_sdw_init(&slave->dev, regmap, slave);
+
+ return 0;
+}
+
+static int rt5682_sdw_remove(struct sdw_slave *slave)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(&slave->dev);
+
+ if (rt5682 && rt5682->hw_init)
+ cancel_delayed_work(&rt5682->jack_detect_work);
+
+ return 0;
+}
+
+static const struct sdw_device_id rt5682_id[] = {
+ SDW_SLAVE_ENTRY(0x025d, 0x5682, 0),
+ {},
+};
+MODULE_DEVICE_TABLE(sdw, rt5682_id);
+
+static int __maybe_unused rt5682_dev_suspend(struct device *dev)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+
+ if (!rt5682->hw_init)
+ return 0;
+
+ regcache_cache_only(rt5682->regmap, true);
+ regcache_mark_dirty(rt5682->regmap);
+
+ return 0;
+}
+
+static int __maybe_unused rt5682_dev_resume(struct device *dev)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ unsigned long time;
+
+ if (!rt5682->hw_init)
+ return 0;
+
+ if (!slave->unattach_request)
+ goto regmap_sync;
+
+ time = wait_for_completion_timeout(&slave->initialization_complete,
+ msecs_to_jiffies(RT5682_PROBE_TIMEOUT));
+ if (!time) {
+ dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ return -ETIMEDOUT;
+ }
+
+regmap_sync:
+ slave->unattach_request = 0;
+ regcache_cache_only(rt5682->regmap, false);
+ regcache_sync(rt5682->regmap);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rt5682_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rt5682_dev_suspend, rt5682_dev_resume)
+ SET_RUNTIME_PM_OPS(rt5682_dev_suspend, rt5682_dev_resume, NULL)
+};
+
+static struct sdw_driver rt5682_sdw_driver = {
+ .driver = {
+ .name = "rt5682",
+ .owner = THIS_MODULE,
+ .pm = &rt5682_pm,
+ },
+ .probe = rt5682_sdw_probe,
+ .remove = rt5682_sdw_remove,
+ .ops = &rt5682_slave_ops,
+ .id_table = rt5682_id,
+};
+module_sdw_driver(rt5682_sdw_driver);
+
+MODULE_DESCRIPTION("ASoC RT5682 SoundWire driver");
+MODULE_AUTHOR("Oder Chiou <oder_chiou@realtek.com>");
+MODULE_LICENSE("GPL v2");
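
The suspend/resume pair follows the usual regmap pattern for bus-managed
devices: on suspend the cache is frozen and marked dirty; on resume, accesses
stay cached until the SoundWire core signals that the slave has re-enumerated
(initialization_complete), and only then is the cache replayed to hardware.
Reduced to its skeleton:

    /* suspend */
    regcache_cache_only(map, true);     /* serve accesses from cache */
    regcache_mark_dirty(map);           /* force a full sync on resume */

    /* resume, once the device is back on the bus */
    regcache_cache_only(map, false);
    regcache_sync(map);                 /* replay dirty registers */
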
diff --git a/sound/soc/codecs/rt5682-sdw.h b/sound/soc/codecs/rt5682-sdw.h
new file mode 100644
index 000000000000..76e6f607066e
--- /dev/null
+++ b/sound/soc/codecs/rt5682-sdw.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * rt5682-sdw.h -- RT5682 SDW ALSA SoC audio driver
+ *
+ * Copyright 2019 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ */
+
+#ifndef __RT5682_SDW_H__
+#define __RT5682_SDW_H__
+
+#define RT5682_SDW_ADDR_L 0x3000
+#define RT5682_SDW_ADDR_H 0x3001
+#define RT5682_SDW_DATA_L 0x3004
+#define RT5682_SDW_DATA_H 0x3005
+#define RT5682_SDW_CMD 0x3008
+
+#define RT5682_PROBE_TIMEOUT 2000
+
+#endif /* __RT5682_SDW_H__ */
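
The 0x3000-range defines look like an indirect-access window: judging by the
readable-register list in rt5682-sdw.c (the ADDR_H/L and DATA_H/L pairs plus a
CMD register, alongside the two clock registers 0x00e0/0x00f0), the codec's
native 16-bit register map is presumably reached over SoundWire by staging an
address in the window and transferring the data pair — which would also
explain why the regmap above is declared with reg_bits = 32, val_bits = 8 and
REGCACHE_NONE, leaving caching to the codec-side regmap.
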
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index ae6f6121bc1b..c9268a230daa 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -11,13 +11,13 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -31,8 +31,7 @@
#include "rl6231.h"
#include "rt5682.h"
-
-#define RT5682_NUM_SUPPLIES 3
+#include "rt5682-sdw.h"
static const char *rt5682_supply_names[RT5682_NUM_SUPPLIES] = {
"AVDD",
@@ -45,35 +44,15 @@ static const struct rt5682_platform_data i2s_default_platform_data = {
.dmic1_clk_pin = RT5682_DMIC1_CLK_GPIO3,
.jd_src = RT5682_JD1,
.btndet_delay = 16,
-};
-
-struct rt5682_priv {
- struct snd_soc_component *component;
- struct rt5682_platform_data pdata;
- struct regmap *regmap;
- struct snd_soc_jack *hs_jack;
- struct regulator_bulk_data supplies[RT5682_NUM_SUPPLIES];
- struct delayed_work jack_detect_work;
- struct delayed_work jd_check_work;
- struct mutex calibrate_mutex;
-
- int sysclk;
- int sysclk_src;
- int lrck[RT5682_AIFS];
- int bclk[RT5682_AIFS];
- int master[RT5682_AIFS];
-
- int pll_src;
- int pll_in;
- int pll_out;
-
- int jack_type;
+ .dai_clk_names[RT5682_DAI_WCLK_IDX] = "rt5682-dai-wclk",
+ .dai_clk_names[RT5682_DAI_BCLK_IDX] = "rt5682-dai-bclk",
};
static const struct reg_sequence patch_list[] = {
{RT5682_HP_IMP_SENS_CTRL_19, 0x1000},
{RT5682_DAC_ADC_DIG_VOL1, 0xa020},
{RT5682_I2C_CTRL, 0x000f},
+ {RT5682_PLL2_INTERNAL, 0x8266},
};
static const struct reg_default rt5682_reg[] = {
@@ -221,7 +200,7 @@ static const struct reg_default rt5682_reg[] = {
{0x0148, 0x0000},
{0x0149, 0x0000},
{0x0150, 0x79a1},
- {0x0151, 0x0000},
+ {0x0156, 0xaaaa},
{0x0160, 0x4ec0},
{0x0161, 0x0080},
{0x0162, 0x0200},
@@ -805,10 +784,27 @@ static const struct snd_kcontrol_new rt5682_if1_45_adc_swap_mux =
static const struct snd_kcontrol_new rt5682_if1_67_adc_swap_mux =
SOC_DAPM_ENUM("IF1 67 ADC Swap Mux", rt5682_if1_67_adc_enum);
-static void rt5682_reset(struct regmap *regmap)
+static const char * const rt5682_dac_select[] = {
+ "IF1", "SOUND"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt5682_dacl_enum,
+ RT5682_AD_DA_MIXER, RT5682_DAC1_L_SEL_SFT, rt5682_dac_select);
+
+static const struct snd_kcontrol_new rt5682_dac_l_mux =
+ SOC_DAPM_ENUM("DAC L Mux", rt5682_dacl_enum);
+
+static SOC_ENUM_SINGLE_DECL(rt5682_dacr_enum,
+ RT5682_AD_DA_MIXER, RT5682_DAC1_R_SEL_SFT, rt5682_dac_select);
+
+static const struct snd_kcontrol_new rt5682_dac_r_mux =
+ SOC_DAPM_ENUM("DAC R Mux", rt5682_dacr_enum);
+
+static void rt5682_reset(struct rt5682_priv *rt5682)
{
- regmap_write(regmap, RT5682_RESET, 0);
- regmap_write(regmap, RT5682_I2C_MODE, 1);
+ regmap_write(rt5682->regmap, RT5682_RESET, 0);
+ if (!rt5682->is_sdw)
+ regmap_write(rt5682->regmap, RT5682_I2C_MODE, 1);
}
/**
* rt5682_sel_asrc_clk_src - select ASRC clock source for a set of filters
@@ -871,6 +867,8 @@ static int rt5682_button_detect(struct snd_soc_component *component)
static void rt5682_enable_push_button_irq(struct snd_soc_component *component,
bool enable)
{
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
if (enable) {
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
RT5682_SAR_BUTT_DET_MASK, RT5682_SAR_BUTT_DET_EN);
@@ -880,8 +878,15 @@ static void rt5682_enable_push_button_irq(struct snd_soc_component *component,
snd_soc_component_update_bits(component, RT5682_4BTN_IL_CMD_2,
RT5682_4BTN_IL_MASK | RT5682_4BTN_IL_RST_MASK,
RT5682_4BTN_IL_EN | RT5682_4BTN_IL_NOR);
- snd_soc_component_update_bits(component, RT5682_IRQ_CTRL_3,
- RT5682_IL_IRQ_MASK, RT5682_IL_IRQ_EN);
+ if (rt5682->is_sdw)
+ snd_soc_component_update_bits(component,
+ RT5682_IRQ_CTRL_3,
+ RT5682_IL_IRQ_MASK | RT5682_IL_IRQ_TYPE_MASK,
+ RT5682_IL_IRQ_EN | RT5682_IL_IRQ_PUL);
+ else
+ snd_soc_component_update_bits(component,
+ RT5682_IRQ_CTRL_3, RT5682_IL_IRQ_MASK,
+ RT5682_IL_IRQ_EN);
} else {
snd_soc_component_update_bits(component, RT5682_IRQ_CTRL_3,
RT5682_IL_IRQ_MASK, RT5682_IL_IRQ_DIS);
@@ -909,6 +914,7 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
int jack_insert)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_dapm_context *dapm = &component->dapm;
unsigned int val, count;
if (jack_insert) {
@@ -917,10 +923,10 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
RT5682_PWR_VREF2 | RT5682_PWR_MB,
RT5682_PWR_VREF2 | RT5682_PWR_MB);
snd_soc_component_update_bits(component,
- RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
usleep_range(15000, 20000);
snd_soc_component_update_bits(component,
- RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
+ RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
RT5682_PWR_CBJ, RT5682_PWR_CBJ);
@@ -951,8 +957,13 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
- snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
- RT5682_PWR_VREF2 | RT5682_PWR_MB, 0);
+ if (snd_soc_dapm_get_pin_status(dapm, "MICBIAS"))
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0);
+ else
+ snd_soc_component_update_bits(component,
+ RT5682_PWR_ANLG_1,
+ RT5682_PWR_VREF2 | RT5682_PWR_MB, 0);
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
RT5682_PWR_CBJ, 0);
@@ -999,62 +1010,69 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
rt5682->hs_jack = hs_jack;
- if (!hs_jack) {
- regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
- RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
- regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
- RT5682_POW_JDH | RT5682_POW_JDL, 0);
- cancel_delayed_work_sync(&rt5682->jack_detect_work);
- return 0;
- }
+ if (!rt5682->is_sdw) {
+ if (!hs_jack) {
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ cancel_delayed_work_sync(&rt5682->jack_detect_work);
+ return 0;
+ }
- switch (rt5682->pdata.jd_src) {
- case RT5682_JD1:
- snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_2,
- RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
- snd_soc_component_write(component, RT5682_CBJ_CTRL_1, 0xd042);
- snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_3,
- RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
- snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
- RT5682_SAR_POW_MASK, RT5682_SAR_POW_EN);
- regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
- RT5682_GP1_PIN_MASK, RT5682_GP1_PIN_IRQ);
- regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ switch (rt5682->pdata.jd_src) {
+ case RT5682_JD1:
+ snd_soc_component_update_bits(component,
+ RT5682_CBJ_CTRL_2, RT5682_EXT_JD_SRC,
+ RT5682_EXT_JD_SRC_MANUAL);
+ snd_soc_component_write(component, RT5682_CBJ_CTRL_1,
+ 0xd042);
+ snd_soc_component_update_bits(component,
+ RT5682_CBJ_CTRL_3, RT5682_CBJ_IN_BUF_EN,
+ RT5682_CBJ_IN_BUF_EN);
+ snd_soc_component_update_bits(component,
+ RT5682_SAR_IL_CMD_1, RT5682_SAR_POW_MASK,
+ RT5682_SAR_POW_EN);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP1_PIN_MASK, RT5682_GP1_PIN_IRQ);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
RT5682_POW_IRQ | RT5682_POW_JDH |
RT5682_POW_ANA, RT5682_POW_IRQ |
RT5682_POW_JDH | RT5682_POW_ANA);
- regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_2,
- RT5682_PWR_JDH | RT5682_PWR_JDL,
- RT5682_PWR_JDH | RT5682_PWR_JDL);
- regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
- RT5682_JD1_EN_MASK | RT5682_JD1_POL_MASK,
- RT5682_JD1_EN | RT5682_JD1_POL_NOR);
- regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_4,
- 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
- rt5682->pdata.btndet_delay));
- regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_5,
- 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
- rt5682->pdata.btndet_delay));
- regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_6,
- 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
- rt5682->pdata.btndet_delay));
- regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_7,
- 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
- rt5682->pdata.btndet_delay));
- mod_delayed_work(system_power_efficient_wq,
- &rt5682->jack_detect_work, msecs_to_jiffies(250));
- break;
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_2,
+ RT5682_PWR_JDH | RT5682_PWR_JDL,
+ RT5682_PWR_JDH | RT5682_PWR_JDL);
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK | RT5682_JD1_POL_MASK,
+ RT5682_JD1_EN | RT5682_JD1_POL_NOR);
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_4,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_5,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_6,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_7,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work,
+ msecs_to_jiffies(250));
+ break;
- case RT5682_JD_NULL:
- regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
- RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
- regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
- RT5682_POW_JDH | RT5682_POW_JDL, 0);
- break;
+ case RT5682_JD_NULL:
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ break;
- default:
- dev_warn(component->dev, "Wrong JD source\n");
- break;
+ default:
+ dev_warn(component->dev, "Wrong JD source\n");
+ break;
+ }
}
return 0;
@@ -1134,11 +1152,13 @@ static void rt5682_jack_detect_handler(struct work_struct *work)
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
SND_JACK_BTN_2 | SND_JACK_BTN_3);
- if (rt5682->jack_type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3))
- schedule_delayed_work(&rt5682->jd_check_work, 0);
- else
- cancel_delayed_work_sync(&rt5682->jd_check_work);
+ if (!rt5682->is_sdw) {
+ if (rt5682->jack_type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3))
+ schedule_delayed_work(&rt5682->jd_check_work, 0);
+ else
+ cancel_delayed_work_sync(&rt5682->jd_check_work);
+ }
mutex_unlock(&rt5682->calibrate_mutex);
}
@@ -1146,7 +1166,7 @@ static void rt5682_jack_detect_handler(struct work_struct *work)
static const struct snd_kcontrol_new rt5682_snd_controls[] = {
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
- RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
+ RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 87, 0, dac_vol_tlv),
/* IN Boost Volume */
SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
@@ -1177,11 +1197,11 @@ static int rt5682_div_sel(struct rt5682_priv *rt5682,
}
for (i = 0; i < size - 1; i++) {
- pr_info("div[%d]=%d\n", i, div[i]);
+ dev_dbg(rt5682->component->dev, "div[%d]=%d\n", i, div[i]);
if (target * div[i] == rt5682->sysclk)
return i;
if (target * div[i + 1] > rt5682->sysclk) {
- pr_err("can't find div for sysclk %d\n",
+ dev_dbg(rt5682->component->dev, "can't find div for sysclk %d\n",
rt5682->sysclk);
return i;
}
@@ -1211,10 +1231,13 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
- int idx = -EINVAL;
+ int idx = -EINVAL, dmic_clk_rate = 3072000;
static const int div[] = {2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128};
- idx = rt5682_div_sel(rt5682, 1500000, div, ARRAY_SIZE(div));
+ if (rt5682->pdata.dmic_clk_rate)
+ dmic_clk_rate = rt5682->pdata.dmic_clk_rate;
+
+ idx = rt5682_div_sel(rt5682, dmic_clk_rate, div, ARRAY_SIZE(div));
snd_soc_component_update_bits(component, RT5682_DMIC_CTRL_1,
RT5682_DMIC_CLK_MASK, idx << RT5682_DMIC_CLK_SFT);
@@ -1232,6 +1255,9 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48};
static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48};
+ if (rt5682->is_sdw)
+ return 0;
+
val = snd_soc_component_read32(component, RT5682_GPIO_CTRL_1) &
RT5682_GP4_PIN_MASK;
if (w->shift == RT5682_PWR_ADC_S1F_BIT &&
@@ -1278,6 +1304,21 @@ static int is_sys_clk_from_pll1(struct snd_soc_dapm_widget *w,
return 0;
}
+static int is_sys_clk_from_pll2(struct snd_soc_dapm_widget *w,
+ struct snd_soc_dapm_widget *sink)
+{
+ unsigned int val;
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ val = snd_soc_component_read32(component, RT5682_GLB_CLK);
+ val &= RT5682_SCLK_SRC_MASK;
+ if (val == RT5682_SCLK_SRC_PLL2)
+ return 1;
+ else
+ return 0;
+}
+
static int is_using_asrc(struct snd_soc_dapm_widget *w,
struct snd_soc_dapm_widget *sink)
{
@@ -1503,10 +1544,18 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
static int set_dmic_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ unsigned int delay = 50;
+
+ if (rt5682->pdata.dmic_delay)
+ delay = rt5682->pdata.dmic_delay;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
/*Add delay to avoid pop noise*/
- msleep(150);
+ msleep(delay);
break;
default:
@@ -1516,7 +1565,7 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
return 0;
}
-static int rt5655_set_verf(struct snd_soc_dapm_widget *w,
+static int rt5682_set_verf(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
@@ -1592,9 +1641,12 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("PLL2B", RT5682_PWR_ANLG_3, RT5682_PWR_PLL2B_BIT,
0, NULL, 0),
SND_SOC_DAPM_SUPPLY("PLL2F", RT5682_PWR_ANLG_3, RT5682_PWR_PLL2F_BIT,
- 0, NULL, 0),
+ 0, set_filter_clk, SND_SOC_DAPM_PRE_PMU),
SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0,
- rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ rt5682_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS", SND_SOC_NOPM, 0, 0, NULL, 0),
/* ASRC */
SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1,
@@ -1686,6 +1738,8 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("IF1 DAC1 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SOUND DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SOUND DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
/* Digital Interface Select */
SND_SOC_DAPM_MUX("IF1 01 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
@@ -1702,12 +1756,19 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_MUX("ADCDAT Mux", SND_SOC_NOPM, 0, 0,
&rt5682_adcdat_pin_ctrl),
+ SND_SOC_DAPM_MUX("DAC L Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_dac_l_mux),
+ SND_SOC_DAPM_MUX("DAC R Mux", SND_SOC_NOPM, 0, 0,
+ &rt5682_dac_r_mux),
+
/* Audio Interface */
SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0,
RT5682_I2S1_SDP, RT5682_SEL_ADCDAT_SFT, 1),
SND_SOC_DAPM_AIF_OUT("AIF2TX", "AIF2 Capture", 0,
RT5682_I2S2_SDP, RT5682_I2S2_PIN_CFG_SFT, 1),
SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SDWRX", "SDW Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SDWTX", "SDW Capture", 0, SND_SOC_NOPM, 0, 0),
/* Output Side */
/* DAC mixer before sound effect */
@@ -1776,7 +1837,11 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
/*PLL*/
{"ADC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll1},
+ {"ADC Stereo1 Filter", NULL, "PLL2B", is_sys_clk_from_pll2},
+ {"ADC Stereo1 Filter", NULL, "PLL2F", is_sys_clk_from_pll2},
{"DAC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll1},
+ {"DAC Stereo1 Filter", NULL, "PLL2B", is_sys_clk_from_pll2},
+ {"DAC Stereo1 Filter", NULL, "PLL2F", is_sys_clk_from_pll2},
/*ASRC*/
{"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
@@ -1860,8 +1925,8 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"IF1_ADC Mux", "Slot 2", "IF1 23 ADC Swap Mux"},
{"IF1_ADC Mux", "Slot 4", "IF1 45 ADC Swap Mux"},
{"IF1_ADC Mux", "Slot 6", "IF1 67 ADC Swap Mux"},
- {"IF1_ADC Mux", NULL, "I2S1"},
{"ADCDAT Mux", "ADCDAT1", "IF1_ADC Mux"},
+ {"AIF1TX", NULL, "I2S1"},
{"AIF1TX", NULL, "ADCDAT Mux"},
{"IF2 ADC Swap Mux", "L/R", "Stereo1 ADC MIX"},
{"IF2 ADC Swap Mux", "R/L", "Stereo1 ADC MIX"},
@@ -1870,6 +1935,10 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"ADCDAT Mux", "ADCDAT2", "IF2 ADC Swap Mux"},
{"AIF2TX", NULL, "ADCDAT Mux"},
+ {"SDWTX", NULL, "PLL2B"},
+ {"SDWTX", NULL, "PLL2F"},
+ {"SDWTX", NULL, "ADCDAT Mux"},
+
{"IF1 DAC1 L", NULL, "AIF1RX"},
{"IF1 DAC1 L", NULL, "I2S1"},
{"IF1 DAC1 L", NULL, "DAC Stereo1 Filter"},
@@ -1877,10 +1946,24 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"IF1 DAC1 R", NULL, "I2S1"},
{"IF1 DAC1 R", NULL, "DAC Stereo1 Filter"},
+ {"SOUND DAC L", NULL, "SDWRX"},
+ {"SOUND DAC L", NULL, "DAC Stereo1 Filter"},
+ {"SOUND DAC L", NULL, "PLL2B"},
+ {"SOUND DAC L", NULL, "PLL2F"},
+ {"SOUND DAC R", NULL, "SDWRX"},
+ {"SOUND DAC R", NULL, "DAC Stereo1 Filter"},
+ {"SOUND DAC R", NULL, "PLL2B"},
+ {"SOUND DAC R", NULL, "PLL2F"},
+
+ {"DAC L Mux", "IF1", "IF1 DAC1 L"},
+ {"DAC L Mux", "SOUND", "SOUND DAC L"},
+ {"DAC R Mux", "IF1", "IF1 DAC1 R"},
+ {"DAC R Mux", "SOUND", "SOUND DAC R"},
+
{"DAC1 MIXL", "Stereo ADC Switch", "Stereo1 ADC MIXL"},
- {"DAC1 MIXL", "DAC1 Switch", "IF1 DAC1 L"},
+ {"DAC1 MIXL", "DAC1 Switch", "DAC L Mux"},
{"DAC1 MIXR", "Stereo ADC Switch", "Stereo1 ADC MIXR"},
- {"DAC1 MIXR", "DAC1 Switch", "IF1 DAC1 R"},
+ {"DAC1 MIXR", "DAC1 Switch", "DAC R Mux"},
{"Stereo1 DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
{"Stereo1 DAC MIXL", "DAC R1 Switch", "DAC1 MIXR"},
@@ -2033,8 +2116,10 @@ static int rt5682_hw_params(struct snd_pcm_substream *substream,
RT5682_I2S1_DL_MASK, len_1);
if (rt5682->master[RT5682_AIF1]) {
snd_soc_component_update_bits(component,
- RT5682_ADDA_CLK_1, RT5682_I2S_M_DIV_MASK,
- pre_div << RT5682_I2S_M_DIV_SFT);
+ RT5682_ADDA_CLK_1, RT5682_I2S_M_DIV_MASK |
+ RT5682_I2S_CLK_SRC_MASK,
+ pre_div << RT5682_I2S_M_DIV_SFT |
+ (rt5682->sysclk_src) << RT5682_I2S_CLK_SRC_SFT);
}
if (params_channels(params) == 1) /* mono mode */
snd_soc_component_update_bits(component,
@@ -2207,61 +2292,157 @@ static int rt5682_set_component_pll(struct snd_soc_component *component,
unsigned int freq_out)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
- struct rl6231_pll_code pll_code;
+ struct rl6231_pll_code pll_code, pll2f_code, pll2b_code;
+ unsigned int pll2_fout1;
int ret;
- if (source == rt5682->pll_src && freq_in == rt5682->pll_in &&
- freq_out == rt5682->pll_out)
+ if (source == rt5682->pll_src[pll_id] &&
+ freq_in == rt5682->pll_in[pll_id] &&
+ freq_out == rt5682->pll_out[pll_id])
return 0;
if (!freq_in || !freq_out) {
dev_dbg(component->dev, "PLL disabled\n");
- rt5682->pll_in = 0;
- rt5682->pll_out = 0;
+ rt5682->pll_in[pll_id] = 0;
+ rt5682->pll_out[pll_id] = 0;
snd_soc_component_update_bits(component, RT5682_GLB_CLK,
RT5682_SCLK_SRC_MASK, RT5682_SCLK_SRC_MCLK);
return 0;
}
- switch (source) {
- case RT5682_PLL1_S_MCLK:
- snd_soc_component_update_bits(component, RT5682_GLB_CLK,
- RT5682_PLL1_SRC_MASK, RT5682_PLL1_SRC_MCLK);
- break;
- case RT5682_PLL1_S_BCLK1:
- snd_soc_component_update_bits(component, RT5682_GLB_CLK,
- RT5682_PLL1_SRC_MASK, RT5682_PLL1_SRC_BCLK1);
- break;
- default:
- dev_err(component->dev, "Unknown PLL Source %d\n", source);
- return -EINVAL;
- }
+ if (pll_id == RT5682_PLL2) {
+ switch (source) {
+ case RT5682_PLL2_S_MCLK:
+ snd_soc_component_update_bits(component,
+ RT5682_GLB_CLK, RT5682_PLL2_SRC_MASK,
+ RT5682_PLL2_SRC_MCLK);
+ break;
+ default:
+ dev_err(component->dev, "Unknown PLL2 Source %d\n",
+ source);
+ return -EINVAL;
+ }
- ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
- if (ret < 0) {
- dev_err(component->dev, "Unsupport input clock %d\n", freq_in);
- return ret;
+		/*
+		 * PLL2 cascades two PLL units; the suggested Fout of the
+		 * front PLL is 3.84 MHz.
+		 */
+ pll2_fout1 = 3840000;
+ ret = rl6231_pll_calc(freq_in, pll2_fout1, &pll2f_code);
+ if (ret < 0) {
+			dev_err(component->dev, "Unsupported input clock %d\n",
+ freq_in);
+ return ret;
+ }
+ dev_dbg(component->dev, "PLL2F: fin=%d fout=%d bypass=%d m=%d n=%d k=%d\n",
+ freq_in, pll2_fout1,
+ pll2f_code.m_bp,
+ (pll2f_code.m_bp ? 0 : pll2f_code.m_code),
+ pll2f_code.n_code, pll2f_code.k_code);
+
+ ret = rl6231_pll_calc(pll2_fout1, freq_out, &pll2b_code);
+ if (ret < 0) {
+			dev_err(component->dev, "Unsupported input clock %d\n",
+ pll2_fout1);
+ return ret;
+ }
+ dev_dbg(component->dev, "PLL2B: fin=%d fout=%d bypass=%d m=%d n=%d k=%d\n",
+ pll2_fout1, freq_out,
+ pll2b_code.m_bp,
+ (pll2b_code.m_bp ? 0 : pll2b_code.m_code),
+ pll2b_code.n_code, pll2b_code.k_code);
+
+ snd_soc_component_write(component, RT5682_PLL2_CTRL_1,
+ pll2f_code.k_code << RT5682_PLL2F_K_SFT |
+ pll2b_code.k_code << RT5682_PLL2B_K_SFT |
+ pll2b_code.m_code);
+ snd_soc_component_write(component, RT5682_PLL2_CTRL_2,
+ pll2f_code.m_code << RT5682_PLL2F_M_SFT |
+ pll2b_code.n_code);
+ snd_soc_component_write(component, RT5682_PLL2_CTRL_3,
+ pll2f_code.n_code << RT5682_PLL2F_N_SFT);
+ snd_soc_component_update_bits(component, RT5682_PLL2_CTRL_4,
+ RT5682_PLL2B_M_BP_MASK | RT5682_PLL2F_M_BP_MASK | 0xf,
+ (pll2b_code.m_bp ? 1 : 0) << RT5682_PLL2B_M_BP_SFT |
+ (pll2f_code.m_bp ? 1 : 0) << RT5682_PLL2F_M_BP_SFT |
+ 0xf);
+ } else {
+ switch (source) {
+ case RT5682_PLL1_S_MCLK:
+ snd_soc_component_update_bits(component,
+ RT5682_GLB_CLK, RT5682_PLL1_SRC_MASK,
+ RT5682_PLL1_SRC_MCLK);
+ break;
+ case RT5682_PLL1_S_BCLK1:
+ snd_soc_component_update_bits(component,
+ RT5682_GLB_CLK, RT5682_PLL1_SRC_MASK,
+ RT5682_PLL1_SRC_BCLK1);
+ break;
+ default:
+ dev_err(component->dev, "Unknown PLL1 Source %d\n",
+ source);
+ return -EINVAL;
+ }
+
+ ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
+ if (ret < 0) {
+			dev_err(component->dev, "Unsupported input clock %d\n",
+ freq_in);
+ return ret;
+ }
+
+ dev_dbg(component->dev, "bypass=%d m=%d n=%d k=%d\n",
+ pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
+ pll_code.n_code, pll_code.k_code);
+
+ snd_soc_component_write(component, RT5682_PLL_CTRL_1,
+ pll_code.n_code << RT5682_PLL_N_SFT | pll_code.k_code);
+ snd_soc_component_write(component, RT5682_PLL_CTRL_2,
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT5682_PLL_M_SFT |
+ pll_code.m_bp << RT5682_PLL_M_BP_SFT | RT5682_PLL_RST);
}
- dev_dbg(component->dev, "bypass=%d m=%d n=%d k=%d\n",
- pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
- pll_code.n_code, pll_code.k_code);
+ rt5682->pll_in[pll_id] = freq_in;
+ rt5682->pll_out[pll_id] = freq_out;
+ rt5682->pll_src[pll_id] = source;
- snd_soc_component_write(component, RT5682_PLL_CTRL_1,
- pll_code.n_code << RT5682_PLL_N_SFT | pll_code.k_code);
- snd_soc_component_write(component, RT5682_PLL_CTRL_2,
- (pll_code.m_bp ? 0 : pll_code.m_code) << RT5682_PLL_M_SFT |
- pll_code.m_bp << RT5682_PLL_M_BP_SFT | RT5682_PLL_RST);
+ return 0;
+}
- rt5682->pll_in = freq_in;
- rt5682->pll_out = freq_out;
- rt5682->pll_src = source;
+static int rt5682_set_bclk1_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+
+ rt5682->bclk[dai->id] = ratio;
+
+ switch (ratio) {
+ case 256:
+ snd_soc_component_update_bits(component, RT5682_TDM_TCON_CTRL,
+ RT5682_TDM_BCLK_MS1_MASK, RT5682_TDM_BCLK_MS1_256);
+ break;
+ case 128:
+ snd_soc_component_update_bits(component, RT5682_TDM_TCON_CTRL,
+ RT5682_TDM_BCLK_MS1_MASK, RT5682_TDM_BCLK_MS1_128);
+ break;
+ case 64:
+ snd_soc_component_update_bits(component, RT5682_TDM_TCON_CTRL,
+ RT5682_TDM_BCLK_MS1_MASK, RT5682_TDM_BCLK_MS1_64);
+ break;
+ case 32:
+ snd_soc_component_update_bits(component, RT5682_TDM_TCON_CTRL,
+ RT5682_TDM_BCLK_MS1_MASK, RT5682_TDM_BCLK_MS1_32);
+ break;
+ default:
+ dev_err(dai->dev, "Invalid bclk1 ratio %d\n", ratio);
+ return -EINVAL;
+ }
return 0;
}
-static int rt5682_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+static int rt5682_set_bclk2_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
struct snd_soc_component *component = dai->component;
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
@@ -2280,7 +2461,7 @@ static int rt5682_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
RT5682_I2S2_BCLK_MS2_32);
break;
default:
- dev_err(dai->dev, "Invalid bclk ratio %d\n", ratio);
+ dev_err(dai->dev, "Invalid bclk2 ratio %d\n", ratio);
return -EINVAL;
}
@@ -2319,12 +2500,392 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
return 0;
}
+#ifdef CONFIG_COMMON_CLK
+#define CLK_PLL2_FIN 48000000
+#define CLK_PLL2_FOUT 24576000
+#define CLK_48 48000
+
+static bool rt5682_clk_check(struct rt5682_priv *rt5682)
+{
+ if (!rt5682->master[RT5682_AIF1]) {
+ dev_err(rt5682->component->dev, "sysclk/dai not set correctly\n");
+ return false;
+ }
+ return true;
+}
+
+static int rt5682_wclk_prepare(struct clk_hw *hw)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_WCLK_IDX]);
+ struct snd_soc_component *component = rt5682->component;
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+
+ if (!rt5682_clk_check(rt5682))
+ return -EINVAL;
+
+ snd_soc_dapm_mutex_lock(dapm);
+
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "MICBIAS");
+ snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+ RT5682_PWR_MB, RT5682_PWR_MB);
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "I2S1");
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2F");
+ snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2B");
+ snd_soc_dapm_sync_unlocked(dapm);
+
+ snd_soc_dapm_mutex_unlock(dapm);
+
+ return 0;
+}
+
+static void rt5682_wclk_unprepare(struct clk_hw *hw)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_WCLK_IDX]);
+ struct snd_soc_component *component = rt5682->component;
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+
+ if (!rt5682_clk_check(rt5682))
+ return;
+
+ snd_soc_dapm_mutex_lock(dapm);
+
+ snd_soc_dapm_disable_pin_unlocked(dapm, "MICBIAS");
+ if (!rt5682->jack_type)
+ snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+ RT5682_PWR_MB, 0);
+ snd_soc_dapm_disable_pin_unlocked(dapm, "I2S1");
+ snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2F");
+ snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2B");
+ snd_soc_dapm_sync_unlocked(dapm);
+
+ snd_soc_dapm_mutex_unlock(dapm);
+}
+
+static unsigned long rt5682_wclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_WCLK_IDX]);
+
+ if (!rt5682_clk_check(rt5682))
+ return 0;
+ /*
+ * Only a 48 kHz wclk rate is supported for now.
+ */
+ return CLK_48;
+}
+
+static long rt5682_wclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_WCLK_IDX]);
+
+ if (!rt5682_clk_check(rt5682))
+ return -EINVAL;
+ /*
+ * Only a 48 kHz wclk rate is supported for now.
+ */
+ return CLK_48;
+}
+
+static int rt5682_wclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_WCLK_IDX]);
+ struct snd_soc_component *component = rt5682->component;
+ struct clk *parent_clk;
+ const char * const clk_name = __clk_get_name(hw->clk);
+ int pre_div;
+
+ if (!rt5682_clk_check(rt5682))
+ return -EINVAL;
+
+ /*
+ * Whether or not the wclk's parent clk (mclk) exists, make sure it
+ * is fixed at 48 MHz before setting the wclk rate. This is a
+ * temporary limitation: only a 48 MHz clock provider is accepted.
+ *
+ * The codec is configured regardless, assuming mclk runs at 48 MHz.
+ */
+ parent_clk = clk_get_parent(hw->clk);
+ if (!parent_clk)
+ dev_warn(component->dev,
+ "Parent mclk of wclk not acquired in driver. Please ensure mclk was provided as %d Hz.\n",
+ CLK_PLL2_FIN);
+
+ if (parent_rate != CLK_PLL2_FIN)
+ dev_warn(component->dev, "clk %s only support %d Hz input\n",
+ clk_name, CLK_PLL2_FIN);
+
+ /*
+ * Temporary limitation: only a 48 kHz wclk rate is accepted, and
+ * any other requested rate is forced to 48 kHz.
+ */
+ if (rate != CLK_48) {
+ dev_warn(component->dev, "clk %s only support %d Hz output\n",
+ clk_name, CLK_48);
+ rate = CLK_48;
+ }
+
+ /*
+ * To achieve the rate conversion from 48MHz to 48kHz, PLL2 is needed.
+ */
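+ /*
+ * Concretely: 48 MHz MCLK -> PLL2 -> 24.576 MHz sysclk, and
+ * 24.576 MHz is exactly 512 * 48 kHz, so a suitable I2S
+ * pre-divider for the 48 kHz wclk can always be found below.
+ */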
+ rt5682_set_component_pll(component, RT5682_PLL2, RT5682_PLL2_S_MCLK,
+ CLK_PLL2_FIN, CLK_PLL2_FOUT);
+
+ rt5682_set_component_sysclk(component, RT5682_SCLK_S_PLL2, 0,
+ CLK_PLL2_FOUT, SND_SOC_CLOCK_IN);
+
+ pre_div = rl6231_get_clk_info(rt5682->sysclk, rate);
+
+ snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1,
+ RT5682_I2S_M_DIV_MASK | RT5682_I2S_CLK_SRC_MASK,
+ pre_div << RT5682_I2S_M_DIV_SFT |
+ (rt5682->sysclk_src) << RT5682_I2S_CLK_SRC_SFT);
+
+ return 0;
+}
+
+static unsigned long rt5682_bclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_BCLK_IDX]);
+ struct snd_soc_component *component = rt5682->component;
+ unsigned int bclks_per_wclk;
+
+ snd_soc_component_read(component, RT5682_TDM_TCON_CTRL,
+ &bclks_per_wclk);
+
+ switch (bclks_per_wclk & RT5682_TDM_BCLK_MS1_MASK) {
+ case RT5682_TDM_BCLK_MS1_256:
+ return parent_rate * 256;
+ case RT5682_TDM_BCLK_MS1_128:
+ return parent_rate * 128;
+ case RT5682_TDM_BCLK_MS1_64:
+ return parent_rate * 64;
+ case RT5682_TDM_BCLK_MS1_32:
+ return parent_rate * 32;
+ default:
+ return 0;
+ }
+}
+
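+/*
+ * Illustrative rounding behaviour: a requested rate of exactly
+ * 64 * parent_rate yields factor 64, while anything between two
+ * supported multiples (32/64/128/256) is rounded down to the
+ * lower one.
+ */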
+static unsigned long rt5682_bclk_get_factor(unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned long factor;
+
+ factor = rate / parent_rate;
+ if (factor < 64)
+ return 32;
+ else if (factor < 128)
+ return 64;
+ else if (factor < 256)
+ return 128;
+ else
+ return 256;
+}
+
+static long rt5682_bclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_BCLK_IDX]);
+ unsigned long factor;
+
+ if (!*parent_rate || !rt5682_clk_check(rt5682))
+ return -EINVAL;
+
+ /*
+ * BCLK rates are set as a multiplier of WCLK in HW.
+ * We don't allow changing the parent WCLK. We just do
+ * some rounding down based on the parent WCLK rate
+ * and find the appropriate multiplier of BCLK to
+ * get the rounded down BCLK value.
+ */
+ factor = rt5682_bclk_get_factor(rate, *parent_rate);
+
+ return *parent_rate * factor;
+}
+
+static int rt5682_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rt5682_priv *rt5682 =
+ container_of(hw, struct rt5682_priv,
+ dai_clks_hw[RT5682_DAI_BCLK_IDX]);
+ struct snd_soc_component *component = rt5682->component;
+ struct snd_soc_dai *dai = NULL;
+ unsigned long factor;
+
+ if (!rt5682_clk_check(rt5682))
+ return -EINVAL;
+
+ factor = rt5682_bclk_get_factor(rate, parent_rate);
+
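+ /*
+ * The ratio is programmed through the AIF1 DAI's bclk-ratio
+ * handler, so locate that DAI on the component first.
+ */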
+ for_each_component_dais(component, dai)
+ if (dai->id == RT5682_AIF1)
+ break;
+ if (!dai) {
+ dev_err(component->dev, "dai %d not found in component\n",
+ RT5682_AIF1);
+ return -ENODEV;
+ }
+
+ return rt5682_set_bclk1_ratio(dai, factor);
+}
+
+static const struct clk_ops rt5682_dai_clk_ops[RT5682_DAI_NUM_CLKS] = {
+ [RT5682_DAI_WCLK_IDX] = {
+ .prepare = rt5682_wclk_prepare,
+ .unprepare = rt5682_wclk_unprepare,
+ .recalc_rate = rt5682_wclk_recalc_rate,
+ .round_rate = rt5682_wclk_round_rate,
+ .set_rate = rt5682_wclk_set_rate,
+ },
+ [RT5682_DAI_BCLK_IDX] = {
+ .recalc_rate = rt5682_bclk_recalc_rate,
+ .round_rate = rt5682_bclk_round_rate,
+ .set_rate = rt5682_bclk_set_rate,
+ },
+};
+
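+/*
+ * Expose wclk and bclk to the common clock framework as a small
+ * hierarchy, MCLK (when provided) -> WCLK -> BCLK, so that machine
+ * drivers can manage the DAI clocks through the generic clk API.
+ */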
+static int rt5682_register_dai_clks(struct snd_soc_component *component)
+{
+ struct device *dev = component->dev;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct rt5682_platform_data *pdata = &rt5682->pdata;
+ struct clk_init_data init;
+ struct clk *dai_clk;
+ struct clk_lookup *dai_clk_lookup;
+ struct clk_hw *dai_clk_hw;
+ const char *parent_name;
+ int i, ret;
+
+ for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) {
+ dai_clk_hw = &rt5682->dai_clks_hw[i];
+
+ switch (i) {
+ case RT5682_DAI_WCLK_IDX:
+ /* Make MCLK the parent of WCLK */
+ if (rt5682->mclk) {
+ parent_name = __clk_get_name(rt5682->mclk);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ } else {
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ }
+ break;
+ case RT5682_DAI_BCLK_IDX:
+ /* Make WCLK the parent of BCLK */
+ parent_name = __clk_get_name(
+ rt5682->dai_clks[RT5682_DAI_WCLK_IDX]);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ break;
+ default:
+ dev_err(dev, "Invalid clock index\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ init.name = pdata->dai_clk_names[i];
+ init.ops = &rt5682_dai_clk_ops[i];
+ init.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE;
+ dai_clk_hw->init = &init;
+
+ dai_clk = devm_clk_register(dev, dai_clk_hw);
+ if (IS_ERR(dai_clk)) {
+ dev_warn(dev, "Failed to register %s: %ld\n",
+ init.name, PTR_ERR(dai_clk));
+ ret = PTR_ERR(dai_clk);
+ goto err;
+ }
+ rt5682->dai_clks[i] = dai_clk;
+
+ if (dev->of_node) {
+ devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ dai_clk_hw);
+ } else {
+ dai_clk_lookup = clkdev_create(dai_clk, init.name,
+ "%s", dev_name(dev));
+ if (!dai_clk_lookup) {
+ ret = -ENOMEM;
+ goto err;
+ } else {
+ rt5682->dai_clks_lookup[i] = dai_clk_lookup;
+ }
+ }
+ }
+
+ return 0;
+
+err:
+ do {
+ if (rt5682->dai_clks_lookup[i])
+ clkdev_drop(rt5682->dai_clks_lookup[i]);
+ } while (i-- > 0);
+
+ return ret;
+}
+#endif /* CONFIG_COMMON_CLK */
+
static int rt5682_probe(struct snd_soc_component *component)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct sdw_slave *slave;
+ unsigned long time;
+#ifdef CONFIG_COMMON_CLK
+ int ret;
+#endif
rt5682->component = component;
+ if (rt5682->is_sdw) {
+ slave = rt5682->slave;
+ time = wait_for_completion_timeout(
+ &slave->initialization_complete,
+ msecs_to_jiffies(RT5682_PROBE_TIMEOUT));
+ if (!time) {
+ dev_err(&slave->dev, "Initialization not complete, timed out\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+#ifdef CONFIG_COMMON_CLK
+ /* Check if MCLK provided */
+ rt5682->mclk = devm_clk_get(component->dev, "mclk");
+ if (IS_ERR(rt5682->mclk)) {
+ if (PTR_ERR(rt5682->mclk) != -ENOENT) {
+ ret = PTR_ERR(rt5682->mclk);
+ return ret;
+ }
+ rt5682->mclk = NULL;
+ } else {
+ /* Register CCF DAI clock control */
+ ret = rt5682_register_dai_clks(component);
+ if (ret)
+ return ret;
+ }
+ /* Initial setup for CCF */
+ rt5682->lrck[RT5682_AIF1] = CLK_48;
+#endif
+ }
+
return 0;
}
@@ -2332,7 +2893,16 @@ static void rt5682_remove(struct snd_soc_component *component)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
- rt5682_reset(rt5682->regmap);
+#ifdef CONFIG_COMMON_CLK
+ int i;
+
+ for (i = RT5682_DAI_NUM_CLKS - 1; i >= 0; --i) {
+ if (rt5682->dai_clks_lookup[i])
+ clkdev_drop(rt5682->dai_clks_lookup[i]);
+ }
+#endif
+
+ rt5682_reset(rt5682);
}
#ifdef CONFIG_PM
@@ -2369,14 +2939,203 @@ static const struct snd_soc_dai_ops rt5682_aif1_dai_ops = {
.hw_params = rt5682_hw_params,
.set_fmt = rt5682_set_dai_fmt,
.set_tdm_slot = rt5682_set_tdm_slot,
+ .set_bclk_ratio = rt5682_set_bclk1_ratio,
};
static const struct snd_soc_dai_ops rt5682_aif2_dai_ops = {
.hw_params = rt5682_hw_params,
.set_fmt = rt5682_set_dai_fmt,
- .set_bclk_ratio = rt5682_set_bclk_ratio,
+ .set_bclk_ratio = rt5682_set_bclk2_ratio,
};
+#if IS_ENABLED(CONFIG_SND_SOC_RT5682_SDW)
+struct sdw_stream_data {
+ struct sdw_stream_runtime *sdw_stream;
+};
+
+static int rt5682_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
+ int direction)
+{
+ struct sdw_stream_data *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ stream->sdw_stream = sdw_stream;
+
+ /* Use tx_mask or rx_mask to configure stream tag and set dma_data */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dai->playback_dma_data = stream;
+ else
+ dai->capture_dma_data = stream;
+
+ return 0;
+}
+
+static void rt5682_sdw_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_stream_data *stream;
+
+ stream = snd_soc_dai_get_dma_data(dai, substream);
+ snd_soc_dai_set_dma_data(dai, substream, NULL);
+ kfree(stream);
+}
+
+static int rt5682_sdw_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct sdw_stream_config stream_config;
+ struct sdw_port_config port_config;
+ enum sdw_data_direction direction;
+ struct sdw_stream_data *stream;
+ int retval, port, num_channels;
+ unsigned int val_p = 0, val_c = 0, osr_p = 0, osr_c = 0;
+
+ dev_dbg(dai->dev, "%s %s", __func__, dai->name);
+ stream = snd_soc_dai_get_dma_data(dai, substream);
+
+ if (!stream)
+ return -ENOMEM;
+
+ if (!rt5682->slave)
+ return -EINVAL;
+
+ /* SoundWire specific configuration */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ direction = SDW_DATA_DIR_RX;
+ port = 1;
+ } else {
+ direction = SDW_DATA_DIR_TX;
+ port = 2;
+ }
+
+ stream_config.frame_rate = params_rate(params);
+ stream_config.ch_count = params_channels(params);
+ stream_config.bps = snd_pcm_format_width(params_format(params));
+ stream_config.direction = direction;
+
+ num_channels = params_channels(params);
+ port_config.ch_mask = (1 << (num_channels)) - 1;
+ port_config.num = port;
+
+ retval = sdw_stream_add_slave(rt5682->slave, &stream_config,
+ &port_config, 1, stream->sdw_stream);
+ if (retval) {
+ dev_err(dai->dev, "Unable to configure port\n");
+ return retval;
+ }
+
+ switch (params_rate(params)) {
+ case 48000:
+ val_p = RT5682_SDW_REF_1_48K;
+ val_c = RT5682_SDW_REF_2_48K;
+ break;
+ case 96000:
+ val_p = RT5682_SDW_REF_1_96K;
+ val_c = RT5682_SDW_REF_2_96K;
+ break;
+ case 192000:
+ val_p = RT5682_SDW_REF_1_192K;
+ val_c = RT5682_SDW_REF_2_192K;
+ break;
+ case 32000:
+ val_p = RT5682_SDW_REF_1_32K;
+ val_c = RT5682_SDW_REF_2_32K;
+ break;
+ case 24000:
+ val_p = RT5682_SDW_REF_1_24K;
+ val_c = RT5682_SDW_REF_2_24K;
+ break;
+ case 16000:
+ val_p = RT5682_SDW_REF_1_16K;
+ val_c = RT5682_SDW_REF_2_16K;
+ break;
+ case 12000:
+ val_p = RT5682_SDW_REF_1_12K;
+ val_c = RT5682_SDW_REF_2_12K;
+ break;
+ case 8000:
+ val_p = RT5682_SDW_REF_1_8K;
+ val_c = RT5682_SDW_REF_2_8K;
+ break;
+ case 44100:
+ val_p = RT5682_SDW_REF_1_44K;
+ val_c = RT5682_SDW_REF_2_44K;
+ break;
+ case 88200:
+ val_p = RT5682_SDW_REF_1_88K;
+ val_c = RT5682_SDW_REF_2_88K;
+ break;
+ case 176400:
+ val_p = RT5682_SDW_REF_1_176K;
+ val_c = RT5682_SDW_REF_2_176K;
+ break;
+ case 22050:
+ val_p = RT5682_SDW_REF_1_22K;
+ val_c = RT5682_SDW_REF_2_22K;
+ break;
+ case 11025:
+ val_p = RT5682_SDW_REF_1_11K;
+ val_c = RT5682_SDW_REF_2_11K;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (params_rate(params) <= 48000) {
+ osr_p = RT5682_DAC_OSR_D_8;
+ osr_c = RT5682_ADC_OSR_D_8;
+ } else if (params_rate(params) <= 96000) {
+ osr_p = RT5682_DAC_OSR_D_4;
+ osr_c = RT5682_ADC_OSR_D_4;
+ } else {
+ osr_p = RT5682_DAC_OSR_D_2;
+ osr_c = RT5682_ADC_OSR_D_2;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(rt5682->regmap, RT5682_SDW_REF_CLK,
+ RT5682_SDW_REF_1_MASK, val_p);
+ regmap_update_bits(rt5682->regmap, RT5682_ADDA_CLK_1,
+ RT5682_DAC_OSR_MASK, osr_p);
+ } else {
+ regmap_update_bits(rt5682->regmap, RT5682_SDW_REF_CLK,
+ RT5682_SDW_REF_2_MASK, val_c);
+ regmap_update_bits(rt5682->regmap, RT5682_ADDA_CLK_1,
+ RT5682_ADC_OSR_MASK, osr_c);
+ }
+
+ return retval;
+}
+
+static int rt5682_sdw_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct sdw_stream_data *stream =
+ snd_soc_dai_get_dma_data(dai, substream);
+
+ if (!rt5682->slave)
+ return -EINVAL;
+
+ sdw_stream_remove_slave(rt5682->slave, stream->sdw_stream);
+ return 0;
+}
+
+static struct snd_soc_dai_ops rt5682_sdw_ops = {
+ .hw_params = rt5682_sdw_hw_params,
+ .hw_free = rt5682_sdw_hw_free,
+ .set_sdw_stream = rt5682_set_sdw_stream,
+ .shutdown = rt5682_sdw_shutdown,
+};
+#endif
+
static struct snd_soc_dai_driver rt5682_dai[] = {
{
.name = "rt5682-aif1",
@@ -2409,6 +3168,27 @@ static struct snd_soc_dai_driver rt5682_dai[] = {
},
.ops = &rt5682_aif2_dai_ops,
},
+#if IS_ENABLED(CONFIG_SND_SOC_RT5682_SDW)
+ {
+ .name = "rt5682-sdw",
+ .id = RT5682_SDW,
+ .playback = {
+ .stream_name = "SDW Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .capture = {
+ .stream_name = "SDW Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_sdw_ops,
+ },
+#endif
};
static const struct snd_soc_component_driver soc_component_dev_rt5682 = {
@@ -2461,10 +3241,21 @@ static int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev)
&rt5682->pdata.jd_src);
device_property_read_u32(dev, "realtek,btndet-delay",
&rt5682->pdata.btndet_delay);
+ device_property_read_u32(dev, "realtek,dmic-clk-rate-hz",
+ &rt5682->pdata.dmic_clk_rate);
+ device_property_read_u32(dev, "realtek,dmic-delay-ms",
+ &rt5682->pdata.dmic_delay);
rt5682->pdata.ldo1_en = of_get_named_gpio(dev->of_node,
"realtek,ldo1-en-gpios", 0);
+ if (device_property_read_string_array(dev, "clock-output-names",
+ rt5682->pdata.dai_clk_names,
+ RT5682_DAI_NUM_CLKS) < 0)
+ dev_warn(dev, "Using default DAI clk names: %s, %s\n",
+ rt5682->pdata.dai_clk_names[RT5682_DAI_WCLK_IDX],
+ rt5682->pdata.dai_clk_names[RT5682_DAI_BCLK_IDX]);
+
return 0;
}
@@ -2474,7 +3265,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
mutex_lock(&rt5682->calibrate_mutex);
- rt5682_reset(rt5682->regmap);
+ rt5682_reset(rt5682);
regmap_write(rt5682->regmap, RT5682_I2C_CTRL, 0x000f);
regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2af);
usleep_range(15000, 20000);
@@ -2520,6 +3311,221 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
}
+#if IS_ENABLED(CONFIG_SND_SOC_RT5682_SDW)
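+/*
+ * The codec's 16-bit registers are reached through an indirect
+ * SoundWire window: RT5682_SDW_CMD selects read (0) or write (1),
+ * SDW_ADDR_H/L carry the register address, and SDW_DATA_H/L move
+ * the 16-bit value one byte at a time.
+ */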
+static int rt5682_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ unsigned int data_l, data_h;
+
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_CMD, 0);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_H, (reg >> 8) & 0xff);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_L, (reg & 0xff));
+ regmap_read(rt5682->sdw_regmap, RT5682_SDW_DATA_H, &data_h);
+ regmap_read(rt5682->sdw_regmap, RT5682_SDW_DATA_L, &data_l);
+
+ *val = (data_h << 8) | data_l;
+
+ dev_vdbg(dev, "[%s] %04x => %04x\n", __func__, reg, *val);
+
+ return 0;
+}
+
+static int rt5682_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_CMD, 1);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_H, (reg >> 8) & 0xff);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_L, (reg & 0xff));
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_DATA_H, (val >> 8) & 0xff);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_DATA_L, (val & 0xff));
+
+ dev_vdbg(dev, "[%s] %04x <= %04x\n", __func__, reg, val);
+
+ return 0;
+}
+
+static const struct regmap_config rt5682_sdw_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = RT5682_I2C_MODE,
+ .volatile_reg = rt5682_volatile_register,
+ .readable_reg = rt5682_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5682_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt5682_reg),
+ .use_single_read = true,
+ .use_single_write = true,
+ .reg_read = rt5682_sdw_read,
+ .reg_write = rt5682_sdw_write,
+};
+
+int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
+ struct sdw_slave *slave)
+{
+ struct rt5682_priv *rt5682;
+ int ret;
+
+ rt5682 = devm_kzalloc(dev, sizeof(*rt5682), GFP_KERNEL);
+ if (!rt5682)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, rt5682);
+ rt5682->slave = slave;
+ rt5682->sdw_regmap = regmap;
+ rt5682->is_sdw = true;
+
+ rt5682->regmap = devm_regmap_init(dev, NULL, dev, &rt5682_sdw_regmap);
+ if (IS_ERR(rt5682->regmap)) {
+ ret = PTR_ERR(rt5682->regmap);
+ dev_err(dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Mark hw_init as false; HW init is performed once the device
+ * reports as present.
+ */
+ rt5682->hw_init = false;
+ rt5682->first_hw_init = false;
+
+ mutex_init(&rt5682->calibrate_mutex);
+ INIT_DELAYED_WORK(&rt5682->jack_detect_work,
+ rt5682_jack_detect_handler);
+
+ ret = devm_snd_soc_register_component(dev, &soc_component_dev_rt5682,
+ rt5682_dai, ARRAY_SIZE(rt5682_dai));
+
+ dev_dbg(&slave->dev, "%s\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rt5682_sdw_init);
+
+int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ int ret = 0;
+ unsigned int val;
+
+ if (rt5682->hw_init)
+ return 0;
+
+ regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
+ if (val != DEVICE_ID) {
+ pr_err("Device with ID register %x is not rt5682\n", val);
+ return -ENODEV;
+ }
+
+ /*
+ * PM runtime is only enabled when a Slave reports as Attached
+ */
+ if (!rt5682->first_hw_init) {
+ /* set autosuspend parameters */
+ pm_runtime_set_autosuspend_delay(&slave->dev, 3000);
+ pm_runtime_use_autosuspend(&slave->dev);
+
+ /* update count of parent 'active' children */
+ pm_runtime_set_active(&slave->dev);
+
+ /* make sure the device does not suspend immediately */
+ pm_runtime_mark_last_busy(&slave->dev);
+
+ pm_runtime_enable(&slave->dev);
+ }
+
+ pm_runtime_get_noresume(&slave->dev);
+
+ rt5682_reset(rt5682);
+
+ if (rt5682->first_hw_init) {
+ regcache_cache_only(rt5682->regmap, false);
+ regcache_cache_bypass(rt5682->regmap, true);
+ }
+
+ rt5682_calibrate(rt5682);
+
+ if (rt5682->first_hw_init) {
+ regcache_cache_bypass(rt5682->regmap, false);
+ regcache_mark_dirty(rt5682->regmap);
+ regcache_sync(rt5682->regmap);
+
+ /* volatile registers */
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
+ RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
+
+ goto reinit;
+ }
+
+ ret = regmap_multi_reg_write(rt5682->regmap, patch_list,
+ ARRAY_SIZE(patch_list));
+ if (ret != 0)
+ dev_warn(dev, "Failed to apply regmap patch: %d\n", ret);
+
+ regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0000);
+
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
+ RT5682_LDO1_DVO_MASK | RT5682_HP_DRIVER_MASK,
+ RT5682_LDO1_DVO_12 | RT5682_HP_DRIVER_5X);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
+ regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
+ regmap_update_bits(rt5682->regmap, RT5682_BIAS_CUR_CTRL_8,
+ RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
+ regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
+ RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
+ regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
+ RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
+
+ /* Soundwire */
+ regmap_write(rt5682->regmap, RT5682_PLL2_INTERNAL, 0xa266);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_1, 0x1700);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_2, 0x0006);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_3, 0x2600);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_4, 0x0c8f);
+ regmap_write(rt5682->regmap, RT5682_PLL_TRACK_2, 0x3000);
+ regmap_write(rt5682->regmap, RT5682_PLL_TRACK_3, 0x4000);
+ regmap_update_bits(rt5682->regmap, RT5682_GLB_CLK,
+ RT5682_SCLK_SRC_MASK | RT5682_PLL2_SRC_MASK,
+ RT5682_SCLK_SRC_PLL2 | RT5682_PLL2_SRC_SDW);
+
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
+ RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
+ regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
+ RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
+ regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
+ RT5682_SAR_POW_MASK, RT5682_SAR_POW_EN);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_IRQ | RT5682_POW_JDH |
+ RT5682_POW_ANA, RT5682_POW_IRQ |
+ RT5682_POW_JDH | RT5682_POW_ANA);
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_2,
+ RT5682_PWR_JDH, RT5682_PWR_JDH);
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK | RT5682_JD1_IRQ_MASK,
+ RT5682_JD1_EN | RT5682_JD1_IRQ_PUL);
+
+reinit:
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(250));
+
+ /* Mark Slave initialization complete */
+ rt5682->hw_init = true;
+ rt5682->first_hw_init = true;
+
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put_autosuspend(&slave->dev);
+
+ dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rt5682_io_init);
+#endif
+
static int rt5682_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -2586,7 +3592,7 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
return -ENODEV;
}
- rt5682_reset(rt5682->regmap);
+ rt5682_reset(rt5682);
mutex_init(&rt5682->calibrate_mutex);
rt5682_calibrate(rt5682);
@@ -2651,6 +3657,8 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
+ regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
+ RT5682_FIFO_CLK_DIV_MASK, RT5682_FIFO_CLK_DIV_2);
INIT_DELAYED_WORK(&rt5682->jack_detect_work,
rt5682_jack_detect_handler);
@@ -2676,7 +3684,7 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
{
struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
- rt5682_reset(rt5682->regmap);
+ rt5682_reset(rt5682);
}
#ifdef CONFIG_OF
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 18faaa2a49a0..0baeece84ec4 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -10,6 +10,12 @@
#define __RT5682_H__
#include <sound/rt5682.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
#define DEVICE_ID 0x6530
@@ -177,7 +183,7 @@
#define RT5682_TEST_MODE_CTRL_4 0x0148
#define RT5682_TEST_MODE_CTRL_5 0x0149
#define RT5682_PLL1_INTERNAL 0x0150
-#define RT5682_PLL2_INTERNAL 0x0151
+#define RT5682_PLL2_INTERNAL 0x0156
#define RT5682_STO_NG2_CTRL_1 0x0160
#define RT5682_STO_NG2_CTRL_2 0x0161
#define RT5682_STO_NG2_CTRL_3 0x0162
@@ -651,6 +657,8 @@
#define RT5682_DMIC_1_EN_SFT 15
#define RT5682_DMIC_1_DIS (0x0 << 15)
#define RT5682_DMIC_1_EN (0x1 << 15)
+#define RT5682_FIFO_CLK_DIV_MASK (0x7 << 12)
+#define RT5682_FIFO_CLK_DIV_2 (0x1 << 12)
#define RT5682_DMIC_1_DP_MASK (0x3 << 4)
#define RT5682_DMIC_1_DP_SFT 4
#define RT5682_DMIC_1_DP_GPIO2 (0x0 << 4)
@@ -738,7 +746,7 @@
#define RT5682_ADC_OSR_D_24 (0x7 << 12)
#define RT5682_ADC_OSR_D_32 (0x8 << 12)
#define RT5682_ADC_OSR_D_48 (0x9 << 12)
-#define RT5682_I2S_M_DIV_MASK (0xf << 12)
+#define RT5682_I2S_M_DIV_MASK (0xf << 8)
#define RT5682_I2S_M_DIV_SFT 8
#define RT5682_I2S_M_D_1 (0x0 << 8)
#define RT5682_I2S_M_D_2 (0x1 << 8)
@@ -820,6 +828,12 @@
#define RT5682_TDM_DF_PCM_B (0x3 << 11)
#define RT5682_TDM_DF_PCM_A_N (0x6 << 11)
#define RT5682_TDM_DF_PCM_B_N (0x7 << 11)
+#define RT5682_TDM_BCLK_MS1_MASK (0x3 << 9)
+#define RT5682_TDM_BCLK_MS1_SFT 9
+#define RT5682_TDM_BCLK_MS1_32 (0x0 << 9)
+#define RT5682_TDM_BCLK_MS1_64 (0x1 << 9)
+#define RT5682_TDM_BCLK_MS1_128 (0x2 << 9)
+#define RT5682_TDM_BCLK_MS1_256 (0x3 << 9)
#define RT5682_TDM_CL_MASK (0x3 << 4)
#define RT5682_TDM_CL_16 (0x0 << 4)
#define RT5682_TDM_CL_20 (0x1 << 4)
@@ -835,8 +849,8 @@
#define RT5682_TDM_M_LP_INV (0x1 << 1)
#define RT5682_TDM_MS_MASK (0x1 << 0)
#define RT5682_TDM_MS_SFT 0
-#define RT5682_TDM_MS_M (0x0 << 0)
-#define RT5682_TDM_MS_S (0x1 << 0)
+#define RT5682_TDM_MS_S (0x0 << 0)
+#define RT5682_TDM_MS_M (0x1 << 0)
/* Global Clock Control (0x0080) */
#define RT5682_SCLK_SRC_MASK (0x7 << 13)
@@ -1049,6 +1063,28 @@
#define RT5682_PWR_CLK1M_PD (0x0 << 8)
#define RT5682_PWR_CLK1M_PU (0x1 << 8)
+/* PLL2 M/N/K Code Control 1 (0x009b) */
+#define RT5682_PLL2F_K_MASK (0x1f << 8)
+#define RT5682_PLL2F_K_SFT 8
+#define RT5682_PLL2B_K_MASK (0xf << 4)
+#define RT5682_PLL2B_K_SFT 4
+#define RT5682_PLL2B_M_MASK (0xf << 0)
+
+/* PLL2 M/N/K Code Control 2 (0x009c) */
+#define RT5682_PLL2F_M_MASK (0x3f << 8)
+#define RT5682_PLL2F_M_SFT 8
+#define RT5682_PLL2B_N_MASK (0x3f << 0)
+
+/* PLL2 M/N/K Code Control 3 (0x009d) */
+#define RT5682_PLL2F_N_MASK (0x7f << 8)
+#define RT5682_PLL2F_N_SFT 8
+
+/* PLL2 M/N/K Code Control 4 (0x009e) */
+#define RT5682_PLL2B_M_BP_MASK (0x1 << 11)
+#define RT5682_PLL2B_M_BP_SFT 11
+#define RT5682_PLL2F_M_BP_MASK (0x1 << 7)
+#define RT5682_PLL2F_M_BP_SFT 7
+
/* RC Clock Control (0x009f) */
#define RT5682_POW_IRQ (0x1 << 15)
#define RT5682_POW_JDH (0x1 << 14)
@@ -1091,11 +1127,17 @@
#define RT5682_JD1_POL_MASK (0x1 << 13)
#define RT5682_JD1_POL_NOR (0x0 << 13)
#define RT5682_JD1_POL_INV (0x1 << 13)
+#define RT5682_JD1_IRQ_MASK (0x1 << 10)
+#define RT5682_JD1_IRQ_LEV (0x0 << 10)
+#define RT5682_JD1_IRQ_PUL (0x1 << 10)
/* IRQ Control 3 (0x00b8) */
#define RT5682_IL_IRQ_MASK (0x1 << 7)
#define RT5682_IL_IRQ_DIS (0x0 << 7)
#define RT5682_IL_IRQ_EN (0x1 << 7)
+#define RT5682_IL_IRQ_TYPE_MASK (0x1 << 4)
+#define RT5682_IL_IRQ_LEV (0x0 << 4)
+#define RT5682_IL_IRQ_PUL (0x1 << 4)
/* GPIO Control 1 (0x00c0) */
#define RT5682_GP1_PIN_MASK (0x3 << 14)
@@ -1309,11 +1351,19 @@ enum {
RT5682_PLL1_S_MCLK,
RT5682_PLL1_S_BCLK1,
RT5682_PLL1_S_RCCLK,
+ RT5682_PLL2_S_MCLK,
+};
+
+enum {
+ RT5682_PLL1,
+ RT5682_PLL2,
+ RT5682_PLLS,
};
enum {
RT5682_AIF1,
RT5682_AIF2,
+ RT5682_SDW,
RT5682_AIFS
};
@@ -1329,7 +1379,49 @@ enum {
RT5682_CLK_SEL_I2S2_ASRC,
};
+#define RT5682_NUM_SUPPLIES 3
+
+struct rt5682_priv {
+ struct snd_soc_component *component;
+ struct rt5682_platform_data pdata;
+ struct regmap *regmap;
+ struct regmap *sdw_regmap;
+ struct snd_soc_jack *hs_jack;
+ struct regulator_bulk_data supplies[RT5682_NUM_SUPPLIES];
+ struct delayed_work jack_detect_work;
+ struct delayed_work jd_check_work;
+ struct mutex calibrate_mutex;
+ struct sdw_slave *slave;
+ enum sdw_slave_status status;
+ struct sdw_bus_params params;
+ bool hw_init;
+ bool first_hw_init;
+ bool is_sdw;
+
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw dai_clks_hw[RT5682_DAI_NUM_CLKS];
+ struct clk_lookup *dai_clks_lookup[RT5682_DAI_NUM_CLKS];
+ struct clk *dai_clks[RT5682_DAI_NUM_CLKS];
+ struct clk *mclk;
+#endif
+
+ int sysclk;
+ int sysclk_src;
+ int lrck[RT5682_AIFS];
+ int bclk[RT5682_AIFS];
+ int master[RT5682_AIFS];
+
+ int pll_src[RT5682_PLLS];
+ int pll_in[RT5682_PLLS];
+ int pll_out[RT5682_PLLS];
+
+ int jack_type;
+};
+
int rt5682_sel_asrc_clk_src(struct snd_soc_component *component,
unsigned int filter_mask, unsigned int clk_src);
+int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
+ struct sdw_slave *slave);
+int rt5682_io_init(struct device *dev, struct sdw_slave *slave);
#endif /* __RT5682_H__ */
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
index be52886a5edb..7fae88655a0f 100644
--- a/sound/soc/codecs/tas2562.c
+++ b/sound/soc/codecs/tas2562.c
@@ -26,6 +26,24 @@
#define TAS2562_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |\
SNDRV_PCM_FORMAT_S32_LE)
+/*
+ * The DVC coefficient involves floating-point math,
+ * round(10^(volume in dB / 20) * 2^30),
+ * so use a lookup table with one entry per 2 dB step.
+ */
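+/*
+ * Worked example: the first entry is round(10^(-110/20) * 2^30)
+ * = 0x00000d43 and the last entry is round(10^(0/20) * 2^30)
+ * = 2^30 = 0x40000000.
+ */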
+static const unsigned int float_vol_db_lookup[] = {
+0x00000d43, 0x000010b2, 0x00001505, 0x00001a67, 0x00002151,
+0x000029f1, 0x000034cd, 0x00004279, 0x000053af, 0x0000695b,
+0x000084a3, 0x0000a6fa, 0x0000d236, 0x000108a4, 0x00014d2a,
+0x0001a36e, 0x00021008, 0x000298c0, 0x000344df, 0x00041d8f,
+0x00052e5a, 0x000685c8, 0x00083621, 0x000a566d, 0x000d03a7,
+0x0010624d, 0x0014a050, 0x0019f786, 0x0020b0bc, 0x0029279d,
+0x0033cf8d, 0x004139d3, 0x00521d50, 0x00676044, 0x0082248a,
+0x00a3d70a, 0x00ce4328, 0x0103ab3d, 0x0146e75d, 0x019b8c27,
+0x02061b89, 0x028c423f, 0x03352529, 0x0409c2b0, 0x05156d68,
+0x06666666, 0x080e9f96, 0x0a24b062, 0x0cc509ab, 0x10137987,
+0x143d1362, 0x197a967f, 0x2013739e, 0x28619ae9, 0x32d64617,
+0x40000000
+};
+
struct tas2562_data {
struct snd_soc_component *component;
struct gpio_desc *sdz_gpio;
@@ -34,6 +52,12 @@ struct tas2562_data {
struct i2c_client *client;
int v_sense_slot;
int i_sense_slot;
+ int volume_lvl;
+};
+
+enum tas256x_model {
+ TAS2562,
+ TAS2563,
};
static int tas2562_set_bias_level(struct snd_soc_component *component,
@@ -383,21 +407,81 @@ static int tas2562_dac_event(struct snd_soc_dapm_widget *w,
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret;
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- dev_info(tas2562->dev, "SND_SOC_DAPM_POST_PMU\n");
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK,
+ TAS2562_MUTE);
+ if (ret)
+ goto end;
break;
case SND_SOC_DAPM_PRE_PMD:
- dev_info(tas2562->dev, "SND_SOC_DAPM_PRE_PMD\n");
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK,
+ TAS2562_SHUTDOWN);
+ if (ret)
+ goto end;
break;
default:
- break;
+ dev_err(tas2562->dev, "Not supported evevt\n");
+ return -EINVAL;
}
+end:
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2562_volume_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = tas2562->volume_lvl;
return 0;
}
+static int tas2562_volume_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret;
+ u32 reg_val;
+
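+ /*
+ * Look up the 32-bit gain coefficient for the requested level and
+ * split it across the four DVC byte registers, least significant
+ * byte in TAS2562_DVC_CFG4.
+ */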
+ reg_val = float_vol_db_lookup[ucontrol->value.integer.value[0] / 2];
+ ret = snd_soc_component_write(component, TAS2562_DVC_CFG4,
+ (reg_val & 0xff));
+ if (ret)
+ return ret;
+ ret = snd_soc_component_write(component, TAS2562_DVC_CFG3,
+ ((reg_val >> 8) & 0xff));
+ if (ret)
+ return ret;
+ ret = snd_soc_component_write(component, TAS2562_DVC_CFG2,
+ ((reg_val >> 16) & 0xff));
+ if (ret)
+ return ret;
+ ret = snd_soc_component_write(component, TAS2562_DVC_CFG1,
+ ((reg_val >> 24) & 0xff));
+ if (ret)
+ return ret;
+
+ tas2562->volume_lvl = ucontrol->value.integer.value[0];
+
+ return ret;
+}
+
+/* Digital Volume Control. From -110 dB to 0 dB in 1 dB steps */
+static const DECLARE_TLV_DB_SCALE(dvc_tlv, -11000, 100, 0);
+
static DECLARE_TLV_DB_SCALE(tas2562_dac_tlv, 850, 50, 0);
static const struct snd_kcontrol_new isense_switch =
@@ -409,14 +493,24 @@ static const struct snd_kcontrol_new vsense_switch =
1, 1);
static const struct snd_kcontrol_new tas2562_snd_controls[] = {
- SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 0, 0x1c, 0,
+ SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 1, 0x1c, 0,
tas2562_dac_tlv),
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Digital Volume Control",
+ .index = 0,
+ .tlv.p = dvc_tlv,
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = snd_soc_info_volsw,
+ .get = tas2562_volume_control_get,
+ .put = tas2562_volume_control_put,
+ .private_value = SOC_SINGLE_VALUE(TAS2562_DVC_CFG1, 0, 110, 0, 0),
+ },
};
static const struct snd_soc_dapm_widget tas2562_dapm_widgets[] = {
SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0, &tas2562_asi1_mux),
- SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2562_dac_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_SWITCH("ISENSE", TAS2562_PWR_CTRL, 3, 1, &isense_switch),
@@ -431,7 +525,7 @@ static const struct snd_soc_dapm_route tas2562_audio_map[] = {
{"ASI1 Sel", "Left", "ASI1"},
{"ASI1 Sel", "Right", "ASI1"},
{"ASI1 Sel", "LeftRightDiv2", "ASI1"},
- { "DAC", NULL, "DAC IN" },
+ { "DAC", NULL, "ASI1 Sel" },
{ "OUT", NULL, "DAC" },
{"ISENSE", "Switch", "IMON"},
{"VSENSE", "Switch", "VMON"},
@@ -472,6 +566,13 @@ static struct snd_soc_dai_driver tas2562_dai[] = {
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = TAS2562_FORMATS,
},
+ .capture = {
+ .stream_name = "ASI1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = TAS2562_FORMATS,
+ },
.ops = &tas2562_speaker_dai_ops,
},
};
@@ -495,6 +596,10 @@ static const struct reg_default tas2562_reg_defaults[] = {
{ TAS2562_PB_CFG1, 0x20 },
{ TAS2562_TDM_CFG0, 0x09 },
{ TAS2562_TDM_CFG1, 0x02 },
+ { TAS2562_DVC_CFG1, 0x40 },
+ { TAS2562_DVC_CFG2, 0x40 },
+ { TAS2562_DVC_CFG3, 0x00 },
+ { TAS2562_DVC_CFG4, 0x00 },
};
static const struct regmap_config tas2562_regmap_config = {
@@ -564,13 +669,15 @@ static int tas2562_probe(struct i2c_client *client,
}
static const struct i2c_device_id tas2562_id[] = {
- { "tas2562", 0 },
+ { "tas2562", TAS2562 },
+ { "tas2563", TAS2563 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tas2562_id);
static const struct of_device_id tas2562_of_match[] = {
{ .compatible = "ti,tas2562", },
+ { .compatible = "ti,tas2563", },
{ },
};
MODULE_DEVICE_TABLE(of, tas2562_of_match);
diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
index 62e659ab786d..28e75fc431d0 100644
--- a/sound/soc/codecs/tas2562.h
+++ b/sound/soc/codecs/tas2562.h
@@ -35,12 +35,14 @@
#define TAS2562_REV_ID TAS2562_REG(0, 0x7d)
/* Page 2 */
-#define TAS2562_DVC_CFG1 TAS2562_REG(2, 0x01)
-#define TAS2562_DVC_CFG2 TAS2562_REG(2, 0x02)
+#define TAS2562_DVC_CFG1 TAS2562_REG(2, 0x0c)
+#define TAS2562_DVC_CFG2 TAS2562_REG(2, 0x0d)
+#define TAS2562_DVC_CFG3 TAS2562_REG(2, 0x0e)
+#define TAS2562_DVC_CFG4 TAS2562_REG(2, 0x0f)
#define TAS2562_RESET BIT(0)
-#define TAS2562_MODE_MASK 0x3
+#define TAS2562_MODE_MASK GENMASK(1, 0)
#define TAS2562_ACTIVE 0x0
#define TAS2562_MUTE 0x1
#define TAS2562_SHUTDOWN 0x2
@@ -73,8 +75,8 @@
#define TAS2562_TDM_CFG2_RXWLEN_24B BIT(3)
#define TAS2562_TDM_CFG2_RXWLEN_32B (BIT(2) | BIT(3))
-#define TAS2562_VSENSE_POWER_EN BIT(2)
-#define TAS2562_ISENSE_POWER_EN BIT(3)
+#define TAS2562_VSENSE_POWER_EN 2
+#define TAS2562_ISENSE_POWER_EN 3
#define TAS2562_TDM_CFG5_VSNS_EN BIT(6)
#define TAS2562_TDM_CFG5_VSNS_SLOT_MASK GENMASK(5, 0)
diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
new file mode 100644
index 000000000000..38897568ee96
--- /dev/null
+++ b/sound/soc/codecs/tlv320adcx140.c
@@ -0,0 +1,920 @@
+// SPDX-License-Identifier: GPL-2.0
+// TLV320ADCX140 Sound driver
+// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "tlv320adcx140.h"
+
+struct adcx140_priv {
+ struct snd_soc_component *component;
+ struct regulator *supply_areg;
+ struct gpio_desc *gpio_reset;
+ struct regmap *regmap;
+ struct device *dev;
+
+ int micbias_vg;
+
+ unsigned int dai_fmt;
+ unsigned int tdm_delay;
+ unsigned int slot_width;
+};
+
+static const struct reg_default adcx140_reg_defaults[] = {
+ { ADCX140_PAGE_SELECT, 0x00 },
+ { ADCX140_SW_RESET, 0x00 },
+ { ADCX140_SLEEP_CFG, 0x00 },
+ { ADCX140_SHDN_CFG, 0x05 },
+ { ADCX140_ASI_CFG0, 0x30 },
+ { ADCX140_ASI_CFG1, 0x00 },
+ { ADCX140_ASI_CFG2, 0x00 },
+ { ADCX140_ASI_CH1, 0x00 },
+ { ADCX140_ASI_CH2, 0x01 },
+ { ADCX140_ASI_CH3, 0x02 },
+ { ADCX140_ASI_CH4, 0x03 },
+ { ADCX140_ASI_CH5, 0x04 },
+ { ADCX140_ASI_CH6, 0x05 },
+ { ADCX140_ASI_CH7, 0x06 },
+ { ADCX140_ASI_CH8, 0x07 },
+ { ADCX140_MST_CFG0, 0x02 },
+ { ADCX140_MST_CFG1, 0x48 },
+ { ADCX140_ASI_STS, 0xff },
+ { ADCX140_CLK_SRC, 0x10 },
+ { ADCX140_PDMCLK_CFG, 0x40 },
+ { ADCX140_PDM_CFG, 0x00 },
+ { ADCX140_GPIO_CFG0, 0x22 },
+ { ADCX140_GPO_CFG1, 0x00 },
+ { ADCX140_GPO_CFG2, 0x00 },
+ { ADCX140_GPO_CFG3, 0x00 },
+ { ADCX140_GPO_CFG4, 0x00 },
+ { ADCX140_GPO_VAL, 0x00 },
+ { ADCX140_GPIO_MON, 0x00 },
+ { ADCX140_GPI_CFG0, 0x00 },
+ { ADCX140_GPI_CFG1, 0x00 },
+ { ADCX140_GPI_MON, 0x00 },
+ { ADCX140_INT_CFG, 0x00 },
+ { ADCX140_INT_MASK0, 0xff },
+ { ADCX140_INT_LTCH0, 0x00 },
+ { ADCX140_BIAS_CFG, 0x00 },
+ { ADCX140_CH1_CFG0, 0x00 },
+ { ADCX140_CH1_CFG1, 0x00 },
+ { ADCX140_CH1_CFG2, 0xc9 },
+ { ADCX140_CH1_CFG3, 0x80 },
+ { ADCX140_CH1_CFG4, 0x00 },
+ { ADCX140_CH2_CFG0, 0x00 },
+ { ADCX140_CH2_CFG1, 0x00 },
+ { ADCX140_CH2_CFG2, 0xc9 },
+ { ADCX140_CH2_CFG3, 0x80 },
+ { ADCX140_CH2_CFG4, 0x00 },
+ { ADCX140_CH3_CFG0, 0x00 },
+ { ADCX140_CH3_CFG1, 0x00 },
+ { ADCX140_CH3_CFG2, 0xc9 },
+ { ADCX140_CH3_CFG3, 0x80 },
+ { ADCX140_CH3_CFG4, 0x00 },
+ { ADCX140_CH4_CFG0, 0x00 },
+ { ADCX140_CH4_CFG1, 0x00 },
+ { ADCX140_CH4_CFG2, 0xc9 },
+ { ADCX140_CH4_CFG3, 0x80 },
+ { ADCX140_CH4_CFG4, 0x00 },
+ { ADCX140_CH5_CFG2, 0xc9 },
+ { ADCX140_CH5_CFG3, 0x80 },
+ { ADCX140_CH5_CFG4, 0x00 },
+ { ADCX140_CH6_CFG2, 0xc9 },
+ { ADCX140_CH6_CFG3, 0x80 },
+ { ADCX140_CH6_CFG4, 0x00 },
+ { ADCX140_CH7_CFG2, 0xc9 },
+ { ADCX140_CH7_CFG3, 0x80 },
+ { ADCX140_CH7_CFG4, 0x00 },
+ { ADCX140_CH8_CFG2, 0xc9 },
+ { ADCX140_CH8_CFG3, 0x80 },
+ { ADCX140_CH8_CFG4, 0x00 },
+ { ADCX140_DSP_CFG0, 0x01 },
+ { ADCX140_DSP_CFG1, 0x40 },
+ { ADCX140_DRE_CFG0, 0x7b },
+ { ADCX140_AGC_CFG0, 0xe7 },
+ { ADCX140_IN_CH_EN, 0xf0 },
+ { ADCX140_ASI_OUT_CH_EN, 0x00 },
+ { ADCX140_PWR_CFG, 0x00 },
+ { ADCX140_DEV_STS0, 0x00 },
+ { ADCX140_DEV_STS1, 0x80 },
+};
+
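+/*
+ * The register map is paged: 12 pages of 128 registers each, selected
+ * through ADCX140_PAGE_SELECT. regmap's range support presents this
+ * as one flat 0..(12 * 128) address space.
+ */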
+static const struct regmap_range_cfg adcx140_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 12 * 128,
+ .selector_reg = ADCX140_PAGE_SELECT,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 128,
+ },
+};
+
+static bool adcx140_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADCX140_SW_RESET:
+ case ADCX140_DEV_STS0:
+ case ADCX140_DEV_STS1:
+ case ADCX140_ASI_STS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config adcx140_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_defaults = adcx140_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adcx140_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .ranges = adcx140_ranges,
+ .num_ranges = ARRAY_SIZE(adcx140_ranges),
+ .max_register = 12 * 128,
+ .volatile_reg = adcx140_volatile,
+};
+
+/* Digital Volume control. From -100 to 27 dB in 0.5 dB steps */
+static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10000, 50, 0);
+
+/* ADC gain. From 0 to 42 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(adc_tlv, 0, 100, 0);
+
+/* DRE Level. From -12 dB to -66 dB in 1 dB steps */
+static DECLARE_TLV_DB_SCALE(dre_thresh_tlv, -6600, 100, 0);
+/* DRE Max Gain. From 2 dB to 26 dB in 2 dB steps */
+static DECLARE_TLV_DB_SCALE(dre_gain_tlv, 200, 200, 0);
+
+/* AGC Level. From -6 dB to -36 dB in 2 dB steps */
+static DECLARE_TLV_DB_SCALE(agc_thresh_tlv, -3600, 200, 0);
+/* AGC Max Gain. From 3 dB to 42 dB in 3 dB steps */
+static DECLARE_TLV_DB_SCALE(agc_gain_tlv, 300, 300, 0);
+
+static const char * const decimation_filter_text[] = {
+ "Linear Phase", "Low Latency", "Ultra-low Latency"
+};
+
+static SOC_ENUM_SINGLE_DECL(decimation_filter_enum, ADCX140_DSP_CFG0, 4,
+ decimation_filter_text);
+
+static const struct snd_kcontrol_new decimation_filter_controls[] = {
+ SOC_DAPM_ENUM("Decimation Filter", decimation_filter_enum),
+};
+
+static const char * const resistor_text[] = {
+ "2.5 kOhm", "10 kOhm", "20 kOhm"
+};
+
+static SOC_ENUM_SINGLE_DECL(in1_resistor_enum, ADCX140_CH1_CFG0, 2,
+ resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2_resistor_enum, ADCX140_CH2_CFG0, 2,
+ resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3_resistor_enum, ADCX140_CH3_CFG0, 2,
+ resistor_text);
+static SOC_ENUM_SINGLE_DECL(in4_resistor_enum, ADCX140_CH4_CFG0, 2,
+ resistor_text);
+
+static const struct snd_kcontrol_new in1_resistor_controls[] = {
+ SOC_DAPM_ENUM("CH1 Resistor Select", in1_resistor_enum),
+};
+static const struct snd_kcontrol_new in2_resistor_controls[] = {
+ SOC_DAPM_ENUM("CH2 Resistor Select", in2_resistor_enum),
+};
+static const struct snd_kcontrol_new in3_resistor_controls[] = {
+ SOC_DAPM_ENUM("CH3 Resistor Select", in3_resistor_enum),
+};
+static const struct snd_kcontrol_new in4_resistor_controls[] = {
+ SOC_DAPM_ENUM("CH4 Resistor Select", in4_resistor_enum),
+};
+
+/* Analog/Digital Selection */
+static const char * const adcx140_mic_sel_text[] = {"Analog", "Line In", "Digital"};
+static const char * const adcx140_analog_sel_text[] = {"Analog", "Line In"};
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic1p_enum,
+ ADCX140_CH1_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic1p_control =
+SOC_DAPM_ENUM("MIC1P MUX", adcx140_mic1p_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic1_analog_enum,
+ ADCX140_CH1_CFG0, 7,
+ adcx140_analog_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic1_analog_control =
+SOC_DAPM_ENUM("MIC1 Analog MUX", adcx140_mic1_analog_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic1m_enum,
+ ADCX140_CH1_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic1m_control =
+SOC_DAPM_ENUM("MIC1M MUX", adcx140_mic1m_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic2p_enum,
+ ADCX140_CH2_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic2p_control =
+SOC_DAPM_ENUM("MIC2P MUX", adcx140_mic2p_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic2_analog_enum,
+ ADCX140_CH2_CFG0, 7,
+ adcx140_analog_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic2_analog_control =
+SOC_DAPM_ENUM("MIC2 Analog MUX", adcx140_mic2_analog_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic2m_enum,
+ ADCX140_CH2_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic2m_control =
+SOC_DAPM_ENUM("MIC2M MUX", adcx140_mic2m_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic3p_enum,
+ ADCX140_CH3_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic3p_control =
+SOC_DAPM_ENUM("MIC3P MUX", adcx140_mic3p_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic3_analog_enum,
+ ADCX140_CH3_CFG0, 7,
+ adcx140_analog_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic3_analog_control =
+SOC_DAPM_ENUM("MIC3 Analog MUX", adcx140_mic3_analog_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic3m_enum,
+ ADCX140_CH3_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic3m_control =
+SOC_DAPM_ENUM("MIC3M MUX", adcx140_mic3m_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic4p_enum,
+ ADCX140_CH4_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic4p_control =
+SOC_DAPM_ENUM("MIC4P MUX", adcx140_mic4p_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic4_analog_enum,
+ ADCX140_CH4_CFG0, 7,
+ adcx140_analog_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic4_analog_control =
+SOC_DAPM_ENUM("MIC4 Analog MUX", adcx140_mic4_analog_enum);
+
+static SOC_ENUM_SINGLE_DECL(adcx140_mic4m_enum,
+ ADCX140_CH4_CFG0, 5,
+ adcx140_mic_sel_text);
+
+static const struct snd_kcontrol_new adcx140_dapm_mic4m_control =
+SOC_DAPM_ENUM("MIC4M MUX", adcx140_mic4m_enum);
+
+static const struct snd_kcontrol_new adcx140_dapm_ch1_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_ASI_OUT_CH_EN, 7, 1, 0);
+static const struct snd_kcontrol_new adcx140_dapm_ch2_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_ASI_OUT_CH_EN, 6, 1, 0);
+static const struct snd_kcontrol_new adcx140_dapm_ch3_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_ASI_OUT_CH_EN, 5, 1, 0);
+static const struct snd_kcontrol_new adcx140_dapm_ch4_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_ASI_OUT_CH_EN, 4, 1, 0);
+
+static const struct snd_kcontrol_new adcx140_dapm_ch1_dre_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_CH1_CFG0, 0, 1, 0);
+static const struct snd_kcontrol_new adcx140_dapm_ch2_dre_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_CH2_CFG0, 0, 1, 0);
+static const struct snd_kcontrol_new adcx140_dapm_ch3_dre_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_CH3_CFG0, 0, 1, 0);
+static const struct snd_kcontrol_new adcx140_dapm_ch4_dre_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_CH4_CFG0, 0, 1, 0);
+
+static const struct snd_kcontrol_new adcx140_dapm_dre_en_switch =
+ SOC_DAPM_SINGLE("Switch", ADCX140_DSP_CFG1, 3, 1, 0);
+
+/* Output Mixer */
+static const struct snd_kcontrol_new adcx140_output_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Digital CH1 Switch", 0, 0, 0, 0),
+ SOC_DAPM_SINGLE("Digital CH2 Switch", 0, 0, 0, 0),
+ SOC_DAPM_SINGLE("Digital CH3 Switch", 0, 0, 0, 0),
+ SOC_DAPM_SINGLE("Digital CH4 Switch", 0, 0, 0, 0),
+};
+
+static const struct snd_soc_dapm_widget adcx140_dapm_widgets[] = {
+ /* Analog Differential Inputs */
+ SND_SOC_DAPM_INPUT("MIC1P"),
+ SND_SOC_DAPM_INPUT("MIC1M"),
+ SND_SOC_DAPM_INPUT("MIC2P"),
+ SND_SOC_DAPM_INPUT("MIC2M"),
+ SND_SOC_DAPM_INPUT("MIC3P"),
+ SND_SOC_DAPM_INPUT("MIC3M"),
+ SND_SOC_DAPM_INPUT("MIC4P"),
+ SND_SOC_DAPM_INPUT("MIC4M"),
+
+ SND_SOC_DAPM_OUTPUT("CH1_OUT"),
+ SND_SOC_DAPM_OUTPUT("CH2_OUT"),
+ SND_SOC_DAPM_OUTPUT("CH3_OUT"),
+ SND_SOC_DAPM_OUTPUT("CH4_OUT"),
+ SND_SOC_DAPM_OUTPUT("CH5_OUT"),
+ SND_SOC_DAPM_OUTPUT("CH6_OUT"),
+ SND_SOC_DAPM_OUTPUT("CH7_OUT"),
+ SND_SOC_DAPM_OUTPUT("CH8_OUT"),
+
+ SND_SOC_DAPM_MIXER("Output Mixer", SND_SOC_NOPM, 0, 0,
+ &adcx140_output_mixer_controls[0],
+ ARRAY_SIZE(adcx140_output_mixer_controls)),
+
+ /* Input Selection to MIC_PGA */
+ SND_SOC_DAPM_MUX("MIC1P Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic1p_control),
+ SND_SOC_DAPM_MUX("MIC2P Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic2p_control),
+ SND_SOC_DAPM_MUX("MIC3P Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic3p_control),
+ SND_SOC_DAPM_MUX("MIC4P Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic4p_control),
+
+ /* Input Selection to MIC_PGA */
+ SND_SOC_DAPM_MUX("MIC1 Analog Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic1_analog_control),
+ SND_SOC_DAPM_MUX("MIC2 Analog Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic2_analog_control),
+ SND_SOC_DAPM_MUX("MIC3 Analog Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic3_analog_control),
+ SND_SOC_DAPM_MUX("MIC4 Analog Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic4_analog_control),
+
+ SND_SOC_DAPM_MUX("MIC1M Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic1m_control),
+ SND_SOC_DAPM_MUX("MIC2M Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic2m_control),
+ SND_SOC_DAPM_MUX("MIC3M Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic3m_control),
+ SND_SOC_DAPM_MUX("MIC4M Input Mux", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_mic4m_control),
+
+ SND_SOC_DAPM_PGA("MIC_GAIN_CTL_CH1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MIC_GAIN_CTL_CH2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MIC_GAIN_CTL_CH3", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MIC_GAIN_CTL_CH4", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_ADC("CH1_ADC", "CH1 Capture", ADCX140_IN_CH_EN, 7, 0),
+ SND_SOC_DAPM_ADC("CH2_ADC", "CH2 Capture", ADCX140_IN_CH_EN, 6, 0),
+ SND_SOC_DAPM_ADC("CH3_ADC", "CH3 Capture", ADCX140_IN_CH_EN, 5, 0),
+ SND_SOC_DAPM_ADC("CH4_ADC", "CH4 Capture", ADCX140_IN_CH_EN, 4, 0),
+
+ SND_SOC_DAPM_SWITCH("CH1_ASI_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch1_en_switch),
+ SND_SOC_DAPM_SWITCH("CH2_ASI_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch2_en_switch),
+ SND_SOC_DAPM_SWITCH("CH3_ASI_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch3_en_switch),
+ SND_SOC_DAPM_SWITCH("CH4_ASI_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch4_en_switch),
+
+ SND_SOC_DAPM_SWITCH("DRE_ENABLE", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_dre_en_switch),
+
+ SND_SOC_DAPM_SWITCH("CH1_DRE_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch1_dre_en_switch),
+ SND_SOC_DAPM_SWITCH("CH2_DRE_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch2_dre_en_switch),
+ SND_SOC_DAPM_SWITCH("CH3_DRE_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch3_dre_en_switch),
+ SND_SOC_DAPM_SWITCH("CH4_DRE_EN", SND_SOC_NOPM, 0, 0,
+ &adcx140_dapm_ch4_dre_en_switch),
+
+ SND_SOC_DAPM_MUX("IN1 Analog Mic Resistor", SND_SOC_NOPM, 0, 0,
+ in1_resistor_controls),
+ SND_SOC_DAPM_MUX("IN2 Analog Mic Resistor", SND_SOC_NOPM, 0, 0,
+ in2_resistor_controls),
+ SND_SOC_DAPM_MUX("IN3 Analog Mic Resistor", SND_SOC_NOPM, 0, 0,
+ in3_resistor_controls),
+ SND_SOC_DAPM_MUX("IN4 Analog Mic Resistor", SND_SOC_NOPM, 0, 0,
+ in4_resistor_controls),
+
+ SND_SOC_DAPM_MUX("Decimation Filter", SND_SOC_NOPM, 0, 0,
+ decimation_filter_controls),
+};
+
+static const struct snd_soc_dapm_route adcx140_audio_map[] = {
+ /* Outputs */
+ {"CH1_OUT", NULL, "Output Mixer"},
+ {"CH2_OUT", NULL, "Output Mixer"},
+ {"CH3_OUT", NULL, "Output Mixer"},
+ {"CH4_OUT", NULL, "Output Mixer"},
+
+ {"CH1_ASI_EN", "Switch", "CH1_ADC"},
+ {"CH2_ASI_EN", "Switch", "CH2_ADC"},
+ {"CH3_ASI_EN", "Switch", "CH3_ADC"},
+ {"CH4_ASI_EN", "Switch", "CH4_ADC"},
+
+ {"Decimation Filter", "Linear Phase", "DRE_ENABLE"},
+ {"Decimation Filter", "Low Latency", "DRE_ENABLE"},
+ {"Decimation Filter", "Ultra-low Latency", "DRE_ENABLE"},
+
+ {"DRE_ENABLE", "Switch", "CH1_DRE_EN"},
+ {"DRE_ENABLE", "Switch", "CH2_DRE_EN"},
+ {"DRE_ENABLE", "Switch", "CH3_DRE_EN"},
+ {"DRE_ENABLE", "Switch", "CH4_DRE_EN"},
+
+ {"CH1_DRE_EN", "Switch", "CH1_ADC"},
+ {"CH2_DRE_EN", "Switch", "CH2_ADC"},
+ {"CH3_DRE_EN", "Switch", "CH3_ADC"},
+ {"CH4_DRE_EN", "Switch", "CH4_ADC"},
+
+ /* Mic input */
+ {"CH1_ADC", NULL, "MIC_GAIN_CTL_CH1"},
+ {"CH2_ADC", NULL, "MIC_GAIN_CTL_CH2"},
+ {"CH3_ADC", NULL, "MIC_GAIN_CTL_CH3"},
+ {"CH4_ADC", NULL, "MIC_GAIN_CTL_CH4"},
+
+ {"MIC_GAIN_CTL_CH1", NULL, "IN1 Analog Mic Resistor"},
+ {"MIC_GAIN_CTL_CH1", NULL, "IN1 Analog Mic Resistor"},
+ {"MIC_GAIN_CTL_CH2", NULL, "IN2 Analog Mic Resistor"},
+ {"MIC_GAIN_CTL_CH2", NULL, "IN2 Analog Mic Resistor"},
+ {"MIC_GAIN_CTL_CH3", NULL, "IN3 Analog Mic Resistor"},
+ {"MIC_GAIN_CTL_CH3", NULL, "IN3 Analog Mic Resistor"},
+ {"MIC_GAIN_CTL_CH4", NULL, "IN4 Analog Mic Resistor"},
+ {"MIC_GAIN_CTL_CH4", NULL, "IN4 Analog Mic Resistor"},
+
+ {"IN1 Analog Mic Resistor", "2.5 kOhm", "MIC1P Input Mux"},
+ {"IN1 Analog Mic Resistor", "10 kOhm", "MIC1P Input Mux"},
+ {"IN1 Analog Mic Resistor", "20 kOhm", "MIC1P Input Mux"},
+
+ {"IN1 Analog Mic Resistor", "2.5 kOhm", "MIC1M Input Mux"},
+ {"IN1 Analog Mic Resistor", "10 kOhm", "MIC1M Input Mux"},
+ {"IN1 Analog Mic Resistor", "20 kOhm", "MIC1M Input Mux"},
+
+ {"IN2 Analog Mic Resistor", "2.5 kOhm", "MIC2P Input Mux"},
+ {"IN2 Analog Mic Resistor", "10 kOhm", "MIC2P Input Mux"},
+ {"IN2 Analog Mic Resistor", "20 kOhm", "MIC2P Input Mux"},
+
+ {"IN2 Analog Mic Resistor", "2.5 kOhm", "MIC2M Input Mux"},
+ {"IN2 Analog Mic Resistor", "10 kOhm", "MIC2M Input Mux"},
+ {"IN2 Analog Mic Resistor", "20 kOhm", "MIC2M Input Mux"},
+
+ {"IN3 Analog Mic Resistor", "2.5 kOhm", "MIC3P Input Mux"},
+ {"IN3 Analog Mic Resistor", "10 kOhm", "MIC3P Input Mux"},
+ {"IN3 Analog Mic Resistor", "20 kOhm", "MIC3P Input Mux"},
+
+ {"IN3 Analog Mic Resistor", "2.5 kOhm", "MIC3M Input Mux"},
+ {"IN3 Analog Mic Resistor", "10 kOhm", "MIC3M Input Mux"},
+ {"IN3 Analog Mic Resistor", "20 kOhm", "MIC3M Input Mux"},
+
+ {"IN4 Analog Mic Resistor", "2.5 kOhm", "MIC4P Input Mux"},
+ {"IN4 Analog Mic Resistor", "10 kOhm", "MIC4P Input Mux"},
+ {"IN4 Analog Mic Resistor", "20 kOhm", "MIC4P Input Mux"},
+
+ {"IN4 Analog Mic Resistor", "2.5 kOhm", "MIC4M Input Mux"},
+ {"IN4 Analog Mic Resistor", "10 kOhm", "MIC4M Input Mux"},
+ {"IN4 Analog Mic Resistor", "20 kOhm", "MIC4M Input Mux"},
+
+ {"MIC1 Analog Mux", "Line In", "MIC1P"},
+ {"MIC2 Analog Mux", "Line In", "MIC2P"},
+ {"MIC3 Analog Mux", "Line In", "MIC3P"},
+ {"MIC4 Analog Mux", "Line In", "MIC4P"},
+
+ {"MIC1P Input Mux", "Analog", "MIC1P"},
+ {"MIC1M Input Mux", "Analog", "MIC1M"},
+ {"MIC2P Input Mux", "Analog", "MIC2P"},
+ {"MIC2M Input Mux", "Analog", "MIC2M"},
+ {"MIC3P Input Mux", "Analog", "MIC3P"},
+ {"MIC3M Input Mux", "Analog", "MIC3M"},
+ {"MIC4P Input Mux", "Analog", "MIC4P"},
+ {"MIC4M Input Mux", "Analog", "MIC4M"},
+};
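
Each snd_soc_dapm_route entry above is a { sink, control, source } triple: a NULL control is an unconditional connection, while a named control (such as "Switch") gates the path through the matching kcontrol. A minimal annotated sketch, reusing widget names from the table:

    static const struct snd_soc_dapm_route example_routes[] = {
            { "CH1_ASI_EN", "Switch", "CH1_ADC" },   /* gated by the CH1_ASI_EN switch */
            { "CH1_ADC", NULL, "MIC_GAIN_CTL_CH1" }, /* always-connected wire */
    };
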
+
+static const struct snd_kcontrol_new adcx140_snd_controls[] = {
+ SOC_SINGLE_TLV("Analog CH1 Mic Gain Volume", ADCX140_CH1_CFG1, 2, 42, 0,
+ adc_tlv),
+ SOC_SINGLE_TLV("Analog CH2 Mic Gain Volume", ADCX140_CH1_CFG2, 2, 42, 0,
+ adc_tlv),
+ SOC_SINGLE_TLV("Analog CH3 Mic Gain Volume", ADCX140_CH1_CFG3, 2, 42, 0,
+ adc_tlv),
+ SOC_SINGLE_TLV("Analog CH4 Mic Gain Volume", ADCX140_CH1_CFG4, 2, 42, 0,
+ adc_tlv),
+
+ SOC_SINGLE_TLV("DRE Threshold", ADCX140_DRE_CFG0, 4, 9, 0,
+ dre_thresh_tlv),
+ SOC_SINGLE_TLV("DRE Max Gain", ADCX140_DRE_CFG0, 0, 12, 0,
+ dre_gain_tlv),
+
+ SOC_SINGLE_TLV("AGC Threshold", ADCX140_AGC_CFG0, 4, 15, 0,
+ agc_thresh_tlv),
+ SOC_SINGLE_TLV("AGC Max Gain", ADCX140_AGC_CFG0, 0, 13, 0,
+ agc_gain_tlv),
+
+ SOC_SINGLE_TLV("Digital CH1 Out Volume", ADCX140_CH1_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+ SOC_SINGLE_TLV("Digital CH2 Out Volume", ADCX140_CH2_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+ SOC_SINGLE_TLV("Digital CH3 Out Volume", ADCX140_CH3_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+ SOC_SINGLE_TLV("Digital CH4 Out Volume", ADCX140_CH4_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+ SOC_SINGLE_TLV("Digital CH5 Out Volume", ADCX140_CH5_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+ SOC_SINGLE_TLV("Digital CH6 Out Volume", ADCX140_CH6_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+ SOC_SINGLE_TLV("Digital CH7 Out Volume", ADCX140_CH7_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+ SOC_SINGLE_TLV("Digital CH8 Out Volume", ADCX140_CH8_CFG2,
+ 0, 0xff, 0, dig_vol_tlv),
+};
+
+static int adcx140_reset(struct adcx140_priv *adcx140)
+{
+ int ret = 0;
+
+ if (adcx140->gpio_reset) {
+ gpiod_direction_output(adcx140->gpio_reset, 0);
+ /* 8.4.1: wait for hw shutdown (25ms) + >= 1ms */
+ usleep_range(30000, 100000);
+ gpiod_direction_output(adcx140->gpio_reset, 1);
+ } else {
+ ret = regmap_write(adcx140->regmap, ADCX140_SW_RESET,
+ ADCX140_RESET);
+ }
+
+ /* 8.4.2: wait >= 10 ms after entering sleep mode. */
+ usleep_range(10000, 100000);
+
+ return ret;
+}
+
+static int adcx140_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ u8 data = 0;
+
+ switch (params_width(params)) {
+ case 16:
+ data = ADCX140_16_BIT_WORD;
+ break;
+ case 20:
+ data = ADCX140_20_BIT_WORD;
+ break;
+ case 24:
+ data = ADCX140_24_BIT_WORD;
+ break;
+ case 32:
+ data = ADCX140_32_BIT_WORD;
+ break;
+ default:
+ dev_err(component->dev, "%s: Unsupported width %d\n",
+ __func__, params_width(params));
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, ADCX140_ASI_CFG0,
+ ADCX140_WORD_LEN_MSK, data);
+
+ return 0;
+}
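
snd_soc_component_update_bits() is a read-modify-write, so only the word-length field of ADCX140_ASI_CFG0 changes and the other ASI bits survive. A worked example of the masking, using the defines from tlv320adcx140.h (illustrative values, not driver code):

    u8 cfg = ADCX140_I2S_MODE_BIT;      /* 0x40: I2S mode, 16-bit words */

    cfg = (cfg & ~ADCX140_WORD_LEN_MSK) | ADCX140_24_BIT_WORD;
    /* cfg == 0x60: word length switched to 24 bits, I2S bit untouched */
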
+
+static int adcx140_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_component *component = codec_dai->component;
+ struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
+ u8 iface_reg1 = 0;
+ u8 iface_reg2 = 0;
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ iface_reg2 |= ADCX140_BCLK_FSYNC_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ default:
+ dev_err(component->dev, "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+
+ /* signal polarity */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_IF:
+ iface_reg1 |= ADCX140_FSYNCINV_BIT;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ iface_reg1 |= ADCX140_BCLKINV_BIT | ADCX140_FSYNCINV_BIT;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ iface_reg1 |= ADCX140_BCLKINV_BIT;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ dev_err(component->dev, "Invalid DAI clock signal polarity\n");
+ return -EINVAL;
+ }
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ iface_reg1 |= ADCX140_I2S_MODE_BIT;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ iface_reg1 |= ADCX140_LEFT_JUST_BIT;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ break;
+ default:
+ dev_err(component->dev, "Invalid DAI interface format\n");
+ return -EINVAL;
+ }
+
+ adcx140->dai_fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+
+ snd_soc_component_update_bits(component, ADCX140_ASI_CFG0,
+ ADCX140_FSYNCINV_BIT |
+ ADCX140_BCLKINV_BIT |
+ ADCX140_ASI_FORMAT_MSK,
+ iface_reg1);
+ snd_soc_component_update_bits(component, ADCX140_MST_CFG0,
+ ADCX140_BCLK_FSYNC_MASTER, iface_reg2);
+
+ return 0;
+}
+
+static int adcx140_set_dai_tdm_slot(struct snd_soc_dai *codec_dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = codec_dai->component;
+ struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
+ unsigned int lsb;
+
+ if (tx_mask != rx_mask) {
+ dev_err(component->dev, "tx and rx masks must be symmetric\n");
+ return -EINVAL;
+ }
+
+ /* TDM based on DSP mode requires slots to be adjacent */
+ lsb = __ffs(tx_mask);
+ if ((lsb + 1) != __fls(tx_mask)) {
+ dev_err(component->dev, "Invalid mask, slots must be adjacent\n");
+ return -EINVAL;
+ }
+
+ switch (slot_width) {
+ case 16:
+ case 20:
+ case 24:
+ case 32:
+ break;
+ default:
+ dev_err(component->dev, "Unsupported slot width %d\n", slot_width);
+ return -EINVAL;
+ }
+
+ adcx140->tdm_delay = lsb;
+ adcx140->slot_width = slot_width;
+
+ return 0;
+}
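
__ffs() and __fls() return the 0-based index of the lowest and highest set bit, so the check accepts exactly two neighbouring slots. Worked examples of the mask test above:

    /* tx_mask = 0x0c (slots 2,3):   __ffs = 2, __fls = 3, 2+1 == 3 -> accepted */
    /* tx_mask = 0x05 (slots 0,2):   __ffs = 0, __fls = 2, 0+1 != 2 -> rejected */
    /* tx_mask = 0x04 (slot 2 only): __ffs = 2, __fls = 2, 2+1 != 2 -> rejected */
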
+
+static int adcx140_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
+ int offset = 0;
+ int width = adcx140->slot_width;
+
+ if (!width)
+ width = substream->runtime->sample_bits;
+
+ /* TDM slot selection only valid in DSP_A/_B mode */
+ if (adcx140->dai_fmt == SND_SOC_DAIFMT_DSP_A)
+ offset += (adcx140->tdm_delay * width + 1);
+ else if (adcx140->dai_fmt == SND_SOC_DAIFMT_DSP_B)
+ offset += adcx140->tdm_delay * width;
+
+ /* Configure data offset */
+ snd_soc_component_update_bits(component, ADCX140_ASI_CFG1,
+ ADCX140_TX_OFFSET_MASK, offset);
+
+ return 0;
+}
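
The offset programmed into ADCX140_ASI_CFG1 is counted in BCLK cycles from frame sync; DSP_A carries the extra one-cycle data delay that distinguishes it from DSP_B. For example, with tdm_delay = 2 and slot_width = 32:

    offset = 2 * 32 + 1;    /* DSP_A: 65 BCLK cycles */
    offset = 2 * 32;        /* DSP_B: 64 BCLK cycles */
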
+
+static const struct snd_soc_dai_ops adcx140_dai_ops = {
+ .hw_params = adcx140_hw_params,
+ .set_fmt = adcx140_set_dai_fmt,
+ .prepare = adcx140_prepare,
+ .set_tdm_slot = adcx140_set_dai_tdm_slot,
+};
+
+static int adcx140_codec_probe(struct snd_soc_component *component)
+{
+ struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
+ int sleep_cfg_val = ADCX140_WAKE_DEV;
+ u8 bias_source;
+ u8 vref_source;
+ int ret;
+
+ ret = device_property_read_u8(adcx140->dev, "ti,mic-bias-source",
+ &bias_source);
+ if (ret)
+ bias_source = ADCX140_MIC_BIAS_VAL_VREF;
+
+ if (bias_source < ADCX140_MIC_BIAS_VAL_VREF ||
+ bias_source > ADCX140_MIC_BIAS_VAL_AVDD) {
+ dev_err(adcx140->dev, "Mic Bias source value is invalid\n");
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u8(adcx140->dev, "ti,vref-source",
+ &vref_source);
+ if (ret)
+ vref_source = ADCX140_MIC_BIAS_VREF_275V;
+
+ if (vref_source < ADCX140_MIC_BIAS_VREF_275V ||
+ vref_source > ADCX140_MIC_BIAS_VREF_1375V) {
+ dev_err(adcx140->dev, "Mic Bias source value is invalid\n");
+ return -EINVAL;
+ }
+
+ bias_source = (bias_source << ADCX140_MIC_BIAS_SHIFT) | vref_source;
+
+ ret = adcx140_reset(adcx140);
+ if (ret)
+ goto out;
+
+ if (adcx140->supply_areg == NULL)
+ sleep_cfg_val |= ADCX140_AREG_INTERNAL;
+
+ ret = regmap_write(adcx140->regmap, ADCX140_SLEEP_CFG, sleep_cfg_val);
+ if (ret) {
+ dev_err(adcx140->dev, "setting sleep config failed %d\n", ret);
+ goto out;
+ }
+
+ /* 8.4.3: Wait >= 1ms after entering active mode. */
+ usleep_range(1000, 100000);
+
+ ret = regmap_update_bits(adcx140->regmap, ADCX140_BIAS_CFG,
+ ADCX140_MIC_BIAS_VAL_MSK |
+ ADCX140_MIC_BIAS_VREF_MSK, bias_source);
+ if (ret)
+ dev_err(adcx140->dev, "setting MIC bias failed %d\n", ret);
+out:
+ return ret;
+}
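
Assuming the field layout implied by the masks (bias level in BIAS_CFG bits [6:4], VREF select in bits [1:0]), the packed value always lands inside the update mask. For example, AVDD bias with the 1.375 V reference:

    u8 val = (ADCX140_MIC_BIAS_VAL_AVDD << ADCX140_MIC_BIAS_SHIFT) |
             ADCX140_MIC_BIAS_VREF_1375V;   /* (6 << 4) | 2 == 0x62 */
    /* 0x62 fits inside ADCX140_MIC_BIAS_VAL_MSK | ADCX140_MIC_BIAS_VREF_MSK (0x73) */
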
+
+static int adcx140_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
+ int pwr_cfg = 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ case SND_SOC_BIAS_STANDBY:
+ pwr_cfg = ADCX140_PWR_CFG_BIAS_PDZ | ADCX140_PWR_CFG_PLL_PDZ |
+ ADCX140_PWR_CFG_ADC_PDZ;
+ break;
+ case SND_SOC_BIAS_OFF:
+ pwr_cfg = 0x0;
+ break;
+ }
+
+ return regmap_write(adcx140->regmap, ADCX140_PWR_CFG, pwr_cfg);
+}
+
+static const struct snd_soc_component_driver soc_codec_driver_adcx140 = {
+ .probe = adcx140_codec_probe,
+ .set_bias_level = adcx140_set_bias_level,
+ .controls = adcx140_snd_controls,
+ .num_controls = ARRAY_SIZE(adcx140_snd_controls),
+ .dapm_widgets = adcx140_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adcx140_dapm_widgets),
+ .dapm_routes = adcx140_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(adcx140_audio_map),
+ .suspend_bias_off = 1,
+ .idle_bias_on = 0,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static struct snd_soc_dai_driver adcx140_dai_driver[] = {
+ {
+ .name = "tlv320adcx140-codec",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = ADCX140_MAX_CHANNELS,
+ .rates = ADCX140_RATES,
+ .formats = ADCX140_FORMATS,
+ },
+ .ops = &adcx140_dai_ops,
+ .symmetric_rates = 1,
+ }
+};
+
+static const struct of_device_id tlv320adcx140_of_match[] = {
+ { .compatible = "ti,tlv320adc3140" },
+ { .compatible = "ti,tlv320adc5140" },
+ { .compatible = "ti,tlv320adc6140" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tlv320adcx140_of_match);
+
+static int adcx140_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct adcx140_priv *adcx140;
+ int ret;
+
+ adcx140 = devm_kzalloc(&i2c->dev, sizeof(*adcx140), GFP_KERNEL);
+ if (!adcx140)
+ return -ENOMEM;
+
+ adcx140->dev = &i2c->dev;
+
+ adcx140->gpio_reset = devm_gpiod_get_optional(adcx140->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(adcx140->gpio_reset))
+ return PTR_ERR(adcx140->gpio_reset);
+
+ adcx140->supply_areg = devm_regulator_get_optional(adcx140->dev,
+ "areg");
+ if (IS_ERR(adcx140->supply_areg)) {
+ if (PTR_ERR(adcx140->supply_areg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else
+ adcx140->supply_areg = NULL;
+ } else {
+ ret = regulator_enable(adcx140->supply_areg);
+ if (ret) {
+ dev_err(adcx140->dev, "Failed to enable areg\n");
+ return ret;
+ }
+ }
+
+ adcx140->regmap = devm_regmap_init_i2c(i2c, &adcx140_i2c_regmap);
+ if (IS_ERR(adcx140->regmap)) {
+ ret = PTR_ERR(adcx140->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+ i2c_set_clientdata(i2c, adcx140);
+
+ return devm_snd_soc_register_component(&i2c->dev,
+ &soc_codec_driver_adcx140,
+ adcx140_dai_driver, 1);
+}
+
+static const struct i2c_device_id adcx140_i2c_id[] = {
+ { "tlv320adc3140", 0 },
+ { "tlv320adc5140", 1 },
+ { "tlv320adc6140", 2 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adcx140_i2c_id);
+
+static struct i2c_driver adcx140_i2c_driver = {
+ .driver = {
+ .name = "tlv320adcx140-codec",
+ .of_match_table = of_match_ptr(tlv320adcx140_of_match),
+ },
+ .probe = adcx140_i2c_probe,
+ .id_table = adcx140_i2c_id,
+};
+module_i2c_driver(adcx140_i2c_driver);
+
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_DESCRIPTION("ASoC TLV320ADCX140 CODEC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tlv320adcx140.h b/sound/soc/codecs/tlv320adcx140.h
new file mode 100644
index 000000000000..6d055e55909e
--- /dev/null
+++ b/sound/soc/codecs/tlv320adcx140.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TLV320ADCX140 Sound driver
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _TLV320ADCX140_H
+#define _TLV320ADCX140_H
+
+#define ADCX140_RATES (SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000)
+
+#define ADCX140_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#define ADCX140_PAGE_SELECT 0x00
+#define ADCX140_SW_RESET 0x01
+#define ADCX140_SLEEP_CFG 0x02
+#define ADCX140_SHDN_CFG 0x05
+#define ADCX140_ASI_CFG0 0x07
+#define ADCX140_ASI_CFG1 0x08
+#define ADCX140_ASI_CFG2 0x09
+#define ADCX140_ASI_CH1 0x0b
+#define ADCX140_ASI_CH2 0x0c
+#define ADCX140_ASI_CH3 0x0d
+#define ADCX140_ASI_CH4 0x0e
+#define ADCX140_ASI_CH5 0x0f
+#define ADCX140_ASI_CH6 0x10
+#define ADCX140_ASI_CH7 0x11
+#define ADCX140_ASI_CH8 0x12
+#define ADCX140_MST_CFG0 0x13
+#define ADCX140_MST_CFG1 0x14
+#define ADCX140_ASI_STS 0x15
+#define ADCX140_CLK_SRC 0x16
+#define ADCX140_PDMCLK_CFG 0x1f
+#define ADCX140_PDM_CFG 0x20
+#define ADCX140_GPIO_CFG0 0x21
+#define ADCX140_GPO_CFG1 0x22
+#define ADCX140_GPO_CFG2 0x23
+#define ADCX140_GPO_CFG3 0x24
+#define ADCX140_GPO_CFG4 0x25
+#define ADCX140_GPO_VAL 0x29
+#define ADCX140_GPIO_MON 0x2a
+#define ADCX140_GPI_CFG0 0x2b
+#define ADCX140_GPI_CFG1 0x2c
+#define ADCX140_GPI_MON 0x2f
+#define ADCX140_INT_CFG 0x32
+#define ADCX140_INT_MASK0 0x33
+#define ADCX140_INT_LTCH0 0x36
+#define ADCX140_BIAS_CFG 0x3b
+#define ADCX140_CH1_CFG0 0x3c
+#define ADCX140_CH1_CFG1 0x3d
+#define ADCX140_CH1_CFG2 0x3e
+#define ADCX140_CH1_CFG3 0x3f
+#define ADCX140_CH1_CFG4 0x40
+#define ADCX140_CH2_CFG0 0x41
+#define ADCX140_CH2_CFG1 0x42
+#define ADCX140_CH2_CFG2 0x43
+#define ADCX140_CH2_CFG3 0x44
+#define ADCX140_CH2_CFG4 0x45
+#define ADCX140_CH3_CFG0 0x46
+#define ADCX140_CH3_CFG1 0x47
+#define ADCX140_CH3_CFG2 0x48
+#define ADCX140_CH3_CFG3 0x49
+#define ADCX140_CH3_CFG4 0x4a
+#define ADCX140_CH4_CFG0 0x4b
+#define ADCX140_CH4_CFG1 0x4c
+#define ADCX140_CH4_CFG2 0x4d
+#define ADCX140_CH4_CFG3 0x4e
+#define ADCX140_CH4_CFG4 0x4f
+#define ADCX140_CH5_CFG2 0x52
+#define ADCX140_CH5_CFG3 0x53
+#define ADCX140_CH5_CFG4 0x54
+#define ADCX140_CH6_CFG2 0x57
+#define ADCX140_CH6_CFG3 0x58
+#define ADCX140_CH6_CFG4 0x59
+#define ADCX140_CH7_CFG2 0x5c
+#define ADCX140_CH7_CFG3 0x5d
+#define ADCX140_CH7_CFG4 0x5e
+#define ADCX140_CH8_CFG2 0x61
+#define ADCX140_CH8_CFG3 0x62
+#define ADCX140_CH8_CFG4 0x63
+#define ADCX140_DSP_CFG0 0x6b
+#define ADCX140_DSP_CFG1 0x6c
+#define ADCX140_DRE_CFG0 0x6d
+#define ADCX140_AGC_CFG0 0x70
+#define ADCX140_IN_CH_EN 0x73
+#define ADCX140_ASI_OUT_CH_EN 0x74
+#define ADCX140_PWR_CFG 0x75
+#define ADCX140_DEV_STS0 0x76
+#define ADCX140_DEV_STS1 0x77
+
+#define ADCX140_RESET BIT(0)
+
+#define ADCX140_WAKE_DEV BIT(0)
+#define ADCX140_AREG_INTERNAL BIT(7)
+
+#define ADCX140_BCLKINV_BIT BIT(2)
+#define ADCX140_FSYNCINV_BIT BIT(3)
+#define ADCX140_INV_MSK (ADCX140_BCLKINV_BIT | ADCX140_FSYNCINV_BIT)
+#define ADCX140_BCLK_FSYNC_MASTER BIT(7)
+#define ADCX140_I2S_MODE_BIT BIT(6)
+#define ADCX140_LEFT_JUST_BIT BIT(7)
+#define ADCX140_ASI_FORMAT_MSK (ADCX140_I2S_MODE_BIT | ADCX140_LEFT_JUST_BIT)
+
+#define ADCX140_16_BIT_WORD 0x0
+#define ADCX140_20_BIT_WORD BIT(4)
+#define ADCX140_24_BIT_WORD BIT(5)
+#define ADCX140_32_BIT_WORD (BIT(4) | BIT(5))
+#define ADCX140_WORD_LEN_MSK GENMASK(5, 4)
+
+#define ADCX140_MAX_CHANNELS 8
+
+#define ADCX140_MIC_BIAS_VAL_VREF 0
+#define ADCX140_MIC_BIAS_VAL_VREF_1096 1
+#define ADCX140_MIC_BIAS_VAL_AVDD 6
+#define ADCX140_MIC_BIAS_VAL_MSK GENMASK(6, 4)
+#define ADCX140_MIC_BIAS_SHIFT 4
+
+#define ADCX140_MIC_BIAS_VREF_275V 0
+#define ADCX140_MIC_BIAS_VREF_25V 1
+#define ADCX140_MIC_BIAS_VREF_1375V 2
+#define ADCX140_MIC_BIAS_VREF_MSK GENMASK(1, 0)
+
+#define ADCX140_PWR_CFG_BIAS_PDZ BIT(7)
+#define ADCX140_PWR_CFG_ADC_PDZ BIT(6)
+#define ADCX140_PWR_CFG_PLL_PDZ BIT(5)
+
+#define ADCX140_TX_OFFSET_MASK GENMASK(4, 0)
+
+#endif /* _TLV320ADCX140_H */
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index f11ffa28683b..700cc1212770 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -4926,11 +4926,11 @@ static const struct regmap_range_cfg wcd9335_ranges[] = {
.name = "WCD9335",
.range_min = 0x0,
.range_max = WCD9335_MAX_REGISTER,
- .selector_reg = WCD9335_REG(0x0, 0),
+ .selector_reg = WCD9335_SEL_REGISTER,
.selector_mask = 0xff,
.selector_shift = 0,
- .window_start = 0x0,
- .window_len = 0x1000,
+ .window_start = 0x800,
+ .window_len = 0x100,
},
};
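
For context: regmap resolves any register that falls inside a regmap_range_cfg by deriving a page number (written to selector_reg) and an offset into the access window. A simplified sketch of that translation, mirroring drivers/base/regmap with locking and error handling omitted:

    static unsigned int range_resolve(const struct regmap_range_cfg *r,
                                      unsigned int reg, unsigned int *page)
    {
            *page = (reg - r->range_min) / r->window_len;  /* -> selector_reg */
            return r->window_start + (reg - r->range_min) % r->window_len;
    }
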
@@ -4968,12 +4968,12 @@ static const struct regmap_range_cfg wcd9335_ifc_ranges[] = {
{
.name = "WCD9335-IFC-DEV",
.range_min = 0x0,
- .range_max = WCD9335_REG(0, 0x7ff),
- .selector_reg = WCD9335_REG(0, 0x0),
- .selector_mask = 0xff,
+ .range_max = WCD9335_MAX_REGISTER,
+ .selector_reg = WCD9335_SEL_REGISTER,
+ .selector_mask = 0xfff,
.selector_shift = 0,
- .window_start = 0x0,
- .window_len = 0x1000,
+ .window_start = 0x800,
+ .window_len = 0x400,
},
};
@@ -4981,7 +4981,7 @@ static struct regmap_config wcd9335_ifc_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.can_multi_write = true,
- .max_register = WCD9335_REG(0, 0x7FF),
+ .max_register = WCD9335_MAX_REGISTER,
.ranges = wcd9335_ifc_ranges,
.num_ranges = ARRAY_SIZE(wcd9335_ifc_ranges),
};
diff --git a/sound/soc/codecs/wcd9335.h b/sound/soc/codecs/wcd9335.h
index 4d9be2496c30..72060824c743 100644
--- a/sound/soc/codecs/wcd9335.h
+++ b/sound/soc/codecs/wcd9335.h
@@ -8,9 +8,9 @@
* in slimbus mode the reg base starts from 0x800
* in i2s/i2c mode the reg base is 0x0
*/
-#define WCD9335_REG(pg, r) ((pg << 12) | (r) | 0x800)
+#define WCD9335_REG(pg, r) ((pg << 8) | (r))
#define WCD9335_REG_OFFSET(r) (r & 0xFF)
-#define WCD9335_PAGE_OFFSET(r) ((r >> 12) & 0xFF)
+#define WCD9335_PAGE_OFFSET(r) ((r >> 8) & 0xFF)
/* Page-0 Registers */
#define WCD9335_PAGE0_PAGE_REGISTER WCD9335_REG(0x00, 0x000)
@@ -600,7 +600,8 @@
#define WCD9335_CDC_CLK_RST_CTRL_FS_CNT_ENABLE BIT(0)
#define WCD9335_CDC_CLK_RST_CTRL_FS_CNT_DISABLE 0
#define WCD9335_CDC_TOP_TOP_CFG1 WCD9335_REG(0x0d, 0x082)
-#define WCD9335_MAX_REGISTER WCD9335_REG(0x80, 0x0FF)
+#define WCD9335_MAX_REGISTER 0xffff
+#define WCD9335_SEL_REGISTER 0x800
/* SLIMBUS Slave Registers */
#define WCD9335_SLIM_PGD_PORT_INT_EN0 WCD9335_REG(0, 0x30)
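
With the corrected packing, WCD9335_REG(pg, r) is simply (pg << 8) | r, so page and offset round-trip through the helper macros, and the bus access lands inside the 0x800 SLIMbus window configured above. For example:

    /* WCD9335_CDC_TOP_TOP_CFG1 == WCD9335_REG(0x0d, 0x082) == 0x0d82 */
    WCD9335_PAGE_OFFSET(0x0d82);   /* (0x0d82 >> 8) & 0xff == 0x0d */
    WCD9335_REG_OFFSET(0x0d82);    /* 0x0d82 & 0xff == 0x82, accessed at 0x800 + 0x82 */
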
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 158e878abd6c..5269857e2746 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -3,7 +3,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/wcd934x/registers.h>
@@ -11,10 +10,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_clk.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -1202,11 +1198,6 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
WCD934X_ANA_RCO_BG_EN_MASK, 0);
usleep_range(100, 110);
- } else if (sido_src == SIDO_SOURCE_RCO_BG) {
- regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
- WCD934X_ANA_RCO_BG_EN_MASK,
- WCD934X_ANA_RCO_BG_ENABLE);
- usleep_range(100, 110);
regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
WCD934X_ANA_BUCK_PRE_EN1_MASK,
WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
@@ -1219,6 +1210,11 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
usleep_range(100, 110);
+ } else if (sido_src == SIDO_SOURCE_RCO_BG) {
+ regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
+ WCD934X_ANA_RCO_BG_EN_MASK,
+ WCD934X_ANA_RCO_BG_ENABLE);
+ usleep_range(100, 110);
}
wcd->sido_input_src = sido_src;
@@ -1883,20 +1879,16 @@ static int wcd934x_set_channel_map(struct snd_soc_dai *dai,
return -EINVAL;
}
- if (wcd->rx_chs) {
- wcd->num_rx_port = rx_num;
- for (i = 0; i < rx_num; i++) {
- wcd->rx_chs[i].ch_num = rx_slot[i];
- INIT_LIST_HEAD(&wcd->rx_chs[i].list);
- }
+ wcd->num_rx_port = rx_num;
+ for (i = 0; i < rx_num; i++) {
+ wcd->rx_chs[i].ch_num = rx_slot[i];
+ INIT_LIST_HEAD(&wcd->rx_chs[i].list);
}
- if (wcd->tx_chs) {
- wcd->num_tx_port = tx_num;
- for (i = 0; i < tx_num; i++) {
- wcd->tx_chs[i].ch_num = tx_slot[i];
- INIT_LIST_HEAD(&wcd->tx_chs[i].list);
- }
+ wcd->num_tx_port = tx_num;
+ for (i = 0; i < tx_num; i++) {
+ wcd->tx_chs[i].ch_num = tx_slot[i];
+ INIT_LIST_HEAD(&wcd->tx_chs[i].list);
}
return 0;
@@ -3392,18 +3384,15 @@ static void wcd934x_codec_hphdelay_lutbypass(struct snd_soc_component *comp,
{
u8 hph_dly_mask;
u16 hph_lut_bypass_reg = 0;
- u16 hph_comp_ctrl7 = 0;
switch (interp_idx) {
case INTERP_HPHL:
hph_dly_mask = 1;
hph_lut_bypass_reg = WCD934X_CDC_TOP_HPHL_COMP_LUT;
- hph_comp_ctrl7 = WCD934X_CDC_COMPANDER1_CTL7;
break;
case INTERP_HPHR:
hph_dly_mask = 2;
hph_lut_bypass_reg = WCD934X_CDC_TOP_HPHR_COMP_LUT;
- hph_comp_ctrl7 = WCD934X_CDC_COMPANDER2_CTL7;
break;
default:
return;
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 727d6703c905..fbcee21736e8 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -43,7 +43,7 @@ struct dfw_binrec {
u8 command;
u32 length:24;
u32 address;
- uint8_t data[0];
+ uint8_t data[];
} __packed;
struct dfw_inforec {
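
The [0] to [] change converts a GNU zero-length array into a C99 flexible array member: sizeof(struct dfw_binrec) then covers only the fixed header, and allocations can use the struct_size() helper from <linux/overflow.h>. A minimal sketch of the idiom (payload_len is a hypothetical length, not a name from this driver):

    struct dfw_binrec *rec;

    rec = kzalloc(struct_size(rec, data, payload_len), GFP_KERNEL);
    if (!rec)
            return -ENOMEM;
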
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 9dc215b5c504..499e87d1dfcc 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2245,14 +2245,14 @@ static int wm5110_open(struct snd_compr_stream *stream)
struct arizona *arizona = priv->core.arizona;
int n_adsp;
- if (strcmp(rtd->codec_dai->name, "wm5110-dsp-voicectrl") == 0) {
+ if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "wm5110-dsp-voicectrl") == 0) {
n_adsp = 2;
- } else if (strcmp(rtd->codec_dai->name, "wm5110-dsp-trace") == 0) {
+ } else if (strcmp(asoc_rtd_to_codec(rtd, 0)->name, "wm5110-dsp-trace") == 0) {
n_adsp = 0;
} else {
dev_err(arizona->dev,
"No suitable compressed stream for DAI '%s'\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
return -EINVAL;
}
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index dc4fe4f5239d..06ba36595ddd 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -196,14 +196,6 @@ SOC_DAPM_SINGLE("MicN Switch", WM8974_INPUT, 1, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", WM8974_INPUT, 0, 1, 0),
};
-/* AUX Input boost vol */
-static const struct snd_kcontrol_new wm8974_aux_boost_controls =
-SOC_DAPM_SINGLE("Aux Volume", WM8974_ADCBOOST, 0, 7, 0);
-
-/* Mic Input boost vol */
-static const struct snd_kcontrol_new wm8974_mic_boost_controls =
-SOC_DAPM_SINGLE("Mic Volume", WM8974_ADCBOOST, 4, 7, 0);
-
static const struct snd_soc_dapm_widget wm8974_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Speaker Mixer", WM8974_POWER3, 2, 0,
&wm8974_speaker_mixer_controls[0],
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d3d32b501aca..1ef69409ccd1 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1436,12 +1436,12 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
subname = NULL; /* don't append subname */
break;
case 2:
- ret = snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
+ ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
"%s%c %.12s %x", dsp->name, *region_name,
wm_adsp_fw_text[dsp->fw], alg_region->alg);
break;
default:
- ret = snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
+ ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
"%s %.12s %x", dsp->name,
wm_adsp_fw_text[dsp->fw], alg_region->alg);
break;
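
snprintf() returns the length the formatted string would have needed, which can exceed the buffer size and corrupt later "remaining space" arithmetic; scnprintf() returns the bytes actually stored. An illustration:

    char buf[8];

    snprintf(buf, sizeof(buf), "%s", "a-long-name");  /* returns 11 */
    scnprintf(buf, sizeof(buf), "%s", "a-long-name"); /* returns 7: 7 chars + NUL */
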
@@ -3467,22 +3467,22 @@ int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream)
if (wm_adsp_fw[dsp->fw].num_caps == 0) {
adsp_err(dsp, "%s: Firmware does not support compressed API\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
ret = -ENXIO;
goto out;
}
if (wm_adsp_fw[dsp->fw].compr_direction != stream->direction) {
adsp_err(dsp, "%s: Firmware does not support stream direction\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
ret = -EINVAL;
goto out;
}
list_for_each_entry(tmp, &dsp->compr_list, list) {
- if (!strcmp(tmp->name, rtd->codec_dai->name)) {
+ if (!strcmp(tmp->name, asoc_rtd_to_codec(rtd, 0)->name)) {
adsp_err(dsp, "%s: Only a single stream supported per dai\n",
- rtd->codec_dai->name);
+ asoc_rtd_to_codec(rtd, 0)->name);
ret = -EBUSY;
goto out;
}
@@ -3496,7 +3496,7 @@ int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream)
compr->dsp = dsp;
compr->stream = stream;
- compr->name = rtd->codec_dai->name;
+ compr->name = asoc_rtd_to_codec(rtd, 0)->name;
list_add_tail(&compr->list, &dsp->compr_list);
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index b59f1d0e7f84..f2d6f2f81f14 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -676,7 +676,6 @@ struct wsa881x_priv {
int active_ports;
bool port_prepared[WSA881X_MAX_SWR_PORTS];
bool port_enable[WSA881X_MAX_SWR_PORTS];
- bool stream_prepared;
};
static void wsa881x_init(struct wsa881x_priv *wsa881x)
@@ -954,41 +953,6 @@ static const struct snd_soc_dapm_widget wsa881x_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("SPKR"),
};
-static int wsa881x_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct wsa881x_priv *wsa881x = dev_get_drvdata(dai->dev);
- int ret;
-
- if (wsa881x->stream_prepared) {
- sdw_disable_stream(wsa881x->sruntime);
- sdw_deprepare_stream(wsa881x->sruntime);
- wsa881x->stream_prepared = false;
- }
-
-
- ret = sdw_prepare_stream(wsa881x->sruntime);
- if (ret)
- return ret;
-
- /**
- * NOTE: there is a strict hw requirement about the ordering of port
- * enables and actual PA enable. PA enable should only happen after
- * soundwire ports are enabled if not DC on the line is accumulated
- * resulting in Click/Pop Noise
- * PA enable/mute are handled as part of DAPM and digital mute.
- */
-
- ret = sdw_enable_stream(wsa881x->sruntime);
- if (ret) {
- sdw_deprepare_stream(wsa881x->sruntime);
- return ret;
- }
- wsa881x->stream_prepared = true;
-
- return ret;
-}
-
static int wsa881x_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -1016,12 +980,7 @@ static int wsa881x_hw_free(struct snd_pcm_substream *substream,
{
struct wsa881x_priv *wsa881x = dev_get_drvdata(dai->dev);
- if (wsa881x->stream_prepared) {
- sdw_disable_stream(wsa881x->sruntime);
- sdw_deprepare_stream(wsa881x->sruntime);
- sdw_stream_remove_slave(wsa881x->slave, wsa881x->sruntime);
- wsa881x->stream_prepared = false;
- }
+ sdw_stream_remove_slave(wsa881x->slave, wsa881x->sruntime);
return 0;
}
@@ -1052,7 +1011,6 @@ static int wsa881x_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
static struct snd_soc_dai_ops wsa881x_dai_ops = {
.hw_params = wsa881x_hw_params,
- .prepare = wsa881x_prepare,
.hw_free = wsa881x_hw_free,
.mute_stream = wsa881x_digital_mute,
.set_sdw_stream = wsa881x_set_sdw_stream,
@@ -1150,7 +1108,7 @@ static int wsa881x_probe(struct sdw_slave *pdev,
wsa881x->sconfig.type = SDW_STREAM_PDM;
pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0);
pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
- gpiod_set_value(wsa881x->sd_n, 1);
+ gpiod_direction_output(wsa881x->sd_n, 1);
wsa881x->regmap = devm_regmap_init_sdw(pdev, &wsa881x_regmap_config);
if (IS_ERR(wsa881x->regmap)) {
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 7eeca2150b2d..515f88456dbd 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -422,15 +422,15 @@ static int dw_i2s_resume(struct snd_soc_component *component)
{
struct dw_i2s_dev *dev = snd_soc_component_get_drvdata(component);
struct snd_soc_dai *dai;
+ int stream;
if (dev->capability & DW_I2S_MASTER)
clk_enable(dev->clk);
for_each_component_dais(component, dai) {
- if (dai->playback_active)
- dw_i2s_config(dev, SNDRV_PCM_STREAM_PLAYBACK);
- if (dai->capture_active)
- dw_i2s_config(dev, SNDRV_PCM_STREAM_CAPTURE);
+ for_each_pcm_streams(stream)
+ if (dai->stream_active[stream])
+ dw_i2s_config(dev, stream);
}
return 0;
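
dai->stream_active[] is indexed by SNDRV_PCM_STREAM_*, and for_each_pcm_streams() walks SNDRV_PCM_STREAM_PLAYBACK through SNDRV_PCM_STREAM_LAST, so the loop above is equivalent to the old open-coded pair:

    if (dai->stream_active[SNDRV_PCM_STREAM_PLAYBACK])
            dw_i2s_config(dev, SNDRV_PCM_STREAM_PLAYBACK);
    if (dai->stream_active[SNDRV_PCM_STREAM_CAPTURE])
            dw_i2s_config(dev, SNDRV_PCM_STREAM_CAPTURE);
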
diff --git a/sound/soc/dwc/dwc-pcm.c b/sound/soc/dwc/dwc-pcm.c
index 4b25aca3804f..9868e7373d36 100644
--- a/sound/soc/dwc/dwc-pcm.c
+++ b/sound/soc/dwc/dwc-pcm.c
@@ -140,7 +140,7 @@ static int dw_pcm_open(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
snd_soc_set_runtime_hwparams(substream, &dw_pcm_hardware);
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 6f3b768489f6..4ff2d21bb32f 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -31,8 +31,8 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 9ce55feaac22..bb33601fab84 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -159,7 +159,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
return 0;
/* Specific configurations of DAIs starts from here */
- ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, cpu_priv->sysclk_id[tx],
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), cpu_priv->sysclk_id[tx],
cpu_priv->sysclk_freq[tx],
cpu_priv->sysclk_dir[tx]);
if (ret && ret != -ENOTSUPP) {
@@ -168,7 +168,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
}
if (cpu_priv->slot_width) {
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2,
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2,
cpu_priv->slot_width);
if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set TDM slot for cpu dai\n");
@@ -257,7 +257,7 @@ static int fsl_asoc_card_set_bias_level(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
if (dapm->dev != codec_dai->dev)
return 0;
@@ -446,14 +446,14 @@ static int fsl_asoc_card_late_probe(struct snd_soc_card *card)
struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(card);
struct snd_soc_pcm_runtime *rtd = list_first_entry(
&card->rtd_list, struct snd_soc_pcm_runtime, list);
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct codec_priv *codec_priv = &priv->codec_priv;
struct device *dev = card->dev;
int ret;
if (fsl_asoc_card_is_ac97(priv)) {
#if IS_ENABLED(CONFIG_SND_AC97_CODEC)
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_ac97 *ac97 = snd_soc_component_get_drvdata(component);
/*
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index ece130f59d15..e7178817d7a7 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -152,7 +152,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
for_each_dpcm_be(rtd, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *substream_be;
- struct snd_soc_dai *dai = be->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(be, 0);
if (dpcm->fe != rtd)
continue;
@@ -169,7 +169,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
}
/* Override dma_data of the Front-End and config its dmaengine */
- dma_params_fe = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_params_fe = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
dma_params_fe->addr = asrc_priv->paddr + REG_ASRDx(!dir, index);
dma_params_fe->maxburst = dma_params_be->maxburst;
@@ -328,7 +328,7 @@ static int fsl_asrc_dma_startup(struct snd_soc_component *component,
goto dma_chan_err;
}
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
/* Refine the snd_imx_hardware according to caps of DMA. */
ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
@@ -400,7 +400,7 @@ static int fsl_asrc_dma_pcm_new(struct snd_soc_component *component,
return ret;
}
- for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_LAST; i++) {
+ for_each_pcm_streams(i) {
substream = pcm->streams[i].substream;
if (!substream)
continue;
@@ -428,7 +428,7 @@ static void fsl_asrc_dma_pcm_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream;
int i;
- for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_LAST; i++) {
+ for_each_pcm_streams(i) {
substream = pcm->streams[i].substream;
if (!substream)
continue;
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 7858a5499ac5..c711d2d93280 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -370,7 +370,7 @@ static int spdif_set_sample_rate(struct snd_pcm_substream *substream,
int sample_rate)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
struct regmap *regmap = spdif_priv->regmap;
struct platform_device *pdev = spdif_priv->pdev;
@@ -458,7 +458,7 @@ static int fsl_spdif_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct platform_device *pdev = spdif_priv->pdev;
struct regmap *regmap = spdif_priv->regmap;
u32 scr, mask;
@@ -534,7 +534,7 @@ static void fsl_spdif_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct regmap *regmap = spdif_priv->regmap;
u32 scr, mask, i;
@@ -569,7 +569,7 @@ static int fsl_spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
struct platform_device *pdev = spdif_priv->pdev;
u32 sample_rate = params_rate(params);
@@ -597,7 +597,7 @@ static int fsl_spdif_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct regmap *regmap = spdif_priv->regmap;
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
u32 intr = SIE_INTR_FOR(tx);
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 5c97269be346..bad89b0d129e 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -631,7 +631,7 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
int ret;
ret = clk_prepare_enable(ssi->clk);
@@ -655,7 +655,7 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
clk_disable_unprepare(ssi->clk);
}
@@ -854,7 +854,7 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
if (fsl_ssi_is_i2s_master(ssi) &&
ssi->baudclk_streams & BIT(substream->stream)) {
@@ -1059,7 +1059,7 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
switch (cmd) {
diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
index 5ef6881395e0..e09b45de0efd 100644
--- a/sound/soc/fsl/imx-audmix.c
+++ b/sound/soc/fsl/imx-audmix.c
@@ -85,13 +85,13 @@ static int imx_audmix_fe_hw_params(struct snd_pcm_substream *substream,
dir = tx ? SND_SOC_CLOCK_OUT : SND_SOC_CLOCK_IN;
/* set DAI configuration */
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai, fmt);
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0), fmt);
if (ret) {
dev_err(dev, "failed to set cpu dai fmt: %d\n", ret);
return ret;
}
- ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, FSL_SAI_CLK_MAST1, 0, dir);
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), FSL_SAI_CLK_MAST1, 0, dir);
if (ret) {
dev_err(dev, "failed to set cpu sysclk: %d\n", ret);
return ret;
@@ -101,7 +101,7 @@ static int imx_audmix_fe_hw_params(struct snd_pcm_substream *substream,
* Per datasheet, AUDMIX expects 8 slots and 32 bits
* for every slot in TDM mode.
*/
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, BIT(channels) - 1,
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), BIT(channels) - 1,
BIT(channels) - 1, 8, 32);
if (ret)
dev_err(dev, "failed to set cpu dai tdm slot: %d\n", ret);
@@ -125,7 +125,7 @@ static int imx_audmix_be_hw_params(struct snd_pcm_substream *substream,
fmt |= SND_SOC_DAIFMT_CBM_CFM;
/* set AUDMIX DAI configuration */
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai, fmt);
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0), fmt);
if (ret)
dev_err(dev, "failed to set AUDMIX DAI fmt: %d\n", ret);
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index 2b679680c93f..fab2d6c56653 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -27,8 +27,8 @@ static int imx_mc13783_hifi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 4, 16);
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 15e8b9343c35..f45cb4bbb6c4 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -30,7 +30,7 @@ static int imx_sgtl5000_dai_init(struct snd_soc_pcm_runtime *rtd)
struct device *dev = rtd->card->dev;
int ret;
- ret = snd_soc_dai_set_sysclk(rtd->codec_dai, SGTL5000_SYSCLK,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0), SGTL5000_SYSCLK,
data->clk_frequency, SND_SOC_CLOCK_IN);
if (ret) {
dev_err(dev, "could not set codec driver clock params\n");
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index ed7211d744b3..3b8c796d7829 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -115,7 +115,7 @@ static int psc_dma_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct snd_pcm_runtime *runtime = substream->runtime;
struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma);
struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs;
@@ -217,7 +217,7 @@ static int psc_dma_open(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct psc_dma_stream *s;
int rc;
@@ -245,7 +245,7 @@ static int psc_dma_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct psc_dma_stream *s;
dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream);
@@ -271,7 +271,7 @@ psc_dma_pointer(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct psc_dma_stream *s;
dma_addr_t count;
@@ -298,7 +298,7 @@ static int psc_dma_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
- struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_pcm *pcm = rtd->pcm;
size_t size = psc_dma_hardware.buffer_bytes_max;
int rc;
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index 9bc01f374b39..1ab4fbda08cb 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -39,7 +39,7 @@ static int psc_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
u32 mode;
dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i"
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 23617eb09ba1..f7bd90051ce7 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -105,7 +105,7 @@ static int mpc8610_hpcd_startup(struct snd_pcm_substream *substream)
int ret = 0;
/* Tell the codec driver what the serial protocol is. */
- ret = snd_soc_dai_set_fmt(rtd->codec_dai, machine_data->dai_format);
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0), machine_data->dai_format);
if (ret < 0) {
dev_err(dev, "could not set codec driver audio format\n");
return ret;
@@ -115,7 +115,7 @@ static int mpc8610_hpcd_startup(struct snd_pcm_substream *substream)
* Tell the codec driver what the MCLK frequency is, and whether it's
* a slave or master.
*/
- ret = snd_soc_dai_set_sysclk(rtd->codec_dai, 0,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0), 0,
machine_data->clk_frequency,
machine_data->codec_clk_direction);
if (ret < 0) {
diff --git a/sound/soc/fsl/mx27vis-aic32x4.c b/sound/soc/fsl/mx27vis-aic32x4.c
index 38ac4a397742..a36d4e8cd55c 100644
--- a/sound/soc/fsl/mx27vis-aic32x4.c
+++ b/sound/soc/fsl/mx27vis-aic32x4.c
@@ -37,8 +37,8 @@ static int mx27vis_aic32x4_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index 6114b01b90f7..fe3091590f20 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -128,7 +128,7 @@ static int p1022_ds_startup(struct snd_pcm_substream *substream)
int ret = 0;
/* Tell the codec driver what the serial protocol is. */
- ret = snd_soc_dai_set_fmt(rtd->codec_dai, mdata->dai_format);
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0), mdata->dai_format);
if (ret < 0) {
dev_err(dev, "could not set codec driver audio format\n");
return ret;
@@ -138,7 +138,7 @@ static int p1022_ds_startup(struct snd_pcm_substream *substream)
* Tell the codec driver what the MCLK frequency is, and whether it's
* a slave or master.
*/
- ret = snd_soc_dai_set_sysclk(rtd->codec_dai, 0, mdata->clk_frequency,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0), 0, mdata->clk_frequency,
mdata->codec_clk_direction);
if (ret < 0) {
dev_err(dev, "could not set codec driver clock params\n");
diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c
index 72687235c0ae..f5374fe354ab 100644
--- a/sound/soc/fsl/p1022_rdk.c
+++ b/sound/soc/fsl/p1022_rdk.c
@@ -134,14 +134,14 @@ static int p1022_rdk_startup(struct snd_pcm_substream *substream)
int ret = 0;
/* Tell the codec driver what the serial protocol is. */
- ret = snd_soc_dai_set_fmt(rtd->codec_dai, mdata->dai_format);
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0), mdata->dai_format);
if (ret < 0) {
dev_err(dev, "could not set codec driver audio format (ret=%i)\n",
ret);
return ret;
}
- ret = snd_soc_dai_set_pll(rtd->codec_dai, 0, 0, mdata->clk_frequency,
+ ret = snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0), 0, 0, mdata->clk_frequency,
mdata->clk_frequency);
if (ret < 0) {
dev_err(dev, "could not set codec PLL frequency (ret=%i)\n",
diff --git a/sound/soc/fsl/wm1133-ev1.c b/sound/soc/fsl/wm1133-ev1.c
index 52d321bede9c..8b1551c55452 100644
--- a/sound/soc/fsl/wm1133-ev1.c
+++ b/sound/soc/fsl/wm1133-ev1.c
@@ -76,8 +76,8 @@ static int wm1133_ev1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int i, found = 0;
snd_pcm_format_t format = params_format(params);
unsigned int rate = params_rate(params);
@@ -196,7 +196,7 @@ static struct snd_soc_jack_pin mic_jack_pins[] = {
static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
/* Headphone jack detection */
snd_soc_card_jack_new(rtd->card, "Headphone", SND_JACK_HEADPHONE,
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 9b794775df53..8c54dc6710fe 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -213,8 +213,8 @@ EXPORT_SYMBOL_GPL(asoc_simple_startup);
void asoc_simple_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct asoc_simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
@@ -249,8 +249,8 @@ int asoc_simple_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct asoc_simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
@@ -331,22 +331,70 @@ static int asoc_simple_init_dai(struct snd_soc_dai *dai,
return 0;
}
+static int asoc_simple_init_dai_link_params(struct snd_soc_pcm_runtime *rtd,
+ struct simple_dai_props *dai_props)
+{
+ struct snd_soc_dai_link *dai_link = rtd->dai_link;
+ struct snd_soc_component *component;
+ struct snd_soc_pcm_stream *params;
+ struct snd_pcm_hardware hw;
+ int i, ret, stream;
+
+ /* Only codecs should have non_legacy_dai_naming set. */
+ for_each_rtd_components(rtd, i, component) {
+ if (!component->driver->non_legacy_dai_naming)
+ return 0;
+ }
+
+ /* Assumes the capabilities are the same for all supported streams */
+ for_each_pcm_streams(stream) {
+ ret = snd_soc_runtime_calc_hw(rtd, &hw, stream);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(rtd->dev, "simple-card: no valid dai_link params\n");
+ return ret;
+ }
+
+ params = devm_kzalloc(rtd->dev, sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ params->formats = hw.formats;
+ params->rates = hw.rates;
+ params->rate_min = hw.rate_min;
+ params->rate_max = hw.rate_max;
+ params->channels_min = hw.channels_min;
+ params->channels_max = hw.channels_max;
+
+ dai_link->params = params;
+ dai_link->num_params = 1;
+
+ return 0;
+}
+
int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct asoc_simple_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
int ret;
- ret = asoc_simple_init_dai(rtd->codec_dai,
+ ret = asoc_simple_init_dai(asoc_rtd_to_codec(rtd, 0),
dai_props->codec_dai);
if (ret < 0)
return ret;
- ret = asoc_simple_init_dai(rtd->cpu_dai,
+ ret = asoc_simple_init_dai(asoc_rtd_to_cpu(rtd, 0),
dai_props->cpu_dai);
if (ret < 0)
return ret;
+ ret = asoc_simple_init_dai_link_params(rtd, dai_props);
+ if (ret < 0)
+ return ret;
+
return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_dai_init);
diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
index fdd2c73fd2fa..a495d1050d49 100644
--- a/sound/soc/img/img-i2s-in.c
+++ b/sound/soc/img/img-i2s-in.c
@@ -397,7 +397,7 @@ static int img_i2s_in_dma_prepare_slave_config(struct snd_pcm_substream *st,
struct snd_dmaengine_dai_dma_data *dma_data;
int ret;
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, st);
+ dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), st);
ret = snd_hwparams_to_dma_slave_config(st, params, sc);
if (ret)
diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
index 4b1853409633..db052ec17d5d 100644
--- a/sound/soc/img/img-i2s-out.c
+++ b/sound/soc/img/img-i2s-out.c
@@ -403,7 +403,7 @@ static int img_i2s_out_dma_prepare_slave_config(struct snd_pcm_substream *st,
struct snd_dmaengine_dai_dma_data *dma_data;
int ret;
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, st);
+ dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), st);
ret = snd_hwparams_to_dma_slave_config(st, params, sc);
if (ret)
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index baef461a99f1..f883c9340eee 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -1333,7 +1333,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
dai->capture_widget->name);
w = dai->capture_widget;
snd_soc_dapm_widget_for_each_source_path(w, p) {
- if (p->connected && !p->connected(w, p->sink))
+ if (p->connected && !p->connected(w, p->source))
continue;
if (p->connect && p->source->power &&
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 340bd2be39a7..82f2b6357778 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -649,7 +649,7 @@ static snd_pcm_uframes_t sst_soc_pointer(struct snd_soc_component *component,
static int sst_soc_pcm_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_pcm *pcm = rtd->pcm;
if (dai->driver->playback.channels_min ||
@@ -741,7 +741,7 @@ static int sst_soc_prepare(struct device *dev)
/* set the SSPs to idle */
for_each_card_rtds(drv->soc_card, rtd) {
- struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
if (dai->active) {
send_ssp_cmd(dai, dai->name, 0);
@@ -762,7 +762,7 @@ static void sst_soc_complete(struct device *dev)
/* restart SSPs */
for_each_card_rtds(drv->soc_card, rtd) {
- struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
if (dai->active) {
sst_handle_vb_timer(dai, true);
diff --git a/sound/soc/intel/atom/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c
index d952719bc098..5862fe968083 100644
--- a/sound/soc/intel/atom/sst/sst_pci.c
+++ b/sound/soc/intel/atom/sst/sst_pci.c
@@ -99,7 +99,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram);
do_release_regions:
pci_release_regions(pci);
- return 0;
+ return ret;
}
/*
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 9ca2567d0059..556c3104e641 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -289,7 +289,6 @@ config SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
select SND_SOC_DA7219
select SND_SOC_MAX98357A
select SND_SOC_DMIC
- select SND_HDA_CODEC_HDMI if SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_HDAC_HDMI
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
@@ -302,6 +301,7 @@ config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_HDA_CODEC_HDMI
select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
help
This adds support for ASoC machine driver for Broxton-P platforms
@@ -402,6 +402,7 @@ config SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH
tristate "GLK with DA7219 and MAX98357A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_HDA_CODEC_HDMI
select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
help
This adds support for ASoC machine driver for Geminilake platforms
@@ -413,10 +414,10 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
tristate "GLK with RT5682 and MAX98357A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_HDA_CODEC_HDMI
select SND_SOC_RT5682
select SND_SOC_MAX98357A
select SND_SOC_DMIC
- select SND_HDA_CODEC_HDMI if SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_HDAC_HDMI
help
This adds support for ASoC machine driver for Geminilake platforms
@@ -430,7 +431,7 @@ if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
tristate "SKL/KBL/BXT/APL with HDA Codecs"
- select SND_HDA_CODEC_HDMI if SND_SOC_SOF_HDA_AUDIO_CODEC
+ depends on SND_HDA_CODEC_HDMI
select SND_SOC_HDAC_HDMI
select SND_SOC_DMIC
# SND_SOC_HDAC_HDA is already selected
@@ -448,15 +449,31 @@ config SND_SOC_INTEL_SOF_RT5682_MACH
depends on I2C && ACPI
depends on (SND_SOC_SOF_HDA_LINK && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
(SND_SOC_SOF_BAYTRAIL && (X86_INTEL_LPSS || COMPILE_TEST))
+ depends on SND_HDA_CODEC_HDMI
+ select SND_SOC_MAX98373
+ select SND_SOC_RT1015
select SND_SOC_RT5682
select SND_SOC_DMIC
- select SND_HDA_CODEC_HDMI if SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_HDAC_HDMI
help
This adds support for ASoC machine driver for SOF platforms
with rt5682 codec.
Say Y if you have such a device.
If unsure select "N".
+
+config SND_SOC_INTEL_SOF_PCM512x_MACH
+ tristate "SOF with TI PCM512x codec"
+ depends on I2C && ACPI
+ depends on (SND_SOC_SOF_HDA_AUDIO_CODEC && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
+ (SND_SOC_SOF_BAYTRAIL && (X86_INTEL_LPSS || COMPILE_TEST))
+ depends on SND_HDA_CODEC_HDMI
+ select SND_SOC_PCM512x_I2C
+ help
+ This adds support for ASoC machine driver for SOF platforms
+ with TI PCM512x I2S audio codec.
+ Say Y or m if you have such a device.
+ If unsure select "N".
+
endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
@@ -476,11 +493,11 @@ config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
tristate "CML with RT1011 and RT5682 in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_HDA_CODEC_HDMI
select SND_SOC_RT1011
select SND_SOC_RT5682
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
- select SND_HDA_CODEC_HDMI if SND_SOC_SOF_HDA_AUDIO_CODEC
help
This adds support for ASoC machine driver for SOF platform with
RT1011 + RT5682 I2S codec.
@@ -492,19 +509,43 @@ endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK
if SND_SOC_SOF_JASPERLAKE
config SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH
- tristate "SOF with DA7219 and MAX98373 in I2S Mode"
+ tristate "SOF with DA7219 and MAX98373/MAX98360A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_HDA_CODEC_HDMI
select SND_SOC_DA7219
select SND_SOC_MAX98373
select SND_SOC_DMIC
- select SND_HDA_CODEC_HDMI if SND_SOC_SOF_HDA_AUDIO_CODEC
help
This adds support for ASoC machine driver for SOF platforms
- with DA7219 + MAX98373 I2S audio codec.
+ with DA7219 + MAX98373/MAX98360A I2S audio codec.
Say Y if you have such a device.
If unsure select "N".
endif ## SND_SOC_SOF_JASPERLAKE
+if SND_SOC_SOF_INTEL_SOUNDWIRE
+
+config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
+ tristate "SoundWire generic machine driver"
+ depends on I2C && ACPI
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES || COMPILE_TEST
+ depends on SOUNDWIRE
+ depends on SND_HDA_CODEC_HDMI
+ select SND_SOC_RT700_SDW
+ select SND_SOC_RT711_SDW
+ select SND_SOC_RT1308_SDW
+ select SND_SOC_RT1308
+ select SND_SOC_RT715_SDW
+ select SND_SOC_RT5682_SDW
+ select SND_SOC_DMIC
+ help
+ Add support for Intel SoundWire-based platforms connected to
+ RT700, RT711, RT1308 and RT715.
+ If unsure select "N".
+
+endif
+
endif ## SND_SOC_INTEL_MACH
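
A recurring change in the Kconfig hunk above is replacing "select SND_HDA_CODEC_HDMI if SND_SOC_SOF_HDA_AUDIO_CODEC" with a hard "depends on SND_HDA_CODEC_HDMI". With the conditional select, a configuration could build one of these machine drivers without the HDMI codec driver present, and the board's HDMI paths would quietly compile out. A minimal C sketch of the failure mode the hard dependency rules out (card_build_hdmi() and card_late_probe() are hypothetical stand-ins for the boards' late_probe helpers):

#include <sound/soc.h>

static int card_build_hdmi(struct snd_soc_card *card); /* hypothetical */

static int card_late_probe(struct snd_soc_card *card)
{
	/*
	 * Under "select ... if ...", CONFIG_SND_HDA_CODEC_HDMI could be
	 * unset and this guard silently skipped HDMI jack creation; with
	 * "depends on", the branch below can never be taken.
	 */
	if (!IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI))
		return 0;

	return card_build_hdmi(card);
}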
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index b74ddd49bd39..1ef6e60bc2a0 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -7,6 +7,7 @@ snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
snd-soc-sst-broadwell-objs := broadwell.o
snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o hda_dsp_common.o
snd-soc-sst-bxt-rt298-objs := bxt_rt298.o hda_dsp_common.o
+snd-soc-sst-sof-pcm512x-objs := sof_pcm512x.o hda_dsp_common.o
snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o hda_dsp_common.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
@@ -18,7 +19,7 @@ snd-soc-sst-byt-cht-cx2072x-objs := bytcht_cx2072x.o
snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
-snd-soc-sof_rt5682-objs := sof_rt5682.o hda_dsp_common.o
+snd-soc-sof_rt5682-objs := sof_rt5682.o hda_dsp_common.o sof_maxim_common.o
snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o hda_dsp_common.o
snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
@@ -30,13 +31,18 @@ snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o hda_dsp_c
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
snd-soc-sof_da7219_max98373-objs := sof_da7219_max98373.o hda_dsp_common.o
-
+snd-soc-sof-sdw-objs += sof_sdw.o \
+ sof_sdw_rt711.o sof_sdw_rt700.o \
+ sof_sdw_rt1308.o sof_sdw_rt715.o \
+ sof_sdw_rt5682.o \
+ sof_sdw_dmic.o sof_sdw_hdmi.o hda_dsp_common.o
obj-$(CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH) += snd-soc-sof_rt5682.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
+obj-$(CONFIG_SND_SOC_INTEL_SOF_PCM512x_MACH) += snd-soc-sst-sof-pcm512x.o
obj-$(CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH) += snd-soc-sst-glk-rt5682_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
obj-$(CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH) += snd-soc-sst-bdw-rt5650-mach.o
@@ -62,4 +68,4 @@ obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max9
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH) += snd-soc-skl_hda_dsp.o
obj-$(CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH) += snd-soc-sof_da7219_max98373.o
-
+obj-$(CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH) += snd-soc-sof-sdw.o
diff --git a/sound/soc/intel/boards/bdw-rt5650.c b/sound/soc/intel/boards/bdw-rt5650.c
index 1a302436d450..6c2fdb5659ed 100644
--- a/sound/soc/intel/boards/bdw-rt5650.c
+++ b/sound/soc/intel/boards/bdw-rt5650.c
@@ -107,7 +107,7 @@ static int bdw_rt5650_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
/* Workaround: set codec PLL to 19.2MHz that PLL source is
@@ -166,8 +166,8 @@ static int bdw_rt5650_init(struct snd_soc_pcm_runtime *rtd)
{
struct bdw_rt5650_priv *bdw_rt5650 =
snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
int ret;
/* Enable codec ASRC function for Stereo DAC/Stereo1 ADC/DMIC/I2S1.
@@ -226,9 +226,6 @@ SND_SOC_DAILINK_DEF(be,
#if IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
SND_SOC_DAILINK_DEF(ssp0_port,
DAILINK_COMP_ARRAY(COMP_CPU("ssp0-port")));
-#else
-SND_SOC_DAILINK_DEF(ssp0_port,
- DAILINK_COMP_ARRAY(COMP_DUMMY()));
#endif
static struct snd_soc_dai_link bdw_rt5650_dais[] = {
@@ -264,7 +261,11 @@ static struct snd_soc_dai_link bdw_rt5650_dais[] = {
.dpcm_playback = 1,
.dpcm_capture = 1,
.init = bdw_rt5650_init,
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
+ SND_SOC_DAILINK_REG(dummy, be, dummy),
+#else
SND_SOC_DAILINK_REG(ssp0_port, be, platform),
+#endif
},
};
@@ -298,7 +299,7 @@ static int bdw_rt5650_probe(struct platform_device *pdev)
return -ENOMEM;
/* override platform name, if required */
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
ret = snd_soc_fixup_dai_links_platform_name(&bdw_rt5650_card,
mach->mach_params.platform);
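
The dominant change in this and the following board files is a mechanical move from rtd->cpu_dai, rtd->codec_dai and rtd->codec_dais[n] to indexed accessors introduced for multi-CPU/multi-codec DAI links. At the time of this series the helpers in include/sound/soc.h read roughly:

/* CPU DAIs sit at the front of rtd->dais; codec DAIs follow them */
#define asoc_rtd_to_cpu(rtd, n)   (rtd)->dais[n]
#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus]

so asoc_rtd_to_cpu(rtd, 0) is the old rtd->cpu_dai and asoc_rtd_to_codec(rtd, 0) the old rtd->codec_dai; these substitutions are behavior-neutral for single-DAI links.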
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index bb643c99069d..6b4b64098d36 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -157,7 +157,7 @@ static int bdw_rt5677_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_MCLK, 24576000,
@@ -174,7 +174,7 @@ static int bdw_rt5677_dsp_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_PLL1, 24576000,
@@ -226,7 +226,7 @@ static int bdw_rt5677_init(struct snd_soc_pcm_runtime *rtd)
{
struct bdw_rt5677_priv *bdw_rt5677 =
snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
int ret;
@@ -298,9 +298,6 @@ SND_SOC_DAILINK_DEF(be,
#if IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
SND_SOC_DAILINK_DEF(ssp0_port,
DAILINK_COMP_ARRAY(COMP_CPU("ssp0-port")));
-#else
-SND_SOC_DAILINK_DEF(ssp0_port,
- DAILINK_COMP_ARRAY(COMP_DUMMY()));
#endif
/* Wake on voice interface */
@@ -350,7 +347,11 @@ static struct snd_soc_dai_link bdw_rt5677_dais[] = {
.dpcm_playback = 1,
.dpcm_capture = 1,
.init = bdw_rt5677_init,
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
+ SND_SOC_DAILINK_REG(dummy, be, dummy),
+#else
SND_SOC_DAILINK_REG(ssp0_port, be, platform),
+#endif
},
};
@@ -412,7 +413,7 @@ static int bdw_rt5677_probe(struct platform_device *pdev)
}
/* override platform name, if required */
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
ret = snd_soc_fixup_dai_links_platform_name(&bdw_rt5677_card,
mach->mach_params.platform);
if (ret)
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index b9c12e24c70b..acb4e36682cb 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -70,7 +70,7 @@ static const struct snd_soc_dapm_route broadwell_rt286_map[] = {
static int broadwell_rt286_codec_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
int ret = 0;
ret = snd_soc_card_jack_new(rtd->card, "Headset",
SND_JACK_HEADSET | SND_JACK_BTN_0, &broadwell_headset,
@@ -104,7 +104,7 @@ static int broadwell_rt286_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, RT286_SCLK_S_PLL, 24000000,
@@ -167,9 +167,6 @@ SND_SOC_DAILINK_DEF(codec,
#if IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
SND_SOC_DAILINK_DEF(ssp0_port,
DAILINK_COMP_ARRAY(COMP_CPU("ssp0-port")));
-#else
-SND_SOC_DAILINK_DEF(ssp0_port,
- DAILINK_COMP_ARRAY(COMP_DUMMY()));
#endif
/* broadwell digital audio interface glue - connects codec <--> CPU */
@@ -226,7 +223,11 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.ops = &broadwell_rt286_ops,
.dpcm_playback = 1,
.dpcm_capture = 1,
+#if !IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
+ SND_SOC_DAILINK_REG(dummy, codec, dummy),
+#else
SND_SOC_DAILINK_REG(ssp0_port, codec, platform),
+#endif
},
};
@@ -283,7 +284,7 @@ static int broadwell_audio_probe(struct platform_device *pdev)
broadwell_rt286.dev = &pdev->dev;
/* override platform name, if required */
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
ret = snd_soc_fixup_dai_links_platform_name(&broadwell_rt286,
mach->mach_params.platform);
if (ret)
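
In the three Broadwell boards above, the dummy ssp0_port definition is dropped and the non-SOF build now registers the link against the snd-soc-dummy component directly, via the #if !IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL) branches. For reference, the dailink helpers expand roughly as follows (paraphrased from include/sound/soc.h of this era):

#define SND_SOC_DAILINK_DEF(name, def...) \
	static struct snd_soc_dai_link_component name[] = { def }

#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", }

/* three-argument SND_SOC_DAILINK_REG(cpu, codec, platform) */
#define SND_SOC_DAILINK_REG3(cpu, codec, platform)	\
	.cpus		= cpu,				\
	.num_cpus	= ARRAY_SIZE(cpu),		\
	.codecs		= codec,			\
	.num_codecs	= ARRAY_SIZE(codec),		\
	.platforms	= platform,			\
	.num_platforms	= ARRAY_SIZE(platform)

so SND_SOC_DAILINK_REG(dummy, codec, dummy) points both the CPU and platform side of the back end at the dummy component when the legacy (non-SOF) driver is in use.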
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 9177401c37a5..44016c16f25e 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -179,8 +179,8 @@ static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
int clk_freq;
/* Configure sysclk for codec */
@@ -226,7 +226,7 @@ static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
static int broxton_hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct bxt_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct bxt_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -244,7 +244,7 @@ static int broxton_hdmi_init(struct snd_soc_pcm_runtime *rtd)
static int broxton_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
@@ -721,7 +721,7 @@ static int broxton_audio_probe(struct platform_device *pdev)
}
/* override platform name, if required */
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
platform_name = mach->mach_params.platform;
ret = snd_soc_fixup_dai_links_platform_name(&broxton_audio_card,
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 4b67f261377c..7a4decf34191 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -155,7 +155,7 @@ static const struct snd_soc_dapm_route geminilake_rt298_map[] = {
static int broxton_rt298_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
@@ -165,7 +165,7 @@ static int broxton_rt298_fe_init(struct snd_soc_pcm_runtime *rtd)
static int broxton_rt298_codec_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
int ret = 0;
ret = snd_soc_card_jack_new(rtd->card, "Headset",
@@ -186,7 +186,7 @@ static int broxton_rt298_codec_init(struct snd_soc_pcm_runtime *rtd)
static int broxton_hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct bxt_rt286_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct bxt_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -225,7 +225,7 @@ static int broxton_rt298_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL,
@@ -627,7 +627,7 @@ static int broxton_audio_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(card, ctx);
/* override platform name, if required */
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
platform_name = mach->mach_params.platform;
ret = snd_soc_fixup_dai_links_platform_name(card,
diff --git a/sound/soc/intel/boards/byt-max98090.c b/sound/soc/intel/boards/byt-max98090.c
index 01739ad75b12..f5097da28828 100644
--- a/sound/soc/intel/boards/byt-max98090.c
+++ b/sound/soc/intel/boards/byt-max98090.c
@@ -89,7 +89,7 @@ static int byt_max98090_init(struct snd_soc_pcm_runtime *runtime)
card->dapm.idle_bias_off = true;
- ret = snd_soc_dai_set_sysclk(runtime->codec_dai,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(runtime, 0),
M98090_REG_SYSTEM_CLOCK,
25000000, SND_SOC_CLOCK_IN);
if (ret < 0) {
diff --git a/sound/soc/intel/boards/byt-rt5640.c b/sound/soc/intel/boards/byt-rt5640.c
index 0c76dafdd572..ace232f8aed6 100644
--- a/sound/soc/intel/boards/byt-rt5640.c
+++ b/sound/soc/intel/boards/byt-rt5640.c
@@ -73,7 +73,7 @@ static int byt_rt5640_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_PLL1,
@@ -123,7 +123,7 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
- struct snd_soc_component *component = runtime->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
struct snd_soc_card *card = runtime->card;
const struct snd_soc_dapm_route *custom_map;
int num_routes;
diff --git a/sound/soc/intel/boards/bytcht_cx2072x.c b/sound/soc/intel/boards/bytcht_cx2072x.c
index 67f06c95eec5..3b3df7c9008c 100644
--- a/sound/soc/intel/boards/bytcht_cx2072x.c
+++ b/sound/soc/intel/boards/bytcht_cx2072x.c
@@ -70,7 +70,7 @@ static const struct acpi_gpio_mapping byt_cht_cx2072x_acpi_gpios[] = {
static int byt_cht_cx2072x_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
- struct snd_soc_component *codec = rtd->codec_dai->component;
+ struct snd_soc_component *codec = asoc_rtd_to_codec(rtd, 0)->component;
int ret;
if (devm_acpi_dev_add_driver_gpios(codec->dev,
@@ -80,7 +80,7 @@ static int byt_cht_cx2072x_init(struct snd_soc_pcm_runtime *rtd)
card->dapm.idle_bias_off = true;
/* set the default PLL rate, the clock is handled by the codec driver */
- ret = snd_soc_dai_set_sysclk(rtd->codec_dai, CX2072X_MCLK_EXTERNAL_PLL,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0), CX2072X_MCLK_EXTERNAL_PLL,
19200000, SND_SOC_CLOCK_IN);
if (ret) {
dev_err(rtd->dev, "Could not set sysclk\n");
@@ -97,7 +97,7 @@ static int byt_cht_cx2072x_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_component_set_jack(codec, &byt_cht_cx2072x_headset, NULL);
- snd_soc_dai_set_bclk_ratio(rtd->codec_dai, 50);
+ snd_soc_dai_set_bclk_ratio(asoc_rtd_to_codec(rtd, 0), 50);
return ret;
}
@@ -123,7 +123,7 @@ static int byt_cht_cx2072x_fixup(struct snd_soc_pcm_runtime *rtd,
* with explicit setting to I2S 2ch 24-bit. The word length is set with
* dai_set_tdm_slot() since there is no other API exposed
*/
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
@@ -132,7 +132,7 @@ static int byt_cht_cx2072x_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, 24);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
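
Several Baytrail/Cherrytrail codec fixups above and below program the SSP identically: force I2S with the CPU DAI as clock master, then set the word length through the TDM-slot call because no dedicated API exposes it. A condensed sketch of that sequence using the new accessor (the function name is illustrative; error reporting shortened):

#include <sound/soc.h>

static int ssp_fixup_i2s_24bit(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int ret;

	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	/* 2 slots of 24 bits; mask 0x3 activates both for TX and RX */
	return snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 24);
}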
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index eda7a500cad6..5e96e7d02733 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -78,7 +78,7 @@ static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
* with explicit setting to I2S 2ch 24-bit. The word length is set with
* dai_set_tdm_slot() since there is no other API exposed
*/
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
@@ -87,7 +87,7 @@ static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, 24);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
@@ -106,7 +106,7 @@ static int aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, DA7213_CLKSRC_MCLK,
@@ -127,7 +127,7 @@ static int aif1_hw_params(struct snd_pcm_substream *substream,
static int aif1_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_pll(codec_dai, 0,
@@ -231,7 +231,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
int ret_val = 0;
int i;
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
card = &bytcht_da7213_card;
card->dev = &pdev->dev;
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 0adc5a5e134a..ddcd070100ef 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -157,7 +157,7 @@ static struct snd_soc_jack_pin byt_cht_es8316_jack_pins[] = {
static int byt_cht_es8316_init(struct snd_soc_pcm_runtime *runtime)
{
- struct snd_soc_component *codec = runtime->codec_dai->component;
+ struct snd_soc_component *codec = asoc_rtd_to_codec(runtime, 0)->component;
struct snd_soc_card *card = runtime->card;
struct byt_cht_es8316_private *priv = snd_soc_card_get_drvdata(card);
const struct snd_soc_dapm_route *custom_map;
@@ -212,7 +212,7 @@ static int byt_cht_es8316_init(struct snd_soc_pcm_runtime *runtime)
if (ret)
dev_err(card->dev, "unable to enable MCLK\n");
- ret = snd_soc_dai_set_sysclk(runtime->codec_dai, 0, 19200000,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(runtime, 0), 0, 19200000,
SND_SOC_CLOCK_IN);
if (ret < 0) {
dev_err(card->dev, "can't set codec clock %d\n", ret);
@@ -262,7 +262,7 @@ static int byt_cht_es8316_codec_fixup(struct snd_soc_pcm_runtime *rtd,
* with explicit setting to I2S 2ch 24-bit. The word length is set with
* dai_set_tdm_slot() since there is no other API exposed
*/
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS
@@ -272,7 +272,7 @@ static int byt_cht_es8316_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, bits);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, bits);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/bytcht_nocodec.c b/sound/soc/intel/boards/bytcht_nocodec.c
index 479af808ef43..8c0dab1f4030 100644
--- a/sound/soc/intel/boards/bytcht_nocodec.c
+++ b/sound/soc/intel/boards/bytcht_nocodec.c
@@ -58,7 +58,7 @@ static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
* with explicit setting to I2S 2ch 24-bit. The word length is set with
* dai_set_tdm_slot() since there is no other API exposed
*/
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
@@ -68,7 +68,7 @@ static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, 24);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 6bd9ae813be2..33fb8ea4e5cb 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -381,7 +381,7 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
return byt_rt5640_prepare_and_enable_pll1(dai, params_rate(params));
}
@@ -805,7 +805,7 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
struct byt_rt5640_private *priv = snd_soc_card_get_drvdata(card);
- struct snd_soc_component *component = runtime->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
const struct snd_soc_dapm_route *custom_map;
int num_routes;
int ret;
@@ -962,7 +962,7 @@ static int byt_rt5640_codec_fixup(struct snd_soc_pcm_runtime *rtd,
* with explicit setting to I2S 2ch. The word length is set with
* dai_set_tdm_slot() since there is no other API exposed
*/
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
@@ -971,7 +971,7 @@ static int byt_rt5640_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, bits);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, bits);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 5074bb53f98e..214ef41e23e6 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -348,7 +348,7 @@ static int byt_rt5651_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
snd_pcm_format_t format = params_format(params);
int rate = params_rate(params);
int bclk_ratio;
@@ -540,7 +540,7 @@ static int byt_rt5651_add_codec_device_props(struct device *i2c_dev)
static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
- struct snd_soc_component *codec = runtime->codec_dai->component;
+ struct snd_soc_component *codec = asoc_rtd_to_codec(runtime, 0)->component;
struct byt_rt5651_private *priv = snd_soc_card_get_drvdata(card);
const struct snd_soc_dapm_route *custom_map;
int num_routes;
@@ -685,7 +685,7 @@ static int byt_rt5651_codec_fixup(struct snd_soc_pcm_runtime *rtd,
* with explicit setting to I2S 2ch. The word length is set with
* dai_set_tdm_slot() since there is no other API exposed
*/
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS
@@ -696,7 +696,7 @@ static int byt_rt5651_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, bits);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, bits);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 70bb86f3342f..135701738a44 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -113,7 +113,7 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, M98090_REG_SYSTEM_CLOCK,
@@ -257,7 +257,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
int ret = 0;
unsigned int fmt = 0;
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 16);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, 16);
if (ret < 0) {
dev_err(rtd->dev, "can't set cpu_dai slot fmt: %d\n", ret);
return ret;
@@ -266,7 +266,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS;
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai, fmt);
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0), fmt);
if (ret < 0) {
dev_err(rtd->dev, "can't set cpu_dai set fmt: %d\n", ret);
return ret;
@@ -553,7 +553,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* override platform name, if required */
snd_soc_card_cht.dev = &pdev->dev;
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
platform_name = mach->mach_params.platform;
ret_val = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cht,
diff --git a/sound/soc/intel/boards/cht_bsw_nau8824.c b/sound/soc/intel/boards/cht_bsw_nau8824.c
index 501bad3976fb..f456150f89c2 100644
--- a/sound/soc/intel/boards/cht_bsw_nau8824.c
+++ b/sound/soc/intel/boards/cht_bsw_nau8824.c
@@ -73,7 +73,7 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, NAU8824_CLK_FLL_FS, 0,
@@ -96,7 +96,7 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
struct snd_soc_jack *jack = &ctx->jack;
- struct snd_soc_dai *codec_dai = runtime->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
struct snd_soc_component *component = codec_dai->component;
int ret, jack_type;
@@ -259,7 +259,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* override platform name, if required */
snd_soc_card_cht.dev = &pdev->dev;
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
platform_name = mach->mach_params.platform;
ret_val = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cht,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index b5b016d493f1..e64eca56e426 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -208,7 +208,7 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
/* set codec PLL source to the 19.2MHz platform clock (MCLK) */
@@ -252,7 +252,7 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
- struct snd_soc_component *component = runtime->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
int jack_type;
int ret;
@@ -359,7 +359,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
* with explicit setting to I2S 2ch 16-bit. The word length is set with
* dai_set_tdm_slot() since there is no other API exposed
*/
- ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS
@@ -369,7 +369,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_fmt(rtd->codec_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0),
SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS
@@ -379,7 +379,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 16);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_cpu(rtd, 0), 0x3, 0x3, 2, 16);
if (ret < 0) {
dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
return ret;
@@ -393,7 +393,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
/*
* Default mode for SSP configuration is TDM 4 slot
*/
- ret = snd_soc_dai_set_fmt(rtd->codec_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0),
SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBS_CFS);
@@ -403,7 +403,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
}
/* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
- ret = snd_soc_dai_set_tdm_slot(rtd->codec_dai, 0xF, 0xF, 4, 24);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 0), 0xF, 0xF, 4, 24);
if (ret < 0) {
dev_err(rtd->dev, "can't set codec TDM slot %d\n", ret);
return ret;
@@ -539,7 +539,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
if (!drv)
return -ENOMEM;
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
for (i = 0; i < ARRAY_SIZE(snd_soc_cards); i++) {
if (acpi_dev_found(snd_soc_cards[i].codec_id) &&
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 9d657421730a..097023a3ec14 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -144,7 +144,7 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
/* set codec PLL source to the 19.2MHz platform clock (MCLK) */
@@ -176,7 +176,7 @@ static const struct acpi_gpio_mapping cht_rt5672_gpios[] = {
static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
- struct snd_soc_dai *codec_dai = runtime->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
struct snd_soc_component *component = codec_dai->component;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
@@ -255,7 +255,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
/*
* Default mode for SSP configuration is TDM 4 slot
*/
- ret = snd_soc_dai_set_fmt(rtd->codec_dai,
+ ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0),
SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBS_CFS);
@@ -265,7 +265,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
}
/* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
- ret = snd_soc_dai_set_tdm_slot(rtd->codec_dai, 0xF, 0xF, 4, 24);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 0), 0xF, 0xF, 4, 24);
if (ret < 0) {
dev_err(rtd->dev, "can't set codec TDM slot %d\n", ret);
return ret;
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
index dd80d0186a6c..8167b2977e1d 100644
--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -85,7 +85,7 @@ static const struct snd_soc_dapm_route cml_rt1011_rt5682_map[] = {
static int cml_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_jack *jack;
int ret;
@@ -125,7 +125,7 @@ static int cml_rt5682_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int clk_id, clk_freq, pll_out, ret;
clk_id = RT5682_PLL1_S_MCLK;
@@ -164,8 +164,7 @@ static int cml_rt1011_hw_params(struct snd_pcm_substream *substream,
srate = params_rate(params);
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
/* 100 Fs to drive 24 bit data */
ret = snd_soc_dai_set_pll(codec_dai, 0, RT1011_PLL1_S_BCLK,
@@ -275,7 +274,7 @@ static int sof_card_late_probe(struct snd_soc_card *card)
static int hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -447,12 +446,12 @@ static int snd_cml_rt1011_probe(struct platform_device *pdev)
const char *platform_name;
int ret;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
snd_soc_card_cml.dev = &pdev->dev;
platform_name = mach->mach_params.platform;
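
Two smaller fixes ride along in cml_rt1011_rt5682.c above: the probe-time allocation drops GFP_ATOMIC for GFP_KERNEL (probe runs in sleepable context, so the atomic pool was needless), and the open-coded walk over rtd->codec_dais becomes for_each_rtd_codec_dais(). The iterator was defined at the time roughly as:

#define for_each_rtd_codec_dais(rtd, i, dai)					\
	for ((i) = 0;								\
	     ((i) < rtd->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i));	\
	     (i)++)

which visits each codec DAI in order and terminates early on a NULL entry.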
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index 8e947bad143c..f13158e4a1fc 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -136,8 +136,8 @@ static int geminilake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct glk_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_jack *jack;
int ret;
@@ -188,7 +188,7 @@ static int geminilake_rt5682_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
/* Set valid bitmask & configuration for I2S in 24 bit */
@@ -208,7 +208,7 @@ static struct snd_soc_ops geminilake_rt5682_ops = {
static int geminilake_hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct glk_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct glk_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -225,7 +225,7 @@ static int geminilake_hdmi_init(struct snd_soc_pcm_runtime *rtd)
static int geminilake_rt5682_fe_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
struct snd_soc_dapm_context *dapm;
int ret;
@@ -409,6 +409,7 @@ static struct snd_soc_dai_link geminilake_dais[] = {
.init = NULL,
.capture_only = 1,
.nonatomic = 1,
+ .dynamic = 1,
SND_SOC_DAILINK_REG(echoref, dummy, platform),
},
[GLK_DPCM_AUDIO_REF_CP] = {
@@ -604,7 +605,7 @@ static int geminilake_audio_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(card, ctx);
/* override platform name, if required */
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
platform_name = mach->mach_params.platform;
ret = snd_soc_fixup_dai_links_platform_name(card, platform_name);
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 3dadf9bff796..3ed53d7db4e6 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -56,7 +56,7 @@ static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_MCLK, 12288000,
@@ -193,7 +193,7 @@ static int haswell_audio_probe(struct platform_device *pdev)
haswell_rt5640.dev = &pdev->dev;
/* override platform name, if required */
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
ret = snd_soc_fixup_dai_links_platform_name(&haswell_rt5640,
mach->mach_params.platform);
if (ret)
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index bc7f9a9ce9af..32cd90b8d4c4 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -159,8 +159,8 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_jack *jack;
int ret;
@@ -203,7 +203,7 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct kbl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -236,7 +236,7 @@ static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
static int kabylake_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
index 7a13e9b35187..abd4e3839678 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -176,10 +176,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *runtime = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int ret, j;
- for (j = 0; j < runtime->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = runtime->codec_dais[j];
+ for_each_rtd_codec_dais(runtime, j, codec_dai) {
if (!strcmp(codec_dai->component->name, MAX98927_DEV0_NAME)) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
@@ -221,10 +221,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
static int kabylake_ssp0_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int j, ret;
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
const char *name = codec_dai->component->name;
struct snd_soc_component *component = codec_dai->component;
struct snd_soc_dapm_context *dapm =
@@ -331,7 +331,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_jack *jack;
struct snd_soc_card *card = rtd->card;
int ret;
@@ -381,7 +381,7 @@ static int kabylake_dmic_init(struct snd_soc_pcm_runtime *rtd)
static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct kbl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -414,7 +414,7 @@ static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
static int kabylake_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
index e23dea9ab79a..6460e3f0c974 100644
--- a/sound/soc/intel/boards/kbl_rt5660.c
+++ b/sound/soc/intel/boards/kbl_rt5660.c
@@ -157,7 +157,7 @@ static int kabylake_rt5660_codec_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
ret = devm_acpi_dev_add_driver_gpios(component->dev, acpi_rt5660_gpios);
@@ -210,7 +210,7 @@ static int kabylake_rt5660_codec_init(struct snd_soc_pcm_runtime *rtd)
static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct kbl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -244,7 +244,7 @@ static int kabylake_rt5660_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai,
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index d8f2ff7139a9..658a9da3a40f 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -242,7 +242,7 @@ static int kabylake_rt5663_fe_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
ret = snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
@@ -258,7 +258,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_jack *jack;
/*
@@ -305,7 +305,7 @@ static int kabylake_rt5663_max98927_codec_init(struct snd_soc_pcm_runtime *rtd)
static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
{
struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct kbl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -431,7 +431,7 @@ static int kabylake_rt5663_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
/* use ASRC for internal clocks, as PLL rate isn't multiple of BCLK */
@@ -472,7 +472,7 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int ret = 0, j;
- for_each_rtd_codec_dai(rtd, j, codec_dai) {
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
/*
* Use channel 4 and 5 for the first amp
@@ -962,7 +962,7 @@ static int kabylake_audio_probe(struct platform_device *pdev)
kabylake_audio_card->dev = &pdev->dev;
snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
if (mach)
dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 96c814f36458..1b1f8d7a4ea3 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -206,7 +206,7 @@ static struct snd_soc_codec_conf max98927_codec_conf[] = {
static int kabylake_rt5663_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
int ret;
dapm = snd_soc_component_get_dapm(component);
@@ -221,7 +221,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_jack *jack;
/*
@@ -255,7 +255,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct kbl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -372,7 +372,7 @@ static int kabylake_rt5663_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
/* use ASRC for internal clocks, as PLL rate isn't multiple of BCLK */
@@ -399,7 +399,7 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int ret = 0, j;
- for_each_rtd_codec_dai(rtd, j, codec_dai) {
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, RT5514_DEV_NAME)) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0, 8, 16);
if (ret < 0) {
@@ -772,7 +772,7 @@ static int kabylake_audio_probe(struct platform_device *pdev)
kabylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&kabylake_audio_card, ctx);
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
if (mach)
dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.h b/sound/soc/intel/boards/skl_hda_dsp_common.h
index d6150670ca05..e8545d13062f 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.h
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.h
@@ -49,6 +49,10 @@ static inline int skl_hda_hdmi_build_controls(struct snd_soc_card *card)
struct snd_soc_component *component;
struct skl_hda_hdmi_pcm *pcm;
+ /* HDMI disabled, do not create controls */
+ if (list_empty(&ctx->hdmi_pcm_list))
+ return 0;
+
pcm = list_first_entry(&ctx->hdmi_pcm_list, struct skl_hda_hdmi_pcm,
head);
component = pcm->codec_dai->component;
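
skl_hda_hdmi_build_controls() used to call list_first_entry() unconditionally; when the card probes without HDMI, hdmi_pcm_list is empty and list_first_entry() hands back a bogus pointer (it merely casts the head's ->next, which points at the head itself). The added list_empty() check is the standard guard. A self-contained sketch of the hazard (the type and function names are illustrative stand-ins for skl_hda_hdmi_pcm):

#include <linux/errno.h>
#include <linux/list.h>

struct hdmi_pcm {
	struct list_head head;
	int device;
};

static int first_hdmi_device(struct list_head *pcm_list)
{
	struct hdmi_pcm *pcm;

	if (list_empty(pcm_list))
		return -ENODEV;	/* nothing safe to dereference below */

	pcm = list_first_entry(pcm_list, struct hdmi_pcm, head);
	return pcm->device;
}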
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 11eaee9ae41f..3be764299ab0 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -61,6 +61,9 @@ static const struct snd_soc_dapm_route skl_hda_map[] = {
{ "Alt Analog CPU Capture", NULL, "Alt Analog Codec Capture" },
};
+SND_SOC_DAILINK_DEF(dummy_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("snd-soc-dummy", "snd-soc-dummy-dai")));
+
static int skl_hda_card_late_probe(struct snd_soc_card *card)
{
return skl_hda_hdmi_jack_init(card);
@@ -114,13 +117,19 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
{
struct snd_soc_card *card = &hda_soc_card;
struct snd_soc_dai_link *dai_link;
- u32 codec_count, codec_mask;
+ u32 codec_count, codec_mask, idisp_mask;
int i, num_links, num_route;
codec_mask = mach_params->codec_mask;
codec_count = hweight_long(codec_mask);
+ idisp_mask = codec_mask & IDISP_CODEC_MASK;
+
+ if (!codec_count || codec_count > 2 ||
+ (codec_count == 2 && !idisp_mask))
+ return -EINVAL;
- if (codec_count == 1 && codec_mask & IDISP_CODEC_MASK) {
+ if (codec_mask == idisp_mask) {
+ /* topology with iDisp as the only HDA codec */
num_links = IDISP_DAI_COUNT + DMIC_DAI_COUNT;
num_route = IDISP_ROUTE_COUNT;
@@ -135,13 +144,19 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
skl_hda_be_dai_links[IDISP_DAI_COUNT +
HDAC_DAI_COUNT + i];
}
- } else if (codec_count == 2 && codec_mask & IDISP_CODEC_MASK) {
+ } else {
+ /* topology with external and iDisp HDA codecs */
num_links = ARRAY_SIZE(skl_hda_be_dai_links);
num_route = ARRAY_SIZE(skl_hda_map);
card->dapm_widgets = skl_hda_widgets;
card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
- } else {
- return -EINVAL;
+ if (!idisp_mask) {
+ for (i = 0; i < IDISP_DAI_COUNT; i++) {
+ skl_hda_be_dai_links[i].codecs = dummy_codec;
+ skl_hda_be_dai_links[i].num_codecs =
+ ARRAY_SIZE(dummy_codec);
+ }
+ }
}
card->num_links = num_links;
@@ -167,7 +182,7 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
if (!mach)
return -EINVAL;
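
The skl_hda_dsp_generic.c rework above validates the HDA codec topology before building the card: at most two codecs, and if there are two, one must be the iDisp (HDMI) codec; when no display codec is present, the iDisp DAI links are pointed at a dummy codec instead of being dropped. A standalone sketch of just the mask check (the helper name is illustrative; the mask value follows this driver's header, where iDisp sits at HDA address 2):

#include <linux/bitops.h>
#include <linux/errno.h>

#define IDISP_CODEC_MASK	BIT(2)

/* returns 0 for a supported codec_mask layout, -EINVAL otherwise */
static int check_codec_topology(unsigned long codec_mask)
{
	unsigned int codec_count = hweight_long(codec_mask);
	unsigned long idisp_mask = codec_mask & IDISP_CODEC_MASK;

	if (!codec_count || codec_count > 2 ||
	    (codec_count == 2 && !idisp_mask))
		return -EINVAL;

	return 0;
}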
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index e6de3b28d840..d7b8154c43a4 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -157,7 +157,7 @@ static int skylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
static int skylake_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
/*
* Headset buttons map to the google Reference headset.
@@ -182,7 +182,7 @@ static int skylake_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
{
struct skl_nau8825_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct skl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -200,7 +200,7 @@ static int skylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
{
struct skl_nau8825_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct skl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -218,7 +218,7 @@ static int skylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
{
struct skl_nau8825_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct skl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -236,7 +236,7 @@ static int skylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_nau8825_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
@@ -296,7 +296,7 @@ static int skylake_nau8825_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai,
@@ -660,7 +660,7 @@ static int skylake_audio_probe(struct platform_device *pdev)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
if (mach)
dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index c99c8b23e509..4b317bcf6ea0 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -161,12 +161,12 @@ static int skylake_ssm4567_codec_init(struct snd_soc_pcm_runtime *rtd)
int ret;
/* Slot 1 for left */
- ret = snd_soc_dai_set_tdm_slot(rtd->codec_dais[0], 0x01, 0x01, 2, 48);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 0), 0x01, 0x01, 2, 48);
if (ret < 0)
return ret;
/* Slot 2 for right */
- ret = snd_soc_dai_set_tdm_slot(rtd->codec_dais[1], 0x02, 0x02, 2, 48);
+ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 1), 0x02, 0x02, 2, 48);
if (ret < 0)
return ret;
@@ -176,7 +176,7 @@ static int skylake_ssm4567_codec_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
/*
* 4 buttons here map to the google Reference headset
@@ -201,7 +201,7 @@ static int skylake_nau8825_codec_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
{
struct skl_nau88125_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct skl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -219,7 +219,7 @@ static int skylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
{
struct skl_nau88125_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct skl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -238,7 +238,7 @@ static int skylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
{
struct skl_nau88125_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct skl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -256,7 +256,7 @@ static int skylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_nau8825_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
@@ -348,7 +348,7 @@ static int skylake_nau8825_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai,
@@ -686,6 +686,7 @@ static struct snd_soc_card skylake_audio_card = {
.codec_conf = ssm4567_codec_conf,
.num_configs = ARRAY_SIZE(ssm4567_codec_conf),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = skylake_card_late_probe,
};
@@ -703,7 +704,7 @@ static int skylake_audio_probe(struct platform_device *pdev)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
if (mach)
dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
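For reference, snd_soc_dai_set_tdm_slot() takes (dai, tx_mask, rx_mask, slots, slot_width), and the masks are slot bitmaps rather than slot numbers. A condensed sketch of the SSM4567 setup above, assuming the same 2-slot, 48-bit frame (example_tdm_init is a hypothetical callback):

#include <sound/soc.h>

static int example_tdm_init(struct snd_soc_pcm_runtime *rtd)
{
	int ret;

	/* bit 0 of the masks (0x01) places the first amp on slot 0 */
	ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 0),
				       0x01, 0x01, 2, 48);
	if (ret < 0)
		return ret;

	/* bit 1 (0x02) places the second amp on slot 1 */
	return snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 1),
					0x02, 0x02, 2, 48);
}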
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index a9aec66a2351..903ae1b28ec9 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -112,7 +112,7 @@ static const struct snd_soc_dapm_route skylake_rt286_map[] = {
static int skylake_rt286_fe_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm;
- struct snd_soc_component *component = rtd->cpu_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_cpu(rtd, 0)->component;
dapm = snd_soc_component_get_dapm(component);
snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
@@ -122,7 +122,7 @@ static int skylake_rt286_fe_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_rt286_codec_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
int ret;
ret = snd_soc_card_jack_new(rtd->card, "Headset",
@@ -143,7 +143,7 @@ static int skylake_rt286_codec_init(struct snd_soc_pcm_runtime *rtd)
static int skylake_hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct skl_rt286_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct skl_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -229,7 +229,7 @@ static int skylake_rt286_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, RT286_SCLK_S_PLL, 24000000,
diff --git a/sound/soc/intel/boards/sof_da7219_max98373.c b/sound/soc/intel/boards/sof_da7219_max98373.c
index 8f44f13d2848..b707dd3b5625 100644
--- a/sound/soc/intel/boards/sof_da7219_max98373.c
+++ b/sound/soc/intel/boards/sof_da7219_max98373.c
@@ -2,7 +2,7 @@
// Copyright(c) 2019 Intel Corporation.
/*
- * Intel SOF Machine driver for DA7219 + MAX98373 codec
+ * Intel SOF Machine driver for DA7219 + MAX98373/MAX98360A codec
*/
#include <linux/input.h>
@@ -69,11 +69,20 @@ static const struct snd_kcontrol_new controls[] = {
SOC_DAPM_PIN_SWITCH("Right Spk"),
};
+static const struct snd_kcontrol_new m98360a_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Spk"),
+};
+
+/* For MAX98373 amp */
static const struct snd_soc_dapm_widget widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
+
SND_SOC_DAPM_SPK("Left Spk", NULL),
SND_SOC_DAPM_SPK("Right Spk", NULL),
+
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_POST_PMD |
SND_SOC_DAPM_PRE_PMU),
@@ -83,21 +92,45 @@ static const struct snd_soc_dapm_route audio_map[] = {
{ "Headphone Jack", NULL, "HPL" },
{ "Headphone Jack", NULL, "HPR" },
+ { "MIC", NULL, "Headset Mic" },
+
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headset Mic", NULL, "Platform Clock" },
+
{ "Left Spk", NULL, "Left BE_OUT" },
{ "Right Spk", NULL, "Right BE_OUT" },
+};
+
+/* For MAX98360A amp */
+static const struct snd_soc_dapm_widget max98360a_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+
+ SND_SOC_DAPM_SPK("Spk", NULL),
+
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU),
+};
+
+static const struct snd_soc_dapm_route max98360a_map[] = {
+ { "Headphone Jack", NULL, "HPL" },
+ { "Headphone Jack", NULL, "HPR" },
{ "MIC", NULL, "Headset Mic" },
{ "Headphone Jack", NULL, "Platform Clock" },
{ "Headset Mic", NULL, "Platform Clock" },
+
+ {"Spk", NULL, "Speaker"},
};
static struct snd_soc_jack headset;
static int da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
struct snd_soc_jack *jack;
int ret;
@@ -140,7 +173,7 @@ static int ssp1_hw_params(struct snd_pcm_substream *substream,
int ret, j;
for (j = 0; j < runtime->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = runtime->codec_dais[j];
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, j);
if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
/* vmon_slot_no = 0 imon_slot_no = 1 for TX slots */
@@ -181,7 +214,7 @@ static struct snd_soc_codec_conf max98373_codec_conf[] = {
static int hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -224,6 +257,9 @@ SND_SOC_DAILINK_DEF(ssp1_amps,
/* Left */ COMP_CODEC(MAXIM_DEV0_NAME, MAX98373_CODEC_DAI),
/* Right */ COMP_CODEC(MAXIM_DEV1_NAME, MAX98373_CODEC_DAI)));
+SND_SOC_DAILINK_DEF(ssp1_m98360a,
+ DAILINK_COMP_ARRAY(COMP_CODEC("MX98360A:00", "HiFi")));
+
SND_SOC_DAILINK_DEF(dmic_pin,
DAILINK_COMP_ARRAY(COMP_CPU("DMIC01 Pin")));
SND_SOC_DAILINK_DEF(dmic_codec,
@@ -320,6 +356,21 @@ static struct snd_soc_card card_da7219_m98373 = {
.late_probe = card_late_probe,
};
+static struct snd_soc_card card_da7219_m98360a = {
+ .name = "da7219max98360a",
+ .owner = THIS_MODULE,
+ .dai_link = dais,
+ .num_links = ARRAY_SIZE(dais),
+ .controls = m98360a_controls,
+ .num_controls = ARRAY_SIZE(m98360a_controls),
+ .dapm_widgets = max98360a_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98360a_widgets),
+ .dapm_routes = max98360a_map,
+ .num_dapm_routes = ARRAY_SIZE(max98360a_map),
+ .fully_routed = true,
+ .late_probe = card_late_probe,
+};
+
static int audio_probe(struct platform_device *pdev)
{
static struct snd_soc_card *card;
@@ -327,15 +378,26 @@ static int audio_probe(struct platform_device *pdev)
struct card_private *ctx;
int ret;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ /* By default dais[0] is configured for max98373 */
+ if (!strcmp(pdev->name, "sof_da7219_max98360a")) {
+ dais[0] = (struct snd_soc_dai_link) {
+ .name = "SSP1-Codec",
+ .id = 0,
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .ignore_pmdown_time = 1,
+ SND_SOC_DAILINK_REG(ssp1_pin, ssp1_m98360a, platform) };
+ }
+
INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
card = (struct snd_soc_card *)pdev->id_entry->driver_data;
card->dev = &pdev->dev;
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
ret = snd_soc_fixup_dai_links_platform_name(card,
mach->mach_params.platform);
if (ret)
@@ -351,13 +413,17 @@ static const struct platform_device_id board_ids[] = {
.name = "sof_da7219_max98373",
.driver_data = (kernel_ulong_t)&card_da7219_m98373,
},
+ {
+ .name = "sof_da7219_max98360a",
+ .driver_data = (kernel_ulong_t)&card_da7219_m98360a,
+ },
{ }
};
static struct platform_driver audio = {
.probe = audio_probe,
.driver = {
- .name = "sof_da7219_max98373",
+ .name = "sof_da7219_max98_360a_373",
.pm = &snd_soc_pm_ops,
},
.id_table = board_ids,
@@ -368,4 +434,5 @@ module_platform_driver(audio)
MODULE_DESCRIPTION("ASoC Intel(R) SOF Machine driver");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sof_da7219_max98360a");
MODULE_ALIAS("platform:sof_da7219_max98373");
diff --git a/sound/soc/intel/boards/sof_maxim_common.c b/sound/soc/intel/boards/sof_maxim_common.c
new file mode 100644
index 000000000000..463b39a7ccfd
--- /dev/null
+++ b/sound/soc/intel/boards/sof_maxim_common.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+#include <linux/string.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/soc-dapm.h>
+#include <uapi/sound/asound.h>
+#include "sof_maxim_common.h"
+
+static const struct snd_soc_dapm_route max_98373_dapm_routes[] = {
+ /* speaker */
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+};
+
+static struct snd_soc_codec_conf max_98373_codec_conf[] = {
+ {
+ .dlc = COMP_CODEC_CONF(MAX_98373_DEV0_NAME),
+ .name_prefix = "Right",
+ },
+ {
+ .dlc = COMP_CODEC_CONF(MAX_98373_DEV1_NAME),
+ .name_prefix = "Left",
+ },
+};
+
+struct snd_soc_dai_link_component max_98373_components[] = {
+ { /* For Left */
+ .name = MAX_98373_DEV0_NAME,
+ .dai_name = MAX_98373_CODEC_DAI,
+ },
+ { /* For Right */
+ .name = MAX_98373_DEV1_NAME,
+ .dai_name = MAX_98373_CODEC_DAI,
+ },
+};
+
+static int max98373_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
+ int j;
+
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
+ if (!strcmp(codec_dai->component->name, MAX_98373_DEV0_NAME)) {
+ /* DEV0 tdm slot configuration */
+ snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
+ }
+ if (!strcmp(codec_dai->component->name, MAX_98373_DEV1_NAME)) {
+ /* DEV1 tdm slot configuration */
+ snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
+ }
+ }
+ return 0;
+}
+
+struct snd_soc_ops max_98373_ops = {
+ .hw_params = max98373_hw_params,
+};
+
+int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, max_98373_dapm_routes,
+ ARRAY_SIZE(max_98373_dapm_routes));
+ if (ret)
+ dev_err(rtd->dev, "Speaker map addition failed: %d\n", ret);
+ return ret;
+}
+
+void sof_max98373_codec_conf(struct snd_soc_card *card)
+{
+ card->codec_conf = max_98373_codec_conf;
+ card->num_configs = ARRAY_SIZE(max_98373_codec_conf);
+}
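Two identical amps would otherwise register identically named controls, so the name_prefix entries above make the core rename each device's controls and widgets; the TDM masks likewise carve disjoint slots out of one 8-slot, 16-bit frame (0x30 selects slots 4-5, 0xC0 slots 6-7). A small sketch of the prefixing, where "Speaker Volume" is a hypothetical control name:

#include <sound/soc.h>

/*
 * With these entries, a control registered by the codec driver as
 * "Speaker Volume" is exposed as "Right Speaker Volume" for the
 * device at i2c-MX98373:00 and "Left Speaker Volume" for the one
 * at i2c-MX98373:01.
 */
static struct snd_soc_codec_conf example_conf[] = {
	{
		.dlc = COMP_CODEC_CONF("i2c-MX98373:00"),
		.name_prefix = "Right",
	},
	{
		.dlc = COMP_CODEC_CONF("i2c-MX98373:01"),
		.name_prefix = "Left",
	},
};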
diff --git a/sound/soc/intel/boards/sof_maxim_common.h b/sound/soc/intel/boards/sof_maxim_common.h
new file mode 100644
index 000000000000..406bf0e81155
--- /dev/null
+++ b/sound/soc/intel/boards/sof_maxim_common.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+/*
+ * This file defines data structures used in machine drivers for
+ * Intel platforms with Maxim codecs.
+ */
+#ifndef __SOF_MAXIM_COMMON_H
+#define __SOF_MAXIM_COMMON_H
+
+#include <sound/soc.h>
+
+#define MAX_98373_CODEC_DAI "max98373-aif1"
+#define MAX_98373_DEV0_NAME "i2c-MX98373:00"
+#define MAX_98373_DEV1_NAME "i2c-MX98373:01"
+
+extern struct snd_soc_dai_link_component max_98373_components[2];
+extern struct snd_soc_ops max_98373_ops;
+
+int max98373_spk_codec_init(struct snd_soc_pcm_runtime *rtd);
+void sof_max98373_codec_conf(struct snd_soc_card *card);
+#endif /* __SOF_MAXIM_COMMON_H */
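A machine driver consumes these shared symbols instead of redefining the amp wiring per board. A hypothetical speaker BE link assembled from them might look like this (the link name and id are placeholders):

#include <linux/kernel.h>
#include <sound/soc.h>
#include "sof_maxim_common.h"

static struct snd_soc_dai_link example_amp_link = {
	.name = "SSP1-Codec",
	.id = 1,
	.codecs = max_98373_components,
	.num_codecs = ARRAY_SIZE(max_98373_components),
	.init = max98373_spk_codec_init,
	.ops = &max_98373_ops,
	.no_pcm = 1,
	.dpcm_playback = 1,
};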
diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
new file mode 100644
index 000000000000..fb7811899999
--- /dev/null
+++ b/sound/soc/intel/boards/sof_pcm512x.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018-2020 Intel Corporation.
+
+/*
+ * Intel SOF Machine Driver for Intel platforms with TI PCM512x codec,
+ * e.g. Up or Up2 with Hifiberry DAC+ HAT
+ */
+#include <linux/clk.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../codecs/pcm512x.h"
+#include "../common/soc-intel-quirks.h"
+#include "hda_dsp_common.h"
+
+#define NAME_SIZE 32
+
+#define SOF_PCM512X_SSP_CODEC(quirk) ((quirk) & GENMASK(3, 0))
+#define SOF_PCM512X_SSP_CODEC_MASK (GENMASK(3, 0))
+
+#define IDISP_CODEC_MASK 0x4
+
+/* Default: SSP5 */
+static unsigned long sof_pcm512x_quirk = SOF_PCM512X_SSP_CODEC(5);
+
+static bool is_legacy_cpu;
+
+struct sof_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct sof_card_private {
+ struct list_head hdmi_pcm_list;
+ bool idisp_codec;
+};
+
+static int sof_pcm512x_quirk_cb(const struct dmi_system_id *id)
+{
+ sof_pcm512x_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id sof_pcm512x_quirk_table[] = {
+ {
+ .callback = sof_pcm512x_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UP-CHT01"),
+ },
+ .driver_data = (void *)(SOF_PCM512X_SSP_CODEC(2)),
+ },
+ {}
+};
+
+static int sof_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
+ struct sof_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ /* dai_link id is 1:1 mapped to the PCM device */
+ pcm->device = rtd->dai_link->id;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int sof_pcm512x_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *codec = asoc_rtd_to_codec(rtd, 0)->component;
+
+ snd_soc_component_update_bits(codec, PCM512x_GPIO_EN, 0x08, 0x08);
+ snd_soc_component_update_bits(codec, PCM512x_GPIO_OUTPUT_4, 0x0f, 0x02);
+ snd_soc_component_update_bits(codec, PCM512x_GPIO_CONTROL_1,
+ 0x08, 0x08);
+
+ return 0;
+}
+
+static int aif1_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *codec = asoc_rtd_to_codec(rtd, 0)->component;
+
+ snd_soc_component_update_bits(codec, PCM512x_GPIO_CONTROL_1,
+ 0x08, 0x08);
+
+ return 0;
+}
+
+static void aif1_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *codec = asoc_rtd_to_codec(rtd, 0)->component;
+
+ snd_soc_component_update_bits(codec, PCM512x_GPIO_CONTROL_1,
+ 0x08, 0x00);
+}
+
+static const struct snd_soc_ops sof_pcm512x_ops = {
+ .startup = aif1_startup,
+ .shutdown = aif1_shutdown,
+};
+
+static struct snd_soc_dai_link_component platform_component[] = {
+ {
+ /* name might be overridden during probe */
+ .name = "0000:00:1f.3"
+ }
+};
+
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
+static int sof_card_late_probe(struct snd_soc_card *card)
+{
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(card);
+ struct sof_hdmi_pcm *pcm;
+
+ /* HDMI is not supported by SOF on Baytrail/CherryTrail */
+ if (is_legacy_cpu)
+ return 0;
+
+ if (list_empty(&ctx->hdmi_pcm_list))
+ return -EINVAL;
+
+ if (!ctx->idisp_codec)
+ return 0;
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm, head);
+
+ return hda_dsp_hdmi_build_controls(card, pcm->codec_dai->component);
+}
+#else
+static int sof_card_late_probe(struct snd_soc_card *card)
+{
+ return 0;
+}
+#endif
+
+static const struct snd_kcontrol_new sof_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Ext Spk"),
+};
+
+static const struct snd_soc_dapm_widget sof_widgets[] = {
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+};
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+};
+
+static const struct snd_soc_dapm_route sof_map[] = {
+ /* Speaker */
+ {"Ext Spk", NULL, "OUTR"},
+ {"Ext Spk", NULL, "OUTL"},
+};
+
+static const struct snd_soc_dapm_route dmic_map[] = {
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
+};
+
+static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+ ARRAY_SIZE(dmic_widgets));
+ if (ret) {
+ dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+ /* Don't need to add routes if widget addition failed */
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+ ARRAY_SIZE(dmic_map));
+
+ if (ret)
+ dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+ return ret;
+}
+
+/* sof audio machine driver for pcm512x codec */
+static struct snd_soc_card sof_audio_card_pcm512x = {
+ .name = "pcm512x",
+ .owner = THIS_MODULE,
+ .controls = sof_controls,
+ .num_controls = ARRAY_SIZE(sof_controls),
+ .dapm_widgets = sof_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sof_widgets),
+ .dapm_routes = sof_map,
+ .num_dapm_routes = ARRAY_SIZE(sof_map),
+ .fully_routed = true,
+ .late_probe = sof_card_late_probe,
+};
+
+SND_SOC_DAILINK_DEF(pcm512x_component,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-104C5122:00", "pcm512x-hifi")));
+SND_SOC_DAILINK_DEF(dmic_component,
+ DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
+
+static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+ int ssp_codec,
+ int dmic_be_num,
+ int hdmi_num,
+ bool idisp_codec)
+{
+ struct snd_soc_dai_link_component *idisp_components;
+ struct snd_soc_dai_link_component *cpus;
+ struct snd_soc_dai_link *links;
+ int i, id = 0;
+
+ links = devm_kcalloc(dev, sof_audio_card_pcm512x.num_links,
+ sizeof(struct snd_soc_dai_link), GFP_KERNEL);
+ cpus = devm_kcalloc(dev, sof_audio_card_pcm512x.num_links,
+ sizeof(struct snd_soc_dai_link_component), GFP_KERNEL);
+ if (!links || !cpus)
+ goto devm_err;
+
+ /* codec SSP */
+ links[id].name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d-Codec", ssp_codec);
+ if (!links[id].name)
+ goto devm_err;
+
+ links[id].id = id;
+ links[id].codecs = pcm512x_component;
+ links[id].num_codecs = ARRAY_SIZE(pcm512x_component);
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].init = sof_pcm512x_codec_init;
+ links[id].ops = &sof_pcm512x_ops;
+ links[id].nonatomic = true;
+ links[id].dpcm_playback = 1;
+ /*
+ * capture only supported with specific versions of the Hifiberry DAC+
+ * links[id].dpcm_capture = 1;
+ */
+ links[id].no_pcm = 1;
+ links[id].cpus = &cpus[id];
+ links[id].num_cpus = 1;
+ if (is_legacy_cpu) {
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "ssp%d-port",
+ ssp_codec);
+ if (!links[id].cpus->dai_name)
+ goto devm_err;
+ } else {
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d Pin",
+ ssp_codec);
+ if (!links[id].cpus->dai_name)
+ goto devm_err;
+ }
+ id++;
+
+ /* dmic */
+ if (dmic_be_num > 0) {
+ /* at least we have dmic01 */
+ links[id].name = "dmic01";
+ links[id].cpus = &cpus[id];
+ links[id].cpus->dai_name = "DMIC01 Pin";
+ links[id].init = dmic_init;
+ if (dmic_be_num > 1) {
+ /* set up 2 BE links at most */
+ links[id + 1].name = "dmic16k";
+ links[id + 1].cpus = &cpus[id + 1];
+ links[id + 1].cpus->dai_name = "DMIC16k Pin";
+ dmic_be_num = 2;
+ }
+ }
+
+ for (i = 0; i < dmic_be_num; i++) {
+ links[id].id = id;
+ links[id].num_cpus = 1;
+ links[id].codecs = dmic_component;
+ links[id].num_codecs = ARRAY_SIZE(dmic_component);
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].ignore_suspend = 1;
+ links[id].dpcm_capture = 1;
+ links[id].no_pcm = 1;
+ id++;
+ }
+
+ /* HDMI */
+ if (hdmi_num > 0) {
+ idisp_components = devm_kcalloc(dev, hdmi_num,
+ sizeof(struct snd_soc_dai_link_component),
+ GFP_KERNEL);
+ if (!idisp_components)
+ goto devm_err;
+ }
+ for (i = 1; i <= hdmi_num; i++) {
+ links[id].name = devm_kasprintf(dev, GFP_KERNEL,
+ "iDisp%d", i);
+ if (!links[id].name)
+ goto devm_err;
+
+ links[id].id = id;
+ links[id].cpus = &cpus[id];
+ links[id].num_cpus = 1;
+ links[id].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "iDisp%d Pin", i);
+ if (!links[id].cpus->dai_name)
+ goto devm_err;
+
+ /*
+ * the topology cannot be loaded if a codec is missing, so
+ * fall back to the dummy codec when needed
+ */
+ if (idisp_codec) {
+ idisp_components[i - 1].name = "ehdaudio0D2";
+ idisp_components[i - 1].dai_name =
+ devm_kasprintf(dev, GFP_KERNEL,
+ "intel-hdmi-hifi%d", i);
+ } else {
+ idisp_components[i - 1].name = "snd-soc-dummy";
+ idisp_components[i - 1].dai_name = "snd-soc-dummy-dai";
+ }
+ if (!idisp_components[i - 1].dai_name)
+ goto devm_err;
+
+ links[id].codecs = &idisp_components[i - 1];
+ links[id].num_codecs = 1;
+ links[id].platforms = platform_component;
+ links[id].num_platforms = ARRAY_SIZE(platform_component);
+ links[id].init = sof_hdmi_init;
+ links[id].dpcm_playback = 1;
+ links[id].no_pcm = 1;
+ id++;
+ }
+
+ return links;
+devm_err:
+ return NULL;
+}
+
+static int sof_audio_probe(struct platform_device *pdev)
+{
+ struct snd_soc_acpi_mach *mach = pdev->dev.platform_data;
+ struct snd_soc_dai_link *dai_links;
+ struct sof_card_private *ctx;
+ int dmic_be_num, hdmi_num;
+ int ret, ssp_codec;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ hdmi_num = 0;
+ if (soc_intel_is_byt() || soc_intel_is_cht()) {
+ is_legacy_cpu = true;
+ dmic_be_num = 0;
+ /* default quirk for legacy cpu */
+ sof_pcm512x_quirk = SOF_PCM512X_SSP_CODEC(2);
+ } else {
+ dmic_be_num = 2;
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
+ if (mach->mach_params.common_hdmi_codec_drv &&
+ (mach->mach_params.codec_mask & IDISP_CODEC_MASK))
+ ctx->idisp_codec = true;
+
+ /* HDMI links are always present in the topology */
+ hdmi_num = 3;
+#endif
+ }
+
+ dmi_check_system(sof_pcm512x_quirk_table);
+
+ dev_dbg(&pdev->dev, "sof_pcm512x_quirk = %lx\n", sof_pcm512x_quirk);
+
+ ssp_codec = sof_pcm512x_quirk & SOF_PCM512X_SSP_CODEC_MASK;
+
+ /* compute number of dai links */
+ sof_audio_card_pcm512x.num_links = 1 + dmic_be_num + hdmi_num;
+
+ dai_links = sof_card_dai_links_create(&pdev->dev, ssp_codec,
+ dmic_be_num, hdmi_num,
+ ctx->idisp_codec);
+ if (!dai_links)
+ return -ENOMEM;
+
+ sof_audio_card_pcm512x.dai_link = dai_links;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ sof_audio_card_pcm512x.dev = &pdev->dev;
+
+ /* set platform name for each dailink */
+ ret = snd_soc_fixup_dai_links_platform_name(&sof_audio_card_pcm512x,
+ mach->mach_params.platform);
+ if (ret)
+ return ret;
+
+ snd_soc_card_set_drvdata(&sof_audio_card_pcm512x, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev,
+ &sof_audio_card_pcm512x);
+}
+
+static int sof_pcm512x_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct snd_soc_component *component = NULL;
+
+ for_each_card_components(card, component) {
+ if (!strcmp(component->name, pcm512x_component[0].name)) {
+ snd_soc_component_set_jack(component, NULL, NULL);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver sof_audio = {
+ .probe = sof_audio_probe,
+ .remove = sof_pcm512x_remove,
+ .driver = {
+ .name = "sof_pcm512x",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(sof_audio)
+
+MODULE_DESCRIPTION("ASoC Intel(R) SOF + PCM512x Machine driver");
+MODULE_AUTHOR("Pierre-Louis Bossart");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sof_pcm512x");
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 5d878873a8e0..8c29431b5847 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2019 Intel Corporation.
+// Copyright(c) 2019-2020 Intel Corporation.
/*
* Intel SOF Machine Driver with Realtek rt5682 Codec
- * and speaker codec MAX98357A
+ * and speaker codec MAX98357A or RT1015.
*/
#include <linux/i2c.h>
#include <linux/input.h>
@@ -18,10 +18,12 @@
#include <sound/soc.h>
#include <sound/rt5682.h>
#include <sound/soc-acpi.h>
+#include "../../codecs/rt1015.h"
#include "../../codecs/rt5682.h"
#include "../../codecs/hdac_hdmi.h"
#include "../common/soc-intel-quirks.h"
#include "hda_dsp_common.h"
+#include "sof_maxim_common.h"
#define NAME_SIZE 32
@@ -39,6 +41,8 @@
#define SOF_RT5682_NUM_HDMIDEV_MASK (GENMASK(12, 10))
#define SOF_RT5682_NUM_HDMIDEV(quirk) \
((quirk << SOF_RT5682_NUM_HDMIDEV_SHIFT) & SOF_RT5682_NUM_HDMIDEV_MASK)
+#define SOF_RT1015_SPEAKER_AMP_PRESENT BIT(13)
+#define SOF_MAX98373_SPEAKER_AMP_PRESENT BIT(14)
/* Default: MCLK on, MCLK 19.2M, SSP0 */
static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |
@@ -120,7 +124,7 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
static int sof_hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct sof_hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -139,7 +143,7 @@ static int sof_hdmi_init(struct snd_soc_pcm_runtime *rtd)
static int sof_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_jack *jack;
int ret;
@@ -207,7 +211,7 @@ static int sof_rt5682_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int clk_id, clk_freq, pll_out, ret;
if (sof_rt5682_quirk & SOF_RT5682_MCLK_EN) {
@@ -260,6 +264,42 @@ static struct snd_soc_ops sof_rt5682_ops = {
.hw_params = sof_rt5682_hw_params,
};
+static int sof_rt1015_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai;
+ int i, ret;
+
+ if (!snd_soc_card_get_codec_dai(card, "rt1015-aif"))
+ return 0;
+
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT1015_PLL_S_BCLK,
+ params_rate(params) * 50,
+ params_rate(params) * 256);
+ if (ret < 0) {
+ dev_err(card->dev, "failed to set pll\n");
+ return ret;
+ }
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT1015_SCLK_S_PLL,
+ params_rate(params) * 256,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "failed to set sysclk\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops sof_rt1015_ops = {
+ .hw_params = sof_rt1015_hw_params,
+};
+
static struct snd_soc_dai_link_component platform_component[] = {
{
/* name might be overridden during probe */
@@ -316,12 +356,17 @@ static const struct snd_kcontrol_new sof_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Spk"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+
};
static const struct snd_soc_dapm_widget sof_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_SPK("Spk", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
};
static const struct snd_soc_dapm_widget dmic_widgets[] = {
@@ -342,11 +387,22 @@ static const struct snd_soc_dapm_route speaker_map[] = {
{ "Spk", NULL, "Speaker" },
};
+static const struct snd_soc_dapm_route speaker_map_lr[] = {
+ { "Left Spk", NULL, "Left SPO" },
+ { "Right Spk", NULL, "Right SPO" },
+};
+
static const struct snd_soc_dapm_route dmic_map[] = {
/* digital mics */
{"DMic", NULL, "SoC DMIC"},
};
+static int speaker_codec_init_lr(struct snd_soc_pcm_runtime *rtd)
+{
+ return snd_soc_dapm_add_routes(&rtd->card->dapm, speaker_map_lr,
+ ARRAY_SIZE(speaker_map_lr));
+}
+
static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
@@ -382,6 +438,17 @@ static int dmic_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
+static struct snd_soc_codec_conf rt1015_amp_conf[] = {
+ {
+ .dlc = COMP_CODEC_CONF("i2c-10EC1015:00"),
+ .name_prefix = "Left",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("i2c-10EC1015:01"),
+ .name_prefix = "Right",
+ },
+};
+
/* sof audio machine driver for rt5682 codec */
static struct snd_soc_card sof_audio_card_rt5682 = {
.name = "rt5682", /* the sof- prefix is added by the core */
@@ -417,6 +484,17 @@ static struct snd_soc_dai_link_component max98357a_component[] = {
}
};
+static struct snd_soc_dai_link_component rt1015_components[] = {
+ {
+ .name = "i2c-10EC1015:00",
+ .dai_name = "rt1015-aif",
+ },
+ {
+ .name = "i2c-10EC1015:01",
+ .dai_name = "rt1015-aif",
+ },
+};
+
static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
int ssp_codec,
int ssp_amp,
@@ -556,11 +634,24 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
goto devm_err;
links[id].id = id;
- links[id].codecs = max98357a_component;
- links[id].num_codecs = ARRAY_SIZE(max98357a_component);
+ if (sof_rt5682_quirk & SOF_RT1015_SPEAKER_AMP_PRESENT) {
+ links[id].codecs = rt1015_components;
+ links[id].num_codecs = ARRAY_SIZE(rt1015_components);
+ links[id].init = speaker_codec_init_lr;
+ links[id].ops = &sof_rt1015_ops;
+ } else if (sof_rt5682_quirk &
+ SOF_MAX98373_SPEAKER_AMP_PRESENT) {
+ links[id].codecs = max_98373_components;
+ links[id].num_codecs = ARRAY_SIZE(max_98373_components);
+ links[id].init = max98373_spk_codec_init;
+ links[id].ops = &max_98373_ops;
+ } else {
+ links[id].codecs = max98357a_component;
+ links[id].num_codecs = ARRAY_SIZE(max98357a_component);
+ links[id].init = speaker_codec_init;
+ }
links[id].platforms = platform_component;
links[id].num_platforms = ARRAY_SIZE(platform_component);
- links[id].init = speaker_codec_init,
links[id].nonatomic = true;
links[id].dpcm_playback = 1;
links[id].no_pcm = 1;
@@ -604,7 +695,7 @@ static int sof_audio_probe(struct platform_device *pdev)
dmi_check_system(sof_rt5682_quirk_table);
- mach = (&pdev->dev)->platform_data;
+ mach = pdev->dev.platform_data;
/* A speaker amp might not be present when the quirk claims one is.
* Detect this via whether the machine driver match includes quirk_data.
@@ -662,6 +753,9 @@ static int sof_audio_probe(struct platform_device *pdev)
if (sof_rt5682_quirk & SOF_SPEAKER_AMP_PRESENT)
sof_audio_card_rt5682.num_links++;
+ if (sof_rt5682_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT)
+ sof_max98373_codec_conf(&sof_audio_card_rt5682);
+
dai_links = sof_card_dai_links_create(&pdev->dev, ssp_codec, ssp_amp,
dmic_be_num, hdmi_num);
if (!dai_links)
@@ -669,6 +763,11 @@ static int sof_audio_probe(struct platform_device *pdev)
sof_audio_card_rt5682.dai_link = dai_links;
+ if (sof_rt5682_quirk & SOF_RT1015_SPEAKER_AMP_PRESENT) {
+ sof_audio_card_rt5682.codec_conf = rt1015_amp_conf;
+ sof_audio_card_rt5682.num_configs = ARRAY_SIZE(rt1015_amp_conf);
+ }
+
INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
sof_audio_card_rt5682.dev = &pdev->dev;
@@ -714,6 +813,24 @@ static const struct platform_device_id board_ids[] = {
SOF_RT5682_SSP_AMP(1) |
SOF_RT5682_NUM_HDMIDEV(4)),
},
+ {
+ .name = "jsl_rt5682_rt1015",
+ .driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_MCLK_24MHZ |
+ SOF_RT5682_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_RT1015_SPEAKER_AMP_PRESENT |
+ SOF_RT5682_SSP_AMP(1)),
+ },
+ {
+ .name = "tgl_max98373_rt5682",
+ .driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_MAX98373_SPEAKER_AMP_PRESENT |
+ SOF_RT5682_SSP_AMP(1) |
+ SOF_RT5682_NUM_HDMIDEV(4)),
+ },
{ }
};
@@ -735,3 +852,5 @@ MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sof_rt5682");
MODULE_ALIAS("platform:tgl_max98357a_rt5682");
+MODULE_ALIAS("platform:jsl_rt5682_rt1015");
+MODULE_ALIAS("platform:tgl_max98373_rt5682");
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
new file mode 100644
index 000000000000..a64dc563b47e
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw - ASoC Machine driver for Intel SoundWire platforms
+ */
+
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "sof_sdw_common.h"
+
+unsigned long sof_sdw_quirk = SOF_RT711_JD_SRC_JD1;
+
+#define INC_ID(BE, CPU, LINK) do { (BE)++; (CPU)++; (LINK)++; } while (0)
+
+static int sof_sdw_quirk_cb(const struct dmi_system_id *id)
+{
+ sof_sdw_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "09C6")
+ },
+ .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+ SOF_RT715_DAI_ID_FIX),
+ },
+ {
+ /* early version of SKU 09C6 */
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0983")
+ },
+ .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+ SOF_RT715_DAI_ID_FIX),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "098F"),
+ },
+ .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+ SOF_RT715_DAI_ID_FIX |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0990"),
+ },
+ .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+ SOF_RT715_DAI_ID_FIX |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "Tiger Lake Client Platform"),
+ },
+ .driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
+ SOF_SDW_TGL_HDMI | SOF_SDW_PCH_DMIC |
+ SOF_SSP_PORT(SOF_I2S_SSP2)),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"),
+ },
+ .driver_data = (void *)SOF_SDW_PCH_DMIC,
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CometLake Client"),
+ },
+ .driver_data = (void *)SOF_SDW_PCH_DMIC,
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Volteer"),
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI | SOF_SDW_PCH_DMIC),
+ },
+
+ {}
+};
+
+static struct snd_soc_codec_conf codec_conf[] = {
+ {
+ .dlc = COMP_CODEC_CONF("sdw:0:25d:711:0"),
+ .name_prefix = "rt711",
+ },
+ /* rt1308 w/ I2S connection */
+ {
+ .dlc = COMP_CODEC_CONF("i2c-10EC1308:00"),
+ .name_prefix = "rt1308-1",
+ },
+ /* rt1308 left on link 1 */
+ {
+ .dlc = COMP_CODEC_CONF("sdw:1:25d:1308:0"),
+ .name_prefix = "rt1308-1",
+ },
+ /* two 1308s on link1 with different unique id */
+ {
+ .dlc = COMP_CODEC_CONF("sdw:1:25d:1308:0:0"),
+ .name_prefix = "rt1308-1",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("sdw:1:25d:1308:0:2"),
+ .name_prefix = "rt1308-2",
+ },
+ /* rt1308 right on link 2 */
+ {
+ .dlc = COMP_CODEC_CONF("sdw:2:25d:1308:0"),
+ .name_prefix = "rt1308-2",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("sdw:3:25d:715:0"),
+ .name_prefix = "rt715",
+ },
+ {
+ .dlc = COMP_CODEC_CONF("sdw:0:25d:5682:0"),
+ .name_prefix = "rt5682",
+ },
+};
+
+static struct snd_soc_dai_link_component dmic_component[] = {
+ {
+ .name = "dmic-codec",
+ .dai_name = "dmic-hifi",
+ }
+};
+
+static struct snd_soc_dai_link_component platform_component[] = {
+ {
+ /* name might be overridden during probe */
+ .name = "0000:00:1f.3"
+ }
+};
+
+/* these wrappers are only needed to avoid typecast compilation errors */
+static int sdw_startup(struct snd_pcm_substream *substream)
+{
+ return sdw_startup_stream(substream);
+}
+
+static void sdw_shutdown(struct snd_pcm_substream *substream)
+{
+ sdw_shutdown_stream(substream);
+}
+
+static const struct snd_soc_ops sdw_ops = {
+ .startup = sdw_startup,
+ .shutdown = sdw_shutdown,
+};
+
+static struct sof_sdw_codec_info codec_info_list[] = {
+ {
+ .id = 0x700,
+ .direction = {true, true},
+ .dai_name = "rt700-aif1",
+ .init = sof_sdw_rt700_init,
+ },
+ {
+ .id = 0x711,
+ .direction = {true, true},
+ .dai_name = "rt711-aif1",
+ .init = sof_sdw_rt711_init,
+ },
+ {
+ .id = 0x1308,
+ .acpi_id = "10EC1308",
+ .direction = {true, false},
+ .dai_name = "rt1308-aif",
+ .ops = &sof_sdw_rt1308_i2s_ops,
+ .init = sof_sdw_rt1308_init,
+ },
+ {
+ .id = 0x715,
+ .direction = {false, true},
+ .dai_name = "rt715-aif2",
+ .init = sof_sdw_rt715_init,
+ },
+ {
+ .id = 0x5682,
+ .direction = {true, true},
+ .dai_name = "rt5682-sdw",
+ .init = sof_sdw_rt5682_init,
+ },
+};
+
+static inline int find_codec_info_part(unsigned int part_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ if (part_id == codec_info_list[i].id)
+ break;
+
+ if (i == ARRAY_SIZE(codec_info_list))
+ return -EINVAL;
+
+ return i;
+}
+
+static inline int find_codec_info_acpi(const u8 *acpi_id)
+{
+ int i;
+
+ if (!acpi_id[0])
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ if (!memcmp(codec_info_list[i].acpi_id, acpi_id,
+ ACPI_ID_LEN))
+ break;
+
+ if (i == ARRAY_SIZE(codec_info_list))
+ return -EINVAL;
+
+ return i;
+}
+
+/*
+ * Get the number of BE dai links and CPU DAIs from the sdw link ADR
+ * tables. Since some sdw slaves may be aggregated, the number of CPU
+ * DAIs may be larger than the number of BE dai links.
+ */
+static int get_sdw_dailink_info(const struct snd_soc_acpi_link_adr *links,
+ int *sdw_be_num, int *sdw_cpu_dai_num)
+{
+ const struct snd_soc_acpi_link_adr *link;
+ bool group_visited[SDW_MAX_GROUPS];
+ bool no_aggregation;
+ int i;
+
+ no_aggregation = sof_sdw_quirk & SOF_SDW_NO_AGGREGATION;
+ *sdw_cpu_dai_num = 0;
+ *sdw_be_num = 0;
+
+ if (!links)
+ return -EINVAL;
+
+ for (i = 0; i < SDW_MAX_GROUPS; i++)
+ group_visited[i] = false;
+
+ for (link = links; link->num_adr; link++) {
+ const struct snd_soc_acpi_endpoint *endpoint;
+ int part_id, codec_index;
+ int stream;
+ u64 adr;
+
+ adr = link->adr_d->adr;
+ part_id = SDW_PART_ID(adr);
+ codec_index = find_codec_info_part(part_id);
+ if (codec_index < 0)
+ return codec_index;
+
+ endpoint = link->adr_d->endpoints;
+
+ /* count DAI number for playback and capture */
+ for_each_pcm_streams(stream) {
+ if (!codec_info_list[codec_index].direction[stream])
+ continue;
+
+ (*sdw_cpu_dai_num)++;
+
+ /* count BE for each non-aggregated slave or group */
+ if (!endpoint->aggregated || no_aggregation ||
+ !group_visited[endpoint->group_id])
+ (*sdw_be_num)++;
+ }
+
+ if (endpoint->aggregated)
+ group_visited[endpoint->group_id] = true;
+ }
+
+ return 0;
+}
+
+static void init_dai_link(struct snd_soc_dai_link *dai_links, int be_id,
+ char *name, int playback, int capture,
+ struct snd_soc_dai_link_component *cpus,
+ int cpus_num,
+ struct snd_soc_dai_link_component *codecs,
+ int codecs_num,
+ int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const struct snd_soc_ops *ops)
+{
+ dai_links->id = be_id;
+ dai_links->name = name;
+ dai_links->platforms = platform_component;
+ dai_links->num_platforms = ARRAY_SIZE(platform_component);
+ dai_links->nonatomic = true;
+ dai_links->no_pcm = 1;
+ dai_links->cpus = cpus;
+ dai_links->num_cpus = cpus_num;
+ dai_links->codecs = codecs;
+ dai_links->num_codecs = codecs_num;
+ dai_links->dpcm_playback = playback;
+ dai_links->dpcm_capture = capture;
+ dai_links->init = init;
+ dai_links->ops = ops;
+}
+
+static bool is_unique_device(const struct snd_soc_acpi_link_adr *link,
+ unsigned int sdw_version,
+ unsigned int mfg_id,
+ unsigned int part_id,
+ unsigned int class_id,
+ int index_in_link
+ )
+{
+ int i;
+
+ for (i = 0; i < link->num_adr; i++) {
+ unsigned int sdw1_version, mfg1_id, part1_id, class1_id;
+ u64 adr;
+
+ /* skip itself */
+ if (i == index_in_link)
+ continue;
+
+ adr = link->adr_d[i].adr;
+
+ sdw1_version = SDW_VERSION(adr);
+ mfg1_id = SDW_MFG_ID(adr);
+ part1_id = SDW_PART_ID(adr);
+ class1_id = SDW_CLASS_ID(adr);
+
+ if (sdw_version == sdw1_version &&
+ mfg_id == mfg1_id &&
+ part_id == part1_id &&
+ class_id == class1_id)
+ return false;
+ }
+
+ return true;
+}
+
+static int create_codec_dai_name(struct device *dev,
+ const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link_component *codec,
+ int offset)
+{
+ int i;
+
+ for (i = 0; i < link->num_adr; i++) {
+ unsigned int sdw_version, unique_id, mfg_id;
+ unsigned int link_id, part_id, class_id;
+ int codec_index, comp_index;
+ char *codec_str;
+ u64 adr;
+
+ adr = link->adr_d[i].adr;
+
+ sdw_version = SDW_VERSION(adr);
+ link_id = SDW_DISCO_LINK_ID(adr);
+ unique_id = SDW_UNIQUE_ID(adr);
+ mfg_id = SDW_MFG_ID(adr);
+ part_id = SDW_PART_ID(adr);
+ class_id = SDW_CLASS_ID(adr);
+
+ comp_index = i + offset;
+ if (is_unique_device(link, sdw_version, mfg_id, part_id,
+ class_id, i)) {
+ codec_str = "sdw:%x:%x:%x:%x";
+ codec[comp_index].name =
+ devm_kasprintf(dev, GFP_KERNEL, codec_str,
+ link_id, mfg_id, part_id,
+ class_id);
+ } else {
+ codec_str = "sdw:%x:%x:%x:%x:%x";
+ codec[comp_index].name =
+ devm_kasprintf(dev, GFP_KERNEL, codec_str,
+ link_id, mfg_id, part_id,
+ class_id, unique_id);
+ }
+
+ if (!codec[comp_index].name)
+ return -ENOMEM;
+
+ codec_index = find_codec_info_part(part_id);
+ if (codec_index < 0)
+ return codec_index;
+
+ codec[comp_index].dai_name =
+ codec_info_list[codec_index].dai_name;
+ }
+
+ return 0;
+}
+
+static int set_codec_init_func(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ bool playback)
+{
+ int i;
+
+ for (i = 0; i < link->num_adr; i++) {
+ unsigned int part_id;
+ int codec_index;
+
+ part_id = SDW_PART_ID(link->adr_d[i].adr);
+ codec_index = find_codec_info_part(part_id);
+
+ if (codec_index < 0)
+ return codec_index;
+
+ if (codec_info_list[codec_index].init)
+ codec_info_list[codec_index].init(link, dai_links,
+ &codec_info_list[codec_index],
+ playback);
+ }
+
+ return 0;
+}
+
+/*
+ * Check the endpoint status of the slaves and gather the link IDs of
+ * all slaves in the same group so that distinct CPU DAIs can be
+ * generated. Currently only one sdw link whose slaves all share a
+ * single group ID is supported.
+ *
+ * one slave on one sdw link with aggregated = 0
+ * one sdw BE DAI <---> one-cpu DAI <---> one-codec DAI
+ *
+ * two or more slaves on one sdw link with aggregated = 0
+ * one sdw BE DAI <---> one-cpu DAI <---> multi-codec DAIs
+ *
+ * multiple links with multiple slaves with aggregated = 1
+ * one sdw BE DAI <---> 1 .. N CPU DAIs <----> 1 .. N codec DAIs
+ */
+static int get_slave_info(const struct snd_soc_acpi_link_adr *adr_link,
+ struct device *dev, int *cpu_dai_id, int *cpu_dai_num,
+ int *codec_num, int *group_id,
+ bool *group_generated)
+{
+ const struct snd_soc_acpi_adr_device *adr_d;
+ const struct snd_soc_acpi_link_adr *adr_next;
+ bool no_aggregation;
+ int index = 0;
+
+ no_aggregation = sof_sdw_quirk & SOF_SDW_NO_AGGREGATION;
+ *codec_num = adr_link->num_adr;
+ adr_d = adr_link->adr_d;
+
+ /* make sure the link mask has a single bit set */
+ if (!is_power_of_2(adr_link->mask))
+ return -EINVAL;
+
+ cpu_dai_id[index++] = ffs(adr_link->mask) - 1;
+ if (!adr_d->endpoints->aggregated || no_aggregation) {
+ *cpu_dai_num = 1;
+ *group_id = 0;
+ return 0;
+ }
+
+ *group_id = adr_d->endpoints->group_id;
+
+ /* gather other link ID of slaves in the same group */
+ for (adr_next = adr_link + 1; adr_next && adr_next->num_adr;
+ adr_next++) {
+ const struct snd_soc_acpi_endpoint *endpoint;
+
+ endpoint = adr_next->adr_d->endpoints;
+ if (!endpoint->aggregated ||
+ endpoint->group_id != *group_id)
+ continue;
+
+ /* make sure the link mask has a single bit set */
+ if (!is_power_of_2(adr_next->mask))
+ return -EINVAL;
+
+ if (index >= SDW_MAX_CPU_DAIS) {
+ dev_err(dev, " cpu_dai_id array overflows");
+ return -EINVAL;
+ }
+
+ cpu_dai_id[index++] = ffs(adr_next->mask) - 1;
+ *codec_num += adr_next->num_adr;
+ }
+
+ /*
+ * indicate CPU DAIs for this group have been generated
+ * to avoid generating CPU DAIs for this group again.
+ */
+ group_generated[*group_id] = true;
+ *cpu_dai_num = index;
+
+ return 0;
+}
+
+static int create_sdw_dailink(struct device *dev, int *be_index,
+ struct snd_soc_dai_link *dai_links,
+ int sdw_be_num, int sdw_cpu_dai_num,
+ struct snd_soc_dai_link_component *cpus,
+ const struct snd_soc_acpi_link_adr *link,
+ int *cpu_id, bool *group_generated)
+{
+ const struct snd_soc_acpi_link_adr *link_next;
+ struct snd_soc_dai_link_component *codecs;
+ int cpu_dai_id[SDW_MAX_CPU_DAIS];
+ int cpu_dai_num, cpu_dai_index;
+ unsigned int part_id, group_id;
+ int codec_idx = 0;
+ int i = 0, j = 0;
+ int codec_index;
+ int codec_num;
+ int stream;
+ int ret;
+ int k;
+
+ ret = get_slave_info(link, dev, cpu_dai_id, &cpu_dai_num, &codec_num,
+ &group_id, group_generated);
+ if (ret)
+ return ret;
+
+ codecs = devm_kcalloc(dev, codec_num, sizeof(*codecs), GFP_KERNEL);
+ if (!codecs)
+ return -ENOMEM;
+
+ /* generate codec name on different links in the same group */
+ for (link_next = link; link_next && link_next->num_adr &&
+ i < cpu_dai_num; link_next++) {
+ const struct snd_soc_acpi_endpoint *endpoints;
+
+ endpoints = link_next->adr_d->endpoints;
+ if (group_id && (!endpoints->aggregated ||
+ endpoints->group_id != group_id))
+ continue;
+
+ /* skip links that are not part of the group being processed */
+ if (cpu_dai_id[i] != ffs(link_next->mask) - 1)
+ continue;
+
+ ret = create_codec_dai_name(dev, link_next, codecs, codec_idx);
+ if (ret < 0)
+ return ret;
+
+ /* check next link to create codec dai in the processed group */
+ i++;
+ codec_idx += link_next->num_adr;
+ }
+
+ /* find codec info to create BE DAI */
+ part_id = SDW_PART_ID(link->adr_d[0].adr);
+ codec_index = find_codec_info_part(part_id);
+ if (codec_index < 0)
+ return codec_index;
+
+ cpu_dai_index = *cpu_id;
+ for_each_pcm_streams(stream) {
+ char *name, *cpu_name;
+ int playback, capture;
+ static const char * const sdw_stream_name[] = {
+ "SDW%d-Playback",
+ "SDW%d-Capture",
+ };
+
+ if (!codec_info_list[codec_index].direction[stream])
+ continue;
+
+ /* create stream name according to first link id */
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ sdw_stream_name[stream], cpu_dai_id[0]);
+ if (!name)
+ return -ENOMEM;
+
+ /*
+ * generate the CPU DAI name based on the sdw link ID and the
+ * PIN ID, with an offset of 2 as required by the sdw dai driver.
+ */
+ for (k = 0; k < cpu_dai_num; k++) {
+ cpu_name = devm_kasprintf(dev, GFP_KERNEL,
+ "SDW%d Pin%d", cpu_dai_id[k],
+ j + SDW_INTEL_BIDIR_PDI_BASE);
+ if (!cpu_name)
+ return -ENOMEM;
+
+ if (cpu_dai_index >= sdw_cpu_dai_num) {
+ dev_err(dev, "invalid cpu dai index %d",
+ cpu_dai_index);
+ return -EINVAL;
+ }
+
+ cpus[cpu_dai_index++].dai_name = cpu_name;
+ }
+
+ if (*be_index >= sdw_be_num) {
+ dev_err(dev, " invalid be dai index %d", *be_index);
+ return -EINVAL;
+ }
+
+ if (*cpu_id >= sdw_cpu_dai_num) {
+ dev_err(dev, " invalid cpu dai index %d", *cpu_id);
+ return -EINVAL;
+ }
+
+ playback = (stream == SNDRV_PCM_STREAM_PLAYBACK);
+ capture = (stream == SNDRV_PCM_STREAM_CAPTURE);
+ init_dai_link(dai_links + *be_index, *be_index, name,
+ playback, capture,
+ cpus + *cpu_id, cpu_dai_num,
+ codecs, codec_num,
+ NULL, &sdw_ops);
+
+ ret = set_codec_init_func(link, dai_links + (*be_index)++,
+ playback);
+ if (ret < 0) {
+ dev_err(dev, "failed to init codec %d", codec_index);
+ return ret;
+ }
+
+ *cpu_id += cpu_dai_num;
+ j++;
+ }
+
+ return 0;
+}
+
+/*
+ * DAI link ID of SSP & DMIC & HDMI are based on last
+ * link ID used by sdw link. Since be_id may be changed
+ * in init func of sdw codec, it is not equal to be_id
+ */
+static inline int get_next_be_id(struct snd_soc_dai_link *links,
+ int be_id)
+{
+ return links[be_id - 1].id + 1;
+}
+
+static int sof_card_dai_links_create(struct device *dev,
+ struct snd_soc_acpi_mach *mach,
+ struct snd_soc_card *card)
+{
+ int ssp_num, sdw_be_num = 0, hdmi_num = 0, dmic_num;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ struct snd_soc_dai_link_component *idisp_components;
+#endif
+ struct snd_soc_dai_link_component *ssp_components;
+ struct snd_soc_acpi_mach_params *mach_params;
+ const struct snd_soc_acpi_link_adr *adr_link;
+ struct snd_soc_dai_link_component *cpus;
+ bool group_generated[SDW_MAX_GROUPS];
+ int ssp_codec_index, ssp_mask;
+ struct snd_soc_dai_link *links;
+ int num_links, link_id = 0;
+ char *name, *cpu_name;
+ int total_cpu_dai_num;
+ int sdw_cpu_dai_num;
+ int i, j, be_id = 0;
+ int cpu_id = 0;
+ int comp_num;
+ int ret;
+
+ /* reset amp_num to ensure amp_num++ starts from 0 in each probe */
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ codec_info_list[i].amp_num = 0;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ hdmi_num = sof_sdw_quirk & SOF_SDW_TGL_HDMI ?
+ SOF_TGL_HDMI_COUNT : SOF_PRE_TGL_HDMI_COUNT;
+#endif
+
+ ssp_mask = SOF_SSP_GET_PORT(sof_sdw_quirk);
+ /*
+ * On the generic TGL platform, I2S or sdw mode is supported
+ * depending on board rework. An ACPI device is registered in the
+ * system only when I2S mode is supported, not sdw mode. Check the
+ * ACPI ID here to confirm that I2S is supported.
+ */
+ ssp_codec_index = find_codec_info_acpi(mach->id);
+ ssp_num = ssp_codec_index >= 0 ? hweight_long(ssp_mask) : 0;
+ comp_num = hdmi_num + ssp_num;
+
+ mach_params = &mach->mach_params;
+ ret = get_sdw_dailink_info(mach_params->links,
+ &sdw_be_num, &sdw_cpu_dai_num);
+ if (ret < 0) {
+ dev_err(dev, "failed to get sdw link info %d", ret);
+ return ret;
+ }
+
+ /* enable dmic01 & dmic16k */
+ dmic_num = (sof_sdw_quirk & SOF_SDW_PCH_DMIC) ? 2 : 0;
+ comp_num += dmic_num;
+
+ dev_dbg(dev, "sdw %d, ssp %d, dmic %d, hdmi %d", sdw_be_num, ssp_num,
+ dmic_num, hdmi_num);
+
+ /* allocate BE dailinks */
+ num_links = comp_num + sdw_be_num;
+ links = devm_kcalloc(dev, num_links, sizeof(*links), GFP_KERNEL);
+
+ /* allocated CPU DAIs */
+ total_cpu_dai_num = comp_num + sdw_cpu_dai_num;
+ cpus = devm_kcalloc(dev, total_cpu_dai_num, sizeof(*cpus),
+ GFP_KERNEL);
+
+ if (!links || !cpus)
+ return -ENOMEM;
+
+ /* SDW */
+ if (!sdw_be_num)
+ goto SSP;
+
+ adr_link = mach_params->links;
+ if (!adr_link)
+ return -EINVAL;
+
+ /*
+ * SoundWire Slaves aggregated in the same group may be
+ * located on different hardware links. Clear array to indicate
+ * CPU DAIs for this group have not been generated.
+ */
+ for (i = 0; i < SDW_MAX_GROUPS; i++)
+ group_generated[i] = false;
+
+ /* generate DAI links by each sdw link */
+ for (; adr_link->num_adr; adr_link++) {
+ const struct snd_soc_acpi_endpoint *endpoint;
+
+ endpoint = adr_link->adr_d->endpoints;
+ if (endpoint->aggregated && !endpoint->group_id) {
+ dev_err(dev, "invalid group id on link %x",
+ adr_link->mask);
+ continue;
+ }
+
+ /* this group has been generated */
+ if (endpoint->aggregated &&
+ group_generated[endpoint->group_id])
+ continue;
+
+ ret = create_sdw_dailink(dev, &be_id, links, sdw_be_num,
+ sdw_cpu_dai_num, cpus, adr_link,
+ &cpu_id, group_generated);
+ if (ret < 0) {
+ dev_err(dev, "failed to create dai link %d", be_id);
+ return -ENOMEM;
+ }
+ }
+
+ /* non-sdw DAI follows sdw DAI */
+ link_id = be_id;
+
+ /* get BE ID for non-sdw DAI */
+ be_id = get_next_be_id(links, be_id);
+
+SSP:
+ /* SSP */
+ if (!ssp_num)
+ goto DMIC;
+
+ for (i = 0, j = 0; ssp_mask; i++, ssp_mask >>= 1) {
+ struct sof_sdw_codec_info *info;
+ int playback, capture;
+ char *codec_name;
+
+ if (!(ssp_mask & 0x1))
+ continue;
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "SSP%d-Codec", i);
+ if (!name)
+ return -ENOMEM;
+
+ cpu_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", i);
+ if (!cpu_name)
+ return -ENOMEM;
+
+ ssp_components = devm_kzalloc(dev, sizeof(*ssp_components),
+ GFP_KERNEL);
+ if (!ssp_components)
+ return -ENOMEM;
+
+ info = &codec_info_list[ssp_codec_index];
+ codec_name = devm_kasprintf(dev, GFP_KERNEL, "i2c-%s:0%d",
+ info->acpi_id, j++);
+ if (!codec_name)
+ return -ENOMEM;
+
+ ssp_components->name = codec_name;
+ ssp_components->dai_name = info->dai_name;
+ cpus[cpu_id].dai_name = cpu_name;
+
+ playback = info->direction[SNDRV_PCM_STREAM_PLAYBACK];
+ capture = info->direction[SNDRV_PCM_STREAM_CAPTURE];
+ init_dai_link(links + link_id, be_id, name,
+ playback, capture,
+ cpus + cpu_id, 1,
+ ssp_components, 1,
+ NULL, info->ops);
+
+ ret = info->init(NULL, links + link_id, info, 0);
+ if (ret < 0)
+ return ret;
+
+ INC_ID(be_id, cpu_id, link_id);
+ }
+
+DMIC:
+ /* dmic */
+ if (dmic_num > 0) {
+ cpus[cpu_id].dai_name = "DMIC01 Pin";
+ init_dai_link(links + link_id, be_id, "dmic01",
+ 0, 1, // DMIC only supports capture
+ cpus + cpu_id, 1,
+ dmic_component, 1,
+ sof_sdw_dmic_init, NULL);
+ INC_ID(be_id, cpu_id, link_id);
+
+ cpus[cpu_id].dai_name = "DMIC16k Pin";
+ init_dai_link(links + link_id, be_id, "dmic16k",
+ 0, 1, // DMIC only supports capture
+ cpus + cpu_id, 1,
+ dmic_component, 1,
+ /* don't call sof_sdw_dmic_init() twice */
+ NULL, NULL);
+ INC_ID(be_id, cpu_id, link_id);
+ }
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ /* HDMI */
+ if (hdmi_num > 0) {
+ idisp_components = devm_kcalloc(dev, hdmi_num,
+ sizeof(*idisp_components),
+ GFP_KERNEL);
+ if (!idisp_components)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdmi_num; i++) {
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "iDisp%d", i + 1);
+ if (!name)
+ return -ENOMEM;
+
+ idisp_components[i].name = "ehdaudio0D2";
+ idisp_components[i].dai_name = devm_kasprintf(dev,
+ GFP_KERNEL,
+ "intel-hdmi-hifi%d",
+ i + 1);
+ if (!idisp_components[i].dai_name)
+ return -ENOMEM;
+
+ cpu_name = devm_kasprintf(dev, GFP_KERNEL,
+ "iDisp%d Pin", i + 1);
+ if (!cpu_name)
+ return -ENOMEM;
+
+ cpus[cpu_id].dai_name = cpu_name;
+ init_dai_link(links + link_id, be_id, name,
+ 1, 0, // HDMI only supports playback
+ cpus + cpu_id, 1,
+ idisp_components + i, 1,
+ sof_sdw_hdmi_init, NULL);
+ INC_ID(be_id, cpu_id, link_id);
+ }
+#endif
+
+ card->dai_link = links;
+ card->num_links = num_links;
+
+ return 0;
+}
+
+/* SoC card */
+static const char sdw_card_long_name[] = "Intel Soundwire SOF";
+
+static struct snd_soc_card card_sof_sdw = {
+ .name = "soundwire",
+ .late_probe = sof_sdw_hdmi_card_late_probe,
+ .codec_conf = codec_conf,
+ .num_configs = ARRAY_SIZE(codec_conf),
+};
+
+static int mc_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &card_sof_sdw;
+ struct snd_soc_acpi_mach *mach;
+ struct mc_private *ctx;
+ int ret;
+
+ dev_dbg(&pdev->dev, "Entry %s\n", __func__);
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
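+ /* apply board-specific overrides to sof_sdw_quirk via DMI matching */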
+ dmi_check_system(sof_sdw_quirk_table);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+#endif
+
+ card->dev = &pdev->dev;
+
+ mach = pdev->dev.platform_data;
+ ret = sof_card_dai_links_create(&pdev->dev, mach,
+ card);
+ if (ret < 0)
+ return ret;
+
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
+ snd_soc_card_set_drvdata(card, ctx);
+
+ card->components = devm_kasprintf(card->dev, GFP_KERNEL,
+ "cfg-spk:%d",
+ (sof_sdw_quirk & SOF_SDW_FOUR_SPK) ? 4 : 2);
+ if (!card->components)
+ return -ENOMEM;
+
+ card->long_name = sdw_card_long_name;
+
+ /* Register the card */
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret) {
+ dev_err(card->dev, "snd_soc_register_card failed %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, card);
+
+ return ret;
+}
+
+static struct platform_driver sof_sdw_driver = {
+ .driver = {
+ .name = "sof_sdw",
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = mc_probe,
+};
+
+module_platform_driver(sof_sdw_driver);
+
+MODULE_DESCRIPTION("ASoC SoundWire Generic Machine driver");
+MODULE_AUTHOR("Bard Liao <yung-chuan.liao@linux.intel.com>");
+MODULE_AUTHOR("Rander Wang <rander.wang@linux.intel.com>");
+MODULE_AUTHOR("Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sof_sdw");
diff --git a/sound/soc/intel/boards/sof_sdw_common.h b/sound/soc/intel/boards/sof_sdw_common.h
new file mode 100644
index 000000000000..dd593ff3575b
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_common.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Intel Corporation
+ */
+
+/*
+ * sof_sdw_common.h - prototypes for common helpers
+ */
+
+#ifndef SND_SOC_SOF_SDW_COMMON_H
+#define SND_SOC_SOF_SDW_COMMON_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define MAX_NO_PROPS 2
+#define MAX_HDMI_NUM 4
+#define SDW_DMIC_DAI_ID 4
+#define SDW_MAX_CPU_DAIS 16
+#define SDW_INTEL_BIDIR_PDI_BASE 2
+
+/* 8 combinations with 4 links + unused group 0 */
+#define SDW_MAX_GROUPS 9
+
+enum {
+ SOF_RT711_JD_SRC_JD1 = 1,
+ SOF_RT711_JD_SRC_JD2 = 2,
+};
+
+enum {
+ SOF_PRE_TGL_HDMI_COUNT = 3,
+ SOF_TGL_HDMI_COUNT = 4,
+};
+
+enum {
+ SOF_I2S_SSP0 = BIT(0),
+ SOF_I2S_SSP1 = BIT(1),
+ SOF_I2S_SSP2 = BIT(2),
+ SOF_I2S_SSP3 = BIT(3),
+ SOF_I2S_SSP4 = BIT(4),
+ SOF_I2S_SSP5 = BIT(5),
+};
+
+#define SOF_RT711_JDSRC(quirk) ((quirk) & GENMASK(1, 0))
+#define SOF_SDW_FOUR_SPK BIT(2)
+#define SOF_SDW_TGL_HDMI BIT(3)
+#define SOF_SDW_PCH_DMIC BIT(4)
+#define SOF_SSP_PORT(x) (((x) & GENMASK(5, 0)) << 5)
+#define SOF_SSP_GET_PORT(quirk) (((quirk) >> 5) & GENMASK(5, 0))
+#define SOF_RT715_DAI_ID_FIX BIT(11)
+#define SOF_SDW_NO_AGGREGATION BIT(12)
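+
+/*
+ * sof_sdw_quirk bit layout:
+ * [1:0] RT711 jack detection source
+ * [2] four-speaker configuration
+ * [3] TGL HDMI count (4 ports instead of 3)
+ * [4] PCH DMIC in use
+ * [10:5] SSP port mask for I2S/TDM codecs
+ * [11] fix the RT715 DAI ID for UCM
+ * [12] don't aggregate amplifiers
+ */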
+
+struct sof_sdw_codec_info {
+ const int id;
+ int amp_num;
+ const u8 acpi_id[ACPI_ID_LEN];
+ const bool direction[2]; // playback & capture support
+ const char *dai_name;
+ const struct snd_soc_ops *ops;
+
+ int (*init)(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback);
+};
+
+struct mc_private {
+ struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
+ struct snd_soc_jack sdw_headset;
+};
+
+extern unsigned long sof_sdw_quirk;
+
+/* generic HDMI support */
+int sof_sdw_hdmi_init(struct snd_soc_pcm_runtime *rtd);
+
+int sof_sdw_hdmi_card_late_probe(struct snd_soc_card *card);
+
+/* DMIC support */
+int sof_sdw_dmic_init(struct snd_soc_pcm_runtime *rtd);
+
+/* RT711 support */
+int sof_sdw_rt711_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback);
+
+/* RT700 support */
+int sof_sdw_rt700_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback);
+
+/* RT1308 support */
+extern struct snd_soc_ops sof_sdw_rt1308_i2s_ops;
+
+int sof_sdw_rt1308_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback);
+
+/* RT715 support */
+int sof_sdw_rt715_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback);
+
+/* RT5682 support */
+int sof_sdw_rt5682_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback);
+
+#endif
diff --git a/sound/soc/intel/boards/sof_sdw_dmic.c b/sound/soc/intel/boards/sof_sdw_dmic.c
new file mode 100644
index 000000000000..e92176bf0ad4
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_dmic.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw_dmic - Helpers to handle dmic from generic machine driver
+ */
+
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "sof_sdw_common.h"
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+};
+
+static const struct snd_soc_dapm_route dmic_map[] = {
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
+};
+
+int sof_sdw_dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+ ARRAY_SIZE(dmic_widgets));
+ if (ret) {
+ dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+ /* Don't need to add routes if widget addition failed */
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+ ARRAY_SIZE(dmic_map));
+
+ if (ret)
+ dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+ return ret;
+}
diff --git a/sound/soc/intel/boards/sof_sdw_hdmi.c b/sound/soc/intel/boards/sof_sdw_hdmi.c
new file mode 100644
index 000000000000..c7b5612a39e6
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_hdmi.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw_hdmi - Helpers to handle HDMI from generic machine driver
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/jack.h>
+#include "sof_sdw_common.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "hda_dsp_common.h"
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+static struct snd_soc_jack hdmi[MAX_HDMI_NUM];
+
+struct hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+int sof_sdw_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct mc_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ /* dai_link id is 1:1 mapped to the PCM device */
+ pcm->device = rtd->dai_link->id;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+#define NAME_SIZE 32
+int sof_sdw_hdmi_card_late_probe(struct snd_soc_card *card)
+{
+ struct mc_private *ctx = snd_soc_card_get_drvdata(card);
+ struct hdmi_pcm *pcm;
+ struct snd_soc_component *component = NULL;
+ int err, i = 0;
+ char jack_name[NAME_SIZE];
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &hdmi[i],
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = snd_jack_add_new_kctl(hdmi[i].jack,
+ jack_name, SND_JACK_AVOUT);
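+ /* a missing kctl is not fatal; jack reporting still works */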
+ if (err)
+ dev_warn(component->dev, "failed creating Jack kctl\n");
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &hdmi[i]);
+ if (err < 0)
+ return err;
+
+ i++;
+ }
+
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+#else
+int sof_sdw_hdmi_card_late_probe(struct snd_soc_card *card)
+{
+ return 0;
+}
+#endif
diff --git a/sound/soc/intel/boards/sof_sdw_rt1308.c b/sound/soc/intel/boards/sof_sdw_rt1308.c
new file mode 100644
index 000000000000..321768e54d08
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_rt1308.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw_rt1308 - Helpers to handle RT1308 from generic machine driver
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "sof_sdw_common.h"
+#include "../../codecs/rt1308.h"
+
+static const struct snd_soc_dapm_widget rt1308_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+/*
+ * dapm routes for rt1308 are registered dynamically, according to the
+ * number of RT1308s used. The first two entries are registered for the
+ * single-codec case; the last two are added as well when two RT1308s
+ * are used.
+ */
+static const struct snd_soc_dapm_route rt1308_map[] = {
+ { "Speaker", NULL, "rt1308-1 SPOL" },
+ { "Speaker", NULL, "rt1308-1 SPOR" },
+ { "Speaker", NULL, "rt1308-2 SPOL" },
+ { "Speaker", NULL, "rt1308-2 SPOR" },
+};
+
+static const struct snd_kcontrol_new rt1308_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+};
+
+static int first_spk_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ card->components = devm_kasprintf(card->dev, GFP_KERNEL,
+ "%s spk:rt1308",
+ card->components);
+ if (!card->components)
+ return -ENOMEM;
+
+ ret = snd_soc_add_card_controls(card, rt1308_controls,
+ ARRAY_SIZE(rt1308_controls));
+ if (ret) {
+ dev_err(card->dev, "rt1308 controls addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, rt1308_widgets,
+ ARRAY_SIZE(rt1308_widgets));
+ if (ret) {
+ dev_err(card->dev, "rt1308 widgets addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt1308_map, 2);
+ if (ret)
+ dev_err(rtd->dev, "failed to add first SPK map: %d\n", ret);
+
+ return ret;
+}
+
+static int second_spk_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt1308_map + 2, 2);
+ if (ret)
+ dev_err(rtd->dev, "failed to add second SPK map: %d\n", ret);
+
+ return ret;
+}
+
+static int all_spk_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+
+ ret = first_spk_init(rtd);
+ if (ret)
+ return ret;
+
+ return second_spk_init(rtd);
+}
+
+static int rt1308_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int clk_id, clk_freq, pll_out;
+ int err;
+
+ clk_id = RT1308_PLL_S_MCLK;
+ clk_freq = 38400000;
+
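+ /* run the codec PLL at 512x the frame rate, sourced from the 38.4 MHz MCLK */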
+ pll_out = params_rate(params) * 512;
+
+ /* Set rt1308 pll */
+ err = snd_soc_dai_set_pll(codec_dai, 0, clk_id, clk_freq, pll_out);
+ if (err < 0) {
+ dev_err(card->dev, "Failed to set RT1308 PLL: %d\n", err);
+ return err;
+ }
+
+ /* Set rt1308 sysclk */
+ err = snd_soc_dai_set_sysclk(codec_dai, RT1308_FS_SYS_S_PLL, pll_out,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "Failed to set RT1308 SYSCLK: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* machine stream operations */
+struct snd_soc_ops sof_sdw_rt1308_i2s_ops = {
+ .hw_params = rt1308_i2s_hw_params,
+};
+
+int sof_sdw_rt1308_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback)
+{
+ info->amp_num++;
+ if (info->amp_num == 1)
+ dai_links->init = first_spk_init;
+
+ if (info->amp_num == 2) {
+ /*
+ * If two RT1308s share one dai link, the link's init function
+ * was first set for the first speaker alone; replace it so
+ * that all speakers are initialized once the second speaker
+ * is found.
+ */
+ if (dai_links->init)
+ dai_links->init = all_spk_init;
+ else
+ dai_links->init = second_spk_init;
+ }
+
+ return 0;
+}
diff --git a/sound/soc/intel/boards/sof_sdw_rt5682.c b/sound/soc/intel/boards/sof_sdw_rt5682.c
new file mode 100644
index 000000000000..5aa6211a1ed9
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_rt5682.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw_rt5682 - Helpers to handle RT5682 from generic machine driver
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/jack.h>
+#include "sof_sdw_common.h"
+
+static const struct snd_soc_dapm_widget rt5682_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route rt5682_map[] = {
+ /* Headphones */
+ { "Headphone", NULL, "rt5682 HPOL" },
+ { "Headphone", NULL, "rt5682 HPOR" },
+ { "rt5682 IN1P", NULL, "Headset Mic" },
+};
+
+static const struct snd_kcontrol_new rt5682_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static struct snd_soc_jack_pin rt5682_jack_pins[] = {
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int rt5682_rtd_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct mc_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_jack *jack;
+ int ret;
+
+ card->components = devm_kasprintf(card->dev, GFP_KERNEL,
+ "%s hs:rt5682",
+ card->components);
+ if (!card->components)
+ return -ENOMEM;
+
+ ret = snd_soc_add_card_controls(card, rt5682_controls,
+ ARRAY_SIZE(rt5682_controls));
+ if (ret) {
+ dev_err(card->dev, "rt5682 control addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, rt5682_widgets,
+ ARRAY_SIZE(rt5682_widgets));
+ if (ret) {
+ dev_err(card->dev, "rt5682 widgets addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt5682_map,
+ ARRAY_SIZE(rt5682_map));
+
+ if (ret) {
+ dev_err(card->dev, "rt5682 map addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->sdw_headset,
+ rt5682_jack_pins,
+ ARRAY_SIZE(rt5682_jack_pins));
+ if (ret) {
+ dev_err(rtd->card->dev, "Headset Jack creation failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ jack = &ctx->sdw_headset;
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+
+ if (ret)
+ dev_err(rtd->card->dev, "Headset Jack call-back failed: %d\n",
+ ret);
+
+ return ret;
+}
+
+int sof_sdw_rt5682_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback)
+{
+ /*
+ * The headset codec needs to be initialized only once;
+ * do it from the playback dai link.
+ */
+ if (!playback)
+ return 0;
+
+ dai_links->init = rt5682_rtd_init;
+
+ return 0;
+}
diff --git a/sound/soc/intel/boards/sof_sdw_rt700.c b/sound/soc/intel/boards/sof_sdw_rt700.c
new file mode 100644
index 000000000000..2ee4e6910d7f
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_rt700.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw_rt700 - Helpers to handle RT700 from generic machine driver
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/jack.h>
+#include "sof_sdw_common.h"
+
+static const struct snd_soc_dapm_widget rt700_widgets[] = {
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_MIC("AMIC", NULL),
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+static const struct snd_soc_dapm_route rt700_map[] = {
+ /* Headphones */
+ { "Headphones", NULL, "HP" },
+ { "Speaker", NULL, "SPK" },
+ { "MIC2", NULL, "AMIC" },
+};
+
+static const struct snd_kcontrol_new rt700_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphones"),
+ SOC_DAPM_PIN_SWITCH("AMIC"),
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+};
+
+static struct snd_soc_jack_pin rt700_jack_pins[] = {
+ {
+ .pin = "Headphones",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "AMIC",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int rt700_rtd_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct mc_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_jack *jack;
+ int ret;
+
+ card->components = devm_kasprintf(card->dev, GFP_KERNEL,
+ "%s hs:rt700",
+ card->components);
+ if (!card->components)
+ return -ENOMEM;
+
+ ret = snd_soc_add_card_controls(card, rt700_controls,
+ ARRAY_SIZE(rt700_controls));
+ if (ret) {
+ dev_err(card->dev, "rt700 controls addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, rt700_widgets,
+ ARRAY_SIZE(rt700_widgets));
+ if (ret) {
+ dev_err(card->dev, "rt700 widgets addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt700_map,
+ ARRAY_SIZE(rt700_map));
+
+ if (ret) {
+ dev_err(card->dev, "rt700 map addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->sdw_headset,
+ rt700_jack_pins,
+ ARRAY_SIZE(rt700_jack_pins));
+ if (ret) {
+ dev_err(rtd->card->dev, "Headset Jack creation failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ jack = &ctx->sdw_headset;
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+ if (ret)
+ dev_err(rtd->card->dev, "Headset Jack call-back failed: %d\n",
+ ret);
+
+ return ret;
+}
+
+int sof_sdw_rt700_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback)
+{
+ /*
+ * The headset codec needs to be initialized only once;
+ * do it from the playback dai link.
+ */
+ if (!playback)
+ return 0;
+
+ dai_links->init = rt700_rtd_init;
+
+ return 0;
+}
diff --git a/sound/soc/intel/boards/sof_sdw_rt711.c b/sound/soc/intel/boards/sof_sdw_rt711.c
new file mode 100644
index 000000000000..2a4917e3d561
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_rt711.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw_rt711 - Helpers to handle RT711 from generic machine driver
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include <sound/jack.h>
+#include "sof_sdw_common.h"
+
+/*
+ * Note this MUST be called before snd_soc_register_card(), so that the props
+ * are in place before the codec component driver's probe function parses them.
+ */
+static int rt711_add_codec_device_props(const char *sdw_dev_name)
+{
+ struct property_entry props[MAX_NO_PROPS] = {};
+ struct device *sdw_dev;
+ int ret;
+
+ sdw_dev = bus_find_device_by_name(&sdw_bus_type, NULL, sdw_dev_name);
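+ /* the codec may not be enumerated yet; let deferred probe retry */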
+ if (!sdw_dev)
+ return -EPROBE_DEFER;
+
+ if (SOF_RT711_JDSRC(sof_sdw_quirk)) {
+ props[0] = PROPERTY_ENTRY_U32("realtek,jd-src",
+ SOF_RT711_JDSRC(sof_sdw_quirk));
+ }
+
+ ret = device_add_properties(sdw_dev, props);
+ put_device(sdw_dev);
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget rt711_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route rt711_map[] = {
+ /* Headphones */
+ { "Headphone", NULL, "rt711 HP" },
+ { "rt711 MIC2", NULL, "Headset Mic" },
+};
+
+static const struct snd_kcontrol_new rt711_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+static struct snd_soc_jack_pin rt711_jack_pins[] = {
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static int rt711_rtd_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct mc_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_jack *jack;
+ int ret;
+
+ card->components = devm_kasprintf(card->dev, GFP_KERNEL,
+ "%s hs:rt711",
+ card->components);
+ if (!card->components)
+ return -ENOMEM;
+
+ ret = snd_soc_add_card_controls(card, rt711_controls,
+ ARRAY_SIZE(rt711_controls));
+ if (ret) {
+ dev_err(card->dev, "rt711 controls addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_new_controls(&card->dapm, rt711_widgets,
+ ARRAY_SIZE(rt711_widgets));
+ if (ret) {
+ dev_err(card->dev, "rt711 widgets addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt711_map,
+ ARRAY_SIZE(rt711_map));
+
+ if (ret) {
+ dev_err(card->dev, "rt711 map addition failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->sdw_headset,
+ rt711_jack_pins,
+ ARRAY_SIZE(rt711_jack_pins));
+ if (ret) {
+ dev_err(rtd->card->dev, "Headset Jack creation failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ jack = &ctx->sdw_headset;
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+
+ if (ret)
+ dev_err(rtd->card->dev, "Headset Jack call-back failed: %d\n",
+ ret);
+
+ return ret;
+}
+
+int sof_sdw_rt711_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback)
+{
+ int ret;
+
+ /*
+ * The headset codec needs to be initialized only once;
+ * do it from the playback dai link.
+ */
+ if (!playback)
+ return 0;
+
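+ /*
+ * SoundWire device names follow sdw:<link>:<mfg>:<part>:<class>,
+ * so this is the RT711 (Realtek 0x25d, part 0x711) on link 0.
+ */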
+ ret = rt711_add_codec_device_props("sdw:0:25d:711:0");
+ if (ret < 0)
+ return ret;
+
+ dai_links->init = rt711_rtd_init;
+
+ return 0;
+}
diff --git a/sound/soc/intel/boards/sof_sdw_rt715.c b/sound/soc/intel/boards/sof_sdw_rt715.c
new file mode 100644
index 000000000000..321e1cbc03ed
--- /dev/null
+++ b/sound/soc/intel/boards/sof_sdw_rt715.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * sof_sdw_rt715 - Helpers to handle RT715 from generic machine driver
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "sof_sdw_common.h"
+
+static int rt715_rtd_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+
+ card->components = devm_kasprintf(card->dev, GFP_KERNEL,
+ "%s mic:rt715",
+ card->components);
+ if (!card->components)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int sof_sdw_rt715_init(const struct snd_soc_acpi_link_adr *link,
+ struct snd_soc_dai_link *dai_links,
+ struct sof_sdw_codec_info *info,
+ bool playback)
+{
+ /*
+ * The DAI ID is fixed at SDW_DMIC_DAI_ID for RT715 to keep the
+ * sdw DMIC and HDMI settings static in UCM.
+ */
+ if (sof_sdw_quirk & SOF_RT715_DAI_ID_FIX)
+ dai_links->id = SDW_DMIC_DAI_ID;
+
+ dai_links->init = rt715_rtd_init;
+
+ return 0;
+}
diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
index 4a5adae1d785..f5092bc48364 100644
--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
@@ -65,7 +65,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
},
{
.id = "104C5122",
- .drv_name = "bxt-pcm512x",
+ .drv_name = "sof_pcm512x",
.sof_fw_filename = "sof-apl.ri",
.sof_tplg_filename = "sof-apl-pcm512x.tplg",
},
diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
index d0fb43c2b9f6..2752dc955733 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
@@ -174,6 +174,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
.sof_fw_filename = "sof-cht.ri",
.sof_tplg_filename = "sof-cht-cx2072x.tplg",
},
+ {
+ .id = "104C5122",
+ .drv_name = "sof_pcm512x",
+ .sof_fw_filename = "sof-cht.ri",
+ .sof_tplg_filename = "sof-cht-src-50khz-pcm512x.tplg",
+ },
+
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
/*
* This is always last in the table so that it is selected only when
diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
index f55634c4c2e8..bcedec6c6117 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
@@ -59,42 +59,112 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
-static const u64 rt711_0_adr[] = {
- 0x000010025D071100
+static const struct snd_soc_acpi_endpoint single_endpoint = {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
};
-static const u64 rt1308_1_adr[] = {
- 0x000110025D130800
+static const struct snd_soc_acpi_endpoint spk_l_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
};
-static const u64 rt1308_2_adr[] = {
- 0x000210025D130800
+static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
};
-static const u64 rt715_3_adr[] = {
- 0x000310025D071500
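+/*
+ * _ADR decode: bits [51:48] hold the SoundWire link ID, [39:24] the
+ * manufacturer ID and [23:8] the part ID, so 0x000110025D070000 is a
+ * Realtek (0x025D) RT700 (0x0700) on link 1.
+ */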
+static const struct snd_soc_acpi_adr_device rt700_1_adr[] = {
+ {
+ .adr = 0x000110025D070000,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_link_adr cml_rvp[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt700_1_adr),
+ .adr_d = rt700_1_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
+ {
+ .adr = 0x000010025D071100,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1308_1_adr[] = {
+ {
+ .adr = 0x000110025D130800,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1308_2_adr[] = {
+ {
+ .adr = 0x000210025D130800,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
+ {
+ .adr = 0x000110025D130800,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
+ {
+ .adr = 0x000210025D130800,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
+ {
+ .adr = 0x000310025D071500,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
};
static const struct snd_soc_acpi_link_adr cml_3_in_1_default[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt711_0_adr),
- .adr = rt711_0_adr,
+ .adr_d = rt711_0_adr,
},
{
.mask = BIT(1),
- .num_adr = ARRAY_SIZE(rt1308_1_adr),
- .adr = rt1308_1_adr,
+ .num_adr = ARRAY_SIZE(rt1308_1_group1_adr),
+ .adr_d = rt1308_1_group1_adr,
},
{
.mask = BIT(2),
- .num_adr = ARRAY_SIZE(rt1308_2_adr),
- .adr = rt1308_2_adr,
+ .num_adr = ARRAY_SIZE(rt1308_2_group1_adr),
+ .adr_d = rt1308_2_group1_adr,
},
{
.mask = BIT(3),
.num_adr = ARRAY_SIZE(rt715_3_adr),
- .adr = rt715_3_adr,
+ .adr_d = rt715_3_adr,
},
{}
};
@@ -103,17 +173,17 @@ static const struct snd_soc_acpi_link_adr cml_3_in_1_mono_amp[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt711_0_adr),
- .adr = rt711_0_adr,
+ .adr_d = rt711_0_adr,
},
{
.mask = BIT(1),
.num_adr = ARRAY_SIZE(rt1308_1_adr),
- .adr = rt1308_1_adr,
+ .adr_d = rt1308_1_adr,
},
{
.mask = BIT(3),
.num_adr = ARRAY_SIZE(rt715_3_adr),
- .adr = rt715_3_adr,
+ .adr_d = rt715_3_adr,
},
{}
};
@@ -122,7 +192,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[] = {
{
.link_mask = 0xF, /* 4 active links required */
.links = cml_3_in_1_default,
- .drv_name = "sdw_rt711_rt1308_rt715",
+ .drv_name = "sof_sdw",
.sof_fw_filename = "sof-cml.ri",
.sof_tplg_filename = "sof-cml-rt711-rt1308-rt715.tplg",
},
@@ -134,13 +204,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[] = {
*/
.link_mask = 0xF,
.links = cml_3_in_1_mono_amp,
- .drv_name = "sdw_rt711_rt1308_rt715",
+ .drv_name = "sof_sdw",
.sof_fw_filename = "sof-cml.ri",
.sof_tplg_filename = "sof-cml-rt711-rt1308-mono-rt715.tplg",
},
{
.link_mask = 0x2, /* RT700 connected on Link1 */
- .drv_name = "sdw_rt700",
+ .links = cml_rvp,
+ .drv_name = "sof_sdw",
.sof_fw_filename = "sof-cml.ri",
.sof_tplg_filename = "sof-cml-rt700.tplg",
},
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index 752733013d54..ef8500349f2f 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -33,55 +33,112 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = {
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_icl_machines);
-static const u64 rt700_0_adr[] = {
- 0x000010025D070000
+static const struct snd_soc_acpi_endpoint single_endpoint = {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+};
+
+static const struct snd_soc_acpi_endpoint spk_l_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+};
+
+static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+};
+
+static const struct snd_soc_acpi_adr_device rt700_0_adr[] = {
+ {
+ .adr = 0x000010025D070000,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
};
static const struct snd_soc_acpi_link_adr icl_rvp[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt700_0_adr),
- .adr = rt700_0_adr,
+ .adr_d = rt700_0_adr,
},
{}
};
-static const u64 rt711_0_adr[] = {
- 0x000010025D071100
+static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
+ {
+ .adr = 0x000010025D071100,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1308_1_adr[] = {
+ {
+ .adr = 0x000110025D130800,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
};
-static const u64 rt1308_1_adr[] = {
- 0x000110025D130800
+static const struct snd_soc_acpi_adr_device rt1308_2_adr[] = {
+ {
+ .adr = 0x000210025D130800,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
};
-static const u64 rt1308_2_adr[] = {
- 0x000210025D130800
+static const struct snd_soc_acpi_adr_device rt1308_1_group1_adr[] = {
+ {
+ .adr = 0x000110025D130800,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ }
};
-static const u64 rt715_3_adr[] = {
- 0x000310025D071500
+static const struct snd_soc_acpi_adr_device rt1308_2_group1_adr[] = {
+ {
+ .adr = 0x000210025D130800,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt715_3_adr[] = {
+ {
+ .adr = 0x000310025D071500,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
};
static const struct snd_soc_acpi_link_adr icl_3_in_1_default[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt711_0_adr),
- .adr = rt711_0_adr,
+ .adr_d = rt711_0_adr,
},
{
.mask = BIT(1),
- .num_adr = ARRAY_SIZE(rt1308_1_adr),
- .adr = rt1308_1_adr,
+ .num_adr = ARRAY_SIZE(rt1308_1_group1_adr),
+ .adr_d = rt1308_1_group1_adr,
},
{
.mask = BIT(2),
- .num_adr = ARRAY_SIZE(rt1308_2_adr),
- .adr = rt1308_2_adr,
+ .num_adr = ARRAY_SIZE(rt1308_2_group1_adr),
+ .adr_d = rt1308_2_group1_adr,
},
{
.mask = BIT(3),
.num_adr = ARRAY_SIZE(rt715_3_adr),
- .adr = rt715_3_adr,
+ .adr_d = rt715_3_adr,
},
{}
};
@@ -90,17 +147,17 @@ static const struct snd_soc_acpi_link_adr icl_3_in_1_mono_amp[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt711_0_adr),
- .adr = rt711_0_adr,
+ .adr_d = rt711_0_adr,
},
{
.mask = BIT(1),
.num_adr = ARRAY_SIZE(rt1308_1_adr),
- .adr = rt1308_1_adr,
+ .adr_d = rt1308_1_adr,
},
{
.mask = BIT(3),
.num_adr = ARRAY_SIZE(rt715_3_adr),
- .adr = rt715_3_adr,
+ .adr_d = rt715_3_adr,
},
{}
};
@@ -109,21 +166,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_sdw_machines[] = {
{
.link_mask = 0xF, /* 4 active links required */
.links = icl_3_in_1_default,
- .drv_name = "sdw_rt711_rt1308_rt715",
+ .drv_name = "sof_sdw",
.sof_fw_filename = "sof-icl.ri",
.sof_tplg_filename = "sof-icl-rt711-rt1308-rt715.tplg",
},
{
.link_mask = 0xB, /* 3 active links required */
.links = icl_3_in_1_mono_amp,
- .drv_name = "sdw_rt711_rt1308_rt715",
+ .drv_name = "sof_sdw",
.sof_fw_filename = "sof-icl.ri",
.sof_tplg_filename = "sof-icl-rt711-rt1308-rt715-mono.tplg",
},
{
.link_mask = 0x1, /* rt700 connected on link0 */
.links = icl_rvp,
- .drv_name = "sdw_rt700",
+ .drv_name = "sof_sdw",
.sof_fw_filename = "sof-icl.ri",
.sof_tplg_filename = "sof-icl-rt700.tplg",
},
diff --git a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
index ed2b125f6a11..4388a32718d8 100644
--- a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
@@ -2,20 +2,50 @@
/*
* soc-apci-intel-jsl-match.c - tables and support for JSL ACPI enumeration.
*
- * Copyright (c) 2019, Intel Corporation.
+ * Copyright (c) 2019-2020, Intel Corporation.
*
*/
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
+static struct snd_soc_acpi_codecs jsl_7219_98373_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98373"}
+};
+
+static struct snd_soc_acpi_codecs rt1015_spk = {
+ .num_codecs = 1,
+ .codecs = {"10EC1015"}
+};
+
+/*
+ * When adding a new entry to the snd_soc_acpi_intel_jsl_machines array,
+ * use the .quirk_data member to distinguish between machine drivers,
+ * and keep the ACPI .id field unchanged for the common codec.
+ */
struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[] = {
{
.id = "DLGS7219",
.drv_name = "sof_da7219_max98373",
- .machine_quirk = snd_soc_acpi_codec_list,
.sof_fw_filename = "sof-jsl.ri",
.sof_tplg_filename = "sof-jsl-da7219.tplg",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &jsl_7219_98373_codecs,
+ },
+ {
+ .id = "DLGS7219",
+ .drv_name = "sof_da7219_max98360a",
+ .sof_fw_filename = "sof-jsl.ri",
+ .sof_tplg_filename = "sof-jsl-da7219-mx98360a.tplg",
+ },
+ {
+ .id = "10EC5682",
+ .drv_name = "jsl_rt5682_rt1015",
+ .sof_fw_filename = "sof-jsl.ri",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &rt1015_spk,
+ .sof_tplg_filename = "sof-jsl-rt5682-rt1015.tplg",
},
{},
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
index 5984dd151f3e..449d9d2286ae 100644
--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
@@ -14,20 +14,61 @@ static struct snd_soc_acpi_codecs tgl_codecs = {
.codecs = {"MX98357A"}
};
-static const u64 rt711_0_adr[] = {
- 0x000010025D071100
+static const struct snd_soc_acpi_endpoint single_endpoint = {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
};
-static const u64 rt1308_1_adr[] = {
- 0x000120025D130800,
- 0x000122025D130800
+static const struct snd_soc_acpi_endpoint spk_l_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+};
+
+static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+};
+
+static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
+ {
+ .adr = 0x000010025D071100,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
+};
+
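+/* two RT1308s on link 1 (unique IDs 0 and 2), aggregated as L/R in group 1 */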
+static const struct snd_soc_acpi_adr_device rt1308_1_adr[] = {
+ {
+ .adr = 0x000120025D130800,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ },
+ {
+ .adr = 0x000122025D130800,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt5682_0_adr[] = {
+ {
+ .adr = 0x000021025D568200,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ }
};
static const struct snd_soc_acpi_link_adr tgl_i2s_rt1308[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt711_0_adr),
- .adr = rt711_0_adr,
+ .adr_d = rt711_0_adr,
},
{}
};
@@ -36,24 +77,38 @@ static const struct snd_soc_acpi_link_adr tgl_rvp[] = {
{
.mask = BIT(0),
.num_adr = ARRAY_SIZE(rt711_0_adr),
- .adr = rt711_0_adr,
+ .adr_d = rt711_0_adr,
},
{
.mask = BIT(1),
.num_adr = ARRAY_SIZE(rt1308_1_adr),
- .adr = rt1308_1_adr,
+ .adr_d = rt1308_1_adr,
},
{}
};
+static const struct snd_soc_acpi_link_adr tgl_chromebook_base[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt5682_0_adr),
+ .adr_d = rt5682_0_adr,
+ },
+ {}
+};
+
+static struct snd_soc_acpi_codecs tgl_max98373_amp = {
+ .num_codecs = 1,
+ .codecs = {"MX98373"}
+};
+
struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[] = {
{
.id = "10EC1308",
- .drv_name = "rt711_rt1308",
+ .drv_name = "sof_sdw",
.link_mask = 0x1, /* RT711 on SoundWire link0 */
.links = tgl_i2s_rt1308,
.sof_fw_filename = "sof-tgl.ri",
- .sof_tplg_filename = "sof-tgl-rt711-rt1308.tplg",
+ .sof_tplg_filename = "sof-tgl-rt711-i2s-rt1308.tplg",
},
{
.id = "10EC5682",
@@ -63,6 +118,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[] = {
.sof_fw_filename = "sof-tgl.ri",
.sof_tplg_filename = "sof-tgl-max98357a-rt5682.tplg",
},
+ {
+ .id = "10EC5682",
+ .drv_name = "tgl_max98373_rt5682",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &tgl_max98373_amp,
+ .sof_fw_filename = "sof-tgl.ri",
+ .sof_tplg_filename = "sof-tgl-max98373-rt5682.tplg",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_tgl_machines);
@@ -72,10 +135,17 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[] = {
{
.link_mask = 0x3, /* rt711 on link 0 and 2 rt1308s on link 1 */
.links = tgl_rvp,
- .drv_name = "sdw_rt711_rt1308_rt715",
+ .drv_name = "sof_sdw",
.sof_fw_filename = "sof-tgl.ri",
.sof_tplg_filename = "sof-tgl-rt711-rt1308.tplg",
},
+ {
+ .link_mask = 0x1, /* this will only enable rt5682 for now */
+ .links = tgl_chromebook_base,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-tgl.ri",
+ .sof_tplg_filename = "sof-tgl-rt5682.tplg",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_tgl_sdw_machines);
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index 033d7c05d7fb..c183f8e94ee4 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -476,7 +476,7 @@ static int hsw_pcm_hw_params(struct snd_soc_component *component,
u8 channels;
int ret, dai;
- dai = mod_map[rtd->cpu_dai->id].dai_id;
+ dai = mod_map[asoc_rtd_to_cpu(rtd, 0)->id].dai_id;
pcm_data = &pdata->pcm[dai][substream->stream];
/* check if we are being called a subsequent time */
@@ -494,7 +494,7 @@ static int hsw_pcm_hw_params(struct snd_soc_component *component,
}
pcm_data->allocated = false;
- pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
+ pcm_data->stream = sst_hsw_stream_new(hsw, asoc_rtd_to_cpu(rtd, 0)->id,
hsw_notify_pointer, pcm_data);
if (pcm_data->stream == NULL) {
dev_err(rtd->dev, "error: failed to create stream\n");
@@ -509,7 +509,7 @@ static int hsw_pcm_hw_params(struct snd_soc_component *component,
path_id = SST_HSW_STREAM_PATH_SSP0_IN;
/* DSP stream type depends on DAI ID */
- switch (rtd->cpu_dai->id) {
+ switch (asoc_rtd_to_cpu(rtd, 0)->id) {
case 0:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
stream_type = SST_HSW_STREAM_TYPE_SYSTEM;
@@ -533,7 +533,7 @@ static int hsw_pcm_hw_params(struct snd_soc_component *component,
break;
default:
dev_err(rtd->dev, "error: invalid DAI ID %d\n",
- rtd->cpu_dai->id);
+ asoc_rtd_to_cpu(rtd, 0)->id);
return -EINVAL;
}
@@ -595,7 +595,7 @@ static int hsw_pcm_hw_params(struct snd_soc_component *component,
dmab = snd_pcm_get_dma_buf(substream);
ret = create_adsp_page_table(substream, pdata, rtd, runtime->dma_area,
- runtime->dma_bytes, rtd->cpu_dai->id);
+ runtime->dma_bytes, asoc_rtd_to_cpu(rtd, 0)->id);
if (ret < 0)
return ret;
@@ -608,7 +608,7 @@ static int hsw_pcm_hw_params(struct snd_soc_component *component,
pages = runtime->dma_bytes / PAGE_SIZE;
ret = sst_hsw_stream_buffer(hsw, pcm_data->stream,
- pdata->dmab[rtd->cpu_dai->id][substream->stream].addr,
+ pdata->dmab[asoc_rtd_to_cpu(rtd, 0)->id][substream->stream].addr,
pages, runtime->dma_bytes, 0,
snd_sgbuf_get_addr(dmab, 0) >> PAGE_SHIFT);
if (ret < 0) {
@@ -661,7 +661,7 @@ static int hsw_pcm_trigger(struct snd_soc_component *component,
snd_pcm_uframes_t pos;
int dai;
- dai = mod_map[rtd->cpu_dai->id].dai_id;
+ dai = mod_map[asoc_rtd_to_cpu(rtd, 0)->id].dai_id;
pcm_data = &pdata->pcm[dai][substream->stream];
sst_stream = pcm_data->stream;
@@ -770,7 +770,7 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_soc_component *component,
u32 position;
int dai;
- dai = mod_map[rtd->cpu_dai->id].dai_id;
+ dai = mod_map[asoc_rtd_to_cpu(rtd, 0)->id].dai_id;
pcm_data = &pdata->pcm[dai][substream->stream];
position = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
@@ -791,7 +791,7 @@ static int hsw_pcm_open(struct snd_soc_component *component,
struct sst_hsw *hsw = pdata->hsw;
int dai;
- dai = mod_map[rtd->cpu_dai->id].dai_id;
+ dai = mod_map[asoc_rtd_to_cpu(rtd, 0)->id].dai_id;
pcm_data = &pdata->pcm[dai][substream->stream];
mutex_lock(&pcm_data->mutex);
@@ -801,7 +801,7 @@ static int hsw_pcm_open(struct snd_soc_component *component,
snd_soc_set_runtime_hwparams(substream, &hsw_pcm_hardware);
- pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
+ pcm_data->stream = sst_hsw_stream_new(hsw, asoc_rtd_to_cpu(rtd, 0)->id,
hsw_notify_pointer, pcm_data);
if (pcm_data->stream == NULL) {
dev_err(rtd->dev, "error: failed to create stream\n");
@@ -824,7 +824,7 @@ static int hsw_pcm_close(struct snd_soc_component *component,
struct sst_hsw *hsw = pdata->hsw;
int ret, dai;
- dai = mod_map[rtd->cpu_dai->id].dai_id;
+ dai = mod_map[asoc_rtd_to_cpu(rtd, 0)->id].dai_id;
pcm_data = &pdata->pcm[dai][substream->stream];
mutex_lock(&pcm_data->mutex);
@@ -923,9 +923,9 @@ static int hsw_pcm_new(struct snd_soc_component *component,
hsw_pcm_hardware.buffer_bytes_max);
}
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream)
- priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_PLAYBACK].hsw_pcm = pcm;
+ priv_data->pcm[asoc_rtd_to_cpu(rtd, 0)->id][SNDRV_PCM_STREAM_PLAYBACK].hsw_pcm = pcm;
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream)
- priv_data->pcm[rtd->cpu_dai->id][SNDRV_PCM_STREAM_CAPTURE].hsw_pcm = pcm;
+ priv_data->pcm[asoc_rtd_to_cpu(rtd, 0)->id][SNDRV_PCM_STREAM_CAPTURE].hsw_pcm = pcm;
return 0;
}
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 92a82e6b5fe6..38b9d7494083 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -17,7 +17,6 @@
#include "skl.h"
#define BXT_BASEFW_TIMEOUT 3000
-#define BXT_INIT_TIMEOUT 300
#define BXT_ROM_INIT_TIMEOUT 70
#define BXT_IPC_PURGE_FW 0x01004000
@@ -38,8 +37,6 @@
/* Delay before scheduling D0i3 entry */
#define BXT_D0I3_DELAY 5000
-#define BXT_FW_ROM_INIT_RETRY 3
-
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
index 4f64f097e9ae..c6abcd5aa67b 100644
--- a/sound/soc/intel/skylake/cnl-sst.c
+++ b/sound/soc/intel/skylake/cnl-sst.c
@@ -57,18 +57,34 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
ctx->dsp_ops.stream_tag = stream_tag;
memcpy(ctx->dmab.area, fwdata, fwsize);
+ ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK);
+ if (ret < 0) {
+ dev_err(ctx->dev, "dsp core0 power up failed\n");
+ ret = -EIO;
+ goto base_fw_load_failed;
+ }
+
/* purge FW request */
sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));
- ret = cnl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
+ ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "dsp boot core failed ret: %d\n", ret);
+ dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
ret = -EIO;
goto base_fw_load_failed;
}
+ ret = sst_dsp_register_poll(ctx, CNL_ADSP_REG_HIPCIDA,
+ CNL_ADSP_REG_HIPCIDA_DONE,
+ CNL_ADSP_REG_HIPCIDA_DONE,
+ BXT_INIT_TIMEOUT, "HIPCIDA Done");
+ if (ret < 0) {
+ dev_err(ctx->dev, "timeout for purge request: %d\n", ret);
+ goto base_fw_load_failed;
+ }
+
/* enable interrupt */
cnl_ipc_int_enable(ctx);
cnl_ipc_op_int_enable(ctx);
@@ -109,7 +125,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
{
struct firmware stripped_fw;
struct skl_dev *cnl = ctx->thread_context;
- int ret;
+ int ret, i;
if (!ctx->fw) {
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
@@ -131,12 +147,16 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
stripped_fw.size = ctx->fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
- ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
- if (ret < 0) {
- dev_err(ctx->dev, "prepare firmware failed: %d\n", ret);
- goto cnl_load_base_firmware_failed;
+ for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
+ ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
+ if (!ret)
+ break;
+ dev_dbg(ctx->dev, "prepare firmware failed: %d\n", ret);
}
+ if (ret < 0)
+ goto cnl_load_base_firmware_failed;
+
ret = sst_transfer_fw_host_dma(ctx);
if (ret < 0) {
dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
@@ -158,6 +178,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
return 0;
cnl_load_base_firmware_failed:
+ dev_err(ctx->dev, "firmware load failed: %d\n", ret);
release_firmware(ctx->fw);
ctx->fw = NULL;
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 19f328d71f24..d9c8f5cb389e 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -182,7 +182,8 @@ void skl_nhlt_remove_sysfs(struct skl_dev *skl)
{
struct device *dev = &skl->pci->dev;
- sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
+ if (skl->nhlt)
+ sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
}
/*
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index b99509675d29..89dcccdfb1cd 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -112,10 +112,7 @@ static void skl_set_suspend_active(struct snd_pcm_substream *substream,
struct snd_soc_dapm_widget *w;
struct skl_dev *skl = bus_to_skl(bus);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- w = dai->playback_widget;
- else
- w = dai->capture_widget;
+ w = snd_soc_dai_get_widget(dai, substream->stream);
if (w->ignore_suspend && enable)
skl->supend_active++;
@@ -475,10 +472,7 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
if (!mconfig)
return -EIO;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- w = dai->playback_widget;
- else
- w = dai->capture_widget;
+ w = snd_soc_dai_get_widget(dai, substream->stream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
@@ -551,7 +545,7 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *link_dev;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct skl_pipe_params p_params = {0};
struct hdac_ext_link *link;
int stream_tag;
@@ -650,7 +644,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
link_dev->link_prepared = 0;
- link = snd_hdac_ext_bus_get_link(bus, rtd->codec_dai->component->name);
+ link = snd_hdac_ext_bus_get_link(bus, asoc_rtd_to_codec(rtd, 0)->component->name);
if (!link)
return -EINVAL;
@@ -1080,7 +1074,7 @@ static int skl_platform_soc_open(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai_link *dai_link = rtd->dai_link;
- dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__,
+ dev_dbg(asoc_rtd_to_cpu(rtd, 0)->dev, "In %s:%s\n", __func__,
dai_link->cpus->dai_name);
snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw);
@@ -1232,7 +1226,7 @@ static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
u64 nsec)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
u64 codec_frames, codec_nsecs;
if (!codec_dai->driver->ops->delay)
@@ -1287,7 +1281,7 @@ static int skl_platform_soc_get_time_info(
static int skl_platform_soc_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
struct hdac_bus *bus = dev_get_drvdata(dai->dev);
struct snd_pcm *pcm = rtd->pcm;
unsigned int size;
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index cdfec0fca577..1df9ef422f61 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -67,6 +67,8 @@ struct skl_dev;
#define SKL_FW_INIT 0x1
#define SKL_FW_RFW_START 0xf
+#define BXT_FW_ROM_INIT_RETRY 3
+#define BXT_INIT_TIMEOUT 300
#define SKL_ADSPIC_IPC 1
#define SKL_ADSPIS_IPC 1
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index f755ca2484cf..63182bfd7941 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -130,6 +130,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
struct hdac_ext_link *hlink;
int ret;
+ snd_hdac_set_codec_wakeup(bus, true);
skl_enable_miscbdcge(bus->dev, false);
ret = snd_hdac_bus_init_chip(bus, full_reset);
@@ -138,6 +139,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
skl_enable_miscbdcge(bus->dev, true);
+ snd_hdac_set_codec_wakeup(bus, false);
return ret;
}
@@ -359,7 +361,7 @@ static int skl_resume(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_bus *bus = pci_get_drvdata(pci);
struct skl_dev *skl = bus_to_skl(bus);
- struct hdac_ext_link *hlink = NULL;
+ struct hdac_ext_link *hlink;
int ret;
/*
@@ -481,13 +483,8 @@ static struct skl_ssp_clk skl_ssp_clks[] = {
static struct snd_soc_acpi_mach *skl_find_hda_machine(struct skl_dev *skl,
struct snd_soc_acpi_mach *machines)
{
- struct hdac_bus *bus = skl_to_bus(skl);
struct snd_soc_acpi_mach *mach;
- /* check if we have any codecs detected on bus */
- if (bus->codec_mask == 0)
- return NULL;
-
/* point to common table */
mach = snd_soc_acpi_intel_hda_machines;
@@ -636,6 +633,9 @@ static int skl_clock_device_register(struct skl_dev *skl)
struct platform_device_info pdevinfo = {NULL};
struct skl_clk_pdata *clk_pdata;
+ if (!skl->nhlt)
+ return 0;
+
clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata),
GFP_KERNEL);
if (!clk_pdata)
@@ -794,7 +794,7 @@ static void skl_probe_work(struct work_struct *work)
{
struct skl_dev *skl = container_of(work, struct skl_dev, probe_work);
struct hdac_bus *bus = skl_to_bus(skl);
- struct hdac_ext_link *hlink = NULL;
+ struct hdac_ext_link *hlink;
int err;
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
@@ -803,6 +803,9 @@ static void skl_probe_work(struct work_struct *work)
return;
}
+ skl_init_pci(skl);
+ skl_dum_set(bus);
+
err = skl_init_chip(bus, true);
if (err < 0) {
dev_err(bus->dev, "Init chip failed with err: %d\n", err);
@@ -918,8 +921,6 @@ static int skl_first_init(struct hdac_bus *bus)
return -ENXIO;
}
- snd_hdac_bus_reset_link(bus, true);
-
snd_hdac_bus_parse_capabilities(bus);
/* check if PPCAP exists */
@@ -967,11 +968,7 @@ static int skl_first_init(struct hdac_bus *bus)
if (err < 0)
return err;
- /* initialize chip */
- skl_init_pci(skl);
- skl_dum_set(bus);
-
- return skl_init_chip(bus, true);
+ return 0;
}
static int skl_probe(struct pci_dev *pci,
@@ -1064,8 +1061,6 @@ static int skl_probe(struct pci_dev *pci,
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(bus);
- snd_hdac_bus_stop_chip(bus);
-
/* create device for soc dmic */
err = skl_dmic_device_register(skl);
if (err < 0) {
@@ -1082,7 +1077,8 @@ out_dsp_free:
out_clk_free:
skl_clock_device_unregister(skl);
out_nhlt_free:
- intel_nhlt_free(skl->nhlt);
+ if (skl->nhlt)
+ intel_nhlt_free(skl->nhlt);
out_free:
skl_free(bus);
@@ -1131,7 +1127,8 @@ static void skl_remove(struct pci_dev *pci)
skl_dmic_device_unregister(skl);
skl_clock_device_unregister(skl);
skl_nhlt_remove_sysfs(skl);
- intel_nhlt_free(skl->nhlt);
+ if (skl->nhlt)
+ intel_nhlt_free(skl->nhlt);
skl_free(bus);
dev_set_drvdata(&pci->dev, NULL);
}
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 9d5405881209..6f6f8dad0356 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -49,12 +49,8 @@
#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8
-#define JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 24
-#define JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 16
-#define JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_MASK \
- (0xf << JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET)
-#define JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_MASK \
- (0x1f << JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET)
+#define JZ4760_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 24
+#define JZ4760_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 16
#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19)
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16)
@@ -83,16 +79,23 @@
#define JZ_AIC_I2S_STATUS_BUSY BIT(2)
#define JZ_AIC_CLK_DIV_MASK 0xf
-#define I2SDIV_DV_SHIFT 8
+#define I2SDIV_DV_SHIFT 0
#define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
#define I2SDIV_IDV_SHIFT 8
#define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
enum jz47xx_i2s_version {
JZ_I2S_JZ4740,
+ JZ_I2S_JZ4760,
+ JZ_I2S_JZ4770,
JZ_I2S_JZ4780,
};
+struct i2s_soc_info {
+ enum jz47xx_i2s_version version;
+ struct snd_soc_dai_driver *dai;
+};
+
struct jz4740_i2s {
struct resource *mem;
void __iomem *base;
@@ -104,7 +107,7 @@ struct jz4740_i2s {
struct snd_dmaengine_dai_dma_data playback_dma_data;
struct snd_dmaengine_dai_dma_data capture_dma_data;
- enum jz47xx_i2s_version version;
+ const struct i2s_soc_info *soc_info;
};
static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s,
@@ -284,7 +287,7 @@ static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK;
ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET;
- if (i2s->version >= JZ_I2S_JZ4780) {
+ if (i2s->soc_info->version >= JZ_I2S_JZ4770) {
div_reg &= ~I2SDIV_IDV_MASK;
div_reg |= (div - 1) << I2SDIV_IDV_SHIFT;
} else {
@@ -398,9 +401,9 @@ static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data,
&i2s->capture_dma_data);
- if (i2s->version >= JZ_I2S_JZ4780) {
- conf = (7 << JZ4780_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
- (8 << JZ4780_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
+ if (i2s->soc_info->version >= JZ_I2S_JZ4760) {
+ conf = (7 << JZ4760_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
+ (8 << JZ4760_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
JZ_AIC_CONF_I2S |
JZ_AIC_CONF_INTERNAL_CODEC;
@@ -457,7 +460,17 @@ static struct snd_soc_dai_driver jz4740_i2s_dai = {
.ops = &jz4740_i2s_dai_ops,
};
-static struct snd_soc_dai_driver jz4780_i2s_dai = {
+static const struct i2s_soc_info jz4740_i2s_soc_info = {
+ .version = JZ_I2S_JZ4740,
+ .dai = &jz4740_i2s_dai,
+};
+
+static const struct i2s_soc_info jz4760_i2s_soc_info = {
+ .version = JZ_I2S_JZ4760,
+ .dai = &jz4740_i2s_dai,
+};
+
+static struct snd_soc_dai_driver jz4770_i2s_dai = {
.probe = jz4740_i2s_dai_probe,
.remove = jz4740_i2s_dai_remove,
.playback = {
@@ -475,6 +488,16 @@ static struct snd_soc_dai_driver jz4780_i2s_dai = {
.ops = &jz4740_i2s_dai_ops,
};
+static const struct i2s_soc_info jz4770_i2s_soc_info = {
+ .version = JZ_I2S_JZ4770,
+ .dai = &jz4770_i2s_dai,
+};
+
+static const struct i2s_soc_info jz4780_i2s_soc_info = {
+ .version = JZ_I2S_JZ4780,
+ .dai = &jz4770_i2s_dai,
+};
+
static const struct snd_soc_component_driver jz4740_i2s_component = {
.name = "jz4740-i2s",
.suspend = jz4740_i2s_suspend,
@@ -483,8 +506,10 @@ static const struct snd_soc_component_driver jz4740_i2s_component = {
#ifdef CONFIG_OF
static const struct of_device_id jz4740_of_matches[] = {
- { .compatible = "ingenic,jz4740-i2s", .data = (void *)JZ_I2S_JZ4740 },
- { .compatible = "ingenic,jz4780-i2s", .data = (void *)JZ_I2S_JZ4780 },
+ { .compatible = "ingenic,jz4740-i2s", .data = &jz4740_i2s_soc_info },
+ { .compatible = "ingenic,jz4760-i2s", .data = &jz4760_i2s_soc_info },
+ { .compatible = "ingenic,jz4770-i2s", .data = &jz4770_i2s_soc_info },
+ { .compatible = "ingenic,jz4780-i2s", .data = &jz4780_i2s_soc_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jz4740_of_matches);
@@ -492,45 +517,40 @@ MODULE_DEVICE_TABLE(of, jz4740_of_matches);
static int jz4740_i2s_dev_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct jz4740_i2s *i2s;
struct resource *mem;
int ret;
- i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+ i2s = devm_kzalloc(dev, sizeof(*i2s), GFP_KERNEL);
if (!i2s)
return -ENOMEM;
- i2s->version =
- (enum jz47xx_i2s_version)of_device_get_match_data(&pdev->dev);
+ i2s->soc_info = device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2s->base = devm_ioremap_resource(&pdev->dev, mem);
+ i2s->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(i2s->base))
return PTR_ERR(i2s->base);
i2s->phys_base = mem->start;
- i2s->clk_aic = devm_clk_get(&pdev->dev, "aic");
+ i2s->clk_aic = devm_clk_get(dev, "aic");
if (IS_ERR(i2s->clk_aic))
return PTR_ERR(i2s->clk_aic);
- i2s->clk_i2s = devm_clk_get(&pdev->dev, "i2s");
+ i2s->clk_i2s = devm_clk_get(dev, "i2s");
if (IS_ERR(i2s->clk_i2s))
return PTR_ERR(i2s->clk_i2s);
platform_set_drvdata(pdev, i2s);
- if (i2s->version == JZ_I2S_JZ4780)
- ret = devm_snd_soc_register_component(&pdev->dev,
- &jz4740_i2s_component, &jz4780_i2s_dai, 1);
- else
- ret = devm_snd_soc_register_component(&pdev->dev,
- &jz4740_i2s_component, &jz4740_i2s_dai, 1);
-
+ ret = devm_snd_soc_register_component(dev, &jz4740_i2s_component,
+ i2s->soc_info->dai, 1);
if (ret)
return ret;
- return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
+ return devm_snd_dmaengine_pcm_register(dev, NULL,
SND_DMAENGINE_PCM_FLAG_COMPAT);
}
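
The jz4740-i2s changes above replace the enum-cast match data with a pointer to a const soc_info structure fetched via device_get_match_data(). A minimal sketch of that pattern, with illustrative vendor/chip names that are not part of this patch:

    #include <linux/mod_devicetable.h>
    #include <linux/platform_device.h>
    #include <linux/property.h>

    struct example_soc_info {
            int version;
    };

    static const struct example_soc_info chip_a_info = { .version = 1 };
    static const struct example_soc_info chip_b_info = { .version = 2 };

    /* Wired up through the driver's .of_match_table in a real driver */
    static const struct of_device_id example_of_matches[] = {
            /* .data carries a pointer, so no enum-to-pointer casts needed */
            { .compatible = "vendor,chip-a", .data = &chip_a_info },
            { .compatible = "vendor,chip-b", .data = &chip_b_info },
            { /* sentinel */ }
    };

    static int example_probe(struct platform_device *pdev)
    {
            /* Returns the .data of the matched entry, or NULL */
            const struct example_soc_info *info =
                    device_get_match_data(&pdev->dev);

            if (!info)
                    return -ENODEV;

            return 0;
    }
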
diff --git a/sound/soc/kirkwood/armada-370-db.c b/sound/soc/kirkwood/armada-370-db.c
index 8c3c808bda9a..4f66b011f1b4 100644
--- a/sound/soc/kirkwood/armada-370-db.c
+++ b/sound/soc/kirkwood/armada-370-db.c
@@ -19,7 +19,7 @@ static int a370db_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int freq;
switch (params_rate(params)) {
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index f882b4003edf..e037826b2451 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -20,7 +20,7 @@
static struct kirkwood_dma_data *kirkwood_priv(struct snd_pcm_substream *subs)
{
struct snd_soc_pcm_runtime *soc_runtime = subs->private_data;
- return snd_soc_dai_get_drvdata(soc_runtime->cpu_dai);
+ return snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(soc_runtime, 0));
}
static const struct snd_pcm_hardware kirkwood_dma_snd_hw = {
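
The rtd->cpu_dai and rtd->codec_dai fields used by the machine drivers in this series are replaced by indexed accessors: asoc_rtd_to_cpu(rtd, n) and asoc_rtd_to_codec(rtd, n) resolve the n-th CPU or codec DAI of a runtime. A minimal sketch of a hw_params callback using them (the 256*fs mclk ratio is illustrative, though several drivers below use exactly that):

    #include <sound/pcm_params.h>
    #include <sound/soc.h>

    static int example_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params)
    {
            struct snd_soc_pcm_runtime *rtd = substream->private_data;
            struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
            struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
            unsigned int mclk = params_rate(params) * 256; /* illustrative */
            int ret;

            ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk, SND_SOC_CLOCK_OUT);
            if (ret < 0)
                    return ret;

            return snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
                                          SND_SOC_CLOCK_IN);
    }
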
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
index 4254f3a954dd..375e3b492922 100644
--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
@@ -40,7 +40,7 @@ int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
struct snd_pcm_runtime *runtime = substream->runtime;
- int memif_num = rtd->cpu_dai->id;
+ int memif_num = asoc_rtd_to_cpu(rtd, 0)->id;
struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
int ret;
@@ -100,7 +100,7 @@ void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
- struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ struct mtk_base_afe_memif *memif = &afe->memif[asoc_rtd_to_cpu(rtd, 0)->id];
int irq_id;
irq_id = memif->irq_usage;
@@ -122,7 +122,7 @@ int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
- int id = rtd->cpu_dai->id;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
struct mtk_base_afe_memif *memif = &afe->memif[id];
int ret;
unsigned int channels = params_channels(params);
@@ -199,7 +199,7 @@ int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime * const runtime = substream->runtime;
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
- int id = rtd->cpu_dai->id;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
struct mtk_base_afe_memif *memif = &afe->memif[id];
struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
const struct mtk_base_irq_data *irq_data = irqs->irq_data;
@@ -265,7 +265,7 @@ int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
- int id = rtd->cpu_dai->id;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
int pbuf_size;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
index 44dfef713905..0a1a65c86f0e 100644
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
@@ -82,7 +82,7 @@ snd_pcm_uframes_t mtk_afe_pcm_pointer(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
- struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ struct mtk_base_afe_memif *memif = &afe->memif[asoc_rtd_to_cpu(rtd, 0)->id];
const struct mtk_base_memif_data *memif_data = memif->data;
struct regmap *regmap = afe->regmap;
struct device *dev = afe->dev;
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
index 488603a0c4b1..f0250b0dd734 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
@@ -497,7 +497,7 @@ static int mt2701_memif_fs(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int fs;
- if (rtd->cpu_dai->id != MT2701_MEMIF_ULBT)
+ if (asoc_rtd_to_cpu(rtd, 0)->id != MT2701_MEMIF_ULBT)
fs = mt2701_afe_i2s_fs(rate);
else
fs = (rate == 16000 ? 1 : 0);
diff --git a/sound/soc/mediatek/mt2701/mt2701-cs42448.c b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
index b6941796efca..c47af9b6949b 100644
--- a/sound/soc/mediatek/mt2701/mt2701-cs42448.c
+++ b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
@@ -128,8 +128,8 @@ static int mt2701_cs42448_be_ops_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int mclk_rate;
unsigned int rate = params_rate(params);
unsigned int div_mclk_over_bck = rate > 192000 ? 2 : 4;
diff --git a/sound/soc/mediatek/mt2701/mt2701-wm8960.c b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
index 8c4c89e4c616..0122e7df067f 100644
--- a/sound/soc/mediatek/mt2701/mt2701-wm8960.c
+++ b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
@@ -25,8 +25,8 @@ static int mt2701_wm8960_be_ops_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int mclk_rate;
unsigned int rate = params_rate(params);
unsigned int div_mclk_over_bck = rate > 192000 ? 2 : 4;
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
index 378bfc16ef52..7f930556d961 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
@@ -143,7 +143,7 @@ static int mt6797_memif_fs(struct snd_pcm_substream *substream,
struct snd_soc_component *component =
snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
- int id = rtd->cpu_dai->id;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
return mt6797_rate_transform(afe->dev, rate, id);
}
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 461e4de8c918..1e3f2d786066 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -485,7 +485,7 @@ static int mt8173_memif_fs(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
- struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ struct mtk_base_afe_memif *memif = &afe->memif[asoc_rtd_to_cpu(rtd, 0)->id];
int fs;
if (memif->data->id == MT8173_AFE_MEMIF_DAI ||
diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c
index 22c00600c999..37693d354e66 100644
--- a/sound/soc/mediatek/mt8173/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c
@@ -53,7 +53,7 @@ static int mt8173_max98090_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
return snd_soc_dai_set_sysclk(codec_dai, 0, params_rate(params) * 256,
SND_SOC_CLOCK_IN);
@@ -67,7 +67,7 @@ static int mt8173_max98090_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
struct snd_soc_card *card = runtime->card;
- struct snd_soc_component *component = runtime->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
/* enable jack detection */
ret = snd_soc_card_jack_new(card, "Headphone", SND_JACK_HEADPHONE,
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 2e1e61d8f127..51009a172777 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -47,7 +47,7 @@ static int mt8173_rt5650_rt5514_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int i, ret;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
/* pll from mclk 12.288M */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
params_rate(params) * 512);
@@ -73,7 +73,7 @@ static struct snd_soc_jack mt8173_rt5650_rt5514_jack;
static int mt8173_rt5650_rt5514_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
- struct snd_soc_component *component = runtime->codec_dais[0]->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
int ret;
rt5645_sel_asrc_clk_src(component,
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index ebcc0b86286b..247ac7690805 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -51,7 +51,7 @@ static int mt8173_rt5650_rt5676_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int i, ret;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
/* pll from mclk 12.288M */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
params_rate(params) * 512);
@@ -77,8 +77,8 @@ static struct snd_soc_jack mt8173_rt5650_rt5676_jack;
static int mt8173_rt5650_rt5676_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
- struct snd_soc_component *component = runtime->codec_dais[0]->component;
- struct snd_soc_component *component_sub = runtime->codec_dais[1]->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
+ struct snd_soc_component *component_sub = asoc_rtd_to_codec(runtime, 1)->component;
int ret;
rt5645_sel_asrc_clk_src(component,
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index ef6f23675286..2065c94dbf99 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -11,6 +11,7 @@
#include <linux/of_gpio.h>
#include <sound/soc.h>
#include <sound/jack.h>
+#include <sound/hdmi-codec.h>
#include "../../codecs/rt5645.h"
#define MCLK_FOR_CODECS 12288000
@@ -77,7 +78,7 @@ static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
break;
}
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
/* pll from mclk */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, mclk_clock,
params_rate(params) * 512);
@@ -98,13 +99,13 @@ static const struct snd_soc_ops mt8173_rt5650_ops = {
.hw_params = mt8173_rt5650_hw_params,
};
-static struct snd_soc_jack mt8173_rt5650_jack;
+static struct snd_soc_jack mt8173_rt5650_jack, mt8173_rt5650_hdmi_jack;
static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
- struct snd_soc_component *component = runtime->codec_dais[0]->component;
- const char *codec_capture_dai = runtime->codec_dais[1]->name;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
+ const char *codec_capture_dai = asoc_rtd_to_codec(runtime, 1)->name;
int ret;
rt5645_sel_asrc_clk_src(component,
@@ -144,6 +145,19 @@ static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
&mt8173_rt5650_jack);
}
+static int mt8173_rt5650_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT,
+ &mt8173_rt5650_hdmi_jack, NULL, 0);
+ if (ret)
+ return ret;
+
+ return hdmi_codec_set_jack_detect(asoc_rtd_to_codec(rtd, 0)->component,
+ &mt8173_rt5650_hdmi_jack);
+}
+
enum {
DAI_LINK_PLAYBACK,
DAI_LINK_CAPTURE,
@@ -222,6 +236,7 @@ static struct snd_soc_dai_link mt8173_rt5650_dais[] = {
.name = "HDMI BE",
.no_pcm = 1,
.dpcm_playback = 1,
+ .init = mt8173_rt5650_hdmi_init,
SND_SOC_DAILINK_REG(hdmi_be),
},
};
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
index 6e2270bbb10e..c8ded53bde1d 100644
--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
@@ -146,7 +146,7 @@ static int mt8183_memif_fs(struct snd_pcm_substream *substream,
struct snd_soc_component *component =
snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
- int id = rtd->cpu_dai->id;
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
return mt8183_rate_transform(afe->dev, rate, id);
}
diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
index c65493721e90..5b3dfa79b4ae 100644
--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
@@ -16,7 +16,9 @@
#include "../../codecs/da7219-aad.h"
#include "../../codecs/da7219.h"
-static struct snd_soc_jack headset_jack;
+struct mt8183_da7219_max98357_priv {
+ struct snd_soc_jack headset_jack;
+};
static int mt8183_mt6358_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
@@ -26,7 +28,7 @@ static int mt8183_mt6358_i2s_hw_params(struct snd_pcm_substream *substream,
unsigned int mclk_fs_ratio = 128;
unsigned int mclk_fs = rate * mclk_fs_ratio;
- return snd_soc_dai_set_sysclk(rtd->cpu_dai,
+ return snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0),
0, mclk_fs, SND_SOC_CLOCK_OUT);
}
@@ -38,19 +40,19 @@ static int mt8183_da7219_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
unsigned int rate = params_rate(params);
unsigned int mclk_fs_ratio = 256;
unsigned int mclk_fs = rate * mclk_fs_ratio;
unsigned int freq;
int ret = 0, j;
- ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, 0,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), 0,
mclk_fs, SND_SOC_CLOCK_OUT);
if (ret < 0)
dev_err(rtd->dev, "failed to set cpu dai sysclk\n");
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, "da7219.5-001a")) {
ret = snd_soc_dai_set_sysclk(codec_dai,
@@ -80,10 +82,10 @@ static int mt8183_da7219_i2s_hw_params(struct snd_pcm_substream *substream,
static int mt8183_da7219_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int ret = 0, j;
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, "da7219.5-001a")) {
ret = snd_soc_dai_set_pll(codec_dai,
@@ -116,6 +118,46 @@ static int mt8183_i2s_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
+static int
+mt8183_da7219_max98357_bt_sco_startup(
+ struct snd_pcm_substream *substream)
+{
+ static const unsigned int rates[] = {
+ 8000, 16000
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+ };
+ static const unsigned int channels[] = {
+ 1,
+ };
+ static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+ };
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+ runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ return 0;
+}
+
+static const struct snd_soc_ops mt8183_da7219_max98357_bt_sco_ops = {
+ .startup = mt8183_da7219_max98357_bt_sco_startup,
+};
+
/* FE */
SND_SOC_DAILINK_DEFS(playback1,
DAILINK_COMP_ARRAY(COMP_CPU("DL1")),
@@ -222,6 +264,7 @@ static struct snd_soc_dai_link mt8183_da7219_max98357_dai_links[] = {
SND_SOC_DPCM_TRIGGER_PRE},
.dynamic = 1,
.dpcm_playback = 1,
+ .ops = &mt8183_da7219_max98357_bt_sco_ops,
SND_SOC_DAILINK_REG(playback2),
},
{
@@ -240,6 +283,7 @@ static struct snd_soc_dai_link mt8183_da7219_max98357_dai_links[] = {
SND_SOC_DPCM_TRIGGER_PRE},
.dynamic = 1,
.dpcm_capture = 1,
+ .ops = &mt8183_da7219_max98357_bt_sco_ops,
SND_SOC_DAILINK_REG(capture1),
},
{
@@ -351,8 +395,12 @@ static struct snd_soc_dai_link mt8183_da7219_max98357_dai_links[] = {
{
.name = "TDM",
.no_pcm = 1,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_IB_IF |
+ SND_SOC_DAIFMT_CBM_CFM,
.dpcm_playback = 1,
.ignore_suspend = 1,
+ .be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
SND_SOC_DAILINK_REG(tdm),
},
};
@@ -372,9 +420,31 @@ static struct snd_soc_codec_conf mt6358_codec_conf[] = {
},
};
+static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speakers"),
+};
+
+static const
+struct snd_soc_dapm_widget mt8183_da7219_max98357_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Speakers", NULL),
+ SND_SOC_DAPM_PINCTRL("TDM_OUT_PINCTRL",
+ "aud_tdm_out_on", "aud_tdm_out_off"),
+};
+
+static const struct snd_soc_dapm_route mt8183_da7219_max98357_dapm_routes[] = {
+ {"Speakers", NULL, "Speaker"},
+ {"I2S Playback", NULL, "TDM_OUT_PINCTRL"},
+};
+
static struct snd_soc_card mt8183_da7219_max98357_card = {
.name = "mt8183_da7219_max98357",
.owner = THIS_MODULE,
+ .controls = mt8183_da7219_max98357_snd_controls,
+ .num_controls = ARRAY_SIZE(mt8183_da7219_max98357_snd_controls),
+ .dapm_widgets = mt8183_da7219_max98357_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8183_da7219_max98357_dapm_widgets),
+ .dapm_routes = mt8183_da7219_max98357_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8183_da7219_max98357_dapm_routes),
.dai_link = mt8183_da7219_max98357_dai_links,
.num_links = ARRAY_SIZE(mt8183_da7219_max98357_dai_links),
.aux_dev = &mt8183_da7219_max98357_headset_dev,
@@ -387,6 +457,8 @@ static int
mt8183_da7219_max98357_headset_init(struct snd_soc_component *component)
{
int ret;
+ struct mt8183_da7219_max98357_priv *priv =
+ snd_soc_card_get_drvdata(component->card);
/* Enable Headset and 4 Buttons Jack detection */
ret = snd_soc_card_jack_new(&mt8183_da7219_max98357_card,
@@ -394,12 +466,12 @@ mt8183_da7219_max98357_headset_init(struct snd_soc_component *component)
SND_JACK_HEADSET |
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &headset_jack,
+ &priv->headset_jack,
NULL, 0);
if (ret)
return ret;
- da7219_aad_jack_det(component, &headset_jack);
+ da7219_aad_jack_det(component, &priv->headset_jack);
return ret;
}
@@ -409,7 +481,8 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
struct snd_soc_card *card = &mt8183_da7219_max98357_card;
struct device_node *platform_node;
struct snd_soc_dai_link *dai_link;
- struct pinctrl *default_pins;
+ struct mt8183_da7219_max98357_priv *priv;
+ struct pinctrl *pinctrl;
int ret, i;
card->dev = &pdev->dev;
@@ -436,22 +509,21 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret) {
- dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ snd_soc_card_set_drvdata(card, priv);
+
+ pinctrl = devm_pinctrl_get_select(&pdev->dev, PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(pinctrl)) {
+ ret = PTR_ERR(pinctrl);
+ dev_err(&pdev->dev, "%s failed to select default state %d\n",
__func__, ret);
return ret;
}
- default_pins =
- devm_pinctrl_get_select(&pdev->dev, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(default_pins)) {
- dev_err(&pdev->dev, "%s set pins failed\n",
- __func__);
- return PTR_ERR(default_pins);
- }
-
- return ret;
+ return devm_snd_soc_register_card(&pdev->dev, card);
}
#ifdef CONFIG_OF
@@ -478,4 +550,3 @@ MODULE_DESCRIPTION("MT8183-DA7219-MAX98357 ALSA SoC machine driver");
MODULE_AUTHOR("Shunli Wang <shunli.wang@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mt8183_da7219_max98357 soc card");
-
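
The headset jack above moves from a file-scope static into per-card driver data, set at probe time and retrieved in the component init callback. A minimal sketch of that pattern, assuming the same probe/init split as the patch (names illustrative):

    #include <linux/platform_device.h>
    #include <sound/soc.h>

    struct example_priv {
            struct snd_soc_jack headset_jack;
    };

    static int example_card_probe(struct platform_device *pdev,
                                  struct snd_soc_card *card)
    {
            struct example_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* Attach private state before the card and its callbacks run */
            snd_soc_card_set_drvdata(card, priv);

            return devm_snd_soc_register_card(&pdev->dev, card);
    }

    static struct snd_soc_jack *example_get_jack(struct snd_soc_card *card)
    {
            /* Any card or component callback can reach the state this way */
            struct example_priv *priv = snd_soc_card_get_drvdata(card);

            return &priv->headset_jack;
    }
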
diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
index 0555f7d73d05..1fca8df109b4 100644
--- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
+++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
@@ -41,7 +41,7 @@ static int mt8183_mt6358_i2s_hw_params(struct snd_pcm_substream *substream,
unsigned int mclk_fs_ratio = 128;
unsigned int mclk_fs = rate * mclk_fs_ratio;
- return snd_soc_dai_set_sysclk(rtd->cpu_dai,
+ return snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0),
0, mclk_fs, SND_SOC_CLOCK_OUT);
}
diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
index 2e3676147cea..8b6295283989 100644
--- a/sound/soc/meson/Kconfig
+++ b/sound/soc/meson/Kconfig
@@ -2,6 +2,16 @@
menu "ASoC support for Amlogic platforms"
depends on ARCH_MESON || COMPILE_TEST
+config SND_MESON_AIU
+ tristate "Amlogic AIU"
+ select SND_MESON_CODEC_GLUE
+ select SND_PCM_IEC958
+ imply SND_SOC_MESON_T9015
+ imply SND_SOC_HDMI_CODEC if DRM_MESON_DW_HDMI
+ help
+ Select Y or M to add support for the Audio output subsystem found
+ in the Amlogic Meson8, Meson8b and GX SoC families
+
config SND_MESON_AXG_FIFO
tristate
select REGMAP_MMIO
@@ -50,6 +60,7 @@ config SND_MESON_AXG_TDMOUT
config SND_MESON_AXG_SOUND_CARD
tristate "Amlogic AXG Sound Card Support"
select SND_MESON_AXG_TDM_INTERFACE
+ select SND_MESON_CARD_UTILS
imply SND_MESON_AXG_FRDDR
imply SND_MESON_AXG_TODDR
imply SND_MESON_AXG_TDMIN
@@ -85,11 +96,41 @@ config SND_MESON_AXG_PDM
Select Y or M to add support for PDM input embedded
in the Amlogic AXG SoC family
+config SND_MESON_CARD_UTILS
+ tristate
+
+config SND_MESON_CODEC_GLUE
+ tristate
+
+config SND_MESON_GX_SOUND_CARD
+ tristate "Amlogic GX Sound Card Support"
+ select SND_MESON_CARD_UTILS
+ imply SND_MESON_AIU
+ help
+ Select Y or M to add support for the GXBB/GXL SoC sound card
+
+config SND_MESON_G12A_TOACODEC
+ tristate "Amlogic G12A To Internal DAC Control Support"
+ select SND_MESON_CODEC_GLUE
+ select REGMAP_MMIO
+ imply SND_SOC_MESON_T9015
+ help
+ Select Y or M to add support for the internal audio DAC on the
+ g12a SoC family
+
config SND_MESON_G12A_TOHDMITX
tristate "Amlogic G12A To HDMI TX Control Support"
select REGMAP_MMIO
+ select SND_MESON_CODEC_GLUE
imply SND_SOC_HDMI_CODEC
help
Select Y or M to add support for HDMI audio on the g12a SoC
family
+
+config SND_SOC_MESON_T9015
+ tristate "Amlogic T9015 DAC"
+ select REGMAP_MMIO
+ help
+ Say Y or M if you want to add support for the internal DAC found
+	  on GXL, G12 and SM1 SoC families.
endmenu
diff --git a/sound/soc/meson/Makefile b/sound/soc/meson/Makefile
index 1a8b1470ed84..e446bc980481 100644
--- a/sound/soc/meson/Makefile
+++ b/sound/soc/meson/Makefile
@@ -1,5 +1,13 @@
# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+snd-soc-meson-aiu-objs := aiu.o
+snd-soc-meson-aiu-objs += aiu-acodec-ctrl.o
+snd-soc-meson-aiu-objs += aiu-codec-ctrl.o
+snd-soc-meson-aiu-objs += aiu-encoder-i2s.o
+snd-soc-meson-aiu-objs += aiu-encoder-spdif.o
+snd-soc-meson-aiu-objs += aiu-fifo.o
+snd-soc-meson-aiu-objs += aiu-fifo-i2s.o
+snd-soc-meson-aiu-objs += aiu-fifo-spdif.o
snd-soc-meson-axg-fifo-objs := axg-fifo.o
snd-soc-meson-axg-frddr-objs := axg-frddr.o
snd-soc-meson-axg-toddr-objs := axg-toddr.o
@@ -11,8 +19,14 @@ snd-soc-meson-axg-sound-card-objs := axg-card.o
snd-soc-meson-axg-spdifin-objs := axg-spdifin.o
snd-soc-meson-axg-spdifout-objs := axg-spdifout.o
snd-soc-meson-axg-pdm-objs := axg-pdm.o
+snd-soc-meson-card-utils-objs := meson-card-utils.o
+snd-soc-meson-codec-glue-objs := meson-codec-glue.o
+snd-soc-meson-gx-sound-card-objs := gx-card.o
+snd-soc-meson-g12a-toacodec-objs := g12a-toacodec.o
snd-soc-meson-g12a-tohdmitx-objs := g12a-tohdmitx.o
+snd-soc-meson-t9015-objs := t9015.o
+obj-$(CONFIG_SND_MESON_AIU) += snd-soc-meson-aiu.o
obj-$(CONFIG_SND_MESON_AXG_FIFO) += snd-soc-meson-axg-fifo.o
obj-$(CONFIG_SND_MESON_AXG_FRDDR) += snd-soc-meson-axg-frddr.o
obj-$(CONFIG_SND_MESON_AXG_TODDR) += snd-soc-meson-axg-toddr.o
@@ -24,4 +38,9 @@ obj-$(CONFIG_SND_MESON_AXG_SOUND_CARD) += snd-soc-meson-axg-sound-card.o
obj-$(CONFIG_SND_MESON_AXG_SPDIFIN) += snd-soc-meson-axg-spdifin.o
obj-$(CONFIG_SND_MESON_AXG_SPDIFOUT) += snd-soc-meson-axg-spdifout.o
obj-$(CONFIG_SND_MESON_AXG_PDM) += snd-soc-meson-axg-pdm.o
+obj-$(CONFIG_SND_MESON_CARD_UTILS) += snd-soc-meson-card-utils.o
+obj-$(CONFIG_SND_MESON_CODEC_GLUE) += snd-soc-meson-codec-glue.o
+obj-$(CONFIG_SND_MESON_GX_SOUND_CARD) += snd-soc-meson-gx-sound-card.o
+obj-$(CONFIG_SND_MESON_G12A_TOACODEC) += snd-soc-meson-g12a-toacodec.o
obj-$(CONFIG_SND_MESON_G12A_TOHDMITX) += snd-soc-meson-g12a-tohdmitx.o
+obj-$(CONFIG_SND_SOC_MESON_T9015) += snd-soc-meson-t9015.o
diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c
new file mode 100644
index 000000000000..7078197e0cc5
--- /dev/null
+++ b/sound/soc/meson/aiu-acodec-ctrl.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include <dt-bindings/sound/meson-aiu.h>
+#include "aiu.h"
+#include "meson-codec-glue.h"
+
+#define CTRL_DIN_EN 15
+#define CTRL_CLK_INV BIT(14)
+#define CTRL_LRCLK_INV BIT(13)
+#define CTRL_I2S_IN_BCLK_SRC BIT(11)
+#define CTRL_DIN_LRCLK_SRC_SHIFT 6
+#define CTRL_DIN_LRCLK_SRC (0x3 << CTRL_DIN_LRCLK_SRC_SHIFT)
+#define CTRL_BCLK_MCLK_SRC GENMASK(5, 4)
+#define CTRL_DIN_SKEW GENMASK(3, 2)
+#define CTRL_I2S_OUT_LANE_SRC 0
+
+#define AIU_ACODEC_OUT_CHMAX 2
+
+static const char * const aiu_acodec_ctrl_mux_texts[] = {
+ "DISABLED", "I2S", "PCM",
+};
+
+static int aiu_acodec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_kcontrol_component(kcontrol);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int mux, changed;
+
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, e->reg,
+ CTRL_DIN_LRCLK_SRC,
+ FIELD_PREP(CTRL_DIN_LRCLK_SRC,
+ mux));
+
+ if (!changed)
+ return 0;
+
+ /* Force disconnect of the mux while updating */
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
+
+ snd_soc_component_update_bits(component, e->reg,
+ CTRL_DIN_LRCLK_SRC |
+ CTRL_BCLK_MCLK_SRC,
+ FIELD_PREP(CTRL_DIN_LRCLK_SRC, mux) |
+ FIELD_PREP(CTRL_BCLK_MCLK_SRC, mux));
+
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+
+ return 0;
+}
+
+static SOC_ENUM_SINGLE_DECL(aiu_acodec_ctrl_mux_enum, AIU_ACODEC_CTRL,
+ CTRL_DIN_LRCLK_SRC_SHIFT,
+ aiu_acodec_ctrl_mux_texts);
+
+static const struct snd_kcontrol_new aiu_acodec_ctrl_mux =
+ SOC_DAPM_ENUM_EXT("ACodec Source", aiu_acodec_ctrl_mux_enum,
+ snd_soc_dapm_get_enum_double,
+ aiu_acodec_ctrl_mux_put_enum);
+
+static const struct snd_kcontrol_new aiu_acodec_ctrl_out_enable =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", AIU_ACODEC_CTRL,
+ CTRL_DIN_EN, 1, 0);
+
+static const struct snd_soc_dapm_widget aiu_acodec_ctrl_widgets[] = {
+ SND_SOC_DAPM_MUX("ACODEC SRC", SND_SOC_NOPM, 0, 0,
+ &aiu_acodec_ctrl_mux),
+ SND_SOC_DAPM_SWITCH("ACODEC OUT EN", SND_SOC_NOPM, 0, 0,
+ &aiu_acodec_ctrl_out_enable),
+};
+
+static int aiu_acodec_ctrl_input_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data;
+ int ret;
+
+ ret = meson_codec_glue_input_hw_params(substream, params, dai);
+ if (ret)
+ return ret;
+
+ /* The glue will provide 1 lane out of the 4 to the output */
+ data = meson_codec_glue_input_get_data(dai);
+ data->params.channels_min = min_t(unsigned int, AIU_ACODEC_OUT_CHMAX,
+ data->params.channels_min);
+ data->params.channels_max = min_t(unsigned int, AIU_ACODEC_OUT_CHMAX,
+ data->params.channels_max);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops aiu_acodec_ctrl_input_ops = {
+ .hw_params = aiu_acodec_ctrl_input_hw_params,
+ .set_fmt = meson_codec_glue_input_set_fmt,
+};
+
+static const struct snd_soc_dai_ops aiu_acodec_ctrl_output_ops = {
+ .startup = meson_codec_glue_output_startup,
+};
+
+#define AIU_ACODEC_CTRL_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#define AIU_ACODEC_STREAM(xname, xsuffix, xchmax) \
+{ \
+ .stream_name = xname " " xsuffix, \
+ .channels_min = 1, \
+ .channels_max = (xchmax), \
+ .rate_min = 5512, \
+ .rate_max = 192000, \
+ .formats = AIU_ACODEC_CTRL_FORMATS, \
+}
+
+#define AIU_ACODEC_INPUT(xname) { \
+ .name = "ACODEC CTRL " xname, \
+ .playback = AIU_ACODEC_STREAM(xname, "Playback", 8), \
+ .ops = &aiu_acodec_ctrl_input_ops, \
+ .probe = meson_codec_glue_input_dai_probe, \
+ .remove = meson_codec_glue_input_dai_remove, \
+}
+
+#define AIU_ACODEC_OUTPUT(xname) { \
+ .name = "ACODEC CTRL " xname, \
+ .capture = AIU_ACODEC_STREAM(xname, "Capture", AIU_ACODEC_OUT_CHMAX), \
+ .ops = &aiu_acodec_ctrl_output_ops, \
+}
+
+static struct snd_soc_dai_driver aiu_acodec_ctrl_dai_drv[] = {
+ [CTRL_I2S] = AIU_ACODEC_INPUT("ACODEC I2S IN"),
+ [CTRL_PCM] = AIU_ACODEC_INPUT("ACODEC PCM IN"),
+ [CTRL_OUT] = AIU_ACODEC_OUTPUT("ACODEC OUT"),
+};
+
+static const struct snd_soc_dapm_route aiu_acodec_ctrl_routes[] = {
+ { "ACODEC SRC", "I2S", "ACODEC I2S IN Playback" },
+ { "ACODEC SRC", "PCM", "ACODEC PCM IN Playback" },
+ { "ACODEC OUT EN", "Switch", "ACODEC SRC" },
+ { "ACODEC OUT Capture", NULL, "ACODEC OUT EN" },
+};
+
+static const struct snd_kcontrol_new aiu_acodec_ctrl_controls[] = {
+ SOC_SINGLE("ACODEC I2S Lane Select", AIU_ACODEC_CTRL,
+ CTRL_I2S_OUT_LANE_SRC, 3, 0),
+};
+
+static int aiu_acodec_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ return aiu_of_xlate_dai_name(component, args, dai_name, AIU_ACODEC);
+}
+
+static int aiu_acodec_ctrl_component_probe(struct snd_soc_component *component)
+{
+ /*
+ * NOTE: Din Skew setting
+ * According to the documentation, the following update adds one delay
+ * to the din line. Without this, the output saturates. This happens
+	 * regardless of the link format (i2s or left_j), so it is not clear
+	 * what it actually does, but it seems to be required.
+ */
+ snd_soc_component_update_bits(component, AIU_ACODEC_CTRL,
+ CTRL_DIN_SKEW,
+ FIELD_PREP(CTRL_DIN_SKEW, 2));
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver aiu_acodec_ctrl_component = {
+ .name = "AIU Internal DAC Codec Control",
+ .probe = aiu_acodec_ctrl_component_probe,
+ .controls = aiu_acodec_ctrl_controls,
+ .num_controls = ARRAY_SIZE(aiu_acodec_ctrl_controls),
+ .dapm_widgets = aiu_acodec_ctrl_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aiu_acodec_ctrl_widgets),
+ .dapm_routes = aiu_acodec_ctrl_routes,
+ .num_dapm_routes = ARRAY_SIZE(aiu_acodec_ctrl_routes),
+ .of_xlate_dai_name = aiu_acodec_of_xlate_dai_name,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+int aiu_acodec_ctrl_register_component(struct device *dev)
+{
+ return snd_soc_register_component(dev, &aiu_acodec_ctrl_component,
+ aiu_acodec_ctrl_dai_drv,
+ ARRAY_SIZE(aiu_acodec_ctrl_dai_drv));
+}
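
The register fields throughout the new AIU files are declared with GENMASK() and filled with FIELD_PREP() from linux/bitfield.h. A short worked example of the arithmetic, using an illustrative field rather than one of the AIU registers:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define EXAMPLE_SEL     GENMASK(5, 4)   /* mask 0x30: bits 5 and 4 */

    static unsigned int example_sel_bits(unsigned int mux)
    {
            /*
             * FIELD_PREP(GENMASK(5, 4), 2) shifts the value into place:
             * 2 << 4 == 0x20. Paired with update_bits(reg, EXAMPLE_SEL, ...)
             * this yields a read-modify-write of just that field.
             */
            return FIELD_PREP(EXAMPLE_SEL, mux);
    }
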
diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c
new file mode 100644
index 000000000000..4b773d3e8b07
--- /dev/null
+++ b/sound/soc/meson/aiu-codec-ctrl.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include <dt-bindings/sound/meson-aiu.h>
+#include "aiu.h"
+#include "meson-codec-glue.h"
+
+#define CTRL_CLK_SEL GENMASK(1, 0)
+#define CTRL_DATA_SEL_SHIFT 4
+#define CTRL_DATA_SEL (0x3 << CTRL_DATA_SEL_SHIFT)
+
+static const char * const aiu_codec_ctrl_mux_texts[] = {
+ "DISABLED", "PCM", "I2S",
+};
+
+static int aiu_codec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_kcontrol_component(kcontrol);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int mux, changed;
+
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, e->reg,
+ CTRL_DATA_SEL,
+ FIELD_PREP(CTRL_DATA_SEL, mux));
+
+ if (!changed)
+ return 0;
+
+ /* Force disconnect of the mux while updating */
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
+
+ /* Reset the source first */
+ snd_soc_component_update_bits(component, e->reg,
+ CTRL_CLK_SEL |
+ CTRL_DATA_SEL,
+ FIELD_PREP(CTRL_CLK_SEL, 0) |
+ FIELD_PREP(CTRL_DATA_SEL, 0));
+
+ /* Set the appropriate source */
+ snd_soc_component_update_bits(component, e->reg,
+ CTRL_CLK_SEL |
+ CTRL_DATA_SEL,
+ FIELD_PREP(CTRL_CLK_SEL, mux) |
+ FIELD_PREP(CTRL_DATA_SEL, mux));
+
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+
+ return 0;
+}
+
+static SOC_ENUM_SINGLE_DECL(aiu_hdmi_ctrl_mux_enum, AIU_HDMI_CLK_DATA_CTRL,
+ CTRL_DATA_SEL_SHIFT,
+ aiu_codec_ctrl_mux_texts);
+
+static const struct snd_kcontrol_new aiu_hdmi_ctrl_mux =
+ SOC_DAPM_ENUM_EXT("HDMI Source", aiu_hdmi_ctrl_mux_enum,
+ snd_soc_dapm_get_enum_double,
+ aiu_codec_ctrl_mux_put_enum);
+
+static const struct snd_soc_dapm_widget aiu_hdmi_ctrl_widgets[] = {
+ SND_SOC_DAPM_MUX("HDMI CTRL SRC", SND_SOC_NOPM, 0, 0,
+ &aiu_hdmi_ctrl_mux),
+};
+
+static const struct snd_soc_dai_ops aiu_codec_ctrl_input_ops = {
+ .hw_params = meson_codec_glue_input_hw_params,
+ .set_fmt = meson_codec_glue_input_set_fmt,
+};
+
+static const struct snd_soc_dai_ops aiu_codec_ctrl_output_ops = {
+ .startup = meson_codec_glue_output_startup,
+};
+
+#define AIU_CODEC_CTRL_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#define AIU_CODEC_CTRL_STREAM(xname, xsuffix) \
+{ \
+ .stream_name = xname " " xsuffix, \
+ .channels_min = 1, \
+ .channels_max = 8, \
+ .rate_min = 5512, \
+ .rate_max = 192000, \
+ .formats = AIU_CODEC_CTRL_FORMATS, \
+}
+
+#define AIU_CODEC_CTRL_INPUT(xname) { \
+ .name = "CODEC CTRL " xname, \
+ .playback = AIU_CODEC_CTRL_STREAM(xname, "Playback"), \
+ .ops = &aiu_codec_ctrl_input_ops, \
+ .probe = meson_codec_glue_input_dai_probe, \
+ .remove = meson_codec_glue_input_dai_remove, \
+}
+
+#define AIU_CODEC_CTRL_OUTPUT(xname) { \
+ .name = "CODEC CTRL " xname, \
+ .capture = AIU_CODEC_CTRL_STREAM(xname, "Capture"), \
+ .ops = &aiu_codec_ctrl_output_ops, \
+}
+
+static struct snd_soc_dai_driver aiu_hdmi_ctrl_dai_drv[] = {
+ [CTRL_I2S] = AIU_CODEC_CTRL_INPUT("HDMI I2S IN"),
+ [CTRL_PCM] = AIU_CODEC_CTRL_INPUT("HDMI PCM IN"),
+ [CTRL_OUT] = AIU_CODEC_CTRL_OUTPUT("HDMI OUT"),
+};
+
+static const struct snd_soc_dapm_route aiu_hdmi_ctrl_routes[] = {
+ { "HDMI CTRL SRC", "I2S", "HDMI I2S IN Playback" },
+ { "HDMI CTRL SRC", "PCM", "HDMI PCM IN Playback" },
+ { "HDMI OUT Capture", NULL, "HDMI CTRL SRC" },
+};
+
+static int aiu_hdmi_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ return aiu_of_xlate_dai_name(component, args, dai_name, AIU_HDMI);
+}
+
+static const struct snd_soc_component_driver aiu_hdmi_ctrl_component = {
+ .name = "AIU HDMI Codec Control",
+ .dapm_widgets = aiu_hdmi_ctrl_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aiu_hdmi_ctrl_widgets),
+ .dapm_routes = aiu_hdmi_ctrl_routes,
+ .num_dapm_routes = ARRAY_SIZE(aiu_hdmi_ctrl_routes),
+ .of_xlate_dai_name = aiu_hdmi_of_xlate_dai_name,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+int aiu_hdmi_ctrl_register_component(struct device *dev)
+{
+ return snd_soc_register_component(dev, &aiu_hdmi_ctrl_component,
+ aiu_hdmi_ctrl_dai_drv,
+ ARRAY_SIZE(aiu_hdmi_ctrl_dai_drv));
+}
+
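
Both control components above route DT lookups through aiu_of_xlate_dai_name() with an AIU_* component id; the helper itself lives in aiu.c, which is not part of this hunk. A simplified resolver of the same general shape, assuming a two-cell sound-dai specifier such as <&aiu AIU_HDMI CTRL_I2S>:

    #include <linux/of.h>
    #include <sound/soc.h>

    static int example_of_xlate_dai_name(struct snd_soc_component *component,
                                         struct of_phandle_args *args,
                                         const char **dai_name)
    {
            struct snd_soc_dai *dai;
            int id;

            if (args->args_count != 2)
                    return -EINVAL;

            /* Cell 1 selects the DAI within the addressed component */
            id = args->args[1];
            if (id < 0)
                    return -EINVAL;

            for_each_component_dais(component, dai) {
                    if (id-- == 0) {
                            *dai_name = dai->driver->name;
                            return 0;
                    }
            }

            return -EINVAL;
    }
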
diff --git a/sound/soc/meson/aiu-encoder-i2s.c b/sound/soc/meson/aiu-encoder-i2s.c
new file mode 100644
index 000000000000..832e22d275fe
--- /dev/null
+++ b/sound/soc/meson/aiu-encoder-i2s.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "aiu.h"
+
+#define AIU_I2S_SOURCE_DESC_MODE_8CH BIT(0)
+#define AIU_I2S_SOURCE_DESC_MODE_24BIT BIT(5)
+#define AIU_I2S_SOURCE_DESC_MODE_32BIT BIT(9)
+#define AIU_I2S_SOURCE_DESC_MODE_SPLIT BIT(11)
+#define AIU_RST_SOFT_I2S_FAST BIT(0)
+
+#define AIU_I2S_DAC_CFG_MSB_FIRST BIT(2)
+#define AIU_I2S_MISC_HOLD_EN BIT(2)
+#define AIU_CLK_CTRL_I2S_DIV_EN BIT(0)
+#define AIU_CLK_CTRL_I2S_DIV GENMASK(3, 2)
+#define AIU_CLK_CTRL_AOCLK_INVERT BIT(6)
+#define AIU_CLK_CTRL_LRCLK_INVERT BIT(7)
+#define AIU_CLK_CTRL_LRCLK_SKEW GENMASK(9, 8)
+#define AIU_CLK_CTRL_MORE_HDMI_AMCLK BIT(6)
+#define AIU_CLK_CTRL_MORE_I2S_DIV GENMASK(5, 0)
+#define AIU_CODEC_DAC_LRCLK_CTRL_DIV GENMASK(11, 0)
+
+static void aiu_encoder_i2s_divider_enable(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL,
+ AIU_CLK_CTRL_I2S_DIV_EN,
+ enable ? AIU_CLK_CTRL_I2S_DIV_EN : 0);
+}
+
+static void aiu_encoder_i2s_hold(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_update_bits(component, AIU_I2S_MISC,
+ AIU_I2S_MISC_HOLD_EN,
+ enable ? AIU_I2S_MISC_HOLD_EN : 0);
+}
+
+static int aiu_encoder_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ aiu_encoder_i2s_hold(component, false);
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ aiu_encoder_i2s_hold(component, true);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component,
+ struct snd_pcm_hw_params *params)
+{
+ /* Always operate in split (classic interleaved) mode */
+ unsigned int desc = AIU_I2S_SOURCE_DESC_MODE_SPLIT;
+ unsigned int val;
+
+ /* Reset required to update the pipeline */
+ snd_soc_component_write(component, AIU_RST_SOFT, AIU_RST_SOFT_I2S_FAST);
+ snd_soc_component_read(component, AIU_I2S_SYNC, &val);
+
+ switch (params_physical_width(params)) {
+ case 16: /* Nothing to do */
+ break;
+
+ case 32:
+ desc |= (AIU_I2S_SOURCE_DESC_MODE_24BIT |
+ AIU_I2S_SOURCE_DESC_MODE_32BIT);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_channels(params)) {
+ case 2: /* Nothing to do */
+ break;
+ case 8:
+ desc |= AIU_I2S_SOURCE_DESC_MODE_8CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, AIU_I2S_SOURCE_DESC,
+ AIU_I2S_SOURCE_DESC_MODE_8CH |
+ AIU_I2S_SOURCE_DESC_MODE_24BIT |
+ AIU_I2S_SOURCE_DESC_MODE_32BIT |
+ AIU_I2S_SOURCE_DESC_MODE_SPLIT,
+ desc);
+
+ return 0;
+}
+
+static int aiu_encoder_i2s_set_legacy_div(struct snd_soc_component *component,
+ struct snd_pcm_hw_params *params,
+ unsigned int bs)
+{
+ switch (bs) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ /* These are the only valid legacy dividers */
+ break;
+
+ default:
+ dev_err(component->dev, "Unsupported i2s divider: %u\n", bs);
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL,
+ AIU_CLK_CTRL_I2S_DIV,
+ FIELD_PREP(AIU_CLK_CTRL_I2S_DIV,
+ __ffs(bs)));
+
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL_MORE,
+ AIU_CLK_CTRL_MORE_I2S_DIV,
+ FIELD_PREP(AIU_CLK_CTRL_MORE_I2S_DIV,
+ 0));
+
+ return 0;
+}
+
+static int aiu_encoder_i2s_set_more_div(struct snd_soc_component *component,
+ struct snd_pcm_hw_params *params,
+ unsigned int bs)
+{
+ /*
+ * NOTE: this HW is odd.
+	 * In most configurations, the i2s divider is 'mclk / bclk'.
+ * However, in 16 bits - 8ch mode, this factor needs to be
+ * increased by 50% to get the correct output rate.
+	 * No idea why!
+ */
+ if (params_width(params) == 16 && params_channels(params) == 8) {
+ if (bs % 2) {
+ dev_err(component->dev,
+ "Cannot increase i2s divider by 50%%\n");
+ return -EINVAL;
+ }
+ bs += bs / 2;
+ }
+
+ /* Use CLK_MORE for mclk to bclk divider */
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL,
+ AIU_CLK_CTRL_I2S_DIV,
+ FIELD_PREP(AIU_CLK_CTRL_I2S_DIV, 0));
+
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL_MORE,
+ AIU_CLK_CTRL_MORE_I2S_DIV,
+ FIELD_PREP(AIU_CLK_CTRL_MORE_I2S_DIV,
+ bs - 1));
+
+ return 0;
+}
+
+static int aiu_encoder_i2s_set_clocks(struct snd_soc_component *component,
+ struct snd_pcm_hw_params *params)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(component);
+ unsigned int srate = params_rate(params);
+ unsigned int fs, bs;
+ int ret;
+
+ /* Get the oversampling factor */
+ fs = DIV_ROUND_CLOSEST(clk_get_rate(aiu->i2s.clks[MCLK].clk), srate);
+
+ if (fs % 64)
+ return -EINVAL;
+
+ /* Send data MSB first */
+ snd_soc_component_update_bits(component, AIU_I2S_DAC_CFG,
+ AIU_I2S_DAC_CFG_MSB_FIRST,
+ AIU_I2S_DAC_CFG_MSB_FIRST);
+
+ /* Set bclk to lrlck ratio */
+ snd_soc_component_update_bits(component, AIU_CODEC_DAC_LRCLK_CTRL,
+ AIU_CODEC_DAC_LRCLK_CTRL_DIV,
+ FIELD_PREP(AIU_CODEC_DAC_LRCLK_CTRL_DIV,
+ 64 - 1));
+
+ bs = fs / 64;
+
+ if (aiu->platform->has_clk_ctrl_more_i2s_div)
+ ret = aiu_encoder_i2s_set_more_div(component, params, bs);
+ else
+ ret = aiu_encoder_i2s_set_legacy_div(component, params, bs);
+
+ if (ret)
+ return ret;
+
+ /* Make sure amclk is used for HDMI i2s as well */
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL_MORE,
+ AIU_CLK_CTRL_MORE_HDMI_AMCLK,
+ AIU_CLK_CTRL_MORE_HDMI_AMCLK);
+
+ return 0;
+}
+
+static int aiu_encoder_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ int ret;
+
+ /* Disable the clock while changing the settings */
+ aiu_encoder_i2s_divider_enable(component, false);
+
+ ret = aiu_encoder_i2s_setup_desc(component, params);
+ if (ret) {
+ dev_err(dai->dev, "setting i2s desc failed\n");
+ return ret;
+ }
+
+ ret = aiu_encoder_i2s_set_clocks(component, params);
+ if (ret) {
+ dev_err(dai->dev, "setting i2s clocks failed\n");
+ return ret;
+ }
+
+ aiu_encoder_i2s_divider_enable(component, true);
+
+ return 0;
+}
+
+static int aiu_encoder_i2s_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+
+ aiu_encoder_i2s_divider_enable(component, false);
+
+ return 0;
+}
+
+static int aiu_encoder_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ unsigned int inv = fmt & SND_SOC_DAIFMT_INV_MASK;
+ unsigned int val = 0;
+ unsigned int skew;
+
+ /* Only CPU Master / Codec Slave supported ATM */
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
+ return -EINVAL;
+
+ if (inv == SND_SOC_DAIFMT_NB_IF ||
+ inv == SND_SOC_DAIFMT_IB_IF)
+ val |= AIU_CLK_CTRL_LRCLK_INVERT;
+
+ if (inv == SND_SOC_DAIFMT_IB_NF ||
+ inv == SND_SOC_DAIFMT_IB_IF)
+ val |= AIU_CLK_CTRL_AOCLK_INVERT;
+
+ /* Signal skew */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ /* Invert sample clock for i2s */
+ val ^= AIU_CLK_CTRL_LRCLK_INVERT;
+ skew = 1;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ skew = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= FIELD_PREP(AIU_CLK_CTRL_LRCLK_SKEW, skew);
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL,
+ AIU_CLK_CTRL_LRCLK_INVERT |
+ AIU_CLK_CTRL_AOCLK_INVERT |
+ AIU_CLK_CTRL_LRCLK_SKEW,
+ val);
+
+ return 0;
+}
+
+static int aiu_encoder_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ if (WARN_ON(clk_id != 0))
+ return -EINVAL;
+
+ if (dir == SND_SOC_CLOCK_IN)
+ return 0;
+
+ ret = clk_set_rate(aiu->i2s.clks[MCLK].clk, freq);
+ if (ret)
+ dev_err(dai->dev, "Failed to set sysclk to %uHz", freq);
+		dev_err(dai->dev, "Failed to set sysclk to %uHz\n", freq);
+ return ret;
+}
+
+static const unsigned int hw_channels[] = {2, 8};
+static const struct snd_pcm_hw_constraint_list hw_channel_constraints = {
+ .list = hw_channels,
+ .count = ARRAY_SIZE(hw_channels),
+ .mask = 0,
+};
+
+static int aiu_encoder_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ /* Make sure the encoder gets either 2 or 8 channels */
+ ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &hw_channel_constraints);
+ if (ret) {
+ dev_err(dai->dev, "adding channels constraints failed\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(aiu->i2s.clk_num, aiu->i2s.clks);
+ if (ret)
+ dev_err(dai->dev, "failed to enable i2s clocks\n");
+
+ return ret;
+}
+
+static void aiu_encoder_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(dai->component);
+
+ clk_bulk_disable_unprepare(aiu->i2s.clk_num, aiu->i2s.clks);
+}
+
+const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops = {
+ .trigger = aiu_encoder_i2s_trigger,
+ .hw_params = aiu_encoder_i2s_hw_params,
+ .hw_free = aiu_encoder_i2s_hw_free,
+ .set_fmt = aiu_encoder_i2s_set_fmt,
+ .set_sysclk = aiu_encoder_i2s_set_sysclk,
+ .startup = aiu_encoder_i2s_startup,
+ .shutdown = aiu_encoder_i2s_shutdown,
+};
+
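
To make the divider logic above concrete: the encoder fixes bclk at 64 fs, so with a 12.288 MHz mclk and a 48 kHz stream, fs = 12288000 / 48000 = 256 and bs = 256 / 64 = 4; the 16-bit/8-channel quirk then raises bs by 50% to 6. A simplified restatement of that computation (the driver itself uses DIV_ROUND_CLOSEST on the live clock rate):

    #include <linux/errno.h>

    static int example_i2s_divider(unsigned int mclk_hz, unsigned int rate,
                                   bool sixteen_bit_8ch)
    {
            unsigned int fs = mclk_hz / rate;  /* oversampling factor */
            unsigned int bs;

            if (fs % 64)                       /* only 64*fs multiples */
                    return -EINVAL;

            bs = fs / 64;                      /* mclk-to-bclk divider */
            if (sixteen_bit_8ch)
                    bs += bs / 2;              /* the undocumented +50% */

            return bs;  /* 12288000 Hz @ 48000 Hz -> 4, or 6 with quirk */
    }
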
diff --git a/sound/soc/meson/aiu-encoder-spdif.c b/sound/soc/meson/aiu-encoder-spdif.c
new file mode 100644
index 000000000000..de850913975f
--- /dev/null
+++ b/sound/soc/meson/aiu-encoder-spdif.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <sound/pcm_params.h>
+#include <sound/pcm_iec958.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "aiu.h"
+
+#define AIU_958_MISC_NON_PCM BIT(0)
+#define AIU_958_MISC_MODE_16BITS BIT(1)
+#define AIU_958_MISC_16BITS_ALIGN GENMASK(6, 5)
+#define AIU_958_MISC_MODE_32BITS BIT(7)
+#define AIU_958_MISC_U_FROM_STREAM BIT(12)
+#define AIU_958_MISC_FORCE_LR BIT(13)
+#define AIU_958_CTRL_HOLD_EN BIT(0)
+#define AIU_CLK_CTRL_958_DIV_EN BIT(1)
+#define AIU_CLK_CTRL_958_DIV GENMASK(5, 4)
+#define AIU_CLK_CTRL_958_DIV_MORE BIT(12)
+
+#define AIU_CS_WORD_LEN 4
+#define AIU_958_INTERNAL_DIV 2
+
+static void
+aiu_encoder_spdif_divider_enable(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL,
+ AIU_CLK_CTRL_958_DIV_EN,
+ enable ? AIU_CLK_CTRL_958_DIV_EN : 0);
+}
+
+static void aiu_encoder_spdif_hold(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_update_bits(component, AIU_958_CTRL,
+ AIU_958_CTRL_HOLD_EN,
+ enable ? AIU_958_CTRL_HOLD_EN : 0);
+}
+
+static int
+aiu_encoder_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ aiu_encoder_spdif_hold(component, false);
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ aiu_encoder_spdif_hold(component, true);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aiu_encoder_spdif_setup_cs_word(struct snd_soc_component *component,
+ struct snd_pcm_hw_params *params)
+{
+ u8 cs[AIU_CS_WORD_LEN];
+ unsigned int val;
+ int ret;
+
+ ret = snd_pcm_create_iec958_consumer_hw_params(params, cs,
+ AIU_CS_WORD_LEN);
+ if (ret < 0)
+ return ret;
+
+ /* Write the 1st half word */
+ val = cs[1] | cs[0] << 8;
+ snd_soc_component_write(component, AIU_958_CHSTAT_L0, val);
+ snd_soc_component_write(component, AIU_958_CHSTAT_R0, val);
+
+ /* Write the 2nd half word */
+ val = cs[3] | cs[2] << 8;
+ snd_soc_component_write(component, AIU_958_CHSTAT_L1, val);
+ snd_soc_component_write(component, AIU_958_CHSTAT_R1, val);
+
+ return 0;
+}
+
+static int aiu_encoder_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct aiu *aiu = snd_soc_component_get_drvdata(component);
+ unsigned int val = 0, mrate;
+ int ret;
+
+ /* Disable the clock while changing the settings */
+ aiu_encoder_spdif_divider_enable(component, false);
+
+ switch (params_physical_width(params)) {
+ case 16:
+ val |= AIU_958_MISC_MODE_16BITS;
+ val |= FIELD_PREP(AIU_958_MISC_16BITS_ALIGN, 2);
+ break;
+ case 32:
+ val |= AIU_958_MISC_MODE_32BITS;
+ break;
+ default:
+		dev_err(dai->dev, "Unsupported physical width\n");
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, AIU_958_MISC,
+ AIU_958_MISC_NON_PCM |
+ AIU_958_MISC_MODE_16BITS |
+ AIU_958_MISC_16BITS_ALIGN |
+ AIU_958_MISC_MODE_32BITS |
+ AIU_958_MISC_FORCE_LR |
+ AIU_958_MISC_U_FROM_STREAM,
+ val);
+
+ /* Set the stream channel status word */
+ ret = aiu_encoder_spdif_setup_cs_word(component, params);
+ if (ret) {
+ dev_err(dai->dev, "failed to set channel status word\n");
+ return ret;
+ }
+
+ snd_soc_component_update_bits(component, AIU_CLK_CTRL,
+ AIU_CLK_CTRL_958_DIV |
+ AIU_CLK_CTRL_958_DIV_MORE,
+ FIELD_PREP(AIU_CLK_CTRL_958_DIV,
+ __ffs(AIU_958_INTERNAL_DIV)));
+
+ /* 2 * 32bits per subframe * 2 channels = 128 */
+ mrate = params_rate(params) * 128 * AIU_958_INTERNAL_DIV;
+ ret = clk_set_rate(aiu->spdif.clks[MCLK].clk, mrate);
+ if (ret) {
+ dev_err(dai->dev, "failed to set mclk rate\n");
+ return ret;
+ }
+
+ aiu_encoder_spdif_divider_enable(component, true);
+
+ return 0;
+}
+
+static int aiu_encoder_spdif_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+
+ aiu_encoder_spdif_divider_enable(component, false);
+
+ return 0;
+}
+
+static int aiu_encoder_spdif_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ /*
+ * NOTE: Make sure the spdif block is on its own divider.
+ *
+ * The spdif can be clocked by the i2s master clock or its own
+ * clock. We should (in theory) change the source depending on the
+ * origin of the data.
+ *
+ * However, considering the clocking scheme used on these platforms,
+ * the master clocks will pick the same PLL source when they are
+	 * playing from the same FIFO. The clocks should be in sync, so it
+ * should not be necessary to reparent the spdif master clock.
+ */
+ ret = clk_set_parent(aiu->spdif.clks[MCLK].clk,
+ aiu->spdif_mclk);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(aiu->spdif.clk_num, aiu->spdif.clks);
+ if (ret)
+ dev_err(dai->dev, "failed to enable spdif clocks\n");
+
+ return ret;
+}
+
+static void aiu_encoder_spdif_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(dai->component);
+
+ clk_bulk_disable_unprepare(aiu->spdif.clk_num, aiu->spdif.clks);
+}
+
+const struct snd_soc_dai_ops aiu_encoder_spdif_dai_ops = {
+ .trigger = aiu_encoder_spdif_trigger,
+ .hw_params = aiu_encoder_spdif_hw_params,
+ .hw_free = aiu_encoder_spdif_hw_free,
+ .startup = aiu_encoder_spdif_startup,
+ .shutdown = aiu_encoder_spdif_shutdown,
+};
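
The factor of 128 in the spdif mclk computation above is the IEC958 clock budget per frame (2 * 32 bits per subframe * 2 channels, per the comment in the hunk, consistent with the 128*fs biphase-mark cell rate); the fixed internal divider of 2 then doubles it, so at 48 kHz the mclk lands at 48000 * 128 * 2 = 12.288 MHz. As a one-line restatement:

    static unsigned long example_spdif_mclk(unsigned int rate)
    {
            /* 128 clock periods per frame, times the internal divider of 2 */
            return (unsigned long)rate * 128 * 2;  /* 48 kHz -> 12288000 */
    }
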
diff --git a/sound/soc/meson/aiu-fifo-i2s.c b/sound/soc/meson/aiu-fifo-i2s.c
new file mode 100644
index 000000000000..9a5271ce80fe
--- /dev/null
+++ b/sound/soc/meson/aiu-fifo-i2s.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "aiu.h"
+#include "aiu-fifo.h"
+
+#define AIU_I2S_SOURCE_DESC_MODE_8CH BIT(0)
+#define AIU_I2S_SOURCE_DESC_MODE_24BIT BIT(5)
+#define AIU_I2S_SOURCE_DESC_MODE_32BIT BIT(9)
+#define AIU_I2S_SOURCE_DESC_MODE_SPLIT BIT(11)
+#define AIU_MEM_I2S_MASKS_IRQ_BLOCK GENMASK(31, 16)
+#define AIU_MEM_I2S_CONTROL_MODE_16BIT BIT(6)
+#define AIU_MEM_I2S_BUF_CNTL_INIT BIT(0)
+#define AIU_RST_SOFT_I2S_FAST BIT(0)
+
+#define AIU_FIFO_I2S_BLOCK 256
+
+static struct snd_pcm_hardware fifo_i2s_pcm = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE),
+ .formats = AIU_FORMATS,
+ .rate_min = 5512,
+ .rate_max = 192000,
+ .channels_min = 2,
+ .channels_max = 8,
+ .period_bytes_min = AIU_FIFO_I2S_BLOCK,
+ .period_bytes_max = AIU_FIFO_I2S_BLOCK * USHRT_MAX,
+ .periods_min = 2,
+ .periods_max = UINT_MAX,
+
+ /* No real justification for this */
+ .buffer_bytes_max = 1 * 1024 * 1024,
+};
+
+static int aiu_fifo_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ unsigned int val;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ snd_soc_component_write(component, AIU_RST_SOFT,
+ AIU_RST_SOFT_I2S_FAST);
+ snd_soc_component_read(component, AIU_I2S_SYNC, &val);
+ break;
+ }
+
+ return aiu_fifo_trigger(substream, cmd, dai);
+}
+
+static int aiu_fifo_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ int ret;
+
+ ret = aiu_fifo_prepare(substream, dai);
+ if (ret)
+ return ret;
+
+ snd_soc_component_update_bits(component,
+ AIU_MEM_I2S_BUF_CNTL,
+ AIU_MEM_I2S_BUF_CNTL_INIT,
+ AIU_MEM_I2S_BUF_CNTL_INIT);
+ snd_soc_component_update_bits(component,
+ AIU_MEM_I2S_BUF_CNTL,
+ AIU_MEM_I2S_BUF_CNTL_INIT, 0);
+
+ return 0;
+}
+
+static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+ unsigned int val;
+ int ret;
+
+ ret = aiu_fifo_hw_params(substream, params, dai);
+ if (ret)
+ return ret;
+
+ switch (params_physical_width(params)) {
+ case 16:
+ val = AIU_MEM_I2S_CONTROL_MODE_16BIT;
+ break;
+ case 32:
+ val = 0;
+ break;
+ default:
+ dev_err(dai->dev, "Unsupported physical width %u\n",
+ params_physical_width(params));
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, AIU_MEM_I2S_CONTROL,
+ AIU_MEM_I2S_CONTROL_MODE_16BIT,
+ val);
+
+ /* Setup the irq periodicity */
+ val = params_period_bytes(params) / fifo->fifo_block;
+ val = FIELD_PREP(AIU_MEM_I2S_MASKS_IRQ_BLOCK, val);
+ snd_soc_component_update_bits(component, AIU_MEM_I2S_MASKS,
+ AIU_MEM_I2S_MASKS_IRQ_BLOCK, val);
+
+ return 0;
+}
+
+const struct snd_soc_dai_ops aiu_fifo_i2s_dai_ops = {
+ .trigger = aiu_fifo_i2s_trigger,
+ .prepare = aiu_fifo_i2s_prepare,
+ .hw_params = aiu_fifo_i2s_hw_params,
+ .hw_free = aiu_fifo_hw_free,
+ .startup = aiu_fifo_startup,
+ .shutdown = aiu_fifo_shutdown,
+};
+
+int aiu_fifo_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct aiu *aiu = snd_soc_component_get_drvdata(component);
+ struct aiu_fifo *fifo;
+ int ret;
+
+ ret = aiu_fifo_dai_probe(dai);
+ if (ret)
+ return ret;
+
+ fifo = dai->playback_dma_data;
+
+ fifo->pcm = &fifo_i2s_pcm;
+ fifo->mem_offset = AIU_MEM_I2S_START;
+ fifo->fifo_block = AIU_FIFO_I2S_BLOCK;
+ fifo->pclk = aiu->i2s.clks[PCLK].clk;
+ fifo->irq = aiu->i2s.irq;
+
+ return 0;
+}
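
The IRQ periodicity setup in aiu_fifo_i2s_hw_params() converts the ALSA period from bytes to FIFO blocks and packs the result into bits 31:16 of AIU_MEM_I2S_MASKS. A plain-C sketch of what the kernel's GENMASK()/FIELD_PREP() macros produce here, using user-space stand-ins rather than the real macros:

#include <stdio.h>
#include <stdint.h>

#define IRQ_BLOCK_MASK	0xffff0000u	/* GENMASK(31, 16) */
#define FIFO_BLOCK	256		/* AIU_FIFO_I2S_BLOCK */

/* User-space stand-in for the kernel's FIELD_PREP() */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	unsigned int period_bytes = 4096;	/* example ALSA period */
	uint32_t val = field_prep(IRQ_BLOCK_MASK,
				  period_bytes / FIFO_BLOCK);

	/* 4096 bytes / 256 bytes per block = 16 blocks -> 0x00100000 */
	printf("AIU_MEM_I2S_MASKS <- 0x%08x\n", val);
	return 0;
}
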
diff --git a/sound/soc/meson/aiu-fifo-spdif.c b/sound/soc/meson/aiu-fifo-spdif.c
new file mode 100644
index 000000000000..44eb6faacf44
--- /dev/null
+++ b/sound/soc/meson/aiu-fifo-spdif.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "aiu.h"
+#include "aiu-fifo.h"
+
+#define AIU_IEC958_DCU_FF_CTRL_EN BIT(0)
+#define AIU_IEC958_DCU_FF_CTRL_AUTO_DISABLE BIT(1)
+#define AIU_IEC958_DCU_FF_CTRL_IRQ_MODE GENMASK(3, 2)
+#define AIU_IEC958_DCU_FF_CTRL_IRQ_OUT_THD BIT(2)
+#define AIU_IEC958_DCU_FF_CTRL_IRQ_FRAME_READ BIT(3)
+#define AIU_IEC958_DCU_FF_CTRL_SYNC_HEAD_EN BIT(4)
+#define AIU_IEC958_DCU_FF_CTRL_BYTE_SEEK BIT(5)
+#define AIU_IEC958_DCU_FF_CTRL_CONTINUE BIT(6)
+#define AIU_MEM_IEC958_CONTROL_ENDIAN GENMASK(5, 3)
+#define AIU_MEM_IEC958_CONTROL_RD_DDR BIT(6)
+#define AIU_MEM_IEC958_CONTROL_MODE_16BIT BIT(7)
+#define AIU_MEM_IEC958_CONTROL_MODE_LINEAR BIT(8)
+#define AIU_MEM_IEC958_BUF_CNTL_INIT BIT(0)
+
+#define AIU_FIFO_SPDIF_BLOCK 8
+
+static struct snd_pcm_hardware fifo_spdif_pcm = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE),
+ .formats = AIU_FORMATS,
+ .rate_min = 5512,
+ .rate_max = 192000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .period_bytes_min = AIU_FIFO_SPDIF_BLOCK,
+ .period_bytes_max = AIU_FIFO_SPDIF_BLOCK * USHRT_MAX,
+ .periods_min = 2,
+ .periods_max = UINT_MAX,
+
+ /* No real justification for this */
+ .buffer_bytes_max = 1 * 1024 * 1024,
+};
+
+static void fifo_spdif_dcu_enable(struct snd_soc_component *component,
+ bool enable)
+{
+ snd_soc_component_update_bits(component, AIU_IEC958_DCU_FF_CTRL,
+ AIU_IEC958_DCU_FF_CTRL_EN,
+ enable ? AIU_IEC958_DCU_FF_CTRL_EN : 0);
+}
+
+static int fifo_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ int ret;
+
+ ret = aiu_fifo_trigger(substream, cmd, dai);
+ if (ret)
+ return ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ fifo_spdif_dcu_enable(component, true);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ fifo_spdif_dcu_enable(component, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fifo_spdif_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ int ret;
+
+ ret = aiu_fifo_prepare(substream, dai);
+ if (ret)
+ return ret;
+
+ snd_soc_component_update_bits(component,
+ AIU_MEM_IEC958_BUF_CNTL,
+ AIU_MEM_IEC958_BUF_CNTL_INIT,
+ AIU_MEM_IEC958_BUF_CNTL_INIT);
+ snd_soc_component_update_bits(component,
+ AIU_MEM_IEC958_BUF_CNTL,
+ AIU_MEM_IEC958_BUF_CNTL_INIT, 0);
+
+ return 0;
+}
+
+static int fifo_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ unsigned int val;
+ int ret;
+
+ ret = aiu_fifo_hw_params(substream, params, dai);
+ if (ret)
+ return ret;
+
+ val = AIU_MEM_IEC958_CONTROL_RD_DDR |
+ AIU_MEM_IEC958_CONTROL_MODE_LINEAR;
+
+ switch (params_physical_width(params)) {
+ case 16:
+ val |= AIU_MEM_IEC958_CONTROL_MODE_16BIT;
+ break;
+ case 32:
+ break;
+ default:
+ dev_err(dai->dev, "Unsupported physical width %u\n",
+ params_physical_width(params));
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, AIU_MEM_IEC958_CONTROL,
+ AIU_MEM_IEC958_CONTROL_ENDIAN |
+ AIU_MEM_IEC958_CONTROL_RD_DDR |
+ AIU_MEM_IEC958_CONTROL_MODE_LINEAR |
+ AIU_MEM_IEC958_CONTROL_MODE_16BIT,
+ val);
+
+ /* Number of bytes read by the FIFO between each IRQ */
+ snd_soc_component_write(component, AIU_IEC958_BPF,
+ params_period_bytes(params));
+
+ /*
+ * AUTO_DISABLE and SYNC_HEAD are enabled by default but
+ * these should be disabled in PCM (uncompressed) mode
+ */
+ snd_soc_component_update_bits(component, AIU_IEC958_DCU_FF_CTRL,
+ AIU_IEC958_DCU_FF_CTRL_AUTO_DISABLE |
+ AIU_IEC958_DCU_FF_CTRL_IRQ_MODE |
+ AIU_IEC958_DCU_FF_CTRL_SYNC_HEAD_EN,
+ AIU_IEC958_DCU_FF_CTRL_IRQ_FRAME_READ);
+
+ return 0;
+}
+
+const struct snd_soc_dai_ops aiu_fifo_spdif_dai_ops = {
+ .trigger = fifo_spdif_trigger,
+ .prepare = fifo_spdif_prepare,
+ .hw_params = fifo_spdif_hw_params,
+ .hw_free = aiu_fifo_hw_free,
+ .startup = aiu_fifo_startup,
+ .shutdown = aiu_fifo_shutdown,
+};
+
+int aiu_fifo_spdif_dai_probe(struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct aiu *aiu = snd_soc_component_get_drvdata(component);
+ struct aiu_fifo *fifo;
+ int ret;
+
+ ret = aiu_fifo_dai_probe(dai);
+ if (ret)
+ return ret;
+
+ fifo = dai->playback_dma_data;
+
+ fifo->pcm = &fifo_spdif_pcm;
+ fifo->mem_offset = AIU_MEM_IEC958_START;
+ fifo->fifo_block = 1;
+ fifo->pclk = aiu->spdif.clks[PCLK].clk;
+ fifo->irq = aiu->spdif.irq;
+
+ return 0;
+}
diff --git a/sound/soc/meson/aiu-fifo.c b/sound/soc/meson/aiu-fifo.c
new file mode 100644
index 000000000000..d9cede4c33ff
--- /dev/null
+++ b/sound/soc/meson/aiu-fifo.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "aiu-fifo.h"
+
+#define AIU_MEM_START 0x00
+#define AIU_MEM_RD 0x04
+#define AIU_MEM_END 0x08
+#define AIU_MEM_MASKS 0x0c
+#define AIU_MEM_MASK_CH_RD GENMASK(7, 0)
+#define AIU_MEM_MASK_CH_MEM GENMASK(15, 8)
+#define AIU_MEM_CONTROL 0x10
+#define AIU_MEM_CONTROL_INIT BIT(0)
+#define AIU_MEM_CONTROL_FILL_EN BIT(1)
+#define AIU_MEM_CONTROL_EMPTY_EN BIT(2)
+
+static struct snd_soc_dai *aiu_fifo_dai(struct snd_pcm_substream *ss)
+{
+ struct snd_soc_pcm_runtime *rtd = ss->private_data;
+
+ return asoc_rtd_to_cpu(rtd, 0);
+}
+
+snd_pcm_uframes_t aiu_fifo_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_dai *dai = aiu_fifo_dai(substream);
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned int addr;
+
+ snd_soc_component_read(component, fifo->mem_offset + AIU_MEM_RD,
+ &addr);
+
+ return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
+}
+
+static void aiu_fifo_enable(struct snd_soc_dai *dai, bool enable)
+{
+ struct snd_soc_component *component = dai->component;
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+ unsigned int en_mask = (AIU_MEM_CONTROL_FILL_EN |
+ AIU_MEM_CONTROL_EMPTY_EN);
+
+ snd_soc_component_update_bits(component,
+ fifo->mem_offset + AIU_MEM_CONTROL,
+ en_mask, enable ? en_mask : 0);
+}
+
+int aiu_fifo_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ aiu_fifo_enable(dai, true);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ aiu_fifo_enable(dai, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int aiu_fifo_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+
+ snd_soc_component_update_bits(component,
+ fifo->mem_offset + AIU_MEM_CONTROL,
+ AIU_MEM_CONTROL_INIT,
+ AIU_MEM_CONTROL_INIT);
+ snd_soc_component_update_bits(component,
+ fifo->mem_offset + AIU_MEM_CONTROL,
+ AIU_MEM_CONTROL_INIT, 0);
+ return 0;
+}
+
+int aiu_fifo_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_component *component = dai->component;
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+ dma_addr_t end;
+ int ret;
+
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (ret < 0)
+ return ret;
+
+ /* Setup the fifo boundaries */
+ end = runtime->dma_addr + runtime->dma_bytes - fifo->fifo_block;
+ snd_soc_component_write(component, fifo->mem_offset + AIU_MEM_START,
+ runtime->dma_addr);
+ snd_soc_component_write(component, fifo->mem_offset + AIU_MEM_RD,
+ runtime->dma_addr);
+ snd_soc_component_write(component, fifo->mem_offset + AIU_MEM_END,
+ end);
+
+ /* Setup the fifo to read all the memory - no skip */
+ snd_soc_component_update_bits(component,
+ fifo->mem_offset + AIU_MEM_MASKS,
+ AIU_MEM_MASK_CH_RD | AIU_MEM_MASK_CH_MEM,
+ FIELD_PREP(AIU_MEM_MASK_CH_RD, 0xff) |
+ FIELD_PREP(AIU_MEM_MASK_CH_MEM, 0xff));
+
+ return 0;
+}
+
+int aiu_fifo_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static irqreturn_t aiu_fifo_isr(int irq, void *dev_id)
+{
+ struct snd_pcm_substream *playback = dev_id;
+
+ snd_pcm_period_elapsed(playback);
+
+ return IRQ_HANDLED;
+}
+
+int aiu_fifo_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+ int ret;
+
+ snd_soc_set_runtime_hwparams(substream, fifo->pcm);
+
+ /*
+ * Make sure the buffer and period sizes are multiples of the fifo
+ * burst size
+ */
+ ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ fifo->fifo_block);
+ if (ret)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ fifo->fifo_block);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(fifo->pclk);
+ if (ret)
+ return ret;
+
+ ret = request_irq(fifo->irq, aiu_fifo_isr, 0, dev_name(dai->dev),
+ substream);
+ if (ret)
+ clk_disable_unprepare(fifo->pclk);
+
+ return ret;
+}
+
+void aiu_fifo_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+
+ free_irq(fifo->irq, substream);
+ clk_disable_unprepare(fifo->pclk);
+}
+
+int aiu_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_pcm_substream *substream =
+ rtd->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ struct snd_card *card = rtd->card->snd_card;
+ struct aiu_fifo *fifo = dai->playback_dma_data;
+ size_t size = fifo->pcm->buffer_bytes_max;
+
+ snd_pcm_lib_preallocate_pages(substream,
+ SNDRV_DMA_TYPE_DEV,
+ card->dev, size, size);
+
+ return 0;
+}
+
+int aiu_fifo_dai_probe(struct snd_soc_dai *dai)
+{
+ struct aiu_fifo *fifo;
+
+ fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return -ENOMEM;
+
+ dai->playback_dma_data = fifo;
+
+ return 0;
+}
+
+int aiu_fifo_dai_remove(struct snd_soc_dai *dai)
+{
+ kfree(dai->playback_dma_data);
+
+ return 0;
+}
+
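
aiu_fifo_pointer() above derives the playback position by subtracting the DMA buffer base from the hardware read pointer and letting ALSA convert the byte offset into frames. A short sketch of that conversion; the 4-byte frame size (16-bit stereo) is an assumption of the example, since bytes_to_frames() normally takes it from the runtime parameters:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dma_addr = 0x10000000;	/* example buffer base */
	uint32_t mem_rd   = 0x10000400;	/* example AIU_MEM_RD value */
	unsigned int frame_bytes = 4;	/* 16-bit stereo, assumption */

	/* bytes_to_frames(runtime, addr - runtime->dma_addr) */
	printf("hw pointer: %u frames\n",
	       (mem_rd - dma_addr) / frame_bytes);
	return 0;
}
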
diff --git a/sound/soc/meson/aiu-fifo.h b/sound/soc/meson/aiu-fifo.h
new file mode 100644
index 000000000000..42ce266677cc
--- /dev/null
+++ b/sound/soc/meson/aiu-fifo.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2020 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_AIU_FIFO_H
+#define _MESON_AIU_FIFO_H
+
+struct snd_pcm_hardware;
+struct snd_soc_component_driver;
+struct snd_soc_dai_driver;
+struct clk;
+struct snd_pcm_ops;
+struct snd_pcm_substream;
+struct snd_soc_dai;
+struct snd_pcm_hw_params;
+struct platform_device;
+
+struct aiu_fifo {
+ struct snd_pcm_hardware *pcm;
+ unsigned int mem_offset;
+ unsigned int fifo_block;
+ struct clk *pclk;
+ int irq;
+};
+
+int aiu_fifo_dai_probe(struct snd_soc_dai *dai);
+int aiu_fifo_dai_remove(struct snd_soc_dai *dai);
+
+snd_pcm_uframes_t aiu_fifo_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+
+int aiu_fifo_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai);
+int aiu_fifo_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int aiu_fifo_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+int aiu_fifo_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int aiu_fifo_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+void aiu_fifo_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int aiu_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
+
+#endif /* _MESON_AIU_FIFO_H */
diff --git a/sound/soc/meson/aiu.c b/sound/soc/meson/aiu.c
new file mode 100644
index 000000000000..dc35ca79021c
--- /dev/null
+++ b/sound/soc/meson/aiu.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include <dt-bindings/sound/meson-aiu.h>
+#include "aiu.h"
+#include "aiu-fifo.h"
+
+#define AIU_I2S_MISC_958_SRC_SHIFT 3
+
+static const char * const aiu_spdif_encode_sel_texts[] = {
+ "SPDIF", "I2S",
+};
+
+static SOC_ENUM_SINGLE_DECL(aiu_spdif_encode_sel_enum, AIU_I2S_MISC,
+ AIU_I2S_MISC_958_SRC_SHIFT,
+ aiu_spdif_encode_sel_texts);
+
+static const struct snd_kcontrol_new aiu_spdif_encode_mux =
+ SOC_DAPM_ENUM("SPDIF Buffer Src", aiu_spdif_encode_sel_enum);
+
+static const struct snd_soc_dapm_widget aiu_cpu_dapm_widgets[] = {
+ SND_SOC_DAPM_MUX("SPDIF SRC SEL", SND_SOC_NOPM, 0, 0,
+ &aiu_spdif_encode_mux),
+};
+
+static const struct snd_soc_dapm_route aiu_cpu_dapm_routes[] = {
+ { "I2S Encoder Playback", NULL, "I2S FIFO Playback" },
+ { "SPDIF SRC SEL", "SPDIF", "SPDIF FIFO Playback" },
+ { "SPDIF SRC SEL", "I2S", "I2S FIFO Playback" },
+ { "SPDIF Encoder Playback", NULL, "SPDIF SRC SEL" },
+};
+
+int aiu_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name,
+ unsigned int component_id)
+{
+ struct snd_soc_dai *dai;
+ int id;
+
+ if (args->args_count != 2)
+ return -EINVAL;
+
+ if (args->args[0] != component_id)
+ return -EINVAL;
+
+ id = args->args[1];
+
+ if (id < 0 || id >= component->num_dai)
+ return -EINVAL;
+
+ for_each_component_dais(component, dai) {
+ if (id == 0)
+ break;
+ id--;
+ }
+
+ *dai_name = dai->driver->name;
+
+ return 0;
+}
+
+static int aiu_cpu_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name)
+{
+ return aiu_of_xlate_dai_name(component, args, dai_name, AIU_CPU);
+}
+
+static int aiu_cpu_component_probe(struct snd_soc_component *component)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(component);
+
+ /* Required for the SPDIF Source control operation */
+ return clk_prepare_enable(aiu->i2s.clks[PCLK].clk);
+}
+
+static void aiu_cpu_component_remove(struct snd_soc_component *component)
+{
+ struct aiu *aiu = snd_soc_component_get_drvdata(component);
+
+ clk_disable_unprepare(aiu->i2s.clks[PCLK].clk);
+}
+
+static const struct snd_soc_component_driver aiu_cpu_component = {
+ .name = "AIU CPU",
+ .dapm_widgets = aiu_cpu_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aiu_cpu_dapm_widgets),
+ .dapm_routes = aiu_cpu_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(aiu_cpu_dapm_routes),
+ .of_xlate_dai_name = aiu_cpu_of_xlate_dai_name,
+ .pointer = aiu_fifo_pointer,
+ .probe = aiu_cpu_component_probe,
+ .remove = aiu_cpu_component_remove,
+};
+
+static struct snd_soc_dai_driver aiu_cpu_dai_drv[] = {
+ [CPU_I2S_FIFO] = {
+ .name = "I2S FIFO",
+ .playback = {
+ .stream_name = "I2S FIFO Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
+ .formats = AIU_FORMATS,
+ },
+ .ops = &aiu_fifo_i2s_dai_ops,
+ .pcm_new = aiu_fifo_pcm_new,
+ .probe = aiu_fifo_i2s_dai_probe,
+ .remove = aiu_fifo_dai_remove,
+ },
+ [CPU_SPDIF_FIFO] = {
+ .name = "SPDIF FIFO",
+ .playback = {
+ .stream_name = "SPDIF FIFO Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
+ .formats = AIU_FORMATS,
+ },
+ .ops = &aiu_fifo_spdif_dai_ops,
+ .pcm_new = aiu_fifo_pcm_new,
+ .probe = aiu_fifo_spdif_dai_probe,
+ .remove = aiu_fifo_dai_remove,
+ },
+ [CPU_I2S_ENCODER] = {
+ .name = "I2S Encoder",
+ .playback = {
+ .stream_name = "I2S Encoder Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = AIU_FORMATS,
+ },
+ .ops = &aiu_encoder_i2s_dai_ops,
+ },
+ [CPU_SPDIF_ENCODER] = {
+ .name = "SPDIF Encoder",
+ .playback = {
+ .stream_name = "SPDIF Encoder Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = (SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000),
+ .formats = AIU_FORMATS,
+ },
+ .ops = &aiu_encoder_spdif_dai_ops,
+ }
+};
+
+static const struct regmap_config aiu_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x2ac,
+};
+
+static int aiu_clk_bulk_get(struct device *dev,
+ const char * const *ids,
+ unsigned int num,
+ struct aiu_interface *interface)
+{
+ struct clk_bulk_data *clks;
+ int i, ret;
+
+ clks = devm_kcalloc(dev, num, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ clks[i].id = ids[i];
+
+ ret = devm_clk_bulk_get(dev, num, clks);
+ if (ret < 0)
+ return ret;
+
+ interface->clks = clks;
+ interface->clk_num = num;
+ return 0;
+}
+
+static const char * const aiu_i2s_ids[] = {
+ [PCLK] = "i2s_pclk",
+ [AOCLK] = "i2s_aoclk",
+ [MCLK] = "i2s_mclk",
+ [MIXER] = "i2s_mixer",
+};
+
+static const char * const aiu_spdif_ids[] = {
+ [PCLK] = "spdif_pclk",
+ [AOCLK] = "spdif_aoclk",
+ [MCLK] = "spdif_mclk_sel"
+};
+
+static int aiu_clk_get(struct device *dev)
+{
+ struct aiu *aiu = dev_get_drvdata(dev);
+ int ret;
+
+ aiu->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(aiu->pclk)) {
+ if (PTR_ERR(aiu->pclk) != -EPROBE_DEFER)
+ dev_err(dev, "Can't get the aiu pclk\n");
+ return PTR_ERR(aiu->pclk);
+ }
+
+ aiu->spdif_mclk = devm_clk_get(dev, "spdif_mclk");
+ if (IS_ERR(aiu->spdif_mclk)) {
+ if (PTR_ERR(aiu->spdif_mclk) != -EPROBE_DEFER)
+ dev_err(dev, "Can't get the aiu spdif master clock\n");
+ return PTR_ERR(aiu->spdif_mclk);
+ }
+
+ ret = aiu_clk_bulk_get(dev, aiu_i2s_ids, ARRAY_SIZE(aiu_i2s_ids),
+ &aiu->i2s);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Can't get the i2s clocks\n");
+ return ret;
+ }
+
+ ret = aiu_clk_bulk_get(dev, aiu_spdif_ids, ARRAY_SIZE(aiu_spdif_ids),
+ &aiu->spdif);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Can't get the spdif clocks\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(aiu->pclk);
+ if (ret) {
+ dev_err(dev, "peripheral clock enable failed\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ aiu->pclk);
+ if (ret)
+ dev_err(dev, "failed to add reset action on pclk");
+
+ return ret;
+}
+
+static int aiu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ struct regmap *map;
+ struct aiu *aiu;
+ int ret;
+
+ aiu = devm_kzalloc(dev, sizeof(*aiu), GFP_KERNEL);
+ if (!aiu)
+ return -ENOMEM;
+
+ aiu->platform = device_get_match_data(dev);
+ if (!aiu->platform)
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, aiu);
+
+ ret = device_reset(dev);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to reset device\n");
+ return ret;
+ }
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ map = devm_regmap_init_mmio(dev, regs, &aiu_regmap_cfg);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ aiu->i2s.irq = platform_get_irq_byname(pdev, "i2s");
+ if (aiu->i2s.irq < 0)
+ return aiu->i2s.irq;
+
+ aiu->spdif.irq = platform_get_irq_byname(pdev, "spdif");
+ if (aiu->spdif.irq < 0)
+ return aiu->spdif.irq;
+
+ ret = aiu_clk_get(dev);
+ if (ret)
+ return ret;
+
+ /* Register the cpu component of the aiu */
+ ret = snd_soc_register_component(dev, &aiu_cpu_component,
+ aiu_cpu_dai_drv,
+ ARRAY_SIZE(aiu_cpu_dai_drv));
+ if (ret) {
+ dev_err(dev, "Failed to register cpu component\n");
+ return ret;
+ }
+
+ /* Register the hdmi codec control component */
+ ret = aiu_hdmi_ctrl_register_component(dev);
+ if (ret) {
+ dev_err(dev, "Failed to register hdmi control component\n");
+ goto err;
+ }
+
+ /* Register the internal DAC control component on GXL */
+ if (aiu->platform->has_acodec) {
+ ret = aiu_acodec_ctrl_register_component(dev);
+ if (ret) {
+ dev_err(dev,
+ "Failed to register acodec control component\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ snd_soc_unregister_component(dev);
+ return ret;
+}
+
+static int aiu_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_component(&pdev->dev);
+
+ return 0;
+}
+
+static const struct aiu_platform_data aiu_gxbb_pdata = {
+ .has_acodec = false,
+ .has_clk_ctrl_more_i2s_div = true,
+};
+
+static const struct aiu_platform_data aiu_gxl_pdata = {
+ .has_acodec = true,
+ .has_clk_ctrl_more_i2s_div = true,
+};
+
+static const struct aiu_platform_data aiu_meson8_pdata = {
+ .has_acodec = false,
+ .has_clk_ctrl_more_i2s_div = false,
+};
+
+static const struct of_device_id aiu_of_match[] = {
+ { .compatible = "amlogic,aiu-gxbb", .data = &aiu_gxbb_pdata },
+ { .compatible = "amlogic,aiu-gxl", .data = &aiu_gxl_pdata },
+ { .compatible = "amlogic,aiu-meson8", .data = &aiu_meson8_pdata },
+ { .compatible = "amlogic,aiu-meson8b", .data = &aiu_meson8_pdata },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aiu_of_match);
+
+static struct platform_driver aiu_pdrv = {
+ .probe = aiu_probe,
+ .remove = aiu_remove,
+ .driver = {
+ .name = "meson-aiu",
+ .of_match_table = aiu_of_match,
+ },
+};
+module_platform_driver(aiu_pdrv);
+
+MODULE_DESCRIPTION("Meson AIU Driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
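
aiu_of_xlate_dai_name() resolves a two-cell sound-dai specifier: cell 0 must match the component (AIU_CPU for the CPU component above) and cell 1 indexes that component's DAIs in registration order. A standalone sketch with a plain array standing in for the component DAI list; the AIU_CPU value of 0 is taken from dt-bindings/sound/meson-aiu.h and assumed here:

#include <stdio.h>

#define AIU_CPU 0	/* from dt-bindings/sound/meson-aiu.h, assumed */

static const char * const cpu_dai_names[] = {
	"I2S FIFO", "SPDIF FIFO", "I2S Encoder", "SPDIF Encoder",
};

static const char *xlate(unsigned int cell0, unsigned int cell1)
{
	if (cell0 != AIU_CPU ||
	    cell1 >= sizeof(cpu_dai_names) / sizeof(cpu_dai_names[0]))
		return NULL;	/* -EINVAL in the driver */
	return cpu_dai_names[cell1];
}

int main(void)
{
	/* A phandle like <&aiu AIU_CPU 3> picks the SPDIF encoder */
	printf("%s\n", xlate(AIU_CPU, 3));
	return 0;
}
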
diff --git a/sound/soc/meson/aiu.h b/sound/soc/meson/aiu.h
new file mode 100644
index 000000000000..87aa19ac4af3
--- /dev/null
+++ b/sound/soc/meson/aiu.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_AIU_H
+#define _MESON_AIU_H
+
+struct clk;
+struct clk_bulk_data;
+struct device;
+struct of_phandle_args;
+struct snd_soc_dai;
+struct snd_soc_dai_ops;
+
+enum aiu_clk_ids {
+ PCLK = 0,
+ AOCLK,
+ MCLK,
+ MIXER
+};
+
+struct aiu_interface {
+ struct clk_bulk_data *clks;
+ unsigned int clk_num;
+ int irq;
+};
+
+struct aiu_platform_data {
+ bool has_acodec;
+ bool has_clk_ctrl_more_i2s_div;
+};
+
+struct aiu {
+ struct clk *pclk;
+ struct clk *spdif_mclk;
+ struct aiu_interface i2s;
+ struct aiu_interface spdif;
+ const struct aiu_platform_data *platform;
+};
+
+#define AIU_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+int aiu_of_xlate_dai_name(struct snd_soc_component *component,
+ struct of_phandle_args *args,
+ const char **dai_name,
+ unsigned int component_id);
+
+int aiu_hdmi_ctrl_register_component(struct device *dev);
+int aiu_acodec_ctrl_register_component(struct device *dev);
+
+int aiu_fifo_i2s_dai_probe(struct snd_soc_dai *dai);
+int aiu_fifo_spdif_dai_probe(struct snd_soc_dai *dai);
+
+extern const struct snd_soc_dai_ops aiu_fifo_i2s_dai_ops;
+extern const struct snd_soc_dai_ops aiu_fifo_spdif_dai_ops;
+extern const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops;
+extern const struct snd_soc_dai_ops aiu_encoder_spdif_dai_ops;
+
+#define AIU_IEC958_BPF 0x000
+#define AIU_958_MISC 0x010
+#define AIU_IEC958_DCU_FF_CTRL 0x01c
+#define AIU_958_CHSTAT_L0 0x020
+#define AIU_958_CHSTAT_L1 0x024
+#define AIU_958_CTRL 0x028
+#define AIU_I2S_SOURCE_DESC 0x034
+#define AIU_I2S_DAC_CFG 0x040
+#define AIU_I2S_SYNC 0x044
+#define AIU_I2S_MISC 0x048
+#define AIU_RST_SOFT 0x054
+#define AIU_CLK_CTRL 0x058
+#define AIU_CLK_CTRL_MORE 0x064
+#define AIU_CODEC_DAC_LRCLK_CTRL 0x0a0
+#define AIU_HDMI_CLK_DATA_CTRL 0x0a8
+#define AIU_ACODEC_CTRL 0x0b0
+#define AIU_958_CHSTAT_R0 0x0c0
+#define AIU_958_CHSTAT_R1 0x0c4
+#define AIU_MEM_I2S_START 0x180
+#define AIU_MEM_I2S_MASKS 0x18c
+#define AIU_MEM_I2S_CONTROL 0x190
+#define AIU_MEM_IEC958_START 0x194
+#define AIU_MEM_IEC958_CONTROL 0x1a4
+#define AIU_MEM_I2S_BUF_CNTL 0x1d8
+#define AIU_MEM_IEC958_BUF_CNTL 0x1fc
+
+#endif /* _MESON_AIU_H */
diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
index 1f698adde506..af46845f4ef2 100644
--- a/sound/soc/meson/axg-card.c
+++ b/sound/soc/meson/axg-card.c
@@ -9,11 +9,7 @@
#include <sound/soc-dai.h>
#include "axg-tdm.h"
-
-struct axg_card {
- struct snd_soc_card card;
- void **link_data;
-};
+#include "meson-card.h"
struct axg_dai_link_tdm_mask {
u32 tx;
@@ -41,161 +37,15 @@ static const struct snd_soc_pcm_stream codec_params = {
.channels_max = 8,
};
-#define PREFIX "amlogic,"
-
-static int axg_card_reallocate_links(struct axg_card *priv,
- unsigned int num_links)
-{
- struct snd_soc_dai_link *links;
- void **ldata;
-
- links = krealloc(priv->card.dai_link,
- num_links * sizeof(*priv->card.dai_link),
- GFP_KERNEL | __GFP_ZERO);
- ldata = krealloc(priv->link_data,
- num_links * sizeof(*priv->link_data),
- GFP_KERNEL | __GFP_ZERO);
-
- if (!links || !ldata) {
- dev_err(priv->card.dev, "failed to allocate links\n");
- return -ENOMEM;
- }
-
- priv->card.dai_link = links;
- priv->link_data = ldata;
- priv->card.num_links = num_links;
- return 0;
-}
-
-static int axg_card_parse_dai(struct snd_soc_card *card,
- struct device_node *node,
- struct device_node **dai_of_node,
- const char **dai_name)
-{
- struct of_phandle_args args;
- int ret;
-
- if (!dai_name || !dai_of_node || !node)
- return -EINVAL;
-
- ret = of_parse_phandle_with_args(node, "sound-dai",
- "#sound-dai-cells", 0, &args);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(card->dev, "can't parse dai %d\n", ret);
- return ret;
- }
- *dai_of_node = args.np;
-
- return snd_soc_get_dai_name(&args, dai_name);
-}
-
-static int axg_card_set_link_name(struct snd_soc_card *card,
- struct snd_soc_dai_link *link,
- struct device_node *node,
- const char *prefix)
-{
- char *name = devm_kasprintf(card->dev, GFP_KERNEL, "%s.%s",
- prefix, node->full_name);
- if (!name)
- return -ENOMEM;
-
- link->name = name;
- link->stream_name = name;
-
- return 0;
-}
-
-static void axg_card_clean_references(struct axg_card *priv)
-{
- struct snd_soc_card *card = &priv->card;
- struct snd_soc_dai_link *link;
- struct snd_soc_dai_link_component *codec;
- struct snd_soc_aux_dev *aux;
- int i, j;
-
- if (card->dai_link) {
- for_each_card_prelinks(card, i, link) {
- if (link->cpus)
- of_node_put(link->cpus->of_node);
- for_each_link_codecs(link, j, codec)
- of_node_put(codec->of_node);
- }
- }
-
- if (card->aux_dev) {
- for_each_card_pre_auxs(card, i, aux)
- of_node_put(aux->dlc.of_node);
- }
-
- kfree(card->dai_link);
- kfree(priv->link_data);
-}
-
-static int axg_card_add_aux_devices(struct snd_soc_card *card)
-{
- struct device_node *node = card->dev->of_node;
- struct snd_soc_aux_dev *aux;
- int num, i;
-
- num = of_count_phandle_with_args(node, "audio-aux-devs", NULL);
- if (num == -ENOENT) {
- /*
- * It is ok to have no auxiliary devices but for this card it
- * is a strange situtation. Let's warn the about it.
- */
- dev_warn(card->dev, "card has no auxiliary devices\n");
- return 0;
- } else if (num < 0) {
- dev_err(card->dev, "error getting auxiliary devices: %d\n",
- num);
- return num;
- }
-
- aux = devm_kcalloc(card->dev, num, sizeof(*aux), GFP_KERNEL);
- if (!aux)
- return -ENOMEM;
- card->aux_dev = aux;
- card->num_aux_devs = num;
-
- for_each_card_pre_auxs(card, i, aux) {
- aux->dlc.of_node =
- of_parse_phandle(node, "audio-aux-devs", i);
- if (!aux->dlc.of_node)
- return -EINVAL;
- }
-
- return 0;
-}
-
static int axg_card_tdm_be_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct axg_card *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct meson_card *priv = snd_soc_card_get_drvdata(rtd->card);
struct axg_dai_link_tdm_data *be =
(struct axg_dai_link_tdm_data *)priv->link_data[rtd->num];
- struct snd_soc_dai *codec_dai;
- unsigned int mclk;
- int ret, i;
-
- if (be->mclk_fs) {
- mclk = params_rate(params) * be->mclk_fs;
-
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
- SND_SOC_CLOCK_IN);
- if (ret && ret != -ENOTSUPP)
- return ret;
- }
-
- ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, 0, mclk,
- SND_SOC_CLOCK_OUT);
- if (ret && ret != -ENOTSUPP)
- return ret;
- }
- return 0;
+ return meson_card_i2s_set_sysclk(substream, params, be->mclk_fs);
}
static const struct snd_soc_ops axg_card_tdm_be_ops = {
@@ -204,13 +54,13 @@ static const struct snd_soc_ops axg_card_tdm_be_ops = {
static int axg_card_tdm_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct axg_card *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct meson_card *priv = snd_soc_card_get_drvdata(rtd->card);
struct axg_dai_link_tdm_data *be =
(struct axg_dai_link_tdm_data *)priv->link_data[rtd->num];
struct snd_soc_dai *codec_dai;
int ret, i;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai,
be->codec_masks[i].tx,
be->codec_masks[i].rx,
@@ -222,10 +72,10 @@ static int axg_card_tdm_dai_init(struct snd_soc_pcm_runtime *rtd)
}
}
- ret = axg_tdm_set_tdm_slots(rtd->cpu_dai, be->tx_mask, be->rx_mask,
+ ret = axg_tdm_set_tdm_slots(asoc_rtd_to_cpu(rtd, 0), be->tx_mask, be->rx_mask,
be->slots, be->slot_width);
if (ret) {
- dev_err(rtd->cpu_dai->dev, "setting tdm link slots failed\n");
+ dev_err(asoc_rtd_to_cpu(rtd, 0)->dev, "setting tdm link slots failed\n");
return ret;
}
@@ -234,16 +84,16 @@ static int axg_card_tdm_dai_init(struct snd_soc_pcm_runtime *rtd)
static int axg_card_tdm_dai_lb_init(struct snd_soc_pcm_runtime *rtd)
{
- struct axg_card *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct meson_card *priv = snd_soc_card_get_drvdata(rtd->card);
struct axg_dai_link_tdm_data *be =
(struct axg_dai_link_tdm_data *)priv->link_data[rtd->num];
int ret;
/* The loopback rx_mask is the pad tx_mask */
- ret = axg_tdm_set_tdm_slots(rtd->cpu_dai, NULL, be->tx_mask,
+ ret = axg_tdm_set_tdm_slots(asoc_rtd_to_cpu(rtd, 0), NULL, be->tx_mask,
be->slots, be->slot_width);
if (ret) {
- dev_err(rtd->cpu_dai->dev, "setting tdm link slots failed\n");
+ dev_err(asoc_rtd_to_cpu(rtd, 0)->dev, "setting tdm link slots failed\n");
return ret;
}
@@ -253,14 +103,14 @@ static int axg_card_tdm_dai_lb_init(struct snd_soc_pcm_runtime *rtd)
static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
int *index)
{
- struct axg_card *priv = snd_soc_card_get_drvdata(card);
+ struct meson_card *priv = snd_soc_card_get_drvdata(card);
struct snd_soc_dai_link *pad = &card->dai_link[*index];
struct snd_soc_dai_link *lb;
struct snd_soc_dai_link_component *dlc;
int ret;
/* extend links */
- ret = axg_card_reallocate_links(priv, card->num_links + 1);
+ ret = meson_card_reallocate_links(card, card->num_links + 1);
if (ret)
return ret;
@@ -304,32 +154,6 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
return 0;
}
-static unsigned int axg_card_parse_daifmt(struct device_node *node,
- struct device_node *cpu_node)
-{
- struct device_node *bitclkmaster = NULL;
- struct device_node *framemaster = NULL;
- unsigned int daifmt;
-
- daifmt = snd_soc_of_parse_daifmt(node, PREFIX,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
-
- /* If no master is provided, default to cpu master */
- if (!bitclkmaster || bitclkmaster == cpu_node) {
- daifmt |= (!framemaster || framemaster == cpu_node) ?
- SND_SOC_DAIFMT_CBS_CFS : SND_SOC_DAIFMT_CBS_CFM;
- } else {
- daifmt |= (!framemaster || framemaster == cpu_node) ?
- SND_SOC_DAIFMT_CBM_CFS : SND_SOC_DAIFMT_CBM_CFM;
- }
-
- of_node_put(bitclkmaster);
- of_node_put(framemaster);
-
- return daifmt;
-}
-
static int axg_card_parse_cpu_tdm_slots(struct snd_soc_card *card,
struct snd_soc_dai_link *link,
struct device_node *node,
@@ -424,7 +248,7 @@ static int axg_card_parse_tdm(struct snd_soc_card *card,
struct device_node *node,
int *index)
{
- struct axg_card *priv = snd_soc_card_get_drvdata(card);
+ struct meson_card *priv = snd_soc_card_get_drvdata(card);
struct snd_soc_dai_link *link = &card->dai_link[*index];
struct axg_dai_link_tdm_data *be;
int ret;
@@ -438,7 +262,7 @@ static int axg_card_parse_tdm(struct snd_soc_card *card,
/* Setup tdm link */
link->ops = &axg_card_tdm_be_ops;
link->init = axg_card_tdm_dai_init;
- link->dai_fmt = axg_card_parse_daifmt(node, link->cpus->of_node);
+ link->dai_fmt = meson_card_parse_daifmt(node, link->cpus->of_node);
of_property_read_u32(node, "mclk-fs", &be->mclk_fs);
@@ -462,97 +286,25 @@ static int axg_card_parse_tdm(struct snd_soc_card *card,
return 0;
}
-static int axg_card_set_be_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *link,
- struct device_node *node)
-{
- struct snd_soc_dai_link_component *codec;
- struct device_node *np;
- int ret, num_codecs;
-
- link->no_pcm = 1;
- link->dpcm_playback = 1;
- link->dpcm_capture = 1;
-
- num_codecs = of_get_child_count(node);
- if (!num_codecs) {
- dev_err(card->dev, "be link %s has no codec\n",
- node->full_name);
- return -EINVAL;
- }
-
- codec = devm_kcalloc(card->dev, num_codecs, sizeof(*codec), GFP_KERNEL);
- if (!codec)
- return -ENOMEM;
-
- link->codecs = codec;
- link->num_codecs = num_codecs;
-
- for_each_child_of_node(node, np) {
- ret = axg_card_parse_dai(card, np, &codec->of_node,
- &codec->dai_name);
- if (ret) {
- of_node_put(np);
- return ret;
- }
-
- codec++;
- }
-
- ret = axg_card_set_link_name(card, link, node, "be");
- if (ret)
- dev_err(card->dev, "error setting %pOFn link name\n", np);
-
- return ret;
-}
-
-static int axg_card_set_fe_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *link,
- struct device_node *node,
- bool is_playback)
-{
- struct snd_soc_dai_link_component *codec;
-
- codec = devm_kzalloc(card->dev, sizeof(*codec), GFP_KERNEL);
- if (!codec)
- return -ENOMEM;
-
- link->codecs = codec;
- link->num_codecs = 1;
-
- link->dynamic = 1;
- link->dpcm_merged_format = 1;
- link->dpcm_merged_chan = 1;
- link->dpcm_merged_rate = 1;
- link->codecs->dai_name = "snd-soc-dummy-dai";
- link->codecs->name = "snd-soc-dummy";
-
- if (is_playback)
- link->dpcm_playback = 1;
- else
- link->dpcm_capture = 1;
-
- return axg_card_set_link_name(card, link, node, "fe");
-}
-
static int axg_card_cpu_is_capture_fe(struct device_node *np)
{
- return of_device_is_compatible(np, PREFIX "axg-toddr");
+ return of_device_is_compatible(np, DT_PREFIX "axg-toddr");
}
static int axg_card_cpu_is_playback_fe(struct device_node *np)
{
- return of_device_is_compatible(np, PREFIX "axg-frddr");
+ return of_device_is_compatible(np, DT_PREFIX "axg-frddr");
}
static int axg_card_cpu_is_tdm_iface(struct device_node *np)
{
- return of_device_is_compatible(np, PREFIX "axg-tdm-iface");
+ return of_device_is_compatible(np, DT_PREFIX "axg-tdm-iface");
}
static int axg_card_cpu_is_codec(struct device_node *np)
{
- return of_device_is_compatible(np, PREFIX "g12a-tohdmitx");
+ return of_device_is_compatible(np, DT_PREFIX "g12a-tohdmitx") ||
+ of_device_is_compatible(np, DT_PREFIX "g12a-toacodec");
}
static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
@@ -569,17 +321,17 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
dai_link->cpus = cpu;
dai_link->num_cpus = 1;
- ret = axg_card_parse_dai(card, np, &dai_link->cpus->of_node,
- &dai_link->cpus->dai_name);
+ ret = meson_card_parse_dai(card, np, &dai_link->cpus->of_node,
+ &dai_link->cpus->dai_name);
if (ret)
return ret;
if (axg_card_cpu_is_playback_fe(dai_link->cpus->of_node))
- ret = axg_card_set_fe_link(card, dai_link, np, true);
+ ret = meson_card_set_fe_link(card, dai_link, np, true);
else if (axg_card_cpu_is_capture_fe(dai_link->cpus->of_node))
- ret = axg_card_set_fe_link(card, dai_link, np, false);
+ ret = meson_card_set_fe_link(card, dai_link, np, false);
else
- ret = axg_card_set_be_link(card, dai_link, np);
+ ret = meson_card_set_be_link(card, dai_link, np);
if (ret)
return ret;
@@ -592,121 +344,21 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
return ret;
}
-static int axg_card_add_links(struct snd_soc_card *card)
-{
- struct axg_card *priv = snd_soc_card_get_drvdata(card);
- struct device_node *node = card->dev->of_node;
- struct device_node *np;
- int num, i, ret;
-
- num = of_get_child_count(node);
- if (!num) {
- dev_err(card->dev, "card has no links\n");
- return -EINVAL;
- }
-
- ret = axg_card_reallocate_links(priv, num);
- if (ret)
- return ret;
-
- i = 0;
- for_each_child_of_node(node, np) {
- ret = axg_card_add_link(card, np, &i);
- if (ret) {
- of_node_put(np);
- return ret;
- }
-
- i++;
- }
-
- return 0;
-}
-
-static int axg_card_parse_of_optional(struct snd_soc_card *card,
- const char *propname,
- int (*func)(struct snd_soc_card *c,
- const char *p))
-{
- /* If property is not provided, don't fail ... */
- if (!of_property_read_bool(card->dev->of_node, propname))
- return 0;
-
- /* ... but do fail if it is provided and the parsing fails */
- return func(card, propname);
-}
+static const struct meson_card_match_data axg_card_match_data = {
+ .add_link = axg_card_add_link,
+};
static const struct of_device_id axg_card_of_match[] = {
- { .compatible = "amlogic,axg-sound-card", },
- {}
+ {
+ .compatible = "amlogic,axg-sound-card",
+ .data = &axg_card_match_data,
+ }, {}
};
MODULE_DEVICE_TABLE(of, axg_card_of_match);
-static int axg_card_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct axg_card *priv;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, priv);
- snd_soc_card_set_drvdata(&priv->card, priv);
-
- priv->card.owner = THIS_MODULE;
- priv->card.dev = dev;
-
- ret = snd_soc_of_parse_card_name(&priv->card, "model");
- if (ret < 0)
- return ret;
-
- ret = axg_card_parse_of_optional(&priv->card, "audio-routing",
- snd_soc_of_parse_audio_routing);
- if (ret) {
- dev_err(dev, "error while parsing routing\n");
- return ret;
- }
-
- ret = axg_card_parse_of_optional(&priv->card, "audio-widgets",
- snd_soc_of_parse_audio_simple_widgets);
- if (ret) {
- dev_err(dev, "error while parsing widgets\n");
- return ret;
- }
-
- ret = axg_card_add_links(&priv->card);
- if (ret)
- goto out_err;
-
- ret = axg_card_add_aux_devices(&priv->card);
- if (ret)
- goto out_err;
-
- ret = devm_snd_soc_register_card(dev, &priv->card);
- if (ret)
- goto out_err;
-
- return 0;
-
-out_err:
- axg_card_clean_references(priv);
- return ret;
-}
-
-static int axg_card_remove(struct platform_device *pdev)
-{
- struct axg_card *priv = platform_get_drvdata(pdev);
-
- axg_card_clean_references(priv);
-
- return 0;
-}
-
static struct platform_driver axg_card_pdrv = {
- .probe = axg_card_probe,
- .remove = axg_card_remove,
+ .probe = meson_card_probe,
+ .remove = meson_card_remove,
.driver = {
.name = "axg-sound-card",
.of_match_table = axg_card_of_match,
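
The TDM back-end hw_params callback now delegates to meson_card_i2s_set_sysclk(), which keeps the old behaviour: when the mclk-fs property is set, the requested master clock is the stream rate multiplied by that factor. A quick illustration, assuming a typical mclk-fs of 256:

#include <stdio.h>

int main(void)
{
	unsigned int mclk_fs = 256;	/* "mclk-fs" value, assumption */
	unsigned int rates[] = { 44100, 48000, 96000 };
	unsigned int i;

	/* mclk = params_rate(params) * be->mclk_fs */
	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("rate %5u Hz -> mclk %8u Hz\n",
		       rates[i], rates[i] * mclk_fs);
	return 0;
}
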
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index c12b0d5e8ebf..2e9b56b29d31 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -47,7 +47,7 @@ static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
struct snd_soc_pcm_runtime *rtd = ss->private_data;
- return rtd->cpu_dai;
+ return asoc_rtd_to_cpu(rtd, 0);
}
static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c
new file mode 100644
index 000000000000..9339fabccb79
--- /dev/null
+++ b/sound/soc/meson/g12a-toacodec.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include <dt-bindings/sound/meson-g12a-toacodec.h>
+#include "axg-tdm.h"
+#include "meson-codec-glue.h"
+
+#define G12A_TOACODEC_DRV_NAME "g12a-toacodec"
+
+#define TOACODEC_CTRL0 0x0
+#define CTRL0_ENABLE_SHIFT 31
+#define CTRL0_DAT_SEL_SHIFT 14
+#define CTRL0_DAT_SEL (0x3 << CTRL0_DAT_SEL_SHIFT)
+#define CTRL0_LANE_SEL 12
+#define CTRL0_LRCLK_SEL GENMASK(9, 8)
+#define CTRL0_BLK_CAP_INV BIT(7)
+#define CTRL0_BCLK_O_INV BIT(6)
+#define CTRL0_BCLK_SEL GENMASK(5, 4)
+#define CTRL0_MCLK_SEL GENMASK(2, 0)
+
+#define TOACODEC_OUT_CHMAX 2
+
+static const char * const g12a_toacodec_mux_texts[] = {
+ "I2S A", "I2S B", "I2S C",
+};
+
+static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_kcontrol_component(kcontrol);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int mux, changed;
+
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, e->reg,
+ CTRL0_DAT_SEL,
+ FIELD_PREP(CTRL0_DAT_SEL, mux));
+
+ if (!changed)
+ return 0;
+
+ /* Force disconnect of the mux while updating */
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
+
+ snd_soc_component_update_bits(component, e->reg,
+ CTRL0_DAT_SEL |
+ CTRL0_LRCLK_SEL |
+ CTRL0_BCLK_SEL,
+ FIELD_PREP(CTRL0_DAT_SEL, mux) |
+ FIELD_PREP(CTRL0_LRCLK_SEL, mux) |
+ FIELD_PREP(CTRL0_BCLK_SEL, mux));
+
+ /*
+ * FIXME:
+ * On this soc, the glue gets the MCLK directly from the clock
+ * controller instead of going through the TDM interface.
+ *
+ * Here we assume interface A uses clock A, etc. While this is
+ * true for now, it could change. Instead, the glue should find
+ * out the clock used by the interface and select the same
+ * source. For that, we will need a regmap-backed clock mux,
+ * which is a work in progress.
+ */
+ snd_soc_component_update_bits(component, e->reg,
+ CTRL0_MCLK_SEL,
+ FIELD_PREP(CTRL0_MCLK_SEL, mux));
+
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+
+ return 0;
+}
+
+static SOC_ENUM_SINGLE_DECL(g12a_toacodec_mux_enum, TOACODEC_CTRL0,
+ CTRL0_DAT_SEL_SHIFT,
+ g12a_toacodec_mux_texts);
+
+static const struct snd_kcontrol_new g12a_toacodec_mux =
+ SOC_DAPM_ENUM_EXT("Source", g12a_toacodec_mux_enum,
+ snd_soc_dapm_get_enum_double,
+ g12a_toacodec_mux_put_enum);
+
+static const struct snd_kcontrol_new g12a_toacodec_out_enable =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", TOACODEC_CTRL0,
+ CTRL0_ENABLE_SHIFT, 1, 0);
+
+static const struct snd_soc_dapm_widget g12a_toacodec_widgets[] = {
+ SND_SOC_DAPM_MUX("SRC", SND_SOC_NOPM, 0, 0,
+ &g12a_toacodec_mux),
+ SND_SOC_DAPM_SWITCH("OUT EN", SND_SOC_NOPM, 0, 0,
+ &g12a_toacodec_out_enable),
+};
+
+static int g12a_toacodec_input_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data;
+ int ret;
+
+ ret = meson_codec_glue_input_hw_params(substream, params, dai);
+ if (ret)
+ return ret;
+
+ /* The glue will provide 1 lane out of the 4 to the output */
+ data = meson_codec_glue_input_get_data(dai);
+ data->params.channels_min = min_t(unsigned int, TOACODEC_OUT_CHMAX,
+ data->params.channels_min);
+ data->params.channels_max = min_t(unsigned int, TOACODEC_OUT_CHMAX,
+ data->params.channels_max);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops g12a_toacodec_input_ops = {
+ .hw_params = g12a_toacodec_input_hw_params,
+ .set_fmt = meson_codec_glue_input_set_fmt,
+};
+
+static const struct snd_soc_dai_ops g12a_toacodec_output_ops = {
+ .startup = meson_codec_glue_output_startup,
+};
+
+#define TOACODEC_STREAM(xname, xsuffix, xchmax) \
+{ \
+ .stream_name = xname " " xsuffix, \
+ .channels_min = 1, \
+ .channels_max = (xchmax), \
+ .rate_min = 5512, \
+ .rate_max = 192000, \
+ .formats = AXG_TDM_FORMATS, \
+}
+
+#define TOACODEC_INPUT(xname, xid) { \
+ .name = xname, \
+ .id = (xid), \
+ .playback = TOACODEC_STREAM(xname, "Playback", 8), \
+ .ops = &g12a_toacodec_input_ops, \
+ .probe = meson_codec_glue_input_dai_probe, \
+ .remove = meson_codec_glue_input_dai_remove, \
+}
+
+#define TOACODEC_OUTPUT(xname, xid) { \
+ .name = xname, \
+ .id = (xid), \
+ .capture = TOACODEC_STREAM(xname, "Capture", TOACODEC_OUT_CHMAX), \
+ .ops = &g12a_toacodec_output_ops, \
+}
+
+static struct snd_soc_dai_driver g12a_toacodec_dai_drv[] = {
+ TOACODEC_INPUT("IN A", TOACODEC_IN_A),
+ TOACODEC_INPUT("IN B", TOACODEC_IN_B),
+ TOACODEC_INPUT("IN C", TOACODEC_IN_C),
+ TOACODEC_OUTPUT("OUT", TOACODEC_OUT),
+};
+
+static int g12a_toacodec_component_probe(struct snd_soc_component *c)
+{
+ /* Initialize the static clock parameters */
+ return snd_soc_component_write(c, TOACODEC_CTRL0,
+ CTRL0_BLK_CAP_INV);
+}
+
+static const struct snd_soc_dapm_route g12a_toacodec_routes[] = {
+ { "SRC", "I2S A", "IN A Playback" },
+ { "SRC", "I2S B", "IN B Playback" },
+ { "SRC", "I2S C", "IN C Playback" },
+ { "OUT EN", "Switch", "SRC" },
+ { "OUT Capture", NULL, "OUT EN" },
+};
+
+static const struct snd_kcontrol_new g12a_toacodec_controls[] = {
+ SOC_SINGLE("Lane Select", TOACODEC_CTRL0, CTRL0_LANE_SEL, 3, 0),
+};
+
+static const struct snd_soc_component_driver g12a_toacodec_component_drv = {
+ .probe = g12a_toacodec_component_probe,
+ .controls = g12a_toacodec_controls,
+ .num_controls = ARRAY_SIZE(g12a_toacodec_controls),
+ .dapm_widgets = g12a_toacodec_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(g12a_toacodec_widgets),
+ .dapm_routes = g12a_toacodec_routes,
+ .num_dapm_routes = ARRAY_SIZE(g12a_toacodec_routes),
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config g12a_toacodec_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static const struct of_device_id g12a_toacodec_of_match[] = {
+ { .compatible = "amlogic,g12a-toacodec", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, g12a_toacodec_of_match);
+
+static int g12a_toacodec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *regs;
+ struct regmap *map;
+ int ret;
+
+ ret = device_reset(dev);
+ if (ret)
+ return ret;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ map = devm_regmap_init_mmio(dev, regs, &g12a_toacodec_regmap_cfg);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ return devm_snd_soc_register_component(dev,
+ &g12a_toacodec_component_drv, g12a_toacodec_dai_drv,
+ ARRAY_SIZE(g12a_toacodec_dai_drv));
+}
+
+static struct platform_driver g12a_toacodec_pdrv = {
+ .driver = {
+ .name = G12A_TOACODEC_DRV_NAME,
+ .of_match_table = g12a_toacodec_of_match,
+ },
+ .probe = g12a_toacodec_probe,
+};
+module_platform_driver(g12a_toacodec_pdrv);
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic G12a To Internal DAC Codec Driver");
+MODULE_LICENSE("GPL v2");
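
Both this driver and the tohdmitx glue below now check snd_soc_component_test_bits() before touching the mux, so the disconnect/reconnect sequence only runs on a real change. A plain-C sketch of that predicate (do the masked bits of the current register value differ from the new value), using a user-space stand-in rather than the ASoC helper:

#include <stdio.h>
#include <stdint.h>

#define DAT_SEL_SHIFT	14
#define DAT_SEL_MASK	(0x3u << DAT_SEL_SHIFT)	/* CTRL0_DAT_SEL */

/* User-space stand-in for snd_soc_component_test_bits() */
static int test_bits(uint32_t reg, uint32_t mask, uint32_t value)
{
	return (reg & mask) != (value & mask);
}

int main(void)
{
	uint32_t ctrl0 = 1u << DAT_SEL_SHIFT;	/* currently "I2S B" */

	printf("re-select I2S B -> changed: %d\n",
	       test_bits(ctrl0, DAT_SEL_MASK, 1u << DAT_SEL_SHIFT));
	printf("select I2S C    -> changed: %d\n",
	       test_bits(ctrl0, DAT_SEL_MASK, 2u << DAT_SEL_SHIFT));
	return 0;
}
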
diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
index 8a0db28a6a40..9b2b59536ced 100644
--- a/sound/soc/meson/g12a-tohdmitx.c
+++ b/sound/soc/meson/g12a-tohdmitx.c
@@ -13,112 +13,51 @@
#include <sound/soc-dai.h>
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+#include "meson-codec-glue.h"
#define G12A_TOHDMITX_DRV_NAME "g12a-tohdmitx"
#define TOHDMITX_CTRL0 0x0
#define CTRL0_ENABLE_SHIFT 31
-#define CTRL0_I2S_DAT_SEL GENMASK(13, 12)
+#define CTRL0_I2S_DAT_SEL_SHIFT 12
+#define CTRL0_I2S_DAT_SEL (0x3 << CTRL0_I2S_DAT_SEL_SHIFT)
#define CTRL0_I2S_LRCLK_SEL GENMASK(9, 8)
#define CTRL0_I2S_BLK_CAP_INV BIT(7)
#define CTRL0_I2S_BCLK_O_INV BIT(6)
#define CTRL0_I2S_BCLK_SEL GENMASK(5, 4)
#define CTRL0_SPDIF_CLK_CAP_INV BIT(3)
#define CTRL0_SPDIF_CLK_O_INV BIT(2)
-#define CTRL0_SPDIF_SEL BIT(1)
+#define CTRL0_SPDIF_SEL_SHIFT 1
+#define CTRL0_SPDIF_SEL (0x1 << CTRL0_SPDIF_SEL_SHIFT)
#define CTRL0_SPDIF_CLK_SEL BIT(0)
-struct g12a_tohdmitx_input {
- struct snd_soc_pcm_stream params;
- unsigned int fmt;
-};
-
-static struct snd_soc_dapm_widget *
-g12a_tohdmitx_get_input(struct snd_soc_dapm_widget *w)
-{
- struct snd_soc_dapm_path *p = NULL;
- struct snd_soc_dapm_widget *in;
-
- snd_soc_dapm_widget_for_each_source_path(w, p) {
- if (!p->connect)
- continue;
-
- /* Check that we still are in the same component */
- if (snd_soc_dapm_to_component(w->dapm) !=
- snd_soc_dapm_to_component(p->source->dapm))
- continue;
-
- if (p->source->id == snd_soc_dapm_dai_in)
- return p->source;
-
- in = g12a_tohdmitx_get_input(p->source);
- if (in)
- return in;
- }
-
- return NULL;
-}
-
-static struct g12a_tohdmitx_input *
-g12a_tohdmitx_get_input_data(struct snd_soc_dapm_widget *w)
-{
- struct snd_soc_dapm_widget *in =
- g12a_tohdmitx_get_input(w);
- struct snd_soc_dai *dai;
-
- if (WARN_ON(!in))
- return NULL;
-
- dai = in->priv;
-
- return dai->playback_dma_data;
-}
-
static const char * const g12a_tohdmitx_i2s_mux_texts[] = {
"I2S A", "I2S B", "I2S C",
};
-static SOC_ENUM_SINGLE_EXT_DECL(g12a_tohdmitx_i2s_mux_enum,
- g12a_tohdmitx_i2s_mux_texts);
-
-static int g12a_tohdmitx_get_input_val(struct snd_soc_component *component,
- unsigned int mask)
-{
- unsigned int val;
-
- snd_soc_component_read(component, TOHDMITX_CTRL0, &val);
- return (val & mask) >> __ffs(mask);
-}
-
-static int g12a_tohdmitx_i2s_mux_get_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_kcontrol_component(kcontrol);
-
- ucontrol->value.enumerated.item[0] =
- g12a_tohdmitx_get_input_val(component, CTRL0_I2S_DAT_SEL);
-
- return 0;
-}
-
static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component =
snd_soc_dapm_kcontrol_component(kcontrol);
struct snd_soc_dapm_context *dapm =
snd_soc_dapm_kcontrol_dapm(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int mux = ucontrol->value.enumerated.item[0];
- unsigned int val = g12a_tohdmitx_get_input_val(component,
- CTRL0_I2S_DAT_SEL);
+ unsigned int mux, changed;
+
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, e->reg,
+ CTRL0_I2S_DAT_SEL,
+ FIELD_PREP(CTRL0_I2S_DAT_SEL,
+ mux));
+
+ if (!changed)
+ return 0;
/* Force disconnect of the mux while updating */
- if (val != mux)
- snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
- snd_soc_component_update_bits(component, TOHDMITX_CTRL0,
+ snd_soc_component_update_bits(component, e->reg,
CTRL0_I2S_DAT_SEL |
CTRL0_I2S_LRCLK_SEL |
CTRL0_I2S_BCLK_SEL,
@@ -131,30 +70,19 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
return 0;
}
+static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0,
+ CTRL0_I2S_DAT_SEL_SHIFT,
+ g12a_tohdmitx_i2s_mux_texts);
+
static const struct snd_kcontrol_new g12a_tohdmitx_i2s_mux =
SOC_DAPM_ENUM_EXT("I2S Source", g12a_tohdmitx_i2s_mux_enum,
- g12a_tohdmitx_i2s_mux_get_enum,
+ snd_soc_dapm_get_enum_double,
g12a_tohdmitx_i2s_mux_put_enum);
static const char * const g12a_tohdmitx_spdif_mux_texts[] = {
"SPDIF A", "SPDIF B",
};
-static SOC_ENUM_SINGLE_EXT_DECL(g12a_tohdmitx_spdif_mux_enum,
- g12a_tohdmitx_spdif_mux_texts);
-
-static int g12a_tohdmitx_spdif_mux_get_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_kcontrol_component(kcontrol);
-
- ucontrol->value.enumerated.item[0] =
- g12a_tohdmitx_get_input_val(component, CTRL0_SPDIF_SEL);
-
- return 0;
-}
-
static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -163,13 +91,18 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_context *dapm =
snd_soc_dapm_kcontrol_dapm(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int mux = ucontrol->value.enumerated.item[0];
- unsigned int val = g12a_tohdmitx_get_input_val(component,
- CTRL0_SPDIF_SEL);
+ unsigned int mux, changed;
+
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, TOHDMITX_CTRL0,
+ CTRL0_SPDIF_SEL,
+ FIELD_PREP(CTRL0_SPDIF_SEL, mux));
+
+ if (!changed)
+ return 0;
/* Force disconnect of the mux while updating */
- if (val != mux)
- snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, 0, NULL, NULL);
snd_soc_component_update_bits(component, TOHDMITX_CTRL0,
CTRL0_SPDIF_SEL |
@@ -182,9 +115,13 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
return 0;
}
+static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_spdif_mux_enum, TOHDMITX_CTRL0,
+ CTRL0_SPDIF_SEL_SHIFT,
+ g12a_tohdmitx_spdif_mux_texts);
+
static const struct snd_kcontrol_new g12a_tohdmitx_spdif_mux =
SOC_DAPM_ENUM_EXT("SPDIF Source", g12a_tohdmitx_spdif_mux_enum,
- g12a_tohdmitx_spdif_mux_get_enum,
+ snd_soc_dapm_get_enum_double,
g12a_tohdmitx_spdif_mux_put_enum);
static const struct snd_kcontrol_new g12a_tohdmitx_out_enable =
@@ -202,83 +139,13 @@ static const struct snd_soc_dapm_widget g12a_tohdmitx_widgets[] = {
&g12a_tohdmitx_out_enable),
};
-static int g12a_tohdmitx_input_probe(struct snd_soc_dai *dai)
-{
- struct g12a_tohdmitx_input *data;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- dai->playback_dma_data = data;
- return 0;
-}
-
-static int g12a_tohdmitx_input_remove(struct snd_soc_dai *dai)
-{
- kfree(dai->playback_dma_data);
- return 0;
-}
-
-static int g12a_tohdmitx_input_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct g12a_tohdmitx_input *data = dai->playback_dma_data;
-
- data->params.rates = snd_pcm_rate_to_rate_bit(params_rate(params));
- data->params.rate_min = params_rate(params);
- data->params.rate_max = params_rate(params);
- data->params.formats = 1 << params_format(params);
- data->params.channels_min = params_channels(params);
- data->params.channels_max = params_channels(params);
- data->params.sig_bits = dai->driver->playback.sig_bits;
-
- return 0;
-}
-
-
-static int g12a_tohdmitx_input_set_fmt(struct snd_soc_dai *dai,
- unsigned int fmt)
-{
- struct g12a_tohdmitx_input *data = dai->playback_dma_data;
-
- /* Save the source stream format for the downstream link */
- data->fmt = fmt;
- return 0;
-}
-
-static int g12a_tohdmitx_output_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct g12a_tohdmitx_input *in_data =
- g12a_tohdmitx_get_input_data(dai->capture_widget);
-
- if (!in_data)
- return -ENODEV;
-
- if (WARN_ON(!rtd->dai_link->params)) {
- dev_warn(dai->dev, "codec2codec link expected\n");
- return -EINVAL;
- }
-
- /* Replace link params with the input params */
- rtd->dai_link->params = &in_data->params;
-
- if (!in_data->fmt)
- return 0;
-
- return snd_soc_runtime_set_dai_fmt(rtd, in_data->fmt);
-}
-
static const struct snd_soc_dai_ops g12a_tohdmitx_input_ops = {
- .hw_params = g12a_tohdmitx_input_hw_params,
- .set_fmt = g12a_tohdmitx_input_set_fmt,
+ .hw_params = meson_codec_glue_input_hw_params,
+ .set_fmt = meson_codec_glue_input_set_fmt,
};
static const struct snd_soc_dai_ops g12a_tohdmitx_output_ops = {
- .startup = g12a_tohdmitx_output_startup,
+ .startup = meson_codec_glue_output_startup,
};
#define TOHDMITX_SPDIF_FORMATS \
@@ -305,8 +172,8 @@ static const struct snd_soc_dai_ops g12a_tohdmitx_output_ops = {
.id = (xid), \
.playback = TOHDMITX_STREAM(xname, "Playback", xfmt, xchmax), \
.ops = &g12a_tohdmitx_input_ops, \
- .probe = g12a_tohdmitx_input_probe, \
- .remove = g12a_tohdmitx_input_remove, \
+ .probe = meson_codec_glue_input_dai_probe, \
+ .remove = meson_codec_glue_input_dai_remove, \
}
#define TOHDMITX_OUT(xname, xid, xfmt, xchmax) { \
diff --git a/sound/soc/meson/gx-card.c b/sound/soc/meson/gx-card.c
new file mode 100644
index 000000000000..7b01dcb73e5e
--- /dev/null
+++ b/sound/soc/meson/gx-card.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "meson-card.h"
+
+struct gx_dai_link_i2s_data {
+ unsigned int mclk_fs;
+};
+
+/*
+ * Base params for the codec to codec links
+ * These will be overwritten by the CPU side of the link
+ */
+static const struct snd_soc_pcm_stream codec_params = {
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 5525,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 8,
+};
+
+static int gx_card_i2s_be_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct meson_card *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct gx_dai_link_i2s_data *be =
+ (struct gx_dai_link_i2s_data *)priv->link_data[rtd->num];
+
+ return meson_card_i2s_set_sysclk(substream, params, be->mclk_fs);
+}
+
+static const struct snd_soc_ops gx_card_i2s_be_ops = {
+ .hw_params = gx_card_i2s_be_hw_params,
+};
+
+static int gx_card_parse_i2s(struct snd_soc_card *card,
+ struct device_node *node,
+ int *index)
+{
+ struct meson_card *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai_link *link = &card->dai_link[*index];
+ struct gx_dai_link_i2s_data *be;
+
+ /* Allocate i2s link parameters */
+ be = devm_kzalloc(card->dev, sizeof(*be), GFP_KERNEL);
+ if (!be)
+ return -ENOMEM;
+ priv->link_data[*index] = be;
+
+ /* Setup i2s link */
+ link->ops = &gx_card_i2s_be_ops;
+ link->dai_fmt = meson_card_parse_daifmt(node, link->cpus->of_node);
+
+ of_property_read_u32(node, "mclk-fs", &be->mclk_fs);
+
+ return 0;
+}
+
+static int gx_card_cpu_identify(struct snd_soc_dai_link_component *c,
+ char *match)
+{
+ if (of_device_is_compatible(c->of_node, DT_PREFIX "aiu")) {
+ if (strstr(c->dai_name, match))
+ return 1;
+ }
+
+ /* dai not matched */
+ return 0;
+}
+
+static int gx_card_add_link(struct snd_soc_card *card, struct device_node *np,
+ int *index)
+{
+ struct snd_soc_dai_link *dai_link = &card->dai_link[*index];
+ struct snd_soc_dai_link_component *cpu;
+ int ret;
+
+ cpu = devm_kzalloc(card->dev, sizeof(*cpu), GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ dai_link->cpus = cpu;
+ dai_link->num_cpus = 1;
+
+ ret = meson_card_parse_dai(card, np, &dai_link->cpus->of_node,
+ &dai_link->cpus->dai_name);
+ if (ret)
+ return ret;
+
+ if (gx_card_cpu_identify(dai_link->cpus, "FIFO"))
+ ret = meson_card_set_fe_link(card, dai_link, np, true);
+ else
+ ret = meson_card_set_be_link(card, dai_link, np);
+
+ if (ret)
+ return ret;
+
+ /* Check if the cpu is the i2s encoder and parse i2s data */
+ if (gx_card_cpu_identify(dai_link->cpus, "I2S Encoder"))
+ ret = gx_card_parse_i2s(card, np, index);
+
+ /* Or apply codec to codec params if necessary */
+ else if (gx_card_cpu_identify(dai_link->cpus, "CODEC CTRL"))
+ dai_link->params = &codec_params;
+
+ return ret;
+}
+
+static const struct meson_card_match_data gx_card_match_data = {
+ .add_link = gx_card_add_link,
+};
+
+static const struct of_device_id gx_card_of_match[] = {
+ {
+ .compatible = "amlogic,gx-sound-card",
+ .data = &gx_card_match_data,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, gx_card_of_match);
+
+static struct platform_driver gx_card_pdrv = {
+ .probe = meson_card_probe,
+ .remove = meson_card_remove,
+ .driver = {
+ .name = "gx-sound-card",
+ .of_match_table = gx_card_of_match,
+ },
+};
+module_platform_driver(gx_card_pdrv);
+
+MODULE_DESCRIPTION("Amlogic GX ALSA machine driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/meson-card-utils.c b/sound/soc/meson/meson-card-utils.c
new file mode 100644
index 000000000000..2ca8c98e204f
--- /dev/null
+++ b/sound/soc/meson/meson-card-utils.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/soc.h>
+
+#include "meson-card.h"
+
+int meson_card_i2s_set_sysclk(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ unsigned int mclk_fs)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
+ unsigned int mclk;
+ int ret, i;
+
+ if (!mclk_fs)
+ return 0;
+
+ mclk = params_rate(params) * mclk_fs;
+
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), 0, mclk,
+ SND_SOC_CLOCK_OUT);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_card_i2s_set_sysclk);
+
+int meson_card_reallocate_links(struct snd_soc_card *card,
+ unsigned int num_links)
+{
+ struct meson_card *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai_link *links;
+ void **ldata;
+
+ links = krealloc(priv->card.dai_link,
+ num_links * sizeof(*priv->card.dai_link),
+ GFP_KERNEL | __GFP_ZERO);
+ ldata = krealloc(priv->link_data,
+ num_links * sizeof(*priv->link_data),
+ GFP_KERNEL | __GFP_ZERO);
+
+ if (!links || !ldata) {
+ dev_err(priv->card.dev, "failed to allocate links\n");
+ return -ENOMEM;
+ }
+
+ priv->card.dai_link = links;
+ priv->link_data = ldata;
+ priv->card.num_links = num_links;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_card_reallocate_links);
+
+int meson_card_parse_dai(struct snd_soc_card *card,
+ struct device_node *node,
+ struct device_node **dai_of_node,
+ const char **dai_name)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ if (!dai_name || !dai_of_node || !node)
+ return -EINVAL;
+
+ ret = of_parse_phandle_with_args(node, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(card->dev, "can't parse dai %d\n", ret);
+ return ret;
+ }
+ *dai_of_node = args.np;
+
+ return snd_soc_get_dai_name(&args, dai_name);
+}
+EXPORT_SYMBOL_GPL(meson_card_parse_dai);
+
+static int meson_card_set_link_name(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node,
+ const char *prefix)
+{
+ char *name = devm_kasprintf(card->dev, GFP_KERNEL, "%s.%s",
+ prefix, node->full_name);
+ if (!name)
+ return -ENOMEM;
+
+ link->name = name;
+ link->stream_name = name;
+
+ return 0;
+}
+
+unsigned int meson_card_parse_daifmt(struct device_node *node,
+ struct device_node *cpu_node)
+{
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ unsigned int daifmt;
+
+ daifmt = snd_soc_of_parse_daifmt(node, DT_PREFIX,
+ &bitclkmaster, &framemaster);
+ daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+
+ /* If no master is provided, default to cpu master */
+ if (!bitclkmaster || bitclkmaster == cpu_node) {
+ daifmt |= (!framemaster || framemaster == cpu_node) ?
+ SND_SOC_DAIFMT_CBS_CFS : SND_SOC_DAIFMT_CBS_CFM;
+ } else {
+ daifmt |= (!framemaster || framemaster == cpu_node) ?
+ SND_SOC_DAIFMT_CBM_CFS : SND_SOC_DAIFMT_CBM_CFM;
+ }
+
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+
+ return daifmt;
+}
+EXPORT_SYMBOL_GPL(meson_card_parse_daifmt);
+
+int meson_card_set_be_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node)
+{
+ struct snd_soc_dai_link_component *codec;
+ struct device_node *np;
+ int ret, num_codecs;
+
+ link->no_pcm = 1;
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
+
+ num_codecs = of_get_child_count(node);
+ if (!num_codecs) {
+ dev_err(card->dev, "be link %s has no codec\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ codec = devm_kcalloc(card->dev, num_codecs, sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ link->codecs = codec;
+ link->num_codecs = num_codecs;
+
+ for_each_child_of_node(node, np) {
+ ret = meson_card_parse_dai(card, np, &codec->of_node,
+ &codec->dai_name);
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+
+ codec++;
+ }
+
+ ret = meson_card_set_link_name(card, link, node, "be");
+ if (ret)
+ dev_err(card->dev, "error setting %pOFn link name\n", np);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(meson_card_set_be_link);
+
+int meson_card_set_fe_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node,
+ bool is_playback)
+{
+ struct snd_soc_dai_link_component *codec;
+
+ codec = devm_kzalloc(card->dev, sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ link->codecs = codec;
+ link->num_codecs = 1;
+
+ link->dynamic = 1;
+ link->dpcm_merged_format = 1;
+ link->dpcm_merged_chan = 1;
+ link->dpcm_merged_rate = 1;
+ link->codecs->dai_name = "snd-soc-dummy-dai";
+ link->codecs->name = "snd-soc-dummy";
+
+ if (is_playback)
+ link->dpcm_playback = 1;
+ else
+ link->dpcm_capture = 1;
+
+ return meson_card_set_link_name(card, link, node, "fe");
+}
+EXPORT_SYMBOL_GPL(meson_card_set_fe_link);
+
+static int meson_card_add_links(struct snd_soc_card *card)
+{
+ struct meson_card *priv = snd_soc_card_get_drvdata(card);
+ struct device_node *node = card->dev->of_node;
+ struct device_node *np;
+ int num, i, ret;
+
+ num = of_get_child_count(node);
+ if (!num) {
+ dev_err(card->dev, "card has no links\n");
+ return -EINVAL;
+ }
+
+ ret = meson_card_reallocate_links(card, num);
+ if (ret)
+ return ret;
+
+ i = 0;
+ for_each_child_of_node(node, np) {
+ ret = priv->match_data->add_link(card, np, &i);
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int meson_card_parse_of_optional(struct snd_soc_card *card,
+ const char *propname,
+ int (*func)(struct snd_soc_card *c,
+ const char *p))
+{
+ /* If property is not provided, don't fail ... */
+ if (!of_property_read_bool(card->dev->of_node, propname))
+ return 0;
+
+ /* ... but do fail if it is provided and the parsing fails */
+ return func(card, propname);
+}
+
+static int meson_card_add_aux_devices(struct snd_soc_card *card)
+{
+ struct device_node *node = card->dev->of_node;
+ struct snd_soc_aux_dev *aux;
+ int num, i;
+
+ num = of_count_phandle_with_args(node, "audio-aux-devs", NULL);
+ if (num == -ENOENT) {
+ return 0;
+ } else if (num < 0) {
+ dev_err(card->dev, "error getting auxiliary devices: %d\n",
+ num);
+ return num;
+ }
+
+ aux = devm_kcalloc(card->dev, num, sizeof(*aux), GFP_KERNEL);
+ if (!aux)
+ return -ENOMEM;
+ card->aux_dev = aux;
+ card->num_aux_devs = num;
+
+ for_each_card_pre_auxs(card, i, aux) {
+ aux->dlc.of_node =
+ of_parse_phandle(node, "audio-aux-devs", i);
+ if (!aux->dlc.of_node)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void meson_card_clean_references(struct meson_card *priv)
+{
+ struct snd_soc_card *card = &priv->card;
+ struct snd_soc_dai_link *link;
+ struct snd_soc_dai_link_component *codec;
+ struct snd_soc_aux_dev *aux;
+ int i, j;
+
+ if (card->dai_link) {
+ for_each_card_prelinks(card, i, link) {
+ if (link->cpus)
+ of_node_put(link->cpus->of_node);
+ for_each_link_codecs(link, j, codec)
+ of_node_put(codec->of_node);
+ }
+ }
+
+ if (card->aux_dev) {
+ for_each_card_pre_auxs(card, i, aux)
+ of_node_put(aux->dlc.of_node);
+ }
+
+ kfree(card->dai_link);
+ kfree(priv->link_data);
+}
+
+int meson_card_probe(struct platform_device *pdev)
+{
+ const struct meson_card_match_data *data;
+ struct device *dev = &pdev->dev;
+ struct meson_card *priv;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ snd_soc_card_set_drvdata(&priv->card, priv);
+
+ priv->card.owner = THIS_MODULE;
+ priv->card.dev = dev;
+ priv->match_data = data;
+
+ ret = snd_soc_of_parse_card_name(&priv->card, "model");
+ if (ret < 0)
+ return ret;
+
+ ret = meson_card_parse_of_optional(&priv->card, "audio-routing",
+ snd_soc_of_parse_audio_routing);
+ if (ret) {
+ dev_err(dev, "error while parsing routing\n");
+ return ret;
+ }
+
+ ret = meson_card_parse_of_optional(&priv->card, "audio-widgets",
+ snd_soc_of_parse_audio_simple_widgets);
+ if (ret) {
+ dev_err(dev, "error while parsing widgets\n");
+ return ret;
+ }
+
+ ret = meson_card_add_links(&priv->card);
+ if (ret)
+ goto out_err;
+
+ ret = meson_card_add_aux_devices(&priv->card);
+ if (ret)
+ goto out_err;
+
+ ret = devm_snd_soc_register_card(dev, &priv->card);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ meson_card_clean_references(priv);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(meson_card_probe);
+
+int meson_card_remove(struct platform_device *pdev)
+{
+ struct meson_card *priv = platform_get_drvdata(pdev);
+
+ meson_card_clean_references(priv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_card_remove);
+
+MODULE_DESCRIPTION("Amlogic Sound Card Utils");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/meson-card.h b/sound/soc/meson/meson-card.h
new file mode 100644
index 000000000000..74314071c80d
--- /dev/null
+++ b/sound/soc/meson/meson-card.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_SND_CARD_H
+#define _MESON_SND_CARD_H
+
+struct device_node;
+struct platform_device;
+
+struct snd_soc_card;
+struct snd_pcm_substream;
+struct snd_pcm_hw_params;
+
+#define DT_PREFIX "amlogic,"
+
+struct meson_card_match_data {
+ int (*add_link)(struct snd_soc_card *card,
+ struct device_node *node,
+ int *index);
+};
+
+struct meson_card {
+ const struct meson_card_match_data *match_data;
+ struct snd_soc_card card;
+ void **link_data;
+};
+
+unsigned int meson_card_parse_daifmt(struct device_node *node,
+ struct device_node *cpu_node);
+
+int meson_card_i2s_set_sysclk(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ unsigned int mclk_fs);
+
+int meson_card_reallocate_links(struct snd_soc_card *card,
+ unsigned int num_links);
+int meson_card_parse_dai(struct snd_soc_card *card,
+ struct device_node *node,
+ struct device_node **dai_of_node,
+ const char **dai_name);
+int meson_card_set_be_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node);
+int meson_card_set_fe_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link,
+ struct device_node *node,
+ bool is_playback);
+
+int meson_card_probe(struct platform_device *pdev);
+int meson_card_remove(struct platform_device *pdev);
+
+#endif /* _MESON_SND_CARD_H */
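
For reference, a minimal hypothetical machine driver built on this header only needs to supply the add_link() callback and can reuse the shared probe/remove directly, much as gx-card.c does above; all example_* names below are illustrative:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/of_platform.h>
#include <sound/soc.h>

#include "meson-card.h"

static int example_card_add_link(struct snd_soc_card *card,
				 struct device_node *np, int *index)
{
	struct snd_soc_dai_link *link = &card->dai_link[*index];
	struct snd_soc_dai_link_component *cpu;
	int ret;

	cpu = devm_kzalloc(card->dev, sizeof(*cpu), GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	link->cpus = cpu;
	link->num_cpus = 1;

	ret = meson_card_parse_dai(card, np, &link->cpus->of_node,
				   &link->cpus->dai_name);
	if (ret)
		return ret;

	/* Every link is a back-end in this sketch */
	return meson_card_set_be_link(card, link, np);
}

static const struct meson_card_match_data example_match_data = {
	.add_link = example_card_add_link,
};

static const struct of_device_id example_card_of_match[] = {
	{ .compatible = "example,sound-card", .data = &example_match_data },
	{}
};
MODULE_DEVICE_TABLE(of, example_card_of_match);

static struct platform_driver example_card_pdrv = {
	.probe = meson_card_probe,
	.remove = meson_card_remove,
	.driver = {
		.name = "example-sound-card",
		.of_match_table = example_card_of_match,
	},
};
module_platform_driver(example_card_pdrv);
MODULE_LICENSE("GPL v2");
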
diff --git a/sound/soc/meson/meson-codec-glue.c b/sound/soc/meson/meson-codec-glue.c
new file mode 100644
index 000000000000..524a33472337
--- /dev/null
+++ b/sound/soc/meson/meson-codec-glue.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "meson-codec-glue.h"
+
+static struct snd_soc_dapm_widget *
+meson_codec_glue_get_input(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_path *p = NULL;
+ struct snd_soc_dapm_widget *in;
+
+ snd_soc_dapm_widget_for_each_source_path(w, p) {
+ if (!p->connect)
+ continue;
+
+ /* Check that we are still in the same component */
+ if (snd_soc_dapm_to_component(w->dapm) !=
+ snd_soc_dapm_to_component(p->source->dapm))
+ continue;
+
+ if (p->source->id == snd_soc_dapm_dai_in)
+ return p->source;
+
+ in = meson_codec_glue_get_input(p->source);
+ if (in)
+ return in;
+ }
+
+ return NULL;
+}
+
+static void meson_codec_glue_input_set_data(struct snd_soc_dai *dai,
+ struct meson_codec_glue_input *data)
+{
+ dai->playback_dma_data = data;
+}
+
+struct meson_codec_glue_input *
+meson_codec_glue_input_get_data(struct snd_soc_dai *dai)
+{
+ return dai->playback_dma_data;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_get_data);
+
+static struct meson_codec_glue_input *
+meson_codec_glue_output_get_input_data(struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_dapm_widget *in =
+ meson_codec_glue_get_input(w);
+ struct snd_soc_dai *dai;
+
+ if (WARN_ON(!in))
+ return NULL;
+
+ dai = in->priv;
+
+ return meson_codec_glue_input_get_data(dai);
+}
+
+int meson_codec_glue_input_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data =
+ meson_codec_glue_input_get_data(dai);
+
+ data->params.rates = snd_pcm_rate_to_rate_bit(params_rate(params));
+ data->params.rate_min = params_rate(params);
+ data->params.rate_max = params_rate(params);
+ data->params.formats = 1ULL << (__force int) params_format(params);
+ data->params.channels_min = params_channels(params);
+ data->params.channels_max = params_channels(params);
+ data->params.sig_bits = dai->driver->playback.sig_bits;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_hw_params);
+
+int meson_codec_glue_input_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct meson_codec_glue_input *data =
+ meson_codec_glue_input_get_data(dai);
+
+ /* Save the source stream format for the downstream link */
+ data->fmt = fmt;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_set_fmt);
+
+int meson_codec_glue_output_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct meson_codec_glue_input *in_data =
+ meson_codec_glue_output_get_input_data(dai->capture_widget);
+
+ if (!in_data)
+ return -ENODEV;
+
+ if (WARN_ON(!rtd->dai_link->params)) {
+ dev_warn(dai->dev, "codec2codec link expected\n");
+ return -EINVAL;
+ }
+
+ /* Replace link params with the input params */
+ rtd->dai_link->params = &in_data->params;
+
+ if (!in_data->fmt)
+ return 0;
+
+ return snd_soc_runtime_set_dai_fmt(rtd, in_data->fmt);
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_output_startup);
+
+int meson_codec_glue_input_dai_probe(struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ meson_codec_glue_input_set_data(dai, data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_dai_probe);
+
+int meson_codec_glue_input_dai_remove(struct snd_soc_dai *dai)
+{
+ struct meson_codec_glue_input *data =
+ meson_codec_glue_input_get_data(dai);
+
+ kfree(data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_codec_glue_input_dai_remove);
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic Codec Glue Helpers");
+MODULE_LICENSE("GPL v2");
+
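
Note how the hw_params helper above pins the codec-to-codec link to a single operating point: rate_min and rate_max (and the channel bounds) are both set to the current stream's values rather than a range. A standalone sketch of the snapshot it builds for a 48 kHz, 2-channel, S24_LE stream (the format constant is an illustrative stand-in for sound/pcm.h):

#include <stdio.h>
#include <stdint.h>

/* Cut-down mirror of the snd_soc_pcm_stream fields the glue fills in */
struct stream_params {
	uint64_t formats;
	unsigned int rate_min, rate_max;
	unsigned int channels_min, channels_max;
};

int main(void)
{
	/* Illustrative: SNDRV_PCM_FORMAT_S24_LE is 6 in sound/pcm.h */
	int pcm_format = 6;
	struct stream_params p = {
		.formats = 1ULL << pcm_format,     /* single-bit format mask */
		.rate_min = 48000, .rate_max = 48000, /* pinned, not a range */
		.channels_min = 2, .channels_max = 2,
	};

	printf("formats=0x%llx rate=[%u..%u] ch=[%u..%u]\n",
	       (unsigned long long)p.formats, p.rate_min, p.rate_max,
	       p.channels_min, p.channels_max);
	return 0;
}
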
diff --git a/sound/soc/meson/meson-codec-glue.h b/sound/soc/meson/meson-codec-glue.h
new file mode 100644
index 000000000000..07f99446c0c6
--- /dev/null
+++ b/sound/soc/meson/meson-codec-glue.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _MESON_CODEC_GLUE_H
+#define _MESON_CODEC_GLUE_H
+
+#include <sound/soc.h>
+
+struct meson_codec_glue_input {
+ struct snd_soc_pcm_stream params;
+ unsigned int fmt;
+};
+
+/* Input helpers */
+struct meson_codec_glue_input *
+meson_codec_glue_input_get_data(struct snd_soc_dai *dai);
+int meson_codec_glue_input_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+int meson_codec_glue_input_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt);
+int meson_codec_glue_input_dai_probe(struct snd_soc_dai *dai);
+int meson_codec_glue_input_dai_remove(struct snd_soc_dai *dai);
+
+/* Output helpers */
+int meson_codec_glue_output_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+
+#endif /* _MESON_CODEC_GLUE_H */
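
Any Amlogic glue codec can now delegate to these helpers; the g12a-tohdmitx conversion earlier in this patch is the first user. The expected wiring looks roughly like this (driver and DAI names are hypothetical):

/* Hypothetical glue codec wiring, mirroring g12a-tohdmitx above */
#include <sound/soc-dai.h>

#include "meson-codec-glue.h"

static const struct snd_soc_dai_ops example_input_ops = {
	.hw_params = meson_codec_glue_input_hw_params,
	.set_fmt = meson_codec_glue_input_set_fmt,
};

static const struct snd_soc_dai_ops example_output_ops = {
	.startup = meson_codec_glue_output_startup,
};

static struct snd_soc_dai_driver example_dais[] = {
	{
		.name = "example-in",
		.ops = &example_input_ops,
		.probe = meson_codec_glue_input_dai_probe,
		.remove = meson_codec_glue_input_dai_remove,
		/* .playback stream description omitted for brevity */
	},
	{
		.name = "example-out",
		.ops = &example_output_ops,
		/* .capture stream description omitted for brevity */
	},
};
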
diff --git a/sound/soc/meson/t9015.c b/sound/soc/meson/t9015.c
new file mode 100644
index 000000000000..56d2592c16d5
--- /dev/null
+++ b/sound/soc/meson/t9015.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define BLOCK_EN 0x00
+#define LORN_EN 0
+#define LORP_EN 1
+#define LOLN_EN 2
+#define LOLP_EN 3
+#define DACR_EN 4
+#define DACL_EN 5
+#define DACR_INV 20
+#define DACL_INV 21
+#define DACR_SRC 22
+#define DACL_SRC 23
+#define REFP_BUF_EN BIT(12)
+#define BIAS_CURRENT_EN BIT(13)
+#define VMID_GEN_FAST BIT(14)
+#define VMID_GEN_EN BIT(15)
+#define I2S_MODE BIT(30)
+#define VOL_CTRL0 0x04
+#define GAIN_H 31
+#define GAIN_L 23
+#define VOL_CTRL1 0x08
+#define DAC_MONO 8
+#define RAMP_RATE 10
+#define VC_RAMP_MODE 12
+#define MUTE_MODE 13
+#define UNMUTE_MODE 14
+#define DAC_SOFT_MUTE 15
+#define DACR_VC 16
+#define DACL_VC 24
+#define LINEOUT_CFG 0x0c
+#define LORN_POL 0
+#define LORP_POL 4
+#define LOLN_POL 8
+#define LOLP_POL 12
+#define POWER_CFG 0x10
+
+struct t9015 {
+ struct clk *pclk;
+ struct regulator *avdd;
+};
+
+static int t9015_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ unsigned int val;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ val = I2S_MODE;
+ break;
+
+ case SND_SOC_DAIFMT_CBS_CFS:
+ val = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, BLOCK_EN, I2S_MODE, val);
+
+ if (((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_I2S) &&
+ ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_LEFT_J))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops t9015_dai_ops = {
+ .set_fmt = t9015_dai_set_fmt,
+};
+
+static struct snd_soc_dai_driver t9015_dai = {
+ .name = "t9015-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = (SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ },
+ .ops = &t9015_dai_ops,
+};
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(dac_vol_tlv, -9525, 0);
+
+static const char * const ramp_rate_txt[] = { "Fast", "Slow" };
+static SOC_ENUM_SINGLE_DECL(ramp_rate_enum, VOL_CTRL1, RAMP_RATE,
+ ramp_rate_txt);
+
+static const char * const dacr_in_txt[] = { "Right", "Left" };
+static SOC_ENUM_SINGLE_DECL(dacr_in_enum, BLOCK_EN, DACR_SRC, dacr_in_txt);
+
+static const char * const dacl_in_txt[] = { "Left", "Right" };
+static SOC_ENUM_SINGLE_DECL(dacl_in_enum, BLOCK_EN, DACL_SRC, dacl_in_txt);
+
+static const char * const mono_txt[] = { "Stereo", "Mono"};
+static SOC_ENUM_SINGLE_DECL(mono_enum, VOL_CTRL1, DAC_MONO, mono_txt);
+
+static const struct snd_kcontrol_new t9015_snd_controls[] = {
+ /* Volume Controls */
+ SOC_ENUM("Playback Channel Mode", mono_enum),
+ SOC_SINGLE("Playback Switch", VOL_CTRL1, DAC_SOFT_MUTE, 1, 1),
+ SOC_DOUBLE_TLV("Playback Volume", VOL_CTRL1, DACL_VC, DACR_VC,
+ 0xff, 0, dac_vol_tlv),
+
+ /* Ramp Controls */
+ SOC_ENUM("Ramp Rate", ramp_rate_enum),
+ SOC_SINGLE("Volume Ramp Switch", VOL_CTRL1, VC_RAMP_MODE, 1, 0),
+ SOC_SINGLE("Mute Ramp Switch", VOL_CTRL1, MUTE_MODE, 1, 0),
+ SOC_SINGLE("Unmute Ramp Switch", VOL_CTRL1, UNMUTE_MODE, 1, 0),
+};
+
+static const struct snd_kcontrol_new t9015_right_dac_mux =
+ SOC_DAPM_ENUM("Right DAC Source", dacr_in_enum);
+static const struct snd_kcontrol_new t9015_left_dac_mux =
+ SOC_DAPM_ENUM("Left DAC Source", dacl_in_enum);
+
+static const struct snd_soc_dapm_widget t9015_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("Right IN", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("Left IN", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("Right DAC Sel", SND_SOC_NOPM, 0, 0,
+ &t9015_right_dac_mux),
+ SND_SOC_DAPM_MUX("Left DAC Sel", SND_SOC_NOPM, 0, 0,
+ &t9015_left_dac_mux),
+ SND_SOC_DAPM_DAC("Right DAC", NULL, BLOCK_EN, DACR_EN, 0),
+ SND_SOC_DAPM_DAC("Left DAC", NULL, BLOCK_EN, DACL_EN, 0),
+ SND_SOC_DAPM_OUT_DRV("Right- Driver", BLOCK_EN, LORN_EN, 0,
+ NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("Right+ Driver", BLOCK_EN, LORP_EN, 0,
+ NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("Left- Driver", BLOCK_EN, LOLN_EN, 0,
+ NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("Left+ Driver", BLOCK_EN, LOLP_EN, 0,
+ NULL, 0),
+ SND_SOC_DAPM_OUTPUT("LORN"),
+ SND_SOC_DAPM_OUTPUT("LORP"),
+ SND_SOC_DAPM_OUTPUT("LOLN"),
+ SND_SOC_DAPM_OUTPUT("LOLP"),
+};
+
+static const struct snd_soc_dapm_route t9015_dapm_routes[] = {
+ { "Right IN", NULL, "Playback" },
+ { "Left IN", NULL, "Playback" },
+ { "Right DAC Sel", "Right", "Right IN" },
+ { "Right DAC Sel", "Left", "Left IN" },
+ { "Left DAC Sel", "Right", "Right IN" },
+ { "Left DAC Sel", "Left", "Left IN" },
+ { "Right DAC", NULL, "Right DAC Sel" },
+ { "Left DAC", NULL, "Left DAC Sel" },
+ { "Right- Driver", NULL, "Right DAC" },
+ { "Right+ Driver", NULL, "Right DAC" },
+ { "Left- Driver", NULL, "Left DAC" },
+ { "Left+ Driver", NULL, "Left DAC" },
+ { "LORN", NULL, "Right- Driver", },
+ { "LORP", NULL, "Right+ Driver", },
+ { "LOLN", NULL, "Left- Driver", },
+ { "LOLP", NULL, "Left+ Driver", },
+};
+
+static int t9015_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct t9015 *priv = snd_soc_component_get_drvdata(component);
+ enum snd_soc_bias_level now =
+ snd_soc_component_get_bias_level(component);
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ snd_soc_component_update_bits(component, BLOCK_EN,
+ BIAS_CURRENT_EN,
+ BIAS_CURRENT_EN);
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_component_update_bits(component, BLOCK_EN,
+ BIAS_CURRENT_EN,
+ 0);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ ret = regulator_enable(priv->avdd);
+ if (ret) {
+ dev_err(component->dev, "AVDD enable failed\n");
+ return ret;
+ }
+
+ if (now == SND_SOC_BIAS_OFF) {
+ snd_soc_component_update_bits(component, BLOCK_EN,
+ VMID_GEN_EN | VMID_GEN_FAST | REFP_BUF_EN,
+ VMID_GEN_EN | VMID_GEN_FAST | REFP_BUF_EN);
+
+ mdelay(200);
+ snd_soc_component_update_bits(component, BLOCK_EN,
+ VMID_GEN_FAST,
+ 0);
+ }
+
+ break;
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_update_bits(component, BLOCK_EN,
+ VMID_GEN_EN | VMID_GEN_FAST | REFP_BUF_EN,
+ 0);
+
+ regulator_disable(priv->avdd);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver t9015_codec_driver = {
+ .set_bias_level = t9015_set_bias_level,
+ .controls = t9015_snd_controls,
+ .num_controls = ARRAY_SIZE(t9015_snd_controls),
+ .dapm_widgets = t9015_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(t9015_dapm_widgets),
+ .dapm_routes = t9015_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(t9015_dapm_routes),
+ .suspend_bias_off = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config t9015_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = POWER_CFG,
+};
+
+static int t9015_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct t9015 *priv;
+ void __iomem *regs;
+ struct regmap *regmap;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ if (PTR_ERR(priv->pclk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get core clock\n");
+ return PTR_ERR(priv->pclk);
+ }
+
+ priv->avdd = devm_regulator_get(dev, "AVDD");
+ if (IS_ERR(priv->avdd)) {
+ if (PTR_ERR(priv->avdd) != -EPROBE_DEFER)
+ dev_err(dev, "failed to AVDD\n");
+ return PTR_ERR(priv->avdd);
+ }
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dev, "core clock enable failed\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))clk_disable_unprepare,
+ priv->pclk);
+ if (ret)
+ return ret;
+
+ ret = device_reset(dev);
+ if (ret) {
+ dev_err(dev, "reset failed\n");
+ return ret;
+ }
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "register map failed\n");
+ return PTR_ERR(regs);
+ }
+
+ regmap = devm_regmap_init_mmio(dev, regs, &t9015_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ /*
+ * Initialize output polarity:
+ * ATM the output polarity is fixed, but in the future it might be useful
+ * to add a DT property to set this depending on the platform's needs
+ */
+ regmap_write(regmap, LINEOUT_CFG, 0x1111);
+
+ return devm_snd_soc_register_component(dev, &t9015_codec_driver,
+ &t9015_dai, 1);
+}
+
+static const struct of_device_id t9015_ids[] = {
+ { .compatible = "amlogic,t9015", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, t9015_ids);
+
+static struct platform_driver t9015_driver = {
+ .driver = {
+ .name = "t9015-codec",
+ .of_match_table = of_match_ptr(t9015_ids),
+ },
+ .probe = t9015_probe,
+};
+
+module_platform_driver(t9015_driver);
+
+MODULE_DESCRIPTION("ASoC Amlogic T9015 codec driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 9841e1da9782..f46d7aca8cf6 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -20,8 +20,8 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int rate = params_rate(params);
u32 mclk;
int ret;
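
This and the following hunks are mechanical conversions away from the removed rtd->cpu_dai/rtd->codec_dai fields. A rough model of what the new accessors do (see include/sound/soc.h for the authoritative definitions; CPU DAIs occupy the front of the runtime's combined array, codec DAIs follow):

/* Rough model only -- not the exact soc.h definitions */
#define asoc_rtd_to_cpu(rtd, n)   ((rtd)->dais[n])
#define asoc_rtd_to_codec(rtd, n) ((rtd)->dais[(n) + (rtd)->num_cpus])
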
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 295cfffa4646..d4c0f580a565 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -81,6 +81,9 @@ config SND_PXA2XX_SOC_TOSA
depends on SND_PXA2XX_SOC && MACH_TOSA
depends on MFD_TC6393XB
depends on AC97_BUS=n
+ select REGMAP
+ select AC97_BUS_NEW
+ select AC97_BUS_COMPAT
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -91,6 +94,9 @@ config SND_PXA2XX_SOC_E740
tristate "SoC AC97 Audio support for e740"
depends on SND_PXA2XX_SOC && MACH_E740
depends on AC97_BUS=n
+ select REGMAP
+ select AC97_BUS_NEW
+ select AC97_BUS_COMPAT
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -101,6 +107,7 @@ config SND_PXA2XX_SOC_E750
tristate "SoC AC97 Audio support for e750"
depends on SND_PXA2XX_SOC && MACH_E750
depends on AC97_BUS=n
+ select REGMAP
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -111,7 +118,10 @@ config SND_PXA2XX_SOC_E800
tristate "SoC AC97 Audio support for e800"
depends on SND_PXA2XX_SOC && MACH_E800
depends on AC97_BUS=n
+ select REGMAP
select SND_SOC_WM9712
+ select AC97_BUS_NEW
+ select AC97_BUS_COMPAT
select SND_PXA2XX_SOC_AC97
help
Say Y if you want to add support for SoC audio on the
@@ -122,6 +132,9 @@ config SND_PXA2XX_SOC_EM_X270
depends on SND_PXA2XX_SOC && (MACH_EM_X270 || MACH_EXEDA || \
MACH_CM_X300)
depends on AC97_BUS=n
+ select REGMAP
+ select AC97_BUS_NEW
+ select AC97_BUS_COMPAT
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -133,6 +146,9 @@ config SND_PXA2XX_SOC_PALM27X
depends on SND_PXA2XX_SOC && (MACH_PALMLD || MACH_PALMTX || \
MACH_PALMT5 || MACH_PALMTE2)
depends on AC97_BUS=n
+ select REGMAP
+ select AC97_BUS_NEW
+ select AC97_BUS_COMPAT
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -163,7 +179,10 @@ config SND_SOC_ZYLONITE
tristate "SoC Audio support for Marvell Zylonite"
depends on SND_PXA2XX_SOC && MACH_ZYLONITE
depends on AC97_BUS=n
+ select AC97_BUS_NEW
+ select AC97_BUS_COMPAT
select SND_PXA2XX_SOC_AC97
+ select REGMAP
select SND_PXA_SOC_SSP
select SND_SOC_WM9713
help
@@ -193,6 +212,9 @@ config SND_PXA2XX_SOC_MIOA701
tristate "SoC Audio support for MIO A701"
depends on SND_PXA2XX_SOC && MACH_MIOA701
depends on AC97_BUS=n
+ select REGMAP
+ select AC97_BUS_NEW
+ select AC97_BUS_COMPAT
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9713
help
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index 53b1435ced3f..016a91199485 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -44,8 +44,8 @@ static int brownstone_wm8994_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int freq_out, sspa_mclk, sysclk;
if (params_rate(params) > 11025) {
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index d81082323fb4..6fbef9a0afa7 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -116,8 +116,8 @@ static int corgi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int clk = 0;
int ret = 0;
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 0139343dbcce..b4da9a9a6521 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -54,8 +54,8 @@ static int hx4700_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret = 0;
/* set the I2S system clock as output */
diff --git a/sound/soc/pxa/imote2.c b/sound/soc/pxa/imote2.c
index 514e17724fc3..3014e8244ab4 100644
--- a/sound/soc/pxa/imote2.c
+++ b/sound/soc/pxa/imote2.c
@@ -12,8 +12,8 @@ static int imote2_asoc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int clk = 0;
int ret;
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 6483cff5b73d..e4c818f4cd62 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -83,8 +83,8 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int width;
int ret = 0;
@@ -121,8 +121,8 @@ static int magician_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret = 0;
/* set codec DAI configuration */
@@ -358,10 +358,10 @@ static int __init magician_init(void)
adapter = i2c_get_adapter(0);
if (!adapter)
return -ENODEV;
- client = i2c_new_device(adapter, i2c_board_info);
+ client = i2c_new_client_device(adapter, i2c_board_info);
i2c_put_adapter(adapter);
- if (!client)
- return -ENODEV;
+ if (IS_ERR(client))
+ return PTR_ERR(client);
ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
if (ret)
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 76e054d514a8..bf27b277c01f 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -73,7 +73,7 @@ static int rear_amp_event(struct snd_soc_dapm_widget *widget,
struct snd_soc_component *component;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- component = rtd->codec_dai->component;
+ component = asoc_rtd_to_codec(rtd, 0)->component;
return rear_amp_power(component, SND_SOC_DAPM_EVENT_ON(event));
}
@@ -117,7 +117,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
/* Prepare GPIO8 for rear speaker amplifier */
snd_soc_component_update_bits(component, AC97_GPIO_CFG, 0x100, 0x100);
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 287b5da739e5..3fe6c4c5a3ab 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -112,7 +112,7 @@ static int mmp_pcm_open(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct platform_device *pdev = to_platform_device(component->dev);
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct mmp_dma_data dma_data;
struct resource *r;
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index e701637a9ae9..3548a2634a63 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -251,7 +251,7 @@ static int mmp_sspa_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(dai);
struct ssp_device *sspa = sspa_priv->sspa;
struct snd_dmaengine_dai_dma_data *dma_params;
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 59ef04d0467a..287984a564c8 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -90,8 +90,8 @@ static int poodle_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int clk = 0;
int ret = 0;
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 5f1c477b5833..9a32bf72127a 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -96,7 +96,7 @@ static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
if (IS_ERR(clk_i2s))
return PTR_ERR(clk_i2s);
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index f7babffb7228..6d8174f62935 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -117,8 +117,8 @@ static int spitz_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int clk = 0;
int ret = 0;
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
index d8f79e2266b1..d5f2961b1a3e 100644
--- a/sound/soc/pxa/ttc-dkb.c
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -61,7 +61,7 @@ static const struct snd_soc_dapm_route ttc_audio_map[] = {
static int ttc_pm860x_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
/* Headset jack detection */
snd_soc_card_jack_new(rtd->card, "Headphone Jack", SND_JACK_HEADPHONE |
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index f9a33cb36f5b..6eee1aefc89a 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -34,8 +34,8 @@ static int z2_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int clk = 0;
int ret = 0;
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index 567dc133ea92..447b59b8bd33 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -66,7 +66,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
static int zylonite_wm9713_init(struct snd_soc_pcm_runtime *rtd)
{
if (clk_pout)
- snd_soc_dai_set_pll(rtd->codec_dai, 0, 0,
+ snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0), 0, 0,
clk_get_rate(pout), 0);
return 0;
@@ -76,8 +76,8 @@ static int zylonite_voice_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int wm9713_div = 0;
int ret = 0;
int rate = params_rate(params);
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 6530d2462a9e..f51b28d1b94d 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -99,7 +99,7 @@ config SND_SOC_MSM8996
config SND_SOC_SDM845
tristate "SoC Machine driver for SDM845 boards"
- depends on QCOM_APR && CROS_EC && I2C
+ depends on QCOM_APR && CROS_EC && I2C && SOUNDWIRE
select SND_SOC_QDSP6
select SND_SOC_QCOM_COMMON
select SND_SOC_RT5663
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index ac75838bbfab..2ef090f4af9e 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -33,9 +33,9 @@ struct apq8016_sbc_data {
static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai;
struct snd_soc_component *component;
- struct snd_soc_dai_link *dai_link = rtd->dai_link;
struct snd_soc_card *card = rtd->card;
struct apq8016_sbc_data *pdata = snd_soc_card_get_drvdata(card);
int i, rval;
@@ -90,10 +90,9 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
pdata->jack_setup = true;
}
- for (i = 0 ; i < dai_link->num_codecs; i++) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
- component = dai->component;
+ component = codec_dai->component;
/* Set default mclk for internal codec */
rval = snd_soc_component_set_sysclk(component, 0, 0, DEFAULT_MCLK_RATE,
SND_SOC_CLOCK_IN);
diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
index 94363fd6846a..d55e3ad96716 100644
--- a/sound/soc/qcom/apq8096.c
+++ b/sound/soc/qcom/apq8096.c
@@ -31,8 +31,8 @@ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
u32 rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
u32 rx_ch_cnt = 0, tx_ch_cnt = 0;
int ret = 0;
@@ -66,7 +66,7 @@ static struct snd_soc_ops apq8096_ops = {
static int apq8096_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/*
* Codec SLIMBUS configuration
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index b05091c283b7..34f7fd1bab1c 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -55,7 +55,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct lpass_variant *v = drvdata->variant;
int ret, dma_ch, dir = substream->stream;
@@ -529,7 +529,7 @@ static void lpass_platform_pcm_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream;
int i;
- for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
+ for_each_pcm_streams(i) {
substream = pcm->streams[i].substream;
if (substream) {
snd_dma_free_pages(&substream->dma_buffer);
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index c0d422d0ab94..f6c7cddf08e8 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -41,6 +41,9 @@
#define Q6ASM_DAI_TX 1
#define Q6ASM_DAI_RX 2
+#define ALAC_CH_LAYOUT_MONO ((101 << 16) | 1)
+#define ALAC_CH_LAYOUT_STEREO ((101 << 16) | 2)
+
enum stream_state {
Q6ASM_STREAM_IDLE = 0,
Q6ASM_STREAM_STOPPED,
@@ -69,6 +72,8 @@ struct q6asm_dai_rtd {
};
struct q6asm_dai_data {
+ struct snd_soc_dai_driver *dais;
+ int num_dais;
long long int sid;
};
@@ -250,7 +255,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM,
- prtd->bits_per_sample);
+ 0, prtd->bits_per_sample);
} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM,
prtd->bits_per_sample);
@@ -328,7 +333,7 @@ static int q6asm_dai_open(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = soc_prtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_prtd, 0);
struct q6asm_dai_rtd *prtd;
struct q6asm_dai_data *pdata;
struct device *dev = component->dev;
@@ -540,7 +545,7 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
struct snd_soc_pcm_runtime *rtd = stream->private_data;
struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_compr_runtime *runtime = stream->runtime;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct q6asm_dai_data *pdata;
struct device *dev = c->dev;
struct q6asm_dai_rtd *prtd;
@@ -627,10 +632,17 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
int dir = stream->direction;
struct q6asm_dai_data *pdata;
struct q6asm_flac_cfg flac_cfg;
+ struct q6asm_wma_cfg wma_cfg;
+ struct q6asm_alac_cfg alac_cfg;
+ struct q6asm_ape_cfg ape_cfg;
+ unsigned int wma_v9 = 0;
struct device *dev = c->dev;
int ret;
union snd_codec_options *codec_options;
struct snd_dec_flac *flac;
+ struct snd_dec_wma *wma;
+ struct snd_dec_alac *alac;
+ struct snd_dec_ape *ape;
codec_options = &(prtd->codec_param.codec.options);
@@ -652,7 +664,7 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
prtd->bits_per_sample = 16;
if (dir == SND_COMPRESS_PLAYBACK) {
ret = q6asm_open_write(prtd->audio_client, params->codec.id,
- prtd->bits_per_sample);
+ params->codec.profile, prtd->bits_per_sample);
if (ret < 0) {
dev_err(dev, "q6asm_open_write failed\n");
@@ -692,6 +704,126 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
return -EIO;
}
break;
+
+ case SND_AUDIOCODEC_WMA:
+ wma = &codec_options->wma_d;
+
+ memset(&wma_cfg, 0x0, sizeof(struct q6asm_wma_cfg));
+
+ wma_cfg.sample_rate = params->codec.sample_rate;
+ wma_cfg.num_channels = params->codec.ch_in;
+ wma_cfg.bytes_per_sec = params->codec.bit_rate / 8;
+ wma_cfg.block_align = params->codec.align;
+ wma_cfg.bits_per_sample = prtd->bits_per_sample;
+ wma_cfg.enc_options = wma->encoder_option;
+ wma_cfg.adv_enc_options = wma->adv_encoder_option;
+ wma_cfg.adv_enc_options2 = wma->adv_encoder_option2;
+
+ if (wma_cfg.num_channels == 1)
+ wma_cfg.channel_mask = 4; /* Mono Center */
+ else if (wma_cfg.num_channels == 2)
+ wma_cfg.channel_mask = 3; /* Stereo FL/FR */
+ else
+ return -EINVAL;
+
+ /* check the codec profile */
+ switch (params->codec.profile) {
+ case SND_AUDIOPROFILE_WMA9:
+ wma_cfg.fmtag = 0x161;
+ wma_v9 = 1;
+ break;
+
+ case SND_AUDIOPROFILE_WMA10:
+ wma_cfg.fmtag = 0x166;
+ break;
+
+ case SND_AUDIOPROFILE_WMA9_PRO:
+ wma_cfg.fmtag = 0x162;
+ break;
+
+ case SND_AUDIOPROFILE_WMA9_LOSSLESS:
+ wma_cfg.fmtag = 0x163;
+ break;
+
+ case SND_AUDIOPROFILE_WMA10_LOSSLESS:
+ wma_cfg.fmtag = 0x167;
+ break;
+
+ default:
+ dev_err(dev, "Unknown WMA profile:%x\n",
+ params->codec.profile);
+ return -EIO;
+ }
+
+ if (wma_v9)
+ ret = q6asm_stream_media_format_block_wma_v9(
+ prtd->audio_client, &wma_cfg);
+ else
+ ret = q6asm_stream_media_format_block_wma_v10(
+ prtd->audio_client, &wma_cfg);
+ if (ret < 0) {
+ dev_err(dev, "WMA9 CMD failed:%d\n", ret);
+ return -EIO;
+ }
+ break;
+
+ case SND_AUDIOCODEC_ALAC:
+ memset(&alac_cfg, 0x0, sizeof(alac_cfg));
+ alac = &codec_options->alac_d;
+
+ alac_cfg.sample_rate = params->codec.sample_rate;
+ alac_cfg.avg_bit_rate = params->codec.bit_rate;
+ alac_cfg.bit_depth = prtd->bits_per_sample;
+ alac_cfg.num_channels = params->codec.ch_in;
+
+ alac_cfg.frame_length = alac->frame_length;
+ alac_cfg.pb = alac->pb;
+ alac_cfg.mb = alac->mb;
+ alac_cfg.kb = alac->kb;
+ alac_cfg.max_run = alac->max_run;
+ alac_cfg.compatible_version = alac->compatible_version;
+ alac_cfg.max_frame_bytes = alac->max_frame_bytes;
+
+ switch (params->codec.ch_in) {
+ case 1:
+ alac_cfg.channel_layout_tag = ALAC_CH_LAYOUT_MONO;
+ break;
+ case 2:
+ alac_cfg.channel_layout_tag = ALAC_CH_LAYOUT_STEREO;
+ break;
+ }
+ ret = q6asm_stream_media_format_block_alac(prtd->audio_client,
+ &alac_cfg);
+ if (ret < 0) {
+ dev_err(dev, "ALAC CMD Format block failed:%d\n", ret);
+ return -EIO;
+ }
+ break;
+
+ case SND_AUDIOCODEC_APE:
+ memset(&ape_cfg, 0x0, sizeof(ape_cfg));
+ ape = &codec_options->ape_d;
+
+ ape_cfg.sample_rate = params->codec.sample_rate;
+ ape_cfg.num_channels = params->codec.ch_in;
+ ape_cfg.bits_per_sample = prtd->bits_per_sample;
+
+ ape_cfg.compatible_version = ape->compatible_version;
+ ape_cfg.compression_level = ape->compression_level;
+ ape_cfg.format_flags = ape->format_flags;
+ ape_cfg.blocks_per_frame = ape->blocks_per_frame;
+ ape_cfg.final_frame_blocks = ape->final_frame_blocks;
+ ape_cfg.total_frames = ape->total_frames;
+ ape_cfg.seek_table_present = ape->seek_table_present;
+
+ ret = q6asm_stream_media_format_block_ape(prtd->audio_client,
+ &ape_cfg);
+ if (ret < 0) {
+ dev_err(dev, "APE CMD Format block failed:%d\n", ret);
+ return -EIO;
+ }
+ break;
+
default:
break;
}
@@ -791,9 +923,12 @@ static int q6asm_dai_compr_get_caps(struct snd_compr_stream *stream,
caps->max_fragment_size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE;
caps->min_fragments = COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
caps->max_fragments = COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
- caps->num_codecs = 2;
+ caps->num_codecs = 5;
caps->codecs[0] = SND_AUDIOCODEC_MP3;
caps->codecs[1] = SND_AUDIOCODEC_FLAC;
+ caps->codecs[2] = SND_AUDIOCODEC_WMA;
+ caps->codecs[3] = SND_AUDIOCODEC_ALAC;
+ caps->codecs[4] = SND_AUDIOCODEC_APE;
return 0;
}
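
The new ALAC branch tags mono and stereo streams with a packed channel-layout constant: layout tag 101 in the high half-word, channel count in the low one; the WMA branch likewise encodes speaker positions directly (0x4 for mono front-center, 0x3 for FL|FR stereo). A quick standalone check of the ALAC packing:

#include <stdio.h>

#define ALAC_CH_LAYOUT_MONO   ((101 << 16) | 1)
#define ALAC_CH_LAYOUT_STEREO ((101 << 16) | 2)

int main(void)
{
	/* 101 << 16 == 0x650000, so the tags come out as 0x650001/0x650002 */
	printf("mono   = 0x%x\n", ALAC_CH_LAYOUT_MONO);
	printf("stereo = 0x%x\n", ALAC_CH_LAYOUT_STEREO);
	return 0;
}
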
@@ -889,7 +1024,7 @@ static const struct snd_soc_component_driver q6asm_fe_dai_component = {
.compr_ops = &q6asm_dai_compr_ops,
};
-static struct snd_soc_dai_driver q6asm_fe_dais[] = {
+static struct snd_soc_dai_driver q6asm_fe_dais_template[] = {
Q6ASM_FEDAI_DRIVER(1),
Q6ASM_FEDAI_DRIVER(2),
Q6ASM_FEDAI_DRIVER(3),
@@ -903,10 +1038,22 @@ static struct snd_soc_dai_driver q6asm_fe_dais[] = {
static int of_q6asm_parse_dai_data(struct device *dev,
struct q6asm_dai_data *pdata)
{
- static struct snd_soc_dai_driver *dai_drv;
+ struct snd_soc_dai_driver *dai_drv;
struct snd_soc_pcm_stream empty_stream;
struct device_node *node;
- int ret, id, dir;
+ int ret, id, dir, idx = 0;
+
+ pdata->num_dais = of_get_child_count(dev->of_node);
+ if (!pdata->num_dais) {
+ dev_err(dev, "No dais found in DT\n");
+ return -EINVAL;
+ }
+
+ pdata->dais = devm_kcalloc(dev, pdata->num_dais, sizeof(*dai_drv),
+ GFP_KERNEL);
+ if (!pdata->dais)
+ return -ENOMEM;
memset(&empty_stream, 0, sizeof(empty_stream));
@@ -917,7 +1064,8 @@ static int of_q6asm_parse_dai_data(struct device *dev,
continue;
}
- dai_drv = &q6asm_fe_dais[id];
+ dai_drv = &pdata->dais[idx++];
+ *dai_drv = q6asm_fe_dais_template[id];
ret = of_property_read_u32(node, "direction", &dir);
if (ret)
@@ -955,11 +1103,12 @@ static int q6asm_dai_probe(struct platform_device *pdev)
dev_set_drvdata(dev, pdata);
- of_q6asm_parse_dai_data(dev, pdata);
+ rc = of_q6asm_parse_dai_data(dev, pdata);
+ if (rc)
+ return rc;
return devm_snd_soc_register_component(dev, &q6asm_fe_dai_component,
- q6asm_fe_dais,
- ARRAY_SIZE(q6asm_fe_dais));
+ pdata->dais, pdata->num_dais);
}
static const struct of_device_id q6asm_dai_device_id[] = {
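For reference, a minimal sketch (not part of the patch) of the allocation pattern the probe path above introduces: count the DT children, allocate a per-device copy with devm_kcalloc(), and populate each slot from the static template. The use of the "reg" property as the template index is an assumption based on the surrounding hunk; details of the real loop are elided here.

static int example_parse_dais(struct device *dev,
			      struct q6asm_dai_data *pdata)
{
	struct device_node *node;
	u32 id;
	int idx = 0;

	pdata->num_dais = of_get_child_count(dev->of_node);
	if (!pdata->num_dais)
		return -EINVAL;		/* no DAI children in DT */

	pdata->dais = devm_kcalloc(dev, pdata->num_dais,
				   sizeof(*pdata->dais), GFP_KERNEL);
	if (!pdata->dais)
		return -ENOMEM;

	for_each_child_of_node(dev->of_node, node) {
		/* assumed: "reg" selects the template entry */
		if (of_property_read_u32(node, "reg", &id))
			continue;
		/* struct copy, so per-instance edits stay local */
		pdata->dais[idx++] = q6asm_fe_dais_template[id];
	}

	return 0;
}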
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 36e0eab13a98..0e0e8f7a460a 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -39,6 +39,8 @@
#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
#define ASM_MEDIA_FMT_MP3 0x00010BE9
#define ASM_MEDIA_FMT_FLAC 0x00010C16
+#define ASM_MEDIA_FMT_WMA_V9 0x00010DA8
+#define ASM_MEDIA_FMT_WMA_V10 0x00010DA7
#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
#define ASM_DATA_CMD_READ_V2 0x00010DAC
#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
@@ -46,6 +48,8 @@
#define ASM_STREAM_CMD_OPEN_READ_V3 0x00010DB4
#define ASM_DATA_EVENT_READ_DONE_V2 0x00010D9A
#define ASM_STREAM_CMD_OPEN_READWRITE_V2 0x00010D8D
+#define ASM_MEDIA_FMT_ALAC 0x00012f31
+#define ASM_MEDIA_FMT_APE 0x00012f32
#define ASM_LEGACY_STREAM_SESSION 0
@@ -104,6 +108,63 @@ struct asm_flac_fmt_blk_v2 {
u16 reserved;
} __packed;
+struct asm_wmastdv9_fmt_blk_v2 {
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 fmtag;
+ u16 num_channels;
+ u32 sample_rate;
+ u32 bytes_per_sec;
+ u16 blk_align;
+ u16 bits_per_sample;
+ u32 channel_mask;
+ u16 enc_options;
+ u16 reserved;
+} __packed;
+
+struct asm_wmaprov10_fmt_blk_v2 {
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 fmtag;
+ u16 num_channels;
+ u32 sample_rate;
+ u32 bytes_per_sec;
+ u16 blk_align;
+ u16 bits_per_sample;
+ u32 channel_mask;
+ u16 enc_options;
+ u16 advanced_enc_options1;
+ u32 advanced_enc_options2;
+} __packed;
+
+struct asm_alac_fmt_blk_v2 {
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u32 frame_length;
+ u8 compatible_version;
+ u8 bit_depth;
+ u8 pb;
+ u8 mb;
+ u8 kb;
+ u8 num_channels;
+ u16 max_run;
+ u32 max_frame_bytes;
+ u32 avg_bit_rate;
+ u32 sample_rate;
+ u32 channel_layout_tag;
+} __packed;
+
+struct asm_ape_fmt_blk_v2 {
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 compatible_version;
+ u16 compression_level;
+ u32 format_flags;
+ u32 blocks_per_frame;
+ u32 final_frame_blocks;
+ u32 total_frames;
+ u16 bits_per_sample;
+ u16 num_channels;
+ u32 sample_rate;
+ u32 seek_table_present;
+} __packed;
+
struct asm_stream_cmd_set_encdec_param {
u32 param_id;
u32 param_size;
@@ -858,7 +919,7 @@ err:
* Return: Will be a negative value on error or zero on success
*/
int q6asm_open_write(struct audio_client *ac, uint32_t format,
- uint16_t bits_per_sample)
+ u32 codec_profile, uint16_t bits_per_sample)
{
struct asm_stream_cmd_open_write_v3 *open;
struct apr_pkt *pkt;
@@ -894,6 +955,30 @@ int q6asm_open_write(struct audio_client *ac, uint32_t format,
case SND_AUDIOCODEC_FLAC:
open->dec_fmt_id = ASM_MEDIA_FMT_FLAC;
break;
+ case SND_AUDIOCODEC_WMA:
+ switch (codec_profile) {
+ case SND_AUDIOPROFILE_WMA9:
+ open->dec_fmt_id = ASM_MEDIA_FMT_WMA_V9;
+ break;
+ case SND_AUDIOPROFILE_WMA10:
+ case SND_AUDIOPROFILE_WMA9_PRO:
+ case SND_AUDIOPROFILE_WMA9_LOSSLESS:
+ case SND_AUDIOPROFILE_WMA10_LOSSLESS:
+ open->dec_fmt_id = ASM_MEDIA_FMT_WMA_V10;
+ break;
+ default:
+ dev_err(ac->dev, "Invalid codec profile 0x%x\n",
+ codec_profile);
+ rc = -EINVAL;
+ goto err;
+ }
+ break;
+ case SND_AUDIOCODEC_ALAC:
+ open->dec_fmt_id = ASM_MEDIA_FMT_ALAC;
+ break;
+ case SND_AUDIOCODEC_APE:
+ open->dec_fmt_id = ASM_MEDIA_FMT_APE;
+ break;
default:
dev_err(ac->dev, "Invalid format 0x%x\n", format);
rc = -EINVAL;
@@ -1075,6 +1160,162 @@ int q6asm_stream_media_format_block_flac(struct audio_client *ac,
return rc;
}
EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_flac);
+
+int q6asm_stream_media_format_block_wma_v9(struct audio_client *ac,
+ struct q6asm_wma_cfg *cfg)
+{
+ struct asm_wmastdv9_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+ fmt->fmtag = cfg->fmtag;
+ fmt->num_channels = cfg->num_channels;
+ fmt->sample_rate = cfg->sample_rate;
+ fmt->bytes_per_sec = cfg->bytes_per_sec;
+ fmt->blk_align = cfg->block_align;
+ fmt->bits_per_sample = cfg->bits_per_sample;
+ fmt->channel_mask = cfg->channel_mask;
+ fmt->enc_options = cfg->enc_options;
+ fmt->reserved = 0;
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ kfree(pkt);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_wma_v9);
+
+int q6asm_stream_media_format_block_wma_v10(struct audio_client *ac,
+ struct q6asm_wma_cfg *cfg)
+{
+ struct asm_wmaprov10_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+ fmt->fmtag = cfg->fmtag;
+ fmt->num_channels = cfg->num_channels;
+ fmt->sample_rate = cfg->sample_rate;
+ fmt->bytes_per_sec = cfg->bytes_per_sec;
+ fmt->blk_align = cfg->block_align;
+ fmt->bits_per_sample = cfg->bits_per_sample;
+ fmt->channel_mask = cfg->channel_mask;
+ fmt->enc_options = cfg->enc_options;
+ fmt->advanced_enc_options1 = cfg->adv_enc_options;
+ fmt->advanced_enc_options2 = cfg->adv_enc_options2;
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ kfree(pkt);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_wma_v10);
+
+int q6asm_stream_media_format_block_alac(struct audio_client *ac,
+ struct q6asm_alac_cfg *cfg)
+{
+ struct asm_alac_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+
+ fmt->frame_length = cfg->frame_length;
+ fmt->compatible_version = cfg->compatible_version;
+ fmt->bit_depth = cfg->bit_depth;
+ fmt->num_channels = cfg->num_channels;
+ fmt->max_run = cfg->max_run;
+ fmt->max_frame_bytes = cfg->max_frame_bytes;
+ fmt->avg_bit_rate = cfg->avg_bit_rate;
+ fmt->sample_rate = cfg->sample_rate;
+ fmt->channel_layout_tag = cfg->channel_layout_tag;
+ fmt->pb = cfg->pb;
+ fmt->mb = cfg->mb;
+ fmt->kb = cfg->kb;
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ kfree(pkt);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_alac);
+
+int q6asm_stream_media_format_block_ape(struct audio_client *ac,
+ struct q6asm_ape_cfg *cfg)
+{
+ struct asm_ape_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+
+ fmt->compatible_version = cfg->compatible_version;
+ fmt->compression_level = cfg->compression_level;
+ fmt->format_flags = cfg->format_flags;
+ fmt->blocks_per_frame = cfg->blocks_per_frame;
+ fmt->final_frame_blocks = cfg->final_frame_blocks;
+ fmt->total_frames = cfg->total_frames;
+ fmt->bits_per_sample = cfg->bits_per_sample;
+ fmt->num_channels = cfg->num_channels;
+ fmt->sample_rate = cfg->sample_rate;
+ fmt->seek_table_present = cfg->seek_table_present;
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ kfree(pkt);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_ape);
+
/**
* q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
*
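The four new format-block helpers above all follow one shape, distilled in this hypothetical skeleton: allocate a single APR packet with the __packed payload placed directly behind the header, fill in the per-codec fields, and send synchronously. The fill() callback is an illustrative stand-in for the field copies and is expected to set fmt_blk.fmt_blk_size as the real helpers do.

static int example_send_fmt_blk(struct audio_client *ac, void *cfg,
				size_t fmt_size,
				void (*fill)(void *fmt, void *cfg))
{
	struct apr_pkt *pkt;
	void *p, *fmt;
	int rc, pkt_size;

	pkt_size = APR_HDR_SIZE + fmt_size;
	p = kzalloc(pkt_size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pkt = p;
	fmt = p + APR_HDR_SIZE;		/* payload sits right after the header */

	q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
	pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
	fill(fmt, cfg);			/* per-codec field copies go here */

	rc = q6asm_ac_send_cmd_sync(ac, pkt);	/* waits for the DSP ack */
	kfree(pkt);

	return rc;
}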
diff --git a/sound/soc/qcom/qdsp6/q6asm.h b/sound/soc/qcom/qdsp6/q6asm.h
index 6764f55f7078..38a207d6cd95 100644
--- a/sound/soc/qcom/qdsp6/q6asm.h
+++ b/sound/soc/qcom/qdsp6/q6asm.h
@@ -45,6 +45,47 @@ struct q6asm_flac_cfg {
u16 md5_sum;
};
+struct q6asm_wma_cfg {
+ u32 fmtag;
+ u32 num_channels;
+ u32 sample_rate;
+ u32 bytes_per_sec;
+ u32 block_align;
+ u32 bits_per_sample;
+ u32 channel_mask;
+ u32 enc_options;
+ u32 adv_enc_options;
+ u32 adv_enc_options2;
+};
+
+struct q6asm_alac_cfg {
+ u32 frame_length;
+ u8 compatible_version;
+ u8 bit_depth;
+ u8 pb;
+ u8 mb;
+ u8 kb;
+ u8 num_channels;
+ u16 max_run;
+ u32 max_frame_bytes;
+ u32 avg_bit_rate;
+ u32 sample_rate;
+ u32 channel_layout_tag;
+};
+
+struct q6asm_ape_cfg {
+ u16 compatible_version;
+ u16 compression_level;
+ u32 format_flags;
+ u32 blocks_per_frame;
+ u32 final_frame_blocks;
+ u32 total_frames;
+ u16 bits_per_sample;
+ u16 num_channels;
+ u32 sample_rate;
+ u32 seek_table_present;
+};
+
typedef void (*q6asm_cb) (uint32_t opcode, uint32_t token,
void *payload, void *priv);
struct audio_client;
@@ -55,7 +96,7 @@ void q6asm_audio_client_free(struct audio_client *ac);
int q6asm_write_async(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
uint32_t lsw_ts, uint32_t flags);
int q6asm_open_write(struct audio_client *ac, uint32_t format,
- uint16_t bits_per_sample);
+ u32 codec_profile, uint16_t bits_per_sample);
int q6asm_open_read(struct audio_client *ac, uint32_t format,
uint16_t bits_per_sample);
@@ -69,6 +110,14 @@ int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
uint16_t bits_per_sample);
int q6asm_stream_media_format_block_flac(struct audio_client *ac,
struct q6asm_flac_cfg *cfg);
+int q6asm_stream_media_format_block_wma_v9(struct audio_client *ac,
+ struct q6asm_wma_cfg *cfg);
+int q6asm_stream_media_format_block_wma_v10(struct audio_client *ac,
+ struct q6asm_wma_cfg *cfg);
+int q6asm_stream_media_format_block_alac(struct audio_client *ac,
+ struct q6asm_alac_cfg *cfg);
+int q6asm_stream_media_format_block_ape(struct audio_client *ac,
+ struct q6asm_ape_cfg *cfg);
int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
uint32_t lsw_ts);
int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
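A caller-side sketch of the extended open path declared above: the new codec_profile argument selects between the WMA v9 and v10 media format IDs inside q6asm_open_write(), and the matching format block follows. The configuration values here are assumptions for illustration, not taken from the patch.

static int example_open_wma(struct audio_client *ac)
{
	struct q6asm_wma_cfg cfg = {
		.fmtag		 = 0x161,	/* WMA v9 std fmtag; assumed */
		.num_channels	 = 2,
		.sample_rate	 = 48000,
		.bytes_per_sec	 = 24000,
		.block_align	 = 4096,
		.bits_per_sample = 16,
	};
	int rc;

	rc = q6asm_open_write(ac, SND_AUDIOCODEC_WMA,
			      SND_AUDIOPROFILE_WMA9, 16);
	if (rc)
		return rc;

	return q6asm_stream_media_format_block_wma_v9(ac, &cfg);
}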
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index 20724102e85a..46e50612b92c 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -918,25 +918,6 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL6", NULL, "MultiMedia6 Mixer"},
{"MM_UL7", NULL, "MultiMedia7 Mixer"},
{"MM_UL8", NULL, "MultiMedia8 Mixer"},
-
- {"MM_DL1", NULL, "MultiMedia1 Playback" },
- {"MM_DL2", NULL, "MultiMedia2 Playback" },
- {"MM_DL3", NULL, "MultiMedia3 Playback" },
- {"MM_DL4", NULL, "MultiMedia4 Playback" },
- {"MM_DL5", NULL, "MultiMedia5 Playback" },
- {"MM_DL6", NULL, "MultiMedia6 Playback" },
- {"MM_DL7", NULL, "MultiMedia7 Playback" },
- {"MM_DL8", NULL, "MultiMedia8 Playback" },
-
- {"MultiMedia1 Capture", NULL, "MM_UL1"},
- {"MultiMedia2 Capture", NULL, "MM_UL2"},
- {"MultiMedia3 Capture", NULL, "MM_UL3"},
- {"MultiMedia4 Capture", NULL, "MM_UL4"},
- {"MultiMedia5 Capture", NULL, "MM_UL5"},
- {"MultiMedia6 Capture", NULL, "MM_UL6"},
- {"MultiMedia7 Capture", NULL, "MM_UL7"},
- {"MultiMedia8 Capture", NULL, "MM_UL8"},
-
};
static int routing_hw_params(struct snd_soc_component *component,
@@ -945,7 +926,7 @@ static int routing_hw_params(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct msm_routing_data *data = dev_get_drvdata(component->dev);
- unsigned int be_id = rtd->cpu_dai->id;
+ unsigned int be_id = asoc_rtd_to_cpu(rtd, 0)->id;
struct session_data *session;
int path_type;
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 3b5547a27aad..b2de65c7f95c 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -11,6 +11,7 @@
#include <sound/pcm_params.h>
#include <sound/jack.h>
#include <sound/soc.h>
+#include <linux/soundwire/sdw.h>
#include <uapi/linux/input-event-codes.h>
#include "common.h"
#include "qdsp6/q6afe.h"
@@ -31,10 +32,12 @@
struct sdm845_snd_data {
struct snd_soc_jack jack;
bool jack_setup;
+ bool stream_prepared[SLIM_MAX_RX_PORTS];
struct snd_soc_card *card;
uint32_t pri_mi2s_clk_count;
uint32_t sec_mi2s_clk_count;
uint32_t quat_tdm_clk_count;
+ struct sdw_stream_runtime *sruntime[SLIM_MAX_RX_PORTS];
};
static unsigned int tdm_slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
@@ -43,14 +46,21 @@ static int sdm845_slim_snd_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai_link *dai_link = rtd->dai_link;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai;
+ struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(rtd->card);
u32 rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+ struct sdw_stream_runtime *sruntime;
u32 rx_ch_cnt = 0, tx_ch_cnt = 0;
int ret = 0, i;
- for (i = 0 ; i < dai_link->num_codecs; i++) {
- ret = snd_soc_dai_get_channel_map(rtd->codec_dais[i],
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ sruntime = snd_soc_dai_get_sdw_stream(codec_dai,
+ substream->stream);
+ if (sruntime != ERR_PTR(-ENOTSUPP))
+ pdata->sruntime[cpu_dai->id] = sruntime;
+
+ ret = snd_soc_dai_get_channel_map(codec_dai,
&tx_ch_cnt, tx_ch, &rx_ch_cnt, rx_ch);
if (ret != 0 && ret != -ENOTSUPP) {
@@ -76,7 +86,8 @@ static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai;
int ret = 0, j;
int channels, slot_width;
@@ -125,8 +136,7 @@ static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
}
}
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name_prefix, "Left")) {
ret = snd_soc_dai_set_tdm_slot(
@@ -161,8 +171,8 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret = 0;
switch (cpu_dai->id) {
@@ -210,11 +220,10 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_component *component;
struct snd_soc_card *card = rtd->card;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card);
struct snd_jack *jack;
- struct snd_soc_dai_link *dai_link = rtd->dai_link;
/*
* Codec SLIMBUS configuration
* RX1, RX2, RX3, RX4, RX5, RX6, RX7, RX8, RX9, RX10, RX11, RX12, RX13
@@ -266,8 +275,8 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
}
break;
case SLIMBUS_0_RX...SLIMBUS_6_TX:
- for (i = 0 ; i < dai_link->num_codecs; i++) {
- rval = snd_soc_dai_set_channel_map(rtd->codec_dais[i],
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ rval = snd_soc_dai_set_channel_map(codec_dai,
ARRAY_SIZE(tx_ch),
tx_ch,
ARRAY_SIZE(rx_ch),
@@ -275,7 +284,7 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
if (rval != 0 && rval != -ENOTSUPP)
return rval;
- snd_soc_dai_set_sysclk(rtd->codec_dais[i], 0,
+ snd_soc_dai_set_sysclk(codec_dai, 0,
WCD934X_DEFAULT_MCLK_RATE,
SNDRV_PCM_STREAM_PLAYBACK);
}
@@ -295,8 +304,8 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int j;
int ret;
@@ -345,8 +354,7 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
codec_dai_fmt |= SND_SOC_DAIFMT_IB_NF | SND_SOC_DAIFMT_DSP_B;
- for (j = 0; j < rtd->num_codecs; j++) {
- codec_dai = rtd->codec_dais[j];
+ for_each_rtd_codec_dais(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name_prefix,
"Left")) {
@@ -386,7 +394,7 @@ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
switch (cpu_dai->id) {
case PRIMARY_MI2S_RX:
@@ -427,8 +435,65 @@ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
}
}
+static int sdm845_snd_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+ int ret;
+
+ if (!sruntime)
+ return 0;
+
+ if (data->stream_prepared[cpu_dai->id]) {
+ sdw_disable_stream(sruntime);
+ sdw_deprepare_stream(sruntime);
+ data->stream_prepared[cpu_dai->id] = false;
+ }
+
+ ret = sdw_prepare_stream(sruntime);
+ if (ret)
+ return ret;
+
+ /*
+ * NOTE: there is a strict hw requirement about the ordering of port
+ * enables and the actual WSA881x PA enable. PA enable should only
+ * happen after the soundwire ports are enabled; otherwise, DC
+ * accumulates on the line, resulting in click/pop noise. PA
+ * enable/mute is handled as part of codec DAPM and digital mute.
+ */
+
+ ret = sdw_enable_stream(sruntime);
+ if (ret) {
+ sdw_deprepare_stream(sruntime);
+ return ret;
+ }
+ data->stream_prepared[cpu_dai->id] = true;
+
+ return ret;
+}
+
+static int sdm845_snd_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+
+ if (sruntime && data->stream_prepared[cpu_dai->id]) {
+ sdw_disable_stream(sruntime);
+ sdw_deprepare_stream(sruntime);
+ data->stream_prepared[cpu_dai->id] = false;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_ops sdm845_be_ops = {
.hw_params = sdm845_snd_hw_params,
+ .hw_free = sdm845_snd_hw_free,
+ .prepare = sdm845_snd_prepare,
.startup = sdm845_snd_startup,
.shutdown = sdm845_snd_shutdown,
};
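Condensed into one function, the SoundWire stream lifecycle the new prepare/hw_free callbacks drive looks like the sketch below (error handling trimmed); as the note in sdm845_snd_prepare() states, the ports must come up before the PA is unmuted, and teardown mirrors the order.

static int example_sdw_cycle(struct sdw_stream_runtime *sruntime)
{
	int ret;

	ret = sdw_prepare_stream(sruntime);	/* program transport params */
	if (ret)
		return ret;

	ret = sdw_enable_stream(sruntime);	/* ports up before PA unmute */
	if (ret) {
		sdw_deprepare_stream(sruntime);
		return ret;
	}

	/* ... stream runs; teardown mirrors the setup order ... */
	sdw_disable_stream(sruntime);
	sdw_deprepare_stream(sruntime);

	return 0;
}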
diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
index e6666e597265..3a6e18709b9e 100644
--- a/sound/soc/qcom/storm.c
+++ b/sound/soc/qcom/storm.c
@@ -39,7 +39,7 @@ static int storm_ops_hw_params(struct snd_pcm_substream *substream,
*/
sysclk_freq = rate * bitwidth * 2 * STORM_SYSCLK_MULT;
- ret = snd_soc_dai_set_sysclk(soc_runtime->cpu_dai, 0, sysclk_freq, 0);
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(soc_runtime, 0), 0, sysclk_freq, 0);
if (ret) {
dev_err(card->dev, "error setting sysclk to %u: %d\n",
sysclk_freq, ret);
diff --git a/sound/soc/rockchip/rk3288_hdmi_analog.c b/sound/soc/rockchip/rk3288_hdmi_analog.c
index 767700c34ee2..01078155a914 100644
--- a/sound/soc/rockchip/rk3288_hdmi_analog.c
+++ b/sound/soc/rockchip/rk3288_hdmi_analog.c
@@ -67,8 +67,8 @@ static int rk_hw_params(struct snd_pcm_substream *substream,
{
int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int mclk;
switch (params_rate(params)) {
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index d951100bf770..f45e5aaa4b30 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -57,7 +57,7 @@ static int rockchip_sound_max98357a_hw_params(struct snd_pcm_substream *substrea
mclk = params_rate(params) * SOUND_FS;
- ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, 0, mclk, 0);
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), 0, mclk, 0);
if (ret) {
dev_err(rtd->card->dev, "%s() error setting sysclk to %u: %d\n",
__func__, mclk, ret);
@@ -71,8 +71,8 @@ static int rockchip_sound_rt5514_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int mclk;
int ret;
@@ -103,8 +103,8 @@ static int rockchip_sound_da7219_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int mclk, ret;
/* in bypass mode, the mclk has to be one of the frequencies below */
@@ -153,8 +153,8 @@ static int rockchip_sound_da7219_hw_params(struct snd_pcm_substream *substream,
static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dais[0]->component;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
/* We need default MCLK and PLL settings for the accessory detection */
@@ -206,7 +206,7 @@ static int rockchip_sound_dmic_hw_params(struct snd_pcm_substream *substream,
mclk = params_rate(params) * SOUND_FS;
- ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, 0, mclk, 0);
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), 0, mclk, 0);
if (ret) {
dev_err(rtd->card->dev, "%s() error setting sysclk to %u: %d\n",
__func__, mclk, ret);
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index 60930fa85aa4..1f527d3763ce 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -146,8 +146,8 @@ static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
{
int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int mclk;
switch (params_rate(params)) {
@@ -227,7 +227,7 @@ static struct snd_soc_jack rk_hdmi_jack;
static int rk_hdmi_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
- struct snd_soc_component *component = runtime->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component;
int ret;
/* enable jack detection */
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
index 26b67b245484..0617ccf4e42c 100644
--- a/sound/soc/rockchip/rockchip_rt5645.c
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -56,8 +56,8 @@ static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
{
int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int mclk;
switch (params_rate(params)) {
@@ -113,7 +113,7 @@ static int rk_init(struct snd_soc_pcm_runtime *runtime)
return ret;
}
- return rt5645_set_jack_detect(runtime->codec_dai->component,
+ return rt5645_set_jack_detect(asoc_rtd_to_codec(runtime, 0)->component,
&headset_jack,
&headset_jack,
&headset_jack);
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 1a0b163ca47b..112911dc271b 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -151,7 +151,7 @@ config SND_SOC_TOBERMORY
config SND_SOC_BELLS
tristate "Audio support for Wolfson Bells"
- depends on MFD_ARIZONA && I2C && SPI_MASTER
+ depends on MFD_ARIZONA && MFD_WM5102 && MFD_WM5110 && I2C && SPI_MASTER
depends on MACH_WLF_CRAGG_6410 || COMPILE_TEST
select SND_SAMSUNG_I2S
select SND_SOC_WM5102
@@ -204,7 +204,7 @@ config SND_SOC_ARNDALE
config SND_SOC_SAMSUNG_TM2_WM5110
tristate "SoC I2S Audio support for WM5110 on TM2 board"
- depends on SND_SOC_SAMSUNG && MFD_ARIZONA && I2C && SPI_MASTER
+ depends on SND_SOC_SAMSUNG && MFD_ARIZONA && MFD_WM5110 && I2C && SPI_MASTER
depends on GPIOLIB || COMPILE_TEST
select SND_SOC_MAX98504
select SND_SOC_WM5110
diff --git a/sound/soc/samsung/arndale.c b/sound/soc/samsung/arndale.c
index d64602950cbd..c81ece78e036 100644
--- a/sound/soc/samsung/arndale.c
+++ b/sound/soc/samsung/arndale.c
@@ -21,8 +21,8 @@ static int arndale_rt5631_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int rfs, ret;
unsigned long rclk;
@@ -56,7 +56,7 @@ static int arndale_wm1811_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int rfs, rclk;
/* Ensure AIF1CLK is >= 3 MHz for optimal performance */
@@ -174,7 +174,9 @@ static int arndale_audio_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(card->dev, card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "snd_soc_register_card() failed: %d\n", ret);
goto err_put_of_nodes;
}
return 0;
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index 5de633497f83..8b83f39c3ac9 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -60,7 +60,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[DAI_DSP_CODEC]);
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
component = codec_dai->component;
if (dapm->dev != codec_dai->dev)
@@ -106,7 +106,7 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[DAI_DSP_CODEC]);
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
component = codec_dai->component;
if (dapm->dev != codec_dai->dev)
@@ -152,11 +152,11 @@ static int bells_late_probe(struct snd_soc_card *card)
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[DAI_AP_DSP]);
- wm0010 = rtd->codec_dai->component;
+ wm0010 = asoc_rtd_to_codec(rtd, 0)->component;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[DAI_DSP_CODEC]);
- component = rtd->codec_dai->component;
- aif1_dai = rtd->codec_dai;
+ component = asoc_rtd_to_codec(rtd, 0)->component;
+ aif1_dai = asoc_rtd_to_codec(rtd, 0);
ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_SYSCLK,
ARIZONA_CLK_SRC_FLL1,
@@ -195,7 +195,7 @@ static int bells_late_probe(struct snd_soc_card *card)
}
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[DAI_CODEC_CP]);
- aif2_dai = rtd->cpu_dai;
+ aif2_dai = asoc_rtd_to_cpu(rtd, 0);
ret = snd_soc_dai_set_sysclk(aif2_dai, ARIZONA_CLK_ASYNCCLK, 0, 0);
if (ret != 0) {
@@ -207,8 +207,8 @@ static int bells_late_probe(struct snd_soc_card *card)
return 0;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[DAI_CODEC_SUB]);
- aif3_dai = rtd->cpu_dai;
- wm9081_dai = rtd->codec_dai;
+ aif3_dai = asoc_rtd_to_cpu(rtd, 0);
+ wm9081_dai = asoc_rtd_to_codec(rtd, 0);
ret = snd_soc_dai_set_sysclk(aif3_dai, ARIZONA_CLK_SYSCLK, 0, 0);
if (ret != 0) {
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index a95c34e53a2b..9139a1e7e200 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -68,7 +68,7 @@ static int h1940_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int div;
int ret;
unsigned int rate = params_rate(params);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index a57bb989a0ef..f86e3028b402 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -932,7 +932,7 @@ static int i2s_trigger(struct snd_pcm_substream *substream,
struct samsung_i2s_priv *priv = snd_soc_dai_get_drvdata(dai);
int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct i2s_dai *i2s = to_info(rtd->cpu_dai);
+ struct i2s_dai *i2s = to_info(asoc_rtd_to_cpu(rtd, 0));
unsigned long flags;
switch (cmd) {
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 949d2e029962..30899016cf08 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -33,8 +33,8 @@ static int jive_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct s3c_i2sv2_rate_calc div;
unsigned int clk = 0;
int ret = 0;
diff --git a/sound/soc/samsung/littlemill.c b/sound/soc/samsung/littlemill.c
index 59904f44118b..f4375c49f7f4 100644
--- a/sound/soc/samsung/littlemill.c
+++ b/sound/soc/samsung/littlemill.c
@@ -23,7 +23,7 @@ static int littlemill_set_bias_level(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- aif1_dai = rtd->codec_dai;
+ aif1_dai = asoc_rtd_to_codec(rtd, 0);
if (dapm->dev != aif1_dai->dev)
return 0;
@@ -70,7 +70,7 @@ static int littlemill_set_bias_level_post(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- aif1_dai = rtd->codec_dai;
+ aif1_dai = asoc_rtd_to_codec(rtd, 0);
if (dapm->dev != aif1_dai->dev)
return 0;
@@ -105,7 +105,7 @@ static int littlemill_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
sample_rate = params_rate(params);
@@ -181,7 +181,7 @@ static int bbclk_ev(struct snd_soc_dapm_widget *w,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[1]);
- aif2_dai = rtd->cpu_dai;
+ aif2_dai = asoc_rtd_to_cpu(rtd, 0);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -264,11 +264,11 @@ static int littlemill_late_probe(struct snd_soc_card *card)
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- component = rtd->codec_dai->component;
- aif1_dai = rtd->codec_dai;
+ component = asoc_rtd_to_codec(rtd, 0)->component;
+ aif1_dai = asoc_rtd_to_codec(rtd, 0);
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[1]);
- aif2_dai = rtd->cpu_dai;
+ aif2_dai = asoc_rtd_to_cpu(rtd, 0);
ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_MCLK2,
32768, SND_SOC_CLOCK_IN);
@@ -325,7 +325,7 @@ static int littlemill_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
diff --git a/sound/soc/samsung/lowland.c b/sound/soc/samsung/lowland.c
index 098eefc764db..998d10cf8c94 100644
--- a/sound/soc/samsung/lowland.c
+++ b/sound/soc/samsung/lowland.c
@@ -32,7 +32,7 @@ static struct snd_soc_jack_pin lowland_headset_pins[] = {
static int lowland_wm5100_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
int ret;
ret = snd_soc_component_set_sysclk(component, WM5100_CLK_SYSCLK,
@@ -65,7 +65,7 @@ static int lowland_wm5100_init(struct snd_soc_pcm_runtime *rtd)
static int lowland_wm9081_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
snd_soc_dapm_nc_pin(&rtd->card->dapm, "LINEOUT");
@@ -183,7 +183,7 @@ static int lowland_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index 1339e41e9860..b7ce1da854ce 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -26,8 +26,8 @@ static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int pll_out = 0, bclk = 0;
int ret = 0;
unsigned long iis_clkrate;
@@ -100,7 +100,7 @@ static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
static int neo1973_hifi_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* disable the PLL */
return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0, 0);
@@ -118,7 +118,7 @@ static int neo1973_voice_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int pcmdiv = 0;
int ret = 0;
unsigned long iis_clkrate;
@@ -155,7 +155,7 @@ static int neo1973_voice_hw_params(struct snd_pcm_substream *substream,
static int neo1973_voice_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* disable the PLL */
return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0);
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
index f0f5fa9c27d3..6eda5af989fe 100644
--- a/sound/soc/samsung/odroid.c
+++ b/sound/soc/samsung/odroid.c
@@ -98,7 +98,7 @@ static int odroid_card_be_hw_params(struct snd_pcm_substream *substream,
return ret;
if (rtd->num_codecs > 1) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[1];
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 1);
ret = snd_soc_dai_set_sysclk(codec_dai, 0, rclk_freq,
SND_SOC_CLOCK_IN);
@@ -311,7 +311,9 @@ static int odroid_audio_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(dev, card);
if (ret < 0) {
- dev_err(dev, "snd_soc_register_card() failed: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "snd_soc_register_card() failed: %d\n",
+ ret);
goto err_put_clk_i2s;
}
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index f6e67d0e7882..a5b1a12b3496 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -212,7 +212,7 @@ static int s3c_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
unsigned long flags;
dev_dbg(pcm->dev, "Entered %s\n", __func__);
@@ -256,7 +256,7 @@ static int s3c_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *socdai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
void __iomem *regs = pcm->regs;
struct clk *clk;
int sclk_div, sync_div;
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index 4b247e91ae5b..3afe63c0923e 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -149,7 +149,7 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int div;
int ret;
unsigned int rate = params_rate(params);
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index 593be1b668d6..358887848293 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -380,7 +380,7 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct s3c_i2sv2_info *i2s = to_info(rtd->cpu_dai);
+ struct s3c_i2sv2_info *i2s = to_info(asoc_rtd_to_cpu(rtd, 0));
int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
unsigned long irqs;
int ret = 0;
diff --git a/sound/soc/samsung/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
index 4543705b8d87..fd2a4da086f3 100644
--- a/sound/soc/samsung/s3c24xx_simtec.c
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -160,8 +160,8 @@ static int simtec_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 55d2a802a6cb..abb5c4713c53 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -51,7 +51,7 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct s3c24xx_uda134x *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret = 0;
mutex_lock(&priv->clk_lock);
@@ -119,8 +119,8 @@ static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int clk = 0;
int ret = 0;
int clk_source, fs_mode;
diff --git a/sound/soc/samsung/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
index fab3db9fdb98..36bef136d57f 100644
--- a/sound/soc/samsung/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -25,8 +25,8 @@ static int smartq_hifi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int clk = 0;
int ret;
diff --git a/sound/soc/samsung/smdk_spdif.c b/sound/soc/samsung/smdk_spdif.c
index 4baef84d29ee..776a270261bf 100644
--- a/sound/soc/samsung/smdk_spdif.c
+++ b/sound/soc/samsung/smdk_spdif.c
@@ -101,7 +101,7 @@ static int smdk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned long pll_out, rclk_rate;
int ret, ratio;
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index d096ff912260..02074c34a2b2 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -23,7 +23,7 @@ static int smdk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int pll_out;
int rfs, ret;
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index 28f8be000aa1..a9f345f19a8a 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -45,7 +45,7 @@ static int smdk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int pll_out;
int ret;
@@ -178,7 +178,7 @@ static int smdk_audio_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
return ret;
diff --git a/sound/soc/samsung/smdk_wm8994pcm.c b/sound/soc/samsung/smdk_wm8994pcm.c
index 2e3dc7320c62..746930dde5d7 100644
--- a/sound/soc/samsung/smdk_wm8994pcm.c
+++ b/sound/soc/samsung/smdk_wm8994pcm.c
@@ -44,8 +44,8 @@ static int smdk_wm8994_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned long mclk_freq;
int rfs, ret;
@@ -118,7 +118,7 @@ static int snd_smdk_probe(struct platform_device *pdev)
smdk_pcm.dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, &smdk_pcm);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret);
return ret;
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index f075aae9561a..40c5de8df0ff 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -110,9 +110,9 @@ static int snow_late_probe(struct snd_soc_card *card)
/* In the multi-codec case codec_dais 0 is MAX98095 and 1 is HDMI. */
if (rtd->num_codecs > 1)
- codec_dai = rtd->codec_dais[0];
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
else
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
/* Set the MCLK rate for the codec */
return snd_soc_dai_set_sysclk(codec_dai, 0,
@@ -216,7 +216,9 @@ static int snow_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(dev, card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "snd_soc_register_card failed (%d)\n", ret);
return ret;
}
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 1a9f08a50394..759fc6644329 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -142,7 +142,7 @@ static int spdif_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
+ struct samsung_spdif_info *spdif = to_info(asoc_rtd_to_cpu(rtd, 0));
unsigned long flags;
dev_dbg(spdif->dev, "Entered %s\n", __func__);
@@ -178,7 +178,7 @@ static int spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *socdai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
+ struct samsung_spdif_info *spdif = to_info(asoc_rtd_to_cpu(rtd, 0));
void __iomem *regs = spdif->regs;
struct snd_dmaengine_dai_dma_data *dma_data;
u32 con, clkcon, cstas;
@@ -194,7 +194,7 @@ static int spdif_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
+ snd_soc_dai_set_dma_data(asoc_rtd_to_cpu(rtd, 0), substream, dma_data);
spin_lock_irqsave(&spdif->lock, flags);
@@ -280,7 +280,7 @@ static void spdif_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
+ struct samsung_spdif_info *spdif = to_info(asoc_rtd_to_cpu(rtd, 0));
void __iomem *regs = spdif->regs;
u32 con, clkcon;
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index ea0d1ec67f01..f5f6ba00d073 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -25,7 +25,7 @@ static int speyside_set_bias_level(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[1]);
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
if (dapm->dev != codec_dai->dev)
return 0;
@@ -61,7 +61,7 @@ static int speyside_set_bias_level_post(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[1]);
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
if (dapm->dev != codec_dai->dev)
return 0;
@@ -131,7 +131,7 @@ static void speyside_set_polarity(struct snd_soc_component *component,
static int speyside_wm0010_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_sysclk(dai, 0, MCLK_AUDIO_RATE, 0);
@@ -143,7 +143,7 @@ static int speyside_wm0010_init(struct snd_soc_pcm_runtime *rtd)
static int speyside_wm8996_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_component *component = dai->component;
int ret;
@@ -330,7 +330,7 @@ static int speyside_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
index 10ff14b856f2..6dfd540e2d74 100644
--- a/sound/soc/samsung/tm2_wm5110.c
+++ b/sound/soc/samsung/tm2_wm5110.c
@@ -93,7 +93,7 @@ static int tm2_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(rtd->card);
switch (params_rate(params)) {
@@ -134,7 +134,7 @@ static int tm2_aif2_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
unsigned int asyncclk_rate;
int ret;
@@ -188,7 +188,7 @@ static int tm2_aif2_hw_params(struct snd_pcm_substream *substream,
static int tm2_aif2_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
int ret;
/* disable FLL2 */
@@ -209,7 +209,7 @@ static int tm2_hdmi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int bfs;
int bitwidth, ret;
@@ -284,7 +284,7 @@ static int tm2_set_bias_level(struct snd_soc_card *card,
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- if (dapm->dev != rtd->codec_dai->dev)
+ if (dapm->dev != asoc_rtd_to_codec(rtd, 0)->dev)
return 0;
switch (level) {
@@ -315,8 +315,8 @@ static int tm2_late_probe(struct snd_soc_card *card)
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[TM2_DAI_AIF1]);
- aif1_dai = rtd->codec_dai;
- priv->component = rtd->codec_dai->component;
+ aif1_dai = asoc_rtd_to_codec(rtd, 0);
+ priv->component = asoc_rtd_to_codec(rtd, 0)->component;
ret = snd_soc_dai_set_sysclk(aif1_dai, ARIZONA_CLK_SYSCLK, 0, 0);
if (ret < 0) {
@@ -325,7 +325,7 @@ static int tm2_late_probe(struct snd_soc_card *card)
}
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[TM2_DAI_AIF2]);
- aif2_dai = rtd->codec_dai;
+ aif2_dai = asoc_rtd_to_codec(rtd, 0);
ret = snd_soc_dai_set_sysclk(aif2_dai, ARIZONA_CLK_ASYNCCLK, 0, 0);
if (ret < 0) {
@@ -611,7 +611,8 @@ static int tm2_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(dev, card);
if (ret < 0) {
- dev_err(dev, "Failed to register card: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to register card: %d\n", ret);
goto dai_node_put;
}
diff --git a/sound/soc/samsung/tobermory.c b/sound/soc/samsung/tobermory.c
index fdce28cc26c4..c962d2c2a7f7 100644
--- a/sound/soc/samsung/tobermory.c
+++ b/sound/soc/samsung/tobermory.c
@@ -23,7 +23,7 @@ static int tobermory_set_bias_level(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
if (dapm->dev != codec_dai->dev)
return 0;
@@ -66,7 +66,7 @@ static int tobermory_set_bias_level_post(struct snd_soc_card *card,
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- codec_dai = rtd->codec_dai;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
if (dapm->dev != codec_dai->dev)
return 0;
@@ -181,8 +181,8 @@ static int tobermory_late_probe(struct snd_soc_card *card)
int ret;
rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- component = rtd->codec_dai->component;
- codec_dai = rtd->codec_dai;
+ component = asoc_rtd_to_codec(rtd, 0)->component;
+ codec_dai = asoc_rtd_to_codec(rtd, 0);
ret = snd_soc_dai_set_sysclk(codec_dai, WM8962_SYSCLK_MCLK,
32768, SND_SOC_CLOCK_IN);
@@ -229,7 +229,7 @@ static int tobermory_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret)
+ if (ret && ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index eee1a1e994cb..a35de78f14a9 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -119,7 +119,7 @@ static int camelot_pcm_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
+ struct camelot_pcm *cam = &cam_pcm_data[asoc_rtd_to_cpu(rtd, 0)->id];
int recv = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 0:1;
int ret, dmairq;
@@ -132,7 +132,7 @@ static int camelot_pcm_open(struct snd_soc_component *component,
ret = dmabrg_request_irq(dmairq, camelot_rxdma, cam);
if (unlikely(ret)) {
pr_debug("audio unit %d irqs already taken!\n",
- rtd->cpu_dai->id);
+ asoc_rtd_to_cpu(rtd, 0)->id);
return -EBUSY;
}
(void)dmabrg_request_irq(dmairq + 1,camelot_rxdma, cam);
@@ -141,7 +141,7 @@ static int camelot_pcm_open(struct snd_soc_component *component,
ret = dmabrg_request_irq(dmairq, camelot_txdma, cam);
if (unlikely(ret)) {
pr_debug("audio unit %d irqs already taken!\n",
- rtd->cpu_dai->id);
+ asoc_rtd_to_cpu(rtd, 0)->id);
return -EBUSY;
}
(void)dmabrg_request_irq(dmairq + 1, camelot_txdma, cam);
@@ -153,7 +153,7 @@ static int camelot_pcm_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
+ struct camelot_pcm *cam = &cam_pcm_data[asoc_rtd_to_cpu(rtd, 0)->id];
int recv = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 0:1;
int dmairq;
@@ -175,7 +175,7 @@ static int camelot_hw_params(struct snd_soc_component *component,
struct snd_pcm_hw_params *hw_params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
+ struct camelot_pcm *cam = &cam_pcm_data[asoc_rtd_to_cpu(rtd, 0)->id];
int recv = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 0:1;
int ret;
@@ -194,7 +194,7 @@ static int camelot_prepare(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
+ struct camelot_pcm *cam = &cam_pcm_data[asoc_rtd_to_cpu(rtd, 0)->id];
pr_debug("PCM data: addr 0x%08lx len %d\n",
(u32)runtime->dma_addr, runtime->dma_bytes);
@@ -242,7 +242,7 @@ static int camelot_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
+ struct camelot_pcm *cam = &cam_pcm_data[asoc_rtd_to_cpu(rtd, 0)->id];
int recv = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 0:1;
switch (cmd) {
@@ -270,7 +270,7 @@ static snd_pcm_uframes_t camelot_pos(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
+ struct camelot_pcm *cam = &cam_pcm_data[asoc_rtd_to_cpu(rtd, 0)->id];
int recv = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 0:1;
unsigned long pos;
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 4b35ef402604..1c3c4fdc9bef 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -408,7 +408,7 @@ static struct snd_soc_dai *fsi_get_dai(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- return rtd->cpu_dai;
+ return asoc_rtd_to_cpu(rtd, 0);
}
static struct fsi_priv *fsi_get_priv_frm_dai(struct snd_soc_dai *dai)
@@ -1938,8 +1938,7 @@ static int fsi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- master->base = devm_ioremap(&pdev->dev,
- res->start, resource_size(res));
+ master->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!master->base) {
dev_err(&pdev->dev, "Unable to ioremap FSI registers.\n");
return -ENXIO;
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index 991557e25eba..d5702fbf176b 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -46,7 +46,7 @@ static int migor_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
unsigned int rate = params_rate(params);
@@ -67,7 +67,7 @@ static int migor_hw_params(struct snd_pcm_substream *substream,
clk_set_rate(&siumckb_clk, codec_freq);
dev_dbg(codec_dai->dev, "%s: configure %luHz\n", __func__, codec_freq);
- ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, SIU_CLKB_EXT,
+ ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(rtd, 0), SIU_CLKB_EXT,
codec_freq / 2, SND_SOC_CLOCK_IN);
if (!ret)
@@ -79,7 +79,7 @@ static int migor_hw_params(struct snd_pcm_substream *substream,
static int migor_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
if (use_count) {
use_count--;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 0bfcb77e5f65..4349f2fb823f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -696,7 +696,7 @@ struct snd_soc_dai *rsnd_substream_to_dai(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- return rtd->cpu_dai;
+ return asoc_rtd_to_cpu(rtd, 0);
}
static
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 392a1c5b15d3..50062eb79adb 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -810,9 +810,10 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
int playback = 0, capture = 0;
int i;
- if (rtd->num_codecs > 1) {
+ if (rtd->num_cpus > 1 ||
+ rtd->num_codecs > 1) {
dev_err(rtd->card->dev,
- "Compress ASoC: Multicodec not supported\n");
+ "Compress ASoC: Multi CPU/Codec not supported\n");
return -EINVAL;
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 068d809c349a..843b8b1c89d4 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -365,19 +365,20 @@ EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int playback = SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
dev_dbg(rtd->dev,
"ASoC: pop wq checking: %s status: %s waiting: %s\n",
codec_dai->driver->playback.stream_name,
- codec_dai->playback_active ? "active" : "inactive",
+ codec_dai->stream_active[playback] ? "active" : "inactive",
rtd->pop_wait ? "yes" : "no");
/* are we waiting on this codec DAI stream */
if (rtd->pop_wait == 1) {
rtd->pop_wait = 0;
- snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+ snd_soc_dapm_stream_event(rtd, playback,
SND_SOC_DAPM_STREAM_STOP);
}
@@ -431,6 +432,7 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
struct snd_soc_component *component;
struct device *dev;
int ret;
+ int stream;
/*
* for rtd->dev
@@ -465,23 +467,31 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
rtd->dev = dev;
INIT_LIST_HEAD(&rtd->list);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
+ for_each_pcm_streams(stream) {
+ INIT_LIST_HEAD(&rtd->dpcm[stream].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[stream].fe_clients);
+ }
dev_set_drvdata(dev, rtd);
INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
/*
- * for rtd->codec_dais
+ * for rtd->dais
*/
- rtd->codec_dais = devm_kcalloc(dev, dai_link->num_codecs,
+ rtd->dais = devm_kcalloc(dev, dai_link->num_cpus + dai_link->num_codecs,
sizeof(struct snd_soc_dai *),
GFP_KERNEL);
- if (!rtd->codec_dais)
+ if (!rtd->dais)
goto free_rtd;
/*
+ * dais = [][][][][][][][][][][][][][][][][][]
+ * ^cpu_dais ^codec_dais
+ * |--- num_cpus ---|--- num_codecs --|
+ */
+ rtd->cpu_dais = &rtd->dais[0];
+ rtd->codec_dais = &rtd->dais[dai_link->num_cpus];
+
+ /*
* rtd remaining settings
*/
rtd->card = card;
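
With cpu_dais and codec_dais aliasing two slices of one array, iterating "all DAIs of a runtime" becomes a single index range. The for_each_rtd_*() helpers used throughout the rest of this patch are, as a sketch consistent with the layout comment above (exact definitions are in include/sound/soc.h):

	#define for_each_rtd_cpu_dais(rtd, i, dai)				\
		for ((i) = 0;							\
		     ((i) < (rtd)->num_cpus) && ((dai) = (rtd)->cpu_dais[i]);	\
		     (i)++)
	#define for_each_rtd_codec_dais(rtd, i, dai)				\
		for ((i) = 0;							\
		     ((i) < (rtd)->num_codecs) && ((dai) = (rtd)->codec_dais[i]); \
		     (i)++)
	#define for_each_rtd_dais(rtd, i, dai)					\
		for ((i) = 0;							\
		     ((i) < (rtd)->num_cpus + (rtd)->num_codecs) &&		\
			     ((dai) = (rtd)->dais[i]);				\
		     (i)++)
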
@@ -514,6 +524,7 @@ int snd_soc_suspend(struct device *dev)
struct snd_soc_card *card = dev_get_drvdata(dev);
struct snd_soc_component *component;
struct snd_soc_pcm_runtime *rtd;
+ int playback = SNDRV_PCM_STREAM_PLAYBACK;
int i;
/* If the card is not initialized yet there is nothing to do */
@@ -536,10 +547,9 @@ int snd_soc_suspend(struct device *dev)
if (rtd->dai_link->ignore_suspend)
continue;
- for_each_rtd_codec_dai(rtd, i, dai) {
- if (dai->playback_active)
- snd_soc_dai_digital_mute(dai, 1,
- SNDRV_PCM_STREAM_PLAYBACK);
+ for_each_rtd_codec_dais(rtd, i, dai) {
+ if (dai->stream_active[playback])
+ snd_soc_dai_digital_mute(dai, 1, playback);
}
}
@@ -558,17 +568,14 @@ int snd_soc_suspend(struct device *dev)
snd_soc_flush_all_delayed_work(card);
for_each_card_rtds(card, rtd) {
+ int stream;
if (rtd->dai_link->ignore_suspend)
continue;
- snd_soc_dapm_stream_event(rtd,
- SNDRV_PCM_STREAM_PLAYBACK,
- SND_SOC_DAPM_STREAM_SUSPEND);
-
- snd_soc_dapm_stream_event(rtd,
- SNDRV_PCM_STREAM_CAPTURE,
- SND_SOC_DAPM_STREAM_SUSPEND);
+ for_each_pcm_streams(stream)
+ snd_soc_dapm_stream_event(rtd, stream,
+ SND_SOC_DAPM_STREAM_SUSPEND);
}
/* Recheck all endpoints too, their state is affected by suspend */
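
The open-coded PLAYBACK/CAPTURE pairs removed here, and in the resume path below, all collapse into for_each_pcm_streams(), which is essentially (see include/sound/pcm.h):

	#define for_each_pcm_streams(stream)			\
		for (stream  = SNDRV_PCM_STREAM_PLAYBACK;	\
		     stream <= SNDRV_PCM_STREAM_LAST;		\
		     stream++)
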
@@ -664,30 +671,27 @@ static void soc_resume_deferred(struct work_struct *work)
}
for_each_card_rtds(card, rtd) {
+ int stream;
if (rtd->dai_link->ignore_suspend)
continue;
- snd_soc_dapm_stream_event(rtd,
- SNDRV_PCM_STREAM_PLAYBACK,
- SND_SOC_DAPM_STREAM_RESUME);
-
- snd_soc_dapm_stream_event(rtd,
- SNDRV_PCM_STREAM_CAPTURE,
- SND_SOC_DAPM_STREAM_RESUME);
+ for_each_pcm_streams(stream)
+ snd_soc_dapm_stream_event(rtd, stream,
+ SND_SOC_DAPM_STREAM_RESUME);
}
/* unmute any active DACs */
for_each_card_rtds(card, rtd) {
struct snd_soc_dai *dai;
+ int playback = SNDRV_PCM_STREAM_PLAYBACK;
if (rtd->dai_link->ignore_suspend)
continue;
- for_each_rtd_codec_dai(rtd, i, dai) {
- if (dai->playback_active)
- snd_soc_dai_digital_mute(dai, 0,
- SNDRV_PCM_STREAM_PLAYBACK);
+ for_each_rtd_codec_dais(rtd, i, dai) {
+ if (dai->stream_active[playback])
+ snd_soc_dai_digital_mute(dai, 0, playback);
}
}
@@ -837,7 +841,7 @@ static int soc_dai_link_sanity_check(struct snd_soc_card *card,
struct snd_soc_dai_link *link)
{
int i;
- struct snd_soc_dai_link_component *codec, *platform;
+ struct snd_soc_dai_link_component *cpu, *codec, *platform;
for_each_link_codecs(link, i, codec) {
/*
@@ -886,44 +890,38 @@ static int soc_dai_link_sanity_check(struct snd_soc_card *card,
return -EPROBE_DEFER;
}
- /* FIXME */
- if (link->num_cpus > 1) {
- dev_err(card->dev,
- "ASoC: multi cpu is not yet supported %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * CPU device may be specified by either name or OF node, but
- * can be left unspecified, and will be matched based on DAI
- * name alone..
- */
- if (link->cpus->name && link->cpus->of_node) {
- dev_err(card->dev,
- "ASoC: Neither/both cpu name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
+ for_each_link_cpus(link, i, cpu) {
+ /*
+ * CPU device may be specified by either name or OF node, but
+ * can be left unspecified, and will be matched based on DAI
+		 * name alone.
+ */
+ if (cpu->name && cpu->of_node) {
+ dev_err(card->dev,
+ "ASoC: Neither/both cpu name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
- /*
- * Defer card registration if cpu dai component is not added to
- * component list.
- */
- if ((link->cpus->of_node || link->cpus->name) &&
- !soc_find_component(link->cpus))
- return -EPROBE_DEFER;
+ /*
+ * Defer card registration if cpu dai component is not added to
+ * component list.
+ */
+ if ((cpu->of_node || cpu->name) &&
+ !soc_find_component(cpu))
+ return -EPROBE_DEFER;
- /*
- * At least one of CPU DAI name or CPU device name/node must be
- * specified
- */
- if (!link->cpus->dai_name &&
- !(link->cpus->name || link->cpus->of_node)) {
- dev_err(card->dev,
- "ASoC: Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
+ /*
+ * At least one of CPU DAI name or CPU device name/node must be
+ * specified
+ */
+ if (!cpu->dai_name &&
+ !(cpu->name || cpu->of_node)) {
+ dev_err(card->dev,
+ "ASoC: Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
}
return 0;
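
With the per-CPU validation loop above, a dai_link may now carry several CPU entries, each checked independently. A hypothetical link that exercises this path (every name below is made up for illustration):

	SND_SOC_DAILINK_DEFS(dual_cpu_amp,
		DAILINK_COMP_ARRAY(COMP_CPU("example-ssp0"),
				   COMP_CPU("example-ssp1")),
		DAILINK_COMP_ARRAY(COMP_CODEC("example-amp.0", "amp-aif1"),
				   COMP_CODEC("example-amp.1", "amp-aif1")),
		DAILINK_COMP_ARRAY(COMP_EMPTY()));
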
@@ -966,7 +964,7 @@ int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
{
struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_dai_link_component *codec, *platform;
+ struct snd_soc_dai_link_component *codec, *platform, *cpu;
struct snd_soc_component *component;
int i, ret;
@@ -991,14 +989,19 @@ int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
if (!rtd)
return -ENOMEM;
- /* FIXME: we need multi CPU support in the future */
- rtd->cpu_dai = snd_soc_find_dai(dai_link->cpus);
- if (!rtd->cpu_dai) {
- dev_info(card->dev, "ASoC: CPU DAI %s not registered\n",
- dai_link->cpus->dai_name);
- goto _err_defer;
+ rtd->num_cpus = dai_link->num_cpus;
+ for_each_link_cpus(dai_link, i, cpu) {
+ rtd->cpu_dais[i] = snd_soc_find_dai(cpu);
+ if (!rtd->cpu_dais[i]) {
+ dev_info(card->dev, "ASoC: CPU DAI %s not registered\n",
+ cpu->dai_name);
+ goto _err_defer;
+ }
+ snd_soc_rtd_add_component(rtd, rtd->cpu_dais[i]->component);
}
- snd_soc_rtd_add_component(rtd, rtd->cpu_dai->component);
+
+ /* Single cpu links expect cpu and cpu_dai in runtime data */
+ rtd->cpu_dai = rtd->cpu_dais[0];
/* Find CODEC from registered CODECs */
rtd->num_codecs = dai_link->num_codecs;
@@ -1034,20 +1037,20 @@ _err_defer:
}
EXPORT_SYMBOL_GPL(snd_soc_add_pcm_runtime);
-static int soc_dai_pcm_new(struct snd_soc_dai **dais, int num_dais,
- struct snd_soc_pcm_runtime *rtd)
+static int soc_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_soc_dai *dai;
int i, ret = 0;
- for (i = 0; i < num_dais; ++i) {
- struct snd_soc_dai_driver *drv = dais[i]->driver;
+ for_each_rtd_dais(rtd, i, dai) {
+ struct snd_soc_dai_driver *drv = dai->driver;
if (drv->pcm_new)
- ret = drv->pcm_new(rtd, dais[i]);
+ ret = drv->pcm_new(rtd, dai);
if (ret < 0) {
- dev_err(dais[i]->dev,
+ dev_err(dai->dev,
"ASoC: Failed to bind %s with pcm device\n",
- dais[i]->name);
+ dai->name);
return ret;
}
}
@@ -1118,12 +1121,8 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
dai_link->stream_name, ret);
return ret;
}
- ret = soc_dai_pcm_new(&cpu_dai, 1, rtd);
- if (ret < 0)
- return ret;
- ret = soc_dai_pcm_new(rtd->codec_dais,
- rtd->num_codecs, rtd);
- return ret;
+
+ return soc_dai_pcm_new(rtd);
}
static void soc_set_name_prefix(struct snd_soc_card *card,
@@ -1256,8 +1255,18 @@ static int soc_probe_component(struct snd_soc_card *card,
ret = snd_soc_dapm_add_routes(dapm,
component->driver->dapm_routes,
component->driver->num_dapm_routes);
- if (ret < 0)
- goto err_probe;
+ if (ret < 0) {
+ if (card->disable_route_checks) {
+ dev_info(card->dev,
+ "%s: disable_route_checks set, ignoring errors on add_routes\n",
+ __func__);
+ } else {
+ dev_err(card->dev,
+ "%s: snd_soc_dapm_add_routes failed: %d\n",
+ __func__, ret);
+ goto err_probe;
+ }
+ }
/* see for_each_card_components */
list_add(&component->card_list, &card->component_dev_list);
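
disable_route_checks is opted into from the machine driver; a minimal sketch, assuming only the usual card setup (names are examples):

	static struct snd_soc_card example_card = {
		.name			= "example-card",
		.owner			= THIS_MODULE,
		.disable_route_checks	= true,
	};

Cards that do not set the flag keep the old fail-hard behaviour, since the goto err_probe path above is unchanged for them.
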
@@ -1309,24 +1318,22 @@ static int soc_probe_dai(struct snd_soc_dai *dai, int order)
static void soc_remove_link_dais(struct snd_soc_card *card)
{
int i;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
struct snd_soc_pcm_runtime *rtd;
int order;
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
- /* remove the CODEC DAI */
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- soc_remove_dai(codec_dai, order);
-
- soc_remove_dai(rtd->cpu_dai, order);
+ /* remove DAIs */
+ for_each_rtd_dais(rtd, i, dai)
+ soc_remove_dai(dai, order);
}
}
}
static int soc_probe_link_dais(struct snd_soc_card *card)
{
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
struct snd_soc_pcm_runtime *rtd;
int i, order, ret;
@@ -1337,13 +1344,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card)
"ASoC: probe %s dai link %d late %d\n",
card->name, rtd->num, order);
- ret = soc_probe_dai(rtd->cpu_dai, order);
- if (ret)
- return ret;
-
- /* probe the CODEC DAI */
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- ret = soc_probe_dai(codec_dai, order);
+ /* probe the CPU DAI */
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = soc_probe_dai(dai, order);
if (ret)
return ret;
}
@@ -1471,12 +1474,13 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt)
{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
+ unsigned int inv_dai_fmt;
unsigned int i;
int ret;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
if (ret != 0 && ret != -ENOTSUPP) {
dev_warn(codec_dai->dev,
@@ -1489,33 +1493,33 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
* Flip the polarity for the "CPU" end of a CODEC<->CODEC link
* the component which has non_legacy_dai_naming is Codec
*/
- if (cpu_dai->component->driver->non_legacy_dai_naming) {
- unsigned int inv_dai_fmt;
-
- inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
- switch (dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
- break;
- case SND_SOC_DAIFMT_CBM_CFS:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
- break;
- case SND_SOC_DAIFMT_CBS_CFM:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
- break;
- case SND_SOC_DAIFMT_CBS_CFS:
- inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
- break;
- }
-
- dai_fmt = inv_dai_fmt;
+ inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
+ switch (dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+ break;
}
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ unsigned int fmt = dai_fmt;
- ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt);
- if (ret != 0 && ret != -ENOTSUPP) {
- dev_warn(cpu_dai->dev,
- "ASoC: Failed to set DAI format: %d\n", ret);
- return ret;
+ if (cpu_dai->component->driver->non_legacy_dai_naming)
+ fmt = inv_dai_fmt;
+
+ ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ if (ret != 0 && ret != -ENOTSUPP) {
+ dev_warn(cpu_dai->dev,
+ "ASoC: Failed to set DAI format: %d\n", ret);
+ return ret;
+ }
}
return 0;
@@ -1938,8 +1942,18 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
- if (ret < 0)
- goto probe_end;
+ if (ret < 0) {
+ if (card->disable_route_checks) {
+ dev_info(card->dev,
+ "%s: disable_route_checks set, ignoring errors on add_routes\n",
+ __func__);
+ } else {
+ dev_err(card->dev,
+ "%s: snd_soc_dapm_add_routes failed: %d\n",
+ __func__, ret);
+ goto probe_end;
+ }
+ }
ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
card->num_of_dapm_routes);
@@ -3102,6 +3116,14 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
*dai_name = dai->driver->name;
if (!*dai_name)
*dai_name = pos->name;
+ } else if (ret) {
+ /*
+			 * If an error other than -ENOTSUPP is returned, go on
+			 * and check whether another component is provided with
+			 * the same node. This may happen if a device provides
+			 * several components.
+ */
+ continue;
}
break;
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
index 51031e330179..19142f6e533c 100644
--- a/sound/soc/soc-dai.c
+++ b/sound/soc/soc-dai.c
@@ -295,17 +295,24 @@ int snd_soc_dai_startup(struct snd_soc_dai *dai,
{
int ret = 0;
- if (dai->driver->ops->startup)
+ if (!dai->started &&
+ dai->driver->ops->startup)
ret = dai->driver->ops->startup(substream, dai);
+ if (ret == 0)
+ dai->started = 1;
+
return ret;
}
void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream)
{
- if (dai->driver->ops->shutdown)
+ if (dai->started &&
+ dai->driver->ops->shutdown)
dai->driver->ops->shutdown(substream, dai);
+
+ dai->started = 0;
}
int snd_soc_dai_prepare(struct snd_soc_dai *dai,
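
The new started flag makes the startup/shutdown pair idempotent, so a DAI whose startup is reached more than once only touches the hardware once. Illustrative call sequence:

	snd_soc_dai_startup(dai, substream);	/* runs ops->startup(), sets dai->started */
	snd_soc_dai_startup(dai, substream);	/* no-op: already started */
	snd_soc_dai_shutdown(dai, substream);	/* runs ops->shutdown(), clears dai->started */
	snd_soc_dai_shutdown(dai, substream);	/* no-op: nothing left to undo */
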
@@ -383,12 +390,7 @@ int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
*/
bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int dir)
{
- struct snd_soc_pcm_stream *stream;
-
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- stream = &dai->driver->playback;
- else
- stream = &dai->driver->capture;
+ struct snd_soc_pcm_stream *stream = snd_soc_dai_get_pcm_stream(dai, dir);
/* If the codec specifies any channels at all, it supports the stream */
return stream->channels_min;
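
snd_soc_dai_get_pcm_stream() replaces the removed if/else; a sketch of the helper (the real one is a static inline in include/sound/soc-dai.h):

	static inline struct snd_soc_pcm_stream *
	snd_soc_dai_get_pcm_stream(const struct snd_soc_dai *dai, int stream)
	{
		return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
			&dai->driver->playback : &dai->driver->capture;
	}
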
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 9fb54e6fe254..04da7928c873 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -302,7 +302,7 @@ void dapm_mark_endpoints_dirty(struct snd_soc_card *card)
mutex_lock(&card->dapm_mutex);
- list_for_each_entry(w, &card->widgets, list) {
+ for_each_card_widgets(card, w) {
if (w->is_ep) {
dapm_mark_dirty(w, "Rechecking endpoints");
if (w->is_ep & SND_SOC_DAPM_EP_SINK)
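
The for_each_card_*() wrappers used across soc-dapm.c below are thin aliases over the existing card lists; roughly:

	#define for_each_card_dapms(card, dapm)				\
		list_for_each_entry(dapm, &(card)->dapm_list, list)
	#define for_each_card_widgets(card, w)				\
		list_for_each_entry(w, &(card)->widgets, list)
	#define for_each_card_widgets_safe(card, w, _w)			\
		list_for_each_entry_safe(w, _w, &(card)->widgets, list)
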
@@ -589,7 +589,7 @@ static void dapm_reset(struct snd_soc_card *card)
memset(&card->dapm_stats, 0, sizeof(card->dapm_stats));
- list_for_each_entry(w, &card->widgets, list) {
+ for_each_card_widgets(card, w) {
w->new_power = w->power;
w->power_checked = false;
}
@@ -833,7 +833,7 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
*kcontrol = NULL;
- list_for_each_entry(w, &dapm->card->widgets, list) {
+ for_each_card_widgets(dapm->card, w) {
if (w == kcontrolw || w->dapm != kcontrolw->dapm)
continue;
for (i = 0; i < w->num_kcontrols; i++) {
@@ -1105,6 +1105,11 @@ static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget)
}
}
+static void dapm_widget_list_free(struct snd_soc_dapm_widget_list **list)
+{
+ kfree(*list);
+}
+
static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list,
struct list_head *widgets)
{
@@ -1310,6 +1315,11 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
return paths;
}
+void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list)
+{
+ dapm_widget_list_free(list);
+}
+
/*
* Handler for regulator supply widget.
*/
@@ -1706,9 +1716,8 @@ static void dapm_seq_run(struct snd_soc_card *card,
i, cur_subseq);
}
- list_for_each_entry(d, &card->dapm_list, list) {
+ for_each_card_dapms(card, d)
soc_dapm_async_complete(d);
- }
}
static void dapm_widget_update(struct snd_soc_card *card)
@@ -1724,9 +1733,7 @@ static void dapm_widget_update(struct snd_soc_card *card)
wlist = dapm_kcontrol_get_wlist(update->kcontrol);
- for (wi = 0; wi < wlist->num_widgets; wi++) {
- w = wlist->widgets[wi];
-
+ for_each_dapm_widgets(wlist, wi, w) {
if (w->event && (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
if (ret != 0)
@@ -1753,9 +1760,7 @@ static void dapm_widget_update(struct snd_soc_card *card)
w->name, ret);
}
- for (wi = 0; wi < wlist->num_widgets; wi++) {
- w = wlist->widgets[wi];
-
+ for_each_dapm_widgets(wlist, wi, w) {
if (w->event && (w->event_flags & SND_SOC_DAPM_POST_REG)) {
ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
if (ret != 0)
@@ -1943,7 +1948,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
trace_snd_soc_dapm_start(card);
- list_for_each_entry(d, &card->dapm_list, list) {
+ for_each_card_dapms(card, d) {
if (dapm_idle_bias_off(d))
d->target_bias_level = SND_SOC_BIAS_OFF;
else
@@ -1962,7 +1967,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
dapm_power_one_widget(w, &up_list, &down_list);
}
- list_for_each_entry(w, &card->widgets, list) {
+ for_each_card_widgets(card, w) {
switch (w->id) {
case snd_soc_dapm_pre:
case snd_soc_dapm_post:
@@ -2007,10 +2012,10 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
* they're not ground referenced.
*/
bias = SND_SOC_BIAS_OFF;
- list_for_each_entry(d, &card->dapm_list, list)
+ for_each_card_dapms(card, d)
if (d->target_bias_level > bias)
bias = d->target_bias_level;
- list_for_each_entry(d, &card->dapm_list, list)
+ for_each_card_dapms(card, d)
if (!dapm_idle_bias_off(d))
d->target_bias_level = bias;
@@ -2019,7 +2024,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
/* Run card bias changes at first */
dapm_pre_sequence_async(&card->dapm, 0);
/* Run other bias changes in parallel */
- list_for_each_entry(d, &card->dapm_list, list) {
+ for_each_card_dapms(card, d) {
if (d != &card->dapm && d->bias_level != d->target_bias_level)
async_schedule_domain(dapm_pre_sequence_async, d,
&async_domain);
@@ -2043,7 +2048,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
dapm_seq_run(card, &up_list, event, true);
/* Run all the bias changes in parallel */
- list_for_each_entry(d, &card->dapm_list, list) {
+ for_each_card_dapms(card, d) {
if (d != &card->dapm && d->bias_level != d->target_bias_level)
async_schedule_domain(dapm_post_sequence_async, d,
&async_domain);
@@ -2053,7 +2058,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
dapm_post_sequence_async(&card->dapm, 0);
/* do we need to notify any clients that DAPM event is complete */
- list_for_each_entry(d, &card->dapm_list, list) {
+ for_each_card_dapms(card, d) {
if (!d->component)
continue;
@@ -2286,7 +2291,7 @@ int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
card->update = NULL;
mutex_unlock(&card->dapm_mutex);
if (ret > 0)
- soc_dpcm_runtime_update(card);
+ snd_soc_dpcm_runtime_update(card);
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);
@@ -2351,7 +2356,7 @@ int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
card->update = NULL;
mutex_unlock(&card->dapm_mutex);
if (ret > 0)
- soc_dpcm_runtime_update(card);
+ snd_soc_dpcm_runtime_update(card);
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
@@ -2371,7 +2376,7 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
if (!cmpnt->card)
return 0;
- list_for_each_entry(w, &cmpnt->card->widgets, list) {
+ for_each_card_widgets(cmpnt->card, w) {
if (w->dapm != dapm)
continue;
@@ -2431,7 +2436,7 @@ static ssize_t dapm_widget_show(struct device *dev,
mutex_lock(&rtd->card->dapm_mutex);
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
struct snd_soc_component *cmpnt = codec_dai->component;
count += dapm_widget_show_component(cmpnt, buf + count);
@@ -2491,7 +2496,7 @@ static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
{
struct snd_soc_dapm_widget *w, *next_w;
- list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
+ for_each_card_widgets_safe(dapm->card, w, next_w) {
if (w->dapm != dapm)
continue;
snd_soc_dapm_free_widget(w);
@@ -2506,7 +2511,7 @@ static struct snd_soc_dapm_widget *dapm_find_widget(
struct snd_soc_dapm_widget *w;
struct snd_soc_dapm_widget *fallback = NULL;
- list_for_each_entry(w, &dapm->card->widgets, list) {
+ for_each_card_widgets(dapm->card, w) {
if (!strcmp(w->name, pin)) {
if (w->dapm == dapm)
return w;
@@ -2624,10 +2629,7 @@ static int dapm_update_dai_unlocked(struct snd_pcm_substream *substream,
struct snd_soc_dapm_widget *w;
int ret;
- if (dir == SNDRV_PCM_STREAM_PLAYBACK)
- w = dai->playback_widget;
- else
- w = dai->capture_widget;
+ w = snd_soc_dai_get_widget(dai, dir);
if (!w)
return 0;
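
snd_soc_dai_get_widget() is the DAPM counterpart of the pcm_stream accessor used in soc-dai.c; a sketch:

	static inline struct snd_soc_dapm_widget *
	snd_soc_dai_get_widget(struct snd_soc_dai *dai, int stream)
	{
		return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
			dai->playback_widget : dai->capture_widget;
	}
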
@@ -2908,7 +2910,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
* find src and dest widgets over all widgets but favor a widget from
* current DAPM context
*/
- list_for_each_entry(w, &dapm->card->widgets, list) {
+ for_each_card_widgets(dapm->card, w) {
if (!wsink && !(strcmp(w->name, sink))) {
wtsink = w;
if (w->dapm == dapm) {
@@ -3187,7 +3189,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_card *card)
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
- list_for_each_entry(w, &card->widgets, list)
+ for_each_card_widgets(card, w)
{
if (w->new)
continue;
@@ -3394,7 +3396,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
mutex_unlock(&card->dapm_mutex);
if (ret > 0)
- soc_dpcm_runtime_update(card);
+ snd_soc_dpcm_runtime_update(card);
return change;
}
@@ -3499,7 +3501,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
mutex_unlock(&card->dapm_mutex);
if (ret > 0)
- soc_dpcm_runtime_update(card);
+ snd_soc_dpcm_runtime_update(card);
return change;
}
@@ -3604,6 +3606,9 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
ret = PTR_ERR(w->pinctrl);
goto request_failed;
}
+
+ /* set to sleep_state when initializing */
+ dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
break;
case snd_soc_dapm_clock_supply:
w->clk = devm_clk_get(dapm->dev, w->name);
@@ -3698,6 +3703,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
w->dapm = dapm;
INIT_LIST_HEAD(&w->list);
INIT_LIST_HEAD(&w->dirty);
+ /* see for_each_card_widgets */
list_add_tail(&w->list, &dapm->card->widgets);
snd_soc_dapm_for_each_direction(dir) {
@@ -4222,7 +4228,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
struct snd_soc_dai *dai;
/* For each DAI widget... */
- list_for_each_entry(dai_w, &card->widgets, list) {
+ for_each_card_widgets(card, dai_w) {
switch (dai_w->id) {
case snd_soc_dapm_dai_in:
case snd_soc_dapm_dai_out:
@@ -4241,7 +4247,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
dai = dai_w->priv;
/* ...find all widgets with the same stream and link them */
- list_for_each_entry(w, &card->widgets, list) {
+ for_each_card_widgets(card, w) {
if (w->dapm != dai_w->dapm)
continue;
@@ -4271,16 +4277,15 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
return 0;
}
-static void dapm_connect_dai_link_widgets(struct snd_soc_card *card,
- struct snd_soc_pcm_runtime *rtd)
+static void dapm_add_valid_dai_widget(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *codec_dai,
+ struct snd_soc_dai *cpu_dai)
{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
struct snd_soc_dapm_widget *playback = NULL, *capture = NULL;
struct snd_soc_dapm_widget *codec, *playback_cpu, *capture_cpu;
struct snd_pcm_substream *substream;
struct snd_pcm_str *streams = rtd->pcm->streams;
- int i;
if (rtd->dai_link->params) {
playback_cpu = cpu_dai->capture_widget;
@@ -4292,77 +4297,92 @@ static void dapm_connect_dai_link_widgets(struct snd_soc_card *card,
capture_cpu = capture;
}
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- /* connect BE DAI playback if widgets are valid */
- codec = codec_dai->playback_widget;
-
- if (playback_cpu && codec) {
- if (!playback) {
- substream = streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
- playback = snd_soc_dapm_new_dai(card, substream,
- "playback");
- if (IS_ERR(playback)) {
- dev_err(rtd->dev,
- "ASoC: Failed to create DAI %s: %ld\n",
- codec_dai->name,
- PTR_ERR(playback));
- continue;
- }
-
- snd_soc_dapm_add_path(&card->dapm, playback_cpu,
- playback, NULL, NULL);
+ /* connect BE DAI playback if widgets are valid */
+ codec = codec_dai->playback_widget;
+
+ if (playback_cpu && codec) {
+ if (!playback) {
+ substream = streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ playback = snd_soc_dapm_new_dai(card, substream,
+ "playback");
+ if (IS_ERR(playback)) {
+ dev_err(rtd->dev,
+ "ASoC: Failed to create DAI %s: %ld\n",
+ codec_dai->name,
+ PTR_ERR(playback));
+ goto capture;
}
- dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
- cpu_dai->component->name, playback_cpu->name,
- codec_dai->component->name, codec->name);
-
- snd_soc_dapm_add_path(&card->dapm, playback, codec,
- NULL, NULL);
+ snd_soc_dapm_add_path(&card->dapm, playback_cpu,
+ playback, NULL, NULL);
}
- }
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- /* connect BE DAI capture if widgets are valid */
- codec = codec_dai->capture_widget;
-
- if (codec && capture_cpu) {
- if (!capture) {
- substream = streams[SNDRV_PCM_STREAM_CAPTURE].substream;
- capture = snd_soc_dapm_new_dai(card, substream,
- "capture");
- if (IS_ERR(capture)) {
- dev_err(rtd->dev,
- "ASoC: Failed to create DAI %s: %ld\n",
- codec_dai->name,
- PTR_ERR(capture));
- continue;
- }
-
- snd_soc_dapm_add_path(&card->dapm, capture,
- capture_cpu, NULL, NULL);
+ dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
+ cpu_dai->component->name, playback_cpu->name,
+ codec_dai->component->name, codec->name);
+
+ snd_soc_dapm_add_path(&card->dapm, playback, codec,
+ NULL, NULL);
+ }
+
+capture:
+ /* connect BE DAI capture if widgets are valid */
+ codec = codec_dai->capture_widget;
+
+ if (codec && capture_cpu) {
+ if (!capture) {
+ substream = streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ capture = snd_soc_dapm_new_dai(card, substream,
+ "capture");
+ if (IS_ERR(capture)) {
+ dev_err(rtd->dev,
+ "ASoC: Failed to create DAI %s: %ld\n",
+ codec_dai->name,
+ PTR_ERR(capture));
+ return;
}
- dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
- codec_dai->component->name, codec->name,
- cpu_dai->component->name, capture_cpu->name);
-
- snd_soc_dapm_add_path(&card->dapm, codec, capture,
- NULL, NULL);
+ snd_soc_dapm_add_path(&card->dapm, capture,
+ capture_cpu, NULL, NULL);
}
+
+ dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
+ codec_dai->component->name, codec->name,
+ cpu_dai->component->name, capture_cpu->name);
+
+ snd_soc_dapm_add_path(&card->dapm, codec, capture,
+ NULL, NULL);
}
}
+static void dapm_connect_dai_link_widgets(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *codec_dai;
+ int i;
+
+ if (rtd->num_cpus == 1) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai)
+ dapm_add_valid_dai_widget(card, rtd, codec_dai,
+ rtd->cpu_dais[0]);
+ } else if (rtd->num_codecs == rtd->num_cpus) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai)
+ dapm_add_valid_dai_widget(card, rtd, codec_dai,
+ rtd->cpu_dais[i]);
+ } else {
+ dev_err(card->dev,
+ "N cpus to M codecs link is not supported yet\n");
+ }
+
+}
+
static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream,
int event)
{
struct snd_soc_dapm_widget *w;
unsigned int ep;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- w = dai->playback_widget;
- else
- w = dai->capture_widget;
+ w = snd_soc_dai_get_widget(dai, stream);
if (w) {
dapm_mark_dirty(w, "stream event");
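
The dispatch in dapm_connect_dai_link_widgets() above pairs the link's DAIs as follows (C = CPU DAI, K = CODEC DAI):

	/*
	 * 1 CPU,  N codecs:  C0-K0, C0-K1, ... C0-Kn    (fan-out)
	 * N CPUs, N codecs:  C0-K0, C1-K1, ... Cn-Kn    (1:1 pairs)
	 * N CPUs, M codecs, N != M, N > 1: rejected with an error
	 */
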
@@ -4413,12 +4433,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
static void soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
int event)
{
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
int i;
- soc_dapm_dai_stream_event(rtd->cpu_dai, stream, event);
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- soc_dapm_dai_stream_event(codec_dai, stream, event);
+ for_each_rtd_dais(rtd, i, dai)
+ soc_dapm_dai_stream_event(dai, stream, event);
dapm_power_widgets(rtd->card, event);
}
@@ -4754,6 +4773,7 @@ void snd_soc_dapm_init(struct snd_soc_dapm_context *dapm,
}
INIT_LIST_HEAD(&dapm->list);
+ /* see for_each_card_dapms */
list_add(&dapm->list, &card->dapm_list);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_init);
@@ -4767,7 +4787,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
mutex_lock(&card->dapm_mutex);
- list_for_each_entry(w, &dapm->card->widgets, list) {
+ for_each_card_widgets(dapm->card, w) {
if (w->dapm != dapm)
continue;
if (w->power) {
@@ -4800,7 +4820,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
{
struct snd_soc_dapm_context *dapm;
- list_for_each_entry(dapm, &card->dapm_list, list) {
+ for_each_card_dapms(card, dapm) {
if (dapm != &card->dapm) {
soc_dapm_shutdown_dapm(dapm);
if (dapm->bias_level == SND_SOC_BIAS_STANDBY)
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 2cc25651661c..facf1922a714 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -62,6 +62,12 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
struct snd_dmaengine_dai_dma_data *dma_data;
int ret;
+ if (rtd->num_cpus > 1) {
+ dev_err(rtd->dev,
+ "%s doesn't support Multi CPU yet\n", __func__);
+ return -EINVAL;
+ }
+
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
@@ -118,6 +124,12 @@ dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
struct snd_dmaengine_dai_dma_data *dma_data;
struct snd_pcm_hardware hw;
+ if (rtd->num_cpus > 1) {
+ dev_err(rtd->dev,
+ "%s doesn't support Multi CPU yet\n", __func__);
+ return -EINVAL;
+ }
+
if (pcm->config && pcm->config->pcm_hardware)
return snd_soc_set_runtime_hwparams(substream,
pcm->config->pcm_hardware);
@@ -185,6 +197,12 @@ static struct dma_chan *dmaengine_pcm_compat_request_channel(
struct snd_dmaengine_dai_dma_data *dma_data;
dma_filter_fn fn = NULL;
+ if (rtd->num_cpus > 1) {
+ dev_err(rtd->dev,
+ "%s doesn't support Multi CPU yet\n", __func__);
+ return NULL;
+ }
+
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
@@ -237,7 +255,7 @@ static int dmaengine_pcm_new(struct snd_soc_component *component,
max_buffer_size = SIZE_MAX;
}
- for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
+ for_each_pcm_streams(i) {
substream = rtd->pcm->streams[i].substream;
if (!substream)
continue;
@@ -371,8 +389,7 @@ static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
dev = config->dma_dev;
}
- for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
- i++) {
+ for_each_pcm_streams(i) {
if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
name = "rx-tx";
else
@@ -401,8 +418,7 @@ static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
{
unsigned int i;
- for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
- i++) {
+ for_each_pcm_streams(i) {
if (!pcm->chan[i])
continue;
dma_release_channel(pcm->chan[i]);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 2c59b3688ca0..e256d438ee68 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -28,6 +28,180 @@
#define DPCM_MAX_BE_USERS 8
+#ifdef CONFIG_DEBUG_FS
+static const char *dpcm_state_string(enum snd_soc_dpcm_state state)
+{
+ switch (state) {
+ case SND_SOC_DPCM_STATE_NEW:
+ return "new";
+ case SND_SOC_DPCM_STATE_OPEN:
+ return "open";
+ case SND_SOC_DPCM_STATE_HW_PARAMS:
+ return "hw_params";
+ case SND_SOC_DPCM_STATE_PREPARE:
+ return "prepare";
+ case SND_SOC_DPCM_STATE_START:
+ return "start";
+ case SND_SOC_DPCM_STATE_STOP:
+ return "stop";
+ case SND_SOC_DPCM_STATE_SUSPEND:
+ return "suspend";
+ case SND_SOC_DPCM_STATE_PAUSED:
+ return "paused";
+ case SND_SOC_DPCM_STATE_HW_FREE:
+ return "hw_free";
+ case SND_SOC_DPCM_STATE_CLOSE:
+ return "close";
+ }
+
+ return "unknown";
+}
+
+static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+ int stream, char *buf, size_t size)
+{
+ struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
+ struct snd_soc_dpcm *dpcm;
+ ssize_t offset = 0;
+ unsigned long flags;
+
+ /* FE state */
+ offset += scnprintf(buf + offset, size - offset,
+ "[%s - %s]\n", fe->dai_link->name,
+ stream ? "Capture" : "Playback");
+
+ offset += scnprintf(buf + offset, size - offset, "State: %s\n",
+ dpcm_state_string(fe->dpcm[stream].state));
+
+ if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+ offset += scnprintf(buf + offset, size - offset,
+ "Hardware Params: "
+ "Format = %s, Channels = %d, Rate = %d\n",
+ snd_pcm_format_name(params_format(params)),
+ params_channels(params),
+ params_rate(params));
+
+ /* BEs state */
+ offset += scnprintf(buf + offset, size - offset, "Backends:\n");
+
+ if (list_empty(&fe->dpcm[stream].be_clients)) {
+ offset += scnprintf(buf + offset, size - offset,
+ " No active DSP links\n");
+ goto out;
+ }
+
+ spin_lock_irqsave(&fe->card->dpcm_lock, flags);
+ for_each_dpcm_be(fe, stream, dpcm) {
+ struct snd_soc_pcm_runtime *be = dpcm->be;
+ params = &dpcm->hw_params;
+
+ offset += scnprintf(buf + offset, size - offset,
+ "- %s\n", be->dai_link->name);
+
+ offset += scnprintf(buf + offset, size - offset,
+ " State: %s\n",
+ dpcm_state_string(be->dpcm[stream].state));
+
+ if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+ offset += scnprintf(buf + offset, size - offset,
+ " Hardware Params: "
+ "Format = %s, Channels = %d, Rate = %d\n",
+ snd_pcm_format_name(params_format(params)),
+ params_channels(params),
+ params_rate(params));
+ }
+ spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
+out:
+ return offset;
+}
+
+static ssize_t dpcm_state_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct snd_soc_pcm_runtime *fe = file->private_data;
+ ssize_t out_count = PAGE_SIZE, offset = 0, ret = 0;
+ int stream;
+ char *buf;
+
+ if (fe->num_cpus > 1) {
+ dev_err(fe->dev,
+ "%s doesn't support Multi CPU yet\n", __func__);
+ return -EINVAL;
+ }
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for_each_pcm_streams(stream)
+ if (snd_soc_dai_stream_valid(fe->cpu_dai, stream))
+ offset += dpcm_show_state(fe, stream,
+ buf + offset,
+ out_count - offset);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
+
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations dpcm_state_fops = {
+ .open = simple_open,
+ .read = dpcm_state_read_file,
+ .llseek = default_llseek,
+};
+
+void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
+{
+ if (!rtd->dai_link)
+ return;
+
+ if (!rtd->dai_link->dynamic)
+ return;
+
+ if (!rtd->card->debugfs_card_root)
+ return;
+
+ rtd->debugfs_dpcm_root = debugfs_create_dir(rtd->dai_link->name,
+ rtd->card->debugfs_card_root);
+
+ debugfs_create_file("state", 0444, rtd->debugfs_dpcm_root,
+ rtd, &dpcm_state_fops);
+}
+
+static void dpcm_create_debugfs_state(struct snd_soc_dpcm *dpcm, int stream)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s:%s", dpcm->be->dai_link->name,
+ stream ? "capture" : "playback");
+ if (name) {
+ dpcm->debugfs_state = debugfs_create_dir(
+ name, dpcm->fe->debugfs_dpcm_root);
+ debugfs_create_u32("state", 0644, dpcm->debugfs_state,
+ &dpcm->state);
+ kfree(name);
+ }
+}
+
+static void dpcm_remove_debugfs_state(struct snd_soc_dpcm *dpcm)
+{
+ debugfs_remove_recursive(dpcm->debugfs_state);
+}
+
+#else
+static inline void dpcm_create_debugfs_state(struct snd_soc_dpcm *dpcm,
+ int stream)
+{
+}
+
+static inline void dpcm_remove_debugfs_state(struct snd_soc_dpcm *dpcm)
+{
+}
+#endif
+
static int soc_rtd_startup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream)
{
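
Reading the new per-FE state file produces output in the format built by dpcm_show_state(); an illustrative session, with card, link and backend names made up:

	/*
	 *   # cat /sys/kernel/debug/asoc/ExampleCard/ExampleFE/state
	 *   [ExampleFE - Playback]
	 *   State: start
	 *   Hardware Params: Format = S16_LE, Channels = 2, Rate = 48000
	 *   Backends:
	 *   - ExampleBE
	 *      State: start
	 *      Hardware Params: Format = S16_LE, Channels = 2, Rate = 48000
	 */
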
@@ -82,6 +256,21 @@ static int soc_rtd_trigger(struct snd_soc_pcm_runtime *rtd,
return 0;
}
+static void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd,
+ int stream, int action)
+{
+ struct snd_soc_dai *dai;
+ int i;
+
+ lockdep_assert_held(&rtd->card->pcm_mutex);
+
+ for_each_rtd_dais(rtd, i, dai) {
+ dai->stream_active[stream] += action;
+ dai->active += action;
+ dai->component->active += action;
+ }
+}
+
/**
* snd_soc_runtime_activate() - Increment active count for PCM runtime components
* @rtd: ASoC PCM runtime that is activated
@@ -94,29 +283,9 @@ static int soc_rtd_trigger(struct snd_soc_pcm_runtime *rtd,
*/
void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream)
{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
- int i;
-
- lockdep_assert_held(&rtd->card->pcm_mutex);
-
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- cpu_dai->playback_active++;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- codec_dai->playback_active++;
- } else {
- cpu_dai->capture_active++;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- codec_dai->capture_active++;
- }
-
- cpu_dai->active++;
- cpu_dai->component->active++;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- codec_dai->active++;
- codec_dai->component->active++;
- }
+ snd_soc_runtime_action(rtd, stream, 1);
}
+EXPORT_SYMBOL_GPL(snd_soc_runtime_activate);
/**
* snd_soc_runtime_deactivate() - Decrement active count for PCM runtime components
@@ -130,29 +299,9 @@ void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream)
*/
void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream)
{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
- int i;
-
- lockdep_assert_held(&rtd->card->pcm_mutex);
-
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- cpu_dai->playback_active--;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- codec_dai->playback_active--;
- } else {
- cpu_dai->capture_active--;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- codec_dai->capture_active--;
- }
-
- cpu_dai->active--;
- cpu_dai->component->active--;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- codec_dai->component->active--;
- codec_dai->active--;
- }
+ snd_soc_runtime_action(rtd, stream, -1);
}
+EXPORT_SYMBOL_GPL(snd_soc_runtime_deactivate);
/**
* snd_soc_runtime_ignore_pmdown_time() - Check whether to ignore the power down delay
@@ -287,8 +436,8 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
+ struct snd_soc_dai *cpu_dai;
unsigned int rate, channels, sample_bits, symmetry, i;
rate = params_rate(params);
@@ -296,40 +445,51 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
sample_bits = snd_pcm_format_physical_width(params_format(params));
/* reject unmatched parameters when applying symmetry */
- symmetry = cpu_dai->driver->symmetric_rates ||
- rtd->dai_link->symmetric_rates;
+ symmetry = rtd->dai_link->symmetric_rates;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- symmetry |= codec_dai->driver->symmetric_rates;
+ for_each_rtd_cpu_dais(rtd, i, dai)
+ symmetry |= dai->driver->symmetric_rates;
- if (symmetry && cpu_dai->rate && cpu_dai->rate != rate) {
- dev_err(rtd->dev, "ASoC: unmatched rate symmetry: %d - %d\n",
- cpu_dai->rate, rate);
- return -EINVAL;
+ if (symmetry) {
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ if (cpu_dai->rate && cpu_dai->rate != rate) {
+ dev_err(rtd->dev, "ASoC: unmatched rate symmetry: %d - %d\n",
+ cpu_dai->rate, rate);
+ return -EINVAL;
+ }
+ }
}
- symmetry = cpu_dai->driver->symmetric_channels ||
- rtd->dai_link->symmetric_channels;
+ symmetry = rtd->dai_link->symmetric_channels;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- symmetry |= codec_dai->driver->symmetric_channels;
+ for_each_rtd_dais(rtd, i, dai)
+ symmetry |= dai->driver->symmetric_channels;
- if (symmetry && cpu_dai->channels && cpu_dai->channels != channels) {
- dev_err(rtd->dev, "ASoC: unmatched channel symmetry: %d - %d\n",
- cpu_dai->channels, channels);
- return -EINVAL;
+ if (symmetry) {
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ if (cpu_dai->channels &&
+ cpu_dai->channels != channels) {
+ dev_err(rtd->dev, "ASoC: unmatched channel symmetry: %d - %d\n",
+ cpu_dai->channels, channels);
+ return -EINVAL;
+ }
+ }
}
- symmetry = cpu_dai->driver->symmetric_samplebits ||
- rtd->dai_link->symmetric_samplebits;
+ symmetry = rtd->dai_link->symmetric_samplebits;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- symmetry |= codec_dai->driver->symmetric_samplebits;
+ for_each_rtd_dais(rtd, i, dai)
+ symmetry |= dai->driver->symmetric_samplebits;
- if (symmetry && cpu_dai->sample_bits && cpu_dai->sample_bits != sample_bits) {
- dev_err(rtd->dev, "ASoC: unmatched sample bits symmetry: %d - %d\n",
- cpu_dai->sample_bits, sample_bits);
- return -EINVAL;
+ if (symmetry) {
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ if (cpu_dai->sample_bits &&
+ cpu_dai->sample_bits != sample_bits) {
+ dev_err(rtd->dev, "ASoC: unmatched sample bits symmetry: %d - %d\n",
+ cpu_dai->sample_bits, sample_bits);
+ return -EINVAL;
+ }
+ }
}
return 0;
@@ -338,20 +498,19 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
static bool soc_pcm_has_symmetry(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai_driver *cpu_driver = rtd->cpu_dai->driver;
struct snd_soc_dai_link *link = rtd->dai_link;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
unsigned int symmetry, i;
- symmetry = cpu_driver->symmetric_rates || link->symmetric_rates ||
- cpu_driver->symmetric_channels || link->symmetric_channels ||
- cpu_driver->symmetric_samplebits || link->symmetric_samplebits;
+ symmetry = link->symmetric_rates ||
+ link->symmetric_channels ||
+ link->symmetric_samplebits;
- for_each_rtd_codec_dai(rtd, i, codec_dai)
+ for_each_rtd_dais(rtd, i, dai)
symmetry = symmetry ||
- codec_dai->driver->symmetric_rates ||
- codec_dai->driver->symmetric_channels ||
- codec_dai->driver->symmetric_samplebits;
+ dai->driver->symmetric_rates ||
+ dai->driver->symmetric_channels ||
+ dai->driver->symmetric_samplebits;
return symmetry;
}
@@ -373,77 +532,98 @@ static void soc_pcm_set_msb(struct snd_pcm_substream *substream, int bits)
static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
+ struct snd_soc_pcm_stream *pcm_codec, *pcm_cpu;
+ int stream = substream->stream;
int i;
- unsigned int bits = 0, cpu_bits;
+ unsigned int bits = 0, cpu_bits = 0;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if (codec_dai->driver->playback.sig_bits == 0) {
- bits = 0;
- break;
- }
- bits = max(codec_dai->driver->playback.sig_bits, bits);
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ pcm_codec = snd_soc_dai_get_pcm_stream(codec_dai, stream);
+
+ if (pcm_codec->sig_bits == 0) {
+ bits = 0;
+ break;
}
- cpu_bits = cpu_dai->driver->playback.sig_bits;
- } else {
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if (codec_dai->driver->capture.sig_bits == 0) {
- bits = 0;
- break;
- }
- bits = max(codec_dai->driver->capture.sig_bits, bits);
+ bits = max(pcm_codec->sig_bits, bits);
+ }
+
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ pcm_cpu = snd_soc_dai_get_pcm_stream(cpu_dai, stream);
+
+ if (pcm_cpu->sig_bits == 0) {
+ cpu_bits = 0;
+ break;
}
- cpu_bits = cpu_dai->driver->capture.sig_bits;
+ cpu_bits = max(pcm_cpu->sig_bits, cpu_bits);
}
soc_pcm_set_msb(substream, bits);
soc_pcm_set_msb(substream, cpu_bits);
}
-static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
+/**
+ * snd_soc_runtime_calc_hw() - Calculate hw limits for a PCM stream
+ * @rtd: ASoC PCM runtime
+ * @hw: PCM hardware parameters (output)
+ * @stream: Direction of the PCM stream
+ *
+ * Calculates the subset of stream parameters supported by all DAIs
+ * associated with the PCM stream.
+ */
+int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hardware *hw, int stream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_pcm_hardware *hw = &runtime->hw;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai;
- struct snd_soc_dai_driver *cpu_dai_drv = rtd->cpu_dai->driver;
- struct snd_soc_dai_driver *codec_dai_drv;
+ struct snd_soc_dai *cpu_dai;
struct snd_soc_pcm_stream *codec_stream;
struct snd_soc_pcm_stream *cpu_stream;
unsigned int chan_min = 0, chan_max = UINT_MAX;
+ unsigned int cpu_chan_min = 0, cpu_chan_max = UINT_MAX;
unsigned int rate_min = 0, rate_max = UINT_MAX;
- unsigned int rates = UINT_MAX;
+ unsigned int cpu_rate_min = 0, cpu_rate_max = UINT_MAX;
+ unsigned int rates = UINT_MAX, cpu_rates = UINT_MAX;
u64 formats = ULLONG_MAX;
int i;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- cpu_stream = &cpu_dai_drv->playback;
- else
- cpu_stream = &cpu_dai_drv->capture;
+ /* first calculate min/max only for CPUs in the DAI link */
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
- /* first calculate min/max only for CODECs in the DAI link */
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ /*
+ * Skip CPUs which don't support the current stream type.
+ * Otherwise, since the rate, channel, and format values will
+		 * be zero in that case, we would have no usable settings left,
+ * causing the resulting setup to fail.
+ */
+ if (!snd_soc_dai_stream_valid(cpu_dai, stream))
+ continue;
+
+ cpu_stream = snd_soc_dai_get_pcm_stream(cpu_dai, stream);
+
+ cpu_chan_min = max(cpu_chan_min, cpu_stream->channels_min);
+ cpu_chan_max = min(cpu_chan_max, cpu_stream->channels_max);
+ cpu_rate_min = max(cpu_rate_min, cpu_stream->rate_min);
+ cpu_rate_max = min_not_zero(cpu_rate_max, cpu_stream->rate_max);
+ formats &= cpu_stream->formats;
+ cpu_rates = snd_pcm_rate_mask_intersect(cpu_stream->rates,
+ cpu_rates);
+ }
+
+ /* second calculate min/max only for CODECs in the DAI link */
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
/*
* Skip CODECs which don't support the current stream type.
* Otherwise, since the rate, channel, and format values will
		 * be zero in that case, we would have no usable settings left,
* causing the resulting setup to fail.
- * At least one CODEC should match, otherwise we should have
- * bailed out on a higher level, since there would be no
- * CODEC to support the transfer direction in that case.
*/
- if (!snd_soc_dai_stream_valid(codec_dai,
- substream->stream))
+ if (!snd_soc_dai_stream_valid(codec_dai, stream))
continue;
- codec_dai_drv = codec_dai->driver;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- codec_stream = &codec_dai_drv->playback;
- else
- codec_stream = &codec_dai_drv->capture;
+ codec_stream = snd_soc_dai_get_pcm_stream(codec_dai, stream);
+
chan_min = max(chan_min, codec_stream->channels_min);
chan_max = min(chan_max, codec_stream->channels_max);
rate_min = max(rate_min, codec_stream->rate_min);
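
snd_soc_runtime_calc_hw(), completed in the next hunk, is exported so platform components can reuse the same CPU/CODEC intersection. A usage sketch from a hypothetical component open callback (the callback name is made up; snd_soc_set_runtime_hwparams() is the existing helper):

	static int example_component_open(struct snd_soc_component *component,
					  struct snd_pcm_substream *substream)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		struct snd_pcm_hardware hw = {};
		int ret;

		/* fails if no CPU or CODEC DAI supports this direction */
		ret = snd_soc_runtime_calc_hw(rtd, &hw, substream->stream);
		if (ret < 0)
			return ret;

		return snd_soc_set_runtime_hwparams(substream, &hw);
	}
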
@@ -452,74 +632,107 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
rates = snd_pcm_rate_mask_intersect(codec_stream->rates, rates);
}
+ /* Verify both a valid CPU DAI and a valid CODEC DAI were found */
+ if (!chan_min || !cpu_chan_min)
+ return -EINVAL;
+
/*
* chan min/max cannot be enforced if there are multiple CODEC DAIs
- * connected to a single CPU DAI, use CPU DAI's directly and let
+	 * connected to CPU DAI(s), use the CPU DAIs' values directly and let
* channel allocation be fixed up later
*/
if (rtd->num_codecs > 1) {
- chan_min = cpu_stream->channels_min;
- chan_max = cpu_stream->channels_max;
+ chan_min = cpu_chan_min;
+ chan_max = cpu_chan_max;
}
- hw->channels_min = max(chan_min, cpu_stream->channels_min);
- hw->channels_max = min(chan_max, cpu_stream->channels_max);
- if (hw->formats)
- hw->formats &= formats & cpu_stream->formats;
- else
- hw->formats = formats & cpu_stream->formats;
- hw->rates = snd_pcm_rate_mask_intersect(rates, cpu_stream->rates);
+	/* finally find an intersection between CODECs and CPUs */
+ hw->channels_min = max(chan_min, cpu_chan_min);
+ hw->channels_max = min(chan_max, cpu_chan_max);
+ hw->formats = formats;
+ hw->rates = snd_pcm_rate_mask_intersect(rates, cpu_rates);
- snd_pcm_limit_hw_rates(runtime);
+ snd_pcm_hw_limit_rates(hw);
- hw->rate_min = max(hw->rate_min, cpu_stream->rate_min);
+ hw->rate_min = max(hw->rate_min, cpu_rate_min);
hw->rate_min = max(hw->rate_min, rate_min);
- hw->rate_max = min_not_zero(hw->rate_max, cpu_stream->rate_max);
+ hw->rate_max = min_not_zero(hw->rate_max, cpu_rate_max);
hw->rate_max = min_not_zero(hw->rate_max, rate_max);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_runtime_calc_hw);
-static int soc_pcm_components_open(struct snd_pcm_substream *substream,
- struct snd_soc_component **last)
+static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
{
+ struct snd_pcm_hardware *hw = &substream->runtime->hw;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ u64 formats = hw->formats;
+
+ /*
+ * At least one CPU and one CODEC should match. Otherwise, we should
+ * have bailed out on a higher level, since there would be no CPU or
+ * CODEC to support the transfer direction in that case.
+ */
+ snd_soc_runtime_calc_hw(rtd, hw, substream->stream);
+
+ if (formats)
+ hw->formats &= formats;
+}
+
+static int soc_pcm_components_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *last = NULL;
struct snd_soc_component *component;
int i, ret = 0;
for_each_rtd_components(rtd, i, component) {
- *last = component;
+ last = component;
ret = snd_soc_component_module_get_when_open(component);
if (ret < 0) {
dev_err(component->dev,
"ASoC: can't get module %s\n",
component->name);
- return ret;
+ break;
}
ret = snd_soc_component_open(component, substream);
if (ret < 0) {
+ snd_soc_component_module_put_when_close(component);
dev_err(component->dev,
"ASoC: can't open component %s: %d\n",
component->name, ret);
- return ret;
+ break;
}
}
- *last = NULL;
- return 0;
+
+ if (ret < 0) {
+ /* rollback on error */
+ for_each_rtd_components(rtd, i, component) {
+ if (component == last)
+ break;
+
+ snd_soc_component_close(component, substream);
+ snd_soc_component_module_put_when_close(component);
+ }
+ }
+
+ return ret;
}
-static int soc_pcm_components_close(struct snd_pcm_substream *substream,
- struct snd_soc_component *last)
+static int soc_pcm_components_close(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- int i, ret = 0;
+ int i, r, ret = 0;
for_each_rtd_components(rtd, i, component) {
- if (component == last)
- break;
+ r = snd_soc_component_close(component, substream);
+ if (r < 0)
+ ret = r; /* use last ret */
- ret |= snd_soc_component_close(component, substream);
snd_soc_component_module_put_when_close(component);
}
@@ -527,6 +740,45 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
}
/*
+ * Called by ALSA when a PCM substream is closed. Private data can be
+ * freed here. The cpu DAI, codec DAI, machine and components are also
+ * shutdown.
+ */
+static int soc_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component;
+ struct snd_soc_dai *dai;
+ int i;
+
+ mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+
+ snd_soc_runtime_deactivate(rtd, substream->stream);
+
+ for_each_rtd_dais(rtd, i, dai)
+ snd_soc_dai_shutdown(dai, substream);
+
+ soc_rtd_shutdown(rtd, substream);
+
+ soc_pcm_components_close(substream);
+
+ snd_soc_dapm_stream_stop(rtd, substream->stream);
+
+ mutex_unlock(&rtd->card->pcm_mutex);
+
+ for_each_rtd_components(rtd, i, component) {
+ pm_runtime_mark_last_busy(component->dev);
+ pm_runtime_put_autosuspend(component->dev);
+ }
+
+ for_each_rtd_components(rtd, i, component)
+ if (!component->active)
+ pinctrl_pm_select_sleep_state(component->dev);
+
+ return 0;
+}
+
+/*
* Called by ALSA when a PCM substream is opened, the runtime->hw record is
* then initialized and any private data can be allocated. This also calls
* startup for the cpu DAI, component, machine and codec DAI.
@@ -536,9 +788,9 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
const char *codec_dai_name = "multicodec";
+ const char *cpu_dai_name = "multicpu";
int i, ret = 0;
for_each_rtd_components(rtd, i, component)
@@ -549,38 +801,31 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- /* startup the audio subsystem */
- ret = snd_soc_dai_startup(cpu_dai, substream);
- if (ret < 0) {
- dev_err(cpu_dai->dev, "ASoC: can't open interface %s: %d\n",
- cpu_dai->name, ret);
- goto out;
- }
-
- ret = soc_pcm_components_open(substream, &component);
+ ret = soc_pcm_components_open(substream);
if (ret < 0)
goto component_err;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- ret = snd_soc_dai_startup(codec_dai, substream);
+ ret = soc_rtd_startup(rtd, substream);
+ if (ret < 0) {
+ pr_err("ASoC: %s startup failed: %d\n",
+ rtd->dai_link->name, ret);
+ goto rtd_startup_err;
+ }
+
+ /* startup the audio subsystem */
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = snd_soc_dai_startup(dai, substream);
if (ret < 0) {
- dev_err(codec_dai->dev,
- "ASoC: can't open codec %s: %d\n",
- codec_dai->name, ret);
- goto codec_dai_err;
+ dev_err(dai->dev,
+ "ASoC: can't open DAI %s: %d\n",
+ dai->name, ret);
+ goto config_err;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- codec_dai->tx_mask = 0;
+ dai->tx_mask = 0;
else
- codec_dai->rx_mask = 0;
- }
-
- ret = soc_rtd_startup(rtd, substream);
- if (ret < 0) {
- pr_err("ASoC: %s startup failed: %d\n",
- rtd->dai_link->name, ret);
- goto machine_err;
+ dai->rx_mask = 0;
}
/* Dynamic PCM DAI links compat checks use dynamic capabilities */
@@ -593,46 +838,43 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
if (rtd->num_codecs == 1)
codec_dai_name = rtd->codec_dai->name;
+ if (rtd->num_cpus == 1)
+ cpu_dai_name = rtd->cpu_dai->name;
+
if (soc_pcm_has_symmetry(substream))
runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
ret = -EINVAL;
if (!runtime->hw.rates) {
printk(KERN_ERR "ASoC: %s <-> %s No matching rates\n",
- codec_dai_name, cpu_dai->name);
+ codec_dai_name, cpu_dai_name);
goto config_err;
}
if (!runtime->hw.formats) {
printk(KERN_ERR "ASoC: %s <-> %s No matching formats\n",
- codec_dai_name, cpu_dai->name);
+ codec_dai_name, cpu_dai_name);
goto config_err;
}
if (!runtime->hw.channels_min || !runtime->hw.channels_max ||
runtime->hw.channels_min > runtime->hw.channels_max) {
printk(KERN_ERR "ASoC: %s <-> %s No matching channels\n",
- codec_dai_name, cpu_dai->name);
+ codec_dai_name, cpu_dai_name);
goto config_err;
}
soc_pcm_apply_msb(substream);
/* Symmetry only applies if we've already got an active stream. */
- if (cpu_dai->active) {
- ret = soc_pcm_apply_symmetry(substream, cpu_dai);
- if (ret != 0)
- goto config_err;
- }
-
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if (codec_dai->active) {
- ret = soc_pcm_apply_symmetry(substream, codec_dai);
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->active) {
+ ret = soc_pcm_apply_symmetry(substream, dai);
if (ret != 0)
goto config_err;
}
}
pr_debug("ASoC: %s <-> %s info:\n",
- codec_dai_name, cpu_dai->name);
+ codec_dai_name, cpu_dai_name);
pr_debug("ASoC: rate mask 0x%x\n", runtime->hw.rates);
pr_debug("ASoC: min ch %d max ch %d\n", runtime->hw.channels_min,
runtime->hw.channels_max);
@@ -647,20 +889,13 @@ dynamic:
return 0;
config_err:
- soc_rtd_shutdown(rtd, substream);
-
-machine_err:
- i = rtd->num_codecs;
-
-codec_dai_err:
- for_each_rtd_codec_dai_rollback(rtd, i, codec_dai)
- snd_soc_dai_shutdown(codec_dai, substream);
+ for_each_rtd_dais(rtd, i, dai)
+ snd_soc_dai_shutdown(dai, substream);
+ soc_rtd_shutdown(rtd, substream);
+rtd_startup_err:
+ soc_pcm_components_close(substream);
component_err:
- soc_pcm_components_close(substream, component);
-
- snd_soc_dai_shutdown(cpu_dai, substream);
-out:
mutex_unlock(&rtd->card->pcm_mutex);
for_each_rtd_components(rtd, i, component) {
@@ -686,59 +921,6 @@ static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
}
/*
- * Called by ALSA when a PCM substream is closed. Private data can be
- * freed here. The cpu DAI, codec DAI, machine and components are also
- * shutdown.
- */
-static int soc_pcm_close(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
- int i;
-
- mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
-
- snd_soc_runtime_deactivate(rtd, substream->stream);
-
- /* clear the corresponding DAIs rate when inactive */
- if (!cpu_dai->active)
- cpu_dai->rate = 0;
-
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if (!codec_dai->active)
- codec_dai->rate = 0;
- }
-
- snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
-
- snd_soc_dai_shutdown(cpu_dai, substream);
-
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- snd_soc_dai_shutdown(codec_dai, substream);
-
- soc_rtd_shutdown(rtd, substream);
-
- soc_pcm_components_close(substream, NULL);
-
- snd_soc_dapm_stream_stop(rtd, substream->stream);
-
- mutex_unlock(&rtd->card->pcm_mutex);
-
- for_each_rtd_components(rtd, i, component) {
- pm_runtime_mark_last_busy(component->dev);
- pm_runtime_put_autosuspend(component->dev);
- }
-
- for_each_rtd_components(rtd, i, component)
- if (!component->active)
- pinctrl_pm_select_sleep_state(component->dev);
-
- return 0;
-}
-
-/*
* Called by ALSA when the PCM substream is prepared, can set format, sample
* rate, etc. This function is non atomic and can be called multiple times,
* it can refer to the runtime info.
@@ -747,8 +929,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
int i, ret = 0;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
@@ -769,23 +950,15 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- ret = snd_soc_dai_prepare(codec_dai, substream);
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = snd_soc_dai_prepare(dai, substream);
if (ret < 0) {
- dev_err(codec_dai->dev,
- "ASoC: codec DAI prepare error: %d\n",
- ret);
+ dev_err(dai->dev,
+ "ASoC: DAI prepare error: %d\n", ret);
goto out;
}
}
- ret = snd_soc_dai_prepare(cpu_dai, substream);
- if (ret < 0) {
- dev_err(cpu_dai->dev,
- "ASoC: cpu DAI prepare error: %d\n", ret);
- goto out;
- }
-
/* cancel any delayed stream shutdown that is pending */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
rtd->pop_wait) {
@@ -796,10 +969,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
snd_soc_dapm_stream_event(rtd, substream->stream,
SND_SOC_DAPM_STREAM_START);
- for_each_rtd_codec_dai(rtd, i, codec_dai)
- snd_soc_dai_digital_mute(codec_dai, 0,
- substream->stream);
- snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
+ for_each_rtd_dais(rtd, i, dai)
+ snd_soc_dai_digital_mute(dai, 0, substream->stream);
out:
mutex_unlock(&rtd->card->pcm_mutex);
@@ -822,13 +993,15 @@ static int soc_pcm_components_hw_free(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- int i, ret = 0;
+ int i, r, ret = 0;
for_each_rtd_components(rtd, i, component) {
if (component == last)
break;
- ret |= snd_soc_component_hw_free(component, substream);
+ r = snd_soc_component_hw_free(component, substream);
+ if (r < 0)
+ ret = r; /* use last ret */
}
return ret;
@@ -844,7 +1017,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
int i, ret = 0;
@@ -861,7 +1034,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
goto out;
}
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
struct snd_pcm_hw_params codec_params;
/*
@@ -908,17 +1081,26 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
snd_soc_dapm_update_dai(substream, &codec_params, codec_dai);
}
- ret = snd_soc_dai_hw_params(cpu_dai, substream, params);
- if (ret < 0)
- goto interface_err;
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ /*
+ * Skip CPUs which don't support the current stream
+ * type. See soc_pcm_init_runtime_hw() for more details
+ */
+ if (!snd_soc_dai_stream_valid(cpu_dai, substream->stream))
+ continue;
- /* store the parameters for each DAIs */
- cpu_dai->rate = params_rate(params);
- cpu_dai->channels = params_channels(params);
- cpu_dai->sample_bits =
- snd_pcm_format_physical_width(params_format(params));
+ ret = snd_soc_dai_hw_params(cpu_dai, substream, params);
+ if (ret < 0)
+ goto interface_err;
+
+ /* store the parameters for each DAI */
+ cpu_dai->rate = params_rate(params);
+ cpu_dai->channels = params_channels(params);
+ cpu_dai->sample_bits =
+ snd_pcm_format_physical_width(params_format(params));
- snd_soc_dapm_update_dai(substream, params, cpu_dai);
+ snd_soc_dapm_update_dai(substream, params, cpu_dai);
+ }
for_each_rtd_components(rtd, i, component) {
ret = snd_soc_component_hw_params(component, substream, params);
@@ -938,14 +1120,21 @@ out:
component_err:
soc_pcm_components_hw_free(substream, component);
- snd_soc_dai_hw_free(cpu_dai, substream);
- cpu_dai->rate = 0;
+ i = rtd->num_cpus;
interface_err:
+ for_each_rtd_cpu_dais_rollback(rtd, i, cpu_dai) {
+ if (!snd_soc_dai_stream_valid(cpu_dai, substream->stream))
+ continue;
+
+ snd_soc_dai_hw_free(cpu_dai, substream);
+ cpu_dai->rate = 0;
+ }
+
i = rtd->num_codecs;
codec_err:
- for_each_rtd_codec_dai_rollback(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais_rollback(rtd, i, codec_dai) {
if (!snd_soc_dai_stream_valid(codec_dai, substream->stream))
continue;
@@ -965,34 +1154,23 @@ codec_err:
static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
- bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct snd_soc_dai *dai;
int i;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
/* clear the corresponding DAIs parameters when going to be inactive */
- if (cpu_dai->active == 1) {
- cpu_dai->rate = 0;
- cpu_dai->channels = 0;
- cpu_dai->sample_bits = 0;
- }
+ for_each_rtd_dais(rtd, i, dai) {
+ int active = dai->stream_active[substream->stream];
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if (codec_dai->active == 1) {
- codec_dai->rate = 0;
- codec_dai->channels = 0;
- codec_dai->sample_bits = 0;
+ if (dai->active == 1) {
+ dai->rate = 0;
+ dai->channels = 0;
+ dai->sample_bits = 0;
}
- }
- /* apply codec digital mute */
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if ((playback && codec_dai->playback_active == 1) ||
- (!playback && codec_dai->capture_active == 1))
- snd_soc_dai_digital_mute(codec_dai, 1,
- substream->stream);
+ if (active == 1)
+ snd_soc_dai_digital_mute(dai, 1, substream->stream);
}
/* free any machine hw params */
@@ -1002,15 +1180,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
soc_pcm_components_hw_free(substream, NULL);
/* now free hw params for the DAIs */
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if (!snd_soc_dai_stream_valid(codec_dai, substream->stream))
+ for_each_rtd_dais(rtd, i, dai) {
+ if (!snd_soc_dai_stream_valid(dai, substream->stream))
continue;
- snd_soc_dai_hw_free(codec_dai, substream);
+ snd_soc_dai_hw_free(dai, substream);
}
- snd_soc_dai_hw_free(cpu_dai, substream);
-
mutex_unlock(&rtd->card->pcm_mutex);
return 0;
}
@@ -1019,8 +1195,7 @@ static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
int i, ret;
ret = soc_rtd_trigger(rtd, substream, cmd);
@@ -1033,12 +1208,8 @@ static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd)
return ret;
}
- ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
- if (ret < 0)
- return ret;
-
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- ret = snd_soc_dai_trigger(codec_dai, substream, cmd);
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = snd_soc_dai_trigger(dai, substream, cmd);
if (ret < 0)
return ret;
}
@@ -1050,20 +1221,15 @@ static int soc_pcm_trigger_stop(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
int i, ret;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- ret = snd_soc_dai_trigger(codec_dai, substream, cmd);
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = snd_soc_dai_trigger(dai, substream, cmd);
if (ret < 0)
return ret;
}
- ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
- if (ret < 0)
- return ret;
-
for_each_rtd_components(rtd, i, component) {
ret = snd_soc_component_trigger(component, substream, cmd);
if (ret < 0)
@@ -1103,20 +1269,15 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
int i, ret;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- ret = snd_soc_dai_bespoke_trigger(codec_dai, substream, cmd);
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = snd_soc_dai_bespoke_trigger(dai, substream, cmd);
if (ret < 0)
return ret;
}
- ret = snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
- if (ret < 0)
- return ret;
-
return 0;
}
/*
@@ -1127,12 +1288,13 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t offset = 0;
snd_pcm_sframes_t delay = 0;
snd_pcm_sframes_t codec_delay = 0;
+ snd_pcm_sframes_t cpu_delay = 0;
int i;
/* clearing the previous total delay */
@@ -1143,9 +1305,13 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
/* base delay if assigned in pointer callback */
delay = runtime->delay;
- delay += snd_soc_dai_delay(cpu_dai, substream);
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ cpu_delay = max(cpu_delay,
+ snd_soc_dai_delay(cpu_dai, substream));
+ }
+ delay += cpu_delay;
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
codec_delay = max(codec_delay,
snd_soc_dai_delay(codec_dai, substream));
}
@@ -1162,9 +1328,6 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
{
struct snd_soc_dpcm *dpcm;
unsigned long flags;
-#ifdef CONFIG_DEBUG_FS
- char *name;
-#endif
/* only add new dpcms */
for_each_dpcm_be(fe, stream, dpcm) {
@@ -1189,17 +1352,8 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
stream ? "capture" : "playback", fe->dai_link->name,
stream ? "<-" : "->", be->dai_link->name);
-#ifdef CONFIG_DEBUG_FS
- name = kasprintf(GFP_KERNEL, "%s:%s", be->dai_link->name,
- stream ? "capture" : "playback");
- if (name) {
- dpcm->debugfs_state = debugfs_create_dir(name,
- fe->debugfs_dpcm_root);
- debugfs_create_u32("state", 0644, dpcm->debugfs_state,
- &dpcm->state);
- kfree(name);
- }
-#endif
+ dpcm_create_debugfs_state(dpcm, stream);
+
return 1;
}
@@ -1252,9 +1406,8 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
/* BEs still alive need new FE */
dpcm_be_reparent(fe, dpcm->be, stream);
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(dpcm->debugfs_state);
-#endif
+ dpcm_remove_debugfs_state(dpcm);
+
spin_lock_irqsave(&fe->card->dpcm_lock, flags);
list_del(&dpcm->list_be);
list_del(&dpcm->list_fe);
@@ -1268,74 +1421,41 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
struct snd_soc_dapm_widget *widget, int stream)
{
struct snd_soc_pcm_runtime *be;
+ struct snd_soc_dapm_widget *w;
struct snd_soc_dai *dai;
int i;
dev_dbg(card->dev, "ASoC: find BE for widget %s\n", widget->name);
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- for_each_card_rtds(card, be) {
-
- if (!be->dai_link->no_pcm)
- continue;
-
- dev_dbg(card->dev, "ASoC: try BE : %s\n",
- be->cpu_dai->playback_widget ?
- be->cpu_dai->playback_widget->name : "(not set)");
+ for_each_card_rtds(card, be) {
- if (be->cpu_dai->playback_widget == widget)
- return be;
-
- for_each_rtd_codec_dai(be, i, dai) {
- if (dai->playback_widget == widget)
- return be;
- }
- }
- } else {
-
- for_each_card_rtds(card, be) {
+ if (!be->dai_link->no_pcm)
+ continue;
- if (!be->dai_link->no_pcm)
- continue;
+ for_each_rtd_dais(be, i, dai) {
+ w = snd_soc_dai_get_widget(dai, stream);
- dev_dbg(card->dev, "ASoC: try BE %s\n",
- be->cpu_dai->capture_widget ?
- be->cpu_dai->capture_widget->name : "(not set)");
+ dev_dbg(card->dev, "ASoC: try BE : %s\n",
+ w ? w->name : "(not set)");
- if (be->cpu_dai->capture_widget == widget)
+ if (w == widget)
return be;
-
- for_each_rtd_codec_dai(be, i, dai) {
- if (dai->capture_widget == widget)
- return be;
- }
}
}
- /* dai link name and stream name set correctly ? */
- dev_err(card->dev, "ASoC: can't get %s BE for %s\n",
- stream ? "capture" : "playback", widget->name);
+ /* Widget provided is not a BE */
return NULL;
}
-static inline struct snd_soc_dapm_widget *
- dai_get_widget(struct snd_soc_dai *dai, int stream)
-{
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- return dai->playback_widget;
- else
- return dai->capture_widget;
-}
-
static int widget_in_list(struct snd_soc_dapm_widget_list *list,
struct snd_soc_dapm_widget *widget)
{
+ struct snd_soc_dapm_widget *w;
int i;
- for (i = 0; i < list->num_widgets; i++) {
- if (widget == list->widgets[i])
+ for_each_dapm_widgets(list, i, w)
+ if (widget == w)
return 1;
- }
return 0;
}
@@ -1345,36 +1465,17 @@ static bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget,
{
struct snd_soc_card *card = widget->dapm->card;
struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_dai *dai;
- int i;
+ int stream;
- if (dir == SND_SOC_DAPM_DIR_OUT) {
- for_each_card_rtds(card, rtd) {
- if (!rtd->dai_link->no_pcm)
- continue;
-
- if (rtd->cpu_dai->playback_widget == widget)
- return true;
-
- for_each_rtd_codec_dai(rtd, i, dai) {
- if (dai->playback_widget == widget)
- return true;
- }
- }
- } else { /* SND_SOC_DAPM_DIR_IN */
- for_each_card_rtds(card, rtd) {
- if (!rtd->dai_link->no_pcm)
- continue;
-
- if (rtd->cpu_dai->capture_widget == widget)
- return true;
+ /* adjust dir to stream */
+ if (dir == SND_SOC_DAPM_DIR_OUT)
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ else
+ stream = SNDRV_PCM_STREAM_CAPTURE;
- for_each_rtd_codec_dai(rtd, i, dai) {
- if (dai->capture_widget == widget)
- return true;
- }
- }
- }
+ rtd = dpcm_get_be(card, widget, stream);
+ if (rtd)
+ return true;
return false;
}
@@ -1385,6 +1486,12 @@ int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
int paths;
+ if (fe->num_cpus > 1) {
+ dev_err(fe->dev,
+ "%s doesn't support Multi CPU yet\n", __func__);
+ return -EINVAL;
+ }
+
/* get number of valid DAI paths and their widgets */
paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list,
dpcm_end_walk_at_be);
@@ -1395,37 +1502,42 @@ int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
return paths;
}
-static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
- struct snd_soc_dapm_widget_list **list_)
+void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
+{
+ snd_soc_dapm_dai_free_widgets(list);
+}
+
+static bool dpcm_be_is_active(struct snd_soc_dpcm *dpcm, int stream,
+ struct snd_soc_dapm_widget_list *list)
{
- struct snd_soc_dpcm *dpcm;
- struct snd_soc_dapm_widget_list *list = *list_;
struct snd_soc_dapm_widget *widget;
struct snd_soc_dai *dai;
- int prune = 0;
- int do_prune;
-
- /* Destroy any old FE <--> BE connections */
- for_each_dpcm_be(fe, stream, dpcm) {
- unsigned int i;
+ unsigned int i;
- /* is there a valid CPU DAI widget for this BE */
- widget = dai_get_widget(dpcm->be->cpu_dai, stream);
+ /* is there a valid DAI widget for this BE */
+ for_each_rtd_dais(dpcm->be, i, dai) {
+ widget = snd_soc_dai_get_widget(dai, stream);
- /* prune the BE if it's no longer in our active list */
+ /*
+		 * The BE is pruned only if none of the DAI
+ * widgets are in the active list.
+ */
if (widget && widget_in_list(list, widget))
- continue;
+ return true;
+ }
- /* is there a valid CODEC DAI widget for this BE */
- do_prune = 1;
- for_each_rtd_codec_dai(dpcm->be, i, dai) {
- widget = dai_get_widget(dai, stream);
+ return false;
+}
- /* prune the BE if it's no longer in our active list */
- if (widget && widget_in_list(list, widget))
- do_prune = 0;
- }
- if (!do_prune)
+static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
+ struct snd_soc_dapm_widget_list **list_)
+{
+ struct snd_soc_dpcm *dpcm;
+ int prune = 0;
+
+ /* Destroy any old FE <--> BE connections */
+ for_each_dpcm_be(fe, stream, dpcm) {
+ if (dpcm_be_is_active(dpcm, stream, *list_))
continue;
dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n",
@@ -1446,12 +1558,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_card *card = fe->card;
struct snd_soc_dapm_widget_list *list = *list_;
struct snd_soc_pcm_runtime *be;
+ struct snd_soc_dapm_widget *widget;
int i, new = 0, err;
/* Create any new FE <--> BE connections */
- for (i = 0; i < list->num_widgets; i++) {
+ for_each_dapm_widgets(list, i, widget) {
- switch (list->widgets[i]->id) {
+ switch (widget->id) {
case snd_soc_dapm_dai_in:
if (stream != SNDRV_PCM_STREAM_PLAYBACK)
continue;
@@ -1465,17 +1578,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
}
/* is there a valid BE rtd for this widget */
- be = dpcm_get_be(card, list->widgets[i], stream);
+ be = dpcm_get_be(card, widget, stream);
if (!be) {
dev_err(fe->dev, "ASoC: no BE found for %s\n",
- list->widgets[i]->name);
+ widget->name);
continue;
}
- /* make sure BE is a real BE */
- if (!be->dai_link->no_pcm)
- continue;
-
/* don't connect if FE is not running */
if (!fe->dpcm[stream].runtime && !fe->fe_compr)
continue;
@@ -1484,7 +1593,7 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
err = dpcm_be_connect(fe, be, stream);
if (err < 0) {
dev_err(fe->dev, "ASoC: can't connect %s\n",
- list->widgets[i]->name);
+ widget->name);
break;
} else if (err == 0) /* already connected */
continue;
@@ -1671,11 +1780,10 @@ static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream,
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
- struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
int i;
- for_each_rtd_codec_dai(be, i, dai) {
+ for_each_rtd_codec_dais(be, i, dai) {
/*
* Skip CODECs which don't support the current stream
* type. See soc_pcm_init_runtime_hw() for more details
@@ -1683,11 +1791,7 @@ static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream,
if (!snd_soc_dai_stream_valid(dai, stream))
continue;
- codec_dai_drv = dai->driver;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- codec_stream = &codec_dai_drv->playback;
- else
- codec_stream = &codec_dai_drv->capture;
+ codec_stream = snd_soc_dai_get_pcm_stream(dai, stream);
*formats &= codec_stream->formats;
}
@@ -1712,30 +1816,33 @@ static void dpcm_runtime_merge_chan(struct snd_pcm_substream *substream,
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
- struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
- struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
struct snd_soc_pcm_stream *cpu_stream;
+ struct snd_soc_dai *dai;
+ int i;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- cpu_stream = &cpu_dai_drv->playback;
- else
- cpu_stream = &cpu_dai_drv->capture;
+ for_each_rtd_cpu_dais(be, i, dai) {
+ /*
+ * Skip CPUs which don't support the current stream
+ * type. See soc_pcm_init_runtime_hw() for more details
+ */
+ if (!snd_soc_dai_stream_valid(dai, stream))
+ continue;
+
+ cpu_stream = snd_soc_dai_get_pcm_stream(dai, stream);
- *channels_min = max(*channels_min, cpu_stream->channels_min);
- *channels_max = min(*channels_max, cpu_stream->channels_max);
+ *channels_min = max(*channels_min,
+ cpu_stream->channels_min);
+ *channels_max = min(*channels_max,
+ cpu_stream->channels_max);
+ }
/*
* chan min/max cannot be enforced if there are multiple CODEC
* DAIs connected to a single CPU DAI, use CPU DAI's directly
*/
if (be->num_codecs == 1) {
- codec_dai_drv = be->codec_dais[0]->driver;
-
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- codec_stream = &codec_dai_drv->playback;
- else
- codec_stream = &codec_dai_drv->capture;
+ codec_stream = snd_soc_dai_get_pcm_stream(be->codec_dais[0], stream);
*channels_min = max(*channels_min,
codec_stream->channels_min);
@@ -1764,41 +1871,23 @@ static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream,
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
- struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
- struct snd_soc_dai_driver *codec_dai_drv;
- struct snd_soc_pcm_stream *codec_stream;
- struct snd_soc_pcm_stream *cpu_stream;
+ struct snd_soc_pcm_stream *pcm;
struct snd_soc_dai *dai;
int i;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- cpu_stream = &cpu_dai_drv->playback;
- else
- cpu_stream = &cpu_dai_drv->capture;
-
- *rate_min = max(*rate_min, cpu_stream->rate_min);
- *rate_max = min_not_zero(*rate_max, cpu_stream->rate_max);
- *rates = snd_pcm_rate_mask_intersect(*rates, cpu_stream->rates);
-
- for_each_rtd_codec_dai(be, i, dai) {
+ for_each_rtd_dais(be, i, dai) {
/*
- * Skip CODECs which don't support the current stream
+ * Skip DAIs which don't support the current stream
* type. See soc_pcm_init_runtime_hw() for more details
*/
if (!snd_soc_dai_stream_valid(dai, stream))
continue;
- codec_dai_drv = dai->driver;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- codec_stream = &codec_dai_drv->playback;
- else
- codec_stream = &codec_dai_drv->capture;
+ pcm = snd_soc_dai_get_pcm_stream(dai, stream);
- *rate_min = max(*rate_min, codec_stream->rate_min);
- *rate_max = min_not_zero(*rate_max,
- codec_stream->rate_max);
- *rates = snd_pcm_rate_mask_intersect(*rates,
- codec_stream->rates);
+ *rate_min = max(*rate_min, pcm->rate_min);
+ *rate_max = min_not_zero(*rate_max, pcm->rate_max);
+ *rates = snd_pcm_rate_mask_intersect(*rates, pcm->rates);
}
}
}
@@ -1807,13 +1896,21 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai_driver *cpu_dai_drv = cpu_dai->driver;
+ struct snd_soc_dai *cpu_dai;
+ int i;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- dpcm_init_runtime_hw(runtime, &cpu_dai_drv->playback);
- else
- dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ /*
+ * Skip CPUs which don't support the current stream
+ * type. See soc_pcm_init_runtime_hw() for more details
+ */
+ if (!snd_soc_dai_stream_valid(cpu_dai, substream->stream))
+ continue;
+
+ dpcm_init_runtime_hw(runtime,
+ snd_soc_dai_get_pcm_stream(cpu_dai,
+ substream->stream));
+ }
dpcm_runtime_merge_format(substream, &runtime->hw.formats);
dpcm_runtime_merge_chan(substream, &runtime->hw.channels_min,
@@ -1850,18 +1947,21 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
{
struct snd_soc_dpcm *dpcm;
struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
- struct snd_soc_dai *fe_cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *fe_cpu_dai;
int err;
+ int i;
/* apply symmetry for FE */
if (soc_pcm_has_symmetry(fe_substream))
fe_substream->runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
- /* Symmetry only applies if we've got an active stream. */
- if (fe_cpu_dai->active) {
- err = soc_pcm_apply_symmetry(fe_substream, fe_cpu_dai);
- if (err < 0)
- return err;
+	for_each_rtd_cpu_dais(fe, i, fe_cpu_dai) {
+ /* Symmetry only applies if we've got an active stream. */
+ if (fe_cpu_dai->active) {
+ err = soc_pcm_apply_symmetry(fe_substream, fe_cpu_dai);
+ if (err < 0)
+ return err;
+ }
}
/* apply symmetry for BE */
@@ -1870,7 +1970,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_dai *dai;
int i;
/* A backend may not have the requested substream */
@@ -1885,17 +1985,9 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
be_substream->runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
/* Symmetry only applies if we've got an active stream. */
- if (rtd->cpu_dai->active) {
- err = soc_pcm_apply_symmetry(fe_substream,
- rtd->cpu_dai);
- if (err < 0)
- return err;
- }
-
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
- if (codec_dai->active) {
- err = soc_pcm_apply_symmetry(fe_substream,
- codec_dai);
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->active) {
+ err = soc_pcm_apply_symmetry(fe_substream, dai);
if (err < 0)
return err;
}
@@ -1913,7 +2005,7 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
- ret = dpcm_be_dai_startup(fe, fe_substream->stream);
+ ret = dpcm_be_dai_startup(fe, stream);
if (ret < 0) {
dev_err(fe->dev,"ASoC: failed to start some BEs %d\n", ret);
goto be_err;
@@ -1934,17 +2026,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
snd_pcm_limit_hw_rates(runtime);
ret = dpcm_apply_symmetry(fe_substream, stream);
- if (ret < 0) {
+ if (ret < 0)
dev_err(fe->dev, "ASoC: failed to apply dpcm symmetry %d\n",
ret);
- goto unwind;
- }
-
- dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
- return 0;
unwind:
- dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
+ if (ret < 0)
+ dpcm_be_dai_startup_unwind(fe, stream);
be_err:
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
return ret;
@@ -1998,7 +2086,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
/* shutdown the BEs */
- dpcm_be_dai_shutdown(fe, substream->stream);
+ dpcm_be_dai_shutdown(fe, stream);
dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
@@ -2176,9 +2264,9 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
- memcpy(&fe->dpcm[substream->stream].hw_params, params,
+ memcpy(&fe->dpcm[stream].hw_params, params,
sizeof(struct snd_pcm_hw_params));
- ret = dpcm_be_dai_hw_params(fe, substream->stream);
+ ret = dpcm_be_dai_hw_params(fe, stream);
if (ret < 0) {
dev_err(fe->dev,"ASoC: hw_params BE failed %d\n", ret);
goto out;
@@ -2500,7 +2588,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
goto out;
}
- ret = dpcm_be_dai_prepare(fe, substream->stream);
+ ret = dpcm_be_dai_prepare(fe, stream);
if (ret < 0)
goto out;
@@ -2652,36 +2740,18 @@ disconnect:
return ret;
}
-static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
-{
- int ret;
-
- dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
- ret = dpcm_run_update_startup(fe, stream);
- if (ret < 0)
- dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
- dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
-
- return ret;
-}
-
-static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
-{
- int ret;
-
- dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
- ret = dpcm_run_update_shutdown(fe, stream);
- if (ret < 0)
- dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
- dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
-
- return ret;
-}
-
static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
{
struct snd_soc_dapm_widget_list *list;
+ int stream;
int count, paths;
+ int ret;
+
+ if (fe->num_cpus > 1) {
+ dev_err(fe->dev,
+ "%s doesn't support Multi CPU yet\n", __func__);
+ return -EINVAL;
+ }
if (!fe->dai_link->dynamic)
return 0;
@@ -2694,74 +2764,53 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
dev_dbg(fe->dev, "ASoC: DPCM %s runtime update for FE %s\n",
new ? "new" : "old", fe->dai_link->name);
- /* skip if FE doesn't have playback capability */
- if (!snd_soc_dai_stream_valid(fe->cpu_dai, SNDRV_PCM_STREAM_PLAYBACK) ||
- !snd_soc_dai_stream_valid(fe->codec_dai, SNDRV_PCM_STREAM_PLAYBACK))
- goto capture;
-
- /* skip if FE isn't currently playing */
- if (!fe->cpu_dai->playback_active || !fe->codec_dai->playback_active)
- goto capture;
-
- paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
- if (paths < 0) {
- dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
- fe->dai_link->name, "playback");
- return paths;
- }
-
- /* update any playback paths */
- count = dpcm_process_paths(fe, SNDRV_PCM_STREAM_PLAYBACK, &list, new);
- if (count) {
- if (new)
- dpcm_run_new_update(fe, SNDRV_PCM_STREAM_PLAYBACK);
- else
- dpcm_run_old_update(fe, SNDRV_PCM_STREAM_PLAYBACK);
-
- dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_PLAYBACK);
- dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
- }
+ for_each_pcm_streams(stream) {
- dpcm_path_put(&list);
+ /* skip if FE doesn't have playback/capture capability */
+ if (!snd_soc_dai_stream_valid(fe->cpu_dai, stream) ||
+ !snd_soc_dai_stream_valid(fe->codec_dai, stream))
+ continue;
-capture:
- /* skip if FE doesn't have capture capability */
- if (!snd_soc_dai_stream_valid(fe->cpu_dai, SNDRV_PCM_STREAM_CAPTURE) ||
- !snd_soc_dai_stream_valid(fe->codec_dai, SNDRV_PCM_STREAM_CAPTURE))
- return 0;
+ /* skip if FE isn't currently playing/capturing */
+ if (!fe->cpu_dai->stream_active[stream] ||
+ !fe->codec_dai->stream_active[stream])
+ continue;
- /* skip if FE isn't currently capturing */
- if (!fe->cpu_dai->capture_active || !fe->codec_dai->capture_active)
- return 0;
+ paths = dpcm_path_get(fe, stream, &list);
+ if (paths < 0) {
+ dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
+ fe->dai_link->name,
+ stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ "playback" : "capture");
+ return paths;
+ }
- paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
- if (paths < 0) {
- dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
- fe->dai_link->name, "capture");
- return paths;
- }
+ /* update any playback/capture paths */
+ count = dpcm_process_paths(fe, stream, &list, new);
+ if (count) {
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
+ if (new)
+ ret = dpcm_run_update_startup(fe, stream);
+ else
+ ret = dpcm_run_update_shutdown(fe, stream);
+			if (ret < 0)
+				dev_err(fe->dev, "ASoC: failed to %s some BEs\n",
+					new ? "startup" : "shutdown");
+ dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
- /* update any old capture paths */
- count = dpcm_process_paths(fe, SNDRV_PCM_STREAM_CAPTURE, &list, new);
- if (count) {
- if (new)
- dpcm_run_new_update(fe, SNDRV_PCM_STREAM_CAPTURE);
- else
- dpcm_run_old_update(fe, SNDRV_PCM_STREAM_CAPTURE);
+ dpcm_clear_pending_state(fe, stream);
+ dpcm_be_disconnect(fe, stream);
+ }
- dpcm_clear_pending_state(fe, SNDRV_PCM_STREAM_CAPTURE);
- dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_CAPTURE);
+ dpcm_path_put(&list);
}
- dpcm_path_put(&list);
-
return 0;
}
/* Called by DAPM mixer/mux changes to update audio routing between PCMs and
* any DAI links.
*/
-int soc_dpcm_runtime_update(struct snd_soc_card *card)
+int snd_soc_dpcm_runtime_update(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *fe;
int ret = 0;
@@ -2785,38 +2834,40 @@ out:
mutex_unlock(&card->mutex);
return ret;
}
-int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
+EXPORT_SYMBOL_GPL(snd_soc_dpcm_runtime_update);
+
+static void dpcm_fe_dai_cleanup(struct snd_pcm_substream *fe_substream)
{
+ struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
struct snd_soc_dpcm *dpcm;
- struct snd_soc_dai *dai;
+ int stream = fe_substream->stream;
- for_each_dpcm_be(fe, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
+ /* mark FE's links ready to prune */
+ for_each_dpcm_be(fe, stream, dpcm)
+ dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
- struct snd_soc_pcm_runtime *be = dpcm->be;
- int i;
+ dpcm_be_disconnect(fe, stream);
- if (be->dai_link->ignore_suspend)
- continue;
+ fe->dpcm[stream].runtime = NULL;
+}
- for_each_rtd_codec_dai(be, i, dai) {
- struct snd_soc_dai_driver *drv = dai->driver;
+static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
+{
+ struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
+ int ret;
- dev_dbg(be->dev, "ASoC: BE digital mute %s\n",
- be->dai_link->name);
+ mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ ret = dpcm_fe_dai_shutdown(fe_substream);
- if (drv->ops && drv->ops->digital_mute &&
- dai->playback_active)
- drv->ops->digital_mute(dai, mute);
- }
- }
+ dpcm_fe_dai_cleanup(fe_substream);
- return 0;
+ mutex_unlock(&fe->card->mutex);
+ return ret;
}
static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
{
struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
- struct snd_soc_dpcm *dpcm;
struct snd_soc_dapm_widget_list *list;
int ret;
int stream = fe_substream->stream;
@@ -2826,8 +2877,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
ret = dpcm_path_get(fe, stream, &list);
if (ret < 0) {
- mutex_unlock(&fe->card->mutex);
- return ret;
+ goto open_end;
} else if (ret == 0) {
dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
fe->dai_link->name, stream ? "capture" : "playback");
@@ -2837,37 +2887,12 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
dpcm_process_paths(fe, stream, &list, 1);
ret = dpcm_fe_dai_startup(fe_substream);
- if (ret < 0) {
- /* clean up all links */
- for_each_dpcm_be(fe, stream, dpcm)
- dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
-
- dpcm_be_disconnect(fe, stream);
- fe->dpcm[stream].runtime = NULL;
- }
+ if (ret < 0)
+ dpcm_fe_dai_cleanup(fe_substream);
dpcm_clear_pending_state(fe, stream);
dpcm_path_put(&list);
- mutex_unlock(&fe->card->mutex);
- return ret;
-}
-
-static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
-{
- struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
- struct snd_soc_dpcm *dpcm;
- int stream = fe_substream->stream, ret;
-
- mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
- ret = dpcm_fe_dai_shutdown(fe_substream);
-
- /* mark FE's links ready to prune */
- for_each_dpcm_be(fe, stream, dpcm)
- dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
-
- dpcm_be_disconnect(fe, stream);
-
- fe->dpcm[stream].runtime = NULL;
+open_end:
mutex_unlock(&fe->card->mutex);
return ret;
}
@@ -2876,7 +2901,7 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
{
struct snd_soc_dai *codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai;
struct snd_soc_component *component;
struct snd_pcm *pcm;
char new_name[64];
@@ -2888,22 +2913,29 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
capture = rtd->dai_link->dpcm_capture;
} else {
/* Adapt stream for codec2codec links */
- struct snd_soc_pcm_stream *cpu_capture = rtd->dai_link->params ?
- &cpu_dai->driver->playback : &cpu_dai->driver->capture;
- struct snd_soc_pcm_stream *cpu_playback = rtd->dai_link->params ?
- &cpu_dai->driver->capture : &cpu_dai->driver->playback;
+ int cpu_capture = rtd->dai_link->params ?
+ SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ int cpu_playback = rtd->dai_link->params ?
+ SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+
+ for_each_rtd_codec_dais(rtd, i, codec_dai) {
+ if (rtd->num_cpus == 1) {
+ cpu_dai = rtd->cpu_dais[0];
+ } else if (rtd->num_cpus == rtd->num_codecs) {
+ cpu_dai = rtd->cpu_dais[i];
+ } else {
+				dev_err(rtd->card->dev,
+					"N CPUs to M CODECs link is not supported yet\n");
+ return -EINVAL;
+ }
- for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) &&
- snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_CAPTURE))
+ snd_soc_dai_stream_valid(cpu_dai, cpu_playback))
playback = 1;
if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_CAPTURE) &&
- snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK))
+ snd_soc_dai_stream_valid(cpu_dai, cpu_capture))
capture = 1;
}
-
- capture = capture && cpu_capture->channels_min;
- playback = playback && cpu_playback->channels_min;
}
if (rtd->dai_link->playback_only) {
@@ -3017,7 +3049,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
out:
dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
(rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
- cpu_dai->name);
+ (rtd->num_cpus > 1) ? "multicpu" : rtd->cpu_dai->name);
return ret;
}
@@ -3050,33 +3082,17 @@ struct snd_pcm_substream *
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_get_substream);
-/* get the BE runtime state */
-enum snd_soc_dpcm_state
- snd_soc_dpcm_be_get_state(struct snd_soc_pcm_runtime *be, int stream)
-{
- return be->dpcm[stream].state;
-}
-EXPORT_SYMBOL_GPL(snd_soc_dpcm_be_get_state);
-
-/* set the BE runtime state */
-void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be,
- int stream, enum snd_soc_dpcm_state state)
-{
- be->dpcm[stream].state = state;
-}
-EXPORT_SYMBOL_GPL(snd_soc_dpcm_be_set_state);
-
-/*
- * We can only hw_free, stop, pause or suspend a BE DAI if any of it's FE
- * are not running, paused or suspended for the specified stream direction.
- */
-int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream)
+static int snd_soc_dpcm_check_state(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be,
+ int stream,
+ const enum snd_soc_dpcm_state *states,
+ int num_states)
{
struct snd_soc_dpcm *dpcm;
int state;
int ret = 1;
unsigned long flags;
+ int i;
spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_fe(be, stream, dpcm) {
@@ -3085,18 +3101,34 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
continue;
state = dpcm->fe->dpcm[stream].state;
- if (state == SND_SOC_DPCM_STATE_START ||
- state == SND_SOC_DPCM_STATE_PAUSED ||
- state == SND_SOC_DPCM_STATE_SUSPEND) {
- ret = 0;
- break;
+ for (i = 0; i < num_states; i++) {
+ if (state == states[i]) {
+ ret = 0;
+ break;
+ }
}
}
spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
- /* it's safe to free/stop this BE DAI */
+	/* it's safe to perform the requested operation on this BE DAI */
return ret;
}
+
+/*
+ * We can only hw_free, stop, pause or suspend a BE DAI if none of its FEs
+ * are running, paused or suspended for the specified stream direction.
+ */
+int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
+ struct snd_soc_pcm_runtime *be, int stream)
+{
+ const enum snd_soc_dpcm_state state[] = {
+ SND_SOC_DPCM_STATE_START,
+ SND_SOC_DPCM_STATE_PAUSED,
+ SND_SOC_DPCM_STATE_SUSPEND,
+ };
+
+ return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
+}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);
/*
@@ -3106,168 +3138,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);
int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be, int stream)
{
- struct snd_soc_dpcm *dpcm;
- int state;
- int ret = 1;
- unsigned long flags;
-
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
- for_each_dpcm_fe(be, stream, dpcm) {
-
- if (dpcm->fe == fe)
- continue;
+ const enum snd_soc_dpcm_state state[] = {
+ SND_SOC_DPCM_STATE_START,
+ SND_SOC_DPCM_STATE_PAUSED,
+ SND_SOC_DPCM_STATE_SUSPEND,
+ SND_SOC_DPCM_STATE_PREPARE,
+ };
- state = dpcm->fe->dpcm[stream].state;
- if (state == SND_SOC_DPCM_STATE_START ||
- state == SND_SOC_DPCM_STATE_PAUSED ||
- state == SND_SOC_DPCM_STATE_SUSPEND ||
- state == SND_SOC_DPCM_STATE_PREPARE) {
- ret = 0;
- break;
- }
- }
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
-
- /* it's safe to change hw_params */
- return ret;
+ return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
-
-#ifdef CONFIG_DEBUG_FS
-static const char *dpcm_state_string(enum snd_soc_dpcm_state state)
-{
- switch (state) {
- case SND_SOC_DPCM_STATE_NEW:
- return "new";
- case SND_SOC_DPCM_STATE_OPEN:
- return "open";
- case SND_SOC_DPCM_STATE_HW_PARAMS:
- return "hw_params";
- case SND_SOC_DPCM_STATE_PREPARE:
- return "prepare";
- case SND_SOC_DPCM_STATE_START:
- return "start";
- case SND_SOC_DPCM_STATE_STOP:
- return "stop";
- case SND_SOC_DPCM_STATE_SUSPEND:
- return "suspend";
- case SND_SOC_DPCM_STATE_PAUSED:
- return "paused";
- case SND_SOC_DPCM_STATE_HW_FREE:
- return "hw_free";
- case SND_SOC_DPCM_STATE_CLOSE:
- return "close";
- }
-
- return "unknown";
-}
-
-static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
- int stream, char *buf, size_t size)
-{
- struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
- struct snd_soc_dpcm *dpcm;
- ssize_t offset = 0;
- unsigned long flags;
-
- /* FE state */
- offset += scnprintf(buf + offset, size - offset,
- "[%s - %s]\n", fe->dai_link->name,
- stream ? "Capture" : "Playback");
-
- offset += scnprintf(buf + offset, size - offset, "State: %s\n",
- dpcm_state_string(fe->dpcm[stream].state));
-
- if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
- (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += scnprintf(buf + offset, size - offset,
- "Hardware Params: "
- "Format = %s, Channels = %d, Rate = %d\n",
- snd_pcm_format_name(params_format(params)),
- params_channels(params),
- params_rate(params));
-
- /* BEs state */
- offset += scnprintf(buf + offset, size - offset, "Backends:\n");
-
- if (list_empty(&fe->dpcm[stream].be_clients)) {
- offset += scnprintf(buf + offset, size - offset,
- " No active DSP links\n");
- goto out;
- }
-
- spin_lock_irqsave(&fe->card->dpcm_lock, flags);
- for_each_dpcm_be(fe, stream, dpcm) {
- struct snd_soc_pcm_runtime *be = dpcm->be;
- params = &dpcm->hw_params;
-
- offset += scnprintf(buf + offset, size - offset,
- "- %s\n", be->dai_link->name);
-
- offset += scnprintf(buf + offset, size - offset,
- " State: %s\n",
- dpcm_state_string(be->dpcm[stream].state));
-
- if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
- (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += scnprintf(buf + offset, size - offset,
- " Hardware Params: "
- "Format = %s, Channels = %d, Rate = %d\n",
- snd_pcm_format_name(params_format(params)),
- params_channels(params),
- params_rate(params));
- }
- spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
-out:
- return offset;
-}
-
-static ssize_t dpcm_state_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct snd_soc_pcm_runtime *fe = file->private_data;
- ssize_t out_count = PAGE_SIZE, offset = 0, ret = 0;
- char *buf;
-
- buf = kmalloc(out_count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (snd_soc_dai_stream_valid(fe->cpu_dai, SNDRV_PCM_STREAM_PLAYBACK))
- offset += dpcm_show_state(fe, SNDRV_PCM_STREAM_PLAYBACK,
- buf + offset, out_count - offset);
-
- if (snd_soc_dai_stream_valid(fe->cpu_dai, SNDRV_PCM_STREAM_CAPTURE))
- offset += dpcm_show_state(fe, SNDRV_PCM_STREAM_CAPTURE,
- buf + offset, out_count - offset);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
-
- kfree(buf);
- return ret;
-}
-
-static const struct file_operations dpcm_state_fops = {
- .open = simple_open,
- .read = dpcm_state_read_file,
- .llseek = default_llseek,
-};
-
-void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
-{
- if (!rtd->dai_link)
- return;
-
- if (!rtd->dai_link->dynamic)
- return;
-
- if (!rtd->card->debugfs_card_root)
- return;
-
- rtd->debugfs_dpcm_root = debugfs_create_dir(rtd->dai_link->name,
- rtd->card->debugfs_card_root);
-
- debugfs_create_file("state", 0444, rtd->debugfs_dpcm_root,
- rtd, &dpcm_state_fops);
-}
-#endif
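
Two conventions introduced by the soc-pcm.c changes above are worth spelling
out. First, the for_each_rtd_*() iterators that replace the open-coded
cpu_dai/codec_dai handling are thin loops over the runtime's DAI arrays; a
minimal sketch of their assumed shape (the real macros live in
include/sound/soc.h, and the combined dais[] array is assumed to hold the
CPU DAIs first, followed by the codec DAIs):

#define for_each_rtd_cpu_dais(rtd, i, dai)				\
	for ((i) = 0;							\
	     ((i) < (rtd)->num_cpus) && ((dai) = (rtd)->cpu_dais[i]);	\
	     (i)++)

#define for_each_rtd_dais(rtd, i, dai)					\
	for ((i) = 0;							\
	     ((i) < (rtd)->num_cpus + (rtd)->num_codecs) &&		\
	     ((dai) = (rtd)->dais[i]);					\
	     (i)++)

Second, the new table-driven snd_soc_dpcm_check_state() helper reduces every
"is it safe to do X to this BE?" predicate to a list of FE states that forbid
the operation. A hypothetical further user inside soc-pcm.c could look like
this (snd_soc_dpcm_can_be_prepared() is an illustration, not part of the
patch):

static int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
					struct snd_soc_pcm_runtime *be,
					int stream)
{
	/* a BE must not be re-prepared while any FE is started, paused or suspended */
	const enum snd_soc_dpcm_state state[] = {
		SND_SOC_DPCM_STATE_START,
		SND_SOC_DPCM_STATE_PAUSED,
		SND_SOC_DPCM_STATE_SUSPEND,
	};

	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
}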
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 575da6aba807..1f81cd2d29cf 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -251,7 +251,7 @@ static int soc_tplg_vendor_load_(struct soc_tplg *tplg,
{
int ret = 0;
- if (tplg->comp && tplg->ops && tplg->ops->vendor_load)
+ if (tplg->ops && tplg->ops->vendor_load)
ret = tplg->ops->vendor_load(tplg->comp, tplg->index, hdr);
else {
dev_err(tplg->dev, "ASoC: no vendor load callback for ID %d\n",
@@ -283,7 +283,7 @@ static int soc_tplg_vendor_load(struct soc_tplg *tplg,
static int soc_tplg_widget_load(struct soc_tplg *tplg,
struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *tplg_w)
{
- if (tplg->comp && tplg->ops && tplg->ops->widget_load)
+ if (tplg->ops && tplg->ops->widget_load)
return tplg->ops->widget_load(tplg->comp, tplg->index, w,
tplg_w);
@@ -295,7 +295,7 @@ static int soc_tplg_widget_load(struct soc_tplg *tplg,
static int soc_tplg_widget_ready(struct soc_tplg *tplg,
struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *tplg_w)
{
- if (tplg->comp && tplg->ops && tplg->ops->widget_ready)
+ if (tplg->ops && tplg->ops->widget_ready)
return tplg->ops->widget_ready(tplg->comp, tplg->index, w,
tplg_w);
@@ -307,7 +307,7 @@ static int soc_tplg_dai_load(struct soc_tplg *tplg,
struct snd_soc_dai_driver *dai_drv,
struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai)
{
- if (tplg->comp && tplg->ops && tplg->ops->dai_load)
+ if (tplg->ops && tplg->ops->dai_load)
return tplg->ops->dai_load(tplg->comp, tplg->index, dai_drv,
pcm, dai);
@@ -318,7 +318,7 @@ static int soc_tplg_dai_load(struct soc_tplg *tplg,
static int soc_tplg_dai_link_load(struct soc_tplg *tplg,
struct snd_soc_dai_link *link, struct snd_soc_tplg_link_config *cfg)
{
- if (tplg->comp && tplg->ops && tplg->ops->link_load)
+ if (tplg->ops && tplg->ops->link_load)
return tplg->ops->link_load(tplg->comp, tplg->index, link, cfg);
return 0;
@@ -327,7 +327,7 @@ static int soc_tplg_dai_link_load(struct soc_tplg *tplg,
/* tell the component driver that all firmware has been loaded in this request */
static void soc_tplg_complete(struct soc_tplg *tplg)
{
- if (tplg->comp && tplg->ops && tplg->ops->complete)
+ if (tplg->ops && tplg->ops->complete)
tplg->ops->complete(tplg->comp);
}
@@ -684,7 +684,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_bind_event);
static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
struct snd_kcontrol_new *k, struct snd_soc_tplg_ctl_hdr *hdr)
{
- if (tplg->comp && tplg->ops && tplg->ops->control_load)
+ if (tplg->ops && tplg->ops->control_load)
return tplg->ops->control_load(tplg->comp, tplg->index, k,
hdr);
@@ -1174,7 +1174,7 @@ static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg,
static int soc_tplg_add_route(struct soc_tplg *tplg,
struct snd_soc_dapm_route *route)
{
- if (tplg->comp && tplg->ops && tplg->ops->dapm_route_load)
+ if (tplg->ops && tplg->ops->dapm_route_load)
return tplg->ops->dapm_route_load(tplg->comp, tplg->index,
route);
@@ -2564,7 +2564,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
}
/* pass control to component driver for optional further init */
- if (tplg->comp && tplg->ops && tplg->ops->manifest)
+ if (tplg->ops && tplg->ops->manifest)
ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
if (!abi_match) /* free the duplicated one */
@@ -2736,6 +2736,10 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
struct soc_tplg tplg;
int ret;
+	/* the component must exist, since it keeps and references data while parsing */
+ if (!comp)
+ return -EINVAL;
+
/* setup parsing context */
memset(&tplg, 0, sizeof(tplg));
tplg.fw = fw;
@@ -2774,7 +2778,7 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
{
struct snd_soc_dapm_widget *w, *next_w;
- list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
+ for_each_card_widgets_safe(dapm->card, w, next_w) {
/* make sure we are a widget with correct context */
if (w->dobj.type != SND_SOC_DOBJ_WIDGET || w->dapm != dapm)
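
For reference, the for_each_card_widgets_safe() iterator adopted above is, by
all appearances, a direct wrapper over the list helper it replaces; its
assumed definition (the real macro lives in include/sound/soc.h):

#define for_each_card_widgets_safe(card, w, _w)			\
	list_for_each_entry_safe(w, _w, &(card)->widgets, list)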
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index 827b0ec92522..4dda4b62509f 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -41,6 +41,15 @@ config SND_SOC_SOF_OF
required to enable i.MX8 devices.
Say Y if you need this option. If unsure select "N".
+config SND_SOC_SOF_DEBUG_PROBES
+ bool "SOF enable data probing"
+ select SND_SOC_COMPRESS
+ help
+ This option enables the data probing feature that can be used to
+ gather data directly from specific points of the audio pipeline.
+ Say Y if you want to enable probes.
+ If unsure, select "N".
+
config SND_SOC_SOF_DEVELOPER_SUPPORT
bool "SOF developer options support"
depends on EXPERT
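
The new probes option is off by default. A minimal example of enabling it in
a kernel config fragment, assuming the SOF core and platform options are
already configured:

# SOF data probing; SND_SOC_COMPRESS is pulled in by the select above
CONFIG_SND_SOC_SOF_DEBUG_PROBES=y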
diff --git a/sound/soc/sof/Makefile b/sound/soc/sof/Makefile
index 0a8bc72c28a5..8eca2f85c90e 100644
--- a/sound/soc/sof/Makefile
+++ b/sound/soc/sof/Makefile
@@ -2,6 +2,7 @@
snd-sof-objs := core.o ops.o loader.o ipc.o pcm.o pm.o debug.o topology.o\
control.o trace.o utils.o sof-audio.o
+snd-sof-$(CONFIG_SND_SOC_SOF_DEBUG_PROBES) += probe.o compress.o
snd-sof-pci-objs := sof-pci-dev.o
snd-sof-acpi-objs := sof-acpi-dev.o
diff --git a/sound/soc/sof/compress.c b/sound/soc/sof/compress.c
new file mode 100644
index 000000000000..7354dc6a49cf
--- /dev/null
+++ b/sound/soc/sof/compress.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <sound/soc.h>
+#include "compress.h"
+#include "ops.h"
+#include "probe.h"
+
+struct snd_compr_ops sof_probe_compressed_ops = {
+ .copy = sof_probe_compr_copy,
+};
+EXPORT_SYMBOL(sof_probe_compressed_ops);
+
+int sof_probe_compr_open(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ ret = snd_sof_probe_compr_assign(sdev, cstream, dai);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to assign probe stream: %d\n", ret);
+ return ret;
+ }
+
+ sdev->extractor_stream_tag = ret;
+ return 0;
+}
+EXPORT_SYMBOL(sof_probe_compr_open);
+
+int sof_probe_compr_free(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ struct sof_probe_point_desc *desc;
+ size_t num_desc;
+ int i, ret;
+
+ /* disconnect all probe points */
+ ret = sof_ipc_probe_points_info(sdev, &desc, &num_desc);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to get probe points: %d\n", ret);
+ goto exit;
+ }
+
+ for (i = 0; i < num_desc; i++)
+ sof_ipc_probe_points_remove(sdev, &desc[i].buffer_id, 1);
+ kfree(desc);
+
+exit:
+ ret = sof_ipc_probe_deinit(sdev);
+ if (ret < 0)
+ dev_err(dai->dev, "Failed to deinit probe: %d\n", ret);
+
+ sdev->extractor_stream_tag = SOF_PROBE_INVALID_NODE_ID;
+ snd_compr_free_pages(cstream);
+
+ return snd_sof_probe_compr_free(sdev, cstream, dai);
+}
+EXPORT_SYMBOL(sof_probe_compr_free);
+
+int sof_probe_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_compr_runtime *rtd = cstream->runtime;
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
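+	/* back the probe stream with a scatter-gather DMA buffer on the SOF device */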
+ cstream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV_SG;
+ cstream->dma_buffer.dev.dev = sdev->dev;
+ ret = snd_compr_malloc_pages(cstream, rtd->buffer_size);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_sof_probe_compr_set_params(sdev, cstream, params, dai);
+ if (ret < 0)
+ return ret;
+
+ ret = sof_ipc_probe_init(sdev, sdev->extractor_stream_tag,
+ rtd->dma_bytes);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to init probe: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sof_probe_compr_set_params);
+
+int sof_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+
+ return snd_sof_probe_compr_trigger(sdev, cstream, cmd, dai);
+}
+EXPORT_SYMBOL(sof_probe_compr_trigger);
+
+int sof_probe_compr_pointer(struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai)
+{
+ struct snd_sof_dev *sdev =
+ snd_soc_component_get_drvdata(dai->component);
+
+ return snd_sof_probe_compr_pointer(sdev, cstream, tstamp, dai);
+}
+EXPORT_SYMBOL(sof_probe_compr_pointer);
+
+int sof_probe_compr_copy(struct snd_compr_stream *cstream,
+ char __user *buf, size_t count)
+{
+ struct snd_compr_runtime *rtd = cstream->runtime;
+ unsigned int offset, n;
+ void *ptr;
+ int ret;
+
+ if (count > rtd->buffer_size)
+ count = rtd->buffer_size;
+
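+	/*
+	 * The DMA area is used as a ring buffer: derive the read offset from
+	 * the running transfer counter, then copy in up to two chunks when
+	 * the requested region wraps past the end of the buffer.
+	 */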
+ div_u64_rem(rtd->total_bytes_transferred, rtd->buffer_size, &offset);
+ ptr = rtd->dma_area + offset;
+ n = rtd->buffer_size - offset;
+
+ if (count < n) {
+ ret = copy_to_user(buf, ptr, count);
+ } else {
+ ret = copy_to_user(buf, ptr, n);
+ ret += copy_to_user(buf + n, rtd->dma_area, count - n);
+ }
+
+ if (ret)
+ return count - ret;
+ return count;
+}
+EXPORT_SYMBOL(sof_probe_compr_copy);
diff --git a/sound/soc/sof/compress.h b/sound/soc/sof/compress.h
new file mode 100644
index 000000000000..800f163603e1
--- /dev/null
+++ b/sound/soc/sof/compress.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Cezary Rojewski <cezary.rojewski@intel.com>
+ */
+
+#ifndef __SOF_COMPRESS_H
+#define __SOF_COMPRESS_H
+
+#include <sound/compress_driver.h>
+
+extern struct snd_compr_ops sof_probe_compressed_ops;
+
+int sof_probe_compr_open(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int sof_probe_compr_free(struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int sof_probe_compr_set_params(struct snd_compr_stream *cstream,
+ struct snd_compr_params *params, struct snd_soc_dai *dai);
+int sof_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai);
+int sof_probe_compr_pointer(struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai);
+int sof_probe_compr_copy(struct snd_compr_stream *cstream,
+ char __user *buf, size_t count);
+
+#endif
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 34cefbaf2d2a..91acfae7935c 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -14,6 +14,9 @@
#include <sound/sof.h>
#include "sof-priv.h"
#include "ops.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+#include "probe.h"
+#endif
/* see SOF_DBG_ flags */
int sof_core_debug;
@@ -286,12 +289,15 @@ int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
/* initialize sof device */
sdev->dev = dev;
- /* initialize default D0 sub-state */
- sdev->d0_substate = SOF_DSP_D0I0;
+ /* initialize default DSP power state */
+ sdev->dsp_power_state.state = SOF_DSP_PM_D0;
sdev->pdata = plat_data;
sdev->first_boot = true;
sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ sdev->extractor_stream_tag = SOF_PROBE_INVALID_NODE_ID;
+#endif
dev_set_drvdata(dev, sdev);
/* check all mandatory ops */
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index d2b3b99d3a20..b5c0d6cf72cc 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -17,6 +17,221 @@
#include "sof-priv.h"
#include "ops.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+#include "probe.h"
+
+/**
+ * strsplit_u32 - Split string into sequence of u32 tokens
+ * @buf: String to split into tokens.
+ * @delim: String containing delimiter characters.
+ * @tkns: Returned u32 sequence pointer.
+ * @num_tkns: Returned number of tokens obtained.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int
+strsplit_u32(char **buf, const char *delim, u32 **tkns, size_t *num_tkns)
+{
+ char *s;
+ u32 *data, *tmp;
+ size_t count = 0;
+ size_t cap = 32;
+ int ret = 0;
+
+ *tkns = NULL;
+ *num_tkns = 0;
+ data = kcalloc(cap, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ while ((s = strsep(buf, delim)) != NULL) {
+ ret = kstrtouint(s, 0, data + count);
+ if (ret)
+ goto exit;
+ if (++count >= cap) {
+ cap *= 2;
+ tmp = krealloc(data, cap * sizeof(*data), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ data = tmp;
+ }
+ }
+
+ if (!count)
+ goto exit;
+ *tkns = kmemdup(data, count * sizeof(*data), GFP_KERNEL);
+ if (*tkns == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ *num_tkns = count;
+
+exit:
+ kfree(data);
+ return ret;
+}
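The parser above grows its scratch buffer by doubling: it starts with room for 32 tokens, calls krealloc() whenever the buffer fills, then trims the result with kmemdup(). A worked trace for a hypothetical 100-token input:

	/*
	 * cap = 32  -> count reaches 32 -> krealloc to cap = 64
	 * cap = 64  -> count reaches 64 -> krealloc to cap = 128
	 * loop ends at count = 100; kmemdup() copies 100 * sizeof(u32)
	 * bytes and the oversized scratch buffer is freed.
	 */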
+
+static int tokenize_input(const char __user *from, size_t count,
+ loff_t *ppos, u32 **tkns, size_t *num_tkns)
+{
+ char *buf, *walk;
+ int ret;
+
+ buf = kmalloc(count + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_write_to_buffer(buf, count, ppos, from, count);
+ if (ret != count) {
+ ret = ret >= 0 ? -EIO : ret;
+ goto exit;
+ }
+
+ buf[count] = '\0';
+ /* strsep() advances its argument; use a walker so buf stays
+ * valid for the kfree() below and is not leaked
+ */
+ walk = buf;
+ ret = strsplit_u32(&walk, ",", tkns, num_tkns);
+exit:
+ kfree(buf);
+ return ret;
+}
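Together, tokenize_input() and strsplit_u32() turn a debugfs write into a u32 array that probe_points_write() further below reinterprets as probe descriptors. A hedged walk-through, assuming struct sof_probe_point_desc packs three u32 fields (buffer_id, purpose, stream_tag) as the read-side format string suggests:

	/* userspace writes this string to the probe_points file */
	const char example[] = "0x10000005,1,3";

	/*
	 * strsplit_u32() yields tkns = { 0x10000005, 1, 3 }, num_tkns = 3;
	 * probe_points_write() checks that the byte count is a whole
	 * multiple of sizeof(struct sof_probe_point_desc) and maps it as:
	 *   desc[0].buffer_id  = 0x10000005
	 *   desc[0].purpose    = 1
	 *   desc[0].stream_tag = 3
	 * before calling sof_ipc_probe_points_add().
	 */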
+
+static ssize_t probe_points_read(struct file *file,
+ char __user *to, size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ struct sof_probe_point_desc *desc;
+ size_t num_desc, len = 0;
+ char *buf;
+ int i, ret;
+
+ if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
+ dev_warn(sdev->dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = sof_ipc_probe_points_info(sdev, &desc, &num_desc);
+ if (ret < 0)
+ goto exit;
+
+ for (i = 0; i < num_desc; i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len,
+ "Id: %#010x Purpose: %d Node id: %#x\n",
+ desc[i].buffer_id, desc[i].purpose, desc[i].stream_tag);
+ if (ret < 0)
+ goto free_desc;
+ len += ret;
+ }
+
+ ret = simple_read_from_buffer(to, count, ppos, buf, len);
+free_desc:
+ kfree(desc);
+exit:
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t probe_points_write(struct file *file,
+ const char __user *from, size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ struct sof_probe_point_desc *desc;
+ size_t num_tkns, bytes;
+ u32 *tkns;
+ int ret;
+
+ if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
+ dev_warn(sdev->dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
+ if (ret < 0)
+ return ret;
+ bytes = sizeof(*tkns) * num_tkns;
+ if (!num_tkns || (bytes % sizeof(*desc))) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ desc = (struct sof_probe_point_desc *)tkns;
+ ret = sof_ipc_probe_points_add(sdev,
+ desc, bytes / sizeof(*desc));
+ if (!ret)
+ ret = count;
+exit:
+ kfree(tkns);
+ return ret;
+}
+
+static const struct file_operations probe_points_fops = {
+ .open = simple_open,
+ .read = probe_points_read,
+ .write = probe_points_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t probe_points_remove_write(struct file *file,
+ const char __user *from, size_t count, loff_t *ppos)
+{
+ struct snd_sof_dfsentry *dfse = file->private_data;
+ struct snd_sof_dev *sdev = dfse->sdev;
+ size_t num_tkns;
+ u32 *tkns;
+ int ret;
+
+ if (sdev->extractor_stream_tag == SOF_PROBE_INVALID_NODE_ID) {
+ dev_warn(sdev->dev, "no extractor stream running\n");
+ return -ENOENT;
+ }
+
+ ret = tokenize_input(from, count, ppos, &tkns, &num_tkns);
+ if (ret < 0)
+ return ret;
+ if (!num_tkns) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = sof_ipc_probe_points_remove(sdev, tkns, num_tkns);
+ if (!ret)
+ ret = count;
+exit:
+ kfree(tkns);
+ return ret;
+}
+
+static const struct file_operations probe_points_remove_fops = {
+ .open = simple_open,
+ .write = probe_points_remove_write,
+ .llseek = default_llseek,
+};
+
+static int snd_sof_debugfs_probe_item(struct snd_sof_dev *sdev,
+ const char *name, mode_t mode,
+ const struct file_operations *fops)
+{
+ struct snd_sof_dfsentry *dfse;
+
+ dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
+ if (!dfse)
+ return -ENOMEM;
+
+ dfse->type = SOF_DFSENTRY_TYPE_BUF;
+ dfse->sdev = sdev;
+
+ debugfs_create_file(name, mode, sdev->debugfs_root, dfse, fops);
+ /* add to dfsentry list */
+ list_add(&dfse->list, &sdev->dfsentry_list);
+
+ return 0;
+}
+#endif
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
#define MAX_IPC_FLOOD_DURATION_MS 1000
#define MAX_IPC_FLOOD_COUNT 10000
@@ -436,6 +651,17 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
return err;
}
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ err = snd_sof_debugfs_probe_item(sdev, "probe_points",
+ 0644, &probe_points_fops);
+ if (err < 0)
+ return err;
+ err = snd_sof_debugfs_probe_item(sdev, "probe_points_remove",
+ 0200, &probe_points_remove_fops);
+ if (err < 0)
+ return err;
+#endif
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST)
/* create read-write ipc_flood_count debugfs entry */
err = snd_sof_debugfs_buf_item(sdev, NULL, 0,
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index b2556f5e2871..b692752b2178 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -138,7 +138,7 @@ static int imx8_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
/*
* DSP control.
*/
-static int imx8_run(struct snd_sof_dev *sdev)
+static int imx8x_run(struct snd_sof_dev *sdev)
{
struct imx8_priv *dsp_priv = (struct imx8_priv *)sdev->private;
int ret;
@@ -178,6 +178,24 @@ static int imx8_run(struct snd_sof_dev *sdev)
return 0;
}
+static int imx8_run(struct snd_sof_dev *sdev)
+{
+ struct imx8_priv *dsp_priv = (struct imx8_priv *)sdev->private;
+ int ret;
+
+ ret = imx_sc_misc_set_control(dsp_priv->sc_ipc, IMX_SC_R_DSP,
+ IMX_SC_C_OFS_SEL, 0);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Error system address offset source select\n");
+ return ret;
+ }
+
+ imx_sc_pm_cpu_start(dsp_priv->sc_ipc, IMX_SC_R_DSP, true,
+ RESET_VECTOR_VADDR);
+
+ return 0;
+}
+
static int imx8_probe(struct snd_sof_dev *sdev)
{
struct platform_device *pdev =
@@ -360,7 +378,7 @@ static struct snd_soc_dai_driver imx8_dai[] = {
},
};
-/* i.MX8 ops */
+/* i.MX8 ops */
struct snd_sof_dsp_ops sof_imx8_ops = {
/* probe and remove */
.probe = imx8_probe,
@@ -390,6 +408,39 @@ struct snd_sof_dsp_ops sof_imx8_ops = {
/* DAI drivers */
.drv = imx8_dai,
.num_drv = 1, /* we have only 1 ESAI interface on i.MX8 */
+};
+EXPORT_SYMBOL(sof_imx8_ops);
+
+/* i.MX8X ops */
+struct snd_sof_dsp_ops sof_imx8x_ops = {
+ /* probe and remove */
+ .probe = imx8_probe,
+ .remove = imx8_remove,
+ /* DSP core boot */
+ .run = imx8x_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* ipc */
+ .send_msg = imx8_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = imx8_get_mailbox_offset,
+ .get_window_offset = imx8_get_window_offset,
+
+ .ipc_msg_data = imx8_ipc_msg_data,
+ .ipc_pcm_params = imx8_ipc_pcm_params,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+ .get_bar_index = imx8_get_bar_index,
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = imx8_dai,
+ .num_drv = 1, /* we have only 1 ESAI interface on i.MX8X */
/* ALSA HW info flags */
.hw_info = SNDRV_PCM_INFO_MMAP |
@@ -398,6 +449,6 @@ struct snd_sof_dsp_ops sof_imx8_ops = {
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP
};
-EXPORT_SYMBOL(sof_imx8_ops);
+EXPORT_SYMBOL(sof_imx8x_ops);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
index 56a837d2cb95..c9a2bee4b55c 100644
--- a/sound/soc/sof/intel/Kconfig
+++ b/sound/soc/sof/intel/Kconfig
@@ -305,6 +305,15 @@ config SND_SOC_SOF_HDA_AUDIO_CODEC
Say Y if you want to enable HDAudio codecs with SOF.
If unsure select "N".
+config SND_SOC_SOF_HDA_PROBES
+ bool "SOF enable probes over HDA"
+ depends on SND_SOC_SOF_DEBUG_PROBES
+ help
+ This option enables data probing for Intel(R) Skylake and
+ newer platforms.
+ Say Y if you want to enable probes.
+ If unsure, select "N".
+
config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
bool "SOF enable DMI Link L1"
help
@@ -315,17 +324,6 @@ config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
Say Y if you want to enable DMI Link L1
If unsure, select "N".
-config SND_SOC_SOF_HDA_COMMON_HDMI_CODEC
- bool "SOF common HDA HDMI codec driver"
- depends on SND_SOC_SOF_HDA_LINK
- depends on SND_HDA_CODEC_HDMI
- default SND_HDA_CODEC_HDMI
- help
- This adds support for HDMI audio by using the common HDA
- HDMI/DisplayPort codec driver.
- Say Y if you want to use the common codec driver with SOF.
- If unsure select "Y".
-
endif ## SND_SOC_SOF_HDA_COMMON
config SND_SOC_SOF_HDA_LINK_BASELINE
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
index b8f58e006e29..cee02a2e00f4 100644
--- a/sound/soc/sof/intel/Makefile
+++ b/sound/soc/sof/intel/Makefile
@@ -9,6 +9,7 @@ snd-sof-intel-hda-common-objs := hda.o hda-loader.o hda-stream.o hda-trace.o \
hda-dsp.o hda-ipc.o hda-ctrl.o hda-pcm.o \
hda-dai.o hda-bus.o \
apl.o cnl.o
+snd-sof-intel-hda-common-$(CONFIG_SND_SOC_SOF_HDA_PROBES) += hda-compress.o
snd-sof-intel-hda-objs := hda-codec.o
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
index 2483b15699e7..02218d22e51f 100644
--- a/sound/soc/sof/intel/apl.c
+++ b/sound/soc/sof/intel/apl.c
@@ -73,6 +73,15 @@ const struct snd_sof_dsp_ops sof_apl_ops = {
.pcm_trigger = hda_dsp_pcm_trigger,
.pcm_pointer = hda_dsp_pcm_pointer,
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+ /* probe callbacks */
+ .probe_assign = hda_probe_compr_assign,
+ .probe_free = hda_probe_compr_free,
+ .probe_set_params = hda_probe_compr_set_params,
+ .probe_trigger = hda_probe_compr_trigger,
+ .probe_pointer = hda_probe_compr_pointer,
+#endif
+
/* firmware loading */
.load_firmware = snd_sof_load_firmware_raw,
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index 9e2d8afe0535..e427d00eca71 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -65,11 +65,6 @@ static irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
hda_dsp_ipc_get_reply(sdev);
snd_sof_ipc_reply(sdev, msg);
- if (sdev->code_loading) {
- sdev->code_loading = 0;
- wake_up(&sdev->waitq);
- }
-
cnl_ipc_dsp_done(sdev);
spin_unlock_irq(&sdev->ipc_lock);
@@ -171,23 +166,48 @@ static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
static int cnl_ipc_send_msg(struct snd_sof_dev *sdev,
struct snd_sof_ipc_msg *msg)
{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
+ struct sof_ipc_cmd_hdr *hdr;
u32 dr = 0;
u32 dd = 0;
+ /*
+ * Currently the only compact IPC supported is the PM_GATE
+ * IPC, which is used for transitioning the DSP between the
+ * D0I0 and D0I3 states. These messages are sent only during
+ * the set_power_state() op, so there will never be a case
+ * where a compact IPC causes the DSP to exit D0I3 without
+ * the host and FW being in sync.
+ */
if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
/* send the message via IPC registers */
snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
dd);
snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
CNL_DSP_REG_HIPCIDR_BUSY | dr);
- } else {
- /* send the message via mailbox */
- sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
- msg->msg_size);
- snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
- CNL_DSP_REG_HIPCIDR_BUSY);
+ return 0;
}
+ /* send the message via mailbox */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY);
+
+ hdr = msg->msg_data;
+
+ /*
+ * Use mod_delayed_work() to schedule the delayed work
+ * to avoid scheduling multiple workqueue items when
+ * IPCs are sent at a high rate. mod_delayed_work()
+ * modifies the timer if the work is pending.
+ * Also, a new delayed work should not be queued after
+ * the CTX_SAVE IPC, which is sent before the DSP enters D3.
+ */
+ if (hdr->cmd != (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE))
+ mod_delayed_work(system_wq, &hdev->d0i3_work,
+ msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));
+
return 0;
}
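mod_delayed_work() only re-arms the timer when the work item is already pending, so a burst of IPCs schedules the D0I3 work once instead of queueing one item per message. A minimal sketch of the behaviour relied on above (not driver code):

	unsigned long delay = msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS);

	/* first IPC: work idle, timer armed to fire in 'delay' jiffies */
	mod_delayed_work(system_wq, &hdev->d0i3_work, delay);
	/* second IPC shortly after: pending timer pushed out, nothing queued */
	mod_delayed_work(system_wq, &hdev->d0i3_work, delay);
	/* d0i3_work executes once, 'delay' after the most recent IPC */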
@@ -259,6 +279,15 @@ const struct snd_sof_dsp_ops sof_cnl_ops = {
.pcm_trigger = hda_dsp_pcm_trigger,
.pcm_pointer = hda_dsp_pcm_pointer,
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+ /* probe callbacks */
+ .probe_assign = hda_probe_compr_assign,
+ .probe_free = hda_probe_compr_free,
+ .probe_set_params = hda_probe_compr_set_params,
+ .probe_trigger = hda_probe_compr_trigger,
+ .probe_pointer = hda_probe_compr_pointer,
+#endif
+
/* firmware loading */
.load_firmware = snd_sof_load_firmware_raw,
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index ff45075ef720..3041fbbb010a 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -113,8 +113,14 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
if (ret < 0)
return ret;
- if ((resp & 0xFFFF0000) == IDISP_VID_INTEL)
+ if ((resp & 0xFFFF0000) == IDISP_VID_INTEL) {
+ if (!hdev->bus->audio_component) {
+ dev_dbg(sdev->dev,
+ "iDisp hw present but no driver\n");
+ return -ENOENT;
+ }
hda_priv->need_display_power = true;
+ }
/*
* if common HDMI codec driver is not used, codec load
@@ -203,6 +209,9 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
struct hdac_bus *bus = sof_to_bus(sdev);
int ret;
+ if (!bus->audio_component)
+ return 0;
+
/* power down unconditionally */
snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
diff --git a/sound/soc/sof/intel/hda-compress.c b/sound/soc/sof/intel/hda-compress.c
new file mode 100644
index 000000000000..38a1ebec8478
--- /dev/null
+++ b/sound/soc/sof/intel/hda-compress.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
+#include "../sof-priv.h"
+#include "hda.h"
+
+static inline struct hdac_ext_stream *
+hda_compr_get_stream(struct snd_compr_stream *cstream)
+{
+ return cstream->runtime->private_data;
+}
+
+int hda_probe_compr_assign(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream;
+
+ stream = hda_dsp_stream_get(sdev, cstream->direction);
+ if (!stream)
+ return -EBUSY;
+
+ hdac_stream(stream)->curr_pos = 0;
+ hdac_stream(stream)->cstream = cstream;
+ cstream->runtime->private_data = stream;
+
+ return hdac_stream(stream)->stream_tag;
+}
+
+int hda_probe_compr_free(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+ int ret;
+
+ ret = hda_dsp_stream_put(sdev, cstream->direction,
+ hdac_stream(stream)->stream_tag);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "stream put failed: %d\n", ret);
+ return ret;
+ }
+
+ hdac_stream(stream)->cstream = NULL;
+ cstream->runtime->private_data = NULL;
+
+ return 0;
+}
+
+int hda_probe_compr_set_params(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+ struct hdac_stream *hstream = hdac_stream(stream);
+ struct snd_dma_buffer *dmab;
+ u32 bits, rate;
+ int bps, ret;
+
+ dmab = cstream->runtime->dma_buffer_p;
+ /* compr params do not store bit depth, default to S32_LE */
+ bps = snd_pcm_format_physical_width(SNDRV_PCM_FORMAT_S32_LE);
+ if (bps < 0)
+ return bps;
+ bits = hda_dsp_get_bits(sdev, bps);
+ rate = hda_dsp_get_mult_div(sdev, params->codec.sample_rate);
+
+ hstream->format_val = rate | bits | (params->codec.ch_out - 1);
+ hstream->bufsize = cstream->runtime->buffer_size;
+ hstream->period_bytes = cstream->runtime->fragment_size;
+ hstream->no_period_wakeup = 0;
+
+ ret = hda_dsp_stream_hw_params(sdev, stream, dmab, NULL);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
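hstream->format_val above is packed in the standard HDA SDnFMT layout (see the SDnFMT_* macros in hda-pcm.c). A worked example for the defaulted S32_LE format at 48 kHz with two channels, assuming the usual SDnFMT codes:

	/*
	 * rate = hda_dsp_get_mult_div(sdev, 48000) -> 0x0000 (48 kHz base,
	 *        multiplier x1, divider 1)
	 * bits = hda_dsp_get_bits(sdev, 32)        -> 0x0040 (code 4 << 4)
	 * channels: ch_out - 1 = 2 - 1             -> 0x0001
	 * format_val = 0x0000 | 0x0040 | 0x0001    =  0x0041
	 */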
+
+int hda_probe_compr_trigger(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+
+ return hda_dsp_stream_trigger(sdev, stream, cmd);
+}
+
+int hda_probe_compr_pointer(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai)
+{
+ struct hdac_ext_stream *stream = hda_compr_get_stream(cstream);
+ struct snd_soc_pcm_stream *pstream;
+
+ pstream = &dai->driver->capture;
+ tstamp->copied_total = hdac_stream(stream)->curr_pos;
+ tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates);
+
+ return 0;
+}
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
index 871b71a15a63..6288b2f99540 100644
--- a/sound/soc/sof/intel/hda-ctrl.c
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
+#include <sound/hda_component.h>
#include "../ops.h"
#include "hda.h"
@@ -64,15 +65,32 @@ int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev)
struct hdac_bus *bus = sof_to_bus(sdev);
u32 cap, offset, feature;
int count = 0;
+ int ret;
+
+ /*
+ * On some devices, one reset cycle is necessary before reading
+ * capabilities
+ */
+ ret = hda_dsp_ctrl_link_reset(sdev, true);
+ if (ret < 0)
+ return ret;
+ ret = hda_dsp_ctrl_link_reset(sdev, false);
+ if (ret < 0)
+ return ret;
offset = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_LLCH);
do {
- cap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, offset);
-
dev_dbg(sdev->dev, "checking for capabilities at offset 0x%x\n",
offset & SOF_HDA_CAP_NEXT_MASK);
+ cap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, offset);
+
+ if (cap == -1) {
+ dev_dbg(bus->dev, "Invalid capability reg read\n");
+ break;
+ }
+
feature = (cap & SOF_HDA_CAP_ID_MASK) >> SOF_HDA_CAP_ID_OFF;
switch (feature) {
@@ -105,8 +123,8 @@ int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev)
bus->mlcap = bus->remap_addr + offset;
break;
default:
- dev_vdbg(sdev->dev, "found capability %d at 0x%x\n",
- feature, offset);
+ dev_dbg(sdev->dev, "found capability %d at 0x%x\n",
+ feature, offset);
break;
}
@@ -176,6 +194,9 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
if (bus->chip_init)
return 0;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_set_codec_wakeup(bus, true);
+#endif
hda_dsp_ctrl_misc_clock_gating(sdev, false);
if (full_reset) {
@@ -183,7 +204,7 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
ret = hda_dsp_ctrl_link_reset(sdev, true);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to reset HDA controller\n");
- return ret;
+ goto err;
}
usleep_range(500, 1000);
@@ -192,7 +213,7 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
ret = hda_dsp_ctrl_link_reset(sdev, false);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
- return ret;
+ goto err;
}
usleep_range(1000, 1200);
@@ -202,7 +223,8 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
/* check to see if controller is ready */
if (!snd_hdac_chip_readb(bus, GCTL)) {
dev_dbg(bus->dev, "controller not ready!\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
/* Accept unsolicited responses */
@@ -268,7 +290,11 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev, bool full_reset)
bus->chip_init = true;
+err:
hda_dsp_ctrl_misc_clock_gating(sdev, true);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ snd_hdac_set_codec_wakeup(bus, false);
+#endif
return ret;
}
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 9c6e3f990ee3..833dc303b394 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -204,7 +204,7 @@ static int hda_link_hw_params(struct snd_pcm_substream *substream,
struct hdac_bus *bus = hstream->bus;
struct hdac_ext_stream *link_dev;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct sof_intel_hda_stream *hda_stream;
struct hda_pipe_params p_params = {0};
struct hdac_ext_link *link;
@@ -293,7 +293,7 @@ static int hda_link_pcm_trigger(struct snd_pcm_substream *substream,
bus = hstream->bus;
rtd = snd_pcm_substream_chip(substream);
- link = snd_hdac_ext_bus_get_link(bus, rtd->codec_dai->component->name);
+ link = snd_hdac_ext_bus_get_link(bus, asoc_rtd_to_codec(rtd, 0)->component->name);
if (!link)
return -EINVAL;
@@ -374,7 +374,7 @@ static int hda_link_hw_free(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- link = snd_hdac_ext_bus_get_link(bus, rtd->codec_dai->component->name);
+ link = snd_hdac_ext_bus_get_link(bus, asoc_rtd_to_codec(rtd, 0)->component->name);
if (!link)
return -EINVAL;
@@ -399,6 +399,19 @@ static const struct snd_soc_dai_ops hda_link_dai_ops = {
.trigger = hda_link_pcm_trigger,
.prepare = hda_link_pcm_prepare,
};
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+#include "../compress.h"
+
+static struct snd_soc_cdai_ops sof_probe_compr_ops = {
+ .startup = sof_probe_compr_open,
+ .shutdown = sof_probe_compr_free,
+ .set_params = sof_probe_compr_set_params,
+ .trigger = sof_probe_compr_trigger,
+ .pointer = sof_probe_compr_pointer,
+};
+
+#endif
#endif
/*
@@ -409,56 +422,167 @@ static const struct snd_soc_dai_ops hda_link_dai_ops = {
struct snd_soc_dai_driver skl_dai[] = {
{
.name = "SSP0 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "SSP1 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "SSP2 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "SSP3 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "SSP4 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "SSP5 Pin",
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "DMIC01 Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
},
{
.name = "DMIC16k Pin",
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 4,
+ },
},
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
{
.name = "iDisp1 Pin",
.ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "iDisp2 Pin",
.ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "iDisp3 Pin",
.ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "iDisp4 Pin",
.ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 8,
+ },
},
{
.name = "Analog CPU DAI",
.ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
},
{
.name = "Digital CPU DAI",
.ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
},
{
.name = "Alt Analog CPU DAI",
.ops = &hda_link_dai_ops,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 16,
+ },
+},
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+{
+ .name = "Probe Extraction CPU DAI",
+ .compress_new = snd_soc_new_compress,
+ .cops = &sof_probe_compr_ops,
+ .capture = {
+ .stream_name = "Probe Extraction",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ },
},
#endif
+#endif
};
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 0848b79967a9..99087b6afb67 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -15,12 +15,21 @@
* Hardware interface for generic Intel audio DSP HDA IP
*/
+#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
+#include "../sof-audio.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"
+static bool hda_enable_trace_D0I3_S0;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
+module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
+MODULE_PARM_DESC(enable_trace_D0I3_S0,
+ "SOF HDA enable trace when the DSP is in D0I3 in S0");
+#endif
+
/*
* DSP Core control.
*/
@@ -334,17 +343,15 @@ static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
pm_gate.flags = flags;
/* send pm_gate ipc to dsp */
- return sof_ipc_tx_message(sdev->ipc, pm_gate.hdr.cmd, &pm_gate,
- sizeof(pm_gate), &reply, sizeof(reply));
+ return sof_ipc_tx_message_no_pm(sdev->ipc, pm_gate.hdr.cmd,
+ &pm_gate, sizeof(pm_gate), &reply,
+ sizeof(reply));
}
-int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
- enum sof_d0_substate d0_substate)
+static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
struct hdac_bus *bus = sof_to_bus(sdev);
- u32 flags;
int ret;
- u8 value;
/* Write to D0I3C after Command-In-Progress bit is cleared */
ret = hda_dsp_wait_d0i3c_done(sdev);
@@ -354,7 +361,6 @@ int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
}
/* Update D0I3C register */
- value = d0_substate == SOF_DSP_D0I3 ? SOF_HDA_VS_D0I3C_I3 : 0;
snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);
/* Wait for cmd in progress to be cleared before exiting the function */
@@ -367,20 +373,218 @@ int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
snd_hdac_chip_readb(bus, VS_D0I3C));
- if (d0_substate == SOF_DSP_D0I0)
- flags = HDA_PM_PPG;/* prevent power gating in D0 */
- else
- flags = HDA_PM_NO_DMA_TRACE;/* disable DMA trace in D0I3*/
+ return 0;
+}
- /* sending pm_gate IPC */
- ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
+static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ u32 flags = 0;
+ int ret;
+ u8 value = 0;
+
+ /*
+ * Sanity check for illegal state transitions
+ * The only allowed transitions are:
+ * 1. D3 -> D0I0
+ * 2. D0I0 -> D0I3
+ * 3. D0I3 -> D0I0
+ */
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ /* Follow the sequence below for D0 substate transitions */
+ break;
+ case SOF_DSP_PM_D3:
+ /* Follow regular flow for D3 -> D0 transition */
+ return 0;
+ default:
+ dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ }
+
+ /* Set flags and register value for D0 target substate */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
+ value = SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * Trace DMA is disabled by default when the DSP enters D0I3.
+ * But it can be kept enabled when the DSP enters D0I3 while the
+ * system is in S0 for debug.
+ */
+ if (hda_enable_trace_D0I3_S0 &&
+ sdev->system_suspend_target != SOF_SUSPEND_NONE)
+ flags = HDA_PM_NO_DMA_TRACE;
+ } else {
+ /* prevent power gating in D0I0 */
+ flags = HDA_PM_PPG;
+ }
+
+ /* update D0I3C register */
+ ret = hda_dsp_update_d0i3c_register(sdev, value);
if (ret < 0)
+ return ret;
+
+ /*
+ * Notify the DSP of the state change.
+ * If this IPC fails, revert the D0I3C register update in order
+ * to prevent partial state change.
+ */
+ ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
+ if (ret < 0) {
dev_err(sdev->dev,
"error: PM_GATE ipc error %d\n", ret);
+ goto revert;
+ }
+
+ return ret;
+
+revert:
+ /* fallback to the previous register value */
+ value = value ? 0 : SOF_HDA_VS_D0I3C_I3;
+
+ /*
+ * This update can itself fail; return the original IPC error
+ * to signal that the state change failed.
+ */
+ hda_dsp_update_d0i3c_register(sdev, value);
return ret;
}
+/* helper to log DSP state */
+static void hda_dsp_state_log(struct snd_sof_dev *sdev)
+{
+ switch (sdev->dsp_power_state.state) {
+ case SOF_DSP_PM_D0:
+ switch (sdev->dsp_power_state.substate) {
+ case SOF_HDA_DSP_PM_D0I0:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
+ break;
+ case SOF_HDA_DSP_PM_D0I3:
+ dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
+ sdev->dsp_power_state.substate);
+ break;
+ }
+ break;
+ case SOF_DSP_PM_D1:
+ dev_dbg(sdev->dev, "Current DSP power state: D1\n");
+ break;
+ case SOF_DSP_PM_D2:
+ dev_dbg(sdev->dev, "Current DSP power state: D2\n");
+ break;
+ case SOF_DSP_PM_D3_HOT:
+ dev_dbg(sdev->dev, "Current DSP power state: D3_HOT\n");
+ break;
+ case SOF_DSP_PM_D3:
+ dev_dbg(sdev->dev, "Current DSP power state: D3\n");
+ break;
+ case SOF_DSP_PM_D3_COLD:
+ dev_dbg(sdev->dev, "Current DSP power state: D3_COLD\n");
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
+ sdev->dsp_power_state.state);
+ break;
+ }
+}
+
+/*
+ * All DSP power state transitions are initiated by the driver.
+ * If the requested state change fails, the error is simply returned.
+ * Further state transitions are attempted only when the set_power_state() op
+ * is called again either because of a new IPC sent to the DSP or
+ * during system suspend/resume.
+ */
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
+{
+ int ret = 0;
+
+ /*
+ * When the DSP is already in D0I3 and the target state is D0I3,
+ * it could be the case that the DSP is in D0I3 during S0
+ * and the system is suspending to S0Ix. Therefore,
+ * hda_dsp_set_D0_state() must be called to disable trace DMA
+ * by sending the PM_GATE IPC to the FW.
+ */
+ if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
+ sdev->system_suspend_target == SOF_SUSPEND_S0IX)
+ goto set_state;
+
+ /*
+ * For all other cases, return without doing anything if
+ * the DSP is already in the target state.
+ */
+ if (target_state->state == sdev->dsp_power_state.state &&
+ target_state->substate == sdev->dsp_power_state.substate)
+ return 0;
+
+set_state:
+ switch (target_state->state) {
+ case SOF_DSP_PM_D0:
+ ret = hda_dsp_set_D0_state(sdev, target_state);
+ break;
+ case SOF_DSP_PM_D3:
+ /* The only allowed transition is: D0I0 -> D3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
+ sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
+ break;
+
+ dev_err(sdev->dev,
+ "error: transition from %d to %d not allowed\n",
+ sdev->dsp_power_state.state, target_state->state);
+ return -EINVAL;
+ default:
+ dev_err(sdev->dev, "error: target state unsupported %d\n",
+ target_state->state);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "failed to set requested target DSP state %d substate %d\n",
+ target_state->state, target_state->substate);
+ return ret;
+ }
+
+ sdev->dsp_power_state = *target_state;
+ hda_dsp_state_log(sdev);
+ return ret;
+}
+
+/*
+ * Audio DSP states may transform as below:-
+ *
+ * Opportunistic D0I3 in S0
+ * Runtime +---------------------+ Delayed D0i3 work timeout
+ * suspend | +--------------------+
+ * +------------+ D0I0(active) | |
+ * | | <---------------+ |
+ * | +--------> | New IPC | |
+ * | |Runtime +--^--+---------^--+--+ (via mailbox) | |
+ * | |resume | | | | | |
+ * | | | | | | | |
+ * | | System| | | | | |
+ * | | resume| | S3/S0IX | | | |
+ * | | | | suspend | | S0IX | |
+ * | | | | | |suspend | |
+ * | | | | | | | |
+ * | | | | | | | |
+ * +-v---+-----------+--v-------+ | | +------+----v----+
+ * | | | +-----------> |
+ * | D3 (suspended) | | | D0I3 |
+ * | | +--------------+ |
+ * | | System resume | |
+ * +----------------------------+ +----------------+
+ *
+ * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
+ * ignored the suspend trigger. Otherwise the DSP
+ * is in D3.
+ */
+
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
@@ -390,6 +594,8 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
#endif
int ret;
+ hda_sdw_int_enable(sdev, false);
+
/* disable IPC interrupts */
hda_dsp_ipc_int_disable(sdev);
@@ -486,10 +692,24 @@ int hda_dsp_resume(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ .substate = SOF_HDA_DSP_PM_D0I0,
+ };
+ int ret;
- if (sdev->s0_suspend) {
+ /* resume from D0I3 */
+ if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
hda_codec_i915_display_power(sdev, true);
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_state.state, target_state.substate);
+ return ret;
+ }
+
/* restore L1SEN bit */
if (hda->l1_support_changed)
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
@@ -503,13 +723,26 @@ int hda_dsp_resume(struct snd_sof_dev *sdev)
}
/* init hda controller. DSP cores will be powered up during fw boot */
- return hda_resume(sdev, false);
+ ret = hda_resume(sdev, false);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
}
int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ int ret;
+
/* init hda controller. DSP cores will be powered up during fw boot */
- return hda_resume(sdev, true);
+ ret = hda_resume(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
}
int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
@@ -527,21 +760,47 @@ int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D3,
+ };
+ int ret;
+
/* stop hda controller and power dsp off */
- return hda_suspend(sdev, true);
+ ret = hda_suspend(sdev, true);
+ if (ret < 0)
+ return ret;
+
+ return snd_sof_dsp_set_power_state(sdev, &target_state);
}
-int hda_dsp_suspend(struct snd_sof_dev *sdev)
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct hdac_bus *bus = sof_to_bus(sdev);
struct pci_dev *pci = to_pci_dev(sdev->dev);
+ const struct sof_dsp_power_state target_dsp_state = {
+ .state = target_state,
+ .substate = target_state == SOF_DSP_PM_D0 ?
+ SOF_HDA_DSP_PM_D0I3 : 0,
+ };
int ret;
- if (sdev->s0_suspend) {
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
+ if (target_state == SOF_DSP_PM_D0) {
/* we can't keep a wakeref to display driver at suspend */
hda_codec_i915_display_power(sdev, false);
+ /* Set DSP power state */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
+ target_dsp_state.state,
+ target_dsp_state.substate);
+ return ret;
+ }
+
/* enable L1SEN to make sure the system can enter S0Ix */
hda->l1_support_changed =
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
@@ -562,7 +821,7 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev)
return ret;
}
- return 0;
+ return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
@@ -588,7 +847,7 @@ int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
*/
if (stream->link_substream) {
rtd = snd_pcm_substream_chip(stream->link_substream);
- name = rtd->codec_dai->component->name;
+ name = asoc_rtd_to_codec(rtd, 0)->component->name;
link = snd_hdac_ext_bus_get_link(bus, name);
if (!link)
return -EINVAL;
@@ -606,3 +865,33 @@ int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
#endif
return 0;
}
+
+void hda_dsp_d0i3_work(struct work_struct *work)
+{
+ struct sof_intel_hda_dev *hdev = container_of(work,
+ struct sof_intel_hda_dev,
+ d0i3_work.work);
+ struct hdac_bus *bus = &hdev->hbus.core;
+ struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
+ struct sof_dsp_power_state target_state;
+ int ret;
+
+ target_state.state = SOF_DSP_PM_D0;
+
+ /* DSP can enter D0I3 only if all active streams are D0I3-compatible */
+ if (snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
+ target_state.substate = SOF_HDA_DSP_PM_D0I3;
+ else
+ target_state.substate = SOF_HDA_DSP_PM_D0I0;
+
+ /* remain in D0I0 */
+ if (target_state.substate == SOF_HDA_DSP_PM_D0I0)
+ return;
+
+ /* This can fail but the error cannot be propagated */
+ ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+ if (ret < 0)
+ dev_err_ratelimited(sdev->dev,
+ "error: failed to set DSP state %d substate %d\n",
+ target_state.state, target_state.substate);
+}
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index 1837f66e361f..6062bb6011fb 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -106,7 +106,9 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
ret = reply.error;
} else {
/* reply correct size ? */
- if (reply.hdr.size != msg->reply_size) {
+ if (reply.hdr.size != msg->reply_size &&
+ /* getter payload is never known upfront */
+ !(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) {
dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
msg->reply_size, reply.hdr.size);
ret = -EINVAL;
@@ -123,12 +125,6 @@ out:
}
-static bool hda_dsp_ipc_is_sof(uint32_t msg)
-{
- return (msg & (HDA_DSP_IPC_PURGE_FW | 0xf << 9)) != msg ||
- (msg & HDA_DSP_IPC_PURGE_FW) != HDA_DSP_IPC_PURGE_FW;
-}
-
/* IPC handler thread */
irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
{
@@ -174,17 +170,9 @@ irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
*/
spin_lock_irq(&sdev->ipc_lock);
- /* handle immediate reply from DSP core - ignore ROM messages */
- if (hda_dsp_ipc_is_sof(msg)) {
- hda_dsp_ipc_get_reply(sdev);
- snd_sof_ipc_reply(sdev, msg);
- }
-
- /* wake up sleeper if we are loading code */
- if (sdev->code_loading) {
- sdev->code_loading = 0;
- wake_up(&sdev->waitq);
- }
+ /* handle immediate reply from DSP core */
+ hda_dsp_ipc_get_reply(sdev);
+ snd_sof_ipc_reply(sdev, msg);
/* set the done bit */
hda_dsp_ipc_dsp_done(sdev);
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
index 8852184a2569..e1550ccd0a49 100644
--- a/sound/soc/sof/intel/hda-loader.c
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -131,6 +131,12 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, const void *fwdata,
goto err;
}
+ /* set DONE bit to clear the reply IPC message */
+ snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
+ chip->ipc_ack,
+ chip->ipc_ack_mask,
+ chip->ipc_ack_mask);
+
/* step 5: power down corex */
ret = hda_dsp_core_power_down(sdev,
chip->cores_mask & ~(HDA_DSP_CORE_MASK(0)));
@@ -173,9 +179,6 @@ static int cl_trigger(struct snd_sof_dev *sdev,
/* code loader is special case that reuses stream ops */
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- wait_event_timeout(sdev->waitq, !sdev->code_loading,
- HDA_DSP_CL_TRIGGER_TIMEOUT);
-
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
1 << hstream->index,
1 << hstream->index);
@@ -344,6 +347,24 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
}
/*
+ * When a SoundWire link is in clock stop state, a Slave
+ * device may trigger in-band wakes for events such as jack
+ * insertion or acoustic event detection. This event will lead
+ * to a WAKEEN interrupt, handled by the PCI device and routed
+ * to PME if the PCI device is in D3. The resume function in
+ * audio PCI driver will be invoked by ACPI for PME event and
+ * initialize the device and process WAKEEN interrupt.
+ *
+ * The WAKEEN interrupt should be processed ASAP to prevent an
+ * interrupt flood, otherwise other interrupts, such as IPC,
+ * cannot work normally. The WAKEEN interrupt is handled after
+ * the ROM is initialized successfully, which ensures power rails
+ * are enabled before accessing the SoundWire SHIM registers.
+ */
+ if (!sdev->first_boot)
+ hda_sdw_process_wakeen(sdev);
+
+ /*
* at this point DSP ROM has been initialized and
* should be ready for code loading and firmware boot
*/
@@ -396,6 +417,19 @@ int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
/* post fw run operations */
int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
+ int ret;
+
+ if (sdev->first_boot) {
+ ret = hda_sdw_startup(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: could not startup SoundWire links\n");
+ return ret;
+ }
+ }
+
+ hda_sdw_int_enable(sdev, true);
+
/* re-enable clock gating and power gating */
return hda_dsp_ctrl_clock_power_gating(sdev, true);
}
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index 23872f6e708d..a46a6baa1c3f 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -27,7 +27,7 @@
#define SDnFMT_BITS(x) ((x) << 4)
#define SDnFMT_CHAN(x) ((x) << 0)
-static inline u32 get_mult_div(struct snd_sof_dev *sdev, int rate)
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate)
{
switch (rate) {
case 8000:
@@ -61,7 +61,7 @@ static inline u32 get_mult_div(struct snd_sof_dev *sdev, int rate)
}
};
-static inline u32 get_bits(struct snd_sof_dev *sdev, int sample_bits)
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits)
{
switch (sample_bits) {
case 8:
@@ -95,8 +95,8 @@ int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
u32 size, rate, bits;
size = params_buffer_bytes(params);
- rate = get_mult_div(sdev, params_rate(params));
- bits = get_bits(sdev, params_width(params));
+ rate = hda_dsp_get_mult_div(sdev, params_rate(params));
+ bits = hda_dsp_get_bits(sdev, params_width(params));
hstream->substream = substream;
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index c0ab9bb2a797..5d386956906f 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -547,6 +547,8 @@ int hda_dsp_stream_hw_free(struct snd_sof_dev *sdev,
SOF_HDA_REG_PP_PPCTL, mask, 0);
spin_unlock_irq(&bus->reg_lock);
+ stream->substream = NULL;
+
return 0;
}
@@ -571,6 +573,22 @@ bool hda_dsp_check_stream_irq(struct snd_sof_dev *sdev)
return ret;
}
+static void
+hda_dsp_set_bytes_transferred(struct hdac_stream *hstream, u64 buffer_size)
+{
+ u64 prev_pos, pos, num_bytes;
+
+ div64_u64_rem(hstream->curr_pos, buffer_size, &prev_pos);
+ pos = snd_hdac_stream_get_pos_posbuf(hstream);
+
+ if (pos < prev_pos)
+ num_bytes = (buffer_size - prev_pos) + pos;
+ else
+ num_bytes = pos - prev_pos;
+
+ hstream->curr_pos += num_bytes;
+}
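hda_dsp_set_bytes_transferred() keeps curr_pos monotonically increasing by comparing the new DMA position with the previous position modulo the buffer size. A worked example with hypothetical numbers:

	/*
	 * buffer_size = 4096, curr_pos = 8000:
	 *   prev_pos = 8000 % 4096 = 3904
	 * posbuf now reports pos = 100; pos < prev_pos, so the
	 * stream wrapped:
	 *   num_bytes = (4096 - 3904) + 100 = 292
	 *   curr_pos  = 8000 + 292 = 8292
	 */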
+
static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
{
struct sof_intel_hda_dev *sof_hda = bus_to_sof_hda(bus);
@@ -588,14 +606,19 @@ static bool hda_dsp_stream_check(struct hdac_bus *bus, u32 status)
snd_hdac_stream_writeb(s, SD_STS, sd_status);
active = true;
- if (!s->substream ||
+ if ((!s->substream && !s->cstream) ||
!s->running ||
(sd_status & SOF_HDA_CL_DMA_SD_INT_COMPLETE) == 0)
continue;
/* Inform ALSA only in case not do that with IPC */
- if (sof_hda->no_ipc_position)
+ if (s->substream && sof_hda->no_ipc_position) {
snd_sof_pcm_period_elapsed(s->substream);
+ } else if (s->cstream) {
+ hda_dsp_set_bytes_transferred(s,
+ s->cstream->runtime->buffer_size);
+ snd_compr_fragment_elapsed(s->cstream);
+ }
}
}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 25946a1c2822..211e91e79eae 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -18,10 +18,14 @@
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
+#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
#include <sound/intel-nhlt.h>
#include <sound/sof.h>
#include <sound/sof/xtensa.h>
+#include "../sof-audio.h"
#include "../ops.h"
#include "hda.h"
@@ -34,6 +38,235 @@
#define EXCEPT_MAX_HDR_SIZE 0x400
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+/*
+ * The default for SoundWire clock stop quirks is to power gate the IP
+ * and do a Bus Reset. This will need to be modified when the DSP
+ * needs to remain in D0I3, so that the Master does not lose context
+ * and enumeration is not required on clock restart.
+ */
+static int sdw_clock_stop_quirks = SDW_INTEL_CLK_STOP_BUS_RESET;
+module_param(sdw_clock_stop_quirks, int, 0444);
+MODULE_PARM_DESC(sdw_clock_stop_quirks, "SOF SoundWire clock stop quirks");
+
+static int sdw_params_stream(struct device *dev,
+ struct sdw_intel_stream_params_data *params_data)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_soc_dai *d = params_data->dai;
+ struct sof_ipc_dai_config config;
+ struct sof_ipc_reply reply;
+ int link_id = params_data->link_id;
+ int alh_stream_id = params_data->alh_stream_id;
+ int ret;
+ u32 size = sizeof(config);
+
+ memset(&config, 0, size);
+ config.hdr.size = size;
+ config.hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
+ config.type = SOF_DAI_INTEL_ALH;
+ config.dai_index = (link_id << 8) | (d->id);
+ config.alh.stream_id = alh_stream_id;
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config.hdr.cmd, &config, size, &reply,
+ sizeof(reply));
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to set DAI hw_params for link %d dai->id %d ALH %d\n",
+ link_id, d->id, alh_stream_id);
+ }
+
+ return ret;
+}
+
+static int sdw_free_stream(struct device *dev,
+ struct sdw_intel_stream_free_data *free_data)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_soc_dai *d = free_data->dai;
+ struct sof_ipc_dai_config config;
+ struct sof_ipc_reply reply;
+ int link_id = free_data->link_id;
+ int ret;
+ u32 size = sizeof(config);
+
+ memset(&config, 0, size);
+ config.hdr.size = size;
+ config.hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
+ config.type = SOF_DAI_INTEL_ALH;
+ config.dai_index = (link_id << 8) | d->id;
+ config.alh.stream_id = 0xFFFF; /* invalid value on purpose */
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config.hdr.cmd, &config, size, &reply,
+ sizeof(reply));
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: failed to free stream for link %d dai->id %d\n",
+ link_id, d->id);
+ }
+
+ return ret;
+}
+
+static const struct sdw_intel_ops sdw_callback = {
+ .params_stream = sdw_params_stream,
+ .free_stream = sdw_free_stream,
+};
+
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+ sdw_intel_enable_irq(sdev->bar[HDA_DSP_BAR], enable);
+}
+
+static int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ acpi_handle handle;
+ int ret;
+
+ handle = ACPI_HANDLE(sdev->dev);
+
+ /* save ACPI info for the probe step */
+ hdev = sdev->pdata->hw_pdata;
+
+ ret = sdw_intel_acpi_scan(handle, &hdev->info);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s failed\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ struct sdw_intel_res res;
+ void *sdw;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ memset(&res, 0, sizeof(res));
+
+ res.mmio_base = sdev->bar[HDA_DSP_BAR];
+ res.irq = sdev->ipc_irq;
+ res.handle = hdev->info.handle;
+ res.parent = sdev->dev;
+ res.ops = &sdw_callback;
+ res.dev = sdev->dev;
+ res.clock_stop_quirks = sdw_clock_stop_quirks;
+
+ /*
+ * ops and arg fields are not populated for now;
+ * they will be needed when the DAI callbacks are
+ * provided
+ */
+
+ /* we could filter links here if needed, e.g. for quirks */
+ res.count = hdev->info.count;
+ res.link_mask = hdev->info.link_mask;
+
+ sdw = sdw_intel_probe(&res);
+ if (!sdw) {
+ dev_err(sdev->dev, "error: SoundWire probe failed\n");
+ return -EINVAL;
+ }
+
+ /* save context */
+ hdev->sdw = sdw;
+
+ return 0;
+}
+
+int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return 0;
+
+ return sdw_intel_startup(hdev->sdw);
+}
+
+static int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ hda_sdw_int_enable(sdev, false);
+
+ if (hdev->sdw)
+ sdw_intel_exit(hdev->sdw);
+ hdev->sdw = NULL;
+
+ return 0;
+}
+
+static bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+ bool ret = false;
+ u32 irq_status;
+
+ hdev = sdev->pdata->hw_pdata;
+
+ if (!hdev->sdw)
+ return ret;
+
+ /* store status */
+ irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS2);
+
+ /* invalid message ? */
+ if (irq_status == 0xffffffff)
+ goto out;
+
+ /* SDW message ? */
+ if (irq_status & HDA_DSP_REG_ADSPIS2_SNDW)
+ ret = true;
+
+out:
+ return ret;
+}
+
+static irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return sdw_intel_thread(irq, context);
+}
+
+static bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (hdev->sdw &&
+ snd_sof_dsp_read(sdev, HDA_DSP_BAR,
+ HDA_DSP_REG_SNDW_WAKE_STS))
+ return true;
+
+ return false;
+}
+
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+ struct sof_intel_hda_dev *hdev;
+
+ hdev = sdev->pdata->hw_pdata;
+ if (!hdev->sdw)
+ return;
+
+ sdw_intel_process_wakeen_event(hdev->sdw);
+}
+
+#endif
+
/*
* Debug
*/
@@ -54,8 +287,7 @@ static int hda_dmic_num = -1;
module_param_named(dmic_num, hda_dmic_num, int, 0444);
MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
-static bool hda_codec_use_common_hdmi =
- IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_COMMON_HDMI_CODEC);
+static bool hda_codec_use_common_hdmi = IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI);
module_param_named(use_common_hdmi, hda_codec_use_common_hdmi, bool, 0444);
MODULE_PARM_DESC(use_common_hdmi, "SOF HDA use common HDMI codec driver");
#endif
@@ -288,10 +520,8 @@ static int hda_init(struct snd_sof_dev *sdev)
/* init i915 and HDMI codecs */
ret = hda_codec_i915_init(sdev);
- if (ret < 0) {
- dev_err(sdev->dev, "error: init i915 and HDMI codec failed\n");
- return ret;
- }
+ if (ret < 0)
+ dev_warn(sdev->dev, "init of i915 and HDMI codec failed\n");
/* get controller capabilities */
ret = hda_dsp_ctrl_get_caps(sdev);
@@ -349,9 +579,12 @@ static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
static int hda_init_caps(struct snd_sof_dev *sdev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
struct hdac_ext_link *hlink;
#endif
+ struct sof_intel_hda_dev *hdev = pdata->hw_pdata;
+ u32 link_mask;
int ret = 0;
device_disable_async_suspend(bus->dev);
@@ -365,12 +598,37 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
if (ret < 0) {
dev_err(bus->dev, "error: init chip failed with ret: %d\n",
ret);
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- hda_codec_i915_exit(sdev);
-#endif
return ret;
}
+ /* scan SoundWire capabilities exposed by DSDT */
+ ret = hda_sdw_acpi_scan(sdev);
+ if (ret < 0) {
+ dev_dbg(sdev->dev, "skipping SoundWire, ACPI scan error\n");
+ goto skip_soundwire;
+ }
+
+ link_mask = hdev->info.link_mask;
+ if (!link_mask) {
+ dev_dbg(sdev->dev, "skipping SoundWire, no links enabled\n");
+ goto skip_soundwire;
+ }
+
+ /*
+ * probe/allocate SoundWire resources.
+ * The hardware configuration takes place in hda_sdw_startup
+ * after power rails are enabled.
+ * It's entirely possible to have a mix of I2S/DMIC/SoundWire
+ * devices, so we allocate the resources in all cases.
+ */
+ ret = hda_sdw_probe(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: SoundWire probe error\n");
+ return ret;
+ }
+
+skip_soundwire:
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(bus);
@@ -379,7 +637,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
hda_codec_probe_bus(sdev, hda_codec_use_common_hdmi);
if (!HDA_IDISP_CODEC(bus->codec_mask))
- hda_codec_i915_exit(sdev);
+ hda_codec_i915_display_power(sdev, false);
/*
* we are done probing so decrement link counts
@@ -427,6 +685,7 @@ static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
/* deal with streams and controller first */
if (hda_dsp_check_stream_irq(sdev))
@@ -435,6 +694,12 @@ static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
if (hda_dsp_check_ipc_irq(sdev))
sof_ops(sdev)->irq_thread(irq, sdev);
+ if (hda_dsp_check_sdw_irq(sdev))
+ hda_dsp_sdw_thread(irq, hdev->sdw);
+
+ if (hda_sdw_check_wakeen_irq(sdev))
+ hda_sdw_process_wakeen(sdev);
+
/* enable GIE interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
SOF_HDA_INTCTL,
@@ -590,12 +855,11 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
hda_dsp_ctrl_ppcap_enable(sdev, true);
hda_dsp_ctrl_ppcap_int_enable(sdev, true);
- /* initialize waitq for code loading */
- init_waitqueue_head(&sdev->waitq);
-
/* set default mailbox offset for FW ready message */
sdev->dsp_box.offset = HDA_DSP_MBOX_UPLINK_OFFSET;
+ INIT_DELAYED_WORK(&hdev->d0i3_work, hda_dsp_d0i3_work);
+
return 0;
free_ipc_irq:
@@ -621,11 +885,16 @@ int hda_dsp_remove(struct snd_sof_dev *sdev)
struct pci_dev *pci = to_pci_dev(sdev->dev);
const struct sof_intel_dsp_desc *chip = hda->desc;
+ /* cancel any attempt for DSP D0I3 */
+ cancel_delayed_work_sync(&hda->d0i3_work);
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
/* codec removal, invoke bus_device_remove */
snd_hdac_ext_bus_device_remove(bus);
#endif
+ hda_sdw_exit(sdev);
+
if (!IS_ERR_OR_NULL(hda->dmic_dev))
platform_device_unregister(hda->dmic_dev);
@@ -694,12 +963,11 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
/*
* If no machine driver is found, then:
*
- * hda machine driver is used if :
- * 1. there is one HDMI codec and one external HDAudio codec
- * 2. only HDMI codec
+ * generic hda machine driver can handle:
+ * - one HDMI codec, and/or
+ * - one external HDAudio codec
*/
- if (!pdata->machine && codec_num <= 2 &&
- HDA_IDISP_CODEC(bus->codec_mask)) {
+ if (!pdata->machine && codec_num <= 2) {
hda_mach = snd_soc_acpi_intel_hda_machines;
/* topology: use the info from hda_machines */
@@ -709,7 +977,7 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
dev_info(bus->dev, "using HDA machine driver %s now\n",
hda_mach->drv_name);
- if (codec_num == 1)
+ if (codec_num == 1 && HDA_IDISP_CODEC(bus->codec_mask))
idisp_str = "-idisp";
else
idisp_str = "";
@@ -763,6 +1031,123 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
}
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+/* Check if all Slaves defined on the link can be found */
+static bool link_slaves_found(struct snd_sof_dev *sdev,
+ const struct snd_soc_acpi_link_adr *link,
+ struct sdw_intel_ctx *sdw)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct sdw_intel_slave_id *ids = sdw->ids;
+ int num_slaves = sdw->num_slaves;
+ unsigned int part_id, link_id, unique_id, mfg_id;
+ int i, j;
+
+ for (i = 0; i < link->num_adr; i++) {
+ u64 adr = link->adr_d[i].adr;
+
+ mfg_id = SDW_MFG_ID(adr);
+ part_id = SDW_PART_ID(adr);
+ link_id = SDW_DISCO_LINK_ID(adr);
+ for (j = 0; j < num_slaves; j++) {
+ if (ids[j].link_id != link_id ||
+ ids[j].id.part_id != part_id ||
+ ids[j].id.mfg_id != mfg_id)
+ continue;
+ /*
+ * we have to check unique id
+ * if there is more than one
+ * Slave on the link
+ */
+ unique_id = SDW_UNIQUE_ID(adr);
+ if (link->num_adr == 1 ||
+ ids[j].id.unique_id == SDW_IGNORED_UNIQUE_ID ||
+ ids[j].id.unique_id == unique_id) {
+ dev_dbg(bus->dev,
+ "found %x at link %d\n",
+ part_id, link_id);
+ break;
+ }
+ }
+ if (j == num_slaves) {
+ dev_dbg(bus->dev,
+ "Slave %x not found\n",
+ part_id);
+ return false;
+ }
+ }
+ return true;
+}

/*
 * A minimal sketch, outside the diff, of how the SDW_*_ID() field
 * extraction above decodes a 64-bit DisCo _ADR. The bit positions are
 * assumed to match the macros in <linux/soundwire/sdw.h> (link ID in
 * bits 51:48, unique ID in 43:40, manufacturer ID in 39:24, part ID in
 * 23:8); the RT711-style address below is illustrative only.
 */
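#include <stdint.h>
#include <stdio.h>

static unsigned int adr_field(uint64_t adr, int hi, int lo)
{
	return (adr >> lo) & ((1ULL << (hi - lo + 1)) - 1);
}

int main(void)
{
	uint64_t adr = 0x000020025d071100ULL; /* hypothetical codec _ADR */

	/* prints: link 0 unique 0 mfg 0x25d part 0x711 */
	printf("link %u unique %u mfg 0x%x part 0x%x\n",
	       adr_field(adr, 51, 48), adr_field(adr, 43, 40),
	       adr_field(adr, 39, 24), adr_field(adr, 23, 8));
	return 0;
}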
+
+static int hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ const struct snd_soc_acpi_link_adr *link;
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct snd_soc_acpi_mach *mach;
+ struct sof_intel_hda_dev *hdev;
+ u32 link_mask;
+ int i;
+
+ hdev = pdata->hw_pdata;
+ link_mask = hdev->info.link_mask;
+
+ /*
+ * Select SoundWire machine driver if needed using the
+ * alternate tables. This case deals with SoundWire-only
+ * machines; for mixed cases with I2C/I2S the detection relies
+ * on the HID list.
+ */
+ if (link_mask && !pdata->machine) {
+ for (mach = pdata->desc->alt_machines;
+ mach && mach->link_mask; mach++) {
+ if (mach->link_mask != link_mask)
+ continue;
+
+ /* No need to match adr if there are no links defined */
+ if (!mach->links)
+ break;
+
+ link = mach->links;
+ for (i = 0; i < hdev->info.count && link->num_adr;
+ i++, link++) {
+ /*
+ * Try next machine if any expected Slaves
+ * are not found on this link.
+ */
+ if (!link_slaves_found(sdev, link, hdev->sdw))
+ break;
+ }
+ /* Found a match if all links were checked */
+ if (i == hdev->info.count || !link->num_adr)
+ break;
+ }
+ if (mach && mach->link_mask) {
+ dev_dbg(bus->dev,
+ "SoundWire machine driver %s topology %s\n",
+ mach->drv_name,
+ mach->sof_tplg_filename);
+ pdata->machine = mach;
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ mach->mach_params.platform = dev_name(sdev->dev);
+ pdata->fw_filename = mach->sof_fw_filename;
+ pdata->tplg_filename = mach->sof_tplg_filename;
+ } else {
+ dev_info(sdev->dev,
+ "No SoundWire machine driver found\n");
+ }
+ }
+
+ return 0;
+}
+#else
+static int hda_sdw_machine_select(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+#endif
+
void hda_set_mach_params(const struct snd_soc_acpi_mach *mach,
struct device *dev)
{
@@ -782,9 +1167,19 @@ void hda_machine_select(struct snd_sof_dev *sdev)
if (mach) {
sof_pdata->tplg_filename = mach->sof_tplg_filename;
sof_pdata->machine = mach;
+
+ if (mach->link_mask) {
+ mach->mach_params.links = mach->links;
+ mach->mach_params.link_mask = mach->link_mask;
+ }
}
/*
+ * If I2S fails, try SoundWire
+ */
+ hda_sdw_machine_select(sdev);
+
+ /*
* Choose HDA generic machine driver if mach is NULL.
* Otherwise, set certain mach params.
*/
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 6191d9192fae..e9825798de77 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -11,6 +11,9 @@
#ifndef __SOF_INTEL_HDA_H
#define __SOF_INTEL_HDA_H
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include <sound/compress_driver.h>
#include <sound/hda_codec.h>
#include <sound/hdaudio_ext.h>
#include "shim.h"
@@ -174,7 +177,6 @@
* value cannot be read back within the specified time.
*/
#define HDA_DSP_STREAM_RUN_TIMEOUT 300
-#define HDA_DSP_CL_TRIGGER_TIMEOUT 300
#define HDA_DSP_SPIB_ENABLE 1
#define HDA_DSP_SPIB_DISABLE 0
@@ -230,6 +232,9 @@
#define HDA_DSP_REG_ADSPIC2 (HDA_DSP_GEN_BASE + 0x10)
#define HDA_DSP_REG_ADSPIS2 (HDA_DSP_GEN_BASE + 0x14)
+#define HDA_DSP_REG_ADSPIS2_SNDW BIT(5)
+#define HDA_DSP_REG_SNDW_WAKE_STS 0x2C192
+
/* Intel HD Audio Inter-Processor Communication Registers */
#define HDA_DSP_IPC_BASE 0x40
#define HDA_DSP_REG_HIPCT (HDA_DSP_IPC_BASE + 0x00)
@@ -348,7 +353,13 @@
/* Number of DAIs */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+#define SOF_SKL_NUM_DAIS 16
+#else
#define SOF_SKL_NUM_DAIS 15
+#endif
+
#else
#define SOF_SKL_NUM_DAIS 8
#endif
@@ -392,6 +403,19 @@ struct sof_intel_dsp_bdl {
#define SOF_HDA_PLAYBACK 0
#define SOF_HDA_CAPTURE 1
+/*
+ * Time in ms for opportunistic D0I3 entry delay.
+ * This has been deliberately chosen to be long to avoid race conditions.
+ * It could be optimized in the future.
+ */
+#define SOF_HDA_D0I3_WORK_DELAY_MS 5000
+
+/* HDA DSP D0 substate */
+enum sof_hda_D0_substate {
+ SOF_HDA_DSP_PM_D0I0, /* default D0 substate */
+ SOF_HDA_DSP_PM_D0I3, /* low power D0 substate */
+};
+
/* represents DSP HDA controller frontend - i.e. host facing control */
struct sof_intel_hda_dev {
@@ -414,6 +438,15 @@ struct sof_intel_hda_dev {
/* DMIC device */
struct platform_device *dmic_dev;
+
+ /* delayed work to enter D0I3 opportunistically */
+ struct delayed_work d0i3_work;
+
+ /* ACPI information stored between scan and probe steps */
+ struct sdw_intel_acpi_info info;
+
+ /* sdw context allocated by SoundWire driver */
+ struct sdw_intel_ctx *sdw;
};
static inline struct hdac_bus *sof_to_bus(struct snd_sof_dev *s)
@@ -469,9 +502,9 @@ void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev);
void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev);
int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
- enum sof_d0_substate d0_substate);
+ const struct sof_dsp_power_state *target_state);
-int hda_dsp_suspend(struct snd_sof_dev *sdev);
+int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state);
int hda_dsp_resume(struct snd_sof_dev *sdev);
int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
@@ -481,10 +514,13 @@ void hda_dsp_dump_skl(struct snd_sof_dev *sdev, u32 flags);
void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
void hda_ipc_dump(struct snd_sof_dev *sdev);
void hda_ipc_irq_dump(struct snd_sof_dev *sdev);
+void hda_dsp_d0i3_work(struct work_struct *work);
/*
* DSP PCM Operations.
*/
+u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate);
+u32 hda_dsp_get_bits(struct snd_sof_dev *sdev, int sample_bits);
int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream);
int hda_dsp_pcm_close(struct snd_sof_dev *sdev,
@@ -533,6 +569,29 @@ int hda_ipc_pcm_params(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
const struct sof_ipc_pcm_params_reply *reply);
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_PROBES)
+/*
+ * Probe Compress Operations.
+ */
+int hda_probe_compr_assign(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_free(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_set_params(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_trigger(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai);
+int hda_probe_compr_pointer(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai);
+#endif
+
/*
* DSP IPC Operations.
*/
@@ -606,6 +665,61 @@ int hda_dsp_trace_init(struct snd_sof_dev *sdev, u32 *stream_tag);
int hda_dsp_trace_release(struct snd_sof_dev *sdev);
int hda_dsp_trace_trigger(struct snd_sof_dev *sdev, int cmd);
+/*
+ * SoundWire support
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+
+int hda_sdw_startup(struct snd_sof_dev *sdev);
+void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable);
+void hda_sdw_process_wakeen(struct snd_sof_dev *sdev);
+
+#else
+
+static inline int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_probe(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_startup(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline int hda_sdw_exit(struct snd_sof_dev *sdev)
+{
+ return 0;
+}
+
+static inline void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
+{
+}
+
+static inline bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+static inline irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
+{
+ return IRQ_HANDLED;
+}
+
+static inline bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
+{
+ return false;
+}
+
+static inline void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
+{
+}
+#endif
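/*
 * A small sketch of the IS_ENABLED() stub idiom used above, with
 * hypothetical names: when the Kconfig symbol is off, the static inline
 * stub compiles away, so callers such as hda_dsp_interrupt_thread()
 * stay free of #ifdef guards.
 */
#if IS_ENABLED(CONFIG_FOO)
int foo_startup(struct device *dev);
#else
static inline int foo_startup(struct device *dev)
{
	return 0; /* feature absent; report success and do nothing */
}
#endif

static int bar_probe(struct device *dev)
{
	return foo_startup(dev); /* branch-free in either configuration */
}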
+
/* common dai driver */
extern struct snd_soc_dai_driver skl_dai[];
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index 78aa1da7c7a9..1c6794918cbb 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -214,15 +214,17 @@ static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg,
snd_sof_handle_fw_exception(ipc->sdev);
ret = -ETIMEDOUT;
} else {
- /* copy the data returned from DSP */
ret = msg->reply_error;
- if (msg->reply_size)
- memcpy(reply_data, msg->reply_data, msg->reply_size);
- if (ret < 0)
+ if (ret < 0) {
dev_err(sdev->dev, "error: ipc error for 0x%x size %zu\n",
hdr->cmd, msg->reply_size);
- else
+ } else {
ipc_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd);
+ if (msg->reply_size)
+ /* copy the data returned from DSP */
+ memcpy(reply_data, msg->reply_data,
+ msg->reply_size);
+ }
}
return ret;
@@ -268,7 +270,6 @@ static int sof_ipc_tx_message_unlocked(struct snd_sof_ipc *ipc, u32 header,
spin_unlock_irq(&sdev->ipc_lock);
if (ret < 0) {
- /* So far IPC TX never fails, consider making the above void */
dev_err_ratelimited(sdev->dev,
"error: ipc tx failed with error %d\n",
ret);
@@ -289,6 +290,32 @@ int sof_ipc_tx_message(struct snd_sof_ipc *ipc, u32 header,
void *msg_data, size_t msg_bytes, void *reply_data,
size_t reply_bytes)
{
+ const struct sof_dsp_power_state target_state = {
+ .state = SOF_DSP_PM_D0,
+ };
+ int ret;
+
+ /* ensure the DSP is in D0 before sending a new IPC */
+ ret = snd_sof_dsp_set_power_state(ipc->sdev, &target_state);
+ if (ret < 0) {
+ dev_err(ipc->sdev->dev, "error: resuming DSP %d\n", ret);
+ return ret;
+ }
+
+ return sof_ipc_tx_message_no_pm(ipc, header, msg_data, msg_bytes,
+ reply_data, reply_bytes);
+}
+EXPORT_SYMBOL(sof_ipc_tx_message);
+
+/*
+ * send IPC message from host to DSP without modifying the DSP state.
+ * This is used for IPCs that can be handled by the DSP
+ * even in a low-power D0 substate.
+ */
+int sof_ipc_tx_message_no_pm(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes)
+{
int ret;
if (msg_bytes > SOF_IPC_MSG_MAX_SIZE ||
@@ -305,7 +332,7 @@ int sof_ipc_tx_message(struct snd_sof_ipc *ipc, u32 header,
return ret;
}
-EXPORT_SYMBOL(sof_ipc_tx_message);
+EXPORT_SYMBOL(sof_ipc_tx_message_no_pm);
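/*
 * A sketch of how a caller is expected to pick between the two transmit
 * paths; the d0i3_safe flag and the send_msg() helper are hypothetical.
 * Per the hunk above, sof_ipc_tx_message() wakes the DSP to full D0
 * first, while the no_pm variant leaves the power state untouched.
 */
static int send_msg(struct snd_sof_dev *sdev, struct sof_ipc_cmd_hdr *hdr,
		    bool d0i3_safe)
{
	struct sof_ipc_reply reply;

	if (d0i3_safe)
		return sof_ipc_tx_message_no_pm(sdev->ipc, hdr->cmd, hdr,
						hdr->size, &reply,
						sizeof(reply));

	return sof_ipc_tx_message(sdev->ipc, hdr->cmd, hdr, hdr->size,
				  &reply, sizeof(reply));
}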
/* handle reply message from DSP */
int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
index fc4ab51bacf4..1f2e0be812bd 100644
--- a/sound/soc/sof/loader.c
+++ b/sound/soc/sof/loader.c
@@ -95,9 +95,6 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset)
/* process structure data */
switch (ext_hdr->type) {
- case SOF_IPC_EXT_DMA_BUFFER:
- ret = 0;
- break;
case SOF_IPC_EXT_WINDOW:
ret = get_ext_windows(sdev, ext_hdr);
break;
@@ -469,9 +466,6 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
const char *fw_filename;
int ret;
- /* set code loading condition to true */
- sdev->code_loading = 1;
-
/* Don't request firmware again if firmware is already requested */
if (plat_data->fw)
return 0;
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
index e929a6e0058f..a771500ac442 100644
--- a/sound/soc/sof/ops.h
+++ b/sound/soc/sof/ops.h
@@ -146,10 +146,11 @@ static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
return 0;
}
-static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev)
+static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
+ u32 target_state)
{
if (sof_ops(sdev)->suspend)
- return sof_ops(sdev)->suspend(sdev);
+ return sof_ops(sdev)->suspend(sdev, target_state);
return 0;
}
@@ -193,14 +194,15 @@ static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
return 0;
}
-static inline int snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
- enum sof_d0_substate substate)
+static inline int
+snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
+ const struct sof_dsp_power_state *target_state)
{
if (sof_ops(sdev)->set_power_state)
- return sof_ops(sdev)->set_power_state(sdev, substate);
+ return sof_ops(sdev)->set_power_state(sdev, target_state);
- /* D0 substate is not supported */
- return -ENOTSUPP;
+ /* D0 substate is not supported, do nothing here. */
+ return 0;
}
/* debug */
@@ -391,6 +393,49 @@ snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
return 0;
}
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+static inline int
+snd_sof_probe_compr_assign(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_assign(sdev, cstream, dai);
+}
+
+static inline int
+snd_sof_probe_compr_free(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_free(sdev, cstream, dai);
+}
+
+static inline int
+snd_sof_probe_compr_set_params(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params, struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_set_params(sdev, cstream, params, dai);
+}
+
+static inline int
+snd_sof_probe_compr_trigger(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ return sof_ops(sdev)->probe_trigger(sdev, cstream, cmd, dai);
+}
+
+static inline int
+snd_sof_probe_compr_pointer(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai)
+{
+ if (sof_ops(sdev) && sof_ops(sdev)->probe_pointer)
+ return sof_ops(sdev)->probe_pointer(sdev, cstream, tstamp, dai);
+
+ return 0;
+}
+#endif
+
/* machine driver */
static inline int
snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 29435ba2d329..47cd741f2a8c 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -16,6 +16,9 @@
#include "sof-priv.h"
#include "sof-audio.h"
#include "ops.h"
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+#include "compress.h"
+#endif
/* Create DMA buffer page table for DSP */
static int create_page_table(struct snd_soc_component *component,
@@ -54,7 +57,7 @@ static int sof_pcm_dsp_params(struct snd_sof_pcm *spcm, struct snd_pcm_substream
/*
* sof pcm period elapse work
*/
-static void sof_pcm_period_elapsed_work(struct work_struct *work)
+void snd_sof_pcm_period_elapsed_work(struct work_struct *work)
{
struct snd_sof_pcm_stream *sps =
container_of(work, struct snd_sof_pcm_stream,
@@ -372,7 +375,7 @@ static int sof_pcm_trigger(struct snd_soc_component *component,
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_START;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
- if (sdev->s0_suspend &&
+ if (sdev->system_suspend_target == SOF_SUSPEND_S0IX &&
spcm->stream[substream->stream].d0i3_compatible) {
/*
* trap the event, not sending trigger stop to
@@ -472,8 +475,6 @@ static int sof_pcm_open(struct snd_soc_component *component,
dev_dbg(component->dev, "pcm: open stream %d dir %d\n",
spcm->pcm.pcm_id, substream->stream);
- INIT_WORK(&spcm->stream[substream->stream].period_elapsed_work,
- sof_pcm_period_elapsed_work);
caps = &spcm->pcm.caps[substream->stream];
@@ -598,8 +599,7 @@ static int sof_pcm_new(struct snd_soc_component *component,
snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
- le32_to_cpu(caps->buffer_size_min),
- le32_to_cpu(caps->buffer_size_max));
+ 0, le32_to_cpu(caps->buffer_size_max));
capture:
stream = SNDRV_PCM_STREAM_CAPTURE;
@@ -621,8 +621,7 @@ capture:
snd_pcm_set_managed_buffer(pcm->streams[stream].substream,
SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
- le32_to_cpu(caps->buffer_size_min),
- le32_to_cpu(caps->buffer_size_max));
+ 0, le32_to_cpu(caps->buffer_size_max));
return 0;
}
@@ -788,6 +787,10 @@ void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
pd->compr_ops = &sof_compressed_ops;
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ /* override cops when probe support is enabled */
+ pd->compr_ops = &sof_probe_compressed_ops;
+#endif
pd->pcm_construct = sof_pcm_new;
pd->ignore_machine = drv_name;
pd->be_hw_params_fixup = sof_pcm_dai_link_fixup;
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index a0cde053b61a..c410822d9920 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -12,6 +12,42 @@
#include "sof-priv.h"
#include "sof-audio.h"
+/*
+ * Helper function to determine the target DSP state during
+ * system suspend. This function only cares about the device
+ * D-states. Platform-specific substates, if any, should be
+ * handled by the platform-specific parts.
+ */
+static u32 snd_sof_dsp_power_target(struct snd_sof_dev *sdev)
+{
+ u32 target_dsp_state;
+
+ switch (sdev->system_suspend_target) {
+ case SOF_SUSPEND_S3:
+ /* DSP should be in D3 if the system is suspending to S3 */
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ case SOF_SUSPEND_S0IX:
+ /*
+ * Currently, the only criterion for retaining the DSP in D0
+ * is that there are streams that ignored the suspend trigger.
+ * Additional criteria such as SoundWire clock-stop mode and
+ * device suspend latency considerations will be added later.
+ */
+ if (snd_sof_stream_suspend_ignored(sdev))
+ target_dsp_state = SOF_DSP_PM_D0;
+ else
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ default:
+ /* This case would be during runtime suspend */
+ target_dsp_state = SOF_DSP_PM_D3;
+ break;
+ }
+
+ return target_dsp_state;
+}
+
static int sof_send_pm_ctx_ipc(struct snd_sof_dev *sdev, int cmd)
{
struct sof_ipc_pm_ctx pm_ctx;
@@ -50,6 +86,7 @@ static void sof_cache_debugfs(struct snd_sof_dev *sdev)
static int sof_resume(struct device *dev, bool runtime_resume)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ u32 old_state = sdev->dsp_power_state.state;
int ret;
/* do nothing if dsp resume callbacks are not set */
@@ -74,6 +111,10 @@ static int sof_resume(struct device *dev, bool runtime_resume)
return ret;
}
+ /* Nothing further to do if resuming from a low-power D0 substate */
+ if (!runtime_resume && old_state == SOF_DSP_PM_D0)
+ return 0;
+
sdev->fw_state = SOF_FW_BOOT_PREPARE;
/* load the firmware */
@@ -124,15 +165,13 @@ static int sof_resume(struct device *dev, bool runtime_resume)
"error: ctx_restore ipc error during resume %d\n",
ret);
- /* initialize default D0 sub-state */
- sdev->d0_substate = SOF_DSP_D0I0;
-
return ret;
}
static int sof_suspend(struct device *dev, bool runtime_suspend)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ u32 target_state = 0;
int ret;
/* do nothing if dsp suspend callback is not set */
@@ -140,10 +179,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
return 0;
if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
- goto power_down;
-
- /* release trace */
- snd_sof_release_trace(sdev);
+ goto suspend;
/* set restore_stream for all streams during system suspend */
if (!runtime_suspend) {
@@ -156,6 +192,15 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
}
}
+ target_state = snd_sof_dsp_power_target(sdev);
+
+ /* Skip to platform-specific suspend if DSP is entering D0 */
+ if (target_state == SOF_DSP_PM_D0)
+ goto suspend;
+
+ /* release trace */
+ snd_sof_release_trace(sdev);
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
/* cache debugfs contents during runtime suspend */
if (runtime_suspend)
@@ -179,22 +224,26 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
ret);
}
-power_down:
+suspend:
/* return if the DSP was not probed successfully */
if (sdev->fw_state == SOF_FW_BOOT_NOT_STARTED)
return 0;
- /* power down all DSP cores */
+ /* platform-specific suspend */
if (runtime_suspend)
ret = snd_sof_dsp_runtime_suspend(sdev);
else
- ret = snd_sof_dsp_suspend(sdev);
+ ret = snd_sof_dsp_suspend(sdev, target_state);
if (ret < 0)
dev_err(sdev->dev,
"error: failed to power down DSP during suspend %d\n",
ret);
+ /* Do not reset FW state if DSP is in D0 */
+ if (target_state == SOF_DSP_PM_D0)
+ return ret;
+
/* reset FW state */
sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
@@ -221,112 +270,14 @@ int snd_sof_runtime_resume(struct device *dev)
}
EXPORT_SYMBOL(snd_sof_runtime_resume);
-int snd_sof_set_d0_substate(struct snd_sof_dev *sdev,
- enum sof_d0_substate d0_substate)
-{
- int ret;
-
- if (sdev->d0_substate == d0_substate)
- return 0;
-
- /* do platform specific set_state */
- ret = snd_sof_dsp_set_power_state(sdev, d0_substate);
- if (ret < 0)
- return ret;
-
- /* update dsp D0 sub-state */
- sdev->d0_substate = d0_substate;
-
- return 0;
-}
-EXPORT_SYMBOL(snd_sof_set_d0_substate);
-
-/*
- * Audio DSP states may transform as below:-
- *
- * D0I3 compatible stream
- * Runtime +---------------------+ opened only, timeout
- * suspend | +--------------------+
- * +------------+ D0(active) | |
- * | | <---------------+ |
- * | +--------> | | |
- * | |Runtime +--^--+---------^--+--+ The last | |
- * | |resume | | | | opened D0I3 | |
- * | | | | | | compatible | |
- * | | resume| | | | stream closed | |
- * | | from | | D3 | | | |
- * | | D3 | |suspend | | d0i3 | |
- * | | | | | |suspend | |
- * | | | | | | | |
- * | | | | | | | |
- * +-v---+-----------+--v-------+ | | +------+----v----+
- * | | | +-----------> |
- * | D3 (suspended) | | | D0I3 +-----+
- * | | +--------------+ | |
- * | | resume from | | |
- * +-------------------^--------+ d0i3 suspend +----------------+ |
- * | |
- * | D3 suspend |
- * +------------------------------------------------+
- *
- * d0i3_suspend = s0_suspend && D0I3 stream opened,
- * D3 suspend = !d0i3_suspend,
- */
-
int snd_sof_resume(struct device *dev)
{
- struct snd_sof_dev *sdev = dev_get_drvdata(dev);
- int ret;
-
- if (snd_sof_dsp_d0i3_on_suspend(sdev)) {
- /* resume from D0I3 */
- dev_dbg(sdev->dev, "DSP will exit from D0i3...\n");
- ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I0);
- if (ret == -ENOTSUPP) {
- /* fallback to resume from D3 */
- dev_dbg(sdev->dev, "D0i3 not supported, fall back to resume from D3...\n");
- goto d3_resume;
- } else if (ret < 0) {
- dev_err(sdev->dev, "error: failed to exit from D0I3 %d\n",
- ret);
- return ret;
- }
-
- /* platform-specific resume from D0i3 */
- return snd_sof_dsp_resume(sdev);
- }
-
-d3_resume:
- /* resume from D3 */
return sof_resume(dev, false);
}
EXPORT_SYMBOL(snd_sof_resume);
int snd_sof_suspend(struct device *dev)
{
- struct snd_sof_dev *sdev = dev_get_drvdata(dev);
- int ret;
-
- if (snd_sof_dsp_d0i3_on_suspend(sdev)) {
- /* suspend to D0i3 */
- dev_dbg(sdev->dev, "DSP is trying to enter D0i3...\n");
- ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I3);
- if (ret == -ENOTSUPP) {
- /* fallback to D3 suspend */
- dev_dbg(sdev->dev, "D0i3 not supported, fall back to D3...\n");
- goto d3_suspend;
- } else if (ret < 0) {
- dev_err(sdev->dev, "error: failed to enter D0I3, %d\n",
- ret);
- return ret;
- }
-
- /* platform-specific suspend to D0i3 */
- return snd_sof_dsp_suspend(sdev);
- }
-
-d3_suspend:
- /* suspend to D3 */
return sof_suspend(dev, false);
}
EXPORT_SYMBOL(snd_sof_suspend);
@@ -336,10 +287,13 @@ int snd_sof_prepare(struct device *dev)
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
#if defined(CONFIG_ACPI)
- sdev->s0_suspend = acpi_target_system_state() == ACPI_STATE_S0;
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+ sdev->system_suspend_target = SOF_SUSPEND_S0IX;
+ else
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
#else
/* will suspend to S3 by default */
- sdev->s0_suspend = false;
+ sdev->system_suspend_target = SOF_SUSPEND_S3;
#endif
return 0;
@@ -350,6 +304,6 @@ void snd_sof_complete(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
- sdev->s0_suspend = false;
+ sdev->system_suspend_target = SOF_SUSPEND_NONE;
}
EXPORT_SYMBOL(snd_sof_complete);
diff --git a/sound/soc/sof/probe.c b/sound/soc/sof/probe.c
new file mode 100644
index 000000000000..c38169fe00c5
--- /dev/null
+++ b/sound/soc/sof/probe.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include "sof-priv.h"
+#include "probe.h"
+
+/**
+ * sof_ipc_probe_init - initialize data probing
+ * @sdev: SOF sound device
+ * @stream_tag: Extractor stream tag
+ * @buffer_size: DMA buffer size to set for extractor
+ *
+ * Host chooses whether extraction is supported or not by providing a
+ * valid stream tag to the DSP. Once specified, the stream described by
+ * that tag will be tied to the DSP for extraction for the entire
+ * lifetime of the probe.
+ *
+ * Probing is initialized only once and each INIT request must be
+ * matched by a DEINIT call.
+ */
+int sof_ipc_probe_init(struct snd_sof_dev *sdev,
+ u32 stream_tag, size_t buffer_size)
+{
+ struct sof_ipc_probe_dma_add_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, dma, 1);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_INIT;
+ msg->num_elems = 1;
+ msg->dma[0].stream_tag = stream_tag;
+ msg->dma[0].dma_buffer_size = buffer_size;
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_init);
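/*
 * For reference, struct_size(msg, dma, 1) above sizes the fixed header
 * plus one flexible-array element with overflow checking; the
 * open-coded equivalent (without the overflow protection) would be:
 *
 *	size_t size = sizeof(*msg) + 1 * sizeof(msg->dma[0]);
 */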
+
+/**
+ * sof_ipc_probe_deinit - cleanup after data probing
+ * @sdev: SOF sound device
+ *
+ * Host sends DEINIT request to free previously initialized probe
+ * on the DSP side once it is no longer needed. Send DEINIT only when
+ * there are no probes connected and all injectors are detached.
+ */
+int sof_ipc_probe_deinit(struct snd_sof_dev *sdev)
+{
+ struct sof_ipc_cmd_hdr msg;
+ struct sof_ipc_reply reply;
+
+ msg.size = sizeof(msg);
+ msg.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_DEINIT;
+
+ return sof_ipc_tx_message(sdev->ipc, msg.cmd, &msg, msg.size,
+ &reply, sizeof(reply));
+}
+EXPORT_SYMBOL(sof_ipc_probe_deinit);
+
+static int sof_ipc_probe_info(struct snd_sof_dev *sdev, unsigned int cmd,
+ void **params, size_t *num_params)
+{
+ struct sof_ipc_probe_info_params msg = {{{0}}};
+ struct sof_ipc_probe_info_params *reply;
+ size_t bytes;
+ int ret;
+
+ *params = NULL;
+ *num_params = 0;
+
+ reply = kzalloc(SOF_IPC_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!reply)
+ return -ENOMEM;
+ msg.rhdr.hdr.size = sizeof(msg);
+ msg.rhdr.hdr.cmd = SOF_IPC_GLB_PROBE | cmd;
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg.rhdr.hdr.cmd, &msg,
+ msg.rhdr.hdr.size, reply, SOF_IPC_MSG_MAX_SIZE);
+ if (ret < 0 || reply->rhdr.error < 0)
+ goto exit;
+
+ if (!reply->num_elems)
+ goto exit;
+
+ if (cmd == SOF_IPC_PROBE_DMA_INFO)
+ bytes = sizeof(reply->dma[0]);
+ else
+ bytes = sizeof(reply->desc[0]);
+ bytes *= reply->num_elems;
+ *params = kmemdup(&reply->dma[0], bytes, GFP_KERNEL);
+ if (!*params) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ *num_params = reply->num_elems;
+
+exit:
+ kfree(reply);
+ return ret;
+}
+
+/**
+ * sof_ipc_probe_dma_info - retrieve list of active injection dmas
+ * @sdev: SOF sound device
+ * @dma: Returned list of active dmas
+ * @num_dma: Returned count of active dmas
+ *
+ * Host sends DMA_INFO request to obtain the list of injection dmas it
+ * can use to transfer data.
+ *
+ * Note that the list contains only injection dmas as there is only one
+ * extractor (dma) and it is always assigned on probing init.
+ * DSP knows exactly where data from extraction probes is going to,
+ * which is not the case for injection where multiple streams
+ * could be engaged.
+ */
+int sof_ipc_probe_dma_info(struct snd_sof_dev *sdev,
+ struct sof_probe_dma **dma, size_t *num_dma)
+{
+ return sof_ipc_probe_info(sdev, SOF_IPC_PROBE_DMA_INFO,
+ (void **)dma, num_dma);
+}
+EXPORT_SYMBOL(sof_ipc_probe_dma_info);
+
+/**
+ * sof_ipc_probe_dma_add - attach to specified dmas
+ * @sdev: SOF sound device
+ * @dma: List of streams (dmas) to attach to
+ * @num_dma: Number of elements in @dma
+ *
+ * Contrary to extraction, injection streams are never assigned
+ * on init. Before attempting any data injection, the host is
+ * responsible for specifying the streams which will later be used to
+ * transfer data to the connected probe points.
+ */
+int sof_ipc_probe_dma_add(struct snd_sof_dev *sdev,
+ struct sof_probe_dma *dma, size_t num_dma)
+{
+ struct sof_ipc_probe_dma_add_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, dma, num_dma);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_dma;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_DMA_ADD;
+ memcpy(&msg->dma[0], dma, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_dma_add);
+
+/**
+ * sof_ipc_probe_dma_remove - detach from specified dmas
+ * @sdev: SOF sound device
+ * @stream_tag: List of stream tags to detach from
+ * @num_stream_tag: Number of elements in @stream_tag
+ *
+ * Host sends DMA_REMOVE request to free a previously attached stream
+ * from being occupied for injection. Each detach operation should
+ * match an equivalent DMA_ADD. Detach only when all probes tied to a
+ * given stream have been disconnected.
+ */
+int sof_ipc_probe_dma_remove(struct snd_sof_dev *sdev,
+ unsigned int *stream_tag, size_t num_stream_tag)
+{
+ struct sof_ipc_probe_dma_remove_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, stream_tag, num_stream_tag);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_stream_tag;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_DMA_REMOVE;
+ memcpy(&msg->stream_tag[0], stream_tag, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_dma_remove);
+
+/**
+ * sof_ipc_probe_points_info - retrieve list of active probe points
+ * @sdev: SOF sound device
+ * @desc: Returned list of active probes
+ * @num_desc: Returned count of active probes
+ *
+ * Host sends PROBE_POINT_INFO request to obtain the list of active
+ * probe points, valid for disconnection when a given probe is no
+ * longer required.
+ */
+int sof_ipc_probe_points_info(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc **desc, size_t *num_desc)
+{
+ return sof_ipc_probe_info(sdev, SOF_IPC_PROBE_POINT_INFO,
+ (void **)desc, num_desc);
+}
+EXPORT_SYMBOL(sof_ipc_probe_points_info);
+
+/**
+ * sof_ipc_probe_points_add - connect specified probes
+ * @sdev: SOF sound device
+ * @desc: List of probe points to connect
+ * @num_desc: Number of elements in @desc
+ *
+ * Dynamically connects to the provided set of endpoints. Immediately
+ * after a connection is established, the host must be prepared to
+ * transfer data from or to the target stream, given the probing purpose.
+ *
+ * Each probe point should be removed using PROBE_POINT_REMOVE
+ * request when no longer needed.
+ */
+int sof_ipc_probe_points_add(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc *desc, size_t num_desc)
+{
+ struct sof_ipc_probe_point_add_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, desc, num_desc);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_desc;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_POINT_ADD;
+ memcpy(&msg->desc[0], desc, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_points_add);
+
+/**
+ * sof_ipc_probe_points_remove - disconnect specified probes
+ * @sdev: SOF sound device
+ * @buffer_id: List of probe points to disconnect
+ * @num_buffer_id: Number of elements in @buffer_id
+ *
+ * Removes previously connected probes from the list of active probe
+ * points and frees all their resources on the DSP side.
+ */
+int sof_ipc_probe_points_remove(struct snd_sof_dev *sdev,
+ unsigned int *buffer_id, size_t num_buffer_id)
+{
+ struct sof_ipc_probe_point_remove_params *msg;
+ struct sof_ipc_reply reply;
+ size_t size = struct_size(msg, buffer_id, num_buffer_id);
+ int ret;
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+ msg->hdr.size = size;
+ msg->num_elems = num_buffer_id;
+ msg->hdr.cmd = SOF_IPC_GLB_PROBE | SOF_IPC_PROBE_POINT_REMOVE;
+ memcpy(&msg->buffer_id[0], buffer_id, size - sizeof(*msg));
+
+ ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
+ &reply, sizeof(reply));
+ kfree(msg);
+ return ret;
+}
+EXPORT_SYMBOL(sof_ipc_probe_points_remove);
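/*
 * A sketch of the call flow these helpers imply, following the
 * constraints stated in the kernel-doc above; error handling is trimmed
 * and the stream tag, buffer size and descriptor values are
 * hypothetical.
 */
static int probe_extract_session(struct snd_sof_dev *sdev, u32 stream_tag)
{
	struct sof_probe_point_desc desc = {
		.buffer_id = 4, /* hypothetical probe point */
		.purpose = SOF_CONNECTION_PURPOSE_EXTRACT,
		.stream_tag = stream_tag,
	};
	unsigned int buffer_id = desc.buffer_id;
	int ret;

	/* one INIT per session, matched by the DEINIT below */
	ret = sof_ipc_probe_init(sdev, stream_tag, 8192);
	if (ret < 0)
		return ret;

	ret = sof_ipc_probe_points_add(sdev, &desc, 1);
	if (!ret) {
		/* ... extracted data flows over the tagged stream ... */
		sof_ipc_probe_points_remove(sdev, &buffer_id, 1);
	}

	sof_ipc_probe_deinit(sdev);
	return ret;
}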
diff --git a/sound/soc/sof/probe.h b/sound/soc/sof/probe.h
new file mode 100644
index 000000000000..45daa5552834
--- /dev/null
+++ b/sound/soc/sof/probe.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
+ *
+ * Author: Cezary Rojewski <cezary.rojewski@intel.com>
+ */
+
+#ifndef __SOF_PROBE_H
+#define __SOF_PROBE_H
+
+#include <sound/sof/header.h>
+
+struct snd_sof_dev;
+
+#define SOF_PROBE_INVALID_NODE_ID UINT_MAX
+
+struct sof_probe_dma {
+ unsigned int stream_tag;
+ unsigned int dma_buffer_size;
+} __packed;
+
+enum sof_connection_purpose {
+ SOF_CONNECTION_PURPOSE_EXTRACT = 1,
+ SOF_CONNECTION_PURPOSE_INJECT,
+};
+
+struct sof_probe_point_desc {
+ unsigned int buffer_id;
+ unsigned int purpose;
+ unsigned int stream_tag;
+} __packed;
+
+struct sof_ipc_probe_dma_add_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ struct sof_probe_dma dma[0];
+} __packed;
+
+struct sof_ipc_probe_info_params {
+ struct sof_ipc_reply rhdr;
+ unsigned int num_elems;
+ union {
+ struct sof_probe_dma dma[0];
+ struct sof_probe_point_desc desc[0];
+ };
+} __packed;
+
+struct sof_ipc_probe_dma_remove_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ unsigned int stream_tag[0];
+} __packed;
+
+struct sof_ipc_probe_point_add_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ struct sof_probe_point_desc desc[0];
+} __packed;
+
+struct sof_ipc_probe_point_remove_params {
+ struct sof_ipc_cmd_hdr hdr;
+ unsigned int num_elems;
+ unsigned int buffer_id[0];
+} __packed;
+
+int sof_ipc_probe_init(struct snd_sof_dev *sdev,
+ u32 stream_tag, size_t buffer_size);
+int sof_ipc_probe_deinit(struct snd_sof_dev *sdev);
+int sof_ipc_probe_dma_info(struct snd_sof_dev *sdev,
+ struct sof_probe_dma **dma, size_t *num_dma);
+int sof_ipc_probe_dma_add(struct snd_sof_dev *sdev,
+ struct sof_probe_dma *dma, size_t num_dma);
+int sof_ipc_probe_dma_remove(struct snd_sof_dev *sdev,
+ unsigned int *stream_tag, size_t num_stream_tag);
+int sof_ipc_probe_points_info(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc **desc, size_t *num_desc);
+int sof_ipc_probe_points_add(struct snd_sof_dev *sdev,
+ struct sof_probe_point_desc *desc, size_t num_desc);
+int sof_ipc_probe_points_remove(struct snd_sof_dev *sdev,
+ unsigned int *buffer_id, size_t num_buffer_id);
+
+#endif
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
index 0d8f65b9ae25..fc4ed2a8a914 100644
--- a/sound/soc/sof/sof-audio.c
+++ b/sound/soc/sof/sof-audio.c
@@ -11,7 +11,40 @@
#include "sof-audio.h"
#include "ops.h"
-bool snd_sof_dsp_d0i3_on_suspend(struct snd_sof_dev *sdev)
+/*
+ * helper to determine if there are only D0i3 compatible
+ * streams active
+ */
+bool snd_sof_dsp_only_d0i3_compatible_stream_active(struct snd_sof_dev *sdev)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_sof_pcm *spcm;
+ bool d0i3_compatible_active = false;
+ int dir;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ for_each_pcm_streams(dir) {
+ substream = spcm->stream[dir].substream;
+ if (!substream || !substream->runtime)
+ continue;
+
+ /*
+ * substream->runtime not being NULL indicates that
+ * the stream is open. No need to check the
+ * stream state.
+ */
+ if (!spcm->stream[dir].d0i3_compatible)
+ return false;
+
+ d0i3_compatible_active = true;
+ }
+ }
+
+ return d0i3_compatible_active;
+}
+EXPORT_SYMBOL(snd_sof_dsp_only_d0i3_compatible_stream_active);
+
+bool snd_sof_stream_suspend_ignored(struct snd_sof_dev *sdev)
{
struct snd_sof_pcm *spcm;
@@ -38,7 +71,14 @@ int sof_set_hw_params_upon_resume(struct device *dev)
* have been suspended.
*/
list_for_each_entry(spcm, &sdev->pcm_list, list) {
- for (dir = 0; dir <= SNDRV_PCM_STREAM_CAPTURE; dir++) {
+ for_each_pcm_streams(dir) {
+ /*
+ * do not reset hw_params upon resume for streams that
+ * were kept running during suspend
+ */
+ if (spcm->stream[dir].suspend_ignored)
+ continue;
+
substream = spcm->stream[dir].substream;
if (!substream || !substream->runtime)
continue;
@@ -279,16 +319,11 @@ struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_soc_component *scomp,
int dir;
list_for_each_entry(spcm, &sdev->pcm_list, list) {
- dir = SNDRV_PCM_STREAM_PLAYBACK;
- if (spcm->stream[dir].comp_id == comp_id) {
- *direction = dir;
- return spcm;
- }
-
- dir = SNDRV_PCM_STREAM_CAPTURE;
- if (spcm->stream[dir].comp_id == comp_id) {
- *direction = dir;
- return spcm;
+ for_each_pcm_streams(dir) {
+ if (spcm->stream[dir].comp_id == comp_id) {
+ *direction = dir;
+ return spcm;
+ }
}
}
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
index a62fb2da6a6e..bf65f31af858 100644
--- a/sound/soc/sof/sof-audio.h
+++ b/sound/soc/sof/sof-audio.h
@@ -11,6 +11,8 @@
#ifndef __SOUND_SOC_SOF_AUDIO_H
#define __SOUND_SOC_SOF_AUDIO_H
+#include <linux/workqueue.h>
+
#include <sound/soc.h>
#include <sound/control.h>
#include <sound/sof/stream.h> /* needs to be included before control.h */
@@ -189,6 +191,7 @@ struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_soc_component *scomp,
struct snd_sof_pcm *snd_sof_find_spcm_pcm_id(struct snd_soc_component *scomp,
unsigned int pcm_id);
void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream);
+void snd_sof_pcm_period_elapsed_work(struct work_struct *work);
/*
* Mixer IPC
@@ -202,7 +205,8 @@ int snd_sof_ipc_set_get_comp_data(struct snd_sof_control *scontrol,
/* PM */
int sof_restore_pipelines(struct device *dev);
int sof_set_hw_params_upon_resume(struct device *dev);
-bool snd_sof_dsp_d0i3_on_suspend(struct snd_sof_dev *sdev);
+bool snd_sof_stream_suspend_ignored(struct snd_sof_dev *sdev);
+bool snd_sof_dsp_only_d0i3_compatible_stream_active(struct snd_sof_dev *sdev);
/* Machine driver enumeration */
int sof_machine_register(struct snd_sof_dev *sdev, void *pdata);
diff --git a/sound/soc/sof/sof-of-dev.c b/sound/soc/sof/sof-of-dev.c
index 39ea8af6213f..16e49f2ee629 100644
--- a/sound/soc/sof/sof-of-dev.c
+++ b/sound/soc/sof/sof-of-dev.c
@@ -13,12 +13,21 @@
#include "ops.h"
extern struct snd_sof_dsp_ops sof_imx8_ops;
+extern struct snd_sof_dsp_ops sof_imx8x_ops;
/* platform specific devices */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8)
static struct sof_dev_desc sof_of_imx8qxp_desc = {
.default_fw_path = "imx/sof",
.default_tplg_path = "imx/sof-tplg",
+ .default_fw_filename = "sof-imx8x.ri",
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8x_ops,
+};
+
+static struct sof_dev_desc sof_of_imx8qm_desc = {
+ .default_fw_path = "imx/sof",
+ .default_tplg_path = "imx/sof-tplg",
.default_fw_filename = "sof-imx8.ri",
.nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
.ops = &sof_imx8_ops,
@@ -103,6 +112,7 @@ static int sof_of_remove(struct platform_device *pdev)
static const struct of_device_id sof_of_ids[] = {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8)
{ .compatible = "fsl,imx8qxp-dsp", .data = &sof_of_imx8qxp_desc},
+ { .compatible = "fsl,imx8qm-dsp", .data = &sof_of_imx8qm_desc},
#endif
{ }
};
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index bc2337cf1142..a4b297c842df 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -54,10 +54,26 @@ extern int sof_core_debug;
(IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE) || \
IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST))
-/* DSP D0ix sub-state */
-enum sof_d0_substate {
- SOF_DSP_D0I0 = 0, /* DSP default D0 substate */
- SOF_DSP_D0I3, /* DSP D0i3(low power) substate*/
+/* DSP power state */
+enum sof_dsp_power_states {
+ SOF_DSP_PM_D0,
+ SOF_DSP_PM_D1,
+ SOF_DSP_PM_D2,
+ SOF_DSP_PM_D3_HOT,
+ SOF_DSP_PM_D3,
+ SOF_DSP_PM_D3_COLD,
+};
+
+struct sof_dsp_power_state {
+ u32 state;
+ u32 substate; /* platform-specific */
+};
+
+/* System suspend target state */
+enum sof_system_suspend_state {
+ SOF_SUSPEND_NONE = 0,
+ SOF_SUSPEND_S0IX,
+ SOF_SUSPEND_S3,
};
struct snd_sof_dev;
@@ -154,6 +170,27 @@ struct snd_sof_dsp_ops {
snd_pcm_uframes_t (*pcm_pointer)(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream); /* optional */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ /* Except for probe_pointer, all probe ops are mandatory */
+ int (*probe_assign)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_free)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_set_params)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_trigger)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream, int cmd,
+ struct snd_soc_dai *dai); /* mandatory */
+ int (*probe_pointer)(struct snd_sof_dev *sdev,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp,
+ struct snd_soc_dai *dai); /* optional */
+#endif
+
/* host read DSP stream data */
void (*ipc_msg_data)(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
@@ -169,14 +206,15 @@ struct snd_sof_dsp_ops {
int (*post_fw_run)(struct snd_sof_dev *sof_dev); /* optional */
/* DSP PM */
- int (*suspend)(struct snd_sof_dev *sof_dev); /* optional */
+ int (*suspend)(struct snd_sof_dev *sof_dev,
+ u32 target_state); /* optional */
int (*resume)(struct snd_sof_dev *sof_dev); /* optional */
int (*runtime_suspend)(struct snd_sof_dev *sof_dev); /* optional */
int (*runtime_resume)(struct snd_sof_dev *sof_dev); /* optional */
int (*runtime_idle)(struct snd_sof_dev *sof_dev); /* optional */
int (*set_hw_params_upon_resume)(struct snd_sof_dev *sdev); /* optional */
int (*set_power_state)(struct snd_sof_dev *sdev,
- enum sof_d0_substate d0_substate); /* optional */
+ const struct sof_dsp_power_state *target_state); /* optional */
/* DSP clocking */
int (*set_clk)(struct snd_sof_dev *sof_dev, u32 freq); /* optional */
@@ -323,10 +361,11 @@ struct snd_sof_dev {
*/
struct snd_soc_component_driver plat_drv;
- /* power states related */
- enum sof_d0_substate d0_substate;
- /* flag to track if the intended power target of suspend is S0ix */
- bool s0_suspend;
+ /* current DSP power state */
+ struct sof_dsp_power_state dsp_power_state;
+
+ /* Intended power target of system suspend */
+ enum sof_system_suspend_state system_suspend_target;
/* DSP firmware boot */
wait_queue_head_t boot_wait;
@@ -376,16 +415,15 @@ struct snd_sof_dev {
u32 enabled_cores_mask; /* keep track of enabled cores */
/* FW configuration */
- struct sof_ipc_dma_buffer_data *info_buffer;
struct sof_ipc_window *info_window;
/* IPC timeouts in ms */
int ipc_timeout;
int boot_timeout;
- /* Wait queue for code loading */
- wait_queue_head_t waitq;
- int code_loading;
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
+ unsigned int extractor_stream_tag;
+#endif
/* DMA for Trace */
struct snd_dma_buffer dmatb;
@@ -417,8 +455,6 @@ int snd_sof_resume(struct device *dev);
int snd_sof_suspend(struct device *dev);
int snd_sof_prepare(struct device *dev);
void snd_sof_complete(struct device *dev);
-int snd_sof_set_d0_substate(struct snd_sof_dev *sdev,
- enum sof_d0_substate d0_substate);
void snd_sof_new_platform_drv(struct snd_sof_dev *sdev);
@@ -454,6 +490,9 @@ int snd_sof_ipc_valid(struct snd_sof_dev *sdev);
int sof_ipc_tx_message(struct snd_sof_ipc *ipc, u32 header,
void *msg_data, size_t msg_bytes, void *reply_data,
size_t reply_bytes);
+int sof_ipc_tx_message_no_pm(struct snd_sof_ipc *ipc, u32 header,
+ void *msg_data, size_t msg_bytes,
+ void *reply_data, size_t reply_bytes);
/*
* Trace/debug
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 9f4f8868b386..fe8ba3e05e08 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -9,6 +9,7 @@
//
#include <linux/firmware.h>
+#include <linux/workqueue.h>
#include <sound/tlv.h>
#include <sound/pcm_params.h>
#include <uapi/sound/sof/tokens.h>
@@ -1240,6 +1241,8 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
{
struct snd_soc_card *card = scomp->card;
struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai *cpu_dai;
+ int i;
list_for_each_entry(rtd, &card->rtd_list, list) {
dev_vdbg(scomp->dev, "tplg: check widget: %s stream: %s dai stream: %s\n",
@@ -1254,13 +1257,15 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
switch (w->id) {
case snd_soc_dapm_dai_out:
- rtd->cpu_dai->capture_widget = w;
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+ cpu_dai->capture_widget = w;
dai->name = rtd->dai_link->name;
dev_dbg(scomp->dev, "tplg: connected widget %s -> DAI link %s\n",
w->name, rtd->dai_link->name);
break;
case snd_soc_dapm_dai_in:
- rtd->cpu_dai->playback_widget = w;
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+ cpu_dai->playback_widget = w;
dai->name = rtd->dai_link->name;
dev_dbg(scomp->dev, "tplg: connected widget %s -> DAI link %s\n",
w->name, rtd->dai_link->name);
@@ -2444,7 +2449,7 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
struct snd_soc_tplg_stream_caps *caps;
struct snd_soc_tplg_private *private = &pcm->priv;
struct snd_sof_pcm *spcm;
- int stream = SNDRV_PCM_STREAM_PLAYBACK;
+ int stream;
int ret = 0;
/* nothing to do for BEs atm */
@@ -2456,8 +2461,12 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
return -ENOMEM;
spcm->scomp = scomp;
- spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].comp_id = COMP_ID_UNASSIGNED;
- spcm->stream[SNDRV_PCM_STREAM_CAPTURE].comp_id = COMP_ID_UNASSIGNED;
+
+ for_each_pcm_streams(stream) {
+ spcm->stream[stream].comp_id = COMP_ID_UNASSIGNED;
+ INIT_WORK(&spcm->stream[stream].period_elapsed_work,
+ snd_sof_pcm_period_elapsed_work);
+ }
spcm->pcm = *pcm;
dev_dbg(scomp->dev, "tplg: load pcm %s\n", pcm->dai_name);
@@ -2478,8 +2487,10 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
if (!spcm->pcm.playback)
goto capture;
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+
dev_vdbg(scomp->dev, "tplg: pcm %s stream tokens: playback d0i3:%d\n",
- spcm->pcm.pcm_name, spcm->stream[0].d0i3_compatible);
+ spcm->pcm.pcm_name, spcm->stream[stream].d0i3_compatible);
caps = &spcm->pcm.caps[stream];
@@ -2509,7 +2520,7 @@ capture:
return ret;
dev_vdbg(scomp->dev, "tplg: pcm %s stream tokens: capture d0i3:%d\n",
- spcm->pcm.pcm_name, spcm->stream[1].d0i3_compatible);
+ spcm->pcm.pcm_name, spcm->stream[stream].d0i3_compatible);
caps = &spcm->pcm.caps[stream];
diff --git a/sound/soc/sprd/Kconfig b/sound/soc/sprd/Kconfig
index 5474fd3de8c0..5e0ac8278572 100644
--- a/sound/soc/sprd/Kconfig
+++ b/sound/soc/sprd/Kconfig
@@ -8,7 +8,7 @@ config SND_SOC_SPRD
the Spreadtrum SoCs' Audio interfaces.
config SND_SOC_SPRD_MCDT
- bool "Spreadtrum multi-channel data transfer support"
+ tristate "Spreadtrum multi-channel data transfer support"
depends on SND_SOC_SPRD
help
Say y here to enable multi-channel data transfer support. It
diff --git a/sound/soc/sprd/sprd-mcdt.h b/sound/soc/sprd/sprd-mcdt.h
index 9cc7e207ac76..679e3af3baad 100644
--- a/sound/soc/sprd/sprd-mcdt.h
+++ b/sound/soc/sprd/sprd-mcdt.h
@@ -48,7 +48,7 @@ struct sprd_mcdt_chan {
struct list_head list;
};
-#ifdef CONFIG_SND_SOC_SPRD_MCDT
+#if IS_ENABLED(CONFIG_SND_SOC_SPRD_MCDT)
struct sprd_mcdt_chan *sprd_mcdt_request_chan(u8 channel,
enum sprd_mcdt_channel_type type);
void sprd_mcdt_free_chan(struct sprd_mcdt_chan *chan);
diff --git a/sound/soc/sprd/sprd-pcm-compress.c b/sound/soc/sprd/sprd-pcm-compress.c
index 6cddf551bc11..74d48340cade 100644
--- a/sound/soc/sprd/sprd-pcm-compress.c
+++ b/sound/soc/sprd/sprd-pcm-compress.c
@@ -135,7 +135,7 @@ static int sprd_platform_compr_dma_config(struct snd_compr_stream *cstream,
struct snd_soc_component *component =
snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
- struct sprd_compr_data *data = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct sprd_pcm_dma_params *dma_params = data->dma_params;
struct sprd_compr_dma *dma = &stream->dma[channel];
struct dma_slave_config config = { };
@@ -321,7 +321,7 @@ static int sprd_platform_compr_open(struct snd_compr_stream *cstream)
struct snd_soc_component *component =
snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
- struct sprd_compr_data *data = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct sprd_compr_stream *stream;
struct sprd_compr_callback cb;
int stream_id = cstream->direction, ret;
diff --git a/sound/soc/sprd/sprd-pcm-dma.c b/sound/soc/sprd/sprd-pcm-dma.c
index 2284558684bc..d12d3cad8cbd 100644
--- a/sound/soc/sprd/sprd-pcm-dma.c
+++ b/sound/soc/sprd/sprd-pcm-dma.c
@@ -200,7 +200,7 @@ static int sprd_pcm_hw_params(struct snd_soc_component *component,
unsigned long flags;
int ret, i, j, sg_num;
- dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (!dma_params) {
dev_warn(component->dev, "no dma parameters setting\n");
dma_private->params = NULL;
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 51407a21c440..16ff02953015 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -215,7 +215,7 @@ static int stm32_adfsdm_trigger(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -235,7 +235,7 @@ static int stm32_adfsdm_pcm_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
int ret;
ret = snd_soc_set_runtime_hwparams(substream, &stm32_adfsdm_pcm_hw);
@@ -250,7 +250,7 @@ static int stm32_adfsdm_pcm_close(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
priv->substream = NULL;
@@ -263,7 +263,7 @@ static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
return bytes_to_frames(substream->runtime, priv->pos);
}
@@ -274,7 +274,7 @@ static int stm32_adfsdm_pcm_hw_params(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
priv->pcm_buff = substream->runtime->dma_area;
@@ -287,7 +287,7 @@ static int stm32_adfsdm_pcm_new(struct snd_soc_component *component,
{
struct snd_pcm *pcm = rtd->pcm;
struct stm32_adfsdm_priv *priv =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
unsigned int size = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index 3e7226a53e53..7c4d63c33f15 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -831,25 +831,33 @@ static int stm32_i2s_parse_dt(struct platform_device *pdev,
/* Get clocks */
i2s->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(i2s->pclk)) {
- dev_err(&pdev->dev, "Could not get pclk\n");
+ if (PTR_ERR(i2s->pclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get pclk: %ld\n",
+ PTR_ERR(i2s->pclk));
return PTR_ERR(i2s->pclk);
}
i2s->i2sclk = devm_clk_get(&pdev->dev, "i2sclk");
if (IS_ERR(i2s->i2sclk)) {
- dev_err(&pdev->dev, "Could not get i2sclk\n");
+ if (PTR_ERR(i2s->i2sclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get i2sclk: %ld\n",
+ PTR_ERR(i2s->i2sclk));
return PTR_ERR(i2s->i2sclk);
}
i2s->x8kclk = devm_clk_get(&pdev->dev, "x8k");
if (IS_ERR(i2s->x8kclk)) {
- dev_err(&pdev->dev, "missing x8k parent clock\n");
+ if (PTR_ERR(i2s->x8kclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get x8k parent clock: %ld\n",
+ PTR_ERR(i2s->x8kclk));
return PTR_ERR(i2s->x8kclk);
}
i2s->x11kclk = devm_clk_get(&pdev->dev, "x11k");
if (IS_ERR(i2s->x11kclk)) {
- dev_err(&pdev->dev, "missing x11k parent clock\n");
+ if (PTR_ERR(i2s->x11kclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get x11k parent clock: %ld\n",
+ PTR_ERR(i2s->x11kclk));
return PTR_ERR(i2s->x11kclk);
}
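/*
 * On kernels that provide dev_err_probe() (v5.9 and later), each of the
 * clock lookups above collapses into a single statement that logs only
 * when the error is not -EPROBE_DEFER; a sketch for pclk:
 *
 *	i2s->pclk = devm_clk_get(&pdev->dev, "pclk");
 *	if (IS_ERR(i2s->pclk))
 *		return dev_err_probe(&pdev->dev, PTR_ERR(i2s->pclk),
 *				     "Could not get pclk\n");
 */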
@@ -866,12 +874,24 @@ static int stm32_i2s_parse_dt(struct platform_device *pdev,
}
/* Reset */
- rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (!IS_ERR(rst)) {
- reset_control_assert(rst);
- udelay(2);
- reset_control_deassert(rst);
+ rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ if (PTR_ERR(rst) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Reset controller error %ld\n",
+ PTR_ERR(rst));
+ return PTR_ERR(rst);
}
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+
+ return 0;
+}
+
+static int stm32_i2s_remove(struct platform_device *pdev)
+{
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
return 0;
}
@@ -903,42 +923,51 @@ static int stm32_i2s_probe(struct platform_device *pdev)
i2s->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "pclk",
i2s->base, i2s->regmap_conf);
if (IS_ERR(i2s->regmap)) {
- dev_err(&pdev->dev, "regmap init failed\n");
+ if (PTR_ERR(i2s->regmap) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Regmap init error %ld\n",
+ PTR_ERR(i2s->regmap));
return PTR_ERR(i2s->regmap);
}
- ret = devm_snd_soc_register_component(&pdev->dev, &stm32_i2s_component,
- i2s->dai_drv, 1);
- if (ret)
+ ret = snd_dmaengine_pcm_register(&pdev->dev, &stm32_i2s_pcm_config, 0);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "PCM DMA register error %d\n", ret);
return ret;
+ }
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
- &stm32_i2s_pcm_config, 0);
- if (ret)
+ ret = snd_soc_register_component(&pdev->dev, &stm32_i2s_component,
+ i2s->dai_drv, 1);
+ if (ret) {
+ snd_dmaengine_pcm_unregister(&pdev->dev);
return ret;
+ }
/* Set SPI/I2S in i2s mode */
ret = regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
I2S_CGFR_I2SMOD, I2S_CGFR_I2SMOD);
if (ret)
- return ret;
+ goto error;
ret = regmap_read(i2s->regmap, STM32_I2S_IPIDR_REG, &val);
if (ret)
- return ret;
+ goto error;
if (val == I2S_IPIDR_NUMBER) {
ret = regmap_read(i2s->regmap, STM32_I2S_HWCFGR_REG, &val);
if (ret)
- return ret;
+ goto error;
if (!FIELD_GET(I2S_HWCFGR_I2S_SUPPORT_MASK, val)) {
dev_err(&pdev->dev,
"Device does not support i2s mode\n");
- return -EPERM;
+ ret = -EPERM;
+ goto error;
}
ret = regmap_read(i2s->regmap, STM32_I2S_VERR_REG, &val);
+ if (ret)
+ goto error;
dev_dbg(&pdev->dev, "I2S version: %lu.%lu registered\n",
FIELD_GET(I2S_VERR_MAJ_MASK, val),
@@ -946,6 +975,11 @@ static int stm32_i2s_probe(struct platform_device *pdev)
}
return ret;
+
+error:
+ stm32_i2s_remove(pdev);
+
+ return ret;
}
MODULE_DEVICE_TABLE(of, stm32_i2s_ids);
@@ -981,6 +1015,7 @@ static struct platform_driver stm32_i2s_driver = {
.pm = &stm32_i2s_pm_ops,
},
.probe = stm32_i2s_probe,
+ .remove = stm32_i2s_remove,
};
module_platform_driver(stm32_i2s_driver);
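Note: the error-handling pattern introduced throughout this file — log only when the
code is not -EPROBE_DEFER, then propagate it — is the open-coded form of what later
kernels (v5.9+) wrap in dev_err_probe(). A minimal sketch of the equivalent call,
assuming such a kernel; not part of this patch:

	i2s->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(i2s->pclk))
		return dev_err_probe(&pdev->dev, PTR_ERR(i2s->pclk),
				     "Could not get pclk\n");

dev_err_probe() suppresses the message for -EPROBE_DEFER and returns the error code,
which is exactly the behavior hand-rolled above.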
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index e20267504b16..058757c721f0 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -174,20 +174,26 @@ static int stm32_sai_probe(struct platform_device *pdev)
if (!STM_SAI_IS_F4(sai)) {
sai->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(sai->pclk)) {
- dev_err(&pdev->dev, "missing bus clock pclk\n");
+ if (PTR_ERR(sai->pclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "missing bus clock pclk: %ld\n",
+ PTR_ERR(sai->pclk));
return PTR_ERR(sai->pclk);
}
}
sai->clk_x8k = devm_clk_get(&pdev->dev, "x8k");
if (IS_ERR(sai->clk_x8k)) {
- dev_err(&pdev->dev, "missing x8k parent clock\n");
+ if (PTR_ERR(sai->clk_x8k) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "missing x8k parent clock: %ld\n",
+ PTR_ERR(sai->clk_x8k));
return PTR_ERR(sai->clk_x8k);
}
sai->clk_x11k = devm_clk_get(&pdev->dev, "x11k");
if (IS_ERR(sai->clk_x11k)) {
- dev_err(&pdev->dev, "missing x11k parent clock\n");
+ if (PTR_ERR(sai->clk_x11k) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "missing x11k parent clock: %ld\n",
+ PTR_ERR(sai->clk_x11k));
return PTR_ERR(sai->clk_x11k);
}
@@ -197,12 +203,16 @@ static int stm32_sai_probe(struct platform_device *pdev)
return sai->irq;
/* reset */
- rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (!IS_ERR(rst)) {
- reset_control_assert(rst);
- udelay(2);
- reset_control_deassert(rst);
+ rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ if (PTR_ERR(rst) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Reset controller error %ld\n",
+ PTR_ERR(rst));
+ return PTR_ERR(rst);
}
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
/* Enable peripheral clock to allow register access */
ret = clk_prepare_enable(sai->pclk);
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 10eb4b8e8e7e..2bd280c01c33 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1238,7 +1238,7 @@ static int stm32_sai_pcm_process_spdif(struct snd_pcm_substream *substream,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
int *ptr = (int *)(runtime->dma_area + hwoff +
channel * (runtime->dma_bytes / runtime->channels));
@@ -1380,7 +1380,9 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
sai->regmap = devm_regmap_init_mmio(&pdev->dev, base,
sai->regmap_config);
if (IS_ERR(sai->regmap)) {
- dev_err(&pdev->dev, "Failed to initialize MMIO\n");
+ if (PTR_ERR(sai->regmap) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Regmap init error %ld\n",
+ PTR_ERR(sai->regmap));
return PTR_ERR(sai->regmap);
}
@@ -1471,7 +1473,9 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
of_node_put(args.np);
sai->sai_ck = devm_clk_get(&pdev->dev, "sai_ck");
if (IS_ERR(sai->sai_ck)) {
- dev_err(&pdev->dev, "Missing kernel clock sai_ck\n");
+ if (PTR_ERR(sai->sai_ck) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Missing kernel clock sai_ck: %ld\n",
+ PTR_ERR(sai->sai_ck));
return PTR_ERR(sai->sai_ck);
}
@@ -1545,7 +1549,8 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
if (ret) {
- dev_err(&pdev->dev, "Could not register pcm dma\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not register pcm dma\n");
return ret;
}
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index 3769d9ce5dbe..1bfa3b2ba974 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -406,7 +406,9 @@ static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
spdifrx->ctrl_chan = dma_request_chan(dev, "rx-ctrl");
if (IS_ERR(spdifrx->ctrl_chan)) {
- dev_err(dev, "dma_request_slave_channel failed\n");
+ if (PTR_ERR(spdifrx->ctrl_chan) != -EPROBE_DEFER)
+ dev_err(dev, "dma_request_slave_channel error %ld\n",
+ PTR_ERR(spdifrx->ctrl_chan));
return PTR_ERR(spdifrx->ctrl_chan);
}
@@ -929,7 +931,9 @@ static int stm32_spdifrx_parse_of(struct platform_device *pdev,
spdifrx->kclk = devm_clk_get(&pdev->dev, "kclk");
if (IS_ERR(spdifrx->kclk)) {
- dev_err(&pdev->dev, "Could not get kclk\n");
+ if (PTR_ERR(spdifrx->kclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get kclk: %ld\n",
+ PTR_ERR(spdifrx->kclk));
return PTR_ERR(spdifrx->kclk);
}
@@ -940,6 +944,22 @@ static int stm32_spdifrx_parse_of(struct platform_device *pdev,
return 0;
}
+static int stm32_spdifrx_remove(struct platform_device *pdev)
+{
+ struct stm32_spdifrx_data *spdifrx = platform_get_drvdata(pdev);
+
+ if (spdifrx->ctrl_chan)
+ dma_release_channel(spdifrx->ctrl_chan);
+
+ if (spdifrx->dmab)
+ snd_dma_free_pages(spdifrx->dmab);
+
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
+
+ return 0;
+}
+
static int stm32_spdifrx_probe(struct platform_device *pdev)
{
struct stm32_spdifrx_data *spdifrx;
@@ -967,7 +987,9 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
spdifrx->base,
spdifrx->regmap_conf);
if (IS_ERR(spdifrx->regmap)) {
- dev_err(&pdev->dev, "Regmap init failed\n");
+ if (PTR_ERR(spdifrx->regmap) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Regmap init error %ld\n",
+ PTR_ERR(spdifrx->regmap));
return PTR_ERR(spdifrx->regmap);
}
@@ -978,37 +1000,46 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
return ret;
}
- rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (!IS_ERR(rst)) {
- reset_control_assert(rst);
- udelay(2);
- reset_control_deassert(rst);
+ rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ if (PTR_ERR(rst) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Reset controller error %ld\n",
+ PTR_ERR(rst));
+ return PTR_ERR(rst);
}
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
- ret = devm_snd_soc_register_component(&pdev->dev,
- &stm32_spdifrx_component,
- stm32_spdifrx_dai,
- ARRAY_SIZE(stm32_spdifrx_dai));
- if (ret)
+ pcm_config = &stm32_spdifrx_pcm_config;
+ ret = snd_dmaengine_pcm_register(&pdev->dev, pcm_config, 0);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "PCM DMA register error %d\n", ret);
return ret;
+ }
+
+ ret = snd_soc_register_component(&pdev->dev,
+ &stm32_spdifrx_component,
+ stm32_spdifrx_dai,
+ ARRAY_SIZE(stm32_spdifrx_dai));
+ if (ret) {
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+ return ret;
+ }
ret = stm32_spdifrx_dma_ctrl_register(&pdev->dev, spdifrx);
if (ret)
goto error;
- pcm_config = &stm32_spdifrx_pcm_config;
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, pcm_config, 0);
- if (ret) {
- dev_err(&pdev->dev, "PCM DMA register returned %d\n", ret);
- goto error;
- }
-
ret = regmap_read(spdifrx->regmap, STM32_SPDIFRX_IDR, &idr);
if (ret)
goto error;
if (idr == SPDIFRX_IPIDR_NUMBER) {
ret = regmap_read(spdifrx->regmap, STM32_SPDIFRX_VERR, &ver);
+ if (ret)
+ goto error;
dev_dbg(&pdev->dev, "SPDIFRX version: %lu.%lu registered\n",
FIELD_GET(SPDIFRX_VERR_MAJ_MASK, ver),
@@ -1018,27 +1049,11 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
return ret;
error:
- if (!IS_ERR(spdifrx->ctrl_chan))
- dma_release_channel(spdifrx->ctrl_chan);
- if (spdifrx->dmab)
- snd_dma_free_pages(spdifrx->dmab);
+ stm32_spdifrx_remove(pdev);
return ret;
}
-static int stm32_spdifrx_remove(struct platform_device *pdev)
-{
- struct stm32_spdifrx_data *spdifrx = platform_get_drvdata(pdev);
-
- if (spdifrx->ctrl_chan)
- dma_release_channel(spdifrx->ctrl_chan);
-
- if (spdifrx->dmab)
- snd_dma_free_pages(spdifrx->dmab);
-
- return 0;
-}
-
MODULE_DEVICE_TABLE(of, stm32_spdifrx_ids);
#ifdef CONFIG_PM_SLEEP
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 98a9fe645521..86779a99df75 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -244,7 +244,7 @@ static int sun4i_spdif_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct sun4i_spdif_dev *host = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
return -EINVAL;
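The rtd->cpu_dai and rtd->codec_dai dereferences being replaced across these machine
drivers went away when ASoC merged the CPU and codec DAIs into a single rtd->dais[]
array. From memory (check include/sound/soc.h in the tree you build against), the
accessor macros are roughly:

#define asoc_rtd_to_cpu(rtd, n)	(rtd)->dais[n]
#define asoc_rtd_to_codec(rtd, n)	(rtd)->dais[n + (rtd)->num_cpus]

so asoc_rtd_to_codec(rtd, 0) is the first codec DAI, offset past all CPU DAIs of the
link.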
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index 686561df8e13..ca51af114419 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -86,7 +86,6 @@
#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
struct sun8i_codec {
- struct device *dev;
struct regmap *regmap;
struct clk *clk_module;
struct clk *clk_bus;
@@ -542,8 +541,6 @@ static int sun8i_codec_probe(struct platform_device *pdev)
if (!scodec)
return -ENOMEM;
- scodec->dev = &pdev->dev;
-
scodec->clk_module = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(scodec->clk_module)) {
dev_err(&pdev->dev, "Failed to get the module clock\n");
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index 9e8b1497efd3..ec39ecba1e8b 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -37,7 +37,7 @@ static int tegra_alc5632_asoc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_alc5632 *alc5632 = snd_soc_card_get_drvdata(card);
int srate, mclk;
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index 4954a33ff46b..d800b62b36f8 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -38,7 +38,7 @@ static int tegra_max98090_asoc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_max98090 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk;
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index d46915a3ec4c..9878bc3eb89e 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -40,7 +40,7 @@ static int tegra_rt5640_asoc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_rt5640 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk;
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
index 81cb6cc6236e..5821313db977 100644
--- a/sound/soc/tegra/tegra_rt5677.c
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -42,7 +42,7 @@ static int tegra_rt5677_asoc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk, err;
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index e13b81d29cf3..dc411ba2e36d 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -36,7 +36,7 @@ static int tegra_sgtl5000_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk;
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index f6dd790dad71..0d653a605358 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -40,7 +40,7 @@ static int tegra_wm8753_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk;
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index f08d3489c3cf..9b5651502f12 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -45,7 +45,7 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk;
@@ -143,19 +143,37 @@ static int tegra_wm8903_event_hp(struct snd_soc_dapm_widget *w,
return 0;
}
+static int tegra_wm8903_event_int_mic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+
+ if (!gpio_is_valid(machine->gpio_int_mic_en))
+ return 0;
+
+ gpio_set_value_cansleep(machine->gpio_int_mic_en,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget tegra_wm8903_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Int Spk", tegra_wm8903_event_int_spk),
SND_SOC_DAPM_HP("Headphone Jack", tegra_wm8903_event_hp),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", tegra_wm8903_event_int_mic),
};
static const struct snd_kcontrol_new tegra_wm8903_controls[] = {
SOC_DAPM_PIN_SWITCH("Int Spk"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
};
static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_component *component = codec_dai->component;
struct snd_soc_card *card = rtd->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
@@ -187,7 +205,7 @@ static int tegra_wm8903_remove(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd =
snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_component *component = codec_dai->component;
wm8903_mic_detect(component, NULL, 0, 0);
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index 3f67ddd13674..f9834afaa2e8 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -35,7 +35,7 @@ static int trimslice_asoc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct tegra_trimslice *trimslice = snd_soc_card_get_drvdata(card);
int srate, mclk;
diff --git a/sound/soc/ti/Kconfig b/sound/soc/ti/Kconfig
index 29f61053ab62..c5408c129f34 100644
--- a/sound/soc/ti/Kconfig
+++ b/sound/soc/ti/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Audio support for Texas Instruments SoCs"
-depends on DMA_OMAP || TI_EDMA || COMPILE_TEST
+depends on DMA_OMAP || TI_EDMA || TI_K3_UDMA || COMPILE_TEST
config SND_SOC_TI_EDMA_PCM
tristate
@@ -10,6 +10,10 @@ config SND_SOC_TI_SDMA_PCM
tristate
select SND_SOC_GENERIC_DMAENGINE_PCM
+config SND_SOC_TI_UDMA_PCM
+ tristate
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+
comment "Texas Instruments DAI support for:"
config SND_SOC_DAVINCI_ASP
tristate "daVinci Audio Serial Port (ASP) or McBSP support"
@@ -24,6 +28,7 @@ config SND_SOC_DAVINCI_MCASP
tristate "Multichannel Audio Serial Port (McASP) support"
select SND_SOC_TI_EDMA_PCM
select SND_SOC_TI_SDMA_PCM
+ select SND_SOC_TI_UDMA_PCM
help
Say Y or M here if you want to have support for McASP IP found in
various Texas Instruments SoCs like:
@@ -31,6 +36,7 @@ config SND_SOC_DAVINCI_MCASP
- Sitara line of SoCs (AM335x, AM438x, etc)
- DRA7x devices
- Keystone devices
+ - K3 devices (am654, j721e)
config SND_SOC_DAVINCI_VCIF
tristate "daVinci Voice Interface (VCIF) support"
diff --git a/sound/soc/ti/Makefile b/sound/soc/ti/Makefile
index 08c44d56ef3e..ea48c6679cc7 100644
--- a/sound/soc/ti/Makefile
+++ b/sound/soc/ti/Makefile
@@ -3,9 +3,11 @@
# Platform drivers
snd-soc-ti-edma-objs := edma-pcm.o
snd-soc-ti-sdma-objs := sdma-pcm.o
+snd-soc-ti-udma-objs := udma-pcm.o
obj-$(CONFIG_SND_SOC_TI_EDMA_PCM) += snd-soc-ti-edma.o
obj-$(CONFIG_SND_SOC_TI_SDMA_PCM) += snd-soc-ti-sdma.o
+obj-$(CONFIG_SND_SOC_TI_UDMA_PCM) += snd-soc-ti-udma.o
# CPU DAI drivers
snd-soc-davinci-asp-objs := davinci-i2s.o
diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
index 8e2fb81ad05c..e17cd5e939f0 100644
--- a/sound/soc/ti/ams-delta.c
+++ b/sound/soc/ti/ams-delta.c
@@ -460,14 +460,14 @@ static void ams_delta_shutdown(struct snd_pcm_substream *substream)
static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct snd_soc_dapm_context *dapm = &card->dapm;
int ret;
/* Codec is ready, now add/activate board specific controls */
/* Store a pointer to the codec structure for tty ldisc use */
- cx20442_codec = rtd->codec_dai->component;
+ cx20442_codec = asoc_rtd_to_codec(rtd, 0)->component;
/* Add hook switch - can be used to control the codec from userspace
* even if line discipline fails */
diff --git a/sound/soc/ti/davinci-evm.c b/sound/soc/ti/davinci-evm.c
index 686b23d7a90d..2cfbeebdfb41 100644
--- a/sound/soc/ti/davinci-evm.c
+++ b/sound/soc/ti/davinci-evm.c
@@ -54,8 +54,8 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_soc_card *soc_card = rtd->card;
int ret = 0;
unsigned sysclk = ((struct snd_soc_card_drvdata_davinci *)
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index e1e937eb1dc1..734ffe925c4d 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -38,6 +38,7 @@
#include "edma-pcm.h"
#include "sdma-pcm.h"
+#include "udma-pcm.h"
#include "davinci-mcasp.h"
#define MCASP_MAX_AFIFO_DEPTH 64
@@ -1764,10 +1765,8 @@ static struct davinci_mcasp_pdata *davinci_mcasp_set_pdata_from_of(
} else if (match) {
pdata = devm_kmemdup(&pdev->dev, match->data, sizeof(*pdata),
GFP_KERNEL);
- if (!pdata) {
- ret = -ENOMEM;
- return pdata;
- }
+ if (!pdata)
+ return NULL;
} else {
/* control shouldn't reach here. something is wrong */
ret = -EINVAL;
@@ -1875,6 +1874,7 @@ nodata:
enum {
PCM_EDMA,
PCM_SDMA,
+ PCM_UDMA,
};
static const char *sdma_prefix = "ti,omap";
@@ -1912,6 +1912,8 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
dev_dbg(mcasp->dev, "DMA controller compatible = \"%s\"\n", tmp);
if (!strncmp(tmp, sdma_prefix, strlen(sdma_prefix)))
return PCM_SDMA;
+ else if (strstr(tmp, "udmap"))
+ return PCM_UDMA;
return PCM_EDMA;
}
@@ -2371,6 +2373,9 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
case PCM_SDMA:
ret = sdma_pcm_platform_register(&pdev->dev, "tx", "rx");
break;
+ case PCM_UDMA:
+ ret = udma_pcm_platform_register(&pdev->dev);
+ break;
default:
dev_err(&pdev->dev, "No DMA controller found (%d)\n", ret);
case -EPROBE_DEFER:
diff --git a/sound/soc/ti/davinci-vcif.c b/sound/soc/ti/davinci-vcif.c
index c84650e4a7aa..ee4d3ef821a1 100644
--- a/sound/soc/ti/davinci-vcif.c
+++ b/sound/soc/ti/davinci-vcif.c
@@ -43,7 +43,7 @@ static void davinci_vcif_start(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct davinci_vcif_dev *davinci_vcif_dev =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
u32 w;
@@ -62,7 +62,7 @@ static void davinci_vcif_stop(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct davinci_vcif_dev *davinci_vcif_dev =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
u32 w;
diff --git a/sound/soc/ti/n810.c b/sound/soc/ti/n810.c
index 3ad2b6daf31e..a1672b479cb7 100644
--- a/sound/soc/ti/n810.c
+++ b/sound/soc/ti/n810.c
@@ -101,7 +101,7 @@ static int n810_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int err;
/* Set the codec system clock for DAC and ADC */
diff --git a/sound/soc/ti/omap-abe-twl6040.c b/sound/soc/ti/omap-abe-twl6040.c
index 6d564ab5e437..61e45fea5dd8 100644
--- a/sound/soc/ti/omap-abe-twl6040.c
+++ b/sound/soc/ti/omap-abe-twl6040.c
@@ -46,7 +46,7 @@ static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int clk_id, freq;
@@ -78,7 +78,7 @@ static int omap_abe_dmic_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret = 0;
ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_DMIC_SYSCLK_PAD_CLKS,
@@ -166,7 +166,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_card *card = rtd->card;
struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int hs_trim;
diff --git a/sound/soc/ti/omap-mcbsp-st.c b/sound/soc/ti/omap-mcbsp-st.c
index 1a3fe854e856..5a32b54bbf3b 100644
--- a/sound/soc/ti/omap-mcbsp-st.c
+++ b/sound/soc/ti/omap-mcbsp-st.c
@@ -489,7 +489,7 @@ OMAP_MCBSP_ST_CONTROLS(3);
int omap_mcbsp_st_add_controls(struct snd_soc_pcm_runtime *rtd, int port_id)
{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
if (!mcbsp->st_data) {
diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
index 302d5c493c29..3d41ca2238d4 100644
--- a/sound/soc/ti/omap-mcbsp.c
+++ b/sound/soc/ti/omap-mcbsp.c
@@ -737,7 +737,7 @@ static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream,
unsigned int packet_size)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int words;
@@ -902,7 +902,7 @@ static snd_pcm_sframes_t omap_mcbsp_dai_delay(
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
u16 fifo_use;
snd_pcm_sframes_t delay;
diff --git a/sound/soc/ti/omap-mcpdm.c b/sound/soc/ti/omap-mcpdm.c
index d7ac4df6f2d9..f2dbadea33bb 100644
--- a/sound/soc/ti/omap-mcpdm.c
+++ b/sound/soc/ti/omap-mcpdm.c
@@ -532,7 +532,7 @@ static const struct snd_soc_component_driver omap_mcpdm_component = {
void omap_mcpdm_configure_dn_offsets(struct snd_soc_pcm_runtime *rtd,
u8 rx1, u8 rx2)
{
- struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
mcpdm->dn_rx_offset = MCPDM_DNOFST_RX1(rx1) | MCPDM_DNOFST_RX2(rx2);
}
diff --git a/sound/soc/ti/omap3pandora.c b/sound/soc/ti/omap3pandora.c
index 545f8dac9bd5..b04146311b31 100644
--- a/sound/soc/ti/omap3pandora.c
+++ b/sound/soc/ti/omap3pandora.c
@@ -32,8 +32,8 @@ static int omap3pandora_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret;
/* Set the codec system clock for DAC and ADC */
diff --git a/sound/soc/ti/osk5912.c b/sound/soc/ti/osk5912.c
index 1ca466bc4025..e01485cc51a1 100644
--- a/sound/soc/ti/osk5912.c
+++ b/sound/soc/ti/osk5912.c
@@ -39,7 +39,7 @@ static int osk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int err;
/* Set the codec system clock for DAC and ADC */
diff --git a/sound/soc/ti/rx51.c b/sound/soc/ti/rx51.c
index fdb0dc85fe67..2a714a004163 100644
--- a/sound/soc/ti/rx51.c
+++ b/sound/soc/ti/rx51.c
@@ -103,7 +103,7 @@ static int rx51_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* Set the codec system clock for DAC and ADC */
return snd_soc_dai_set_sysclk(codec_dai, 0, 19200000,
diff --git a/sound/soc/ti/udma-pcm.c b/sound/soc/ti/udma-pcm.c
new file mode 100644
index 000000000000..39830caaaf7c
--- /dev/null
+++ b/sound/soc/ti/udma-pcm.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
+
+#include "udma-pcm.h"
+
+static const struct snd_pcm_hardware udma_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .buffer_bytes_max = SIZE_MAX,
+ .period_bytes_min = 32,
+ .period_bytes_max = SZ_64K,
+ .periods_min = 2,
+ .periods_max = UINT_MAX,
+};
+
+static const struct snd_dmaengine_pcm_config udma_dmaengine_pcm_config = {
+ .pcm_hardware = &udma_pcm_hardware,
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+};
+
+int udma_pcm_platform_register(struct device *dev)
+{
+ return devm_snd_dmaengine_pcm_register(dev, &udma_dmaengine_pcm_config,
+ 0);
+}
+EXPORT_SYMBOL_GPL(udma_pcm_platform_register);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("UDMA PCM ASoC platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/ti/udma-pcm.h b/sound/soc/ti/udma-pcm.h
new file mode 100644
index 000000000000..54111e7312c1
--- /dev/null
+++ b/sound/soc/ti/udma-pcm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __UDMA_PCM_H__
+#define __UDMA_PCM_H__
+
+#if IS_ENABLED(CONFIG_SND_SOC_TI_UDMA_PCM)
+int udma_pcm_platform_register(struct device *dev);
+#else
+static inline int udma_pcm_platform_register(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_SND_SOC_TI_UDMA_PCM */
+
+#endif /* __UDMA_PCM_H__ */
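The #else stub above follows the usual optional-backend convention: callers such as
davinci-mcasp.c invoke udma_pcm_platform_register() unconditionally, and when
CONFIG_SND_SOC_TI_UDMA_PCM is off the inline returns 0 and the call compiles away. A
hedged sketch of a call site in a hypothetical DAI driver (foo_probe and its error
handling are illustrative only, not part of this patch):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* ... register the DAI component first ... */

	/* devm-managed, so nothing to undo in remove() */
	ret = udma_pcm_platform_register(&pdev->dev);
	if (ret)
		dev_err(&pdev->dev, "UDMA PCM register failed: %d\n", ret);
	return ret;
}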
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 985487cc3a55..4b1cd4da3e36 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -269,7 +269,7 @@ static int txx9aclc_pcm_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
- struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_pcm *pcm = rtd->pcm;
struct platform_device *pdev = to_platform_device(component->dev);
struct txx9aclc_soc_device *dev;
diff --git a/sound/soc/uniphier/aio-compress.c b/sound/soc/uniphier/aio-compress.c
index 17f773ac5ca1..232d3cc5bce0 100644
--- a/sound/soc/uniphier/aio-compress.c
+++ b/sound/soc/uniphier/aio-compress.c
@@ -23,7 +23,7 @@ static int uniphier_aio_comprdma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_compr *compr = rtd->compr;
struct device *dev = compr->card->dev;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
size_t size = AUD_RING_SIZE;
int dma_dir = DMA_FROM_DEVICE, ret;
@@ -56,7 +56,7 @@ static int uniphier_aio_comprdma_free(struct snd_soc_pcm_runtime *rtd)
{
struct snd_compr *compr = rtd->compr;
struct device *dev = compr->card->dev;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
int dma_dir = DMA_FROM_DEVICE;
@@ -73,7 +73,7 @@ static int uniphier_aio_comprdma_free(struct snd_soc_pcm_runtime *rtd)
static int uniphier_aio_compr_open(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int ret;
@@ -98,7 +98,7 @@ static int uniphier_aio_compr_open(struct snd_compr_stream *cstream)
static int uniphier_aio_compr_free(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int ret;
@@ -118,7 +118,7 @@ static int uniphier_aio_compr_get_params(struct snd_compr_stream *cstream,
struct snd_codec *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
*params = sub->cparams.codec;
@@ -130,7 +130,7 @@ static int uniphier_aio_compr_set_params(struct snd_compr_stream *cstream,
struct snd_compr_params *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
struct device *dev = &aio->chip->pdev->dev;
int ret;
@@ -165,7 +165,7 @@ static int uniphier_aio_compr_set_params(struct snd_compr_stream *cstream,
static int uniphier_aio_compr_hw_free(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
sub->setting = 0;
@@ -177,7 +177,7 @@ static int uniphier_aio_compr_prepare(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int bytes = runtime->fragment_size;
unsigned long flags;
@@ -215,7 +215,7 @@ static int uniphier_aio_compr_trigger(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
struct device *dev = &aio->chip->pdev->dev;
int bytes = runtime->fragment_size, ret = 0;
@@ -248,7 +248,7 @@ static int uniphier_aio_compr_pointer(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int bytes = runtime->fragment_size;
unsigned long flags;
@@ -322,7 +322,7 @@ static int uniphier_aio_compr_copy(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
struct device *carddev = rtd->compr->card->dev;
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
size_t cnt = min_t(size_t, count, aio_rb_space_to_end(sub) / 2);
int bytes = runtime->fragment_size;
diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c
index da83423c52e2..4bbcb007df41 100644
--- a/sound/soc/uniphier/aio-dma.c
+++ b/sound/soc/uniphier/aio-dma.c
@@ -109,7 +109,7 @@ static int uniphier_aiodma_prepare(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
int bytes = runtime->period_size *
runtime->channels * samples_to_bytes(runtime, 1);
@@ -136,7 +136,7 @@ static int uniphier_aiodma_trigger(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
struct device *dev = &aio->chip->pdev->dev;
int bytes = runtime->period_size *
@@ -172,7 +172,7 @@ static snd_pcm_uframes_t uniphier_aiodma_pointer(
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
- struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
+ struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
int bytes = runtime->period_size *
runtime->channels * samples_to_bytes(runtime, 1);
diff --git a/sound/soc/ux500/mop500_ab8500.c b/sound/soc/ux500/mop500_ab8500.c
index 77655084bbde..6aaa19829a73 100644
--- a/sound/soc/ux500/mop500_ab8500.c
+++ b/sound/soc/ux500/mop500_ab8500.c
@@ -215,8 +215,8 @@ static int mop500_ab8500_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct device *dev = rtd->card->dev;
unsigned int fmt;
int channels, ret = 0, driver_mode, slots;
@@ -339,7 +339,7 @@ static int mop500_ab8500_hw_params(struct snd_pcm_substream *substream,
static int mop500_ab8500_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
mutex_lock(&mop500_ab8500_params_lock);
__clear_bit(cpu_dai->id, &mop500_ab8500_usage);
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
index 9445dbe8e039..39b96c132bc8 100644
--- a/sound/soc/ux500/ux500_pcm.c
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -46,7 +46,7 @@ static const struct snd_pcm_hardware ux500_pcm_hw = {
static struct dma_chan *ux500_pcm_request_chan(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream)
{
- struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
u16 per_data_width, mem_data_width;
struct stedma40_chan_cfg *dma_cfg;
struct ux500_msp_dma_params *dma_params;
@@ -86,7 +86,7 @@ static int ux500_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
struct dma_slave_config *slave_config)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct msp_i2s_platform_data *pdata = rtd->cpu_dai->dev->platform_data;
+ struct msp_i2s_platform_data *pdata = asoc_rtd_to_cpu(rtd, 0)->dev->platform_data;
struct snd_dmaengine_dai_dma_data *snd_dma_params;
struct ux500_msp_dma_params *ste_dma_params;
dma_addr_t dma_addr;
@@ -94,11 +94,11 @@ static int ux500_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
if (pdata) {
ste_dma_params =
- snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
dma_addr = ste_dma_params->tx_rx_addr;
} else {
snd_dma_params =
- snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
dma_addr = snd_dma_params->addr;
}
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index bcf442faff7c..68af2176b19c 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -373,7 +373,7 @@ static int xtfpga_pcm_open(struct snd_soc_component *component,
void *p;
snd_soc_set_runtime_hwparams(substream, &xtfpga_pcm_hardware);
- p = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ p = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
runtime->private_data = p;
return 0;
diff --git a/sound/soc/zte/zx-spdif.c b/sound/soc/zte/zx-spdif.c
index 60382ec23832..a3a07c0730e6 100644
--- a/sound/soc/zte/zx-spdif.c
+++ b/sound/soc/zte/zx-spdif.c
@@ -322,7 +322,6 @@ static int zx_spdif_probe(struct platform_device *pdev)
zx_spdif->mapbase = res->start;
zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(zx_spdif->reg_base)) {
- dev_err(&pdev->dev, "ioremap failed!\n");
return PTR_ERR(zx_spdif->reg_base);
}
diff --git a/sound/soc/zte/zx-tdm.c b/sound/soc/zte/zx-tdm.c
index 0e5a05b25a77..4f787185d630 100644
--- a/sound/soc/zte/zx-tdm.c
+++ b/sound/soc/zte/zx-tdm.c
@@ -371,7 +371,6 @@ static struct snd_soc_dai_driver zx_tdm_dai = {
static int zx_tdm_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct of_phandle_args out_args;
unsigned int dma_reg_offset;
struct zx_tdm_info *zx_tdm;
@@ -384,7 +383,7 @@ static int zx_tdm_probe(struct platform_device *pdev)
if (!zx_tdm)
return -ENOMEM;
- zx_tdm->dev = dev;
+ zx_tdm->dev = &pdev->dev;
zx_tdm->dai_wclk = devm_clk_get(&pdev->dev, "wclk");
if (IS_ERR(zx_tdm->dai_wclk)) {
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 78edd7d2f418..56031026b113 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -13,6 +13,7 @@ snd-usb-audio-objs := card.o \
mixer_scarlett.o \
mixer_scarlett_gen2.o \
mixer_us16x08.o \
+ mixer_s1810c.o \
pcm.o \
power.o \
proc.o \
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 827fb0bc8b56..fd6fd1726ea0 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -72,6 +72,7 @@ static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
static bool ignore_ctl_error;
static bool autoclock = true;
static char *quirk_alias[SNDRV_CARDS];
+static char *delayed_register[SNDRV_CARDS];
bool snd_usb_use_vmalloc = true;
bool snd_usb_skip_validation;
@@ -95,6 +96,8 @@ module_param(autoclock, bool, 0444);
MODULE_PARM_DESC(autoclock, "Enable auto-clock selection for UAC2 devices (default: yes).");
module_param_array(quirk_alias, charp, NULL, 0444);
MODULE_PARM_DESC(quirk_alias, "Quirk aliases, e.g. 0123abcd:5678beef.");
+module_param_array(delayed_register, charp, NULL, 0444);
+MODULE_PARM_DESC(delayed_register, "Quirk for delayed registration, given by id:iface, e.g. 0123abcd:4.");
module_param_named(use_vmalloc, snd_usb_use_vmalloc, bool, 0444);
MODULE_PARM_DESC(use_vmalloc, "Use vmalloc for PCM intermediate buffers (default: yes).");
module_param_named(skip_validation, snd_usb_skip_validation, bool, 0444);
@@ -525,6 +528,21 @@ static bool get_alias_id(struct usb_device *dev, unsigned int *id)
return false;
}
+static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface)
+{
+ int i;
+ unsigned int id, inum;
+
+ for (i = 0; i < ARRAY_SIZE(delayed_register); i++) {
+ if (delayed_register[i] &&
+ sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
+ id == chip->usb_id)
+ return inum != iface;
+ }
+
+ return false;
+}
+
static const struct usb_device_id usb_audio_ids[]; /* defined below */
/* look for the corresponding quirk */
@@ -662,10 +680,22 @@ static int usb_audio_probe(struct usb_interface *intf,
goto __error;
}
- /* we are allowed to call snd_card_register() many times */
- err = snd_card_register(chip->card);
- if (err < 0)
- goto __error;
+ if (chip->need_delayed_register) {
+ dev_info(&dev->dev,
+ "Found post-registration device assignment: %08x:%02x\n",
+ chip->usb_id, ifnum);
+ chip->need_delayed_register = false; /* clear again */
+ }
+
+ /* we are allowed to call snd_card_register() many times, but first
+ * check to see if a device needs to skip it or do anything special
+ */
+ if (!snd_usb_registration_quirk(chip, ifnum) &&
+ !check_delayed_register_option(chip, ifnum)) {
+ err = snd_card_register(chip->card);
+ if (err < 0)
+ goto __error;
+ }
if (quirk && quirk->shares_media_device) {
/* don't want to fail when snd_media_device_create() fails */
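Usage note, not part of the patch: per the MODULE_PARM_DESC above, the new option
takes a vendor:product id plus an interface number, e.g. in /etc/modprobe.d/:

	options snd-usb-audio delayed_register=0123abcd:4

With that set, check_delayed_register_option() returns true for every interface of
the matching device except the named one, so snd_card_register() is deferred until
that interface has been probed.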
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index a48313dfa967..b118cf97607f 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -151,16 +151,15 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
return ret;
}
-/*
- * Assume the clock is valid if clock source supports only one single sample
- * rate, the terminal is connected directly to it (there is no clock selector)
- * and clock type is internal. This is to deal with some Denon DJ controllers
- * that always reports that clock is invalid.
- */
static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
struct audioformat *fmt,
int source_id)
{
+ bool ret = false;
+ int count;
+ unsigned char data;
+ struct usb_device *dev = chip->dev;
+
if (fmt->protocol == UAC_VERSION_2) {
struct uac_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source(chip->ctrl_intf, source_id);
@@ -168,13 +167,51 @@ static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
if (!cs_desc)
return false;
- return (fmt->nr_rates == 1 &&
- (fmt->clock & 0xff) == cs_desc->bClockID &&
- (cs_desc->bmAttributes & 0x3) !=
- UAC_CLOCK_SOURCE_TYPE_EXT);
+ /*
+ * Assume the clock is valid if the clock source supports only a
+ * single sample rate, the terminal is connected directly to it
+ * (there is no clock selector) and the clock type is internal.
+ * This is to deal with some Denon DJ controllers that always
+ * report that the clock is invalid.
+ */
+ if (fmt->nr_rates == 1 &&
+ (fmt->clock & 0xff) == cs_desc->bClockID &&
+ (cs_desc->bmAttributes & 0x3) !=
+ UAC_CLOCK_SOURCE_TYPE_EXT)
+ return true;
+ }
+
+ /*
+ * MOTU MicroBook IIc
+ * A sample rate change takes more than 2 seconds on this device. The
+ * clock validity request returns false during that period.
+ */
+ if (chip->usb_id == USB_ID(0x07fd, 0x0004)) {
+ count = 0;
+
+ while (!ret && count < 50) {
+ int err;
+
+ msleep(100);
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ UAC2_CS_CONTROL_CLOCK_VALID << 8,
+ snd_usb_ctrl_intf(chip) | (source_id << 8),
+ &data, sizeof(data));
+ if (err < 0) {
+ dev_warn(&dev->dev,
+ "%s(): cannot get clock validity for id %d\n",
+ __func__, source_id);
+ return false;
+ }
+
+ ret = !!data;
+ count++;
+ }
}
- return false;
+ return ret;
}
static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
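Worked timing for the MicroBook IIc branch above: up to 50 iterations, each preceded
by msleep(100), tolerates an invalid clock for roughly 5 seconds — comfortably more
than the >2 s the device needs after a sample rate change — before the function gives
up and reports the clock as invalid.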
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 9f5cb4ed3a0c..50e1874c847c 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -247,6 +247,36 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
return 0;
}
+
+/*
+ * Presonus Studio 1810c supports a limited set of sampling
+ * rates per altsetting but reports the full set each time.
+ * If we don't filter out the unsupported rates and attempt
+ * to configure the card, it will hang refusing to do any
+ * further audio I/O until a hard reset is performed.
+ *
+ * The list of supported rates per altsetting (set of available
+ * I/O channels) is described in the owner's manual, section 2.2.
+ */
+static bool s1810c_valid_sample_rate(struct audioformat *fp,
+ unsigned int rate)
+{
+ switch (fp->altsetting) {
+ case 1:
+ /* All ADAT ports available */
+ return rate <= 48000;
+ case 2:
+ /* Half of ADAT ports available */
+ return (rate == 88200 || rate == 96000);
+ case 3:
+ /* Analog I/O only (no S/PDIF nor ADAT) */
+ return rate >= 176400;
+ default:
+ return false;
+ }
+}
+
/*
* Helper function to walk the array of sample rate triplets reported by
* the device. The problem is that we need to parse whole array first to
@@ -283,6 +313,12 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
}
for (rate = min; rate <= max; rate += res) {
+
+ /* Filter out invalid rates on Presonus Studio 1810c */
+ if (chip->usb_id == USB_ID(0x0194f, 0x010c) &&
+ !s1810c_valid_sample_rate(fp, rate))
+ goto skip_rate;
+
if (fp->rate_table)
fp->rate_table[nr_rates] = rate;
if (!fp->rate_min || rate < fp->rate_min)
@@ -297,6 +333,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
break;
}
+skip_rate:
/* avoid endless loop */
if (res == 0)
break;
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 392e5fda680c..047b90595d65 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -91,7 +91,7 @@ struct usb_ms_endpoint_descriptor {
__u8 bDescriptorType;
__u8 bDescriptorSubtype;
__u8 bNumEmbMIDIJack;
- __u8 baAssocJackID[0];
+ __u8 baAssocJackID[];
} __attribute__ ((packed));
struct snd_usb_midi_in_endpoint;
@@ -1826,6 +1826,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi,
return 0;
}
+static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor(
+ struct usb_host_endpoint *hostep)
+{
+ unsigned char *extra = hostep->extra;
+ int extralen = hostep->extralen;
+
+ while (extralen > 3) {
+ struct usb_ms_endpoint_descriptor *ms_ep =
+ (struct usb_ms_endpoint_descriptor *)extra;
+
+ if (ms_ep->bLength > 3 &&
+ ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT &&
+ ms_ep->bDescriptorSubtype == UAC_MS_GENERAL)
+ return ms_ep;
+ if (!extra[0])
+ break;
+ extralen -= extra[0];
+ extra += extra[0];
+ }
+ return NULL;
+}
+
/*
* Returns MIDIStreaming device capabilities.
*/
@@ -1863,11 +1885,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
ep = get_ep_desc(hostep);
if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep))
continue;
- ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra;
- if (hostep->extralen < 4 ||
- ms_ep->bLength < 4 ||
- ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT ||
- ms_ep->bDescriptorSubtype != UAC_MS_GENERAL)
+ ms_ep = find_usb_ms_endpoint_descriptor(hostep);
+ if (!ms_ep)
continue;
if (usb_endpoint_dir_out(ep)) {
if (endpoints[epidx].out_ep) {
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 81b2db0edd5f..721d12130d0c 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -292,6 +292,11 @@ static int uac2_ctl_value_size(int val_type)
* retrieve a mixer value
*/
+static inline int mixer_ctrl_intf(struct usb_mixer_interface *mixer)
+{
+ return get_iface_desc(mixer->hostif)->bInterfaceNumber;
+}
+
static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
int validx, int *value_ret)
{
@@ -306,7 +311,7 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
return -EIO;
while (timeout-- > 0) {
- idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+ idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8);
err = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, idx, buf, val_len);
@@ -354,7 +359,7 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
if (ret)
goto error;
- idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+ idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8);
ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
validx, idx, buf, size);
@@ -479,7 +484,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
return -EIO;
while (timeout-- > 0) {
- idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+ idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8);
err = snd_usb_ctl_msg(chip->dev,
usb_sndctrlpipe(chip->dev, 0), request,
USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
@@ -901,6 +906,12 @@ static int parse_term_effect_unit(struct mixer_build *state,
struct usb_audio_term *term,
void *p1, int id)
{
+ struct uac2_effect_unit_descriptor *d = p1;
+ int err;
+
+ err = __check_input_term(state, d->bSourceID, term);
+ if (err < 0)
+ return err;
term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
term->id = id;
return 0;
@@ -1203,7 +1214,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
get_ctl_value(cval, UAC_GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) {
usb_audio_err(cval->head.mixer->chip,
"%d:%d: cannot get min/max values for control %d (id %d)\n",
- cval->head.id, snd_usb_ctrl_intf(cval->head.mixer->chip),
+ cval->head.id, mixer_ctrl_intf(cval->head.mixer),
cval->control, cval->head.id);
return -EINVAL;
}
@@ -1422,7 +1433,7 @@ static int mixer_ctl_connector_get(struct snd_kcontrol *kcontrol,
if (ret)
goto error;
- idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+ idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8);
if (cval->head.mixer->protocol == UAC_VERSION_2) {
struct uac2_connectors_ctl_blk uac2_conn;
@@ -1674,6 +1685,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
/* get min/max values */
get_min_max_with_quirks(cval, 0, kctl);
+ /* skip a bogus volume range */
+ if (cval->max <= cval->min) {
+ usb_audio_dbg(mixer->chip,
+ "[%d] FU [%s] skipped due to invalid volume\n",
+ cval->head.id, kctl->id.name);
+ snd_ctl_free_one(kctl);
+ return;
+ }
+
if (control == UAC_FU_VOLUME) {
check_mapped_dB(map, cval);
if (cval->dBmin < cval->dBmax || !cval->initialized) {
@@ -3203,7 +3224,7 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
list_for_each_entry(mixer, &chip->mixer_list, list) {
snd_iprintf(buffer,
"USB Mixer: usb_id=0x%08x, ctrlif=%i, ctlerr=%i\n",
- chip->usb_id, snd_usb_ctrl_intf(chip),
+ chip->usb_id, mixer_ctrl_intf(mixer),
mixer->ignore_ctl_error);
snd_iprintf(buffer, "Card: %s\n", chip->card->longname);
for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) {
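Design note on mixer_ctrl_intf(): the helper takes the interface number from the
mixer's own host interface descriptor instead of the chip-wide snd_usb_ctrl_intf(),
so on devices exposing more than one mixer interface each control request is
addressed to the interface the mixer element actually lives on (rationale inferred
from the change itself).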
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index c237e24f08d9..02b036b2aefb 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -34,6 +34,7 @@
#include "mixer_scarlett.h"
#include "mixer_scarlett_gen2.h"
#include "mixer_us16x08.h"
+#include "mixer_s1810c.h"
#include "helper.h"
struct std_mono_table {
@@ -2277,6 +2278,10 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x2a39, 0x3fd4): /* RME */
err = snd_rme_controls_create(mixer);
break;
+
+ case USB_ID(0x0194f, 0x010c): /* Presonus Studio 1810c */
+ err = snd_sc1810_init_mixer(mixer);
+ break;
}
return err;
diff --git a/sound/usb/mixer_s1810c.c b/sound/usb/mixer_s1810c.c
new file mode 100644
index 000000000000..6483e47bafd0
--- /dev/null
+++ b/sound/usb/mixer_s1810c.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Presonus Studio 1810c driver for ALSA
+ * Copyright (C) 2019 Nick Kossifidis <mickflemm@gmail.com>
+ *
+ * Based on reverse engineering of the communication protocol
+ * between the windows driver / Univeral Control (UC) program
+ * and the device, through usbmon.
+ *
+ * For now this bypasses the mixer, with all channels split,
+ * so that the software can mix with greater flexibility.
+ * It also adds controls for the 4 buttons on the front of
+ * the device.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/control.h>
+
+#include "usbaudio.h"
+#include "mixer.h"
+#include "mixer_quirks.h"
+#include "helper.h"
+#include "mixer_s1810c.h"
+
+#define SC1810C_CMD_REQ 160
+#define SC1810C_CMD_REQTYPE \
+ (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT)
+#define SC1810C_CMD_F1 0x50617269
+#define SC1810C_CMD_F2 0x14
+
+/*
+ * DISCLAIMER: These are just guesses based on the
+ * dumps I got.
+ *
+ * It seems like 'a' selects between
+ * device (0), mixer (0x64) and output (0x65).
+ *
+ * For mixer (0x64):
+ * * b selects an input channel (see below).
+ * * c selects an output channel pair (see below).
+ * * d selects left (0) or right (1) of that pair.
+ * * e 0-> disconnect, 0x01000000-> connect,
+ * 0x0109-> used for stereo-linking channels,
+ * e is also used for setting volume levels
+ * in which case b is also set so I guess
+ * this way it is possible to set the volume
+ * level from the specified input to the
+ * specified output.
+ *
+ * IN Channels:
+ * 0 - 7 Mic/Inst/Line (Analog inputs)
+ * 8 - 9 S/PDIF
+ * 10 - 17 ADAT
+ * 18 - 35 DAW (Inputs from the host)
+ *
+ * OUT Channels (pairs):
+ * 0 -> Main out
+ * 1 -> Line1/2
+ * 2 -> Line3/4
+ * 3 -> S/PDIF
+ * 4 -> ADAT?
+ *
+ * For device (0):
+ * * b and c are not used, at least not on the
+ * dumps I got.
+ * * d sets the control id to be modified
+ * (see below).
+ * * e sets the setting for that control.
+ * (so for the switches I was interested
+ * in it's 0/1)
+ *
+ * For output (0x65):
+ * * b is the output channel (see above).
+ * * c is zero.
+ * * e is, I guess, the same as with mixer, except 0x0109
+ * which I didn't see in my dumps.
+ *
+ * The two fixed fields have the same values for
+ * mixer and output but a different set for device.
+ */
+struct s1810c_ctl_packet {
+ u32 a;
+ u32 b;
+ u32 fixed1;
+ u32 fixed2;
+ u32 c;
+ u32 d;
+ u32 e;
+};
+
+#define SC1810C_CTL_LINE_SW 0
+#define SC1810C_CTL_MUTE_SW 1
+#define SC1810C_CTL_AB_SW 3
+#define SC1810C_CTL_48V_SW 4
+
+#define SC1810C_SET_STATE_REQ 161
+#define SC1810C_SET_STATE_REQTYPE SC1810C_CMD_REQTYPE
+#define SC1810C_SET_STATE_F1 0x64656D73
+#define SC1810C_SET_STATE_F2 0xF4
+
+#define SC1810C_GET_STATE_REQ 162
+#define SC1810C_GET_STATE_REQTYPE \
+ (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN)
+#define SC1810C_GET_STATE_F1 SC1810C_SET_STATE_F1
+#define SC1810C_GET_STATE_F2 SC1810C_SET_STATE_F2
+
+#define SC1810C_STATE_F1_IDX 2
+#define SC1810C_STATE_F2_IDX 3
+
+/*
+ * This packet includes mixer volumes and
+ * various other fields, it's an extended
+ * version of ctl_packet, with a and b
+ * being zero and different f1/f2.
+ */
+struct s1810c_state_packet {
+ u32 fields[63];
+};
+
+#define SC1810C_STATE_48V_SW 58
+#define SC1810C_STATE_LINE_SW 59
+#define SC1810C_STATE_MUTE_SW 60
+#define SC1810C_STATE_AB_SW 62
+
+struct s1810_mixer_state {
+ uint16_t seqnum;
+ struct mutex usb_mutex;
+ struct mutex data_mutex;
+};
+
+static int
+snd_s1810c_send_ctl_packet(struct usb_device *dev, u32 a,
+ u32 b, u32 c, u32 d, u32 e)
+{
+ struct s1810c_ctl_packet pkt = { 0 };
+ int ret = 0;
+
+ pkt.fixed1 = SC1810C_CMD_F1;
+ pkt.fixed2 = SC1810C_CMD_F2;
+
+ pkt.a = a;
+ pkt.b = b;
+ pkt.c = c;
+ pkt.d = d;
+ /*
+ * The value for d = 0/1 on this
+ * output channel is always 0 (probably because
+ * there is no ADAT output on the 1810c).
+ */
+ pkt.e = (c == 4) ? 0 : e;
+
+ ret = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+ SC1810C_CMD_REQ,
+ SC1810C_CMD_REQTYPE, 0, 0, &pkt, sizeof(pkt));
+ if (ret < 0) {
+ dev_warn(&dev->dev, "could not send ctl packet\n");
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * When opening Universal Control, the program periodically
+ * sends and receives state packets for syncing state between
+ * the device and the host.
+ *
+ * Note that if we only send the request to get data back we'll
+ * get an error; we need to first send an empty state packet and
+ * then ask to receive a filled one. Their sequence numbers must
+ * also match.
+ */
+static int
+snd_sc1810c_get_status_field(struct usb_device *dev,
+ u32 *field, int field_idx, uint16_t *seqnum)
+{
+ struct s1810c_state_packet pkt_out = { { 0 } };
+ struct s1810c_state_packet pkt_in = { { 0 } };
+ int ret = 0;
+
+ pkt_out.fields[SC1810C_STATE_F1_IDX] = SC1810C_SET_STATE_F1;
+ pkt_out.fields[SC1810C_STATE_F2_IDX] = SC1810C_SET_STATE_F2;
+ ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ SC1810C_SET_STATE_REQ,
+ SC1810C_SET_STATE_REQTYPE,
+ (*seqnum), 0, &pkt_out, sizeof(pkt_out));
+ if (ret < 0) {
+ dev_warn(&dev->dev, "could not send state packet (%d)\n", ret);
+ return ret;
+ }
+
+ ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ SC1810C_GET_STATE_REQ,
+ SC1810C_GET_STATE_REQTYPE,
+ (*seqnum), 0, &pkt_in, sizeof(pkt_in));
+ if (ret < 0) {
+ dev_warn(&dev->dev, "could not get state field %u (%d)\n",
+ field_idx, ret);
+ return ret;
+ }
+
+ (*field) = pkt_in.fields[field_idx];
+ (*seqnum)++;
+ return 0;
+}
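+
+/*
+ * Usage sketch (values assumed): reading the current 48V switch
+ * state through the helper above would look like
+ *
+ *	u32 val;
+ *	ret = snd_sc1810c_get_status_field(dev, &val,
+ *					   SC1810C_STATE_48V_SW,
+ *					   &private->seqnum);
+ *
+ * with the shared sequence number advancing on each success.
+ */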
+
+/*
+ * This is what I got when bypassing the mixer with
+ * all channels split. I'm not 100% sure of what's going
+ * on; I could probably clean this up based on my observations,
+ * but I prefer to keep the same behavior as the Windows driver.
+ */
+static int snd_s1810c_init_mixer_maps(struct snd_usb_audio *chip)
+{
+ u32 a, b, c, e, n, off;
+ struct usb_device *dev = chip->dev;
+
+ /* Set initial volume levels ? */
+ a = 0x64;
+ e = 0xbc;
+ for (n = 0; n < 2; n++) {
+ off = n * 18;
+ for (b = off, c = 0; b < 18 + off; b++) {
+ /* This channel to all outputs ? */
+ for (c = 0; c <= 8; c++) {
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 1, e);
+ }
+ /* This channel to main output (again) */
+ snd_s1810c_send_ctl_packet(dev, a, b, 0, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, b, 0, 1, e);
+ }
+ /*
+ * I noticed on UC that DAW channels have different
+ * initial volumes, so this makes sense.
+ */
+ e = 0xb53bf0;
+ }
+
+ /* Connect analog outputs ? */
+ a = 0x65;
+ e = 0x01000000;
+ for (b = 1; b < 3; b++) {
+ snd_s1810c_send_ctl_packet(dev, a, b, 0, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, b, 0, 1, e);
+ }
+ snd_s1810c_send_ctl_packet(dev, a, 0, 0, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, 0, 0, 1, e);
+
+ /* Set initial volume levels for S/PDIF mappings ? */
+ a = 0x64;
+ e = 0xbc;
+ c = 3;
+ for (n = 0; n < 2; n++) {
+ off = n * 18;
+ for (b = off; b < 18 + off; b++) {
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 1, e);
+ }
+ e = 0xb53bf0;
+ }
+
+ /* Connect S/PDIF output ? */
+ a = 0x65;
+ e = 0x01000000;
+ snd_s1810c_send_ctl_packet(dev, a, 3, 0, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, 3, 0, 1, e);
+
+ /* Connect all outputs (again) ? */
+ a = 0x65;
+ e = 0x01000000;
+ for (b = 0; b < 4; b++) {
+ snd_s1810c_send_ctl_packet(dev, a, b, 0, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, b, 0, 1, e);
+ }
+
+ /* Basic routing to get sound out of the device */
+ a = 0x64;
+ e = 0x01000000;
+ for (c = 0; c < 4; c++) {
+ for (b = 0; b < 36; b++) {
+ if ((c == 0 && b == 18) || /* DAW1/2 -> Main */
+ (c == 1 && b == 20) || /* DAW3/4 -> Line3/4 */
+ (c == 2 && b == 22) || /* DAW5/6 -> Line5/6 */
+ (c == 3 && b == 24)) { /* DAW7/8 -> S/PDIF */
+ /* Left */
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 1, 0);
+ b++;
+ /* Right */
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 0, 0);
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 1, e);
+ } else {
+ /* Leave the rest disconnected */
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 0, 0);
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 1, 0);
+ }
+ }
+ }
+
+ /* Set initial volume levels for S/PDIF (again) ? */
+ a = 0x64;
+ e = 0xbc;
+ c = 3;
+ for (n = 0; n < 2; n++) {
+ off = n * 18;
+ for (b = off; b < 18 + off; b++) {
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, b, c, 1, e);
+ }
+ e = 0xb53bf0;
+ }
+
+ /* Connect S/PDIF outputs (again) ? */
+ a = 0x65;
+ e = 0x01000000;
+ snd_s1810c_send_ctl_packet(dev, a, 3, 0, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, 3, 0, 1, e);
+
+ /* Again ? */
+ snd_s1810c_send_ctl_packet(dev, a, 3, 0, 0, e);
+ snd_s1810c_send_ctl_packet(dev, a, 3, 0, 1, e);
+
+ return 0;
+}
+
+/*
+ * Sync state with the device and retrieve the requested field,
+ * whose index is specified in (kctl->private_value & 0xFF),
+ * from the received fields array.
+ */
+static int
+snd_s1810c_get_switch_state(struct usb_mixer_interface *mixer,
+ struct snd_kcontrol *kctl, u32 *state)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ struct s1810_mixer_state *private = mixer->private_data;
+ u32 field = 0;
+ u32 ctl_idx = (u32) (kctl->private_value & 0xFF);
+ int ret = 0;
+
+ mutex_lock(&private->usb_mutex);
+ ret = snd_sc1810c_get_status_field(chip->dev, &field,
+ ctl_idx, &private->seqnum);
+ if (ret < 0)
+ goto unlock;
+
+ *state = field;
+ unlock:
+ mutex_unlock(&private->usb_mutex);
+ return ret ? ret : 0;
+}
+
+/*
+ * Send a control packet to the device for the control id
+ * specified in (kctl->private_value >> 8) with value
+ * specified in (kctl->private_value >> 16).
+ */
+static int
+snd_s1810c_set_switch_state(struct usb_mixer_interface *mixer,
+ struct snd_kcontrol *kctl)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ struct s1810_mixer_state *private = mixer->private_data;
+ u32 pval = (u32) kctl->private_value;
+ u32 ctl_id = (pval >> 8) & 0xFF;
+ u32 ctl_val = (pval >> 16) & 0x1;
+ int ret = 0;
+
+ mutex_lock(&private->usb_mutex);
+ ret = snd_s1810c_send_ctl_packet(chip->dev, 0, 0, 0, ctl_id, ctl_val);
+ mutex_unlock(&private->usb_mutex);
+ return ret;
+}
+
+/* Generic get/set/init functions for switch controls */
+
+static int
+snd_s1810c_switch_get(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ctl_elem)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl);
+ struct usb_mixer_interface *mixer = list->mixer;
+ struct s1810_mixer_state *private = mixer->private_data;
+ u32 pval = (u32) kctl->private_value;
+ u32 ctl_idx = pval & 0xFF;
+ u32 state = 0;
+ int ret = 0;
+
+ mutex_lock(&private->data_mutex);
+ ret = snd_s1810c_get_switch_state(mixer, kctl, &state);
+ if (ret < 0)
+ goto unlock;
+
+ switch (ctl_idx) {
+ case SC1810C_STATE_LINE_SW:
+ case SC1810C_STATE_AB_SW:
+ ctl_elem->value.enumerated.item[0] = (int)state;
+ break;
+ default:
+ ctl_elem->value.integer.value[0] = (long)state;
+ }
+
+ unlock:
+ mutex_unlock(&private->data_mutex);
+ return (ret < 0) ? ret : 0;
+}
+
+static int
+snd_s1810c_switch_set(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_value *ctl_elem)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl);
+ struct usb_mixer_interface *mixer = list->mixer;
+ struct s1810_mixer_state *private = mixer->private_data;
+ u32 pval = (u32) kctl->private_value;
+ u32 ctl_idx = pval & 0xFF;
+ u32 curval = 0;
+ u32 newval = 0;
+ int ret = 0;
+
+ mutex_lock(&private->data_mutex);
+ ret = snd_s1810c_get_switch_state(mixer, kctl, &curval);
+ if (ret < 0)
+ goto unlock;
+
+ switch (ctl_idx) {
+ case SC1810C_STATE_LINE_SW:
+ case SC1810C_STATE_AB_SW:
+ newval = (u32) ctl_elem->value.enumerated.item[0];
+ break;
+ default:
+ newval = (u32) ctl_elem->value.integer.value[0];
+ }
+
+ if (curval == newval)
+ goto unlock;
+
+ kctl->private_value &= ~(0x1 << 16);
+ kctl->private_value |= (unsigned int)(newval & 0x1) << 16;
+ ret = snd_s1810c_set_switch_state(mixer, kctl);
+
+ unlock:
+ mutex_unlock(&private->data_mutex);
+ return (ret < 0) ? ret : (curval != newval);
+}
+
+static int
+snd_s1810c_switch_init(struct usb_mixer_interface *mixer,
+ const struct snd_kcontrol_new *new_kctl)
+{
+ struct snd_kcontrol *kctl;
+ struct usb_mixer_elem_info *elem;
+
+ elem = kzalloc(sizeof(struct usb_mixer_elem_info), GFP_KERNEL);
+ if (!elem)
+ return -ENOMEM;
+
+ elem->head.mixer = mixer;
+ elem->control = 0;
+ elem->head.id = 0;
+ elem->channels = 1;
+
+ kctl = snd_ctl_new1(new_kctl, elem);
+ if (!kctl) {
+ kfree(elem);
+ return -ENOMEM;
+ }
+ kctl->private_free = snd_usb_mixer_elem_free;
+
+ return snd_usb_mixer_add_control(&elem->head, kctl);
+}
+
+static int
+snd_s1810c_line_sw_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const texts[2] = {
+ "Preamp On (Mic/Inst)",
+ "Preamp Off (Line in)"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
+}
+
+static const struct snd_kcontrol_new snd_s1810c_line_sw = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Line 1/2 Source Type",
+ .info = snd_s1810c_line_sw_info,
+ .get = snd_s1810c_switch_get,
+ .put = snd_s1810c_switch_set,
+ .private_value = (SC1810C_STATE_LINE_SW | SC1810C_CTL_LINE_SW << 8)
+};
+
+static const struct snd_kcontrol_new snd_s1810c_mute_sw = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Mute Main Out Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_s1810c_switch_get,
+ .put = snd_s1810c_switch_set,
+ .private_value = (SC1810C_STATE_MUTE_SW | SC1810C_CTL_MUTE_SW << 8)
+};
+
+static const struct snd_kcontrol_new snd_s1810c_48v_sw = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "48V Phantom Power On Mic Inputs Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_s1810c_switch_get,
+ .put = snd_s1810c_switch_set,
+ .private_value = (SC1810C_STATE_48V_SW | SC1810C_CTL_48V_SW << 8)
+};
+
+static int
+snd_s1810c_ab_sw_info(struct snd_kcontrol *kctl,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const texts[2] = {
+ "1/2",
+ "3/4"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
+}
+
+static const struct snd_kcontrol_new snd_s1810c_ab_sw = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Headphone 1 Source Route",
+ .info = snd_s1810c_ab_sw_info,
+ .get = snd_s1810c_switch_get,
+ .put = snd_s1810c_switch_set,
+ .private_value = (SC1810C_STATE_AB_SW | SC1810C_CTL_AB_SW << 8)
+};
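+
+/*
+ * Sketch of the private_value layout shared by the controls above
+ * (this just mirrors the decoding done in the get/set helpers):
+ *
+ *	state_idx = pval & 0xFF;	 state-packet field index
+ *	ctl_id	  = (pval >> 8) & 0xFF;	 id used in ctl packets
+ *	value	  = (pval >> 16) & 0x1;	 cached switch value
+ */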
+
+static void snd_sc1810_mixer_state_free(struct usb_mixer_interface *mixer)
+{
+ struct s1810_mixer_state *private = mixer->private_data;
+ kfree(private);
+ mixer->private_data = NULL;
+}
+
+/* Entry point, called from mixer_quirks.c */
+int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer)
+{
+ struct s1810_mixer_state *private = NULL;
+ struct snd_usb_audio *chip = mixer->chip;
+ struct usb_device *dev = chip->dev;
+ int ret = 0;
+
+ /* Run this only once */
+ if (!list_empty(&chip->mixer_list))
+ return 0;
+
+ dev_info(&dev->dev,
+ "Presonus Studio 1810c, device_setup: %u\n", chip->setup);
+ if (chip->setup == 1)
+ dev_info(&dev->dev, "(8out/18in @ 48KHz)\n");
+ else if (chip->setup == 2)
+ dev_info(&dev->dev, "(6out/8in @ 192KHz)\n");
+ else
+ dev_info(&dev->dev, "(8out/14in @ 96KHz)\n");
+
+ ret = snd_s1810c_init_mixer_maps(chip);
+ if (ret < 0)
+ return ret;
+
+ private = kzalloc(sizeof(struct s1810_mixer_state), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ mutex_init(&private->usb_mutex);
+ mutex_init(&private->data_mutex);
+
+ mixer->private_data = private;
+ mixer->private_free = snd_sc1810_mixer_state_free;
+
+ private->seqnum = 1;
+
+ ret = snd_s1810c_switch_init(mixer, &snd_s1810c_line_sw);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_s1810c_switch_init(mixer, &snd_s1810c_mute_sw);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_s1810c_switch_init(mixer, &snd_s1810c_48v_sw);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw);
+ if (ret < 0)
+ return ret;
+ return ret;
+}
diff --git a/sound/usb/mixer_s1810c.h b/sound/usb/mixer_s1810c.h
new file mode 100644
index 000000000000..a79a3743cff3
--- /dev/null
+++ b/sound/usb/mixer_s1810c.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Presonus Studio 1810c driver for ALSA
+ * Copyright (C) 2019 Nick Kossifidis <mickflemm@gmail.com>
+ */
+
+int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer);
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index bd258f1ec2dd..a4e4064f9aee 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -357,7 +357,12 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x81;
ifnum = 1;
goto add_sync_ep_from_ifnum;
- case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */
+ case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II/IIc */
+ /* MicroBook IIc */
+ if (altsd->bInterfaceClass == USB_CLASS_AUDIO)
+ return 0;
+
+ /* MicroBook II */
ep = 0x84;
ifnum = 0;
goto add_sync_ep_from_ifnum;
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index ffbf4bd9208c..4174ad11fca6 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -70,7 +70,7 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
snd_iprintf(buffer, " Interface %d\n", fp->iface);
snd_iprintf(buffer, " Altset %d\n", fp->altsetting);
snd_iprintf(buffer, " Format:");
- for (fmt = 0; fmt <= SNDRV_PCM_FORMAT_LAST; ++fmt)
+ pcm_for_each_format(fmt)
if (fp->formats & pcm_format_to_bits(fmt))
snd_iprintf(buffer, " %s",
snd_pcm_format_name(fmt));
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index d187aa6d50db..1c8719292eee 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3472,7 +3472,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
},
/* MOTU Microbook II */
{
- USB_DEVICE(0x07fd, 0x0004),
+ USB_DEVICE_VENDOR_SPEC(0x07fd, 0x0004),
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
.vendor_name = "MOTU",
.product_name = "MicroBookII",
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 7f558f4b4520..86f192a3043d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1252,6 +1252,38 @@ static int fasttrackpro_skip_setting_quirk(struct snd_usb_audio *chip,
return 0; /* keep this altsetting */
}
+static int s1810c_skip_setting_quirk(struct snd_usb_audio *chip,
+ int iface, int altno)
+{
+ /*
+ * Altno settings:
+ *
+ * Playback (Interface 1):
+ * 1: 6 Analog + 2 S/PDIF
+ * 2: 6 Analog + 2 S/PDIF
+ * 3: 6 Analog
+ *
+ * Capture (Interface 2):
+ * 1: 8 Analog + 2 S/PDIF + 8 ADAT
+ * 2: 8 Analog + 2 S/PDIF + 4 ADAT
+ * 3: 8 Analog
+ */
+
+ /*
+ * I'll leave altset 2 as the default and
+ * use device_setup to switch to the
+ * other two.
+ */
+ if ((chip->setup == 0 || chip->setup > 2) && altno != 2)
+ return 1;
+ else if (chip->setup == 1 && altno != 1)
+ return 1;
+ else if (chip->setup == 2 && altno != 3)
+ return 1;
+
+ return 0;
+}
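+
+/*
+ * For reference, a sketch (assuming the usual snd-usb-audio module
+ * options) of selecting the 48 kHz altsets from userspace:
+ *
+ *	options snd-usb-audio vid=0x194f pid=0x010c device_setup=1
+ */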
+
int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip,
int iface,
int altno)
@@ -1265,6 +1297,10 @@ int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip,
/* fasttrackpro usb: skip altsets incompatible with device_setup */
if (chip->usb_id == USB_ID(0x0763, 0x2012))
return fasttrackpro_skip_setting_quirk(chip, iface, altno);
+ /* presonus studio 1810c: skip altsets incompatible with device_setup */
+ if (chip->usb_id == USB_ID(0x0194f, 0x010c))
+ return s1810c_skip_setting_quirk(chip, iface, altno);
+
return 0;
}
@@ -1316,7 +1352,15 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
case USB_ID(0x2466, 0x8010): /* Fractal Audio Axe-Fx 3 */
return snd_usb_axefx3_boot_quirk(dev);
case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */
- return snd_usb_motu_microbookii_boot_quirk(dev);
+ /*
+ * For some reason, an interface 3 with a vendor-specific
+ * class is detected on the MicroBook IIc.
+ */
+ if (get_iface_desc(intf->altsetting)->bInterfaceClass ==
+ USB_CLASS_VENDOR_SPEC &&
+ get_iface_desc(intf->altsetting)->bInterfaceNumber < 3)
+ return snd_usb_motu_microbookii_boot_quirk(dev);
+ break;
}
return 0;
@@ -1754,5 +1798,47 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
else
fp->ep_attr |= USB_ENDPOINT_SYNC_SYNC;
break;
+ case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook IIc */
+ /*
+ * MaxPacketsOnly attribute is erroneously set in endpoint
+ * descriptors. As a result, this card produces noise with
+ * all sample rates other than 96 kHz.
+ */
+ fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX;
+ break;
}
}
+
+/*
+ * registration quirk:
+ * the registration is skipped if a device matches the given ID,
+ * unless the interface reaches the defined one. This is for delaying
+ * the registration until the last known interface, so that the card and
+ * devices appear at the same time.
+ */
+
+struct registration_quirk {
+ unsigned int usb_id; /* composed via USB_ID() */
+ unsigned int interface; /* the interface to trigger register */
+};
+
+#define REG_QUIRK_ENTRY(vendor, product, iface) \
+ { .usb_id = USB_ID(vendor, product), .interface = (iface) }
+
+static const struct registration_quirk registration_quirks[] = {
+ REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
+ { 0 } /* terminator */
+};
+
+/* return true if skipping registration */
+bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface)
+{
+ const struct registration_quirk *q;
+
+ for (q = registration_quirks; q->usb_id; q++)
+ if (chip->usb_id == q->usb_id)
+ return iface != q->interface;
+
+ /* Register as normal */
+ return false;
+}
diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h
index df0355843a4c..c76cf24a640a 100644
--- a/sound/usb/quirks.h
+++ b/sound/usb/quirks.h
@@ -51,4 +51,6 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
struct audioformat *fp,
int stream);
+bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface);
+
#endif /* __USBAUDIO_QUIRKS_H */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index afd5aa574611..15296f2c902c 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -502,6 +502,9 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
subs = &as->substream[stream];
if (subs->ep_num)
continue;
+ if (snd_device_get_state(chip->card, as->pcm) !=
+ SNDRV_DEV_BUILD)
+ chip->need_delayed_register = true;
err = snd_pcm_new_stream(as->pcm, stream, 1);
if (err < 0)
return err;
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 6fe3ab582ec6..1c892c7f14d7 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -34,6 +34,7 @@ struct snd_usb_audio {
unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
unsigned int tx_length_quirk:1; /* Put length specifier in transfers */
unsigned int setup_fmt_after_resume_quirk:1; /* setup the format to interface after resume */
+ unsigned int need_delayed_register:1; /* warn for delayed registration */
int num_interfaces;
int num_suspended_intf;
int sample_rate_read_error;
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 772f6f3ccbb1..37d290fe9d43 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -906,11 +906,12 @@ static const struct snd_pcm_ops snd_usX2Y_pcm_ops =
*/
static void usX2Y_audio_stream_free(struct snd_usX2Y_substream **usX2Y_substream)
{
- kfree(usX2Y_substream[SNDRV_PCM_STREAM_PLAYBACK]);
- usX2Y_substream[SNDRV_PCM_STREAM_PLAYBACK] = NULL;
+ int stream;
- kfree(usX2Y_substream[SNDRV_PCM_STREAM_CAPTURE]);
- usX2Y_substream[SNDRV_PCM_STREAM_CAPTURE] = NULL;
+ for_each_pcm_streams(stream) {
+ kfree(usX2Y_substream[stream]);
+ usX2Y_substream[stream] = NULL;
+ }
}
static void snd_usX2Y_pcm_private_free(struct snd_pcm *pcm)
diff --git a/tools/accounting/.gitignore b/tools/accounting/.gitignore
index 86485203c4ae..c45fb4ed4309 100644
--- a/tools/accounting/.gitignore
+++ b/tools/accounting/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
getdelays
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 8cb504d30384..5ef1c15e88ad 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
msg.g.version = 0x1;
na = (struct nlattr *) GENLMSG_DATA(&msg);
na->nla_type = nla_type;
- na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ na->nla_len = nla_len + NLA_HDRLEN;
memcpy(NLA_DATA(na), nla_data, nla_len);
msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
index cb52a3a8b8fc..4205ed4158bf 100644
--- a/tools/arch/x86/include/asm/unistd_64.h
+++ b/tools/arch/x86/include/asm/unistd_64.h
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NR_userfaultfd
+#define __NR_userfaultfd 282
+#endif
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
diff --git a/tools/bootconfig/.gitignore b/tools/bootconfig/.gitignore
index e7644dfaa4a7..b77513cae685 100644
--- a/tools/bootconfig/.gitignore
+++ b/tools/bootconfig/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
bootconfig
diff --git a/tools/bootconfig/Makefile b/tools/bootconfig/Makefile
index a6146ac64458..da5975775337 100644
--- a/tools/bootconfig/Makefile
+++ b/tools/bootconfig/Makefile
@@ -1,23 +1,30 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for bootconfig command
+include ../scripts/Makefile.include
bindir ?= /usr/bin
-HEADER = include/linux/bootconfig.h
-CFLAGS = -Wall -g -I./include
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
-PROGS = bootconfig
+LIBSRC = $(srctree)/lib/bootconfig.c $(srctree)/include/linux/bootconfig.h
+CFLAGS = -Wall -g -I$(CURDIR)/include
-all: $(PROGS)
+ALL_TARGETS := bootconfig
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
-bootconfig: ../../lib/bootconfig.c main.c $(HEADER)
+all: $(ALL_PROGRAMS)
+
+$(OUTPUT)bootconfig: main.c $(LIBSRC)
$(CC) $(filter %.c,$^) $(CFLAGS) -o $@
-install: $(PROGS)
- install bootconfig $(DESTDIR)$(bindir)
+test: $(ALL_PROGRAMS) test-bootconfig.sh
+ ./test-bootconfig.sh $(OUTPUT)
-test: bootconfig
- ./test-bootconfig.sh
+install: $(ALL_PROGRAMS)
+ install $(OUTPUT)bootconfig $(DESTDIR)$(bindir)
clean:
- $(RM) -f *.o bootconfig
+ $(RM) -f $(OUTPUT)*.o $(ALL_PROGRAMS)
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index a9b97814d1a9..16b9a420e6fd 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -130,6 +130,7 @@ int load_xbc_from_initrd(int fd, char **buf)
int ret;
u32 size = 0, csum = 0, rcsum;
char magic[BOOTCONFIG_MAGIC_LEN];
+ const char *msg;
ret = fstat(fd, &stat);
if (ret < 0)
@@ -182,10 +183,12 @@ int load_xbc_from_initrd(int fd, char **buf)
return -EINVAL;
}
- ret = xbc_init(*buf);
+ ret = xbc_init(*buf, &msg, NULL);
/* Wrong data */
- if (ret < 0)
+ if (ret < 0) {
+ pr_err("parse error: %s.\n", msg);
return ret;
+ }
return size;
}
@@ -244,11 +247,34 @@ int delete_xbc(const char *path)
return ret;
}
+static void show_xbc_error(const char *data, const char *msg, int pos)
+{
+ int lin = 1, col, i;
+
+ if (pos < 0) {
+ pr_err("Error: %s.\n", msg);
+ return;
+ }
+
+ /* Note that pos starts from 0 but lin and col should start from 1. */
+ col = pos + 1;
+ for (i = 0; i < pos; i++) {
+ if (data[i] == '\n') {
+ lin++;
+ col = pos - i;
+ }
+ }
+ pr_err("Parse Error: %s at %d:%d\n", msg, lin, col);
+}
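+
+/*
+ * Example (a sketch): for data "key = 1\nbad!!" and pos = 11, the
+ * loop above finds the newline at i = 7, giving lin = 2, col = 4,
+ * so the error is reported at 2:4.
+ */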
+
int apply_xbc(const char *path, const char *xbc_path)
{
u32 size, csum;
char *buf, *data;
int ret, fd;
+ const char *msg;
+ int pos;
ret = load_xbc_file(xbc_path, &buf);
if (ret < 0) {
@@ -267,11 +293,12 @@ int apply_xbc(const char *path, const char *xbc_path)
*(u32 *)(data + size + 4) = csum;
/* Check the data format */
- ret = xbc_init(buf);
+ ret = xbc_init(buf, &msg, &pos);
if (ret < 0) {
- pr_err("Failed to parse %s: %d\n", xbc_path, ret);
+ show_xbc_error(data, msg, pos);
free(data);
free(buf);
return ret;
}
printf("Apply %s to %s\n", xbc_path, path);
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index 1411f4c3454f..81b350ffd03f 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -3,9 +3,16 @@
echo "Boot config test script"
-BOOTCONF=./bootconfig
-INITRD=`mktemp initrd-XXXX`
-TEMPCONF=`mktemp temp-XXXX.bconf`
+if [ -d "$1" ]; then
+ TESTDIR=$1
+else
+ TESTDIR=.
+fi
+BOOTCONF=${TESTDIR}/bootconfig
+
+INITRD=`mktemp ${TESTDIR}/initrd-XXXX`
+TEMPCONF=`mktemp ${TESTDIR}/temp-XXXX.bconf`
+OUTFILE=`mktemp ${TESTDIR}/tempout-XXXX`
NG=0
cleanup() {
@@ -65,7 +72,6 @@ new_size=$(stat -c %s $INITRD)
xpass test $new_size -eq $initrd_size
echo "No error messge while applying"
-OUTFILE=`mktemp tempout-XXXX`
dd if=/dev/zero of=$INITRD bs=4096 count=1
printf " \0\0\0 \0\0\0" >> $INITRD
$BOOTCONF -a $TEMPCONF $INITRD > $OUTFILE 2>&1
diff --git a/tools/bpf/.gitignore b/tools/bpf/.gitignore
index 59024197e71d..cf53342175e7 100644
--- a/tools/bpf/.gitignore
+++ b/tools/bpf/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
FEATURE-DUMP.bpf
feature
bpf_asm
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index 8d6e8901ed2b..26cde83e1ca3 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.d
/_bpftool
/bpftool
diff --git a/tools/bpf/runqslower/.gitignore b/tools/bpf/runqslower/.gitignore
index 90a456a2a72f..ffdb70230c8b 100644
--- a/tools/bpf/runqslower/.gitignore
+++ b/tools/bpf/runqslower/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
/.output
diff --git a/tools/build/.gitignore b/tools/build/.gitignore
index a776371a3502..98ae1f509592 100644
--- a/tools/build/.gitignore
+++ b/tools/build/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
fixdep
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 574c2e0b9d20..3e0c019ef297 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -72,7 +72,8 @@ FEATURE_TESTS_BASIC := \
setns \
libaio \
libzstd \
- disassembler-four-args
+ disassembler-four-args \
+ file-handle
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
diff --git a/tools/build/feature/.gitignore b/tools/build/feature/.gitignore
index 09b335b98842..15fcd34acdb9 100644
--- a/tools/build/feature/.gitignore
+++ b/tools/build/feature/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.d
*.bin
*.output
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index ab8e89a7009c..92012381393a 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -68,7 +68,8 @@ FILES= \
test-llvm-version.bin \
test-libaio.bin \
test-libzstd.bin \
- test-clang-bpf-global-var.bin
+ test-clang-bpf-global-var.bin \
+ test-file-handle.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -327,6 +328,8 @@ $(OUTPUT)test-clang-bpf-global-var.bin:
$(CLANG) -S -g -target bpf -o - $(patsubst %.bin,%.c,$(@F)) | \
grep BTF_KIND_VAR
+$(OUTPUT)test-file-handle.bin:
+ $(BUILD)
###############################
diff --git a/tools/build/feature/test-file-handle.c b/tools/build/feature/test-file-handle.c
new file mode 100644
index 000000000000..4d3b03b27a0b
--- /dev/null
+++ b/tools/build/feature/test-file-handle.c
@@ -0,0 +1,17 @@
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <inttypes.h>
+
+int main(void)
+{
+ struct {
+ struct file_handle fh;
+ uint64_t cgroup_id;
+ } handle;
+ int mount_id;
+
+ name_to_handle_at(AT_FDCWD, "/", &handle.fh, &mount_id, 0);
+ return 0;
+}
diff --git a/tools/cgroup/.gitignore b/tools/cgroup/.gitignore
index 633cd9b874f9..46a82775f2ca 100644
--- a/tools/cgroup/.gitignore
+++ b/tools/cgroup/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
cgroup_event_listener
diff --git a/tools/gpio/.gitignore b/tools/gpio/.gitignore
index a94c0e83b209..a00d604027a2 100644
--- a/tools/gpio/.gitignore
+++ b/tools/gpio/.gitignore
@@ -1,4 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
gpio-event-mon
gpio-hammer
+gpio-watch
lsgpio
include/linux/gpio.h
diff --git a/tools/gpio/Build b/tools/gpio/Build
index 4141f35837db..67c7b7f6a717 100644
--- a/tools/gpio/Build
+++ b/tools/gpio/Build
@@ -2,3 +2,4 @@ gpio-utils-y += gpio-utils.o
lsgpio-y += lsgpio.o gpio-utils.o
gpio-hammer-y += gpio-hammer.o gpio-utils.o
gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
+gpio-watch-y += gpio-watch.o
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 6080de58861f..440434027557 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -18,7 +18,7 @@ MAKEFLAGS += -r
override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
-ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon
+ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon gpio-watch
ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
all: $(ALL_PROGRAMS)
@@ -35,7 +35,7 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
prepare: $(OUTPUT)include/linux/gpio.h
-GPIO_UTILS_IN := $(output)gpio-utils-in.o
+GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
$(GPIO_UTILS_IN): prepare FORCE
$(Q)$(MAKE) $(build)=gpio-utils
@@ -66,6 +66,15 @@ $(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+#
+# gpio-watch
+#
+GPIO_WATCH_IN := $(OUTPUT)gpio-watch-in.o
+$(GPIO_WATCH_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=gpio-watch
+$(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
clean:
rm -f $(ALL_PROGRAMS)
rm -f $(OUTPUT)include/linux/gpio.h
diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
index 0e0060a6eb34..9fd926e8cb52 100644
--- a/tools/gpio/gpio-hammer.c
+++ b/tools/gpio/gpio-hammer.c
@@ -77,7 +77,7 @@ int hammer_device(const char *device_name, unsigned int *lines, int nlines,
fprintf(stdout, "[%c] ", swirr[j]);
j++;
- if (j == sizeof(swirr)-1)
+ if (j == sizeof(swirr) - 1)
j = 0;
fprintf(stdout, "[");
@@ -135,7 +135,14 @@ int main(int argc, char **argv)
device_name = optarg;
break;
case 'o':
- lines[i] = strtoul(optarg, NULL, 10);
+ /*
+ * Avoid overflow. Do not immediately error; we want to
+ * be able to accurately report on the number of times
+ * '-o' was given, to give an accurate error message.
+ */
+ if (i < GPIOHANDLES_MAX)
+ lines[i] = strtoul(optarg, NULL, 10);
+
i++;
break;
case '?':
@@ -143,6 +150,14 @@ int main(int argc, char **argv)
return -1;
}
}
+
+ if (i >= GPIOHANDLES_MAX) {
+ fprintf(stderr,
+ "Only %d occurrences of '-o' are allowed, %d were found\n",
+ GPIOHANDLES_MAX, i + 1);
+ return -1;
+ }
+
nlines = i;
if (!device_name || !nlines) {
diff --git a/tools/gpio/gpio-utils.c b/tools/gpio/gpio-utils.c
index 53470de6a502..06003789e7c7 100644
--- a/tools/gpio/gpio-utils.c
+++ b/tools/gpio/gpio-utils.c
@@ -17,7 +17,7 @@
#include <linux/gpio.h>
#include "gpio-utils.h"
-#define COMSUMER "gpio-utils"
+#define CONSUMER "gpio-utils"
/**
* doc: Operation of gpio
@@ -209,7 +209,7 @@ int gpiotools_gets(const char *device_name, unsigned int *lines,
ret = gpiotools_request_linehandle(device_name, lines, nlines,
GPIOHANDLE_REQUEST_INPUT, data,
- COMSUMER);
+ CONSUMER);
if (ret < 0)
return ret;
@@ -259,7 +259,7 @@ int gpiotools_sets(const char *device_name, unsigned int *lines,
ret = gpiotools_request_linehandle(device_name, lines, nlines,
GPIOHANDLE_REQUEST_OUTPUT, data,
- COMSUMER);
+ CONSUMER);
if (ret < 0)
return ret;
diff --git a/tools/gpio/gpio-watch.c b/tools/gpio/gpio-watch.c
new file mode 100644
index 000000000000..5cea24fddfa7
--- /dev/null
+++ b/tools/gpio/gpio-watch.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * gpio-watch - monitor unrequested lines for property changes using the
+ * character device
+ *
+ * Copyright (C) 2019 BayLibre SAS
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
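+ *
+ * Example invocation (chip path assumed): watch lines 2, 3 and 5 on
+ * gpiochip0:
+ *
+ *	gpio-watch /dev/gpiochip0 2 3 5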
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/gpio.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+int main(int argc, char **argv)
+{
+ struct gpioline_info_changed chg;
+ struct gpioline_info req;
+ struct pollfd pfd;
+ int fd, i, j, ret;
+ char *event, *end;
+ ssize_t rd;
+
+ if (argc < 3)
+ goto err_usage;
+
+ fd = open(argv[1], O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ perror("unable to open gpiochip");
+ return EXIT_FAILURE;
+ }
+
+ for (i = 0, j = 2; i < argc - 2; i++, j++) {
+ memset(&req, 0, sizeof(req));
+
+ req.line_offset = strtoul(argv[j], &end, 0);
+ if (*end != '\0')
+ goto err_usage;
+
+ ret = ioctl(fd, GPIO_GET_LINEINFO_WATCH_IOCTL, &req);
+ if (ret) {
+ perror("unable to set up line watch");
+ return EXIT_FAILURE;
+ }
+ }
+
+ pfd.fd = fd;
+ pfd.events = POLLIN | POLLPRI;
+
+ for (;;) {
+ ret = poll(&pfd, 1, 5000);
+ if (ret < 0) {
+ perror("error polling the linechanged fd");
+ return EXIT_FAILURE;
+ } else if (ret > 0) {
+ memset(&chg, 0, sizeof(chg));
+ rd = read(pfd.fd, &chg, sizeof(chg));
+ if (rd < 0 || rd != sizeof(chg)) {
+ if (rd != sizeof(chg))
+ errno = EIO;
+
+ perror("error reading line change event");
+ return EXIT_FAILURE;
+ }
+
+ switch (chg.event_type) {
+ case GPIOLINE_CHANGED_REQUESTED:
+ event = "requested";
+ break;
+ case GPIOLINE_CHANGED_RELEASED:
+ event = "released";
+ break;
+ case GPIOLINE_CHANGED_CONFIG:
+ event = "config changed";
+ break;
+ default:
+ fprintf(stderr,
+ "invalid event type received from the kernel\n");
+ return EXIT_FAILURE;
+ }
+
+ printf("line %u: %s at %llu\n",
+ chg.info.line_offset, event, chg.timestamp);
+ }
+ }
+
+ return 0;
+
+err_usage:
+ printf("%s: <gpiochip> <line0> <line1> ...\n", argv[0]);
+ return EXIT_FAILURE;
+}
diff --git a/tools/iio/.gitignore b/tools/iio/.gitignore
index 3758202618bd..5bd6f4df98b7 100644
--- a/tools/iio/.gitignore
+++ b/tools/iio/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
iio_event_monitor
iio_generic_buffer
lsiio
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 397cfd65b3fe..7b2d6fc9e6ed 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -142,8 +142,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_AUX = 1U << 20,
+ PERF_SAMPLE_CGROUP = 1U << 21,
- PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 22, /* non-ABI */
__PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
@@ -381,7 +382,8 @@ struct perf_event_attr {
ksymbol : 1, /* include ksymbol events */
bpf_event : 1, /* include bpf events */
aux_output : 1, /* generate AUX records instead of events */
- __reserved_1 : 32;
+ cgroup : 1, /* include cgroup events */
+ __reserved_1 : 31;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -1012,6 +1014,16 @@ enum perf_event_type {
*/
PERF_RECORD_BPF_EVENT = 18,
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * char path[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_CGROUP = 19,
+
PERF_RECORD_MAX, /* non-ABI */
};
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 4cf93110c259..e83fc8e868f4 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -25,7 +25,7 @@ import sys
import locale
import os
import time
-import optparse
+import argparse
import ctypes
import fcntl
import resource
@@ -33,6 +33,8 @@ import struct
import re
import subprocess
from collections import defaultdict, namedtuple
+from functools import reduce
+from datetime import datetime
VMX_EXIT_REASONS = {
'EXCEPTION_NMI': 0,
@@ -873,7 +875,7 @@ class Stats(object):
if options.debugfs:
providers.append(DebugfsProvider(options.pid, options.fields,
- options.dbgfs_include_past))
+ options.debugfs_include_past))
if options.tracepoints or not providers:
providers.append(TracepointProvider(options.pid, options.fields))
@@ -974,15 +976,17 @@ DELAY_DEFAULT = 3.0
MAX_GUEST_NAME_LEN = 48
MAX_REGEX_LEN = 44
SORT_DEFAULT = 0
+MIN_DELAY = 0.1
+MAX_DELAY = 25.5
class Tui(object):
"""Instruments curses to draw a nice text ui."""
- def __init__(self, stats):
+ def __init__(self, stats, opts):
self.stats = stats
self.screen = None
self._delay_initial = 0.25
- self._delay_regular = DELAY_DEFAULT
+ self._delay_regular = opts.set_delay
self._sorting = SORT_DEFAULT
self._display_guests = 0
@@ -1183,7 +1187,7 @@ class Tui(object):
if not self._is_running_guest(self.stats.pid_filter):
if self._gname:
- try: # ...to identify the guest by name in case it's back
+ try: # ...to identify the guest by name in case it's back
pids = self.get_pid_from_gname(self._gname)
if len(pids) == 1:
self._refresh_header(pids[0])
@@ -1282,7 +1286,8 @@ class Tui(object):
' p filter by guest name/PID',
' q quit',
' r reset stats',
- ' s set update interval',
+ ' s set delay between refreshes (value range: '
+ '%s-%s secs)' % (MIN_DELAY, MAX_DELAY),
' x toggle reporting of stats for individual child trace'
' events',
'Any other key refreshes statistics immediately')
@@ -1336,8 +1341,8 @@ class Tui(object):
msg = ''
while True:
self.screen.erase()
- self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
- DELAY_DEFAULT, curses.A_BOLD)
+ self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).'
+ % DELAY_DEFAULT, curses.A_BOLD)
self.screen.addstr(4, 0, msg)
self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
self._delay_regular)
@@ -1348,11 +1353,9 @@ class Tui(object):
try:
if len(val) > 0:
delay = float(val)
- if delay < 0.1:
- msg = '"' + str(val) + '": Value must be >=0.1'
- continue
- if delay > 25.5:
- msg = '"' + str(val) + '": Value must be <=25.5'
+ err = is_delay_valid(delay)
+ if err is not None:
+ msg = err
continue
else:
delay = DELAY_DEFAULT
@@ -1488,33 +1491,64 @@ def batch(stats):
pass
-def log(stats):
- """Prints statistics as reiterating key block, multiple value blocks."""
- keys = sorted(stats.get().keys())
-
- def banner():
+class StdFormat(object):
+ def __init__(self, keys):
+ self._banner = ''
for key in keys:
- print(key.split(' ')[0], end=' ')
- print()
+ self._banner += key.split(' ')[0] + ' '
- def statline():
- s = stats.get()
+ def get_banner(self):
+ return self._banner
+
+ @staticmethod
+ def get_statline(keys, s):
+ res = ''
for key in keys:
- print(' %9d' % s[key].delta, end=' ')
- print()
+ res += ' %9d' % s[key].delta
+ return res
+
+
+class CSVFormat(object):
+ def __init__(self, keys):
+ self._banner = 'timestamp'
+ self._banner += reduce(lambda res, key: "{},{!s}".format(res,
+ key.split(' ')[0]), keys, '')
+
+ def get_banner(self):
+ return self._banner
+
+ @staticmethod
+ def get_statline(keys, s):
+ return reduce(lambda res, key: "{},{!s}".format(res, s[key].delta),
+ keys, '')
+
+
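+# Example (a sketch): with keys ['kvm_exit', 'kvm_entry'], CSVFormat
+# produces the banner 'timestamp,kvm_exit,kvm_entry' and log() below
+# prints lines such as '2020-01-01 12:00:00,42,7'.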
+def log(stats, opts, frmt, keys):
+ """Prints statistics as reiterating key block, multiple value blocks."""
line = 0
banner_repeat = 20
while True:
try:
- time.sleep(1)
+ time.sleep(opts.set_delay)
if line % banner_repeat == 0:
- banner()
- statline()
+ print(frmt.get_banner())
+ print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
+ frmt.get_statline(keys, stats.get()))
line += 1
except KeyboardInterrupt:
break
+def is_delay_valid(delay):
+ """Verify delay is in valid value range."""
+ msg = None
+ if delay < MIN_DELAY:
+ msg = '"' + str(delay) + '": Delay must be >=%s' % MIN_DELAY
+ if delay > MAX_DELAY:
+ msg = '"' + str(delay) + '": Delay must be <=%s' % MAX_DELAY
+ return msg
+
+
def get_options():
"""Returns processed program arguments."""
description_text = """
@@ -1545,89 +1579,85 @@ Interactive Commands:
p filter by PID
q quit
r reset stats
- s set update interval
+ s set update interval (value range: 0.1-25.5 secs)
x toggle reporting of stats for individual child trace events
Press any other key to refresh statistics immediately.
""" % (PATH_DEBUGFS_KVM, PATH_DEBUGFS_TRACING)
- class PlainHelpFormatter(optparse.IndentedHelpFormatter):
- def format_description(self, description):
- if description:
- return description + "\n"
- else:
- return ""
-
- def cb_guest_to_pid(option, opt, val, parser):
- try:
- pids = Tui.get_pid_from_gname(val)
- except:
- sys.exit('Error while searching for guest "{}". Use "-p" to '
- 'specify a pid instead?'.format(val))
- if len(pids) == 0:
- sys.exit('Error: No guest by the name "{}" found'.format(val))
- if len(pids) > 1:
- sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
- 'to specify the desired pid'.format(" ".join(pids)))
- parser.values.pid = pids[0]
-
- optparser = optparse.OptionParser(description=description_text,
- formatter=PlainHelpFormatter())
- optparser.add_option('-1', '--once', '--batch',
- action='store_true',
- default=False,
- dest='once',
- help='run in batch mode for one second',
- )
- optparser.add_option('-i', '--debugfs-include-past',
- action='store_true',
- default=False,
- dest='dbgfs_include_past',
- help='include all available data on past events for '
- 'debugfs',
- )
- optparser.add_option('-l', '--log',
- action='store_true',
- default=False,
- dest='log',
- help='run in logging mode (like vmstat)',
- )
- optparser.add_option('-t', '--tracepoints',
- action='store_true',
- default=False,
- dest='tracepoints',
- help='retrieve statistics from tracepoints',
- )
- optparser.add_option('-d', '--debugfs',
- action='store_true',
- default=False,
- dest='debugfs',
- help='retrieve statistics from debugfs',
- )
- optparser.add_option('-f', '--fields',
- action='store',
- default='',
- dest='fields',
- help='''fields to display (regex)
- "-f help" for a list of available events''',
- )
- optparser.add_option('-p', '--pid',
- action='store',
- default=0,
- type='int',
- dest='pid',
- help='restrict statistics to pid',
- )
- optparser.add_option('-g', '--guest',
- action='callback',
- type='string',
- dest='pid',
- metavar='GUEST',
- help='restrict statistics to guest by name',
- callback=cb_guest_to_pid,
- )
- options, unkn = optparser.parse_args(sys.argv)
- if len(unkn) != 1:
- sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
+ class Guest_to_pid(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ try:
+ pids = Tui.get_pid_from_gname(values)
+ except:
+ sys.exit('Error while searching for guest "{}". Use "-p" to '
+ 'specify a pid instead?'.format(values))
+ if len(pids) == 0:
+ sys.exit('Error: No guest by the name "{}" found'
+ .format(values))
+ if len(pids) > 1:
+ sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
+ ' to specify the desired pid'.format(" ".join(pids)))
+ namespace.pid = pids[0]
+
+ argparser = argparse.ArgumentParser(description=description_text,
+ formatter_class=argparse
+ .RawTextHelpFormatter)
+ argparser.add_argument('-1', '--once', '--batch',
+ action='store_true',
+ default=False,
+ help='run in batch mode for one second',
+ )
+ argparser.add_argument('-c', '--csv',
+ action='store_true',
+ default=False,
+ help='log in csv format - requires option -l/--log',
+ )
+ argparser.add_argument('-d', '--debugfs',
+ action='store_true',
+ default=False,
+ help='retrieve statistics from debugfs',
+ )
+ argparser.add_argument('-f', '--fields',
+ default='',
+ help='''fields to display (regex)
+"-f help" for a list of available events''',
+ )
+ argparser.add_argument('-g', '--guest',
+ type=str,
+ help='restrict statistics to guest by name',
+ action=Guest_to_pid,
+ )
+ argparser.add_argument('-i', '--debugfs-include-past',
+ action='store_true',
+ default=False,
+ help='include all available data on past events for'
+ ' debugfs',
+ )
+ argparser.add_argument('-l', '--log',
+ action='store_true',
+ default=False,
+ help='run in logging mode (like vmstat)',
+ )
+ argparser.add_argument('-p', '--pid',
+ type=int,
+ default=0,
+ help='restrict statistics to pid',
+ )
+ argparser.add_argument('-s', '--set-delay',
+ type=float,
+ default=DELAY_DEFAULT,
+ metavar='DELAY',
+ help='set delay between refreshes (value range: '
+ '%s-%s secs)' % (MIN_DELAY, MAX_DELAY),
+ )
+ argparser.add_argument('-t', '--tracepoints',
+ action='store_true',
+ default=False,
+ help='retrieve statistics from tracepoints',
+ )
+ options = argparser.parse_args()
+ if options.csv and not options.log:
+ sys.exit('Error: Option -c/--csv requires -l/--log')
try:
# verify that we were passed a valid regex up front
re.compile(options.fields)
@@ -1693,6 +1723,10 @@ def main():
sys.stderr.write('Did you use a (unsupported) tid instead of a pid?\n')
sys.exit('Specified pid does not exist.')
+ err = is_delay_valid(options.set_delay)
+ if err is not None:
+ sys.exit('Error: ' + err)
+
stats = Stats(options)
if options.fields == 'help':
@@ -1704,12 +1738,18 @@ def main():
sys.exit(0)
if options.log:
- log(stats)
+ keys = sorted(stats.get().keys())
+ if options.csv:
+ frmt = CSVFormat(keys)
+ else:
+ frmt = StdFormat(keys)
+ log(stats, options, frmt, keys)
elif not options.once:
- with Tui(stats) as tui:
+ with Tui(stats, options) as tui:
tui.show_stats()
else:
batch(stats)
+
if __name__ == "__main__":
main()
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index c057ba52364e..a97ded2aedad 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -49,7 +49,7 @@ INTERACTIVE COMMANDS
*r*:: reset stats
-*s*:: set update interval
+*s*:: set delay between refreshes
*x*:: toggle reporting of stats for child trace events
:: *Note*: The stats for the parents summarize the respective child trace
@@ -64,37 +64,45 @@ OPTIONS
--batch::
run in batch mode for one second
--l::
---log::
- run in logging mode (like vmstat)
-
--t::
---tracepoints::
- retrieve statistics from tracepoints
+-c::
+--csv::
+ log in csv format - requires option -l/--log
-d::
--debugfs::
retrieve statistics from debugfs
+-f<fields>::
+--fields=<fields>::
+ fields to display (regex), "-f help" for a list of available events
+
+-g<guest>::
+--guest=<guest_name>::
+ limit statistics to one virtual machine (guest name)
+
+-h::
+--help::
+ show help message
+
-i::
--debugfs-include-past::
include all available data on past events for debugfs
+-l::
+--log::
+ run in logging mode (like vmstat)
+
-p<pid>::
--pid=<pid>::
limit statistics to one virtual machine (pid)
--g<guest>::
---guest=<guest_name>::
- limit statistics to one virtual machine (guest name)
+-s::
+--set-delay::
+ set delay between refreshes (value range: 0.1-25.5 secs)
--f<fields>::
---fields=<fields>::
- fields to display (regex), "-f help" for a list of available events
-
--h::
---help::
- show help message
+-t::
+--tracepoints::
+ retrieve statistics from tracepoints
SEE ALSO
--------
diff --git a/tools/laptop/dslm/.gitignore b/tools/laptop/dslm/.gitignore
index 9fc984e64386..f7f1296b96ae 100644
--- a/tools/laptop/dslm/.gitignore
+++ b/tools/laptop/dslm/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
dslm
diff --git a/tools/leds/.gitignore b/tools/leds/.gitignore
index ac96d9f53dfc..06bd3ee1b7c9 100644
--- a/tools/leds/.gitignore
+++ b/tools/leds/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
uledmon
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index e97c2ebcf447..8a81b3679d2b 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
libbpf_version.h
libbpf.pc
FEATURE-DUMP.libbpf
diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore
index cc0e7a9f99e3..6c308ac4388c 100644
--- a/tools/lib/lockdep/.gitignore
+++ b/tools/lib/lockdep/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
liblockdep.so.*
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 18106899cb4e..69b44d2cc0f5 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -105,6 +105,12 @@ struct perf_record_bpf_event {
__u8 tag[BPF_TAG_SIZE]; // prog tag
};
+struct perf_record_cgroup {
+ struct perf_event_header header;
+ __u64 id;
+ char path[PATH_MAX];
+};
+
struct perf_record_sample {
struct perf_event_header header;
__u64 array[];
@@ -352,6 +358,7 @@ union perf_event {
struct perf_record_mmap2 mmap2;
struct perf_record_comm comm;
struct perf_record_namespaces namespaces;
+ struct perf_record_cgroup cgroup;
struct perf_record_fork fork;
struct perf_record_lost lost;
struct perf_record_lost_samples lost_samples;
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 2548ff8c4d9c..06ac7bd2144b 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -497,7 +497,7 @@ struct rb_node *rb_next(const struct rb_node *node)
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left)
- node=node->rb_left;
+ node = node->rb_left;
return (struct rb_node *)node;
}
@@ -528,7 +528,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right)
- node=node->rb_right;
+ node = node->rb_right;
return (struct rb_node *)node;
}
diff --git a/tools/lib/traceevent/.gitignore b/tools/lib/traceevent/.gitignore
index 9e9f25fb1922..7123c70b9ebc 100644
--- a/tools/lib/traceevent/.gitignore
+++ b/tools/lib/traceevent/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
TRACEEVENT-CFLAGS
libtraceevent-dynamic-list
libtraceevent.so.*
diff --git a/tools/memory-model/.gitignore b/tools/memory-model/.gitignore
index b1d34c52f3c3..cf4cd66d8fbf 100644
--- a/tools/memory-model/.gitignore
+++ b/tools/memory-model/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
litmus
diff --git a/tools/memory-model/litmus-tests/.gitignore b/tools/memory-model/litmus-tests/.gitignore
index 6e2ddc54152f..c492a1ddad91 100644
--- a/tools/memory-model/litmus-tests/.gitignore
+++ b/tools/memory-model/litmus-tests/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.litmus.out
diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
index 914cff12899b..45cefda24c7b 100644
--- a/tools/objtool/.gitignore
+++ b/tools/objtool/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
arch/x86/lib/inat-tables.c
objtool
fixdep
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index 32b7c6f9043d..0a1344c45213 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -30,14 +30,17 @@ struct pci_test {
int irqtype;
bool set_irqtype;
bool get_irqtype;
+ bool clear_irq;
bool read;
bool write;
bool copy;
unsigned long size;
+ bool use_dma;
};
static int run_test(struct pci_test *test)
{
+ /* zero-init so param.flags is well-defined when DMA is not requested */
+ struct pci_endpoint_test_xfer_param param = {};
int ret = -EINVAL;
int fd;
@@ -74,6 +77,15 @@ static int run_test(struct pci_test *test)
fprintf(stdout, "%s\n", irq[ret]);
}
+ if (test->clear_irq) {
+ ret = ioctl(fd, PCITEST_CLEAR_IRQ);
+ fprintf(stdout, "CLEAR IRQ:\t\t");
+ if (ret < 0)
+ fprintf(stdout, "FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
if (test->legacyirq) {
ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
fprintf(stdout, "LEGACY IRQ:\t");
@@ -102,7 +114,10 @@ static int run_test(struct pci_test *test)
}
if (test->write) {
- ret = ioctl(fd, PCITEST_WRITE, test->size);
+ param.size = test->size;
+ if (test->use_dma)
+ param.flags = PCITEST_FLAGS_USE_DMA;
+ ret = ioctl(fd, PCITEST_WRITE, &param);
fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
if (ret < 0)
fprintf(stdout, "TEST FAILED\n");
@@ -111,7 +126,10 @@ static int run_test(struct pci_test *test)
}
if (test->read) {
- ret = ioctl(fd, PCITEST_READ, test->size);
+ param.size = test->size;
+ if (test->use_dma)
+ param.flags = PCITEST_FLAGS_USE_DMA;
+ ret = ioctl(fd, PCITEST_READ, &param);
fprintf(stdout, "READ (%7ld bytes):\t\t", test->size);
if (ret < 0)
fprintf(stdout, "TEST FAILED\n");
@@ -120,7 +138,10 @@ static int run_test(struct pci_test *test)
}
if (test->copy) {
- ret = ioctl(fd, PCITEST_COPY, test->size);
+ param.size = test->size;
+ if (test->use_dma)
+ param.flags = PCITEST_FLAGS_USE_DMA;
+ ret = ioctl(fd, PCITEST_COPY, &param);
fprintf(stdout, "COPY (%7ld bytes):\t\t", test->size);
if (ret < 0)
fprintf(stdout, "TEST FAILED\n");
@@ -153,7 +174,7 @@ int main(int argc, char **argv)
/* set default endpoint device */
test->device = "/dev/pci-endpoint-test.0";
- while ((c = getopt(argc, argv, "D:b:m:x:i:Ilhrwcs:")) != EOF)
+ while ((c = getopt(argc, argv, "D:b:m:x:i:deIlhrwcs:")) != EOF)
switch (c) {
case 'D':
test->device = optarg;
@@ -194,9 +215,15 @@ int main(int argc, char **argv)
case 'c':
test->copy = true;
continue;
+ case 'e':
+ test->clear_irq = true;
+ continue;
case 's':
test->size = strtoul(optarg, NULL, 0);
continue;
+ case 'd':
+ test->use_dma = true;
+ continue;
case 'h':
default:
usage:
@@ -208,7 +235,9 @@ usage:
"\t-m <msi num> MSI test (msi number between 1..32)\n"
"\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n"
"\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
+ "\t-e Clear IRQ\n"
"\t-I Get current IRQ type configured\n"
+ "\t-d Use DMA\n"
"\t-l Legacy IRQ test\n"
"\t-r Read buffer test\n"
"\t-w Write buffer test\n"
diff --git a/tools/pcmcia/.gitignore b/tools/pcmcia/.gitignore
index 53d081336757..94cb97b77f06 100644
--- a/tools/pcmcia/.gitignore
+++ b/tools/pcmcia/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
crc32hash
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index bf1252dc2cb0..f3f84781fd74 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
PERF-CFLAGS
PERF-GUI-VARS
PERF-VERSION-FILE
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 8ead55593984..f16d8a71d3f5 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -405,14 +405,16 @@ ui.*::
This option is only applied to TUI.
call-graph.*::
- When sub-commands 'top' and 'report' work with -g/—-children
- there're options in control of call-graph.
+ The following controls the handling of call-graphs (obtained via the
+ -g/--call-graph options).
call-graph.record-mode::
- The record-mode can be 'fp' (frame pointer), 'dwarf' and 'lbr'.
- The value of 'dwarf' is effective only if perf detect needed library
- (libunwind or a recent version of libdw).
- 'lbr' only work for cpus that support it.
+ The mode for user space can be 'fp' (frame pointer), 'dwarf'
+ or 'lbr'. The value 'dwarf' is effective only if libunwind
+ (or a recent version of libdw) is present on the system;
+ the value 'lbr' only works for certain CPUs. The method for
+ kernel space is controlled not by this option but by the
+ kernel config (CONFIG_UNWINDER_*).
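+
+	For example, to make DWARF unwinding the default for user space,
+	one could set (a sketch of the usual perfconfig syntax):
+
+		[call-graph]
+			record-mode = dwarf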
call-graph.dump-size::
The size of stack to dump in order to do post-unwinding. Default is 8192 (byte).
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 7f4db7592467..b3f3b3f1c161 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -237,16 +237,22 @@ OPTIONS
option and remains only for backward compatibility. See --event.
-g::
- Enables call-graph (stack chain/backtrace) recording.
+ Enables call-graph (stack chain/backtrace) recording for both
+ kernel space and user space.
--call-graph::
Setup and enable call-graph (stack chain/backtrace) recording,
- implies -g. Default is "fp".
+ implies -g. Default is "fp" (for user space).
- Allows specifying "fp" (frame pointer) or "dwarf"
- (DWARF's CFI - Call Frame Information) or "lbr"
- (Hardware Last Branch Record facility) as the method to collect
- the information used to show the call graphs.
+ The unwinding method used for kernel space is dependent on the
+ unwinder used by the active kernel configuration, i.e.
+ CONFIG_UNWINDER_FRAME_POINTER (fp) or CONFIG_UNWINDER_ORC (orc).
+
+ Any option specified here controls the method used for user space.
+
+ Valid options are "fp" (frame pointer), "dwarf" (DWARF's CFI -
+ Call Frame Information) or "lbr" (Hardware Last Branch Record
+ facility).
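+
+	For example, to record user-space call graphs with DWARF
+	unwinding (workload name assumed):
+
+		perf record --call-graph dwarf -- ./workload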
In some systems, where binaries are build with gcc
--fomit-frame-pointer, using the "fp" method will produce bogus
@@ -385,7 +391,10 @@ displayed with the weight and local_weight sort keys. This currently works for
abort events and some memory events in precise mode on modern Intel CPUs.
--namespaces::
-Record events of type PERF_RECORD_NAMESPACES.
+Record events of type PERF_RECORD_NAMESPACES. This enables the 'cgroup_id' sort key.
+
+--all-cgroups::
+Record events of type PERF_RECORD_CGROUP. This enables the 'cgroup' sort key.
--transaction::
Record transaction flags for transaction related events.
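Taken together, the updated options split as described: the kernel-side
unwinder is fixed by the kernel configuration, while the method named on
the command line applies to user space. A hedged example ('./workload'
is a placeholder):

    $ perf record --call-graph dwarf -- ./workload   # user space via DWARF CFI
    $ perf record --all-cgroups -a -- sleep 1        # emit PERF_RECORD_CGROUP
    $ perf report -s cgroup                          # use the new sort key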
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index bd0a029d4c08..f569b9ea4002 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -95,6 +95,7 @@ OPTIONS
abort cost. This is the global weight.
- local_weight: Local weight version of the weight above.
- cgroup_id: ID derived from cgroup namespace device and inode numbers.
+ - cgroup: cgroup pathname in the cgroupfs.
- transaction: Transaction abort flags.
- overhead: Overhead percentage of sample
- overhead_sys: Overhead percentage of sample running in system mode
@@ -377,6 +378,11 @@ OPTIONS
Show event group information together. It forces group output also
if there are no groups defined in data file.
+--group-sort-idx::
+	Sort the output by the event at index n in the group. If n is invalid,
+	sort by the first event. Groups with different numbers of events are
+	supported. WARNING: This option should only be used with grouped events.
+
--demangle::
Demangle symbol names to human readable form. It's enabled by default,
disable with --no-demangle.
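A short usage sketch for --group-sort-idx, assuming events were recorded
as a group (the event list is illustrative; index 1 is assumed to select
the second event in the group):

    $ perf record -e '{cycles,instructions}' -- ./workload
    $ perf report --group --group-sort-idx 1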
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index db6a36aac47e..963487e82edc 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -319,6 +319,9 @@ OPTIONS
--show-bpf-events
Display bpf events i.e. events of type PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT.
+--show-cgroup-events
+ Display cgroup events i.e. events of type PERF_RECORD_CGROUP.
+
--demangle::
Demangle symbol names to human readable form. It's enabled by default,
disable with --no-demangle.
@@ -390,6 +393,9 @@ include::itrace.txt[]
--reltime::
Print time stamps relative to trace start.
+--deltatime::
+ Print time stamps relative to previous event.
+
--per-event-dump::
Create per event files with a "perf.data.EVENT.dump" name instead of
printing to stdout, useful, for instance, for generating flamegraphs.
@@ -406,6 +412,14 @@ include::itrace.txt[]
--xed::
Run xed disassembler on output. Requires installing the xed disassembler.
+-S::
+--symbols=symbol[,symbol...]::
+	Only consider the listed symbols. Symbols are typically given by
+	name, but they may also be hexadecimal addresses.
+
+ For example, to select the symbol noploop or the address 0x4007a0:
+ perf script --symbols=noploop,0x4007a0
+
--call-trace::
Show call stream for intel_pt traces. The CPUs are interleaved, but
can be filtered with -C.
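The new perf-script switches compose in the obvious way; a sketch,
assuming the perf.data file contains the relevant records:

    $ perf script --deltatime             # time stamps relative to previous event
    $ perf script --show-cgroup-events    # print PERF_RECORD_CGROUP records
    $ perf script -S noploop,0x4007a0     # restrict output to these symbols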
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 324b6b53c86b..487737a725e9 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -53,6 +53,11 @@ Default is to monitor all CPUS.
--group::
Put the counters into a counter group.
+--group-sort-idx::
+	Sort the output by the event at index n in the group. If n is invalid,
+	sort by the first event. Groups with different numbers of events are
+	supported. WARNING: This option should only be used with grouped events.
+
-F <freq>::
--freq=<freq>::
Profile at this frequency. Use 'max' to use the currently maximum
@@ -272,6 +277,10 @@ Default is to monitor all CPUS.
Record events of type PERF_RECORD_NAMESPACES and display them with the
'cgroup_id' sort key.
+--all-cgroups::
+	Record events of type PERF_RECORD_CGROUP and display them with the
+ 'cgroup' sort key.
+
--switch-on EVENT_NAME::
Only consider events after this event is found.
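For example (a sketch; the 'cgroup' sort key is only meaningful when the
cgroup records enabled by --all-cgroups are present):

    $ perf top --all-cgroups -s cgroup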
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 80e55e796be9..12a8204d63c6 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -228,8 +228,17 @@ strip-libs = $(filter-out -l%,$(1))
PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
+# Python 3.8 changed the output of `python-config --ldflags` to not include the
+# '-lpythonX.Y' flag unless '--embed' is also passed. The feature check for
+# libpython fails if that flag is not included in LDFLAGS.
+ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0)
+ PYTHON_CONFIG_LDFLAGS := --ldflags --embed
+else
+ PYTHON_CONFIG_LDFLAGS := --ldflags
+endif
+
ifdef PYTHON_CONFIG
- PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+ PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null)
PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
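The behaviour being probed above can be reproduced by hand; the output
below is illustrative of a typical Python 3.8 installation (paths vary):

    $ python3.8-config --ldflags
    -L/usr/lib/python3.8/config-3.8-x86_64-linux-gnu -lcrypt -lpthread -ldl -lutil -lm
    $ python3.8-config --ldflags --embed
    -L/usr/lib/python3.8/config-3.8-x86_64-linux-gnu -lpython3.8 -lcrypt -lpthread -ldl -lutil -lm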
@@ -348,6 +357,10 @@ ifeq ($(feature-gettid), 1)
CFLAGS += -DHAVE_GETTID
endif
+ifeq ($(feature-file-handle), 1)
+ CFLAGS += -DHAVE_FILE_HANDLE
+endif
+
ifdef NO_LIBELF
NO_DWARF := 1
NO_DEMANGLE := 1
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 3eda9d4b88e7..d15a311408f1 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -231,6 +231,7 @@ TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
BPF_DIR = $(srctree)/tools/lib/bpf/
SUBCMD_DIR = $(srctree)/tools/lib/subcmd/
LIBPERF_DIR = $(srctree)/tools/lib/perf/
+DOC_DIR = $(srctree)/tools/perf/Documentation/
# Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
# Without this setting the output feature dump file misses some features, for
@@ -573,7 +574,7 @@ arch_errno_hdr_dir := $(srctree)/tools
arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
$(arch_errno_name_array): $(arch_errno_tbl)
- $(Q)$(SHELL) '$(arch_errno_tbl)' $(CC) $(arch_errno_hdr_dir) > $@
+ $(Q)$(SHELL) '$(arch_errno_tbl)' $(firstword $(CC)) $(arch_errno_hdr_dir) > $@
sync_file_range_arrays := $(beauty_outdir)/sync_file_range_arrays.c
sync_file_range_tbls := $(srctree)/tools/perf/trace/beauty/sync_file_range.sh
@@ -792,7 +793,6 @@ $(LIBSUBCMD): FORCE
$(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a
$(LIBSUBCMD)-clean:
- $(call QUIET_CLEAN, libsubcmd)
$(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) clean
help:
@@ -832,7 +832,7 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
# 'make doc' should call 'make -C Documentation all'
$(DOC_TARGETS):
- $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
+ $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:doc=all)
TAG_FOLDERS= . ../lib ../include
TAG_FILES= ../../include/uapi/linux/perf_event.h
@@ -959,7 +959,7 @@ install-python_ext:
# 'make install-doc' should call 'make -C Documentation install'
$(INSTALL_DOC_TARGETS):
- $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:-doc=)
+ $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:-doc=)
### Cleaning rules
@@ -1008,7 +1008,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(rename_flags_array) \
$(OUTPUT)$(arch_errno_name_array) \
$(OUTPUT)$(sync_file_range_arrays)
- $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
+ $(call QUIET_CLEAN, Documentation) \
+ $(MAKE) -C $(DOC_DIR) O=$(OUTPUT) clean >/dev/null
#
# To provide FEATURE-DUMP into $(FEATURE_DUMP_COPY)
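With DOC_DIR wired up as above, the documentation targets can be invoked
directly from a kernel tree (a sketch, not an exhaustive target list):

    $ make -C tools/perf doc           # build the man/html pages
    $ make -C tools/perf install-doc   # install them
    $ make -C tools/perf clean         # now also cleans Documentation quietly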
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 0a7782c61209..5c13438c7bd4 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,6 +1,6 @@
perf-y += header.o
+perf-y += machine.o
perf-y += perf_regs.o
-perf-y += sym-handling.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c
new file mode 100644
index 000000000000..d41b27e781d3
--- /dev/null
+++ b/tools/perf/arch/arm64/util/machine.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <string.h>
+#include "debug.h"
+#include "symbol.h"
+
+/* On arm64, the kernel text segment starts at a high memory address,
+ * for example 0xffff 0000 8xxx xxxx. Modules start at a lower memory
+ * address, like 0xffff 0000 00ax xxxx. When only a small amount of
+ * memory is used by modules, the gap between the end of a module's
+ * text segment and the start of the kernel text segment may reach 2G.
+ * Therefore do not fill this gap and do not assign it to the kernel dso map.
+ */
+
+#define SYMBOL_LIMIT (1 << 12) /* 4K */
+
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+ if ((strchr(p->name, '[') && strchr(c->name, '[') == NULL) ||
+ (strchr(p->name, '[') == NULL && strchr(c->name, '[')))
+ /* Limit range of last symbol in module and kernel */
+ p->end += SYMBOL_LIMIT;
+ else
+ p->end = c->start;
+ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
+}
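Since the fixup logs through pr_debug4(), its effect can be inspected by
raising perf's debug level (any symbol-loading subcommand works; 'report'
is only an example):

    $ perf --debug verbose=4 report 2>&1 | grep arch__symbols__fixup_end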
diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c
deleted file mode 100644
index 8dfa3e5229f1..000000000000
--- a/tools/perf/arch/arm64/util/sym-handling.c
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
- */
-
-#include "symbol.h" // for the elf__needs_adjust_symbols() prototype
-#include <stdbool.h>
-
-#ifdef HAVE_LIBELF_SUPPORT
-#include <gelf.h>
-
-bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
-{
- return ehdr.e_type == ET_EXEC ||
- ehdr.e_type == ET_REL ||
- ehdr.e_type == ET_DYN;
-}
-#endif
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 7cf0b8803097..e5c9504f8586 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,5 +1,4 @@
perf-y += header.o
-perf-y += sym-handling.o
perf-y += kvm-stat.o
perf-y += perf_regs.o
perf-y += mem-events.o
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index abb7a12d8f93..0856b32f9e08 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -10,16 +10,6 @@
#include "probe-event.h"
#include "probe-file.h"
-#ifdef HAVE_LIBELF_SUPPORT
-bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
-{
- return ehdr.e_type == ET_EXEC ||
- ehdr.e_type == ET_REL ||
- ehdr.e_type == ET_DYN;
-}
-
-#endif
-
int arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 5e697cd2224a..c94a002f295e 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -455,6 +455,7 @@ static struct perf_diff pdiff = {
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.namespaces = perf_event__process_namespaces,
+ .cgroup = perf_event__process_cgroup,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4c301466101b..1ab349abe904 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1397,6 +1397,11 @@ static int record__synthesize(struct record *rec, bool tail)
if (err < 0)
pr_warning("Couldn't synthesize bpf events.\n");
+ err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
+ machine);
+ if (err < 0)
+ pr_warning("Couldn't synthesize cgroup events.\n");
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
process_synthesized_event, opts->sample_address,
1);
@@ -1428,6 +1433,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (rec->opts.record_namespaces)
tool->namespace_events = true;
+ if (rec->opts.record_cgroup) {
+#ifdef HAVE_FILE_HANDLE
+ tool->cgroup_events = true;
+#else
+ pr_err("cgroup tracking is not supported\n");
+ return -1;
+#endif
+ }
+
if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
signal(SIGUSR2, snapshot_sig_handler);
if (rec->opts.auxtrace_snapshot_mode)
@@ -2358,6 +2372,8 @@ static struct option __record_options[] = {
"per thread proc mmap processing timeout in ms"),
OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
"Record namespaces events"),
+ OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
+ "Record cgroup events"),
OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
"Record context switch events"),
OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 5f4045df76f4..26d8fc27e427 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -635,7 +635,7 @@ static int report__browse_hists(struct report *rep)
* Usually "ret" is the last pressed key, and we only
* care if the key notifies us to switch data file.
*/
- if (ret != K_SWITCH_INPUT_DATA)
+ if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
ret = 0;
break;
case 2:
@@ -1105,6 +1105,7 @@ int cmd_report(int argc, const char **argv)
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
+ .cgroup = perf_event__process_cgroup,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
@@ -1227,6 +1228,10 @@ int cmd_report(int argc, const char **argv)
"Show a column with the sum of periods"),
OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
"Show event group information together"),
+ OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
+ "Sort the output by the event at the index n in group. "
+ "If n is invalid, sort by the first event. "
+		    "WARNING: should only be used with grouped events."),
OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
"use branch records for per branch histogram filling",
parse_branch_mode),
@@ -1369,6 +1374,12 @@ repeat:
setup_forced_leader(&report, session->evlist);
+ if (symbol_conf.group_sort_idx && !session->evlist->nr_groups) {
+ parse_options_usage(NULL, options, "group-sort-idx", 0);
+ ret = -EINVAL;
+ goto error;
+ }
+
if (itrace_synth_opts.last_branch)
has_br_stack = true;
@@ -1470,7 +1481,7 @@ repeat:
sort_order = sort_tmp;
}
- if ((last_key != K_SWITCH_INPUT_DATA) &&
+ if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
(setup_sorting(session->evlist) < 0)) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
@@ -1549,7 +1560,7 @@ repeat:
sort__setup_elide(stdout);
ret = __cmd_report(&report);
- if (ret == K_SWITCH_INPUT_DATA) {
+ if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
perf_session__delete(session);
last_key = K_SWITCH_INPUT_DATA;
goto repeat;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 656b347f6dd8..1f57a7ecdf3d 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -63,7 +63,9 @@
static char const *script_name;
static char const *generate_script_lang;
static bool reltime;
+static bool deltatime;
static u64 initial_time;
+static u64 previous_time;
static bool debug_mode;
static u64 last_timestamp;
static u64 nr_unordered;
@@ -704,6 +706,13 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
if (!initial_time)
initial_time = sample->time;
t = sample->time - initial_time;
+ } else if (deltatime) {
+ if (previous_time)
+ t = sample->time - previous_time;
+		else
+			t = 0;
+ previous_time = sample->time;
}
nsecs = t;
secs = nsecs / NSEC_PER_SEC;
@@ -1685,6 +1694,7 @@ struct perf_script {
bool show_lost_events;
bool show_round_events;
bool show_bpf_events;
+ bool show_cgroup_events;
bool allocated;
bool per_event_dump;
struct evswitch evswitch;
@@ -2203,6 +2213,41 @@ out:
return ret;
}
+static int process_cgroup_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+ int ret = -1;
+
+ thread = machine__findnew_thread(machine, sample->pid, sample->tid);
+ if (thread == NULL) {
+ pr_debug("problem processing CGROUP event, skipping it.\n");
+ return -1;
+ }
+
+ if (perf_event__process_cgroup(tool, event, sample, machine) < 0)
+ goto out;
+
+ if (!evsel->core.attr.sample_id_all) {
+ sample->cpu = 0;
+ sample->time = 0;
+ }
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_CGROUP, stdout);
+ perf_event__fprintf(event, stdout);
+ }
+ ret = 0;
+out:
+ thread__put(thread);
+ return ret;
+}
+
static int process_fork_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -2542,6 +2587,8 @@ static int __cmd_script(struct perf_script *script)
script->tool.context_switch = process_switch_event;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
+ if (script->show_cgroup_events)
+ script->tool.cgroup = process_cgroup_event;
if (script->show_lost_events)
script->tool.lost = process_lost_event;
if (script->show_round_events) {
@@ -3218,10 +3265,10 @@ static char *get_script_path(const char *script_root, const char *suffix)
__script_root = get_script_root(script_dirent, suffix);
if (__script_root && !strcmp(script_root, __script_root)) {
free(__script_root);
- closedir(lang_dir);
closedir(scripts_dir);
scnprintf(script_path, MAXPATHLEN, "%s/%s",
lang_path, script_dirent->d_name);
+ closedir(lang_dir);
return strdup(script_path);
}
free(__script_root);
@@ -3467,6 +3514,7 @@ int cmd_script(int argc, const char **argv)
.mmap2 = perf_event__process_mmap2,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
+ .cgroup = perf_event__process_cgroup,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.attr = process_attr,
@@ -3555,6 +3603,7 @@ int cmd_script(int argc, const char **argv)
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
+ OPT_BOOLEAN(0, "deltatime", &deltatime, "Show time stamps relative to previous event"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -3567,6 +3616,8 @@ int cmd_script(int argc, const char **argv)
"Show context switch events (if recorded)"),
OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
"Show namespace events (if recorded)"),
+ OPT_BOOLEAN('\0', "show-cgroup-events", &script.show_cgroup_events,
+ "Show cgroup events (if recorded)"),
OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
"Show lost events (if recorded)"),
OPT_BOOLEAN('\0', "show-round-events", &script.show_round_events,
@@ -3651,6 +3702,13 @@ int cmd_script(int argc, const char **argv)
}
}
+ if (reltime && deltatime) {
+ fprintf(stderr,
+			"reltime and deltatime are mutually exclusive. "
+			"Please use either --reltime or --deltatime.\n");
+ return -1;
+ }
+
if (itrace_synth_opts.callchain &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index d2539b793f9d..289cf83e658a 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -616,6 +616,7 @@ static void *display_thread_tui(void *arg)
.arg = top,
.refresh = top->delay_secs,
};
+ int ret;
/* In order to read symbols from other namespaces perf needs to call
* setns(2). This isn't permitted if the struct_fs has multiple users.
@@ -626,6 +627,7 @@ static void *display_thread_tui(void *arg)
prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
+repeat:
perf_top__sort_new_samples(top);
/*
@@ -638,13 +640,18 @@ static void *display_thread_tui(void *arg)
hists->uid_filter_str = top->record_opts.target.uid_str;
}
- perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
+ ret = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
top->min_percent,
&top->session->header.env,
!top->record_opts.overwrite,
&top->annotation_opts);
- stop_top();
+ if (ret == K_RELOAD) {
+ top->zero = true;
+ goto repeat;
+	} else {
+		stop_top();
+	}
+
return NULL;
}
@@ -1246,6 +1253,14 @@ static int __cmd_top(struct perf_top *top)
if (opts->record_namespaces)
top->tool.namespace_events = true;
+ if (opts->record_cgroup) {
+#ifdef HAVE_FILE_HANDLE
+ top->tool.cgroup_events = true;
+#else
+ pr_err("cgroup tracking is not supported.\n");
+ return -1;
+#endif
+ }
ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
&top->session->machines.host,
@@ -1253,6 +1268,11 @@ static int __cmd_top(struct perf_top *top)
if (ret < 0)
pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
+ ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
+ &top->session->machines.host);
+ if (ret < 0)
+ pr_debug("Couldn't synthesize cgroup events.\n");
+
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->core.threads, false,
top->nr_threads_synthesize);
@@ -1545,6 +1565,12 @@ int cmd_top(int argc, const char **argv)
"number of thread to run event synthesize"),
OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
"Record namespaces events"),
+ OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
+ "Record cgroup events"),
+ OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
+ "Sort the output by the event at the index n in group. "
+ "If n is invalid, sort by the first event. "
+		    "WARNING: should only be used with grouped events."),
OPTS_EVSWITCH(&top.evswitch),
OPT_END()
};
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json b/tools/perf/pmu-events/arch/test/test_cpu/branch.json
index 93ddfd8053ca..93ddfd8053ca 100644
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
+++ b/tools/perf/pmu-events/arch/test/test_cpu/branch.json
diff --git a/tools/perf/pmu-events/arch/test/test_cpu/other.json b/tools/perf/pmu-events/arch/test/test_cpu/other.json
new file mode 100644
index 000000000000..7d53d7ecd723
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/test_cpu/other.json
@@ -0,0 +1,26 @@
+[
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "SEGMENT_REG_LOADS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of segment register loads."
+ },
+ {
+ "EventCode": "0x9",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "DISPATCH_BLOCKED.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Memory cluster signals to block micro-op dispatch for any reason"
+ },
+ {
+ "EventCode": "0x3A",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "EIST_TRANS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/test/test_cpu/uncore.json b/tools/perf/pmu-events/arch/test/test_cpu/uncore.json
new file mode 100644
index 000000000000..d0a890cc814d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/test_cpu/uncore.json
@@ -0,0 +1,21 @@
+[
+ {
+ "EventCode": "0x02",
+ "EventName": "uncore_hisi_ddrc.flux_wcmd",
+ "BriefDescription": "DDRC write commands",
+ "PublicDescription": "DDRC write commands",
+ "Unit": "hisi_sccl,ddrc"
+ },
+ {
+ "Unit": "CBO",
+ "EventCode": "0x22",
+ "UMask": "0x81",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
+ "CounterMask": "0",
+ "Invert": "0",
+ "EdgeDetect": "0"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
deleted file mode 100644
index 6221a840fcea..000000000000
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
+++ /dev/null
@@ -1,329 +0,0 @@
-[
- {
- "EventName": "ic_fw32",
- "EventCode": "0x80",
- "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
- },
- {
- "EventName": "ic_fw32_miss",
- "EventCode": "0x81",
- "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
- },
- {
- "EventName": "ic_cache_fill_l2",
- "EventCode": "0x82",
- "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
- },
- {
- "EventName": "ic_cache_fill_sys",
- "EventCode": "0x83",
- "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
- },
- {
- "EventName": "bp_l1_tlb_miss_l2_hit",
- "EventCode": "0x84",
- "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
- },
- {
- "EventName": "bp_l1_tlb_miss_l2_miss",
- "EventCode": "0x85",
- "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
- },
- {
- "EventName": "bp_snp_re_sync",
- "EventCode": "0x86",
- "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
- },
- {
- "EventName": "ic_fetch_stall.ic_stall_any",
- "EventCode": "0x87",
- "BriefDescription": "IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
- "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
- "UMask": "0x4"
- },
- {
- "EventName": "ic_fetch_stall.ic_stall_dq_empty",
- "EventCode": "0x87",
- "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
- "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
- "UMask": "0x2"
- },
- {
- "EventName": "ic_fetch_stall.ic_stall_back_pressure",
- "EventCode": "0x87",
- "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
- "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
- "UMask": "0x1"
- },
- {
- "EventName": "ic_cache_inval.l2_invalidating_probe",
- "EventCode": "0x8c",
- "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS).",
- "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to L2 invalidating probe (external or LS).",
- "UMask": "0x2"
- },
- {
- "EventName": "ic_cache_inval.fill_invalidated",
- "EventCode": "0x8c",
- "BriefDescription": "IC line invalidated due to overwriting fill response.",
- "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to overwriting fill response.",
- "UMask": "0x1"
- },
- {
- "EventName": "bp_tlb_rel",
- "EventCode": "0x99",
- "BriefDescription": "The number of ITLB reload requests."
- },
- {
- "EventName": "l2_request_g1.rd_blk_l",
- "EventCode": "0x60",
- "BriefDescription": "Requests to L2 Group1.",
- "PublicDescription": "Requests to L2 Group1.",
- "UMask": "0x80"
- },
- {
- "EventName": "l2_request_g1.rd_blk_x",
- "EventCode": "0x60",
- "BriefDescription": "Requests to L2 Group1.",
- "PublicDescription": "Requests to L2 Group1.",
- "UMask": "0x40"
- },
- {
- "EventName": "l2_request_g1.ls_rd_blk_c_s",
- "EventCode": "0x60",
- "BriefDescription": "Requests to L2 Group1.",
- "PublicDescription": "Requests to L2 Group1.",
- "UMask": "0x20"
- },
- {
- "EventName": "l2_request_g1.cacheable_ic_read",
- "EventCode": "0x60",
- "BriefDescription": "Requests to L2 Group1.",
- "PublicDescription": "Requests to L2 Group1.",
- "UMask": "0x10"
- },
- {
- "EventName": "l2_request_g1.change_to_x",
- "EventCode": "0x60",
- "BriefDescription": "Requests to L2 Group1.",
- "PublicDescription": "Requests to L2 Group1.",
- "UMask": "0x8"
- },
- {
- "EventName": "l2_request_g1.prefetch_l2",
- "EventCode": "0x60",
- "BriefDescription": "Requests to L2 Group1.",
- "PublicDescription": "Requests to L2 Group1.",
- "UMask": "0x4"
- },
- {
- "EventName": "l2_request_g1.l2_hw_pf",
- "EventCode": "0x60",
- "BriefDescription": "Requests to L2 Group1.",
- "PublicDescription": "Requests to L2 Group1.",
- "UMask": "0x2"
- },
- {
- "EventName": "l2_request_g1.other_requests",
- "EventCode": "0x60",
- "BriefDescription": "Events covered by l2_request_g2.",
- "PublicDescription": "Requests to L2 Group1. Events covered by l2_request_g2.",
- "UMask": "0x1"
- },
- {
- "EventName": "l2_request_g2.group1",
- "EventCode": "0x61",
- "BriefDescription": "All Group 1 commands not in unit0.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. All Group 1 commands not in unit0.",
- "UMask": "0x80"
- },
- {
- "EventName": "l2_request_g2.ls_rd_sized",
- "EventCode": "0x61",
- "BriefDescription": "RdSized, RdSized32, RdSized64.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSized, RdSized32, RdSized64.",
- "UMask": "0x40"
- },
- {
- "EventName": "l2_request_g2.ls_rd_sized_nc",
- "EventCode": "0x61",
- "BriefDescription": "RdSizedNC, RdSized32NC, RdSized64NC.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSizedNC, RdSized32NC, RdSized64NC.",
- "UMask": "0x20"
- },
- {
- "EventName": "l2_request_g2.ic_rd_sized",
- "EventCode": "0x61",
- "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "UMask": "0x10"
- },
- {
- "EventName": "l2_request_g2.ic_rd_sized_nc",
- "EventCode": "0x61",
- "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "UMask": "0x8"
- },
- {
- "EventName": "l2_request_g2.smc_inval",
- "EventCode": "0x61",
- "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "UMask": "0x4"
- },
- {
- "EventName": "l2_request_g2.bus_locks_originator",
- "EventCode": "0x61",
- "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "UMask": "0x2"
- },
- {
- "EventName": "l2_request_g2.bus_locks_responses",
- "EventCode": "0x61",
- "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
- "UMask": "0x1"
- },
- {
- "EventName": "l2_latency.l2_cycles_waiting_on_fills",
- "EventCode": "0x62",
- "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
- "PublicDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
- "UMask": "0x1"
- },
- {
- "EventName": "l2_wcb_req.wcb_write",
- "EventCode": "0x63",
- "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
- "BriefDescription": "LS to L2 WCB write requests.",
- "UMask": "0x40"
- },
- {
- "EventName": "l2_wcb_req.wcb_close",
- "EventCode": "0x63",
- "BriefDescription": "LS to L2 WCB close requests.",
- "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
- "UMask": "0x20"
- },
- {
- "EventName": "l2_wcb_req.zero_byte_store",
- "EventCode": "0x63",
- "BriefDescription": "LS to L2 WCB zero byte store requests.",
- "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
- "UMask": "0x4"
- },
- {
- "EventName": "l2_wcb_req.cl_zero",
- "EventCode": "0x63",
- "PublicDescription": "LS to L2 WCB cache line zeroing requests.",
- "BriefDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
- "UMask": "0x1"
- },
- {
- "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
- "EventCode": "0x64",
- "BriefDescription": "LS ReadBlock C/S Hit.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS ReadBlock C/S Hit.",
- "UMask": "0x80"
- },
- {
- "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
- "EventCode": "0x64",
- "BriefDescription": "LS Read Block L Hit X.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block L Hit X.",
- "UMask": "0x40"
- },
- {
- "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
- "EventCode": "0x64",
- "BriefDescription": "LsRdBlkL Hit Shared.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkL Hit Shared.",
- "UMask": "0x20"
- },
- {
- "EventName": "l2_cache_req_stat.ls_rd_blk_x",
- "EventCode": "0x64",
- "BriefDescription": "LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
- "UMask": "0x10"
- },
- {
- "EventName": "l2_cache_req_stat.ls_rd_blk_c",
- "EventCode": "0x64",
- "BriefDescription": "LS Read Block C S L X Change to X Miss.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block C S L X Change to X Miss.",
- "UMask": "0x8"
- },
- {
- "EventName": "l2_cache_req_stat.ic_fill_hit_x",
- "EventCode": "0x64",
- "BriefDescription": "IC Fill Hit Exclusive Stale.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Exclusive Stale.",
- "UMask": "0x4"
- },
- {
- "EventName": "l2_cache_req_stat.ic_fill_hit_s",
- "EventCode": "0x64",
- "BriefDescription": "IC Fill Hit Shared.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Shared.",
- "UMask": "0x2"
- },
- {
- "EventName": "l2_cache_req_stat.ic_fill_miss",
- "EventCode": "0x64",
- "BriefDescription": "IC Fill Miss.",
- "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Miss.",
- "UMask": "0x1"
- },
- {
- "EventName": "l2_fill_pending.l2_fill_busy",
- "EventCode": "0x6d",
- "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
- "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
- "UMask": "0x1"
- },
- {
- "EventName": "l3_request_g1.caching_l3_cache_accesses",
- "EventCode": "0x01",
- "BriefDescription": "Caching: L3 cache accesses",
- "UMask": "0x80",
- "Unit": "L3PMC"
- },
- {
- "EventName": "l3_lookup_state.all_l3_req_typs",
- "EventCode": "0x04",
- "BriefDescription": "All L3 Request Types",
- "UMask": "0xff",
- "Unit": "L3PMC"
- },
- {
- "EventName": "l3_comb_clstr_state.other_l3_miss_typs",
- "EventCode": "0x06",
- "BriefDescription": "Other L3 Miss Request Types",
- "UMask": "0xfe",
- "Unit": "L3PMC"
- },
- {
- "EventName": "l3_comb_clstr_state.request_miss",
- "EventCode": "0x06",
- "BriefDescription": "L3 cache misses",
- "UMask": "0x01",
- "Unit": "L3PMC"
- },
- {
- "EventName": "xi_sys_fill_latency",
- "EventCode": "0x90",
- "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
- "UMask": "0x00",
- "Unit": "L3PMC"
- },
- {
- "EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs",
- "EventCode": "0x9a",
- "BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.",
- "UMask": "0x3f",
- "Unit": "L3PMC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
deleted file mode 100644
index b26a00d05a2e..000000000000
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
+++ /dev/null
@@ -1,65 +0,0 @@
-[
- {
- "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
- "EventCode": "0x28a",
- "BriefDescription": "OC to IC mode switch.",
- "PublicDescription": "OC Mode Switch. OC to IC mode switch.",
- "UMask": "0x2"
- },
- {
- "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
- "EventCode": "0x28a",
- "BriefDescription": "IC to OC mode switch.",
- "PublicDescription": "OC Mode Switch. IC to OC mode switch.",
- "UMask": "0x1"
- },
- {
- "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
- "EventCode": "0xaf",
- "BriefDescription": "RETIRE Tokens unavailable.",
- "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
- "UMask": "0x40"
- },
- {
- "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
- "EventCode": "0xaf",
- "BriefDescription": "AGSQ Tokens unavailable.",
- "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
- "UMask": "0x20"
- },
- {
- "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
- "EventCode": "0xaf",
- "BriefDescription": "ALU tokens total unavailable.",
- "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
- "UMask": "0x10"
- },
- {
- "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
- "EventCode": "0xaf",
- "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
- "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
- "UMask": "0x8"
- },
- {
- "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
- "EventCode": "0xaf",
- "BriefDescription": "ALSQ 3 Tokens unavailable.",
- "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
- "UMask": "0x4"
- },
- {
- "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
- "EventCode": "0xaf",
- "BriefDescription": "ALSQ 2 Tokens unavailable.",
- "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
- "UMask": "0x2"
- },
- {
- "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
- "EventCode": "0xaf",
- "BriefDescription": "ALSQ 1 Tokens unavailable.",
- "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
- "UMask": "0x1"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/branch.json b/tools/perf/pmu-events/arch/x86/amdzen1/branch.json
new file mode 100644
index 000000000000..a9943eeb8d6b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/branch.json
@@ -0,0 +1,23 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 BTB Correction."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 BTB Correction."
+ },
+ {
+ "EventName": "bp_dyn_ind_pred",
+ "EventCode": "0x8e",
+ "BriefDescription": "Dynamic Indirect Predictions.",
+ "PublicDescription": "Indirect Branch Prediction for potential multi-target branch (speculative)."
+ },
+ {
+ "EventName": "bp_de_redirect",
+ "EventCode": "0x91",
+ "BriefDescription": "Decoder Overrides Existing Branch Prediction (speculative)."
+ }
+]
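Once installed, these aliases resolve like any other named event; a
hedged example on a Zen1 (family 17h) machine:

    $ perf stat -e bp_l1_btb_correct,bp_dyn_ind_pred -a -- sleep 1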
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
new file mode 100644
index 000000000000..404d4c569c01
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
@@ -0,0 +1,294 @@
+[
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache reads (including hardware and software prefetch).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache stores.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache shared reads.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Instruction cache reads.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2_cmd",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g1.group2",
+ "EventCode": "0x60",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g1 (PMCx060).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized non-cacheable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB write requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache shared read hit in L2",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit in L2.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit on shared line in L2.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache store or state change hit in L2.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types).",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit clean line in L2.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l3_request_g1.caching_l3_cache_accesses",
+ "EventCode": "0x01",
+ "BriefDescription": "Caching: L3 cache accesses",
+ "UMask": "0x80",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.all_l3_req_typs",
+ "EventCode": "0x04",
+ "BriefDescription": "All L3 Request Types",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.other_l3_miss_typs",
+ "EventCode": "0x06",
+ "BriefDescription": "Other L3 Miss Request Types",
+ "UMask": "0xfe",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.request_miss",
+ "EventCode": "0x06",
+ "BriefDescription": "L3 cache misses",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_sys_fill_latency",
+ "EventCode": "0x90",
+ "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
+ "UMask": "0x00",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs",
+ "EventCode": "0x9a",
+ "BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.",
+ "UMask": "0x3f",
+ "Unit": "L3PMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
index 1079544eeed5..7e1aa8273935 100644
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
@@ -62,7 +62,6 @@
"EventName": "ex_ret_brn_ind_misp",
"EventCode": "0xca",
- "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
- "PublicDescription": "Retired Indirect Branch Instructions Mispredicted."
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted."
},
{
"EventName": "ex_ret_mmx_fp_instr.sse_instr",
@@ -91,11 +90,6 @@
"BriefDescription": "Retired Conditional Branch Instructions."
},
{
- "EventName": "ex_ret_cond_misp",
- "EventCode": "0xd2",
- "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
- },
- {
"EventName": "ex_div_busy",
"EventCode": "0xd3",
"BriefDescription": "Div Cycles Busy count."
@@ -108,22 +102,19 @@
{
"EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
"EventCode": "0x1cf",
- "BriefDescription": "Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
- "PublicDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
"UMask": "0x4"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
"EventCode": "0x1cf",
- "BriefDescription": "Number of Ops tagged by IBS that retired.",
- "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
"UMask": "0x2"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
"EventCode": "0x1cf",
- "BriefDescription": "Number of Ops tagged by IBS.",
- "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
"UMask": "0x1"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
index ea4711983d1d..a35542bd3b36 100644
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
@@ -2,18 +2,74 @@
{
"EventName": "fpu_pipe_assignment.dual",
"EventCode": "0x00",
- "BriefDescription": "Total number multi-pipe uOps.",
- "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to Pipe 3.",
+ "BriefDescription": "Total number multi-pipe uOps assigned to all pipes.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to all pipes.",
"UMask": "0xf0"
},
{
+ "EventName": "fpu_pipe_assignment.dual3",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number multi-pipe uOps assigned to pipe 3.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to pipe 3.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.dual2",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number multi-pipe uOps assigned to pipe 2.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to pipe 2.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.dual1",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number multi-pipe uOps assigned to pipe 1.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to pipe 1.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.dual0",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number multi-pipe uOps assigned to pipe 0.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to pipe 0.",
+ "UMask": "0x10"
+ },
+ {
"EventName": "fpu_pipe_assignment.total",
"EventCode": "0x00",
- "BriefDescription": "Total number uOps.",
- "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to Pipe 3.",
+ "BriefDescription": "Total number uOps assigned to all fpu pipes.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to all pipes.",
"UMask": "0xf"
},
{
+ "EventName": "fpu_pipe_assignment.total3",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 3.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total2",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 2.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total1",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 1.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total0",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 0.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
+ "UMask": "0x1"
+ },
+ {
"EventName": "fp_sched_empty",
"EventCode": "0x01",
"BriefDescription": "This is a speculative event. The number of cycles in which the FPU scheduler is empty. Note that some Ops like FP loads bypass the scheduler."
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json b/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
index fa2d60d4def0..b33a3c308019 100644
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
@@ -3,28 +3,24 @@
"EventName": "ls_locks.bus_lock",
"EventCode": "0x25",
"BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
- "PublicDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
"UMask": "0x1"
},
{
"EventName": "ls_dispatch.ld_st_dispatch",
"EventCode": "0x29",
- "BriefDescription": "Load-op-Stores.",
- "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
+ "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
"UMask": "0x4"
},
{
"EventName": "ls_dispatch.store_dispatch",
"EventCode": "0x29",
- "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
- "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "BriefDescription": "Counts the number of stores dispatched to the LS unit. Unit Masks ADDed.",
"UMask": "0x2"
},
{
"EventName": "ls_dispatch.ld_dispatch",
"EventCode": "0x29",
- "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
- "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "BriefDescription": "Counts the number of loads dispatched to the LS unit. Unit Masks ADDed.",
"UMask": "0x1"
},
{
@@ -38,83 +34,114 @@
"BriefDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
},
{
+ "EventName": "ls_mab_alloc.dc_prefetcher",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB allocates by type - DC prefetcher.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_mab_alloc.stores",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB allocates by type - stores.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_mab_alloc.loads",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB allocates by type - loads.",
+ "UMask": "0x01"
+ },
+ {
"EventName": "ls_l1_d_tlb_miss.all",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss or Reload off all sizes.",
- "PublicDescription": "L1 DTLB Miss or Reload off all sizes.",
"UMask": "0xff"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss of a page of 1G size.",
- "PublicDescription": "L1 DTLB Miss of a page of 1G size.",
"UMask": "0x80"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss of a page of 2M size.",
- "PublicDescription": "L1 DTLB Miss of a page of 2M size.",
"UMask": "0x40"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_miss",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss of a page of 32K size.",
- "PublicDescription": "L1 DTLB Miss of a page of 32K size.",
"UMask": "0x20"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss of a page of 4K size.",
- "PublicDescription": "L1 DTLB Miss of a page of 4K size.",
"UMask": "0x10"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 1G size.",
- "PublicDescription": "L1 DTLB Reload of a page of 1G size.",
"UMask": "0x8"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 2M size.",
- "PublicDescription": "L1 DTLB Reload of a page of 2M size.",
"UMask": "0x4"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 32K size.",
- "PublicDescription": "L1 DTLB Reload of a page of 32K size.",
"UMask": "0x2"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 4K size.",
- "PublicDescription": "L1 DTLB Reload of a page of 4K size.",
"UMask": "0x1"
},
{
- "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_iside",
+ "EventName": "ls_tablewalker.iside",
"EventCode": "0x46",
- "BriefDescription": "Tablewalker allocation.",
- "PublicDescription": "Tablewalker allocation.",
+ "BriefDescription": "Total Page Table Walks on I-side.",
"UMask": "0xc"
},
{
- "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_dside",
+ "EventName": "ls_tablewalker.ic_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 1.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type0",
"EventCode": "0x46",
- "BriefDescription": "Tablewalker allocation.",
- "PublicDescription": "Tablewalker allocation.",
+ "BriefDescription": "Total Page Table Walks IC Type 0.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_tablewalker.dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on D-side.",
"UMask": "0x3"
},
{
+ "EventName": "ls_tablewalker.dc_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 1.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 0.",
+ "UMask": "0x1"
+ },
+ {
"EventName": "ls_misal_accesses",
"EventCode": "0x47",
"BriefDescription": "Misaligned loads."
@@ -123,35 +150,30 @@
"EventName": "ls_pref_instr_disp.prefetch_nta",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
- "PublicDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
"UMask": "0x4"
},
{
"EventName": "ls_pref_instr_disp.store_prefetch_w",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
- "PublicDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
"UMask": "0x2"
},
{
"EventName": "ls_pref_instr_disp.load_prefetch_w",
"EventCode": "0x4b",
- "BriefDescription": "Prefetch, Prefetch_T0_T1_T2.",
- "PublicDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
+ "BriefDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
"UMask": "0x1"
},
{
"EventName": "ls_inef_sw_pref.mab_mch_cnt",
"EventCode": "0x52",
- "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
- "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
"UMask": "0x2"
},
{
"EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
"EventCode": "0x52",
- "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
- "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
"UMask": "0x1"
},
{
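The renamed ls_tablewalker events above follow the same bit-slice pattern: iside (0xc) is the union of the two IC walk types and dside (0x3) of the two DC types. A small sanity check in Python, values taken from the hunk above:

    # I-side aggregate = ic_type1 | ic_type0; D-side aggregate = dc_type1 | dc_type0
    assert 0x8 | 0x4 == 0xc  # ls_tablewalker.iside
    assert 0x2 | 0x1 == 0x3  # ls_tablewalker.dside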
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/other.json b/tools/perf/pmu-events/arch/x86/amdzen1/other.json
new file mode 100644
index 000000000000..ff780098d36e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/other.json
@@ -0,0 +1,56 @@
+[
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3_0 Tokens unavailable.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
+ "UMask": "0x1"
+ }
+]
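Since these event tables are plain JSON arrays, a quick structural check is easy to script. A minimal sketch, assuming the file is read from a kernel tree checkout (path as in the diff header; nothing here is perf-specific):

    import json

    path = "tools/perf/pmu-events/arch/x86/amdzen1/other.json"
    with open(path) as f:
        events = json.load(f)

    for ev in events:
        # every entry added above carries these three fields
        assert {"EventName", "EventCode", "BriefDescription"} <= ev.keys()
        if "UMask" in ev:
            assert 0 <= int(ev["UMask"], 16) <= 0xff

    print(f"{len(events)} events parsed OK")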
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/branch.json b/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
new file mode 100644
index 000000000000..ef4166a66288
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
@@ -0,0 +1,52 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_dyn_ind_pred",
+ "EventCode": "0x8e",
+ "BriefDescription": "Dynamic Indirect Predictions.",
+ "PublicDescription": "Indirect Branch Prediction for potential multi-target branch (speculative)."
+ },
+ {
+ "EventName": "bp_de_redirect",
+ "EventCode": "0x91",
+ "BriefDescription": "Decoder Overrides Existing Branch Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB.",
+ "UMask": "0xFF"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if1g",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 1GB page.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if2m",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 2MB page.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if4k",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 4KB page.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ }
+]
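bp_l1_tlb_fetch_hit pairs naturally with the L2 ITLB events defined in cache.json below (bp_l1_tlb_miss_l2_hit, bp_l1_tlb_miss_l2_tlb_miss) to give an instruction TLB hit rate. A sketch with hypothetical counts; only the event names come from this patch:

    # hypothetical counter readings for one measurement interval
    l1_hit  = 98_000_000   # bp_l1_tlb_fetch_hit
    l2_hit  =  1_500_000   # bp_l1_tlb_miss_l2_hit (hit in L2 ITLB)
    l2_miss =    500_000   # bp_l1_tlb_miss_l2_tlb_miss

    fetches = l1_hit + l2_hit + l2_miss
    print(f"L1 ITLB hit rate: {l1_hit / fetches:.2%}")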
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
new file mode 100644
index 000000000000..1c60bfa0f00b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
@@ -0,0 +1,338 @@
+[
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache reads (including hardware and software prefetch).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache stores.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache shared reads.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Instruction cache reads.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2_cmd",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g1.group2",
+ "EventCode": "0x60",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g1 (PMCx060).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized non-cacheable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB write requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache shared read hit in L2",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit in L2.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit on shared line in L2.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache store or state change hit in L2.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types).",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit clean line in L2.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_pf_hit_l2",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetch hit in L2.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetcher hits in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit the L3.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetcher misses in L3. All L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 1GB page.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 2MB page.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 4KB page.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l3_request_g1.caching_l3_cache_accesses",
+ "EventCode": "0x01",
+ "BriefDescription": "Caching: L3 cache accesses",
+ "UMask": "0x80",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.all_l3_req_typs",
+ "EventCode": "0x04",
+ "BriefDescription": "All L3 Request Types",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.other_l3_miss_typs",
+ "EventCode": "0x06",
+ "BriefDescription": "Other L3 Miss Request Types",
+ "UMask": "0xfe",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.request_miss",
+ "EventCode": "0x06",
+ "BriefDescription": "L3 cache misses",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_sys_fill_latency",
+ "EventCode": "0x90",
+ "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
+ "UMask": "0x00",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs",
+ "EventCode": "0x9A",
+ "BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.",
+ "UMask": "0x3f",
+ "Unit": "L3PMC"
+ }
+]
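xi_sys_fill_latency is pre-scaled ("Total cycles for all transactions divided by 16"), so an average L3 miss latency needs the raw count multiplied back by 16 and divided by the matching request count. A sketch under that reading of the two descriptions above, with hypothetical counter values:

    xi_sys_fill_latency = 2_000_000   # raw counter value (cycles / 16)
    l3_miss_requests    =   400_000   # xi_ccx_sdp_req1.all_l3_miss_req_typs

    avg_cycles = xi_sys_fill_latency * 16 / l3_miss_requests
    print(f"average L3 miss latency: {avg_cycles:.0f} core cycles")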
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/core.json b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
new file mode 100644
index 000000000000..de89e5a44ff1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
@@ -0,0 +1,130 @@
+[
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired Instructions."
+ },
+ {
+ "EventName": "ex_ret_cops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired Uops.",
+ "PublicDescription": "The number of micro-ops retired. This count includes all processor activity (instructions, exceptions, interrupts, microcode assists, etc.). The number of events logged per cycle can vary from 0 to 8."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "Retired Branch Instructions.",
+ "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of branch instructions retired, of any type, that were not correctly predicted. This includes those for which prediction is not attempted (far control transfers, exceptions and interrupts)."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired Taken Branch Instructions.",
+ "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired Far Control Transfers.",
+ "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_brn_resync",
+ "EventCode": "0xc7",
+ "BriefDescription": "Retired Branch Resyncs.",
+ "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired Near Returns.",
+ "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired Near Returns Mispredicted.",
+ "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "MMX instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "x87 instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired Conditional Branch Instructions."
+ },
+ {
+ "EventName": "ex_ret_cond_misp",
+ "EventCode": "0xd2",
+ "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Div Cycles Busy count."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Div Op Count."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_fus_brnch_inst",
+ "EventCode": "0x1d0",
+ "BriefDescription": "Retired Fused Instructions. The number of fuse-branch instructions retired per cycle. The number of events logged per cycle can vary from 0-8.",
+ }
+]
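The retired-branch events above compose into the usual front-end quality metrics: misprediction ratio and branch MPKI. A minimal sketch with hypothetical counts (event names as defined in this file):

    ex_ret_instr    = 10_000_000_000  # retired instructions
    ex_ret_brn      =  2_000_000_000  # retired branches
    ex_ret_brn_misp =     40_000_000  # retired mispredicted branches

    print(f"mispredict ratio: {ex_ret_brn_misp / ex_ret_brn:.2%}")
    print(f"branch MPKI: {1000 * ex_ret_brn_misp / ex_ret_instr:.2f}")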
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
new file mode 100644
index 000000000000..622a0c420e46
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
@@ -0,0 +1,140 @@
+[
+ {
+ "EventName": "fpu_pipe_assignment.total",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps.",
+ "PublicDescription": "Total number of fp uOps. The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS.",
+ "UMask": "0xf"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total3",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 3.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total2",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 2.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total1",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 1.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total0",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 0.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "All FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mac_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply-add FLOPS. Multiply-add counts as 2 FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "PublicDescription": "",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Divide/square root FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Add/subtract FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.optimized",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Scalar Ops optimized. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass). This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops eliminated. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE bottom-executing uOps retired. The number of serializing Ops retired.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 bottom-executing uOps retired. The number of serializing Ops retired.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits. The number of serializing Ops retired.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_spill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM spill fault.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM fill fault.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_disp_faults.xmm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. XMM fill fault.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_disp_faults.x87_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. x87 fill fault.",
+ "UMask": "0x1"
+ }
+]
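fp_ret_sse_avx_ops.all counts retired SSE/AVX FLOPs directly, with multiply-add weighted as 2 per the .mac_flops description, so sustained GFLOPS is just the count over wall time. A sketch assuming the aggregate mask already folds in that 2x weighting; if it does not on a given part, sum the sub-masks with mac_flops doubled instead:

    flops   = 3_200_000_000_000  # fp_ret_sse_avx_ops.all over the run (hypothetical)
    seconds = 10.0               # measurement interval

    print(f"~{flops / seconds / 1e9:.1f} GFLOPS")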
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/memory.json b/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
new file mode 100644
index 000000000000..715046b339cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
@@ -0,0 +1,341 @@
+[
+ {
+ "EventName": "ls_bad_status2.stli_other",
+ "EventCode": "0x24",
+ "BriefDescription": "Non-forwardable conflict; used to reduce STLI's via software. All reasons. Store To Load Interlock (STLI) are loads that were unable to complete because of a possible match with an older store, and the older store could not do STLF for some reason.",
+ "PublicDescription" : "Store-to-load conflicts: A load was unable to complete due to a non-forwardable conflict with an older store. Most commonly, a load's address range partially but not completely overlaps with an uncompleted older store. Software can avoid this problem by using same-size and same-alignment loads and stores when accessing the same data. Vector/SIMD code is particularly susceptible to this problem; software should construct wide vector stores by manipulating vector elements in registers using shuffle/blend/swap instructions prior to storing to memory, instead of using narrow element-by-element stores.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_hi_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. High speculative cacheable lock speculation succeeded.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_lo_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Low speculative cacheable lock speculation succeeded.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_locks.non_spec_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Non-speculative lock succeeded.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type. Comparable to legacy bus lock.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_ret_cl_flush",
+ "EventCode": "0x26",
+ "BriefDescription": "Number of retired CLFLUSH instructions."
+ },
+ {
+ "EventName": "ls_ret_cpuid",
+ "EventCode": "0x27",
+ "BriefDescription": "Number of retired CPUID instructions."
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Dispatch of a single op that performs a load from and store to the same memory address. Number of single ops that do load/store to an address.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Number of stores dispatched. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Number of loads dispatched. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_smi_rx",
+ "EventCode": "0x2B",
+ "BriefDescription": "Number of SMIs received."
+ },
+ {
+ "EventName": "ls_int_taken",
+ "EventCode": "0x2C",
+ "BriefDescription": "Number of interrupts taken."
+ },
+ {
+ "EventName": "ls_rdtsc",
+ "EventCode": "0x2D",
+ "BriefDescription": "Number of reads of the TSC (RDTSC instructions). The count is speculative."
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Number of STLF hits."
+ },
+ {
+ "EventName": "ls_st_commit_cancel2.st_commit_cancel_wcb_full",
+ "EventCode": "0x37",
+ "BriefDescription": "A non-cacheable store and the non-cacheable commit buffer is full."
+ },
+ {
+ "EventName": "ls_dc_accesses",
+ "EventCode": "0x40",
+ "BriefDescription": "Number of accesses to the dcache for load/store references.",
+ "PublicDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+ },
+ {
+ "EventName": "ls_mab_alloc.dc_prefetcher",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. DC prefetcher.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_mab_alloc.stores",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Stores.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_mab_alloc.loads",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Loads.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_refills_from_sys.ls_mabresp_rmt_dram",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. DRAM or IO from different die.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_refills_from_sys.ls_mabresp_rmt_cache",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. Hit in cache; Remote CCX and the address's Home Node is on a different die.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_refills_from_sys.ls_mabresp_lcl_dram",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. DRAM or IO from this thread's die.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_refills_from_sys.ls_mabresp_lcl_cache",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. Hit in cache; local CCX (not Local L2), or Remote CCX and the address's Home Node is on this thread's die.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_refills_from_sys.ls_mabresp_lcl_l2",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. Local L2 hit.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "All L1 DTLB Misses or Reloads.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that miss in the L2 TLB.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that miss in the L2 TLB.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload coalesced page miss.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that miss the L2 TLB.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that hit in the L2 TLB.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that hit in the L2 TLB.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload hit a coalesced page.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that hit in the L2 TLB.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_tablewalker.iside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on I-side.",
+ "UMask": "0xc"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 1.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 0.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_tablewalker.dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on D-side.",
+ "UMask": "0x3"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 1.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 0.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_misal_accesses",
+ "EventCode": "0x47",
+ "BriefDescription": "Misaligned loads."
+ },
+ {
+ "EventName": "ls_pref_instr_disp",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative).",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchNTA instruction. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). See docAPM3 PREFETCHW.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). Prefetch_T0_T1_T2. PrefetchT0, T1 and T2 instructions. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fill.ls_mabresp_rmt_dram",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From DRAM (home node remote).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fill.ls_mabresp_rmt_cache",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From another cache (home node remote).",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_dram",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. DRAM or IO from this thread's die. From DRAM (home node local).",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_cache",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From another cache (home node local).",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_l2",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. Local L2 hit.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fill.ls_mabresp_rmt_dram",
+ "EventCode": "0x5A",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM (home node remote).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fill.ls_mabresp_rmt_cache",
+ "EventCode": "0x5A",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From another cache (home node remote).",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_dram",
+ "EventCode": "0x5A",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM (home node local).",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_cache",
+ "EventCode": "0x5A",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From another cache (home node local).",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_l2",
+ "EventCode": "0x5A",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. Local L2 hit.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Cycles not in Halt."
+ },
+ {
+ "EventName": "ls_tlb_flush",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLB Flushes"
+ }
+]
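The ls_refills_from_sys masks partition demand data cache fills by where the data came from, which makes a locality breakdown a one-liner per source. A sketch with hypothetical counts; the names map to the unit masks above:

    fills = {
        "local L2":      9_000_000,  # ls_mabresp_lcl_l2
        "local cache":   1_200_000,  # ls_mabresp_lcl_cache
        "local DRAM/IO":   600_000,  # ls_mabresp_lcl_dram
        "remote cache":    150_000,  # ls_mabresp_rmt_cache
        "remote DRAM/IO":   50_000,  # ls_mabresp_rmt_dram
    }

    total = sum(fills.values())
    for src, n in fills.items():
        print(f"{src:>15}: {n / total:6.1%}")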
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/other.json b/tools/perf/pmu-events/arch/x86/amdzen2/other.json
new file mode 100644
index 000000000000..e94994d4a60e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/other.json
@@ -0,0 +1,115 @@
+[
+ {
+ "EventName": "de_dis_uop_queue_empty_di0",
+ "EventCode": "0xa9",
+ "BriefDescription": "Cycles where the Micro-Op Queue is empty."
+ },
+ {
+ "EventName": "de_dis_uops_from_decoder",
+ "EventCode": "0xaa",
+ "BriefDescription": "Ops dispatched from either the decoders, OpCache or both.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "de_dis_uops_from_decoder.opcache_dispatched",
+ "EventCode": "0xaa",
+ "BriefDescription": "Count of dispatched Ops from OpCache.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "de_dis_uops_from_decoder.decoder_dispatched",
+ "EventCode": "0xaa",
+ "BriefDescription": "Count of dispatched Ops from Decoder.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_misc_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. FP Miscellaneous resource unavailable. Applies to the recovery of mispredicts with FP ops.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_sch_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. FP scheduler resource stall. Applies to ops that use the FP scheduler.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Floating point register file resource stall. Applies to all FP ops that have a destination register.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.taken_branch_buffer_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Taken branch buffer resource stall.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_sched_misc_token_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Scheduler miscellaneous resource stall.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.store_queue_token_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Store queue resource stall. Applies to all ops with store semantics.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.load_queue_token_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Load queue resource stall. Applies to all ops with load semantics.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_phy_reg_file_token_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Physical Register File resource stall. Applies to all ops that have an integer destination register.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.sc_agu_dispatch_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. SC AGU dispatch stall.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ3_0_TokenStall.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index 45a34ce4fe89..8cdc7c13dc2a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -297,7 +297,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
index 961fe4395758..16fd8a7490fc 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -115,7 +115,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index 746734ce09be..1eb0415fa11a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -297,7 +297,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index a728c6e5119b..7fde0d2943cd 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -316,7 +316,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index 5402cd3120f9..f57c5f3506c2 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -267,7 +267,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index 832f3cb40b34..311a005dc35b 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -267,7 +267,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index d69b2a8fc0bc..28e25447d3ef 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -285,7 +285,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index 5f465fd81315..db23db2e98be 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -285,7 +285,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index 3e909b306003..dbb33e00b72a 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -171,7 +171,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 745ced083844..25b06cf98747 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -36,4 +36,5 @@ GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
GenuineIntel-6-7D,v1,icelake,core
GenuineIntel-6-7E,v1,icelake,core
GenuineIntel-6-86,v1,tremontx,core
-AuthenticAMD-23-[[:xdigit:]]+,v1,amdfam17h,core
+AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
+AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
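perf matches mapfile.csv rows top to bottom against the running CPU's cpuid string, so the row order here is load-bearing: the first row pins family 17h models 0x00-0x2F (the [12][0-9A-F]|[0-9A-F] alternation, e.g. a Zen1 Naples part reporting AuthenticAMD-23-1) to the amdzen1 tables, and only the remaining models (e.g. a Zen2 Rome part reporting AuthenticAMD-23-31) fall through to the [[:xdigit:]]+ catch-all mapped to amdzen2. The model split at 0x30 is this patch set's assumption about where Zen1 ends.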
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index 50c053235752..fb2d7b8875f8 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -171,7 +171,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index f97e8316ad2f..8704efeb8d31 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -304,7 +304,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 35f5db1786f7..b4f91137f40c 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -316,7 +316,7 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 3c4236a5bad8..fa86c5f997cc 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -771,6 +771,19 @@ static void print_mapping_table_suffix(FILE *outfp)
fprintf(outfp, "};\n");
}
+static void print_mapping_test_table(FILE *outfp)
+{
+ /*
+ * Print a mapping table entry for the synthetic "testcpu" PMU used
+ * by the pmu-events self test; the terminating NULL entry is still
+ * emitted by print_mapping_table_suffix().
+ */
+ fprintf(outfp, "{\n");
+ fprintf(outfp, "\t.cpuid = \"testcpu\",\n");
+ fprintf(outfp, "\t.version = \"v1\",\n");
+ fprintf(outfp, "\t.type = \"core\",\n");
+ fprintf(outfp, "\t.table = pme_test_cpu,\n");
+ fprintf(outfp, "},\n");
+}
+
static int process_mapfile(FILE *outfp, char *fpath)
{
int n = 16384;
@@ -848,6 +861,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
}
out:
+ print_mapping_test_table(outfp);
print_mapping_table_suffix(outfp);
fclose(mapfp);
free(line);
@@ -1168,6 +1182,22 @@ int main(int argc, char *argv[])
goto empty_map;
}
+ sprintf(ldirname, "%s/test", start_dirname);
+
+ rc = nftw(ldirname, process_one_file, maxfds, 0);
+ if (rc && verbose) {
+ pr_info("%s: Error walking file tree %s rc=%d for test\n",
+ prog, ldirname, rc);
+ goto empty_map;
+ } else if (rc < 0) {
+ /* Make build fail */
+ free_arch_std_events();
+ ret = 1;
+ goto out_free_mapfile;
+ } else if (rc) {
+ goto empty_map;
+ }
+
if (close_table)
print_events_table_suffix(eventsfp);
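The net effect on the generated pmu-events.c is a synthetic entry sitting just before the all-NULL terminator of the mapping table, roughly:

	{
		.cpuid = "testcpu",
		.version = "v1",
		.type = "core",
		.table = pme_test_cpu,
	},
	{
		.cpuid = 0,
		.version = 0,
		.type = 0,
		.table = 0,
	},

__test_pmu_get_events_map() in the new test below looks this entry up by its "testcpu" cpuid, so the self test never depends on the host CPU's real event tables.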
diff --git a/tools/perf/tests/.gitignore b/tools/perf/tests/.gitignore
index 8cc30e731c73..d053b325f728 100644
--- a/tools/perf/tests/.gitignore
+++ b/tools/perf/tests/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
llvm-src-base.c
llvm-src-kbuild.c
llvm-src-prologue.c
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 1692529639b0..b3d1bf13ca07 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -14,6 +14,7 @@ perf-y += evsel-roundtrip-name.o
perf-y += evsel-tp-sched.o
perf-y += fdarray.o
perf-y += pmu.o
+perf-y += pmu-events.o
perf-y += hists_common.o
perf-y += hists_link.o
perf-y += hists_filter.o
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 54d9516c9839..b6322eb0f423 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -73,6 +73,10 @@ static struct test generic_tests[] = {
.func = test__pmu,
},
{
+ .desc = "PMU events",
+ .func = test__pmu_events,
+ },
+ {
.desc = "DSO data read",
.func = test__dso_data,
},
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index c850d1664c56..5d0c3a9c47a1 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -28,9 +28,13 @@ endif
PARALLEL_OPT=
ifeq ($(SET_PARALLEL),1)
- cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
- ifeq ($(cores),0)
- cores := 1
+ ifeq ($(JOBS),)
+ cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ ifeq ($(cores),0)
+ cores := 1
+ endif
+ else
+ cores=$(JOBS)
endif
PARALLEL_OPT="-j$(cores)"
endif
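This lets a caller pin the sub-make parallelism instead of auto-detecting it, e.g. (assuming JOBS is passed on the make command line or via the environment) make -f tests/make JOBS=4.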
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
new file mode 100644
index 000000000000..d64261da8bf7
--- /dev/null
+++ b/tools/perf/tests/pmu-events.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "parse-events.h"
+#include "pmu.h"
+#include "tests.h"
+#include <errno.h>
+#include <stdio.h>
+#include <linux/kernel.h>
+#include <linux/zalloc.h>
+#include "debug.h"
+#include "../pmu-events/pmu-events.h"
+
+struct perf_pmu_test_event {
+ struct pmu_event event;
+
+ /* extra events for aliases */
+ const char *alias_str;
+
+ /*
+ * Note: when PublicDescription is absent from the JSON, the event
+ * has no long_desc in pmu_event.long_desc, but the alias built from
+ * it may still carry one.
+ */
+ const char *alias_long_desc;
+};
+
+static struct perf_pmu_test_event test_cpu_events[] = {
+ {
+ .event = {
+ .name = "bp_l1_btb_correct",
+ .event = "event=0x8a",
+ .desc = "L1 BTB Correction",
+ .topic = "branch",
+ },
+ .alias_str = "event=0x8a",
+ .alias_long_desc = "L1 BTB Correction",
+ },
+ {
+ .event = {
+ .name = "bp_l2_btb_correct",
+ .event = "event=0x8b",
+ .desc = "L2 BTB Correction",
+ .topic = "branch",
+ },
+ .alias_str = "event=0x8b",
+ .alias_long_desc = "L2 BTB Correction",
+ },
+ {
+ .event = {
+ .name = "segment_reg_loads.any",
+ .event = "umask=0x80,period=200000,event=0x6",
+ .desc = "Number of segment register loads",
+ .topic = "other",
+ },
+ .alias_str = "umask=0x80,(null)=0x30d40,event=0x6",
+ .alias_long_desc = "Number of segment register loads",
+ },
+ {
+ .event = {
+ .name = "dispatch_blocked.any",
+ .event = "umask=0x20,period=200000,event=0x9",
+ .desc = "Memory cluster signals to block micro-op dispatch for any reason",
+ .topic = "other",
+ },
+ .alias_str = "umask=0x20,(null)=0x30d40,event=0x9",
+ .alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
+ },
+ {
+ .event = {
+ .name = "eist_trans",
+ .event = "umask=0x0,period=200000,event=0x3a",
+ .desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
+ .topic = "other",
+ },
+ .alias_str = "umask=0,(null)=0x30d40,event=0x3a",
+ .alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
+ },
+ { /* sentinel */
+ .event = {
+ .name = NULL,
+ },
+ },
+};
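The "(null)=0x30d40" fragments in the alias_str values above are intentional, not corruption: 200000 decimal is 0x30d40, and because the "period" term carries no config string on the synthetic test PMU, perf appears to print "(null)" for the term name when it rebuilds the alias string. The test deliberately compares against the literal rebuilt string.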
+
+static struct perf_pmu_test_event test_uncore_events[] = {
+ {
+ .event = {
+ .name = "uncore_hisi_ddrc.flux_wcmd",
+ .event = "event=0x2",
+ .desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
+ .topic = "uncore",
+ .long_desc = "DDRC write commands",
+ .pmu = "hisi_sccl,ddrc",
+ },
+ .alias_str = "event=0x2",
+ .alias_long_desc = "DDRC write commands",
+ },
+ {
+ .event = {
+ .name = "unc_cbo_xsnp_response.miss_eviction",
+ .event = "umask=0x81,event=0x22",
+ .desc = "Unit: uncore_cbox A cross-core snoop resulted from L3 Eviction which misses in some processor core",
+ .topic = "uncore",
+ .long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
+ .pmu = "uncore_cbox",
+ },
+ .alias_str = "umask=0x81,event=0x22",
+ .alias_long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
+ },
+ { /* sentinel */
+ .event = {
+ .name = NULL,
+ },
+ }
+};
+
+const int total_test_events_size = ARRAY_SIZE(test_cpu_events) + ARRAY_SIZE(test_uncore_events);
+
+static bool is_same(const char *reference, const char *test)
+{
+ if (!reference && !test)
+ return true;
+
+ if (reference && !test)
+ return false;
+
+ if (!reference && test)
+ return false;
+
+ return !strcmp(reference, test);
+}
+
+static struct pmu_events_map *__test_pmu_get_events_map(void)
+{
+ struct pmu_events_map *map;
+
+ for (map = &pmu_events_map[0]; map->cpuid; map++) {
+ if (!strcmp(map->cpuid, "testcpu"))
+ return map;
+ }
+
+ pr_err("could not find test events map\n");
+
+ return NULL;
+}
+
+/* Verify the generated events from pmu-events.c are as expected */
+static int __test_pmu_event_table(void)
+{
+ struct pmu_events_map *map = __test_pmu_get_events_map();
+ struct pmu_event *table;
+ int map_events = 0, expected_events;
+
+ /* ignore 2x sentinels */
+ expected_events = ARRAY_SIZE(test_cpu_events) +
+ ARRAY_SIZE(test_uncore_events) - 2;
+
+ if (!map)
+ return -1;
+
+ for (table = map->table; table->name; table++) {
+ struct perf_pmu_test_event *test;
+ struct pmu_event *te;
+ bool found = false;
+
+ if (table->pmu)
+ test = &test_uncore_events[0];
+ else
+ test = &test_cpu_events[0];
+
+ te = &test->event;
+
+ for (; te->name; test++, te = &test->event) {
+ if (strcmp(table->name, te->name))
+ continue;
+ found = true;
+ map_events++;
+
+ if (!is_same(table->desc, te->desc)) {
+ pr_debug2("testing event table %s: mismatched desc, %s vs %s\n",
+ table->name, table->desc, te->desc);
+ return -1;
+ }
+
+ if (!is_same(table->topic, te->topic)) {
+ pr_debug2("testing event table %s: mismatched topic, %s vs %s\n",
+ table->name, table->topic,
+ te->topic);
+ return -1;
+ }
+
+ if (!is_same(table->long_desc, te->long_desc)) {
+ pr_debug2("testing event table %s: mismatched long_desc, %s vs %s\n",
+ table->name, table->long_desc,
+ te->long_desc);
+ return -1;
+ }
+
+ if (!is_same(table->unit, te->unit)) {
+ pr_debug2("testing event table %s: mismatched unit, %s vs %s\n",
+ table->name, table->unit,
+ te->unit);
+ return -1;
+ }
+
+ if (!is_same(table->perpkg, te->perpkg)) {
+ pr_debug2("testing event table %s: mismatched perpkg, %s vs %s\n",
+ table->name, table->perpkg,
+ te->perpkg);
+ return -1;
+ }
+
+ if (!is_same(table->metric_expr, te->metric_expr)) {
+ pr_debug2("testing event table %s: mismatched metric_expr, %s vs %s\n",
+ table->name, table->metric_expr,
+ te->metric_expr);
+ return -1;
+ }
+
+ if (!is_same(table->metric_name, te->metric_name)) {
+ pr_debug2("testing event table %s: mismatched metric_name, %s vs %s\n",
+ table->name, table->metric_name,
+ te->metric_name);
+ return -1;
+ }
+
+ if (!is_same(table->deprecated, te->deprecated)) {
+ pr_debug2("testing event table %s: mismatched deprecated, %s vs %s\n",
+ table->name, table->deprecated,
+ te->deprecated);
+ return -1;
+ }
+
+ pr_debug("testing event table %s: pass\n", table->name);
+ }
+
+ if (!found) {
+ pr_err("testing event table: could not find event %s\n",
+ table->name);
+ return -1;
+ }
+ }
+
+ if (map_events != expected_events) {
+ pr_err("testing event table: found %d, but expected %d\n",
+ map_events, expected_events);
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct perf_pmu_alias *find_alias(const char *test_event, struct list_head *aliases)
+{
+ struct perf_pmu_alias *alias;
+
+ list_for_each_entry(alias, aliases, list)
+ if (!strcmp(test_event, alias->name))
+ return alias;
+
+ return NULL;
+}
+
+/* Verify aliases are as expected */
+static int __test__pmu_event_aliases(char *pmu_name, int *count)
+{
+ struct perf_pmu_test_event *test;
+ struct pmu_event *te;
+ struct perf_pmu *pmu;
+ LIST_HEAD(aliases);
+ int res = 0;
+ bool use_uncore_table;
+ struct pmu_events_map *map = __test_pmu_get_events_map();
+
+ if (!map)
+ return -1;
+
+ if (is_pmu_core(pmu_name)) {
+ test = &test_cpu_events[0];
+ use_uncore_table = false;
+ } else {
+ test = &test_uncore_events[0];
+ use_uncore_table = true;
+ }
+
+ pmu = zalloc(sizeof(*pmu));
+ if (!pmu)
+ return -1;
+
+ pmu->name = pmu_name;
+
+ pmu_add_cpu_aliases_map(&aliases, pmu, map);
+
+ for (te = &test->event; te->name; test++, te = &test->event) {
+ struct perf_pmu_alias *alias = find_alias(te->name, &aliases);
+
+ if (!alias) {
+ bool uncore_match = pmu_uncore_alias_match(pmu_name,
+ te->pmu);
+
+ if (use_uncore_table && !uncore_match) {
+ pr_debug3("testing aliases PMU %s: skip matching alias %s\n",
+ pmu_name, te->name);
+ continue;
+ }
+
+ pr_debug2("testing aliases PMU %s: no alias, alias_table->name=%s\n",
+ pmu_name, te->name);
+ res = -1;
+ break;
+ }
+
+ if (!is_same(alias->desc, te->desc)) {
+ pr_debug2("testing aliases PMU %s: mismatched desc, %s vs %s\n",
+ pmu_name, alias->desc, te->desc);
+ res = -1;
+ break;
+ }
+
+ if (!is_same(alias->long_desc, test->alias_long_desc)) {
+ pr_debug2("testing aliases PMU %s: mismatched long_desc, %s vs %s\n",
+ pmu_name, alias->long_desc,
+ test->alias_long_desc);
+ res = -1;
+ break;
+ }
+
+ if (!is_same(alias->str, test->alias_str)) {
+ pr_debug2("testing aliases PMU %s: mismatched str, %s vs %s\n",
+ pmu_name, alias->str, test->alias_str);
+ res = -1;
+ break;
+ }
+
+ if (!is_same(alias->topic, te->topic)) {
+ pr_debug2("testing aliases PMU %s: mismatched topic, %s vs %s\n",
+ pmu_name, alias->topic, te->topic);
+ res = -1;
+ break;
+ }
+
+ (*count)++;
+ pr_debug2("testing aliases PMU %s: matched event %s\n",
+ pmu_name, alias->name);
+ }
+
+ free(pmu);
+ return res;
+}
+
+int test__pmu_events(struct test *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct perf_pmu *pmu = NULL;
+
+ if (__test_pmu_event_table())
+ return -1;
+
+ while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+ int count = 0;
+
+ if (list_empty(&pmu->format)) {
+ pr_debug2("skipping testing PMU %s\n", pmu->name);
+ continue;
+ }
+
+ if (__test__pmu_event_aliases(pmu->name, &count)) {
+ pr_debug("testing PMU %s aliases: failed\n", pmu->name);
+ return -1;
+ }
+
+ if (count == 0)
+ pr_debug3("testing PMU %s aliases: no events to match\n",
+ pmu->name);
+ else
+ pr_debug("testing PMU %s aliases: pass\n", pmu->name);
+ }
+
+ return 0;
+}
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 14239e472187..61865699c3f4 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -151,6 +151,9 @@ static bool samples_same(const struct perf_sample *s1,
if (type & PERF_SAMPLE_PHYS_ADDR)
COMP(phys_addr);
+ if (type & PERF_SAMPLE_CGROUP)
+ COMP(cgroup);
+
if (type & PERF_SAMPLE_AUX) {
COMP(aux_sample.size);
if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
@@ -230,6 +233,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
.regs = regs,
},
.phys_addr = 113,
+ .cgroup = 114,
.aux_sample = {
.size = sizeof(aux_data),
.data = (void *)aux_data,
@@ -336,7 +340,7 @@ int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_u
* were added. Please actually update the test rather than just change
* the condition below.
*/
- if (PERF_SAMPLE_MAX > PERF_SAMPLE_AUX << 1) {
+ if (PERF_SAMPLE_MAX > PERF_SAMPLE_CGROUP << 1) {
pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
return -1;
}
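The guard works because the PERF_SAMPLE_* flags are single bits assigned in order, so PERF_SAMPLE_MAX is always the newest bit shifted left once. With PERF_SAMPLE_CGROUP as the newest bit (1 << 21 at this point in the series), PERF_SAMPLE_MAX == 1 << 22 and the condition stays false; the moment a newer bit lands in the headers, the condition trips and nags whoever bumped it into extending samples_same() and do_test(), as done here.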
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 9a160fef47c9..61a1ab032080 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -49,6 +49,7 @@ int test__perf_evsel__roundtrip_name_test(struct test *test, int subtest);
int test__perf_evsel__tp_sched_test(struct test *test, int subtest);
int test__syscall_openat_tp_fields(struct test *test, int subtest);
int test__pmu(struct test *test, int subtest);
+int test__pmu_events(struct test *test, int subtest);
int test__attr(struct test *test, int subtest);
int test__dso_data(struct test *test, int subtest);
int test__dso_data_cache(struct test *test, int subtest);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index f36dee499320..487e54ef56a9 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -677,7 +677,7 @@ static int hist_browser__title(struct hist_browser *browser, char *bf, size_t si
return browser->title ? browser->title(browser, bf, size) : 0;
}
-static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_lost_event, char *title, int key)
+static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_lost_event, char *title, size_t size, int key)
{
switch (key) {
case K_TIMER: {
@@ -703,7 +703,7 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
ui_browser__warn_lost_events(&browser->b);
}
- hist_browser__title(browser, title, sizeof(title));
+ hist_browser__title(browser, title, size);
ui_browser__show_title(&browser->b, title);
break;
}
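The extra size parameter fixes a classic C pitfall: inside hist_browser__handle_hotkey(), title is a char * parameter, so the old sizeof(title) evaluated to the pointer size rather than the buffer size. A minimal illustration (hypothetical function, showing only the shape of the bug):

	#include <stdio.h>

	static void f(char buf[64])
	{
		/* array parameters decay: prints sizeof(char *), not 64 */
		printf("%zu\n", sizeof(buf));
	}

Hence the callers below, where title is still a real array, now pass sizeof(title) down explicitly.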
@@ -764,13 +764,13 @@ int hist_browser__run(struct hist_browser *browser, const char *help,
if (ui_browser__show(&browser->b, title, "%s", help) < 0)
return -1;
- if (key && hist_browser__handle_hotkey(browser, warn_lost_event, title, key))
+ if (key && hist_browser__handle_hotkey(browser, warn_lost_event, title, sizeof(title), key))
goto out;
while (1) {
key = ui_browser__run(&browser->b, delay_secs);
- if (hist_browser__handle_hotkey(browser, warn_lost_event, title, key))
+ if (hist_browser__handle_hotkey(browser, warn_lost_event, title, sizeof(title), key))
break;
}
out:
@@ -2465,13 +2465,41 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
return 0;
}
+static struct symbol *symbol__new_unresolved(u64 addr, struct map *map)
+{
+ struct annotated_source *src;
+ struct symbol *sym;
+ char name[64];
+
+ snprintf(name, sizeof(name), "%.*" PRIx64, BITS_PER_LONG / 4, addr);
+
+ sym = symbol__new(addr, ANNOTATION_DUMMY_LEN, 0, 0, name);
+ if (sym) {
+ src = symbol__hists(sym, 1);
+ if (!src) {
+ symbol__delete(sym);
+ return NULL;
+ }
+
+ dso__insert_symbol(map->dso, sym);
+ }
+
+ return sym;
+}
+
static int
add_annotate_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
- struct map_symbol *ms)
+ struct map_symbol *ms,
+ u64 addr)
{
- if (ms->sym == NULL || ms->map->dso->annotate_warned ||
- symbol__annotation(ms->sym)->src == NULL)
+ if (!ms->map || !ms->map->dso || ms->map->dso->annotate_warned)
+ return 0;
+
+ if (!ms->sym)
+ ms->sym = symbol__new_unresolved(addr, ms->map);
+
+ if (ms->sym == NULL || symbol__annotation(ms->sym)->src == NULL)
return 0;
if (asprintf(optstr, "Annotate %s", ms->sym->name) < 0)
@@ -2964,7 +2992,8 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
"s Switch to another data file in PWD\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
- "/ Filter symbol by name";
+ "/ Filter symbol by name\n"
+ "0-9 Sort by event n in group";
static const char top_help[] = HIST_BROWSER_HELP_COMMON
"P Print histograms to perf.hist.N\n"
"t Zoom into current Thread\n"
@@ -3025,6 +3054,31 @@ do_hotkey: // key came straight from options ui__popup_menu()
* go to the next or previous
*/
goto out_free_stack;
+ case '0' ... '9':
+ if (!symbol_conf.event_group ||
+ evsel->core.nr_members < 2) {
+ snprintf(buf, sizeof(buf),
+ "Sort by index only available with group events!");
+ helpline = buf;
+ continue;
+ }
+
+ if (key - '0' == symbol_conf.group_sort_idx)
+ continue;
+
+ symbol_conf.group_sort_idx = key - '0';
+
+ if (symbol_conf.group_sort_idx >= evsel->core.nr_members) {
+ snprintf(buf, sizeof(buf),
+ "Max event group index to sort is %d (index from 0 to %d)",
+ evsel->core.nr_members - 1,
+ evsel->core.nr_members - 1);
+ helpline = buf;
+ continue;
+ }
+
+ key = K_RELOAD;
+ goto out_free_stack;
case 'a':
if (!hists__has(hists, sym)) {
ui_browser__warning(&browser->b, delay_secs * 2,
@@ -3033,21 +3087,45 @@ do_hotkey: // key came straight from options ui__popup_menu()
continue;
}
- if (browser->selection == NULL ||
- browser->selection->sym == NULL ||
- browser->selection->map->dso->annotate_warned)
+ if (!browser->selection ||
+ !browser->selection->map ||
+ !browser->selection->map->dso ||
+ browser->selection->map->dso->annotate_warned) {
continue;
+ }
- if (symbol__annotation(browser->selection->sym)->src == NULL) {
- ui_browser__warning(&browser->b, delay_secs * 2,
- "No samples for the \"%s\" symbol.\n\n"
- "Probably appeared just in a callchain",
- browser->selection->sym->name);
- continue;
+ if (!browser->selection->sym) {
+ if (!browser->he_selection)
+ continue;
+
+ if (sort__mode == SORT_MODE__BRANCH) {
+ bi = browser->he_selection->branch_info;
+ if (!bi || !bi->to.ms.map)
+ continue;
+
+ actions->ms.sym = symbol__new_unresolved(bi->to.al_addr, bi->to.ms.map);
+ actions->ms.map = bi->to.ms.map;
+ } else {
+ actions->ms.sym = symbol__new_unresolved(browser->he_selection->ip,
+ browser->selection->map);
+ actions->ms.map = browser->selection->map;
+ }
+
+ if (!actions->ms.sym)
+ continue;
+ } else {
+ if (symbol__annotation(browser->selection->sym)->src == NULL) {
+ ui_browser__warning(&browser->b, delay_secs * 2,
+ "No samples for the \"%s\" symbol.\n\n"
+ "Probably appeared just in a callchain",
+ browser->selection->sym->name);
+ continue;
+ }
+
+ actions->ms.map = browser->selection->map;
+ actions->ms.sym = browser->selection->sym;
}
- actions->ms.map = browser->selection->map;
- actions->ms.sym = browser->selection->sym;
do_annotate(browser, actions);
continue;
case 'P':
@@ -3219,17 +3297,20 @@ do_hotkey: // key came straight from options ui__popup_menu()
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
- &bi->from.ms);
+ &bi->from.ms,
+ bi->from.al_addr);
if (bi->to.ms.sym != bi->from.ms.sym)
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
- &bi->to.ms);
+ &bi->to.ms,
+ bi->to.al_addr);
} else {
nr_options += add_annotate_opt(browser,
&actions[nr_options],
&options[nr_options],
- browser->selection);
+ browser->selection,
+ browser->he_selection->ip);
}
skip_annotation:
nr_options += add_thread_opt(browser, &actions[nr_options],
@@ -3440,6 +3521,7 @@ browse_hists:
pos = perf_evsel__prev(pos);
goto browse_hists;
case K_SWITCH_INPUT_DATA:
+ case K_RELOAD:
case 'q':
case CTRL('c'):
goto out;
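Usage-wise (standard perf workflow, not introduced by this patch): record an event group, e.g. perf record -e '{cycles,instructions}', open perf report --group, and press 1 to re-sort the histogram by the group's second event; K_RELOAD then tells the browser loop to tear down and rebuild with the new symbol_conf.group_sort_idx.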
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index f73675500061..025f4c7f96bf 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -151,15 +151,90 @@ static int field_cmp(u64 field_a, u64 field_b)
return 0;
}
+static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
+ hpp_field_fn get_field, int nr_members,
+ u64 **fields_a, u64 **fields_b)
+{
+ u64 *fa = calloc(nr_members, sizeof(*fa)),
+ *fb = calloc(nr_members, sizeof(*fb));
+ struct hist_entry *pair;
+
+ if (!fa || !fb)
+ goto out_free;
+
+ list_for_each_entry(pair, &a->pairs.head, pairs.node) {
+ struct evsel *evsel = hists_to_evsel(pair->hists);
+ fa[perf_evsel__group_idx(evsel)] = get_field(pair);
+ }
+
+ list_for_each_entry(pair, &b->pairs.head, pairs.node) {
+ struct evsel *evsel = hists_to_evsel(pair->hists);
+ fb[perf_evsel__group_idx(evsel)] = get_field(pair);
+ }
+
+ *fields_a = fa;
+ *fields_b = fb;
+ return 0;
+out_free:
+ free(fa);
+ free(fb);
+ *fields_a = *fields_b = NULL;
+ return -1;
+}
+
+static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
+ hpp_field_fn get_field, int idx)
+{
+ struct evsel *evsel = hists_to_evsel(a->hists);
+ u64 *fields_a, *fields_b;
+ int cmp, nr_members, ret, i;
+
+ cmp = field_cmp(get_field(a), get_field(b));
+ if (!perf_evsel__is_group_event(evsel))
+ return cmp;
+
+ nr_members = evsel->core.nr_members;
+ if (idx < 1 || idx >= nr_members)
+ return cmp;
+
+ ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
+ if (ret) {
+ ret = cmp;
+ goto out;
+ }
+
+ ret = field_cmp(fields_a[idx], fields_b[idx]);
+ if (ret)
+ goto out;
+
+ for (i = 1; i < nr_members; i++) {
+ if (i != idx) {
+ ret = field_cmp(fields_a[i], fields_b[i]);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ free(fields_a);
+ free(fields_b);
+
+ return ret;
+}
+
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
hpp_field_fn get_field)
{
s64 ret;
int i, nr_members;
struct evsel *evsel;
- struct hist_entry *pair;
u64 *fields_a, *fields_b;
+ if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
+ return __hpp__group_sort_idx(a, b, get_field,
+ symbol_conf.group_sort_idx);
+ }
+
ret = field_cmp(get_field(a), get_field(b));
if (ret || !symbol_conf.event_group)
return ret;
@@ -169,22 +244,10 @@ static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
return ret;
nr_members = evsel->core.nr_members;
- fields_a = calloc(nr_members, sizeof(*fields_a));
- fields_b = calloc(nr_members, sizeof(*fields_b));
-
- if (!fields_a || !fields_b)
+ i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
+ if (i)
goto out;
- list_for_each_entry(pair, &a->pairs.head, pairs.node) {
- evsel = hists_to_evsel(pair->hists);
- fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
- }
-
- list_for_each_entry(pair, &b->pairs.head, pairs.node) {
- evsel = hists_to_evsel(pair->hists);
- fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
- }
-
for (i = 1; i < nr_members; i++) {
ret = field_cmp(fields_a[i], fields_b[i]);
if (ret)
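A worked example of the new comparator: with a group {cycles,instructions,branches} and group_sort_idx = 1, two entries are ordered by their instructions counts first; on a tie, the remaining members other than index 1 decide in index order, so branches (index 2) breaks the tie. The leader's own field_cmp() result (cmp) is only returned as a fallback when the pair arrays cannot be allocated.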
diff --git a/tools/perf/ui/keysyms.h b/tools/perf/ui/keysyms.h
index fbfac29077f2..04cc4e5c031f 100644
--- a/tools/perf/ui/keysyms.h
+++ b/tools/perf/ui/keysyms.h
@@ -25,5 +25,6 @@
#define K_ERROR -2
#define K_RESIZE -3
#define K_SWITCH_INPUT_DATA -4
+#define K_RELOAD -5
#endif /* _PERF_KEYSYMS_H_ */
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 07c775938d46..2d88069d6428 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -74,6 +74,7 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
#define ANNOTATION__CYCLES_WIDTH 6
#define ANNOTATION__MINMAX_CYCLES_WIDTH 19
#define ANNOTATION__AVG_IPC_WIDTH 36
+#define ANNOTATION_DUMMY_LEN 256
struct annotation_options {
bool hide_src_code,
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 5bc9d3b01bd9..b73fb7823048 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -191,3 +191,83 @@ int parse_cgroups(const struct option *opt, const char *str,
}
return 0;
}
+
+static struct cgroup *__cgroup__findnew(struct rb_root *root, uint64_t id,
+ bool create, const char *path)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct cgroup *cgrp;
+
+ while (*p != NULL) {
+ parent = *p;
+ cgrp = rb_entry(parent, struct cgroup, node);
+
+ if (cgrp->id == id)
+ return cgrp;
+
+ if (cgrp->id < id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ if (!create)
+ return NULL;
+
+ cgrp = malloc(sizeof(*cgrp));
+ if (cgrp == NULL)
+ return NULL;
+
+ cgrp->name = strdup(path);
+ if (cgrp->name == NULL) {
+ free(cgrp);
+ return NULL;
+ }
+
+ cgrp->fd = -1;
+ cgrp->id = id;
+ refcount_set(&cgrp->refcnt, 1);
+
+ rb_link_node(&cgrp->node, parent, p);
+ rb_insert_color(&cgrp->node, root);
+
+ return cgrp;
+}
+
+struct cgroup *cgroup__findnew(struct perf_env *env, uint64_t id,
+ const char *path)
+{
+ struct cgroup *cgrp;
+
+ down_write(&env->cgroups.lock);
+ cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);
+ up_write(&env->cgroups.lock);
+ return cgrp;
+}
+
+struct cgroup *cgroup__find(struct perf_env *env, uint64_t id)
+{
+ struct cgroup *cgrp;
+
+ down_read(&env->cgroups.lock);
+ cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);
+ up_read(&env->cgroups.lock);
+ return cgrp;
+}
+
+void perf_env__purge_cgroups(struct perf_env *env)
+{
+ struct rb_node *node;
+ struct cgroup *cgrp;
+
+ down_write(&env->cgroups.lock);
+ while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {
+ node = rb_first(&env->cgroups.tree);
+ cgrp = rb_entry(node, struct cgroup, node);
+
+ rb_erase(node, &env->cgroups.tree);
+ cgroup__put(cgrp);
+ }
+ up_write(&env->cgroups.lock);
+}
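A minimal consumer sketch (assuming env is the struct perf_env wired up in env.h below, and that the cgroup id was already seen via a PERF_RECORD_CGROUP event):

	struct cgroup *cgrp = cgroup__find(env, sample->cgroup);

	if (cgrp != NULL)
		printf("sample in cgroup %s\n", cgrp->name);

cgroup__find() takes the lock only for reading; cgroup__findnew() is the write side used when replaying PERF_RECORD_CGROUP events, creating the node on first sight of an id.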
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
index 2ec11f01090d..e98d5975fe55 100644
--- a/tools/perf/util/cgroup.h
+++ b/tools/perf/util/cgroup.h
@@ -3,16 +3,19 @@
#define __CGROUP_H__
#include <linux/refcount.h>
+#include <linux/rbtree.h>
+#include "util/env.h"
struct option;
struct cgroup {
- char *name;
- int fd;
- refcount_t refcnt;
+ struct rb_node node;
+ u64 id;
+ char *name;
+ int fd;
+ refcount_t refcnt;
};
-
extern int nr_cgroups; /* number of explicit cgroups defined */
struct cgroup *cgroup__get(struct cgroup *cgroup);
@@ -26,4 +29,10 @@ void evlist__set_default_cgroup(struct evlist *evlist, struct cgroup *cgroup);
int parse_cgroups(const struct option *opt, const char *str, int unset);
+struct cgroup *cgroup__findnew(struct perf_env *env, uint64_t id,
+ const char *path);
+struct cgroup *cgroup__find(struct perf_env *env, uint64_t id);
+
+void perf_env__purge_cgroups(struct perf_env *env);
+
#endif /* __CGROUP_H__ */
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 983b7388f22b..dc5c5e6fc502 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -317,7 +317,7 @@ static void set_max_cpu_num(void)
/* get the highest possible cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
- if (ret == PATH_MAX) {
+ if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
@@ -328,7 +328,7 @@ static void set_max_cpu_num(void)
/* get the highest present cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
- if (ret == PATH_MAX) {
+ if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
@@ -356,7 +356,7 @@ static void set_max_node_num(void)
/* get the highest possible cpu number for a sparse allocation */
ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
- if (ret == PATH_MAX) {
+ if (ret >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
goto out;
}
@@ -441,7 +441,7 @@ int cpu__setup_cpunode_map(void)
return 0;
n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
- if (n == PATH_MAX) {
+ if (n >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
return -1;
}
@@ -456,7 +456,7 @@ int cpu__setup_cpunode_map(void)
continue;
n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
- if (n == PATH_MAX) {
+ if (n >= PATH_MAX) {
pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
continue;
}
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index 591707c69c39..939471731ea6 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -26,13 +26,29 @@ static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
return 0;
}
+static bool dso_id__empty(struct dso_id *id)
+{
+ if (!id)
+ return true;
+
+ return !id->maj && !id->min && !id->ino && !id->ino_generation;
+}
+
+static void dso__inject_id(struct dso *dso, struct dso_id *id)
+{
+ dso->id.maj = id->maj;
+ dso->id.min = id->min;
+ dso->id.ino = id->ino;
+ dso->id.ino_generation = id->ino_generation;
+}
+
static int dso_id__cmp(struct dso_id *a, struct dso_id *b)
{
/*
* The second is always dso->id, so zeroes if not set, assume passing
* NULL for a means a zeroed id
*/
- if (a == NULL)
+ if (dso_id__empty(a) || dso_id__empty(b))
return 0;
return __dso_id__cmp(a, b);
@@ -249,6 +265,10 @@ struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
struct dso *dso = __dsos__find_id(dsos, name, id, false);
+
+ if (dso && dso_id__empty(&dso->id) && !dso_id__empty(id))
+ dso__inject_id(dso, id);
+
return dso ? dso : __dsos__addnew_id(dsos, name, id);
}
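The intent here: the first reference to a DSO can arrive from a context with no inode data, creating the dso with a zeroed id. When a later lookup for the same name does carry a real dso_id, dso__inject_id() fills the empty id in place instead of failing the compare and growing a duplicate dso, and dso_id__cmp() correspondingly treats an empty id on either side as a wildcard.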
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4154f944f474..fadc59708ece 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -6,6 +6,7 @@
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
+#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
@@ -168,6 +169,7 @@ void perf_env__exit(struct perf_env *env)
int i;
perf_env__purge_bpf(env);
+ perf_env__purge_cgroups(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 11d05ae3606a..7632075a8792 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -88,6 +88,12 @@ struct perf_env {
u32 btfs_cnt;
} bpf_progs;
+ /* same reason as above (for perf-top) */
+ struct {
+ struct rw_semaphore lock;
+ struct rb_root tree;
+ } cgroups;
+
/* For fast cpu to numa node lookup via perf_env__numa_node */
int *numa_map;
int nr_numa_map;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index c5447ff516a2..dc0e11214ae1 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -54,6 +54,7 @@ static const char *perf_event__names[] = {
[PERF_RECORD_NAMESPACES] = "NAMESPACES",
[PERF_RECORD_KSYMBOL] = "KSYMBOL",
[PERF_RECORD_BPF_EVENT] = "BPF_EVENT",
+ [PERF_RECORD_CGROUP] = "CGROUP",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
@@ -180,6 +181,12 @@ size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
return ret;
}
+size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " cgroup: %" PRI_lu64 " %s\n",
+ event->cgroup.id, event->cgroup.path);
+}
+
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
@@ -196,6 +203,14 @@ int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
return machine__process_namespaces_event(machine, event, sample);
}
+int perf_event__process_cgroup(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_cgroup_event(machine, event, sample);
+}
+
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
@@ -417,6 +432,9 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_NAMESPACES:
ret += perf_event__fprintf_namespaces(event, fp);
break;
+ case PERF_RECORD_CGROUP:
+ ret += perf_event__fprintf_cgroup(event, fp);
+ break;
case PERF_RECORD_MMAP2:
ret += perf_event__fprintf_mmap2(event, fp);
break;
@@ -599,10 +617,23 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
al->sym = map__find_symbol(al->map, al->addr);
}
- if (symbol_conf.sym_list &&
- (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
- al->sym->name))) {
- al->filtered |= (1 << HIST_FILTER__SYMBOL);
+ if (symbol_conf.sym_list) {
+ int ret = 0;
+ char al_addr_str[32];
+ size_t sz = sizeof(al_addr_str);
+
+ if (al->sym) {
+ ret = strlist__has_entry(symbol_conf.sym_list,
+ al->sym->name);
+ }
+ if (!ret && al->map) {
+ snprintf(al_addr_str, sz, "0x%"PRIx64,
+ al->map->unmap_ip(al->map,
+ al->sym ? al->sym->start : al->addr));
+ ret = strlist__has_entry(symbol_conf.sym_list,
+ al_addr_str);
+ }
+ if (!ret)
+ al->filtered |= (1 << HIST_FILTER__SYMBOL);
}
return 0;
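With this, the --symbols filter list can name raw addresses as well as symbol names: when the lookup by symbol name fails (or no symbol resolved at all), the sample's location is re-tested as a hex string against the same list, so something like perf report --symbols 0xffffffff81000000 becomes possible (address value purely illustrative).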
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 3cda40a2fafc..b8289f160f07 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -135,6 +135,7 @@ struct perf_sample {
u32 raw_size;
u64 data_src;
u64 phys_addr;
+ u64 cgroup;
u32 flags;
u16 insn_len;
u8 cpumode;
@@ -322,6 +323,10 @@ int perf_event__process_namespaces(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+int perf_event__process_cgroup(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -377,6 +382,7 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 816d930d774e..eb880efbce16 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1104,6 +1104,11 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
if (opts->record_namespaces)
attr->namespaces = track;
+ if (opts->record_cgroup) {
+ attr->cgroup = track && !perf_missing_features.cgroup;
+ perf_evsel__set_sample_bit(evsel, CGROUP);
+ }
+
if (opts->record_switch_events)
attr->context_switch = track;
@@ -1287,6 +1292,7 @@ void perf_evsel__exit(struct evsel *evsel)
perf_thread_map__put(evsel->core.threads);
zfree(&evsel->group_name);
zfree(&evsel->name);
+ zfree(&evsel->pmu_name);
perf_evsel__object.fini(evsel);
}
@@ -1788,7 +1794,11 @@ try_fallback:
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.branch_hw_idx &&
+ if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
+ perf_missing_features.cgroup = true;
+ pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
+ goto out_close;
+ } else if (!perf_missing_features.branch_hw_idx &&
(evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
perf_missing_features.branch_hw_idx = true;
pr_debug2("switching off branch HW index support\n");
@@ -2266,6 +2276,12 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array++;
}
+ data->cgroup = 0;
+ if (type & PERF_SAMPLE_CGROUP) {
+ data->cgroup = *array;
+ array++;
+ }
+
if (type & PERF_SAMPLE_AUX) {
OVERFLOW_CHECK_u64(array);
sz = *array++;
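This follows perf's usual probe-and-retry scheme: on an old kernel the first sys_perf_event_open() with attr.cgroup set fails, the fallback path records perf_missing_features.cgroup and retries, and perf_evsel__config() consequently guards the bit with track && !perf_missing_features.cgroup. The comment above the chain is the key constraint: checks must stay ordered newest feature first, or an older missing feature would be blamed for the failure and the real one never cleared.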
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 33804740e2ca..53187c501ee8 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -120,6 +120,7 @@ struct perf_missing_features {
bool bpf;
bool aux_output;
bool branch_hw_idx;
+ bool cgroup;
};
extern struct perf_missing_features perf_missing_features;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index e74a5acf66d9..283a69ff6a3d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -10,6 +10,7 @@
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
+#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
@@ -194,6 +195,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
}
+ hists__new_col_len(hists, HISTC_CGROUP, 6);
hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
hists__new_col_len(hists, HISTC_CPU, 3);
hists__new_col_len(hists, HISTC_SOCKET, 6);
@@ -222,6 +224,16 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
if (h->trace_output)
hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
+
+ if (h->cgroup) {
+ const char *cgrp_name = "unknown";
+ struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
+ h->cgroup);
+ if (cgrp != NULL)
+ cgrp_name = cgrp->name;
+
+ hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
+ }
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
@@ -691,6 +703,7 @@ __hists__add_entry(struct hists *hists,
.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
},
+ .cgroup = sample->cgroup,
.ms = {
.maps = al->maps,
.map = al->map,
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 0aa63aeb58ec..4141295a66fa 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -38,6 +38,7 @@ enum hist_column {
HISTC_THREAD,
HISTC_COMM,
HISTC_CGROUP_ID,
+ HISTC_CGROUP,
HISTC_PARENT,
HISTC_CPU,
HISTC_SOCKET,
@@ -536,6 +537,7 @@ static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused,
#define K_LEFT -1000
#define K_RIGHT -2000
#define K_SWITCH_INPUT_DATA -3000
+#define K_RELOAD -4000
#endif
unsigned int hists__sort_list_width(struct hists *hists);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index fd14f1489802..97142e9671be 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -33,6 +33,7 @@
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
+#include "cgroup.h"
#include <linux/ctype.h>
#include <symbol/kallsyms.h>
@@ -654,6 +655,22 @@ int machine__process_namespaces_event(struct machine *machine __maybe_unused,
return err;
}
+int machine__process_cgroup_event(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct cgroup *cgrp;
+
+ if (dump_trace)
+ perf_event__fprintf_cgroup(event, stdout);
+
+ cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
+ if (cgrp == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
int machine__process_lost_event(struct machine *machine __maybe_unused,
union perf_event *event, struct perf_sample *sample __maybe_unused)
{
@@ -1878,6 +1895,8 @@ int machine__process_event(struct machine *machine, union perf_event *event,
ret = machine__process_mmap_event(machine, event, sample); break;
case PERF_RECORD_NAMESPACES:
ret = machine__process_namespaces_event(machine, event, sample); break;
+ case PERF_RECORD_CGROUP:
+ ret = machine__process_cgroup_event(machine, event, sample); break;
case PERF_RECORD_MMAP2:
ret = machine__process_mmap2_event(machine, event, sample); break;
case PERF_RECORD_FORK:
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index be0a930eca89..fa1be9ea00fa 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -128,6 +128,9 @@ int machine__process_switch_event(struct machine *machine,
int machine__process_namespaces_event(struct machine *machine,
union perf_event *event,
struct perf_sample *sample);
+int machine__process_cgroup_event(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample);
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index c3a8c701609a..926449a7cdbf 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -95,13 +95,16 @@ struct egroup {
static struct evsel *find_evsel_group(struct evlist *perf_evlist,
const char **ids,
int idnum,
- struct evsel **metric_events)
+ struct evsel **metric_events,
+ bool *evlist_used)
{
struct evsel *ev;
- int i = 0;
+ int i = 0, j = 0;
bool leader_found;
evlist__for_each_entry (perf_evlist, ev) {
+ if (evlist_used[j++])
+ continue;
if (!strcmp(ev->name, ids[i])) {
if (!metric_events[i])
metric_events[i] = ev;
@@ -109,22 +112,17 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
if (i == idnum)
break;
} else {
- if (i + 1 == idnum) {
- /* Discard the whole match and start again */
- i = 0;
- memset(metric_events, 0,
- sizeof(struct evsel *) * idnum);
- continue;
- }
-
- if (!strcmp(ev->name, ids[i]))
- metric_events[i] = ev;
- else {
- /* Discard the whole match and start again */
- i = 0;
- memset(metric_events, 0,
- sizeof(struct evsel *) * idnum);
- continue;
+ /* Discard the whole match and start again */
+ i = 0;
+ memset(metric_events, 0,
+ sizeof(struct evsel *) * idnum);
+
+ if (!strcmp(ev->name, ids[i])) {
+ if (!metric_events[i])
+ metric_events[i] = ev;
+ i++;
+ if (i == idnum)
+ break;
}
}
}
@@ -146,7 +144,10 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
!strcmp(ev->name, metric_events[i]->name)) {
ev->metric_leader = metric_events[i];
}
+ j++;
}
+ ev = metric_events[i];
+ evlist_used[ev->idx] = true;
}
return metric_events[0];
@@ -162,6 +163,13 @@ static int metricgroup__setup_events(struct list_head *groups,
int ret = 0;
struct egroup *eg;
struct evsel *evsel;
+ bool *evlist_used;
+
+ evlist_used = calloc(perf_evlist->core.nr_entries, sizeof(bool));
+ if (!evlist_used) {
+ ret = -ENOMEM;
+ return ret;
+ }
list_for_each_entry (eg, groups, nd) {
struct evsel **metric_events;
@@ -172,7 +180,7 @@ static int metricgroup__setup_events(struct list_head *groups,
break;
}
evsel = find_evsel_group(perf_evlist, eg->ids, eg->idnum,
- metric_events);
+ metric_events, evlist_used);
if (!evsel) {
pr_debug("Cannot resolve %s: %s\n",
eg->metric_name, eg->metric_expr);
@@ -196,6 +204,9 @@ static int metricgroup__setup_events(struct list_head *groups,
expr->metric_events = metric_events;
list_add(&expr->nd, &me->head);
}
+
+ free(evlist_used);
+
return ret;
}
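evlist_used is a per-evsel "already claimed" map sized to the evlist: once find_evsel_group() matches a metric's full id list, the matched evsels are marked used, so a second metric sharing event names must claim different, still-unused evsels rather than silently aliasing the first metric's counters.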
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index a7dc0b096974..10107747b361 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1449,7 +1449,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL,
auto_merge_stats, NULL);
if (evsel) {
- evsel->pmu_name = name;
+ evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
return 0;
} else {
@@ -1497,7 +1497,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel->snapshot = info.snapshot;
evsel->metric_expr = info.metric_expr;
evsel->metric_name = info.metric_name;
- evsel->pmu_name = name;
+ evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
evsel->percore = config_term_percore(&evsel->config_terms);
}
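Both strdup() calls pair with the new zfree(&evsel->pmu_name) in perf_evsel__exit() (see the evsel.c hunk above): once the destructor frees pmu_name, each evsel must own its copy of the string rather than borrow the parser's, otherwise exit would free memory it never allocated.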
@@ -1547,7 +1547,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
if (!parse_events_add_pmu(parse_state, list,
pmu->name, head,
true, true)) {
- pr_debug("%s -> %s/%s/\n", config,
+ pr_debug("%s -> %s/%s/\n", str,
pmu->name, alias->str);
ok++;
}
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 7b1c8ee537cf..baa48f28d57d 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -342,11 +342,13 @@ bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUT
* Because the prefix 'cycles' is mixed up with 'cpu-cycles',
* and 'loads' and 'stores' are mixed up with cache events.
*/
-cycles-ct { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
-cycles-t { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
-mem-loads { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
-mem-stores { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
-topdown-[a-z-]+ { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+cycles-ct |
+cycles-t |
+mem-loads |
+mem-stores |
+topdown-[a-z-]+ |
+tx-capacity-[a-z-]+ |
+el-capacity-[a-z-]+ { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
L1-dcache|l1-d|l1d|L1-data |
L1-icache|l1-i|l1i|L1-instruction |
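
The lexer change collapses the per-name rules into one flex alternation, so every kernel-PMU event pattern shares a single action. The same grouping, sketched with a combined Python regex rather than flex:

import re

# Combined alternation mirroring the flex rule; anchoring matters.
KERNEL_PMU_EVENT = re.compile(
    r'^(cycles-ct|cycles-t|mem-loads|mem-stores|'
    r'topdown-[a-z-]+|tx-capacity-[a-z-]+|el-capacity-[a-z-]+)$')

for tok in ['cycles-t', 'topdown-total-slots', 'el-capacity-read', 'cycles']:
    kind = 'PE_KERNEL_PMU_EVENT' if KERNEL_PMU_EVENT.match(tok) else 'other'
    print(tok, '->', kind)
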
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 355d3458d4e6..b94fa07f5d32 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -35,6 +35,7 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
bit_name(WEIGHT), bit_name(PHYS_ADDR), bit_name(AUX),
+ bit_name(CGROUP),
{ .name = NULL, }
};
#undef bit_name
@@ -132,6 +133,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(ksymbol, p_unsigned);
PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRf(aux_output, p_unsigned);
+ PRINT_ATTRf(cgroup, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 8b99fd312aae..ef6a63f3d386 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -21,7 +21,6 @@
#include "pmu.h"
#include "parse-events.h"
#include "header.h"
-#include "pmu-events/pmu-events.h"
#include "string2.h"
#include "strbuf.h"
#include "fncache.h"
@@ -699,7 +698,7 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
return map;
}
-static bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
+bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
{
char *tmp = NULL, *tok, *str;
bool res;
@@ -744,16 +743,11 @@ out:
* to the current running CPU. Then, add all PMU events from that table
* as aliases.
*/
-static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
+ struct pmu_events_map *map)
{
int i;
- struct pmu_events_map *map;
const char *name = pmu->name;
-
- map = perf_pmu__find_map(pmu);
- if (!map)
- return;
-
/*
* Found a matching PMU events table. Create aliases
*/
@@ -788,6 +782,17 @@ new_alias:
}
}
+static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+{
+ struct pmu_events_map *map;
+
+ map = perf_pmu__find_map(pmu);
+ if (!map)
+ return;
+
+ pmu_add_cpu_aliases_map(head, pmu, map);
+}
+
struct perf_event_attr * __weak
perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
{
@@ -979,12 +984,11 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
struct parse_events_term *t;
list_for_each_entry(t, head_terms, list) {
- if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
- if (!strcmp(t->config, term->config)) {
- t->used = true;
- *value = t->val.num;
- return 0;
- }
+ if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM &&
+ t->config && !strcmp(t->config, term->config)) {
+ t->used = true;
+ *value = t->val.num;
+ return 0;
}
}
@@ -1395,6 +1399,11 @@ static void wordwrap(char *s, int start, int max, int corr)
}
}
+bool is_pmu_core(const char *name)
+{
+ return !strcmp(name, "cpu") || is_arm_pmu_core(name);
+}
+
void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
bool long_desc, bool details_flag, bool deprecated)
{
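
pmu.c is refactored so the map lookup and the alias population are separable: the new pmu_add_cpu_aliases_map() takes the events table as a parameter, and the old entry point becomes a thin wrapper around it. The shape of that extraction, as a small Python sketch with made-up names:

def add_cpu_aliases_map(aliases, pmu, events_map):
    """Populate aliases from an explicitly supplied events table."""
    for event in events_map.get(pmu, []):
        aliases.append(event)

def add_cpu_aliases(aliases, pmu, default_map):
    """Old entry point: resolve the default table, then delegate."""
    events_map = default_map   # stand-in for perf_pmu__find_map()
    if not events_map:
        return
    add_cpu_aliases_map(aliases, pmu, events_map)

aliases = []
add_cpu_aliases(aliases, 'cpu', {'cpu': ['inst_retired.any']})
print(aliases)
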
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 6737e3d5d568..5fb3f16828df 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -7,6 +7,7 @@
#include <linux/perf_event.h>
#include <stdbool.h>
#include "parse-events.h"
+#include "pmu-events/pmu-events.h"
struct perf_evsel_config_term;
@@ -87,6 +88,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head);
struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
+bool is_pmu_core(const char *name);
void print_pmu_events(const char *event_glob, bool name_only, bool quiet,
bool long_desc, bool details_flag,
bool deprecated);
@@ -97,8 +99,11 @@ int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
+void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
+ struct pmu_events_map *map);
struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
+bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index e7279ea6043a..a9d9c142eb7c 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -34,3 +34,4 @@ util/string.c
util/symbol_fprintf.c
util/units.c
util/affinity.c
+util/rwsem.c
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index 5421fd2ad383..24316458be20 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -34,6 +34,7 @@ struct record_opts {
bool auxtrace_snapshot_on_exit;
bool auxtrace_sample_mode;
bool record_namespaces;
+ bool record_cgroup;
bool record_switch_events;
bool all_kernel;
bool all_user;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 8c1b27cd8b99..2c372cf5495e 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -694,6 +694,9 @@ static int regs_map(struct regs_dump *regs, uint64_t mask, char *bf, int size)
bf[0] = 0;
+ if (!regs || !regs->regs)
+ return 0;
+
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++];
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 055b00abd56d..0b0bfe5bef17 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -471,6 +471,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->comm = process_event_stub;
if (tool->namespaces == NULL)
tool->namespaces = process_event_stub;
+ if (tool->cgroup == NULL)
+ tool->cgroup = process_event_stub;
if (tool->fork == NULL)
tool->fork = process_event_stub;
if (tool->exit == NULL)
@@ -1436,6 +1438,8 @@ static int machines__deliver_event(struct machines *machines,
return tool->comm(tool, event, sample, machine);
case PERF_RECORD_NAMESPACES:
return tool->namespaces(tool, event, sample, machine);
+ case PERF_RECORD_CGROUP:
+ return tool->cgroup(tool, event, sample, machine);
case PERF_RECORD_FORK:
return tool->fork(tool, event, sample, machine);
case PERF_RECORD_EXIT:
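
Installing process_event_stub for an unset tool->cgroup is what lets the dispatch above call the callback unconditionally. A Python sketch of the same null-object pattern (illustrative class and names):

def stub(*args):
    return 0   # no-op default handler

def fill_defaults(tool):
    for cb in ('comm', 'namespaces', 'cgroup', 'fork', 'exit'):
        if getattr(tool, cb, None) is None:
            setattr(tool, cb, stub)

class Tool:
    cgroup = None   # tool did not register a cgroup handler

tool = Tool()
fill_defaults(tool)
print(tool.cgroup('PERF_RECORD_CGROUP'))   # safe to call: prints 0
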
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 8a065a6f9713..347b2c0789e4 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -3,7 +3,7 @@ from subprocess import Popen, PIPE
from re import sub
cc = getenv("CC")
-cc_is_clang = b"clang version" in Popen([cc, "-v"], stderr=PIPE).stderr.readline()
+cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
def clang_has_option(option):
return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index ab0cfd790ad0..f14cc728c358 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -12,6 +12,7 @@
#include "cacheline.h"
#include "comm.h"
#include "map.h"
+#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
@@ -25,6 +26,8 @@
#include "mem-events.h"
#include "annotate.h"
#include "time-utils.h"
+#include "cgroup.h"
+#include "machine.h"
#include <linux/kernel.h>
#include <linux/string.h>
@@ -634,6 +637,39 @@ struct sort_entry sort_cgroup_id = {
.se_width_idx = HISTC_CGROUP_ID,
};
+/* --sort cgroup */
+
+static int64_t
+sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->cgroup - left->cgroup;
+}
+
+static int hist_entry__cgroup_snprintf(struct hist_entry *he,
+ char *bf, size_t size,
+ unsigned int width __maybe_unused)
+{
+ const char *cgrp_name = "N/A";
+
+ if (he->cgroup) {
+ struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
+ he->cgroup);
+ if (cgrp != NULL)
+ cgrp_name = cgrp->name;
+ else
+ cgrp_name = "unknown";
+ }
+
+ return repsep_snprintf(bf, size, "%s", cgrp_name);
+}
+
+struct sort_entry sort_cgroup = {
+ .se_header = "Cgroup",
+ .se_cmp = sort__cgroup_cmp,
+ .se_snprintf = hist_entry__cgroup_snprintf,
+ .se_width_idx = HISTC_CGROUP,
+};
+
/* --sort socket */
static int64_t
@@ -869,7 +905,8 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
if (he->branch_info) {
struct addr_map_symbol *from = &he->branch_info->from;
- return _hist_entry__sym_snprintf(&from->ms, from->addr, he->level, bf, size, width);
+ return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
+ he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
@@ -881,7 +918,8 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
if (he->branch_info) {
struct addr_map_symbol *to = &he->branch_info->to;
- return _hist_entry__sym_snprintf(&to->ms, to->addr, he->level, bf, size, width);
+ return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
+ he->level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
@@ -1658,6 +1696,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_TRACE, "trace", sort_trace),
DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
+ DIM(SORT_CGROUP, "cgroup", sort_cgroup),
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
DIM(SORT_TIME, "time", sort_time),
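
The new "cgroup" sort key compares raw cgroup ids and only resolves a human-readable name at print time, falling back to "unknown" for an unresolvable id and "N/A" when the sample carried none. Roughly, in Python (hypothetical id-to-name cache):

cgroup_names = {42: '/user.slice'}   # hypothetical id -> path cache

def cgroup_display(cgroup_id):
    if not cgroup_id:
        return 'N/A'
    return cgroup_names.get(cgroup_id, 'unknown')

entries = [{'cgroup': 42}, {'cgroup': 0}, {'cgroup': 7}]
for e in sorted(entries, key=lambda e: e['cgroup'], reverse=True):
    print(cgroup_display(e['cgroup']))
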
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 6c862d62d052..cfa6ac6f7d06 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -101,6 +101,7 @@ struct hist_entry {
struct thread *thread;
struct comm *comm;
struct namespace_id cgroup_id;
+ u64 cgroup;
u64 ip;
u64 transaction;
s32 socket;
@@ -224,6 +225,7 @@ enum sort_type {
SORT_TRACE,
SORT_SYM_SIZE,
SORT_DSO_SIZE,
+ SORT_CGROUP,
SORT_CGROUP_ID,
SORT_SYM_IPC_NULL,
SORT_TIME,
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 76c6052b12e2..9e757d18d713 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -115,11 +115,11 @@ static void aggr_printout(struct perf_stat_config *config,
fprintf(config->output, "S%d-D%d-C%*d%s",
cpu_map__id_to_socket(id),
cpu_map__id_to_die(id),
- config->csv_output ? 0 : -5,
+ config->csv_output ? 0 : -3,
cpu_map__id_to_cpu(id), config->csv_sep);
} else {
- fprintf(config->output, "CPU%*d%s ",
- config->csv_output ? 0 : -5,
+ fprintf(config->output, "CPU%*d%s",
+ config->csv_output ? 0 : -7,
evsel__cpus(evsel)->map[id],
config->csv_sep);
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 1965aefccb02..be5b493f8284 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -704,9 +704,15 @@ void symsrc__destroy(struct symsrc *ss)
close(ss->fd);
}
-bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
- return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
+ /*
+ * Usually vmlinux is an ELF file with type ET_EXEC for most
+ * architectures; however, the Arm64 kernel is linked with the
+ * '-share' option, so we also need to check for type ET_DYN.
+ */
+ return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
+ ehdr.e_type == ET_DYN;
}
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
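
The e_type test above decides whether symbol addresses need adjusting. A standalone Python sketch that reads e_type straight out of an ELF header (assumes a little-endian file; offsets per the ELF spec, illustrative only):

import struct

ET_REL, ET_EXEC, ET_DYN = 1, 2, 3

def needs_adjust_symbols(path):
    with open(path, 'rb') as f:
        ident = f.read(16)                          # e_ident
        assert ident[:4] == b'\x7fELF'
        e_type = struct.unpack('<H', f.read(2))[0]  # assumes little-endian
    return e_type in (ET_EXEC, ET_REL, ET_DYN)

print(needs_adjust_symbols('/bin/true'))  # ET_DYN on most modern distros
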
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index 10f1ec3e0349..b916afb95ec5 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -73,6 +73,7 @@ struct symbol_conf {
const char *symfs;
int res_sample;
int pad_output_len_dso;
+ int group_sort_idx;
};
extern struct symbol_conf symbol_conf;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 3f28af39f9c6..a661b122d9d8 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -16,6 +16,7 @@
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
+#include "util/cgroup.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -414,6 +415,127 @@ out:
return rc;
}
+#ifdef HAVE_FILE_HANDLE
+static int perf_event__synthesize_cgroup(struct perf_tool *tool,
+ union perf_event *event,
+ char *path, size_t mount_len,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
+ size_t path_len = strlen(path) - mount_len + 1;
+ struct {
+ struct file_handle fh;
+ uint64_t cgroup_id;
+ } handle;
+ int mount_id;
+
+ while (path_len % sizeof(u64))
+ path[mount_len + path_len++] = '\0';
+
+ memset(&event->cgroup, 0, event_size);
+
+ event->cgroup.header.type = PERF_RECORD_CGROUP;
+ event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
+
+ handle.fh.handle_bytes = sizeof(handle.cgroup_id);
+ if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
+ pr_debug("stat failed: %s\n", path);
+ return -1;
+ }
+
+ event->cgroup.id = handle.cgroup_id;
+ strncpy(event->cgroup.path, path + mount_len, path_len);
+ memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
+ pr_debug("process synth event failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
+ union perf_event *event,
+ char *path, size_t mount_len,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ size_t pos = strlen(path);
+ DIR *d;
+ struct dirent *dent;
+ int ret = 0;
+
+ if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
+ process, machine) < 0)
+ return -1;
+
+ d = opendir(path);
+ if (d == NULL) {
+ pr_debug("failed to open directory: %s\n", path);
+ return -1;
+ }
+
+ while ((dent = readdir(d)) != NULL) {
+ if (dent->d_type != DT_DIR)
+ continue;
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ /* any sane path should be less than PATH_MAX */
+ if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
+ continue;
+
+ if (path[pos - 1] != '/')
+ strcat(path, "/");
+ strcat(path, dent->d_name);
+
+ ret = perf_event__walk_cgroup_tree(tool, event, path,
+ mount_len, process, machine);
+ if (ret < 0)
+ break;
+
+ path[pos] = '\0';
+ }
+
+ closedir(d);
+ return ret;
+}
+
+int perf_event__synthesize_cgroups(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event event;
+ char cgrp_root[PATH_MAX];
+ size_t mount_len; /* length of mount point in the path */
+
+ if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
+ pr_debug("cannot find cgroup mount point\n");
+ return -1;
+ }
+
+ mount_len = strlen(cgrp_root);
+ /* make sure the path starts with a slash (after mount point) */
+ strcat(cgrp_root, "/");
+
+ if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
+ process, machine) < 0)
+ return -1;
+
+ return 0;
+}
+#else
+int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return -1;
+}
+#endif
+
int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
struct machine *machine)
{
@@ -1230,6 +1352,9 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_PHYS_ADDR)
result += sizeof(u64);
+ if (type & PERF_SAMPLE_CGROUP)
+ result += sizeof(u64);
+
if (type & PERF_SAMPLE_AUX) {
result += sizeof(u64);
result += sample->aux_sample.size;
@@ -1404,6 +1529,11 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
array++;
}
+ if (type & PERF_SAMPLE_CGROUP) {
+ *array = sample->cgroup;
+ array++;
+ }
+
if (type & PERF_SAMPLE_AUX) {
sz = sample->aux_sample.size;
*array++ = sz;
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
index baead0cdc381..e7a3e9589738 100644
--- a/tools/perf/util/synthetic-events.h
+++ b/tools/perf/util/synthetic-events.h
@@ -45,6 +45,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handl
int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data);
int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_namespaces(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_cgroups(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample);
int perf_event__synthesize_stat_config(struct perf_tool *tool, struct perf_stat_config *config, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process, bool attrs);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 2abbf668b8de..3fb67bd31e4a 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -46,6 +46,7 @@ struct perf_tool {
mmap2,
comm,
namespaces,
+ cgroup,
fork,
exit,
lost,
@@ -78,6 +79,7 @@ struct perf_tool {
bool ordered_events;
bool ordering_requires_timestamps;
bool namespace_events;
+ bool cgroup_events;
bool no_warn;
enum show_feature_header show_feat_hdr;
};
diff --git a/tools/power/acpi/.gitignore b/tools/power/acpi/.gitignore
index f698a0e5bfa6..0b319fc8bb17 100644
--- a/tools/power/acpi/.gitignore
+++ b/tools/power/acpi/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
/acpidbg
/acpidump
/ec
diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore
index 1f9977cc609c..7677329c42a6 100644
--- a/tools/power/cpupower/.gitignore
+++ b/tools/power/cpupower/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
.libs
libcpupower.so
libcpupower.so.*
diff --git a/tools/power/x86/intel-speed-select/.gitignore b/tools/power/x86/intel-speed-select/.gitignore
index f61145925ce9..a814f89fe75f 100644
--- a/tools/power/x86/intel-speed-select/.gitignore
+++ b/tools/power/x86/intel-speed-select/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
include/
intel-speed-select
diff --git a/tools/power/x86/turbostat/.gitignore b/tools/power/x86/turbostat/.gitignore
index 7521370d3568..e13109b43cd1 100644
--- a/tools/power/x86/turbostat/.gitignore
+++ b/tools/power/x86/turbostat/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
turbostat
diff --git a/tools/spi/.gitignore b/tools/spi/.gitignore
index 4280576397e8..14ddba3d2195 100644
--- a/tools/spi/.gitignore
+++ b/tools/spi/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
spidev_fdx
spidev_test
diff --git a/tools/testing/kunit/.gitattributes b/tools/testing/kunit/.gitattributes
new file mode 100644
index 000000000000..5b7da1fc3b8f
--- /dev/null
+++ b/tools/testing/kunit/.gitattributes
@@ -0,0 +1 @@
+test_data/* binary
diff --git a/tools/testing/kunit/.gitignore b/tools/testing/kunit/.gitignore
index c791ff59a37a..1c63e31f7edf 100644
--- a/tools/testing/kunit/.gitignore
+++ b/tools/testing/kunit/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
\ No newline at end of file
diff --git a/tools/testing/kunit/configs/broken_on_uml.config b/tools/testing/kunit/configs/broken_on_uml.config
new file mode 100644
index 000000000000..239b9f03da2c
--- /dev/null
+++ b/tools/testing/kunit/configs/broken_on_uml.config
@@ -0,0 +1,41 @@
+# These are currently broken on UML and prevent allyesconfig from building
+# CONFIG_STATIC_LINK is not set
+# CONFIG_UML_NET_VECTOR is not set
+# CONFIG_UML_NET_VDE is not set
+# CONFIG_UML_NET_PCAP is not set
+# CONFIG_NET_PTP_CLASSIFY is not set
+# CONFIG_IP_VS is not set
+# CONFIG_BRIDGE_EBT_BROUTE is not set
+# CONFIG_BRIDGE_EBT_T_FILTER is not set
+# CONFIG_BRIDGE_EBT_T_NAT is not set
+# CONFIG_MTD_NAND_CADENCE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_BLK_DEV_NULL_BLK is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+# CONFIG_NULL_TTY is not set
+# CONFIG_PTP_1588_CLOCK is not set
+# CONFIG_PINCTRL_EQUILIBRIUM is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_COMEDI is not set
+# CONFIG_XIL_AXIS_FIFO is not set
+# CONFIG_EXFAT_FS is not set
+# CONFIG_STM_DUMMY is not set
+# CONFIG_FSI_MASTER_ASPEED is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_UBIFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
+# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
+# CONFIG_KCOV is not set
+# CONFIG_LKDTM is not set
+# CONFIG_REED_SOLOMON_TEST is not set
+# CONFIG_TEST_RHASHTABLE is not set
+# CONFIG_TEST_MEMINIT is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+# CONFIG_DEBUG_INFO_BTF is not set
+# CONFIG_PTP_1588_CLOCK_INES is not set
+# CONFIG_QCOM_CPR is not set
+# CONFIG_RESET_BRCMSTB_RESCAL is not set
+# CONFIG_RESET_INTEL_GW is not set
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 180ad1e1b04f..7dca74774dd2 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -22,7 +22,9 @@ import kunit_parser
KunitResult = namedtuple('KunitResult', ['status','result'])
-KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs', 'build_dir', 'defconfig'])
+KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
+ 'build_dir', 'defconfig',
+ 'alltests', 'make_options'])
KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
@@ -47,7 +49,7 @@ def get_kernel_root_path():
def run_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitRequest) -> KunitResult:
config_start = time.time()
- success = linux.build_reconfig(request.build_dir)
+ success = linux.build_reconfig(request.build_dir, request.make_options)
config_end = time.time()
if not success:
return KunitResult(KunitStatus.CONFIG_FAILURE, 'could not configure kernel')
@@ -55,24 +57,24 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
build_start = time.time()
- success = linux.build_um_kernel(request.jobs, request.build_dir)
+ success = linux.build_um_kernel(request.alltests,
+ request.jobs,
+ request.build_dir,
+ request.make_options)
build_end = time.time()
if not success:
return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel')
kunit_parser.print_with_timestamp('Starting KUnit Kernel ...')
test_start = time.time()
-
- test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
- [],
- 'Tests not Parsed.')
+ kunit_output = linux.run_kernel(
+ timeout=None if request.alltests else request.timeout,
+ build_dir=request.build_dir)
if request.raw_output:
- kunit_parser.raw_output(
- linux.run_kernel(timeout=request.timeout,
- build_dir=request.build_dir))
+ raw_output = kunit_parser.raw_output(kunit_output)
+ isolated = list(kunit_parser.isolate_kunit_output(raw_output))
+ test_result = kunit_parser.parse_test_result(isolated)
else:
- kunit_output = linux.run_kernel(timeout=request.timeout,
- build_dir=request.build_dir)
test_result = kunit_parser.parse_run_tests(kunit_output)
test_end = time.time()
@@ -120,6 +122,14 @@ def main(argv, linux=None):
help='Uses a default .kunitconfig.',
action='store_true')
+ run_parser.add_argument('--alltests',
+ help='Run all KUnit tests through allyesconfig',
+ action='store_true')
+
+ run_parser.add_argument('--make_options',
+ help='X=Y make option, can be repeated.',
+ action='append')
+
cli_args = parser.parse_args(argv)
if cli_args.subcommand == 'run':
@@ -143,7 +153,9 @@ def main(argv, linux=None):
cli_args.timeout,
cli_args.jobs,
cli_args.build_dir,
- cli_args.defconfig)
+ cli_args.defconfig,
+ cli_args.alltests,
+ cli_args.make_options)
result = run_tests(linux, request)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
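
--make_options relies on argparse's action='append', so the flag may be repeated and arrives as a list that is spliced into each make command. Minimal demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--make_options', action='append',
                    help='X=Y make option, can be repeated.')
args = parser.parse_args(['--make_options', 'CC=clang',
                          '--make_options', 'V=1'])

command = ['make', 'ARCH=um', 'olddefconfig']
if args.make_options:
    command.extend(args.make_options)
print(command)  # ['make', 'ARCH=um', 'olddefconfig', 'CC=clang', 'V=1']
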
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
index ebf3942b23f5..e75063d603b5 100644
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -9,16 +9,18 @@
import collections
import re
-CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_\w+ is not set$'
-CONFIG_PATTERN = r'^CONFIG_\w+=\S+$'
-
-KconfigEntryBase = collections.namedtuple('KconfigEntry', ['raw_entry'])
+CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
+CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+)$'
+KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
class KconfigEntry(KconfigEntryBase):
def __str__(self) -> str:
- return self.raw_entry
+ if self.value == 'n':
+ return r'# CONFIG_%s is not set' % (self.name)
+ else:
+ return r'CONFIG_%s=%s' % (self.name, self.value)
class KconfigParseError(Exception):
@@ -38,7 +40,17 @@ class Kconfig(object):
self._entries.append(entry)
def is_subset_of(self, other: 'Kconfig') -> bool:
- return self.entries().issubset(other.entries())
+ for a in self.entries():
+ found = False
+ for b in other.entries():
+ if a.name != b.name:
+ continue
+ if a.value != b.value:
+ return False
+ found = True
+ if a.value != 'n' and found == False:
+ return False
+ return True
def write_to_file(self, path: str) -> None:
with open(path, 'w') as f:
@@ -54,9 +66,20 @@ class Kconfig(object):
line = line.strip()
if not line:
continue
- elif config_matcher.match(line) or is_not_set_matcher.match(line):
- self._entries.append(KconfigEntry(line))
- elif line[0] == '#':
+
+ match = config_matcher.match(line)
+ if match:
+ entry = KconfigEntry(match.group(1), match.group(2))
+ self.add_entry(entry)
+ continue
+
+ empty_match = is_not_set_matcher.match(line)
+ if empty_match:
+ entry = KconfigEntry(empty_match.group(1), 'n')
+ self.add_entry(entry)
+ continue
+
+ if line[0] == '#':
continue
else:
raise KconfigParseError('Failed to parse: ' + line)
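
KconfigEntry now stores a parsed (name, value) pair, normalizing '# CONFIG_FOO is not set' to the value 'n'; that is what allows is_subset_of() to compare options by name. The parse/unparse round trip in isolation:

import re

CONFIG_PATTERN = re.compile(r'^CONFIG_(\w+)=(\S+)$')
NOT_SET_PATTERN = re.compile(r'^# CONFIG_(\w+) is not set$')

def parse_line(line):
    m = CONFIG_PATTERN.match(line)
    if m:
        return m.group(1), m.group(2)
    m = NOT_SET_PATTERN.match(line)
    if m:
        return m.group(1), 'n'
    return None

def unparse(name, value):
    if value == 'n':
        return '# CONFIG_%s is not set' % name
    return 'CONFIG_%s=%s' % (name, value)

entry = parse_line('# CONFIG_MK8 is not set')
print(entry)            # ('MK8', 'n')
print(unparse(*entry))  # round-trips to the original line
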
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index d99ae75ef72f..63dbda2d029f 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -10,11 +10,16 @@
import logging
import subprocess
import os
+import signal
+
+from contextlib import ExitStack
import kunit_config
+import kunit_parser
KCONFIG_PATH = '.config'
kunitconfig_path = '.kunitconfig'
+BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config'
class ConfigError(Exception):
"""Represents an error trying to configure the Linux kernel."""
@@ -35,19 +40,40 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output)
- def make_olddefconfig(self, build_dir):
+ def make_olddefconfig(self, build_dir, make_options):
command = ['make', 'ARCH=um', 'olddefconfig']
+ if make_options:
+ command.extend(make_options)
if build_dir:
command += ['O=' + build_dir]
try:
- subprocess.check_output(command)
+ subprocess.check_output(command, stderr=subprocess.PIPE)
except OSError as e:
raise ConfigError('Could not call make command: ' + e)
except subprocess.CalledProcessError as e:
raise ConfigError(e.output)
- def make(self, jobs, build_dir):
+ def make_allyesconfig(self):
+ kunit_parser.print_with_timestamp(
+ 'Enabling all CONFIGs for UML...')
+ process = subprocess.Popen(
+ ['make', 'ARCH=um', 'allyesconfig'],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.STDOUT)
+ process.wait()
+ kunit_parser.print_with_timestamp(
+ 'Disabling broken configs to run KUnit tests...')
+ with ExitStack() as es:
+ config = open(KCONFIG_PATH, 'a')
+ disable = open(BROKEN_ALLCONFIG_PATH, 'r').read()
+ config.write(disable)
+ kunit_parser.print_with_timestamp(
+ 'Starting Kernel with all configs takes a few minutes...')
+
+ def make(self, jobs, build_dir, make_options):
command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
+ if make_options:
+ command.extend(make_options)
if build_dir:
command += ['O=' + build_dir]
try:
@@ -57,18 +83,16 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise BuildError(e.output)
- def linux_bin(self, params, timeout, build_dir):
+ def linux_bin(self, params, timeout, build_dir, outfile):
"""Runs the Linux UML binary. Must be named 'linux'."""
linux_bin = './linux'
if build_dir:
linux_bin = os.path.join(build_dir, 'linux')
- process = subprocess.Popen(
- [linux_bin] + params,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- process.wait(timeout=timeout)
- return process
+ with open(outfile, 'w') as output:
+ process = subprocess.Popen([linux_bin] + params,
+ stdout=output,
+ stderr=subprocess.STDOUT)
+ process.wait(timeout)
def get_kconfig_path(build_dir):
@@ -84,6 +108,7 @@ class LinuxSourceTree(object):
self._kconfig = kunit_config.Kconfig()
self._kconfig.read_from_file(kunitconfig_path)
self._ops = LinuxSourceTreeOperations()
+ signal.signal(signal.SIGINT, self.signal_handler)
def clean(self):
try:
@@ -107,19 +132,19 @@ class LinuxSourceTree(object):
return False
return True
- def build_config(self, build_dir):
+ def build_config(self, build_dir, make_options):
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
os.mkdir(build_dir)
self._kconfig.write_to_file(kconfig_path)
try:
- self._ops.make_olddefconfig(build_dir)
+ self._ops.make_olddefconfig(build_dir, make_options)
except ConfigError as e:
logging.error(e)
return False
return self.validate_config(build_dir)
- def build_reconfig(self, build_dir):
+ def build_reconfig(self, build_dir, make_options):
"""Creates a new .config if it is not a subset of the .kunitconfig."""
kconfig_path = get_kconfig_path(build_dir)
if os.path.exists(kconfig_path):
@@ -128,26 +153,33 @@ class LinuxSourceTree(object):
if not self._kconfig.is_subset_of(existing_kconfig):
print('Regenerating .config ...')
os.remove(kconfig_path)
- return self.build_config(build_dir)
+ return self.build_config(build_dir, make_options)
else:
return True
else:
print('Generating .config ...')
- return self.build_config(build_dir)
+ return self.build_config(build_dir, make_options)
- def build_um_kernel(self, jobs, build_dir):
+ def build_um_kernel(self, alltests, jobs, build_dir, make_options):
+ if alltests:
+ self._ops.make_allyesconfig()
try:
- self._ops.make_olddefconfig(build_dir)
- self._ops.make(jobs, build_dir)
+ self._ops.make_olddefconfig(build_dir, make_options)
+ self._ops.make(jobs, build_dir, make_options)
except (ConfigError, BuildError) as e:
logging.error(e)
return False
return self.validate_config(build_dir)
- def run_kernel(self, args=[], timeout=None, build_dir=''):
- args.extend(['mem=256M'])
- process = self._ops.linux_bin(args, timeout, build_dir)
- with open(os.path.join(build_dir, 'test.log'), 'w') as f:
- for line in process.stdout:
- f.write(line.rstrip().decode('ascii') + '\n')
- yield line.rstrip().decode('ascii')
+ def run_kernel(self, args=[], build_dir='', timeout=None):
+ args.extend(['mem=1G'])
+ outfile = 'test.log'
+ self._ops.linux_bin(args, timeout, build_dir, outfile)
+ subprocess.call(['stty', 'sane'])
+ with open(outfile, 'r') as file:
+ for line in file:
+ yield line
+
+ def signal_handler(self, sig, frame):
+ logging.error('Build interruption occurred. Cleaning console.')
+ subprocess.call(['stty', 'sane'])
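
run_kernel() now sends the UML binary's output to a file and replays it line by line afterwards, with 'stty sane' undoing any terminal modes UML left behind. The run-to-file-then-yield shape, sketched with a portable command standing in for ./linux:

import subprocess

def run_kernel(cmd, outfile='test.log', timeout=None):
    with open(outfile, 'w') as output:
        process = subprocess.Popen(cmd, stdout=output,
                                   stderr=subprocess.STDOUT)
        process.wait(timeout)
    # the real code also runs ['stty', 'sane'] here to fix the console
    with open(outfile, 'r') as f:
        for line in f:
            yield line

for line in run_kernel(['echo', 'TAP version 14']):
    print(line, end='')
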
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 4ffbae0f6732..64aac9dcd431 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -46,23 +46,26 @@ class TestStatus(Enum):
TEST_CRASHED = auto()
NO_TESTS = auto()
-kunit_start_re = re.compile(r'^TAP version [0-9]+$')
-kunit_end_re = re.compile('List of all partitions:')
+kunit_start_re = re.compile(r'TAP version [0-9]+$')
+kunit_end_re = re.compile('(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
def isolate_kunit_output(kernel_output):
started = False
for line in kernel_output:
- if kunit_start_re.match(line):
+ if kunit_start_re.search(line):
+ prefix_len = len(line.split('TAP version')[0])
started = True
- yield line
- elif kunit_end_re.match(line):
+ yield line[prefix_len:] if prefix_len > 0 else line
+ elif kunit_end_re.search(line):
break
elif started:
- yield line
+ yield line[prefix_len:] if prefix_len > 0 else line
def raw_output(kernel_output):
for line in kernel_output:
print(line)
+ yield line
DIVIDER = '=' * 60
@@ -91,7 +94,7 @@ def print_log(log):
for m in log:
print_with_timestamp(m)
-TAP_ENTRIES = re.compile(r'^(TAP|\t?ok|\t?not ok|\t?[0-9]+\.\.[0-9]+|\t?#).*$')
+TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
def consume_non_diagnositic(lines: List[str]) -> None:
while lines and not TAP_ENTRIES.match(lines[0]):
@@ -104,22 +107,20 @@ def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None:
OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
-OK_NOT_OK_SUBTEST = re.compile(r'^\t(ok|not ok) [0-9]+ - (.*)$')
+OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) [0-9]+ - (.*)$')
-def parse_ok_not_ok_test_case(lines: List[str],
- test_case: TestCase,
- expecting_test_case: bool) -> bool:
+def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
save_non_diagnositic(lines, test_case)
if not lines:
- if expecting_test_case:
- test_case.status = TestStatus.TEST_CRASHED
- return True
- else:
- return False
+ test_case.status = TestStatus.TEST_CRASHED
+ return True
line = lines[0]
match = OK_NOT_OK_SUBTEST.match(line)
+ while not match and lines:
+ line = lines.pop(0)
+ match = OK_NOT_OK_SUBTEST.match(line)
if match:
test_case.log.append(lines.pop(0))
test_case.name = match.group(2)
@@ -133,7 +134,7 @@ def parse_ok_not_ok_test_case(lines: List[str],
else:
return False
-SUBTEST_DIAGNOSTIC = re.compile(r'^\t# .*?: (.*)$')
+SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# .*?: (.*)$')
DIAGNOSTIC_CRASH_MESSAGE = 'kunit test case crashed!'
def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
@@ -150,17 +151,17 @@ def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
else:
return False
-def parse_test_case(lines: List[str], expecting_test_case: bool) -> TestCase:
+def parse_test_case(lines: List[str]) -> TestCase:
test_case = TestCase()
save_non_diagnositic(lines, test_case)
while parse_diagnostic(lines, test_case):
pass
- if parse_ok_not_ok_test_case(lines, test_case, expecting_test_case):
+ if parse_ok_not_ok_test_case(lines, test_case):
return test_case
else:
return None
-SUBTEST_HEADER = re.compile(r'^\t# Subtest: (.*)$')
+SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
def parse_subtest_header(lines: List[str]) -> str:
consume_non_diagnositic(lines)
@@ -173,7 +174,7 @@ def parse_subtest_header(lines: List[str]) -> str:
else:
return None
-SUBTEST_PLAN = re.compile(r'\t[0-9]+\.\.([0-9]+)')
+SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
def parse_subtest_plan(lines: List[str]) -> int:
consume_non_diagnositic(lines)
@@ -234,11 +235,11 @@ def parse_test_suite(lines: List[str]) -> TestSuite:
expected_test_case_num = parse_subtest_plan(lines)
if not expected_test_case_num:
return None
- test_case = parse_test_case(lines, expected_test_case_num > 0)
- expected_test_case_num -= 1
- while test_case:
+ while expected_test_case_num > 0:
+ test_case = parse_test_case(lines)
+ if not test_case:
+ break
test_suite.cases.append(test_case)
- test_case = parse_test_case(lines, expected_test_case_num > 0)
expected_test_case_num -= 1
if parse_ok_not_ok_test_suite(lines, test_suite):
test_suite.status = bubble_up_test_case_errors(test_suite)
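
isolate_kunit_output() now finds 'TAP version' with a search rather than a whole-line match, measures whatever prefix precedes it (printk timestamps, caller ids), and strips that many characters from each subsequent line. The core of that logic in isolation (end-of-output detection omitted):

import re

START = re.compile(r'TAP version [0-9]+$')

def isolate(kernel_output):
    prefix_len = 0
    started = False
    for line in kernel_output:
        if START.search(line):
            prefix_len = len(line.split('TAP version')[0])
            started = True
        if started:
            yield line[prefix_len:] if prefix_len else line

log = ['[ 0.060000] noise',
       '[ 0.060000] TAP version 14',
       '[ 0.060000] ok 1 - example']
print(list(isolate(log)))  # prefix stripped from the TAP lines
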
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index cba97756ac4a..984588d6ba95 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -37,7 +37,7 @@ class KconfigTest(unittest.TestCase):
self.assertTrue(kconfig0.is_subset_of(kconfig0))
kconfig1 = kunit_config.Kconfig()
- kconfig1.add_entry(kunit_config.KconfigEntry('CONFIG_TEST=y'))
+ kconfig1.add_entry(kunit_config.KconfigEntry('TEST', 'y'))
self.assertTrue(kconfig1.is_subset_of(kconfig1))
self.assertTrue(kconfig0.is_subset_of(kconfig1))
self.assertFalse(kconfig1.is_subset_of(kconfig0))
@@ -51,15 +51,15 @@ class KconfigTest(unittest.TestCase):
expected_kconfig = kunit_config.Kconfig()
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_UML=y'))
+ kunit_config.KconfigEntry('UML', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_MMU=y'))
+ kunit_config.KconfigEntry('MMU', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_TEST=y'))
+ kunit_config.KconfigEntry('TEST', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_EXAMPLE_TEST=y'))
+ kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('# CONFIG_MK8 is not set'))
+ kunit_config.KconfigEntry('MK8', 'n'))
self.assertEqual(kconfig.entries(), expected_kconfig.entries())
@@ -68,15 +68,15 @@ class KconfigTest(unittest.TestCase):
expected_kconfig = kunit_config.Kconfig()
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_UML=y'))
+ kunit_config.KconfigEntry('UML', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_MMU=y'))
+ kunit_config.KconfigEntry('MMU', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_TEST=y'))
+ kunit_config.KconfigEntry('TEST', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('CONFIG_EXAMPLE_TEST=y'))
+ kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
expected_kconfig.add_entry(
- kunit_config.KconfigEntry('# CONFIG_MK8 is not set'))
+ kunit_config.KconfigEntry('MK8', 'n'))
expected_kconfig.write_to_file(kconfig_path)
@@ -108,6 +108,36 @@ class KUnitParserTest(unittest.TestCase):
self.assertContains('ok 1 - example', result)
file.close()
+ def test_output_with_prefix_isolated_correctly(self):
+ log_path = get_absolute_path(
+ 'test_data/test_pound_sign.log')
+ with open(log_path) as file:
+ result = kunit_parser.isolate_kunit_output(file.readlines())
+ self.assertContains('TAP version 14\n', result)
+ self.assertContains(' # Subtest: kunit-resource-test', result)
+ self.assertContains(' 1..5', result)
+ self.assertContains(' ok 1 - kunit_resource_test_init_resources', result)
+ self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result)
+ self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result)
+ self.assertContains(' foo bar #', result)
+ self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result)
+ self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result)
+ self.assertContains('ok 1 - kunit-resource-test', result)
+ self.assertContains(' foo bar # non-kunit output', result)
+ self.assertContains(' # Subtest: kunit-try-catch-test', result)
+ self.assertContains(' 1..2', result)
+ self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch',
+ result)
+ self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch',
+ result)
+ self.assertContains('ok 2 - kunit-try-catch-test', result)
+ self.assertContains(' # Subtest: string-stream-test', result)
+ self.assertContains(' 1..3', result)
+ self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result)
+ self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result)
+ self.assertContains(' ok 3 - string_stream_test_get_string', result)
+ self.assertContains('ok 3 - string-stream-test', result)
+
def test_parse_successful_test_log(self):
all_passed_log = get_absolute_path(
'test_data/test_is_test_passed-all_passed.log')
@@ -150,6 +180,45 @@ class KUnitParserTest(unittest.TestCase):
result.status)
file.close()
+ def test_ignores_prefix_printk_time(self):
+ prefix_log = get_absolute_path(
+ 'test_data/test_config_printk_time.log')
+ with open(prefix_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual('kunit-resource-test', result.suites[0].name)
+
+ def test_ignores_multiple_prefixes(self):
+ prefix_log = get_absolute_path(
+ 'test_data/test_multiple_prefixes.log')
+ with open(prefix_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual('kunit-resource-test', result.suites[0].name)
+
+ def test_prefix_mixed_kernel_output(self):
+ mixed_prefix_log = get_absolute_path(
+ 'test_data/test_interrupted_tap_output.log')
+ with open(mixed_prefix_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual('kunit-resource-test', result.suites[0].name)
+
+ def test_prefix_poundsign(self):
+ pound_log = get_absolute_path('test_data/test_pound_sign.log')
+ with open(pound_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual('kunit-resource-test', result.suites[0].name)
+
+ def test_kernel_panic_end(self):
+ panic_log = get_absolute_path('test_data/test_kernel_panic_interrupt.log')
+ with open(panic_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual('kunit-resource-test', result.suites[0].name)
+
+ def test_pound_no_prefix(self):
+ pound_log = get_absolute_path('test_data/test_pound_no_prefix.log')
+ with open(pound_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.assertEqual('kunit-resource-test', result.suites[0].name)
+
class StrContains(str):
def __eq__(self, other):
return self in other
@@ -174,7 +243,8 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run'], self.linux_source_mock)
assert self.linux_source_mock.build_reconfig.call_count == 1
assert self.linux_source_mock.run_kernel.call_count == 1
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ build_dir='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_fail(self):
@@ -189,25 +259,27 @@ class KUnitMainTest(unittest.TestCase):
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output'], self.linux_source_mock)
+ with self.assertRaises(SystemExit) as e:
+ kunit.main(['run', '--raw_output'], self.linux_source_mock)
+ assert type(e.exception) == SystemExit
+ assert e.exception.code == 1
assert self.linux_source_mock.build_reconfig.call_count == 1
assert self.linux_source_mock.run_kernel.call_count == 1
- for kall in self.print_mock.call_args_list:
- assert kall != mock.call(StrContains('Testing complete.'))
- assert kall != mock.call(StrContains(' 0 tests run'))
def test_run_timeout(self):
timeout = 3453
kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
assert self.linux_source_mock.build_reconfig.call_count == 1
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ build_dir='', timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_builddir(self):
build_dir = '.kunit'
kunit.main(['run', '--build_dir', build_dir], self.linux_source_mock)
assert self.linux_source_mock.build_reconfig.call_count == 1
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+ build_dir=build_dir, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
if __name__ == '__main__':
diff --git a/tools/testing/kunit/test_data/test_config_printk_time.log b/tools/testing/kunit/test_data/test_config_printk_time.log
new file mode 100644
index 000000000000..c02ca773946d
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_config_printk_time.log
@@ -0,0 +1,31 @@
+[ 0.060000] printk: console [mc-1] enabled
+[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0
+[ 0.060000] TAP version 14
+[ 0.060000] # Subtest: kunit-resource-test
+[ 0.060000] 1..5
+[ 0.060000] ok 1 - kunit_resource_test_init_resources
+[ 0.060000] ok 2 - kunit_resource_test_alloc_resource
+[ 0.060000] ok 3 - kunit_resource_test_destroy_resource
+[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources
+[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering
+[ 0.060000] ok 1 - kunit-resource-test
+[ 0.060000] # Subtest: kunit-try-catch-test
+[ 0.060000] 1..2
+[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch
+[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch
+[ 0.060000] ok 2 - kunit-try-catch-test
+[ 0.060000] # Subtest: string-stream-test
+[ 0.060000] 1..3
+[ 0.060000] ok 1 - string_stream_test_empty_on_creation
+[ 0.060000] ok 2 - string_stream_test_not_empty_after_add
+[ 0.060000] ok 3 - string_stream_test_get_string
+[ 0.060000] ok 3 - string-stream-test
+[ 0.060000] List of all partitions:
+[ 0.060000] No filesystem could mount root, tried:
+[ 0.060000]
+[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
+[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2
+[ 0.060000] Stack:
+[ 0.060000] 602086f8 601bc260 705c0000 705c0000
+[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab
+[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_interrupted_tap_output.log b/tools/testing/kunit/test_data/test_interrupted_tap_output.log
new file mode 100644
index 000000000000..5c73fb3a1c6f
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_interrupted_tap_output.log
@@ -0,0 +1,37 @@
+[ 0.060000] printk: console [mc-1] enabled
+[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0
+[ 0.060000] TAP version 14
+[ 0.060000] # Subtest: kunit-resource-test
+[ 0.060000] 1..5
+[ 0.060000] ok 1 - kunit_resource_test_init_resources
+[ 0.060000] ok 2 - kunit_resource_test_alloc_resource
+[ 0.060000] ok 3 - kunit_resource_test_destroy_resource
+[ 0.060000] kAFS: Red Hat AFS client v0.1 registering.
+[ 0.060000] FS-Cache: Netfs 'afs' registered for caching
+[ 0.060000] *** VALIDATE kAFS ***
+[ 0.060000] Btrfs loaded, crc32c=crc32c-generic, debug=on, assert=on, integrity-checker=on, ref-verify=on
+[ 0.060000] BTRFS: selftest: sectorsize: 4096 nodesize: 4096
+[ 0.060000] BTRFS: selftest: running btrfs free space cache tests
+[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources
+[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering
+[ 0.060000] ok 1 - kunit-resource-test
+[ 0.060000] # Subtest: kunit-try-catch-test
+[ 0.060000] 1..2
+[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch
+[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch
+[ 0.060000] ok 2 - kunit-try-catch-test
+[ 0.060000] # Subtest: string-stream-test
+[ 0.060000] 1..3
+[ 0.060000] ok 1 - string_stream_test_empty_on_creation
+[ 0.060000] ok 2 - string_stream_test_not_empty_after_add
+[ 0.060000] ok 3 - string_stream_test_get_string
+[ 0.060000] ok 3 - string-stream-test
+[ 0.060000] List of all partitions:
+[ 0.060000] No filesystem could mount root, tried:
+[ 0.060000]
+[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
+[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2
+[ 0.060000] Stack:
+[ 0.060000] 602086f8 601bc260 705c0000 705c0000
+[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab
+[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log
new file mode 100644
index 000000000000..c045eee75f27
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log
@@ -0,0 +1,25 @@
+[ 0.060000] printk: console [mc-1] enabled
+[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0
+[ 0.060000] TAP version 14
+[ 0.060000] # Subtest: kunit-resource-test
+[ 0.060000] 1..5
+[ 0.060000] ok 1 - kunit_resource_test_init_resources
+[ 0.060000] ok 2 - kunit_resource_test_alloc_resource
+[ 0.060000] ok 3 - kunit_resource_test_destroy_resource
+[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources
+[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering
+[ 0.060000] ok 1 - kunit-resource-test
+[ 0.060000] # Subtest: kunit-try-catch-test
+[ 0.060000] 1..2
+[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch
+[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch
+[ 0.060000] ok 2 - kunit-try-catch-test
+[ 0.060000] # Subtest: string-stream-test
+[ 0.060000] 1..3
+[ 0.060000] ok 1 - string_stream_test_empty_on_creation
+[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
+[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2
+[ 0.060000] Stack:
+[ 0.060000] 602086f8 601bc260 705c0000 705c0000
+[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab
+[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_multiple_prefixes.log b/tools/testing/kunit/test_data/test_multiple_prefixes.log
new file mode 100644
index 000000000000..bc48407dcc36
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_multiple_prefixes.log
@@ -0,0 +1,31 @@
+[ 0.060000][ T1] printk: console [mc-1] enabled
+[ 0.060000][ T1] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0
+[ 0.060000][ T1] TAP version 14
+[ 0.060000][ T1] # Subtest: kunit-resource-test
+[ 0.060000][ T1] 1..5
+[ 0.060000][ T1] ok 1 - kunit_resource_test_init_resources
+[ 0.060000][ T1] ok 2 - kunit_resource_test_alloc_resource
+[ 0.060000][ T1] ok 3 - kunit_resource_test_destroy_resource
+[ 0.060000][ T1] ok 4 - kunit_resource_test_cleanup_resources
+[ 0.060000][ T1] ok 5 - kunit_resource_test_proper_free_ordering
+[ 0.060000][ T1] ok 1 - kunit-resource-test
+[ 0.060000][ T1] # Subtest: kunit-try-catch-test
+[ 0.060000][ T1] 1..2
+[ 0.060000][ T1] ok 1 - kunit_test_try_catch_successful_try_no_catch
+[ 0.060000][ T1] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch
+[ 0.060000][ T1] ok 2 - kunit-try-catch-test
+[ 0.060000][ T1] # Subtest: string-stream-test
+[ 0.060000][ T1] 1..3
+[ 0.060000][ T1] ok 1 - string_stream_test_empty_on_creation
+[ 0.060000][ T1] ok 2 - string_stream_test_not_empty_after_add
+[ 0.060000][ T1] ok 3 - string_stream_test_get_string
+[ 0.060000][ T1] ok 3 - string-stream-test
+[ 0.060000][ T1] List of all partitions:
+[ 0.060000][ T1] No filesystem could mount root, tried:
+[ 0.060000][ T1]
+[ 0.060000][ T1] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
+[ 0.060000][ T1] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2
+[ 0.060000][ T1] Stack:
+[ 0.060000][ T1] 602086f8 601bc260 705c0000 705c0000
+[ 0.060000][ T1] 602086f8 6005fcec 705c0000 6002c6ab
+[ 0.060000][ T1] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log b/tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log
new file mode 100644
index 000000000000..0f87cdabebb0
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log
@@ -0,0 +1,33 @@
+[ 0.060000] printk: console [mc-1] enabled
+[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0
+[ 0.060000] TAP version 14
+[ 0.060000] # Subtest: kunit-resource-test
+[ 0.060000] 1..5
+[ 0.060000] ok 1 - kunit_resource_test_init_resources
+[ 0.060000] ok 2 - kunit_resource_test_alloc_resource
+[ 0.060000] ok 3 - kunit_resource_test_destroy_resource
+[ 0.060000] foo bar #
+[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources
+[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering
+[ 0.060000] ok 1 - kunit-resource-test
+[ 0.060000] foo bar # non-kunit output
+[ 0.060000] # Subtest: kunit-try-catch-test
+[ 0.060000] 1..2
+[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch
+[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch
+[ 0.060000] ok 2 - kunit-try-catch-test
+[ 0.060000] # Subtest: string-stream-test
+[ 0.060000] 1..3
+[ 0.060000] ok 1 - string_stream_test_empty_on_creation
+[ 0.060000] ok 2 - string_stream_test_not_empty_after_add
+[ 0.060000] ok 3 - string_stream_test_get_string
+[ 0.060000] ok 3 - string-stream-test
+[ 0.060000] List of all partitions:
+[ 0.060000] No filesystem could mount root, tried:
+[ 0.060000]
+[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
+[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2
+[ 0.060000] Stack:
+[ 0.060000] 602086f8 601bc260 705c0000 705c0000
+[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab
+[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_pound_no_prefix.log b/tools/testing/kunit/test_data/test_pound_no_prefix.log
new file mode 100644
index 000000000000..2ceb360be7d5
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_pound_no_prefix.log
@@ -0,0 +1,33 @@
+ printk: console [mc-1] enabled
+ random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0
+ TAP version 14
+ # Subtest: kunit-resource-test
+ 1..5
+ ok 1 - kunit_resource_test_init_resources
+ ok 2 - kunit_resource_test_alloc_resource
+ ok 3 - kunit_resource_test_destroy_resource
+ foo bar #
+ ok 4 - kunit_resource_test_cleanup_resources
+ ok 5 - kunit_resource_test_proper_free_ordering
+ ok 1 - kunit-resource-test
+ foo bar # non-kunit output
+ # Subtest: kunit-try-catch-test
+ 1..2
+ ok 1 - kunit_test_try_catch_successful_try_no_catch
+ ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch
+ ok 2 - kunit-try-catch-test
+ # Subtest: string-stream-test
+ 1..3
+ ok 1 - string_stream_test_empty_on_creation
+ ok 2 - string_stream_test_not_empty_after_add
+ ok 3 - string_stream_test_get_string
+ ok 3 - string-stream-test
+ List of all partitions:
+ No filesystem could mount root, tried:
+
+ Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
+ CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2
+ Stack:
+ 602086f8 601bc260 705c0000 705c0000
+ 602086f8 6005fcec 705c0000 6002c6ab
+ 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file
diff --git a/tools/testing/kunit/test_data/test_pound_sign.log b/tools/testing/kunit/test_data/test_pound_sign.log
new file mode 100644
index 000000000000..28ffa5ba03bf
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_pound_sign.log
@@ -0,0 +1,33 @@
+[ 0.060000] printk: console [mc-1] enabled
+[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0
+[ 0.060000] TAP version 14
+[ 0.060000] # Subtest: kunit-resource-test
+[ 0.060000] 1..5
+[ 0.060000] ok 1 - kunit_resource_test_init_resources
+[ 0.060000] ok 2 - kunit_resource_test_alloc_resource
+[ 0.060000] ok 3 - kunit_resource_test_destroy_resource
+[ 0.060000] foo bar #
+[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources
+[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering
+[ 0.060000] ok 1 - kunit-resource-test
+[ 0.060000] foo bar # non-kunit output
+[ 0.060000] # Subtest: kunit-try-catch-test
+[ 0.060000] 1..2
+[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch
+[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch
+[ 0.060000] ok 2 - kunit-try-catch-test
+[ 0.060000] # Subtest: string-stream-test
+[ 0.060000] 1..3
+[ 0.060000] ok 1 - string_stream_test_empty_on_creation
+[ 0.060000] ok 2 - string_stream_test_not_empty_after_add
+[ 0.060000] ok 3 - string_stream_test_get_string
+[ 0.060000] ok 3 - string-stream-test
+[ 0.060000] List of all partitions:
+[ 0.060000] No filesystem could mount root, tried:
+[ 0.060000]
+[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0)
+[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2
+[ 0.060000] Stack:
+[ 0.060000] 602086f8 601bc260 705c0000 705c0000
+[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab
+[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index 3834899b6693..d971516401e6 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
generated/map-shift.h
idr.c
idr-test
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 397d6b612502..aa6abfe0749c 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -7,8 +7,8 @@ LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder xarray
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
- regression4.o \
- tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
+ regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
+ iteration_check_2.o benchmark.o
ifndef SHIFT
SHIFT=3
diff --git a/tools/testing/radix-tree/iteration_check_2.c b/tools/testing/radix-tree/iteration_check_2.c
new file mode 100644
index 000000000000..aac5c50a3674
--- /dev/null
+++ b/tools/testing/radix-tree/iteration_check_2.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * iteration_check_2.c: Check that deleting a tagged entry doesn't cause
+ * an RCU walker to finish early.
+ * Copyright (c) 2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+#include <pthread.h>
+#include "test.h"
+
+static volatile bool test_complete;
+
+static void *iterator(void *arg)
+{
+ XA_STATE(xas, arg, 0);
+ void *entry;
+
+ rcu_register_thread();
+
+ while (!test_complete) {
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+ ;
+ rcu_read_unlock();
+ assert(xas.xa_index >= 100);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void *throbber(void *arg)
+{
+ struct xarray *xa = arg;
+
+ rcu_register_thread();
+
+ while (!test_complete) {
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ xa_store(xa, i, xa_mk_value(i), GFP_KERNEL);
+ xa_set_mark(xa, i, XA_MARK_0);
+ }
+ for (i = 0; i < 100; i++)
+ xa_erase(xa, i);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+void iteration_test2(unsigned test_duration)
+{
+ pthread_t threads[2];
+ DEFINE_XARRAY(array);
+ int i;
+
+ printv(1, "Running iteration test 2 for %d seconds\n", test_duration);
+
+ test_complete = false;
+
+ xa_store(&array, 100, xa_mk_value(100), GFP_KERNEL);
+ xa_set_mark(&array, 100, XA_MARK_0);
+
+ if (pthread_create(&threads[0], NULL, iterator, &array)) {
+ perror("create iterator thread");
+ exit(1);
+ }
+ if (pthread_create(&threads[1], NULL, throbber, &array)) {
+ perror("create throbber thread");
+ exit(1);
+ }
+
+ sleep(test_duration);
+ test_complete = true;
+
+ for (i = 0; i < 2; i++) {
+ if (pthread_join(threads[i], NULL)) {
+ perror("pthread_join");
+ exit(1);
+ }
+ }
+
+ xa_destroy(&array);
+}
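
A note on the patch above: the iterator thread can only rely on xas_for_each_marked() staying well-formed because every reader registers with liburcu and brackets the walk in a read-side critical section. For readers unfamiliar with the userspace RCU API, here is a minimal standalone sketch of the same reader/updater discipline (illustrative only, not part of the patch; it assumes liburcu's default flavor via <urcu.h> and builds with `gcc sketch.c -lurcu -lpthread`):

    /* Minimal liburcu reader/updater sketch. */
    #include <pthread.h>
    #include <stdio.h>
    #include <urcu.h>

    static int *shared;                     /* RCU-protected pointer */

    static void *reader(void *arg)
    {
            (void)arg;
            rcu_register_thread();          /* required for read-side use */
            rcu_read_lock();                /* readers never block the writer */
            int *p = rcu_dereference(shared);
            if (p)
                    printf("read %d\n", *p);
            rcu_read_unlock();
            rcu_unregister_thread();
            return NULL;
    }

    int main(void)
    {
            static int first = 1, second = 2;
            pthread_t t;

            rcu_register_thread();          /* register the writer thread, too */
            shared = &first;
            pthread_create(&t, NULL, reader, NULL);
            rcu_assign_pointer(shared, &second);    /* publish new version */
            synchronize_rcu();              /* wait out pre-existing readers */
            /* 'first' may now be reclaimed safely */
            pthread_join(t, NULL);
            rcu_unregister_thread();
            return 0;
    }

The test's assert(xas.xa_index >= 100) rests on the same guarantee: entry 100 is always present and marked, so a correct RCU walk must reach it even while entries 0-99 are erased underneath it.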
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 44a0d1ad4408..2d9c59df60de 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -19,37 +19,44 @@ int test_verbose;
struct kmem_cache {
pthread_mutex_t lock;
- int size;
+ unsigned int size;
+ unsigned int align;
int nr_objs;
void *objs;
void (*ctor)(void *);
};
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
{
- struct radix_tree_node *node;
+ void *p;
- if (!(flags & __GFP_DIRECT_RECLAIM))
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
return NULL;
pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs) {
+ struct radix_tree_node *node = cachep->objs;
cachep->nr_objs--;
- node = cachep->objs;
cachep->objs = node->parent;
pthread_mutex_unlock(&cachep->lock);
node->parent = NULL;
+ p = node;
} else {
pthread_mutex_unlock(&cachep->lock);
- node = malloc(cachep->size);
+ if (cachep->align)
+ posix_memalign(&p, cachep->align, cachep->size);
+ else
+ p = malloc(cachep->size);
if (cachep->ctor)
- cachep->ctor(node);
+ cachep->ctor(p);
+ else if (gfp & __GFP_ZERO)
+ memset(p, 0, cachep->size);
}
uatomic_inc(&nr_allocated);
if (kmalloc_verbose)
- printf("Allocating %p from slab\n", node);
- return node;
+ printf("Allocating %p from slab\n", p);
+ return p;
}
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
@@ -59,7 +66,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
if (kmalloc_verbose)
printf("Freeing %p to slab\n", objp);
pthread_mutex_lock(&cachep->lock);
- if (cachep->nr_objs > 10) {
+ if (cachep->nr_objs > 10 || cachep->align) {
memset(objp, POISON_FREE, cachep->size);
free(objp);
} else {
@@ -98,13 +105,14 @@ void kfree(void *p)
}
struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t offset,
- unsigned long flags, void (*ctor)(void *))
+kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ unsigned int flags, void (*ctor)(void *))
{
struct kmem_cache *ret = malloc(sizeof(*ret));
pthread_mutex_init(&ret->lock, NULL);
ret->size = size;
+ ret->align = align;
ret->nr_objs = 0;
ret->objs = NULL;
ret->ctor = ctor;
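
For context, the new `align` handling above follows the standard userspace recipe: posix_memalign() when a nonzero alignment was requested at cache creation, plain malloc() otherwise. A small sketch of that allocation choice (illustrative only, not the patch; posix_memalign() requires the alignment to be a power of two and a multiple of sizeof(void *)):

    #include <stdlib.h>
    #include <string.h>

    /* Allocate 'size' bytes, honoring 'align' when nonzero. */
    static void *cache_alloc(size_t size, size_t align, int zero)
    {
            void *p = NULL;

            if (align) {
                    /* posix_memalign() returns an errno value, not -1 */
                    if (posix_memalign(&p, align, size))
                            return NULL;
            } else {
                    p = malloc(size);
                    if (!p)
                            return NULL;
            }
            if (zero)
                    memset(p, 0, size);
            return p;
    }

Note the matching change in kmem_cache_free() above: objects from aligned caches are returned straight to the system allocator instead of the small internal freelist.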
diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h
index a037def0dec6..2958830ce4d7 100644
--- a/tools/testing/radix-tree/linux/slab.h
+++ b/tools/testing/radix-tree/linux/slab.h
@@ -20,8 +20,8 @@ static inline void *kzalloc(size_t size, gfp_t gfp)
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
void kmem_cache_free(struct kmem_cache *cachep, void *objp);
-struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t offset,
- unsigned long flags, void (*ctor)(void *));
+struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
+ unsigned int align, unsigned int flags,
+ void (*ctor)(void *));
#endif /* SLAB_H */
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index 7a22d6e3732e..f2cbc8e5b97c 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -311,6 +311,7 @@ int main(int argc, char **argv)
regression4_test();
iteration_test(0, 10 + 90 * long_run);
iteration_test(7, 10 + 90 * long_run);
+ iteration_test2(10 + 90 * long_run);
single_thread_tests(long_run);
/* Free any remaining preallocated nodes */
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 1ee4b2c0ad10..34dab4d18744 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -34,6 +34,7 @@ void xarray_tests(void);
void tag_check(void);
void multiorder_checks(void);
void iteration_test(unsigned order, unsigned duration);
+void iteration_test2(unsigned duration);
void benchmark(void);
void idr_checks(void);
void ida_tests(void);
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index 304fdf1a21dc..055a5019b13c 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
gpiogpio-event-mon
gpiogpio-hammer
gpioinclude/
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 077818d0197f..2ff68702fd41 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -38,6 +38,7 @@ TARGETS += net/mptcp
TARGETS += netfilter
TARGETS += nsfs
TARGETS += pidfd
+TARGETS += pid_namespace
TARGETS += powerpc
TARGETS += proc
TARGETS += pstore
@@ -91,7 +92,7 @@ override LDFLAGS =
override MAKEFLAGS =
endif
-# Append kselftest to KBUILD_OUTPUT to avoid cluttering
+# Append kselftest to KBUILD_OUTPUT and O to avoid cluttering
# KBUILD_OUTPUT with selftest objects and headers installed
# by selftests Makefile or lib.mk.
ifdef building_out_of_srctree
@@ -99,7 +100,7 @@ override LDFLAGS =
endif
ifneq ($(O),)
- BUILD := $(O)
+ BUILD := $(O)/kselftest
else
ifneq ($(KBUILD_OUTPUT),)
BUILD := $(KBUILD_OUTPUT)/kselftest
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index 7c462714b418..9258306cafe9 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -21,7 +21,7 @@ all:
override define INSTALL_RULE
mkdir -p $(INSTALL_PATH)
- install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
+ install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
@for SUBDIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$SUBDIR; \
diff --git a/tools/testing/selftests/android/ion/.gitignore b/tools/testing/selftests/android/ion/.gitignore
index 95e8f4561474..78eae9972bb1 100644
--- a/tools/testing/selftests/android/ion/.gitignore
+++ b/tools/testing/selftests/android/ion/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ionapp_export
ionapp_import
ionmap_test
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile
index 0eb7ab626e1c..42b71f005332 100644
--- a/tools/testing/selftests/android/ion/Makefile
+++ b/tools/testing/selftests/android/ion/Makefile
@@ -17,4 +17,4 @@ include ../../lib.mk
$(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
$(OUTPUT)/ionapp_import: ionapp_import.c ipcsocket.c ionutils.c
-$(OUTPUT)/ionmap_test: ionmap_test.c ionutils.c
+$(OUTPUT)/ionmap_test: ionmap_test.c ionutils.c ipcsocket.c
diff --git a/tools/testing/selftests/arm64/signal/.gitignore b/tools/testing/selftests/arm64/signal/.gitignore
index 3c5b4e8ff894..78c902045ca7 100644
--- a/tools/testing/selftests/arm64/signal/.gitignore
+++ b/tools/testing/selftests/arm64/signal/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mangle_*
fake_sigreturn_*
!*.[ch]
diff --git a/tools/testing/selftests/arm64/tags/.gitignore b/tools/testing/selftests/arm64/tags/.gitignore
index e8fae8d61ed6..f4f6c5112463 100644
--- a/tools/testing/selftests/arm64/tags/.gitignore
+++ b/tools/testing/selftests/arm64/tags/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
tags_test
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 2198cd876675..c30079c86998 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
test_verifier
test_maps
test_lru_map
diff --git a/tools/testing/selftests/bpf/map_tests/.gitignore b/tools/testing/selftests/bpf/map_tests/.gitignore
index 45984a364647..89c4a3d37544 100644
--- a/tools/testing/selftests/bpf/map_tests/.gitignore
+++ b/tools/testing/selftests/bpf/map_tests/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
tests.h
diff --git a/tools/testing/selftests/bpf/prog_tests/.gitignore b/tools/testing/selftests/bpf/prog_tests/.gitignore
index 45984a364647..89c4a3d37544 100644
--- a/tools/testing/selftests/bpf/prog_tests/.gitignore
+++ b/tools/testing/selftests/bpf/prog_tests/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
tests.h
diff --git a/tools/testing/selftests/bpf/verifier/.gitignore b/tools/testing/selftests/bpf/verifier/.gitignore
index 45984a364647..89c4a3d37544 100644
--- a/tools/testing/selftests/bpf/verifier/.gitignore
+++ b/tools/testing/selftests/bpf/verifier/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
tests.h
diff --git a/tools/testing/selftests/breakpoints/.gitignore b/tools/testing/selftests/breakpoints/.gitignore
index a23bb4a6f06c..def2e97dab9a 100644
--- a/tools/testing/selftests/breakpoints/.gitignore
+++ b/tools/testing/selftests/breakpoints/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
breakpoint_test
step_after_suspend_test
diff --git a/tools/testing/selftests/capabilities/.gitignore b/tools/testing/selftests/capabilities/.gitignore
index b732dd0d4738..426d9adca67c 100644
--- a/tools/testing/selftests/capabilities/.gitignore
+++ b/tools/testing/selftests/capabilities/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
test_execve
validate_cap
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index 7f9835624793..aa6de65b0838 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
test_memcontrol
test_core
test_freezer
diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 66aafe1f5746..967f268fde74 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -11,6 +11,6 @@ TEST_GEN_PROGS += test_freezer
include ../lib.mk
-$(OUTPUT)/test_memcontrol: cgroup_util.c
-$(OUTPUT)/test_core: cgroup_util.c
-$(OUTPUT)/test_freezer: cgroup_util.c
+$(OUTPUT)/test_memcontrol: cgroup_util.c ../clone3/clone3_selftests.h
+$(OUTPUT)/test_core: cgroup_util.c ../clone3/clone3_selftests.h
+$(OUTPUT)/test_freezer: cgroup_util.c ../clone3/clone3_selftests.h
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 8f7131dcf1ff..8a637ca7d73a 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -15,6 +15,7 @@
#include <unistd.h>
#include "cgroup_util.h"
+#include "../clone3/clone3_selftests.h"
static ssize_t read_text(const char *path, char *buf, size_t max_len)
{
@@ -331,12 +332,112 @@ int cg_run(const char *cgroup,
}
}
+pid_t clone_into_cgroup(int cgroup_fd)
+{
+#ifdef CLONE_ARGS_SIZE_VER2
+ pid_t pid;
+
+ struct clone_args args = {
+ .flags = CLONE_INTO_CGROUP,
+ .exit_signal = SIGCHLD,
+ .cgroup = cgroup_fd,
+ };
+
+ pid = sys_clone3(&args, sizeof(struct clone_args));
+ /*
+ * Verify that this is a genuine test failure:
+ * ENOSYS -> clone3() not available
+ * E2BIG -> CLONE_INTO_CGROUP not available
+ */
+ if (pid < 0 && (errno == ENOSYS || errno == E2BIG))
+ goto pretend_enosys;
+
+ return pid;
+
+pretend_enosys:
+#endif
+ errno = ENOSYS;
+ return -ENOSYS;
+}
+
+int clone_reap(pid_t pid, int options)
+{
+ int ret;
+ siginfo_t info = {
+ .si_signo = 0,
+ };
+
+again:
+ ret = waitid(P_PID, pid, &info, options | __WALL | __WNOTHREAD);
+ if (ret < 0) {
+ if (errno == EINTR)
+ goto again;
+ return -1;
+ }
+
+ if (options & WEXITED) {
+ if (WIFEXITED(info.si_status))
+ return WEXITSTATUS(info.si_status);
+ }
+
+ if (options & WSTOPPED) {
+ if (WIFSTOPPED(info.si_status))
+ return WSTOPSIG(info.si_status);
+ }
+
+ if (options & WCONTINUED) {
+ if (WIFCONTINUED(info.si_status))
+ return 0;
+ }
+
+ return -1;
+}
+
+int dirfd_open_opath(const char *dir)
+{
+ return open(dir, O_DIRECTORY | O_CLOEXEC | O_NOFOLLOW | O_PATH);
+}
+
+#define close_prot_errno(fd) \
+ if (fd >= 0) { \
+ int _e_ = errno; \
+ close(fd); \
+ errno = _e_; \
+ }
+
+static int clone_into_cgroup_run_nowait(const char *cgroup,
+ int (*fn)(const char *cgroup, void *arg),
+ void *arg)
+{
+ int cgroup_fd;
+ pid_t pid;
+
+ cgroup_fd = dirfd_open_opath(cgroup);
+ if (cgroup_fd < 0)
+ return -1;
+
+ pid = clone_into_cgroup(cgroup_fd);
+ close_prot_errno(cgroup_fd);
+ if (pid == 0)
+ exit(fn(cgroup, arg));
+
+ return pid;
+}
+
int cg_run_nowait(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg)
{
int pid;
+ pid = clone_into_cgroup_run_nowait(cgroup, fn, arg);
+ if (pid > 0)
+ return pid;
+
+ /* Genuine test failure. */
+ if (pid < 0 && errno != ENOSYS)
+ return -1;
+
pid = fork();
if (pid == 0) {
char buf[64];
@@ -450,3 +551,28 @@ int proc_read_strstr(int pid, bool thread, const char *item, const char *needle)
return strstr(buf, needle) ? 0 : -1;
}
+
+int clone_into_cgroup_run_wait(const char *cgroup)
+{
+ int cgroup_fd;
+ pid_t pid;
+
+ cgroup_fd = dirfd_open_opath(cgroup);
+ if (cgroup_fd < 0)
+ return -1;
+
+ pid = clone_into_cgroup(cgroup_fd);
+ close_prot_errno(cgroup_fd);
+ if (pid < 0)
+ return -1;
+
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+
+ /*
+ * We don't care whether this fails. We only care whether the initial
+ * clone succeeded.
+ */
+ (void)clone_reap(pid, WEXITED);
+ return 0;
+}
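
For illustration, the clone_into_cgroup() helper above boils down to one raw system call. A self-contained sketch (hypothetical, not part of the patch) assuming kernel headers new enough to provide struct clone_args and CLONE_INTO_CGROUP in <linux/sched.h>, plus a v5.7+ kernel at runtime:

    #include <linux/sched.h>        /* struct clone_args, CLONE_INTO_CGROUP */
    #include <signal.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Clone a child directly into the cgroup referred to by 'cgroup_fd',
     * an O_PATH directory fd on a cgroup2 directory. */
    static pid_t clone3_into(int cgroup_fd)
    {
            struct clone_args args;

            memset(&args, 0, sizeof(args));
            args.flags = CLONE_INTO_CGROUP;
            args.exit_signal = SIGCHLD;
            args.cgroup = cgroup_fd;

            /* glibc has no clone3() wrapper, so call it directly. */
            return syscall(__NR_clone3, &args, sizeof(args));
    }

In the child (return value 0) the process is a member of the target cgroup from its very first instruction, which closes the race window that the fork()-then-migrate fallback in cg_run_nowait() necessarily leaves open.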
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index 49c54fbdb229..5a1305dd1f0b 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -50,3 +50,7 @@ extern int cg_wait_for_proc_count(const char *cgroup, int count);
extern int cg_killall(const char *cgroup);
extern ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size);
extern int proc_read_strstr(int pid, bool thread, const char *item, const char *needle);
+extern pid_t clone_into_cgroup(int cgroup_fd);
+extern int clone_reap(pid_t pid, int options);
+extern int clone_into_cgroup_run_wait(const char *cgroup);
+extern int dirfd_open_opath(const char *dir);
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index e19ce940cd6a..3df648c37876 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -2,7 +2,10 @@
#include <linux/limits.h>
#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
#include <unistd.h>
+#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
@@ -12,6 +15,115 @@
#include "../kselftest.h"
#include "cgroup_util.h"
+static int touch_anon(char *buf, size_t size)
+{
+ int fd;
+ char *pos = buf;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ while (size > 0) {
+ ssize_t ret = read(fd, pos, size);
+
+ if (ret < 0) {
+ if (errno != EINTR) {
+ close(fd);
+ return -1;
+ }
+ } else {
+ pos += ret;
+ size -= ret;
+ }
+ }
+ close(fd);
+
+ return 0;
+}
+
+static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
+{
+ int ppid = getppid();
+ size_t size = (size_t)arg;
+ void *buf;
+
+ buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ 0, 0);
+ if (buf == MAP_FAILED)
+ return -1;
+
+ if (touch_anon((char *)buf, size)) {
+ munmap(buf, size);
+ return -1;
+ }
+
+ while (getppid() == ppid)
+ sleep(1);
+
+ munmap(buf, size);
+ return 0;
+}
+
+/*
+ * Create a child process that allocates and touches 100MB, then waits to be
+ * killed. Wait until the child is attached to the cgroup, kill all processes
+ * in that cgroup and wait until "cgroup.procs" is empty. At this point try to
+ * destroy the empty cgroup. The test helps detect race conditions between
+ * dying processes leaving the cgroup and cgroup destruction path.
+ */
+static int test_cgcore_destroy(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cg_test = NULL;
+ int child_pid;
+ char buf[PAGE_SIZE];
+
+ cg_test = cg_name(root, "cg_test");
+
+ if (!cg_test)
+ goto cleanup;
+
+ for (int i = 0; i < 10; i++) {
+ if (cg_create(cg_test))
+ goto cleanup;
+
+ child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
+ (void *) MB(100));
+
+ if (child_pid < 0)
+ goto cleanup;
+
+ /* wait for the child to enter cgroup */
+ if (cg_wait_for_proc_count(cg_test, 1))
+ goto cleanup;
+
+ if (cg_killall(cg_test))
+ goto cleanup;
+
+ /* wait for cgroup to be empty */
+ while (1) {
+ if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
+ goto cleanup;
+ if (buf[0] == '\0')
+ break;
+ usleep(1000);
+ }
+
+ if (rmdir(cg_test))
+ goto cleanup;
+
+ if (waitpid(child_pid, NULL, 0) < 0)
+ goto cleanup;
+ }
+ ret = KSFT_PASS;
+cleanup:
+ if (cg_test)
+ cg_destroy(cg_test);
+ free(cg_test);
+ return ret;
+}
+
/*
* A(0) - B(0) - C(1)
* \ D(0)
@@ -25,8 +137,11 @@
static int test_cgcore_populated(const char *root)
{
int ret = KSFT_FAIL;
+ int err;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_c = NULL, *cg_test_d = NULL;
+ int cgroup_fd = -EBADF;
+ pid_t pid;
cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
@@ -78,6 +193,52 @@ static int test_cgcore_populated(const char *root)
if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
goto cleanup;
+ /* Test that we can directly clone into a new cgroup. */
+ cgroup_fd = dirfd_open_opath(cg_test_d);
+ if (cgroup_fd < 0)
+ goto cleanup;
+
+ pid = clone_into_cgroup(cgroup_fd);
+ if (pid < 0) {
+ if (errno == ENOSYS)
+ goto cleanup_pass;
+ goto cleanup;
+ }
+
+ if (pid == 0) {
+ if (raise(SIGSTOP))
+ exit(EXIT_FAILURE);
+ exit(EXIT_SUCCESS);
+ }
+
+ err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");
+
+ (void)clone_reap(pid, WSTOPPED);
+ (void)kill(pid, SIGCONT);
+ (void)clone_reap(pid, WEXITED);
+
+ if (err)
+ goto cleanup;
+
+ if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
+ goto cleanup;
+
+ /* Remove cgroup. */
+ if (cg_test_d) {
+ cg_destroy(cg_test_d);
+ free(cg_test_d);
+ cg_test_d = NULL;
+ }
+
+ pid = clone_into_cgroup(cgroup_fd);
+ if (pid < 0)
+ goto cleanup_pass;
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+ (void)clone_reap(pid, WEXITED);
+ goto cleanup;
+
+cleanup_pass:
ret = KSFT_PASS;
cleanup:
@@ -93,6 +254,8 @@ cleanup:
free(cg_test_c);
free(cg_test_b);
free(cg_test_a);
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
return ret;
}
@@ -136,6 +299,16 @@ static int test_cgcore_invalid_domain(const char *root)
if (errno != EOPNOTSUPP)
goto cleanup;
+ if (!clone_into_cgroup_run_wait(child))
+ goto cleanup;
+
+ if (errno == ENOSYS)
+ goto cleanup_pass;
+
+ if (errno != EOPNOTSUPP)
+ goto cleanup;
+
+cleanup_pass:
ret = KSFT_PASS;
cleanup:
@@ -345,6 +518,9 @@ static int test_cgcore_internal_process_constraint(const char *root)
if (!cg_enter_current(parent))
goto cleanup;
+ if (!clone_into_cgroup_run_wait(parent))
+ goto cleanup;
+
ret = KSFT_PASS;
cleanup:
@@ -512,6 +688,7 @@ struct corecg_test {
T(test_cgcore_populated),
T(test_cgcore_proc_migration),
T(test_cgcore_thread_migration),
+ T(test_cgcore_destroy),
};
#undef T
diff --git a/tools/testing/selftests/clone3/.gitignore b/tools/testing/selftests/clone3/.gitignore
index 0dc4f32c6cb8..a81085742d40 100644
--- a/tools/testing/selftests/clone3/.gitignore
+++ b/tools/testing/selftests/clone3/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
clone3
clone3_clear_sighand
clone3_set_tid
diff --git a/tools/testing/selftests/clone3/clone3_selftests.h b/tools/testing/selftests/clone3/clone3_selftests.h
index a3f2c8ad8bcc..91c1a78ddb39 100644
--- a/tools/testing/selftests/clone3/clone3_selftests.h
+++ b/tools/testing/selftests/clone3/clone3_selftests.h
@@ -5,12 +5,24 @@
#define _GNU_SOURCE
#include <sched.h>
+#include <linux/sched.h>
+#include <linux/types.h>
#include <stdint.h>
#include <syscall.h>
-#include <linux/types.h>
+#include <sys/wait.h>
+
+#include "../kselftest.h"
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
+#ifndef CLONE_INTO_CGROUP
+#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */
+#endif
+
+#ifndef CLONE_ARGS_SIZE_VER0
+#define CLONE_ARGS_SIZE_VER0 64
+#endif
+
#ifndef __NR_clone3
#define __NR_clone3 -1
struct clone_args {
@@ -22,10 +34,13 @@ struct clone_args {
__aligned_u64 stack;
__aligned_u64 stack_size;
__aligned_u64 tls;
+#define CLONE_ARGS_SIZE_VER1 80
__aligned_u64 set_tid;
__aligned_u64 set_tid_size;
+#define CLONE_ARGS_SIZE_VER2 88
+ __aligned_u64 cgroup;
};
-#endif
+#endif /* __NR_clone3 */
static pid_t sys_clone3(struct clone_args *args, size_t size)
{
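
The CLONE_ARGS_SIZE_VER* constants above matter because clone3() takes the structure size as its second argument and accepts any prefix back to the 64-byte VER0 layout, zero-filling the unsupplied tail. A hypothetical usage sketch built on the definitions in this header (assumes <signal.h> for SIGCHLD):

    /* Plain fork() equivalent via clone3(): passing CLONE_ARGS_SIZE_VER0
     * uses only the original fields, so it works on any kernel that has
     * clone3() at all, no matter how large the userspace struct grows. */
    static pid_t fork_via_clone3(void)
    {
            struct clone_args args = {
                    .exit_signal = SIGCHLD,
            };

            return sys_clone3(&args, CLONE_ARGS_SIZE_VER0);
    }

Conversely, a newer userspace that sets the VER2-only `cgroup` field on an older kernel gets -E2BIG back, which is exactly the condition clone_into_cgroup() in cgroup_util.c treats as "feature unavailable".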
diff --git a/tools/testing/selftests/drivers/.gitignore b/tools/testing/selftests/drivers/.gitignore
index f6aebcc27b76..ca74f2e1c719 100644
--- a/tools/testing/selftests/drivers/.gitignore
+++ b/tools/testing/selftests/drivers/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
/dma-buf/udmabuf
diff --git a/tools/testing/selftests/efivarfs/.gitignore b/tools/testing/selftests/efivarfs/.gitignore
index 33618493562b..807407f7f58b 100644
--- a/tools/testing/selftests/efivarfs/.gitignore
+++ b/tools/testing/selftests/efivarfs/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
create-read
open-unlink
diff --git a/tools/testing/selftests/exec/.gitignore b/tools/testing/selftests/exec/.gitignore
index b02279da6fa1..c078ece12ff0 100644
--- a/tools/testing/selftests/exec/.gitignore
+++ b/tools/testing/selftests/exec/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
subdir*
script*
execveat
diff --git a/tools/testing/selftests/filesystems/.gitignore b/tools/testing/selftests/filesystems/.gitignore
index 8449cf6716ce..f0c0ff20d6cf 100644
--- a/tools/testing/selftests/filesystems/.gitignore
+++ b/tools/testing/selftests/filesystems/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
dnotify_test
devpts_pts
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore
index 8a5d9bf63dd4..8e5cf9084894 100644
--- a/tools/testing/selftests/filesystems/binderfs/.gitignore
+++ b/tools/testing/selftests/filesystems/binderfs/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
binderfs_test
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
index 58cb659b56b4..8af25ae96049 100644
--- a/tools/testing/selftests/filesystems/binderfs/Makefile
+++ b/tools/testing/selftests/filesystems/binderfs/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -I../../../../../usr/include/
+CFLAGS += -I../../../../../usr/include/ -pthread
TEST_GEN_PROGS := binderfs_test
+binderfs_test: binderfs_test.c ../../kselftest.h ../../kselftest_harness.h
+
include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index 8c2ed962e1c7..8a6b507e34a8 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -3,114 +3,47 @@
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
+#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/fsuid.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
+#include <sys/socket.h>
#include <sys/stat.h>
+#include <sys/sysinfo.h>
#include <sys/types.h>
+#include <sys/wait.h>
#include <unistd.h>
#include <linux/android/binder.h>
#include <linux/android/binderfs.h>
-#include "../../kselftest.h"
-
-static ssize_t write_nointr(int fd, const void *buf, size_t count)
-{
- ssize_t ret;
-again:
- ret = write(fd, buf, count);
- if (ret < 0 && errno == EINTR)
- goto again;
-
- return ret;
-}
-static void write_to_file(const char *filename, const void *buf, size_t count,
- int allowed_errno)
-{
- int fd, saved_errno;
- ssize_t ret;
+#include "../../kselftest.h"
+#include "../../kselftest_harness.h"
- fd = open(filename, O_WRONLY | O_CLOEXEC);
- if (fd < 0)
- ksft_exit_fail_msg("%s - Failed to open file %s\n",
- strerror(errno), filename);
+#define DEFAULT_THREADS 4
- ret = write_nointr(fd, buf, count);
- if (ret < 0) {
- if (allowed_errno && (errno == allowed_errno)) {
- close(fd);
- return;
- }
+#define PTR_TO_INT(p) ((int)((intptr_t)(p)))
+#define INT_TO_PTR(u) ((void *)((intptr_t)(u)))
- goto on_error;
+#define close_prot_errno_disarm(fd) \
+ if (fd >= 0) { \
+ int _e_ = errno; \
+ close(fd); \
+ errno = _e_; \
+ fd = -EBADF; \
}
- if ((size_t)ret != count)
- goto on_error;
-
- close(fd);
- return;
-
-on_error:
- saved_errno = errno;
- close(fd);
- errno = saved_errno;
-
- if (ret < 0)
- ksft_exit_fail_msg("%s - Failed to write to file %s\n",
- strerror(errno), filename);
-
- ksft_exit_fail_msg("Failed to write to file %s\n", filename);
-}
-
-static void change_to_userns(void)
-{
- int ret;
- uid_t uid;
- gid_t gid;
- /* {g,u}id_map files only allow a max of 4096 bytes written to them */
- char idmap[4096];
-
- uid = getuid();
- gid = getgid();
-
- ret = unshare(CLONE_NEWUSER);
- if (ret < 0)
- ksft_exit_fail_msg("%s - Failed to unshare user namespace\n",
- strerror(errno));
-
- write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT);
-
- ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid);
- if (ret < 0 || (size_t)ret >= sizeof(idmap))
- ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
- strerror(errno));
-
- write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0);
-
- ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid);
- if (ret < 0 || (size_t)ret >= sizeof(idmap))
- ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
- strerror(errno));
-
- write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0);
-
- ret = setgid(0);
- if (ret)
- ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
- strerror(errno));
+#define log_exit(format, ...) \
+ ({ \
+ fprintf(stderr, format "\n", ##__VA_ARGS__); \
+ exit(EXIT_FAILURE); \
+ })
- ret = setuid(0);
- if (ret)
- ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
- strerror(errno));
-}
-
-static void change_to_mountns(void)
+static void change_mountns(void)
{
int ret;
@@ -132,36 +65,31 @@ static void rmdir_protect_errno(const char *dir)
errno = saved_errno;
}
-static void __do_binderfs_test(void)
+static int __do_binderfs_test(void)
{
int fd, ret, saved_errno;
size_t len;
ssize_t wret;
- bool keep = false;
struct binderfs_device device = { 0 };
struct binder_version version = { 0 };
+ char binderfs_mntpt[] = P_tmpdir "/binderfs_XXXXXX",
+ device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
- change_to_mountns();
-
- ret = mkdir("/dev/binderfs", 0755);
- if (ret < 0) {
- if (errno != EEXIST)
- ksft_exit_fail_msg(
- "%s - Failed to create binderfs mountpoint\n",
- strerror(errno));
+ change_mountns();
- keep = true;
- }
+ if (!mkdtemp(binderfs_mntpt))
+ ksft_exit_fail_msg(
+ "%s - Failed to create binderfs mountpoint\n",
+ strerror(errno));
- ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
+ ret = mount(NULL, binderfs_mntpt, "binder", 0, 0);
if (ret < 0) {
if (errno != ENODEV)
ksft_exit_fail_msg("%s - Failed to mount binderfs\n",
strerror(errno));
- keep ? : rmdir_protect_errno("/dev/binderfs");
- ksft_exit_skip(
- "The Android binderfs filesystem is not available\n");
+ rmdir_protect_errno(binderfs_mntpt);
+ return 1;
}
/* binderfs mount test passed */
@@ -169,7 +97,8 @@ static void __do_binderfs_test(void)
memcpy(device.name, "my-binder", strlen("my-binder"));
- fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+ snprintf(device_path, sizeof(device_path), "%s/binder-control", binderfs_mntpt);
+ fd = open(device_path, O_RDONLY | O_CLOEXEC);
if (fd < 0)
ksft_exit_fail_msg(
"%s - Failed to open binder-control device\n",
@@ -180,7 +109,7 @@ static void __do_binderfs_test(void)
close(fd);
errno = saved_errno;
if (ret < 0) {
- keep ? : rmdir_protect_errno("/dev/binderfs");
+ rmdir_protect_errno(binderfs_mntpt);
ksft_exit_fail_msg(
"%s - Failed to allocate new binder device\n",
strerror(errno));
@@ -193,9 +122,10 @@ static void __do_binderfs_test(void)
/* binder device allocation test passed */
ksft_inc_pass_cnt();
- fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY);
+ snprintf(device_path, sizeof(device_path), "%s/my-binder", binderfs_mntpt);
+ fd = open(device_path, O_CLOEXEC | O_RDONLY);
if (fd < 0) {
- keep ? : rmdir_protect_errno("/dev/binderfs");
+ rmdir_protect_errno(binderfs_mntpt);
ksft_exit_fail_msg("%s - Failed to open my-binder device\n",
strerror(errno));
}
@@ -205,7 +135,7 @@ static void __do_binderfs_test(void)
close(fd);
errno = saved_errno;
if (ret < 0) {
- keep ? : rmdir_protect_errno("/dev/binderfs");
+ rmdir_protect_errno(binderfs_mntpt);
ksft_exit_fail_msg(
"%s - Failed to open perform BINDER_VERSION request\n",
strerror(errno));
@@ -217,9 +147,9 @@ static void __do_binderfs_test(void)
/* binder transaction with binderfs binder device passed */
ksft_inc_pass_cnt();
- ret = unlink("/dev/binderfs/my-binder");
+ ret = unlink(device_path);
if (ret < 0) {
- keep ? : rmdir_protect_errno("/dev/binderfs");
+ rmdir_protect_errno(binderfs_mntpt);
ksft_exit_fail_msg("%s - Failed to delete binder device\n",
strerror(errno));
}
@@ -227,12 +157,13 @@ static void __do_binderfs_test(void)
/* binder device removal passed */
ksft_inc_pass_cnt();
- ret = unlink("/dev/binderfs/binder-control");
+ snprintf(device_path, sizeof(device_path), "%s/binder-control", binderfs_mntpt);
+ ret = unlink(device_path);
if (!ret) {
- keep ? : rmdir_protect_errno("/dev/binderfs");
+ rmdir_protect_errno(binderfs_mntpt);
ksft_exit_fail_msg("Managed to delete binder-control device\n");
} else if (errno != EPERM) {
- keep ? : rmdir_protect_errno("/dev/binderfs");
+ rmdir_protect_errno(binderfs_mntpt);
ksft_exit_fail_msg(
"%s - Failed to delete binder-control device but exited with unexpected error code\n",
strerror(errno));
@@ -242,34 +173,341 @@ static void __do_binderfs_test(void)
ksft_inc_xfail_cnt();
on_error:
- ret = umount2("/dev/binderfs", MNT_DETACH);
- keep ?: rmdir_protect_errno("/dev/binderfs");
+ ret = umount2(binderfs_mntpt, MNT_DETACH);
+ rmdir_protect_errno(binderfs_mntpt);
if (ret < 0)
ksft_exit_fail_msg("%s - Failed to unmount binderfs\n",
strerror(errno));
/* binderfs unmount test passed */
ksft_inc_pass_cnt();
+ return 0;
}
-static void binderfs_test_privileged()
+static int wait_for_pid(pid_t pid)
{
- if (geteuid() != 0)
- ksft_print_msg(
- "Tests are not run as root. Skipping privileged tests\n");
- else
- __do_binderfs_test();
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static int setid_userns_root(void)
+{
+ if (setuid(0))
+ return -1;
+ if (setgid(0))
+ return -1;
+
+ setfsuid(0);
+ setfsgid(0);
+
+ return 0;
+}
+
+enum idmap_type {
+ UID_MAP,
+ GID_MAP,
+};
+
+static ssize_t read_nointr(int fd, void *buf, size_t count)
+{
+ ssize_t ret;
+again:
+ ret = read(fd, buf, count);
+ if (ret < 0 && errno == EINTR)
+ goto again;
+
+ return ret;
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+again:
+ ret = write(fd, buf, count);
+ if (ret < 0 && errno == EINTR)
+ goto again;
+
+ return ret;
+}
+
+static int write_id_mapping(enum idmap_type type, pid_t pid, const char *buf,
+ size_t buf_size)
+{
+ int fd;
+ int ret;
+ char path[4096];
+
+ if (type == GID_MAP) {
+ int setgroups_fd;
+
+ snprintf(path, sizeof(path), "/proc/%d/setgroups", pid);
+ setgroups_fd = open(path, O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
+ if (setgroups_fd < 0 && errno != ENOENT)
+ return -1;
+
+ if (setgroups_fd >= 0) {
+ ret = write_nointr(setgroups_fd, "deny", sizeof("deny") - 1);
+ close_prot_errno_disarm(setgroups_fd);
+ if (ret != sizeof("deny") - 1)
+ return -1;
+ }
+ }
+
+ switch (type) {
+ case UID_MAP:
+ ret = snprintf(path, sizeof(path), "/proc/%d/uid_map", pid);
+ break;
+ case GID_MAP:
+ ret = snprintf(path, sizeof(path), "/proc/%d/gid_map", pid);
+ break;
+ default:
+ return -1;
+ }
+ if (ret < 0 || ret >= sizeof(path))
+ return -E2BIG;
+
+ fd = open(path, O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
+ if (fd < 0)
+ return -1;
+
+ ret = write_nointr(fd, buf, buf_size);
+ close_prot_errno_disarm(fd);
+ if (ret != buf_size)
+ return -1;
+
+ return 0;
+}
+
+static void change_userns(int syncfds[2])
+{
+ int ret;
+ char buf;
+
+ close_prot_errno_disarm(syncfds[1]);
+
+ ret = unshare(CLONE_NEWUSER);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to unshare user namespace\n",
+ strerror(errno));
+
+ ret = write_nointr(syncfds[0], "1", 1);
+ if (ret != 1)
+ ksft_exit_fail_msg("write_nointr() failed\n");
+
+ ret = read_nointr(syncfds[0], &buf, 1);
+ if (ret != 1)
+ ksft_exit_fail_msg("read_nointr() failed\n");
+
+ close_prot_errno_disarm(syncfds[0]);
+
+ if (setid_userns_root())
+ ksft_exit_fail_msg("setid_userns_root() failed");
+}
+
+static void change_idmaps(int syncfds[2], pid_t pid)
+{
+ int ret;
+ char buf;
+ char id_map[4096];
+
+ close_prot_errno_disarm(syncfds[0]);
+
+ ret = read_nointr(syncfds[1], &buf, 1);
+ if (ret != 1)
+ ksft_exit_fail_msg("read_nointr() failed\n");
+
+ snprintf(id_map, sizeof(id_map), "0 %d 1\n", getuid());
+ ret = write_id_mapping(UID_MAP, pid, id_map, strlen(id_map));
+ if (ret)
+ ksft_exit_fail_msg("write_id_mapping(UID_MAP) failed");
+
+ snprintf(id_map, sizeof(id_map), "0 %d 1\n", getgid());
+ ret = write_id_mapping(GID_MAP, pid, id_map, strlen(id_map));
+ if (ret)
+ ksft_exit_fail_msg("write_id_mapping(GID_MAP) failed");
+
+ ret = write_nointr(syncfds[1], "1", 1);
+ if (ret != 1)
+ ksft_exit_fail_msg("write_nointr() failed");
+
+ close_prot_errno_disarm(syncfds[1]);
}
-static void binderfs_test_unprivileged()
+static void *binder_version_thread(void *data)
{
- change_to_userns();
- __do_binderfs_test();
+ int fd = PTR_TO_INT(data);
+ struct binder_version version = { 0 };
+ int ret;
+
+ ret = ioctl(fd, BINDER_VERSION, &version);
+ if (ret < 0)
+ ksft_print_msg("%s - Failed to open perform BINDER_VERSION request\n", strerror(errno));
+
+ pthread_exit(data);
}
-int main(int argc, char *argv[])
+/*
+ * Regression test:
+ * 2669b8b0c798 ("binder: prevent UAF for binderfs devices")
+ * f0fe2c0f050d ("binder: prevent UAF for binderfs devices II")
+ * 211b64e4b5b6 ("binderfs: use refcount for binder control devices too")
+ */
+TEST(binderfs_stress)
{
- binderfs_test_privileged();
- binderfs_test_unprivileged();
- ksft_exit_pass();
+ int fds[1000];
+ int syncfds[2];
+ pid_t pid;
+ int fd, ret;
+ size_t len;
+ struct binderfs_device device = { 0 };
+ char binderfs_mntpt[] = P_tmpdir "/binderfs_XXXXXX",
+ device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
+
+ ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, syncfds);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to create socket pair", strerror(errno));
+
+ pid = fork();
+ if (pid < 0) {
+ close_prot_errno_disarm(syncfds[0]);
+ close_prot_errno_disarm(syncfds[1]);
+ ksft_exit_fail_msg("%s - Failed to fork", strerror(errno));
+ }
+
+ if (pid == 0) {
+ int i, j, k, nthreads;
+ pthread_attr_t attr;
+ pthread_t threads[DEFAULT_THREADS];
+ change_userns(syncfds);
+ change_mountns();
+
+ if (!mkdtemp(binderfs_mntpt))
+ log_exit("%s - Failed to create binderfs mountpoint\n",
+ strerror(errno));
+
+ ret = mount(NULL, binderfs_mntpt, "binder", 0, 0);
+ if (ret < 0)
+ log_exit("%s - Failed to mount binderfs\n", strerror(errno));
+
+ for (int i = 0; i < ARRAY_SIZE(fds); i++) {
+
+ snprintf(device_path, sizeof(device_path),
+ "%s/binder-control", binderfs_mntpt);
+ fd = open(device_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ log_exit("%s - Failed to open binder-control device\n", strerror(errno));
+
+ memset(&device, 0, sizeof(device));
+ snprintf(device.name, sizeof(device.name), "%d", i);
+ ret = ioctl(fd, BINDER_CTL_ADD, &device);
+ close_prot_errno_disarm(fd);
+ if (ret < 0)
+ log_exit("%s - Failed to allocate new binder device\n", strerror(errno));
+
+ snprintf(device_path, sizeof(device_path), "%s/%d",
+ binderfs_mntpt, i);
+ fds[i] = open(device_path, O_RDONLY | O_CLOEXEC);
+ if (fds[i] < 0)
+ log_exit("%s - Failed to open binder device\n", strerror(errno));
+ }
+
+ ret = umount2(binderfs_mntpt, MNT_DETACH);
+ rmdir_protect_errno(binderfs_mntpt);
+ if (ret < 0)
+ log_exit("%s - Failed to unmount binderfs\n", strerror(errno));
+
+ nthreads = get_nprocs_conf();
+ if (nthreads > DEFAULT_THREADS)
+ nthreads = DEFAULT_THREADS;
+
+ pthread_attr_init(&attr);
+ for (k = 0; k < ARRAY_SIZE(fds); k++) {
+ for (i = 0; i < nthreads; i++) {
+ ret = pthread_create(&threads[i], &attr, binder_version_thread, INT_TO_PTR(fds[k]));
+ if (ret) {
+ ksft_print_msg("%s - Failed to create thread %d\n", strerror(errno), i);
+ break;
+ }
+ }
+
+ for (j = 0; j < i; j++) {
+ void *fdptr = NULL;
+
+ ret = pthread_join(threads[j], &fdptr);
+ if (ret)
+ ksft_print_msg("%s - Failed to join thread %d for fd %d\n", strerror(errno), j, PTR_TO_INT(fdptr));
+ }
+ }
+ pthread_attr_destroy(&attr);
+
+ for (k = 0; k < ARRAY_SIZE(fds); k++)
+ close(fds[k]);
+
+ exit(EXIT_SUCCESS);
+ }
+
+ change_idmaps(syncfds, pid);
+
+ ret = wait_for_pid(pid);
+ if (ret)
+ ksft_exit_fail_msg("wait_for_pid() failed");
}
+
+TEST(binderfs_test_privileged)
+{
+ if (geteuid() != 0)
+ XFAIL(return, "Tests are not run as root. Skipping privileged tests");
+
+ if (__do_binderfs_test() == 1)
+ XFAIL(return, "The Android binderfs filesystem is not available");
+}
+
+TEST(binderfs_test_unprivileged)
+{
+ int ret;
+ int syncfds[2];
+ pid_t pid;
+
+ ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, syncfds);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s - Failed to create socket pair", strerror(errno));
+
+ pid = fork();
+ if (pid < 0) {
+ close_prot_errno_disarm(syncfds[0]);
+ close_prot_errno_disarm(syncfds[1]);
+ ksft_exit_fail_msg("%s - Failed to fork", strerror(errno));
+ }
+
+ if (pid == 0) {
+ change_userns(syncfds);
+ if (__do_binderfs_test() == 1)
+ exit(2);
+ exit(EXIT_SUCCESS);
+ }
+
+ change_idmaps(syncfds, pid);
+
+ ret = wait_for_pid(pid);
+ if (ret) {
+ if (ret == 2)
+ XFAIL(return, "The Android binderfs filesystem is not available");
+ else
+ ksft_exit_fail_msg("wait_for_pid() failed");
+ }
+}
+
+TEST_HARNESS_MAIN
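
The change_userns()/change_idmaps() pair above is a classic parent/child handshake: the child unshares a user namespace and blocks while the parent writes its uid_map/gid_map from outside, then continues once released. A condensed standalone sketch of the same handshake (illustrative only; error handling trimmed):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static void write_file(const char *path, const char *buf)
    {
            FILE *f = fopen(path, "we");

            if (!f || fputs(buf, f) == EOF || fclose(f) == EOF)
                    exit(EXIT_FAILURE);
    }

    int main(void)
    {
            int syncfds[2];
            char c, path[64], map[64];
            pid_t pid;

            if (socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, syncfds))
                    return 1;

            pid = fork();
            if (pid == 0) {                         /* child end: syncfds[0] */
                    close(syncfds[1]);
                    if (unshare(CLONE_NEWUSER))
                            _exit(1);
                    write(syncfds[0], "1", 1);      /* "unshared, map me" */
                    read(syncfds[0], &c, 1);        /* block until mapped */
                    printf("child uid in new userns: %d\n", (int)getuid());
                    _exit(0);
            }

            close(syncfds[0]);                      /* parent end: syncfds[1] */
            read(syncfds[1], &c, 1);                /* wait for the unshare */

            snprintf(path, sizeof(path), "/proc/%d/setgroups", (int)pid);
            write_file(path, "deny");               /* required before gid_map */

            snprintf(map, sizeof(map), "0 %d 1", (int)getuid());
            snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)pid);
            write_file(path, map);

            snprintf(map, sizeof(map), "0 %d 1", (int)getgid());
            snprintf(path, sizeof(path), "/proc/%d/gid_map", (int)pid);
            write_file(path, map);

            write(syncfds[1], "1", 1);              /* release the child */
            waitpid(pid, NULL, 0);
            return 0;
    }

Only once the maps land can setuid(0)/setgid(0) succeed in the child, which is why setid_userns_root() runs strictly after the second read in change_userns().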
diff --git a/tools/testing/selftests/filesystems/epoll/.gitignore b/tools/testing/selftests/filesystems/epoll/.gitignore
index 9ae8db44ec14..9090157258b1 100644
--- a/tools/testing/selftests/filesystems/epoll/.gitignore
+++ b/tools/testing/selftests/filesystems/epoll/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
epoll_wakeup_test
diff --git a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
index 37a04dab56f0..11eee0b60040 100644
--- a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
+++ b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
@@ -7,13 +7,14 @@
#include <pthread.h>
#include <sys/epoll.h>
#include <sys/socket.h>
+#include <sys/eventfd.h>
#include "../../kselftest_harness.h"
struct epoll_mtcontext
{
int efd[3];
int sfd[4];
- int count;
+ volatile int count;
pthread_t main;
pthread_t waiter;
@@ -3071,4 +3072,68 @@ TEST(epoll58)
close(ctx.sfd[3]);
}
+static void *epoll59_thread(void *ctx_)
+{
+ struct epoll_mtcontext *ctx = ctx_;
+ struct epoll_event e;
+ int i;
+
+ for (i = 0; i < 100000; i++) {
+ while (ctx->count == 0)
+ ;
+
+ e.events = EPOLLIN | EPOLLERR | EPOLLET;
+ epoll_ctl(ctx->efd[0], EPOLL_CTL_MOD, ctx->sfd[0], &e);
+ ctx->count = 0;
+ }
+
+ return NULL;
+}
+
+/*
+ * t0
+ * (p) \
+ * e0
+ * (et) /
+ * e0
+ *
+ * Based on https://bugzilla.kernel.org/show_bug.cgi?id=205933
+ */
+TEST(epoll59)
+{
+ pthread_t emitter;
+ struct pollfd pfd;
+ struct epoll_event e;
+ struct epoll_mtcontext ctx = { 0 };
+ int i, ret;
+
+ signal(SIGUSR1, signal_handler);
+
+ ctx.efd[0] = epoll_create1(0);
+ ASSERT_GE(ctx.efd[0], 0);
+
+ ctx.sfd[0] = eventfd(1, 0);
+ ASSERT_GE(ctx.sfd[0], 0);
+
+ e.events = EPOLLIN | EPOLLERR | EPOLLET;
+ ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0);
+
+ ASSERT_EQ(pthread_create(&emitter, NULL, epoll59_thread, &ctx), 0);
+
+ for (i = 0; i < 100000; i++) {
+ ret = epoll_wait(ctx.efd[0], &e, 1, 1000);
+ ASSERT_GT(ret, 0);
+
+ while (ctx.count != 0)
+ ;
+ ctx.count = 1;
+ }
+ if (pthread_tryjoin_np(emitter, NULL) < 0) {
+ pthread_kill(emitter, SIGUSR1);
+ pthread_join(emitter, NULL);
+ }
+ close(ctx.efd[0]);
+ close(ctx.sfd[0]);
+}
+
TEST_HARNESS_MAIN
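
epoll59 above stresses, across two racing threads, the behavior this sketch shows in miniature: with EPOLLET an event is reported once per edge, and EPOLL_CTL_MOD re-arms the entry and re-checks readiness, so a still-readable fd must be reported again (the bugzilla report found exactly this re-report going missing). A minimal single-threaded illustration (not part of the patch):

    #include <stdio.h>
    #include <sys/epoll.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
            int efd = epoll_create1(0);
            int evfd = eventfd(1, 0);       /* nonzero counter: readable */
            struct epoll_event e;
            int n;

            e.events = EPOLLIN | EPOLLET;
            e.data.fd = evfd;
            epoll_ctl(efd, EPOLL_CTL_ADD, evfd, &e);

            /* The initial edge is pending, so this reports one event. */
            n = epoll_wait(efd, &e, 1, 0);
            printf("first wait:  %d event(s)\n", n);

            /* Edge consumed; no new edge, so this reports nothing... */
            n = epoll_wait(efd, &e, 1, 0);
            printf("second wait: %d event(s)\n", n);

            /* ...until EPOLL_CTL_MOD re-arms the still-readable fd. */
            e.events = EPOLLIN | EPOLLET;
            e.data.fd = evfd;
            epoll_ctl(efd, EPOLL_CTL_MOD, evfd, &e);
            n = epoll_wait(efd, &e, 1, 0);
            printf("after MOD:   %d event(s)\n", n);

            close(evfd);
            close(efd);
            return 0;
    }

Expected output is 1, 0, 1 event(s); the regression made the re-armed wait hang instead.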
diff --git a/tools/testing/selftests/ftrace/.gitignore b/tools/testing/selftests/ftrace/.gitignore
index 98d8a5a63049..2659417cb2c7 100644
--- a/tools/testing/selftests/ftrace/.gitignore
+++ b/tools/testing/selftests/ftrace/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
logs
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
new file mode 100644
index 000000000000..f0f366f18d0c
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
@@ -0,0 +1,125 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event tracing - restricts events based on pid notrace filtering
+# flags: instance
+
+do_reset() {
+ echo > set_event
+ echo > set_event_pid
+ echo > set_event_notrace_pid
+ echo 0 > options/event-fork
+ echo 0 > events/enable
+ clear_trace
+ echo 1 > tracing_on
+}
+
+fail() { #msg
+ cat trace
+ do_reset
+ echo $1
+ exit_fail
+}
+
+count_pid() {
+ pid=$@
+ cat trace | grep -v '^#' | sed -e 's/[^-]*-\([0-9]*\).*/\1/' | grep $pid | wc -l
+}
+
+count_no_pid() {
+ pid=$1
+ cat trace | grep -v '^#' | sed -e 's/[^-]*-\([0-9]*\).*/\1/' | grep -v $pid | wc -l
+}
+
+enable_system() {
+ system=$1
+
+ if [ -d events/$system ]; then
+ echo 1 > events/$system/enable
+ fi
+}
+
+enable_events() {
+ echo 0 > tracing_on
+ # Enable common groups of events, as all events can allow for
+ # events to be traced via scheduling that we don't care to test.
+ enable_system syscalls
+ enable_system rcu
+ enable_system block
+ enable_system exceptions
+ enable_system irq
+ enable_system net
+ enable_system power
+ enable_system signal
+ enable_system sock
+ enable_system timer
+ enable_system thermal
+ echo 1 > tracing_on
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f set_event_pid -o ! -f set_event_notrace_pid ]; then
+ echo "event pid notrace filtering is not supported"
+ exit_unsupported
+fi
+
+echo 0 > options/event-fork
+
+do_reset
+
+read mypid rest < /proc/self/stat
+
+echo $mypid > set_event_notrace_pid
+grep -q $mypid set_event_notrace_pid
+
+enable_events
+
+yield
+
+echo 0 > tracing_on
+
+cnt=`count_pid $mypid`
+if [ $cnt -ne 0 ]; then
+ fail "Filtered out task has events"
+fi
+
+cnt=`count_no_pid $mypid`
+if [ $cnt -eq 0 ]; then
+ fail "No other events were recorded"
+fi
+
+do_reset
+
+echo $mypid > set_event_notrace_pid
+echo 1 > options/event-fork
+
+enable_events
+
+yield &
+child=$!
+echo "child = $child"
+wait $child
+
+echo 0 > tracing_on
+
+cnt=`count_pid $mypid`
+if [ $cnt -ne 0 ]; then
+ fail "Filtered out task has events"
+fi
+
+cnt=`count_pid $child`
+if [ $cnt -ne 0 ]; then
+ fail "Child of filtered out taskhas events"
+fi
+
+cnt=`count_no_pid $mypid`
+if [ $cnt -eq 0 ]; then
+ fail "No other events were recorded"
+fi
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc
new file mode 100644
index 000000000000..8aa46a2ea133
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc
@@ -0,0 +1,108 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: ftrace - function pid notrace filters
+# flags: instance
+
+# Make sure that function pid matching filter with notrace works.
+
+if ! grep -q function available_tracers; then
+ echo "no function tracer configured"
+ exit_unsupported
+fi
+
+if [ ! -f set_ftrace_notrace_pid ]; then
+ echo "set_ftrace_notrace_pid not found? Is function tracer not set?"
+ exit_unsupported
+fi
+
+if [ ! -f set_ftrace_filter ]; then
+ echo "set_ftrace_filter not found? Is function tracer not set?"
+ exit_unsupported
+fi
+
+do_function_fork=1
+
+if [ ! -f options/function-fork ]; then
+ do_function_fork=0
+ echo "no option for function-fork found. Option will not be tested."
+fi
+
+read PID _ < /proc/self/stat
+
+if [ $do_function_fork -eq 1 ]; then
+ # default value of function-fork option
+ orig_value=`grep function-fork trace_options`
+fi
+
+do_reset() {
+ if [ $do_function_fork -eq 0 ]; then
+ return
+ fi
+
+ echo > set_ftrace_notrace_pid
+ echo $orig_value > trace_options
+}
+
+fail() { # msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+do_test() {
+ disable_tracing
+
+ echo do_execve* > set_ftrace_filter
+ echo *do_fork >> set_ftrace_filter
+
+ echo $PID > set_ftrace_notrace_pid
+ echo function > current_tracer
+
+ if [ $do_function_fork -eq 1 ]; then
+ # don't allow children to be traced
+ echo nofunction-fork > trace_options
+ fi
+
+ enable_tracing
+ yield
+
+ count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+ count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+ # count_pid should be 0
+ if [ $count_pid -ne 0 -o $count_other -eq 0 ]; then
+ fail "PID filtering not working? traced task = $count_pid; other tasks = $count_other "
+ fi
+
+ disable_tracing
+ clear_trace
+
+ if [ $do_function_fork -eq 0 ]; then
+ return
+ fi
+
+ # allow children to be traced
+ echo function-fork > trace_options
+
+ # With pid in both set_ftrace_pid and set_ftrace_notrace_pid
+ # there should not be any tasks traced.
+
+ echo $PID > set_ftrace_pid
+
+ enable_tracing
+ yield
+
+ count_pid=`cat trace | grep -v ^# | grep $PID | wc -l`
+ count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l`
+
+ # both should be zero
+ if [ $count_pid -ne 0 -o $count_other -ne 0 ]; then
+ fail "PID filtering not following fork? traced task = $count_pid; other tasks = $count_other "
+ fi
+}
+
+do_test
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
index 0c04282d33dd..1947387fe976 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
@@ -41,7 +41,7 @@ fi
echo '** ENABLE EVENTS'
-echo 1 > events/enable
+echo 1 > events/sched/enable
echo '** ENABLE TRACING'
enable_tracing
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
index 18fdaab9f570..68ff3f45c720 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
@@ -23,7 +23,7 @@ if [ ! -f events/sched/sched_process_fork/hist ]; then
exit_unsupported
fi
-echo "Test histogram multiple tiggers"
+echo "Test histogram multiple triggers"
echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
echo 'hist:keys=parent_comm:vals=child_pid' >> events/sched/sched_process_fork/trigger
diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
index a09f57061902..0efcd494daab 100644
--- a/tools/testing/selftests/futex/functional/.gitignore
+++ b/tools/testing/selftests/futex/functional/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
futex_requeue_pi
futex_requeue_pi_mismatched_ops
futex_requeue_pi_signal_restart
diff --git a/tools/testing/selftests/gpio/.gitignore b/tools/testing/selftests/gpio/.gitignore
index 7d14f743d1a4..4c69408f3e84 100644
--- a/tools/testing/selftests/gpio/.gitignore
+++ b/tools/testing/selftests/gpio/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
gpio-mockup-chardev
diff --git a/tools/testing/selftests/ia64/.gitignore b/tools/testing/selftests/ia64/.gitignore
index ab806edc8732..e962fb2a08d5 100644
--- a/tools/testing/selftests/ia64/.gitignore
+++ b/tools/testing/selftests/ia64/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
aliasing-test
diff --git a/tools/testing/selftests/intel_pstate/.gitignore b/tools/testing/selftests/intel_pstate/.gitignore
index 3bfcbae5fa13..862de222a3f3 100644
--- a/tools/testing/selftests/intel_pstate/.gitignore
+++ b/tools/testing/selftests/intel_pstate/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
aperf
msr
diff --git a/tools/testing/selftests/ipc/.gitignore b/tools/testing/selftests/ipc/.gitignore
index 9af04c9353c0..9ed280e4c704 100644
--- a/tools/testing/selftests/ipc/.gitignore
+++ b/tools/testing/selftests/ipc/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
msgque_test
msgque
diff --git a/tools/testing/selftests/ir/.gitignore b/tools/testing/selftests/ir/.gitignore
index 070ea0c75fb8..0bbada8c1811 100644
--- a/tools/testing/selftests/ir/.gitignore
+++ b/tools/testing/selftests/ir/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
ir_loopback
diff --git a/tools/testing/selftests/kcmp/.gitignore b/tools/testing/selftests/kcmp/.gitignore
index 5a9b3732b2de..38ccdfe80ef7 100644
--- a/tools/testing/selftests/kcmp/.gitignore
+++ b/tools/testing/selftests/kcmp/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
kcmp_test
kcmp-test-file
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 5336b26506ab..2902f6a78f8a 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -635,10 +635,12 @@
struct __test_metadata {
const char *name;
void (*fn)(struct __test_metadata *);
+ pid_t pid; /* pid of test when being run */
int termsig;
int passed;
int trigger; /* extra handler after the evaluation */
- int timeout;
+ int timeout; /* seconds to wait for test timeout */
+ bool timed_out; /* did this test timeout instead of exiting? */
__u8 step;
bool no_print; /* manual trigger when TH_LOG_STREAM is not available */
struct __test_metadata *prev, *next;
@@ -695,64 +697,116 @@ static inline int __bail(int for_realz, bool no_print, __u8 step)
return 0;
}
-void __run_test(struct __test_metadata *t)
+struct __test_metadata *__active_test;
+static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
{
- pid_t child_pid;
+ struct __test_metadata *t = __active_test;
+
+ /* Sanity check handler execution environment. */
+ if (!t) {
+ fprintf(TH_LOG_STREAM,
+ "no active test in SIGARLM handler!?\n");
+ abort();
+ }
+ if (sig != SIGALRM || sig != info->si_signo) {
+ fprintf(TH_LOG_STREAM,
+ "%s: SIGALRM handler caught signal %d!?\n",
+ t->name, sig != SIGALRM ? sig : info->si_signo);
+ abort();
+ }
+
+ t->timed_out = true;
+ kill(t->pid, SIGKILL);
+}
+
+void __wait_for_test(struct __test_metadata *t)
+{
+ struct sigaction action = {
+ .sa_sigaction = __timeout_handler,
+ .sa_flags = SA_SIGINFO,
+ };
+ struct sigaction saved_action;
int status;
+ if (sigaction(SIGALRM, &action, &saved_action)) {
+ t->passed = 0;
+ fprintf(TH_LOG_STREAM,
+ "%s: unable to install SIGARLM handler\n",
+ t->name);
+ return;
+ }
+ __active_test = t;
+ t->timed_out = false;
+ alarm(t->timeout);
+ waitpid(t->pid, &status, 0);
+ alarm(0);
+ if (sigaction(SIGALRM, &saved_action, NULL)) {
+ t->passed = 0;
+ fprintf(TH_LOG_STREAM,
+ "%s: unable to uninstall SIGARLM handler\n",
+ t->name);
+ return;
+ }
+ __active_test = NULL;
+
+ if (t->timed_out) {
+ t->passed = 0;
+ fprintf(TH_LOG_STREAM,
+ "%s: Test terminated by timeout\n", t->name);
+ } else if (WIFEXITED(status)) {
+ t->passed = t->termsig == -1 ? !WEXITSTATUS(status) : 0;
+ if (t->termsig != -1) {
+ fprintf(TH_LOG_STREAM,
+ "%s: Test exited normally "
+ "instead of by signal (code: %d)\n",
+ t->name,
+ WEXITSTATUS(status));
+ } else if (!t->passed) {
+ fprintf(TH_LOG_STREAM,
+ "%s: Test failed at step #%d\n",
+ t->name,
+ WEXITSTATUS(status));
+ }
+ } else if (WIFSIGNALED(status)) {
+ t->passed = 0;
+ if (WTERMSIG(status) == SIGABRT) {
+ fprintf(TH_LOG_STREAM,
+ "%s: Test terminated by assertion\n",
+ t->name);
+ } else if (WTERMSIG(status) == t->termsig) {
+ t->passed = 1;
+ } else {
+ fprintf(TH_LOG_STREAM,
+ "%s: Test terminated unexpectedly "
+ "by signal %d\n",
+ t->name,
+ WTERMSIG(status));
+ }
+ } else {
+ fprintf(TH_LOG_STREAM,
+ "%s: Test ended in some other way [%u]\n",
+ t->name,
+ status);
+ }
+}
+
+void __run_test(struct __test_metadata *t)
+{
t->passed = 1;
t->trigger = 0;
printf("[ RUN ] %s\n", t->name);
- alarm(t->timeout);
- child_pid = fork();
- if (child_pid < 0) {
+ t->pid = fork();
+ if (t->pid < 0) {
printf("ERROR SPAWNING TEST CHILD\n");
t->passed = 0;
- } else if (child_pid == 0) {
+ } else if (t->pid == 0) {
t->fn(t);
/* return the step that failed or 0 */
_exit(t->passed ? 0 : t->step);
} else {
- /* TODO(wad) add timeout support. */
- waitpid(child_pid, &status, 0);
- if (WIFEXITED(status)) {
- t->passed = t->termsig == -1 ? !WEXITSTATUS(status) : 0;
- if (t->termsig != -1) {
- fprintf(TH_LOG_STREAM,
- "%s: Test exited normally "
- "instead of by signal (code: %d)\n",
- t->name,
- WEXITSTATUS(status));
- } else if (!t->passed) {
- fprintf(TH_LOG_STREAM,
- "%s: Test failed at step #%d\n",
- t->name,
- WEXITSTATUS(status));
- }
- } else if (WIFSIGNALED(status)) {
- t->passed = 0;
- if (WTERMSIG(status) == SIGABRT) {
- fprintf(TH_LOG_STREAM,
- "%s: Test terminated by assertion\n",
- t->name);
- } else if (WTERMSIG(status) == t->termsig) {
- t->passed = 1;
- } else {
- fprintf(TH_LOG_STREAM,
- "%s: Test terminated unexpectedly "
- "by signal %d\n",
- t->name,
- WTERMSIG(status));
- }
- } else {
- fprintf(TH_LOG_STREAM,
- "%s: Test ended in some other way [%u]\n",
- t->name,
- status);
- }
+ __wait_for_test(t);
}
printf("[ %4s ] %s\n", (t->passed ? "OK" : "FAIL"), t->name);
- alarm(0);
}
static int test_harness_run(int __attribute__((unused)) argc,
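The pattern adopted here is the standard way to bound a forked child: install a SIGALRM handler, arm alarm() around waitpid(), and have the handler SIGKILL the child so the parent can still reap it. A minimal, self-contained sketch of that pattern (illustrative names, most error handling elided; this is not the harness's literal code):

  #include <errno.h>
  #include <signal.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/wait.h>
  #include <unistd.h>

  static pid_t child;                     /* pid the handler will kill */
  static volatile sig_atomic_t timed_out;

  static void on_alarm(int sig)
  {
          (void)sig;
          timed_out = 1;
          kill(child, SIGKILL);           /* unblocks waitpid() below */
  }

  int main(void)
  {
          struct sigaction sa = { .sa_handler = on_alarm };
          int status;

          sigaction(SIGALRM, &sa, NULL);  /* no SA_RESTART: waitpid may EINTR */

          child = fork();
          if (child == 0) {               /* the "test" overruns its budget */
                  sleep(60);
                  _exit(0);
          }

          alarm(3);                       /* timeout in seconds */
          while (waitpid(child, &status, 0) < 0 && errno == EINTR)
                  ;                       /* retry to reap the killed child */
          alarm(0);

          if (timed_out)
                  printf("test timed out\n");
          else if (WIFEXITED(status))
                  printf("test exited with %d\n", WEXITSTATUS(status));
          return 0;
  }

Killing the child from the handler is what lets a single waitpid() serve both the normal-exit and timeout paths; __wait_for_test() above then only needs the timed_out flag to tell them apart.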
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 30072c3f52fb..a9b2b48947ff 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,13 +1,17 @@
-/s390x/sync_regs_test
+# SPDX-License-Identifier: GPL-2.0-only
/s390x/memop
+/s390x/resets
+/s390x/sync_regs_test
/x86_64/cr4_cpuid_sync_test
/x86_64/evmcs_test
/x86_64/hyperv_cpuid
/x86_64/mmio_warning_test
/x86_64/platform_info_test
+/x86_64/set_memory_region_test
/x86_64/set_sregs_test
/x86_64/smm_test
/x86_64/state_test
+/x86_64/svm_vmcall_test
/x86_64/sync_regs_test
/x86_64/vmx_close_while_nested_test
/x86_64/vmx_dirty_log_test
@@ -15,5 +19,7 @@
/x86_64/vmx_tsc_adjust_test
/x86_64/xss_msr_test
/clear_dirty_log_test
+/demand_paging_test
/dirty_log_test
/kvm_create_max_vcpus
+/steal_time
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index d91c53b726e6..712a2ddd2a27 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -7,7 +7,7 @@ top_srcdir = ../../../..
KSFT_KHDR_INSTALL := 1
UNAME_M := $(shell uname -m)
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
@@ -17,27 +17,33 @@ TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/set_memory_region_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
-TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
+TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
+TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_aarch64 += steal_time
TEST_GEN_PROGS_s390x = s390x/memop
-TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/resets
+TEST_GEN_PROGS_s390x += s390x/sync_regs_test
+TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/clear_dirty_log_test.c b/tools/testing/selftests/kvm/clear_dirty_log_test.c
index 749336937d37..11672ec6f74e 100644
--- a/tools/testing/selftests/kvm/clear_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/clear_dirty_log_test.c
@@ -1,2 +1,6 @@
#define USE_CLEAR_DIRTY_LOG
+#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
+#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
+#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
+ KVM_DIRTY_LOG_INITIALLY_SET)
#include "dirty_log_test.c"
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
new file mode 100644
index 000000000000..360cd3ea4cd6
--- /dev/null
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM demand paging test
+ * Adapted from dirty_log_test.c
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2019, Google, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <asm/unistd.h>
+#include <time.h>
+#include <poll.h>
+#include <pthread.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/userfaultfd.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#ifdef __NR_userfaultfd
+
+/* The memory slot index used for the demand paging test region */
+#define TEST_MEM_SLOT_INDEX 1
+
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM 0xc0000000
+
+#define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */
+
+#ifdef PRINT_PER_PAGE_UPDATES
+#define PER_PAGE_DEBUG(...) printf(__VA_ARGS__)
+#else
+#define PER_PAGE_DEBUG(...) _no_printf(__VA_ARGS__)
+#endif
+
+#ifdef PRINT_PER_VCPU_UPDATES
+#define PER_VCPU_DEBUG(...) printf(__VA_ARGS__)
+#else
+#define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
+#endif
+
+#define MAX_VCPUS 512
+
+/*
+ * Guest/Host shared variables. Ensure addr_gva2hva() and/or
+ * sync_global_to/from_guest() are used when accessing from
+ * the host. READ/WRITE_ONCE() should also be used with anything
+ * that may change.
+ */
+static uint64_t host_page_size;
+static uint64_t guest_page_size;
+
+static char *guest_data_prototype;
+
+/*
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
+ */
+static uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+struct vcpu_args {
+ uint64_t gva;
+ uint64_t pages;
+
+ /* Only used by the host userspace part of the vCPU thread */
+ int vcpu_id;
+ struct kvm_vm *vm;
+};
+
+static struct vcpu_args vcpu_args[MAX_VCPUS];
+
+/*
+ * Write once to the first 8 bytes of each page in the demand paging
+ * memory region to fault it in.
+ */
+static void guest_code(uint32_t vcpu_id)
+{
+ uint64_t gva;
+ uint64_t pages;
+ int i;
+
+ /* Make sure vCPU args data structure is not corrupt. */
+ GUEST_ASSERT(vcpu_args[vcpu_id].vcpu_id == vcpu_id);
+
+ gva = vcpu_args[vcpu_id].gva;
+ pages = vcpu_args[vcpu_id].pages;
+
+ for (i = 0; i < pages; i++) {
+ uint64_t addr = gva + (i * guest_page_size);
+
+ addr &= ~(host_page_size - 1);
+ *(uint64_t *)addr = 0x0123456789ABCDEF;
+ }
+
+ GUEST_SYNC(1);
+}
+
+static void *vcpu_worker(void *data)
+{
+ int ret;
+ struct vcpu_args *args = (struct vcpu_args *)data;
+ struct kvm_vm *vm = args->vm;
+ int vcpu_id = args->vcpu_id;
+ struct kvm_run *run;
+ struct timespec start, end, ts_diff;
+
+ vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+ run = vcpu_state(vm, vcpu_id);
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
+ /* Let the guest access its memory */
+ ret = _vcpu_run(vm, vcpu_id);
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+ if (get_ucall(vm, vcpu_id, NULL) != UCALL_SYNC) {
+ TEST_ASSERT(false,
+ "Invalid guest sync status: exit_reason=%s\n",
+ exit_reason_str(run->exit_reason));
+ }
+
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ ts_diff = timespec_sub(end, start);
+ PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
+ ts_diff.tv_sec, ts_diff.tv_nsec);
+
+ return NULL;
+}
+
+#define PAGE_SHIFT_4K 12
+#define PTES_PER_4K_PT 512
+
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
+ uint64_t vcpu_memory_bytes)
+{
+ struct kvm_vm *vm;
+ uint64_t pages = DEFAULT_GUEST_PHY_PAGES;
+
+ /* Account for a few pages per-vCPU for stacks */
+ pages += DEFAULT_STACK_PGS * vcpus;
+
+ /*
+ * Reserve twice the amount of memory needed to map the test region and
+ * the page table / stacks region, at 4k, for page tables. Do the
+ * calculation with the 4K page size, the smallest of all archs (e.g., a
+ * 64K page size guest will need even less memory for page tables).
+ */
+ pages += (2 * pages) / PTES_PER_4K_PT;
+ pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
+ PTES_PER_4K_PT;
+ pages = vm_adjust_num_guest_pages(mode, pages);
+
+ pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
+ vm = _vm_create(mode, pages, O_RDWR);
+ kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+#ifdef __x86_64__
+ vm_create_irqchip(vm);
+#endif
+ return vm;
+}
+
+static int handle_uffd_page_request(int uffd, uint64_t addr)
+{
+ pid_t tid;
+ struct timespec start;
+ struct timespec end;
+ struct uffdio_copy copy;
+ int r;
+
+ tid = syscall(__NR_gettid);
+
+ copy.src = (uint64_t)guest_data_prototype;
+ copy.dst = addr;
+ copy.len = host_page_size;
+ copy.mode = 0;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
+ r = ioctl(uffd, UFFDIO_COPY, &copy);
+ if (r == -1) {
+ pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n",
+ addr, tid, errno);
+ return r;
+ }
+
+ clock_gettime(CLOCK_MONOTONIC, &end);
+
+ PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
+ timespec_to_ns(timespec_sub(end, start)));
+ PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
+ host_page_size, addr, tid);
+
+ return 0;
+}
+
+bool quit_uffd_thread;
+
+struct uffd_handler_args {
+ int uffd;
+ int pipefd;
+ useconds_t delay;
+};
+
+static void *uffd_handler_thread_fn(void *arg)
+{
+ struct uffd_handler_args *uffd_args = (struct uffd_handler_args *)arg;
+ int uffd = uffd_args->uffd;
+ int pipefd = uffd_args->pipefd;
+ useconds_t delay = uffd_args->delay;
+ int64_t pages = 0;
+ struct timespec start, end, ts_diff;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ while (!quit_uffd_thread) {
+ struct uffd_msg msg;
+ struct pollfd pollfd[2];
+ char tmp_chr;
+ int r;
+ uint64_t addr;
+
+ pollfd[0].fd = uffd;
+ pollfd[0].events = POLLIN;
+ pollfd[1].fd = pipefd;
+ pollfd[1].events = POLLIN;
+
+ r = poll(pollfd, 2, -1);
+ switch (r) {
+ case -1:
+ pr_info("poll err");
+ continue;
+ case 0:
+ continue;
+ default:
+ /* One or both fds are ready; fall through and service them. */
+ break;
+ }
+
+ if (pollfd[0].revents & POLLERR) {
+ pr_info("uffd revents has POLLERR");
+ return NULL;
+ }
+
+ if (pollfd[1].revents & POLLIN) {
+ r = read(pollfd[1].fd, &tmp_chr, 1);
+ TEST_ASSERT(r == 1,
+ "Error reading pipefd in UFFD thread\n");
+ return NULL;
+ }
+
+ if (!(pollfd[0].revents & POLLIN))
+ continue;
+
+ r = read(uffd, &msg, sizeof(msg));
+ if (r == -1) {
+ if (errno == EAGAIN)
+ continue;
+ pr_info("Read of uffd gor errno %d", errno);
+ return NULL;
+ }
+
+ if (r != sizeof(msg)) {
+ pr_info("Read on uffd returned unexpected size: %d bytes", r);
+ return NULL;
+ }
+
+ if (!(msg.event & UFFD_EVENT_PAGEFAULT))
+ continue;
+
+ if (delay)
+ usleep(delay);
+ addr = msg.arg.pagefault.address;
+ r = handle_uffd_page_request(uffd, addr);
+ if (r < 0)
+ return NULL;
+ pages++;
+ }
+
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ ts_diff = timespec_sub(end, start);
+ PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
+ pages, ts_diff.tv_sec, ts_diff.tv_nsec,
+ pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));
+
+ return NULL;
+}
+
+static int setup_demand_paging(struct kvm_vm *vm,
+ pthread_t *uffd_handler_thread, int pipefd,
+ useconds_t uffd_delay,
+ struct uffd_handler_args *uffd_args,
+ void *hva, uint64_t len)
+{
+ int uffd;
+ struct uffdio_api uffdio_api;
+ struct uffdio_register uffdio_register;
+
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ if (uffd == -1) {
+ pr_info("uffd creation failed\n");
+ return -1;
+ }
+
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = 0;
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
+ pr_info("ioctl uffdio_api failed\n");
+ return -1;
+ }
+
+ uffdio_register.range.start = (uint64_t)hva;
+ uffdio_register.range.len = len;
+ uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
+ pr_info("ioctl uffdio_register failed\n");
+ return -1;
+ }
+
+ if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) !=
+ UFFD_API_RANGE_IOCTLS) {
+ pr_info("unexpected userfaultfd ioctl set\n");
+ return -1;
+ }
+
+ uffd_args->uffd = uffd;
+ uffd_args->pipefd = pipefd;
+ uffd_args->delay = uffd_delay;
+ pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn,
+ uffd_args);
+
+ PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
+ hva, hva + len);
+
+ return 0;
+}
+
+static void run_test(enum vm_guest_mode mode, bool use_uffd,
+ useconds_t uffd_delay, int vcpus,
+ uint64_t vcpu_memory_bytes)
+{
+ pthread_t *vcpu_threads;
+ pthread_t *uffd_handler_threads = NULL;
+ struct uffd_handler_args *uffd_args = NULL;
+ struct timespec start, end, ts_diff;
+ int *pipefds = NULL;
+ struct kvm_vm *vm;
+ uint64_t guest_num_pages;
+ int vcpu_id;
+ int r;
+
+ vm = create_vm(mode, vcpus, vcpu_memory_bytes);
+
+ guest_page_size = vm_get_page_size(vm);
+
+ TEST_ASSERT(vcpu_memory_bytes % guest_page_size == 0,
+ "Guest memory size is not guest page size aligned.");
+
+ guest_num_pages = (vcpus * vcpu_memory_bytes) / guest_page_size;
+ guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
+
+ /*
+ * If the guest test region would need more pages than the guest
+ * address space can hold, the test cannot work; bail out early.
+ */
+ TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+ "Requested more guest memory than address space allows.\n"
+ " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+ guest_num_pages, vm_get_max_gfn(vm), vcpus,
+ vcpu_memory_bytes);
+
+ host_page_size = getpagesize();
+ TEST_ASSERT(vcpu_memory_bytes % host_page_size == 0,
+ "Guest memory size is not host page size aligned.");
+
+ guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+ guest_page_size;
+ guest_test_phys_mem &= ~(host_page_size - 1);
+
+#ifdef __s390x__
+ /* Align to 1M (segment size) */
+ guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+
+ pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+
+ /* Add an extra memory slot for testing demand paging */
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+ guest_test_phys_mem,
+ TEST_MEM_SLOT_INDEX,
+ guest_num_pages, 0);
+
+ /* Do mapping for the demand paging memory slot */
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+ ucall_init(vm, NULL);
+
+ guest_data_prototype = malloc(host_page_size);
+ TEST_ASSERT(guest_data_prototype,
+ "Failed to allocate buffer for guest data pattern");
+ memset(guest_data_prototype, 0xAB, host_page_size);
+
+ vcpu_threads = malloc(vcpus * sizeof(*vcpu_threads));
+ TEST_ASSERT(vcpu_threads, "Memory allocation failed");
+
+ if (use_uffd) {
+ uffd_handler_threads =
+ malloc(vcpus * sizeof(*uffd_handler_threads));
+ TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
+
+ uffd_args = malloc(vcpus * sizeof(*uffd_args));
+ TEST_ASSERT(uffd_args, "Memory allocation failed");
+
+ pipefds = malloc(sizeof(int) * vcpus * 2);
+ TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
+ }
+
+ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+ vm_paddr_t vcpu_gpa;
+ void *vcpu_hva;
+
+ vm_vcpu_add_default(vm, vcpu_id, guest_code);
+
+ vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
+ PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+ vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+
+ /* Cache the HVA pointer of the region */
+ vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
+
+ if (use_uffd) {
+ /*
+ * Set up user fault fd to handle demand paging
+ * requests.
+ */
+ r = pipe2(&pipefds[vcpu_id * 2],
+ O_CLOEXEC | O_NONBLOCK);
+ TEST_ASSERT(!r, "Failed to set up pipefd");
+
+ r = setup_demand_paging(vm,
+ &uffd_handler_threads[vcpu_id],
+ pipefds[vcpu_id * 2],
+ uffd_delay, &uffd_args[vcpu_id],
+ vcpu_hva, vcpu_memory_bytes);
+ if (r < 0)
+ exit(-r);
+ }
+
+#ifdef __x86_64__
+ vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
+#endif
+
+ vcpu_args[vcpu_id].vm = vm;
+ vcpu_args[vcpu_id].vcpu_id = vcpu_id;
+ vcpu_args[vcpu_id].gva = guest_test_virt_mem +
+ (vcpu_id * vcpu_memory_bytes);
+ vcpu_args[vcpu_id].pages = vcpu_memory_bytes / guest_page_size;
+ }
+
+ /* Export the shared variables to the guest */
+ sync_global_to_guest(vm, host_page_size);
+ sync_global_to_guest(vm, guest_page_size);
+ sync_global_to_guest(vm, vcpu_args);
+
+ pr_info("Finished creating vCPUs and starting uffd threads\n");
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
+ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+ pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
+ &vcpu_args[vcpu_id]);
+ }
+
+ pr_info("Started all vCPUs\n");
+
+ /* Wait for the vcpu threads to quit */
+ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+ pthread_join(vcpu_threads[vcpu_id], NULL);
+ PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
+ }
+
+ pr_info("All vCPU threads joined\n");
+
+ clock_gettime(CLOCK_MONOTONIC, &end);
+
+ if (use_uffd) {
+ char c;
+
+ /* Tell the user fault fd handler threads to quit */
+ for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+ r = write(pipefds[vcpu_id * 2 + 1], &c, 1);
+ TEST_ASSERT(r == 1, "Unable to write to pipefd");
+
+ pthread_join(uffd_handler_threads[vcpu_id], NULL);
+ }
+ }
+
+ ts_diff = timespec_sub(end, start);
+ pr_info("Total guest execution time: %ld.%.9lds\n",
+ ts_diff.tv_sec, ts_diff.tv_nsec);
+ pr_info("Overall demand paging rate: %f pgs/sec\n",
+ guest_num_pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 1000000000.0));
+
+ ucall_uninit(vm);
+ kvm_vm_free(vm);
+
+ free(guest_data_prototype);
+ free(vcpu_threads);
+ if (use_uffd) {
+ free(uffd_handler_threads);
+ free(uffd_args);
+ free(pipefds);
+ }
+}
+
+struct guest_mode {
+ bool supported;
+ bool enabled;
+};
+static struct guest_mode guest_modes[NUM_VM_MODES];
+
+#define guest_mode_init(mode, supported, enabled) ({ \
+ guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
+})
+
+static void help(char *name)
+{
+ int i;
+
+ puts("");
+ printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
+ " [-b memory] [-v vcpus]\n", name);
+ printf(" -m: specify the guest mode ID to test\n"
+ " (default: test all supported modes)\n"
+ " This option may be used multiple times.\n"
+ " Guest mode IDs:\n");
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
+ guest_modes[i].supported ? " (supported)" : "");
+ }
+ printf(" -u: use User Fault FD to handle vCPU page\n"
+ " faults.\n");
+ printf(" -d: add a delay in usec to the User Fault\n"
+ " FD handler to simulate demand paging\n"
+ " overheads. Ignored without -u.\n");
+ printf(" -b: specify the size of the memory region which should be\n"
+ " demand paged by each vCPU. e.g. 10M or 3G.\n"
+ " Default: 1G\n");
+ printf(" -v: specify the number of vCPUs to run.\n");
+ puts("");
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ bool mode_selected = false;
+ uint64_t vcpu_memory_bytes = DEFAULT_GUEST_TEST_MEM_SIZE;
+ int vcpus = 1;
+ unsigned int mode;
+ int opt, i;
+ bool use_uffd = false;
+ useconds_t uffd_delay = 0;
+
+#ifdef __x86_64__
+ guest_mode_init(VM_MODE_PXXV48_4K, true, true);
+#endif
+#ifdef __aarch64__
+ guest_mode_init(VM_MODE_P40V48_4K, true, true);
+ guest_mode_init(VM_MODE_P40V48_64K, true, true);
+ {
+ unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+
+ if (limit >= 52)
+ guest_mode_init(VM_MODE_P52V48_64K, true, true);
+ if (limit >= 48) {
+ guest_mode_init(VM_MODE_P48V48_4K, true, true);
+ guest_mode_init(VM_MODE_P48V48_64K, true, true);
+ }
+ }
+#endif
+#ifdef __s390x__
+ guest_mode_init(VM_MODE_P40V48_4K, true, true);
+#endif
+
+ while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
+ switch (opt) {
+ case 'm':
+ if (!mode_selected) {
+ for (i = 0; i < NUM_VM_MODES; ++i)
+ guest_modes[i].enabled = false;
+ mode_selected = true;
+ }
+ mode = strtoul(optarg, NULL, 10);
+ TEST_ASSERT(mode < NUM_VM_MODES,
+ "Guest mode ID %d too big", mode);
+ guest_modes[mode].enabled = true;
+ break;
+ case 'u':
+ use_uffd = true;
+ break;
+ case 'd':
+ uffd_delay = strtoul(optarg, NULL, 0);
+ TEST_ASSERT(uffd_delay >= 0,
+ "A negative UFFD delay is not supported.");
+ break;
+ case 'b':
+ vcpu_memory_bytes = parse_size(optarg);
+ break;
+ case 'v':
+ vcpus = atoi(optarg);
+ TEST_ASSERT(vcpus > 0,
+ "Must have a positive number of vCPUs");
+ TEST_ASSERT(vcpus <= MAX_VCPUS,
+ "This test does not currently support\n"
+ "more than %d vCPUs.", MAX_VCPUS);
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ break;
+ }
+ }
+
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ if (!guest_modes[i].enabled)
+ continue;
+ TEST_ASSERT(guest_modes[i].supported,
+ "Guest mode ID %d (%s) not supported.",
+ i, vm_guest_mode_string(i));
+ run_test(i, use_uffd, uffd_delay, vcpus, vcpu_memory_bytes);
+ }
+
+ return 0;
+}
+
+#else /* __NR_userfaultfd */
+
+#warning "missing __NR_userfaultfd definition"
+
+int main(void)
+{
+ print_skip("__NR_userfaultfd must be present for userfaultfd test");
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_userfaultfd */
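The file above is the heart of the series: guest vCPUs touch cold pages while per-vCPU userfaultfd handler threads page them in, so the measured rate approximates post-copy migration cost. The userfaultfd mechanics reduce to a small core; a self-contained sketch of one fault being resolved (error handling elided, assumes a page size of at most 64K; build with -lpthread):

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <linux/userfaultfd.h>
  #include <pthread.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int uffd;
  static long page_size;

  /* Resolve one missing-page fault by copying in a pattern page. */
  static void *handler(void *arg)
  {
          static char pattern[65536];     /* covers page sizes up to 64K */
          struct uffd_msg msg;
          struct uffdio_copy copy;

          memset(pattern, 0xAB, sizeof(pattern));
          read(uffd, &msg, sizeof(msg));  /* blocks until a fault arrives */

          copy.src = (unsigned long)pattern;
          copy.dst = msg.arg.pagefault.address & ~(page_size - 1);
          copy.len = page_size;
          copy.mode = 0;                  /* copy and wake the faulter */
          ioctl(uffd, UFFDIO_COPY, &copy);
          return NULL;
  }

  int main(void)
  {
          struct uffdio_api api = { .api = UFFD_API };
          struct uffdio_register reg;
          pthread_t thread;
          char *mem;

          page_size = sysconf(_SC_PAGESIZE);
          uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
          ioctl(uffd, UFFDIO_API, &api);

          mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          reg.range.start = (unsigned long)mem;
          reg.range.len = page_size;
          reg.mode = UFFDIO_REGISTER_MODE_MISSING;
          ioctl(uffd, UFFDIO_REGISTER, &reg);

          pthread_create(&thread, NULL, handler, NULL);

          /* This access faults; the handler pages it in with 0xAB. */
          printf("first byte: 0x%02x\n", (unsigned char)mem[0]);

          pthread_join(thread, NULL);
          return 0;
  }

The test proper wraps this same core in poll() plus a per-handler quit pipe so the threads can be torn down cleanly, and times each UFFDIO_COPY.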
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 5614222a6628..752ec158ac59 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -166,24 +166,22 @@ static void *vcpu_worker(void *data)
pages_count += TEST_PAGES_PER_LOOP;
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
} else {
- TEST_ASSERT(false,
- "Invalid guest sync status: "
- "exit_reason=%s\n",
- exit_reason_str(run->exit_reason));
+ TEST_FAIL("Invalid guest sync status: "
+ "exit_reason=%s\n",
+ exit_reason_str(run->exit_reason));
}
}
- DEBUG("Dirtied %"PRIu64" pages\n", pages_count);
+ pr_info("Dirtied %"PRIu64" pages\n", pages_count);
return NULL;
}
-static void vm_dirty_log_verify(unsigned long *bmap)
+static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
+ uint64_t step = vm_num_host_pages(mode, 1);
uint64_t page;
uint64_t *value_ptr;
- uint64_t step = host_page_size >= guest_page_size ? 1 :
- guest_page_size / host_page_size;
for (page = 0; page < host_num_pages; page += step) {
value_ptr = host_test_mem + page * host_page_size;
@@ -252,6 +250,8 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
struct kvm_vm *vm;
uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
+ pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
#ifdef __x86_64__
@@ -264,6 +264,10 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K 12
+#ifdef USE_CLEAR_DIRTY_LOG
+static u64 dirty_log_manual_caps;
+#endif
+
static void run_test(enum vm_guest_mode mode, unsigned long iterations,
unsigned long interval, uint64_t phys_offset)
{
@@ -289,14 +293,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
* case where the size is not aligned to 64 pages.
*/
guest_num_pages = (1ul << (DIRTY_MEM_BITS -
- vm_get_page_shift(vm))) + 16;
-#ifdef __s390x__
- /* Round up to multiple of 1M (segment size) */
- guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
-#endif
+ vm_get_page_shift(vm))) + 3;
+ guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
+
host_page_size = getpagesize();
- host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
- !!((guest_num_pages * guest_page_size) % host_page_size);
+ host_num_pages = vm_num_host_pages(mode, guest_num_pages);
if (!phys_offset) {
guest_test_phys_mem = (vm_get_max_gfn(vm) -
@@ -311,7 +312,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
guest_test_phys_mem &= ~((1 << 20) - 1);
#endif
- DEBUG("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+ pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_alloc(host_num_pages);
host_bmap_track = bitmap_alloc(host_num_pages);
@@ -320,7 +321,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
struct kvm_enable_cap cap = {};
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
- cap.args[0] = 1;
+ cap.args[0] = dirty_log_manual_caps;
vm_enable_cap(vm, &cap);
#endif
@@ -332,8 +333,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
KVM_MEM_LOG_DIRTY_PAGES);
/* Do mapping for the dirty track memory slot */
- virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
- guest_num_pages * guest_page_size, 0);
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
@@ -341,9 +341,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
#ifdef __x86_64__
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
#endif
-#ifdef __aarch64__
ucall_init(vm, NULL);
-#endif
/* Export the shared variables to the guest */
sync_global_to_guest(vm, host_page_size);
@@ -369,7 +367,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
host_num_pages);
#endif
- vm_dirty_log_verify(bmap);
+ vm_dirty_log_verify(mode, bmap);
iteration++;
sync_global_to_guest(vm, iteration);
}
@@ -378,9 +376,9 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
host_quit = true;
pthread_join(vcpu_thread, NULL);
- DEBUG("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
- "track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
- host_track_next_count);
+ pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
+ "track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
+ host_track_next_count);
free(bmap);
free(host_bmap_track);
@@ -388,15 +386,14 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_free(vm);
}
-struct vm_guest_mode_params {
+struct guest_mode {
bool supported;
bool enabled;
};
-struct vm_guest_mode_params vm_guest_mode_params[NUM_VM_MODES];
+static struct guest_mode guest_modes[NUM_VM_MODES];
-#define vm_guest_mode_params_init(mode, supported, enabled) \
-({ \
- vm_guest_mode_params[mode] = (struct vm_guest_mode_params){ supported, enabled }; \
+#define guest_mode_init(mode, supported, enabled) ({ \
+ guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
})
static void help(char *name)
@@ -419,7 +416,7 @@ static void help(char *name)
" Guest mode IDs:\n");
for (i = 0; i < NUM_VM_MODES; ++i) {
printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
- vm_guest_mode_params[i].supported ? " (supported)" : "");
+ guest_modes[i].supported ? " (supported)" : "");
}
puts("");
exit(0);
@@ -433,34 +430,38 @@ int main(int argc, char *argv[])
uint64_t phys_offset = 0;
unsigned int mode;
int opt, i;
-#ifdef __aarch64__
- unsigned int host_ipa_limit;
-#endif
#ifdef USE_CLEAR_DIRTY_LOG
- if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2)) {
- fprintf(stderr, "KVM_CLEAR_DIRTY_LOG not available, skipping tests\n");
+ dirty_log_manual_caps =
+ kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+ if (!dirty_log_manual_caps) {
+ print_skip("KVM_CLEAR_DIRTY_LOG not available");
exit(KSFT_SKIP);
}
+ dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+ KVM_DIRTY_LOG_INITIALLY_SET);
#endif
#ifdef __x86_64__
- vm_guest_mode_params_init(VM_MODE_PXXV48_4K, true, true);
+ guest_mode_init(VM_MODE_PXXV48_4K, true, true);
#endif
#ifdef __aarch64__
- vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
- vm_guest_mode_params_init(VM_MODE_P40V48_64K, true, true);
-
- host_ipa_limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
- if (host_ipa_limit >= 52)
- vm_guest_mode_params_init(VM_MODE_P52V48_64K, true, true);
- if (host_ipa_limit >= 48) {
- vm_guest_mode_params_init(VM_MODE_P48V48_4K, true, true);
- vm_guest_mode_params_init(VM_MODE_P48V48_64K, true, true);
+ guest_mode_init(VM_MODE_P40V48_4K, true, true);
+ guest_mode_init(VM_MODE_P40V48_64K, true, true);
+
+ {
+ unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+
+ if (limit >= 52)
+ guest_mode_init(VM_MODE_P52V48_64K, true, true);
+ if (limit >= 48) {
+ guest_mode_init(VM_MODE_P48V48_4K, true, true);
+ guest_mode_init(VM_MODE_P48V48_64K, true, true);
+ }
}
#endif
#ifdef __s390x__
- vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
+ guest_mode_init(VM_MODE_P40V48_4K, true, true);
#endif
while ((opt = getopt(argc, argv, "hi:I:p:m:")) != -1) {
@@ -477,13 +478,13 @@ int main(int argc, char *argv[])
case 'm':
if (!mode_selected) {
for (i = 0; i < NUM_VM_MODES; ++i)
- vm_guest_mode_params[i].enabled = false;
+ guest_modes[i].enabled = false;
mode_selected = true;
}
mode = strtoul(optarg, NULL, 10);
TEST_ASSERT(mode < NUM_VM_MODES,
"Guest mode ID %d too big", mode);
- vm_guest_mode_params[mode].enabled = true;
+ guest_modes[mode].enabled = true;
break;
case 'h':
default:
@@ -495,15 +496,15 @@ int main(int argc, char *argv[])
TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
TEST_ASSERT(interval > 0, "Interval must be greater than zero");
- DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
- iterations, interval);
+ pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
+ iterations, interval);
srandom(time(0));
for (i = 0; i < NUM_VM_MODES; ++i) {
- if (!vm_guest_mode_params[i].enabled)
+ if (!guest_modes[i].enabled)
continue;
- TEST_ASSERT(vm_guest_mode_params[i].supported,
+ TEST_ASSERT(guest_modes[i].supported,
"Guest mode ID %d (%s) not supported.",
i, vm_guest_mode_string(i));
run_test(i, iterations, interval, phys_offset);
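The effect of the new page-count helpers is easiest to see with mismatched page sizes, e.g. a VM_MODE_P40V48_4K guest on a 64K-page arm64 host (guest page_shift 12, host page shift 16):

  vm_num_host_pages(mode, 3);          /* ceil(3 / 16)        -> 1  */
  vm_num_guest_pages(mode, 1);         /* 1 * 16              -> 16 */
  vm_adjust_num_guest_pages(mode, 3);  /* round trip of above -> 16 */

That is, guest page counts get padded up until the region is host-page aligned; on s390x the result is additionally rounded up to a 256-page (1M) multiple, replacing the open-coded rounding this hunk deletes.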
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
index 4912d23844bc..d8f4d6bfe05d 100644
--- a/tools/testing/selftests/kvm/include/evmcs.h
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -16,6 +16,8 @@
#define u32 uint32_t
#define u64 uint64_t
+#define EVMCS_VERSION 1
+
extern bool enable_evmcs;
struct hv_vp_assist_page {
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index ae0d14c2540a..a99b875f50d2 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -16,7 +16,8 @@
#include "sparsebit.h"
-/* Callers of kvm_util only have an incomplete/opaque description of the
+/*
+ * Callers of kvm_util only have an incomplete/opaque description of the
* structure kvm_util is using to maintain the state of a VM.
*/
struct kvm_vm;
@@ -24,12 +25,6 @@ struct kvm_vm;
typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
-#ifndef NDEBUG
-#define DEBUG(...) printf(__VA_ARGS__);
-#else
-#define DEBUG(...)
-#endif
-
/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR 0x2000
@@ -84,6 +79,23 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
uint32_t data_memslot, uint32_t pgd_memslot);
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+
+/*
+ * VM VCPU Dump
+ *
+ * Input Args:
+ * stream - Output FILE stream
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * indent - Left margin indent amount
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Dumps the current state of the VCPU specified by @vcpuid, within the VM
+ * given by @vm, to the FILE stream given by @stream.
+ */
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
uint8_t indent);
@@ -100,14 +112,31 @@ int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
+void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
uint32_t data_memslot, uint32_t pgd_memslot);
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- size_t size, uint32_t pgd_memslot);
+ unsigned int npages, uint32_t pgd_memslot);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+
+/*
+ * Address Guest Virtual to Guest Physical
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * gva - VM virtual address
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Equivalent VM physical address
+ *
+ * Returns the VM physical address of the translated VM virtual
+ * address given by @gva.
+ */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
@@ -118,7 +147,27 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_mp_state *mp_state);
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
+
+/*
+ * VM VCPU Args Set
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * num - number of arguments
+ * ... - arguments, each of type uint64_t
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Sets the first @num function input registers of the VCPU with @vcpuid,
+ * per the C calling convention of the architecture, to the values given
+ * as variable args. Each of the variable args is expected to be of type
+ * uint64_t. The maximum @num can be is specific to the architecture.
+ */
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
+
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_sregs *sregs);
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
@@ -147,15 +196,57 @@ int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
+
+/*
+ * VM Virtual Page Map
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vaddr - VM Virtual Address
+ * paddr - VM Physical Address
+ * memslot - Memory region slot for new virtual translation tables
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Within @vm, creates a virtual translation for the page starting
+ * at @vaddr to the page starting at @paddr.
+ */
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint32_t pgd_memslot);
+ uint32_t memslot);
+
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
vm_paddr_t paddr_min, uint32_t memslot);
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
+/*
+ * Create a VM with reasonable defaults
+ *
+ * Input Args:
+ * vcpuid - The id of the single VCPU to add to the VM.
+ * extra_mem_pages - The number of extra pages to add (this will
+ * decide how much extra space we will need to
+ * set up the page tables using memslot 0)
+ * guest_code - The vCPU's entry point
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Pointer to opaque structure that describes the created VM.
+ */
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
void *guest_code);
+
+/*
+ * Adds a vCPU with reasonable defaults (e.g. a stack)
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - The id of the VCPU to add to the VM.
+ * guest_code - The vCPU's entry point
+ */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);
@@ -164,6 +255,21 @@ unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
+unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
+unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
+static inline unsigned int
+vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+ unsigned int n;
+ n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
+#ifdef __s390x__
+ /* s390 requires 1M aligned guest sizes */
+ n = (n + 255) & ~255;
+#endif
+ return n;
+}
+
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end);
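Note the virt_map() signature change: callers now pass a page count instead of a byte size, so every call site drops the multiply. Before/after for a typical caller (variables as in dirty_log_test.c above):

  /* Before: size in bytes; virt_map() divided by page_size itself. */
  virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
           guest_num_pages * guest_page_size, 0);

  /* After: the page count goes straight through. */
  virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
           guest_num_pages, 0);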
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index a41db6fb7e24..5eb01bf51b86 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -19,12 +19,28 @@
#include <fcntl.h>
#include "kselftest.h"
+static inline int _no_printf(const char *format, ...) { return 0; }
+
+#ifdef DEBUG
+#define pr_debug(...) printf(__VA_ARGS__)
+#else
+#define pr_debug(...) _no_printf(__VA_ARGS__)
+#endif
+#ifndef QUIET
+#define pr_info(...) printf(__VA_ARGS__)
+#else
+#define pr_info(...) _no_printf(__VA_ARGS__)
+#endif
+
+void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+
ssize_t test_write(int fd, const void *buf, size_t count);
ssize_t test_read(int fd, void *buf, size_t count);
int test_seq_read(const char *path, char **bufp, size_t *sizep);
void test_assert(bool exp, const char *exp_str,
- const char *file, unsigned int line, const char *fmt, ...);
+ const char *file, unsigned int line, const char *fmt, ...)
+ __attribute__((format(printf, 5, 6)));
#define TEST_ASSERT(e, fmt, ...) \
test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
@@ -39,4 +55,14 @@ void test_assert(bool exp, const char *exp_str,
#a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
} while (0)
+#define TEST_FAIL(fmt, ...) \
+ TEST_ASSERT(false, fmt, ##__VA_ARGS__)
+
+size_t parse_size(const char *size);
+
+int64_t timespec_to_ns(struct timespec ts);
+struct timespec timespec_add_ns(struct timespec ts, int64_t ns);
+struct timespec timespec_add(struct timespec ts1, struct timespec ts2);
+struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
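The timespec helpers back the new timing output in the demand paging test; their definitions live in the new lib/test_util.c, which this diff view does not show. A plausible sketch of timespec_sub(), assuming the conventional borrow-from-seconds normalization (a sketch, not necessarily the file's exact code):

  #include <time.h>

  struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
  {
          struct timespec ts;

          ts.tv_sec = ts1.tv_sec - ts2.tv_sec;
          ts.tv_nsec = ts1.tv_nsec - ts2.tv_nsec;
          if (ts.tv_nsec < 0) {           /* borrow one second */
                  ts.tv_sec--;
                  ts.tv_nsec += 1000000000;
          }
          return ts;
  }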
diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
index 6f38c3dc0d56..0299cd81b8ba 100644
--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
@@ -24,8 +24,8 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
struct kvm_vm *vm;
int i;
- printf("Testing creating %d vCPUs, with IDs %d...%d.\n",
- num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
+ pr_info("Testing creating %d vCPUs, with IDs %d...%d.\n",
+ num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
@@ -41,8 +41,8 @@ int main(int argc, char *argv[])
int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
- printf("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
- printf("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
+ pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
+ pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
/*
* Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 86036a59a668..2afa6618b396 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -130,7 +130,7 @@ void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
break;
default:
- TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
+ TEST_FAIL("Page table levels must be 2, 3, or 4");
}
*ptep = paddr | 3;
@@ -173,20 +173,19 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
goto unmapped_gva;
break;
default:
- TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
+ TEST_FAIL("Page table levels must be 2, 3, or 4");
}
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
unmapped_gva:
- TEST_ASSERT(false, "No mapping for vm virtual address, "
- "gva: 0x%lx", gva);
+ TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
exit(1);
}
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
-#ifdef DEBUG_VM
+#ifdef DEBUG
static const char * const type[] = { "", "pud", "pmd", "pte" };
uint64_t pte, *ptep;
@@ -197,7 +196,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
ptep = addr_gpa2hva(vm, pte);
if (!*ptep)
continue;
- printf("%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
+ fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
}
#endif
@@ -215,7 +214,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
ptep = addr_gpa2hva(vm, pgd);
if (!*ptep)
continue;
- printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
+ fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
}
}
@@ -262,11 +261,11 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini
switch (vm->mode) {
case VM_MODE_P52V48_4K:
- TEST_ASSERT(false, "AArch64 does not support 4K sized pages "
- "with 52-bit physical address ranges");
+ TEST_FAIL("AArch64 does not support 4K sized pages "
+ "with 52-bit physical address ranges");
case VM_MODE_PXXV48_4K:
- TEST_ASSERT(false, "AArch64 does not support 4K sized pages "
- "with ANY-bit physical address ranges");
+ TEST_FAIL("AArch64 does not support 4K sized pages "
+ "with ANY-bit physical address ranges");
case VM_MODE_P52V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
@@ -288,7 +287,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
break;
default:
- TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
+ TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
@@ -333,3 +332,21 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}
+
+void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+{
+ va_list ap;
+ int i;
+
+ TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
+ " num: %u\n", num);
+
+ va_start(ap, num);
+
+ for (i = 0; i < num; i++) {
+ set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
+ va_arg(ap, uint64_t));
+ }
+
+ va_end(ap);
+}
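vcpu_args_set() was previously not available on aarch64; implementing it via set_reg() on regs.regs[0..7] is what lets the demand paging test hand each vCPU its index portably. The caller side, as used by vcpu_worker() earlier in this series:

  /* One argument: the vCPU's own id lands in x0 (rdi on x86_64). */
  vcpu_args_set(vm, vcpu_id, 1, vcpu_id);

On the guest side it simply arrives as the entry point's first C parameter, guest_code(uint32_t vcpu_id).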
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index 6cd91970fbad..c8e0ec20d3bf 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -62,7 +62,7 @@ void ucall_init(struct kvm_vm *vm, void *arg)
if (ucall_mmio_init(vm, start + offset))
return;
}
- TEST_ASSERT(false, "Can't find a ucall mmio address");
+ TEST_FAIL("Can't find a ucall mmio address");
}
void ucall_uninit(struct kvm_vm *vm)
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index d1cf9f6e0e6b..5ebbd0d6b472 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -82,8 +82,10 @@ test_assert(bool exp, const char *exp_str,
}
va_end(ap);
- if (errno == EACCES)
- ksft_exit_skip("Access denied - Exiting.\n");
+ if (errno == EACCES) {
+ print_skip("Access denied - Exiting");
+ exit(KSFT_SKIP);
+ }
exit(254);
}
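print_skip() is also new in this series: declared in test_util.h above, defined in the unshown lib/test_util.c. A plausible sketch, assuming it appends the conventional skip suffix (the suffix and the assert are assumptions, not confirmed by this diff):

  #include <assert.h>
  #include <stdarg.h>
  #include <stdio.h>

  void print_skip(const char *fmt, ...)
  {
          va_list ap;

          assert(fmt);
          va_start(ap, fmt);
          vprintf(fmt, ap);
          va_end(ap);
          puts(", skipping test");        /* assumed suffix */
  }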
diff --git a/tools/testing/selftests/kvm/lib/io.c b/tools/testing/selftests/kvm/lib/io.c
index eaf351cc7e7f..fedb2a741f0b 100644
--- a/tools/testing/selftests/kvm/lib/io.c
+++ b/tools/testing/selftests/kvm/lib/io.c
@@ -61,9 +61,9 @@ ssize_t test_write(int fd, const void *buf, size_t count)
continue;
case 0:
- TEST_ASSERT(false, "Unexpected EOF,\n"
- " rc: %zi num_written: %zi num_left: %zu",
- rc, num_written, num_left);
+ TEST_FAIL("Unexpected EOF,\n"
+ " rc: %zi num_written: %zi num_left: %zu",
+ rc, num_written, num_left);
break;
default:
@@ -138,9 +138,9 @@ ssize_t test_read(int fd, void *buf, size_t count)
break;
case 0:
- TEST_ASSERT(false, "Unexpected EOF,\n"
- " rc: %zi num_read: %zi num_left: %zu",
- rc, num_read, num_left);
+ TEST_FAIL("Unexpected EOF,\n"
+ " rc: %zi num_read: %zi num_left: %zu",
+ rc, num_read, num_left);
break;
default:
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index a6dd0401eb50..8a3523d4434f 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -92,7 +92,7 @@ static void vm_open(struct kvm_vm *vm, int perm)
exit(KSFT_SKIP);
if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
- fprintf(stderr, "immediate_exit not available, skipping test\n");
+ print_skip("immediate_exit not available");
exit(KSFT_SKIP);
}
@@ -113,6 +113,25 @@ const char * const vm_guest_mode_string[] = {
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
+struct vm_guest_mode_params {
+ unsigned int pa_bits;
+ unsigned int va_bits;
+ unsigned int page_size;
+ unsigned int page_shift;
+};
+
+static const struct vm_guest_mode_params vm_guest_mode_params[] = {
+ { 52, 48, 0x1000, 12 },
+ { 52, 48, 0x10000, 16 },
+ { 48, 48, 0x1000, 12 },
+ { 48, 48, 0x10000, 16 },
+ { 40, 48, 0x1000, 12 },
+ { 40, 48, 0x10000, 16 },
+ { 0, 0, 0x1000, 12 },
+};
+_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
+ "Missing new mode params?");
+
/*
* VM Create
*
@@ -136,7 +155,8 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
struct kvm_vm *vm;
- DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+ pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
+ vm_guest_mode_string(mode), phy_pages, perm);
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
@@ -144,67 +164,45 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
vm->mode = mode;
vm->type = 0;
+ vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
+ vm->va_bits = vm_guest_mode_params[mode].va_bits;
+ vm->page_size = vm_guest_mode_params[mode].page_size;
+ vm->page_shift = vm_guest_mode_params[mode].page_shift;
+
/* Setup mode specific traits. */
switch (vm->mode) {
case VM_MODE_P52V48_4K:
vm->pgtable_levels = 4;
- vm->pa_bits = 52;
- vm->va_bits = 48;
- vm->page_size = 0x1000;
- vm->page_shift = 12;
break;
case VM_MODE_P52V48_64K:
vm->pgtable_levels = 3;
- vm->pa_bits = 52;
- vm->va_bits = 48;
- vm->page_size = 0x10000;
- vm->page_shift = 16;
break;
case VM_MODE_P48V48_4K:
vm->pgtable_levels = 4;
- vm->pa_bits = 48;
- vm->va_bits = 48;
- vm->page_size = 0x1000;
- vm->page_shift = 12;
break;
case VM_MODE_P48V48_64K:
vm->pgtable_levels = 3;
- vm->pa_bits = 48;
- vm->va_bits = 48;
- vm->page_size = 0x10000;
- vm->page_shift = 16;
break;
case VM_MODE_P40V48_4K:
vm->pgtable_levels = 4;
- vm->pa_bits = 40;
- vm->va_bits = 48;
- vm->page_size = 0x1000;
- vm->page_shift = 12;
break;
case VM_MODE_P40V48_64K:
vm->pgtable_levels = 3;
- vm->pa_bits = 40;
- vm->va_bits = 48;
- vm->page_size = 0x10000;
- vm->page_shift = 16;
break;
case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
TEST_ASSERT(vm->va_bits == 48, "Linear address width "
"(%d bits) not supported", vm->va_bits);
+ pr_debug("Guest physical address width detected: %d\n",
+ vm->pa_bits);
vm->pgtable_levels = 4;
- vm->page_size = 0x1000;
- vm->page_shift = 12;
- DEBUG("Guest physical address width detected: %d\n",
- vm->pa_bits);
#else
- TEST_ASSERT(false, "VM_MODE_PXXV48_4K not supported on "
- "non-x86 platforms");
+ TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
break;
default:
- TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
+ TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
}
#ifdef __aarch64__
@@ -266,7 +264,7 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
- " guest_phys_addr: 0x%lx size: 0x%lx",
+ " guest_phys_addr: 0x%llx size: 0x%llx",
ret, errno, region->region.slot,
region->region.flags,
region->region.guest_phys_addr,
@@ -281,7 +279,7 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
- strerror(-ret));
+ __func__, strerror(-ret));
}
void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
@@ -294,7 +292,7 @@ void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
- strerror(-ret));
+ __func__, strerror(-ret));
}
/*
@@ -582,6 +580,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
size_t alignment;
+ TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
+ "Number of guest pages is not compatible with the host. "
+ "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
+
TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
"address not on a page boundary.\n"
" guest_paddr: 0x%lx vm->page_size: 0x%x",
@@ -600,7 +602,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
region = (struct userspace_mem_region *) userspace_mem_region_find(
vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
if (region != NULL)
- TEST_ASSERT(false, "overlapping userspace_mem_region already "
+ TEST_FAIL("overlapping userspace_mem_region already "
"exists\n"
" requested guest_paddr: 0x%lx npages: 0x%lx "
"page_size: 0x%x\n"
@@ -616,7 +618,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
break;
}
if (region != NULL)
- TEST_ASSERT(false, "A mem region with the requested slot "
+ TEST_FAIL("A mem region with the requested slot "
"already exists.\n"
" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
" existing slot: %u paddr: 0x%lx size: 0x%lx",
@@ -720,7 +722,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
" requested slot: %u\n", memslot);
fputs("---- vm dump ----\n", stderr);
vm_dump(stderr, vm, 2);
- TEST_ASSERT(false, "Mem region not found");
+ TEST_FAIL("Mem region not found");
}
return region;
@@ -757,6 +759,36 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
}
/*
+ * VM Memory Region Move
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * slot - Slot of the memory region to move
+ * new_gpa - Starting guest physical address
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Change the gpa of a memory region.
+ */
+void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
+{
+ struct userspace_mem_region *region;
+ int ret;
+
+ region = memslot2region(vm, slot);
+
+ region->region.guest_phys_addr = new_gpa;
+
+ ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
+
+ TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
+ "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
+ ret, errno, slot, new_gpa);
+}
+
+/*
* VCPU mmap Size
*
* Input Args: None
@@ -808,7 +840,7 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
/* Confirm a vcpu with the specified id doesn't already exist. */
vcpu = vcpu_find(vm, vcpuid);
if (vcpu != NULL)
- TEST_ASSERT(false, "vcpu with the specified id "
+ TEST_FAIL("vcpu with the specified id "
"already exists,\n"
" requested vcpuid: %u\n"
" existing vcpuid: %u state: %p",
@@ -901,8 +933,7 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
} while (pgidx_start != 0);
no_va_found:
- TEST_ASSERT(false, "No vaddr of specified pages available, "
- "pages: 0x%lx", pages);
+ TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
/* NOT REACHED */
return -1;
@@ -982,21 +1013,21 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
* vm - Virtual Machine
* vaddr - Virtual address to map
* paddr - VM Physical Address
- * size - The size of the range to map
+ * npages - The number of pages to map
* pgd_memslot - Memory region slot for new virtual translation tables
*
* Output Args: None
*
* Return: None
*
- * Within the VM given by vm, creates a virtual translation for the
- * page range starting at vaddr to the page range starting at paddr.
+ * Within the VM given by @vm, creates a virtual translation for
+ * @npages starting at @vaddr to the page range starting at @paddr.
*/
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- size_t size, uint32_t pgd_memslot)
+ unsigned int npages, uint32_t pgd_memslot)
{
size_t page_size = vm->page_size;
- size_t npages = size / page_size;
+ size_t size = npages * page_size;
TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
@@ -1037,7 +1068,7 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
+ (gpa - region->region.guest_phys_addr));
}
- TEST_ASSERT(false, "No vm physical memory at 0x%lx", gpa);
+ TEST_FAIL("No vm physical memory at 0x%lx", gpa);
return NULL;
}
@@ -1071,8 +1102,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
+ (hva - (uintptr_t) region->host_mem));
}
- TEST_ASSERT(false, "No mapping to a guest physical address, "
- "hva: %p", hva);
+ TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
return -1;
}
@@ -1703,3 +1733,43 @@ unsigned int vm_get_max_gfn(struct kvm_vm *vm)
{
return vm->max_gfn;
}
+
+static unsigned int vm_calc_num_pages(unsigned int num_pages,
+ unsigned int page_shift,
+ unsigned int new_page_shift,
+ bool ceil)
+{
+ unsigned int n = 1 << (new_page_shift - page_shift);
+
+ if (page_shift >= new_page_shift)
+ return num_pages * (1 << (page_shift - new_page_shift));
+
+ return num_pages / n + !!(ceil && num_pages % n);
+}
+
+static inline int getpageshift(void)
+{
+ return __builtin_ffs(getpagesize()) - 1;
+}
+
+unsigned int
+vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+ return vm_calc_num_pages(num_guest_pages,
+ vm_guest_mode_params[mode].page_shift,
+ getpageshift(), true);
+}
+
+unsigned int
+vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
+{
+ return vm_calc_num_pages(num_host_pages, getpageshift(),
+ vm_guest_mode_params[mode].page_shift, false);
+}
+
+unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
+{
+ unsigned int n;
+ n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
+ return vm_adjust_num_guest_pages(mode, n);
+}
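vm_mem_region_move() simply re-issues KVM_SET_USER_MEMORY_REGION with a new base while the VM is live, presumably to back the new x86_64/set_memory_region_test added to the Makefile above. A usage sketch (slot and addresses illustrative):

  /* Shift a data slot up one page while vCPUs keep running. */
  vm_mem_region_move(vm, TEST_MEM_SLOT_INDEX, base_gpa + 0x1000);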
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index ac50c42750cf..ca56a0133127 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -12,17 +12,6 @@
#define KVM_DEV_PATH "/dev/kvm"
-#ifndef BITS_PER_BYTE
-#define BITS_PER_BYTE 8
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
-#endif
-
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
-
struct userspace_mem_region {
struct userspace_mem_region *next, *prev;
struct kvm_userspace_memory_region region;
@@ -64,8 +53,56 @@ struct kvm_vm {
};
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
+
+/*
+ * Virtual Translation Tables Dump
+ *
+ * Input Args:
+ * stream - Output FILE stream
+ * vm - Virtual Machine
+ * indent - Left margin indent amount
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Dumps to the FILE stream given by @stream, the contents of all the
+ * virtual translation tables for the VM given by @vm.
+ */
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+
+/*
+ * Register Dump
+ *
+ * Input Args:
+ * stream - Output FILE stream
+ * regs - Registers
+ * indent - Left margin indent amount
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Dumps the state of the registers given by @regs, to the FILE stream
+ * given by @stream.
+ */
void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
+
+/*
+ * System Register Dump
+ *
+ * Input Args:
+ * stream - Output FILE stream
+ * sregs - System registers
+ * indent - Left margin indent amount
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Dumps the state of the system registers given by @sregs, to the FILE stream
+ * given by @stream.
+ */
void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
struct userspace_mem_region *
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 32a02360b1eb..8d94961bd046 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -51,22 +51,6 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
-/*
- * VM Virtual Page Map
- *
- * Input Args:
- * vm - Virtual Machine
- * gva - VM Virtual Address
- * gpa - VM Physical Address
- * memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
- * Within the VM given by vm, creates a virtual translation for the page
- * starting at vaddr to the page starting at paddr.
- */
void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
uint32_t memslot)
{
@@ -107,26 +91,6 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
entry[idx] = gpa;
}
-/*
- * Address Guest Virtual to Guest Physical
- *
- * Input Args:
- * vm - Virtual Machine
- * gpa - VM virtual address
- *
- * Output Args: None
- *
- * Return:
- * Equivalent VM physical address
- *
- * Translates the VM virtual address given by gva to a VM physical
- * address and then locates the memory region containing the VM
- * physical address, within the VM given by vm. When found, the host
- * virtual address providing the memory to the vm physical address is
- * returned.
- * A TEST_ASSERT failure occurs if no region containing translated
- * VM virtual address exists.
- */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
int ri, idx;
@@ -196,21 +160,6 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
virt_dump_region(stream, vm, indent, vm->pgd);
}
-/*
- * Create a VM with reasonable defaults
- *
- * Input Args:
- * vcpuid - The id of the single VCPU to add to the VM.
- * extra_mem_pages - The size of extra memories to add (this will
- * decide how much extra space we will need to
- * setup the page tables using mem slot 0)
- * guest_code - The vCPU's entry point
- *
- * Output Args: None
- *
- * Return:
- * Pointer to opaque structure that describes the created VM.
- */
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
void *guest_code)
{
@@ -231,13 +180,6 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
return vm;
}
-/*
- * Adds a vCPU with reasonable defaults (i.e. a stack and initial PSW)
- *
- * Input Args:
- * vcpuid - The id of the VCPU to add to the VM.
- * guest_code - The vCPU's entry point
- */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
@@ -269,6 +211,26 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
run->psw_addr = (uintptr_t)guest_code;
}
+void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+{
+ va_list ap;
+ struct kvm_regs regs;
+ int i;
+
+ TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
+ " num: %u\n",
+ num);
+
+ va_start(ap, num);
+ vcpu_regs_get(vm, vcpuid, &regs);
+
+ for (i = 0; i < num; i++)
+ regs.gprs[i + 2] = va_arg(ap, uint64_t);
+
+ vcpu_regs_set(vm, vcpuid, &regs);
+ va_end(ap);
+}
+
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
struct vcpu *vcpu = vm->vcpu_head;
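The vcpu_args_set() added above follows the s390 calling convention, where function arguments land in r2-r6; that is why at most five arguments are accepted. A sketch of how the guest side would consume them (guest_code and the value are illustrative):

	static void guest_code(uint64_t arg)
	{
		GUEST_ASSERT(arg == 0x42);	/* arrives in r2 */
		GUEST_DONE();
	}

	/* host side */
	vcpu_args_set(vm, VCPU_ID, 1, 0x42);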
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
new file mode 100644
index 000000000000..689e97c27ee2
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * tools/testing/selftests/kvm/lib/test_util.c
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+#include <stdlib.h>
+#include <ctype.h>
+#include <limits.h>
+#include <assert.h>
+#include "test_util.h"
+
+/*
+ * Parses "[0-9]+[kmgt]?".
+ */
+size_t parse_size(const char *size)
+{
+ size_t base;
+ char *scale;
+ int shift = 0;
+
+ TEST_ASSERT(size && isdigit(size[0]), "Need at least one digit in '%s'", size);
+
+ base = strtoull(size, &scale, 0);
+
+ TEST_ASSERT(base != ULLONG_MAX, "Overflow parsing size!");
+
+ switch (tolower(*scale)) {
+ case 't':
+ shift = 40;
+ break;
+ case 'g':
+ shift = 30;
+ break;
+ case 'm':
+ shift = 20;
+ break;
+ case 'k':
+ shift = 10;
+ break;
+ case 'b':
+ case '\0':
+ shift = 0;
+ break;
+ default:
+		TEST_FAIL("Unknown size letter %c", *scale);
+ }
+
+ TEST_ASSERT((base << shift) >> shift == base, "Overflow scaling size!");
+
+ return base << shift;
+}
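Because strtoull() is called with base 0, hex and octal prefixes are accepted too, and tolower() makes the scale suffix case-insensitive. Illustrative values:

	size_t a = parse_size("512");	/* 512 bytes */
	size_t b = parse_size("512k");	/* 512 << 10 == 524288 */
	size_t c = parse_size("0x10M");	/* 16 << 20 == 16777216 */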
+
+int64_t timespec_to_ns(struct timespec ts)
+{
+ return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
+}
+
+struct timespec timespec_add_ns(struct timespec ts, int64_t ns)
+{
+ struct timespec res;
+
+ res.tv_nsec = ts.tv_nsec + ns;
+ res.tv_sec = ts.tv_sec + res.tv_nsec / 1000000000LL;
+ res.tv_nsec %= 1000000000LL;
+
+ return res;
+}
+
+struct timespec timespec_add(struct timespec ts1, struct timespec ts2)
+{
+ int64_t ns1 = timespec_to_ns(ts1);
+ int64_t ns2 = timespec_to_ns(ts2);
+ return timespec_add_ns((struct timespec){0}, ns1 + ns2);
+}
+
+struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
+{
+ int64_t ns1 = timespec_to_ns(ts1);
+ int64_t ns2 = timespec_to_ns(ts2);
+ return timespec_add_ns((struct timespec){0}, ns1 - ns2);
+}
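Typical use of these helpers is measuring an interval. A small sketch, where do_work() is a placeholder and PRId64 needs <inttypes.h>:

	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do_work();
	clock_gettime(CLOCK_MONOTONIC, &end);
	pr_info("elapsed: %" PRId64 " ns\n", timespec_to_ns(timespec_sub(end, start)));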
+
+void print_skip(const char *fmt, ...)
+{
+ va_list ap;
+
+ assert(fmt);
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ puts(", skipping test");
+}
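Note that print_skip() appends ", skipping test" itself, so callers pass only the reason, as in the conversions later in this series:

	if (!kvm_check_cap(KVM_CAP_SYNC_REGS)) {
		print_skip("KVM_CAP_SYNC_REGS not supported");
		exit(KSFT_SKIP);
	}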
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 683d3bdb8f6a..f6eb34eaa0d2 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -77,20 +77,6 @@ struct pageTableEntry {
uint64_t execute_disable:1;
};
-/* Register Dump
- *
- * Input Args:
- * indent - Left margin indent amount
- * regs - register
- *
- * Output Args:
- * stream - Output FILE stream
- *
- * Return: None
- *
- * Dumps the state of the registers given by regs, to the FILE stream
- * given by steam.
- */
void regs_dump(FILE *stream, struct kvm_regs *regs,
uint8_t indent)
{
@@ -115,19 +101,20 @@ void regs_dump(FILE *stream, struct kvm_regs *regs,
regs->rip, regs->rflags);
}
-/* Segment Dump
+/*
+ * Segment Dump
*
* Input Args:
- * indent - Left margin indent amount
+ * stream - Output FILE stream
* segment - KVM segment
+ * indent - Left margin indent amount
*
- * Output Args:
- * stream - Output FILE stream
+ * Output Args: None
*
* Return: None
*
- * Dumps the state of the KVM segment given by segment, to the FILE stream
- * given by steam.
+ * Dumps the state of the KVM segment given by @segment, to the FILE stream
+ * given by @stream.
*/
static void segment_dump(FILE *stream, struct kvm_segment *segment,
uint8_t indent)
@@ -146,19 +133,20 @@ static void segment_dump(FILE *stream, struct kvm_segment *segment,
segment->unusable, segment->padding);
}
-/* dtable Dump
+/*
+ * dtable Dump
*
* Input Args:
- * indent - Left margin indent amount
+ * stream - Output FILE stream
* dtable - KVM dtable
+ * indent - Left margin indent amount
*
- * Output Args:
- * stream - Output FILE stream
+ * Output Args: None
*
* Return: None
*
- * Dumps the state of the KVM dtable given by dtable, to the FILE stream
- * given by steam.
+ * Dumps the state of the KVM dtable given by @dtable, to the FILE stream
+ * given by @stream.
*/
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
uint8_t indent)
@@ -169,20 +157,6 @@ static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}
-/* System Register Dump
- *
- * Input Args:
- * indent - Left margin indent amount
- * sregs - System registers
- *
- * Output Args:
- * stream - Output FILE stream
- *
- * Return: None
- *
- * Dumps the state of the system registers given by sregs, to the FILE stream
- * given by steam.
- */
void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
uint8_t indent)
{
@@ -240,21 +214,6 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
}
}
-/* VM Virtual Page Map
- *
- * Input Args:
- * vm - Virtual Machine
- * vaddr - VM Virtual Address
- * paddr - VM Physical Address
- * pgd_memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
- * Within the VM given by vm, creates a virtual translation for the page
- * starting at vaddr to the page starting at paddr.
- */
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
uint32_t pgd_memslot)
{
@@ -326,20 +285,6 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
pte[index[0]].present = 1;
}
-/* Virtual Translation Tables Dump
- *
- * Input Args:
- * vm - Virtual Machine
- * indent - Left margin indent amount
- *
- * Output Args:
- * stream - Output FILE stream
- *
- * Return: None
- *
- * Dumps to the FILE stream given by stream, the contents of all the
- * virtual translation tables for the VM given by vm.
- */
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
struct pageMapL4Entry *pml4e, *pml4e_start;
@@ -421,7 +366,8 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
}
-/* Set Unusable Segment
+/*
+ * Set Unusable Segment
*
* Input Args: None
*
@@ -430,7 +376,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
*
* Return: None
*
- * Sets the segment register pointed to by segp to an unusable state.
+ * Sets the segment register pointed to by @segp to an unusable state.
*/
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
@@ -460,7 +406,8 @@ static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
}
-/* Set Long Mode Flat Kernel Code Segment
+/*
+ * Set Long Mode Flat Kernel Code Segment
*
* Input Args:
* vm - VM whose GDT is being filled, or NULL to only write segp
@@ -471,8 +418,8 @@ static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
*
* Return: None
*
- * Sets up the KVM segment pointed to by segp, to be a code segment
- * with the selector value given by selector.
+ * Sets up the KVM segment pointed to by @segp, to be a code segment
+ * with the selector value given by @selector.
*/
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
struct kvm_segment *segp)
@@ -491,7 +438,8 @@ static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
kvm_seg_fill_gdt_64bit(vm, segp);
}
-/* Set Long Mode Flat Kernel Data Segment
+/*
+ * Set Long Mode Flat Kernel Data Segment
*
* Input Args:
* vm - VM whose GDT is being filled, or NULL to only write segp
@@ -502,8 +450,8 @@ static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
*
* Return: None
*
- * Sets up the KVM segment pointed to by segp, to be a data segment
- * with the selector value given by selector.
+ * Sets up the KVM segment pointed to by @segp, to be a data segment
+ * with the selector value given by @selector.
*/
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
struct kvm_segment *segp)
@@ -521,24 +469,6 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
kvm_seg_fill_gdt_64bit(vm, segp);
}
-/* Address Guest Virtual to Guest Physical
- *
- * Input Args:
- * vm - Virtual Machine
- * gpa - VM virtual address
- *
- * Output Args: None
- *
- * Return:
- * Equivalent VM physical address
- *
- * Translates the VM virtual address given by gva to a VM physical
- * address and then locates the memory region containing the VM
- * physical address, within the VM given by vm. When found, the host
- * virtual address providing the memory to the vm physical address is returned.
- * A TEST_ASSERT failure occurs if no region containing translated
- * VM virtual address exists.
- */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint16_t index[4];
@@ -576,8 +506,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);
unmapped_gva:
- TEST_ASSERT(false, "No mapping for vm virtual address, "
- "gva: 0x%lx", gva);
+ TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
exit(EXIT_FAILURE);
}
@@ -634,18 +563,13 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_m
break;
default:
- TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
+ TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
sregs.cr3 = vm->pgd;
vcpu_sregs_set(vm, vcpuid, &sregs);
}
-/* Adds a vCPU with reasonable defaults (i.e., a stack)
- *
- * Input Args:
- * vcpuid - The id of the VCPU to add to the VM.
- * guest_code - The vCPU's entry point
- */
+
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
struct kvm_mp_state mp_state;
@@ -670,7 +594,8 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
vcpu_set_mp_state(vm, vcpuid, &mp_state);
}
-/* Allocate an instance of struct kvm_cpuid2
+/*
+ * Allocate an instance of struct kvm_cpuid2
*
* Input Args: None
*
@@ -703,7 +628,8 @@ static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
return cpuid;
}
-/* KVM Supported CPUID Get
+/*
+ * KVM Supported CPUID Get
*
* Input Args: None
*
@@ -735,11 +661,12 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
return cpuid;
}
-/* Locate a cpuid entry.
+/*
+ * Locate a cpuid entry.
*
* Input Args:
- * cpuid: The cpuid.
* function: The function of the cpuid entry to find.
+ * index: The index of the cpuid entry.
*
* Output Args: None
*
@@ -766,7 +693,8 @@ kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
return entry;
}
-/* VM VCPU CPUID Set
+/*
+ * VM VCPU CPUID Set
*
* Input Args:
* vm - Virtual Machine
@@ -793,20 +721,6 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
}
-/* Create a VM with reasonable defaults
- *
- * Input Args:
- * vcpuid - The id of the single VCPU to add to the VM.
- * extra_mem_pages - The size of extra memories to add (this will
- * decide how much extra space we will need to
- * setup the page tables using mem slot 0)
- * guest_code - The vCPU's entry point
- *
- * Output Args: None
- *
- * Return:
- * Pointer to opaque structure that describes the created VM.
- */
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
void *guest_code)
{
@@ -837,7 +751,8 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
return vm;
}
-/* VCPU Get MSR
+/*
+ * VCPU Get MSR
*
* Input Args:
* vm - Virtual Machine
@@ -869,7 +784,8 @@ uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
return buffer.entry.data;
}
-/* _VCPU Set MSR
+/*
+ * _VCPU Set MSR
*
* Input Args:
* vm - Virtual Machine
@@ -902,7 +818,8 @@ int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
return r;
}
-/* VCPU Set MSR
+/*
+ * VCPU Set MSR
*
* Input Args:
* vm - Virtual Machine
@@ -926,22 +843,6 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
" rc: %i errno: %i", r, errno);
}
-/* VM VCPU Args Set
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * num - number of arguments
- * ... - arguments, each of type uint64_t
- *
- * Output Args: None
- *
- * Return: None
- *
- * Sets the first num function input arguments to the values
- * given as variable args. Each of the variable args is expected to
- * be of type uint64_t.
- */
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
va_list ap;
@@ -976,22 +877,6 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
va_end(ap);
}
-/*
- * VM VCPU Dump
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * indent - Left margin indent amount
- *
- * Output Args:
- * stream - Output FILE stream
- *
- * Return: None
- *
- * Dumps the current state of the VCPU specified by vcpuid, within the VM
- * given by vm, to the FILE stream given by stream.
- */
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
struct kvm_regs regs;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index 6e05a8fc3fe0..c42401068373 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -154,7 +154,7 @@ void nested_svm_check_supported(void)
kvm_get_supported_cpuid_entry(0x80000001);
if (!(entry->ecx & CPUID_SVM)) {
- fprintf(stderr, "nested SVM not enabled, skipping test\n");
+ print_skip("nested SVM not enabled");
exit(KSFT_SKIP);
}
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 7aaa99ca4dbc..6f17f69394be 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -191,7 +191,7 @@ bool load_vmcs(struct vmx_pages *vmx)
if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
vmx->enlightened_vmcs))
return false;
- current_evmcs->revision_id = vmcs_revision();
+ current_evmcs->revision_id = EVMCS_VERSION;
}
return true;
@@ -381,7 +381,7 @@ void nested_vmx_check_supported(void)
struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
if (!(entry->ecx & CPUID_VMX)) {
- fprintf(stderr, "nested VMX not enabled, skipping test\n");
+ print_skip("nested VMX not enabled");
exit(KSFT_SKIP);
}
}
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index 9edaa9a134ce..9f49ead380ab 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -40,7 +40,7 @@ int main(int argc, char *argv[])
maxsize = kvm_check_cap(KVM_CAP_S390_MEM_OP);
if (!maxsize) {
- fprintf(stderr, "CAP_S390_MEM_OP not supported -> skip test\n");
+ print_skip("CAP_S390_MEM_OP not supported");
exit(KSFT_SKIP);
}
if (maxsize > sizeof(mem1))
diff --git a/tools/testing/selftests/kvm/s390x/resets.c b/tools/testing/selftests/kvm/s390x/resets.c
index 1485bc6c8999..b143db6d8693 100644
--- a/tools/testing/selftests/kvm/s390x/resets.c
+++ b/tools/testing/selftests/kvm/s390x/resets.c
@@ -20,29 +20,42 @@ struct kvm_s390_irq buf[VCPU_ID + LOCAL_IRQS];
struct kvm_vm *vm;
struct kvm_run *run;
-struct kvm_sync_regs *regs;
-static uint64_t regs_null[16];
-
-static uint64_t crs[16] = { 0x40000ULL,
- 0x42000ULL,
- 0, 0, 0, 0, 0,
- 0x43000ULL,
- 0, 0, 0, 0, 0,
- 0x44000ULL,
- 0, 0
-};
+struct kvm_sync_regs *sync_regs;
+static uint8_t regs_null[512];
static void guest_code_initial(void)
{
- /* Round toward 0 */
- uint32_t fpc = 0x11;
+ /* set several CRs to "safe" value */
+ unsigned long cr2_59 = 0x10; /* enable guarded storage */
+ unsigned long cr8_63 = 0x1; /* monitor mask = 1 */
+ unsigned long cr10 = 1; /* PER START */
+ unsigned long cr11 = -1; /* PER END */
+
/* Dirty registers */
asm volatile (
- " lctlg 0,15,%0\n"
- " sfpc %1\n"
- : : "Q" (crs), "d" (fpc));
- GUEST_SYNC(0);
+ " lghi 2,0x11\n" /* Round toward 0 */
+ " sfpc 2\n" /* set fpc to !=0 */
+ " lctlg 2,2,%0\n"
+ " lctlg 8,8,%1\n"
+ " lctlg 10,10,%2\n"
+ " lctlg 11,11,%3\n"
+ /* now clobber some general purpose regs */
+ " llihh 0,0xffff\n"
+ " llihl 1,0x5555\n"
+ " llilh 2,0xaaaa\n"
+ " llill 3,0x0000\n"
+ /* now clobber a floating point reg */
+ " lghi 4,0x1\n"
+ " cdgbr 0,4\n"
+ /* now clobber an access reg */
+ " sar 9,4\n"
+ /* We embed diag 501 here to control register content */
+ " diag 0,0,0x501\n"
+ :
+ : "m" (cr2_59), "m" (cr8_63), "m" (cr10), "m" (cr11)
+ /* no clobber list as this should not return */
+ );
}
static void test_one_reg(uint64_t id, uint64_t value)
@@ -53,7 +66,7 @@ static void test_one_reg(uint64_t id, uint64_t value)
reg.addr = (uintptr_t)&eval_reg;
reg.id = id;
vcpu_get_reg(vm, VCPU_ID, &reg);
- TEST_ASSERT(eval_reg == value, "value == %s", value);
+ TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
}
static void assert_noirq(void)
@@ -87,6 +100,31 @@ static void assert_clear(void)
vcpu_fpu_get(vm, VCPU_ID, &fpu);
TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0");
+
+ /* sync regs */
+ TEST_ASSERT(!memcmp(sync_regs->gprs, regs_null, sizeof(sync_regs->gprs)),
+ "gprs0-15 == 0 (sync_regs)");
+
+ TEST_ASSERT(!memcmp(sync_regs->acrs, regs_null, sizeof(sync_regs->acrs)),
+ "acrs0-15 == 0 (sync_regs)");
+
+ TEST_ASSERT(!memcmp(sync_regs->vrs, regs_null, sizeof(sync_regs->vrs)),
+ "vrs0-15 == 0 (sync_regs)");
+}
+
+static void assert_initial_noclear(void)
+{
+ TEST_ASSERT(sync_regs->gprs[0] == 0xffff000000000000UL,
+ "gpr0 == 0xffff000000000000 (sync_regs)");
+ TEST_ASSERT(sync_regs->gprs[1] == 0x0000555500000000UL,
+ "gpr1 == 0x0000555500000000 (sync_regs)");
+ TEST_ASSERT(sync_regs->gprs[2] == 0x00000000aaaa0000UL,
+ "gpr2 == 0x00000000aaaa0000 (sync_regs)");
+ TEST_ASSERT(sync_regs->gprs[3] == 0x0000000000000000UL,
+ "gpr3 == 0x0000000000000000 (sync_regs)");
+ TEST_ASSERT(sync_regs->fprs[0] == 0x3ff0000000000000UL,
+		    "fpr0 == 1.0 (sync_regs)");
+ TEST_ASSERT(sync_regs->acrs[9] == 1, "ar9 == 1 (sync_regs)");
}
static void assert_initial(void)
@@ -94,12 +132,32 @@ static void assert_initial(void)
struct kvm_sregs sregs;
struct kvm_fpu fpu;
+ /* KVM_GET_SREGS */
vcpu_sregs_get(vm, VCPU_ID, &sregs);
- TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0");
- TEST_ASSERT(sregs.crs[14] == 0xC2000000UL, "cr14 == 0xC2000000");
+ TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0 (KVM_GET_SREGS)");
+ TEST_ASSERT(sregs.crs[14] == 0xC2000000UL,
+ "cr14 == 0xC2000000 (KVM_GET_SREGS)");
TEST_ASSERT(!memcmp(&sregs.crs[1], regs_null, sizeof(sregs.crs[1]) * 12),
- "cr1-13 == 0");
- TEST_ASSERT(sregs.crs[15] == 0, "cr15 == 0");
+ "cr1-13 == 0 (KVM_GET_SREGS)");
+ TEST_ASSERT(sregs.crs[15] == 0, "cr15 == 0 (KVM_GET_SREGS)");
+
+ /* sync regs */
+ TEST_ASSERT(sync_regs->crs[0] == 0xE0UL, "cr0 == 0xE0 (sync_regs)");
+ TEST_ASSERT(sync_regs->crs[14] == 0xC2000000UL,
+ "cr14 == 0xC2000000 (sync_regs)");
+ TEST_ASSERT(!memcmp(&sync_regs->crs[1], regs_null, 8 * 12),
+ "cr1-13 == 0 (sync_regs)");
+ TEST_ASSERT(sync_regs->crs[15] == 0, "cr15 == 0 (sync_regs)");
+ TEST_ASSERT(sync_regs->fpc == 0, "fpc == 0 (sync_regs)");
+ TEST_ASSERT(sync_regs->todpr == 0, "todpr == 0 (sync_regs)");
+ TEST_ASSERT(sync_regs->cputm == 0, "cputm == 0 (sync_regs)");
+ TEST_ASSERT(sync_regs->ckc == 0, "ckc == 0 (sync_regs)");
+ TEST_ASSERT(sync_regs->pp == 0, "pp == 0 (sync_regs)");
+ TEST_ASSERT(sync_regs->gbea == 1, "gbea == 1 (sync_regs)");
+
+ /* kvm_run */
+ TEST_ASSERT(run->psw_addr == 0, "psw_addr == 0 (kvm_run)");
+ TEST_ASSERT(run->psw_mask == 0, "psw_mask == 0 (kvm_run)");
vcpu_fpu_get(vm, VCPU_ID, &fpu);
TEST_ASSERT(!fpu.fpc, "fpc == 0");
@@ -111,9 +169,19 @@ static void assert_initial(void)
test_one_reg(KVM_REG_S390_CLOCK_COMP, 0);
}
+static void assert_normal_noclear(void)
+{
+ TEST_ASSERT(sync_regs->crs[2] == 0x10, "cr2 == 10 (sync_regs)");
+	TEST_ASSERT(sync_regs->crs[8] == 1, "cr8 == 1 (sync_regs)");
+ TEST_ASSERT(sync_regs->crs[10] == 1, "cr10 == 1 (sync_regs)");
+ TEST_ASSERT(sync_regs->crs[11] == -1, "cr11 == -1 (sync_regs)");
+}
+
static void assert_normal(void)
{
test_one_reg(KVM_REG_S390_PFTOKEN, KVM_S390_PFAULT_TOKEN_INVALID);
+ TEST_ASSERT(sync_regs->pft == KVM_S390_PFAULT_TOKEN_INVALID,
+ "pft == 0xff..... (sync_regs)");
assert_noirq();
}
@@ -134,53 +202,67 @@ static void inject_irq(int cpu_id)
static void test_normal(void)
{
- printf("Testing normal reset\n");
+ pr_info("Testing normal reset\n");
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
run = vcpu_state(vm, VCPU_ID);
- regs = &run->s.regs;
+ sync_regs = &run->s.regs;
vcpu_run(vm, VCPU_ID);
inject_irq(VCPU_ID);
vcpu_ioctl(vm, VCPU_ID, KVM_S390_NORMAL_RESET, 0);
+
+ /* must clears */
assert_normal();
+ /* must not clears */
+ assert_normal_noclear();
+ assert_initial_noclear();
+
kvm_vm_free(vm);
}
static void test_initial(void)
{
- printf("Testing initial reset\n");
+ pr_info("Testing initial reset\n");
vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
run = vcpu_state(vm, VCPU_ID);
- regs = &run->s.regs;
+ sync_regs = &run->s.regs;
vcpu_run(vm, VCPU_ID);
inject_irq(VCPU_ID);
vcpu_ioctl(vm, VCPU_ID, KVM_S390_INITIAL_RESET, 0);
+
+ /* must clears */
assert_normal();
assert_initial();
+ /* must not clears */
+ assert_initial_noclear();
+
kvm_vm_free(vm);
}
static void test_clear(void)
{
- printf("Testing clear reset\n");
+ pr_info("Testing clear reset\n");
vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
run = vcpu_state(vm, VCPU_ID);
- regs = &run->s.regs;
+ sync_regs = &run->s.regs;
vcpu_run(vm, VCPU_ID);
inject_irq(VCPU_ID);
vcpu_ioctl(vm, VCPU_ID, KVM_S390_CLEAR_RESET, 0);
+
+ /* must clears */
assert_normal();
assert_initial();
assert_clear();
+
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
index b705637ca14b..5731ccf34917 100644
--- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
@@ -42,6 +42,13 @@ static void guest_code(void)
" values did not match: 0x%llx, 0x%llx\n", \
left->reg, right->reg)
+#define REG_COMPARE32(reg) \
+ TEST_ASSERT(left->reg == right->reg, \
+ "Register " #reg \
+ " values did not match: 0x%x, 0x%x\n", \
+ left->reg, right->reg)
+
static void compare_regs(struct kvm_regs *left, struct kvm_sync_regs *right)
{
int i;
@@ -55,7 +62,7 @@ static void compare_sregs(struct kvm_sregs *left, struct kvm_sync_regs *right)
int i;
for (i = 0; i < 16; i++)
- REG_COMPARE(acrs[i]);
+ REG_COMPARE32(acrs[i]);
for (i = 0; i < 16; i++)
REG_COMPARE(crs[i]);
@@ -79,7 +86,7 @@ int main(int argc, char *argv[])
cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
if (!cap) {
- fprintf(stderr, "CAP_SYNC_REGS not supported, skipping test\n");
+ print_skip("CAP_SYNC_REGS not supported");
exit(KSFT_SKIP);
}
@@ -155,7 +162,7 @@ int main(int argc, char *argv[])
"r11 sync regs value incorrect 0x%llx.",
run->s.regs.gprs[11]);
TEST_ASSERT(run->s.regs.acrs[0] == 1 << 11,
- "acr0 sync regs value incorrect 0x%llx.",
+ "acr0 sync regs value incorrect 0x%x.",
run->s.regs.acrs[0]);
vcpu_regs_get(vm, VCPU_ID, &regs);
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
new file mode 100644
index 000000000000..fcc840088c91
--- /dev/null
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * steal/stolen time test
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <time.h>
+#include <sched.h>
+#include <pthread.h>
+#include <linux/kernel.h>
+#include <sys/syscall.h>
+#include <asm/kvm.h>
+#include <asm/kvm_para.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define NR_VCPUS 4
+#define ST_GPA_BASE (1 << 30)
+#define MIN_RUN_DELAY_NS 200000UL
+
+static void *st_gva[NR_VCPUS];
+static uint64_t guest_stolen_time[NR_VCPUS];
+
+#if defined(__x86_64__)
+
+/* steal_time must have 64-byte alignment */
+#define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63)
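The mask arithmetic rounds the struct size up to the next multiple of 64; for instance, a 48-byte size would become (48 + 63) & ~63 == 64.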
+
+static void check_status(struct kvm_steal_time *st)
+{
+ GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
+ GUEST_ASSERT(READ_ONCE(st->flags) == 0);
+ GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
+}
+
+static void guest_code(int cpu)
+{
+ struct kvm_steal_time *st = st_gva[cpu];
+ uint32_t version;
+
+ GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
+
+ memset(st, 0, sizeof(*st));
+ GUEST_SYNC(0);
+
+ check_status(st);
+ WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+ version = READ_ONCE(st->version);
+ check_status(st);
+ GUEST_SYNC(1);
+
+ check_status(st);
+ GUEST_ASSERT(version < READ_ONCE(st->version));
+ WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+ check_status(st);
+ GUEST_DONE();
+}
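The version field behaves like a seqcount: the host makes it odd while an update is in flight, and the test asserts it is even and strictly increasing across runs. A sketch of a retry-based reader under that convention, not part of the test itself (READ_ONCE comes from the tools headers already used here; __sync_synchronize() is the GCC full barrier):

	static uint64_t read_steal_time(struct kvm_steal_time *st)
	{
		uint32_t seq;
		uint64_t steal;

		do {
			seq = READ_ONCE(st->version);
			__sync_synchronize();		/* order version vs. steal reads */
			steal = READ_ONCE(st->steal);
			__sync_synchronize();
		} while ((seq & 1) || READ_ONCE(st->version) != seq);

		return steal;
	}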
+
+static void steal_time_init(struct kvm_vm *vm)
+{
+ int i;
+
+ if (!(kvm_get_supported_cpuid_entry(KVM_CPUID_FEATURES)->eax &
+ KVM_FEATURE_STEAL_TIME)) {
+ print_skip("steal-time not supported");
+ exit(KSFT_SKIP);
+ }
+
+ for (i = 0; i < NR_VCPUS; ++i) {
+ int ret;
+
+ vcpu_set_cpuid(vm, i, kvm_get_supported_cpuid());
+
+ /* ST_GPA_BASE is identity mapped */
+ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+ sync_global_to_guest(vm, st_gva[i]);
+
+ ret = _vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
+ TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
+
+ vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
+ }
+}
+
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
+ int i;
+
+ pr_info("VCPU%d:\n", vcpuid);
+ pr_info(" steal: %lld\n", st->steal);
+ pr_info(" version: %d\n", st->version);
+ pr_info(" flags: %d\n", st->flags);
+ pr_info(" preempted: %d\n", st->preempted);
+ pr_info(" u8_pad: ");
+ for (i = 0; i < 3; ++i)
+ pr_info("%d", st->u8_pad[i]);
+ pr_info("\n pad: ");
+ for (i = 0; i < 11; ++i)
+ pr_info("%d", st->pad[i]);
+ pr_info("\n");
+}
+
+#elif defined(__aarch64__)
+
+/* PV_TIME_ST must have 64-byte alignment */
+#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)
+
+#define SMCCC_ARCH_FEATURES 0x80000001
+#define PV_TIME_FEATURES 0xc5000020
+#define PV_TIME_ST 0xc5000021
+
+struct st_time {
+ uint32_t rev;
+ uint32_t attr;
+ uint64_t st_time;
+};
+
+static int64_t smccc(uint32_t func, uint32_t arg)
+{
+ unsigned long ret;
+
+ asm volatile(
+ "mov x0, %1\n"
+ "mov x1, %2\n"
+ "hvc #0\n"
+ "mov %0, x0\n"
+ : "=r" (ret) : "r" (func), "r" (arg) :
+ "x0", "x1", "x2", "x3");
+
+ return ret;
+}
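SMCCC places the function ID in x0 and the first argument in x1; the result comes back in x0, with negative values meaning the call is not supported. Usage mirrors the probing sequence in guest_code() below:

	int64_t ret = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT(ret == 0);	/* 0 == SMCCC_RET_SUCCESS */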
+
+static void check_status(struct st_time *st)
+{
+ GUEST_ASSERT(READ_ONCE(st->rev) == 0);
+ GUEST_ASSERT(READ_ONCE(st->attr) == 0);
+}
+
+static void guest_code(int cpu)
+{
+ struct st_time *st;
+ int64_t status;
+
+ status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
+ GUEST_ASSERT(status == 0);
+ status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
+ GUEST_ASSERT(status == 0);
+ status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
+ GUEST_ASSERT(status == 0);
+
+ status = smccc(PV_TIME_ST, 0);
+ GUEST_ASSERT(status != -1);
+ GUEST_ASSERT(status == (ulong)st_gva[cpu]);
+
+ st = (struct st_time *)status;
+ GUEST_SYNC(0);
+
+ check_status(st);
+ WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
+ GUEST_SYNC(1);
+
+ check_status(st);
+ WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
+ GUEST_DONE();
+}
+
+static void steal_time_init(struct kvm_vm *vm)
+{
+ struct kvm_device_attr dev = {
+ .group = KVM_ARM_VCPU_PVTIME_CTRL,
+ .attr = KVM_ARM_VCPU_PVTIME_IPA,
+ };
+ int i, ret;
+
+ ret = _vcpu_ioctl(vm, 0, KVM_HAS_DEVICE_ATTR, &dev);
+ if (ret != 0 && errno == ENXIO) {
+ print_skip("steal-time not supported");
+ exit(KSFT_SKIP);
+ }
+
+ for (i = 0; i < NR_VCPUS; ++i) {
+ uint64_t st_ipa;
+
+ vcpu_ioctl(vm, i, KVM_HAS_DEVICE_ATTR, &dev);
+
+ dev.addr = (uint64_t)&st_ipa;
+
+ /* ST_GPA_BASE is identity mapped */
+ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+ sync_global_to_guest(vm, st_gva[i]);
+
+ st_ipa = (ulong)st_gva[i] | 1;
+ ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
+ TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
+
+ st_ipa = (ulong)st_gva[i];
+ vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
+
+ ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
+ TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
+ }
+}
+
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
+
+ pr_info("VCPU%d:\n", vcpuid);
+ pr_info(" rev: %d\n", st->rev);
+ pr_info(" attr: %d\n", st->attr);
+ pr_info(" st_time: %ld\n", st->st_time);
+}
+
+#endif
+
+static long get_run_delay(void)
+{
+ char path[64];
+ long val[2];
+ FILE *fp;
+
+ sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
+ fp = fopen(path, "r");
+ fscanf(fp, "%ld %ld ", &val[0], &val[1]);
+ fclose(fp);
+
+ return val[1];
+}
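The second field of /proc/<tid>/schedstat is the time spent waiting on a runqueue, in nanoseconds, so sampling it before and after a contended period yields the delay added by the stealing thread, which is exactly how main() uses it:

	long before = get_run_delay();
	/* ... contend for the pinned CPU ... */
	TEST_ASSERT(get_run_delay() - before >= MIN_RUN_DELAY_NS,
		    "expected a measurable run-queue delay");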
+
+static void *do_steal_time(void *arg)
+{
+ struct timespec ts, stop;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);
+
+ while (1) {
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
+ break;
+ }
+
+ return NULL;
+}
+
+static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct ucall uc;
+
+ vcpu_args_set(vm, vcpuid, 1, vcpuid);
+
+ vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
+
+ switch (get_ucall(vm, vcpuid, &uc)) {
+ case UCALL_SYNC:
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
+ default:
+ TEST_ASSERT(false, "Unexpected exit: %s",
+ exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+ }
+}
+
+int main(int ac, char **av)
+{
+ struct kvm_vm *vm;
+ pthread_attr_t attr;
+ pthread_t thread;
+ cpu_set_t cpuset;
+ unsigned int gpages;
+ long stolen_time;
+ long run_delay;
+ bool verbose;
+ int i;
+
+ verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));
+
+ /* Set CPU affinity so we can force preemption of the VCPU */
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ pthread_attr_init(&attr);
+ pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
+
+ /* Create a one VCPU guest and an identity mapped memslot for the steal time structure */
+ vm = vm_create_default(0, 0, guest_code);
+ gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
+ virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages, 0);
+ ucall_init(vm, NULL);
+
+ /* Add the rest of the VCPUs */
+ for (i = 1; i < NR_VCPUS; ++i)
+ vm_vcpu_add_default(vm, i, guest_code);
+
+ steal_time_init(vm);
+
+ /* Run test on each VCPU */
+ for (i = 0; i < NR_VCPUS; ++i) {
+ /* First VCPU run initializes steal-time */
+ run_vcpu(vm, i);
+
+ /* Second VCPU run, expect guest stolen time to be <= run_delay */
+ run_vcpu(vm, i);
+ sync_global_from_guest(vm, guest_stolen_time[i]);
+ stolen_time = guest_stolen_time[i];
+ run_delay = get_run_delay();
+ TEST_ASSERT(stolen_time <= run_delay,
+ "Expected stolen time <= %ld, got %ld",
+ run_delay, stolen_time);
+
+ /* Steal time from the VCPU. The steal time thread has the same CPU affinity as the VCPUs. */
+ run_delay = get_run_delay();
+ pthread_create(&thread, &attr, do_steal_time, NULL);
+ do
+ pthread_yield();
+ while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
+ pthread_join(thread, NULL);
+ run_delay = get_run_delay() - run_delay;
+ TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
+ "Expected run_delay >= %ld, got %ld",
+ MIN_RUN_DELAY_NS, run_delay);
+
+ /* Run VCPU again to confirm stolen time is consistent with run_delay */
+ run_vcpu(vm, i);
+ sync_global_from_guest(vm, guest_stolen_time[i]);
+ stolen_time = guest_stolen_time[i] - stolen_time;
+ TEST_ASSERT(stolen_time >= run_delay,
+ "Expected stolen time >= %ld, got %ld",
+ run_delay, stolen_time);
+
+ if (verbose) {
+ pr_info("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld", i,
+ guest_stolen_time[i], stolen_time);
+ if (stolen_time == run_delay)
+ pr_info(" (BONUS: guest test-stolen-time even exactly matches test-run_delay)");
+ pr_info("\n");
+ steal_time_dump(vm, i);
+ }
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 63cc9c3f5ab6..140e91901582 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -72,7 +72,7 @@ int main(int argc, char *argv[])
entry = kvm_get_supported_cpuid_entry(1);
if (!(entry->ecx & X86_FEATURE_XSAVE)) {
- printf("XSAVE feature not supported, skipping test\n");
+ print_skip("XSAVE feature not supported");
return 0;
}
@@ -101,12 +101,12 @@ int main(int argc, char *argv[])
vcpu_sregs_set(vm, VCPU_ID, &sregs);
break;
case UCALL_ABORT:
- TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
+ TEST_FAIL("Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
break;
case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 92915e6408e7..e6e62e5e75b2 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -21,10 +21,10 @@
void l2_guest_code(void)
{
- GUEST_SYNC(6);
-
GUEST_SYNC(7);
+ GUEST_SYNC(8);
+
/* Done, exit to L1 and never come back. */
vmcall();
}
@@ -50,12 +50,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_SYNC(5);
GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ current_evmcs->revision_id = -1u;
+ GUEST_ASSERT(vmlaunch());
+ current_evmcs->revision_id = EVMCS_VERSION;
+ GUEST_SYNC(6);
+
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
- GUEST_SYNC(8);
+ GUEST_SYNC(9);
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- GUEST_SYNC(9);
+ GUEST_SYNC(10);
}
void guest_code(struct vmx_pages *vmx_pages)
@@ -67,6 +72,10 @@ void guest_code(struct vmx_pages *vmx_pages)
l1_guest_code(vmx_pages);
GUEST_DONE();
+
+ /* Try enlightened vmptrld with an incorrect GPA */
+ evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
+ GUEST_ASSERT(vmlaunch());
}
int main(int argc, char *argv[])
@@ -87,7 +96,7 @@ int main(int argc, char *argv[])
if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
- printf("capabilities not available, skipping test\n");
+ print_skip("capabilities not available");
exit(KSFT_SKIP);
}
@@ -109,20 +118,20 @@ int main(int argc, char *argv[])
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
- TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
- goto done;
+ goto part1_done;
default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
/* UCALL_SYNC is handled here. */
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
- uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
+ uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
@@ -147,6 +156,10 @@ int main(int argc, char *argv[])
(ulong) regs2.rdi, (ulong) regs2.rsi);
}
-done:
+part1_done:
+ _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Unexpected successful VMEnter with invalid eVMCS pointer!");
+
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 443a2b54645b..83323f3d7ca0 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -66,7 +66,7 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
TEST_ASSERT((entry->function >= 0x40000000) &&
(entry->function <= 0x4000000A),
- "function %lx is our of supported range",
+		    "function %x is out of supported range",
entry->function);
TEST_ASSERT(entry->index == 0,
@@ -141,8 +141,7 @@ int main(int argc, char *argv[])
rv = kvm_check_cap(KVM_CAP_HYPERV_CPUID);
if (!rv) {
- fprintf(stderr,
- "KVM_CAP_HYPERV_CPUID not supported, skip test\n");
+ print_skip("KVM_CAP_HYPERV_CPUID not supported");
exit(KSFT_SKIP);
}
@@ -160,8 +159,7 @@ int main(int argc, char *argv[])
free(hv_cpuid_entries);
if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
- fprintf(stderr,
- "Enlightened VMCS is unsupported, skip related test\n");
+ print_skip("Enlightened VMCS is unsupported");
goto vm_free;
}
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
index 00bb97d76000..e6480fd5c4bd 100644
--- a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
+++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
@@ -44,7 +44,7 @@ void *thr(void *arg)
struct kvm_run *run = tc->run;
res = ioctl(kvmcpu, KVM_RUN, 0);
- printf("ret1=%d exit_reason=%d suberror=%d\n",
+ pr_info("ret1=%d exit_reason=%d suberror=%d\n",
res, run->exit_reason, run->internal.suberror);
return 0;
@@ -93,12 +93,12 @@ int main(void)
int warnings_before, warnings_after;
if (!is_intel_cpu()) {
- printf("Must be run on an Intel CPU, skipping test\n");
+ print_skip("Must be run on an Intel CPU");
exit(KSFT_SKIP);
}
if (vm_is_unrestricted_guest(NULL)) {
- printf("Unrestricted guest must be disabled, skipping test\n");
+ print_skip("Unrestricted guest must be disabled");
exit(KSFT_SKIP);
}
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index f9334bd3cce9..1e89688cbbbf 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -58,8 +58,7 @@ static void test_msr_platform_info_enabled(struct kvm_vm *vm)
exit_reason_str(run->exit_reason));
get_ucall(vm, VCPU_ID, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
- "Received ucall other than UCALL_SYNC: %u\n",
- ucall);
+ "Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
"Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
@@ -89,8 +88,7 @@ int main(int argc, char *argv[])
rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
if (!rv) {
- fprintf(stderr,
- "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n");
+ print_skip("KVM_CAP_MSR_PLATFORM_INFO not supported");
exit(KSFT_SKIP);
}
diff --git a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
new file mode 100644
index 000000000000..c6691cff4e19
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+/*
+ * Somewhat arbitrary location and slot, intended to not overlap anything. The
+ * location and size are specifically 2mb sized/aligned so that the initial
+ * region corresponds to exactly one large page.
+ */
+#define MEM_REGION_GPA 0xc0000000
+#define MEM_REGION_SIZE 0x200000
+#define MEM_REGION_SLOT 10
+
+static void guest_code(void)
+{
+ uint64_t val;
+
+ do {
+ val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
+ } while (!val);
+
+ if (val != 1)
+ ucall(UCALL_ABORT, 1, val);
+
+ GUEST_DONE();
+}
+
+static void *vcpu_worker(void *data)
+{
+ struct kvm_vm *vm = data;
+ struct kvm_run *run;
+ struct ucall uc;
+ uint64_t cmd;
+
+ /*
+ * Loop until the guest is done. Re-enter the guest on all MMIO exits,
+ * which will occur if the guest attempts to access a memslot while it
+ * is being moved.
+ */
+ run = vcpu_state(vm, VCPU_ID);
+ do {
+ vcpu_run(vm, VCPU_ID);
+ } while (run->exit_reason == KVM_EXIT_MMIO);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason = %d", run->exit_reason);
+
+ cmd = get_ucall(vm, VCPU_ID, &uc);
+ TEST_ASSERT(cmd == UCALL_DONE, "Unexpected val in guest = %lu", uc.args[0]);
+ return NULL;
+}
+
+static void test_move_memory_region(void)
+{
+ pthread_t vcpu_thread;
+ struct kvm_vm *vm;
+ uint64_t *hva;
+ uint64_t gpa;
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+ MEM_REGION_GPA, MEM_REGION_SLOT,
+ MEM_REGION_SIZE / getpagesize(), 0);
+
+ /*
+ * Allocate and map two pages so that the GPA accessed by guest_code()
+ * stays valid across the memslot move.
+ */
+ gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
+ TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
+
+ virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
+
+ /* Ditto for the host mapping so that both pages can be zeroed. */
+ hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+ memset(hva, 0, 2 * 4096);
+
+ pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
+
+ /* Ensure the guest thread is spun up. */
+ usleep(100000);
+
+ /*
+ * Shift the region's base GPA. The guest should not see "2" as the
+ * hva->gpa translation is misaligned, i.e. the guest is accessing a
+ * different host pfn.
+ */
+ vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
+ WRITE_ONCE(*hva, 2);
+
+ usleep(100000);
+
+ /*
+ * Note, value in memory needs to be changed *before* restoring the
+ * memslot, else the guest could race the update and see "2".
+ */
+ WRITE_ONCE(*hva, 1);
+
+ /* Restore the original base, the guest should see "1". */
+ vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
+
+ pthread_join(vcpu_thread, NULL);
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ int i, loops;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ if (argc > 1)
+ loops = atoi(argv[1]);
+ else
+ loops = 10;
+
+ for (i = 0; i < loops; i++)
+ test_move_memory_region();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index 8c063646f2a0..8230b6bc6b8f 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -117,7 +117,7 @@ int main(int argc, char *argv[])
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
} else {
- printf("will skip SMM test with VMX enabled\n");
+ pr_info("will skip SMM test with VMX enabled\n");
vcpu_args_set(vm, VCPU_ID, 1, 0);
}
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 3ab5ec3da9f4..5b1a016edf55 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -139,7 +139,7 @@ int main(int argc, char *argv[])
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
} else {
- printf("will skip nested state checks\n");
+ pr_info("will skip nested state checks\n");
vcpu_args_set(vm, VCPU_ID, 1, 0);
}
@@ -152,20 +152,20 @@ int main(int argc, char *argv[])
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
- TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
/* UCALL_SYNC is handled here. */
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
- uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
+ uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
diff --git a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
index e280f68f6365..0e1adb4e3199 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
@@ -61,16 +61,14 @@ int main(int argc, char *argv[])
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
- TEST_ASSERT(false, "%s",
- (const char *)uc.args[0]);
+ TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
case UCALL_SYNC:
break;
case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false,
- "Unknown ucall 0x%x.", uc.cmd);
+ TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
}
}
done:
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index 5c8224256294..d672f0a473f8 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -91,11 +91,11 @@ int main(int argc, char *argv[])
cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
- fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n");
+ print_skip("KVM_CAP_SYNC_REGS not supported");
exit(KSFT_SKIP);
}
if ((cap & INVALID_SYNC_FIELD) != 0) {
- fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n");
+ print_skip("The \"invalid\" field is not invalid");
exit(KSFT_SKIP);
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
index 5dfb53546a26..fe40ade06a49 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
@@ -78,10 +78,10 @@ int main(int argc, char *argv[])
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
- TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
+ TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index a223a6401258..e894a638a155 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -21,7 +21,7 @@
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
-#define TEST_MEM_SIZE 3
+#define TEST_MEM_PAGES 3
/* L1 guest test virtual memory offset */
#define GUEST_TEST_MEM 0xc0000000
@@ -91,15 +91,14 @@ int main(int argc, char *argv[])
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
GUEST_TEST_MEM,
TEST_MEM_SLOT_INDEX,
- TEST_MEM_SIZE,
+ TEST_MEM_PAGES,
KVM_MEM_LOG_DIRTY_PAGES);
/*
* Add an identity map for GVA range [0xc0000000, 0xc0002000). This
* affects both L1 and L2. However...
*/
- virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
- TEST_MEM_SIZE * 4096, 0);
+ virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);
/*
* ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
@@ -113,11 +112,11 @@ int main(int argc, char *argv[])
nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
- bmap = bitmap_alloc(TEST_MEM_SIZE);
+ bmap = bitmap_alloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
while (!done) {
- memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
+ memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
_vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
@@ -126,8 +125,8 @@ int main(int argc, char *argv[])
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
- TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
/* NOT REACHED */
case UCALL_SYNC:
/*
@@ -152,7 +151,7 @@ int main(int argc, char *argv[])
done = true;
break;
default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index 9ef7fab39d48..54cdefdfb49d 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -212,7 +212,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
test_nested_state(vm, state);
vcpu_nested_state_get(vm, VCPU_ID, state);
TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
- "Size must be between %d and %d. The size returned was %d.",
+ "Size must be between %ld and %d. The size returned was %d.",
sizeof(*state), state_sz, state->size);
TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
@@ -228,7 +228,7 @@ int main(int argc, char *argv[])
have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
+ print_skip("KVM_CAP_NESTED_STATE not available");
exit(KSFT_SKIP);
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 69e482a95c47..fbe8417cbc2c 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -121,8 +121,8 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
static void report(int64_t val)
{
- printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
- val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
+ pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
+ val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}
int main(int argc, char *argv[])
@@ -150,7 +150,7 @@ int main(int argc, char *argv[])
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
- TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
+ TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */
case UCALL_SYNC:
report(uc.args[1]);
@@ -158,7 +158,7 @@ int main(int argc, char *argv[])
case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
index 851ea81b9d9f..3529376747c2 100644
--- a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
@@ -51,7 +51,7 @@ int main(int argc, char *argv[])
xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES);
}
if (!xss_supported) {
- printf("IA32_XSS is not supported by the vCPU.\n");
+ print_skip("IA32_XSS is not supported by the vCPU");
exit(KSFT_SKIP);
}
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 3ed0134a764d..b0556c752443 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -137,7 +137,8 @@ endif
# Selftest makefiles can override those targets by setting
# OVERRIDE_TARGETS = 1.
ifeq ($(OVERRIDE_TARGETS),)
-$(OUTPUT)/%:%.c
+LOCAL_HDRS := $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h
+$(OUTPUT)/%:%.c $(LOCAL_HDRS)
$(LINK.c) $^ $(LDLIBS) -o $@
$(OUTPUT)/%.o:%.S
diff --git a/tools/testing/selftests/media_tests/.gitignore b/tools/testing/selftests/media_tests/.gitignore
index 8745eba39012..da438e780ffe 100644
--- a/tools/testing/selftests/media_tests/.gitignore
+++ b/tools/testing/selftests/media_tests/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
media_device_test
media_device_open
video_device_test
diff --git a/tools/testing/selftests/membarrier/.gitignore b/tools/testing/selftests/membarrier/.gitignore
index f2f7ec0a99b4..f2fbba178601 100644
--- a/tools/testing/selftests/membarrier/.gitignore
+++ b/tools/testing/selftests/membarrier/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
membarrier_test_multi_thread
membarrier_test_single_thread
diff --git a/tools/testing/selftests/memfd/.gitignore b/tools/testing/selftests/memfd/.gitignore
index afe87c40ac80..dd9a051f608e 100644
--- a/tools/testing/selftests/memfd/.gitignore
+++ b/tools/testing/selftests/memfd/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
fuse_mnt
fuse_test
memfd_test
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
index 53a848109f7b..0a15f9e23431 100644
--- a/tools/testing/selftests/memfd/Makefile
+++ b/tools/testing/selftests/memfd/Makefile
@@ -4,9 +4,8 @@ CFLAGS += -I../../../../include/uapi/
CFLAGS += -I../../../../include/
CFLAGS += -I../../../../usr/include/
-TEST_GEN_PROGS := memfd_test
+TEST_GEN_PROGS := memfd_test fuse_test fuse_mnt
TEST_PROGS := run_fuse_test.sh run_hugetlbfs_test.sh
-TEST_GEN_FILES := fuse_mnt fuse_test
fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
@@ -14,7 +13,7 @@ include ../lib.mk
$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
-$(OUTPUT)/memfd_test: memfd_test.c common.o
-$(OUTPUT)/fuse_test: fuse_test.c common.o
+$(OUTPUT)/memfd_test: memfd_test.c common.c
+$(OUTPUT)/fuse_test: fuse_test.c common.c
-EXTRA_CLEAN = common.o
+EXTRA_CLEAN = $(OUTPUT)/common.o
diff --git a/tools/testing/selftests/mount/.gitignore b/tools/testing/selftests/mount/.gitignore
index 856ad4107eb3..0bc64a6d4c18 100644
--- a/tools/testing/selftests/mount/.gitignore
+++ b/tools/testing/selftests/mount/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
unprivileged-remount-test
diff --git a/tools/testing/selftests/mqueue/.gitignore b/tools/testing/selftests/mqueue/.gitignore
index d8d42377205a..72ad8ca691c9 100644
--- a/tools/testing/selftests/mqueue/.gitignore
+++ b/tools/testing/selftests/mqueue/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
mq_open_tests
mq_perf_tests
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 997c65dcad68..742c499328b2 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
msg_zerocopy
socket
psock_fanout
diff --git a/tools/testing/selftests/net/forwarding/.gitignore b/tools/testing/selftests/net/forwarding/.gitignore
index a793eef5b876..2dea317f12e7 100644
--- a/tools/testing/selftests/net/forwarding/.gitignore
+++ b/tools/testing/selftests/net/forwarding/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
forwarding.config
diff --git a/tools/testing/selftests/net/mptcp/.gitignore b/tools/testing/selftests/net/mptcp/.gitignore
index ea13b255a99d..260336d5f0b1 100644
--- a/tools/testing/selftests/net/mptcp/.gitignore
+++ b/tools/testing/selftests/net/mptcp/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
mptcp_connect
pm_nl_ctl
*.pcap
diff --git a/tools/testing/selftests/nsfs/.gitignore b/tools/testing/selftests/nsfs/.gitignore
index 2ab2c824ce86..ed79ebdf286e 100644
--- a/tools/testing/selftests/nsfs/.gitignore
+++ b/tools/testing/selftests/nsfs/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
owner
pidns
diff --git a/tools/testing/selftests/openat2/.gitignore b/tools/testing/selftests/openat2/.gitignore
index bd68f6c3fd07..82a4846cbc4b 100644
--- a/tools/testing/selftests/openat2/.gitignore
+++ b/tools/testing/selftests/openat2/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
/*_test
diff --git a/tools/testing/selftests/pid_namespace/.gitignore b/tools/testing/selftests/pid_namespace/.gitignore
new file mode 100644
index 000000000000..93ab9d7e5b7e
--- /dev/null
+++ b/tools/testing/selftests/pid_namespace/.gitignore
@@ -0,0 +1 @@
+regression_enomem
diff --git a/tools/testing/selftests/pid_namespace/Makefile b/tools/testing/selftests/pid_namespace/Makefile
new file mode 100644
index 000000000000..dcaefa224ca0
--- /dev/null
+++ b/tools/testing/selftests/pid_namespace/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -g -I../../../../usr/include/
+
+TEST_GEN_PROGS := regression_enomem
+
+include ../lib.mk
+
+$(OUTPUT)/regression_enomem: regression_enomem.c ../pidfd/pidfd.h
diff --git a/tools/testing/selftests/pid_namespace/config b/tools/testing/selftests/pid_namespace/config
new file mode 100644
index 000000000000..26cdb27e7dbb
--- /dev/null
+++ b/tools/testing/selftests/pid_namespace/config
@@ -0,0 +1,2 @@
+CONFIG_PID_NS=y
+CONFIG_USER_NS=y
diff --git a/tools/testing/selftests/pid_namespace/regression_enomem.c b/tools/testing/selftests/pid_namespace/regression_enomem.c
new file mode 100644
index 000000000000..73d532556d17
--- /dev/null
+++ b/tools/testing/selftests/pid_namespace/regression_enomem.c
@@ -0,0 +1,45 @@
+#define _GNU_SOURCE
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/wait.h>
+
+#include "../kselftest.h"
+#include "../kselftest_harness.h"
+#include "../pidfd/pidfd.h"
+
+/*
+ * Regression test for:
+ * 35f71bc0a09a ("fork: report pid reservation failure properly")
+ * b26ebfe12f34 ("pid: Fix error return value in some cases")
+ */
+TEST(regression_enomem)
+{
+ pid_t pid;
+
+ if (geteuid())
+ EXPECT_EQ(0, unshare(CLONE_NEWUSER));
+
+ EXPECT_EQ(0, unshare(CLONE_NEWPID));
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+
+ EXPECT_EQ(0, wait_for_pid(pid));
+
+ pid = fork();
+ ASSERT_LT(pid, 0);
+ ASSERT_EQ(errno, ENOMEM);
+}
+
+TEST_HARNESS_MAIN
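Editor's note: the test above leans on wait_for_pid() from ../pidfd/pidfd.h. For readers without the tree at hand, a helper along these lines would satisfy the test's expectations; this is a sketch, and the actual pidfd.h implementation may differ in detail:

#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Reap a child and fold its exit status into 0 (success) or -1 */
static inline int wait_for_pid(pid_t pid)
{
	int status, ret;

again:
	ret = waitpid(pid, &status, 0);
	if (ret == -1) {
		if (errno == EINTR)
			goto again;
		return -1;
	}

	if (!WIFEXITED(status))
		return -1;

	return WEXITSTATUS(status);
}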
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index 39559d723c41..2d4db5afb142 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
pidfd_open_test
pidfd_poll_test
pidfd_test
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index d482515604db..c1921a53dbed 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -13,6 +13,8 @@
#include <string.h>
#include <syscall.h>
#include <sys/mount.h>
+#include <sys/types.h>
+#include <sys/wait.h>
#include "../kselftest.h"
diff --git a/tools/testing/selftests/powerpc/alignment/.gitignore b/tools/testing/selftests/powerpc/alignment/.gitignore
index 6d4fd014511c..28bc6ca13cc6 100644
--- a/tools/testing/selftests/powerpc/alignment/.gitignore
+++ b/tools/testing/selftests/powerpc/alignment/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
copy_first_unaligned
alignment_handler
diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore
index 9161679b1e1a..c9ce13983c99 100644
--- a/tools/testing/selftests/powerpc/benchmarks/.gitignore
+++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
gettimeofday
context_switch
fork
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index d40300a65b42..a32a6ab89914 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -2,6 +2,8 @@
TEST_GEN_PROGS := gettimeofday context_switch fork mmap_bench futex_bench null_syscall
TEST_GEN_FILES := exec_target
+TEST_FILES := settings
+
CFLAGS += -O2
top_srcdir = ../../../../..
diff --git a/tools/testing/selftests/powerpc/benchmarks/settings b/tools/testing/selftests/powerpc/benchmarks/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/benchmarks/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/powerpc/cache_shape/.gitignore b/tools/testing/selftests/powerpc/cache_shape/.gitignore
index ec1848434be5..b385eee3012c 100644
--- a/tools/testing/selftests/powerpc/cache_shape/.gitignore
+++ b/tools/testing/selftests/powerpc/cache_shape/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
cache_shape
diff --git a/tools/testing/selftests/powerpc/copyloops/.gitignore b/tools/testing/selftests/powerpc/copyloops/.gitignore
index 12ef5b031974..ddaf140b8255 100644
--- a/tools/testing/selftests/powerpc/copyloops/.gitignore
+++ b/tools/testing/selftests/powerpc/copyloops/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
copyuser_64_t0
copyuser_64_t1
copyuser_64_t2
diff --git a/tools/testing/selftests/powerpc/dscr/.gitignore b/tools/testing/selftests/powerpc/dscr/.gitignore
index b585c6c1564a..1d08b15af697 100644
--- a/tools/testing/selftests/powerpc/dscr/.gitignore
+++ b/tools/testing/selftests/powerpc/dscr/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
dscr_default_test
dscr_explicit_test
dscr_inherit_exec_test
diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile
index 5df476364b4d..cfa6eedcb66c 100644
--- a/tools/testing/selftests/powerpc/dscr/Makefile
+++ b/tools/testing/selftests/powerpc/dscr/Makefile
@@ -3,6 +3,8 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \
dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \
dscr_sysfs_thread_test
+TEST_FILES := settings
+
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/dscr/settings b/tools/testing/selftests/powerpc/dscr/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/dscr/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/powerpc/math/.gitignore b/tools/testing/selftests/powerpc/math/.gitignore
index 50ded63e25b7..e31ca6f453ed 100644
--- a/tools/testing/selftests/powerpc/math/.gitignore
+++ b/tools/testing/selftests/powerpc/math/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
fpu_syscall
vmx_syscall
fpu_preempt
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index 0ebeaea22641..2ca523255b1b 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
hugetlb_vs_thp_test
subpage_prot
tempfile
@@ -6,3 +7,4 @@ segv_errors
wild_bctr
large_vm_fork_separation
bad_accesses
+tlbie_test
diff --git a/tools/testing/selftests/powerpc/pmu/.gitignore b/tools/testing/selftests/powerpc/pmu/.gitignore
index e748f336eed3..ff7896903d7b 100644
--- a/tools/testing/selftests/powerpc/pmu/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
count_instructions
l3_bank_test
per_event_excludes
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 42bddbed8b64..2920fb39439b 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
reg_access_test
event_attributes_test
cycles_test
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 417306353e07..ca35dd8848b0 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -7,6 +7,7 @@ noarg:
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64
+TMPOUT = $(OUTPUT)/
# Toolchains may build PIE by default which breaks the assembly
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
diff --git a/tools/testing/selftests/powerpc/primitives/.gitignore b/tools/testing/selftests/powerpc/primitives/.gitignore
index 4cc4e31bed1d..1e5c04e24254 100644
--- a/tools/testing/selftests/powerpc/primitives/.gitignore
+++ b/tools/testing/selftests/powerpc/primitives/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
load_unaligned_zeropad
diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore
index dce19f221c46..0e96150b7c7e 100644
--- a/tools/testing/selftests/powerpc/ptrace/.gitignore
+++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ptrace-gpr
ptrace-tm-gpr
ptrace-tm-spd-gpr
diff --git a/tools/testing/selftests/powerpc/security/.gitignore b/tools/testing/selftests/powerpc/security/.gitignore
index 0b969fba3beb..f795e06f5ae3 100644
--- a/tools/testing/selftests/powerpc/security/.gitignore
+++ b/tools/testing/selftests/powerpc/security/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
rfi_flush
diff --git a/tools/testing/selftests/powerpc/signal/.gitignore b/tools/testing/selftests/powerpc/signal/.gitignore
index dca5852a1546..405b5364044c 100644
--- a/tools/testing/selftests/powerpc/signal/.gitignore
+++ b/tools/testing/selftests/powerpc/signal/.gitignore
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
signal
signal_tm
sigfuz
+sigreturn_vdso
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index 113838fbbe7f..932a032bf036 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := signal signal_tm sigfuz
+TEST_GEN_PROGS := signal signal_tm sigfuz sigreturn_vdso
CFLAGS += -maltivec
$(OUTPUT)/signal_tm: CFLAGS += -mhtm
$(OUTPUT)/sigfuz: CFLAGS += -pthread -m64
+TEST_FILES := settings
+
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/signal/settings b/tools/testing/selftests/powerpc/signal/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/powerpc/signal/sigreturn_vdso.c b/tools/testing/selftests/powerpc/signal/sigreturn_vdso.c
new file mode 100644
index 000000000000..e282fff0fe25
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/sigreturn_vdso.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that we can take signals with and without the VDSO mapped, which trigger
+ * different paths in the signal handling code.
+ *
+ * See handle_rt_signal64() and setup_trampoline() in signal_64.c
+ */
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Ensure assert() is not compiled out
+#undef NDEBUG
+#include <assert.h>
+
+#include "utils.h"
+
+static int search_proc_maps(char *needle, unsigned long *low, unsigned long *high)
+{
+ unsigned long start, end;
+ static char buf[4096];
+ char name[128];
+ FILE *f;
+ int rc = -1;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!f) {
+ perror("fopen");
+ return -1;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ rc = sscanf(buf, "%lx-%lx %*c%*c%*c%*c %*x %*d:%*d %*d %127s\n",
+ &start, &end, name);
+ if (rc == 2)
+ continue;
+
+ if (rc != 3) {
+ printf("sscanf errored\n");
+ rc = -1;
+ break;
+ }
+
+ if (strstr(name, needle)) {
+ *low = start;
+ *high = end - 1;
+ rc = 0;
+ break;
+ }
+ }
+
+ fclose(f);
+
+ return rc;
+}
+
+static volatile sig_atomic_t took_signal = 0;
+
+static void sigusr1_handler(int sig)
+{
+ took_signal++;
+}
+
+int test_sigreturn_vdso(void)
+{
+ unsigned long low, high, size;
+ struct sigaction act;
+ char *p;
+
+ act.sa_handler = sigusr1_handler;
+ act.sa_flags = 0;
+ sigemptyset(&act.sa_mask);
+
+ assert(sigaction(SIGUSR1, &act, NULL) == 0);
+
+ // Confirm the VDSO is mapped, and work out where it is
+ assert(search_proc_maps("[vdso]", &low, &high) == 0);
+ size = high - low + 1;
+ printf("VDSO is at 0x%lx-0x%lx (%lu bytes)\n", low, high, size);
+
+ kill(getpid(), SIGUSR1);
+ assert(took_signal == 1);
+ printf("Signal delivered OK with VDSO mapped\n");
+
+ // Remap the VDSO somewhere else
+ p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ assert(p != MAP_FAILED);
+ assert(mremap((void *)low, size, size, MREMAP_MAYMOVE|MREMAP_FIXED, p) != MAP_FAILED);
+ assert(search_proc_maps("[vdso]", &low, &high) == 0);
+ size = high - low + 1;
+ printf("VDSO moved to 0x%lx-0x%lx (%lu bytes)\n", low, high, size);
+
+ kill(getpid(), SIGUSR1);
+ assert(took_signal == 2);
+ printf("Signal delivered OK with VDSO moved\n");
+
+ assert(munmap((void *)low, size) == 0);
+ printf("Unmapped VDSO\n");
+
+ // Confirm the VDSO is not mapped anymore
+ assert(search_proc_maps("[vdso]", &low, &high) != 0);
+
+ // Make the stack executable
+ assert(search_proc_maps("[stack]", &low, &high) == 0);
+ size = high - low + 1;
+ mprotect((void *)low, size, PROT_READ|PROT_WRITE|PROT_EXEC);
+ printf("Remapped the stack executable\n");
+
+ kill(getpid(), SIGUSR1);
+ assert(took_signal == 3);
+ printf("Signal delivered OK with VDSO unmapped\n");
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_sigreturn_vdso, "sigreturn_vdso");
+}
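Editor's note: parsing /proc/self/maps, as the test above does, is one way to find the VDSO; the auxiliary vector exposes its base address directly. A minimal sketch of that alternative (assumes glibc's getauxval(); it only locates the VDSO base and does not compute the mapping's size):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/auxv.h>
#include <unistd.h>

static volatile sig_atomic_t hit;

static void handler(int sig)
{
	hit = 1;
}

int main(void)
{
	/* AT_SYSINFO_EHDR is the VDSO's load address, if one is mapped */
	printf("VDSO base: %p\n", (void *)getauxval(AT_SYSINFO_EHDR));

	signal(SIGUSR1, handler);
	kill(getpid(), SIGUSR1);

	return hit ? 0 : 1;
}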
diff --git a/tools/testing/selftests/powerpc/stringloops/.gitignore b/tools/testing/selftests/powerpc/stringloops/.gitignore
index 31a17e0ba884..b0dfc74aa57e 100644
--- a/tools/testing/selftests/powerpc/stringloops/.gitignore
+++ b/tools/testing/selftests/powerpc/stringloops/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
memcmp_64
memcmp_32
strlen
diff --git a/tools/testing/selftests/powerpc/switch_endian/.gitignore b/tools/testing/selftests/powerpc/switch_endian/.gitignore
index 89e762eab676..30e962cf84d1 100644
--- a/tools/testing/selftests/powerpc/switch_endian/.gitignore
+++ b/tools/testing/selftests/powerpc/switch_endian/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
switch_endian_test
check-reversed.S
diff --git a/tools/testing/selftests/powerpc/syscalls/.gitignore b/tools/testing/selftests/powerpc/syscalls/.gitignore
index f0f3fcc9d802..b00cab225476 100644
--- a/tools/testing/selftests/powerpc/syscalls/.gitignore
+++ b/tools/testing/selftests/powerpc/syscalls/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
ipc_unmuxed
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 98f2708d86cc..d8900a0c47a1 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
tm-resched-dscr
tm-syscall
tm-signal-msr-resv
@@ -13,6 +14,7 @@ tm-signal-context-chk-vmx
tm-signal-context-chk-vsx
tm-signal-context-force-tm
tm-signal-sigreturn-nt
+tm-signal-pagefault
tm-vmx-unavail
tm-unavailable
tm-trap
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index b15a1a325bd0..0b0db8d3857c 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -5,7 +5,9 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
$(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn tm-signal-sigreturn-nt \
- tm-signal-context-force-tm tm-poison
+ tm-signal-context-force-tm tm-poison tm-signal-pagefault
+
+TEST_FILES := settings
top_srcdir = ../../../../..
include ../../lib.mk
@@ -22,6 +24,7 @@ $(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
$(OUTPUT)/tm-signal-context-force-tm: CFLAGS += -pthread -m64
+$(OUTPUT)/tm-signal-pagefault: CFLAGS += -pthread -m64
SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
diff --git a/tools/testing/selftests/powerpc/tm/settings b/tools/testing/selftests/powerpc/tm/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
index 31717625f318..421cb082f6be 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
@@ -42,9 +42,10 @@
#endif
/* Setting contexts because the test will crash and we want to recover */
-ucontext_t init_context, main_context;
+ucontext_t init_context;
-static int count, first_time;
+/* count is changed in the signal handler, so it must be volatile */
+static volatile int count;
void usr_signal_handler(int signo, siginfo_t *si, void *uc)
{
@@ -98,11 +99,6 @@ void usr_signal_handler(int signo, siginfo_t *si, void *uc)
void seg_signal_handler(int signo, siginfo_t *si, void *uc)
{
- if (count == COUNT_MAX) {
- /* Return to tm_signal_force_msr() and exit */
- setcontext(&main_context);
- }
-
count++;
/* Reexecute the test */
@@ -126,37 +122,41 @@ void tm_trap_test(void)
*/
getcontext(&init_context);
- /* Allocated an alternative signal stack area */
- ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
- ss.ss_size = SIGSTKSZ;
- ss.ss_flags = 0;
+ while (count < COUNT_MAX) {
+ /* Allocate an alternative signal stack area */
+ ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
- if (ss.ss_sp == (void *)-1) {
- perror("mmap error\n");
- exit(-1);
- }
+ if (ss.ss_sp == (void *)-1) {
+ perror("mmap error\n");
+ exit(-1);
+ }
- /* Force the allocation through a page fault */
- if (madvise(ss.ss_sp, SIGSTKSZ, MADV_DONTNEED)) {
- perror("madvise\n");
- exit(-1);
- }
+ /* Force the allocation through a page fault */
+ if (madvise(ss.ss_sp, SIGSTKSZ, MADV_DONTNEED)) {
+ perror("madvise\n");
+ exit(-1);
+ }
- /* Setting an alternative stack to generate a page fault when
- * the signal is raised.
- */
- if (sigaltstack(&ss, NULL)) {
- perror("sigaltstack\n");
- exit(-1);
+ /*
+ * Setting an alternative stack to generate a page fault when
+ * the signal is raised.
+ */
+ if (sigaltstack(&ss, NULL)) {
+ perror("sigaltstack\n");
+ exit(-1);
+ }
+
+ /* The signal handler will enable MSR_TS */
+ sigaction(SIGUSR1, &usr_sa, NULL);
+ /* If it does not crash, it might segfault; handle it so we can retest */
+ sigaction(SIGSEGV, &seg_sa, NULL);
+
+ raise(SIGUSR1);
+ count++;
}
-
- /* The signal handler will enable MSR_TS */
- sigaction(SIGUSR1, &usr_sa, NULL);
- /* If it does not crash, it will segfault, avoid it to retest */
- sigaction(SIGSEGV, &seg_sa, NULL);
-
- raise(SIGUSR1);
}
int tm_signal_context_force_tm(void)
@@ -169,11 +169,7 @@ int tm_signal_context_force_tm(void)
*/
SKIP_IF(!is_ppc64le());
- /* Will get back here after COUNT_MAX interactions */
- getcontext(&main_context);
-
- if (!first_time++)
- tm_trap_test();
+ tm_trap_test();
return EXIT_SUCCESS;
}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c b/tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c
new file mode 100644
index 000000000000..5908bc6abe60
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020, Gustavo Luiz Duarte, IBM Corp.
+ *
+ * This test starts a transaction and triggers a signal, forcing a pagefault to
+ * happen when the kernel signal handling code touches the user signal stack.
+ *
+ * In order to avoid pre-faulting the signal stack memory and to force the
+ * pagefault to happen precisely in the kernel signal handling code, the
+ * pagefault handling is done in userspace using the userfaultfd facility.
+ *
+ * Further pagefaults are triggered by crafting the signal handler's ucontext
+ * to point to additional memory regions managed by the userfaultfd, i.e.
+ * using the same mechanism that avoids pre-faulting the signal stack memory.
+ *
+ * On failure (bug present) the kernel crashes or never returns control back
+ * to userspace. If the bug is not present, the test completes almost
+ * immediately.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/userfaultfd.h>
+#include <poll.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <signal.h>
+#include <errno.h>
+
+#include "tm.h"
+
+
+#define UF_MEM_SIZE 655360 /* 10 x 64k pages */
+
+/* Memory handled by userfaultfd */
+static char *uf_mem;
+static size_t uf_mem_offset = 0;
+
+/*
+ * Data that will be copied into the faulting pages (instead of zero-filled
+ * pages). This is used to make the test more reliable and avoid segfaulting
+ * when we return from the signal handler. Since we are making the signal
+ * handler's ucontext point to newly allocated memory, when that memory is
+ * paged-in it will contain the expected content.
+ */
+static char backing_mem[UF_MEM_SIZE];
+
+static size_t pagesize;
+
+/*
+ * Return a chunk of at least 'size' bytes of memory that will be handled by
+ * userfaultfd. If 'backing_data' is not NULL, its content will be saved to
+ * 'backing_mem' and then copied into the faulting pages when the page fault
+ * is handled.
+ */
+void *get_uf_mem(size_t size, void *backing_data)
+{
+ void *ret;
+
+ if (uf_mem_offset + size > UF_MEM_SIZE) {
+ fprintf(stderr, "Requesting more uf_mem than expected!\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = &uf_mem[uf_mem_offset];
+
+ /* Save the data that will be copied into the faulting page */
+ if (backing_data != NULL)
+ memcpy(&backing_mem[uf_mem_offset], backing_data, size);
+
+ /* Reserve the requested amount of uf_mem */
+ uf_mem_offset += size;
+ /* Keep uf_mem_offset aligned to the page size (round up) */
+ uf_mem_offset = (uf_mem_offset + pagesize - 1) & ~(pagesize - 1);
+
+ return ret;
+}
+
+void *fault_handler_thread(void *arg)
+{
+ struct uffd_msg msg; /* Data read from userfaultfd */
+ long uffd; /* userfaultfd file descriptor */
+ struct uffdio_copy uffdio_copy;
+ struct pollfd pollfd;
+ ssize_t nread, offset;
+
+ uffd = (long) arg;
+
+ for (;;) {
+ pollfd.fd = uffd;
+ pollfd.events = POLLIN;
+ if (poll(&pollfd, 1, -1) == -1) {
+ perror("poll() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ nread = read(uffd, &msg, sizeof(msg));
+ if (nread == 0) {
+ fprintf(stderr, "read(): EOF on userfaultfd\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (nread == -1) {
+ perror("read() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /* We expect only one kind of event */
+ if (msg.event != UFFD_EVENT_PAGEFAULT) {
+ fprintf(stderr, "Unexpected event on userfaultfd\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * We need to handle page faults in units of pages(!).
+ * So, round faulting address down to page boundary.
+ */
+ uffdio_copy.dst = msg.arg.pagefault.address & ~(pagesize-1);
+
+ offset = (char *) uffdio_copy.dst - uf_mem;
+ uffdio_copy.src = (unsigned long) &backing_mem[offset];
+
+ uffdio_copy.len = pagesize;
+ uffdio_copy.mode = 0;
+ uffdio_copy.copy = 0;
+ if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy) == -1) {
+ perror("ioctl-UFFDIO_COPY failed");
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+void setup_uf_mem(void)
+{
+ long uffd; /* userfaultfd file descriptor */
+ pthread_t thr;
+ struct uffdio_api uffdio_api;
+ struct uffdio_register uffdio_register;
+ int ret;
+
+ pagesize = sysconf(_SC_PAGE_SIZE);
+
+ /* Create and enable userfaultfd object */
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ if (uffd == -1) {
+ perror("userfaultfd() failed");
+ exit(EXIT_FAILURE);
+ }
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = 0;
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
+ perror("ioctl-UFFDIO_API failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Create a private anonymous mapping. The memory will be demand-zero
+ * paged, that is, not yet allocated. When we actually touch the memory
+ * the related page will be allocated via the userfaultfd mechanism.
+ */
+ uf_mem = mmap(NULL, UF_MEM_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (uf_mem == MAP_FAILED) {
+ perror("mmap() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Register the memory range of the mapping we've just mapped to be
+ * handled by the userfaultfd object. In 'mode' we request to track
+ * missing pages (i.e. pages that have not yet been faulted-in).
+ */
+ uffdio_register.range.start = (unsigned long) uf_mem;
+ uffdio_register.range.len = UF_MEM_SIZE;
+ uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
+ perror("ioctl-UFFDIO_REGISTER");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Create a thread that will process the userfaultfd events */
+ ret = pthread_create(&thr, NULL, fault_handler_thread, (void *) uffd);
+ if (ret != 0) {
+ fprintf(stderr, "pthread_create(): Error. Returned %d\n", ret);
+ exit(EXIT_FAILURE);
+ }
+}
+
+/*
+ * Assumption: the signal was delivered while userspace was in transactional or
+ * suspended state, i.e. uc->uc_link != NULL.
+ */
+void signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ ucontext_t *ucp = uc;
+
+ /* Skip 'trap' after returning, otherwise we get a SIGTRAP again */
+ ucp->uc_link->uc_mcontext.regs->nip += 4;
+
+ ucp->uc_mcontext.v_regs =
+ get_uf_mem(sizeof(elf_vrreg_t), ucp->uc_mcontext.v_regs);
+
+ ucp->uc_link->uc_mcontext.v_regs =
+ get_uf_mem(sizeof(elf_vrreg_t), ucp->uc_link->uc_mcontext.v_regs);
+
+ ucp->uc_link = get_uf_mem(sizeof(ucontext_t), ucp->uc_link);
+}
+
+bool have_userfaultfd(void)
+{
+ long rc;
+
+ errno = 0;
+ rc = syscall(__NR_userfaultfd, -1);
+
+ return rc == 0 || errno != ENOSYS;
+}
+
+int tm_signal_pagefault(void)
+{
+ struct sigaction sa;
+ stack_t ss;
+
+ SKIP_IF(!have_htm());
+ SKIP_IF(!have_userfaultfd());
+
+ setup_uf_mem();
+
+ /*
+ * Set an alternative stack that will generate a page fault when the
+ * signal is raised. The page fault will be treated via userfaultfd,
+ * i.e. via fault_handler_thread.
+ */
+ ss.ss_sp = get_uf_mem(SIGSTKSZ, NULL);
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
+ if (sigaltstack(&ss, NULL) == -1) {
+ perror("sigaltstack() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ sa.sa_sigaction = signal_handler;
+ if (sigaction(SIGTRAP, &sa, NULL) == -1) {
+ perror("sigaction() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Trigger a SIGTRAP in transactional state */
+ asm __volatile__(
+ "tbegin.;"
+ "beq 1f;"
+ "trap;"
+ "1: ;"
+ : : : "memory");
+
+ /* Trigger a SIGTRAP in suspended state */
+ asm __volatile__(
+ "tbegin.;"
+ "beq 1f;"
+ "tsuspend.;"
+ "trap;"
+ "tresume.;"
+ "1: ;"
+ : : : "memory");
+
+ return EXIT_SUCCESS;
+}
+
+int main(int argc, char **argv)
+{
+ /*
+ * Depending on kernel config, the TM Bad Thing might not result in a
+ * crash, instead the kernel never returns control back to userspace, so
+ * set a tight timeout. If the test passes it completes almost
+ * immediately.
+ */
+ test_harness_set_timeout(2);
+ return test_harness(tm_signal_pagefault, "tm_signal_pagefault");
+}
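Editor's note: have_userfaultfd() above uses a common probing idiom, i.e. invoke the syscall with arguments guaranteed to fail and treat any error other than ENOSYS as proof the syscall exists. A generic sketch of the same idea (a hypothetical helper, not part of this patch):

#include <errno.h>
#include <stdbool.h>
#include <sys/syscall.h>
#include <unistd.h>

static bool have_syscall(long nr)
{
	errno = 0;

	/* -1 is an invalid flags/fd value for most syscalls, so this fails */
	return syscall(nr, -1) == 0 || errno != ENOSYS;
}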
diff --git a/tools/testing/selftests/powerpc/vphn/.gitignore b/tools/testing/selftests/powerpc/vphn/.gitignore
index 7c04395010cb..b744aedfd1f2 100644
--- a/tools/testing/selftests/powerpc/vphn/.gitignore
+++ b/tools/testing/selftests/powerpc/vphn/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
test-vphn
diff --git a/tools/testing/selftests/prctl/.gitignore b/tools/testing/selftests/prctl/.gitignore
index 0b5c27447bf6..91af2b631bc9 100644
--- a/tools/testing/selftests/prctl/.gitignore
+++ b/tools/testing/selftests/prctl/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
disable-tsc-ctxt-sw-stress-test
disable-tsc-on-off-stress-test
disable-tsc-test
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 66fab4c58ed4..4bca5a9327a4 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
/fd-001-lookup
/fd-002-posix-eq
/fd-003-kthread
diff --git a/tools/testing/selftests/pstore/.gitignore b/tools/testing/selftests/pstore/.gitignore
index 5a4a26e5464b..9938fb406389 100644
--- a/tools/testing/selftests/pstore/.gitignore
+++ b/tools/testing/selftests/pstore/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
logs
*uuid
diff --git a/tools/testing/selftests/ptp/.gitignore b/tools/testing/selftests/ptp/.gitignore
index f562e49d6917..534ca26eee48 100644
--- a/tools/testing/selftests/ptp/.gitignore
+++ b/tools/testing/selftests/ptp/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
testptp
diff --git a/tools/testing/selftests/ptrace/.gitignore b/tools/testing/selftests/ptrace/.gitignore
index cfcc49a7def7..7bebf9534a86 100644
--- a/tools/testing/selftests/ptrace/.gitignore
+++ b/tools/testing/selftests/ptrace/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
get_syscall_info
peeksiginfo
diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile
index c0b7f89f0930..2f1f532c39db 100644
--- a/tools/testing/selftests/ptrace/Makefile
+++ b/tools/testing/selftests/ptrace/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-CFLAGS += -iquote../../../../include/uapi -Wall
+CFLAGS += -std=c99 -pthread -iquote../../../../include/uapi -Wall
-TEST_GEN_PROGS := get_syscall_info peeksiginfo
+TEST_GEN_PROGS := get_syscall_info peeksiginfo vmaccess
include ../lib.mk
diff --git a/tools/testing/selftests/ptrace/vmaccess.c b/tools/testing/selftests/ptrace/vmaccess.c
new file mode 100644
index 000000000000..4db327b44586
--- /dev/null
+++ b/tools/testing/selftests/ptrace/vmaccess.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2020 Bernd Edlinger <bernd.edlinger@hotmail.de>
+ * All rights reserved.
+ *
+ * Check whether /proc/$pid/mem can be accessed without causing deadlocks
+ * when de_thread is blocked with ->cred_guard_mutex held.
+ */
+
+#include "../kselftest_harness.h"
+#include <stdio.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/ptrace.h>
+
+static void *thread(void *arg)
+{
+ ptrace(PTRACE_TRACEME, 0, 0L, 0L);
+ return NULL;
+}
+
+TEST(vmaccess)
+{
+ int f, pid = fork();
+ char mm[64];
+
+ if (!pid) {
+ pthread_t pt;
+
+ pthread_create(&pt, NULL, thread, NULL);
+ pthread_join(pt, NULL);
+ execlp("true", "true", NULL);
+ }
+
+ sleep(1);
+ sprintf(mm, "/proc/%d/mem", pid);
+ f = open(mm, O_RDONLY);
+ ASSERT_GE(f, 0);
+ close(f);
+ f = kill(pid, SIGCONT);
+ ASSERT_EQ(f, 0);
+}
+
+TEST(attach)
+{
+ int s, k, pid = fork();
+
+ if (!pid) {
+ pthread_t pt;
+
+ pthread_create(&pt, NULL, thread, NULL);
+ pthread_join(pt, NULL);
+ execlp("sleep", "sleep", "2", NULL);
+ }
+
+ sleep(1);
+ k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
+ ASSERT_EQ(errno, EAGAIN);
+ ASSERT_EQ(k, -1);
+ k = waitpid(-1, &s, WNOHANG);
+ ASSERT_NE(k, -1);
+ ASSERT_NE(k, 0);
+ ASSERT_NE(k, pid);
+ ASSERT_EQ(WIFEXITED(s), 1);
+ ASSERT_EQ(WEXITSTATUS(s), 0);
+ sleep(1);
+ k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
+ ASSERT_EQ(k, 0);
+ k = waitpid(-1, &s, 0);
+ ASSERT_EQ(k, pid);
+ ASSERT_EQ(WIFSTOPPED(s), 1);
+ ASSERT_EQ(WSTOPSIG(s), SIGSTOP);
+ k = ptrace(PTRACE_DETACH, pid, 0L, 0L);
+ ASSERT_EQ(k, 0);
+ k = waitpid(-1, &s, 0);
+ ASSERT_EQ(k, pid);
+ ASSERT_EQ(WIFEXITED(s), 1);
+ ASSERT_EQ(WEXITSTATUS(s), 0);
+ k = waitpid(-1, NULL, 0);
+ ASSERT_EQ(k, -1);
+ ASSERT_EQ(errno, ECHILD);
+}
+
+TEST_HARNESS_MAIN
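Editor's note: /proc/<pid>/mem is addressed by virtual address, so a reader seeks (or uses pread()) to the address of interest. A self-inspection sketch, illustrative only; reading another process additionally requires passing the ptrace-style access checks the test above exercises:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int secret = 42, out = 0;
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/mem", getpid());
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The offset into /proc/<pid>/mem is the target virtual address */
	if (pread(fd, &out, sizeof(out), (off_t)(uintptr_t)&secret) != sizeof(out)) {
		perror("pread");
		return 1;
	}

	printf("read %d via /proc/self/mem\n", out);
	close(fd);
	return 0;
}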
diff --git a/tools/testing/selftests/rcutorture/.gitignore b/tools/testing/selftests/rcutorture/.gitignore
index ccc240275d1c..f6cbce77460b 100644
--- a/tools/testing/selftests/rcutorture/.gitignore
+++ b/tools/testing/selftests/rcutorture/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
initrd
b[0-9]*
res
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore
index 712a3d41a325..24e27957efcc 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
srcu.c
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore
index 1d016e66980a..57d296341304 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
srcu.h
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore
index f47cb2045f13..d65462d64816 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.out
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
new file mode 100644
index 000000000000..d585cc1948cc
--- /dev/null
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -0,0 +1,17 @@
+CC = $(CROSS_COMPILE)gcc
+CFLAGS = -g -Wall
+SRCS=$(wildcard *.c)
+OBJS=$(SRCS:.c=.o)
+
+all: resctrl_tests
+
+$(OBJS): $(SRCS)
+ $(CC) $(CFLAGS) -c $(SRCS)
+
+resctrl_tests: $(OBJS)
+ $(CC) $(CFLAGS) -o $@ $^
+
+.PHONY: clean
+
+clean:
+ $(RM) $(OBJS) resctrl_tests
diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
new file mode 100644
index 000000000000..6e5a0ffa18e8
--- /dev/null
+++ b/tools/testing/selftests/resctrl/README
@@ -0,0 +1,53 @@
+resctrl_tests - resctrl file system test suite
+
+Authors:
+ Fenghua Yu <fenghua.yu@intel.com>
+ Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+
+resctrl_tests tests various resctrl functionalities and interfaces, covering
+both software and hardware.
+
+Currently it supports the Memory Bandwidth Monitoring (MBM) and Memory
+Bandwidth Allocation (MBA) tests on Intel RDT hardware. More tests will be
+added in the future, and the test suite can be extended to cover AMD QoS and
+ARM MPAM hardware as well.
+
+BUILD
+-----
+
+Run "make" to build executable file "resctrl_tests".
+
+RUN
+---
+
+To use resctrl_tests, root or sudoer privileges are required. This is because
+the test needs to mount the resctrl file system and change contents in the
+file system.
+
+Executing the test without any parameter will run all supported tests:
+
+ sudo ./resctrl_tests
+
+OVERVIEW OF EXECUTION
+---------------------
+
+A test case has four stages:
+
+ - setup: mount the resctrl file system, create a group, set up schemata, move test
+ process pids to tasks, start benchmark.
+ - execute: let benchmark run
+ - verify: get resctrl data and verify the data with another source, e.g.
+ perf event.
+ - teardown: umount resctrl and clear temporary files.
+
+ARGUMENTS
+---------
+
+Parameter '-h' shows usage information.
+
+usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
+ -b benchmark_cmd [options]: run the specified benchmark for MBM, MBA and CQM; the default benchmark is the builtin fill_buf
+ -t test list: run tests specified in the test list, e.g. -t mbm, mba, cqm, cat
+ -n no_of_bits: run cache tests using the specified number of bits in the cache bit mask
+ -p cpu_no: specify the CPU number to run the test on; 1 is the default
+ -h: help
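Editor's note: as a concrete illustration, a hypothetical invocation that runs only the MBM and MBA tests pinned to CPU 2 (flag spellings as listed above):

  sudo ./resctrl_tests -t mbm,mba -p 2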
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
new file mode 100644
index 000000000000..38dbf4962e33
--- /dev/null
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdint.h>
+#include "resctrl.h"
+
+struct read_format {
+ __u64 nr; /* The number of events */
+ struct {
+ __u64 value; /* The value of the event */
+ } values[2];
+};
+
+static struct perf_event_attr pea_llc_miss;
+static struct read_format rf_cqm;
+static int fd_lm;
+char llc_occup_path[1024];
+
+static void initialize_perf_event_attr(void)
+{
+ pea_llc_miss.type = PERF_TYPE_HARDWARE;
+ pea_llc_miss.size = sizeof(struct perf_event_attr);
+ pea_llc_miss.read_format = PERF_FORMAT_GROUP;
+ pea_llc_miss.exclude_kernel = 1;
+ pea_llc_miss.exclude_hv = 1;
+ pea_llc_miss.exclude_idle = 1;
+ pea_llc_miss.exclude_callchain_kernel = 1;
+ pea_llc_miss.inherit = 1;
+ pea_llc_miss.exclude_guest = 1;
+ pea_llc_miss.disabled = 1;
+}
+
+static void ioctl_perf_event_ioc_reset_enable(void)
+{
+ ioctl(fd_lm, PERF_EVENT_IOC_RESET, 0);
+ ioctl(fd_lm, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+static int perf_event_open_llc_miss(pid_t pid, int cpu_no)
+{
+ fd_lm = perf_event_open(&pea_llc_miss, pid, cpu_no, -1,
+ PERF_FLAG_FD_CLOEXEC);
+ if (fd_lm == -1) {
+ perror("Error opening leader");
+ ctrlc_handler(0, NULL, NULL);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int initialize_llc_perf(void)
+{
+ memset(&pea_llc_miss, 0, sizeof(struct perf_event_attr));
+ memset(&rf_cqm, 0, sizeof(struct read_format));
+
+ /* Initialize perf_event_attr structures for HW_CACHE_MISSES */
+ initialize_perf_event_attr();
+
+ pea_llc_miss.config = PERF_COUNT_HW_CACHE_MISSES;
+
+ rf_cqm.nr = 1;
+
+ return 0;
+}
+
+static int reset_enable_llc_perf(pid_t pid, int cpu_no)
+{
+ int ret = 0;
+
+ ret = perf_event_open_llc_miss(pid, cpu_no);
+ if (ret < 0)
+ return ret;
+
+ /* Start counters to log values */
+ ioctl_perf_event_ioc_reset_enable();
+
+ return 0;
+}
+
+/*
+ * get_llc_perf: read the LLC cache miss count through perf events
+ * @llc_perf_miss: out parameter that receives the miss count
+ *
+ * Perf events like HW_CACHE_MISSES could be used to validate the number of
+ * cache lines allocated.
+ *
+ * Return: =0 on success. <0 on failure.
+ */
+static int get_llc_perf(unsigned long *llc_perf_miss)
+{
+ __u64 total_misses;
+
+ /* Stop counters after one span to get miss rate */
+
+ ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
+
+ if (read(fd_lm, &rf_cqm, sizeof(struct read_format)) == -1) {
+ perror("Could not get llc misses through perf");
+
+ return -1;
+ }
+
+ total_misses = rf_cqm.values[0].value;
+
+ close(fd_lm);
+
+ *llc_perf_miss = total_misses;
+
+ return 0;
+}
+
+/*
+ * Get LLC Occupancy as reported by RESCTRL FS
+ * For CQM,
+ * 1. If con_mon grp and mon grp given, then read from mon grp in
+ * con_mon grp
+ * 2. If only con_mon grp given, then read from con_mon grp
+ * 3. If both not given, then read from root con_mon grp
+ * For CAT,
+ * 1. If con_mon grp given, then read from it
+ * 2. If con_mon grp not given, then read from root con_mon grp
+ *
+ * Return: =0 on success. <0 on failure.
+ */
+static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
+{
+ FILE *fp;
+
+ fp = fopen(llc_occup_path, "r");
+ if (!fp) {
+ perror("Failed to open results file");
+
+ return errno;
+ }
+ if (fscanf(fp, "%lu", llc_occupancy) <= 0) {
+ perror("Could not get llc occupancy");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * print_results_cache: the cache results are stored in a file
+ * @filename: file that stores the results
+ * @bm_pid: child pid that runs benchmark
+ * @llc_value: perf miss value /
+ * llc occupancy value reported by resctrl FS
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+static int print_results_cache(char *filename, int bm_pid,
+ unsigned long llc_value)
+{
+ FILE *fp;
+
+ if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
+ printf("Pid: %d \t LLC_value: %lu\n", bm_pid,
+ llc_value);
+ } else {
+ fp = fopen(filename, "a");
+ if (!fp) {
+ perror("Cannot open results file");
+
+ return errno;
+ }
+ fprintf(fp, "Pid: %d \t llc_value: %lu\n", bm_pid, llc_value);
+ fclose(fp);
+ }
+
+ return 0;
+}
+
+int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+{
+ unsigned long llc_perf_miss = 0, llc_occu_resc = 0, llc_value = 0;
+ int ret;
+
+ /*
+ * Measure cache miss from perf.
+ */
+ if (!strcmp(param->resctrl_val, "cat")) {
+ ret = get_llc_perf(&llc_perf_miss);
+ if (ret < 0)
+ return ret;
+ llc_value = llc_perf_miss;
+ }
+
+ /*
+ * Measure llc occupancy from resctrl.
+ */
+ if (!strcmp(param->resctrl_val, "cqm")) {
+ ret = get_llc_occu_resctrl(&llc_occu_resc);
+ if (ret < 0)
+ return ret;
+ llc_value = llc_occu_resc;
+ }
+ ret = print_results_cache(param->filename, bm_pid, llc_value);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * cat_val: execute the benchmark and measure LLC occupancy via resctrl
+ * and cache misses via perf for the benchmark
+ * @param: parameters passed to cat_val()
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+int cat_val(struct resctrl_val_param *param)
+{
+ int malloc_and_init_memory = 1, memflush = 1, operation = 0, ret = 0;
+ char *resctrl_val = param->resctrl_val;
+ pid_t bm_pid;
+
+ if (strcmp(param->filename, "") == 0)
+ sprintf(param->filename, "stdio");
+
+ bm_pid = getpid();
+
+ /* Taskset benchmark to specified cpu */
+ ret = taskset_benchmark(bm_pid, param->cpu_no);
+ if (ret)
+ return ret;
+
+ /* Write benchmark to specified con_mon grp, mon_grp in resctrl FS */
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
+ resctrl_val);
+ if (ret)
+ return ret;
+
+ if ((strcmp(resctrl_val, "cat") == 0)) {
+ ret = initialize_llc_perf();
+ if (ret)
+ return ret;
+ }
+
+ /* Test runs until the callback setup() tells the test to stop. */
+ while (1) {
+ if (strcmp(resctrl_val, "cat") == 0) {
+ ret = param->setup(1, param);
+ if (ret) {
+ ret = 0;
+ break;
+ }
+ ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
+ if (ret)
+ break;
+
+ if (run_fill_buf(param->span, malloc_and_init_memory,
+ memflush, operation, resctrl_val)) {
+ fprintf(stderr, "Error-running fill buffer\n");
+ ret = -1;
+ break;
+ }
+
+ sleep(1);
+ ret = measure_cache_vals(param, bm_pid);
+ if (ret)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ return ret;
+}
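Editor's note: the perf plumbing above (open, reset, enable, run, disable, read) can be exercised standalone. A minimal sketch using the raw perf_event_open syscall, which has no glibc wrapper; with a single event and no PERF_FORMAT_GROUP, a plain 64-bit read suffices:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long misses;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CACHE_MISSES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &misses, sizeof(misses)) == sizeof(misses))
		printf("LLC misses: %lld\n", misses);
	close(fd);
	return 0;
}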
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
new file mode 100644
index 000000000000..5da43767b973
--- /dev/null
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cache Allocation Technology (CAT) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+#include <unistd.h>
+
+#define RESULT_FILE_NAME1 "result_cat1"
+#define RESULT_FILE_NAME2 "result_cat2"
+#define NUM_OF_RUNS 5
+#define MAX_DIFF_PERCENT 4
+#define MAX_DIFF 1000000
+
+int count_of_bits;
+char cbm_mask[256];
+unsigned long long_mask;
+unsigned long cache_size;
+
+/*
+ * Change schemata. Write schemata to specified
+ * con_mon grp, mon_grp in resctrl FS.
+ * Run 5 times in order to get average values.
+ */
+static int cat_setup(int num, ...)
+{
+ struct resctrl_val_param *p;
+ char schemata[64];
+ va_list param;
+ int ret = 0;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+ /* Run NUM_OF_RUNS times */
+ if (p->num_of_runs >= NUM_OF_RUNS)
+ return -1;
+
+ if (p->num_of_runs == 0) {
+ sprintf(schemata, "%lx", p->mask);
+ ret = write_schemata(p->ctrlgrp, schemata, p->cpu_no,
+ p->resctrl_val);
+ }
+ p->num_of_runs++;
+
+ return ret;
+}
+
+static void show_cache_info(unsigned long sum_llc_perf_miss, int no_of_bits,
+ unsigned long span)
+{
+ unsigned long allocated_cache_lines = span / 64;
+ unsigned long avg_llc_perf_miss = 0;
+ float diff_percent;
+
+ avg_llc_perf_miss = sum_llc_perf_miss / (NUM_OF_RUNS - 1);
+ diff_percent = ((float)allocated_cache_lines - avg_llc_perf_miss) /
+ allocated_cache_lines * 100;
+
+ printf("%sok CAT: cache miss rate within %d%%\n",
+ !is_amd && abs((int)diff_percent) > MAX_DIFF_PERCENT ?
+ "not " : "", MAX_DIFF_PERCENT);
+ tests_run++;
+ printf("# Percent diff=%d\n", abs((int)diff_percent));
+ printf("# Number of bits: %d\n", no_of_bits);
+ printf("# Avg_llc_perf_miss: %lu\n", avg_llc_perf_miss);
+ printf("# Allocated cache lines: %lu\n", allocated_cache_lines);
+}
+
+static int check_results(struct resctrl_val_param *param)
+{
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_perf_miss = 0;
+ int runs = 0, no_of_bits = 0;
+ FILE *fp;
+
+ printf("# Checking for pass/fail\n");
+ fp = fopen(param->filename, "r");
+ if (!fp) {
+ perror("# Cannot open file");
+
+ return errno;
+ }
+
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int fields = 0;
+
+ while (token) {
+ token_array[fields++] = token;
+ token = strtok(NULL, ":\t");
+ }
+ /*
+ * Discard the first value which is inaccurate due to monitoring
+ * setup transition phase.
+ */
+ if (runs > 0)
+ sum_llc_perf_miss += strtoul(token_array[3], NULL, 0);
+ runs++;
+ }
+
+ fclose(fp);
+ no_of_bits = count_bits(param->mask);
+
+ show_cache_info(sum_llc_perf_miss, no_of_bits, param->span);
+
+ return 0;
+}
+
+void cat_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME1);
+ remove(RESULT_FILE_NAME2);
+}
+
+int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+{
+ unsigned long l_mask, l_mask_1;
+ int ret, pipefd[2], sibling_cpu_no;
+ char pipe_message;
+ pid_t bm_pid;
+
+ cache_size = 0;
+
+ ret = remount_resctrlfs(true);
+ if (ret)
+ return ret;
+
+ if (!validate_resctrl_feature_request("cat"))
+ return -1;
+
+ /* Get default cbm mask for L3/L2 cache */
+ ret = get_cbm_mask(cache_type);
+ if (ret)
+ return ret;
+
+ long_mask = strtoul(cbm_mask, NULL, 16);
+
+ /* Get L3/L2 cache size */
+ ret = get_cache_size(cpu_no, cache_type, &cache_size);
+ if (ret)
+ return ret;
+ printf("cache size :%lu\n", cache_size);
+
+ /* Get max number of bits from default cbm mask */
+ count_of_bits = count_bits(long_mask);
+
+ if (n < 1 || n > count_of_bits - 1) {
+ printf("Invalid input value for no_of_bits n!\n");
+ printf("Please Enter value in range 1 to %d\n",
+ count_of_bits - 1);
+ return -1;
+ }
+
+ /* Get core id from same socket for running another thread */
+ sibling_cpu_no = get_core_sibling(cpu_no);
+ if (sibling_cpu_no < 0)
+ return -1;
+
+ struct resctrl_val_param param = {
+ .resctrl_val = "cat",
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 0,
+ .setup = cat_setup,
+ };
+
+ l_mask = long_mask >> n;
+ l_mask_1 = ~l_mask & long_mask;
+
+ /* Set param values for parent thread, which will be allocated a bitmask
+ * with (max_bits - n) bits
+ */
+ param.span = cache_size * (count_of_bits - n) / count_of_bits;
+ strcpy(param.ctrlgrp, "c2");
+ strcpy(param.mongrp, "m2");
+ strcpy(param.filename, RESULT_FILE_NAME2);
+ param.mask = l_mask;
+ param.num_of_runs = 0;
+
+ if (pipe(pipefd)) {
+ perror("# Unable to create pipe");
+ return errno;
+ }
+
+ bm_pid = fork();
+
+ /* Set param values for child thread, which will be allocated a bitmask
+ * with n bits
+ */
+ if (bm_pid == 0) {
+ param.mask = l_mask_1;
+ strcpy(param.ctrlgrp, "c1");
+ strcpy(param.mongrp, "m1");
+ param.span = cache_size * n / count_of_bits;
+ strcpy(param.filename, RESULT_FILE_NAME1);
+ param.num_of_runs = 0;
+ param.cpu_no = sibling_cpu_no;
+ }
+
+ remove(param.filename);
+
+ ret = cat_val(&param);
+ if (ret)
+ return ret;
+
+ ret = check_results(&param);
+ if (ret)
+ return ret;
+
+ if (bm_pid == 0) {
+ /* Tell parent that child is ready */
+ close(pipefd[0]);
+ pipe_message = 1;
+ if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
+ sizeof(pipe_message)) {
+ close(pipefd[1]);
+ perror("# failed signaling parent process");
+ return errno;
+ }
+
+ close(pipefd[1]);
+ while (1)
+ ;
+ } else {
+ /* Parent waits for child to be ready. */
+ close(pipefd[1]);
+ pipe_message = 0;
+ while (pipe_message != 1) {
+ if (read(pipefd[0], &pipe_message,
+ sizeof(pipe_message)) < sizeof(pipe_message)) {
+ perror("# failed reading from child process");
+ break;
+ }
+ }
+ close(pipefd[0]);
+ kill(bm_pid, SIGKILL);
+ }
+
+ cat_test_cleanup();
+ if (bm_pid)
+ umount_resctrlfs();
+
+ return 0;
+}
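Editor's note: to make the bitmask split above concrete (illustrative numbers): with a default mask of 0xfff (count_of_bits = 12) and n = 4, l_mask = 0xfff >> 4 = 0x0ff for the parent and l_mask_1 = ~0x0ff & 0xfff = 0xf00 for the child, so the two groups receive disjoint 8-bit and 4-bit cache allocations, with spans of 8/12 and 4/12 of the cache size respectively.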
diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
new file mode 100644
index 000000000000..c8756152bd61
--- /dev/null
+++ b/tools/testing/selftests/resctrl/cqm_test.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cache Monitoring Technology (CQM) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+#include <unistd.h>
+
+#define RESULT_FILE_NAME "result_cqm"
+#define NUM_OF_RUNS 5
+#define MAX_DIFF 2000000
+#define MAX_DIFF_PERCENT 15
+
+int count_of_bits;
+char cbm_mask[256];
+unsigned long long_mask;
+unsigned long cache_size;
+
+static int cqm_setup(int num, ...)
+{
+ struct resctrl_val_param *p;
+ va_list param;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+ /* Run NUM_OF_RUNS times */
+ if (p->num_of_runs >= NUM_OF_RUNS)
+ return -1;
+
+ p->num_of_runs++;
+
+ return 0;
+}
+
+static void show_cache_info(unsigned long sum_llc_occu_resc, int no_of_bits,
+ unsigned long span)
+{
+ unsigned long avg_llc_occu_resc = 0;
+ float diff_percent;
+ long avg_diff = 0;
+ bool res;
+
+ avg_llc_occu_resc = sum_llc_occu_resc / (NUM_OF_RUNS - 1);
+ avg_diff = (long)abs(span - avg_llc_occu_resc);
+
+ diff_percent = (((float)span - avg_llc_occu_resc) / span) * 100;
+
+ if ((abs((int)diff_percent) <= MAX_DIFF_PERCENT) ||
+ (abs(avg_diff) <= MAX_DIFF))
+ res = true;
+ else
+ res = false;
+
+ printf("%sok CQM: diff within %d, %d\%%\n", res ? "" : "not",
+ MAX_DIFF, (int)MAX_DIFF_PERCENT);
+
+ printf("# diff: %ld\n", avg_diff);
+ printf("# percent diff=%d\n", abs((int)diff_percent));
+ printf("# Results are displayed in (Bytes)\n");
+ printf("# Number of bits: %d\n", no_of_bits);
+ printf("# Avg_llc_occu_resc: %lu\n", avg_llc_occu_resc);
+ printf("# llc_occu_exp (span): %lu\n", span);
+
+ tests_run++;
+}
+
+static int check_results(struct resctrl_val_param *param, int no_of_bits)
+{
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_occu_resc = 0;
+ int runs = 0;
+ FILE *fp;
+
+ printf("# checking for pass/fail\n");
+ fp = fopen(param->filename, "r");
+ if (!fp) {
+ perror("# Error in opening file\n");
+
+ return errno;
+ }
+
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int fields = 0;
+
+ while (token) {
+ token_array[fields++] = token;
+ token = strtok(NULL, ":\t");
+ }
+
+ /* Field 3 is llc occ resc value */
+ if (runs > 0)
+ sum_llc_occu_resc += strtoul(token_array[3], NULL, 0);
+ runs++;
+ }
+ fclose(fp);
+ show_cache_info(sum_llc_occu_resc, no_of_bits, param->span);
+
+ return 0;
+}
+
+void cqm_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME);
+}
+
+int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+{
+ int ret, mum_resctrlfs;
+
+ cache_size = 0;
+ mum_resctrlfs = 1;
+
+ ret = remount_resctrlfs(mum_resctrlfs);
+ if (ret)
+ return ret;
+
+ if (!validate_resctrl_feature_request("cqm"))
+ return -1;
+
+ ret = get_cbm_mask("L3");
+ if (ret)
+ return ret;
+
+ long_mask = strtoul(cbm_mask, NULL, 16);
+
+ ret = get_cache_size(cpu_no, "L3", &cache_size);
+ if (ret)
+ return ret;
+ printf("cache size :%lu\n", cache_size);
+
+ count_of_bits = count_bits(long_mask);
+
+ if (n < 1 || n > count_of_bits) {
+ printf("Invalid input value for numbr_of_bits n!\n");
+ printf("Please Enter value in range 1 to %d\n", count_of_bits);
+ return -1;
+ }
+
+ struct resctrl_val_param param = {
+ .resctrl_val = "cqm",
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 0,
+ .filename = RESULT_FILE_NAME,
+ .mask = ~(long_mask << n) & long_mask,
+ .span = cache_size * n / count_of_bits,
+ .num_of_runs = 0,
+ .setup = cqm_setup,
+ };
+
+ if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
+ sprintf(benchmark_cmd[1], "%lu", param.span);
+
+ remove(RESULT_FILE_NAME);
+
+ ret = resctrl_val(benchmark_cmd, &param);
+ if (ret)
+ return ret;
+
+ ret = check_results(&param, n);
+ if (ret)
+ return ret;
+
+ cqm_test_cleanup();
+
+ return 0;
+}
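Editor's note: illustrative pass/fail arithmetic for show_cache_info() above: with span = 2,000,000 bytes and avg_llc_occu_resc = 1,900,000, avg_diff = 100,000 and diff_percent = 5; the run passes because 5% <= MAX_DIFF_PERCENT (15) and 100,000 <= MAX_DIFF (2,000,000), and either bound alone would suffice.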
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
new file mode 100644
index 000000000000..79c611c99a3d
--- /dev/null
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fill_buf benchmark
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <inttypes.h>
+#include <malloc.h>
+#include <string.h>
+
+#include "resctrl.h"
+
+#define CL_SIZE (64)
+#define PAGE_SIZE (4 * 1024)
+#define MB (1024 * 1024)
+
+static unsigned char *startptr;
+
+static void sb(void)
+{
+#if defined(__i386) || defined(__x86_64)
+ asm volatile("sfence\n\t"
+ : : : "memory");
+#endif
+}
+
+static void ctrl_handler(int signo)
+{
+ free(startptr);
+ printf("\nEnding\n");
+ sb();
+ exit(EXIT_SUCCESS);
+}
+
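+/* Flush one cache line so that the next access to it misses the cache */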
+static void cl_flush(void *p)
+{
+#if defined(__i386) || defined(__x86_64)
+ asm volatile("clflush (%0)\n\t"
+ : : "r"(p) : "memory");
+#endif
+}
+
+static void mem_flush(void *p, size_t s)
+{
+ char *cp = (char *)p;
+ size_t i = 0;
+
+ s = s / CL_SIZE; /* mem size in cache lines */
+
+ for (i = 0; i < s; i++)
+ cl_flush(&cp[i * CL_SIZE]);
+
+ sb();
+}
+
+static void *malloc_and_init_memory(size_t s)
+{
+ uint64_t *p64;
+ size_t s64;
+
+ void *p = memalign(PAGE_SIZE, s);
+
+ if (!p)
+ return NULL;
+
+ p64 = (uint64_t *)p;
+ s64 = s / sizeof(uint64_t);
+
+ while (s64 > 0) {
+ *p64 = (uint64_t)rand();
+ p64 += (CL_SIZE / sizeof(uint64_t));
+ s64 -= (CL_SIZE / sizeof(uint64_t));
+ }
+
+ return p;
+}
+
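+/*
+ * Read one byte from every half cache line in [start_ptr, end_ptr) so
+ * that each line of the buffer is brought into the cache.
+ */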
+static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr)
+{
+ unsigned char sum, *p;
+
+ sum = 0;
+ p = start_ptr;
+ while (p < end_ptr) {
+ sum += *p;
+ p += (CL_SIZE / 2);
+ }
+
+ return sum;
+}
+
+static
+void fill_one_span_write(unsigned char *start_ptr, unsigned char *end_ptr)
+{
+ unsigned char *p;
+
+ p = start_ptr;
+ while (p < end_ptr) {
+ *p = '1';
+ p += (CL_SIZE / 2);
+ }
+}
+
+static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
+ char *resctrl_val)
+{
+ int ret = 0;
+ FILE *fp;
+
+ while (1) {
+ ret = fill_one_span_read(start_ptr, end_ptr);
+ if (!strcmp(resctrl_val, "cat"))
+ break;
+ }
+
+ /* Consume read result so that reading memory is not optimized out. */
+ fp = fopen("/dev/null", "w");
+ if (!fp)
+ perror("Unable to write to /dev/null");
+ fprintf(fp, "Sum: %d ", ret);
+ fclose(fp);
+
+ return 0;
+}
+
+static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
+ char *resctrl_val)
+{
+ while (1) {
+ fill_one_span_write(start_ptr, end_ptr);
+ if (!strcmp(resctrl_val, "cat"))
+ break;
+ }
+
+ return 0;
+}
+
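+/* op == 0 exercises the buffer with reads, anything else with writes */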
+static int
+fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush,
+ int op, char *resctrl_val)
+{
+ unsigned char *start_ptr, *end_ptr;
+ unsigned long long i;
+ int ret;
+
+ if (malloc_and_init)
+ start_ptr = malloc_and_init_memory(buf_size);
+ else
+ start_ptr = malloc(buf_size);
+
+ if (!start_ptr)
+ return -1;
+
+ startptr = start_ptr;
+ end_ptr = start_ptr + buf_size;
+
+ /*
+ * It's better to touch the memory once to avoid any compiler
+ * optimizations
+ */
+ if (!malloc_and_init) {
+ for (i = 0; i < buf_size; i++)
+ *start_ptr++ = (unsigned char)rand();
+ }
+
+ start_ptr = startptr;
+
+ /* Flush the memory before using to avoid "cache hot pages" effect */
+ if (memflush)
+ mem_flush(start_ptr, buf_size);
+
+ if (op == 0)
+ ret = fill_cache_read(start_ptr, end_ptr, resctrl_val);
+ else
+ ret = fill_cache_write(start_ptr, end_ptr, resctrl_val);
+
+ if (ret) {
+ printf("\n Error in fill cache read/write...\n");
+ return -1;
+ }
+
+ free(startptr);
+
+ return 0;
+}
+
+int run_fill_buf(unsigned long span, int malloc_and_init_memory,
+ int memflush, int op, char *resctrl_val)
+{
+ unsigned long long cache_size = span;
+ int ret;
+
+ /* set up ctrl-c handler */
+ if (signal(SIGINT, ctrl_handler) == SIG_ERR)
+ printf("Failed to catch SIGINT!\n");
+ if (signal(SIGHUP, ctrl_handler) == SIG_ERR)
+ printf("Failed to catch SIGHUP!\n");
+
+ ret = fill_cache(cache_size, malloc_and_init_memory, memflush, op,
+ resctrl_val);
+ if (ret) {
+ printf("\n Error in fill cache\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
new file mode 100644
index 000000000000..7bf8eaa6204b
--- /dev/null
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory Bandwidth Allocation (MBA) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define RESULT_FILE_NAME "result_mba"
+#define NUM_OF_RUNS 5
+#define MAX_DIFF 300
+#define ALLOCATION_MAX 100
+#define ALLOCATION_MIN 10
+#define ALLOCATION_STEP 10
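+
+/* Pass if iMC and resctrl bandwidth averages differ by at most MAX_DIFF MB */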
+
+/*
+ * Change schemata percentage from 100% down to 10%. Write schemata to the
+ * specified con_mon grp, mon_grp in resctrl FS.
+ * For each allocation, run 5 times in order to get average values.
+ */
+static int mba_setup(int num, ...)
+{
+ static int runs_per_allocation, allocation = 100;
+ struct resctrl_val_param *p;
+ char allocation_str[64];
+ va_list param;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+ if (runs_per_allocation >= NUM_OF_RUNS)
+ runs_per_allocation = 0;
+
+ /* Set up schemata only once for every NUM_OF_RUNS runs */
+ if (runs_per_allocation++ != 0)
+ return 0;
+
+ if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
+ return -1;
+
+ sprintf(allocation_str, "%d", allocation);
+
+ write_schemata(p->ctrlgrp, allocation_str, p->cpu_no, p->resctrl_val);
+ allocation -= ALLOCATION_STEP;
+
+ return 0;
+}
+
+static void show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
+{
+ int allocation, runs;
+ bool failed = false;
+
+ printf("# Results are displayed in (MB)\n");
+ /* Memory bandwidth from 100% down to 10% */
+ for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP;
+ allocation++) {
+ unsigned long avg_bw_imc, avg_bw_resc;
+ unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
+ unsigned long avg_diff;
+
+ /*
+ * The first run is discarded due to inaccurate value from
+ * phase transition.
+ */
+ for (runs = NUM_OF_RUNS * allocation + 1;
+ runs < NUM_OF_RUNS * allocation + NUM_OF_RUNS ; runs++) {
+ sum_bw_imc += bw_imc[runs];
+ sum_bw_resc += bw_resc[runs];
+ }
+
+ avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
+ avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
+ avg_diff = labs((long)(avg_bw_resc - avg_bw_imc));
+
+ printf("%sok MBA schemata percentage %u smaller than %d %%\n",
+ avg_diff > MAX_DIFF ? "not " : "",
+ ALLOCATION_MAX - ALLOCATION_STEP * allocation,
+ MAX_DIFF);
+ tests_run++;
+ printf("# avg_diff: %lu\n", avg_diff);
+ printf("# avg_bw_imc: %lu\n", avg_bw_imc);
+ printf("# avg_bw_resc: %lu\n", avg_bw_resc);
+ if (avg_diff > MAX_DIFF)
+ failed = true;
+ }
+
+ printf("%sok schemata change using MBA%s\n", failed ? "not " : "",
+ failed ? " # at least one test failed" : "");
+ tests_run++;
+}
+
+static int check_results(void)
+{
+ char *token_array[8], output[] = RESULT_FILE_NAME, temp[512];
+ unsigned long bw_imc[1024], bw_resc[1024];
+ int runs;
+ FILE *fp;
+
+ fp = fopen(output, "r");
+ if (!fp) {
+ perror(output);
+
+ return errno;
+ }
+
+ runs = 0;
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int fields = 0;
+
+ while (token) {
+ token_array[fields++] = token;
+ token = strtok(NULL, ":\t");
+ }
+
+ /* Field 3 is perf imc value */
+ bw_imc[runs] = strtoul(token_array[3], NULL, 0);
+ /* Field 5 is resctrl value */
+ bw_resc[runs] = strtoul(token_array[5], NULL, 0);
+ runs++;
+ }
+
+ fclose(fp);
+
+ show_mba_info(bw_imc, bw_resc);
+
+ return 0;
+}
+
+void mba_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME);
+}
+
+int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
+{
+ struct resctrl_val_param param = {
+ .resctrl_val = "mba",
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 1,
+ .filename = RESULT_FILE_NAME,
+ .bw_report = bw_report,
+ .setup = mba_setup
+ };
+ int ret;
+
+ remove(RESULT_FILE_NAME);
+
+ if (!validate_resctrl_feature_request("mba"))
+ return -1;
+
+ ret = resctrl_val(benchmark_cmd, &param);
+ if (ret)
+ return ret;
+
+ ret = check_results();
+ if (ret)
+ return ret;
+
+ mba_test_cleanup();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
new file mode 100644
index 000000000000..4700f7453f81
--- /dev/null
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory Bandwidth Monitoring (MBM) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define RESULT_FILE_NAME "result_mbm"
+#define MAX_DIFF 300
+#define NUM_OF_RUNS 5
+
+static void
+show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
+{
+ unsigned long avg_bw_imc = 0, avg_bw_resc = 0;
+ unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
+ long avg_diff = 0;
+ int runs;
+
+ /*
+ * Discard the first value which is inaccurate due to monitoring setup
+ * transition phase.
+ */
+ for (runs = 1; runs < NUM_OF_RUNS ; runs++) {
+ sum_bw_imc += bw_imc[runs];
+ sum_bw_resc += bw_resc[runs];
+ }
+
+ avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
+ avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
+ avg_diff = avg_bw_resc - avg_bw_imc;
+
+ printf("%sok MBM: diff within %d%%\n",
+ labs(avg_diff) > MAX_DIFF ? "not " : "", MAX_DIFF);
+ tests_run++;
+ printf("# avg_diff: %lu\n", labs(avg_diff));
+ printf("# Span (MB): %d\n", span);
+ printf("# avg_bw_imc: %lu\n", avg_bw_imc);
+ printf("# avg_bw_resc: %lu\n", avg_bw_resc);
+}
+
+static int check_results(int span)
+{
+ unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS];
+ char temp[1024], *token_array[8];
+ char output[] = RESULT_FILE_NAME;
+ int runs;
+ FILE *fp;
+
+ printf("# Checking for pass/fail\n");
+
+ fp = fopen(output, "r");
+ if (!fp) {
+ perror(output);
+
+ return errno;
+ }
+
+ runs = 0;
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int i = 0;
+
+ while (token) {
+ token_array[i++] = token;
+ token = strtok(NULL, ":\t");
+ }
+
+ bw_resc[runs] = strtoul(token_array[5], NULL, 0);
+ bw_imc[runs] = strtoul(token_array[3], NULL, 0);
+ runs++;
+ }
+
+ show_bw_info(bw_imc, bw_resc, span);
+
+ fclose(fp);
+
+ return 0;
+}
+
+static int mbm_setup(int num, ...)
+{
+ struct resctrl_val_param *p;
+ static int num_of_runs;
+ va_list param;
+ int ret = 0;
+
+ /* Run NUM_OF_RUNS times */
+ if (num_of_runs++ >= NUM_OF_RUNS)
+ return -1;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+ /* Set up schemata with 100% allocation on the first run. */
+ if (num_of_runs == 1)
+ ret = write_schemata(p->ctrlgrp, "100", p->cpu_no,
+ p->resctrl_val);
+
+ return ret;
+}
+
+void mbm_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME);
+}
+
+int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
+{
+ struct resctrl_val_param param = {
+ .resctrl_val = "mbm",
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+ .span = span,
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 1,
+ .filename = RESULT_FILE_NAME,
+ .bw_report = bw_report,
+ .setup = mbm_setup
+ };
+ int ret;
+
+ remove(RESULT_FILE_NAME);
+
+ if (!validate_resctrl_feature_request("mbm"))
+ return -1;
+
+ ret = resctrl_val(benchmark_cmd, &param);
+ if (ret)
+ return ret;
+
+ ret = check_results(span);
+ if (ret)
+ return ret;
+
+ mbm_test_cleanup();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
new file mode 100644
index 000000000000..39bf59c6b9c5
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#define _GNU_SOURCE
+#ifndef RESCTRL_H
+#define RESCTRL_H
+#include <stdio.h>
+#include <stdarg.h>
+#include <math.h>
+#include <errno.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <signal.h>
+#include <dirent.h>
+#include <stdbool.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/select.h>
+#include <sys/time.h>
+#include <sys/eventfd.h>
+#include <asm/unistd.h>
+#include <linux/perf_event.h>
+
+#define MB (1024 * 1024)
+#define RESCTRL_PATH "/sys/fs/resctrl"
+#define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
+#define CBM_MASK_PATH "/sys/fs/resctrl/info"
+
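+/* Used by the benchmark child: report the error, kill the parent and exit */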
+#define PARENT_EXIT(err_msg) \
+ do { \
+ perror(err_msg); \
+ kill(ppid, SIGKILL); \
+ exit(EXIT_FAILURE); \
+ } while (0)
+
+/*
+ * resctrl_val_param: resctrl test parameters
+ * @resctrl_val: Resctrl feature (e.g. mbm, mba, etc.)
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no: CPU number to which the benchmark would be bound
+ * @span: Memory bytes accessed in each benchmark iteration
+ * @mum_resctrlfs: Should the resctrl FS be remounted?
+ * @filename: Name of file to which the o/p should be written
+ * @bw_report: Bandwidth report type (reads vs writes)
+ * @setup: Call back function to setup test environment
+ */
+struct resctrl_val_param {
+ char *resctrl_val;
+ char ctrlgrp[64];
+ char mongrp[64];
+ int cpu_no;
+ unsigned long span;
+ int mum_resctrlfs;
+ char filename[64];
+ char *bw_report;
+ unsigned long mask;
+ int num_of_runs;
+ int (*setup)(int num, ...);
+};
+
+pid_t bm_pid, ppid;
+int tests_run;
+
+char llc_occup_path[1024];
+bool is_amd;
+
+bool check_resctrlfs_support(void);
+int filter_dmesg(void);
+int remount_resctrlfs(bool mum_resctrlfs);
+int get_resource_id(int cpu_no, int *resource_id);
+int umount_resctrlfs(void);
+int validate_bw_report_request(char *bw_report);
+bool validate_resctrl_feature_request(char *resctrl_val);
+char *fgrep(FILE *inf, const char *str);
+int taskset_benchmark(pid_t bm_pid, int cpu_no);
+void run_benchmark(int signum, siginfo_t *info, void *ucontext);
+int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
+ char *resctrl_val);
+int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ char *resctrl_val);
+int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
+ int group_fd, unsigned long flags);
+int run_fill_buf(unsigned long span, int malloc_and_init_memory, int memflush,
+ int op, char *resctrl_va);
+int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
+int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd);
+void tests_cleanup(void);
+void mbm_test_cleanup(void);
+int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
+void mba_test_cleanup(void);
+int get_cbm_mask(char *cache_type);
+int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
+void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
+int cat_val(struct resctrl_val_param *param);
+void cat_test_cleanup(void);
+int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
+int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
+unsigned int count_bits(unsigned long n);
+void cqm_test_cleanup(void);
+int get_core_sibling(int cpu_no);
+int measure_cache_vals(struct resctrl_val_param *param, int bm_pid);
+
+#endif /* RESCTRL_H */
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
new file mode 100644
index 000000000000..425cc85ac883
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resctrl tests
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define BENCHMARK_ARGS 64
+#define BENCHMARK_ARG_SIZE 64
+
+bool is_amd;
+
+void detect_amd(void)
+{
+ FILE *inf = fopen("/proc/cpuinfo", "r");
+ char *res;
+
+ if (!inf)
+ return;
+
+ res = fgrep(inf, "vendor_id");
+
+ if (res) {
+ char *s = strchr(res, ':');
+
+ is_amd = s && !strcmp(s, ": AuthenticAMD\n");
+ free(res);
+ }
+ fclose(inf);
+}
+
+static void cmd_help(void)
+{
+ printf("usage: resctrl_tests [-h] [-b \"benchmark_cmd [options]\"] [-t test list] [-n no_of_bits]\n");
+ printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM");
+ printf("\t default benchmark is builtin fill_buf\n");
+ printf("\t-t test list: run tests specified in the test list, ");
+ printf("e.g. -t mbm, mba, cqm, cat\n");
+ printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
+ printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
+ printf("\t-h: help\n");
+}
+
+void tests_cleanup(void)
+{
+ mbm_test_cleanup();
+ mba_test_cleanup();
+ cqm_test_cleanup();
+ cat_test_cleanup();
+}
+
+int main(int argc, char **argv)
+{
+ bool has_ben = false, mbm_test = true, mba_test = true, cqm_test = true;
+ int res, c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 5;
+ char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
+ char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
+ int ben_ind, ben_count;
+ bool cat_test = true;
+
+ for (i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "-b") == 0) {
+ ben_ind = i + 1;
+ ben_count = argc - ben_ind;
+ argc_new = ben_ind - 1;
+ has_ben = true;
+ break;
+ }
+ }
+
+ while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
+ char *token;
+
+ switch (c) {
+ case 't':
+ token = strtok(optarg, ",");
+
+ mbm_test = false;
+ mba_test = false;
+ cqm_test = false;
+ cat_test = false;
+ while (token) {
+ if (!strcmp(token, "mbm")) {
+ mbm_test = true;
+ } else if (!strcmp(token, "mba")) {
+ mba_test = true;
+ } else if (!strcmp(token, "cqm")) {
+ cqm_test = true;
+ } else if (!strcmp(token, "cat")) {
+ cat_test = true;
+ } else {
+ printf("invalid argument\n");
+
+ return -1;
+ }
+ token = strtok(NULL, ":\t");
+ }
+ break;
+ case 'p':
+ cpu_no = atoi(optarg);
+ break;
+ case 'n':
+ no_of_bits = atoi(optarg);
+ break;
+ case 'h':
+ cmd_help();
+
+ return 0;
+ default:
+ printf("invalid argument\n");
+
+ return -1;
+ }
+ }
+
+ printf("TAP version 13\n");
+
+ /*
+ * Typically we need root privileges, because:
+ * 1. We write to resctrl FS
+ * 2. We execute perf commands
+ */
+ if (geteuid() != 0)
+ printf("# WARNING: not running as root, tests may fail.\n");
+
+ /* Detect AMD vendor */
+ detect_amd();
+
+ if (has_ben) {
+ /* Extract benchmark command from command line. */
+ for (i = ben_ind; i < argc; i++) {
+ benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i - ben_ind];
+ sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
+ }
+ benchmark_cmd[ben_count] = NULL;
+ } else {
+ /* If no benchmark is given by "-b" argument, use fill_buf. */
+ for (i = 0; i < 6; i++)
+ benchmark_cmd[i] = benchmark_cmd_area[i];
+
+ strcpy(benchmark_cmd[0], "fill_buf");
+ sprintf(benchmark_cmd[1], "%d", span);
+ strcpy(benchmark_cmd[2], "1");
+ strcpy(benchmark_cmd[3], "1");
+ strcpy(benchmark_cmd[4], "0");
+ strcpy(benchmark_cmd[5], "");
+ benchmark_cmd[6] = NULL;
+ }
+
+ sprintf(bw_report, "reads");
+ sprintf(bm_type, "fill_buf");
+
+ check_resctrlfs_support();
+ filter_dmesg();
+
+ if (!is_amd && mbm_test) {
+ printf("# Starting MBM BW change ...\n");
+ if (!has_ben)
+ sprintf(benchmark_cmd[5], "%s", "mba");
+ res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
+ printf("%sok MBM: bw change\n", res ? "not " : "");
+ mbm_test_cleanup();
+ tests_run++;
+ }
+
+ if (!is_amd && mba_test) {
+ printf("# Starting MBA Schemata change ...\n");
+ if (!has_ben)
+ sprintf(benchmark_cmd[1], "%d", span);
+ res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
+ printf("%sok MBA: schemata change\n", res ? "not " : "");
+ mba_test_cleanup();
+ tests_run++;
+ }
+
+ if (cqm_test) {
+ printf("# Starting CQM test ...\n");
+ if (!has_ben)
+ sprintf(benchmark_cmd[5], "%s", "cqm");
+ res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
+ printf("%sok CQM: test\n", res ? "not " : "");
+ cqm_test_cleanup();
+ tests_run++;
+ }
+
+ if (cat_test) {
+ printf("# Starting CAT test ...\n");
+ res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
+ printf("%sok CAT: test\n", res ? "not " : "");
+ tests_run++;
+ cat_test_cleanup();
+ }
+
+ printf("1..%d\n", tests_run);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
new file mode 100644
index 000000000000..520fea3606d1
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -0,0 +1,744 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory bandwidth monitoring and allocation library
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define UNCORE_IMC "uncore_imc"
+#define READ_FILE_NAME "events/cas_count_read"
+#define WRITE_FILE_NAME "events/cas_count_write"
+#define DYN_PMU_PATH "/sys/bus/event_source/devices"
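+/* SCALE converts a CAS count of 64-byte lines to MB: 64 / (1024 * 1024) */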
+#define SCALE 0.00006103515625
+#define MAX_IMCS 20
+#define MAX_TOKENS 5
+#define READ 0
+#define WRITE 1
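+
+/*
+ * mon_data file paths, one per combination of ctrlgrp/mongrp being given or
+ * empty; see set_mbm_path() and set_cqm_path() for how one is chosen.
+ */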
+#define CON_MON_MBM_LOCAL_BYTES_PATH \
+ "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define CON_MBM_LOCAL_BYTES_PATH \
+ "%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define MON_MBM_LOCAL_BYTES_PATH \
+ "%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define MBM_LOCAL_BYTES_PATH \
+ "%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define CON_MON_LCC_OCCUP_PATH \
+ "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+#define CON_LCC_OCCUP_PATH \
+ "%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+#define MON_LCC_OCCUP_PATH \
+ "%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+#define LCC_OCCUP_PATH \
+ "%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+struct membw_read_format {
+ __u64 value; /* The value of the event */
+ __u64 time_enabled; /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
+ __u64 time_running; /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
+ __u64 id; /* if PERF_FORMAT_ID */
+};
+
+struct imc_counter_config {
+ __u32 type;
+ __u64 event;
+ __u64 umask;
+ struct perf_event_attr pe;
+ struct membw_read_format return_value;
+ int fd;
+};
+
+static char mbm_total_path[1024];
+static int imcs;
+static struct imc_counter_config imc_counters_config[MAX_IMCS][2];
+
+void membw_initialize_perf_event_attr(int i, int j)
+{
+ memset(&imc_counters_config[i][j].pe, 0,
+ sizeof(struct perf_event_attr));
+ imc_counters_config[i][j].pe.type = imc_counters_config[i][j].type;
+ imc_counters_config[i][j].pe.size = sizeof(struct perf_event_attr);
+ imc_counters_config[i][j].pe.disabled = 1;
+ imc_counters_config[i][j].pe.inherit = 1;
+ imc_counters_config[i][j].pe.exclude_guest = 0;
+ imc_counters_config[i][j].pe.config =
+ imc_counters_config[i][j].umask << 8 |
+ imc_counters_config[i][j].event;
+ imc_counters_config[i][j].pe.sample_type = PERF_SAMPLE_IDENTIFIER;
+ imc_counters_config[i][j].pe.read_format =
+ PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
+}
+
+void membw_ioctl_perf_event_ioc_reset_enable(int i, int j)
+{
+ ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_RESET, 0);
+ ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+void membw_ioctl_perf_event_ioc_disable(int i, int j)
+{
+ ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_DISABLE, 0);
+}
+
+/*
+ * get_event_and_umask: Parse config into event and umask
+ * @cas_count_cfg: Config
+ * @count: iMC number
+ * @op: Operation (read/write)
+ */
+void get_event_and_umask(char *cas_count_cfg, int count, bool op)
+{
+ char *token[MAX_TOKENS];
+ int i = 0;
+
+ strcat(cas_count_cfg, ",");
+ token[0] = strtok(cas_count_cfg, "=,");
+
+ for (i = 1; i < MAX_TOKENS; i++)
+ token[i] = strtok(NULL, "=,");
+
+ for (i = 0; i < MAX_TOKENS; i++) {
+ if (!token[i])
+ break;
+ if (strcmp(token[i], "event") == 0) {
+ if (op == READ)
+ imc_counters_config[count][READ].event =
+ strtol(token[i + 1], NULL, 16);
+ else
+ imc_counters_config[count][WRITE].event =
+ strtol(token[i + 1], NULL, 16);
+ }
+ if (strcmp(token[i], "umask") == 0) {
+ if (op == READ)
+ imc_counters_config[count][READ].umask =
+ strtol(token[i + 1], NULL, 16);
+ else
+ imc_counters_config[count][WRITE].umask =
+ strtol(token[i + 1], NULL, 16);
+ }
+ }
+}
+
+static int open_perf_event(int i, int cpu_no, int j)
+{
+ imc_counters_config[i][j].fd =
+ perf_event_open(&imc_counters_config[i][j].pe, -1, cpu_no, -1,
+ PERF_FLAG_FD_CLOEXEC);
+
+ if (imc_counters_config[i][j].fd == -1) {
+ fprintf(stderr, "Error opening leader %llx\n",
+ imc_counters_config[i][j].pe.config);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Get type and config (read and write) of an iMC counter */
+static int read_from_imc_dir(char *imc_dir, int count)
+{
+ char cas_count_cfg[1024], imc_counter_cfg[1024], imc_counter_type[1024];
+ FILE *fp;
+
+ /* Get type of iMC counter */
+ sprintf(imc_counter_type, "%s%s", imc_dir, "type");
+ fp = fopen(imc_counter_type, "r");
+ if (!fp) {
+ perror("Failed to open imc counter type file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) {
+ perror("Could not get imc type");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ imc_counters_config[count][WRITE].type =
+ imc_counters_config[count][READ].type;
+
+ /* Get read config */
+ sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME);
+ fp = fopen(imc_counter_cfg, "r");
+ if (!fp) {
+ perror("Failed to open imc config file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
+ perror("Could not get imc cas count read");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ get_event_and_umask(cas_count_cfg, count, READ);
+
+ /* Get write config */
+ sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME);
+ fp = fopen(imc_counter_cfg, "r");
+ if (!fp) {
+ perror("Failed to open imc config file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
+ perror("Could not get imc cas count write");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ get_event_and_umask(cas_count_cfg, count, WRITE);
+
+ return 0;
+}
+
+/*
+ * A system can have 'n' number of iMC (Integrated Memory Controller)
+ * counters, get that 'n'. For each iMC counter get its type and config.
+ * Also, each counter has two configs, one for read and the other for write.
+ * A config again has two parts, event and umask.
+ * Enumerate all these details into an array of structures.
+ *
+ * Return: >= 0 on success. < 0 on failure.
+ */
+static int num_of_imcs(void)
+{
+ unsigned int count = 0;
+ char imc_dir[512];
+ struct dirent *ep;
+ int ret;
+ DIR *dp;
+
+ dp = opendir(DYN_PMU_PATH);
+ if (dp) {
+ while ((ep = readdir(dp))) {
+ if (strstr(ep->d_name, UNCORE_IMC)) {
+ sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
+ ep->d_name);
+ ret = read_from_imc_dir(imc_dir, count);
+ if (ret) {
+ closedir(dp);
+
+ return ret;
+ }
+ count++;
+ }
+ }
+ closedir(dp);
+ if (count == 0) {
+ perror("Unable find iMC counters!\n");
+
+ return -1;
+ }
+ } else {
+ perror("Unable to open PMU directory!\n");
+
+ return -1;
+ }
+
+ return count;
+}
+
+static int initialize_mem_bw_imc(void)
+{
+ int imc, j;
+
+ imcs = num_of_imcs();
+ if (imcs <= 0)
+ return imcs;
+
+ /* Initialize perf_event_attr structures for all iMC's */
+ for (imc = 0; imc < imcs; imc++) {
+ for (j = 0; j < 2; j++)
+ membw_initialize_perf_event_attr(imc, j);
+ }
+
+ return 0;
+}
+
+/*
+ * get_mem_bw_imc: Memory bandwidth as reported by iMC counters
+ * @cpu_no: CPU number that the benchmark PID is bound to
+ * @bw_report: Bandwidth report type (reads, writes)
+ *
+ * Memory B/W utilized by a process on a socket can be calculated using
+ * iMC counters. Perf events are used to read these counters.
+ *
+ * Return: >= 0 on success. < 0 on failure.
+ */
+static float get_mem_bw_imc(int cpu_no, char *bw_report)
+{
+ float reads, writes, of_mul_read, of_mul_write;
+ int imc, j, ret;
+
+ /* Start all iMC counters to log values (both read and write) */
+ reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
+ for (imc = 0; imc < imcs; imc++) {
+ for (j = 0; j < 2; j++) {
+ ret = open_perf_event(imc, cpu_no, j);
+ if (ret)
+ return -1;
+ }
+ for (j = 0; j < 2; j++)
+ membw_ioctl_perf_event_ioc_reset_enable(imc, j);
+ }
+
+ sleep(1);
+
+ /* Stop counters after a second to get results (both read and write) */
+ for (imc = 0; imc < imcs; imc++) {
+ for (j = 0; j < 2; j++)
+ membw_ioctl_perf_event_ioc_disable(imc, j);
+ }
+
+ /*
+ * Get results which are stored in struct type imc_counter_config
+ * Take overflow into consideration before calculating total b/w.
+ */
+ for (imc = 0; imc < imcs; imc++) {
+ struct imc_counter_config *r =
+ &imc_counters_config[imc][READ];
+ struct imc_counter_config *w =
+ &imc_counters_config[imc][WRITE];
+
+ if (read(r->fd, &r->return_value,
+ sizeof(struct membw_read_format)) == -1) {
+ perror("Couldn't get read b/w through iMC");
+
+ return -1;
+ }
+
+ if (read(w->fd, &w->return_value,
+ sizeof(struct membw_read_format)) == -1) {
+ perror("Couldn't get write bw through iMC");
+
+ return -1;
+ }
+
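+ /*
+ * If the event was multiplexed (time_enabled != time_running),
+ * scale the raw count up to estimate the value for the full
+ * measurement interval.
+ */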
+ __u64 r_time_enabled = r->return_value.time_enabled;
+ __u64 r_time_running = r->return_value.time_running;
+
+ if (r_time_enabled != r_time_running)
+ of_mul_read = (float)r_time_enabled /
+ (float)r_time_running;
+
+ __u64 w_time_enabled = w->return_value.time_enabled;
+ __u64 w_time_running = w->return_value.time_running;
+
+ if (w_time_enabled != w_time_running)
+ of_mul_write = (float)w_time_enabled /
+ (float)w_time_running;
+ reads += r->return_value.value * of_mul_read * SCALE;
+ writes += w->return_value.value * of_mul_write * SCALE;
+ }
+
+ for (imc = 0; imc < imcs; imc++) {
+ close(imc_counters_config[imc][READ].fd);
+ close(imc_counters_config[imc][WRITE].fd);
+ }
+
+ if (strcmp(bw_report, "reads") == 0)
+ return reads;
+
+ if (strcmp(bw_report, "writes") == 0)
+ return writes;
+
+ return (reads + writes);
+}
+
+void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
+{
+ if (ctrlgrp && mongrp)
+ sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH,
+ RESCTRL_PATH, ctrlgrp, mongrp, resource_id);
+ else if (!ctrlgrp && mongrp)
+ sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ mongrp, resource_id);
+ else if (ctrlgrp && !mongrp)
+ sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ ctrlgrp, resource_id);
+ else if (!ctrlgrp && !mongrp)
+ sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ resource_id);
+}
+
+/*
+ * initialize_mem_bw_resctrl: Appropriately populate "mbm_total_path"
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no: CPU number that the benchmark PID is bound to
+ * @resctrl_val: Resctrl feature (e.g. mbm, mba, etc.)
+ */
+static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
+ int cpu_no, char *resctrl_val)
+{
+ int resource_id;
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+ perror("Could not get resource_id");
+ return;
+ }
+
+ if (strcmp(resctrl_val, "mbm") == 0)
+ set_mbm_path(ctrlgrp, mongrp, resource_id);
+
+ if ((strcmp(resctrl_val, "mba") == 0)) {
+ if (ctrlgrp)
+ sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
+ RESCTRL_PATH, ctrlgrp, resource_id);
+ else
+ sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH,
+ RESCTRL_PATH, resource_id);
+ }
+}
+
+/*
+ * Get MBM Local bytes as reported by resctrl FS
+ * For MBM,
+ * 1. If con_mon grp and mon grp are given, then read from con_mon grp's mon grp
+ * 2. If only con_mon grp is given, then read from con_mon grp
+ * 3. If both are not given, then read from root con_mon grp
+ * For MBA,
+ * 1. If con_mon grp is given, then read from it
+ * 2. If con_mon grp is not given, then read from root con_mon grp
+ */
+static unsigned long get_mem_bw_resctrl(void)
+{
+ unsigned long mbm_total = 0;
+ FILE *fp;
+
+ fp = fopen(mbm_total_path, "r");
+ if (!fp) {
+ perror("Failed to open total bw file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%lu", &mbm_total) <= 0) {
+ perror("Could not get mbm local bytes");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return mbm_total;
+}
+
+pid_t bm_pid, ppid;
+
+void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+{
+ kill(bm_pid, SIGKILL);
+ umount_resctrlfs();
+ tests_cleanup();
+ printf("Ending\n\n");
+
+ exit(EXIT_SUCCESS);
+}
+
+/*
+ * print_results_bw: the memory bandwidth results are stored in a file
+ * @filename: file that stores the results
+ * @bm_pid: child pid that runs benchmark
+ * @bw_imc: perf imc counter value
+ * @bw_resc: memory bandwidth value
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+static int print_results_bw(char *filename, int bm_pid, float bw_imc,
+ unsigned long bw_resc)
+{
+ unsigned long diff = fabs(bw_imc - bw_resc);
+ FILE *fp;
+
+ if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
+ printf("Pid: %d \t Mem_BW_iMC: %f \t ", bm_pid, bw_imc);
+ printf("Mem_BW_resc: %lu \t Difference: %lu\n", bw_resc, diff);
+ } else {
+ fp = fopen(filename, "a");
+ if (!fp) {
+ perror("Cannot open results file");
+
+ return errno;
+ }
+ if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
+ bm_pid, bw_imc, bw_resc, diff) <= 0) {
+ fclose(fp);
+ perror("Could not log results.");
+
+ return errno;
+ }
+ fclose(fp);
+ }
+
+ return 0;
+}
+
+static void set_cqm_path(const char *ctrlgrp, const char *mongrp, int sock_num)
+{
+ if (strlen(ctrlgrp) && strlen(mongrp))
+ sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ ctrlgrp, mongrp, sock_num);
+ else if (!strlen(ctrlgrp) && strlen(mongrp))
+ sprintf(llc_occup_path, MON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ mongrp, sock_num);
+ else if (strlen(ctrlgrp) && !strlen(mongrp))
+ sprintf(llc_occup_path, CON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ ctrlgrp, sock_num);
+ else if (!strlen(ctrlgrp) && !strlen(mongrp))
+ sprintf(llc_occup_path, LCC_OCCUP_PATH, RESCTRL_PATH, sock_num);
+}
+
+/*
+ * initialize_llc_occu_resctrl: Appropriately populate "llc_occup_path"
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no: CPU number that the benchmark PID is bound to
+ * @resctrl_val: Resctrl feature (e.g. cat, cqm, etc.)
+ */
+static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
+ int cpu_no, char *resctrl_val)
+{
+ int resource_id;
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+ perror("# Unable to resource_id");
+ return;
+ }
+
+ if (strcmp(resctrl_val, "cqm") == 0)
+ set_cqm_path(ctrlgrp, mongrp, resource_id);
+}
+
+static int
+measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+{
+ unsigned long bw_resc, bw_resc_end;
+ float bw_imc;
+ int ret;
+ /*
+ * Measure memory bandwidth from resctrl and from
+ * another source which is perf imc value or could
+ * be something else if perf imc event is not available.
+ * Compare the two values to validate resctrl value.
+ * It takes 1sec to measure the data.
+ */
+ bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
+ if (bw_imc <= 0)
+ return (int)bw_imc;
+
+ bw_resc_end = get_mem_bw_resctrl();
+ if (bw_resc_end == (unsigned long)-1)
+ return -1;
+
+ bw_resc = (bw_resc_end - *bw_resc_start) / MB;
+ ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
+ if (ret)
+ return ret;
+
+ *bw_resc_start = bw_resc_end;
+
+ return 0;
+}
+
+/*
+ * resctrl_val: execute benchmark and measure memory bandwidth on
+ * the benchmark
+ * @benchmark_cmd: benchmark command and its arguments
+ * @param: parameters passed to resctrl_val()
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+{
+ char *resctrl_val = param->resctrl_val;
+ unsigned long bw_resc_start = 0;
+ struct sigaction sigact;
+ int ret = 0, pipefd[2];
+ char pipe_message = 0;
+ union sigval value;
+
+ if (strcmp(param->filename, "") == 0)
+ sprintf(param->filename, "stdio");
+
+ if ((strcmp(resctrl_val, "mba")) == 0 ||
+ (strcmp(resctrl_val, "mbm")) == 0) {
+ ret = validate_bw_report_request(param->bw_report);
+ if (ret)
+ return ret;
+ }
+
+ ret = remount_resctrlfs(param->mum_resctrlfs);
+ if (ret)
+ return ret;
+
+ /*
+ * If benchmark wasn't successfully started by child, then child should
+ * kill parent, so save parent's pid
+ */
+ ppid = getpid();
+
+ if (pipe(pipefd)) {
+ perror("# Unable to create pipe");
+
+ return -1;
+ }
+
+ /*
+ * Fork to start benchmark, save child's pid so that it can be killed
+ * when needed
+ */
+ bm_pid = fork();
+ if (bm_pid == -1) {
+ perror("# Unable to fork");
+
+ return -1;
+ }
+
+ if (bm_pid == 0) {
+ /*
+ * Mask all signals except SIGUSR1, parent uses SIGUSR1 to
+ * start benchmark
+ */
+ sigfillset(&sigact.sa_mask);
+ sigdelset(&sigact.sa_mask, SIGUSR1);
+
+ sigact.sa_sigaction = run_benchmark;
+ sigact.sa_flags = SA_SIGINFO;
+
+ /* Register for "SIGUSR1" signal from parent */
+ if (sigaction(SIGUSR1, &sigact, NULL))
+ PARENT_EXIT("Can't register child for signal");
+
+ /* Tell parent that child is ready */
+ close(pipefd[0]);
+ pipe_message = 1;
+ if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
+ (ssize_t)sizeof(pipe_message)) {
+ perror("# failed signaling parent process");
+ close(pipefd[1]);
+ return -1;
+ }
+ close(pipefd[1]);
+
+ /* Suspend child until delivery of "SIGUSR1" from parent */
+ sigsuspend(&sigact.sa_mask);
+
+ PARENT_EXIT("Child is done");
+ }
+
+ printf("# benchmark PID: %d\n", bm_pid);
+
+ /*
+ * Register CTRL-C handler for parent, as it has to kill benchmark
+ * before exiting
+ */
+ sigact.sa_sigaction = ctrlc_handler;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGINT, &sigact, NULL) ||
+ sigaction(SIGHUP, &sigact, NULL)) {
+ perror("# sigaction");
+ ret = errno;
+ goto out;
+ }
+
+ value.sival_ptr = benchmark_cmd;
+
+ /* Taskset benchmark to specified cpu */
+ ret = taskset_benchmark(bm_pid, param->cpu_no);
+ if (ret)
+ goto out;
+
+ /* Write benchmark to specified control&monitoring grp in resctrl FS */
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
+ resctrl_val);
+ if (ret)
+ goto out;
+
+ if ((strcmp(resctrl_val, "mbm") == 0) ||
+ (strcmp(resctrl_val, "mba") == 0)) {
+ ret = initialize_mem_bw_imc();
+ if (ret)
+ goto out;
+
+ initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
+ param->cpu_no, resctrl_val);
+ } else if (strcmp(resctrl_val, "cqm") == 0)
+ initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
+ param->cpu_no, resctrl_val);
+
+ /* Parent waits for child to be ready. */
+ close(pipefd[1]);
+ while (pipe_message != 1) {
+ if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) <
+ (ssize_t)sizeof(pipe_message)) {
+ perror("# failed reading message from child process");
+ close(pipefd[0]);
+ goto out;
+ }
+ }
+ close(pipefd[0]);
+
+ /* Signal child to start benchmark */
+ if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
+ perror("# sigqueue SIGUSR1 to child");
+ ret = errno;
+ goto out;
+ }
+
+ /* Give benchmark enough time to fully run */
+ sleep(1);
+
+ /* Test runs until the callback setup() tells the test to stop. */
+ while (1) {
+ if ((strcmp(resctrl_val, "mbm") == 0) ||
+ (strcmp(resctrl_val, "mba") == 0)) {
+ ret = param->setup(1, param);
+ if (ret) {
+ ret = 0;
+ break;
+ }
+
+ ret = measure_vals(param, &bw_resc_start);
+ if (ret)
+ break;
+ } else if (strcmp(resctrl_val, "cqm") == 0) {
+ ret = param->setup(1, param);
+ if (ret) {
+ ret = 0;
+ break;
+ }
+ sleep(1);
+ ret = measure_cache_vals(param, bm_pid);
+ if (ret)
+ break;
+ } else {
+ break;
+ }
+ }
+
+out:
+ kill(bm_pid, SIGKILL);
+ umount_resctrlfs();
+
+ return ret;
+}
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
new file mode 100644
index 000000000000..19c0ec4045a4
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic resctrl file system operations
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+int tests_run;
+
+static int find_resctrl_mount(char *buffer)
+{
+ FILE *mounts;
+ char line[256], *fs, *mntpoint;
+
+ mounts = fopen("/proc/mounts", "r");
+ if (!mounts) {
+ perror("/proc/mounts");
+ return -ENXIO;
+ }
+ while (!feof(mounts)) {
+ if (!fgets(line, 256, mounts))
+ break;
+ fs = strtok(line, " \t");
+ if (!fs)
+ continue;
+ mntpoint = strtok(NULL, " \t");
+ if (!mntpoint)
+ continue;
+ fs = strtok(NULL, " \t");
+ if (!fs)
+ continue;
+ if (strcmp(fs, "resctrl"))
+ continue;
+
+ fclose(mounts);
+ if (buffer) {
+ strncpy(buffer, mntpoint, 255);
+ buffer[255] = '\0';
+ }
+
+ return 0;
+ }
+
+ fclose(mounts);
+
+ return -ENOENT;
+}
+
+char cbm_mask[256];
+
+/*
+ * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
+ * @mum_resctrlfs: Should the resctrl FS be remounted?
+ *
+ * If not mounted, mount it.
+ * If mounted and mum_resctrlfs then remount resctrl FS.
+ * If mounted and !mum_resctrlfs then noop
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int remount_resctrlfs(bool mum_resctrlfs)
+{
+ char mountpoint[256];
+ int ret;
+
+ ret = find_resctrl_mount(mountpoint);
+ if (ret)
+ strcpy(mountpoint, RESCTRL_PATH);
+
+ if (!ret && mum_resctrlfs && umount(mountpoint)) {
+ printf("not ok unmounting \"%s\"\n", mountpoint);
+ perror("# umount");
+ tests_run++;
+ }
+
+ if (!ret && !mum_resctrlfs)
+ return 0;
+
+ ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
+ printf("%sok mounting resctrl to \"%s\"\n", ret ? "not " : "",
+ RESCTRL_PATH);
+ if (ret)
+ perror("# mount");
+
+ tests_run++;
+
+ return ret;
+}
+
+int umount_resctrlfs(void)
+{
+ if (umount(RESCTRL_PATH)) {
+ perror("# Unable to umount resctrl");
+
+ return errno;
+ }
+
+ return 0;
+}
+
+/*
+ * get_resource_id - Get socket number/l3 id for a specified CPU
+ * @cpu_no: CPU number
+ * @resource_id: Socket number or l3_id
+ *
+ * Return: >= 0 on success, < 0 on failure.
+ */
+int get_resource_id(int cpu_no, int *resource_id)
+{
+ char phys_pkg_path[1024];
+ FILE *fp;
+
+ if (is_amd)
+ sprintf(phys_pkg_path, "%s%d/cache/index3/id",
+ PHYS_ID_PATH, cpu_no);
+ else
+ sprintf(phys_pkg_path, "%s%d/topology/physical_package_id",
+ PHYS_ID_PATH, cpu_no);
+
+ fp = fopen(phys_pkg_path, "r");
+ if (!fp) {
+ perror("Failed to open physical_package_id");
+
+ return -1;
+ }
+ if (fscanf(fp, "%d", resource_id) <= 0) {
+ perror("Could not get socket number or l3 id");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * get_cache_size - Get cache size for a specified CPU
+ * @cpu_no: CPU number
+ * @cache_type: Cache level L2/L3
+ * @cache_size: pointer to cache_size
+ *
+ * Return: = 0 on success, < 0 on failure.
+ */
+int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
+{
+ char cache_path[1024], cache_str[64];
+ int length, i, cache_num;
+ FILE *fp;
+
+ if (!strcmp(cache_type, "L3")) {
+ cache_num = 3;
+ } else if (!strcmp(cache_type, "L2")) {
+ cache_num = 2;
+ } else {
+ perror("Invalid cache level");
+ return -1;
+ }
+
+ sprintf(cache_path, "/sys/bus/cpu/devices/cpu%d/cache/index%d/size",
+ cpu_no, cache_num);
+ fp = fopen(cache_path, "r");
+ if (!fp) {
+ perror("Failed to open cache size");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cache_str) <= 0) {
+ perror("Could not get cache_size");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ length = (int)strlen(cache_str);
+
+ *cache_size = 0;
+
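+ /* Parse a size string such as "512K" or "16M" into bytes */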
+ for (i = 0; i < length; i++) {
+ if ((cache_str[i] >= '0') && (cache_str[i] <= '9'))
+
+ *cache_size = *cache_size * 10 + (cache_str[i] - '0');
+
+ else if (cache_str[i] == 'K')
+
+ *cache_size = *cache_size * 1024;
+
+ else if (cache_str[i] == 'M')
+
+ *cache_size = *cache_size * 1024 * 1024;
+
+ else
+ break;
+ }
+
+ return 0;
+}
+
+#define CORE_SIBLINGS_PATH "/sys/bus/cpu/devices/cpu"
+
+/*
+ * get_cbm_mask - Get cbm mask for given cache
+ * @cache_type: Cache level L2/L3
+ *
+ * Mask is stored in cbm_mask which is global variable.
+ *
+ * Return: = 0 on success, < 0 on failure.
+ */
+int get_cbm_mask(char *cache_type)
+{
+ char cbm_mask_path[1024];
+ FILE *fp;
+
+ sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
+
+ fp = fopen(cbm_mask_path, "r");
+ if (!fp) {
+ perror("Failed to open cache level");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cbm_mask) <= 0) {
+ perror("Could not get max cbm_mask");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * get_core_sibling - Get sibling core id from the same socket for given CPU
+ * @cpu_no: CPU number
+ *
+ * Return: > 0 on success, < 0 on failure.
+ */
+int get_core_sibling(int cpu_no)
+{
+ char core_siblings_path[1024], cpu_list_str[64];
+ int sibling_cpu_no = -1;
+ FILE *fp;
+
+ sprintf(core_siblings_path, "%s%d/topology/core_siblings_list",
+ CORE_SIBLINGS_PATH, cpu_no);
+
+ fp = fopen(core_siblings_path, "r");
+ if (!fp) {
+ perror("Failed to open core siblings path");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cpu_list_str) <= 0) {
+ perror("Could not get core_siblings list");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ char *token = strtok(cpu_list_str, "-,");
+
+ while (token) {
+ sibling_cpu_no = atoi(token);
+ /* Skipping core 0 as we don't want to run test on core 0 */
+ if (sibling_cpu_no != 0)
+ break;
+ token = strtok(NULL, "-,");
+ }
+
+ return sibling_cpu_no;
+}
+
+/*
+ * taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu
+ * @bm_pid: PID that should be bound
+ * @cpu_no: CPU number to which the PID would be bound
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int taskset_benchmark(pid_t bm_pid, int cpu_no)
+{
+ cpu_set_t my_set;
+
+ CPU_ZERO(&my_set);
+ CPU_SET(cpu_no, &my_set);
+
+ if (sched_setaffinity(bm_pid, sizeof(cpu_set_t), &my_set)) {
+ perror("Unable to taskset benchmark");
+
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * run_benchmark - Run a specified benchmark or fill_buf (default benchmark)
+ * when SIGUSR1 is received. Direct benchmark stdio to /dev/null.
+ * @signum: signal number
+ * @info: signal info
+ * @ucontext: user context in signal handling
+ *
+ * Return: void
+ */
+void run_benchmark(int signum, siginfo_t *info, void *ucontext)
+{
+ int operation, ret, malloc_and_init_memory, memflush;
+ unsigned long span, buffer_span;
+ char **benchmark_cmd;
+ char resctrl_val[64];
+ FILE *fp;
+
+ benchmark_cmd = info->si_ptr;
+
+ /*
+ * Direct stdio of child to /dev/null, so that only parent writes to
+ * stdio (console)
+ */
+ fp = freopen("/dev/null", "w", stdout);
+ if (!fp)
+ PARENT_EXIT("Unable to direct benchmark status to /dev/null");
+
+ if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
+ /* Execute default fill_buf benchmark */
+ span = strtoul(benchmark_cmd[1], NULL, 10);
+ malloc_and_init_memory = atoi(benchmark_cmd[2]);
+ memflush = atoi(benchmark_cmd[3]);
+ operation = atoi(benchmark_cmd[4]);
+ sprintf(resctrl_val, "%s", benchmark_cmd[5]);
+
+ if (strcmp(resctrl_val, "cqm") != 0)
+ buffer_span = span * MB;
+ else
+ buffer_span = span;
+
+ if (run_fill_buf(buffer_span, malloc_and_init_memory, memflush,
+ operation, resctrl_val))
+ fprintf(stderr, "Error in running fill buffer\n");
+ } else {
+ /* Execute specified benchmark */
+ ret = execvp(benchmark_cmd[0], benchmark_cmd);
+ if (ret)
+ perror("wrong\n");
+ }
+
+ fclose(stdout);
+ PARENT_EXIT("Unable to run specified benchmark");
+}
+
+/*
+ * create_grp - Create a group only if one doesn't exist
+ * @grp_name: Name of the group
+ * @grp: Full path and name of the group
+ * @parent_grp: Full path and name of the parent group
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
+{
+ int found_grp = 0;
+ struct dirent *ep;
+ DIR *dp;
+
+ /*
+ * At this point, we are guaranteed to have resctrl FS mounted and if
+ * length of grp_name == 0, it means, user wants to use root con_mon
+ * grp, so do nothing
+ */
+ if (strlen(grp_name) == 0)
+ return 0;
+
+ /* Check if requested grp exists or not */
+ dp = opendir(parent_grp);
+ if (dp) {
+ while ((ep = readdir(dp)) != NULL) {
+ if (strcmp(ep->d_name, grp_name) == 0)
+ found_grp = 1;
+ }
+ closedir(dp);
+ } else {
+ perror("Unable to open resctrl for group");
+
+ return -1;
+ }
+
+ /* Requested grp doesn't exist, hence create it */
+ if (found_grp == 0) {
+ if (mkdir(grp, 0) == -1) {
+ perror("Unable to create group");
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int write_pid_to_tasks(char *tasks, pid_t pid)
+{
+ FILE *fp;
+
+ fp = fopen(tasks, "w");
+ if (!fp) {
+ perror("Failed to open tasks file");
+
+ return -1;
+ }
+ if (fprintf(fp, "%d\n", pid) < 0) {
+ perror("Failed to wr pid to tasks file");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * write_bm_pid_to_resctrl - Write a PID (i.e. benchmark) to resctrl FS
+ * @bm_pid: PID that should be written
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @resctrl_val: Resctrl feature (e.g. mbm, mba, etc.)
+ *
+ * If a con_mon grp is requested, create it and write pid to it, otherwise
+ * write pid to root con_mon grp.
+ * If a mon grp is requested, create it and write pid to it, otherwise
+ * pid is not written, this means that pid is in con_mon grp and hence
+ * should consult con_mon grp's mon_data directory for results.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ char *resctrl_val)
+{
+ char controlgroup[128], monitorgroup[512], monitorgroup_p[256];
+ char tasks[1024];
+ int ret = 0;
+
+ if (strlen(ctrlgrp))
+ sprintf(controlgroup, "%s/%s", RESCTRL_PATH, ctrlgrp);
+ else
+ sprintf(controlgroup, "%s", RESCTRL_PATH);
+
+ /* Create control and monitoring group and write pid into it */
+ ret = create_grp(ctrlgrp, controlgroup, RESCTRL_PATH);
+ if (ret)
+ goto out;
+ sprintf(tasks, "%s/tasks", controlgroup);
+ ret = write_pid_to_tasks(tasks, bm_pid);
+ if (ret)
+ goto out;
+
+ /* Create mon grp and write pid into it for "mbm" and "cqm" test */
+ if ((strcmp(resctrl_val, "cqm") == 0) ||
+ (strcmp(resctrl_val, "mbm") == 0)) {
+ if (strlen(mongrp)) {
+ sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
+ sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
+ ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
+ if (ret)
+ goto out;
+
+ sprintf(tasks, "%s/mon_groups/%s/tasks",
+ controlgroup, mongrp);
+ ret = write_pid_to_tasks(tasks, bm_pid);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ printf("%sok writing benchmark parameters to resctrl FS\n",
+ ret ? "not " : "");
+ if (ret)
+ perror("# writing to resctrlfs");
+
+ tests_run++;
+
+ return ret;
+}
+
+/*
+ * write_schemata - Update schemata of a con_mon grp
+ * @ctrlgrp: Name of the con_mon grp
+ * @schemata: Schemata that should be updated to
+ * @cpu_no: CPU number that the benchmark PID is bound to
+ * @resctrl_val: Resctrl feature (e.g. mbm, mba, etc.)
+ *
+ * Update schemata of a con_mon grp *only* if requested resctrl feature is
+ * allocation type
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
+{
+ char controlgroup[1024], schema[1024], reason[64];
+ int resource_id, ret = 0;
+ FILE *fp;
+
+ if ((strcmp(resctrl_val, "mba") != 0) &&
+ (strcmp(resctrl_val, "cat") != 0) &&
+ (strcmp(resctrl_val, "cqm") != 0))
+ return -ENOENT;
+
+ if (!schemata) {
+ printf("# Skipping empty schemata update\n");
+
+ return -1;
+ }
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+ sprintf(reason, "Failed to get resource id");
+ ret = -1;
+
+ goto out;
+ }
+
+ if (strlen(ctrlgrp) != 0)
+ sprintf(controlgroup, "%s/%s/schemata", RESCTRL_PATH, ctrlgrp);
+ else
+ sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
+
+ if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
+ sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
+ if (strcmp(resctrl_val, "mba") == 0)
+ sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
+
+ fp = fopen(controlgroup, "w");
+ if (!fp) {
+ sprintf(reason, "Failed to open control group");
+ ret = -1;
+
+ goto out;
+ }
+
+ if (fprintf(fp, "%s\n", schema) < 0) {
+ sprintf(reason, "Failed to write schemata in control group");
+ fclose(fp);
+ ret = -1;
+
+ goto out;
+ }
+ fclose(fp);
+
+out:
+ printf("%sok Write schema \"%s\" to resctrl FS%s%s\n",
+ ret ? "not " : "", schema, ret ? " # " : "",
+ ret ? reason : "");
+ tests_run++;
+
+ return ret;
+}
+
+bool check_resctrlfs_support(void)
+{
+ FILE *inf = fopen("/proc/filesystems", "r");
+ DIR *dp;
+ char *res;
+ bool ret = false;
+
+ if (!inf)
+ return false;
+
+ res = fgrep(inf, "nodev\tresctrl\n");
+
+ if (res) {
+ ret = true;
+ free(res);
+ }
+
+ fclose(inf);
+
+ printf("%sok kernel supports resctrl filesystem\n", ret ? "" : "not ");
+ tests_run++;
+
+ dp = opendir(RESCTRL_PATH);
+ printf("%sok resctrl mountpoint \"%s\" exists\n",
+ dp ? "" : "not ", RESCTRL_PATH);
+ if (dp)
+ closedir(dp);
+ tests_run++;
+
+ printf("# resctrl filesystem %s mounted\n",
+ find_resctrl_mount(NULL) ? "not" : "is");
+
+ return ret;
+}
+
+char *fgrep(FILE *inf, const char *str)
+{
+ char line[256];
+ int slen = strlen(str);
+
+ while (!feof(inf)) {
+ if (!fgets(line, 256, inf))
+ break;
+ if (strncmp(line, str, slen))
+ continue;
+
+ return strdup(line);
+ }
+
+ return NULL;
+}
+
+/*
+ * validate_resctrl_feature_request - Check if requested feature is valid.
+ * @resctrl_val: Requested feature
+ *
+ * Return: true if the feature flag is present in /proc/cpuinfo, false otherwise
+ */
+bool validate_resctrl_feature_request(char *resctrl_val)
+{
+ FILE *inf = fopen("/proc/cpuinfo", "r");
+ bool found = false;
+ char *res;
+
+ if (!inf)
+ return false;
+
+ res = fgrep(inf, "flags");
+
+ if (res) {
+ char *s = strchr(res, ':');
+
+ found = s && !strstr(s, resctrl_val);
+ free(res);
+ }
+ fclose(inf);
+
+ return found;
+}
+
+int filter_dmesg(void)
+{
+ char line[1024];
+ FILE *fp;
+ int pipefds[2];
+ pid_t pid;
+ int ret;
+
+ ret = pipe(pipefds);
+ if (ret) {
+ perror("pipe");
+ return ret;
+ }
+ pid = fork();
+ if (pid == 0) {
+ close(pipefds[0]);
+ dup2(pipefds[1], STDOUT_FILENO);
+ execlp("dmesg", "dmesg", NULL);
+ perror("executing dmesg");
+ exit(1);
+ }
+ close(pipefds[1]);
+ fp = fdopen(pipefds[0], "r");
+ if (!fp) {
+ perror("fdopen(pipe)");
+ kill(pid, SIGTERM);
+
+ return -1;
+ }
+
+ while (fgets(line, 1024, fp)) {
+ if (strstr(line, "intel_rdt:"))
+ printf("# dmesg: %s", line);
+ if (strstr(line, "resctrl:"))
+ printf("# dmesg: %s", line);
+ }
+ fclose(fp);
+ waitpid(pid, NULL, 0);
+
+ return 0;
+}
+
+int validate_bw_report_request(char *bw_report)
+{
+ if (strcmp(bw_report, "reads") == 0)
+ return 0;
+ if (strcmp(bw_report, "writes") == 0)
+ return 0;
+ if (strcmp(bw_report, "nt-writes") == 0) {
+ strcpy(bw_report, "writes");
+ return 0;
+ }
+ if (strcmp(bw_report, "total") == 0)
+ return 0;
+
+ fprintf(stderr, "Requested iMC B/W report type unavailable\n");
+
+ return -1;
+}
+
+int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
+ int group_fd, unsigned long flags)
+{
+ int ret;
+
+ ret = syscall(__NR_perf_event_open, hw_event, pid, cpu,
+ group_fd, flags);
+ return ret;
+}
+
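+/* Return the number of set bits (population count) in n */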
+unsigned int count_bits(unsigned long n)
+{
+ unsigned int count = 0;
+
+ while (n) {
+ count += n & 1;
+ n >>= 1;
+ }
+
+ return count;
+}
diff --git a/tools/testing/selftests/rseq/.gitignore b/tools/testing/selftests/rseq/.gitignore
index cc610da7e369..5910888ebfe1 100644
--- a/tools/testing/selftests/rseq/.gitignore
+++ b/tools/testing/selftests/rseq/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
basic_percpu_ops_test
basic_test
basic_rseq_op_test
diff --git a/tools/testing/selftests/rtc/.gitignore b/tools/testing/selftests/rtc/.gitignore
index d0ad44f6294a..fb2d533aa575 100644
--- a/tools/testing/selftests/rtc/.gitignore
+++ b/tools/testing/selftests/rtc/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
rtctest
setdate
diff --git a/tools/testing/selftests/safesetid/.gitignore b/tools/testing/selftests/safesetid/.gitignore
index 9c1a629bca01..25d3db172907 100644
--- a/tools/testing/selftests/safesetid/.gitignore
+++ b/tools/testing/selftests/safesetid/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
safesetid-test
diff --git a/tools/testing/selftests/seccomp/.gitignore b/tools/testing/selftests/seccomp/.gitignore
index 5af29d3a1b0a..dec678577f9c 100644
--- a/tools/testing/selftests/seccomp/.gitignore
+++ b/tools/testing/selftests/seccomp/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
seccomp_bpf
seccomp_benchmark
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index 1760b3e39730..0ebfe8b0e147 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -1,17 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-all:
-
-include ../lib.mk
-
-.PHONY: all clean
-
-BINARIES := seccomp_bpf seccomp_benchmark
CFLAGS += -Wl,-no-as-needed -Wall
+LDFLAGS += -lpthread
-seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
- $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
-
-TEST_PROGS += $(BINARIES)
-EXTRA_CLEAN := $(BINARIES)
-
-all: $(BINARIES)
+TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark
+include ../lib.mk
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index a9ad3bd8b2ad..89fb3e0b552e 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -913,7 +913,7 @@ TEST(ERRNO_order)
EXPECT_EQ(12, errno);
}
-FIXTURE_DATA(TRAP) {
+FIXTURE(TRAP) {
struct sock_fprog prog;
};
@@ -1024,7 +1024,7 @@ TEST_F(TRAP, handler)
EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}
-FIXTURE_DATA(precedence) {
+FIXTURE(precedence) {
struct sock_fprog allow;
struct sock_fprog log;
struct sock_fprog trace;
@@ -1513,7 +1513,7 @@ void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
EXPECT_EQ(0, ret);
}
-FIXTURE_DATA(TRACE_poke) {
+FIXTURE(TRACE_poke) {
struct sock_fprog prog;
pid_t tracer;
long poked;
@@ -1821,7 +1821,7 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
change_syscall(_metadata, tracee, -1, -ESRCH);
}
-FIXTURE_DATA(TRACE_syscall) {
+FIXTURE(TRACE_syscall) {
struct sock_fprog prog;
pid_t tracer, mytid, mypid, parent;
};
@@ -2326,7 +2326,7 @@ struct tsync_sibling {
} \
} while (0)
-FIXTURE_DATA(TSYNC) {
+FIXTURE(TSYNC) {
struct sock_fprog root_prog, apply_prog;
struct tsync_sibling sibling[TSYNC_SIBLINGS];
sem_t started;
diff --git a/tools/testing/selftests/sigaltstack/.gitignore b/tools/testing/selftests/sigaltstack/.gitignore
index 35897b0a3f44..50a19a8888ce 100644
--- a/tools/testing/selftests/sigaltstack/.gitignore
+++ b/tools/testing/selftests/sigaltstack/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
sas
diff --git a/tools/testing/selftests/size/.gitignore b/tools/testing/selftests/size/.gitignore
index 189b7818de34..923e18eed1a0 100644
--- a/tools/testing/selftests/size/.gitignore
+++ b/tools/testing/selftests/size/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
get_size
diff --git a/tools/testing/selftests/sparc64/drivers/.gitignore b/tools/testing/selftests/sparc64/drivers/.gitignore
index 90e835ed74e6..0331f77373b5 100644
--- a/tools/testing/selftests/sparc64/drivers/.gitignore
+++ b/tools/testing/selftests/sparc64/drivers/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
adi-test
diff --git a/tools/testing/selftests/splice/.gitignore b/tools/testing/selftests/splice/.gitignore
index 1e23fefd68e8..d5a2da428752 100644
--- a/tools/testing/selftests/splice/.gitignore
+++ b/tools/testing/selftests/splice/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
default_file_splice_read
diff --git a/tools/testing/selftests/sync/.gitignore b/tools/testing/selftests/sync/.gitignore
index f5091e7792f2..f1152357712f 100644
--- a/tools/testing/selftests/sync/.gitignore
+++ b/tools/testing/selftests/sync/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
sync_test
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index c26d72e0166f..d52f65de23b4 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
__pycache__/
*.pyc
plugins/
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index e566c70e64a1..a3e43189d940 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -713,9 +713,8 @@ def set_operation_mode(pm, parser, args, remaining):
exit(0)
if args.list:
- if args.list:
- list_test_cases(alltests)
- exit(0)
+ list_test_cases(alltests)
+ exit(0)
if len(alltests):
req_plugins = pm.get_required_plugins(alltests)
diff --git a/tools/testing/selftests/timens/.gitignore b/tools/testing/selftests/timens/.gitignore
index 789f21e81028..2e43851b47c1 100644
--- a/tools/testing/selftests/timens/.gitignore
+++ b/tools/testing/selftests/timens/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
clock_nanosleep
exec
gettime_perf
diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c
index 87b47b557a7a..e40dc5be2f66 100644
--- a/tools/testing/selftests/timens/exec.c
+++ b/tools/testing/selftests/timens/exec.c
@@ -11,7 +11,6 @@
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
-#include <time.h>
#include <string.h>
#include "log.h"
diff --git a/tools/testing/selftests/timens/procfs.c b/tools/testing/selftests/timens/procfs.c
index 43d93f4006b9..7f14f0fdac84 100644
--- a/tools/testing/selftests/timens/procfs.c
+++ b/tools/testing/selftests/timens/procfs.c
@@ -12,7 +12,6 @@
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
-#include <time.h>
#include "log.h"
#include "timens.h"
diff --git a/tools/testing/selftests/timens/timens.c b/tools/testing/selftests/timens/timens.c
index 559d26e21ba0..098be7c83be3 100644
--- a/tools/testing/selftests/timens/timens.c
+++ b/tools/testing/selftests/timens/timens.c
@@ -10,7 +10,6 @@
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
-#include <time.h>
#include <string.h>
#include "log.h"
diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c
index 0cca7aafc4bd..96dba11ebe44 100644
--- a/tools/testing/selftests/timens/timer.c
+++ b/tools/testing/selftests/timens/timer.c
@@ -11,7 +11,6 @@
#include <stdio.h>
#include <stdint.h>
#include <signal.h>
-#include <time.h>
#include "log.h"
#include "timens.h"
diff --git a/tools/testing/selftests/timers/.gitignore b/tools/testing/selftests/timers/.gitignore
index 32a9eadb2d4e..bb5326ff900b 100644
--- a/tools/testing/selftests/timers/.gitignore
+++ b/tools/testing/selftests/timers/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
alarmtimer-suspend
change_skew
clocksource-switch
diff --git a/tools/testing/selftests/tmpfs/.gitignore b/tools/testing/selftests/tmpfs/.gitignore
index a96838fad74d..b1afaa925905 100644
--- a/tools/testing/selftests/tmpfs/.gitignore
+++ b/tools/testing/selftests/tmpfs/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
/bug-link-o-tmpfile
diff --git a/tools/testing/selftests/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore
index 133bf9ee986c..382cfb39a1a3 100644
--- a/tools/testing/selftests/vDSO/.gitignore
+++ b/tools/testing/selftests/vDSO/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
vdso_test
vdso_standalone_test_x86
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 31b3c98b6d34..0edb6d900e8d 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
hugepage-mmap
hugepage-shm
map_hugetlb
@@ -14,3 +15,4 @@ virtual_address_range
gup_benchmark
va_128TBswitch
map_fixed_noreplace
+write_to_hugetlbfs
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 7f9a8a8c31da..d31db052dff6 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -14,6 +14,7 @@ TEST_GEN_FILES += map_fixed_noreplace
TEST_GEN_FILES += map_populate
TEST_GEN_FILES += mlock-random-test
TEST_GEN_FILES += mlock2-tests
+TEST_GEN_FILES += mremap_dontunmap
TEST_GEN_FILES += on-fault-limit
TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
@@ -22,6 +23,7 @@ TEST_GEN_FILES += userfaultfd
ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64))
TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range
+TEST_GEN_FILES += write_to_hugetlbfs
endif
TEST_PROGS := run_vmtests
diff --git a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
new file mode 100644
index 000000000000..18d33684faad
--- /dev/null
+++ b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh
@@ -0,0 +1,575 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+if [[ $(id -u) -ne 0 ]]; then
+ echo "This test must be run as root. Skipping..."
+ exit 0
+fi
+
+fault_limit_file=limit_in_bytes
+reservation_limit_file=rsvd.limit_in_bytes
+fault_usage_file=usage_in_bytes
+reservation_usage_file=rsvd.usage_in_bytes
+
+if [[ "$1" == "-cgroup-v2" ]]; then
+ cgroup2=1
+ fault_limit_file=max
+ reservation_limit_file=rsvd.max
+ fault_usage_file=current
+ reservation_usage_file=rsvd.current
+fi
+
+cgroup_path=/dev/cgroup/memory
+if [[ ! -e $cgroup_path ]]; then
+ mkdir -p $cgroup_path
+ if [[ $cgroup2 ]]; then
+ mount -t cgroup2 none $cgroup_path
+ else
+ mount -t cgroup memory,hugetlb $cgroup_path
+ fi
+fi
+
+if [[ $cgroup2 ]]; then
+ echo "+hugetlb" >/dev/cgroup/memory/cgroup.subtree_control
+fi
+
+function cleanup() {
+ if [[ $cgroup2 ]]; then
+ echo $$ >$cgroup_path/cgroup.procs
+ else
+ echo $$ >$cgroup_path/tasks
+ fi
+
+ if [[ -e /mnt/huge ]]; then
+ rm -rf /mnt/huge/*
+ umount /mnt/huge || echo error
+ rmdir /mnt/huge
+ fi
+ if [[ -e $cgroup_path/hugetlb_cgroup_test ]]; then
+ rmdir $cgroup_path/hugetlb_cgroup_test
+ fi
+ if [[ -e $cgroup_path/hugetlb_cgroup_test1 ]]; then
+ rmdir $cgroup_path/hugetlb_cgroup_test1
+ fi
+ if [[ -e $cgroup_path/hugetlb_cgroup_test2 ]]; then
+ rmdir $cgroup_path/hugetlb_cgroup_test2
+ fi
+ echo 0 >/proc/sys/vm/nr_hugepages
+ echo CLEANUP DONE
+}
+
+function expect_equal() {
+ local expected="$1"
+ local actual="$2"
+ local error="$3"
+
+ if [[ "$expected" != "$actual" ]]; then
+ echo "expected ($expected) != actual ($actual): $3"
+ cleanup
+ exit 1
+ fi
+}
+
+function get_machine_hugepage_size() {
+ hpz=$(grep -i hugepagesize /proc/meminfo)
+ kb=${hpz:14:-3}
+ mb=$(($kb / 1024))
+ echo $mb
+}
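
The substring arithmetic above ("${hpz:14:-3}") silently depends on the exact width of the "Hugepagesize:" field. For reference, a sturdier parse sketched in C (hypothetical helper; assumes the usual "Hugepagesize:    2048 kB" /proc/meminfo layout):

  #include <stdio.h>

  static long hugepage_size_mb(void)
  {
          char line[128];
          long kb = -1;
          FILE *f = fopen("/proc/meminfo", "r");

          if (!f)
                  return -1;
          while (fgets(line, sizeof(line), f))
                  if (sscanf(line, "Hugepagesize: %ld kB", &kb) == 1)
                          break;
          fclose(f);

          return kb > 0 ? kb / 1024 : -1;
  }
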
+
+MB=$(get_machine_hugepage_size)
+
+function setup_cgroup() {
+ local name="$1"
+ local cgroup_limit="$2"
+ local reservation_limit="$3"
+
+ mkdir $cgroup_path/$name
+
+ echo writing cgroup limit: "$cgroup_limit"
+ echo "$cgroup_limit" >$cgroup_path/$name/hugetlb.${MB}MB.$fault_limit_file
+
+ echo writing reservation limit: "$reservation_limit"
+ echo "$reservation_limit" > \
+ $cgroup_path/$name/hugetlb.${MB}MB.$reservation_limit_file
+
+ if [ -e "$cgroup_path/$name/cpuset.cpus" ]; then
+ echo 0 >$cgroup_path/$name/cpuset.cpus
+ fi
+ if [ -e "$cgroup_path/$name/cpuset.mems" ]; then
+ echo 0 >$cgroup_path/$name/cpuset.mems
+ fi
+}
+
+function wait_for_hugetlb_memory_to_get_depleted() {
+ local cgroup="$1"
+ local path="/dev/cgroup/memory/$cgroup/hugetlb.${MB}MB.$reservation_usage_file"
+ # Wait for hugetlbfs memory to get depleted.
+ while [ $(cat $path) != 0 ]; do
+ echo Waiting for hugetlb memory to get depleted.
+ cat $path
+ sleep 0.5
+ done
+}
+
+function wait_for_hugetlb_memory_to_get_reserved() {
+ local cgroup="$1"
+ local size="$2"
+
+ local path="/dev/cgroup/memory/$cgroup/hugetlb.${MB}MB.$reservation_usage_file"
+ # Wait for hugetlbfs memory to get written.
+ while [ $(cat $path) != $size ]; do
+ echo Waiting for hugetlb memory reservation to reach size $size.
+ cat $path
+ sleep 0.5
+ done
+}
+
+function wait_for_hugetlb_memory_to_get_written() {
+ local cgroup="$1"
+ local size="$2"
+
+ local path="/dev/cgroup/memory/$cgroup/hugetlb.${MB}MB.$fault_usage_file"
+ # Wait for hugetlbfs memory to get written.
+ while [ $(cat $path) != $size ]; do
+ echo Waiting for hugetlb memory to reach size $size.
+ cat $path
+ sleep 0.5
+ done
+}
+
+function write_hugetlbfs_and_get_usage() {
+ local cgroup="$1"
+ local size="$2"
+ local populate="$3"
+ local write="$4"
+ local path="$5"
+ local method="$6"
+ local private="$7"
+ local expect_failure="$8"
+ local reserve="$9"
+
+ # Function return values.
+ reservation_failed=0
+ oom_killed=0
+ hugetlb_difference=0
+ reserved_difference=0
+
+ local hugetlb_usage=$cgroup_path/$cgroup/hugetlb.${MB}MB.$fault_usage_file
+ local reserved_usage=$cgroup_path/$cgroup/hugetlb.${MB}MB.$reservation_usage_file
+
+ local hugetlb_before=$(cat $hugetlb_usage)
+ local reserved_before=$(cat $reserved_usage)
+
+ echo
+ echo Starting:
+ echo hugetlb_usage="$hugetlb_before"
+ echo reserved_usage="$reserved_before"
+ echo expect_failure is "$expect_failure"
+
+ output=$(mktemp)
+ set +e
+ if [[ "$method" == "1" ]] || [[ "$method" == 2 ]] ||
+ [[ "$private" == "-r" ]] && [[ "$expect_failure" != 1 ]]; then
+
+ bash write_hugetlb_memory.sh "$size" "$populate" "$write" \
+ "$cgroup" "$path" "$method" "$private" "-l" "$reserve" 2>&1 | tee $output &
+
+ local write_result=$?
+ local write_pid=$!
+
+ until grep -q -i "DONE" $output; do
+ echo waiting for DONE signal.
+ if ! ps $write_pid > /dev/null
+ then
+ echo "FAIL: The write died"
+ cleanup
+ exit 1
+ fi
+ sleep 0.5
+ done
+
+ echo ================= write_hugetlb_memory.sh output is:
+ cat $output
+ echo ================= end output.
+
+ if [[ "$populate" == "-o" ]] || [[ "$write" == "-w" ]]; then
+ wait_for_hugetlb_memory_to_get_written "$cgroup" "$size"
+ elif [[ "$reserve" != "-n" ]]; then
+ wait_for_hugetlb_memory_to_get_reserved "$cgroup" "$size"
+ else
+ # This case doesn't produce visible effects, but we still have
+ # to wait for the async process to start and execute...
+ sleep 0.5
+ fi
+
+ echo write_result is $write_result
+ else
+ bash write_hugetlb_memory.sh "$size" "$populate" "$write" \
+ "$cgroup" "$path" "$method" "$private" "$reserve"
+ local write_result=$?
+
+ if [[ "$reserve" != "-n" ]]; then
+ wait_for_hugetlb_memory_to_get_reserved "$cgroup" "$size"
+ fi
+ fi
+ set -e
+
+ if [[ "$write_result" == 1 ]]; then
+ reservation_failed=1
+ fi
+
+ # On linus/master, the above process gets SIGBUS'd on oomkill, with
+ # return code 135. On earlier kernels, it gets actual oomkill, with return
+ # code 137, so just check for both conditions in case we're testing
+ # against an earlier kernel.
+ if [[ "$write_result" == 135 ]] || [[ "$write_result" == 137 ]]; then
+ oom_killed=1
+ fi
+
+ local hugetlb_after=$(cat $hugetlb_usage)
+ local reserved_after=$(cat $reserved_usage)
+
+ echo After write:
+ echo hugetlb_usage="$hugetlb_after"
+ echo reserved_usage="$reserved_after"
+
+ hugetlb_difference=$(($hugetlb_after - $hugetlb_before))
+ reserved_difference=$(($reserved_after - $reserved_before))
+}
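
The 135/137 values follow the shell convention that a child killed by signal N reports exit status 128 + N: SIGBUS is 7 on x86 (135), SIGKILL is 9 (137). A minimal C sketch of the same classification via the wait-status macros:

  #include <sys/wait.h>

  static int shell_style_status(int wstatus)
  {
          if (WIFSIGNALED(wstatus))
                  return 128 + WTERMSIG(wstatus); /* SIGBUS -> 135, SIGKILL -> 137 */

          return WEXITSTATUS(wstatus);
  }
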
+
+function cleanup_hugetlb_memory() {
+ set +e
+ local cgroup="$1"
+ if [[ "$(pgrep -f write_to_hugetlbfs)" != "" ]]; then
+ echo killing write_to_hugetlbfs
+ killall -2 write_to_hugetlbfs
+ wait_for_hugetlb_memory_to_get_depleted $cgroup
+ fi
+ set -e
+
+ if [[ -e /mnt/huge ]]; then
+ rm -rf /mnt/huge/*
+ umount /mnt/huge
+ rmdir /mnt/huge
+ fi
+}
+
+function run_test() {
+ local size=$(($1 * ${MB} * 1024 * 1024))
+ local populate="$2"
+ local write="$3"
+ local cgroup_limit=$(($4 * ${MB} * 1024 * 1024))
+ local reservation_limit=$(($5 * ${MB} * 1024 * 1024))
+ local nr_hugepages="$6"
+ local method="$7"
+ local private="$8"
+ local expect_failure="$9"
+ local reserve="${10}"
+
+ # Function return values.
+ hugetlb_difference=0
+ reserved_difference=0
+ reservation_failed=0
+ oom_killed=0
+
+ echo nr hugepages = "$nr_hugepages"
+ echo "$nr_hugepages" >/proc/sys/vm/nr_hugepages
+
+ setup_cgroup "hugetlb_cgroup_test" "$cgroup_limit" "$reservation_limit"
+
+ mkdir -p /mnt/huge
+ mount -t hugetlbfs -o pagesize=${MB}M,size=256M none /mnt/huge
+
+ write_hugetlbfs_and_get_usage "hugetlb_cgroup_test" "$size" "$populate" \
+ "$write" "/mnt/huge/test" "$method" "$private" "$expect_failure" \
+ "$reserve"
+
+ cleanup_hugetlb_memory "hugetlb_cgroup_test"
+
+ local final_hugetlb=$(cat $cgroup_path/hugetlb_cgroup_test/hugetlb.${MB}MB.$fault_usage_file)
+ local final_reservation=$(cat $cgroup_path/hugetlb_cgroup_test/hugetlb.${MB}MB.$reservation_usage_file)
+
+ echo $hugetlb_difference
+ echo $reserved_difference
+ expect_equal "0" "$final_hugetlb" "final hugetlb is not zero"
+ expect_equal "0" "$final_reservation" "final reservation is not zero"
+}
+
+function run_multiple_cgroup_test() {
+ local size1="$1"
+ local populate1="$2"
+ local write1="$3"
+ local cgroup_limit1="$4"
+ local reservation_limit1="$5"
+
+ local size2="$6"
+ local populate2="$7"
+ local write2="$8"
+ local cgroup_limit2="$9"
+ local reservation_limit2="${10}"
+
+ local nr_hugepages="${11}"
+ local method="${12}"
+ local private="${13}"
+ local expect_failure="${14}"
+ local reserve="${15}"
+
+ # Function return values.
+ hugetlb_difference1=0
+ reserved_difference1=0
+ reservation_failed1=0
+ oom_killed1=0
+
+ hugetlb_difference2=0
+ reserved_difference2=0
+ reservation_failed2=0
+ oom_killed2=0
+
+ echo nr hugepages = "$nr_hugepages"
+ echo "$nr_hugepages" >/proc/sys/vm/nr_hugepages
+
+ setup_cgroup "hugetlb_cgroup_test1" "$cgroup_limit1" "$reservation_limit1"
+ setup_cgroup "hugetlb_cgroup_test2" "$cgroup_limit2" "$reservation_limit2"
+
+ mkdir -p /mnt/huge
+ mount -t hugetlbfs -o pagesize=${MB}M,size=256M none /mnt/huge
+
+ write_hugetlbfs_and_get_usage "hugetlb_cgroup_test1" "$size1" \
+ "$populate1" "$write1" "/mnt/huge/test1" "$method" "$private" \
+ "$expect_failure" "$reserve"
+
+ hugetlb_difference1=$hugetlb_difference
+ reserved_difference1=$reserved_difference
+ reservation_failed1=$reservation_failed
+ oom_killed1=$oom_killed
+
+ local cgroup1_hugetlb_usage=$cgroup_path/hugetlb_cgroup_test1/hugetlb.${MB}MB.$fault_usage_file
+ local cgroup1_reservation_usage=$cgroup_path/hugetlb_cgroup_test1/hugetlb.${MB}MB.$reservation_usage_file
+ local cgroup2_hugetlb_usage=$cgroup_path/hugetlb_cgroup_test2/hugetlb.${MB}MB.$fault_usage_file
+ local cgroup2_reservation_usage=$cgroup_path/hugetlb_cgroup_test2/hugetlb.${MB}MB.$reservation_usage_file
+
+ local usage_before_second_write=$(cat $cgroup1_hugetlb_usage)
+ local reservation_usage_before_second_write=$(cat $cgroup1_reservation_usage)
+
+ write_hugetlbfs_and_get_usage "hugetlb_cgroup_test2" "$size2" \
+ "$populate2" "$write2" "/mnt/huge/test2" "$method" "$private" \
+ "$expect_failure" "$reserve"
+
+ hugetlb_difference2=$hugetlb_difference
+ reserved_difference2=$reserved_difference
+ reservation_failed2=$reservation_failed
+ oom_killed2=$oom_killed
+
+ expect_equal "$usage_before_second_write" \
+ "$(cat $cgroup1_hugetlb_usage)" "Usage changed."
+ expect_equal "$reservation_usage_before_second_write" \
+ "$(cat $cgroup1_reservation_usage)" "Reservation usage changed."
+
+ cleanup_hugetlb_memory
+
+ local final_hugetlb=$(cat $cgroup1_hugetlb_usage)
+ local final_reservation=$(cat $cgroup1_reservation_usage)
+
+ expect_equal "0" "$final_hugetlb" \
+ "hugetlbt_cgroup_test1 final hugetlb is not zero"
+ expect_equal "0" "$final_reservation" \
+ "hugetlbt_cgroup_test1 final reservation is not zero"
+
+ local final_hugetlb=$(cat $cgroup2_hugetlb_usage)
+ local final_reservation=$(cat $cgroup2_reservation_usage)
+
+ expect_equal "0" "$final_hugetlb" \
+ "hugetlb_cgroup_test2 final hugetlb is not zero"
+ expect_equal "0" "$final_reservation" \
+ "hugetlb_cgroup_test2 final reservation is not zero"
+}
+
+cleanup
+
+for populate in "" "-o"; do
+ for method in 0 1 2; do
+ for private in "" "-r"; do
+ for reserve in "" "-n"; do
+
+ # Skip mmap(MAP_HUGETLB | MAP_SHARED). Doesn't seem to be supported.
+ if [[ "$method" == 1 ]] && [[ "$private" == "" ]]; then
+ continue
+ fi
+
+ # Skip populated shmem tests. Doesn't seem to be supported.
+ if [[ "$method" == 2"" ]] && [[ "$populate" == "-o" ]]; then
+ continue
+ fi
+
+ if [[ "$method" == 2"" ]] && [[ "$reserve" == "-n" ]]; then
+ continue
+ fi
+
+ cleanup
+ echo
+ echo
+ echo
+ echo Test normal case.
+ echo private=$private, populate=$populate, method=$method, reserve=$reserve
+ run_test 5 "$populate" "" 10 10 10 "$method" "$private" "0" "$reserve"
+
+ echo Memory charged to hugetlb=$hugetlb_difference
+ echo Memory charged to reservation=$reserved_difference
+
+ if [[ "$populate" == "-o" ]]; then
+ expect_equal "$((5 * $MB * 1024 * 1024))" "$hugetlb_difference" \
+ "Reserved memory charged to hugetlb cgroup."
+ else
+ expect_equal "0" "$hugetlb_difference" \
+ "Reserved memory charged to hugetlb cgroup."
+ fi
+
+ if [[ "$reserve" != "-n" ]] || [[ "$populate" == "-o" ]]; then
+ expect_equal "$((5 * $MB * 1024 * 1024))" "$reserved_difference" \
+ "Reserved memory not charged to reservation usage."
+ else
+ expect_equal "0" "$reserved_difference" \
+ "Reserved memory not charged to reservation usage."
+ fi
+
+ echo 'PASS'
+
+ cleanup
+ echo
+ echo
+ echo
+ echo Test normal case with write.
+ echo private=$private, populate=$populate, method=$method, reserve=$reserve
+ run_test 5 "$populate" '-w' 5 5 10 "$method" "$private" "0" "$reserve"
+
+ echo Memory charged to hugetlb=$hugetlb_difference
+ echo Memory charged to reservation=$reserved_difference
+
+ expect_equal "$((5 * $MB * 1024 * 1024))" "$hugetlb_difference" \
+ "Reserved memory charged to hugetlb cgroup."
+
+ expect_equal "$((5 * $MB * 1024 * 1024))" "$reserved_difference" \
+ "Reserved memory not charged to reservation usage."
+
+ echo 'PASS'
+
+ cleanup
+ echo
+ echo
+ echo
+ echo Test more than reservation case.
+ echo private=$private, populate=$populate, method=$method, reserve=$reserve
+
+ if [ "$reserve" != "-n" ]; then
+ run_test "5" "$populate" '' "10" "2" "10" "$method" "$private" "1" \
+ "$reserve"
+
+ expect_equal "1" "$reservation_failed" "Reservation succeeded."
+ fi
+
+ echo 'PASS'
+
+ cleanup
+
+ echo
+ echo
+ echo
+ echo Test more than cgroup limit case.
+ echo private=$private, populate=$populate, method=$method, reserve=$reserve
+
+ # Not sure if shm memory can be cleaned up when the process gets sigbus'd.
+ if [[ "$method" != 2 ]]; then
+ run_test 5 "$populate" "-w" 2 10 10 "$method" "$private" "1" "$reserve"
+
+ expect_equal "1" "$oom_killed" "Not oom killed."
+ fi
+ echo 'PASS'
+
+ cleanup
+
+ echo
+ echo
+ echo
+ echo Test normal case, multiple cgroups.
+ echo private=$private, populate=$populate, method=$method, reserve=$reserve
+ run_multiple_cgroup_test "3" "$populate" "" "10" "10" "5" \
+ "$populate" "" "10" "10" "10" \
+ "$method" "$private" "0" "$reserve"
+
+ echo Memory charged to hugetlb1=$hugetlb_difference1
+ echo Memory charged to reservation1=$reserved_difference1
+ echo Memory charged to hugetlb2=$hugetlb_difference2
+ echo Memory charged to reservation2=$reserved_difference2
+
+ if [[ "$reserve" != "-n" ]] || [[ "$populate" == "-o" ]]; then
+ expect_equal "3" "$reserved_difference1" \
+ "Incorrect reservations charged to cgroup 1."
+
+ expect_equal "5" "$reserved_difference2" \
+ "Incorrect reservation charged to cgroup 2."
+
+ else
+ expect_equal "0" "$reserved_difference1" \
+ "Incorrect reservations charged to cgroup 1."
+
+ expect_equal "0" "$reserved_difference2" \
+ "Incorrect reservation charged to cgroup 2."
+ fi
+
+ if [[ "$populate" == "-o" ]]; then
+ expect_equal "3" "$hugetlb_difference1" \
+ "Incorrect hugetlb charged to cgroup 1."
+
+ expect_equal "5" "$hugetlb_difference2" \
+ "Incorrect hugetlb charged to cgroup 2."
+
+ else
+ expect_equal "0" "$hugetlb_difference1" \
+ "Incorrect hugetlb charged to cgroup 1."
+
+ expect_equal "0" "$hugetlb_difference2" \
+ "Incorrect hugetlb charged to cgroup 2."
+ fi
+ echo 'PASS'
+
+ cleanup
+ echo
+ echo
+ echo
+ echo Test normal case with write, multiple cgroups.
+ echo private=$private, populate=$populate, method=$method, reserve=$reserve
+ run_multiple_cgroup_test "3" "$populate" "-w" "10" "10" "5" \
+ "$populate" "-w" "10" "10" "10" \
+ "$method" "$private" "0" "$reserve"
+
+ echo Memory charged to hugetlb1=$hugetlb_difference1
+ echo Memory charged to reservation1=$reserved_difference1
+ echo Memory charged to hugetlb2=$hugetlb_difference2
+ echo Memory charged to reservation2=$reserved_difference2
+
+ expect_equal "3" "$hugetlb_difference1" \
+ "Incorrect hugetlb charged to cgroup 1."
+
+ expect_equal "3" "$reserved_difference1" \
+ "Incorrect reservation charged to cgroup 1."
+
+ expect_equal "5" "$hugetlb_difference2" \
+ "Incorrect hugetlb charged to cgroup 2."
+
+ expect_equal "5" "$reserved_difference2" \
+ "Incorrected reservation charged to cgroup 2."
+ echo 'PASS'
+
+ cleanup
+
+ done # reserve
+ done # private
+ done # method
+done # populate
+
+umount $cgroup_path
+rmdir $cgroup_path
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 389327e9b30a..43b4dfe161a2 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -18,6 +18,10 @@
#define GUP_LONGTERM_BENCHMARK _IOWR('g', 2, struct gup_benchmark)
#define GUP_BENCHMARK _IOWR('g', 3, struct gup_benchmark)
+/* Similar to above, but use FOLL_PIN instead of FOLL_GET. */
+#define PIN_FAST_BENCHMARK _IOWR('g', 4, struct gup_benchmark)
+#define PIN_BENCHMARK _IOWR('g', 5, struct gup_benchmark)
+
/* Just the flags we need, copied from mm.h: */
#define FOLL_WRITE 0x01 /* check pte is writable */
@@ -40,8 +44,14 @@ int main(int argc, char **argv)
char *file = "/dev/zero";
char *p;
- while ((opt = getopt(argc, argv, "m:r:n:f:tTLUwSH")) != -1) {
+ while ((opt = getopt(argc, argv, "m:r:n:f:abtTLUuwSH")) != -1) {
switch (opt) {
+ case 'a':
+ cmd = PIN_FAST_BENCHMARK;
+ break;
+ case 'b':
+ cmd = PIN_BENCHMARK;
+ break;
case 'm':
size = atoi(optarg) * MB;
break;
@@ -63,6 +73,9 @@ int main(int argc, char **argv)
case 'U':
cmd = GUP_BENCHMARK;
break;
+ case 'u':
+ cmd = GUP_FAST_BENCHMARK;
+ break;
case 'w':
write = 1;
break;
diff --git a/tools/testing/selftests/vm/hugetlb_reparenting_test.sh b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh
new file mode 100644
index 000000000000..d11d1febccc3
--- /dev/null
+++ b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh
@@ -0,0 +1,244 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+if [[ $(id -u) -ne 0 ]]; then
+ echo "This test must be run as root. Skipping..."
+ exit 0
+fi
+
+usage_file=usage_in_bytes
+
+if [[ "$1" == "-cgroup-v2" ]]; then
+ cgroup2=1
+ usage_file=current
+fi
+
+CGROUP_ROOT='/dev/cgroup/memory'
+MNT='/mnt/huge/'
+
+if [[ ! -e $CGROUP_ROOT ]]; then
+ mkdir -p $CGROUP_ROOT
+ if [[ $cgroup2 ]]; then
+ mount -t cgroup2 none $CGROUP_ROOT
+ sleep 1
+ echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control
+ else
+ mount -t cgroup memory,hugetlb $CGROUP_ROOT
+ fi
+fi
+
+function get_machine_hugepage_size() {
+ hpz=$(grep -i hugepagesize /proc/meminfo)
+ kb=${hpz:14:-3}
+ mb=$(($kb / 1024))
+ echo $mb
+}
+
+MB=$(get_machine_hugepage_size)
+
+function cleanup() {
+ echo cleanup
+ set +e
+ rm -rf "$MNT"/* 2>/dev/null
+ umount "$MNT" 2>/dev/null
+ rmdir "$MNT" 2>/dev/null
+ rmdir "$CGROUP_ROOT"/a/b 2>/dev/null
+ rmdir "$CGROUP_ROOT"/a 2>/dev/null
+ rmdir "$CGROUP_ROOT"/test1 2>/dev/null
+ echo 0 >/proc/sys/vm/nr_hugepages
+ set -e
+}
+
+function assert_state() {
+ local expected_a="$1"
+ local expected_a_hugetlb="$2"
+ local expected_b=""
+ local expected_b_hugetlb=""
+
+ if [ ! -z ${3:-} ] && [ ! -z ${4:-} ]; then
+ expected_b="$3"
+ expected_b_hugetlb="$4"
+ fi
+ local tolerance=$((5 * 1024 * 1024))
+
+ local actual_a
+ actual_a="$(cat "$CGROUP_ROOT"/a/memory.$usage_file)"
+ if [[ $actual_a -lt $(($expected_a - $tolerance)) ]] ||
+ [[ $actual_a -gt $(($expected_a + $tolerance)) ]]; then
+ echo actual a = $((${actual_a%% *} / 1024 / 1024)) MB
+ echo expected a = $((${expected_a%% *} / 1024 / 1024)) MB
+ echo fail
+
+ cleanup
+ exit 1
+ fi
+
+ local actual_a_hugetlb
+ actual_a_hugetlb="$(cat "$CGROUP_ROOT"/a/hugetlb.${MB}MB.$usage_file)"
+ if [[ $actual_a_hugetlb -lt $(($expected_a_hugetlb - $tolerance)) ]] ||
+ [[ $actual_a_hugetlb -gt $(($expected_a_hugetlb + $tolerance)) ]]; then
+ echo actual a hugetlb = $((${actual_a_hugetlb%% *} / 1024 / 1024)) MB
+ echo expected a hugetlb = $((${expected_a_hugetlb%% *} / 1024 / 1024)) MB
+ echo fail
+
+ cleanup
+ exit 1
+ fi
+
+ if [[ -z "$expected_b" || -z "$expected_b_hugetlb" ]]; then
+ return
+ fi
+
+ local actual_b
+ actual_b="$(cat "$CGROUP_ROOT"/a/b/memory.$usage_file)"
+ if [[ $actual_b -lt $(($expected_b - $tolerance)) ]] ||
+ [[ $actual_b -gt $(($expected_b + $tolerance)) ]]; then
+ echo actual b = $((${actual_b%% *} / 1024 / 1024)) MB
+ echo expected b = $((${expected_b%% *} / 1024 / 1024)) MB
+ echo fail
+
+ cleanup
+ exit 1
+ fi
+
+ local actual_b_hugetlb
+ actual_b_hugetlb="$(cat "$CGROUP_ROOT"/a/b/hugetlb.${MB}MB.$usage_file)"
+ if [[ $actual_b_hugetlb -lt $(($expected_b_hugetlb - $tolerance)) ]] ||
+ [[ $actual_b_hugetlb -gt $(($expected_b_hugetlb + $tolerance)) ]]; then
+ echo actual b hugetlb = $((${actual_b_hugetlb%% *} / 1024 / 1024)) MB
+ echo expected b hugetlb = $((${expected_b_hugetlb%% *} / 1024 / 1024)) MB
+ echo fail
+
+ cleanup
+ exit 1
+ fi
+}
+
+function setup() {
+ echo 100 >/proc/sys/vm/nr_hugepages
+ mkdir "$CGROUP_ROOT"/a
+ sleep 1
+ if [[ $cgroup2 ]]; then
+ echo "+hugetlb +memory" >$CGROUP_ROOT/a/cgroup.subtree_control
+ else
+ echo 0 >$CGROUP_ROOT/a/cpuset.mems
+ echo 0 >$CGROUP_ROOT/a/cpuset.cpus
+ fi
+
+ mkdir "$CGROUP_ROOT"/a/b
+
+ if [[ ! $cgroup2 ]]; then
+ echo 0 >$CGROUP_ROOT/a/b/cpuset.mems
+ echo 0 >$CGROUP_ROOT/a/b/cpuset.cpus
+ fi
+
+ mkdir -p "$MNT"
+ mount -t hugetlbfs none "$MNT"
+}
+
+write_hugetlbfs() {
+ local cgroup="$1"
+ local path="$2"
+ local size="$3"
+
+ if [[ $cgroup2 ]]; then
+ echo $$ >$CGROUP_ROOT/$cgroup/cgroup.procs
+ else
+ echo 0 >$CGROUP_ROOT/$cgroup/cpuset.mems
+ echo 0 >$CGROUP_ROOT/$cgroup/cpuset.cpus
+ echo $$ >"$CGROUP_ROOT/$cgroup/tasks"
+ fi
+ ./write_to_hugetlbfs -p "$path" -s "$size" -m 0 -o
+ if [[ $cgroup2 ]]; then
+ echo $$ >$CGROUP_ROOT/cgroup.procs
+ else
+ echo $$ >"$CGROUP_ROOT/tasks"
+ fi
+ echo
+}
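
write_hugetlbfs() leans on the cgroup convention that writing a pid to cgroup.procs (tasks on cgroup v1) migrates that process into the group. A hedged C equivalent of just the migration step (hypothetical helper; cgroup v2 layout assumed):

  #include <stdio.h>
  #include <unistd.h>

  static int enter_cgroup(const char *cgroup_dir)
  {
          char path[4096];
          FILE *f;

          snprintf(path, sizeof(path), "%s/cgroup.procs", cgroup_dir);
          f = fopen(path, "w");
          if (!f)
                  return -1;
          fprintf(f, "%d\n", getpid());

          return fclose(f);
  }
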
+
+set -e
+
+size=$((${MB} * 1024 * 1024 * 25)) # 25 hugepages (50MB with the common 2MB hugepage size).
+
+cleanup
+
+echo
+echo
+echo Test charge, rmdir, uncharge
+setup
+echo mkdir
+mkdir $CGROUP_ROOT/test1
+
+echo write
+write_hugetlbfs test1 "$MNT"/test $size
+
+echo rmdir
+rmdir $CGROUP_ROOT/test1
+mkdir $CGROUP_ROOT/test1
+
+echo uncharge
+rm -rf /mnt/huge/*
+
+cleanup
+
+echo done
+echo
+echo
+if [[ ! $cgroup2 ]]; then
+ echo "Test parent and child hugetlb usage"
+ setup
+
+ echo write
+ write_hugetlbfs a "$MNT"/test $size
+
+ echo Assert memory charged correctly for parent use.
+ assert_state 0 $size 0 0
+
+ write_hugetlbfs a/b "$MNT"/test2 $size
+
+ echo Assert memory charged correctly for child use.
+ assert_state 0 $(($size * 2)) 0 $size
+
+ rmdir "$CGROUP_ROOT"/a/b
+ sleep 5
+ echo Assert memory reparented correctly.
+ assert_state 0 $(($size * 2))
+
+ rm -rf "$MNT"/*
+ umount "$MNT"
+ echo Assert memory uncharged correctly.
+ assert_state 0 0
+
+ cleanup
+fi
+
+echo
+echo
+echo "Test child only hugetlb usage"
+echo setup
+setup
+
+echo write
+write_hugetlbfs a/b "$MNT"/test2 $size
+
+echo Assert memory charged correctly for child only use.
+assert_state 0 $size 0 $size
+
+rmdir "$CGROUP_ROOT"/a/b
+echo Assert memory reparented correctly.
+assert_state 0 $size
+
+rm -rf "$MNT"/*
+umount "$MNT"
+echo Assert memory uncharged correctly.
+assert_state 0 0
+
+cleanup
+
+echo ALL PASS
+
+umount $CGROUP_ROOT
+rm -rf $CGROUP_ROOT
diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
index 5a2d7b8efc40..6af951900aa3 100644
--- a/tools/testing/selftests/vm/map_hugetlb.c
+++ b/tools/testing/selftests/vm/map_hugetlb.c
@@ -45,20 +45,20 @@ static void check_bytes(char *addr)
printf("First hex is %x\n", *((unsigned int *)addr));
}
-static void write_bytes(char *addr)
+static void write_bytes(char *addr, size_t length)
{
unsigned long i;
- for (i = 0; i < LENGTH; i++)
+ for (i = 0; i < length; i++)
*(addr + i) = (char)i;
}
-static int read_bytes(char *addr)
+static int read_bytes(char *addr, size_t length)
{
unsigned long i;
check_bytes(addr);
- for (i = 0; i < LENGTH; i++)
+ for (i = 0; i < length; i++)
if (*(addr + i) != (char)i) {
printf("Mismatch at %lu\n", i);
return 1;
@@ -96,11 +96,11 @@ int main(int argc, char **argv)
printf("Returned address is %p\n", addr);
check_bytes(addr);
- write_bytes(addr);
- ret = read_bytes(addr);
+ write_bytes(addr, length);
+ ret = read_bytes(addr, length);
/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
- if (munmap(addr, LENGTH)) {
+ if (munmap(addr, length)) {
perror("munmap");
exit(1);
}
diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
index 637b6d0ac0d0..11b2301f3aa3 100644
--- a/tools/testing/selftests/vm/mlock2-tests.c
+++ b/tools/testing/selftests/vm/mlock2-tests.c
@@ -67,59 +67,6 @@ out:
return ret;
}
-static uint64_t get_pageflags(unsigned long addr)
-{
- FILE *file;
- uint64_t pfn;
- unsigned long offset;
-
- file = fopen("/proc/self/pagemap", "r");
- if (!file) {
- perror("fopen pagemap");
- _exit(1);
- }
-
- offset = addr / getpagesize() * sizeof(pfn);
-
- if (fseek(file, offset, SEEK_SET)) {
- perror("fseek pagemap");
- _exit(1);
- }
-
- if (fread(&pfn, sizeof(pfn), 1, file) != 1) {
- perror("fread pagemap");
- _exit(1);
- }
-
- fclose(file);
- return pfn;
-}
-
-static uint64_t get_kpageflags(unsigned long pfn)
-{
- uint64_t flags;
- FILE *file;
-
- file = fopen("/proc/kpageflags", "r");
- if (!file) {
- perror("fopen kpageflags");
- _exit(1);
- }
-
- if (fseek(file, pfn * sizeof(flags), SEEK_SET)) {
- perror("fseek kpageflags");
- _exit(1);
- }
-
- if (fread(&flags, sizeof(flags), 1, file) != 1) {
- perror("fread kpageflags");
- _exit(1);
- }
-
- fclose(file);
- return flags;
-}
-
#define VMFLAGS "VmFlags:"
static bool is_vmflag_set(unsigned long addr, const char *vmflag)
@@ -159,19 +106,13 @@ out:
#define RSS "Rss:"
#define LOCKED "lo"
-static bool is_vma_lock_on_fault(unsigned long addr)
+static unsigned long get_value_for_name(unsigned long addr, const char *name)
{
- bool ret = false;
- bool locked;
- FILE *smaps = NULL;
- unsigned long vma_size, vma_rss;
char *line = NULL;
- char *value;
size_t size = 0;
-
- locked = is_vmflag_set(addr, LOCKED);
- if (!locked)
- goto out;
+ char *value_ptr;
+ FILE *smaps = NULL;
+ unsigned long value = -1UL;
smaps = seek_to_smaps_entry(addr);
if (!smaps) {
@@ -180,112 +121,70 @@ static bool is_vma_lock_on_fault(unsigned long addr)
}
while (getline(&line, &size, smaps) > 0) {
- if (!strstr(line, SIZE)) {
+ if (!strstr(line, name)) {
free(line);
line = NULL;
size = 0;
continue;
}
- value = line + strlen(SIZE);
- if (sscanf(value, "%lu kB", &vma_size) < 1) {
+ value_ptr = line + strlen(name);
+ if (sscanf(value_ptr, "%lu kB", &value) < 1) {
printf("Unable to parse smaps entry for Size\n");
goto out;
}
break;
}
- while (getline(&line, &size, smaps) > 0) {
- if (!strstr(line, RSS)) {
- free(line);
- line = NULL;
- size = 0;
- continue;
- }
-
- value = line + strlen(RSS);
- if (sscanf(value, "%lu kB", &vma_rss) < 1) {
- printf("Unable to parse smaps entry for Rss\n");
- goto out;
- }
- break;
- }
-
- ret = locked && (vma_rss < vma_size);
out:
- free(line);
if (smaps)
fclose(smaps);
- return ret;
+ free(line);
+ return value;
}
-#define PRESENT_BIT 0x8000000000000000ULL
-#define PFN_MASK 0x007FFFFFFFFFFFFFULL
-#define UNEVICTABLE_BIT (1UL << 18)
-
-static int lock_check(char *map)
+static bool is_vma_lock_on_fault(unsigned long addr)
{
- unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
+ bool locked;
+ unsigned long vma_size, vma_rss;
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
+ locked = is_vmflag_set(addr, LOCKED);
+ if (!locked)
+ return false;
- /* Both pages should be present */
- if (((page1_flags & PRESENT_BIT) == 0) ||
- ((page2_flags & PRESENT_BIT) == 0)) {
- printf("Failed to make both pages present\n");
- return 1;
- }
+ vma_size = get_value_for_name(addr, SIZE);
+ vma_rss = get_value_for_name(addr, RSS);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
+ /* only one page is faulted in */
+ return (vma_rss < vma_size);
+}
- /* Both pages should be unevictable */
- if (((page1_flags & UNEVICTABLE_BIT) == 0) ||
- ((page2_flags & UNEVICTABLE_BIT) == 0)) {
- printf("Failed to make both pages unevictable\n");
- return 1;
- }
+#define PRESENT_BIT 0x8000000000000000ULL
+#define PFN_MASK 0x007FFFFFFFFFFFFFULL
+#define UNEVICTABLE_BIT (1UL << 18)
- if (!is_vmflag_set((unsigned long)map, LOCKED)) {
- printf("VMA flag %s is missing on page 1\n", LOCKED);
- return 1;
- }
+static int lock_check(unsigned long addr)
+{
+ bool locked;
+ unsigned long vma_size, vma_rss;
- if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
- printf("VMA flag %s is missing on page 2\n", LOCKED);
- return 1;
- }
+ locked = is_vmflag_set(addr, LOCKED);
+ if (!locked)
+ return false;
- return 0;
+ vma_size = get_value_for_name(addr, SIZE);
+ vma_rss = get_value_for_name(addr, RSS);
+
+ return (vma_rss == vma_size);
}
static int unlock_lock_check(char *map)
{
- unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
-
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
-
- if ((page1_flags & UNEVICTABLE_BIT) || (page2_flags & UNEVICTABLE_BIT)) {
- printf("A page is still marked unevictable after unlock\n");
- return 1;
- }
-
if (is_vmflag_set((unsigned long)map, LOCKED)) {
printf("VMA flag %s is present on page 1 after unlock\n", LOCKED);
return 1;
}
- if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
- printf("VMA flag %s is present on page 2 after unlock\n", LOCKED);
- return 1;
- }
-
return 0;
}
@@ -311,7 +210,7 @@ static int test_mlock_lock()
goto unmap;
}
- if (lock_check(map))
+ if (!lock_check((unsigned long)map))
goto unmap;
/* Now unlock and recheck attributes */
@@ -330,64 +229,18 @@ out:
static int onfault_check(char *map)
{
- unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
-
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
-
- /* Neither page should be present */
- if ((page1_flags & PRESENT_BIT) || (page2_flags & PRESENT_BIT)) {
- printf("Pages were made present by MLOCK_ONFAULT\n");
- return 1;
- }
-
*map = 'a';
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
-
- /* Only page 1 should be present */
- if ((page1_flags & PRESENT_BIT) == 0) {
- printf("Page 1 is not present after fault\n");
- return 1;
- } else if (page2_flags & PRESENT_BIT) {
- printf("Page 2 was made present\n");
- return 1;
- }
-
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
-
- /* Page 1 should be unevictable */
- if ((page1_flags & UNEVICTABLE_BIT) == 0) {
- printf("Failed to make faulted page unevictable\n");
- return 1;
- }
-
if (!is_vma_lock_on_fault((unsigned long)map)) {
printf("VMA is not marked for lock on fault\n");
return 1;
}
- if (!is_vma_lock_on_fault((unsigned long)map + page_size)) {
- printf("VMA is not marked for lock on fault\n");
- return 1;
- }
-
return 0;
}
static int unlock_onfault_check(char *map)
{
unsigned long page_size = getpagesize();
- uint64_t page1_flags;
-
- page1_flags = get_pageflags((unsigned long)map);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
-
- if (page1_flags & UNEVICTABLE_BIT) {
- printf("Page 1 is still marked unevictable after unlock\n");
- return 1;
- }
if (is_vma_lock_on_fault((unsigned long)map) ||
is_vma_lock_on_fault((unsigned long)map + page_size)) {
@@ -445,7 +298,6 @@ static int test_lock_onfault_of_present()
char *map;
int ret = 1;
unsigned long page_size = getpagesize();
- uint64_t page1_flags, page2_flags;
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
@@ -465,17 +317,6 @@ static int test_lock_onfault_of_present()
goto unmap;
}
- page1_flags = get_pageflags((unsigned long)map);
- page2_flags = get_pageflags((unsigned long)map + page_size);
- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
-
- /* Page 1 should be unevictable */
- if ((page1_flags & UNEVICTABLE_BIT) == 0) {
- printf("Failed to make present page unevictable\n");
- goto unmap;
- }
-
if (!is_vma_lock_on_fault((unsigned long)map) ||
!is_vma_lock_on_fault((unsigned long)map + page_size)) {
printf("VMA with present pages is not marked lock on fault\n");
@@ -507,7 +348,7 @@ static int test_munlockall()
goto out;
}
- if (lock_check(map))
+ if (!lock_check((unsigned long)map))
goto unmap;
if (munlockall()) {
@@ -549,7 +390,7 @@ static int test_munlockall()
goto out;
}
- if (lock_check(map))
+ if (!lock_check((unsigned long)map))
goto unmap;
if (munlockall()) {
diff --git a/tools/testing/selftests/vm/mremap_dontunmap.c b/tools/testing/selftests/vm/mremap_dontunmap.c
new file mode 100644
index 000000000000..ee06cb0b9efb
--- /dev/null
+++ b/tools/testing/selftests/vm/mremap_dontunmap.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Tests for mremap w/ MREMAP_DONTUNMAP.
+ *
+ * Copyright 2020, Brian Geffon <bgeffon@google.com>
+ */
+#define _GNU_SOURCE
+#include <sys/mman.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+
+#ifndef MREMAP_DONTUNMAP
+#define MREMAP_DONTUNMAP 4
+#endif
+
+unsigned long page_size;
+char *page_buffer;
+
+static void dump_maps(void)
+{
+ char cmd[32];
+
+ snprintf(cmd, sizeof(cmd), "cat /proc/%d/maps", getpid());
+ system(cmd);
+}
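
dump_maps() shells out to cat; the same dump can be produced in-process by streaming /proc/self/maps, as in this sketch (hypothetical helper):

  #include <stdio.h>

  static void dump_maps_direct(void)
  {
          char line[512];
          FILE *f = fopen("/proc/self/maps", "r");

          if (!f)
                  return;
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
  }
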
+
+#define BUG_ON(condition, description) \
+ do { \
+ if (condition) { \
+ fprintf(stderr, "[FAIL]\t%s():%d\t%s:%s\n", __func__, \
+ __LINE__, (description), strerror(errno)); \
+ dump_maps(); \
+ exit(1); \
+ } \
+ } while (0)
+
+// Try a simple operation to test for kernel support. This prevents
+// reporting tests as failed when run on an older kernel.
+static int kernel_support_for_mremap_dontunmap()
+{
+ int ret = 0;
+ unsigned long num_pages = 1;
+ void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(source_mapping == MAP_FAILED, "mmap");
+
+ // This simple remap should only fail if MREMAP_DONTUNMAP isn't
+ // supported.
+ void *dest_mapping =
+ mremap(source_mapping, num_pages * page_size, num_pages * page_size,
+ MREMAP_DONTUNMAP | MREMAP_MAYMOVE, 0);
+ if (dest_mapping == MAP_FAILED) {
+ ret = errno;
+ } else {
+ BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
+ "unable to unmap destination mapping");
+ }
+
+ BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
+ "unable to unmap source mapping");
+ return ret;
+}
+
+// This helper will just validate that an entire mapping contains the expected
+// byte.
+static int check_region_contains_byte(void *addr, unsigned long size, char byte)
+{
+ BUG_ON(size & (page_size - 1),
+ "check_region_contains_byte expects page multiples");
+ BUG_ON((unsigned long)addr & (page_size - 1),
+ "check_region_contains_byte expects page alignment");
+
+ memset(page_buffer, byte, page_size);
+
+ unsigned long num_pages = size / page_size;
+ unsigned long i;
+
+ // Compare each page checking that it contains our expected byte.
+ for (i = 0; i < num_pages; ++i) {
+ int ret =
+ memcmp(addr + (i * page_size), page_buffer, page_size);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+// This test validates that MREMAP_DONTUNMAP moves the page tables while leaving
+// the source mapping mapped.
+static void mremap_dontunmap_simple()
+{
+ unsigned long num_pages = 5;
+
+ void *source_mapping =
+ mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(source_mapping == MAP_FAILED, "mmap");
+
+ memset(source_mapping, 'a', num_pages * page_size);
+
+ // Try to just move the whole mapping anywhere (not fixed).
+ void *dest_mapping =
+ mremap(source_mapping, num_pages * page_size, num_pages * page_size,
+ MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
+ BUG_ON(dest_mapping == MAP_FAILED, "mremap");
+
+ // Validate that the pages have been moved, we know they were moved if
+ // the dest_mapping contains a's.
+ BUG_ON(check_region_contains_byte
+ (dest_mapping, num_pages * page_size, 'a') != 0,
+ "pages did not migrate");
+ BUG_ON(check_region_contains_byte
+ (source_mapping, num_pages * page_size, 0) != 0,
+ "source should have no ptes");
+
+ BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
+ "unable to unmap destination mapping");
+ BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
+ "unable to unmap source mapping");
+}
+
+// This test validates MREMAP_DONTUNMAP will move page tables to a specific
+// destination using MREMAP_FIXED, also while validating that the source
+// remains intact.
+static void mremap_dontunmap_simple_fixed()
+{
+ unsigned long num_pages = 5;
+
+ // Since we want to guarantee that we can remap to a point, we will
+ // create a mapping up front.
+ void *dest_mapping =
+ mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(dest_mapping == MAP_FAILED, "mmap");
+ memset(dest_mapping, 'X', num_pages * page_size);
+
+ void *source_mapping =
+ mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(source_mapping == MAP_FAILED, "mmap");
+ memset(source_mapping, 'a', num_pages * page_size);
+
+ void *remapped_mapping =
+ mremap(source_mapping, num_pages * page_size, num_pages * page_size,
+ MREMAP_FIXED | MREMAP_DONTUNMAP | MREMAP_MAYMOVE,
+ dest_mapping);
+ BUG_ON(remapped_mapping == MAP_FAILED, "mremap");
+ BUG_ON(remapped_mapping != dest_mapping,
+ "mremap should have placed the remapped mapping at dest_mapping");
+
+ // The dest mapping will have been unmapped by mremap so we expect the Xs
+ // to be gone and replaced with a's.
+ BUG_ON(check_region_contains_byte
+ (dest_mapping, num_pages * page_size, 'a') != 0,
+ "pages did not migrate");
+
+ // And the source mapping will have had its ptes dropped.
+ BUG_ON(check_region_contains_byte
+ (source_mapping, num_pages * page_size, 0) != 0,
+ "source should have no ptes");
+
+ BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
+ "unable to unmap destination mapping");
+ BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
+ "unable to unmap source mapping");
+}
+
+// This test validates that we can MREMAP_DONTUNMAP for a portion of an
+// existing mapping.
+static void mremap_dontunmap_partial_mapping()
+{
+ /*
+ * source mapping:
+ * --------------
+ * | aaaaaaaaaa |
+ * --------------
+ * to become:
+ * --------------
+ * | aaaaa00000 |
+ * --------------
+ * With the destination mapping containing 5 pages of a's:
+ * ---------
+ * | aaaaa |
+ * ---------
+ */
+ unsigned long num_pages = 10;
+ void *source_mapping =
+ mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(source_mapping == MAP_FAILED, "mmap");
+ memset(source_mapping, 'a', num_pages * page_size);
+
+ // We will grab the last 5 pages of the source and move them.
+ void *dest_mapping =
+ mremap(source_mapping + (5 * page_size), 5 * page_size,
+ 5 * page_size,
+ MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL);
+ BUG_ON(dest_mapping == MAP_FAILED, "mremap");
+
+ // We expect the first 5 pages of the source to contain a's and the
+ // final 5 pages to contain zeros.
+ BUG_ON(check_region_contains_byte(source_mapping, 5 * page_size, 'a') !=
+ 0, "first 5 pages of source should have original pages");
+ BUG_ON(check_region_contains_byte
+ (source_mapping + (5 * page_size), 5 * page_size, 0) != 0,
+ "final 5 pages of source should have no ptes");
+
+ // Finally we expect the destination to have 5 pages worth of a's.
+ BUG_ON(check_region_contains_byte(dest_mapping, 5 * page_size, 'a') !=
+ 0, "dest mapping should contain ptes from the source");
+
+ BUG_ON(munmap(dest_mapping, 5 * page_size) == -1,
+ "unable to unmap destination mapping");
+ BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
+ "unable to unmap source mapping");
+}
+
+// This test validates that we can remap over only a portion of a mapping.
+static void mremap_dontunmap_partial_mapping_overwrite(void)
+{
+ /*
+ * source mapping:
+ * ---------
+ * |aaaaa|
+ * ---------
+ * dest mapping initially:
+ * -----------
+ * |XXXXXXXXXX|
+ * ------------
+ * Source to become:
+ * ---------
+ * |00000|
+ * ---------
+ * With the destination mapping containing 5 pages of As.
+ * ------------
+ * |aaaaaXXXXX|
+ * ------------
+ */
+ void *source_mapping =
+ mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(source_mapping == MAP_FAILED, "mmap");
+ memset(source_mapping, 'a', 5 * page_size);
+
+ void *dest_mapping =
+ mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(dest_mapping == MAP_FAILED, "mmap");
+ memset(dest_mapping, 'X', 10 * page_size);
+
+ // Move the whole 5-page source over the first half of dest_mapping.
+ void *remapped_mapping =
+ mremap(source_mapping, 5 * page_size,
+ 5 * page_size,
+ MREMAP_DONTUNMAP | MREMAP_MAYMOVE | MREMAP_FIXED, dest_mapping);
+ BUG_ON(remapped_mapping == MAP_FAILED, "mremap");
+ BUG_ON(dest_mapping != remapped_mapping, "expected to remap to dest_mapping");
+
+ BUG_ON(check_region_contains_byte(source_mapping, 5 * page_size, 0) !=
+ 0, "first 5 pages of source should have no ptes");
+
+ // Finally we expect the destination to have 5 pages worth of a's.
+ BUG_ON(check_region_contains_byte(dest_mapping, 5 * page_size, 'a') != 0,
+ "dest mapping should contain ptes from the source");
+
+ // And the last 5 pages of the destination shouldn't have been touched.
+ BUG_ON(check_region_contains_byte(dest_mapping + (5 * page_size),
+ 5 * page_size, 'X') != 0,
+ "dest mapping should have retained the last 5 pages");
+
+ BUG_ON(munmap(dest_mapping, 10 * page_size) == -1,
+ "unable to unmap destination mapping");
+ BUG_ON(munmap(source_mapping, 5 * page_size) == -1,
+ "unable to unmap source mapping");
+}
+
+int main(void)
+{
+ page_size = sysconf(_SC_PAGE_SIZE);
+
+ // Test for kernel support for MREMAP_DONTUNMAP, skipping the test if
+ // it is absent.
+ if (kernel_support_for_mremap_dontunmap() != 0) {
+ printf("No kernel support for MREMAP_DONTUNMAP\n");
+ return KSFT_SKIP;
+ }
+
+ // Keep a page sized buffer around for when we need it.
+ page_buffer =
+ mmap(NULL, page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ BUG_ON(page_buffer == MAP_FAILED, "unable to mmap a page.");
+
+ mremap_dontunmap_simple();
+ mremap_dontunmap_simple_fixed();
+ mremap_dontunmap_partial_mapping();
+ mremap_dontunmap_partial_mapping_overwrite();
+
+ BUG_ON(munmap(page_buffer, page_size) == -1,
+ "unable to unmap page buffer");
+
+ printf("OK\n");
+ return 0;
+}
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index f33714843198..665009ebfba4 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -123,6 +123,28 @@ else
echo "[PASS]"
fi
+echo "--------------------------------------------"
+echo "running 'gup_benchmark -U' (normal/slow gup)"
+echo "--------------------------------------------"
+./gup_benchmark -U
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "------------------------------------------"
+echo "running gup_benchmark -b (pin_user_pages)"
+echo "------------------------------------------"
+./gup_benchmark -b
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
echo "-------------------"
echo "running userfaultfd"
echo "-------------------"
@@ -270,4 +292,19 @@ else
exitcode=1
fi
+echo "------------------------------------"
+echo "running MREMAP_DONTUNMAP smoke test"
+echo "------------------------------------"
+./mremap_dontunmap
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
exit $exitcode
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index d3362777a425..61e5cfeb1350 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -54,6 +54,7 @@
#include <linux/userfaultfd.h>
#include <setjmp.h>
#include <stdbool.h>
+#include <assert.h>
#include "../kselftest.h"
@@ -76,6 +77,8 @@ static int test_type;
#define ALARM_INTERVAL_SECS 10
static volatile bool test_uffdio_copy_eexist = true;
static volatile bool test_uffdio_zeropage_eexist = true;
+/* Whether to test uffd write-protection */
+static bool test_uffdio_wp = false;
static bool map_shared;
static int huge_fd;
@@ -86,6 +89,13 @@ static char *area_src, *area_src_alias, *area_dst, *area_dst_alias;
static char *zeropage;
pthread_attr_t attr;
+/* Userfaultfd test statistics */
+struct uffd_stats {
+ int cpu;
+ unsigned long missing_faults;
+ unsigned long wp_faults;
+};
+
/* pthread_mutex_t starts at page offset 0 */
#define area_mutex(___area, ___nr) \
((pthread_mutex_t *) ((___area) + (___nr)*page_size))
@@ -125,6 +135,37 @@ static void usage(void)
exit(1);
}
+static void uffd_stats_reset(struct uffd_stats *uffd_stats,
+ unsigned long n_cpus)
+{
+ int i;
+
+ for (i = 0; i < n_cpus; i++) {
+ uffd_stats[i].cpu = i;
+ uffd_stats[i].missing_faults = 0;
+ uffd_stats[i].wp_faults = 0;
+ }
+}
+
+static void uffd_stats_report(struct uffd_stats *stats, int n_cpus)
+{
+ int i;
+ unsigned long long miss_total = 0, wp_total = 0;
+
+ for (i = 0; i < n_cpus; i++) {
+ miss_total += stats[i].missing_faults;
+ wp_total += stats[i].wp_faults;
+ }
+
+ printf("userfaults: %llu missing (", miss_total);
+ for (i = 0; i < n_cpus; i++)
+ printf("%lu+", stats[i].missing_faults);
+ printf("\b), %llu wp (", wp_total);
+ for (i = 0; i < n_cpus; i++)
+ printf("%lu+", stats[i].wp_faults);
+ printf("\b)\n");
+}
+
static int anon_release_pages(char *rel_area)
{
int ret = 0;
@@ -245,10 +286,15 @@ struct uffd_test_ops {
void (*alias_mapping)(__u64 *start, size_t len, unsigned long offset);
};
-#define ANON_EXPECTED_IOCTLS ((1 << _UFFDIO_WAKE) | \
+#define SHMEM_EXPECTED_IOCTLS ((1 << _UFFDIO_WAKE) | \
(1 << _UFFDIO_COPY) | \
(1 << _UFFDIO_ZEROPAGE))
+#define ANON_EXPECTED_IOCTLS ((1 << _UFFDIO_WAKE) | \
+ (1 << _UFFDIO_COPY) | \
+ (1 << _UFFDIO_ZEROPAGE) | \
+ (1 << _UFFDIO_WRITEPROTECT))
+
static struct uffd_test_ops anon_uffd_test_ops = {
.expected_ioctls = ANON_EXPECTED_IOCTLS,
.allocate_area = anon_allocate_area,
@@ -257,7 +303,7 @@ static struct uffd_test_ops anon_uffd_test_ops = {
};
static struct uffd_test_ops shmem_uffd_test_ops = {
- .expected_ioctls = ANON_EXPECTED_IOCTLS,
+ .expected_ioctls = SHMEM_EXPECTED_IOCTLS,
.allocate_area = shmem_allocate_area,
.release_pages = shmem_release_pages,
.alias_mapping = noop_alias_mapping,
@@ -281,6 +327,21 @@ static int my_bcmp(char *str1, char *str2, size_t n)
return 0;
}
+static void wp_range(int ufd, __u64 start, __u64 len, bool wp)
+{
+ struct uffdio_writeprotect prms = { 0 };
+
+ /* Write protection page faults */
+ prms.range.start = start;
+ prms.range.len = len;
+ /* Set write-protect when wp is true; otherwise clear it and wake up waiters */
+ prms.mode = wp ? UFFDIO_WRITEPROTECT_MODE_WP : 0;
+
+ if (ioctl(ufd, UFFDIO_WRITEPROTECT, &prms))
+ fprintf(stderr, "clear WP failed for address 0x%Lx\n",
+ start), exit(1);
+}
+
static void *locking_thread(void *arg)
{
unsigned long cpu = (unsigned long) arg;
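
For context on the wp_range() helper added above: UFFDIO_WRITEPROTECT only succeeds on ranges that were registered with write-protect mode. A minimal registration sketch (hypothetical helper; error handling elided, constants from linux/userfaultfd.h):

  #include <linux/userfaultfd.h>
  #include <sys/ioctl.h>

  static int register_wp_range(int ufd, void *addr, unsigned long len)
  {
          struct uffdio_register reg;

          reg.range.start = (unsigned long)addr;
          reg.range.len = len;
          reg.mode = UFFDIO_REGISTER_MODE_MISSING | UFFDIO_REGISTER_MODE_WP;

          return ioctl(ufd, UFFDIO_REGISTER, &reg);
  }
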
@@ -419,7 +480,10 @@ static int __copy_page(int ufd, unsigned long offset, bool retry)
uffdio_copy.dst = (unsigned long) area_dst + offset;
uffdio_copy.src = (unsigned long) area_src + offset;
uffdio_copy.len = page_size;
- uffdio_copy.mode = 0;
+ if (test_uffdio_wp)
+ uffdio_copy.mode = UFFDIO_COPY_MODE_WP;
+ else
+ uffdio_copy.mode = 0;
uffdio_copy.copy = 0;
if (ioctl(ufd, UFFDIO_COPY, &uffdio_copy)) {
/* real retval in ufdio_copy.copy */
@@ -467,8 +531,8 @@ static int uffd_read_msg(int ufd, struct uffd_msg *msg)
return 0;
}
-/* Return 1 if page fault handled by us; otherwise 0 */
-static int uffd_handle_page_fault(struct uffd_msg *msg)
+static void uffd_handle_page_fault(struct uffd_msg *msg,
+ struct uffd_stats *stats)
{
unsigned long offset;
@@ -476,25 +540,32 @@ static int uffd_handle_page_fault(struct uffd_msg *msg)
fprintf(stderr, "unexpected msg event %u\n",
msg->event), exit(1);
- if (bounces & BOUNCE_VERIFY &&
- msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
- fprintf(stderr, "unexpected write fault\n"), exit(1);
+ if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP) {
+ wp_range(uffd, msg->arg.pagefault.address, page_size, false);
+ stats->wp_faults++;
+ } else {
+ /* Missing page faults */
+ if (bounces & BOUNCE_VERIFY &&
+ msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+ fprintf(stderr, "unexpected write fault\n"), exit(1);
- offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
- offset &= ~(page_size-1);
+ offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
+ offset &= ~(page_size-1);
- return copy_page(uffd, offset);
+ if (copy_page(uffd, offset))
+ stats->missing_faults++;
+ }
}
static void *uffd_poll_thread(void *arg)
{
- unsigned long cpu = (unsigned long) arg;
+ struct uffd_stats *stats = (struct uffd_stats *)arg;
+ unsigned long cpu = stats->cpu;
struct pollfd pollfd[2];
struct uffd_msg msg;
struct uffdio_register uffd_reg;
int ret;
char tmp_chr;
- unsigned long userfaults = 0;
pollfd[0].fd = uffd;
pollfd[0].events = POLLIN;
@@ -524,7 +595,7 @@ static void *uffd_poll_thread(void *arg)
msg.event), exit(1);
break;
case UFFD_EVENT_PAGEFAULT:
- userfaults += uffd_handle_page_fault(&msg);
+ uffd_handle_page_fault(&msg, stats);
break;
case UFFD_EVENT_FORK:
close(uffd);
@@ -543,50 +614,67 @@ static void *uffd_poll_thread(void *arg)
break;
}
}
- return (void *)userfaults;
+
+ return NULL;
}
pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *uffd_read_thread(void *arg)
{
- unsigned long *this_cpu_userfaults;
+ struct uffd_stats *stats = (struct uffd_stats *)arg;
struct uffd_msg msg;
- this_cpu_userfaults = (unsigned long *) arg;
- *this_cpu_userfaults = 0;
-
pthread_mutex_unlock(&uffd_read_mutex);
/* from here cancellation is ok */
for (;;) {
if (uffd_read_msg(uffd, &msg))
continue;
- (*this_cpu_userfaults) += uffd_handle_page_fault(&msg);
+ uffd_handle_page_fault(&msg, stats);
}
- return (void *)NULL;
+
+ return NULL;
}
static void *background_thread(void *arg)
{
unsigned long cpu = (unsigned long) arg;
- unsigned long page_nr;
+ unsigned long page_nr, start_nr, mid_nr, end_nr;
+
+ start_nr = cpu * nr_pages_per_cpu;
+ end_nr = (cpu+1) * nr_pages_per_cpu;
+ mid_nr = (start_nr + end_nr) / 2;
- for (page_nr = cpu * nr_pages_per_cpu;
- page_nr < (cpu+1) * nr_pages_per_cpu;
- page_nr++)
+ /* Copy the first half of the pages */
+ for (page_nr = start_nr; page_nr < mid_nr; page_nr++)
+ copy_page_retry(uffd, page_nr * page_size);
+
+ /*
+ * If we need to test uffd-wp, set it up now. Then we'll have
+ * at least the first half of the pages mapped already which
+ * can be write-protected for testing
+ */
+ if (test_uffdio_wp)
+ wp_range(uffd, (unsigned long)area_dst + start_nr * page_size,
+ nr_pages_per_cpu * page_size, true);
+
+ /*
+ * Copy the second half of the pages, handling any
+ * write-protection faults raised along the way
+ */
+ for (page_nr = mid_nr; page_nr < end_nr; page_nr++)
copy_page_retry(uffd, page_nr * page_size);
return NULL;
}
-static int stress(unsigned long *userfaults)
+static int stress(struct uffd_stats *uffd_stats)
{
unsigned long cpu;
pthread_t locking_threads[nr_cpus];
pthread_t uffd_threads[nr_cpus];
pthread_t background_threads[nr_cpus];
- void **_userfaults = (void **) userfaults;
finished = 0;
for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -595,12 +683,13 @@ static int stress(unsigned long *userfaults)
return 1;
if (bounces & BOUNCE_POLL) {
if (pthread_create(&uffd_threads[cpu], &attr,
- uffd_poll_thread, (void *)cpu))
+ uffd_poll_thread,
+ (void *)&uffd_stats[cpu]))
return 1;
} else {
if (pthread_create(&uffd_threads[cpu], &attr,
uffd_read_thread,
- &_userfaults[cpu]))
+ (void *)&uffd_stats[cpu]))
return 1;
pthread_mutex_lock(&uffd_read_mutex);
}
@@ -637,7 +726,8 @@ static int stress(unsigned long *userfaults)
fprintf(stderr, "pipefd write error\n");
return 1;
}
- if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
+ if (pthread_join(uffd_threads[cpu], NULL))
return 1;
} else {
if (pthread_cancel(uffd_threads[cpu]))
@@ -735,17 +825,31 @@ static int faulting_process(int signal_test)
}
for (nr = 0; nr < split_nr_pages; nr++) {
+ int steps = 1;
+ unsigned long offset = nr * page_size;
+
if (signal_test) {
if (sigsetjmp(*sigbuf, 1) != 0) {
- if (nr == lastnr) {
+ if (steps == 1 && nr == lastnr) {
fprintf(stderr, "Signal repeated\n");
return 1;
}
lastnr = nr;
if (signal_test == 1) {
- if (copy_page(uffd, nr * page_size))
- signalled++;
+ if (steps == 1) {
+ /* This is a MISSING request */
+ steps++;
+ if (copy_page(uffd, offset))
+ signalled++;
+ } else {
+ /* This is a WP request */
+ assert(steps == 2);
+ wp_range(uffd,
+ (__u64)area_dst +
+ offset,
+ page_size, false);
+ }
} else {
signalled++;
continue;
@@ -758,8 +862,13 @@ static int faulting_process(int signal_test)
fprintf(stderr,
"nr %lu memory corruption %Lu %Lu\n",
nr, count,
- count_verify[nr]), exit(1);
- }
+ count_verify[nr]);
+ }
+ /*
+ * Trigger a write-protect fault, if armed, by writing
+ * the same value back.
+ */
+ *area_count(area_dst, nr) = count;
}
if (signal_test)
@@ -781,6 +890,11 @@ static int faulting_process(int signal_test)
nr, count,
count_verify[nr]), exit(1);
}
+ /*
+ * Trigger a write-protect fault, if armed, by writing
+ * the same value back.
+ */
+ *area_count(area_dst, nr) = count;
}
if (uffd_test_ops->release_pages(area_dst))
@@ -884,6 +998,8 @@ static int userfaultfd_zeropage_test(void)
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (test_uffdio_wp)
+ uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
fprintf(stderr, "register failure\n"), exit(1);
@@ -908,11 +1024,11 @@ static int userfaultfd_events_test(void)
{
struct uffdio_register uffdio_register;
unsigned long expected_ioctls;
- unsigned long userfaults;
pthread_t uffd_mon;
int err, features;
pid_t pid;
char c;
+ struct uffd_stats stats = { 0 };
printf("testing events (fork, remap, remove): ");
fflush(stdout);
@@ -929,6 +1045,8 @@ static int userfaultfd_events_test(void)
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (test_uffdio_wp)
+ uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
fprintf(stderr, "register failure\n"), exit(1);
@@ -939,7 +1057,7 @@ static int userfaultfd_events_test(void)
"unexpected missing ioctl for anon memory\n"),
exit(1);
- if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, NULL))
+ if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats))
perror("uffd_poll_thread create"), exit(1);
pid = fork();
@@ -955,13 +1073,14 @@ static int userfaultfd_events_test(void)
if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
perror("pipe write"), exit(1);
- if (pthread_join(uffd_mon, (void **)&userfaults))
+ if (pthread_join(uffd_mon, NULL))
return 1;
close(uffd);
- printf("userfaults: %ld\n", userfaults);
- return userfaults != nr_pages;
+ uffd_stats_report(&stats, 1);
+
+ return stats.missing_faults != nr_pages;
}
static int userfaultfd_sig_test(void)
@@ -973,6 +1092,7 @@ static int userfaultfd_sig_test(void)
int err, features;
pid_t pid;
char c;
+ struct uffd_stats stats = { 0 };
printf("testing signal delivery: ");
fflush(stdout);
@@ -988,6 +1108,8 @@ static int userfaultfd_sig_test(void)
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (test_uffdio_wp)
+ uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
fprintf(stderr, "register failure\n"), exit(1);
@@ -1004,7 +1126,7 @@ static int userfaultfd_sig_test(void)
if (uffd_test_ops->release_pages(area_dst))
return 1;
- if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, NULL))
+ if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats))
perror("uffd_poll_thread create"), exit(1);
pid = fork();
@@ -1030,6 +1152,7 @@ static int userfaultfd_sig_test(void)
close(uffd);
return userfaults != 0;
}
+
static int userfaultfd_stress(void)
{
void *area;
@@ -1038,7 +1161,7 @@ static int userfaultfd_stress(void)
struct uffdio_register uffdio_register;
unsigned long cpu;
int err;
- unsigned long userfaults[nr_cpus];
+ struct uffd_stats uffd_stats[nr_cpus];
uffd_test_ops->allocate_area((void **)&area_src);
if (!area_src)
@@ -1119,6 +1242,8 @@ static int userfaultfd_stress(void)
uffdio_register.range.start = (unsigned long) area_dst;
uffdio_register.range.len = nr_pages * page_size;
uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (test_uffdio_wp)
+ uffdio_register.mode |= UFFDIO_REGISTER_MODE_WP;
if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
fprintf(stderr, "register failure\n");
return 1;
@@ -1167,10 +1292,17 @@ static int userfaultfd_stress(void)
if (uffd_test_ops->release_pages(area_dst))
return 1;
+ uffd_stats_reset(uffd_stats, nr_cpus);
+
/* bounce pass */
- if (stress(userfaults))
+ if (stress(uffd_stats))
return 1;
+ /* Clear all the write protections if there is any */
+ if (test_uffdio_wp)
+ wp_range(uffd, (unsigned long)area_dst,
+ nr_pages * page_size, false);
+
/* unregister */
if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
fprintf(stderr, "unregister failure\n");
@@ -1209,10 +1341,7 @@ static int userfaultfd_stress(void)
area_src_alias = area_dst_alias;
area_dst_alias = tmp_area;
- printf("userfaults:");
- for (cpu = 0; cpu < nr_cpus; cpu++)
- printf(" %lu", userfaults[cpu]);
- printf("\n");
+ uffd_stats_report(uffd_stats, nr_cpus);
}
if (err)
@@ -1252,6 +1381,8 @@ static void set_test_type(const char *type)
if (!strcmp(type, "anon")) {
test_type = TEST_ANON;
uffd_test_ops = &anon_uffd_test_ops;
+ /* Only enable write-protect test for anonymous test */
+ test_uffdio_wp = true;
} else if (!strcmp(type, "hugetlb")) {
test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
diff --git a/tools/testing/selftests/vm/write_hugetlb_memory.sh b/tools/testing/selftests/vm/write_hugetlb_memory.sh
new file mode 100644
index 000000000000..d3d0d108924d
--- /dev/null
+++ b/tools/testing/selftests/vm/write_hugetlb_memory.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+size=$1
+populate=$2
+write=$3
+cgroup=$4
+path=$5
+method=$6
+private=$7
+want_sleep=$8
+reserve=$9
+
+echo "Putting task in cgroup '$cgroup'"
+echo $$ > /dev/cgroup/memory/"$cgroup"/cgroup.procs
+
+echo "Method is $method"
+
+set +e
+./write_to_hugetlbfs -p "$path" -s "$size" "$write" "$populate" -m "$method" \
+ "$private" "$want_sleep" "$reserve"
diff --git a/tools/testing/selftests/vm/write_to_hugetlbfs.c b/tools/testing/selftests/vm/write_to_hugetlbfs.c
new file mode 100644
index 000000000000..110bc4e4015d
--- /dev/null
+++ b/tools/testing/selftests/vm/write_to_hugetlbfs.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program reserves and uses hugetlb memory, supporting a bunch of
+ * scenarios needed by the charged_reserved_hugetlb.sh test.
+ */
+
+#include <err.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/shm.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+/* Global definitions. */
+enum method {
+ HUGETLBFS,
+ MMAP_MAP_HUGETLB,
+ SHM,
+ MAX_METHOD
+};
+
+/* Global variables. */
+static const char *self;
+static char *shmaddr;
+static int shmid;
+
+/*
+ * Show usage and exit.
+ */
+static void exit_usage(void)
+{
+ printf("Usage: %s -p <path to hugetlbfs file> -s <size to map> "
+ "[-m <0=hugetlbfs | 1=mmap(MAP_HUGETLB)>] [-l] [-r] "
+ "[-o] [-w] [-n]\n",
+ self);
+ exit(EXIT_FAILURE);
+}
+
+void sig_handler(int signo)
+{
+ printf("Received %d.\n", signo);
+ if (signo == SIGINT) {
+ printf("Deleting the memory\n");
+ if (shmdt((const void *)shmaddr) != 0) {
+ perror("Detach failure");
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(4);
+ }
+
+ shmctl(shmid, IPC_RMID, NULL);
+ printf("Done deleting the memory\n");
+ }
+ exit(2);
+}
+
+int main(int argc, char **argv)
+{
+ int fd = 0;
+ int key = 0;
+ int *ptr = NULL;
+ int c = 0;
+ int size = 0;
+ char path[256] = "";
+ enum method method = MAX_METHOD;
+ int want_sleep = 0, private = 0;
+ int populate = 0;
+ int write = 0;
+ int reserve = 1;
+
+ unsigned long i;
+
+ if (signal(SIGINT, sig_handler) == SIG_ERR)
+ err(1, "\ncan't catch SIGINT\n");
+
+ /* Parse command-line arguments. */
+ setvbuf(stdout, NULL, _IONBF, 0);
+ self = argv[0];
+
+ while ((c = getopt(argc, argv, "s:p:m:owlrn")) != -1) {
+ switch (c) {
+ case 's':
+ size = atoi(optarg);
+ break;
+ case 'p':
+ strncpy(path, optarg, sizeof(path));
+ break;
+ case 'm':
+ if (atoi(optarg) >= MAX_METHOD) {
+ errno = EINVAL;
+ perror("Invalid -m.");
+ exit_usage();
+ }
+ method = atoi(optarg);
+ break;
+ case 'o':
+ populate = 1;
+ break;
+ case 'w':
+ write = 1;
+ break;
+ case 'l':
+ want_sleep = 1;
+ break;
+ case 'r':
+ private = 1;
+ break;
+ case 'n':
+ reserve = 0;
+ break;
+ default:
+ errno = EINVAL;
+ perror("Invalid arg");
+ exit_usage();
+ }
+ }
+
+ if (path[0] != '\0') {
+ printf("Writing to this path: %s\n", path);
+ } else {
+ errno = EINVAL;
+ perror("path not found");
+ exit_usage();
+ }
+
+ if (size != 0) {
+ printf("Writing this size: %d\n", size);
+ } else {
+ errno = EINVAL;
+ perror("size not found");
+ exit_usage();
+ }
+
+ if (!populate)
+ printf("Not populating.\n");
+ else
+ printf("Populating.\n");
+
+ if (!write)
+ printf("Not writing to memory.\n");
+
+ if (method == MAX_METHOD) {
+ errno = EINVAL;
+ perror("-m Invalid");
+ exit_usage();
+ } else
+ printf("Using method=%d\n", method);
+
+ if (!private)
+ printf("Shared mapping.\n");
+ else
+ printf("Private mapping.\n");
+
+ if (!reserve)
+ printf("NO_RESERVE mapping.\n");
+ else
+ printf("RESERVE mapping.\n");
+
+ switch (method) {
+ case HUGETLBFS:
+ printf("Allocating using HUGETLBFS.\n");
+ fd = open(path, O_CREAT | O_RDWR, 0777);
+ if (fd == -1)
+ err(1, "Failed to open file.");
+
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ (private ? MAP_PRIVATE : MAP_SHARED) |
+ (populate ? MAP_POPULATE : 0) |
+ (reserve ? 0 : MAP_NORESERVE),
+ fd, 0);
+
+ if (ptr == MAP_FAILED) {
+ close(fd);
+ err(1, "Error mapping the file");
+ }
+ break;
+ case MMAP_MAP_HUGETLB:
+ printf("Allocating using MAP_HUGETLB.\n");
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ (private ? (MAP_PRIVATE | MAP_ANONYMOUS) :
+ MAP_SHARED) |
+ MAP_HUGETLB | (populate ? MAP_POPULATE : 0) |
+ (reserve ? 0 : MAP_NORESERVE),
+ -1, 0);
+
+ if (ptr == MAP_FAILED)
+ err(1, "mmap");
+
+ printf("Returned address is %p\n", ptr);
+ break;
+ case SHM:
+ printf("Allocating using SHM.\n");
+ shmid = shmget(key, size,
+ SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
+ if (shmid < 0) {
+ shmid = shmget(++key, size,
+ SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
+ if (shmid < 0)
+ err(1, "shmget");
+ }
+ printf("shmid: 0x%x, shmget key:%d\n", shmid, key);
+
+ ptr = shmat(shmid, NULL, 0);
+ if (ptr == (int *)-1) {
+ perror("Shared memory attach failure");
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(2);
+ }
+ printf("shmaddr: %p\n", ptr);
+
+ break;
+ default:
+ errno = EINVAL;
+ err(1, "Invalid method.");
+ }
+
+ if (write) {
+ printf("Writing to memory.\n");
+ memset(ptr, 1, size);
+ }
+
+ if (want_sleep) {
+ /* Signal to caller that we're done. */
+ printf("DONE\n");
+
+ /* Hold memory until external kill signal is delivered. */
+ while (1)
+ sleep(100);
+ }
+
+ if (method == HUGETLBFS)
+ close(fd);
+
+ return 0;
+}
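As a usage note, a shared, populated, written 2 MB mapping backed by a hugetlbfs file might be requested as ./write_to_hugetlbfs -p /mnt/huge/test -s 2097152 -o -w -m 0; the path and size are illustrative, and the wrapper script above is the intended caller.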
diff --git a/tools/testing/selftests/watchdog/.gitignore b/tools/testing/selftests/watchdog/.gitignore
index 5aac51575c7e..61d7b89cdbca 100644
--- a/tools/testing/selftests/watchdog/.gitignore
+++ b/tools/testing/selftests/watchdog/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
watchdog-test
diff --git a/tools/testing/selftests/wireguard/qemu/.gitignore b/tools/testing/selftests/wireguard/qemu/.gitignore
index 415b542a9d59..bfa15e6feb2f 100644
--- a/tools/testing/selftests/wireguard/qemu/.gitignore
+++ b/tools/testing/selftests/wireguard/qemu/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
build/
distfiles/
diff --git a/tools/testing/selftests/x86/.gitignore b/tools/testing/selftests/x86/.gitignore
index 7757f73ff9a3..022a1f3b64ef 100644
--- a/tools/testing/selftests/x86/.gitignore
+++ b/tools/testing/selftests/x86/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*_32
*_64
single_step_syscall
diff --git a/tools/testing/vsock/.gitignore b/tools/testing/vsock/.gitignore
index 7f7a2ccc30c4..87ca2731cff9 100644
--- a/tools/testing/vsock/.gitignore
+++ b/tools/testing/vsock/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.d
vsock_test
vsock_diag_test
diff --git a/tools/thermal/tmon/.gitignore b/tools/thermal/tmon/.gitignore
index 06e96be65276..d9e97a0308f5 100644
--- a/tools/thermal/tmon/.gitignore
+++ b/tools/thermal/tmon/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
/tmon
diff --git a/tools/usb/.gitignore b/tools/usb/.gitignore
index 1b7448981435..fce1ef5a9267 100644
--- a/tools/usb/.gitignore
+++ b/tools/usb/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
ffs-test
testusb
diff --git a/tools/usb/usbip/.gitignore b/tools/usb/usbip/.gitignore
index 03b892c8bd8c..597361a96dbb 100644
--- a/tools/usb/usbip/.gitignore
+++ b/tools/usb/usbip/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
Makefile
Makefile.in
aclocal.m4
diff --git a/tools/virtio/.gitignore b/tools/virtio/.gitignore
index 1cfbb0157a46..075588c4da08 100644
--- a/tools/virtio/.gitignore
+++ b/tools/virtio/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*.d
virtio_test
vringh_test
diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore
index 44f095fa2604..79bb92ae1bb3 100644
--- a/tools/vm/.gitignore
+++ b/tools/vm/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
slabinfo
page-types
diff --git a/usr/.gitignore b/usr/.gitignore
index 610de736b75e..935442ed1eb2 100644
--- a/usr/.gitignore
+++ b/usr/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
gen_init_cpio
initramfs_data.cpio
/initramfs_inc_data
diff --git a/usr/include/.gitignore b/usr/include/.gitignore
index a0991ff4402b..d2fab782cb7d 100644
--- a/usr/include/.gitignore
+++ b/usr/include/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
*
!.gitignore
!Makefile
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 0d9438e9de2a..93bd59b46848 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -788,7 +788,7 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
vcpu_ptimer(vcpu), TIMER_REG_CTL);
case KVM_REG_ARM_PTIMER_CNT:
return kvm_arm_timer_read(vcpu,
- vcpu_vtimer(vcpu), TIMER_REG_CNT);
+ vcpu_ptimer(vcpu), TIMER_REG_CNT);
case KVM_REG_ARM_PTIMER_CVAL:
return kvm_arm_timer_read(vcpu,
vcpu_ptimer(vcpu), TIMER_REG_CVAL);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index eda7b624eab8..48d0ec44ad77 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -64,12 +64,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
{
return 0;
}
-int kvm_arch_check_processor_compat(void)
+int kvm_arch_check_processor_compat(void *opaque)
{
return 0;
}
@@ -625,6 +625,14 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
kvm_update_stolen_time(vcpu);
+
+ if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+ /* The distributor enable bits were changed */
+ preempt_disable();
+ vgic_v4_put(vcpu, false);
+ vgic_v4_load(vcpu);
+ preempt_enable();
+ }
}
}
@@ -1181,55 +1189,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
return r;
}
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log_protect(kvm, log, &flush);
-
- if (flush)
- kvm_flush_remote_tlbs(kvm);
-
- mutex_unlock(&kvm->slots_lock);
- return r;
}
-int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
{
- bool flush = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_clear_dirty_log_protect(kvm, log, &flush);
-
- if (flush)
- kvm_flush_remote_tlbs(kvm);
-
- mutex_unlock(&kvm->slots_lock);
- return r;
+ kvm_flush_remote_tlbs(kvm);
}
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 19c961ac4e3c..e3b9ee268823 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1534,8 +1534,13 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
- phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+ phys_addr_t start, end;
+
+ if (WARN_ON_ONCE(!memslot))
+ return;
+
+ start = memslot->base_gfn << PAGE_SHIFT;
+ end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
stage2_wp_range(kvm, start, end);
@@ -2251,7 +2256,7 @@ out:
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
@@ -2349,17 +2354,10 @@ out:
return ret;
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 17e2bdd4b76f..14a162e295a9 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -12,7 +12,6 @@
#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_host.h>
#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index cc12fe9b2df3..b13a9e3f99dd 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -178,6 +178,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
struct kvm_vcpu *vcpu)
{
char *type;
+ bool pending;
+
if (irq->intid < VGIC_NR_SGIS)
type = "SGI";
else if (irq->intid < VGIC_NR_PRIVATE_IRQS)
@@ -190,6 +192,16 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
if (irq->intid == 0 || irq->intid == VGIC_NR_PRIVATE_IRQS)
print_header(s, irq, vcpu);
+ pending = irq->pending_latch;
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ int err;
+
+ err = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &pending);
+ WARN_ON_ONCE(err);
+ }
+
seq_printf(s, " %s %4d "
" %2d "
"%d%d%d%d%d%d%d "
@@ -201,7 +213,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
"\n",
type, irq->intid,
(irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1,
- irq->pending_latch,
+ pending,
irq->line_level,
irq->active,
irq->enabled,
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index ebc218840fc2..e72dcc454247 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -3,9 +3,11 @@
* VGICv3 MMIO handling functions
*/
+#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>
@@ -69,6 +71,8 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
if (vgic->enabled)
value |= GICD_CTLR_ENABLE_SS_G1;
value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+ if (vgic->nassgireq)
+ value |= GICD_CTLR_nASSGIreq;
break;
case GICD_TYPER:
value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
@@ -80,6 +84,10 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
}
break;
+ case GICD_TYPER2:
+ if (kvm_vgic_global_state.has_gicv4_1)
+ value = GICD_TYPER2_nASSGIcap;
+ break;
case GICD_IIDR:
value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
@@ -97,17 +105,46 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
unsigned long val)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- bool was_enabled = dist->enabled;
switch (addr & 0x0c) {
- case GICD_CTLR:
+ case GICD_CTLR: {
+ bool was_enabled, is_hwsgi;
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ was_enabled = dist->enabled;
+ is_hwsgi = dist->nassgireq;
+
dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
- if (!was_enabled && dist->enabled)
+ /* Not a GICv4.1? No HW SGIs */
+ if (!kvm_vgic_global_state.has_gicv4_1)
+ val &= ~GICD_CTLR_nASSGIreq;
+
+ /* Dist stays enabled? nASSGIreq is RO */
+ if (was_enabled && dist->enabled) {
+ val &= ~GICD_CTLR_nASSGIreq;
+ val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
+ }
+
+ /* Switching HW SGIs? */
+ dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+ if (is_hwsgi != dist->nassgireq)
+ vgic_v4_configure_vsgis(vcpu->kvm);
+
+ if (kvm_vgic_global_state.has_gicv4_1 &&
+ was_enabled != dist->enabled)
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
+ else if (!was_enabled && dist->enabled)
vgic_kick_vcpus(vcpu->kvm);
+
+ mutex_unlock(&vcpu->kvm->lock);
break;
+ }
case GICD_TYPER:
+ case GICD_TYPER2:
case GICD_IIDR:
+ /* This is at best for documentation purposes... */
return;
}
}
@@ -116,10 +153,22 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
switch (addr & 0x0c) {
+ case GICD_TYPER2:
case GICD_IIDR:
if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
return -EINVAL;
+ return 0;
+ case GICD_CTLR:
+ /* Not a GICv4.1? No HW SGIs */
+ if (!kvm_vgic_global_state.has_gicv4_1)
+ val &= ~GICD_CTLR_nASSGIreq;
+
+ dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+ dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+ return 0;
}
vgic_mmio_write_v3_misc(vcpu, addr, len, val);
@@ -257,8 +306,18 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
*/
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ bool state = irq->pending_latch;
- if (irq->pending_latch)
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ int err;
+
+ err = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &state);
+ WARN_ON(err);
+ }
+
+ if (state)
value |= (1U << i);
vgic_put_irq(vcpu->kvm, irq);
@@ -942,8 +1001,18 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
* generate interrupts of either group.
*/
if (!irq->group || allow_group1) {
- irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ if (!irq->hw) {
+ irq->pending_latch = true;
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ } else {
+ /* HW SGI? Ask the GIC to inject it */
+ int err;
+ err = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ true);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
} else {
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 97fb2a40e6ba..2199302597fa 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -5,6 +5,8 @@
#include <linux/bitops.h>
#include <linux/bsearch.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
@@ -59,6 +61,11 @@ unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
return value;
}
+static void vgic_update_vsgi(struct vgic_irq *irq)
+{
+ WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
+}
+
void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
unsigned int len, unsigned long val)
{
@@ -71,7 +78,12 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->group = !!(val & BIT(i));
- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ vgic_update_vsgi(irq);
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ } else {
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+ }
vgic_put_irq(vcpu->kvm, irq);
}
@@ -113,7 +125,21 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (vgic_irq_is_mapped_level(irq)) {
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ if (!irq->enabled) {
+ struct irq_data *data;
+
+ irq->enabled = true;
+ data = &irq_to_desc(irq->host_irq)->irq_data;
+ while (irqd_irq_disabled(data))
+ enable_irq(irq->host_irq);
+ }
+
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+
+ continue;
+ } else if (vgic_irq_is_mapped_level(irq)) {
bool was_high = irq->line_level;
/*
@@ -148,6 +174,8 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
+ disable_irq_nosync(irq->host_irq);
irq->enabled = false;
@@ -167,10 +195,22 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
unsigned long flags;
+ bool val;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (irq_is_pending(irq))
- value |= (1U << i);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ int err;
+
+ val = false;
+ err = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &val);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ } else {
+ val = irq_is_pending(irq);
+ }
+
+ value |= ((u32)val << i);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
@@ -215,6 +255,21 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
}
raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ /* HW SGI? Ask the GIC to inject it */
+ int err;
+ err = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ true);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+
+ continue;
+ }
+
if (irq->hw)
vgic_hw_irq_spending(vcpu, irq, is_uaccess);
else
@@ -269,6 +324,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ /* HW SGI? Ask the GIC to clear its pending bit */
+ int err;
+ err = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ false);
+ WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+
+ continue;
+ }
+
if (irq->hw)
vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
else
@@ -318,8 +387,15 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+ } else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+ /*
+ * GICv4.1 VSGI feature doesn't track an active state,
+ * so let's not kid ourselves, there is nothing we can
+ * do here.
+ */
+ irq->active = false;
} else {
u32 model = vcpu->kvm->arch.vgic.vgic_model;
u8 active_source;
@@ -493,6 +569,8 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* Narrow the priority range to what we actually support */
irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
+ if (irq->hw && vgic_irq_is_sgi(irq->intid))
+ vgic_update_vsgi(irq);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 1bc09b523486..2c9fc13e2c59 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -540,6 +540,8 @@ int vgic_v3_map_resources(struct kvm *kvm)
goto out;
}
+ if (kvm_vgic_global_state.has_gicv4_1)
+ vgic_v4_configure_vsgis(kvm);
dist->ready = true;
out:
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 1eb0f8c76219..27ac833e5ec7 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -97,6 +97,104 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
return IRQ_HANDLED;
}
+static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
+{
+ vpe->sgi_config[irq->intid].enabled = irq->enabled;
+ vpe->sgi_config[irq->intid].group = irq->group;
+ vpe->sgi_config[irq->intid].priority = irq->priority;
+}
+
+static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
+{
+ struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+ int i;
+
+ /*
+ * With GICv4.1, every virtual SGI can be directly injected. So
+ * let's pretend that they are HW interrupts, tied to a host
+ * IRQ. The SGI code will do its magic.
+ */
+ for (i = 0; i < VGIC_NR_SGIS; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+ struct irq_desc *desc;
+ unsigned long flags;
+ int ret;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (irq->hw)
+ goto unlock;
+
+ irq->hw = true;
+ irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);
+
+ /* Transfer the full irq state to the vPE */
+ vgic_v4_sync_sgi_config(vpe, irq);
+ desc = irq_to_desc(irq->host_irq);
+ ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
+ false);
+ if (!WARN_ON(ret)) {
+ /* Transfer pending state */
+ ret = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ irq->pending_latch);
+ WARN_ON(ret);
+ irq->pending_latch = false;
+ }
+ unlock:
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+}
+
+static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ for (i = 0; i < VGIC_NR_SGIS; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+ struct irq_desc *desc;
+ unsigned long flags;
+ int ret;
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (!irq->hw)
+ goto unlock;
+
+ irq->hw = false;
+ ret = irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &irq->pending_latch);
+ WARN_ON(ret);
+
+ desc = irq_to_desc(irq->host_irq);
+ irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+ unlock:
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+}
+
+/* Must be called with the kvm lock held */
+void vgic_v4_configure_vsgis(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_arm_halt_guest(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (dist->nassgireq)
+ vgic_v4_enable_vsgis(vcpu);
+ else
+ vgic_v4_disable_vsgis(vcpu);
+ }
+
+ kvm_arm_resume_guest(kvm);
+}
+
/**
* vgic_v4_init - Initialize the GICv4 data structures
* @kvm: Pointer to the VM being initialized
@@ -141,6 +239,7 @@ int vgic_v4_init(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm) {
int irq = dist->its_vm.vpes[i]->irq;
+ unsigned long irq_flags = DB_IRQ_FLAGS;
/*
* Don't automatically enable the doorbell, as we're
@@ -148,8 +247,14 @@ int vgic_v4_init(struct kvm *kvm)
* blocked. Also disable the lazy disabling, as the
* doorbell could kick us out of the guest too
* early...
+ *
+ * On GICv4.1, the doorbell is managed in HW and must
+ * be left enabled.
*/
- irq_set_status_flags(irq, DB_IRQ_FLAGS);
+ if (kvm_vgic_global_state.has_gicv4_1)
+ irq_flags &= ~IRQ_NOAUTOEN;
+ irq_set_status_flags(irq, irq_flags);
+
ret = request_irq(irq, vgic_v4_doorbell_handler,
0, "vcpu", vcpu);
if (ret) {
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index c7fefd6b1c80..769e4802645e 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -316,5 +316,6 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
+void vgic_v4_configure_vsgis(struct kvm *kvm);
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 70f03ce0e5c1..74bdb7bf3295 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -149,8 +149,6 @@ static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
-static bool largepages_enabled = true;
-
#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
@@ -566,7 +564,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
return NULL;
for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
- slots->id_to_index[i] = slots->memslots[i].id = i;
+ slots->id_to_index[i] = -1;
return slots;
}
@@ -580,18 +578,14 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
memslot->dirty_bitmap = NULL;
}
-/*
- * Free any memory in @free but not in @dont.
- */
-static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
- kvm_destroy_dirty_bitmap(free);
+ kvm_destroy_dirty_bitmap(slot);
- kvm_arch_free_memslot(kvm, free, dont);
+ kvm_arch_free_memslot(kvm, slot);
- free->npages = 0;
+ slot->flags = 0;
+ slot->npages = 0;
}
static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
@@ -602,7 +596,7 @@ static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
return;
kvm_for_each_memslot(memslot, slots)
- kvm_free_memslot(kvm, memslot, NULL);
+ kvm_free_memslot(kvm, memslot);
kvfree(slots);
}
@@ -860,9 +854,9 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
/*
* Allocation size is twice as large as the actual dirty bitmap size.
- * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
+ * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
*/
-static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
+static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
@@ -874,63 +868,165 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
}
/*
- * Insert memslot and re-sort memslots based on their GFN,
- * so binary search could be used to lookup GFN.
- * Sorting algorithm takes advantage of having initially
- * sorted array and known changed memslot position.
+ * Delete a memslot by decrementing the number of used slots and shifting all
+ * other entries in the array forward one spot.
*/
-static void update_memslots(struct kvm_memslots *slots,
- struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+static inline void kvm_memslot_delete(struct kvm_memslots *slots,
+ struct kvm_memory_slot *memslot)
{
- int id = new->id;
- int i = slots->id_to_index[id];
struct kvm_memory_slot *mslots = slots->memslots;
+ int i;
- WARN_ON(mslots[i].id != id);
- switch (change) {
- case KVM_MR_CREATE:
- slots->used_slots++;
- WARN_ON(mslots[i].npages || !new->npages);
- break;
- case KVM_MR_DELETE:
- slots->used_slots--;
- WARN_ON(new->npages || !mslots[i].npages);
- break;
- default:
- break;
- }
+ if (WARN_ON(slots->id_to_index[memslot->id] == -1))
+ return;
- while (i < KVM_MEM_SLOTS_NUM - 1 &&
- new->base_gfn <= mslots[i + 1].base_gfn) {
- if (!mslots[i + 1].npages)
- break;
+ slots->used_slots--;
+
+ if (atomic_read(&slots->lru_slot) >= slots->used_slots)
+ atomic_set(&slots->lru_slot, 0);
+
+ for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
mslots[i] = mslots[i + 1];
slots->id_to_index[mslots[i].id] = i;
- i++;
}
+ mslots[i] = *memslot;
+ slots->id_to_index[memslot->id] = -1;
+}
+
+/*
+ * "Insert" a new memslot by incrementing the number of used slots. Returns
+ * the new slot's initial index into the memslots array.
+ */
+static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
+{
+ return slots->used_slots++;
+}
+
+/*
+ * Move a changed memslot backwards in the array by shifting existing slots
+ * with a higher GFN toward the front of the array. Note, the changed memslot
+ * itself is not preserved in the array, i.e. not swapped at this time, only
+ * its new index into the array is tracked. Returns the changed memslot's
+ * current index into the memslots array.
+ */
+static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
+ struct kvm_memory_slot *memslot)
+{
+ struct kvm_memory_slot *mslots = slots->memslots;
+ int i;
+
+ if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
+ WARN_ON_ONCE(!slots->used_slots))
+ return -1;
/*
- * The ">=" is needed when creating a slot with base_gfn == 0,
- * so that it moves before all those with base_gfn == npages == 0.
- *
- * On the other hand, if new->npages is zero, the above loop has
- * already left i pointing to the beginning of the empty part of
- * mslots, and the ">=" would move the hole backwards in this
- * case---which is wrong. So skip the loop when deleting a slot.
+ * Move the target memslot backward in the array by shifting existing
+ * memslots with a higher GFN (than the target memslot) towards the
+ * front of the array.
*/
- if (new->npages) {
- while (i > 0 &&
- new->base_gfn >= mslots[i - 1].base_gfn) {
- mslots[i] = mslots[i - 1];
- slots->id_to_index[mslots[i].id] = i;
- i--;
- }
- } else
- WARN_ON_ONCE(i != slots->used_slots);
+ for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
+ if (memslot->base_gfn > mslots[i + 1].base_gfn)
+ break;
+
+ WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
+
+ /* Shift the next memslot forward one and update its index. */
+ mslots[i] = mslots[i + 1];
+ slots->id_to_index[mslots[i].id] = i;
+ }
+ return i;
+}
+
+/*
+ * Move a changed memslot forwards in the array by shifting existing slots with
+ * a lower GFN toward the back of the array. Note, the changed memslot itself
+ * is not preserved in the array, i.e. not swapped at this time, only its new
+ * index into the array is tracked. Returns the changed memslot's final index
+ * into the memslots array.
+ */
+static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
+ struct kvm_memory_slot *memslot,
+ int start)
+{
+ struct kvm_memory_slot *mslots = slots->memslots;
+ int i;
+
+ for (i = start; i > 0; i--) {
+ if (memslot->base_gfn < mslots[i - 1].base_gfn)
+ break;
+
+ WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
+
+ /* Shift the next memslot back one and update its index. */
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ }
+ return i;
+}
+
+/*
+ * Re-sort memslots based on their GFN to account for an added, deleted, or
+ * moved memslot. Sorting memslots by GFN allows using a binary search during
+ * memslot lookup.
+ *
+ * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry
+ * at memslots[0] has the highest GFN.
+ *
+ * The sorting algorithm takes advantage of having initially sorted memslots
+ * and knowing the position of the changed memslot. Sorting is also optimized
+ * by not swapping the updated memslot and instead only shifting other memslots
+ * and tracking the new index for the updated memslot. Only once its final
+ * index is known is the updated memslot copied into its position in the array.
+ *
+ * - When deleting a memslot, the deleted memslot simply needs to be moved to
+ * the end of the array.
+ *
+ * - When creating a memslot, the algorithm "inserts" the new memslot at the
+ * end of the array and then moves it forward to its correct location.
+ *
+ * - When moving a memslot, the algorithm first moves the updated memslot
+ * backward to handle the scenario where the memslot's GFN was changed to a
+ * lower value. update_memslots() then falls through and runs the same flow
+ * as creating a memslot to move the memslot forward to handle the scenario
+ * where its GFN was changed to a higher value.
+ *
+ * Note, slots are sorted from highest->lowest instead of lowest->highest for
+ * historical reasons. Originally, invalid memslots were denoted by having
+ * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
+ * to the end of the array. The current algorithm uses dedicated logic to
+ * delete a memslot and thus does not rely on invalid memslots having GFN=0.
+ *
+ * The other historical motivation for highest->lowest was to improve the
+ * performance of memslot lookup. KVM originally used a linear search starting
+ * at memslots[0]. On x86, the largest memslot usually has one of the highest,
+ * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
+ * single memslot above the 4gb boundary. As the largest memslot is also the
+ * most likely to be referenced, sorting it to the front of the array was
+ * advantageous. The current binary search starts from the middle of the array
+ * and uses an LRU pointer to improve performance for all memslots and GFNs.
+ */
+static void update_memslots(struct kvm_memslots *slots,
+ struct kvm_memory_slot *memslot,
+ enum kvm_mr_change change)
+{
+ int i;
+
+ if (change == KVM_MR_DELETE) {
+ kvm_memslot_delete(slots, memslot);
+ } else {
+ if (change == KVM_MR_CREATE)
+ i = kvm_memslot_insert_back(slots);
+ else
+ i = kvm_memslot_move_backward(slots, memslot);
+ i = kvm_memslot_move_forward(slots, memslot, i);
- mslots[i] = *new;
- slots->id_to_index[mslots[i].id] = i;
+ /*
+ * Copy the memslot to its new position in memslots and update
+ * its index accordingly.
+ */
+ slots->memslots[i] = *memslot;
+ slots->id_to_index[memslot->id] = i;
+ }
}
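The shift-based re-sort described above is easy to model outside the kernel. A toy sketch of the move-forward pass over a highest-to-lowest array, using hypothetical types rather than the kernel's structures:

#include <stdio.h>

/* Slots stay sorted by base_gfn from highest to lowest; a changed
 * slot is shifted (never swapped) toward its final index. */
struct toy_slot { int id; unsigned long base_gfn; };

static int move_forward(struct toy_slot *s, int start, struct toy_slot new)
{
	int i;

	for (i = start; i > 0; i--) {
		if (new.base_gfn < s[i - 1].base_gfn)
			break;
		s[i] = s[i - 1];	/* shift the lower-GFN slot back */
	}
	s[i] = new;
	return i;
}

int main(void)
{
	struct toy_slot s[4] = { {0, 900}, {1, 500}, {2, 100}, {0, 0} };
	/* "create": insert at the back (index 3), then bubble forward */
	int idx = move_forward(s, 3, (struct toy_slot){ 3, 700 });

	printf("new slot landed at index %d\n", idx);	/* prints 1 */
	return 0;
}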
static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
@@ -984,6 +1080,112 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
}
/*
+ * Note, at a minimum, the current number of used slots must be allocated, even
+ * when deleting a memslot, as we need a complete duplicate of the memslots for
+ * use when invalidating a memslot prior to deleting/moving the memslot.
+ */
+static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
+ enum kvm_mr_change change)
+{
+ struct kvm_memslots *slots;
+ size_t old_size, new_size;
+
+ old_size = sizeof(struct kvm_memslots) +
+ (sizeof(struct kvm_memory_slot) * old->used_slots);
+
+ if (change == KVM_MR_CREATE)
+ new_size = old_size + sizeof(struct kvm_memory_slot);
+ else
+ new_size = old_size;
+
+ slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
+ if (likely(slots))
+ memcpy(slots, old, old_size);
+
+ return slots;
+}
+
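The sizing above boils down to copying a header plus a flexible array, reserving room for one extra entry when a slot is being created; a userspace sketch under those assumptions (toy types, not the kernel's):

#include <stdlib.h>
#include <string.h>

struct toy_slots {
	int used;
	struct { int id; unsigned long base_gfn; } slot[];
};

/* Duplicate header + used entries; grow by one slot on "create". */
static struct toy_slots *dup_slots(const struct toy_slots *old, int creating)
{
	size_t old_size = sizeof(*old) + old->used * sizeof(old->slot[0]);
	size_t new_size = old_size + (creating ? sizeof(old->slot[0]) : 0);
	struct toy_slots *new = calloc(1, new_size);

	if (new)
		memcpy(new, old, old_size);
	return new;
}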
+static int kvm_set_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new, int as_id,
+ enum kvm_mr_change change)
+{
+ struct kvm_memory_slot *slot;
+ struct kvm_memslots *slots;
+ int r;
+
+ slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
+ if (!slots)
+ return -ENOMEM;
+
+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+ /*
+ * Note, the INVALID flag needs to be in the appropriate entry
+ * in the freshly allocated memslots, not in @old or @new.
+ */
+ slot = id_to_memslot(slots, old->id);
+ slot->flags |= KVM_MEMSLOT_INVALID;
+
+ /*
+ * We can re-use the old memslots; the only difference from the
+ * newly installed memslots is the invalid flag, which will get
+ * dropped by update_memslots anyway. We'll also revert to the
+ * old memslots if preparing the new memory region fails.
+ */
+ slots = install_new_memslots(kvm, as_id, slots);
+
+ /* From this point no new shadow pages pointing to a deleted,
+ * or moved, memslot will be created.
+ *
+ * validation of sp->gfn happens in:
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+ * - kvm_is_visible_gfn (mmu_check_root)
+ */
+ kvm_arch_flush_shadow_memslot(kvm, slot);
+ }
+
+ r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
+ if (r)
+ goto out_slots;
+
+ update_memslots(slots, new, change);
+ slots = install_new_memslots(kvm, as_id, slots);
+
+ kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+
+ kvfree(slots);
+ return 0;
+
+out_slots:
+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
+ slots = install_new_memslots(kvm, as_id, slots);
+ kvfree(slots);
+ return r;
+}
+
+static int kvm_delete_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot *old, int as_id)
+{
+ struct kvm_memory_slot new;
+ int r;
+
+ if (!old->npages)
+ return -EINVAL;
+
+ memset(&new, 0, sizeof(new));
+ new.id = old->id;
+
+ r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
+ if (r)
+ return r;
+
+ kvm_free_memslot(kvm, old);
+ return 0;
+}
+
+/*
* Allocate some memory and give it an address in the guest physical address
* space.
*
@@ -994,162 +1196,118 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem)
{
- int r;
- gfn_t base_gfn;
- unsigned long npages;
- struct kvm_memory_slot *slot;
struct kvm_memory_slot old, new;
- struct kvm_memslots *slots = NULL, *old_memslots;
- int as_id, id;
+ struct kvm_memory_slot *tmp;
enum kvm_mr_change change;
+ int as_id, id;
+ int r;
r = check_memory_region_flags(mem);
if (r)
- goto out;
+ return r;
- r = -EINVAL;
as_id = mem->slot >> 16;
id = (u16)mem->slot;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
- goto out;
+ return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
- goto out;
+ return -EINVAL;
/* We can read the guest memory with __xxx_user() later on. */
if ((id < KVM_USER_MEM_SLOTS) &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
- goto out;
+ return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
- goto out;
+ return -EINVAL;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
- goto out;
-
- slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
- base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
- npages = mem->memory_size >> PAGE_SHIFT;
+ return -EINVAL;
- if (npages > KVM_MEM_MAX_NR_PAGES)
- goto out;
+ /*
+ * Make a full copy of the old memslot; the pointer will become stale
+ * when the memslots are re-sorted by update_memslots(), and the old
+ * memslot needs to be referenced after calling update_memslots(), e.g.
+ * to free its resources and for arch specific behavior.
+ */
+ tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
+ if (tmp) {
+ old = *tmp;
+ tmp = NULL;
+ } else {
+ memset(&old, 0, sizeof(old));
+ old.id = id;
+ }
- new = old = *slot;
+ if (!mem->memory_size)
+ return kvm_delete_memslot(kvm, mem, &old, as_id);
new.id = id;
- new.base_gfn = base_gfn;
- new.npages = npages;
+ new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
+ new.npages = mem->memory_size >> PAGE_SHIFT;
new.flags = mem->flags;
+ new.userspace_addr = mem->userspace_addr;
- if (npages) {
- if (!old.npages)
- change = KVM_MR_CREATE;
- else { /* Modify an existing slot. */
- if ((mem->userspace_addr != old.userspace_addr) ||
- (npages != old.npages) ||
- ((new.flags ^ old.flags) & KVM_MEM_READONLY))
- goto out;
+ if (new.npages > KVM_MEM_MAX_NR_PAGES)
+ return -EINVAL;
- if (base_gfn != old.base_gfn)
- change = KVM_MR_MOVE;
- else if (new.flags != old.flags)
- change = KVM_MR_FLAGS_ONLY;
- else { /* Nothing to change. */
- r = 0;
- goto out;
- }
- }
- } else {
- if (!old.npages)
- goto out;
+ if (!old.npages) {
+ change = KVM_MR_CREATE;
+ new.dirty_bitmap = NULL;
+ memset(&new.arch, 0, sizeof(new.arch));
+ } else { /* Modify an existing slot. */
+ if ((new.userspace_addr != old.userspace_addr) ||
+ (new.npages != old.npages) ||
+ ((new.flags ^ old.flags) & KVM_MEM_READONLY))
+ return -EINVAL;
- change = KVM_MR_DELETE;
- new.base_gfn = 0;
- new.flags = 0;
+ if (new.base_gfn != old.base_gfn)
+ change = KVM_MR_MOVE;
+ else if (new.flags != old.flags)
+ change = KVM_MR_FLAGS_ONLY;
+ else /* Nothing to change. */
+ return 0;
+
+ /* Copy dirty_bitmap and arch from the current memslot. */
+ new.dirty_bitmap = old.dirty_bitmap;
+ memcpy(&new.arch, &old.arch, sizeof(new.arch));
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
/* Check for overlaps */
- r = -EEXIST;
- kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
- if (slot->id == id)
+ kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
+ if (tmp->id == id)
continue;
- if (!((base_gfn + npages <= slot->base_gfn) ||
- (base_gfn >= slot->base_gfn + slot->npages)))
- goto out;
+ if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
+ (new.base_gfn >= tmp->base_gfn + tmp->npages)))
+ return -EEXIST;
}
}
- /* Free page dirty bitmap if unneeded */
+ /* Allocate/free page dirty bitmap as needed */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
+ else if (!new.dirty_bitmap) {
+ r = kvm_alloc_dirty_bitmap(&new);
+ if (r)
+ return r;
- r = -ENOMEM;
- if (change == KVM_MR_CREATE) {
- new.userspace_addr = mem->userspace_addr;
-
- if (kvm_arch_create_memslot(kvm, &new, npages))
- goto out_free;
- }
-
- /* Allocate page dirty bitmap if needed */
- if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
- if (kvm_create_dirty_bitmap(&new) < 0)
- goto out_free;
- }
-
- slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
- if (!slots)
- goto out_free;
- memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
-
- if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
- slot = id_to_memslot(slots, id);
- slot->flags |= KVM_MEMSLOT_INVALID;
-
- old_memslots = install_new_memslots(kvm, as_id, slots);
-
- /* From this point no new shadow pages pointing to a deleted,
- * or moved, memslot will be created.
- *
- * validation of sp->gfn happens in:
- * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
- * - kvm_is_visible_gfn (mmu_check_root)
- */
- kvm_arch_flush_shadow_memslot(kvm, slot);
-
- /*
- * We can re-use the old_memslots from above, the only difference
- * from the currently installed memslots is the invalid flag. This
- * will get overwritten by update_memslots anyway.
- */
- slots = old_memslots;
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ bitmap_set(new.dirty_bitmap, 0, new.npages);
}
- r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
+ r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
if (r)
- goto out_slots;
-
- /* actual memory is freed via old in kvm_free_memslot below */
- if (change == KVM_MR_DELETE) {
- new.dirty_bitmap = NULL;
- memset(&new.arch, 0, sizeof(new.arch));
- }
-
- update_memslots(slots, &new, change);
- old_memslots = install_new_memslots(kvm, as_id, slots);
-
- kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
+ goto out_bitmap;
- kvm_free_memslot(kvm, &old, &new);
- kvfree(old_memslots);
+ if (old.dirty_bitmap && !new.dirty_bitmap)
+ kvm_destroy_dirty_bitmap(&old);
return 0;
-out_slots:
- kvfree(slots);
-out_free:
- kvm_free_memslot(kvm, &new, &old);
-out:
+out_bitmap:
+ if (new.dirty_bitmap && !old.dirty_bitmap)
+ kvm_destroy_dirty_bitmap(&new);
return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
@@ -1175,31 +1333,43 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
return kvm_set_memory_region(kvm, mem);
}
-int kvm_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log, int *is_dirty)
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+/**
+ * kvm_get_dirty_log - get a snapshot of dirty pages
+ * @kvm: pointer to kvm instance
+ * @log: slot id and address to which we copy the log
+ * @is_dirty: set to '1' if any dirty pages were found
+ * @memslot: set to the associated memslot, always valid on success
+ */
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ int *is_dirty, struct kvm_memory_slot **memslot)
{
struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
int i, as_id, id;
unsigned long n;
unsigned long any = 0;
+ *memslot = NULL;
+ *is_dirty = 0;
+
as_id = log->slot >> 16;
id = (u16)log->slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
- memslot = id_to_memslot(slots, id);
- if (!memslot->dirty_bitmap)
+ *memslot = id_to_memslot(slots, id);
+ if (!(*memslot) || !(*memslot)->dirty_bitmap)
return -ENOENT;
- n = kvm_dirty_bitmap_bytes(memslot);
+ kvm_arch_sync_dirty_log(kvm, *memslot);
+
+ n = kvm_dirty_bitmap_bytes(*memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
- any = memslot->dirty_bitmap[i];
+ any = (*memslot)->dirty_bitmap[i];
- if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+ if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
return -EFAULT;
if (any)
@@ -1208,13 +1378,12 @@ int kvm_get_dirty_log(struct kvm *kvm,
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
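The log->slot decoding above fixes the packing userspace must use: address-space id in the high 16 bits, slot id in the low 16. A hedged sketch, assuming bitmap holds at least one bit per page of the slot, rounded up to a multiple of sizeof(long):

/* Assumes <sys/ioctl.h> and <linux/kvm.h>, as in the earlier sketch. */
static int fetch_dirty_log(int vm_fd, __u16 as_id, __u16 slot_id, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = ((__u32)as_id << 16) | slot_id,	/* mirrors the kernel decode */
		.dirty_bitmap = bitmap,			/* filled by the copy_to_user() above */
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}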
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
* kvm_get_dirty_log_protect - get a snapshot of dirty pages
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address to which we copy the log
- * @flush: true if TLB flush is needed by caller
*
 * We need to keep in mind that VCPU threads can write to the bitmap
 * concurrently. So, to avoid losing track of dirty pages, we keep the
@@ -1231,8 +1400,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
* exiting to userspace will be logged for the next call.
*
*/
-int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *flush)
+static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -1240,6 +1408,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
unsigned long n;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
+ bool flush;
as_id = log->slot >> 16;
id = (u16)log->slot;
@@ -1248,13 +1417,15 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
slots = __kvm_memslots(kvm, as_id);
memslot = id_to_memslot(slots, id);
+ if (!memslot || !memslot->dirty_bitmap)
+ return -ENOENT;
dirty_bitmap = memslot->dirty_bitmap;
- if (!dirty_bitmap)
- return -ENOENT;
+
+ kvm_arch_sync_dirty_log(kvm, memslot);
n = kvm_dirty_bitmap_bytes(memslot);
- *flush = false;
+ flush = false;
if (kvm->manual_dirty_log_protect) {
/*
* Unlike kvm_get_dirty_log, we always leave flush set to false here,
@@ -1277,7 +1448,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
if (!dirty_bitmap[i])
continue;
- *flush = true;
+ flush = true;
mask = xchg(&dirty_bitmap[i], 0);
dirty_bitmap_buffer[i] = mask;
@@ -1288,21 +1459,55 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
}
+ if (flush)
+ kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
return -EFAULT;
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
+
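The harvest loop above snapshots each live bitmap word with xchg(), so bits set concurrently by vCPU threads are never lost, only deferred to the next call. An illustrative userspace analogue, with __atomic_exchange_n standing in for the kernel's xchg():

static unsigned long snapshot_word(unsigned long *live, unsigned long *snap)
{
	/* Atomically grab the word and reset it; a racing setter lands
	 * either in mask (reported now) or in the zeroed word (next call). */
	unsigned long mask = __atomic_exchange_n(live, 0, __ATOMIC_SEQ_CST);

	*snap = mask;
	return mask;	/* non-zero: at least one page in this word was dirty */
}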
+/**
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
+ *
+ * Steps 1-4 below provide a general overview of dirty page logging. See
+ * the kvm_get_dirty_log_protect() function description for additional
+ * details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
+ * always flush the TLB (step 4), even if a previous step failed and the
+ * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM
+ * logging API does not preclude a subsequent dirty log read by user space.
+ * Flushing the TLB ensures that guest writes will be marked dirty for the
+ * next log read.
+ *
+ * 1. Take a snapshot of the bit and clear it if needed.
+ * 2. Write protect the corresponding page.
+ * 3. Copy the snapshot to userspace.
+ * 4. Flush TLBs if needed.
+ */
+static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ int r;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_get_dirty_log_protect(kvm, log);
+
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
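After the ioctl returns, bit n of the snapshot corresponds to page n of the slot. A small walker sketch under that assumption (visit is a caller-supplied callback):

/* Assumes <linux/kvm.h> for the __u64 type, as in the earlier sketches. */
static void for_each_dirty_page(const unsigned long *bitmap, __u64 npages,
				void (*visit)(__u64 page))
{
	const __u64 bits = 8 * sizeof(unsigned long);
	__u64 i;

	for (i = 0; i < npages; i++)
		if (bitmap[i / bits] & (1UL << (i % bits)))
			visit(i);	/* page i of the slot was written */
}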
/**
* kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address from which to fetch the bitmap of dirty pages
- * @flush: true if TLB flush is needed by caller
*/
-int kvm_clear_dirty_log_protect(struct kvm *kvm,
- struct kvm_clear_dirty_log *log, bool *flush)
+static int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -1311,6 +1516,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
unsigned long i, n;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
+ bool flush;
as_id = log->slot >> 16;
id = (u16)log->slot;
@@ -1322,10 +1528,10 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
slots = __kvm_memslots(kvm, as_id);
memslot = id_to_memslot(slots, id);
+ if (!memslot || !memslot->dirty_bitmap)
+ return -ENOENT;
dirty_bitmap = memslot->dirty_bitmap;
- if (!dirty_bitmap)
- return -ENOENT;
n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
@@ -1334,7 +1540,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
(log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
return -EINVAL;
- *flush = false;
+ kvm_arch_sync_dirty_log(kvm, memslot);
+
+ flush = false;
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
return -EFAULT;
@@ -1357,28 +1565,32 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
* a problem if userspace sets them in log->dirty_bitmap.
*/
if (mask) {
- *flush = true;
+ flush = true;
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
offset, mask);
}
}
spin_unlock(&kvm->mmu_lock);
+ if (flush)
+ kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_clear_dirty_log_protect);
-#endif
-bool kvm_largepages_enabled(void)
+static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log)
{
- return largepages_enabled;
-}
+ int r;
-void kvm_disable_largepages(void)
-{
- largepages_enabled = false;
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_clear_dirty_log_protect(kvm, log);
+
+ mutex_unlock(&kvm->slots_lock);
+ return r;
}
-EXPORT_SYMBOL_GPL(kvm_disable_largepages);
+#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
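With manual protect enabled, each KVM_GET_DIRTY_LOG is followed by KVM_CLEAR_DIRTY_LOG calls over the ranges userspace has processed. Per the check above, num_pages must be a multiple of 64 unless the range extends to the end of the slot; the full function also requires first_page to be 64-aligned. A hedged sketch:

/* Assumes <sys/ioctl.h> and <linux/kvm.h>, as in the earlier sketches. */
static int clear_dirty_range(int vm_fd, __u32 slot, __u64 first_page,
			     __u32 num_pages, void *bitmap)
{
	struct kvm_clear_dirty_log log = {
		.slot = slot,			/* same (as_id << 16) | id packing */
		.first_page = first_page,	/* must be a multiple of 64 */
		.num_pages = num_pages,
		.dirty_bitmap = bitmap,		/* 1 bits are cleared and re-protected */
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
}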
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
@@ -1754,12 +1966,6 @@ kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
-kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
-{
- return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
-
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
@@ -3310,9 +3516,6 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
case KVM_CAP_ENABLE_CAP_VM:
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
- case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
-#endif
return 1;
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
@@ -3320,6 +3523,10 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_COALESCED_PIO:
return 1;
#endif
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
+ return KVM_DIRTY_LOG_MANUAL_CAPS;
+#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
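Because KVM_CHECK_EXTENSION now returns the KVM_DIRTY_LOG_MANUAL_CAPS bitmask rather than a bare 1, userspace should treat the result as flags. A sketch:

/* Assumes <sys/ioctl.h> and <linux/kvm.h>, as in the earlier sketches. */
static int manual_dirty_log_caps(int vm_fd)
{
	int caps = ioctl(vm_fd, KVM_CHECK_EXTENSION,
			 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);

	/* Older kernels return 1 (only PROTECT_ENABLE); newer ones may also
	 * advertise KVM_DIRTY_LOG_INITIALLY_SET in the mask. */
	return caps < 0 ? 0 : caps;
}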
@@ -3347,11 +3554,17 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
{
switch (cap->cap) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
- case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
- if (cap->flags || (cap->args[0] & ~1))
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
+ u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
+
+ if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
+ allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
+
+ if (cap->flags || (cap->args[0] & ~allowed_options))
return -EINVAL;
kvm->manual_dirty_log_protect = cap->args[0];
return 0;
+ }
#endif
default:
return kvm_vm_ioctl_enable_cap(kvm, cap);
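Enabling the capability passes the chosen flags in args[0]; as the check above shows, anything beyond KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is accepted only when that bit is also set. A hedged sketch (KVM_DIRTY_LOG_INITIALLY_SET is the other bit in KVM_DIRTY_LOG_MANUAL_CAPS):

/* Assumes <sys/ioctl.h> and <linux/kvm.h>, as in the earlier sketches. */
static int enable_manual_dirty_log(int vm_fd, __u64 flags)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		.args = { flags },	/* e.g. PROTECT_ENABLE | INITIALLY_SET */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}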
@@ -4435,14 +4648,22 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
return &kvm_running_vcpu;
}
-static void check_processor_compat(void *rtn)
+struct kvm_cpu_compat_check {
+ void *opaque;
+ int *ret;
+};
+
+static void check_processor_compat(void *data)
{
- *(int *)rtn = kvm_arch_check_processor_compat();
+ struct kvm_cpu_compat_check *c = data;
+
+ *c->ret = kvm_arch_check_processor_compat(c->opaque);
}
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
struct module *module)
{
+ struct kvm_cpu_compat_check c;
int r;
int cpu;
@@ -4466,12 +4687,14 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_free_0;
}
- r = kvm_arch_hardware_setup();
+ r = kvm_arch_hardware_setup(opaque);
if (r < 0)
goto out_free_1;
+ c.ret = &r;
+ c.opaque = opaque;
for_each_online_cpu(cpu) {
- smp_call_function_single(cpu, check_processor_compat, &r, 1);
+ smp_call_function_single(cpu, check_processor_compat, &c, 1);
if (r < 0)
goto out_free_2;
}
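kvm_cpu_compat_check exists because smp_call_function_single() forwards a single void * to the callback, so the opaque argument and the result pointer must travel together. A generic illustration of the pattern; do_check() is a hypothetical stand-in:

static int do_check(void *opaque)	/* hypothetical per-CPU check */
{
	(void)opaque;
	return 0;
}

struct call_ctx {
	void *opaque;	/* caller-supplied argument */
	int *ret;	/* where the callee reports its result */
};

static void call_trampoline(void *data)
{
	struct call_ctx *c = data;

	*c->ret = do_check(c->opaque);
}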